From 26f3df7a45b80c2afba6804121eb1f8d48899d74 Mon Sep 17 00:00:00 2001 From: Haoyu Wang <30562758+blueyo0@users.noreply.github.com> Date: Mon, 2 Jan 2023 20:43:15 +0800 Subject: [PATCH 01/25] [Feature] nnUNet-style Gaussian Noise and Blur (#2373) ## Motivation implement nnUNet-style Gaussian Noise and Blur --- mmseg/datasets/__init__.py | 4 +- mmseg/datasets/transforms/__init__.py | 5 +- mmseg/datasets/transforms/transforms.py | 179 ++++++++++++++++++++++++ tests/test_datasets/test_transform.py | 108 ++++++++++++++ 4 files changed, 294 insertions(+), 2 deletions(-) diff --git a/mmseg/datasets/__init__.py b/mmseg/datasets/__init__.py index 58f71b62a2..8ae2574afe 100644 --- a/mmseg/datasets/__init__.py +++ b/mmseg/datasets/__init__.py @@ -19,6 +19,7 @@ from .potsdam import PotsdamDataset from .stare import STAREDataset from .transforms import (CLAHE, AdjustGamma, BioMedical3DRandomCrop, + BioMedicalGaussianBlur, BioMedicalGaussianNoise, GenerateEdge, LoadAnnotations, LoadBiomedicalAnnotation, LoadBiomedicalData, LoadBiomedicalImageFromFile, LoadImageFromNDArray, @@ -42,5 +43,6 @@ 'RandomMosaic', 'PackSegInputs', 'ResizeToMultiple', 'LoadImageFromNDArray', 'LoadBiomedicalImageFromFile', 'LoadBiomedicalAnnotation', 'LoadBiomedicalData', 'GenerateEdge', - 'DecathlonDataset', 'LIPDataset', 'ResizeShortestEdge' + 'DecathlonDataset', 'LIPDataset', 'ResizeShortestEdge', + 'BioMedicalGaussianNoise', 'BioMedicalGaussianBlur' ] diff --git a/mmseg/datasets/transforms/__init__.py b/mmseg/datasets/transforms/__init__.py index 7f67acec02..4ea3d81c98 100644 --- a/mmseg/datasets/transforms/__init__.py +++ b/mmseg/datasets/transforms/__init__.py @@ -3,17 +3,20 @@ from .loading import (LoadAnnotations, LoadBiomedicalAnnotation, LoadBiomedicalData, LoadBiomedicalImageFromFile, LoadImageFromNDArray) +# yapf: disable from .transforms import (CLAHE, AdjustGamma, BioMedical3DRandomCrop, + BioMedicalGaussianBlur, BioMedicalGaussianNoise, GenerateEdge, PhotoMetricDistortion, RandomCrop, RandomCutOut, RandomMosaic, RandomRotate, Rerange, ResizeShortestEdge, ResizeToMultiple, RGB2Gray, SegRescale) +# yapf: enable __all__ = [ 'LoadAnnotations', 'RandomCrop', 'BioMedical3DRandomCrop', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate', 'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray', 'RandomCutOut', 'RandomMosaic', 'PackSegInputs', 'ResizeToMultiple', 'LoadImageFromNDArray', 'LoadBiomedicalImageFromFile', 'LoadBiomedicalAnnotation', 'LoadBiomedicalData', 'GenerateEdge', - 'ResizeShortestEdge' + 'ResizeShortestEdge', 'BioMedicalGaussianNoise', 'BioMedicalGaussianBlur' ] diff --git a/mmseg/datasets/transforms/transforms.py b/mmseg/datasets/transforms/transforms.py index 5d1173f254..94e2643473 100644 --- a/mmseg/datasets/transforms/transforms.py +++ b/mmseg/datasets/transforms/transforms.py @@ -10,6 +10,7 @@ from mmcv.transforms.utils import cache_randomness from mmengine.utils import is_tuple_of from numpy import random +from scipy.ndimage import gaussian_filter from mmseg.datasets.dataset_wrappers import MultiImageMixDataset from mmseg.registry import TRANSFORMS @@ -1507,3 +1508,181 @@ def transform(self, results: dict) -> dict: def __repr__(self): return self.__class__.__name__ + f'(crop_shape={self.crop_shape})' + + +@TRANSFORMS.register_module() +class BioMedicalGaussianNoise(BaseTransform): + """Add random Gaussian noise to image. 
+ + Modified from https://github.com/MIC-DKFZ/batchgenerators/blob/7651ece69faf55263dd582a9f5cbd149ed9c3ad0/batchgenerators/transforms/noise_transforms.py#L53 # noqa:E501 + + Copyright (c) German Cancer Research Center (DKFZ) + Licensed under the Apache License, Version 2.0 + + Required Keys: + + - img (np.ndarray): Biomedical image with shape (N, Z, Y, X), + N is the number of modalities, and data type is float32. + + Modified Keys: + + - img + + Args: + prob (float): Probability to add Gaussian noise for + each sample. Default to 0.1. + mean (float): Mean or “centre” of the distribution. Default to 0.0. + std (float): Standard deviation of distribution. Default to 0.1. + """ + + def __init__(self, + prob: float = 0.1, + mean: float = 0.0, + std: float = 0.1) -> None: + super().__init__() + assert 0.0 <= prob <= 1.0 and std >= 0.0 + self.prob = prob + self.mean = mean + self.std = std + + def transform(self, results: Dict) -> Dict: + """Call function to add random Gaussian noise to image. + + Args: + results (dict): Result dict. + + Returns: + dict: Result dict with random Gaussian noise. + """ + if np.random.rand() < self.prob: + rand_std = np.random.uniform(0, self.std) + noise = np.random.normal( + self.mean, rand_std, size=results['img'].shape) + # noise is float64 array, convert to the results['img'].dtype + noise = noise.astype(results['img'].dtype) + results['img'] = results['img'] + noise + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob}, ' + repr_str += f'mean={self.mean}, ' + repr_str += f'std={self.std})' + return repr_str + + +@TRANSFORMS.register_module() +class BioMedicalGaussianBlur(BaseTransform): + """Add Gaussian blur with random sigma to image. + + Modified from https://github.com/MIC-DKFZ/batchgenerators/blob/7651ece69faf55263dd582a9f5cbd149ed9c3ad0/batchgenerators/transforms/noise_transforms.py#L81 # noqa:E501 + + Copyright (c) German Cancer Research Center (DKFZ) + Licensed under the Apache License, Version 2.0 + + Required Keys: + + - img (np.ndarray): Biomedical image with shape (N, Z, Y, X), + N is the number of modalities, and data type is float32. + + Modified Keys: + + - img + + Args: + sigma_range (Tuple[float, float]|float): range to randomly + select sigma value. Default to (0.5, 1.0). + prob (float): Probability to apply Gaussian blur + for each sample. Default to 0.2. + prob_per_channel (float): Probability to apply Gaussian blur + for each channel (axis N of the image). Default to 0.5. + different_sigma_per_channel (bool): whether to use different + sigma for each channel (axis N of the image). Default to True. + different_sigma_per_axis (bool): whether to use different + sigma for axis Z, X and Y of the image. Default to True. + """ + + def __init__(self, + sigma_range: Tuple[float, float] = (0.5, 1.0), + prob: float = 0.2, + prob_per_channel: float = 0.5, + different_sigma_per_channel: bool = True, + different_sigma_per_axis: bool = True) -> None: + super().__init__() + assert 0.0 <= prob <= 1.0 + assert 0.0 <= prob_per_channel <= 1.0 + assert isinstance(sigma_range, Sequence) and len(sigma_range) == 2 + self.sigma_range = sigma_range + self.prob = prob + self.prob_per_channel = prob_per_channel + self.different_sigma_per_channel = different_sigma_per_channel + self.different_sigma_per_axis = different_sigma_per_axis + + def _get_valid_sigma(self, value_range) -> Tuple[float, ...]: + """Ensure the `value_range` to be either a single value or a sequence + of two values. 
If the `value_range` is a sequence, generate a random + value with `[value_range[0], value_range[1]]` based on uniform + sampling. + + Modified from https://github.com/MIC-DKFZ/batchgenerators/blob/7651ece69faf55263dd582a9f5cbd149ed9c3ad0/batchgenerators/augmentations/utils.py#L625 # noqa:E501 + + Args: + value_range (tuple|list|float|int): the input value range + """ + if (isinstance(value_range, (list, tuple))): + if (value_range[0] == value_range[1]): + value = value_range[0] + else: + orig_type = type(value_range[0]) + value = np.random.uniform(value_range[0], value_range[1]) + value = orig_type(value) + return value + + def _gaussian_blur(self, data_sample: np.ndarray) -> np.ndarray: + """Random generate sigma and apply Gaussian Blur to the data + Args: + data_sample (np.ndarray): data sample with multiple modalities, + the data shape is (N, Z, Y, X) + """ + sigma = None + for c in range(data_sample.shape[0]): + if np.random.rand() < self.prob_per_channel: + # if no `sigma` is generated, generate one + # if `self.different_sigma_per_channel` is True, + # re-generate random sigma for each channel + if (sigma is None or self.different_sigma_per_channel): + if (not self.different_sigma_per_axis): + sigma = self._get_valid_sigma(self.sigma_range) + else: + sigma = [ + self._get_valid_sigma(self.sigma_range) + for _ in data_sample.shape[1:] + ] + # apply gaussian filter with `sigma` + data_sample[c] = gaussian_filter( + data_sample[c], sigma, order=0) + return data_sample + + def transform(self, results: Dict) -> Dict: + """Call function to add random Gaussian blur to image. + + Args: + results (dict): Result dict. + + Returns: + dict: Result dict with random Gaussian noise. + """ + if np.random.rand() < self.prob: + results['img'] = self._gaussian_blur(results['img']) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob}, ' + repr_str += f'prob_per_channel={self.prob_per_channel}, ' + repr_str += f'sigma_range={self.sigma_range}, ' + repr_str += 'different_sigma_per_channel='\ + f'{self.different_sigma_per_channel}, ' + repr_str += 'different_sigma_per_axis='\ + f'{self.different_sigma_per_axis})' + return repr_str diff --git a/tests/test_datasets/test_transform.py b/tests/test_datasets/test_transform.py index 2c18b8e027..397d1c0da6 100644 --- a/tests/test_datasets/test_transform.py +++ b/tests/test_datasets/test_transform.py @@ -778,3 +778,111 @@ def test_biomedical3d_random_crop(): assert crop_results['img'].shape[1:] == (d - 20, h - 20, w - 20) assert crop_results['img_shape'] == (d - 20, h - 20, w - 20) assert crop_results['gt_seg_map'].shape == (d - 20, h - 20, w - 20) + + +def test_biomedical_gaussian_noise(): + # test assertion for invalid prob + with pytest.raises(AssertionError): + transform = dict(type='BioMedicalGaussianNoise', prob=1.5) + TRANSFORMS.build(transform) + + # test assertion for invalid std + with pytest.raises(AssertionError): + transform = dict( + type='BioMedicalGaussianNoise', prob=0.2, mean=0.5, std=-0.5) + TRANSFORMS.build(transform) + + transform = dict(type='BioMedicalGaussianNoise', prob=1.0) + noise_module = TRANSFORMS.build(transform) + assert str(noise_module) == 'BioMedicalGaussianNoise'\ + '(prob=1.0, ' \ + 'mean=0.0, ' \ + 'std=0.1)' + + transform = dict(type='BioMedicalGaussianNoise', prob=1.0) + noise_module = TRANSFORMS.build(transform) + results = dict( + img_path=osp.join(osp.dirname(__file__), '../data/biomedical.nii.gz')) + from mmseg.datasets.transforms import 
LoadBiomedicalImageFromFile + transform = LoadBiomedicalImageFromFile() + results = transform(copy.deepcopy(results)) + original_img = copy.deepcopy(results['img']) + results = noise_module(results) + assert original_img.shape == results['img'].shape + + +def test_biomedical_gaussian_blur(): + # test assertion for invalid prob + with pytest.raises(AssertionError): + transform = dict(type='BioMedicalGaussianBlur', prob=-1.5) + TRANSFORMS.build(transform) + with pytest.raises(AssertionError): + transform = dict( + type='BioMedicalGaussianBlur', prob=1.0, sigma_range=0.6) + smooth_module = TRANSFORMS.build(transform) + + with pytest.raises(AssertionError): + transform = dict( + type='BioMedicalGaussianBlur', prob=1.0, sigma_range=(0.6)) + smooth_module = TRANSFORMS.build(transform) + + with pytest.raises(AssertionError): + transform = dict( + type='BioMedicalGaussianBlur', prob=1.0, sigma_range=(15, 8, 9)) + TRANSFORMS.build(transform) + + with pytest.raises(AssertionError): + transform = dict( + type='BioMedicalGaussianBlur', prob=1.0, sigma_range='0.16') + TRANSFORMS.build(transform) + + transform = dict( + type='BioMedicalGaussianBlur', prob=1.0, sigma_range=(0.7, 0.8)) + smooth_module = TRANSFORMS.build(transform) + assert str( + smooth_module + ) == 'BioMedicalGaussianBlur(prob=1.0, ' \ + 'prob_per_channel=0.5, '\ + 'sigma_range=(0.7, 0.8), ' \ + 'different_sigma_per_channel=True, '\ + 'different_sigma_per_axis=True)' + + transform = dict(type='BioMedicalGaussianBlur', prob=1.0) + smooth_module = TRANSFORMS.build(transform) + assert str( + smooth_module + ) == 'BioMedicalGaussianBlur(prob=1.0, ' \ + 'prob_per_channel=0.5, '\ + 'sigma_range=(0.5, 1.0), ' \ + 'different_sigma_per_channel=True, '\ + 'different_sigma_per_axis=True)' + + results = dict( + img_path=osp.join(osp.dirname(__file__), '../data/biomedical.nii.gz')) + from mmseg.datasets.transforms import LoadBiomedicalImageFromFile + transform = LoadBiomedicalImageFromFile() + results = transform(copy.deepcopy(results)) + original_img = copy.deepcopy(results['img']) + results = smooth_module(results) + assert original_img.shape == results['img'].shape + # the max value in the smoothed image should be less than the original one + assert original_img.max() >= results['img'].max() + assert original_img.min() <= results['img'].min() + + transform = dict( + type='BioMedicalGaussianBlur', + prob=1.0, + different_sigma_per_axis=False) + smooth_module = TRANSFORMS.build(transform) + + results = dict( + img_path=osp.join(osp.dirname(__file__), '../data/biomedical.nii.gz')) + from mmseg.datasets.transforms import LoadBiomedicalImageFromFile + transform = LoadBiomedicalImageFromFile() + results = transform(copy.deepcopy(results)) + original_img = copy.deepcopy(results['img']) + results = smooth_module(results) + assert original_img.shape == results['img'].shape + # the max value in the smoothed image should be less than the original one + assert original_img.max() >= results['img'].max() + assert original_img.min() <= results['img'].min() From 3ca690bad371fb751259ef3c7d66397dd8e81e9f Mon Sep 17 00:00:00 2001 From: Fivethousand <38652084+Fivethousand5k@users.noreply.github.com> Date: Mon, 2 Jan 2023 21:29:03 +0800 Subject: [PATCH 02/25] [Feature] Add BioMedicalRandomGamma (#2406) Add the random gamma correction transform for biomedical images, which follows the design of the nnUNet. 
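A minimal usage sketch combining this transform with the Gaussian noise and blur transforms from the previous patch (not part of the diff below; the array shape and the probability/range values are illustrative, not values these patches mandate):

```python
import numpy as np

from mmseg.registry import TRANSFORMS
from mmseg.utils import register_all_modules

register_all_modules()

# nnUNet-style intensity augmentations from patches 01-02.
cfgs = [
    dict(type='BioMedicalGaussianNoise', prob=0.1, mean=0.0, std=0.1),
    dict(type='BioMedicalGaussianBlur', prob=0.2, sigma_range=(0.5, 1.0)),
    dict(type='BioMedicalRandomGamma', prob=0.5, gamma_range=(0.7, 1.5)),
]
transforms = [TRANSFORMS.build(cfg) for cfg in cfgs]

# All three transforms expect a float32 image of shape (N, Z, Y, X),
# N being the number of modalities.
results = dict(img=np.random.rand(1, 16, 64, 64).astype(np.float32))
for t in transforms:
    results = t(results)
assert results['img'].shape == (1, 16, 64, 64)
```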
--- docs/zh_cn/advanced_guides/datasets.md | 2 +- mmseg/datasets/__init__.py | 7 +- mmseg/datasets/transforms/__init__.py | 8 +- mmseg/datasets/transforms/transforms.py | 119 ++++++++++++++++++++++++ tests/test_datasets/test_transform.py | 67 ++++++++++++- 5 files changed, 195 insertions(+), 8 deletions(-) diff --git a/docs/zh_cn/advanced_guides/datasets.md b/docs/zh_cn/advanced_guides/datasets.md index 06a75e54bd..29062e73f3 100644 --- a/docs/zh_cn/advanced_guides/datasets.md +++ b/docs/zh_cn/advanced_guides/datasets.md @@ -1,4 +1,4 @@ -# 数据集 +# 数据集 在 MMSegmentation 算法库中, 所有 Dataset 类的功能有两个: 加载[预处理](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/docs/en/user_guides/2_dataset_prepare.md) 之后的数据集的信息, 和将数据送入[数据集变换流水线](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/mmseg/datasets/basesegdataset.py#L141) 中, 进行[数据变换操作](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/docs/zh_cn/advanced_guides/transforms.md). 加载的数据集信息包括两类: 元信息 (meta information), 数据集本身的信息, 例如数据集总共的类别, 和它们对应调色盘信息: 数据信息 (data information) 是指每组数据中图片和对应标签的路径. 下文中介绍了 MMSegmentation 1.x 中数据集的常用接口, 和 mmseg 数据集基类中数据信息加载与修改数据集类别的逻辑, 以及数据集与数据变换流水线 (pipeline) 的关系. diff --git a/mmseg/datasets/__init__.py b/mmseg/datasets/__init__.py index 8ae2574afe..64bddf8081 100644 --- a/mmseg/datasets/__init__.py +++ b/mmseg/datasets/__init__.py @@ -18,9 +18,10 @@ from .pascal_context import PascalContextDataset, PascalContextDataset59 from .potsdam import PotsdamDataset from .stare import STAREDataset +# yapf: disable from .transforms import (CLAHE, AdjustGamma, BioMedical3DRandomCrop, BioMedicalGaussianBlur, BioMedicalGaussianNoise, - GenerateEdge, LoadAnnotations, + BioMedicalRandomGamma, GenerateEdge, LoadAnnotations, LoadBiomedicalAnnotation, LoadBiomedicalData, LoadBiomedicalImageFromFile, LoadImageFromNDArray, PackSegInputs, PhotoMetricDistortion, RandomCrop, @@ -30,7 +31,6 @@ from .voc import PascalVOCDataset # yapf: enable - __all__ = [ 'BaseSegDataset', 'BioMedical3DRandomCrop', 'CityscapesDataset', 'PascalVOCDataset', 'ADE20KDataset', 'PascalContextDataset', @@ -44,5 +44,6 @@ 'LoadImageFromNDArray', 'LoadBiomedicalImageFromFile', 'LoadBiomedicalAnnotation', 'LoadBiomedicalData', 'GenerateEdge', 'DecathlonDataset', 'LIPDataset', 'ResizeShortestEdge', - 'BioMedicalGaussianNoise', 'BioMedicalGaussianBlur' + 'BioMedicalGaussianNoise', 'BioMedicalGaussianBlur', + 'BioMedicalRandomGamma' ] diff --git a/mmseg/datasets/transforms/__init__.py b/mmseg/datasets/transforms/__init__.py index 4ea3d81c98..e584377368 100644 --- a/mmseg/datasets/transforms/__init__.py +++ b/mmseg/datasets/transforms/__init__.py @@ -6,8 +6,9 @@ # yapf: disable from .transforms import (CLAHE, AdjustGamma, BioMedical3DRandomCrop, BioMedicalGaussianBlur, BioMedicalGaussianNoise, - GenerateEdge, PhotoMetricDistortion, RandomCrop, - RandomCutOut, RandomMosaic, RandomRotate, Rerange, + BioMedicalRandomGamma, GenerateEdge, + PhotoMetricDistortion, RandomCrop, RandomCutOut, + RandomMosaic, RandomRotate, Rerange, ResizeShortestEdge, ResizeToMultiple, RGB2Gray, SegRescale) @@ -18,5 +19,6 @@ 'RGB2Gray', 'RandomCutOut', 'RandomMosaic', 'PackSegInputs', 'ResizeToMultiple', 'LoadImageFromNDArray', 'LoadBiomedicalImageFromFile', 'LoadBiomedicalAnnotation', 'LoadBiomedicalData', 'GenerateEdge', - 'ResizeShortestEdge', 'BioMedicalGaussianNoise', 'BioMedicalGaussianBlur' + 'ResizeShortestEdge', 'BioMedicalGaussianNoise', 'BioMedicalGaussianBlur', + 'BioMedicalRandomGamma' ] diff --git a/mmseg/datasets/transforms/transforms.py 
b/mmseg/datasets/transforms/transforms.py index 94e2643473..5d25e12641 100644 --- a/mmseg/datasets/transforms/transforms.py +++ b/mmseg/datasets/transforms/transforms.py @@ -1686,3 +1686,122 @@ def __repr__(self): repr_str += 'different_sigma_per_axis='\ f'{self.different_sigma_per_axis})' return repr_str + + +@TRANSFORMS.register_module() +class BioMedicalRandomGamma(BaseTransform): + """Using random gamma correction to process the biomedical image. + + Modified from + https://github.com/MIC-DKFZ/batchgenerators/blob/master/batchgenerators/transforms/color_transforms.py#L132 # noqa:E501 + With licence: Apache 2.0 + + Required Keys: + + - img (np.ndarray): Biomedical image with shape (N, Z, Y, X), + N is the number of modalities, and data type is float32. + + Modified Keys: + - img + + Args: + prob (float): The probability to perform this transform. Default: 0.5. + gamma_range (Tuple[float]): Range of gamma values. Default: (0.5, 2). + invert_image (bool): Whether invert the image before applying gamma + augmentation. Default: False. + per_channel (bool): Whether perform the transform each channel + individually. Default: False + retain_stats (bool): Gamma transformation will alter the mean and std + of the data in the patch. If retain_stats=True, the data will be + transformed to match the mean and standard deviation before gamma + augmentation. Default: False. + """ + + def __init__(self, + prob: float = 0.5, + gamma_range: Tuple[float] = (0.5, 2), + invert_image: bool = False, + per_channel: bool = False, + retain_stats: bool = False): + assert 0 <= prob and prob <= 1 + assert isinstance(gamma_range, tuple) and len(gamma_range) == 2 + assert isinstance(invert_image, bool) + assert isinstance(per_channel, bool) + assert isinstance(retain_stats, bool) + self.prob = prob + self.gamma_range = gamma_range + self.invert_image = invert_image + self.per_channel = per_channel + self.retain_stats = retain_stats + + @cache_randomness + def _do_gamma(self): + """Whether do adjust gamma for image.""" + return np.random.rand() < self.prob + + def _adjust_gamma(self, img: np.array): + """Gamma adjustment for image. + + Args: + img (np.array): Input image before gamma adjust. + + Returns: + np.arrays: Image after gamma adjust. + """ + + if self.invert_image: + img = -img + + def _do_adjust(img): + if retain_stats_here: + img_mean = img.mean() + img_std = img.std() + if np.random.random() < 0.5 and self.gamma_range[0] < 1: + gamma = np.random.uniform(self.gamma_range[0], 1) + else: + gamma = np.random.uniform( + max(self.gamma_range[0], 1), self.gamma_range[1]) + img_min = img.min() + img_range = img.max() - img_min # range + img = np.power(((img - img_min) / float(img_range + 1e-7)), + gamma) * img_range + img_min + if retain_stats_here: + img = img - img.mean() + img = img / (img.std() + 1e-8) * img_std + img = img + img_mean + return img + + if not self.per_channel: + retain_stats_here = self.retain_stats + img = _do_adjust(img) + else: + for c in range(img.shape[0]): + img[c] = _do_adjust(img[c]) + if self.invert_image: + img = -img + return img + + def transform(self, results: dict) -> dict: + """Call function to perform random gamma correction + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Result dict with random gamma correction performed. 
+ """ + do_gamma = self._do_gamma() + + if do_gamma: + results['img'] = self._adjust_gamma(results['img']) + else: + pass + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob}, ' + repr_str += f'gamma_range={self.gamma_range},' + repr_str += f'invert_image={self.invert_image},' + repr_str += f'per_channel={self.per_channel},' + repr_str += f'retain_stats={self.retain_stats}' + return repr_str diff --git a/tests/test_datasets/test_transform.py b/tests/test_datasets/test_transform.py index 397d1c0da6..c218ebd666 100644 --- a/tests/test_datasets/test_transform.py +++ b/tests/test_datasets/test_transform.py @@ -8,7 +8,8 @@ from PIL import Image from mmseg.datasets.transforms import * # noqa -from mmseg.datasets.transforms import PhotoMetricDistortion, RandomCrop +from mmseg.datasets.transforms import (LoadBiomedicalImageFromFile, + PhotoMetricDistortion, RandomCrop) from mmseg.registry import TRANSFORMS from mmseg.utils import register_all_modules @@ -886,3 +887,67 @@ def test_biomedical_gaussian_blur(): # the max value in the smoothed image should be less than the original one assert original_img.max() >= results['img'].max() assert original_img.min() <= results['img'].min() + + +def test_BioMedicalRandomGamma(): + + with pytest.raises(AssertionError): + transform = dict( + type='BioMedicalRandomGamma', prob=-1, gamma_range=(0.7, 1.2)) + TRANSFORMS.build(transform) + + with pytest.raises(AssertionError): + transform = dict( + type='BioMedicalRandomGamma', prob=1.2, gamma_range=(0.7, 1.2)) + TRANSFORMS.build(transform) + + with pytest.raises(AssertionError): + transform = dict( + type='BioMedicalRandomGamma', prob=1.0, gamma_range=(0.7)) + TRANSFORMS.build(transform) + + with pytest.raises(AssertionError): + transform = dict( + type='BioMedicalRandomGamma', + prob=1.0, + gamma_range=(0.7, 0.2, 0.3)) + TRANSFORMS.build(transform) + + with pytest.raises(AssertionError): + transform = dict( + type='BioMedicalRandomGamma', + prob=1.0, + gamma_range=(0.7, 2), + invert_image=1) + TRANSFORMS.build(transform) + + with pytest.raises(AssertionError): + transform = dict( + type='BioMedicalRandomGamma', + prob=1.0, + gamma_range=(0.7, 2), + per_channel=1) + TRANSFORMS.build(transform) + + with pytest.raises(AssertionError): + transform = dict( + type='BioMedicalRandomGamma', + prob=1.0, + gamma_range=(0.7, 2), + retain_stats=1) + TRANSFORMS.build(transform) + + test_img = 'tests/data/biomedical.nii.gz' + results = dict(img_path=test_img) + transform = LoadBiomedicalImageFromFile() + results = transform(copy.deepcopy(results)) + origin_img = results['img'] + transform2 = dict( + type='BioMedicalRandomGamma', + prob=1.0, + gamma_range=(0.7, 2), + ) + transform2 = TRANSFORMS.build(transform2) + results = transform2(results) + transformed_img = results['img'] + assert origin_img.shape == transformed_img.shape From 6af2b8eab90e2973113b0e4c61e37c215889cc4f Mon Sep 17 00:00:00 2001 From: legendchilli <37359271+suyanzhou626@users.noreply.github.com> Date: Tue, 3 Jan 2023 13:37:48 +0800 Subject: [PATCH 03/25] [Feature] Add BioMedical3DPad (#2383) ## Motivation Add the 3d pad transform for biomedical images, which follows the design of the nnUNet. 
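A minimal usage sketch (not part of the diff below; the input and target shapes are illustrative):

```python
import numpy as np

from mmseg.registry import TRANSFORMS
from mmseg.utils import register_all_modules

register_all_modules()

# Pad a (N, Z, Y, X) image up to a fixed (Z, Y, X) target shape; each
# axis is padded as evenly as possible on both sides, and axes that
# already reach the target size are left unchanged.
pad = TRANSFORMS.build(
    dict(type='BioMedical3DPad', pad_shape=(16, 128, 128), pad_val=0.))

results = dict(img=np.random.rand(1, 12, 100, 100).astype(np.float32))
results = pad(results)
assert results['img'].shape == (1, 16, 128, 128)
assert results['pad_shape'] == (16, 128, 128)
```

When `gt_seg_map` is present in the results dict, it is padded to the same target shape with `seg_pad_val`.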
--- mmseg/datasets/__init__.py | 9 +- mmseg/datasets/transforms/__init__.py | 12 +-- mmseg/datasets/transforms/transforms.py | 132 ++++++++++++++++++++++++ tests/test_datasets/test_transform.py | 43 ++++++++ 4 files changed, 186 insertions(+), 10 deletions(-) diff --git a/mmseg/datasets/__init__.py b/mmseg/datasets/__init__.py index 64bddf8081..d0aeeaa335 100644 --- a/mmseg/datasets/__init__.py +++ b/mmseg/datasets/__init__.py @@ -19,9 +19,10 @@ from .potsdam import PotsdamDataset from .stare import STAREDataset # yapf: disable -from .transforms import (CLAHE, AdjustGamma, BioMedical3DRandomCrop, - BioMedicalGaussianBlur, BioMedicalGaussianNoise, - BioMedicalRandomGamma, GenerateEdge, LoadAnnotations, +from .transforms import (CLAHE, AdjustGamma, BioMedical3DPad, + BioMedical3DRandomCrop, BioMedicalGaussianBlur, + BioMedicalGaussianNoise, BioMedicalRandomGamma, + GenerateEdge, LoadAnnotations, LoadBiomedicalAnnotation, LoadBiomedicalData, LoadBiomedicalImageFromFile, LoadImageFromNDArray, PackSegInputs, PhotoMetricDistortion, RandomCrop, @@ -45,5 +46,5 @@ 'LoadBiomedicalAnnotation', 'LoadBiomedicalData', 'GenerateEdge', 'DecathlonDataset', 'LIPDataset', 'ResizeShortestEdge', 'BioMedicalGaussianNoise', 'BioMedicalGaussianBlur', - 'BioMedicalRandomGamma' + 'BioMedicalRandomGamma', 'BioMedical3DPad' ] diff --git a/mmseg/datasets/transforms/__init__.py b/mmseg/datasets/transforms/__init__.py index e584377368..d066609227 100644 --- a/mmseg/datasets/transforms/__init__.py +++ b/mmseg/datasets/transforms/__init__.py @@ -4,11 +4,11 @@ LoadBiomedicalData, LoadBiomedicalImageFromFile, LoadImageFromNDArray) # yapf: disable -from .transforms import (CLAHE, AdjustGamma, BioMedical3DRandomCrop, - BioMedicalGaussianBlur, BioMedicalGaussianNoise, - BioMedicalRandomGamma, GenerateEdge, - PhotoMetricDistortion, RandomCrop, RandomCutOut, - RandomMosaic, RandomRotate, Rerange, +from .transforms import (CLAHE, AdjustGamma, BioMedical3DPad, + BioMedical3DRandomCrop, BioMedicalGaussianBlur, + BioMedicalGaussianNoise, BioMedicalRandomGamma, + GenerateEdge, PhotoMetricDistortion, RandomCrop, + RandomCutOut, RandomMosaic, RandomRotate, Rerange, ResizeShortestEdge, ResizeToMultiple, RGB2Gray, SegRescale) @@ -20,5 +20,5 @@ 'ResizeToMultiple', 'LoadImageFromNDArray', 'LoadBiomedicalImageFromFile', 'LoadBiomedicalAnnotation', 'LoadBiomedicalData', 'GenerateEdge', 'ResizeShortestEdge', 'BioMedicalGaussianNoise', 'BioMedicalGaussianBlur', - 'BioMedicalRandomGamma' + 'BioMedicalRandomGamma', 'BioMedical3DPad' ] diff --git a/mmseg/datasets/transforms/transforms.py b/mmseg/datasets/transforms/transforms.py index 5d25e12641..7e0bd0da64 100644 --- a/mmseg/datasets/transforms/transforms.py +++ b/mmseg/datasets/transforms/transforms.py @@ -1805,3 +1805,135 @@ def __repr__(self): repr_str += f'per_channel={self.per_channel},' repr_str += f'retain_stats={self.retain_stats}' return repr_str + + +@TRANSFORMS.register_module() +class BioMedical3DPad(BaseTransform): + """Pad the biomedical 3d image & biomedical 3d semantic segmentation maps. + + Required Keys: + + - img (np.ndarry): Biomedical image with shape (N, Z, Y, X) by default, + N is the number of modalities. + - gt_seg_map (np.ndarray, optional): Biomedical seg map with shape + (Z, Y, X) by default. + + Modified Keys: + + - img (np.ndarry): Biomedical image with shape (N, Z, Y, X) by default, + N is the number of modalities. + - gt_seg_map (np.ndarray, optional): Biomedical seg map with shape + (Z, Y, X) by default. 
+ + Added Keys: + + - pad_shape (Tuple[int, int, int]): The padded shape. + + Args: + pad_shape (Tuple[int, int, int]): Fixed padding size. + Expected padding shape (Z, Y, X). + pad_val (float): Padding value for biomedical image. + The padding mode is set to "constant". The value + to be filled in padding area. Default: 0. + seg_pad_val (int): Padding value for biomedical 3d semantic + segmentation maps. The padding mode is set to "constant". + The value to be filled in padding area. Default: 0. + """ + + def __init__(self, + pad_shape: Tuple[int, int, int], + pad_val: float = 0., + seg_pad_val: int = 0) -> None: + + # check pad_shape + assert pad_shape is not None + if not isinstance(pad_shape, tuple): + assert len(pad_shape) == 3 + + self.pad_shape = pad_shape + self.pad_val = pad_val + self.seg_pad_val = seg_pad_val + + def _pad_img(self, results: dict) -> None: + """Pad images according to ``self.pad_shape`` + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: The dict contains the padded image and shape + information. + """ + padded_img = self._to_pad( + results['img'], pad_shape=self.pad_shape, pad_val=self.pad_val) + + results['img'] = padded_img + results['pad_shape'] = padded_img.shape[1:] + + def _pad_seg(self, results: dict) -> None: + """Pad semantic segmentation map according to ``self.pad_shape`` if + ``gt_seg_map`` is not None in results dict. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Update the padded gt seg map in dict. + """ + if results.get('gt_seg_map', None) is not None: + pad_gt_seg = self._to_pad( + results['gt_seg_map'][None, ...], + pad_shape=results['pad_shape'], + pad_val=self.seg_pad_val) + results['gt_seg_map'] = pad_gt_seg[1:] + + @staticmethod + def _to_pad(img: np.ndarray, + pad_shape: Tuple[int, int, int], + pad_val: Union[int, float] = 0) -> np.ndarray: + """Pad the given 3d image to a certain shape with specified padding + value. + + Args: + img (ndarray): Biomedical image with shape (N, Z, Y, X) + to be padded. N is the number of modalities. + pad_shape (Tuple[int,int,int]): Expected padding shape (Z, Y, X). + pad_val (float, int): Values to be filled in padding areas + and the padding_mode is set to 'constant'. Default: 0. + + Returns: + ndarray: The padded image. + """ + # compute pad width + d = max(pad_shape[0] - img.shape[1], 0) + pad_d = (d // 2, d - d // 2) + h = max(pad_shape[1] - img.shape[2], 0) + pad_h = (h // 2, h - h // 2) + w = max(pad_shape[2] - img.shape[2], 0) + pad_w = (w // 2, w - w // 2) + + pad_list = [(0, 0), pad_d, pad_h, pad_w] + + img = np.pad(img, pad_list, mode='constant', constant_values=pad_val) + return img + + def transform(self, results: dict) -> dict: + """Call function to pad images, semantic segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Updated result dict. 
+ """ + self._pad_img(results) + self._pad_seg(results) + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'pad_shape={self.pad_shape}, ' + repr_str += f'pad_val={self.pad_val}), ' + repr_str += f'seg_pad_val={self.seg_pad_val})' + return repr_str diff --git a/tests/test_datasets/test_transform.py b/tests/test_datasets/test_transform.py index c218ebd666..8ed1005ac6 100644 --- a/tests/test_datasets/test_transform.py +++ b/tests/test_datasets/test_transform.py @@ -951,3 +951,46 @@ def test_BioMedicalRandomGamma(): results = transform2(results) transformed_img = results['img'] assert origin_img.shape == transformed_img.shape + + +def test_BioMedical3DPad(): + # test assertion. + with pytest.raises(AssertionError): + transform = dict(type='BioMedical3DPad', pad_shape=None) + TRANSFORMS.build(transform) + + with pytest.raises(AssertionError): + transform = dict(type='BioMedical3DPad', pad_shape=[256, 256]) + TRANSFORMS.build(transform) + + data_info1 = dict(img=np.random.random((8, 6, 4, 4))) + + transform = dict(type='BioMedical3DPad', pad_shape=(6, 6, 6)) + transform = TRANSFORMS.build(transform) + results = transform(copy.deepcopy(data_info1)) + assert results['img'].shape[1:] == (6, 6, 6) + assert results['pad_shape'] == (6, 6, 6) + + transform = dict(type='BioMedical3DPad', pad_shape=(4, 6, 6)) + transform = TRANSFORMS.build(transform) + results = transform(copy.deepcopy(data_info1)) + assert results['img'].shape[1:] == (6, 6, 6) + assert results['pad_shape'] == (6, 6, 6) + + data_info2 = dict( + img=np.random.random((8, 6, 4, 4)), + gt_seg_map=np.random.randint(0, 2, (6, 4, 4))) + + transform = dict(type='BioMedical3DPad', pad_shape=(6, 6, 6)) + transform = TRANSFORMS.build(transform) + results = transform(copy.deepcopy(data_info2)) + assert results['img'].shape[1:] == (6, 6, 6) + assert results['gt_seg_map'].shape[1:] == (6, 6, 6) + assert results['pad_shape'] == (6, 6, 6) + + transform = dict(type='BioMedical3DPad', pad_shape=(4, 6, 6)) + transform = TRANSFORMS.build(transform) + results = transform(copy.deepcopy(data_info2)) + assert results['img'].shape[1:] == (6, 6, 6) + assert results['gt_seg_map'].shape[1:] == (6, 6, 6) + assert results['pad_shape'] == (6, 6, 6) From bd29c20778697ca00ad8a8b575ac595c9492fa0d Mon Sep 17 00:00:00 2001 From: unrealMJ <45420156+unrealMJ@users.noreply.github.com> Date: Wed, 4 Jan 2023 20:39:03 +0800 Subject: [PATCH 04/25] CodeCamp #150 [Feature] Add ISNet (#2400) ## Motivation Support ISNet. paper link: [ISNet: Integrate Image-Level and Semantic-Level Context for Semantic Segmentation](https://openaccess.thecvf.com/content/ICCV2021/papers/Jin_ISNet_Integrate_Image-Level_and_Semantic-Level_Context_for_Semantic_Segmentation_ICCV_2021_paper.pdf) ## Modification Add ISNet decoder head. Add ISNet config. 
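A minimal smoke-test sketch for the new head (not part of the diff below; it assumes the repository root is on `PYTHONPATH` so that `projects.isnet` is importable, and the dummy feature shapes are illustrative stand-ins for ResNet-50-D8 stage outputs):

```python
import torch

from mmseg.utils import register_all_modules
from projects.isnet.decode_heads import ISNetHead

register_all_modules()  # lets the default CrossEntropyLoss be built

# Arguments mirror the config added in this PR (ResNet-50-D8, 19 classes).
head = ISNetHead(
    in_channels=(256, 512, 1024, 2048),
    input_transform='multiple_select',
    in_index=(0, 1, 2, 3),
    channels=512,
    dropout_ratio=0.1,
    transform_channels=256,
    concat_input=True,
    with_shortcut=False,
    shortcut_in_channels=256,
    shortcut_feat_channels=48,
    num_classes=19)

# With dilations (1, 1, 2, 4) the last three ResNet stages share one
# spatial resolution; only the last feature map is consumed here since
# with_shortcut=False.
feats = [
    torch.rand(2, 256, 32, 32),
    torch.rand(2, 512, 16, 16),
    torch.rand(2, 1024, 16, 16),
    torch.rand(2, 2048, 16, 16),
]
preds_stage1, preds_stage2 = head(feats)
assert preds_stage1.shape == (2, 19, 16, 16)
assert preds_stage2.shape == (2, 19, 16, 16)
```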
--- projects/isnet/README.md | 57 +++ ...et_r50-d8_8xb2-160k_cityscapes-512x1024.py | 80 +++++ projects/isnet/decode_heads/__init__.py | 3 + projects/isnet/decode_heads/isnet_head.py | 337 ++++++++++++++++++ 4 files changed, 477 insertions(+) create mode 100644 projects/isnet/README.md create mode 100644 projects/isnet/configs/isnet_r50-d8_8xb2-160k_cityscapes-512x1024.py create mode 100644 projects/isnet/decode_heads/__init__.py create mode 100644 projects/isnet/decode_heads/isnet_head.py diff --git a/projects/isnet/README.md b/projects/isnet/README.md new file mode 100644 index 0000000000..b2623e39f8 --- /dev/null +++ b/projects/isnet/README.md @@ -0,0 +1,57 @@ +# ISNet + +[ISNet: Integrate Image-Level and Semantic-Level Context for Semantic Segmentation](https://arxiv.org/pdf/2108.12382.pdf) + +## Description + +This is an implementation of [ISNet](https://arxiv.org/pdf/2108.12382.pdf). +[Official Repo](https://github.com/SegmentationBLWX/sssegmentation) + +## Usage + +### Prerequisites + +- Python 3.7 +- PyTorch 1.6 or higher +- [MIM](https://github.com/open-mmlab/mim) v0.33 or higher +- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation) v1.0.0rc2 or higher + +All the commands below rely on the correct configuration of `PYTHONPATH`, which should point to the project's directory so that Python can locate the module files. In `isnet/` root directory, run the following line to add the current directory to `PYTHONPATH`: + +```shell +export PYTHONPATH=`pwd`:$PYTHONPATH +``` + +### Training commands + +```shell +mim train mmsegmentation configs/isnet_r50-d8_8xb2-160k_cityscapes-512x1024.py --work-dir work_dirs/isnet +``` + +To train on multiple GPUs, e.g. 8 GPUs, run the following command: + +```shell +mim train mmsegmentation configs/isnet_r50-d8_8xb2-160k_cityscapes-512x1024.py --work-dir work_dirs/isnet --launcher pytorch --gpus 8 +``` + +### Testing commands + +```shell +mim test mmsegmentation configs/isnet_r50-d8_8xb2-160k_cityscapes-512x1024.py --work-dir work_dirs/isnet --checkpoint ${CHECKPOINT_PATH} +``` + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | +| ISNet | R-50-D8 | 512x1024 | - | - | - | 79.32 | 80.88 | [config](configs/isnet_r50-d8_8xb2-160k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isnet/isnet_r50-d8_cityscapes-512x1024_20230104-a7a8ccf2.pth) | + +## Citation + +```bibtex +@article{Jin2021ISNetII, + title={ISNet: Integrate Image-Level and Semantic-Level Context for Semantic Segmentation}, + author={Zhenchao Jin and B. 
Liu and Qi Chu and Nenghai Yu}, + journal={2021 IEEE/CVF International Conference on Computer Vision (ICCV)}, + year={2021}, + pages={7169-7178} +} +``` diff --git a/projects/isnet/configs/isnet_r50-d8_8xb2-160k_cityscapes-512x1024.py b/projects/isnet/configs/isnet_r50-d8_8xb2-160k_cityscapes-512x1024.py new file mode 100644 index 0000000000..a00d39237d --- /dev/null +++ b/projects/isnet/configs/isnet_r50-d8_8xb2-160k_cityscapes-512x1024.py @@ -0,0 +1,80 @@ +_base_ = [ + '../../../configs/_base_/datasets/cityscapes.py', + '../../../configs/_base_/default_runtime.py', + '../../../configs/_base_/schedules/schedule_80k.py' +] + +data_root = '../../data/cityscapes/' +train_dataloader = dict(dataset=dict(data_root=data_root)) +val_dataloader = dict(dataset=dict(data_root=data_root)) +test_dataloader = dict(dataset=dict(data_root=data_root)) + +custom_imports = dict(imports=['projects.isnet.decode_heads']) + +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) + +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='ISNetHead', + in_channels=(256, 512, 1024, 2048), + input_transform='multiple_select', + in_index=(0, 1, 2, 3), + channels=512, + dropout_ratio=0.1, + transform_channels=256, + concat_input=True, + with_shortcut=False, + shortcut_in_channels=256, + shortcut_feat_channels=48, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=[ + dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0, + loss_name='loss_o'), + dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=0.4, + loss_name='loss_d'), + ]), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=512, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + train_cfg=dict(), + # test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)) + test_cfg=dict(mode='whole')) diff --git a/projects/isnet/decode_heads/__init__.py b/projects/isnet/decode_heads/__init__.py new file mode 100644 index 0000000000..a451629c4c --- /dev/null +++ b/projects/isnet/decode_heads/__init__.py @@ -0,0 +1,3 @@ +from .isnet_head import ISNetHead + +__all__ = ['ISNetHead'] diff --git a/projects/isnet/decode_heads/isnet_head.py b/projects/isnet/decode_heads/isnet_head.py new file mode 100644 index 0000000000..9c8df540ee --- /dev/null +++ b/projects/isnet/decode_heads/isnet_head.py @@ -0,0 +1,337 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import List + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from torch import Tensor + +from mmseg.models.decode_heads.decode_head import BaseDecodeHead +from mmseg.models.losses import accuracy +from mmseg.models.utils import SelfAttentionBlock, resize +from mmseg.registry import MODELS +from mmseg.utils import SampleList + + +class ImageLevelContext(nn.Module): + """ Image-Level Context Module + Args: + feats_channels (int): Input channels of query/key feature. + transform_channels (int): Output channels of key/query transform. + concat_input (bool): whether to concat input feature. + align_corners (bool): align_corners argument of F.interpolate. + conv_cfg (dict|None): Config of conv layers. + norm_cfg (dict|None): Config of norm layers. + act_cfg (dict): Config of activation layers. + """ + + def __init__(self, + feats_channels, + transform_channels, + concat_input=False, + align_corners=False, + conv_cfg=None, + norm_cfg=None, + act_cfg=None): + super().__init__() + self.align_corners = align_corners + self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.correlate_net = SelfAttentionBlock( + key_in_channels=feats_channels * 2, + query_in_channels=feats_channels, + channels=transform_channels, + out_channels=feats_channels, + share_key_query=False, + query_downsample=None, + key_downsample=None, + key_query_num_convs=2, + value_out_num_convs=1, + key_query_norm=True, + value_out_norm=True, + matmul_norm=True, + with_out=True, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + ) + if concat_input: + self.bottleneck = ConvModule( + feats_channels * 2, + feats_channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + ) + + '''forward''' + + def forward(self, x): + x_global = self.global_avgpool(x) + x_global = resize( + x_global, + size=x.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + feats_il = self.correlate_net(x, torch.cat([x_global, x], dim=1)) + if hasattr(self, 'bottleneck'): + feats_il = self.bottleneck(torch.cat([x, feats_il], dim=1)) + return feats_il + + +class SemanticLevelContext(nn.Module): + """ Semantic-Level Context Module + Args: + feats_channels (int): Input channels of query/key feature. + transform_channels (int): Output channels of key/query transform. + concat_input (bool): whether to concat input feature. + conv_cfg (dict|None): Config of conv layers. + norm_cfg (dict|None): Config of norm layers. + act_cfg (dict): Config of activation layers. 
+ """ + + def __init__(self, + feats_channels, + transform_channels, + concat_input=False, + conv_cfg=None, + norm_cfg=None, + act_cfg=None): + super().__init__() + self.correlate_net = SelfAttentionBlock( + key_in_channels=feats_channels, + query_in_channels=feats_channels, + channels=transform_channels, + out_channels=feats_channels, + share_key_query=False, + query_downsample=None, + key_downsample=None, + key_query_num_convs=2, + value_out_num_convs=1, + key_query_norm=True, + value_out_norm=True, + matmul_norm=True, + with_out=True, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + ) + if concat_input: + self.bottleneck = ConvModule( + feats_channels * 2, + feats_channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + ) + + '''forward''' + + def forward(self, x, preds, feats_il): + inputs = x + batch_size, num_channels, h, w = x.size() + num_classes = preds.size(1) + feats_sl = torch.zeros(batch_size, h * w, num_channels).type_as(x) + for batch_idx in range(batch_size): + # (C, H, W), (num_classes, H, W) --> (H*W, C), (H*W, num_classes) + feats_iter, preds_iter = x[batch_idx], preds[batch_idx] + feats_iter, preds_iter = feats_iter.reshape( + num_channels, -1), preds_iter.reshape(num_classes, -1) + feats_iter, preds_iter = feats_iter.permute(1, + 0), preds_iter.permute( + 1, 0) + # (H*W, ) + argmax = preds_iter.argmax(1) + for clsid in range(num_classes): + mask = (argmax == clsid) + if mask.sum() == 0: + continue + feats_iter_cls = feats_iter[mask] + preds_iter_cls = preds_iter[:, clsid][mask] + weight = torch.softmax(preds_iter_cls, dim=0) + feats_iter_cls = feats_iter_cls * weight.unsqueeze(-1) + feats_iter_cls = feats_iter_cls.sum(0) + feats_sl[batch_idx][mask] = feats_iter_cls + feats_sl = feats_sl.reshape(batch_size, h, w, num_channels) + feats_sl = feats_sl.permute(0, 3, 1, 2).contiguous() + feats_sl = self.correlate_net(inputs, feats_sl) + if hasattr(self, 'bottleneck'): + feats_sl = self.bottleneck(torch.cat([feats_il, feats_sl], dim=1)) + return feats_sl + + +@MODELS.register_module() +class ISNetHead(BaseDecodeHead): + """ISNet: Integrate Image-Level and Semantic-Level + Context for Semantic Segmentation + + This head is the implementation of `ISNet` + `_. + + Args: + transform_channels (int): Output channels of key/query transform. + concat_input (bool): whether to concat input feature. + with_shortcut (bool): whether to use shortcut connection. + shortcut_in_channels (int): Input channels of shortcut. + shortcut_feat_channels (int): Output channels of shortcut. + dropout_ratio (float): Ratio of dropout. 
+ """ + + def __init__(self, transform_channels, concat_input, with_shortcut, + shortcut_in_channels, shortcut_feat_channels, dropout_ratio, + **kwargs): + super().__init__(**kwargs) + + self.in_channels = self.in_channels[-1] + + self.bottleneck = ConvModule( + self.in_channels, + self.channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.ilc_net = ImageLevelContext( + feats_channels=self.channels, + transform_channels=transform_channels, + concat_input=concat_input, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + align_corners=self.align_corners) + self.slc_net = SemanticLevelContext( + feats_channels=self.channels, + transform_channels=transform_channels, + concat_input=concat_input, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.decoder_stage1 = nn.Sequential( + ConvModule( + self.channels, + self.channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + nn.Dropout2d(dropout_ratio), + nn.Conv2d( + self.channels, + self.num_classes, + kernel_size=1, + stride=1, + padding=0, + bias=True), + ) + + if with_shortcut: + self.shortcut = ConvModule( + shortcut_in_channels, + shortcut_feat_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.decoder_stage2 = nn.Sequential( + ConvModule( + self.channels + shortcut_feat_channels, + self.channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + nn.Dropout2d(dropout_ratio), + nn.Conv2d( + self.channels, + self.num_classes, + kernel_size=1, + stride=1, + padding=0, + bias=True), + ) + else: + self.decoder_stage2 = nn.Sequential( + nn.Dropout2d(dropout_ratio), + nn.Conv2d( + self.channels, + self.num_classes, + kernel_size=1, + stride=1, + padding=0, + bias=True), + ) + + self.conv_seg = None + self.dropout = None + + def forward(self, inputs): + x = self._transform_inputs(inputs) + feats = self.bottleneck(x[-1]) + + feats_il = self.ilc_net(feats) + + preds_stage1 = self.decoder_stage1(feats) + preds_stage1 = resize( + preds_stage1, + size=feats.size()[2:], + mode='bilinear', + align_corners=self.align_corners) + + feats_sl = self.slc_net(feats, preds_stage1, feats_il) + + if hasattr(self, 'shortcut'): + shortcut_out = self.shortcut(x[0]) + feats_sl = resize( + feats_sl, + size=shortcut_out.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + feats_sl = torch.cat([feats_sl, shortcut_out], dim=1) + preds_stage2 = self.decoder_stage2(feats_sl) + + return preds_stage1, preds_stage2 + + def loss_by_feat(self, seg_logits: Tensor, + batch_data_samples: SampleList) -> dict: + seg_label = self._stack_batch_gt(batch_data_samples) + loss = dict() + + if self.sampler is not None: + seg_weight = self.sampler.sample(seg_logits[-1], seg_label) + else: + seg_weight = None + seg_label = seg_label.squeeze(1) + + for seg_logit, loss_decode in zip(seg_logits, self.loss_decode): + seg_logit = resize( + input=seg_logit, + size=seg_label.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + loss[loss_decode.name] = loss_decode( + seg_logit, + seg_label, + seg_weight, + ignore_index=self.ignore_index) + + loss['acc_seg'] = accuracy( + seg_logits[-1], seg_label, ignore_index=self.ignore_index) + return loss + + def predict_by_feat(self, seg_logits: Tensor, + batch_img_metas: List[dict]) -> Tensor: + _, seg_logits_stage2 = 
seg_logits + return super().predict_by_feat(seg_logits_stage2, batch_img_metas) From 2d67e51db362dfe557c1565d96abc635d85845ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E6=B0=B8=E9=9F=AC?= <53283758+Dominic23331@users.noreply.github.com> Date: Fri, 6 Jan 2023 16:14:54 +0800 Subject: [PATCH 05/25] CodeCamp #140 [New] [Feature] Add synapse dataset and data augmentation in dev-1.x. (#2432) ## Motivation Add Synapse dataset in MMSegmentation. Old PR: https://github.com/open-mmlab/mmsegmentation/pull/2372. --- configs/_base_/datasets/synapse.py | 41 +++++ docs/en/user_guides/2_dataset_prepare.md | 81 +++++++++ mmseg/datasets/__init__.py | 10 +- mmseg/datasets/synapse.py | 28 ++++ mmseg/datasets/transforms/__init__.py | 8 +- mmseg/datasets/transforms/transforms.py | 78 +++++++++ mmseg/utils/__init__.py | 6 +- mmseg/utils/class_names.py | 14 ++ .../ann_dir/case0005_slice000.png | Bin 0 -> 334 bytes .../ann_dir/case0005_slice001.png | Bin 0 -> 334 bytes .../img_dir/case0005_slice000.jpg | Bin 0 -> 33367 bytes .../img_dir/case0005_slice001.jpg | Bin 0 -> 33679 bytes tests/test_datasets/test_dataset.py | 15 +- tests/test_datasets/test_transform.py | 62 +++++++ tools/dataset_converters/synapse.py | 155 ++++++++++++++++++ 15 files changed, 486 insertions(+), 12 deletions(-) create mode 100644 configs/_base_/datasets/synapse.py create mode 100644 mmseg/datasets/synapse.py create mode 100644 tests/data/pseudo_synapse_dataset/ann_dir/case0005_slice000.png create mode 100644 tests/data/pseudo_synapse_dataset/ann_dir/case0005_slice001.png create mode 100644 tests/data/pseudo_synapse_dataset/img_dir/case0005_slice000.jpg create mode 100644 tests/data/pseudo_synapse_dataset/img_dir/case0005_slice001.jpg create mode 100644 tools/dataset_converters/synapse.py diff --git a/configs/_base_/datasets/synapse.py b/configs/_base_/datasets/synapse.py new file mode 100644 index 0000000000..86852918cd --- /dev/null +++ b/configs/_base_/datasets/synapse.py @@ -0,0 +1,41 @@ +dataset_type = 'SynapseDataset' +data_root = 'data/synapse/' +img_scale = (224, 224) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', scale=img_scale, keep_ratio=True), + dict(type='RandomRotFlip', rotate_prob=0.5, flip_prob=0.5, degree=20), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=img_scale, keep_ratio=True), + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] +train_dataloader = dict( + batch_size=6, + num_workers=2, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='img_dir/train', seg_map_path='ann_dir/train'), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict(img_path='img_dir/val', seg_map_path='ann_dir/val'), + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mDice']) +test_evaluator = val_evaluator diff --git a/docs/en/user_guides/2_dataset_prepare.md b/docs/en/user_guides/2_dataset_prepare.md index a795e3bfcc..5de5de2282 100644 --- a/docs/en/user_guides/2_dataset_prepare.md +++ b/docs/en/user_guides/2_dataset_prepare.md @@ -414,3 +414,84 @@ The contents of LIP datasets include: │   │ │ ├── 100034_483681.png │   │ │ ├── 
... ``` + +## Synapse dataset + +This dataset could be download from [this page](https://www.synapse.org/#!Synapse:syn3193805/wiki/) + +To follow the data preparation setting of [TransUNet](https://arxiv.org/abs/2102.04306), which splits original training set (30 scans) +into new training (18 scans) and validation set (12 scans). Please run the following command to prepare the dataset. + +```shell +unzip RawData.zip +cd ./RawData/Training +``` + +Then create `train.txt` and `val.txt` to split dataset. + +According to TransUnet, the following is the data set division. + +train.txt + +```none +img0005.nii.gz +img0006.nii.gz +img0007.nii.gz +img0009.nii.gz +img0010.nii.gz +img0021.nii.gz +img0023.nii.gz +img0024.nii.gz +img0026.nii.gz +img0027.nii.gz +img0028.nii.gz +img0030.nii.gz +img0031.nii.gz +img0033.nii.gz +img0034.nii.gz +img0037.nii.gz +img0039.nii.gz +img0040.nii.gz +``` + +val.txt + +```none +img0008.nii.gz +img0022.nii.gz +img0038.nii.gz +img0036.nii.gz +img0032.nii.gz +img0002.nii.gz +img0029.nii.gz +img0003.nii.gz +img0001.nii.gz +img0004.nii.gz +img0025.nii.gz +img0035.nii.gz +``` + +The contents of synapse datasets include: + +```none +├── Training +│ ├── img +│ │ ├── img0001.nii.gz +│ │ ├── img0002.nii.gz +│ │ ├── ... +│ ├── label +│ │ ├── label0001.nii.gz +│ │ ├── label0002.nii.gz +│ │ ├── ... +│ ├── train.txt +│ ├── val.txt +``` + +Then, use this command to convert synapse dataset. + +```shell +python tools/dataset_converters/synapse.py --dataset-path /path/to/synapse +``` + +Noted that MMSegmentation default evaluation metric (such as mean dice value) is calculated on 2D slice image, +which is not comparable to results of 3D scan in some paper such as [TransUNet](https://arxiv.org/abs/2102.04306). diff --git a/mmseg/datasets/__init__.py b/mmseg/datasets/__init__.py index d0aeeaa335..28f067983c 100644 --- a/mmseg/datasets/__init__.py +++ b/mmseg/datasets/__init__.py @@ -18,6 +18,7 @@ from .pascal_context import PascalContextDataset, PascalContextDataset59 from .potsdam import PotsdamDataset from .stare import STAREDataset +from .synapse import SynapseDataset # yapf: disable from .transforms import (CLAHE, AdjustGamma, BioMedical3DPad, BioMedical3DRandomCrop, BioMedicalGaussianBlur, @@ -26,9 +27,9 @@ LoadBiomedicalAnnotation, LoadBiomedicalData, LoadBiomedicalImageFromFile, LoadImageFromNDArray, PackSegInputs, PhotoMetricDistortion, RandomCrop, - RandomCutOut, RandomMosaic, RandomRotate, Rerange, - ResizeShortestEdge, ResizeToMultiple, RGB2Gray, - SegRescale) + RandomCutOut, RandomMosaic, RandomRotate, + RandomRotFlip, Rerange, ResizeShortestEdge, + ResizeToMultiple, RGB2Gray, SegRescale) from .voc import PascalVOCDataset # yapf: enable @@ -46,5 +47,6 @@ 'LoadBiomedicalAnnotation', 'LoadBiomedicalData', 'GenerateEdge', 'DecathlonDataset', 'LIPDataset', 'ResizeShortestEdge', 'BioMedicalGaussianNoise', 'BioMedicalGaussianBlur', - 'BioMedicalRandomGamma', 'BioMedical3DPad' + 'BioMedicalRandomGamma', 'BioMedical3DPad', 'RandomRotFlip', + 'SynapseDataset' ] diff --git a/mmseg/datasets/synapse.py b/mmseg/datasets/synapse.py new file mode 100644 index 0000000000..6f83b64150 --- /dev/null +++ b/mmseg/datasets/synapse.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmseg.registry import DATASETS +from .basesegdataset import BaseSegDataset + + +@DATASETS.register_module() +class SynapseDataset(BaseSegDataset): + """Synapse dataset. + + Before dataset preprocess of Synapse, there are total 13 categories of + foreground which does not include background. 
After preprocessing, 8 + foreground categories are kept while the other 5 foreground categories are + handled as background. The ``img_suffix`` is fixed to '.jpg' and + ``seg_map_suffix`` is fixed to '.png'. + """ + METAINFO = dict( + classes=('background', 'aorta', 'gallbladder', 'left_kidney', + 'right_kidney', 'liver', 'pancreas', 'spleen', 'stomach'), + palette=[[0, 0, 0], [0, 0, 255], [0, 255, 0], [255, 0, 0], + [0, 255, 255], [255, 0, 255], [255, 255, 0], [60, 255, 255], + [240, 240, 240]]) + + def __init__(self, + img_suffix='.jpg', + seg_map_suffix='.png', + **kwargs) -> None: + super().__init__( + img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs) diff --git a/mmseg/datasets/transforms/__init__.py b/mmseg/datasets/transforms/__init__.py index d066609227..e39f05c921 100644 --- a/mmseg/datasets/transforms/__init__.py +++ b/mmseg/datasets/transforms/__init__.py @@ -8,9 +8,9 @@ BioMedical3DRandomCrop, BioMedicalGaussianBlur, BioMedicalGaussianNoise, BioMedicalRandomGamma, GenerateEdge, PhotoMetricDistortion, RandomCrop, - RandomCutOut, RandomMosaic, RandomRotate, Rerange, - ResizeShortestEdge, ResizeToMultiple, RGB2Gray, - SegRescale) + RandomCutOut, RandomMosaic, RandomRotate, + RandomRotFlip, Rerange, ResizeShortestEdge, + ResizeToMultiple, RGB2Gray, SegRescale) # yapf: enable __all__ = [ @@ -20,5 +20,5 @@ 'ResizeToMultiple', 'LoadImageFromNDArray', 'LoadBiomedicalImageFromFile', 'LoadBiomedicalAnnotation', 'LoadBiomedicalData', 'GenerateEdge', 'ResizeShortestEdge', 'BioMedicalGaussianNoise', 'BioMedicalGaussianBlur', - 'BioMedicalRandomGamma', 'BioMedical3DPad' + 'BioMedicalRandomGamma', 'BioMedical3DPad', 'RandomRotFlip' ] diff --git a/mmseg/datasets/transforms/transforms.py b/mmseg/datasets/transforms/transforms.py index 7e0bd0da64..21b8e34e33 100644 --- a/mmseg/datasets/transforms/transforms.py +++ b/mmseg/datasets/transforms/transforms.py @@ -861,6 +861,84 @@ def __repr__(self): return repr_str +@TRANSFORMS.register_module() +class RandomRotFlip(BaseTransform): + """Rotate and flip the image & seg or just rotate the image & seg. + + Required Keys: + + - img + - gt_seg_map + + Modified Keys: + + - img + - gt_seg_map + + Args: + rotate_prob (float): The probability of rotate image. + flip_prob (float): The probability of rotate&flip image. + degree (float, tuple[float]): Range of degrees to select from. 
+            degree is a number instead of a tuple like (min, max),
+            the range of degree will be (``-degree``, ``+degree``)
+    """
+
+    def __init__(self, rotate_prob=0.5, flip_prob=0.5, degree=(-20, 20)):
+        self.rotate_prob = rotate_prob
+        self.flip_prob = flip_prob
+        assert 0 <= rotate_prob <= 1 and 0 <= flip_prob <= 1
+        if isinstance(degree, (float, int)):
+            assert degree > 0, f'degree {degree} should be positive'
+            self.degree = (-degree, degree)
+        else:
+            self.degree = degree
+        assert len(self.degree) == 2, f'degree {self.degree} should be a ' \
+                                      f'tuple of (min, max)'
+
+    def random_rot_flip(self, results: dict) -> dict:
+        k = np.random.randint(0, 4)
+        results['img'] = np.rot90(results['img'], k)
+        for key in results.get('seg_fields', []):
+            results[key] = np.rot90(results[key], k)
+        axis = np.random.randint(0, 2)
+        results['img'] = np.flip(results['img'], axis=axis).copy()
+        for key in results.get('seg_fields', []):
+            results[key] = np.flip(results[key], axis=axis).copy()
+        return results
+
+    def random_rotate(self, results: dict) -> dict:
+        angle = np.random.uniform(min(*self.degree), max(*self.degree))
+        results['img'] = mmcv.imrotate(results['img'], angle=angle)
+        for key in results.get('seg_fields', []):
+            results[key] = mmcv.imrotate(results[key], angle=angle)
+        return results
+
+    def transform(self, results: dict) -> dict:
+        """Call function to rotate or rotate & flip image, semantic
+        segmentation maps.
+
+        Args:
+            results (dict): Result dict from loading pipeline.
+
+        Returns:
+            dict: Rotated or rotated & flipped results.
+        """
+        rotate_flag = 0
+        if random.random() < self.rotate_prob:
+            results = self.random_rotate(results)
+            rotate_flag = 1
+        if random.random() < self.flip_prob and rotate_flag == 0:
+            results = self.random_rot_flip(results)
+        return results
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        repr_str += f'(rotate_prob={self.rotate_prob}, ' \
+                    f'flip_prob={self.flip_prob}, ' \
+                    f'degree={self.degree})'
+        return repr_str
+
+
 @TRANSFORMS.register_module()
 class RandomMosaic(BaseTransform):
     """Mosaic augmentation. Given 4 images, mosaic transform combines them into
diff --git a/mmseg/utils/__init__.py b/mmseg/utils/__init__.py
index 0fd58218d6..661796147d 100644
--- a/mmseg/utils/__init__.py
+++ b/mmseg/utils/__init__.py
@@ -6,8 +6,8 @@
                          get_palette, isaid_classes, isaid_palette,
                          loveda_classes, loveda_palette, potsdam_classes,
                          potsdam_palette, stare_classes, stare_palette,
-                         vaihingen_classes, vaihingen_palette, voc_classes,
-                         voc_palette)
+                         synapse_classes, synapse_palette, vaihingen_classes,
+                         vaihingen_palette, voc_classes, voc_palette)
 # yapf: enable
 from .collect_env import collect_env
 from .io import datafrombytes
@@ -27,5 +27,5 @@
     'cityscapes_palette', 'ade_palette', 'voc_palette', 'cocostuff_palette',
     'loveda_palette', 'potsdam_palette', 'vaihingen_palette', 'isaid_palette',
     'stare_palette', 'dataset_aliases', 'get_classes', 'get_palette',
-    'datafrombytes'
+    'datafrombytes', 'synapse_palette', 'synapse_classes'
 ]
diff --git a/mmseg/utils/class_names.py b/mmseg/utils/class_names.py
index a62eaac973..662199f21e 100644
--- a/mmseg/utils/class_names.py
+++ b/mmseg/utils/class_names.py
@@ -265,6 +265,20 @@ def stare_palette():
     return [[120, 120, 120], [6, 230, 230]]


+def synapse_palette():
+    """Synapse palette for external use."""
+    return [[0, 0, 0], [0, 0, 255], [0, 255, 0], [255, 0, 0], [0, 255, 255],
+            [255, 0, 255], [255, 255, 0], [60, 255, 255], [240, 240, 240]]
+
+
+def synapse_classes():
+    """Synapse class names for external use."""
+    return [
+        'background', 'aorta', 'gallbladder', 'left_kidney', 'right_kidney',
+        'liver', 'pancreas', 'spleen', 'stomach'
+    ]
+
+
 def lip_classes():
     """LIP class names for external use."""
     return [
diff --git a/tests/data/pseudo_synapse_dataset/ann_dir/case0005_slice000.png b/tests/data/pseudo_synapse_dataset/ann_dir/case0005_slice000.png
new file mode 100644
index 0000000000000000000000000000000000000000..a22059b58e158571cf61c33338a04517cdf549b6
GIT binary patch
[base85-encoded binary data omitted: new PNG annotation maps and JPG images for tests/data/pseudo_synapse_dataset/]
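To make the pieces added by this patch concrete, here is a minimal, hypothetical config sketch (not part of the patch itself) wiring the new `SynapseDataset` and `RandomRotFlip` into a training pipeline; the data paths, batch size, and probabilities are placeholder values following standard MMSegmentation 1.x config conventions:

```python
# Hypothetical usage sketch for the new dataset and transform.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    # With prob. rotate_prob apply a random rotation in `degree`; if no
    # rotation happened, with prob. flip_prob apply a 90-degree rot + flip.
    dict(type='RandomRotFlip', rotate_prob=0.5, flip_prob=0.5, degree=20),
    dict(type='PackSegInputs')
]
train_dataloader = dict(
    batch_size=2,
    dataset=dict(
        type='SynapseDataset',
        data_root='data/synapse',  # placeholder path
        data_prefix=dict(
            img_path='img_dir/train', seg_map_path='ann_dir/train'),
        pipeline=train_pipeline))
```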
zq3R>0T%t>V`gQ~*=6QjK_rE0Mi?!Dz7{*lK)WbBY_1GE}{Isc$QLD)Oc#7oJ+cqIMQKT#(g>@HF*++Ty7#1Q1V_vY5)viAMwzFuMN z`1D6BF!8bswOluuN;1dsG4Q2O6{lxi2eLNce{F=sc0Y@u-JK=zZ_RS|}KaI9RZk|*u8l`BDudEHcxxIh!1L=b?-ffdm1OV z;aRKj7iFYZ%f+#E5D2((^z*zd|Z zuzyl`)getCHtBcNM0Y5yl$ol44Dbh^rcVv(I!VjjzM!|}G7W}E>T)U9oEY8W%n0i} zVWkpGN2hr#*z?N%PRNZsVwI4727(5#o)CjLSezGcBhYuO{+KBFlGc$wZi1ig!Beg= z&hYgOTni&%^k0UAD2h8*V@6U+2iC?Ts?$R-ka$2#x0t;i)Abj(o?40PlgHhxo+#eI zvu2ss%6kt@N50+j$sey|*PAs)JiEBgs{x;gB3GJyn;DddWE)h)VvbkflHh~InEn}Z z_OGS$UNLfCyTzz8hi9D&Y|x(p_M^wXt2g%n+Q52Y$$Wd9Z?=uE^=H8`V{o%k`bfZD@5oQKdYwJvaB7OHhYq1#=T0O`-` zf&4|KDo#B+J9tp^J29krq&rg*`g_tz(@fdNFd{8# z1*%VqV`bKmechPB<+GjLSU+1zT<^fru;Td)l-K6GMS%iIky!rT2P(4f;fToTu13wrb>ZrqQheZ3bCH$NO2wESJ zw<^{pegmrL^@1B{o@A82Cg0pShY+&r2Vu%5c1Fuk5vbnwPo(77Y{nX-ZGqEWp#*qM z+zy+Gs~cc}f=2cbeB<7LYvC!J8ey2+yztcc0176XQ)vOKz#fz&={=syg^bbF?65y?COAjN+fBtP6>|PN;KGXZwqt?S2y2D0?k!2A6XwrjwnzPFp*8?NLXdTITx9_f$3widGMjSe3m zZYy%(1U-Qm13I#i9ufSIQhUJ+Q;=&?^B|l^b0y)4J;EVh{hr+xWl@%x*7MbwwS&cP zutRY44U}Zi-;qbsk&CtO09$YsV>>^2$eYkCT)PXrNO2YHSg?0d0{vi>Arhp3+j73a zqgw|?jh7n#pE4gKVk#ybOTZ{u)MKaM02)l!pMr-Q5(rj~UX@i&=w!}c z%p%|Yh|OGO3P3LU1BZf$S>;XQ)a@l3>D(C})*I24x%Ce}fI?!+qIb$|$8jh{0aW0r^#AitCA}_XP&0q=< zzyD?LJO0YXv-Q{R#|e+c4u{hr;_3~Q+hxnE)?BA13n0LBp-i@XkiDqS4a6uJ1mFDJxD@_ihb0 zg`-5};>L7p=sAa(dI6_mp*$cDJQR9QV$P)1OTdYFD>cw+kK5V>>%-_bWZK(D?#uO> z$n3HlAdJ+b-KRnD$t`YCwCvC&=ZN#6w<eM#fRf-fL_A%_GKjD81w4~20T%3Cs!?&#U&$W-5swsmmfqkGJ;7qNG3 zd<$vLr?+P_UyTrBYN!1znDTjf_9`}ca%g>oi-3iQ5Jj{o=H1N!l)V0Hnf7gt!q;xY zSDC{w>V|hyY4M<^jf`C{%1!JcTOr=-(4t|Jmk<4$Af&kY`XvQl6|%Ga!Tu2T75a$0 z|LJep$;16$jLA3Cc77Upy=$2`idFKN?B9zx?y?;QcA7K9N6-N$H;qS2>9#RBV1WS^$=fLZWRTPudZaZZr6{0>{?iyOcJL%LQw4S_3L=_vH7&r z;%)3DxCh1XhM#a`2CZUGp_#26_P|x=MHs zqxTCF6}@Oy5}j!NyS0;Zv<5j`n`j=r?z)bju(2K3`M9H%l=vcm!OHQxX6BJRh<|sw zM`zi}A*rxcLu-%&@lHfzvd6j+J?6KwAh}VBM3dB|vQ1K_k*2#(Z=gh}d{OjyWq6gr zd9m%-rmEh@fy|d-w>sthM2yD7kSz>;w5QB6cS@wo7GFn{Kr2>ec86U13A686tDvJ; z+jcXCl;V=}s}D~Jp!L_Qq8@NVcKI1vWbM+3!cI0Gr!rp{-fzyP)O0QGBR%}~eIvHi z!6Y0lK+ZG;TF+UVZX6eU`h7kd8mUje?muD!K3(WJEJnyhZsiVFTNv)ezPX z-L2C#kjsgSmFO@fa3M2sUf=<3(ykSJ$#bGW_4;{QFrv!&up7T>a5Yu-s;|WEBMb`d zCw@Tiv6^vsH@AdRR*!)MpS}^Q?v9DS*yX(Sh6KGLL`R7T1PdeqR%N|-f{ z=xA|rcUH)4Q)hTYU8NFw-Sb6|8)Jk?|MXYw+Ro>kIsGN`WCK|UE(2XELh1LKdG!T( z2T5Ov+~U~rp0-uEy7%tKAS2T^oD$S;qr$fgADG~Z@)k4a-{F168oqEFK}7d)j1mD~ z!@aJZUa+BbwJbHi9D&e^@5~I(TOW1Y$qI> zP@z(gg2{H~VN5tGUSethN(MicP=IMhPo=?cQvTl=Ucd#ENH+>bEOP4PJQSVlR>U?& zx2y+nH3HY*%mZDR35C`BC;OIfore(lf<){=nnwGN7&2^7Y}ubORy`)sV++j)1LYR> z-!p9GLM_aP(oenC>&x@RY-km;^!WugNL_p7{2h=kJc@_Rz_hb33R9CQ=)EgxkEN@@{>Gq*;RoNrTsBz%&L zc;0x<3oUNF(8XxZ+spC~4I-nJHcq2AA!Ms)fGBf)n-yCf0ZinMJBAlSV5jf_BTydr zRSp9^=A1t5i-6hVJ!(SwGntLCgYJd0pMhX4egn#ojtSnPTG&8I@%*4g@-BJ!?&#xn z5tM8TFBeOe>V-$kK@srYLva@tYe}b~#s&bG$cEF?r?ClTq{xyS$FQf3jSI+F^u|4w z4N1nlMP#*n-dX^;#HJ6&4duG<7(~yyOwX%8(g0Kcn^{M_s@svIX20W=D&?EmXCS_> zna5#ZUn3z_Vjo@3)xmzzq@kyqa{v*Hc`@ZtX!YMM*MS+pWmYnfxk=XSTw1!F;~0sP zceS72hIk0X7sCmA@{ID-BrVE~stApNL%G>OWR$H43Pg1LDXwuaVG=jFMgr#BlTlqA zF)f2{!ZHC8c)%F(j0v^;<%@u$#kRH_qPd^5)$e*)XcA_@2>%u4+=%gKVvoe;H{Z@@ zR%Z{jo3#0$*H-g3GDxkNV5h%j^A$!m)Av5jIOW3+wE4{CEyZlL6!Kyp*5t1#b^FFj z>1TMY)<4{xeTr)~n@k0o96f1=bOi?Bj}g==#%5$}RJW;+F*t)HcKxMU#;8u~!s^qm zb=YPYgSLEjCMO!(Jb%ulLP@@7C>n3&FhqKGW?|~wT6m+i6B2x_dle$RMBxpj@9fI) zP8+Pu%~IL66dJ?hi1edo=KIVHC$-Zz)uk)j`np;$;}ni(WORJ0)10dFu4AHWY;J98 zOd05C-TWv2->X0-)VwyJUxrXeIbx2Po@#r~mcti1ygBA&x4D1&b@A14t&P8m<_r=# zckQRlbFhA5()%hhl1--xfdpYVPXviYQm~IHP0Z}(Mm`KI1|+_Iplhbba7TiQzA?AF zz+Yvl(1!IGKvY3?XwDh!m$!8-jyww2DGiNuu(>{A(;(4}h3Hw6nP;g?B#9z!j3! 
z#_68deWFN)f z&t!TXbcHIm5_0tPD<^zJ@Sb%Ydj{l>*m;D>9gwcrFr3EtI>nwoEldZAaE&~%@cu>1d;jZW`PmGdSvy0C1>rNCmrD)xc+OabJ??B_u4DI@Bh~gA!ATR6TkFJZU9Y@Ppd_9zl;=k! z62=%re58roa6;Ri7)}1K7E#xl41IHA<3*{$_kNdjn83m4d7%T2@WQ)N)4LfuH`=jO zV54;kUe@z?-idpXHV=#MAgHv_5L*eub%y8QuXwe*y0j-jztZeg@hLWLX@sX49f3o- zdlo{Iv}qAT3Zkq*HyEzmh}P)VTZx-3Ek3$~`rr{=Lb$cOnkAIj5aak$WNJ+||AXSI zXhj~^lP_IjIe4I7>eI=pbB0}>pq@(EYU)c1LCV71<_2HArm~fjw_BL2Hq5}%Sz@|J z089HhhW#vG!@I5U$cTBA>g{Si047GWQogvp7 zEC)yDJr&LNfg`((NbDWXCF)VC`v7$i@F7FSzSV4ts1Jb(p=<6O%Nr7`dO|GNjlan! z6oJgXnq$6cjIka>i81>pxKD`FY~@~+D~^F7kt}`$L5_wqP6^jhb3g?1Ksii(gp_FB zV_U=1QP~*;zEy*xk_47Vke$$k} zZ2APwy8i8!zo^@1jxvh95qfVLBo@*zWnYo4hDwVgc->F+cZPgpc$7sc122OUTUC}e z-K@@Af0A{{+v@&HQe z*^V>GZuf0|jw2^XVAMgBC2YzMF`0Ej+ZZdrSg!LV$hJO4Em?zpdQNBZc1amUe4spI zQaYTRZCSBKXseQI?oehc0=QdvKmMUwD~NW(p;zD#!yh+^5k-DSaFjfp zg?A%K)15U#-{K9-MagS}GR$%}_2<$^th`T5Te>-gciDmq+BZ2Yku*80 z<%#M-$<2J99{R_}XBwmF^nQKZ(4T>y^H3No^Sj*1)6Cs^kjTUu_zmsdN7+VJWQuMw z_tLbK)}{>@pD{AyUqaGg{1&#_IPcGes&79mmcf*jn4@!mFF?F!(r!sy=ZN>tWCSo0 z9I1vxjPhg0#*E>NmsKBGlL=eB%REi^6lz`n`# zdYC4v7|$IK^v#|`z$`taYtWE?6DeuBdAH&6C>&wyY zd#~OdogY#ton%ED(VJsn5iQ&5Ytl|Yudqu%@Gczmlo)Ivb!d9>6O~DANAF!@({ZVD z%qV4|%Oz;adS!0~qpn2X;_x+9yJDbw@Rw%*E#)A&yrdJlc%ap+y5Uh&ps&kB8??Y* z@gx!Yv`%YzR`0=!6#5y643{5(|u?te6RTie)*;1|;>*-cO4;vq^&MOo9 zGHB-c&?*3*a#+fF8@HHxVRI6Ie?V|*dvWWlX7(6Y(PLLmSl#ptXg>9D_W2*cW_Z4S z2OG3aIP@{pe*59*M}c%r3pvX|yZ?V^F0yI&HwSXYyE&sDiYFha;_g&^xKkk8biXHY z6E>+;w+l}>7FSxCj_uQ89*q_L5Jb6UAymoivz-@d=}h~|_cH&dCnyL+UjMjuRkVn& z@BNlZaNcJO-bn1R^>Q)y?5ZRQn=^i7HIZmh)K^1eezQmr^`&-*sk77R+UBiASx_MV z#ksH?#QXXfF?sDO3Ut9P&5zN3HJlx18n~3a*tm$P?6aE+nXa4p!e7g`>)-5Is*JTz zSNa+Yt)!7c<~@|~vW?v^S`g&L@AKjW_I3exq8HwuLM&Hj%e^qMs`awsn z(x0_)ablRcMs!kJV&=)WcS4SQkxv%}fwvSrgA&rUWy(yv7PF5X97Jgn6d!6GSy9YX znc9AhGDA~_ZD?ce@ZN?Iy#{XjP0dbRKFAu7@aAT3l>d>r}si~ z@$MH8{|-2p;ArYD>HsulZyC0%GKyrP_H=Gp#C=!d>{s41 z6lQJLcY@{dXHl!$i|yQLRa&Kve9yo27+M@Saw=*#+-=EYs92&jEQkQ+{_v#mYKt@l zs0wiaafX)eYt}qp`?-!77iEYHN#j-~c~|tkBJ4$V{bgjE8*0%Y>7~{7qaG+OkV#&1 z8(sP$yI#lAg2-}YorG9ShbBBwx2_%!8-8t(vVe;MH^L=rol0tkAd=gj+1}6Ecgk1bHP48%4L`0VvM<% zNu%V?`vxgS4mb)iVV>CgNH1c==ly2#C)f$^b|w!7cbM1EG4a8uPZ@}>feSU-jLz77 z^%7odjG&#nwxyY%L36X4W3ApCLr@h{ck{vabe=q5aetY>OF_0Pw*fuOj&k*|AmbC| zCwbsJ>GNZT$^eNz3%CJ5yHLkB%iv+(#Dkzt#T2yJ*xA~CD?8DcuwXBRl~1%NOJcTJ zc4IX(!#Lhp1P^XRxB93c!dKd*2y+;;8b@X@!Acf;1`6o0J2k*tC%UPLEmVuN3rz}$ zeNL_iI#pB=tJ%We2GK*OgI@1~M{A~dwyFH{X=~lP`tkc-8X`nA&22=rhE!4N_TVj( zmJ^iTnWM1$7H^!$*C{&CL(*7rT_C=jB>7q}-Avp2hxobcJN2Db= z`0j$5Y9W|~6>n}cic06aT>Cf*mr8Vyj)$VfCJFFsj-|z!FN#kt8=huwUzcmwaLeTw zb^Bvo%87e)`dX#}tN-E_`NuKxUkRr73e8#j{cx}{ZJY+g_YF5TdfUVlnbSe6i~6Od zG|WIG0)#w_eF&s-7Il?*nb;V@4pJ#^&NxjjSWy$_6u7t6Ui_S z)@e5e=xz$*x18&ueXy96M}ChXY}lk{<^f`iahoW4!5>zcHZ_;ZX|0dH%jYuHiwQmP zCR(+8urv;k8aNrXX>fs7h7iK?T z)r!@+z>jw=!>`e1^XO9xk~uHkuJxuORD+EV4t8z51Kcx^&VG6s{ys|j$r~X(9Jc?o z-BQM~9vvmJE;txM2#+621}D5z=XF>Goo=^^P}S_ zW)9oK`yZp&$c1lZ_b&Wq%U|~y;jnGg^4;5()|^MmD9*&O)z*~AMh^s?jUZi%{rF+& z$=Z1+$vMz>V)$Zd@o0?uRcerGopRDe_c!yYj&Gk(wlx{EF|Kxpl8LzR&-!0RJ^vMJ zfn{wsQ4Fy*&+O8_1X0WJ^HSZjezl3W`Q~hf`kM?6Ogjrh0 z429oG;Grt@BJk4_TlK_S4ue6ISAD8{vFQ$n%9DA%(gpb`W%lK&uhO>edb}U3zb?vv#^18AEU1|%d9oTX?4Q`yyxIm7-7|^3!QoC21%B%q@8k_7YAx%e@cp&QYU%?i@r_GEYb2na1r~Ji5toJEc z$XELu%%Xbl8z$Ge>Q8v=LD)4d5BF29*7wtSc_MMedOsHnMU{NVAl&X{2D}pszhf4g z(4&z>UzX>=H6t9}nNN-mWvg!+EwNjR#Y~03nTi`1706R|uUW5n70S(NL2_H6&Y3=! 
zj-knH&gH`e`W)zV7#V7O(lWH*(=1GBWxP>@k1mQO^f%|5;mT=Vh~EgluC|$}{#BQ| z7XF1>?aGygoPzbmdrbO53VG2~NDIot;^tV}lxyk@6r($hOld$E~l2%)k3rIda|_u^mJkv7-Smo57we&j(Z z>1q%VRVq83quice*HXUav0=7}B@-ZO1#Ub;|8te_g-bFDQ&JsHRs>68C!e`4W8trD zj!?q8EUNR6O>7k}-yWPwA@)|%-;PPBIo}&ldR8s}uIyA8Myx2qMvuzKB6Q7Cy4|3N zh*sbzdn{#F(k_hIasDJ)kc{8o^~xVJ9c#EPxcwfJOz6!^aP!n4I&=X+Xlh?{DXmn9hdDeLoGzl0`D}fI z6Rt>lmlM+eeeMPiw8d{@F`4vMK)Vk*s+K9=b)Jip4TWx9WCY&-8EdtERfD(^($cPU z_ae4w;(ntT&!G-b3!C-c$Y|RI(c$XK!>GGVw)vKwxF=#r7S`8?E&;J|3?zz@mnqYp z=MSN!noGgG=mPjn1KZ_ZagptKmEI^drP&hX0ed6LTTn84cQI-v_tjD4WjI~)i1m>- z&w&2ca?W{z7Mp>hQZgkfyD9QFySmR?Y>!h(1V%epSs1$i-q+gF5jJ$w9NzK+0@(P7 z_Q)4x$4PIAhod_sPCszA>QR2t#VcQUf9wYfTtN`OwgEN^OPYI=uN@i!&7J|V8LpT6 z`BhO)6#d7Fg_b>3k0~Xly$eL%7MQ16M)XU;3`JI(HByfDAZ8DWbtyO${&ZSU;wWJE z47BYjS<&by+AJ*GdLKv@MV{xZh<8H?@VSWN5n>}N_x<;9Zz{Tv7>X<82|bfLcSNvz zv759OeJGGGvrzB->}iR9ZW%O#W6GIsRJRXl7%kvo&XK0MpGElIIeB&7X}@?VNV3dH z6N3bf{V{Jvgjp4#HdHgBctPI?D8`FBHS! zU+K)^w%o?Z1EXK9K45fP3s5}+$j1^zk{ptV+Gdky`0UvZm5V$!C~;lG&Z%YKN{dK7 z3=Ysi+;k9ouUGUqYEm2xIp@fVpSr8etDc`5pHu^bHvKU#3Oj|DsZ`269UY#5&FkE7 z&EQnoXTUA0Yn19Gb){^EJHcXGrZ$7erhYr{(|X^%EJePd?*~Py<-#cWB$#Ca66t(7 zlzw_%YxW_X97O;YwVD+!WuB;mcrG}AXdtHiz1G@vo_ocfoTr=N(n_#By_m7s_ez%S z+xNlITLux?Ka0ryT@4FPPx{_#OC|~Q4_-HS)yUv%b zuD5mCxqhRzy!XuttsrHhw^-oh*ZKwf(fd{c@*CU@#E>t?s{4?n zSj|~R4VV9B|4!{6+GXB}hwDW(`VFbS9&abR!)u`w3;uDBYi>_rxLh z7yA$Z9gpg!g>>u~DI5@p&+BUR7Os5{c=I&2sWmb)t=ix8ib!^gC5S_gK_mhw$rmyt z^bJ1`^dKKb!&xd~f1TG9_tBMa{00#9+M)9?%i}|`%&^9joG3v9H86;*Y7Th?Y`lx2 zYpi!;2YakUmJIk3)r(Vsl@ z6Sd4{XGq>$#O@!!Gq$`a5A5*>pc_L|8TM@>438H%5+EL7`R(4a-Pop;s^zX0zw*L0 zuL?nmQI51f2e2i?S+FBL`pe7)hQ{Gq$rmjn?=Q52mNZ`pglBN(O zIxGotiSnFIy9t}-#1KZy_6bKS6(unfT!BXw6$snE*L}5k(yB~KwU=?AA?t!JLQ$od zm&I->5S=rwKdeu52UUF>6)qpZPtCwH@R!%cef~k2Mf~$2F9O{&U}deDOt(?Q8TJg! z*+%@wuAqv6PlF9o3?I>4M7`GwHN!fc*q$`a&yz#PoT1nVisS_>FD*zl7@AdjGzQTr z>4Oc`H|OB^IT?78xOTe|7}67sppgyW3NcCy){kjb3Vc*Q@FRvw>MEM$d#aLm7WWlU z`7bw78eX|7eH;x$6Lsm%SPv3z<5!5Z&d`&Iqw(99X^>yb`>d-hLWO1aO8Pj+$KQ6c zKH-ZQ{N-9|qk+e?9?K0o*2&!|hR2gSDEmE1k5KgIINX*pGjNE%xFaBGWlB|14{ZE?SdPvI&|`^*O%3%O1jDEFb5{IG2afIbZ5F0mHxA4 zOor1E^CqKzg{Ez%Q;eZ*%I?*agTvSAEM>-q5lp`)xFAt_qZ|Z zh}qbO30)%dVu%KR`&`ux=}WU$A~AAwSBS)EFLPHmOt~TBXgLjrM;0Fs-eh57oi2y* zu(5Y4?I2V;Clccq7dbk*9SDB}C+&pz;@)b!FCWh3nf6p6(6|frpV&wbkgVeMcfZP& zwOjqk2y8|jwn6Yf^lj^Y-E4^ojV^pOk+I%S=`)U}u2M<%n(sbnsF67*M)eo1#_Z_# ze-a~Iwn z^PRKu&2;yC*{?jT9}zXl?@563lBVo&H{Ut81@X%+%Hvbe{OnrEuhGxEEd;iA$II*9 zG06-mJFaBGkrcC|TZVOox=Y<9#2E3;*Y@rDVYC9~H=?2!`V^~^MRksayzP{*iNQZ%s>0j7(%Husq4(oM}JcVq%lr&;R`0w(n_AU#qWCi3FzPS@^u*~_ssc|`#~m_2sUrj zpX~~gyX$HPJJ3*4zwUXMa2|)N^Y_1<10r1F?nE4h6Qm9dz%29=Qmx^Dx0&z*k zJuo3-_p&n9G=Gy*&Yiak=#58{__5<1^9}S3FWvhi3F}aJCtyo%Gx`|_(>q-( z_-j&)Bx98D#QWWRGxt~brrF3!LIba@Gz&o$36%zXtisSyOfWuLm`d>&CMfg@-y4bh^;1`Q z6`CrR7HNJQ&+VDGcHAgP;CL2hHp0Eugs+*;}%9`h0! zD6)_T<`z9J`*_xt$ENaUK=I5zb+lk_hU0V8GcZI}fh5<7e#Kj`9)lrc&ms-S+PPsx zj0f5_(eGz(SyJxp*0i$KBF!h-lnc`vOLcG$OHW*uW!RgjKM2d(8~UXL(~%C&?mbpG{=$LY_-d4SC!qgEE3O?pA`NjlFVrwlR{<& z36Z%dLEIM1i9d3%Sn6&hVwt53aE9Ww=6fR%4O!ehvEM$qn+=CEZdk3nO2%P#(YOzN zWG1FfMX)wwfX9#OcOKg=u05Ps)86VlS(&0VgA$mBOMUw$M_p4TLo0G|OY-Nmuf(^& zD0jM}^&$mn1{>8*&0<<=8&;dx0ZENKp*4KyQNyg*y$e>YUU#<8(q28!XY&v@MvZy`&A-wzT z3aMi4Ju^QT&7IXSiNl>YUFOdqWmrQ?7n z+rjERWG3pJ+dhGiuBu<^vZ!w-j8EwK3(-df0r~aG5n6+cst0zt$PexoNV&KYOF>-{ z-{jFYC;N>QfZuMm0TS-L^5_#SAkR;EtglE-uq4)T}_9biaI~hw7b?fhx_Iy zX=_V+k8p5z!pDrXN1p2LbPCxRG-Hk7yU2FIzexA%%IH=ey5pY|nqHH>sjGa$j3Xx! 
zE&Ag4G=EESe@&xk!*|@Tc-6n9A31iX^^OLaSl`@1#QbH;&T*Xptv|JCT`bb@R>(z0 z0pz?K(dma-WN^$fj*G+*lrTp|YDA+N4wgfQ=2mDAhptJJ^^##ex$en1vN%O}+OMMF zD3kA#s?jwkFrQg81m$7|TmoK$>b5omE)?)~PiFU;aKh62yG}zAkg=IJ8*0x0ZjV8> zd}mhh7+Jjj7T~smwR$c_ZCmnq&=8+5cJuqu_FH+uI%|j{sevLOb72qM34s zkAa`k*Qj<`Iy7efipC_`jF_Qf4T=NT_2NqsTS zBpiJSB7v1PMpnETHOg~u$LMhxr5>hUH4D8g*Wn2GAfqBU2-j?c$_i7UiV;|SkqV9^ zvn=DOI1HUrb$!3h`Q_+yM<>n>Ln?p@B6p*mzsP1a(q&wY@H-mPGeG+(} z{U^N2Nq5xm1n0iml!p@znqKEz-js2`^h}RJu*uF4Sxvk5=r1YpHH|+oD^&~H#2-dW zInE6S`Z6)@@#S7$%4lLk%Ki=_Z+PCSKuGM-e}4J?hd>$-i3*zAMc}hlD2%2LzCTiQ zalX32pI+!UV}-nvAkK6^GIkNTA;KTnPc2RIgc}hq0(W{O>+>u70H20zIdYG&JIwld zg%-uenT8@`Vqd>7do(po%jU=&MV%B!g#KZvG+_sjXFkRRraTa^`!tlYx3&=lK$H?`)kJ!6B3D-55prWVGFe75HIU4QQ z$zYkO;HjO0r?87YW5K1fyzTgzUl+k*Wr~F`pUG<7Kq|ByBo;99ME+ISx}1$f%3KA? zAV9nuh!Qc{<-x5(n3!j9i?PpL>+#Z_(rVqG_E78N5AIP^2pyw5F#G`PHgCZB?Q%wk z)`Moo@W2SpXgD=!>1k^0hM?9p(qVkelu!EGol2hu9(DDlo}0@&-k4}=Q|p`YoH=KO zFIGrYfxO9Vxzl3d-TFO+HiIKBK*SK;ia8O)!%mBMC`xnITB&tBmc2T zB@@}+xw%91LQ%Ccbbsvw7a)~Gz0JM!4{eZcmd|jq1Oh}?3o&WLtG>~-pcBoq>(|IV z<0r!sP11s!ADVuPZL%IZ$e)0*YjB4$d^NXvBHM{8y$00J|LCA>`Y#3wZPU~@CT+jI z^yB&1clhEP@rTF*OVwVb$DeCMe@S;+de0X$T$D5?f5u^vv9i!`l8kCJC9S0TJFPMO z?V52TNxN?v0KB`gubu&c!deAA@3%2LF4e6a3_#G5+`}#~X`XrXno-k}Ay$S8OCbPH zfk}^4f-cm2Wc(09M|R4+7jcCB);X4j$S_#O(Wb9u^6-jiE6T1QL^jysm!8I#NAX0! zYrEh> zGI3y6S#ieu%=Gw)F=z77G>Y1)p~fuvAUY7`oDy^1z|iJwo(C;ob6#Swv!CGtF-Y`A zA>6wi8!6n=Ke7I0OALIvR;+=+qAscTw2UmBYML@Q8dEgNpVqn2J<+BSV-;C}z|x55F8EvgQ23VLk5ROJ5a&5)Byg}?MqM*}ZDAN=A)?X& zbGjLsLfFcTc`il}EY|i6B$|1BJnC?zG9|;{2*gbV@*H|UP}0O-{%ut&UFQ9hW5N!Y zOh!&|$dSZvH!*)#VCQ_|S@^sey`|@=${_{t_c0%=GkqE9e``MK+(5aZjX!r=&$v0} z_P6G#QS4VnBUR>}z6Id+B>4WZApcz?&%Ef@hNg4uz-!%YmeRAa;am4pkUh%eStrXQ zOZ91hr8@*hO{jq*?amm0-GGhG7OYEsk5hxp+{0Wp5761 zO8pcGFP&h{j2rd|C(yk2dR3A>ViFfobW6C5AqPwS8A61Swu~piNv(8NBpR6y$vWcc zE$bO9-~(Xdx_}2N%io`w_i0&J;z5*pmEhKOqFCR0e%v$RHyo>g}6i z;vlXpUzt=*V31c|DSAgcO1G5GTJq1x$eO}jf9>?)HCs6`w zU-S_+M5!`jdXX2xZ@g=`FdNyy>s4)gxwKe=F=8PE2p_HGZW?5Lkrob>*71+s4qM6A zsr6Rqo3ZBn)Z)NNnxSpHy?!W0LXJ22>x2vS90HLMI1Ij!3A{#wtbt-3zTy7(U9I|KFZLA<4hp1 zY)jT7FH!d{QSL3+=Lq%i@%ZPXOBokYqRo(*GTIv9?I7PolayE6z|V-1;!168yFbeg z1I00$A)uK~?=QDGH%2LnM(dwpP3RklVtJ_UcC;OBGA)+6P$m;6@?X$sZwQL%A?B%e*VA$qQy$zSgP$a#ES;i}^M4P^-$LG4W7Uo`<=pQ>-QY3-!YIY1lWLH6~_?ReW& z1Jx-$)Vh#VhHa`42>giN^xLW5h|Aku+*f{tgiy+n*Q#0Mu6kc;j_b*Fa>TI=RzVLg zxvI&jd1OucGPhGfet4q{249zF61A4!(x~A#j8C=)59n^3Z0rPOPl9J|v{j9fE)#hA zQn_^x)g)iNDgTN@TTY%a?si>ru#~Mom}pljc|#|*a3wsnuTu)j?QG{^bn|hYW+7Vd zIPM0;7982_{{@J=KN-(Cnzsf>d%wSwkVKUCvw{cJ#<~=gl>J0=?jzSmm?M`s9RC+ zut4ef{!@pO$q~hZIhY?TH1D$!#X7IvteRxc{G&hh5FT8&+wzL5X~3v(piyEZm`5ku zcP!_eg!dXpxtZ^(-*YPQYtzQ|w9+>p8+V<{bbs6m5SY8?H+uUk3Lzz^Qe)wno_llN zlNa|e%R*5J-?S-Ei$*B7ngJxjzmsHE7WOK}zfaJIqKLCUIzEU&vZeehU_eqY0Lw&w zHy<6dg2JjpPwT7cJCW+*ZZ3_gyKQ$L7kQC~DeYtC#J;V~S^&g_J=*%!k>4sXBaWog zLUTHqg5GGdA$ajP%iT;M=658dh8Ma%Xf`Xx=jUMsI2W#=NuzdG6kv@FpX9>k(Tu8o zT^irgiJ@=NVQTt1Rx_cR z0)q-Cvz{6L0)%f#`|N`*1m<~-up>A(od`(rkUL24DTJAG{;kF%{!>7#=@+k$c}XU_ zRFJq&zI~H@wE?r;dhL)GDq~!n?fGk^J6F*& z4_SLPeF=dKlEW-1YeUl&+pSH!oIGGjoo*^$XVRG~-w>y+!fu#=?F+rg!%_TZ^J+@p zkSjK!`#jlDx~}*yU}^hV9sJqLZJcwd-9bc!XALbK*+~3&y59f2`HX(i63QDmR|78R zkIUnq_{}Gpd*M$yyzK&>#|__so`uWXjM>clXLIVa47ZFfX?zxla0G^Gr_7L<7F zd6qjE%q_C+QFJ(MQ23A|dC6u^)^YXcr?|(Kl7s{OgP?s>EhRCS<(JP$sRL!`d46j< z&L0m_L|<~3#KY;fpQ5rbR!V#s`?_~%7Hb6%vZn*k#|XP+1vF2wP=)X%(j>I&y^i}g zOgE}YoB6;>1?Qb!53NqEZM{vcZS;&XJ*I4eOm79W^jT}yceii(_Uo1dx!jA^UVHvK z74?A2XrMn`wbOgEUVT#%c|jK-nT!so4tEotF4Q$UwQ!jS3*%I|C*IQC{EBzh(m#na z?BcWEt;>aH1i5v&vhJ!*Yr}qQ2wae~!li(-BD3wgT%JB= 
zy!@tNqPfS%TIT0R)*LKLrfy6-@5QK69=xtg-#kjRWmU_EMw?rJ9QWQVs3H$|L;73I zkW3N5a7(|GjGKc)x&G?ADJt5#EHK~Mge#L^25(0!iG$XiG+4zD`nym&f-A{&5069H zm+&NhE?AgkQ|T}O?@T%6`OjQHw|g(!Ht4A8ZZ18}qs;x7?_|TYUa?Z{O`J&A7uQ@g zy33jxR#1V@xqpw!s;MvZvc!99JFv#(yJ9~IO#1-a+2u|ZjQ5>#_RW|5rnWDy)LIA+ z*xcfaJhaKF+RsVRoZK1<9YNnN##UZr%?T3Z<; zseRx56YyB-iWs#ynxIIVbnMNkENKz>g6x|6JAQ4A=wkXc(=t!?g^FmZuS}#<^`m3~ z7SAb)k3(=0ar9#kTF>!(&7lEW>7vcKREe%Ztc>>HN(#UTTm*iGA52qb;hYllxHl6WkLA8e z!feQZ|0Q;~OM#*B>rwk$n|P$$&YXf)QcYa_j9uqpT{b3Z0hf8Qih}dFMwF_p<*_lZ z_o%8z*eKF2x&4-PEbC9yild&vNDlPGH`^C^*jB1BQq29j{P3T_^jvev%77t?27PG9 zvE`~Fv#|80gt^^Itbmv%QPaS6Xbi9LJO_qQhC%Hu|MX=M6hkMow&RkyT~XDXX#RVx z2qRdP(q%pS{C@tm7@0}zZCGz#?j3^o{L>wCO7*uPMU_Vy+n`8^Mo`W=`*Y*Szr{ts zQu*<>MMCOs(`)O00huDV#%XKl|6kh4TAyZXCrZxdTHF4ui~Ud29n*C(L#WbWt0WGL z;&(Z>C2W^Yq7_+^+}9Mo%{)LHKNxdo5bfx+m5ME-Ekl+YR3X&UbPl}#3%r?GivH))-XLI96rk;MY=Lv7(&Jlevh1JhQ0b6C}hgZr2?@6CkYJ zD*qlU`sDW9#36~fgK(^2x8+hS#3z(}X?PQM_D&}rp$E?uZL3`&D@XugkjM0~ zlT(Zw>gG5#I64*X&fi0*@$*5D=#z_%KE}t4rl%R7S%?qi;e$~kG=Hyy|pDwDy z=_RtCpVe{|puM|@Elv{(7Aq*`l*d$fXi}tcRfan=ul^KfC~{Dru8cYho<0hf^E^|! z#5`>Xsv-|NmV`rwOGeGG*Ejh{v6J`=^ePInSW(7%9iAuk{2H=N zf3er{1w$nX&9NqS+i1v@pbtU$&H13fr9o!EY;i87^_HArEgqO6XkH`*!9aV8N;c-$ zhcwO`fr3s#G?GI8vb{w*eXCPMd$ck(GPW+?@-< zFSoPdI{Qb;&JL5eKB0WU8nyo8t=s1VwtCju+KSq1ls(Vq7AumzfzrWz7@Z>bg6~o( z6QpjuX>rev=W<^zpOokkT4IsULqqw5yQqv~cOsN3^Ge36U3xFr@3)f2qAgN>6t=~G z4)TWHO@iNAZIKMrXY!-7y}ihZL*3>Ia%endDGp+Ug4~1MLPbrUJ9#zhA86w}h|xdq zU^@>j)mEBj?rr9Ur!I-G|{bb64IjAV4$EKHdA5;bhkC%QK8Vc%Iop%ok- zkQ8~@S93p;=LM!VZ@k4ZzLn{zH&A(DBz@9c4@w{iiWkz#ibeTzqZXmNt!+%KP@}R5 z6(cuc&E!DnOA>C7Eh638(ZUD+j(R_ps!##hOhXe9f8}3^N{V|sFN`S=QQ-W$uSsuHlyXUINIdxj9?)jZ= zl9X|m5)jZfH`M>dcE0cX%jVy!v7<$>0ima`FBu~f`S2F;_o~u^zW_!DH&4FHZs*lT zRK7+S^Yv2apHNqonFXETiSqT^c)esbn0b&$&^o(N_&$QmGGM9-n!HOK{>BM@ox*Pj z=TTkH2J^^J#W+w4MRfC*ITC6WF7R#Mo9=h2ce13>7K_{Oij52aZeB6OMKuj#RYR*S9JpCHMsAQ<{I(Edaf@(5Cg4>$v(p>jG zbGM?}6Jz6ePrHXDuf}AHaCj1|wjGIbDJ3R%ab-Xiw#<(5+&DI`v<>t2S9i#IHu_b$ z1)9Y0zOAL-STip#ViYH%{h>m%05Hj^S z5BbBy4YR|tZ-&IhaMp-bSvL=1m&fXiOD4>rI=lOs(#*}Le9yR7U)g|RV}sz(nq}Z(?5JOtYITAwS&q{IiA(fTtL?< z(dbFZyWQ66LLuz0YF8f9_JLPdPLXpHE<6s7p)1L$Z)Uulrs2X?5XNz)CgKH#o`l27 zXhE@G;>n@mX{%q^U%=`ot?auPUJ+Q&*>q=>=q9H_zWxseGIQfDv|jnVPKfSx=;de`LshD>Fx(FvoOP zPEZ#cl@u!SWRpA*^X)qainJIA!vACwx8;q5%Wkb2IZ-WRGSmr}9v=!#?GfUM@i?Yn zKMAON)5|_j&7UN{W{t^{&f8%7CDI6sBBRnm{cb#~HN1oK+-dKv-%kP2Qw|1_h7w}} zWZ_2eDXzQ56BMH7rKt6#^5;_ntxMYY8ca4c$k|WEbL8vm2IpHRsV{}^;~R`|m>9%q zl2qOthmTa(*EUYzFWKOxD5|)6>(169+;AaaC+08Q>JwjMj3v938bVKWF5)vK7ZAp0 zuR!NN4cq26Kbm>(7}c}xZqt)p2!-T29JT3RW#fe`}5hY zPOS0KOW)*GBNeVV%jA!Af}dhQVmR>J;2E*qw4Luq)e*V{N_wV5t8nsM*k0arRO0C_ zYKhwi#jtLEg1f5T;Y9opYIC{C8e(Gd?AZ!Cd>aTRe_}?;pY2C1K+gLEN{oZLoGRSV zznxp9I=iF$-Z`72Jhf+7q-(DFNA#Aq=@>Z(HwWi0VBGrCcGJ8CH&PGXc>%J*nLAMh zSPlPpHju8YG$_cylHRss19)Pzl-_R2Q)+FNW@Qt89-*gq8?xL zA$ZB>fnb;L>KND;+i`Tum+{I4DK_n0aNg2=PC!$Wt0DVsu} z9Knt;H}K{xdX24DWV0}-IgX2ZnLp$+^j;NF`lO}24n*dAuA&$!2B9w#{tNU5mrHzQ zBiIMn)v^&r=oL@qGj$e=jn@#r+mS$)UK6MZ+rn>_U$)16;1F)4uUeX1l|VyTj`x;%Ld9^NSP0>yzb^YOh}kb*|HNO=+ZCxuVw5Mr?^Qwd zVb@}_Lg zgY~7L#j86SqDkQ|lkC6ldicSUfrNv{X}wq${bC=EK21`b1#*K!S@q={?F(sp@qZfT z5QK{61j(;khTj;N}qe`fX^Zra=ueR?+AxM**w_n3ZX=&WO| z`h$3)Jo8(~guPCd-fcL|Cn&N@NQ?)BjH$S!iu+j=+Pq--q3V|&GV60+HI;0dR2OIWXL1zd1a z4R0hLijjXr;8m_6DTbp>JW)2p_V+}i%C=ls3?>ONH?S(Yfy})QQI=2kfHu3JHMw{gw$&lqzYxxWIhBWK;hUviMj_R!CKg#=HMXUUfcd6Jbv) znpq*Mh9u{{H`el_)$vM_smsx%HYG`t+lUYJd9=%eIhM>~R`F?P(peAwI5CkB)prcv zY^|v=KGLy}KI-va5ire#se(COUN5%4f8o-C607Moq0(;^ebUf{_S1tDf4%)(UgV3! 
z%K)6Bm%bB`kDT0n%VpnToav}5a4GL?V1`Iz_wh6P?4yjex?JFx{q)I&*#_YRNqf$h z+dLepb8t+hyXst~cbwgK!YCTs02Q~ByZL0hw#L1xvXZ6!G=-=Ia)Z;DeNZhp4q2{=!4sS>! z*jew`k7Kky(Jzy~DRFlRJP*yj94=+q;#^a5mjQ=0hobmSBFY#10tj9QkCX5%bs#*J z!;qJz3bv7ZNH2%?2^Mb@J+GO3=iudad5f7C z`zh1n3XZVt?uX%|A9O4Qj{Jx1bNRK}0g@y1k$w2ED!|~}zg55;(j&`D0hh`bOFOU} z9sT|Cq)~!qHr=jtFoVQw-x4Xlo-Y)iriF(J6mwyjoxlG`I&9t6G$o~J&-1d%{QeL! zESkdr%7r=pd@k|wmd5IzV5B%>7oDs457=9e92#<+kPbNhojv-=5PQdi zE*rtp(^>0fu&|QOTA*Y$XcFNbHjVh1(n1a}okWSv?p50zmDr6K7ktpsdGDIpn5Ab} zpQhHF{L@7$)WpudOot4Ho zIsOTVWBn*|kHrPq2zesG&o6kM>(fjtuRLGFs?aAKcHEKZc$ohUz z@DV!6Od{2dxwte4P^^Y};g|)&L5mh8N6AAhrXYL*yEKX;Z8u-;wF@(ER$j0;l^aH-zs`gx z?~Mm_P6jqEFE1VDP&A0>7c%%*3VlmF^34SHI=DAU91vHIGH3e!q~85@smEVAY7=UG0rrjhtN6F_MnZc4m$`leSK=5@NC zzGM@si_Ss2;I0iJ;UWq1=jl+1H(w<2-wNuQ<4!ns%rkG*KswJdiVm?I+!qGPH6a{v zE#O+wY9&YS<^HMV{(Iq=d}2+X0c^nJ6f=b5JDLjCM;9Uge4*|hxuy}4^j4tfBW%4b9`BmJ@tqU zm4Ct(DDGLi#0l3;X_}?U5!gzSdf-Dx#K*mS32HqO)vqHffsTG z>K}qz?X%N%RFVB;XHBznuj8o<<2Y1A$FuB+L*&1PCvmV3smcvkkgJzUDYaS+tGl%S z1z<|zu4t|FNNl7VJ1@Yulc2G>T;Do$WrV|q{Z1*`g|uxq+yDJOj|4nS~P{G&>(G&SY z2*vGrwSHUG>)4XMAlx6_AwC{9){(H#i{|Ix287Y6e-Mw=%hOEDIpn$tQfoF9cwj;% z1^O(3R#|@Nsn;ZfL5hiFL#Mc>VmFQObwA9@?p{J@l#%2b^&D4H^=Z z4^>qWzp-1-0drA;xS#!$V%Ah`xeRSiA9wGvq4d}vgHUlAs*i{Ip0Bdg@QaFiR#D+d zj=f+Czs=<={=dK!_HUG5zJ8V!Jyw1+W|nM&BzsoELFh-DEuq$7J_X=Ch|oQD%2)=K zzfU27VY->XB(mTe4&GRK{ugD#6WTPbf2xeI?63KELtS5MCP)|Ev(wKkwSLhUKqIik zFjpI#d=71VB!4qFvM)-%&VQ=n50~A)pUd2M?hAV*+NXlA z7gdln_0DFsS~c;ir^c=>XYFVZ?Zi+BJ9w~rpN7yZSZI$T(yJZ`f7M_RZHy2(>K+qk z-#|MDaVFnA>Bz>BSRe?Xf$Wn$H_i&2LPkjUDQ8MS9P27m4KsLsP7e4K*XF?nzv(kO z$@*Qtc9_?{P~L!>wJOJciYlZyw6-rpEF2~Ly=wUllV;(bhPYg|f$xUM+TxzTA*-EDfLO9R}mL)bGHcR>ua=B#jbv;t;ybZIquL2#vD{9i!t>p8(FPm4e*K+Ad zsKb}Unp4U5tTk;otiCuQ<`vT7GJI(Gdg!wWlLBPflXF5J_?X|zlBMjD?J>c}RbK%o z@T^)s#uKb2eF(}38KDtAa>I2n~_f>x_6;-%N-94Xf;5 znKlh^(WHL1mB?}!mn!li6j}1!kAszLcN1G%j`%DjQ&a4gQtFIN+WcdfC%4m$PPmo{ z)Jt992@8-;N{#(!3Bak+)LRmq;leIzvJs5vY@%;PI{xIt-3$r+{{3)upJ8qKJ;~mi zqyRV^=htrBmgqObFP$lMhkDG^Ppbpo1M=0oVQ&qcg@u0tw(`yTh8dT-1vshk{AA@D zb0LvqUkvj};n^CMDxF#wp1qz?c%sXopC2sUm!LfU^|z1IfXhlyEMTreSU+qbCV$xi z6@e%0Jp_c83~bOxNd3OJ1_*4-GeIpdrm>)uEWJkt0+nw8yul2rC{DMC2uq}Rxa7GFJsZYzV|34z@ssQ$MRRe zWI^(NtVFMkJ0pDH4Nc=cOY3QKI1~xV-!x{cd?Z5lhaLS7&lAVqn#MMzx1%|gz&7dm zRk4r#aN+ybVaO5ZzoPQ!sLh)c-{mLW7NV%OC(etHAU=x(UkUtnB-?+ zrpI6HZ;bfS;lp7BOaloJl$ir=CLsLQAxscg_TeYSGfD+$&szWQAfaeCQ^WCt6=bZT zAk`rBNYz#acNSF2cxCbHJj|btchpVZW1({UaiOgYAg*ByP*3c)d##Gaoz~d|$WtP2!SwB{(j0(xONne?!7|4If1_n3} zl~`#aM-$~vWsk6}5si4*dGZ~786SF7Ab%=5CH7BzKV1Itq74uXOs#Nw{#dlK+VI&@ zwbt~SK2+vdmkWWrs3AQ)LBE%iWtzoWSdNcky!kt&;egwAr(sVn6Y=*S8PC7uX(;ZI z>4P`)I&4Z5sNimYyCM3#7y0SA4;wgxwe*<^Ss&?Bo8@TIJ+EnZGJ&{%+J@SxXl!y( zfA~p^RN7{&=IAqM%skyx^R%s#e5m~-F$onqy!UN5ep=k6lc7jdgZtQD|1%x?zqv*p zev3lH9MZf^-vx;j<;| zhWQtwT-|G$d5snprl_r4;Mf8yO)hSj2|L7qX{&J5vHmO43i~S4lZ>i)(8aM~zYeld zcJ3EIBXPOPod=Ub&0sh}Ql?2vB}gAQ{Pxgq1#-D_2BT#A#C#cw7=nkvjlTTy!W*G9 zZMoAUnQMTB<2oMw;t3mf5yJrwYX)t>?aRBF_Vv&EXkU6#RdZw?Z9cE!F}vY;ca4Im3O~&aynK_wx3ph5x(!G zi0Q#qC#_h8;onauK7q6y4UpPnQ37YV6ds>C_|DA1vJ~<3ciOLyNXYat`iS3; ziU?ZOY#N|q&;ZK`!=J%z@G=YtA=FAHpwU{9-Nzs6wsnrg@omfWIW8?6h!5DNTYb)8 zE3XvmKJp=ctVo`{2JS;yEV4a>+uR}7c~}P0RI2&EoQX!GK)z)tJ`$nz@ zhFa0gw1~_)Y3A_lhHD5$Xt!onl#$WKK3jk>a#~07&@YJD@;QdP`5=!2n1d_?-IwU1 zlW|@8ag0q4OkE5bi8+6PZ574G(Qbm>^yN-KworI*h(D%oVn0*;yTetCG`2MAf$i%% zvV|Oj)17OSDx339pRUskzfzy2XCuC>xyVDk?oGr{;5(0kvbY=#!FMK!+8!ib%}4mF z4(PP@;bf8G_gc2VVFzcH5~%8eUZIKM_gIV}0Kqg_v(OOf1Zx)l$PEB?4W3iw{N*y-==bh=i`?{w- zWqt+{a?is$7A$pw{($JbPgnH&Q&set)}~$1ml4cYO{BszccedLDf zVbM6O5pWy{KpdPR+SNrQ>DtsltFy2C;DD?FxQ)QUx*saTF-r;jkH_JwmnT|$l3=yu 
z<}K|c^2S%rWKKtO;?)={deKR(ENBzgCBOa2#ufcq>+NZ#LdYI72P4J6GIwBr`1?df zT{F(qp`Z(E#9vuO)=;Z{u>`~1z^)v|yCoVOkPE%Y3VZ?(GxgRVYp9PpAh7G@IZ-4f zQg~77{q=ov=#Ldur289ppI`;hH6W!EHQqBzcXy(Ol zpGMCQY^ZsZ@-L#tE~+2rIKvPMN{_nxuBNb5HmYQayRCuS!dq22e!u{PBt&v zsI<9OG(C=!ACE;Gw01Ri;*))PeWQVZ?_W0z(Gt?k@}0eDvR@Tgxp9n7kV_KLKGVcJ zYu=j2ZArauO3ocV&$P!IOl+np5$;6DLn}(GevfT{7p%)D>;en$n=>4*VU|nK3vJT5 zF8c&o$#+IiI!;zq*3+X!C6@T@0(%80eQZ-nkL|`iJX4qUTEXNc-ls`~X}(SEqYJbT zEt$?me14YT(4f75yL%!uGr8Tu1M*Raz=ZErm#mxXIp6Q7k)s_a2?P?C>Z6&Pe|VA> zgz4{&HTLS8-#IhD6Fk;2NkQqGV#Jzi`fHGe=$rl}e;vq!!078-FO@eU@L4`Xnv=+9 z#A(ei`-Y@^AsDyaLK^{hbxx@s1Xw{&K+OGh{L#l8$lGr{_m8uMT)8=j1 zR~rp2_zC<|ggcmQQcH6D$DV*>d7ZUZF3` zR7)u~;}BR^jfqGzApUGBakFHLlQmjr+@~iju-hU`5~bedDJ9;Xt7oMe3xg1=YQBJ5 zjK1h-npRFFCia2g%?|oMIVlnY8MrB~8~2)fN8^k{Hmr(?^`TnGF5UhMSU2Nr0Zaxq zQD%h+d=R0KpMd+cl8tj5wB7ynw+J$nAhn9!TIH6Qujg7u9BRQO?977OoT& z{hYZwHuda!sXZ&L!y=KSaF~Zj`C&r%yALC^K7JoJE++`n={k<`WRP4UsKf_VitX^M zs3bX^clL$-f?}1O1Jw~IW1us0!DMc(mquu763~RiW}z4tHLR(x*}p55F!O>v6I$rw z!pIoW7*pt()Ra~1;Ye4Qqx1sc^p+AwZ9GUnM)6mSYssz;4>e`ZmA7?E8QksRyF$s_xO4V0YJoSRnKJP>!c!_kMASvu#V|vV z>Y!df$>oOk$r}n>UyOR5DaOmoF2-&Lp&j2%Crv>i+mwtaM+_hpWi^)oG^|DfCy^C5 zUEbt?;y*t1E;MPxS9DDlG%Ay8??i>C*coZQbgJA2a!*Vm7?B4fr$;tEsYPL$8bG|T ziHk&ea7v|u##=`ZxcwKrq0%#f7CwDP_^jT72YE9CKm>niWt3CAX=!EXE5gd@kl ztP#CStascq>e+)`Ca=pr6O!nhVfWpBgU#|4C7OF=h%aG~-d~kbB_I+`sP$a^1f(ls zt0ql%?#(_UpsqX)_XpTMNYKV)I|#*zw3dex)5LcX7n%g^p?I>QP!!-8r@pFF zYCTSn6-l|!ooEL~2n*ZF(O_J@KqWM-6@)?#4ZiTT1~B{sObI2zPpcrozCp`|S9D^v(vdsCah?A{48e$!GibRwDYQ znSXG9eQqEOfw1CY7@KrIZYWyzE@69@wJ+xCSiKynC8niY6@lh>?(1QfC;txq!hR#b zFJhe>CsroObrioU24@TiB{Ic06YvO^dq<{sy&oQ;d}2U|U@3R#VL_S3QEn8jq_9ik zJMY+c4B$Ug3qnBlY~qO%cr_LIO8fJDi0L#WiZ3?de*~0Nd)J*nUj`D_CPlzw}jiSzq!ujHuTiMKOhw%=}7RKS`3#R z-c?i65n^u}B-1b?M(JHQXThPi34A9S%)0*j)&76qx_tW!kO!0g1q#V!p5SNbUtQ_* zc>vDBpvXvB$}&cMqd>ZIFkoxG(wtoIobuP(=JWUSpVS>=-% z7(QXHqOL+3Bv7zrCjr^(MX0TC@z#NrL?cQSQH5kN@>PlTg{yJUbtr z%KcOMp_;nVQ|csQ`Oyy?*a4hc%*xBEaTY}PA1&j*j-u#^d@s1pLymSxXvf1@T@iHA zCjKz77pe@b6uNRMcFe5J>Q}xN+1?zHnEkK>j`d4&Wrbv zz%@M9hu3667O19nCsvVw1{oKvV(Fr(3`>v$laWtK;Yd znKo#s?EamLy8L@yV&*>5?GmmouDb8zDAFtEMtvYhY%0>jsugG}hLz-mr* zq*E@XcF^ftZKrO;Aar072DkZ}D9p6Uc`E1gQM&WXMm}8-cbycOYR9z)X67_|c%Xl1 zep`r!_1KYe9^aZ532=Y4fl>B}+GKEzNqv$I9?lyxJ@Mzdy0} z&7v*-Lf5dDw6Z?WuEFE|TwYwCR-Zv##7+Vxt8exn;bX*TU?EfRlVE6NrBE|uLOx^< z=Q5<{uFKeVpd^Y;w*O^a6-<}|P9=o%{bo~}Ydbyd1Pdi}qAW<11HkZ%KVgxS4@cqK zQIu(03aa`=z7gX6Wzp&CRsEl&82e&c^B_tO;_{k{CLi+^fk^}cgPjE&;+5hYO&R*N z706}0e2g;4f@U-^+t;%~zQA*%Ft+32TnzIf7>+}B4&b{cSfKoo-y_KD7Prt-Pffo+9Lw7(GL-55%AAL- zYP^8$BjOI8j}Uxfgx;&c@2u~6w9(BP4}MOLOdK$P1hrrW>YX-m+UysDk+<4q@LQR1 z@(|s99oyD-WH`;_Hhq~~plwg%mY(9zbn6C(GqYR2YbD~4bj)L>yMaXy67j-m!ZR4& zpa%#LBBclg`s;e>(r>$FOFVBx)+a8zK`(KGcMw&iM1Rb|!=*x89-3L2uwtGz?z&YM z*HzuWfJ*@UK|d(sk&D|rOMBMpfiBkWQb`coeCRja(8O0Nq_AZyj{sgD9~GXy(R!w_1;ooV-Z@lhoq@jpB|foZtT9r7K!(;pMlqKY|kJC zspGH^xlG{KVEzfb+t}+2XhW0Aw?m!}RYmnzYERqnd;ekM|Ke)%f4ov1Y$e$sxzGsa zL>Iudct3@)PZ))*4H=tXI2$K7Vqz%Z^JG5kwu;IVe3;;O+@dg1ZKHYur7-Es#L41PJc#PU8|FIKkZ=f=qw! 
zjl7wuuV&s@)!eTAt8bs`eeXH@ti9Gc&vVbKzzYQ#c^Lo%0ss)=13WJRZviw^R0t{x z8UzACM@PfJBE-hR#Ka=OC%`47Af=+DASEZKhO#qI)3VZ$lQZ%&v2t+o@bFME2#N}D ziLi6?aQ(+1AarzeEKDq7Y;0mK8gd%0|L*I#6Tm}50+0YOh!#M?1A*~C&%FQ@;(wxm z{>ugYj~56DjEsT`K|{yDMBGsK0zd+R!AQtp6cl7+#NGag?*U{y6nq*^NmK&Wj}TfH zLayM%A82$^Rb51Clc!K_Q`Zo5jF+#7Nl58mGcYnS^YHTV3kV9meJ3p=D<`j@uA!-= zt)r`FW^Q3=Wo=^%b949b^z!xz4GWL>^f@vrDfvrE>ep{+=|6Mx@_!W+78O_5)YjEE zG&VJN_w@Gl!v_Y3eosx${F$AbUszk;*xcIQ+1=YeJHNQRy1u!+yZ?{8K!`m5r|Z8a z_P@ys50MuVGBOw$@*jDDkUS9q#zRJ-;Y7ukRE2zWA)w_7MkAC;{880~PRFfwN@VIf ziSZK3vqpdRAF2J9%>K_M7V>{7v;R4<|0S;l01FI4ygV=-AP(F;MKR_e{r^6g&ZME| zXzMbbqDV36fQ3$7RdbtZ{e;N3eTh|nWl6orU-~-zdY5-oL;{gy;Uez5BHHKUG{Lxu zIKMGuI5;H;r+Usy!kd~xz{{SFbmtpaMbAL>=vHd;2{R@ph1 z&TYmi`H#RZ5Go0n1S#I055ZytuFr_J%iR};N9O81QuF<8mZo(ROk7SdmP_2XtD+v- zQfnOafzF!~Ra)8WU$NgZCqsaHmvo@Wd+pz_iY|r$wPD{6!3EJi3vIau0!bkhOPo#< z+z&Pj5t%RZEqITbk(bs!E{ze-fG{irj*K>Aaoe%4Ll|?puRW z>nl9jXYMpy$9x#e63%o`Lk9fdz>rUsh|7`6sB1VrnU{zD4+zDa8+6*&<8P3V->)G0 zB{U4K#W;D8~TLu}~wuu_WPIM_KoL4;cXAbhD#wnmB(LO+$k)}(8ks5Co zTl7nIg4GDdyx=$k)fCU*-$!Z`(a?q>>Ya4M(sORc&Q8H`rK=ot5UVx=Fj#BWMEFyh zpnWn)O!Y**F07C&B&c4LTgkK6RWYb_O?FQLA2W2DDX_Ii@M-txK7D%1;^o&;`8y)# z&PTz~j{=;%!OAD-8ms3V%nyA^-!H->Lm+{AK!1BZ-`6DR&|2~$(dkQGqh@T2fW`cU zm7lR^AxUQCBM$`+_p{>v^<5B6<6`PCgxb3w#M(>KZ=OrkchA-LE1z&Ua&h>0mfKpJ zH>CYc@o3J#d-7EQGyaSM{$Gg#TAnyO!WzmT&0RLLaNVumfN9t+57P3MToYzH-)CW2;NE%33QT0 zx=ts8?JnKFHAfMHe?&jFtwvMIZ#4Nlhe32K@KKZg8=ISj$t&vMtU1K23XQZ#>F0kr z$Z%Q2U!%PFI4zCRHpa{syRgYQh}DVp3}pLFf5(WVr(++GH(MlEK|*hxw@j%n=7LFhUO86gsoc!Ou) zzKGVU*ifI5g%4J0;S+d%u+-}oyt7cMPh^G^*Mt%sMCpkts!tzpN%_>GyNm`F#Jm7#5uFOnYs}D zBms!$a7!g!qQr|uB2C7`%xaop&u2hHv&7fg*#0jI2eNOX7M;!p8+wm``d1Oyu6Tb* zYuLais>9HOJqa9q<`eY(a|z8|F58T#cMMr(rjU*@p^f8`q`5KfzZHaMgx~uI1|4yL zjfz_pu=6`4E})A#=IVZGj_D1})2q#RmxVeu9^tgt;9-a8G5sQ{#{edai8nDTQhnc^ zEEpm+h zN^_?i#9LLs>REsD2dC@ZfYb>Eu}Q^wsTXmgQ=LoL()`J|Z8iBq`zCpU0hM~zC1_kO zmN1qqq&%+#JQwBQc$}zA6XP`5+src_{SY>-d?T0?QE}*QUK}f_MFNy(A)ml+_{XzUraT12e`Ot z9vREEOr74E#!slA*!uLI3>CpEY%BA@ z&2L|r3aTxodpD#lx(Ldy@_A8y0iiXZlgYt7dM>Cpu1aY6)3kVK1wNp`1B9pR&sgmz zE0?pB^Od2laOSmYg|i8N+&Vzn4yBgc+j`!T#G&UngYwH_6Q25DxJctEDxo`V61|S` z5h&Qi88)F#ieABSTEg8SAYKMYGtE;ca*Zbu`;xZV+43;_O0 z<7Hv`@=KCj51rq}iN1aXziR_Mo?xJalJl3x-8t#+WYN46k~3U8Udxj8tHcp==7 zKF~NeY9SgS=*X^Uc|U`M#9z@gXTgnBFl_&<+n^cXiU!Dkdc5Qh?CgW3Ozv&`Q}6Og zNfa)ag6i+0a{1NM3QAm3?Ju74j;SD(a0$4XmKIQlhG6S>_4U3NF@YFaNN|X+fTyNC z?%m2h#K}fTLLKTpy_KMdi!IT)jaG{9X{Y}3Zm&PPuyQcy8E|<-EWm6>C!NU?H;NDX z*ir=KXHPVrEY+8&^vcgzdM!WYJOet}OE;7yCyPH0mG$%N_$X<9C-{r$I*bFb$5JFzj9PCOQ1TEr8Rr;yYZwK;IKKs~3%0_-{>QvVVRh;42lnNI=9yO){jqr z_hr-fpJwA_OOXdx1oKj8{Da#B%3JC`v3Epd!u`^CXmVqZYX1$zWWH19yjpHLQmo!IWx9&ur!O-g+n7rAmW zvfYYJ!0(kxSqKXo=s&)!u+_UtiypvpMfI|u0B0@p_m*!VGk`Q63TAob^J^nKnD}O} zCn$q1$A6bIBtnzWp4=dbd%>FQrnMGacYeVkog49IIVi1-A$;=2xC9TaZcYUSydN%9 zBl;mpg71R32uKs*0_&xQeb=e6DD`&|tRQMt}bV@zKQ(Uwnoh3%`qfU|5K@(?px9v6#ckYK@ylSKI2wNnBlh27Jqz z)iww&LLjFxW+D^d0=bo?SHzJjs1%9>Ptx8iC1@9)ZQUc9^2GGgP%vN^^p z!hB*GTB!Ls^#2|Uu(FvzaHY%bAOSmp8-wngR6I|*zjvDRWg3eE^B7~Q5yHt?;$>or zOth_DgfR|tBg*8I9b@_5KX7}y^!H^XP6dgN6M{Y*&uD4j1-X{cu3i%Nse7f%%}ABc z*ps^qJ_A=;v-6Irx{f47xpFaHU|{oK9d4S?J=R(FP+8|Q zu#j$k@9V1DqknwR3VsImizx%s@FI9YbadYEm1p4fcfut7_Y|G1B1z^Dl4n5F8cgw} zgvjJAI0V;LVP@9k^{#QL#mAQ6_?wRCtDUpbVqoz!i;dft_uEW4?7fO+O@)>Ua~r-qNwE z0UA@4CImsbc?eh|Lu4&UkUf!oG#WUiYn7d{>Ekf6lpJ>bn^$ihu(gV4zI6W!!pi%Y zaQSgJaJ&uh&||MKTXzt8gX0yqDs>iPbDxY=%TSHjxaVN5h1DpL3RcTk4jX{IS_@?~2mC;f@Yz!PE<|a3=`F4o^K@EV$r$>tHA>@?eYWrMMOGRiP{mSZsd`0G?Cn0tO3 zmlm6cTS_>tu7IEq-+}-y3*!b-ReIgw_U&VL z%Q?Cuia{;=8P_H&L&V~M2y~wD* 
zRyBbh6md0M@i46eXmjSOYB)N6@@$j%P@7~Ak+4qaLXgNz0Ffec60tipL2!_Z26DNl z#?Zqw;PUzh4zcq~?yd*BiS>&ZYQ^wm;(?uPJqiofA3Dr&itY@EpKd%6aSX5&v3jW| zR9Qi$)x1$*Tt*Km|zSl9EZcRj#am(eHUHqAHkD;+b z?Zu_#tIA*eF8^kLK+jy<0^W&VC4mL=gCed&<)N&~!tssbdNY#&)Vj;`wR(wi3~BYK zZ&SwY?=6Ewq}6Bug~ojo*DEA>>nxA)Ba&ipgJ^2h;rhGlxtZVFsiLb=dWkHzxPnkPxw97` z@Xtm{fl|uvrw1pw$Ho;e%KA4m>u3wjQ@G&kyziK-s*EHH#HmJ~D2jbUufJp|ni*84CMvADv(Rn3vv_p9~>cGY{V1Cb?q!xl~V zEY;iG&wzii3WQslg-MIKGXw`sw1^S-8O4a?YnSY(t8fw_8A-GQd4GeY7L;QghP1DOLEuok6)H zVdH6D&L7w8Ntt?gvW2clOHC3|$aD9Dqq1w63@djdf{5tb0 zi;c{2y73>iGSe+|)??5%h%uCneG-03zL0C8JE_`Xdm=*qpnK5*X)s{~X%aIs4Q)C? z@;|Jy`Vuu*{bIV_bu}CiaXV6St!S}Pic8z?d9Vi2v4zYJ7~t4x{a#S&4)z>+^nmqg z*48E25cLD9<8AKuqMyUa3Sy2E{BkaDAism&=;`{QanA8_)c zpP>meviUGjUE^Jpg?SA>uaN7QVdZHI`+8HU&xBl*D$!CkOLB0lJ=9~A_2**H!i<*U zN3J;EiC(s9TgS{xtHzMyZI%6jF}MJgQGbx=y>TSztF-Ew&8B_NLXb16GQ$IN(d}1D zx%rPQELL_T05Yz^-L?*I{Ax|=3PlfH#?AxNwb0B}7^|bhrV0NGopV$l(;%^kXCOSq z1mn0SO|#{g!uR)^-cerouoWyzK%fF$`4v2TnJl$ma&hVUsz7nQ#T-V1r9-=U{R{}^ z;KH0DQ!&IhhFs?$pijzr$xkn3QVH);8!)^(H`4E(lt~v^SoHhJ0O}eqyu$wmSTg~tOPU@$3TsE_osQ4CN$V*~y__8`4eSpg{y53hl z(6f2lVTJ^qayh|IaiRmo*?1y!FAJ;}>XKNaWUTPc6`lbvpZn~w@>}0NElR#AvA)80 zY`g$x=;Zeq+{+RzR3}Jb$cuIkvZ5y4sT;NmLEu_+GEE=G?Pg!gShkUv0PmXfop6e0 zfIz9OO0kgMG|ki#A6F2L=xGJ4mPSVUqQx;2k0vyub#+8s-uz^jy1&V94N+xXMf$}z zpS%dJy4UnoD`K{k(819!=aj%->2)=;@c+?lRpI*ny+7$+FBePA$D^9$EN?B5XW*@T z!&r=B{Bq^iw~VWUI7I(3iLE?{Mk*Ww|3r=#XAMp*Dm~7xYHMf>t{+8?X0@99vKjC* z^?K&8!m=(oHF79Pc%w~Hf^3DmE^4;#>-N(2!dmJ+cn;1S`HCvk?6j10uT=_VC;Wl8 z*m0R5kv=R32NZtZbTd!;q&YGHk|tF7k3g;(Lb_8k@BQGlUoai`DR3ZSkR-O!8NqoA z;q5b}%-ssh)tp(x{{56L{YiVHJSF<dyzJPDtl)Nxf1_f+=Z9r9z$+^?ho;qXmsKz`0`Eiykf#< z0+~h*!U}>VkyJ_xN-+5Njo=W2g5NSDflctH1F!bIqPasBgH#if{{&Gm&DSNA`|#le z3Yo}Qj%!I~#GH#VEw+1hZ_rUc16g%!UyQB41~?B-XK_oAHH(IPP&ctKZX36q{^JfZ za|m-<89XoEAoCw(PyIR6x9!%y))Y*BRIc!JY}z3tLv4D zUZx@`l%g8qlf0x&vK~)tsJ|rd7*lFF(9N;2NVJ`>|EfNZ*pUJg`-ggtz5VY*aKSe!V_pituZFm@m40#h~G{ED_^jb#WEhZ~U*532FB1=Sg z9d(jdBV8AVDv}N?uA*G$`zv`pT5k}NwpC@YDU#0HTp5yN>m3NySYsPZ zY0A>(C=6|sga&Kw>JV9$Qeyp1F3%ZsL28WB6BK3>~2*C;;b+ z@eugS?CWpze>n|{oBF$jW@!rNMbj0x&3q$B4yJdaZbn&T4E2>-@dUKx_3c1evX>Q1 zXPK1^)4%FeO3&)~ae@oMAkQqS zfHn-r>p&i6vvp_jC6YKu_X8JacFsT_;3FNr^NlWnk%q7ZG^P<8JSw<;vBuUU?ibY5 zuMo3wr;V9wo2=I;?cv5OiRA@v^ANH)^whezbHyPRTRJ8CK^2sP&f1%*mKWkplw**F z155DPnI$?RMY(x+O^L7B=HMdm&D40Ok6hLe!acQT zNYR(Pm>T<~PvqTznuIpgS&~ATH;uj|k!nHTb_tXs%7uHLgW<#^dtr?ED*YEB-B)D9 z4k}xx zgfy;9sF&j_e7oqm@N@X=;cfg@)XqUgBc}iA9W8K|T1syagD*rik-o_YE}Z&)^cTnd zJ6n?-=D?eTxE38A-{t&t*IJA}3%Px?NDQy9L%NJw1zt)%4aWpP zJzux3w2X(RPrtmsT%nv^6cTHrnk(0M1~4-|FZo6YJouL+&+eo$U1B8SmHr)xg859t^ zTL>hr^T?;yHytpG^uThz5sxfBKnK>&A!ZUUkxHY;AJ?Wq@rcqUP6Lbqj%g&cX?XtM zeGu4RRV`uvP8uW33^|Y)h*>T2yO@ht_+9;J)OzA!^vxL~1&Y<%+HA(q3(Cyk@~?71 zY{Rm2CKAC38zh7vzA}+$C!DkwD1R%-L3;jT9eB+*N;;vQw?}g`Lk< z?S2$t;p5aMn(<^dMK<4>L@dxZ#O8Bwh2wtfzO5W40_W`d@5Q{9fTqZi)mJ+CTv2@z zqYPE~9kA@fFT{`8HFQ1dv~ zdG;n$5MFeZgR67viJjk{hV>OH%#GjGo_S$d-&PKHa;&enCw<=f1E;roOjqaShZcnpQK6RJxO#vnlY*`qxDH1Fci z;7r3)s3@e(lXRv2=4I&nI=}v+NMquzz0lh~STE2%&b4x=6z9dYKTU(wW_zy3;(T`<$A1)w;+fma75$-1*Sp=16?Odvn|s0;SoC5MBfbXF0&pT-1)vs9){ zB)BN6=R;2nTL}FPhyUm^+O{KL+635z z<4*AX1mx2}0NG6KK^mol&>jbrS;-!ZBnjdDjy*JQfa11LE#{FVI8MiVoEjk97;@#{ zbQE^h@KE>J!9-L}gz9(UZSq=q^15|@{?AsASy}rCf?kKNeaN!Nqj)CZjWWS)@XDbS zBQzWSyT(kJ(;|GU%izx9lK`a7&X&~hDcZ3!i7%0A$^!If z>i%`L%acSB59DAzk(jyO)O!syMVp6zW+2{YiA~~*(vIz?ATGg`Vkw@gu_|9D90J$H z!O{oLSTUW}S*E3#f4yI&x(*Wm6NHOpYdrVF{JReSpVq=L6FI`jgjj~$itGLYHk8aW zwO58kPM)mR;&WIYPv1(@55#B)T-M1PRO&wz6zDByTXHg2J_GG3h2Z4>rkH|M7x=Yh zn1@`6$zS-}O*}C$AMlBhpM5cTIdk~XQe6UHpy}od$fdd&^YKRf%D|v`2ZTFS=i++SKzlC 
z)M4_(;Z$Kyzn%(S3*1cn%Z<=Jt!F?lhs4L@^{-$}kZ_YI=hRsa`i3SJQ9}fT+1G3( z?_1EaXNAl-aiLjqB8?W^5A}l&>c$)}LKTFzCzlJ@YHSM+T zchk_>6Z#9yso<$l1HWI}Pc$%(!@X(FgLqqDWE|db|HVaH~#J?$hy;S?DFv5QXRnr$T_a zYX6_RPq&GhNmlUzt9dvIl{-uS5Xo=c$W=Mux4b+U_$Xh9c!fiE{;NaU85K>UOqy)d zsP!qCPiK}SEKi~?niA(*=1v(zUBd0Bhw@j1poSl}0fuV#;;as#9_RnOG53t_Jh98O z*Yxe|O!5n&-_+vvoL8`%?!@ps1K9aBD@!tL40VhNdC+D7ks7?k-)$Q6td)A+*xUMH zTurJ;1(5gx9?)jrn~K*Rj0cX9ugT-`5PHHvnXq=0L`_M^VZ~I3a%%RvC~y0mAy#Ft z{&s@@Vd5R0L1ufIlU*QhsbpRa1PuEaDdHkQ;qjN(h)-Cz$HZN%>ETRO*E=PZnNg(P9r~0<&aaZ+cZ@6g9ql*OEWCXha8trBSIPrd(UQNdj zj*Lfw!5#8J{43#N+Mk61bYkiYLcnK(YArn<1}m;iEMrYGO(BvLi-GWf5`8O{rAeyY zmMj>9Crm;_?Iy&?Zd2FX+CB(G9Y-Ipxdwu7wd0JJ4c`}!)S%K!Sg1~4gU`0xAZy6; zXDT&~Nu1=9LagR4jTPX7!5rLo{-)akMC%^-B%Xzk61=E3oR$Kk(#iuqRT93RD*p78y>+;;PnZ0?T@QPmX z6n2mis{cpthwnVkze#fKLV1aTNc9?a|K_N>CsCaq>)&Bro&uu!mB6fT!Sb6(hcdR& ziD>m8@A`{~e$E;BBMFL#inKSJyz7g1=8g&AggiHRzNR7bZ{o~U^F%1^yKm(<9LkhS z8Vh1Sn?v$b>+s^$-f`aLeYAwc2@@RYYYVZ({w^LwBXg;(Anxo$3KH2-t3vfIAdBK+ zB$E-?%=nl1P=bMNrZxEne|u#$IP*J5m_lM;b=H6V6RAarUY|XQj?>tOjoFSe?+*=y z%0ll6IV(Z0tVIstcLx%b-)2dbdml0OJNqkb`8F62TUzU{j8*621M|HCBM%)fSyHQ8 zqY1iu@gZcA`=ZUJ^_OIeZFJi4Dj}xfuhqJ|mbgQ+&GrnRfxaWkDb3Tm_9kGFb0Pdu zB}B%*i}=AB+{0eZS)e~w9^l9&ON_9zjDESJ;=sz=EF{NwiAnEprO1(HozqsY!|?*g z_wdRbY!7*mkO&-KvDUyq95&0Tq?Z-es3L;M)L$lR*nFe@7P`=k@qXTWcWCAIt) z6aL1U;Rp!EmQ_QZcMq+seg*`>0oUpOMz$te@ne^Nb8>ZrM{K4xc}%Ct5K0?65Qs&( zdQg6A>{Bm}f5~JxM7QM4D*Ml*;AvEXJeJ5)x0=+ksZa-NePLTpCUD+VJYVfU_X$@F zv^Y5Ax@k|pH>XYG;l#<=% z_feS(T%U2kc*B&_Aoe~TMQ3zDh)BA=$6Xp4mP^J4wK^G%2GKB1*&^litu_}>EGn>y z0d!aqkWgz0NTJ(jm{HboFYl;%V-%`1swa>CAdEpN`~JSQ=EaK_0{udNd?_mE63;LD z5!*S-(Be0f6jzTRA1b}pNVj|SLq9lyYiyVO6}2Yg4ZAFsOAE*gemgU_xrK|;V3n=9 z62$dW6H(n)UC|GHADLaLj!y;dK6t_P(zo~q7CDCgauPq<);bb>ZGtb0nEU!npe#}- z&`_QH5T@D`u*W-5ZDpaKxS%%xg}XewotRd5o1F1&Ts=5jb^K28U&%(2!ZCwJafQ`E||!Ah-fR3U_F(=td*TXFfVOxt-b?)YEz8)xB1n6`00cf=j7XW0?#t2+r0!ABWH{j(#^qcEO`FcUw&o&-aX2*H6_+ zml|fEuj%%uw5% zWnUyLe%eZNhIIlMPTeIWPJR}aCM($Gdb=IeN1J%@ei*{GKV$sL=w6J-NL=6VP-Xg)te42NOlHuAsD`?2x>sbiO50w$N)4|_5jXYAz%o=& zK+x}D-2JLUPShS|3k{_w-xclL!g9V^_Cu{Ov$Bos$hg{nc38!|AClVYmsv#^u-sB9 z$$focFc&9i#*t*`zwQsPcmyu6>;CE~lp0CVw`9`$$|<48n=Nr%6vcrr-wCtUMre@s z{G$~pXaJIYwd->~8%IU3Jg?WPw8-v(N6PdJXbHJpmD;hH?q&-NZ98o8=?iZBZ9obV zRqSo>$a-Bf|7%}GmHw7q5&p*?9;Tdx zx@0V`odNZ+K<275mcC|-vbK&0HjVdA81cM42qhQN;rB1wcKr$$&-Ue9L+ZWd`IpGV zIP_J5^!4L%@mj~EedjGtN2Y1Yf_oK|GO3rf1n-8byuS%j5PATQEVS!tQ9n(7fR4qk z?eeCf`p*DvpI;-T{IFGHxt1)q_;=PA&-yLt&A_Xs98A_o_KeVNsoxRgrzK0jZ=+DN zrKXN^O(%Pr`6&8b1|N#5BrasRWb&Vs%(DtQw)80}X?H#}Ri_qy&p{(;`Uq zv?~X-%Z4@e=O!LG({%QlSY1&8rU`wzmVIx~592i5nhdL}(#7b=JPzbxBn~rj(5*Uo ztA4JQ1x1Pv=aB?fW5%+3yi3hPmTN`u#chT^YCHhSV5$bBjw*yp9&7%=Y zBjMjxsaE)vd=gL86bU-lmA0T@Ns_r1@w?)Hue6m|d#Lz6ViJ6MSCaMqm*mVP=*Q22GLUDWLW0aAmCIP~L`aLxgzc*gcF!pn6m=?hBUAgg7jSJ3St$>3`sN z4Ky0i(-9x-`W`RSYA#!~Upgj)l(`8k;b+$oj-`L>ShSr#S05j=tON;AVSBlkmRco{ z%X1DIWizb?--7k9NqeeIwh5z22KRGrZjh=S9xSfW7XJ8JS-lg+Bqus_xg{O1Z^`bR zMN>6bbn_E{d}a6(#L(QMb?X_$7wC*66gI|Dk1`bXzjy1Ji35$3=G_e7I1MW)RQ7l~ z5<)R&6l4WHqd1tg{grDy>yb~#OwK_^C)l+oK}qvdN1ABoI&7jg7U|QuGDa_s{6jcQ zjP~qhlZ)?TuCG5=xU)!YLXJ`vk;HX{&NPo*^y`#>&J`^&OUdxf&CHeVMsWf!Dz$2N zzUu>DCwBAL9_%(}Pk&Gjn%lD3`!kAO4}Qo{JG&mq?I2vj<A7bXmw2t<(h`)K!s%MKe2|wmbXsddJZ*JWAk7MVzr>1 zbpKray^D7fVq5ftlxd>Mf7)0&nKpF{maNg5z=2`fvuJp`RPnWX8ArkR;g!D zv2K&79FV=I7x1!b>cpPi$!&PNR?q02ihOMm@)feL3S_*UA@hyXvMDZ?-vSOkr#n_JKOTKwUlmQOn%I9 zXa*|C>8f0FOM5ZuI}-A6qwPut%D3z@WV%s7CF5(ifBwY(EBW_DK;6eQoG(X3j*4kPcJ_CWV9BQS73P<~_UcM;gY=bKU+4NK@kvGhc}HZ((*jcVKVGZb zw3p&|D`A3aojbG4xL3b!Ed#4>D`Xo*ytOAfHdiN5&2nW9W6&%FsIPfa%GST3R7BU6 
zICIc;WO806hMWJ-QsaNfjS!m+3;74()!|D)E*oPjv#c|Pp8MHXh7rvxo;`2Sg`R<7 z^plTHMQ$z+2l4+Y>_%rhyZ%-)<@Mz5VOZ7AAG4XkMA25Iaa40%ST*Yf-_f#gH5YDo zMnGYIM$kZ-($6D?sv^39m~hp^MEUawWc*Hc&cA1bCtUm%=q*rJNJSeUBVd6{v;>S> zitp692VkkUZ&eb5LbDJe*Op(sR~G}g@e0wH&)TgKiO)Bnq$#EXj)D)5yI_y&lf%># z&#D^Iq*tQS-dnv9mpqd>sw|roj%@*TyXj%3{>*Z(C5N0Px=pQHlv6iK$I>9e!(Nl#*SZoVTVWw z^0GL}jeM4`bMIt`EqJhHWkpj-J9PcNWL(O+YEr@14?WLLa~cxo}CC) z(CdMv`c$+$mw6Co`Oc+bnA%ZN@W8-T`CsAuTnHwm{~hGKLh?4Syen_{b2(es-`--E zct>%>aR`SuS6&-Wm?5ard;B3>dthIT+=vY*=IU+2-KzeB@hBzJs}-O=SE zwg07^V%FU+p$Scp6A3U-chB`YE0y9pm*Ka7Fp#VV(Y5?Zv@oObrb$%vwqhVQ{*XaB zGl}TNZJ?=V4>3~aR+NZ2JtEEXD%~5Knt(ZV%Q79 z3=XO(@>ygjXwbWEwGrGQjU6Bgs5^VPeeS!MZ_KA$%i)pW(b-v^yL#Nh-Z z#KjAO*or4bMc> zcZbP-=(^}E@%Y{wHM@E*$F~)at;qsTj5rl@kbeK&==ACPI+@Pe^u`;)_pN?&U4332 zRX=vGn=?o2vtjLwd7P2xRLof%2tPNduZK|Ube}&bGl}b6OizQKwy#3SQd_V zd5s}j`a4C8($vX%g^NK!Ai%T<@{s_YBQdGTzipO6TnK1G1COC~mQTo!Z`Ki&P*49* zP09_CCFWV=n&kPO89&^eC3rN-wNzuPD!W03m*FHQXv(&Y1>+B}l}QrSd{^2hIJ<0%+$bFkHBfAJ{htREzL2AU;Hj-J}O0DL&2 zb>#9$t+I122ud=ewp&v(tqNbz7lB=!@ybN&sTN(ep=}Qxr{oR*S zY!@y5BPVzAj60=89m*goRlO(X=~Nc!+Sqe=Qf{5Avz(FKQl&9lm%tbb;Lo|rM*C;6 zdmB?{{0qGeA*i3&3aPbXU9pt=^NREmmnVbSv9FJIv6pPG*7XWWz0!u@fYJ-mgC0MJ zVr=c+B;rLHG8>BQtoHz{x_5GszPDgV)&z{#C2G?muryei)p^d+B;+6W<0BTAOgu*6 z$Ht9kV8$RWkI2$OcxcD=h2TcRpP*NqcN_{B)f^c6G9LV<(p-FBmf(cFetG<}7YCPx ztH`BQJ1*`}^Glvv6L~Bp136fTXW5Z zvxj2nm!o>GH&fuQZEe^6V!ctQqVr&c2<`=s`YvB*B-zMgQPUpN+;+M@1*GuCU}EQl z)5P>Q5-&b@A7tkPekWyKI5!F_LwU@ZEx>N$le^**Uaah9`uvzmF)9g4tZzq@*6iW= zT;pqq_=!kRl>bFIjKK zYU*$!vJOm^98B8b802#DaPX>EjIX(^9d%|}=Al_Oh&#i?Cu;LoWKlv+%jyq}x=qfcb)Aq)*l=?1^Zb4=2l2=CH@6$7CnVIS^p^ zU279l|MKi?+p45O;2A&ya&YxeC3M`#qO%mzbYgJVR5;`RSQsN)?s|Ko8b>@~Vi5=P!dAn<6oUl3)N=xtY!T7)mqD3QxN%rx>>vZaZ;~-fl z_@Aa{px&n1<|`)^-wXp8Sgy%P5I#!?g4^NA`Xlo@KlH)B6eRC|fz*FeDqo6U;IUTaY#YkT_qcNx&GtU4hDmKzT9mcoeykA6Cnp*=C0>w{d}Uc&ZAUl7zQ79etcXNS7!iL#VajnLs!TmJ#e z8vNerSWNzuO?-P?{ogKoZ#2!Docm(Ee@am4AEs%jh1?F6#Ta=gI;cQ z|1zA)dwHu0gRGFGtfH1X!W!%?*{I@k(M3|a?1jclG9hRItNS|f3@`)vwiIu9T9f!0 z%29-s0$H>^4~%s1DZ^Cso8dxE8;%MgLP$)rA=ZwH8$r(i6XGcU`rKxGFTqP_X>nZI zK!t6V(zCh$ zp|!UPiYwaIM!RVof(3$0(zpc=ZV4LPJtTN=cb5>-xFlEu0fIMf!L@M+?%KG!liT~& z+2`(Ex6ZD=>iiE~YgVn7epqwNF+Uq34}v|xOF0IFv8HktFqI%|=AZxl6ZU7qKSBC0 zqXI+LhvD0FPqjn3$~g&sgzL43b$@>U1xy$P8f;nr|7ga4QjldLUr7(`Lu*)8ebR=? 
z$j`67d=kGBcF#uR1X;u~!GsFXEbY6a^UNGoyC)US1Ta#3X{9vLsAUx{4<(7@@NUuR zXNk|S_WuH=VNHsYbfHRGU$#U}Fh!pm8Nn7p7!Nk+lM%Y=YmwI|^ipgnl%bt54{GR^ zdG`h03tskcA*!g+;8ccNvT71U^6LHi?<``Q_gJKVxV7!D1TK)+{tV3WkMwUV95M&N zT}cWqTiuj8>2c&=c5uow&r;V4u{5E25#SOSc9-TN$FVd$JHzSY3YoG0c=EK`L(+W~ z&f^AgM-moEUvwKIYa8ziCBY%Ir()6C4T-}N-6Yj#_q1<~WeZkq@AY-~y1>uF)k8;p z(p7J~ixx$y;bFb#yptD5rLyzDAk{a)`9g6c;N+e5<7=gqtvkVNNPjApHce(A>oNNi zmDCpd8-_t%+|V%riA;}q+R*QkF*rX684`Gwt-+bC*^kU1(zW9+M8r+?o`j3&OO(*O zJKv~F9Gf|<9%eRn(f14DH>JKyj3V;jDYxc>kzH(P7&H*Od-Aew@(5TXqJlCQJcgIb z2OKl;`h}XXE+mO5C`oy21r6j(PJ!|@zG6Pew#nxxL~ zzm&RNy7N}f>*I~f3OnnTm+{@d_ruDwy#O>9x<*hD9*?lWFO{YJ-y7Q;k^izD=~hCb zMjoPSXouh~m%Wr>w&WO5HXeJ#8zo=Mnxkg^ydH2D!}MS>0JCp3#C!B+T5aYMhfVP3 zJMQj*w~D%u?bvRLk*XhqMgG4$Z7dkHQ)lCZGyHwjFtmSJ>%7NNW*2wH*yM;0?nBZ| zO?e99$vmZV*2HVZDqQGJwkkt^0l7teZ|N5A?;NQm6jOXPR_jxOc+T>iNbAqD@ik8p z;el&&Yr!X5%C{1o^80tRBl!~--%4<56AuQ4@(`i$UvlRix)rQn3lO21~vY4GwgnCCbHBa zlkse`mO-CUA)=S2`5qh4cOtU1aPv!ms4`fOHJwA|>OO45!en;`zu+ zWAXW0tqRBh`lb+^iq^pkR^53maE!aYjJc-y6Esb3ZycU;kS}Bg>_@XMAgK6 z`dnu}i}rp8#f5lA$=La3K%QzlXY2!ovf3TP043RGD-LUrC;sj+4DF3ixF+OufkSvJ zZTA}3o3_QLzr;t^0`Gxc$WN3Wrt{y0`yWMISPY-`B=RPYDzK;?{PY)4=9)Za9h8OZ zl{IE48F=29QKKbi4lD$ARX=9hRG5JS!Ojq?mF$89l8_B- ziO7ugpW3U;Lhx->)&kVTnAGcR42;_@;t^qcLF6_Zi5y9guV|%)5AetZTqas!rKr~g zg)%u|+)XVByV{mWakB!1gvc;fh=Dtr?T&WQO=rejQb+!_qwC)ebgQKIbZtLY2>a^h z!cf>!fO9Uwfx;Wj%68N98M=YI?HhoYoEGQhpy!>ZhJ$5qDOW3*w!~EV59W#QOv;gb z{9PF7^My5Y%z)kNhe{I+D-XV5Z6?#jXFY0XQctoi@7Gk~KjzfjCD$TNmgWtwB-ml& zQFzWXiO0x3z<2FFn0GPRIQvj*lpK7klalQC3XYq&!GOsYe9nbT#L&5gzI2$geyCGj zzi;;(HsdU}yEChPPvor{hH;;ZBsXw!^#`j0f4pyVqPrB2$gfEwr`dWFd3;|GgE6z* znBGQgplW^g?WWA!sL*u9FFq@ah0K)NPKz;|zg&E=g^ilcp@Nt{WjZKn0K`*SdU4)-;?6tjVqolB?*<@svdz z*?GQ7eJpuw^y7RDG_-Uk>LCMd9wzE=Iv^U;m=CjSE5f+vi+SWR09gSS!1R(mHA=3P zkKBrt$MhK~Y@_>8=WJpU>d>3z&Pa3C2~^qsg(J3ccOee;(r^5I&GB81w;o9tkf%@0 zm9XvlmIi@0JYaceO1H&^P3Oz(XV0v(qc&7)Csyky)lk~qSEIYm3*A=mKkU6Yc1DS? zid%)gA@JGW`FvOq>dvxmgKFklxEZUI5f5+ip*LZvF~(6GgZ+$|H$?7pFZBre{`Ph_ zD)M&pX=iMhI@e&CSRZ5Q<<~w`T-@nI`4>P&=ay8z=F{LEf7`U_W>x#ge8MQv$T@w1 zjSPR*LukEY5i>+kV<7_sABF~sUT5RDRP#AiAwru9cjP91$*50pOs~GLrzuryo!`? zjPSk!#XT1OxJ`p$i3y ztBj0fbnR$r+((H7VbE3;2R^a3!5#;o7PCIO+TJA5hcC_`TlJPAL>=bl#d&E!*tRqkIhP9f3GKerf4}a@KYUMV3 zOD1?C$Qjp-fc>N7SGy+5zwKeMj{HM}?B5dvZW%4m5N4D2{=&N7y+U;+IctTatj;Qf zlRKWpNIghpW7xE!{mHM@ZkwT4N&;5Vp=6v-PDkXQgv=Ydy5@br$^(lqfovZ!H_l;< zh@t#xZ!#`gKRn_)+RhfzHLdS~W-UaD0o`A~fglf`dnbuxfLD@-Y7X4}s-fFcG^jMd zF0sG#Hao#q6w!KRmLTj-H=}xr_q3pKDxNFW9KJ@MOlL;7eFvFVj>N}R7de1~oO)FR z`G7-<0z2KN_NI#o!jq&L>?vw_jhw0Tiy2Y1Md5FhpF7Qt^sa_#tD~=QRKe-Pk&vTx zgv^QZf}n#Q>-B9tHiE4zIQ4}7Lk_l@%Z%_`_7Okvfz%H|;oT5hao9_VkJ3V2$#4V`;8HA!?Em@$an|mtDkH8# z`MGd6bYfrreWWhnQ1tM_zVf+rRx+M%{bvAl-p0(s!oCRN-Y-;m_AWn^Dz@ni@yODi zf|+ebOz&&Hf#y6$0k{}`vFYemDBocr)tokPs^m$^1*al#)En8dyMX7*t==cw%GJ`N zWC@5&Z{(SHosK5)5*Oo%8D-=?PdcRj4PmS^BvJaHEvvO z@oqqyG`1z?0;s{k+D~MCEYN&2AJ1l9ZIz4f&=&1vFx5=q8FgUJg^H_kNha90dpPWN zw-UKFPGL#Zd!Cqr*~V@5@T$3a*tyUa$7Q_oaTU9)e;>KAN2%h6?J4sbKJ^3%!W+uP zPxFe3ykeH;r{=0~gdnD;5ZBwO!qOwhC8(7(A(`FiUtmi|-!1RE&{7b3O7W0LE zFwAvQ4TI8o?c3~|-}ZLpL%e+&yzmV*sh@iAs(8%Nk#>>~&Y z*QI3^4G`5{GGH#J$6Mu%qja*n4RY%%ZpE%n?`p-Jw6xCC^dpvI6kPS|##^0EuFaU4 zXVri^i-sdD-!mWAC1Pu>E0MgegT%7qX&~dR>whP+n?VAvl2qDXQIHiKGHi-9grH z6ES3>qNYOYcP$F#>c~?2NM6hZkgaQMz~!IM^JT4xX7n!*!->(^ApjW;irAEuUxX{KspM{jiqH*6lQ%Q z1}fez>lx>nmhqb2CmImuGKab#n52c{FCSR3z#l~V)tkKA*@ZWtwG0P?&st9!K%%eB zkePJ=SBkv!ODu$QlGy`O{HW2Cgiu~MCh!N+a>y^N-@I$MZB#Kby+JdXu=xujTTCt| z?SoeX^c>>8(Rose8yC(dJ5Y9+nj#y0ctv`4NJma!dP&QL?7xMxsDy0G2zfKp7~*y( zxIvG$DX?_-pY%ZD$L|g+)pPpY7{53+V3z+@c7^SU4^Q48gX5xq{skapCB_@mb`C%? 
zNZ`dFQG$J@t}@^~o37BmVdno9OKDI)xS&kdZx;c&MN)_*n)CL&2M#gVPhy2AokXOT zNF}GJ8r=4y>jfXZ{g4%->d{74Z+L|Kml41yndtKyHIxgHDo+oS=EQ-~tYxi0f+ z6rqnRyC$K(Y@PbeTg|LKo#`Q6+#)*l_nZaJn6mx;9a?U~>c)}f^m72GMc)&&57YTP zd(Yh7IHn7d@1U7~eqB4hQ!_MU=hvyg zDJE82x%8W^AO@R2OlYfVSO>GVjB427U3l-#uh<+~#kHSCFO?xNWQE%m$C8BEJc^Xj z-G@SA`=uu#yKv*Wiq;0V*57e&$>h&GX`8YfLK+)#IL_s0Q|Ux1RB_RK0)rnI&emK7x(M;p6cp}}PM`HN+_(yUBEiEH zT(vqYx1OR)R~x6{ul@Q{g1f~9qElv2^fPD zRCj4n%Pb}qW}7({3@0tJPJp{Hw1W63BMc@Fk2rsHiQcp|9l1|Sf*NvQ>gc!Zd-<~) zxM3UgDB`WPMo5XTbr(aPx25^ZJ`=G(VL(w~7ssBcM<}a`xr;LdY49H$tLfjGIy8k6 z9gXLpVNiR)>)1-<{hCm|)r$6f_+DZ*!|#D9v2J54T%2PJadL*M$N+p8ztc437}QXV zWH?;>$bBiASx*0UM+l5@(xdmSN7$mQZO-kP_}<;M2e|&-LP%hC+{J zTH|}fussL?s5T+Ru&N3tjlPv7H(_w!;>0BCPj%#LYBYZVhwc|WZs?ylmGsEw$$X%y zH@f7m>$ZcD-8=n6p*7tt3Zq7M6XpC%yS4ycW`|NT^EZ|cJ0Okg4+(6yWw|0AfNpzo zoc0Ir(#(&_!vlHiyqq2-tQ+-z0mZpr-Cjqd9-^O#*HmYyjiX+ax5$5BD{gZTLL`(2&)$IOTC$h|SC z!Y^pHkvDszy82V`wS@QfuNL(svttE=^xSuZ@n`l;)SZhLjTaV;miVgV$3@VSwmBBJ zTdjdltf%~c0nxQ9hwr;K7i3p<>N@H%XYQav{3dfLLdHSHkvAQ?qn~WRz=r}#64{N6 z3&9cmdV`erveA}kPOya-Ie;2!GkZr{Q)rDx>7#?&8N~VUac7x(0$lj8uOXuqUhx*4 zOmrIzV@sGkO4&O&u$UKQ#%M!TL!;a8C8f(!9ZYk}`RX6);EQk_|EUzD-D)B@>gZ$U zh({Zj=hV1CQv)pbye~&<1f%OG;b-?hB@gJT!|cCxQMf6(&?jeM6t*Lw_-V#iyVv|p zIGW?OKQ<)30g+BS-*x@bZZTt7t^|5B+JoMbN4BK0V@lPlEb9HUnCL>)*lZUx(|=cq zs1=j0nYDTI*V0}OJwwXUbzd9HoCvHuz`rKqd;0Ys_(vA?DB+5|f>#Mq zfHBvWSy=>rGk!Kd>_g|FH$nJ(x^#ZPv{A>twEL1e9ANud=hJ&EdeOeJ7AMza{^30tgH38F%3P!E zN!&oiBex0acIyn3d@u15Wc?_m)>Mj_K3P}SJW-!MQKIl>+S*BD}wOC#)w6WDiRrByh4ecA2Ozm03LO50B^?r{JJluiZaz1g7 zccE<=ht$vKekRILH2tdRwmR;n1LZddsN2P6hRMb#0yTwvqcR)cr{>hqW;%S8R5vc0 z13WnE`KpS4h9WFi7n!PoNsK7R!xN^p)AoP9t_?|Z?J1g6gWe``0w{Uif$F&GcD+y7j`)=%gk3tyu4@x7b47+kaP6f2$6icPj66BMsiRcLOE z)wC@%AkjBUc{b}oTc+NcVm0|;D>?Nbp(D((`($;xgi9A-aZs@ zWWQK{O8}r<3eDJTipq5rO;o_1KU?I>m#cqkP!~+W00|+_JLjIk0|+566@I+AAKM8w zg!Wq7Op>c}45L*IE5GWpjs$n%uVQ5N&4?N`yM_cCe*@KLe{#M3)FQGr63d#qS1*rZ z{|X(Ppt@}8FA{+CkS`k>^;kKa56xdcT7{+4@ zz5IeJ=i*~EUCj{IlJIVo+j8`SN?^ghjuZbgMdrj!`R~JYrrYF2`Z|U+m~Ea;7uyfa zz+H--k6?SXIogo;nJgGfC3pJB><4QR^dnw4KW|^XVTD>@^jqNUUqFVob%(LI_2PFn zM&9FnNy3$px-{>&jR-8rnc)824A^OqK0(wmPM*%KzpRbGI3@Am9=#oNb@dnt->_~4 zTQH=kETqbjXXBK-*!l6}4#DP*nq^R2I#|Wk#_XWwqg`V7UHz3f7WkL%<*vFR`ggTh zOPnV3KGD;|{jYz*K(UHAKWpFTrv$dy2=*o7V?K7(x%Hl2lK*o>z|=N9U;C=b{Z__1qHE zCI4c0`A1%(y9wPZ2~;mxnzv-IUvz%~wcLOw1tc`lDvZu>GF1;A7(47YGDG`@)Vqv@ zyJ-Elb^V_f_J4T-KMjv&ZaT15J$$(fs1<9s`0-0zOR!z2pG3Mw|7qeg1F#WyR8u%FW<%Kl$Rn(b!PK*^Jfy;`GnIpZMb}1_ZzOm8R|k5rF9-Bul>ZC`DE`; zPJO+&-{ue@?o&lK#)ID252c|B z{E@PXr(ZTay#0z%@?7t(M)CVOoDc(7q5Zyfa`w`tX z4lUiY^Yc-4NvesbBbi9v>LOZK$X4>T{N{^S#`O2?XLusI@1Hd%%=c3jQKV9`FfMWl z9avBB1^bFrtWY_(Zf&!xI(7yL?HnHEr8v?ha=r!N(`}RAxtG1Zk*j6FolrE>;F23o zaGp7V)y=3zEx5(Qt6lBPb}$9u8@gk!f*D=R0#mD3Vz?!#!s{cp6--sR*w`FapME^- zUBZ%HHq4InNOo)_h=dwbIjd+9nCW_UQeA?|#Kky|&Ri;c&Ext6iaDKpA!_LU(N*9^ z8LDk|45o>*^#b{JEl^4<*wYT0`Vj42M}snbY)sEw=gj-UjC*^B*!6bCIp+qf_$OD^ z?cR9c%p%D-ORWWCUy|xW_ydk-vC8+~rH+mkU16f4aOlz(G24bI_Y!6CaYGUHbHiT( zFXu_A0myT)xI9u+sZx$1v#6_*JN4~q5vmngsqu5U)|odg6UE&LPZ!A&0vIPq!M)YH z37C7)uNF^AdLV90a9uJS;Ja1*V6h?dYC~b%NDkHX+qW<>jK^fSW58d4%I{^S8GApG zfFXZ#*Am36r<|;EN>OID>2uUi&#RV=`&G>*C<~j}BHD9b5l90V7 z6q%Sq-Vq^09yBO44~M#VJ))`FGiQx(^y(FjqQ?9z68Y{%#`zqr1N^WR#HQbcP@wM# z^zufS(Qfn2#&L9)^60%*X+*f+X@AvMWIpFI#=3?(ZYsPy%lDSN?WNo#A01vqMzTh> z35UivIt4(HZKfo$s`VL#KTFK8!XPiHfAc6gaB;gSxl&Odq zc6sYJ{|f?YS4i#2Zy?hI1J%+G zDWZoKzF{Os(r-gfjWI%J4d=X|moqFcV(Fz?D0JoFPhrT5dx4^3m7mZl@lXNjcb$~T z73s5zzX14)AfeCyB9Xw>))*p@RS-l;sC{6wc5F7zH3(Z|XbG3UTINN@0ty5$A@wg| zXJ?BUWu+67o@B4B&1Yp6A#rma=8>plUx^lG*OyWT=X86?vXhqg}@cB 
zi;e6hjXJM52fCP`3zQP_A1@GJn8+_^dVfJy5ADup%0U&`SWyw$t_j`lqf7tjuEQZ6 zYxkma+8>iY1?-kiqf2G1X<3o&zca@GfD(kvR>p&bqeaxMKOEGfjR-A3b8BsvXFCcP zZZ*Qs$v)DqhWStYtEL_#<-a~J=q(JPYpq7?Az3@#iT;i^%$q(eX6%G zpTzw~HdD^AC>L@mdb0u*Yv7QUp}>$sxE8HZ49m4kZ{mFBlHuk?t56|!adVXXAh2JQ z>!R3^33`e^baq2Bo@xNMmh#68k@kaiTie;~zYJMLb>-+Cd&l5Khfxt`-_i#YGjC?I z%K_UGWvWE1dhk|J0YQ4-G1NDyuFvBdgb-Kc`58rlzd_y~U9b@M6EJ|e^$n^i?s<$1 zSH$x^d{jh&D$+lZ@ZHUF!=)GG+7DATZsA$Vvv%%sKkRWk7N%#Y4;Pg14U!cONN!i< zH8O7QiCv-d1&s%h#VKB;NANC*!F}XM(-Y%*TziE?U7o$Sr^*t}<(hp!IPty|wLP!#o$13(7ESD8uQvdW~a8cl(_usQn5Am3XPeh_-1VM1%&wbw>(U zzIXFJn@)-92;&Orc0Q;5&_xXVW~_ZvfD>EC&NFHI7@FA2xW_gjdn=t z-+1;4SN;X02%A=$h>WaB#PI>)3gAJo{DJ;r$|v7M<^O}xD?mDgldb$B=7`=4=r*W>Tk^ZVbpMzvhlGHpKAg4Aten@69uSzz8 zjK2FKqKG)4a@(`!rkm!fXb;XyN=R;lamMTB$!hf` z3yzLvvaU z!p{0M{&nS;Aucc6KeYOb?Tc?H|E^&*S7H{z6;?wVwmOhir&pn>7a_K+X%_LSu(1o) zb#h(F*lM)PNXC<=eckeT2iA-AYJ~e70?l^M#_^QPwcR(S*lh3kazt6-xvQBk7_hTQD+h8}) zWTd$@;#20JO$eg8_hsY$NW0q1J72IR(hwK1YB8>^hY+4+?l*A3PbOYXD#Vwe6=*g**t$01Hr(AQ$!lyx@pC@XniYTQgnat1oEWLp3y z1SIDM<|~MGq#n@%PMe@yvJ%2sieu{&hob$V`7*>RpD_o(5b>M zEO6xrDexLz6tZH*chXI!a|K{@8xiet5GLL~G?VY&!EC zMGDLyHyolW5nRTS``Wy+NMlb9Om{b}-AaTs&TD%(SnP*jm8mAZ(>kYP8`}VrWn*Vq zFVN{w8o^ZXfLjF(<_Rb_U8WIC4Ptv4YZjB>FD`2Yamu<4t=5N&hqs9yTtcR@$hkJ^ zGqvd95Uy4oiHyYlYuHC(?|Tpm)nTtN6+%0GE?~k%kJpi{Ua;4DtEg#9>I>>?@&nb+ zfZsW&x~y}$*xbRBp2g{!98J#lxmoH8&b(hkd1=(}yT&t|2y!exX??67tpylOd^C)f z#zfrbA)t#MrP0!>#-E>$w7F*-j$ZrVPmomOJf_Pvwv6E5bBYH-?g9!-q?HFXJB zXE=JY7HKlk7)4B=+v($22XMpbNErf&afUcnGu>ALk--gP%qR5R3LDD8Hh%$=2D~Qn zs*;r~iCK;*6`V{7ds~KIRdyL%j{Fsb_<+>^YRC0>xuSd{KY0B?cS_>8AxHIz1HNhG z?jFGZYJeJF$^f|1B-xg>VjYe5O=*%B_rqVFPAa8Qf( ztkw%!QDcw>78eM>B}k66W;pT6uIb*4s3I?Q^)*Y@@;`CfX+Y4HlC%(wDsnwY3S5kJ zh_ec1zlQ1rX`06P$eH+SvG>gIcW4G^f_6i_$I;64Iep3FG`Qgc7BIilTiJ`H^~uF& za{YMJznYslGxn1V;KNJvUl!hasJaH&|X*LcqDyUMgK{6E0l8#F)m zZ1fX{7YDfeB*xTGJ+4mQQUb8NCF!9})jITHHR)X_C}z-Gev01lkWNWydr`CG62dwS zwE-UZ$2&Dt{D`0w5Fi5*fRc>9Z}TxOdf~_R0svbVjU$O07gG=n<9;G8ZqIFwauN;^ zYrC5Sram9|PEU**7Vg;9Vuym6Bq4T72Ih}5n%(O+VO#xHw^W{#~e-(Pb+ z1Bn9Yo6+^kFA|3=&#gBe6MP_J|0#(@ln@qrjqkW=m}MVtf&~qs{$c7e6|&Xli2xteu$A zz8;H-2;#&2T-DE>!HXyBtv)tQQ{vkygBj=nY=_seZeE@ljc9PU8?lUnV`N;*MB>_6 z=JQZ5sjX2elZR{=H##_a*j~7qqe=QzJ-^(QPlx~#yEL^CeAcUDT^mvJYbS&XQvl4r z(L)GM2E5EPD_34hk!_-ni^MCu;^}E5dmi(p%_r+U4#NTOKwAUkMWR^H9y5xVuQ|%} z01r2XcXaIW8;QZ&X?|5-x0(6`Bd*@__6tyu1RX0jV7Hn7_k`prXCdY-`}DBX)5Qo9 z{)Ln5r@~V9A2HT8!x$v}WuM+BbQQ%g5W)gim6G`UDyYkcm z%{SaYUaQ$v{N8H6w;No{Sk!{dCyU}JpPMUg9y>FQL ztzS-$97{Ax`}WYp^M7VI|Ft~)%NkFChDT9=cV@X@g}A_#mLtij;Z|AaXAoyU)mdDJ ztOyARbz)WWkHVH>pJXRMILja#Hh8TUn^QD?WnrB6<%29S(1kMP1H0l|x>a*C(WyBT z${z@?NFo{mC}N7)y!y9sc{L@AxjKu<#BH z>VR6dFCnh1;#%j>)hR)Kmh3^=EN3CIozHu}7p3owh*kkDJpdcMJsZDvBU~jR&Brxk zlts*ljU#(!=r=rze4u86si?EYa!pklMR zc32;^%0{oyBU?swvvy?a8?vRC_cTGzA3t~Py&eAgQ*F1NK{ET98ff_~x#LoA9)S;$ z57krB%^69ypZ5^s_MhP(0w~zAx=v|D7yfJqnB(xc$){w%G~b4Apc*oBSJ5DuN&b})BQ5it`&529s< zK!!@4Bt^bB2o{!=ElZBLpdS#ZmR}UwZ*jayYLDBE^{J_+?=~V#L8YQ0bC!J8U|%}G zLk}tvqCVA1vs+A@@D1(T;M+Fa5~leva6ZDyWV%Yz03lKaP!d$I3RE~q=F7RKajkd} zVNj`6CUk5Fscmdtfy>8#eJAwheLZ}B*m04KVtwNvN0004Q4%j}I#$l;9L`E8gg9C+ zQlhu`s)azNH=gAhHrX&l{V}{&qa#C-*$IJ0{}|(@Qy*b()_lqlrWs_a|E}7qASKlI zFJPnC8f6>O_NSIjj160&+U{qaeL)Dq(G?j!--O@UIA341(qH_!J}oSl$Y|)CYs5(} zF?PCG?VpLtKx3<5QGWZO75?xorjRxRH<@vxoK3@!7O&C^ zkLa#kl8<9ijW(Ogj5v|`9rLP7mQN8u{2POQa+x#R$UY5*n7=fc3`&znA zadP$7p)h|}4MQVEjw~kD0VxAO@@Cq+_StOk=(@AcL9>+e^II2ueoSN-OIo3}rC!!g zl9&F`(x(pdvhTv2kTLZF3*FB0oxIjjrLH%YdG?mei(d#u4z>N88fbnt22Y4o$S-8& zD^eY@%Mu?d`NSFkR)lN6^`73k7N04mj5hS>8NXM)7gXw#GM$A}v!z4T5)w!hAOG~S 
z#5=Hts|cJz8_|P=Mal`2(Ky!L&2_%1&$nzX_m#Noca1oN*ILNSUc}=ct%M#--sp#Y z0ihC+nVn^0F^(ufR8zXom%NZb%%rOWVc6T+IDo8kTE$V-0(6CY^j=b1U+A6E5gu{q z?x7?elY6&)NaG+Ebqve?8%zMznbhU+ofboz&xqhWCQ=EkIXWb1>LP_CG0a(+w!;zl zpY25BRLF|Jd)z9Pw`VG9msdhXIvl6mfMXpD@B|;AZy=1Y{5tp!SHr=wUPIQfkS1e1 z^3+MMPAf!m_@$tQxhU0ZF-bmPZUKI+5eqJf86v|8b6-za<11_{Sr*i2D)zOk5&z0E zn(ITGlV8q$%=TRX|L7Rd7%xmYCKR^kt=5ZH`ORr>JS&}M4S9MCZ{i!)l zwa5^A7+l!CHA5aqT3I*L0N0Cu@(ut60B+(KHtTrX?BwBJKx_hc)-AG6n+tqhE{VbY z0{ai_*#xc#=&96G{dg}B-=N^XvU2~0X!uVg#Q*dT=reyR&%Nl{OQf=Yc}YU2KnJ8Z zX$H+j2#4y|dW>Jlk=Hk%Ws7l7;TQhD?g8>lBbJe=&dbXZtikT~GwL7DcXze&JnNDH zUB7tn3yU;N%eP{*(dDm^BWZ%LUNHbcs3FeE(uNK`W(DJ=2=QqqX5LX`=-VH%bS6N2 zhDSEA_udm60+GKi=ngld&j3Q4mE!eZxd7+QN5qP|>C5jr;I!cY?w3PSl^t{&6U(Up z6iWxEB?(YpflRY^B=1=S-Gp{?vy;G!ZE6{SAax-=y||PCC5|xUBla)u`LI5Yvk;*^ zG2VUojyelTh9ppydKBPuw-N;8*X>T-LoeyTO}jzm6C#ZL!`{Y8{Om+-Q?2gPr$^lk z%ODEZmiE0B7L3KfXd``cbxLnADDF&VGQP?U)t|0FG7R!^gm zr6HU(daGlBn&9Hir-qg(+^hUvv$pcfqeXWKQHC4+2ES)_{pp@KT~pd3@N?&G4r)J1sK!e0h`R3&g zAp0>fKC32hr|oI**58M}jLA^uw{@zi$9#+zuAGt2di?|MTGFIhqV|-zz;A|fUL`Lc z+9ReSG1l*qwOwT^B{HOQ2c{o5r)S-wCAu&9fU(FpivO>zUN-R>JN~z}Sq;koKFnqpPwCt|H4xq_`PIY`Zu}f(p@)ltUji(2E_wwi;#y@cqLNj{?4;d;x#HV{*}<+MF%|{ zb3kLEStj%yQAT|4+I)dlr!W)MQTwqZ$+{h=!L*fEe#b>O8w{pNEik)AqWpHQ3)Ar* zt~0VyxO{xvvVmlg^`6f{c&GE!ze@GbV@ebdx95ESY{pQ9|Jm=YPd{8{#9s&*3E-k* zo`izKviMA)))GP|j*Ln*DoNUrq@(@C%Ev)fOiJ&#yDRxzHgv#i){H46Fqv_&+qfSU zbhDnXKpttFyJbm7@tLi`GQdkrZpRwM7jt+Rfe9vaU8z-{K|uEGQ8|8O)9AsKt{(%F zt&IkLTgH&P)a%j}L(W2xAia3)Fj5#o;wwAxL3i4o??S}y7Nt7S*{3=H#B_o*Cdg+> zbWu2@LloTLYFUCiF7f#H2;RU2#f~y+k0&uz!{5Kdr6otJP(hTR)JV|9l9)^Pke2m4 zz`HkGgHUbxN7b9a1i)oCCe=C1eeQkk%>FL&<{bXO^SeI1a8J`eQKse=#z}b-drdkO z{iZ8`nxZ*jk0PmeMy~Gm2$o(FSkYQ))5>tlGWC#7$sEw1JMV1&x$Tb92MNN7=K+_Q zPKBn^MA(>Hk#A1zKetFMJL>R{{E_*$mi-@`?f>?tZ12jE*D3$O7H=tb(zEOb%+6h{ z^Hbe-s~jqD*6z(T8Zod^Jj>b2ZJ(7uPUxVEgu=+49&a4|0c$J$W7(44q4c{GJX{SI z?gL-#twg2C{MWcY|BNyUHIQF9BmqIisW|xSB0-lV_;H%s8hEl3hq?|I3p+pO)f9gW zi26Y+OG-86%wwdGQwK?^n1OLWVWgf*3=u4dY|{aiEdXt2I&Ln86SebrvQ`o8vLoU6m^OnYPZ(xM==(+?f|OzqjS_`Z?hQtI%i}Ko*um zu1OipM02}U!?zD+8+H~!3*ooFZ}Oc8k50)VNLR}QFY?yV{>Zp%PPY08ZV(N_VJhNB}<#ItApmSTlWn{g6xb6KgW#>5Ti4Tkk>* zeZVX96J&QfZv!<*HqapcL6uvw$iljTE;cD^Ur$R7eG$sk9FKkb;u~~bJTOx?tD7S4 zc6fqL>?cq{eb`c65_@E%C-WQkostU*}nL-tC2PJv`%I%JcC2Gy-SuMkMab z3esHMbh6$ixJZQaUdOf}r2^usC|;|2Wz$Da*xauKztZ)``~@6^t~9QCg7Z$Y!YohO zMvFfarTa6^IzY{O4+61~(|WMsV>Q>Oq+i)fnNpKf*7T846hY>}vrdAC#&X>y4fa+w zfA|*q0=gW@@$DUMF;_RX;gnVQ81Xk_7-H+HgyPja1`Bj` Date: Mon, 9 Jan 2023 13:09:23 +0800 Subject: [PATCH 06/25] [Doc] Add checklist of ISNet (#2460) ## Motivation As title. ## Modification - projects/isnet/README.md --- projects/isnet/README.md | 60 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/projects/isnet/README.md b/projects/isnet/README.md index b2623e39f8..3a3172a9d9 100644 --- a/projects/isnet/README.md +++ b/projects/isnet/README.md @@ -55,3 +55,63 @@ mim test mmsegmentation configs/isnet_r50-d8_8xb2-160k_cityscapes-512x1024.py -- pages={7169-7178} } ``` + +## Checklist + +The progress of ISNet. + + + +- [x] Milestone 1: PR-ready, and acceptable to be one of the `projects/`. + + - [x] Finish the code + + + + - [x] Basic docstrings & proper citation + + + + - [x] Test-time correctness + + + + - [x] A full README + + + +- [ ] Milestone 2: Indicates a successful model implementation. + + - [ ] Training-time correctness + + + +- [ ] Milestone 3: Good to be a part of our core package! 
+
+  - [ ] Type hints and docstrings
+
+
+
+  - [ ] Unit tests
+
+
+
+  - [ ] Code polishing
+
+
+
+  - [ ] Metafile.yml
+
+
+
+- [ ] Move your modules into the core package following the codebase's file hierarchy structure.
+
+
+
+- [ ] Refactor your modules into the core package following the codebase's file hierarchy structure.

From 4e759bb1d2542960cf7956e4a5b6587ad978097d Mon Sep 17 00:00:00 2001
From: Miao Zheng <76149310+MeowZheng@users.noreply.github.com>
Date: Tue, 10 Jan 2023 17:11:59 +0800
Subject: [PATCH 07/25] [CI] Remove test py3.6 (#2468)

## Motivation

As title.

## Modification

1. .circleci/test.yml
2. .github/workflows/merge_stage_test.yml
---
 .circleci/test.yml                     | 8 ++++----
 .github/workflows/merge_stage_test.yml | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/.circleci/test.yml b/.circleci/test.yml
index 414e3c4ced..20391768f9 100644
--- a/.circleci/test.yml
+++ b/.circleci/test.yml
@@ -149,13 +149,14 @@ workflows:
           name: minimum_version_cpu
           torch: 1.6.0
           torchvision: 0.7.0
-          python: 3.6.9 # The lowest python 3.6.x version available on CircleCI images
+          python: "3.7"
           requires:
             - lint
       - build_cpu:
           name: maximum_version_cpu
-          torch: 1.13.0
-          torchvision: 0.14.0
+          # TODO: Fix torch 1.13 forward crush
+          torch: 1.12.0
+          torchvision: 0.13.0
           python: 3.9.0
           requires:
             - minimum_version_cpu
@@ -187,4 +188,3 @@ workflows:
             only:
               - dev-1.x
               - 1.x
-              - master
diff --git a/.github/workflows/merge_stage_test.yml b/.github/workflows/merge_stage_test.yml
index 65af4d2bcc..c08b3d2a19 100644
--- a/.github/workflows/merge_stage_test.yml
+++ b/.github/workflows/merge_stage_test.yml
@@ -22,7 +22,7 @@ jobs:
     runs-on: ubuntu-18.04
     strategy:
       matrix:
-        python-version: [3.6, 3.8, 3.9]
+        python-version: [3.8, 3.9]
         torch: [1.8.1]
         include:
           - torch: 1.8.1

From f90f7a56d4a5e4e83689864f7d5031b3b15259c5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=B0=A2=E6=98=95=E8=BE=B0?=
Date: Tue, 10 Jan 2023 18:03:43 +0800
Subject: [PATCH 08/25] [Fix] Fix incorrect `img_shape` value assignment in RandomCrop (#2469)

## Motivation

Fix incorrect `img_shape` value assignment.
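In short, `RandomCrop.transform` stored the full array shape, channels included, in `img_shape`, while downstream code expects only the spatial size. A minimal sketch of the fix, using hypothetical shapes:

```python
# Illustration only: after cropping an HWC image, only the spatial size
# belongs in `img_shape`.
import numpy as np

img = np.zeros((512, 1024, 3), dtype=np.uint8)  # cropped image, (H, W, C)
results = {'img': img}
results['img_shape'] = img.shape      # before the fix: (512, 1024, 3)
results['img_shape'] = img.shape[:2]  # after the fix:  (512, 1024)
```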
## Modification - mmseg/datasets/transforms/transforms.py --- mmseg/datasets/transforms/transforms.py | 4 ++-- tests/test_datasets/test_transform.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mmseg/datasets/transforms/transforms.py b/mmseg/datasets/transforms/transforms.py index 21b8e34e33..36292cf87b 100644 --- a/mmseg/datasets/transforms/transforms.py +++ b/mmseg/datasets/transforms/transforms.py @@ -314,9 +314,9 @@ def transform(self, results: dict) -> dict: # crop semantic seg for key in results.get('seg_fields', []): results[key] = self.crop(results[key], crop_bbox) - img_shape = img.shape + results['img'] = img - results['img_shape'] = img_shape + results['img_shape'] = img.shape[:2] return results def __repr__(self): diff --git a/tests/test_datasets/test_transform.py b/tests/test_datasets/test_transform.py index f052e6e65e..a0c949af38 100644 --- a/tests/test_datasets/test_transform.py +++ b/tests/test_datasets/test_transform.py @@ -321,7 +321,7 @@ def test_random_crop(): results = pipeline(results) assert results['img'].shape[:2] == (h - 20, w - 20) - assert results['img_shape'][:2] == (h - 20, w - 20) + assert results['img_shape'] == (h - 20, w - 20) assert results['gt_semantic_seg'].shape[:2] == (h - 20, w - 20) From 546f3b5b20f82c6eca11ae304e9533cd263558d5 Mon Sep 17 00:00:00 2001 From: MengzhangLI Date: Wed, 11 Jan 2023 11:50:47 +0800 Subject: [PATCH 09/25] [Doc] Update ZN dataset preparation of Synapse (#2465) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Motivation - Add Chinese version of Synapse dataset preparation. - Modify all `,` and `。` to `,` and `.` in `docs/zh_cn/user_guides/2_dataset_prepare.md`. --- docs/en/user_guides/2_dataset_prepare.md | 15 +- docs/zh_cn/user_guides/2_dataset_prepare.md | 183 +++++++++++++++----- 2 files changed, 147 insertions(+), 51 deletions(-) diff --git a/docs/en/user_guides/2_dataset_prepare.md b/docs/en/user_guides/2_dataset_prepare.md index 5de5de2282..e9c7683dc0 100644 --- a/docs/en/user_guides/2_dataset_prepare.md +++ b/docs/en/user_guides/2_dataset_prepare.md @@ -138,6 +138,13 @@ mmsegmentation │ │ ├── ann_dir │ │ │ ├── train │ │ │ ├── val +│ ├── synapse +│ │ ├── img_dir +│ │ │ ├── train +│ │ │ ├── val +│ │ ├── ann_dir +│ │ │ ├── train +│ │ │ ├── val ``` ### Cityscapes @@ -323,7 +330,7 @@ For Potsdam dataset, please run the following command to download and re-organiz python tools/dataset_converters/potsdam.py /path/to/potsdam ``` -In our default setting, it will generate 3456 images for training and 2016 images for validation. +In our default setting, it will generate 3,456 images for training and 2,016 images for validation. ### ISPRS Vaihingen @@ -376,7 +383,7 @@ You may need to follow the following structure for dataset preparation after dow python tools/dataset_converters/isaid.py /path/to/iSAID ``` -In our default setting (`patch_width`=896, `patch_height`=896, `overlap_area`=384), it will generate 33978 images for training and 11644 images for validation. +In our default setting (`patch_width`=896, `patch_height`=896, `overlap_area`=384), it will generate 33,978 images for training and 11,644 images for validation. ## LIP(Look Into Person) dataset @@ -429,7 +436,7 @@ cd ./RawData/Training Then create `train.txt` and `val.txt` to split dataset. -According to TransUnet, the following is the data set division. +According to TransUNet, the following is the data set division. train.txt @@ -493,5 +500,7 @@ Then, use this command to convert synapse dataset. 
python tools/dataset_converters/synapse.py --dataset-path /path/to/synapse ``` +In our default setting, it will generate 2,211 2D images for training and 1,568 2D images for validation. + Noted that MMSegmentation default evaluation metric (such as mean dice value) is calculated on 2D slice image, which is not comparable to results of 3D scan in some paper such as [TransUNet](https://arxiv.org/abs/2102.04306). diff --git a/docs/zh_cn/user_guides/2_dataset_prepare.md b/docs/zh_cn/user_guides/2_dataset_prepare.md index a546b1a3d0..a8dde9211a 100644 --- a/docs/zh_cn/user_guides/2_dataset_prepare.md +++ b/docs/zh_cn/user_guides/2_dataset_prepare.md @@ -1,6 +1,6 @@ ## 准备数据集(待更新) -推荐用软链接,将数据集根目录链接到 `$MMSEGMENTATION/data` 里。如果您的文件夹结构是不同的,您也许可以试着修改配置文件里对应的路径。 +推荐用软链接, 将数据集根目录链接到 `$MMSEGMENTATION/data` 里. 如果您的文件夹结构是不同的, 您也许可以试着修改配置文件里对应的路径. ```none mmsegmentation @@ -119,51 +119,58 @@ mmsegmentation │ │ ├── ann_dir │ │ │ ├── train │ │ │ ├── val +│ ├── synapse +│ │ ├── img_dir +│ │ │ ├── train +│ │ │ ├── val +│ │ ├── ann_dir +│ │ │ ├── train +│ │ │ ├── val ``` ### Cityscapes -注册成功后,数据集可以在 [这里](https://www.cityscapes-dataset.com/downloads/) 下载。 +注册成功后, 数据集可以在 [这里](https://www.cityscapes-dataset.com/downloads/) 下载. -通常情况下,`**labelTrainIds.png` 被用来训练 cityscapes。 +通常情况下, `**labelTrainIds.png` 被用来训练 cityscapes. 基于 [cityscapesscripts](https://github.com/mcordts/cityscapesScripts), 我们提供了一个 [脚本](https://github.com/open-mmlab/mmsegmentation/blob/master/tools/convert_datasets/cityscapes.py), -去生成 `**labelTrainIds.png`。 +去生成 `**labelTrainIds.png`. ```shell -# --nproc 8 意味着有 8 个进程用来转换,它也可以被忽略。 +# --nproc 8 意味着有 8 个进程用来转换,它也可以被忽略. python tools/convert_datasets/cityscapes.py data/cityscapes --nproc 8 ``` ### Pascal VOC -Pascal VOC 2012 可以在 [这里](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar) 下载。 -此外,许多最近在 Pascal VOC 数据集上的工作都会利用增广的数据,它们可以在 [这里](http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz) 找到。 +Pascal VOC 2012 可以在 [这里](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar) 下载. +此外, 许多最近在 Pascal VOC 数据集上的工作都会利用增广的数据, 它们可以在 [这里](http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz) 找到. -如果您想使用增广后的 VOC 数据集,请运行下面的命令来将数据增广的标注转成正确的格式。 +如果您想使用增广后的 VOC 数据集, 请运行下面的命令来将数据增广的标注转成正确的格式. ```shell -# --nproc 8 意味着有 8 个进程用来转换,它也可以被忽略。 +# --nproc 8 意味着有 8 个进程用来转换,它也可以被忽略. python tools/convert_datasets/voc_aug.py data/VOCdevkit data/VOCdevkit/VOCaug --nproc 8 ``` -关于如何拼接数据集 (concatenate) 并一起训练它们,更多细节请参考 [拼接连接数据集](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/tutorials/customize_datasets.md#%E6%8B%BC%E6%8E%A5%E6%95%B0%E6%8D%AE%E9%9B%86) 。 +关于如何拼接数据集 (concatenate) 并一起训练它们, 更多细节请参考 [拼接连接数据集](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/tutorials/customize_datasets.md#%E6%8B%BC%E6%8E%A5%E6%95%B0%E6%8D%AE%E9%9B%86) . ### ADE20K -ADE20K 的训练集和验证集可以在 [这里](http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip) 下载。 -您还可以在 [这里](http://data.csail.mit.edu/places/ADEchallenge/release_test.zip) 下载验证集。 +ADE20K 的训练集和验证集可以在 [这里](http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip) 下载. +您还可以在 [这里](http://data.csail.mit.edu/places/ADEchallenge/release_test.zip) 下载验证集. 
### Pascal Context -Pascal Context 的训练集和验证集可以在 [这里](http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar) 下载。 -注册成功后,您还可以在 [这里](http://host.robots.ox.ac.uk:8080/eval/downloads/VOC2010test.tar) 下载验证集。 +Pascal Context 的训练集和验证集可以在 [这里](http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar) 下载. +注册成功后, 您还可以在 [这里](http://host.robots.ox.ac.uk:8080/eval/downloads/VOC2010test.tar) 下载验证集. -为了从原始数据集里切分训练集和验证集, 您可以在 [这里](https://codalabuser.blob.core.windows.net/public/trainval_merged.json) -下载 trainval_merged.json。 +为了从原始数据集里切分训练集和验证集, 您可以在 [这里](https://codalabuser.blob.core.windows.net/public/trainval_merged.json) +下载 trainval_merged.json. -如果您想使用 Pascal Context 数据集, -请安装 [细节](https://github.com/zhanghang1989/detail-api) 然后再运行如下命令来把标注转换成正确的格式。 +如果您想使用 Pascal Context 数据集, +请安装 [细节](https://github.com/zhanghang1989/detail-api) 然后再运行如下命令来把标注转换成正确的格式. ```shell python tools/convert_datasets/pascal_context.py data/VOCdevkit data/VOCdevkit/VOC2010/trainval_merged.json @@ -171,64 +178,64 @@ python tools/convert_datasets/pascal_context.py data/VOCdevkit data/VOCdevkit/VO ### CHASE DB1 -CHASE DB1 的训练集和验证集可以在 [这里](https://staffnet.kingston.ac.uk/~ku15565/CHASE_DB1/assets/CHASEDB1.zip) 下载。 +CHASE DB1 的训练集和验证集可以在 [这里](https://staffnet.kingston.ac.uk/~ku15565/CHASE_DB1/assets/CHASEDB1.zip) 下载. -为了将 CHASE DB1 数据集转换成 MMSegmentation 的格式,您需要运行如下命令: +为了将 CHASE DB1 数据集转换成 MMSegmentation 的格式,您需要运行如下命令: ```shell python tools/convert_datasets/chase_db1.py /path/to/CHASEDB1.zip ``` -这个脚本将自动生成正确的文件夹结构。 +这个脚本将自动生成正确的文件夹结构. ### DRIVE -DRIVE 的训练集和验证集可以在 [这里](https://drive.grand-challenge.org/) 下载。 -在此之前,您需要注册一个账号,当前 '1st_manual' 并未被官方提供,因此需要您从其他地方获取。 +DRIVE 的训练集和验证集可以在 [这里](https://drive.grand-challenge.org/) 下载. +在此之前, 您需要注册一个账号, 当前 '1st_manual' 并未被官方提供, 因此需要您从其他地方获取. -为了将 DRIVE 数据集转换成 MMSegmentation 格式,您需要运行如下命令: +为了将 DRIVE 数据集转换成 MMSegmentation 格式, 您需要运行如下命令: ```shell python tools/convert_datasets/drive.py /path/to/training.zip /path/to/test.zip ``` -这个脚本将自动生成正确的文件夹结构。 +这个脚本将自动生成正确的文件夹结构. ### HRF -首先,下载 [healthy.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/healthy.zip) [glaucoma.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/glaucoma.zip), [diabetic_retinopathy.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/diabetic_retinopathy.zip), [healthy_manualsegm.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/healthy_manualsegm.zip), [glaucoma_manualsegm.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/glaucoma_manualsegm.zip) 以及 [diabetic_retinopathy_manualsegm.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/diabetic_retinopathy_manualsegm.zip) 。 +首先, 下载 [healthy.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/healthy.zip) [glaucoma.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/glaucoma.zip), [diabetic_retinopathy.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/diabetic_retinopathy.zip), [healthy_manualsegm.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/healthy_manualsegm.zip), [glaucoma_manualsegm.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/glaucoma_manualsegm.zip) 以及 [diabetic_retinopathy_manualsegm.zip](https://www5.cs.fau.de/fileadmin/research/datasets/fundus-images/diabetic_retinopathy_manualsegm.zip). 
-为了将 HRF 数据集转换成 MMSegmentation 格式,您需要运行如下命令: +为了将 HRF 数据集转换成 MMSegmentation 格式, 您需要运行如下命令: ```shell python tools/convert_datasets/hrf.py /path/to/healthy.zip /path/to/healthy_manualsegm.zip /path/to/glaucoma.zip /path/to/glaucoma_manualsegm.zip /path/to/diabetic_retinopathy.zip /path/to/diabetic_retinopathy_manualsegm.zip ``` -这个脚本将自动生成正确的文件夹结构。 +这个脚本将自动生成正确的文件夹结构. ### STARE -首先,下载 [stare-images.tar](http://cecas.clemson.edu/~ahoover/stare/probing/stare-images.tar), [labels-ah.tar](http://cecas.clemson.edu/~ahoover/stare/probing/labels-ah.tar) 和 [labels-vk.tar](http://cecas.clemson.edu/~ahoover/stare/probing/labels-vk.tar) 。 +首先, 下载 [stare-images.tar](http://cecas.clemson.edu/~ahoover/stare/probing/stare-images.tar), [labels-ah.tar](http://cecas.clemson.edu/~ahoover/stare/probing/labels-ah.tar) 和 [labels-vk.tar](http://cecas.clemson.edu/~ahoover/stare/probing/labels-vk.tar). -为了将 STARE 数据集转换成 MMSegmentation 格式,您需要运行如下命令: +为了将 STARE 数据集转换成 MMSegmentation 格式, 您需要运行如下命令: ```shell python tools/convert_datasets/stare.py /path/to/stare-images.tar /path/to/labels-ah.tar /path/to/labels-vk.tar ``` -这个脚本将自动生成正确的文件夹结构。 +这个脚本将自动生成正确的文件夹结构. ### Dark Zurich -因为我们只支持在此数据集上测试模型,所以您只需下载[验证集](https://data.vision.ee.ethz.ch/csakarid/shared/GCMA_UIoU/Dark_Zurich_val_anon.zip) 。 +因为我们只支持在此数据集上测试模型, 所以您只需下载[验证集](https://data.vision.ee.ethz.ch/csakarid/shared/GCMA_UIoU/Dark_Zurich_val_anon.zip). ### Nighttime Driving -因为我们只支持在此数据集上测试模型,所以您只需下载[测试集](http://data.vision.ee.ethz.ch/daid/NighttimeDriving/NighttimeDrivingTest.zip) 。 +因为我们只支持在此数据集上测试模型,所以您只需下载[测试集](http://data.vision.ee.ethz.ch/daid/NighttimeDriving/NighttimeDrivingTest.zip). ### LoveDA -可以从 Google Drive 里下载 [LoveDA数据集](https://drive.google.com/drive/folders/1ibYV0qwn4yuuh068Rnc-w4tPi0U0c-ti?usp=sharing) 。 +可以从 Google Drive 里下载 [LoveDA数据集](https://drive.google.com/drive/folders/1ibYV0qwn4yuuh068Rnc-w4tPi0U0c-ti?usp=sharing). 或者它还可以从 [zenodo](https://zenodo.org/record/5706578#.YZvN7SYRXdF) 下载, 您需要运行如下命令: @@ -241,46 +248,46 @@ wget https://zenodo.org/record/5706578/files/Val.zip wget https://zenodo.org/record/5706578/files/Test.zip ``` -对于 LoveDA 数据集,请运行以下命令下载并重新组织数据集 +对于 LoveDA 数据集,请运行以下命令下载并重新组织数据集: ```shell python tools/convert_datasets/loveda.py /path/to/loveDA ``` -请参照 [这里](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/inference.md) 来使用训练好的模型去预测 LoveDA 测试集并且提交到官网。 +请参照 [这里](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/inference.md) 来使用训练好的模型去预测 LoveDA 测试集并且提交到官网. -关于 LoveDA 的更多细节可以在[这里](https://github.com/Junjue-Wang/LoveDA) 找到。 +关于 LoveDA 的更多细节可以在[这里](https://github.com/Junjue-Wang/LoveDA) 找到. ### ISPRS Potsdam [Potsdam](https://www2.isprs.org/commissions/comm2/wg4/benchmark/2d-sem-label-potsdam/) -数据集是一个有着2D 语义分割内容标注的城市遥感数据集。 -数据集可以从挑战[主页](https://www2.isprs.org/commissions/comm2/wg4/benchmark/data-request-form/) 获得。 -需要其中的 '2_Ortho_RGB.zip' 和 '5_Labels_all_noBoundary.zip'。 +数据集是一个有着2D 语义分割内容标注的城市遥感数据集. +数据集可以从挑战[主页](https://www2.isprs.org/commissions/comm2/wg4/benchmark/data-request-form/) 获得. +需要其中的 `2_Ortho_RGB.zip` 和 `5_Labels_all_noBoundary.zip`. -对于 Potsdam 数据集,请运行以下命令下载并重新组织数据集 +对于 Potsdam 数据集,请运行以下命令下载并重新组织数据集 ```shell python tools/convert_datasets/potsdam.py /path/to/potsdam ``` -使用我们默认的配置, 将生成 3456 张图片的训练集和 2016 张图片的验证集。 +使用我们默认的配置, 将生成 3,456 张图片的训练集和 2,016 张图片的验证集. ### ISPRS Vaihingen [Vaihingen](https://www2.isprs.org/commissions/comm2/wg4/benchmark/2d-sem-label-vaihingen/) -数据集是一个有着2D 语义分割内容标注的城市遥感数据集。 +数据集是一个有着2D 语义分割内容标注的城市遥感数据集. 
数据集可以从挑战 [主页](https://www2.isprs.org/commissions/comm2/wg4/benchmark/data-request-form/). -需要其中的 'ISPRS_semantic_labeling_Vaihingen.zip' 和 'ISPRS_semantic_labeling_Vaihingen_ground_truth_eroded_COMPLETE.zip'。 +需要其中的 'ISPRS_semantic_labeling_Vaihingen.zip' 和 'ISPRS_semantic_labeling_Vaihingen_ground_truth_eroded_COMPLETE.zip'. -对于 Vaihingen 数据集,请运行以下命令下载并重新组织数据集 +对于 Vaihingen 数据集, 请运行以下命令下载并重新组织数据集 ```shell python tools/convert_datasets/vaihingen.py /path/to/vaihingen ``` -使用我们默认的配置 (`clip_size`=512, `stride_size`=256), 将生成 344 张图片的训练集和 398 张图片的验证集。 +使用我们默认的配置 (`clip_size`=512, `stride_size`=256), 将生成 344 张图片的训练集和 398 张图片的验证集. ### iSAID @@ -290,7 +297,7 @@ iSAID 数据集(训练集/验证集)的注释可以从 [iSAID](https://captain-w 该数据集是一个大规模的实例分割(也可以用于语义分割)的遥感数据集. -下载后,在数据集转换前,您需要将数据集文件夹调整成如下格式. +下载后, 在数据集转换前, 您需要将数据集文件夹调整成如下格式. ``` │ ├── iSAID @@ -316,4 +323,84 @@ iSAID 数据集(训练集/验证集)的注释可以从 [iSAID](https://captain-w python tools/convert_datasets/isaid.py /path/to/iSAID ``` -使用我们默认的配置 (`patch_width`=896, `patch_height`=896, `overlap_area`=384), 将生成 33978 张图片的训练集和 11644 张图片的验证集。 +使用我们默认的配置 (`patch_width`=896, `patch_height`=896, `overlap_area`=384), 将生成 33,978 张图片的训练集和 11,644 张图片的验证集. + +## Synapse dataset + +这个数据集可以在这个[网页](https://www.synapse.org/#!Synapse:syn3193805/wiki/) 里被下载. +我们参考了 [TransUNet](https://arxiv.org/abs/2102.04306) 里面的数据集预处理的设置, 它将原始数据集 (30 套 3D 样例) 切分出 18 套用于训练, 12 套用于验证. 请参考以下步骤来准备该数据集: + +```shell +unzip RawData.zip +cd ./RawData/Training +``` + +随后新建 `train.txt` 和 `val.txt`. + +根据 TransUNet 来将训练集和验证集如下划分: + +train.txt + +```none +img0005.nii.gz +img0006.nii.gz +img0007.nii.gz +img0009.nii.gz +img0010.nii.gz +img0021.nii.gz +img0023.nii.gz +img0024.nii.gz +img0026.nii.gz +img0027.nii.gz +img0028.nii.gz +img0030.nii.gz +img0031.nii.gz +img0033.nii.gz +img0034.nii.gz +img0037.nii.gz +img0039.nii.gz +img0040.nii.gz +``` + +val.txt + +```none +img0008.nii.gz +img0022.nii.gz +img0038.nii.gz +img0036.nii.gz +img0032.nii.gz +img0002.nii.gz +img0029.nii.gz +img0003.nii.gz +img0001.nii.gz +img0004.nii.gz +img0025.nii.gz +img0035.nii.gz +``` + +此时, synapse 数据集包括了以下内容: + +```none +├── Training +│ ├── img +│ │ ├── img0001.nii.gz +│ │ ├── img0002.nii.gz +│ │ ├── ... +│ ├── label +│ │ ├── label0001.nii.gz +│ │ ├── label0002.nii.gz +│ │ ├── ... +│ ├── train.txt +│ ├── val.txt +``` + +随后, 运行下面的数据集转换脚本来处理 synapse 数据集: + +```shell +python tools/dataset_converters/synapse.py --dataset-path /path/to/synapse +``` + +使用我们默认的配置, 将生成 2,211 张 2D 图片的训练集和 1,568 张图片的验证集. + +需要注意的是 MMSegmentation 默认的评价指标 (例如平均 Dice 值) 都是基于每帧 2D 图片计算的, 这与基于每套 3D 图片计算评价指标的 [TransUNet](https://arxiv.org/abs/2102.04306) 是不同的. From 8dae9465a6edc7958fc40e9c8870c3afd1227f66 Mon Sep 17 00:00:00 2001 From: Wencheng Wu <41542251+274869388@users.noreply.github.com> Date: Wed, 11 Jan 2023 11:51:29 +0800 Subject: [PATCH 10/25] [Feature] Add `gt_edge_map` field. (#2466) ## Motivation The motivation of this PR is to add `gt_edge_map` field to support boundary loss. ## Modification - GenerateEdge Modify `gt_edge` field to `gt_edge_map`. - PackSegInputs Add `gt_edge_map` to data_sample. - stack_batch Pad `gt_edge_map` to max_shape. ## BC-breaking (Optional) No ## Use cases (Optional) Reference `GenerateEdge`. 
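
Below is a minimal sketch of how the renamed key could be wired into a training pipeline; the `edge_width` value is only chosen for illustration:

```python
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    # Extracts the borders between different semantic regions from the
    # seg map and stores them under the new `gt_edge_map` key.
    dict(type='GenerateEdge', edge_width=3),
    # Packs `gt_edge_map` into the data sample alongside `gt_sem_seg`,
    # so a boundary loss can consume it; `stack_batch` pads it to the
    # max shape with `seg_pad_val`.
    dict(type='PackSegInputs')
]
```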
---
 mmseg/datasets/transforms/formatting.py | 6 ++++++
 mmseg/datasets/transforms/transforms.py | 4 ++--
 mmseg/utils/misc.py                     | 5 +++++
 tests/test_datasets/test_transform.py   | 2 +-
 4 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/mmseg/datasets/transforms/formatting.py b/mmseg/datasets/transforms/formatting.py
index bb4db4484e..f4018f788f 100644
--- a/mmseg/datasets/transforms/formatting.py
+++ b/mmseg/datasets/transforms/formatting.py
@@ -73,6 +73,12 @@ def transform(self, results: dict) -> dict:
                 ...].astype(np.int64)))
             data_sample.gt_sem_seg = PixelData(**gt_sem_seg_data)

+        if 'gt_edge_map' in results:
+            gt_edge_data = dict(
+                data=to_tensor(results['gt_edge_map'][None,
+                                                      ...].astype(np.int64)))
+            data_sample.set_data(dict(gt_edge_map=PixelData(**gt_edge_data)))
+
         img_meta = {}
         for key in self.meta_keys:
             if key in results:
diff --git a/mmseg/datasets/transforms/transforms.py b/mmseg/datasets/transforms/transforms.py
index 36292cf87b..fbb890b559 100644
--- a/mmseg/datasets/transforms/transforms.py
+++ b/mmseg/datasets/transforms/transforms.py
@@ -1245,7 +1245,7 @@ class GenerateEdge(BaseTransform):
     - gt_seg_map

     Added Keys:
-    - gt_edge (np.ndarray, uint8): The edge annotation generated from the
+    - gt_edge_map (np.ndarray, uint8): The edge annotation generated from the
       seg map by extracting border between different semantics.

     Args:
@@ -1296,7 +1296,7 @@ def transform(self, results: Dict) -> Dict:
                 (self.edge_width, self.edge_width))
             edge = cv2.dilate(edge, kernel)

-        results['gt_edge'] = edge
+        results['gt_edge_map'] = edge
         results['edge_width'] = self.edge_width

         return results
diff --git a/mmseg/utils/misc.py b/mmseg/utils/misc.py
index aa30893609..09d2349c15 100644
--- a/mmseg/utils/misc.py
+++ b/mmseg/utils/misc.py
@@ -98,6 +98,11 @@ def stack_batch(inputs: List[torch.Tensor],
                 del data_sample.gt_sem_seg.data
                 data_sample.gt_sem_seg.data = F.pad(
                     gt_sem_seg, padding_size, value=seg_pad_val)
+            if 'gt_edge_map' in data_sample:
+                gt_edge_map = data_sample.gt_edge_map.data
+                del data_sample.gt_edge_map.data
+                data_sample.gt_edge_map.data = F.pad(
+                    gt_edge_map, padding_size, value=seg_pad_val)
             data_sample.set_metainfo({
                 'img_shape': tensor.shape[-2:],
                 'pad_shape': data_sample.gt_sem_seg.shape,
diff --git a/tests/test_datasets/test_transform.py b/tests/test_datasets/test_transform.py
index a0c949af38..146e639d77 100644
--- a/tests/test_datasets/test_transform.py
+++ b/tests/test_datasets/test_transform.py
@@ -792,7 +792,7 @@ def test_generate_edge():
     results['img_shape'] = seg_map.shape

     results = transform(results)
-    assert np.all(results['gt_edge'] == np.array([
+    assert np.all(results['gt_edge_map'] == np.array([
         [0, 0, 0, 1, 0],
         [0, 0, 1, 1, 1],
         [0, 1, 1, 1, 0],

From ce09639a21f0d5def4ad017a2920107774cbb860 Mon Sep 17 00:00:00 2001
From: Tianlong Ai <50650583+AI-Tianlong@users.noreply.github.com>
Date: Thu, 12 Jan 2023 17:12:40 +0800
Subject: [PATCH 11/25] CodeCamp #151 [Feature] Support HieraSeg on cityscapes
 (#2444)

## Support the `HieraSeg` interface on `cityscapes`

## Motivation

Support the `HieraSeg` inference interface on the cityscapes dataset.
Paper link: https://ieeexplore.ieee.org/document/9878466/

```
@article{li2022deep,
  title={Deep Hierarchical Semantic Segmentation},
  author={Li, Liulei and Zhou, Tianfei and Wang, Wenguan and Li, Jianwu and Yang, Yi},
  journal={CVPR},
  year={2022}
}
```

## Modification

Add the `HieraSeg` project under `projects/`.
Add `sep_aspp_contrast_head` decode head.
Add `HieraSeg` config.
Add `hiera_loss`, `hiera_triplet_loss_cityscape`, `tree_triplet_loss`.
---
 projects/HieraSeg/README.md                   |  93 ++++++++
 .../configs/_base_/datasets/cityscapes.py     |  67 ++++++
 .../configs/_base_/default_runtime.py         |  15 ++
 .../deeplabv3plus_r50-d8_vd_contrast.py       |  55 +++++
 .../configs/_base_/schedules/schedule_80k.py  |  24 ++
 ...us_r101-d8_4xb2-80l_cityscapes-512x1024.py |  21 ++
 projects/HieraSeg/decode_head/__init__.py     |   4 +
 .../decode_head/sep_aspp_contrast_head.py     | 178 ++++++++++++++
 projects/HieraSeg/losses/__init__.py          |   4 +
 .../losses/hiera_triplet_loss_cityscape.py    | 218 ++++++++++++++++++
 projects/HieraSeg/losses/tree_triplet_loss.py |  86 +++++++
 11 files changed, 765 insertions(+)
 create mode 100644 projects/HieraSeg/README.md
 create mode 100644 projects/HieraSeg/configs/_base_/datasets/cityscapes.py
 create mode 100644 projects/HieraSeg/configs/_base_/default_runtime.py
 create mode 100644 projects/HieraSeg/configs/_base_/models/deeplabv3plus_r50-d8_vd_contrast.py
 create mode 100644 projects/HieraSeg/configs/_base_/schedules/schedule_80k.py
 create mode 100644 projects/HieraSeg/configs/hieraseg/hieraseg_deeplabv3plus_r101-d8_4xb2-80l_cityscapes-512x1024.py
 create mode 100644 projects/HieraSeg/decode_head/__init__.py
 create mode 100644 projects/HieraSeg/decode_head/sep_aspp_contrast_head.py
 create mode 100644 projects/HieraSeg/losses/__init__.py
 create mode 100644 projects/HieraSeg/losses/hiera_triplet_loss_cityscape.py
 create mode 100644 projects/HieraSeg/losses/tree_triplet_loss.py

diff --git a/projects/HieraSeg/README.md b/projects/HieraSeg/README.md
new file mode 100644
index 0000000000..5519ec6916
--- /dev/null
+++ b/projects/HieraSeg/README.md
@@ -0,0 +1,93 @@
+# HieraSeg
+
+Support the `Deep Hierarchical Semantic Segmentation` interface on `cityscapes`
+
+## Description
+
+Author: AI-Tianlong
+
+This project implements `HieraSeg` inference on the `cityscapes` dataset
+
+## Usage
+
+### Prerequisites
+
+- Python 3.8
+- PyTorch 1.6 or higher
+- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation) v1.0.0rc3
+- mmcv v2.0.0rc3
+- mmengine
+
+### Dataset preparation
+
+Prepare the `cityscapes` dataset following this [structure](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#prepare-datasets)
+
+### Testing commands
+
+Please put [`hieraseg_deeplabv3plus_r101-d8_4xb2-80k_cityscapes-512x1024_20230112_125023-bc59a3d1.pth`](https://download.openmmlab.com/mmsegmentation/v0.5/hieraseg/hieraseg_deeplabv3plus_r101-d8_4xb2-80k_cityscapes-512x1024_20230112_125023-bc59a3d1.pth) into `mmsegmentation/checkpoints`
+
+#### Multi-GPU Test
+
+```bash
+# --tta is optional (multi-scale test); it requires mmengine >= 0.4.0
+bash tools/dist_test.sh [config] [model weights] [number of gpus] --tta
+```
+
+#### Example
+
+```shell
+bash tools/dist_test.sh projects/HieraSeg/configs/hieraseg/hieraseg_deeplabv3plus_r101-d8_4xb2-80l_cityscapes-512x1024.py checkpoints/hieraseg_deeplabv3plus_r101-d8_4xb2-80k_cityscapes-512x1024_20230112_125023-bc59a3d1.pth 2 --tta
+```
+
+## Results
+
+### Cityscapes
+
+| Method     | Backbone | Crop Size | mIoU  | mIoU (ms+flip) | config | model |
+| :--------: | :------: | :-------: | :---: | :------------: | :----: | :---: |
+| DeeplabV3+ | R-101-D8 | 
512x1024 | 81.61 | 82.71 | [config](https://github.com/open-mmlab/mmsegmentation/tree/dev-1.x/projects/HieraSeg/configs/hieraseg/hieraseg_deeplabv3plus_r101-d8_4xb2-80l_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hieraseg/hieraseg_deeplabv3plus_r101-d8_4xb2-80k_cityscapes-512x1024_20230112_125023-bc59a3d1.pth) | + + + +## Citation + +This project is modified from [qhanghu/HSSN_pytorch](https://github.com/qhanghu/HSSN_pytorch) + +```bibtex +@article{li2022deep, + title={Deep Hierarchical Semantic Segmentation}, + author={Li, Liulei and Zhou, Tianfei and Wang, Wenguan and Li, Jianwu and Yang, Yi}, + journal={CVPR}, + year={2022} +} +``` + +## Checklist + +- [x] Milestone 1: PR-ready, and acceptable to be one of the `projects/`. + + - [x] Finish the code + + - [x] Basic docstrings & proper citation + + - [x] Test-time correctness + + - [x] A full README + +- [ ] Milestone 2: Indicates a successful model implementation. + + - [ ] Training-time correctness + +- [ ] Milestone 3: Good to be a part of our core package! + + - [ ] Type hints and docstrings + + - [ ] Unit tests + + - [ ] Code polishing + + - [ ] Metafile.yml + +- [ ] Move your modules into the core package following the codebase's file hierarchy structure. + +- [ ] Refactor your modules into the core package following the codebase's file hierarchy structure. diff --git a/projects/HieraSeg/configs/_base_/datasets/cityscapes.py b/projects/HieraSeg/configs/_base_/datasets/cityscapes.py new file mode 100644 index 0000000000..1698e04721 --- /dev/null +++ b/projects/HieraSeg/configs/_base_/datasets/cityscapes.py @@ -0,0 +1,67 @@ +# dataset settings +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +crop_size = (512, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='RandomResize', + scale=(2048, 1024), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(2048, 1024), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] +img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] +tta_pipeline = [ + dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')), + dict( + type='TestTimeAug', + transforms=[ + [ + dict(type='Resize', scale_factor=r, keep_ratio=True) + for r in img_ratios + ], + [ + dict(type='RandomFlip', prob=0., direction='horizontal'), + dict(type='RandomFlip', prob=1., direction='horizontal') + ], [dict(type='LoadAnnotations')], [dict(type='PackSegInputs')] + ]) +] +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='leftImg8bit/train', seg_map_path='gtFine/train'), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='leftImg8bit/val', seg_map_path='gtFine/val'), + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = 
dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator diff --git a/projects/HieraSeg/configs/_base_/default_runtime.py b/projects/HieraSeg/configs/_base_/default_runtime.py new file mode 100644 index 0000000000..272b4d2467 --- /dev/null +++ b/projects/HieraSeg/configs/_base_/default_runtime.py @@ -0,0 +1,15 @@ +default_scope = 'mmseg' +env_cfg = dict( + cudnn_benchmark=True, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl'), +) +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='SegLocalVisualizer', vis_backends=vis_backends, name='visualizer') +log_processor = dict(by_epoch=False) +log_level = 'INFO' +load_from = None +resume = False + +tta_model = dict(type='SegTTAModel') diff --git a/projects/HieraSeg/configs/_base_/models/deeplabv3plus_r50-d8_vd_contrast.py b/projects/HieraSeg/configs/_base_/models/deeplabv3plus_r50-d8_vd_contrast.py new file mode 100644 index 0000000000..a6af45ce84 --- /dev/null +++ b/projects/HieraSeg/configs/_base_/models/deeplabv3plus_r50-d8_vd_contrast.py @@ -0,0 +1,55 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255) +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained=None, + backbone=dict( + type='ResNetV1d', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='DepthwiseSeparableASPPContrastHead', + in_channels=2048, + in_index=3, + channels=512, + dilations=(1, 12, 24, 36), + c1_in_channels=256, + c1_channels=48, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + proj='convmlp', + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/projects/HieraSeg/configs/_base_/schedules/schedule_80k.py b/projects/HieraSeg/configs/_base_/schedules/schedule_80k.py new file mode 100644 index 0000000000..0dcd6c4d1b --- /dev/null +++ b/projects/HieraSeg/configs/_base_/schedules/schedule_80k.py @@ -0,0 +1,24 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None) +# learning policy +param_scheduler = [ + dict( + type='PolyLR', + eta_min=1e-4, + power=0.9, + begin=0, + end=80000, + by_epoch=False) +] +# training schedule for 80k +train_cfg = dict(type='IterBasedTrainLoop', max_iters=80000, val_interval=8000) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=8000), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='SegVisualizationHook')) diff 
--git a/projects/HieraSeg/configs/hieraseg/hieraseg_deeplabv3plus_r101-d8_4xb2-80l_cityscapes-512x1024.py b/projects/HieraSeg/configs/hieraseg/hieraseg_deeplabv3plus_r101-d8_4xb2-80l_cityscapes-512x1024.py
new file mode 100644
index 0000000000..0d02bef5dc
--- /dev/null
+++ b/projects/HieraSeg/configs/hieraseg/hieraseg_deeplabv3plus_r101-d8_4xb2-80l_cityscapes-512x1024.py
@@ -0,0 +1,21 @@
+_base_ = [
+    '../_base_/models/deeplabv3plus_r50-d8_vd_contrast.py',
+    '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py',
+    '../_base_/schedules/schedule_80k.py'
+]
+
+custom_imports = dict(imports=[
+    'projects.HieraSeg.decode_head.sep_aspp_contrast_head',
+    'projects.HieraSeg.losses.hiera_triplet_loss_cityscape'
+])
+
+model = dict(
+    pretrained=None,
+    backbone=dict(depth=101),
+    decode_head=dict(
+        num_classes=26,
+        loss_decode=dict(
+            type='HieraTripletLossCityscape', num_classes=19,
+            loss_weight=1.0)),
+    auxiliary_head=dict(num_classes=19),
+    test_cfg=dict(mode='whole', is_hiera=True, hiera_num_classes=7))
diff --git a/projects/HieraSeg/decode_head/__init__.py b/projects/HieraSeg/decode_head/__init__.py
new file mode 100644
index 0000000000..da454ea339
--- /dev/null
+++ b/projects/HieraSeg/decode_head/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .sep_aspp_contrast_head import DepthwiseSeparableASPPContrastHead
+
+__all__ = ['DepthwiseSeparableASPPContrastHead']
diff --git a/projects/HieraSeg/decode_head/sep_aspp_contrast_head.py b/projects/HieraSeg/decode_head/sep_aspp_contrast_head.py
new file mode 100644
index 0000000000..75f67e7457
--- /dev/null
+++ b/projects/HieraSeg/decode_head/sep_aspp_contrast_head.py
@@ -0,0 +1,178 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from typing import List
+
+import torch
+import torch.nn as nn
+from mmcv.cnn import build_norm_layer
+from torch import Tensor
+
+from mmseg.models.decode_heads.sep_aspp_head import DepthwiseSeparableASPPHead
+from mmseg.models.losses import accuracy
+from mmseg.models.utils import resize
+from mmseg.registry import MODELS
+
+
+class ProjectionHead(nn.Module):
+    """ProjectionHead, project feature map to specific channels.
+
+    Args:
+        dim_in (int): Input channels.
+        norm_cfg (dict): Config of norm layer.
+        proj_dim (int): Output channels. Default: 256.
+        proj (str): Projection type, 'linear' or 'convmlp'. Default: 'convmlp'.
+    """
+
+    def __init__(self,
+                 dim_in: int,
+                 norm_cfg: dict,
+                 proj_dim: int = 256,
+                 proj: str = 'convmlp'):
+        super().__init__()
+        assert proj in ['convmlp', 'linear']
+        if proj == 'linear':
+            self.proj = nn.Conv2d(dim_in, proj_dim, kernel_size=1)
+        elif proj == 'convmlp':
+            self.proj = nn.Sequential(
+                nn.Conv2d(dim_in, dim_in, kernel_size=1),
+                build_norm_layer(norm_cfg, dim_in)[1], nn.ReLU(inplace=True),
+                nn.Conv2d(dim_in, proj_dim, kernel_size=1))
+
+    def forward(self, x):
+        return torch.nn.functional.normalize(self.proj(x), p=2, dim=1)
+
+
+@MODELS.register_module()
+class DepthwiseSeparableASPPContrastHead(DepthwiseSeparableASPPHead):
+    """Deep Hierarchical Semantic Segmentation. This head is the implementation
+    of `HieraSeg <https://ieeexplore.ieee.org/document/9878466/>`_.
+
+    Based on Encoder-Decoder with Atrous Separable Convolution for
+    Semantic Image Segmentation.
+    `DeepLabV3+ <https://arxiv.org/abs/1802.02611>`_.
+ + Args: + proj (str): The type of ProjectionHead, 'linear' or 'convmlp', + default 'convmlp' + """ + + def __init__(self, proj: str = 'convmlp', **kwargs): + super().__init__(**kwargs) + self.proj_head = ProjectionHead( + dim_in=2048, norm_cfg=self.norm_cfg, proj=proj) + self.register_buffer('step', torch.zeros(1)) + + def forward(self, inputs): + """Forward function.""" + self.step += 1 + embedding = self.proj_head(inputs[-1]) + x = self._transform_inputs(inputs) + aspp_outs = [ + resize( + self.image_pool(x), + size=x.size()[2:], + mode='bilinear', + align_corners=self.align_corners) + ] + aspp_outs.extend(self.aspp_modules(x)) + aspp_outs = torch.cat(aspp_outs, dim=1) + output = self.bottleneck(aspp_outs) + if self.c1_bottleneck is not None: + c1_output = self.c1_bottleneck(inputs[0]) + output = resize( + input=output, + size=c1_output.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + output = torch.cat([output, c1_output], dim=1) + output = self.sep_bottleneck(output) + output = self.cls_seg(output) + return output, embedding + + def predict_by_feat(self, seg_logits: Tensor, + batch_img_metas: List[dict]) -> Tensor: + """Transform a batch of output seg_logits to the input shape. + + Args: + seg_logits (Tensor): The output from decode head forward function. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + + Returns: + Tensor: Outputs segmentation logits map. + """ + # HieraSeg decode_head output is: (out, embedding) :tuple, + # only need 'out' here. + if isinstance(seg_logits, tuple): + seg_logit = seg_logits[0] + + if seg_logit.size(1) == 26: + seg_logit[:, 0:2] += seg_logit[:, -7] + seg_logit[:, 2:5] += seg_logit[:, -6] + seg_logit[:, 5:8] += seg_logit[:, -5] + seg_logit[:, 8:10] += seg_logit[:, -4] + seg_logit[:, 10:11] += seg_logit[:, -3] + seg_logit[:, 11:13] += seg_logit[:, -2] + seg_logit[:, 13:19] += seg_logit[:, -1] + elif seg_logit.size(1) == 12: + seg_logit[:, 0:1] = seg_logit[:, 0:1] + \ + seg_logit[:, 7] + seg_logit[:, 10] + seg_logit[:, 1:5] = seg_logit[:, 1:5] + \ + seg_logit[:, 8] + seg_logit[:, 11] + seg_logit[:, 5:7] = seg_logit[:, 5:7] + \ + seg_logit[:, 9] + seg_logit[:, 11] + elif seg_logit.size(1) == 25: + seg_logit[:, 0:1] = seg_logit[:, 0:1] + \ + seg_logit[:, 20] + seg_logit[:, 23] + seg_logit[:, 1:8] = seg_logit[:, 1:8] + \ + seg_logit[:, 21] + seg_logit[:, 24] + seg_logit[:, 10:12] = seg_logit[:, 10:12] + \ + seg_logit[:, 21] + seg_logit[:, 24] + seg_logit[:, 13:16] = seg_logit[:, 13:16] + \ + seg_logit[:, 21] + seg_logit[:, 24] + seg_logit[:, 8:10] = seg_logit[:, 8:10] + \ + seg_logit[:, 22] + seg_logit[:, 24] + seg_logit[:, 12:13] = seg_logit[:, 12:13] + \ + seg_logit[:, 22] + seg_logit[:, 24] + seg_logit[:, 16:20] = seg_logit[:, 16:20] + \ + seg_logit[:, 22] + seg_logit[:, 24] + + # seg_logit = seg_logit[:,:-self.test_cfg['hiera_num_classes']] + seg_logit = seg_logit[:, :-7] + seg_logit = resize( + input=seg_logit, + size=batch_img_metas[0]['img_shape'], + mode='bilinear', + align_corners=self.align_corners) + + return seg_logit + + def losses(self, results, seg_label): + """Compute segmentation loss.""" + seg_logit_before = results[0] + embedding = results[1] + loss = dict() + seg_logit = resize( + input=seg_logit_before, + size=seg_label.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + if self.sampler is not None: + seg_weight = self.sampler.sample(seg_logit, seg_label) + else: + seg_weight = None + seg_label = seg_label.squeeze(1) + seg_logit_before = resize( 
+ input=seg_logit_before, + scale_factor=0.5, + mode='bilinear', + align_corners=self.align_corners) + loss['loss_seg'] = self.loss_decode( + self.step, + embedding, + seg_logit_before, + seg_logit, + seg_label, + weight=seg_weight, + ignore_index=self.ignore_index) + loss['acc_seg'] = accuracy(seg_logit, seg_label) + return loss diff --git a/projects/HieraSeg/losses/__init__.py b/projects/HieraSeg/losses/__init__.py new file mode 100644 index 0000000000..47d2686482 --- /dev/null +++ b/projects/HieraSeg/losses/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .hiera_triplet_loss_cityscape import HieraTripletLossCityscape + +__all__ = ['HieraTripletLossCityscape'] diff --git a/projects/HieraSeg/losses/hiera_triplet_loss_cityscape.py b/projects/HieraSeg/losses/hiera_triplet_loss_cityscape.py new file mode 100644 index 0000000000..a784f13e62 --- /dev/null +++ b/projects/HieraSeg/losses/hiera_triplet_loss_cityscape.py @@ -0,0 +1,218 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmseg.models.builder import LOSSES +from mmseg.models.losses.cross_entropy_loss import CrossEntropyLoss +from .tree_triplet_loss import TreeTripletLoss + +hiera_map = [0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4, 5, 5, 6, 6, 6, 6, 6, 6] +hiera_index = [[0, 2], [2, 5], [5, 8], [8, 10], [10, 11], [11, 13], [13, 19]] + +hiera = { + 'hiera_high': { + 'flat': [0, 2], + 'construction': [2, 5], + 'object': [5, 8], + 'nature': [8, 10], + 'sky': [10, 11], + 'human': [11, 13], + 'vehicle': [13, 19] + } +} + + +def prepare_targets(targets): + b, h, w = targets.shape + targets_high = torch.ones( + (b, h, w), dtype=targets.dtype, device=targets.device) * 255 + indices_high = [] + for index, high in enumerate(hiera['hiera_high'].keys()): + indices = hiera['hiera_high'][high] + for ii in range(indices[0], indices[1]): + targets_high[targets == ii] = index + indices_high.append(indices) + + return targets, targets_high, indices_high + + +def losses_hiera(predictions, + targets, + targets_top, + num_classes, + indices_high, + eps=1e-8): + """Implementation of hiera loss. + + Args: + predictions (torch.Tensor): seg logits produced by decode head. + targets (torch.Tensor): The learning label of the prediction. + targets_top (torch.Tensor): The hierarchy ground truth of the learning + label. + num_classes (int): Number of categories. + indices_high (List[List[int]]): Hierarchy indices of each hierarchy. + eps (float):Term added to the Logarithm to improve numerical stability. 
+ """ + b, _, h, w = predictions.shape + predictions = torch.sigmoid(predictions.float()) + void_indices = (targets == 255) + targets[void_indices] = 0 + targets = F.one_hot(targets, num_classes=num_classes).permute(0, 3, 1, 2) + void_indices2 = (targets_top == 255) + targets_top[void_indices2] = 0 + targets_top = F.one_hot(targets_top, num_classes=7).permute(0, 3, 1, 2) + + MCMA = predictions[:, :num_classes, :, :] + MCMB = torch.zeros((b, 7, h, w)).to(predictions) + for ii in range(7): + MCMB[:, ii:ii + 1, :, :] = torch.max( + torch.cat([ + predictions[:, indices_high[ii][0]:indices_high[ii][1], :, :], + predictions[:, num_classes + ii:num_classes + ii + 1, :, :] + ], + dim=1), 1, True)[0] + + MCLB = predictions[:, num_classes:num_classes + 7, :, :] + MCLA = predictions[:, :num_classes, :, :].clone() + for ii in range(7): + for jj in range(indices_high[ii][0], indices_high[ii][1]): + MCLA[:, jj:jj + 1, :, :] = torch.min( + torch.cat([ + predictions[:, jj:jj + 1, :, :], MCLB[:, ii:ii + 1, :, :] + ], + dim=1), 1, True)[0] + + valid_indices = (~void_indices).unsqueeze(1) + num_valid = valid_indices.sum() + valid_indices2 = (~void_indices2).unsqueeze(1) + num_valid2 = valid_indices2.sum() + # channel_num*sum()/one_channel_valid already has a weight + loss = ( + (-targets[:, :num_classes, :, :] * torch.log(MCLA + eps) - + (1.0 - targets[:, :num_classes, :, :]) * torch.log(1.0 - MCMA + eps)) + * valid_indices).sum() / num_valid / num_classes + loss += ((-targets_top[:, :, :, :] * torch.log(MCLB + eps) - + (1.0 - targets_top[:, :, :, :]) * torch.log(1.0 - MCMB + eps)) * + valid_indices2).sum() / num_valid2 / 7 + + return 5 * loss + + +def losses_hiera_focal(predictions, + targets, + targets_top, + num_classes, + indices_high, + eps=1e-8, + gamma=2): + """Implementation of hiera loss. + + Args: + predictions (torch.Tensor): seg logits produced by decode head. + targets (torch.Tensor): The learning label of the prediction. + targets_top (torch.Tensor): The hierarchy ground truth of the learning + label. + num_classes (int): Number of categories. + indices_high (List[List[int]]): Hierarchy indices of each hierarchy. + eps (float):Term added to the Logarithm to improve numerical stability. + Defaults: 1e-8. + gamma (int): The exponent value. Defaults: 2. 
+ """ + b, _, h, w = predictions.shape + predictions = torch.sigmoid(predictions.float()) + void_indices = (targets == 255) + targets[void_indices] = 0 + targets = F.one_hot(targets, num_classes=num_classes).permute(0, 3, 1, 2) + void_indices2 = (targets_top == 255) + targets_top[void_indices2] = 0 + targets_top = F.one_hot(targets_top, num_classes=7).permute(0, 3, 1, 2) + + MCMA = predictions[:, :num_classes, :, :] + MCMB = torch.zeros((b, 7, h, w), + dtype=predictions.dtype, + device=predictions.device) + for ii in range(7): + MCMB[:, ii:ii + 1, :, :] = torch.max( + torch.cat([ + predictions[:, indices_high[ii][0]:indices_high[ii][1], :, :], + predictions[:, num_classes + ii:num_classes + ii + 1, :, :] + ], + dim=1), 1, True)[0] + + MCLB = predictions[:, num_classes:num_classes + 7, :, :] + MCLA = predictions[:, :num_classes, :, :].clone() + for ii in range(7): + for jj in range(indices_high[ii][0], indices_high[ii][1]): + MCLA[:, jj:jj + 1, :, :] = torch.min( + torch.cat([ + predictions[:, jj:jj + 1, :, :], MCLB[:, ii:ii + 1, :, :] + ], + dim=1), 1, True)[0] + + valid_indices = (~void_indices).unsqueeze(1) + num_valid = valid_indices.sum() + valid_indices2 = (~void_indices2).unsqueeze(1) + num_valid2 = valid_indices2.sum() + # channel_num*sum()/one_channel_valid already has a weight + loss = ((-targets[:, :num_classes, :, :] * torch.pow( + (1.0 - MCLA), gamma) * torch.log(MCLA + eps) - + (1.0 - targets[:, :num_classes, :, :]) * torch.pow(MCMA, gamma) * + torch.log(1.0 - MCMA + eps)) * + valid_indices).sum() / num_valid / num_classes + loss += ( + (-targets_top[:, :, :, :] * torch.pow( + (1.0 - MCLB), gamma) * torch.log(MCLB + eps) - + (1.0 - targets_top[:, :, :, :]) * torch.pow(MCMB, gamma) * + torch.log(1.0 - MCMB + eps)) * valid_indices2).sum() / num_valid2 / 7 + + return 5 * loss + + +@LOSSES.register_module() +class HieraTripletLossCityscape(nn.Module): + """Modified from https://github.com/qhanghu/HSSN_pytorch/blob/main/mmseg/mo + dels/losses/hiera_triplet_loss_cityscape.py.""" + + def __init__(self, num_classes, use_sigmoid=False, loss_weight=1.0): + super().__init__() + self.num_classes = num_classes + self.loss_weight = loss_weight + self.treetripletloss = TreeTripletLoss(num_classes, hiera_map, + hiera_index) + self.ce = CrossEntropyLoss() + + def forward(self, + step, + embedding, + cls_score_before, + cls_score, + label, + weight=None, + **kwargs): + targets, targets_top, indices_top = prepare_targets(label) + + loss = losses_hiera(cls_score, targets, targets_top, self.num_classes, + indices_top) + ce_loss = self.ce(cls_score[:, :-7], label) + ce_loss2 = self.ce(cls_score[:, -7:], targets_top) + loss = loss + ce_loss + ce_loss2 + + loss_triplet, class_count = self.treetripletloss(embedding, label) + class_counts = [ + torch.ones_like(class_count) + for _ in range(torch.distributed.get_world_size()) + ] + torch.distributed.all_gather(class_counts, class_count, async_op=False) + class_counts = torch.cat(class_counts, dim=0) + + if torch.distributed.get_world_size() == torch.nonzero( + class_counts, as_tuple=False).size(0): + factor = 1 / 4 * (1 + torch.cos( + torch.tensor((step.item() - 80000) / 80000 * + math.pi))) if step.item() < 80000 else 0.5 + loss += factor * loss_triplet + + return loss * self.loss_weight diff --git a/projects/HieraSeg/losses/tree_triplet_loss.py b/projects/HieraSeg/losses/tree_triplet_loss.py new file mode 100644 index 0000000000..ccc0937405 --- /dev/null +++ b/projects/HieraSeg/losses/tree_triplet_loss.py @@ -0,0 +1,86 @@ +# Copyright (c) OpenMMLab. 
All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmseg.models.builder import LOSSES + + +@LOSSES.register_module() +class TreeTripletLoss(nn.Module): + """TreeTripletLoss. Modified from https://github.com/qhanghu/HSSN_pytorch/b + lob/main/mmseg/models/losses/tree_triplet_loss.py. + + Args: + num_classes (int): Number of categories. + hiera_map (List[int]): Hierarchy map of each category. + hiera_index (List[List[int]]): Hierarchy indices of each hierarchy. + ignore_index (int): Specifies a target value that is ignored and + does not contribute to the input gradients. Defaults: 255. + + Examples: + >>> num_classes = 19 + >>> hiera_map = [ + 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4, 5, 5, 6, 6, 6, 6, 6, 6] + >>> hiera_index = [ + 0, 2], [2, 5], [5, 8], [8, 10], [10, 11], [11, 13], [13, 19]] + """ + + def __init__(self, num_classes, hiera_map, hiera_index, ignore_index=255): + super().__init__() + + self.ignore_label = ignore_index + self.num_classes = num_classes + self.hiera_map = hiera_map + self.hiera_index = hiera_index + + def forward(self, feats: torch.Tensor, labels=None, max_triplet=200): + labels = labels.unsqueeze(1).float().clone() + labels = torch.nn.functional.interpolate( + labels, (feats.shape[2], feats.shape[3]), mode='nearest') + labels = labels.squeeze(1).long() + assert labels.shape[-1] == feats.shape[-1], '{} {}'.format( + labels.shape, feats.shape) + + labels = labels.view(-1) + feats = feats.permute(0, 2, 3, 1) + feats = feats.contiguous().view(-1, feats.shape[-1]) + + triplet_loss = 0 + exist_classes = torch.unique(labels) + exist_classes = [x for x in exist_classes if x != 255] + class_count = 0 + + for ii in exist_classes: + index_range = self.hiera_index[self.hiera_map[ii]] + index_anchor = labels == ii + index_pos = (labels >= index_range[0]) & ( + labels < index_range[-1]) & (~index_anchor) + index_neg = (labels < index_range[0]) | (labels >= index_range[-1]) + + min_size = min( + torch.sum(index_anchor), torch.sum(index_pos), + torch.sum(index_neg), max_triplet) + + feats_anchor = feats[index_anchor][:min_size] + feats_pos = feats[index_pos][:min_size] + feats_neg = feats[index_neg][:min_size] + + distance = torch.zeros(min_size, 2).to(feats) + distance[:, 0:1] = 1 - (feats_anchor * feats_pos).sum(1, True) + distance[:, 1:2] = 1 - (feats_anchor * feats_neg).sum(1, True) + + # margin always 0.1 + (4-2)/4 since the hierarchy is three level + # TODO: should include label of pos is the same as anchor + margin = 0.6 * torch.ones(min_size).to(feats) + + tl = distance[:, 0] - distance[:, 1] + margin + tl = F.relu(tl) + + if tl.size(0) > 0: + triplet_loss += tl.mean() + class_count += 1 + if class_count == 0: + return None, torch.tensor([0]).to(feats) + triplet_loss /= class_count + return triplet_loss, torch.tensor([class_count]).to(feats) From d505ec1c949f0c4d9f54e62f473ea7b720ce5332 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=B0=A2=E6=98=95=E8=BE=B0?= Date: Fri, 13 Jan 2023 20:06:30 +0800 Subject: [PATCH 12/25] [Doc] Fix API document (#2483) ## Motivation As title. ## Modification - docs/en/api.rst - docs/zh_cn/api.rst - add `scipy` to readthedocs requirement. --- docs/en/api.rst | 56 +++++++++++++++++------------------- docs/zh_cn/api.rst | 54 ++++++++++++++++------------------ requirements/readthedocs.txt | 5 ++-- 3 files changed, 54 insertions(+), 61 deletions(-) diff --git a/docs/en/api.rst b/docs/en/api.rst index 12ec13b2bd..94f64313d0 100644 --- a/docs/en/api.rst +++ b/docs/en/api.rst @@ -11,8 +11,13 @@ datasets .. 
automodule:: mmseg.datasets :members: -transforms +samplers ^^^^^^^^^^ +.. automodule:: mmseg.datasets.samplers + :members: + +transforms +^^^^^^^^^^^^ .. automodule:: mmseg.datasets.transforms :members: @@ -25,12 +30,12 @@ hooks :members: optimizers -^^^^^^^^^^ +^^^^^^^^^^^^^^^ .. automodule:: mmseg.engine.optimizers :members: mmseg.evaluation ------------------ +-------------- metrics ^^^^^^^^^^ @@ -40,51 +45,42 @@ metrics mmseg.models -------------- -models -^^^^^^^^^^ -.. automodule:: mmseg.models - :members: - -segmentors -^^^^^^^^^^ -.. automodule:: mmseg.models.segmentors - :members: - backbones -^^^^^^^^^^ +^^^^^^^^^^^^^^^^^^ .. automodule:: mmseg.models.backbones :members: decode_heads -^^^^^^^^^^^^ +^^^^^^^^^^^^^^^ .. automodule:: mmseg.models.decode_heads :members: -losses +segmentors ^^^^^^^^^^ -.. automodule:: mmseg.models.losses +.. automodule:: mmseg.models.segmentors :members: -utils +losses ^^^^^^^^^^ -.. automodule:: mmseg.models.utils +.. automodule:: mmseg.models.losses :members: necks -^^^^^^^^^^ +^^^^^^^^^^^^ .. automodule:: mmseg.models.necks :members: -mmseg.registry --------------- -.. automodule:: mmseg.registry +utils +^^^^^^^^^^ +.. automodule:: mmseg.models.utils :members: + mmseg.structures ------------------ +-------------------- structures -^^^^^^^^^^ +^^^^^^^^^^^^^^^^^ .. automodule:: mmseg.structures :members: @@ -93,12 +89,12 @@ sampler .. automodule:: mmseg.structures.sampler :members: +mmseg.visualization +-------------------- +.. automodule:: mmseg.visualization + :members: + mmseg.utils -------------- .. automodule:: mmseg.utils :members: - -mmseg.visualization ----------------------- -.. automodule:: mmseg.visualization - :members: diff --git a/docs/zh_cn/api.rst b/docs/zh_cn/api.rst index be68c7579d..94f64313d0 100644 --- a/docs/zh_cn/api.rst +++ b/docs/zh_cn/api.rst @@ -11,8 +11,13 @@ datasets .. automodule:: mmseg.datasets :members: -transforms +samplers ^^^^^^^^^^ +.. automodule:: mmseg.datasets.samplers + :members: + +transforms +^^^^^^^^^^^^ .. automodule:: mmseg.datasets.transforms :members: @@ -25,7 +30,7 @@ hooks :members: optimizers -^^^^^^^^^^ +^^^^^^^^^^^^^^^ .. automodule:: mmseg.engine.optimizers :members: @@ -40,51 +45,42 @@ metrics mmseg.models -------------- -models -^^^^^^^^^^ -.. automodule:: mmseg.models - :members: - -segmentors -^^^^^^^^^^ -.. automodule:: mmseg.models.segmentors - :members: - backbones -^^^^^^^^^^ +^^^^^^^^^^^^^^^^^^ .. automodule:: mmseg.models.backbones :members: decode_heads -^^^^^^^^^^^^ +^^^^^^^^^^^^^^^ .. automodule:: mmseg.models.decode_heads :members: -losses +segmentors ^^^^^^^^^^ -.. automodule:: mmseg.models.losses +.. automodule:: mmseg.models.segmentors :members: -utils +losses ^^^^^^^^^^ -.. automodule:: mmseg.models.utils +.. automodule:: mmseg.models.losses :members: necks -^^^^^^^^^^ +^^^^^^^^^^^^ .. automodule:: mmseg.models.necks :members: -mmseg.registry --------------- -.. automodule:: mmseg.registry +utils +^^^^^^^^^^ +.. automodule:: mmseg.models.utils :members: + mmseg.structures --------------- +-------------------- structures -^^^^^^^^^^ +^^^^^^^^^^^^^^^^^ .. automodule:: mmseg.structures :members: @@ -93,12 +89,12 @@ sampler .. automodule:: mmseg.structures.sampler :members: -mmseg.utils --------------- -.. automodule:: mmseg.utils +mmseg.visualization +-------------------- +.. automodule:: mmseg.visualization :members: -mmseg.visualization +mmseg.utils -------------- -.. automodule:: mmseg.visualization +.. 
automodule:: mmseg.utils :members: diff --git a/requirements/readthedocs.txt b/requirements/readthedocs.txt index af6029b9ad..1b5d8443b4 100644 --- a/requirements/readthedocs.txt +++ b/requirements/readthedocs.txt @@ -1,5 +1,6 @@ -mmcv>=2.0.0rc0 -mmengine +mmcv>=2.0.0rc1,<2.1.0 +mmengine>=0.1.0,<1.0.0 prettytable +scipy torch torchvision From 6d8ba3b5a01746d0adb6b41b8f5d5dec86c48432 Mon Sep 17 00:00:00 2001 From: Vladimir Loginov Date: Tue, 17 Jan 2023 15:18:46 +0300 Subject: [PATCH 13/25] Update basesegdataset.py (#2492) ## Motivation Makes docstring to be consistent with actual argument name. ## Modification Minor fix ## BC-breaking (Optional) No --- mmseg/datasets/basesegdataset.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mmseg/datasets/basesegdataset.py b/mmseg/datasets/basesegdataset.py index e97f8ca9d1..cb11eb64c6 100644 --- a/mmseg/datasets/basesegdataset.py +++ b/mmseg/datasets/basesegdataset.py @@ -47,7 +47,7 @@ class BaseSegDataset(BaseDataset): data_root (str, optional): The root directory for ``data_prefix`` and ``ann_file``. Defaults to None. data_prefix (dict, optional): Prefix for training data. Defaults to - dict(img_path=None, seg_path=None). + dict(img_path=None, seg_map_path=None). img_suffix (str): Suffix of images. Default: '.jpg' seg_map_suffix (str): Suffix of segmentation maps. Default: '.png' filter_cfg (dict, optional): Config for filter data. Defaults to None. From c2ba8ebac1772766c6d9703fa317f742f4efbe7e Mon Sep 17 00:00:00 2001 From: MengzhangLI Date: Thu, 19 Jan 2023 14:26:26 +0800 Subject: [PATCH 14/25] [Fix][Doc] Fix link of preprocessing and order of operations in ZN dataset.md doc. (#2494) ## Motivation Ref: https://github.com/open-mmlab/mmsegmentation/pull/2464#discussion_r1072211706 Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com> --- docs/zh_cn/advanced_guides/datasets.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh_cn/advanced_guides/datasets.md b/docs/zh_cn/advanced_guides/datasets.md index 29062e73f3..0f3ad2b682 100644 --- a/docs/zh_cn/advanced_guides/datasets.md +++ b/docs/zh_cn/advanced_guides/datasets.md @@ -165,7 +165,7 @@ print(dataset[0]) ## BaseSegDataset -由于 MMSegmentation 中的所有数据集的基本功能均包括加载[预处理](https://mmsegmentation.readthedocs.io/en/dev-1.x/advanced_guides/models.html#id2) 之后的数据集的信息, 和将数据送入数据集变换流水线中, 因此在 MMSegmentation 中将其中的共同接口抽象成 [`BaseSegDataset`](https://mmsegmentation.readthedocs.io/en/dev-1.x/api.html?highlight=BaseSegDataset#mmseg.datasets.BaseSegDataset),它继承自 [MMEngine 的 `BaseDataset`](https://github.com/open-mmlab/mmengine/blob/main/docs/en/advanced_tutorials/basedataset.md), 遵循 OpenMMLab 数据集初始化统一流程, 支持高效的内部数据存储格式, 支持数据集拼接、数据集重复采样等功能. +由于 MMSegmentation 中的所有数据集的基本功能均包括(1) 加载[数据集预处理](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/docs/zh_cn/user_guides/2_dataset_prepare.md) 之后的数据信息和 (2) 将数据送入数据变换流水线中进行数据变换, 因此在 MMSegmentation 中将其中的共同接口抽象成 [`BaseSegDataset`](https://mmsegmentation.readthedocs.io/en/dev-1.x/api.html?highlight=BaseSegDataset#mmseg.datasets.BaseSegDataset),它继承自 [MMEngine 的 `BaseDataset`](https://github.com/open-mmlab/mmengine/blob/main/docs/en/advanced_tutorials/basedataset.md), 遵循 OpenMMLab 数据集初始化统一流程, 支持高效的内部数据存储格式, 支持数据集拼接、数据集重复采样等功能. 在 MMSegmentation BaseSegDataset 中重新定义了**数据信息加载方法**(`load_data_list`)和并新增了 `get_label_map` 方法用来**修改数据集的类别信息**. 
### 数据信息加载 From f678a5c974ca5210611ded6395253ff70c255842 Mon Sep 17 00:00:00 2001 From: MengzhangLI Date: Thu, 19 Jan 2023 16:29:56 +0800 Subject: [PATCH 15/25] [Doc] Add ZN Customized_runtime Doc in dev-1.x (#2502) Old PR: https://github.com/open-mmlab/mmsegmentation/pull/2169 --- .../advanced_guides/customize_runtime.md | 266 ++++++------------ 1 file changed, 90 insertions(+), 176 deletions(-) diff --git a/docs/zh_cn/advanced_guides/customize_runtime.md b/docs/zh_cn/advanced_guides/customize_runtime.md index 1afd95a9a6..a80aca6345 100644 --- a/docs/zh_cn/advanced_guides/customize_runtime.md +++ b/docs/zh_cn/advanced_guides/customize_runtime.md @@ -1,248 +1,162 @@ -# 自定义运行设定(待更新) +# 自定义运行设定 -## 自定义优化设定 +## 实现自定义钩子 -### 自定义 PyTorch 支持的优化器 +### Step 1: 创建一个新的钩子 -我们已经支持 PyTorch 自带的所有优化器,唯一需要修改的地方是在配置文件里的 `optimizer` 域里面。 -例如,如果您想使用 `ADAM` (注意如下操作可能会让模型表现下降),可以使用如下修改: +MMEngine 已实现了训练和测试常用的[钩子](https://github.com/open-mmlab/mmengine/blob/main/docs/zh_cn/tutorials/hook.md), +当有定制化需求时, 可以按照如下示例实现适用于自身训练需求的钩子, 例如想修改一个超参数 `model.hyper_paramete` 的值, 让它随着训练迭代次数而变化: ```python -optimizer = dict(type='Adam', lr=0.0003, weight_decay=0.0001) -``` - -为了修改模型的学习率,使用者仅需要修改配置文件里 optimizer 的 `lr` 即可。 -使用者可以参照 PyTorch 的 [API 文档](https://pytorch.org/docs/stable/optim.html?highlight=optim#module-torch.optim) -直接设置参数。 - -### 自定义自己实现的优化器 - -#### 1. 定义一个新的优化器 +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence -一个自定义的优化器可以按照如下去定义: +from mmengine.hooks import Hook +from mmengine.model import is_model_wrapper -假如您想增加一个叫做 `MyOptimizer` 的优化器,它的参数分别有 `a`, `b`, 和 `c`。 -您需要创建一个叫 `mmseg/core/optimizer` 的新文件夹。 -然后再在文件,即 `mmseg/core/optimizer/my_optimizer.py` 里面去实现这个新优化器: +from mmseg.registry import HOOKS -```python -from .registry import OPTIMIZERS -from torch.optim import Optimizer +@HOOKS.register_module() +class NewHook(Hook): + """Docstring for NewHook. + """ -@OPTIMIZERS.register_module() -class MyOptimizer(Optimizer): - - def __init__(self, a, b, c) + def __init__(self, a: int, b: int) -> None: + self.a = a + self.b = b + def before_train_iter(self, + runner, + batch_idx: int, + data_batch: Optional[Sequence[dict]] = None) -> None: + cur_iter = runner.iter + # 当模型被包在 wrapper 里时获取这个模型 + if is_model_wrapper(runner.model): + model = runner.model.module + model.hyper_parameter = self.a * cur_iter + self.b ``` -#### 2. 增加优化器到注册表 (registry) +### Step 2: 导入一个新的钩子 -为了让上述定义的模块被框架发现,首先这个模块应该被导入到主命名空间 (main namespace) 里。 -有两种方式可以实现它。 +为了让上面定义的模块可以被执行的程序发现, 这个模块需要先被导入主命名空间 (main namespace) 里面, +假设 NewHook 在 `mmseg/engine/hooks/new_hook.py` 里面, 有两种方式去实现它: -- 修改 `mmseg/core/optimizer/__init__.py` 来导入它 - - 新的被定义的模块应该被导入到 `mmseg/core/optimizer/__init__.py` 这样注册表将会发现新的模块并添加它 +- 修改 `mmseg/engine/hooks/__init__.py` 来导入它. + 新定义的模块应该在 `mmseg/engine/hooks/__init__.py` 里面导入, 这样注册器可以发现并添加这个新的模块: ```python -from .my_optimizer import MyOptimizer +from .new_hook import NewHook + +__all__ = [..., NewHook] ``` -- 在配置文件里使用 `custom_imports` 去手动导入它 +- 在配置文件里使用 custom_imports 来手动导入它. ```python -custom_imports = dict(imports=['mmseg.core.optimizer.my_optimizer'], allow_failed_imports=False) +custom_imports = dict(imports=['mmseg.engine.hooks.new_hook'], allow_failed_imports=False) ``` -`mmseg.core.optimizer.my_optimizer` 模块将会在程序运行的开始被导入,并且 `MyOptimizer` 类将会自动注册。 -需要注意只有包含 `MyOptimizer` 类的包 (package) 应当被导入。 -而 `mmseg.core.optimizer.my_optimizer.MyOptimizer` **不能** 被直接导入。 - -事实上,使用者完全可以用另一个按这样导入方法的文件夹结构,只要模块的根路径已经被添加到 `PYTHONPATH` 里面。 - -#### 3. 
在配置文件里定义优化器 +### Step 3: 修改配置文件 -之后您可以在配置文件的 `optimizer` 域里面使用 `MyOptimizer` -在配置文件里,优化器被定义在 `optimizer` 域里,如下所示: +可以按照如下方式, 在训练或测试中配置并使用自定义的钩子. 不同钩子在同一位点的优先级可以参考[这里](https://github.com/open-mmlab/mmengine/blob/main/docs/zh_cn/tutorials/hook.md#%E5%86%85%E7%BD%AE%E9%92%A9%E5%AD%90), 自定义钩子如果没有指定优先, 默认是 `NORMAL`. ```python -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +custom_hooks = [ + dict(type='NewHook', a=a_value, b=b_value, priority='ABOVE_NORMAL') +] ``` -为了使用您自己的优化器,这个域可以被改成: - -```python -optimizer = dict(type='MyOptimizer', a=a_value, b=b_value, c=c_value) -``` +## 实现自定义优化器 -### 自定义优化器的构造器 (constructor) +### Step 1: 创建一个新的优化器 -有些模型可能需要在优化器里有一些特别参数的设置,例如 批归一化层 (BatchNorm layers) 的 权重衰减 (weight decay)。 -使用者可以通过自定义优化器的构造器去微调这些细粒度参数。 +如果增加一个叫作 `MyOptimizer` 的优化器, 它有参数 `a`, `b` 和 `c`. 推荐在 `mmseg/engine/optimizers/my_optimizer.py` 文件中实现 ```python -from mmcv.utils import build_from_cfg - -from mmcv.runner.optimizer import OPTIMIZER_BUILDERS, OPTIMIZERS -from mmseg.utils import get_root_logger -from .my_optimizer import MyOptimizer - - -@OPTIMIZER_BUILDERS.register_module() -class MyOptimizerConstructor(object): +from mmseg.registry import OPTIMIZERS +from torch.optim import Optimizer - def __init__(self, optim_wrapper_cfg, paramwise_cfg=None): - def __call__(self, model): - - return my_optimizer +@OPTIMIZERS.register_module() +class MyOptimizer(Optimizer): + def __init__(self, a, b, c) ``` -默认的优化器构造器的实现可以参照 [这里](https://github.com/open-mmlab/mmcv/blob/9ecd6b0d5ff9d2172c49a182eaa669e9f27bb8e7/mmcv/runner/optimizer/default_constructor.py#L11) ,它也可以被用作新的优化器构造器的模板。 - -### 额外的设置 - -优化器没有实现的一些技巧应该通过优化器构造器 (optimizer constructor) 或者钩子 (hook) 去实现,如设置基于参数的学习率 (parameter-wise learning rates)。我们列出一些常见的设置,它们可以稳定或加速模型的训练。 -如果您有更多的设置,欢迎在 PR 和 issue 里面提交。 - -- __使用梯度截断 (gradient clip) 去稳定训练__: - - 一些模型需要梯度截断去稳定训练过程,如下所示 - - ```python - optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) - ``` - - 如果您的配置继承自已经设置了 `optimizer_config` 的基础配置 (base config),您可能需要 `_delete_=True` 来重写那些不需要的设置。更多细节请参照 [配置文件文档](https://mmsegmentation.readthedocs.io/en/latest/config.html) 。 - -- __使用动量计划表 (momentum schedule) 去加速模型收敛__: - - 我们支持动量计划表去让模型基于学习率修改动量,这样可能让模型收敛地更快。 - 动量计划表经常和学习率计划表 (LR scheduler) 一起使用,例如如下配置文件就在 3D 检测里经常使用以加速收敛。 - 更多细节请参考 [CyclicLrUpdater](https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/lr_updater.py#L327) 和 [CyclicMomentumUpdater](https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/momentum_updater.py#L130) 的实现。 - - ```python - lr_config = dict( - policy='cyclic', - target_ratio=(10, 1e-4), - cyclic_times=1, - step_ratio_up=0.4, - ) - momentum_config = dict( - policy='cyclic', - target_ratio=(0.85 / 0.95, 1), - cyclic_times=1, - step_ratio_up=0.4, - ) - ``` - -## 自定义训练计划表 - -我们根据默认的训练迭代步数 40k/80k 来设置学习率,这在 MMCV 里叫做 [`PolyLrUpdaterHook`](https://github.com/open-mmlab/mmcv/blob/826d3a7b68596c824fa1e2cb89b6ac274f52179c/mmcv/runner/hooks/lr_updater.py#L196) 。 -我们也支持许多其他的学习率计划表:[这里](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py) ,例如 `CosineAnnealing` 和 `Poly` 计划表。下面是一些例子: - -- 步计划表 Step schedule: - - ```python - lr_config = dict(policy='step', step=[9, 10]) - ``` +### Step 2: 导入一个新的优化器 -- 余弦退火计划表 ConsineAnnealing schedule: +为了让上面定义的模块可以被执行的程序发现, 这个模块需要先被导入主命名空间 (main namespace) 里面, +假设 `MyOptimizer` 在 `mmseg/engine/optimizers/my_optimizer.py` 里面, 有两种方式去实现它: - ```python - lr_config = dict( - policy='CosineAnnealing', - 
warmup='linear', - warmup_iters=1000, - warmup_ratio=1.0 / 10, - min_lr_ratio=1e-5) - ``` - -## 自定义工作流 (workflow) - -工作流是一个专门定义运行顺序和轮数 (running order and epochs) 的列表 (phase, epochs)。 -默认情况下它设置成: +- 修改 `mmseg/engine/optimizers/__init__.py` 来导入它. + 新定义的模块应该在 `mmseg/engine/optimizers/__init__.py` 里面导入, 这样注册器可以发现并添加这个新的模块: ```python -workflow = [('train', 1)] +from .my_optimizer import MyOptimizer ``` -意思是训练是跑 1 个 epoch。有时候使用者可能想检查模型在验证集上的一些指标(如 损失 loss,精确性 accuracy),我们可以这样设置工作流: +- 在配置文件里使用 `custom_imports` 来手动导入它. ```python -[('train', 1), ('val', 1)] +custom_imports = dict(imports=['mmseg.engine.optimizers.my_optimizer'], allow_failed_imports=False) ``` -于是 1 个 epoch 训练,1 个 epoch 验证将交替运行。 +### Step 3: 修改配置文件 -**注意**: +随后需要修改配置文件 `optim_wrapper` 里的 `optimizer` 参数, 如果要使用你自己的优化器 `MyOptimizer`, 字段可以被修改成: -1. 模型的参数在验证的阶段不会被自动更新 -2. 配置文件里的关键词 `total_epochs` 仅控制训练的 epochs 数目,而不会影响验证时的工作流 -3. 工作流 `[('train', 1), ('val', 1)]` 和 `[('train', 1)]` 将不会改变 `EvalHook` 的行为,因为 `EvalHook` 被 `after_train_epoch` - 调用而且验证的工作流仅仅影响通过调用 `after_val_epoch` 的钩子 (hooks)。因此, `[('train', 1), ('val', 1)]` 和 `[('train', 1)]` - 的区别仅在于 runner 将在每次训练 epoch 结束后计算在验证集上的损失 +```python +optim_wrapper = dict(type='OptimWrapper', + optimizer=dict(type='MyOptimizer', + a=a_value, b=b_value, c=c_value), + clip_grad=None) +``` -## 自定义钩 (hooks) +## 实现自定义优化器封装构造器 -### 使用 MMCV 实现的钩子 (hooks) +### Step 1: 创建一个新的优化器封装构造器 -如果钩子已经在 MMCV 里被实现,如下所示,您可以直接修改配置文件来使用钩子: +构造器可以用来创建优化器, 优化器包, 以及自定义模型网络不同层的超参数. 一些模型的优化器可能会根据特定的参数而调整, 例如 BatchNorm 层的 weight decay. 使用者可以通过自定义优化器构造器来精细化设定不同参数的优化策略. ```python -custom_hooks = [ - dict(type='MyHook', a=a_value, b=b_value, priority='NORMAL') -] -``` +from mmengine.optim import DefaultOptimWrapperConstructor +from mmseg.registry import OPTIM_WRAPPER_CONSTRUCTORS -### 修改默认的运行时间钩子 (runtime hooks) +@OPTIM_WRAPPER_CONSTRUCTORS.register_module() +class LearningRateDecayOptimizerConstructor(DefaultOptimWrapperConstructor): + def __init__(self, optim_wrapper_cfg, paramwise_cfg=None): -以下的常用的钩子没有被 `custom_hooks` 注册: + def __call__(self, model): -- log_config -- checkpoint_config -- evaluation -- lr_config -- optimizer_config -- momentum_config + return my_optimizer +``` -在这些钩子里,只有 logger hook 有 `VERY_LOW` 优先级,其他的优先级都是 `NORMAL`。 -上述提及的教程已经包括了如何修改 `optimizer_config`,`momentum_config` 和 `lr_config`。 -这里我们展示我们如何处理 `log_config`, `checkpoint_config` 和 `evaluation`。 +默认的优化器构造器在[这里](https://github.com/open-mmlab/mmengine/blob/main/mmengine/optim/optimizer/default_constructor.py#L19) 被实现, 它也可以用来作为新的优化器构造器的模板. -#### 检查点配置文件 (Checkpoint config) +### Step 2: 导入一个新的优化器封装构造器 -MMCV runner 将使用 `checkpoint_config` 去初始化 [`CheckpointHook`](https://github.com/open-mmlab/mmcv/blob/9ecd6b0d5ff9d2172c49a182eaa669e9f27bb8e7/mmcv/runner/hooks/checkpoint.py#L9). +为了让上面定义的模块可以被执行的程序发现, 这个模块需要先被导入主命名空间 (main namespace) 里面, 假设 `MyOptimizerConstructor` 在 `mmseg/engine/optimizers/my_optimizer_constructor.py` 里面, 有两种方式去实现它: + +- 修改 `mmseg/engine/optimizers/__init__.py` 来导入它. 
+ 新定义的模块应该在 `mmseg/engine/optimizers/__init__.py` 里面导入, 这样注册器可以发现并添加这个新的模块: ```python -checkpoint_config = dict(interval=1) +from .my_optimizer_constructor import MyOptimizerConstructor ``` -使用者可以设置 `max_keep_ckpts` 来仅保存一小部分检查点或者通过 `save_optimizer` 来决定是否保存优化器的状态字典 (state dict of optimizer)。 更多使用参数的细节请参考 [这里](https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.CheckpointHook) 。 - -#### 日志配置文件 (Log config) - -`log_config` 包裹了许多日志钩 (logger hooks) 而且能去设置间隔 (intervals)。现在 MMCV 支持 `WandbLoggerHook`, `MlflowLoggerHook` 和 `TensorboardLoggerHook`。 -详细的使用请参照 [文档](https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.LoggerHook) 。 +- 在配置文件里使用 `custom_imports` 来手动导入它. ```python -log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - dict(type='TensorboardLoggerHook') - ]) +custom_imports = dict(imports=['mmseg.engine.optimizers.my_optimizer_constructor'], allow_failed_imports=False) ``` -#### 评估配置文件 (Evaluation config) +### Step 3: 修改配置文件 -`evaluation` 的配置文件将被用来初始化 [`EvalHook`](https://github.com/open-mmlab/mmsegmentation/blob/e3f6f655d69b777341aec2fe8829871cc0beadcb/mmseg/core/evaluation/eval_hooks.py#L7) 。 -除了 `interval` 键,其他的像 `metric` 这样的参数将被传递给 `dataset.evaluate()` 。 +随后需要修改配置文件 `optim_wrapper` 里的 `constructor` 参数, 如果要使用你自己的优化器封装构造器 `MyOptimizerConstructor`, 字段可以被修改成: ```python -evaluation = dict(interval=1, metric='mIoU') +optim_wrapper = dict(type='OptimWrapper', + constructor='MyOptimizerConstructor', + clip_grad=None) ``` From e394e2aa28d33f740af6866089e294fbf55ec70b Mon Sep 17 00:00:00 2001 From: Tianlong Ai <50650583+AI-Tianlong@users.noreply.github.com> Date: Fri, 20 Jan 2023 14:25:51 +0800 Subject: [PATCH 16/25] CodeCamp #1555[Feature] Support Mapillary Vistas Dataset (#2484) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Support `Mapillary Vistas Dataset` ## Motivation Support **`Mapillary Vistas Dataset`** Dataset Paper link : https://ieeexplore.ieee.org/document/9878466/ Download and more information view https://www.mapillary.com/dataset/vistas ``` @InProceedings{Neuhold_2017_ICCV, author = {Neuhold, Gerhard and Ollmann, Tobias and Rota Bulo, Samuel and Kontschieder, Peter}, title = {The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes}, booktitle = {Proceedings of the IEEE International Conference on Computer Vision (ICCV)}, month = {Oct}, year = {2017} } ``` ## Modification Add `Mapillary_dataset` in `mmsegmentation/projects` Add `configs/_base_/mapillary_v1_2.py` and `configs/_base_/mapillary_v2_0.py` Add `configs/deeplabv3plus_r18-d8_4xb2-80k_mapillay-512x1024.py` to test training and testing on Mapillary datasets Add `docs/en/user_guides/2_dataset_prepare.md` , add Mapillary Vistas Dataset Preparing and Structure. Add `tools/dataset_converters/mapillary.py` to convert RGB labels to Mask labels. 
Co-authored-by: 谢昕辰 --- projects/mapillary_dataset/README.md | 85 ++++++ .../configs/_base_/datasets/mapillary_v1_2.py | 69 +++++ .../configs/_base_/datasets/mapillary_v2_0.py | 69 +++++ ...lus_r101-d8_4xb2-240k_mapillay-512x1024.py | 103 ++++++++ .../docs/en/user_guides/2_dataset_prepare.md | 117 +++++++++ .../mmseg/datasets/mapillary_v1_2.py | 65 +++++ .../mmseg/datasets/mapillary_v2_0.py | 114 ++++++++ .../tools/dataset_converters/mapillary.py | 245 ++++++++++++++++++ 8 files changed, 867 insertions(+) create mode 100644 projects/mapillary_dataset/README.md create mode 100644 projects/mapillary_dataset/configs/_base_/datasets/mapillary_v1_2.py create mode 100644 projects/mapillary_dataset/configs/_base_/datasets/mapillary_v2_0.py create mode 100644 projects/mapillary_dataset/configs/deeplabv3plus_r101-d8_4xb2-240k_mapillay-512x1024.py create mode 100644 projects/mapillary_dataset/docs/en/user_guides/2_dataset_prepare.md create mode 100644 projects/mapillary_dataset/mmseg/datasets/mapillary_v1_2.py create mode 100644 projects/mapillary_dataset/mmseg/datasets/mapillary_v2_0.py create mode 100644 projects/mapillary_dataset/tools/dataset_converters/mapillary.py diff --git a/projects/mapillary_dataset/README.md b/projects/mapillary_dataset/README.md new file mode 100644 index 0000000000..2b3099522e --- /dev/null +++ b/projects/mapillary_dataset/README.md @@ -0,0 +1,85 @@ +# Mapillary Vistas Dataset + +Support **`Mapillary Vistas Dataset`** + +## Description + +Author: AI-Tianlong + +This project implements **`Mapillary Vistas Dataset`** + +### Dataset preparing + +Preparing `Mapillary Vistas Dataset` dataset following [Mapillary Vistas Dataset Preparing Guide](https://github.com/open-mmlab/mmsegmentation/tree/dev-1.x/projects/mapillary_dataset/docs/en/user_guides/2_dataset_prepare.md) + +```none + mmsegmentation + ├── mmseg + ├── tools + ├── configs + ├── data + │ ├── mapillary + │ │ ├── training + │ │ │ ├── images + │ │ │ ├── v1.2 + | │ │ │ ├── instances + | │ │ │ ├── labels + | │ │ │ ├── labels_mask + | │   │   │ └── panoptic + │ │ │ ├── v2.0 + | │ │ │ ├── instances + | │ │ │ ├── labels + | │ │ │ ├── labels_mask + | │ │ │ ├── panoptic + | │   │   │ └── polygons + │ │ ├── validation + │ │ │ ├── images + | │ │ │ ├── instances + | │ │ │ ├── labels + | │ │ │ ├── labels_mask + | │   │   │ └── panoptic + │ │ │ ├── v2.0 + | │ │ │ ├── instances + | │ │ │ ├── labels + | │ │ │ ├── labels_mask + | │ │ │ ├── panoptic + | │   │   │ └── polygons +``` + +### Training commands with `deeplabv3plus_r101-d8_4xb2-240k_mapillay-512x1024.py` + +```bash +# Dataset train commands +# at `mmsegmentation` folder +bash tools/dist_train.sh projects/mapillary_dataset/configs/deeplabv3plus_r101-d8_4xb2-240k_mapillay-512x1024.py 4 +``` + +## Checklist + +- [x] Milestone 1: PR-ready, and acceptable to be one of the `projects/`. + + - [x] Finish the code + + - [x] Basic docstrings & proper citation + + - [ ] Test-time correctness + + - [x] A full README + +- [ ] Milestone 2: Indicates a successful model implementation. + + - [ ] Training-time correctness + +- [ ] Milestone 3: Good to be a part of our core package! + + - [ ] Type hints and docstrings + + - [ ] Unit tests + + - [ ] Code polishing + + - [ ] Metafile.yml + +- [ ] Move your modules into the core package following the codebase's file hierarchy structure. + +- [ ] Refactor your modules into the core package following the codebase's file hierarchy structure. 
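A quick way to check that the project-level dataset classes register correctly is to load the config and build the dataset. This is a sketch, assuming it runs from the `mmsegmentation` repository root (so the `projects.*` paths in `custom_imports` resolve) and that the Mapillary data has been prepared as above:

```python
from mmengine import Config

from mmseg.registry import DATASETS
from mmseg.utils import register_all_modules

register_all_modules()  # register mmseg's built-in modules first

# Loading the config also executes its `custom_imports`, which pulls in
# MapillaryDataset_v1_2 / MapillaryDataset_v2_0 from this project.
cfg = Config.fromfile(
    'projects/mapillary_dataset/configs/'
    'deeplabv3plus_r101-d8_4xb2-240k_mapillay-512x1024.py')

dataset = DATASETS.build(cfg.train_dataloader.dataset)
print(len(dataset), dataset.metainfo['classes'][:3])
```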
diff --git a/projects/mapillary_dataset/configs/_base_/datasets/mapillary_v1_2.py b/projects/mapillary_dataset/configs/_base_/datasets/mapillary_v1_2.py new file mode 100644 index 0000000000..a0e7d14b52 --- /dev/null +++ b/projects/mapillary_dataset/configs/_base_/datasets/mapillary_v1_2.py @@ -0,0 +1,69 @@ +# dataset settings +dataset_type = 'MapillaryDataset_v1_2' +data_root = 'data/mapillary/' +crop_size = (512, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='RandomResize', + scale=(2048, 1024), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(2048, 1024), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] +img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] +tta_pipeline = [ + dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')), + dict( + type='TestTimeAug', + transforms=[ + [ + dict(type='Resize', scale_factor=r, keep_ratio=True) + for r in img_ratios + ], + [ + dict(type='RandomFlip', prob=0., direction='horizontal'), + dict(type='RandomFlip', prob=1., direction='horizontal') + ], [dict(type='LoadAnnotations')], [dict(type='PackSegInputs')] + ]) +] +train_dataloader = dict( + batch_size=2, + num_workers=4, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='training/images', + seg_map_path='training/v1.2/labels_mask'), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='validation/images', + seg_map_path='validation/v1.2/labels_mask'), + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator diff --git a/projects/mapillary_dataset/configs/_base_/datasets/mapillary_v2_0.py b/projects/mapillary_dataset/configs/_base_/datasets/mapillary_v2_0.py new file mode 100644 index 0000000000..7332d43fad --- /dev/null +++ b/projects/mapillary_dataset/configs/_base_/datasets/mapillary_v2_0.py @@ -0,0 +1,69 @@ +# dataset settings +dataset_type = 'MapillaryDataset_v2_0' +data_root = 'data/mapillary/' +crop_size = (512, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='RandomResize', + scale=(2048, 1024), + ratio_range=(0.5, 2.0), + keep_ratio=True), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='PackSegInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(2048, 1024), keep_ratio=True), + # add loading annotation after ``Resize`` because ground truth + # does not need to do resize data transform + dict(type='LoadAnnotations'), + dict(type='PackSegInputs') +] +img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] +tta_pipeline = [ + dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')), + dict( + 
type='TestTimeAug', + transforms=[ + [ + dict(type='Resize', scale_factor=r, keep_ratio=True) + for r in img_ratios + ], + [ + dict(type='RandomFlip', prob=0., direction='horizontal'), + dict(type='RandomFlip', prob=1., direction='horizontal') + ], [dict(type='LoadAnnotations')], [dict(type='PackSegInputs')] + ]) +] +train_dataloader = dict( + batch_size=2, + num_workers=4, + persistent_workers=True, + sampler=dict(type='InfiniteSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='training/images', + seg_map_path='training/v2.0/labels_mask'), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix=dict( + img_path='validation/images', + seg_map_path='validation/v2.0/labels_mask'), + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU']) +test_evaluator = val_evaluator diff --git a/projects/mapillary_dataset/configs/deeplabv3plus_r101-d8_4xb2-240k_mapillay-512x1024.py b/projects/mapillary_dataset/configs/deeplabv3plus_r101-d8_4xb2-240k_mapillay-512x1024.py new file mode 100644 index 0000000000..6f7ad65ed8 --- /dev/null +++ b/projects/mapillary_dataset/configs/deeplabv3plus_r101-d8_4xb2-240k_mapillay-512x1024.py @@ -0,0 +1,103 @@ +_base_ = ['./_base_/datasets/mapillary_v1_2.py'] # v 1.2 labels +# _base_ = ['./_base_/datasets/mapillary_v2_0.py'] # v2.0 labels +custom_imports = dict(imports=[ + 'projects.mapillary_dataset.mmseg.datasets.mapillary_v1_2', + 'projects.mapillary_dataset.mmseg.datasets.mapillary_v2_0', +]) + +norm_cfg = dict(type='SyncBN', requires_grad=True) +data_preprocessor = dict( + type='SegDataPreProcessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_val=0, + seg_pad_val=255, + size=(512, 1024)) + +model = dict( + type='EncoderDecoder', + data_preprocessor=data_preprocessor, + pretrained=None, + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='DepthwiseSeparableASPPHead', + in_channels=2048, + in_index=3, + channels=512, + dilations=(1, 12, 24, 36), + c1_in_channels=256, + c1_channels=48, + dropout_ratio=0.1, + num_classes=66, # v1.2 + # num_classes=124, # v2.0 + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=66, # v1.2 + # num_classes=124, # v2.0 + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + train_cfg=dict(), + test_cfg=dict(mode='whole')) +default_scope = 'mmseg' +env_cfg = dict( + cudnn_benchmark=True, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl')) +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='SegLocalVisualizer', + vis_backends=[dict(type='LocalVisBackend')], + name='visualizer') +log_processor = dict(by_epoch=False) +log_level = 'INFO' +load_from = None +resume = False +tta_model = dict(type='SegTTAModel') +optimizer 
= dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001), + clip_grad=None) +param_scheduler = [ + dict( + type='PolyLR', + eta_min=0.0001, + power=0.9, + begin=0, + end=240000, + by_epoch=False) +] +train_cfg = dict( + type='IterBasedTrainLoop', max_iters=240000, val_interval=24000) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=24000), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='SegVisualizationHook')) diff --git a/projects/mapillary_dataset/docs/en/user_guides/2_dataset_prepare.md b/projects/mapillary_dataset/docs/en/user_guides/2_dataset_prepare.md new file mode 100644 index 0000000000..405e533156 --- /dev/null +++ b/projects/mapillary_dataset/docs/en/user_guides/2_dataset_prepare.md @@ -0,0 +1,117 @@ +## Prepare datasets + +It is recommended to symlink the dataset root to `$MMSEGMENTATION/data`. +If your folder structure is different, you may need to change the corresponding paths in config files. + +```none +mmsegmentation +├── mmseg +├── tools +├── configs +├── data +│ ├── mapillary +│ │ ├── training +│ │ │ ├── images +│ │ │ ├── v1.2 +| │ │ │ ├── instances +| │ │ │ ├── labels +| │ │ │ ├── labels_mask +| │   │   │ └── panoptic +│ │ │ ├── v2.0 +| │ │ │ ├── instances +| │ │ │ ├── labels +| │ │ │ ├── labels_mask +| │ │ │ ├── panoptic +| │   │   │ └── polygons +│ │ ├── validation +│ │ │ ├── images +| │ │ │ ├── instances +| │ │ │ ├── labels +| │ │ │ ├── labels_mask +| │   │   │ └── panoptic +│ │ │ ├── v2.0 +| │ │ │ ├── instances +| │ │ │ ├── labels +| │ │ │ ├── labels_mask +| │ │ │ ├── panoptic +| │   │   │ └── polygons +``` + +## Mapillary Vistas Datasets + +- The dataset could be download [here](https://www.mapillary.com/dataset/vistas) after registration. +- Assumption you have put the dataset zip file in `mmsegmentation/data` +- Please run the following commands to unzip dataset. + ```bash + cd data + mkdir mapillary + unzip -d mapillary An-ZjB1Zm61yAZG0ozTymz8I8NqI4x0MrYrh26dq7kPgfu8vf9ImrdaOAVOFYbJ2pNAgUnVGBmbue9lTgdBOb5BbKXIpFs0fpYWqACbrQDChAA2fdX0zS9PcHu7fY8c-FOvyBVxPNYNFQuM.zip + ``` +- After unzip, you will get Mapillary Vistas Dataset like this structure. 
+ ```none + ├── data + │ ├── mapillary + │ │ ├── training + │ │ │ ├── images + │ │ │ ├── v1.2 + | │ │ │ ├── instances + | │ │ │ ├── labels + | │   │   │ └── panoptic + │ │ │ ├── v2.0 + | │ │ │ ├── instances + | │ │ │ ├── labels + | │ │ │ ├── panoptic + | │   │   │ └── polygons + │ │ ├── validation + │ │ │ ├── images + | │ │ │ ├── instances + | │ │ │ ├── labels + | │   │   │ └── panoptic + │ │ │ ├── v2.0 + | │ │ │ ├── instances + | │ │ │ ├── labels + | │ │ │ ├── panoptic + | │   │   │ └── polygons + ``` +- run following commands to convert RGB labels to mask labels + ```bash + # --nproc optional, default 1, whether use multi-progress + # --version optional, 'v1.2', 'v2.0','all', default 'all', choose convert which version labels + # run this command at 'mmsegmentation/projects/Mapillary_dataset' folder + cd mmsegmentation/projects/mapillary_dataset + python tools/dataset_converters/mapillary.py ../../data/mapillary --nproc 8 --version all + ``` + After then, you will get this structure + ```none + mmsegmentation + ├── mmseg + ├── tools + ├── configs + ├── data + │ ├── mapillary + │ │ ├── training + │ │ │ ├── images + │ │ │ ├── v1.2 + | │ │ │ ├── instances + | │ │ │ ├── labels + | │ │ │ ├── labels_mask + | │   │   │ └── panoptic + │ │ │ ├── v2.0 + | │ │ │ ├── instances + | │ │ │ ├── labels + | │ │ │ ├── labels_mask + | │ │ │ ├── panoptic + | │   │   │ └── polygons + │ │ ├── validation + │ │ │ ├── images + | │ │ │ ├── instances + | │ │ │ ├── labels + | │ │ │ ├── labels_mask + | │   │   │ └── panoptic + │ │ │ ├── v2.0 + | │ │ │ ├── instances + | │ │ │ ├── labels + | │ │ │ ├── labels_mask + | │ │ │ ├── panoptic + | │   │   │ └── polygons + ``` diff --git a/projects/mapillary_dataset/mmseg/datasets/mapillary_v1_2.py b/projects/mapillary_dataset/mmseg/datasets/mapillary_v1_2.py new file mode 100644 index 0000000000..975d07b24e --- /dev/null +++ b/projects/mapillary_dataset/mmseg/datasets/mapillary_v1_2.py @@ -0,0 +1,65 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmseg.datasets.basesegdataset import BaseSegDataset +from mmseg.registry import DATASETS + + +@DATASETS.register_module() +class MapillaryDataset_v1_2(BaseSegDataset): + """Mapillary Vistas Dataset. + + Dataset paper link: + http://ieeexplore.ieee.org/document/8237796/ + + v1.2 contain 66 object classes. + (37 instance-specific) + + v2.0 contain 124 object classes. + (70 instance-specific, 46 stuff, 8 void or crowd). + + The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is + fixed to '.png' for Mapillary Vistas Dataset. 
+ """ + METAINFO = dict( + classes=('Bird', 'Ground Animal', 'Curb', 'Fence', 'Guard Rail', + 'Barrier', 'Wall', 'Bike Lane', 'Crosswalk - Plain', + 'Curb Cut', 'Parking', 'Pedestrian Area', 'Rail Track', + 'Road', 'Service Lane', 'Sidewalk', 'Bridge', 'Building', + 'Tunnel', 'Person', 'Bicyclist', 'Motorcyclist', + 'Other Rider', 'Lane Marking - Crosswalk', + 'Lane Marking - General', 'Mountain', 'Sand', 'Sky', 'Snow', + 'Terrain', 'Vegetation', 'Water', 'Banner', 'Bench', + 'Bike Rack', 'Billboard', 'Catch Basin', 'CCTV Camera', + 'Fire Hydrant', 'Junction Box', 'Mailbox', 'Manhole', + 'Phone Booth', 'Pothole', 'Street Light', 'Pole', + 'Traffic Sign Frame', 'Utility Pole', 'Traffic Light', + 'Traffic Sign (Back)', 'Traffic Sign (Front)', 'Trash Can', + 'Bicycle', 'Boat', 'Bus', 'Car', 'Caravan', 'Motorcycle', + 'On Rails', 'Other Vehicle', 'Trailer', 'Truck', + 'Wheeled Slow', 'Car Mount', 'Ego Vehicle', 'Unlabeled'), + palette=[[165, 42, 42], [0, 192, 0], [196, 196, 196], [190, 153, 153], + [180, 165, 180], [90, 120, 150], [102, 102, 156], + [128, 64, 255], [140, 140, 200], [170, 170, 170], + [250, 170, 160], [96, 96, 96], + [230, 150, 140], [128, 64, 128], [110, 110, 110], + [244, 35, 232], [150, 100, 100], [70, 70, 70], [150, 120, 90], + [220, 20, 60], [255, 0, 0], [255, 0, 100], [255, 0, 200], + [200, 128, 128], [255, 255, 255], [64, 170, + 64], [230, 160, 50], + [70, 130, 180], [190, 255, 255], [152, 251, 152], + [107, 142, 35], [0, 170, 30], [255, 255, 128], [250, 0, 30], + [100, 140, 180], [220, 220, 220], [220, 128, 128], + [222, 40, 40], [100, 170, 30], [40, 40, 40], [33, 33, 33], + [100, 128, 160], [142, 0, 0], [70, 100, 150], [210, 170, 100], + [153, 153, 153], [128, 128, 128], [0, 0, 80], [250, 170, 30], + [192, 192, 192], [220, 220, 0], [140, 140, 20], [119, 11, 32], + [150, 0, 255], [0, 60, 100], [0, 0, 142], [0, 0, 90], + [0, 0, 230], [0, 80, 100], [128, 64, 64], [0, 0, 110], + [0, 0, 70], [0, 0, 192], [32, 32, 32], [120, 10, + 10], [0, 0, 0]]) + + def __init__(self, + img_suffix='.jpg', + seg_map_suffix='.png', + **kwargs) -> None: + super().__init__( + img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs) diff --git a/projects/mapillary_dataset/mmseg/datasets/mapillary_v2_0.py b/projects/mapillary_dataset/mmseg/datasets/mapillary_v2_0.py new file mode 100644 index 0000000000..9c67a8b212 --- /dev/null +++ b/projects/mapillary_dataset/mmseg/datasets/mapillary_v2_0.py @@ -0,0 +1,114 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmseg.datasets.basesegdataset import BaseSegDataset +from mmseg.registry import DATASETS + + +@DATASETS.register_module() +class MapillaryDataset_v2_0(BaseSegDataset): + """Mapillary Vistas Dataset. + + Dataset paper link: + http://ieeexplore.ieee.org/document/8237796/ + + v1.2 contain 66 object classes. + (37 instance-specific) + + v2.0 contain 124 object classes. + (70 instance-specific, 46 stuff, 8 void or crowd). + + The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is + fixed to '.png' for Mapillary Vistas Dataset. 
+ """ + METAINFO = dict( + classes=( + 'Bird', 'Ground Animal', 'Ambiguous Barrier', 'Concrete Block', + 'Curb', 'Fence', 'Guard Rail', 'Barrier', 'Road Median', + 'Road Side', 'Lane Separator', 'Temporary Barrier', 'Wall', + 'Bike Lane', 'Crosswalk - Plain', 'Curb Cut', 'Driveway', + 'Parking', 'Parking Aisle', 'Pedestrian Area', 'Rail Track', + 'Road', 'Road Shoulder', 'Service Lane', 'Sidewalk', + 'Traffic Island', 'Bridge', 'Building', 'Garage', 'Tunnel', + 'Person', 'Person Group', 'Bicyclist', 'Motorcyclist', + 'Other Rider', 'Lane Marking - Dashed Line', + 'Lane Marking - Straight Line', 'Lane Marking - Zigzag Line', + 'Lane Marking - Ambiguous', 'Lane Marking - Arrow (Left)', + 'Lane Marking - Arrow (Other)', 'Lane Marking - Arrow (Right)', + 'Lane Marking - Arrow (Split Left or Straight)', + 'Lane Marking - Arrow (Split Right or Straight)', + 'Lane Marking - Arrow (Straight)', 'Lane Marking - Crosswalk', + 'Lane Marking - Give Way (Row)', + 'Lane Marking - Give Way (Single)', + 'Lane Marking - Hatched (Chevron)', + 'Lane Marking - Hatched (Diagonal)', 'Lane Marking - Other', + 'Lane Marking - Stop Line', 'Lane Marking - Symbol (Bicycle)', + 'Lane Marking - Symbol (Other)', 'Lane Marking - Text', + 'Lane Marking (only) - Dashed Line', + 'Lane Marking (only) - Crosswalk', 'Lane Marking (only) - Other', + 'Lane Marking (only) - Test', 'Mountain', 'Sand', 'Sky', 'Snow', + 'Terrain', 'Vegetation', 'Water', 'Banner', 'Bench', 'Bike Rack', + 'Catch Basin', 'CCTV Camera', 'Fire Hydrant', 'Junction Box', + 'Mailbox', 'Manhole', 'Parking Meter', 'Phone Booth', 'Pothole', + 'Signage - Advertisement', 'Signage - Ambiguous', 'Signage - Back', + 'Signage - Information', 'Signage - Other', 'Signage - Store', + 'Street Light', 'Pole', 'Pole Group', 'Traffic Sign Frame', + 'Utility Pole', 'Traffic Cone', 'Traffic Light - General (Single)', + 'Traffic Light - Pedestrians', 'Traffic Light - General (Upright)', + 'Traffic Light - General (Horizontal)', 'Traffic Light - Cyclists', + 'Traffic Light - Other', 'Traffic Sign - Ambiguous', + 'Traffic Sign (Back)', 'Traffic Sign - Direction (Back)', + 'Traffic Sign - Direction (Front)', 'Traffic Sign (Front)', + 'Traffic Sign - Parking', 'Traffic Sign - Temporary (Back)', + 'Traffic Sign - Temporary (Front)', 'Trash Can', 'Bicycle', 'Boat', + 'Bus', 'Car', 'Caravan', 'Motorcycle', 'On Rails', 'Other Vehicle', + 'Trailer', 'Truck', 'Vehicle Group', 'Wheeled Slow', 'Water Valve', + 'Car Mount', 'Dynamic', 'Ego Vehicle', 'Ground', 'Static', + 'Unlabeled'), + palette=[[165, 42, 42], [0, 192, 0], [250, 170, 31], [250, 170, 32], + [196, 196, 196], [190, 153, 153], [180, 165, 180], + [90, 120, 150], [250, 170, 33], [250, 170, 34], + [128, 128, 128], [250, 170, 35], [102, 102, 156], + [128, 64, 255], [140, 140, 200], [170, 170, 170], + [250, 170, 36], [250, 170, 160], [250, 170, 37], [96, 96, 96], + [230, 150, 140], [128, 64, 128], [110, 110, 110], + [110, 110, 110], [244, 35, 232], [128, 196, + 128], [150, 100, 100], + [70, 70, 70], [150, 150, 150], [150, 120, 90], [220, 20, 60], + [220, 20, 60], [255, 0, 0], [255, 0, 100], [255, 0, 200], + [255, 255, 255], [255, 255, 255], [250, 170, 29], + [250, 170, 28], [250, 170, 26], [250, 170, + 25], [250, 170, 24], + [250, 170, 22], [250, 170, 21], [250, 170, + 20], [255, 255, 255], + [250, 170, 19], [250, 170, 18], [250, 170, + 12], [250, 170, 11], + [255, 255, 255], [255, 255, 255], [250, 170, 16], + [250, 170, 15], [250, 170, 15], [255, 255, 255], + [255, 255, 255], [255, 255, 255], [255, 255, 255], + [64, 170, 64], 
[230, 160, 50], + [70, 130, 180], [190, 255, 255], [152, 251, 152], + [107, 142, 35], [0, 170, 30], [255, 255, 128], [250, 0, 30], + [100, 140, 180], [220, 128, 128], [222, 40, + 40], [100, 170, 30], + [40, 40, 40], [33, 33, 33], [100, 128, 160], [20, 20, 255], + [142, 0, 0], [70, 100, 150], [250, 171, 30], [250, 172, 30], + [250, 173, 30], [250, 174, 30], [250, 175, + 30], [250, 176, 30], + [210, 170, 100], [153, 153, 153], [153, 153, 153], + [128, 128, 128], [0, 0, 80], [210, 60, 60], [250, 170, 30], + [250, 170, 30], [250, 170, 30], [250, 170, + 30], [250, 170, 30], + [250, 170, 30], [192, 192, 192], [192, 192, 192], + [192, 192, 192], [220, 220, 0], [220, 220, 0], [0, 0, 196], + [192, 192, 192], [220, 220, 0], [140, 140, 20], [119, 11, 32], + [150, 0, 255], [0, 60, 100], [0, 0, 142], [0, 0, 90], + [0, 0, 230], [0, 80, 100], [128, 64, 64], [0, 0, 110], + [0, 0, 70], [0, 0, 142], [0, 0, 192], [170, 170, 170], + [32, 32, 32], [111, 74, 0], [120, 10, 10], [81, 0, 81], + [111, 111, 0], [0, 0, 0]]) + + def __init__(self, + img_suffix='.jpg', + seg_map_suffix='.png', + **kwargs) -> None: + super().__init__( + img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs) diff --git a/projects/mapillary_dataset/tools/dataset_converters/mapillary.py b/projects/mapillary_dataset/tools/dataset_converters/mapillary.py new file mode 100644 index 0000000000..3ccb2d67b3 --- /dev/null +++ b/projects/mapillary_dataset/tools/dataset_converters/mapillary.py @@ -0,0 +1,245 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from functools import partial + +import mmcv +import numpy as np +from mmengine.utils import (mkdir_or_exist, scandir, track_parallel_progress, + track_progress) + +colormap_v1_2 = np.array([[165, 42, 42], [0, 192, 0], [196, 196, 196], + [190, 153, 153], [180, 165, 180], [90, 120, 150], + [102, 102, 156], [128, 64, 255], [140, 140, 200], + [170, 170, 170], [250, 170, 160], [96, 96, 96], + [230, 150, 140], [128, 64, 128], [110, 110, 110], + [244, 35, 232], [150, 100, 100], [70, 70, 70], + [150, 120, 90], [220, 20, 60], [255, 0, 0], + [255, 0, 100], [255, 0, 200], [200, 128, 128], + [255, 255, 255], [64, 170, 64], [230, 160, 50], + [70, 130, 180], [190, 255, 255], [152, 251, 152], + [107, 142, 35], [0, 170, 30], [255, 255, 128], + [250, 0, 30], [100, 140, 180], [220, 220, 220], + [220, 128, 128], [222, 40, 40], [100, 170, 30], + [40, 40, 40], [33, 33, 33], [100, 128, 160], + [142, 0, 0], [70, 100, 150], [210, 170, 100], + [153, 153, 153], [128, 128, 128], [0, 0, 80], + [250, 170, 30], [192, 192, 192], [220, 220, 0], + [140, 140, 20], [119, 11, 32], [150, 0, 255], + [0, 60, 100], [0, 0, 142], [0, 0, 90], [0, 0, 230], + [0, 80, 100], [128, 64, 64], [0, 0, 110], [0, 0, 70], + [0, 0, 192], [32, 32, 32], [120, 10, 10], [0, 0, 0]]) + +colormap_v2_0 = np.array([[165, 42, 42], [0, 192, 0], [250, 170, 31], + [250, 170, 32], [196, 196, 196], [190, 153, 153], + [180, 165, 180], [90, 120, 150], [250, 170, 33], + [250, 170, 34], [128, 128, 128], [250, 170, 35], + [102, 102, 156], [128, 64, 255], [140, 140, 200], + [170, 170, 170], [250, 170, 36], [250, 170, 160], + [250, 170, 37], [96, 96, 96], [230, 150, 140], + [128, 64, 128], [110, 110, 110], [110, 110, 110], + [244, 35, 232], [128, 196, 128], [150, 100, 100], + [70, 70, 70], [150, 150, 150], [150, 120, 90], + [220, 20, 60], [220, 20, 60], [255, 0, 0], + [255, 0, 100], [255, 0, 200], [255, 255, 255], + [255, 255, 255], [250, 170, 29], [250, 170, 28], + [250, 170, 26], [250, 170, 25], [250, 170, 24], + 
[250, 170, 22], [250, 170, 21], [250, 170, 20], + [255, 255, 255], [250, 170, 19], [250, 170, 18], + [250, 170, 12], [250, 170, 11], [255, 255, 255], + [255, 255, 255], [250, 170, 16], [250, 170, 15], + [250, 170, 15], [255, 255, 255], [255, 255, 255], + [255, 255, 255], [255, 255, 255], [64, 170, 64], + [230, 160, 50], [70, 130, 180], [190, 255, 255], + [152, 251, 152], [107, 142, 35], [0, 170, 30], + [255, 255, 128], [250, 0, 30], [100, 140, 180], + [220, 128, 128], [222, 40, 40], [100, 170, 30], + [40, 40, 40], [33, 33, 33], [100, 128, 160], + [20, 20, 255], [142, 0, 0], [70, 100, 150], + [250, 171, 30], [250, 172, 30], [250, 173, 30], + [250, 174, 30], [250, 175, 30], [250, 176, 30], + [210, 170, 100], [153, 153, 153], [153, 153, 153], + [128, 128, 128], [0, 0, 80], [210, 60, 60], + [250, 170, 30], [250, 170, 30], [250, 170, 30], + [250, 170, 30], [250, 170, 30], [250, 170, 30], + [192, 192, 192], [192, 192, 192], [192, 192, 192], + [220, 220, 0], [220, 220, 0], [0, 0, 196], + [192, 192, 192], [220, 220, 0], [140, 140, 20], + [119, 11, 32], [150, 0, 255], [0, 60, 100], + [0, 0, 142], [0, 0, 90], [0, 0, 230], [0, 80, 100], + [128, 64, 64], [0, 0, 110], [0, 0, 70], [0, 0, 142], + [0, 0, 192], [170, 170, 170], [32, 32, 32], + [111, 74, 0], [120, 10, 10], [81, 0, 81], + [111, 111, 0], [0, 0, 0]]) + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert Mapillary dataset to mmsegmentation format') + parser.add_argument('dataset_path', help='Mapillary folder path') + parser.add_argument( + '--version', + default='all', + help="Mapillary labels version, 'v1.2','v2.0','all'") + parser.add_argument('-o', '--out_dir', help='output path') + parser.add_argument( + '--nproc', default=1, type=int, help='number of process') + args = parser.parse_args() + return args + + +def mapillary_colormap2label(colormap: np.ndarray) -> list: + """Create a `list` shaped (256^3, 1), convert each color palette to a + number, which can use to find the correct label value. + + For example labels 0--Bird--[165, 42, 42] + (165*256 + 42) * 256 + 42 = 10824234 (This is list's index]) + `colormap2label[10824234] = 0` + + In converting, if a RGB pixel value is [165, 42, 42], + through colormap2label[10824234]-->can quickly find + this labels value is 0. + Through matrix multiply to compute a img is very fast. + + Args: + colormap (np.ndarray): Mapillary Vistas Dataset palette + + Returns: + list: values are mask labels, + indexes are palette's convert results.、 + """ + colormap2label = np.zeros(256**3, dtype=np.longlong) + for i, colormap_ in enumerate(colormap): + colormap2label[(colormap_[0] * 256 + colormap_[1]) * 256 + + colormap_[2]] = i + return colormap2label + + +def mapillary_masklabel(rgb_label: np.ndarray, + colormap2label: list) -> np.ndarray: + """Computing a img mask label through `colormap2label` get in + `mapillary_colormap2label(COLORMAP: np.ndarray)` + + Args: + rgb_label (np.array): a RGB labels img. + colormap2label (list): get in mapillary_colormap2label(colormap) + + Returns: + np.ndarray: mask labels array. + """ + colormap_ = rgb_label.astype('uint32') + idx = np.array((colormap_[:, :, 0] * 256 + colormap_[:, :, 1]) * 256 + + colormap_[:, :, 2]).astype('uint32') + return colormap2label[idx] + + +def RGB2Mask(rgb_label_path: str, colormap2label: list) -> None: + """Mapillary Vistas Dataset provide 8-bit with color-palette class-specific + labels for semantic segmentation. However, semantic segmentation needs + single channel mask labels. 
+ + This code is about converting mapillary RGB labels + {traing,validation/v1.2,v2.0/labels} to mask labels + {{traing,validation/v1.2,v2.0/labels_mask} + + Args: + rgb_label_path (str): image absolute path. + dataset_version (str): v1.2 or v2.0 to choose color_map . + """ + rgb_label = mmcv.imread(rgb_label_path, channel_order='rgb') + + masks_label = mapillary_masklabel(rgb_label, colormap2label) + + mmcv.imwrite( + masks_label.astype(np.uint8), + rgb_label_path.replace('labels', 'labels_mask')) + + +def main(): + colormap2label_v1_2 = mapillary_colormap2label(colormap_v1_2) + colormap2label_v2_0 = mapillary_colormap2label(colormap_v2_0) + + dataset_path = args.dataset_path + if args.out_dir is None: + out_dir = dataset_path + else: + out_dir = args.out_dir + + RGB_labels_path = [] + RGB_labels_v1_2_path = [] + RGB_labels_v2_0_path = [] + print('Scanning labels path....') + for label_path in scandir(dataset_path, suffix='.png', recursive=True): + if 'labels' in label_path: + rgb_label_path = osp.join(dataset_path, label_path) + RGB_labels_path.append(rgb_label_path) + if 'v1.2' in label_path: + RGB_labels_v1_2_path.append(rgb_label_path) + elif 'v2.0' in label_path: + RGB_labels_v2_0_path.append(rgb_label_path) + + if args.version == 'all': + print(f'Totaly found {len(RGB_labels_path)} {args.version} RGB labels') + elif args.version == 'v1.2': + print(f'Found {len(RGB_labels_v1_2_path)} {args.version} RGB labels') + elif args.version == 'v2.0': + print(f'Found {len(RGB_labels_v2_0_path)} {args.version} RGB labels') + print('Making directories...') + mkdir_or_exist(osp.join(out_dir, 'training', 'v1.2', 'labels_mask')) + mkdir_or_exist(osp.join(out_dir, 'validation', 'v1.2', 'labels_mask')) + mkdir_or_exist(osp.join(out_dir, 'training', 'v2.0', 'labels_mask')) + mkdir_or_exist(osp.join(out_dir, 'validation', 'v2.0', 'labels_mask')) + print('Directories Have Made...') + + if args.nproc > 1: + if args.version == 'all': + print('Converting v1.2 ....') + track_parallel_progress( + partial(RGB2Mask, colormap2label=colormap2label_v1_2), + RGB_labels_v1_2_path, + nproc=args.nproc) + print('Converting v2.0 ....') + track_parallel_progress( + partial(RGB2Mask, colormap2label=colormap2label_v2_0), + RGB_labels_v2_0_path, + nproc=args.nproc) + elif args.version == 'v1.2': + print('Converting v1.2 ....') + track_parallel_progress( + partial(RGB2Mask, colormap2label=colormap2label_v1_2), + RGB_labels_v1_2_path, + nproc=args.nproc) + elif args.version == 'v2.0': + print('Converting v2.0 ....') + track_parallel_progress( + partial(RGB2Mask, colormap2label=colormap2label_v2_0), + RGB_labels_v2_0_path, + nproc=args.nproc) + + else: + if args.version == 'all': + print('Converting v1.2 ....') + track_progress( + partial(RGB2Mask, colormap2label=colormap2label_v1_2), + RGB_labels_v1_2_path) + print('Converting v2.0 ....') + track_progress( + partial(RGB2Mask, colormap2label=colormap2label_v2_0), + RGB_labels_v2_0_path) + elif args.version == 'v1.2': + print('Converting v1.2 ....') + track_progress( + partial(RGB2Mask, colormap2label=colormap2label_v1_2), + RGB_labels_v1_2_path) + elif args.version == 'v2.0': + print('Converting v2.0 ....') + track_progress( + partial(RGB2Mask, colormap2label=colormap2label_v2_0), + RGB_labels_v2_0_path) + + print('Have convert Mapillary Vistas Datasets RGB labels to Mask labels!') + + +if __name__ == '__main__': + args = parse_args() + main() From 7fc8ca0312271e5bade549a601f4efd62545eaa1 Mon Sep 17 00:00:00 2001 From: Leeinsn <48157973+Leeinsn@users.noreply.github.com> Date: 
Fri, 20 Jan 2023 15:34:11 +0800 Subject: [PATCH 17/25] CodeCamp #141 [Feature] Add BioMedical3DRandomFlip. (#2404) ## Motivation Support for biomedical 3d images augmentation. ## Modification Add BioMedical3DRandomFlip in mmseg/datasets/transforms/transforms.py. Co-authored-by: MeowZheng --- mmseg/datasets/__init__.py | 24 ++--- mmseg/datasets/transforms/__init__.py | 16 ++-- mmseg/datasets/transforms/transforms.py | 121 +++++++++++++++++++++++- tests/test_datasets/test_transform.py | 95 ++++++++++++++++++- 4 files changed, 235 insertions(+), 21 deletions(-) diff --git a/mmseg/datasets/__init__.py b/mmseg/datasets/__init__.py index 28f067983c..8aa2e8d1a8 100644 --- a/mmseg/datasets/__init__.py +++ b/mmseg/datasets/__init__.py @@ -21,9 +21,9 @@ from .synapse import SynapseDataset # yapf: disable from .transforms import (CLAHE, AdjustGamma, BioMedical3DPad, - BioMedical3DRandomCrop, BioMedicalGaussianBlur, - BioMedicalGaussianNoise, BioMedicalRandomGamma, - GenerateEdge, LoadAnnotations, + BioMedical3DRandomCrop, BioMedical3DRandomFlip, + BioMedicalGaussianBlur, BioMedicalGaussianNoise, + BioMedicalRandomGamma, GenerateEdge, LoadAnnotations, LoadBiomedicalAnnotation, LoadBiomedicalData, LoadBiomedicalImageFromFile, LoadImageFromNDArray, PackSegInputs, PhotoMetricDistortion, RandomCrop, @@ -34,15 +34,15 @@ # yapf: enable __all__ = [ - 'BaseSegDataset', 'BioMedical3DRandomCrop', 'CityscapesDataset', - 'PascalVOCDataset', 'ADE20KDataset', 'PascalContextDataset', - 'PascalContextDataset59', 'ChaseDB1Dataset', 'DRIVEDataset', 'HRFDataset', - 'STAREDataset', 'DarkZurichDataset', 'NightDrivingDataset', - 'COCOStuffDataset', 'LoveDADataset', 'MultiImageMixDataset', - 'iSAIDDataset', 'ISPRSDataset', 'PotsdamDataset', 'LoadAnnotations', - 'RandomCrop', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate', - 'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray', 'RandomCutOut', - 'RandomMosaic', 'PackSegInputs', 'ResizeToMultiple', + 'BaseSegDataset', 'BioMedical3DRandomCrop', 'BioMedical3DRandomFlip', + 'CityscapesDataset', 'PascalVOCDataset', 'ADE20KDataset', + 'PascalContextDataset', 'PascalContextDataset59', 'ChaseDB1Dataset', + 'DRIVEDataset', 'HRFDataset', 'STAREDataset', 'DarkZurichDataset', + 'NightDrivingDataset', 'COCOStuffDataset', 'LoveDADataset', + 'MultiImageMixDataset', 'iSAIDDataset', 'ISPRSDataset', 'PotsdamDataset', + 'LoadAnnotations', 'RandomCrop', 'SegRescale', 'PhotoMetricDistortion', + 'RandomRotate', 'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray', + 'RandomCutOut', 'RandomMosaic', 'PackSegInputs', 'ResizeToMultiple', 'LoadImageFromNDArray', 'LoadBiomedicalImageFromFile', 'LoadBiomedicalAnnotation', 'LoadBiomedicalData', 'GenerateEdge', 'DecathlonDataset', 'LIPDataset', 'ResizeShortestEdge', diff --git a/mmseg/datasets/transforms/__init__.py b/mmseg/datasets/transforms/__init__.py index e39f05c921..25f4ee4a98 100644 --- a/mmseg/datasets/transforms/__init__.py +++ b/mmseg/datasets/transforms/__init__.py @@ -5,12 +5,13 @@ LoadImageFromNDArray) # yapf: disable from .transforms import (CLAHE, AdjustGamma, BioMedical3DPad, - BioMedical3DRandomCrop, BioMedicalGaussianBlur, - BioMedicalGaussianNoise, BioMedicalRandomGamma, - GenerateEdge, PhotoMetricDistortion, RandomCrop, - RandomCutOut, RandomMosaic, RandomRotate, - RandomRotFlip, Rerange, ResizeShortestEdge, - ResizeToMultiple, RGB2Gray, SegRescale) + BioMedical3DRandomCrop, BioMedical3DRandomFlip, + BioMedicalGaussianBlur, BioMedicalGaussianNoise, + BioMedicalRandomGamma, GenerateEdge, + PhotoMetricDistortion, RandomCrop, RandomCutOut, + 
RandomMosaic, RandomRotate, RandomRotFlip, Rerange, + ResizeShortestEdge, ResizeToMultiple, RGB2Gray, + SegRescale) # yapf: enable __all__ = [ @@ -20,5 +21,6 @@ 'ResizeToMultiple', 'LoadImageFromNDArray', 'LoadBiomedicalImageFromFile', 'LoadBiomedicalAnnotation', 'LoadBiomedicalData', 'GenerateEdge', 'ResizeShortestEdge', 'BioMedicalGaussianNoise', 'BioMedicalGaussianBlur', - 'BioMedicalRandomGamma', 'BioMedical3DPad', 'RandomRotFlip' + 'BioMedical3DRandomFlip', 'BioMedicalRandomGamma', 'BioMedical3DPad', + 'RandomRotFlip' ] diff --git a/mmseg/datasets/transforms/transforms.py b/mmseg/datasets/transforms/transforms.py index fbb890b559..ef4e78dd8c 100644 --- a/mmseg/datasets/transforms/transforms.py +++ b/mmseg/datasets/transforms/transforms.py @@ -1,7 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. import copy import warnings -from typing import Dict, Sequence, Tuple, Union +from typing import Dict, List, Optional, Sequence, Tuple, Union import cv2 import mmcv @@ -2015,3 +2015,122 @@ def __repr__(self): repr_str += f'pad_val={self.pad_val}), ' repr_str += f'seg_pad_val={self.seg_pad_val})' return repr_str + + +@TRANSFORMS.register_module() +class BioMedical3DRandomFlip(BaseTransform): + """Flip biomedical 3D images and segmentations. + + Modified from https://github.com/MIC-DKFZ/batchgenerators/blob/master/batchgenerators/transforms/spatial_transforms.py # noqa:E501 + + Copyright 2021 Division of + Medical Image Computing, German Cancer Research Center (DKFZ) and Applied + Computer Vision Lab, Helmholtz Imaging Platform. + Licensed under the Apache-2.0 License. + + Required Keys: + + - img (np.ndarry): Biomedical image with shape (N, Z, Y, X) by default, + N is the number of modalities. + - gt_seg_map (np.ndarray, optional): Biomedical seg map with shape + (Z, Y, X) by default. + + Modified Keys: + + - img (np.ndarry): Biomedical image with shape (N, Z, Y, X) by default, + N is the number of modalities. + - gt_seg_map (np.ndarray, optional): Biomedical seg map with shape + (Z, Y, X) by default. + + Added Keys: + + - do_flip + - flip_axes + + Args: + prob (float): Flipping probability. + axes (Tuple[int, ...]): Flipping axes with order 'ZXY'. + swap_label_pairs (Optional[List[Tuple[int, int]]]): + The segmentation label pairs that are swapped when flipping. + """ + + def __init__(self, + prob: float, + axes: Tuple[int, ...], + swap_label_pairs: Optional[List[Tuple[int, int]]] = None): + self.prob = prob + self.axes = axes + self.swap_label_pairs = swap_label_pairs + assert prob >= 0 and prob <= 1 + if axes is not None: + assert max(axes) <= 2 + + @staticmethod + def _flip(img, direction: Tuple[bool, bool, bool]) -> np.ndarray: + if direction[0]: + img[:, :] = img[:, ::-1] + if direction[1]: + img[:, :, :] = img[:, :, ::-1] + if direction[2]: + img[:, :, :, :] = img[:, :, :, ::-1] + return img + + def _do_flip(self, img: np.ndarray) -> Tuple[bool, bool, bool]: + """Call function to determine which axis to flip. + + Args: + img (np.ndarry): Image or segmentation map array. + Returns: + tuple: Flip action, whether to flip on the z, x, and y axes. 
+ """ + flip_c, flip_x, flip_y = False, False, False + if self.axes is not None: + flip_c = 0 in self.axes and np.random.rand() < self.prob + flip_x = 1 in self.axes and np.random.rand() < self.prob + if len(img.shape) == 4: + flip_y = 2 in self.axes and np.random.rand() < self.prob + return flip_c, flip_x, flip_y + + def _swap_label(self, seg: np.ndarray) -> np.ndarray: + out = seg.copy() + for first, second in self.swap_label_pairs: + first_area = (seg == first) + second_area = (seg == second) + out[first_area] = second + out[second_area] = first + return out + + def transform(self, results: Dict) -> Dict: + """Call function to flip and swap pair labels. + + Args: + results (dict): Result dict. + Returns: + dict: Flipped results, 'do_flip', 'flip_axes' keys are added into + result dict. + """ + # get actual flipped axis + if 'do_flip' not in results: + results['do_flip'] = self._do_flip(results['img']) + if 'flip_axes' not in results: + results['flip_axes'] = self.axes + # flip image + results['img'] = self._flip( + results['img'], direction=results['do_flip']) + # flip seg + if results['gt_seg_map'] is not None: + if results['gt_seg_map'].shape != results['img'].shape: + results['gt_seg_map'] = results['gt_seg_map'][None, :] + results['gt_seg_map'] = self._flip( + results['gt_seg_map'], direction=results['do_flip']) + results['gt_seg_map'] = results['gt_seg_map'].squeeze() + # swap label pairs + if self.swap_label_pairs is not None: + results['gt_seg_map'] = self._swap_label(results['gt_seg_map']) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob}, axes={self.axes}, ' \ + f'swap_label_pairs={self.swap_label_pairs})' + return repr_str diff --git a/tests/test_datasets/test_transform.py b/tests/test_datasets/test_transform.py index 146e639d77..906b3c27e8 100644 --- a/tests/test_datasets/test_transform.py +++ b/tests/test_datasets/test_transform.py @@ -8,7 +8,8 @@ from PIL import Image from mmseg.datasets.transforms import * # noqa -from mmseg.datasets.transforms import (LoadBiomedicalImageFromFile, +from mmseg.datasets.transforms import (LoadBiomedicalData, + LoadBiomedicalImageFromFile, PhotoMetricDistortion, RandomCrop) from mmseg.registry import TRANSFORMS from mmseg.utils import register_all_modules @@ -1056,3 +1057,95 @@ def test_BioMedical3DPad(): assert results['img'].shape[1:] == (6, 6, 6) assert results['gt_seg_map'].shape[1:] == (6, 6, 6) assert results['pad_shape'] == (6, 6, 6) + + +def test_biomedical_3d_flip(): + # test assertion for invalid prob + with pytest.raises(AssertionError): + transform = dict(type='BioMedical3DRandomFlip', prob=1.5, axes=(0, 1)) + transform = TRANSFORMS.build(transform) + + # test assertion for invalid direction + with pytest.raises(AssertionError): + transform = dict(type='BioMedical3DRandomFlip', prob=1, axes=(0, 1, 3)) + transform = TRANSFORMS.build(transform) + + # test flip axes are (0, 1, 2) + transform = dict(type='BioMedical3DRandomFlip', prob=1, axes=(0, 1, 2)) + transform = TRANSFORMS.build(transform) + + # test with random 3d data + results = dict() + results['img_path'] = 'Null' + results['img_shape'] = (1, 16, 16, 16) + results['img'] = np.random.randn(1, 16, 16, 16) + results['gt_seg_map'] = np.random.randint(0, 4, (16, 16, 16)) + + original_img = results['img'].copy() + original_seg = results['gt_seg_map'].copy() + + # flip first time + results = transform(results) + with pytest.raises(AssertionError): + assert np.equal(original_img, results['img']).all() + with 
pytest.raises(AssertionError): + assert np.equal(original_seg, results['gt_seg_map']).all() + + # flip second time + results = transform(results) + assert np.equal(original_img, results['img']).all() + assert np.equal(original_seg, results['gt_seg_map']).all() + + # test with actual data and flip axes are (0, 1) + # load biomedical 3d img and seg + data_prefix = osp.join(osp.dirname(__file__), '../data') + input_results = dict(img_path=osp.join(data_prefix, 'biomedical.npy')) + biomedical_loader = LoadBiomedicalData(with_seg=True) + data = biomedical_loader(copy.deepcopy(input_results)) + results = data.copy() + + original_img = data['img'].copy() + original_seg = data['gt_seg_map'].copy() + + # test flip axes are (0, 1) + transform = dict(type='BioMedical3DRandomFlip', prob=1, axes=(0, 1)) + transform = TRANSFORMS.build(transform) + + # flip first time + results = transform(results) + with pytest.raises(AssertionError): + assert np.equal(original_img, results['img']).all() + with pytest.raises(AssertionError): + assert np.equal(original_seg, results['gt_seg_map']).all() + + # flip second time + results = transform(results) + assert np.equal(original_img, results['img']).all() + assert np.equal(original_seg, results['gt_seg_map']).all() + + # test transform with flip axes = (1) + transform = dict(type='BioMedical3DRandomFlip', prob=1, axes=(1, )) + transform = TRANSFORMS.build(transform) + results = data.copy() + results = transform(results) + results = transform(results) + assert np.equal(original_img, results['img']).all() + assert np.equal(original_seg, results['gt_seg_map']).all() + + # test transform with swap_label_pairs + transform = dict( + type='BioMedical3DRandomFlip', + prob=1, + axes=(1, 2), + swap_label_pairs=[(0, 1)]) + transform = TRANSFORMS.build(transform) + results = data.copy() + results = transform(results) + + with pytest.raises(AssertionError): + assert np.equal(original_seg, results['gt_seg_map']).all() + + # swap twice + results = transform(results) + assert np.equal(original_img, results['img']).all() + assert np.equal(original_seg, results['gt_seg_map']).all() From 37af545f6b43dd9df5695861893807bf48504e12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=B0=A2=E6=98=95=E8=BE=B0?= Date: Fri, 20 Jan 2023 21:40:13 +0800 Subject: [PATCH 18/25] [Fix] Fix inference api and support setting palette to SegLocalVisualizer (#2475) as title Co-authored-by: MengzhangLI --- mmseg/apis/inference.py | 5 +++-- mmseg/visualization/local_visualizer.py | 16 +++++++++++++--- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/mmseg/apis/inference.py b/mmseg/apis/inference.py index 9abc85d627..d1cc545598 100644 --- a/mmseg/apis/inference.py +++ b/mmseg/apis/inference.py @@ -93,8 +93,9 @@ def init_model(config: Union[str, Path, Config], def _preprare_data(imgs: ImageType, model: BaseSegmentor): cfg = model.cfg - if dict(type='LoadAnnotations') in cfg.test_pipeline: - cfg.test_pipeline.remove(dict(type='LoadAnnotations')) + for t in cfg.test_pipeline: + if t.get('type') == 'LoadAnnotations': + cfg.test_pipeline.remove(t) is_batch = True if not isinstance(imgs, (list, tuple)): diff --git a/mmseg/visualization/local_visualizer.py b/mmseg/visualization/local_visualizer.py index 070b06b73b..27443f2c57 100644 --- a/mmseg/visualization/local_visualizer.py +++ b/mmseg/visualization/local_visualizer.py @@ -1,5 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. 
-from typing import Dict, List, Optional, Tuple +from typing import Dict, List, Optional, Tuple, Union import mmcv import numpy as np @@ -9,6 +9,7 @@ from mmseg.registry import VISUALIZERS from mmseg.structures import SegDataSample +from mmseg.utils import get_classes, get_palette @VISUALIZERS.register_module() @@ -55,14 +56,23 @@ def __init__(self, image: Optional[np.ndarray] = None, vis_backends: Optional[Dict] = None, save_dir: Optional[str] = None, + palette: Optional[Union[str, List]] = None, + classes: Optional[Union[str, List]] = None, + dataset_name: Optional[str] = None, alpha: float = 0.8, **kwargs): super().__init__(name, image, vis_backends, save_dir, **kwargs) - self.alpha = alpha + self.alpha: float = alpha # Set default value. When calling # `SegLocalVisualizer().dataset_meta=xxx`, # it will override the default value. - self.dataset_meta = {} + if dataset_name is None: + dataset_name = 'cityscapes' + classes = classes if classes else get_classes(dataset_name) + palette = palette if palette else get_palette(dataset_name) + assert len(classes) == len( + palette), 'The length of classes should be equal to palette' + self.dataset_meta: dict = {'classes': classes, 'palette': palette} def _draw_sem_seg(self, image: np.ndarray, sem_seg: PixelData, classes: Optional[Tuple[str]], From a115b103237c813197fe4b6f8e3c510afd38b92b Mon Sep 17 00:00:00 2001 From: MengzhangLI Date: Sun, 22 Jan 2023 15:55:30 +0800 Subject: [PATCH 19/25] [Doc] Add EN datasets.md in dev-1.x (#2464) Add English version of `datasets.md`, the Chinese version is in https://github.com/open-mmlab/mmsegmentation/pull/2387. Co-authored-by: MeowZheng --- docs/en/advanced_guides/datasets.md | 387 +++++++++++++++++++++++++++- 1 file changed, 386 insertions(+), 1 deletion(-) diff --git a/docs/en/advanced_guides/datasets.md b/docs/en/advanced_guides/datasets.md index 157ea3aad8..733e2a26d9 100644 --- a/docs/en/advanced_guides/datasets.md +++ b/docs/en/advanced_guides/datasets.md @@ -1 +1,386 @@ -# Datasets +# Dataset + +Dataset classes in MMSegmentation have two functions: (1) load data information after [data preparation](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/docs/en/user_guides/2_dataset_prepare.md) +and (2) send data into [dataset transform pipeline](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/mmseg/datasets/basesegdataset.py#L141) to do [data augmentation](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/docs/zh_cn/advanced_guides/transforms.md). +There are 2 kinds of loaded information: (1) meta information which is original dataset information such as categories (classes) of dataset and their corresponding palette information, (2) data information which includes +the path of dataset images and labels. +The tutorial includes some main interfaces in MMSegmentation 1.x dataset class: methods of loading data information and modifying dataset classes in base dataset class, and the relationship between dataset and the data transform pipeline. 
+
+## Main Interfaces
+
+Take Cityscapes as an example. If you want to run the demo code below, please download the Cityscapes dataset and [preprocess](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/docs/en/user_guides/2_dataset_prepare.md#cityscapes)
+it into the `data` directory first.
+
+Instantiate the Cityscapes training dataset:
+
+```python
+from mmseg.datasets import CityscapesDataset
+from mmseg.utils import register_all_modules
+register_all_modules()
+
+data_root = 'data/cityscapes/'
+data_prefix=dict(img_path='leftImg8bit/train', seg_map_path='gtFine/train')
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations'),
+    dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75),
+    dict(type='RandomFlip', prob=0.5),
+    dict(type='PackSegInputs')
+]
+
+dataset = CityscapesDataset(data_root=data_root, data_prefix=data_prefix, test_mode=False, pipeline=train_pipeline)
+```
+
+Get the length of the training set:
+
+```python
+print(len(dataset))
+
+2975
+```
+
+Get data information: the data information is a `dict` that includes the following keys:
+
+- `'img_path'`: path of the image
+- `'seg_map_path'`: path of the segmentation label
+- `'seg_fields'`: the fields that store segmentation labels
+- `'sample_idx'`: the index of the current sample
+
+There are also `'label_map'` and `'reduce_zero_label'`, whose functions are introduced in the next section.
+
+```python
+# Acquire data information of the first sample in the dataset
+print(dataset.get_data_info(0))
+
+{'img_path': 'data/cityscapes/leftImg8bit/train/aachen/aachen_000000_000019_leftImg8bit.png',
+ 'seg_map_path': 'data/cityscapes/gtFine/train/aachen/aachen_000000_000019_gtFine_labelTrainIds.png',
+ 'label_map': None,
+ 'reduce_zero_label': False,
+ 'seg_fields': [],
+ 'sample_idx': 0}
+```
+
+Get dataset meta information: the MMSegmentation meta information is also a `dict`. It includes a `'classes'` field for the dataset classes and a `'palette'` field for their corresponding colors in visualization, and also has `'label_map'` and `'reduce_zero_label'` fields.
+
+```python
+print(dataset.metainfo)
+
+{'classes': ('road',
+  'sidewalk',
+  'building',
+  'wall',
+  'fence',
+  'pole',
+  'traffic light',
+  'traffic sign',
+  'vegetation',
+  'terrain',
+  'sky',
+  'person',
+  'rider',
+  'car',
+  'truck',
+  'bus',
+  'train',
+  'motorcycle',
+  'bicycle'),
+ 'palette': [[128, 64, 128],
+  [244, 35, 232],
+  [70, 70, 70],
+  [102, 102, 156],
+  [190, 153, 153],
+  [153, 153, 153],
+  [250, 170, 30],
+  [220, 220, 0],
+  [107, 142, 35],
+  [152, 251, 152],
+  [70, 130, 180],
+  [220, 20, 60],
+  [255, 0, 0],
+  [0, 0, 142],
+  [0, 0, 70],
+  [0, 60, 100],
+  [0, 80, 100],
+  [0, 0, 230],
+  [119, 11, 32]],
+ 'label_map': None,
+ 'reduce_zero_label': False}
+```
+
+The return value of the dataset `__getitem__` method is the data sample after data augmentation. Its type is also `dict` and it has two fields: `'inputs'`, corresponding to the image after data augmentation,
+and `'data_samples'`, corresponding to a [`SegDataSample`](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/docs/zh_cn/advanced_guides/structures.md), the new data structure in MMSegmentation 1.x,
+whose `gt_sem_seg` field holds the labels after data augmentation.
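+
+As a quick sanity check, the two fields can be unpacked directly. The following is a minimal sketch, assuming the Cityscapes training dataset instantiated above; the variable names `sample` and `seg_label` are illustrative, not part of the API:
+
+```python
+# Run the train pipeline on one sample by indexing the dataset.
+sample = dataset[0]
+
+# 'inputs' is the augmented image as a (C, H, W) uint8 tensor.
+image = sample['inputs']
+print(image.shape)  # e.g. torch.Size([3, 512, 1024]) after the 512x1024 RandomCrop
+
+# 'data_samples' is a SegDataSample; its gt_sem_seg holds the augmented label.
+seg_label = sample['data_samples'].gt_sem_seg.data
+print(seg_label.shape)  # e.g. torch.Size([1, 512, 1024])
+```
+
+The full printed sample of the first element looks like this: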
+
+```python
+print(dataset[0])
+
+{'inputs': tensor([[[131, 130, 130,  ...,  23,  23,  23],
+          [132, 132, 132,  ...,  23,  22,  23],
+          [134, 133, 133,  ...,  23,  23,  23],
+          ...,
+          [ 66,  67,  67,  ...,  71,  71,  71],
+          [ 66,  67,  66,  ...,  68,  68,  68],
+          [ 67,  67,  66,  ...,  70,  70,  70]],
+
+         [[143, 143, 142,  ...,  28,  28,  29],
+          [145, 145, 145,  ...,  28,  28,  29],
+          [145, 145, 145,  ...,  27,  28,  29],
+          ...,
+          [ 75,  75,  76,  ...,  80,  81,  81],
+          [ 75,  76,  75,  ...,  80,  80,  80],
+          [ 77,  76,  76,  ...,  82,  82,  82]],
+
+         [[126, 125, 126,  ...,  21,  21,  22],
+          [127, 127, 128,  ...,  21,  21,  22],
+          [127, 127, 126,  ...,  21,  21,  22],
+          ...,
+          [ 63,  63,  64,  ...,  69,  69,  70],
+          [ 64,  65,  64,  ...,  69,  69,  69],
+          [ 65,  66,  66,  ...,  72,  71,  71]]], dtype=torch.uint8),
+ 'data_samples':
+    _gt_sem_seg:
+ )}
+```
+
+## BaseSegDataset
+
+As mentioned above, dataset classes share the same functions, so we implemented [`BaseSegDataset`](https://mmsegmentation.readthedocs.io/en/dev-1.x/api.html?highlight=BaseSegDataset#mmseg.datasets.BaseSegDataset) to reuse the common functions.
+It inherits [`BaseDataset` of MMEngine](https://github.com/open-mmlab/mmengine/blob/main/docs/en/advanced_tutorials/basedataset.md) and follows the unified initialization process of OpenMMLab. It supports a highly efficient internal storage format and functions such as
+dataset concatenation and repeated sampling. In MMSegmentation's `BaseSegDataset`, the **method of loading data information** (`load_data_list`) is redefined, and the new `get_label_map` method is added to **modify dataset classes information**.
+
+### Loading Dataset Information
+
+The loaded data information includes the paths of image samples and annotation samples; the detailed implementation can be found in
+[`load_data_list`](https://github.com/open-mmlab/mmsegmentation/blob/163277bfe0fa8fefb63ee5137917fafada1b301c/mmseg/datasets/basesegdataset.py#L231) of `BaseSegDataset` in MMSegmentation.
+There are two main ways to acquire the paths of images and labels:
+
+1. Load file paths according to the directory and suffix of the input images and annotations
+
+If the dataset directory is organized as below, [`load_data_list`](https://github.com/open-mmlab/mmsegmentation/blob/163277bfe0fa8fefb63ee5137917fafada1b301c/mmseg/datasets/basesegdataset.py#L231) can parse the dataset directory structure:
+
+```
+├── data
+│   ├── my_dataset
+│   │   ├── img_dir
+│   │   │   ├── train
+│   │   │   │   ├── xxx{img_suffix}
+│   │   │   │   ├── yyy{img_suffix}
+│   │   │   ├── val
+│   │   │   │   ├── zzz{img_suffix}
+│   │   ├── ann_dir
+│   │   │   ├── train
+│   │   │   │   ├── xxx{seg_map_suffix}
+│   │   │   │   ├── yyy{seg_map_suffix}
+│   │   │   ├── val
+│   │   │   │   ├── zzz{seg_map_suffix}
+```
+
+Here is an example of ADE20K, whose directory structure is shown below:
+
+```
+├── ade
+│   ├── ADEChallengeData2016
+│   │   ├── annotations
+│   │   │   ├── training
+│   │   │   │   ├── ADE_train_00000001.png
+│   │   │   │   ├── ...
+│   │   │   ├── validation
+│   │   │   │   ├── ADE_val_00000001.png
+│   │   │   │   ├── ...
+│   │   ├── images
+│   │   │   ├── training
+│   │   │   │   ├── ADE_train_00000001.jpg
+│   │   │   │   ├── ...
+│   │   │   ├── validation
+│   │   │   │   ├── ADE_val_00000001.jpg
+│   │   │   │   ├── ...
+```
+
+```python
+from mmseg.datasets import ADE20KDataset
+
+ADE20KDataset(data_root = 'data/ade/ADEChallengeData2016',
+    data_prefix=dict(img_path='images/training', seg_map_path='annotations/training'),
+    img_suffix='.jpg',
+    seg_map_suffix='.png',
+    reduce_zero_label=True)
+```
+
+2. Load file paths from an annotation file
+
+A dataset can also load an annotation file that lists the data sample paths of the dataset.
+Take the PascalContext dataset as an example; its input annotation file is:
+
+```python
+2008_000008
+...
+```
+
+The `ann_file` argument needs to be defined at instantiation:
+
+```python
+PascalContextDataset(data_root='data/VOCdevkit/VOC2010/',
+    data_prefix=dict(img_path='JPEGImages', seg_map_path='SegmentationClassContext'),
+    ann_file='ImageSets/SegmentationContext/train.txt')
+```
+
+### Modification of Dataset Classes
+
+- Use the `metainfo` input argument
+
+Meta information is defined as class variables, such as the `METAINFO` variable of Cityscapes:
+
+```python
+class CityscapesDataset(BaseSegDataset):
+    """Cityscapes dataset.
+
+    The ``img_suffix`` is fixed to '_leftImg8bit.png' and ``seg_map_suffix`` is
+    fixed to '_gtFine_labelTrainIds.png' for Cityscapes dataset.
+    """
+    METAINFO = dict(
+        classes=('road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
+                 'traffic light', 'traffic sign', 'vegetation', 'terrain',
+                 'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train',
+                 'motorcycle', 'bicycle'),
+        palette=[[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
+                 [190, 153, 153], [153, 153, 153], [250, 170,
+                                                    30], [220, 220, 0],
+                 [107, 142, 35], [152, 251, 152], [70, 130, 180],
+                 [220, 20, 60], [255, 0, 0], [0, 0, 142], [0, 0, 70],
+                 [0, 60, 100], [0, 80, 100], [0, 0, 230], [119, 11, 32]])
+
+```
+
+Here `'classes'` defines the class names of the Cityscapes dataset annotations. If users only care about some vehicle classes and want to **ignore other classes**,
+the meta information of the dataset can be modified via the input argument `metainfo` when instantiating the Cityscapes dataset:
+
+```python
+from mmseg.datasets import CityscapesDataset
+
+data_root = 'data/cityscapes/'
+data_prefix=dict(img_path='leftImg8bit/train', seg_map_path='gtFine/train')
+# metainfo only keeps the classes below:
+metainfo=dict(classes=('car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle'))
+dataset = CityscapesDataset(data_root=data_root, data_prefix=data_prefix, metainfo=metainfo)
+
+print(dataset.metainfo)
+
+{'classes': ('car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle'),
+ 'palette': [[0, 0, 142],
+  [0, 0, 70],
+  [0, 60, 100],
+  [0, 80, 100],
+  [0, 0, 230],
+  [119, 11, 32],
+  [128, 64, 128],
+  [244, 35, 232],
+  [70, 70, 70],
+  [102, 102, 156],
+  [190, 153, 153],
+  [153, 153, 153],
+  [250, 170, 30],
+  [220, 220, 0],
+  [107, 142, 35],
+  [152, 251, 152],
+  [70, 130, 180],
+  [220, 20, 60],
+  [255, 0, 0]],
+ # pixels whose label index is 255 are ignored when calculating the loss
+ 'label_map': {0: 255,
+  1: 255,
+  2: 255,
+  3: 255,
+  4: 255,
+  5: 255,
+  6: 255,
+  7: 255,
+  8: 255,
+  9: 255,
+  10: 255,
+  11: 255,
+  12: 255,
+  13: 0,
+  14: 1,
+  15: 2,
+  16: 3,
+  17: 4,
+  18: 5},
+ 'reduce_zero_label': False}
+```
+
+The meta information now differs from the default setting of the Cityscapes dataset. Moreover, the `label_map` field is also defined, which is used to modify the label index of each pixel on the segmentation mask.
+The segmentation label re-maps the class information according to `label_map`; [here](https://github.com/open-mmlab/mmsegmentation/blob/dev-1.x/mmseg/datasets/basesegdataset.py#L151) is the detailed implementation:
+
+```python
+gt_semantic_seg_copy = gt_semantic_seg.copy()
+for old_id, new_id in results['label_map'].items():
+    gt_semantic_seg[gt_semantic_seg_copy == old_id] = new_id
+```
+
+- Use the `reduce_zero_label` input argument
+
+To ignore label 0 (as in the ADE20K dataset), we can use the `reduce_zero_label` argument (default `False`) of `BaseSegDataset` and its subclasses.
+When `reduce_zero_label` is `True`, label 0 in the segmentation annotations is set to 255 (MMSegmentation models ignore label 255 when calculating the loss) and the indices of the other labels are decreased by 1:
+
+```python
+gt_semantic_seg[gt_semantic_seg == 0] = 255
+gt_semantic_seg = gt_semantic_seg - 1
+gt_semantic_seg[gt_semantic_seg == 254] = 255
+```
+
+## Dataset and Data Transform Pipeline
+
+If the `pipeline` argument is defined, the return value of the `__getitem__` method is the data sample after data augmentation.
+If the dataset is instantiated without a `pipeline`, the return value of `__getitem__` is the same as that of the `get_data_info` method.
+
+```python
+from mmseg.datasets import CityscapesDataset
+
+data_root = 'data/cityscapes/'
+data_prefix=dict(img_path='leftImg8bit/train', seg_map_path='gtFine/train')
+dataset = CityscapesDataset(data_root=data_root, data_prefix=data_prefix, test_mode=False)
+
+print(dataset[0])
+
+{'img_path': 'data/cityscapes/leftImg8bit/train/aachen/aachen_000000_000019_leftImg8bit.png',
+ 'seg_map_path': 'data/cityscapes/gtFine/train/aachen/aachen_000000_000019_gtFine_labelTrainIds.png',
+ 'label_map': None,
+ 'reduce_zero_label': False,
+ 'seg_fields': [],
+ 'sample_idx': 0}
+```

From b9b5d8bf5243d1616cbc2be7090b89e039dcde01 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=B0=A2=E6=98=95=E8=BE=B0?=
Date: Mon, 30 Jan 2023 10:31:12 +0800
Subject: [PATCH 20/25] [CI] Upgrade the version of isort to fix lint error
 (#2519)

## Motivation

https://github.com/open-mmlab/mmeval/pull/85

---------

Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com>
---
 .circleci/test.yml                     |  9 ++++-----
 .github/workflows/merge_stage_test.yml | 18 +++++++++---------
 .github/workflows/pr_stage_test.yml    | 12 ++++++------
 .pre-commit-config.yaml                |  4 ++--
 requirements/mminstall.txt             |  4 ++--
 5 files changed, 23 insertions(+), 24 deletions(-)

diff --git a/.circleci/test.yml b/.circleci/test.yml
index 20391768f9..d460690065 100644
--- a/.circleci/test.yml
+++ b/.circleci/test.yml
@@ -61,9 +61,9 @@ jobs:
       command: |
         pip install git+https://github.com/open-mmlab/mmengine.git@main
         pip install -U openmim
-        mim install 'mmcv>=2.0.0rc3'
+        mim install 'mmcv==2.0.0rc3'
         pip install git+https://github.com/open-mmlab/mmclassification@dev-1.x
-        pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x
+        mim install 'mmdet==3.0.0rc5'
         pip install -r requirements/tests.txt -r requirements/optional.txt
   - run:
       name: Build and install
@@ -97,7 +97,6 @@ jobs:
       command: |
        git clone -b main --depth 1 https://github.com/open-mmlab/mmengine.git /home/circleci/mmengine
        git clone -b dev-1.x --depth 1 https://github.com/open-mmlab/mmclassification.git /home/circleci/mmclassification
-       git clone -b dev-3.x --depth 1 https://github.com/open-mmlab/mmdetection.git /home/circleci/mmdetection
   - run:
       name: Build Docker image
      command: |
@@ -108,9 +107,9 @@ command: |
        docker exec mmseg pip install -e /mmengine
        docker exec mmseg pip install -U openmim
-       docker exec mmseg mim install 'mmcv>=2.0.0rc3'
+       docker exec mmseg mim install 'mmcv==2.0.0rc3'
        docker exec mmseg pip install -e /mmclassification
-       docker exec mmseg pip install -e /mmdetection
+       docker exec mmseg mim install 'mmdet==3.0.0rc5'
        docker exec mmseg pip install -r requirements/tests.txt -r requirements/optional.txt
   - run:
       name: Build and install
diff --git a/.github/workflows/merge_stage_test.yml b/.github/workflows/merge_stage_test.yml
index c08b3d2a19..2493528076 100644
--- a/.github/workflows/merge_stage_test.yml
+++ b/.github/workflows/merge_stage_test.yml
@@ -44,9 +44,9 @@ jobs:
         python -V
         pip install -U openmim
         pip install git+https://github.com/open-mmlab/mmengine.git
-        mim install 'mmcv>=2.0.0rc3'
+        mim install 'mmcv==2.0.0rc3'
         pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x
-        pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x
+        mim install 'mmdet==3.0.0rc5'
      - name: Install unittest dependencies
        run: pip install -r requirements/tests.txt -r requirements/optional.txt
      - name: Build and install
@@ -100,9 +100,9 @@ jobs:
         python -V
         pip install -U openmim
         pip install git+https://github.com/open-mmlab/mmengine.git
-        mim install 'mmcv>=2.0.0rc3'
+        mim install 'mmcv==2.0.0rc3'
         pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x
-        pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x
+        mim install 'mmdet==3.0.0rc5'
      - name: Install unittest dependencies
        run: pip install -r requirements/tests.txt -r requirements/optional.txt
      - name: Build and install
@@ -166,9 +166,9 @@ jobs:
         python -V
         pip install -U openmim
         pip install git+https://github.com/open-mmlab/mmengine.git
-        mim install 'mmcv>=2.0.0rc3'
+        mim install 'mmcv==2.0.0rc3'
         pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x
-        pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x
+        mim install 'mmdet==3.0.0rc5'
      - name: Install unittest dependencies
        run: pip install -r requirements/tests.txt -r requirements/optional.txt
      - name: Build and install
@@ -209,9 +209,9 @@ jobs:
         python -V
         pip install -U openmim
         pip install git+https://github.com/open-mmlab/mmengine.git
-        mim install 'mmcv>=2.0.0rc3'
+        mim install 'mmcv==2.0.0rc3'
         pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x
-        pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x
+        mim install 'mmdet==3.0.0rc5'
      - name: Install unittest dependencies
        run: pip install -r requirements/tests.txt -r requirements/optional.txt
      - name: Build and install
@@ -246,7 +246,7 @@ jobs:
         pip install git+https://github.com/open-mmlab/mmengine.git
         mim install 'mmcv>=2.0.0rc3'
         pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x
-        pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x
+        mim install 'mmdet==3.0.0rc5'
      - name: Install unittest dependencies
        run: pip install -r requirements/tests.txt -r requirements/optional.txt
      - name: Build and install
diff --git a/.github/workflows/pr_stage_test.yml b/.github/workflows/pr_stage_test.yml
index 66661ec8f3..df73baba8e 100644
--- a/.github/workflows/pr_stage_test.yml
+++ b/.github/workflows/pr_stage_test.yml
@@ -44,9 +44,9 @@ jobs:
        run: |
          pip install -U openmim
          pip install git+https://github.com/open-mmlab/mmengine.git
-         mim install 'mmcv>=2.0.0rc3'
+         mim install 'mmcv==2.0.0rc3'
          pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x
-         pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x
+         mim install 'mmdet==3.0.0rc5'
      - name: Install unittest dependencies
        run: pip install -r requirements/tests.txt -r requirements/optional.txt
      - name: Build and install
@@ -100,9 +100,9 @@ jobs:
         python -V
         pip install -U openmim
         pip install git+https://github.com/open-mmlab/mmengine.git
-        mim install 'mmcv>=2.0.0rc3'
+        mim install 'mmcv==2.0.0rc3'
         pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x
-        pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x
+        mim install 'mmdet==3.0.0rc5'
      - name: Install unittest dependencies
        run: pip install -r requirements/tests.txt -r requirements/optional.txt
      - name: Build and install
@@ -135,9 +135,9 @@ jobs:
         python -V
         pip install -U openmim
         pip install git+https://github.com/open-mmlab/mmengine.git
-        mim install 'mmcv>=2.0.0rc3'
+        mim install 'mmcv==2.0.0rc3'
         pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x
-        pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x
+        mim install 'mmdet==3.0.0rc5'
      - name: Install unittest dependencies
        run: pip install -r requirements/tests.txt -r requirements/optional.txt
      - name: Build and install
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 03b537683a..70952b7c9e 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -3,8 +3,8 @@ repos:
     rev: 5.0.4
     hooks:
       - id: flake8
-  - repo: https://github.com/PyCQA/isort
-    rev: 5.10.1
+  - repo: https://github.com/zhouzaida/isort
+    rev: 5.12.1
     hooks:
       - id: isort
   - repo: https://github.com/pre-commit/mirrors-yapf
diff --git a/requirements/mminstall.txt b/requirements/mminstall.txt
index d27af8dd0f..2c8e9d6a22 100644
--- a/requirements/mminstall.txt
+++ b/requirements/mminstall.txt
@@ -1,4 +1,4 @@
 mmcls>=1.0.0rc0
-mmcv>=2.0.0rc3,<2.1.0
-mmdet>=3.0.0rc4
+mmcv==2.0.0rc3
+mmdet==3.0.0rc5
 mmengine>=0.1.0,<1.0.0

From 6b53ec0a1fa8b248b49c47b2d604c5295eb007bc Mon Sep 17 00:00:00 2001
From: Siddharth Ancha
Date: Sun, 29 Jan 2023 23:02:09 -0500
Subject: [PATCH 21/25] [Doc] Fix minor typo in migration `package.md` (#2518)

Co-authored-by: xiexinch
---
 docs/en/migration/package.md | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/docs/en/migration/package.md b/docs/en/migration/package.md
index ca24df5887..95fefe1310 100644
--- a/docs/en/migration/package.md
+++ b/docs/en/migration/package.md
@@ -96,13 +96,13 @@ OpenMMLab 2.0 defines the `BaseDataset` to function and interface of dataset, an
 
 | Packages/Modules | Changes |
 | :-------------------: | :------------------------------------------------------------------------------------------ |
-| `mmseg.pipelines` | Renamed to `mmseg.transforms` |
-| `mmseg.sampler` | Move in `mmengine.dataset.sampler` |
-| `CustomDataset` | Renamed to `BaseDataset` and inherited from `BaseDataset` in MMEngine |
+| `mmseg.pipelines` | Moved in `mmcv.transforms` |
+| `mmseg.sampler` | Moved in `mmengine.dataset.sampler` |
+| `CustomDataset` | Renamed to `BaseSegDataset` and inherited from `BaseDataset` in MMEngine |
 | `DefaultFormatBundle` | Replaced with `PackSegInputs` |
-| `LoadImageFromFile` | Move in `mmcv.transforms.LoadImageFromFile` |
+| `LoadImageFromFile` | Moved in `mmcv.transforms.LoadImageFromFile` |
 | `LoadAnnotations` | Moved in `mmcv.transforms.LoadAnnotations` |
-| `Resize` | Moved in `mmcv.transforms` and split into `Resize`, `RandomResize` and `RandomChoiseResize` |
+| `Resize` | Moved in `mmcv.transforms` and split into `Resize`, `RandomResize` and `RandomChoiceResize` |
 | `RandomFlip` | Moved in `mmcv.transforms.RandomFlip` |
 | `Pad` | Moved in `mmcv.transforms.Pad` |
 | `Normalize` | Moved in `mmcv.transforms.Normalize` |

From 74e8b89b17ae3975fede50c74b691d556a36d006 Mon Sep 17 00:00:00 2001
From: Siddharth Ancha
Date: Sun, 29 Jan 2023 23:17:15 -0500
Subject: [PATCH 22/25] [Fix] Switch order of `reduce_zero_label` and applying
 `label_map` in 1.x (#2517)

This is an almost exact duplicate of #2500 (that was made to the `master` branch) now applied to the `1.x` branch.

---

## Motivation

I want to fix a bug through this PR. The bug occurs when the two options `reduce_zero_label=True` and custom classes are used together.
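
To make the ordering concrete, here is a minimal standalone numpy sketch of the two operations (illustrative only, not the actual mmseg code; `seg` and `label_map` are made-up names):

```python
import numpy as np

seg = np.array([0, 1, 2, 3])      # raw annotation, 0 is the reduced zero class
label_map = {0: 0, 1: 255, 2: 1}  # remaps *already reduced* ids; 255 = removed

# Step 1: reduce the zero label.
reduced = seg.copy()
reduced[reduced == 0] = 255
reduced = reduced - 1
reduced[reduced == 254] = 255     # raw [0, 1, 2, 3] -> [255, 0, 1, 2]

# Step 2: apply the label map to the reduced ids.
out = reduced.copy()
for old_id, new_id in label_map.items():
    out[reduced == old_id] = new_id  # -> [255, 0, 255, 1]
```

Doing step 2 before step 1 would remap the raw ids instead, so the wrong classes end up removed or kept.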
`reduce_zero_label` remaps the GT seg labels by mapping the zero class to 255, which is ignored. Conceptually, this should occur *before* the `label_map` is applied, which maps *already reduced labels*. However, currently, the `label_map` is applied before the zero label is reduced.

## Modification

The modification is simple:

- I've just interchanged the order of the two operations by moving a few lines from bottom to top.
- I've added a test that passes when the fix is introduced, and fails on the original `master` branch.

## BC-breaking (Optional)

I do not anticipate this change breaking any backward-compatibility.

## Checklist

- [x] Pre-commit or other linting tools are used to fix the potential lint issues.
  - _I've fixed all linting/pre-commit errors._
- [x] The modification is covered by complete unit tests. If not, please add more unit test to ensure the correctness.
  - _I've added a unit test._
- [x] If the modification has potential influence on downstream projects, this PR should be tested with downstream projects, like MMDet or MMDet3D.
  - _I don't think this change affects MMDet or MMDet3D._
- [x] The documentation has been modified accordingly, like docstring or example tutorials.
  - _This change fixes an existing bug and doesn't require modifying any documentation/docstring._
---
 mmseg/datasets/transforms/loading.py | 16 ++++++------
 tests/test_datasets/test_loading.py  | 37 ++++++++++++++++++++++++++++
 2 files changed, 45 insertions(+), 8 deletions(-)

diff --git a/mmseg/datasets/transforms/loading.py b/mmseg/datasets/transforms/loading.py
index ea51e0df59..65c0dfec47 100644
--- a/mmseg/datasets/transforms/loading.py
+++ b/mmseg/datasets/transforms/loading.py
@@ -96,14 +96,6 @@ def _load_seg_map(self, results: dict) -> None:
             img_bytes, flag='unchanged',
             backend=self.imdecode_backend).squeeze().astype(np.uint8)
 
-        # modify if custom classes
-        if results.get('label_map', None) is not None:
-            # Add deep copy to solve bug of repeatedly
-            # replace `gt_semantic_seg`, which is reported in
-            # https://github.com/open-mmlab/mmsegmentation/pull/1445/
-            gt_semantic_seg_copy = gt_semantic_seg.copy()
-            for old_id, new_id in results['label_map'].items():
-                gt_semantic_seg[gt_semantic_seg_copy == old_id] = new_id
         # reduce zero_label
         if self.reduce_zero_label is None:
             self.reduce_zero_label = results['reduce_zero_label']
@@ -116,6 +108,14 @@ def _load_seg_map(self, results: dict) -> None:
             gt_semantic_seg[gt_semantic_seg == 0] = 255
             gt_semantic_seg = gt_semantic_seg - 1
             gt_semantic_seg[gt_semantic_seg == 254] = 255
+        # modify if custom classes
+        if results.get('label_map', None) is not None:
+            # Add deep copy to solve bug of repeatedly
+            # replace `gt_semantic_seg`, which is reported in
+            # https://github.com/open-mmlab/mmsegmentation/pull/1445/
+            gt_semantic_seg_copy = gt_semantic_seg.copy()
+            for old_id, new_id in results['label_map'].items():
+                gt_semantic_seg[gt_semantic_seg_copy == old_id] = new_id
         results['gt_seg_map'] = gt_semantic_seg
         results['seg_fields'].append('gt_seg_map')
diff --git a/tests/test_datasets/test_loading.py b/tests/test_datasets/test_loading.py
index 29a594b4a2..3d5569682a 100644
--- a/tests/test_datasets/test_loading.py
+++ b/tests/test_datasets/test_loading.py
@@ -144,6 +144,43 @@ def test_load_seg_custom_classes(self):
         assert gt_array.dtype == np.uint8
         np.testing.assert_array_equal(gt_array, true_mask)
 
+        # test with removing a class and reducing zero label simultaneously
+        results = dict(
+            img_path=img_path,
+            seg_map_path=gt_path,
+            # since reduce_zero_label is True, there are only 4 real classes.
+            # if the full set of classes is ["A", "B", "C", "D"], the
+            # following label map simulates the dataset option
+            # classes=["A", "C", "D"] which removes class "B".
+            label_map={
+                0: 0,
+                1: 255,  # simulate removing class 1
+                2: 1,
+                3: 2
+            },
+            reduce_zero_label=True,  # reduce zero label
+            seg_fields=[])
+
+        load_imgs = LoadImageFromFile()
+        results = load_imgs(copy.deepcopy(results))
+
+        # reduce zero label
+        load_anns = LoadAnnotations()
+        results = load_anns(copy.deepcopy(results))
+
+        gt_array = results['gt_seg_map']
+
+        true_mask = np.ones_like(gt_array) * 255  # all zeros get mapped to 255
+        true_mask[2:4, 2:4] = 0  # 1s are reduced to class 0 mapped to class 0
+        true_mask[2:4, 6:8] = 255  # 2s are reduced to class 1 which is removed
+        true_mask[6:8, 2:4] = 1  # 3s are reduced to class 2 mapped to class 1
+        true_mask[6:8, 6:8] = 2  # 4s are reduced to class 3 mapped to class 2
+
+        assert results['seg_fields'] == ['gt_seg_map']
+        assert gt_array.shape == (10, 10)
+        assert gt_array.dtype == np.uint8
+        np.testing.assert_array_equal(gt_array, true_mask)
+
         # test no custom classes
         results = dict(
             img_path=img_path,

From d1c0a3efd4f7d96449a7d52c616df7b82f63fe21 Mon Sep 17 00:00:00 2001
From: Siddharth Ancha
Date: Sun, 29 Jan 2023 23:35:55 -0500
Subject: [PATCH 23/25] [Fix] Unfinished label conversion from `-1` to `255`
 in 1.x (#2516)

## Motivation

This is motivated by a previously unfinished PR (#2332). In that PR, the label -1 was changed to 255 in `BaseSegDataset`, which is correct. However, it was changed at only one location. There is another location in `mmseg/datasets/basesegdataset.py` where -1 was still being used that was not converted to 255. I have now converted it to 255.

This is exactly the same as a similar fix to the `master` branch via #2515.

## Modification

I've simply converted the snippet

```python
if new_id != -1:
    new_palette.append(palette[old_id])
```

to

```python
if new_id != 255:
    new_palette.append(palette[old_id])
```

## Checklist

- [x] Pre-commit or other linting tools are used to fix the potential lint issues.
  - _I've fixed all linting/pre-commit errors._
- [x] The modification is covered by complete unit tests. If not, please add more unit test to ensure the correctness.
  - _No unit tests need to be added or were affected._
- [x] If the modification has potential influence on downstream projects, this PR should be tested with downstream projects, like MMDet or MMDet3D.
  - _I don't think this change affects MMDet or MMDet3D._
- [x] The documentation has been modified accordingly, like docstring or example tutorials.
  - _This change fixes an existing bug and doesn't require modifying any documentation/docstring._
---
 mmseg/datasets/basesegdataset.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mmseg/datasets/basesegdataset.py b/mmseg/datasets/basesegdataset.py
index cb11eb64c6..e7f96f7d2c 100644
--- a/mmseg/datasets/basesegdataset.py
+++ b/mmseg/datasets/basesegdataset.py
@@ -220,7 +220,7 @@ def _update_palette(self) -> list:
             # return subset of palette
             for old_id, new_id in sorted(
                     self.label_map.items(), key=lambda x: x[1]):
-                if new_id != -1:
+                if new_id != 255:
                     new_palette.append(palette[old_id])
             new_palette = type(palette)(new_palette)
         else:

From 981f3a488e183f54e3b65d4e2b7e4309472ad43b Mon Sep 17 00:00:00 2001
From: Miao Zheng <76149310+MeowZheng@users.noreply.github.com>
Date: Mon, 30 Jan 2023 13:49:54 +0800
Subject: [PATCH 24/25] [Fix]Fix mmcv version 2.0.0rc3 for win test (#2531)

as title

---
 .github/workflows/merge_stage_test.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/merge_stage_test.yml b/.github/workflows/merge_stage_test.yml
index 2493528076..7728392481 100644
--- a/.github/workflows/merge_stage_test.yml
+++ b/.github/workflows/merge_stage_test.yml
@@ -244,7 +244,7 @@ jobs:
         python -V
         pip install -U openmim
         pip install git+https://github.com/open-mmlab/mmengine.git
-        mim install 'mmcv>=2.0.0rc3'
+        mim install 'mmcv==2.0.0rc3'
         pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x
         mim install 'mmdet==3.0.0rc5'
      - name: Install unittest dependencies

From 18ee41eb7a06938ec97cad698d8d4c2238b25f6b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=B0=A2=E6=98=95=E8=BE=B0?=
Date: Mon, 30 Jan 2023 17:53:07 +0800
Subject: [PATCH 25/25] Bump v1.0.0rc4 (#2529)

---
 README.md                  |  6 ++--
 README_zh-CN.md            |  2 +-
 docker/serve/Dockerfile    |  2 +-
 docs/en/notes/changelog.md | 42 ++++++++++++++++++++++++++++
 docs/en/notes/faq.md       | 56 ++++++++++++++++++++------------------
 mmseg/__init__.py          |  6 ++--
 mmseg/version.py           |  2 +-
 7 files changed, 80 insertions(+), 36 deletions(-)

diff --git a/README.md b/README.md
index f1d25cc0f6..d42be540dc 100644
--- a/README.md
+++ b/README.md
@@ -62,11 +62,11 @@ The 1.x branch works with **PyTorch 1.6+**.
 
 ## What's New
 
-v1.0.0rc3 was released in 31/12/2022.
+v1.0.0rc4 was released on 30/01/2023.
 Please refer to [changelog.md](docs/en/notes/changelog.md) for details and release history.
-- Support test time augmentation ([#2184](https://github.com/open-mmlab/mmsegmentation/pull/2184))
-- Add 'Projects/' folder and the first example project ([#2412](https://github.com/open-mmlab/mmsegmentation/pull/2412))
+- Support ISNet (ICCV'2021) in projects ([#2400](https://github.com/open-mmlab/mmsegmentation/pull/2400))
+- Support HSSN (CVPR'2022) in projects ([#2444](https://github.com/open-mmlab/mmsegmentation/pull/2444))
 
 ## Installation
 
diff --git a/README_zh-CN.md b/README_zh-CN.md
index f31f816834..bbebab5d04 100644
--- a/README_zh-CN.md
+++ b/README_zh-CN.md
@@ -61,7 +61,7 @@ MMSegmentation 是一个基于 PyTorch 的语义分割开源工具箱。它是 O
 
 ## 更新日志
 
-最新版本 v1.0.0rc3 在 2022.12.31 发布。
+最新版本 v1.0.0rc4 在 2023.01.30 发布。
 如果想了解更多版本更新细节和历史信息，请阅读[更新日志](docs/en/notes/changelog.md)。
 
 ## 安装
diff --git a/docker/serve/Dockerfile b/docker/serve/Dockerfile
index cea2694d6f..2dddc6cdf3 100644
--- a/docker/serve/Dockerfile
+++ b/docker/serve/Dockerfile
@@ -4,7 +4,7 @@ ARG CUDNN="8"
 FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel
 
 ARG MMCV="2.0.0rc3"
-ARG MMSEG="1.0.0rc3"
+ARG MMSEG="1.0.0rc4"
 
 ENV PYTHONUNBUFFERED TRUE
 
diff --git a/docs/en/notes/changelog.md b/docs/en/notes/changelog.md
index afed3dd084..ae9e565333 100644
--- a/docs/en/notes/changelog.md
+++ b/docs/en/notes/changelog.md
@@ -1,5 +1,47 @@
 # Changelog of v1.x
 
+## v1.0.0rc4(01/30/2023)
+
+### Highlights
+
+- Support ISNet (ICCV'2021) in projects ([#2400](https://github.com/open-mmlab/mmsegmentation/pull/2400))
+- Support HSSN (CVPR'2022) in projects ([#2444](https://github.com/open-mmlab/mmsegmentation/pull/2444))
+
+### Features
+
+- Add Gaussian Noise and Blur for biomedical data ([#2373](https://github.com/open-mmlab/mmsegmentation/pull/2373))
+- Add BioMedicalRandomGamma ([#2406](https://github.com/open-mmlab/mmsegmentation/pull/2406))
+- Add BioMedical3DPad ([#2383](https://github.com/open-mmlab/mmsegmentation/pull/2383))
+- Add BioMedical3DRandomFlip ([#2404](https://github.com/open-mmlab/mmsegmentation/pull/2404))
+- Add `gt_edge_map` field to SegDataSample ([#2466](https://github.com/open-mmlab/mmsegmentation/pull/2466))
+- Support synapse dataset ([#2432](https://github.com/open-mmlab/mmsegmentation/pull/2432), [#2465](https://github.com/open-mmlab/mmsegmentation/pull/2465))
+- Support Mapillary Vistas Dataset in projects ([#2484](https://github.com/open-mmlab/mmsegmentation/pull/2484))
+- Switch order of `reduce_zero_label` and applying `label_map` ([#2517](https://github.com/open-mmlab/mmsegmentation/pull/2517))
+
+### Documentation
+
+- Add ZN Customized_runtime Doc ([#2502](https://github.com/open-mmlab/mmsegmentation/pull/2502))
+- Add EN datasets.md ([#2464](https://github.com/open-mmlab/mmsegmentation/pull/2464))
+- Fix minor typo in migration `package.md` ([#2518](https://github.com/open-mmlab/mmsegmentation/pull/2518))
+
+### Bug fix
+
+- Fix incorrect `img_shape` value assignment in RandomCrop ([#2469](https://github.com/open-mmlab/mmsegmentation/pull/2469))
+- Fix inference api and support setting palette to SegLocalVisualizer ([#2475](https://github.com/open-mmlab/mmsegmentation/pull/2475))
+- Unfinished label conversion from `-1` to `255` ([#2516](https://github.com/open-mmlab/mmsegmentation/pull/2516))
+
+### New Contributors
+
+- @blueyo0 made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/2373
+- @Fivethousand5k made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/2406
+- @suyanzhou626 made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/2383
+- @unrealMJ made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/2400
+- @Dominic23331 made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/2432
+- @AI-Tianlong made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/2444
+- @morkovka1337 made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/2492
+- @Leeinsn made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/2404
+- @siddancha made their first contribution in https://github.com/open-mmlab/mmsegmentation/pull/2516
+
 ## v1.0.0rc3(31/12/2022)
 
 ### Highlights
diff --git a/docs/en/notes/faq.md b/docs/en/notes/faq.md
index 4903747ed2..48e97429c1 100644
--- a/docs/en/notes/faq.md
+++ b/docs/en/notes/faq.md
@@ -6,33 +6,35 @@ We list some common troubles faced by many users and their corresponding solutio
 
 The compatible MMSegmentation and MMCV versions are as below. Please install the correct version of MMCV to avoid installation issues.
 
-| MMSegmentation version | MMCV version | MMClassification (optional) version | MMDetection (optional) version |
-| :--------------------: | :-------------------------: | :---------------------------------: | :----------------------------: |
-| 1.0.0rc3 | mmcv >= 2.0.0rc3 | mmcls>=1.0.0rc0 | mmdet>=3.0.0rc4 |
-| 1.0.0rc2 | mmcv >= 2.0.0rc3 | mmcls>=1.0.0rc0 | mmdet>=3.0.0rc4 |
-| 1.0.0rc1 | mmcv >= 2.0.0rc1 | mmcls>=1.0.0rc0 | Not required |
-| 1.0.0rc0 | mmcv >= 2.0.0rc1 | mmcls>=1.0.0rc0 | Not required |
-| master | mmcv-full>=1.4.4, \<=1.6.0 | mmcls>=0.20.1, \<=1.0.0 | Not required |
-| 0.24.1 | mmcv-full>=1.4.4, \<=1.6.0 | mmcls>=0.20.1, \<=1.0.0 | Not required |
-| 0.23.0 | mmcv-full>=1.4.4, \<=1.6.0 | mmcls>=0.20.1, \<=1.0.0 | Not required |
-| 0.22.0 | mmcv-full>=1.4.4, \<=1.6.0 | mmcls>=0.20.1, \<=1.0.0 | Not required |
-| 0.21.1 | mmcv-full>=1.4.4, \<=1.6.0 | Not required | Not required |
-| 0.20.2 | mmcv-full>=1.3.13, \<=1.6.0 | Not required | Not required |
-| 0.19.0 | mmcv-full>=1.3.13, \<1.3.17 | Not required | Not required |
-| 0.18.0 | mmcv-full>=1.3.13, \<1.3.17 | Not required | Not required |
-| 0.17.0 | mmcv-full>=1.3.7, \<1.3.17 | Not required | Not required |
-| 0.16.0 | mmcv-full>=1.3.7, \<1.3.17 | Not required | Not required |
-| 0.15.0 | mmcv-full>=1.3.7, \<1.3.17 | Not required | Not required |
-| 0.14.1 | mmcv-full>=1.3.7, \<1.3.17 | Not required | Not required |
-| 0.14.0 | mmcv-full>=1.3.1, \<1.3.2 | Not required | Not required |
-| 0.13.0 | mmcv-full>=1.3.1, \<1.3.2 | Not required | Not required |
-| 0.12.0 | mmcv-full>=1.1.4, \<1.3.2 | Not required | Not required |
-| 0.11.0 | mmcv-full>=1.1.4, \<1.3.0 | Not required | Not required |
-| 0.10.0 | mmcv-full>=1.1.4, \<1.3.0 | Not required | Not required |
-| 0.9.0 | mmcv-full>=1.1.4, \<1.3.0 | Not required | Not required |
-| 0.8.0 | mmcv-full>=1.1.4, \<1.2.0 | Not required | Not required |
-| 0.7.0 | mmcv-full>=1.1.2, \<1.2.0 | Not required | Not required |
-| 0.6.0 | mmcv-full>=1.1.2, \<1.2.0 | Not required | Not required |
+| MMSegmentation version | MMCV version | MMClassification (optional) version | MMDetection (optional) version |
+| :--------------------: | :----------------------------: | :---------------------------------: | :----------------------------: |
+| 1.x/dev-1.x branch | mmcv == 2.0.0rc3 | mmcls>=1.0.0rc0 | mmdet>=3.0.0rc4, \<=3.0.0rc5 |
+| 1.0.0rc4 | mmcv == 2.0.0rc3 | mmcls>=1.0.0rc0 | mmdet>=3.0.0rc4, \<=3.0.0rc5 |
+| 1.0.0rc3 | mmcv == 2.0.0rc3 | mmcls>=1.0.0rc0 | mmdet>=3.0.0rc4, \<=3.0.0rc5 |
+| 1.0.0rc2 | mmcv == 2.0.0rc3 | mmcls>=1.0.0rc0 | mmdet>=3.0.0rc4, \<=3.0.0rc5 |
+| 1.0.0rc1 | mmcv >= 2.0.0rc1, \<=2.0.0rc3 | mmcls>=1.0.0rc0 | Not required |
+| 1.0.0rc0 | mmcv >= 2.0.0rc1, \<=2.0.0rc3 | mmcls>=1.0.0rc0 | Not required |
+| master | mmcv-full>=1.4.4, \<=1.6.0 | mmcls>=0.20.1, \<=1.0.0 | Not required |
+| 0.24.1 | mmcv-full>=1.4.4, \<=1.6.0 | mmcls>=0.20.1, \<=1.0.0 | Not required |
+| 0.23.0 | mmcv-full>=1.4.4, \<=1.6.0 | mmcls>=0.20.1, \<=1.0.0 | Not required |
+| 0.22.0 | mmcv-full>=1.4.4, \<=1.6.0 | mmcls>=0.20.1, \<=1.0.0 | Not required |
+| 0.21.1 | mmcv-full>=1.4.4, \<=1.6.0 | Not required | Not required |
+| 0.20.2 | mmcv-full>=1.3.13, \<=1.6.0 | Not required | Not required |
+| 0.19.0 | mmcv-full>=1.3.13, \<1.3.17 | Not required | Not required |
+| 0.18.0 | mmcv-full>=1.3.13, \<1.3.17 | Not required | Not required |
+| 0.17.0 | mmcv-full>=1.3.7, \<1.3.17 | Not required | Not required |
+| 0.16.0 | mmcv-full>=1.3.7, \<1.3.17 | Not required | Not required |
+| 0.15.0 | mmcv-full>=1.3.7, \<1.3.17 | Not required | Not required |
+| 0.14.1 | mmcv-full>=1.3.7, \<1.3.17 | Not required | Not required |
+| 0.14.0 | mmcv-full>=1.3.1, \<1.3.2 | Not required | Not required |
+| 0.13.0 | mmcv-full>=1.3.1, \<1.3.2 | Not required | Not required |
+| 0.12.0 | mmcv-full>=1.1.4, \<1.3.2 | Not required | Not required |
+| 0.11.0 | mmcv-full>=1.1.4, \<1.3.0 | Not required | Not required |
+| 0.10.0 | mmcv-full>=1.1.4, \<1.3.0 | Not required | Not required |
+| 0.9.0 | mmcv-full>=1.1.4, \<1.3.0 | Not required | Not required |
+| 0.8.0 | mmcv-full>=1.1.4, \<1.2.0 | Not required | Not required |
+| 0.7.0 | mmcv-full>=1.1.2, \<1.2.0 | Not required | Not required |
+| 0.6.0 | mmcv-full>=1.1.2, \<1.2.0 | Not required | Not required |
 
 ## How to know the number of GPUs needed to train the model
 
diff --git a/mmseg/__init__.py b/mmseg/__init__.py
index b395013526..59380655a2 100644
--- a/mmseg/__init__.py
+++ b/mmseg/__init__.py
@@ -8,7 +8,7 @@ from .version import __version__, version_info
 
 MMCV_MIN = '2.0.0rc3'
-MMCV_MAX = '2.1.0'
+MMCV_MAX = '2.0.0rc3'
 
 MMENGINE_MIN = '0.1.0'
 MMENGINE_MAX = '1.0.0'
@@ -58,9 +58,9 @@ def digit_version(version_str: str, length: int = 4):
 
 mmcv_version = digit_version(mmcv.__version__)
 
-assert (mmcv_min_version <= mmcv_version < mmcv_max_version), \
+assert (mmcv_min_version <= mmcv_version <= mmcv_max_version), \
     f'MMCV=={mmcv.__version__} is used but incompatible. ' \
-    f'Please install mmcv>={mmcv_min_version}, <{mmcv_max_version}.'
+    f'Please install mmcv==2.0.0rc3.'
 
 mmengine_min_version = digit_version(MMENGINE_MIN)
 mmengine_max_version = digit_version(MMENGINE_MAX)
diff --git a/mmseg/version.py b/mmseg/version.py
index 6931108fe7..ae61f8bf7b 100644
--- a/mmseg/version.py
+++ b/mmseg/version.py
@@ -1,6 +1,6 @@
 # Copyright (c) Open-MMLab. All rights reserved.
 
-__version__ = '1.0.0rc3'
+__version__ = '1.0.0rc4'
 
 
 def parse_version_info(version_str):