diff --git a/datasets/cityscapes.py b/datasets/cityscapes.py
index f51ee58f0..86929264d 100644
--- a/datasets/cityscapes.py
+++ b/datasets/cityscapes.py
@@ -90,15 +90,15 @@ def __init__(self, root, split='train', mode='fine', target_type='semantic', tra
         if not os.path.isdir(self.images_dir) or not os.path.isdir(self.targets_dir):
             raise RuntimeError('Dataset not found or incomplete. Please make sure all required folders for the'
                                ' specified "split" and "mode" are inside the "root" directory')
-
+
         for city in os.listdir(self.images_dir):
             img_dir = os.path.join(self.images_dir, city)
             target_dir = os.path.join(self.targets_dir, city)
             for file_name in os.listdir(img_dir):
                 self.images.append(os.path.join(img_dir, file_name))
-                target_name = '{}_{}'.format(file_name.split('_leftImg8bit')[0],
-                                             self._get_target_suffix(self.mode, self.target_type))
+                target_name = f"{file_name.split('_leftImg8bit')[0]}_{self._get_target_suffix(self.mode, self.target_type)}"
+
                 self.targets.append(os.path.join(target_dir, target_name))

     @classmethod
@@ -135,13 +135,13 @@ def _load_json(self, path):
         return data

     def _get_target_suffix(self, mode, target_type):
-        if target_type == 'instance':
-            return '{}_instanceIds.png'.format(mode)
-        elif target_type == 'semantic':
-            return '{}_labelIds.png'.format(mode)
-        elif target_type == 'color':
-            return '{}_color.png'.format(mode)
-        elif target_type == 'polygon':
-            return '{}_polygons.json'.format(mode)
+        if target_type == 'color':
+            return f'{mode}_color.png'
         elif target_type == 'depth':
-            return '{}_disparity.png'.format(mode)
\ No newline at end of file
+            return f'{mode}_disparity.png'
+        elif target_type == 'instance':
+            return f'{mode}_instanceIds.png'
+        elif target_type == 'polygon':
+            return f'{mode}_polygons.json'
+        elif target_type == 'semantic':
+            return f'{mode}_labelIds.png'
\ No newline at end of file
diff --git a/datasets/utils.py b/datasets/utils.py
index 6d41011ec..0d94e1909 100644
--- a/datasets/utils.py
+++ b/datasets/utils.py
@@ -38,9 +38,7 @@ def makedir_exist_ok(dirpath):
     try:
         os.makedirs(dirpath)
     except OSError as e:
-        if e.errno == errno.EEXIST:
-            pass
-        else:
+        if e.errno != errno.EEXIST:
             raise

@@ -63,10 +61,10 @@ def download_url(url, root, filename=None, md5=None):
     # downloads file
     if os.path.isfile(fpath) and check_integrity(fpath, md5):
-        print('Using downloaded and verified file: ' + fpath)
+        print(f'Using downloaded and verified file: {fpath}')
     else:
         try:
-            print('Downloading ' + url + ' to ' + fpath)
+            print(f'Downloading {url} to {fpath}')
             urllib.request.urlretrieve(
                 url, fpath,
                 reporthook=gen_bar_updater(tqdm(unit='B', unit_scale=True))
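Side note on `makedir_exist_ok`: the tightened `errno` check is still a Python 2 idiom. On Python 3.2+ the standard library covers this case directly; a possible follow-up, sketched under that version assumption (note that `exist_ok=True` still raises if the path exists as a non-directory):

```python
import os

def makedir_exist_ok(dirpath):
    # exist_ok=True swallows only the "directory already exists" case,
    # mirroring the errno.EEXIST check above.
    os.makedirs(dirpath, exist_ok=True)
```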
diff --git a/datasets/voc.py b/datasets/voc.py
index 2a971bd46..891c07a51 100644
--- a/datasets/voc.py
+++ b/datasets/voc.py
@@ -93,14 +93,14 @@ def __init__(self,
         if year=='2012_aug':
             is_aug = True
             year = '2012'
-
+
         self.root = os.path.expanduser(root)
         self.year = year
         self.url = DATASET_YEAR_DICT[year]['url']
         self.filename = DATASET_YEAR_DICT[year]['filename']
         self.md5 = DATASET_YEAR_DICT[year]['md5']
         self.transform = transform
-
+
         self.image_set = image_set
         base_dir = DATASET_YEAR_DICT[year]['base_dir']
         voc_root = os.path.join(self.root, base_dir)
@@ -112,7 +112,7 @@ def __init__(self,
         if not os.path.isdir(voc_root):
             raise RuntimeError('Dataset not found or corrupted.' +
                                ' You can use download=True to download it')
-
+
         if is_aug and image_set=='train':
             mask_dir = os.path.join(voc_root, 'SegmentationClassAug')
             assert os.path.exists(mask_dir), "SegmentationClassAug not found, please refer to README.md and prepare it manually"
@@ -129,9 +129,9 @@ def __init__(self,
         with open(os.path.join(split_f), "r") as f:
             file_names = [x.strip() for x in f.readlines()]
-
-        self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names]
-        self.masks = [os.path.join(mask_dir, x + ".png") for x in file_names]
+
+        self.images = [os.path.join(image_dir, f'{x}.jpg') for x in file_names]
+        self.masks = [os.path.join(mask_dir, f'{x}.png') for x in file_names]
         assert (len(self.images) == len(self.masks))

     def __getitem__(self, index):
diff --git a/main.py b/main.py
index 129edaeba..ea9f0a4d8 100644
--- a/main.py
+++ b/main.py
@@ -32,10 +32,15 @@ def get_argparser():
                         help="num classes (default: None)")

     # Deeplab Options
-    available_models = sorted(name for name in network.modeling.__dict__ if name.islower() and \
-                              not (name.startswith("__") or name.startswith('_')) and callable(
-                              network.modeling.__dict__[name])
-                              )
+    available_models = sorted(
+        name
+        for name in network.modeling.__dict__
+        if name.islower()
+        and not name.startswith("__")
+        and not name.startswith('_')
+        and callable(network.modeling.__dict__[name])
+    )
+
     parser.add_argument("--model", type=str, default='deeplabv3plus_mobilenet',
                         choices=available_models, help='model name')
     parser.add_argument("--separable_conv", action='store_true', default=False,
@@ -281,7 +286,7 @@ def save_ckpt(path):
             "scheduler_state": scheduler.state_dict(),
             "best_score": best_score,
         }, path)
-        print("Model saved as %s" % path)
+        print(f"Model saved as {path}")

     utils.mkdir('checkpoints')
     # Restore
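For reference, the rebuilt `available_models` comprehension collects the public lowercase factory callables from `network.modeling`. The separate `"__"` test is redundant, since every dunder name also starts with `'_'`; a minimal equivalent for interactive checking (the printed names depend on what the repo registers):

```python
import network

available_models = sorted(
    name
    for name in network.modeling.__dict__
    if name.islower()
    and not name.startswith('_')  # also excludes '__' dunder names
    and callable(network.modeling.__dict__[name])
)
print(available_models)  # e.g. ['deeplabv3_mobilenet', ..., 'deeplabv3plus_resnet101']
```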
diff --git a/metrics/stream_metrics.py b/metrics/stream_metrics.py
index 33b8fe9ac..73321cb21 100644
--- a/metrics/stream_metrics.py
+++ b/metrics/stream_metrics.py
@@ -48,11 +48,10 @@ def to_str(results):

     def _fast_hist(self, label_true, label_pred):
         mask = (label_true >= 0) & (label_true < self.n_classes)
-        hist = np.bincount(
+        return np.bincount(
             self.n_classes * label_true[mask].astype(int) + label_pred[mask],
             minlength=self.n_classes ** 2,
         ).reshape(self.n_classes, self.n_classes)
-        return hist

     def get_results(self):
         """Returns accuracy score evaluation result.
@@ -85,7 +84,7 @@ def reset(self):

 class AverageMeter(object):
     """Computes average values"""
     def __init__(self):
-        self.book = dict()
+        self.book = {}

     def reset_all(self):
         self.book.clear()
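The `_fast_hist` change is behavior-preserving; it is the usual bincount confusion-matrix trick: encode each (true, pred) pair as `true * n_classes + pred`, histogram the codes, then reshape. A standalone sketch with made-up labels:

```python
import numpy as np

n_classes = 3
label_true = np.array([0, 0, 1, 2, 2, 2])
label_pred = np.array([0, 1, 1, 2, 2, 0])
hist = np.bincount(
    n_classes * label_true + label_pred, minlength=n_classes ** 2
).reshape(n_classes, n_classes)  # hist[i, j] = count of (true=i, pred=j)
print(hist)
# [[1 1 0]
#  [0 1 0]
#  [1 0 2]]
```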
diff --git a/network/_deeplab.py b/network/_deeplab.py
index c82f7e970..aa9d935ea 100644
--- a/network/_deeplab.py
+++ b/network/_deeplab.py
@@ -134,11 +134,13 @@ class ASPP(nn.Module):
     def __init__(self, in_channels, atrous_rates):
         super(ASPP, self).__init__()
         out_channels = 256
-        modules = []
-        modules.append(nn.Sequential(
-            nn.Conv2d(in_channels, out_channels, 1, bias=False),
-            nn.BatchNorm2d(out_channels),
-            nn.ReLU(inplace=True)))
+        modules = [
+            nn.Sequential(
+                nn.Conv2d(in_channels, out_channels, 1, bias=False),
+                nn.BatchNorm2d(out_channels),
+                nn.ReLU(inplace=True),
+            )
+        ]

         rate1, rate2, rate3 = tuple(atrous_rates)
         modules.append(ASPPConv(in_channels, out_channels, rate1))
@@ -155,9 +157,7 @@ def __init__(self, in_channels, atrous_rates):
             nn.Dropout(0.1),)

     def forward(self, x):
-        res = []
-        for conv in self.convs:
-            res.append(conv(x))
+        res = [conv(x) for conv in self.convs]
         res = torch.cat(res, dim=1)
         return self.project(res)
diff --git a/network/backbone/hrnetv2.py b/network/backbone/hrnetv2.py
index 2888748e6..1d4fcd7b2 100644
--- a/network/backbone/hrnetv2.py
+++ b/network/backbone/hrnetv2.py
@@ -118,16 +118,28 @@ def __init__(self, stage, output_branches, c):
                         nn.Upsample(scale_factor=(2.0 ** (branch_number - branch_output_number)), mode='nearest'),
                     ))
                 elif branch_number < branch_output_number:
-                    downsampling_fusion = []
-                    for _ in range(branch_output_number - branch_number - 1):
-                        downsampling_fusion.append(nn.Sequential(
-                            nn.Conv2d(c * (2 ** branch_number), c * (2 ** branch_number), kernel_size=3, stride=2,
-                                      padding=1,
-                                      bias=False),
-                            nn.BatchNorm2d(c * (2 ** branch_number), eps=1e-05, momentum=0.1, affine=True,
-                                           track_running_stats=True),
+                    downsampling_fusion = [
+                        nn.Sequential(
+                            nn.Conv2d(
+                                c * (2 ** branch_number),
+                                c * (2 ** branch_number),
+                                kernel_size=3,
+                                stride=2,
+                                padding=1,
+                                bias=False,
+                            ),
+                            nn.BatchNorm2d(
+                                c * (2 ** branch_number),
+                                eps=1e-05,
+                                momentum=0.1,
+                                affine=True,
+                                track_running_stats=True,
+                            ),
                             nn.ReLU(inplace=True),
-                        ))
+                        )
+                        for _ in range(branch_output_number - branch_number - 1)
+                    ]
+
                     downsampling_fusion.append(nn.Sequential(
                         nn.Conv2d(c * (2 ** branch_number), c * (2 ** branch_output_number), kernel_size=3, stride=2,
                                   padding=1,
@@ -224,15 +236,17 @@ def __init__(self, c=48, num_blocks=[1, 4, 3], num_classes=1000):

         # Classifier (extra module if want to use for classification):
         # pool, reduce dimensionality, flatten, connect to linear layer for classification:
-        out_channels = sum([c * 2 ** i for i in range(len(num_blocks)+1)]) # total output channels of HRNetV2
+        out_channels = sum(c * 2 ** i for i in range(len(num_blocks)+1)) # total output channels of HRNetV2
         pool_feature_map = 8
         self.bn_classifier = nn.Sequential(
             nn.Conv2d(out_channels, out_channels // 4, kernel_size=1, bias=False),
-            nn.BatchNorm2d(out_channels // 4, eps=1e-05, affine=True, track_running_stats=True),
+            nn.BatchNorm2d(
+                out_channels // 4, eps=1e-05, affine=True, track_running_stats=True
+            ),
             nn.ReLU(inplace=True),
             nn.AdaptiveAvgPool2d(pool_feature_map),
             nn.Flatten(),
-            nn.Linear(pool_feature_map * pool_feature_map * (out_channels // 4), num_classes),
+            nn.Linear(pool_feature_map ** 2 * (out_channels // 4), num_classes),
         )

     @staticmethod
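The `out_channels` generator expression sums one branch per resolution, with the width doubling at each step (c, 2c, 4c, 8c). With the defaults above (`c=48`, `num_blocks=[1, 4, 3]`) that works out to:

```python
# Channel bookkeeping behind the HRNetV2 classifier head.
c, num_blocks = 48, [1, 4, 3]
out_channels = sum(c * 2 ** i for i in range(len(num_blocks) + 1))
assert out_channels == 48 + 96 + 192 + 384 == 720
```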
diff --git a/network/backbone/mobilenetv2.py b/network/backbone/mobilenetv2.py
index 234dbc7f9..818705c26 100644
--- a/network/backbone/mobilenetv2.py
+++ b/network/backbone/mobilenetv2.py
@@ -76,10 +76,7 @@ def __init__(self, inp, oup, stride, dilation, expand_ratio):

     def forward(self, x):
         x_pad = F.pad(x, self.input_padding)
-        if self.use_res_connect:
-            return x + self.conv(x_pad)
-        else:
-            return self.conv(x_pad)
+        return x + self.conv(x_pad) if self.use_res_connect else self.conv(x_pad)

 class MobileNetV2(nn.Module):
     def __init__(self, num_classes=1000, output_stride=8, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
@@ -95,10 +92,7 @@ def __init__(self, num_classes=1000, output_stride=8, width_mult=1.0, inverted_r
         """
        super(MobileNetV2, self).__init__()
         block = InvertedResidual
-        input_channel = 32
-        last_channel = 1280
         self.output_stride = output_stride
-        current_stride = 1
         if inverted_residual_setting is None:
             inverted_residual_setting = [
                 # t, c, n, s
@@ -113,14 +107,18 @@ def __init__(self, num_classes=1000, output_stride=8, width_mult=1.0, inverted_r

         # only check the first element, assuming user knows t,c,n,s are required
         if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
-            raise ValueError("inverted_residual_setting should be non-empty "
-                             "or a 4-element list, got {}".format(inverted_residual_setting))
+            raise ValueError(
+                f"inverted_residual_setting should be non-empty or a 4-element list, got {inverted_residual_setting}"
+            )
+
+        input_channel = 32

         # building first layer
         input_channel = _make_divisible(input_channel * width_mult, round_nearest)
+        last_channel = 1280
         self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
         features = [ConvBNReLU(3, input_channel, stride=2)]
-        current_stride *= 2
+        current_stride = 2  # the stem conv above has stride 2

         dilation=1
         previous_dilation = 1
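One readability caveat on the `InvertedResidual.forward` rewrite: the conditional expression binds looser than `+`, so the one-liner groups as `(x + self.conv(x_pad)) if self.use_res_connect else self.conv(x_pad)`, which matches the removed branches. A quick check of the grouping:

```python
# "a + b if cond else b" parses as "(a + b) if cond else b".
a, b = 10, 5
assert (a + b if True else b) == 15
assert (a + b if False else b) == 5
```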
diff --git a/network/backbone/resnet.py b/network/backbone/resnet.py
index 366a5721b..4da371a2d 100644
--- a/network/backbone/resnet.py
+++ b/network/backbone/resnet.py
@@ -135,8 +135,10 @@ def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
             # the 2x2 stride with a dilated convolution instead
             replace_stride_with_dilation = [False, False, False]
         if len(replace_stride_with_dilation) != 3:
-            raise ValueError("replace_stride_with_dilation should be None "
-                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
+            raise ValueError(
+                f"replace_stride_with_dilation should be None or a 3-element tuple, got {replace_stride_with_dilation}"
+            )
+
         self.groups = groups
         self.base_width = width_per_group
         self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
@@ -184,14 +186,31 @@ def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
                 norm_layer(planes * block.expansion),
             )

-        layers = []
-        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
-                            self.base_width, previous_dilation, norm_layer))
+        layers = [
+            block(
+                self.inplanes,
+                planes,
+                stride,
+                downsample,
+                self.groups,
+                self.base_width,
+                previous_dilation,
+                norm_layer,
+            )
+        ]
+
         self.inplanes = planes * block.expansion
-        for _ in range(1, blocks):
-            layers.append(block(self.inplanes, planes, groups=self.groups,
-                                base_width=self.base_width, dilation=self.dilation,
-                                norm_layer=norm_layer))
+        layers.extend(
+            block(
+                self.inplanes,
+                planes,
+                groups=self.groups,
+                base_width=self.base_width,
+                dilation=self.dilation,
+                norm_layer=norm_layer,
+            )
+            for _ in range(1, blocks)
+        )

         return nn.Sequential(*layers)
diff --git a/network/modeling.py b/network/modeling.py
index fa18c0ed9..70caedcef 100644
--- a/network/modeling.py
+++ b/network/modeling.py
@@ -11,20 +11,19 @@ def _segm_hrnet(name, backbone_name, num_classes, pretrained_backbone):
     # the final output channels is dependent on highest resolution channel config (c).
     # output of backbone will be the inplanes to assp:
     hrnet_channels = int(backbone_name.split('_')[-1])
-    inplanes = sum([hrnet_channels * 2 ** i for i in range(4)])
-    low_level_planes = 256 # all hrnet version channel output from bottleneck is the same
+    inplanes = sum(hrnet_channels * 2 ** i for i in range(4))
     aspp_dilate = [12, 24, 36] # If follow paper trend, can put [24, 48, 72].

     if name=='deeplabv3plus':
         return_layers = {'stage4': 'out', 'layer1': 'low_level'}
+        low_level_planes = 256 # all hrnet version channel output from bottleneck is the same
         classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
     elif name=='deeplabv3':
         return_layers = {'stage4': 'out'}
         classifier = DeepLabHead(inplanes, num_classes, aspp_dilate)

     backbone = IntermediateLayerGetter(backbone, return_layers=return_layers, hrnet_flag=True)
-    model = DeepLabV3(backbone, classifier)
-    return model
+    return DeepLabV3(backbone, classifier)

 def _segm_resnet(name, backbone_name, num_classes, output_stride, pretrained_backbone):
@@ -38,48 +37,42 @@ def _segm_resnet(name, backbone_name, num_classes, output_stride, pretrained_bac
     backbone = resnet.__dict__[backbone_name](
         pretrained=pretrained_backbone,
         replace_stride_with_dilation=replace_stride_with_dilation)
-
-    inplanes = 2048
-    low_level_planes = 256
+    inplanes = 2048
     if name=='deeplabv3plus':
         return_layers = {'layer4': 'out', 'layer1': 'low_level'}
+        low_level_planes = 256
+
         classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
     elif name=='deeplabv3':
         return_layers = {'layer4': 'out'}
         classifier = DeepLabHead(inplanes , num_classes, aspp_dilate)
     backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
-    model = DeepLabV3(backbone, classifier)
-    return model
+    return DeepLabV3(backbone, classifier)

 def _segm_mobilenet(name, backbone_name, num_classes, output_stride, pretrained_backbone):
-    if output_stride==8:
-        aspp_dilate = [12, 24, 36]
-    else:
-        aspp_dilate = [6, 12, 18]
-
+    aspp_dilate = [12, 24, 36] if output_stride==8 else [6, 12, 18]
     backbone = mobilenetv2.mobilenet_v2(pretrained=pretrained_backbone, output_stride=output_stride)
-
+
     # rename layers
-    backbone.low_level_features = backbone.features[0:4]
+    backbone.low_level_features = backbone.features[:4]
     backbone.high_level_features = backbone.features[4:-1]
     backbone.features = None
     backbone.classifier = None

     inplanes = 320
-    low_level_planes = 24
-
     if name=='deeplabv3plus':
         return_layers = {'high_level_features': 'out', 'low_level_features': 'low_level'}
+        low_level_planes = 24
+
         classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
     elif name=='deeplabv3':
         return_layers = {'high_level_features': 'out'}
         classifier = DeepLabHead(inplanes , num_classes, aspp_dilate)
     backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
-    model = DeepLabV3(backbone, classifier)
-    return model
+    return DeepLabV3(backbone, classifier)

 def _load_model(arch_type, backbone, num_classes, output_stride, pretrained_backbone):
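All three `_segm_*` builders now end in `return DeepLabV3(backbone, classifier)`. A hypothetical smoke test for the public factories they feed (the constructor name and the `pretrained_backbone` flag are assumed from the repo's `<arch>_<backbone>` convention; the head upsamples back to the input resolution):

```python
import torch
from network import modeling

model = modeling.deeplabv3plus_mobilenet(
    num_classes=21, output_stride=16, pretrained_backbone=False
)
model.eval()
with torch.no_grad():
    out = model(torch.randn(1, 3, 513, 513))
print(out.shape)  # expected: torch.Size([1, 21, 513, 513])
```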
""" - if random.random() < self.p: - return F.hflip(img), F.hflip(lbl) - return img, lbl + return (F.hflip(img), F.hflip(lbl)) if random.random() < self.p else (img, lbl) def __repr__(self): - return self.__class__.__name__ + '(p={})'.format(self.p) + return self.__class__.__name__ + f'(p={self.p})' @@ -58,7 +56,7 @@ def __call__(self, img, lbl): return img, lbl def __repr__(self): - format_string = self.__class__.__name__ + '(' + format_string = f'{self.__class__.__name__}(' for t in self.transforms: format_string += '\n' format_string += ' {0}'.format(t) @@ -170,11 +168,11 @@ def __init__(self, degrees, resample=False, expand=False, center=None): if degrees < 0: raise ValueError("If degrees is a single number, it must be positive.") self.degrees = (-degrees, degrees) - else: - if len(degrees) != 2: - raise ValueError("If degrees is a sequence, it must be of len 2.") + elif len(degrees) == 2: self.degrees = degrees + else: + raise ValueError("If degrees is a sequence, it must be of len 2.") self.resample = resample self.expand = expand self.center = center @@ -185,9 +183,7 @@ def get_params(degrees): Returns: sequence: params to be passed to ``rotate`` for random rotation. """ - angle = random.uniform(degrees[0], degrees[1]) - - return angle + return random.uniform(degrees[0], degrees[1]) def __call__(self, img, lbl): """ @@ -227,12 +223,10 @@ def __call__(self, img, lbl): Returns: PIL Image: Randomly flipped image. """ - if random.random() < self.p: - return F.hflip(img), F.hflip(lbl) - return img, lbl + return (F.hflip(img), F.hflip(lbl)) if random.random() < self.p else (img, lbl) def __repr__(self): - return self.__class__.__name__ + '(p={})'.format(self.p) + return self.__class__.__name__ + f'(p={self.p})' class ExtRandomVerticalFlip(object): @@ -253,12 +247,10 @@ def __call__(self, img, lbl): PIL Image: Randomly flipped image. PIL Image: Randomly flipped label. """ - if random.random() < self.p: - return F.vflip(img), F.vflip(lbl) - return img, lbl + return (F.vflip(img), F.vflip(lbl)) if random.random() < self.p else (img, lbl) def __repr__(self): - return self.__class__.__name__ + '(p={})'.format(self.p) + return self.__class__.__name__ + f'(p={self.p})' class ExtPad(object): def __init__(self, diviser=32): @@ -295,7 +287,7 @@ def __call__(self, pic, lbl): return torch.from_numpy( np.array( pic, dtype=np.float32).transpose(2, 0, 1) ), torch.from_numpy( np.array( lbl, dtype=self.target_type) ) def __repr__(self): - return self.__class__.__name__ + '()' + return f'{self.__class__.__name__}()' class ExtNormalize(object): """Normalize a tensor image with mean and standard deviation. @@ -375,7 +367,10 @@ def __call__(self, img, lbl): PIL Image: Cropped image. PIL Image: Cropped label. """ - assert img.size == lbl.size, 'size of img and lbl should be the same. %s, %s'%(img.size, lbl.size) + assert ( + img.size == lbl.size + ), f'size of img and lbl should be the same. 
diff --git a/predict.py b/predict.py
index a5dcfdd88..2e2b82980 100644
--- a/predict.py
+++ b/predict.py
@@ -30,10 +30,15 @@ def get_argparser():
                         choices=['voc', 'cityscapes'], help='Name of training set')

     # Deeplab Options
-    available_models = sorted(name for name in network.modeling.__dict__ if name.islower() and \
-                              not (name.startswith("__") or name.startswith('_')) and callable(
-                              network.modeling.__dict__[name])
-                              )
+    available_models = sorted(
+        name
+        for name in network.modeling.__dict__
+        if name.islower()
+        and not name.startswith("__")
+        and not name.startswith('_')
+        and callable(network.modeling.__dict__[name])
+    )
+
     parser.add_argument("--model", type=str, default='deeplabv3plus_mobilenet',
                         choices=available_models, help='model name')

@@ -51,7 +56,7 @@ def get_argparser():
                         help='batch size for validation (default: 4)')
     parser.add_argument("--crop_size", type=int, default=513)
-
+
     parser.add_argument("--ckpt", default=None, type=str,
                         help="resume from checkpoint")
     parser.add_argument("--gpu_id", type=str, default='0',
@@ -69,31 +74,31 @@ def main():
     os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu_id
     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-    print("Device: %s" % device)
+    print(f"Device: {device}")

     # Setup dataloader
     image_files = []
     if os.path.isdir(opts.input):
         for ext in ['png', 'jpeg', 'jpg', 'JPEG']:
-            files = glob(os.path.join(opts.input, '**/*.%s'%(ext)), recursive=True)
+            files = glob(os.path.join(opts.input, f'**/*.{ext}'), recursive=True)
             if len(files)>0: image_files.extend(files)
     elif os.path.isfile(opts.input):
         image_files.append(opts.input)
-
+
     # Set up model (all models are 'constructed at network.modeling)
     model = network.modeling.__dict__[opts.model](num_classes=opts.num_classes, output_stride=opts.output_stride)
     if opts.separable_conv and 'plus' in opts.model:
         network.convert_to_separable_conv(model.classifier)
     utils.set_bn_momentum(model.backbone, momentum=0.01)
-
+
     if opts.ckpt is not None and os.path.isfile(opts.ckpt):
         # https://github.com/VainF/DeepLabV3Plus-Pytorch/issues/8#issuecomment-605601402, @PytaichukBohdan
         checkpoint = torch.load(opts.ckpt, map_location=torch.device('cpu'))
         model.load_state_dict(checkpoint["model_state"])
         model = nn.DataParallel(model)
         model.to(device)
-        print("Resume model from %s" % opts.ckpt)
+        print(f"Resume model from {opts.ckpt}")
         del checkpoint
     else:
         print("[!] Retrain")
@@ -126,7 +131,7 @@ def main():
             img = Image.open(img_path).convert('RGB')
             img = transform(img).unsqueeze(0) # To tensor of NCHW
             img = img.to(device)
-
+
             pred = model(img).max(1)[1].cpu().numpy()[0] # HW
             colorized_preds = decode_fn(pred).astype('uint8')
             colorized_preds = Image.fromarray(colorized_preds)
diff --git a/utils/ext_transforms.py b/utils/ext_transforms.py
index 201a179fd..7d2badf85 100644
--- a/utils/ext_transforms.py
+++ b/utils/ext_transforms.py
@@ -29,12 +29,10 @@ def __call__(self, img, lbl):
         Returns:
             PIL Image: Randomly flipped image.
         """
-        if random.random() < self.p:
-            return F.hflip(img), F.hflip(lbl)
-        return img, lbl
+        return (F.hflip(img), F.hflip(lbl)) if random.random() < self.p else (img, lbl)

     def __repr__(self):
-        return self.__class__.__name__ + '(p={})'.format(self.p)
+        return self.__class__.__name__ + f'(p={self.p})'

@@ -58,7 +56,7 @@ def __call__(self, img, lbl):
         return img, lbl

     def __repr__(self):
-        format_string = self.__class__.__name__ + '('
+        format_string = f'{self.__class__.__name__}('
         for t in self.transforms:
             format_string += '\n'
             format_string += '    {0}'.format(t)
@@ -170,11 +168,11 @@ def __init__(self, degrees, resample=False, expand=False, center=None):
             if degrees < 0:
                 raise ValueError("If degrees is a single number, it must be positive.")
             self.degrees = (-degrees, degrees)
-        else:
-            if len(degrees) != 2:
-                raise ValueError("If degrees is a sequence, it must be of len 2.")
+        elif len(degrees) == 2:
             self.degrees = degrees
+        else:
+            raise ValueError("If degrees is a sequence, it must be of len 2.")

         self.resample = resample
         self.expand = expand
         self.center = center
@@ -185,9 +183,7 @@ def get_params(degrees):
         Returns:
             sequence: params to be passed to ``rotate`` for random rotation.
         """
-        angle = random.uniform(degrees[0], degrees[1])
-
-        return angle
+        return random.uniform(degrees[0], degrees[1])

     def __call__(self, img, lbl):
         """
@@ -227,12 +223,10 @@ def __call__(self, img, lbl):
         Returns:
             PIL Image: Randomly flipped image.
         """
-        if random.random() < self.p:
-            return F.hflip(img), F.hflip(lbl)
-        return img, lbl
+        return (F.hflip(img), F.hflip(lbl)) if random.random() < self.p else (img, lbl)

     def __repr__(self):
-        return self.__class__.__name__ + '(p={})'.format(self.p)
+        return self.__class__.__name__ + f'(p={self.p})'

 class ExtRandomVerticalFlip(object):
@@ -253,12 +247,10 @@ def __call__(self, img, lbl):
             PIL Image: Randomly flipped image.
             PIL Image: Randomly flipped label.
         """
-        if random.random() < self.p:
-            return F.vflip(img), F.vflip(lbl)
-        return img, lbl
+        return (F.vflip(img), F.vflip(lbl)) if random.random() < self.p else (img, lbl)

     def __repr__(self):
-        return self.__class__.__name__ + '(p={})'.format(self.p)
+        return self.__class__.__name__ + f'(p={self.p})'

 class ExtPad(object):
     def __init__(self, diviser=32):
@@ -295,7 +287,7 @@ def __call__(self, pic, lbl):
         return torch.from_numpy( np.array( pic, dtype=np.float32).transpose(2, 0, 1) ), torch.from_numpy( np.array( lbl, dtype=self.target_type) )

     def __repr__(self):
-        return self.__class__.__name__ + '()'
+        return f'{self.__class__.__name__}()'

 class ExtNormalize(object):
     """Normalize a tensor image with mean and standard deviation.
@@ -375,7 +367,10 @@ def __call__(self, img, lbl):
             PIL Image: Cropped image.
             PIL Image: Cropped label.
         """
-        assert img.size == lbl.size, 'size of img and lbl should be the same. %s, %s'%(img.size, lbl.size)
+        assert (
+            img.size == lbl.size
+        ), f'size of img and lbl should be the same. {img.size}, {lbl.size}'
+
         if self.padding > 0:
             img = F.pad(img, self.padding)
             lbl = F.pad(lbl, self.padding)
@@ -455,15 +450,18 @@ def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
     def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):
         if isinstance(value, numbers.Number):
             if value < 0:
-                raise ValueError("If {} is a single number, it must be non negative.".format(name))
+                raise ValueError(f"If {name} is a single number, it must be non negative.")
             value = [center - value, center + value]
             if clip_first_on_zero:
                 value[0] = max(value[0], 0)
         elif isinstance(value, (tuple, list)) and len(value) == 2:
             if not bound[0] <= value[0] <= value[1] <= bound[1]:
-                raise ValueError("{} values should be between {}".format(name, bound))
+                raise ValueError(f"{name} values should be between {bound}")
         else:
-            raise TypeError("{} should be a single number or a list/tuple with lenght 2.".format(name))
+            raise TypeError(
+                f"{name} should be a single number or a list/tuple with length 2."
+            )
+
         # if value is 0 or (1., 1.) for brightness/contrast/saturation
         # or (0., 0.) for hue, do nothing
@@ -500,9 +498,7 @@ def get_params(brightness, contrast, saturation, hue):
             transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))

         random.shuffle(transforms)
-        transform = Compose(transforms)
-
-        return transform
+        return Compose(transforms)

     def __call__(self, img, lbl):
         """
@@ -517,7 +513,7 @@ def __call__(self, img, lbl):
         return transform(img), lbl

     def __repr__(self):
-        format_string = self.__class__.__name__ + '('
+        format_string = f'{self.__class__.__name__}('
         format_string += 'brightness={0}'.format(self.brightness)
         format_string += ', contrast={0}'.format(self.contrast)
         format_string += ', saturation={0}'.format(self.saturation)
@@ -539,7 +535,7 @@ def __call__(self, img):
         return self.lambd(img)

     def __repr__(self):
-        return self.__class__.__name__ + '()'
+        return f'{self.__class__.__name__}()'

 class Compose(object):
@@ -564,7 +560,7 @@ def __call__(self, img):
         return img

     def __repr__(self):
-        format_string = self.__class__.__name__ + '('
+        format_string = f'{self.__class__.__name__}('
         for t in self.transforms:
             format_string += '\n'
             format_string += '    {0}'.format(t)
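All of the flip one-liners preserve the core contract of the paired `Ext*` transforms: a single random draw decides the flip for image and label together. A tiny sketch against the class shown above (import path assumed from the repo layout):

```python
from PIL import Image
from utils.ext_transforms import ExtRandomHorizontalFlip

img = Image.new('RGB', (4, 2))
lbl = Image.new('L', (4, 2))
# p=1.0 forces the flip, so both outputs come back flipped in lockstep.
img2, lbl2 = ExtRandomHorizontalFlip(p=1.0)(img, lbl)
assert img2.size == img.size and lbl2.size == lbl.size
```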
diff --git a/utils/loss.py b/utils/loss.py
index 64a5f542c..36c4c542d 100644
--- a/utils/loss.py
+++ b/utils/loss.py
@@ -15,7 +15,4 @@ def forward(self, inputs, targets):
             inputs, targets, reduction='none', ignore_index=self.ignore_index)
         pt = torch.exp(-ce_loss)
         focal_loss = self.alpha * (1-pt)**self.gamma * ce_loss
-        if self.size_average:
-            return focal_loss.mean()
-        else:
-            return focal_loss.sum()
\ No newline at end of file
+        return focal_loss.mean() if self.size_average else focal_loss.sum()
\ No newline at end of file
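The collapsed return keeps both reductions. As a sanity check, with `gamma=0` and `alpha=1` the focal term vanishes and the loss should equal plain cross-entropy (the `FocalLoss` class name and its defaults are assumed from `utils/loss.py`):

```python
import torch
import torch.nn.functional as F
from utils.loss import FocalLoss

logits = torch.randn(4, 3)
targets = torch.tensor([0, 1, 2, 1])
fl = FocalLoss(alpha=1, gamma=0)(logits, targets)  # size_average=True -> mean
assert torch.allclose(fl, F.cross_entropy(logits, targets))
```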
diff --git a/utils/visualizer.py b/utils/visualizer.py
index d1280e2f7..b2d023c50 100644
--- a/utils/visualizer.py
+++ b/utils/visualizer.py
@@ -20,9 +20,9 @@ def vis_scalar(self, name, x, y, opts=None):
             x = [x]
         if not isinstance(y, list):
             y = [y]
-
+
         if self.id is not None:
-            name = "[%s]"%self.id + name
+            name = f"[{self.id}]" + name
         default_opts = { 'title': name }
         if opts is not None:
             default_opts.update(opts)
@@ -37,9 +37,9 @@ def vis_image(self, name, img, env=None, opts=None):
         """ vis image in visdom
         """
         if env is None:
-            env = self.env
+            env = self.env
         if self.id is not None:
-            name = "[%s]"%self.id + name
+            name = f"[{self.id}]" + name
         #win = self.cur_win.get(name, None)
         default_opts = { 'title': name }
         if opts is not None:
@@ -50,13 +51,14 @@ def vis_image(self, name, img, env=None, opts=None):
         # self.cur_win[name] = self.vis.image( img=img, opts=default_opts, env=env )

     def vis_table(self, name, tbl, opts=None):
-        #win = self.cur_win.get(name, None)
-
         tbl_str = "<table width=\"100%\"> "
         tbl_str += "<tr> \
                  <th>Term</th> \
                  <th>Value</th> \
                  </tr>"
         for k, v in tbl.items():
             tbl_str += "<tr> \
                        <td>%s</td> \