diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9621a1fe95..86c01b09dc 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -30,22 +30,14 @@ repos: rev: v0.7.0 hooks: - id: ruff - args: - - --fix - - - repo: https://github.com/asottile/pyupgrade - rev: v3.19.0 - hooks: - - id: pyupgrade - args: [--py39-plus, --keep-runtime-typing] - name: Upgrade code with exceptions + args: ["--fix"] exclude: | (?x)( ^versioneer.py| ^monai/_version.py| - ^monai/networks/| # avoid typing rewrites - ^monai/apps/detection/utils/anchor_utils.py| # avoid typing rewrites - ^tests/test_compute_panoptic_quality.py # avoid typing rewrites + ^monai/networks/| # TODO: avoid typing rewrites + ^monai/apps/detection/utils/anchor_utils.py| # TODO: avoid typing rewrites + ^tests/test_compute_panoptic_quality.py # TODO: avoid typing rewrites ) - repo: https://github.com/asottile/yesqa diff --git a/monai/apps/deepgrow/dataset.py b/monai/apps/deepgrow/dataset.py index 802d86e0c7..e597188e74 100644 --- a/monai/apps/deepgrow/dataset.py +++ b/monai/apps/deepgrow/dataset.py @@ -201,14 +201,9 @@ def _save_data_2d(vol_idx, vol_image, vol_label, dataset_dir, relative_path): logging.warning(f"Unique labels {unique_labels_count} exceeds 20. Please check if this is correct.") logging.info( - "{} => Image Shape: {} => {}; Label Shape: {} => {}; Unique Labels: {}".format( - vol_idx, - vol_image.shape, - image_count, - vol_label.shape if vol_label is not None else None, - label_count, - unique_labels_count, - ) + f"{vol_idx} => Image Shape: {vol_image.shape} => {image_count};" + f" Label Shape: {vol_label.shape if vol_label is not None else None} => {label_count};" + f" Unique Labels: {unique_labels_count}" ) return data_list @@ -259,13 +254,8 @@ def _save_data_3d(vol_idx, vol_image, vol_label, dataset_dir, relative_path): logging.warning(f"Unique labels {unique_labels_count} exceeds 20. 
Please check if this is correct.") logging.info( - "{} => Image Shape: {} => {}; Label Shape: {} => {}; Unique Labels: {}".format( - vol_idx, - vol_image.shape, - image_count, - vol_label.shape if vol_label is not None else None, - label_count, - unique_labels_count, - ) + f"{vol_idx} => Image Shape: {vol_image.shape} => {image_count};" + f" Label Shape: {vol_label.shape if vol_label is not None else None} => {label_count};" + f" Unique Labels: {unique_labels_count}" ) return data_list diff --git a/monai/apps/nnunet/nnunet_bundle.py b/monai/apps/nnunet/nnunet_bundle.py index df8f09bf4b..47a0755ddf 100644 --- a/monai/apps/nnunet/nnunet_bundle.py +++ b/monai/apps/nnunet/nnunet_bundle.py @@ -13,7 +13,7 @@ import os import shutil from pathlib import Path -from typing import Any, Optional, Union +from typing import Any import numpy as np import torch @@ -36,9 +36,9 @@ def get_nnunet_trainer( - dataset_name_or_id: Union[str, int], + dataset_name_or_id: str | int, configuration: str, - fold: Union[int, str], + fold: int | str, trainer_class_name: str = "nnUNetTrainer", plans_identifier: str = "nnUNetPlans", use_compressed_data: bool = False, @@ -46,7 +46,7 @@ def get_nnunet_trainer( only_run_validation: bool = False, disable_checkpointing: bool = False, device: str = "cuda", - pretrained_model: Optional[str] = None, + pretrained_model: str | None = None, ) -> Any: # type: ignore """ Get the nnUNet trainer instance based on the provided configuration. @@ -166,7 +166,7 @@ class ModelnnUNetWrapper(torch.nn.Module): restoring network architecture, and setting up the predictor for inference. 
""" - def __init__(self, predictor: object, model_folder: Union[str, Path], model_name: str = "model.pt"): # type: ignore + def __init__(self, predictor: object, model_folder: str | Path, model_name: str = "model.pt"): # type: ignore super().__init__() self.predictor = predictor @@ -294,7 +294,7 @@ def forward(self, x: MetaTensor) -> MetaTensor: return MetaTensor(out_tensor, meta=x.meta) -def get_nnunet_monai_predictor(model_folder: Union[str, Path], model_name: str = "model.pt") -> ModelnnUNetWrapper: +def get_nnunet_monai_predictor(model_folder: str | Path, model_name: str = "model.pt") -> ModelnnUNetWrapper: """ Initializes and returns a `nnUNetMONAIModelWrapper` containing the corresponding `nnUNetPredictor`. The model folder should contain the following files, created during training: @@ -426,9 +426,9 @@ def get_network_from_nnunet_plans( plans_file: str, dataset_file: str, configuration: str, - model_ckpt: Optional[str] = None, + model_ckpt: str | None = None, model_key_in_ckpt: str = "model", -) -> Union[torch.nn.Module, Any]: +) -> torch.nn.Module | Any: """ Load and initialize a nnUNet network based on nnUNet plans and configuration. 
@@ -518,7 +518,7 @@ def convert_monai_bundle_to_nnunet(nnunet_config: dict, bundle_root_folder: str, from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name def subfiles( - folder: Union[str, Path], prefix: Optional[str] = None, suffix: Optional[str] = None, sort: bool = True + folder: str | Path, prefix: str | None = None, suffix: str | None = None, sort: bool = True ) -> list[str]: res = [ i.name diff --git a/monai/handlers/stats_handler.py b/monai/handlers/stats_handler.py index 214872fef4..1dafde0f54 100644 --- a/monai/handlers/stats_handler.py +++ b/monai/handlers/stats_handler.py @@ -260,7 +260,7 @@ def _default_iteration_print(self, engine: Engine) -> None: "ignoring non-scalar output in StatsHandler," " make sure `output_transform(engine.state.output)` returns" " a scalar or dictionary of key and scalar pairs to avoid this warning." - " {}:{}".format(name, type(value)) + f" {name}:{type(value)}" ) continue # not printing multi dimensional output out_str += self.key_var_format.format(name, value.item() if isinstance(value, torch.Tensor) else value) @@ -273,7 +273,7 @@ def _default_iteration_print(self, engine: Engine) -> None: "ignoring non-scalar output in StatsHandler," " make sure `output_transform(engine.state.output)` returns" " a scalar or a dictionary of key and scalar pairs to avoid this warning." - " {}".format(type(loss)) + f" {type(loss)}" ) if not out_str: diff --git a/monai/handlers/tensorboard_handlers.py b/monai/handlers/tensorboard_handlers.py index 44a03710de..20e2d74c8c 100644 --- a/monai/handlers/tensorboard_handlers.py +++ b/monai/handlers/tensorboard_handlers.py @@ -257,7 +257,7 @@ def _default_iteration_writer(self, engine: Engine, writer: SummaryWriter | Summ "ignoring non-scalar output in TensorBoardStatsHandler," " make sure `output_transform(engine.state.output)` returns" " a scalar or dictionary of key and scalar pairs to avoid this warning." 
- " {}:{}".format(name, type(value)) + f" {name}:{type(value)}" ) continue # not plot multi dimensional output self._write_scalar( @@ -280,7 +280,7 @@ def _default_iteration_writer(self, engine: Engine, writer: SummaryWriter | Summ "ignoring non-scalar output in TensorBoardStatsHandler," " make sure `output_transform(engine.state.output)` returns" " a scalar or a dictionary of key and scalar pairs to avoid this warning." - " {}".format(type(loss)) + f" {type(loss)}" ) writer.flush() diff --git a/monai/losses/adversarial_loss.py b/monai/losses/adversarial_loss.py index c7be79243f..b2c27a41ee 100644 --- a/monai/losses/adversarial_loss.py +++ b/monai/losses/adversarial_loss.py @@ -57,8 +57,7 @@ def __init__( if criterion.lower() not in list(AdversarialCriterions): raise ValueError( - "Unrecognised criterion entered for Adversarial Loss. Must be one in: %s" - % ", ".join(AdversarialCriterions) + f"Unrecognised criterion entered for Adversarial Loss. Must be one in: {', '.join(AdversarialCriterions)}" ) # Depending on the criterion, a different activation layer is used. diff --git a/monai/losses/dice.py b/monai/losses/dice.py index ed88100edd..948749606b 100644 --- a/monai/losses/dice.py +++ b/monai/losses/dice.py @@ -494,7 +494,7 @@ def __init__( raise ValueError(f"dist_matrix must be C x C, got {dist_matrix.shape[0]} x {dist_matrix.shape[1]}.") if weighting_mode not in ["default", "GDL"]: - raise ValueError("weighting_mode must be either 'default' or 'GDL, got %s." 
% weighting_mode) + raise ValueError(f"weighting_mode must be either 'default' or 'GDL', got {weighting_mode}.") self.m = dist_matrix if isinstance(self.m, np.ndarray): diff --git a/monai/losses/ds_loss.py b/monai/losses/ds_loss.py index 6a604aa22d..ef359bcfd0 100644 --- a/monai/losses/ds_loss.py +++ b/monai/losses/ds_loss.py @@ -11,7 +11,6 @@ from __future__ import annotations -from typing import Union import torch import torch.nn.functional as F @@ -70,7 +69,7 @@ def get_loss(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: target = F.interpolate(target, size=input.shape[2:], mode=self.interp_mode) return self.loss(input, target) # type: ignore[no-any-return] - def forward(self, input: Union[None, torch.Tensor, list[torch.Tensor]], target: torch.Tensor) -> torch.Tensor: + def forward(self, input: None | torch.Tensor | list[torch.Tensor], target: torch.Tensor) -> torch.Tensor: if isinstance(input, (list, tuple)): weights = self.get_weights(levels=len(input)) loss = torch.tensor(0, dtype=torch.float, device=target.device) diff --git a/monai/losses/focal_loss.py b/monai/losses/focal_loss.py index 28d1c0cdc9..a1145c521e 100644 --- a/monai/losses/focal_loss.py +++ b/monai/losses/focal_loss.py @@ -13,7 +13,6 @@ import warnings from collections.abc import Sequence -from typing import Optional import torch import torch.nn.functional as F @@ -153,7 +152,7 @@ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: if target.shape != input.shape: raise ValueError(f"ground truth has different shape ({target.shape}) from input ({input.shape})") - loss: Optional[torch.Tensor] = None + loss: torch.Tensor | None = None input = input.float() target = target.float() if self.use_softmax: @@ -203,7 +202,7 @@ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: def softmax_focal_loss( - input: torch.Tensor, target: torch.Tensor, gamma: float = 2.0, alpha: Optional[float] = None + input: torch.Tensor, target: 
torch.Tensor, gamma: float = 2.0, alpha: float | None = None ) -> torch.Tensor: """ FL(pt) = -alpha * (1 - pt)**gamma * log(pt) @@ -225,7 +224,7 @@ def softmax_focal_loss( def sigmoid_focal_loss( - input: torch.Tensor, target: torch.Tensor, gamma: float = 2.0, alpha: Optional[float] = None + input: torch.Tensor, target: torch.Tensor, gamma: float = 2.0, alpha: float | None = None ) -> torch.Tensor: """ FL(pt) = -alpha * (1 - pt)**gamma * log(pt) diff --git a/monai/losses/perceptual.py b/monai/losses/perceptual.py index 2ae03bc8dc..7774c39f10 100644 --- a/monai/losses/perceptual.py +++ b/monai/losses/perceptual.py @@ -95,8 +95,7 @@ def __init__( if network_type.lower() not in list(PercetualNetworkType): raise ValueError( - "Unrecognised criterion entered for Adversarial Loss. Must be one in: %s" - % ", ".join(PercetualNetworkType) + f"Unrecognised criterion entered for Perceptual Loss. Must be one in: {', '.join(PercetualNetworkType)}" ) if cache_dir: diff --git a/monai/losses/spatial_mask.py b/monai/losses/spatial_mask.py index a4c16236a2..0f823410dd 100644 --- a/monai/losses/spatial_mask.py +++ b/monai/losses/spatial_mask.py @@ -14,7 +14,7 @@ import inspect import warnings from collections.abc import Callable -from typing import Any, Optional +from typing import Any import torch from torch.nn.modules.loss import _Loss @@ -47,7 +47,7 @@ def __init__( if not callable(self.loss): raise ValueError("The loss function is not callable.") - def forward(self, input: torch.Tensor, target: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor: + def forward(self, input: torch.Tensor, target: torch.Tensor, mask: torch.Tensor | None = None) -> torch.Tensor: """ Args: input: the shape should be BNH[WD]. 
diff --git a/monai/losses/sure_loss.py b/monai/losses/sure_loss.py index fa8820885d..30aed2a4dc 100644 --- a/monai/losses/sure_loss.py +++ b/monai/losses/sure_loss.py @@ -11,7 +11,7 @@ from __future__ import annotations -from typing import Callable, Optional +from typing import Callable import torch import torch.nn as nn @@ -42,10 +42,10 @@ def sure_loss_function( operator: Callable, x: torch.Tensor, y_pseudo_gt: torch.Tensor, - y_ref: Optional[torch.Tensor] = None, - eps: Optional[float] = -1.0, - perturb_noise: Optional[torch.Tensor] = None, - complex_input: Optional[bool] = False, + y_ref: torch.Tensor | None = None, + eps: float = -1.0, + perturb_noise: torch.Tensor | None = None, + complex_input: bool = False, ) -> torch.Tensor: """ Args: @@ -131,7 +131,7 @@ class SURELoss(_Loss): (https://arxiv.org/pdf/2310.01799.pdf) """ - def __init__(self, perturb_noise: Optional[torch.Tensor] = None, eps: Optional[float] = None) -> None: + def __init__(self, perturb_noise: torch.Tensor | None = None, eps: float | None = None) -> None: """ Args: perturb_noise (torch.Tensor, optional): The noise vector of shape @@ -149,8 +149,8 @@ def forward( operator: Callable, x: torch.Tensor, y_pseudo_gt: torch.Tensor, - y_ref: Optional[torch.Tensor] = None, - complex_input: Optional[bool] = False, + y_ref: torch.Tensor | None = None, + complex_input: bool = False, ) -> torch.Tensor: """ Args: diff --git a/monai/metrics/utils.py b/monai/metrics/utils.py index 972ec0061e..606a54669b 100644 --- a/monai/metrics/utils.py +++ b/monai/metrics/utils.py @@ -13,7 +13,7 @@ import warnings from collections.abc import Iterable, Sequence -from functools import lru_cache, partial +from functools import cache, partial from types import ModuleType from typing import Any @@ -465,7 +465,7 @@ def prepare_spacing( ENCODING_KERNEL = {2: [[8, 4], [2, 1]], 3: [[[128, 64], [32, 16]], [[8, 4], [2, 1]]]} -@lru_cache(maxsize=None) +@cache def _get_neighbour_code_to_normals_table(device=None): """ returns a lookup 
table. For every binary neighbour code (2x2x2 neighbourhood = 8 neighbours = 8 bits = 256 codes) diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py index 2ac37f2f81..3dc7897feb 100644 --- a/monai/transforms/utility/array.py +++ b/monai/transforms/utility/array.py @@ -21,7 +21,7 @@ from collections.abc import Hashable, Mapping, Sequence from copy import deepcopy from functools import partial -from typing import Any, Callable, Union +from typing import Any, Callable import numpy as np import torch @@ -1217,7 +1217,7 @@ def __init__(self, name: str, *args, **kwargs) -> None: transform, _ = optional_import("torchio.transforms", "0.18.0", min_version, name=name) self.trans = transform(*args, **kwargs) - def __call__(self, img: Union[NdarrayOrTensor, Mapping[Hashable, NdarrayOrTensor]]): + def __call__(self, img: NdarrayOrTensor | Mapping[Hashable, NdarrayOrTensor]): """ Args: img: an instance of torchio.Subject, torchio.Image, numpy.ndarray, torch.Tensor, SimpleITK.Image, @@ -1249,7 +1249,7 @@ def __init__(self, name: str, *args, **kwargs) -> None: transform, _ = optional_import("torchio.transforms", "0.18.0", min_version, name=name) self.trans = transform(*args, **kwargs) - def __call__(self, img: Union[NdarrayOrTensor, Mapping[Hashable, NdarrayOrTensor]]): + def __call__(self, img: NdarrayOrTensor | Mapping[Hashable, NdarrayOrTensor]): """ Args: img: an instance of torchio.Subject, torchio.Image, numpy.ndarray, torch.Tensor, SimpleITK.Image, diff --git a/pyproject.toml b/pyproject.toml index add6642dba..ef85068ad7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,6 +46,7 @@ target-version = "py39" select = [ "E", "F", "W", # flake8 "NPY", # NumPy specific rules + "UP", # pyupgrade ] extend-ignore = [ "E741", # ambiguous variable name diff --git a/tests/profile_subclass/profiling.py b/tests/profile_subclass/profiling.py index ffa6a8b17d..18aecea2fb 100644 --- a/tests/profile_subclass/profiling.py +++ 
b/tests/profile_subclass/profiling.py @@ -63,9 +63,8 @@ def main(): b_min, b_avg, b_med, b_std = bench(tensor_1, tensor_2) print( - "Type {} time (microseconds): min: {}, avg: {}, median: {}, and std {}.".format( - t.__name__, (10**6 * b_min), (10**6) * b_avg, (10**6) * b_med, (10**6) * b_std - ) + f"Type {t.__name__} time (microseconds):" + f" min: {10**6 * b_min}, avg: {(10**6) * b_avg}, median: {(10**6) * b_med}, and std {(10**6) * b_std}." )