diff --git a/monai/apps/auto3dseg/bundle_gen.py b/monai/apps/auto3dseg/bundle_gen.py index d053ea37ef..b62b29d15c 100644 --- a/monai/apps/auto3dseg/bundle_gen.py +++ b/monai/apps/auto3dseg/bundle_gen.py @@ -264,8 +264,7 @@ def _run_cmd(self, cmd: str, devices_info: str = "") -> subprocess.CompletedProc look_up_option(self.device_setting["MN_START_METHOD"], ["bcprun"]) except ValueError as err: raise NotImplementedError( - f"{self.device_setting['MN_START_METHOD']} is not supported yet." - "Try modify BundleAlgo._run_cmd for your cluster." + f"{self.device_setting['MN_START_METHOD']} is not supported yet. Try modify BundleAlgo._run_cmd for your cluster." ) from err return _run_cmd_bcprun(cmd, n=self.device_setting["NUM_NODES"], p=self.device_setting["n_devices"]) diff --git a/monai/apps/detection/networks/retinanet_detector.py b/monai/apps/detection/networks/retinanet_detector.py index 17e70d1371..743dee9321 100644 --- a/monai/apps/detection/networks/retinanet_detector.py +++ b/monai/apps/detection/networks/retinanet_detector.py @@ -342,8 +342,7 @@ def set_regular_matcher( """ if fg_iou_thresh < bg_iou_thresh: raise ValueError( - "Require fg_iou_thresh >= bg_iou_thresh. " - f"Got fg_iou_thresh={fg_iou_thresh}, bg_iou_thresh={bg_iou_thresh}." + f"Require fg_iou_thresh >= bg_iou_thresh. Got fg_iou_thresh={fg_iou_thresh}, bg_iou_thresh={bg_iou_thresh}." ) self.proposal_matcher = Matcher( fg_iou_thresh, bg_iou_thresh, allow_low_quality_matches=allow_low_quality_matches @@ -519,7 +518,7 @@ def forward( else: if self.inferer is None: raise ValueError( - "`self.inferer` is not defined." "Please refer to function self.set_sliding_window_inferer(*)." + "`self.inferer` is not defined. Please refer to function self.set_sliding_window_inferer(*)." 
) head_outputs = predict_with_inferer( images, self.network, keys=[self.cls_key, self.box_reg_key], inferer=self.inferer diff --git a/monai/apps/detection/transforms/box_ops.py b/monai/apps/detection/transforms/box_ops.py index 6e08a88e59..fa714daad1 100644 --- a/monai/apps/detection/transforms/box_ops.py +++ b/monai/apps/detection/transforms/box_ops.py @@ -267,7 +267,7 @@ def convert_box_to_mask( boxes_only_mask = np.ones(box_size, dtype=np.int16) * np.int16(labels_np[b]) # apply to global mask slicing = [b] - slicing.extend(slice(boxes_np[b, d], boxes_np[b, d + spatial_dims]) for d in range(spatial_dims)) # type:ignore + slicing.extend(slice(boxes_np[b, d], boxes_np[b, d + spatial_dims]) for d in range(spatial_dims)) # type: ignore boxes_mask_np[tuple(slicing)] = boxes_only_mask return convert_to_dst_type(src=boxes_mask_np, dst=boxes, dtype=torch.int16)[0] diff --git a/monai/apps/detection/transforms/dictionary.py b/monai/apps/detection/transforms/dictionary.py index f52ab53531..f7c1ce0770 100644 --- a/monai/apps/detection/transforms/dictionary.py +++ b/monai/apps/detection/transforms/dictionary.py @@ -125,10 +125,8 @@ def __init__(self, box_keys: KeysCollection, box_ref_image_keys: str, allow_miss super().__init__(box_keys, allow_missing_keys) box_ref_image_keys_tuple = ensure_tuple(box_ref_image_keys) if len(box_ref_image_keys_tuple) > 1: - raise ValueError( - "Please provide a single key for box_ref_image_keys.\ - All boxes of box_keys are attached to box_ref_image_keys." 
- ) + raise ValueError("Please provide a single key for box_ref_image_keys.\ + All boxes of box_keys are attached to box_ref_image_keys.") self.box_ref_image_keys = box_ref_image_keys def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, NdarrayOrTensor]: @@ -289,10 +287,8 @@ def __init__( super().__init__(box_keys, allow_missing_keys) box_ref_image_keys_tuple = ensure_tuple(box_ref_image_keys) if len(box_ref_image_keys_tuple) > 1: - raise ValueError( - "Please provide a single key for box_ref_image_keys.\ - All boxes of box_keys are attached to box_ref_image_keys." - ) + raise ValueError("Please provide a single key for box_ref_image_keys.\ + All boxes of box_keys are attached to box_ref_image_keys.") self.box_ref_image_keys = box_ref_image_keys self.image_meta_key = image_meta_key or f"{box_ref_image_keys}_{image_meta_key_postfix}" self.converter_to_image_coordinate = AffineBox() @@ -310,10 +306,8 @@ def extract_affine(self, data: Mapping[Hashable, torch.Tensor]) -> tuple[Ndarray else: raise ValueError(f"{meta_key} is not found. Please check whether it is the correct the image meta key.") if "affine" not in meta_dict: - raise ValueError( - f"'affine' is not found in {meta_key}. \ - Please check whether it is the correct the image meta key." - ) + raise ValueError(f"'affine' is not found in {meta_key}. \ + Please check whether it is the correct the image meta key.") affine: NdarrayOrTensor = meta_dict["affine"] if self.affine_lps_to_ras: # RAS affine @@ -815,16 +809,12 @@ def __init__( ) -> None: box_keys_tuple = ensure_tuple(box_keys) if len(box_keys_tuple) != 1: - raise ValueError( - "Please provide a single key for box_keys.\ - All label_keys are attached to this box_keys." 
- ) + raise ValueError("Please provide a single key for box_keys.\ + All label_keys are attached to this box_keys.") box_ref_image_keys_tuple = ensure_tuple(box_ref_image_keys) if len(box_ref_image_keys_tuple) != 1: - raise ValueError( - "Please provide a single key for box_ref_image_keys.\ - All box_keys and label_keys are attached to this box_ref_image_keys." - ) + raise ValueError("Please provide a single key for box_ref_image_keys.\ + All box_keys and label_keys are attached to this box_ref_image_keys.") self.label_keys = ensure_tuple(label_keys) super().__init__(box_keys_tuple, allow_missing_keys) @@ -1091,10 +1081,8 @@ def __init__( box_keys_tuple = ensure_tuple(box_keys) if len(box_keys_tuple) != 1: - raise ValueError( - "Please provide a single key for box_keys.\ - All label_keys are attached to this box_keys." - ) + raise ValueError("Please provide a single key for box_keys.\ + All label_keys are attached to this box_keys.") self.box_keys = box_keys_tuple[0] self.label_keys = ensure_tuple(label_keys) diff --git a/monai/apps/detection/utils/anchor_utils.py b/monai/apps/detection/utils/anchor_utils.py index 20f6fc6025..0306a95c7e 100644 --- a/monai/apps/detection/utils/anchor_utils.py +++ b/monai/apps/detection/utils/anchor_utils.py @@ -124,10 +124,8 @@ def __init__( aspect_ratios = (aspect_ratios,) * len(self.sizes) if len(self.sizes) != len(aspect_ratios): - raise ValueError( - "len(sizes) and len(aspect_ratios) should be equal. \ - It represents the number of feature maps." - ) + raise ValueError("len(sizes) and len(aspect_ratios) should be equal. 
\ + It represents the number of feature maps.") spatial_dims = len(ensure_tuple(aspect_ratios[0][0])) + 1 spatial_dims = look_up_option(spatial_dims, [2, 3]) @@ -172,16 +170,12 @@ def generate_anchors( scales_t = torch.as_tensor(scales, dtype=dtype, device=device) # sized (N,) aspect_ratios_t = torch.as_tensor(aspect_ratios, dtype=dtype, device=device) # sized (M,) or (M,2) if (self.spatial_dims >= 3) and (len(aspect_ratios_t.shape) != 2): - raise ValueError( - f"In {self.spatial_dims}-D image, aspect_ratios for each level should be \ - {len(aspect_ratios_t.shape) - 1}-D. But got aspect_ratios with shape {aspect_ratios_t.shape}." - ) + raise ValueError(f"In {self.spatial_dims}-D image, aspect_ratios for each level should be \ + {len(aspect_ratios_t.shape) - 1}-D. But got aspect_ratios with shape {aspect_ratios_t.shape}.") if (self.spatial_dims >= 3) and (aspect_ratios_t.shape[1] != self.spatial_dims - 1): - raise ValueError( - f"In {self.spatial_dims}-D image, aspect_ratios for each level should has \ - shape (_,{self.spatial_dims - 1}). But got aspect_ratios with shape {aspect_ratios_t.shape}." - ) + raise ValueError(f"In {self.spatial_dims}-D image, aspect_ratios for each level should has \ + shape (_,{self.spatial_dims - 1}). But got aspect_ratios with shape {aspect_ratios_t.shape}.") # if 2d, w:h = 1:aspect_ratios if self.spatial_dims == 2: diff --git a/monai/apps/reconstruction/transforms/array.py b/monai/apps/reconstruction/transforms/array.py index 911d7a06bb..c1a43043e4 100644 --- a/monai/apps/reconstruction/transforms/array.py +++ b/monai/apps/reconstruction/transforms/array.py @@ -61,10 +61,8 @@ def __init__( real/imaginary parts. 
""" if len(center_fractions) != len(accelerations): - raise ValueError( - "Number of center fractions \ - should match number of accelerations" - ) + raise ValueError("Number of center fractions \ + should match number of accelerations") self.center_fractions = center_fractions self.accelerations = accelerations diff --git a/monai/auto3dseg/analyzer.py b/monai/auto3dseg/analyzer.py index 8d662df83d..3dbfbaebbe 100644 --- a/monai/auto3dseg/analyzer.py +++ b/monai/auto3dseg/analyzer.py @@ -105,7 +105,7 @@ def update_ops_nested_label(self, nested_key: str, op: Operations) -> None: raise ValueError("Nested_key input format is wrong. Please ensure it is like key1#0#key2") root: str child_key: str - (root, _, child_key) = keys + root, _, child_key = keys if root not in self.ops: self.ops[root] = [{}] self.ops[root][0].update({child_key: None}) @@ -952,8 +952,7 @@ def __call__(self, data: dict) -> dict: self.hist_range = nr_channels * self.hist_range if len(self.hist_range) != nr_channels: raise ValueError( - f"There is a mismatch between the number of channels ({nr_channels}) " - f"and histogram ranges ({len(self.hist_range)})." + f"There is a mismatch between the number of channels ({nr_channels}) and histogram ranges ({len(self.hist_range)})." 
) # perform calculation diff --git a/monai/bundle/scripts.py b/monai/bundle/scripts.py index 9fdee6acd0..fa9ba27096 100644 --- a/monai/bundle/scripts.py +++ b/monai/bundle/scripts.py @@ -1948,7 +1948,7 @@ def create_workflow( """ _args = update_kwargs(args=args_file, workflow_name=workflow_name, config_file=config_file, **kwargs) - (workflow_name, config_file) = _pop_args( + workflow_name, config_file = _pop_args( _args, workflow_name=ConfigWorkflow, config_file=None ) # the default workflow name is "ConfigWorkflow" if isinstance(workflow_name, str): diff --git a/monai/bundle/utils.py b/monai/bundle/utils.py index 53d619f234..d37d7f1c05 100644 --- a/monai/bundle/utils.py +++ b/monai/bundle/utils.py @@ -124,10 +124,8 @@ "run_name": None, # may fill it at runtime "save_execute_config": True, - "is_not_rank0": ( - "$torch.distributed.is_available() \ - and torch.distributed.is_initialized() and torch.distributed.get_rank() > 0" - ), + "is_not_rank0": ("$torch.distributed.is_available() \ + and torch.distributed.is_initialized() and torch.distributed.get_rank() > 0"), # MLFlowHandler config for the trainer "trainer": { "_target_": "MLFlowHandler", diff --git a/monai/data/dataset.py b/monai/data/dataset.py index 066cec41b7..21b24840b5 100644 --- a/monai/data/dataset.py +++ b/monai/data/dataset.py @@ -139,7 +139,7 @@ class DatasetFunc(Dataset): """ def __init__(self, data: Any, func: Callable, **kwargs) -> None: - super().__init__(data=None, transform=None) # type:ignore + super().__init__(data=None, transform=None) # type: ignore self.src = data self.func = func self.kwargs = kwargs @@ -1635,7 +1635,7 @@ def _cachecheck(self, item_transformed): return (_data, _meta) return _data else: - item: list[dict[Any, Any]] = [{} for _ in range(len(item_transformed))] # type:ignore + item: list[dict[Any, Any]] = [{} for _ in range(len(item_transformed))] # type: ignore for i, _item in enumerate(item_transformed): for k in _item: meta_i_k = 
self._load_meta_cache(meta_hash_file_name=f"{hashfile.name}-{k}-meta-{i}") diff --git a/monai/data/wsi_reader.py b/monai/data/wsi_reader.py index 62081d61d1..a9f7fab1c2 100644 --- a/monai/data/wsi_reader.py +++ b/monai/data/wsi_reader.py @@ -416,8 +416,7 @@ def get_data( # Check if there are three color channels for RGB elif mode in "RGB" and patch.shape[self.channel_dim] != 3: raise ValueError( - f"The image is expected to have three color channels in '{mode}' mode but has " - f"{patch.shape[self.channel_dim]}. " + f"The image is expected to have three color channels in '{mode}' mode but has {patch.shape[self.channel_dim]}. " ) # Get patch-related metadata metadata: dict = self._get_metadata(wsi=each_wsi, patch=patch, location=location, size=size, level=level) diff --git a/monai/handlers/utils.py b/monai/handlers/utils.py index b6771f2dcc..02975039b3 100644 --- a/monai/handlers/utils.py +++ b/monai/handlers/utils.py @@ -48,7 +48,7 @@ def stopping_fn_from_loss() -> Callable[[Engine], Any]: """ def stopping_fn(engine: Engine) -> Any: - return -engine.state.output # type:ignore + return -engine.state.output # type: ignore return stopping_fn diff --git a/monai/losses/dice.py b/monai/losses/dice.py index cd76ec1323..d757db2557 100644 --- a/monai/losses/dice.py +++ b/monai/losses/dice.py @@ -203,11 +203,9 @@ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: self.class_weight = torch.as_tensor([self.class_weight] * num_of_classes) else: if self.class_weight.shape[0] != num_of_classes: - raise ValueError( - """the length of the `weight` sequence should be the same as the number of classes. + raise ValueError("""the length of the `weight` sequence should be the same as the number of classes. 
If `include_background=False`, the weight should not include - the background category class 0.""" - ) + the background category class 0.""") if self.class_weight.min() < 0: raise ValueError("the value/values of the `weight` should be no less than 0.") # apply class_weight to loss diff --git a/monai/losses/focal_loss.py b/monai/losses/focal_loss.py index caa237fca8..7ab54c319d 100644 --- a/monai/losses/focal_loss.py +++ b/monai/losses/focal_loss.py @@ -183,11 +183,9 @@ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: self.class_weight = torch.as_tensor([self.class_weight] * num_of_classes) else: if self.class_weight.shape[0] != num_of_classes: - raise ValueError( - """the length of the `weight` sequence should be the same as the number of classes. + raise ValueError("""the length of the `weight` sequence should be the same as the number of classes. If `include_background=False`, the weight should not include - the background category class 0.""" - ) + the background category class 0.""") if self.class_weight.min() < 0: raise ValueError("the value/values of the `weight` should be no less than 0.") # apply class_weight to loss diff --git a/monai/metrics/utils.py b/monai/metrics/utils.py index a451b1a770..4a60e438cf 100644 --- a/monai/metrics/utils.py +++ b/monai/metrics/utils.py @@ -320,7 +320,7 @@ def get_edge_surface_distance( edges_spacing = None if use_subvoxels: edges_spacing = spacing if spacing is not None else ([1] * len(y_pred.shape)) - (edges_pred, edges_gt, *areas) = get_mask_edges( + edges_pred, edges_gt, *areas = get_mask_edges( y_pred, y, crop=True, spacing=edges_spacing, always_return_as_numpy=False ) if not edges_gt.any(): diff --git a/monai/networks/blocks/warp.py b/monai/networks/blocks/warp.py index d10089c597..9a0d19e4fd 100644 --- a/monai/networks/blocks/warp.py +++ b/monai/networks/blocks/warp.py @@ -20,6 +20,7 @@ from monai.config.deviceconfig import USE_COMPILED from monai.networks.layers.spatial_transforms import 
grid_pull from monai.networks.utils import meshgrid_ij +from monai.transforms.spatial.functional import _compiled_unsupported from monai.utils import GridSampleMode, GridSamplePadMode, optional_import _C, _ = optional_import("monai._C") @@ -138,7 +139,9 @@ def forward(self, image: torch.Tensor, ddf: torch.Tensor): grid = self.get_reference_grid(ddf, jitter=self.jitter) + ddf grid = grid.permute([0] + list(range(2, 2 + spatial_dims)) + [1]) # (batch, ..., spatial_dims) - if not USE_COMPILED: # pytorch native grid_sample + _use_compiled = USE_COMPILED and not _compiled_unsupported(image.device) + + if not _use_compiled: # pytorch native grid_sample for i, dim in enumerate(grid.shape[1:-1]): grid[..., i] = grid[..., i] * 2 / (dim - 1) - 1 index_ordering: list[int] = list(range(spatial_dims - 1, -1, -1)) diff --git a/monai/transforms/io/array.py b/monai/transforms/io/array.py index 0628a7fbc4..f0c1d1949d 100644 --- a/monai/transforms/io/array.py +++ b/monai/transforms/io/array.py @@ -11,6 +11,7 @@ """ A collection of "vanilla" transforms for IO functions. 
""" + from __future__ import annotations import inspect diff --git a/monai/transforms/regularization/array.py b/monai/transforms/regularization/array.py index 6b979e564a..1eb533dd6b 100644 --- a/monai/transforms/regularization/array.py +++ b/monai/transforms/regularization/array.py @@ -24,7 +24,6 @@ class Mixer(RandomizableTransform): - def __init__(self, batch_size: int, alpha: float = 1.0) -> None: """ Mixer is a base class providing the basic logic for the mixup-class of diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index b6bf211cc4..07df90502e 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -34,6 +34,7 @@ from monai.transforms.croppad.array import CenterSpatialCrop, ResizeWithPadOrCrop from monai.transforms.inverse import InvertibleTransform from monai.transforms.spatial.functional import ( + _compiled_unsupported, affine_func, convert_box_to_points, convert_points_to_box, @@ -2104,14 +2105,15 @@ def __call__( _align_corners = self.align_corners if align_corners is None else align_corners img_t, *_ = convert_data_type(img, torch.Tensor, dtype=_dtype, device=_device) sr = min(len(img_t.peek_pending_shape() if isinstance(img_t, MetaTensor) else img_t.shape[1:]), 3) + _use_compiled = USE_COMPILED and not _compiled_unsupported(img_t.device) backend, _interp_mode, _padding_mode, _ = resolves_modes( self.mode if mode is None else mode, self.padding_mode if padding_mode is None else padding_mode, backend=None, - use_compiled=USE_COMPILED, + use_compiled=_use_compiled, ) - if USE_COMPILED or backend == TransformBackends.NUMPY: + if _use_compiled or backend == TransformBackends.NUMPY: grid_t, *_ = convert_to_dst_type(grid[:sr], img_t, dtype=grid.dtype, wrap_sequence=True) if isinstance(grid, torch.Tensor) and grid_t.data_ptr() == grid.data_ptr(): grid_t = grid_t.clone(memory_format=torch.contiguous_format) @@ -2122,7 +2124,7 @@ def __call__( grid_t[i] = ((_dim - 1) / _dim) * grid_t[i] + t if 
_align_corners else grid_t[i] + t elif _align_corners: grid_t[i] = ((_dim - 1) / _dim) * (grid_t[i] + 0.5) - if USE_COMPILED and backend == TransformBackends.TORCH: # compiled is using torch backend param name + if _use_compiled and backend == TransformBackends.TORCH: # compiled is using torch backend param name grid_t = moveaxis(grid_t, 0, -1) # type: ignore out = grid_pull( img_t.unsqueeze(0), @@ -2140,6 +2142,20 @@ def __call__( [_map_coord(c, grid_np, order=_interp_mode, mode=_padding_mode) for c in img_np] ) out = convert_to_dst_type(out, img_t)[0] + else: + # Fallback to PyTorch grid_sample when compiled extension is unsupported. + # Convert grid coordinates from compiled convention [0, size-1] to PyTorch [-1, 1] + for i, dim in enumerate(img_t.shape[1 : 1 + sr]): + _dim = max(2, dim) + grid_t[i] = (grid_t[i] * 2.0 / _dim) - 1.0 + grid_t = moveaxis(grid_t, 0, -1) # type: ignore + out = torch.nn.functional.grid_sample( + img_t.unsqueeze(0), + grid_t.unsqueeze(0), + mode=_interp_mode, + padding_mode=_padding_mode, + align_corners=None if _align_corners == TraceKeys.NONE else _align_corners, # type: ignore + )[0] else: grid_t = moveaxis(grid[list(range(sr - 1, -1, -1))], 0, -1) # type: ignore grid_t = convert_to_dst_type(grid_t, img_t, wrap_sequence=True)[0].unsqueeze(0) diff --git a/monai/transforms/spatial/functional.py b/monai/transforms/spatial/functional.py index 3001dd1e64..add7046b13 100644 --- a/monai/transforms/spatial/functional.py +++ b/monai/transforms/spatial/functional.py @@ -54,6 +54,35 @@ __all__ = ["spatial_resample", "orientation", "flip", "resize", "rotate", "zoom", "rotate90", "affine_func"] +def _compiled_unsupported(device: torch.device) -> bool: + """ + Return True if ``monai._C`` (the compiled C extension providing ``grid_pull``) is not + compiled with support for the given CUDA device's compute capability. + + Args: + device: The torch device to check for compiled extension support. 
+ + Returns: + True if the device is CUDA with compute capability major >= 12 (Blackwell+), + False otherwise. Always returns False for CPU devices. + + Note: + ``monai._C`` is built at install time against a fixed set of CUDA architectures. + NVIDIA Blackwell GPUs (sm_120, compute capability 12.x) and newer were not included in + the default ``TORCH_CUDA_ARCH_LIST`` when the MONAI slim image was originally built, + so executing ``grid_pull`` on those devices produces incorrect results. Falling back to + the PyTorch-native ``affine_grid`` + ``grid_sample`` path (``USE_COMPILED=False``) gives + correct output on all architectures. + + The threshold (``major >= 12``) matches the first architecture family (Blackwell, sm_120) + that shipped after the highest sm supported in the current default build list (sm_90, + Hopper). Adjust this constant when ``monai._C`` is rebuilt with sm_120+ support. + """ + if device.type != "cuda": + return False + return torch.cuda.get_device_properties(device).major >= 12 + + def _maybe_new_metatensor(img, dtype=None, device=None): """create a metatensor with fresh metadata if track_meta is True otherwise convert img into a torch tensor""" return convert_to_tensor( @@ -158,7 +187,8 @@ def spatial_resample( xform_shape = [-1] + in_sp_size img = img.reshape(xform_shape) img = img.to(dtype_pt) - if isinstance(mode, int) or USE_COMPILED: + _use_compiled = USE_COMPILED and not _compiled_unsupported(img.device) + if isinstance(mode, int) or _use_compiled: dst_xform = create_translate(spatial_rank, [float(d - 1) / 2 for d in spatial_size]) xform = xform @ convert_to_dst_type(dst_xform, xform)[0] affine_xform = monai.transforms.Affine( diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py index 3dc7897feb..7df6e2c5ef 100644 --- a/monai/transforms/utility/array.py +++ b/monai/transforms/utility/array.py @@ -702,7 +702,7 @@ def __init__( # if the root log level is higher than INFO, set a separate stream handler to 
record console = logging.StreamHandler(sys.stdout) console.setLevel(logging.INFO) - console.is_data_stats_handler = True # type:ignore[attr-defined] + console.is_data_stats_handler = True # type: ignore[attr-defined] _logger.addHandler(console) def __call__( diff --git a/tests/integration/test_loader_semaphore.py b/tests/integration/test_loader_semaphore.py index 78baedc264..c32bcb0b8b 100644 --- a/tests/integration/test_loader_semaphore.py +++ b/tests/integration/test_loader_semaphore.py @@ -10,6 +10,7 @@ # limitations under the License. """this test should not generate errors or UserWarning: semaphore_tracker: There appear to be 1 leaked semaphores""" + from __future__ import annotations import multiprocessing as mp diff --git a/tests/profile_subclass/profiling.py b/tests/profile_subclass/profiling.py index 18aecea2fb..6106259526 100644 --- a/tests/profile_subclass/profiling.py +++ b/tests/profile_subclass/profiling.py @@ -12,6 +12,7 @@ Comparing torch.Tensor, SubTensor, SubWithTorchFunc, MetaTensor Adapted from https://github.com/pytorch/pytorch/tree/v1.11.0/benchmarks/overrides_benchmark """ + from __future__ import annotations import argparse diff --git a/tests/profile_subclass/pyspy_profiling.py b/tests/profile_subclass/pyspy_profiling.py index fac425f577..671dc74c01 100644 --- a/tests/profile_subclass/pyspy_profiling.py +++ b/tests/profile_subclass/pyspy_profiling.py @@ -12,6 +12,7 @@ To be used with py-spy, comparing torch.Tensor, SubTensor, SubWithTorchFunc, MetaTensor Adapted from https://github.com/pytorch/pytorch/tree/v1.11.0/benchmarks/overrides_benchmark """ + from __future__ import annotations import argparse diff --git a/tests/transforms/croppad/test_pad_nd_dtypes.py b/tests/transforms/croppad/test_pad_nd_dtypes.py index 7fa633b8aa..a3f5f93a2d 100644 --- a/tests/transforms/croppad/test_pad_nd_dtypes.py +++ b/tests/transforms/croppad/test_pad_nd_dtypes.py @@ -12,6 +12,7 @@ Tests for pad_nd dtype support and backend selection. 
 Validates PyTorch padding preference and NumPy fallback behavior.
 """
+
 from __future__ import annotations
 
 import unittest
 
diff --git a/tests/transforms/test_spacing.py b/tests/transforms/test_spacing.py
index 3862472753..620fa6d340 100644
--- a/tests/transforms/test_spacing.py
+++ b/tests/transforms/test_spacing.py
@@ -22,16 +22,19 @@
 from monai.data.meta_tensor import MetaTensor
 from monai.data.utils import affine_to_spacing
 from monai.transforms import Spacing
+from monai.transforms.spatial.functional import _compiled_unsupported
 from monai.utils import fall_back_tuple
 from tests.lazy_transforms_utils import test_resampler_lazy
 from tests.test_utils import TEST_DEVICES, TEST_NDARRAYS_ALL, assert_allclose, dict_product, skip_if_quick
 
-# Define the static parts of each test case
-_template_5_expected_output = (
-    torch.tensor([[[[0.75, 0.75]], [[0.75, 0.75]], [[0.75, 0.75]]]])
-    if USE_COMPILED
-    else torch.tensor([[[[0.95527864, 0.95527864]], [[1.0, 1.0]], [[1.0, 1.0]]]])
-)
+_TEMPLATE_5_COMPILED = torch.tensor([[[[0.75, 0.75]], [[0.75, 0.75]], [[0.75, 0.75]]]])
+_TEMPLATE_5_NATIVE = torch.tensor([[[[0.95527864, 0.95527864]], [[1.0, 1.0]], [[1.0, 1.0]]]])
+
+
+def _template_5_expected_output(device: torch.device) -> torch.Tensor:
+    if USE_COMPILED and not _compiled_unsupported(device):
+        return _TEMPLATE_5_COMPILED
+    return _TEMPLATE_5_NATIVE
 
 all_template_parts = [
     [
@@ -241,6 +244,8 @@ def test_spacing(
 
         test_resampler_lazy(tr, res, init_param=init_param, call_param=call_param)
 
+        if callable(expected_output):
+            expected_output = expected_output(device)
         assert_allclose(res, expected_output, atol=1e-1, rtol=1e-1)
         sr = min(len(res.shape) - 1, 3)
         if isinstance(init_param["pixdim"], float):
diff --git a/tests/transforms/test_spatial_gpu_support.py b/tests/transforms/test_spatial_gpu_support.py
new file mode 100644
index 0000000000..d4c92a46e2
--- /dev/null
+++ b/tests/transforms/test_spatial_gpu_support.py
@@ -0,0 +1,81 @@
+# Copyright (c) MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Test GPU support detection and fallback paths for spatial transforms."""
+
+from __future__ import annotations
+
+import unittest
+
+import torch
+
+from monai.transforms.spatial.functional import _compiled_unsupported
+
+
+class TestCompiledUnsupported(unittest.TestCase):
+    """Test _compiled_unsupported device detection."""
+
+    def test_cpu_device_always_supported(self):
+        """CPU devices should never be marked unsupported."""
+        device = torch.device("cpu")
+        self.assertFalse(_compiled_unsupported(device))
+
+    def test_non_cuda_device_always_supported(self):
+        """Non-CUDA devices should always be supported."""
+        device = torch.device("cpu")
+        self.assertFalse(_compiled_unsupported(device))
+
+    @unittest.skipIf(not torch.cuda.is_available(), reason="CUDA not available")
+    def test_cuda_device_detection(self):
+        """Verify CUDA compute capability detection."""
+        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+        if device.type == "cuda":
+            cc_major = torch.cuda.get_device_properties(device).major
+            unsupported = _compiled_unsupported(device)
+            # Device is unsupported if cc_major >= 12
+            if cc_major >= 12:
+                self.assertTrue(unsupported)
+            else:
+                self.assertFalse(unsupported)
+
+    def test_compiled_unsupported_return_type(self):
+        """Verify return type is bool."""
+        device = torch.device("cpu")
+        result = _compiled_unsupported(device)
+        self.assertIsInstance(result, bool)
+
+
+class TestResampleFallback(unittest.TestCase):
+    """Test Resample fallback behavior on unsupported devices."""
+
+    @unittest.skipIf(not torch.cuda.is_available(), reason="CUDA not available")
+    def test_resample_compilation_flag_respected(self):
+        """Verify Resample respects _compiled_unsupported check."""
+        # This would require internal inspection or output verification
+        # Could test with mock device properties or actual Blackwell GPU
+
+    def test_compiled_unsupported_logic(self):
+        """Test that unsupported devices are correctly detected."""
+        # CPU should be supported
+        cpu_device = torch.device("cpu")
+        self.assertFalse(_compiled_unsupported(cpu_device))
+
+        # Verify logic: return True if CUDA and cc_major >= 12
+        cuda_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+        if cuda_device.type == "cuda":
+            cc_major = torch.cuda.get_device_properties(cuda_device).major
+            expected = cc_major >= 12
+            actual = _compiled_unsupported(cuda_device)
+            self.assertEqual(actual, expected)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/versioneer.py b/versioneer.py
index a06587fc3f..5d0a606c91 100644
--- a/versioneer.py
+++ b/versioneer.py
@@ -273,6 +273,7 @@
 [travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer
 """
+
 # pylint:disable=invalid-name,import-outside-toplevel,missing-function-docstring
 # pylint:disable=missing-class-docstring,too-many-branches,too-many-statements
 # pylint:disable=raise-missing-from,too-many-lines,too-many-locals,import-error