Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 6 additions & 6 deletions monai/losses/image_dissimilarity.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
from torch.nn import functional as F
from torch.nn.modules.loss import _Loss

from monai.networks.layers import gaussian_1d, separable_filtering
from monai.networks.layers import separable_filtering
from monai.utils import LossReduction
from monai.utils.module import look_up_option

Expand All @@ -34,11 +34,11 @@ def make_triangular_kernel(kernel_size: int) -> torch.Tensor:


def make_gaussian_kernel(kernel_size: int) -> torch.Tensor:
    """Return a 1-D unnormalized Gaussian kernel of length ``kernel_size``.

    The kernel is sampled at integer offsets ``-half .. half`` (``half =
    kernel_size // 2``) with ``sigma = kernel_size / 3``, so it peaks at 1.0
    at the center tap and is mirror-symmetric.

    Args:
        kernel_size: length of the kernel; must be a positive odd integer
            (``arange(-half, half + 1)`` yields ``2 * half + 1`` samples, so
            an even ``kernel_size`` would silently produce ``kernel_size + 1``
            taps).

    Returns:
        A 1-D float tensor of shape ``(kernel_size,)``.

    Raises:
        ValueError: when ``kernel_size`` is not a positive odd integer.
    """
    # Guard first: even / non-positive sizes cannot produce a kernel of the
    # requested length with a centered sample grid.
    if kernel_size <= 0 or kernel_size % 2 == 0:
        raise ValueError(f"kernel_size must be a positive odd integer, got {kernel_size}.")
    sigma = kernel_size / 3.0
    half = kernel_size // 2
    x = torch.arange(-half, half + 1, dtype=torch.float)
    return torch.exp(-0.5 / (sigma * sigma) * x**2)
Comment on lines +37 to +41
Copy link
Contributor

@coderabbitai coderabbitai bot Mar 18, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# Verify sample-count behavior implied by the current formula.
python - <<'PY'
for kernel_size in [2, 4, 6, 8]:
    half = kernel_size // 2
    generated_len = len(list(range(-half, half + 1)))
    print(f"kernel_size={kernel_size}, generated_len={generated_len}")
PY

Repository: Project-MONAI/MONAI

Length of output: 186


🏁 Script executed:

# Read the full function and surrounding context
cat -n monai/losses/image_dissimilarity.py | head -60

Repository: Project-MONAI/MONAI

Length of output: 2515


🏁 Script executed:

# Search for all usages of make_gaussian_kernel in the codebase
rg "make_gaussian_kernel" -A 3 -B 1

Repository: Project-MONAI/MONAI

Length of output: 2625


🏁 Script executed:

# Check if there are tests for this function
fd "test.*\.py" | xargs rg "make_gaussian_kernel" -l

Repository: Project-MONAI/MONAI

Length of output: 143


🏁 Script executed:

# Read the test file to see the complete test for make_gaussian_kernel
cat -n tests/losses/image_dissimilarity/test_local_normalized_cross_correlation_loss.py | grep -A 10 "test_gaussian_kernel\|make_gaussian_kernel"

Repository: Project-MONAI/MONAI

Length of output: 1536


Guard kernel_size to prevent length mismatch for even values.

The current implementation produces kernel_size + 1 samples for even inputs (e.g., kernel_size=4 returns length 5). The existing test only validates odd values [3, 5, 7, 9, 11, 15], so this bug goes undetected. The proposed ValueError guard for non-positive and even values prevents silent failures.

Additionally, add a docstring to clarify that kernel_size must be a positive odd integer.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@monai/losses/image_dissimilarity.py` around lines 37 - 41, The kernel
generation allows even kernel_size values which produces kernel_size+1 samples;
add an input guard in the function that computes sigma/half/x/kernel to raise a
ValueError if kernel_size is non-positive or even (i.e., if kernel_size <= 0 or
kernel_size % 2 == 0) and update the function's docstring to state that
kernel_size must be a positive odd integer; locate the check around the
variables kernel_size, sigma, half, x, kernel to ensure the validation runs
before computing sigma and x.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

These were preexisting. I would say out of scope for this PR.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@aymuos15 Fair point — the even kernel_size guard predates this PR. Would you like me to open a follow-up issue to track it?

( ˘︹˘ )∫



kernel_dict = {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
import torch
from parameterized import parameterized

from monai.losses.image_dissimilarity import LocalNormalizedCrossCorrelationLoss
from monai.losses.image_dissimilarity import LocalNormalizedCrossCorrelationLoss, make_gaussian_kernel

device = "cuda" if torch.cuda.is_available() else "cpu"

Expand Down Expand Up @@ -113,6 +113,25 @@
},
-0.95406944,
],
# Regression tests for gh-8780: gaussian kernel_size > 3 was broken due to
# truncated parameter being passed as pixel radius instead of sigma multiplier.
# Identical images must yield loss == -1.0 for any kernel size.
[
{"spatial_dims": 1, "kernel_type": "gaussian", "kernel_size": 5},
{
"pred": torch.arange(0, 5).reshape(1, 1, -1).to(dtype=torch.float, device=device),
"target": torch.arange(0, 5).reshape(1, 1, -1).to(dtype=torch.float, device=device),
},
-1.0,
],
[
{"spatial_dims": 1, "kernel_type": "gaussian", "kernel_size": 9},
{
"pred": torch.arange(0, 9).reshape(1, 1, -1).to(dtype=torch.float, device=device),
"target": torch.arange(0, 9).reshape(1, 1, -1).to(dtype=torch.float, device=device),
},
-1.0,
],
]


Expand All @@ -138,6 +157,15 @@ def test_ill_shape(self):
torch.ones((1, 3, 4, 4, 4), dtype=torch.float, device=device),
)

def test_gaussian_kernel_shape_and_symmetry(self):
# gh-8780: kernel must have correct length, be symmetric, and peak at center
for kernel_size in [3, 5, 7, 9, 11, 15]:
k = make_gaussian_kernel(kernel_size)
self.assertEqual(len(k), kernel_size)
self.assertTrue(torch.allclose(k, k.flip(0)), f"kernel_size={kernel_size} not symmetric")
self.assertEqual(k.argmax().item(), kernel_size // 2)
np.testing.assert_allclose(k.max().item(), 1.0, rtol=1e-6)

def test_ill_opts(self):
pred = torch.ones((1, 3, 3, 3, 3), dtype=torch.float)
target = torch.ones((1, 3, 3, 3, 3), dtype=torch.float)
Expand Down
Loading