Skip to content
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
40 changes: 25 additions & 15 deletions monai/networks/layers/filtering.py
Original file line number Diff line number Diff line change
Expand Up @@ -221,7 +221,7 @@ def __init__(self, spatial_sigma, color_sigma):
self.len_spatial_sigma = 3
else:
raise ValueError(
f"len(spatial_sigma) {spatial_sigma} must match number of spatial dims {self.ken_spatial_sigma}."
f"len(spatial_sigma) {spatial_sigma} must match number of spatial dims (1, 2 or 3)."
)

# Register sigmas as trainable parameters.
Expand All @@ -231,6 +231,10 @@ def __init__(self, spatial_sigma, color_sigma):
self.sigma_color = torch.nn.Parameter(torch.tensor(color_sigma))

def forward(self, input_tensor):
if len(input_tensor.shape) < 3:
raise ValueError(
f"Input must have at least 3 dimensions (batch, channel, *spatial_dims), got {len(input_tensor.shape)}"
)
if input_tensor.shape[1] != 1:
raise ValueError(
f"Currently channel dimensions >1 ({input_tensor.shape[1]}) are not supported. "
Expand All @@ -239,24 +243,25 @@ def forward(self, input_tensor):
)

len_input = len(input_tensor.shape)
spatial_dims = len_input - 2

# C++ extension so far only supports 5-dim inputs.
if len_input == 3:
if spatial_dims == 1:
input_tensor = input_tensor.unsqueeze(3).unsqueeze(4)
elif len_input == 4:
elif spatial_dims == 2:
input_tensor = input_tensor.unsqueeze(4)

if self.len_spatial_sigma != len_input:
raise ValueError(f"Spatial dimension ({len_input}) must match initialized len(spatial_sigma).")
if self.len_spatial_sigma != spatial_dims:
raise ValueError(f"Spatial dimension ({spatial_dims}) must match initialized len(spatial_sigma).")

prediction = TrainableBilateralFilterFunction.apply(
input_tensor, self.sigma_x, self.sigma_y, self.sigma_z, self.sigma_color
)

# Make sure to return tensor of the same shape as the input.
if len_input == 3:
if spatial_dims == 1:
prediction = prediction.squeeze(4).squeeze(3)
elif len_input == 4:
elif spatial_dims == 2:
prediction = prediction.squeeze(4)

return prediction
Expand Down Expand Up @@ -389,7 +394,7 @@ def __init__(self, spatial_sigma, color_sigma):
self.len_spatial_sigma = 3
else:
raise ValueError(
f"len(spatial_sigma) {spatial_sigma} must match number of spatial dims {self.ken_spatial_sigma}."
f"len(spatial_sigma) {spatial_sigma} must match number of spatial dims (1, 2, or 3)."
)
Comment on lines 395 to 398
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

Same bug: the else branch's error message references `self.ken_spatial_sigma`, a typo for an attribute that is never defined (only the if-branches assign `self.len_spatial_sigma`), so raising would fail with an AttributeError.

Identical issue as TrainableBilateralFilter.__init__.

Proposed fix
         else:
             raise ValueError(
-                f"len(spatial_sigma) {spatial_sigma} must match number of spatial dims {self.ken_spatial_sigma}."
+                f"len(spatial_sigma) must be 1, 2, or 3, got {len(spatial_sigma)}."
             )
🧰 Tools
🪛 Ruff (0.14.14)

[warning] 396-398: Avoid specifying long messages outside the exception class

(TRY003)

🤖 Prompt for AI Agents
In `@monai/networks/layers/filtering.py` around lines 395-398, the else branch's
error message references `self.ken_spatial_sigma`, a misspelled attribute that is
never defined (the if-branches assign `self.len_spatial_sigma`, so the else branch
cannot rely on it either). Fix it by removing the attribute reference from the
ValueError message and reporting a defined value instead — e.g. state the valid
range ("1, 2, or 3") or include `len(spatial_sigma)` directly — in the same way as
the matching fix in `TrainableBilateralFilter.__init__`.


# Register sigmas as trainable parameters.
Expand All @@ -399,9 +404,13 @@ def __init__(self, spatial_sigma, color_sigma):
self.sigma_color = torch.nn.Parameter(torch.tensor(color_sigma))

def forward(self, input_tensor, guidance_tensor):
if len(input_tensor.shape) < 3:
raise ValueError(
f"Input must have at least 3 dimensions (batch, channel, *spatial_dims), got {len(input_tensor.shape)}"
)
if input_tensor.shape[1] != 1:
raise ValueError(
f"Currently channel dimensions >1 ({input_tensor.shape[1]}) are not supported. "
f"Currently channel dimensions > 1 ({input_tensor.shape[1]}) are not supported. "
"Please use multiple parallel filter layers if you want "
"to filter multiple channels."
)
Expand All @@ -412,26 +421,27 @@ def forward(self, input_tensor, guidance_tensor):
)

len_input = len(input_tensor.shape)
spatial_dims = len_input - 2

# C++ extension so far only supports 5-dim inputs.
if len_input == 3:
if spatial_dims == 1:
input_tensor = input_tensor.unsqueeze(3).unsqueeze(4)
guidance_tensor = guidance_tensor.unsqueeze(3).unsqueeze(4)
elif len_input == 4:
elif spatial_dims == 2:
input_tensor = input_tensor.unsqueeze(4)
guidance_tensor = guidance_tensor.unsqueeze(4)

if self.len_spatial_sigma != len_input:
raise ValueError(f"Spatial dimension ({len_input}) must match initialized len(spatial_sigma).")
if self.len_spatial_sigma != spatial_dims:
raise ValueError(f"Spatial dimension ({spatial_dims}) must match initialized len(spatial_sigma).")

prediction = TrainableJointBilateralFilterFunction.apply(
input_tensor, guidance_tensor, self.sigma_x, self.sigma_y, self.sigma_z, self.sigma_color
)

# Make sure to return tensor of the same shape as the input.
if len_input == 3:
if spatial_dims == 1:
prediction = prediction.squeeze(4).squeeze(3)
elif len_input == 4:
elif spatial_dims == 2:
prediction = prediction.squeeze(4)

return prediction
Loading