Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
# Copyright 2026 NXP
#
# This source code is licensed under the BSD-style license found in the
Expand All @@ -6,15 +6,16 @@
from executorch.backends.nxp.backend.edge_helper import try_get_arg
from executorch.backends.nxp.backend.ir.converter.node_converter import (
CustomDelegationOptions,
is_not_qdq_node,
NodeConverter,
)
from executorch.backends.nxp.backend.neutron_operator_support import (
activation_supported_on_target
)
from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import (
BuiltinOperator,
)
from executorch.backends.nxp.backend.neutron_target_spec import NeutronTargetSpec
from torch.fx import Node
from torch.fx.passes.infra.partitioner import Partition
from torch.nn import Parameter


Expand Down Expand Up @@ -63,34 +64,9 @@
if bounds not in ClampConverter.SUPPORTED_BOUNDS.values():
return False

return True

@classmethod
def supports_partitioning_result(
cls,
node: Node,
partition_list: list[Partition],
custom_delegation_options: CustomDelegationOptions,
neutron_target_spec: NeutronTargetSpec,
parameters_mapping: dict[str, Parameter],
) -> bool:
bounds = cls._get_clamp_bounds(node)

if bounds in [cls.SUPPORTED_BOUNDS["Relu"], cls.SUPPORTED_BOUNDS["Relu6"]]:
# If this is the only operator in the partition, NeutronConverter will not create a NeutronNode for some
# reason.
clamp_partitions = [p for p in partition_list if node in p.nodes]
if len(clamp_partitions) != 1:
return False # Should never happen

clamp_partition = clamp_partitions[0]
non_q_dq_partition_nodes = list(
filter(is_not_qdq_node, clamp_partition.nodes)
)
if len(non_q_dq_partition_nodes) <= 1:
return False # This would be the only node in the partition, which would cause a crash later on.

return True
# `clamp` is converted to `relu`, so we need to check if such activation
# is supported.
return activation_supported_on_target(node, neutron_target_spec)

def convert(self, node: Node):
"""Convert the `aten.clamp.default` operator to Neutron IR `Relu*` operators.
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Copyright 2025 NXP
# Copyright 2025-2026 NXP
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
Expand All @@ -7,9 +7,13 @@
CustomDelegationOptions,
NodeConverter,
)
from executorch.backends.nxp.backend.neutron_operator_support import (
activation_supported_on_target
)
from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import (
BuiltinOperator,
)
from executorch.backends.nxp.backend.neutron_target_spec import NeutronTargetSpec
from torch.fx import Node
from torch.nn import Parameter

Expand All @@ -33,6 +37,17 @@
_, min_value, max_value = node.args
return (min_value, max_value) in HardTanhConverter.supported_modes_map.keys()

@staticmethod
def _is_supported_on_target(
    node: Node,
    neutron_target_spec: NeutronTargetSpec,
    parameters_mapping: dict[str, Parameter],
    custom_delegation_options: CustomDelegationOptions,
) -> bool:
    """Report whether this `hardtanh` node can be delegated to the Neutron target.

    `hardtanh` is converted to `relu`, so we need to check if such activation
    is supported.

    :param node: The `aten.hardtanh` node being considered for delegation.
    :param neutron_target_spec: Object for querying the target platform's properties.
    :param parameters_mapping: Mapping from parameter names to their data (unused here).
    :param custom_delegation_options: User-provided delegation overrides (unused here).
    :return: True if the target supports this activation, False otherwise.
    """
    # NOTE(review): `activation_supported_on_target` gates delegation on the
    # input shape (batch size and channel count) — confirm those restrictions
    # actually apply to `hardtanh` on the target.
    return activation_supported_on_target(node, neutron_target_spec)

def convert(self, node: Node):
"""Convert 'aten::hardtanh' to it's supported ReLU equivalent."""
self.assert_convertible(node)
Expand Down
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
# Copyright 2026 NXP
#
# This source code is licensed under the BSD-style license found in the
Expand All @@ -7,9 +7,13 @@
CustomDelegationOptions,
NodeConverter,
)
from executorch.backends.nxp.backend.neutron_operator_support import (
activation_supported_on_target
)
from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options.leaky_relu_options import (
LeakyRelu,
)
from executorch.backends.nxp.backend.neutron_target_spec import NeutronTargetSpec
from torch.fx import Node
from torch.nn import Parameter

Expand All @@ -24,6 +28,15 @@
) -> bool:
return True

@staticmethod
def _is_supported_on_target(
    node: Node,
    neutron_target_spec: NeutronTargetSpec,
    parameters_mapping: dict[str, Parameter],
    custom_delegation_options: CustomDelegationOptions,
) -> bool:
    """Report whether this `leaky_relu` node can be delegated to the Neutron target.

    Delegates the decision to the shared activation check, which queries the
    target platform's properties.

    :param node: The `aten.leaky_relu` node being considered for delegation.
    :param neutron_target_spec: Object for querying the target platform's properties.
    :param parameters_mapping: Mapping from parameter names to their data (unused here).
    :param custom_delegation_options: User-provided delegation overrides (unused here).
    :return: True if the target supports this activation, False otherwise.
    """
    return activation_supported_on_target(node, neutron_target_spec)

def convert(self, node: Node):
"""Convert the `aten.leaky_relu.default` operator to Neutron IR `LeakyRelu`.
The schema is:
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Copyright 2024-2025 NXP
# Copyright 2024-2026 NXP
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
Expand All @@ -7,9 +7,13 @@
CustomDelegationOptions,
NodeConverter,
)
from executorch.backends.nxp.backend.neutron_operator_support import (
activation_supported_on_target
)
from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import (
BuiltinOperator,
)
from executorch.backends.nxp.backend.neutron_target_spec import NeutronTargetSpec
from torch.fx import Node
from torch.nn import Parameter

Expand All @@ -24,6 +28,15 @@
) -> bool:
return True

@staticmethod
def _is_supported_on_target(
    node: Node,
    neutron_target_spec: NeutronTargetSpec,
    parameters_mapping: dict[str, Parameter],
    custom_delegation_options: CustomDelegationOptions,
) -> bool:
    """Report whether this activation node can be delegated to the Neutron target.

    Delegates the decision to the shared activation check, which queries the
    target platform's properties.

    :param node: The activation node being considered for delegation.
    :param neutron_target_spec: Object for querying the target platform's properties.
    :param parameters_mapping: Mapping from parameter names to their data (unused here).
    :param custom_delegation_options: User-provided delegation overrides (unused here).
    :return: True if the target supports this activation, False otherwise.
    """
    return activation_supported_on_target(node, neutron_target_spec)

def convert(self, node: Node):
self.assert_convertible(node)

Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Copyright 2025 NXP
# Copyright 2025-2026 NXP
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
Expand All @@ -7,9 +7,13 @@
CustomDelegationOptions,
NodeConverter,
)
from executorch.backends.nxp.backend.neutron_operator_support import (
activation_supported_on_target
)
from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import (
BuiltinOperator,
)
from executorch.backends.nxp.backend.neutron_target_spec import NeutronTargetSpec
from torch.fx import Node
from torch.nn import Parameter

Expand All @@ -24,6 +28,15 @@
) -> bool:
return True

@staticmethod
def _is_supported_on_target(
    node: Node,
    neutron_target_spec: NeutronTargetSpec,
    parameters_mapping: dict[str, Parameter],
    custom_delegation_options: CustomDelegationOptions,
) -> bool:
    """Report whether this activation node can be delegated to the Neutron target.

    Delegates the decision to the shared activation check, which queries the
    target platform's properties.

    :param node: The activation node being considered for delegation.
    :param neutron_target_spec: Object for querying the target platform's properties.
    :param parameters_mapping: Mapping from parameter names to their data (unused here).
    :param custom_delegation_options: User-provided delegation overrides (unused here).
    :return: True if the target supports this activation, False otherwise.
    """
    return activation_supported_on_target(node, neutron_target_spec)

def convert(self, node: Node):
self.assert_convertible(node)

Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Copyright 2025 NXP
# Copyright 2025-2026 NXP
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
Expand All @@ -7,9 +7,13 @@
CustomDelegationOptions,
)
from executorch.backends.nxp.backend.ir.converter.node_converter import NodeConverter
from executorch.backends.nxp.backend.neutron_operator_support import (
activation_supported_on_target
)
from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import (
BuiltinOperator,
)
from executorch.backends.nxp.backend.neutron_target_spec import NeutronTargetSpec
from torch.fx import Node
from torch.nn import Parameter

Expand All @@ -24,6 +28,15 @@
) -> bool:
return True

@staticmethod
def _is_supported_on_target(
    node: Node,
    neutron_target_spec: NeutronTargetSpec,
    parameters_mapping: dict[str, Parameter],
    custom_delegation_options: CustomDelegationOptions,
) -> bool:
    """Report whether this activation node can be delegated to the Neutron target.

    Delegates the decision to the shared activation check, which queries the
    target platform's properties.

    :param node: The activation node being considered for delegation.
    :param neutron_target_spec: Object for querying the target platform's properties.
    :param parameters_mapping: Mapping from parameter names to their data (unused here).
    :param custom_delegation_options: User-provided delegation overrides (unused here).
    :return: True if the target supports this activation, False otherwise.
    """
    return activation_supported_on_target(node, neutron_target_spec)

def convert(self, node: Node):
self.assert_convertible(node)

Expand Down
30 changes: 28 additions & 2 deletions backends/nxp/backend/neutron_operator_support.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,15 @@
# Copyright 2025 NXP
# Copyright 2025-2026 NXP
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from executorch.backends.nxp.backend.neutron_target_spec import NeutronTargetSpec

from executorch.backends.nxp.backend.data_format import NXP_NODE_FORMAT
from executorch.backends.nxp.backend.edge_helper import input_tensor
from executorch.backends.nxp.backend.ir.converter.conversion.translator import (
dims_to_channels_last,
)
from torch.fx import Node

def is_tensor_invariant_permutation(
input_shape: list[int], permutation: list[int]
Expand Down Expand Up @@ -77,3 +82,24 @@
return True

return False


def activation_supported_on_target(
node: Node, neutron_target_spec: NeutronTargetSpec
) -> bool:
"""This function determines if the current NeutronSoftware properly supports an activation operator represented by the given node.

:param node: The node representing the activation operator.
:param neutron_target_spec: Object for querying the target platform to retrieve its properties.
"""
input_shape = list(input_tensor(node, 0).shape)
if node.args[0].meta[NXP_NODE_FORMAT].is_channels_first():
input_shape = dims_to_channels_last(input_shape)

n = 1 if len(input_shape) == 1 else input_shape[0]
c = input_shape[-1]
num_macs = neutron_target_spec.get_num_macs()

# activations in Neutron are delegable only
# if `num_channels` % `num_macs` == 0 and `num_batches` == 1
Copy link
Collaborator

@MartinPavella MartinPavella Mar 11, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is incorrect.

You call this function in every activation converter's is_supported_on_target() method. So this condition has to always be satisfied to delegate an activation. But operators which can use the Custom Activation feature of Neutron aren't restricted like this.

For example test_leaky_relu_converter.py contained tests with input shape (23, ), and it delegated successfully.

Your task was aimed only at solving the issue when the activations were the only ops in the model. But here you are introducing a check which affects every case (including models with multiple nodes), and it breaks existing working functionality.

I see you changed all existing tests to satisfy this condition. But the tests were passing, and they should still be passing if Neutron supports them.

Our goal is to delegate as many operators to Neutron as possible. Therefore, we cannot introduce arbitrary restrictions like this. Every single test that started failing when you introduced this change must still be passing. You cannot just modify the tests and pretend it didn't happen. If a test fails -> there is an issue. In this case, the issue was caused by your changes.


It appears you have misunderstood the task. In clamp_converter.py, you have removed the method supports_partitioning_result(). What was the motivation for this? Instead, you should have added this method to every "activation_converter" and used it to prohibit delegation if the activation was the only node in the partition (and perhaps consider some operator-specific conditions).

Unless I'm missing something, a complete rework is required.

Copy link
Contributor Author

@novak-vaclav novak-vaclav Mar 11, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

As I told you and the team multiple times at the regular meetups, I noticed that the converters did NOT check whether the node is delegable. Such functionality was not implemented, when in my opinion it should have been. I also believe that the absence of such functionality was the root cause of ReLU not being delegated when there are no other nodes in the partition (aside from QDQ nodes of course).
In the Neutron Converter documentation, there is a list of conditions, such as num_channels % num_macs == 0 that are not being checked. That is the whole motivation behind all these changes.
The tests needed to be modified to correctly reflect the newly imposed restrictions. I wanted to adhere to the documentation the Neutron team provided, however if you think it is better to start checking the requirements only when we encounter a situation when it stops working, that is fine by me. Just wanted to do the due diligence of properly implementing the conversion of one of the most important operators there are, such as ReLU or Sigmoid.
As I am looking at it right now, some of the code is quite old, for example the functionality concerning ReLU. Maybe the restrictions did not exist back then.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Let's not escalate this.

The bottom line is this:

  • You wanted to adhere to the Neutron documentation, even in cases when we have working implementation which contradicts the documentation. So you prohibited these cases, which solved the issue of your ticket, but it also broke some supported cases.
  • I think we should support as many cases as possible. We know the Neutron documentation is not 100% accurate, so I think prohibiting working cases just because they contradict the docs is not the way to go. Based on this opinion, the solution should have been different.

We can discuss it tomorrow if you want.

P.S.
I didn't mean to sound aggressive in my first comment. I apologize if my phrasing was inappropriate.

return n == 1 and c % num_macs == 0
Original file line number Diff line number Diff line change
Expand Up @@ -30,12 +30,12 @@ def reseed_model_per_test_run():


class ConvBlocksWithAbs(torch.nn.Module):
def __init__(self, conv_in_channels: int = 3):
def __init__(self, conv_in_channels: int = 3, conv_out_channels: int = 8):
super().__init__()
self.block1 = torch.nn.Sequential(
torch.nn.Conv2d(
in_channels=conv_in_channels,
out_channels=3,
out_channels=conv_out_channels,
kernel_size=(2, 2),
stride=(2, 2),
),
Expand Down Expand Up @@ -64,8 +64,10 @@ def forward(self, x):
return x.abs()


def test_conv_abs(mocker, use_qat, input_shape: tuple[int] = (1, 3, 112, 112)):
model = ConvBlocksWithAbs(conv_in_channels=input_shape[1])
def test_conv_abs(mocker, use_qat, input_shape: tuple[int] = (1, 8, 112, 112)):
model = ConvBlocksWithAbs(
conv_in_channels=input_shape[1], conv_out_channels=input_shape[1]
)

converter_spy = mocker.spy(EdgeProgramToIRConverter, "convert_program")

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ def forward(self, x):
],
)
def test_convert_clamp__supported(mocker, min, max):
input_shape = (23,)
input_shape = (24,)
model = AddClampModule(min, max)

converter_spy = mocker.spy(EdgeProgramToIRConverter, "convert_program")
Expand Down Expand Up @@ -100,21 +100,31 @@ def test_convert_clamp__supported(mocker, min, max):

# noinspection PyShadowingBuiltins
@pytest.mark.parametrize(
"min, max",
"input_shape, min, max",
[
pytest.param(0, 6, id="min = 0, max = 6 (Relu6)"),
pytest.param(0, None, id="min = 0, max = None (Relu)"),
pytest.param(
(1, 8, 8, 7),
0,
6,
id="min = 0, max = 6 (Relu6), num_channels not divisible by NUM_MACS",
),
pytest.param(
(1, 8, 8, 7),
0,
None,
id="min = 0, max = None (Relu), num_channels not divisible by NUM_MACS",
),
pytest.param(
(2, 16, 8, 8), 0, None, id="min = 0, max = None (Relu), num_batches != 1"
),
],
)
def test_convert_clamp__single_op__not_delegated_variants(min, max):
# Test that Clamp representable as Relu6 or Relu is NOT delegated, because it is a single op model which is not
# supported by Neutron.
input_shape = (23,)
def test_convert_clamp__unsupported_shape(input_shape, min, max):
    # `clamp` is converted to a Relu-style activation, which the Neutron target
    # only supports for certain input shapes; these parametrized shapes violate
    # those restrictions, so the op must remain in the Aten graph.
    model = ClampModule(min, max)

    delegated_ep = to_quantized_edge_program(model, input_shape).exported_program()

    # Make sure the `clamp` was NOT delegated.
    assert not graph_contains_any_of_ops(delegated_ep.graph, [ExecutorchDelegateCall])
    assert graph_contains_any_of_ops(delegated_ep.graph, [Clamp])

Expand All @@ -129,7 +139,7 @@ def test_convert_clamp__single_op__not_delegated_variants(min, max):
)
def test_convert_clamp__single_op__delegated_variants(mocker, min, max):
# Test that Clamp representable as Relu0To1 or ReluN1To1 is delegated, even though it is a single op model.
input_shape = (23,)
input_shape = (24,)
model = ClampModule(min, max)

converter_spy = mocker.spy(EdgeProgramToIRConverter, "convert_program")
Expand Down Expand Up @@ -166,7 +176,7 @@ def test_convert_clamp__single_op__delegated_variants(mocker, min, max):
],
)
def test_convert_clamp__no_delegation__unsupported_bounds(min, max):
input_shape = (23,)
input_shape = (24,)
model = AddClampModule(min, max)

delegated_ep = to_quantized_edge_program(model, input_shape).exported_program()
Expand Down
Loading
Loading