Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions tests/validation/fixtures/ConvolutionLayerFixture.h
Original file line number Diff line number Diff line change
Expand Up @@ -198,6 +198,11 @@ class ConvolutionValidationGenericFixture : public framework::Fixture
bool padded_weights = false,
bool updated_sq_info_after_config = false)
{
#ifndef ARM_COMPUTE_CPU_ENABLED
ARM_COMPUTE_UNUSED(updated_sq_info_after_config);
ARM_COMPUTE_ERROR_ON(updated_sq_info_after_config);
#endif // ARM_COMPUTE_CPU_ENABLED

if (std::is_same<TensorType, Tensor>::value && // Cpu
(data_type == DataType::F16 || weights_data_type == DataType::F16) && !CPUInfo::get().has_fp16())
{
Expand Down Expand Up @@ -236,13 +241,15 @@ class ConvolutionValidationGenericFixture : public framework::Fixture
_use_dynamic_output_quant = true;
}

#ifdef ARM_COMPUTE_CPU_ENABLED
if (updated_sq_info_after_config)
{
_target = compute_gemmlowp_target_for_updated_sq_info_after_config(
input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, dilation, act_info,
pre_pad_layer, padded_weights);
}
else
#endif // ARM_COMPUTE_CPU_ENABLED
{
_target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights,
dilation, act_info, pre_pad_layer, padded_weights);
Expand Down Expand Up @@ -484,6 +491,7 @@ class ConvolutionValidationGenericFixture : public framework::Fixture
return dst;
}

#ifdef ARM_COMPUTE_CPU_ENABLED
// Compute the target when updating static quantization information after configuration.
TensorType compute_gemmlowp_target_for_updated_sq_info_after_config(TensorShape input_shape,
TensorShape weights_shape,
Expand All @@ -496,6 +504,9 @@ class ConvolutionValidationGenericFixture : public framework::Fixture
PaddingList pre_pad_layer = PaddingList({}),
bool padded_weights = false)
{
// Runtime assert rather than static_assert because this template gets
// instantiated with a FunctionType other than NEGEMMConvolutionLayer. The
// actual function call is prevented by runtime conditions at the call site.
ARM_COMPUTE_ASSERT((std::is_same<FunctionType, NEGEMMConvolutionLayer>::value == true));
ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);

Expand Down Expand Up @@ -612,6 +623,7 @@ class ConvolutionValidationGenericFixture : public framework::Fixture
}
return dst;
}
#endif // ARM_COMPUTE_CPU_ENABLED

SimpleTensor<TO> compute_reference(const TensorShape &input_shape,
const TensorShape &weights_shape,
Expand Down
14 changes: 13 additions & 1 deletion tests/validation/fixtures/GEMMLowpFixture.h
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2025 Arm Limited.
* Copyright (c) 2017-2026 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
Expand Down Expand Up @@ -105,6 +105,7 @@ struct TensorFillInfo
int32_t hash{0};
};

#ifdef ARM_COMPUTE_CPU_ENABLED
template <typename TensorType,
typename AccessorType,
typename FunctionType,
Expand All @@ -129,6 +130,9 @@ TensorType compute_gemmlowp_target_for_updated_sq_info_after_config(
DataType data_type_output = DataType::UNKNOWN,
const ActivationLayerInfo &act_info = ActivationLayerInfo())
{
// Runtime assert rather than static_assert because this template gets
// instantiated with a FunctionType other than NEGEMMLowpMatrixMultiplyCore. The
// actual function call is prevented by runtime conditions at the call site.
ARM_COMPUTE_ASSERT((std::is_same<FunctionType, NEGEMMLowpMatrixMultiplyCore>::value == true));
ARM_COMPUTE_ASSERT(is_data_type_quantized_asymmetric(data_type_a));
ARM_COMPUTE_ASSERT(is_data_type_quantized_asymmetric(data_type_b));
Expand Down Expand Up @@ -222,6 +226,7 @@ TensorType compute_gemmlowp_target_for_updated_sq_info_after_config(
gemmlowp.run();
return output;
}
#endif // ARM_COMPUTE_CPU_ENABLED

template <typename TensorType,
typename AccessorType,
Expand Down Expand Up @@ -730,6 +735,12 @@ class GEMMLowpGenericMatrixMultiplyCoreFusedOffsetOutputValidationFixture : publ
bool updated_sq_info_after_config = false,
const ActivationLayerInfo &act_info = ActivationLayerInfo())
{
#ifndef ARM_COMPUTE_CPU_ENABLED
ARM_COMPUTE_UNUSED(updated_sq_info_after_config, act_info);
ARM_COMPUTE_ERROR_ON(updated_sq_info_after_config);
#endif // ARM_COMPUTE_CPU_ENABLED

#ifdef ARM_COMPUTE_CPU_ENABLED
if (updated_sq_info_after_config)
{
return compute_gemmlowp_target_for_updated_sq_info_after_config<
Expand All @@ -739,6 +750,7 @@ class GEMMLowpGenericMatrixMultiplyCoreFusedOffsetOutputValidationFixture : publ
arm_compute::DataType::UNKNOWN, act_info);
}
else
#endif // ARM_COMPUTE_CPU_ENABLED
{
return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d,
reinterpret_output_as_3d, qasymm8_t, true, run_twice>(
Expand Down