Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
47 changes: 30 additions & 17 deletions flashlight/fl/test/autograd/AutogradNormalizationTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -70,8 +70,10 @@ TEST(AutogradNormalizationTest, BatchNormEvalModeOutputSingleAxis) {

auto expectedOut = (thisInput - thisMean) / std::sqrt(thisVar + 1E-5);
expectedOut = expectedOut * thisWeight + thisBias;
ASSERT_TRUE(allClose(
out.tensor()(sel[0], sel[1], sel[2], sel[3]), expectedOut, 1E-5));
ASSERT_TRUE(
allClose(
out.tensor()(sel[0], sel[1], sel[2], sel[3]), expectedOut, 1E-5)
);
}

// test on empty weights and bias
Expand All @@ -93,8 +95,10 @@ TEST(AutogradNormalizationTest, BatchNormEvalModeOutputSingleAxis) {
auto thisVar = runningVar.tensor().flatten()(i).scalar<float>();

auto expectedOut = (thisInput - thisMean) / std::sqrt(thisVar + 1E-5);
ASSERT_TRUE(allClose(
out.tensor()(sel[0], sel[1], sel[2], sel[3]), expectedOut, 1E-5));
ASSERT_TRUE(
allClose(
out.tensor()(sel[0], sel[1], sel[2], sel[3]), expectedOut, 1E-5)
);
}
}

Expand Down Expand Up @@ -123,9 +127,7 @@ TEST(AutogradNormalizationTest, BatchNormEvalModeOutputMultipleAxis) {
1E-5
));
for(int i = 0; i < nfeatures; ++i) {
std::array<fl::Index, 4> sel = {
i % 13, (i / 13) % 13, (i / 13) / 13, fl::span
};
std::array<fl::Index, 4> sel = {i % 13, (i / 13) % 13, (i / 13) / 13, fl::span};
auto thisInput = input.tensor()(sel[0], sel[1], sel[2], sel[3]);
auto thisMean = runningMean.tensor().flatten()(i).scalar<float>();
auto thisVar = runningVar.tensor().flatten()(i).scalar<float>();
Expand All @@ -135,8 +137,10 @@ TEST(AutogradNormalizationTest, BatchNormEvalModeOutputMultipleAxis) {
auto expectedOut = (thisInput - thisMean) / std::sqrt(thisVar + 1e-5);
expectedOut = expectedOut * thisWeight + thisBias;

ASSERT_TRUE(allClose(
out.tensor()(sel[0], sel[1], sel[2], sel[3]), expectedOut, 1e-4));
ASSERT_TRUE(
allClose(
out.tensor()(sel[0], sel[1], sel[2], sel[3]), expectedOut, 1e-4)
);
}

// test on empty weights and bias
Expand All @@ -153,15 +157,20 @@ TEST(AutogradNormalizationTest, BatchNormEvalModeOutputMultipleAxis) {
));
for(int i = 0; i < nfeatures; ++i) {
std::array<fl::Index, 4> sel = {
i % 13, (i / 13) % 13, (i / 13) / 13, fl::span
i % 13,
(i / 13) % 13,
(i / 13) / 13,
fl::span
};
auto thisInput = input.tensor()(sel[0], sel[1], sel[2], sel[3]);
auto thisMean = runningMean.tensor().flatten()(i).scalar<float>();
auto thisVar = runningVar.tensor().flatten()(i).scalar<float>();

auto expectedOut = (thisInput - thisMean) / std::sqrt(thisVar + 1e-5);
ASSERT_TRUE(allClose(
out.tensor()(sel[0], sel[1], sel[2], sel[3]), expectedOut, 5e-5));
ASSERT_TRUE(
allClose(
out.tensor()(sel[0], sel[1], sel[2], sel[3]), expectedOut, 5e-5)
);
}
}

Expand Down Expand Up @@ -230,7 +239,10 @@ TEST(AutogradNormalizationTest, BatchNormTrainModeOutputMultipleAxis) {

for(int i = 0; i < nfeatures; ++i) {
std::array<fl::Index, 4> sel = {
i % 13, (i / 13) % 13, (i / 13) / 13, fl::span
i % 13,
(i / 13) % 13,
(i / 13) / 13,
fl::span
};
auto thisInput = input.tensor()(sel[0], sel[1], sel[2], sel[3]);
auto thisMean = avg.tensor().flatten()(i).scalar<float>();
Expand All @@ -240,8 +252,10 @@ TEST(AutogradNormalizationTest, BatchNormTrainModeOutputMultipleAxis) {

auto expectedOut = (thisInput - thisMean) / std::sqrt(thisVar + 1e-5);
expectedOut = expectedOut * thisWeight + thisBias;
ASSERT_TRUE(allClose(
out.tensor()(sel[0], sel[1], sel[2], sel[3]), expectedOut, 1e-5));
ASSERT_TRUE(
allClose(
out.tensor()(sel[0], sel[1], sel[2], sel[3]), expectedOut, 1e-5)
);
}
}

Expand Down Expand Up @@ -528,8 +542,7 @@ TEST_F(AutogradTestF16, LayerNormJacobianF16) {

std::vector<int> featAxes = {0, 1, 2, 3};
const float inputScale = 4.0; // scale the input to prevent grad underflow
auto input =
Variable(inputScale * fl::rand({2, 2, 2, 4}, fl::dtype::f16), true);
auto input = Variable(inputScale * fl::rand({2, 2, 2, 4}, fl::dtype::f16), true);
auto nfeatures = 1;
for(auto ax : featAxes)
nfeatures *= input.dim(ax);
Expand Down
1 change: 1 addition & 0 deletions uncrustify.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ indent_func_class_param = false
indent_func_ctor_var_param = false
indent_template_param = false
indent_cpp_lambda_body = true
indent_cpp_lambda_only_once = true
use_indent_func_call_param = true
donot_indent_func_def_close_paren = true
align_func_params = false
Expand Down
Loading