diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp index 4d2f951481518b..4fd6bd93895f2f 100644 --- a/src/coreclr/jit/assertionprop.cpp +++ b/src/coreclr/jit/assertionprop.cpp @@ -1651,9 +1651,12 @@ void Compiler::optCreateComplementaryAssertion(AssertionIndex assertionIndex) optMapComplementary(optAddAssertion(reversed), assertionIndex); } else if (candidateAssertion.KindIs(OAK_LT_UN, OAK_LE_UN) && - candidateAssertion.GetOp2().KindIs(O2K_CHECKED_BOUND_ADD_CNS)) + candidateAssertion.GetOp2().KindIs(O2K_CHECKED_BOUND_ADD_CNS, O2K_VN)) { - // Assertions such as "X > checkedBndVN" aren't very useful. + // Assertions such as "X u> checkedBndVN" or "X u> Y" aren't very useful: unsigned ">" / + // ">=" complementaries don't tighten ranges (they describe non-contiguous regions in the + // signed-int domain that Range can't represent) and rarely match a direct relop fold + // either. Skip the complementary to keep assertion-table pressure down. return; } else if (AssertionDsc::IsReversible(candidateAssertion.GetKind())) @@ -1825,15 +1828,16 @@ AssertionInfo Compiler::optCreateJTrueBoundsAssertion(GenTree* tree) } // "X relop Y" where neither side is a constant nor a checked bound. - // For now, we only create such assertions for signed comparisons of int32 (and smaller, after promotion). + // We create such assertions for both signed and unsigned comparisons of int32 (and smaller, after promotion). // This widens what global assertion prop can reason about: e.g. "b > a" combined with "a > 10" - // can be used to deduce "b > 10". + // can be used to deduce "b > 10". The unsigned variant enables folding patterns like + // "(uint)(i + 1) <= (uint)len" given "(uint)i < (uint)len". // // To keep table pressure under control, we only create the assertion if at least one of the // operands already has assertions registered. 
Otherwise the new assertion has no other facts // it can chain with and is unlikely to enable any deduction, while still consuming a slot // (and potentially crowding out useful ones). - if (!isUnsignedRelop && (op1VN != op2VN) && !vnStore->IsVNConstant(op1VN) && !vnStore->IsVNConstant(op2VN) && + if ((op1VN != op2VN) && !vnStore->IsVNConstant(op1VN) && !vnStore->IsVNConstant(op2VN) && (optAssertionHasAssertionsForVN(op1VN) || optAssertionHasAssertionsForVN(op2VN))) { AssertionDsc dsc = AssertionDsc::CreateRelopVN(this, relopFunc, op1VN, op2VN); diff --git a/src/coreclr/jit/rangecheck.cpp b/src/coreclr/jit/rangecheck.cpp index 34de76d5adc9c6..622a2dd55e4d0d 100644 --- a/src/coreclr/jit/rangecheck.cpp +++ b/src/coreclr/jit/rangecheck.cpp @@ -667,6 +667,100 @@ Range RangeCheck::GetRangeFromAssertions(Compiler* comp, ValueNum num, ASSERT_VA return GetRangeFromAssertionsWorker(comp, num, assertions, budget, &set); } +//------------------------------------------------------------------------ +// TryFoldRelopOfAddByOneFromAssertions: Try to fold "<relop>(ADD(base, 1), other)" +// using an asserted "base < other". +// +// The proof: an asserted "base < other" implies "base <= other - 1", and since "other" is +// a 32-bit value, "base + 1" cannot overflow past "other" in either signedness: +// * Signed: base < other => base <= INT32_MAX - 1 => base + 1 <= other +// (no signed overflow). +// * Unsigned: (uint)base < (uint)other => (uint)base <= UINT32_MAX - 1 +// => (uint)(base + 1) <= (uint)other (no unsigned wrap). +// This catches patterns like "(uint)(offset + 1) <= (uint)length" given +// "(uint)offset < (uint)length", which is the typical "Slice(offset + 1)" check. +// +// Only "<=" / ">" can be concluded once normalized to "ADD(base, 1) <relop> other"; +// "<", ">=", "==", "!=" remain undetermined and are reported as not-folded. 
+// +// Arguments: +// comp - the compiler instance +// op1VN, op2VN - the operands of the relop being evaluated; op1VN is expected to be +// ADD(base, 1) and op2VN the "other" side +// cmpOper - the (signed-form) compare operator +// isUnsigned - true if the relop is unsigned +// assertions - the live assertions +// pResult - on success, set to [1..1] for GT_LE or [0..0] for GT_GT +// +// Return Value: +// true if a fold succeeded; false otherwise (caller should leave the range as-is). +// +bool RangeCheck::TryFoldRelopOfAddByOneFromAssertions(Compiler* comp, + ValueNum op1VN, + ValueNum op2VN, + genTreeOps cmpOper, + bool isUnsigned, + ASSERT_VALARG_TP assertions, + Range* pResult) { + if ((cmpOper != GT_LE) && (cmpOper != GT_GT)) + { + return false; + } + + // Require op1 to be ADD(base, 1) where base has assertions registered. This keeps the + // assertion-table scan proportional to cases that can actually benefit. + ValueNum base; + int addCns; + if (!comp->vnStore->IsVNBinFuncWithConst(op1VN, VNF_ADD, &base, &addCns) || (addCns != 1) || + (base == op2VN) || !comp->optAssertionHasAssertionsForVN(base)) + { + return false; + } + + BitVecOps::Iter iter(comp->apTraits, assertions); + unsigned assertionBit = 0; + while (iter.NextElem(&assertionBit)) + { + const Compiler::AssertionDsc& a = comp->optGetAssertion(GetAssertionIndex(assertionBit)); + + // We only consume O2K_VN-style relop assertions; checked-bound assertions + // already have dedicated handling elsewhere. + if (!a.IsRelop() || !a.GetOp2().KindIs(Compiler::O2K_VN)) + { + continue; + } + + ValueNum aOp1 = a.GetOp1().GetVN(); + ValueNum aOp2 = a.GetOp2().GetVN(); + bool aIsUnsigned; + genTreeOps aCmp = Compiler::AssertionDsc::ToCompareOper(a.GetKind(), &aIsUnsigned); + + // Normalize so that the assertion reads "base <relop> op2VN". 
+ if ((aOp1 == op2VN) && (aOp2 == base)) + { + aCmp = GenTree::SwapRelop(aCmp); + } + else if ((aOp1 != base) || (aOp2 != op2VN)) + { + continue; + } + + // Signedness must match: e.g. signed "base < other" doesn't imply + // "(uint)base != UINT32_MAX" (negative base has bit 31 set), so the + // no-overflow argument doesn't carry across signedness. + // Also require a strict less-than: only "<" gives "base + 1 <= other". + if ((aIsUnsigned != isUnsigned) || (aCmp != GT_LT)) + { + continue; + } + + *pResult = Range(Limit(Limit::keConstant, (cmpOper == GT_LE) ? 1 : 0)); + return true; + } + return false; +} + //------------------------------------------------------------------------ // GetRangeFromAssertionsWorker: Cheaper version of TryGetRange that is based purely on assertions // and does not require a full range analysis based on SSA. @@ -902,6 +996,15 @@ Range RangeCheck::GetRangeFromAssertionsWorker( } } } + + // Generic fold: see TryFoldRelopOfAddByOneFromAssertions. + Range foldedRange = Range(Limit(Limit::keUndef)); + if (!result.IsSingleValueConstant() && + TryFoldRelopOfAddByOneFromAssertions(comp, funcApp.m_args[0], funcApp.m_args[1], cmpOper, + isUnsigned, assertions, &foldedRange)) + { + result = foldedRange; + } } break; } @@ -1411,10 +1514,6 @@ void RangeCheck::MergeEdgeAssertionsWorker(Compiler* comp ValueNum op2VN = curAssertion.GetOp2().GetVN(); cmpOper = Compiler::AssertionDsc::ToCompareOper(curAssertion.GetKind(), &isUnsigned); - if (isUnsigned) - { - continue; - } ValueNum otherVN; if (op1VN == normalLclVN) @@ -1431,6 +1530,17 @@ void RangeCheck::MergeEdgeAssertionsWorker(Compiler* comp cmpOper = GenTree::SwapRelop(cmpOper); } + // For unsigned compares, we can only derive a useful constant range when the + // direction is "<" or "<=". The asserted "X u< Y" implies X is in [0, Y_upper - 1] + // only when otherVN is known to be non-negative; otherwise (uint)Y could be huge + // and we cannot bound X within the signed-int domain. 
Skip unsigned ">/>=" since + // they would produce non-contiguous ranges (existing behavior at the tightening + // step below also assumes lLimit = 0 for unsigned LT/LE). + if (isUnsigned && (cmpOper != GT_LT) && (cmpOper != GT_LE)) + { + continue; + } + if (budget <= 0) { continue; @@ -1443,6 +1553,12 @@ void RangeCheck::MergeEdgeAssertionsWorker(Compiler* comp continue; } + if (isUnsigned && (otherRange.LowerLimit().GetConstant() < 0)) + { + // For unsigned LT/LE we need otherVN's range to be non-negative. + continue; + } + // Derive a constant limit for normalLclVN from the constant range of otherVN. // We use the most useful bound for each direction of the comparison. int derivedLimit; diff --git a/src/coreclr/jit/rangecheck.h b/src/coreclr/jit/rangecheck.h index ea399f334edbeb..62d3c7d01bf9e1 100644 --- a/src/coreclr/jit/rangecheck.h +++ b/src/coreclr/jit/rangecheck.h @@ -783,6 +783,16 @@ class RangeCheck int budget, ValueNumStore::SmallValueNumSet* visited); + // Try to fold "<relop>(ADD(base, 1), other)" using an asserted "base < other". + // Returns true if a fold succeeded and writes [0..0] / [1..1] into '*pResult'. + static bool TryFoldRelopOfAddByOneFromAssertions(Compiler* comp, + ValueNum op1VN, + ValueNum op2VN, + genTreeOps cmpOper, + bool isUnsigned, + ASSERT_VALARG_TP assertions, + Range* pResult); + int GetArrLength(ValueNum vn); // Check whether the computed range is within 0 and upper bounds. 
This function diff --git a/src/tests/JIT/opt/RangeChecks/ElidedBoundsChecks.cs b/src/tests/JIT/opt/RangeChecks/ElidedBoundsChecks.cs index d6196e0a1922a9..e31d07073aee9f 100644 --- a/src/tests/JIT/opt/RangeChecks/ElidedBoundsChecks.cs +++ b/src/tests/JIT/opt/RangeChecks/ElidedBoundsChecks.cs @@ -95,6 +95,18 @@ static bool TryStripFirstChar(ref ReadOnlySpan<char> span, char value) return false; } + [MethodImpl(MethodImplOptions.NoInlining)] + static ReadOnlySpan<char> SliceOffsetPlusOne(ReadOnlySpan<char> span, int offset) + { + // X64-NOT: ThrowArgumentOutOfRangeException + // ARM64-NOT: ThrowArgumentOutOfRangeException + if ((uint)offset < (uint)span.Length) + { + return span.Slice(offset + 1); + } + return span; + } + [Fact] public static int TestEntryPoint() { @@ -139,6 +151,12 @@ public static int TestEntryPoint() if (TryStripFirstChar(ref chars, 'h') != false) return 0; + if (SliceOffsetPlusOne("hello".AsSpan(), 1).Length != 3) + return 0; + + if (SliceOffsetPlusOne("hello".AsSpan(), 100).Length != 5) + return 0; + return 100; } }