1//===- CorrelatedValuePropagation.cpp - Propagate CFG-derived info --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the Correlated Value Propagation pass.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/Transforms/Scalar/CorrelatedValuePropagation.h"
14#include "llvm/ADT/DepthFirstIterator.h"
15#include "llvm/ADT/SmallVector.h"
16#include "llvm/ADT/Statistic.h"
17#include "llvm/Analysis/DomTreeUpdater.h"
18#include "llvm/Analysis/GlobalsModRef.h"
19#include "llvm/Analysis/InstructionSimplify.h"
20#include "llvm/Analysis/LazyValueInfo.h"
21#include "llvm/Analysis/ValueTracking.h"
22#include "llvm/IR/Attributes.h"
23#include "llvm/IR/BasicBlock.h"
24#include "llvm/IR/CFG.h"
25#include "llvm/IR/Constant.h"
26#include "llvm/IR/ConstantRange.h"
27#include "llvm/IR/Constants.h"
28#include "llvm/IR/DerivedTypes.h"
29#include "llvm/IR/Function.h"
30#include "llvm/IR/IRBuilder.h"
31#include "llvm/IR/InstrTypes.h"
32#include "llvm/IR/Instruction.h"
33#include "llvm/IR/Instructions.h"
34#include "llvm/IR/IntrinsicInst.h"
35#include "llvm/IR/Operator.h"
36#include "llvm/IR/PassManager.h"
37#include "llvm/IR/PatternMatch.h"
38#include "llvm/IR/Type.h"
39#include "llvm/IR/Value.h"
40#include "llvm/Support/Casting.h"
41#include "llvm/Transforms/Utils/Local.h"
42#include <cassert>
43#include <optional>
44#include <utility>
45
46using namespace llvm;
47
48#define DEBUG_TYPE "correlated-value-propagation"
49
// Pass-wide statistics. Each STATISTIC expands to a counter bumped by the
// corresponding transform below and reported with -stats.
STATISTIC(NumPhis, "Number of phis propagated");
STATISTIC(NumPhiCommon, "Number of phis deleted via common incoming value");
STATISTIC(NumSelects, "Number of selects propagated");
STATISTIC(NumCmps, "Number of comparisons propagated");
STATISTIC(NumReturns, "Number of return values propagated");
STATISTIC(NumDeadCases, "Number of switch cases removed");
STATISTIC(NumSDivSRemsNarrowed,
          "Number of sdivs/srems whose width was decreased");
STATISTIC(NumSDivs, "Number of sdiv converted to udiv");
STATISTIC(NumUDivURemsNarrowed,
          "Number of udivs/urems whose width was decreased");
STATISTIC(NumAShrsConverted, "Number of ashr converted to lshr");
STATISTIC(NumAShrsRemoved, "Number of ashr removed");
STATISTIC(NumSRems, "Number of srem converted to urem");
STATISTIC(NumSExt, "Number of sext converted to zext");
STATISTIC(NumSIToFP, "Number of sitofp converted to uitofp");
STATISTIC(NumSICmps, "Number of signed icmp preds simplified to unsigned");
STATISTIC(NumAnd, "Number of ands removed");
STATISTIC(NumNW, "Number of no-wrap deductions");
STATISTIC(NumNSW, "Number of no-signed-wrap deductions");
STATISTIC(NumNUW, "Number of no-unsigned-wrap deductions");
STATISTIC(NumAddNW, "Number of no-wrap deductions for add");
STATISTIC(NumAddNSW, "Number of no-signed-wrap deductions for add");
STATISTIC(NumAddNUW, "Number of no-unsigned-wrap deductions for add");
STATISTIC(NumSubNW, "Number of no-wrap deductions for sub");
STATISTIC(NumSubNSW, "Number of no-signed-wrap deductions for sub");
STATISTIC(NumSubNUW, "Number of no-unsigned-wrap deductions for sub");
STATISTIC(NumMulNW, "Number of no-wrap deductions for mul");
STATISTIC(NumMulNSW, "Number of no-signed-wrap deductions for mul");
STATISTIC(NumMulNUW, "Number of no-unsigned-wrap deductions for mul");
STATISTIC(NumShlNW, "Number of no-wrap deductions for shl");
STATISTIC(NumShlNSW, "Number of no-signed-wrap deductions for shl");
STATISTIC(NumShlNUW, "Number of no-unsigned-wrap deductions for shl");
STATISTIC(NumAbs, "Number of llvm.abs intrinsics removed");
STATISTIC(NumOverflows, "Number of overflow checks removed");
STATISTIC(NumSaturating,
    "Number of saturating arithmetics converted to normal arithmetics");
STATISTIC(NumNonNull, "Number of function pointer arguments marked non-null");
STATISTIC(NumCmpIntr, "Number of llvm.[us]cmp intrinsics removed");
STATISTIC(NumMinMax, "Number of llvm.[us]{min,max} intrinsics removed");
STATISTIC(NumSMinMax,
          "Number of llvm.s{min,max} intrinsics simplified to unsigned");
STATISTIC(NumUDivURemsNarrowedExpanded,
          "Number of bound udiv's/urem's expanded");
STATISTIC(NumNNeg, "Number of zext/uitofp non-negative deductions");
95
96static Constant *getConstantAt(Value *V, Instruction *At, LazyValueInfo *LVI) {
97 if (Constant *C = LVI->getConstant(V, CxtI: At))
98 return C;
99
100 // TODO: The following really should be sunk inside LVI's core algorithm, or
101 // at least the outer shims around such.
102 auto *C = dyn_cast<CmpInst>(Val: V);
103 if (!C)
104 return nullptr;
105
106 Value *Op0 = C->getOperand(i_nocapture: 0);
107 Constant *Op1 = dyn_cast<Constant>(Val: C->getOperand(i_nocapture: 1));
108 if (!Op1)
109 return nullptr;
110
111 return LVI->getPredicateAt(Pred: C->getPredicate(), V: Op0, C: Op1, CxtI: At,
112 /*UseBlockValue=*/false);
113}
114
115static bool processSelect(SelectInst *S, LazyValueInfo *LVI) {
116 if (S->getType()->isVectorTy() || isa<Constant>(Val: S->getCondition()))
117 return false;
118
119 bool Changed = false;
120 for (Use &U : make_early_inc_range(Range: S->uses())) {
121 auto *I = cast<Instruction>(Val: U.getUser());
122 Constant *C;
123 if (auto *PN = dyn_cast<PHINode>(Val: I))
124 C = LVI->getConstantOnEdge(V: S->getCondition(), FromBB: PN->getIncomingBlock(U),
125 ToBB: I->getParent(), CxtI: I);
126 else
127 C = getConstantAt(V: S->getCondition(), At: I, LVI);
128
129 auto *CI = dyn_cast_or_null<ConstantInt>(Val: C);
130 if (!CI)
131 continue;
132
133 U.set(CI->isOne() ? S->getTrueValue() : S->getFalseValue());
134 Changed = true;
135 ++NumSelects;
136 }
137
138 if (Changed && S->use_empty())
139 S->eraseFromParent();
140
141 return Changed;
142}
143
144/// Try to simplify a phi with constant incoming values that match the edge
145/// values of a non-constant value on all other edges:
146/// bb0:
147/// %isnull = icmp eq i8* %x, null
148/// br i1 %isnull, label %bb2, label %bb1
149/// bb1:
150/// br label %bb2
151/// bb2:
152/// %r = phi i8* [ %x, %bb1 ], [ null, %bb0 ]
153/// -->
154/// %r = %x
155static bool simplifyCommonValuePhi(PHINode *P, LazyValueInfo *LVI,
156 DominatorTree *DT) {
157 // Collect incoming constants and initialize possible common value.
158 SmallVector<std::pair<Constant *, unsigned>, 4> IncomingConstants;
159 Value *CommonValue = nullptr;
160 for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i) {
161 Value *Incoming = P->getIncomingValue(i);
162 if (auto *IncomingConstant = dyn_cast<Constant>(Val: Incoming)) {
163 IncomingConstants.push_back(Elt: std::make_pair(x&: IncomingConstant, y&: i));
164 } else if (!CommonValue) {
165 // The potential common value is initialized to the first non-constant.
166 CommonValue = Incoming;
167 } else if (Incoming != CommonValue) {
168 // There can be only one non-constant common value.
169 return false;
170 }
171 }
172
173 if (!CommonValue || IncomingConstants.empty())
174 return false;
175
176 // The common value must be valid in all incoming blocks.
177 BasicBlock *ToBB = P->getParent();
178 if (auto *CommonInst = dyn_cast<Instruction>(Val: CommonValue))
179 if (!DT->dominates(Def: CommonInst, BB: ToBB))
180 return false;
181
182 // We have a phi with exactly 1 variable incoming value and 1 or more constant
183 // incoming values. See if all constant incoming values can be mapped back to
184 // the same incoming variable value.
185 for (auto &IncomingConstant : IncomingConstants) {
186 Constant *C = IncomingConstant.first;
187 BasicBlock *IncomingBB = P->getIncomingBlock(i: IncomingConstant.second);
188 if (C != LVI->getConstantOnEdge(V: CommonValue, FromBB: IncomingBB, ToBB, CxtI: P))
189 return false;
190 }
191
192 // LVI only guarantees that the value matches a certain constant if the value
193 // is not poison. Make sure we don't replace a well-defined value with poison.
194 // This is usually satisfied due to a prior branch on the value.
195 if (!isGuaranteedNotToBePoison(V: CommonValue, AC: nullptr, CtxI: P, DT))
196 return false;
197
198 // All constant incoming values map to the same variable along the incoming
199 // edges of the phi. The phi is unnecessary.
200 P->replaceAllUsesWith(V: CommonValue);
201 P->eraseFromParent();
202 ++NumPhiCommon;
203 return true;
204}
205
206static Value *getValueOnEdge(LazyValueInfo *LVI, Value *Incoming,
207 BasicBlock *From, BasicBlock *To,
208 Instruction *CxtI) {
209 if (Constant *C = LVI->getConstantOnEdge(V: Incoming, FromBB: From, ToBB: To, CxtI))
210 return C;
211
212 // Look if the incoming value is a select with a scalar condition for which
213 // LVI can tells us the value. In that case replace the incoming value with
214 // the appropriate value of the select. This often allows us to remove the
215 // select later.
216 auto *SI = dyn_cast<SelectInst>(Val: Incoming);
217 if (!SI)
218 return nullptr;
219
220 // Once LVI learns to handle vector types, we could also add support
221 // for vector type constants that are not all zeroes or all ones.
222 Value *Condition = SI->getCondition();
223 if (!Condition->getType()->isVectorTy()) {
224 if (Constant *C = LVI->getConstantOnEdge(V: Condition, FromBB: From, ToBB: To, CxtI)) {
225 if (C->isOneValue())
226 return SI->getTrueValue();
227 if (C->isZeroValue())
228 return SI->getFalseValue();
229 }
230 }
231
232 // Look if the select has a constant but LVI tells us that the incoming
233 // value can never be that constant. In that case replace the incoming
234 // value with the other value of the select. This often allows us to
235 // remove the select later.
236
237 // The "false" case
238 if (auto *C = dyn_cast<Constant>(Val: SI->getFalseValue()))
239 if (auto *Res = dyn_cast_or_null<ConstantInt>(
240 Val: LVI->getPredicateOnEdge(Pred: ICmpInst::ICMP_EQ, V: SI, C, FromBB: From, ToBB: To, CxtI));
241 Res && Res->isZero())
242 return SI->getTrueValue();
243
244 // The "true" case,
245 // similar to the select "false" case, but try the select "true" value
246 if (auto *C = dyn_cast<Constant>(Val: SI->getTrueValue()))
247 if (auto *Res = dyn_cast_or_null<ConstantInt>(
248 Val: LVI->getPredicateOnEdge(Pred: ICmpInst::ICMP_EQ, V: SI, C, FromBB: From, ToBB: To, CxtI));
249 Res && Res->isZero())
250 return SI->getFalseValue();
251
252 return nullptr;
253}
254
255static bool processPHI(PHINode *P, LazyValueInfo *LVI, DominatorTree *DT,
256 const SimplifyQuery &SQ) {
257 bool Changed = false;
258
259 BasicBlock *BB = P->getParent();
260 for (unsigned i = 0, e = P->getNumIncomingValues(); i < e; ++i) {
261 Value *Incoming = P->getIncomingValue(i);
262 if (isa<Constant>(Val: Incoming)) continue;
263
264 Value *V = getValueOnEdge(LVI, Incoming, From: P->getIncomingBlock(i), To: BB, CxtI: P);
265 if (V) {
266 P->setIncomingValue(i, V);
267 Changed = true;
268 }
269 }
270
271 if (Value *V = simplifyInstruction(I: P, Q: SQ)) {
272 P->replaceAllUsesWith(V);
273 P->eraseFromParent();
274 Changed = true;
275 }
276
277 if (!Changed)
278 Changed = simplifyCommonValuePhi(P, LVI, DT);
279
280 if (Changed)
281 ++NumPhis;
282
283 return Changed;
284}
285
// Simplify signed relational icmps to their unsigned form when LVI's ranges
// show this is equivalent, and mark comparisons as samesign when both
// operands provably share sign behavior.
static bool processICmp(ICmpInst *Cmp, LazyValueInfo *LVI) {
  // Only integer (or integer-vector) comparisons are handled.
  if (!Cmp->getOperand(i_nocapture: 0)->getType()->isIntOrIntVectorTy())
    return false;

  // Nothing to do for equality predicates, or for unsigned predicates that
  // already carry the samesign flag.
  if (!Cmp->isSigned() && (!Cmp->isUnsigned() || Cmp->hasSameSign()))
    return false;

  bool Changed = false;

  ConstantRange CR1 = LVI->getConstantRangeAtUse(U: Cmp->getOperandUse(i: 0),
                                                 /*UndefAllowed=*/false),
                CR2 = LVI->getConstantRangeAtUse(U: Cmp->getOperandUse(i: 1),
                                                 /*UndefAllowed=*/false);

  if (Cmp->isSigned()) {
    // Try to rewrite e.g. slt -> ult when the ranges make the two
    // predicates equivalent.
    ICmpInst::Predicate UnsignedPred =
        ConstantRange::getEquivalentPredWithFlippedSignedness(
            Pred: Cmp->getPredicate(), CR1, CR2);

    if (UnsignedPred == ICmpInst::Predicate::BAD_ICMP_PREDICATE)
      return false;

    ++NumSICmps;
    Cmp->setPredicate(UnsignedPred);
    Changed = true;
  }

  // If the signed and unsigned interpretations agree for these ranges, the
  // comparison can be tagged samesign.
  if (ConstantRange::areInsensitiveToSignednessOfICmpPredicate(CR1, CR2)) {
    Cmp->setSameSign();
    Changed = true;
  }

  return Changed;
}
321
322/// See if LazyValueInfo's ability to exploit edge conditions or range
323/// information is sufficient to prove this comparison. Even for local
324/// conditions, this can sometimes prove conditions instcombine can't by
325/// exploiting range information.
326static bool constantFoldCmp(CmpInst *Cmp, LazyValueInfo *LVI) {
327 Value *Op0 = Cmp->getOperand(i_nocapture: 0);
328 Value *Op1 = Cmp->getOperand(i_nocapture: 1);
329 Constant *Res = LVI->getPredicateAt(Pred: Cmp->getPredicate(), LHS: Op0, RHS: Op1, CxtI: Cmp,
330 /*UseBlockValue=*/true);
331 if (!Res)
332 return false;
333
334 ++NumCmps;
335 Cmp->replaceAllUsesWith(V: Res);
336 Cmp->eraseFromParent();
337 return true;
338}
339
340static bool processCmp(CmpInst *Cmp, LazyValueInfo *LVI) {
341 if (constantFoldCmp(Cmp, LVI))
342 return true;
343
344 if (auto *ICmp = dyn_cast<ICmpInst>(Val: Cmp))
345 if (processICmp(Cmp: ICmp, LVI))
346 return true;
347
348 return false;
349}
350
/// Simplify a switch instruction by removing cases which can never fire. If the
/// uselessness of a case could be determined locally then constant propagation
/// would already have figured it out. Instead, walk the predecessors and
/// statically evaluate cases based on information available on that edge. Cases
/// that cannot fire no matter what the incoming edge can safely be removed. If
/// a case fires on every incoming edge then the entire switch can be removed
/// and replaced with a branch to the case destination.
static bool processSwitch(SwitchInst *I, LazyValueInfo *LVI,
                          DominatorTree *DT) {
  // Queue dominator-tree updates lazily; they are flushed before DTU is
  // destroyed.
  DomTreeUpdater DTU(*DT, DomTreeUpdater::UpdateStrategy::Lazy);
  Value *Cond = I->getCondition();
  BasicBlock *BB = I->getParent();

  // Analyse each switch case in turn.
  bool Changed = false;
  // Count edges per successor so the CFG/DT edge is only deleted once the
  // *last* case targeting that block goes away.
  DenseMap<BasicBlock*, int> SuccessorsCount;
  for (auto *Succ : successors(BB))
    SuccessorsCount[Succ]++;

  { // Scope for SwitchInstProfUpdateWrapper. It must not live during
    // ConstantFoldTerminator() as the underlying SwitchInst can be changed.
    SwitchInstProfUpdateWrapper SI(*I);
    ConstantRange CR =
        LVI->getConstantRangeAtUse(U: I->getOperandUse(i: 0), /*UndefAllowed=*/false);
    unsigned ReachableCaseCount = 0;

    for (auto CI = SI->case_begin(), CE = SI->case_end(); CI != CE;) {
      ConstantInt *Case = CI->getCaseValue();
      // Tri-state verdict for this case: false = never fires, true = always
      // fires, nullopt = unknown.
      std::optional<bool> Predicate = std::nullopt;
      if (!CR.contains(Val: Case->getValue()))
        Predicate = false;
      else if (CR.isSingleElement() &&
               *CR.getSingleElement() == Case->getValue())
        Predicate = true;
      if (!Predicate) {
        // Handle missing cases, e.g., the range has a hole.
        auto *Res = dyn_cast_or_null<ConstantInt>(
            Val: LVI->getPredicateAt(Pred: CmpInst::ICMP_EQ, V: Cond, C: Case, CxtI: I,
                              /* UseBlockValue=*/true));
        if (Res && Res->isZero())
          Predicate = false;
        else if (Res && Res->isOne())
          Predicate = true;
      }

      if (Predicate && !*Predicate) {
        // This case never fires - remove it.
        BasicBlock *Succ = CI->getCaseSuccessor();
        Succ->removePredecessor(Pred: BB);
        // removeCase invalidates the end iterator; refresh it.
        CI = SI.removeCase(I: CI);
        CE = SI->case_end();

        // The condition can be modified by removePredecessor's PHI simplification
        // logic.
        Cond = SI->getCondition();

        ++NumDeadCases;
        Changed = true;
        if (--SuccessorsCount[Succ] == 0)
          DTU.applyUpdatesPermissive(Updates: {{DominatorTree::Delete, BB, Succ}});
        continue;
      }
      if (Predicate && *Predicate) {
        // This case always fires. Arrange for the switch to be turned into an
        // unconditional branch by replacing the switch condition with the case
        // value.
        SI->setCondition(Case);
        NumDeadCases += SI->getNumCases();
        Changed = true;
        break;
      }

      // Increment the case iterator since we didn't delete it.
      ++CI;
      ++ReachableCaseCount;
    }

    // The default dest is unreachable if all cases are covered.
    if (!SI->defaultDestUnreachable() &&
        !CR.isSizeLargerThan(MaxSize: ReachableCaseCount)) {
      // Redirect the default edge to a fresh block containing only
      // `unreachable`, so downstream passes know it can never be taken.
      BasicBlock *DefaultDest = SI->getDefaultDest();
      BasicBlock *NewUnreachableBB =
          BasicBlock::Create(Context&: BB->getContext(), Name: "default.unreachable",
                             Parent: BB->getParent(), InsertBefore: DefaultDest);
      auto *UI = new UnreachableInst(BB->getContext(), NewUnreachableBB);
      UI->setDebugLoc(DebugLoc::getTemporary());

      DefaultDest->removePredecessor(Pred: BB);
      SI->setDefaultDest(NewUnreachableBB);

      // Only drop the DT edge if the default was this switch's sole edge to
      // that block.
      if (SuccessorsCount[DefaultDest] == 1)
        DTU.applyUpdates(Updates: {{DominatorTree::Delete, BB, DefaultDest}});
      DTU.applyUpdates(Updates: {{DominatorTree::Insert, BB, NewUnreachableBB}});

      ++NumDeadCases;
      Changed = true;
    }
  }

  if (Changed)
    // If the switch has been simplified to the point where it can be replaced
    // by a branch then do so now.
    ConstantFoldTerminator(BB, /*DeleteDeadConditions = */ false,
                           /*TLI = */ nullptr, DTU: &DTU);
  return Changed;
}
457
458// See if we can prove that the given binary op intrinsic will not overflow.
459static bool willNotOverflow(BinaryOpIntrinsic *BO, LazyValueInfo *LVI) {
460 ConstantRange LRange =
461 LVI->getConstantRangeAtUse(U: BO->getOperandUse(i: 0), /*UndefAllowed*/ false);
462 ConstantRange RRange =
463 LVI->getConstantRangeAtUse(U: BO->getOperandUse(i: 1), /*UndefAllowed*/ false);
464 ConstantRange NWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
465 BinOp: BO->getBinaryOp(), Other: RRange, NoWrapKind: BO->getNoWrapKind());
466 return NWRegion.contains(CR: LRange);
467}
468
469static void setDeducedOverflowingFlags(Value *V, Instruction::BinaryOps Opcode,
470 bool NewNSW, bool NewNUW) {
471 Statistic *OpcNW, *OpcNSW, *OpcNUW;
472 switch (Opcode) {
473 case Instruction::Add:
474 OpcNW = &NumAddNW;
475 OpcNSW = &NumAddNSW;
476 OpcNUW = &NumAddNUW;
477 break;
478 case Instruction::Sub:
479 OpcNW = &NumSubNW;
480 OpcNSW = &NumSubNSW;
481 OpcNUW = &NumSubNUW;
482 break;
483 case Instruction::Mul:
484 OpcNW = &NumMulNW;
485 OpcNSW = &NumMulNSW;
486 OpcNUW = &NumMulNUW;
487 break;
488 case Instruction::Shl:
489 OpcNW = &NumShlNW;
490 OpcNSW = &NumShlNSW;
491 OpcNUW = &NumShlNUW;
492 break;
493 default:
494 llvm_unreachable("Will not be called with other binops");
495 }
496
497 auto *Inst = dyn_cast<Instruction>(Val: V);
498 if (NewNSW) {
499 ++NumNW;
500 ++*OpcNW;
501 ++NumNSW;
502 ++*OpcNSW;
503 if (Inst)
504 Inst->setHasNoSignedWrap();
505 }
506 if (NewNUW) {
507 ++NumNW;
508 ++*OpcNW;
509 ++NumNUW;
510 ++*OpcNUW;
511 if (Inst)
512 Inst->setHasNoUnsignedWrap();
513 }
514}
515
516static bool processBinOp(BinaryOperator *BinOp, LazyValueInfo *LVI);
517
// See if @llvm.abs argument is always positive/negative, and simplify.
// Notably, INT_MIN can belong to either range, regardless of the NSW,
// because it is negation-invariant.
static bool processAbsIntrinsic(IntrinsicInst *II, LazyValueInfo *LVI) {
  Value *X = II->getArgOperand(i: 0);
  // Second argument of llvm.abs: whether INT_MIN input is poison.
  bool IsIntMinPoison = cast<ConstantInt>(Val: II->getArgOperand(i: 1))->isOne();
  APInt IntMin = APInt::getSignedMinValue(numBits: X->getType()->getScalarSizeInBits());
  ConstantRange Range = LVI->getConstantRangeAtUse(
      U: II->getOperandUse(i: 0), /*UndefAllowed*/ IsIntMinPoison);

  // Is X in [0, IntMin]? NOTE: INT_MIN is fine!
  // abs of a non-negative value is the value itself.
  if (Range.icmp(Pred: CmpInst::ICMP_ULE, Other: IntMin)) {
    ++NumAbs;
    II->replaceAllUsesWith(V: X);
    II->eraseFromParent();
    return true;
  }

  // Is X in [IntMin, 0]? NOTE: INT_MIN is fine!
  // abs of a non-positive value is its negation; the negation is NSW only
  // when INT_MIN was poison for the abs.
  if (Range.getSignedMax().isNonPositive()) {
    IRBuilder<> B(II);
    Value *NegX = B.CreateNeg(V: X, Name: II->getName(),
                              /*HasNSW=*/IsIntMinPoison);
    ++NumAbs;
    II->replaceAllUsesWith(V: NegX);
    II->eraseFromParent();

    // See if we can infer some no-wrap flags.
    if (auto *BO = dyn_cast<BinaryOperator>(Val: NegX))
      processBinOp(BinOp: BO, LVI);

    return true;
  }

  // Argument's range crosses zero.
  // Can we at least tell that the argument is never INT_MIN?
  // If so, strengthen the intrinsic by marking INT_MIN as poison.
  if (!IsIntMinPoison && !Range.contains(Val: IntMin)) {
    ++NumNSW;
    ++NumSubNSW;
    II->setArgOperand(i: 1, v: ConstantInt::getTrue(Context&: II->getContext()));
    return true;
  }
  return false;
}
562
563static bool processCmpIntrinsic(CmpIntrinsic *CI, LazyValueInfo *LVI) {
564 ConstantRange LHS_CR =
565 LVI->getConstantRangeAtUse(U: CI->getOperandUse(i: 0), /*UndefAllowed*/ false);
566 ConstantRange RHS_CR =
567 LVI->getConstantRangeAtUse(U: CI->getOperandUse(i: 1), /*UndefAllowed*/ false);
568
569 if (LHS_CR.icmp(Pred: CI->getGTPredicate(), Other: RHS_CR)) {
570 ++NumCmpIntr;
571 CI->replaceAllUsesWith(V: ConstantInt::get(Ty: CI->getType(), V: 1));
572 CI->eraseFromParent();
573 return true;
574 }
575 if (LHS_CR.icmp(Pred: CI->getLTPredicate(), Other: RHS_CR)) {
576 ++NumCmpIntr;
577 CI->replaceAllUsesWith(V: ConstantInt::getSigned(Ty: CI->getType(), V: -1));
578 CI->eraseFromParent();
579 return true;
580 }
581 if (LHS_CR.icmp(Pred: ICmpInst::ICMP_EQ, Other: RHS_CR)) {
582 ++NumCmpIntr;
583 CI->replaceAllUsesWith(V: ConstantInt::get(Ty: CI->getType(), V: 0));
584 CI->eraseFromParent();
585 return true;
586 }
587
588 return false;
589}
590
591// See if this min/max intrinsic always picks it's one specific operand.
592// If not, check whether we can canonicalize signed minmax into unsigned version
593static bool processMinMaxIntrinsic(MinMaxIntrinsic *MM, LazyValueInfo *LVI) {
594 CmpInst::Predicate Pred = CmpInst::getNonStrictPredicate(pred: MM->getPredicate());
595 ConstantRange LHS_CR = LVI->getConstantRangeAtUse(U: MM->getOperandUse(i: 0),
596 /*UndefAllowed*/ false);
597 ConstantRange RHS_CR = LVI->getConstantRangeAtUse(U: MM->getOperandUse(i: 1),
598 /*UndefAllowed*/ false);
599 if (LHS_CR.icmp(Pred, Other: RHS_CR)) {
600 ++NumMinMax;
601 MM->replaceAllUsesWith(V: MM->getLHS());
602 MM->eraseFromParent();
603 return true;
604 }
605 if (RHS_CR.icmp(Pred, Other: LHS_CR)) {
606 ++NumMinMax;
607 MM->replaceAllUsesWith(V: MM->getRHS());
608 MM->eraseFromParent();
609 return true;
610 }
611
612 if (MM->isSigned() &&
613 ConstantRange::areInsensitiveToSignednessOfICmpPredicate(CR1: LHS_CR,
614 CR2: RHS_CR)) {
615 ++NumSMinMax;
616 IRBuilder<> B(MM);
617 MM->replaceAllUsesWith(V: B.CreateBinaryIntrinsic(
618 ID: MM->getIntrinsicID() == Intrinsic::smin ? Intrinsic::umin
619 : Intrinsic::umax,
620 LHS: MM->getLHS(), RHS: MM->getRHS()));
621 MM->eraseFromParent();
622 return true;
623 }
624
625 return false;
626}
627
628// Rewrite this with.overflow intrinsic as non-overflowing.
629static bool processOverflowIntrinsic(WithOverflowInst *WO, LazyValueInfo *LVI) {
630 IRBuilder<> B(WO);
631 Instruction::BinaryOps Opcode = WO->getBinaryOp();
632 bool NSW = WO->isSigned();
633 bool NUW = !WO->isSigned();
634
635 Value *NewOp =
636 B.CreateBinOp(Opc: Opcode, LHS: WO->getLHS(), RHS: WO->getRHS(), Name: WO->getName());
637 setDeducedOverflowingFlags(V: NewOp, Opcode, NewNSW: NSW, NewNUW: NUW);
638
639 StructType *ST = cast<StructType>(Val: WO->getType());
640 Constant *Struct = ConstantStruct::get(T: ST,
641 V: { PoisonValue::get(T: ST->getElementType(N: 0)),
642 ConstantInt::getFalse(Ty: ST->getElementType(N: 1)) });
643 Value *NewI = B.CreateInsertValue(Agg: Struct, Val: NewOp, Idxs: 0);
644 WO->replaceAllUsesWith(V: NewI);
645 WO->eraseFromParent();
646 ++NumOverflows;
647
648 // See if we can infer the other no-wrap too.
649 if (auto *BO = dyn_cast<BinaryOperator>(Val: NewOp))
650 processBinOp(BinOp: BO, LVI);
651
652 return true;
653}
654
655static bool processSaturatingInst(SaturatingInst *SI, LazyValueInfo *LVI) {
656 Instruction::BinaryOps Opcode = SI->getBinaryOp();
657 bool NSW = SI->isSigned();
658 bool NUW = !SI->isSigned();
659 BinaryOperator *BinOp = BinaryOperator::Create(
660 Op: Opcode, S1: SI->getLHS(), S2: SI->getRHS(), Name: SI->getName(), InsertBefore: SI->getIterator());
661 BinOp->setDebugLoc(SI->getDebugLoc());
662 setDeducedOverflowingFlags(V: BinOp, Opcode, NewNSW: NSW, NewNUW: NUW);
663
664 SI->replaceAllUsesWith(V: BinOp);
665 SI->eraseFromParent();
666 ++NumSaturating;
667
668 // See if we can infer the other no-wrap too.
669 if (auto *BO = dyn_cast<BinaryOperator>(Val: BinOp))
670 processBinOp(BinOp: BO, LVI);
671
672 return true;
673}
674
/// Dispatch intrinsic-specific simplifications, constant-fold deopt bundle
/// operands, and infer nonnull attributes for the arguments at the specified
/// callsite.
static bool processCallSite(CallBase &CB, LazyValueInfo *LVI) {

  // Intrinsic-specific rewrites first; each fully handles its callsite.
  if (CB.getIntrinsicID() == Intrinsic::abs) {
    return processAbsIntrinsic(II: &cast<IntrinsicInst>(Val&: CB), LVI);
  }

  if (auto *CI = dyn_cast<CmpIntrinsic>(Val: &CB)) {
    return processCmpIntrinsic(CI, LVI);
  }

  if (auto *MM = dyn_cast<MinMaxIntrinsic>(Val: &CB)) {
    return processMinMaxIntrinsic(MM, LVI);
  }

  if (auto *WO = dyn_cast<WithOverflowInst>(Val: &CB)) {
    if (willNotOverflow(BO: WO, LVI))
      return processOverflowIntrinsic(WO, LVI);
  }

  if (auto *SI = dyn_cast<SaturatingInst>(Val: &CB)) {
    if (willNotOverflow(BO: SI, LVI))
      return processSaturatingInst(SI, LVI);
  }

  bool Changed = false;

  // Deopt bundle operands are intended to capture state with minimal
  // perturbance of the code otherwise. If we can find a constant value for
  // any such operand and remove a use of the original value, that's
  // desirable since it may allow further optimization of that value (e.g. via
  // single use rules in instcombine). Since deopt uses tend to,
  // idiomatically, appear along rare conditional paths, it's reasonably likely
  // we may have a conditional fact with which LVI can fold.
  if (auto DeoptBundle = CB.getOperandBundle(ID: LLVMContext::OB_deopt)) {
    for (const Use &ConstU : DeoptBundle->Inputs) {
      Use &U = const_cast<Use&>(ConstU);
      Value *V = U.get();
      // Skip vectors (LVI is scalar-only) and operands already constant.
      if (V->getType()->isVectorTy()) continue;
      if (isa<Constant>(Val: V)) continue;

      Constant *C = LVI->getConstant(V, CxtI: &CB);
      if (!C) continue;
      U.set(C);
      Changed = true;
    }
  }

  // Indices of pointer arguments we can prove are non-null at this call.
  SmallVector<unsigned, 4> ArgNos;
  unsigned ArgNo = 0;

  for (Value *V : CB.args()) {
    PointerType *Type = dyn_cast<PointerType>(Val: V->getType());
    // Try to mark pointer typed parameters as non-null.  We skip the
    // relatively expensive analysis for constants which are obviously either
    // null or non-null to start with.
    if (Type && !CB.paramHasAttr(ArgNo, Kind: Attribute::NonNull) &&
        !isa<Constant>(Val: V))
      if (auto *Res = dyn_cast_or_null<ConstantInt>(Val: LVI->getPredicateAt(
              Pred: ICmpInst::ICMP_EQ, V, C: ConstantPointerNull::get(T: Type), CxtI: &CB,
              /*UseBlockValue=*/false));
          Res && Res->isZero())
        ArgNos.push_back(Elt: ArgNo);
    ArgNo++;
  }

  assert(ArgNo == CB.arg_size() && "Call arguments not processed correctly.");

  if (ArgNos.empty())
    return Changed;

  // Attach the nonnull attribute to every argument we proved above.
  NumNonNull += ArgNos.size();
  AttributeList AS = CB.getAttributes();
  LLVMContext &Ctx = CB.getContext();
  AS = AS.addParamAttribute(C&: Ctx, ArgNos,
                            A: Attribute::get(Context&: Ctx, Kind: Attribute::NonNull));
  CB.setAttributes(AS);

  return true;
}
755
756enum class Domain { NonNegative, NonPositive, Unknown };
757
758static Domain getDomain(const ConstantRange &CR) {
759 if (CR.isAllNonNegative())
760 return Domain::NonNegative;
761 if (CR.icmp(Pred: ICmpInst::ICMP_SLE, Other: APInt::getZero(numBits: CR.getBitWidth())))
762 return Domain::NonPositive;
763 return Domain::Unknown;
764}
765
/// Try to shrink a sdiv/srem's width down to the smallest power of two that's
/// sufficient to contain its operands.
static bool narrowSDivOrSRem(BinaryOperator *Instr, const ConstantRange &LCR,
                             const ConstantRange &RCR) {
  assert(Instr->getOpcode() == Instruction::SDiv ||
         Instr->getOpcode() == Instruction::SRem);

  // Find the smallest power of two bitwidth that's sufficient to hold Instr's
  // operands.
  unsigned OrigWidth = Instr->getType()->getScalarSizeInBits();

  // What is the smallest bit width that can accommodate the entire value ranges
  // of both of the operands?
  unsigned MinSignedBits =
      std::max(a: LCR.getMinSignedBits(), b: RCR.getMinSignedBits());

  // sdiv/srem is UB if divisor is -1 and divident is INT_MIN, so unless we can
  // prove that such a combination is impossible, we need to bump the bitwidth.
  if (RCR.contains(Val: APInt::getAllOnes(numBits: OrigWidth)) &&
      LCR.contains(Val: APInt::getSignedMinValue(numBits: MinSignedBits).sext(width: OrigWidth)))
    ++MinSignedBits;

  // Don't shrink below 8 bits wide.
  unsigned NewWidth = std::max<unsigned>(a: PowerOf2Ceil(A: MinSignedBits), b: 8);

  // NewWidth might be greater than OrigWidth if OrigWidth is not a power of
  // two.
  if (NewWidth >= OrigWidth)
    return false;

  // Truncate both operands, perform the op in the narrow type, then
  // sign-extend the result back to the original type.
  ++NumSDivSRemsNarrowed;
  IRBuilder<> B{Instr};
  auto *TruncTy = Instr->getType()->getWithNewBitWidth(NewBitWidth: NewWidth);
  auto *LHS = B.CreateTruncOrBitCast(V: Instr->getOperand(i_nocapture: 0), DestTy: TruncTy,
                                     Name: Instr->getName() + ".lhs.trunc");
  auto *RHS = B.CreateTruncOrBitCast(V: Instr->getOperand(i_nocapture: 1), DestTy: TruncTy,
                                     Name: Instr->getName() + ".rhs.trunc");
  auto *BO = B.CreateBinOp(Opc: Instr->getOpcode(), LHS, RHS, Name: Instr->getName());
  auto *Sext = B.CreateSExt(V: BO, DestTy: Instr->getType(), Name: Instr->getName() + ".sext");
  // Preserve the exact flag on the narrowed sdiv (the builder may have
  // folded, hence the dyn_cast).
  if (auto *BinOp = dyn_cast<BinaryOperator>(Val: BO))
    if (BinOp->getOpcode() == Instruction::SDiv)
      BinOp->setIsExact(Instr->isExact());

  Instr->replaceAllUsesWith(V: Sext);
  Instr->eraseFromParent();
  return true;
}
813
/// Expand a udiv/urem into cheaper straight-line code when LVI's ranges prove
/// the quotient is 0 or 1 (i.e. at most one "subtract Y" step is needed).
static bool expandUDivOrURem(BinaryOperator *Instr, const ConstantRange &XCR,
                             const ConstantRange &YCR) {
  Type *Ty = Instr->getType();
  assert(Instr->getOpcode() == Instruction::UDiv ||
         Instr->getOpcode() == Instruction::URem);
  bool IsRem = Instr->getOpcode() == Instruction::URem;

  Value *X = Instr->getOperand(i_nocapture: 0);
  Value *Y = Instr->getOperand(i_nocapture: 1);

  // X u/ Y -> 0 iff X u< Y
  // X u% Y -> X iff X u< Y
  if (XCR.icmp(Pred: ICmpInst::ICMP_ULT, Other: YCR)) {
    Instr->replaceAllUsesWith(V: IsRem ? X : Constant::getNullValue(Ty));
    Instr->eraseFromParent();
    ++NumUDivURemsNarrowedExpanded;
    return true;
  }

  // Given
  //   R  = X u% Y
  // We can represent the modulo operation as a loop/self-recursion:
  //   urem_rec(X, Y):
  //     Z = X - Y
  //     if X u< Y
  //       ret X
  //     else
  //       ret urem_rec(Z, Y)
  // which isn't better, but if we only need a single iteration
  // to compute the answer, this becomes quite good:
  //   R  = X < Y ? X : X - Y    iff X u< 2*Y (w/ unsigned saturation)
  // Now, we do not care about all full multiples of Y in X, they do not change
  // the answer, thus we could rewrite the expression as:
  //   X* = X - (Y * |_ X / Y _|)
  //   R  = X* % Y
  // so we don't need the *first* iteration to return, we just need to
  // know *which* iteration will always return, so we could also rewrite it as:
  //   X* = X - (Y * |_ X / Y _|)
  //   R  = X* % Y                 iff X* u< 2*Y (w/ unsigned saturation)
  // but that does not seem profitable here.

  // Even if we don't know X's range, the divisor may be so large, X can't ever
  // be 2x larger than that. I.e. if divisor is always negative.
  if (!XCR.icmp(Pred: ICmpInst::ICMP_ULT, Other: YCR.uadd_sat(Other: YCR)) && !YCR.isAllNegative())
    return false;

  IRBuilder<> B(Instr);
  Value *ExpandedOp;
  if (XCR.icmp(Pred: ICmpInst::ICMP_UGE, Other: YCR)) {
    // If X is between Y and 2*Y the result is known.
    // udiv is exactly 1; urem is X - Y (cannot underflow, hence nuw).
    if (IsRem)
      ExpandedOp = B.CreateNUWSub(LHS: X, RHS: Y);
    else
      ExpandedOp = ConstantInt::get(Ty: Instr->getType(), V: 1);
  } else if (IsRem) {
    // NOTE: this transformation introduces two uses of X,
    // but it may be undef so we must freeze it first.
    Value *FrozenX = X;
    if (!isGuaranteedNotToBeUndef(V: X))
      FrozenX = B.CreateFreeze(V: X, Name: X->getName() + ".frozen");
    Value *FrozenY = Y;
    if (!isGuaranteedNotToBeUndef(V: Y))
      FrozenY = B.CreateFreeze(V: Y, Name: Y->getName() + ".frozen");
    // R = X u< Y ? X : X - Y  (the single needed "iteration").
    auto *AdjX = B.CreateNUWSub(LHS: FrozenX, RHS: FrozenY, Name: Instr->getName() + ".urem");
    auto *Cmp = B.CreateICmp(P: ICmpInst::ICMP_ULT, LHS: FrozenX, RHS: FrozenY,
                             Name: Instr->getName() + ".cmp");
    ExpandedOp = B.CreateSelect(C: Cmp, True: FrozenX, False: AdjX);
  } else {
    // udiv result is 0 or 1, i.e. exactly zext(X u>= Y).
    auto *Cmp =
        B.CreateICmp(P: ICmpInst::ICMP_UGE, LHS: X, RHS: Y, Name: Instr->getName() + ".cmp");
    ExpandedOp = B.CreateZExt(V: Cmp, DestTy: Ty, Name: Instr->getName() + ".udiv");
  }
  ExpandedOp->takeName(V: Instr);
  Instr->replaceAllUsesWith(V: ExpandedOp);
  Instr->eraseFromParent();
  ++NumUDivURemsNarrowedExpanded;
  return true;
}
892
893/// Try to shrink a udiv/urem's width down to the smallest power of two that's
894/// sufficient to contain its operands.
895static bool narrowUDivOrURem(BinaryOperator *Instr, const ConstantRange &XCR,
896 const ConstantRange &YCR) {
897 assert(Instr->getOpcode() == Instruction::UDiv ||
898 Instr->getOpcode() == Instruction::URem);
899
900 // Find the smallest power of two bitwidth that's sufficient to hold Instr's
901 // operands.
902
903 // What is the smallest bit width that can accommodate the entire value ranges
904 // of both of the operands?
905 unsigned MaxActiveBits = std::max(a: XCR.getActiveBits(), b: YCR.getActiveBits());
906 // Don't shrink below 8 bits wide.
907 unsigned NewWidth = std::max<unsigned>(a: PowerOf2Ceil(A: MaxActiveBits), b: 8);
908
909 // NewWidth might be greater than OrigWidth if OrigWidth is not a power of
910 // two.
911 if (NewWidth >= Instr->getType()->getScalarSizeInBits())
912 return false;
913
914 ++NumUDivURemsNarrowed;
915 IRBuilder<> B{Instr};
916 auto *TruncTy = Instr->getType()->getWithNewBitWidth(NewBitWidth: NewWidth);
917 auto *LHS = B.CreateTruncOrBitCast(V: Instr->getOperand(i_nocapture: 0), DestTy: TruncTy,
918 Name: Instr->getName() + ".lhs.trunc");
919 auto *RHS = B.CreateTruncOrBitCast(V: Instr->getOperand(i_nocapture: 1), DestTy: TruncTy,
920 Name: Instr->getName() + ".rhs.trunc");
921 auto *BO = B.CreateBinOp(Opc: Instr->getOpcode(), LHS, RHS, Name: Instr->getName());
922 auto *Zext = B.CreateZExt(V: BO, DestTy: Instr->getType(), Name: Instr->getName() + ".zext");
923 if (auto *BinOp = dyn_cast<BinaryOperator>(Val: BO))
924 if (BinOp->getOpcode() == Instruction::UDiv)
925 BinOp->setIsExact(Instr->isExact());
926
927 Instr->replaceAllUsesWith(V: Zext);
928 Instr->eraseFromParent();
929 return true;
930}
931
932static bool processUDivOrURem(BinaryOperator *Instr, LazyValueInfo *LVI) {
933 assert(Instr->getOpcode() == Instruction::UDiv ||
934 Instr->getOpcode() == Instruction::URem);
935 ConstantRange XCR = LVI->getConstantRangeAtUse(U: Instr->getOperandUse(i: 0),
936 /*UndefAllowed*/ false);
937 // Allow undef for RHS, as we can assume it is division by zero UB.
938 ConstantRange YCR = LVI->getConstantRangeAtUse(U: Instr->getOperandUse(i: 1),
939 /*UndefAllowed*/ true);
940 if (expandUDivOrURem(Instr, XCR, YCR))
941 return true;
942
943 return narrowUDivOrURem(Instr, XCR, YCR);
944}
945
/// Rewrite an srem as a urem when the sign domains of both operands are
/// known: negate any non-positive operand first, and negate the result if the
/// dividend was non-positive (srem's result takes the dividend's sign).
/// Also folds X s% Y -> X when |X| u< |Y|. LCR/RCR are the LHS/RHS ranges.
static bool processSRem(BinaryOperator *SDI, const ConstantRange &LCR,
                        const ConstantRange &RCR, LazyValueInfo *LVI) {
  assert(SDI->getOpcode() == Instruction::SRem);

  // X s% Y == X when |X| u< |Y| -- the remainder is the dividend itself.
  if (LCR.abs().icmp(Pred: CmpInst::ICMP_ULT, Other: RCR.abs())) {
    SDI->replaceAllUsesWith(V: SDI->getOperand(i_nocapture: 0));
    SDI->eraseFromParent();
    return true;
  }

  // Pair each operand with the sign domain deduced from its range.
  struct Operand {
    Value *V;
    Domain D;
  };
  std::array<Operand, 2> Ops = {._M_elems: {{.V: SDI->getOperand(i_nocapture: 0), .D: getDomain(CR: LCR)},
                                 {.V: SDI->getOperand(i_nocapture: 1), .D: getDomain(CR: RCR)}}};
  if (Ops[0].D == Domain::Unknown || Ops[1].D == Domain::Unknown)
    return false;

  // We know domains of both of the operands!
  ++NumSRems;

  // We need operands to be non-negative, so negate each one that isn't.
  for (Operand &Op : Ops) {
    if (Op.D == Domain::NonNegative)
      continue;
    auto *BO = BinaryOperator::CreateNeg(Op: Op.V, Name: Op.V->getName() + ".nonneg",
                                         InsertBefore: SDI->getIterator());
    BO->setDebugLoc(SDI->getDebugLoc());
    Op.V = BO;
  }

  auto *URem = BinaryOperator::CreateURem(V1: Ops[0].V, V2: Ops[1].V, Name: SDI->getName(),
                                          InsertBefore: SDI->getIterator());
  URem->setDebugLoc(SDI->getDebugLoc());

  auto *Res = URem;

  // If the dividend was non-positive, we need to negate the result.
  if (Ops[0].D == Domain::NonPositive) {
    Res = BinaryOperator::CreateNeg(Op: Res, Name: Res->getName() + ".neg",
                                    InsertBefore: SDI->getIterator());
    Res->setDebugLoc(SDI->getDebugLoc());
  }

  SDI->replaceAllUsesWith(V: Res);
  SDI->eraseFromParent();

  // Try to simplify our new urem.
  processUDivOrURem(Instr: URem, LVI);

  return true;
}
999
/// See if LazyValueInfo's ability to exploit edge conditions or range
/// information is sufficient to prove the signs of both operands of this SDiv.
/// If this is the case, replace the SDiv with a UDiv. Even for local
/// conditions, this can sometimes prove conditions instcombine can't by
/// exploiting range information. LCR/RCR are the LHS/RHS operand ranges.
static bool processSDiv(BinaryOperator *SDI, const ConstantRange &LCR,
                        const ConstantRange &RCR, LazyValueInfo *LVI) {
  assert(SDI->getOpcode() == Instruction::SDiv);

  // Check whether the division folds to a constant.
  ConstantRange DivCR = LCR.sdiv(Other: RCR);
  if (const APInt *Elem = DivCR.getSingleElement()) {
    SDI->replaceAllUsesWith(V: ConstantInt::get(Ty: SDI->getType(), V: *Elem));
    SDI->eraseFromParent();
    return true;
  }

  // Pair each operand with the sign domain deduced from its range.
  struct Operand {
    Value *V;
    Domain D;
  };
  std::array<Operand, 2> Ops = {._M_elems: {{.V: SDI->getOperand(i_nocapture: 0), .D: getDomain(CR: LCR)},
                                 {.V: SDI->getOperand(i_nocapture: 1), .D: getDomain(CR: RCR)}}};
  if (Ops[0].D == Domain::Unknown || Ops[1].D == Domain::Unknown)
    return false;

  // We know domains of both of the operands!
  ++NumSDivs;

  // We need operands to be non-negative, so negate each one that isn't.
  for (Operand &Op : Ops) {
    if (Op.D == Domain::NonNegative)
      continue;
    auto *BO = BinaryOperator::CreateNeg(Op: Op.V, Name: Op.V->getName() + ".nonneg",
                                         InsertBefore: SDI->getIterator());
    BO->setDebugLoc(SDI->getDebugLoc());
    Op.V = BO;
  }

  auto *UDiv = BinaryOperator::CreateUDiv(V1: Ops[0].V, V2: Ops[1].V, Name: SDI->getName(),
                                          InsertBefore: SDI->getIterator());
  UDiv->setDebugLoc(SDI->getDebugLoc());
  UDiv->setIsExact(SDI->isExact());

  auto *Res = UDiv;

  // If the operands had two different domains, we need to negate the result.
  if (Ops[0].D != Ops[1].D) {
    Res = BinaryOperator::CreateNeg(Op: Res, Name: Res->getName() + ".neg",
                                    InsertBefore: SDI->getIterator());
    Res->setDebugLoc(SDI->getDebugLoc());
  }

  SDI->replaceAllUsesWith(V: Res);
  SDI->eraseFromParent();

  // Try to simplify our new udiv.
  processUDivOrURem(Instr: UDiv, LVI);

  return true;
}
1061
1062static bool processSDivOrSRem(BinaryOperator *Instr, LazyValueInfo *LVI) {
1063 assert(Instr->getOpcode() == Instruction::SDiv ||
1064 Instr->getOpcode() == Instruction::SRem);
1065 ConstantRange LCR =
1066 LVI->getConstantRangeAtUse(U: Instr->getOperandUse(i: 0), /*AllowUndef*/ UndefAllowed: false);
1067 // Allow undef for RHS, as we can assume it is division by zero UB.
1068 ConstantRange RCR =
1069 LVI->getConstantRangeAtUse(U: Instr->getOperandUse(i: 1), /*AlloweUndef*/ UndefAllowed: true);
1070 if (Instr->getOpcode() == Instruction::SDiv)
1071 if (processSDiv(SDI: Instr, LCR, RCR, LVI))
1072 return true;
1073
1074 if (Instr->getOpcode() == Instruction::SRem) {
1075 if (processSRem(SDI: Instr, LCR, RCR, LVI))
1076 return true;
1077 }
1078
1079 return narrowSDivOrSRem(Instr, LCR, RCR);
1080}
1081
1082static bool processAShr(BinaryOperator *SDI, LazyValueInfo *LVI) {
1083 ConstantRange LRange =
1084 LVI->getConstantRangeAtUse(U: SDI->getOperandUse(i: 0), /*UndefAllowed*/ false);
1085 unsigned OrigWidth = SDI->getType()->getScalarSizeInBits();
1086 ConstantRange NegOneOrZero =
1087 ConstantRange(APInt(OrigWidth, (uint64_t)-1, true), APInt(OrigWidth, 1));
1088 if (NegOneOrZero.contains(CR: LRange)) {
1089 // ashr of -1 or 0 never changes the value, so drop the whole instruction
1090 ++NumAShrsRemoved;
1091 SDI->replaceAllUsesWith(V: SDI->getOperand(i_nocapture: 0));
1092 SDI->eraseFromParent();
1093 return true;
1094 }
1095
1096 if (!LRange.isAllNonNegative())
1097 return false;
1098
1099 ++NumAShrsConverted;
1100 auto *BO = BinaryOperator::CreateLShr(V1: SDI->getOperand(i_nocapture: 0), V2: SDI->getOperand(i_nocapture: 1),
1101 Name: "", InsertBefore: SDI->getIterator());
1102 BO->takeName(V: SDI);
1103 BO->setDebugLoc(SDI->getDebugLoc());
1104 BO->setIsExact(SDI->isExact());
1105 SDI->replaceAllUsesWith(V: BO);
1106 SDI->eraseFromParent();
1107
1108 return true;
1109}
1110
1111static bool processSExt(SExtInst *SDI, LazyValueInfo *LVI) {
1112 const Use &Base = SDI->getOperandUse(i: 0);
1113 if (!LVI->getConstantRangeAtUse(U: Base, /*UndefAllowed*/ false)
1114 .isAllNonNegative())
1115 return false;
1116
1117 ++NumSExt;
1118 auto *ZExt = CastInst::CreateZExtOrBitCast(S: Base, Ty: SDI->getType(), Name: "",
1119 InsertBefore: SDI->getIterator());
1120 ZExt->takeName(V: SDI);
1121 ZExt->setDebugLoc(SDI->getDebugLoc());
1122 ZExt->setNonNeg();
1123 SDI->replaceAllUsesWith(V: ZExt);
1124 SDI->eraseFromParent();
1125
1126 return true;
1127}
1128
1129static bool processPossibleNonNeg(PossiblyNonNegInst *I, LazyValueInfo *LVI) {
1130 if (I->hasNonNeg())
1131 return false;
1132
1133 const Use &Base = I->getOperandUse(i: 0);
1134 if (!LVI->getConstantRangeAtUse(U: Base, /*UndefAllowed*/ false)
1135 .isAllNonNegative())
1136 return false;
1137
1138 ++NumNNeg;
1139 I->setNonNeg();
1140
1141 return true;
1142}
1143
1144static bool processZExt(ZExtInst *ZExt, LazyValueInfo *LVI) {
1145 return processPossibleNonNeg(I: cast<PossiblyNonNegInst>(Val: ZExt), LVI);
1146}
1147
1148static bool processUIToFP(UIToFPInst *UIToFP, LazyValueInfo *LVI) {
1149 return processPossibleNonNeg(I: cast<PossiblyNonNegInst>(Val: UIToFP), LVI);
1150}
1151
1152static bool processSIToFP(SIToFPInst *SIToFP, LazyValueInfo *LVI) {
1153 const Use &Base = SIToFP->getOperandUse(i: 0);
1154 if (!LVI->getConstantRangeAtUse(U: Base, /*UndefAllowed*/ false)
1155 .isAllNonNegative())
1156 return false;
1157
1158 ++NumSIToFP;
1159 auto *UIToFP = CastInst::Create(Instruction::UIToFP, S: Base, Ty: SIToFP->getType(),
1160 Name: "", InsertBefore: SIToFP->getIterator());
1161 UIToFP->takeName(V: SIToFP);
1162 UIToFP->setDebugLoc(SIToFP->getDebugLoc());
1163 UIToFP->setNonNeg();
1164 SIToFP->replaceAllUsesWith(V: UIToFP);
1165 SIToFP->eraseFromParent();
1166
1167 return true;
1168}
1169
1170static bool processBinOp(BinaryOperator *BinOp, LazyValueInfo *LVI) {
1171 using OBO = OverflowingBinaryOperator;
1172
1173 bool NSW = BinOp->hasNoSignedWrap();
1174 bool NUW = BinOp->hasNoUnsignedWrap();
1175 if (NSW && NUW)
1176 return false;
1177
1178 Instruction::BinaryOps Opcode = BinOp->getOpcode();
1179 ConstantRange LRange = LVI->getConstantRangeAtUse(U: BinOp->getOperandUse(i: 0),
1180 /*UndefAllowed=*/false);
1181 ConstantRange RRange = LVI->getConstantRangeAtUse(U: BinOp->getOperandUse(i: 1),
1182 /*UndefAllowed=*/false);
1183
1184 bool Changed = false;
1185 bool NewNUW = false, NewNSW = false;
1186 if (!NUW) {
1187 ConstantRange NUWRange = ConstantRange::makeGuaranteedNoWrapRegion(
1188 BinOp: Opcode, Other: RRange, NoWrapKind: OBO::NoUnsignedWrap);
1189 NewNUW = NUWRange.contains(CR: LRange);
1190 Changed |= NewNUW;
1191 }
1192 if (!NSW) {
1193 ConstantRange NSWRange = ConstantRange::makeGuaranteedNoWrapRegion(
1194 BinOp: Opcode, Other: RRange, NoWrapKind: OBO::NoSignedWrap);
1195 NewNSW = NSWRange.contains(CR: LRange);
1196 Changed |= NewNSW;
1197 }
1198
1199 setDeducedOverflowingFlags(V: BinOp, Opcode, NewNSW, NewNUW);
1200
1201 return Changed;
1202}
1203
1204static bool processAnd(BinaryOperator *BinOp, LazyValueInfo *LVI) {
1205 using namespace llvm::PatternMatch;
1206
1207 // Pattern match (and lhs, C) where C includes a superset of bits which might
1208 // be set in lhs. This is a common truncation idiom created by instcombine.
1209 const Use &LHS = BinOp->getOperandUse(i: 0);
1210 const APInt *RHS;
1211 if (!match(V: BinOp->getOperand(i_nocapture: 1), P: m_LowBitMask(V&: RHS)))
1212 return false;
1213
1214 // We can only replace the AND with LHS based on range info if the range does
1215 // not include undef.
1216 ConstantRange LRange =
1217 LVI->getConstantRangeAtUse(U: LHS, /*UndefAllowed=*/false);
1218 if (!LRange.getUnsignedMax().ule(RHS: *RHS))
1219 return false;
1220
1221 BinOp->replaceAllUsesWith(V: LHS);
1222 BinOp->eraseFromParent();
1223 NumAnd++;
1224 return true;
1225}
1226
1227static bool processTrunc(TruncInst *TI, LazyValueInfo *LVI) {
1228 if (TI->hasNoSignedWrap() && TI->hasNoUnsignedWrap())
1229 return false;
1230
1231 ConstantRange Range =
1232 LVI->getConstantRangeAtUse(U: TI->getOperandUse(i: 0), /*UndefAllowed=*/false);
1233 uint64_t DestWidth = TI->getDestTy()->getScalarSizeInBits();
1234 bool Changed = false;
1235
1236 if (!TI->hasNoUnsignedWrap()) {
1237 if (Range.getActiveBits() <= DestWidth) {
1238 TI->setHasNoUnsignedWrap(true);
1239 ++NumNUW;
1240 Changed = true;
1241 }
1242 }
1243
1244 if (!TI->hasNoSignedWrap()) {
1245 if (Range.getMinSignedBits() <= DestWidth) {
1246 TI->setHasNoSignedWrap(true);
1247 ++NumNSW;
1248 Changed = true;
1249 }
1250 }
1251
1252 return Changed;
1253}
1254
/// Drive correlated value propagation over every reachable block of \p F,
/// dispatching each instruction to its opcode-specific handler and finally
/// inferring a `range` attribute on the return value when possible. Returns
/// true if any change was made.
static bool runImpl(Function &F, LazyValueInfo *LVI, DominatorTree *DT,
                    const SimplifyQuery &SQ) {
  bool FnChanged = false;
  // Union of the ranges of all returned values, accumulated at each `ret`;
  // only tracked for exact definitions with an integer return type.
  std::optional<ConstantRange> RetRange;
  if (F.hasExactDefinition() && F.getReturnType()->isIntOrIntVectorTy())
    RetRange =
        ConstantRange::getEmpty(BitWidth: F.getReturnType()->getScalarSizeInBits());

  // Visiting in a pre-order depth-first traversal causes us to simplify early
  // blocks before querying later blocks (which require us to analyze early
  // blocks). Eagerly simplifying shallow blocks means there is strictly less
  // work to do for deep blocks. This also means we don't visit unreachable
  // blocks.
  for (BasicBlock *BB : depth_first(G: &F.getEntryBlock())) {
    bool BBChanged = false;
    // Early-inc iteration: handlers may erase the current instruction.
    for (Instruction &II : llvm::make_early_inc_range(Range&: *BB)) {
      switch (II.getOpcode()) {
      case Instruction::Select:
        BBChanged |= processSelect(S: cast<SelectInst>(Val: &II), LVI);
        break;
      case Instruction::PHI:
        BBChanged |= processPHI(P: cast<PHINode>(Val: &II), LVI, DT, SQ);
        break;
      case Instruction::ICmp:
      case Instruction::FCmp:
        BBChanged |= processCmp(Cmp: cast<CmpInst>(Val: &II), LVI);
        break;
      case Instruction::Call:
      case Instruction::Invoke:
        BBChanged |= processCallSite(CB&: cast<CallBase>(Val&: II), LVI);
        break;
      case Instruction::SRem:
      case Instruction::SDiv:
        BBChanged |= processSDivOrSRem(Instr: cast<BinaryOperator>(Val: &II), LVI);
        break;
      case Instruction::UDiv:
      case Instruction::URem:
        BBChanged |= processUDivOrURem(Instr: cast<BinaryOperator>(Val: &II), LVI);
        break;
      case Instruction::AShr:
        BBChanged |= processAShr(SDI: cast<BinaryOperator>(Val: &II), LVI);
        break;
      case Instruction::SExt:
        BBChanged |= processSExt(SDI: cast<SExtInst>(Val: &II), LVI);
        break;
      case Instruction::ZExt:
        BBChanged |= processZExt(ZExt: cast<ZExtInst>(Val: &II), LVI);
        break;
      case Instruction::UIToFP:
        BBChanged |= processUIToFP(UIToFP: cast<UIToFPInst>(Val: &II), LVI);
        break;
      case Instruction::SIToFP:
        BBChanged |= processSIToFP(SIToFP: cast<SIToFPInst>(Val: &II), LVI);
        break;
      case Instruction::Add:
      case Instruction::Sub:
      case Instruction::Mul:
      case Instruction::Shl:
        BBChanged |= processBinOp(BinOp: cast<BinaryOperator>(Val: &II), LVI);
        break;
      case Instruction::And:
        BBChanged |= processAnd(BinOp: cast<BinaryOperator>(Val: &II), LVI);
        break;
      case Instruction::Trunc:
        BBChanged |= processTrunc(TI: cast<TruncInst>(Val: &II), LVI);
        break;
      }
    }

    // Terminators get their own handling: switches can be simplified, and
    // returns feed the return-range accumulation below.
    Instruction *Term = BB->getTerminator();
    switch (Term->getOpcode()) {
    case Instruction::Switch:
      BBChanged |= processSwitch(I: cast<SwitchInst>(Val: Term), LVI, DT);
      break;
    case Instruction::Ret: {
      auto *RI = cast<ReturnInst>(Val: Term);
      // Try to determine the return value if we can. This is mainly here to
      // simplify the writing of unit tests, but also helps to enable IPO by
      // constant folding the return values of callees.
      auto *RetVal = RI->getReturnValue();
      if (!RetVal) break; // handle "ret void"
      // Skip the union once the accumulated range is already full: it can
      // only stay full.
      if (RetRange && !RetRange->isFullSet())
        RetRange =
            RetRange->unionWith(CR: LVI->getConstantRange(V: RetVal, CxtI: RI,
                                                      /*UndefAllowed=*/false));

      if (isa<Constant>(Val: RetVal)) break; // nothing to do
      if (auto *C = getConstantAt(V: RetVal, At: RI, LVI)) {
        ++NumReturns;
        RI->replaceUsesOfWith(From: RetVal, To: C);
        BBChanged = true;
      }
    }
    }

    FnChanged |= BBChanged;
  }

  // Infer range attribute on return value.
  if (RetRange && !RetRange->isFullSet()) {
    Attribute RangeAttr = F.getRetAttribute(Kind: Attribute::Range);
    if (RangeAttr.isValid())
      RetRange = RetRange->intersectWith(CR: RangeAttr.getRange());
    // Don't add attribute for constant integer returns to reduce noise. These
    // are propagated across functions by IPSCCP.
    if (!RetRange->isEmptySet() && !RetRange->isSingleElement()) {
      F.addRangeRetAttr(CR: *RetRange);
      FnChanged = true;
    }
  }
  return FnChanged;
}
1367
/// New-pass-manager entry point: fetch LVI and the dominator tree, run the
/// transform, and report which analyses survive.
PreservedAnalyses
CorrelatedValuePropagationPass::run(Function &F, FunctionAnalysisManager &AM) {
  LazyValueInfo *LVI = &AM.getResult<LazyValueAnalysis>(IR&: F);
  DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(IR&: F);

  bool Changed = runImpl(F, LVI, DT, SQ: getBestSimplifyQuery(AM, F));

  PreservedAnalyses PA;
  if (!Changed) {
    PA = PreservedAnalyses::all();
  } else {
    // Sanity-check in asserts builds that the dominator tree is still
    // consistent before claiming it as preserved.
#if defined(EXPENSIVE_CHECKS)
    assert(DT->verify(DominatorTree::VerificationLevel::Full));
#else
    assert(DT->verify(DominatorTree::VerificationLevel::Fast));
#endif // EXPENSIVE_CHECKS

    PA.preserve<DominatorTreeAnalysis>();
    PA.preserve<LazyValueAnalysis>();
  }

  // Keeping LVI alive is expensive, both because it uses a lot of memory, and
  // because invalidating values in LVI is expensive. While CVP does preserve
  // LVI, we know that passes after JumpThreading+CVP will not need the result
  // of this analysis, so we forcefully discard it early.
  PA.abandon<LazyValueAnalysis>();
  return PA;
}
1396