//===- InstCombineInternal.h - InstCombine pass internals -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file provides internal interfaces used to implement the InstCombine.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
#define LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H

#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/KnownFPClass.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

// As a default, let's assume that we want to be aggressive,
// and attempt to traverse with no limits in an attempt to sink negation.
static constexpr unsigned NegatorDefaultMaxDepth = ~0U;

// Let's guesstimate that most often we will end up visiting/producing
// a fairly small number of new instructions.
static constexpr unsigned NegatorMaxNodesSSO = 16;

namespace llvm {

class AAResults;
class APInt;
class AssumptionCache;
class BlockFrequencyInfo;
class DataLayout;
class DominatorTree;
class GEPOperator;
class GlobalVariable;
class OptimizationRemarkEmitter;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class User;

class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
    : public InstCombiner,
      public InstVisitor<InstCombinerImpl, Instruction *> {
public:
  InstCombinerImpl(InstructionWorklist &Worklist, BuilderTy &Builder,
                   Function &F, AAResults *AA, AssumptionCache &AC,
                   TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
                   DominatorTree &DT, OptimizationRemarkEmitter &ORE,
                   BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI,
                   ProfileSummaryInfo *PSI, const DataLayout &DL,
                   ReversePostOrderTraversal<BasicBlock *> &RPOT)
      : InstCombiner(Worklist, Builder, F, AA, AC, TLI, TTI, DT, ORE, BFI, BPI,
                     PSI, DL, RPOT) {}

  ~InstCombinerImpl() override = default;

  /// Perform early cleanup and prepare the InstCombine worklist.
  bool prepareWorklist(Function &F);

  /// Run the combiner over the entire worklist until it is empty.
  ///
  /// \returns true if the IR is changed.
  bool run();

  // Visitation implementation - Implement instruction combining for different
  // instruction types. The semantics are as follows:
  //     Return Value:
  //       null      - No change was made
  //       I         - Change was made, I is still valid, I may be dead though
  //       otherwise - Change was made, replace I with returned instruction
  //
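  // For illustration only -- a hypothetical visitor (not one of the methods
  // declared below) obeying this contract might look like:
  //
  //   Instruction *visitFoo(BinaryOperator &I) {
  //     if (Value *V = simplifyFooSomehow(I))
  //       return replaceInstUsesWith(I, V); // change made; returns &I
  //     if (Instruction *New = buildCheaperFoo(I))
  //       return New;                       // caller replaces I with New
  //     return nullptr;                     // no change was made
  //   }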
  Instruction *visitFNeg(UnaryOperator &I);
  Instruction *visitAdd(BinaryOperator &I);
  Instruction *visitFAdd(BinaryOperator &I);
  Value *OptimizePointerDifference(Value *LHS, Value *RHS, Type *Ty,
                                   bool isNUW);
  Instruction *visitSub(BinaryOperator &I);
  Instruction *visitFSub(BinaryOperator &I);
  Instruction *visitMul(BinaryOperator &I);
  Instruction *foldPowiReassoc(BinaryOperator &I);
  Instruction *foldFMulReassoc(BinaryOperator &I);
  Instruction *visitFMul(BinaryOperator &I);
  Instruction *visitURem(BinaryOperator &I);
  Instruction *visitSRem(BinaryOperator &I);
  Instruction *visitFRem(BinaryOperator &I);
  bool simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I);
  Instruction *commonIDivRemTransforms(BinaryOperator &I);
  Instruction *commonIRemTransforms(BinaryOperator &I);
  Instruction *commonIDivTransforms(BinaryOperator &I);
  Instruction *visitUDiv(BinaryOperator &I);
  Instruction *visitSDiv(BinaryOperator &I);
  Instruction *visitFDiv(BinaryOperator &I);
  Value *simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted);
  Instruction *FoldOrOfLogicalAnds(Value *Op0, Value *Op1);
  Instruction *visitAnd(BinaryOperator &I);
  Instruction *visitOr(BinaryOperator &I);
  bool sinkNotIntoLogicalOp(Instruction &I);
  bool sinkNotIntoOtherHandOfLogicalOp(Instruction &I);
  Instruction *visitXor(BinaryOperator &I);
  Instruction *visitShl(BinaryOperator &I);
  Value *reassociateShiftAmtsOfTwoSameDirectionShifts(
      BinaryOperator *Sh0, const SimplifyQuery &SQ,
      bool AnalyzeForSignBitExtraction = false);
  Instruction *canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(
      BinaryOperator &I);
  Instruction *foldVariableSignZeroExtensionOfVariableHighBitExtract(
      BinaryOperator &OldAShr);
  Instruction *visitAShr(BinaryOperator &I);
  Instruction *visitLShr(BinaryOperator &I);
  Instruction *commonShiftTransforms(BinaryOperator &I);
  Instruction *visitFCmpInst(FCmpInst &I);
  CmpInst *canonicalizeICmpPredicate(CmpInst &I);
  Instruction *visitICmpInst(ICmpInst &I);
  Instruction *FoldShiftByConstant(Value *Op0, Constant *Op1,
                                   BinaryOperator &I);
  Instruction *commonCastTransforms(CastInst &CI);
  Instruction *visitTrunc(TruncInst &CI);
  Instruction *visitZExt(ZExtInst &Zext);
  Instruction *visitSExt(SExtInst &Sext);
  Instruction *visitFPTrunc(FPTruncInst &CI);
  Instruction *visitFPExt(CastInst &CI);
  Instruction *visitFPToUI(FPToUIInst &FI);
  Instruction *visitFPToSI(FPToSIInst &FI);
  Instruction *visitUIToFP(CastInst &CI);
  Instruction *visitSIToFP(CastInst &CI);
  Instruction *visitPtrToInt(PtrToIntInst &CI);
  Instruction *visitPtrToAddr(PtrToAddrInst &CI);
  Instruction *visitIntToPtr(IntToPtrInst &CI);
  Instruction *visitBitCast(BitCastInst &CI);
  Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
  Instruction *foldItoFPtoI(CastInst &FI);
  Instruction *visitSelectInst(SelectInst &SI);
  Instruction *foldShuffledIntrinsicOperands(IntrinsicInst *II);
  Value *foldReversedIntrinsicOperands(IntrinsicInst *II);
  Instruction *visitCallInst(CallInst &CI);
  Instruction *visitInvokeInst(InvokeInst &II);
  Instruction *visitCallBrInst(CallBrInst &CBI);

  Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
  Instruction *visitPHINode(PHINode &PN);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
  Instruction *visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src);
  Instruction *visitAllocaInst(AllocaInst &AI);
  Instruction *visitAllocSite(Instruction &FI);
  Instruction *visitFree(CallInst &FI, Value *FreedOp);
  Instruction *visitLoadInst(LoadInst &LI);
  Instruction *visitStoreInst(StoreInst &SI);
  Instruction *visitAtomicRMWInst(AtomicRMWInst &SI);
  Instruction *visitUnconditionalBranchInst(BranchInst &BI);
  Instruction *visitBranchInst(BranchInst &BI);
  Instruction *visitFenceInst(FenceInst &FI);
  Instruction *visitSwitchInst(SwitchInst &SI);
  Instruction *visitReturnInst(ReturnInst &RI);
  Instruction *visitUnreachableInst(UnreachableInst &I);
  Instruction *
  foldAggregateConstructionIntoAggregateReuse(InsertValueInst &OrigIVI);
  Instruction *visitInsertValueInst(InsertValueInst &IV);
  Instruction *visitInsertElementInst(InsertElementInst &IE);
  Instruction *visitExtractElementInst(ExtractElementInst &EI);
  Instruction *simplifyBinOpSplats(ShuffleVectorInst &SVI);
  Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
  Instruction *visitExtractValueInst(ExtractValueInst &EV);
  Instruction *visitLandingPadInst(LandingPadInst &LI);
  Instruction *visitVAEndInst(VAEndInst &I);
  Value *pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI);
  bool freezeOtherUses(FreezeInst &FI);
  Instruction *foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN);
  Instruction *visitFreeze(FreezeInst &I);

  /// Specify what to return for unhandled instructions.
  Instruction *visitInstruction(Instruction &I) { return nullptr; }

  /// True when DB dominates all uses of DI except UI.
  /// UI must be in the same block as DI.
  /// The routine checks that the DI parent and DB are different.
  bool dominatesAllUses(const Instruction *DI, const Instruction *UI,
                        const BasicBlock *DB) const;

  /// Try to replace select with select operand SIOpd in SI-ICmp sequence.
  bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp,
                                 const unsigned SIOpd);

  LoadInst *combineLoadToNewType(LoadInst &LI, Type *NewTy,
                                 const Twine &Suffix = "");

  KnownFPClass computeKnownFPClass(Value *Val, FastMathFlags FMF,
                                   FPClassTest Interested = fcAllFlags,
                                   const Instruction *CtxI = nullptr,
                                   unsigned Depth = 0) const {
    return llvm::computeKnownFPClass(
        Val, FMF, Interested, getSimplifyQuery().getWithInstruction(CtxI),
        Depth);
  }

  KnownFPClass computeKnownFPClass(Value *Val,
                                   FPClassTest Interested = fcAllFlags,
                                   const Instruction *CtxI = nullptr,
                                   unsigned Depth = 0) const {
    return llvm::computeKnownFPClass(
        Val, Interested, getSimplifyQuery().getWithInstruction(CtxI), Depth);
  }

  /// Check if fmul \p MulVal, +0.0 will yield +0.0 (or signed zero is
  /// ignorable).
  bool fmulByZeroIsZero(Value *MulVal, FastMathFlags FMF,
                        const Instruction *CtxI) const;

  std::optional<std::pair<Intrinsic::ID, SmallVector<Value *, 3>>>
  convertOrOfShiftsToFunnelShift(Instruction &Or);

private:
  bool annotateAnyAllocSite(CallBase &Call, const TargetLibraryInfo *TLI);
  bool isDesirableIntType(unsigned BitWidth) const;
  bool shouldChangeType(unsigned FromBitWidth, unsigned ToBitWidth) const;
  bool shouldChangeType(Type *From, Type *To) const;
  Value *dyn_castNegVal(Value *V) const;

  /// Classify whether a cast is worth optimizing.
  ///
  /// This is a helper to decide whether the simplification of
  /// logic(cast(A), cast(B)) to cast(logic(A, B)) should be performed.
  ///
  /// \param CI The cast we are interested in.
  ///
  /// \return true if this cast actually results in any code being generated
  /// and if it cannot already be eliminated by some other transformation.
  bool shouldOptimizeCast(CastInst *CI);

  /// Try to optimize a sequence of instructions checking if an operation
  /// on LHS and RHS overflows.
  ///
  /// If this overflow check is done via one of the overflow check intrinsics,
  /// then CtxI has to be the call instruction calling that intrinsic. If this
  /// overflow check is done by arithmetic followed by a compare, then CtxI has
  /// to be the arithmetic instruction.
  ///
  /// If a simplification is possible, stores the simplified result of the
  /// operation in OperationResult and the result of the overflow check in
  /// OverflowResult, and returns true. If no simplification is possible,
  /// returns false.
  bool OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp, bool IsSigned,
                             Value *LHS, Value *RHS, Instruction &CtxI,
                             Value *&OperationResult,
                             Constant *&OverflowResult);
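  // For illustration (hedged sketch; not an exhaustive list of recognized
  // forms): the overflow check may appear as an overflow intrinsic, e.g.
  //   %res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  //   %ov  = extractvalue { i32, i1 } %res, 1
  // (CtxI is the call), or as plain arithmetic followed by a compare, e.g.
  //   %add = add i32 %a, %b
  //   %ov  = icmp ult i32 %add, %a   ; unsigned-add overflow idiom
  // (CtxI is the add).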

  Instruction *visitCallBase(CallBase &Call);
  Instruction *tryOptimizeCall(CallInst *CI);
  bool transformConstExprCastCall(CallBase &Call);
  Instruction *transformCallThroughTrampoline(CallBase &Call,
                                              IntrinsicInst &Tramp);

  /// Try to optimize a call to the result of a ptrauth intrinsic, potentially
  /// into the ptrauth call bundle:
  ///   - call(ptrauth.resign(p)), ["ptrauth"()] -> call p, ["ptrauth"()]
  ///   - call(ptrauth.sign(p)),   ["ptrauth"()] -> call p
  /// as long as the key/discriminator are the same in sign and auth-bundle,
  /// and we don't change the key in the bundle (to a potentially-invalid key).
  Instruction *foldPtrAuthIntrinsicCallee(CallBase &Call);

  /// Try to optimize a call to a ptrauth constant, into its ptrauth bundle:
  ///   call(ptrauth(f)), ["ptrauth"()] -> call f
  /// as long as the key/discriminator are the same in constant and bundle.
  Instruction *foldPtrAuthConstantCallee(CallBase &Call);

  // Return (a, b) if (LHS, RHS) is known to be (a, b) or (b, a).
  // Otherwise, return std::nullopt.
  // Currently it matches:
  //   - LHS = (select c, a, b), RHS = (select c, b, a)
  //   - LHS = (phi [a, BB0], [b, BB1]), RHS = (phi [b, BB0], [a, BB1])
  //   - LHS = min(a, b), RHS = max(a, b)
  std::optional<std::pair<Value *, Value *>> matchSymmetricPair(Value *LHS,
                                                                Value *RHS);

  Value *simplifyMaskedLoad(IntrinsicInst &II);
  Instruction *simplifyMaskedStore(IntrinsicInst &II);
  Instruction *simplifyMaskedGather(IntrinsicInst &II);
  Instruction *simplifyMaskedScatter(IntrinsicInst &II);

  /// Transform (zext icmp) to bitwise / integer operations in order to
  /// eliminate it.
  ///
  /// \param Cmp The icmp of the (zext icmp) pair we are interested in.
  /// \param Zext The zext of the (zext icmp) pair we are interested in.
  ///
  /// \return null if the transformation cannot be performed. If the
  /// transformation can be performed the new instruction that replaces the
  /// (zext icmp) pair will be returned.
  Instruction *transformZExtICmp(ICmpInst *Cmp, ZExtInst &Zext);
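  // Illustrative case only (the exact set of handled patterns lives in the
  // implementation):
  //   %c = icmp ne i32 %x, 0        ; with %x known to be 0 or 1
  //   %z = zext i1 %c to i32
  // can be replaced by %x itself, eliminating both the icmp and the zext.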

  Instruction *transformSExtICmp(ICmpInst *Cmp, SExtInst &Sext);

  bool willNotOverflowSignedAdd(const WithCache<const Value *> &LHS,
                                const WithCache<const Value *> &RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedAdd(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedAdd(const WithCache<const Value *> &LHS,
                                  const WithCache<const Value *> &RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedAdd(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowAdd(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedAdd(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedAdd(LHS, RHS, CxtI);
  }

  bool willNotOverflowSignedSub(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedSub(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedSub(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedSub(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowSub(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedSub(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedSub(LHS, RHS, CxtI);
  }

  bool willNotOverflowSignedMul(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedMul(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedMul(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI,
                                  bool IsNSW = false) const {
    return computeOverflowForUnsignedMul(LHS, RHS, &CxtI, IsNSW) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowMul(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedMul(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedMul(LHS, RHS, CxtI);
  }

  bool willNotOverflow(BinaryOperator::BinaryOps Opcode, const Value *LHS,
                       const Value *RHS, const Instruction &CxtI,
                       bool IsSigned) const {
    switch (Opcode) {
    case Instruction::Add: return willNotOverflowAdd(LHS, RHS, CxtI, IsSigned);
    case Instruction::Sub: return willNotOverflowSub(LHS, RHS, CxtI, IsSigned);
    case Instruction::Mul: return willNotOverflowMul(LHS, RHS, CxtI, IsSigned);
    default: llvm_unreachable("Unexpected opcode for overflow query");
    }
  }

  Value *EmitGEPOffset(GEPOperator *GEP, bool RewriteGEP = false);
  /// Emit sum of multiple GEP offsets. The GEPs are processed in reverse
  /// order.
  Value *EmitGEPOffsets(ArrayRef<GEPOperator *> GEPs, GEPNoWrapFlags NW,
                        Type *IdxTy, bool RewriteGEPs);
  Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
  Instruction *foldBitcastExtElt(ExtractElementInst &ExtElt);
  Instruction *foldCastedBitwiseLogic(BinaryOperator &I);
  Instruction *foldFBinOpOfIntCasts(BinaryOperator &I);
  // Should only be called by `foldFBinOpOfIntCasts`.
  Instruction *foldFBinOpOfIntCastsFromSign(
      BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,
      Constant *Op1FpC, SmallVectorImpl<WithCache<const Value *>> &OpsKnown);
  Instruction *foldBinopOfSextBoolToSelect(BinaryOperator &I);
  Instruction *narrowBinOp(TruncInst &Trunc);
  Instruction *narrowMaskedBinOp(BinaryOperator &And);
  Instruction *narrowMathIfNoOverflow(BinaryOperator &I);
  Instruction *narrowFunnelShift(TruncInst &Trunc);
  Instruction *optimizeBitCastFromPhi(CastInst &CI, PHINode *PN);
  Instruction *matchSAddSubSat(IntrinsicInst &MinMax1);
  Instruction *foldNot(BinaryOperator &I);
  Instruction *foldBinOpOfDisplacedShifts(BinaryOperator &I);

  /// Determine if a pair of casts can be replaced by a single cast.
  ///
  /// \param CI1 The first of a pair of casts.
  /// \param CI2 The second of a pair of casts.
  ///
  /// \return 0 if the cast pair cannot be eliminated, otherwise returns an
  /// Instruction::CastOps value for a cast that can replace the pair, casting
  /// CI1->getSrcTy() to CI2->getDstTy().
  ///
  /// \see CastInst::isEliminableCastPair
  Instruction::CastOps isEliminableCastPair(const CastInst *CI1,
                                            const CastInst *CI2);
  Value *simplifyIntToPtrRoundTripCast(Value *Val);

  Value *foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS, Instruction &I,
                          bool IsAnd, bool IsLogical = false);
  Value *foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS, BinaryOperator &Xor);

  Value *foldEqOfParts(Value *Cmp0, Value *Cmp1, bool IsAnd);

  Value *foldAndOrOfICmpsUsingRanges(ICmpInst *ICmp1, ICmpInst *ICmp2,
                                     bool IsAnd);

  /// Optimize (fcmp)&(fcmp) or (fcmp)|(fcmp).
  /// NOTE: Unlike most of instcombine, this returns a Value which should
  /// already be inserted into the function.
  Value *foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd,
                          bool IsLogicalSelect = false);

  Instruction *foldLogicOfIsFPClass(BinaryOperator &Operator, Value *LHS,
                                    Value *RHS);

  Value *foldBooleanAndOr(Value *LHS, Value *RHS, Instruction &I, bool IsAnd,
                          bool IsLogical);

  Value *reassociateBooleanAndOr(Value *LHS, Value *X, Value *Y, Instruction &I,
                                 bool IsAnd, bool RHSIsLogical);

  Value *foldDisjointOr(Value *LHS, Value *RHS);

  Value *reassociateDisjointOr(Value *LHS, Value *RHS);

  Instruction *
  canonicalizeConditionalNegationViaMathToSelect(BinaryOperator &i);

  Value *matchSelectFromAndOr(Value *A, Value *B, Value *C, Value *D,
                              bool InvertFalseVal = false);
  Value *getSelectCondition(Value *A, Value *B, bool ABIsTheSame);

  Instruction *foldLShrOverflowBit(BinaryOperator &I);
  Instruction *foldExtractOfOverflowIntrinsic(ExtractValueInst &EV);
  Instruction *foldIntrinsicWithOverflowCommon(IntrinsicInst *II);
  Instruction *foldIntrinsicIsFPClass(IntrinsicInst &II);
  Instruction *foldFPSignBitOps(BinaryOperator &I);
  Instruction *foldFDivConstantDivisor(BinaryOperator &I);

  // Optimize one of these forms:
  //   and i1 Op, SI / select i1 Op, i1 SI, i1 false (if IsAnd = true)
  //   or i1 Op, SI  / select i1 Op, i1 true, i1 SI  (if IsAnd = false)
  // into a simpler select instruction using isImpliedCondition.
  Instruction *foldAndOrOfSelectUsingImpliedCond(Value *Op, SelectInst &SI,
                                                 bool IsAnd);

  Instruction *hoistFNegAboveFMulFDiv(Value *FNegOp, Instruction &FMFSource);

  /// Simplify \p V given that it is known to be non-null.
  /// Returns the simplified value if possible, otherwise returns nullptr.
  /// If \p HasDereferenceable is true, the simplification will not perform
  /// same object checks.
  Value *simplifyNonNullOperand(Value *V, bool HasDereferenceable,
                                unsigned Depth = 0);

  /// Create `select C, S1, S2`. Use only when the profile cannot be calculated
  /// from existing profile metadata: if the Function has profiles, this will
  /// set the profile of this select to "unknown".
  SelectInst *
  createSelectInstWithUnknownProfile(Value *C, Value *S1, Value *S2,
                                     const Twine &NameStr = "",
                                     InsertPosition InsertBefore = nullptr) {
    auto *Sel = SelectInst::Create(C, S1, S2, NameStr, InsertBefore, nullptr);
    setExplicitlyUnknownBranchWeightsIfProfiled(*Sel, DEBUG_TYPE, &F);
    return Sel;
  }

public:
  /// Create and insert the idiom we use to indicate a block is unreachable
  /// without having to rewrite the CFG from within InstCombine.
  void CreateNonTerminatorUnreachable(Instruction *InsertAt) {
    auto &Ctx = InsertAt->getContext();
    auto *SI = new StoreInst(ConstantInt::getTrue(Ctx),
                             PoisonValue::get(PointerType::getUnqual(Ctx)),
                             /*isVolatile*/ false, Align(1));
    InsertNewInstWith(SI, InsertAt->getIterator());
  }
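  // For illustration: the inserted idiom is roughly
  //   store i1 true, ptr poison, align 1
  // i.e. a non-volatile store through a poison pointer, which is immediate UB
  // and lets later passes rewrite the block as unreachable without InstCombine
  // having to modify the CFG itself.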

  /// Combiner aware instruction erasure.
  ///
  /// When dealing with an instruction that has side effects or produces a void
  /// value, we can't rely on DCE to delete the instruction. Instead, visit
  /// methods should return the value returned by this function.
  Instruction *eraseInstFromFunction(Instruction &I) override {
    LLVM_DEBUG(dbgs() << "IC: ERASE " << I << '\n');
    assert(I.use_empty() && "Cannot erase instruction that is used!");
    salvageDebugInfo(I);

    // Make sure that we reprocess all operands now that we reduced their
    // use counts.
    SmallVector<Value *> Ops(I.operands());
    Worklist.remove(&I);
    DC.removeValue(&I);
    I.eraseFromParent();
    for (Value *Op : Ops)
      Worklist.handleUseCountDecrement(Op);
    MadeIRChange = true;
    return nullptr; // Don't do anything further with I; it has been erased.
  }

  OverflowResult computeOverflow(Instruction::BinaryOps BinaryOp, bool IsSigned,
                                 Value *LHS, Value *RHS,
                                 Instruction *CxtI) const;

  /// Performs a few simplifications for operators which are associative
  /// or commutative.
  bool SimplifyAssociativeOrCommutative(BinaryOperator &I);

  /// Tries to simplify binary operations which some other binary
  /// operation distributes over.
  ///
  /// It does this either by factorizing out common terms (e.g. "(A*B)+(A*C)"
  /// -> "A*(B+C)") or by expanding out if this results in simplifications
  /// (e.g. "A & (B | C) -> (A&B) | (A&C)" if this is a win). Returns the
  /// simplified value, or null if it didn't simplify.
  Value *foldUsingDistributiveLaws(BinaryOperator &I);

  /// Tries to simplify add operations using the definition of remainder.
  ///
  /// The definition of remainder is X % C = X - (X / C) * C. The add
  /// expression X % C0 + ((X / C0) % C1) * C0 can be simplified to
  /// X % (C0 * C1).
  Value *SimplifyAddWithRemainder(BinaryOperator &I);
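  // Worked instance (illustrative, with assumed constants C0 = 3, C1 = 5, and
  // unsigned X):
  //   (X % 3) + ((X / 3) % 5) * 3  ==  X % 15
  // since writing X = 15*q + r with 0 <= r < 15 gives X % 3 == r % 3,
  // (X / 3) % 5 == r / 3, and r % 3 + (r / 3) * 3 == r.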

  // Binary Op helper for select operations where the expression can be
  // efficiently reorganized.
  Value *SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS,
                                        Value *RHS);

  // If `I` has operand `(ctpop (not x))`, fold `I` with `(sub nuw nsw
  // BitWidth(x), (ctpop x))`.
  Instruction *tryFoldInstWithCtpopWithNot(Instruction *I);

  // (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
  //    -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
  // (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
  //    -> (BinOp (logic_shift (BinOp X, Y)), Mask)
  Instruction *foldBinOpShiftWithShift(BinaryOperator &I);

  /// Tries to simplify binops of select and cast of the select condition.
  ///
  /// (Binop (cast C), (select C, T, F))
  ///    -> (select C, C0, C1)
  Instruction *foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I);

  /// This tries to simplify binary operations by factorizing out common terms
  /// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
  Value *tryFactorizationFolds(BinaryOperator &I);

  /// Match a select chain which produces one of three values based on whether
  /// the LHS is less than, equal to, or greater than RHS respectively.
  /// Return true if we matched a three way compare idiom. The LHS, RHS, Less,
  /// Equal and Greater values are saved in the matching process and returned
  /// to the caller.
  bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS,
                               ConstantInt *&Less, ConstantInt *&Equal,
                               ConstantInt *&Greater);
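  // One typical shape of the idiom (sketch; the matcher may accept variants):
  //   %lt  = icmp slt i32 %lhs, %rhs
  //   %eq  = icmp eq  i32 %lhs, %rhs
  //   %sel = select i1 %eq, i32 Equal, i32 Greater
  //   %res = select i1 %lt, i32 Less, i32 %sel
  // yielding Less / Equal / Greater for lhs < rhs, lhs == rhs, lhs > rhs.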

  /// Attempts to replace I with a simpler value based on the demanded
  /// bits.
  Value *SimplifyDemandedUseBits(Instruction *I, const APInt &DemandedMask,
                                 KnownBits &Known, const SimplifyQuery &Q,
                                 unsigned Depth = 0);
  using InstCombiner::SimplifyDemandedBits;
  bool SimplifyDemandedBits(Instruction *I, unsigned Op,
                            const APInt &DemandedMask, KnownBits &Known,
                            const SimplifyQuery &Q,
                            unsigned Depth = 0) override;

  /// Helper routine of SimplifyDemandedUseBits. It computes KnownZero/KnownOne
  /// bits. It also tries to handle simplifications that can be done based on
  /// DemandedMask, but without modifying the Instruction.
  Value *SimplifyMultipleUseDemandedBits(Instruction *I,
                                         const APInt &DemandedMask,
                                         KnownBits &Known,
                                         const SimplifyQuery &Q,
                                         unsigned Depth = 0);

  /// Helper routine of SimplifyDemandedUseBits. It tries to simplify demanded
  /// bit for "r1 = shr x, c1; r2 = shl r1, c2" instruction sequence.
  Value *simplifyShrShlDemandedBits(
      Instruction *Shr, const APInt &ShrOp1, Instruction *Shl,
      const APInt &ShlOp1, const APInt &DemandedMask, KnownBits &Known);

  /// Tries to simplify operands to an integer instruction based on its
  /// demanded bits.
  bool SimplifyDemandedInstructionBits(Instruction &Inst);
  bool SimplifyDemandedInstructionBits(Instruction &Inst, KnownBits &Known);

  Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                    APInt &PoisonElts, unsigned Depth = 0,
                                    bool AllowMultipleUsers = false) override;

  /// Attempts to replace V with a simpler value based on the demanded
  /// floating-point classes.
  Value *SimplifyDemandedUseFPClass(Instruction *I, FPClassTest DemandedMask,
                                    KnownFPClass &Known, Instruction *CxtI,
                                    unsigned Depth = 0);
  Value *SimplifyMultipleUseDemandedFPClass(Instruction *I,
                                            FPClassTest DemandedMask,
                                            KnownFPClass &Known,
                                            Instruction *CxtI, unsigned Depth);

  bool SimplifyDemandedFPClass(Instruction *I, unsigned Op,
                               FPClassTest DemandedMask, KnownFPClass &Known,
                               unsigned Depth = 0);

  bool SimplifyDemandedInstructionFPClass(Instruction &Inst);

  /// Common transforms for add / disjoint or
  Instruction *foldAddLikeCommutative(Value *LHS, Value *RHS, bool NSW,
                                      bool NUW);

  /// Canonicalize the position of binops relative to shufflevector.
  Instruction *foldVectorBinop(BinaryOperator &Inst);
  Instruction *foldVectorSelect(SelectInst &Sel);
  Instruction *foldSelectShuffle(ShuffleVectorInst &Shuf);
  Constant *unshuffleConstant(ArrayRef<int> ShMask, Constant *C,
                              VectorType *NewCTy);

  /// Given a binary operator, cast instruction, or select which has a PHI node
  /// as operand #0, see if we can fold the instruction into the PHI (which is
  /// only possible if all operands to the PHI are constants).
  Instruction *foldOpIntoPhi(Instruction &I, PHINode *PN,
                             bool AllowMultipleUses = false);

  /// Try to fold binary operators whose operands are simple interleaved
  /// recurrences to a single recurrence. This is a common pattern in reduction
  /// operations.
  /// Example:
  ///   %phi1 = phi [init1, %BB1], [%op1, %BB2]
  ///   %phi2 = phi [init2, %BB1], [%op2, %BB2]
  ///   %op1 = binop %phi1, constant1
  ///   %op2 = binop %phi2, constant2
  ///   %rdx = binop %op1, %op2
  /// -->
  ///   %phi_combined = phi [init_combined, %BB1], [%op_combined, %BB2]
  ///   %rdx_combined = binop %phi_combined, constant_combined
  Instruction *foldBinopWithRecurrence(BinaryOperator &BO);

  /// For a binary operator with 2 phi operands, try to hoist the binary
  /// operation before the phi. This can result in fewer instructions in
  /// patterns where at least one set of phi operands simplifies.
  /// Example:
  ///   BB3: binop (phi [X, BB1], [C1, BB2]), (phi [Y, BB1], [C2, BB2])
  /// -->
  ///   BB1: BO = binop X, Y
  ///   BB3: phi [BO, BB1], [(binop C1, C2), BB2]
  Instruction *foldBinopWithPhiOperands(BinaryOperator &BO);

  /// Given an instruction with a select as one operand and a constant as the
  /// other operand, try to fold the binary operator into the select arguments.
  /// This also works for Cast instructions, which obviously do not have a
  /// second operand.
  Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
                                bool FoldWithMultiUse = false,
                                bool SimplifyBothArms = false);

  Instruction *foldBinOpSelectBinOp(BinaryOperator &Op);

  /// This is a convenience wrapper function for the above two functions.
  Instruction *foldBinOpIntoSelectOrPhi(BinaryOperator &I);

  Instruction *foldAddWithConstant(BinaryOperator &Add);

  Instruction *foldSquareSumInt(BinaryOperator &I);
  Instruction *foldSquareSumFP(BinaryOperator &I);

  /// Try to rotate an operation below a PHI node, using PHI nodes for
  /// its operands.
  Instruction *foldPHIArgOpIntoPHI(PHINode &PN);
  Instruction *foldPHIArgBinOpIntoPHI(PHINode &PN);
  Instruction *foldPHIArgInsertValueInstructionIntoPHI(PHINode &PN);
  Instruction *foldPHIArgExtractValueInstructionIntoPHI(PHINode &PN);
  Instruction *foldPHIArgGEPIntoPHI(PHINode &PN);
  Instruction *foldPHIArgLoadIntoPHI(PHINode &PN);
  Instruction *foldPHIArgZextsIntoPHI(PHINode &PN);
  Instruction *foldPHIArgIntToPtrToPHI(PHINode &PN);

  /// If the phi is within a phi web, which is formed by the def-use chains
  /// of phis and in which every phi is only used by the other phis of the
  /// web, then all of these phis are dead and we will remove them.
  bool foldDeadPhiWeb(PHINode &PN);

  /// If an integer typed PHI has only one use which is an IntToPtr operation,
  /// replace the PHI with an existing pointer typed PHI if it exists. Otherwise
  /// insert a new pointer typed PHI and replace the original one.
  bool foldIntegerTypedPHI(PHINode &PN);
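  // Sketch of the shape this targets (illustrative only):
  //   %iv = phi i64 [ %a, %bb0 ], [ %b, %bb1 ]   ; sole use is the inttoptr
  //   %p  = inttoptr i64 %iv to ptr
  // When the incoming integers are themselves derived from pointers (e.g. via
  // ptrtoint), the phi can instead be expressed directly over pointer values.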

  /// Helper function for FoldPHIArgXIntoPHI() to set debug location for the
  /// folded operation.
  void PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN);

  Value *foldPtrToIntOrAddrOfGEP(Type *IntTy, Value *Ptr);
  Instruction *foldGEPICmp(GEPOperator *GEPLHS, Value *RHS, CmpPredicate Cond,
                           Instruction &I);
  Instruction *foldSelectICmp(CmpPredicate Pred, SelectInst *SI, Value *RHS,
                              const ICmpInst &I);
  bool foldAllocaCmp(AllocaInst *Alloca);
  Instruction *foldCmpLoadFromIndexedGlobal(LoadInst *LI,
                                            GetElementPtrInst *GEP,
                                            CmpInst &ICI,
                                            ConstantInt *AndCst = nullptr);
  Instruction *foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
                                    Constant *RHSC);
  Instruction *foldICmpAddOpConst(Value *X, const APInt &C, CmpPredicate Pred);
  Instruction *foldICmpWithCastOp(ICmpInst &ICmp);
  Instruction *foldICmpWithZextOrSext(ICmpInst &ICmp);

  Instruction *foldICmpUsingKnownBits(ICmpInst &Cmp);
  Instruction *foldICmpWithDominatingICmp(ICmpInst &Cmp);
  Instruction *foldICmpWithConstant(ICmpInst &Cmp);
  Instruction *foldIsMultipleOfAPowerOfTwo(ICmpInst &Cmp);
  Instruction *foldICmpUsingBoolRange(ICmpInst &I);
  Instruction *foldICmpInstWithConstant(ICmpInst &Cmp);
  Instruction *foldICmpInstWithConstantNotInt(ICmpInst &Cmp);
  Instruction *foldICmpInstWithConstantAllowPoison(ICmpInst &Cmp,
                                                   const APInt &C);
  Instruction *foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ);
  Instruction *foldICmpWithMinMax(Instruction &I, MinMaxIntrinsic *MinMax,
                                  Value *Z, CmpPredicate Pred);
  Instruction *foldICmpWithClamp(ICmpInst &Cmp, Value *X, MinMaxIntrinsic *Min);
  Instruction *foldICmpEquality(ICmpInst &Cmp);
  Instruction *foldIRemByPowerOfTwoToBitTest(ICmpInst &I);
  Instruction *foldSignBitTest(ICmpInst &I);
  Instruction *foldICmpWithZero(ICmpInst &Cmp);

  Value *foldMultiplicationOverflowCheck(ICmpInst &Cmp);

  Instruction *foldICmpBinOpWithConstant(ICmpInst &Cmp, BinaryOperator *BO,
                                         const APInt &C);
  Instruction *foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select,
                                      ConstantInt *C);
  Instruction *foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc,
                                     const APInt &C);
  Instruction *foldICmpTruncWithTruncOrExt(ICmpInst &Cmp,
                                           const SimplifyQuery &Q);
  Instruction *foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And,
                                   const APInt &C);
  Instruction *foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor,
                                   const APInt &C);
  Instruction *foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or,
                                  const APInt &C);
  Instruction *foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul,
                                   const APInt &C);
  Instruction *foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl,
                                   const APInt &C);
  Instruction *foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr,
                                   const APInt &C);
  Instruction *foldICmpSRemConstant(ICmpInst &Cmp, BinaryOperator *UDiv,
                                    const APInt &C);
  Instruction *foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv,
                                    const APInt &C);
  Instruction *foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div,
                                   const APInt &C);
  Instruction *foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub,
                                   const APInt &C);
  Instruction *foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add,
                                   const APInt &C);
  Instruction *foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And,
                                     const APInt &C1);
  Instruction *foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
                                const APInt &C1, const APInt &C2);
  Instruction *foldICmpXorShiftConst(ICmpInst &Cmp, BinaryOperator *Xor,
                                     const APInt &C);
  Instruction *foldICmpShrConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1,
                                     const APInt &C2);
  Instruction *foldICmpShlConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1,
                                     const APInt &C2);

  Instruction *foldICmpBinOpWithConstantViaTruthTable(ICmpInst &Cmp,
                                                      BinaryOperator *BO,
                                                      const APInt &C);
  Instruction *foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
                                                 BinaryOperator *BO,
                                                 const APInt &C);
  Instruction *foldICmpIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II,
                                             const APInt &C);
  Instruction *foldICmpEqIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II,
                                               const APInt &C);
  Instruction *foldICmpBitCast(ICmpInst &Cmp);
  Instruction *foldICmpWithTrunc(ICmpInst &Cmp);
  Instruction *foldICmpCommutative(CmpPredicate Pred, Value *Op0, Value *Op1,
                                   ICmpInst &CxtI);

  // Helpers of visitSelectInst().
  Instruction *foldSelectOfBools(SelectInst &SI);
  Instruction *foldSelectToCmp(SelectInst &SI);
  Instruction *foldSelectExtConst(SelectInst &Sel);
  Instruction *foldSelectEqualityTest(SelectInst &SI);
  Instruction *foldSelectOpOp(SelectInst &SI, Instruction *TI, Instruction *FI);
  Instruction *foldSelectIntrinsic(SelectInst &SI);
  Instruction *foldSelectIntoOp(SelectInst &SI, Value *, Value *);
  Instruction *foldSPFofSPF(Instruction *Inner, SelectPatternFlavor SPF1,
                            Value *A, Value *B, Instruction &Outer,
                            SelectPatternFlavor SPF2, Value *C);
  Instruction *foldSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);
  Value *foldSelectWithConstOpToBinOp(ICmpInst *Cmp, Value *TrueVal,
                                      Value *FalseVal);
  Instruction *foldSelectValueEquivalence(SelectInst &SI, CmpInst &CI);
  bool replaceInInstruction(Value *V, Value *Old, Value *New,
                            unsigned Depth = 0);

  Value *insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi,
                         bool isSigned, bool Inside);
  bool mergeStoreIntoSuccessor(StoreInst &SI);

  /// Given an initial instruction, check to see if it is the root of a
  /// bswap/bitreverse idiom. If so, return the equivalent bswap/bitreverse
  /// intrinsic.
  Instruction *matchBSwapOrBitReverse(Instruction &I, bool MatchBSwaps,
                                      bool MatchBitReversals);

  Instruction *SimplifyAnyMemTransfer(AnyMemTransferInst *MI);
  Instruction *SimplifyAnyMemSet(AnyMemSetInst *MI);

  Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned);

  bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock);
  void tryToSinkInstructionDbgVariableRecords(
      Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
      BasicBlock *DestBlock, SmallVectorImpl<DbgVariableRecord *> &DPUsers);

  bool removeInstructionsBeforeUnreachable(Instruction &I);
  void addDeadEdge(BasicBlock *From, BasicBlock *To,
                   SmallVectorImpl<BasicBlock *> &Worklist);
  void handleUnreachableFrom(Instruction *I,
                             SmallVectorImpl<BasicBlock *> &Worklist);
  void handlePotentiallyDeadBlocks(SmallVectorImpl<BasicBlock *> &Worklist);
  void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc);
  void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser = nullptr);

  /// Take the exact integer log2 of the value. If DoFold is true, create the
  /// actual instructions, otherwise return a non-null dummy value. Return
  /// nullptr on failure. Note, if DoFold is true the caller must ensure that
  /// takeLog2 will succeed, otherwise it may create stray instructions.
  Value *takeLog2(Value *Op, unsigned Depth, bool AssumeNonZero, bool DoFold);

  Value *tryGetLog2(Value *Op, bool AssumeNonZero) {
    if (takeLog2(Op, /*Depth=*/0, AssumeNonZero, /*DoFold=*/false))
      return takeLog2(Op, /*Depth=*/0, AssumeNonZero, /*DoFold=*/true);
    return nullptr;
  }
};

class Negator final {
  /// Top-to-bottom, def-to-use negated instruction tree we produced.
  SmallVector<Instruction *, NegatorMaxNodesSSO> NewInstructions;

  using BuilderTy = IRBuilder<TargetFolder, IRBuilderCallbackInserter>;
  BuilderTy Builder;

  const DominatorTree &DT;

  const bool IsTrulyNegation;

  SmallDenseMap<Value *, Value *> NegationsCache;

  Negator(LLVMContext &C, const DataLayout &DL, const DominatorTree &DT,
          bool IsTrulyNegation);

#if LLVM_ENABLE_STATS
  unsigned NumValuesVisitedInThisNegator = 0;
  ~Negator();
#endif

  using Result = std::pair<ArrayRef<Instruction *> /*NewInstructions*/,
                           Value * /*NegatedRoot*/>;

  std::array<Value *, 2> getSortedOperandsOfBinOp(Instruction *I);

  [[nodiscard]] Value *visitImpl(Value *V, bool IsNSW, unsigned Depth);

  [[nodiscard]] Value *negate(Value *V, bool IsNSW, unsigned Depth);

  /// Recurse depth-first and attempt to sink the negation.
  /// FIXME: use worklist?
  [[nodiscard]] std::optional<Result> run(Value *Root, bool IsNSW);

  Negator(const Negator &) = delete;
  Negator(Negator &&) = delete;
  Negator &operator=(const Negator &) = delete;
  Negator &operator=(Negator &&) = delete;

public:
  /// Attempt to negate \p Root. Returns nullptr if negation can't be
  /// performed, otherwise returns the negated value.
  [[nodiscard]] static Value *Negate(bool LHSIsZero, bool IsNSW, Value *Root,
                                     InstCombinerImpl &IC);
};
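// A flavour of what "sinking negation" means here (illustrative only): rather
// than materializing `0 - (X - Y)`, the negation can be pushed into the
// operand, yielding `Y - X` with no extra instruction; analogous rewrites
// apply to other operators, bounded by NegatorDefaultMaxDepth and
// NegatorMaxNodesSSO above.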

struct CommonPointerBase {
  /// Common base pointer.
  Value *Ptr = nullptr;
  /// LHS GEPs until common base.
  SmallVector<GEPOperator *> LHSGEPs;
  /// RHS GEPs until common base.
  SmallVector<GEPOperator *> RHSGEPs;
  /// LHS GEP NoWrapFlags until common base.
  GEPNoWrapFlags LHSNW = GEPNoWrapFlags::all();
  /// RHS GEP NoWrapFlags until common base.
  GEPNoWrapFlags RHSNW = GEPNoWrapFlags::all();

  static CommonPointerBase compute(Value *LHS, Value *RHS);

  /// Whether expanding the GEP chains is expensive.
  bool isExpensive() const;
};
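
// Illustrative shape (hypothetical values): given
//   LHS = gep (gep P, ...), ...   and   RHS = gep P, ...
// compute() would report Ptr = P, with the GEP chains on each side recorded
// in LHSGEPs/RHSGEPs and their accumulated no-wrap flags in LHSNW/RHSNW.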

} // end namespace llvm

#undef DEBUG_TYPE

#endif // LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H