1 | //===- StraightLineStrengthReduce.cpp - -----------------------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file implements straight-line strength reduction (SLSR). Unlike loop |
10 | // strength reduction, this algorithm is designed to reduce arithmetic |
11 | // redundancy in straight-line code instead of loops. It has proven to be |
12 | // effective in simplifying arithmetic statements derived from an unrolled loop. |
13 | // It can also simplify the logic of SeparateConstOffsetFromGEP. |
14 | // |
15 | // There are many optimizations we can perform in the domain of SLSR. This file |
16 | // for now contains only an initial step. Specifically, we look for strength |
17 | // reduction candidates in the following forms: |
18 | // |
19 | // Form 1: B + i * S |
20 | // Form 2: (B + i) * S |
21 | // Form 3: &B[i * S] |
22 | // |
23 | // where S is an integer variable, and i is a constant integer. If we found two |
24 | // candidates S1 and S2 in the same form and S1 dominates S2, we may rewrite S2 |
25 | // in a simpler way with respect to S1. For example, |
26 | // |
27 | // S1: X = B + i * S |
28 | // S2: Y = B + i' * S => X + (i' - i) * S |
29 | // |
30 | // S1: X = (B + i) * S |
31 | // S2: Y = (B + i') * S => X + (i' - i) * S |
32 | // |
33 | // S1: X = &B[i * S] |
34 | // S2: Y = &B[i' * S] => &X[(i' - i) * S] |
35 | // |
36 | // Note: (i' - i) * S is folded to the extent possible. |
37 | // |
38 | // This rewriting is in general a good idea. The code patterns we focus on |
39 | // usually come from loop unrolling, so (i' - i) * S is likely the same |
40 | // across iterations and can be reused. When that happens, the optimized form |
41 | // takes only one add starting from the second iteration. |
42 | // |
43 | // When such rewriting is possible, we call S1 a "basis" of S2. When S2 has |
44 | // multiple bases, we choose to rewrite S2 with respect to its "immediate" |
45 | // basis, the basis that is the closest ancestor in the dominator tree. |
46 | // |
47 | // TODO: |
48 | // |
49 | // - Floating point arithmetics when fast math is enabled. |
50 | // |
51 | // - SLSR may decrease ILP at the architecture level. Targets that are very |
52 | // sensitive to ILP may want to disable it. Having SLSR to consider ILP is |
53 | // left as future work. |
54 | // |
55 | // - When (i' - i) is constant but i and i' are not, we could still perform |
56 | // SLSR. |
57 | |
58 | #include "llvm/Transforms/Scalar/StraightLineStrengthReduce.h" |
59 | #include "llvm/ADT/APInt.h" |
60 | #include "llvm/ADT/DepthFirstIterator.h" |
61 | #include "llvm/ADT/SmallVector.h" |
62 | #include "llvm/Analysis/ScalarEvolution.h" |
63 | #include "llvm/Analysis/TargetTransformInfo.h" |
64 | #include "llvm/Analysis/ValueTracking.h" |
65 | #include "llvm/IR/Constants.h" |
66 | #include "llvm/IR/DataLayout.h" |
67 | #include "llvm/IR/DerivedTypes.h" |
68 | #include "llvm/IR/Dominators.h" |
69 | #include "llvm/IR/GetElementPtrTypeIterator.h" |
70 | #include "llvm/IR/IRBuilder.h" |
71 | #include "llvm/IR/Instruction.h" |
72 | #include "llvm/IR/Instructions.h" |
73 | #include "llvm/IR/Module.h" |
74 | #include "llvm/IR/Operator.h" |
75 | #include "llvm/IR/PatternMatch.h" |
76 | #include "llvm/IR/Type.h" |
77 | #include "llvm/IR/Value.h" |
78 | #include "llvm/InitializePasses.h" |
79 | #include "llvm/Pass.h" |
80 | #include "llvm/Support/Casting.h" |
81 | #include "llvm/Support/ErrorHandling.h" |
82 | #include "llvm/Transforms/Scalar.h" |
83 | #include "llvm/Transforms/Utils/Local.h" |
84 | #include <cassert> |
85 | #include <cstdint> |
86 | #include <limits> |
87 | #include <list> |
88 | #include <vector> |
89 | |
using namespace llvm;
using namespace PatternMatch;

// Sentinel address space passed to TTI::isLegalAddressingMode when the real
// address space of a candidate is not known.
static const unsigned UnknownAddressSpace =
    std::numeric_limits<unsigned>::max();
95 | |
96 | namespace { |
97 | |
// Legacy pass manager wrapper. The actual transformation lives in
// StraightLineStrengthReduce below; this class only wires up the required
// analyses and the module's DataLayout.
class StraightLineStrengthReduceLegacyPass : public FunctionPass {
  // DataLayout of the module being compiled; cached in doInitialization.
  const DataLayout *DL = nullptr;

public:
  static char ID;

  StraightLineStrengthReduceLegacyPass() : FunctionPass(ID) {
    initializeStraightLineStrengthReduceLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    // We do not modify the shape of the CFG.
    AU.setPreservesCFG();
  }

  bool doInitialization(Module &M) override {
    DL = &M.getDataLayout();
    return false;
  }

  bool runOnFunction(Function &F) override;
};
124 | |
// Implementation shared by the legacy and new pass-manager entry points.
// Collects strength-reduction candidates over a function and rewrites each
// one with respect to its immediate basis (see file header for the forms).
class StraightLineStrengthReduce {
public:
  StraightLineStrengthReduce(const DataLayout *DL, DominatorTree *DT,
                             ScalarEvolution *SE, TargetTransformInfo *TTI)
      : DL(DL), DT(DT), SE(SE), TTI(TTI) {}

  // SLSR candidate. Such a candidate must be in one of the forms described in
  // the header comments.
  struct Candidate {
    enum Kind {
      Invalid, // reserved for the default constructor
      Add,     // B + i * S
      Mul,     // (B + i) * S
      GEP,     // &B[..][i * S][..]
    };

    Candidate() = default;
    Candidate(Kind CT, const SCEV *B, ConstantInt *Idx, Value *S,
              Instruction *I)
        : CandidateKind(CT), Base(B), Index(Idx), Stride(S), Ins(I) {}

    Kind CandidateKind = Invalid;

    // The base expression B, represented as a SCEV so that structurally
    // different but equivalent bases compare equal.
    const SCEV *Base = nullptr;

    // Note that Index and Stride of a GEP candidate do not necessarily have the
    // same integer type. In that case, during rewriting, Stride will be
    // sign-extended or truncated to Index's type.
    ConstantInt *Index = nullptr;

    Value *Stride = nullptr;

    // The instruction this candidate corresponds to. It helps us to rewrite a
    // candidate with respect to its immediate basis. Note that one instruction
    // can correspond to multiple candidates depending on how you associate the
    // expression. For instance,
    //
    // (a + 1) * (b + 2)
    //
    // can be treated as
    //
    // <Base: a, Index: 1, Stride: b + 2>
    //
    // or
    //
    // <Base: b, Index: 2, Stride: a + 1>
    Instruction *Ins = nullptr;

    // Points to the immediate basis of this candidate, or nullptr if we cannot
    // find any basis for this candidate.
    Candidate *Basis = nullptr;
  };

  bool runOnFunction(Function &F);

private:
  // Returns true if Basis is a basis for C, i.e., Basis dominates C and they
  // share the same base and stride.
  bool isBasisFor(const Candidate &Basis, const Candidate &C);

  // Returns whether the candidate can be folded into an addressing mode.
  bool isFoldable(const Candidate &C, TargetTransformInfo *TTI,
                  const DataLayout *DL);

  // Returns true if C is already in a simplest form and not worth being
  // rewritten.
  bool isSimplestForm(const Candidate &C);

  // Checks whether I is in a candidate form. If so, adds all the matching forms
  // to Candidates, and tries to find the immediate basis for each of them.
  void allocateCandidatesAndFindBasis(Instruction *I);

  // Allocate candidates and find bases for Add instructions.
  void allocateCandidatesAndFindBasisForAdd(Instruction *I);

  // Given I = LHS + RHS, factors RHS into i * S and makes (LHS + i * S) a
  // candidate.
  void allocateCandidatesAndFindBasisForAdd(Value *LHS, Value *RHS,
                                            Instruction *I);
  // Allocate candidates and find bases for Mul instructions.
  void allocateCandidatesAndFindBasisForMul(Instruction *I);

  // Splits LHS into Base + Index and, if succeeds, calls
  // allocateCandidatesAndFindBasis.
  void allocateCandidatesAndFindBasisForMul(Value *LHS, Value *RHS,
                                            Instruction *I);

  // Allocate candidates and find bases for GetElementPtr instructions.
  void allocateCandidatesAndFindBasisForGEP(GetElementPtrInst *GEP);

  // A helper function that scales Idx with ElementSize before invoking
  // allocateCandidatesAndFindBasis.
  void allocateCandidatesAndFindBasisForGEP(const SCEV *B, ConstantInt *Idx,
                                            Value *S, uint64_t ElementSize,
                                            Instruction *I);

  // Adds the given form <CT, B, Idx, S> to Candidates, and finds its immediate
  // basis.
  void allocateCandidatesAndFindBasis(Candidate::Kind CT, const SCEV *B,
                                      ConstantInt *Idx, Value *S,
                                      Instruction *I);

  // Rewrites candidate C with respect to Basis.
  void rewriteCandidateWithBasis(const Candidate &C, const Candidate &Basis);

  // A helper function that factors ArrayIdx to a product of a stride and a
  // constant index, and invokes allocateCandidatesAndFindBasis with the
  // factorings.
  void factorArrayIndex(Value *ArrayIdx, const SCEV *Base, uint64_t ElementSize,
                        GetElementPtrInst *GEP);

  // Emit code that computes the "bump" from Basis to C.
  static Value *emitBump(const Candidate &Basis, const Candidate &C,
                         IRBuilder<> &Builder, const DataLayout *DL);

  // Analyses supplied by the caller; not owned by this class.
  const DataLayout *DL = nullptr;
  DominatorTree *DT = nullptr;
  ScalarEvolution *SE;
  TargetTransformInfo *TTI = nullptr;
  // All candidates collected so far, in dominator-tree DFS order. A std::list
  // is used because candidates hold pointers into it (Basis) that must stay
  // stable as more candidates are appended.
  std::list<Candidate> Candidates;

  // Temporarily holds all instructions that are unlinked (but not deleted) by
  // rewriteCandidateWithBasis. These instructions will be actually removed
  // after all rewriting finishes.
  std::vector<Instruction *> UnlinkedInstructions;
};
251 | |
252 | } // end anonymous namespace |
253 | |
char StraightLineStrengthReduceLegacyPass::ID = 0;

// Register the legacy pass ("slsr") and its analysis dependencies.
INITIALIZE_PASS_BEGIN(StraightLineStrengthReduceLegacyPass, "slsr",
                      "Straight line strength reduction", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(StraightLineStrengthReduceLegacyPass, "slsr",
                    "Straight line strength reduction", false, false)

// Factory used by the legacy pass manager.
FunctionPass *llvm::createStraightLineStrengthReducePass() {
  return new StraightLineStrengthReduceLegacyPass();
}
267 | |
268 | bool StraightLineStrengthReduce::isBasisFor(const Candidate &Basis, |
269 | const Candidate &C) { |
270 | return (Basis.Ins != C.Ins && // skip the same instruction |
271 | // They must have the same type too. Basis.Base == C.Base doesn't |
272 | // guarantee their types are the same (PR23975). |
273 | Basis.Ins->getType() == C.Ins->getType() && |
274 | // Basis must dominate C in order to rewrite C with respect to Basis. |
275 | DT->dominates(A: Basis.Ins->getParent(), B: C.Ins->getParent()) && |
276 | // They share the same base, stride, and candidate kind. |
277 | Basis.Base == C.Base && Basis.Stride == C.Stride && |
278 | Basis.CandidateKind == C.CandidateKind); |
279 | } |
280 | |
281 | static bool isGEPFoldable(GetElementPtrInst *GEP, |
282 | const TargetTransformInfo *TTI) { |
283 | SmallVector<const Value *, 4> Indices(GEP->indices()); |
284 | return TTI->getGEPCost(PointeeType: GEP->getSourceElementType(), Ptr: GEP->getPointerOperand(), |
285 | Operands: Indices) == TargetTransformInfo::TCC_Free; |
286 | } |
287 | |
288 | // Returns whether (Base + Index * Stride) can be folded to an addressing mode. |
289 | static bool isAddFoldable(const SCEV *Base, ConstantInt *Index, Value *Stride, |
290 | TargetTransformInfo *TTI) { |
291 | // Index->getSExtValue() may crash if Index is wider than 64-bit. |
292 | return Index->getBitWidth() <= 64 && |
293 | TTI->isLegalAddressingMode(Ty: Base->getType(), BaseGV: nullptr, BaseOffset: 0, HasBaseReg: true, |
294 | Scale: Index->getSExtValue(), AddrSpace: UnknownAddressSpace); |
295 | } |
296 | |
297 | bool StraightLineStrengthReduce::isFoldable(const Candidate &C, |
298 | TargetTransformInfo *TTI, |
299 | const DataLayout *DL) { |
300 | if (C.CandidateKind == Candidate::Add) |
301 | return isAddFoldable(Base: C.Base, Index: C.Index, Stride: C.Stride, TTI); |
302 | if (C.CandidateKind == Candidate::GEP) |
303 | return isGEPFoldable(GEP: cast<GetElementPtrInst>(Val: C.Ins), TTI); |
304 | return false; |
305 | } |
306 | |
307 | // Returns true if GEP has zero or one non-zero index. |
308 | static bool hasOnlyOneNonZeroIndex(GetElementPtrInst *GEP) { |
309 | unsigned NumNonZeroIndices = 0; |
310 | for (Use &Idx : GEP->indices()) { |
311 | ConstantInt *ConstIdx = dyn_cast<ConstantInt>(Val&: Idx); |
312 | if (ConstIdx == nullptr || !ConstIdx->isZero()) |
313 | ++NumNonZeroIndices; |
314 | } |
315 | return NumNonZeroIndices <= 1; |
316 | } |
317 | |
318 | bool StraightLineStrengthReduce::isSimplestForm(const Candidate &C) { |
319 | if (C.CandidateKind == Candidate::Add) { |
320 | // B + 1 * S or B + (-1) * S |
321 | return C.Index->isOne() || C.Index->isMinusOne(); |
322 | } |
323 | if (C.CandidateKind == Candidate::Mul) { |
324 | // (B + 0) * S |
325 | return C.Index->isZero(); |
326 | } |
327 | if (C.CandidateKind == Candidate::GEP) { |
328 | // (char*)B + S or (char*)B - S |
329 | return ((C.Index->isOne() || C.Index->isMinusOne()) && |
330 | hasOnlyOneNonZeroIndex(GEP: cast<GetElementPtrInst>(Val: C.Ins))); |
331 | } |
332 | return false; |
333 | } |
334 | |
335 | // TODO: We currently implement an algorithm whose time complexity is linear in |
336 | // the number of existing candidates. However, we could do better by using |
337 | // ScopedHashTable. Specifically, while traversing the dominator tree, we could |
338 | // maintain all the candidates that dominate the basic block being traversed in |
339 | // a ScopedHashTable. This hash table is indexed by the base and the stride of |
340 | // a candidate. Therefore, finding the immediate basis of a candidate boils down |
341 | // to one hash-table look up. |
342 | void StraightLineStrengthReduce::allocateCandidatesAndFindBasis( |
343 | Candidate::Kind CT, const SCEV *B, ConstantInt *Idx, Value *S, |
344 | Instruction *I) { |
345 | Candidate C(CT, B, Idx, S, I); |
346 | // SLSR can complicate an instruction in two cases: |
347 | // |
348 | // 1. If we can fold I into an addressing mode, computing I is likely free or |
349 | // takes only one instruction. |
350 | // |
351 | // 2. I is already in a simplest form. For example, when |
352 | // X = B + 8 * S |
353 | // Y = B + S, |
354 | // rewriting Y to X - 7 * S is probably a bad idea. |
355 | // |
356 | // In the above cases, we still add I to the candidate list so that I can be |
357 | // the basis of other candidates, but we leave I's basis blank so that I |
358 | // won't be rewritten. |
359 | if (!isFoldable(C, TTI, DL) && !isSimplestForm(C)) { |
360 | // Try to compute the immediate basis of C. |
361 | unsigned NumIterations = 0; |
362 | // Limit the scan radius to avoid running in quadratice time. |
363 | static const unsigned MaxNumIterations = 50; |
364 | for (auto Basis = Candidates.rbegin(); |
365 | Basis != Candidates.rend() && NumIterations < MaxNumIterations; |
366 | ++Basis, ++NumIterations) { |
367 | if (isBasisFor(Basis: *Basis, C)) { |
368 | C.Basis = &(*Basis); |
369 | break; |
370 | } |
371 | } |
372 | } |
373 | // Regardless of whether we find a basis for C, we need to push C to the |
374 | // candidate list so that it can be the basis of other candidates. |
375 | Candidates.push_back(x: C); |
376 | } |
377 | |
378 | void StraightLineStrengthReduce::allocateCandidatesAndFindBasis( |
379 | Instruction *I) { |
380 | switch (I->getOpcode()) { |
381 | case Instruction::Add: |
382 | allocateCandidatesAndFindBasisForAdd(I); |
383 | break; |
384 | case Instruction::Mul: |
385 | allocateCandidatesAndFindBasisForMul(I); |
386 | break; |
387 | case Instruction::GetElementPtr: |
388 | allocateCandidatesAndFindBasisForGEP(GEP: cast<GetElementPtrInst>(Val: I)); |
389 | break; |
390 | } |
391 | } |
392 | |
393 | void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForAdd( |
394 | Instruction *I) { |
395 | // Try matching B + i * S. |
396 | if (!isa<IntegerType>(Val: I->getType())) |
397 | return; |
398 | |
399 | assert(I->getNumOperands() == 2 && "isn't I an add?" ); |
400 | Value *LHS = I->getOperand(i: 0), *RHS = I->getOperand(i: 1); |
401 | allocateCandidatesAndFindBasisForAdd(LHS, RHS, I); |
402 | if (LHS != RHS) |
403 | allocateCandidatesAndFindBasisForAdd(LHS: RHS, RHS: LHS, I); |
404 | } |
405 | |
406 | void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForAdd( |
407 | Value *LHS, Value *RHS, Instruction *I) { |
408 | Value *S = nullptr; |
409 | ConstantInt *Idx = nullptr; |
410 | if (match(V: RHS, P: m_Mul(L: m_Value(V&: S), R: m_ConstantInt(CI&: Idx)))) { |
411 | // I = LHS + RHS = LHS + Idx * S |
412 | allocateCandidatesAndFindBasis(CT: Candidate::Add, B: SE->getSCEV(V: LHS), Idx, S, I); |
413 | } else if (match(V: RHS, P: m_Shl(L: m_Value(V&: S), R: m_ConstantInt(CI&: Idx)))) { |
414 | // I = LHS + RHS = LHS + (S << Idx) = LHS + S * (1 << Idx) |
415 | APInt One(Idx->getBitWidth(), 1); |
416 | Idx = ConstantInt::get(Context&: Idx->getContext(), V: One << Idx->getValue()); |
417 | allocateCandidatesAndFindBasis(CT: Candidate::Add, B: SE->getSCEV(V: LHS), Idx, S, I); |
418 | } else { |
419 | // At least, I = LHS + 1 * RHS |
420 | ConstantInt *One = ConstantInt::get(Ty: cast<IntegerType>(Val: I->getType()), V: 1); |
421 | allocateCandidatesAndFindBasis(CT: Candidate::Add, B: SE->getSCEV(V: LHS), Idx: One, S: RHS, |
422 | I); |
423 | } |
424 | } |
425 | |
426 | // Returns true if A matches B + C where C is constant. |
427 | static bool matchesAdd(Value *A, Value *&B, ConstantInt *&C) { |
428 | return match(V: A, P: m_c_Add(L: m_Value(V&: B), R: m_ConstantInt(CI&: C))); |
429 | } |
430 | |
431 | // Returns true if A matches B | C where C is constant. |
432 | static bool matchesOr(Value *A, Value *&B, ConstantInt *&C) { |
433 | return match(V: A, P: m_c_Or(L: m_Value(V&: B), R: m_ConstantInt(CI&: C))); |
434 | } |
435 | |
436 | void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForMul( |
437 | Value *LHS, Value *RHS, Instruction *I) { |
438 | Value *B = nullptr; |
439 | ConstantInt *Idx = nullptr; |
440 | if (matchesAdd(A: LHS, B, C&: Idx)) { |
441 | // If LHS is in the form of "Base + Index", then I is in the form of |
442 | // "(Base + Index) * RHS". |
443 | allocateCandidatesAndFindBasis(CT: Candidate::Mul, B: SE->getSCEV(V: B), Idx, S: RHS, I); |
444 | } else if (matchesOr(A: LHS, B, C&: Idx) && haveNoCommonBitsSet(LHSCache: B, RHSCache: Idx, SQ: *DL)) { |
445 | // If LHS is in the form of "Base | Index" and Base and Index have no common |
446 | // bits set, then |
447 | // Base | Index = Base + Index |
448 | // and I is thus in the form of "(Base + Index) * RHS". |
449 | allocateCandidatesAndFindBasis(CT: Candidate::Mul, B: SE->getSCEV(V: B), Idx, S: RHS, I); |
450 | } else { |
451 | // Otherwise, at least try the form (LHS + 0) * RHS. |
452 | ConstantInt *Zero = ConstantInt::get(Ty: cast<IntegerType>(Val: I->getType()), V: 0); |
453 | allocateCandidatesAndFindBasis(CT: Candidate::Mul, B: SE->getSCEV(V: LHS), Idx: Zero, S: RHS, |
454 | I); |
455 | } |
456 | } |
457 | |
458 | void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForMul( |
459 | Instruction *I) { |
460 | // Try matching (B + i) * S. |
461 | // TODO: we could extend SLSR to float and vector types. |
462 | if (!isa<IntegerType>(Val: I->getType())) |
463 | return; |
464 | |
465 | assert(I->getNumOperands() == 2 && "isn't I a mul?" ); |
466 | Value *LHS = I->getOperand(i: 0), *RHS = I->getOperand(i: 1); |
467 | allocateCandidatesAndFindBasisForMul(LHS, RHS, I); |
468 | if (LHS != RHS) { |
469 | // Symmetrically, try to split RHS to Base + Index. |
470 | allocateCandidatesAndFindBasisForMul(LHS: RHS, RHS: LHS, I); |
471 | } |
472 | } |
473 | |
474 | void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP( |
475 | const SCEV *B, ConstantInt *Idx, Value *S, uint64_t ElementSize, |
476 | Instruction *I) { |
477 | // I = B + sext(Idx *nsw S) * ElementSize |
478 | // = B + (sext(Idx) * sext(S)) * ElementSize |
479 | // = B + (sext(Idx) * ElementSize) * sext(S) |
480 | // Casting to IntegerType is safe because we skipped vector GEPs. |
481 | IntegerType *PtrIdxTy = cast<IntegerType>(Val: DL->getIndexType(PtrTy: I->getType())); |
482 | ConstantInt *ScaledIdx = ConstantInt::get( |
483 | Ty: PtrIdxTy, V: Idx->getSExtValue() * (int64_t)ElementSize, IsSigned: true); |
484 | allocateCandidatesAndFindBasis(CT: Candidate::GEP, B, Idx: ScaledIdx, S, I); |
485 | } |
486 | |
487 | void StraightLineStrengthReduce::factorArrayIndex(Value *ArrayIdx, |
488 | const SCEV *Base, |
489 | uint64_t ElementSize, |
490 | GetElementPtrInst *GEP) { |
491 | // At least, ArrayIdx = ArrayIdx *nsw 1. |
492 | allocateCandidatesAndFindBasisForGEP( |
493 | B: Base, Idx: ConstantInt::get(Ty: cast<IntegerType>(Val: ArrayIdx->getType()), V: 1), |
494 | S: ArrayIdx, ElementSize, I: GEP); |
495 | Value *LHS = nullptr; |
496 | ConstantInt *RHS = nullptr; |
497 | // One alternative is matching the SCEV of ArrayIdx instead of ArrayIdx |
498 | // itself. This would allow us to handle the shl case for free. However, |
499 | // matching SCEVs has two issues: |
500 | // |
501 | // 1. this would complicate rewriting because the rewriting procedure |
502 | // would have to translate SCEVs back to IR instructions. This translation |
503 | // is difficult when LHS is further evaluated to a composite SCEV. |
504 | // |
505 | // 2. ScalarEvolution is designed to be control-flow oblivious. It tends |
506 | // to strip nsw/nuw flags which are critical for SLSR to trace into |
507 | // sext'ed multiplication. |
508 | if (match(V: ArrayIdx, P: m_NSWMul(L: m_Value(V&: LHS), R: m_ConstantInt(CI&: RHS)))) { |
509 | // SLSR is currently unsafe if i * S may overflow. |
510 | // GEP = Base + sext(LHS *nsw RHS) * ElementSize |
511 | allocateCandidatesAndFindBasisForGEP(B: Base, Idx: RHS, S: LHS, ElementSize, I: GEP); |
512 | } else if (match(V: ArrayIdx, P: m_NSWShl(L: m_Value(V&: LHS), R: m_ConstantInt(CI&: RHS)))) { |
513 | // GEP = Base + sext(LHS <<nsw RHS) * ElementSize |
514 | // = Base + sext(LHS *nsw (1 << RHS)) * ElementSize |
515 | APInt One(RHS->getBitWidth(), 1); |
516 | ConstantInt *PowerOf2 = |
517 | ConstantInt::get(Context&: RHS->getContext(), V: One << RHS->getValue()); |
518 | allocateCandidatesAndFindBasisForGEP(B: Base, Idx: PowerOf2, S: LHS, ElementSize, I: GEP); |
519 | } |
520 | } |
521 | |
522 | void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP( |
523 | GetElementPtrInst *GEP) { |
524 | // TODO: handle vector GEPs |
525 | if (GEP->getType()->isVectorTy()) |
526 | return; |
527 | |
528 | SmallVector<const SCEV *, 4> IndexExprs; |
529 | for (Use &Idx : GEP->indices()) |
530 | IndexExprs.push_back(Elt: SE->getSCEV(V: Idx)); |
531 | |
532 | gep_type_iterator GTI = gep_type_begin(GEP); |
533 | for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) { |
534 | if (GTI.isStruct()) |
535 | continue; |
536 | |
537 | const SCEV *OrigIndexExpr = IndexExprs[I - 1]; |
538 | IndexExprs[I - 1] = SE->getZero(Ty: OrigIndexExpr->getType()); |
539 | |
540 | // The base of this candidate is GEP's base plus the offsets of all |
541 | // indices except this current one. |
542 | const SCEV *BaseExpr = SE->getGEPExpr(GEP: cast<GEPOperator>(Val: GEP), IndexExprs); |
543 | Value *ArrayIdx = GEP->getOperand(i_nocapture: I); |
544 | uint64_t ElementSize = GTI.getSequentialElementStride(DL: *DL); |
545 | if (ArrayIdx->getType()->getIntegerBitWidth() <= |
546 | DL->getIndexSizeInBits(AS: GEP->getAddressSpace())) { |
547 | // Skip factoring if ArrayIdx is wider than the index size, because |
548 | // ArrayIdx is implicitly truncated to the index size. |
549 | factorArrayIndex(ArrayIdx, Base: BaseExpr, ElementSize, GEP); |
550 | } |
551 | // When ArrayIdx is the sext of a value, we try to factor that value as |
552 | // well. Handling this case is important because array indices are |
553 | // typically sign-extended to the pointer index size. |
554 | Value *TruncatedArrayIdx = nullptr; |
555 | if (match(V: ArrayIdx, P: m_SExt(Op: m_Value(V&: TruncatedArrayIdx))) && |
556 | TruncatedArrayIdx->getType()->getIntegerBitWidth() <= |
557 | DL->getIndexSizeInBits(AS: GEP->getAddressSpace())) { |
558 | // Skip factoring if TruncatedArrayIdx is wider than the pointer size, |
559 | // because TruncatedArrayIdx is implicitly truncated to the pointer size. |
560 | factorArrayIndex(ArrayIdx: TruncatedArrayIdx, Base: BaseExpr, ElementSize, GEP); |
561 | } |
562 | |
563 | IndexExprs[I - 1] = OrigIndexExpr; |
564 | } |
565 | } |
566 | |
567 | // A helper function that unifies the bitwidth of A and B. |
568 | static void unifyBitWidth(APInt &A, APInt &B) { |
569 | if (A.getBitWidth() < B.getBitWidth()) |
570 | A = A.sext(width: B.getBitWidth()); |
571 | else if (A.getBitWidth() > B.getBitWidth()) |
572 | B = B.sext(width: A.getBitWidth()); |
573 | } |
574 | |
575 | Value *StraightLineStrengthReduce::emitBump(const Candidate &Basis, |
576 | const Candidate &C, |
577 | IRBuilder<> &Builder, |
578 | const DataLayout *DL) { |
579 | APInt Idx = C.Index->getValue(), BasisIdx = Basis.Index->getValue(); |
580 | unifyBitWidth(A&: Idx, B&: BasisIdx); |
581 | APInt IndexOffset = Idx - BasisIdx; |
582 | |
583 | // Compute Bump = C - Basis = (i' - i) * S. |
584 | // Common case 1: if (i' - i) is 1, Bump = S. |
585 | if (IndexOffset == 1) |
586 | return C.Stride; |
587 | // Common case 2: if (i' - i) is -1, Bump = -S. |
588 | if (IndexOffset.isAllOnes()) |
589 | return Builder.CreateNeg(V: C.Stride); |
590 | |
591 | // Otherwise, Bump = (i' - i) * sext/trunc(S). Note that (i' - i) and S may |
592 | // have different bit widths. |
593 | IntegerType *DeltaType = |
594 | IntegerType::get(C&: Basis.Ins->getContext(), NumBits: IndexOffset.getBitWidth()); |
595 | Value *ExtendedStride = Builder.CreateSExtOrTrunc(V: C.Stride, DestTy: DeltaType); |
596 | if (IndexOffset.isPowerOf2()) { |
597 | // If (i' - i) is a power of 2, Bump = sext/trunc(S) << log(i' - i). |
598 | ConstantInt *Exponent = ConstantInt::get(Ty: DeltaType, V: IndexOffset.logBase2()); |
599 | return Builder.CreateShl(LHS: ExtendedStride, RHS: Exponent); |
600 | } |
601 | if (IndexOffset.isNegatedPowerOf2()) { |
602 | // If (i - i') is a power of 2, Bump = -sext/trunc(S) << log(i' - i). |
603 | ConstantInt *Exponent = |
604 | ConstantInt::get(Ty: DeltaType, V: (-IndexOffset).logBase2()); |
605 | return Builder.CreateNeg(V: Builder.CreateShl(LHS: ExtendedStride, RHS: Exponent)); |
606 | } |
607 | Constant *Delta = ConstantInt::get(Ty: DeltaType, V: IndexOffset); |
608 | return Builder.CreateMul(LHS: ExtendedStride, RHS: Delta); |
609 | } |
610 | |
611 | void StraightLineStrengthReduce::rewriteCandidateWithBasis( |
612 | const Candidate &C, const Candidate &Basis) { |
613 | assert(C.CandidateKind == Basis.CandidateKind && C.Base == Basis.Base && |
614 | C.Stride == Basis.Stride); |
615 | // We run rewriteCandidateWithBasis on all candidates in a post-order, so the |
616 | // basis of a candidate cannot be unlinked before the candidate. |
617 | assert(Basis.Ins->getParent() != nullptr && "the basis is unlinked" ); |
618 | |
619 | // An instruction can correspond to multiple candidates. Therefore, instead of |
620 | // simply deleting an instruction when we rewrite it, we mark its parent as |
621 | // nullptr (i.e. unlink it) so that we can skip the candidates whose |
622 | // instruction is already rewritten. |
623 | if (!C.Ins->getParent()) |
624 | return; |
625 | |
626 | IRBuilder<> Builder(C.Ins); |
627 | Value *Bump = emitBump(Basis, C, Builder, DL); |
628 | Value *Reduced = nullptr; // equivalent to but weaker than C.Ins |
629 | switch (C.CandidateKind) { |
630 | case Candidate::Add: |
631 | case Candidate::Mul: { |
632 | // C = Basis + Bump |
633 | Value *NegBump; |
634 | if (match(V: Bump, P: m_Neg(V: m_Value(V&: NegBump)))) { |
635 | // If Bump is a neg instruction, emit C = Basis - (-Bump). |
636 | Reduced = Builder.CreateSub(LHS: Basis.Ins, RHS: NegBump); |
637 | // We only use the negative argument of Bump, and Bump itself may be |
638 | // trivially dead. |
639 | RecursivelyDeleteTriviallyDeadInstructions(V: Bump); |
640 | } else { |
641 | // It's tempting to preserve nsw on Bump and/or Reduced. However, it's |
642 | // usually unsound, e.g., |
643 | // |
644 | // X = (-2 +nsw 1) *nsw INT_MAX |
645 | // Y = (-2 +nsw 3) *nsw INT_MAX |
646 | // => |
647 | // Y = X + 2 * INT_MAX |
648 | // |
649 | // Neither + and * in the resultant expression are nsw. |
650 | Reduced = Builder.CreateAdd(LHS: Basis.Ins, RHS: Bump); |
651 | } |
652 | break; |
653 | } |
654 | case Candidate::GEP: { |
655 | bool InBounds = cast<GetElementPtrInst>(Val: C.Ins)->isInBounds(); |
656 | // C = (char *)Basis + Bump |
657 | Reduced = Builder.CreatePtrAdd(Ptr: Basis.Ins, Offset: Bump, Name: "" , NW: InBounds); |
658 | break; |
659 | } |
660 | default: |
661 | llvm_unreachable("C.CandidateKind is invalid" ); |
662 | }; |
663 | Reduced->takeName(V: C.Ins); |
664 | C.Ins->replaceAllUsesWith(V: Reduced); |
665 | // Unlink C.Ins so that we can skip other candidates also corresponding to |
666 | // C.Ins. The actual deletion is postponed to the end of runOnFunction. |
667 | C.Ins->removeFromParent(); |
668 | UnlinkedInstructions.push_back(x: C.Ins); |
669 | } |
670 | |
671 | bool StraightLineStrengthReduceLegacyPass::runOnFunction(Function &F) { |
672 | if (skipFunction(F)) |
673 | return false; |
674 | |
675 | auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); |
676 | auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); |
677 | auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); |
678 | return StraightLineStrengthReduce(DL, DT, SE, TTI).runOnFunction(F); |
679 | } |
680 | |
681 | bool StraightLineStrengthReduce::runOnFunction(Function &F) { |
682 | // Traverse the dominator tree in the depth-first order. This order makes sure |
683 | // all bases of a candidate are in Candidates when we process it. |
684 | for (const auto Node : depth_first(G: DT)) |
685 | for (auto &I : *(Node->getBlock())) |
686 | allocateCandidatesAndFindBasis(I: &I); |
687 | |
688 | // Rewrite candidates in the reverse depth-first order. This order makes sure |
689 | // a candidate being rewritten is not a basis for any other candidate. |
690 | while (!Candidates.empty()) { |
691 | const Candidate &C = Candidates.back(); |
692 | if (C.Basis != nullptr) { |
693 | rewriteCandidateWithBasis(C, Basis: *C.Basis); |
694 | } |
695 | Candidates.pop_back(); |
696 | } |
697 | |
698 | // Delete all unlink instructions. |
699 | for (auto *UnlinkedInst : UnlinkedInstructions) { |
700 | for (unsigned I = 0, E = UnlinkedInst->getNumOperands(); I != E; ++I) { |
701 | Value *Op = UnlinkedInst->getOperand(i: I); |
702 | UnlinkedInst->setOperand(i: I, Val: nullptr); |
703 | RecursivelyDeleteTriviallyDeadInstructions(V: Op); |
704 | } |
705 | UnlinkedInst->deleteValue(); |
706 | } |
707 | bool Ret = !UnlinkedInstructions.empty(); |
708 | UnlinkedInstructions.clear(); |
709 | return Ret; |
710 | } |
711 | |
712 | namespace llvm { |
713 | |
714 | PreservedAnalyses |
715 | StraightLineStrengthReducePass::run(Function &F, FunctionAnalysisManager &AM) { |
716 | const DataLayout *DL = &F.getDataLayout(); |
717 | auto *DT = &AM.getResult<DominatorTreeAnalysis>(IR&: F); |
718 | auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(IR&: F); |
719 | auto *TTI = &AM.getResult<TargetIRAnalysis>(IR&: F); |
720 | |
721 | if (!StraightLineStrengthReduce(DL, DT, SE, TTI).runOnFunction(F)) |
722 | return PreservedAnalyses::all(); |
723 | |
724 | PreservedAnalyses PA; |
725 | PA.preserveSet<CFGAnalyses>(); |
726 | PA.preserve<DominatorTreeAnalysis>(); |
727 | PA.preserve<ScalarEvolutionAnalysis>(); |
728 | PA.preserve<TargetIRAnalysis>(); |
729 | return PA; |
730 | } |
731 | |
732 | } // namespace llvm |
733 | |