//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/CodeGenPrepare.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/BasicBlockSectionsProfileReader.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <optional>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "codegenprepare"

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumMemoryInstsPhiCreated,
          "Number of phis created when address "
          "computations were sunk to memory instructions");
STATISTIC(NumMemoryInstsSelectCreated,
          "Number of selects created when address "
          "computations were sunk to memory instructions");
STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumAndsAdded,
          "Number of and mask instructions added to form ext loads");
STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
STATISTIC(NumRetsDup, "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");
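
// All of the counters above are emitted through LLVM's statistics
// infrastructure (e.g. visible when the compiler is invoked with -stats).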

static cl::opt<bool> DisableBranchOpts(
    "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
    cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool>
    DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
                  cl::desc("Disable GC optimizations in CodeGenPrepare"));

static cl::opt<bool>
    DisableSelectToBranch("disable-cgp-select2branch", cl::Hidden,
                          cl::init(false),
                          cl::desc("Disable select to branch conversion."));

static cl::opt<bool>
    AddrSinkUsingGEPs("addr-sink-using-gep", cl::Hidden, cl::init(true),
                      cl::desc("Address sinking in CGP using GEPs."));

static cl::opt<bool>
    EnableAndCmpSinking("enable-andcmp-sinking", cl::Hidden, cl::init(true),
                        cl::desc("Enable sinking and/cmp into branches."));

static cl::opt<bool> DisableStoreExtract(
    "disable-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> StressStoreExtract(
    "stress-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableExtLdPromotion(
    "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
             "CodeGenPrepare"));

static cl::opt<bool> StressExtLdPromotion(
    "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
             "optimization in CodeGenPrepare"));

static cl::opt<bool> DisablePreheaderProtect(
    "disable-preheader-prot", cl::Hidden, cl::init(false),
    cl::desc("Disable protection against removing loop preheaders"));

static cl::opt<bool> ProfileGuidedSectionPrefix(
    "profile-guided-section-prefix", cl::Hidden, cl::init(true),
    cl::desc("Use profile info to add section prefix for hot/cold functions"));

static cl::opt<bool> ProfileUnknownInSpecialSection(
    "profile-unknown-in-special-section", cl::Hidden,
    cl::desc("In a profiling mode like SampleFDO, if a function has no "
             "profile, we cannot tell for sure that it is cold, because it "
             "may have been newly added and never sampled. With this flag "
             "enabled, the compiler puts such profile-unknown functions into "
             "a special section, so the runtime system can choose to handle "
             "them differently from the .text section, for example to save "
             "RAM."));

static cl::opt<bool> BBSectionsGuidedSectionPrefix(
    "bbsections-guided-section-prefix", cl::Hidden, cl::init(true),
    cl::desc("Use the basic-block-sections profile to determine the text "
             "section prefix for hot functions. Functions with "
             "basic-block-sections profile will be placed in `.text.hot` "
             "regardless of their FDO profile info. Other functions won't be "
             "impacted, i.e., their prefixes will be decided by FDO/sampleFDO "
             "profiles."));

static cl::opt<uint64_t> FreqRatioToSkipMerge(
    "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
    cl::desc("Skip merging empty blocks if (frequency of empty block) / "
             "(frequency of destination block) is greater than this ratio"));

static cl::opt<bool> ForceSplitStore(
    "force-split-store", cl::Hidden, cl::init(false),
    cl::desc("Force store splitting no matter what the target query says."));

static cl::opt<bool> EnableTypePromotionMerge(
    "cgp-type-promotion-merge", cl::Hidden,
    cl::desc("Enable merging of redundant sexts when one is dominating"
             " the other."),
    cl::init(true));

static cl::opt<bool> DisableComplexAddrModes(
    "disable-complex-addr-modes", cl::Hidden, cl::init(false),
    cl::desc("Disables combining addressing modes with different parts "
             "in optimizeMemoryInst."));

static cl::opt<bool>
    AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
                    cl::desc("Allow creation of Phis in Address sinking."));

static cl::opt<bool> AddrSinkNewSelects(
    "addr-sink-new-select", cl::Hidden, cl::init(true),
    cl::desc("Allow creation of selects in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseReg(
    "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseReg field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseGV(
    "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseGV field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseOffs(
    "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseOffs field in Address sinking."));

static cl::opt<bool> AddrSinkCombineScaledReg(
    "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of ScaledReg field in Address sinking."));

static cl::opt<bool>
    EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
                         cl::init(true),
                         cl::desc("Enable splitting large offset of GEP."));

static cl::opt<bool> EnableICMP_EQToICMP_ST(
    "cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false),
    cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."));

static cl::opt<bool>
    VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false),
                     cl::desc("Enable BFI update verification for "
                              "CodeGenPrepare."));

static cl::opt<bool>
    OptimizePhiTypes("cgp-optimize-phi-types", cl::Hidden, cl::init(true),
                     cl::desc("Enable converting phi types in CodeGenPrepare"));

static cl::opt<unsigned>
    HugeFuncThresholdInCGPP("cgpp-huge-func", cl::init(10000), cl::Hidden,
                            cl::desc("Minimum number of basic blocks for a "
                                     "function to be considered huge."));

static cl::opt<unsigned>
    MaxAddressUsersToScan("cgp-max-address-users-to-scan", cl::init(100),
                          cl::Hidden,
                          cl::desc("Max number of address users to look at"));

static cl::opt<bool>
    DisableDeletePHIs("disable-cgp-delete-phis", cl::Hidden, cl::init(false),
                      cl::desc("Disable elimination of dead PHI nodes."));

namespace {

enum ExtType {
  ZeroExtension, // Zero extension has been seen.
  SignExtension, // Sign extension has been seen.
  BothExtension  // This extension type is used if we saw sext after
                 // ZeroExtension had been set, or if we saw zext after
                 // SignExtension had been set. It makes the type
                 // information of a promoted instruction invalid.
};
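
// For example, if a promoted value was first seen under a zext (recorded as
// ZeroExtension) and the same value is later seen under a sext, the record
// becomes BothExtension and the cached type information is no longer usable.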

enum ModifyDT {
  NotModifyDT, // Do not modify any dominator tree.
  ModifyBBDT,  // Modify the basic-block-level dominator tree.
  ModifyInstDT // Modify the dominance relation of instructions within a
               // basic block. This usually means we moved, deleted, or
               // inserted an instruction in a basic block, so we should
               // re-iterate the instructions in that basic block.
};
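
// For example, when optimizeBlock() reports ModifyBBDT, _run() below drops its
// cached DominatorTree and, for non-huge functions, restarts the basic-block
// iteration.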

using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
using SExts = SmallVector<Instruction *, 16>;
using ValueToSExts = MapVector<Value *, SExts>;

class TypePromotionTransaction;

class CodeGenPrepare {
  friend class CodeGenPrepareLegacyPass;
  const TargetMachine *TM = nullptr;
  const TargetSubtargetInfo *SubtargetInfo = nullptr;
  const TargetLowering *TLI = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const TargetTransformInfo *TTI = nullptr;
  const BasicBlockSectionsProfileReader *BBSectionsProfileReader = nullptr;
  const TargetLibraryInfo *TLInfo = nullptr;
  LoopInfo *LI = nullptr;
  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;
  ProfileSummaryInfo *PSI = nullptr;

  /// As we scan instructions optimizing them, this is the next instruction
  /// to optimize. Transforms that can invalidate this should update it.
  BasicBlock::iterator CurInstIterator;

  /// Keeps track of non-local addresses that have been sunk into a block.
  /// This allows us to avoid inserting duplicate code for blocks with
  /// multiple load/stores of the same address. The usage of WeakTrackingVH
  /// enables SunkAddrs to be treated as a cache whose entries can be
  /// invalidated if a sunken address computation has been erased.
  ValueMap<Value *, WeakTrackingVH> SunkAddrs;

  /// Keeps track of all instructions inserted for the current function.
  SetOfInstrs InsertedInsts;

  /// Keeps track of the types of the related instructions before their
  /// promotion for the current function.
  InstrToOrigTy PromotedInsts;

  /// Keep track of instructions removed during promotion.
  SetOfInstrs RemovedInsts;

  /// Keep track of sext chains based on their initial value.
  DenseMap<Value *, Instruction *> SeenChainsForSExt;

  /// Keep track of GEPs accessing the same data structures such as structs or
  /// arrays that are candidates to be split later because of their large
  /// size.
  MapVector<AssertingVH<Value>,
            SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>>
      LargeOffsetGEPMap;

  /// Keep track of new GEP bases after splitting the GEPs having large offset.
  SmallSet<AssertingVH<Value>, 2> NewGEPBases;

  /// Map serial numbers to large offset GEPs.
  DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID;

  /// Keep track of promoted SExts.
  ValueToSExts ValToSExtendedUses;

  /// True if the function has the OptSize attribute.
  bool OptSize;

  /// DataLayout for the Function being processed.
  const DataLayout *DL = nullptr;

  /// Building the dominator tree can be expensive, so we only build it
  /// lazily and update it when required.
  std::unique_ptr<DominatorTree> DT;

public:
  CodeGenPrepare() {}
  CodeGenPrepare(const TargetMachine *TM) : TM(TM) {}
  /// If we encounter a huge function, we need to limit the build time.
  bool IsHugeFunc = false;

  /// FreshBBs works like a worklist: it collects the updated BBs that need
  /// to be optimized again.
  /// Note: To keep the build time of this pass in check, whenever a BB is
  /// updated in a huge function, we must insert that BB into FreshBBs.
  SmallSet<BasicBlock *, 32> FreshBBs;

  void releaseMemory() {
    // Clear per function information.
    InsertedInsts.clear();
    PromotedInsts.clear();
    FreshBBs.clear();
    BPI.reset();
    BFI.reset();
  }

  bool run(Function &F, FunctionAnalysisManager &AM);

private:
  template <typename F>
  void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) {
    // Substituting can cause recursive simplifications, which can invalidate
    // our iterator. Use a WeakTrackingVH to hold onto it in case this
    // happens.
    Value *CurValue = &*CurInstIterator;
    WeakTrackingVH IterHandle(CurValue);

    f();

    // If the iterator instruction was recursively deleted, start over at the
    // start of the block.
    if (IterHandle != CurValue) {
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
  }

  // Get the DominatorTree, building if necessary.
  DominatorTree &getDT(Function &F) {
    if (!DT)
      DT = std::make_unique<DominatorTree>(F);
    return *DT;
  }

  void removeAllAssertingVHReferences(Value *V);
  bool eliminateAssumptions(Function &F);
  bool eliminateFallThrough(Function &F, DominatorTree *DT = nullptr);
  bool eliminateMostlyEmptyBlocks(Function &F);
  BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
  bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
  void eliminateMostlyEmptyBlock(BasicBlock *BB);
  bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
                                     bool isPreheader);
  bool makeBitReverse(Instruction &I);
  bool optimizeBlock(BasicBlock &BB, ModifyDT &ModifiedDT);
  bool optimizeInst(Instruction *I, ModifyDT &ModifiedDT);
  bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, Type *AccessTy,
                          unsigned AddrSpace);
  bool optimizeGatherScatterInst(Instruction *MemoryInst, Value *Ptr);
  bool optimizeInlineAsmInst(CallInst *CS);
  bool optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT);
  bool optimizeExt(Instruction *&I);
  bool optimizeExtUses(Instruction *I);
  bool optimizeLoadExt(LoadInst *Load);
  bool optimizeShiftInst(BinaryOperator *BO);
  bool optimizeFunnelShift(IntrinsicInst *Fsh);
  bool optimizeSelectInst(SelectInst *SI);
  bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
  bool optimizeSwitchType(SwitchInst *SI);
  bool optimizeSwitchPhiConstants(SwitchInst *SI);
  bool optimizeSwitchInst(SwitchInst *SI);
  bool optimizeExtractElementInst(Instruction *Inst);
  bool dupRetToEnableTailCallOpts(BasicBlock *BB, ModifyDT &ModifiedDT);
  bool fixupDbgValue(Instruction *I);
  bool fixupDbgVariableRecord(DbgVariableRecord &I);
  bool fixupDbgVariableRecordsOnInst(Instruction &I);
  bool placeDbgValues(Function &F);
  bool placePseudoProbes(Function &F);
  bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
                    LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
  bool tryToPromoteExts(TypePromotionTransaction &TPT,
                        const SmallVectorImpl<Instruction *> &Exts,
                        SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
                        unsigned CreatedInstsCost = 0);
  bool mergeSExts(Function &F);
  bool splitLargeGEPOffsets();
  bool optimizePhiType(PHINode *Inst, SmallPtrSetImpl<PHINode *> &Visited,
                       SmallPtrSetImpl<Instruction *> &DeletedInstrs);
  bool optimizePhiTypes(Function &F);
  bool performAddressTypePromotion(
      Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
      bool HasPromoted, TypePromotionTransaction &TPT,
      SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
  bool splitBranchCondition(Function &F, ModifyDT &ModifiedDT);
  bool simplifyOffsetableRelocate(GCStatepointInst &I);

  bool tryToSinkFreeOperands(Instruction *I);
  bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, Value *Arg0, Value *Arg1,
                                   CmpInst *Cmp, Intrinsic::ID IID);
  bool optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT);
  bool combineToUSubWithOverflow(CmpInst *Cmp, ModifyDT &ModifiedDT);
  bool combineToUAddWithOverflow(CmpInst *Cmp, ModifyDT &ModifiedDT);
  void verifyBFIUpdates(Function &F);
  bool _run(Function &F);
};

class CodeGenPrepareLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  CodeGenPrepareLegacyPass() : FunctionPass(ID) {
    initializeCodeGenPrepareLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "CodeGen Prepare"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: When we can selectively preserve passes, preserve the domtree.
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addUsedIfAvailable<BasicBlockSectionsProfileReaderWrapperPass>();
  }
};

} // end anonymous namespace

char CodeGenPrepareLegacyPass::ID = 0;

bool CodeGenPrepareLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;
  auto TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
  CodeGenPrepare CGP(TM);
  CGP.DL = &F.getDataLayout();
  CGP.SubtargetInfo = TM->getSubtargetImpl(F);
  CGP.TLI = CGP.SubtargetInfo->getTargetLowering();
  CGP.TRI = CGP.SubtargetInfo->getRegisterInfo();
  CGP.TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  CGP.TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  CGP.LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  CGP.BPI.reset(new BranchProbabilityInfo(F, *CGP.LI));
  CGP.BFI.reset(new BlockFrequencyInfo(F, *CGP.BPI, *CGP.LI));
  CGP.PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  auto BBSPRWP =
      getAnalysisIfAvailable<BasicBlockSectionsProfileReaderWrapperPass>();
  CGP.BBSectionsProfileReader = BBSPRWP ? &BBSPRWP->getBBSPR() : nullptr;

  return CGP._run(F);
}

INITIALIZE_PASS_BEGIN(CodeGenPrepareLegacyPass, DEBUG_TYPE,
                      "Optimize for code generation", false, false)
INITIALIZE_PASS_DEPENDENCY(BasicBlockSectionsProfileReaderWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(CodeGenPrepareLegacyPass, DEBUG_TYPE,
                    "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPrepareLegacyPass() {
  return new CodeGenPrepareLegacyPass();
}

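// New pass manager entry point. Besides the codegen pipeline, this can be
// exercised directly, e.g. with
// "opt -passes='require<profile-summary>,function(codegenprepare)'".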
PreservedAnalyses CodeGenPreparePass::run(Function &F,
                                          FunctionAnalysisManager &AM) {
  CodeGenPrepare CGP(TM);

  bool Changed = CGP.run(F, AM);
  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserve<TargetLibraryAnalysis>();
  PA.preserve<TargetIRAnalysis>();
  PA.preserve<LoopAnalysis>();
  return PA;
}

bool CodeGenPrepare::run(Function &F, FunctionAnalysisManager &AM) {
  DL = &F.getDataLayout();
  SubtargetInfo = TM->getSubtargetImpl(F);
  TLI = SubtargetInfo->getTargetLowering();
  TRI = SubtargetInfo->getRegisterInfo();
  TLInfo = &AM.getResult<TargetLibraryAnalysis>(F);
  TTI = &AM.getResult<TargetIRAnalysis>(F);
  LI = &AM.getResult<LoopAnalysis>(F);
  BPI.reset(new BranchProbabilityInfo(F, *LI));
  BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  BBSectionsProfileReader =
      AM.getCachedResult<BasicBlockSectionsProfileReaderAnalysis>(F);
  return _run(F);
}

bool CodeGenPrepare::_run(Function &F) {
  bool EverMadeChange = false;

  OptSize = F.hasOptSize();
  // Use the basic-block-sections profile to promote hot functions to .text.hot
  // if requested.
  if (BBSectionsGuidedSectionPrefix && BBSectionsProfileReader &&
      BBSectionsProfileReader->isFunctionHot(F.getName())) {
    F.setSectionPrefix("hot");
  } else if (ProfileGuidedSectionPrefix) {
    // The hot attribute overwrites profile count based hotness, while profile
    // count based hotness overwrites the cold attribute.
    // This is a conservative behavior.
    if (F.hasFnAttribute(Attribute::Hot) ||
        PSI->isFunctionHotInCallGraph(&F, *BFI))
      F.setSectionPrefix("hot");
    // If PSI shows this function is not hot, we place the function into the
    // unlikely section if (1) PSI shows this is a cold function, or (2) the
    // function has the cold attribute.
    else if (PSI->isFunctionColdInCallGraph(&F, *BFI) ||
             F.hasFnAttribute(Attribute::Cold))
      F.setSectionPrefix("unlikely");
    else if (ProfileUnknownInSpecialSection && PSI->hasPartialSampleProfile() &&
             PSI->isFunctionHotnessUnknown(F))
      F.setSectionPrefix("unknown");
  }

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
  if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
        TLI->getBypassSlowDivWidths();
    BasicBlock *BB = &*F.begin();
    while (BB != nullptr) {
      // bypassSlowDivision may create new BBs, but we don't want to reapply
      // the optimization to those blocks.
      BasicBlock *Next = BB->getNextNode();
      // F.hasOptSize is already checked in the outer if statement.
      if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
        EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
      BB = Next;
    }
  }

  // Get rid of @llvm.assume builtins before attempting to eliminate empty
  // blocks, since there might be blocks that only contain @llvm.assume calls
  // (plus arguments that we can get rid of).
  EverMadeChange |= eliminateAssumptions(F);

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= eliminateMostlyEmptyBlocks(F);

  ModifyDT ModifiedDT = ModifyDT::NotModifyDT;
  if (!DisableBranchOpts)
    EverMadeChange |= splitBranchCondition(F, ModifiedDT);

  // Split some critical edges where one of the sources is an indirect branch,
  // to help generate sane code for PHIs involving such edges.
  EverMadeChange |=
      SplitIndirectBrCriticalEdges(F, /*IgnoreBlocksWithoutPHI=*/true);

  // If we are optimizing a huge function, we need to keep the build time in
  // mind, because the complexity of the basic algorithm is close to O(N!).
  IsHugeFunc = F.size() > HugeFuncThresholdInCGPP;

  // Transformations above may invalidate dominator tree and/or loop info.
  DT.reset();
  LI->releaseMemory();
  LI->analyze(getDT(F));

  bool MadeChange = true;
  bool FuncIterated = false;
  while (MadeChange) {
    MadeChange = false;

    for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
      if (FuncIterated && !FreshBBs.contains(&BB))
        continue;

      ModifyDT ModifiedDTOnIteration = ModifyDT::NotModifyDT;
      bool Changed = optimizeBlock(BB, ModifiedDTOnIteration);

      if (ModifiedDTOnIteration == ModifyDT::ModifyBBDT)
        DT.reset();

      MadeChange |= Changed;
      if (IsHugeFunc) {
        // If the BB was updated, it may still have a chance to be optimized
        // again. This usually happens with the sink optimizations.
        // For example:
        //
        // bb0:
        // %and = and i32 %a, 4
        // %cmp = icmp eq i32 %and, 0
        //
        // If the %cmp sinks to another BB, the %and then gets a chance to
        // sink as well.
        if (Changed)
          FreshBBs.insert(&BB);
        else if (FuncIterated)
          FreshBBs.erase(&BB);
      } else {
        // For small/normal functions, we restart BB iteration if the dominator
        // tree of the Function was changed.
        if (ModifiedDTOnIteration != ModifyDT::NotModifyDT)
          break;
      }
    }
    // We have iterated over all the BBs in the function (this only matters
    // for huge functions).
    FuncIterated = IsHugeFunc;

    if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
      MadeChange |= mergeSExts(F);
    if (!LargeOffsetGEPMap.empty())
      MadeChange |= splitLargeGEPOffsets();
    MadeChange |= optimizePhiTypes(F);

    if (MadeChange)
      eliminateFallThrough(F, DT.get());

#ifndef NDEBUG
    if (MadeChange && VerifyLoopInfo)
      LI->verify(getDT(F));
#endif

    // Really free removed instructions during promotion.
    for (Instruction *I : RemovedInsts)
      I->deleteValue();

    EverMadeChange |= MadeChange;
    SeenChainsForSExt.clear();
    ValToSExtendedUses.clear();
    RemovedInsts.clear();
    LargeOffsetGEPMap.clear();
    LargeOffsetGEPID.clear();
  }

  NewGEPBases.clear();
  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    // Use a set vector to get deterministic iteration order. The order the
    // blocks are removed may affect whether or not PHI nodes in successors
    // are removed.
    SmallSetVector<BasicBlock *, 8> WorkList;
    for (BasicBlock &BB : F) {
      SmallVector<BasicBlock *, 2> Successors(successors(&BB));
      MadeChange |= ConstantFoldTerminator(&BB, true);
      if (!MadeChange)
        continue;

      for (BasicBlock *Succ : Successors)
        if (pred_empty(Succ))
          WorkList.insert(Succ);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = WorkList.pop_back_val();
      SmallVector<BasicBlock *, 2> Successors(successors(BB));

      DeleteDeadBlock(BB);

      for (BasicBlock *Succ : Successors)
        if (pred_empty(Succ))
          WorkList.insert(Succ);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= eliminateFallThrough(F);

    EverMadeChange |= MadeChange;
  }

  if (!DisableGCOpts) {
    SmallVector<GCStatepointInst *, 2> Statepoints;
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (auto *SP = dyn_cast<GCStatepointInst>(&I))
          Statepoints.push_back(SP);
    for (auto &I : Statepoints)
      EverMadeChange |= simplifyOffsetableRelocate(*I);
  }

  // Do this last to clean up use-before-def scenarios introduced by other
  // preparatory transforms.
  EverMadeChange |= placeDbgValues(F);
  EverMadeChange |= placePseudoProbes(F);

#ifndef NDEBUG
  if (VerifyBFIUpdates)
    verifyBFIUpdates(F);
#endif

  return EverMadeChange;
}

bool CodeGenPrepare::eliminateAssumptions(Function &F) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    CurInstIterator = BB.begin();
    while (CurInstIterator != BB.end()) {
      Instruction *I = &*(CurInstIterator++);
      if (auto *Assume = dyn_cast<AssumeInst>(I)) {
        MadeChange = true;
        Value *Operand = Assume->getOperand(0);
        Assume->eraseFromParent();

        resetIteratorIfInvalidatedWhileCalling(&BB, [&]() {
          RecursivelyDeleteTriviallyDeadInstructions(Operand, TLInfo, nullptr);
        });
      }
    }
  }
  return MadeChange;
}

/// An instruction is about to be deleted, so remove all references to it in
/// our GEP-tracking data structures.
void CodeGenPrepare::removeAllAssertingVHReferences(Value *V) {
  LargeOffsetGEPMap.erase(V);
  NewGEPBases.erase(V);

  auto GEP = dyn_cast<GetElementPtrInst>(V);
  if (!GEP)
    return;

  LargeOffsetGEPID.erase(GEP);

  auto VecI = LargeOffsetGEPMap.find(GEP->getPointerOperand());
  if (VecI == LargeOffsetGEPMap.end())
    return;

  auto &GEPVector = VecI->second;
  llvm::erase_if(GEPVector, [=](auto &Elt) { return Elt.first == GEP; });

  if (GEPVector.empty())
    LargeOffsetGEPMap.erase(VecI);
}

// Verify BFI has been updated correctly by recomputing BFI and comparing them.
void LLVM_ATTRIBUTE_UNUSED CodeGenPrepare::verifyBFIUpdates(Function &F) {
  DominatorTree NewDT(F);
  LoopInfo NewLI(NewDT);
  BranchProbabilityInfo NewBPI(F, NewLI, TLInfo);
  BlockFrequencyInfo NewBFI(F, NewBPI, NewLI);
  NewBFI.verifyMatch(*BFI);
}

/// Merge basic blocks which are connected by a single edge, where one of the
/// basic blocks has a single successor pointing to the other basic block,
/// which has a single predecessor.
bool CodeGenPrepare::eliminateFallThrough(Function &F, DominatorTree *DT) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  // Use a temporary array to avoid the iterator being invalidated when
  // deleting blocks.
  SmallVector<WeakTrackingVH, 16> Blocks;
  for (auto &Block : llvm::drop_begin(F))
    Blocks.push_back(&Block);

  SmallSet<WeakTrackingVH, 16> Preds;
  for (auto &Block : Blocks) {
    auto *BB = cast_or_null<BasicBlock>(Block);
    if (!BB)
      continue;
    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken())
      continue;

    // Make an effort to skip unreachable blocks.
    if (DT && !DT->isReachableFromEntry(BB))
      continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n");

      // Merge BB into SinglePred and delete it.
      MergeBlockIntoPredecessor(BB, /* DTU */ nullptr, LI, /* MSSAU */ nullptr,
                                /* MemDep */ nullptr,
                                /* PredecessorWithTwoSuccessors */ false, DT);
      Preds.insert(SinglePred);

      if (IsHugeFunc) {
        // Update FreshBBs to optimize the merged BB.
        FreshBBs.insert(SinglePred);
        FreshBBs.erase(BB);
      }
    }
  }

  // (Repeatedly) merging blocks into their predecessors can create redundant
  // debug intrinsics.
  for (const auto &Pred : Preds)
    if (auto *BB = cast_or_null<BasicBlock>(Pred))
      RemoveRedundantDbgInstrs(BB);

  return Changed;
}

/// Find a destination block from BB if BB is a mergeable empty block.
BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
  // If this block doesn't end with an uncond branch, ignore it.
  BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isUnconditional())
    return nullptr;

  // If the instruction before the branch (skipping debug info) isn't a phi
  // node, then other stuff is happening here.
  BasicBlock::iterator BBI = BI->getIterator();
  if (BBI != BB->begin()) {
    --BBI;
    while (isa<DbgInfoIntrinsic>(BBI)) {
      if (BBI == BB->begin())
        break;
      --BBI;
    }
    if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
      return nullptr;
  }

  // Do not break infinite loops.
  BasicBlock *DestBB = BI->getSuccessor(0);
  if (DestBB == BB)
    return nullptr;

  if (!canMergeBlocks(BB, DestBB))
    DestBB = nullptr;

  return DestBB;
}

/// Eliminate blocks that contain only PHI nodes, debug info directives, and an
/// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
/// edges in ways that are non-optimal for isel. Start by eliminating these
/// blocks so we can split them the way we want them.
bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
  SmallPtrSet<BasicBlock *, 16> Preheaders;
  SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
  while (!LoopList.empty()) {
    Loop *L = LoopList.pop_back_val();
    llvm::append_range(LoopList, *L);
    if (BasicBlock *Preheader = L->getLoopPreheader())
      Preheaders.insert(Preheader);
  }

  bool MadeChange = false;
  // Copy blocks into a temporary array to avoid iterator invalidation issues
  // as we remove them.
  // Note that this intentionally skips the entry block.
  SmallVector<WeakTrackingVH, 16> Blocks;
  for (auto &Block : llvm::drop_begin(F)) {
    // Delete phi nodes that could block deleting other empty blocks.
    if (!DisableDeletePHIs)
      MadeChange |= DeleteDeadPHIs(&Block, TLInfo);
    Blocks.push_back(&Block);
  }

  for (auto &Block : Blocks) {
    BasicBlock *BB = cast_or_null<BasicBlock>(Block);
    if (!BB)
      continue;
    BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
    if (!DestBB ||
        !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
      continue;

    eliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
                                                   BasicBlock *DestBB,
                                                   bool isPreheader) {
  // Do not delete loop preheaders if doing so would create a critical edge.
  // Loop preheaders can be good locations to spill registers. If the
  // preheader is deleted and we create a critical edge, registers may be
  // spilled in the loop body instead.
  if (!DisablePreheaderProtect && isPreheader &&
      !(BB->getSinglePredecessor() &&
        BB->getSinglePredecessor()->getSingleSuccessor()))
    return false;

  // Skip merging if the block's successor is also a successor to any callbr
  // that leads to this block.
  // FIXME: Is this really needed? Is this a correctness issue?
  for (BasicBlock *Pred : predecessors(BB)) {
    if (isa<CallBrInst>(Pred->getTerminator()) &&
        llvm::is_contained(successors(Pred), DestBB))
      return false;
  }

  // Try to skip merging if the unique predecessor of BB is terminated by a
  // switch or indirect branch instruction, and BB is used as an incoming block
  // of PHIs in DestBB. In such a case, merging BB and DestBB would cause ISel
  // to add COPY instructions in the predecessor of BB instead of BB (if it is
  // not merged). Note that the critical edge created by merging such blocks
  // won't be split in MachineSink because the jump table is not analyzable. By
  // keeping such an empty block (BB), ISel will place COPY instructions in BB,
  // not in the predecessor of BB.
  BasicBlock *Pred = BB->getUniquePredecessor();
  if (!Pred || !(isa<SwitchInst>(Pred->getTerminator()) ||
                 isa<IndirectBrInst>(Pred->getTerminator())))
    return true;

  if (BB->getTerminator() != BB->getFirstNonPHIOrDbg())
    return true;

  // We use a simple cost heuristic that determines that skipping the merge is
  // profitable if the cost of skipping merging is less than the cost of
  // merging: Cost(skipping merging) < Cost(merging BB), where
  // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
  // Cost(merging BB) is Freq(Pred) * Cost(Copy).
  // Assuming Cost(Copy) == Cost(Branch), we can simplify this to:
  // Freq(Pred) / Freq(BB) > 2.
  // Note that if there are multiple empty blocks sharing the same incoming
  // value for the PHIs in the DestBB, we consider them together. In such a
  // case, Cost(merging BB) will be the sum of their frequencies.
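  //
  // For example, with the default cgp-freq-ratio-to-skip-merge of 2,
  // Freq(Pred) = 100 and Freq(BB) = 30 gives 100 > 2 * 30, so the merge is
  // skipped; with Freq(BB) = 60 the blocks are merged since 100 <= 120.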

  if (!isa<PHINode>(DestBB->begin()))
    return true;

  SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;

  // Find all other incoming blocks from which incoming values of all PHIs in
  // DestBB are the same as the ones from BB.
  for (BasicBlock *DestBBPred : predecessors(DestBB)) {
    if (DestBBPred == BB)
      continue;

    if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
          return DestPN.getIncomingValueForBlock(BB) ==
                 DestPN.getIncomingValueForBlock(DestBBPred);
        }))
      SameIncomingValueBBs.insert(DestBBPred);
  }

  // See if all of BB's incoming values are the same as the value from Pred.
  // In this case, there is no reason to skip merging because COPYs are
  // expected to be placed in Pred already.
  if (SameIncomingValueBBs.count(Pred))
    return true;

  BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
  BlockFrequency BBFreq = BFI->getBlockFreq(BB);

  for (auto *SameValueBB : SameIncomingValueBBs)
    if (SameValueBB->getUniquePredecessor() == Pred &&
        DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
      BBFreq += BFI->getBlockFreq(SameValueBB);

  std::optional<BlockFrequency> Limit = BBFreq.mul(FreqRatioToSkipMerge);
  return !Limit || PredFreq <= *Limit;
}

/// Return true if we can merge BB into DestBB if there is a single
/// unconditional branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  for (const PHINode &PN : BB->phis()) {
    for (const User *U : PN.users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // the incoming value. If the incoming value is not from BB then this
      // is a complex condition (e.g. preheaders) we want to avoid here.
      if (UI->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN)
    return true; // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock *, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) { // Common predecessor?
      for (const PHINode &PN : DestBB->phis()) {
        const Value *V1 = PN.getIncomingValueForBlock(Pred);
        const Value *V2 = PN.getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2)
          return false;
      }
    }
  }

  return true;
}

/// Replace all old uses with new ones, and push the updated BBs into FreshBBs.
static void replaceAllUsesWith(Value *Old, Value *New,
                               SmallSet<BasicBlock *, 32> &FreshBBs,
                               bool IsHuge) {
  auto *OldI = dyn_cast<Instruction>(Old);
  if (OldI) {
    for (Value::user_iterator UI = OldI->user_begin(), E = OldI->user_end();
         UI != E; ++UI) {
      Instruction *User = cast<Instruction>(*UI);
      if (IsHuge)
        FreshBBs.insert(User->getParent());
    }
  }
  Old->replaceAllUsesWith(New);
}

/// Eliminate a basic block that has only phi's and an unconditional branch in
/// it.
void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
                    << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      assert(SinglePred == BB &&
             "Single predecessor not the same as predecessor");
      // Merge DestBB into SinglePred/BB and delete it.
      MergeBlockIntoPredecessor(DestBB);
      // Note: BB(=SinglePred) will not be deleted on this path.
      // DestBB(=its single successor) is the one that was deleted.
      LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n");

      if (IsHugeFunc) {
        // Update FreshBBs to optimize the merged BB.
        FreshBBs.insert(SinglePred);
        FreshBBs.erase(DestBB);
      }
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  for (PHINode &PN : DestBB->phis()) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN.removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN.addIncoming(InValPhi->getIncomingValue(i),
                       InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (BasicBlock *Pred : predecessors(BB))
          PN.addIncoming(InVal, Pred);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  BB->eraseFromParent();
  ++NumBlocksElim;

  LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

// Computes a map from base pointer relocation instructions to the
// corresponding derived pointer relocation instructions, given a vector of all
// relocate calls.
static void computeBaseDerivedRelocateMap(
    const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
    MapVector<GCRelocateInst *, SmallVector<GCRelocateInst *, 0>>
        &RelocateInstMap) {
  // Collect information in two maps: one primarily for locating the base
  // object while filling the second map; the second map is the final structure
  // holding a mapping between Base and corresponding Derived relocate calls.
  MapVector<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
  for (auto *ThisRelocate : AllRelocateCalls) {
    auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
                            ThisRelocate->getDerivedPtrIndex());
    RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
  }
  for (auto &Item : RelocateIdxMap) {
    std::pair<unsigned, unsigned> Key = Item.first;
    if (Key.first == Key.second)
      // Base relocation: nothing to insert
      continue;

    GCRelocateInst *I = Item.second;
    auto BaseKey = std::make_pair(Key.first, Key.first);

    // We're iterating over RelocateIdxMap so we cannot modify it.
    auto MaybeBase = RelocateIdxMap.find(BaseKey);
    if (MaybeBase == RelocateIdxMap.end())
      // TODO: We might want to insert a new base object relocate and gep off
      // that, if there are enough derived object relocates.
      continue;

    RelocateInstMap[MaybeBase->second].push_back(I);
  }
}

// Accepts a GEP and extracts the operands into a vector provided they're all
// small integer constants.
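// For example (illustrative IR), "getelementptr %struct.S, ptr %base, i32 0,
// i32 3" qualifies, while any non-constant index or a constant larger than 20
// does not.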
static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
                                          SmallVectorImpl<Value *> &OffsetV) {
  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
    // Only accept small constant integer operands
    auto *Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Op || Op->getZExtValue() > 20)
      return false;
  }

  for (unsigned i = 1; i < GEP->getNumOperands(); i++)
    OffsetV.push_back(GEP->getOperand(i));
  return true;
}

// Takes a RelocatedBase (base pointer relocation instruction) and Targets to
// replace, computes a replacement, and applies it.
static bool
simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
                          const SmallVectorImpl<GCRelocateInst *> &Targets) {
  bool MadeChange = false;
  // We must ensure that the relocation of the derived pointer is defined after
  // the relocation of the base pointer. If we find a relocation corresponding
  // to a base that is defined earlier than the relocation of the base, then we
  // move the relocation of the base right before the found relocation. We
  // consider only relocations in the same basic block as the relocation of the
  // base. Relocations from other basic blocks are skipped by this optimization
  // and we do not care about them.
  for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
       &*R != RelocatedBase; ++R)
    if (auto *RI = dyn_cast<GCRelocateInst>(R))
      if (RI->getStatepoint() == RelocatedBase->getStatepoint())
        if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
          RelocatedBase->moveBefore(RI);
          MadeChange = true;
          break;
        }

  for (GCRelocateInst *ToReplace : Targets) {
    assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
           "Not relocating a derived object of the original base object");
    if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
      // A duplicate relocate call. TODO: coalesce duplicates.
      continue;
    }

    if (RelocatedBase->getParent() != ToReplace->getParent()) {
      // Base and derived relocates are in different basic blocks.
      // In this case the transform is only valid when the base dominates the
      // derived relocate. However, it would be too expensive to check
      // dominance for each such relocate, so we skip the whole transformation.
      continue;
    }

    Value *Base = ToReplace->getBasePtr();
    auto *Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
    if (!Derived || Derived->getPointerOperand() != Base)
      continue;

    SmallVector<Value *, 2> OffsetV;
    if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
      continue;

    // Create a Builder and replace the target callsite with a gep
    assert(RelocatedBase->getNextNode() &&
           "Should always have one since it's not a terminator");

    // Insert after RelocatedBase
    IRBuilder<> Builder(RelocatedBase->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());

    // If gc_relocate does not match the actual type, cast it to the right
    // type. In theory, there must be a bitcast after gc_relocate if the type
    // does not match, and we should reuse it to get the derived pointer. But
    // there could be cases like this:
    // bb1:
    //  ...
    //  %g1 = call coldcc i8 addrspace(1)*
    //        @llvm.experimental.gc.relocate.p1i8(...) br label %merge
    //
    // bb2:
    //  ...
    //  %g2 = call coldcc i8 addrspace(1)*
    //        @llvm.experimental.gc.relocate.p1i8(...) br label %merge
    //
    // merge:
    //  %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
    //  %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
    //
    // In this case, we cannot find the bitcast any more. So we insert a new
    // bitcast whether there is already one or not. In this way, we can handle
    // all cases, and the extra bitcast should be optimized away in later
    // passes.
    Value *ActualRelocatedBase = RelocatedBase;
    if (RelocatedBase->getType() != Base->getType()) {
      ActualRelocatedBase =
          Builder.CreateBitCast(RelocatedBase, Base->getType());
    }
    Value *Replacement =
        Builder.CreateGEP(Derived->getSourceElementType(), ActualRelocatedBase,
                          ArrayRef(OffsetV));
    Replacement->takeName(ToReplace);
    // If the newly generated derived pointer's type does not match the
    // original derived pointer's type, cast the new derived pointer to match
    // it. Same reasoning as above.
    Value *ActualReplacement = Replacement;
    if (Replacement->getType() != ToReplace->getType()) {
      ActualReplacement =
          Builder.CreateBitCast(Replacement, ToReplace->getType());
    }
    ToReplace->replaceAllUsesWith(ActualReplacement);
    ToReplace->eraseFromParent();

    MadeChange = true;
  }
  return MadeChange;
}

// Turns this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = relocate(%tok, i32 4, i32 4)
// %ptr' = relocate(%tok, i32 4, i32 5)
// %val = load %ptr'
//
// into this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = gc.relocate(%tok, i32 4, i32 4)
// %ptr' = gep %base' + 15
// %val = load %ptr'
bool CodeGenPrepare::simplifyOffsetableRelocate(GCStatepointInst &I) {
  bool MadeChange = false;
  SmallVector<GCRelocateInst *, 2> AllRelocateCalls;
  for (auto *U : I.users())
    if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
      // Collect all the relocate calls associated with a statepoint
      AllRelocateCalls.push_back(Relocate);

  // We need at least one base pointer relocation + one derived pointer
  // relocation to mangle
  if (AllRelocateCalls.size() < 2)
    return false;

  // RelocateInstMap is a mapping from the base relocate instruction to the
  // corresponding derived relocate instructions
  MapVector<GCRelocateInst *, SmallVector<GCRelocateInst *, 0>> RelocateInstMap;
  computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
  if (RelocateInstMap.empty())
    return false;

  for (auto &Item : RelocateInstMap)
    // Item.first is the RelocatedBase to offset against
    // Item.second is the vector of Targets to replace
    MadeChange |= simplifyRelocatesOffABase(Item.first, Item.second);
  return MadeChange;
}

/// Sink the specified cast instruction into its user blocks.
static bool SinkCast(CastInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock *, CastInst *> InsertedCasts;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in. For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(TheUse);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // The first insertion point of a block containing an EH pad is after the
    // pad. If the pad is the user, we cannot sink the cast past the pad.
    if (User->isEHPad())
      continue;

    // If the block selected to receive the cast is an EH pad that does not
    // allow non-PHI instructions before the terminator, we can't sink the
    // cast.
    if (UserBB->getTerminator()->isEHPad())
      continue;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB)
      continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedCast = cast<CastInst>(CI->clone());
      InsertedCast->insertBefore(*UserBB, InsertPt);
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    MadeChange = true;
    ++NumCastUses;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    salvageDebugInfo(*CI);
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

/// If the specified cast instruction is a noop copy (e.g. it's casting from
/// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
/// reduce the number of virtual registers that must be created and coalesced.
///
/// Return true if any changes are made.
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
                                       const DataLayout &DL) {
  // Sink only "cheap" (or nop) address-space casts. This is a weaker condition
  // than sinking only nop casts, but is helpful on some platforms.
  if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
    if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(),
                                 ASC->getDestAddressSpace()))
      return false;
  }

  // If this is a noop copy, check the source and destination value types.
  EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, CI->getType());

  // Is this an fp<->int conversion?
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT))
    return false;

  // If these values will be promoted, find out what they will be promoted
  // to. This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
      TargetLowering::TypePromoteInteger)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
      TargetLowering::TypePromoteInteger)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  return SinkCast(CI);
}

// Match a simple increment by constant operation. Note that if a sub is
// matched, the step is negated (as if the step had been canonicalized to
// an add, even though we leave the instruction alone.)
static bool matchIncrement(const Instruction *IVInc, Instruction *&LHS,
                           Constant *&Step) {
  if (match(IVInc, m_Add(m_Instruction(LHS), m_Constant(Step))) ||
      match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::uadd_with_overflow>(
                       m_Instruction(LHS), m_Constant(Step)))))
    return true;
  if (match(IVInc, m_Sub(m_Instruction(LHS), m_Constant(Step))) ||
      match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>(
                       m_Instruction(LHS), m_Constant(Step))))) {
    Step = ConstantExpr::getNeg(Step);
    return true;
  }
  return false;
}
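
// For illustration of matchIncrement (hypothetical IR, not from a test), both
// of these match, the second with the step negated as if it were an add:
//   %iv.next = add i32 %iv, 4   ; LHS = %iv, Step = 4
//   %iv.next = sub i32 %iv, 4   ; LHS = %iv, Step = -4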

/// If given \p PN is an induction variable with value IVInc coming from the
/// backedge, and on each iteration it gets increased by Step, return pair
/// <IVInc, Step>. Otherwise, return std::nullopt.
static std::optional<std::pair<Instruction *, Constant *>>
getIVIncrement(const PHINode *PN, const LoopInfo *LI) {
  const Loop *L = LI->getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent() || !L->getLoopLatch())
    return std::nullopt;
  auto *IVInc =
      dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch()));
  if (!IVInc || LI->getLoopFor(IVInc->getParent()) != L)
    return std::nullopt;
  Instruction *LHS = nullptr;
  Constant *Step = nullptr;
  if (matchIncrement(IVInc, LHS, Step) && LHS == PN)
    return std::make_pair(IVInc, Step);
  return std::nullopt;
}

static bool isIVIncrement(const Value *V, const LoopInfo *LI) {
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;
  Instruction *LHS = nullptr;
  Constant *Step = nullptr;
  if (!matchIncrement(I, LHS, Step))
    return false;
  if (auto *PN = dyn_cast<PHINode>(LHS))
    if (auto IVInc = getIVIncrement(PN, LI))
      return IVInc->first == I;
  return false;
}

bool CodeGenPrepare::replaceMathCmpWithIntrinsic(BinaryOperator *BO,
                                                 Value *Arg0, Value *Arg1,
                                                 CmpInst *Cmp,
                                                 Intrinsic::ID IID) {
  auto IsReplacableIVIncrement = [this, &Cmp](BinaryOperator *BO) {
    if (!isIVIncrement(BO, LI))
      return false;
    const Loop *L = LI->getLoopFor(BO->getParent());
    assert(L && "L should not be null after isIVIncrement()");
    // Do not risk moving the increment into a child loop.
    if (LI->getLoopFor(Cmp->getParent()) != L)
      return false;

    // Finally, we need to ensure that the insert point will dominate all
    // existing uses of the increment.

    auto &DT = getDT(*BO->getParent()->getParent());
    if (DT.dominates(Cmp->getParent(), BO->getParent()))
      // If we're moving up the dom tree, all uses are trivially dominated.
      // (This is the common case for code produced by LSR.)
      return true;

    // Otherwise, special case the single use in the phi recurrence.
    return BO->hasOneUse() && DT.dominates(Cmp->getParent(), L->getLoopLatch());
  };
  if (BO->getParent() != Cmp->getParent() && !IsReplacableIVIncrement(BO)) {
    // We used to use a dominator tree here to allow multi-block optimization.
    // But that was problematic because:
    // 1. It could cause a perf regression by hoisting the math op into the
    // critical path.
    // 2. It could cause a perf regression by creating a value that was live
    // across multiple blocks and increasing register pressure.
    // 3. Use of a dominator tree could cause large compile-time regression.
    // This is because we recompute the DT on every change in the main CGP
    // run-loop. The recomputing is probably unnecessary in many cases, so if
    // that was fixed, using a DT here would be ok.
    //
    // There is one important particular case we still want to handle: if BO is
    // the IV increment. Important properties that make it profitable:
    // - We can speculate the IV increment anywhere in the loop (as long as the
    //   indvar Phi is its only user);
    // - Upon computing Cmp, we effectively compute something equivalent to the
    //   IV increment (even though it is expressed differently in the IR). So
    //   moving it up to the cmp point does not really increase register
    //   pressure.
    return false;
  }

  // We allow matching the canonical IR (add X, C) back to (usubo X, -C).
  if (BO->getOpcode() == Instruction::Add &&
      IID == Intrinsic::usub_with_overflow) {
    assert(isa<Constant>(Arg1) && "Unexpected input for usubo");
    Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1));
  }

  // Insert at the first instruction of the pair.
  Instruction *InsertPt = nullptr;
  for (Instruction &Iter : *Cmp->getParent()) {
    // If BO is an XOR, it is not guaranteed that it comes after both inputs to
    // the overflow intrinsic are defined.
    if ((BO->getOpcode() != Instruction::Xor && &Iter == BO) || &Iter == Cmp) {
      InsertPt = &Iter;
      break;
    }
  }
  assert(InsertPt != nullptr && "Parent block did not contain cmp or binop");

  IRBuilder<> Builder(InsertPt);
  Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1);
  if (BO->getOpcode() != Instruction::Xor) {
    Value *Math = Builder.CreateExtractValue(MathOV, 0, "math");
    replaceAllUsesWith(BO, Math, FreshBBs, IsHugeFunc);
  } else
    assert(BO->hasOneUse() &&
           "Patterns with XOr should use the BO only in the compare");
  Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov");
  replaceAllUsesWith(Cmp, OV, FreshBBs, IsHugeFunc);
  Cmp->eraseFromParent();
  BO->eraseFromParent();
  return true;
}

/// Match special-case patterns that check for unsigned add overflow.
static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp,
                                                   BinaryOperator *&Add) {
  // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val)
  // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero)
  Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);

  // We are not expecting non-canonical/degenerate code. Just bail out.
  if (isa<Constant>(A))
    return false;

  ICmpInst::Predicate Pred = Cmp->getPredicate();
  if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes()))
    B = ConstantInt::get(B->getType(), 1);
  else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt()))
    B = ConstantInt::get(B->getType(), -1);
  else
    return false;

  // Check the users of the variable operand of the compare looking for an add
  // with the adjusted constant.
  for (User *U : A->users()) {
    if (match(U, m_Add(m_Specific(A), m_Specific(B)))) {
      Add = cast<BinaryOperator>(U);
      return true;
    }
  }
  return false;
}
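
// For example (hypothetical IR), 'icmp eq i32 %a, -1' together with a user
// 'add i32 %a, 1' matches the first edge case above: the add wraps to zero
// exactly when %a is the maximum unsigned value, which is what the compare
// tests.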

/// Try to combine the compare into a call to the llvm.uadd.with.overflow
/// intrinsic. Return true if any changes were made.
bool CodeGenPrepare::combineToUAddWithOverflow(CmpInst *Cmp,
                                               ModifyDT &ModifiedDT) {
  bool EdgeCase = false;
  Value *A, *B;
  BinaryOperator *Add;
  if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) {
    if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add))
      return false;
    // Set A and B in case we matched matchUAddWithOverflowConstantEdgeCases.
    A = Add->getOperand(0);
    B = Add->getOperand(1);
    EdgeCase = true;
  }

  if (!TLI->shouldFormOverflowOp(ISD::UADDO,
                                 TLI->getValueType(*DL, Add->getType()),
                                 Add->hasNUsesOrMore(EdgeCase ? 1 : 2)))
    return false;

  // We don't want to move around uses of condition values this late, so we
  // check if it is legal to create the call to the intrinsic in the basic
  // block containing the icmp.
  if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse())
    return false;

  if (!replaceMathCmpWithIntrinsic(Add, A, B, Cmp,
                                   Intrinsic::uadd_with_overflow))
    return false;

  // Reset callers - do not crash by iterating over a dead instruction.
  ModifiedDT = ModifyDT::ModifyInstDT;
  return true;
}
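
// The overall effect of combineToUAddWithOverflow, sketched on hypothetical
// IR, is to rewrite
//   %add = add i32 %a, %b
//   %ov  = icmp ult i32 %add, %a
// into
//   %m   = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
//   %add = extractvalue { i32, i1 } %m, 0
//   %ov  = extractvalue { i32, i1 } %m, 1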

bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp,
                                               ModifyDT &ModifiedDT) {
  // We are not expecting non-canonical/degenerate code. Just bail out.
  Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
  if (isa<Constant>(A) && isa<Constant>(B))
    return false;

  // Convert (A u> B) to (A u< B) to simplify pattern matching.
  ICmpInst::Predicate Pred = Cmp->getPredicate();
  if (Pred == ICmpInst::ICMP_UGT) {
    std::swap(A, B);
    Pred = ICmpInst::ICMP_ULT;
  }
  // Convert special-case: (A == 0) is the same as (A u< 1).
  if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) {
    B = ConstantInt::get(B->getType(), 1);
    Pred = ICmpInst::ICMP_ULT;
  }
  // Convert special-case: (A != 0) is the same as (0 u< A).
  if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) {
    std::swap(A, B);
    Pred = ICmpInst::ICMP_ULT;
  }
  if (Pred != ICmpInst::ICMP_ULT)
    return false;

  // Walk the users of a variable operand of a compare looking for a subtract or
  // add with that same operand. Also match the 2nd operand of the compare to
  // the add/sub, but that may be a negated constant operand of an add.
  Value *CmpVariableOperand = isa<Constant>(A) ? B : A;
  BinaryOperator *Sub = nullptr;
  for (User *U : CmpVariableOperand->users()) {
    // A - B, A u< B --> usubo(A, B)
    if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) {
      Sub = cast<BinaryOperator>(U);
      break;
    }

    // A + (-C), A u< C (canonicalized form of (sub A, C))
    const APInt *CmpC, *AddC;
    if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) &&
        match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) {
      Sub = cast<BinaryOperator>(U);
      break;
    }
  }
  if (!Sub)
    return false;

  if (!TLI->shouldFormOverflowOp(ISD::USUBO,
                                 TLI->getValueType(*DL, Sub->getType()),
                                 Sub->hasNUsesOrMore(1)))
    return false;

  if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1),
                                   Cmp, Intrinsic::usub_with_overflow))
    return false;

  // Reset callers - do not crash by iterating over a dead instruction.
  ModifiedDT = ModifyDT::ModifyInstDT;
  return true;
}
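
// The overall effect of combineToUSubWithOverflow, sketched on hypothetical
// IR, is to rewrite
//   %sub = sub i32 %a, %b
//   %ov  = icmp ult i32 %a, %b
// into
//   %m   = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
//   %sub = extractvalue { i32, i1 } %m, 0
//   %ov  = extractvalue { i32, i1 } %m, 1
// since an unsigned subtract overflows exactly when %a u< %b.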

/// Sink the given CmpInst into user blocks to reduce the number of virtual
/// registers that must be created and coalesced. This is a clear win except on
/// targets with multiple condition code registers (PowerPC), where it might
/// lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
  if (TLI.hasMultipleConditionRegisters())
    return false;

  // Avoid sinking soft-FP comparisons, since this can move them into a loop.
  if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
    return false;

  // Only insert a cmp in each block once.
  DenseMap<BasicBlock *, CmpInst *> InsertedCmps;

  bool MadeChange = false;
  for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();
    BasicBlock *DefBB = Cmp->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB)
      continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedCmp = CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(),
                                    Cmp->getOperand(0), Cmp->getOperand(1), "");
      InsertedCmp->insertBefore(*UserBB, InsertPt);
      // Propagate the debug info.
      InsertedCmp->setDebugLoc(Cmp->getDebugLoc());
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
    MadeChange = true;
    ++NumCmpUses;
  }

  // If we removed all uses, nuke the cmp.
  if (Cmp->use_empty()) {
    Cmp->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

/// For a pattern like:
///
/// DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB)
/// ...
/// DomBB:
/// ...
/// br DomCond, TrueBB, CmpBB
/// CmpBB: (with DomBB being the single predecessor)
/// ...
/// Cmp = icmp eq CmpOp0, CmpOp1
/// ...
///
/// This would use two comparisons on targets where the lowering of icmp
/// sgt/slt differs from the lowering of icmp eq (PowerPC). This function
/// tries to convert 'Cmp = icmp eq CmpOp0, CmpOp1' into
/// 'Cmp = icmp slt/sgt CmpOp0, CmpOp1'. After that, DomCond and Cmp can share
/// the same comparison, eliminating one comparison.
///
/// Return true if any changes are made.
static bool foldICmpWithDominatingICmp(CmpInst *Cmp,
                                       const TargetLowering &TLI) {
  if (!EnableICMP_EQToICMP_ST && TLI.isEqualityCmpFoldedWithSignedCmp())
    return false;

  ICmpInst::Predicate Pred = Cmp->getPredicate();
  if (Pred != ICmpInst::ICMP_EQ)
    return false;

  // If icmp eq has users other than BranchInst and SelectInst, converting it to
  // icmp slt/sgt would introduce more redundant LLVM IR.
  for (User *U : Cmp->users()) {
    if (isa<BranchInst>(U))
      continue;
    if (isa<SelectInst>(U) && cast<SelectInst>(U)->getCondition() == Cmp)
      continue;
    return false;
  }

  // This is a cheap/incomplete check for dominance - just match a single
  // predecessor with a conditional branch.
  BasicBlock *CmpBB = Cmp->getParent();
  BasicBlock *DomBB = CmpBB->getSinglePredecessor();
  if (!DomBB)
    return false;

  // We want to ensure that the only way control gets to the comparison of
  // interest is that a less/greater than comparison on the same operands is
  // false.
  Value *DomCond;
  BasicBlock *TrueBB, *FalseBB;
  if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB)))
    return false;
  if (CmpBB != FalseBB)
    return false;

  Value *CmpOp0 = Cmp->getOperand(0), *CmpOp1 = Cmp->getOperand(1);
  ICmpInst::Predicate DomPred;
  if (!match(DomCond, m_ICmp(DomPred, m_Specific(CmpOp0), m_Specific(CmpOp1))))
    return false;
  if (DomPred != ICmpInst::ICMP_SGT && DomPred != ICmpInst::ICMP_SLT)
    return false;

  // Convert the equality comparison to the opposite of the dominating
  // comparison and swap the direction for all branch/select users.
  // We have conceptually converted:
  // Res = (a < b) ? <LT_RES> : (a == b) ? <EQ_RES> : <GT_RES>;
  // to
  // Res = (a < b) ? <LT_RES> : (a > b) ? <GT_RES> : <EQ_RES>;
  // And similarly for branches.
  for (User *U : Cmp->users()) {
    if (auto *BI = dyn_cast<BranchInst>(U)) {
      assert(BI->isConditional() && "Must be conditional");
      BI->swapSuccessors();
      continue;
    }
    if (auto *SI = dyn_cast<SelectInst>(U)) {
      // Swap operands
      SI->swapValues();
      SI->swapProfMetadata();
      continue;
    }
    llvm_unreachable("Must be a branch or a select");
  }
  Cmp->setPredicate(CmpInst::getSwappedPredicate(DomPred));
  return true;
}

/// Many architectures use the same instruction for both subtract and cmp. Try
/// to swap cmp operands to match subtract operations to allow for CSE.
static bool swapICmpOperandsToExposeCSEOpportunities(CmpInst *Cmp) {
  Value *Op0 = Cmp->getOperand(0);
  Value *Op1 = Cmp->getOperand(1);
  if (!Op0->getType()->isIntegerTy() || isa<Constant>(Op0) ||
      isa<Constant>(Op1) || Op0 == Op1)
    return false;

  // If a subtract already has the same operands as a compare, swapping would be
  // bad. If a subtract has the same operands as a compare but in reverse order,
  // then swapping is good.
  int GoodToSwap = 0;
  unsigned NumInspected = 0;
  for (const User *U : Op0->users()) {
    // Avoid walking many users.
    if (++NumInspected > 128)
      return false;
    if (match(U, m_Sub(m_Specific(Op1), m_Specific(Op0))))
      GoodToSwap++;
    else if (match(U, m_Sub(m_Specific(Op0), m_Specific(Op1))))
      GoodToSwap--;
  }

  if (GoodToSwap > 0) {
    Cmp->swapOperands();
    return true;
  }
  return false;
}
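
// For example (hypothetical IR), given
//   %d = sub i32 %y, %x
//   %c = icmp eq i32 %x, %y
// swapping the compare operands to 'icmp eq i32 %y, %x' lets a target whose
// subtract instruction also sets condition codes reuse one instruction for
// both the subtract and the compare.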

static bool foldFCmpToFPClassTest(CmpInst *Cmp, const TargetLowering &TLI,
                                  const DataLayout &DL) {
  FCmpInst *FCmp = dyn_cast<FCmpInst>(Cmp);
  if (!FCmp)
    return false;

  // Don't fold if the target offers free fabs and the predicate is legal.
  EVT VT = TLI.getValueType(DL, Cmp->getOperand(0)->getType());
  if (TLI.isFAbsFree(VT) &&
      TLI.isCondCodeLegal(getFCmpCondCode(FCmp->getPredicate()),
                          VT.getSimpleVT()))
    return false;

  // Reverse the canonicalization if it is a FP class test
  auto ShouldReverseTransform = [](FPClassTest ClassTest) {
    return ClassTest == fcInf || ClassTest == (fcInf | fcNan);
  };
  auto [ClassVal, ClassTest] =
      fcmpToClassTest(FCmp->getPredicate(), *FCmp->getParent()->getParent(),
                      FCmp->getOperand(0), FCmp->getOperand(1));
  if (!ClassVal)
    return false;

  if (!ShouldReverseTransform(ClassTest) && !ShouldReverseTransform(~ClassTest))
    return false;

  IRBuilder<> Builder(Cmp);
  Value *IsFPClass = Builder.createIsFPClass(ClassVal, ClassTest);
  Cmp->replaceAllUsesWith(IsFPClass);
  RecursivelyDeleteTriviallyDeadInstructions(Cmp);
  return true;
}
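
// For instance (a hypothetical sketch, not from a test), on a target without
// a free fabs the infinity test
//   %fabs = call double @llvm.fabs.f64(double %x)
//   %c    = fcmp oeq double %fabs, 0x7FF0000000000000
// can become a class test on %x instead:
//   %c = call i1 @llvm.is.fpclass.f64(double %x, i32 516) ; 516 == fcInf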

bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT) {
  if (sinkCmpExpression(Cmp, *TLI))
    return true;

  if (combineToUAddWithOverflow(Cmp, ModifiedDT))
    return true;

  if (combineToUSubWithOverflow(Cmp, ModifiedDT))
    return true;

  if (foldICmpWithDominatingICmp(Cmp, *TLI))
    return true;

  if (swapICmpOperandsToExposeCSEOpportunities(Cmp))
    return true;

  if (foldFCmpToFPClassTest(Cmp, *TLI, *DL))
    return true;

  return false;
}

/// Duplicate and sink the given 'and' instruction into user blocks where it is
/// used in a compare to allow isel to generate better code for targets where
/// this operation can be combined.
///
/// Return true if any changes are made.
static bool sinkAndCmp0Expression(Instruction *AndI, const TargetLowering &TLI,
                                  SetOfInstrs &InsertedInsts) {
  // Double-check that we're not trying to optimize an instruction that was
  // already optimized by some other part of this pass.
  assert(!InsertedInsts.count(AndI) &&
         "Attempting to optimize already optimized and instruction");
  (void)InsertedInsts;

  // Nothing to do for single use in same basic block.
  if (AndI->hasOneUse() &&
      AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent())
    return false;

  // Try to avoid cases where sinking/duplicating is likely to increase register
  // pressure.
  if (!isa<ConstantInt>(AndI->getOperand(0)) &&
      !isa<ConstantInt>(AndI->getOperand(1)) &&
      AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse())
    return false;

  for (auto *U : AndI->users()) {
    Instruction *User = cast<Instruction>(U);

    // Only sink 'and' feeding icmp with 0.
    if (!isa<ICmpInst>(User))
      return false;

    auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1));
    if (!CmpC || !CmpC->isZero())
      return false;
  }

  if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
    return false;

  LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
  LLVM_DEBUG(AndI->getParent()->dump());

  // Push the 'and' into the same block as the icmp 0. There should only be
  // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
  // others, so we don't need to keep track of which BBs we insert into.
  for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");

    // Keep the 'and' in the same place if the use is already in the same block.
    Instruction *InsertPt =
        User->getParent() == AndI->getParent() ? AndI : User;
    Instruction *InsertedAnd = BinaryOperator::Create(
        Instruction::And, AndI->getOperand(0), AndI->getOperand(1), "",
        InsertPt->getIterator());
    // Propagate the debug info.
    InsertedAnd->setDebugLoc(AndI->getDebugLoc());

    // Replace a use of the 'and' with a use of the new 'and'.
    TheUse = InsertedAnd;
    ++NumAndUses;
    LLVM_DEBUG(User->getParent()->dump());
  }

  // We removed all uses, nuke the and.
  AndI->eraseFromParent();
  return true;
}

/// Check if the candidates could be combined with a shift instruction, which
/// includes:
/// 1. Truncate instruction
/// 2. And instruction and the imm is a mask of the low bits:
///    imm & (imm+1) == 0
static bool isExtractBitsCandidateUse(Instruction *User) {
  if (!isa<TruncInst>(User)) {
    if (User->getOpcode() != Instruction::And ||
        !isa<ConstantInt>(User->getOperand(1)))
      return false;

    const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();

    if ((Cimm & (Cimm + 1)).getBoolValue())
      return false;
  }
  return true;
}
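
// To illustrate the mask test: 0x0F is a low-bit mask, since
// 0x0F & 0x10 == 0, whereas 0x0A is not, since 0x0A & 0x0B == 0x0A != 0.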

/// Sink both the shift and truncate instructions into the block of the
/// truncate's user.
static bool
SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
                     DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
                     const TargetLowering &TLI, const DataLayout &DL) {
  BasicBlock *UserBB = User->getParent();
  DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
  auto *TruncI = cast<TruncInst>(User);
  bool MadeChange = false;

  for (Value::user_iterator TruncUI = TruncI->user_begin(),
                            TruncE = TruncI->user_end();
       TruncUI != TruncE;) {
    Use &TruncTheUse = TruncUI.getUse();
    Instruction *TruncUser = cast<Instruction>(*TruncUI);
    // Preincrement use iterator so we don't invalidate it.
    ++TruncUI;

    int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
    if (!ISDOpcode)
      continue;

    // If the use is actually a legal node, there will not be an
    // implicit truncate.
    // FIXME: always querying the result type is just an
    // approximation; some nodes' legality is determined by the
    // operand or other means. There's no good way to find out though.
    if (TLI.isOperationLegalOrCustom(
            ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
      continue;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(TruncUser))
      continue;

    BasicBlock *TruncUserBB = TruncUser->getParent();

    if (UserBB == TruncUserBB)
      continue;

    BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
    CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];

    if (!InsertedShift && !InsertedTrunc) {
      BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
      assert(InsertPt != TruncUserBB->end());
      // Sink the shift
      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift =
            BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "");
      else
        InsertedShift =
            BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "");
      InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
      InsertedShift->insertBefore(*TruncUserBB, InsertPt);

      // Sink the trunc
      BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
      TruncInsertPt++;
      // It will go ahead of any debug-info.
      TruncInsertPt.setHeadBit(true);
      assert(TruncInsertPt != TruncUserBB->end());

      InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
                                       TruncI->getType(), "");
      InsertedTrunc->insertBefore(*TruncUserBB, TruncInsertPt);
      InsertedTrunc->setDebugLoc(TruncI->getDebugLoc());

      MadeChange = true;

      TruncTheUse = InsertedTrunc;
    }
  }
  return MadeChange;
}

/// Sink the shift *right* instruction into user blocks if the uses could
/// potentially be combined with this shift instruction and generate BitExtract
/// instruction. It will only be applied if the architecture supports BitExtract
/// instruction. Here is an example:
/// BB1:
///   %x.extract.shift = lshr i64 %arg1, 32
/// BB2:
///   %x.extract.trunc = trunc i64 %x.extract.shift to i16
/// ==>
///
/// BB2:
///   %x.extract.shift.1 = lshr i64 %arg1, 32
///   %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
///
/// CodeGen will recognize the pattern in BB2 and generate BitExtract
/// instruction.
/// Return true if any changes are made.
static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
                                const TargetLowering &TLI,
                                const DataLayout &DL) {
  BasicBlock *DefBB = ShiftI->getParent();

  /// Only insert instructions in each block once.
  DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;

  bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));

  bool MadeChange = false;
  for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);
    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    if (!isExtractBitsCandidateUse(User))
      continue;

    BasicBlock *UserBB = User->getParent();

    if (UserBB == DefBB) {
      // If the shift and truncate instructions are in the same BB, the use of
      // the truncate (TruncUse) may still introduce another truncate if the
      // type is not legal. In this case, we would like to sink both the shift
      // and the truncate to the BB of TruncUse.
      // For example:
      // BB1:
      //  i64 shift.result = lshr i64 opnd, imm
      //  trunc.result = trunc shift.result to i16
      //
      // BB2:
      //  ----> We will have an implicit truncate here if the architecture does
      //  not have an i16 compare.
      //  cmp i16 trunc.result, opnd2
      //
      if (isa<TruncInst>(User) &&
          shiftIsLegal
          // If the type of the truncate is legal, no truncate will be
          // introduced in other basic blocks.
          && (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType()))))
        MadeChange =
            SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);

      continue;
    }
    // If we have already inserted a shift into this block, use it.
    BinaryOperator *&InsertedShift = InsertedShifts[UserBB];

    if (!InsertedShift) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());

      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift =
            BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "");
      else
        InsertedShift =
            BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "");
      InsertedShift->insertBefore(*UserBB, InsertPt);
      InsertedShift->setDebugLoc(ShiftI->getDebugLoc());

      MadeChange = true;
    }

    // Replace a use of the shift with a use of the new shift.
    TheUse = InsertedShift;
  }

  // If we removed all uses, or there are none, nuke the shift.
  if (ShiftI->use_empty()) {
    salvageDebugInfo(*ShiftI);
    ShiftI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

/// If counting leading or trailing zeros is an expensive operation and a zero
/// input is defined, add a check for zero to avoid calling the intrinsic.
///
/// We want to transform:
///   %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
///
/// into:
///   entry:
///     %cmpz = icmp eq i64 %A, 0
///     br i1 %cmpz, label %cond.end, label %cond.false
///   cond.false:
///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
///     br label %cond.end
///   cond.end:
///     %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
///
/// If the transform is performed, return true and set ModifiedDT to true.
static bool despeculateCountZeros(IntrinsicInst *CountZeros,
                                  LoopInfo &LI,
                                  const TargetLowering *TLI,
                                  const DataLayout *DL, ModifyDT &ModifiedDT,
                                  SmallSet<BasicBlock *, 32> &FreshBBs,
                                  bool IsHugeFunc) {
  // If a zero input is undefined, it doesn't make sense to despeculate that.
  if (match(CountZeros->getOperand(1), m_One()))
    return false;

  // If it's cheap to speculate, there's nothing to do.
  Type *Ty = CountZeros->getType();
  auto IntrinsicID = CountZeros->getIntrinsicID();
  if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz(Ty)) ||
      (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz(Ty)))
    return false;

  // Only handle legal scalar cases. Anything else requires too much work.
  unsigned SizeInBits = Ty->getScalarSizeInBits();
  if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits())
    return false;

  // Bail if the value is never zero.
  Use &Op = CountZeros->getOperandUse(0);
  if (isKnownNonZero(Op, *DL))
    return false;

  // The intrinsic will be sunk behind a compare against zero and branch.
  BasicBlock *StartBlock = CountZeros->getParent();
  BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");
  if (IsHugeFunc)
    FreshBBs.insert(CallBlock);

  // Create another block after the count zero intrinsic. A PHI will be added
  // in this block to select the result of the intrinsic or the bit-width
  // constant if the input to the intrinsic is zero.
  BasicBlock::iterator SplitPt = std::next(BasicBlock::iterator(CountZeros));
  // Any debug-info after CountZeros should not be included.
  SplitPt.setHeadBit(true);
  BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");
  if (IsHugeFunc)
    FreshBBs.insert(EndBlock);

  // Update the LoopInfo. The new blocks are in the same loop as the start
  // block.
  if (Loop *L = LI.getLoopFor(StartBlock)) {
    L->addBasicBlockToLoop(CallBlock, LI);
    L->addBasicBlockToLoop(EndBlock, LI);
  }

  // Set up a builder to create a compare, conditional branch, and PHI.
  IRBuilder<> Builder(CountZeros->getContext());
  Builder.SetInsertPoint(StartBlock->getTerminator());
  Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());

  // Replace the unconditional branch that was created by the first split with
  // a compare against zero and a conditional branch.
  Value *Zero = Constant::getNullValue(Ty);
  // Avoid introducing branch on poison. This also replaces the ctz operand.
  if (!isGuaranteedNotToBeUndefOrPoison(Op))
    Op = Builder.CreateFreeze(Op, Op->getName() + ".fr");
  Value *Cmp = Builder.CreateICmpEQ(Op, Zero, "cmpz");
  Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
  StartBlock->getTerminator()->eraseFromParent();

  // Create a PHI in the end block to select either the output of the intrinsic
  // or the bit width of the operand.
  Builder.SetInsertPoint(EndBlock, EndBlock->begin());
  PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
  replaceAllUsesWith(CountZeros, PN, FreshBBs, IsHugeFunc);
  Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
  PN->addIncoming(BitWidth, StartBlock);
  PN->addIncoming(CountZeros, CallBlock);

  // We are explicitly handling the zero case, so we can set the intrinsic's
  // undefined zero argument to 'true'. This will also prevent reprocessing the
  // intrinsic; we only despeculate when a zero input is defined.
  CountZeros->setArgOperand(1, Builder.getTrue());
  ModifiedDT = ModifyDT::ModifyBBDT;
  return true;
}

bool CodeGenPrepare::optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
  // lower it to normal LLVM code, do so now.
  if (CI->isInlineAsm()) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (optimizeInlineAsmInst(CI))
      return true;
  }

  // Align the pointer arguments to this call if the target thinks it's a good
  // idea.
  unsigned MinSize;
  Align PrefAlign;
  if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
    for (auto &Arg : CI->args()) {
      // We want to align both objects whose address is used directly and
      // objects whose address is used in casts and GEPs, though it only makes
      // sense for GEPs if the offset is a multiple of the desired alignment and
      // if size - offset meets the size threshold.
      if (!Arg->getType()->isPointerTy())
        continue;
      APInt Offset(DL->getIndexSizeInBits(
                       cast<PointerType>(Arg->getType())->getAddressSpace()),
                   0);
      Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
      uint64_t Offset2 = Offset.getLimitedValue();
      if (!isAligned(PrefAlign, Offset2))
        continue;
      AllocaInst *AI;
      if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlign() < PrefAlign &&
          DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
        AI->setAlignment(PrefAlign);
      // Global variables can only be aligned if they are defined in this
      // object (i.e. they are uniquely initialized in this object), and
      // over-aligning global variables that have an explicit section is
      // forbidden.
      GlobalVariable *GV;
      if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
          GV->getPointerAlignment(*DL) < PrefAlign &&
          DL->getTypeAllocSize(GV->getValueType()) >= MinSize + Offset2)
        GV->setAlignment(PrefAlign);
    }
  }
  // If this is a memcpy (or similar) then we may be able to improve the
  // alignment.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
    Align DestAlign = getKnownAlignment(MI->getDest(), *DL);
    MaybeAlign MIDestAlign = MI->getDestAlign();
    if (!MIDestAlign || DestAlign > *MIDestAlign)
      MI->setDestAlignment(DestAlign);
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      MaybeAlign MTISrcAlign = MTI->getSourceAlign();
      Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL);
      if (!MTISrcAlign || SrcAlign > *MTISrcAlign)
        MTI->setSourceAlignment(SrcAlign);
    }
  }

  // If we have a cold call site, try to sink addressing computation into the
  // cold block. This interacts with our handling for loads and stores to
  // ensure that we can fold all uses of a potential addressing computation
  // into their uses. TODO: generalize this to work over profiling data
  if (CI->hasFnAttr(Attribute::Cold) && !OptSize &&
      !llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
    for (auto &Arg : CI->args()) {
      if (!Arg->getType()->isPointerTy())
        continue;
      unsigned AS = Arg->getType()->getPointerAddressSpace();
      if (optimizeMemoryInst(CI, Arg, Arg->getType(), AS))
        return true;
    }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II) {
    switch (II->getIntrinsicID()) {
    default:
      break;
    case Intrinsic::assume:
      llvm_unreachable("llvm.assume should have been removed already");
    case Intrinsic::allow_runtime_check:
    case Intrinsic::allow_ubsan_check:
    case Intrinsic::experimental_widenable_condition: {
      // Give up on future widening opportunities so that we can fold away dead
      // paths and merge blocks before going into block-local instruction
      // selection.
      if (II->use_empty()) {
        II->eraseFromParent();
        return true;
      }
      Constant *RetVal = ConstantInt::getTrue(II->getContext());
      resetIteratorIfInvalidatedWhileCalling(BB, [&]() {
        replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr);
      });
      return true;
    }
    case Intrinsic::objectsize:
      llvm_unreachable("llvm.objectsize.* should have been lowered already");
    case Intrinsic::is_constant:
      llvm_unreachable("llvm.is.constant.* should have been lowered already");
    case Intrinsic::aarch64_stlxr:
    case Intrinsic::aarch64_stxr: {
      ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
      if (!ExtVal || !ExtVal->hasOneUse() ||
          ExtVal->getParent() == CI->getParent())
        return false;
      // Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
      ExtVal->moveBefore(CI);
      // Mark this instruction as "inserted by CGP", so that other
      // optimizations don't touch it.
      InsertedInsts.insert(ExtVal);
      return true;
    }

    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group: {
      Value *ArgVal = II->getArgOperand(0);
      auto it = LargeOffsetGEPMap.find(II);
      if (it != LargeOffsetGEPMap.end()) {
        // Merge entries in LargeOffsetGEPMap to reflect the RAUW.
        // Make sure not to have to deal with iterator invalidation
        // after possibly adding ArgVal to LargeOffsetGEPMap.
        auto GEPs = std::move(it->second);
        LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end());
        LargeOffsetGEPMap.erase(II);
      }

      replaceAllUsesWith(II, ArgVal, FreshBBs, IsHugeFunc);
      II->eraseFromParent();
      return true;
    }
    case Intrinsic::cttz:
    case Intrinsic::ctlz:
      // If counting zeros is expensive, try to avoid it.
      return despeculateCountZeros(II, *LI, TLI, DL, ModifiedDT, FreshBBs,
                                   IsHugeFunc);
    case Intrinsic::fshl:
    case Intrinsic::fshr:
      return optimizeFunnelShift(II);
    case Intrinsic::dbg_assign:
    case Intrinsic::dbg_value:
      return fixupDbgValue(II);
    case Intrinsic::masked_gather:
      return optimizeGatherScatterInst(II, II->getArgOperand(0));
    case Intrinsic::masked_scatter:
      return optimizeGatherScatterInst(II, II->getArgOperand(1));
    }

    SmallVector<Value *, 2> PtrOps;
    Type *AccessTy;
    if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
      while (!PtrOps.empty()) {
        Value *PtrVal = PtrOps.pop_back_val();
        unsigned AS = PtrVal->getType()->getPointerAddressSpace();
        if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
          return true;
      }
  }

  // From here on out we're working with named functions.
  if (!CI->getCalledFunction())
    return false;

  // Lower all default uses of _chk calls. This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // to fortified library functions (e.g. __memcpy_chk) that have the default
  // "don't know" as the objectsize. Anything else should be left alone.
  FortifiedLibCallSimplifier Simplifier(TLInfo, true);
  IRBuilder<> Builder(CI);
  if (Value *V = Simplifier.optimizeCall(CI, Builder)) {
    replaceAllUsesWith(CI, V, FreshBBs, IsHugeFunc);
    CI->eraseFromParent();
    return true;
  }

  return false;
}

static bool isIntrinsicOrLFToBeTailCalled(const TargetLibraryInfo *TLInfo,
                                          const CallInst *CI) {
  assert(CI && CI->use_empty());

  if (const auto *II = dyn_cast<IntrinsicInst>(CI))
    switch (II->getIntrinsicID()) {
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      return true;
    default:
      return false;
    }

  LibFunc LF;
  Function *Callee = CI->getCalledFunction();
  if (Callee && TLInfo && TLInfo->getLibFunc(*Callee, LF))
    switch (LF) {
    case LibFunc_strcpy:
    case LibFunc_strncpy:
    case LibFunc_strcat:
    case LibFunc_strncat:
      return true;
    default:
      return false;
    }

  return false;
}
2590 | |
2591 | /// Look for opportunities to duplicate return instructions to the predecessor |
2592 | /// to enable tail call optimizations. The case it is currently looking for is |
2593 | /// the following one. Known intrinsics or library function that may be tail |
2594 | /// called are taken into account as well. |
2595 | /// @code |
2596 | /// bb0: |
2597 | /// %tmp0 = tail call i32 @f0() |
2598 | /// br label %return |
2599 | /// bb1: |
2600 | /// %tmp1 = tail call i32 @f1() |
2601 | /// br label %return |
2602 | /// bb2: |
2603 | /// %tmp2 = tail call i32 @f2() |
2604 | /// br label %return |
2605 | /// return: |
2606 | /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ] |
2607 | /// ret i32 %retval |
2608 | /// @endcode |
2609 | /// |
2610 | /// => |
2611 | /// |
2612 | /// @code |
2613 | /// bb0: |
2614 | /// %tmp0 = tail call i32 @f0() |
2615 | /// ret i32 %tmp0 |
2616 | /// bb1: |
2617 | /// %tmp1 = tail call i32 @f1() |
2618 | /// ret i32 %tmp1 |
2619 | /// bb2: |
2620 | /// %tmp2 = tail call i32 @f2() |
2621 | /// ret i32 %tmp2 |
2622 | /// @endcode |
bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB,
                                                ModifyDT &ModifiedDT) {
  if (!BB->getTerminator())
    return false;

  ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
  if (!RetI)
    return false;

  assert(LI->getLoopFor(BB) == nullptr && "A return block cannot be in a loop");

  PHINode *PN = nullptr;
  ExtractValueInst *EVI = nullptr;
  BitCastInst *BCI = nullptr;
  Value *V = RetI->getReturnValue();
  if (V) {
    BCI = dyn_cast<BitCastInst>(V);
    if (BCI)
      V = BCI->getOperand(0);

    EVI = dyn_cast<ExtractValueInst>(V);
    if (EVI) {
      V = EVI->getOperand(0);
      if (!llvm::all_of(EVI->indices(), [](unsigned idx) { return idx == 0; }))
        return false;
    }

    PN = dyn_cast<PHINode>(V);
  }

  if (PN && PN->getParent() != BB)
    return false;

  auto isLifetimeEndOrBitCastFor = [](const Instruction *Inst) {
    const BitCastInst *BC = dyn_cast<BitCastInst>(Inst);
    if (BC && BC->hasOneUse())
      Inst = BC->user_back();

    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      return II->getIntrinsicID() == Intrinsic::lifetime_end;
    return false;
  };

  // Make sure there are no instructions between the first instruction
  // and return.
  const Instruction *BI = BB->getFirstNonPHI();
  // Skip over debug intrinsics and the bitcast.
  while (isa<DbgInfoIntrinsic>(BI) || BI == BCI || BI == EVI ||
         isa<PseudoProbeInst>(BI) || isLifetimeEndOrBitCastFor(BI))
    BI = BI->getNextNode();
  if (BI != RetI)
    return false;

  // Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
  // call.
  const Function *F = BB->getParent();
  SmallVector<BasicBlock *, 4> TailCallBBs;
  if (PN) {
    for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
      // Look through bitcasts.
      Value *IncomingVal = PN->getIncomingValue(I)->stripPointerCasts();
      CallInst *CI = dyn_cast<CallInst>(IncomingVal);
      BasicBlock *PredBB = PN->getIncomingBlock(I);
      // Make sure the phi value is indeed produced by the tail call.
      if (CI && CI->hasOneUse() && CI->getParent() == PredBB &&
          TLI->mayBeEmittedAsTailCall(CI) &&
          attributesPermitTailCall(F, CI, RetI, *TLI)) {
        TailCallBBs.push_back(PredBB);
      } else {
        // Consider the cases in which the phi value is indirectly produced by
        // the tail call, for example when encountering memset(), memmove(),
        // strcpy(), whose return value may have been optimized out. In such
        // cases, the value needs to be the first function argument.
        //
        // bb0:
        //   tail call void @llvm.memset.p0.i64(ptr %0, i8 0, i64 %1)
        //   br label %return
        // return:
        //   %phi = phi ptr [ %0, %bb0 ], [ %2, %entry ]
        if (PredBB && PredBB->getSingleSuccessor() == BB)
          CI = dyn_cast_or_null<CallInst>(
              PredBB->getTerminator()->getPrevNonDebugInstruction(true));

        if (CI && CI->use_empty() &&
            isIntrinsicOrLFToBeTailCalled(TLInfo, CI) &&
            IncomingVal == CI->getArgOperand(0) &&
            TLI->mayBeEmittedAsTailCall(CI) &&
            attributesPermitTailCall(F, CI, RetI, *TLI))
          TailCallBBs.push_back(PredBB);
      }
    }
  } else {
    SmallPtrSet<BasicBlock *, 4> VisitedBBs;
    for (BasicBlock *Pred : predecessors(BB)) {
      if (!VisitedBBs.insert(Pred).second)
        continue;
      if (Instruction *I = Pred->rbegin()->getPrevNonDebugInstruction(true)) {
        CallInst *CI = dyn_cast<CallInst>(I);
        if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) &&
            attributesPermitTailCall(F, CI, RetI, *TLI)) {
          // Either we return void or the return value must be the first
          // argument of a known intrinsic or library function.
          if (!V || isa<UndefValue>(V) ||
              (isIntrinsicOrLFToBeTailCalled(TLInfo, CI) &&
               V == CI->getArgOperand(0))) {
            TailCallBBs.push_back(Pred);
          }
        }
      }
    }
  }

  bool Changed = false;
  for (auto const &TailCallBB : TailCallBBs) {
    // Make sure the call instruction is followed by an unconditional branch to
    // the return block.
    BranchInst *BI = dyn_cast<BranchInst>(TailCallBB->getTerminator());
    if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
      continue;

    // Duplicate the return into TailCallBB.
    (void)FoldReturnIntoUncondBranch(RetI, BB, TailCallBB);
    assert(!VerifyBFIUpdates ||
           BFI->getBlockFreq(BB) >= BFI->getBlockFreq(TailCallBB));
    BFI->setBlockFreq(BB,
                      (BFI->getBlockFreq(BB) - BFI->getBlockFreq(TailCallBB)));
    ModifiedDT = ModifyDT::ModifyBBDT;
    Changed = true;
    ++NumRetsDup;
  }

  // If we eliminated all predecessors of the block, delete the block now.
  if (Changed && !BB->hasAddressTaken() && pred_empty(BB))
    BB->eraseFromParent();

  return Changed;
}

//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//

namespace {

/// This is an extended version of TargetLowering::AddrMode
/// which holds actual Value*'s for register values.
struct ExtAddrMode : public TargetLowering::AddrMode {
  Value *BaseReg = nullptr;
  Value *ScaledReg = nullptr;
  Value *OriginalValue = nullptr;
  bool InBounds = true;

  enum FieldName {
    NoField = 0x00,
    BaseRegField = 0x01,
    BaseGVField = 0x02,
    BaseOffsField = 0x04,
    ScaledRegField = 0x08,
    ScaleField = 0x10,
    MultipleFields = 0xff
  };

  ExtAddrMode() = default;

  void print(raw_ostream &OS) const;
  void dump() const;

  FieldName compare(const ExtAddrMode &other) {
    // First check that the types are the same on each field, as differing
    // types is something we can't cope with later on.
    if (BaseReg && other.BaseReg &&
        BaseReg->getType() != other.BaseReg->getType())
      return MultipleFields;
    if (BaseGV && other.BaseGV && BaseGV->getType() != other.BaseGV->getType())
      return MultipleFields;
    if (ScaledReg && other.ScaledReg &&
        ScaledReg->getType() != other.ScaledReg->getType())
      return MultipleFields;

    // Conservatively reject 'inbounds' mismatches.
    if (InBounds != other.InBounds)
      return MultipleFields;

    // Check each field to see if it differs.
    unsigned Result = NoField;
    if (BaseReg != other.BaseReg)
      Result |= BaseRegField;
    if (BaseGV != other.BaseGV)
      Result |= BaseGVField;
    if (BaseOffs != other.BaseOffs)
      Result |= BaseOffsField;
    if (ScaledReg != other.ScaledReg)
      Result |= ScaledRegField;
    // Don't count 0 as being a different scale, because that actually means
    // unscaled (which will already be counted by having no ScaledReg).
    if (Scale && other.Scale && Scale != other.Scale)
      Result |= ScaleField;

    if (llvm::popcount(Result) > 1)
      return MultipleFields;
    else
      return static_cast<FieldName>(Result);
  }

  // An AddrMode is trivial if it involves no calculation, i.e. it is just a
  // base with no offset.
  bool isTrivial() {
    // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is
    // trivial if at most one of these terms is nonzero, except that BaseGV and
    // BaseReg both being zero actually means a null pointer value, which we
    // consider to be 'non-zero' here.
    return !BaseOffs && !Scale && !(BaseGV && BaseReg);
  }

  Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) {
    switch (Field) {
    default:
      return nullptr;
    case BaseRegField:
      return BaseReg;
    case BaseGVField:
      return BaseGV;
    case ScaledRegField:
      return ScaledReg;
    case BaseOffsField:
      return ConstantInt::get(IntPtrTy, BaseOffs);
    }
  }

  void SetCombinedField(FieldName Field, Value *V,
                        const SmallVectorImpl<ExtAddrMode> &AddrModes) {
    switch (Field) {
    default:
      llvm_unreachable("Unhandled fields are expected to be rejected earlier");
      break;
    case ExtAddrMode::BaseRegField:
      BaseReg = V;
      break;
    case ExtAddrMode::BaseGVField:
      // A combined BaseGV is an Instruction, not a GlobalValue, so it goes
      // in the BaseReg field.
      assert(BaseReg == nullptr);
      BaseReg = V;
      BaseGV = nullptr;
      break;
    case ExtAddrMode::ScaledRegField:
      ScaledReg = V;
      // If we have a mix of scaled and unscaled addrmodes then we want scale
      // to be the scale and not zero.
      if (!Scale)
        for (const ExtAddrMode &AM : AddrModes)
          if (AM.Scale) {
            Scale = AM.Scale;
            break;
          }
      break;
    case ExtAddrMode::BaseOffsField:
      // The offset is no longer a constant, so it goes in ScaledReg with a
      // scale of 1.
      assert(ScaledReg == nullptr);
      ScaledReg = V;
      Scale = 1;
      BaseOffs = 0;
      break;
    }
  }
};
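
// A minimal sketch of how compare() classifies differences (value names
// hypothetical; both base registers must have the same type): two modes that
// differ only in their base register yield BaseRegField, so the combiner
// below may merge them by phi-ing the two base values, while modes differing
// in two or more fields yield MultipleFields and are never merged.
//
//   ExtAddrMode A, B;
//   A.BaseReg = ValA; A.BaseOffs = 8; // prints roughly as [inbounds 8 + Base:%a]
//   B.BaseReg = ValB; B.BaseOffs = 8; // prints roughly as [inbounds 8 + Base:%b]
//   assert(A.compare(B) == ExtAddrMode::BaseRegField);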

#ifndef NDEBUG
static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
  AM.print(OS);
  return OS;
}
#endif

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ExtAddrMode::print(raw_ostream &OS) const {
  bool NeedPlus = false;
  OS << "[";
  if (InBounds)
    OS << "inbounds ";
  if (BaseGV) {
    OS << "GV:";
    BaseGV->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }

  if (BaseOffs) {
    OS << (NeedPlus ? " + " : "") << BaseOffs;
    NeedPlus = true;
  }

  if (BaseReg) {
    OS << (NeedPlus ? " + " : "") << "Base:";
    BaseReg->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }
  if (Scale) {
    OS << (NeedPlus ? " + " : "") << Scale << "*";
    ScaledReg->printAsOperand(OS, /*PrintType=*/false);
  }

  OS << ']';
}

LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

} // end anonymous namespace

namespace {

/// This class provides transaction based operation on the IR.
/// Every change made through this class is recorded in the internal state and
/// can be undone (rollback) until commit is called.
/// CGP does not check if instructions could be speculatively executed when
/// moved. Preserving the original location would pessimize the debugging
/// experience, as well as negatively impact the quality of sample PGO.
class TypePromotionTransaction {
  /// This represents the common interface of the individual transaction.
  /// Each class implements the logic for doing one specific modification on
  /// the IR via the TypePromotionTransaction.
  class TypePromotionAction {
  protected:
    /// The Instruction modified.
    Instruction *Inst;

  public:
    /// Constructor of the action.
    /// The constructor performs the related action on the IR.
    TypePromotionAction(Instruction *Inst) : Inst(Inst) {}

    virtual ~TypePromotionAction() = default;

    /// Undo the modification done by this action.
    /// When this method is called, the IR must be in the same state as it was
    /// before this action was applied.
    /// \pre Undoing the action works if and only if the IR is in the exact
    /// same state as it was directly after this action was applied.
    virtual void undo() = 0;

    /// Commit every change made by this action.
    /// When the results on the IR of the action are to be kept, it is
    /// important to call this function, otherwise hidden information may be
    /// kept forever.
    virtual void commit() {
      // Nothing to be done, this action is not doing anything.
    }
  };

  /// Utility to remember the position of an instruction.
  class InsertionHandler {
    /// Position of an instruction.
    /// Either an instruction:
    /// - Is the first in a basic block: BB is used.
    /// - Has a previous instruction: PrevInst is used.
    union {
      Instruction *PrevInst;
      BasicBlock *BB;
    } Point;
    std::optional<DbgRecord::self_iterator> BeforeDbgRecord = std::nullopt;

    /// Remember whether or not the instruction had a previous instruction.
    bool HasPrevInstruction;

  public:
    /// Record the position of \p Inst.
    InsertionHandler(Instruction *Inst) {
      HasPrevInstruction = (Inst != &*(Inst->getParent()->begin()));
      BasicBlock *BB = Inst->getParent();

      // Record where we would have to re-insert the instruction in the
      // sequence of DbgRecords, if we ended up reinserting.
      if (BB->IsNewDbgInfoFormat)
        BeforeDbgRecord = Inst->getDbgReinsertionPosition();

      if (HasPrevInstruction) {
        Point.PrevInst = &*std::prev(Inst->getIterator());
      } else {
        Point.BB = BB;
      }
    }

    /// Insert \p Inst at the recorded position.
    void insert(Instruction *Inst) {
      if (HasPrevInstruction) {
        if (Inst->getParent())
          Inst->removeFromParent();
        Inst->insertAfter(&*Point.PrevInst);
      } else {
        BasicBlock::iterator Position = Point.BB->getFirstInsertionPt();
        if (Inst->getParent())
          Inst->moveBefore(*Point.BB, Position);
        else
          Inst->insertBefore(*Point.BB, Position);
      }

      Inst->getParent()->reinsertInstInDbgRecords(Inst, BeforeDbgRecord);
    }
  };

  /// Move an instruction before another.
  class InstructionMoveBefore : public TypePromotionAction {
    /// Original position of the instruction.
    InsertionHandler Position;

  public:
    /// Move \p Inst before \p Before.
    InstructionMoveBefore(Instruction *Inst, Instruction *Before)
        : TypePromotionAction(Inst), Position(Inst) {
      LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before
                        << "\n");
      Inst->moveBefore(Before);
    }

    /// Move the instruction back to its original position.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
      Position.insert(Inst);
    }
  };

  /// Set the operand of an instruction with a new value.
  class OperandSetter : public TypePromotionAction {
    /// Original operand of the instruction.
    Value *Origin;

    /// Index of the modified operand.
    unsigned Idx;

  public:
    /// Set \p Idx operand of \p Inst with \p NewVal.
    OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
        : TypePromotionAction(Inst), Idx(Idx) {
      LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
                        << "for:" << *Inst << "\n"
                        << "with:" << *NewVal << "\n");
      Origin = Inst->getOperand(Idx);
      Inst->setOperand(Idx, NewVal);
    }

    /// Restore the original value of the instruction.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
                        << "for: " << *Inst << "\n"
                        << "with: " << *Origin << "\n");
      Inst->setOperand(Idx, Origin);
    }
  };

  /// Hide the operands of an instruction.
  /// Do as if this instruction was not using any of its operands.
  class OperandsHider : public TypePromotionAction {
    /// The list of original operands.
    SmallVector<Value *, 4> OriginalValues;

  public:
    /// Remove \p Inst from the uses of the operands of \p Inst.
    OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
      LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
      unsigned NumOpnds = Inst->getNumOperands();
      OriginalValues.reserve(NumOpnds);
      for (unsigned It = 0; It < NumOpnds; ++It) {
        // Save the current operand.
        Value *Val = Inst->getOperand(It);
        OriginalValues.push_back(Val);
        // Set a dummy one.
        // We could use OperandSetter here, but that would imply an overhead
        // that we are not willing to pay.
        Inst->setOperand(It, UndefValue::get(Val->getType()));
      }
    }

    /// Restore the original list of uses.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
      for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
        Inst->setOperand(It, OriginalValues[It]);
    }
  };

  /// Build a truncate instruction.
  class TruncBuilder : public TypePromotionAction {
    Value *Val;

  public:
    /// Build a truncate instruction of \p Opnd producing a \p Ty
    /// result.
    /// trunc Opnd to Ty.
    TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
      IRBuilder<> Builder(Opnd);
      Builder.SetCurrentDebugLocation(DebugLoc());
      Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
      LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
    }

    /// Get the built value.
    Value *getBuiltValue() { return Val; }

    /// Remove the built instruction.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };

  /// Build a sign extension instruction.
  class SExtBuilder : public TypePromotionAction {
    Value *Val;

  public:
    /// Build a sign extension instruction of \p Opnd producing a \p Ty
    /// result.
    /// sext Opnd to Ty.
    SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
        : TypePromotionAction(InsertPt) {
      IRBuilder<> Builder(InsertPt);
      Val = Builder.CreateSExt(Opnd, Ty, "promoted");
      LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
    }

    /// Get the built value.
    Value *getBuiltValue() { return Val; }

    /// Remove the built instruction.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };

  /// Build a zero extension instruction.
  class ZExtBuilder : public TypePromotionAction {
    Value *Val;

  public:
    /// Build a zero extension instruction of \p Opnd producing a \p Ty
    /// result.
    /// zext Opnd to Ty.
    ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
        : TypePromotionAction(InsertPt) {
      IRBuilder<> Builder(InsertPt);
      Builder.SetCurrentDebugLocation(DebugLoc());
      Val = Builder.CreateZExt(Opnd, Ty, "promoted");
      LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
    }

    /// Get the built value.
    Value *getBuiltValue() { return Val; }

    /// Remove the built instruction.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };

  /// Mutate an instruction to another type.
  class TypeMutator : public TypePromotionAction {
    /// Record the original type.
    Type *OrigTy;

  public:
    /// Mutate the type of \p Inst into \p NewTy.
    TypeMutator(Instruction *Inst, Type *NewTy)
        : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
      LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
                        << "\n");
      Inst->mutateType(NewTy);
    }

    /// Mutate the instruction back to its original type.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
                        << "\n");
      Inst->mutateType(OrigTy);
    }
  };

  /// Replace the uses of an instruction by another instruction.
  class UsesReplacer : public TypePromotionAction {
    /// Helper structure to keep track of the replaced uses.
    struct InstructionAndIdx {
      /// The instruction that uses the replaced instruction.
      Instruction *Inst;

      /// The operand index at which the replaced instruction is used.
      unsigned Idx;

      InstructionAndIdx(Instruction *Inst, unsigned Idx)
          : Inst(Inst), Idx(Idx) {}
    };

    /// Keep track of the original uses (pair Instruction, Index).
    SmallVector<InstructionAndIdx, 4> OriginalUses;
    /// Keep track of the debug users.
    SmallVector<DbgValueInst *, 1> DbgValues;
    /// And non-instruction debug-users too.
    SmallVector<DbgVariableRecord *, 1> DbgVariableRecords;

    /// Keep track of the new value so that we can undo it by replacing
    /// instances of the new value with the original value.
    Value *New;

    using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator;

  public:
    /// Replace all the uses of \p Inst by \p New.
    UsesReplacer(Instruction *Inst, Value *New)
        : TypePromotionAction(Inst), New(New) {
      LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
                        << "\n");
      // Record the original uses.
      for (Use &U : Inst->uses()) {
        Instruction *UserI = cast<Instruction>(U.getUser());
        OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
      }
      // Record the debug uses separately. They are not in the instruction's
      // use list, but they are replaced by RAUW.
      findDbgValues(DbgValues, Inst, &DbgVariableRecords);

      // Now, we can replace the uses.
      Inst->replaceAllUsesWith(New);
    }

    /// Reassign the original uses of Inst to Inst.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
      for (InstructionAndIdx &Use : OriginalUses)
        Use.Inst->setOperand(Use.Idx, Inst);
      // RAUW has replaced all original uses with references to the new value,
      // including the debug uses. Since we are undoing the replacements,
      // the original debug uses must also be reinstated to maintain the
      // correctness and utility of debug value instructions.
      for (auto *DVI : DbgValues)
        DVI->replaceVariableLocationOp(New, Inst);
      // Similar story with DbgVariableRecords, the non-instruction
      // representation of dbg.values.
      for (DbgVariableRecord *DVR : DbgVariableRecords)
        DVR->replaceVariableLocationOp(New, Inst);
    }
  };

  /// Remove an instruction from the IR.
  class InstructionRemover : public TypePromotionAction {
    /// Original position of the instruction.
    InsertionHandler Inserter;

    /// Helper structure to hide all the links to the instruction. In other
    /// words, this helps to do as if the instruction was removed.
    OperandsHider Hider;

    /// Keep track of the uses replaced, if any.
    UsesReplacer *Replacer = nullptr;

    /// Keep track of instructions removed.
    SetOfInstrs &RemovedInsts;

  public:
    /// Remove all references to \p Inst and optionally replace all its
    /// uses with New.
    /// \p RemovedInsts Keep track of the instructions removed by this Action.
    /// \pre If !Inst->use_empty(), then New != nullptr
    InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts,
                       Value *New = nullptr)
        : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
          RemovedInsts(RemovedInsts) {
      if (New)
        Replacer = new UsesReplacer(Inst, New);
      LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
      RemovedInsts.insert(Inst);
      // The instructions removed here will be freed after completing
      // optimizeBlock() for all blocks as we need to keep track of the
      // removed instructions during promotion.
      Inst->removeFromParent();
    }

    ~InstructionRemover() override { delete Replacer; }

    InstructionRemover &operator=(const InstructionRemover &other) = delete;
    InstructionRemover(const InstructionRemover &other) = delete;

    /// Resurrect the instruction and reassign it to the proper uses if
    /// a new value was provided when building this action.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
      Inserter.insert(Inst);
      if (Replacer)
        Replacer->undo();
      Hider.undo();
      RemovedInsts.erase(Inst);
    }
  };

public:
  /// Restoration point.
  /// The restoration point is a pointer to an action instead of an iterator
  /// because the iterator may be invalidated but not the pointer.
  using ConstRestorationPt = const TypePromotionAction *;

  TypePromotionTransaction(SetOfInstrs &RemovedInsts)
      : RemovedInsts(RemovedInsts) {}

  /// Commit every change made in this transaction. Return true if any change
  /// happened.
  bool commit();

  /// Undo all the changes made after the given point.
  void rollback(ConstRestorationPt Point);

  /// Get the current restoration point.
  ConstRestorationPt getRestorationPoint() const;

  /// \name API for IR modification with state keeping to support rollback.
  /// @{
  /// Same as Instruction::setOperand.
  void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);

  /// Same as Instruction::eraseFromParent.
  void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);

  /// Same as Value::replaceAllUsesWith.
  void replaceAllUsesWith(Instruction *Inst, Value *New);

  /// Same as Value::mutateType.
  void mutateType(Instruction *Inst, Type *NewTy);

  /// Same as IRBuilder::createTrunc.
  Value *createTrunc(Instruction *Opnd, Type *Ty);

  /// Same as IRBuilder::createSExt.
  Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);

  /// Same as IRBuilder::createZExt.
  Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);

private:
  /// The ordered list of actions made so far.
  SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;

  using CommitPt =
      SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator;

  SetOfInstrs &RemovedInsts;
};

} // end anonymous namespace

void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
                                          Value *NewVal) {
  Actions.push_back(std::make_unique<TypePromotionTransaction::OperandSetter>(
      Inst, Idx, NewVal));
}

void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
                                                Value *NewVal) {
  Actions.push_back(
      std::make_unique<TypePromotionTransaction::InstructionRemover>(
          Inst, RemovedInsts, NewVal));
}

void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
                                                  Value *New) {
  Actions.push_back(
      std::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
}

void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
  Actions.push_back(
      std::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
}

Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, Type *Ty) {
  std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

Value *TypePromotionTransaction::createSExt(Instruction *Inst, Value *Opnd,
                                            Type *Ty) {
  std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

Value *TypePromotionTransaction::createZExt(Instruction *Inst, Value *Opnd,
                                            Type *Ty) {
  std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

TypePromotionTransaction::ConstRestorationPt
TypePromotionTransaction::getRestorationPoint() const {
  return !Actions.empty() ? Actions.back().get() : nullptr;
}

bool TypePromotionTransaction::commit() {
  for (std::unique_ptr<TypePromotionAction> &Action : Actions)
    Action->commit();
  bool Modified = !Actions.empty();
  Actions.clear();
  return Modified;
}

void TypePromotionTransaction::rollback(
    TypePromotionTransaction::ConstRestorationPt Point) {
  while (!Actions.empty() && Point != Actions.back().get()) {
    std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
    Curr->undo();
  }
}
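
// A minimal usage sketch of the transaction machinery above (local names
// hypothetical): speculative IR changes are registered through the
// transaction, then either kept wholesale or undone back to a saved point.
//
//   TypePromotionTransaction TPT(RemovedInsts);
//   TypePromotionTransaction::ConstRestorationPt Save =
//       TPT.getRestorationPoint();
//   TPT.mutateType(Ext, PromotedTy); // speculative change, recorded
//   if (Profitable)
//     TPT.commit();                  // keep all recorded changes
//   else
//     TPT.rollback(Save);            // undo everything made after Save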

namespace {

/// A helper class for matching addressing modes.
///
/// This encapsulates the logic for matching the target-legal addressing modes.
class AddressingModeMatcher {
  SmallVectorImpl<Instruction *> &AddrModeInsts;
  const TargetLowering &TLI;
  const TargetRegisterInfo &TRI;
  const DataLayout &DL;
  const LoopInfo &LI;
  const std::function<const DominatorTree &()> getDTFn;

  /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
  /// the memory instruction that we're computing this address for.
  Type *AccessTy;
  unsigned AddrSpace;
  Instruction *MemoryInst;

  /// This is the addressing mode that we're building up. This is
  /// part of the return value of this addressing mode matching stuff.
  ExtAddrMode &AddrMode;

  /// The instructions inserted by other CodeGenPrepare optimizations.
  const SetOfInstrs &InsertedInsts;

  /// A map from the instructions to their type before promotion.
  InstrToOrigTy &PromotedInsts;

  /// The ongoing transaction where every action should be registered.
  TypePromotionTransaction &TPT;

  // A GEP whose offset is too large to be folded into the addressing mode.
  std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP;

  /// This is set to true when we should not do profitability checks.
  /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
  bool IgnoreProfitability;

  /// True if we are optimizing for size.
  bool OptSize = false;

  ProfileSummaryInfo *PSI;
  BlockFrequencyInfo *BFI;

  AddressingModeMatcher(
      SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI,
      const TargetRegisterInfo &TRI, const LoopInfo &LI,
      const std::function<const DominatorTree &()> getDTFn, Type *AT,
      unsigned AS, Instruction *MI, ExtAddrMode &AM,
      const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts,
      TypePromotionTransaction &TPT,
      std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
      bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
      : AddrModeInsts(AMI), TLI(TLI), TRI(TRI),
        DL(MI->getDataLayout()), LI(LI), getDTFn(getDTFn),
        AccessTy(AT), AddrSpace(AS), MemoryInst(MI), AddrMode(AM),
        InsertedInsts(InsertedInsts), PromotedInsts(PromotedInsts), TPT(TPT),
        LargeOffsetGEP(LargeOffsetGEP), OptSize(OptSize), PSI(PSI), BFI(BFI) {
    IgnoreProfitability = false;
  }

public:
  /// Find the maximal addressing mode that a load/store of V can fold,
  /// given an access type of AccessTy. This returns a list of involved
  /// instructions in AddrModeInsts.
  /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
  /// optimizations.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p TPT The ongoing transaction where every action should be registered.
  static ExtAddrMode
  Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst,
        SmallVectorImpl<Instruction *> &AddrModeInsts,
        const TargetLowering &TLI, const LoopInfo &LI,
        const std::function<const DominatorTree &()> getDTFn,
        const TargetRegisterInfo &TRI, const SetOfInstrs &InsertedInsts,
        InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT,
        std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
        bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
    ExtAddrMode Result;

    bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, LI, getDTFn,
                                         AccessTy, AS, MemoryInst, Result,
                                         InsertedInsts, PromotedInsts, TPT,
                                         LargeOffsetGEP, OptSize, PSI, BFI)
                       .matchAddr(V, 0);
    (void)Success;
    assert(Success && "Couldn't select *anything*?");
    return Result;
  }

private:
  bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
  bool matchAddr(Value *Addr, unsigned Depth);
  bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth,
                          bool *MovedAway = nullptr);
  bool isProfitableToFoldIntoAddressingMode(Instruction *I,
                                            ExtAddrMode &AMBefore,
                                            ExtAddrMode &AMAfter);
  bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
  bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
                             Value *PromotedOperand) const;
};

class PhiNodeSet;

/// An iterator for PhiNodeSet.
class PhiNodeSetIterator {
  PhiNodeSet *const Set;
  size_t CurrentIndex = 0;

public:
  /// The constructor. Start should either point to a valid element or be
  /// equal to the size of the underlying SmallVector of the PhiNodeSet.
  PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start);
  PHINode *operator*() const;
  PhiNodeSetIterator &operator++();
  bool operator==(const PhiNodeSetIterator &RHS) const;
  bool operator!=(const PhiNodeSetIterator &RHS) const;
};

/// Keeps a set of PHINodes.
///
/// This is a minimal set implementation for a specific use case:
/// It is very fast when there are very few elements, but also provides good
/// performance when there are many. It is similar to SmallPtrSet, but also
/// provides iteration by insertion order, which is deterministic and stable
/// across runs. It is also similar to SmallSetVector, but provides removal of
/// elements in O(1) time. This is achieved by not actually removing the
/// element from the underlying vector, so it comes at the cost of using more
/// memory, but that is fine, since PhiNodeSets are used as short lived
/// objects.
class PhiNodeSet {
  friend class PhiNodeSetIterator;

  using MapType = SmallDenseMap<PHINode *, size_t, 32>;
  using iterator = PhiNodeSetIterator;

  /// Keeps the elements in the order of their insertion in the underlying
  /// vector. To achieve constant time removal, it never deletes any element.
  SmallVector<PHINode *, 32> NodeList;

  /// Keeps the elements in the underlying set implementation. This (and not
  /// the NodeList defined above) is the source of truth on whether an element
  /// is actually in the collection.
  MapType NodeMap;

  /// Points to the first valid (not deleted) element when the set is not empty
  /// and the value is not zero. Equals to the size of the underlying vector
  /// when the set is empty. When the value is 0, as in the beginning, the
  /// first element may or may not be valid.
  size_t FirstValidElement = 0;

public:
  /// Inserts a new element to the collection.
  /// \returns true if the element is actually added, i.e. was not in the
  /// collection before the operation.
  bool insert(PHINode *Ptr) {
    if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) {
      NodeList.push_back(Ptr);
      return true;
    }
    return false;
  }

  /// Removes the element from the collection.
  /// \returns whether the element is actually removed, i.e. was in the
  /// collection before the operation.
  bool erase(PHINode *Ptr) {
    if (NodeMap.erase(Ptr)) {
      SkipRemovedElements(FirstValidElement);
      return true;
    }
    return false;
  }

  /// Removes all elements and clears the collection.
  void clear() {
    NodeMap.clear();
    NodeList.clear();
    FirstValidElement = 0;
  }

  /// \returns an iterator that will iterate the elements in the order of
  /// insertion.
  iterator begin() {
    if (FirstValidElement == 0)
      SkipRemovedElements(FirstValidElement);
    return PhiNodeSetIterator(this, FirstValidElement);
  }

  /// \returns an iterator that points to the end of the collection.
  iterator end() { return PhiNodeSetIterator(this, NodeList.size()); }

  /// Returns the number of elements in the collection.
  size_t size() const { return NodeMap.size(); }

  /// \returns 1 if the given element is in the collection, and 0 otherwise.
  size_t count(PHINode *Ptr) const { return NodeMap.count(Ptr); }

private:
  /// Updates the CurrentIndex so that it will point to a valid element.
  ///
  /// If the element of NodeList at CurrentIndex is valid, it does not
  /// change it. If there are no more valid elements, it updates CurrentIndex
  /// to point to the end of the NodeList.
  void SkipRemovedElements(size_t &CurrentIndex) {
    while (CurrentIndex < NodeList.size()) {
      auto it = NodeMap.find(NodeList[CurrentIndex]);
      // If the element has been deleted and added again later, NodeMap will
      // point to a different index, so CurrentIndex will still be invalid.
      if (it != NodeMap.end() && it->second == CurrentIndex)
        break;
      ++CurrentIndex;
    }
  }
};

PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start)
    : Set(Set), CurrentIndex(Start) {}

PHINode *PhiNodeSetIterator::operator*() const {
  assert(CurrentIndex < Set->NodeList.size() &&
         "PhiNodeSet access out of range");
  return Set->NodeList[CurrentIndex];
}

PhiNodeSetIterator &PhiNodeSetIterator::operator++() {
  assert(CurrentIndex < Set->NodeList.size() &&
         "PhiNodeSet access out of range");
  ++CurrentIndex;
  Set->SkipRemovedElements(CurrentIndex);
  return *this;
}

bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const {
  return CurrentIndex == RHS.CurrentIndex;
}

bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const {
  return !((*this) == RHS);
}
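
// A minimal sketch of the PhiNodeSet contract (PHI names hypothetical):
// erase() is O(1) because the node is only dropped from NodeMap; the stale
// NodeList slot is then skipped during iteration, which still runs in
// insertion order.
//
//   PhiNodeSet Set;
//   Set.insert(PhiA);
//   Set.insert(PhiB);
//   Set.erase(PhiA);        // constant time; PhiA's vector slot goes stale
//   for (PHINode *P : Set)  // visits only PhiB
//     P->dump();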

/// Keep track of simplification of Phi nodes.
/// Accept the set of all phi nodes and erase a phi node from this set
/// when it is simplified.
class SimplificationTracker {
  DenseMap<Value *, Value *> Storage;
  const SimplifyQuery &SQ;
  // Tracks newly created Phi nodes. The elements are iterated by insertion
  // order.
  PhiNodeSet AllPhiNodes;
  // Tracks newly created Select nodes.
  SmallPtrSet<SelectInst *, 32> AllSelectNodes;

public:
  SimplificationTracker(const SimplifyQuery &sq) : SQ(sq) {}

  Value *Get(Value *V) {
    do {
      auto SV = Storage.find(V);
      if (SV == Storage.end())
        return V;
      V = SV->second;
    } while (true);
  }

  Value *Simplify(Value *Val) {
    SmallVector<Value *, 32> WorkList;
    SmallPtrSet<Value *, 32> Visited;
    WorkList.push_back(Val);
    while (!WorkList.empty()) {
      auto *P = WorkList.pop_back_val();
      if (!Visited.insert(P).second)
        continue;
      if (auto *PI = dyn_cast<Instruction>(P))
        if (Value *V = simplifyInstruction(cast<Instruction>(PI), SQ)) {
          for (auto *U : PI->users())
            WorkList.push_back(cast<Value>(U));
          Put(PI, V);
          PI->replaceAllUsesWith(V);
          if (auto *PHI = dyn_cast<PHINode>(PI))
            AllPhiNodes.erase(PHI);
          if (auto *Select = dyn_cast<SelectInst>(PI))
            AllSelectNodes.erase(Select);
          PI->eraseFromParent();
        }
    }
    return Get(Val);
  }

  void Put(Value *From, Value *To) { Storage.insert({From, To}); }

  void ReplacePhi(PHINode *From, PHINode *To) {
    Value *OldReplacement = Get(From);
    while (OldReplacement != From) {
      From = To;
      To = dyn_cast<PHINode>(OldReplacement);
      OldReplacement = Get(From);
    }
    assert(To && Get(To) == To && "Replacement PHI node is already replaced.");
    Put(From, To);
    From->replaceAllUsesWith(To);
    AllPhiNodes.erase(From);
    From->eraseFromParent();
  }

  PhiNodeSet &newPhiNodes() { return AllPhiNodes; }

  void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); }

  void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); }

  unsigned countNewPhiNodes() const { return AllPhiNodes.size(); }

  unsigned countNewSelectNodes() const { return AllSelectNodes.size(); }

  void destroyNewNodes(Type *CommonType) {
    // For safe erasing, replace the uses with a dummy value first.
    auto *Dummy = PoisonValue::get(CommonType);
    for (auto *I : AllPhiNodes) {
      I->replaceAllUsesWith(Dummy);
      I->eraseFromParent();
    }
    AllPhiNodes.clear();
    for (auto *I : AllSelectNodes) {
      I->replaceAllUsesWith(Dummy);
      I->eraseFromParent();
    }
    AllSelectNodes.clear();
  }
};
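
// A small sketch of the Get()/Put() contract (values hypothetical): Storage
// acts much like a union-find forest without path compression, so Get()
// follows replacement chains to the final survivor.
//
//   SimplificationTracker ST(SQ);
//   ST.Put(PhiA, PhiB);  // PhiA was simplified to PhiB
//   ST.Put(PhiB, Base);  // ... and PhiB later collapsed to Base
//   assert(ST.Get(PhiA) == Base && ST.Get(Base) == Base);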

/// A helper class for combining addressing modes.
class AddressingModeCombiner {
  typedef DenseMap<Value *, Value *> FoldAddrToValueMapping;
  typedef std::pair<PHINode *, PHINode *> PHIPair;

private:
  /// The addressing modes we've collected.
  SmallVector<ExtAddrMode, 16> AddrModes;

  /// The field in which the AddrModes differ, when we have more than one.
  ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField;

  /// Are the AddrModes that we have all just equal to their original values?
  bool AllAddrModesTrivial = true;

  /// Common Type for all different fields in addressing modes.
  Type *CommonType = nullptr;

  /// SimplifyQuery for simplifyInstruction utility.
  const SimplifyQuery &SQ;

  /// Original Address.
  Value *Original;

  /// Common value among addresses.
  Value *CommonValue = nullptr;

public:
  AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue)
      : SQ(_SQ), Original(OriginalValue) {}

  ~AddressingModeCombiner() { eraseCommonValueIfDead(); }

  /// Get the combined AddrMode.
  const ExtAddrMode &getAddrMode() const { return AddrModes[0]; }

  /// Add a new AddrMode if it's compatible with the AddrModes we already
  /// have.
  /// \return True iff we succeeded in doing so.
  bool addNewAddrMode(ExtAddrMode &NewAddrMode) {
    // Take note of whether we have any non-trivial AddrModes, as we need to
    // detect when all AddrModes are trivial, since then we would introduce a
    // phi or select which just duplicates what's already there.
    AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial();

    // If this is the first addrmode then everything is fine.
    if (AddrModes.empty()) {
      AddrModes.emplace_back(NewAddrMode);
      return true;
    }

    // Figure out how different this is from the other address modes, which we
    // can do just by comparing against the first one given that we only care
    // about the cumulative difference.
    ExtAddrMode::FieldName ThisDifferentField =
        AddrModes[0].compare(NewAddrMode);
    if (DifferentField == ExtAddrMode::NoField)
      DifferentField = ThisDifferentField;
    else if (DifferentField != ThisDifferentField)
      DifferentField = ExtAddrMode::MultipleFields;

    // If NewAddrMode differs in more than one dimension we cannot handle it.
    bool CanHandle = DifferentField != ExtAddrMode::MultipleFields;

    // If the Scale field is different then we reject.
    CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField;

    // We also must reject the case when the base offset is different and the
    // scale reg is not null; we cannot handle this case because the merge of
    // the different offsets would be used as the ScaleReg.
    CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField ||
                              !NewAddrMode.ScaledReg);

    // We also must reject the case when the GV is different and a BaseReg is
    // installed, because we want to use the base reg as a merge of the GV
    // values.
    CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField ||
                              !NewAddrMode.HasBaseReg);

    // Even if NewAddrMode is the same we still need to collect it, because
    // the original value is different. And later we will need all original
    // values as anchors during finding the common Phi node.
    if (CanHandle)
      AddrModes.emplace_back(NewAddrMode);
    else
      AddrModes.clear();

    return CanHandle;
  }

  /// Combine the addressing modes we've collected into a single
  /// addressing mode.
  /// \return True iff we successfully combined them or we only had one so
  /// didn't need to combine them anyway.
  bool combineAddrModes() {
    // If we have no AddrModes then they can't be combined.
    if (AddrModes.size() == 0)
      return false;

    // A single AddrMode can trivially be combined.
    if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField)
      return true;

    // If the AddrModes we collected are all just equal to the value they are
    // derived from then combining them wouldn't do anything useful.
    if (AllAddrModesTrivial)
      return false;

    if (!addrModeCombiningAllowed())
      return false;

    // Build a map between <original value, basic block where we saw it> to
    // value of base register.
    // Bail out if there is no common type.
    FoldAddrToValueMapping Map;
    if (!initializeMap(Map))
      return false;

    CommonValue = findCommon(Map);
    if (CommonValue)
      AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes);
    return CommonValue != nullptr;
  }

private:
  /// `CommonValue` may be a placeholder inserted by us.
  /// If the placeholder is not used, we should remove this dead instruction.
  void eraseCommonValueIfDead() {
    if (CommonValue && CommonValue->getNumUses() == 0)
      if (Instruction *CommonInst = dyn_cast<Instruction>(CommonValue))
        CommonInst->eraseFromParent();
  }

  /// Initialize Map with anchor values. For each address seen, we record the
  /// value of the differing field in that address. At the same time we find
  /// a common type for the differing fields, which we will use to create new
  /// Phi/Select nodes; keep it in the CommonType field.
  /// Return false if there is no common type found.
  bool initializeMap(FoldAddrToValueMapping &Map) {
    // Keep track of keys where the value is null. We will need to replace it
    // with constant null when we know the common type.
    SmallVector<Value *, 2> NullValue;
    Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType());
    for (auto &AM : AddrModes) {
      Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy);
      if (DV) {
        auto *Type = DV->getType();
        if (CommonType && CommonType != Type)
          return false;
        CommonType = Type;
        Map[AM.OriginalValue] = DV;
      } else {
        NullValue.push_back(AM.OriginalValue);
      }
    }
    assert(CommonType && "At least one non-null value must be!");
    for (auto *V : NullValue)
      Map[V] = Constant::getNullValue(CommonType);
    return true;
  }

  /// We have a mapping between value A and other value B where B was a field
  /// in the addressing mode represented by A. Also we have an original value
  /// C representing an address we started with. Traversing from C through
  /// phis and selects we ended up with A's in the map. This utility function
  /// tries to find a value V which is a field in addressing mode C, such that
  /// traversing through phi nodes and selects we end up at the corresponding
  /// values B in the map.
  /// The utility will create new Phi/Selects if needed.
  // The simple example looks as follows:
  // BB1:
  //   p1 = b1 + 40
  //   br cond BB2, BB3
  // BB2:
  //   p2 = b2 + 40
  //   br BB3
  // BB3:
  //   p = phi [p1, BB1], [p2, BB2]
  //   v = load p
  // Map is
  //   p1 -> b1
  //   p2 -> b2
  // Request is
  //   p -> ?
  // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3.
  Value *findCommon(FoldAddrToValueMapping &Map) {
    // Tracks the simplification of newly created phi nodes. The reason we use
    // this mapping is because we will add new created Phi nodes in AddrToBase.
    // Simplification of Phi nodes is recursive, so some Phi node may
    // be simplified after we added it to AddrToBase. In reality this
    // simplification is possible only if original phi/selects were not
    // simplified yet.
    // Using this mapping we can find the current value in AddrToBase.
    SimplificationTracker ST(SQ);

    // First step, DFS to create PHI nodes for all intermediate blocks.
    // Also fill traverse order for the second step.
    SmallVector<Value *, 32> TraverseOrder;
    InsertPlaceholders(Map, TraverseOrder, ST);

    // Second step, fill new nodes by merged values and simplify if possible.
    FillPlaceholders(Map, TraverseOrder, ST);

    if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) {
      ST.destroyNewNodes(CommonType);
      return nullptr;
    }

    // Now we'd like to match the new Phi nodes to existing ones.
    unsigned PhiNotMatchedCount = 0;
    if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) {
      ST.destroyNewNodes(CommonType);
      return nullptr;
    }

    auto *Result = ST.Get(Map.find(Original)->second);
    if (Result) {
      NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount;
      NumMemoryInstsSelectCreated += ST.countNewSelectNodes();
    }
    return Result;
  }
3996 | |
3997 | /// Try to match PHI node to Candidate. |
3998 | /// Matcher tracks the matched Phi nodes. |
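  /// A minimal hypothetical example of a successful match:
  ///   BB3:
  ///     %sunk_phi = phi [ %b1, %BB1 ], [ %b2, %BB2 ]  ; newly created
  ///     %existing = phi [ %b1, %BB1 ], [ %b2, %BB2 ]  ; candidate
  /// The pair {%sunk_phi, %existing} is recorded in Matcher; incoming values
  /// that are themselves new Phi nodes are pushed onto the worklist and must
  /// match pairwise in the same way.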
3999 | bool MatchPhiNode(PHINode *PHI, PHINode *Candidate, |
4000 | SmallSetVector<PHIPair, 8> &Matcher, |
4001 | PhiNodeSet &PhiNodesToMatch) { |
4002 | SmallVector<PHIPair, 8> WorkList; |
4003 | Matcher.insert(X: {PHI, Candidate}); |
4004 | SmallSet<PHINode *, 8> MatchedPHIs; |
4005 | MatchedPHIs.insert(Ptr: PHI); |
4006 | WorkList.push_back(Elt: {PHI, Candidate}); |
4007 | SmallSet<PHIPair, 8> Visited; |
4008 | while (!WorkList.empty()) { |
4009 | auto Item = WorkList.pop_back_val(); |
4010 | if (!Visited.insert(V: Item).second) |
4011 | continue; |
      // We iterate over all incoming values of the Phi to compare them.
      // If two corresponding values are different, both of them are Phis, the
      // first one is a Phi we added (subject to match), and both are in the
      // same basic block, then we can match our pair if those values match.
      // So we tentatively state that these values match and add the pair to
      // the worklist to verify that.
4017 | for (auto *B : Item.first->blocks()) { |
4018 | Value *FirstValue = Item.first->getIncomingValueForBlock(BB: B); |
4019 | Value *SecondValue = Item.second->getIncomingValueForBlock(BB: B); |
4020 | if (FirstValue == SecondValue) |
4021 | continue; |
4022 | |
4023 | PHINode *FirstPhi = dyn_cast<PHINode>(Val: FirstValue); |
4024 | PHINode *SecondPhi = dyn_cast<PHINode>(Val: SecondValue); |
4025 | |
        // If one of them is not a Phi, or the first one is not a Phi node
        // from the set we'd like to match, or the Phi nodes are from
        // different basic blocks, then we will not be able to match.
4030 | if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(Ptr: FirstPhi) || |
4031 | FirstPhi->getParent() != SecondPhi->getParent()) |
4032 | return false; |
4033 | |
4034 | // If we already matched them then continue. |
4035 | if (Matcher.count(key: {FirstPhi, SecondPhi})) |
4036 | continue; |
        // So the values are different and do not match yet; we need them to
        // match. (But we register no more than one match per PHI node, so
        // that we won't later try to replace them twice.)
4040 | if (MatchedPHIs.insert(Ptr: FirstPhi).second) |
4041 | Matcher.insert(X: {FirstPhi, SecondPhi}); |
        // But we must still verify the match.
4043 | WorkList.push_back(Elt: {FirstPhi, SecondPhi}); |
4044 | } |
4045 | } |
4046 | return true; |
4047 | } |
4048 | |
  /// For the given set of PHI nodes (in the SimplificationTracker) try
  /// to find their equivalents.
  /// Returns false if the matching fails and the creation of new Phis is
  /// disabled.
4052 | bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes, |
4053 | unsigned &PhiNotMatchedCount) { |
4054 | // Matched and PhiNodesToMatch iterate their elements in a deterministic |
4055 | // order, so the replacements (ReplacePhi) are also done in a deterministic |
4056 | // order. |
4057 | SmallSetVector<PHIPair, 8> Matched; |
4058 | SmallPtrSet<PHINode *, 8> WillNotMatch; |
4059 | PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes(); |
4060 | while (PhiNodesToMatch.size()) { |
4061 | PHINode *PHI = *PhiNodesToMatch.begin(); |
4062 | |
      // Add ourselves: if there is no equivalent Phi node in the basic
      // block, we will not match.
4064 | WillNotMatch.clear(); |
4065 | WillNotMatch.insert(Ptr: PHI); |
4066 | |
      // Traverse all Phis until we find an equivalent one or fail to do so.
4068 | bool IsMatched = false; |
4069 | for (auto &P : PHI->getParent()->phis()) { |
4070 | // Skip new Phi nodes. |
4071 | if (PhiNodesToMatch.count(Ptr: &P)) |
4072 | continue; |
4073 | if ((IsMatched = MatchPhiNode(PHI, Candidate: &P, Matcher&: Matched, PhiNodesToMatch))) |
4074 | break; |
        // If it does not match, collect all Phi nodes from the matcher.
        // If we end up with no match, then all these Phi nodes will not
        // match later.
4078 | for (auto M : Matched) |
4079 | WillNotMatch.insert(Ptr: M.first); |
4080 | Matched.clear(); |
4081 | } |
4082 | if (IsMatched) { |
4083 | // Replace all matched values and erase them. |
4084 | for (auto MV : Matched) |
4085 | ST.ReplacePhi(From: MV.first, To: MV.second); |
4086 | Matched.clear(); |
4087 | continue; |
4088 | } |
4089 | // If we are not allowed to create new nodes then bail out. |
4090 | if (!AllowNewPhiNodes) |
4091 | return false; |
4092 | // Just remove all seen values in matcher. They will not match anything. |
4093 | PhiNotMatchedCount += WillNotMatch.size(); |
4094 | for (auto *P : WillNotMatch) |
4095 | PhiNodesToMatch.erase(Ptr: P); |
4096 | } |
4097 | return true; |
4098 | } |

  /// Fill the placeholders with values from predecessors and simplify them.
4100 | void FillPlaceholders(FoldAddrToValueMapping &Map, |
4101 | SmallVectorImpl<Value *> &TraverseOrder, |
4102 | SimplificationTracker &ST) { |
4103 | while (!TraverseOrder.empty()) { |
4104 | Value *Current = TraverseOrder.pop_back_val(); |
      assert(Map.contains(Current) && "No node to fill!!!");
      Value *V = Map[Current];

      if (SelectInst *Select = dyn_cast<SelectInst>(Val: V)) {
        // CurrentValue must also be a Select.
        auto *CurrentSelect = cast<SelectInst>(Val: Current);
        auto *TrueValue = CurrentSelect->getTrueValue();
        assert(Map.contains(TrueValue) && "No True Value!");
        Select->setTrueValue(ST.Get(V: Map[TrueValue]));
        auto *FalseValue = CurrentSelect->getFalseValue();
        assert(Map.contains(FalseValue) && "No False Value!");
        Select->setFalseValue(ST.Get(V: Map[FalseValue]));
4117 | } else { |
4118 | // Must be a Phi node then. |
4119 | auto *PHI = cast<PHINode>(Val: V); |
4120 | // Fill the Phi node with values from predecessors. |
4121 | for (auto *B : predecessors(BB: PHI->getParent())) { |
4122 | Value *PV = cast<PHINode>(Val: Current)->getIncomingValueForBlock(BB: B); |
          assert(Map.contains(PV) && "No predecessor Value!");
4124 | PHI->addIncoming(V: ST.Get(V: Map[PV]), BB: B); |
4125 | } |
4126 | } |
4127 | Map[Current] = ST.Simplify(Val: V); |
4128 | } |
4129 | } |
4130 | |
  /// Starting from the original value, recursively iterates over the def-use
  /// chain up to known ending values represented in the map. For each
  /// traversed phi/select, inserts a placeholder Phi or Select.
  /// Reports all newly created Phi/Select nodes by adding them to the set.
  /// Also reports the order in which the values have been traversed.
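  /// For the earlier example (see findCommon), a hypothetical traversal from
  /// %p creates one placeholder phi in BB3:
  ///   %sunk_phi = phi CommonType   ; no incoming values yet
  /// %p is pushed onto TraverseOrder and mapped to %sunk_phi; the incoming
  /// values are filled in later by FillPlaceholders. Select placeholders are
  /// created with poison true/false operands instead.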
4136 | void InsertPlaceholders(FoldAddrToValueMapping &Map, |
4137 | SmallVectorImpl<Value *> &TraverseOrder, |
4138 | SimplificationTracker &ST) { |
4139 | SmallVector<Value *, 32> Worklist; |
    assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) &&
           "Address must be a Phi or Select node");
4142 | auto *Dummy = PoisonValue::get(T: CommonType); |
4143 | Worklist.push_back(Elt: Original); |
4144 | while (!Worklist.empty()) { |
4145 | Value *Current = Worklist.pop_back_val(); |
      // If it is already visited or is an ending value, skip it.
4147 | if (Map.contains(Val: Current)) |
4148 | continue; |
4149 | TraverseOrder.push_back(Elt: Current); |
4150 | |
4151 | // CurrentValue must be a Phi node or select. All others must be covered |
4152 | // by anchors. |
4153 | if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Val: Current)) { |
4154 | // Is it OK to get metadata from OrigSelect?! |
4155 | // Create a Select placeholder with dummy value. |
4156 | SelectInst *Select = |
4157 | SelectInst::Create(C: CurrentSelect->getCondition(), S1: Dummy, S2: Dummy, |
4158 | NameStr: CurrentSelect->getName(), |
4159 | InsertBefore: CurrentSelect->getIterator(), MDFrom: CurrentSelect); |
4160 | Map[Current] = Select; |
4161 | ST.insertNewSelect(SI: Select); |
4162 | // We are interested in True and False values. |
4163 | Worklist.push_back(Elt: CurrentSelect->getTrueValue()); |
4164 | Worklist.push_back(Elt: CurrentSelect->getFalseValue()); |
4165 | } else { |
4166 | // It must be a Phi node then. |
4167 | PHINode *CurrentPhi = cast<PHINode>(Val: Current); |
4168 | unsigned PredCount = CurrentPhi->getNumIncomingValues(); |
4169 | PHINode *PHI = |
            PHINode::Create(Ty: CommonType, NumReservedValues: PredCount, NameStr: "sunk_phi", InsertBefore: CurrentPhi->getIterator());
4171 | Map[Current] = PHI; |
4172 | ST.insertNewPhi(PN: PHI); |
4173 | append_range(C&: Worklist, R: CurrentPhi->incoming_values()); |
4174 | } |
4175 | } |
4176 | } |
4177 | |
4178 | bool addrModeCombiningAllowed() { |
4179 | if (DisableComplexAddrModes) |
4180 | return false; |
4181 | switch (DifferentField) { |
4182 | default: |
4183 | return false; |
4184 | case ExtAddrMode::BaseRegField: |
4185 | return AddrSinkCombineBaseReg; |
4186 | case ExtAddrMode::BaseGVField: |
4187 | return AddrSinkCombineBaseGV; |
4188 | case ExtAddrMode::BaseOffsField: |
4189 | return AddrSinkCombineBaseOffs; |
4190 | case ExtAddrMode::ScaledRegField: |
4191 | return AddrSinkCombineScaledReg; |
4192 | } |
4193 | } |
4194 | }; |
4195 | } // end anonymous namespace |
4196 | |
4197 | /// Try adding ScaleReg*Scale to the current addressing mode. |
4198 | /// Return true and update AddrMode if this addr mode is legal for the target, |
4199 | /// false if not. |
4200 | bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale, |
4201 | unsigned Depth) { |
4202 | // If Scale is 1, then this is the same as adding ScaleReg to the addressing |
4203 | // mode. Just process that directly. |
4204 | if (Scale == 1) |
4205 | return matchAddr(Addr: ScaleReg, Depth); |
4206 | |
4207 | // If the scale is 0, it takes nothing to add this. |
4208 | if (Scale == 0) |
4209 | return true; |
4210 | |
4211 | // If we already have a scale of this value, we can add to it, otherwise, we |
4212 | // need an available scale field. |
4213 | if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg) |
4214 | return false; |
4215 | |
4216 | ExtAddrMode TestAddrMode = AddrMode; |
4217 | |
4218 | // Add scale to turn X*4+X*3 -> X*7. This could also do things like |
4219 | // [A+B + A*7] -> [B+A*8]. |
4220 | TestAddrMode.Scale += Scale; |
4221 | TestAddrMode.ScaledReg = ScaleReg; |
4222 | |
4223 | // If the new address isn't legal, bail out. |
4224 | if (!TLI.isLegalAddressingMode(DL, AM: TestAddrMode, Ty: AccessTy, AddrSpace)) |
4225 | return false; |
4226 | |
4227 | // It was legal, so commit it. |
4228 | AddrMode = TestAddrMode; |
4229 | |
  // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Now check
  // to see if ScaleReg is actually X+C. If so, we can turn this into adding
  // X*Scale + C*Scale to the addr mode. If we found an available IV
  // increment, do not go any further: we can reuse it and cannot eliminate
  // it.
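  // A hypothetical example of this fold, with Scale == 2:
  //   %sum = add i64 %x, 12
  //   ... [base + %sum * 2]
  // becomes
  //   ... [base + %x * 2 + 24]
  // i.e. ScaledReg becomes %x and BaseOffs grows by C * Scale.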
4234 | ConstantInt *CI = nullptr; |
4235 | Value *AddLHS = nullptr; |
4236 | if (isa<Instruction>(Val: ScaleReg) && // not a constant expr. |
4237 | match(V: ScaleReg, P: m_Add(L: m_Value(V&: AddLHS), R: m_ConstantInt(CI))) && |
4238 | !isIVIncrement(V: ScaleReg, LI: &LI) && CI->getValue().isSignedIntN(N: 64)) { |
4239 | TestAddrMode.InBounds = false; |
4240 | TestAddrMode.ScaledReg = AddLHS; |
4241 | TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale; |
4242 | |
4243 | // If this addressing mode is legal, commit it and remember that we folded |
4244 | // this instruction. |
4245 | if (TLI.isLegalAddressingMode(DL, AM: TestAddrMode, Ty: AccessTy, AddrSpace)) { |
4246 | AddrModeInsts.push_back(Elt: cast<Instruction>(Val: ScaleReg)); |
4247 | AddrMode = TestAddrMode; |
4248 | return true; |
4249 | } |
4250 | // Restore status quo. |
4251 | TestAddrMode = AddrMode; |
4252 | } |
4253 | |
4254 | // If this is an add recurrence with a constant step, return the increment |
4255 | // instruction and the canonicalized step. |
4256 | auto GetConstantStep = |
4257 | [this](const Value *V) -> std::optional<std::pair<Instruction *, APInt>> { |
4258 | auto *PN = dyn_cast<PHINode>(Val: V); |
4259 | if (!PN) |
4260 | return std::nullopt; |
4261 | auto IVInc = getIVIncrement(PN, LI: &LI); |
4262 | if (!IVInc) |
4263 | return std::nullopt; |
    // TODO: The result of the intrinsics above is two's complement. However,
    // when the IV inc is expressed as add or sub, iv.next is potentially a
    // poison value. If it has nuw or nsw flags, we need to make sure that
    // these flags are inferable at the point of the memory instruction.
    // Otherwise we would be replacing a well-defined two's-complement
    // computation with poison. Currently, to avoid the potentially complex
    // analysis needed to prove this, we reject such cases.
4270 | if (auto *OIVInc = dyn_cast<OverflowingBinaryOperator>(Val: IVInc->first)) |
4271 | if (OIVInc->hasNoSignedWrap() || OIVInc->hasNoUnsignedWrap()) |
4272 | return std::nullopt; |
4273 | if (auto *ConstantStep = dyn_cast<ConstantInt>(Val: IVInc->second)) |
4274 | return std::make_pair(x&: IVInc->first, y: ConstantStep->getValue()); |
4275 | return std::nullopt; |
4276 | }; |
4277 | |
  // Try to account for the following special case:
  // 1. ScaleReg is an induction variable;
  // 2. We use it with a non-zero offset;
  // 3. The IV's increment is available at the point of the memory
  //    instruction.
  //
  // In this case, we may reuse the IV increment instead of the IV Phi to
  // achieve the following advantages:
  // 1. If the IV step matches the offset, we will have no need for the
  //    offset;
  // 2. Even if they don't match, we will reduce the overlap of the live
  //    ranges of the IV and the IV increment, which will potentially lead to
  //    better register allocation.
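  // As a hypothetical illustration, for an IV with step 8 matched as
  // [%iv * 1 + 8], where
  //   %iv.next = add i64 %iv, 8
  // we can instead match [%iv.next * 1 + 0], reusing the already available
  // increment and dropping the offset entirely (advantage 1 above).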
4289 | if (AddrMode.BaseOffs) { |
4290 | if (auto IVStep = GetConstantStep(ScaleReg)) { |
4291 | Instruction *IVInc = IVStep->first; |
      // The following assert is important to ensure a lack of infinite
      // loops. This transform is (intentionally) the inverse of the one just
      // above. If they didn't agree on the definition of an increment, we'd
      // alternate back and forth indefinitely.
      assert(isIVIncrement(IVInc, &LI) && "implied by GetConstantStep");
4297 | APInt Step = IVStep->second; |
4298 | APInt Offset = Step * AddrMode.Scale; |
4299 | if (Offset.isSignedIntN(N: 64)) { |
4300 | TestAddrMode.InBounds = false; |
4301 | TestAddrMode.ScaledReg = IVInc; |
4302 | TestAddrMode.BaseOffs -= Offset.getLimitedValue(); |
        // If this addressing mode is legal, commit it.
        // (Note that we defer the (expensive) domtree-based legality check
        // to the very last possible point.)
4306 | if (TLI.isLegalAddressingMode(DL, AM: TestAddrMode, Ty: AccessTy, AddrSpace) && |
4307 | getDTFn().dominates(Def: IVInc, User: MemoryInst)) { |
4308 | AddrModeInsts.push_back(Elt: cast<Instruction>(Val: IVInc)); |
4309 | AddrMode = TestAddrMode; |
4310 | return true; |
4311 | } |
4312 | // Restore status quo. |
4313 | TestAddrMode = AddrMode; |
4314 | } |
4315 | } |
4316 | } |
4317 | |
4318 | // Otherwise, just return what we have. |
4319 | return true; |
4320 | } |
4321 | |
4322 | /// This is a little filter, which returns true if an addressing computation |
4323 | /// involving I might be folded into a load/store accessing it. |
/// This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that matchOperationAddr can.
4326 | static bool MightBeFoldableInst(Instruction *I) { |
4327 | switch (I->getOpcode()) { |
4328 | case Instruction::BitCast: |
4329 | case Instruction::AddrSpaceCast: |
4330 | // Don't touch identity bitcasts. |
4331 | if (I->getType() == I->getOperand(i: 0)->getType()) |
4332 | return false; |
4333 | return I->getType()->isIntOrPtrTy(); |
4334 | case Instruction::PtrToInt: |
4335 | // PtrToInt is always a noop, as we know that the int type is pointer sized. |
4336 | return true; |
4337 | case Instruction::IntToPtr: |
4338 | // We know the input is intptr_t, so this is foldable. |
4339 | return true; |
4340 | case Instruction::Add: |
4341 | return true; |
4342 | case Instruction::Mul: |
4343 | case Instruction::Shl: |
4344 | // Can only handle X*C and X << C. |
4345 | return isa<ConstantInt>(Val: I->getOperand(i: 1)); |
4346 | case Instruction::GetElementPtr: |
4347 | return true; |
4348 | default: |
4349 | return false; |
4350 | } |
4351 | } |
4352 | |
4353 | /// Check whether or not \p Val is a legal instruction for \p TLI. |
4354 | /// \note \p Val is assumed to be the product of some type promotion. |
4355 | /// Therefore if \p Val has an undefined state in \p TLI, this is assumed |
4356 | /// to be legal, as the non-promoted value would have had the same state. |
4357 | static bool isPromotedInstructionLegal(const TargetLowering &TLI, |
4358 | const DataLayout &DL, Value *Val) { |
4359 | Instruction *PromotedInst = dyn_cast<Instruction>(Val); |
4360 | if (!PromotedInst) |
4361 | return false; |
4362 | int ISDOpcode = TLI.InstructionOpcodeToISD(Opcode: PromotedInst->getOpcode()); |
4363 | // If the ISDOpcode is undefined, it was undefined before the promotion. |
4364 | if (!ISDOpcode) |
4365 | return true; |
4366 | // Otherwise, check if the promoted instruction is legal or not. |
4367 | return TLI.isOperationLegalOrCustom( |
4368 | Op: ISDOpcode, VT: TLI.getValueType(DL, Ty: PromotedInst->getType())); |
4369 | } |
4370 | |
4371 | namespace { |
4372 | |
/// Helper class to perform type promotion.
4374 | class TypePromotionHelper { |
4375 | /// Utility function to add a promoted instruction \p ExtOpnd to |
4376 | /// \p PromotedInsts and record the type of extension we have seen. |
4377 | static void addPromotedInst(InstrToOrigTy &PromotedInsts, |
4378 | Instruction *ExtOpnd, bool IsSExt) { |
4379 | ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension; |
4380 | InstrToOrigTy::iterator It = PromotedInsts.find(Val: ExtOpnd); |
4381 | if (It != PromotedInsts.end()) { |
      // If the new extension is the same as the original, the information in
      // PromotedInsts[ExtOpnd] is still correct.
      if (It->second.getInt() == ExtTy)
        return;

      // Now that the new extension is different from the old one, we
      // invalidate the type information by setting the extension type to
      // BothExtension.
      ExtTy = BothExtension;
4392 | PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy); |
4393 | } |
4394 | |
4395 | /// Utility function to query the original type of instruction \p Opnd |
4396 | /// with a matched extension type. If the extension doesn't match, we |
4397 | /// cannot use the information we had on the original type. |
4398 | /// BothExtension doesn't match any extension type. |
4399 | static const Type *getOrigType(const InstrToOrigTy &PromotedInsts, |
4400 | Instruction *Opnd, bool IsSExt) { |
4401 | ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension; |
4402 | InstrToOrigTy::const_iterator It = PromotedInsts.find(Val: Opnd); |
4403 | if (It != PromotedInsts.end() && It->second.getInt() == ExtTy) |
4404 | return It->second.getPointer(); |
4405 | return nullptr; |
4406 | } |
4407 | |
4408 | /// Utility function to check whether or not a sign or zero extension |
4409 | /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by |
4410 | /// either using the operands of \p Inst or promoting \p Inst. |
4411 | /// The type of the extension is defined by \p IsSExt. |
4412 | /// In other words, check if: |
4413 | /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType. |
4414 | /// #1 Promotion applies: |
4415 | /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...). |
4416 | /// #2 Operand reuses: |
4417 | /// ext opnd1 to ConsideredExtType. |
4418 | /// \p PromotedInsts maps the instructions to their type before promotion. |
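  /// A hypothetical illustration with IsSExt == true:
  ///   #1: sext i16 (add nsw i16 %a, %b) to i32
  ///       --> add nsw i32 (sext i16 %a to i32), (sext i16 %b to i32)
  ///   #2: sext i16 (trunc i32 %x to i16) to i32
  ///       --> reuse %x directly when the dropped bits are known to be sign
  ///           bits.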
4419 | static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType, |
4420 | const InstrToOrigTy &PromotedInsts, bool IsSExt); |
4421 | |
4422 | /// Utility function to determine if \p OpIdx should be promoted when |
4423 | /// promoting \p Inst. |
4424 | static bool shouldExtOperand(const Instruction *Inst, int OpIdx) { |
4425 | return !(isa<SelectInst>(Val: Inst) && OpIdx == 0); |
4426 | } |
4427 | |
4428 | /// Utility function to promote the operand of \p Ext when this |
4429 | /// operand is a promotable trunc or sext or zext. |
4430 | /// \p PromotedInsts maps the instructions to their type before promotion. |
4431 | /// \p CreatedInstsCost[out] contains the cost of all instructions |
4432 | /// created to promote the operand of Ext. |
4433 | /// Newly added extensions are inserted in \p Exts. |
4434 | /// Newly added truncates are inserted in \p Truncs. |
4435 | /// Should never be called directly. |
4436 | /// \return The promoted value which is used instead of Ext. |
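  /// As a hypothetical example,
  ///   %z = zext i16 %v to i32
  ///   %s = sext i32 %z to i64
  /// would be rewritten to the single instruction
  ///   %z2 = zext i16 %v to i64
  /// since the high bits produced by the zext are known to be zero either
  /// way.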
4437 | static Value *promoteOperandForTruncAndAnyExt( |
4438 | Instruction *Ext, TypePromotionTransaction &TPT, |
4439 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, |
4440 | SmallVectorImpl<Instruction *> *Exts, |
4441 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI); |
4442 | |
4443 | /// Utility function to promote the operand of \p Ext when this |
4444 | /// operand is promotable and is not a supported trunc or sext. |
4445 | /// \p PromotedInsts maps the instructions to their type before promotion. |
4446 | /// \p CreatedInstsCost[out] contains the cost of all the instructions |
4447 | /// created to promote the operand of Ext. |
4448 | /// Newly added extensions are inserted in \p Exts. |
4449 | /// Newly added truncates are inserted in \p Truncs. |
4450 | /// Should never be called directly. |
4451 | /// \return The promoted value which is used instead of Ext. |
4452 | static Value *promoteOperandForOther(Instruction *Ext, |
4453 | TypePromotionTransaction &TPT, |
4454 | InstrToOrigTy &PromotedInsts, |
4455 | unsigned &CreatedInstsCost, |
4456 | SmallVectorImpl<Instruction *> *Exts, |
4457 | SmallVectorImpl<Instruction *> *Truncs, |
4458 | const TargetLowering &TLI, bool IsSExt); |
4459 | |
4460 | /// \see promoteOperandForOther. |
4461 | static Value *signExtendOperandForOther( |
4462 | Instruction *Ext, TypePromotionTransaction &TPT, |
4463 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, |
4464 | SmallVectorImpl<Instruction *> *Exts, |
4465 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { |
4466 | return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, |
4467 | Exts, Truncs, TLI, IsSExt: true); |
4468 | } |
4469 | |
4470 | /// \see promoteOperandForOther. |
4471 | static Value *zeroExtendOperandForOther( |
4472 | Instruction *Ext, TypePromotionTransaction &TPT, |
4473 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, |
4474 | SmallVectorImpl<Instruction *> *Exts, |
4475 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { |
4476 | return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, |
4477 | Exts, Truncs, TLI, IsSExt: false); |
4478 | } |
4479 | |
4480 | public: |
4481 | /// Type for the utility function that promotes the operand of Ext. |
4482 | using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT, |
4483 | InstrToOrigTy &PromotedInsts, |
4484 | unsigned &CreatedInstsCost, |
4485 | SmallVectorImpl<Instruction *> *Exts, |
4486 | SmallVectorImpl<Instruction *> *Truncs, |
4487 | const TargetLowering &TLI); |
4488 | |
4489 | /// Given a sign/zero extend instruction \p Ext, return the appropriate |
4490 | /// action to promote the operand of \p Ext instead of using Ext. |
4491 | /// \return NULL if no promotable action is possible with the current |
4492 | /// sign extension. |
4493 | /// \p InsertedInsts keeps track of all the instructions inserted by the |
4494 | /// other CodeGenPrepare optimizations. This information is important |
4495 | /// because we do not want to promote these instructions as CodeGenPrepare |
4496 | /// will reinsert them later. Thus creating an infinite loop: create/remove. |
4497 | /// \p PromotedInsts maps the instructions to their type before promotion. |
4498 | static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts, |
4499 | const TargetLowering &TLI, |
4500 | const InstrToOrigTy &PromotedInsts); |
4501 | }; |
4502 | |
4503 | } // end anonymous namespace |
4504 | |
4505 | bool TypePromotionHelper::canGetThrough(const Instruction *Inst, |
4506 | Type *ConsideredExtType, |
4507 | const InstrToOrigTy &PromotedInsts, |
4508 | bool IsSExt) { |
4509 | // The promotion helper does not know how to deal with vector types yet. |
4510 | // To be able to fix that, we would need to fix the places where we |
4511 | // statically extend, e.g., constants and such. |
4512 | if (Inst->getType()->isVectorTy()) |
4513 | return false; |
4514 | |
4515 | // We can always get through zext. |
4516 | if (isa<ZExtInst>(Val: Inst)) |
4517 | return true; |
4518 | |
4519 | // sext(sext) is ok too. |
4520 | if (IsSExt && isa<SExtInst>(Val: Inst)) |
4521 | return true; |
4522 | |
  // We can get through a binary operator if it is legal. In other words, the
  // binary operator must have the appropriate nuw or nsw flag.
4525 | if (const auto *BinOp = dyn_cast<BinaryOperator>(Val: Inst)) |
4526 | if (isa<OverflowingBinaryOperator>(Val: BinOp) && |
4527 | ((!IsSExt && BinOp->hasNoUnsignedWrap()) || |
4528 | (IsSExt && BinOp->hasNoSignedWrap()))) |
4529 | return true; |
4530 | |
  // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst))
  // ext(or(opnd, cst))  --> or(ext(opnd), ext(cst))
4532 | if ((Inst->getOpcode() == Instruction::And || |
4533 | Inst->getOpcode() == Instruction::Or)) |
4534 | return true; |
4535 | |
4536 | // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst)) |
4537 | if (Inst->getOpcode() == Instruction::Xor) { |
4538 | // Make sure it is not a NOT. |
4539 | if (const auto *Cst = dyn_cast<ConstantInt>(Val: Inst->getOperand(i: 1))) |
4540 | if (!Cst->getValue().isAllOnes()) |
4541 | return true; |
4542 | } |
4543 | |
  // zext(lshr(opnd, cst)) --> lshr(zext(opnd), zext(cst))
  // It may change a poisoned value into a regular value, like
  //   zext i32 (lshr i8 %val, 12) --> lshr i32 (zext i8 %val), 12
  //   poisoned value                  regular value
  // It should be OK since undef covers a valid value.
4549 | if (Inst->getOpcode() == Instruction::LShr && !IsSExt) |
4550 | return true; |
4551 | |
  // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst)
  // It may change a poisoned value into a regular value, like
  //   zext i32 (shl i8 %val, 12) --> shl i32 (zext i8 %val), 12
  //   poisoned value                 regular value
  // It should be OK since undef covers a valid value.
4557 | if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) { |
4558 | const auto *ExtInst = cast<const Instruction>(Val: *Inst->user_begin()); |
4559 | if (ExtInst->hasOneUse()) { |
4560 | const auto *AndInst = dyn_cast<const Instruction>(Val: *ExtInst->user_begin()); |
4561 | if (AndInst && AndInst->getOpcode() == Instruction::And) { |
4562 | const auto *Cst = dyn_cast<ConstantInt>(Val: AndInst->getOperand(i: 1)); |
4563 | if (Cst && |
4564 | Cst->getValue().isIntN(N: Inst->getType()->getIntegerBitWidth())) |
4565 | return true; |
4566 | } |
4567 | } |
4568 | } |
4569 | |
4570 | // Check if we can do the following simplification. |
4571 | // ext(trunc(opnd)) --> ext(opnd) |
4572 | if (!isa<TruncInst>(Val: Inst)) |
4573 | return false; |
4574 | |
4575 | Value *OpndVal = Inst->getOperand(i: 0); |
4576 | // Check if we can use this operand in the extension. |
4577 | // If the type is larger than the result type of the extension, we cannot. |
4578 | if (!OpndVal->getType()->isIntegerTy() || |
4579 | OpndVal->getType()->getIntegerBitWidth() > |
4580 | ConsideredExtType->getIntegerBitWidth()) |
4581 | return false; |
4582 | |
4583 | // If the operand of the truncate is not an instruction, we will not have |
4584 | // any information on the dropped bits. |
  // (Actually we could for constants, but it is not worth the extra logic.)
4586 | Instruction *Opnd = dyn_cast<Instruction>(Val: OpndVal); |
4587 | if (!Opnd) |
4588 | return false; |
4589 | |
  // Check that the source type is narrow enough.
  // I.e., check that the trunc just drops extended bits of the same kind as
  // the extension.
  // #1 get the type of the operand and check the kind of the extended bits.
  const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt);
  if (!OpndType) {
    if ((IsSExt && isa<SExtInst>(Val: Opnd)) ||
        (!IsSExt && isa<ZExtInst>(Val: Opnd)))
      OpndType = Opnd->getOperand(i: 0)->getType();
    else
      return false;
  }
4601 | |
4602 | // #2 check that the truncate just drops extended bits. |
4603 | return Inst->getType()->getIntegerBitWidth() >= |
4604 | OpndType->getIntegerBitWidth(); |
4605 | } |
4606 | |
4607 | TypePromotionHelper::Action TypePromotionHelper::getAction( |
4608 | Instruction *Ext, const SetOfInstrs &InsertedInsts, |
4609 | const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) { |
  assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
         "Unexpected instruction type");
4612 | Instruction *ExtOpnd = dyn_cast<Instruction>(Val: Ext->getOperand(i: 0)); |
4613 | Type *ExtTy = Ext->getType(); |
4614 | bool IsSExt = isa<SExtInst>(Val: Ext); |
  // If the operand of the extension is not an instruction, we cannot
  // get through.
  // If it is one, check whether we can get through.
4618 | if (!ExtOpnd || !canGetThrough(Inst: ExtOpnd, ConsideredExtType: ExtTy, PromotedInsts, IsSExt)) |
4619 | return nullptr; |
4620 | |
  // Do not promote if the operand has been added by codegenprepare.
  // Otherwise, it means we are undoing an optimization that is likely to be
  // redone, thus causing a potential infinite loop.
4624 | if (isa<TruncInst>(Val: ExtOpnd) && InsertedInsts.count(Ptr: ExtOpnd)) |
4625 | return nullptr; |
4626 | |
  // SExt, ZExt or Trunc instruction.
  // Return the related handler.
4629 | if (isa<SExtInst>(Val: ExtOpnd) || isa<TruncInst>(Val: ExtOpnd) || |
4630 | isa<ZExtInst>(Val: ExtOpnd)) |
4631 | return promoteOperandForTruncAndAnyExt; |
4632 | |
4633 | // Regular instruction. |
4634 | // Abort early if we will have to insert non-free instructions. |
4635 | if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(FromTy: ExtTy, ToTy: ExtOpnd->getType())) |
4636 | return nullptr; |
4637 | return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther; |
4638 | } |
4639 | |
4640 | Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt( |
4641 | Instruction *SExt, TypePromotionTransaction &TPT, |
4642 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, |
4643 | SmallVectorImpl<Instruction *> *Exts, |
4644 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { |
4645 | // By construction, the operand of SExt is an instruction. Otherwise we cannot |
4646 | // get through it and this method should not be called. |
4647 | Instruction *SExtOpnd = cast<Instruction>(Val: SExt->getOperand(i: 0)); |
4648 | Value *ExtVal = SExt; |
4649 | bool HasMergedNonFreeExt = false; |
4650 | if (isa<ZExtInst>(Val: SExtOpnd)) { |
4651 | // Replace s|zext(zext(opnd)) |
4652 | // => zext(opnd). |
4653 | HasMergedNonFreeExt = !TLI.isExtFree(I: SExtOpnd); |
4654 | Value *ZExt = |
4655 | TPT.createZExt(Inst: SExt, Opnd: SExtOpnd->getOperand(i: 0), Ty: SExt->getType()); |
4656 | TPT.replaceAllUsesWith(Inst: SExt, New: ZExt); |
4657 | TPT.eraseInstruction(Inst: SExt); |
4658 | ExtVal = ZExt; |
4659 | } else { |
4660 | // Replace z|sext(trunc(opnd)) or sext(sext(opnd)) |
4661 | // => z|sext(opnd). |
4662 | TPT.setOperand(Inst: SExt, Idx: 0, NewVal: SExtOpnd->getOperand(i: 0)); |
4663 | } |
4664 | CreatedInstsCost = 0; |
4665 | |
4666 | // Remove dead code. |
4667 | if (SExtOpnd->use_empty()) |
4668 | TPT.eraseInstruction(Inst: SExtOpnd); |
4669 | |
4670 | // Check if the extension is still needed. |
4671 | Instruction *ExtInst = dyn_cast<Instruction>(Val: ExtVal); |
4672 | if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(i: 0)->getType()) { |
4673 | if (ExtInst) { |
4674 | if (Exts) |
4675 | Exts->push_back(Elt: ExtInst); |
4676 | CreatedInstsCost = !TLI.isExtFree(I: ExtInst) && !HasMergedNonFreeExt; |
4677 | } |
4678 | return ExtVal; |
4679 | } |
4680 | |
4681 | // At this point we have: ext ty opnd to ty. |
4682 | // Reassign the uses of ExtInst to the opnd and remove ExtInst. |
4683 | Value *NextVal = ExtInst->getOperand(i: 0); |
4684 | TPT.eraseInstruction(Inst: ExtInst, NewVal: NextVal); |
4685 | return NextVal; |
4686 | } |
4687 | |
4688 | Value *TypePromotionHelper::promoteOperandForOther( |
4689 | Instruction *Ext, TypePromotionTransaction &TPT, |
4690 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, |
4691 | SmallVectorImpl<Instruction *> *Exts, |
4692 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI, |
4693 | bool IsSExt) { |
4694 | // By construction, the operand of Ext is an instruction. Otherwise we cannot |
4695 | // get through it and this method should not be called. |
4696 | Instruction *ExtOpnd = cast<Instruction>(Val: Ext->getOperand(i: 0)); |
4697 | CreatedInstsCost = 0; |
4698 | if (!ExtOpnd->hasOneUse()) { |
4699 | // ExtOpnd will be promoted. |
    // All its uses, except Ext, will need to use a truncated value of the
    // promoted version.
4702 | // Create the truncate now. |
4703 | Value *Trunc = TPT.createTrunc(Opnd: Ext, Ty: ExtOpnd->getType()); |
4704 | if (Instruction *ITrunc = dyn_cast<Instruction>(Val: Trunc)) { |
4705 | // Insert it just after the definition. |
4706 | ITrunc->moveAfter(MovePos: ExtOpnd); |
4707 | if (Truncs) |
4708 | Truncs->push_back(Elt: ITrunc); |
4709 | } |
4710 | |
4711 | TPT.replaceAllUsesWith(Inst: ExtOpnd, New: Trunc); |
4712 | // Restore the operand of Ext (which has been replaced by the previous call |
4713 | // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext. |
4714 | TPT.setOperand(Inst: Ext, Idx: 0, NewVal: ExtOpnd); |
4715 | } |
4716 | |
4717 | // Get through the Instruction: |
4718 | // 1. Update its type. |
4719 | // 2. Replace the uses of Ext by Inst. |
4720 | // 3. Extend each operand that needs to be extended. |
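  //
  // As a hypothetical before/after for: %e = sext i32 (or i32 %a, %b) to i64
  //   1. the 'or' is mutated to operate on i64,
  //   2. uses of %e are redirected to the 'or',
  //   3. %a and %b are each wrapped in a 'sext ... to i64'.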
4721 | |
4722 | // Remember the original type of the instruction before promotion. |
4723 | // This is useful to know that the high bits are sign extended bits. |
4724 | addPromotedInst(PromotedInsts, ExtOpnd, IsSExt); |
4725 | // Step #1. |
4726 | TPT.mutateType(Inst: ExtOpnd, NewTy: Ext->getType()); |
4727 | // Step #2. |
4728 | TPT.replaceAllUsesWith(Inst: Ext, New: ExtOpnd); |
4729 | // Step #3. |
4730 | LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n" ); |
4731 | for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx; |
4732 | ++OpIdx) { |
4733 | LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n'); |
4734 | if (ExtOpnd->getOperand(i: OpIdx)->getType() == Ext->getType() || |
4735 | !shouldExtOperand(Inst: ExtOpnd, OpIdx)) { |
4736 | LLVM_DEBUG(dbgs() << "No need to propagate\n" ); |
4737 | continue; |
4738 | } |
4739 | // Check if we can statically extend the operand. |
4740 | Value *Opnd = ExtOpnd->getOperand(i: OpIdx); |
4741 | if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Val: Opnd)) { |
4742 | LLVM_DEBUG(dbgs() << "Statically extend\n" ); |
4743 | unsigned BitWidth = Ext->getType()->getIntegerBitWidth(); |
4744 | APInt CstVal = IsSExt ? Cst->getValue().sext(width: BitWidth) |
4745 | : Cst->getValue().zext(width: BitWidth); |
4746 | TPT.setOperand(Inst: ExtOpnd, Idx: OpIdx, NewVal: ConstantInt::get(Ty: Ext->getType(), V: CstVal)); |
4747 | continue; |
4748 | } |
    // UndefValues are typed, so we have to statically extend them.
    if (isa<UndefValue>(Val: Opnd)) {
      LLVM_DEBUG(dbgs() << "Statically extend\n");
      TPT.setOperand(Inst: ExtOpnd, Idx: OpIdx, NewVal: UndefValue::get(T: Ext->getType()));
4753 | continue; |
4754 | } |
4755 | |
    // Otherwise we have to explicitly extend the operand.
4757 | Value *ValForExtOpnd = IsSExt |
4758 | ? TPT.createSExt(Inst: ExtOpnd, Opnd, Ty: Ext->getType()) |
4759 | : TPT.createZExt(Inst: ExtOpnd, Opnd, Ty: Ext->getType()); |
4760 | TPT.setOperand(Inst: ExtOpnd, Idx: OpIdx, NewVal: ValForExtOpnd); |
4761 | Instruction *InstForExtOpnd = dyn_cast<Instruction>(Val: ValForExtOpnd); |
4762 | if (!InstForExtOpnd) |
4763 | continue; |
4764 | |
4765 | if (Exts) |
4766 | Exts->push_back(Elt: InstForExtOpnd); |
4767 | |
4768 | CreatedInstsCost += !TLI.isExtFree(I: InstForExtOpnd); |
4769 | } |
4770 | LLVM_DEBUG(dbgs() << "Extension is useless now\n" ); |
4771 | TPT.eraseInstruction(Inst: Ext); |
4772 | return ExtOpnd; |
4773 | } |
4774 | |
4775 | /// Check whether or not promoting an instruction to a wider type is profitable. |
4776 | /// \p NewCost gives the cost of extension instructions created by the |
4777 | /// promotion. |
/// \p OldCost gives the cost of extension instructions before the promotion
/// plus the number of instructions that have been matched in the addressing
/// mode thanks to the promotion.
4781 | /// \p PromotedOperand is the value that has been promoted. |
4782 | /// \return True if the promotion is profitable, false otherwise. |
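/// For instance, under these rules a promotion that creates one non-free
/// extension (NewCost == 1) while removing one non-free extension and
/// folding one instruction into the addressing mode (OldCost == 2) is
/// considered profitable.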
4783 | bool AddressingModeMatcher::isPromotionProfitable( |
4784 | unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const { |
4785 | LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost |
4786 | << '\n'); |
4787 | // The cost of the new extensions is greater than the cost of the |
4788 | // old extension plus what we folded. |
4789 | // This is not profitable. |
4790 | if (NewCost > OldCost) |
4791 | return false; |
4792 | if (NewCost < OldCost) |
4793 | return true; |
4794 | // The promotion is neutral but it may help folding the sign extension in |
4795 | // loads for instance. |
4796 | // Check that we did not create an illegal instruction. |
4797 | return isPromotedInstructionLegal(TLI, DL, Val: PromotedOperand); |
4798 | } |
4799 | |
4800 | /// Given an instruction or constant expr, see if we can fold the operation |
4801 | /// into the addressing mode. If so, update the addressing mode and return |
4802 | /// true, otherwise return false without modifying AddrMode. |
/// If \p MovedAway is not NULL, it contains information on whether or
/// not AddrInst has to be folded into the addressing mode on success.
/// If \p MovedAway == true, \p AddrInst will not be part of the addressing
/// mode because it has been moved away.
/// Thus AddrInst must not be added to the matched instructions.
4808 | /// This state can happen when AddrInst is a sext, since it may be moved away. |
4809 | /// Therefore, AddrInst may not be valid when MovedAway is true and it must |
4810 | /// not be referenced anymore. |
4811 | bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode, |
4812 | unsigned Depth, |
4813 | bool *MovedAway) { |
4814 | // Avoid exponential behavior on extremely deep expression trees. |
4815 | if (Depth >= 5) |
4816 | return false; |
4817 | |
4818 | // By default, all matched instructions stay in place. |
4819 | if (MovedAway) |
4820 | *MovedAway = false; |
4821 | |
4822 | switch (Opcode) { |
4823 | case Instruction::PtrToInt: |
4824 | // PtrToInt is always a noop, as we know that the int type is pointer sized. |
4825 | return matchAddr(Addr: AddrInst->getOperand(i: 0), Depth); |
4826 | case Instruction::IntToPtr: { |
4827 | auto AS = AddrInst->getType()->getPointerAddressSpace(); |
4828 | auto PtrTy = MVT::getIntegerVT(BitWidth: DL.getPointerSizeInBits(AS)); |
4829 | // This inttoptr is a no-op if the integer type is pointer sized. |
4830 | if (TLI.getValueType(DL, Ty: AddrInst->getOperand(i: 0)->getType()) == PtrTy) |
4831 | return matchAddr(Addr: AddrInst->getOperand(i: 0), Depth); |
4832 | return false; |
4833 | } |
4834 | case Instruction::BitCast: |
4835 | // BitCast is always a noop, and we can handle it as long as it is |
4836 | // int->int or pointer->pointer (we don't want int<->fp or something). |
4837 | if (AddrInst->getOperand(i: 0)->getType()->isIntOrPtrTy() && |
4838 | // Don't touch identity bitcasts. These were probably put here by LSR, |
4839 | // and we don't want to mess around with them. Assume it knows what it |
4840 | // is doing. |
4841 | AddrInst->getOperand(i: 0)->getType() != AddrInst->getType()) |
4842 | return matchAddr(Addr: AddrInst->getOperand(i: 0), Depth); |
4843 | return false; |
4844 | case Instruction::AddrSpaceCast: { |
4845 | unsigned SrcAS = |
4846 | AddrInst->getOperand(i: 0)->getType()->getPointerAddressSpace(); |
4847 | unsigned DestAS = AddrInst->getType()->getPointerAddressSpace(); |
4848 | if (TLI.getTargetMachine().isNoopAddrSpaceCast(SrcAS, DestAS)) |
4849 | return matchAddr(Addr: AddrInst->getOperand(i: 0), Depth); |
4850 | return false; |
4851 | } |
4852 | case Instruction::Add: { |
4853 | // Check to see if we can merge in one operand, then the other. If so, we |
4854 | // win. |
4855 | ExtAddrMode BackupAddrMode = AddrMode; |
4856 | unsigned OldSize = AddrModeInsts.size(); |
4857 | // Start a transaction at this point. |
    // The LHS may match but not the RHS.
    // Therefore, we need a higher-level restoration point to undo a
    // partially matched operation.
4861 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = |
4862 | TPT.getRestorationPoint(); |
4863 | |
    // Try to match an integer constant second to increase its chance of
    // ending up in `BaseOffs` and, correspondingly, decrease its chance of
    // ending up in `BaseReg`.
4866 | int First = 0, Second = 1; |
4867 | if (isa<ConstantInt>(Val: AddrInst->getOperand(i: First)) |
4868 | && !isa<ConstantInt>(Val: AddrInst->getOperand(i: Second))) |
4869 | std::swap(a&: First, b&: Second); |
4870 | AddrMode.InBounds = false; |
4871 | if (matchAddr(Addr: AddrInst->getOperand(i: First), Depth: Depth + 1) && |
4872 | matchAddr(Addr: AddrInst->getOperand(i: Second), Depth: Depth + 1)) |
4873 | return true; |
4874 | |
4875 | // Restore the old addr mode info. |
4876 | AddrMode = BackupAddrMode; |
4877 | AddrModeInsts.resize(N: OldSize); |
4878 | TPT.rollback(Point: LastKnownGood); |
4879 | |
4880 | // Otherwise this was over-aggressive. Try merging operands in the opposite |
4881 | // order. |
4882 | if (matchAddr(Addr: AddrInst->getOperand(i: Second), Depth: Depth + 1) && |
4883 | matchAddr(Addr: AddrInst->getOperand(i: First), Depth: Depth + 1)) |
4884 | return true; |
4885 | |
4886 | // Otherwise we definitely can't merge the ADD in. |
4887 | AddrMode = BackupAddrMode; |
4888 | AddrModeInsts.resize(N: OldSize); |
4889 | TPT.rollback(Point: LastKnownGood); |
4890 | break; |
4891 | } |
4892 | // case Instruction::Or: |
4893 | // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD. |
4894 | // break; |
4895 | case Instruction::Mul: |
4896 | case Instruction::Shl: { |
4897 | // Can only handle X*C and X << C. |
4898 | AddrMode.InBounds = false; |
4899 | ConstantInt *RHS = dyn_cast<ConstantInt>(Val: AddrInst->getOperand(i: 1)); |
4900 | if (!RHS || RHS->getBitWidth() > 64) |
4901 | return false; |
4902 | int64_t Scale = Opcode == Instruction::Shl |
4903 | ? 1LL << RHS->getLimitedValue(Limit: RHS->getBitWidth() - 1) |
4904 | : RHS->getSExtValue(); |
4905 | |
4906 | return matchScaledValue(ScaleReg: AddrInst->getOperand(i: 0), Scale, Depth); |
4907 | } |
4908 | case Instruction::GetElementPtr: { |
    // Scan the GEP. We check whether it contains constant offsets and at
    // most one variable offset.
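    // For example, a (hypothetical) GEP such as
    //   getelementptr { i32, [8 x i32] }, ptr %p, i64 0, i32 1, i64 %i
    // decomposes into ConstantOffset = 4 (the offset of field 1) plus one
    // variable index %i with VariableScale = 4 (the i32 element stride).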
4911 | int VariableOperand = -1; |
4912 | unsigned VariableScale = 0; |
4913 | |
4914 | int64_t ConstantOffset = 0; |
4915 | gep_type_iterator GTI = gep_type_begin(GEP: AddrInst); |
4916 | for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) { |
4917 | if (StructType *STy = GTI.getStructTypeOrNull()) { |
4918 | const StructLayout *SL = DL.getStructLayout(Ty: STy); |
4919 | unsigned Idx = |
4920 | cast<ConstantInt>(Val: AddrInst->getOperand(i))->getZExtValue(); |
4921 | ConstantOffset += SL->getElementOffset(Idx); |
4922 | } else { |
4923 | TypeSize TS = GTI.getSequentialElementStride(DL); |
4924 | if (TS.isNonZero()) { |
4925 | // The optimisations below currently only work for fixed offsets. |
4926 | if (TS.isScalable()) |
4927 | return false; |
4928 | int64_t TypeSize = TS.getFixedValue(); |
4929 | if (ConstantInt *CI = |
4930 | dyn_cast<ConstantInt>(Val: AddrInst->getOperand(i))) { |
4931 | const APInt &CVal = CI->getValue(); |
4932 | if (CVal.getSignificantBits() <= 64) { |
4933 | ConstantOffset += CVal.getSExtValue() * TypeSize; |
4934 | continue; |
4935 | } |
4936 | } |
4937 | // We only allow one variable index at the moment. |
4938 | if (VariableOperand != -1) |
4939 | return false; |
4940 | |
4941 | // Remember the variable index. |
4942 | VariableOperand = i; |
4943 | VariableScale = TypeSize; |
4944 | } |
4945 | } |
4946 | } |
4947 | |
4948 | // A common case is for the GEP to only do a constant offset. In this case, |
4949 | // just add it to the disp field and check validity. |
4950 | if (VariableOperand == -1) { |
4951 | AddrMode.BaseOffs += ConstantOffset; |
4952 | if (matchAddr(Addr: AddrInst->getOperand(i: 0), Depth: Depth + 1)) { |
4953 | if (!cast<GEPOperator>(Val: AddrInst)->isInBounds()) |
4954 | AddrMode.InBounds = false; |
4955 | return true; |
4956 | } |
4957 | AddrMode.BaseOffs -= ConstantOffset; |
4958 | |
4959 | if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(Val: AddrInst) && |
4960 | TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 && |
4961 | ConstantOffset > 0) { |
4962 | // Record GEPs with non-zero offsets as candidates for splitting in |
4963 | // the event that the offset cannot fit into the r+i addressing mode. |
4964 | // Simple and common case that only one GEP is used in calculating the |
4965 | // address for the memory access. |
4966 | Value *Base = AddrInst->getOperand(i: 0); |
4967 | auto *BaseI = dyn_cast<Instruction>(Val: Base); |
4968 | auto *GEP = cast<GetElementPtrInst>(Val: AddrInst); |
4969 | if (isa<Argument>(Val: Base) || isa<GlobalValue>(Val: Base) || |
4970 | (BaseI && !isa<CastInst>(Val: BaseI) && |
4971 | !isa<GetElementPtrInst>(Val: BaseI))) { |
4972 | // Make sure the parent block allows inserting non-PHI instructions |
4973 | // before the terminator. |
4974 | BasicBlock *Parent = BaseI ? BaseI->getParent() |
4975 | : &GEP->getFunction()->getEntryBlock(); |
4976 | if (!Parent->getTerminator()->isEHPad()) |
4977 | LargeOffsetGEP = std::make_pair(x&: GEP, y&: ConstantOffset); |
4978 | } |
4979 | } |
4980 | |
4981 | return false; |
4982 | } |
4983 | |
4984 | // Save the valid addressing mode in case we can't match. |
4985 | ExtAddrMode BackupAddrMode = AddrMode; |
4986 | unsigned OldSize = AddrModeInsts.size(); |
4987 | |
4988 | // See if the scale and offset amount is valid for this target. |
4989 | AddrMode.BaseOffs += ConstantOffset; |
4990 | if (!cast<GEPOperator>(Val: AddrInst)->isInBounds()) |
4991 | AddrMode.InBounds = false; |
4992 | |
4993 | // Match the base operand of the GEP. |
4994 | if (!matchAddr(Addr: AddrInst->getOperand(i: 0), Depth: Depth + 1)) { |
4995 | // If it couldn't be matched, just stuff the value in a register. |
4996 | if (AddrMode.HasBaseReg) { |
4997 | AddrMode = BackupAddrMode; |
4998 | AddrModeInsts.resize(N: OldSize); |
4999 | return false; |
5000 | } |
5001 | AddrMode.HasBaseReg = true; |
5002 | AddrMode.BaseReg = AddrInst->getOperand(i: 0); |
5003 | } |
5004 | |
5005 | // Match the remaining variable portion of the GEP. |
5006 | if (!matchScaledValue(ScaleReg: AddrInst->getOperand(i: VariableOperand), Scale: VariableScale, |
5007 | Depth)) { |
5008 | // If it couldn't be matched, try stuffing the base into a register |
5009 | // instead of matching it, and retrying the match of the scale. |
5010 | AddrMode = BackupAddrMode; |
5011 | AddrModeInsts.resize(N: OldSize); |
5012 | if (AddrMode.HasBaseReg) |
5013 | return false; |
5014 | AddrMode.HasBaseReg = true; |
5015 | AddrMode.BaseReg = AddrInst->getOperand(i: 0); |
5016 | AddrMode.BaseOffs += ConstantOffset; |
5017 | if (!matchScaledValue(ScaleReg: AddrInst->getOperand(i: VariableOperand), |
5018 | Scale: VariableScale, Depth)) { |
5019 | // If even that didn't work, bail. |
5020 | AddrMode = BackupAddrMode; |
5021 | AddrModeInsts.resize(N: OldSize); |
5022 | return false; |
5023 | } |
5024 | } |
5025 | |
5026 | return true; |
5027 | } |
5028 | case Instruction::SExt: |
5029 | case Instruction::ZExt: { |
5030 | Instruction *Ext = dyn_cast<Instruction>(Val: AddrInst); |
5031 | if (!Ext) |
5032 | return false; |
5033 | |
5034 | // Try to move this ext out of the way of the addressing mode. |
5035 | // Ask for a method for doing so. |
5036 | TypePromotionHelper::Action TPH = |
5037 | TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts); |
5038 | if (!TPH) |
5039 | return false; |
5040 | |
5041 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = |
5042 | TPT.getRestorationPoint(); |
5043 | unsigned CreatedInstsCost = 0; |
5044 | unsigned ExtCost = !TLI.isExtFree(I: Ext); |
5045 | Value *PromotedOperand = |
5046 | TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI); |
5047 | // SExt has been moved away. |
5048 | // Thus either it will be rematched later in the recursive calls or it is |
5049 | // gone. Anyway, we must not fold it into the addressing mode at this point. |
5050 | // E.g., |
5051 | // op = add opnd, 1 |
5052 | // idx = ext op |
5053 | // addr = gep base, idx |
5054 | // is now: |
5055 | // promotedOpnd = ext opnd <- no match here |
5056 | // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls) |
5057 | // addr = gep base, op <- match |
5058 | if (MovedAway) |
5059 | *MovedAway = true; |
5060 | |
    assert(PromotedOperand &&
           "TypePromotionHelper should have filtered out those cases");
5063 | |
5064 | ExtAddrMode BackupAddrMode = AddrMode; |
5065 | unsigned OldSize = AddrModeInsts.size(); |
5066 | |
5067 | if (!matchAddr(Addr: PromotedOperand, Depth) || |
5068 | // The total of the new cost is equal to the cost of the created |
5069 | // instructions. |
5070 | // The total of the old cost is equal to the cost of the extension plus |
5071 | // what we have saved in the addressing mode. |
5072 | !isPromotionProfitable(NewCost: CreatedInstsCost, |
5073 | OldCost: ExtCost + (AddrModeInsts.size() - OldSize), |
5074 | PromotedOperand)) { |
5075 | AddrMode = BackupAddrMode; |
5076 | AddrModeInsts.resize(N: OldSize); |
5077 | LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n" ); |
5078 | TPT.rollback(Point: LastKnownGood); |
5079 | return false; |
5080 | } |
5081 | return true; |
5082 | } |
5083 | case Instruction::Call: |
5084 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Val: AddrInst)) { |
5085 | if (II->getIntrinsicID() == Intrinsic::threadlocal_address) { |
5086 | GlobalValue &GV = cast<GlobalValue>(Val&: *II->getArgOperand(i: 0)); |
5087 | if (TLI.addressingModeSupportsTLS(GV)) |
5088 | return matchAddr(Addr: AddrInst->getOperand(i: 0), Depth); |
5089 | } |
5090 | } |
5091 | break; |
5092 | } |
5093 | return false; |
5094 | } |
5095 | |
5096 | /// If we can, try to add the value of 'Addr' into the current addressing mode. |
5097 | /// If Addr can't be added to AddrMode this returns false and leaves AddrMode |
5098 | /// unmodified. This assumes that Addr is either a pointer type or intptr_t |
5099 | /// for the target. |
5100 | /// |
5101 | bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) { |
5102 | // Start a transaction at this point that we will rollback if the matching |
5103 | // fails. |
5104 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = |
5105 | TPT.getRestorationPoint(); |
5106 | if (ConstantInt *CI = dyn_cast<ConstantInt>(Val: Addr)) { |
5107 | if (CI->getValue().isSignedIntN(N: 64)) { |
5108 | // Fold in immediates if legal for the target. |
5109 | AddrMode.BaseOffs += CI->getSExtValue(); |
5110 | if (TLI.isLegalAddressingMode(DL, AM: AddrMode, Ty: AccessTy, AddrSpace)) |
5111 | return true; |
5112 | AddrMode.BaseOffs -= CI->getSExtValue(); |
5113 | } |
5114 | } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Val: Addr)) { |
5115 | // If this is a global variable, try to fold it into the addressing mode. |
5116 | if (!AddrMode.BaseGV) { |
5117 | AddrMode.BaseGV = GV; |
5118 | if (TLI.isLegalAddressingMode(DL, AM: AddrMode, Ty: AccessTy, AddrSpace)) |
5119 | return true; |
5120 | AddrMode.BaseGV = nullptr; |
5121 | } |
5122 | } else if (Instruction *I = dyn_cast<Instruction>(Val: Addr)) { |
5123 | ExtAddrMode BackupAddrMode = AddrMode; |
5124 | unsigned OldSize = AddrModeInsts.size(); |
5125 | |
5126 | // Check to see if it is possible to fold this operation. |
5127 | bool MovedAway = false; |
5128 | if (matchOperationAddr(AddrInst: I, Opcode: I->getOpcode(), Depth, MovedAway: &MovedAway)) { |
5129 | // This instruction may have been moved away. If so, there is nothing |
5130 | // to check here. |
5131 | if (MovedAway) |
5132 | return true; |
5133 | // Okay, it's possible to fold this. Check to see if it is actually |
5134 | // *profitable* to do so. We use a simple cost model to avoid increasing |
5135 | // register pressure too much. |
5136 | if (I->hasOneUse() || |
5137 | isProfitableToFoldIntoAddressingMode(I, AMBefore&: BackupAddrMode, AMAfter&: AddrMode)) { |
5138 | AddrModeInsts.push_back(Elt: I); |
5139 | return true; |
5140 | } |
5141 | |
5142 | // It isn't profitable to do this, roll back. |
5143 | AddrMode = BackupAddrMode; |
5144 | AddrModeInsts.resize(N: OldSize); |
5145 | TPT.rollback(Point: LastKnownGood); |
5146 | } |
5147 | } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Val: Addr)) { |
5148 | if (matchOperationAddr(AddrInst: CE, Opcode: CE->getOpcode(), Depth)) |
5149 | return true; |
5150 | TPT.rollback(Point: LastKnownGood); |
5151 | } else if (isa<ConstantPointerNull>(Val: Addr)) { |
5152 | // Null pointer gets folded without affecting the addressing mode. |
5153 | return true; |
5154 | } |
5155 | |
  // Worst case, the target should support [reg] addressing modes. :)
5157 | if (!AddrMode.HasBaseReg) { |
5158 | AddrMode.HasBaseReg = true; |
5159 | AddrMode.BaseReg = Addr; |
5160 | // Still check for legality in case the target supports [imm] but not [i+r]. |
5161 | if (TLI.isLegalAddressingMode(DL, AM: AddrMode, Ty: AccessTy, AddrSpace)) |
5162 | return true; |
5163 | AddrMode.HasBaseReg = false; |
5164 | AddrMode.BaseReg = nullptr; |
5165 | } |
5166 | |
5167 | // If the base register is already taken, see if we can do [r+r]. |
5168 | if (AddrMode.Scale == 0) { |
5169 | AddrMode.Scale = 1; |
5170 | AddrMode.ScaledReg = Addr; |
5171 | if (TLI.isLegalAddressingMode(DL, AM: AddrMode, Ty: AccessTy, AddrSpace)) |
5172 | return true; |
5173 | AddrMode.Scale = 0; |
5174 | AddrMode.ScaledReg = nullptr; |
5175 | } |
5176 | // Couldn't match. |
5177 | TPT.rollback(Point: LastKnownGood); |
5178 | return false; |
5179 | } |
5180 | |
5181 | /// Check to see if all uses of OpVal by the specified inline asm call are due |
5182 | /// to memory operands. If so, return true, otherwise return false. |
static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
                                    const TargetLowering &TLI,
                                    const TargetRegisterInfo &TRI) {
  const Function *F = CI->getFunction();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI.ParseConstraints(F->getDataLayout(), &TRI, *CI);

  for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) {
    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, SDValue());

    // If this asm operand is our Value*, and if it isn't an indirect memory
    // operand, we can't fold it! TODO: Also handle C_Address?
    if (OpInfo.CallOperandVal == OpVal &&
        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
         !OpInfo.isIndirect))
      return false;
  }

  return true;
}

/// Recursively walk all the uses of I until we find a memory use.
/// If we find an obviously non-foldable instruction, return true.
/// Add accessed addresses and types to MemoryUses.
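///
/// For example (a sketch), given
///   %sunk = getelementptr i8, ptr %base, i64 %off
///   %v = load i32, ptr %sunk             ; recorded as a memory use
///   store i32 0, ptr %sunk               ; recorded as a memory use
/// both uses of %sunk end up in MemoryUses, whereas passing %sunk to an
/// ordinary (non-cold, non-asm) call is a non-foldable use and makes this
/// function return true.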
static bool FindAllMemoryUses(
    Instruction *I, SmallVectorImpl<std::pair<Use *, Type *>> &MemoryUses,
    SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
    const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI, unsigned &SeenInsts) {
  // If we already considered this instruction, we're done.
  if (!ConsideredInsts.insert(I).second)
    return false;

  // If this is an obviously unfoldable instruction, bail out.
  if (!MightBeFoldableInst(I))
    return true;

  // Loop over all the uses, recursively processing them.
  for (Use &U : I->uses()) {
    // Conservatively return true if we're seeing a large number or a deep
    // chain of users. This avoids excessive compilation times in pathological
    // cases.
    if (SeenInsts++ >= MaxAddressUsersToScan)
      return true;

    Instruction *UserI = cast<Instruction>(U.getUser());
    if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
      MemoryUses.push_back({&U, LI->getType()});
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
      if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back({&U, SI->getValueOperand()->getType()});
      continue;
    }

    if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
      if (U.getOperandNo() != AtomicRMWInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back({&U, RMW->getValOperand()->getType()});
      continue;
    }

    if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
      if (U.getOperandNo() != AtomicCmpXchgInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back({&U, CmpX->getCompareOperand()->getType()});
      continue;
    }

    if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
      if (CI->hasFnAttr(Attribute::Cold)) {
        // If this is a cold call, we can sink the addressing calculation into
        // the cold path. See optimizeCallInst.
        bool OptForSize =
            OptSize || llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
        if (!OptForSize)
          continue;
      }

      InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand());
      if (!IA)
        return true;

      // If this is a memory operand, we're cool, otherwise bail out.
      if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI))
        return true;
      continue;
    }

    if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI,
                          OptSize, PSI, BFI, SeenInsts))
      return true;
  }

  return false;
}

static bool FindAllMemoryUses(
    Instruction *I, SmallVectorImpl<std::pair<Use *, Type *>> &MemoryUses,
    const TargetLowering &TLI, const TargetRegisterInfo &TRI, bool OptSize,
    ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
  unsigned SeenInsts = 0;
  SmallPtrSet<Instruction *, 16> ConsideredInsts;
  return FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
                           PSI, BFI, SeenInsts);
}

/// Return true if Val is already known to be live at the use site that we're
/// folding it into. If so, there is no cost to include it in the addressing
/// mode. KnownLive1 and KnownLive2 are two values that we know are live at the
/// instruction already.
bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,
                                                   Value *KnownLive1,
                                                   Value *KnownLive2) {
  // If Val is either of the known-live values, we know it is live!
  if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
    return true;

  // All values other than instructions and arguments (e.g. constants) are
  // live.
  if (!isa<Instruction>(Val) && !isa<Argument>(Val))
    return true;

  // If Val is a constant-sized alloca in the entry block, it is live; it is
  // just a reference to the stack/frame pointer, which is live for the whole
  // function.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
    if (AI->isStaticAlloca())
      return true;

  // Check to see if this value is already used in the memory instruction's
  // block. If so, it's already live into the block at the very least, so we
  // can reasonably fold it.
  return Val->isUsedInBasicBlock(MemoryInst->getParent());
}

/// It is possible for the addressing mode of the target machine to fold the
/// specified instruction into a load or store that ultimately uses it.
/// However, the specified instruction has multiple uses.
/// Given this, it may actually increase register pressure to fold it
/// into the load. For example, consider this code:
///
///     X = ...
///     Y = X+1
///     use(Y)   -> nonload/store
///     Z = Y+1
///     load Z
///
/// In this case, Y has multiple uses, and can be folded into the load of Z
/// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to
/// be live at the use(Y) line. If we don't fold Y into load Z, we use one
/// fewer register. Since Y can't be folded into "use(Y)" we don't increase the
/// number of computations either.
///
/// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If
/// X was live across 'load Z' for other reasons, we actually *would* want to
/// fold the addressing mode in the Z case. This would make Y die earlier.
bool AddressingModeMatcher::isProfitableToFoldIntoAddressingMode(
    Instruction *I, ExtAddrMode &AMBefore, ExtAddrMode &AMAfter) {
  if (IgnoreProfitability)
    return true;

  // AMBefore is the addressing mode before this instruction was folded into
  // it, and AMAfter is the addressing mode after the instruction was folded.
  // Get the set of registers referenced by AMAfter and subtract out those
  // referenced by AMBefore: this is the set of values which folding in this
  // address extends the lifetime of.
  //
  // Note that there are only two potential values being referenced here,
  // BaseReg and ScaleReg (global addresses are always available, as are any
  // folded immediates).
  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;

  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
  // lifetime wasn't extended by adding this instruction.
  if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    BaseReg = nullptr;
  if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    ScaledReg = nullptr;

  // If folding this instruction (and its subexprs) didn't extend any live
  // ranges, we're ok with it.
  if (!BaseReg && !ScaledReg)
    return true;

  // If all uses of this instruction can have the address mode sunk into them,
  // we can remove the addressing mode and effectively trade one live register
  // for another (at worst.) In this context, folding an addressing mode into
  // the use is just a particularly nice way of sinking it.
  SmallVector<std::pair<Use *, Type *>, 16> MemoryUses;
  if (FindAllMemoryUses(I, MemoryUses, TLI, TRI, OptSize, PSI, BFI))
    return false; // Has a non-memory, non-foldable use!

  // Now that we know that all uses of this instruction are part of a chain of
  // computation involving only operations that could theoretically be folded
  // into a memory use, loop over each of these memory operation uses and see
  // if they could *actually* fold the instruction. The assumption is that
  // addressing modes are cheap and that duplicating the computation involved
  // many times is worthwhile, even on a fast path. For sinking candidates
  // (i.e. cold call sites), this serves as a way to prevent excessive code
  // growth since most architectures have some reasonably small and fast way
  // to compute an effective address (e.g. LEA on x86).
  SmallVector<Instruction *, 32> MatchedAddrModeInsts;
  for (const std::pair<Use *, Type *> &Pair : MemoryUses) {
    Value *Address = Pair.first->get();
    Instruction *UserI = cast<Instruction>(Pair.first->getUser());
    Type *AddressAccessTy = Pair.second;
    unsigned AS = Address->getType()->getPointerAddressSpace();

    // Do a match against the root of this address, ignoring profitability.
    // This will tell us if the addressing mode for the memory operation will
    // *actually* cover the shared instruction.
    ExtAddrMode Result;
    std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
                                                                      0);
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI, LI, getDTFn,
                                  AddressAccessTy, AS, UserI, Result,
                                  InsertedInsts, PromotedInsts, TPT,
                                  LargeOffsetGEP, OptSize, PSI, BFI);
    Matcher.IgnoreProfitability = true;
    bool Success = Matcher.matchAddr(Address, 0);
    (void)Success;
    assert(Success && "Couldn't select *anything*?");

    // The match was to check the profitability, the changes made are not
    // part of the original matcher. Therefore, they should be dropped,
    // otherwise the original matcher will not present the right state.
    TPT.rollback(LastKnownGood);

    // If the match didn't cover I, then it won't be shared by it.
    if (!is_contained(MatchedAddrModeInsts, I))
      return false;

    MatchedAddrModeInsts.clear();
  }

  return true;
}

/// Return true if the specified value is defined in a
/// different basic block than BB.
static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() != BB;
  return false;
}

/// Sink addressing mode computation immediately before MemoryInst if doing so
/// can be done without increasing register pressure. The need for the
/// register pressure constraint means this can end up being an all-or-nothing
/// decision for all uses of the same addressing computation.
///
/// Load and Store Instructions often have addressing modes that can do
/// significant amounts of computation. As such, instruction selection will try
/// to get the load or store to do as much computation as possible for the
/// program. The problem is that isel can only see within a single block. As
/// such, we sink as much legal addressing mode work into the block as
/// possible.
///
/// This method is used to optimize both load/store and inline asms with memory
/// operands. It's also used to sink addressing computations feeding into cold
/// call sites into their (cold) basic block.
///
/// The motivation for handling sinking into cold blocks is that doing so can
/// both enable other address mode sinking (by satisfying the register pressure
/// constraint above), and reduce register pressure globally (by removing the
/// addressing mode computation from the fast path entirely).
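///
/// For example (illustrative only), given
///   entry:
///     %addr = getelementptr i32, ptr %base, i64 %idx
///     br i1 %c, label %cold, label %hot
///   cold:
///     %v = load i32, ptr %addr
/// we would rebuild %addr immediately before the load in %cold, so that isel
/// can fold the GEP into the load's addressing mode and the fast path never
/// pays for it.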
bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                                        Type *AccessTy, unsigned AddrSpace) {
  Value *Repl = Addr;

  // Try to collapse single-value PHI nodes. This is necessary to undo
  // unprofitable PRE transformations.
  SmallVector<Value *, 8> worklist;
  SmallPtrSet<Value *, 16> Visited;
  worklist.push_back(Addr);

  // Use a worklist to iteratively look through PHI and select nodes, and
  // ensure that the addressing modes obtained from the non-PHI/select roots
  // of the graph are compatible.
  bool PhiOrSelectSeen = false;
  SmallVector<Instruction *, 16> AddrModeInsts;
  const SimplifyQuery SQ(*DL, TLInfo);
  AddressingModeCombiner AddrModes(SQ, Addr);
  TypePromotionTransaction TPT(RemovedInsts);
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  while (!worklist.empty()) {
    Value *V = worklist.pop_back_val();

    // We allow traversing cyclic Phi nodes.
    // In case of success after this loop we ensure that traversing through
    // Phi nodes ends up with all cases computing an address of the form
    //     BaseGV + Base + Scale * Index + Offset
    // where Scale and Offset are constants and BaseGV, Base and Index
    // are exactly the same Values in all cases.
    // This means that BaseGV, Scale and Offset dominate our memory instruction
    // and have the same value as they had in the address computation
    // represented as a Phi. So we can safely sink the address computation to
    // the memory instruction.
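    //
    // For example (a sketch):
    //   %p = phi ptr [ %g1, %bb1 ], [ %g2, %bb2 ]
    // where %g1 and %g2 are both "getelementptr i8, ptr %base, i64 8" in
    // their blocks: every root matches Base = %base, Offset = 8, so the
    // combined computation can be rebuilt next to the memory instruction.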
    if (!Visited.insert(V).second)
      continue;

    // For a PHI node, push all of its incoming values.
    if (PHINode *P = dyn_cast<PHINode>(V)) {
      append_range(worklist, P->incoming_values());
      PhiOrSelectSeen = true;
      continue;
    }
    // Similar for select.
    if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
      worklist.push_back(SI->getFalseValue());
      worklist.push_back(SI->getTrueValue());
      PhiOrSelectSeen = true;
      continue;
    }

    // For non-PHIs, determine the addressing mode being computed. Note that
    // the result may differ depending on what other uses our candidate
    // addressing instructions might have.
    AddrModeInsts.clear();
    std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
                                                                      0);
    // Defer the query for (and possible computation of) the dom tree to the
    // point of actual use. It's expected that most address matches don't
    // actually need the domtree.
    auto getDTFn = [MemoryInst, this]() -> const DominatorTree & {
      Function *F = MemoryInst->getParent()->getParent();
      return this->getDT(*F);
    };
    ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
        V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *LI, getDTFn,
        *TRI, InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI,
        BFI.get());

    GetElementPtrInst *GEP = LargeOffsetGEP.first;
    if (GEP && !NewGEPBases.count(GEP)) {
      // If splitting the underlying data structure can reduce the offset of a
      // GEP, collect the GEP. Skip the GEPs that are the new bases of
      // previously split data structures.
      LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP);
      LargeOffsetGEPID.insert(std::make_pair(GEP, LargeOffsetGEPID.size()));
    }

    NewAddrMode.OriginalValue = V;
    if (!AddrModes.addNewAddrMode(NewAddrMode))
      break;
  }

  // Try to combine the AddrModes we've collected. If we couldn't collect any,
  // or we have multiple but either couldn't combine them or combining them
  // wouldn't do anything useful, bail out now.
  if (!AddrModes.combineAddrModes()) {
    TPT.rollback(LastKnownGood);
    return false;
  }
  bool Modified = TPT.commit();

  // Get the combined AddrMode (or the only AddrMode, if we only had one).
  ExtAddrMode AddrMode = AddrModes.getAddrMode();

  // If all the instructions matched are already in this BB, don't do anything.
  // If we saw a Phi node then it is definitely not local, and if we saw a
  // select then we want to push the address calculation past it even if it's
  // already in this BB.
  if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) {
        return IsNonLocalValue(V, MemoryInst->getParent());
      })) {
    LLVM_DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n");
    return Modified;
  }

  // Insert this computation right after this user. Since our caller is
  // scanning from the top of the BB to the bottom, reuse of the expr is
  // guaranteed to happen later.
  IRBuilder<> Builder(MemoryInst);

  // Now that we determined the addressing expression we want to use and know
  // that we have to sink it into this block. Check to see if we have already
  // done this for some other load/store instr in this block. If so, reuse
  // the computation. Before attempting reuse, check if the address is valid
  // as it may have been erased.

  WeakTrackingVH SunkAddrVH = SunkAddrs[Addr];

  Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
  Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
  if (SunkAddr) {
    LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode
                      << " for " << *MemoryInst << "\n");
    if (SunkAddr->getType() != Addr->getType()) {
      if (SunkAddr->getType()->getPointerAddressSpace() !=
              Addr->getType()->getPointerAddressSpace() &&
          !DL->isNonIntegralPointerType(Addr->getType())) {
        // There are two reasons the address spaces might not match: a no-op
        // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a
        // ptrtoint/inttoptr pair to ensure we match the original semantics.
        // TODO: allow bitcast between different address space pointers with
        // the same size.
        SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr");
        SunkAddr =
            Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr");
      } else
        SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
    }
  } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() &&
                                   SubtargetInfo->addrSinkUsingGEPs())) {
    // By default, we use the GEP-based method when AA is used later. This
    // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
    LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
                      << " for " << *MemoryInst << "\n");
    Value *ResultPtr = nullptr, *ResultIndex = nullptr;

    // First, find the pointer.
    if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
      ResultPtr = AddrMode.BaseReg;
      AddrMode.BaseReg = nullptr;
    }

    if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
      // We can't add more than one pointer together, nor can we scale a
      // pointer (both of which seem meaningless).
      if (ResultPtr || AddrMode.Scale != 1)
        return Modified;

      ResultPtr = AddrMode.ScaledReg;
      AddrMode.Scale = 0;
    }

    // It is only safe to sign extend the BaseReg if we know that the math
    // required to create it did not overflow before we extend it. Since
    // the original IR value was tossed in favor of a constant back when
    // the AddrMode was created we need to bail out gracefully if widths
    // do not match instead of extending it.
    //
    // (See below for code to add the scale.)
    if (AddrMode.Scale) {
      Type *ScaledRegTy = AddrMode.ScaledReg->getType();
      if (cast<IntegerType>(IntPtrTy)->getBitWidth() >
          cast<IntegerType>(ScaledRegTy)->getBitWidth())
        return Modified;
    }

    GlobalValue *BaseGV = AddrMode.BaseGV;
    if (BaseGV != nullptr) {
      if (ResultPtr)
        return Modified;

      if (BaseGV->isThreadLocal()) {
        ResultPtr = Builder.CreateThreadLocalAddress(BaseGV);
      } else {
        ResultPtr = BaseGV;
      }
    }

    // If the real base value actually came from an inttoptr, then the matcher
    // will look through it and provide only the integer value. In that case,
    // use it here.
    if (!DL->isNonIntegralPointerType(Addr->getType())) {
      if (!ResultPtr && AddrMode.BaseReg) {
        ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(),
                                           "sunkaddr");
        AddrMode.BaseReg = nullptr;
      } else if (!ResultPtr && AddrMode.Scale == 1) {
        ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(),
                                           "sunkaddr");
        AddrMode.Scale = 0;
      }
    }

    if (!ResultPtr && !AddrMode.BaseReg && !AddrMode.Scale &&
        !AddrMode.BaseOffs) {
      SunkAddr = Constant::getNullValue(Addr->getType());
    } else if (!ResultPtr) {
      return Modified;
    } else {
      Type *I8PtrTy =
          Builder.getPtrTy(Addr->getType()->getPointerAddressSpace());

      // Start with the base register. Do this first so that subsequent address
      // matching finds it last, which will prevent it from trying to match it
      // as the scaled value in case it happens to be a mul. That would be
      // problematic if we've sunk a different mul for the scale, because then
      // we'd end up sinking both muls.
      if (AddrMode.BaseReg) {
        Value *V = AddrMode.BaseReg;
        if (V->getType() != IntPtrTy)
          V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");

        ResultIndex = V;
      }

      // Add the scale value.
      if (AddrMode.Scale) {
        Value *V = AddrMode.ScaledReg;
        if (V->getType() == IntPtrTy) {
          // done.
        } else {
          assert(cast<IntegerType>(IntPtrTy)->getBitWidth() <
                     cast<IntegerType>(V->getType())->getBitWidth() &&
                 "We can't transform if ScaledReg is too narrow");
          V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
        }

        if (AddrMode.Scale != 1)
          V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
                                "sunkaddr");
        if (ResultIndex)
          ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
        else
          ResultIndex = V;
      }

      // Add in the Base Offset if present.
      if (AddrMode.BaseOffs) {
        Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
        if (ResultIndex) {
          // We need to add this separately from the scale above to help with
          // SDAG consecutive load/store merging.
          if (ResultPtr->getType() != I8PtrTy)
            ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
          ResultPtr = Builder.CreatePtrAdd(ResultPtr, ResultIndex, "sunkaddr",
                                           AddrMode.InBounds);
        }

        ResultIndex = V;
      }

      if (!ResultIndex) {
        SunkAddr = ResultPtr;
      } else {
        if (ResultPtr->getType() != I8PtrTy)
          ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
        SunkAddr = Builder.CreatePtrAdd(ResultPtr, ResultIndex, "sunkaddr",
                                        AddrMode.InBounds);
      }

      if (SunkAddr->getType() != Addr->getType()) {
        if (SunkAddr->getType()->getPointerAddressSpace() !=
                Addr->getType()->getPointerAddressSpace() &&
            !DL->isNonIntegralPointerType(Addr->getType())) {
          // There are two reasons the address spaces might not match: a no-op
          // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a
          // ptrtoint/inttoptr pair to ensure we match the original semantics.
          // TODO: allow bitcast between different address space pointers with
          // the same size.
          SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr");
          SunkAddr =
              Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr");
        } else
          SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
      }
    }
  } else {
    // We'd require a ptrtoint/inttoptr down the line, which we can't do for
    // non-integral pointers, so in that case bail out now.
    Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr;
    Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr;
    PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy);
    PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy);
    if (DL->isNonIntegralPointerType(Addr->getType()) ||
        (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) ||
        (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) ||
        (AddrMode.BaseGV &&
         DL->isNonIntegralPointerType(AddrMode.BaseGV->getType())))
      return Modified;

    LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
                      << " for " << *MemoryInst << "\n");
    Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
    Value *Result = nullptr;

    // Start with the base register. Do this first so that subsequent address
    // matching finds it last, which will prevent it from trying to match it
    // as the scaled value in case it happens to be a mul. That would be
    // problematic if we've sunk a different mul for the scale, because then
    // we'd end up sinking both muls.
    if (AddrMode.BaseReg) {
      Value *V = AddrMode.BaseReg;
      if (V->getType()->isPointerTy())
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      if (V->getType() != IntPtrTy)
        V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
      Result = V;
    }

    // Add the scale value.
    if (AddrMode.Scale) {
      Value *V = AddrMode.ScaledReg;
      if (V->getType() == IntPtrTy) {
        // done.
      } else if (V->getType()->isPointerTy()) {
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth()) {
        V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
      } else {
        // It is only safe to sign extend the BaseReg if we know that the math
        // required to create it did not overflow before we extend it. Since
        // the original IR value was tossed in favor of a constant back when
        // the AddrMode was created we need to bail out gracefully if widths
        // do not match instead of extending it.
        Instruction *I = dyn_cast_or_null<Instruction>(Result);
        if (I && (Result != AddrMode.BaseReg))
          I->eraseFromParent();
        return Modified;
      }
      if (AddrMode.Scale != 1)
        V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
                              "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the BaseGV if present.
    GlobalValue *BaseGV = AddrMode.BaseGV;
    if (BaseGV != nullptr) {
      Value *BaseGVPtr;
      if (BaseGV->isThreadLocal()) {
        BaseGVPtr = Builder.CreateThreadLocalAddress(BaseGV);
      } else {
        BaseGVPtr = BaseGV;
      }
      Value *V = Builder.CreatePtrToInt(BaseGVPtr, IntPtrTy, "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the Base Offset if present.
    if (AddrMode.BaseOffs) {
      Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    if (!Result)
      SunkAddr = Constant::getNullValue(Addr->getType());
    else
      SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
  }

  MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
  // Store the newly computed address into the cache. In the case we reused a
  // value, this should be idempotent.
  SunkAddrs[Addr] = WeakTrackingVH(SunkAddr);

  // If we have no uses, recursively delete the value and all dead instructions
  // using it.
  if (Repl->use_empty()) {
    resetIteratorIfInvalidatedWhileCalling(CurInstIterator->getParent(), [&]() {
      RecursivelyDeleteTriviallyDeadInstructions(
          Repl, TLInfo, nullptr,
          [&](Value *V) { removeAllAssertingVHReferences(V); });
    });
  }
  ++NumMemoryInsts;
  return true;
}

/// Rewrite GEP input to gather/scatter to enable SelectionDAGBuilder to find
/// a uniform base to use for ISD::MGATHER/MSCATTER. SelectionDAGBuilder can
/// only handle a 2 operand GEP in the same basic block or a splat constant
/// vector. The 2 operands to the GEP must have a scalar pointer and a vector
/// index.
///
/// If the existing GEP has a vector base pointer that is splat, we can look
/// through the splat to find the scalar pointer. If we can't find a scalar
/// pointer there's nothing we can do.
///
/// If we have a GEP with more than 2 indices where the middle indices are all
/// zeroes, we can replace it with 2 GEPs where the second has 2 operands.
///
/// If the final index isn't a vector or is a splat, we can emit a scalar GEP
/// followed by a GEP with an all zeroes vector index. This will enable
/// SelectionDAGBuilder to use the scalar GEP as the uniform base and have a
/// zero index.
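///
/// For example (a sketch), a gather of the form
///   %ptrs = getelementptr i32, ptr %base, <4 x i64> %idx
///   %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, ...)
/// already has the scalar-pointer + vector-index shape SelectionDAGBuilder
/// wants, whereas a splat vector base pointer would first be rewritten
/// through the splat's scalar operand.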
bool CodeGenPrepare::optimizeGatherScatterInst(Instruction *MemoryInst,
                                               Value *Ptr) {
  Value *NewAddr;

  if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
    // Don't optimize GEPs that don't have indices.
    if (!GEP->hasIndices())
      return false;

    // If the GEP and the gather/scatter aren't in the same BB, don't optimize.
    // FIXME: We should support this by sinking the GEP.
    if (MemoryInst->getParent() != GEP->getParent())
      return false;

    SmallVector<Value *, 2> Ops(GEP->operands());

    bool RewriteGEP = false;

    if (Ops[0]->getType()->isVectorTy()) {
      Ops[0] = getSplatValue(Ops[0]);
      if (!Ops[0])
        return false;
      RewriteGEP = true;
    }

    unsigned FinalIndex = Ops.size() - 1;

    // Ensure all but the last index is 0.
    // FIXME: This isn't strictly required. All that's required is that they
    // are all scalars or splats.
    for (unsigned i = 1; i < FinalIndex; ++i) {
      auto *C = dyn_cast<Constant>(Ops[i]);
      if (!C)
        return false;
      if (isa<VectorType>(C->getType()))
        C = C->getSplatValue();
      auto *CI = dyn_cast_or_null<ConstantInt>(C);
      if (!CI || !CI->isZero())
        return false;
      // Scalarize the index if needed.
      Ops[i] = CI;
    }

    // Try to scalarize the final index.
    if (Ops[FinalIndex]->getType()->isVectorTy()) {
      if (Value *V = getSplatValue(Ops[FinalIndex])) {
        auto *C = dyn_cast<ConstantInt>(V);
        // Don't scalarize all zeros vector.
        if (!C || !C->isZero()) {
          Ops[FinalIndex] = V;
          RewriteGEP = true;
        }
      }
    }

    // If we made any changes or we have extra operands, we need to generate
    // new instructions.
    if (!RewriteGEP && Ops.size() == 2)
      return false;

    auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount();

    IRBuilder<> Builder(MemoryInst);

    Type *SourceTy = GEP->getSourceElementType();
    Type *ScalarIndexTy = DL->getIndexType(Ops[0]->getType()->getScalarType());

    // If the final index isn't a vector, emit a scalar GEP containing all ops
    // and a vector GEP with all zeroes final index.
    if (!Ops[FinalIndex]->getType()->isVectorTy()) {
      NewAddr = Builder.CreateGEP(SourceTy, Ops[0], ArrayRef(Ops).drop_front());
      auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts);
      auto *SecondTy = GetElementPtrInst::getIndexedType(
          SourceTy, ArrayRef(Ops).drop_front());
      NewAddr =
          Builder.CreateGEP(SecondTy, NewAddr, Constant::getNullValue(IndexTy));
    } else {
      Value *Base = Ops[0];
      Value *Index = Ops[FinalIndex];

      // Create a scalar GEP if there are more than 2 operands.
      if (Ops.size() != 2) {
        // Replace the last index with 0.
        Ops[FinalIndex] =
            Constant::getNullValue(Ops[FinalIndex]->getType()->getScalarType());
        Base = Builder.CreateGEP(SourceTy, Base, ArrayRef(Ops).drop_front());
        SourceTy = GetElementPtrInst::getIndexedType(
            SourceTy, ArrayRef(Ops).drop_front());
      }

      // Now create the GEP with scalar pointer and vector index.
      NewAddr = Builder.CreateGEP(SourceTy, Base, Index);
    }
  } else if (!isa<Constant>(Ptr)) {
    // Not a GEP, maybe it's a splat and we can create a GEP to enable
    // SelectionDAGBuilder to use it as a uniform base.
    Value *V = getSplatValue(Ptr);
    if (!V)
      return false;

    auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount();

    IRBuilder<> Builder(MemoryInst);

    // Emit a vector GEP with a scalar pointer and all 0s vector index.
    Type *ScalarIndexTy = DL->getIndexType(V->getType()->getScalarType());
    auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts);
    Type *ScalarTy;
    if (cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() ==
        Intrinsic::masked_gather) {
      ScalarTy = MemoryInst->getType()->getScalarType();
    } else {
      assert(cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() ==
             Intrinsic::masked_scatter);
      ScalarTy = MemoryInst->getOperand(0)->getType()->getScalarType();
    }
    NewAddr = Builder.CreateGEP(ScalarTy, V, Constant::getNullValue(IndexTy));
  } else {
    // Constant, SelectionDAGBuilder knows to check if it's a splat.
    return false;
  }

  MemoryInst->replaceUsesOfWith(Ptr, NewAddr);

  // If we have no uses, recursively delete the value and all dead instructions
  // using it.
  if (Ptr->use_empty())
    RecursivelyDeleteTriviallyDeadInstructions(
        Ptr, TLInfo, nullptr,
        [&](Value *V) { removeAllAssertingVHReferences(V); });

  return true;
}

/// If there are any memory operands, use OptimizeMemoryInst to sink their
/// address computation into the block when possible / profitable.
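///
/// For example (illustrative only), for an indirect memory operand such as
///   call void asm "", "=*m"(ptr elementtype(i32) %addr)
/// the computation of %addr is fed to optimizeMemoryInst just as it would be
/// for the pointer operand of a load or store.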
bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) {
  bool MadeChange = false;

  const TargetRegisterInfo *TRI =
      TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(*DL, TRI, *CS);
  unsigned ArgNo = 0;
  for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) {
    // Compute the constraint code and ConstraintType to use.
    TLI->ComputeConstraintToUse(OpInfo, SDValue());

    // TODO: Also handle C_Address?
    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.isIndirect) {
      Value *OpVal = CS->getArgOperand(ArgNo++);
      MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u);
    } else if (OpInfo.Type == InlineAsm::isInput)
      ArgNo++;
  }

  return MadeChange;
}

/// Check if all the uses of \p Val are equivalent (or free) zero or
/// sign extensions.
static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) {
  assert(!Val->use_empty() && "Input must have at least one use");
  const Instruction *FirstUser = cast<Instruction>(*Val->user_begin());
  bool IsSExt = isa<SExtInst>(FirstUser);
  Type *ExtTy = FirstUser->getType();
  for (const User *U : Val->users()) {
    const Instruction *UI = cast<Instruction>(U);
    if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
      return false;
    Type *CurTy = UI->getType();
    // Same input and output types: Same instruction after CSE.
    if (CurTy == ExtTy)
      continue;

    // If IsSExt is true, we are in this situation:
    //   a = Val
    //   b = sext ty1 a to ty2
    //   c = sext ty1 a to ty3
    // Assuming ty2 is shorter than ty3, this could be turned into:
    //   a = Val
    //   b = sext ty1 a to ty2
    //   c = sext ty2 b to ty3
    // However, the last sext is not free.
    if (IsSExt)
      return false;

    // This is a ZExt, maybe this is free to extend from one type to another.
    // In that case, we would not account for a different use.
    Type *NarrowTy;
    Type *LargeTy;
    if (ExtTy->getScalarType()->getIntegerBitWidth() >
        CurTy->getScalarType()->getIntegerBitWidth()) {
      NarrowTy = CurTy;
      LargeTy = ExtTy;
    } else {
      NarrowTy = ExtTy;
      LargeTy = CurTy;
    }

    if (!TLI.isZExtFree(NarrowTy, LargeTy))
      return false;
  }
  // All uses are the same or can be derived from one another for free.
  return true;
}

/// Try to speculatively promote extensions in \p Exts and continue
/// promoting through newly promoted operands recursively as far as doing so is
/// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts.
/// When some promotion happened, \p TPT contains the proper state to revert
/// them.
///
/// \return true if some promotion happened, false otherwise.
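///
/// For example (a sketch), a chain like
///   %a = add nsw i32 %x, 1
///   %s = sext i32 %a to i64
/// may be promoted to
///   %x64 = sext i32 %x to i64
///   %s = add nsw i64 %x64, 1
/// moving the extension closer to a load of %x, where it can potentially be
/// merged into an extending load.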
bool CodeGenPrepare::tryToPromoteExts(
    TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts,
    SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
    unsigned CreatedInstsCost) {
  bool Promoted = false;

  // Iterate over all the extensions to try to promote them.
  for (auto *I : Exts) {
    // Early check if we directly have ext(load).
    if (isa<LoadInst>(I->getOperand(0))) {
      ProfitablyMovedExts.push_back(I);
      continue;
    }

    // Check whether or not we want to do any promotion. The reason we have
    // this check inside the for loop is to catch the case where an extension
    // is directly fed by a load, because in such a case the extension can be
    // moved up without any promotion on its operands.
    if (!TLI->enableExtLdPromotion() || DisableExtLdPromotion)
      return false;

    // Get the action to perform the promotion.
    TypePromotionHelper::Action TPH =
        TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts);
    // Check if we can promote.
    if (!TPH) {
      // Save the current extension as we cannot move up through its operand.
      ProfitablyMovedExts.push_back(I);
      continue;
    }

    // Save the current state.
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    SmallVector<Instruction *, 4> NewExts;
    unsigned NewCreatedInstsCost = 0;
    unsigned ExtCost = !TLI->isExtFree(I);
    // Promote.
    Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
                             &NewExts, nullptr, *TLI);
    assert(PromotedVal &&
           "TypePromotionHelper should have filtered out those cases");

    // We would be able to merge only one extension into a load.
    // Therefore, if we have more than one new extension we heuristically
    // cut this search path, because it means we degrade the code quality.
    // With exactly 2, the transformation is neutral, because we will merge
    // one extension but leave one. However, we optimistically keep going,
    // because the new extension may be removed too. Also avoid replacing a
    // single free extension with multiple extensions, as this increases the
    // number of IR instructions while not providing any savings.
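    //
    // For example (illustrative numbers only): promoting one non-free ext
    // (ExtCost = 1) that creates two new extensions (NewCreatedInstsCost = 2)
    // yields TotalCreatedInstsCost = max(0, 0 + 2 - 1) = 1, which stays within
    // the budget below; a third new extension would push the cost past 1 and
    // the promotion would be rolled back.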
    long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
    // FIXME: It would be possible to propagate a negative value instead of
    // conservatively ceiling it to 0.
    TotalCreatedInstsCost =
        std::max((long long)0, (TotalCreatedInstsCost - ExtCost));
    if (!StressExtLdPromotion &&
        (TotalCreatedInstsCost > 1 ||
         !isPromotedInstructionLegal(*TLI, *DL, PromotedVal) ||
         (ExtCost == 0 && NewExts.size() > 1))) {
      // This promotion is not profitable, rollback to the previous state, and
      // save the current extension in ProfitablyMovedExts as the latest
      // speculative promotion turned out to be unprofitable.
      TPT.rollback(LastKnownGood);
      ProfitablyMovedExts.push_back(I);
      continue;
    }
    // Continue promoting NewExts as far as doing so is profitable.
    SmallVector<Instruction *, 2> NewlyMovedExts;
    (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost);
    bool NewPromoted = false;
    for (auto *ExtInst : NewlyMovedExts) {
      Instruction *MovedExt = cast<Instruction>(ExtInst);
      Value *ExtOperand = MovedExt->getOperand(0);
      // If we have reached a load, we need this extra profitability check
      // as it could potentially be merged into an ext(load).
      if (isa<LoadInst>(ExtOperand) &&
          !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
            (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI))))
        continue;

      ProfitablyMovedExts.push_back(MovedExt);
      NewPromoted = true;
    }

    // If none of the speculative promotions for NewExts is profitable,
    // rollback and save the current extension (I) as the last profitable
    // extension.
    if (!NewPromoted) {
      TPT.rollback(LastKnownGood);
      ProfitablyMovedExts.push_back(I);
      continue;
    }
    // The promotion is profitable.
    Promoted = true;
  }
  return Promoted;
}

/// Merge redundant sexts when one dominates the other.
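///
/// For example (a sketch), given
///   bb0:  %s1 = sext i32 %v to i64   ; dominates bb1
///   bb1:  %s2 = sext i32 %v to i64
/// the uses of %s2 are rewritten to use %s1 and %s2 is removed.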
bool CodeGenPrepare::mergeSExts(Function &F) {
  bool Changed = false;
  for (auto &Entry : ValToSExtendedUses) {
    SExts &Insts = Entry.second;
    SExts CurPts;
    for (Instruction *Inst : Insts) {
      if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) ||
          Inst->getOperand(0) != Entry.first)
        continue;
      bool inserted = false;
      for (auto &Pt : CurPts) {
        if (getDT(F).dominates(Inst, Pt)) {
          replaceAllUsesWith(Pt, Inst, FreshBBs, IsHugeFunc);
          RemovedInsts.insert(Pt);
          Pt->removeFromParent();
          Pt = Inst;
          inserted = true;
          Changed = true;
          break;
        }
        if (!getDT(F).dominates(Pt, Inst))
          // Give up if we need to merge in a common dominator as the
          // experiments show it is not profitable.
          continue;
        replaceAllUsesWith(Inst, Pt, FreshBBs, IsHugeFunc);
        RemovedInsts.insert(Inst);
        Inst->removeFromParent();
        inserted = true;
        Changed = true;
        break;
      }
      if (!inserted)
        CurPts.push_back(Inst);
    }
  }
  return Changed;
}

// Split large data structures so that the GEPs accessing them can have
// smaller offsets and can therefore be sunk to the same blocks as their users.
// For example, a large struct starting from %base is split into two parts
// where the second part starts from %new_base.
//
// Before:
// BB0:
//   %base =
//
// BB1:
//   %gep0 = gep %base, off0
//   %gep1 = gep %base, off1
//   %gep2 = gep %base, off2
//
// BB2:
//   %load1 = load %gep0
//   %load2 = load %gep1
//   %load3 = load %gep2
//
// After:
// BB0:
//   %base =
//   %new_base = gep %base, off0
//
// BB1:
//   %new_gep0 = %new_base
//   %new_gep1 = gep %new_base, off1 - off0
//   %new_gep2 = gep %new_base, off2 - off0
//
// BB2:
//   %load1 = load i32, i32* %new_gep0
//   %load2 = load i32, i32* %new_gep1
//   %load3 = load i32, i32* %new_gep2
//
// %new_gep1 and %new_gep2 can be sunk to BB2 after the splitting because
// their offsets are small enough to fit into the addressing mode.
bool CodeGenPrepare::splitLargeGEPOffsets() {
  bool Changed = false;
  for (auto &Entry : LargeOffsetGEPMap) {
    Value *OldBase = Entry.first;
    SmallVectorImpl<std::pair<AssertingVH<GetElementPtrInst>, int64_t>>
        &LargeOffsetGEPs = Entry.second;
    auto compareGEPOffset =
        [&](const std::pair<GetElementPtrInst *, int64_t> &LHS,
            const std::pair<GetElementPtrInst *, int64_t> &RHS) {
          if (LHS.first == RHS.first)
            return false;
          if (LHS.second != RHS.second)
            return LHS.second < RHS.second;
          return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first];
        };
    // Sort all the GEPs of the same data structure by offset.
    llvm::sort(LargeOffsetGEPs, compareGEPOffset);
    LargeOffsetGEPs.erase(llvm::unique(LargeOffsetGEPs),
                          LargeOffsetGEPs.end());
    // Skip if all the GEPs have the same offsets.
    if (LargeOffsetGEPs.front().second == LargeOffsetGEPs.back().second)
      continue;
    GetElementPtrInst *BaseGEP = LargeOffsetGEPs.begin()->first;
    int64_t BaseOffset = LargeOffsetGEPs.begin()->second;
    Value *NewBaseGEP = nullptr;

    auto createNewBase = [&](int64_t BaseOffset, Value *OldBase,
                             GetElementPtrInst *GEP) {
      LLVMContext &Ctx = GEP->getContext();
      Type *PtrIdxTy = DL->getIndexType(GEP->getType());
      Type *I8PtrTy =
          PointerType::get(Ctx, GEP->getType()->getPointerAddressSpace());

      BasicBlock::iterator NewBaseInsertPt;
      BasicBlock *NewBaseInsertBB;
      if (auto *BaseI = dyn_cast<Instruction>(OldBase)) {
        // If the base of the struct is an instruction, the new base will be
        // inserted close to it.
        NewBaseInsertBB = BaseI->getParent();
        if (isa<PHINode>(BaseI))
          NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
        else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(BaseI)) {
          NewBaseInsertBB =
              SplitEdge(NewBaseInsertBB, Invoke->getNormalDest(), DT.get(), LI);
          NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
        } else
          NewBaseInsertPt = std::next(BaseI->getIterator());
      } else {
        // If the current base is an argument or global value, the new base
        // will be inserted to the entry block.
        NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock();
        NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
      }
      IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt);
      // Create a new base.
      Value *BaseIndex = ConstantInt::get(PtrIdxTy, BaseOffset);
      NewBaseGEP = OldBase;
      if (NewBaseGEP->getType() != I8PtrTy)
        NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy);
      NewBaseGEP =
          NewBaseBuilder.CreatePtrAdd(NewBaseGEP, BaseIndex, "splitgep");
      NewGEPBases.insert(NewBaseGEP);
      return;
    };

    // Check whether all the offsets can be encoded with the preferred common
    // base.
    if (int64_t PreferBase = TLI->getPreferredLargeGEPBaseOffset(
            LargeOffsetGEPs.front().second, LargeOffsetGEPs.back().second)) {
      BaseOffset = PreferBase;
      // Create a new base if the offset of the BaseGEP can be decoded with one
      // instruction.
      createNewBase(BaseOffset, OldBase, BaseGEP);
    }

    auto *LargeOffsetGEP = LargeOffsetGEPs.begin();
    while (LargeOffsetGEP != LargeOffsetGEPs.end()) {
      GetElementPtrInst *GEP = LargeOffsetGEP->first;
      int64_t Offset = LargeOffsetGEP->second;
      if (Offset != BaseOffset) {
        TargetLowering::AddrMode AddrMode;
        AddrMode.HasBaseReg = true;
        AddrMode.BaseOffs = Offset - BaseOffset;
        // The result type of the GEP might not be the type of the memory
        // access.
        if (!TLI->isLegalAddressingMode(*DL, AddrMode,
                                        GEP->getResultElementType(),
                                        GEP->getAddressSpace())) {
          // We need to create a new base if the offset to the current base is
          // too large to fit into the addressing mode. So, a very large struct
          // may be split into several parts.
          BaseGEP = GEP;
          BaseOffset = Offset;
          NewBaseGEP = nullptr;
        }
      }

      // Generate a new GEP to replace the current one.
      Type *PtrIdxTy = DL->getIndexType(GEP->getType());

      if (!NewBaseGEP) {
        // Create a new base if we don't have one yet. Find the insertion
        // pointer for the new base first.
        createNewBase(BaseOffset, OldBase, GEP);
      }

      IRBuilder<> Builder(GEP);
      Value *NewGEP = NewBaseGEP;
      if (Offset != BaseOffset) {
        // Calculate the new offset for the new GEP.
        Value *Index = ConstantInt::get(PtrIdxTy, Offset - BaseOffset);
        NewGEP = Builder.CreatePtrAdd(NewBaseGEP, Index);
      }
      replaceAllUsesWith(GEP, NewGEP, FreshBBs, IsHugeFunc);
      LargeOffsetGEPID.erase(GEP);
      LargeOffsetGEP = LargeOffsetGEPs.erase(LargeOffsetGEP);
      GEP->eraseFromParent();
      Changed = true;
    }
  }
  return Changed;
}

bool CodeGenPrepare::optimizePhiType(
    PHINode *I, SmallPtrSetImpl<PHINode *> &Visited,
    SmallPtrSetImpl<Instruction *> &DeletedInstrs) {
  // We are looking for a collection of interconnected phi nodes that together
  // only use loads/bitcasts and are used by stores/bitcasts, and the bitcasts
  // are of the same type. Convert the whole set of nodes to the type of the
  // bitcast.
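  //
  // For example (a sketch), with
  //   %l = load i32, ptr %p
  //   %b = bitcast i32 %l to float
  //   %phi = phi float [ %b, %bb0 ], [ %phi2, %bb1 ]
  // the phi (and any connected phis) may be rewritten to operate on i32, with
  // bitcasts left only at the anchoring uses, provided the target reports the
  // conversion as desirable via shouldConvertPhiType.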
6385 | Type *PhiTy = I->getType(); |
6386 | Type *ConvertTy = nullptr; |
6387 | if (Visited.count(Ptr: I) || |
6388 | (!I->getType()->isIntegerTy() && !I->getType()->isFloatingPointTy())) |
6389 | return false; |
6390 | |
6391 | SmallVector<Instruction *, 4> Worklist; |
6392 | Worklist.push_back(Elt: cast<Instruction>(Val: I)); |
6393 | SmallPtrSet<PHINode *, 4> PhiNodes; |
6394 | SmallPtrSet<ConstantData *, 4> Constants; |
6395 | PhiNodes.insert(Ptr: I); |
6396 | Visited.insert(Ptr: I); |
6397 | SmallPtrSet<Instruction *, 4> Defs; |
6398 | SmallPtrSet<Instruction *, 4> Uses; |
6399 | // This works by adding extra bitcasts between load/stores and removing |
6400 | // existing bicasts. If we have a phi(bitcast(load)) or a store(bitcast(phi)) |
6401 | // we can get in the situation where we remove a bitcast in one iteration |
6402 | // just to add it again in the next. We need to ensure that at least one |
6403 | // bitcast we remove are anchored to something that will not change back. |
6404 | bool AnyAnchored = false; |
6405 | |
6406 | while (!Worklist.empty()) { |
6407 | Instruction *II = Worklist.pop_back_val(); |
6408 | |
    if (auto *Phi = dyn_cast<PHINode>(II)) {
      // Handle Defs, which might also be PHIs.
      for (Value *V : Phi->incoming_values()) {
        if (auto *OpPhi = dyn_cast<PHINode>(V)) {
          if (!PhiNodes.count(OpPhi)) {
            if (!Visited.insert(OpPhi).second)
              return false;
            PhiNodes.insert(OpPhi);
            Worklist.push_back(OpPhi);
          }
        } else if (auto *OpLoad = dyn_cast<LoadInst>(V)) {
          if (!OpLoad->isSimple())
            return false;
          if (Defs.insert(OpLoad).second)
            Worklist.push_back(OpLoad);
        } else if (auto *OpEx = dyn_cast<ExtractElementInst>(V)) {
          if (Defs.insert(OpEx).second)
            Worklist.push_back(OpEx);
        } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) {
          if (!ConvertTy)
            ConvertTy = OpBC->getOperand(0)->getType();
          if (OpBC->getOperand(0)->getType() != ConvertTy)
            return false;
          if (Defs.insert(OpBC).second) {
            Worklist.push_back(OpBC);
            AnyAnchored |= !isa<LoadInst>(OpBC->getOperand(0)) &&
                           !isa<ExtractElementInst>(OpBC->getOperand(0));
          }
        } else if (auto *OpC = dyn_cast<ConstantData>(V))
          Constants.insert(OpC);
        else
          return false;
      }
    }
6443 | |
    // Handle uses which might also be PHIs.
    for (User *V : II->users()) {
      if (auto *OpPhi = dyn_cast<PHINode>(V)) {
        if (!PhiNodes.count(OpPhi)) {
          if (Visited.count(OpPhi))
            return false;
          PhiNodes.insert(OpPhi);
          Visited.insert(OpPhi);
          Worklist.push_back(OpPhi);
        }
      } else if (auto *OpStore = dyn_cast<StoreInst>(V)) {
        if (!OpStore->isSimple() || OpStore->getOperand(0) != II)
          return false;
        Uses.insert(OpStore);
      } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) {
        if (!ConvertTy)
          ConvertTy = OpBC->getType();
        if (OpBC->getType() != ConvertTy)
          return false;
        Uses.insert(OpBC);
        AnyAnchored |=
            any_of(OpBC->users(), [](User *U) { return !isa<StoreInst>(U); });
      } else {
        return false;
      }
6469 | } |
6470 | } |
6471 | |
  if (!ConvertTy || !AnyAnchored ||
      !TLI->shouldConvertPhiType(PhiTy, ConvertTy))
    return false;

  LLVM_DEBUG(dbgs() << "Converting " << *I << "\n and connected nodes to "
                    << *ConvertTy << "\n");
6478 | |
6479 | // Create all the new phi nodes of the new type, and bitcast any loads to the |
6480 | // correct type. |
  ValueToValueMap ValMap;
  for (ConstantData *C : Constants)
    ValMap[C] = ConstantExpr::getBitCast(C, ConvertTy);
  for (Instruction *D : Defs) {
    if (isa<BitCastInst>(D)) {
      ValMap[D] = D->getOperand(0);
      DeletedInstrs.insert(D);
    } else {
      BasicBlock::iterator insertPt = std::next(D->getIterator());
      ValMap[D] = new BitCastInst(D, ConvertTy, D->getName() + ".bc", insertPt);
    }
  }
  for (PHINode *Phi : PhiNodes)
    ValMap[Phi] = PHINode::Create(ConvertTy, Phi->getNumIncomingValues(),
                                  Phi->getName() + ".tc", Phi->getIterator());
  // Pipe together all the PhiNodes.
  for (PHINode *Phi : PhiNodes) {
    PHINode *NewPhi = cast<PHINode>(ValMap[Phi]);
    for (int i = 0, e = Phi->getNumIncomingValues(); i < e; i++)
      NewPhi->addIncoming(ValMap[Phi->getIncomingValue(i)],
                          Phi->getIncomingBlock(i));
    Visited.insert(NewPhi);
  }
  // And finally pipe up the stores and bitcasts.
  for (Instruction *U : Uses) {
    if (isa<BitCastInst>(U)) {
      DeletedInstrs.insert(U);
      replaceAllUsesWith(U, ValMap[U->getOperand(0)], FreshBBs, IsHugeFunc);
    } else {
      U->setOperand(0, new BitCastInst(ValMap[U->getOperand(0)], PhiTy, "bc",
                                       U->getIterator()));
    }
  }

  // Save the removed phis to be deleted later.
  for (PHINode *Phi : PhiNodes)
    DeletedInstrs.insert(Phi);
6518 | return true; |
6519 | } |
6520 | |
6521 | bool CodeGenPrepare::optimizePhiTypes(Function &F) { |
6522 | if (!OptimizePhiTypes) |
6523 | return false; |
6524 | |
6525 | bool Changed = false; |
6526 | SmallPtrSet<PHINode *, 4> Visited; |
6527 | SmallPtrSet<Instruction *, 4> DeletedInstrs; |
6528 | |
  // Attempt to optimize all the phis in the function to the correct type.
  for (auto &BB : F)
    for (auto &Phi : BB.phis())
      Changed |= optimizePhiType(&Phi, Visited, DeletedInstrs);

  // Remove any old phis that have been converted.
  for (auto *I : DeletedInstrs) {
    replaceAllUsesWith(I, PoisonValue::get(I->getType()), FreshBBs, IsHugeFunc);
    I->eraseFromParent();
  }
6538 | } |
6539 | |
6540 | return Changed; |
6541 | } |
6542 | |
/// Return true if an ext(load) can be formed from an extension in
6544 | /// \p MovedExts. |
6545 | bool CodeGenPrepare::canFormExtLd( |
6546 | const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI, |
6547 | Instruction *&Inst, bool HasPromoted) { |
6548 | for (auto *MovedExtInst : MovedExts) { |
    if (isa<LoadInst>(MovedExtInst->getOperand(0))) {
      LI = cast<LoadInst>(MovedExtInst->getOperand(0));
6551 | Inst = MovedExtInst; |
6552 | break; |
6553 | } |
6554 | } |
6555 | if (!LI) |
6556 | return false; |
6557 | |
6558 | // If they're already in the same block, there's nothing to do. |
6559 | // Make the cheap checks first if we did not promote. |
6560 | // If we promoted, we need to check if it is indeed profitable. |
6561 | if (!HasPromoted && LI->getParent() == Inst->getParent()) |
6562 | return false; |
6563 | |
  return TLI->isExtLoad(LI, Inst, *DL);
6565 | } |
6566 | |
6567 | /// Move a zext or sext fed by a load into the same basic block as the load, |
6568 | /// unless conditions are unfavorable. This allows SelectionDAG to fold the |
6569 | /// extend into the load. |
6570 | /// |
6571 | /// E.g., |
6572 | /// \code |
6573 | /// %ld = load i32* %addr |
6574 | /// %add = add nuw i32 %ld, 4 |
6575 | /// %zext = zext i32 %add to i64 |
/// \endcode
6577 | /// => |
6578 | /// \code |
6579 | /// %ld = load i32* %addr |
6580 | /// %zext = zext i32 %ld to i64 |
6581 | /// %add = add nuw i64 %zext, 4 |
/// \endcode
/// Note that the promotion of %add to i64 is done in tryToPromoteExts(), which
/// allows us to match zext(load i32*) to i64.
6585 | /// |
6586 | /// Also, try to promote the computations used to obtain a sign extended |
6587 | /// value used into memory accesses. |
6588 | /// E.g., |
6589 | /// \code |
6590 | /// a = add nsw i32 b, 3 |
6591 | /// d = sext i32 a to i64 |
6592 | /// e = getelementptr ..., i64 d |
6593 | /// \endcode |
6594 | /// => |
6595 | /// \code |
6596 | /// f = sext i32 b to i64 |
6597 | /// a = add nsw i64 f, 3 |
6598 | /// e = getelementptr ..., i64 a |
6599 | /// \endcode |
6600 | /// |
6601 | /// \p Inst[in/out] the extension may be modified during the process if some |
6602 | /// promotions apply. |
6603 | bool CodeGenPrepare::optimizeExt(Instruction *&Inst) { |
  bool AllowPromotionWithoutCommonHeader = false;
  /// See if it is an interesting sext operation for the address type
  /// promotion before trying to promote it, e.g., the ones with the right
  /// type and used in memory accesses.
  bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion(
      *Inst, AllowPromotionWithoutCommonHeader);
  TypePromotionTransaction TPT(RemovedInsts);
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  SmallVector<Instruction *, 1> Exts;
  SmallVector<Instruction *, 2> SpeculativelyMovedExts;
  Exts.push_back(Inst);
6616 | |
  bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts);
6618 | |
6619 | // Look for a load being extended. |
6620 | LoadInst *LI = nullptr; |
6621 | Instruction *ExtFedByLoad; |
6622 | |
  // Try to promote a chain of computation if it allows forming an extended
  // load.
  if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) {
    assert(LI && ExtFedByLoad && "Expect a valid load and extension");
    TPT.commit();
    // Move the extend into the same block as the load.
    ExtFedByLoad->moveAfter(LI);
6630 | ++NumExtsMoved; |
6631 | Inst = ExtFedByLoad; |
6632 | return true; |
6633 | } |
6634 | |
6635 | // Continue promoting SExts if known as considerable depending on targets. |
6636 | if (ATPConsiderable && |
6637 | performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader, |
6638 | HasPromoted, TPT, SpeculativelyMovedExts)) |
6639 | return true; |
6640 | |
  TPT.rollback(LastKnownGood);
6642 | return false; |
6643 | } |
6644 | |
6645 | // Perform address type promotion if doing so is profitable. |
// If AllowPromotionWithoutCommonHeader == false, we should find other sext
// instructions that sign extended the same initial value. However, if
// AllowPromotionWithoutCommonHeader == true, we assume promoting the
// extension is profitable on its own.
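// For example (illustrative IR), when %s1 = sext i32 %a1 to i64 with
// %a1 = add nsw i32 %b, 1 is seen first, it is only recorded in
// SeenChainsForSExt. If %s2 = sext i32 %a2 to i64 with
// %a2 = add nsw i32 %b, 2 is seen later, both chains share the header %b,
// and both promotions are then performed.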
6650 | bool CodeGenPrepare::performAddressTypePromotion( |
    Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
6652 | bool HasPromoted, TypePromotionTransaction &TPT, |
6653 | SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) { |
6654 | bool Promoted = false; |
6655 | SmallPtrSet<Instruction *, 1> UnhandledExts; |
6656 | bool AllSeenFirst = true; |
  for (auto *I : SpeculativelyMovedExts) {
    Value *HeadOfChain = I->getOperand(0);
    DenseMap<Value *, Instruction *>::iterator AlreadySeen =
        SeenChainsForSExt.find(HeadOfChain);
    // If there is an unhandled SExt which has the same header, try to promote
    // it as well.
    if (AlreadySeen != SeenChainsForSExt.end()) {
      if (AlreadySeen->second != nullptr)
        UnhandledExts.insert(AlreadySeen->second);
6666 | AllSeenFirst = false; |
6667 | } |
6668 | } |
6669 | |
6670 | if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader && |
6671 | SpeculativelyMovedExts.size() == 1)) { |
6672 | TPT.commit(); |
6673 | if (HasPromoted) |
6674 | Promoted = true; |
    for (auto *I : SpeculativelyMovedExts) {
      Value *HeadOfChain = I->getOperand(0);
      SeenChainsForSExt[HeadOfChain] = nullptr;
      ValToSExtendedUses[HeadOfChain].push_back(I);
    }
    // Update Inst as promotion happens.
    Inst = SpeculativelyMovedExts.pop_back_val();
  } else {
    // This is the first chain visited from the header; keep the current
    // chain as unhandled. Defer promoting it until we encounter another
    // SExt chain derived from the same header.
    for (auto *I : SpeculativelyMovedExts) {
      Value *HeadOfChain = I->getOperand(0);
      SeenChainsForSExt[HeadOfChain] = Inst;
6689 | } |
6690 | return false; |
6691 | } |
6692 | |
6693 | if (!AllSeenFirst && !UnhandledExts.empty()) |
6694 | for (auto *VisitedSExt : UnhandledExts) { |
      if (RemovedInsts.count(VisitedSExt))
        continue;
      TypePromotionTransaction TPT(RemovedInsts);
      SmallVector<Instruction *, 1> Exts;
      SmallVector<Instruction *, 2> Chains;
      Exts.push_back(VisitedSExt);
      bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains);
      TPT.commit();
      if (HasPromoted)
        Promoted = true;
      for (auto *I : Chains) {
        Value *HeadOfChain = I->getOperand(0);
        // Mark this as handled.
        SeenChainsForSExt[HeadOfChain] = nullptr;
        ValToSExtendedUses[HeadOfChain].push_back(I);
6710 | } |
6711 | } |
6712 | return Promoted; |
6713 | } |
6714 | |
6715 | bool CodeGenPrepare::optimizeExtUses(Instruction *I) { |
6716 | BasicBlock *DefBB = I->getParent(); |
6717 | |
6718 | // If the result of a {s|z}ext and its source are both live out, rewrite all |
6719 | // other uses of the source with result of extension. |
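  // For example (illustrative IR), with both %x and %ext live out of DefBB:
  //   DefBB:
  //     %x = add i32 %a, %b
  //     %ext = zext i32 %x to i64
  //   UseBB:
  //     %u = mul i32 %x, %c
  // the use in UseBB is rewritten (when truncation is free) to:
  //   UseBB:
  //     %x.trunc = trunc i64 %ext to i32
  //     %u = mul i32 %x.trunc, %c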
  Value *Src = I->getOperand(0);
  if (Src->hasOneUse())
    return false;

  // Only do this xform if truncating is free.
  if (!TLI->isTruncateFree(I->getType(), Src->getType()))
    return false;

  // Only safe to perform the optimization if the source is also defined in
  // this block.
  if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
6731 | return false; |
6732 | |
6733 | bool DefIsLiveOut = false; |
6734 | for (User *U : I->users()) { |
    Instruction *UI = cast<Instruction>(U);
6736 | |
6737 | // Figure out which BB this ext is used in. |
6738 | BasicBlock *UserBB = UI->getParent(); |
6739 | if (UserBB == DefBB) |
6740 | continue; |
6741 | DefIsLiveOut = true; |
6742 | break; |
6743 | } |
6744 | if (!DefIsLiveOut) |
6745 | return false; |
6746 | |
6747 | // Make sure none of the uses are PHI nodes. |
6748 | for (User *U : Src->users()) { |
    Instruction *UI = cast<Instruction>(U);
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB)
      continue;
    // Be conservative. We don't want this xform to end up introducing
    // reloads just before load / store instructions.
    if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
6756 | return false; |
6757 | } |
6758 | |
6759 | // InsertedTruncs - Only insert one trunc in each block once. |
6760 | DenseMap<BasicBlock *, Instruction *> InsertedTruncs; |
6761 | |
6762 | bool MadeChange = false; |
6763 | for (Use &U : Src->uses()) { |
    Instruction *User = cast<Instruction>(U.getUser());
6765 | |
6766 | // Figure out which BB this ext is used in. |
6767 | BasicBlock *UserBB = User->getParent(); |
6768 | if (UserBB == DefBB) |
6769 | continue; |
6770 | |
6771 | // Both src and def are live in this block. Rewrite the use. |
6772 | Instruction *&InsertedTrunc = InsertedTruncs[UserBB]; |
6773 | |
6774 | if (!InsertedTrunc) { |
6775 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); |
6776 | assert(InsertPt != UserBB->end()); |
      InsertedTrunc = new TruncInst(I, Src->getType(), "");
      InsertedTrunc->insertBefore(*UserBB, InsertPt);
      InsertedInsts.insert(InsertedTrunc);
6780 | } |
6781 | |
6782 | // Replace a use of the {s|z}ext source with a use of the result. |
6783 | U = InsertedTrunc; |
6784 | ++NumExtUses; |
6785 | MadeChange = true; |
6786 | } |
6787 | |
6788 | return MadeChange; |
6789 | } |
6790 | |
6791 | // Find loads whose uses only use some of the loaded value's bits. Add an "and" |
6792 | // just after the load if the target can fold this into one extload instruction, |
6793 | // with the hope of eliminating some of the other later "and" instructions using |
6794 | // the loaded value. "and"s that are made trivially redundant by the insertion |
6795 | // of the new "and" are removed by this function, while others (e.g. those whose |
6796 | // path from the load goes through a phi) are left for isel to potentially |
6797 | // remove. |
6798 | // |
6799 | // For example: |
6800 | // |
6801 | // b0: |
6802 | // x = load i32 |
6803 | // ... |
6804 | // b1: |
6805 | // y = and x, 0xff |
6806 | // z = use y |
6807 | // |
6808 | // becomes: |
6809 | // |
6810 | // b0: |
6811 | // x = load i32 |
6812 | // x' = and x, 0xff |
6813 | // ... |
6814 | // b1: |
6815 | // z = use x' |
6816 | // |
6817 | // whereas: |
6818 | // |
6819 | // b0: |
6820 | // x1 = load i32 |
6821 | // ... |
6822 | // b1: |
6823 | // x2 = load i32 |
6824 | // ... |
6825 | // b2: |
6826 | // x = phi x1, x2 |
6827 | // y = and x, 0xff |
6828 | // |
6829 | // becomes (after a call to optimizeLoadExt for each load): |
6830 | // |
6831 | // b0: |
6832 | // x1 = load i32 |
6833 | // x1' = and x1, 0xff |
6834 | // ... |
6835 | // b1: |
6836 | // x2 = load i32 |
6837 | // x2' = and x2, 0xff |
6838 | // ... |
6839 | // b2: |
6840 | // x = phi x1', x2' |
6841 | // y = and x, 0xff |
6842 | bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) { |
6843 | if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy()) |
6844 | return false; |
6845 | |
6846 | // Skip loads we've already transformed. |
  if (Load->hasOneUse() &&
      InsertedInsts.count(cast<Instruction>(*Load->user_begin())))
    return false;

  // Look at all uses of Load, looking through phis, to determine how many bits
  // of the loaded value are needed.
  SmallVector<Instruction *, 8> WorkList;
  SmallPtrSet<Instruction *, 16> Visited;
  SmallVector<Instruction *, 8> AndsToMaybeRemove;
  for (auto *U : Load->users())
    WorkList.push_back(cast<Instruction>(U));

  EVT LoadResultVT = TLI->getValueType(*DL, Load->getType());
  unsigned BitWidth = LoadResultVT.getSizeInBits();
  // If the BitWidth is 0, do not try to optimize the type.
  if (BitWidth == 0)
6863 | return false; |
6864 | |
6865 | APInt DemandBits(BitWidth, 0); |
6866 | APInt WidestAndBits(BitWidth, 0); |
6867 | |
6868 | while (!WorkList.empty()) { |
    Instruction *I = WorkList.pop_back_val();

    // Break use-def graph loops.
    if (!Visited.insert(I).second)
      continue;

    // For a PHI node, push all of its users.
    if (auto *Phi = dyn_cast<PHINode>(I)) {
      for (auto *U : Phi->users())
        WorkList.push_back(cast<Instruction>(U));
      continue;
    }

    switch (I->getOpcode()) {
    case Instruction::And: {
      auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1));
      if (!AndC)
        return false;
      APInt AndBits = AndC->getValue();
      DemandBits |= AndBits;
      // Keep track of the widest and mask we see.
      if (AndBits.ugt(WidestAndBits))
        WidestAndBits = AndBits;
      if (AndBits == WidestAndBits && I->getOperand(0) == Load)
        AndsToMaybeRemove.push_back(I);
      break;
    }

    case Instruction::Shl: {
      auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1));
      if (!ShlC)
        return false;
      uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1);
      DemandBits.setLowBits(BitWidth - ShiftAmt);
      break;
    }

    case Instruction::Trunc: {
      EVT TruncVT = TLI->getValueType(*DL, I->getType());
      unsigned TruncBitWidth = TruncVT.getSizeInBits();
      DemandBits.setLowBits(TruncBitWidth);
6910 | break; |
6911 | } |
6912 | |
6913 | default: |
6914 | return false; |
6915 | } |
6916 | } |
6917 | |
6918 | uint32_t ActiveBits = DemandBits.getActiveBits(); |
6919 | // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the |
6920 | // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example, |
6921 | // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but |
6922 | // (and (load x) 1) is not matched as a single instruction, rather as a LDR |
6923 | // followed by an AND. |
6924 | // TODO: Look into removing this restriction by fixing backends to either |
6925 | // return false for isLoadExtLegal for i1 or have them select this pattern to |
6926 | // a single instruction. |
6927 | // |
6928 | // Also avoid hoisting if we didn't see any ands with the exact DemandBits |
6929 | // mask, since these are the only ands that will be removed by isel. |
  if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) ||
6931 | WidestAndBits != DemandBits) |
6932 | return false; |
6933 | |
6934 | LLVMContext &Ctx = Load->getType()->getContext(); |
  Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits);
  EVT TruncVT = TLI->getValueType(*DL, TruncTy);
6937 | |
6938 | // Reject cases that won't be matched as extloads. |
  if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() ||
      !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT))
    return false;

  IRBuilder<> Builder(Load->getNextNonDebugInstruction());
  auto *NewAnd = cast<Instruction>(
      Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits)));
  // Mark this instruction as "inserted by CGP", so that other
  // optimizations don't touch it.
  InsertedInsts.insert(NewAnd);

  // Replace all uses of load with new and (except for the use of load in the
  // new and itself).
  replaceAllUsesWith(Load, NewAnd, FreshBBs, IsHugeFunc);
  NewAnd->setOperand(0, Load);

  // Remove any and instructions that are now redundant.
  for (auto *And : AndsToMaybeRemove)
    // Check that the and mask is the same as the one we decided to put on the
    // new and.
    if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) {
      replaceAllUsesWith(And, NewAnd, FreshBBs, IsHugeFunc);
      if (&*CurInstIterator == And)
        CurInstIterator = std::next(And->getIterator());
      And->eraseFromParent();
6964 | ++NumAndUses; |
6965 | } |
6966 | |
6967 | ++NumAndsAdded; |
6968 | return true; |
6969 | } |
6970 | |
6971 | /// Check if V (an operand of a select instruction) is an expensive instruction |
6972 | /// that is only used once. |
6973 | static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) { |
  auto *I = dyn_cast<Instruction>(V);
6975 | // If it's safe to speculatively execute, then it should not have side |
6976 | // effects; therefore, it's safe to sink and possibly *not* execute. |
6977 | return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) && |
6978 | TTI->isExpensiveToSpeculativelyExecute(I); |
6979 | } |
6980 | |
6981 | /// Returns true if a SelectInst should be turned into an explicit branch. |
6982 | static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI, |
6983 | const TargetLowering *TLI, |
6984 | SelectInst *SI) { |
6985 | // If even a predictable select is cheap, then a branch can't be cheaper. |
6986 | if (!TLI->isPredictableSelectExpensive()) |
6987 | return false; |
6988 | |
6989 | // FIXME: This should use the same heuristics as IfConversion to determine |
6990 | // whether a select is better represented as a branch. |
6991 | |
6992 | // If metadata tells us that the select condition is obviously predictable, |
6993 | // then we want to replace the select with a branch. |
6994 | uint64_t TrueWeight, FalseWeight; |
  if (extractBranchWeights(*SI, TrueWeight, FalseWeight)) {
    uint64_t Max = std::max(TrueWeight, FalseWeight);
    uint64_t Sum = TrueWeight + FalseWeight;
    if (Sum != 0) {
      auto Probability = BranchProbability::getBranchProbability(Max, Sum);
7000 | if (Probability > TTI->getPredictableBranchThreshold()) |
7001 | return true; |
7002 | } |
7003 | } |
7004 | |
  CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
7006 | |
7007 | // If a branch is predictable, an out-of-order CPU can avoid blocking on its |
7008 | // comparison condition. If the compare has more than one use, there's |
7009 | // probably another cmov or setcc around, so it's not worth emitting a branch. |
7010 | if (!Cmp || !Cmp->hasOneUse()) |
7011 | return false; |
7012 | |
7013 | // If either operand of the select is expensive and only needed on one side |
7014 | // of the select, we should form a branch. |
  if (sinkSelectOperand(TTI, SI->getTrueValue()) ||
      sinkSelectOperand(TTI, SI->getFalseValue()))
7017 | return true; |
7018 | |
7019 | return false; |
7020 | } |
7021 | |
7022 | /// If \p isTrue is true, return the true value of \p SI, otherwise return |
7023 | /// false value of \p SI. If the true/false value of \p SI is defined by any |
7024 | /// select instructions in \p Selects, look through the defining select |
7025 | /// instruction until the true/false value is not defined in \p Selects. |
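/// For example (illustrative), with %s1 = select i1 %c, i32 %a, i32 %b and
/// SI = select i1 %c, i32 %s1, i32 %d, both in \p Selects, the true value
/// of SI resolves through %s1 to %a.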
7026 | static Value * |
7027 | getTrueOrFalseValue(SelectInst *SI, bool isTrue, |
7028 | const SmallPtrSet<const Instruction *, 2> &Selects) { |
7029 | Value *V = nullptr; |
7030 | |
  for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI);
       DefSI = dyn_cast<SelectInst>(V)) {
    assert(DefSI->getCondition() == SI->getCondition() &&
           "The condition of DefSI does not match with SI");
    V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue());
  }

  assert(V && "Failed to get select true/false value");
7039 | return V; |
7040 | } |
7041 | |
7042 | bool CodeGenPrepare::optimizeShiftInst(BinaryOperator *Shift) { |
  assert(Shift->isShift() && "Expected a shift");
7044 | |
7045 | // If this is (1) a vector shift, (2) shifts by scalars are cheaper than |
7046 | // general vector shifts, and (3) the shift amount is a select-of-splatted |
7047 | // values, hoist the shifts before the select: |
7048 | // shift Op0, (select Cond, TVal, FVal) --> |
7049 | // select Cond, (shift Op0, TVal), (shift Op0, FVal) |
7050 | // |
7051 | // This is inverting a generic IR transform when we know that the cost of a |
7052 | // general vector shift is more than the cost of 2 shift-by-scalars. |
7053 | // We can't do this effectively in SDAG because we may not be able to |
7054 | // determine if the select operands are splats from within a basic block. |
7055 | Type *Ty = Shift->getType(); |
7056 | if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty)) |
7057 | return false; |
7058 | Value *Cond, *TVal, *FVal; |
  if (!match(Shift->getOperand(1),
             m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
    return false;
  if (!isSplatValue(TVal) || !isSplatValue(FVal))
    return false;

  IRBuilder<> Builder(Shift);
  BinaryOperator::BinaryOps Opcode = Shift->getOpcode();
  Value *NewTVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), TVal);
  Value *NewFVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), FVal);
  Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
  replaceAllUsesWith(Shift, NewSel, FreshBBs, IsHugeFunc);
7071 | Shift->eraseFromParent(); |
7072 | return true; |
7073 | } |
7074 | |
7075 | bool CodeGenPrepare::optimizeFunnelShift(IntrinsicInst *Fsh) { |
7076 | Intrinsic::ID Opcode = Fsh->getIntrinsicID(); |
  assert((Opcode == Intrinsic::fshl || Opcode == Intrinsic::fshr) &&
         "Expected a funnel shift");
7079 | |
7080 | // If this is (1) a vector funnel shift, (2) shifts by scalars are cheaper |
7081 | // than general vector shifts, and (3) the shift amount is select-of-splatted |
7082 | // values, hoist the funnel shifts before the select: |
7083 | // fsh Op0, Op1, (select Cond, TVal, FVal) --> |
7084 | // select Cond, (fsh Op0, Op1, TVal), (fsh Op0, Op1, FVal) |
7085 | // |
7086 | // This is inverting a generic IR transform when we know that the cost of a |
7087 | // general vector shift is more than the cost of 2 shift-by-scalars. |
7088 | // We can't do this effectively in SDAG because we may not be able to |
7089 | // determine if the select operands are splats from within a basic block. |
7090 | Type *Ty = Fsh->getType(); |
7091 | if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty)) |
7092 | return false; |
7093 | Value *Cond, *TVal, *FVal; |
  if (!match(Fsh->getOperand(2),
             m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
    return false;
  if (!isSplatValue(TVal) || !isSplatValue(FVal))
    return false;

  IRBuilder<> Builder(Fsh);
  Value *X = Fsh->getOperand(0), *Y = Fsh->getOperand(1);
  Value *NewTVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, TVal});
  Value *NewFVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, FVal});
  Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
  replaceAllUsesWith(Fsh, NewSel, FreshBBs, IsHugeFunc);
7106 | Fsh->eraseFromParent(); |
7107 | return true; |
7108 | } |
7109 | |
7110 | /// If we have a SelectInst that will likely profit from branch prediction, |
7111 | /// turn it into a branch. |
7112 | bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) { |
7113 | if (DisableSelectToBranch) |
7114 | return false; |
7115 | |
7116 | // If the SelectOptimize pass is enabled, selects have already been optimized. |
7117 | if (!getCGPassBuilderOption().DisableSelectOptimize) |
7118 | return false; |
7119 | |
7120 | // Find all consecutive select instructions that share the same condition. |
7121 | SmallVector<SelectInst *, 2> ASI; |
  ASI.push_back(SI);
  for (BasicBlock::iterator It = ++BasicBlock::iterator(SI);
       It != SI->getParent()->end(); ++It) {
    SelectInst *I = dyn_cast<SelectInst>(&*It);
    if (I && SI->getCondition() == I->getCondition()) {
      ASI.push_back(I);
7128 | } else { |
7129 | break; |
7130 | } |
7131 | } |
7132 | |
7133 | SelectInst *LastSI = ASI.back(); |
7134 | // Increment the current iterator to skip all the rest of select instructions |
7135 | // because they will be either "not lowered" or "all lowered" to branch. |
  CurInstIterator = std::next(LastSI->getIterator());
  // Examine debug-info attached to the consecutive select instructions. They
  // won't be individually optimised by optimizeInst, so we need to perform
  // DbgVariableRecord maintenance here instead.
  for (SelectInst *SI : ArrayRef(ASI).drop_front())
    fixupDbgVariableRecordsOnInst(*SI);

  bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);

  // Can we convert the 'select' to CF?
  if (VectorCond || SI->getMetadata(LLVMContext::MD_unpredictable))
7147 | return false; |
7148 | |
7149 | TargetLowering::SelectSupportKind SelectKind; |
7150 | if (SI->getType()->isVectorTy()) |
7151 | SelectKind = TargetLowering::ScalarCondVectorVal; |
7152 | else |
7153 | SelectKind = TargetLowering::ScalarValSelect; |
7154 | |
7155 | if (TLI->isSelectSupported(SelectKind) && |
7156 | (!isFormingBranchFromSelectProfitable(TTI, TLI, SI) || OptSize || |
       llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get())))
7158 | return false; |
7159 | |
7160 | // The DominatorTree needs to be rebuilt by any consumers after this |
7161 | // transformation. We simply reset here rather than setting the ModifiedDT |
7162 | // flag to avoid restarting the function walk in runOnFunction for each |
7163 | // select optimized. |
7164 | DT.reset(); |
7165 | |
7166 | // Transform a sequence like this: |
7167 | // start: |
7168 | // %cmp = cmp uge i32 %a, %b |
7169 | // %sel = select i1 %cmp, i32 %c, i32 %d |
7170 | // |
7171 | // Into: |
7172 | // start: |
7173 | // %cmp = cmp uge i32 %a, %b |
7174 | // %cmp.frozen = freeze %cmp |
7175 | // br i1 %cmp.frozen, label %select.true, label %select.false |
7176 | // select.true: |
7177 | // br label %select.end |
7178 | // select.false: |
7179 | // br label %select.end |
7180 | // select.end: |
7181 | // %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ] |
7182 | // |
7183 | // %cmp should be frozen, otherwise it may introduce undefined behavior. |
7184 | // In addition, we may sink instructions that produce %c or %d from |
7185 | // the entry block into the destination(s) of the new branch. |
7186 | // If the true or false blocks do not contain a sunken instruction, that |
7187 | // block and its branch may be optimized away. In that case, one side of the |
7188 | // first branch will point directly to select.end, and the corresponding PHI |
7189 | // predecessor block will be the start block. |
7190 | |
7191 | // Collect values that go on the true side and the values that go on the false |
7192 | // side. |
7193 | SmallVector<Instruction *> TrueInstrs, FalseInstrs; |
7194 | for (SelectInst *SI : ASI) { |
    if (Value *V = SI->getTrueValue(); sinkSelectOperand(TTI, V))
      TrueInstrs.push_back(cast<Instruction>(V));
    if (Value *V = SI->getFalseValue(); sinkSelectOperand(TTI, V))
      FalseInstrs.push_back(cast<Instruction>(V));
7199 | } |
7200 | |
7201 | // Split the select block, according to how many (if any) values go on each |
7202 | // side. |
7203 | BasicBlock *StartBlock = SI->getParent(); |
  BasicBlock::iterator SplitPt = std::next(BasicBlock::iterator(LastSI));
7205 | // We should split before any debug-info. |
7206 | SplitPt.setHeadBit(true); |
7207 | |
7208 | IRBuilder<> IB(SI); |
  auto *CondFr =
      IB.CreateFreeze(SI->getCondition(), SI->getName() + ".frozen");
7210 | |
7211 | BasicBlock *TrueBlock = nullptr; |
7212 | BasicBlock *FalseBlock = nullptr; |
7213 | BasicBlock *EndBlock = nullptr; |
7214 | BranchInst *TrueBranch = nullptr; |
7215 | BranchInst *FalseBranch = nullptr; |
  if (TrueInstrs.size() == 0) {
    FalseBranch = cast<BranchInst>(SplitBlockAndInsertIfElse(
        CondFr, SplitPt, false, nullptr, nullptr, LI));
    FalseBlock = FalseBranch->getParent();
    EndBlock = cast<BasicBlock>(FalseBranch->getOperand(0));
  } else if (FalseInstrs.size() == 0) {
    TrueBranch = cast<BranchInst>(SplitBlockAndInsertIfThen(
        CondFr, SplitPt, false, nullptr, nullptr, LI));
    TrueBlock = TrueBranch->getParent();
    EndBlock = cast<BasicBlock>(TrueBranch->getOperand(0));
  } else {
    Instruction *ThenTerm = nullptr;
    Instruction *ElseTerm = nullptr;
    SplitBlockAndInsertIfThenElse(CondFr, SplitPt, &ThenTerm, &ElseTerm,
                                  nullptr, nullptr, LI);
    TrueBranch = cast<BranchInst>(ThenTerm);
    FalseBranch = cast<BranchInst>(ElseTerm);
    TrueBlock = TrueBranch->getParent();
    FalseBlock = FalseBranch->getParent();
    EndBlock = cast<BasicBlock>(TrueBranch->getOperand(0));
  }
7237 | |
7238 | EndBlock->setName("select.end" ); |
7239 | if (TrueBlock) |
7240 | TrueBlock->setName("select.true.sink" ); |
7241 | if (FalseBlock) |
7242 | FalseBlock->setName(FalseInstrs.size() == 0 ? "select.false" |
7243 | : "select.false.sink" ); |
7244 | |
7245 | if (IsHugeFunc) { |
    if (TrueBlock)
      FreshBBs.insert(TrueBlock);
    if (FalseBlock)
      FreshBBs.insert(FalseBlock);
    FreshBBs.insert(EndBlock);
7251 | } |
7252 | |
  BFI->setBlockFreq(EndBlock, BFI->getBlockFreq(StartBlock));
7254 | |
7255 | static const unsigned MD[] = { |
7256 | LLVMContext::MD_prof, LLVMContext::MD_unpredictable, |
7257 | LLVMContext::MD_make_implicit, LLVMContext::MD_dbg}; |
  StartBlock->getTerminator()->copyMetadata(*SI, MD);
7259 | |
7260 | // Sink expensive instructions into the conditional blocks to avoid executing |
7261 | // them speculatively. |
  for (Instruction *I : TrueInstrs)
    I->moveBefore(TrueBranch);
  for (Instruction *I : FalseInstrs)
    I->moveBefore(FalseBranch);
7266 | |
7267 | // If we did not create a new block for one of the 'true' or 'false' paths |
7268 | // of the condition, it means that side of the branch goes to the end block |
7269 | // directly and the path originates from the start block from the point of |
7270 | // view of the new PHI. |
7271 | if (TrueBlock == nullptr) |
7272 | TrueBlock = StartBlock; |
7273 | else if (FalseBlock == nullptr) |
7274 | FalseBlock = StartBlock; |
7275 | |
7276 | SmallPtrSet<const Instruction *, 2> INS; |
  INS.insert(ASI.begin(), ASI.end());
  // Use reverse iterator because later select may use the value of the
  // earlier select, and we need to propagate value through earlier select
  // to get the PHI operand.
  for (SelectInst *SI : llvm::reverse(ASI)) {
    // The select itself is replaced with a PHI Node.
    PHINode *PN = PHINode::Create(SI->getType(), 2, "");
    PN->insertBefore(EndBlock->begin());
    PN->takeName(SI);
    PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock);
    PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock);
    PN->setDebugLoc(SI->getDebugLoc());

    replaceAllUsesWith(SI, PN, FreshBBs, IsHugeFunc);
    SI->eraseFromParent();
    INS.erase(SI);
7293 | ++NumSelectsExpanded; |
7294 | } |
7295 | |
7296 | // Instruct OptimizeBlock to skip to the next block. |
7297 | CurInstIterator = StartBlock->end(); |
7298 | return true; |
7299 | } |
7300 | |
/// Some targets only accept certain types for splat inputs. For example a VDUP
/// in MVE takes a GPR (integer) register, and instructions that incorporate a
/// VDUP (such as a VADD qd, qm, rm) also require a GPR register.
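/// For example (illustrative, assuming shouldConvertSplatType returns i32
/// for a <4 x float> splat):
/// \code
///   %ins = insertelement <4 x float> poison, float %f, i64 0
///   %splat = shufflevector <4 x float> %ins, <4 x float> poison,
///            <4 x i32> zeroinitializer
/// \endcode
/// becomes a bitcast of %f to i32, an i32 splat, and a bitcast of the result
/// back to <4 x float>, so the splat input lives in a GPR.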
7304 | bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) { |
7305 | // Accept shuf(insertelem(undef/poison, val, 0), undef/poison, <0,0,..>) only |
  if (!match(SVI, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
                            m_Undef(), m_ZeroMask())))
    return false;
  Type *NewType = TLI->shouldConvertSplatType(SVI);
  if (!NewType)
    return false;

  auto *SVIVecType = cast<FixedVectorType>(SVI->getType());
  assert(!NewType->isVectorTy() && "Expected a scalar type!");
  assert(NewType->getScalarSizeInBits() == SVIVecType->getScalarSizeInBits() &&
         "Expected a type of the same size!");
  auto *NewVecType =
      FixedVectorType::get(NewType, SVIVecType->getNumElements());

  // Create a bitcast (shuffle (insert (bitcast(..))))
  IRBuilder<> Builder(SVI->getContext());
  Builder.SetInsertPoint(SVI);
  Value *BC1 = Builder.CreateBitCast(
      cast<Instruction>(SVI->getOperand(0))->getOperand(1), NewType);
  Value *Shuffle = Builder.CreateVectorSplat(NewVecType->getNumElements(), BC1);
  Value *BC2 = Builder.CreateBitCast(Shuffle, SVIVecType);

  replaceAllUsesWith(SVI, BC2, FreshBBs, IsHugeFunc);
  RecursivelyDeleteTriviallyDeadInstructions(
      SVI, TLInfo, nullptr,
      [&](Value *V) { removeAllAssertingVHReferences(V); });
7332 | |
  // Also hoist the bitcast up to its operand if they are not in the same
  // block.
  if (auto *BCI = dyn_cast<Instruction>(BC1))
    if (auto *Op = dyn_cast<Instruction>(BCI->getOperand(0)))
      if (BCI->getParent() != Op->getParent() && !isa<PHINode>(Op) &&
          !Op->isTerminator() && !Op->isEHPad())
        BCI->moveAfter(Op);
7340 | |
7341 | return true; |
7342 | } |
7343 | |
7344 | bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) { |
7345 | // If the operands of I can be folded into a target instruction together with |
7346 | // I, duplicate and sink them. |
7347 | SmallVector<Use *, 4> OpsToSink; |
  if (!TLI->shouldSinkOperands(I, OpsToSink))
7349 | return false; |
7350 | |
7351 | // OpsToSink can contain multiple uses in a use chain (e.g. |
7352 | // (%u1 with %u1 = shufflevector), (%u2 with %u2 = zext %u1)). The dominating |
7353 | // uses must come first, so we process the ops in reverse order so as to not |
7354 | // create invalid IR. |
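  // For example (illustrative, for a target whose shouldSinkOperands accepts
  // splat shuffles feeding a multiply):
  //   BB1: %s = shufflevector <4 x i32> %v, <4 x i32> poison, zeroinitializer
  //   BB2: %m = mul <4 x i32> %a, %s
  // %s is cloned into BB2 next to %m, so isel can fold it into a
  // multiply-by-lane instruction; the original %s is erased if it became dead.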
7355 | BasicBlock *TargetBB = I->getParent(); |
7356 | bool Changed = false; |
7357 | SmallVector<Use *, 4> ToReplace; |
7358 | Instruction *InsertPoint = I; |
7359 | DenseMap<const Instruction *, unsigned long> InstOrdering; |
7360 | unsigned long InstNumber = 0; |
7361 | for (const auto &I : *TargetBB) |
7362 | InstOrdering[&I] = InstNumber++; |
7363 | |
  for (Use *U : reverse(OpsToSink)) {
    auto *UI = cast<Instruction>(U->get());
    if (isa<PHINode>(UI))
      continue;
    if (UI->getParent() == TargetBB) {
      if (InstOrdering[UI] < InstOrdering[InsertPoint])
        InsertPoint = UI;
      continue;
    }
    ToReplace.push_back(U);
7374 | } |
7375 | |
7376 | SetVector<Instruction *> MaybeDead; |
7377 | DenseMap<Instruction *, Instruction *> NewInstructions; |
  for (Use *U : ToReplace) {
    auto *UI = cast<Instruction>(U->get());
    Instruction *NI = UI->clone();

    if (IsHugeFunc) {
      // Now that we have cloned an instruction, its operands' defs may become
      // sinkable to this BB, so put their defining BBs into FreshBBs for
      // further optimization.
      for (unsigned I = 0; I < NI->getNumOperands(); ++I) {
        auto *OpDef = dyn_cast<Instruction>(NI->getOperand(I));
        if (!OpDef)
          continue;
        FreshBBs.insert(OpDef->getParent());
      }
    }

    NewInstructions[UI] = NI;
    MaybeDead.insert(UI);
    LLVM_DEBUG(dbgs() << "Sinking " << *UI << " to user " << *I << "\n");
    NI->insertBefore(InsertPoint);
    InsertPoint = NI;
    InsertedInsts.insert(NI);

    // Update the use for the new instruction, making sure that we update the
    // sunk instruction uses, if it is part of a chain that has already been
    // sunk.
    Instruction *OldI = cast<Instruction>(U->getUser());
    if (NewInstructions.count(OldI))
      NewInstructions[OldI]->setOperand(U->getOperandNo(), NI);
    else
      U->set(NI);
7408 | Changed = true; |
7409 | } |
7410 | |
7411 | // Remove instructions that are dead after sinking. |
7412 | for (auto *I : MaybeDead) { |
    if (!I->hasNUsesOrMore(1)) {
      LLVM_DEBUG(dbgs() << "Removing dead instruction: " << *I << "\n");
7415 | I->eraseFromParent(); |
7416 | } |
7417 | } |
7418 | |
7419 | return Changed; |
7420 | } |
7421 | |
7422 | bool CodeGenPrepare::optimizeSwitchType(SwitchInst *SI) { |
7423 | Value *Cond = SI->getCondition(); |
7424 | Type *OldType = Cond->getType(); |
7425 | LLVMContext &Context = Cond->getContext(); |
  EVT OldVT = TLI->getValueType(*DL, OldType);
  MVT RegType = TLI->getPreferredSwitchConditionType(Context, OldVT);
  unsigned RegWidth = RegType.getSizeInBits();

  if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth())
7431 | return false; |
7432 | |
7433 | // If the register width is greater than the type width, expand the condition |
7434 | // of the switch instruction and each case constant to the width of the |
7435 | // register. By widening the type of the switch condition, subsequent |
7436 | // comparisons (for case comparisons) will not need to be extended to the |
7437 | // preferred register width, so we will potentially eliminate N-1 extends, |
7438 | // where N is the number of cases in the switch. |
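  // For example (illustrative), on a target whose preferred switch condition
  // type is i64:
  //   switch i8 %c, label %def [ i8 1, label %bb1
  //                              i8 2, label %bb2 ]
  // becomes:
  //   %ext = zext i8 %c to i64
  //   switch i64 %ext, label %def [ i64 1, label %bb1
  //                                 i64 2, label %bb2 ]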
7439 | auto *NewType = Type::getIntNTy(C&: Context, N: RegWidth); |
7440 | |
7441 | // Extend the switch condition and case constants using the target preferred |
7442 | // extend unless the switch condition is a function argument with an extend |
7443 | // attribute. In that case, we can avoid an unnecessary mask/extension by |
7444 | // matching the argument extension instead. |
7445 | Instruction::CastOps ExtType = Instruction::ZExt; |
7446 | // Some targets prefer SExt over ZExt. |
  if (TLI->isSExtCheaperThanZExt(OldVT, RegType))
    ExtType = Instruction::SExt;

  if (auto *Arg = dyn_cast<Argument>(Cond)) {
    if (Arg->hasSExtAttr())
      ExtType = Instruction::SExt;
    if (Arg->hasZExtAttr())
      ExtType = Instruction::ZExt;
  }

  auto *ExtInst = CastInst::Create(ExtType, Cond, NewType);
  ExtInst->insertBefore(SI);
7459 | ExtInst->setDebugLoc(SI->getDebugLoc()); |
7460 | SI->setCondition(ExtInst); |
7461 | for (auto Case : SI->cases()) { |
7462 | const APInt &NarrowConst = Case.getCaseValue()->getValue(); |
    APInt WideConst = (ExtType == Instruction::ZExt)
                          ? NarrowConst.zext(RegWidth)
                          : NarrowConst.sext(RegWidth);
    Case.setValue(ConstantInt::get(Context, WideConst));
7467 | } |
7468 | |
7469 | return true; |
7470 | } |
7471 | |
7472 | bool CodeGenPrepare::optimizeSwitchPhiConstants(SwitchInst *SI) { |
7473 | // The SCCP optimization tends to produce code like this: |
7474 | // switch(x) { case 42: phi(42, ...) } |
  // Materializing the constant for the phi-argument needs instructions, so we
  // change the code to:
  // switch(x) { case 42: phi(x, ...) }

  Value *Condition = SI->getCondition();
  // Avoid endless loop in degenerate case.
  if (isa<ConstantInt>(*Condition))
7482 | return false; |
7483 | |
7484 | bool Changed = false; |
7485 | BasicBlock *SwitchBB = SI->getParent(); |
7486 | Type *ConditionType = Condition->getType(); |
7487 | |
7488 | for (const SwitchInst::CaseHandle &Case : SI->cases()) { |
7489 | ConstantInt *CaseValue = Case.getCaseValue(); |
7490 | BasicBlock *CaseBB = Case.getCaseSuccessor(); |
7491 | // Set to true if we previously checked that `CaseBB` is only reached by |
7492 | // a single case from this switch. |
7493 | bool CheckedForSinglePred = false; |
7494 | for (PHINode &PHI : CaseBB->phis()) { |
7495 | Type *PHIType = PHI.getType(); |
7496 | // If ZExt is free then we can also catch patterns like this: |
7497 | // switch((i32)x) { case 42: phi((i64)42, ...); } |
7498 | // and replace `(i64)42` with `zext i32 %x to i64`. |
      bool TryZExt =
          PHIType->isIntegerTy() &&
          PHIType->getIntegerBitWidth() > ConditionType->getIntegerBitWidth() &&
          TLI->isZExtFree(ConditionType, PHIType);
      if (PHIType == ConditionType || TryZExt) {
        // Set to true to skip this case because of multiple preds.
        bool SkipCase = false;
        Value *Replacement = nullptr;
        for (unsigned I = 0, E = PHI.getNumIncomingValues(); I != E; I++) {
          Value *PHIValue = PHI.getIncomingValue(I);
          if (PHIValue != CaseValue) {
            if (!TryZExt)
              continue;
            ConstantInt *PHIValueInt = dyn_cast<ConstantInt>(PHIValue);
            if (!PHIValueInt ||
                PHIValueInt->getValue() !=
                    CaseValue->getValue().zext(PHIType->getIntegerBitWidth()))
              continue;
          }
          if (PHI.getIncomingBlock(I) != SwitchBB)
            continue;
          // We cannot optimize if there are multiple case labels jumping to
          // this block. This check may get expensive when there are many
          // case labels so we test for it last.
          if (!CheckedForSinglePred) {
            CheckedForSinglePred = true;
            if (SI->findCaseDest(CaseBB) == nullptr) {
              SkipCase = true;
              break;
            }
          }

          if (Replacement == nullptr) {
            if (PHIValue == CaseValue) {
              Replacement = Condition;
            } else {
              IRBuilder<> Builder(SI);
              Replacement = Builder.CreateZExt(Condition, PHIType);
            }
          }
          PHI.setIncomingValue(I, Replacement);
          Changed = true;
7541 | } |
7542 | if (SkipCase) |
7543 | break; |
7544 | } |
7545 | } |
7546 | } |
7547 | return Changed; |
7548 | } |
7549 | |
7550 | bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) { |
7551 | bool Changed = optimizeSwitchType(SI); |
7552 | Changed |= optimizeSwitchPhiConstants(SI); |
7553 | return Changed; |
7554 | } |
7555 | |
7556 | namespace { |
7557 | |
/// Helper class to promote a scalar operation to a vector one.
/// This class is used to move an extractelement transition downward.
7560 | /// E.g., |
7561 | /// a = vector_op <2 x i32> |
7562 | /// b = extractelement <2 x i32> a, i32 0 |
7563 | /// c = scalar_op b |
7564 | /// store c |
7565 | /// |
7566 | /// => |
7567 | /// a = vector_op <2 x i32> |
7568 | /// c = vector_op a (equivalent to scalar_op on the related lane) |
7569 | /// * d = extractelement <2 x i32> c, i32 0 |
7570 | /// * store d |
/// Assuming both extractelement and store can be combined, we get rid of the
/// transition.
7573 | class VectorPromoteHelper { |
7574 | /// DataLayout associated with the current module. |
7575 | const DataLayout &DL; |
7576 | |
7577 | /// Used to perform some checks on the legality of vector operations. |
7578 | const TargetLowering &TLI; |
7579 | |
  /// Used to estimate the cost of the promoted chain.
7581 | const TargetTransformInfo &TTI; |
7582 | |
7583 | /// The transition being moved downwards. |
7584 | Instruction *Transition; |
7585 | |
7586 | /// The sequence of instructions to be promoted. |
7587 | SmallVector<Instruction *, 4> InstsToBePromoted; |
7588 | |
7589 | /// Cost of combining a store and an extract. |
  unsigned StoreExtractCombineCost;
7591 | |
7592 | /// Instruction that will be combined with the transition. |
7593 | Instruction *CombineInst = nullptr; |
7594 | |
7595 | /// The instruction that represents the current end of the transition. |
7596 | /// Since we are faking the promotion until we reach the end of the chain |
7597 | /// of computation, we need a way to get the current end of the transition. |
7598 | Instruction *getEndOfTransition() const { |
7599 | if (InstsToBePromoted.empty()) |
7600 | return Transition; |
7601 | return InstsToBePromoted.back(); |
7602 | } |
7603 | |
7604 | /// Return the index of the original value in the transition. |
7605 | /// E.g., for "extractelement <2 x i32> c, i32 1" the original value, |
7606 | /// c, is at index 0. |
7607 | unsigned getTransitionOriginalValueIdx() const { |
    assert(isa<ExtractElementInst>(Transition) &&
           "Other kind of transitions are not supported yet");
7610 | return 0; |
7611 | } |
7612 | |
7613 | /// Return the index of the index in the transition. |
7614 | /// E.g., for "extractelement <2 x i32> c, i32 0" the index |
7615 | /// is at index 1. |
7616 | unsigned getTransitionIdx() const { |
    assert(isa<ExtractElementInst>(Transition) &&
           "Other kind of transitions are not supported yet");
7619 | return 1; |
7620 | } |
7621 | |
7622 | /// Get the type of the transition. |
7623 | /// This is the type of the original value. |
7624 | /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the |
7625 | /// transition is <2 x i32>. |
7626 | Type *getTransitionType() const { |
    return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
7628 | } |
7629 | |
  /// Promote \p ToBePromoted by moving \p Def downward through it.
7631 | /// I.e., we have the following sequence: |
7632 | /// Def = Transition <ty1> a to <ty2> |
7633 | /// b = ToBePromoted <ty2> Def, ... |
7634 | /// => |
7635 | /// b = ToBePromoted <ty1> a, ... |
7636 | /// Def = Transition <ty1> ToBePromoted to <ty2> |
7637 | void promoteImpl(Instruction *ToBePromoted); |
7638 | |
7639 | /// Check whether or not it is profitable to promote all the |
7640 | /// instructions enqueued to be promoted. |
7641 | bool isProfitableToPromote() { |
    Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
    unsigned Index = isa<ConstantInt>(ValIdx)
                         ? cast<ConstantInt>(ValIdx)->getZExtValue()
                         : -1;
    Type *PromotedType = getTransitionType();

    StoreInst *ST = cast<StoreInst>(CombineInst);
    unsigned AS = ST->getPointerAddressSpace();
    // Check if this store is supported.
    if (!TLI.allowsMisalignedMemoryAccesses(
            TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
            ST->getAlign())) {
7654 | // If this is not supported, there is no way we can combine |
7655 | // the extract with the store. |
7656 | return false; |
7657 | } |
7658 | |
7659 | // The scalar chain of computation has to pay for the transition |
7660 | // scalar to vector. |
7661 | // The vector chain has to account for the combining cost. |
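    // For example (illustrative), promoting a single add gives roughly:
    //   ScalarCost = cost(extractelement) + cost(scalar add)
    //   VectorCost = StoreExtractCombineCost + cost(vector add)
    // and the promotion is performed only when ScalarCost > VectorCost.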
7662 | enum TargetTransformInfo::TargetCostKind CostKind = |
7663 | TargetTransformInfo::TCK_RecipThroughput; |
    InstructionCost ScalarCost =
        TTI.getVectorInstrCost(*Transition, PromotedType, CostKind, Index);
    InstructionCost VectorCost = StoreExtractCombineCost;
    for (const auto &Inst : InstsToBePromoted) {
      // Compute the cost.
      // By construction, all instructions being promoted are arithmetic ones.
      // Moreover, one argument is a constant that can be viewed as a splat
      // constant.
      Value *Arg0 = Inst->getOperand(0);
      bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) ||
                            isa<ConstantFP>(Arg0);
      TargetTransformInfo::OperandValueInfo Arg0Info, Arg1Info;
      if (IsArg0Constant)
        Arg0Info.Kind = TargetTransformInfo::OK_UniformConstantValue;
      else
        Arg1Info.Kind = TargetTransformInfo::OK_UniformConstantValue;

      ScalarCost += TTI.getArithmeticInstrCost(
          Inst->getOpcode(), Inst->getType(), CostKind, Arg0Info, Arg1Info);
      VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
                                               CostKind, Arg0Info, Arg1Info);
    }
7686 | LLVM_DEBUG( |
7687 | dbgs() << "Estimated cost of computation to be promoted:\nScalar: " |
7688 | << ScalarCost << "\nVector: " << VectorCost << '\n'); |
7689 | return ScalarCost > VectorCost; |
7690 | } |
7691 | |
7692 | /// Generate a constant vector with \p Val with the same |
7693 | /// number of elements as the transition. |
7694 | /// \p UseSplat defines whether or not \p Val should be replicated |
7695 | /// across the whole vector. |
7696 | /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>, |
7697 | /// otherwise we generate a vector with as many undef as possible: |
7698 | /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only |
7699 | /// used at the index of the extract. |
7700 | Value *getConstantVector(Constant *Val, bool UseSplat) const { |
    unsigned ExtractIdx = std::numeric_limits<unsigned>::max();
    if (!UseSplat) {
      // If we cannot determine where the constant must be, we have to
      // use a splat constant.
      Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
      if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
        ExtractIdx = CstVal->getSExtValue();
      else
        UseSplat = true;
    }

    ElementCount EC = cast<VectorType>(getTransitionType())->getElementCount();
    if (UseSplat)
      return ConstantVector::getSplat(EC, Val);

    if (!EC.isScalable()) {
      SmallVector<Constant *, 4> ConstVec;
      UndefValue *UndefVal = UndefValue::get(Val->getType());
      for (unsigned Idx = 0; Idx != EC.getKnownMinValue(); ++Idx) {
        if (Idx == ExtractIdx)
          ConstVec.push_back(Val);
        else
          ConstVec.push_back(UndefVal);
      }
      return ConstantVector::get(ConstVec);
    } else
      llvm_unreachable(
          "Generate scalable vector for non-splat is unimplemented");
7729 | } |
7730 | |
7731 | /// Check if promoting to a vector type an operand at \p OperandIdx |
7732 | /// in \p Use can trigger undefined behavior. |
7733 | static bool canCauseUndefinedBehavior(const Instruction *Use, |
7734 | unsigned OperandIdx) { |
    // It is not safe to introduce undef when the operand is on
    // the right-hand side of a division-like instruction.
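    // For example (illustrative), with %t = extractelement <2 x i32> %v, i32 0
    // and %r = udiv i32 %x, %t, promoting would divide by every lane of %v
    // (and by undef lanes of expanded constants), which may divide by zero
    // or undef.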
7737 | if (OperandIdx != 1) |
7738 | return false; |
7739 | switch (Use->getOpcode()) { |
7740 | default: |
7741 | return false; |
7742 | case Instruction::SDiv: |
7743 | case Instruction::UDiv: |
7744 | case Instruction::SRem: |
7745 | case Instruction::URem: |
7746 | return true; |
7747 | case Instruction::FDiv: |
7748 | case Instruction::FRem: |
7749 | return !Use->hasNoNaNs(); |
7750 | } |
7751 | llvm_unreachable(nullptr); |
7752 | } |
7753 | |
7754 | public: |
7755 | VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI, |
7756 | const TargetTransformInfo &TTI, Instruction *Transition, |
7757 | unsigned CombineCost) |
7758 | : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition), |
7759 | StoreExtractCombineCost(CombineCost) { |
7760 | assert(Transition && "Do not know how to promote null" ); |
7761 | } |
7762 | |
7763 | /// Check if we can promote \p ToBePromoted to \p Type. |
7764 | bool canPromote(const Instruction *ToBePromoted) const { |
7765 | // We could support CastInst too. |
7766 | return isa<BinaryOperator>(Val: ToBePromoted); |
7767 | } |
7768 | |
/// Check if it is profitable to promote \p ToBePromoted
/// by moving the transition down through it.
7771 | bool shouldPromote(const Instruction *ToBePromoted) const { |
7772 | // Promote only if all the operands can be statically expanded. |
7773 | // Indeed, we do not want to introduce any new kind of transitions. |
7774 | for (const Use &U : ToBePromoted->operands()) { |
7775 | const Value *Val = U.get(); |
7776 | if (Val == getEndOfTransition()) { |
7777 | // If the use is a division and the transition is on the rhs, |
7778 | // we cannot promote the operation, otherwise we may create a |
7779 | // division by zero. |
7780 | if (canCauseUndefinedBehavior(Use: ToBePromoted, OperandIdx: U.getOperandNo())) |
7781 | return false; |
7782 | continue; |
7783 | } |
7784 | if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) && |
7785 | !isa<ConstantFP>(Val)) |
7786 | return false; |
7787 | } |
7788 | // Check that the resulting operation is legal. |
7789 | int ISDOpcode = TLI.InstructionOpcodeToISD(Opcode: ToBePromoted->getOpcode()); |
7790 | if (!ISDOpcode) |
7791 | return false; |
7792 | return StressStoreExtract || |
7793 | TLI.isOperationLegalOrCustom( |
7794 | Op: ISDOpcode, VT: TLI.getValueType(DL, Ty: getTransitionType(), AllowUnknown: true)); |
7795 | } |
7796 | |
7797 | /// Check whether or not \p Use can be combined |
7798 | /// with the transition. |
7799 | /// I.e., is it possible to do Use(Transition) => AnotherUse? |
7800 | bool canCombine(const Instruction *Use) { return isa<StoreInst>(Val: Use); } |
7801 | |
7802 | /// Record \p ToBePromoted as part of the chain to be promoted. |
7803 | void enqueueForPromotion(Instruction *ToBePromoted) { |
7804 | InstsToBePromoted.push_back(Elt: ToBePromoted); |
7805 | } |
7806 | |
7807 | /// Set the instruction that will be combined with the transition. |
7808 | void recordCombineInstruction(Instruction *ToBeCombined) { |
7809 | assert(canCombine(ToBeCombined) && "Unsupported instruction to combine" ); |
7810 | CombineInst = ToBeCombined; |
7811 | } |
7812 | |
/// Promote all the instructions enqueued for promotion if it
/// is profitable.
7815 | /// \return True if the promotion happened, false otherwise. |
7816 | bool promote() { |
7817 | // Check if there is something to promote. |
7818 | // Right now, if we do not have anything to combine with, |
7819 | // we assume the promotion is not profitable. |
7820 | if (InstsToBePromoted.empty() || !CombineInst) |
7821 | return false; |
7822 | |
7823 | // Check cost. |
7824 | if (!StressStoreExtract && !isProfitableToPromote()) |
7825 | return false; |
7826 | |
7827 | // Promote. |
7828 | for (auto &ToBePromoted : InstsToBePromoted) |
7829 | promoteImpl(ToBePromoted); |
7830 | InstsToBePromoted.clear(); |
7831 | return true; |
7832 | } |
7833 | }; |
7834 | |
7835 | } // end anonymous namespace |
7836 | |
7837 | void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) { |
7838 | // At this point, we know that all the operands of ToBePromoted but Def |
7839 | // can be statically promoted. |
7840 | // For Def, we need to use its parameter in ToBePromoted: |
7841 | // b = ToBePromoted ty1 a |
7842 | // Def = Transition ty1 b to ty2 |
7843 | // Move the transition down. |
7844 | // 1. Replace all uses of the promoted operation by the transition. |
7845 | // = ... b => = ... Def. |
7846 | assert(ToBePromoted->getType() == Transition->getType() && |
7847 | "The type of the result of the transition does not match " |
7848 | "the final type" ); |
7849 | ToBePromoted->replaceAllUsesWith(V: Transition); |
7850 | // 2. Update the type of the uses. |
7851 | // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def. |
7852 | Type *TransitionTy = getTransitionType(); |
7853 | ToBePromoted->mutateType(Ty: TransitionTy); |
7854 | // 3. Update all the operands of the promoted operation with promoted |
7855 | // operands. |
7856 | // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a. |
7857 | for (Use &U : ToBePromoted->operands()) { |
7858 | Value *Val = U.get(); |
7859 | Value *NewVal = nullptr; |
7860 | if (Val == Transition) |
7861 | NewVal = Transition->getOperand(i: getTransitionOriginalValueIdx()); |
7862 | else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) || |
7863 | isa<ConstantFP>(Val)) { |
7864 | // Use a splat constant if it is not safe to use undef. |
7865 | NewVal = getConstantVector( |
7866 | Val: cast<Constant>(Val), |
7867 | UseSplat: isa<UndefValue>(Val) || |
7868 | canCauseUndefinedBehavior(Use: ToBePromoted, OperandIdx: U.getOperandNo())); |
7869 | } else |
7870 | llvm_unreachable("Did you modified shouldPromote and forgot to update " |
7871 | "this?" ); |
7872 | ToBePromoted->setOperand(i: U.getOperandNo(), Val: NewVal); |
7873 | } |
7874 | Transition->moveAfter(MovePos: ToBePromoted); |
7875 | Transition->setOperand(i: getTransitionOriginalValueIdx(), Val: ToBePromoted); |
7876 | } |
7877 | |
7878 | /// Some targets can do store(extractelement) with one instruction. |
7879 | /// Try to push the extractelement towards the stores when the target |
7880 | /// has this feature and this is profitable. |
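/// For example (an illustrative sketch, assuming the target can combine the
/// resulting pair):
///   %e = extractelement <2 x i32> %v, i32 1
///   %a = add i32 %e, 5
///   store i32 %a, ptr %p
/// becomes
///   %va = add <2 x i32> %v, <i32 undef, i32 5>
///   %e2 = extractelement <2 x i32> %va, i32 1
///   store i32 %e2, ptr %p
/// so the extractelement can be folded into the store.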
bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
7882 | unsigned CombineCost = std::numeric_limits<unsigned>::max(); |
7883 | if (DisableStoreExtract || |
7884 | (!StressStoreExtract && |
7885 | !TLI->canCombineStoreAndExtract(VectorTy: Inst->getOperand(i: 0)->getType(), |
7886 | Idx: Inst->getOperand(i: 1), Cost&: CombineCost))) |
7887 | return false; |
7888 | |
7889 | // At this point we know that Inst is a vector to scalar transition. |
7890 | // Try to move it down the def-use chain, until: |
7891 | // - We can combine the transition with its single use |
7892 | // => we got rid of the transition. |
7893 | // - We escape the current basic block |
7894 | // => we would need to check that we are moving it at a cheaper place and |
7895 | // we do not do that for now. |
7896 | BasicBlock *Parent = Inst->getParent(); |
7897 | LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n'); |
7898 | VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost); |
7899 | // If the transition has more than one use, assume this is not going to be |
7900 | // beneficial. |
7901 | while (Inst->hasOneUse()) { |
7902 | Instruction *ToBePromoted = cast<Instruction>(Val: *Inst->user_begin()); |
7903 | LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n'); |
7904 | |
7905 | if (ToBePromoted->getParent() != Parent) { |
7906 | LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block (" |
7907 | << ToBePromoted->getParent()->getName() |
7908 | << ") than the transition (" << Parent->getName() |
7909 | << ").\n" ); |
7910 | return false; |
7911 | } |
7912 | |
7913 | if (VPH.canCombine(Use: ToBePromoted)) { |
7914 | LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n' |
7915 | << "will be combined with: " << *ToBePromoted << '\n'); |
7916 | VPH.recordCombineInstruction(ToBeCombined: ToBePromoted); |
7917 | bool Changed = VPH.promote(); |
7918 | NumStoreExtractExposed += Changed; |
7919 | return Changed; |
7920 | } |
7921 | |
7922 | LLVM_DEBUG(dbgs() << "Try promoting.\n" ); |
7923 | if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted)) |
7924 | return false; |
7925 | |
7926 | LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n" ); |
7927 | |
7928 | VPH.enqueueForPromotion(ToBePromoted); |
7929 | Inst = ToBePromoted; |
7930 | } |
7931 | return false; |
7932 | } |
7933 | |
7934 | /// For the instruction sequence of store below, F and I values |
7935 | /// are bundled together as an i64 value before being stored into memory. |
7936 | /// Sometimes it is more efficient to generate separate stores for F and I, |
7937 | /// which can remove the bitwise instructions or sink them to colder places. |
7938 | /// |
7939 | /// (store (or (zext (bitcast F to i32) to i64), |
7940 | /// (shl (zext I to i64), 32)), addr) --> |
7941 | /// (store F, addr) and (store I, addr+4) |
7942 | /// |
/// Similarly, splitting other merged stores can also be beneficial, like:
7944 | /// For pair of {i32, i32}, i64 store --> two i32 stores. |
7945 | /// For pair of {i32, i16}, i64 store --> two i32 stores. |
7946 | /// For pair of {i16, i16}, i32 store --> two i16 stores. |
7947 | /// For pair of {i16, i8}, i32 store --> two i16 stores. |
7948 | /// For pair of {i8, i8}, i16 store --> two i8 stores. |
7949 | /// |
7950 | /// We allow each target to determine specifically which kind of splitting is |
7951 | /// supported. |
7952 | /// |
/// The store patterns are commonly seen from the simple code snippet below
/// when only std::make_pair(...) is SROA-transformed before being inlined
/// into hoo.
7955 | /// void goo(const std::pair<int, float> &); |
7956 | /// hoo() { |
7957 | /// ... |
7958 | /// goo(std::make_pair(tmp, ftmp)); |
7959 | /// ... |
7960 | /// } |
7961 | /// |
/// Although we already have similar splitting in DAG Combine, we duplicate
/// it in CodeGenPrepare to catch the case in which the pattern spans
/// multiple BBs. The logic in DAG Combine is kept to catch cases generated
/// during code expansion.
7966 | static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL, |
7967 | const TargetLowering &TLI) { |
7968 | // Handle simple but common cases only. |
7969 | Type *StoreType = SI.getValueOperand()->getType(); |
7970 | |
7971 | // The code below assumes shifting a value by <number of bits>, |
7972 | // whereas scalable vectors would have to be shifted by |
7973 | // <2log(vscale) + number of bits> in order to store the |
7974 | // low/high parts. Bailing out for now. |
7975 | if (StoreType->isScalableTy()) |
7976 | return false; |
7977 | |
7978 | if (!DL.typeSizeEqualsStoreSize(Ty: StoreType) || |
7979 | DL.getTypeSizeInBits(Ty: StoreType) == 0) |
7980 | return false; |
7981 | |
7982 | unsigned HalfValBitSize = DL.getTypeSizeInBits(Ty: StoreType) / 2; |
7983 | Type *SplitStoreType = Type::getIntNTy(C&: SI.getContext(), N: HalfValBitSize); |
7984 | if (!DL.typeSizeEqualsStoreSize(Ty: SplitStoreType)) |
7985 | return false; |
7986 | |
7987 | // Don't split the store if it is volatile. |
7988 | if (SI.isVolatile()) |
7989 | return false; |
7990 | |
7991 | // Match the following patterns: |
7992 | // (store (or (zext LValue to i64), |
7993 | // (shl (zext HValue to i64), 32)), HalfValBitSize) |
// or
// (store (or (shl (zext HValue to i64), 32),
//            (zext LValue to i64)), HalfValBitSize)
// Expect both operands of the OR and the first operand of the SHL to have
// only one use.
7999 | Value *LValue, *HValue; |
8000 | if (!match(V: SI.getValueOperand(), |
8001 | P: m_c_Or(L: m_OneUse(SubPattern: m_ZExt(Op: m_Value(V&: LValue))), |
8002 | R: m_OneUse(SubPattern: m_Shl(L: m_OneUse(SubPattern: m_ZExt(Op: m_Value(V&: HValue))), |
8003 | R: m_SpecificInt(V: HalfValBitSize)))))) |
8004 | return false; |
8005 | |
// Check that LValue and HValue are integers no wider than HalfValBitSize.
8007 | if (!LValue->getType()->isIntegerTy() || |
8008 | DL.getTypeSizeInBits(Ty: LValue->getType()) > HalfValBitSize || |
8009 | !HValue->getType()->isIntegerTy() || |
8010 | DL.getTypeSizeInBits(Ty: HValue->getType()) > HalfValBitSize) |
8011 | return false; |
8012 | |
8013 | // If LValue/HValue is a bitcast instruction, use the EVT before bitcast |
8014 | // as the input of target query. |
8015 | auto *LBC = dyn_cast<BitCastInst>(Val: LValue); |
8016 | auto *HBC = dyn_cast<BitCastInst>(Val: HValue); |
8017 | EVT LowTy = LBC ? EVT::getEVT(Ty: LBC->getOperand(i_nocapture: 0)->getType()) |
8018 | : EVT::getEVT(Ty: LValue->getType()); |
8019 | EVT HighTy = HBC ? EVT::getEVT(Ty: HBC->getOperand(i_nocapture: 0)->getType()) |
8020 | : EVT::getEVT(Ty: HValue->getType()); |
8021 | if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LTy: LowTy, HTy: HighTy)) |
8022 | return false; |
8023 | |
8024 | // Start to split store. |
8025 | IRBuilder<> Builder(SI.getContext()); |
8026 | Builder.SetInsertPoint(&SI); |
8027 | |
// If LValue/HValue is a bitcast in another BB, create a new one in the
// current BB so it may be merged with the split stores by the DAG combiner.
8030 | if (LBC && LBC->getParent() != SI.getParent()) |
8031 | LValue = Builder.CreateBitCast(V: LBC->getOperand(i_nocapture: 0), DestTy: LBC->getType()); |
8032 | if (HBC && HBC->getParent() != SI.getParent()) |
8033 | HValue = Builder.CreateBitCast(V: HBC->getOperand(i_nocapture: 0), DestTy: HBC->getType()); |
8034 | |
8035 | bool IsLE = SI.getDataLayout().isLittleEndian(); |
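// For an i64 store split into i32 halves (illustrative): on little-endian
// targets the low half is stored at Addr and the high half at Addr + 4; on
// big-endian targets the placement is reversed.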
8036 | auto CreateSplitStore = [&](Value *V, bool Upper) { |
8037 | V = Builder.CreateZExtOrBitCast(V, DestTy: SplitStoreType); |
8038 | Value *Addr = SI.getPointerOperand(); |
8039 | Align Alignment = SI.getAlign(); |
8040 | const bool IsOffsetStore = (IsLE && Upper) || (!IsLE && !Upper); |
8041 | if (IsOffsetStore) { |
8042 | Addr = Builder.CreateGEP( |
8043 | Ty: SplitStoreType, Ptr: Addr, |
8044 | IdxList: ConstantInt::get(Ty: Type::getInt32Ty(C&: SI.getContext()), V: 1)); |
8045 | |
8046 | // When splitting the store in half, naturally one half will retain the |
8047 | // alignment of the original wider store, regardless of whether it was |
8048 | // over-aligned or not, while the other will require adjustment. |
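// E.g. (illustrative), for an align-8 i64 store split into i32 halves, the
// base half keeps align 8 while the offset half gets
// commonAlignment(8, HalfValBitSize / 8) == commonAlignment(8, 4) == 4.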
8049 | Alignment = commonAlignment(A: Alignment, Offset: HalfValBitSize / 8); |
8050 | } |
8051 | Builder.CreateAlignedStore(Val: V, Ptr: Addr, Align: Alignment); |
8052 | }; |
8053 | |
8054 | CreateSplitStore(LValue, false); |
8055 | CreateSplitStore(HValue, true); |
8056 | |
8057 | // Delete the old store. |
8058 | SI.eraseFromParent(); |
8059 | return true; |
8060 | } |
8061 | |
8062 | // Return true if the GEP has two operands, the first operand is of a sequential |
8063 | // type, and the second operand is a constant. |
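// For instance (illustrative), '%g = getelementptr i32, ptr %p, i64 4'
// qualifies, whereas a gep into a struct field or one with a variable index
// does not.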
8064 | static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) { |
8065 | gep_type_iterator I = gep_type_begin(GEP: *GEP); |
8066 | return GEP->getNumOperands() == 2 && I.isSequential() && |
8067 | isa<ConstantInt>(Val: GEP->getOperand(i_nocapture: 1)); |
8068 | } |
8069 | |
8070 | // Try unmerging GEPs to reduce liveness interference (register pressure) across |
8071 | // IndirectBr edges. Since IndirectBr edges tend to touch on many blocks, |
8072 | // reducing liveness interference across those edges benefits global register |
8073 | // allocation. Currently handles only certain cases. |
8074 | // |
8075 | // For example, unmerge %GEPI and %UGEPI as below. |
8076 | // |
8077 | // ---------- BEFORE ---------- |
8078 | // SrcBlock: |
8079 | // ... |
8080 | // %GEPIOp = ... |
8081 | // ... |
8082 | // %GEPI = gep %GEPIOp, Idx |
8083 | // ... |
8084 | // indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ] |
8085 | // (* %GEPI is alive on the indirectbr edges due to other uses ahead) |
// (* %GEPIOp is alive on the indirectbr edges only because it's used by
8087 | // %UGEPI) |
8088 | // |
8089 | // DstB0: ... (there may be a gep similar to %UGEPI to be unmerged) |
8090 | // DstB1: ... (there may be a gep similar to %UGEPI to be unmerged) |
8091 | // ... |
8092 | // |
8093 | // DstBi: |
8094 | // ... |
8095 | // %UGEPI = gep %GEPIOp, UIdx |
8096 | // ... |
8097 | // --------------------------- |
8098 | // |
8099 | // ---------- AFTER ---------- |
8100 | // SrcBlock: |
8101 | // ... (same as above) |
8102 | // (* %GEPI is still alive on the indirectbr edges) |
8103 | // (* %GEPIOp is no longer alive on the indirectbr edges as a result of the |
8104 | // unmerging) |
8105 | // ... |
8106 | // |
8107 | // DstBi: |
8108 | // ... |
8109 | // %UGEPI = gep %GEPI, (UIdx-Idx) |
8110 | // ... |
8111 | // --------------------------- |
8112 | // |
8113 | // The register pressure on the IndirectBr edges is reduced because %GEPIOp is |
8114 | // no longer alive on them. |
8115 | // |
// We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting the
// merging of GEPs in the first place in InstCombiner::visitGetElementPtrInst()
// so as not to disable further simplifications and optimizations as a result
// of GEP merging.
8120 | // |
// Note this unmerging may increase the length of the data-flow critical path
// (the path from %GEPIOp to %UGEPI now goes through %GEPI), which is a
// tradeoff between register pressure and the length of the data-flow critical
// path. Restricting this to the uncommon IndirectBr case minimizes the impact
// of the potentially longer critical path, if any, as well as the impact on
// compile time.
8127 | static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI, |
8128 | const TargetTransformInfo *TTI) { |
8129 | BasicBlock *SrcBlock = GEPI->getParent(); |
8130 | // Check that SrcBlock ends with an IndirectBr. If not, give up. The common |
8131 | // (non-IndirectBr) cases exit early here. |
8132 | if (!isa<IndirectBrInst>(Val: SrcBlock->getTerminator())) |
8133 | return false; |
8134 | // Check that GEPI is a simple gep with a single constant index. |
8135 | if (!GEPSequentialConstIndexed(GEP: GEPI)) |
8136 | return false; |
8137 | ConstantInt *GEPIIdx = cast<ConstantInt>(Val: GEPI->getOperand(i_nocapture: 1)); |
8138 | // Check that GEPI is a cheap one. |
8139 | if (TTI->getIntImmCost(Imm: GEPIIdx->getValue(), Ty: GEPIIdx->getType(), |
8140 | CostKind: TargetTransformInfo::TCK_SizeAndLatency) > |
8141 | TargetTransformInfo::TCC_Basic) |
8142 | return false; |
8143 | Value *GEPIOp = GEPI->getOperand(i_nocapture: 0); |
8144 | // Check that GEPIOp is an instruction that's also defined in SrcBlock. |
8145 | if (!isa<Instruction>(Val: GEPIOp)) |
8146 | return false; |
8147 | auto *GEPIOpI = cast<Instruction>(Val: GEPIOp); |
8148 | if (GEPIOpI->getParent() != SrcBlock) |
8149 | return false; |
// Check that GEPI is used outside the block, meaning it's alive on the
8151 | // IndirectBr edge(s). |
8152 | if (llvm::none_of(Range: GEPI->users(), P: [&](User *Usr) { |
8153 | if (auto *I = dyn_cast<Instruction>(Val: Usr)) { |
8154 | if (I->getParent() != SrcBlock) { |
8155 | return true; |
8156 | } |
8157 | } |
8158 | return false; |
8159 | })) |
8160 | return false; |
8161 | // The second elements of the GEP chains to be unmerged. |
8162 | std::vector<GetElementPtrInst *> UGEPIs; |
// Check each user of GEPIOp to see if unmerging would make GEPIOp no longer
// alive on the IndirectBr edges.
8165 | for (User *Usr : GEPIOp->users()) { |
8166 | if (Usr == GEPI) |
8167 | continue; |
8168 | // Check if Usr is an Instruction. If not, give up. |
8169 | if (!isa<Instruction>(Val: Usr)) |
8170 | return false; |
8171 | auto *UI = cast<Instruction>(Val: Usr); |
// If Usr is in the same block as GEPIOp, that is fine; skip it.
8173 | if (UI->getParent() == SrcBlock) |
8174 | continue; |
8175 | // Check if Usr is a GEP. If not, give up. |
8176 | if (!isa<GetElementPtrInst>(Val: Usr)) |
8177 | return false; |
8178 | auto *UGEPI = cast<GetElementPtrInst>(Val: Usr); |
8179 | // Check if UGEPI is a simple gep with a single constant index and GEPIOp is |
8180 | // the pointer operand to it. If so, record it in the vector. If not, give |
8181 | // up. |
8182 | if (!GEPSequentialConstIndexed(GEP: UGEPI)) |
8183 | return false; |
8184 | if (UGEPI->getOperand(i_nocapture: 0) != GEPIOp) |
8185 | return false; |
8186 | if (UGEPI->getSourceElementType() != GEPI->getSourceElementType()) |
8187 | return false; |
8188 | if (GEPIIdx->getType() != |
8189 | cast<ConstantInt>(Val: UGEPI->getOperand(i_nocapture: 1))->getType()) |
8190 | return false; |
8191 | ConstantInt *UGEPIIdx = cast<ConstantInt>(Val: UGEPI->getOperand(i_nocapture: 1)); |
8192 | if (TTI->getIntImmCost(Imm: UGEPIIdx->getValue(), Ty: UGEPIIdx->getType(), |
8193 | CostKind: TargetTransformInfo::TCK_SizeAndLatency) > |
8194 | TargetTransformInfo::TCC_Basic) |
8195 | return false; |
8196 | UGEPIs.push_back(x: UGEPI); |
8197 | } |
8198 | if (UGEPIs.size() == 0) |
8199 | return false; |
8200 | // Check the materializing cost of (Uidx-Idx). |
8201 | for (GetElementPtrInst *UGEPI : UGEPIs) { |
8202 | ConstantInt *UGEPIIdx = cast<ConstantInt>(Val: UGEPI->getOperand(i_nocapture: 1)); |
8203 | APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue(); |
8204 | InstructionCost ImmCost = TTI->getIntImmCost( |
8205 | Imm: NewIdx, Ty: GEPIIdx->getType(), CostKind: TargetTransformInfo::TCK_SizeAndLatency); |
8206 | if (ImmCost > TargetTransformInfo::TCC_Basic) |
8207 | return false; |
8208 | } |
8209 | // Now unmerge between GEPI and UGEPIs. |
8210 | for (GetElementPtrInst *UGEPI : UGEPIs) { |
8211 | UGEPI->setOperand(i_nocapture: 0, Val_nocapture: GEPI); |
8212 | ConstantInt *UGEPIIdx = cast<ConstantInt>(Val: UGEPI->getOperand(i_nocapture: 1)); |
8213 | Constant *NewUGEPIIdx = ConstantInt::get( |
8214 | Ty: GEPIIdx->getType(), V: UGEPIIdx->getValue() - GEPIIdx->getValue()); |
8215 | UGEPI->setOperand(i_nocapture: 1, Val_nocapture: NewUGEPIIdx); |
8216 | // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not |
8217 | // inbounds to avoid UB. |
8218 | if (!GEPI->isInBounds()) { |
8219 | UGEPI->setIsInBounds(false); |
8220 | } |
8221 | } |
8222 | // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not |
8223 | // alive on IndirectBr edges). |
8224 | assert(llvm::none_of(GEPIOp->users(), |
8225 | [&](User *Usr) { |
8226 | return cast<Instruction>(Usr)->getParent() != SrcBlock; |
8227 | }) && |
8228 | "GEPIOp is used outside SrcBlock" ); |
8229 | return true; |
8230 | } |
8231 | |
8232 | static bool optimizeBranch(BranchInst *Branch, const TargetLowering &TLI, |
8233 | SmallSet<BasicBlock *, 32> &FreshBBs, |
8234 | bool IsHugeFunc) { |
8235 | // Try and convert |
8236 | // %c = icmp ult %x, 8 |
8237 | // br %c, bla, blb |
8238 | // %tc = lshr %x, 3 |
8239 | // to |
8240 | // %tc = lshr %x, 3 |
8241 | // %c = icmp eq %tc, 0 |
8242 | // br %c, bla, blb |
8243 | // Creating the cmp to zero can be better for the backend, especially if the |
8244 | // lshr produces flags that can be used automatically. |
8245 | if (!TLI.preferZeroCompareBranch() || !Branch->isConditional()) |
8246 | return false; |
8247 | |
8248 | ICmpInst *Cmp = dyn_cast<ICmpInst>(Val: Branch->getCondition()); |
8249 | if (!Cmp || !isa<ConstantInt>(Val: Cmp->getOperand(i_nocapture: 1)) || !Cmp->hasOneUse()) |
8250 | return false; |
8251 | |
8252 | Value *X = Cmp->getOperand(i_nocapture: 0); |
8253 | APInt CmpC = cast<ConstantInt>(Val: Cmp->getOperand(i_nocapture: 1))->getValue(); |
8254 | |
8255 | for (auto *U : X->users()) { |
8256 | Instruction *UI = dyn_cast<Instruction>(Val: U); |
8257 | // A quick dominance check |
8258 | if (!UI || |
8259 | (UI->getParent() != Branch->getParent() && |
8260 | UI->getParent() != Branch->getSuccessor(i: 0) && |
8261 | UI->getParent() != Branch->getSuccessor(i: 1)) || |
8262 | (UI->getParent() != Branch->getParent() && |
8263 | !UI->getParent()->getSinglePredecessor())) |
8264 | continue; |
8265 | |
8266 | if (CmpC.isPowerOf2() && Cmp->getPredicate() == ICmpInst::ICMP_ULT && |
8267 | match(V: UI, P: m_Shr(L: m_Specific(V: X), R: m_SpecificInt(V: CmpC.logBase2())))) { |
8268 | IRBuilder<> Builder(Branch); |
8269 | if (UI->getParent() != Branch->getParent()) |
8270 | UI->moveBefore(MovePos: Branch); |
8271 | UI->dropPoisonGeneratingFlags(); |
8272 | Value *NewCmp = Builder.CreateCmp(Pred: ICmpInst::ICMP_EQ, LHS: UI, |
8273 | RHS: ConstantInt::get(Ty: UI->getType(), V: 0)); |
8274 | LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n" ); |
8275 | LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n" ); |
8276 | replaceAllUsesWith(Old: Cmp, New: NewCmp, FreshBBs, IsHuge: IsHugeFunc); |
8277 | return true; |
8278 | } |
8279 | if (Cmp->isEquality() && |
8280 | (match(V: UI, P: m_Add(L: m_Specific(V: X), R: m_SpecificInt(V: -CmpC))) || |
8281 | match(V: UI, P: m_Sub(L: m_Specific(V: X), R: m_SpecificInt(V: CmpC))))) { |
8282 | IRBuilder<> Builder(Branch); |
8283 | if (UI->getParent() != Branch->getParent()) |
8284 | UI->moveBefore(MovePos: Branch); |
8285 | UI->dropPoisonGeneratingFlags(); |
8286 | Value *NewCmp = Builder.CreateCmp(Pred: Cmp->getPredicate(), LHS: UI, |
8287 | RHS: ConstantInt::get(Ty: UI->getType(), V: 0)); |
8288 | LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n" ); |
8289 | LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n" ); |
8290 | replaceAllUsesWith(Old: Cmp, New: NewCmp, FreshBBs, IsHuge: IsHugeFunc); |
8291 | return true; |
8292 | } |
8293 | } |
8294 | return false; |
8295 | } |
8296 | |
8297 | bool CodeGenPrepare::optimizeInst(Instruction *I, ModifyDT &ModifiedDT) { |
8298 | bool AnyChange = false; |
8299 | AnyChange = fixupDbgVariableRecordsOnInst(I&: *I); |
8300 | |
8301 | // Bail out if we inserted the instruction to prevent optimizations from |
8302 | // stepping on each other's toes. |
8303 | if (InsertedInsts.count(Ptr: I)) |
8304 | return AnyChange; |
8305 | |
8306 | // TODO: Move into the switch on opcode below here. |
8307 | if (PHINode *P = dyn_cast<PHINode>(Val: I)) { |
8308 | // It is possible for very late stage optimizations (such as SimplifyCFG) |
8309 | // to introduce PHI nodes too late to be cleaned up. If we detect such a |
8310 | // trivial PHI, go ahead and zap it here. |
8311 | if (Value *V = simplifyInstruction(I: P, Q: {*DL, TLInfo})) { |
8312 | LargeOffsetGEPMap.erase(Key: P); |
8313 | replaceAllUsesWith(Old: P, New: V, FreshBBs, IsHuge: IsHugeFunc); |
8314 | P->eraseFromParent(); |
8315 | ++NumPHIsElim; |
8316 | return true; |
8317 | } |
8318 | return AnyChange; |
8319 | } |
8320 | |
8321 | if (CastInst *CI = dyn_cast<CastInst>(Val: I)) { |
8322 | // If the source of the cast is a constant, then this should have |
8323 | // already been constant folded. The only reason NOT to constant fold |
8324 | // it is if something (e.g. LSR) was careful to place the constant |
// evaluation in a block other than the one that uses it (e.g. to hoist
8326 | // the address of globals out of a loop). If this is the case, we don't |
8327 | // want to forward-subst the cast. |
8328 | if (isa<Constant>(Val: CI->getOperand(i_nocapture: 0))) |
8329 | return AnyChange; |
8330 | |
8331 | if (OptimizeNoopCopyExpression(CI, TLI: *TLI, DL: *DL)) |
8332 | return true; |
8333 | |
8334 | if ((isa<UIToFPInst>(Val: I) || isa<SIToFPInst>(Val: I) || isa<FPToUIInst>(Val: I) || |
8335 | isa<TruncInst>(Val: I)) && |
8336 | TLI->optimizeExtendOrTruncateConversion( |
8337 | I, L: LI->getLoopFor(BB: I->getParent()), TTI: *TTI)) |
8338 | return true; |
8339 | |
8340 | if (isa<ZExtInst>(Val: I) || isa<SExtInst>(Val: I)) { |
8341 | /// Sink a zext or sext into its user blocks if the target type doesn't |
8342 | /// fit in one register |
8343 | if (TLI->getTypeAction(Context&: CI->getContext(), |
8344 | VT: TLI->getValueType(DL: *DL, Ty: CI->getType())) == |
8345 | TargetLowering::TypeExpandInteger) { |
8346 | return SinkCast(CI); |
8347 | } else { |
8348 | if (TLI->optimizeExtendOrTruncateConversion( |
8349 | I, L: LI->getLoopFor(BB: I->getParent()), TTI: *TTI)) |
8350 | return true; |
8351 | |
8352 | bool MadeChange = optimizeExt(Inst&: I); |
8353 | return MadeChange | optimizeExtUses(I); |
8354 | } |
8355 | } |
8356 | return AnyChange; |
8357 | } |
8358 | |
8359 | if (auto *Cmp = dyn_cast<CmpInst>(Val: I)) |
8360 | if (optimizeCmp(Cmp, ModifiedDT)) |
8361 | return true; |
8362 | |
8363 | if (LoadInst *LI = dyn_cast<LoadInst>(Val: I)) { |
8364 | LI->setMetadata(KindID: LLVMContext::MD_invariant_group, Node: nullptr); |
8365 | bool Modified = optimizeLoadExt(Load: LI); |
8366 | unsigned AS = LI->getPointerAddressSpace(); |
8367 | Modified |= optimizeMemoryInst(MemoryInst: I, Addr: I->getOperand(i: 0), AccessTy: LI->getType(), AddrSpace: AS); |
8368 | return Modified; |
8369 | } |
8370 | |
8371 | if (StoreInst *SI = dyn_cast<StoreInst>(Val: I)) { |
8372 | if (splitMergedValStore(SI&: *SI, DL: *DL, TLI: *TLI)) |
8373 | return true; |
8374 | SI->setMetadata(KindID: LLVMContext::MD_invariant_group, Node: nullptr); |
8375 | unsigned AS = SI->getPointerAddressSpace(); |
8376 | return optimizeMemoryInst(MemoryInst: I, Addr: SI->getOperand(i_nocapture: 1), |
8377 | AccessTy: SI->getOperand(i_nocapture: 0)->getType(), AddrSpace: AS); |
8378 | } |
8379 | |
8380 | if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Val: I)) { |
8381 | unsigned AS = RMW->getPointerAddressSpace(); |
8382 | return optimizeMemoryInst(MemoryInst: I, Addr: RMW->getPointerOperand(), AccessTy: RMW->getType(), AddrSpace: AS); |
8383 | } |
8384 | |
8385 | if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(Val: I)) { |
8386 | unsigned AS = CmpX->getPointerAddressSpace(); |
8387 | return optimizeMemoryInst(MemoryInst: I, Addr: CmpX->getPointerOperand(), |
8388 | AccessTy: CmpX->getCompareOperand()->getType(), AddrSpace: AS); |
8389 | } |
8390 | |
8391 | BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Val: I); |
8392 | |
8393 | if (BinOp && BinOp->getOpcode() == Instruction::And && EnableAndCmpSinking && |
8394 | sinkAndCmp0Expression(AndI: BinOp, TLI: *TLI, InsertedInsts)) |
8395 | return true; |
8396 | |
8397 | // TODO: Move this into the switch on opcode - it handles shifts already. |
8398 | if (BinOp && (BinOp->getOpcode() == Instruction::AShr || |
8399 | BinOp->getOpcode() == Instruction::LShr)) { |
8400 | ConstantInt *CI = dyn_cast<ConstantInt>(Val: BinOp->getOperand(i_nocapture: 1)); |
8401 | if (CI && TLI->hasExtractBitsInsn()) |
8402 | if (OptimizeExtractBits(ShiftI: BinOp, CI, TLI: *TLI, DL: *DL)) |
8403 | return true; |
8404 | } |
8405 | |
8406 | if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Val: I)) { |
8407 | if (GEPI->hasAllZeroIndices()) { |
8408 | /// The GEP operand must be a pointer, so must its result -> BitCast |
8409 | Instruction *NC = new BitCastInst(GEPI->getOperand(i_nocapture: 0), GEPI->getType(), |
8410 | GEPI->getName(), GEPI->getIterator()); |
8411 | NC->setDebugLoc(GEPI->getDebugLoc()); |
8412 | replaceAllUsesWith(Old: GEPI, New: NC, FreshBBs, IsHuge: IsHugeFunc); |
8413 | RecursivelyDeleteTriviallyDeadInstructions( |
8414 | V: GEPI, TLI: TLInfo, MSSAU: nullptr, |
8415 | AboutToDeleteCallback: [&](Value *V) { removeAllAssertingVHReferences(V); }); |
8416 | ++NumGEPsElim; |
8417 | optimizeInst(I: NC, ModifiedDT); |
8418 | return true; |
8419 | } |
8420 | if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) { |
8421 | return true; |
8422 | } |
8423 | } |
8424 | |
8425 | if (FreezeInst *FI = dyn_cast<FreezeInst>(Val: I)) { |
// freeze(icmp a, const) -> icmp (freeze a), const
8427 | // This helps generate efficient conditional jumps. |
8428 | Instruction *CmpI = nullptr; |
8429 | if (ICmpInst *II = dyn_cast<ICmpInst>(Val: FI->getOperand(i_nocapture: 0))) |
8430 | CmpI = II; |
8431 | else if (FCmpInst *F = dyn_cast<FCmpInst>(Val: FI->getOperand(i_nocapture: 0))) |
8432 | CmpI = F->getFastMathFlags().none() ? F : nullptr; |
8433 | |
8434 | if (CmpI && CmpI->hasOneUse()) { |
8435 | auto Op0 = CmpI->getOperand(i: 0), Op1 = CmpI->getOperand(i: 1); |
8436 | bool Const0 = isa<ConstantInt>(Val: Op0) || isa<ConstantFP>(Val: Op0) || |
8437 | isa<ConstantPointerNull>(Val: Op0); |
8438 | bool Const1 = isa<ConstantInt>(Val: Op1) || isa<ConstantFP>(Val: Op1) || |
8439 | isa<ConstantPointerNull>(Val: Op1); |
8440 | if (Const0 || Const1) { |
8441 | if (!Const0 || !Const1) { |
8442 | auto *F = new FreezeInst(Const0 ? Op1 : Op0, "" , CmpI->getIterator()); |
8443 | F->takeName(V: FI); |
8444 | CmpI->setOperand(i: Const0 ? 1 : 0, Val: F); |
8445 | } |
8446 | replaceAllUsesWith(Old: FI, New: CmpI, FreshBBs, IsHuge: IsHugeFunc); |
8447 | FI->eraseFromParent(); |
8448 | return true; |
8449 | } |
8450 | } |
8451 | return AnyChange; |
8452 | } |
8453 | |
8454 | if (tryToSinkFreeOperands(I)) |
8455 | return true; |
8456 | |
8457 | switch (I->getOpcode()) { |
8458 | case Instruction::Shl: |
8459 | case Instruction::LShr: |
8460 | case Instruction::AShr: |
8461 | return optimizeShiftInst(Shift: cast<BinaryOperator>(Val: I)); |
8462 | case Instruction::Call: |
8463 | return optimizeCallInst(CI: cast<CallInst>(Val: I), ModifiedDT); |
8464 | case Instruction::Select: |
8465 | return optimizeSelectInst(SI: cast<SelectInst>(Val: I)); |
8466 | case Instruction::ShuffleVector: |
8467 | return optimizeShuffleVectorInst(SVI: cast<ShuffleVectorInst>(Val: I)); |
8468 | case Instruction::Switch: |
8469 | return optimizeSwitchInst(SI: cast<SwitchInst>(Val: I)); |
8470 | case Instruction::ExtractElement: |
8471 | return optimizeExtractElementInst(Inst: cast<ExtractElementInst>(Val: I)); |
8472 | case Instruction::Br: |
8473 | return optimizeBranch(Branch: cast<BranchInst>(Val: I), TLI: *TLI, FreshBBs, IsHugeFunc); |
8474 | } |
8475 | |
8476 | return AnyChange; |
8477 | } |
8478 | |
8479 | /// Given an OR instruction, check to see if this is a bitreverse |
8480 | /// idiom. If so, insert the new intrinsic and return true. |
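/// A classic i8 instance of the idiom (illustrative) is the shift-and-mask
/// ladder
///   x = ((x & 0x55) << 1) | ((x & 0xAA) >> 1);
///   x = ((x & 0x33) << 2) | ((x & 0xCC) >> 2);
///   x = ((x & 0x0F) << 4) | ((x & 0xF0) >> 4);
/// which can be replaced by a single call to llvm.bitreverse.i8.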
8481 | bool CodeGenPrepare::makeBitReverse(Instruction &I) { |
8482 | if (!I.getType()->isIntegerTy() || |
8483 | !TLI->isOperationLegalOrCustom(Op: ISD::BITREVERSE, |
8484 | VT: TLI->getValueType(DL: *DL, Ty: I.getType(), AllowUnknown: true))) |
8485 | return false; |
8486 | |
8487 | SmallVector<Instruction *, 4> Insts; |
8488 | if (!recognizeBSwapOrBitReverseIdiom(I: &I, MatchBSwaps: false, MatchBitReversals: true, InsertedInsts&: Insts)) |
8489 | return false; |
8490 | Instruction *LastInst = Insts.back(); |
8491 | replaceAllUsesWith(Old: &I, New: LastInst, FreshBBs, IsHuge: IsHugeFunc); |
8492 | RecursivelyDeleteTriviallyDeadInstructions( |
8493 | V: &I, TLI: TLInfo, MSSAU: nullptr, |
8494 | AboutToDeleteCallback: [&](Value *V) { removeAllAssertingVHReferences(V); }); |
8495 | return true; |
8496 | } |
8497 | |
8498 | // In this pass we look for GEP and cast instructions that are used |
8499 | // across basic blocks and rewrite them to improve basic-block-at-a-time |
8500 | // selection. |
8501 | bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, ModifyDT &ModifiedDT) { |
8502 | SunkAddrs.clear(); |
8503 | bool MadeChange = false; |
8504 | |
8505 | do { |
8506 | CurInstIterator = BB.begin(); |
8507 | ModifiedDT = ModifyDT::NotModifyDT; |
8508 | while (CurInstIterator != BB.end()) { |
8509 | MadeChange |= optimizeInst(I: &*CurInstIterator++, ModifiedDT); |
8510 | if (ModifiedDT != ModifyDT::NotModifyDT) { |
// For huge functions we tend to quickly go through the inner optimization
// opportunities in the BB. So we go back to the BB head to re-optimize
// each instruction instead of going back to the function head.
8514 | if (IsHugeFunc) { |
8515 | DT.reset(); |
8516 | getDT(F&: *BB.getParent()); |
8517 | break; |
8518 | } else { |
8519 | return true; |
8520 | } |
8521 | } |
8522 | } |
8523 | } while (ModifiedDT == ModifyDT::ModifyInstDT); |
8524 | |
8525 | bool MadeBitReverse = true; |
8526 | while (MadeBitReverse) { |
8527 | MadeBitReverse = false; |
8528 | for (auto &I : reverse(C&: BB)) { |
8529 | if (makeBitReverse(I)) { |
8530 | MadeBitReverse = MadeChange = true; |
8531 | break; |
8532 | } |
8533 | } |
8534 | } |
8535 | MadeChange |= dupRetToEnableTailCallOpts(BB: &BB, ModifiedDT); |
8536 | |
8537 | return MadeChange; |
8538 | } |
8539 | |
8540 | // Some CGP optimizations may move or alter what's computed in a block. Check |
8541 | // whether a dbg.value intrinsic could be pointed at a more appropriate operand. |
8542 | bool CodeGenPrepare::fixupDbgValue(Instruction *I) { |
8543 | assert(isa<DbgValueInst>(I)); |
8544 | DbgValueInst &DVI = *cast<DbgValueInst>(Val: I); |
8545 | |
8546 | // Does this dbg.value refer to a sunk address calculation? |
8547 | bool AnyChange = false; |
8548 | SmallDenseSet<Value *> LocationOps(DVI.location_ops().begin(), |
8549 | DVI.location_ops().end()); |
8550 | for (Value *Location : LocationOps) { |
8551 | WeakTrackingVH SunkAddrVH = SunkAddrs[Location]; |
8552 | Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr; |
8553 | if (SunkAddr) { |
8554 | // Point dbg.value at locally computed address, which should give the best |
8555 | // opportunity to be accurately lowered. This update may change the type |
8556 | // of pointer being referred to; however this makes no difference to |
8557 | // debugging information, and we can't generate bitcasts that may affect |
8558 | // codegen. |
8559 | DVI.replaceVariableLocationOp(OldValue: Location, NewValue: SunkAddr); |
8560 | AnyChange = true; |
8561 | } |
8562 | } |
8563 | return AnyChange; |
8564 | } |
8565 | |
8566 | bool CodeGenPrepare::fixupDbgVariableRecordsOnInst(Instruction &I) { |
8567 | bool AnyChange = false; |
8568 | for (DbgVariableRecord &DVR : filterDbgVars(R: I.getDbgRecordRange())) |
8569 | AnyChange |= fixupDbgVariableRecord(I&: DVR); |
8570 | return AnyChange; |
8571 | } |
8572 | |
8573 | // FIXME: should updating debug-info really cause the "changed" flag to fire, |
8574 | // which can cause a function to be reprocessed? |
8575 | bool CodeGenPrepare::fixupDbgVariableRecord(DbgVariableRecord &DVR) { |
8576 | if (DVR.Type != DbgVariableRecord::LocationType::Value && |
8577 | DVR.Type != DbgVariableRecord::LocationType::Assign) |
8578 | return false; |
8579 | |
8580 | // Does this DbgVariableRecord refer to a sunk address calculation? |
8581 | bool AnyChange = false; |
8582 | SmallDenseSet<Value *> LocationOps(DVR.location_ops().begin(), |
8583 | DVR.location_ops().end()); |
8584 | for (Value *Location : LocationOps) { |
8585 | WeakTrackingVH SunkAddrVH = SunkAddrs[Location]; |
8586 | Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr; |
8587 | if (SunkAddr) { |
8588 | // Point dbg.value at locally computed address, which should give the best |
8589 | // opportunity to be accurately lowered. This update may change the type |
8590 | // of pointer being referred to; however this makes no difference to |
8591 | // debugging information, and we can't generate bitcasts that may affect |
8592 | // codegen. |
8593 | DVR.replaceVariableLocationOp(OldValue: Location, NewValue: SunkAddr); |
8594 | AnyChange = true; |
8595 | } |
8596 | } |
8597 | return AnyChange; |
8598 | } |
8599 | |
8600 | static void DbgInserterHelper(DbgValueInst *DVI, Instruction *VI) { |
8601 | DVI->removeFromParent(); |
8602 | if (isa<PHINode>(Val: VI)) |
8603 | DVI->insertBefore(InsertPos: &*VI->getParent()->getFirstInsertionPt()); |
8604 | else |
8605 | DVI->insertAfter(InsertPos: VI); |
8606 | } |
8607 | |
8608 | static void DbgInserterHelper(DbgVariableRecord *DVR, Instruction *VI) { |
8609 | DVR->removeFromParent(); |
8610 | BasicBlock *VIBB = VI->getParent(); |
8611 | if (isa<PHINode>(Val: VI)) |
8612 | VIBB->insertDbgRecordBefore(DR: DVR, Here: VIBB->getFirstInsertionPt()); |
8613 | else |
8614 | VIBB->insertDbgRecordAfter(DR: DVR, I: VI); |
8615 | } |
8616 | |
// An llvm.dbg.value may use a value before its definition, due to
8618 | // optimizations in this pass and others. Scan for such dbg.values, and rescue |
8619 | // them by moving the dbg.value to immediately after the value definition. |
8620 | // FIXME: Ideally this should never be necessary, and this has the potential |
8621 | // to re-order dbg.value intrinsics. |
8622 | bool CodeGenPrepare::placeDbgValues(Function &F) { |
8623 | bool MadeChange = false; |
8624 | DominatorTree DT(F); |
8625 | |
8626 | auto DbgProcessor = [&](auto *DbgItem, Instruction *Position) { |
8627 | SmallVector<Instruction *, 4> VIs; |
8628 | for (Value *V : DbgItem->location_ops()) |
8629 | if (Instruction *VI = dyn_cast_or_null<Instruction>(Val: V)) |
8630 | VIs.push_back(Elt: VI); |
8631 | |
8632 | // This item may depend on multiple instructions, complicating any |
8633 | // potential sink. This block takes the defensive approach, opting to |
8634 | // "undef" the item if it has more than one instruction and any of them do |
8635 | // not dominate iem. |
8636 | for (Instruction *VI : VIs) { |
8637 | if (VI->isTerminator()) |
8638 | continue; |
8639 | |
8640 | // If VI is a phi in a block with an EHPad terminator, we can't insert |
8641 | // after it. |
8642 | if (isa<PHINode>(Val: VI) && VI->getParent()->getTerminator()->isEHPad()) |
8643 | continue; |
8644 | |
8645 | // If the defining instruction dominates the dbg.value, we do not need |
8646 | // to move the dbg.value. |
8647 | if (DT.dominates(Def: VI, User: Position)) |
8648 | continue; |
8649 | |
8650 | // If we depend on multiple instructions and any of them doesn't |
8651 | // dominate this DVI, we probably can't salvage it: moving it to |
8652 | // after any of the instructions could cause us to lose the others. |
8653 | if (VIs.size() > 1) { |
8654 | LLVM_DEBUG( |
8655 | dbgs() |
8656 | << "Unable to find valid location for Debug Value, undefing:\n" |
8657 | << *DbgItem); |
8658 | DbgItem->setKillLocation(); |
8659 | break; |
8660 | } |
8661 | |
8662 | LLVM_DEBUG(dbgs() << "Moving Debug Value before :\n" |
8663 | << *DbgItem << ' ' << *VI); |
8664 | DbgInserterHelper(DbgItem, VI); |
8665 | MadeChange = true; |
8666 | ++NumDbgValueMoved; |
8667 | } |
8668 | }; |
8669 | |
8670 | for (BasicBlock &BB : F) { |
8671 | for (Instruction &Insn : llvm::make_early_inc_range(Range&: BB)) { |
8672 | // Process dbg.value intrinsics. |
8673 | DbgValueInst *DVI = dyn_cast<DbgValueInst>(Val: &Insn); |
8674 | if (DVI) { |
8675 | DbgProcessor(DVI, DVI); |
8676 | continue; |
8677 | } |
8678 | |
// If this isn't a dbg.value, process any DbgVariableRecord records
// attached to this instruction.
8681 | for (DbgVariableRecord &DVR : llvm::make_early_inc_range( |
8682 | Range: filterDbgVars(R: Insn.getDbgRecordRange()))) { |
8683 | if (DVR.Type != DbgVariableRecord::LocationType::Value) |
8684 | continue; |
8685 | DbgProcessor(&DVR, &Insn); |
8686 | } |
8687 | } |
8688 | } |
8689 | |
8690 | return MadeChange; |
8691 | } |
8692 | |
8693 | // Group scattered pseudo probes in a block to favor SelectionDAG. Scattered |
8694 | // probes can be chained dependencies of other regular DAG nodes and block DAG |
8695 | // combine optimizations. |
8696 | bool CodeGenPrepare::placePseudoProbes(Function &F) { |
8697 | bool MadeChange = false; |
8698 | for (auto &Block : F) { |
// Move the remaining probes to the beginning of the block.
8700 | auto FirstInst = Block.getFirstInsertionPt(); |
8701 | while (FirstInst != Block.end() && FirstInst->isDebugOrPseudoInst()) |
8702 | ++FirstInst; |
8703 | BasicBlock::iterator I(FirstInst); |
8704 | I++; |
8705 | while (I != Block.end()) { |
8706 | if (auto *II = dyn_cast<PseudoProbeInst>(Val: I++)) { |
8707 | II->moveBefore(MovePos: &*FirstInst); |
8708 | MadeChange = true; |
8709 | } |
8710 | } |
8711 | } |
8712 | return MadeChange; |
8713 | } |
8714 | |
8715 | /// Scale down both weights to fit into uint32_t. |
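/// For example (illustrative): NewTrue = 2^33 and NewFalse = 2^32 give
/// Scale = 2^33 / (2^32 - 1) + 1 == 3, yielding weights of roughly 2.86e9
/// and 1.43e9, both of which fit in uint32_t.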
8716 | static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) { |
8717 | uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse; |
8718 | uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1; |
8719 | NewTrue = NewTrue / Scale; |
8720 | NewFalse = NewFalse / Scale; |
8721 | } |
8722 | |
8723 | /// Some targets prefer to split a conditional branch like: |
8724 | /// \code |
8725 | /// %0 = icmp ne i32 %a, 0 |
8726 | /// %1 = icmp ne i32 %b, 0 |
8727 | /// %or.cond = or i1 %0, %1 |
8728 | /// br i1 %or.cond, label %TrueBB, label %FalseBB |
8729 | /// \endcode |
8730 | /// into multiple branch instructions like: |
8731 | /// \code |
8732 | /// bb1: |
8733 | /// %0 = icmp ne i32 %a, 0 |
8734 | /// br i1 %0, label %TrueBB, label %bb2 |
8735 | /// bb2: |
8736 | /// %1 = icmp ne i32 %b, 0 |
8737 | /// br i1 %1, label %TrueBB, label %FalseBB |
8738 | /// \endcode |
8739 | /// This usually allows instruction selection to do even further optimizations |
8740 | /// and combine the compare with the branch instruction. Currently this is |
8741 | /// applied for targets which have "cheap" jump instructions. |
8742 | /// |
8743 | /// FIXME: Remove the (equivalent?) implementation in SelectionDAG. |
8744 | /// |
8745 | bool CodeGenPrepare::splitBranchCondition(Function &F, ModifyDT &ModifiedDT) { |
8746 | if (!TM->Options.EnableFastISel || TLI->isJumpExpensive()) |
8747 | return false; |
8748 | |
8749 | bool MadeChange = false; |
8750 | for (auto &BB : F) { |
8751 | // Does this BB end with the following? |
8752 | // %cond1 = icmp|fcmp|binary instruction ... |
8753 | // %cond2 = icmp|fcmp|binary instruction ... |
// %cond.or = or|and i1 %cond1, %cond2
// br i1 %cond.or, label %dest1, label %dest2
8756 | Instruction *LogicOp; |
8757 | BasicBlock *TBB, *FBB; |
8758 | if (!match(V: BB.getTerminator(), |
8759 | P: m_Br(C: m_OneUse(SubPattern: m_Instruction(I&: LogicOp)), T&: TBB, F&: FBB))) |
8760 | continue; |
8761 | |
8762 | auto *Br1 = cast<BranchInst>(Val: BB.getTerminator()); |
8763 | if (Br1->getMetadata(KindID: LLVMContext::MD_unpredictable)) |
8764 | continue; |
8765 | |
// The merging of mostly empty BBs can cause a degenerate branch.
8767 | if (TBB == FBB) |
8768 | continue; |
8769 | |
8770 | unsigned Opc; |
8771 | Value *Cond1, *Cond2; |
8772 | if (match(V: LogicOp, |
8773 | P: m_LogicalAnd(L: m_OneUse(SubPattern: m_Value(V&: Cond1)), R: m_OneUse(SubPattern: m_Value(V&: Cond2))))) |
8774 | Opc = Instruction::And; |
8775 | else if (match(V: LogicOp, P: m_LogicalOr(L: m_OneUse(SubPattern: m_Value(V&: Cond1)), |
8776 | R: m_OneUse(SubPattern: m_Value(V&: Cond2))))) |
8777 | Opc = Instruction::Or; |
8778 | else |
8779 | continue; |
8780 | |
8781 | auto IsGoodCond = [](Value *Cond) { |
8782 | return match( |
8783 | V: Cond, |
8784 | P: m_CombineOr(L: m_Cmp(), R: m_CombineOr(L: m_LogicalAnd(L: m_Value(), R: m_Value()), |
8785 | R: m_LogicalOr(L: m_Value(), R: m_Value())))); |
8786 | }; |
8787 | if (!IsGoodCond(Cond1) || !IsGoodCond(Cond2)) |
8788 | continue; |
8789 | |
8790 | LLVM_DEBUG(dbgs() << "Before branch condition splitting\n" ; BB.dump()); |
8791 | |
8792 | // Create a new BB. |
8793 | auto *TmpBB = |
8794 | BasicBlock::Create(Context&: BB.getContext(), Name: BB.getName() + ".cond.split" , |
8795 | Parent: BB.getParent(), InsertBefore: BB.getNextNode()); |
8796 | if (IsHugeFunc) |
8797 | FreshBBs.insert(Ptr: TmpBB); |
8798 | |
// Update the original basic block by using the first condition directly in
// the branch instruction and removing the no-longer-needed and/or instruction.
8801 | Br1->setCondition(Cond1); |
8802 | LogicOp->eraseFromParent(); |
8803 | |
8804 | // Depending on the condition we have to either replace the true or the |
8805 | // false successor of the original branch instruction. |
8806 | if (Opc == Instruction::And) |
8807 | Br1->setSuccessor(idx: 0, NewSucc: TmpBB); |
8808 | else |
8809 | Br1->setSuccessor(idx: 1, NewSucc: TmpBB); |
8810 | |
8811 | // Fill in the new basic block. |
8812 | auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond: Cond2, True: TBB, False: FBB); |
8813 | if (auto *I = dyn_cast<Instruction>(Val: Cond2)) { |
8814 | I->removeFromParent(); |
8815 | I->insertBefore(InsertPos: Br2); |
8816 | } |
8817 | |
8818 | // Update PHI nodes in both successors. The original BB needs to be |
// replaced in one successor's PHI nodes, because the branch now comes from
// the newly generated BB (TmpBB). In the other successor we need to add one
8821 | // incoming edge to the PHI nodes, because both branch instructions target |
8822 | // now the same successor. Depending on the original branch condition |
8823 | // (and/or) we have to swap the successors (TrueDest, FalseDest), so that |
8824 | // we perform the correct update for the PHI nodes. |
8825 | // This doesn't change the successor order of the just created branch |
8826 | // instruction (or any other instruction). |
8827 | if (Opc == Instruction::Or) |
8828 | std::swap(a&: TBB, b&: FBB); |
8829 | |
8830 | // Replace the old BB with the new BB. |
8831 | TBB->replacePhiUsesWith(Old: &BB, New: TmpBB); |
8832 | |
8833 | // Add another incoming edge from the new BB. |
8834 | for (PHINode &PN : FBB->phis()) { |
8835 | auto *Val = PN.getIncomingValueForBlock(BB: &BB); |
8836 | PN.addIncoming(V: Val, BB: TmpBB); |
8837 | } |
8838 | |
8839 | // Update the branch weights (from SelectionDAGBuilder:: |
8840 | // FindMergedConditions). |
8841 | if (Opc == Instruction::Or) { |
8842 | // Codegen X | Y as: |
8843 | // BB1: |
8844 | // jmp_if_X TBB |
8845 | // jmp TmpBB |
8846 | // TmpBB: |
8847 | // jmp_if_Y TBB |
8848 | // jmp FBB |
8849 | // |
8850 | |
// We have flexibility in setting Prob for BB1 and Prob for TmpBB.
8852 | // The requirement is that |
8853 | // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB) |
8854 | // = TrueProb for original BB. |
8855 | // Assuming the original weights are A and B, one choice is to set BB1's |
8856 | // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice |
8857 | // assumes that |
8858 | // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB. |
8859 | // Another choice is to assume TrueProb for BB1 equals to TrueProb for |
8860 | // TmpBB, but the math is more complicated. |
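// As a sanity check (illustrative), take A = 3, B = 1: BB1 gets weights
// 3:(3+2*1), i.e. TrueProb 3/8, and TmpBB gets 3:2, i.e. TrueProb 3/5, so
// the combined TrueProb is 3/8 + (5/8)*(3/5) = 3/4 == A/(A+B), as required.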
8861 | uint64_t TrueWeight, FalseWeight; |
8862 | if (extractBranchWeights(I: *Br1, TrueVal&: TrueWeight, FalseVal&: FalseWeight)) { |
8863 | uint64_t NewTrueWeight = TrueWeight; |
8864 | uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight; |
8865 | scaleWeights(NewTrue&: NewTrueWeight, NewFalse&: NewFalseWeight); |
8866 | Br1->setMetadata(KindID: LLVMContext::MD_prof, |
8867 | Node: MDBuilder(Br1->getContext()) |
.createBranchWeights(NewTrueWeight, NewFalseWeight,
8869 | IsExpected: hasBranchWeightOrigin(I: *Br1))); |
8870 | |
8871 | NewTrueWeight = TrueWeight; |
8872 | NewFalseWeight = 2 * FalseWeight; |
8873 | scaleWeights(NewTrue&: NewTrueWeight, NewFalse&: NewFalseWeight); |
8874 | Br2->setMetadata(KindID: LLVMContext::MD_prof, |
8875 | Node: MDBuilder(Br2->getContext()) |
.createBranchWeights(NewTrueWeight, NewFalseWeight));
8877 | } |
8878 | } else { |
8879 | // Codegen X & Y as: |
8880 | // BB1: |
8881 | // jmp_if_X TmpBB |
8882 | // jmp FBB |
8883 | // TmpBB: |
8884 | // jmp_if_Y TBB |
8885 | // jmp FBB |
8886 | // |
8887 | // This requires creation of TmpBB after CurBB. |
8888 | |
8889 | // We have flexibility in setting Prob for BB1 and Prob for TmpBB. |
8890 | // The requirement is that |
8891 | // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB) |
8892 | // = FalseProb for original BB. |
8893 | // Assuming the original weights are A and B, one choice is to set BB1's |
8894 | // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice |
8895 | // assumes that |
8896 | // FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB. |
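// As a sanity check (illustrative), take A = 3, B = 1: BB1 gets weights
// (2*3+1):1, i.e. FalseProb 1/8, and TmpBB gets 6:1, i.e. FalseProb 1/7, so
// the combined FalseProb is 1/8 + (7/8)*(1/7) = 1/4 == B/(A+B), as required.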
8897 | uint64_t TrueWeight, FalseWeight; |
8898 | if (extractBranchWeights(I: *Br1, TrueVal&: TrueWeight, FalseVal&: FalseWeight)) { |
8899 | uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight; |
8900 | uint64_t NewFalseWeight = FalseWeight; |
8901 | scaleWeights(NewTrue&: NewTrueWeight, NewFalse&: NewFalseWeight); |
8902 | Br1->setMetadata(KindID: LLVMContext::MD_prof, |
8903 | Node: MDBuilder(Br1->getContext()) |
.createBranchWeights(NewTrueWeight, NewFalseWeight));
8905 | |
8906 | NewTrueWeight = 2 * TrueWeight; |
8907 | NewFalseWeight = FalseWeight; |
8908 | scaleWeights(NewTrue&: NewTrueWeight, NewFalse&: NewFalseWeight); |
8909 | Br2->setMetadata(KindID: LLVMContext::MD_prof, |
8910 | Node: MDBuilder(Br2->getContext()) |
.createBranchWeights(NewTrueWeight, NewFalseWeight));
8912 | } |
8913 | } |
8914 | |
8915 | ModifiedDT = ModifyDT::ModifyBBDT; |
8916 | MadeChange = true; |
8917 | |
8918 | LLVM_DEBUG(dbgs() << "After branch condition splitting\n" ; BB.dump(); |
8919 | TmpBB->dump()); |
8920 | } |
8921 | return MadeChange; |
8922 | } |
8923 | |