//===- ImplicitNullChecks.cpp - Fold null checks into memory accesses ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass turns explicit null checks of the form
//
//   test %r10, %r10
//   je throw_npe
//   movl (%r10), %esi
//   ...
//
// into
//
//   faulting_load_op("movl (%r10), %esi", throw_npe)
//   ...
//
// With the help of a runtime that understands the .fault_maps section,
// faulting_load_op branches to throw_npe if executing movl (%r10), %esi incurs
// a page fault.
// Store and LoadStore accesses are handled in the same way.
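//
// As an illustrative sketch (writing faulting_store_op for the store analogue
// of the faulting_load_op notation above), a null check guarding a store
//
//   test %r10, %r10
//   je throw_npe
//   movl %esi, (%r10)
//
// would likewise become
//
//   faulting_store_op("movl %esi, (%r10)", throw_npe)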
//
//===----------------------------------------------------------------------===//

28 | #include "llvm/ADT/ArrayRef.h" |
29 | #include "llvm/ADT/STLExtras.h" |
30 | #include "llvm/ADT/SmallVector.h" |
31 | #include "llvm/ADT/Statistic.h" |
32 | #include "llvm/Analysis/AliasAnalysis.h" |
33 | #include "llvm/Analysis/MemoryLocation.h" |
34 | #include "llvm/CodeGen/FaultMaps.h" |
35 | #include "llvm/CodeGen/MachineBasicBlock.h" |
36 | #include "llvm/CodeGen/MachineFunction.h" |
37 | #include "llvm/CodeGen/MachineFunctionPass.h" |
38 | #include "llvm/CodeGen/MachineInstr.h" |
39 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
40 | #include "llvm/CodeGen/MachineMemOperand.h" |
41 | #include "llvm/CodeGen/MachineOperand.h" |
42 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
43 | #include "llvm/CodeGen/PseudoSourceValue.h" |
44 | #include "llvm/CodeGen/TargetInstrInfo.h" |
45 | #include "llvm/CodeGen/TargetOpcodes.h" |
46 | #include "llvm/CodeGen/TargetRegisterInfo.h" |
47 | #include "llvm/CodeGen/TargetSubtargetInfo.h" |
48 | #include "llvm/IR/BasicBlock.h" |
49 | #include "llvm/IR/DebugLoc.h" |
50 | #include "llvm/IR/LLVMContext.h" |
51 | #include "llvm/InitializePasses.h" |
52 | #include "llvm/MC/MCInstrDesc.h" |
53 | #include "llvm/MC/MCRegisterInfo.h" |
54 | #include "llvm/Pass.h" |
55 | #include "llvm/Support/CommandLine.h" |
56 | #include <cassert> |
57 | #include <cstdint> |
58 | #include <iterator> |
59 | |
60 | using namespace llvm; |
61 | |
static cl::opt<int> PageSize("imp-null-check-page-size",
                             cl::desc("The page size of the target in bytes"),
                             cl::init(4096), cl::Hidden);

static cl::opt<unsigned> MaxInstsToConsider(
    "imp-null-max-insts-to-consider",
    cl::desc("The max number of instructions to consider hoisting loads over "
             "(the algorithm is quadratic over this number)"),
    cl::Hidden, cl::init(8));
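
// Note: both options are cl::Hidden, so they only appear under -help-hidden.
// As a usage sketch, a tool that links in this pass (llc, for instance)
// accepts them directly on its command line:
//
//   llc -imp-null-check-page-size=16384 -imp-null-max-insts-to-consider=4 ...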

#define DEBUG_TYPE "implicit-null-checks"

STATISTIC(NumImplicitNullChecks,
          "Number of explicit null checks made implicit");

namespace {

class ImplicitNullChecks : public MachineFunctionPass {
  /// Return true if \c computeDependence can process \p MI.
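  /// (Calls, instructions that may raise FP exceptions, instructions with
  /// unmodeled side effects, and instructions with any ordered -- e.g.
  /// volatile or ordered-atomic -- memory operand are rejected.)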
  static bool canHandle(const MachineInstr *MI);

  /// Helper function for \c computeDependence. Return true if \p A
  /// and \p B do not have any dependences between them, and can be
  /// re-ordered without changing program semantics.
  bool canReorder(const MachineInstr *A, const MachineInstr *B);

  /// A data type for representing the result computed by \c
  /// computeDependence. States whether it is okay to reorder the
  /// instruction passed to \c computeDependence with at most one
  /// dependency.
  struct DependenceResult {
    /// Can we actually re-order \p MI with \p Insts (see \c
    /// computeDependence).
    bool CanReorder;

    /// If non-std::nullopt, then an instruction in \p Insts that also must be
    /// hoisted.
    std::optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence;

    /*implicit*/ DependenceResult(
        bool CanReorder,
        std::optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence)
        : CanReorder(CanReorder), PotentialDependence(PotentialDependence) {
      assert((!PotentialDependence || CanReorder) &&
             "!CanReorder && PotentialDependence.has_value() not allowed!");
    }
  };

  /// Compute a result for the following question: can \p MI be
  /// re-ordered from after the instructions in \p Block to before them.
  ///
  /// \c canHandle should return true for all instructions in
  /// \p Block.
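  ///
  /// For instance, if \p Block is {I0, I1, I2} and only I1 writes a register
  /// that \p MI reads, the result is {CanReorder = true,
  /// PotentialDependence = iterator to I1}; if two of the instructions
  /// conflict with \p MI, the result is {CanReorder = false,
  /// PotentialDependence = std::nullopt}.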
  DependenceResult computeDependence(const MachineInstr *MI,
                                     ArrayRef<MachineInstr *> Block);

  /// Represents one null check that can be made implicit.
  class NullCheck {
    // The memory operation the null check can be folded into.
    MachineInstr *MemOperation;

    // The instruction actually doing the null check (Ptr != 0).
    MachineInstr *CheckOperation;

    // The block the check resides in.
    MachineBasicBlock *CheckBlock;

    // The block branched to if the pointer is non-null.
    MachineBasicBlock *NotNullSucc;

    // The block branched to if the pointer is null.
    MachineBasicBlock *NullSucc;

    // If this is non-null, then MemOperation has a dependency on this
    // instruction, and that dependency needs to be hoisted to execute before
    // MemOperation.
    MachineInstr *OnlyDependency;

  public:
    explicit NullCheck(MachineInstr *memOperation, MachineInstr *checkOperation,
                       MachineBasicBlock *checkBlock,
                       MachineBasicBlock *notNullSucc,
                       MachineBasicBlock *nullSucc,
                       MachineInstr *onlyDependency)
        : MemOperation(memOperation), CheckOperation(checkOperation),
          CheckBlock(checkBlock), NotNullSucc(notNullSucc), NullSucc(nullSucc),
          OnlyDependency(onlyDependency) {}

    MachineInstr *getMemOperation() const { return MemOperation; }

    MachineInstr *getCheckOperation() const { return CheckOperation; }

    MachineBasicBlock *getCheckBlock() const { return CheckBlock; }

    MachineBasicBlock *getNotNullSucc() const { return NotNullSucc; }

    MachineBasicBlock *getNullSucc() const { return NullSucc; }

    MachineInstr *getOnlyDependency() const { return OnlyDependency; }
  };

  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  AliasAnalysis *AA = nullptr;
  MachineFrameInfo *MFI = nullptr;

  bool analyzeBlockForNullChecks(MachineBasicBlock &MBB,
                                 SmallVectorImpl<NullCheck> &NullCheckList);
  MachineInstr *insertFaultingInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                                    MachineBasicBlock *HandlerMBB);
  void rewriteNullChecks(ArrayRef<NullCheck> NullCheckList);

  enum AliasResult {
    AR_NoAlias,
    AR_MayAlias,
    AR_WillAliasEverything
  };

  /// Returns AR_NoAlias if the memory operation in \p MI does not alias with
  /// \p PrevMI, AR_MayAlias if the two may alias, and AR_WillAliasEverything
  /// if they may alias and, moreover, any further memory operation may alias
  /// with \p PrevMI.
  AliasResult areMemoryOpsAliased(const MachineInstr &MI,
                                  const MachineInstr *PrevMI) const;

  enum SuitabilityResult {
    SR_Suitable,
    SR_Unsuitable,
    SR_Impossible
  };

  /// Return SR_Suitable if \p MI is a memory operation that can be used to
  /// implicitly null check the value in \p PointerReg, SR_Unsuitable if
  /// \p MI cannot be used to null check, and SR_Impossible if there is no
  /// point continuing the search because no later instruction could be used
  /// either. \p PrevInsts is the set of instructions seen since the explicit
  /// null check on \p PointerReg.
  SuitabilityResult isSuitableMemoryOp(const MachineInstr &MI,
                                       Register PointerReg,
                                       ArrayRef<MachineInstr *> PrevInsts);

  /// Returns true if \p DependenceMI can clobber the live-ins in the NullSucc
  /// block if it were hoisted to the NullCheck block. This is used by the
  /// caller, canHoistInst, to decide whether DependenceMI can be hoisted
  /// safely.
  bool canDependenceHoistingClobberLiveIns(MachineInstr *DependenceMI,
                                           MachineBasicBlock *NullSucc);

  /// Return true if \p FaultingMI can be hoisted from after the
  /// instructions in \p InstsSeenSoFar to before them. Set \p Dependence to a
  /// non-null value if we also need to (and legally can) hoist a dependency.
  bool canHoistInst(MachineInstr *FaultingMI,
                    ArrayRef<MachineInstr *> InstsSeenSoFar,
                    MachineBasicBlock *NullSucc, MachineInstr *&Dependence);

public:
  static char ID;

  ImplicitNullChecks() : MachineFunctionPass(ID) {
    initializeImplicitNullChecksPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().setNoVRegs();
  }
};

} // end anonymous namespace

bool ImplicitNullChecks::canHandle(const MachineInstr *MI) {
  if (MI->isCall() || MI->mayRaiseFPException() ||
      MI->hasUnmodeledSideEffects())
    return false;
  auto IsRegMask = [](const MachineOperand &MO) { return MO.isRegMask(); };
  (void)IsRegMask;

  assert(llvm::none_of(MI->operands(), IsRegMask) &&
         "Calls were filtered out above!");

  auto IsUnordered = [](MachineMemOperand *MMO) { return MMO->isUnordered(); };
  return llvm::all_of(MI->memoperands(), IsUnordered);
}

ImplicitNullChecks::DependenceResult
ImplicitNullChecks::computeDependence(const MachineInstr *MI,
                                      ArrayRef<MachineInstr *> Block) {
  assert(llvm::all_of(Block, canHandle) && "Check this first!");
  assert(!is_contained(Block, MI) && "Block must be exclusive of MI!");

  std::optional<ArrayRef<MachineInstr *>::iterator> Dep;

  for (auto I = Block.begin(), E = Block.end(); I != E; ++I) {
    if (canReorder(*I, MI))
      continue;

    if (Dep == std::nullopt) {
      // Found one possible dependency, keep track of it.
      Dep = I;
    } else {
      // We found two dependencies, so bail out.
      return {false, std::nullopt};
    }
  }

  return {true, Dep};
}

bool ImplicitNullChecks::canReorder(const MachineInstr *A,
                                    const MachineInstr *B) {
  assert(canHandle(A) && canHandle(B) && "Precondition!");

  // canHandle makes sure that we _can_ correctly analyze the dependencies
  // between A and B here -- for instance, we should not be dealing with heap
  // load-store dependencies here.
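  //
  // Concretely (an illustrative sketch): "lea 4(%rsi), %rdi" and
  // "add $1, %rcx" touch disjoint registers and can be swapped, whereas
  // "mov %rax, %rbx" and "add $1, %rbx" cannot, because both reference %rbx
  // and at least one of the two references is a def.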

  for (const auto &MOA : A->operands()) {
    if (!(MOA.isReg() && MOA.getReg()))
      continue;

    Register RegA = MOA.getReg();
    for (const auto &MOB : B->operands()) {
      if (!(MOB.isReg() && MOB.getReg()))
        continue;

      Register RegB = MOB.getReg();

      if (TRI->regsOverlap(RegA, RegB) && (MOA.isDef() || MOB.isDef()))
        return false;
    }
  }

  return true;
}

bool ImplicitNullChecks::runOnMachineFunction(MachineFunction &MF) {
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getRegInfo().getTargetRegisterInfo();
  MFI = &MF.getFrameInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  SmallVector<NullCheck, 16> NullCheckList;

  for (auto &MBB : MF)
    analyzeBlockForNullChecks(MBB, NullCheckList);

  if (!NullCheckList.empty())
    rewriteNullChecks(NullCheckList);

  return !NullCheckList.empty();
}

// Return true if any register aliasing \p Reg is live-in into \p MBB.
static bool AnyAliasLiveIn(const TargetRegisterInfo *TRI,
                           MachineBasicBlock *MBB, Register Reg) {
  for (MCRegAliasIterator AR(Reg, TRI, /*IncludeSelf*/ true); AR.isValid();
       ++AR)
    if (MBB->isLiveIn(*AR))
      return true;
  return false;
}

ImplicitNullChecks::AliasResult
ImplicitNullChecks::areMemoryOpsAliased(const MachineInstr &MI,
                                        const MachineInstr *PrevMI) const {
  // If PrevMI is not a memory access, skip the check.
  if (!(PrevMI->mayStore() || PrevMI->mayLoad()))
    return AR_NoAlias;
  // Two loads may alias, but that is harmless here.
  if (!(MI.mayStore() || PrevMI->mayStore()))
    return AR_NoAlias;
  // We lost the memory operand info, so conservatively assume they alias. If
  // the instruction is a store, there is no point continuing, because we will
  // not be able to check anything against it further on.
  if (MI.memoperands_empty())
    return MI.mayStore() ? AR_WillAliasEverything : AR_MayAlias;
  if (PrevMI->memoperands_empty())
    return PrevMI->mayStore() ? AR_WillAliasEverything : AR_MayAlias;

  for (MachineMemOperand *MMO1 : MI.memoperands()) {
    // MMO1 should have a value, because it comes from the operation we'd like
    // to use as the implicit null check.
    assert(MMO1->getValue() && "MMO1 should have a Value!");
    for (MachineMemOperand *MMO2 : PrevMI->memoperands()) {
      if (const PseudoSourceValue *PSV = MMO2->getPseudoValue()) {
        if (PSV->mayAlias(MFI))
          return AR_MayAlias;
        continue;
      }
      if (!AA->isNoAlias(
              MemoryLocation::getAfter(MMO1->getValue(), MMO1->getAAInfo()),
              MemoryLocation::getAfter(MMO2->getValue(), MMO2->getAAInfo())))
        return AR_MayAlias;
    }
  }
  return AR_NoAlias;
}

ImplicitNullChecks::SuitabilityResult
ImplicitNullChecks::isSuitableMemoryOp(const MachineInstr &MI,
                                       Register PointerReg,
                                       ArrayRef<MachineInstr *> PrevInsts) {
  // Implementation restriction for faulting_op insertion.
  // TODO: This could be relaxed if we find a test case which warrants it.
  if (MI.getDesc().getNumDefs() > 1)
    return SR_Unsuitable;

  if (!MI.mayLoadOrStore() || MI.isPredicable())
    return SR_Unsuitable;
  auto AM = TII->getAddrModeFromMemoryOp(MI, TRI);
  if (!AM || AM->Form != ExtAddrMode::Formula::Basic)
    return SR_Unsuitable;
  auto AddrMode = *AM;
  const Register BaseReg = AddrMode.BaseReg, ScaledReg = AddrMode.ScaledReg;
  int64_t Displacement = AddrMode.Displacement;

  // We need the base of the memory instruction to be the same as the register
  // where the null check is performed (i.e. PointerReg).
  if (BaseReg != PointerReg && ScaledReg != PointerReg)
    return SR_Unsuitable;
  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  unsigned PointerRegSizeInBits = TRI->getRegSizeInBits(PointerReg, MRI);
  // Bail out if the sizes of BaseReg, ScaledReg and PointerReg are not the
  // same.
  if ((BaseReg &&
       TRI->getRegSizeInBits(BaseReg, MRI) != PointerRegSizeInBits) ||
      (ScaledReg &&
       TRI->getRegSizeInBits(ScaledReg, MRI) != PointerRegSizeInBits))
    return SR_Unsuitable;

  // Returns true if RegUsedInAddr holds a known constant and its effect
  // (scaled by Multiplier) could be folded into Displacement; updates
  // Displacement if so.
  auto CalculateDisplacementFromAddrMode = [&](Register RegUsedInAddr,
                                               int64_t Multiplier) {
    // The register can be NoRegister, which is defined as zero for all
    // targets. Consider an instruction of interest such as
    // `movq 8(,%rdi,8), %rax`: here the ScaledReg is %rdi, while there is no
    // BaseReg.
    if (!RegUsedInAddr)
      return false;
    assert(Multiplier && "expected to be non-zero!");
    MachineInstr *ModifyingMI = nullptr;
    for (auto It = std::next(MachineBasicBlock::const_reverse_iterator(&MI));
         It != MI.getParent()->rend(); It++) {
      const MachineInstr *CurrMI = &*It;
      if (CurrMI->modifiesRegister(RegUsedInAddr, TRI)) {
        ModifyingMI = const_cast<MachineInstr *>(CurrMI);
        break;
      }
    }
    if (!ModifyingMI)
      return false;
    // Check for a constant value defined in the register by ModifyingMI; it
    // invalidates all earlier values of that register.
    int64_t ImmVal;
    if (!TII->getConstValDefinedInReg(*ModifyingMI, RegUsedInAddr, ImmVal))
      return false;
    // Calculate the reg size in bits, since this is needed for bailing out in
    // case of overflow.
    int32_t RegSizeInBits = TRI->getRegSizeInBits(RegUsedInAddr, MRI);
    APInt ImmValC(RegSizeInBits, ImmVal, true /*IsSigned*/);
    APInt MultiplierC(RegSizeInBits, Multiplier);
    assert(MultiplierC.isStrictlyPositive() &&
           "expected to be a positive value!");
    bool IsOverflow;
    // The sign of the product depends on the sign of ImmVal, since Multiplier
    // is always positive.
    APInt Product = ImmValC.smul_ov(MultiplierC, IsOverflow);
    if (IsOverflow)
      return false;
    APInt DisplacementC(64, Displacement, true /*isSigned*/);
    DisplacementC = Product.sadd_ov(DisplacementC, IsOverflow);
    if (IsOverflow)
      return false;

    // We only handle displacements up to 64 bits wide.
    if (DisplacementC.getActiveBits() > 64)
      return false;
    Displacement = DisplacementC.getSExtValue();
    return true;
  };

  // If a register used in the address is constant, fold its effect into the
  // displacement for ease of analysis.
  bool BaseRegIsConstVal = false, ScaledRegIsConstVal = false;
  if (CalculateDisplacementFromAddrMode(BaseReg, 1))
    BaseRegIsConstVal = true;
  if (CalculateDisplacementFromAddrMode(ScaledReg, AddrMode.Scale))
    ScaledRegIsConstVal = true;
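  // As a worked example (hypothetical values): for `movq 8(,%rdi,8), %rax`
  // where %rdi was last set by `movq $2, %rdi`, the folded displacement is
  // 8 + 2 * 8 = 24.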

  // Any register in the address that is not null checked should be part of
  // the displacement calculation; otherwise we do not know whether the
  // displacement is made up of symbolic values. This matters because we do
  // not want to incorrectly assume that the load falls in the zeroth
  // (faulting) page in the "sane offset check" below.
  if ((BaseReg && BaseReg != PointerReg && !BaseRegIsConstVal) ||
      (ScaledReg && ScaledReg != PointerReg && !ScaledRegIsConstVal))
    return SR_Unsuitable;

  // We want the mem access to be issued at a sane offset from PointerReg,
  // so that if PointerReg is null then the access reliably page faults.
  if (!(-PageSize < Displacement && Displacement < PageSize))
    return SR_Unsuitable;
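  // For instance, with the default -imp-null-check-page-size=4096, a
  // displacement of 8 passes this check while 8192 does not: if PointerReg
  // is null, address 8192 may lie on a mapped page, and the access would then
  // not fault.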

  // Finally, check whether the current memory access aliases with previous
  // ones.
  for (auto *PrevMI : PrevInsts) {
    AliasResult AR = areMemoryOpsAliased(MI, PrevMI);
    if (AR == AR_WillAliasEverything)
      return SR_Impossible;
    if (AR == AR_MayAlias)
      return SR_Unsuitable;
  }
  return SR_Suitable;
}

bool ImplicitNullChecks::canDependenceHoistingClobberLiveIns(
    MachineInstr *DependenceMI, MachineBasicBlock *NullSucc) {
  for (const auto &DependenceMO : DependenceMI->operands()) {
    if (!(DependenceMO.isReg() && DependenceMO.getReg()))
      continue;

    // Make sure that we won't clobber any live-ins to the sibling block by
    // hoisting Dependency. For instance, we can't hoist INST to before the
    // null check (even if it is safe, and does not violate any dependencies
    // in the non_null_block) if %rdx is live-in to _null_block.
    //
    //    test %rcx, %rcx
    //    je _null_block
    //  _non_null_block:
    //    %rdx = INST
    //    ...
    //
    // This restriction does not apply to the faulting load inst, because if
    // the pointer loaded from is in the null page, the load will not
    // semantically execute, and so will not affect the machine state. That
    // is, if the load was loading into %rax and it faults, the value of %rax
    // should stay the same as it would have been had the load not executed,
    // and we'd have branched to NullSucc directly.
    if (AnyAliasLiveIn(TRI, NullSucc, DependenceMO.getReg()))
      return true;
  }

  // The dependence does not clobber live-ins in the NullSucc block.
  return false;
}

bool ImplicitNullChecks::canHoistInst(MachineInstr *FaultingMI,
                                      ArrayRef<MachineInstr *> InstsSeenSoFar,
                                      MachineBasicBlock *NullSucc,
                                      MachineInstr *&Dependence) {
  auto DepResult = computeDependence(FaultingMI, InstsSeenSoFar);
  if (!DepResult.CanReorder)
    return false;

  if (!DepResult.PotentialDependence) {
    Dependence = nullptr;
    return true;
  }

  auto DependenceItr = *DepResult.PotentialDependence;
  auto *DependenceMI = *DependenceItr;

  // We don't want to reason about speculating loads. Note -- at this point
  // we should have already filtered out all of the other non-speculatable
  // things, like calls and stores. We also do not want to hoist stores,
  // because a hoisted store would modify memory even in the case where
  // FaultingMI ends up faulting.
  assert(canHandle(DependenceMI) && "Should never have reached here!");
  if (DependenceMI->mayLoadOrStore())
    return false;

  if (canDependenceHoistingClobberLiveIns(DependenceMI, NullSucc))
    return false;

  auto DepDepResult =
      computeDependence(DependenceMI, {InstsSeenSoFar.begin(), DependenceItr});

  if (!DepDepResult.CanReorder || DepDepResult.PotentialDependence)
    return false;

  Dependence = DependenceMI;
  return true;
}

/// Analyze MBB to check if its terminating branch can be turned into an
/// implicit null check. If yes, append a description of the said null check
/// to NullCheckList and return true, else return false.
bool ImplicitNullChecks::analyzeBlockForNullChecks(
    MachineBasicBlock &MBB, SmallVectorImpl<NullCheck> &NullCheckList) {
  using MachineBranchPredicate = TargetInstrInfo::MachineBranchPredicate;

  MDNode *BranchMD = nullptr;
  if (auto *BB = MBB.getBasicBlock())
    BranchMD = BB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit);
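
  // Frontends opt a branch into this transformation by attaching
  // !make.implicit metadata to it; in LLVM IR the input looks roughly like:
  //
  //   %is.null = icmp eq ptr %p, null
  //   br i1 %is.null, label %throw.npe, label %ok, !make.implicit !0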

  if (!BranchMD)
    return false;

  MachineBranchPredicate MBP;

  if (TII->analyzeBranchPredicate(MBB, MBP, /*AllowModify=*/true))
    return false;

  // Is the predicate comparing an integer to zero?
  if (!(MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
        (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
         MBP.Predicate == MachineBranchPredicate::PRED_EQ)))
    return false;

  // If there is a separate condition-generating instruction, we choose not to
  // transform unless we can remove both the condition and the consuming
  // branch.
  if (MBP.ConditionDef && !MBP.SingleUseCondition)
    return false;

  MachineBasicBlock *NotNullSucc, *NullSucc;

  if (MBP.Predicate == MachineBranchPredicate::PRED_NE) {
    NotNullSucc = MBP.TrueDest;
    NullSucc = MBP.FalseDest;
  } else {
    NotNullSucc = MBP.FalseDest;
    NullSucc = MBP.TrueDest;
  }

  // We handle the simplest case for now. We can potentially do better by
  // using the machine dominator tree.
  if (NotNullSucc->pred_size() != 1)
    return false;

  const Register PointerReg = MBP.LHS.getReg();

  if (MBP.ConditionDef) {
    // To prevent the invalid transformation of the following code:
    //
    //   mov %rax, %rcx
    //   test %rax, %rax
    //   %rax = ...
    //   je throw_npe
    //   mov (%rcx), %r9
    //   mov (%rax), %r10
    //
    // into:
    //
    //   mov %rax, %rcx
    //   %rax = ...
    //   faulting_load_op("movl (%rax), %r10", throw_npe)
    //   mov (%rcx), %r9
    //
    // we must ensure that there are no instructions between the 'test' and
    // conditional jump that modify %rax.
    assert(MBP.ConditionDef->getParent() == &MBB &&
           "Should be in basic block");

    for (auto I = MBB.rbegin(); MBP.ConditionDef != &*I; ++I)
      if (I->modifiesRegister(PointerReg, TRI))
        return false;
  }
  // Starting with a code fragment like:
  //
  //   test %rax, %rax
  //   jne LblNotNull
  //
  //  LblNull:
  //   callq throw_NullPointerException
  //
  //  LblNotNull:
  //   Inst0
  //   Inst1
  //   ...
  //   Def = Load (%rax + <offset>)
  //   ...
  //
  //
  // we want to end up with
  //
  //   Def = FaultingLoad (%rax + <offset>), LblNull
  //   jmp LblNotNull ;; explicit or fallthrough
  //
  //  LblNotNull:
  //   Inst0
  //   Inst1
  //   ...
  //
  //  LblNull:
  //   callq throw_NullPointerException
  //
  //
  // To see why this is legal, consider the two possibilities:
  //
  //  1. %rax is null: since we constrain <offset> to be less than PageSize,
  //     the load instruction dereferences the null page, causing a
  //     segmentation fault.
  //
  //  2. %rax is not null: in this case we know that the load cannot fault, as
  //     otherwise the load would've faulted in the original program too and
  //     the original program would've been undefined.
  //
  // This reasoning cannot be extended to justify hoisting through arbitrary
  // control flow. For instance, in the example below (in pseudo-C)
  //
  //    if (ptr == null) { throw_npe(); unreachable; }
  //    if (some_cond) { return 42; }
  //    v = ptr->field;  // LD
  //    ...
  //
  // we cannot (without code duplication) use the load marked "LD" to null
  // check ptr -- clause (2) above does not apply in this case. In the above
  // program the safety of ptr->field can be dependent on some_cond; and, for
  // instance, ptr could be some non-null invalid reference that never gets
  // loaded from because some_cond is always true.

  SmallVector<MachineInstr *, 8> InstsSeenSoFar;

  for (auto &MI : *NotNullSucc) {
    if (!canHandle(&MI) || InstsSeenSoFar.size() >= MaxInstsToConsider)
      return false;

    MachineInstr *Dependence;
    SuitabilityResult SR = isSuitableMemoryOp(MI, PointerReg, InstsSeenSoFar);
    if (SR == SR_Impossible)
      return false;
    if (SR == SR_Suitable &&
        canHoistInst(&MI, InstsSeenSoFar, NullSucc, Dependence)) {
      NullCheckList.emplace_back(&MI, MBP.ConditionDef, &MBB, NotNullSucc,
                                 NullSucc, Dependence);
      return true;
    }

    // If MI re-defines the PointerReg in a way that changes the value of
    // PointerReg if it was null, then we cannot move further.
    if (!TII->preservesZeroValueInReg(&MI, PointerReg, TRI))
      return false;
    InstsSeenSoFar.push_back(&MI);
  }

  return false;
}

/// Wrap a machine instruction, MI, into a FAULTING machine instruction.
/// The FAULTING instruction does the same load/store as MI
/// (defining the same register), and branches to HandlerMBB if the mem access
/// faults. The FAULTING instruction is inserted at the end of MBB.
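///
/// Schematically (a sketch of the resulting MIR shape, not exact syntax), a
/// load like "movl (%r10), %esi" with handler %bb.3 becomes
///
///   $esi = FAULTING_OP <FaultMaps::FaultingLoad>, %bb.3,
///          <opcode of the original load>, <original address operands...>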
MachineInstr *ImplicitNullChecks::insertFaultingInstr(
    MachineInstr *MI, MachineBasicBlock *MBB, MachineBasicBlock *HandlerMBB) {
  DebugLoc DL;
  unsigned NumDefs = MI->getDesc().getNumDefs();
  assert(NumDefs <= 1 && "other cases unhandled!");

  Register DefReg;
  if (NumDefs != 0) {
    DefReg = MI->getOperand(0).getReg();
    assert(NumDefs == 1 && "expected exactly one def!");
  }

  FaultMaps::FaultKind FK;
  if (MI->mayLoad())
    FK =
        MI->mayStore() ? FaultMaps::FaultingLoadStore : FaultMaps::FaultingLoad;
  else
    FK = FaultMaps::FaultingStore;

  auto MIB = BuildMI(MBB, DL, TII->get(TargetOpcode::FAULTING_OP), DefReg)
                 .addImm(FK)
                 .addMBB(HandlerMBB)
                 .addImm(MI->getOpcode());

  for (auto &MO : MI->uses()) {
    if (MO.isReg()) {
      MachineOperand NewMO = MO;
      if (MO.isUse()) {
        NewMO.setIsKill(false);
      } else {
        assert(MO.isDef() && "Expected def or use");
        NewMO.setIsDead(false);
      }
      MIB.add(NewMO);
    } else {
      MIB.add(MO);
    }
  }

  MIB.setMemRefs(MI->memoperands());

  return MIB;
}

/// Rewrite the null checks in NullCheckList into implicit null checks.
void ImplicitNullChecks::rewriteNullChecks(
    ArrayRef<ImplicitNullChecks::NullCheck> NullCheckList) {
  DebugLoc DL;

  for (const auto &NC : NullCheckList) {
    // Remove the conditional branch dependent on the null check.
    unsigned BranchesRemoved = TII->removeBranch(*NC.getCheckBlock());
    (void)BranchesRemoved;
    assert(BranchesRemoved > 0 && "expected at least one branch!");

    if (auto *DepMI = NC.getOnlyDependency()) {
      DepMI->removeFromParent();
      NC.getCheckBlock()->insert(NC.getCheckBlock()->end(), DepMI);
    }

    // Insert a faulting instruction where the conditional branch was
    // originally. The check we did earlier ensures that this bit of code
    // motion is legal. We do not touch the successors list for any basic
    // block since we haven't changed control flow; we've just made it
    // implicit.
    MachineInstr *FaultingInstr = insertFaultingInstr(
        NC.getMemOperation(), NC.getCheckBlock(), NC.getNullSucc());
    // Now the values defined by MemOperation, if any, are live-ins of
    // MemOperation's block. The original operation may define implicit-defs
    // alongside the value.
    MachineBasicBlock *MBB = NC.getMemOperation()->getParent();
    for (const MachineOperand &MO : FaultingInstr->all_defs()) {
      Register Reg = MO.getReg();
      if (!Reg || MBB->isLiveIn(Reg))
        continue;
      MBB->addLiveIn(Reg);
    }

    if (auto *DepMI = NC.getOnlyDependency()) {
      for (auto &MO : DepMI->all_defs()) {
        if (!MO.getReg() || MO.isDead())
          continue;
        if (!NC.getNotNullSucc()->isLiveIn(MO.getReg()))
          NC.getNotNullSucc()->addLiveIn(MO.getReg());
      }
    }

    NC.getMemOperation()->eraseFromParent();
    if (auto *CheckOp = NC.getCheckOperation())
      CheckOp->eraseFromParent();

    // Insert an *unconditional* branch to the not-null successor - we expect
    // block placement to remove fallthroughs later.
    TII->insertBranch(*NC.getCheckBlock(), NC.getNotNullSucc(), nullptr,
                      /*Cond=*/{}, DL);

    NumImplicitNullChecks++;
  }
}

char ImplicitNullChecks::ID = 0;

char &llvm::ImplicitNullChecksID = ImplicitNullChecks::ID;

INITIALIZE_PASS_BEGIN(ImplicitNullChecks, DEBUG_TYPE,
                      "Implicit null checks", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(ImplicitNullChecks, DEBUG_TYPE,
                    "Implicit null checks", false, false)