1 | //===- ImplicitNullChecks.cpp - Fold null checks into memory accesses -----===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This pass turns explicit null checks of the form |
10 | // |
11 | // test %r10, %r10 |
12 | // je throw_npe |
13 | // movl (%r10), %esi |
14 | // ... |
15 | // |
// into
17 | // |
18 | // faulting_load_op("movl (%r10), %esi", throw_npe) |
19 | // ... |
20 | // |
21 | // With the help of a runtime that understands the .fault_maps section, |
22 | // faulting_load_op branches to throw_npe if executing movl (%r10), %esi incurs |
23 | // a page fault. |
24 | // Store and LoadStore are also supported. |
25 | // |
26 | //===----------------------------------------------------------------------===// |
27 | |
28 | #include "llvm/ADT/ArrayRef.h" |
29 | #include "llvm/ADT/STLExtras.h" |
30 | #include "llvm/ADT/SmallVector.h" |
31 | #include "llvm/ADT/Statistic.h" |
32 | #include "llvm/Analysis/AliasAnalysis.h" |
33 | #include "llvm/Analysis/MemoryLocation.h" |
34 | #include "llvm/CodeGen/FaultMaps.h" |
35 | #include "llvm/CodeGen/MachineBasicBlock.h" |
36 | #include "llvm/CodeGen/MachineFunction.h" |
37 | #include "llvm/CodeGen/MachineFunctionPass.h" |
38 | #include "llvm/CodeGen/MachineInstr.h" |
39 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
40 | #include "llvm/CodeGen/MachineMemOperand.h" |
41 | #include "llvm/CodeGen/MachineOperand.h" |
42 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
43 | #include "llvm/CodeGen/PseudoSourceValue.h" |
44 | #include "llvm/CodeGen/TargetInstrInfo.h" |
45 | #include "llvm/CodeGen/TargetOpcodes.h" |
46 | #include "llvm/CodeGen/TargetRegisterInfo.h" |
47 | #include "llvm/CodeGen/TargetSubtargetInfo.h" |
48 | #include "llvm/IR/BasicBlock.h" |
49 | #include "llvm/IR/DebugLoc.h" |
50 | #include "llvm/IR/LLVMContext.h" |
51 | #include "llvm/InitializePasses.h" |
52 | #include "llvm/MC/MCInstrDesc.h" |
53 | #include "llvm/MC/MCRegisterInfo.h" |
54 | #include "llvm/Pass.h" |
55 | #include "llvm/Support/CommandLine.h" |
56 | #include <cassert> |
57 | #include <cstdint> |
58 | #include <iterator> |
59 | |
60 | using namespace llvm; |
61 | |
static cl::opt<int> PageSize("imp-null-check-page-size",
                             cl::desc("The page size of the target in bytes"),
                             cl::init(4096), cl::Hidden);
65 | |
static cl::opt<unsigned> MaxInstsToConsider(
    "imp-null-max-insts-to-consider",
    cl::desc("The max number of instructions to consider hoisting loads over "
             "(the algorithm is quadratic over this number)"),
    cl::Hidden, cl::init(8));
71 | |
72 | #define DEBUG_TYPE "implicit-null-checks" |
73 | |
STATISTIC(NumImplicitNullChecks,
          "Number of explicit null checks made implicit");
76 | |
77 | namespace { |
78 | |
79 | class ImplicitNullChecks : public MachineFunctionPass { |
80 | /// Return true if \c computeDependence can process \p MI. |
81 | static bool canHandle(const MachineInstr *MI); |
82 | |
83 | /// Helper function for \c computeDependence. Return true if \p A |
84 | /// and \p B do not have any dependences between them, and can be |
85 | /// re-ordered without changing program semantics. |
86 | bool canReorder(const MachineInstr *A, const MachineInstr *B); |
87 | |
88 | /// A data type for representing the result computed by \c |
89 | /// computeDependence. States whether it is okay to reorder the |
90 | /// instruction passed to \c computeDependence with at most one |
91 | /// dependency. |
92 | struct DependenceResult { |
    /// Can we actually re-order \p MI with \p Block (see \c
    /// computeDependence).
95 | bool CanReorder; |
96 | |
    /// If not std::nullopt, an instruction in \p Block that must also be
    /// hoisted.
99 | std::optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence; |
100 | |
101 | /*implicit*/ DependenceResult( |
102 | bool CanReorder, |
103 | std::optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence) |
104 | : CanReorder(CanReorder), PotentialDependence(PotentialDependence) { |
105 | assert((!PotentialDependence || CanReorder) && |
             "!CanReorder && PotentialDependence.hasValue() not allowed!");
107 | } |
108 | }; |
109 | |
  /// Compute a result for the following question: can \p MI be
  /// re-ordered from after the instructions in \p Block to before them.
  ///
  /// \c canHandle should return true for all instructions in \p Block.
115 | DependenceResult computeDependence(const MachineInstr *MI, |
116 | ArrayRef<MachineInstr *> Block); |
117 | |
118 | /// Represents one null check that can be made implicit. |
119 | class NullCheck { |
120 | // The memory operation the null check can be folded into. |
121 | MachineInstr *MemOperation; |
122 | |
123 | // The instruction actually doing the null check (Ptr != 0). |
124 | MachineInstr *CheckOperation; |
125 | |
126 | // The block the check resides in. |
127 | MachineBasicBlock *CheckBlock; |
128 | |
129 | // The block branched to if the pointer is non-null. |
130 | MachineBasicBlock *NotNullSucc; |
131 | |
132 | // The block branched to if the pointer is null. |
133 | MachineBasicBlock *NullSucc; |
134 | |
    // If this is non-null, then MemOperation has a dependency on this
    // instruction, and it needs to be hoisted to execute before MemOperation.
137 | MachineInstr *OnlyDependency; |
138 | |
139 | public: |
140 | explicit NullCheck(MachineInstr *memOperation, MachineInstr *checkOperation, |
141 | MachineBasicBlock *checkBlock, |
142 | MachineBasicBlock *notNullSucc, |
143 | MachineBasicBlock *nullSucc, |
144 | MachineInstr *onlyDependency) |
145 | : MemOperation(memOperation), CheckOperation(checkOperation), |
146 | CheckBlock(checkBlock), NotNullSucc(notNullSucc), NullSucc(nullSucc), |
147 | OnlyDependency(onlyDependency) {} |
148 | |
149 | MachineInstr *getMemOperation() const { return MemOperation; } |
150 | |
151 | MachineInstr *getCheckOperation() const { return CheckOperation; } |
152 | |
153 | MachineBasicBlock *getCheckBlock() const { return CheckBlock; } |
154 | |
155 | MachineBasicBlock *getNotNullSucc() const { return NotNullSucc; } |
156 | |
157 | MachineBasicBlock *getNullSucc() const { return NullSucc; } |
158 | |
159 | MachineInstr *getOnlyDependency() const { return OnlyDependency; } |
160 | }; |
161 | |
162 | const TargetInstrInfo *TII = nullptr; |
163 | const TargetRegisterInfo *TRI = nullptr; |
164 | AliasAnalysis *AA = nullptr; |
165 | MachineFrameInfo *MFI = nullptr; |
166 | |
167 | bool analyzeBlockForNullChecks(MachineBasicBlock &MBB, |
168 | SmallVectorImpl<NullCheck> &NullCheckList); |
169 | MachineInstr *insertFaultingInstr(MachineInstr *MI, MachineBasicBlock *MBB, |
170 | MachineBasicBlock *HandlerMBB); |
171 | void rewriteNullChecks(ArrayRef<NullCheck> NullCheckList); |
172 | |
173 | enum AliasResult { |
174 | AR_NoAlias, |
175 | AR_MayAlias, |
176 | AR_WillAliasEverything |
177 | }; |
178 | |
  /// Returns AR_NoAlias if the memory operation in \p MI does not alias with
  /// \p PrevMI, AR_MayAlias if they may alias, and AR_WillAliasEverything if
  /// they may alias and any further memory operation may alias with \p PrevMI.
182 | AliasResult areMemoryOpsAliased(const MachineInstr &MI, |
183 | const MachineInstr *PrevMI) const; |
184 | |
185 | enum SuitabilityResult { |
186 | SR_Suitable, |
187 | SR_Unsuitable, |
188 | SR_Impossible |
189 | }; |
190 | |
  /// Return SR_Suitable if \p MI is a memory operation that can be used to
  /// implicitly null check the value in \p PointerReg, SR_Unsuitable if
  /// \p MI cannot be used to null check, and SR_Impossible if there is no
  /// point continuing the search because no later instruction can be used
  /// either. \p PrevInsts is the set of instructions seen since
  /// the explicit null check on \p PointerReg.
197 | SuitabilityResult isSuitableMemoryOp(const MachineInstr &MI, |
198 | unsigned PointerReg, |
199 | ArrayRef<MachineInstr *> PrevInsts); |
200 | |
  /// Returns true if \p DependenceMI would clobber any live-in of the NullSucc
  /// block if it were hoisted to the NullCheck block. This is used by the
  /// caller, canHoistInst, to decide whether DependenceMI can be hoisted
  /// safely.
204 | bool canDependenceHoistingClobberLiveIns(MachineInstr *DependenceMI, |
205 | MachineBasicBlock *NullSucc); |
206 | |
207 | /// Return true if \p FaultingMI can be hoisted from after the |
208 | /// instructions in \p InstsSeenSoFar to before them. Set \p Dependence to a |
209 | /// non-null value if we also need to (and legally can) hoist a dependency. |
210 | bool canHoistInst(MachineInstr *FaultingMI, |
211 | ArrayRef<MachineInstr *> InstsSeenSoFar, |
212 | MachineBasicBlock *NullSucc, MachineInstr *&Dependence); |
213 | |
214 | public: |
215 | static char ID; |
216 | |
217 | ImplicitNullChecks() : MachineFunctionPass(ID) { |
218 | initializeImplicitNullChecksPass(*PassRegistry::getPassRegistry()); |
219 | } |
220 | |
221 | bool runOnMachineFunction(MachineFunction &MF) override; |
222 | |
223 | void getAnalysisUsage(AnalysisUsage &AU) const override { |
224 | AU.addRequired<AAResultsWrapperPass>(); |
225 | MachineFunctionPass::getAnalysisUsage(AU); |
226 | } |
227 | |
228 | MachineFunctionProperties getRequiredProperties() const override { |
229 | return MachineFunctionProperties().set( |
230 | MachineFunctionProperties::Property::NoVRegs); |
231 | } |
232 | }; |
233 | |
234 | } // end anonymous namespace |
235 | |
236 | bool ImplicitNullChecks::canHandle(const MachineInstr *MI) { |
237 | if (MI->isCall() || MI->mayRaiseFPException() || |
238 | MI->hasUnmodeledSideEffects()) |
239 | return false; |
240 | auto IsRegMask = [](const MachineOperand &MO) { return MO.isRegMask(); }; |
241 | (void)IsRegMask; |
242 | |
243 | assert(llvm::none_of(MI->operands(), IsRegMask) && |
         "Calls were filtered out above!");
245 | |
246 | auto IsUnordered = [](MachineMemOperand *MMO) { return MMO->isUnordered(); }; |
  return llvm::all_of(MI->memoperands(), IsUnordered);
248 | } |
249 | |
250 | ImplicitNullChecks::DependenceResult |
251 | ImplicitNullChecks::computeDependence(const MachineInstr *MI, |
252 | ArrayRef<MachineInstr *> Block) { |
  assert(llvm::all_of(Block, canHandle) && "Check this first!");
  assert(!is_contained(Block, MI) && "Block must be exclusive of MI!");
255 | |
256 | std::optional<ArrayRef<MachineInstr *>::iterator> Dep; |
257 | |
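  // Scan Block for instructions that MI cannot be reordered with; we can
  // tolerate at most one such dependency.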
258 | for (auto I = Block.begin(), E = Block.end(); I != E; ++I) { |
    if (canReorder(*I, MI))
260 | continue; |
261 | |
262 | if (Dep == std::nullopt) { |
263 | // Found one possible dependency, keep track of it. |
264 | Dep = I; |
265 | } else { |
266 | // We found two dependencies, so bail out. |
267 | return {false, std::nullopt}; |
268 | } |
269 | } |
270 | |
271 | return {true, Dep}; |
272 | } |
273 | |
274 | bool ImplicitNullChecks::canReorder(const MachineInstr *A, |
275 | const MachineInstr *B) { |
  assert(canHandle(A) && canHandle(B) && "Precondition!");
277 | |
278 | // canHandle makes sure that we _can_ correctly analyze the dependencies |
279 | // between A and B here -- for instance, we should not be dealing with heap |
280 | // load-store dependencies here. |
281 | |
282 | for (const auto &MOA : A->operands()) { |
283 | if (!(MOA.isReg() && MOA.getReg())) |
284 | continue; |
285 | |
286 | Register RegA = MOA.getReg(); |
287 | for (const auto &MOB : B->operands()) { |
288 | if (!(MOB.isReg() && MOB.getReg())) |
289 | continue; |
290 | |
291 | Register RegB = MOB.getReg(); |
292 | |
293 | if (TRI->regsOverlap(RegA, RegB) && (MOA.isDef() || MOB.isDef())) |
294 | return false; |
295 | } |
296 | } |
297 | |
298 | return true; |
299 | } |
300 | |
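/// Pass entry point: find all null checks in the function that can be made
/// implicit and rewrite them.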
301 | bool ImplicitNullChecks::runOnMachineFunction(MachineFunction &MF) { |
302 | TII = MF.getSubtarget().getInstrInfo(); |
303 | TRI = MF.getRegInfo().getTargetRegisterInfo(); |
304 | MFI = &MF.getFrameInfo(); |
305 | AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); |
306 | |
307 | SmallVector<NullCheck, 16> NullCheckList; |
308 | |
309 | for (auto &MBB : MF) |
310 | analyzeBlockForNullChecks(MBB, NullCheckList); |
311 | |
312 | if (!NullCheckList.empty()) |
313 | rewriteNullChecks(NullCheckList); |
314 | |
315 | return !NullCheckList.empty(); |
316 | } |
317 | |
// Return true if any register aliasing \p Reg is a live-in of \p MBB.
319 | static bool AnyAliasLiveIn(const TargetRegisterInfo *TRI, |
320 | MachineBasicBlock *MBB, unsigned Reg) { |
321 | for (MCRegAliasIterator AR(Reg, TRI, /*IncludeSelf*/ true); AR.isValid(); |
322 | ++AR) |
    if (MBB->isLiveIn(*AR))
324 | return true; |
325 | return false; |
326 | } |
327 | |
328 | ImplicitNullChecks::AliasResult |
329 | ImplicitNullChecks::areMemoryOpsAliased(const MachineInstr &MI, |
330 | const MachineInstr *PrevMI) const { |
  // If it is not a memory access, skip the check.
332 | if (!(PrevMI->mayStore() || PrevMI->mayLoad())) |
333 | return AR_NoAlias; |
334 | // Load-Load may alias |
335 | if (!(MI.mayStore() || PrevMI->mayStore())) |
336 | return AR_NoAlias; |
  // We lost the info, so conservatively assume they alias. If it was a store,
  // there is no point continuing because we won't be able to check against it
  // further.
339 | if (MI.memoperands_empty()) |
340 | return MI.mayStore() ? AR_WillAliasEverything : AR_MayAlias; |
341 | if (PrevMI->memoperands_empty()) |
342 | return PrevMI->mayStore() ? AR_WillAliasEverything : AR_MayAlias; |
343 | |
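  // Both instructions carry memory operands at this point; compare them
  // pairwise using the underlying IR values and alias analysis.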
344 | for (MachineMemOperand *MMO1 : MI.memoperands()) { |
    // MMO1 should have a value because it comes from the operation we'd like
    // to use as the implicit null check.
    assert(MMO1->getValue() && "MMO1 should have a Value!");
348 | for (MachineMemOperand *MMO2 : PrevMI->memoperands()) { |
349 | if (const PseudoSourceValue *PSV = MMO2->getPseudoValue()) { |
350 | if (PSV->mayAlias(MFI)) |
351 | return AR_MayAlias; |
352 | continue; |
353 | } |
      if (!AA->isNoAlias(
              MemoryLocation::getAfter(MMO1->getValue(), MMO1->getAAInfo()),
              MemoryLocation::getAfter(MMO2->getValue(), MMO2->getAAInfo())))
357 | return AR_MayAlias; |
358 | } |
359 | } |
360 | return AR_NoAlias; |
361 | } |
362 | |
363 | ImplicitNullChecks::SuitabilityResult |
364 | ImplicitNullChecks::isSuitableMemoryOp(const MachineInstr &MI, |
365 | unsigned PointerReg, |
366 | ArrayRef<MachineInstr *> PrevInsts) { |
367 | // Implementation restriction for faulting_op insertion |
368 | // TODO: This could be relaxed if we find a test case which warrants it. |
369 | if (MI.getDesc().getNumDefs() > 1) |
370 | return SR_Unsuitable; |
371 | |
372 | if (!MI.mayLoadOrStore() || MI.isPredicable()) |
373 | return SR_Unsuitable; |
  auto AM = TII->getAddrModeFromMemoryOp(MI, TRI);
375 | if (!AM || AM->Form != ExtAddrMode::Formula::Basic) |
376 | return SR_Unsuitable; |
377 | auto AddrMode = *AM; |
378 | const Register BaseReg = AddrMode.BaseReg, ScaledReg = AddrMode.ScaledReg; |
379 | int64_t Displacement = AddrMode.Displacement; |
380 | |
  // We need the base of the memory instruction to be the same as the register
  // where the null check is performed (i.e. PointerReg).
383 | if (BaseReg != PointerReg && ScaledReg != PointerReg) |
384 | return SR_Unsuitable; |
385 | const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo(); |
  unsigned PointerRegSizeInBits = TRI->getRegSizeInBits(PointerReg, MRI);
  // Bail out if the sizes of BaseReg, ScaledReg and PointerReg are not the
  // same.
  if ((BaseReg &&
       TRI->getRegSizeInBits(BaseReg, MRI) != PointerRegSizeInBits) ||
      (ScaledReg &&
       TRI->getRegSizeInBits(ScaledReg, MRI) != PointerRegSizeInBits))
393 | return SR_Unsuitable; |
394 | |
  // Returns true if the register used in the address (RegUsedInAddr) holds a
  // known constant value, in which case its contribution is folded into the
  // Displacement.
397 | auto CalculateDisplacementFromAddrMode = [&](Register RegUsedInAddr, |
398 | int64_t Multiplier) { |
399 | // The register can be NoRegister, which is defined as zero for all targets. |
400 | // Consider instruction of interest as `movq 8(,%rdi,8), %rax`. Here the |
401 | // ScaledReg is %rdi, while there is no BaseReg. |
402 | if (!RegUsedInAddr) |
403 | return false; |
    assert(Multiplier && "expected to be non-zero!");
405 | MachineInstr *ModifyingMI = nullptr; |
    for (auto It = std::next(MachineBasicBlock::const_reverse_iterator(&MI));
407 | It != MI.getParent()->rend(); It++) { |
408 | const MachineInstr *CurrMI = &*It; |
      if (CurrMI->modifiesRegister(RegUsedInAddr, TRI)) {
410 | ModifyingMI = const_cast<MachineInstr *>(CurrMI); |
411 | break; |
412 | } |
413 | } |
414 | if (!ModifyingMI) |
415 | return false; |
    // Check for a constant value defined in the register by ModifyingMI. This
    // means all previous values of that register have been invalidated.
418 | int64_t ImmVal; |
    if (!TII->getConstValDefinedInReg(*ModifyingMI, RegUsedInAddr, ImmVal))
420 | return false; |
421 | // Calculate the reg size in bits, since this is needed for bailing out in |
422 | // case of overflow. |
    int32_t RegSizeInBits = TRI->getRegSizeInBits(RegUsedInAddr, MRI);
424 | APInt ImmValC(RegSizeInBits, ImmVal, true /*IsSigned*/); |
425 | APInt MultiplierC(RegSizeInBits, Multiplier); |
426 | assert(MultiplierC.isStrictlyPositive() && |
           "expected to be a positive value!");
428 | bool IsOverflow; |
429 | // Sign of the product depends on the sign of the ImmVal, since Multiplier |
430 | // is always positive. |
    APInt Product = ImmValC.smul_ov(MultiplierC, IsOverflow);
432 | if (IsOverflow) |
433 | return false; |
434 | APInt DisplacementC(64, Displacement, true /*isSigned*/); |
    DisplacementC = Product.sadd_ov(DisplacementC, IsOverflow);
436 | if (IsOverflow) |
437 | return false; |
438 | |
    // We only handle displacements up to 64 bits wide.
440 | if (DisplacementC.getActiveBits() > 64) |
441 | return false; |
442 | Displacement = DisplacementC.getSExtValue(); |
443 | return true; |
444 | }; |
445 | |
  // If a register used in the address is constant, fold its effect into the
  // displacement for ease of analysis.
448 | bool BaseRegIsConstVal = false, ScaledRegIsConstVal = false; |
449 | if (CalculateDisplacementFromAddrMode(BaseReg, 1)) |
450 | BaseRegIsConstVal = true; |
451 | if (CalculateDisplacementFromAddrMode(ScaledReg, AddrMode.Scale)) |
452 | ScaledRegIsConstVal = true; |
453 | |
  // The register which is not null checked should be part of the Displacement
  // calculation; otherwise we do not know whether the Displacement is made up
  // of some symbolic values.
  // This matters because we do not want to incorrectly assume that the load
  // falls in the zeroth faulting page in the "sane offset check" below.
459 | if ((BaseReg && BaseReg != PointerReg && !BaseRegIsConstVal) || |
460 | (ScaledReg && ScaledReg != PointerReg && !ScaledRegIsConstVal)) |
461 | return SR_Unsuitable; |
462 | |
463 | // We want the mem access to be issued at a sane offset from PointerReg, |
464 | // so that if PointerReg is null then the access reliably page faults. |
465 | if (!(-PageSize < Displacement && Displacement < PageSize)) |
466 | return SR_Unsuitable; |
467 | |
  // Finally, check whether the current memory access aliases with a previous
  // one.
469 | for (auto *PrevMI : PrevInsts) { |
470 | AliasResult AR = areMemoryOpsAliased(MI, PrevMI); |
471 | if (AR == AR_WillAliasEverything) |
472 | return SR_Impossible; |
473 | if (AR == AR_MayAlias) |
474 | return SR_Unsuitable; |
475 | } |
476 | return SR_Suitable; |
477 | } |
478 | |
479 | bool ImplicitNullChecks::canDependenceHoistingClobberLiveIns( |
480 | MachineInstr *DependenceMI, MachineBasicBlock *NullSucc) { |
481 | for (const auto &DependenceMO : DependenceMI->operands()) { |
482 | if (!(DependenceMO.isReg() && DependenceMO.getReg())) |
483 | continue; |
484 | |
485 | // Make sure that we won't clobber any live ins to the sibling block by |
    // hoisting Dependency. For instance, we can't hoist INST to before the
    // null check (even if it is safe, and does not violate any dependencies
    // in the non_null_block) if %rdx is live in to _null_block.
489 | // |
490 | // test %rcx, %rcx |
491 | // je _null_block |
492 | // _non_null_block: |
493 | // %rdx = INST |
494 | // ... |
495 | // |
    // This restriction does not apply to the faulting load inst because if
    // the pointer loaded from is in the null page, the load will not
    // semantically execute and will not affect machine state. That is, if the
    // load was loading into %rax and it faults, the value of %rax should stay
    // the same as it would have been had the load not executed and we'd have
    // branched to NullSucc directly.
    if (AnyAliasLiveIn(TRI, NullSucc, DependenceMO.getReg()))
      return true;
  }
506 | |
507 | // The dependence does not clobber live-ins in NullSucc block. |
508 | return false; |
509 | } |
510 | |
511 | bool ImplicitNullChecks::canHoistInst(MachineInstr *FaultingMI, |
512 | ArrayRef<MachineInstr *> InstsSeenSoFar, |
513 | MachineBasicBlock *NullSucc, |
514 | MachineInstr *&Dependence) { |
  auto DepResult = computeDependence(FaultingMI, InstsSeenSoFar);
516 | if (!DepResult.CanReorder) |
517 | return false; |
518 | |
519 | if (!DepResult.PotentialDependence) { |
520 | Dependence = nullptr; |
521 | return true; |
522 | } |
523 | |
524 | auto DependenceItr = *DepResult.PotentialDependence; |
525 | auto *DependenceMI = *DependenceItr; |
526 | |
527 | // We don't want to reason about speculating loads. Note -- at this point |
528 | // we should have already filtered out all of the other non-speculatable |
529 | // things, like calls and stores. |
530 | // We also do not want to hoist stores because it might change the memory |
531 | // while the FaultingMI may result in faulting. |
  assert(canHandle(DependenceMI) && "Should never have reached here!");
533 | if (DependenceMI->mayLoadOrStore()) |
534 | return false; |
535 | |
536 | if (canDependenceHoistingClobberLiveIns(DependenceMI, NullSucc)) |
537 | return false; |
538 | |
  auto DepDepResult =
      computeDependence(DependenceMI, {InstsSeenSoFar.begin(), DependenceItr});
541 | |
542 | if (!DepDepResult.CanReorder || DepDepResult.PotentialDependence) |
543 | return false; |
544 | |
545 | Dependence = DependenceMI; |
546 | return true; |
547 | } |
548 | |
549 | /// Analyze MBB to check if its terminating branch can be turned into an |
550 | /// implicit null check. If yes, append a description of the said null check to |
551 | /// NullCheckList and return true, else return false. |
552 | bool ImplicitNullChecks::analyzeBlockForNullChecks( |
553 | MachineBasicBlock &MBB, SmallVectorImpl<NullCheck> &NullCheckList) { |
554 | using MachineBranchPredicate = TargetInstrInfo::MachineBranchPredicate; |
555 | |
556 | MDNode *BranchMD = nullptr; |
557 | if (auto *BB = MBB.getBasicBlock()) |
    BranchMD = BB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit);
559 | |
560 | if (!BranchMD) |
561 | return false; |
562 | |
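  // Decompose the block's terminator into a branch predicate we can reason
  // about; bail out if the target cannot do so.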
563 | MachineBranchPredicate MBP; |
564 | |
  if (TII->analyzeBranchPredicate(MBB, MBP, /*AllowModify=*/true))
566 | return false; |
567 | |
568 | // Is the predicate comparing an integer to zero? |
569 | if (!(MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 && |
570 | (MBP.Predicate == MachineBranchPredicate::PRED_NE || |
571 | MBP.Predicate == MachineBranchPredicate::PRED_EQ))) |
572 | return false; |
573 | |
  // If there is a separate condition generation instruction, we choose not to
  // transform unless we can remove both the condition and the consuming
  // branch.
576 | if (MBP.ConditionDef && !MBP.SingleUseCondition) |
577 | return false; |
578 | |
579 | MachineBasicBlock *NotNullSucc, *NullSucc; |
580 | |
581 | if (MBP.Predicate == MachineBranchPredicate::PRED_NE) { |
582 | NotNullSucc = MBP.TrueDest; |
583 | NullSucc = MBP.FalseDest; |
584 | } else { |
585 | NotNullSucc = MBP.FalseDest; |
586 | NullSucc = MBP.TrueDest; |
587 | } |
588 | |
589 | // We handle the simplest case for now. We can potentially do better by using |
590 | // the machine dominator tree. |
591 | if (NotNullSucc->pred_size() != 1) |
592 | return false; |
593 | |
594 | const Register PointerReg = MBP.LHS.getReg(); |
595 | |
596 | if (MBP.ConditionDef) { |
597 | // To prevent the invalid transformation of the following code: |
598 | // |
599 | // mov %rax, %rcx |
600 | // test %rax, %rax |
601 | // %rax = ... |
602 | // je throw_npe |
603 | // mov(%rcx), %r9 |
604 | // mov(%rax), %r10 |
605 | // |
606 | // into: |
607 | // |
608 | // mov %rax, %rcx |
609 | // %rax = .... |
610 | // faulting_load_op("movl (%rax), %r10", throw_npe) |
611 | // mov(%rcx), %r9 |
612 | // |
613 | // we must ensure that there are no instructions between the 'test' and |
614 | // conditional jump that modify %rax. |
615 | assert(MBP.ConditionDef->getParent() == &MBB && |
           "Should be in basic block");
617 | |
618 | for (auto I = MBB.rbegin(); MBP.ConditionDef != &*I; ++I) |
      if (I->modifiesRegister(PointerReg, TRI))
620 | return false; |
621 | } |
622 | // Starting with a code fragment like: |
623 | // |
624 | // test %rax, %rax |
625 | // jne LblNotNull |
626 | // |
627 | // LblNull: |
628 | // callq throw_NullPointerException |
629 | // |
630 | // LblNotNull: |
631 | // Inst0 |
632 | // Inst1 |
633 | // ... |
634 | // Def = Load (%rax + <offset>) |
635 | // ... |
636 | // |
637 | // |
638 | // we want to end up with |
639 | // |
640 | // Def = FaultingLoad (%rax + <offset>), LblNull |
641 | // jmp LblNotNull ;; explicit or fallthrough |
642 | // |
643 | // LblNotNull: |
644 | // Inst0 |
645 | // Inst1 |
646 | // ... |
647 | // |
648 | // LblNull: |
649 | // callq throw_NullPointerException |
650 | // |
651 | // |
652 | // To see why this is legal, consider the two possibilities: |
653 | // |
654 | // 1. %rax is null: since we constrain <offset> to be less than PageSize, the |
655 | // load instruction dereferences the null page, causing a segmentation |
656 | // fault. |
657 | // |
658 | // 2. %rax is not null: in this case we know that the load cannot fault, as |
659 | // otherwise the load would've faulted in the original program too and the |
660 | // original program would've been undefined. |
661 | // |
662 | // This reasoning cannot be extended to justify hoisting through arbitrary |
663 | // control flow. For instance, in the example below (in pseudo-C) |
664 | // |
665 | // if (ptr == null) { throw_npe(); unreachable; } |
666 | // if (some_cond) { return 42; } |
667 | // v = ptr->field; // LD |
668 | // ... |
669 | // |
670 | // we cannot (without code duplication) use the load marked "LD" to null check |
671 | // ptr -- clause (2) above does not apply in this case. In the above program |
672 | // the safety of ptr->field can be dependent on some_cond; and, for instance, |
673 | // ptr could be some non-null invalid reference that never gets loaded from |
674 | // because some_cond is always true. |
675 | |
676 | SmallVector<MachineInstr *, 8> InstsSeenSoFar; |
677 | |
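  // Walk NotNullSucc looking for a memory operation that can stand in for the
  // explicit null check, keeping track of the instructions it would have to be
  // hoisted over.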
678 | for (auto &MI : *NotNullSucc) { |
    if (!canHandle(&MI) || InstsSeenSoFar.size() >= MaxInstsToConsider)
680 | return false; |
681 | |
682 | MachineInstr *Dependence; |
    SuitabilityResult SR = isSuitableMemoryOp(MI, PointerReg, InstsSeenSoFar);
684 | if (SR == SR_Impossible) |
685 | return false; |
686 | if (SR == SR_Suitable && |
        canHoistInst(&MI, InstsSeenSoFar, NullSucc, Dependence)) {
      NullCheckList.emplace_back(&MI, MBP.ConditionDef, &MBB, NotNullSucc,
                                 NullSucc, Dependence);
690 | return true; |
691 | } |
692 | |
693 | // If MI re-defines the PointerReg in a way that changes the value of |
694 | // PointerReg if it was null, then we cannot move further. |
    if (!TII->preservesZeroValueInReg(&MI, PointerReg, TRI))
696 | return false; |
    InstsSeenSoFar.push_back(&MI);
698 | } |
699 | |
700 | return false; |
701 | } |
702 | |
703 | /// Wrap a machine instruction, MI, into a FAULTING machine instruction. |
704 | /// The FAULTING instruction does the same load/store as MI |
705 | /// (defining the same register), and branches to HandlerMBB if the mem access |
706 | /// faults. The FAULTING instruction is inserted at the end of MBB. |
707 | MachineInstr *ImplicitNullChecks::insertFaultingInstr( |
708 | MachineInstr *MI, MachineBasicBlock *MBB, MachineBasicBlock *HandlerMBB) { |
709 | const unsigned NoRegister = 0; // Guaranteed to be the NoRegister value for |
710 | // all targets. |
711 | |
712 | DebugLoc DL; |
713 | unsigned NumDefs = MI->getDesc().getNumDefs(); |
  assert(NumDefs <= 1 && "other cases unhandled!");
715 | |
716 | unsigned DefReg = NoRegister; |
717 | if (NumDefs != 0) { |
    DefReg = MI->getOperand(0).getReg();
    assert(NumDefs == 1 && "expected exactly one def!");
720 | } |
721 | |
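  // Pick the fault kind matching MI's memory behavior: a load, a store, or an
  // instruction that both loads and stores.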
722 | FaultMaps::FaultKind FK; |
723 | if (MI->mayLoad()) |
724 | FK = |
725 | MI->mayStore() ? FaultMaps::FaultingLoadStore : FaultMaps::FaultingLoad; |
726 | else |
727 | FK = FaultMaps::FaultingStore; |
728 | |
  auto MIB = BuildMI(MBB, DL, TII->get(TargetOpcode::FAULTING_OP), DefReg)
                 .addImm(FK)
                 .addMBB(HandlerMBB)
                 .addImm(MI->getOpcode());
733 | |
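  // Copy the remaining operands from MI onto the FAULTING_OP, clearing kill
  // and dead flags since they may no longer be accurate once the access moves
  // into the check block.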
734 | for (auto &MO : MI->uses()) { |
735 | if (MO.isReg()) { |
736 | MachineOperand NewMO = MO; |
737 | if (MO.isUse()) { |
738 | NewMO.setIsKill(false); |
739 | } else { |
        assert(MO.isDef() && "Expected def or use");
741 | NewMO.setIsDead(false); |
742 | } |
      MIB.add(NewMO);
744 | } else { |
745 | MIB.add(MO); |
746 | } |
747 | } |
748 | |
749 | MIB.setMemRefs(MI->memoperands()); |
750 | |
751 | return MIB; |
752 | } |
753 | |
754 | /// Rewrite the null checks in NullCheckList into implicit null checks. |
755 | void ImplicitNullChecks::rewriteNullChecks( |
756 | ArrayRef<ImplicitNullChecks::NullCheck> NullCheckList) { |
757 | DebugLoc DL; |
758 | |
759 | for (const auto &NC : NullCheckList) { |
760 | // Remove the conditional branch dependent on the null check. |
    unsigned BranchesRemoved = TII->removeBranch(*NC.getCheckBlock());
762 | (void)BranchesRemoved; |
    assert(BranchesRemoved > 0 && "expected at least one branch!");
764 | |
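    // If the memory operation had a single dependency, hoist it into the check
    // block first so that it still executes before the faulting instruction.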
765 | if (auto *DepMI = NC.getOnlyDependency()) { |
766 | DepMI->removeFromParent(); |
      NC.getCheckBlock()->insert(NC.getCheckBlock()->end(), DepMI);
768 | } |
769 | |
    // Insert a faulting instruction where the conditional branch was
    // originally. The check we did earlier ensures that this bit of code
    // motion is legal. We do not touch the successors list for any basic
    // block since we haven't changed control flow; we've just made it
    // implicit.
    MachineInstr *FaultingInstr = insertFaultingInstr(
        NC.getMemOperation(), NC.getCheckBlock(), NC.getNullSucc());
    // Now the values defined by MemOperation, if any, are live-ins of
    // MemOperation's block.
    // The original operation may define implicit-defs alongside
    // the value.
780 | MachineBasicBlock *MBB = NC.getMemOperation()->getParent(); |
781 | for (const MachineOperand &MO : FaultingInstr->all_defs()) { |
782 | Register Reg = MO.getReg(); |
783 | if (!Reg || MBB->isLiveIn(Reg)) |
784 | continue; |
      MBB->addLiveIn(Reg);
786 | } |
787 | |
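    // Registers defined by the hoisted dependency are now live across the edge
    // into NotNullSucc, so record them as live-ins there.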
788 | if (auto *DepMI = NC.getOnlyDependency()) { |
789 | for (auto &MO : DepMI->all_defs()) { |
790 | if (!MO.getReg() || MO.isDead()) |
791 | continue; |
        if (!NC.getNotNullSucc()->isLiveIn(MO.getReg()))
          NC.getNotNullSucc()->addLiveIn(MO.getReg());
794 | } |
795 | } |
796 | |
797 | NC.getMemOperation()->eraseFromParent(); |
798 | if (auto *CheckOp = NC.getCheckOperation()) |
799 | CheckOp->eraseFromParent(); |
800 | |
801 | // Insert an *unconditional* branch to not-null successor - we expect |
802 | // block placement to remove fallthroughs later. |
    TII->insertBranch(*NC.getCheckBlock(), NC.getNotNullSucc(), nullptr,
                      /*Cond=*/std::nullopt, DL);
805 | |
806 | NumImplicitNullChecks++; |
807 | } |
808 | } |
809 | |
810 | char ImplicitNullChecks::ID = 0; |
811 | |
812 | char &llvm::ImplicitNullChecksID = ImplicitNullChecks::ID; |
813 | |
814 | INITIALIZE_PASS_BEGIN(ImplicitNullChecks, DEBUG_TYPE, |
                      "Implicit null checks", false, false)
816 | INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) |
817 | INITIALIZE_PASS_END(ImplicitNullChecks, DEBUG_TYPE, |
                    "Implicit null checks", false, false)
819 | |