1 | //===- Local.cpp - Functions to perform local transformations -------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
// This family of functions performs various local transformations to the
10 | // program. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #include "llvm/Transforms/Utils/Local.h" |
15 | #include "llvm/ADT/APInt.h" |
16 | #include "llvm/ADT/DenseMap.h" |
17 | #include "llvm/ADT/DenseMapInfo.h" |
18 | #include "llvm/ADT/DenseSet.h" |
19 | #include "llvm/ADT/Hashing.h" |
20 | #include "llvm/ADT/STLExtras.h" |
21 | #include "llvm/ADT/SetVector.h" |
22 | #include "llvm/ADT/SmallPtrSet.h" |
23 | #include "llvm/ADT/SmallVector.h" |
24 | #include "llvm/ADT/Statistic.h" |
25 | #include "llvm/Analysis/AssumeBundleQueries.h" |
26 | #include "llvm/Analysis/ConstantFolding.h" |
27 | #include "llvm/Analysis/DomTreeUpdater.h" |
28 | #include "llvm/Analysis/InstructionSimplify.h" |
29 | #include "llvm/Analysis/MemoryBuiltins.h" |
30 | #include "llvm/Analysis/MemorySSAUpdater.h" |
31 | #include "llvm/Analysis/TargetLibraryInfo.h" |
32 | #include "llvm/Analysis/ValueTracking.h" |
33 | #include "llvm/Analysis/VectorUtils.h" |
34 | #include "llvm/BinaryFormat/Dwarf.h" |
35 | #include "llvm/IR/Argument.h" |
36 | #include "llvm/IR/Attributes.h" |
37 | #include "llvm/IR/BasicBlock.h" |
38 | #include "llvm/IR/CFG.h" |
39 | #include "llvm/IR/Constant.h" |
40 | #include "llvm/IR/ConstantRange.h" |
41 | #include "llvm/IR/Constants.h" |
42 | #include "llvm/IR/DIBuilder.h" |
43 | #include "llvm/IR/DataLayout.h" |
44 | #include "llvm/IR/DebugInfo.h" |
45 | #include "llvm/IR/DebugInfoMetadata.h" |
46 | #include "llvm/IR/DebugLoc.h" |
47 | #include "llvm/IR/DerivedTypes.h" |
48 | #include "llvm/IR/Dominators.h" |
49 | #include "llvm/IR/EHPersonalities.h" |
50 | #include "llvm/IR/Function.h" |
51 | #include "llvm/IR/GetElementPtrTypeIterator.h" |
52 | #include "llvm/IR/GlobalObject.h" |
53 | #include "llvm/IR/IRBuilder.h" |
54 | #include "llvm/IR/InstrTypes.h" |
55 | #include "llvm/IR/Instruction.h" |
56 | #include "llvm/IR/Instructions.h" |
57 | #include "llvm/IR/IntrinsicInst.h" |
58 | #include "llvm/IR/Intrinsics.h" |
59 | #include "llvm/IR/IntrinsicsWebAssembly.h" |
60 | #include "llvm/IR/LLVMContext.h" |
61 | #include "llvm/IR/MDBuilder.h" |
62 | #include "llvm/IR/MemoryModelRelaxationAnnotations.h" |
63 | #include "llvm/IR/Metadata.h" |
64 | #include "llvm/IR/Module.h" |
65 | #include "llvm/IR/PatternMatch.h" |
66 | #include "llvm/IR/ProfDataUtils.h" |
67 | #include "llvm/IR/Type.h" |
68 | #include "llvm/IR/Use.h" |
69 | #include "llvm/IR/User.h" |
70 | #include "llvm/IR/Value.h" |
71 | #include "llvm/IR/ValueHandle.h" |
72 | #include "llvm/Support/Casting.h" |
73 | #include "llvm/Support/CommandLine.h" |
74 | #include "llvm/Support/Debug.h" |
75 | #include "llvm/Support/ErrorHandling.h" |
76 | #include "llvm/Support/KnownBits.h" |
77 | #include "llvm/Support/raw_ostream.h" |
78 | #include "llvm/Transforms/Utils/BasicBlockUtils.h" |
79 | #include "llvm/Transforms/Utils/ValueMapper.h" |
80 | #include <algorithm> |
81 | #include <cassert> |
82 | #include <cstdint> |
83 | #include <iterator> |
84 | #include <map> |
85 | #include <optional> |
86 | #include <utility> |
87 | |
88 | using namespace llvm; |
89 | using namespace llvm::PatternMatch; |
90 | |
91 | extern cl::opt<bool> UseNewDbgInfoFormat; |
92 | |
93 | #define DEBUG_TYPE "local" |
94 | |
95 | STATISTIC(NumRemoved, "Number of unreachable basic blocks removed" ); |
96 | STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd" ); |
97 | |
98 | static cl::opt<bool> PHICSEDebugHash( |
99 | "phicse-debug-hash" , |
100 | #ifdef EXPENSIVE_CHECKS |
101 | cl::init(true), |
102 | #else |
103 | cl::init(Val: false), |
104 | #endif |
105 | cl::Hidden, |
106 | cl::desc("Perform extra assertion checking to verify that PHINodes's hash " |
107 | "function is well-behaved w.r.t. its isEqual predicate" )); |
108 | |
109 | static cl::opt<unsigned> PHICSENumPHISmallSize( |
110 | "phicse-num-phi-smallsize" , cl::init(Val: 32), cl::Hidden, |
111 | cl::desc( |
112 | "When the basic block contains not more than this number of PHI nodes, " |
"perform a (faster!) exhaustive search instead of a set-driven one."));
114 | |
115 | // Max recursion depth for collectBitParts used when detecting bswap and |
116 | // bitreverse idioms. |
117 | static const unsigned BitPartRecursionMaxDepth = 48; |
118 | |
119 | //===----------------------------------------------------------------------===// |
120 | // Local constant propagation. |
121 | // |
122 | |
123 | /// ConstantFoldTerminator - If a terminator instruction is predicated on a |
124 | /// constant value, convert it into an unconditional branch to the constant |
125 | /// destination. This is a nontrivial operation because the successors of this |
126 | /// basic block must have their PHI nodes updated. |
127 | /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch |
128 | /// conditions and indirectbr addresses this might make dead if |
129 | /// DeleteDeadConditions is true. |
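///
/// For example (illustrative IR), a conditional branch whose condition has
/// been folded to a constant:
///   br i1 true, label %taken, label %dead
/// becomes:
///   br label %taken
/// and the PHI nodes in %dead drop their entry for this block.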
130 | bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions, |
131 | const TargetLibraryInfo *TLI, |
132 | DomTreeUpdater *DTU) { |
133 | Instruction *T = BB->getTerminator(); |
134 | IRBuilder<> Builder(T); |
135 | |
136 | // Branch - See if we are conditional jumping on constant |
137 | if (auto *BI = dyn_cast<BranchInst>(Val: T)) { |
138 | if (BI->isUnconditional()) return false; // Can't optimize uncond branch |
139 | |
140 | BasicBlock *Dest1 = BI->getSuccessor(i: 0); |
141 | BasicBlock *Dest2 = BI->getSuccessor(i: 1); |
142 | |
143 | if (Dest2 == Dest1) { // Conditional branch to same location? |
144 | // This branch matches something like this: |
145 | // br bool %cond, label %Dest, label %Dest |
146 | // and changes it into: br label %Dest |
147 | |
148 | // Let the basic block know that we are letting go of one copy of it. |
149 | assert(BI->getParent() && "Terminator not inserted in block!" ); |
150 | Dest1->removePredecessor(Pred: BI->getParent()); |
151 | |
152 | // Replace the conditional branch with an unconditional one. |
153 | BranchInst *NewBI = Builder.CreateBr(Dest: Dest1); |
154 | |
155 | // Transfer the metadata to the new branch instruction. |
156 | NewBI->copyMetadata(SrcInst: *BI, WL: {LLVMContext::MD_loop, LLVMContext::MD_dbg, |
157 | LLVMContext::MD_annotation}); |
158 | |
159 | Value *Cond = BI->getCondition(); |
160 | BI->eraseFromParent(); |
161 | if (DeleteDeadConditions) |
162 | RecursivelyDeleteTriviallyDeadInstructions(V: Cond, TLI); |
163 | return true; |
164 | } |
165 | |
166 | if (auto *Cond = dyn_cast<ConstantInt>(Val: BI->getCondition())) { |
167 | // Are we branching on constant? |
168 | // YES. Change to unconditional branch... |
169 | BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2; |
170 | BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1; |
171 | |
172 | // Let the basic block know that we are letting go of it. Based on this, |
// it will adjust its PHI nodes.
174 | OldDest->removePredecessor(Pred: BB); |
175 | |
176 | // Replace the conditional branch with an unconditional one. |
177 | BranchInst *NewBI = Builder.CreateBr(Dest: Destination); |
178 | |
179 | // Transfer the metadata to the new branch instruction. |
180 | NewBI->copyMetadata(SrcInst: *BI, WL: {LLVMContext::MD_loop, LLVMContext::MD_dbg, |
181 | LLVMContext::MD_annotation}); |
182 | |
183 | BI->eraseFromParent(); |
184 | if (DTU) |
185 | DTU->applyUpdates(Updates: {{DominatorTree::Delete, BB, OldDest}}); |
186 | return true; |
187 | } |
188 | |
189 | return false; |
190 | } |
191 | |
192 | if (auto *SI = dyn_cast<SwitchInst>(Val: T)) { |
193 | // If we are switching on a constant, we can convert the switch to an |
194 | // unconditional branch. |
195 | auto *CI = dyn_cast<ConstantInt>(Val: SI->getCondition()); |
196 | BasicBlock *DefaultDest = SI->getDefaultDest(); |
197 | BasicBlock *TheOnlyDest = DefaultDest; |
198 | |
199 | // If the default is unreachable, ignore it when searching for TheOnlyDest. |
200 | if (isa<UnreachableInst>(Val: DefaultDest->getFirstNonPHIOrDbg()) && |
201 | SI->getNumCases() > 0) { |
202 | TheOnlyDest = SI->case_begin()->getCaseSuccessor(); |
203 | } |
204 | |
205 | bool Changed = false; |
206 | |
207 | // Figure out which case it goes to. |
208 | for (auto It = SI->case_begin(), End = SI->case_end(); It != End;) { |
209 | // Found case matching a constant operand? |
210 | if (It->getCaseValue() == CI) { |
211 | TheOnlyDest = It->getCaseSuccessor(); |
212 | break; |
213 | } |
214 | |
215 | // Check to see if this branch is going to the same place as the default |
216 | // dest. If so, eliminate it as an explicit compare. |
217 | if (It->getCaseSuccessor() == DefaultDest) { |
218 | MDNode *MD = getValidBranchWeightMDNode(I: *SI); |
219 | unsigned NCases = SI->getNumCases(); |
220 | // Fold the case metadata into the default if there will be any branches |
221 | // left, unless the metadata doesn't match the switch. |
222 | if (NCases > 1 && MD) { |
223 | // Collect branch weights into a vector. |
224 | SmallVector<uint32_t, 8> Weights; |
225 | extractBranchWeights(ProfileData: MD, Weights); |
226 | |
227 | // Merge weight of this case to the default weight. |
228 | unsigned Idx = It->getCaseIndex(); |
229 | // TODO: Add overflow check. |
230 | Weights[0] += Weights[Idx + 1]; |
231 | // Remove weight for this case. |
232 | std::swap(a&: Weights[Idx + 1], b&: Weights.back()); |
233 | Weights.pop_back(); |
234 | setBranchWeights(I&: *SI, Weights, IsExpected: hasBranchWeightOrigin(ProfileData: MD)); |
235 | } |
236 | // Remove this entry. |
237 | BasicBlock *ParentBB = SI->getParent(); |
238 | DefaultDest->removePredecessor(Pred: ParentBB); |
239 | It = SI->removeCase(I: It); |
240 | End = SI->case_end(); |
241 | |
242 | // Removing this case may have made the condition constant. In that |
243 | // case, update CI and restart iteration through the cases. |
244 | if (auto *NewCI = dyn_cast<ConstantInt>(Val: SI->getCondition())) { |
245 | CI = NewCI; |
246 | It = SI->case_begin(); |
247 | } |
248 | |
249 | Changed = true; |
250 | continue; |
251 | } |
252 | |
253 | // Otherwise, check to see if the switch only branches to one destination. |
// We do this by resetting "TheOnlyDest" to null when we find two non-equal
255 | // destinations. |
256 | if (It->getCaseSuccessor() != TheOnlyDest) |
257 | TheOnlyDest = nullptr; |
258 | |
259 | // Increment this iterator as we haven't removed the case. |
260 | ++It; |
261 | } |
262 | |
263 | if (CI && !TheOnlyDest) { |
// We are branching on a constant that matches none of the cases; go to the
// default successor.
266 | TheOnlyDest = SI->getDefaultDest(); |
267 | } |
268 | |
269 | // If we found a single destination that we can fold the switch into, do so |
270 | // now. |
271 | if (TheOnlyDest) { |
272 | // Insert the new branch. |
273 | Builder.CreateBr(Dest: TheOnlyDest); |
274 | BasicBlock *BB = SI->getParent(); |
275 | |
276 | SmallSet<BasicBlock *, 8> RemovedSuccessors; |
277 | |
278 | // Remove entries from PHI nodes which we no longer branch to... |
279 | BasicBlock *SuccToKeep = TheOnlyDest; |
280 | for (BasicBlock *Succ : successors(I: SI)) { |
281 | if (DTU && Succ != TheOnlyDest) |
282 | RemovedSuccessors.insert(Ptr: Succ); |
283 | // Found case matching a constant operand? |
284 | if (Succ == SuccToKeep) { |
285 | SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest |
286 | } else { |
287 | Succ->removePredecessor(Pred: BB); |
288 | } |
289 | } |
290 | |
291 | // Delete the old switch. |
292 | Value *Cond = SI->getCondition(); |
293 | SI->eraseFromParent(); |
294 | if (DeleteDeadConditions) |
295 | RecursivelyDeleteTriviallyDeadInstructions(V: Cond, TLI); |
296 | if (DTU) { |
297 | std::vector<DominatorTree::UpdateType> Updates; |
298 | Updates.reserve(n: RemovedSuccessors.size()); |
299 | for (auto *RemovedSuccessor : RemovedSuccessors) |
300 | Updates.push_back(x: {DominatorTree::Delete, BB, RemovedSuccessor}); |
301 | DTU->applyUpdates(Updates); |
302 | } |
303 | return true; |
304 | } |
305 | |
306 | if (SI->getNumCases() == 1) { |
307 | // Otherwise, we can fold this switch into a conditional branch |
308 | // instruction if it has only one non-default destination. |
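// For example (illustrative IR, not taken from any particular test):
//   switch i32 %x, label %default [ i32 7, label %case7 ]
// becomes:
//   %cond = icmp eq i32 %x, 7
//   br i1 %cond, label %case7, label %default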
309 | auto FirstCase = *SI->case_begin(); |
310 | Value *Cond = Builder.CreateICmpEQ(LHS: SI->getCondition(), |
311 | RHS: FirstCase.getCaseValue(), Name: "cond" ); |
312 | |
313 | // Insert the new branch. |
314 | BranchInst *NewBr = Builder.CreateCondBr(Cond, |
315 | True: FirstCase.getCaseSuccessor(), |
316 | False: SI->getDefaultDest()); |
317 | SmallVector<uint32_t> Weights; |
318 | if (extractBranchWeights(I: *SI, Weights) && Weights.size() == 2) { |
319 | uint32_t DefWeight = Weights[0]; |
320 | uint32_t CaseWeight = Weights[1]; |
321 | // The TrueWeight should be the weight for the single case of SI. |
322 | NewBr->setMetadata(KindID: LLVMContext::MD_prof, |
323 | Node: MDBuilder(BB->getContext()) |
324 | .createBranchWeights(TrueWeight: CaseWeight, FalseWeight: DefWeight)); |
325 | } |
326 | |
327 | // Update make.implicit metadata to the newly-created conditional branch. |
328 | MDNode *MakeImplicitMD = SI->getMetadata(KindID: LLVMContext::MD_make_implicit); |
329 | if (MakeImplicitMD) |
330 | NewBr->setMetadata(KindID: LLVMContext::MD_make_implicit, Node: MakeImplicitMD); |
331 | |
332 | // Delete the old switch. |
333 | SI->eraseFromParent(); |
334 | return true; |
335 | } |
336 | return Changed; |
337 | } |
338 | |
339 | if (auto *IBI = dyn_cast<IndirectBrInst>(Val: T)) { |
340 | // indirectbr blockaddress(@F, @BB) -> br label @BB |
341 | if (auto *BA = |
342 | dyn_cast<BlockAddress>(Val: IBI->getAddress()->stripPointerCasts())) { |
343 | BasicBlock *TheOnlyDest = BA->getBasicBlock(); |
344 | SmallSet<BasicBlock *, 8> RemovedSuccessors; |
345 | |
346 | // Insert the new branch. |
347 | Builder.CreateBr(Dest: TheOnlyDest); |
348 | |
349 | BasicBlock *SuccToKeep = TheOnlyDest; |
350 | for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) { |
351 | BasicBlock *DestBB = IBI->getDestination(i); |
352 | if (DTU && DestBB != TheOnlyDest) |
353 | RemovedSuccessors.insert(Ptr: DestBB); |
354 | if (IBI->getDestination(i) == SuccToKeep) { |
355 | SuccToKeep = nullptr; |
356 | } else { |
357 | DestBB->removePredecessor(Pred: BB); |
358 | } |
359 | } |
360 | Value *Address = IBI->getAddress(); |
361 | IBI->eraseFromParent(); |
362 | if (DeleteDeadConditions) |
363 | // Delete pointer cast instructions. |
364 | RecursivelyDeleteTriviallyDeadInstructions(V: Address, TLI); |
365 | |
366 | // Also zap the blockaddress constant if there are no users remaining, |
367 | // otherwise the destination is still marked as having its address taken. |
368 | if (BA->use_empty()) |
369 | BA->destroyConstant(); |
370 | |
371 | // If we didn't find our destination in the IBI successor list, then we |
372 | // have undefined behavior. Replace the unconditional branch with an |
373 | // 'unreachable' instruction. |
374 | if (SuccToKeep) { |
375 | BB->getTerminator()->eraseFromParent(); |
376 | new UnreachableInst(BB->getContext(), BB); |
377 | } |
378 | |
379 | if (DTU) { |
380 | std::vector<DominatorTree::UpdateType> Updates; |
381 | Updates.reserve(n: RemovedSuccessors.size()); |
382 | for (auto *RemovedSuccessor : RemovedSuccessors) |
383 | Updates.push_back(x: {DominatorTree::Delete, BB, RemovedSuccessor}); |
384 | DTU->applyUpdates(Updates); |
385 | } |
386 | return true; |
387 | } |
388 | } |
389 | |
390 | return false; |
391 | } |
392 | |
393 | //===----------------------------------------------------------------------===// |
394 | // Local dead code elimination. |
395 | // |
396 | |
397 | /// isInstructionTriviallyDead - Return true if the result produced by the |
398 | /// instruction is not used, and the instruction has no side effects. |
399 | /// |
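/// For example (illustrative), an 'add' whose result has no uses is trivially
/// dead, whereas a 'store' never is, since it writes memory even though it
/// produces no value that could be used.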
400 | bool llvm::isInstructionTriviallyDead(Instruction *I, |
401 | const TargetLibraryInfo *TLI) { |
402 | if (!I->use_empty()) |
403 | return false; |
404 | return wouldInstructionBeTriviallyDead(I, TLI); |
405 | } |
406 | |
407 | bool llvm::wouldInstructionBeTriviallyDeadOnUnusedPaths( |
408 | Instruction *I, const TargetLibraryInfo *TLI) { |
// Instructions that are "markers" and have implied meaning for the code
// around them (without explicit uses) are not dead on unused paths.
411 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Val: I)) |
412 | if (II->getIntrinsicID() == Intrinsic::stacksave || |
413 | II->getIntrinsicID() == Intrinsic::launder_invariant_group || |
414 | II->isLifetimeStartOrEnd()) |
415 | return false; |
416 | return wouldInstructionBeTriviallyDead(I, TLI); |
417 | } |
418 | |
419 | bool llvm::wouldInstructionBeTriviallyDead(const Instruction *I, |
420 | const TargetLibraryInfo *TLI) { |
421 | if (I->isTerminator()) |
422 | return false; |
423 | |
424 | // We don't want the landingpad-like instructions removed by anything this |
425 | // general. |
426 | if (I->isEHPad()) |
427 | return false; |
428 | |
429 | // We don't want debug info removed by anything this general. |
430 | if (isa<DbgVariableIntrinsic>(Val: I)) |
431 | return false; |
432 | |
433 | if (const DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(Val: I)) { |
434 | if (DLI->getLabel()) |
435 | return false; |
436 | return true; |
437 | } |
438 | |
439 | if (auto *CB = dyn_cast<CallBase>(Val: I)) |
440 | if (isRemovableAlloc(V: CB, TLI)) |
441 | return true; |
442 | |
443 | if (!I->willReturn()) { |
444 | auto *II = dyn_cast<IntrinsicInst>(Val: I); |
445 | if (!II) |
446 | return false; |
447 | |
448 | switch (II->getIntrinsicID()) { |
449 | case Intrinsic::experimental_guard: { |
450 | // Guards on true are operationally no-ops. In the future we can |
451 | // consider more sophisticated tradeoffs for guards considering potential |
452 | // for check widening, but for now we keep things simple. |
453 | auto *Cond = dyn_cast<ConstantInt>(Val: II->getArgOperand(i: 0)); |
454 | return Cond && Cond->isOne(); |
455 | } |
456 | // TODO: These intrinsics are not safe to remove, because this may remove |
457 | // a well-defined trap. |
458 | case Intrinsic::wasm_trunc_signed: |
459 | case Intrinsic::wasm_trunc_unsigned: |
460 | case Intrinsic::ptrauth_auth: |
461 | case Intrinsic::ptrauth_resign: |
462 | return true; |
463 | default: |
464 | return false; |
465 | } |
466 | } |
467 | |
468 | if (!I->mayHaveSideEffects()) |
469 | return true; |
470 | |
471 | // Special case intrinsics that "may have side effects" but can be deleted |
472 | // when dead. |
473 | if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Val: I)) { |
474 | // Safe to delete llvm.stacksave and launder.invariant.group if dead. |
475 | if (II->getIntrinsicID() == Intrinsic::stacksave || |
476 | II->getIntrinsicID() == Intrinsic::launder_invariant_group) |
477 | return true; |
478 | |
// These intrinsics declare side effects to prevent them from being moved,
// but they are no-ops when they have no users.
481 | if (II->getIntrinsicID() == Intrinsic::allow_runtime_check || |
482 | II->getIntrinsicID() == Intrinsic::allow_ubsan_check) |
483 | return true; |
484 | |
485 | if (II->isLifetimeStartOrEnd()) { |
486 | auto *Arg = II->getArgOperand(i: 1); |
// Lifetime intrinsics are dead when their right-hand (pointer) operand is
// undef.
488 | if (isa<UndefValue>(Val: Arg)) |
489 | return true; |
// If the right-hand operand is an alloca, global, or argument and the only
// uses are lifetime intrinsics, then the intrinsics are dead.
492 | if (isa<AllocaInst>(Val: Arg) || isa<GlobalValue>(Val: Arg) || isa<Argument>(Val: Arg)) |
493 | return llvm::all_of(Range: Arg->uses(), P: [](Use &Use) { |
494 | if (IntrinsicInst *IntrinsicUse = |
495 | dyn_cast<IntrinsicInst>(Val: Use.getUser())) |
496 | return IntrinsicUse->isLifetimeStartOrEnd(); |
497 | return false; |
498 | }); |
499 | return false; |
500 | } |
501 | |
502 | // Assumptions are dead if their condition is trivially true. |
503 | if (II->getIntrinsicID() == Intrinsic::assume && |
504 | isAssumeWithEmptyBundle(Assume: cast<AssumeInst>(Val: *II))) { |
505 | if (ConstantInt *Cond = dyn_cast<ConstantInt>(Val: II->getArgOperand(i: 0))) |
506 | return !Cond->isZero(); |
507 | |
508 | return false; |
509 | } |
510 | |
511 | if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(Val: I)) { |
512 | std::optional<fp::ExceptionBehavior> ExBehavior = |
513 | FPI->getExceptionBehavior(); |
514 | return *ExBehavior != fp::ebStrict; |
515 | } |
516 | } |
517 | |
518 | if (auto *Call = dyn_cast<CallBase>(Val: I)) { |
519 | if (Value *FreedOp = getFreedOperand(CB: Call, TLI)) |
520 | if (Constant *C = dyn_cast<Constant>(Val: FreedOp)) |
521 | return C->isNullValue() || isa<UndefValue>(Val: C); |
522 | if (isMathLibCallNoop(Call, TLI)) |
523 | return true; |
524 | } |
525 | |
526 | // Non-volatile atomic loads from constants can be removed. |
527 | if (auto *LI = dyn_cast<LoadInst>(Val: I)) |
528 | if (auto *GV = dyn_cast<GlobalVariable>( |
529 | Val: LI->getPointerOperand()->stripPointerCasts())) |
530 | if (!LI->isVolatile() && GV->isConstant()) |
531 | return true; |
532 | |
533 | return false; |
534 | } |
535 | |
536 | /// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a |
537 | /// trivially dead instruction, delete it. If that makes any of its operands |
538 | /// trivially dead, delete them too, recursively. Return true if any |
539 | /// instructions were deleted. |
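///
/// A typical caller-side sketch (illustrative; assumes I, TLI, and MSSAU are
/// already available):
///   I->replaceAllUsesWith(NewV);  // I is now unused.
///   RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);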
540 | bool llvm::RecursivelyDeleteTriviallyDeadInstructions( |
541 | Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU, |
542 | std::function<void(Value *)> AboutToDeleteCallback) { |
543 | Instruction *I = dyn_cast<Instruction>(Val: V); |
544 | if (!I || !isInstructionTriviallyDead(I, TLI)) |
545 | return false; |
546 | |
547 | SmallVector<WeakTrackingVH, 16> DeadInsts; |
548 | DeadInsts.push_back(Elt: I); |
549 | RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU, |
550 | AboutToDeleteCallback); |
551 | |
552 | return true; |
553 | } |
554 | |
555 | bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive( |
556 | SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI, |
557 | MemorySSAUpdater *MSSAU, |
558 | std::function<void(Value *)> AboutToDeleteCallback) { |
559 | unsigned S = 0, E = DeadInsts.size(), Alive = 0; |
560 | for (; S != E; ++S) { |
561 | auto *I = dyn_cast_or_null<Instruction>(Val&: DeadInsts[S]); |
562 | if (!I || !isInstructionTriviallyDead(I)) { |
563 | DeadInsts[S] = nullptr; |
564 | ++Alive; |
565 | } |
566 | } |
567 | if (Alive == E) |
568 | return false; |
569 | RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU, |
570 | AboutToDeleteCallback); |
571 | return true; |
572 | } |
573 | |
574 | void llvm::RecursivelyDeleteTriviallyDeadInstructions( |
575 | SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI, |
576 | MemorySSAUpdater *MSSAU, |
577 | std::function<void(Value *)> AboutToDeleteCallback) { |
578 | // Process the dead instruction list until empty. |
579 | while (!DeadInsts.empty()) { |
580 | Value *V = DeadInsts.pop_back_val(); |
581 | Instruction *I = cast_or_null<Instruction>(Val: V); |
582 | if (!I) |
583 | continue; |
584 | assert(isInstructionTriviallyDead(I, TLI) && |
585 | "Live instruction found in dead worklist!" ); |
586 | assert(I->use_empty() && "Instructions with uses are not dead." ); |
587 | |
588 | // Don't lose the debug info while deleting the instructions. |
589 | salvageDebugInfo(I&: *I); |
590 | |
591 | if (AboutToDeleteCallback) |
592 | AboutToDeleteCallback(I); |
593 | |
594 | // Null out all of the instruction's operands to see if any operand becomes |
595 | // dead as we go. |
596 | for (Use &OpU : I->operands()) { |
597 | Value *OpV = OpU.get(); |
598 | OpU.set(nullptr); |
599 | |
600 | if (!OpV->use_empty()) |
601 | continue; |
602 | |
603 | // If the operand is an instruction that became dead as we nulled out the |
604 | // operand, and if it is 'trivially' dead, delete it in a future loop |
605 | // iteration. |
606 | if (Instruction *OpI = dyn_cast<Instruction>(Val: OpV)) |
607 | if (isInstructionTriviallyDead(I: OpI, TLI)) |
608 | DeadInsts.push_back(Elt: OpI); |
609 | } |
610 | if (MSSAU) |
611 | MSSAU->removeMemoryAccess(I); |
612 | |
613 | I->eraseFromParent(); |
614 | } |
615 | } |
616 | |
617 | bool llvm::replaceDbgUsesWithUndef(Instruction *I) { |
618 | SmallVector<DbgVariableIntrinsic *, 1> DbgUsers; |
619 | SmallVector<DbgVariableRecord *, 1> DPUsers; |
620 | findDbgUsers(DbgInsts&: DbgUsers, V: I, DbgVariableRecords: &DPUsers); |
621 | for (auto *DII : DbgUsers) |
622 | DII->setKillLocation(); |
623 | for (auto *DVR : DPUsers) |
624 | DVR->setKillLocation(); |
625 | return !DbgUsers.empty() || !DPUsers.empty(); |
626 | } |
627 | |
628 | /// areAllUsesEqual - Check whether the uses of a value are all the same. |
629 | /// This is similar to Instruction::hasOneUse() except this will also return |
630 | /// true when there are no uses or multiple uses that all refer to the same |
631 | /// value. |
632 | static bool areAllUsesEqual(Instruction *I) { |
633 | Value::user_iterator UI = I->user_begin(); |
634 | Value::user_iterator UE = I->user_end(); |
635 | if (UI == UE) |
636 | return true; |
637 | |
638 | User *TheUse = *UI; |
639 | for (++UI; UI != UE; ++UI) { |
640 | if (*UI != TheUse) |
641 | return false; |
642 | } |
643 | return true; |
644 | } |
645 | |
646 | /// RecursivelyDeleteDeadPHINode - If the specified value is an effectively |
647 | /// dead PHI node, due to being a def-use chain of single-use nodes that |
648 | /// either forms a cycle or is terminated by a trivially dead instruction, |
649 | /// delete it. If that makes any of its operands trivially dead, delete them |
650 | /// too, recursively. Return true if a change was made. |
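///
/// For example (illustrative IR), a single-use cycle with no other users:
///   %a = phi i32 [ 0, %entry ], [ %b, %loop ]
///   %b = add i32 %a, 1        ; only used by %a
/// is effectively dead, and both instructions are removed.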
651 | bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN, |
652 | const TargetLibraryInfo *TLI, |
653 | llvm::MemorySSAUpdater *MSSAU) { |
654 | SmallPtrSet<Instruction*, 4> Visited; |
655 | for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects(); |
656 | I = cast<Instruction>(Val: *I->user_begin())) { |
657 | if (I->use_empty()) |
658 | return RecursivelyDeleteTriviallyDeadInstructions(V: I, TLI, MSSAU); |
659 | |
660 | // If we find an instruction more than once, we're on a cycle that |
661 | // won't prove fruitful. |
662 | if (!Visited.insert(Ptr: I).second) { |
663 | // Break the cycle and delete the instruction and its operands. |
664 | I->replaceAllUsesWith(V: PoisonValue::get(T: I->getType())); |
665 | (void)RecursivelyDeleteTriviallyDeadInstructions(V: I, TLI, MSSAU); |
666 | return true; |
667 | } |
668 | } |
669 | return false; |
670 | } |
671 | |
672 | static bool |
673 | simplifyAndDCEInstruction(Instruction *I, |
674 | SmallSetVector<Instruction *, 16> &WorkList, |
675 | const DataLayout &DL, |
676 | const TargetLibraryInfo *TLI) { |
677 | if (isInstructionTriviallyDead(I, TLI)) { |
678 | salvageDebugInfo(I&: *I); |
679 | |
680 | // Null out all of the instruction's operands to see if any operand becomes |
681 | // dead as we go. |
682 | for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { |
683 | Value *OpV = I->getOperand(i); |
684 | I->setOperand(i, Val: nullptr); |
685 | |
686 | if (!OpV->use_empty() || I == OpV) |
687 | continue; |
688 | |
689 | // If the operand is an instruction that became dead as we nulled out the |
690 | // operand, and if it is 'trivially' dead, delete it in a future loop |
691 | // iteration. |
692 | if (Instruction *OpI = dyn_cast<Instruction>(Val: OpV)) |
693 | if (isInstructionTriviallyDead(I: OpI, TLI)) |
694 | WorkList.insert(X: OpI); |
695 | } |
696 | |
697 | I->eraseFromParent(); |
698 | |
699 | return true; |
700 | } |
701 | |
702 | if (Value *SimpleV = simplifyInstruction(I, Q: DL)) { |
703 | // Add the users to the worklist. CAREFUL: an instruction can use itself, |
704 | // in the case of a phi node. |
705 | for (User *U : I->users()) { |
706 | if (U != I) { |
707 | WorkList.insert(X: cast<Instruction>(Val: U)); |
708 | } |
709 | } |
710 | |
711 | // Replace the instruction with its simplified value. |
712 | bool Changed = false; |
713 | if (!I->use_empty()) { |
714 | I->replaceAllUsesWith(V: SimpleV); |
715 | Changed = true; |
716 | } |
717 | if (isInstructionTriviallyDead(I, TLI)) { |
718 | I->eraseFromParent(); |
719 | Changed = true; |
720 | } |
721 | return Changed; |
722 | } |
723 | return false; |
724 | } |
725 | |
726 | /// SimplifyInstructionsInBlock - Scan the specified basic block and try to |
727 | /// simplify any instructions in it and recursively delete dead instructions. |
728 | /// |
/// This returns true if it changed the code. Note that it can delete
/// instructions in other blocks as well as in this block.
731 | bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, |
732 | const TargetLibraryInfo *TLI) { |
733 | bool MadeChange = false; |
734 | const DataLayout &DL = BB->getDataLayout(); |
735 | |
736 | #ifndef NDEBUG |
737 | // In debug builds, ensure that the terminator of the block is never replaced |
738 | // or deleted by these simplifications. The idea of simplification is that it |
739 | // cannot introduce new instructions, and there is no way to replace the |
740 | // terminator of a block without introducing a new instruction. |
741 | AssertingVH<Instruction> TerminatorVH(&BB->back()); |
742 | #endif |
743 | |
744 | SmallSetVector<Instruction *, 16> WorkList; |
745 | // Iterate over the original function, only adding insts to the worklist |
746 | // if they actually need to be revisited. This avoids having to pre-init |
747 | // the worklist with the entire function's worth of instructions. |
748 | for (BasicBlock::iterator BI = BB->begin(), E = std::prev(x: BB->end()); |
749 | BI != E;) { |
750 | assert(!BI->isTerminator()); |
751 | Instruction *I = &*BI; |
752 | ++BI; |
753 | |
754 | // We're visiting this instruction now, so make sure it's not in the |
755 | // worklist from an earlier visit. |
756 | if (!WorkList.count(key: I)) |
757 | MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI); |
758 | } |
759 | |
760 | while (!WorkList.empty()) { |
761 | Instruction *I = WorkList.pop_back_val(); |
762 | MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI); |
763 | } |
764 | return MadeChange; |
765 | } |
766 | |
767 | //===----------------------------------------------------------------------===// |
768 | // Control Flow Graph Restructuring. |
769 | // |
770 | |
771 | void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB, |
772 | DomTreeUpdater *DTU) { |
773 | |
774 | // If BB has single-entry PHI nodes, fold them. |
775 | while (PHINode *PN = dyn_cast<PHINode>(Val: DestBB->begin())) { |
776 | Value *NewVal = PN->getIncomingValue(i: 0); |
// Replace a self-referencing PHI with poison; it must be dead.
778 | if (NewVal == PN) NewVal = PoisonValue::get(T: PN->getType()); |
779 | PN->replaceAllUsesWith(V: NewVal); |
780 | PN->eraseFromParent(); |
781 | } |
782 | |
783 | BasicBlock *PredBB = DestBB->getSinglePredecessor(); |
784 | assert(PredBB && "Block doesn't have a single predecessor!" ); |
785 | |
786 | bool ReplaceEntryBB = PredBB->isEntryBlock(); |
787 | |
788 | // DTU updates: Collect all the edges that enter |
789 | // PredBB. These dominator edges will be redirected to DestBB. |
790 | SmallVector<DominatorTree::UpdateType, 32> Updates; |
791 | |
792 | if (DTU) { |
793 | // To avoid processing the same predecessor more than once. |
794 | SmallPtrSet<BasicBlock *, 2> SeenPreds; |
795 | Updates.reserve(N: Updates.size() + 2 * pred_size(BB: PredBB) + 1); |
796 | for (BasicBlock *PredOfPredBB : predecessors(BB: PredBB)) |
797 | // This predecessor of PredBB may already have DestBB as a successor. |
798 | if (PredOfPredBB != PredBB) |
799 | if (SeenPreds.insert(Ptr: PredOfPredBB).second) |
800 | Updates.push_back(Elt: {DominatorTree::Insert, PredOfPredBB, DestBB}); |
801 | SeenPreds.clear(); |
802 | for (BasicBlock *PredOfPredBB : predecessors(BB: PredBB)) |
803 | if (SeenPreds.insert(Ptr: PredOfPredBB).second) |
804 | Updates.push_back(Elt: {DominatorTree::Delete, PredOfPredBB, PredBB}); |
805 | Updates.push_back(Elt: {DominatorTree::Delete, PredBB, DestBB}); |
806 | } |
807 | |
808 | // Zap anything that took the address of DestBB. Not doing this will give the |
809 | // address an invalid value. |
810 | if (DestBB->hasAddressTaken()) { |
811 | BlockAddress *BA = BlockAddress::get(BB: DestBB); |
812 | Constant *Replacement = |
813 | ConstantInt::get(Ty: Type::getInt32Ty(C&: BA->getContext()), V: 1); |
814 | BA->replaceAllUsesWith(V: ConstantExpr::getIntToPtr(C: Replacement, |
815 | Ty: BA->getType())); |
816 | BA->destroyConstant(); |
817 | } |
818 | |
819 | // Anything that branched to PredBB now branches to DestBB. |
820 | PredBB->replaceAllUsesWith(V: DestBB); |
821 | |
822 | // Splice all the instructions from PredBB to DestBB. |
823 | PredBB->getTerminator()->eraseFromParent(); |
824 | DestBB->splice(ToIt: DestBB->begin(), FromBB: PredBB); |
825 | new UnreachableInst(PredBB->getContext(), PredBB); |
826 | |
827 | // If the PredBB is the entry block of the function, move DestBB up to |
828 | // become the entry block after we erase PredBB. |
829 | if (ReplaceEntryBB) |
830 | DestBB->moveAfter(MovePos: PredBB); |
831 | |
832 | if (DTU) { |
833 | assert(PredBB->size() == 1 && |
834 | isa<UnreachableInst>(PredBB->getTerminator()) && |
835 | "The successor list of PredBB isn't empty before " |
836 | "applying corresponding DTU updates." ); |
837 | DTU->applyUpdatesPermissive(Updates); |
838 | DTU->deleteBB(DelBB: PredBB); |
839 | // Recalculation of DomTree is needed when updating a forward DomTree and |
840 | // the Entry BB is replaced. |
841 | if (ReplaceEntryBB && DTU->hasDomTree()) { |
842 | // The entry block was removed and there is no external interface for |
843 | // the dominator tree to be notified of this change. In this corner-case |
844 | // we recalculate the entire tree. |
845 | DTU->recalculate(F&: *(DestBB->getParent())); |
846 | } |
847 | } |
848 | |
849 | else { |
850 | PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr. |
851 | } |
852 | } |
853 | |
854 | /// Return true if we can choose one of these values to use in place of the |
855 | /// other. Note that we will always choose the non-undef value to keep. |
856 | static bool CanMergeValues(Value *First, Value *Second) { |
857 | return First == Second || isa<UndefValue>(Val: First) || isa<UndefValue>(Val: Second); |
858 | } |
859 | |
860 | /// Return true if we can fold BB, an almost-empty BB ending in an unconditional |
861 | /// branch to Succ, into Succ. |
862 | /// |
863 | /// Assumption: Succ is the single successor for BB. |
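///
/// For example (illustrative CFG), if P is a predecessor of both BB and Succ:
///   P --> BB --> Succ
///   P ---------> Succ
/// then any PHI in Succ already has an incoming value for P; folding BB is
/// only safe if the value flowing in through BB agrees with that value (or
/// one of the two is undef).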
864 | static bool |
865 | CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ, |
866 | const SmallPtrSetImpl<BasicBlock *> &BBPreds) { |
867 | assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!" ); |
868 | |
869 | LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into " |
870 | << Succ->getName() << "\n" ); |
// Shortcut: if there is only a single predecessor, it must be BB and merging
// is always safe.
873 | if (Succ->getSinglePredecessor()) |
874 | return true; |
875 | |
876 | // Look at all the phi nodes in Succ, to see if they present a conflict when |
877 | // merging these blocks |
878 | for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(Val: I); ++I) { |
879 | PHINode *PN = cast<PHINode>(Val&: I); |
880 | |
881 | // If the incoming value from BB is again a PHINode in |
882 | // BB which has the same incoming value for *PI as PN does, we can |
883 | // merge the phi nodes and then the blocks can still be merged |
884 | PHINode *BBPN = dyn_cast<PHINode>(Val: PN->getIncomingValueForBlock(BB)); |
885 | if (BBPN && BBPN->getParent() == BB) { |
886 | for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) { |
887 | BasicBlock *IBB = PN->getIncomingBlock(i: PI); |
888 | if (BBPreds.count(Ptr: IBB) && |
889 | !CanMergeValues(First: BBPN->getIncomingValueForBlock(BB: IBB), |
890 | Second: PN->getIncomingValue(i: PI))) { |
891 | LLVM_DEBUG(dbgs() |
892 | << "Can't fold, phi node " << PN->getName() << " in " |
893 | << Succ->getName() << " is conflicting with " |
894 | << BBPN->getName() << " with regard to common predecessor " |
895 | << IBB->getName() << "\n" ); |
896 | return false; |
897 | } |
898 | } |
899 | } else { |
900 | Value* Val = PN->getIncomingValueForBlock(BB); |
901 | for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) { |
902 | // See if the incoming value for the common predecessor is equal to the |
903 | // one for BB, in which case this phi node will not prevent the merging |
904 | // of the block. |
905 | BasicBlock *IBB = PN->getIncomingBlock(i: PI); |
906 | if (BBPreds.count(Ptr: IBB) && |
907 | !CanMergeValues(First: Val, Second: PN->getIncomingValue(i: PI))) { |
908 | LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() |
909 | << " in " << Succ->getName() |
910 | << " is conflicting with regard to common " |
911 | << "predecessor " << IBB->getName() << "\n" ); |
912 | return false; |
913 | } |
914 | } |
915 | } |
916 | } |
917 | |
918 | return true; |
919 | } |
920 | |
921 | using PredBlockVector = SmallVector<BasicBlock *, 16>; |
922 | using IncomingValueMap = DenseMap<BasicBlock *, Value *>; |
923 | |
924 | /// Determines the value to use as the phi node input for a block. |
925 | /// |
/// Select between \p OldVal and any value that we know flows from \p BB
927 | /// to a particular phi on the basis of which one (if either) is not |
928 | /// undef. Update IncomingValues based on the selected value. |
929 | /// |
930 | /// \param OldVal The value we are considering selecting. |
931 | /// \param BB The block that the value flows in from. |
932 | /// \param IncomingValues A map from block-to-value for other phi inputs |
933 | /// that we have examined. |
934 | /// |
935 | /// \returns the selected value. |
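///
/// For example (illustrative), if \p OldVal is undef and \p IncomingValues
/// already maps \p BB to %v, then %v is returned so the undef input can be
/// replaced by a known value.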
936 | static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB, |
937 | IncomingValueMap &IncomingValues) { |
938 | if (!isa<UndefValue>(Val: OldVal)) { |
939 | assert((!IncomingValues.count(BB) || |
940 | IncomingValues.find(BB)->second == OldVal) && |
941 | "Expected OldVal to match incoming value from BB!" ); |
942 | |
943 | IncomingValues.insert(KV: std::make_pair(x&: BB, y&: OldVal)); |
944 | return OldVal; |
945 | } |
946 | |
947 | IncomingValueMap::const_iterator It = IncomingValues.find(Val: BB); |
948 | if (It != IncomingValues.end()) return It->second; |
949 | |
950 | return OldVal; |
951 | } |
952 | |
953 | /// Create a map from block to value for the operands of a |
954 | /// given phi. |
955 | /// |
956 | /// Create a map from block to value for each non-undef value flowing |
957 | /// into \p PN. |
958 | /// |
959 | /// \param PN The phi we are collecting the map for. |
960 | /// \param IncomingValues [out] The map from block to value for this phi. |
961 | static void gatherIncomingValuesToPhi(PHINode *PN, |
962 | IncomingValueMap &IncomingValues) { |
963 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { |
964 | BasicBlock *BB = PN->getIncomingBlock(i); |
965 | Value *V = PN->getIncomingValue(i); |
966 | |
967 | if (!isa<UndefValue>(Val: V)) |
968 | IncomingValues.insert(KV: std::make_pair(x&: BB, y&: V)); |
969 | } |
970 | } |
971 | |
972 | /// Replace the incoming undef values to a phi with the values |
973 | /// from a block-to-value map. |
974 | /// |
975 | /// \param PN The phi we are replacing the undefs in. |
976 | /// \param IncomingValues A map from block to value. |
977 | static void replaceUndefValuesInPhi(PHINode *PN, |
978 | const IncomingValueMap &IncomingValues) { |
979 | SmallVector<unsigned> TrueUndefOps; |
980 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { |
981 | Value *V = PN->getIncomingValue(i); |
982 | |
983 | if (!isa<UndefValue>(Val: V)) continue; |
984 | |
985 | BasicBlock *BB = PN->getIncomingBlock(i); |
986 | IncomingValueMap::const_iterator It = IncomingValues.find(Val: BB); |
987 | |
988 | // Keep track of undef/poison incoming values. Those must match, so we fix |
989 | // them up below if needed. |
990 | // Note: this is conservatively correct, but we could try harder and group |
991 | // the undef values per incoming basic block. |
992 | if (It == IncomingValues.end()) { |
993 | TrueUndefOps.push_back(Elt: i); |
994 | continue; |
995 | } |
996 | |
997 | // There is a defined value for this incoming block, so map this undef |
998 | // incoming value to the defined value. |
999 | PN->setIncomingValue(i, V: It->second); |
1000 | } |
1001 | |
1002 | // If there are both undef and poison values incoming, then convert those |
1003 | // values to undef. It is invalid to have different values for the same |
1004 | // incoming block. |
1005 | unsigned PoisonCount = count_if(Range&: TrueUndefOps, P: [&](unsigned i) { |
1006 | return isa<PoisonValue>(Val: PN->getIncomingValue(i)); |
1007 | }); |
1008 | if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) { |
1009 | for (unsigned i : TrueUndefOps) |
1010 | PN->setIncomingValue(i, V: UndefValue::get(T: PN->getType())); |
1011 | } |
1012 | } |
1013 | |
// Return true only when BB and Succ share a single common predecessor.
// This only handles cases where BB can't be merged into Succ while its
// predecessors can be redirected to Succ.
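//
// For example (illustrative CFG), with common predecessor P:
//   P --> BB --> Succ
//   P ---------> Succ
//   Q --> BB
// BB itself can't be merged into Succ, but Q can be redirected to branch to
// Succ directly, leaving P as the only predecessor of BB.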
1017 | static bool |
1018 | CanRedirectPredsOfEmptyBBToSucc(BasicBlock *BB, BasicBlock *Succ, |
1019 | const SmallPtrSetImpl<BasicBlock *> &BBPreds, |
1020 | const SmallPtrSetImpl<BasicBlock *> &SuccPreds, |
1021 | BasicBlock *&CommonPred) { |
1022 | |
// There must be phis in BB; otherwise BB will be merged into Succ directly.
1024 | if (BB->phis().empty() || Succ->phis().empty()) |
1025 | return false; |
1026 | |
// BB must have predecessors that are not shared with Succ and that can be
// redirected to Succ.
1028 | if (!BB->hasNPredecessorsOrMore(N: 2)) |
1029 | return false; |
1030 | |
// Find the single common predecessor of BB and Succ, if any.
1032 | for (BasicBlock *SuccPred : SuccPreds) { |
1033 | if (BBPreds.count(Ptr: SuccPred)) { |
1034 | if (CommonPred) |
1035 | return false; |
1036 | CommonPred = SuccPred; |
1037 | } |
1038 | } |
1039 | |
1040 | return true; |
1041 | } |
1042 | |
1043 | /// Replace a value flowing from a block to a phi with |
1044 | /// potentially multiple instances of that value flowing from the |
1045 | /// block's predecessors to the phi. |
1046 | /// |
1047 | /// \param BB The block with the value flowing into the phi. |
1048 | /// \param BBPreds The predecessors of BB. |
1049 | /// \param PN The phi that we are updating. |
1050 | /// \param CommonPred The common predecessor of BB and PN's BasicBlock |
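///
/// For example (illustrative IR), if \p PN is
///   %p = phi i32 [ %v, %BB ], [ %w, %Other ]
/// and BB has predecessors P1 and P2 (neither being \p CommonPred), the %BB
/// entry is removed and replaced with
///   %p = phi i32 [ %v, %P1 ], [ %v, %P2 ], [ %w, %Other ]
/// with undef inputs rewritten to known values where possible.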
1051 | static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB, |
1052 | const PredBlockVector &BBPreds, |
1053 | PHINode *PN, |
1054 | BasicBlock *CommonPred) { |
1055 | Value *OldVal = PN->removeIncomingValue(BB, DeletePHIIfEmpty: false); |
1056 | assert(OldVal && "No entry in PHI for Pred BB!" ); |
1057 | |
1058 | IncomingValueMap IncomingValues; |
1059 | |
1060 | // We are merging two blocks - BB, and the block containing PN - and |
1061 | // as a result we need to redirect edges from the predecessors of BB |
1062 | // to go to the block containing PN, and update PN |
1063 | // accordingly. Since we allow merging blocks in the case where the |
1064 | // predecessor and successor blocks both share some predecessors, |
1065 | // and where some of those common predecessors might have undef |
1066 | // values flowing into PN, we want to rewrite those values to be |
1067 | // consistent with the non-undef values. |
1068 | |
1069 | gatherIncomingValuesToPhi(PN, IncomingValues); |
1070 | |
1071 | // If this incoming value is one of the PHI nodes in BB, the new entries |
1072 | // in the PHI node are the entries from the old PHI. |
1073 | if (isa<PHINode>(Val: OldVal) && cast<PHINode>(Val: OldVal)->getParent() == BB) { |
1074 | PHINode *OldValPN = cast<PHINode>(Val: OldVal); |
1075 | for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) { |
1076 | // Note that, since we are merging phi nodes and BB and Succ might |
1077 | // have common predecessors, we could end up with a phi node with |
1078 | // identical incoming branches. This will be cleaned up later (and |
1079 | // will trigger asserts if we try to clean it up now, without also |
1080 | // simplifying the corresponding conditional branch). |
1081 | BasicBlock *PredBB = OldValPN->getIncomingBlock(i); |
1082 | |
1083 | if (PredBB == CommonPred) |
1084 | continue; |
1085 | |
1086 | Value *PredVal = OldValPN->getIncomingValue(i); |
1087 | Value *Selected = |
1088 | selectIncomingValueForBlock(OldVal: PredVal, BB: PredBB, IncomingValues); |
1089 | |
1090 | // And add a new incoming value for this predecessor for the |
1091 | // newly retargeted branch. |
1092 | PN->addIncoming(V: Selected, BB: PredBB); |
1093 | } |
1094 | if (CommonPred) |
1095 | PN->addIncoming(V: OldValPN->getIncomingValueForBlock(BB: CommonPred), BB); |
1096 | |
1097 | } else { |
1098 | for (BasicBlock *PredBB : BBPreds) { |
1099 | // Update existing incoming values in PN for this |
1100 | // predecessor of BB. |
1101 | if (PredBB == CommonPred) |
1102 | continue; |
1103 | |
1104 | Value *Selected = |
1105 | selectIncomingValueForBlock(OldVal, BB: PredBB, IncomingValues); |
1106 | |
1107 | // And add a new incoming value for this predecessor for the |
1108 | // newly retargeted branch. |
1109 | PN->addIncoming(V: Selected, BB: PredBB); |
1110 | } |
1111 | if (CommonPred) |
1112 | PN->addIncoming(V: OldVal, BB); |
1113 | } |
1114 | |
1115 | replaceUndefValuesInPhi(PN, IncomingValues); |
1116 | } |
1117 | |
1118 | bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB, |
1119 | DomTreeUpdater *DTU) { |
1120 | assert(BB != &BB->getParent()->getEntryBlock() && |
1121 | "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!" ); |
1122 | |
1123 | // We can't simplify infinite loops. |
1124 | BasicBlock *Succ = cast<BranchInst>(Val: BB->getTerminator())->getSuccessor(i: 0); |
1125 | if (BB == Succ) |
1126 | return false; |
1127 | |
1128 | SmallPtrSet<BasicBlock *, 16> BBPreds(pred_begin(BB), pred_end(BB)); |
1129 | SmallPtrSet<BasicBlock *, 16> SuccPreds(pred_begin(BB: Succ), pred_end(BB: Succ)); |
1130 | |
1131 | // The single common predecessor of BB and Succ when BB cannot be killed |
1132 | BasicBlock *CommonPred = nullptr; |
1133 | |
1134 | bool BBKillable = CanPropagatePredecessorsForPHIs(BB, Succ, BBPreds); |
1135 | |
// Even if we cannot fold BB into Succ, we may be able to redirect the
// predecessors of BB to Succ.
1138 | bool BBPhisMergeable = |
1139 | BBKillable || |
1140 | CanRedirectPredsOfEmptyBBToSucc(BB, Succ, BBPreds, SuccPreds, CommonPred); |
1141 | |
1142 | if (!BBKillable && !BBPhisMergeable) |
1143 | return false; |
1144 | |
1145 | // Check to see if merging these blocks/phis would cause conflicts for any of |
1146 | // the phi nodes in BB or Succ. If not, we can safely merge. |
1147 | |
1148 | // Check for cases where Succ has multiple predecessors and a PHI node in BB |
1149 | // has uses which will not disappear when the PHI nodes are merged. It is |
1150 | // possible to handle such cases, but difficult: it requires checking whether |
1151 | // BB dominates Succ, which is non-trivial to calculate in the case where |
1152 | // Succ has multiple predecessors. Also, it requires checking whether |
1153 | // constructing the necessary self-referential PHI node doesn't introduce any |
1154 | // conflicts; this isn't too difficult, but the previous code for doing this |
1155 | // was incorrect. |
1156 | // |
1157 | // Note that if this check finds a live use, BB dominates Succ, so BB is |
1158 | // something like a loop pre-header (or rarely, a part of an irreducible CFG); |
1159 | // folding the branch isn't profitable in that case anyway. |
1160 | if (!Succ->getSinglePredecessor()) { |
1161 | BasicBlock::iterator BBI = BB->begin(); |
1162 | while (isa<PHINode>(Val: *BBI)) { |
1163 | for (Use &U : BBI->uses()) { |
1164 | if (PHINode* PN = dyn_cast<PHINode>(Val: U.getUser())) { |
1165 | if (PN->getIncomingBlock(U) != BB) |
1166 | return false; |
1167 | } else { |
1168 | return false; |
1169 | } |
1170 | } |
1171 | ++BBI; |
1172 | } |
1173 | } |
1174 | |
1175 | if (BBPhisMergeable && CommonPred) |
1176 | LLVM_DEBUG(dbgs() << "Found Common Predecessor between: " << BB->getName() |
1177 | << " and " << Succ->getName() << " : " |
1178 | << CommonPred->getName() << "\n" ); |
1179 | |
// If 'BB' and 'BB->Pred' are loop latches, bail out to preserve inner-loop
// metadata.
1182 | // |
1183 | // FIXME: This is a stop-gap solution to preserve inner-loop metadata given |
1184 | // current status (that loop metadata is implemented as metadata attached to |
1185 | // the branch instruction in the loop latch block). To quote from review |
1186 | // comments, "the current representation of loop metadata (using a loop latch |
1187 | // terminator attachment) is known to be fundamentally broken. Loop latches |
1188 | // are not uniquely associated with loops (both in that a latch can be part of |
1189 | // multiple loops and a loop may have multiple latches). Loop headers are. The |
1190 | // solution to this problem is also known: Add support for basic block |
1191 | // metadata, and attach loop metadata to the loop header." |
1192 | // |
1193 | // Why bail out: |
// In this case, we expect 'BB' is the latch for the outer loop and 'BB->Pred'
// is the latch for the inner loop (see reason below), so bail out to preserve
1196 | // inner-loop metadata rather than eliminating 'BB' and attaching its metadata |
1197 | // to this inner-loop. |
1198 | // - The reason we believe 'BB' and 'BB->Pred' have different inner-most |
1199 | // loops: assuming 'BB' and 'BB->Pred' are from the same inner-most loop L, |
1200 | // then 'BB' is the header and latch of 'L' and thereby 'L' must consist of |
1201 | // one self-looping basic block, which is contradictory with the assumption. |
1202 | // |
1203 | // To illustrate how inner-loop metadata is dropped: |
1204 | // |
1205 | // CFG Before |
1206 | // |
// BB is while.cond.exit, attached with loop metadata md2.
1208 | // BB->Pred is for.body, attached with loop metadata md1. |
1209 | // |
1210 | // entry |
1211 | // | |
1212 | // v |
1213 | // ---> while.cond -------------> while.end |
1214 | // | | |
1215 | // | v |
1216 | // | while.body |
1217 | // | | |
1218 | // | v |
1219 | // | for.body <---- (md1) |
1220 | // | | |______| |
1221 | // | v |
1222 | // | while.cond.exit (md2) |
1223 | // | | |
1224 | // |_______| |
1225 | // |
1226 | // CFG After |
1227 | // |
1228 | // while.cond1 is the merge of while.cond.exit and while.cond above. |
1229 | // for.body is attached with md2, and md1 is dropped. |
1230 | // If LoopSimplify runs later (as a part of loop pass), it could create |
1231 | // dedicated exits for inner-loop (essentially adding `while.cond.exit` |
// back), but it won't see 'md1' nor restore it for the inner loop.
1233 | // |
1234 | // entry |
1235 | // | |
1236 | // v |
1237 | // ---> while.cond1 -------------> while.end |
1238 | // | | |
1239 | // | v |
1240 | // | while.body |
1241 | // | | |
1242 | // | v |
1243 | // | for.body <---- (md2) |
1244 | // |_______| |______| |
1245 | if (Instruction *TI = BB->getTerminator()) |
1246 | if (TI->hasMetadata(KindID: LLVMContext::MD_loop)) |
1247 | for (BasicBlock *Pred : predecessors(BB)) |
1248 | if (Instruction *PredTI = Pred->getTerminator()) |
1249 | if (PredTI->hasMetadata(KindID: LLVMContext::MD_loop)) |
1250 | return false; |
1251 | |
1252 | if (BBKillable) |
1253 | LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB); |
1254 | else if (BBPhisMergeable) |
1255 | LLVM_DEBUG(dbgs() << "Merge Phis in Trivial BB: \n" << *BB); |
1256 | |
1257 | SmallVector<DominatorTree::UpdateType, 32> Updates; |
1258 | |
1259 | if (DTU) { |
1260 | // To avoid processing the same predecessor more than once. |
1261 | SmallPtrSet<BasicBlock *, 8> SeenPreds; |
1262 | // All predecessors of BB (except the common predecessor) will be moved to |
1263 | // Succ. |
1264 | Updates.reserve(N: Updates.size() + 2 * pred_size(BB) + 1); |
1265 | |
1266 | for (auto *PredOfBB : predecessors(BB)) { |
1267 | // Do not modify those common predecessors of BB and Succ |
1268 | if (!SuccPreds.contains(Ptr: PredOfBB)) |
1269 | if (SeenPreds.insert(Ptr: PredOfBB).second) |
1270 | Updates.push_back(Elt: {DominatorTree::Insert, PredOfBB, Succ}); |
1271 | } |
1272 | |
1273 | SeenPreds.clear(); |
1274 | |
1275 | for (auto *PredOfBB : predecessors(BB)) |
1276 | // When BB cannot be killed, do not remove the edge between BB and |
1277 | // CommonPred. |
1278 | if (SeenPreds.insert(Ptr: PredOfBB).second && PredOfBB != CommonPred) |
1279 | Updates.push_back(Elt: {DominatorTree::Delete, PredOfBB, BB}); |
1280 | |
1281 | if (BBKillable) |
1282 | Updates.push_back(Elt: {DominatorTree::Delete, BB, Succ}); |
1283 | } |
1284 | |
1285 | if (isa<PHINode>(Val: Succ->begin())) { |
// If there is more than one predecessor of Succ, and there are PHI nodes in
// the successor, then we need to add incoming edges for the PHI nodes.
1288 | // |
1289 | const PredBlockVector BBPreds(predecessors(BB)); |
1290 | |
1291 | // Loop over all of the PHI nodes in the successor of BB. |
1292 | for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(Val: I); ++I) { |
1293 | PHINode *PN = cast<PHINode>(Val&: I); |
1294 | redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN, CommonPred); |
1295 | } |
1296 | } |
1297 | |
1298 | if (Succ->getSinglePredecessor()) { |
1299 | // BB is the only predecessor of Succ, so Succ will end up with exactly |
1300 | // the same predecessors BB had. |
1301 | // Copy over any phi, debug or lifetime instruction. |
1302 | BB->getTerminator()->eraseFromParent(); |
1303 | Succ->splice(ToIt: Succ->getFirstNonPHIIt(), FromBB: BB); |
1304 | } else { |
1305 | while (PHINode *PN = dyn_cast<PHINode>(Val: &BB->front())) { |
1306 | // We explicitly check for such uses for merging phis. |
1307 | assert(PN->use_empty() && "There shouldn't be any uses here!" ); |
1308 | PN->eraseFromParent(); |
1309 | } |
1310 | } |
1311 | |
1312 | // If the unconditional branch we replaced contains llvm.loop metadata, we |
1313 | // add the metadata to the branch instructions in the predecessors. |
1314 | if (Instruction *TI = BB->getTerminator()) |
1315 | if (MDNode *LoopMD = TI->getMetadata(KindID: LLVMContext::MD_loop)) |
1316 | for (BasicBlock *Pred : predecessors(BB)) |
1317 | Pred->getTerminator()->setMetadata(KindID: LLVMContext::MD_loop, Node: LoopMD); |
1318 | |
1319 | if (BBKillable) { |
1320 | // Everything that jumped to BB now goes to Succ. |
1321 | BB->replaceAllUsesWith(V: Succ); |
1322 | |
1323 | if (!Succ->hasName()) |
1324 | Succ->takeName(V: BB); |
1325 | |
1326 | // Clear the successor list of BB to match updates applying to DTU later. |
1327 | if (BB->getTerminator()) |
1328 | BB->back().eraseFromParent(); |
1329 | |
1330 | new UnreachableInst(BB->getContext(), BB); |
1331 | assert(succ_empty(BB) && "The successor list of BB isn't empty before " |
1332 | "applying corresponding DTU updates." ); |
1333 | } else if (BBPhisMergeable) { |
1334 | // Everything except CommonPred that jumped to BB now goes to Succ. |
1335 | BB->replaceUsesWithIf(New: Succ, ShouldReplace: [BBPreds, CommonPred](Use &U) -> bool { |
1336 | if (Instruction *UseInst = dyn_cast<Instruction>(Val: U.getUser())) |
1337 | return UseInst->getParent() != CommonPred && |
1338 | BBPreds.contains(Ptr: UseInst->getParent()); |
1339 | return false; |
1340 | }); |
1341 | } |
1342 | |
1343 | if (DTU) |
1344 | DTU->applyUpdates(Updates); |
1345 | |
1346 | if (BBKillable) |
1347 | DeleteDeadBlock(BB, DTU); |
1348 | |
1349 | return true; |
1350 | } |
1351 | |
1352 | static bool |
1353 | EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB, |
1354 | SmallPtrSetImpl<PHINode *> &ToRemove) { |
1355 | // This implementation doesn't currently consider undef operands |
1356 | // specially. Theoretically, two phis which are identical except for |
1357 | // one having an undef where the other doesn't could be collapsed. |
1358 | |
1359 | bool Changed = false; |
1360 | |
1361 | // Examine each PHI. |
// Note that the increment of I must *NOT* be in the iteration_expression,
// since we don't want to immediately advance when we restart from the
// beginning.
1364 | for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(Val&: I);) { |
1365 | ++I; |
1366 | // Is there an identical PHI node in this basic block? |
// Note that we only look at the PHIs that follow PN (the upper triangle of
// the pairwise comparison); PN was already compared against the earlier
// PHIs on previous iterations of the outer loop.
1369 | for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(Val&: J); ++J) { |
1370 | if (ToRemove.contains(Ptr: DuplicatePN)) |
1371 | continue; |
1372 | if (!DuplicatePN->isIdenticalToWhenDefined(I: PN)) |
1373 | continue; |
1374 | // A duplicate. Replace this PHI with the base PHI. |
1375 | ++NumPHICSEs; |
1376 | DuplicatePN->replaceAllUsesWith(V: PN); |
1377 | ToRemove.insert(Ptr: DuplicatePN); |
1378 | Changed = true; |
1379 | |
1380 | // The RAUW can change PHIs that we already visited. |
1381 | I = BB->begin(); |
1382 | break; // Start over from the beginning. |
1383 | } |
1384 | } |
1385 | return Changed; |
1386 | } |
1387 | |
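/// Hash-set based implementation of PHI CSE: insert each PHI of \p BB into a
/// DenseSet keyed on its incoming values and blocks; any PHI identical to an
/// existing entry is RAUW'd with that entry and recorded in \p ToRemove.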
1388 | static bool |
1389 | EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB, |
1390 | SmallPtrSetImpl<PHINode *> &ToRemove) { |
1391 | // This implementation doesn't currently consider undef operands |
1392 | // specially. Theoretically, two phis which are identical except for |
1393 | // one having an undef where the other doesn't could be collapsed. |
1394 | |
1395 | struct PHIDenseMapInfo { |
1396 | static PHINode *getEmptyKey() { |
1397 | return DenseMapInfo<PHINode *>::getEmptyKey(); |
1398 | } |
1399 | |
1400 | static PHINode *getTombstoneKey() { |
1401 | return DenseMapInfo<PHINode *>::getTombstoneKey(); |
1402 | } |
1403 | |
1404 | static bool isSentinel(PHINode *PN) { |
1405 | return PN == getEmptyKey() || PN == getTombstoneKey(); |
1406 | } |
1407 | |
1408 | // WARNING: this logic must be kept in sync with |
1409 | // Instruction::isIdenticalToWhenDefined()! |
1410 | static unsigned getHashValueImpl(PHINode *PN) { |
1411 | // Compute a hash value on the operands. Instcombine will likely have |
1412 | // sorted them, which helps expose duplicates, but we have to check all |
1413 | // the operands to be safe in case instcombine hasn't run. |
1414 | return static_cast<unsigned>(hash_combine( |
1415 | args: hash_combine_range(first: PN->value_op_begin(), last: PN->value_op_end()), |
1416 | args: hash_combine_range(first: PN->block_begin(), last: PN->block_end()))); |
1417 | } |
1418 | |
1419 | static unsigned getHashValue(PHINode *PN) { |
1420 | #ifndef NDEBUG |
1421 | // If -phicse-debug-hash was specified, return a constant -- this |
1422 | // will force all hashing to collide, so we'll exhaustively search |
1423 | // the table for a match, and the assertion in isEqual will fire if |
1424 | // there's a bug causing equal keys to hash differently. |
1425 | if (PHICSEDebugHash) |
1426 | return 0; |
1427 | #endif |
1428 | return getHashValueImpl(PN); |
1429 | } |
1430 | |
1431 | static bool isEqualImpl(PHINode *LHS, PHINode *RHS) { |
1432 | if (isSentinel(PN: LHS) || isSentinel(PN: RHS)) |
1433 | return LHS == RHS; |
1434 | return LHS->isIdenticalTo(I: RHS); |
1435 | } |
1436 | |
1437 | static bool isEqual(PHINode *LHS, PHINode *RHS) { |
1438 | // These comparisons are nontrivial, so assert that equality implies |
1439 | // hash equality (DenseMap demands this as an invariant). |
1440 | bool Result = isEqualImpl(LHS, RHS); |
1441 | assert(!Result || (isSentinel(LHS) && LHS == RHS) || |
1442 | getHashValueImpl(LHS) == getHashValueImpl(RHS)); |
1443 | return Result; |
1444 | } |
1445 | }; |
1446 | |
1447 | // Set of unique PHINodes. |
1448 | DenseSet<PHINode *, PHIDenseMapInfo> PHISet; |
1449 | PHISet.reserve(Size: 4 * PHICSENumPHISmallSize); |
1450 | |
1451 | // Examine each PHI. |
1452 | bool Changed = false; |
1453 | for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(Val: I++);) { |
1454 | if (ToRemove.contains(Ptr: PN)) |
1455 | continue; |
1456 | auto Inserted = PHISet.insert(V: PN); |
1457 | if (!Inserted.second) { |
1458 | // A duplicate. Replace this PHI with its duplicate. |
1459 | ++NumPHICSEs; |
1460 | PN->replaceAllUsesWith(V: *Inserted.first); |
1461 | ToRemove.insert(Ptr: PN); |
1462 | Changed = true; |
1463 | |
1464 | // The RAUW can change PHIs that we already visited. Start over from the |
1465 | // beginning. |
1466 | PHISet.clear(); |
1467 | I = BB->begin(); |
1468 | } |
1469 | } |
1470 | |
1471 | return Changed; |
1472 | } |
1473 | |
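/// Dispatch between the naive and the set-based PHI CSE implementations,
/// preferring the naive one when \p BB contains only a few PHIs (and forcing
/// the set-based one when -phicse-debug-hash is enabled in assert builds).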
1474 | bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB, |
1475 | SmallPtrSetImpl<PHINode *> &ToRemove) { |
1476 | if ( |
1477 | #ifndef NDEBUG |
1478 | !PHICSEDebugHash && |
1479 | #endif |
1480 | hasNItemsOrLess(C: BB->phis(), N: PHICSENumPHISmallSize)) |
1481 | return EliminateDuplicatePHINodesNaiveImpl(BB, ToRemove); |
1482 | return EliminateDuplicatePHINodesSetBasedImpl(BB, ToRemove); |
1483 | } |
1484 | |
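/// Convenience overload that erases the duplicate PHIs immediately instead of
/// collecting them in a set.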
1485 | bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) { |
1486 | SmallPtrSet<PHINode *, 8> ToRemove; |
1487 | bool Changed = EliminateDuplicatePHINodes(BB, ToRemove); |
1488 | for (PHINode *PN : ToRemove) |
1489 | PN->eraseFromParent(); |
1490 | return Changed; |
1491 | } |
1492 | |
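/// Try to raise the alignment of the object \p V points to (an alloca or a
/// global object) to \p PrefAlign. Returns the resulting alignment of the
/// object, which may be less than \p PrefAlign, or 1 if \p V does not point
/// at an alloca or global object.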
1493 | Align llvm::tryEnforceAlignment(Value *V, Align PrefAlign, |
1494 | const DataLayout &DL) { |
1495 | V = V->stripPointerCasts(); |
1496 | |
1497 | if (AllocaInst *AI = dyn_cast<AllocaInst>(Val: V)) { |
1498 | // TODO: Ideally, this function would not be called if PrefAlign is smaller |
1499 | // than the current alignment, as the known bits calculation should have |
1500 | // already taken it into account. However, this is not always the case, |
1501 | // as computeKnownBits() has a depth limit, while stripPointerCasts() |
1502 | // doesn't. |
1503 | Align CurrentAlign = AI->getAlign(); |
1504 | if (PrefAlign <= CurrentAlign) |
1505 | return CurrentAlign; |
1506 | |
1507 | // If the preferred alignment is greater than the natural stack alignment |
1508 | // then don't round up. This avoids dynamic stack realignment. |
1509 | if (DL.exceedsNaturalStackAlignment(Alignment: PrefAlign)) |
1510 | return CurrentAlign; |
1511 | AI->setAlignment(PrefAlign); |
1512 | return PrefAlign; |
1513 | } |
1514 | |
1515 | if (auto *GO = dyn_cast<GlobalObject>(Val: V)) { |
1516 | // TODO: as above, this shouldn't be necessary. |
1517 | Align CurrentAlign = GO->getPointerAlignment(DL); |
1518 | if (PrefAlign <= CurrentAlign) |
1519 | return CurrentAlign; |
1520 | |
1521 | // If there is a large requested alignment and we can, bump up the alignment |
1522 | // of the global. If the memory we set aside for the global may not be the |
1523 | // memory used by the final program then it is impossible for us to reliably |
1524 | // enforce the preferred alignment. |
1525 | if (!GO->canIncreaseAlignment()) |
1526 | return CurrentAlign; |
1527 | |
1528 | if (GO->isThreadLocal()) { |
1529 | unsigned MaxTLSAlign = GO->getParent()->getMaxTLSAlignment() / CHAR_BIT; |
1530 | if (MaxTLSAlign && PrefAlign > Align(MaxTLSAlign)) |
1531 | PrefAlign = Align(MaxTLSAlign); |
1532 | } |
1533 | |
1534 | GO->setAlignment(PrefAlign); |
1535 | return PrefAlign; |
1536 | } |
1537 | |
1538 | return Align(1); |
1539 | } |
1540 | |
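/// Compute the largest alignment known to hold for pointer \p V from its
/// known-zero trailing bits and, if \p PrefAlign is larger, additionally try
/// to enforce \p PrefAlign on the underlying object.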
1541 | Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, |
1542 | const DataLayout &DL, |
1543 | const Instruction *CxtI, |
1544 | AssumptionCache *AC, |
1545 | const DominatorTree *DT) { |
1546 | assert(V->getType()->isPointerTy() && |
1547 | "getOrEnforceKnownAlignment expects a pointer!" ); |
1548 | |
1549 | KnownBits Known = computeKnownBits(V, DL, Depth: 0, AC, CxtI, DT); |
1550 | unsigned TrailZ = Known.countMinTrailingZeros(); |
1551 | |
1552 | // Avoid trouble with ridiculously large TrailZ values, such as |
1553 | // those computed from a null pointer. |
1554 | // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent). |
1555 | TrailZ = std::min(a: TrailZ, b: +Value::MaxAlignmentExponent); |
1556 | |
1557 | Align Alignment = Align(1ull << std::min(a: Known.getBitWidth() - 1, b: TrailZ)); |
1558 | |
1559 | if (PrefAlign && *PrefAlign > Alignment) |
1560 | Alignment = std::max(a: Alignment, b: tryEnforceAlignment(V, PrefAlign: *PrefAlign, DL)); |
1561 | |
1562 | // We don't need to make any adjustment. |
1563 | return Alignment; |
1564 | } |
1565 | |
1566 | ///===---------------------------------------------------------------------===// |
1567 | /// Dbg Intrinsic utilities |
1568 | /// |
1569 | |
1570 | /// See if there is a dbg.value intrinsic for DIVar for the PHI node. |
1571 | static bool PhiHasDebugValue(DILocalVariable *DIVar, |
1572 | DIExpression *DIExpr, |
1573 | PHINode *APN) { |
1574 | // Since we can't guarantee that the original dbg.declare intrinsic |
1575 | // is removed by LowerDbgDeclare(), we need to make sure that we are |
1576 | // not inserting the same dbg.value intrinsic over and over. |
1577 | SmallVector<DbgValueInst *, 1> DbgValues; |
1578 | SmallVector<DbgVariableRecord *, 1> DbgVariableRecords; |
1579 | findDbgValues(DbgValues, V: APN, DbgVariableRecords: &DbgVariableRecords); |
1580 | for (auto *DVI : DbgValues) { |
1581 | assert(is_contained(DVI->getValues(), APN)); |
1582 | if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr)) |
1583 | return true; |
1584 | } |
1585 | for (auto *DVR : DbgVariableRecords) { |
1586 | assert(is_contained(DVR->location_ops(), APN)); |
1587 | if ((DVR->getVariable() == DIVar) && (DVR->getExpression() == DIExpr)) |
1588 | return true; |
1589 | } |
1590 | return false; |
1591 | } |
1592 | |
1593 | /// Check if the alloc size of \p ValTy is large enough to cover the variable |
1594 | /// (or fragment of the variable) described by \p DII. |
1595 | /// |
1596 | /// This is primarily intended as a helper for the different |
1597 | /// ConvertDebugDeclareToDebugValue functions. The dbg.declare that is converted |
1598 | /// describes an alloca'd variable, so we need to use the alloc size of the |
1599 | /// value when doing the comparison. E.g. an i1 value will be identified as |
1600 | /// covering an n-bit fragment, if the store size of i1 is at least n bits. |
1601 | static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) { |
1602 | const DataLayout &DL = DII->getDataLayout(); |
1603 | TypeSize ValueSize = DL.getTypeAllocSizeInBits(Ty: ValTy); |
1604 | if (std::optional<uint64_t> FragmentSize = |
1605 | DII->getExpression()->getActiveBits(Var: DII->getVariable())) |
1606 | return TypeSize::isKnownGE(LHS: ValueSize, RHS: TypeSize::getFixed(ExactSize: *FragmentSize)); |
1607 | |
1608 | // We can't always calculate the size of the DI variable (e.g. if it is a |
1609 | // VLA). Try to use the size of the alloca that the dbg intrinsic describes |
// instead.
1611 | if (DII->isAddressOfVariable()) { |
1612 | // DII should have exactly 1 location when it is an address. |
1613 | assert(DII->getNumVariableLocationOps() == 1 && |
1614 | "address of variable must have exactly 1 location operand." ); |
1615 | if (auto *AI = |
1616 | dyn_cast_or_null<AllocaInst>(Val: DII->getVariableLocationOp(OpIdx: 0))) { |
1617 | if (std::optional<TypeSize> FragmentSize = |
1618 | AI->getAllocationSizeInBits(DL)) { |
1619 | return TypeSize::isKnownGE(LHS: ValueSize, RHS: *FragmentSize); |
1620 | } |
1621 | } |
1622 | } |
1623 | // Could not determine size of variable. Conservatively return false. |
1624 | return false; |
1625 | } |
1626 | // RemoveDIs: duplicate implementation of the above, using DbgVariableRecords, |
1627 | // the replacement for dbg.values. |
1628 | static bool valueCoversEntireFragment(Type *ValTy, DbgVariableRecord *DVR) { |
1629 | const DataLayout &DL = DVR->getModule()->getDataLayout(); |
1630 | TypeSize ValueSize = DL.getTypeAllocSizeInBits(Ty: ValTy); |
1631 | if (std::optional<uint64_t> FragmentSize = |
1632 | DVR->getExpression()->getActiveBits(Var: DVR->getVariable())) |
1633 | return TypeSize::isKnownGE(LHS: ValueSize, RHS: TypeSize::getFixed(ExactSize: *FragmentSize)); |
1634 | |
1635 | // We can't always calculate the size of the DI variable (e.g. if it is a |
1636 | // VLA). Try to use the size of the alloca that the dbg intrinsic describes |
// instead.
1638 | if (DVR->isAddressOfVariable()) { |
1639 | // DVR should have exactly 1 location when it is an address. |
1640 | assert(DVR->getNumVariableLocationOps() == 1 && |
1641 | "address of variable must have exactly 1 location operand." ); |
1642 | if (auto *AI = |
1643 | dyn_cast_or_null<AllocaInst>(Val: DVR->getVariableLocationOp(OpIdx: 0))) { |
1644 | if (std::optional<TypeSize> FragmentSize = AI->getAllocationSizeInBits(DL)) { |
1645 | return TypeSize::isKnownGE(LHS: ValueSize, RHS: *FragmentSize); |
1646 | } |
1647 | } |
1648 | } |
1649 | // Could not determine size of variable. Conservatively return false. |
1650 | return false; |
1651 | } |
1652 | |
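/// Insert a new debug value describing \p DV immediately before \p Instr,
/// either as a dbg.value intrinsic (old debug-info format) or as a
/// DbgVariableRecord (new format).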
1653 | static void insertDbgValueOrDbgVariableRecord(DIBuilder &Builder, Value *DV, |
1654 | DILocalVariable *DIVar, |
1655 | DIExpression *DIExpr, |
1656 | const DebugLoc &NewLoc, |
1657 | BasicBlock::iterator Instr) { |
1658 | if (!UseNewDbgInfoFormat) { |
1659 | auto DbgVal = Builder.insertDbgValueIntrinsic(Val: DV, VarInfo: DIVar, Expr: DIExpr, DL: NewLoc, |
1660 | InsertBefore: (Instruction *)nullptr); |
1661 | DbgVal.get<Instruction *>()->insertBefore(InsertPos: Instr); |
1662 | } else { |
1663 | // RemoveDIs: if we're using the new debug-info format, allocate a |
1664 | // DbgVariableRecord directly instead of a dbg.value intrinsic. |
1665 | ValueAsMetadata *DVAM = ValueAsMetadata::get(V: DV); |
DbgVariableRecord *NewDVR =
new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
Instr->getParent()->insertDbgRecordBefore(DR: NewDVR, Here: Instr);
1669 | } |
1670 | } |
1671 | |
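/// Same as insertDbgValueOrDbgVariableRecord, but insert the new debug value
/// immediately after \p Instr.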
1672 | static void insertDbgValueOrDbgVariableRecordAfter( |
1673 | DIBuilder &Builder, Value *DV, DILocalVariable *DIVar, DIExpression *DIExpr, |
1674 | const DebugLoc &NewLoc, BasicBlock::iterator Instr) { |
1675 | if (!UseNewDbgInfoFormat) { |
1676 | auto DbgVal = Builder.insertDbgValueIntrinsic(Val: DV, VarInfo: DIVar, Expr: DIExpr, DL: NewLoc, |
1677 | InsertBefore: (Instruction *)nullptr); |
1678 | DbgVal.get<Instruction *>()->insertAfter(InsertPos: &*Instr); |
1679 | } else { |
1680 | // RemoveDIs: if we're using the new debug-info format, allocate a |
1681 | // DbgVariableRecord directly instead of a dbg.value intrinsic. |
1682 | ValueAsMetadata *DVAM = ValueAsMetadata::get(V: DV); |
DbgVariableRecord *NewDVR =
new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
Instr->getParent()->insertDbgRecordAfter(DR: NewDVR, I: &*Instr);
1686 | } |
1687 | } |
1688 | |
1689 | /// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value |
1690 | /// that has an associated llvm.dbg.declare intrinsic. |
1691 | void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, |
1692 | StoreInst *SI, DIBuilder &Builder) { |
1693 | assert(DII->isAddressOfVariable() || isa<DbgAssignIntrinsic>(DII)); |
1694 | auto *DIVar = DII->getVariable(); |
1695 | assert(DIVar && "Missing variable" ); |
1696 | auto *DIExpr = DII->getExpression(); |
1697 | Value *DV = SI->getValueOperand(); |
1698 | |
1699 | DebugLoc NewLoc = getDebugValueLoc(DII); |
1700 | |
1701 | // If the alloca describes the variable itself, i.e. the expression in the |
1702 | // dbg.declare doesn't start with a dereference, we can perform the |
1703 | // conversion if the value covers the entire fragment of DII. |
1704 | // If the alloca describes the *address* of DIVar, i.e. DIExpr is |
1705 | // *just* a DW_OP_deref, we use DV as is for the dbg.value. |
1706 | // We conservatively ignore other dereferences, because the following two are |
1707 | // not equivalent: |
1708 | // dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2)) |
1709 | // dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2)) |
1710 | // The former is adding 2 to the address of the variable, whereas the latter |
1711 | // is adding 2 to the value of the variable. As such, we insist on just a |
1712 | // deref expression. |
1713 | bool CanConvert = |
1714 | DIExpr->isDeref() || (!DIExpr->startsWithDeref() && |
1715 | valueCoversEntireFragment(ValTy: DV->getType(), DII)); |
1716 | if (CanConvert) { |
1717 | insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc, |
1718 | Instr: SI->getIterator()); |
1719 | return; |
1720 | } |
1721 | |
1722 | // FIXME: If storing to a part of the variable described by the dbg.declare, |
1723 | // then we want to insert a dbg.value for the corresponding fragment. |
1724 | LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DII |
1725 | << '\n'); |
1726 | // For now, when there is a store to parts of the variable (but we do not |
// know which part) we insert a dbg.value intrinsic to indicate that we
1728 | // know nothing about the variable's content. |
1729 | DV = UndefValue::get(T: DV->getType()); |
1730 | insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc, |
1731 | Instr: SI->getIterator()); |
1732 | } |
1733 | |
1734 | /// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value |
1735 | /// that has an associated llvm.dbg.declare intrinsic. |
1736 | void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, |
1737 | LoadInst *LI, DIBuilder &Builder) { |
1738 | auto *DIVar = DII->getVariable(); |
1739 | auto *DIExpr = DII->getExpression(); |
1740 | assert(DIVar && "Missing variable" ); |
1741 | |
1742 | if (!valueCoversEntireFragment(ValTy: LI->getType(), DII)) { |
1743 | // FIXME: If only referring to a part of the variable described by the |
1744 | // dbg.declare, then we want to insert a dbg.value for the corresponding |
1745 | // fragment. |
1746 | LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " |
1747 | << *DII << '\n'); |
1748 | return; |
1749 | } |
1750 | |
1751 | DebugLoc NewLoc = getDebugValueLoc(DII); |
1752 | |
1753 | // We are now tracking the loaded value instead of the address. In the |
1754 | // future if multi-location support is added to the IR, it might be |
1755 | // preferable to keep tracking both the loaded value and the original |
1756 | // address in case the alloca can not be elided. |
1757 | insertDbgValueOrDbgVariableRecordAfter(Builder, DV: LI, DIVar, DIExpr, NewLoc, |
1758 | Instr: LI->getIterator()); |
1759 | } |
1760 | |
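/// DbgVariableRecord variant of the overload above: inserts a debug value
/// before a store to an alloca'd value that has an associated dbg.declare
/// record.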
1761 | void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, |
1762 | StoreInst *SI, DIBuilder &Builder) { |
1763 | assert(DVR->isAddressOfVariable() || DVR->isDbgAssign()); |
1764 | auto *DIVar = DVR->getVariable(); |
1765 | assert(DIVar && "Missing variable" ); |
1766 | auto *DIExpr = DVR->getExpression(); |
1767 | Value *DV = SI->getValueOperand(); |
1768 | |
1769 | DebugLoc NewLoc = getDebugValueLoc(DVR); |
1770 | |
1771 | // If the alloca describes the variable itself, i.e. the expression in the |
1772 | // dbg.declare doesn't start with a dereference, we can perform the |
// conversion if the value covers the entire fragment of DVR.
1774 | // If the alloca describes the *address* of DIVar, i.e. DIExpr is |
1775 | // *just* a DW_OP_deref, we use DV as is for the dbg.value. |
1776 | // We conservatively ignore other dereferences, because the following two are |
1777 | // not equivalent: |
1778 | // dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2)) |
1779 | // dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2)) |
1780 | // The former is adding 2 to the address of the variable, whereas the latter |
1781 | // is adding 2 to the value of the variable. As such, we insist on just a |
1782 | // deref expression. |
1783 | bool CanConvert = |
1784 | DIExpr->isDeref() || (!DIExpr->startsWithDeref() && |
1785 | valueCoversEntireFragment(ValTy: DV->getType(), DVR)); |
1786 | if (CanConvert) { |
1787 | insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc, |
1788 | Instr: SI->getIterator()); |
1789 | return; |
1790 | } |
1791 | |
1792 | // FIXME: If storing to a part of the variable described by the dbg.declare, |
1793 | // then we want to insert a dbg.value for the corresponding fragment. |
1794 | LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DVR |
1795 | << '\n'); |
1796 | assert(UseNewDbgInfoFormat); |
1797 | |
1798 | // For now, when there is a store to parts of the variable (but we do not |
// know which part) we insert a dbg.value intrinsic to indicate that we
1800 | // know nothing about the variable's content. |
1801 | DV = UndefValue::get(T: DV->getType()); |
1802 | ValueAsMetadata *DVAM = ValueAsMetadata::get(V: DV); |
1803 | DbgVariableRecord *NewDVR = |
1804 | new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get()); |
1805 | SI->getParent()->insertDbgRecordBefore(DR: NewDVR, Here: SI->getIterator()); |
1806 | } |
1807 | |
1808 | /// Inserts a llvm.dbg.value intrinsic after a phi that has an associated |
1809 | /// llvm.dbg.declare intrinsic. |
1810 | void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, |
1811 | PHINode *APN, DIBuilder &Builder) { |
1812 | auto *DIVar = DII->getVariable(); |
1813 | auto *DIExpr = DII->getExpression(); |
1814 | assert(DIVar && "Missing variable" ); |
1815 | |
1816 | if (PhiHasDebugValue(DIVar, DIExpr, APN)) |
1817 | return; |
1818 | |
1819 | if (!valueCoversEntireFragment(ValTy: APN->getType(), DII)) { |
1820 | // FIXME: If only referring to a part of the variable described by the |
1821 | // dbg.declare, then we want to insert a dbg.value for the corresponding |
1822 | // fragment. |
1823 | LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " |
1824 | << *DII << '\n'); |
1825 | return; |
1826 | } |
1827 | |
1828 | BasicBlock *BB = APN->getParent(); |
1829 | auto InsertionPt = BB->getFirstInsertionPt(); |
1830 | |
1831 | DebugLoc NewLoc = getDebugValueLoc(DII); |
1832 | |
1833 | // The block may be a catchswitch block, which does not have a valid |
1834 | // insertion point. |
1835 | // FIXME: Insert dbg.value markers in the successors when appropriate. |
1836 | if (InsertionPt != BB->end()) { |
1837 | insertDbgValueOrDbgVariableRecord(Builder, DV: APN, DIVar, DIExpr, NewLoc, |
1838 | Instr: InsertionPt); |
1839 | } |
1840 | } |
1841 | |
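/// DbgVariableRecord variant of the overload above: inserts a debug value
/// after a load of an alloca'd value that has an associated dbg.declare
/// record.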
1842 | void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, LoadInst *LI, |
1843 | DIBuilder &Builder) { |
1844 | auto *DIVar = DVR->getVariable(); |
1845 | auto *DIExpr = DVR->getExpression(); |
1846 | assert(DIVar && "Missing variable" ); |
1847 | |
1848 | if (!valueCoversEntireFragment(ValTy: LI->getType(), DVR)) { |
1849 | // FIXME: If only referring to a part of the variable described by the |
1850 | // dbg.declare, then we want to insert a DbgVariableRecord for the |
1851 | // corresponding fragment. |
1852 | LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: " |
1853 | << *DVR << '\n'); |
1854 | return; |
1855 | } |
1856 | |
1857 | DebugLoc NewLoc = getDebugValueLoc(DVR); |
1858 | |
1859 | // We are now tracking the loaded value instead of the address. In the |
1860 | // future if multi-location support is added to the IR, it might be |
1861 | // preferable to keep tracking both the loaded value and the original |
1862 | // address in case the alloca can not be elided. |
1863 | assert(UseNewDbgInfoFormat); |
1864 | |
1865 | // Create a DbgVariableRecord directly and insert. |
1866 | ValueAsMetadata *LIVAM = ValueAsMetadata::get(V: LI); |
1867 | DbgVariableRecord *DV = |
1868 | new DbgVariableRecord(LIVAM, DIVar, DIExpr, NewLoc.get()); |
1869 | LI->getParent()->insertDbgRecordAfter(DR: DV, I: LI); |
1870 | } |
1871 | |
1872 | /// Determine whether this alloca is either a VLA or an array. |
1873 | static bool isArray(AllocaInst *AI) { |
1874 | return AI->isArrayAllocation() || |
1875 | (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy()); |
1876 | } |
1877 | |
1878 | /// Determine whether this alloca is a structure. |
1879 | static bool isStructure(AllocaInst *AI) { |
1880 | return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy(); |
1881 | } |
1882 | void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, PHINode *APN, |
1883 | DIBuilder &Builder) { |
1884 | auto *DIVar = DVR->getVariable(); |
1885 | auto *DIExpr = DVR->getExpression(); |
1886 | assert(DIVar && "Missing variable" ); |
1887 | |
1888 | if (PhiHasDebugValue(DIVar, DIExpr, APN)) |
1889 | return; |
1890 | |
1891 | if (!valueCoversEntireFragment(ValTy: APN->getType(), DVR)) { |
1892 | // FIXME: If only referring to a part of the variable described by the |
1893 | // dbg.declare, then we want to insert a DbgVariableRecord for the |
1894 | // corresponding fragment. |
1895 | LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: " |
1896 | << *DVR << '\n'); |
1897 | return; |
1898 | } |
1899 | |
1900 | BasicBlock *BB = APN->getParent(); |
1901 | auto InsertionPt = BB->getFirstInsertionPt(); |
1902 | |
1903 | DebugLoc NewLoc = getDebugValueLoc(DVR); |
1904 | |
1905 | // The block may be a catchswitch block, which does not have a valid |
1906 | // insertion point. |
1907 | // FIXME: Insert DbgVariableRecord markers in the successors when appropriate. |
1908 | if (InsertionPt != BB->end()) { |
1909 | insertDbgValueOrDbgVariableRecord(Builder, DV: APN, DIVar, DIExpr, NewLoc, |
1910 | Instr: InsertionPt); |
1911 | } |
1912 | } |
1913 | |
1914 | /// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set |
1915 | /// of llvm.dbg.value intrinsics. |
1916 | bool llvm::LowerDbgDeclare(Function &F) { |
1917 | bool Changed = false; |
1918 | DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false); |
1919 | SmallVector<DbgDeclareInst *, 4> Dbgs; |
1920 | SmallVector<DbgVariableRecord *> DVRs; |
1921 | for (auto &FI : F) { |
1922 | for (Instruction &BI : FI) { |
1923 | if (auto *DDI = dyn_cast<DbgDeclareInst>(Val: &BI)) |
1924 | Dbgs.push_back(Elt: DDI); |
1925 | for (DbgVariableRecord &DVR : filterDbgVars(R: BI.getDbgRecordRange())) { |
1926 | if (DVR.getType() == DbgVariableRecord::LocationType::Declare) |
1927 | DVRs.push_back(Elt: &DVR); |
1928 | } |
1929 | } |
1930 | } |
1931 | |
1932 | if (Dbgs.empty() && DVRs.empty()) |
1933 | return Changed; |
1934 | |
1935 | auto LowerOne = [&](auto *DDI) { |
1936 | AllocaInst *AI = |
1937 | dyn_cast_or_null<AllocaInst>(DDI->getVariableLocationOp(0)); |
1938 | // If this is an alloca for a scalar variable, insert a dbg.value |
1939 | // at each load and store to the alloca and erase the dbg.declare. |
1940 | // The dbg.values allow tracking a variable even if it is not |
1941 | // stored on the stack, while the dbg.declare can only describe |
1942 | // the stack slot (and at a lexical-scope granularity). Later |
1943 | // passes will attempt to elide the stack slot. |
1944 | if (!AI || isArray(AI) || isStructure(AI)) |
1945 | return; |
1946 | |
1947 | // A volatile load/store means that the alloca can't be elided anyway. |
1948 | if (llvm::any_of(AI->users(), [](User *U) -> bool { |
1949 | if (LoadInst *LI = dyn_cast<LoadInst>(Val: U)) |
1950 | return LI->isVolatile(); |
1951 | if (StoreInst *SI = dyn_cast<StoreInst>(Val: U)) |
1952 | return SI->isVolatile(); |
1953 | return false; |
1954 | })) |
1955 | return; |
1956 | |
1957 | SmallVector<const Value *, 8> WorkList; |
1958 | WorkList.push_back(Elt: AI); |
1959 | while (!WorkList.empty()) { |
1960 | const Value *V = WorkList.pop_back_val(); |
1961 | for (const auto &AIUse : V->uses()) { |
1962 | User *U = AIUse.getUser(); |
1963 | if (StoreInst *SI = dyn_cast<StoreInst>(Val: U)) { |
1964 | if (AIUse.getOperandNo() == 1) |
1965 | ConvertDebugDeclareToDebugValue(DDI, SI, DIB); |
1966 | } else if (LoadInst *LI = dyn_cast<LoadInst>(Val: U)) { |
1967 | ConvertDebugDeclareToDebugValue(DDI, LI, DIB); |
1968 | } else if (CallInst *CI = dyn_cast<CallInst>(Val: U)) { |
1969 | // This is a call by-value or some other instruction that takes a |
1970 | // pointer to the variable. Insert a *value* intrinsic that describes |
1971 | // the variable by dereferencing the alloca. |
1972 | if (!CI->isLifetimeStartOrEnd()) { |
1973 | DebugLoc NewLoc = getDebugValueLoc(DDI); |
1974 | auto *DerefExpr = |
1975 | DIExpression::append(Expr: DDI->getExpression(), Ops: dwarf::DW_OP_deref); |
1976 | insertDbgValueOrDbgVariableRecord(DIB, AI, DDI->getVariable(), |
1977 | DerefExpr, NewLoc, |
1978 | CI->getIterator()); |
1979 | } |
1980 | } else if (BitCastInst *BI = dyn_cast<BitCastInst>(Val: U)) { |
1981 | if (BI->getType()->isPointerTy()) |
1982 | WorkList.push_back(Elt: BI); |
1983 | } |
1984 | } |
1985 | } |
1986 | DDI->eraseFromParent(); |
1987 | Changed = true; |
1988 | }; |
1989 | |
1990 | for_each(Range&: Dbgs, F: LowerOne); |
1991 | for_each(Range&: DVRs, F: LowerOne); |
1992 | |
1993 | if (Changed) |
1994 | for (BasicBlock &BB : F) |
1995 | RemoveRedundantDbgInstrs(BB: &BB); |
1996 | |
1997 | return Changed; |
1998 | } |
1999 | |
2000 | // RemoveDIs: re-implementation of insertDebugValuesForPHIs, but which pulls the |
2001 | // debug-info out of the block's DbgVariableRecords rather than dbg.value |
2002 | // intrinsics. |
2003 | static void |
2004 | insertDbgVariableRecordsForPHIs(BasicBlock *BB, |
2005 | SmallVectorImpl<PHINode *> &InsertedPHIs) { |
2006 | assert(BB && "No BasicBlock to clone DbgVariableRecord(s) from." ); |
2007 | if (InsertedPHIs.size() == 0) |
2008 | return; |
2009 | |
2010 | // Map existing PHI nodes to their DbgVariableRecords. |
2011 | DenseMap<Value *, DbgVariableRecord *> DbgValueMap; |
2012 | for (auto &I : *BB) { |
2013 | for (DbgVariableRecord &DVR : filterDbgVars(R: I.getDbgRecordRange())) { |
2014 | for (Value *V : DVR.location_ops()) |
2015 | if (auto *Loc = dyn_cast_or_null<PHINode>(Val: V)) |
2016 | DbgValueMap.insert(KV: {Loc, &DVR}); |
2017 | } |
2018 | } |
2019 | if (DbgValueMap.size() == 0) |
2020 | return; |
2021 | |
2022 | // Map a pair of the destination BB and old DbgVariableRecord to the new |
2023 | // DbgVariableRecord, so that if a DbgVariableRecord is being rewritten to use |
2024 | // more than one of the inserted PHIs in the same destination BB, we can |
2025 | // update the same DbgVariableRecord with all the new PHIs instead of creating |
2026 | // one copy for each. |
2027 | MapVector<std::pair<BasicBlock *, DbgVariableRecord *>, DbgVariableRecord *> |
2028 | NewDbgValueMap; |
2029 | // Then iterate through the new PHIs and look to see if they use one of the |
2030 | // previously mapped PHIs. If so, create a new DbgVariableRecord that will |
2031 | // propagate the info through the new PHI. If we use more than one new PHI in |
2032 | // a single destination BB with the same old dbg.value, merge the updates so |
2033 | // that we get a single new DbgVariableRecord with all the new PHIs. |
2034 | for (auto PHI : InsertedPHIs) { |
2035 | BasicBlock *Parent = PHI->getParent(); |
2036 | // Avoid inserting a debug-info record into an EH block. |
2037 | if (Parent->getFirstNonPHI()->isEHPad()) |
2038 | continue; |
2039 | for (auto VI : PHI->operand_values()) { |
2040 | auto V = DbgValueMap.find(Val: VI); |
2041 | if (V != DbgValueMap.end()) { |
2042 | DbgVariableRecord *DbgII = cast<DbgVariableRecord>(Val: V->second); |
2043 | auto NewDI = NewDbgValueMap.find(Key: {Parent, DbgII}); |
2044 | if (NewDI == NewDbgValueMap.end()) { |
2045 | DbgVariableRecord *NewDbgII = DbgII->clone(); |
2046 | NewDI = NewDbgValueMap.insert(KV: {{Parent, DbgII}, NewDbgII}).first; |
2047 | } |
2048 | DbgVariableRecord *NewDbgII = NewDI->second; |
// If PHI contains VI as an operand more than once, we may have already
// replaced it in NewDbgII; confirm that it is still present.
2051 | if (is_contained(Range: NewDbgII->location_ops(), Element: VI)) |
2052 | NewDbgII->replaceVariableLocationOp(OldValue: VI, NewValue: PHI); |
2053 | } |
2054 | } |
2055 | } |
2056 | // Insert the new DbgVariableRecords into their destination blocks. |
2057 | for (auto DI : NewDbgValueMap) { |
2058 | BasicBlock *Parent = DI.first.first; |
2059 | DbgVariableRecord *NewDbgII = DI.second; |
2060 | auto InsertionPt = Parent->getFirstInsertionPt(); |
2061 | assert(InsertionPt != Parent->end() && "Ill-formed basic block" ); |
2062 | |
2063 | Parent->insertDbgRecordBefore(DR: NewDbgII, Here: InsertionPt); |
2064 | } |
2065 | } |
2066 | |
2067 | /// Propagate dbg.value intrinsics through the newly inserted PHIs. |
2068 | void llvm::insertDebugValuesForPHIs(BasicBlock *BB, |
2069 | SmallVectorImpl<PHINode *> &InsertedPHIs) { |
2070 | assert(BB && "No BasicBlock to clone dbg.value(s) from." ); |
2071 | if (InsertedPHIs.size() == 0) |
2072 | return; |
2073 | |
2074 | insertDbgVariableRecordsForPHIs(BB, InsertedPHIs); |
2075 | |
2076 | // Map existing PHI nodes to their dbg.values. |
2077 | ValueToValueMapTy DbgValueMap; |
2078 | for (auto &I : *BB) { |
2079 | if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(Val: &I)) { |
2080 | for (Value *V : DbgII->location_ops()) |
2081 | if (auto *Loc = dyn_cast_or_null<PHINode>(Val: V)) |
2082 | DbgValueMap.insert(KV: {Loc, DbgII}); |
2083 | } |
2084 | } |
2085 | if (DbgValueMap.size() == 0) |
2086 | return; |
2087 | |
2088 | // Map a pair of the destination BB and old dbg.value to the new dbg.value, |
2089 | // so that if a dbg.value is being rewritten to use more than one of the |
2090 | // inserted PHIs in the same destination BB, we can update the same dbg.value |
2091 | // with all the new PHIs instead of creating one copy for each. |
2092 | MapVector<std::pair<BasicBlock *, DbgVariableIntrinsic *>, |
2093 | DbgVariableIntrinsic *> |
2094 | NewDbgValueMap; |
2095 | // Then iterate through the new PHIs and look to see if they use one of the |
2096 | // previously mapped PHIs. If so, create a new dbg.value intrinsic that will |
2097 | // propagate the info through the new PHI. If we use more than one new PHI in |
2098 | // a single destination BB with the same old dbg.value, merge the updates so |
2099 | // that we get a single new dbg.value with all the new PHIs. |
2100 | for (auto *PHI : InsertedPHIs) { |
2101 | BasicBlock *Parent = PHI->getParent(); |
2102 | // Avoid inserting an intrinsic into an EH block. |
2103 | if (Parent->getFirstNonPHI()->isEHPad()) |
2104 | continue; |
2105 | for (auto *VI : PHI->operand_values()) { |
2106 | auto V = DbgValueMap.find(Val: VI); |
2107 | if (V != DbgValueMap.end()) { |
2108 | auto *DbgII = cast<DbgVariableIntrinsic>(Val&: V->second); |
2109 | auto NewDI = NewDbgValueMap.find(Key: {Parent, DbgII}); |
2110 | if (NewDI == NewDbgValueMap.end()) { |
2111 | auto *NewDbgII = cast<DbgVariableIntrinsic>(Val: DbgII->clone()); |
2112 | NewDI = NewDbgValueMap.insert(KV: {{Parent, DbgII}, NewDbgII}).first; |
2113 | } |
2114 | DbgVariableIntrinsic *NewDbgII = NewDI->second; |
// If PHI contains VI as an operand more than once, we may have already
// replaced it in NewDbgII; confirm that it is still present.
2117 | if (is_contained(Range: NewDbgII->location_ops(), Element: VI)) |
2118 | NewDbgII->replaceVariableLocationOp(OldValue: VI, NewValue: PHI); |
2119 | } |
2120 | } |
2121 | } |
// Insert the new dbg.values into their destination blocks.
2123 | for (auto DI : NewDbgValueMap) { |
2124 | BasicBlock *Parent = DI.first.first; |
2125 | auto *NewDbgII = DI.second; |
2126 | auto InsertionPt = Parent->getFirstInsertionPt(); |
2127 | assert(InsertionPt != Parent->end() && "Ill-formed basic block" ); |
2128 | NewDbgII->insertBefore(InsertPos: &*InsertionPt); |
2129 | } |
2130 | } |
2131 | |
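/// Rewrite any dbg.declare intrinsics and DbgVariableRecord declares that use
/// \p Address so that they describe \p NewAddress instead, prepending
/// \p DIExprFlags and \p Offset to their expressions. Returns true if any
/// declares were updated.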
2132 | bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress, |
2133 | DIBuilder &Builder, uint8_t DIExprFlags, |
2134 | int Offset) { |
2135 | TinyPtrVector<DbgDeclareInst *> DbgDeclares = findDbgDeclares(V: Address); |
2136 | TinyPtrVector<DbgVariableRecord *> DVRDeclares = findDVRDeclares(V: Address); |
2137 | |
2138 | auto ReplaceOne = [&](auto *DII) { |
2139 | assert(DII->getVariable() && "Missing variable" ); |
2140 | auto *DIExpr = DII->getExpression(); |
2141 | DIExpr = DIExpression::prepend(Expr: DIExpr, Flags: DIExprFlags, Offset); |
2142 | DII->setExpression(DIExpr); |
2143 | DII->replaceVariableLocationOp(Address, NewAddress); |
2144 | }; |
2145 | |
2146 | for_each(Range&: DbgDeclares, F: ReplaceOne); |
2147 | for_each(Range&: DVRDeclares, F: ReplaceOne); |
2148 | |
2149 | return !DbgDeclares.empty() || !DVRDeclares.empty(); |
2150 | } |
2151 | |
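/// Rewrite one alloca-based dbg.value intrinsic or DbgVariableRecord to refer
/// to \p NewAddress, folding \p Offset into the expression ahead of the
/// leading DW_OP_deref. Gives up if the expression does not start with a
/// dereference.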
2152 | static void updateOneDbgValueForAlloca(const DebugLoc &Loc, |
2153 | DILocalVariable *DIVar, |
2154 | DIExpression *DIExpr, Value *NewAddress, |
2155 | DbgValueInst *DVI, |
2156 | DbgVariableRecord *DVR, |
2157 | DIBuilder &Builder, int Offset) { |
2158 | assert(DIVar && "Missing variable" ); |
2159 | |
2160 | // This is an alloca-based dbg.value/DbgVariableRecord. The first thing it |
2161 | // should do with the alloca pointer is dereference it. Otherwise we don't |
2162 | // know how to handle it and give up. |
2163 | if (!DIExpr || DIExpr->getNumElements() < 1 || |
2164 | DIExpr->getElement(I: 0) != dwarf::DW_OP_deref) |
2165 | return; |
2166 | |
2167 | // Insert the offset before the first deref. |
2168 | if (Offset) |
2169 | DIExpr = DIExpression::prepend(Expr: DIExpr, Flags: 0, Offset); |
2170 | |
2171 | if (DVI) { |
2172 | DVI->setExpression(DIExpr); |
2173 | DVI->replaceVariableLocationOp(OpIdx: 0u, NewValue: NewAddress); |
2174 | } else { |
2175 | assert(DVR); |
2176 | DVR->setExpression(DIExpr); |
2177 | DVR->replaceVariableLocationOp(OpIdx: 0u, NewValue: NewAddress); |
2178 | } |
2179 | } |
2180 | |
2181 | void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress, |
2182 | DIBuilder &Builder, int Offset) { |
2183 | SmallVector<DbgValueInst *, 1> DbgUsers; |
2184 | SmallVector<DbgVariableRecord *, 1> DPUsers; |
2185 | findDbgValues(DbgValues&: DbgUsers, V: AI, DbgVariableRecords: &DPUsers); |
2186 | |
2187 | // Attempt to replace dbg.values that use this alloca. |
2188 | for (auto *DVI : DbgUsers) |
2189 | updateOneDbgValueForAlloca(Loc: DVI->getDebugLoc(), DIVar: DVI->getVariable(), |
2190 | DIExpr: DVI->getExpression(), NewAddress: NewAllocaAddress, DVI, |
2191 | DVR: nullptr, Builder, Offset); |
2192 | |
2193 | // Replace any DbgVariableRecords that use this alloca. |
2194 | for (DbgVariableRecord *DVR : DPUsers) |
2195 | updateOneDbgValueForAlloca(Loc: DVR->getDebugLoc(), DIVar: DVR->getVariable(), |
2196 | DIExpr: DVR->getExpression(), NewAddress: NewAllocaAddress, DVI: nullptr, |
2197 | DVR, Builder, Offset); |
2198 | } |
2199 | |
2200 | /// Where possible to salvage debug information for \p I do so. |
2201 | /// If not possible mark undef. |
2202 | void llvm::salvageDebugInfo(Instruction &I) { |
2203 | SmallVector<DbgVariableIntrinsic *, 1> DbgUsers; |
2204 | SmallVector<DbgVariableRecord *, 1> DPUsers; |
2205 | findDbgUsers(DbgInsts&: DbgUsers, V: &I, DbgVariableRecords: &DPUsers); |
2206 | salvageDebugInfoForDbgValues(I, Insns: DbgUsers, DPInsns: DPUsers); |
2207 | } |
2208 | |
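/// Try to salvage the address component of a dbg.assign intrinsic or record
/// whose address computation is going away; if extra location operands would
/// be needed, kill the address instead.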
2209 | template <typename T> static void salvageDbgAssignAddress(T *Assign) { |
2210 | Instruction *I = dyn_cast<Instruction>(Assign->getAddress()); |
2211 | // Only instructions can be salvaged at the moment. |
2212 | if (!I) |
2213 | return; |
2214 | |
2215 | assert(!Assign->getAddressExpression()->getFragmentInfo().has_value() && |
2216 | "address-expression shouldn't have fragment info" ); |
2217 | |
2218 | // The address component of a dbg.assign cannot be variadic. |
2219 | uint64_t CurrentLocOps = 0; |
2220 | SmallVector<Value *, 4> AdditionalValues; |
2221 | SmallVector<uint64_t, 16> Ops; |
2222 | Value *NewV = salvageDebugInfoImpl(I&: *I, CurrentLocOps, Ops, AdditionalValues); |
2223 | |
2224 | // Check if the salvage failed. |
2225 | if (!NewV) |
2226 | return; |
2227 | |
2228 | DIExpression *SalvagedExpr = DIExpression::appendOpsToArg( |
2229 | Expr: Assign->getAddressExpression(), Ops, ArgNo: 0, /*StackValue=*/false); |
2230 | assert(!SalvagedExpr->getFragmentInfo().has_value() && |
2231 | "address-expression shouldn't have fragment info" ); |
2232 | |
2233 | SalvagedExpr = SalvagedExpr->foldConstantMath(); |
2234 | |
2235 | // Salvage succeeds if no additional values are required. |
2236 | if (AdditionalValues.empty()) { |
2237 | Assign->setAddress(NewV); |
2238 | Assign->setAddressExpression(SalvagedExpr); |
2239 | } else { |
2240 | Assign->setKillAddress(); |
2241 | } |
2242 | } |
2243 | |
2244 | void llvm::salvageDebugInfoForDbgValues( |
2245 | Instruction &I, ArrayRef<DbgVariableIntrinsic *> DbgUsers, |
2246 | ArrayRef<DbgVariableRecord *> DPUsers) { |
// These are arbitrarily chosen limits on the maximum number of values and the
2248 | // maximum size of a debug expression we can salvage up to, used for |
2249 | // performance reasons. |
2250 | const unsigned MaxDebugArgs = 16; |
2251 | const unsigned MaxExpressionSize = 128; |
2252 | bool Salvaged = false; |
2253 | |
2254 | for (auto *DII : DbgUsers) { |
2255 | if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(Val: DII)) { |
2256 | if (DAI->getAddress() == &I) { |
2257 | salvageDbgAssignAddress(Assign: DAI); |
2258 | Salvaged = true; |
2259 | } |
2260 | if (DAI->getValue() != &I) |
2261 | continue; |
2262 | } |
2263 | |
2264 | // Do not add DW_OP_stack_value for DbgDeclare, because they are implicitly |
2265 | // pointing out the value as a DWARF memory location description. |
2266 | bool StackValue = isa<DbgValueInst>(Val: DII); |
2267 | auto DIILocation = DII->location_ops(); |
2268 | assert( |
2269 | is_contained(DIILocation, &I) && |
2270 | "DbgVariableIntrinsic must use salvaged instruction as its location" ); |
2271 | SmallVector<Value *, 4> AdditionalValues; |
2272 | // `I` may appear more than once in DII's location ops, and each use of `I` |
2273 | // must be updated in the DIExpression and potentially have additional |
2274 | // values added; thus we call salvageDebugInfoImpl for each `I` instance in |
2275 | // DIILocation. |
2276 | Value *Op0 = nullptr; |
2277 | DIExpression *SalvagedExpr = DII->getExpression(); |
2278 | auto LocItr = find(Range&: DIILocation, Val: &I); |
2279 | while (SalvagedExpr && LocItr != DIILocation.end()) { |
2280 | SmallVector<uint64_t, 16> Ops; |
2281 | unsigned LocNo = std::distance(first: DIILocation.begin(), last: LocItr); |
2282 | uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands(); |
2283 | Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues); |
2284 | if (!Op0) |
2285 | break; |
2286 | SalvagedExpr = |
2287 | DIExpression::appendOpsToArg(Expr: SalvagedExpr, Ops, ArgNo: LocNo, StackValue); |
2288 | LocItr = std::find(first: ++LocItr, last: DIILocation.end(), val: &I); |
2289 | } |
// salvageDebugInfoImpl is expected to fail either on the first debug user
// examined or on none of them, so give up on the remaining users as well.
2292 | if (!Op0) |
2293 | break; |
2294 | |
2295 | SalvagedExpr = SalvagedExpr->foldConstantMath(); |
2296 | DII->replaceVariableLocationOp(OldValue: &I, NewValue: Op0); |
2297 | bool IsValidSalvageExpr = SalvagedExpr->getNumElements() <= MaxExpressionSize; |
2298 | if (AdditionalValues.empty() && IsValidSalvageExpr) { |
2299 | DII->setExpression(SalvagedExpr); |
2300 | } else if (isa<DbgValueInst>(Val: DII) && IsValidSalvageExpr && |
2301 | DII->getNumVariableLocationOps() + AdditionalValues.size() <= |
2302 | MaxDebugArgs) { |
2303 | DII->addVariableLocationOps(NewValues: AdditionalValues, NewExpr: SalvagedExpr); |
2304 | } else { |
2305 | // Do not salvage using DIArgList for dbg.declare, as it is not currently |
2306 | // supported in those instructions. Also do not salvage if the resulting |
2307 | // DIArgList would contain an unreasonably large number of values. |
2308 | DII->setKillLocation(); |
2309 | } |
2310 | LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n'); |
2311 | Salvaged = true; |
2312 | } |
2313 | // Duplicate of above block for DbgVariableRecords. |
2314 | for (auto *DVR : DPUsers) { |
2315 | if (DVR->isDbgAssign()) { |
2316 | if (DVR->getAddress() == &I) { |
2317 | salvageDbgAssignAddress(Assign: DVR); |
2318 | Salvaged = true; |
2319 | } |
2320 | if (DVR->getValue() != &I) |
2321 | continue; |
2322 | } |
2323 | |
2324 | // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they |
2325 | // are implicitly pointing out the value as a DWARF memory location |
2326 | // description. |
2327 | bool StackValue = |
2328 | DVR->getType() != DbgVariableRecord::LocationType::Declare; |
2329 | auto DVRLocation = DVR->location_ops(); |
assert(
is_contained(DVRLocation, &I) &&
"DbgVariableRecord must use salvaged instruction as its location" );
2333 | SmallVector<Value *, 4> AdditionalValues; |
2334 | // 'I' may appear more than once in DVR's location ops, and each use of 'I' |
2335 | // must be updated in the DIExpression and potentially have additional |
2336 | // values added; thus we call salvageDebugInfoImpl for each 'I' instance in |
2337 | // DVRLocation. |
2338 | Value *Op0 = nullptr; |
2339 | DIExpression *SalvagedExpr = DVR->getExpression(); |
2340 | auto LocItr = find(Range&: DVRLocation, Val: &I); |
2341 | while (SalvagedExpr && LocItr != DVRLocation.end()) { |
2342 | SmallVector<uint64_t, 16> Ops; |
2343 | unsigned LocNo = std::distance(first: DVRLocation.begin(), last: LocItr); |
2344 | uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands(); |
2345 | Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues); |
2346 | if (!Op0) |
2347 | break; |
2348 | SalvagedExpr = |
2349 | DIExpression::appendOpsToArg(Expr: SalvagedExpr, Ops, ArgNo: LocNo, StackValue); |
2350 | LocItr = std::find(first: ++LocItr, last: DVRLocation.end(), val: &I); |
2351 | } |
// salvageDebugInfoImpl is expected to fail either on the first debug user
// examined or on none of them, so give up on the remaining users as well.
2354 | if (!Op0) |
2355 | break; |
2356 | |
2357 | SalvagedExpr = SalvagedExpr->foldConstantMath(); |
2358 | DVR->replaceVariableLocationOp(OldValue: &I, NewValue: Op0); |
2359 | bool IsValidSalvageExpr = |
2360 | SalvagedExpr->getNumElements() <= MaxExpressionSize; |
2361 | if (AdditionalValues.empty() && IsValidSalvageExpr) { |
2362 | DVR->setExpression(SalvagedExpr); |
2363 | } else if (DVR->getType() != DbgVariableRecord::LocationType::Declare && |
2364 | IsValidSalvageExpr && |
2365 | DVR->getNumVariableLocationOps() + AdditionalValues.size() <= |
2366 | MaxDebugArgs) { |
2367 | DVR->addVariableLocationOps(NewValues: AdditionalValues, NewExpr: SalvagedExpr); |
2368 | } else { |
2369 | // Do not salvage using DIArgList for dbg.addr/dbg.declare, as it is |
2370 | // currently only valid for stack value expressions. |
2371 | // Also do not salvage if the resulting DIArgList would contain an |
2372 | // unreasonably large number of values. |
2373 | DVR->setKillLocation(); |
2374 | } |
LLVM_DEBUG(dbgs() << "SALVAGE: " << *DVR << '\n');
2376 | Salvaged = true; |
2377 | } |
2378 | |
2379 | if (Salvaged) |
2380 | return; |
2381 | |
2382 | for (auto *DII : DbgUsers) |
2383 | DII->setKillLocation(); |
2384 | |
2385 | for (auto *DVR : DPUsers) |
2386 | DVR->setKillLocation(); |
2387 | } |
2388 | |
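/// Translate the offset computation performed by \p GEP into DIExpression
/// operations, pushing any variable indices onto \p AdditionalValues. Returns
/// the GEP's pointer operand, or nullptr if the offsets cannot be collected.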
2389 | Value *getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL, |
2390 | uint64_t CurrentLocOps, |
2391 | SmallVectorImpl<uint64_t> &Opcodes, |
2392 | SmallVectorImpl<Value *> &AdditionalValues) { |
2393 | unsigned BitWidth = DL.getIndexSizeInBits(AS: GEP->getPointerAddressSpace()); |
2394 | // Rewrite a GEP into a DIExpression. |
2395 | MapVector<Value *, APInt> VariableOffsets; |
2396 | APInt ConstantOffset(BitWidth, 0); |
2397 | if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset)) |
2398 | return nullptr; |
2399 | if (!VariableOffsets.empty() && !CurrentLocOps) { |
2400 | Opcodes.insert(I: Opcodes.begin(), IL: {dwarf::DW_OP_LLVM_arg, 0}); |
2401 | CurrentLocOps = 1; |
2402 | } |
2403 | for (const auto &Offset : VariableOffsets) { |
2404 | AdditionalValues.push_back(Elt: Offset.first); |
2405 | assert(Offset.second.isStrictlyPositive() && |
2406 | "Expected strictly positive multiplier for offset." ); |
2407 | Opcodes.append(IL: {dwarf::DW_OP_LLVM_arg, CurrentLocOps++, dwarf::DW_OP_constu, |
2408 | Offset.second.getZExtValue(), dwarf::DW_OP_mul, |
2409 | dwarf::DW_OP_plus}); |
2410 | } |
2411 | DIExpression::appendOffset(Ops&: Opcodes, Offset: ConstantOffset.getSExtValue()); |
2412 | return GEP->getOperand(i_nocapture: 0); |
2413 | } |
2414 | |
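/// Map an LLVM binary opcode to the equivalent DWARF expression operator, or
/// return 0 if there is no direct equivalent.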
2415 | uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode) { |
2416 | switch (Opcode) { |
2417 | case Instruction::Add: |
2418 | return dwarf::DW_OP_plus; |
2419 | case Instruction::Sub: |
2420 | return dwarf::DW_OP_minus; |
2421 | case Instruction::Mul: |
2422 | return dwarf::DW_OP_mul; |
2423 | case Instruction::SDiv: |
2424 | return dwarf::DW_OP_div; |
2425 | case Instruction::SRem: |
2426 | return dwarf::DW_OP_mod; |
2427 | case Instruction::Or: |
2428 | return dwarf::DW_OP_or; |
2429 | case Instruction::And: |
2430 | return dwarf::DW_OP_and; |
2431 | case Instruction::Xor: |
2432 | return dwarf::DW_OP_xor; |
2433 | case Instruction::Shl: |
2434 | return dwarf::DW_OP_shl; |
2435 | case Instruction::LShr: |
2436 | return dwarf::DW_OP_shr; |
2437 | case Instruction::AShr: |
2438 | return dwarf::DW_OP_shra; |
2439 | default: |
2440 | // TODO: Salvage from each kind of binop we know about. |
2441 | return 0; |
2442 | } |
2443 | } |
2444 | |
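/// Append the DW_OP_LLVM_arg operations required to make \p I's second
/// operand an additional location operand of the expression being built.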
2445 | static void handleSSAValueOperands(uint64_t CurrentLocOps, |
2446 | SmallVectorImpl<uint64_t> &Opcodes, |
2447 | SmallVectorImpl<Value *> &AdditionalValues, |
2448 | Instruction *I) { |
2449 | if (!CurrentLocOps) { |
2450 | Opcodes.append(IL: {dwarf::DW_OP_LLVM_arg, 0}); |
2451 | CurrentLocOps = 1; |
2452 | } |
2453 | Opcodes.append(IL: {dwarf::DW_OP_LLVM_arg, CurrentLocOps}); |
2454 | AdditionalValues.push_back(Elt: I->getOperand(i: 1)); |
2455 | } |
2456 | |
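/// Encode binary operator \p BI as DIExpression operations. Constant integer
/// operands are folded into the expression; other operands become additional
/// location operands. Returns \p BI's first operand, or nullptr on failure.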
2457 | Value *getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps, |
2458 | SmallVectorImpl<uint64_t> &Opcodes, |
2459 | SmallVectorImpl<Value *> &AdditionalValues) { |
2460 | // Handle binary operations with constant integer operands as a special case. |
2461 | auto *ConstInt = dyn_cast<ConstantInt>(Val: BI->getOperand(i_nocapture: 1)); |
2462 | // Values wider than 64 bits cannot be represented within a DIExpression. |
2463 | if (ConstInt && ConstInt->getBitWidth() > 64) |
2464 | return nullptr; |
2465 | |
2466 | Instruction::BinaryOps BinOpcode = BI->getOpcode(); |
2467 | // Push any Constant Int operand onto the expression stack. |
2468 | if (ConstInt) { |
2469 | uint64_t Val = ConstInt->getSExtValue(); |
2470 | // Add or Sub Instructions with a constant operand can potentially be |
2471 | // simplified. |
2472 | if (BinOpcode == Instruction::Add || BinOpcode == Instruction::Sub) { |
2473 | uint64_t Offset = BinOpcode == Instruction::Add ? Val : -int64_t(Val); |
2474 | DIExpression::appendOffset(Ops&: Opcodes, Offset); |
2475 | return BI->getOperand(i_nocapture: 0); |
2476 | } |
2477 | Opcodes.append(IL: {dwarf::DW_OP_constu, Val}); |
2478 | } else { |
2479 | handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, I: BI); |
2480 | } |
2481 | |
2482 | // Add salvaged binary operator to expression stack, if it has a valid |
2483 | // representation in a DIExpression. |
2484 | uint64_t DwarfBinOp = getDwarfOpForBinOp(Opcode: BinOpcode); |
2485 | if (!DwarfBinOp) |
2486 | return nullptr; |
2487 | Opcodes.push_back(Elt: DwarfBinOp); |
2488 | return BI->getOperand(i_nocapture: 0); |
2489 | } |
2490 | |
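/// Map an integer comparison predicate to the equivalent DWARF comparison
/// operator, or return 0 if it cannot be represented.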
2491 | uint64_t getDwarfOpForIcmpPred(CmpInst::Predicate Pred) { |
2492 | // The signedness of the operation is implicit in the typed stack, signed and |
2493 | // unsigned instructions map to the same DWARF opcode. |
2494 | switch (Pred) { |
2495 | case CmpInst::ICMP_EQ: |
2496 | return dwarf::DW_OP_eq; |
2497 | case CmpInst::ICMP_NE: |
2498 | return dwarf::DW_OP_ne; |
2499 | case CmpInst::ICMP_UGT: |
2500 | case CmpInst::ICMP_SGT: |
2501 | return dwarf::DW_OP_gt; |
2502 | case CmpInst::ICMP_UGE: |
2503 | case CmpInst::ICMP_SGE: |
2504 | return dwarf::DW_OP_ge; |
2505 | case CmpInst::ICMP_ULT: |
2506 | case CmpInst::ICMP_SLT: |
2507 | return dwarf::DW_OP_lt; |
2508 | case CmpInst::ICMP_ULE: |
2509 | case CmpInst::ICMP_SLE: |
2510 | return dwarf::DW_OP_le; |
2511 | default: |
2512 | return 0; |
2513 | } |
2514 | } |
2515 | |
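/// Encode integer comparison \p Icmp as DIExpression operations, analogously
/// to getSalvageOpsForBinOp. Returns \p Icmp's first operand, or nullptr on
/// failure.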
2516 | Value *getSalvageOpsForIcmpOp(ICmpInst *Icmp, uint64_t CurrentLocOps, |
2517 | SmallVectorImpl<uint64_t> &Opcodes, |
2518 | SmallVectorImpl<Value *> &AdditionalValues) { |
2519 | // Handle icmp operations with constant integer operands as a special case. |
2520 | auto *ConstInt = dyn_cast<ConstantInt>(Val: Icmp->getOperand(i_nocapture: 1)); |
2521 | // Values wider than 64 bits cannot be represented within a DIExpression. |
2522 | if (ConstInt && ConstInt->getBitWidth() > 64) |
2523 | return nullptr; |
2524 | // Push any Constant Int operand onto the expression stack. |
2525 | if (ConstInt) { |
2526 | if (Icmp->isSigned()) |
2527 | Opcodes.push_back(Elt: dwarf::DW_OP_consts); |
2528 | else |
2529 | Opcodes.push_back(Elt: dwarf::DW_OP_constu); |
2530 | uint64_t Val = ConstInt->getSExtValue(); |
2531 | Opcodes.push_back(Elt: Val); |
2532 | } else { |
2533 | handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, I: Icmp); |
2534 | } |
2535 | |
2536 | // Add salvaged binary operator to expression stack, if it has a valid |
2537 | // representation in a DIExpression. |
2538 | uint64_t DwarfIcmpOp = getDwarfOpForIcmpPred(Pred: Icmp->getPredicate()); |
2539 | if (!DwarfIcmpOp) |
2540 | return nullptr; |
2541 | Opcodes.push_back(Elt: DwarfIcmpOp); |
2542 | return Icmp->getOperand(i_nocapture: 0); |
2543 | } |
2544 | |
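/// Compute the DIExpression operations needed to recreate \p I's value from
/// its operands, appending them to \p Ops and any extra location operands to
/// \p AdditionalValues. Returns the value the rewritten expression should be
/// based on, or nullptr if \p I cannot be salvaged.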
2545 | Value *llvm::salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, |
2546 | SmallVectorImpl<uint64_t> &Ops, |
2547 | SmallVectorImpl<Value *> &AdditionalValues) { |
2548 | auto &M = *I.getModule(); |
2549 | auto &DL = M.getDataLayout(); |
2550 | |
2551 | if (auto *CI = dyn_cast<CastInst>(Val: &I)) { |
2552 | Value *FromValue = CI->getOperand(i_nocapture: 0); |
2553 | // No-op casts are irrelevant for debug info. |
2554 | if (CI->isNoopCast(DL)) { |
2555 | return FromValue; |
2556 | } |
2557 | |
2558 | Type *Type = CI->getType(); |
2559 | if (Type->isPointerTy()) |
2560 | Type = DL.getIntPtrType(Type); |
// Casts other than Trunc, SExt, ZExt, IntToPtr, or PtrToInt to scalar types
// cannot be salvaged.
2562 | if (Type->isVectorTy() || |
2563 | !(isa<TruncInst>(Val: &I) || isa<SExtInst>(Val: &I) || isa<ZExtInst>(Val: &I) || |
2564 | isa<IntToPtrInst>(Val: &I) || isa<PtrToIntInst>(Val: &I))) |
2565 | return nullptr; |
2566 | |
2567 | llvm::Type *FromType = FromValue->getType(); |
2568 | if (FromType->isPointerTy()) |
2569 | FromType = DL.getIntPtrType(FromType); |
2570 | |
2571 | unsigned FromTypeBitSize = FromType->getScalarSizeInBits(); |
2572 | unsigned ToTypeBitSize = Type->getScalarSizeInBits(); |
2573 | |
2574 | auto ExtOps = DIExpression::getExtOps(FromSize: FromTypeBitSize, ToSize: ToTypeBitSize, |
2575 | Signed: isa<SExtInst>(Val: &I)); |
2576 | Ops.append(in_start: ExtOps.begin(), in_end: ExtOps.end()); |
2577 | return FromValue; |
2578 | } |
2579 | |
2580 | if (auto *GEP = dyn_cast<GetElementPtrInst>(Val: &I)) |
2581 | return getSalvageOpsForGEP(GEP, DL, CurrentLocOps, Opcodes&: Ops, AdditionalValues); |
2582 | if (auto *BI = dyn_cast<BinaryOperator>(Val: &I)) |
2583 | return getSalvageOpsForBinOp(BI, CurrentLocOps, Opcodes&: Ops, AdditionalValues); |
2584 | if (auto *IC = dyn_cast<ICmpInst>(Val: &I)) |
2585 | return getSalvageOpsForIcmpOp(Icmp: IC, CurrentLocOps, Opcodes&: Ops, AdditionalValues); |
2586 | |
2587 | // *Not* to do: we should not attempt to salvage load instructions, |
2588 | // because the validity and lifetime of a dbg.value containing |
2589 | // DW_OP_deref becomes difficult to analyze. See PR40628 for examples. |
2590 | return nullptr; |
2591 | } |
2592 | |
2593 | /// A replacement for a dbg.value expression. |
2594 | using DbgValReplacement = std::optional<DIExpression *>; |
2595 | |
2596 | /// Point debug users of \p From to \p To using exprs given by \p RewriteExpr, |
2597 | /// possibly moving/undefing users to prevent use-before-def. Returns true if |
2598 | /// changes are made. |
2599 | static bool rewriteDebugUsers( |
2600 | Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT, |
2601 | function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr, |
2602 | function_ref<DbgValReplacement(DbgVariableRecord &DVR)> RewriteDVRExpr) { |
2603 | // Find debug users of From. |
2604 | SmallVector<DbgVariableIntrinsic *, 1> Users; |
2605 | SmallVector<DbgVariableRecord *, 1> DPUsers; |
2606 | findDbgUsers(DbgInsts&: Users, V: &From, DbgVariableRecords: &DPUsers); |
2607 | if (Users.empty() && DPUsers.empty()) |
2608 | return false; |
2609 | |
2610 | // Prevent use-before-def of To. |
2611 | bool Changed = false; |
2612 | |
2613 | SmallPtrSet<DbgVariableIntrinsic *, 1> UndefOrSalvage; |
2614 | SmallPtrSet<DbgVariableRecord *, 1> UndefOrSalvageDVR; |
  if (isa<Instruction>(&To)) {
2616 | bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint; |
2617 | |
2618 | for (auto *DII : Users) { |
2619 | // It's common to see a debug user between From and DomPoint. Move it |
2620 | // after DomPoint to preserve the variable update without any reordering. |
2621 | if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) { |
2622 | LLVM_DEBUG(dbgs() << "MOVE: " << *DII << '\n'); |
        DII->moveAfter(&DomPoint);
2624 | Changed = true; |
2625 | |
2626 | // Users which otherwise aren't dominated by the replacement value must |
2627 | // be salvaged or deleted. |
      } else if (!DT.dominates(&DomPoint, DII)) {
        UndefOrSalvage.insert(DII);
2630 | } |
2631 | } |
2632 | |
2633 | // DbgVariableRecord implementation of the above. |
2634 | for (auto *DVR : DPUsers) { |
2635 | Instruction *MarkedInstr = DVR->getMarker()->MarkedInstr; |
2636 | Instruction *NextNonDebug = MarkedInstr; |
2637 | // The next instruction might still be a dbg.declare, skip over it. |
      if (isa<DbgVariableIntrinsic>(NextNonDebug))
2639 | NextNonDebug = NextNonDebug->getNextNonDebugInstruction(); |
2640 | |
2641 | if (DomPointAfterFrom && NextNonDebug == &DomPoint) { |
2642 | LLVM_DEBUG(dbgs() << "MOVE: " << *DVR << '\n'); |
2643 | DVR->removeFromParent(); |
2644 | // Ensure there's a marker. |
        DomPoint.getParent()->insertDbgRecordAfter(DVR, &DomPoint);
        Changed = true;
      } else if (!DT.dominates(&DomPoint, MarkedInstr)) {
        UndefOrSalvageDVR.insert(DVR);
2649 | } |
2650 | } |
2651 | } |
2652 | |
2653 | // Update debug users without use-before-def risk. |
2654 | for (auto *DII : Users) { |
    if (UndefOrSalvage.count(DII))
2656 | continue; |
2657 | |
2658 | DbgValReplacement DVRepl = RewriteExpr(*DII); |
2659 | if (!DVRepl) |
2660 | continue; |
2661 | |
    DII->replaceVariableLocationOp(&From, &To);
2663 | DII->setExpression(*DVRepl); |
2664 | LLVM_DEBUG(dbgs() << "REWRITE: " << *DII << '\n'); |
2665 | Changed = true; |
2666 | } |
2667 | for (auto *DVR : DPUsers) { |
    if (UndefOrSalvageDVR.count(DVR))
2669 | continue; |
2670 | |
2671 | DbgValReplacement DVRepl = RewriteDVRExpr(*DVR); |
2672 | if (!DVRepl) |
2673 | continue; |
2674 | |
    DVR->replaceVariableLocationOp(&From, &To);
    DVR->setExpression(*DVRepl);
    LLVM_DEBUG(dbgs() << "REWRITE: " << *DVR << '\n');
2678 | Changed = true; |
2679 | } |
2680 | |
2681 | if (!UndefOrSalvage.empty() || !UndefOrSalvageDVR.empty()) { |
2682 | // Try to salvage the remaining debug users. |
    salvageDebugInfo(From);
2684 | Changed = true; |
2685 | } |
2686 | |
2687 | return Changed; |
2688 | } |
2689 | |
2690 | /// Check if a bitcast between a value of type \p FromTy to type \p ToTy would |
2691 | /// losslessly preserve the bits and semantics of the value. This predicate is |
/// symmetric, i.e. swapping \p FromTy and \p ToTy should give the same result.
///
/// Note that Type::canLosslesslyBitCastTo is not suitable here because it
/// allows semantically inequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
2696 | /// and also does not allow lossless pointer <-> integer conversions. |
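/// For illustration (assuming a data layout with 64-bit integral pointers):
/// i64 <-> ptr is treated as semantics-preserving here, whereas something like
/// i64 <-> double falls through to the conservative 'false' below.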
2697 | static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy, |
2698 | Type *ToTy) { |
2699 | // Trivially compatible types. |
2700 | if (FromTy == ToTy) |
2701 | return true; |
2702 | |
2703 | // Handle compatible pointer <-> integer conversions. |
2704 | if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) { |
    bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
    bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
                              !DL.isNonIntegralPointerType(ToTy);
2708 | return SameSize && LosslessConversion; |
2709 | } |
2710 | |
2711 | // TODO: This is not exhaustive. |
2712 | return false; |
2713 | } |
2714 | |
2715 | bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To, |
2716 | Instruction &DomPoint, DominatorTree &DT) { |
2717 | // Exit early if From has no debug users. |
2718 | if (!From.isUsedByMetadata()) |
2719 | return false; |
2720 | |
  assert(&From != &To && "Can't replace something with itself");
2722 | |
2723 | Type *FromTy = From.getType(); |
2724 | Type *ToTy = To.getType(); |
2725 | |
2726 | auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement { |
2727 | return DII.getExpression(); |
2728 | }; |
2729 | auto IdentityDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement { |
2730 | return DVR.getExpression(); |
2731 | }; |
2732 | |
2733 | // Handle no-op conversions. |
2734 | Module &M = *From.getModule(); |
2735 | const DataLayout &DL = M.getDataLayout(); |
2736 | if (isBitCastSemanticsPreserving(DL, FromTy, ToTy)) |
    return rewriteDebugUsers(From, To, DomPoint, DT, Identity, IdentityDVR);
2738 | |
2739 | // Handle integer-to-integer widening and narrowing. |
2740 | // FIXME: Use DW_OP_convert when it's available everywhere. |
2741 | if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) { |
2742 | uint64_t FromBits = FromTy->getPrimitiveSizeInBits(); |
2743 | uint64_t ToBits = ToTy->getPrimitiveSizeInBits(); |
    assert(FromBits != ToBits && "Unexpected no-op conversion");
2745 | |
2746 | // When the width of the result grows, assume that a debugger will only |
2747 | // access the low `FromBits` bits when inspecting the source variable. |
2748 | if (FromBits < ToBits) |
      return rewriteDebugUsers(From, To, DomPoint, DT, Identity, IdentityDVR);
2750 | |
2751 | // The width of the result has shrunk. Use sign/zero extension to describe |
2752 | // the source variable's high bits. |
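    // For illustration: shrinking an i32 variable to an i16 value with signed
    // semantics appends roughly (DW_OP_LLVM_convert, 16, DW_ATE_signed,
    // DW_OP_LLVM_convert, 32, DW_ATE_signed), letting a debugger recover the
    // full-width value.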
2753 | auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement { |
2754 | DILocalVariable *Var = DII.getVariable(); |
2755 | |
2756 | // Without knowing signedness, sign/zero extension isn't possible. |
2757 | auto Signedness = Var->getSignedness(); |
2758 | if (!Signedness) |
2759 | return std::nullopt; |
2760 | |
2761 | bool Signed = *Signedness == DIBasicType::Signedness::Signed; |
      return DIExpression::appendExt(DII.getExpression(), ToBits, FromBits,
                                     Signed);
2764 | }; |
2765 | // RemoveDIs: duplicate implementation working on DbgVariableRecords rather |
2766 | // than on dbg.value intrinsics. |
2767 | auto SignOrZeroExtDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement { |
2768 | DILocalVariable *Var = DVR.getVariable(); |
2769 | |
2770 | // Without knowing signedness, sign/zero extension isn't possible. |
2771 | auto Signedness = Var->getSignedness(); |
2772 | if (!Signedness) |
2773 | return std::nullopt; |
2774 | |
2775 | bool Signed = *Signedness == DIBasicType::Signedness::Signed; |
      return DIExpression::appendExt(DVR.getExpression(), ToBits, FromBits,
                                     Signed);
2778 | }; |
    return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExt,
                             SignOrZeroExtDVR);
2781 | } |
2782 | |
2783 | // TODO: Floating-point conversions, vectors. |
2784 | return false; |
2785 | } |
2786 | |
2787 | bool llvm::handleUnreachableTerminator( |
2788 | Instruction *I, SmallVectorImpl<Value *> &PoisonedValues) { |
2789 | bool Changed = false; |
2790 | // RemoveDIs: erase debug-info on this instruction manually. |
2791 | I->dropDbgRecords(); |
2792 | for (Use &U : I->operands()) { |
2793 | Value *Op = U.get(); |
    if (isa<Instruction>(Op) && !Op->getType()->isTokenTy()) {
      U.set(PoisonValue::get(Op->getType()));
      PoisonedValues.push_back(Op);
2797 | Changed = true; |
2798 | } |
2799 | } |
2800 | |
2801 | return Changed; |
2802 | } |
2803 | |
2804 | std::pair<unsigned, unsigned> |
2805 | llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) { |
2806 | unsigned NumDeadInst = 0; |
2807 | unsigned NumDeadDbgInst = 0; |
  // Delete the instructions backwards, as this reduces the number of def-use
  // and use-def chains that have to be updated.
2810 | Instruction *EndInst = BB->getTerminator(); // Last not to be deleted. |
2811 | SmallVector<Value *> Uses; |
  handleUnreachableTerminator(EndInst, Uses);
2813 | |
2814 | while (EndInst != &BB->front()) { |
2815 | // Delete the next to last instruction. |
2816 | Instruction *Inst = &*--EndInst->getIterator(); |
2817 | if (!Inst->use_empty() && !Inst->getType()->isTokenTy()) |
      Inst->replaceAllUsesWith(PoisonValue::get(Inst->getType()));
2819 | if (Inst->isEHPad() || Inst->getType()->isTokenTy()) { |
2820 | // EHPads can't have DbgVariableRecords attached to them, but it might be |
2821 | // possible for things with token type. |
2822 | Inst->dropDbgRecords(); |
2823 | EndInst = Inst; |
2824 | continue; |
2825 | } |
    if (isa<DbgInfoIntrinsic>(Inst))
2827 | ++NumDeadDbgInst; |
2828 | else |
2829 | ++NumDeadInst; |
2830 | // RemoveDIs: erasing debug-info must be done manually. |
2831 | Inst->dropDbgRecords(); |
2832 | Inst->eraseFromParent(); |
2833 | } |
2834 | return {NumDeadInst, NumDeadDbgInst}; |
2835 | } |
2836 | |
2837 | unsigned llvm::changeToUnreachable(Instruction *I, bool PreserveLCSSA, |
2838 | DomTreeUpdater *DTU, |
2839 | MemorySSAUpdater *MSSAU) { |
2840 | BasicBlock *BB = I->getParent(); |
2841 | |
2842 | if (MSSAU) |
2843 | MSSAU->changeToUnreachable(I); |
2844 | |
2845 | SmallSet<BasicBlock *, 8> UniqueSuccessors; |
2846 | |
2847 | // Loop over all of the successors, removing BB's entry from any PHI |
2848 | // nodes. |
2849 | for (BasicBlock *Successor : successors(BB)) { |
    Successor->removePredecessor(BB, PreserveLCSSA);
    if (DTU)
      UniqueSuccessors.insert(Successor);
2853 | } |
2854 | auto *UI = new UnreachableInst(I->getContext(), I->getIterator()); |
2855 | UI->setDebugLoc(I->getDebugLoc()); |
2856 | |
2857 | // All instructions after this are dead. |
2858 | unsigned NumInstrsRemoved = 0; |
2859 | BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end(); |
2860 | while (BBI != BBE) { |
2861 | if (!BBI->use_empty()) |
      BBI->replaceAllUsesWith(PoisonValue::get(BBI->getType()));
2863 | BBI++->eraseFromParent(); |
2864 | ++NumInstrsRemoved; |
2865 | } |
2866 | if (DTU) { |
2867 | SmallVector<DominatorTree::UpdateType, 8> Updates; |
    Updates.reserve(UniqueSuccessors.size());
    for (BasicBlock *UniqueSuccessor : UniqueSuccessors)
      Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
2871 | DTU->applyUpdates(Updates); |
2872 | } |
2873 | BB->flushTerminatorDbgRecords(); |
2874 | return NumInstrsRemoved; |
2875 | } |
2876 | |
2877 | CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) { |
2878 | SmallVector<Value *, 8> Args(II->args()); |
2879 | SmallVector<OperandBundleDef, 1> OpBundles; |
  II->getOperandBundlesAsDefs(OpBundles);
  CallInst *NewCall = CallInst::Create(II->getFunctionType(),
                                       II->getCalledOperand(), Args, OpBundles);
2883 | NewCall->setCallingConv(II->getCallingConv()); |
2884 | NewCall->setAttributes(II->getAttributes()); |
2885 | NewCall->setDebugLoc(II->getDebugLoc()); |
  NewCall->copyMetadata(*II);
2887 | |
2888 | // If the invoke had profile metadata, try converting them for CallInst. |
2889 | uint64_t TotalWeight; |
  if (NewCall->extractProfTotalWeight(TotalWeight)) {
2891 | // Set the total weight if it fits into i32, otherwise reset. |
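    // For illustration: an invoke carrying !{!"branch_weights", i32 2000,
    // i32 1} has a total weight of 2001, which becomes a single-weight !prof
    // entry on the new call.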
2892 | MDBuilder MDB(NewCall->getContext()); |
2893 | auto NewWeights = uint32_t(TotalWeight) != TotalWeight |
2894 | ? nullptr |
                          : MDB.createBranchWeights({uint32_t(TotalWeight)});
    NewCall->setMetadata(LLVMContext::MD_prof, NewWeights);
2897 | } |
2898 | |
2899 | return NewCall; |
2900 | } |
2901 | |
2902 | // changeToCall - Convert the specified invoke into a normal call. |
2903 | CallInst *llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) { |
2904 | CallInst *NewCall = createCallMatchingInvoke(II); |
  NewCall->takeName(II);
  NewCall->insertBefore(II);
  II->replaceAllUsesWith(NewCall);
2908 | |
2909 | // Follow the call by a branch to the normal destination. |
2910 | BasicBlock *NormalDestBB = II->getNormalDest(); |
  BranchInst::Create(NormalDestBB, II->getIterator());
2912 | |
2913 | // Update PHI nodes in the unwind destination |
2914 | BasicBlock *BB = II->getParent(); |
2915 | BasicBlock *UnwindDestBB = II->getUnwindDest(); |
  UnwindDestBB->removePredecessor(BB);
2917 | II->eraseFromParent(); |
2918 | if (DTU) |
    DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2920 | return NewCall; |
2921 | } |
2922 | |
2923 | BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI, |
2924 | BasicBlock *UnwindEdge, |
2925 | DomTreeUpdater *DTU) { |
2926 | BasicBlock *BB = CI->getParent(); |
2927 | |
2928 | // Convert this function call into an invoke instruction. First, split the |
2929 | // basic block. |
  BasicBlock *Split = SplitBlock(BB, CI, DTU, /*LI=*/nullptr, /*MSSAU*/ nullptr,
                                 CI->getName() + ".noexc");
2932 | |
2933 | // Delete the unconditional branch inserted by SplitBlock |
2934 | BB->back().eraseFromParent(); |
2935 | |
2936 | // Create the new invoke instruction. |
2937 | SmallVector<Value *, 8> InvokeArgs(CI->args()); |
2938 | SmallVector<OperandBundleDef, 1> OpBundles; |
2939 | |
  CI->getOperandBundlesAsDefs(OpBundles);
2941 | |
2942 | // Note: we're round tripping operand bundles through memory here, and that |
2943 | // can potentially be avoided with a cleverer API design that we do not have |
2944 | // as of this time. |
2945 | |
2946 | InvokeInst *II = |
      InvokeInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Split,
                         UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB);
2949 | II->setDebugLoc(CI->getDebugLoc()); |
2950 | II->setCallingConv(CI->getCallingConv()); |
2951 | II->setAttributes(CI->getAttributes()); |
  II->setMetadata(LLVMContext::MD_prof, CI->getMetadata(LLVMContext::MD_prof));
2953 | |
2954 | if (DTU) |
    DTU->applyUpdates({{DominatorTree::Insert, BB, UnwindEdge}});
2956 | |
2957 | // Make sure that anything using the call now uses the invoke! This also |
2958 | // updates the CallGraph if present, because it uses a WeakTrackingVH. |
  CI->replaceAllUsesWith(II);
2960 | |
2961 | // Delete the original call |
2962 | Split->front().eraseFromParent(); |
2963 | return Split; |
2964 | } |
2965 | |
2966 | static bool markAliveBlocks(Function &F, |
2967 | SmallPtrSetImpl<BasicBlock *> &Reachable, |
2968 | DomTreeUpdater *DTU = nullptr) { |
2969 | SmallVector<BasicBlock*, 128> Worklist; |
2970 | BasicBlock *BB = &F.front(); |
  Worklist.push_back(BB);
  Reachable.insert(BB);
2973 | bool Changed = false; |
2974 | do { |
2975 | BB = Worklist.pop_back_val(); |
2976 | |
2977 | // Do a quick scan of the basic block, turning any obviously unreachable |
2978 | // instructions into LLVM unreachable insts. The instruction combining pass |
2979 | // canonicalizes unreachable insts into stores to null or undef. |
2980 | for (Instruction &I : *BB) { |
      if (auto *CI = dyn_cast<CallInst>(&I)) {
2982 | Value *Callee = CI->getCalledOperand(); |
2983 | // Handle intrinsic calls. |
        if (Function *F = dyn_cast<Function>(Callee)) {
2985 | auto IntrinsicID = F->getIntrinsicID(); |
2986 | // Assumptions that are known to be false are equivalent to |
2987 | // unreachable. Also, if the condition is undefined, then we make the |
2988 | // choice most beneficial to the optimizer, and choose that to also be |
2989 | // unreachable. |
2990 | if (IntrinsicID == Intrinsic::assume) { |
            if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
              // Don't insert a call to llvm.trap right before the unreachable.
              changeToUnreachable(CI, false, DTU);
2994 | Changed = true; |
2995 | break; |
2996 | } |
2997 | } else if (IntrinsicID == Intrinsic::experimental_guard) { |
2998 | // A call to the guard intrinsic bails out of the current |
2999 | // compilation unit if the predicate passed to it is false. If the |
3000 | // predicate is a constant false, then we know the guard will bail |
3001 | // out of the current compile unconditionally, so all code following |
3002 | // it is dead. |
3003 | // |
3004 | // Note: unlike in llvm.assume, it is not "obviously profitable" for |
3005 | // guards to treat `undef` as `false` since a guard on `undef` can |
3006 | // still be useful for widening. |
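            // For illustration: a guard on a constant false predicate, e.g.
            // llvm.experimental.guard(i1 false) [ "deopt"() ], unconditionally
            // deoptimizes, so everything after it in this block is dead.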
            if (match(CI->getArgOperand(0), m_Zero()))
              if (!isa<UnreachableInst>(CI->getNextNode())) {
                changeToUnreachable(CI->getNextNode(), false, DTU);
3010 | Changed = true; |
3011 | break; |
3012 | } |
3013 | } |
        } else if ((isa<ConstantPointerNull>(Callee) &&
                    !NullPointerIsDefined(CI->getFunction(),
                                          cast<PointerType>(Callee->getType())
                                              ->getAddressSpace())) ||
                   isa<UndefValue>(Callee)) {
          changeToUnreachable(CI, false, DTU);
3020 | Changed = true; |
3021 | break; |
3022 | } |
3023 | if (CI->doesNotReturn() && !CI->isMustTailCall()) { |
3024 | // If we found a call to a no-return function, insert an unreachable |
3025 | // instruction after it. Make sure there isn't *already* one there |
3026 | // though. |
          if (!isa<UnreachableInst>(CI->getNextNonDebugInstruction())) {
            // Don't insert a call to llvm.trap right before the unreachable.
            changeToUnreachable(CI->getNextNonDebugInstruction(), false, DTU);
3030 | Changed = true; |
3031 | } |
3032 | break; |
3033 | } |
      } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
3035 | // Store to undef and store to null are undefined and used to signal |
3036 | // that they should be changed to unreachable by passes that can't |
3037 | // modify the CFG. |
3038 | |
3039 | // Don't touch volatile stores. |
3040 | if (SI->isVolatile()) continue; |
3041 | |
        Value *Ptr = SI->getOperand(1);

        if (isa<UndefValue>(Ptr) ||
            (isa<ConstantPointerNull>(Ptr) &&
             !NullPointerIsDefined(SI->getFunction(),
                                   SI->getPointerAddressSpace()))) {
          changeToUnreachable(SI, false, DTU);
3049 | Changed = true; |
3050 | break; |
3051 | } |
3052 | } |
3053 | } |
3054 | |
3055 | Instruction *Terminator = BB->getTerminator(); |
    if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
3057 | // Turn invokes that call 'nounwind' functions into ordinary calls. |
3058 | Value *Callee = II->getCalledOperand(); |
      if ((isa<ConstantPointerNull>(Callee) &&
           !NullPointerIsDefined(BB->getParent())) ||
          isa<UndefValue>(Callee)) {
        changeToUnreachable(II, false, DTU);
3063 | Changed = true; |
3064 | } else { |
3065 | if (II->doesNotReturn() && |
            !isa<UnreachableInst>(II->getNormalDest()->front())) {
3067 | // If we found an invoke of a no-return function, |
3068 | // create a new empty basic block with an `unreachable` terminator, |
3069 | // and set it as the normal destination for the invoke, |
3070 | // unless that is already the case. |
3071 | // Note that the original normal destination could have other uses. |
3072 | BasicBlock *OrigNormalDest = II->getNormalDest(); |
          OrigNormalDest->removePredecessor(II->getParent());
          LLVMContext &Ctx = II->getContext();
          BasicBlock *UnreachableNormalDest = BasicBlock::Create(
              Ctx, OrigNormalDest->getName() + ".unreachable",
              II->getFunction(), OrigNormalDest);
3078 | new UnreachableInst(Ctx, UnreachableNormalDest); |
3079 | II->setNormalDest(UnreachableNormalDest); |
3080 | if (DTU) |
            DTU->applyUpdates(
                {{DominatorTree::Delete, BB, OrigNormalDest},
                 {DominatorTree::Insert, BB, UnreachableNormalDest}});
3084 | Changed = true; |
3085 | } |
        if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
3087 | if (II->use_empty() && !II->mayHaveSideEffects()) { |
3088 | // jump to the normal destination branch. |
3089 | BasicBlock *NormalDestBB = II->getNormalDest(); |
3090 | BasicBlock *UnwindDestBB = II->getUnwindDest(); |
            BranchInst::Create(NormalDestBB, II->getIterator());
            UnwindDestBB->removePredecessor(II->getParent());
3093 | II->eraseFromParent(); |
3094 | if (DTU) |
              DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
3096 | } else |
3097 | changeToCall(II, DTU); |
3098 | Changed = true; |
3099 | } |
3100 | } |
3101 | } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Val: Terminator)) { |
3102 | // Remove catchpads which cannot be reached. |
3103 | struct CatchPadDenseMapInfo { |
3104 | static CatchPadInst *getEmptyKey() { |
3105 | return DenseMapInfo<CatchPadInst *>::getEmptyKey(); |
3106 | } |
3107 | |
3108 | static CatchPadInst *getTombstoneKey() { |
3109 | return DenseMapInfo<CatchPadInst *>::getTombstoneKey(); |
3110 | } |
3111 | |
3112 | static unsigned getHashValue(CatchPadInst *CatchPad) { |
          return static_cast<unsigned>(hash_combine_range(
              CatchPad->value_op_begin(), CatchPad->value_op_end()));
3115 | } |
3116 | |
3117 | static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) { |
3118 | if (LHS == getEmptyKey() || LHS == getTombstoneKey() || |
3119 | RHS == getEmptyKey() || RHS == getTombstoneKey()) |
3120 | return LHS == RHS; |
          return LHS->isIdenticalTo(RHS);
3122 | } |
3123 | }; |
3124 | |
3125 | SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases; |
3126 | // Set of unique CatchPads. |
3127 | SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4, |
3128 | CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>> |
3129 | HandlerSet; |
3130 | detail::DenseSetEmpty Empty; |
3131 | for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(), |
3132 | E = CatchSwitch->handler_end(); |
3133 | I != E; ++I) { |
3134 | BasicBlock *HandlerBB = *I; |
3135 | if (DTU) |
3136 | ++NumPerSuccessorCases[HandlerBB]; |
        auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
        if (!HandlerSet.insert({CatchPad, Empty}).second) {
3139 | if (DTU) |
3140 | --NumPerSuccessorCases[HandlerBB]; |
          CatchSwitch->removeHandler(I);
3142 | --I; |
3143 | --E; |
3144 | Changed = true; |
3145 | } |
3146 | } |
3147 | if (DTU) { |
3148 | std::vector<DominatorTree::UpdateType> Updates; |
3149 | for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases) |
3150 | if (I.second == 0) |
            Updates.push_back({DominatorTree::Delete, BB, I.first});
3152 | DTU->applyUpdates(Updates); |
3153 | } |
3154 | } |
3155 | |
    Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU);
    for (BasicBlock *Successor : successors(BB))
      if (Reachable.insert(Successor).second)
        Worklist.push_back(Successor);
3160 | } while (!Worklist.empty()); |
3161 | return Changed; |
3162 | } |
3163 | |
3164 | Instruction *llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) { |
3165 | Instruction *TI = BB->getTerminator(); |
3166 | |
  if (auto *II = dyn_cast<InvokeInst>(TI))
3168 | return changeToCall(II, DTU); |
3169 | |
3170 | Instruction *NewTI; |
3171 | BasicBlock *UnwindDest; |
3172 | |
  if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
    NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr,
                                      CRI->getIterator());
3175 | UnwindDest = CRI->getUnwindDest(); |
  } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
    auto *NewCatchSwitch = CatchSwitchInst::Create(
        CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
        CatchSwitch->getName(), CatchSwitch->getIterator());
    for (BasicBlock *PadBB : CatchSwitch->handlers())
      NewCatchSwitch->addHandler(PadBB);
3182 | |
3183 | NewTI = NewCatchSwitch; |
3184 | UnwindDest = CatchSwitch->getUnwindDest(); |
3185 | } else { |
3186 | llvm_unreachable("Could not find unwind successor" ); |
3187 | } |
3188 | |
  NewTI->takeName(TI);
  NewTI->setDebugLoc(TI->getDebugLoc());
  UnwindDest->removePredecessor(BB);
  TI->replaceAllUsesWith(NewTI);
3193 | TI->eraseFromParent(); |
3194 | if (DTU) |
    DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDest}});
3196 | return NewTI; |
3197 | } |
3198 | |
3199 | /// removeUnreachableBlocks - Remove blocks that are not reachable, even |
3200 | /// if they are in a dead cycle. Return true if a change was made, false |
3201 | /// otherwise. |
3202 | bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU, |
3203 | MemorySSAUpdater *MSSAU) { |
3204 | SmallPtrSet<BasicBlock *, 16> Reachable; |
3205 | bool Changed = markAliveBlocks(F, Reachable, DTU); |
3206 | |
3207 | // If there are unreachable blocks in the CFG... |
3208 | if (Reachable.size() == F.size()) |
3209 | return Changed; |
3210 | |
3211 | assert(Reachable.size() < F.size()); |
3212 | |
3213 | // Are there any blocks left to actually delete? |
3214 | SmallSetVector<BasicBlock *, 8> BlocksToRemove; |
3215 | for (BasicBlock &BB : F) { |
3216 | // Skip reachable basic blocks |
    if (Reachable.count(&BB))
3218 | continue; |
3219 | // Skip already-deleted blocks |
    if (DTU && DTU->isBBPendingDeletion(&BB))
      continue;
    BlocksToRemove.insert(&BB);
3223 | } |
3224 | |
3225 | if (BlocksToRemove.empty()) |
3226 | return Changed; |
3227 | |
3228 | Changed = true; |
3229 | NumRemoved += BlocksToRemove.size(); |
3230 | |
3231 | if (MSSAU) |
    MSSAU->removeBlocks(BlocksToRemove);

  DeleteDeadBlocks(BlocksToRemove.takeVector(), DTU);
3235 | |
3236 | return Changed; |
3237 | } |
3238 | |
3239 | void llvm::combineMetadata(Instruction *K, const Instruction *J, |
3240 | ArrayRef<unsigned> KnownIDs, bool DoesKMove) { |
3241 | SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata; |
3242 | K->dropUnknownNonDebugMetadata(KnownIDs); |
  K->getAllMetadataOtherThanDebugLoc(Metadata);
  for (const auto &MD : Metadata) {
    unsigned Kind = MD.first;
    MDNode *JMD = J->getMetadata(Kind);
3247 | MDNode *KMD = MD.second; |
3248 | |
3249 | switch (Kind) { |
3250 | default: |
      K->setMetadata(Kind, nullptr); // Remove unknown metadata
      break;
    case LLVMContext::MD_dbg:
      llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
    case LLVMContext::MD_DIAssignID:
      K->mergeDIAssignID(J);
      break;
    case LLVMContext::MD_tbaa:
      K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
      break;
    case LLVMContext::MD_alias_scope:
      K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
3263 | break; |
3264 | case LLVMContext::MD_noalias: |
3265 | case LLVMContext::MD_mem_parallel_loop_access: |
      K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
      break;
    case LLVMContext::MD_access_group:
      K->setMetadata(LLVMContext::MD_access_group,
                     intersectAccessGroups(K, J));
      break;
    case LLVMContext::MD_range:
      if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
        K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
      break;
    case LLVMContext::MD_fpmath:
      K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
3278 | break; |
3279 | case LLVMContext::MD_invariant_load: |
3280 | // If K moves, only set the !invariant.load if it is present in both |
3281 | // instructions. |
3282 | if (DoesKMove) |
        K->setMetadata(Kind, JMD);
      break;
    case LLVMContext::MD_nonnull:
      if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
        K->setMetadata(Kind, JMD);
3288 | break; |
3289 | case LLVMContext::MD_invariant_group: |
3290 | // Preserve !invariant.group in K. |
3291 | break; |
3292 | case LLVMContext::MD_mmra: |
3293 | // Combine MMRAs |
3294 | break; |
3295 | case LLVMContext::MD_align: |
      if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
        K->setMetadata(
            Kind, MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
      break;
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      if (DoesKMove)
        K->setMetadata(
            Kind, MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
3305 | break; |
3306 | case LLVMContext::MD_preserve_access_index: |
3307 | // Preserve !preserve.access.index in K. |
3308 | break; |
3309 | case LLVMContext::MD_noundef: |
3310 | // If K does move, keep noundef if it is present in both instructions. |
      if (DoesKMove)
        K->setMetadata(Kind, JMD);
      break;
    case LLVMContext::MD_nontemporal:
      // Preserve !nontemporal if it is present on both instructions.
      K->setMetadata(Kind, JMD);
      break;
    case LLVMContext::MD_prof:
      if (DoesKMove)
        K->setMetadata(Kind, MDNode::getMergedProfMetadata(KMD, JMD, K, J));
3321 | break; |
3322 | } |
3323 | } |
3324 | // Set !invariant.group from J if J has it. If both instructions have it |
3325 | // then we will just pick it from J - even when they are different. |
3326 | // Also make sure that K is load or store - f.e. combining bitcast with load |
3327 | // could produce bitcast with invariant.group metadata, which is invalid. |
3328 | // FIXME: we should try to preserve both invariant.group md if they are |
3329 | // different, but right now instruction can only have one invariant.group. |
  if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
    if (isa<LoadInst>(K) || isa<StoreInst>(K))
      K->setMetadata(LLVMContext::MD_invariant_group, JMD);
3333 | |
3334 | // Merge MMRAs. |
3335 | // This is handled separately because we also want to handle cases where K |
3336 | // doesn't have tags but J does. |
  auto JMMRA = J->getMetadata(LLVMContext::MD_mmra);
  auto KMMRA = K->getMetadata(LLVMContext::MD_mmra);
  if (JMMRA || KMMRA) {
    K->setMetadata(LLVMContext::MD_mmra,
                   MMRAMetadata::combine(K->getContext(), JMMRA, KMMRA));
3342 | } |
3343 | } |
3344 | |
3345 | void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J, |
3346 | bool KDominatesJ) { |
3347 | unsigned KnownIDs[] = {LLVMContext::MD_tbaa, |
3348 | LLVMContext::MD_alias_scope, |
3349 | LLVMContext::MD_noalias, |
3350 | LLVMContext::MD_range, |
3351 | LLVMContext::MD_fpmath, |
3352 | LLVMContext::MD_invariant_load, |
3353 | LLVMContext::MD_nonnull, |
3354 | LLVMContext::MD_invariant_group, |
3355 | LLVMContext::MD_align, |
3356 | LLVMContext::MD_dereferenceable, |
3357 | LLVMContext::MD_dereferenceable_or_null, |
3358 | LLVMContext::MD_access_group, |
3359 | LLVMContext::MD_preserve_access_index, |
3360 | LLVMContext::MD_prof, |
3361 | LLVMContext::MD_nontemporal, |
3362 | LLVMContext::MD_noundef, |
3363 | LLVMContext::MD_mmra}; |
  combineMetadata(K, J, KnownIDs, KDominatesJ);
3365 | } |
3366 | |
3367 | void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) { |
3368 | SmallVector<std::pair<unsigned, MDNode *>, 8> MD; |
  Source.getAllMetadata(MD);
3370 | MDBuilder MDB(Dest.getContext()); |
3371 | Type *NewType = Dest.getType(); |
3372 | const DataLayout &DL = Source.getDataLayout(); |
3373 | for (const auto &MDPair : MD) { |
3374 | unsigned ID = MDPair.first; |
3375 | MDNode *N = MDPair.second; |
3376 | // Note, essentially every kind of metadata should be preserved here! This |
3377 | // routine is supposed to clone a load instruction changing *only its type*. |
3378 | // The only metadata it makes sense to drop is metadata which is invalidated |
3379 | // when the pointer type changes. This should essentially never be the case |
3380 | // in LLVM, but we explicitly switch over only known metadata to be |
3381 | // conservatively correct. If you are adding metadata to LLVM which pertains |
3382 | // to loads, you almost certainly want to add it here. |
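    // For illustration: !nonnull on a pointer load that is being re-typed to
    // an integer is not simply dropped; copyNonnullMetadata below translates
    // it into an equivalent !range that excludes zero.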
3383 | switch (ID) { |
3384 | case LLVMContext::MD_dbg: |
3385 | case LLVMContext::MD_tbaa: |
3386 | case LLVMContext::MD_prof: |
3387 | case LLVMContext::MD_fpmath: |
3388 | case LLVMContext::MD_tbaa_struct: |
3389 | case LLVMContext::MD_invariant_load: |
3390 | case LLVMContext::MD_alias_scope: |
3391 | case LLVMContext::MD_noalias: |
3392 | case LLVMContext::MD_nontemporal: |
3393 | case LLVMContext::MD_mem_parallel_loop_access: |
3394 | case LLVMContext::MD_access_group: |
3395 | case LLVMContext::MD_noundef: |
3396 | // All of these directly apply. |
      Dest.setMetadata(ID, N);
3398 | break; |
3399 | |
3400 | case LLVMContext::MD_nonnull: |
      copyNonnullMetadata(Source, N, Dest);
3402 | break; |
3403 | |
3404 | case LLVMContext::MD_align: |
3405 | case LLVMContext::MD_dereferenceable: |
3406 | case LLVMContext::MD_dereferenceable_or_null: |
3407 | // These only directly apply if the new type is also a pointer. |
3408 | if (NewType->isPointerTy()) |
        Dest.setMetadata(ID, N);
3410 | break; |
3411 | |
3412 | case LLVMContext::MD_range: |
      copyRangeMetadata(DL, Source, N, Dest);
3414 | break; |
3415 | } |
3416 | } |
3417 | } |
3418 | |
3419 | void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) { |
  auto *ReplInst = dyn_cast<Instruction>(Repl);
3421 | if (!ReplInst) |
3422 | return; |
3423 | |
3424 | // Patch the replacement so that it is not more restrictive than the value |
3425 | // being replaced. |
3426 | WithOverflowInst *UnusedWO; |
3427 | // When replacing the result of a llvm.*.with.overflow intrinsic with a |
3428 | // overflowing binary operator, nuw/nsw flags may no longer hold. |
  if (isa<OverflowingBinaryOperator>(ReplInst) &&
      match(I, m_ExtractValue<0>(m_WithOverflowInst(UnusedWO))))
3431 | ReplInst->dropPoisonGeneratingFlags(); |
3432 | // Note that if 'I' is a load being replaced by some operation, |
3433 | // for example, by an arithmetic operation, then andIRFlags() |
3434 | // would just erase all math flags from the original arithmetic |
3435 | // operation, which is clearly not wanted and not needed. |
  else if (!isa<LoadInst>(I))
    ReplInst->andIRFlags(I);
3438 | |
3439 | // FIXME: If both the original and replacement value are part of the |
3440 | // same control-flow region (meaning that the execution of one |
3441 | // guarantees the execution of the other), then we can combine the |
3442 | // noalias scopes here and do better than the general conservative |
3443 | // answer used in combineMetadata(). |
3444 | |
3445 | // In general, GVN unifies expressions over different control-flow |
3446 | // regions, and so we need a conservative combination of the noalias |
3447 | // scopes. |
  combineMetadataForCSE(ReplInst, I, false);
3449 | } |
3450 | |
3451 | template <typename RootType, typename ShouldReplaceFn> |
3452 | static unsigned replaceDominatedUsesWith(Value *From, Value *To, |
3453 | const RootType &Root, |
3454 | const ShouldReplaceFn &ShouldReplace) { |
3455 | assert(From->getType() == To->getType()); |
3456 | |
3457 | unsigned Count = 0; |
  for (Use &U : llvm::make_early_inc_range(From->uses())) {
3459 | if (!ShouldReplace(Root, U)) |
3460 | continue; |
3461 | LLVM_DEBUG(dbgs() << "Replace dominated use of '" ; |
3462 | From->printAsOperand(dbgs()); |
3463 | dbgs() << "' with " << *To << " in " << *U.getUser() << "\n" ); |
3464 | U.set(To); |
3465 | ++Count; |
3466 | } |
3467 | return Count; |
3468 | } |
3469 | |
3470 | unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) { |
3471 | assert(From->getType() == To->getType()); |
3472 | auto *BB = From->getParent(); |
3473 | unsigned Count = 0; |
3474 | |
  for (Use &U : llvm::make_early_inc_range(From->uses())) {
    auto *I = cast<Instruction>(U.getUser());
3477 | if (I->getParent() == BB) |
3478 | continue; |
3479 | U.set(To); |
3480 | ++Count; |
3481 | } |
3482 | return Count; |
3483 | } |
3484 | |
3485 | unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To, |
3486 | DominatorTree &DT, |
3487 | const BasicBlockEdge &Root) { |
3488 | auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) { |
    return DT.dominates(Root, U);
3490 | }; |
  return ::replaceDominatedUsesWith(From, To, Root, Dominates);
3492 | } |
3493 | |
3494 | unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To, |
3495 | DominatorTree &DT, |
3496 | const BasicBlock *BB) { |
3497 | auto Dominates = [&DT](const BasicBlock *BB, const Use &U) { |
3498 | return DT.dominates(BB, U); |
3499 | }; |
  return ::replaceDominatedUsesWith(From, To, BB, Dominates);
3501 | } |
3502 | |
3503 | unsigned llvm::replaceDominatedUsesWithIf( |
3504 | Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Root, |
3505 | function_ref<bool(const Use &U, const Value *To)> ShouldReplace) { |
3506 | auto DominatesAndShouldReplace = |
3507 | [&DT, &ShouldReplace, To](const BasicBlockEdge &Root, const Use &U) { |
        return DT.dominates(Root, U) && ShouldReplace(U, To);
3509 | }; |
  return ::replaceDominatedUsesWith(From, To, Root, DominatesAndShouldReplace);
3511 | } |
3512 | |
3513 | unsigned llvm::replaceDominatedUsesWithIf( |
3514 | Value *From, Value *To, DominatorTree &DT, const BasicBlock *BB, |
3515 | function_ref<bool(const Use &U, const Value *To)> ShouldReplace) { |
3516 | auto DominatesAndShouldReplace = [&DT, &ShouldReplace, |
3517 | To](const BasicBlock *BB, const Use &U) { |
3518 | return DT.dominates(BB, U) && ShouldReplace(U, To); |
3519 | }; |
  return ::replaceDominatedUsesWith(From, To, BB, DominatesAndShouldReplace);
3521 | } |
3522 | |
3523 | bool llvm::callsGCLeafFunction(const CallBase *Call, |
3524 | const TargetLibraryInfo &TLI) { |
3525 | // Check if the function is specifically marked as a gc leaf function. |
3526 | if (Call->hasFnAttr(Kind: "gc-leaf-function" )) |
3527 | return true; |
3528 | if (const Function *F = Call->getCalledFunction()) { |
3529 | if (F->hasFnAttribute(Kind: "gc-leaf-function" )) |
3530 | return true; |
3531 | |
3532 | if (auto IID = F->getIntrinsicID()) { |
3533 | // Most LLVM intrinsics do not take safepoints. |
3534 | return IID != Intrinsic::experimental_gc_statepoint && |
3535 | IID != Intrinsic::experimental_deoptimize && |
3536 | IID != Intrinsic::memcpy_element_unordered_atomic && |
3537 | IID != Intrinsic::memmove_element_unordered_atomic; |
3538 | } |
3539 | } |
3540 | |
3541 | // Lib calls can be materialized by some passes, and won't be |
3542 | // marked as 'gc-leaf-function.' All available Libcalls are |
3543 | // GC-leaf. |
3544 | LibFunc LF; |
  if (TLI.getLibFunc(*Call, LF)) {
    return TLI.has(LF);
3547 | } |
3548 | |
3549 | return false; |
3550 | } |
3551 | |
3552 | void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N, |
3553 | LoadInst &NewLI) { |
3554 | auto *NewTy = NewLI.getType(); |
3555 | |
3556 | // This only directly applies if the new type is also a pointer. |
3557 | if (NewTy->isPointerTy()) { |
    NewLI.setMetadata(LLVMContext::MD_nonnull, N);
3559 | return; |
3560 | } |
3561 | |
3562 | // The only other translation we can do is to integral loads with !range |
3563 | // metadata. |
3564 | if (!NewTy->isIntegerTy()) |
3565 | return; |
3566 | |
3567 | MDBuilder MDB(NewLI.getContext()); |
3568 | const Value *Ptr = OldLI.getPointerOperand(); |
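  // For illustration: for an i64 load of a pointer known to be nonnull, the
  // code below emits !range !{i64 1, i64 0}, the wrapped range covering every
  // value except zero.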
  auto *ITy = cast<IntegerType>(NewTy);
  auto *NullInt = ConstantExpr::getPtrToInt(
      ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
  auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
  NewLI.setMetadata(LLVMContext::MD_range,
                    MDB.createRange(NonNullInt, NullInt));
3575 | } |
3576 | |
3577 | void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI, |
3578 | MDNode *N, LoadInst &NewLI) { |
3579 | auto *NewTy = NewLI.getType(); |
3580 | // Simply copy the metadata if the type did not change. |
3581 | if (NewTy == OldLI.getType()) { |
    NewLI.setMetadata(LLVMContext::MD_range, N);
3583 | return; |
3584 | } |
3585 | |
3586 | // Give up unless it is converted to a pointer where there is a single very |
3587 | // valuable mapping we can do reliably. |
3588 | // FIXME: It would be nice to propagate this in more ways, but the type |
3589 | // conversions make it hard. |
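  // For illustration: an i64 load carrying !range !{i64 1, i64 1024} that is
  // re-typed as a 64-bit pointer load cannot be null (the range excludes
  // zero), so the check below attaches !nonnull.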
3590 | if (!NewTy->isPointerTy()) |
3591 | return; |
3592 | |
3593 | unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy); |
3594 | if (BitWidth == OldLI.getType()->getScalarSizeInBits() && |
      !getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
    MDNode *NN = MDNode::get(OldLI.getContext(), std::nullopt);
    NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
3598 | } |
3599 | } |
3600 | |
3601 | void llvm::dropDebugUsers(Instruction &I) { |
3602 | SmallVector<DbgVariableIntrinsic *, 1> DbgUsers; |
3603 | SmallVector<DbgVariableRecord *, 1> DPUsers; |
  findDbgUsers(DbgUsers, &I, &DPUsers);
3605 | for (auto *DII : DbgUsers) |
3606 | DII->eraseFromParent(); |
3607 | for (auto *DVR : DPUsers) |
3608 | DVR->eraseFromParent(); |
3609 | } |
3610 | |
3611 | void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt, |
3612 | BasicBlock *BB) { |
3613 | // Since we are moving the instructions out of its basic block, we do not |
3614 | // retain their original debug locations (DILocations) and debug intrinsic |
3615 | // instructions. |
3616 | // |
3617 | // Doing so would degrade the debugging experience and adversely affect the |
3618 | // accuracy of profiling information. |
3619 | // |
3620 | // Currently, when hoisting the instructions, we take the following actions: |
3621 | // - Remove their debug intrinsic instructions. |
3622 | // - Set their debug locations to the values from the insertion point. |
3623 | // |
3624 | // As per PR39141 (comment #8), the more fundamental reason why the dbg.values |
3625 | // need to be deleted, is because there will not be any instructions with a |
3626 | // DILocation in either branch left after performing the transformation. We |
3627 | // can only insert a dbg.value after the two branches are joined again. |
3628 | // |
3629 | // See PR38762, PR39243 for more details. |
3630 | // |
3631 | // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to |
3632 | // encode predicated DIExpressions that yield different results on different |
3633 | // code paths. |
3634 | |
3635 | for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) { |
3636 | Instruction *I = &*II; |
3637 | I->dropUBImplyingAttrsAndMetadata(); |
3638 | if (I->isUsedByMetadata()) |
      dropDebugUsers(*I);
3640 | // RemoveDIs: drop debug-info too as the following code does. |
3641 | I->dropDbgRecords(); |
3642 | if (I->isDebugOrPseudoInst()) { |
3643 | // Remove DbgInfo and pseudo probe Intrinsics. |
3644 | II = I->eraseFromParent(); |
3645 | continue; |
3646 | } |
3647 | I->setDebugLoc(InsertPt->getDebugLoc()); |
3648 | ++II; |
3649 | } |
  DomBlock->splice(InsertPt->getIterator(), BB, BB->begin(),
                   BB->getTerminator()->getIterator());
3652 | } |
3653 | |
3654 | DIExpression *llvm::getExpressionForConstant(DIBuilder &DIB, const Constant &C, |
3655 | Type &Ty) { |
3656 | // Create integer constant expression. |
3657 | auto createIntegerExpression = [&DIB](const Constant &CV) -> DIExpression * { |
    const APInt &API = cast<ConstantInt>(&CV)->getValue();
    std::optional<int64_t> InitIntOpt = API.trySExtValue();
    return InitIntOpt ? DIB.createConstantValueExpression(
                            static_cast<uint64_t>(*InitIntOpt))
                      : nullptr;
3662 | : nullptr; |
3663 | }; |
3664 | |
  if (isa<ConstantInt>(C))
3666 | return createIntegerExpression(C); |
3667 | |
  auto *FP = dyn_cast<ConstantFP>(&C);
3669 | if (FP && Ty.isFloatingPointTy() && Ty.getScalarSizeInBits() <= 64) { |
3670 | const APFloat &APF = FP->getValueAPF(); |
3671 | APInt const &API = APF.bitcastToAPInt(); |
3672 | if (auto Temp = API.getZExtValue()) |
      return DIB.createConstantValueExpression(static_cast<uint64_t>(Temp));
    return DIB.createConstantValueExpression(*API.getRawData());
3675 | } |
3676 | |
3677 | if (!Ty.isPointerTy()) |
3678 | return nullptr; |
3679 | |
  if (isa<ConstantPointerNull>(C))
    return DIB.createConstantValueExpression(0);
3682 | |
  if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(&C))
    if (CE->getOpcode() == Instruction::IntToPtr) {
      const Value *V = CE->getOperand(0);
      if (auto CI = dyn_cast_or_null<ConstantInt>(V))
        return createIntegerExpression(*CI);
3688 | } |
3689 | return nullptr; |
3690 | } |
3691 | |
3692 | void llvm::remapDebugVariable(ValueToValueMapTy &Mapping, Instruction *Inst) { |
3693 | auto RemapDebugOperands = [&Mapping](auto *DV, auto Set) { |
3694 | for (auto *Op : Set) { |
3695 | auto I = Mapping.find(Op); |
3696 | if (I != Mapping.end()) |
3697 | DV->replaceVariableLocationOp(Op, I->second, /*AllowEmpty=*/true); |
3698 | } |
3699 | }; |
3700 | auto RemapAssignAddress = [&Mapping](auto *DA) { |
3701 | auto I = Mapping.find(DA->getAddress()); |
3702 | if (I != Mapping.end()) |
3703 | DA->setAddress(I->second); |
3704 | }; |
  if (auto DVI = dyn_cast<DbgVariableIntrinsic>(Inst))
    RemapDebugOperands(DVI, DVI->location_ops());
  if (auto DAI = dyn_cast<DbgAssignIntrinsic>(Inst))
    RemapAssignAddress(DAI);
  for (DbgVariableRecord &DVR : filterDbgVars(Inst->getDbgRecordRange())) {
3710 | RemapDebugOperands(&DVR, DVR.location_ops()); |
3711 | if (DVR.isDbgAssign()) |
3712 | RemapAssignAddress(&DVR); |
3713 | } |
3714 | } |
3715 | |
3716 | namespace { |
3717 | |
3718 | /// A potential constituent of a bitreverse or bswap expression. See |
3719 | /// collectBitParts for a fuller explanation. |
3720 | struct BitPart { |
3721 | BitPart(Value *P, unsigned BW) : Provider(P) { |
    Provenance.resize(BW);
3723 | } |
3724 | |
3725 | /// The Value that this is a bitreverse/bswap of. |
3726 | Value *Provider; |
3727 | |
3728 | /// The "provenance" of each bit. Provenance[A] = B means that bit A |
3729 | /// in Provider becomes bit B in the result of this expression. |
3730 | SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128. |
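  // For illustration: for "(shl i32 %X, 24)", Provenance[24..31] would be
  // [0..7] (the low byte of %X lands in the high byte of the result) and
  // Provenance[0..23] would be Unset.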
3731 | |
3732 | enum { Unset = -1 }; |
3733 | }; |
3734 | |
3735 | } // end anonymous namespace |
3736 | |
3737 | /// Analyze the specified subexpression and see if it is capable of providing |
3738 | /// pieces of a bswap or bitreverse. The subexpression provides a potential |
3739 | /// piece of a bswap or bitreverse if it can be proved that each non-zero bit in |
3740 | /// the output of the expression came from a corresponding bit in some other |
3741 | /// value. This function is recursive, and the end result is a mapping of |
3742 | /// bitnumber to bitnumber. It is the caller's responsibility to validate that |
3743 | /// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse. |
3744 | /// |
/// For example, if the current subexpression is "(shl i32 %X, 24)" then we know
3746 | /// that the expression deposits the low byte of %X into the high byte of the |
3747 | /// result and that all other bits are zero. This expression is accepted and a |
3748 | /// BitPart is returned with Provider set to %X and Provenance[24-31] set to |
3749 | /// [0-7]. |
3750 | /// |
3751 | /// For vector types, all analysis is performed at the per-element level. No |
3752 | /// cross-element analysis is supported (shuffle/insertion/reduction), and all |
3753 | /// constant masks must be splatted across all elements. |
3754 | /// |
3755 | /// To avoid revisiting values, the BitPart results are memoized into the |
3756 | /// provided map. To avoid unnecessary copying of BitParts, BitParts are |
3757 | /// constructed in-place in the \c BPS map. Because of this \c BPS needs to |
3758 | /// store BitParts objects, not pointers. As we need the concept of a nullptr |
3759 | /// BitParts (Value has been analyzed and the analysis failed), we an Optional |
3760 | /// type instead to provide the same functionality. |
3761 | /// |
3762 | /// Because we pass around references into \c BPS, we must use a container that |
3763 | /// does not invalidate internal references (std::map instead of DenseMap). |
3764 | static const std::optional<BitPart> & |
3765 | collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals, |
3766 | std::map<Value *, std::optional<BitPart>> &BPS, int Depth, |
3767 | bool &FoundRoot) { |
  auto I = BPS.find(V);
3769 | if (I != BPS.end()) |
3770 | return I->second; |
3771 | |
3772 | auto &Result = BPS[V] = std::nullopt; |
3773 | auto BitWidth = V->getType()->getScalarSizeInBits(); |
3774 | |
3775 | // Can't do integer/elements > 128 bits. |
3776 | if (BitWidth > 128) |
3777 | return Result; |
3778 | |
3779 | // Prevent stack overflow by limiting the recursion depth |
3780 | if (Depth == BitPartRecursionMaxDepth) { |
3781 | LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n" ); |
3782 | return Result; |
3783 | } |
3784 | |
  if (auto *I = dyn_cast<Instruction>(V)) {
3786 | Value *X, *Y; |
3787 | const APInt *C; |
3788 | |
3789 | // If this is an or instruction, it may be an inner node of the bswap. |
    if (match(V, m_Or(m_Value(X), m_Value(Y)))) {
      // Check we have both sources and they are from the same provider.
      const auto &A = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
                                      Depth + 1, FoundRoot);
3794 | if (!A || !A->Provider) |
3795 | return Result; |
3796 | |
      const auto &B = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
                                      Depth + 1, FoundRoot);
3799 | if (!B || A->Provider != B->Provider) |
3800 | return Result; |
3801 | |
3802 | // Try and merge the two together. |
3803 | Result = BitPart(A->Provider, BitWidth); |
3804 | for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) { |
3805 | if (A->Provenance[BitIdx] != BitPart::Unset && |
3806 | B->Provenance[BitIdx] != BitPart::Unset && |
3807 | A->Provenance[BitIdx] != B->Provenance[BitIdx]) |
3808 | return Result = std::nullopt; |
3809 | |
3810 | if (A->Provenance[BitIdx] == BitPart::Unset) |
3811 | Result->Provenance[BitIdx] = B->Provenance[BitIdx]; |
3812 | else |
3813 | Result->Provenance[BitIdx] = A->Provenance[BitIdx]; |
3814 | } |
3815 | |
3816 | return Result; |
3817 | } |
3818 | |
3819 | // If this is a logical shift by a constant, recurse then shift the result. |
    if (match(V, m_LogicalShift(m_Value(X), m_APInt(C)))) {
      const APInt &BitShift = *C;

      // Ensure the shift amount is defined.
      if (BitShift.uge(BitWidth))
3825 | return Result; |
3826 | |
3827 | // For bswap-only, limit shift amounts to whole bytes, for an early exit. |
3828 | if (!MatchBitReversals && (BitShift.getZExtValue() % 8) != 0) |
3829 | return Result; |
3830 | |
      const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth + 1, FoundRoot);
3833 | if (!Res) |
3834 | return Result; |
3835 | Result = Res; |
3836 | |
3837 | // Perform the "shift" on BitProvenance. |
3838 | auto &P = Result->Provenance; |
3839 | if (I->getOpcode() == Instruction::Shl) { |
        P.erase(std::prev(P.end(), BitShift.getZExtValue()), P.end());
        P.insert(P.begin(), BitShift.getZExtValue(), BitPart::Unset);
      } else {
        P.erase(P.begin(), std::next(P.begin(), BitShift.getZExtValue()));
        P.insert(P.end(), BitShift.getZExtValue(), BitPart::Unset);
3845 | } |
3846 | |
3847 | return Result; |
3848 | } |
3849 | |
3850 | // If this is a logical 'and' with a mask that clears bits, recurse then |
3851 | // unset the appropriate bits. |
    if (match(V, m_And(m_Value(X), m_APInt(C)))) {
3853 | const APInt &AndMask = *C; |
3854 | |
3855 | // Check that the mask allows a multiple of 8 bits for a bswap, for an |
3856 | // early exit. |
3857 | unsigned NumMaskedBits = AndMask.popcount(); |
3858 | if (!MatchBitReversals && (NumMaskedBits % 8) != 0) |
3859 | return Result; |
3860 | |
      const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth + 1, FoundRoot);
3863 | if (!Res) |
3864 | return Result; |
3865 | Result = Res; |
3866 | |
3867 | for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) |
3868 | // If the AndMask is zero for this bit, clear the bit. |
3869 | if (AndMask[BitIdx] == 0) |
3870 | Result->Provenance[BitIdx] = BitPart::Unset; |
3871 | return Result; |
3872 | } |
3873 | |
3874 | // If this is a zext instruction zero extend the result. |
    if (match(V, m_ZExt(m_Value(X)))) {
      const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth + 1, FoundRoot);
3878 | if (!Res) |
3879 | return Result; |
3880 | |
3881 | Result = BitPart(Res->Provider, BitWidth); |
3882 | auto NarrowBitWidth = X->getType()->getScalarSizeInBits(); |
3883 | for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx) |
3884 | Result->Provenance[BitIdx] = Res->Provenance[BitIdx]; |
3885 | for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx) |
3886 | Result->Provenance[BitIdx] = BitPart::Unset; |
3887 | return Result; |
3888 | } |
3889 | |
3890 | // If this is a truncate instruction, extract the lower bits. |
    if (match(V, m_Trunc(m_Value(X)))) {
      const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth + 1, FoundRoot);
3894 | if (!Res) |
3895 | return Result; |
3896 | |
3897 | Result = BitPart(Res->Provider, BitWidth); |
3898 | for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) |
3899 | Result->Provenance[BitIdx] = Res->Provenance[BitIdx]; |
3900 | return Result; |
3901 | } |
3902 | |
    // BITREVERSE - most likely due to us previously matching a partial
    // bitreverse.
    if (match(V, m_BitReverse(m_Value(X)))) {
      const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth + 1, FoundRoot);
3908 | if (!Res) |
3909 | return Result; |
3910 | |
3911 | Result = BitPart(Res->Provider, BitWidth); |
3912 | for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) |
3913 | Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx]; |
3914 | return Result; |
3915 | } |
3916 | |
    // BSWAP - most likely due to us previously matching a partial bswap.
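    // For example (illustrative only): on an i32, byte 0 of the operand
    // provides byte 3 of the result, byte 1 provides byte 2, and so on, while
    // the bit order within each byte is preserved.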
    if (match(V, m_BSwap(m_Value(X)))) {
      const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth + 1, FoundRoot);
      if (!Res)
        return Result;

      unsigned ByteWidth = BitWidth / 8;
      Result = BitPart(Res->Provider, BitWidth);
      for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) {
        unsigned ByteBitOfs = ByteIdx * 8;
        for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx)
          Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] =
              Res->Provenance[ByteBitOfs + BitIdx];
      }
      return Result;
    }

    // Funnel 'double' shifts take 3 operands, 2 inputs and the shift
    // amount (modulo).
    // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
    // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
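    // For example (illustrative only): fshl(X, Y, 8) on i32 places X[23:0] in
    // result bits [31:8] and Y[31:24] in result bits [7:0].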
    if (match(V, m_FShl(m_Value(X), m_Value(Y), m_APInt(C))) ||
        match(V, m_FShr(m_Value(X), m_Value(Y), m_APInt(C)))) {
      // We can treat fshr as a fshl by flipping the modulo amount.
      unsigned ModAmt = C->urem(BitWidth);
      if (cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fshr)
        ModAmt = BitWidth - ModAmt;

      // For bswap-only, limit shift amounts to whole bytes, for an early exit.
      if (!MatchBitReversals && (ModAmt % 8) != 0)
        return Result;

      // Check we have both sources and they are from the same provider.
      const auto &LHS = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth + 1, FoundRoot);
      if (!LHS || !LHS->Provider)
        return Result;

      const auto &RHS = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth + 1, FoundRoot);
      if (!RHS || LHS->Provider != RHS->Provider)
        return Result;

      unsigned StartBitRHS = BitWidth - ModAmt;
      Result = BitPart(LHS->Provider, BitWidth);
      for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx)
        Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx];
      for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx)
        Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS];
      return Result;
    }
  }

  // If we've already found a root input value then we're never going to merge
  // these back together.
  if (FoundRoot)
    return Result;

  // Okay, we got to something that isn't a shift, 'or', 'and', etc. This must
  // be the root input value to the bswap/bitreverse.
  FoundRoot = true;
  Result = BitPart(V, BitWidth);
  for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
    Result->Provenance[BitIdx] = BitIdx;
  return Result;
}

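// A bswap keeps each bit at the same position within its byte but mirrors the
// byte order. For example (illustrative only), on an i32 the provenance of
// result bit 3 must be input bit 27 (bit 3 of the top byte).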
static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
                                          unsigned BitWidth) {
  if (From % 8 != To % 8)
    return false;
  // Convert from bit indices to byte indices and check for a byte reversal.
  From >>= 3;
  To >>= 3;
  BitWidth >>= 3;
  return From == BitWidth - To - 1;
}

static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
                                               unsigned BitWidth) {
  return From == BitWidth - To - 1;
}

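// A minimal sketch of the kind of IR this routine is meant to recognize (the
// value names are illustrative only): an i16 bswap assembled from shifts and
// an 'or',
//   %hi  = shl i16 %x, 8
//   %lo  = lshr i16 %x, 8
//   %rev = or i16 %hi, %lo
// is rewritten to a call to @llvm.bswap.i16(i16 %x).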
bool llvm::recognizeBSwapOrBitReverseIdiom(
    Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
    SmallVectorImpl<Instruction *> &InsertedInsts) {
  if (!match(I, m_Or(m_Value(), m_Value())) &&
      !match(I, m_FShl(m_Value(), m_Value(), m_Value())) &&
      !match(I, m_FShr(m_Value(), m_Value(), m_Value())) &&
      !match(I, m_BSwap(m_Value())))
    return false;
  if (!MatchBSwaps && !MatchBitReversals)
    return false;
  Type *ITy = I->getType();
  if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() > 128)
    return false; // Can't do integer/elements > 128 bits.

  // Try to find all the pieces corresponding to the bswap.
  bool FoundRoot = false;
  std::map<Value *, std::optional<BitPart>> BPS;
  const auto &Res =
      collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0, FoundRoot);
  if (!Res)
    return false;
  ArrayRef<int8_t> BitProvenance = Res->Provenance;
  assert(all_of(BitProvenance,
                [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) &&
         "Illegal bit provenance index");

  // If the upper bits are zero, then attempt to perform as a truncated op.
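  // For example (illustrative only): if only the low 16 bits of an i32 result
  // carry provenance, try to emit the bswap/bitreverse on i16 and zero extend
  // the result back to i32 afterwards.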
  Type *DemandedTy = ITy;
  if (BitProvenance.back() == BitPart::Unset) {
    while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
      BitProvenance = BitProvenance.drop_back();
    if (BitProvenance.empty())
      return false; // TODO - handle null value?
    DemandedTy = Type::getIntNTy(I->getContext(), BitProvenance.size());
    if (auto *IVecTy = dyn_cast<VectorType>(ITy))
      DemandedTy = VectorType::get(DemandedTy, IVecTy);
  }

  // Check BitProvenance hasn't found a source larger than the result type.
  unsigned DemandedBW = DemandedTy->getScalarSizeInBits();
  if (DemandedBW > ITy->getScalarSizeInBits())
    return false;

  // Now, is the bit permutation correct for a bswap or a bitreverse? We can
  // only byteswap values with an even number of bytes.
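  // For example (illustrative only): a 24-bit demanded width has 3 bytes, so
  // it can still be a bitreverse but never a bswap.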
  APInt DemandedMask = APInt::getAllOnes(DemandedBW);
  bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
  bool OKForBitReverse = MatchBitReversals;
  for (unsigned BitIdx = 0;
       (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) {
    if (BitProvenance[BitIdx] == BitPart::Unset) {
      DemandedMask.clearBit(BitIdx);
      continue;
    }
    OKForBSwap &= bitTransformIsCorrectForBSwap(BitProvenance[BitIdx], BitIdx,
                                                DemandedBW);
    OKForBitReverse &= bitTransformIsCorrectForBitReverse(BitProvenance[BitIdx],
                                                          BitIdx, DemandedBW);
  }

  Intrinsic::ID Intrin;
  if (OKForBSwap)
    Intrin = Intrinsic::bswap;
  else if (OKForBitReverse)
    Intrin = Intrinsic::bitreverse;
  else
    return false;

  Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
  Value *Provider = Res->Provider;

  // We may need to truncate the provider.
  if (DemandedTy != Provider->getType()) {
    auto *Trunc = CastInst::CreateIntegerCast(Provider, DemandedTy, false,
                                              "trunc", I->getIterator());
    InsertedInsts.push_back(Trunc);
    Provider = Trunc;
  }

  Instruction *Result = CallInst::Create(F, Provider, "rev", I->getIterator());
  InsertedInsts.push_back(Result);

  if (!DemandedMask.isAllOnes()) {
    auto *Mask = ConstantInt::get(DemandedTy, DemandedMask);
    Result = BinaryOperator::Create(Instruction::And, Result, Mask, "mask",
                                    I->getIterator());
    InsertedInsts.push_back(Result);
  }

  // We may need to zeroextend back to the result type.
  if (ITy != Result->getType()) {
    auto *ExtInst = CastInst::CreateIntegerCast(Result, ITy, false, "zext",
                                                I->getIterator());
    InsertedInsts.push_back(ExtInst);
  }

  return true;
}

// CodeGen has special handling for some string functions that may replace
// them with target-specific intrinsics. Since that'd skip our interceptors
// in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
// we mark affected calls as NoBuiltin, which will disable optimization
// in CodeGen.
void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
    CallInst *CI, const TargetLibraryInfo *TLI) {
  Function *F = CI->getCalledFunction();
  LibFunc Func;
  if (F && !F->hasLocalLinkage() && F->hasName() &&
      TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
      !F->doesNotAccessMemory())
    CI->addFnAttr(Attribute::NoBuiltin);
}

bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
  // We can't have a PHI with a metadata type.
  if (I->getOperand(OpIdx)->getType()->isMetadataTy())
    return false;

  // Early exit.
  if (!isa<Constant>(I->getOperand(OpIdx)))
    return true;

  switch (I->getOpcode()) {
  default:
    return true;
  case Instruction::Call:
  case Instruction::Invoke: {
    const auto &CB = cast<CallBase>(*I);

    // Can't handle inline asm. Skip it.
    if (CB.isInlineAsm())
      return false;

    // Constant bundle operands may need to retain their constant-ness for
    // correctness.
    if (CB.isBundleOperand(OpIdx))
      return false;

    if (OpIdx < CB.arg_size()) {
      // Some variadic intrinsics require constants in the variadic arguments,
      // which currently aren't markable as immarg.
      if (isa<IntrinsicInst>(CB) &&
          OpIdx >= CB.getFunctionType()->getNumParams()) {
        // This is known to be OK for stackmap.
        return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
      }

      // gcroot is a special case, since it requires a constant argument which
      // isn't also required to be a simple ConstantInt.
      if (CB.getIntrinsicID() == Intrinsic::gcroot)
        return false;

      // Some intrinsic operands are required to be immediates.
      return !CB.paramHasAttr(OpIdx, Attribute::ImmArg);
    }

    // The remaining operand is the callee; it can never be replaced for an
    // intrinsic, but it may be possible for an ordinary call.
    return !isa<IntrinsicInst>(CB);
  }
  case Instruction::ShuffleVector:
    // Shufflevector masks are constant.
    return OpIdx != 2;
  case Instruction::Switch:
  case Instruction::ExtractValue:
    // All operands apart from the first are constant.
    return OpIdx == 0;
  case Instruction::InsertValue:
    // All operands apart from the first and the second are constant.
    return OpIdx < 2;
  case Instruction::Alloca:
    // Static allocas (constant size in the entry block) are handled by
    // prologue/epilogue insertion so they're free anyway. We definitely don't
    // want to make them non-constant.
    return !cast<AllocaInst>(I)->isStaticAlloca();
  case Instruction::GetElementPtr:
    if (OpIdx == 0)
      return true;
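    // Indices into struct types select a fixed field and therefore have to
    // stay constant; only array/vector indices may become variable.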
    gep_type_iterator It = gep_type_begin(I);
    for (auto E = std::next(It, OpIdx); It != E; ++It)
      if (It.isStruct())
        return false;
    return true;
  }
}

Value *llvm::invertCondition(Value *Condition) {
  // First: Check if it's a constant
  if (Constant *C = dyn_cast<Constant>(Condition))
    return ConstantExpr::getNot(C);

  // Second: If the condition is already inverted, return the original value
  Value *NotCondition;
  if (match(Condition, m_Not(m_Value(NotCondition))))
    return NotCondition;

  BasicBlock *Parent = nullptr;
  Instruction *Inst = dyn_cast<Instruction>(Condition);
  if (Inst)
    Parent = Inst->getParent();
  else if (Argument *Arg = dyn_cast<Argument>(Condition))
    Parent = &Arg->getParent()->getEntryBlock();
  assert(Parent && "Unsupported condition to invert");

  // Third: Check all the users for an invert
  for (User *U : Condition->users())
    if (Instruction *I = dyn_cast<Instruction>(U))
      if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
        return I;

  // Last option: Create a new instruction
  auto *Inverted =
      BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv");
  if (Inst && !isa<PHINode>(Inst))
    Inverted->insertAfter(Inst);
  else
    Inverted->insertBefore(&*Parent->getFirstInsertionPt());
  return Inverted;
}

bool llvm::inferAttributesFromOthers(Function &F) {
  // Note: We explicitly check for attributes rather than using cover functions
  // because some of the cover functions include the logic being implemented.

  bool Changed = false;
  // readnone + not convergent implies nosync
  if (!F.hasFnAttribute(Attribute::NoSync) && F.doesNotAccessMemory() &&
      !F.isConvergent()) {
    F.setNoSync();
    Changed = true;
  }

  // readonly implies nofree
  if (!F.hasFnAttribute(Attribute::NoFree) && F.onlyReadsMemory()) {
    F.setDoesNotFreeMemory();
    Changed = true;
  }

  // willreturn implies mustprogress
  if (!F.hasFnAttribute(Attribute::MustProgress) && F.willReturn()) {
    F.setMustProgress();
    Changed = true;
  }

  // TODO: There are a bunch of cases of restrictive memory effects we
  // can infer by inspecting arguments of argmemonly-ish functions.

  return Changed;
}