1//===- Local.cpp - Functions to perform local transformations -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This family of functions perform various local transformations to the
10// program.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Transforms/Utils/Local.h"
15#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/DenseMap.h"
17#include "llvm/ADT/DenseMapInfo.h"
18#include "llvm/ADT/DenseSet.h"
19#include "llvm/ADT/Hashing.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SetVector.h"
22#include "llvm/ADT/SmallPtrSet.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/Statistic.h"
25#include "llvm/Analysis/AssumeBundleQueries.h"
26#include "llvm/Analysis/ConstantFolding.h"
27#include "llvm/Analysis/DomTreeUpdater.h"
28#include "llvm/Analysis/InstructionSimplify.h"
29#include "llvm/Analysis/MemoryBuiltins.h"
30#include "llvm/Analysis/MemorySSAUpdater.h"
31#include "llvm/Analysis/TargetLibraryInfo.h"
32#include "llvm/Analysis/ValueTracking.h"
33#include "llvm/Analysis/VectorUtils.h"
34#include "llvm/BinaryFormat/Dwarf.h"
35#include "llvm/IR/Argument.h"
36#include "llvm/IR/Attributes.h"
37#include "llvm/IR/BasicBlock.h"
38#include "llvm/IR/CFG.h"
39#include "llvm/IR/Constant.h"
40#include "llvm/IR/ConstantRange.h"
41#include "llvm/IR/Constants.h"
42#include "llvm/IR/DIBuilder.h"
43#include "llvm/IR/DataLayout.h"
44#include "llvm/IR/DebugInfo.h"
45#include "llvm/IR/DebugInfoMetadata.h"
46#include "llvm/IR/DebugLoc.h"
47#include "llvm/IR/DerivedTypes.h"
48#include "llvm/IR/Dominators.h"
49#include "llvm/IR/EHPersonalities.h"
50#include "llvm/IR/Function.h"
51#include "llvm/IR/GetElementPtrTypeIterator.h"
52#include "llvm/IR/IRBuilder.h"
53#include "llvm/IR/InstrTypes.h"
54#include "llvm/IR/Instruction.h"
55#include "llvm/IR/Instructions.h"
56#include "llvm/IR/IntrinsicInst.h"
57#include "llvm/IR/Intrinsics.h"
58#include "llvm/IR/IntrinsicsWebAssembly.h"
59#include "llvm/IR/LLVMContext.h"
60#include "llvm/IR/MDBuilder.h"
61#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
62#include "llvm/IR/Metadata.h"
63#include "llvm/IR/Module.h"
64#include "llvm/IR/PatternMatch.h"
65#include "llvm/IR/ProfDataUtils.h"
66#include "llvm/IR/Type.h"
67#include "llvm/IR/Use.h"
68#include "llvm/IR/User.h"
69#include "llvm/IR/Value.h"
70#include "llvm/IR/ValueHandle.h"
71#include "llvm/Support/Casting.h"
72#include "llvm/Support/CommandLine.h"
73#include "llvm/Support/Compiler.h"
74#include "llvm/Support/Debug.h"
75#include "llvm/Support/ErrorHandling.h"
76#include "llvm/Support/KnownBits.h"
77#include "llvm/Support/raw_ostream.h"
78#include "llvm/Transforms/Utils/BasicBlockUtils.h"
79#include "llvm/Transforms/Utils/ValueMapper.h"
80#include <algorithm>
81#include <cassert>
82#include <cstdint>
83#include <iterator>
84#include <map>
85#include <optional>
86#include <utility>
87
88using namespace llvm;
89using namespace llvm::PatternMatch;
90
91#define DEBUG_TYPE "local"
92
STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");

// Extra self-check for PHI CSE: defaults to on only in EXPENSIVE_CHECKS
// builds because the verification is costly.
static cl::opt<bool> PHICSEDebugHash(
    "phicse-debug-hash",
#ifdef EXPENSIVE_CHECKS
    cl::init(true),
#else
    cl::init(false),
#endif
    cl::Hidden,
    cl::desc("Perform extra assertion checking to verify that PHINodes's hash "
             "function is well-behaved w.r.t. its isEqual predicate"));

// Threshold below which PHI CSE uses a quadratic pairwise scan rather than
// building a hash set (cheaper for small PHI counts).
static cl::opt<unsigned> PHICSENumPHISmallSize(
    "phicse-num-phi-smallsize", cl::init(32), cl::Hidden,
    cl::desc(
        "When the basic block contains not more than this number of PHI nodes, "
        "perform a (faster!) exhaustive search instead of set-driven one."));

// Guard against pathological PHI growth when folding away empty blocks.
static cl::opt<unsigned> MaxPhiEntriesIncreaseAfterRemovingEmptyBlock(
    "max-phi-entries-increase-after-removing-empty-block", cl::init(1000),
    cl::Hidden,
    cl::desc("Stop removing an empty block if removing it will introduce more "
             "than this number of phi entries in its successor"));

// Max recursion depth for collectBitParts used when detecting bswap and
// bitreverse idioms.
static const unsigned BitPartRecursionMaxDepth = 48;
122
123//===----------------------------------------------------------------------===//
124// Local constant propagation.
125//
126
/// ConstantFoldTerminator - If a terminator instruction is predicated on a
/// constant value, convert it into an unconditional branch to the constant
/// destination. This is a nontrivial operation because the successors of this
/// basic block must have their PHI nodes updated.
/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses this might make dead if
/// DeleteDeadConditions is true.
///
/// Handles three terminator kinds: conditional BranchInst, SwitchInst, and
/// IndirectBrInst. Returns true if the terminator was changed. If \p DTU is
/// non-null, deleted CFG edges are reported to it.
bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
                                  const TargetLibraryInfo *TLI,
                                  DomTreeUpdater *DTU) {
  Instruction *T = BB->getTerminator();
  IRBuilder<> Builder(T);

  // Branch - See if we are conditional jumping on constant
  if (auto *BI = dyn_cast<BranchInst>(T)) {
    if (BI->isUnconditional()) return false;  // Can't optimize uncond branch

    BasicBlock *Dest1 = BI->getSuccessor(0);
    BasicBlock *Dest2 = BI->getSuccessor(1);

    if (Dest2 == Dest1) {       // Conditional branch to same location?
      // This branch matches something like this:
      //     br bool %cond, label %Dest, label %Dest
      // and changes it into:  br label %Dest

      // Let the basic block know that we are letting go of one copy of it.
      assert(BI->getParent() && "Terminator not inserted in block!");
      Dest1->removePredecessor(BI->getParent());

      // Replace the conditional branch with an unconditional one.
      BranchInst *NewBI = Builder.CreateBr(Dest1);

      // Transfer the metadata to the new branch instruction.
      NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
                                LLVMContext::MD_annotation});

      Value *Cond = BI->getCondition();
      BI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      // No DTU update needed: the set of successor blocks is unchanged.
      return true;
    }

    if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
      // Are we branching on constant?
      // YES.  Change to unconditional branch...
      BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
      BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;

      // Let the basic block know that we are letting go of it.  Based on this,
      // it will adjust it's PHI nodes.
      OldDest->removePredecessor(BB);

      // Replace the conditional branch with an unconditional one.
      BranchInst *NewBI = Builder.CreateBr(Destination);

      // Transfer the metadata to the new branch instruction.
      NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
                                LLVMContext::MD_annotation});

      BI->eraseFromParent();
      if (DTU)
        DTU->applyUpdates({{DominatorTree::Delete, BB, OldDest}});
      return true;
    }

    return false;
  }

  if (auto *SI = dyn_cast<SwitchInst>(T)) {
    // If we are switching on a constant, we can convert the switch to an
    // unconditional branch. CI is null if the condition is not constant; the
    // loop below still runs to fold cases that duplicate the default dest.
    auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
    BasicBlock *DefaultDest = SI->getDefaultDest();
    BasicBlock *TheOnlyDest = DefaultDest;

    // If the default is unreachable, ignore it when searching for TheOnlyDest.
    if (SI->defaultDestUnreachable() && SI->getNumCases() > 0)
      TheOnlyDest = SI->case_begin()->getCaseSuccessor();

    bool Changed = false;

    // Figure out which case it goes to.
    for (auto It = SI->case_begin(), End = SI->case_end(); It != End;) {
      // Found case matching a constant operand?
      if (It->getCaseValue() == CI) {
        TheOnlyDest = It->getCaseSuccessor();
        break;
      }

      // Check to see if this branch is going to the same place as the default
      // dest.  If so, eliminate it as an explicit compare.
      if (It->getCaseSuccessor() == DefaultDest) {
        MDNode *MD = getValidBranchWeightMDNode(*SI);
        unsigned NCases = SI->getNumCases();
        // Fold the case metadata into the default if there will be any branches
        // left, unless the metadata doesn't match the switch.
        if (NCases > 1 && MD) {
          // Collect branch weights into a vector.
          SmallVector<uint32_t, 8> Weights;
          extractBranchWeights(MD, Weights);

          // Merge weight of this case to the default weight.
          unsigned Idx = It->getCaseIndex();
          // TODO: Add overflow check.
          Weights[0] += Weights[Idx + 1];
          // Remove weight for this case: swap-with-back keeps the vector
          // compact without shifting the remaining entries.
          std::swap(Weights[Idx + 1], Weights.back());
          Weights.pop_back();
          setBranchWeights(*SI, Weights, hasBranchWeightOrigin(MD));
        }
        // Remove this entry.
        BasicBlock *ParentBB = SI->getParent();
        DefaultDest->removePredecessor(ParentBB);
        It = SI->removeCase(It);
        End = SI->case_end();

        // Removing this case may have made the condition constant. In that
        // case, update CI and restart iteration through the cases.
        if (auto *NewCI = dyn_cast<ConstantInt>(SI->getCondition())) {
          CI = NewCI;
          It = SI->case_begin();
        }

        Changed = true;
        continue;
      }

      // Otherwise, check to see if the switch only branches to one destination.
      // We do this by reseting "TheOnlyDest" to null when we find two non-equal
      // destinations.
      if (It->getCaseSuccessor() != TheOnlyDest)
        TheOnlyDest = nullptr;

      // Increment this iterator as we haven't removed the case.
      ++It;
    }

    if (CI && !TheOnlyDest) {
      // Branching on a constant, but not any of the cases, go to the default
      // successor.
      TheOnlyDest = SI->getDefaultDest();
    }

    // If we found a single destination that we can fold the switch into, do so
    // now.
    if (TheOnlyDest) {
      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);
      BasicBlock *BB = SI->getParent();

      SmallSet<BasicBlock *, 8> RemovedSuccessors;

      // Remove entries from PHI nodes which we no longer branch to...
      // SuccToKeep ensures exactly one PHI entry survives for TheOnlyDest,
      // even if the switch listed it as a successor multiple times.
      BasicBlock *SuccToKeep = TheOnlyDest;
      for (BasicBlock *Succ : successors(SI)) {
        if (DTU && Succ != TheOnlyDest)
          RemovedSuccessors.insert(Succ);
        // Found case matching a constant operand?
        if (Succ == SuccToKeep) {
          SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest
        } else {
          Succ->removePredecessor(BB);
        }
      }

      // Delete the old switch.
      Value *Cond = SI->getCondition();
      SI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      if (DTU) {
        std::vector<DominatorTree::UpdateType> Updates;
        Updates.reserve(RemovedSuccessors.size());
        for (auto *RemovedSuccessor : RemovedSuccessors)
          Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
        DTU->applyUpdates(Updates);
      }
      return true;
    }

    if (SI->getNumCases() == 1) {
      // Otherwise, we can fold this switch into a conditional branch
      // instruction if it has only one non-default destination.
      auto FirstCase = *SI->case_begin();
      Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
                                         FirstCase.getCaseValue(), "cond");

      // Insert the new branch.
      BranchInst *NewBr = Builder.CreateCondBr(Cond,
                                               FirstCase.getCaseSuccessor(),
                                               SI->getDefaultDest());
      SmallVector<uint32_t> Weights;
      if (extractBranchWeights(*SI, Weights) && Weights.size() == 2) {
        uint32_t DefWeight = Weights[0];
        uint32_t CaseWeight = Weights[1];
        // The TrueWeight should be the weight for the single case of SI.
        NewBr->setMetadata(LLVMContext::MD_prof,
                           MDBuilder(BB->getContext())
                               .createBranchWeights(CaseWeight, DefWeight));
      }

      // Update make.implicit metadata to the newly-created conditional branch.
      MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
      if (MakeImplicitMD)
        NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);

      // Delete the old switch. Successor set is unchanged, so no DTU update.
      SI->eraseFromParent();
      return true;
    }
    return Changed;
  }

  if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
    // indirectbr blockaddress(@F, @BB) -> br label @BB
    if (auto *BA =
            dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
      BasicBlock *TheOnlyDest = BA->getBasicBlock();
      SmallSet<BasicBlock *, 8> RemovedSuccessors;

      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);

      BasicBlock *SuccToKeep = TheOnlyDest;
      for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
        BasicBlock *DestBB = IBI->getDestination(i);
        if (DTU && DestBB != TheOnlyDest)
          RemovedSuccessors.insert(DestBB);
        if (IBI->getDestination(i) == SuccToKeep) {
          SuccToKeep = nullptr;
        } else {
          DestBB->removePredecessor(BB);
        }
      }
      Value *Address = IBI->getAddress();
      IBI->eraseFromParent();
      if (DeleteDeadConditions)
        // Delete pointer cast instructions.
        RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);

      // Also zap the blockaddress constant if there are no users remaining,
      // otherwise the destination is still marked as having its address taken.
      if (BA->use_empty())
        BA->destroyConstant();

      // If we didn't find our destination in the IBI successor list, then we
      // have undefined behavior.  Replace the unconditional branch with an
      // 'unreachable' instruction.
      if (SuccToKeep) {
        BB->getTerminator()->eraseFromParent();
        new UnreachableInst(BB->getContext(), BB);
      }

      if (DTU) {
        std::vector<DominatorTree::UpdateType> Updates;
        Updates.reserve(RemovedSuccessors.size());
        for (auto *RemovedSuccessor : RemovedSuccessors)
          Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
        DTU->applyUpdates(Updates);
      }
      return true;
    }
  }

  return false;
}
394
395//===----------------------------------------------------------------------===//
396// Local dead code elimination.
397//
398
399/// isInstructionTriviallyDead - Return true if the result produced by the
400/// instruction is not used, and the instruction has no side effects.
401///
402bool llvm::isInstructionTriviallyDead(Instruction *I,
403 const TargetLibraryInfo *TLI) {
404 if (!I->use_empty())
405 return false;
406 return wouldInstructionBeTriviallyDead(I, TLI);
407}
408
409bool llvm::wouldInstructionBeTriviallyDeadOnUnusedPaths(
410 Instruction *I, const TargetLibraryInfo *TLI) {
411 // Instructions that are "markers" and have implied meaning on code around
412 // them (without explicit uses), are not dead on unused paths.
413 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Val: I))
414 if (II->getIntrinsicID() == Intrinsic::stacksave ||
415 II->getIntrinsicID() == Intrinsic::launder_invariant_group ||
416 II->isLifetimeStartOrEnd())
417 return false;
418 return wouldInstructionBeTriviallyDead(I, TLI);
419}
420
/// Return true if \p I would be trivially dead if it had no uses, i.e. its
/// removal would not change observable behavior. Checks are ordered from the
/// cheap structural filters down to call/intrinsic special cases.
bool llvm::wouldInstructionBeTriviallyDead(const Instruction *I,
                                           const TargetLibraryInfo *TLI) {
  if (I->isTerminator())
    return false;

  // We don't want the landingpad-like instructions removed by anything this
  // general.
  if (I->isEHPad())
    return false;

  // We don't want debug info removed by anything this general.
  if (isa<DbgVariableIntrinsic>(I))
    return false;

  // A dbg.label is removable only if it no longer refers to a label.
  if (const DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
    if (DLI->getLabel())
      return false;
    return true;
  }

  // An unused allocation call (e.g. malloc with no uses) can be removed.
  if (auto *CB = dyn_cast<CallBase>(I))
    if (isRemovableAlloc(CB, TLI))
      return true;

  // Instructions that may not return require special handling: only a small
  // set of intrinsics is known to be removable despite that attribute.
  if (!I->willReturn()) {
    auto *II = dyn_cast<IntrinsicInst>(I);
    if (!II)
      return false;

    switch (II->getIntrinsicID()) {
    case Intrinsic::experimental_guard: {
      // Guards on true are operationally no-ops.  In the future we can
      // consider more sophisticated tradeoffs for guards considering potential
      // for check widening, but for now we keep things simple.
      auto *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0));
      return Cond && Cond->isOne();
    }
    // TODO: These intrinsics are not safe to remove, because this may remove
    // a well-defined trap.
    case Intrinsic::wasm_trunc_signed:
    case Intrinsic::wasm_trunc_unsigned:
    case Intrinsic::ptrauth_auth:
    case Intrinsic::ptrauth_resign:
      return true;
    default:
      return false;
    }
  }

  if (!I->mayHaveSideEffects())
    return true;

  // Special case intrinsics that "may have side effects" but can be deleted
  // when dead.
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    // Safe to delete llvm.stacksave and launder.invariant.group if dead.
    if (II->getIntrinsicID() == Intrinsic::stacksave ||
        II->getIntrinsicID() == Intrinsic::launder_invariant_group)
      return true;

    // Intrinsics declare sideeffects to prevent them from moving, but they are
    // nops without users.
    if (II->getIntrinsicID() == Intrinsic::allow_runtime_check ||
        II->getIntrinsicID() == Intrinsic::allow_ubsan_check)
      return true;

    if (II->isLifetimeStartOrEnd()) {
      auto *Arg = II->getArgOperand(1);
      // Lifetime intrinsics are dead when their right-hand is undef.
      if (isa<UndefValue>(Arg))
        return true;
      // If the right-hand is an alloc, global, or argument and the only uses
      // are lifetime intrinsics then the intrinsics are dead.
      if (isa<AllocaInst>(Arg) || isa<GlobalValue>(Arg) || isa<Argument>(Arg))
        return llvm::all_of(Arg->uses(), [](Use &Use) {
          return isa<LifetimeIntrinsic>(Use.getUser());
        });
      return false;
    }

    // Assumptions are dead if their condition is trivially true.
    if (II->getIntrinsicID() == Intrinsic::assume &&
        isAssumeWithEmptyBundle(cast<AssumeInst>(*II))) {
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
        return !Cond->isZero();

      return false;
    }

    // Constrained FP intrinsics are removable unless they observe or trap on
    // FP exceptions (strict exception behavior).
    if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I)) {
      std::optional<fp::ExceptionBehavior> ExBehavior =
          FPI->getExceptionBehavior();
      return *ExBehavior != fp::ebStrict;
    }
  }

  if (auto *Call = dyn_cast<CallBase>(I)) {
    // Freeing a null/undef pointer is a no-op and thus removable.
    if (Value *FreedOp = getFreedOperand(Call, TLI))
      if (Constant *C = dyn_cast<Constant>(FreedOp))
        return C->isNullValue() || isa<UndefValue>(C);
    if (isMathLibCallNoop(Call, TLI))
      return true;
  }

  // Non-volatile atomic loads from constants can be removed.
  if (auto *LI = dyn_cast<LoadInst>(I))
    if (auto *GV = dyn_cast<GlobalVariable>(
            LI->getPointerOperand()->stripPointerCasts()))
      if (!LI->isVolatile() && GV->isConstant())
        return true;

  return false;
}
534
535/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
536/// trivially dead instruction, delete it. If that makes any of its operands
537/// trivially dead, delete them too, recursively. Return true if any
538/// instructions were deleted.
539bool llvm::RecursivelyDeleteTriviallyDeadInstructions(
540 Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU,
541 std::function<void(Value *)> AboutToDeleteCallback) {
542 Instruction *I = dyn_cast<Instruction>(Val: V);
543 if (!I || !isInstructionTriviallyDead(I, TLI))
544 return false;
545
546 SmallVector<WeakTrackingVH, 16> DeadInsts;
547 DeadInsts.push_back(Elt: I);
548 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
549 AboutToDeleteCallback);
550
551 return true;
552}
553
554bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive(
555 SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
556 MemorySSAUpdater *MSSAU,
557 std::function<void(Value *)> AboutToDeleteCallback) {
558 unsigned S = 0, E = DeadInsts.size(), Alive = 0;
559 for (; S != E; ++S) {
560 auto *I = dyn_cast_or_null<Instruction>(Val&: DeadInsts[S]);
561 if (!I || !isInstructionTriviallyDead(I)) {
562 DeadInsts[S] = nullptr;
563 ++Alive;
564 }
565 }
566 if (Alive == E)
567 return false;
568 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
569 AboutToDeleteCallback);
570 return true;
571}
572
/// Worklist-driven deletion: pops instructions off \p DeadInsts, erases them,
/// and queues any operands that become trivially dead as a result. Every
/// entry must be null or a trivially dead instruction on entry.
void llvm::RecursivelyDeleteTriviallyDeadInstructions(
    SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
    MemorySSAUpdater *MSSAU,
    std::function<void(Value *)> AboutToDeleteCallback) {
  // Process the dead instruction list until empty.
  while (!DeadInsts.empty()) {
    Value *V = DeadInsts.pop_back_val();
    // A WeakTrackingVH nulls itself out if its instruction was deleted
    // elsewhere; skip such stale entries.
    Instruction *I = cast_or_null<Instruction>(V);
    if (!I)
      continue;
    assert(isInstructionTriviallyDead(I, TLI) &&
           "Live instruction found in dead worklist!");
    assert(I->use_empty() && "Instructions with uses are not dead.");

    // Don't lose the debug info while deleting the instructions.
    salvageDebugInfo(*I);

    if (AboutToDeleteCallback)
      AboutToDeleteCallback(I);

    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (Use &OpU : I->operands()) {
      Value *OpV = OpU.get();
      OpU.set(nullptr);

      if (!OpV->use_empty())
        continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI, TLI))
          DeadInsts.push_back(OpI);
    }
    // Notify MemorySSA before the instruction is actually erased.
    if (MSSAU)
      MSSAU->removeMemoryAccess(I);

    I->eraseFromParent();
  }
}
615
616bool llvm::replaceDbgUsesWithUndef(Instruction *I) {
617 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
618 SmallVector<DbgVariableRecord *, 1> DPUsers;
619 findDbgUsers(DbgInsts&: DbgUsers, V: I, DbgVariableRecords: &DPUsers);
620 for (auto *DII : DbgUsers)
621 DII->setKillLocation();
622 for (auto *DVR : DPUsers)
623 DVR->setKillLocation();
624 return !DbgUsers.empty() || !DPUsers.empty();
625}
626
627/// areAllUsesEqual - Check whether the uses of a value are all the same.
628/// This is similar to Instruction::hasOneUse() except this will also return
629/// true when there are no uses or multiple uses that all refer to the same
630/// value.
631static bool areAllUsesEqual(Instruction *I) {
632 Value::user_iterator UI = I->user_begin();
633 Value::user_iterator UE = I->user_end();
634 if (UI == UE)
635 return true;
636
637 User *TheUse = *UI;
638 for (++UI; UI != UE; ++UI) {
639 if (*UI != TheUse)
640 return false;
641 }
642 return true;
643}
644
645/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
646/// dead PHI node, due to being a def-use chain of single-use nodes that
647/// either forms a cycle or is terminated by a trivially dead instruction,
648/// delete it. If that makes any of its operands trivially dead, delete them
649/// too, recursively. Return true if a change was made.
650bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
651 const TargetLibraryInfo *TLI,
652 llvm::MemorySSAUpdater *MSSAU) {
653 SmallPtrSet<Instruction*, 4> Visited;
654 for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
655 I = cast<Instruction>(Val: *I->user_begin())) {
656 if (I->use_empty())
657 return RecursivelyDeleteTriviallyDeadInstructions(V: I, TLI, MSSAU);
658
659 // If we find an instruction more than once, we're on a cycle that
660 // won't prove fruitful.
661 if (!Visited.insert(Ptr: I).second) {
662 // Break the cycle and delete the instruction and its operands.
663 I->replaceAllUsesWith(V: PoisonValue::get(T: I->getType()));
664 (void)RecursivelyDeleteTriviallyDeadInstructions(V: I, TLI, MSSAU);
665 return true;
666 }
667 }
668 return false;
669}
670
/// Try to DCE or simplify a single instruction. Newly dead operands and
/// users affected by a simplification are queued on \p WorkList for later
/// revisits. Returns true if anything changed.
static bool
simplifyAndDCEInstruction(Instruction *I,
                          SmallSetVector<Instruction *, 16> &WorkList,
                          const DataLayout &DL,
                          const TargetLibraryInfo *TLI) {
  if (isInstructionTriviallyDead(I, TLI)) {
    // Preserve debug info for the value before it disappears.
    salvageDebugInfo(*I);

    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      Value *OpV = I->getOperand(i);
      I->setOperand(i, nullptr);

      // 'I == OpV' guards the self-referential case (e.g. a PHI using
      // itself): I is about to be erased, so it must not be re-queued.
      if (!OpV->use_empty() || I == OpV)
        continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI, TLI))
          WorkList.insert(OpI);
    }

    I->eraseFromParent();

    return true;
  }

  if (Value *SimpleV = simplifyInstruction(I, DL)) {
    // Add the users to the worklist. CAREFUL: an instruction can use itself,
    // in the case of a phi node.
    for (User *U : I->users()) {
      if (U != I) {
        WorkList.insert(cast<Instruction>(U));
      }
    }

    // Replace the instruction with its simplified value.
    bool Changed = false;
    if (!I->use_empty()) {
      I->replaceAllUsesWith(SimpleV);
      Changed = true;
    }
    // The RAUW above may have left I unused, making it trivially dead now.
    if (isInstructionTriviallyDead(I, TLI)) {
      I->eraseFromParent();
      Changed = true;
    }
    return Changed;
  }
  return false;
}
724
/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
/// simplify any instructions in it and recursively delete dead instructions.
///
/// This returns true if it changed the code, note that it can delete
/// instructions in other blocks as well in this block.
bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
                                       const TargetLibraryInfo *TLI) {
  bool MadeChange = false;
  const DataLayout &DL = BB->getDataLayout();

#ifndef NDEBUG
  // In debug builds, ensure that the terminator of the block is never replaced
  // or deleted by these simplifications. The idea of simplification is that it
  // cannot introduce new instructions, and there is no way to replace the
  // terminator of a block without introducing a new instruction.
  AssertingVH<Instruction> TerminatorVH(&BB->back());
#endif

  SmallSetVector<Instruction *, 16> WorkList;
  // Iterate over the original function, only adding insts to the worklist
  // if they actually need to be revisited. This avoids having to pre-init
  // the worklist with the entire function's worth of instructions.
  // Note: the end iterator is std::prev(BB->end()) so the terminator is
  // deliberately excluded from the scan.
  for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
       BI != E;) {
    assert(!BI->isTerminator());
    Instruction *I = &*BI;
    // Advance before simplifying: I may be erased by the call below.
    ++BI;

    // We're visiting this instruction now, so make sure it's not in the
    // worklist from an earlier visit.
    if (!WorkList.count(I))
      MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
  }

  // Drain the worklist of instructions that need revisiting.
  while (!WorkList.empty()) {
    Instruction *I = WorkList.pop_back_val();
    MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
  }
  return MadeChange;
}
765
766//===----------------------------------------------------------------------===//
767// Control Flow Graph Restructuring.
768//
769
/// Merge \p DestBB into its single predecessor: single-entry PHIs in DestBB
/// are folded, the predecessor's instructions are spliced into DestBB, all
/// branches to the predecessor are redirected to DestBB, and the predecessor
/// block is deleted. If \p DTU is provided, the dominator tree is updated
/// (and fully recalculated when the entry block is replaced).
void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB,
                                       DomTreeUpdater *DTU) {

  // If BB has single-entry PHI nodes, fold them.
  while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
    Value *NewVal = PN->getIncomingValue(0);
    // Replace self referencing PHI with poison, it must be dead.
    if (NewVal == PN) NewVal = PoisonValue::get(PN->getType());
    PN->replaceAllUsesWith(NewVal);
    PN->eraseFromParent();
  }

  BasicBlock *PredBB = DestBB->getSinglePredecessor();
  assert(PredBB && "Block doesn't have a single predecessor!");

  bool ReplaceEntryBB = PredBB->isEntryBlock();

  // DTU updates: Collect all the edges that enter
  // PredBB. These dominator edges will be redirected to DestBB.
  SmallVector<DominatorTree::UpdateType, 32> Updates;

  if (DTU) {
    // To avoid processing the same predecessor more than once.
    SmallPtrSet<BasicBlock *, 2> SeenPreds;
    Updates.reserve(Updates.size() + 2 * pred_size(PredBB) + 1);
    for (BasicBlock *PredOfPredBB : predecessors(PredBB))
      // This predecessor of PredBB may already have DestBB as a successor.
      if (PredOfPredBB != PredBB)
        if (SeenPreds.insert(PredOfPredBB).second)
          Updates.push_back({DominatorTree::Insert, PredOfPredBB, DestBB});
    // Reuse the set for the second pass collecting edge deletions.
    SeenPreds.clear();
    for (BasicBlock *PredOfPredBB : predecessors(PredBB))
      if (SeenPreds.insert(PredOfPredBB).second)
        Updates.push_back({DominatorTree::Delete, PredOfPredBB, PredBB});
    Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
  }

  // Zap anything that took the address of DestBB. Not doing this will give the
  // address an invalid value.
  if (DestBB->hasAddressTaken()) {
    BlockAddress *BA = BlockAddress::get(DestBB);
    Constant *Replacement =
        ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
    BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
                                                     BA->getType()));
    BA->destroyConstant();
  }

  // Anything that branched to PredBB now branches to DestBB.
  PredBB->replaceAllUsesWith(DestBB);

  // Splice all the instructions from PredBB to DestBB.
  PredBB->getTerminator()->eraseFromParent();
  DestBB->splice(DestBB->begin(), PredBB);
  // Leave PredBB well-formed (a lone 'unreachable') until it is deleted.
  new UnreachableInst(PredBB->getContext(), PredBB);

  // If the PredBB is the entry block of the function, move DestBB up to
  // become the entry block after we erase PredBB.
  if (ReplaceEntryBB)
    DestBB->moveAfter(PredBB);

  if (DTU) {
    assert(PredBB->size() == 1 &&
           isa<UnreachableInst>(PredBB->getTerminator()) &&
           "The successor list of PredBB isn't empty before "
           "applying corresponding DTU updates.");
    DTU->applyUpdatesPermissive(Updates);
    DTU->deleteBB(PredBB);
    // Recalculation of DomTree is needed when updating a forward DomTree and
    // the Entry BB is replaced.
    if (ReplaceEntryBB && DTU->hasDomTree()) {
      // The entry block was removed and there is no external interface for
      // the dominator tree to be notified of this change. In this corner-case
      // we recalculate the entire tree.
      DTU->recalculate(*(DestBB->getParent()));
    }
  }

  else {
    PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
  }
}
852
853/// Return true if we can choose one of these values to use in place of the
854/// other. Note that we will always choose the non-undef value to keep.
855static bool CanMergeValues(Value *First, Value *Second) {
856 return First == Second || isa<UndefValue>(Val: First) || isa<UndefValue>(Val: Second);
857}
858
/// Return true if we can fold BB, an almost-empty BB ending in an unconditional
/// branch to Succ, into Succ.
///
/// Assumption: Succ is the single successor for BB.
static bool
CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ,
                                const SmallPtrSetImpl<BasicBlock *> &BBPreds) {
  assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");

  LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
                    << Succ->getName() << "\n");
  // Shortcut, if there is only a single predecessor it must be BB and merging
  // is always safe
  if (Succ->getSinglePredecessor())
    return true;

  // Look at all the phi nodes in Succ, to see if they present a conflict when
  // merging these blocks
  for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // If the incoming value from BB is again a PHINode in
    // BB which has the same incoming value for *PI as PN does, we can
    // merge the phi nodes and then the blocks can still be merged
    PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
    if (BBPN && BBPN->getParent() == BB) {
      // BB feeds PN through a PHI of its own: check, for every predecessor
      // shared between BB and Succ, that the two PHIs agree on the value.
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
                            PN->getIncomingValue(PI))) {
          LLVM_DEBUG(dbgs()
                     << "Can't fold, phi node " << PN->getName() << " in "
                     << Succ->getName() << " is conflicting with "
                     << BBPN->getName() << " with regard to common predecessor "
                     << IBB->getName() << "\n");
          return false;
        }
      }
    } else {
      Value* Val = PN->getIncomingValueForBlock(BB);
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        // See if the incoming value for the common predecessor is equal to the
        // one for BB, in which case this phi node will not prevent the merging
        // of the block.
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(Val, PN->getIncomingValue(PI))) {
          LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
                            << " in " << Succ->getName()
                            << " is conflicting with regard to common "
                            << "predecessor " << IBB->getName() << "\n");
          return false;
        }
      }
    }
  }

  return true;
}
919
/// Ordered list of predecessor blocks, sized for the common small case.
using PredBlockVector = SmallVector<BasicBlock *, 16>;
/// Maps a predecessor block to the (non-undef) value it feeds into a phi.
using IncomingValueMap = SmallDenseMap<BasicBlock *, Value *, 16>;
922
923/// Determines the value to use as the phi node input for a block.
924///
925/// Select between \p OldVal any value that we know flows from \p BB
926/// to a particular phi on the basis of which one (if either) is not
927/// undef. Update IncomingValues based on the selected value.
928///
929/// \param OldVal The value we are considering selecting.
930/// \param BB The block that the value flows in from.
931/// \param IncomingValues A map from block-to-value for other phi inputs
932/// that we have examined.
933///
934/// \returns the selected value.
935static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
936 IncomingValueMap &IncomingValues) {
937 if (!isa<UndefValue>(Val: OldVal)) {
938 assert((!IncomingValues.count(BB) ||
939 IncomingValues.find(BB)->second == OldVal) &&
940 "Expected OldVal to match incoming value from BB!");
941
942 IncomingValues.insert(KV: std::make_pair(x&: BB, y&: OldVal));
943 return OldVal;
944 }
945
946 IncomingValueMap::const_iterator It = IncomingValues.find(Val: BB);
947 if (It != IncomingValues.end()) return It->second;
948
949 return OldVal;
950}
951
952/// Create a map from block to value for the operands of a
953/// given phi.
954///
955/// Create a map from block to value for each non-undef value flowing
956/// into \p PN.
957///
958/// \param PN The phi we are collecting the map for.
959/// \param IncomingValues [out] The map from block to value for this phi.
960static void gatherIncomingValuesToPhi(PHINode *PN,
961 IncomingValueMap &IncomingValues) {
962 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
963 BasicBlock *BB = PN->getIncomingBlock(i);
964 Value *V = PN->getIncomingValue(i);
965
966 if (!isa<UndefValue>(Val: V))
967 IncomingValues.insert(KV: std::make_pair(x&: BB, y&: V));
968 }
969}
970
971/// Replace the incoming undef values to a phi with the values
972/// from a block-to-value map.
973///
974/// \param PN The phi we are replacing the undefs in.
975/// \param IncomingValues A map from block to value.
976static void replaceUndefValuesInPhi(PHINode *PN,
977 const IncomingValueMap &IncomingValues) {
978 SmallVector<unsigned> TrueUndefOps;
979 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
980 Value *V = PN->getIncomingValue(i);
981
982 if (!isa<UndefValue>(Val: V)) continue;
983
984 BasicBlock *BB = PN->getIncomingBlock(i);
985 IncomingValueMap::const_iterator It = IncomingValues.find(Val: BB);
986
987 // Keep track of undef/poison incoming values. Those must match, so we fix
988 // them up below if needed.
989 // Note: this is conservatively correct, but we could try harder and group
990 // the undef values per incoming basic block.
991 if (It == IncomingValues.end()) {
992 TrueUndefOps.push_back(Elt: i);
993 continue;
994 }
995
996 // There is a defined value for this incoming block, so map this undef
997 // incoming value to the defined value.
998 PN->setIncomingValue(i, V: It->second);
999 }
1000
1001 // If there are both undef and poison values incoming, then convert those
1002 // values to undef. It is invalid to have different values for the same
1003 // incoming block.
1004 unsigned PoisonCount = count_if(Range&: TrueUndefOps, P: [&](unsigned i) {
1005 return isa<PoisonValue>(Val: PN->getIncomingValue(i));
1006 });
1007 if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) {
1008 for (unsigned i : TrueUndefOps)
1009 PN->setIncomingValue(i, V: UndefValue::get(T: PN->getType()));
1010 }
1011}
1012
// Return true only when BB and Succ share at most a single common
// predecessor (found common predecessor is reported via \p CommonPred, which
// the caller must initialize to null). Only handles cases when BB can't be
// merged while its predecessors can be redirected.
static bool
CanRedirectPredsOfEmptyBBToSucc(BasicBlock *BB, BasicBlock *Succ,
                                const SmallPtrSetImpl<BasicBlock *> &BBPreds,
                                BasicBlock *&CommonPred) {

  // There must be phis in BB, otherwise BB will be merged into Succ directly
  if (BB->phis().empty() || Succ->phis().empty())
    return false;

  // BB must have predecessors not shared that can be redirected to Succ
  if (!BB->hasNPredecessorsOrMore(N: 2))
    return false;

  // An indirectbr terminator cannot be retargeted to a different successor,
  // so none of BB's predecessors may end in one.
  if (any_of(Range: BBPreds, P: [](const BasicBlock *Pred) {
        return isa<IndirectBrInst>(Val: Pred->getTerminator());
      }))
    return false;

  // Get the single common predecessor of both BB and Succ. Return false
  // when there is more than one common predecessor.
  for (BasicBlock *SuccPred : predecessors(BB: Succ)) {
    if (BBPreds.count(Ptr: SuccPred)) {
      if (CommonPred)
        return false;
      CommonPred = SuccPred;
    }
  }

  return true;
}
1046
1047/// Check whether removing \p BB will make the phis in its \p Succ have too
1048/// many incoming entries. This function does not check whether \p BB is
1049/// foldable or not.
1050static bool introduceTooManyPhiEntries(BasicBlock *BB, BasicBlock *Succ) {
1051 // If BB only has one predecessor, then removing it will not introduce more
1052 // incoming edges for phis.
1053 if (BB->hasNPredecessors(N: 1))
1054 return false;
1055 unsigned NumPreds = pred_size(BB);
1056 unsigned NumChangedPhi = 0;
1057 for (auto &Phi : Succ->phis()) {
1058 // If the incoming value is a phi and the phi is defined in BB,
1059 // then removing BB will not increase the total phi entries of the ir.
1060 if (auto *IncomingPhi = dyn_cast<PHINode>(Val: Phi.getIncomingValueForBlock(BB)))
1061 if (IncomingPhi->getParent() == BB)
1062 continue;
1063 // Otherwise, we need to add entries to the phi
1064 NumChangedPhi++;
1065 }
1066 // For every phi that needs to be changed, (NumPreds - 1) new entries will be
1067 // added. If the total increase in phi entries exceeds
1068 // MaxPhiEntriesIncreaseAfterRemovingEmptyBlock, it will be considered as
1069 // introducing too many new phi entries.
1070 return (NumPreds - 1) * NumChangedPhi >
1071 MaxPhiEntriesIncreaseAfterRemovingEmptyBlock;
1072}
1073
/// Replace a value flowing from a block to a phi with
/// potentially multiple instances of that value flowing from the
/// block's predecessors to the phi.
///
/// \param BB The block with the value flowing into the phi.
/// \param BBPreds The predecessors of BB.
/// \param PN The phi that we are updating.
/// \param CommonPred The common predecessor of BB and PN's BasicBlock
static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
                                                const PredBlockVector &BBPreds,
                                                PHINode *PN,
                                                BasicBlock *CommonPred) {
  // Remove BB's entry from PN up front; it is re-expressed below in terms of
  // BB's predecessors. DeletePHIIfEmpty is false so PN survives even if this
  // was its last operand.
  Value *OldVal = PN->removeIncomingValue(BB, DeletePHIIfEmpty: false);
  assert(OldVal && "No entry in PHI for Pred BB!");

  IncomingValueMap IncomingValues;

  // We are merging two blocks - BB, and the block containing PN - and
  // as a result we need to redirect edges from the predecessors of BB
  // to go to the block containing PN, and update PN
  // accordingly. Since we allow merging blocks in the case where the
  // predecessor and successor blocks both share some predecessors,
  // and where some of those common predecessors might have undef
  // values flowing into PN, we want to rewrite those values to be
  // consistent with the non-undef values.

  gatherIncomingValuesToPhi(PN, IncomingValues);

  // If this incoming value is one of the PHI nodes in BB, the new entries
  // in the PHI node are the entries from the old PHI.
  if (isa<PHINode>(Val: OldVal) && cast<PHINode>(Val: OldVal)->getParent() == BB) {
    PHINode *OldValPN = cast<PHINode>(Val: OldVal);
    for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
      // Note that, since we are merging phi nodes and BB and Succ might
      // have common predecessors, we could end up with a phi node with
      // identical incoming branches. This will be cleaned up later (and
      // will trigger asserts if we try to clean it up now, without also
      // simplifying the corresponding conditional branch).
      BasicBlock *PredBB = OldValPN->getIncomingBlock(i);

      // The common predecessor keeps its edge to PN's block; it is handled
      // separately after this loop.
      if (PredBB == CommonPred)
        continue;

      Value *PredVal = OldValPN->getIncomingValue(i);
      Value *Selected =
          selectIncomingValueForBlock(OldVal: PredVal, BB: PredBB, IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(V: Selected, BB: PredBB);
    }
    // BB itself survives (it was not killable); route the common
    // predecessor's contribution through BB's remaining edge.
    if (CommonPred)
      PN->addIncoming(V: OldValPN->getIncomingValueForBlock(BB: CommonPred), BB);

  } else {
    for (BasicBlock *PredBB : BBPreds) {
      // Update existing incoming values in PN for this
      // predecessor of BB.
      if (PredBB == CommonPred)
        continue;

      Value *Selected =
          selectIncomingValueForBlock(OldVal, BB: PredBB, IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(V: Selected, BB: PredBB);
    }
    // As above: BB survives and carries the common predecessor's value.
    if (CommonPred)
      PN->addIncoming(V: OldVal, BB);
  }

  // Make any remaining undef operands consistent with the defined values we
  // gathered above.
  replaceUndefValuesInPhi(PN, IncomingValues);
}
1148
// Fold BB — an (almost) empty block ending in an unconditional branch — into
// its single successor Succ, or, when BB itself cannot be removed, redirect
// BB's non-common predecessors to Succ and merge the phis. Returns true if
// any change was made. DTU, if non-null, receives the CFG updates.
bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
                                                   DomTreeUpdater *DTU) {
  assert(BB != &BB->getParent()->getEntryBlock() &&
         "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");

  // We can't simplify infinite loops.
  BasicBlock *Succ = cast<BranchInst>(Val: BB->getTerminator())->getSuccessor(i: 0);
  if (BB == Succ)
    return false;

  SmallPtrSet<BasicBlock *, 16> BBPreds(llvm::from_range, predecessors(BB));

  // The single common predecessor of BB and Succ when BB cannot be killed
  BasicBlock *CommonPred = nullptr;

  bool BBKillable = CanPropagatePredecessorsForPHIs(BB, Succ, BBPreds);

  // Even if we can not fold BB into Succ, we may be able to redirect the
  // predecessors of BB to Succ.
  bool BBPhisMergeable = BBKillable || CanRedirectPredsOfEmptyBBToSucc(
                                           BB, Succ, BBPreds, CommonPred);

  if ((!BBKillable && !BBPhisMergeable) || introduceTooManyPhiEntries(BB, Succ))
    return false;

  // Check to see if merging these blocks/phis would cause conflicts for any of
  // the phi nodes in BB or Succ. If not, we can safely merge.

  // Check for cases where Succ has multiple predecessors and a PHI node in BB
  // has uses which will not disappear when the PHI nodes are merged. It is
  // possible to handle such cases, but difficult: it requires checking whether
  // BB dominates Succ, which is non-trivial to calculate in the case where
  // Succ has multiple predecessors. Also, it requires checking whether
  // constructing the necessary self-referential PHI node doesn't introduce any
  // conflicts; this isn't too difficult, but the previous code for doing this
  // was incorrect.
  //
  // Note that if this check finds a live use, BB dominates Succ, so BB is
  // something like a loop pre-header (or rarely, a part of an irreducible CFG);
  // folding the branch isn't profitable in that case anyway.
  if (!Succ->getSinglePredecessor()) {
    BasicBlock::iterator BBI = BB->begin();
    while (isa<PHINode>(Val: *BBI)) {
      for (Use &U : BBI->uses()) {
        if (PHINode* PN = dyn_cast<PHINode>(Val: U.getUser())) {
          if (PN->getIncomingBlock(U) != BB)
            return false;
        } else {
          return false;
        }
      }
      ++BBI;
    }
  }

  if (BBPhisMergeable && CommonPred)
    LLVM_DEBUG(dbgs() << "Found Common Predecessor between: " << BB->getName()
                      << " and " << Succ->getName() << " : "
                      << CommonPred->getName() << "\n");

  // 'BB' and 'BB->Pred' are loop latches, bail out to preserve inner loop
  // metadata.
  //
  // FIXME: This is a stop-gap solution to preserve inner-loop metadata given
  // current status (that loop metadata is implemented as metadata attached to
  // the branch instruction in the loop latch block). To quote from review
  // comments, "the current representation of loop metadata (using a loop latch
  // terminator attachment) is known to be fundamentally broken. Loop latches
  // are not uniquely associated with loops (both in that a latch can be part of
  // multiple loops and a loop may have multiple latches). Loop headers are. The
  // solution to this problem is also known: Add support for basic block
  // metadata, and attach loop metadata to the loop header."
  //
  // Why bail out:
  // In this case, we expect 'BB' is the latch for outer-loop and 'BB->Pred' is
  // the latch for inner-loop (see reason below), so bail out to preserve
  // inner-loop metadata rather than eliminating 'BB' and attaching its metadata
  // to this inner-loop.
  // - The reason we believe 'BB' and 'BB->Pred' have different inner-most
  // loops: assuming 'BB' and 'BB->Pred' are from the same inner-most loop L,
  // then 'BB' is the header and latch of 'L' and thereby 'L' must consist of
  // one self-looping basic block, which is contradictory with the assumption.
  //
  // To illustrate how inner-loop metadata is dropped:
  //
  // CFG Before
  //
  // BB is while.cond.exit, attached with loop metadata md2.
  // BB->Pred is for.body, attached with loop metadata md1.
  //
  //      entry
  //        |
  //        v
  // ---> while.cond   ------------->  while.end
  // |       |
  // |       v
  // |   while.body
  // |       |
  // |       v
  // |    for.body <---- (md1)
  // |       |  |______|
  // |       v
  // |    while.cond.exit (md2)
  // |       |
  // |_______|
  //
  // CFG After
  //
  // while.cond1 is the merge of while.cond.exit and while.cond above.
  // for.body is attached with md2, and md1 is dropped.
  // If LoopSimplify runs later (as a part of loop pass), it could create
  // dedicated exits for inner-loop (essentially adding `while.cond.exit`
  // back), but it won't see 'md1' nor restore it for the inner-loop.
  //
  //      entry
  //        |
  //        v
  // ---> while.cond1  ------------->  while.end
  // |       |
  // |       v
  // |   while.body
  // |       |
  // |       v
  // |    for.body <---- (md2)
  // |_______|  |______|
  if (Instruction *TI = BB->getTerminator())
    if (TI->hasNonDebugLocLoopMetadata())
      for (BasicBlock *Pred : predecessors(BB))
        if (Instruction *PredTI = Pred->getTerminator())
          if (PredTI->hasNonDebugLocLoopMetadata())
            return false;

  if (BBKillable)
    LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
  else if (BBPhisMergeable)
    LLVM_DEBUG(dbgs() << "Merge Phis in Trivial BB: \n" << *BB);

  SmallVector<DominatorTree::UpdateType, 32> Updates;

  if (DTU) {
    // To avoid processing the same predecessor more than once.
    SmallPtrSet<BasicBlock *, 8> SeenPreds;
    // All predecessors of BB (except the common predecessor) will be moved to
    // Succ.
    Updates.reserve(N: Updates.size() + 2 * pred_size(BB) + 1);
    SmallPtrSet<BasicBlock *, 16> SuccPreds(llvm::from_range,
                                            predecessors(BB: Succ));
    for (auto *PredOfBB : predecessors(BB)) {
      // Do not modify those common predecessors of BB and Succ
      if (!SuccPreds.contains(Ptr: PredOfBB))
        if (SeenPreds.insert(Ptr: PredOfBB).second)
          Updates.push_back(Elt: {DominatorTree::Insert, PredOfBB, Succ});
    }

    SeenPreds.clear();

    for (auto *PredOfBB : predecessors(BB))
      // When BB cannot be killed, do not remove the edge between BB and
      // CommonPred.
      if (SeenPreds.insert(Ptr: PredOfBB).second && PredOfBB != CommonPred)
        Updates.push_back(Elt: {DominatorTree::Delete, PredOfBB, BB});

    if (BBKillable)
      Updates.push_back(Elt: {DominatorTree::Delete, BB, Succ});
  }

  if (isa<PHINode>(Val: Succ->begin())) {
    // If there is more than one pred of succ, and there are PHI nodes in
    // the successor, then we need to add incoming edges for the PHI nodes
    //
    const PredBlockVector BBPreds(predecessors(BB));

    // Loop over all of the PHI nodes in the successor of BB.
    for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(Val: I); ++I) {
      PHINode *PN = cast<PHINode>(Val&: I);
      redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN, CommonPred);
    }
  }

  if (Succ->getSinglePredecessor()) {
    // BB is the only predecessor of Succ, so Succ will end up with exactly
    // the same predecessors BB had.
    // Copy over any phi, debug or lifetime instruction.
    BB->getTerminator()->eraseFromParent();
    Succ->splice(ToIt: Succ->getFirstNonPHIIt(), FromBB: BB);
  } else {
    while (PHINode *PN = dyn_cast<PHINode>(Val: &BB->front())) {
      // We explicitly check for such uses for merging phis.
      assert(PN->use_empty() && "There shouldn't be any uses here!");
      PN->eraseFromParent();
    }
  }

  // If the unconditional branch we replaced contains non-debug llvm.loop
  // metadata, we add the metadata to the branch instructions in the
  // predecessors.
  if (Instruction *TI = BB->getTerminator())
    if (TI->hasNonDebugLocLoopMetadata()) {
      MDNode *LoopMD = TI->getMetadata(KindID: LLVMContext::MD_loop);
      for (BasicBlock *Pred : predecessors(BB))
        Pred->getTerminator()->setMetadata(KindID: LLVMContext::MD_loop, Node: LoopMD);
    }

  if (BBKillable) {
    // Everything that jumped to BB now goes to Succ.
    BB->replaceAllUsesWith(V: Succ);

    if (!Succ->hasName())
      Succ->takeName(V: BB);

    // Clear the successor list of BB to match updates applying to DTU later.
    if (BB->getTerminator())
      BB->back().eraseFromParent();

    new UnreachableInst(BB->getContext(), BB);
    assert(succ_empty(BB) && "The successor list of BB isn't empty before "
                             "applying corresponding DTU updates.");
  } else if (BBPhisMergeable) {
    // Everything except CommonPred that jumped to BB now goes to Succ.
    BB->replaceUsesWithIf(New: Succ, ShouldReplace: [BBPreds, CommonPred](Use &U) -> bool {
      if (Instruction *UseInst = dyn_cast<Instruction>(Val: U.getUser()))
        return UseInst->getParent() != CommonPred &&
               BBPreds.contains(Ptr: UseInst->getParent());
      return false;
    });
  }

  if (DTU)
    DTU->applyUpdates(Updates);

  if (BBKillable)
    DeleteDeadBlock(BB, DTU);

  return true;
}
1384
// Quadratic-time duplicate-phi elimination: compare every pair of phis in BB
// directly. Duplicates are RAUW'd to their first occurrence and collected in
// \p ToRemove (the caller erases them). Returns true if anything changed.
static bool
EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB,
                                    SmallPtrSetImpl<PHINode *> &ToRemove) {
  // This implementation doesn't currently consider undef operands
  // specially. Theoretically, two phis which are identical except for
  // one having an undef where the other doesn't could be collapsed.

  bool Changed = false;

  // Examine each PHI.
  // Note that increment of I must *NOT* be in the iteration_expression, since
  // we don't want to immediately advance when we restart from the beginning.
  for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(Val&: I);) {
    ++I;
    // Is there an identical PHI node in this basic block?
    // Note that we only look in the upper square's triangle,
    // we already checked that the lower triangle PHI's aren't identical.
    for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(Val&: J); ++J) {
      // Skip phis already scheduled for removal; their uses were redirected.
      if (ToRemove.contains(Ptr: DuplicatePN))
        continue;
      if (!DuplicatePN->isIdenticalToWhenDefined(I: PN))
        continue;
      // A duplicate. Replace this PHI with the base PHI.
      ++NumPHICSEs;
      DuplicatePN->replaceAllUsesWith(V: PN);
      ToRemove.insert(Ptr: DuplicatePN);
      Changed = true;

      // The RAUW can change PHIs that we already visited.
      I = BB->begin();
      break; // Start over from the beginning.
    }
  }
  return Changed;
}
1420
// Hash-set-based duplicate-phi elimination: insert each phi into a DenseSet
// keyed by its full identity; an insertion failure identifies a duplicate,
// which is RAUW'd and collected in \p ToRemove. Returns true if anything
// changed.
static bool
EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB,
                                       SmallPtrSetImpl<PHINode *> &ToRemove) {
  // This implementation doesn't currently consider undef operands
  // specially. Theoretically, two phis which are identical except for
  // one having an undef where the other doesn't could be collapsed.

  // DenseMapInfo that treats two structurally identical phis as equal keys.
  struct PHIDenseMapInfo {
    static PHINode *getEmptyKey() {
      return DenseMapInfo<PHINode *>::getEmptyKey();
    }

    static PHINode *getTombstoneKey() {
      return DenseMapInfo<PHINode *>::getTombstoneKey();
    }

    static bool isSentinel(PHINode *PN) {
      return PN == getEmptyKey() || PN == getTombstoneKey();
    }

    // WARNING: this logic must be kept in sync with
    //          Instruction::isIdenticalToWhenDefined()!
    static unsigned getHashValueImpl(PHINode *PN) {
      // Compute a hash value on the operands. Instcombine will likely have
      // sorted them, which helps expose duplicates, but we have to check all
      // the operands to be safe in case instcombine hasn't run.
      return static_cast<unsigned>(
          hash_combine(args: hash_combine_range(R: PN->operand_values()),
                       args: hash_combine_range(R: PN->blocks())));
    }

    static unsigned getHashValue(PHINode *PN) {
#ifndef NDEBUG
      // If -phicse-debug-hash was specified, return a constant -- this
      // will force all hashing to collide, so we'll exhaustively search
      // the table for a match, and the assertion in isEqual will fire if
      // there's a bug causing equal keys to hash differently.
      if (PHICSEDebugHash)
        return 0;
#endif
      return getHashValueImpl(PN);
    }

    static bool isEqualImpl(PHINode *LHS, PHINode *RHS) {
      if (isSentinel(PN: LHS) || isSentinel(PN: RHS))
        return LHS == RHS;
      return LHS->isIdenticalTo(I: RHS);
    }

    static bool isEqual(PHINode *LHS, PHINode *RHS) {
      // These comparisons are nontrivial, so assert that equality implies
      // hash equality (DenseMap demands this as an invariant).
      bool Result = isEqualImpl(LHS, RHS);
      assert(!Result || (isSentinel(LHS) && LHS == RHS) ||
             getHashValueImpl(LHS) == getHashValueImpl(RHS));
      return Result;
    }
  };

  // Set of unique PHINodes.
  DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
  PHISet.reserve(Size: 4 * PHICSENumPHISmallSize);

  // Examine each PHI.
  bool Changed = false;
  for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(Val: I++);) {
    // Phis already scheduled for removal have had their uses redirected.
    if (ToRemove.contains(Ptr: PN))
      continue;
    auto Inserted = PHISet.insert(V: PN);
    if (!Inserted.second) {
      // A duplicate. Replace this PHI with its duplicate.
      ++NumPHICSEs;
      PN->replaceAllUsesWith(V: *Inserted.first);
      ToRemove.insert(Ptr: PN);
      Changed = true;

      // The RAUW can change PHIs that we already visited. Start over from the
      // beginning.
      PHISet.clear();
      I = BB->begin();
    }
  }

  return Changed;
}
1506
1507bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB,
1508 SmallPtrSetImpl<PHINode *> &ToRemove) {
1509 if (
1510#ifndef NDEBUG
1511 !PHICSEDebugHash &&
1512#endif
1513 hasNItemsOrLess(C: BB->phis(), N: PHICSENumPHISmallSize))
1514 return EliminateDuplicatePHINodesNaiveImpl(BB, ToRemove);
1515 return EliminateDuplicatePHINodesSetBasedImpl(BB, ToRemove);
1516}
1517
1518bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
1519 SmallPtrSet<PHINode *, 8> ToRemove;
1520 bool Changed = EliminateDuplicatePHINodes(BB, ToRemove);
1521 for (PHINode *PN : ToRemove)
1522 PN->eraseFromParent();
1523 return Changed;
1524}
1525
1526Align llvm::tryEnforceAlignment(Value *V, Align PrefAlign,
1527 const DataLayout &DL) {
1528 V = V->stripPointerCasts();
1529
1530 if (AllocaInst *AI = dyn_cast<AllocaInst>(Val: V)) {
1531 // TODO: Ideally, this function would not be called if PrefAlign is smaller
1532 // than the current alignment, as the known bits calculation should have
1533 // already taken it into account. However, this is not always the case,
1534 // as computeKnownBits() has a depth limit, while stripPointerCasts()
1535 // doesn't.
1536 Align CurrentAlign = AI->getAlign();
1537 if (PrefAlign <= CurrentAlign)
1538 return CurrentAlign;
1539
1540 // If the preferred alignment is greater than the natural stack alignment
1541 // then don't round up. This avoids dynamic stack realignment.
1542 MaybeAlign StackAlign = DL.getStackAlignment();
1543 if (StackAlign && PrefAlign > *StackAlign)
1544 return CurrentAlign;
1545 AI->setAlignment(PrefAlign);
1546 return PrefAlign;
1547 }
1548
1549 if (auto *GV = dyn_cast<GlobalVariable>(Val: V)) {
1550 // TODO: as above, this shouldn't be necessary.
1551 Align CurrentAlign = GV->getPointerAlignment(DL);
1552 if (PrefAlign <= CurrentAlign)
1553 return CurrentAlign;
1554
1555 // If there is a large requested alignment and we can, bump up the alignment
1556 // of the global. If the memory we set aside for the global may not be the
1557 // memory used by the final program then it is impossible for us to reliably
1558 // enforce the preferred alignment.
1559 if (!GV->canIncreaseAlignment())
1560 return CurrentAlign;
1561
1562 if (GV->isThreadLocal()) {
1563 unsigned MaxTLSAlign = GV->getParent()->getMaxTLSAlignment() / CHAR_BIT;
1564 if (MaxTLSAlign && PrefAlign > Align(MaxTLSAlign))
1565 PrefAlign = Align(MaxTLSAlign);
1566 }
1567
1568 GV->setAlignment(PrefAlign);
1569 return PrefAlign;
1570 }
1571
1572 return Align(1);
1573}
1574
1575Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
1576 const DataLayout &DL,
1577 const Instruction *CxtI,
1578 AssumptionCache *AC,
1579 const DominatorTree *DT) {
1580 assert(V->getType()->isPointerTy() &&
1581 "getOrEnforceKnownAlignment expects a pointer!");
1582
1583 KnownBits Known = computeKnownBits(V, DL, AC, CxtI, DT);
1584 unsigned TrailZ = Known.countMinTrailingZeros();
1585
1586 // Avoid trouble with ridiculously large TrailZ values, such as
1587 // those computed from a null pointer.
1588 // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
1589 TrailZ = std::min(a: TrailZ, b: +Value::MaxAlignmentExponent);
1590
1591 Align Alignment = Align(1ull << std::min(a: Known.getBitWidth() - 1, b: TrailZ));
1592
1593 if (PrefAlign && *PrefAlign > Alignment)
1594 Alignment = std::max(a: Alignment, b: tryEnforceAlignment(V, PrefAlign: *PrefAlign, DL));
1595
1596 // We don't need to make any adjustment.
1597 return Alignment;
1598}
1599
1600///===---------------------------------------------------------------------===//
1601/// Dbg Intrinsic utilities
1602///
1603
1604/// See if there is a dbg.value intrinsic for DIVar for the PHI node.
1605static bool PhiHasDebugValue(DILocalVariable *DIVar,
1606 DIExpression *DIExpr,
1607 PHINode *APN) {
1608 // Since we can't guarantee that the original dbg.declare intrinsic
1609 // is removed by LowerDbgDeclare(), we need to make sure that we are
1610 // not inserting the same dbg.value intrinsic over and over.
1611 SmallVector<DbgValueInst *, 1> DbgValues;
1612 SmallVector<DbgVariableRecord *, 1> DbgVariableRecords;
1613 findDbgValues(DbgValues, V: APN, DbgVariableRecords: &DbgVariableRecords);
1614 for (auto *DVI : DbgValues) {
1615 assert(is_contained(DVI->getValues(), APN));
1616 if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
1617 return true;
1618 }
1619 for (auto *DVR : DbgVariableRecords) {
1620 assert(is_contained(DVR->location_ops(), APN));
1621 if ((DVR->getVariable() == DIVar) && (DVR->getExpression() == DIExpr))
1622 return true;
1623 }
1624 return false;
1625}
1626
/// Check if the alloc size of \p ValTy is large enough to cover the variable
/// (or fragment of the variable) described by \p DII.
///
/// This is primarily intended as a helper for the different
/// ConvertDebugDeclareToDebugValue functions. The dbg.declare that is converted
/// describes an alloca'd variable, so we need to use the alloc size of the
/// value when doing the comparison. E.g. an i1 value will be identified as
/// covering an n-bit fragment, if the store size of i1 is at least n bits.
///
/// NOTE: keep in sync with the DbgVariableRecord overload below.
static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) {
  const DataLayout &DL = DII->getDataLayout();
  TypeSize ValueSize = DL.getTypeAllocSizeInBits(Ty: ValTy);
  // If the expression yields a known bit size for the described fragment,
  // compare it directly against the value's alloc size.
  if (std::optional<uint64_t> FragmentSize =
          DII->getExpression()->getActiveBits(Var: DII->getVariable()))
    return TypeSize::isKnownGE(LHS: ValueSize, RHS: TypeSize::getFixed(ExactSize: *FragmentSize));

  // We can't always calculate the size of the DI variable (e.g. if it is a
  // VLA). Try to use the size of the alloca that the dbg intrinsic describes
  // instead.
  if (DII->isAddressOfVariable()) {
    // DII should have exactly 1 location when it is an address.
    assert(DII->getNumVariableLocationOps() == 1 &&
           "address of variable must have exactly 1 location operand.");
    if (auto *AI =
            dyn_cast_or_null<AllocaInst>(Val: DII->getVariableLocationOp(OpIdx: 0))) {
      if (std::optional<TypeSize> FragmentSize =
              AI->getAllocationSizeInBits(DL)) {
        return TypeSize::isKnownGE(LHS: ValueSize, RHS: *FragmentSize);
      }
    }
  }
  // Could not determine size of variable. Conservatively return false.
  return false;
}
// RemoveDIs: duplicate implementation of the above, using DbgVariableRecords,
// the replacement for dbg.values. Keep in sync with the intrinsic overload.
static bool valueCoversEntireFragment(Type *ValTy, DbgVariableRecord *DVR) {
  const DataLayout &DL = DVR->getModule()->getDataLayout();
  TypeSize ValueSize = DL.getTypeAllocSizeInBits(Ty: ValTy);
  // If the expression yields a known bit size for the described fragment,
  // compare it directly against the value's alloc size.
  if (std::optional<uint64_t> FragmentSize =
          DVR->getExpression()->getActiveBits(Var: DVR->getVariable()))
    return TypeSize::isKnownGE(LHS: ValueSize, RHS: TypeSize::getFixed(ExactSize: *FragmentSize));

  // We can't always calculate the size of the DI variable (e.g. if it is a
  // VLA). Try to use the size of the alloca that the dbg intrinsic describes
  // instead.
  if (DVR->isAddressOfVariable()) {
    // DVR should have exactly 1 location when it is an address.
    assert(DVR->getNumVariableLocationOps() == 1 &&
           "address of variable must have exactly 1 location operand.");
    if (auto *AI =
            dyn_cast_or_null<AllocaInst>(Val: DVR->getVariableLocationOp(OpIdx: 0))) {
      if (std::optional<TypeSize> FragmentSize = AI->getAllocationSizeInBits(DL)) {
        return TypeSize::isKnownGE(LHS: ValueSize, RHS: *FragmentSize);
      }
    }
  }
  // Could not determine size of variable. Conservatively return false.
  return false;
}
1686
1687static void insertDbgValueOrDbgVariableRecord(DIBuilder &Builder, Value *DV,
1688 DILocalVariable *DIVar,
1689 DIExpression *DIExpr,
1690 const DebugLoc &NewLoc,
1691 BasicBlock::iterator Instr) {
1692 ValueAsMetadata *DVAM = ValueAsMetadata::get(V: DV);
1693 DbgVariableRecord *DVRec =
1694 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1695 Instr->getParent()->insertDbgRecordBefore(DR: DVRec, Here: Instr);
1696}
1697
1698static void insertDbgValueOrDbgVariableRecordAfter(
1699 DIBuilder &Builder, Value *DV, DILocalVariable *DIVar, DIExpression *DIExpr,
1700 const DebugLoc &NewLoc, Instruction *Instr) {
1701 BasicBlock::iterator NextIt = std::next(x: Instr->getIterator());
1702 NextIt.setHeadBit(true);
1703 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc, Instr: NextIt);
1704}
1705
1706/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
1707/// that has an associated llvm.dbg.declare intrinsic.
1708void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1709 StoreInst *SI, DIBuilder &Builder) {
1710 assert(DII->isAddressOfVariable() || isa<DbgAssignIntrinsic>(DII));
1711 auto *DIVar = DII->getVariable();
1712 assert(DIVar && "Missing variable");
1713 auto *DIExpr = DII->getExpression();
1714 Value *DV = SI->getValueOperand();
1715
1716 DebugLoc NewLoc = getDebugValueLoc(DII);
1717
1718 // If the alloca describes the variable itself, i.e. the expression in the
1719 // dbg.declare doesn't start with a dereference, we can perform the
1720 // conversion if the value covers the entire fragment of DII.
1721 // If the alloca describes the *address* of DIVar, i.e. DIExpr is
1722 // *just* a DW_OP_deref, we use DV as is for the dbg.value.
1723 // We conservatively ignore other dereferences, because the following two are
1724 // not equivalent:
1725 // dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
1726 // dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
1727 // The former is adding 2 to the address of the variable, whereas the latter
1728 // is adding 2 to the value of the variable. As such, we insist on just a
1729 // deref expression.
1730 bool CanConvert =
1731 DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
1732 valueCoversEntireFragment(ValTy: DV->getType(), DII));
1733 if (CanConvert) {
1734 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1735 Instr: SI->getIterator());
1736 return;
1737 }
1738
1739 // FIXME: If storing to a part of the variable described by the dbg.declare,
1740 // then we want to insert a dbg.value for the corresponding fragment.
1741 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DII
1742 << '\n');
1743 // For now, when there is a store to parts of the variable (but we do not
1744 // know which part) we insert an dbg.value intrinsic to indicate that we
1745 // know nothing about the variable's content.
1746 DV = PoisonValue::get(T: DV->getType());
1747 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1748 Instr: SI->getIterator());
1749}
1750
1751static DIExpression *dropInitialDeref(const DIExpression *DIExpr) {
1752 int NumEltDropped = DIExpr->getElements()[0] == dwarf::DW_OP_LLVM_arg ? 3 : 1;
1753 return DIExpression::get(Context&: DIExpr->getContext(),
1754 Elements: DIExpr->getElements().drop_front(N: NumEltDropped));
1755}
1756
1757void llvm::InsertDebugValueAtStoreLoc(DbgVariableIntrinsic *DII, StoreInst *SI,
1758 DIBuilder &Builder) {
1759 auto *DIVar = DII->getVariable();
1760 assert(DIVar && "Missing variable");
1761 auto *DIExpr = DII->getExpression();
1762 DIExpr = dropInitialDeref(DIExpr);
1763 Value *DV = SI->getValueOperand();
1764
1765 DebugLoc NewLoc = getDebugValueLoc(DII);
1766
1767 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1768 Instr: SI->getIterator());
1769}
1770
1771/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
1772/// that has an associated llvm.dbg.declare intrinsic.
1773void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1774 LoadInst *LI, DIBuilder &Builder) {
1775 auto *DIVar = DII->getVariable();
1776 auto *DIExpr = DII->getExpression();
1777 assert(DIVar && "Missing variable");
1778
1779 if (!valueCoversEntireFragment(ValTy: LI->getType(), DII)) {
1780 // FIXME: If only referring to a part of the variable described by the
1781 // dbg.declare, then we want to insert a dbg.value for the corresponding
1782 // fragment.
1783 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1784 << *DII << '\n');
1785 return;
1786 }
1787
1788 DebugLoc NewLoc = getDebugValueLoc(DII);
1789
1790 // We are now tracking the loaded value instead of the address. In the
1791 // future if multi-location support is added to the IR, it might be
1792 // preferable to keep tracking both the loaded value and the original
1793 // address in case the alloca can not be elided.
1794 insertDbgValueOrDbgVariableRecordAfter(Builder, DV: LI, DIVar, DIExpr, NewLoc,
1795 Instr: LI);
1796}
1797
1798void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR,
1799 StoreInst *SI, DIBuilder &Builder) {
1800 assert(DVR->isAddressOfVariable() || DVR->isDbgAssign());
1801 auto *DIVar = DVR->getVariable();
1802 assert(DIVar && "Missing variable");
1803 auto *DIExpr = DVR->getExpression();
1804 Value *DV = SI->getValueOperand();
1805
1806 DebugLoc NewLoc = getDebugValueLoc(DVR);
1807
1808 // If the alloca describes the variable itself, i.e. the expression in the
1809 // dbg.declare doesn't start with a dereference, we can perform the
1810 // conversion if the value covers the entire fragment of DII.
1811 // If the alloca describes the *address* of DIVar, i.e. DIExpr is
1812 // *just* a DW_OP_deref, we use DV as is for the dbg.value.
1813 // We conservatively ignore other dereferences, because the following two are
1814 // not equivalent:
1815 // dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
1816 // dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
1817 // The former is adding 2 to the address of the variable, whereas the latter
1818 // is adding 2 to the value of the variable. As such, we insist on just a
1819 // deref expression.
1820 bool CanConvert =
1821 DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
1822 valueCoversEntireFragment(ValTy: DV->getType(), DVR));
1823 if (CanConvert) {
1824 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1825 Instr: SI->getIterator());
1826 return;
1827 }
1828
1829 // FIXME: If storing to a part of the variable described by the dbg.declare,
1830 // then we want to insert a dbg.value for the corresponding fragment.
1831 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DVR
1832 << '\n');
1833
1834 // For now, when there is a store to parts of the variable (but we do not
1835 // know which part) we insert an dbg.value intrinsic to indicate that we
1836 // know nothing about the variable's content.
1837 DV = PoisonValue::get(T: DV->getType());
1838 ValueAsMetadata *DVAM = ValueAsMetadata::get(V: DV);
1839 DbgVariableRecord *NewDVR =
1840 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1841 SI->getParent()->insertDbgRecordBefore(DR: NewDVR, Here: SI->getIterator());
1842}
1843
1844void llvm::InsertDebugValueAtStoreLoc(DbgVariableRecord *DVR, StoreInst *SI,
1845 DIBuilder &Builder) {
1846 auto *DIVar = DVR->getVariable();
1847 assert(DIVar && "Missing variable");
1848 auto *DIExpr = DVR->getExpression();
1849 DIExpr = dropInitialDeref(DIExpr);
1850 Value *DV = SI->getValueOperand();
1851
1852 DebugLoc NewLoc = getDebugValueLoc(DVR);
1853
1854 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1855 Instr: SI->getIterator());
1856}
1857
1858/// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
1859/// llvm.dbg.declare intrinsic.
1860void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1861 PHINode *APN, DIBuilder &Builder) {
1862 auto *DIVar = DII->getVariable();
1863 auto *DIExpr = DII->getExpression();
1864 assert(DIVar && "Missing variable");
1865
1866 if (PhiHasDebugValue(DIVar, DIExpr, APN))
1867 return;
1868
1869 if (!valueCoversEntireFragment(ValTy: APN->getType(), DII)) {
1870 // FIXME: If only referring to a part of the variable described by the
1871 // dbg.declare, then we want to insert a dbg.value for the corresponding
1872 // fragment.
1873 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1874 << *DII << '\n');
1875 return;
1876 }
1877
1878 BasicBlock *BB = APN->getParent();
1879 auto InsertionPt = BB->getFirstInsertionPt();
1880
1881 DebugLoc NewLoc = getDebugValueLoc(DII);
1882
1883 // The block may be a catchswitch block, which does not have a valid
1884 // insertion point.
1885 // FIXME: Insert dbg.value markers in the successors when appropriate.
1886 if (InsertionPt != BB->end()) {
1887 insertDbgValueOrDbgVariableRecord(Builder, DV: APN, DIVar, DIExpr, NewLoc,
1888 Instr: InsertionPt);
1889 }
1890}
1891
1892void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, LoadInst *LI,
1893 DIBuilder &Builder) {
1894 auto *DIVar = DVR->getVariable();
1895 auto *DIExpr = DVR->getExpression();
1896 assert(DIVar && "Missing variable");
1897
1898 if (!valueCoversEntireFragment(ValTy: LI->getType(), DVR)) {
1899 // FIXME: If only referring to a part of the variable described by the
1900 // dbg.declare, then we want to insert a DbgVariableRecord for the
1901 // corresponding fragment.
1902 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
1903 << *DVR << '\n');
1904 return;
1905 }
1906
1907 DebugLoc NewLoc = getDebugValueLoc(DVR);
1908
1909 // We are now tracking the loaded value instead of the address. In the
1910 // future if multi-location support is added to the IR, it might be
1911 // preferable to keep tracking both the loaded value and the original
1912 // address in case the alloca can not be elided.
1913
1914 // Create a DbgVariableRecord directly and insert.
1915 ValueAsMetadata *LIVAM = ValueAsMetadata::get(V: LI);
1916 DbgVariableRecord *DV =
1917 new DbgVariableRecord(LIVAM, DIVar, DIExpr, NewLoc.get());
1918 LI->getParent()->insertDbgRecordAfter(DR: DV, I: LI);
1919}
1920
1921/// Determine whether this alloca is either a VLA or an array.
1922static bool isArray(AllocaInst *AI) {
1923 return AI->isArrayAllocation() ||
1924 (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy());
1925}
1926
1927/// Determine whether this alloca is a structure.
1928static bool isStructure(AllocaInst *AI) {
1929 return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy();
1930}
1931void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, PHINode *APN,
1932 DIBuilder &Builder) {
1933 auto *DIVar = DVR->getVariable();
1934 auto *DIExpr = DVR->getExpression();
1935 assert(DIVar && "Missing variable");
1936
1937 if (PhiHasDebugValue(DIVar, DIExpr, APN))
1938 return;
1939
1940 if (!valueCoversEntireFragment(ValTy: APN->getType(), DVR)) {
1941 // FIXME: If only referring to a part of the variable described by the
1942 // dbg.declare, then we want to insert a DbgVariableRecord for the
1943 // corresponding fragment.
1944 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
1945 << *DVR << '\n');
1946 return;
1947 }
1948
1949 BasicBlock *BB = APN->getParent();
1950 auto InsertionPt = BB->getFirstInsertionPt();
1951
1952 DebugLoc NewLoc = getDebugValueLoc(DVR);
1953
1954 // The block may be a catchswitch block, which does not have a valid
1955 // insertion point.
1956 // FIXME: Insert DbgVariableRecord markers in the successors when appropriate.
1957 if (InsertionPt != BB->end()) {
1958 insertDbgValueOrDbgVariableRecord(Builder, DV: APN, DIVar, DIExpr, NewLoc,
1959 Instr: InsertionPt);
1960 }
1961}
1962
/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
/// of llvm.dbg.value intrinsics.
bool llvm::LowerDbgDeclare(Function &F) {
  bool Changed = false;
  DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
  SmallVector<DbgDeclareInst *, 4> Dbgs;
  SmallVector<DbgVariableRecord *> DVRs;
  // Collect every dbg.declare in the function, in both the intrinsic form
  // and the DbgVariableRecord form.
  for (auto &FI : F) {
    for (Instruction &BI : FI) {
      if (auto *DDI = dyn_cast<DbgDeclareInst>(Val: &BI))
        Dbgs.push_back(Elt: DDI);
      for (DbgVariableRecord &DVR : filterDbgVars(R: BI.getDbgRecordRange())) {
        if (DVR.getType() == DbgVariableRecord::LocationType::Declare)
          DVRs.push_back(Elt: &DVR);
      }
    }
  }

  if (Dbgs.empty() && DVRs.empty())
    return Changed;

  // Generic lambda: DDI is a DbgDeclareInst* on the first for_each below and
  // a DbgVariableRecord* on the second.
  auto LowerOne = [&](auto *DDI) {
    AllocaInst *AI =
        dyn_cast_or_null<AllocaInst>(DDI->getVariableLocationOp(0));
    // If this is an alloca for a scalar variable, insert a dbg.value
    // at each load and store to the alloca and erase the dbg.declare.
    // The dbg.values allow tracking a variable even if it is not
    // stored on the stack, while the dbg.declare can only describe
    // the stack slot (and at a lexical-scope granularity). Later
    // passes will attempt to elide the stack slot.
    if (!AI || isArray(AI) || isStructure(AI))
      return;

    // A volatile load/store means that the alloca can't be elided anyway.
    if (llvm::any_of(AI->users(), [](User *U) -> bool {
          if (LoadInst *LI = dyn_cast<LoadInst>(Val: U))
            return LI->isVolatile();
          if (StoreInst *SI = dyn_cast<StoreInst>(Val: U))
            return SI->isVolatile();
          return false;
        }))
      return;

    // Walk the alloca's users, following pointer bitcasts transitively, and
    // lower the declare at each interesting use.
    SmallVector<const Value *, 8> WorkList;
    WorkList.push_back(Elt: AI);
    while (!WorkList.empty()) {
      const Value *V = WorkList.pop_back_val();
      for (const auto &AIUse : V->uses()) {
        User *U = AIUse.getUser();
        if (StoreInst *SI = dyn_cast<StoreInst>(Val: U)) {
          // Only stores *to* the alloca (operand 1 is the pointer operand),
          // not stores of the alloca's address somewhere else.
          if (AIUse.getOperandNo() == 1)
            ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
        } else if (LoadInst *LI = dyn_cast<LoadInst>(Val: U)) {
          ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
        } else if (CallInst *CI = dyn_cast<CallInst>(Val: U)) {
          // This is a call by-value or some other instruction that takes a
          // pointer to the variable. Insert a *value* intrinsic that describes
          // the variable by dereferencing the alloca.
          if (!CI->isLifetimeStartOrEnd()) {
            DebugLoc NewLoc = getDebugValueLoc(DDI);
            auto *DerefExpr =
                DIExpression::append(Expr: DDI->getExpression(), Ops: dwarf::DW_OP_deref);
            insertDbgValueOrDbgVariableRecord(DIB, AI, DDI->getVariable(),
                                              DerefExpr, NewLoc,
                                              CI->getIterator());
          }
        } else if (BitCastInst *BI = dyn_cast<BitCastInst>(Val: U)) {
          // A pointer bitcast's users may themselves load/store the alloca.
          if (BI->getType()->isPointerTy())
            WorkList.push_back(Elt: BI);
        }
      }
    }
    DDI->eraseFromParent();
    Changed = true;
  };

  for_each(Range&: Dbgs, F: LowerOne);
  for_each(Range&: DVRs, F: LowerOne);

  // Lowering can leave behind redundant dbg.values; clean those up.
  if (Changed)
    for (BasicBlock &BB : F)
      RemoveRedundantDbgInstrs(BB: &BB);

  return Changed;
}
2048
// RemoveDIs: re-implementation of insertDebugValuesForPHIs, but which pulls the
// debug-info out of the block's DbgVariableRecords rather than dbg.value
// intrinsics.
static void
insertDbgVariableRecordsForPHIs(BasicBlock *BB,
                                SmallVectorImpl<PHINode *> &InsertedPHIs) {
  assert(BB && "No BasicBlock to clone DbgVariableRecord(s) from.");
  if (InsertedPHIs.size() == 0)
    return;

  // Map existing PHI nodes to their DbgVariableRecords.
  DenseMap<Value *, DbgVariableRecord *> DbgValueMap;
  for (auto &I : *BB) {
    for (DbgVariableRecord &DVR : filterDbgVars(R: I.getDbgRecordRange())) {
      for (Value *V : DVR.location_ops())
        if (auto *Loc = dyn_cast_or_null<PHINode>(Val: V))
          DbgValueMap.insert(KV: {Loc, &DVR});
    }
  }
  // No record refers to a PHI in this block; nothing to propagate.
  if (DbgValueMap.size() == 0)
    return;

  // Map a pair of the destination BB and old DbgVariableRecord to the new
  // DbgVariableRecord, so that if a DbgVariableRecord is being rewritten to use
  // more than one of the inserted PHIs in the same destination BB, we can
  // update the same DbgVariableRecord with all the new PHIs instead of creating
  // one copy for each.
  MapVector<std::pair<BasicBlock *, DbgVariableRecord *>, DbgVariableRecord *>
      NewDbgValueMap;
  // Then iterate through the new PHIs and look to see if they use one of the
  // previously mapped PHIs. If so, create a new DbgVariableRecord that will
  // propagate the info through the new PHI. If we use more than one new PHI in
  // a single destination BB with the same old dbg.value, merge the updates so
  // that we get a single new DbgVariableRecord with all the new PHIs.
  for (auto PHI : InsertedPHIs) {
    BasicBlock *Parent = PHI->getParent();
    // Avoid inserting a debug-info record into an EH block.
    if (Parent->getFirstNonPHIIt()->isEHPad())
      continue;
    for (auto VI : PHI->operand_values()) {
      auto V = DbgValueMap.find(Val: VI);
      if (V != DbgValueMap.end()) {
        DbgVariableRecord *DbgII = cast<DbgVariableRecord>(Val: V->second);
        // Clone the old record lazily: at most once per (block, record) pair.
        auto NewDI = NewDbgValueMap.find(Key: {Parent, DbgII});
        if (NewDI == NewDbgValueMap.end()) {
          DbgVariableRecord *NewDbgII = DbgII->clone();
          NewDI = NewDbgValueMap.insert(KV: {{Parent, DbgII}, NewDbgII}).first;
        }
        DbgVariableRecord *NewDbgII = NewDI->second;
        // If PHI contains VI as an operand more than once, we may have
        // already replaced it in NewDbgII; only rewrite while VI is present.
        if (is_contained(Range: NewDbgII->location_ops(), Element: VI))
          NewDbgII->replaceVariableLocationOp(OldValue: VI, NewValue: PHI);
      }
    }
  }
  // Insert the new DbgVariableRecords into their destination blocks.
  for (auto DI : NewDbgValueMap) {
    BasicBlock *Parent = DI.first.first;
    DbgVariableRecord *NewDbgII = DI.second;
    auto InsertionPt = Parent->getFirstInsertionPt();
    assert(InsertionPt != Parent->end() && "Ill-formed basic block");

    Parent->insertDbgRecordBefore(DR: NewDbgII, Here: InsertionPt);
  }
}
2115
/// Propagate dbg.value intrinsics through the newly inserted PHIs.
void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
                                    SmallVectorImpl<PHINode *> &InsertedPHIs) {
  assert(BB && "No BasicBlock to clone dbg.value(s) from.");
  if (InsertedPHIs.size() == 0)
    return;

  // Handle the DbgVariableRecord form first; the remainder of this function
  // deals with the dbg.value intrinsic form.
  insertDbgVariableRecordsForPHIs(BB, InsertedPHIs);

  // Map existing PHI nodes to their dbg.values.
  ValueToValueMapTy DbgValueMap;
  for (auto &I : *BB) {
    if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(Val: &I)) {
      for (Value *V : DbgII->location_ops())
        if (auto *Loc = dyn_cast_or_null<PHINode>(Val: V))
          DbgValueMap.insert(KV: {Loc, DbgII});
    }
  }
  // No dbg.value refers to a PHI in this block; nothing to propagate.
  if (DbgValueMap.size() == 0)
    return;

  // Map a pair of the destination BB and old dbg.value to the new dbg.value,
  // so that if a dbg.value is being rewritten to use more than one of the
  // inserted PHIs in the same destination BB, we can update the same dbg.value
  // with all the new PHIs instead of creating one copy for each.
  MapVector<std::pair<BasicBlock *, DbgVariableIntrinsic *>,
            DbgVariableIntrinsic *>
      NewDbgValueMap;
  // Then iterate through the new PHIs and look to see if they use one of the
  // previously mapped PHIs. If so, create a new dbg.value intrinsic that will
  // propagate the info through the new PHI. If we use more than one new PHI in
  // a single destination BB with the same old dbg.value, merge the updates so
  // that we get a single new dbg.value with all the new PHIs.
  for (auto *PHI : InsertedPHIs) {
    BasicBlock *Parent = PHI->getParent();
    // Avoid inserting an intrinsic into an EH block.
    if (Parent->getFirstNonPHIIt()->isEHPad())
      continue;
    for (auto *VI : PHI->operand_values()) {
      auto V = DbgValueMap.find(Val: VI);
      if (V != DbgValueMap.end()) {
        auto *DbgII = cast<DbgVariableIntrinsic>(Val&: V->second);
        // Clone the old dbg.value lazily: at most once per (block, dbg.value).
        auto [NewDI, Inserted] = NewDbgValueMap.try_emplace(Key: {Parent, DbgII});
        if (Inserted)
          NewDI->second = cast<DbgVariableIntrinsic>(Val: DbgII->clone());
        DbgVariableIntrinsic *NewDbgII = NewDI->second;
        // If PHI contains VI as an operand more than once, we may have
        // already replaced it in NewDbgII; only rewrite while VI is present.
        if (is_contained(Range: NewDbgII->location_ops(), Element: VI))
          NewDbgII->replaceVariableLocationOp(OldValue: VI, NewValue: PHI);
      }
    }
  }
  // Insert the new dbg.values into their destination blocks.
  for (auto DI : NewDbgValueMap) {
    BasicBlock *Parent = DI.first.first;
    auto *NewDbgII = DI.second;
    auto InsertionPt = Parent->getFirstInsertionPt();
    assert(InsertionPt != Parent->end() && "Ill-formed basic block");
    NewDbgII->insertBefore(InsertPos: InsertionPt);
  }
}
2178
2179bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
2180 DIBuilder &Builder, uint8_t DIExprFlags,
2181 int Offset) {
2182 TinyPtrVector<DbgDeclareInst *> DbgDeclares = findDbgDeclares(V: Address);
2183 TinyPtrVector<DbgVariableRecord *> DVRDeclares = findDVRDeclares(V: Address);
2184
2185 auto ReplaceOne = [&](auto *DII) {
2186 assert(DII->getVariable() && "Missing variable");
2187 auto *DIExpr = DII->getExpression();
2188 DIExpr = DIExpression::prepend(Expr: DIExpr, Flags: DIExprFlags, Offset);
2189 DII->setExpression(DIExpr);
2190 DII->replaceVariableLocationOp(Address, NewAddress);
2191 };
2192
2193 for_each(Range&: DbgDeclares, F: ReplaceOne);
2194 for_each(Range&: DVRDeclares, F: ReplaceOne);
2195
2196 return !DbgDeclares.empty() || !DVRDeclares.empty();
2197}
2198
2199static void updateOneDbgValueForAlloca(const DebugLoc &Loc,
2200 DILocalVariable *DIVar,
2201 DIExpression *DIExpr, Value *NewAddress,
2202 DbgValueInst *DVI,
2203 DbgVariableRecord *DVR,
2204 DIBuilder &Builder, int Offset) {
2205 assert(DIVar && "Missing variable");
2206
2207 // This is an alloca-based dbg.value/DbgVariableRecord. The first thing it
2208 // should do with the alloca pointer is dereference it. Otherwise we don't
2209 // know how to handle it and give up.
2210 if (!DIExpr || DIExpr->getNumElements() < 1 ||
2211 DIExpr->getElement(I: 0) != dwarf::DW_OP_deref)
2212 return;
2213
2214 // Insert the offset before the first deref.
2215 if (Offset)
2216 DIExpr = DIExpression::prepend(Expr: DIExpr, Flags: 0, Offset);
2217
2218 if (DVI) {
2219 DVI->setExpression(DIExpr);
2220 DVI->replaceVariableLocationOp(OpIdx: 0u, NewValue: NewAddress);
2221 } else {
2222 assert(DVR);
2223 DVR->setExpression(DIExpr);
2224 DVR->replaceVariableLocationOp(OpIdx: 0u, NewValue: NewAddress);
2225 }
2226}
2227
2228void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
2229 DIBuilder &Builder, int Offset) {
2230 SmallVector<DbgValueInst *, 1> DbgUsers;
2231 SmallVector<DbgVariableRecord *, 1> DPUsers;
2232 findDbgValues(DbgValues&: DbgUsers, V: AI, DbgVariableRecords: &DPUsers);
2233
2234 // Attempt to replace dbg.values that use this alloca.
2235 for (auto *DVI : DbgUsers)
2236 updateOneDbgValueForAlloca(Loc: DVI->getDebugLoc(), DIVar: DVI->getVariable(),
2237 DIExpr: DVI->getExpression(), NewAddress: NewAllocaAddress, DVI,
2238 DVR: nullptr, Builder, Offset);
2239
2240 // Replace any DbgVariableRecords that use this alloca.
2241 for (DbgVariableRecord *DVR : DPUsers)
2242 updateOneDbgValueForAlloca(Loc: DVR->getDebugLoc(), DIVar: DVR->getVariable(),
2243 DIExpr: DVR->getExpression(), NewAddress: NewAllocaAddress, DVI: nullptr,
2244 DVR, Builder, Offset);
2245}
2246
2247/// Where possible to salvage debug information for \p I do so.
2248/// If not possible mark undef.
2249void llvm::salvageDebugInfo(Instruction &I) {
2250 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
2251 SmallVector<DbgVariableRecord *, 1> DPUsers;
2252 findDbgUsers(DbgInsts&: DbgUsers, V: &I, DbgVariableRecords: &DPUsers);
2253 salvageDebugInfoForDbgValues(I, Insns: DbgUsers, DPInsns: DPUsers);
2254}
2255
/// Try to rewrite the address component of \p Assign (a DbgAssignIntrinsic or
/// a DbgVariableRecord — hence the template) in terms of the operands of the
/// instruction it currently points at. On success the address and its
/// expression are updated; if extra values would be needed, the address is
/// killed instead.
template <typename T> static void salvageDbgAssignAddress(T *Assign) {
  Instruction *I = dyn_cast<Instruction>(Assign->getAddress());
  // Only instructions can be salvaged at the moment.
  if (!I)
    return;

  assert(!Assign->getAddressExpression()->getFragmentInfo().has_value() &&
         "address-expression shouldn't have fragment info");

  // The address component of a dbg.assign cannot be variadic.
  uint64_t CurrentLocOps = 0;
  SmallVector<Value *, 4> AdditionalValues;
  SmallVector<uint64_t, 16> Ops;
  Value *NewV = salvageDebugInfoImpl(I&: *I, CurrentLocOps, Ops, AdditionalValues);

  // Check if the salvage failed.
  if (!NewV)
    return;

  // Fold the salvage ops into the (single) address operand's expression.
  DIExpression *SalvagedExpr = DIExpression::appendOpsToArg(
      Expr: Assign->getAddressExpression(), Ops, ArgNo: 0, /*StackValue=*/false);
  assert(!SalvagedExpr->getFragmentInfo().has_value() &&
         "address-expression shouldn't have fragment info");

  // Simplify any constant arithmetic the salvage introduced.
  SalvagedExpr = SalvagedExpr->foldConstantMath();

  // Salvage succeeds if no additional values are required.
  if (AdditionalValues.empty()) {
    Assign->setAddress(NewV);
    Assign->setAddressExpression(SalvagedExpr);
  } else {
    Assign->setKillAddress();
  }
}
2290
2291void llvm::salvageDebugInfoForDbgValues(
2292 Instruction &I, ArrayRef<DbgVariableIntrinsic *> DbgUsers,
2293 ArrayRef<DbgVariableRecord *> DPUsers) {
2294 // These are arbitrary chosen limits on the maximum number of values and the
2295 // maximum size of a debug expression we can salvage up to, used for
2296 // performance reasons.
2297 const unsigned MaxDebugArgs = 16;
2298 const unsigned MaxExpressionSize = 128;
2299 bool Salvaged = false;
2300
2301 for (auto *DII : DbgUsers) {
2302 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(Val: DII)) {
2303 if (DAI->getAddress() == &I) {
2304 salvageDbgAssignAddress(Assign: DAI);
2305 Salvaged = true;
2306 }
2307 if (DAI->getValue() != &I)
2308 continue;
2309 }
2310
2311 // Do not add DW_OP_stack_value for DbgDeclare, because they are implicitly
2312 // pointing out the value as a DWARF memory location description.
2313 bool StackValue = isa<DbgValueInst>(Val: DII);
2314 auto DIILocation = DII->location_ops();
2315 assert(
2316 is_contained(DIILocation, &I) &&
2317 "DbgVariableIntrinsic must use salvaged instruction as its location");
2318 SmallVector<Value *, 4> AdditionalValues;
2319 // `I` may appear more than once in DII's location ops, and each use of `I`
2320 // must be updated in the DIExpression and potentially have additional
2321 // values added; thus we call salvageDebugInfoImpl for each `I` instance in
2322 // DIILocation.
2323 Value *Op0 = nullptr;
2324 DIExpression *SalvagedExpr = DII->getExpression();
2325 auto LocItr = find(Range&: DIILocation, Val: &I);
2326 while (SalvagedExpr && LocItr != DIILocation.end()) {
2327 SmallVector<uint64_t, 16> Ops;
2328 unsigned LocNo = std::distance(first: DIILocation.begin(), last: LocItr);
2329 uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
2330 Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
2331 if (!Op0)
2332 break;
2333 SalvagedExpr =
2334 DIExpression::appendOpsToArg(Expr: SalvagedExpr, Ops, ArgNo: LocNo, StackValue);
2335 LocItr = std::find(first: ++LocItr, last: DIILocation.end(), val: &I);
2336 }
2337 // salvageDebugInfoImpl should fail on examining the first element of
2338 // DbgUsers, or none of them.
2339 if (!Op0)
2340 break;
2341
2342 SalvagedExpr = SalvagedExpr->foldConstantMath();
2343 DII->replaceVariableLocationOp(OldValue: &I, NewValue: Op0);
2344 bool IsValidSalvageExpr = SalvagedExpr->getNumElements() <= MaxExpressionSize;
2345 if (AdditionalValues.empty() && IsValidSalvageExpr) {
2346 DII->setExpression(SalvagedExpr);
2347 } else if (isa<DbgValueInst>(Val: DII) && IsValidSalvageExpr &&
2348 DII->getNumVariableLocationOps() + AdditionalValues.size() <=
2349 MaxDebugArgs) {
2350 DII->addVariableLocationOps(NewValues: AdditionalValues, NewExpr: SalvagedExpr);
2351 } else {
2352 // Do not salvage using DIArgList for dbg.declare, as it is not currently
2353 // supported in those instructions. Also do not salvage if the resulting
2354 // DIArgList would contain an unreasonably large number of values.
2355 DII->setKillLocation();
2356 }
2357 LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
2358 Salvaged = true;
2359 }
2360 // Duplicate of above block for DbgVariableRecords.
2361 for (auto *DVR : DPUsers) {
2362 if (DVR->isDbgAssign()) {
2363 if (DVR->getAddress() == &I) {
2364 salvageDbgAssignAddress(Assign: DVR);
2365 Salvaged = true;
2366 }
2367 if (DVR->getValue() != &I)
2368 continue;
2369 }
2370
2371 // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
2372 // are implicitly pointing out the value as a DWARF memory location
2373 // description.
2374 bool StackValue =
2375 DVR->getType() != DbgVariableRecord::LocationType::Declare;
2376 auto DVRLocation = DVR->location_ops();
2377 assert(
2378 is_contained(DVRLocation, &I) &&
2379 "DbgVariableIntrinsic must use salvaged instruction as its location");
2380 SmallVector<Value *, 4> AdditionalValues;
2381 // 'I' may appear more than once in DVR's location ops, and each use of 'I'
2382 // must be updated in the DIExpression and potentially have additional
2383 // values added; thus we call salvageDebugInfoImpl for each 'I' instance in
2384 // DVRLocation.
2385 Value *Op0 = nullptr;
2386 DIExpression *SalvagedExpr = DVR->getExpression();
2387 auto LocItr = find(Range&: DVRLocation, Val: &I);
2388 while (SalvagedExpr && LocItr != DVRLocation.end()) {
2389 SmallVector<uint64_t, 16> Ops;
2390 unsigned LocNo = std::distance(first: DVRLocation.begin(), last: LocItr);
2391 uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
2392 Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
2393 if (!Op0)
2394 break;
2395 SalvagedExpr =
2396 DIExpression::appendOpsToArg(Expr: SalvagedExpr, Ops, ArgNo: LocNo, StackValue);
2397 LocItr = std::find(first: ++LocItr, last: DVRLocation.end(), val: &I);
2398 }
2399 // salvageDebugInfoImpl should fail on examining the first element of
2400 // DbgUsers, or none of them.
2401 if (!Op0)
2402 break;
2403
2404 SalvagedExpr = SalvagedExpr->foldConstantMath();
2405 DVR->replaceVariableLocationOp(OldValue: &I, NewValue: Op0);
2406 bool IsValidSalvageExpr =
2407 SalvagedExpr->getNumElements() <= MaxExpressionSize;
2408 if (AdditionalValues.empty() && IsValidSalvageExpr) {
2409 DVR->setExpression(SalvagedExpr);
2410 } else if (DVR->getType() != DbgVariableRecord::LocationType::Declare &&
2411 IsValidSalvageExpr &&
2412 DVR->getNumVariableLocationOps() + AdditionalValues.size() <=
2413 MaxDebugArgs) {
2414 DVR->addVariableLocationOps(NewValues: AdditionalValues, NewExpr: SalvagedExpr);
2415 } else {
2416 // Do not salvage using DIArgList for dbg.addr/dbg.declare, as it is
2417 // currently only valid for stack value expressions.
2418 // Also do not salvage if the resulting DIArgList would contain an
2419 // unreasonably large number of values.
2420 DVR->setKillLocation();
2421 }
2422 LLVM_DEBUG(dbgs() << "SALVAGE: " << DVR << '\n');
2423 Salvaged = true;
2424 }
2425
2426 if (Salvaged)
2427 return;
2428
2429 for (auto *DII : DbgUsers)
2430 DII->setKillLocation();
2431
2432 for (auto *DVR : DPUsers)
2433 DVR->setKillLocation();
2434}
2435
/// Translate a GEP into DWARF expression ops appended to \p Opcodes.
/// The constant part of the offset is folded into a single appended offset;
/// each variable index becomes an extra location operand (recorded in
/// \p AdditionalValues) multiplied by its scale. Returns the GEP's base
/// pointer to continue salvaging from, or nullptr if the offsets cannot be
/// collected.
Value *getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL,
                           uint64_t CurrentLocOps,
                           SmallVectorImpl<uint64_t> &Opcodes,
                           SmallVectorImpl<Value *> &AdditionalValues) {
  unsigned BitWidth = DL.getIndexSizeInBits(AS: GEP->getPointerAddressSpace());
  // Rewrite a GEP into a DIExpression.
  SmallMapVector<Value *, APInt, 4> VariableOffsets;
  APInt ConstantOffset(BitWidth, 0);
  if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset))
    return nullptr;
  // If the expression references no location operands yet, make the implicit
  // first operand (the base pointer, arg 0) explicit before mixing in
  // references to additional values.
  if (!VariableOffsets.empty() && !CurrentLocOps) {
    Opcodes.insert(I: Opcodes.begin(), IL: {dwarf::DW_OP_LLVM_arg, 0});
    CurrentLocOps = 1;
  }
  for (const auto &Offset : VariableOffsets) {
    AdditionalValues.push_back(Elt: Offset.first);
    assert(Offset.second.isStrictlyPositive() &&
           "Expected strictly positive multiplier for offset.");
    // Emit: base + (index * scale) for each variable index.
    Opcodes.append(IL: {dwarf::DW_OP_LLVM_arg, CurrentLocOps++, dwarf::DW_OP_constu,
                   Offset.second.getZExtValue(), dwarf::DW_OP_mul,
                   dwarf::DW_OP_plus});
  }
  DIExpression::appendOffset(Ops&: Opcodes, Offset: ConstantOffset.getSExtValue());
  return GEP->getOperand(i_nocapture: 0);
}
2461
2462uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode) {
2463 switch (Opcode) {
2464 case Instruction::Add:
2465 return dwarf::DW_OP_plus;
2466 case Instruction::Sub:
2467 return dwarf::DW_OP_minus;
2468 case Instruction::Mul:
2469 return dwarf::DW_OP_mul;
2470 case Instruction::SDiv:
2471 return dwarf::DW_OP_div;
2472 case Instruction::SRem:
2473 return dwarf::DW_OP_mod;
2474 case Instruction::Or:
2475 return dwarf::DW_OP_or;
2476 case Instruction::And:
2477 return dwarf::DW_OP_and;
2478 case Instruction::Xor:
2479 return dwarf::DW_OP_xor;
2480 case Instruction::Shl:
2481 return dwarf::DW_OP_shl;
2482 case Instruction::LShr:
2483 return dwarf::DW_OP_shr;
2484 case Instruction::AShr:
2485 return dwarf::DW_OP_shra;
2486 default:
2487 // TODO: Salvage from each kind of binop we know about.
2488 return 0;
2489 }
2490}
2491
2492static void handleSSAValueOperands(uint64_t CurrentLocOps,
2493 SmallVectorImpl<uint64_t> &Opcodes,
2494 SmallVectorImpl<Value *> &AdditionalValues,
2495 Instruction *I) {
2496 if (!CurrentLocOps) {
2497 Opcodes.append(IL: {dwarf::DW_OP_LLVM_arg, 0});
2498 CurrentLocOps = 1;
2499 }
2500 Opcodes.append(IL: {dwarf::DW_OP_LLVM_arg, CurrentLocOps});
2501 AdditionalValues.push_back(Elt: I->getOperand(i: 1));
2502}
2503
/// Translate a binary operator into DWARF expression ops appended to
/// \p Opcodes. A constant-integer RHS is encoded inline (add/sub fold into a
/// plain signed offset); any other RHS becomes an additional location
/// operand. Returns the LHS to continue salvaging from, or nullptr if the
/// operation has no DIExpression representation.
Value *getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps,
                             SmallVectorImpl<uint64_t> &Opcodes,
                             SmallVectorImpl<Value *> &AdditionalValues) {
  // Handle binary operations with constant integer operands as a special case.
  auto *ConstInt = dyn_cast<ConstantInt>(Val: BI->getOperand(i_nocapture: 1));
  // Values wider than 64 bits cannot be represented within a DIExpression.
  if (ConstInt && ConstInt->getBitWidth() > 64)
    return nullptr;

  Instruction::BinaryOps BinOpcode = BI->getOpcode();
  // Push any Constant Int operand onto the expression stack.
  if (ConstInt) {
    uint64_t Val = ConstInt->getSExtValue();
    // Add or Sub Instructions with a constant operand can potentially be
    // simplified.
    if (BinOpcode == Instruction::Add || BinOpcode == Instruction::Sub) {
      // Fold the constant into a signed offset so appendOffset can pick the
      // most compact encoding; Sub is encoded as adding the negated value.
      uint64_t Offset = BinOpcode == Instruction::Add ? Val : -int64_t(Val);
      DIExpression::appendOffset(Ops&: Opcodes, Offset);
      return BI->getOperand(i_nocapture: 0);
    }
    Opcodes.append(IL: {dwarf::DW_OP_constu, Val});
  } else {
    // Non-constant RHS: reference it as an extra location operand.
    handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, I: BI);
  }

  // Add salvaged binary operator to expression stack, if it has a valid
  // representation in a DIExpression.
  uint64_t DwarfBinOp = getDwarfOpForBinOp(Opcode: BinOpcode);
  if (!DwarfBinOp)
    return nullptr;
  Opcodes.push_back(Elt: DwarfBinOp);
  return BI->getOperand(i_nocapture: 0);
}
2537
2538uint64_t getDwarfOpForIcmpPred(CmpInst::Predicate Pred) {
2539 // The signedness of the operation is implicit in the typed stack, signed and
2540 // unsigned instructions map to the same DWARF opcode.
2541 switch (Pred) {
2542 case CmpInst::ICMP_EQ:
2543 return dwarf::DW_OP_eq;
2544 case CmpInst::ICMP_NE:
2545 return dwarf::DW_OP_ne;
2546 case CmpInst::ICMP_UGT:
2547 case CmpInst::ICMP_SGT:
2548 return dwarf::DW_OP_gt;
2549 case CmpInst::ICMP_UGE:
2550 case CmpInst::ICMP_SGE:
2551 return dwarf::DW_OP_ge;
2552 case CmpInst::ICMP_ULT:
2553 case CmpInst::ICMP_SLT:
2554 return dwarf::DW_OP_lt;
2555 case CmpInst::ICMP_ULE:
2556 case CmpInst::ICMP_SLE:
2557 return dwarf::DW_OP_le;
2558 default:
2559 return 0;
2560 }
2561}
2562
/// Translate an icmp into DWARF expression ops appended to \p Opcodes.
/// A constant-integer RHS is pushed as a signed or unsigned literal depending
/// on the predicate's signedness; any other RHS becomes an additional
/// location operand. Returns the LHS to continue salvaging from, or nullptr
/// if the predicate has no DWARF equivalent.
Value *getSalvageOpsForIcmpOp(ICmpInst *Icmp, uint64_t CurrentLocOps,
                              SmallVectorImpl<uint64_t> &Opcodes,
                              SmallVectorImpl<Value *> &AdditionalValues) {
  // Handle icmp operations with constant integer operands as a special case.
  auto *ConstInt = dyn_cast<ConstantInt>(Val: Icmp->getOperand(i_nocapture: 1));
  // Values wider than 64 bits cannot be represented within a DIExpression.
  if (ConstInt && ConstInt->getBitWidth() > 64)
    return nullptr;
  // Push any Constant Int operand onto the expression stack.
  if (ConstInt) {
    // Pick the literal encoding matching the comparison's signedness.
    if (Icmp->isSigned())
      Opcodes.push_back(Elt: dwarf::DW_OP_consts);
    else
      Opcodes.push_back(Elt: dwarf::DW_OP_constu);
    uint64_t Val = ConstInt->getSExtValue();
    Opcodes.push_back(Elt: Val);
  } else {
    // Non-constant RHS: reference it as an extra location operand.
    handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, I: Icmp);
  }

  // Add salvaged binary operator to expression stack, if it has a valid
  // representation in a DIExpression.
  uint64_t DwarfIcmpOp = getDwarfOpForIcmpPred(Pred: Icmp->getPredicate());
  if (!DwarfIcmpOp)
    return nullptr;
  Opcodes.push_back(Elt: DwarfIcmpOp);
  return Icmp->getOperand(i_nocapture: 0);
}
2591
/// Produce a DIExpression fragment (appended to \p Ops) that recomputes the
/// value of \p I from its operands, so debug users of \p I can be rewritten
/// to refer to an operand instead. Returns the operand to rewrite to, or
/// nullptr if \p I cannot be salvaged. Extra referenced values are pushed
/// onto \p AdditionalValues.
Value *llvm::salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps,
                                  SmallVectorImpl<uint64_t> &Ops,
                                  SmallVectorImpl<Value *> &AdditionalValues) {
  auto &M = *I.getModule();
  auto &DL = M.getDataLayout();

  if (auto *CI = dyn_cast<CastInst>(Val: &I)) {
    Value *FromValue = CI->getOperand(i_nocapture: 0);
    // No-op casts are irrelevant for debug info.
    if (CI->isNoopCast(DL)) {
      return FromValue;
    }

    // Pointer widths are handled via the equivalent-width integer type.
    Type *Type = CI->getType();
    if (Type->isPointerTy())
      Type = DL.getIntPtrType(Type);
    // Casts other than Trunc, SExt, or ZExt to scalar types cannot be salvaged.
    if (Type->isVectorTy() ||
        !(isa<TruncInst>(Val: &I) || isa<SExtInst>(Val: &I) || isa<ZExtInst>(Val: &I) ||
          isa<IntToPtrInst>(Val: &I) || isa<PtrToIntInst>(Val: &I)))
      return nullptr;

    llvm::Type *FromType = FromValue->getType();
    if (FromType->isPointerTy())
      FromType = DL.getIntPtrType(FromType);

    unsigned FromTypeBitSize = FromType->getScalarSizeInBits();
    unsigned ToTypeBitSize = Type->getScalarSizeInBits();

    // Append the DWARF ops that model the width change, sign-extending only
    // for SExt casts.
    auto ExtOps = DIExpression::getExtOps(FromSize: FromTypeBitSize, ToSize: ToTypeBitSize,
                                          Signed: isa<SExtInst>(Val: &I));
    Ops.append(in_start: ExtOps.begin(), in_end: ExtOps.end());
    return FromValue;
  }

  if (auto *GEP = dyn_cast<GetElementPtrInst>(Val: &I))
    return getSalvageOpsForGEP(GEP, DL, CurrentLocOps, Opcodes&: Ops, AdditionalValues);
  if (auto *BI = dyn_cast<BinaryOperator>(Val: &I))
    return getSalvageOpsForBinOp(BI, CurrentLocOps, Opcodes&: Ops, AdditionalValues);
  if (auto *IC = dyn_cast<ICmpInst>(Val: &I))
    return getSalvageOpsForIcmpOp(Icmp: IC, CurrentLocOps, Opcodes&: Ops, AdditionalValues);

  // *Not* to do: we should not attempt to salvage load instructions,
  // because the validity and lifetime of a dbg.value containing
  // DW_OP_deref becomes difficult to analyze. See PR40628 for examples.
  return nullptr;
}
2639
/// A replacement for a dbg.value expression; std::nullopt means the debug
/// user is left untouched by rewriteDebugUsers.
using DbgValReplacement = std::optional<DIExpression *>;
2642
2643/// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
2644/// possibly moving/undefing users to prevent use-before-def. Returns true if
2645/// changes are made.
static bool rewriteDebugUsers(
    Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
    function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr,
    function_ref<DbgValReplacement(DbgVariableRecord &DVR)> RewriteDVRExpr) {
  // Find debug users of From.
  SmallVector<DbgVariableIntrinsic *, 1> Users;
  SmallVector<DbgVariableRecord *, 1> DPUsers;
  findDbgUsers(DbgInsts&: Users, V: &From, DbgVariableRecords: &DPUsers);
  if (Users.empty() && DPUsers.empty())
    return false;

  // Prevent use-before-def of To.
  bool Changed = false;

  // Debug users that would not be dominated by To are collected here and
  // salvaged (or killed) at the end instead of being rewritten.
  SmallPtrSet<DbgVariableIntrinsic *, 1> UndefOrSalvage;
  SmallPtrSet<DbgVariableRecord *, 1> UndefOrSalvageDVR;
  if (isa<Instruction>(Val: &To)) {
    bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint;

    for (auto *DII : Users) {
      // It's common to see a debug user between From and DomPoint. Move it
      // after DomPoint to preserve the variable update without any reordering.
      if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) {
        LLVM_DEBUG(dbgs() << "MOVE: " << *DII << '\n');
        DII->moveAfter(MovePos: &DomPoint);
        Changed = true;

        // Users which otherwise aren't dominated by the replacement value must
        // be salvaged or deleted.
      } else if (!DT.dominates(Def: &DomPoint, User: DII)) {
        UndefOrSalvage.insert(Ptr: DII);
      }
    }

    // DbgVariableRecord implementation of the above.
    for (auto *DVR : DPUsers) {
      Instruction *MarkedInstr = DVR->getMarker()->MarkedInstr;
      Instruction *NextNonDebug = MarkedInstr;
      // The next instruction might still be a dbg.declare, skip over it.
      if (isa<DbgVariableIntrinsic>(Val: NextNonDebug))
        NextNonDebug = NextNonDebug->getNextNonDebugInstruction();

      if (DomPointAfterFrom && NextNonDebug == &DomPoint) {
        LLVM_DEBUG(dbgs() << "MOVE: " << *DVR << '\n');
        DVR->removeFromParent();
        // Ensure there's a marker.
        DomPoint.getParent()->insertDbgRecordAfter(DR: DVR, I: &DomPoint);
        Changed = true;
      } else if (!DT.dominates(Def: &DomPoint, User: MarkedInstr)) {
        UndefOrSalvageDVR.insert(Ptr: DVR);
      }
    }
  }

  // Update debug users without use-before-def risk.
  for (auto *DII : Users) {
    if (UndefOrSalvage.count(Ptr: DII))
      continue;

    // A nullopt replacement expression means "leave this user unchanged".
    DbgValReplacement DVRepl = RewriteExpr(*DII);
    if (!DVRepl)
      continue;

    DII->replaceVariableLocationOp(OldValue: &From, NewValue: &To);
    DII->setExpression(*DVRepl);
    LLVM_DEBUG(dbgs() << "REWRITE: " << *DII << '\n');
    Changed = true;
  }
  for (auto *DVR : DPUsers) {
    if (UndefOrSalvageDVR.count(Ptr: DVR))
      continue;

    DbgValReplacement DVRepl = RewriteDVRExpr(*DVR);
    if (!DVRepl)
      continue;

    DVR->replaceVariableLocationOp(OldValue: &From, NewValue: &To);
    DVR->setExpression(*DVRepl);
    LLVM_DEBUG(dbgs() << "REWRITE: " << DVR << '\n');
    Changed = true;
  }

  if (!UndefOrSalvage.empty() || !UndefOrSalvageDVR.empty()) {
    // Try to salvage the remaining debug users.
    salvageDebugInfo(I&: From);
    Changed = true;
  }

  return Changed;
}
2736
2737/// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
2738/// losslessly preserve the bits and semantics of the value. This predicate is
2739/// symmetric, i.e swapping \p FromTy and \p ToTy should give the same result.
2740///
2741/// Note that Type::canLosslesslyBitCastTo is not suitable here because it
2742/// allows semantically unequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
2743/// and also does not allow lossless pointer <-> integer conversions.
2744static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy,
2745 Type *ToTy) {
2746 // Trivially compatible types.
2747 if (FromTy == ToTy)
2748 return true;
2749
2750 // Handle compatible pointer <-> integer conversions.
2751 if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
2752 bool SameSize = DL.getTypeSizeInBits(Ty: FromTy) == DL.getTypeSizeInBits(Ty: ToTy);
2753 bool LosslessConversion = !DL.isNonIntegralPointerType(Ty: FromTy) &&
2754 !DL.isNonIntegralPointerType(Ty: ToTy);
2755 return SameSize && LosslessConversion;
2756 }
2757
2758 // TODO: This is not exhaustive.
2759 return false;
2760}
2761
/// Point every debug user of \p From at \p To, adjusting DIExpressions when
/// the two values differ in integer width. Returns true if any debug user
/// was changed.
bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
                                 Instruction &DomPoint, DominatorTree &DT) {
  // Exit early if From has no debug users.
  if (!From.isUsedByMetadata())
    return false;

  assert(&From != &To && "Can't replace something with itself");

  Type *FromTy = From.getType();
  Type *ToTy = To.getType();

  // Expression rewriters that keep each user's expression unchanged, used
  // when the value substitution alone is sufficient.
  auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
    return DII.getExpression();
  };
  auto IdentityDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
    return DVR.getExpression();
  };

  // Handle no-op conversions.
  Module &M = *From.getModule();
  const DataLayout &DL = M.getDataLayout();
  if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
    return rewriteDebugUsers(From, To, DomPoint, DT, RewriteExpr: Identity, RewriteDVRExpr: IdentityDVR);

  // Handle integer-to-integer widening and narrowing.
  // FIXME: Use DW_OP_convert when it's available everywhere.
  if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
    uint64_t FromBits = FromTy->getPrimitiveSizeInBits();
    uint64_t ToBits = ToTy->getPrimitiveSizeInBits();
    assert(FromBits != ToBits && "Unexpected no-op conversion");

    // When the width of the result grows, assume that a debugger will only
    // access the low `FromBits` bits when inspecting the source variable.
    if (FromBits < ToBits)
      return rewriteDebugUsers(From, To, DomPoint, DT, RewriteExpr: Identity, RewriteDVRExpr: IdentityDVR);

    // The width of the result has shrunk. Use sign/zero extension to describe
    // the source variable's high bits.
    auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
      DILocalVariable *Var = DII.getVariable();

      // Without knowing signedness, sign/zero extension isn't possible.
      auto Signedness = Var->getSignedness();
      if (!Signedness)
        return std::nullopt;

      bool Signed = *Signedness == DIBasicType::Signedness::Signed;
      return DIExpression::appendExt(Expr: DII.getExpression(), FromSize: ToBits, ToSize: FromBits,
                                     Signed);
    };
    // RemoveDIs: duplicate implementation working on DbgVariableRecords rather
    // than on dbg.value intrinsics.
    auto SignOrZeroExtDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
      DILocalVariable *Var = DVR.getVariable();

      // Without knowing signedness, sign/zero extension isn't possible.
      auto Signedness = Var->getSignedness();
      if (!Signedness)
        return std::nullopt;

      bool Signed = *Signedness == DIBasicType::Signedness::Signed;
      return DIExpression::appendExt(Expr: DVR.getExpression(), FromSize: ToBits, ToSize: FromBits,
                                     Signed);
    };
    return rewriteDebugUsers(From, To, DomPoint, DT, RewriteExpr: SignOrZeroExt,
                             RewriteDVRExpr: SignOrZeroExtDVR);
  }

  // TODO: Floating-point conversions, vectors.
  return false;
}
2833
2834bool llvm::handleUnreachableTerminator(
2835 Instruction *I, SmallVectorImpl<Value *> &PoisonedValues) {
2836 bool Changed = false;
2837 // RemoveDIs: erase debug-info on this instruction manually.
2838 I->dropDbgRecords();
2839 for (Use &U : I->operands()) {
2840 Value *Op = U.get();
2841 if (isa<Instruction>(Val: Op) && !Op->getType()->isTokenTy()) {
2842 U.set(PoisonValue::get(T: Op->getType()));
2843 PoisonedValues.push_back(Elt: Op);
2844 Changed = true;
2845 }
2846 }
2847
2848 return Changed;
2849}
2850
/// Erase every instruction in \p BB except the terminator, EH pads, and
/// token-typed instructions. Returns the number of instructions erased.
unsigned llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
  unsigned NumDeadInst = 0;
  // Delete the instructions backwards, as it has a reduced likelihood of
  // having to update as many def-use and use-def chains.
  Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
  SmallVector<Value *> Uses;
  handleUnreachableTerminator(I: EndInst, PoisonedValues&: Uses);

  while (EndInst != &BB->front()) {
    // Delete the next to last instruction.
    Instruction *Inst = &*--EndInst->getIterator();
    // Replace remaining uses with poison so the erase below is legal.
    if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
      Inst->replaceAllUsesWith(V: PoisonValue::get(T: Inst->getType()));
    if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
      // EHPads can't have DbgVariableRecords attached to them, but it might be
      // possible for things with token type.
      Inst->dropDbgRecords();
      // Keep this instruction: it becomes the new deletion boundary.
      EndInst = Inst;
      continue;
    }
    ++NumDeadInst;
    // RemoveDIs: erasing debug-info must be done manually.
    Inst->dropDbgRecords();
    Inst->eraseFromParent();
  }
  return NumDeadInst;
}
2878
/// Replace \p I and everything after it in its block with an 'unreachable'
/// terminator, updating PHIs in successors and (optionally) the dominator
/// tree and MemorySSA. Returns the number of instructions removed.
unsigned llvm::changeToUnreachable(Instruction *I, bool PreserveLCSSA,
                                   DomTreeUpdater *DTU,
                                   MemorySSAUpdater *MSSAU) {
  BasicBlock *BB = I->getParent();

  if (MSSAU)
    MSSAU->changeToUnreachable(I);

  SmallSet<BasicBlock *, 8> UniqueSuccessors;

  // Loop over all of the successors, removing BB's entry from any PHI
  // nodes.
  for (BasicBlock *Successor : successors(BB)) {
    Successor->removePredecessor(Pred: BB, KeepOneInputPHIs: PreserveLCSSA);
    if (DTU)
      UniqueSuccessors.insert(Ptr: Successor);
  }
  auto *UI = new UnreachableInst(I->getContext(), I->getIterator());
  UI->setDebugLoc(I->getDebugLoc());

  // All instructions after this are dead.
  unsigned NumInstrsRemoved = 0;
  BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
  while (BBI != BBE) {
    // Replace uses with poison before erasing, since later instructions in
    // the block may still reference the one being removed.
    if (!BBI->use_empty())
      BBI->replaceAllUsesWith(V: PoisonValue::get(T: BBI->getType()));
    BBI++->eraseFromParent();
    ++NumInstrsRemoved;
  }
  if (DTU) {
    // BB no longer branches anywhere; delete all its outgoing edges.
    SmallVector<DominatorTree::UpdateType, 8> Updates;
    Updates.reserve(N: UniqueSuccessors.size());
    for (BasicBlock *UniqueSuccessor : UniqueSuccessors)
      Updates.push_back(Elt: {DominatorTree::Delete, BB, UniqueSuccessor});
    DTU->applyUpdates(Updates);
  }
  BB->flushTerminatorDbgRecords();
  return NumInstrsRemoved;
}
2918
2919CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {
2920 SmallVector<Value *, 8> Args(II->args());
2921 SmallVector<OperandBundleDef, 1> OpBundles;
2922 II->getOperandBundlesAsDefs(Defs&: OpBundles);
2923 CallInst *NewCall = CallInst::Create(Ty: II->getFunctionType(),
2924 Func: II->getCalledOperand(), Args, Bundles: OpBundles);
2925 NewCall->setCallingConv(II->getCallingConv());
2926 NewCall->setAttributes(II->getAttributes());
2927 NewCall->setDebugLoc(II->getDebugLoc());
2928 NewCall->copyMetadata(SrcInst: *II);
2929
2930 // If the invoke had profile metadata, try converting them for CallInst.
2931 uint64_t TotalWeight;
2932 if (NewCall->extractProfTotalWeight(TotalVal&: TotalWeight)) {
2933 // Set the total weight if it fits into i32, otherwise reset.
2934 MDBuilder MDB(NewCall->getContext());
2935 auto NewWeights = uint32_t(TotalWeight) != TotalWeight
2936 ? nullptr
2937 : MDB.createBranchWeights(Weights: {uint32_t(TotalWeight)});
2938 NewCall->setMetadata(KindID: LLVMContext::MD_prof, Node: NewWeights);
2939 }
2940
2941 return NewCall;
2942}
2943
2944// changeToCall - Convert the specified invoke into a normal call.
// changeToCall - Convert the specified invoke into a normal call.
CallInst *llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) {
  CallInst *NewCall = createCallMatchingInvoke(II);
  NewCall->takeName(V: II);
  NewCall->insertBefore(InsertPos: II->getIterator());
  II->replaceAllUsesWith(V: NewCall);

  // Follow the call by a branch to the normal destination.
  BasicBlock *NormalDestBB = II->getNormalDest();
  auto *BI = BranchInst::Create(IfTrue: NormalDestBB, InsertBefore: II->getIterator());
  // Although it takes place after the call itself, the new branch is still
  // performing part of the control-flow functionality of the invoke, so we use
  // II's DebugLoc.
  BI->setDebugLoc(II->getDebugLoc());

  // Update PHI nodes in the unwind destination
  BasicBlock *BB = II->getParent();
  BasicBlock *UnwindDestBB = II->getUnwindDest();
  UnwindDestBB->removePredecessor(Pred: BB);
  II->eraseFromParent();
  // The unwind edge is gone: BB can only fall through to the normal dest.
  if (DTU)
    DTU->applyUpdates(Updates: {{DominatorTree::Delete, BB, UnwindDestBB}});
  return NewCall;
}
2968
/// Convert the call \p CI into an invoke whose unwind edge is \p UnwindEdge,
/// splitting CI's block so the invoke can terminate it. Returns the new
/// block holding the instructions that followed the call.
BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
                                                   BasicBlock *UnwindEdge,
                                                   DomTreeUpdater *DTU) {
  BasicBlock *BB = CI->getParent();

  // Convert this function call into an invoke instruction. First, split the
  // basic block.
  BasicBlock *Split = SplitBlock(Old: BB, SplitPt: CI, DTU, /*LI=*/nullptr, /*MSSAU*/ nullptr,
                                BBName: CI->getName() + ".noexc");

  // Delete the unconditional branch inserted by SplitBlock
  BB->back().eraseFromParent();

  // Create the new invoke instruction.
  SmallVector<Value *, 8> InvokeArgs(CI->args());
  SmallVector<OperandBundleDef, 1> OpBundles;

  CI->getOperandBundlesAsDefs(Defs&: OpBundles);

  // Note: we're round tripping operand bundles through memory here, and that
  // can potentially be avoided with a cleverer API design that we do not have
  // as of this time.

  InvokeInst *II =
      InvokeInst::Create(Ty: CI->getFunctionType(), Func: CI->getCalledOperand(), IfNormal: Split,
                         IfException: UnwindEdge, Args: InvokeArgs, Bundles: OpBundles, NameStr: CI->getName(), InsertBefore: BB);
  II->setDebugLoc(CI->getDebugLoc());
  II->setCallingConv(CI->getCallingConv());
  II->setAttributes(CI->getAttributes());
  II->setMetadata(KindID: LLVMContext::MD_prof, Node: CI->getMetadata(KindID: LLVMContext::MD_prof));

  if (DTU)
    DTU->applyUpdates(Updates: {{DominatorTree::Insert, BB, UnwindEdge}});

  // Make sure that anything using the call now uses the invoke! This also
  // updates the CallGraph if present, because it uses a WeakTrackingVH.
  CI->replaceAllUsesWith(V: II);

  // Delete the original call
  Split->front().eraseFromParent();
  return Split;
}
3011
/// Walk the CFG from F's entry block, recording every reachable block in
/// \p Reachable. Along the way, canonicalize obviously-dead code: false
/// assumes, no-return calls, null/undef callees and null/undef stores become
/// 'unreachable'; nounwind invokes become plain calls; duplicate catchswitch
/// handlers are pruned; terminators are constant-folded. Returns true if any
/// change was made.
static bool markAliveBlocks(Function &F,
                            SmallPtrSetImpl<BasicBlock *> &Reachable,
                            DomTreeUpdater *DTU = nullptr) {
  SmallVector<BasicBlock*, 128> Worklist;
  BasicBlock *BB = &F.front();
  Worklist.push_back(Elt: BB);
  Reachable.insert(Ptr: BB);
  bool Changed = false;
  do {
    BB = Worklist.pop_back_val();

    // Do a quick scan of the basic block, turning any obviously unreachable
    // instructions into LLVM unreachable insts. The instruction combining pass
    // canonicalizes unreachable insts into stores to null or undef.
    for (Instruction &I : *BB) {
      if (auto *CI = dyn_cast<CallInst>(Val: &I)) {
        Value *Callee = CI->getCalledOperand();
        // Handle intrinsic calls.
        if (Function *F = dyn_cast<Function>(Val: Callee)) {
          auto IntrinsicID = F->getIntrinsicID();
          // Assumptions that are known to be false are equivalent to
          // unreachable. Also, if the condition is undefined, then we make the
          // choice most beneficial to the optimizer, and choose that to also be
          // unreachable.
          if (IntrinsicID == Intrinsic::assume) {
            if (match(V: CI->getArgOperand(i: 0), P: m_CombineOr(L: m_Zero(), R: m_Undef()))) {
              // Don't insert a call to llvm.trap right before the unreachable.
              changeToUnreachable(I: CI, PreserveLCSSA: false, DTU);
              Changed = true;
              // The rest of the block is gone; stop scanning it.
              break;
            }
          } else if (IntrinsicID == Intrinsic::experimental_guard) {
            // A call to the guard intrinsic bails out of the current
            // compilation unit if the predicate passed to it is false. If the
            // predicate is a constant false, then we know the guard will bail
            // out of the current compile unconditionally, so all code following
            // it is dead.
            //
            // Note: unlike in llvm.assume, it is not "obviously profitable" for
            // guards to treat `undef` as `false` since a guard on `undef` can
            // still be useful for widening.
            if (match(V: CI->getArgOperand(i: 0), P: m_Zero()))
              if (!isa<UnreachableInst>(Val: CI->getNextNode())) {
                changeToUnreachable(I: CI->getNextNode(), PreserveLCSSA: false, DTU);
                Changed = true;
                break;
              }
          }
        } else if ((isa<ConstantPointerNull>(Val: Callee) &&
                    !NullPointerIsDefined(F: CI->getFunction(),
                                          AS: cast<PointerType>(Val: Callee->getType())
                                              ->getAddressSpace())) ||
                   isa<UndefValue>(Val: Callee)) {
          // Calling null (where null is undefined) or undef is unreachable.
          changeToUnreachable(I: CI, PreserveLCSSA: false, DTU);
          Changed = true;
          break;
        }
        if (CI->doesNotReturn() && !CI->isMustTailCall()) {
          // If we found a call to a no-return function, insert an unreachable
          // instruction after it. Make sure there isn't *already* one there
          // though.
          if (!isa<UnreachableInst>(Val: CI->getNextNonDebugInstruction())) {
            // Don't insert a call to llvm.trap right before the unreachable.
            changeToUnreachable(I: CI->getNextNonDebugInstruction(), PreserveLCSSA: false, DTU);
            Changed = true;
          }
          break;
        }
      } else if (auto *SI = dyn_cast<StoreInst>(Val: &I)) {
        // Store to undef and store to null are undefined and used to signal
        // that they should be changed to unreachable by passes that can't
        // modify the CFG.

        // Don't touch volatile stores.
        if (SI->isVolatile()) continue;

        Value *Ptr = SI->getOperand(i_nocapture: 1);

        if (isa<UndefValue>(Val: Ptr) ||
            (isa<ConstantPointerNull>(Val: Ptr) &&
             !NullPointerIsDefined(F: SI->getFunction(),
                                   AS: SI->getPointerAddressSpace()))) {
          changeToUnreachable(I: SI, PreserveLCSSA: false, DTU);
          Changed = true;
          break;
        }
      }
    }

    Instruction *Terminator = BB->getTerminator();
    if (auto *II = dyn_cast<InvokeInst>(Val: Terminator)) {
      // Turn invokes that call 'nounwind' functions into ordinary calls.
      Value *Callee = II->getCalledOperand();
      if ((isa<ConstantPointerNull>(Val: Callee) &&
           !NullPointerIsDefined(F: BB->getParent())) ||
          isa<UndefValue>(Val: Callee)) {
        changeToUnreachable(I: II, PreserveLCSSA: false, DTU);
        Changed = true;
      } else {
        if (II->doesNotReturn() &&
            !isa<UnreachableInst>(Val: II->getNormalDest()->front())) {
          // If we found an invoke of a no-return function,
          // create a new empty basic block with an `unreachable` terminator,
          // and set it as the normal destination for the invoke,
          // unless that is already the case.
          // Note that the original normal destination could have other uses.
          BasicBlock *OrigNormalDest = II->getNormalDest();
          OrigNormalDest->removePredecessor(Pred: II->getParent());
          LLVMContext &Ctx = II->getContext();
          BasicBlock *UnreachableNormalDest = BasicBlock::Create(
              Context&: Ctx, Name: OrigNormalDest->getName() + ".unreachable",
              Parent: II->getFunction(), InsertBefore: OrigNormalDest);
          auto *UI = new UnreachableInst(Ctx, UnreachableNormalDest);
          UI->setDebugLoc(DebugLoc::getTemporary());
          II->setNormalDest(UnreachableNormalDest);
          if (DTU)
            DTU->applyUpdates(
                Updates: {{DominatorTree::Delete, BB, OrigNormalDest},
                 {DominatorTree::Insert, BB, UnreachableNormalDest}});
          Changed = true;
        }
        if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(F: &F)) {
          if (II->use_empty() && !II->mayHaveSideEffects()) {
            // jump to the normal destination branch.
            BasicBlock *NormalDestBB = II->getNormalDest();
            BasicBlock *UnwindDestBB = II->getUnwindDest();
            BranchInst::Create(IfTrue: NormalDestBB, InsertBefore: II->getIterator());
            UnwindDestBB->removePredecessor(Pred: II->getParent());
            II->eraseFromParent();
            if (DTU)
              DTU->applyUpdates(Updates: {{DominatorTree::Delete, BB, UnwindDestBB}});
          } else
            changeToCall(II, DTU);
          Changed = true;
        }
      }
    } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Val: Terminator)) {
      // Remove catchpads which cannot be reached.
      // DenseMap traits that compare catchpads by operand identity, so
      // structurally identical handlers dedupe to one entry.
      struct CatchPadDenseMapInfo {
        static CatchPadInst *getEmptyKey() {
          return DenseMapInfo<CatchPadInst *>::getEmptyKey();
        }

        static CatchPadInst *getTombstoneKey() {
          return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
        }

        static unsigned getHashValue(CatchPadInst *CatchPad) {
          return static_cast<unsigned>(hash_combine_range(
              first: CatchPad->value_op_begin(), last: CatchPad->value_op_end()));
        }

        static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
          if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
              RHS == getEmptyKey() || RHS == getTombstoneKey())
            return LHS == RHS;
          return LHS->isIdenticalTo(I: RHS);
        }
      };

      SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases;
      // Set of unique CatchPads.
      SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
                    CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
          HandlerSet;
      detail::DenseSetEmpty Empty;
      for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
                                             E = CatchSwitch->handler_end();
           I != E; ++I) {
        BasicBlock *HandlerBB = *I;
        if (DTU)
          ++NumPerSuccessorCases[HandlerBB];
        auto *CatchPad = cast<CatchPadInst>(Val: HandlerBB->getFirstNonPHIIt());
        if (!HandlerSet.insert(KV: {CatchPad, Empty}).second) {
          // Duplicate handler: drop it and re-examine the current position
          // after removeHandler shifts the remaining handlers left.
          if (DTU)
            --NumPerSuccessorCases[HandlerBB];
          CatchSwitch->removeHandler(HI: I);
          --I;
          --E;
          Changed = true;
        }
      }
      if (DTU) {
        // Only delete an edge if no handler case targets that block anymore.
        std::vector<DominatorTree::UpdateType> Updates;
        for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
          if (I.second == 0)
            Updates.push_back(x: {DominatorTree::Delete, BB, I.first});
        DTU->applyUpdates(Updates);
      }
    }

    Changed |= ConstantFoldTerminator(BB, DeleteDeadConditions: true, TLI: nullptr, DTU);
    for (BasicBlock *Successor : successors(BB))
      if (Reachable.insert(Ptr: Successor).second)
        Worklist.push_back(Elt: Successor);
  } while (!Worklist.empty());
  return Changed;
}
3210
/// Remove the unwind edge from \p BB's terminator (invoke, cleanupret, or
/// catchswitch), replacing the terminator with an equivalent one that has no
/// unwind destination. Returns the new terminator.
Instruction *llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) {
  Instruction *TI = BB->getTerminator();

  if (auto *II = dyn_cast<InvokeInst>(Val: TI))
    return changeToCall(II, DTU);

  Instruction *NewTI;
  BasicBlock *UnwindDest;

  if (auto *CRI = dyn_cast<CleanupReturnInst>(Val: TI)) {
    // cleanupret with no unwind dest means "unwind to caller".
    NewTI = CleanupReturnInst::Create(CleanupPad: CRI->getCleanupPad(), UnwindBB: nullptr, InsertBefore: CRI->getIterator());
    UnwindDest = CRI->getUnwindDest();
  } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Val: TI)) {
    // Rebuild the catchswitch without an unwind destination, keeping all
    // handlers.
    auto *NewCatchSwitch = CatchSwitchInst::Create(
        ParentPad: CatchSwitch->getParentPad(), UnwindDest: nullptr, NumHandlers: CatchSwitch->getNumHandlers(),
        NameStr: CatchSwitch->getName(), InsertBefore: CatchSwitch->getIterator());
    for (BasicBlock *PadBB : CatchSwitch->handlers())
      NewCatchSwitch->addHandler(Dest: PadBB);

    NewTI = NewCatchSwitch;
    UnwindDest = CatchSwitch->getUnwindDest();
  } else {
    llvm_unreachable("Could not find unwind successor");
  }

  NewTI->takeName(V: TI);
  NewTI->setDebugLoc(TI->getDebugLoc());
  UnwindDest->removePredecessor(Pred: BB);
  TI->replaceAllUsesWith(V: NewTI);
  TI->eraseFromParent();
  if (DTU)
    DTU->applyUpdates(Updates: {{DominatorTree::Delete, BB, UnwindDest}});
  return NewTI;
}
3245
3246/// removeUnreachableBlocks - Remove blocks that are not reachable, even
3247/// if they are in a dead cycle. Return true if a change was made, false
3248/// otherwise.
3249bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU,
3250 MemorySSAUpdater *MSSAU) {
3251 SmallPtrSet<BasicBlock *, 16> Reachable;
3252 bool Changed = markAliveBlocks(F, Reachable, DTU);
3253
3254 // If there are unreachable blocks in the CFG...
3255 if (Reachable.size() == F.size())
3256 return Changed;
3257
3258 assert(Reachable.size() < F.size());
3259
3260 // Are there any blocks left to actually delete?
3261 SmallSetVector<BasicBlock *, 8> BlocksToRemove;
3262 for (BasicBlock &BB : F) {
3263 // Skip reachable basic blocks
3264 if (Reachable.count(Ptr: &BB))
3265 continue;
3266 // Skip already-deleted blocks
3267 if (DTU && DTU->isBBPendingDeletion(DelBB: &BB))
3268 continue;
3269 BlocksToRemove.insert(X: &BB);
3270 }
3271
3272 if (BlocksToRemove.empty())
3273 return Changed;
3274
3275 Changed = true;
3276 NumRemoved += BlocksToRemove.size();
3277
3278 if (MSSAU)
3279 MSSAU->removeBlocks(DeadBlocks: BlocksToRemove);
3280
3281 DeleteDeadBlocks(BBs: BlocksToRemove.takeVector(), DTU);
3282
3283 return Changed;
3284}
3285
/// Merge the metadata of two instructions, keeping the result on K.
/// \p DoesKMove indicates K is being hoisted/sunk to a program point where J
/// also executed, so control-dependent kinds (tbaa, noalias, range, nonnull,
/// align, ...) must be intersected/generalized rather than kept as-is.
/// If AAOnly is set, only intersect alias analysis metadata and preserve other
/// known metadata. Unknown metadata is always dropped.
static void combineMetadata(Instruction *K, const Instruction *J,
                            bool DoesKMove, bool AAOnly = false) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  K->getAllMetadataOtherThanDebugLoc(MDs&: Metadata);
  // This loop only visits kinds already present on K. Kinds that may exist
  // on J alone (invariant.group, mmra, memprof, callsite, prof) are handled
  // separately after the loop.
  for (const auto &MD : Metadata) {
    unsigned Kind = MD.first;
    MDNode *JMD = J->getMetadata(KindID: Kind);
    MDNode *KMD = MD.second;

    // TODO: Assert that this switch is exhaustive for fixed MD kinds.
    switch (Kind) {
    default:
      K->setMetadata(KindID: Kind, Node: nullptr); // Remove unknown metadata
      break;
    case LLVMContext::MD_dbg:
      llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
    case LLVMContext::MD_DIAssignID:
      if (!AAOnly)
        K->mergeDIAssignID(SourceInstructions: J);
      break;
    case LLVMContext::MD_tbaa:
      // When K moves, widen the TBAA tag to cover both accesses.
      if (DoesKMove)
        K->setMetadata(KindID: Kind, Node: MDNode::getMostGenericTBAA(A: JMD, B: KMD));
      break;
    case LLVMContext::MD_alias_scope:
      if (DoesKMove)
        K->setMetadata(KindID: Kind, Node: MDNode::getMostGenericAliasScope(A: JMD, B: KMD));
      break;
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_mem_parallel_loop_access:
      // Keep only the scopes/loops asserted by both instructions.
      if (DoesKMove)
        K->setMetadata(KindID: Kind, Node: MDNode::intersect(A: JMD, B: KMD));
      break;
    case LLVMContext::MD_access_group:
      if (DoesKMove)
        K->setMetadata(KindID: LLVMContext::MD_access_group,
                       Node: intersectAccessGroups(Inst1: K, Inst2: J));
      break;
    case LLVMContext::MD_range:
      // A !noundef on K lets us keep K's own (possibly tighter) range even
      // when K moves; otherwise merge to the most generic range.
      if (!AAOnly && (DoesKMove || !K->hasMetadata(KindID: LLVMContext::MD_noundef)))
        K->setMetadata(KindID: Kind, Node: MDNode::getMostGenericRange(A: JMD, B: KMD));
      break;
    case LLVMContext::MD_fpmath:
      if (!AAOnly)
        K->setMetadata(KindID: Kind, Node: MDNode::getMostGenericFPMath(A: JMD, B: KMD));
      break;
    case LLVMContext::MD_invariant_load:
      // If K moves, only set the !invariant.load if it is present in both
      // instructions.
      if (DoesKMove)
        K->setMetadata(KindID: Kind, Node: JMD);
      break;
    case LLVMContext::MD_nonnull:
      if (!AAOnly && (DoesKMove || !K->hasMetadata(KindID: LLVMContext::MD_noundef)))
        K->setMetadata(KindID: Kind, Node: JMD);
      break;
    case LLVMContext::MD_invariant_group:
      // Preserve !invariant.group in K.
      break;
    // Keep empty cases for prof, mmra, memprof, and callsite to prevent them
    // from being removed as unknown metadata. The actual merging is handled
    // separately below.
    case LLVMContext::MD_prof:
    case LLVMContext::MD_mmra:
    case LLVMContext::MD_memprof:
    case LLVMContext::MD_callsite:
      break;
    case LLVMContext::MD_align:
      if (!AAOnly && (DoesKMove || !K->hasMetadata(KindID: LLVMContext::MD_noundef)))
        K->setMetadata(
            KindID: Kind, Node: MDNode::getMostGenericAlignmentOrDereferenceable(A: JMD, B: KMD));
      break;
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      if (!AAOnly && DoesKMove)
        K->setMetadata(KindID: Kind,
                       Node: MDNode::getMostGenericAlignmentOrDereferenceable(A: JMD, B: KMD));
      break;
    case LLVMContext::MD_preserve_access_index:
      // Preserve !preserve.access.index in K.
      break;
    case LLVMContext::MD_noundef:
      // If K does move, keep noundef if it is present in both instructions.
      if (!AAOnly && DoesKMove)
        K->setMetadata(KindID: Kind, Node: JMD);
      break;
    case LLVMContext::MD_nontemporal:
      // Preserve !nontemporal if it is present on both instructions.
      if (!AAOnly)
        K->setMetadata(KindID: Kind, Node: JMD);
      break;
    case LLVMContext::MD_noalias_addrspace:
      if (DoesKMove)
        K->setMetadata(KindID: Kind,
                       Node: MDNode::getMostGenericNoaliasAddrspace(A: JMD, B: KMD));
      break;
    }
  }
  // Set !invariant.group from J if J has it. If both instructions have it
  // then we will just pick it from J - even when they are different.
  // Also make sure that K is load or store - f.e. combining bitcast with load
  // could produce bitcast with invariant.group metadata, which is invalid.
  // FIXME: we should try to preserve both invariant.group md if they are
  // different, but right now instruction can only have one invariant.group.
  if (auto *JMD = J->getMetadata(KindID: LLVMContext::MD_invariant_group))
    if (isa<LoadInst>(Val: K) || isa<StoreInst>(Val: K))
      K->setMetadata(KindID: LLVMContext::MD_invariant_group, Node: JMD);

  // Merge MMRAs.
  // This is handled separately because we also want to handle cases where K
  // doesn't have tags but J does.
  auto JMMRA = J->getMetadata(KindID: LLVMContext::MD_mmra);
  auto KMMRA = K->getMetadata(KindID: LLVMContext::MD_mmra);
  if (JMMRA || KMMRA) {
    K->setMetadata(KindID: LLVMContext::MD_mmra,
                   Node: MMRAMetadata::combine(Ctx&: K->getContext(), A: JMMRA, B: KMMRA));
  }

  // Merge memprof metadata.
  // Handle separately to support cases where only one instruction has the
  // metadata.
  auto *JMemProf = J->getMetadata(KindID: LLVMContext::MD_memprof);
  auto *KMemProf = K->getMetadata(KindID: LLVMContext::MD_memprof);
  if (!AAOnly && (JMemProf || KMemProf)) {
    K->setMetadata(KindID: LLVMContext::MD_memprof,
                   Node: MDNode::getMergedMemProfMetadata(A: KMemProf, B: JMemProf));
  }

  // Merge callsite metadata.
  // Handle separately to support cases where only one instruction has the
  // metadata.
  auto *JCallSite = J->getMetadata(KindID: LLVMContext::MD_callsite);
  auto *KCallSite = K->getMetadata(KindID: LLVMContext::MD_callsite);
  if (!AAOnly && (JCallSite || KCallSite)) {
    K->setMetadata(KindID: LLVMContext::MD_callsite,
                   Node: MDNode::getMergedCallsiteMetadata(A: KCallSite, B: JCallSite));
  }

  // Merge prof metadata.
  // Handle separately to support cases where only one instruction has the
  // metadata.
  auto *JProf = J->getMetadata(KindID: LLVMContext::MD_prof);
  auto *KProf = K->getMetadata(KindID: LLVMContext::MD_prof);
  if (!AAOnly && (JProf || KProf)) {
    K->setMetadata(KindID: LLVMContext::MD_prof,
                   Node: MDNode::getMergedProfMetadata(A: KProf, B: JProf, AInstr: K, BInstr: J));
  }
}
3436
3437void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
3438 bool DoesKMove) {
3439 combineMetadata(K, J, DoesKMove);
3440}
3441
3442void llvm::combineAAMetadata(Instruction *K, const Instruction *J) {
3443 combineMetadata(K, J, /*DoesKMove=*/true, /*AAOnly=*/true);
3444}
3445
/// Copy all metadata from \p Source onto \p Dest, translating kinds that are
/// sensitive to the load's result type (!nonnull, !range, !align,
/// !dereferenceable*) and copying the rest verbatim.
void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  Source.getAllMetadata(MDs&: MD);
  MDBuilder MDB(Dest.getContext());
  Type *NewType = Dest.getType();
  const DataLayout &DL = Source.getDataLayout();
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its type*.
    // The only metadata it makes sense to drop is metadata which is invalidated
    // when the pointer type changes. This should essentially never be the case
    // in LLVM, but we explicitly switch over only known metadata to be
    // conservatively correct. If you are adding metadata to LLVM which pertains
    // to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
    case LLVMContext::MD_access_group:
    case LLVMContext::MD_noundef:
    case LLVMContext::MD_noalias_addrspace:
      // All of these directly apply.
      Dest.setMetadata(KindID: ID, Node: N);
      break;

    case LLVMContext::MD_nonnull:
      // May be translated to a !range node if Dest loads an integer.
      copyNonnullMetadata(OldLI: Source, N, NewLI&: Dest);
      break;

    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These only directly apply if the new type is also a pointer.
      if (NewType->isPointerTy())
        Dest.setMetadata(KindID: ID, Node: N);
      break;

    case LLVMContext::MD_range:
      // May be translated to a !nonnull node if Dest loads a pointer.
      copyRangeMetadata(DL, OldLI: Source, N, NewLI&: Dest);
      break;
    }
  }
}
3498
/// Adjust the replacement value \p Repl so that substituting it for \p I is
/// sound: drop flags/attributes/metadata on Repl that are not guaranteed at
/// every point where I's value was used.
void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) {
  auto *ReplInst = dyn_cast<Instruction>(Val: Repl);
  if (!ReplInst)
    return;

  // Patch the replacement so that it is not more restrictive than the value
  // being replaced.
  WithOverflowInst *UnusedWO;
  // When replacing the result of a llvm.*.with.overflow intrinsic with a
  // overflowing binary operator, nuw/nsw flags may no longer hold.
  if (isa<OverflowingBinaryOperator>(Val: ReplInst) &&
      match(V: I, P: m_ExtractValue<0>(V: m_WithOverflowInst(I&: UnusedWO))))
    ReplInst->dropPoisonGeneratingFlags();
  // Note that if 'I' is a load being replaced by some operation,
  // for example, by an arithmetic operation, then andIRFlags()
  // would just erase all math flags from the original arithmetic
  // operation, which is clearly not wanted and not needed.
  else if (!isa<LoadInst>(Val: I))
    ReplInst->andIRFlags(V: I);

  // Handle attributes: keep only attributes common to both call sites.
  if (auto *CB1 = dyn_cast<CallBase>(Val: ReplInst)) {
    if (auto *CB2 = dyn_cast<CallBase>(Val: I)) {
      bool Success = CB1->tryIntersectAttributes(Other: CB2);
      assert(Success && "We should not be trying to sink callbases "
                        "with non-intersectable attributes");
      // For NDEBUG Compile.
      (void)Success;
    }
  }

  // FIXME: If both the original and replacement value are part of the
  // same control-flow region (meaning that the execution of one
  // guarantees the execution of the other), then we can combine the
  // noalias scopes here and do better than the general conservative
  // answer used in combineMetadata().

  // In general, GVN unifies expressions over different control-flow
  // regions, and so we need a conservative combination of the noalias
  // scopes.
  combineMetadataForCSE(K: ReplInst, J: I, DoesKMove: false);
}
3541
3542template <typename RootType, typename ShouldReplaceFn>
3543static unsigned replaceDominatedUsesWith(Value *From, Value *To,
3544 const RootType &Root,
3545 const ShouldReplaceFn &ShouldReplace) {
3546 assert(From->getType() == To->getType());
3547
3548 unsigned Count = 0;
3549 for (Use &U : llvm::make_early_inc_range(Range: From->uses())) {
3550 auto *II = dyn_cast<IntrinsicInst>(Val: U.getUser());
3551 if (II && II->getIntrinsicID() == Intrinsic::fake_use)
3552 continue;
3553 if (!ShouldReplace(Root, U))
3554 continue;
3555 LLVM_DEBUG(dbgs() << "Replace dominated use of '";
3556 From->printAsOperand(dbgs());
3557 dbgs() << "' with " << *To << " in " << *U.getUser() << "\n");
3558 U.set(To);
3559 ++Count;
3560 }
3561 return Count;
3562}
3563
3564unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
3565 assert(From->getType() == To->getType());
3566 auto *BB = From->getParent();
3567 unsigned Count = 0;
3568
3569 for (Use &U : llvm::make_early_inc_range(Range: From->uses())) {
3570 auto *I = cast<Instruction>(Val: U.getUser());
3571 if (I->getParent() == BB)
3572 continue;
3573 U.set(To);
3574 ++Count;
3575 }
3576 return Count;
3577}
3578
3579unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
3580 DominatorTree &DT,
3581 const BasicBlockEdge &Root) {
3582 auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) {
3583 return DT.dominates(BBE: Root, U);
3584 };
3585 return ::replaceDominatedUsesWith(From, To, Root, ShouldReplace: Dominates);
3586}
3587
3588unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
3589 DominatorTree &DT,
3590 const BasicBlock *BB) {
3591 auto Dominates = [&DT](const BasicBlock *BB, const Use &U) {
3592 return DT.dominates(BB, U);
3593 };
3594 return ::replaceDominatedUsesWith(From, To, Root: BB, ShouldReplace: Dominates);
3595}
3596
3597unsigned llvm::replaceDominatedUsesWithIf(
3598 Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Root,
3599 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) {
3600 auto DominatesAndShouldReplace =
3601 [&DT, &ShouldReplace, To](const BasicBlockEdge &Root, const Use &U) {
3602 return DT.dominates(BBE: Root, U) && ShouldReplace(U, To);
3603 };
3604 return ::replaceDominatedUsesWith(From, To, Root, ShouldReplace: DominatesAndShouldReplace);
3605}
3606
3607unsigned llvm::replaceDominatedUsesWithIf(
3608 Value *From, Value *To, DominatorTree &DT, const BasicBlock *BB,
3609 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) {
3610 auto DominatesAndShouldReplace = [&DT, &ShouldReplace,
3611 To](const BasicBlock *BB, const Use &U) {
3612 return DT.dominates(BB, U) && ShouldReplace(U, To);
3613 };
3614 return ::replaceDominatedUsesWith(From, To, Root: BB, ShouldReplace: DominatesAndShouldReplace);
3615}
3616
3617bool llvm::callsGCLeafFunction(const CallBase *Call,
3618 const TargetLibraryInfo &TLI) {
3619 // Check if the function is specifically marked as a gc leaf function.
3620 if (Call->hasFnAttr(Kind: "gc-leaf-function"))
3621 return true;
3622 if (const Function *F = Call->getCalledFunction()) {
3623 if (F->hasFnAttribute(Kind: "gc-leaf-function"))
3624 return true;
3625
3626 if (auto IID = F->getIntrinsicID()) {
3627 // Most LLVM intrinsics do not take safepoints.
3628 return IID != Intrinsic::experimental_gc_statepoint &&
3629 IID != Intrinsic::experimental_deoptimize &&
3630 IID != Intrinsic::memcpy_element_unordered_atomic &&
3631 IID != Intrinsic::memmove_element_unordered_atomic;
3632 }
3633 }
3634
3635 // Lib calls can be materialized by some passes, and won't be
3636 // marked as 'gc-leaf-function.' All available Libcalls are
3637 // GC-leaf.
3638 LibFunc LF;
3639 if (TLI.getLibFunc(CB: *Call, F&: LF)) {
3640 return TLI.has(F: LF);
3641 }
3642
3643 return false;
3644}
3645
3646void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
3647 LoadInst &NewLI) {
3648 auto *NewTy = NewLI.getType();
3649
3650 // This only directly applies if the new type is also a pointer.
3651 if (NewTy->isPointerTy()) {
3652 NewLI.setMetadata(KindID: LLVMContext::MD_nonnull, Node: N);
3653 return;
3654 }
3655
3656 // The only other translation we can do is to integral loads with !range
3657 // metadata.
3658 if (!NewTy->isIntegerTy())
3659 return;
3660
3661 MDBuilder MDB(NewLI.getContext());
3662 const Value *Ptr = OldLI.getPointerOperand();
3663 auto *ITy = cast<IntegerType>(Val: NewTy);
3664 auto *NullInt = ConstantExpr::getPtrToInt(
3665 C: ConstantPointerNull::get(T: cast<PointerType>(Val: Ptr->getType())), Ty: ITy);
3666 auto *NonNullInt = ConstantExpr::getAdd(C1: NullInt, C2: ConstantInt::get(Ty: ITy, V: 1));
3667 NewLI.setMetadata(KindID: LLVMContext::MD_range,
3668 Node: MDB.createRange(Lo: NonNullInt, Hi: NullInt));
3669}
3670
3671void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
3672 MDNode *N, LoadInst &NewLI) {
3673 auto *NewTy = NewLI.getType();
3674 // Simply copy the metadata if the type did not change.
3675 if (NewTy == OldLI.getType()) {
3676 NewLI.setMetadata(KindID: LLVMContext::MD_range, Node: N);
3677 return;
3678 }
3679
3680 // Give up unless it is converted to a pointer where there is a single very
3681 // valuable mapping we can do reliably.
3682 // FIXME: It would be nice to propagate this in more ways, but the type
3683 // conversions make it hard.
3684 if (!NewTy->isPointerTy())
3685 return;
3686
3687 unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
3688 if (BitWidth == OldLI.getType()->getScalarSizeInBits() &&
3689 !getConstantRangeFromMetadata(RangeMD: *N).contains(Val: APInt(BitWidth, 0))) {
3690 MDNode *NN = MDNode::get(Context&: OldLI.getContext(), MDs: {});
3691 NewLI.setMetadata(KindID: LLVMContext::MD_nonnull, Node: NN);
3692 }
3693}
3694
3695void llvm::dropDebugUsers(Instruction &I) {
3696 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
3697 SmallVector<DbgVariableRecord *, 1> DPUsers;
3698 findDbgUsers(DbgInsts&: DbgUsers, V: &I, DbgVariableRecords: &DPUsers);
3699 for (auto *DII : DbgUsers)
3700 DII->eraseFromParent();
3701 for (auto *DVR : DPUsers)
3702 DVR->eraseFromParent();
3703}
3704
/// Hoist all non-debug instructions of \p BB into \p DomBlock before
/// \p InsertPt, erasing debug/pseudo instructions and re-homing debug
/// locations onto InsertPt's location.
void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
                                    BasicBlock *BB) {
  // Since we are moving the instructions out of its basic block, we do not
  // retain their original debug locations (DILocations) and debug intrinsic
  // instructions.
  //
  // Doing so would degrade the debugging experience and adversely affect the
  // accuracy of profiling information.
  //
  // Currently, when hoisting the instructions, we take the following actions:
  // - Remove their debug intrinsic instructions.
  // - Set their debug locations to the values from the insertion point.
  //
  // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
  // need to be deleted, is because there will not be any instructions with a
  // DILocation in either branch left after performing the transformation. We
  // can only insert a dbg.value after the two branches are joined again.
  //
  // See PR38762, PR39243 for more details.
  //
  // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
  // encode predicated DIExpressions that yield different results on different
  // code paths.

  // Manual iteration (not range-for): erasing an instruction must advance
  // the iterator via the eraseFromParent() return value.
  for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
    Instruction *I = &*II;
    // Hoisting past control flow invalidates UB-implying assumptions.
    I->dropUBImplyingAttrsAndMetadata();
    if (I->isUsedByMetadata())
      dropDebugUsers(I&: *I);
    // RemoveDIs: drop debug-info too as the following code does.
    I->dropDbgRecords();
    if (I->isDebugOrPseudoInst()) {
      // Remove DbgInfo and pseudo probe Intrinsics.
      II = I->eraseFromParent();
      continue;
    }
    I->setDebugLoc(InsertPt->getDebugLoc());
    ++II;
  }
  // Move everything except BB's terminator in front of InsertPt.
  DomBlock->splice(ToIt: InsertPt->getIterator(), FromBB: BB, FromBeginIt: BB->begin(),
                   FromEndIt: BB->getTerminator()->getIterator());
}
3747
/// Build a DIExpression describing constant \p C of type \p Ty for debug
/// info, or return nullptr when the constant cannot be encoded (e.g. integers
/// outside the signed 64-bit range, wide floats, non-trivial pointers).
DIExpression *llvm::getExpressionForConstant(DIBuilder &DIB, const Constant &C,
                                             Type &Ty) {
  // Create integer constant expression.
  auto createIntegerExpression = [&DIB](const Constant &CV) -> DIExpression * {
    const APInt &API = cast<ConstantInt>(Val: &CV)->getValue();
    // trySExtValue fails for values wider than 64 bits; give up then.
    std::optional<int64_t> InitIntOpt = API.trySExtValue();
    return InitIntOpt ? DIB.createConstantValueExpression(
                            Val: static_cast<uint64_t>(*InitIntOpt))
                      : nullptr;
  };

  if (isa<ConstantInt>(Val: C))
    return createIntegerExpression(C);

  // Floats up to 64 bits are encoded via their raw bit pattern.
  auto *FP = dyn_cast<ConstantFP>(Val: &C);
  if (FP && Ty.isFloatingPointTy() && Ty.getScalarSizeInBits() <= 64) {
    const APFloat &APF = FP->getValueAPF();
    APInt const &API = APF.bitcastToAPInt();
    // NOTE(review): both branches emit the same bit pattern for <=64-bit
    // values; the zero/non-zero split looks redundant — confirm intent.
    if (auto Temp = API.getZExtValue())
      return DIB.createConstantValueExpression(Val: static_cast<uint64_t>(Temp));
    return DIB.createConstantValueExpression(Val: *API.getRawData());
  }

  if (!Ty.isPointerTy())
    return nullptr;

  if (isa<ConstantPointerNull>(Val: C))
    return DIB.createConstantValueExpression(Val: 0);

  // inttoptr of a constant integer: describe the underlying integer.
  if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(Val: &C))
    if (CE->getOpcode() == Instruction::IntToPtr) {
      const Value *V = CE->getOperand(i_nocapture: 0);
      if (auto CI = dyn_cast_or_null<ConstantInt>(Val: V))
        return createIntegerExpression(*CI);
    }
  return nullptr;
}
3785
3786void llvm::remapDebugVariable(ValueToValueMapTy &Mapping, Instruction *Inst) {
3787 auto RemapDebugOperands = [&Mapping](auto *DV, auto Set) {
3788 for (auto *Op : Set) {
3789 auto I = Mapping.find(Op);
3790 if (I != Mapping.end())
3791 DV->replaceVariableLocationOp(Op, I->second, /*AllowEmpty=*/true);
3792 }
3793 };
3794 auto RemapAssignAddress = [&Mapping](auto *DA) {
3795 auto I = Mapping.find(DA->getAddress());
3796 if (I != Mapping.end())
3797 DA->setAddress(I->second);
3798 };
3799 if (auto DVI = dyn_cast<DbgVariableIntrinsic>(Val: Inst))
3800 RemapDebugOperands(DVI, DVI->location_ops());
3801 if (auto DAI = dyn_cast<DbgAssignIntrinsic>(Val: Inst))
3802 RemapAssignAddress(DAI);
3803 for (DbgVariableRecord &DVR : filterDbgVars(R: Inst->getDbgRecordRange())) {
3804 RemapDebugOperands(&DVR, DVR.location_ops());
3805 if (DVR.isDbgAssign())
3806 RemapAssignAddress(&DVR);
3807 }
3808}
3809
namespace {

/// A potential constituent of a bitreverse or bswap expression. See
/// collectBitParts for a fuller explanation.
struct BitPart {
  // Reserve one provenance slot per bit of the value being analyzed.
  BitPart(Value *P, unsigned BW) : Provider(P) {
    Provenance.resize(N: BW);
  }

  /// The Value that this is a bitreverse/bswap of.
  Value *Provider;

  /// The "provenance" of each bit. Provenance[A] = B means that bit A
  /// in Provider becomes bit B in the result of this expression.
  SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.

  /// Sentinel meaning "this result bit is known zero / has no source bit".
  enum { Unset = -1 };
};

} // end anonymous namespace
3830
/// Analyze the specified subexpression and see if it is capable of providing
/// pieces of a bswap or bitreverse. The subexpression provides a potential
/// piece of a bswap or bitreverse if it can be proved that each non-zero bit in
/// the output of the expression came from a corresponding bit in some other
/// value. This function is recursive, and the end result is a mapping of
/// bitnumber to bitnumber. It is the caller's responsibility to validate that
/// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
///
/// For example, if the current subexpression if "(shl i32 %X, 24)" then we know
/// that the expression deposits the low byte of %X into the high byte of the
/// result and that all other bits are zero. This expression is accepted and a
/// BitPart is returned with Provider set to %X and Provenance[24-31] set to
/// [0-7].
///
/// For vector types, all analysis is performed at the per-element level. No
/// cross-element analysis is supported (shuffle/insertion/reduction), and all
/// constant masks must be splatted across all elements.
///
/// To avoid revisiting values, the BitPart results are memoized into the
/// provided map. To avoid unnecessary copying of BitParts, BitParts are
/// constructed in-place in the \c BPS map. Because of this \c BPS needs to
/// store BitParts objects, not pointers. As we need the concept of a nullptr
/// BitParts (Value has been analyzed and the analysis failed), we an Optional
/// type instead to provide the same functionality.
///
/// Because we pass around references into \c BPS, we must use a container that
/// does not invalidate internal references (std::map instead of DenseMap).
static const std::optional<BitPart> &
collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
                std::map<Value *, std::optional<BitPart>> &BPS, int Depth,
                bool &FoundRoot) {
  // Memoization: if V was already analyzed (success or failure), reuse it.
  auto [I, Inserted] = BPS.try_emplace(k: V);
  if (!Inserted)
    return I->second;

  // Result stays std::nullopt (analysis failed) unless assigned below.
  auto &Result = I->second;
  auto BitWidth = V->getType()->getScalarSizeInBits();

  // Can't do integer/elements > 128 bits.
  if (BitWidth > 128)
    return Result;

  // Prevent stack overflow by limiting the recursion depth
  if (Depth == BitPartRecursionMaxDepth) {
    LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
    return Result;
  }

  if (auto *I = dyn_cast<Instruction>(Val: V)) {
    Value *X, *Y;
    const APInt *C;

    // If this is an or instruction, it may be an inner node of the bswap.
    if (match(V, P: m_Or(L: m_Value(V&: X), R: m_Value(V&: Y)))) {
      // Check we have both sources and they are from the same provider.
      const auto &A = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
                                      Depth: Depth + 1, FoundRoot);
      if (!A || !A->Provider)
        return Result;

      const auto &B = collectBitParts(V: Y, MatchBSwaps, MatchBitReversals, BPS,
                                      Depth: Depth + 1, FoundRoot);
      if (!B || A->Provider != B->Provider)
        return Result;

      // Try and merge the two together.
      Result = BitPart(A->Provider, BitWidth);
      for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) {
        // Two different known source bits for the same output bit: conflict.
        if (A->Provenance[BitIdx] != BitPart::Unset &&
            B->Provenance[BitIdx] != BitPart::Unset &&
            A->Provenance[BitIdx] != B->Provenance[BitIdx])
          return Result = std::nullopt;

        if (A->Provenance[BitIdx] == BitPart::Unset)
          Result->Provenance[BitIdx] = B->Provenance[BitIdx];
        else
          Result->Provenance[BitIdx] = A->Provenance[BitIdx];
      }

      return Result;
    }

    // If this is a logical shift by a constant, recurse then shift the result.
    if (match(V, P: m_LogicalShift(L: m_Value(V&: X), R: m_APInt(Res&: C)))) {
      const APInt &BitShift = *C;

      // Ensure the shift amount is defined.
      if (BitShift.uge(RHS: BitWidth))
        return Result;

      // For bswap-only, limit shift amounts to whole bytes, for an early exit.
      if (!MatchBitReversals && (BitShift.getZExtValue() % 8) != 0)
        return Result;

      const auto &Res = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth: Depth + 1, FoundRoot);
      if (!Res)
        return Result;
      Result = Res;

      // Perform the "shift" on BitProvenance.
      auto &P = Result->Provenance;
      if (I->getOpcode() == Instruction::Shl) {
        // shl: drop the top entries, fill the bottom with Unset (zeros).
        P.erase(CS: std::prev(x: P.end(), n: BitShift.getZExtValue()), CE: P.end());
        P.insert(I: P.begin(), NumToInsert: BitShift.getZExtValue(), Elt: BitPart::Unset);
      } else {
        // lshr: drop the bottom entries, fill the top with Unset (zeros).
        P.erase(CS: P.begin(), CE: std::next(x: P.begin(), n: BitShift.getZExtValue()));
        P.insert(I: P.end(), NumToInsert: BitShift.getZExtValue(), Elt: BitPart::Unset);
      }

      return Result;
    }

    // If this is a logical 'and' with a mask that clears bits, recurse then
    // unset the appropriate bits.
    if (match(V, P: m_And(L: m_Value(V&: X), R: m_APInt(Res&: C)))) {
      const APInt &AndMask = *C;

      // Check that the mask allows a multiple of 8 bits for a bswap, for an
      // early exit.
      unsigned NumMaskedBits = AndMask.popcount();
      if (!MatchBitReversals && (NumMaskedBits % 8) != 0)
        return Result;

      const auto &Res = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth: Depth + 1, FoundRoot);
      if (!Res)
        return Result;
      Result = Res;

      for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
        // If the AndMask is zero for this bit, clear the bit.
        if (AndMask[BitIdx] == 0)
          Result->Provenance[BitIdx] = BitPart::Unset;
      return Result;
    }

    // If this is a zext instruction zero extend the result.
    if (match(V, P: m_ZExt(Op: m_Value(V&: X)))) {
      const auto &Res = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth: Depth + 1, FoundRoot);
      if (!Res)
        return Result;

      // Low bits keep their provenance; the extended high bits are zero.
      Result = BitPart(Res->Provider, BitWidth);
      auto NarrowBitWidth = X->getType()->getScalarSizeInBits();
      for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx)
        Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
      for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx)
        Result->Provenance[BitIdx] = BitPart::Unset;
      return Result;
    }

    // If this is a truncate instruction, extract the lower bits.
    if (match(V, P: m_Trunc(Op: m_Value(V&: X)))) {
      const auto &Res = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth: Depth + 1, FoundRoot);
      if (!Res)
        return Result;

      Result = BitPart(Res->Provider, BitWidth);
      for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
        Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
      return Result;
    }

    // BITREVERSE - most likely due to us previous matching a partial
    // bitreverse.
    if (match(V, P: m_BitReverse(Op0: m_Value(V&: X)))) {
      const auto &Res = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth: Depth + 1, FoundRoot);
      if (!Res)
        return Result;

      // Mirror the provenance bit-for-bit.
      Result = BitPart(Res->Provider, BitWidth);
      for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
        Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx];
      return Result;
    }

    // BSWAP - most likely due to us previous matching a partial bswap.
    if (match(V, P: m_BSwap(Op0: m_Value(V&: X)))) {
      const auto &Res = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth: Depth + 1, FoundRoot);
      if (!Res)
        return Result;

      // Mirror the provenance byte-for-byte, preserving bit order in bytes.
      unsigned ByteWidth = BitWidth / 8;
      Result = BitPart(Res->Provider, BitWidth);
      for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) {
        unsigned ByteBitOfs = ByteIdx * 8;
        for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx)
          Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] =
              Res->Provenance[ByteBitOfs + BitIdx];
      }
      return Result;
    }

    // Funnel 'double' shifts take 3 operands, 2 inputs and the shift
    // amount (modulo).
    // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
    // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
    if (match(V, P: m_FShl(Op0: m_Value(V&: X), Op1: m_Value(V&: Y), Op2: m_APInt(Res&: C))) ||
        match(V, P: m_FShr(Op0: m_Value(V&: X), Op1: m_Value(V&: Y), Op2: m_APInt(Res&: C)))) {
      // We can treat fshr as a fshl by flipping the modulo amount.
      unsigned ModAmt = C->urem(RHS: BitWidth);
      if (cast<IntrinsicInst>(Val: I)->getIntrinsicID() == Intrinsic::fshr)
        ModAmt = BitWidth - ModAmt;

      // For bswap-only, limit shift amounts to whole bytes, for an early exit.
      if (!MatchBitReversals && (ModAmt % 8) != 0)
        return Result;

      // Check we have both sources and they are from the same provider.
      const auto &LHS = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth: Depth + 1, FoundRoot);
      if (!LHS || !LHS->Provider)
        return Result;

      const auto &RHS = collectBitParts(V: Y, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth: Depth + 1, FoundRoot);
      if (!RHS || LHS->Provider != RHS->Provider)
        return Result;

      // LHS provenance rotates up by ModAmt; RHS's top bits fill the bottom.
      unsigned StartBitRHS = BitWidth - ModAmt;
      Result = BitPart(LHS->Provider, BitWidth);
      for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx)
        Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx];
      for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx)
        Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS];
      return Result;
    }
  }

  // If we've already found a root input value then we're never going to merge
  // these back together.
  if (FoundRoot)
    return Result;

  // Okay, we got to something that isn't a shift, 'or', 'and', etc. This must
  // be the root input value to the bswap/bitreverse.
  FoundRoot = true;
  Result = BitPart(V, BitWidth);
  for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
    Result->Provenance[BitIdx] = BitIdx;
  return Result;
}
4078
/// Return true when sourcing bit \p To of the result from bit \p From of the
/// input matches a byte-swap of a \p BitWidth-bit value: each bit keeps its
/// position within its byte while the containing bytes are mirrored.
static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
                                          unsigned BitWidth) {
  // A bswap never moves a bit within its own byte.
  if ((From & 7) != (To & 7))
    return false;
  // Compare byte positions: byte i must be sourced from byte
  // (NumBytes - 1 - i), i.e. the byte indices are mirrored.
  unsigned FromByte = From / 8;
  unsigned ToByte = To / 8;
  unsigned NumBytes = BitWidth / 8;
  return FromByte + ToByte == NumBytes - 1;
}
4089
/// Return true when sourcing bit \p To of the result from bit \p From of the
/// input matches a full bit-reversal of a \p BitWidth-bit value.
static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
                                               unsigned BitWidth) {
  // Bit i must be sourced from the mirrored position (BitWidth - 1 - i).
  return From + To + 1 == BitWidth;
}
4094
/// Try to recognize a bswap or bitreverse idiom rooted at \p I (assembled from
/// shifts/ands/ors, funnel shifts, or partial bswap/bitreverse calls) and, on
/// success, materialize the equivalent intrinsic call before \p I.
/// Newly created instructions are appended to \p InsertedInsts; \p I itself is
/// left in place for the caller to replace/erase. Returns true on success.
bool llvm::recognizeBSwapOrBitReverseIdiom(
    Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
    SmallVectorImpl<Instruction *> &InsertedInsts) {
  // Only these instruction shapes can be the root of the idiom.
  if (!match(V: I, P: m_Or(L: m_Value(), R: m_Value())) &&
      !match(V: I, P: m_FShl(Op0: m_Value(), Op1: m_Value(), Op2: m_Value())) &&
      !match(V: I, P: m_FShr(Op0: m_Value(), Op1: m_Value(), Op2: m_Value())) &&
      !match(V: I, P: m_BSwap(Op0: m_Value())))
    return false;
  if (!MatchBSwaps && !MatchBitReversals)
    return false;
  Type *ITy = I->getType();
  if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() == 1 ||
      ITy->getScalarSizeInBits() > 128)
    return false; // Can't do integer/elements > 128 bits.

  // Try to find all the pieces corresponding to the bswap.
  bool FoundRoot = false;
  std::map<Value *, std::optional<BitPart>> BPS;
  const auto &Res =
      collectBitParts(V: I, MatchBSwaps, MatchBitReversals, BPS, Depth: 0, FoundRoot);
  if (!Res)
    return false;
  // Provenance[i] is the bit index of the provider value that produces bit i
  // of I's result, or BitPart::Unset when bit i is known zero.
  ArrayRef<int8_t> BitProvenance = Res->Provenance;
  assert(all_of(BitProvenance,
                [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) &&
         "Illegal bit provenance index");

  // If the upper bits are zero, then attempt to perform as a truncated op.
  Type *DemandedTy = ITy;
  if (BitProvenance.back() == BitPart::Unset) {
    // Strip the known-zero high bits and shrink to the remaining width.
    while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
      BitProvenance = BitProvenance.drop_back();
    if (BitProvenance.empty())
      return false; // TODO - handle null value?
    DemandedTy = Type::getIntNTy(C&: I->getContext(), N: BitProvenance.size());
    if (auto *IVecTy = dyn_cast<VectorType>(Val: ITy))
      DemandedTy = VectorType::get(ElementType: DemandedTy, Other: IVecTy);
  }

  // Check BitProvenance hasn't found a source larger than the result type.
  unsigned DemandedBW = DemandedTy->getScalarSizeInBits();
  if (DemandedBW > ITy->getScalarSizeInBits())
    return false;

  // Now, is the bit permutation correct for a bswap or a bitreverse? We can
  // only byteswap values with an even number of bytes.
  APInt DemandedMask = APInt::getAllOnes(numBits: DemandedBW);
  bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
  bool OKForBitReverse = MatchBitReversals;
  for (unsigned BitIdx = 0;
       (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) {
    // Known-zero bits disqualify neither pattern, but must be masked off
    // after the intrinsic call (see DemandedMask use below).
    if (BitProvenance[BitIdx] == BitPart::Unset) {
      DemandedMask.clearBit(BitPosition: BitIdx);
      continue;
    }
    OKForBSwap &= bitTransformIsCorrectForBSwap(From: BitProvenance[BitIdx], To: BitIdx,
                                                BitWidth: DemandedBW);
    OKForBitReverse &= bitTransformIsCorrectForBitReverse(From: BitProvenance[BitIdx],
                                                          To: BitIdx, BitWidth: DemandedBW);
  }

  // If both patterns match, bswap is preferred.
  Intrinsic::ID Intrin;
  if (OKForBSwap)
    Intrin = Intrinsic::bswap;
  else if (OKForBitReverse)
    Intrin = Intrinsic::bitreverse;
  else
    return false;

  Function *F =
      Intrinsic::getOrInsertDeclaration(M: I->getModule(), id: Intrin, Tys: DemandedTy);
  Value *Provider = Res->Provider;

  // We may need to truncate the provider.
  if (DemandedTy != Provider->getType()) {
    auto *Trunc =
        CastInst::CreateIntegerCast(S: Provider, Ty: DemandedTy, isSigned: false, Name: "trunc", InsertBefore: I->getIterator());
    InsertedInsts.push_back(Elt: Trunc);
    Provider = Trunc;
  }

  Instruction *Result = CallInst::Create(Func: F, Args: Provider, NameStr: "rev", InsertBefore: I->getIterator());
  InsertedInsts.push_back(Elt: Result);

  // Mask off any bits the idiom proved to be zero.
  if (!DemandedMask.isAllOnes()) {
    auto *Mask = ConstantInt::get(Ty: DemandedTy, V: DemandedMask);
    Result = BinaryOperator::Create(Op: Instruction::And, S1: Result, S2: Mask, Name: "mask", InsertBefore: I->getIterator());
    InsertedInsts.push_back(Elt: Result);
  }

  // We may need to zeroextend back to the result type.
  if (ITy != Result->getType()) {
    auto *ExtInst = CastInst::CreateIntegerCast(S: Result, Ty: ITy, isSigned: false, Name: "zext", InsertBefore: I->getIterator());
    InsertedInsts.push_back(Elt: ExtInst);
  }

  return true;
}
4193
4194// CodeGen has special handling for some string functions that may replace
4195// them with target-specific intrinsics. Since that'd skip our interceptors
4196// in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
4197// we mark affected calls as NoBuiltin, which will disable optimization
4198// in CodeGen.
4199void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
4200 CallInst *CI, const TargetLibraryInfo *TLI) {
4201 Function *F = CI->getCalledFunction();
4202 LibFunc Func;
4203 if (F && !F->hasLocalLinkage() && F->hasName() &&
4204 TLI->getLibFunc(funcName: F->getName(), F&: Func) && TLI->hasOptimizedCodeGen(F: Func) &&
4205 !F->doesNotAccessMemory())
4206 CI->addFnAttr(Kind: Attribute::NoBuiltin);
4207}
4208
/// Return true if operand \p OpIdx of \p I could be replaced by an arbitrary
/// (non-constant) value of the same type without making the IR invalid.
/// Used e.g. when deciding whether an operand may be rewritten as a PHI.
bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
  const auto *Op = I->getOperand(i: OpIdx);
  // We can't have a PHI with a metadata type.
  if (Op->getType()->isMetadataTy())
    return false;

  // swifterror pointers can only be used by a load, store, or as a swifterror
  // argument; swifterror pointers are not allowed to be used in select or phi
  // instructions.
  if (Op->isSwiftError())
    return false;

  // Early exit.
  // Anything that is neither a constant nor inline asm is already a variable,
  // so replacing it with another variable is always legal.
  if (!isa<Constant, InlineAsm>(Val: Op))
    return true;

  switch (I->getOpcode()) {
  default:
    return true;
  case Instruction::Call:
  case Instruction::Invoke: {
    const auto &CB = cast<CallBase>(Val: *I);

    // Can't handle inline asm. Skip it.
    if (CB.isInlineAsm())
      return false;

    // Constant bundle operands may need to retain their constant-ness for
    // correctness.
    if (CB.isBundleOperand(Idx: OpIdx))
      return false;

    if (OpIdx < CB.arg_size()) {
      // Some variadic intrinsics require constants in the variadic arguments,
      // which currently aren't markable as immarg.
      if (isa<IntrinsicInst>(Val: CB) &&
          OpIdx >= CB.getFunctionType()->getNumParams()) {
        // This is known to be OK for stackmap.
        return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
      }

      // gcroot is a special case, since it requires a constant argument which
      // isn't also required to be a simple ConstantInt.
      if (CB.getIntrinsicID() == Intrinsic::gcroot)
        return false;

      // Some intrinsic operands are required to be immediates.
      return !CB.paramHasAttr(ArgNo: OpIdx, Kind: Attribute::ImmArg);
    }

    // It is never allowed to replace the call argument to an intrinsic, but it
    // may be possible for a call.
    return !isa<IntrinsicInst>(Val: CB);
  }
  case Instruction::ShuffleVector:
    // Shufflevector masks are constant.
    return OpIdx != 2;
  case Instruction::Switch:
  case Instruction::ExtractValue:
    // All operands apart from the first are constant.
    return OpIdx == 0;
  case Instruction::InsertValue:
    // All operands apart from the first and the second are constant.
    return OpIdx < 2;
  case Instruction::Alloca:
    // Static allocas (constant size in the entry block) are handled by
    // prologue/epilogue insertion so they're free anyway. We definitely don't
    // want to make them non-constant.
    return !cast<AllocaInst>(Val: I)->isStaticAlloca();
  case Instruction::GetElementPtr:
    if (OpIdx == 0)
      return true;
    // Struct member indices must stay constant; only array/vector indices
    // leading up to operand OpIdx may vary.
    gep_type_iterator It = gep_type_begin(GEP: I);
    for (auto E = std::next(x: It, n: OpIdx); It != E; ++It)
      if (It.isStruct())
        return false;
    return true;
  }
}
4288
/// Return a value that is the logical negation of \p Condition, reusing an
/// existing inversion when possible and only creating a new 'not' instruction
/// as a last resort.
Value *llvm::invertCondition(Value *Condition) {
  // First: Check if it's a constant
  if (Constant *C = dyn_cast<Constant>(Val: Condition))
    return ConstantExpr::getNot(C);

  // Second: If the condition is already inverted, return the original value
  Value *NotCondition;
  if (match(V: Condition, P: m_Not(V: m_Value(V&: NotCondition))))
    return NotCondition;

  // Determine the block the (possibly new) inversion would live in: the
  // defining block for an instruction, the entry block for an argument.
  BasicBlock *Parent = nullptr;
  Instruction *Inst = dyn_cast<Instruction>(Val: Condition);
  if (Inst)
    Parent = Inst->getParent();
  else if (Argument *Arg = dyn_cast<Argument>(Val: Condition))
    Parent = &Arg->getParent()->getEntryBlock();
  assert(Parent && "Unsupported condition to invert");

  // Third: Check all the users for an invert
  // (only reuse a 'not' in the same block so it dominates where needed).
  for (User *U : Condition->users())
    if (Instruction *I = dyn_cast<Instruction>(Val: U))
      if (I->getParent() == Parent && match(V: I, P: m_Not(V: m_Specific(V: Condition))))
        return I;

  // Last option: Create a new instruction
  // Place it right after the defining instruction, or at the block's first
  // insertion point when the condition is an argument or a PHI (instructions
  // can't be inserted among PHIs).
  auto *Inverted =
      BinaryOperator::CreateNot(Op: Condition, Name: Condition->getName() + ".inv");
  if (Inst && !isa<PHINode>(Val: Inst))
    Inverted->insertAfter(InsertPos: Inst->getIterator());
  else
    Inverted->insertBefore(InsertPos: Parent->getFirstInsertionPt());
  return Inverted;
}
4322
4323bool llvm::inferAttributesFromOthers(Function &F) {
4324 // Note: We explicitly check for attributes rather than using cover functions
4325 // because some of the cover functions include the logic being implemented.
4326
4327 bool Changed = false;
4328 // readnone + not convergent implies nosync
4329 if (!F.hasFnAttribute(Kind: Attribute::NoSync) &&
4330 F.doesNotAccessMemory() && !F.isConvergent()) {
4331 F.setNoSync();
4332 Changed = true;
4333 }
4334
4335 // readonly implies nofree
4336 if (!F.hasFnAttribute(Kind: Attribute::NoFree) && F.onlyReadsMemory()) {
4337 F.setDoesNotFreeMemory();
4338 Changed = true;
4339 }
4340
4341 // willreturn implies mustprogress
4342 if (!F.hasFnAttribute(Kind: Attribute::MustProgress) && F.willReturn()) {
4343 F.setMustProgress();
4344 Changed = true;
4345 }
4346
4347 // TODO: There are a bunch of cases of restrictive memory effects we
4348 // can infer by inspecting arguments of argmemonly-ish functions.
4349
4350 return Changed;
4351}
4352
/// Fold the poison-generating flags of \p I into this tracker: each tracked
/// flag remains set only if every instruction merged so far carried it.
void OverflowTracking::mergeFlags(Instruction &I) {
#ifndef NDEBUG
  // Opcode is tracked only in asserts builds, solely to verify that all
  // merged instructions share the same opcode.
  if (Opcode)
    assert(Opcode == I.getOpcode() &&
           "can only use mergeFlags on instructions with matching opcodes");
  else
    Opcode = I.getOpcode();
#endif
  // nuw/nsw survive only if present on every merged instruction.
  if (isa<OverflowingBinaryOperator>(Val: &I)) {
    HasNUW &= I.hasNoUnsignedWrap();
    HasNSW &= I.hasNoSignedWrap();
  }
  // Likewise for the disjoint flag on PossiblyDisjointInst.
  if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(Val: &I))
    IsDisjoint &= DisjointOp->isDisjoint();
}
4368
4369void OverflowTracking::applyFlags(Instruction &I) {
4370 I.clearSubclassOptionalData();
4371 if (I.getOpcode() == Instruction::Add ||
4372 (I.getOpcode() == Instruction::Mul && AllKnownNonZero)) {
4373 if (HasNUW)
4374 I.setHasNoUnsignedWrap();
4375 if (HasNSW && (AllKnownNonNegative || HasNUW))
4376 I.setHasNoSignedWrap();
4377 }
4378 if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(Val: &I))
4379 DisjointOp->setIsDisjoint(IsDisjoint);
4380}
4381