1//===- Local.cpp - Functions to perform local transformations -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This family of functions perform various local transformations to the
10// program.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Transforms/Utils/Local.h"
15#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/DenseMap.h"
17#include "llvm/ADT/DenseMapInfo.h"
18#include "llvm/ADT/DenseSet.h"
19#include "llvm/ADT/Hashing.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SetVector.h"
22#include "llvm/ADT/SmallPtrSet.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/Statistic.h"
25#include "llvm/Analysis/AssumeBundleQueries.h"
26#include "llvm/Analysis/ConstantFolding.h"
27#include "llvm/Analysis/DomTreeUpdater.h"
28#include "llvm/Analysis/InstructionSimplify.h"
29#include "llvm/Analysis/MemoryBuiltins.h"
30#include "llvm/Analysis/MemorySSAUpdater.h"
31#include "llvm/Analysis/TargetLibraryInfo.h"
32#include "llvm/Analysis/ValueTracking.h"
33#include "llvm/Analysis/VectorUtils.h"
34#include "llvm/BinaryFormat/Dwarf.h"
35#include "llvm/IR/Argument.h"
36#include "llvm/IR/Attributes.h"
37#include "llvm/IR/BasicBlock.h"
38#include "llvm/IR/CFG.h"
39#include "llvm/IR/Constant.h"
40#include "llvm/IR/ConstantRange.h"
41#include "llvm/IR/Constants.h"
42#include "llvm/IR/DIBuilder.h"
43#include "llvm/IR/DataLayout.h"
44#include "llvm/IR/DebugInfo.h"
45#include "llvm/IR/DebugInfoMetadata.h"
46#include "llvm/IR/DebugLoc.h"
47#include "llvm/IR/DerivedTypes.h"
48#include "llvm/IR/Dominators.h"
49#include "llvm/IR/EHPersonalities.h"
50#include "llvm/IR/Function.h"
51#include "llvm/IR/GetElementPtrTypeIterator.h"
52#include "llvm/IR/IRBuilder.h"
53#include "llvm/IR/InstrTypes.h"
54#include "llvm/IR/Instruction.h"
55#include "llvm/IR/Instructions.h"
56#include "llvm/IR/IntrinsicInst.h"
57#include "llvm/IR/Intrinsics.h"
58#include "llvm/IR/IntrinsicsWebAssembly.h"
59#include "llvm/IR/LLVMContext.h"
60#include "llvm/IR/MDBuilder.h"
61#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
62#include "llvm/IR/Metadata.h"
63#include "llvm/IR/Module.h"
64#include "llvm/IR/PatternMatch.h"
65#include "llvm/IR/ProfDataUtils.h"
66#include "llvm/IR/Type.h"
67#include "llvm/IR/Use.h"
68#include "llvm/IR/User.h"
69#include "llvm/IR/Value.h"
70#include "llvm/IR/ValueHandle.h"
71#include "llvm/Support/Casting.h"
72#include "llvm/Support/CommandLine.h"
73#include "llvm/Support/Compiler.h"
74#include "llvm/Support/Debug.h"
75#include "llvm/Support/ErrorHandling.h"
76#include "llvm/Support/KnownBits.h"
77#include "llvm/Support/raw_ostream.h"
78#include "llvm/Transforms/Utils/BasicBlockUtils.h"
79#include "llvm/Transforms/Utils/ValueMapper.h"
80#include <algorithm>
81#include <cassert>
82#include <cstdint>
83#include <iterator>
84#include <map>
85#include <optional>
86#include <utility>
87
88using namespace llvm;
89using namespace llvm::PatternMatch;
90
91#define DEBUG_TYPE "local"
92
93STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
94STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");
95
96static cl::opt<bool> PHICSEDebugHash(
97 "phicse-debug-hash",
98#ifdef EXPENSIVE_CHECKS
99 cl::init(true),
100#else
101 cl::init(Val: false),
102#endif
103 cl::Hidden,
104 cl::desc("Perform extra assertion checking to verify that PHINodes's hash "
105 "function is well-behaved w.r.t. its isEqual predicate"));
106
107static cl::opt<unsigned> PHICSENumPHISmallSize(
108 "phicse-num-phi-smallsize", cl::init(Val: 32), cl::Hidden,
109 cl::desc(
110 "When the basic block contains not more than this number of PHI nodes, "
111 "perform a (faster!) exhaustive search instead of set-driven one."));
112
113static cl::opt<unsigned> MaxPhiEntriesIncreaseAfterRemovingEmptyBlock(
114 "max-phi-entries-increase-after-removing-empty-block", cl::init(Val: 1000),
115 cl::Hidden,
116 cl::desc("Stop removing an empty block if removing it will introduce more "
117 "than this number of phi entries in its successor"));
118
119// Max recursion depth for collectBitParts used when detecting bswap and
120// bitreverse idioms.
121static const unsigned BitPartRecursionMaxDepth = 48;
122
123//===----------------------------------------------------------------------===//
124// Local constant propagation.
125//
126
127/// ConstantFoldTerminator - If a terminator instruction is predicated on a
128/// constant value, convert it into an unconditional branch to the constant
129/// destination. This is a nontrivial operation because the successors of this
130/// basic block must have their PHI nodes updated.
131/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
132/// conditions and indirectbr addresses this might make dead if
133/// DeleteDeadConditions is true.
bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
                                  const TargetLibraryInfo *TLI,
                                  DomTreeUpdater *DTU) {
  Instruction *T = BB->getTerminator();
  IRBuilder<> Builder(T);

  // Branch - See if we are conditional jumping on constant
  if (auto *BI = dyn_cast<CondBrInst>(Val: T)) {
    BasicBlock *Dest1 = BI->getSuccessor(i: 0);
    BasicBlock *Dest2 = BI->getSuccessor(i: 1);

    if (Dest2 == Dest1) { // Conditional branch to same location?
      // This branch matches something like this:
      //     br bool %cond, label %Dest, label %Dest
      // and changes it into:  br label %Dest

      // Let the basic block know that we are letting go of one copy of it.
      assert(BI->getParent() && "Terminator not inserted in block!");
      Dest1->removePredecessor(Pred: BI->getParent());

      // Replace the conditional branch with an unconditional one.
      UncondBrInst *NewBI = Builder.CreateBr(Dest: Dest1);

      // Transfer the metadata to the new branch instruction.
      NewBI->copyMetadata(SrcInst: *BI, WL: {LLVMContext::MD_loop, LLVMContext::MD_dbg,
                                 LLVMContext::MD_annotation});

      // Capture the condition before erasing the branch so we can clean it up.
      Value *Cond = BI->getCondition();
      BI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(V: Cond, TLI);
      return true;
    }

    if (auto *Cond = dyn_cast<ConstantInt>(Val: BI->getCondition())) {
      // Are we branching on constant?
      // YES.  Change to unconditional branch...
      BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
      BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;

      // Let the basic block know that we are letting go of it.  Based on this,
      // it will adjust it's PHI nodes.
      OldDest->removePredecessor(Pred: BB);

      // Replace the conditional branch with an unconditional one.
      UncondBrInst *NewBI = Builder.CreateBr(Dest: Destination);

      // Transfer the metadata to the new branch instruction.
      NewBI->copyMetadata(SrcInst: *BI, WL: {LLVMContext::MD_loop, LLVMContext::MD_dbg,
                                 LLVMContext::MD_annotation});

      BI->eraseFromParent();
      // Only the edge to the not-taken destination disappears; the taken edge
      // is preserved by the new unconditional branch.
      if (DTU)
        DTU->applyUpdates(Updates: {{DominatorTree::Delete, BB, OldDest}});
      return true;
    }

    return false;
  }

  if (auto *SI = dyn_cast<SwitchInst>(Val: T)) {
    // If we are switching on a constant, we can convert the switch to an
    // unconditional branch.
    auto *CI = dyn_cast<ConstantInt>(Val: SI->getCondition());
    BasicBlock *DefaultDest = SI->getDefaultDest();
    BasicBlock *TheOnlyDest = DefaultDest;

    // If the default is unreachable, ignore it when searching for TheOnlyDest.
    if (SI->defaultDestUnreachable() && SI->getNumCases() > 0)
      TheOnlyDest = SI->case_begin()->getCaseSuccessor();

    bool Changed = false;

    // Figure out which case it goes to.
    for (auto It = SI->case_begin(), End = SI->case_end(); It != End;) {
      // Found case matching a constant operand?
      if (It->getCaseValue() == CI) {
        TheOnlyDest = It->getCaseSuccessor();
        break;
      }

      // Check to see if this branch is going to the same place as the default
      // dest.  If so, eliminate it as an explicit compare.
      if (It->getCaseSuccessor() == DefaultDest) {
        MDNode *MD = getValidBranchWeightMDNode(I: *SI);
        unsigned NCases = SI->getNumCases();
        // Fold the case metadata into the default if there will be any branches
        // left, unless the metadata doesn't match the switch.
        if (NCases > 1 && MD) {
          // Collect branch weights into a vector.
          SmallVector<uint64_t, 8> Weights;
          extractFromBranchWeightMD64(ProfileData: MD, Weights);

          // Merge weight of this case to the default weight.
          // Weights[0] is the default weight; case weights follow at Idx + 1.
          unsigned Idx = It->getCaseIndex();

          // Check for and prevent uint64_t overflow by reducing branch weights.
          if (Weights[0] > UINT64_MAX - Weights[Idx + 1])
            fitWeights(Weights);

          Weights[0] += Weights[Idx + 1];
          // Remove weight for this case.
          std::swap(a&: Weights[Idx + 1], b&: Weights.back());
          Weights.pop_back();
          setFittedBranchWeights(I&: *SI, Weights, IsExpected: hasBranchWeightOrigin(ProfileData: MD));
        }
        // Remove this entry.
        BasicBlock *ParentBB = SI->getParent();
        DefaultDest->removePredecessor(Pred: ParentBB);
        It = SI->removeCase(I: It);
        End = SI->case_end();

        // Removing this case may have made the condition constant. In that
        // case, update CI and restart iteration through the cases.
        if (auto *NewCI = dyn_cast<ConstantInt>(Val: SI->getCondition())) {
          CI = NewCI;
          It = SI->case_begin();
        }

        Changed = true;
        continue;
      }

      // Otherwise, check to see if the switch only branches to one destination.
      // We do this by reseting "TheOnlyDest" to null when we find two non-equal
      // destinations.
      if (It->getCaseSuccessor() != TheOnlyDest)
        TheOnlyDest = nullptr;

      // Increment this iterator as we haven't removed the case.
      ++It;
    }

    if (CI && !TheOnlyDest) {
      // Branching on a constant, but not any of the cases, go to the default
      // successor.
      TheOnlyDest = SI->getDefaultDest();
    }

    // If we found a single destination that we can fold the switch into, do so
    // now.
    if (TheOnlyDest) {
      // Insert the new branch.
      Builder.CreateBr(Dest: TheOnlyDest);
      BasicBlock *BB = SI->getParent();

      SmallPtrSet<BasicBlock *, 8> RemovedSuccessors;

      // Remove entries from PHI nodes which we no longer branch to...
      // Keep exactly one edge to TheOnlyDest; strip PHI entries for the rest.
      BasicBlock *SuccToKeep = TheOnlyDest;
      for (BasicBlock *Succ : successors(I: SI)) {
        if (DTU && Succ != TheOnlyDest)
          RemovedSuccessors.insert(Ptr: Succ);
        // Found case matching a constant operand?
        if (Succ == SuccToKeep) {
          SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest
        } else {
          Succ->removePredecessor(Pred: BB);
        }
      }

      // Delete the old switch.
      Value *Cond = SI->getCondition();
      SI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(V: Cond, TLI);
      if (DTU) {
        std::vector<DominatorTree::UpdateType> Updates;
        Updates.reserve(n: RemovedSuccessors.size());
        for (auto *RemovedSuccessor : RemovedSuccessors)
          Updates.push_back(x: {DominatorTree::Delete, BB, RemovedSuccessor});
        DTU->applyUpdates(Updates);
      }
      return true;
    }

    if (SI->getNumCases() == 1) {
      // Otherwise, we can fold this switch into a conditional branch
      // instruction if it has only one non-default destination.
      auto FirstCase = *SI->case_begin();
      Value *Cond = Builder.CreateICmpEQ(LHS: SI->getCondition(),
                                         RHS: FirstCase.getCaseValue(), Name: "cond");

      // Insert the new branch.
      CondBrInst *NewBr = Builder.CreateCondBr(
          Cond, True: FirstCase.getCaseSuccessor(), False: SI->getDefaultDest());
      SmallVector<uint32_t> Weights;
      if (extractBranchWeights(I: *SI, Weights) && Weights.size() == 2) {
        // Switch profile order is (default, case); the branch wants
        // (true = case taken, false = default), so swap when transferring.
        uint32_t DefWeight = Weights[0];
        uint32_t CaseWeight = Weights[1];
        // The TrueWeight should be the weight for the single case of SI.
        NewBr->setMetadata(KindID: LLVMContext::MD_prof,
                           Node: MDBuilder(BB->getContext())
                               .createBranchWeights(TrueWeight: CaseWeight, FalseWeight: DefWeight));
      }

      // Update make.implicit metadata to the newly-created conditional branch.
      MDNode *MakeImplicitMD = SI->getMetadata(KindID: LLVMContext::MD_make_implicit);
      if (MakeImplicitMD)
        NewBr->setMetadata(KindID: LLVMContext::MD_make_implicit, Node: MakeImplicitMD);

      // Delete the old switch.
      SI->eraseFromParent();
      return true;
    }
    return Changed;
  }

  if (auto *IBI = dyn_cast<IndirectBrInst>(Val: T)) {
    // indirectbr blockaddress(@F, @BB) -> br label @BB
    if (auto *BA =
            dyn_cast<BlockAddress>(Val: IBI->getAddress()->stripPointerCasts())) {
      BasicBlock *TheOnlyDest = BA->getBasicBlock();
      SmallPtrSet<BasicBlock *, 8> RemovedSuccessors;

      // Insert the new branch.
      Builder.CreateBr(Dest: TheOnlyDest);

      BasicBlock *SuccToKeep = TheOnlyDest;
      for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
        BasicBlock *DestBB = IBI->getDestination(i);
        if (DTU && DestBB != TheOnlyDest)
          RemovedSuccessors.insert(Ptr: DestBB);
        if (IBI->getDestination(i) == SuccToKeep) {
          SuccToKeep = nullptr;
        } else {
          DestBB->removePredecessor(Pred: BB);
        }
      }
      Value *Address = IBI->getAddress();
      IBI->eraseFromParent();
      if (DeleteDeadConditions)
        // Delete pointer cast instructions.
        RecursivelyDeleteTriviallyDeadInstructions(V: Address, TLI);

      // Also zap the blockaddress constant if there are no users remaining,
      // otherwise the destination is still marked as having its address taken.
      if (BA->use_empty())
        BA->destroyConstant();

      // If we didn't find our destination in the IBI successor list, then we
      // have undefined behavior. Replace the unconditional branch with an
      // 'unreachable' instruction.
      if (SuccToKeep) {
        BB->getTerminator()->eraseFromParent();
        new UnreachableInst(BB->getContext(), BB);
      }

      if (DTU) {
        std::vector<DominatorTree::UpdateType> Updates;
        Updates.reserve(n: RemovedSuccessors.size());
        for (auto *RemovedSuccessor : RemovedSuccessors)
          Updates.push_back(x: {DominatorTree::Delete, BB, RemovedSuccessor});
        DTU->applyUpdates(Updates);
      }
      return true;
    }
  }

  // Not a foldable terminator kind (or no constant condition).
  return false;
}
395
396//===----------------------------------------------------------------------===//
397// Local dead code elimination.
398//
399
400/// isInstructionTriviallyDead - Return true if the result produced by the
401/// instruction is not used, and the instruction has no side effects.
402///
403bool llvm::isInstructionTriviallyDead(Instruction *I,
404 const TargetLibraryInfo *TLI) {
405 if (!I->use_empty())
406 return false;
407 return wouldInstructionBeTriviallyDead(I, TLI);
408}
409
410bool llvm::wouldInstructionBeTriviallyDeadOnUnusedPaths(
411 Instruction *I, const TargetLibraryInfo *TLI) {
412 // Instructions that are "markers" and have implied meaning on code around
413 // them (without explicit uses), are not dead on unused paths.
414 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Val: I))
415 if (II->getIntrinsicID() == Intrinsic::stacksave ||
416 II->getIntrinsicID() == Intrinsic::launder_invariant_group ||
417 II->isLifetimeStartOrEnd())
418 return false;
419 return wouldInstructionBeTriviallyDead(I, TLI);
420}
421
bool llvm::wouldInstructionBeTriviallyDead(const Instruction *I,
                                           const TargetLibraryInfo *TLI) {
  // Terminators structure the CFG and are never "dead" in this sense.
  if (I->isTerminator())
    return false;

  // We don't want the landingpad-like instructions removed by anything this
  // general.
  if (I->isEHPad())
    return false;

  // A dbg.label without a label attached carries no information.
  if (const DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(Val: I)) {
    if (DLI->getLabel())
      return false;
    return true;
  }

  // Unused allocation calls (malloc-like) may be removed.
  if (auto *CB = dyn_cast<CallBase>(Val: I))
    if (isRemovableAlloc(V: CB, TLI))
      return true;

  if (!I->willReturn()) {
    auto *II = dyn_cast<IntrinsicInst>(Val: I);
    if (!II)
      return false;

    switch (II->getIntrinsicID()) {
    case Intrinsic::experimental_guard: {
      // Guards on true are operationally no-ops.  In the future we can
      // consider more sophisticated tradeoffs for guards considering potential
      // for check widening, but for now we keep things simple.
      auto *Cond = dyn_cast<ConstantInt>(Val: II->getArgOperand(i: 0));
      return Cond && Cond->isOne();
    }
    // TODO: These intrinsics are not safe to remove, because this may remove
    // a well-defined trap.
    case Intrinsic::wasm_trunc_signed:
    case Intrinsic::wasm_trunc_unsigned:
    case Intrinsic::ptrauth_auth:
    case Intrinsic::ptrauth_resign:
    case Intrinsic::ptrauth_resign_load_relative:
      return true;
    default:
      return false;
    }
  }

  if (!I->mayHaveSideEffects())
    return true;

  // Special case intrinsics that "may have side effects" but can be deleted
  // when dead.
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Val: I)) {
    // Safe to delete llvm.stacksave and launder.invariant.group if dead.
    if (II->getIntrinsicID() == Intrinsic::stacksave ||
        II->getIntrinsicID() == Intrinsic::launder_invariant_group)
      return true;

    // Intrinsics declare sideeffects to prevent them from moving, but they are
    // nops without users.
    if (II->getIntrinsicID() == Intrinsic::allow_runtime_check ||
        II->getIntrinsicID() == Intrinsic::allow_ubsan_check)
      return true;

    if (II->isLifetimeStartOrEnd()) {
      auto *Arg = II->getArgOperand(i: 0);
      // Lifetime markers on poison carry no information.
      if (isa<PoisonValue>(Val: Arg))
        return true;

      // If the only uses of the alloca are lifetime intrinsics, then the
      // intrinsics are dead.
      return llvm::all_of(Range: Arg->uses(), P: [](Use &Use) {
        return isa<LifetimeIntrinsic>(Val: Use.getUser());
      });
    }

    // Assumptions are dead if their condition is trivially true.
    if (II->getIntrinsicID() == Intrinsic::assume &&
        isAssumeWithEmptyBundle(Assume: cast<AssumeInst>(Val: *II))) {
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(Val: II->getArgOperand(i: 0)))
        return !Cond->isZero();

      return false;
    }

    // Constrained FP intrinsics are removable unless they observe strict
    // exception behavior.
    if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(Val: I)) {
      std::optional<fp::ExceptionBehavior> ExBehavior =
          FPI->getExceptionBehavior();
      return *ExBehavior != fp::ebStrict;
    }
  }

  if (auto *Call = dyn_cast<CallBase>(Val: I)) {
    // free(null) / free(undef) is a no-op and may be removed.
    if (Value *FreedOp = getFreedOperand(CB: Call, TLI))
      if (Constant *C = dyn_cast<Constant>(Val: FreedOp))
        return C->isNullValue() || isa<UndefValue>(Val: C);
    // Known math library calls with no observable effect are removable.
    if (isMathLibCallNoop(Call, TLI))
      return true;
  }

  // Non-volatile atomic loads from constants can be removed.
  if (auto *LI = dyn_cast<LoadInst>(Val: I))
    if (auto *GV = dyn_cast<GlobalVariable>(
            Val: LI->getPointerOperand()->stripPointerCasts()))
      if (!LI->isVolatile() && GV->isConstant())
        return true;

  return false;
}
530
531/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
532/// trivially dead instruction, delete it. If that makes any of its operands
533/// trivially dead, delete them too, recursively. Return true if any
534/// instructions were deleted.
535bool llvm::RecursivelyDeleteTriviallyDeadInstructions(
536 Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU,
537 std::function<void(Value *)> AboutToDeleteCallback) {
538 Instruction *I = dyn_cast<Instruction>(Val: V);
539 if (!I || !isInstructionTriviallyDead(I, TLI))
540 return false;
541
542 SmallVector<WeakTrackingVH, 16> DeadInsts;
543 DeadInsts.push_back(Elt: I);
544 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
545 AboutToDeleteCallback);
546
547 return true;
548}
549
550bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive(
551 SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
552 MemorySSAUpdater *MSSAU,
553 std::function<void(Value *)> AboutToDeleteCallback) {
554 unsigned S = 0, E = DeadInsts.size(), Alive = 0;
555 for (; S != E; ++S) {
556 auto *I = dyn_cast_or_null<Instruction>(Val&: DeadInsts[S]);
557 if (!I || !isInstructionTriviallyDead(I)) {
558 DeadInsts[S] = nullptr;
559 ++Alive;
560 }
561 }
562 if (Alive == E)
563 return false;
564 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
565 AboutToDeleteCallback);
566 return true;
567}
568
void llvm::RecursivelyDeleteTriviallyDeadInstructions(
    SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
    MemorySSAUpdater *MSSAU,
    std::function<void(Value *)> AboutToDeleteCallback) {
  // Process the dead instruction list until empty.
  while (!DeadInsts.empty()) {
    Value *V = DeadInsts.pop_back_val();
    // Entries may have been nulled out (e.g. by the Permissive variant) or
    // invalidated by earlier deletions; skip those.
    Instruction *I = cast_or_null<Instruction>(Val: V);
    if (!I)
      continue;
    assert(isInstructionTriviallyDead(I, TLI) &&
           "Live instruction found in dead worklist!");
    assert(I->use_empty() && "Instructions with uses are not dead.");

    // Don't lose the debug info while deleting the instructions.
    salvageDebugInfo(I&: *I);

    // Give the caller a chance to observe the instruction before it goes away.
    if (AboutToDeleteCallback)
      AboutToDeleteCallback(I);

    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (Use &OpU : I->operands()) {
      Value *OpV = OpU.get();
      OpU.set(nullptr);

      if (!OpV->use_empty())
        continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(Val: OpV))
        if (isInstructionTriviallyDead(I: OpI, TLI))
          DeadInsts.push_back(Elt: OpI);
    }
    // Keep MemorySSA in sync before the instruction is erased.
    if (MSSAU)
      MSSAU->removeMemoryAccess(I);

    I->eraseFromParent();
  }
}
611
612bool llvm::replaceDbgUsesWithUndef(Instruction *I) {
613 SmallVector<DbgVariableRecord *, 1> DPUsers;
614 findDbgUsers(V: I, DbgVariableRecords&: DPUsers);
615 for (auto *DVR : DPUsers)
616 DVR->setKillLocation();
617 return !DPUsers.empty();
618}
619
620/// areAllUsesEqual - Check whether the uses of a value are all the same.
621/// This is similar to Instruction::hasOneUse() except this will also return
622/// true when there are no uses or multiple uses that all refer to the same
623/// value.
624static bool areAllUsesEqual(Instruction *I) {
625 Value::user_iterator UI = I->user_begin();
626 Value::user_iterator UE = I->user_end();
627 if (UI == UE)
628 return true;
629
630 User *TheUse = *UI;
631 for (++UI; UI != UE; ++UI) {
632 if (*UI != TheUse)
633 return false;
634 }
635 return true;
636}
637
638/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
639/// dead PHI node, due to being a def-use chain of single-use nodes that
640/// either forms a cycle or is terminated by a trivially dead instruction,
641/// delete it. If that makes any of its operands trivially dead, delete them
642/// too, recursively. Return true if a change was made.
bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
                                        const TargetLibraryInfo *TLI,
                                        llvm::MemorySSAUpdater *MSSAU) {
  SmallPtrSet<Instruction*, 4> Visited;
  // Walk the chain of instructions starting at PN where every node has all
  // its uses in a single user and no side effects; the loop increment steps
  // to that unique user.
  for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
       I = cast<Instruction>(Val: *I->user_begin())) {
    // Chain terminated by an unused instruction: the whole chain is dead.
    if (I->use_empty())
      return RecursivelyDeleteTriviallyDeadInstructions(V: I, TLI, MSSAU);

    // If we find an instruction more than once, we're on a cycle that
    // won't prove fruitful.
    if (!Visited.insert(Ptr: I).second) {
      // Break the cycle and delete the instruction and its operands.
      I->replaceAllUsesWith(V: PoisonValue::get(T: I->getType()));
      (void)RecursivelyDeleteTriviallyDeadInstructions(V: I, TLI, MSSAU);
      return true;
    }
  }
  // Hit an instruction with multiple distinct users or side effects: the PHI
  // is not effectively dead.
  return false;
}
663
/// Try to delete I if trivially dead, otherwise try to simplify it; newly
/// dead operands and users needing revisit are pushed onto WorkList.
/// Returns true if I was deleted or replaced.
static bool
simplifyAndDCEInstruction(Instruction *I,
                          SmallSetVector<Instruction *, 16> &WorkList,
                          const DataLayout &DL,
                          const TargetLibraryInfo *TLI) {
  if (isInstructionTriviallyDead(I, TLI)) {
    // Don't lose debug info when the instruction goes away.
    salvageDebugInfo(I&: *I);

    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      Value *OpV = I->getOperand(i);
      I->setOperand(i, Val: nullptr);

      // The I == OpV check guards against self-referential instructions
      // (e.g. a PHI using itself).
      if (!OpV->use_empty() || I == OpV)
        continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(Val: OpV))
        if (isInstructionTriviallyDead(I: OpI, TLI))
          WorkList.insert(X: OpI);
    }

    I->eraseFromParent();

    return true;
  }

  if (Value *SimpleV = simplifyInstruction(I, Q: DL)) {
    // Add the users to the worklist. CAREFUL: an instruction can use itself,
    // in the case of a phi node.
    for (User *U : I->users()) {
      if (U != I) {
        WorkList.insert(X: cast<Instruction>(Val: U));
      }
    }

    // Replace the instruction with its simplified value.
    bool Changed = false;
    if (!I->use_empty()) {
      I->replaceAllUsesWith(V: SimpleV);
      Changed = true;
    }
    // The RAUW above may have made I dead; clean it up immediately.
    if (isInstructionTriviallyDead(I, TLI)) {
      I->eraseFromParent();
      Changed = true;
    }
    return Changed;
  }
  // Neither dead nor simplifiable.
  return false;
}
717
718/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
719/// simplify any instructions in it and recursively delete dead instructions.
720///
721/// This returns true if it changed the code, note that it can delete
722/// instructions in other blocks as well in this block.
bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
                                       const TargetLibraryInfo *TLI) {
  bool MadeChange = false;
  const DataLayout &DL = BB->getDataLayout();

#ifndef NDEBUG
  // In debug builds, ensure that the terminator of the block is never replaced
  // or deleted by these simplifications. The idea of simplification is that it
  // cannot introduce new instructions, and there is no way to replace the
  // terminator of a block without introducing a new instruction.
  AssertingVH<Instruction> TerminatorVH(&BB->back());
#endif

  SmallSetVector<Instruction *, 16> WorkList;
  // Iterate over the original function, only adding insts to the worklist
  // if they actually need to be revisited. This avoids having to pre-init
  // the worklist with the entire function's worth of instructions.
  // Note: BI is advanced before simplification so that deleting *BI does not
  // invalidate the iterator; std::prev(BB->end()) excludes the terminator.
  for (BasicBlock::iterator BI = BB->begin(), E = std::prev(x: BB->end());
       BI != E;) {
    assert(!BI->isTerminator());
    Instruction *I = &*BI;
    ++BI;

    // We're visiting this instruction now, so make sure it's not in the
    // worklist from an earlier visit.
    if (!WorkList.count(key: I))
      MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
  }

  // Drain the revisit worklist until a fixed point is reached.
  while (!WorkList.empty()) {
    Instruction *I = WorkList.pop_back_val();
    MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
  }
  return MadeChange;
}
758
759//===----------------------------------------------------------------------===//
760// Control Flow Graph Restructuring.
761//
762
/// Merge DestBB into its single predecessor: fold single-entry PHIs, splice
/// the predecessor's instructions into DestBB, and retarget all edges.
/// Assumes DestBB has exactly one predecessor.
void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB,
                                       DomTreeUpdater *DTU) {

  // If BB has single-entry PHI nodes, fold them.
  while (PHINode *PN = dyn_cast<PHINode>(Val: DestBB->begin())) {
    Value *NewVal = PN->getIncomingValue(i: 0);
    // Replace self referencing PHI with poison, it must be dead.
    if (NewVal == PN) NewVal = PoisonValue::get(T: PN->getType());
    PN->replaceAllUsesWith(V: NewVal);
    PN->eraseFromParent();
  }

  BasicBlock *PredBB = DestBB->getSinglePredecessor();
  assert(PredBB && "Block doesn't have a single predecessor!");

  bool ReplaceEntryBB = PredBB->isEntryBlock();

  // DTU updates: Collect all the edges that enter
  // PredBB. These dominator edges will be redirected to DestBB.
  SmallVector<DominatorTree::UpdateType, 32> Updates;

  if (DTU) {
    // To avoid processing the same predecessor more than once.
    SmallPtrSet<BasicBlock *, 2> SeenPreds;
    Updates.reserve(N: Updates.size() + 2 * pred_size(BB: PredBB) + 1);
    for (BasicBlock *PredOfPredBB : predecessors(BB: PredBB))
      // This predecessor of PredBB may already have DestBB as a successor.
      if (PredOfPredBB != PredBB)
        if (SeenPreds.insert(Ptr: PredOfPredBB).second)
          Updates.push_back(Elt: {DominatorTree::Insert, PredOfPredBB, DestBB});
    SeenPreds.clear();
    for (BasicBlock *PredOfPredBB : predecessors(BB: PredBB))
      if (SeenPreds.insert(Ptr: PredOfPredBB).second)
        Updates.push_back(Elt: {DominatorTree::Delete, PredOfPredBB, PredBB});
    Updates.push_back(Elt: {DominatorTree::Delete, PredBB, DestBB});
  }

  // Zap anything that took the address of DestBB. Not doing this will give the
  // address an invalid value.
  if (DestBB->hasAddressTaken()) {
    BlockAddress *BA = BlockAddress::get(BB: DestBB);
    // Replace with int-to-ptr of 1, an arbitrary non-null constant.
    Constant *Replacement =
        ConstantInt::get(Ty: Type::getInt32Ty(C&: BA->getContext()), V: 1);
    BA->replaceAllUsesWith(V: ConstantExpr::getIntToPtr(C: Replacement,
                                                      Ty: BA->getType()));
    BA->destroyConstant();
  }

  // Anything that branched to PredBB now branches to DestBB.
  PredBB->replaceAllUsesWith(V: DestBB);

  // Splice all the instructions from PredBB to DestBB.
  // PredBB is left containing only an 'unreachable' terminator until it is
  // deleted below.
  PredBB->getTerminator()->eraseFromParent();
  DestBB->splice(ToIt: DestBB->begin(), FromBB: PredBB);
  new UnreachableInst(PredBB->getContext(), PredBB);

  // If the PredBB is the entry block of the function, move DestBB up to
  // become the entry block after we erase PredBB.
  if (ReplaceEntryBB)
    DestBB->moveAfter(MovePos: PredBB);

  if (DTU) {
    assert(PredBB->size() == 1 &&
           isa<UnreachableInst>(PredBB->getTerminator()) &&
           "The successor list of PredBB isn't empty before "
           "applying corresponding DTU updates.");
    DTU->applyUpdatesPermissive(Updates);
    DTU->deleteBB(DelBB: PredBB);
    // Recalculation of DomTree is needed when updating a forward DomTree and
    // the Entry BB is replaced.
    if (ReplaceEntryBB && DTU->hasDomTree()) {
      // The entry block was removed and there is no external interface for
      // the dominator tree to be notified of this change. In this corner-case
      // we recalculate the entire tree.
      DTU->recalculate(F&: *(DestBB->getParent()));
    }
  }

  else {
    PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
  }
}
845
846/// Return true if we can choose one of these values to use in place of the
847/// other. Note that we will always choose the non-undef value to keep.
848static bool CanMergeValues(Value *First, Value *Second) {
849 return First == Second || isa<UndefValue>(Val: First) || isa<UndefValue>(Val: Second);
850}
851
852/// Return true if we can fold BB, an almost-empty BB ending in an unconditional
853/// branch to Succ, into Succ.
854///
855/// Assumption: Succ is the single successor for BB.
/// Return true if folding BB (an almost-empty block ending in an unconditional
/// branch to Succ) into Succ would not create conflicting PHI incoming values
/// for any predecessor that BB and Succ share.  BBPreds is the predecessor set
/// of BB.  Assumption: Succ is the single successor for BB.
static bool
CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ,
                                const SmallPtrSetImpl<BasicBlock *> &BBPreds) {
  assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");

  LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
                    << Succ->getName() << "\n");
  // Shortcut, if there is only a single predecessor it must be BB and merging
  // is always safe
  if (Succ->getSinglePredecessor())
    return true;

  // Look at all the phi nodes in Succ, to see if they present a conflict when
  // merging these blocks
  for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(Val: I); ++I) {
    PHINode *PN = cast<PHINode>(Val&: I);

    // If the incoming value from BB is again a PHINode in
    // BB which has the same incoming value for *PI as PN does, we can
    // merge the phi nodes and then the blocks can still be merged
    PHINode *BBPN = dyn_cast<PHINode>(Val: PN->getIncomingValueForBlock(BB));
    if (BBPN && BBPN->getParent() == BB) {
      // Compare BBPN's and PN's incoming values for every predecessor the two
      // blocks have in common.
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        BasicBlock *IBB = PN->getIncomingBlock(i: PI);
        if (BBPreds.count(Ptr: IBB) &&
            !CanMergeValues(First: BBPN->getIncomingValueForBlock(BB: IBB),
                            Second: PN->getIncomingValue(i: PI))) {
          LLVM_DEBUG(dbgs()
                     << "Can't fold, phi node " << PN->getName() << " in "
                     << Succ->getName() << " is conflicting with "
                     << BBPN->getName() << " with regard to common predecessor "
                     << IBB->getName() << "\n");
          return false;
        }
      }
    } else {
      Value* Val = PN->getIncomingValueForBlock(BB);
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        // See if the incoming value for the common predecessor is equal to the
        // one for BB, in which case this phi node will not prevent the merging
        // of the block.
        BasicBlock *IBB = PN->getIncomingBlock(i: PI);
        if (BBPreds.count(Ptr: IBB) &&
            !CanMergeValues(First: Val, Second: PN->getIncomingValue(i: PI))) {
          LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
                            << " in " << Succ->getName()
                            << " is conflicting with regard to common "
                            << "predecessor " << IBB->getName() << "\n");
          return false;
        }
      }
    }
  }

  // No conflicting PHI incoming values were found.
  return true;
}
912
// Convenience aliases for the block-merging helpers below.
using PredBlockVector = SmallVector<BasicBlock *, 16>;
// Maps a predecessor block to the value it contributes to a phi; a null value
// denotes "undefined so far".
using IncomingValueMap = SmallDenseMap<BasicBlock *, Value *, 16>;
915
916/// Determines the value to use as the phi node input for a block.
917///
918/// Select between \p OldVal any value that we know flows from \p BB
919/// to a particular phi on the basis of which one (if either) is not
920/// undef. Update IncomingValues based on the selected value.
921///
922/// \param OldVal The value we are considering selecting.
923/// \param BB The block that the value flows in from.
924/// \param IncomingValues A map from block-to-value for other phi inputs
925/// that we have examined.
926///
927/// \returns the selected value.
928static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
929 IncomingValueMap &IncomingValues) {
930 IncomingValueMap::const_iterator It = IncomingValues.find(Val: BB);
931 if (!isa<UndefValue>(Val: OldVal)) {
932 assert((It != IncomingValues.end() &&
933 (!(It->second) || It->second == OldVal)) &&
934 "Expected OldVal to match incoming value from BB!");
935
936 IncomingValues.insert_or_assign(Key: BB, Val&: OldVal);
937 return OldVal;
938 }
939
940 if (It != IncomingValues.end() && It->second)
941 return It->second;
942
943 return OldVal;
944}
945
946/// Create a map from block to value for the operands of a
947/// given phi.
948///
949/// This function initializes the map with UndefValue for all predecessors
950/// in BBPreds, and then updates the map with concrete non-undef values
951/// found in the PHI node.
952///
953/// \param PN The phi we are collecting the map for.
954/// \param BBPreds The list of all predecessor blocks to initialize with Undef.
955/// \param IncomingValues [out] The map from block to value for this phi.
956static void gatherIncomingValuesToPhi(PHINode *PN,
957 const PredBlockVector &BBPreds,
958 IncomingValueMap &IncomingValues) {
959 for (BasicBlock *Pred : BBPreds)
960 IncomingValues[Pred] = nullptr;
961
962 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
963 Value *V = PN->getIncomingValue(i);
964 if (isa<UndefValue>(Val: V))
965 continue;
966
967 BasicBlock *BB = PN->getIncomingBlock(i);
968 auto It = IncomingValues.find(Val: BB);
969 if (It != IncomingValues.end())
970 It->second = V;
971 }
972}
973
/// Replace the incoming undef values to a phi with the values
/// from a block-to-value map.
///
/// \param PN The phi we are replacing the undefs in.
/// \param IncomingValues A map from block to value; a null mapped value means
///   the block's incoming value is (so far) undefined.
static void replaceUndefValuesInPhi(PHINode *PN,
                                    const IncomingValueMap &IncomingValues) {
  // Operand indices whose value is undef/poison and for which the map has no
  // defined replacement ("true" undefs); fixed up at the end if needed.
  SmallVector<unsigned> TrueUndefOps;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);

    if (!isa<UndefValue>(Val: V)) continue;

    BasicBlock *BB = PN->getIncomingBlock(i);
    IncomingValueMap::const_iterator It = IncomingValues.find(Val: BB);
    // Blocks absent from the map are left untouched.
    if (It == IncomingValues.end())
      continue;

    // Keep track of undef/poison incoming values. Those must match, so we fix
    // them up below if needed.
    // Note: this is conservatively correct, but we could try harder and group
    // the undef values per incoming basic block.
    if (!It->second) {
      TrueUndefOps.push_back(Elt: i);
      continue;
    }

    // There is a defined value for this incoming block, so map this undef
    // incoming value to the defined value.
    PN->setIncomingValue(i, V: It->second);
  }

  // If there are both undef and poison values incoming, then convert those
  // values to undef. It is invalid to have different values for the same
  // incoming block.
  unsigned PoisonCount = count_if(Range&: TrueUndefOps, P: [&](unsigned i) {
    return isa<PoisonValue>(Val: PN->getIncomingValue(i));
  });
  if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) {
    for (unsigned i : TrueUndefOps)
      PN->setIncomingValue(i, V: UndefValue::get(T: PN->getType()));
  }
}
1017
// Return true only when BB and Succ share exactly one common predecessor,
// recording it in \p CommonPred (which the caller must initialize to null).
// Only handles cases when BB can't be merged while its predecessors can be
// redirected.
static bool
CanRedirectPredsOfEmptyBBToSucc(BasicBlock *BB, BasicBlock *Succ,
                                const SmallPtrSetImpl<BasicBlock *> &BBPreds,
                                BasicBlock *&CommonPred) {

  // There must be phis in BB, otherwise BB will be merged into Succ directly
  if (BB->phis().empty() || Succ->phis().empty())
    return false;

  // BB must have predecessors not shared that can be redirected to Succ
  if (!BB->hasNPredecessorsOrMore(N: 2))
    return false;

  // Predecessors ending in an indirectbr cannot have their target rewritten.
  if (any_of(Range: BBPreds, P: [](const BasicBlock *Pred) {
        return isa<IndirectBrInst>(Val: Pred->getTerminator());
      }))
    return false;

  // Get the single common predecessor of both BB and Succ. Return false
  // when there are more than one common predecessors.
  for (BasicBlock *SuccPred : predecessors(BB: Succ)) {
    if (BBPreds.count(Ptr: SuccPred)) {
      if (CommonPred)
        return false;
      CommonPred = SuccPred;
    }
  }

  return true;
}
1051
1052/// Check whether removing \p BB will make the phis in its \p Succ have too
1053/// many incoming entries. This function does not check whether \p BB is
1054/// foldable or not.
1055static bool introduceTooManyPhiEntries(BasicBlock *BB, BasicBlock *Succ) {
1056 // If BB only has one predecessor, then removing it will not introduce more
1057 // incoming edges for phis.
1058 if (BB->hasNPredecessors(N: 1))
1059 return false;
1060 unsigned NumPreds = pred_size(BB);
1061 unsigned NumChangedPhi = 0;
1062 for (auto &Phi : Succ->phis()) {
1063 // If the incoming value is a phi and the phi is defined in BB,
1064 // then removing BB will not increase the total phi entries of the ir.
1065 if (auto *IncomingPhi = dyn_cast<PHINode>(Val: Phi.getIncomingValueForBlock(BB)))
1066 if (IncomingPhi->getParent() == BB)
1067 continue;
1068 // Otherwise, we need to add entries to the phi
1069 NumChangedPhi++;
1070 }
1071 // For every phi that needs to be changed, (NumPreds - 1) new entries will be
1072 // added. If the total increase in phi entries exceeds
1073 // MaxPhiEntriesIncreaseAfterRemovingEmptyBlock, it will be considered as
1074 // introducing too many new phi entries.
1075 return (NumPreds - 1) * NumChangedPhi >
1076 MaxPhiEntriesIncreaseAfterRemovingEmptyBlock;
1077}
1078
/// Replace a value flowing from a block to a phi with
/// potentially multiple instances of that value flowing from the
/// block's predecessors to the phi.
///
/// \param BB The block with the value flowing into the phi.
/// \param BBPreds The predecessors of BB.
/// \param PN The phi that we are updating.
/// \param CommonPred The common predecessor of BB and PN's BasicBlock
static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
                                                const PredBlockVector &BBPreds,
                                                PHINode *PN,
                                                BasicBlock *CommonPred) {
  // Drop BB's entry from PN first; its value is redistributed to BB's
  // predecessors below.
  Value *OldVal = PN->removeIncomingValue(BB, DeletePHIIfEmpty: false);
  assert(OldVal && "No entry in PHI for Pred BB!");

  // Map BBPreds to defined values or nullptr (representing undefined values).
  IncomingValueMap IncomingValues;

  // We are merging two blocks - BB, and the block containing PN - and
  // as a result we need to redirect edges from the predecessors of BB
  // to go to the block containing PN, and update PN
  // accordingly. Since we allow merging blocks in the case where the
  // predecessor and successor blocks both share some predecessors,
  // and where some of those common predecessors might have undef
  // values flowing into PN, we want to rewrite those values to be
  // consistent with the non-undef values.

  gatherIncomingValuesToPhi(PN, BBPreds, IncomingValues);

  // If this incoming value is one of the PHI nodes in BB, the new entries
  // in the PHI node are the entries from the old PHI.
  if (isa<PHINode>(Val: OldVal) && cast<PHINode>(Val: OldVal)->getParent() == BB) {
    PHINode *OldValPN = cast<PHINode>(Val: OldVal);
    for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
      // Note that, since we are merging phi nodes and BB and Succ might
      // have common predecessors, we could end up with a phi node with
      // identical incoming branches. This will be cleaned up later (and
      // will trigger asserts if we try to clean it up now, without also
      // simplifying the corresponding conditional branch).
      BasicBlock *PredBB = OldValPN->getIncomingBlock(i);

      // The common predecessor keeps its edge to PN's block; handled below.
      if (PredBB == CommonPred)
        continue;

      Value *PredVal = OldValPN->getIncomingValue(i);
      Value *Selected =
          selectIncomingValueForBlock(OldVal: PredVal, BB: PredBB, IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(V: Selected, BB: PredBB);
    }
    // BB stays around (mergeable-but-not-killable case), so route the common
    // predecessor's value through BB itself.
    if (CommonPred)
      PN->addIncoming(V: OldValPN->getIncomingValueForBlock(BB: CommonPred), BB);

  } else {
    for (BasicBlock *PredBB : BBPreds) {
      // Update existing incoming values in PN for this
      // predecessor of BB.
      if (PredBB == CommonPred)
        continue;

      Value *Selected =
          selectIncomingValueForBlock(OldVal, BB: PredBB, IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(V: Selected, BB: PredBB);
    }
    if (CommonPred)
      PN->addIncoming(V: OldVal, BB);
  }

  replaceUndefValuesInPhi(PN, IncomingValues);
}
1154
1155bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
1156 DomTreeUpdater *DTU) {
1157 assert(BB != &BB->getParent()->getEntryBlock() &&
1158 "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");
1159
1160 // We can't simplify infinite loops.
1161 BasicBlock *Succ = cast<UncondBrInst>(Val: BB->getTerminator())->getSuccessor(i: 0);
1162 if (BB == Succ)
1163 return false;
1164
1165 SmallPtrSet<BasicBlock *, 16> BBPreds(llvm::from_range, predecessors(BB));
1166
1167 // The single common predecessor of BB and Succ when BB cannot be killed
1168 BasicBlock *CommonPred = nullptr;
1169
1170 bool BBKillable = CanPropagatePredecessorsForPHIs(BB, Succ, BBPreds);
1171
1172 // Even if we can not fold BB into Succ, we may be able to redirect the
1173 // predecessors of BB to Succ.
1174 bool BBPhisMergeable = BBKillable || CanRedirectPredsOfEmptyBBToSucc(
1175 BB, Succ, BBPreds, CommonPred);
1176
1177 if ((!BBKillable && !BBPhisMergeable) || introduceTooManyPhiEntries(BB, Succ))
1178 return false;
1179
1180 // Check to see if merging these blocks/phis would cause conflicts for any of
1181 // the phi nodes in BB or Succ. If not, we can safely merge.
1182
1183 // Check for cases where Succ has multiple predecessors and a PHI node in BB
1184 // has uses which will not disappear when the PHI nodes are merged. It is
1185 // possible to handle such cases, but difficult: it requires checking whether
1186 // BB dominates Succ, which is non-trivial to calculate in the case where
1187 // Succ has multiple predecessors. Also, it requires checking whether
1188 // constructing the necessary self-referential PHI node doesn't introduce any
1189 // conflicts; this isn't too difficult, but the previous code for doing this
1190 // was incorrect.
1191 //
1192 // Note that if this check finds a live use, BB dominates Succ, so BB is
1193 // something like a loop pre-header (or rarely, a part of an irreducible CFG);
1194 // folding the branch isn't profitable in that case anyway.
1195 if (!Succ->getSinglePredecessor()) {
1196 BasicBlock::iterator BBI = BB->begin();
1197 while (isa<PHINode>(Val: *BBI)) {
1198 for (Use &U : BBI->uses()) {
1199 if (PHINode* PN = dyn_cast<PHINode>(Val: U.getUser())) {
1200 if (PN->getIncomingBlock(U) != BB)
1201 return false;
1202 } else {
1203 return false;
1204 }
1205 }
1206 ++BBI;
1207 }
1208 }
1209
1210 if (BBPhisMergeable && CommonPred)
1211 LLVM_DEBUG(dbgs() << "Found Common Predecessor between: " << BB->getName()
1212 << " and " << Succ->getName() << " : "
1213 << CommonPred->getName() << "\n");
1214
1215 // 'BB' and 'BB->Pred' are loop latches, bail out to presrve inner loop
1216 // metadata.
1217 //
1218 // FIXME: This is a stop-gap solution to preserve inner-loop metadata given
1219 // current status (that loop metadata is implemented as metadata attached to
1220 // the branch instruction in the loop latch block). To quote from review
1221 // comments, "the current representation of loop metadata (using a loop latch
1222 // terminator attachment) is known to be fundamentally broken. Loop latches
1223 // are not uniquely associated with loops (both in that a latch can be part of
1224 // multiple loops and a loop may have multiple latches). Loop headers are. The
1225 // solution to this problem is also known: Add support for basic block
1226 // metadata, and attach loop metadata to the loop header."
1227 //
1228 // Why bail out:
1229 // In this case, we expect 'BB' is the latch for outer-loop and 'BB->Pred' is
1230 // the latch for inner-loop (see reason below), so bail out to prerserve
1231 // inner-loop metadata rather than eliminating 'BB' and attaching its metadata
1232 // to this inner-loop.
1233 // - The reason we believe 'BB' and 'BB->Pred' have different inner-most
1234 // loops: assuming 'BB' and 'BB->Pred' are from the same inner-most loop L,
1235 // then 'BB' is the header and latch of 'L' and thereby 'L' must consist of
1236 // one self-looping basic block, which is contradictory with the assumption.
1237 //
1238 // To illustrate how inner-loop metadata is dropped:
1239 //
1240 // CFG Before
1241 //
1242 // BB is while.cond.exit, attached with loop metdata md2.
1243 // BB->Pred is for.body, attached with loop metadata md1.
1244 //
1245 // entry
1246 // |
1247 // v
1248 // ---> while.cond -------------> while.end
1249 // | |
1250 // | v
1251 // | while.body
1252 // | |
1253 // | v
1254 // | for.body <---- (md1)
1255 // | | |______|
1256 // | v
1257 // | while.cond.exit (md2)
1258 // | |
1259 // |_______|
1260 //
1261 // CFG After
1262 //
1263 // while.cond1 is the merge of while.cond.exit and while.cond above.
1264 // for.body is attached with md2, and md1 is dropped.
1265 // If LoopSimplify runs later (as a part of loop pass), it could create
1266 // dedicated exits for inner-loop (essentially adding `while.cond.exit`
1267 // back), but won't it won't see 'md1' nor restore it for the inner-loop.
1268 //
1269 // entry
1270 // |
1271 // v
1272 // ---> while.cond1 -------------> while.end
1273 // | |
1274 // | v
1275 // | while.body
1276 // | |
1277 // | v
1278 // | for.body <---- (md2)
1279 // |_______| |______|
1280 if (Instruction *TI = BB->getTerminator())
1281 if (TI->hasNonDebugLocLoopMetadata())
1282 for (BasicBlock *Pred : predecessors(BB))
1283 if (Instruction *PredTI = Pred->getTerminator())
1284 if (PredTI->hasNonDebugLocLoopMetadata())
1285 return false;
1286
1287 if (BBKillable)
1288 LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
1289 else if (BBPhisMergeable)
1290 LLVM_DEBUG(dbgs() << "Merge Phis in Trivial BB: \n" << *BB);
1291
1292 SmallVector<DominatorTree::UpdateType, 32> Updates;
1293
1294 if (DTU) {
1295 // To avoid processing the same predecessor more than once.
1296 SmallPtrSet<BasicBlock *, 8> SeenPreds;
1297 // All predecessors of BB (except the common predecessor) will be moved to
1298 // Succ.
1299 Updates.reserve(N: Updates.size() + 2 * pred_size(BB) + 1);
1300 SmallPtrSet<BasicBlock *, 16> SuccPreds(llvm::from_range,
1301 predecessors(BB: Succ));
1302 for (auto *PredOfBB : predecessors(BB)) {
1303 // Do not modify those common predecessors of BB and Succ
1304 if (!SuccPreds.contains(Ptr: PredOfBB))
1305 if (SeenPreds.insert(Ptr: PredOfBB).second)
1306 Updates.push_back(Elt: {DominatorTree::Insert, PredOfBB, Succ});
1307 }
1308
1309 SeenPreds.clear();
1310
1311 for (auto *PredOfBB : predecessors(BB))
1312 // When BB cannot be killed, do not remove the edge between BB and
1313 // CommonPred.
1314 if (SeenPreds.insert(Ptr: PredOfBB).second && PredOfBB != CommonPred)
1315 Updates.push_back(Elt: {DominatorTree::Delete, PredOfBB, BB});
1316
1317 if (BBKillable)
1318 Updates.push_back(Elt: {DominatorTree::Delete, BB, Succ});
1319 }
1320
1321 if (isa<PHINode>(Val: Succ->begin())) {
1322 // If there is more than one pred of succ, and there are PHI nodes in
1323 // the successor, then we need to add incoming edges for the PHI nodes
1324 //
1325 const PredBlockVector BBPreds(predecessors(BB));
1326
1327 // Loop over all of the PHI nodes in the successor of BB.
1328 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(Val: I); ++I) {
1329 PHINode *PN = cast<PHINode>(Val&: I);
1330 redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN, CommonPred);
1331 }
1332 }
1333
1334 if (Succ->getSinglePredecessor()) {
1335 // BB is the only predecessor of Succ, so Succ will end up with exactly
1336 // the same predecessors BB had.
1337 // Copy over any phi, debug or lifetime instruction.
1338 BB->getTerminator()->eraseFromParent();
1339 Succ->splice(ToIt: Succ->getFirstNonPHIIt(), FromBB: BB);
1340 } else {
1341 while (PHINode *PN = dyn_cast<PHINode>(Val: &BB->front())) {
1342 // We explicitly check for such uses for merging phis.
1343 assert(PN->use_empty() && "There shouldn't be any uses here!");
1344 PN->eraseFromParent();
1345 }
1346 }
1347
1348 // If the unconditional branch we replaced contains non-debug llvm.loop
1349 // metadata, we add the metadata to the branch instructions in the
1350 // predecessors.
1351 if (Instruction *TI = BB->getTerminator())
1352 if (TI->hasNonDebugLocLoopMetadata()) {
1353 MDNode *LoopMD = TI->getMetadata(KindID: LLVMContext::MD_loop);
1354 for (BasicBlock *Pred : predecessors(BB))
1355 Pred->getTerminator()->setMetadata(KindID: LLVMContext::MD_loop, Node: LoopMD);
1356 }
1357
1358 if (BBKillable) {
1359 // Everything that jumped to BB now goes to Succ.
1360 BB->replaceAllUsesWith(V: Succ);
1361
1362 if (!Succ->hasName())
1363 Succ->takeName(V: BB);
1364
1365 // Clear the successor list of BB to match updates applying to DTU later.
1366 if (BB->getTerminator())
1367 BB->back().eraseFromParent();
1368
1369 new UnreachableInst(BB->getContext(), BB);
1370 assert(succ_empty(BB) && "The successor list of BB isn't empty before "
1371 "applying corresponding DTU updates.");
1372 } else if (BBPhisMergeable) {
1373 // Everything except CommonPred that jumped to BB now goes to Succ.
1374 BB->replaceUsesWithIf(New: Succ, ShouldReplace: [BBPreds, CommonPred](Use &U) -> bool {
1375 if (Instruction *UseInst = dyn_cast<Instruction>(Val: U.getUser()))
1376 return UseInst->getParent() != CommonPred &&
1377 BBPreds.contains(Ptr: UseInst->getParent());
1378 return false;
1379 });
1380 }
1381
1382 if (DTU)
1383 DTU->applyUpdates(Updates);
1384
1385 if (BBKillable)
1386 DeleteDeadBlock(BB, DTU);
1387
1388 return true;
1389}
1390
/// Naive O(#phis^2) duplicate-phi elimination for \p BB: pairwise-compare each
/// phi with those after it, RAUW duplicates to the earlier phi, and record the
/// eliminated phis in \p ToRemove. Returns true if any phi was replaced.
static bool
EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB,
                                    SmallPtrSetImpl<PHINode *> &ToRemove) {
  // This implementation doesn't currently consider undef operands
  // specially. Theoretically, two phis which are identical except for
  // one having an undef where the other doesn't could be collapsed.

  bool Changed = false;

  // Examine each PHI.
  // Note that increment of I must *NOT* be in the iteration_expression, since
  // we don't want to immediately advance when we restart from the beginning.
  for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(Val&: I);) {
    ++I;
    // Is there an identical PHI node in this basic block?
    // Note that we only look in the upper square's triangle,
    // we already checked that the lower triangle PHI's aren't identical.
    for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(Val&: J); ++J) {
      // Already-eliminated phis must not be used as replacements again.
      if (ToRemove.contains(Ptr: DuplicatePN))
        continue;
      if (!DuplicatePN->isIdenticalToWhenDefined(I: PN))
        continue;
      // A duplicate. Replace this PHI with the base PHI.
      ++NumPHICSEs;
      DuplicatePN->replaceAllUsesWith(V: PN);
      ToRemove.insert(Ptr: DuplicatePN);
      Changed = true;

      // The RAUW can change PHIs that we already visited.
      I = BB->begin();
      break; // Start over from the beginning.
    }
  }
  return Changed;
}
1426
/// Hash-set based duplicate-phi elimination for \p BB: insert each phi into a
/// DenseSet keyed by structural identity; an insertion collision marks a
/// duplicate, which is RAUW'd to the retained phi and recorded in \p ToRemove.
/// Returns true if any phi was replaced.
static bool
EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB,
                                       SmallPtrSetImpl<PHINode *> &ToRemove) {
  // This implementation doesn't currently consider undef operands
  // specially. Theoretically, two phis which are identical except for
  // one having an undef where the other doesn't could be collapsed.

  // DenseMapInfo that treats phis as equal when they are structurally
  // identical (same incoming values and blocks).
  struct PHIDenseMapInfo {
    static PHINode *getEmptyKey() {
      return DenseMapInfo<PHINode *>::getEmptyKey();
    }

    static PHINode *getTombstoneKey() {
      return DenseMapInfo<PHINode *>::getTombstoneKey();
    }

    static bool isSentinel(PHINode *PN) {
      return PN == getEmptyKey() || PN == getTombstoneKey();
    }

    // WARNING: this logic must be kept in sync with
    //          Instruction::isIdenticalToWhenDefined()!
    static unsigned getHashValueImpl(PHINode *PN) {
      // Compute a hash value on the operands. Instcombine will likely have
      // sorted them, which helps expose duplicates, but we have to check all
      // the operands to be safe in case instcombine hasn't run.
      return static_cast<unsigned>(
          hash_combine(args: hash_combine_range(R: PN->operand_values()),
                       args: hash_combine_range(R: PN->blocks())));
    }

    static unsigned getHashValue(PHINode *PN) {
#ifndef NDEBUG
      // If -phicse-debug-hash was specified, return a constant -- this
      // will force all hashing to collide, so we'll exhaustively search
      // the table for a match, and the assertion in isEqual will fire if
      // there's a bug causing equal keys to hash differently.
      if (PHICSEDebugHash)
        return 0;
#endif
      return getHashValueImpl(PN);
    }

    static bool isEqualImpl(PHINode *LHS, PHINode *RHS) {
      if (isSentinel(PN: LHS) || isSentinel(PN: RHS))
        return LHS == RHS;
      return LHS->isIdenticalTo(I: RHS);
    }

    static bool isEqual(PHINode *LHS, PHINode *RHS) {
      // These comparisons are nontrivial, so assert that equality implies
      // hash equality (DenseMap demands this as an invariant).
      bool Result = isEqualImpl(LHS, RHS);
      assert(!Result || (isSentinel(LHS) && LHS == RHS) ||
             getHashValueImpl(LHS) == getHashValueImpl(RHS));
      return Result;
    }
  };

  // Set of unique PHINodes.
  DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
  PHISet.reserve(Size: 4 * PHICSENumPHISmallSize);

  // Examine each PHI.
  bool Changed = false;
  for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(Val: I++);) {
    // Skip phis already scheduled for removal.
    if (ToRemove.contains(Ptr: PN))
      continue;
    auto Inserted = PHISet.insert(V: PN);
    if (!Inserted.second) {
      // A duplicate. Replace this PHI with its duplicate.
      ++NumPHICSEs;
      PN->replaceAllUsesWith(V: *Inserted.first);
      ToRemove.insert(Ptr: PN);
      Changed = true;

      // The RAUW can change PHIs that we already visited. Start over from the
      // beginning.
      PHISet.clear();
      I = BB->begin();
    }
  }

  return Changed;
}
1512
/// CSE duplicate phi nodes in \p BB, recording eliminated phis in \p ToRemove
/// for the caller to erase. Uses the naive pairwise scan for small phi counts
/// (cheaper constants) and the hash-set implementation otherwise; the debug
/// hash option forces the set-based path so its collision assertions run.
bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB,
                                      SmallPtrSetImpl<PHINode *> &ToRemove) {
  if (
#ifndef NDEBUG
      !PHICSEDebugHash &&
#endif
      hasNItemsOrLess(C: BB->phis(), N: PHICSENumPHISmallSize))
    return EliminateDuplicatePHINodesNaiveImpl(BB, ToRemove);
  return EliminateDuplicatePHINodesSetBasedImpl(BB, ToRemove);
}
1523
1524bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
1525 SmallPtrSet<PHINode *, 8> ToRemove;
1526 bool Changed = EliminateDuplicatePHINodes(BB, ToRemove);
1527 for (PHINode *PN : ToRemove)
1528 PN->eraseFromParent();
1529 return Changed;
1530}
1531
/// Try to raise the alignment of the object \p V points at to \p PrefAlign.
/// Handles allocas and global variables; returns the alignment achieved
/// (which may be less than \p PrefAlign), or Align(1) for anything else.
Align llvm::tryEnforceAlignment(Value *V, Align PrefAlign,
                                const DataLayout &DL) {
  V = V->stripPointerCasts();

  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val: V)) {
    // TODO: Ideally, this function would not be called if PrefAlign is smaller
    // than the current alignment, as the known bits calculation should have
    // already taken it into account. However, this is not always the case,
    // as computeKnownBits() has a depth limit, while stripPointerCasts()
    // doesn't.
    Align CurrentAlign = AI->getAlign();
    if (PrefAlign <= CurrentAlign)
      return CurrentAlign;

    // If the preferred alignment is greater than the natural stack alignment
    // then don't round up. This avoids dynamic stack realignment.
    MaybeAlign StackAlign = DL.getStackAlignment();
    if (StackAlign && PrefAlign > *StackAlign)
      return CurrentAlign;
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }

  if (auto *GV = dyn_cast<GlobalVariable>(Val: V)) {
    // TODO: as above, this shouldn't be necessary.
    Align CurrentAlign = GV->getPointerAlignment(DL);
    if (PrefAlign <= CurrentAlign)
      return CurrentAlign;

    // If there is a large requested alignment and we can, bump up the alignment
    // of the global. If the memory we set aside for the global may not be the
    // memory used by the final program then it is impossible for us to reliably
    // enforce the preferred alignment.
    if (!GV->canIncreaseAlignment())
      return CurrentAlign;

    // TLS variables are capped by the target's maximum TLS alignment.
    if (GV->isThreadLocal()) {
      unsigned MaxTLSAlign = GV->getParent()->getMaxTLSAlignment() / CHAR_BIT;
      if (MaxTLSAlign && PrefAlign > Align(MaxTLSAlign))
        PrefAlign = Align(MaxTLSAlign);
    }

    GV->setAlignment(PrefAlign);
    return PrefAlign;
  }

  // Neither an alloca nor a global: nothing we can adjust.
  return Align(1);
}
1580
/// Compute the known alignment of pointer \p V from known bits, and if
/// \p PrefAlign is larger, additionally try to raise the underlying object's
/// alignment via tryEnforceAlignment. Returns the best alignment established.
Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
                                       const DataLayout &DL,
                                       const Instruction *CxtI,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT) {
  assert(V->getType()->isPointerTy() &&
         "getOrEnforceKnownAlignment expects a pointer!");

  // Trailing zero bits of the pointer value give its provable alignment.
  KnownBits Known = computeKnownBits(V, DL, AC, CxtI, DT);
  unsigned TrailZ = Known.countMinTrailingZeros();

  // Avoid trouble with ridiculously large TrailZ values, such as
  // those computed from a null pointer.
  // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
  TrailZ = std::min(a: TrailZ, b: +Value::MaxAlignmentExponent);

  // Also cap the shift below the bit width to keep 1ull << shift defined.
  Align Alignment = Align(1ull << std::min(a: Known.getBitWidth() - 1, b: TrailZ));

  if (PrefAlign && *PrefAlign > Alignment)
    Alignment = std::max(a: Alignment, b: tryEnforceAlignment(V, PrefAlign: *PrefAlign, DL));

  // We don't need to make any adjustment.
  return Alignment;
}
1605
1606///===---------------------------------------------------------------------===//
1607/// Dbg Intrinsic utilities
1608///
1609
1610/// See if there is a dbg.value intrinsic for DIVar for the PHI node.
1611static bool PhiHasDebugValue(DILocalVariable *DIVar,
1612 DIExpression *DIExpr,
1613 PHINode *APN) {
1614 // Since we can't guarantee that the original dbg.declare intrinsic
1615 // is removed by LowerDbgDeclare(), we need to make sure that we are
1616 // not inserting the same dbg.value intrinsic over and over.
1617 SmallVector<DbgVariableRecord *, 1> DbgVariableRecords;
1618 findDbgValues(V: APN, DbgVariableRecords);
1619 for (DbgVariableRecord *DVR : DbgVariableRecords) {
1620 assert(is_contained(DVR->location_ops(), APN));
1621 if ((DVR->getVariable() == DIVar) && (DVR->getExpression() == DIExpr))
1622 return true;
1623 }
1624 return false;
1625}
1626
/// Check if the alloc size of \p ValTy is large enough to cover the variable
/// (or fragment of the variable) described by \p DVR.
///
/// This is primarily intended as a helper for the different
/// ConvertDebugDeclareToDebugValue functions. The dbg.declare that is converted
/// describes an alloca'd variable, so we need to use the alloc size of the
/// value when doing the comparison. E.g. an i1 value will be identified as
/// covering an n-bit fragment, if the store size of i1 is at least n bits.
static bool valueCoversEntireFragment(Type *ValTy, DbgVariableRecord *DVR) {
  const DataLayout &DL = DVR->getModule()->getDataLayout();
  TypeSize ValueSize = DL.getTypeAllocSizeInBits(Ty: ValTy);
  // Preferred path: compare against the active bits of the described
  // variable/fragment when that size is known.
  if (std::optional<uint64_t> FragmentSize =
          DVR->getExpression()->getActiveBits(Var: DVR->getVariable()))
    return TypeSize::isKnownGE(LHS: ValueSize, RHS: TypeSize::getFixed(ExactSize: *FragmentSize));

  // We can't always calculate the size of the DI variable (e.g. if it is a
  // VLA). Try to use the size of the alloca that the dbg intrinsic describes
  // instead.
  if (DVR->isAddressOfVariable()) {
    // DVR should have exactly 1 location when it is an address.
    assert(DVR->getNumVariableLocationOps() == 1 &&
           "address of variable must have exactly 1 location operand.");
    if (auto *AI =
            dyn_cast_or_null<AllocaInst>(Val: DVR->getVariableLocationOp(OpIdx: 0))) {
      if (std::optional<TypeSize> FragmentSize = AI->getAllocationSizeInBits(DL)) {
        return TypeSize::isKnownGE(LHS: ValueSize, RHS: *FragmentSize);
      }
    }
  }
  // Could not determine size of variable. Conservatively return false.
  return false;
}
1659
1660static void insertDbgValueOrDbgVariableRecord(DIBuilder &Builder, Value *DV,
1661 DILocalVariable *DIVar,
1662 DIExpression *DIExpr,
1663 const DebugLoc &NewLoc,
1664 BasicBlock::iterator Instr) {
1665 ValueAsMetadata *DVAM = ValueAsMetadata::get(V: DV);
1666 DbgVariableRecord *DVRec =
1667 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1668 Instr->getParent()->insertDbgRecordBefore(DR: DVRec, Here: Instr);
1669}
1670
1671static DIExpression *dropInitialDeref(const DIExpression *DIExpr) {
1672 int NumEltDropped = DIExpr->getElements()[0] == dwarf::DW_OP_LLVM_arg ? 3 : 1;
1673 return DIExpression::get(Context&: DIExpr->getContext(),
1674 Elements: DIExpr->getElements().drop_front(N: NumEltDropped));
1675}
1676
/// Rewrite the dbg.declare record \p DVR as a dbg.value of the value stored by
/// \p SI, inserted before the store. If the conversion would lose information
/// (partial store of unknown extent), a poison dbg.value is emitted instead so
/// the debugger does not show a stale value.
void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR,
                                           StoreInst *SI, DIBuilder &Builder) {
  assert(DVR->isAddressOfVariable() || DVR->isDbgAssign());
  auto *DIVar = DVR->getVariable();
  assert(DIVar && "Missing variable");
  auto *DIExpr = DVR->getExpression();
  Value *DV = SI->getValueOperand();

  DebugLoc NewLoc = getDebugValueLoc(DVR);

  // If the alloca describes the variable itself, i.e. the expression in the
  // dbg.declare doesn't start with a dereference, we can perform the
  // conversion if the value covers the entire fragment of DII.
  // If the alloca describes the *address* of DIVar, i.e. DIExpr is
  // *just* a DW_OP_deref, we use DV as is for the dbg.value.
  // We conservatively ignore other dereferences, because the following two are
  // not equivalent:
  //     dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
  //     dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
  // The former is adding 2 to the address of the variable, whereas the latter
  // is adding 2 to the value of the variable. As such, we insist on just a
  // deref expression.
  bool CanConvert =
      DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
                            valueCoversEntireFragment(ValTy: DV->getType(), DVR));
  if (CanConvert) {
    insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
                                      Instr: SI->getIterator());
    return;
  }

  // FIXME: If storing to a part of the variable described by the dbg.declare,
  // then we want to insert a dbg.value for the corresponding fragment.
  LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DVR
                    << '\n');

  // For now, when there is a store to parts of the variable (but we do not
  // know which part) we insert an dbg.value intrinsic to indicate that we
  // know nothing about the variable's content.
  DV = PoisonValue::get(T: DV->getType());
  ValueAsMetadata *DVAM = ValueAsMetadata::get(V: DV);
  DbgVariableRecord *NewDVR =
      new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
  SI->getParent()->insertDbgRecordBefore(DR: NewDVR, Here: SI->getIterator());
}
1722
1723void llvm::InsertDebugValueAtStoreLoc(DbgVariableRecord *DVR, StoreInst *SI,
1724 DIBuilder &Builder) {
1725 auto *DIVar = DVR->getVariable();
1726 assert(DIVar && "Missing variable");
1727 auto *DIExpr = DVR->getExpression();
1728 DIExpr = dropInitialDeref(DIExpr);
1729 Value *DV = SI->getValueOperand();
1730
1731 DebugLoc NewLoc = getDebugValueLoc(DVR);
1732
1733 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1734 Instr: SI->getIterator());
1735}
1736
/// Rewrite the dbg.declare record \p DVR as a dbg.value of the value produced
/// by the load \p LI, inserted immediately after the load. Bails out if the
/// loaded value does not cover the whole variable fragment.
void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, LoadInst *LI,
                                           DIBuilder &Builder) {
  auto *DIVar = DVR->getVariable();
  auto *DIExpr = DVR->getExpression();
  assert(DIVar && "Missing variable");

  if (!valueCoversEntireFragment(ValTy: LI->getType(), DVR)) {
    // FIXME: If only referring to a part of the variable described by the
    // dbg.declare, then we want to insert a DbgVariableRecord for the
    // corresponding fragment.
    LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
                      << *DVR << '\n');
    return;
  }

  DebugLoc NewLoc = getDebugValueLoc(DVR);

  // We are now tracking the loaded value instead of the address. In the
  // future if multi-location support is added to the IR, it might be
  // preferable to keep tracking both the loaded value and the original
  // address in case the alloca can not be elided.

  // Create a DbgVariableRecord directly and insert.
  ValueAsMetadata *LIVAM = ValueAsMetadata::get(V: LI);
  DbgVariableRecord *DV =
      new DbgVariableRecord(LIVAM, DIVar, DIExpr, NewLoc.get());
  LI->getParent()->insertDbgRecordAfter(DR: DV, I: LI);
}
1765
1766/// Determine whether this alloca is either a VLA or an array.
1767static bool isArray(AllocaInst *AI) {
1768 return AI->isArrayAllocation() ||
1769 (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy());
1770}
1771
1772/// Determine whether this alloca is a structure.
1773static bool isStructure(AllocaInst *AI) {
1774 return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy();
1775}
/// Rewrite the dbg.declare record \p DVR as a dbg.value of the phi \p APN,
/// inserted at the first valid insertion point of the phi's block. No-op if an
/// equivalent dbg.value already exists or the phi does not cover the variable.
void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, PHINode *APN,
                                           DIBuilder &Builder) {
  auto *DIVar = DVR->getVariable();
  auto *DIExpr = DVR->getExpression();
  assert(DIVar && "Missing variable");

  // Avoid inserting a duplicate record for the same variable/phi pair.
  if (PhiHasDebugValue(DIVar, DIExpr, APN))
    return;

  if (!valueCoversEntireFragment(ValTy: APN->getType(), DVR)) {
    // FIXME: If only referring to a part of the variable described by the
    // dbg.declare, then we want to insert a DbgVariableRecord for the
    // corresponding fragment.
    LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
                      << *DVR << '\n');
    return;
  }

  BasicBlock *BB = APN->getParent();
  auto InsertionPt = BB->getFirstInsertionPt();

  DebugLoc NewLoc = getDebugValueLoc(DVR);

  // The block may be a catchswitch block, which does not have a valid
  // insertion point.
  // FIXME: Insert DbgVariableRecord markers in the successors when appropriate.
  if (InsertionPt != BB->end()) {
    insertDbgValueOrDbgVariableRecord(Builder, DV: APN, DIVar, DIExpr, NewLoc,
                                      Instr: InsertionPt);
  }
}
1807
1808/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
1809/// of llvm.dbg.value intrinsics.
1810bool llvm::LowerDbgDeclare(Function &F) {
1811 bool Changed = false;
1812 DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
1813 SmallVector<DbgDeclareInst *, 4> Dbgs;
1814 SmallVector<DbgVariableRecord *> DVRs;
1815 for (auto &FI : F) {
1816 for (Instruction &BI : FI) {
1817 if (auto *DDI = dyn_cast<DbgDeclareInst>(Val: &BI))
1818 Dbgs.push_back(Elt: DDI);
1819 for (DbgVariableRecord &DVR : filterDbgVars(R: BI.getDbgRecordRange())) {
1820 if (DVR.getType() == DbgVariableRecord::LocationType::Declare)
1821 DVRs.push_back(Elt: &DVR);
1822 }
1823 }
1824 }
1825
1826 if (Dbgs.empty() && DVRs.empty())
1827 return Changed;
1828
1829 auto LowerOne = [&](DbgVariableRecord *DDI) {
1830 AllocaInst *AI =
1831 dyn_cast_or_null<AllocaInst>(Val: DDI->getVariableLocationOp(OpIdx: 0));
1832 // If this is an alloca for a scalar variable, insert a dbg.value
1833 // at each load and store to the alloca and erase the dbg.declare.
1834 // The dbg.values allow tracking a variable even if it is not
1835 // stored on the stack, while the dbg.declare can only describe
1836 // the stack slot (and at a lexical-scope granularity). Later
1837 // passes will attempt to elide the stack slot.
1838 if (!AI || isArray(AI) || isStructure(AI))
1839 return;
1840
1841 // A volatile load/store means that the alloca can't be elided anyway.
1842 if (llvm::any_of(Range: AI->users(), P: [](User *U) -> bool {
1843 if (LoadInst *LI = dyn_cast<LoadInst>(Val: U))
1844 return LI->isVolatile();
1845 if (StoreInst *SI = dyn_cast<StoreInst>(Val: U))
1846 return SI->isVolatile();
1847 return false;
1848 }))
1849 return;
1850
1851 SmallVector<const Value *, 8> WorkList;
1852 WorkList.push_back(Elt: AI);
1853 while (!WorkList.empty()) {
1854 const Value *V = WorkList.pop_back_val();
1855 for (const auto &AIUse : V->uses()) {
1856 User *U = AIUse.getUser();
1857 if (StoreInst *SI = dyn_cast<StoreInst>(Val: U)) {
1858 if (AIUse.getOperandNo() == 1)
1859 ConvertDebugDeclareToDebugValue(DVR: DDI, SI, Builder&: DIB);
1860 } else if (LoadInst *LI = dyn_cast<LoadInst>(Val: U)) {
1861 ConvertDebugDeclareToDebugValue(DVR: DDI, LI, Builder&: DIB);
1862 } else if (CallInst *CI = dyn_cast<CallInst>(Val: U)) {
1863 // This is a call by-value or some other instruction that takes a
1864 // pointer to the variable. Insert a *value* intrinsic that describes
1865 // the variable by dereferencing the alloca.
1866 if (!CI->isLifetimeStartOrEnd()) {
1867 DebugLoc NewLoc = getDebugValueLoc(DVR: DDI);
1868 auto *DerefExpr =
1869 DIExpression::append(Expr: DDI->getExpression(), Ops: dwarf::DW_OP_deref);
1870 insertDbgValueOrDbgVariableRecord(Builder&: DIB, DV: AI, DIVar: DDI->getVariable(),
1871 DIExpr: DerefExpr, NewLoc,
1872 Instr: CI->getIterator());
1873 }
1874 } else if (BitCastInst *BI = dyn_cast<BitCastInst>(Val: U)) {
1875 if (BI->getType()->isPointerTy())
1876 WorkList.push_back(Elt: BI);
1877 }
1878 }
1879 }
1880 DDI->eraseFromParent();
1881 Changed = true;
1882 };
1883
1884 for_each(Range&: DVRs, F: LowerOne);
1885
1886 if (Changed)
1887 for (BasicBlock &BB : F)
1888 RemoveRedundantDbgInstrs(BB: &BB);
1889
1890 return Changed;
1891}
1892
/// Propagate dbg.value records through the newly inserted PHIs.
///
/// For each inserted PHI that uses a value already described by a debug record
/// in \p BB, clone that record into the PHI's block and retarget it at the
/// PHI, so variable locations survive SSA reconstruction.
void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
                                    SmallVectorImpl<PHINode *> &InsertedPHIs) {
  assert(BB && "No BasicBlock to clone DbgVariableRecord(s) from.");
  if (InsertedPHIs.size() == 0)
    return;

  // Map existing PHI nodes to their DbgVariableRecords.
  DenseMap<Value *, DbgVariableRecord *> DbgValueMap;
  for (auto &I : *BB) {
    for (DbgVariableRecord &DVR : filterDbgVars(R: I.getDbgRecordRange())) {
      for (Value *V : DVR.location_ops())
        if (auto *Loc = dyn_cast_or_null<PHINode>(Val: V))
          DbgValueMap.insert(KV: {Loc, &DVR});
    }
  }
  if (DbgValueMap.size() == 0)
    return;

  // Map a pair of the destination BB and old DbgVariableRecord to the new
  // DbgVariableRecord, so that if a DbgVariableRecord is being rewritten to use
  // more than one of the inserted PHIs in the same destination BB, we can
  // update the same DbgVariableRecord with all the new PHIs instead of creating
  // one copy for each.
  MapVector<std::pair<BasicBlock *, DbgVariableRecord *>, DbgVariableRecord *>
      NewDbgValueMap;
  // Then iterate through the new PHIs and look to see if they use one of the
  // previously mapped PHIs. If so, create a new DbgVariableRecord that will
  // propagate the info through the new PHI. If we use more than one new PHI in
  // a single destination BB with the same old dbg.value, merge the updates so
  // that we get a single new DbgVariableRecord with all the new PHIs.
  for (auto PHI : InsertedPHIs) {
    BasicBlock *Parent = PHI->getParent();
    // Avoid inserting a debug-info record into an EH block.
    if (Parent->getFirstNonPHIIt()->isEHPad())
      continue;
    for (auto VI : PHI->operand_values()) {
      auto V = DbgValueMap.find(Val: VI);
      if (V != DbgValueMap.end()) {
        DbgVariableRecord *DbgII = cast<DbgVariableRecord>(Val: V->second);
        // Clone lazily: one clone per (destination block, source record) pair.
        auto NewDI = NewDbgValueMap.find(Key: {Parent, DbgII});
        if (NewDI == NewDbgValueMap.end()) {
          DbgVariableRecord *NewDbgII = DbgII->clone();
          NewDI = NewDbgValueMap.insert(KV: {{Parent, DbgII}, NewDbgII}).first;
        }
        DbgVariableRecord *NewDbgII = NewDI->second;
        // If PHI contains VI as an operand more than once, we may
        // replaced it in NewDbgII; confirm that it is present.
        if (is_contained(Range: NewDbgII->location_ops(), Element: VI))
          NewDbgII->replaceVariableLocationOp(OldValue: VI, NewValue: PHI);
      }
    }
  }
  // Insert the new DbgVariableRecords into their destination blocks.
  for (auto DI : NewDbgValueMap) {
    BasicBlock *Parent = DI.first.first;
    DbgVariableRecord *NewDbgII = DI.second;
    auto InsertionPt = Parent->getFirstInsertionPt();
    assert(InsertionPt != Parent->end() && "Ill-formed basic block");

    Parent->insertDbgRecordBefore(DR: NewDbgII, Here: InsertionPt);
  }
}
1956
1957bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
1958 DIBuilder &Builder, uint8_t DIExprFlags,
1959 int Offset) {
1960 TinyPtrVector<DbgVariableRecord *> DVRDeclares = findDVRDeclares(V: Address);
1961
1962 auto ReplaceOne = [&](DbgVariableRecord *DII) {
1963 assert(DII->getVariable() && "Missing variable");
1964 auto *DIExpr = DII->getExpression();
1965 DIExpr = DIExpression::prepend(Expr: DIExpr, Flags: DIExprFlags, Offset);
1966 DII->setExpression(DIExpr);
1967 DII->replaceVariableLocationOp(OldValue: Address, NewValue: NewAddress);
1968 };
1969
1970 for_each(Range&: DVRDeclares, F: ReplaceOne);
1971
1972 return !DVRDeclares.empty();
1973}
1974
/// Rewrite a single alloca-based debug record \p DVR to refer to
/// \p NewAddress, folding \p Offset into its expression before the leading
/// deref. Records whose expression does not start with DW_OP_deref are left
/// untouched.
/// NOTE(review): the \p Loc and \p Builder parameters are currently unused in
/// this body — presumably kept for interface symmetry; verify with callers.
static void updateOneDbgValueForAlloca(const DebugLoc &Loc,
                                       DILocalVariable *DIVar,
                                       DIExpression *DIExpr, Value *NewAddress,
                                       DbgVariableRecord *DVR,
                                       DIBuilder &Builder, int Offset) {
  assert(DIVar && "Missing variable");

  // This is an alloca-based dbg.value/DbgVariableRecord. The first thing it
  // should do with the alloca pointer is dereference it. Otherwise we don't
  // know how to handle it and give up.
  if (!DIExpr || DIExpr->getNumElements() < 1 ||
      DIExpr->getElement(I: 0) != dwarf::DW_OP_deref)
    return;

  // Insert the offset before the first deref.
  if (Offset)
    DIExpr = DIExpression::prepend(Expr: DIExpr, Flags: 0, Offset);

  DVR->setExpression(DIExpr);
  DVR->replaceVariableLocationOp(OpIdx: 0u, NewValue: NewAddress);
}
1996
1997void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
1998 DIBuilder &Builder, int Offset) {
1999 SmallVector<DbgVariableRecord *, 1> DPUsers;
2000 findDbgValues(V: AI, DbgVariableRecords&: DPUsers);
2001
2002 // Replace any DbgVariableRecords that use this alloca.
2003 for (DbgVariableRecord *DVR : DPUsers)
2004 updateOneDbgValueForAlloca(Loc: DVR->getDebugLoc(), DIVar: DVR->getVariable(),
2005 DIExpr: DVR->getExpression(), NewAddress: NewAllocaAddress, DVR,
2006 Builder, Offset);
2007}
2008
2009/// Where possible to salvage debug information for \p I do so.
2010/// If not possible mark undef.
2011void llvm::salvageDebugInfo(Instruction &I) {
2012 SmallVector<DbgVariableRecord *, 1> DPUsers;
2013 findDbgUsers(V: &I, DbgVariableRecords&: DPUsers);
2014 salvageDebugInfoForDbgValues(I, DPInsns: DPUsers);
2015}
2016
/// Try to rewrite the address component of the dbg.assign \p Assign in terms
/// of the operands of the instruction it currently points at. On failure (or
/// when extra location operands would be needed, which the address component
/// cannot carry) the address is killed instead.
template <typename T> static void salvageDbgAssignAddress(T *Assign) {
  Instruction *I = dyn_cast<Instruction>(Assign->getAddress());
  // Only instructions can be salvaged at the moment.
  if (!I)
    return;

  assert(!Assign->getAddressExpression()->getFragmentInfo().has_value() &&
         "address-expression shouldn't have fragment info");

  // The address component of a dbg.assign cannot be variadic.
  uint64_t CurrentLocOps = 0;
  SmallVector<Value *, 4> AdditionalValues;
  SmallVector<uint64_t, 16> Ops;
  Value *NewV = salvageDebugInfoImpl(I&: *I, CurrentLocOps, Ops, AdditionalValues);

  // Check if the salvage failed.
  if (!NewV)
    return;

  DIExpression *SalvagedExpr = DIExpression::appendOpsToArg(
      Expr: Assign->getAddressExpression(), Ops, ArgNo: 0, /*StackValue=*/false);
  assert(!SalvagedExpr->getFragmentInfo().has_value() &&
         "address-expression shouldn't have fragment info");

  // Simplify constant arithmetic accumulated during salvaging.
  SalvagedExpr = SalvagedExpr->foldConstantMath();

  // Salvage succeeds if no additional values are required.
  if (AdditionalValues.empty()) {
    Assign->setAddress(NewV);
    Assign->setAddressExpression(SalvagedExpr);
  } else {
    Assign->setKillAddress();
  }
}
2051
2052void llvm::salvageDebugInfoForDbgValues(Instruction &I,
2053 ArrayRef<DbgVariableRecord *> DPUsers) {
2054 // These are arbitrary chosen limits on the maximum number of values and the
2055 // maximum size of a debug expression we can salvage up to, used for
2056 // performance reasons.
2057 const unsigned MaxDebugArgs = 16;
2058 const unsigned MaxExpressionSize = 128;
2059 bool Salvaged = false;
2060
2061 for (auto *DVR : DPUsers) {
2062 if (DVR->isDbgAssign()) {
2063 if (DVR->getAddress() == &I) {
2064 salvageDbgAssignAddress(Assign: DVR);
2065 Salvaged = true;
2066 }
2067 if (DVR->getValue() != &I)
2068 continue;
2069 }
2070
2071 // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
2072 // are implicitly pointing out the value as a DWARF memory location
2073 // description.
2074 bool StackValue =
2075 DVR->getType() != DbgVariableRecord::LocationType::Declare;
2076 auto DVRLocation = DVR->location_ops();
2077 assert(
2078 is_contained(DVRLocation, &I) &&
2079 "DbgVariableIntrinsic must use salvaged instruction as its location");
2080 SmallVector<Value *, 4> AdditionalValues;
2081 // 'I' may appear more than once in DVR's location ops, and each use of 'I'
2082 // must be updated in the DIExpression and potentially have additional
2083 // values added; thus we call salvageDebugInfoImpl for each 'I' instance in
2084 // DVRLocation.
2085 Value *Op0 = nullptr;
2086 DIExpression *SalvagedExpr = DVR->getExpression();
2087 auto LocItr = find(Range&: DVRLocation, Val: &I);
2088 while (SalvagedExpr && LocItr != DVRLocation.end()) {
2089 SmallVector<uint64_t, 16> Ops;
2090 unsigned LocNo = std::distance(first: DVRLocation.begin(), last: LocItr);
2091 uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
2092 Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
2093 if (!Op0)
2094 break;
2095 SalvagedExpr =
2096 DIExpression::appendOpsToArg(Expr: SalvagedExpr, Ops, ArgNo: LocNo, StackValue);
2097 LocItr = std::find(first: ++LocItr, last: DVRLocation.end(), val: &I);
2098 }
2099 // salvageDebugInfoImpl should fail on examining the first element of
2100 // DbgUsers, or none of them.
2101 if (!Op0)
2102 break;
2103
2104 SalvagedExpr = SalvagedExpr->foldConstantMath();
2105 DVR->replaceVariableLocationOp(OldValue: &I, NewValue: Op0);
2106 bool IsValidSalvageExpr =
2107 SalvagedExpr->getNumElements() <= MaxExpressionSize;
2108 if (AdditionalValues.empty() && IsValidSalvageExpr) {
2109 DVR->setExpression(SalvagedExpr);
2110 } else if (DVR->getType() != DbgVariableRecord::LocationType::Declare &&
2111 IsValidSalvageExpr &&
2112 DVR->getNumVariableLocationOps() + AdditionalValues.size() <=
2113 MaxDebugArgs) {
2114 DVR->addVariableLocationOps(NewValues: AdditionalValues, NewExpr: SalvagedExpr);
2115 } else {
2116 // Do not salvage using DIArgList for dbg.addr/dbg.declare, as it is
2117 // currently only valid for stack value expressions.
2118 // Also do not salvage if the resulting DIArgList would contain an
2119 // unreasonably large number of values.
2120 DVR->setKillLocation();
2121 }
2122 LLVM_DEBUG(dbgs() << "SALVAGE: " << DVR << '\n');
2123 Salvaged = true;
2124 }
2125
2126 if (Salvaged)
2127 return;
2128
2129 for (auto *DVR : DPUsers)
2130 DVR->setKillLocation();
2131}
2132
/// Translate the offset arithmetic of \p GEP into DIExpression opcodes
/// appended to \p Opcodes. Variable indices become extra location operands in
/// \p AdditionalValues. Returns the GEP's pointer operand on success, or
/// nullptr when the offset cannot be decomposed.
Value *getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL,
                           uint64_t CurrentLocOps,
                           SmallVectorImpl<uint64_t> &Opcodes,
                           SmallVectorImpl<Value *> &AdditionalValues) {
  unsigned BitWidth = DL.getIndexSizeInBits(AS: GEP->getPointerAddressSpace());
  // Rewrite a GEP into a DIExpression.
  SmallMapVector<Value *, APInt, 4> VariableOffsets;
  APInt ConstantOffset(BitWidth, 0);
  if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset))
    return nullptr;
  // With no pre-existing location operands the expression implicitly refers
  // to operand 0; make that explicit before adding more operands.
  if (!VariableOffsets.empty() && !CurrentLocOps) {
    Opcodes.insert(I: Opcodes.begin(), IL: {dwarf::DW_OP_LLVM_arg, 0});
    CurrentLocOps = 1;
  }
  // Each variable index contributes "arg * multiplier +" to the offset.
  for (const auto &Offset : VariableOffsets) {
    AdditionalValues.push_back(Elt: Offset.first);
    assert(Offset.second.isStrictlyPositive() &&
           "Expected strictly positive multiplier for offset.");
    Opcodes.append(IL: {dwarf::DW_OP_LLVM_arg, CurrentLocOps++, dwarf::DW_OP_constu,
                   Offset.second.getZExtValue(), dwarf::DW_OP_mul,
                   dwarf::DW_OP_plus});
  }
  DIExpression::appendOffset(Ops&: Opcodes, Offset: ConstantOffset.getSExtValue());
  return GEP->getOperand(i_nocapture: 0);
}
2158
2159uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode) {
2160 switch (Opcode) {
2161 case Instruction::Add:
2162 return dwarf::DW_OP_plus;
2163 case Instruction::Sub:
2164 return dwarf::DW_OP_minus;
2165 case Instruction::Mul:
2166 return dwarf::DW_OP_mul;
2167 case Instruction::SDiv:
2168 return dwarf::DW_OP_div;
2169 case Instruction::SRem:
2170 return dwarf::DW_OP_mod;
2171 case Instruction::Or:
2172 return dwarf::DW_OP_or;
2173 case Instruction::And:
2174 return dwarf::DW_OP_and;
2175 case Instruction::Xor:
2176 return dwarf::DW_OP_xor;
2177 case Instruction::Shl:
2178 return dwarf::DW_OP_shl;
2179 case Instruction::LShr:
2180 return dwarf::DW_OP_shr;
2181 case Instruction::AShr:
2182 return dwarf::DW_OP_shra;
2183 default:
2184 // TODO: Salvage from each kind of binop we know about.
2185 return 0;
2186 }
2187}
2188
2189static void handleSSAValueOperands(uint64_t CurrentLocOps,
2190 SmallVectorImpl<uint64_t> &Opcodes,
2191 SmallVectorImpl<Value *> &AdditionalValues,
2192 Instruction *I) {
2193 if (!CurrentLocOps) {
2194 Opcodes.append(IL: {dwarf::DW_OP_LLVM_arg, 0});
2195 CurrentLocOps = 1;
2196 }
2197 Opcodes.append(IL: {dwarf::DW_OP_LLVM_arg, CurrentLocOps});
2198 AdditionalValues.push_back(Elt: I->getOperand(i: 1));
2199}
2200
/// Translate the binary operator \p BI into DIExpression opcodes appended to
/// \p Opcodes, treating its first operand as the value being described.
/// Returns that first operand on success, or nullptr if the operation has no
/// DWARF representation (or a >64-bit constant operand).
Value *getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps,
                             SmallVectorImpl<uint64_t> &Opcodes,
                             SmallVectorImpl<Value *> &AdditionalValues) {
  // Handle binary operations with constant integer operands as a special case.
  auto *ConstInt = dyn_cast<ConstantInt>(Val: BI->getOperand(i_nocapture: 1));
  // Values wider than 64 bits cannot be represented within a DIExpression.
  if (ConstInt && ConstInt->getBitWidth() > 64)
    return nullptr;

  Instruction::BinaryOps BinOpcode = BI->getOpcode();
  // Push any Constant Int operand onto the expression stack.
  if (ConstInt) {
    uint64_t Val = ConstInt->getSExtValue();
    // Add or Sub Instructions with a constant operand can potentially be
    // simplified.
    if (BinOpcode == Instruction::Add || BinOpcode == Instruction::Sub) {
      uint64_t Offset = BinOpcode == Instruction::Add ? Val : -int64_t(Val);
      DIExpression::appendOffset(Ops&: Opcodes, Offset);
      return BI->getOperand(i_nocapture: 0);
    }
    Opcodes.append(IL: {dwarf::DW_OP_constu, Val});
  } else {
    // Non-constant RHS becomes an extra location operand.
    handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, I: BI);
  }

  // Add salvaged binary operator to expression stack, if it has a valid
  // representation in a DIExpression.
  uint64_t DwarfBinOp = getDwarfOpForBinOp(Opcode: BinOpcode);
  if (!DwarfBinOp)
    return nullptr;
  Opcodes.push_back(Elt: DwarfBinOp);
  return BI->getOperand(i_nocapture: 0);
}
2234
2235uint64_t getDwarfOpForIcmpPred(CmpInst::Predicate Pred) {
2236 // The signedness of the operation is implicit in the typed stack, signed and
2237 // unsigned instructions map to the same DWARF opcode.
2238 switch (Pred) {
2239 case CmpInst::ICMP_EQ:
2240 return dwarf::DW_OP_eq;
2241 case CmpInst::ICMP_NE:
2242 return dwarf::DW_OP_ne;
2243 case CmpInst::ICMP_UGT:
2244 case CmpInst::ICMP_SGT:
2245 return dwarf::DW_OP_gt;
2246 case CmpInst::ICMP_UGE:
2247 case CmpInst::ICMP_SGE:
2248 return dwarf::DW_OP_ge;
2249 case CmpInst::ICMP_ULT:
2250 case CmpInst::ICMP_SLT:
2251 return dwarf::DW_OP_lt;
2252 case CmpInst::ICMP_ULE:
2253 case CmpInst::ICMP_SLE:
2254 return dwarf::DW_OP_le;
2255 default:
2256 return 0;
2257 }
2258}
2259
/// Translate the integer comparison \p Icmp into DIExpression opcodes appended
/// to \p Opcodes, describing the result in terms of the first operand. Returns
/// that operand on success, nullptr if the predicate has no DWARF equivalent
/// or the constant operand is wider than 64 bits.
Value *getSalvageOpsForIcmpOp(ICmpInst *Icmp, uint64_t CurrentLocOps,
                              SmallVectorImpl<uint64_t> &Opcodes,
                              SmallVectorImpl<Value *> &AdditionalValues) {
  // Handle icmp operations with constant integer operands as a special case.
  auto *ConstInt = dyn_cast<ConstantInt>(Val: Icmp->getOperand(i_nocapture: 1));
  // Values wider than 64 bits cannot be represented within a DIExpression.
  if (ConstInt && ConstInt->getBitWidth() > 64)
    return nullptr;
  // Push any Constant Int operand onto the expression stack.
  if (ConstInt) {
    if (Icmp->isSigned())
      Opcodes.push_back(Elt: dwarf::DW_OP_consts);
    else
      Opcodes.push_back(Elt: dwarf::DW_OP_constu);
    // NOTE(review): the constant is sign-extended even on the unsigned
    // (DW_OP_constu) path — verify this is intended for narrow unsigned
    // constants with the high bit set.
    uint64_t Val = ConstInt->getSExtValue();
    Opcodes.push_back(Elt: Val);
  } else {
    // Non-constant RHS becomes an extra location operand.
    handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, I: Icmp);
  }

  // Add salvaged binary operator to expression stack, if it has a valid
  // representation in a DIExpression.
  uint64_t DwarfIcmpOp = getDwarfOpForIcmpPred(Pred: Icmp->getPredicate());
  if (!DwarfIcmpOp)
    return nullptr;
  Opcodes.push_back(Elt: DwarfIcmpOp);
  return Icmp->getOperand(i_nocapture: 0);
}
2288
/// Attempt to express \p I in terms of one of its operands: append the DWARF
/// ops that reconstruct I's value to \p Ops (extra operands go into
/// \p AdditionalValues) and return the operand they apply to, or nullptr if I
/// cannot be salvaged. Supports casts, GEPs, binary operators, and icmps.
Value *llvm::salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps,
                                  SmallVectorImpl<uint64_t> &Ops,
                                  SmallVectorImpl<Value *> &AdditionalValues) {
  auto &M = *I.getModule();
  auto &DL = M.getDataLayout();

  if (auto *CI = dyn_cast<CastInst>(Val: &I)) {
    Value *FromValue = CI->getOperand(i_nocapture: 0);
    // No-op casts are irrelevant for debug info.
    if (CI->isNoopCast(DL)) {
      return FromValue;
    }

    Type *Type = CI->getType();
    // Pointer widths are handled via the equivalent integer type.
    if (Type->isPointerTy())
      Type = DL.getIntPtrType(Type);
    // Casts other than Trunc, SExt, or ZExt to scalar types cannot be salvaged.
    if (Type->isVectorTy() ||
        !(isa<TruncInst>(Val: &I) || isa<SExtInst>(Val: &I) || isa<ZExtInst>(Val: &I) ||
          isa<IntToPtrInst>(Val: &I) || isa<PtrToIntInst>(Val: &I)))
      return nullptr;

    llvm::Type *FromType = FromValue->getType();
    if (FromType->isPointerTy())
      FromType = DL.getIntPtrType(FromType);

    unsigned FromTypeBitSize = FromType->getScalarSizeInBits();
    unsigned ToTypeBitSize = Type->getScalarSizeInBits();

    auto ExtOps = DIExpression::getExtOps(FromSize: FromTypeBitSize, ToSize: ToTypeBitSize,
                                          Signed: isa<SExtInst>(Val: &I));
    Ops.append(in_start: ExtOps.begin(), in_end: ExtOps.end());
    return FromValue;
  }

  if (auto *GEP = dyn_cast<GetElementPtrInst>(Val: &I))
    return getSalvageOpsForGEP(GEP, DL, CurrentLocOps, Opcodes&: Ops, AdditionalValues);
  if (auto *BI = dyn_cast<BinaryOperator>(Val: &I))
    return getSalvageOpsForBinOp(BI, CurrentLocOps, Opcodes&: Ops, AdditionalValues);
  if (auto *IC = dyn_cast<ICmpInst>(Val: &I))
    return getSalvageOpsForIcmpOp(Icmp: IC, CurrentLocOps, Opcodes&: Ops, AdditionalValues);

  // *Not* to do: we should not attempt to salvage load instructions,
  // because the validity and lifetime of a dbg.value containing
  // DW_OP_deref becomes difficult to analyze. See PR40628 for examples.
  return nullptr;
}
2336
/// A replacement for a dbg.value expression. std::nullopt means "no valid
/// rewrite exists", leaving the corresponding debug user untouched.
using DbgValReplacement = std::optional<DIExpression *>;
2339
2340/// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
2341/// possibly moving/undefing users to prevent use-before-def. Returns true if
2342/// changes are made.
2343static bool rewriteDebugUsers(
2344 Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
2345 function_ref<DbgValReplacement(DbgVariableRecord &DVR)> RewriteDVRExpr) {
2346 // Find debug users of From.
2347 SmallVector<DbgVariableRecord *, 1> DPUsers;
2348 findDbgUsers(V: &From, DbgVariableRecords&: DPUsers);
2349 if (DPUsers.empty())
2350 return false;
2351
2352 // Prevent use-before-def of To.
2353 bool Changed = false;
2354
2355 SmallPtrSet<DbgVariableRecord *, 1> UndefOrSalvageDVR;
2356 if (isa<Instruction>(Val: &To)) {
2357 bool DomPointAfterFrom = From.getNextNode() == &DomPoint;
2358
2359 // DbgVariableRecord implementation of the above.
2360 for (auto *DVR : DPUsers) {
2361 Instruction *MarkedInstr = DVR->getMarker()->MarkedInstr;
2362 Instruction *NextNonDebug = MarkedInstr;
2363
2364 // It's common to see a debug user between From and DomPoint. Move it
2365 // after DomPoint to preserve the variable update without any reordering.
2366 if (DomPointAfterFrom && NextNonDebug == &DomPoint) {
2367 LLVM_DEBUG(dbgs() << "MOVE: " << *DVR << '\n');
2368 DVR->removeFromParent();
2369 DomPoint.getParent()->insertDbgRecordAfter(DR: DVR, I: &DomPoint);
2370 Changed = true;
2371
2372 // Users which otherwise aren't dominated by the replacement value must
2373 // be salvaged or deleted.
2374 } else if (!DT.dominates(Def: &DomPoint, User: MarkedInstr)) {
2375 UndefOrSalvageDVR.insert(Ptr: DVR);
2376 }
2377 }
2378 }
2379
2380 // Update debug users without use-before-def risk.
2381 for (auto *DVR : DPUsers) {
2382 if (UndefOrSalvageDVR.count(Ptr: DVR))
2383 continue;
2384
2385 DbgValReplacement DVRepl = RewriteDVRExpr(*DVR);
2386 if (!DVRepl)
2387 continue;
2388
2389 DVR->replaceVariableLocationOp(OldValue: &From, NewValue: &To);
2390 DVR->setExpression(*DVRepl);
2391 LLVM_DEBUG(dbgs() << "REWRITE: " << DVR << '\n');
2392 Changed = true;
2393 }
2394
2395 if (!UndefOrSalvageDVR.empty()) {
2396 // Try to salvage the remaining debug users.
2397 salvageDebugInfo(I&: From);
2398 Changed = true;
2399 }
2400
2401 return Changed;
2402}
2403
2404/// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
2405/// losslessly preserve the bits and semantics of the value. This predicate is
2406/// symmetric, i.e swapping \p FromTy and \p ToTy should give the same result.
2407///
2408/// Note that Type::canLosslesslyBitCastTo is not suitable here because it
2409/// allows semantically unequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
2410/// and also does not allow lossless pointer <-> integer conversions.
2411static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy,
2412 Type *ToTy) {
2413 // Trivially compatible types.
2414 if (FromTy == ToTy)
2415 return true;
2416
2417 // Handle compatible pointer <-> integer conversions.
2418 if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
2419 bool SameSize = DL.getTypeSizeInBits(Ty: FromTy) == DL.getTypeSizeInBits(Ty: ToTy);
2420 bool LosslessConversion = !DL.isNonIntegralPointerType(Ty: FromTy) &&
2421 !DL.isNonIntegralPointerType(Ty: ToTy);
2422 return SameSize && LosslessConversion;
2423 }
2424
2425 // TODO: This is not exhaustive.
2426 return false;
2427}
2428
/// Point all debug users of \p From at \p To, rewriting debug expressions to
/// account for type differences (identity for bit-compatible types, sign/zero
/// extension for integer narrowing). \p DomPoint and \p DT guard against
/// use-before-def. Returns true if any debug user was updated.
bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
                                 Instruction &DomPoint, DominatorTree &DT) {
  // Exit early if From has no debug users.
  if (!From.isUsedByMetadata())
    return false;

  assert(&From != &To && "Can't replace something with itself");

  Type *FromTy = From.getType();
  Type *ToTy = To.getType();

  // Rewrite that keeps each user's expression unchanged.
  auto IdentityDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
    return DVR.getExpression();
  };

  // Handle no-op conversions.
  Module &M = *From.getModule();
  const DataLayout &DL = M.getDataLayout();
  if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
    return rewriteDebugUsers(From, To, DomPoint, DT, RewriteDVRExpr: IdentityDVR);

  // Handle integer-to-integer widening and narrowing.
  // FIXME: Use DW_OP_convert when it's available everywhere.
  if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
    uint64_t FromBits = FromTy->getIntegerBitWidth();
    uint64_t ToBits = ToTy->getIntegerBitWidth();
    assert(FromBits != ToBits && "Unexpected no-op conversion");

    // When the width of the result grows, assume that a debugger will only
    // access the low `FromBits` bits when inspecting the source variable.
    if (FromBits < ToBits)
      return rewriteDebugUsers(From, To, DomPoint, DT, RewriteDVRExpr: IdentityDVR);

    // The width of the result has shrunk. Use sign/zero extension to describe
    // the source variable's high bits.
    auto SignOrZeroExtDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
      DILocalVariable *Var = DVR.getVariable();

      // Without knowing signedness, sign/zero extension isn't possible.
      auto Signedness = Var->getSignedness();
      if (!Signedness)
        return std::nullopt;

      bool Signed = *Signedness == DIBasicType::Signedness::Signed;
      return DIExpression::appendExt(Expr: DVR.getExpression(), FromSize: ToBits, ToSize: FromBits,
                                     Signed);
    };
    return rewriteDebugUsers(From, To, DomPoint, DT, RewriteDVRExpr: SignOrZeroExtDVR);
  }

  // TODO: Floating-point conversions, vectors.
  return false;
}
2482
2483bool llvm::handleUnreachableTerminator(
2484 Instruction *I, SmallVectorImpl<Value *> &PoisonedValues) {
2485 bool Changed = false;
2486 // RemoveDIs: erase debug-info on this instruction manually.
2487 I->dropDbgRecords();
2488 for (Use &U : I->operands()) {
2489 Value *Op = U.get();
2490 if (isa<Instruction>(Val: Op) && !Op->getType()->isTokenTy()) {
2491 U.set(PoisonValue::get(T: Op->getType()));
2492 PoisonedValues.push_back(Elt: Op);
2493 Changed = true;
2494 }
2495 }
2496
2497 return Changed;
2498}
2499
// Delete every instruction in BB except its terminator and any EH pads or
// token-typed instructions (which cannot be removed safely). Returns the
// number of instructions deleted.
unsigned llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
  unsigned NumDeadInst = 0;
  // Delete the instructions backwards, as it has a reduced likelihood of
  // having to update as many def-use and use-def chains.
  Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
  SmallVector<Value *> Uses;
  // Poison the terminator's instruction operands first so the deletions
  // below don't leave dangling uses behind.
  handleUnreachableTerminator(I: EndInst, PoisonedValues&: Uses);

  while (EndInst != &BB->front()) {
    // Delete the next to last instruction.
    Instruction *Inst = &*--EndInst->getIterator();
    if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
      Inst->replaceAllUsesWith(V: PoisonValue::get(T: Inst->getType()));
    if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
      // EHPads can't have DbgVariableRecords attached to them, but it might be
      // possible for things with token type.
      Inst->dropDbgRecords();
      // Keep this instruction and continue the backwards scan from it.
      EndInst = Inst;
      continue;
    }
    ++NumDeadInst;
    // RemoveDIs: erasing debug-info must be done manually.
    Inst->dropDbgRecords();
    Inst->eraseFromParent();
  }
  return NumDeadInst;
}
2527
// Replace I and every instruction after it in its block with an
// `unreachable`, removing BB from successor PHIs and keeping MemorySSA and
// the dominator tree (via DTU) up to date. Returns the number of
// instructions removed.
unsigned llvm::changeToUnreachable(Instruction *I, bool PreserveLCSSA,
                                   DomTreeUpdater *DTU,
                                   MemorySSAUpdater *MSSAU) {
  BasicBlock *BB = I->getParent();

  if (MSSAU)
    MSSAU->changeToUnreachable(I);

  SmallPtrSet<BasicBlock *, 8> UniqueSuccessors;

  // Loop over all of the successors, removing BB's entry from any PHI
  // nodes.
  for (BasicBlock *Successor : successors(BB)) {
    Successor->removePredecessor(Pred: BB, KeepOneInputPHIs: PreserveLCSSA);
    if (DTU)
      UniqueSuccessors.insert(Ptr: Successor);
  }
  auto *UI = new UnreachableInst(I->getContext(), I->getIterator());
  UI->setDebugLoc(I->getDebugLoc());

  // All instructions after this are dead.
  unsigned NumInstrsRemoved = 0;
  BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
  while (BBI != BBE) {
    if (!BBI->use_empty())
      BBI->replaceAllUsesWith(V: PoisonValue::get(T: BBI->getType()));
    BBI++->eraseFromParent();
    ++NumInstrsRemoved;
  }
  if (DTU) {
    // The block no longer branches anywhere: delete all outgoing CFG edges.
    SmallVector<DominatorTree::UpdateType, 8> Updates;
    Updates.reserve(N: UniqueSuccessors.size());
    for (BasicBlock *UniqueSuccessor : UniqueSuccessors)
      Updates.push_back(Elt: {DominatorTree::Delete, BB, UniqueSuccessor});
    DTU->applyUpdates(Updates);
  }
  BB->flushTerminatorDbgRecords();
  return NumInstrsRemoved;
}
2567
2568CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {
2569 SmallVector<Value *, 8> Args(II->args());
2570 SmallVector<OperandBundleDef, 1> OpBundles;
2571 II->getOperandBundlesAsDefs(Defs&: OpBundles);
2572 CallInst *NewCall = CallInst::Create(Ty: II->getFunctionType(),
2573 Func: II->getCalledOperand(), Args, Bundles: OpBundles);
2574 NewCall->setCallingConv(II->getCallingConv());
2575 NewCall->setAttributes(II->getAttributes());
2576 NewCall->setDebugLoc(II->getDebugLoc());
2577 NewCall->copyMetadata(SrcInst: *II);
2578
2579 // If the invoke had profile metadata, try converting them for CallInst.
2580 uint64_t TotalWeight;
2581 if (NewCall->extractProfTotalWeight(TotalVal&: TotalWeight)) {
2582 // Set the total weight if it fits into i32, otherwise reset.
2583 MDBuilder MDB(NewCall->getContext());
2584 auto NewWeights = uint32_t(TotalWeight) != TotalWeight
2585 ? nullptr
2586 : MDB.createBranchWeights(Weights: {uint32_t(TotalWeight)});
2587 NewCall->setMetadata(KindID: LLVMContext::MD_prof, Node: NewWeights);
2588 }
2589
2590 return NewCall;
2591}
2592
// changeToCall - Convert the specified invoke into a normal call.
// The invoke's unwind edge is removed (updating PHIs in the unwind
// destination and the DomTree) and replaced by an unconditional branch to
// the normal destination. Returns the new call.
CallInst *llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) {
  CallInst *NewCall = createCallMatchingInvoke(II);
  NewCall->takeName(V: II);
  NewCall->insertBefore(InsertPos: II->getIterator());
  II->replaceAllUsesWith(V: NewCall);

  // Follow the call by a branch to the normal destination.
  BasicBlock *NormalDestBB = II->getNormalDest();
  auto *BI = UncondBrInst::Create(IfTrue: NormalDestBB, InsertBefore: II->getIterator());
  // Although it takes place after the call itself, the new branch is still
  // performing part of the control-flow functionality of the invoke, so we use
  // II's DebugLoc.
  BI->setDebugLoc(II->getDebugLoc());

  // Update PHI nodes in the unwind destination
  BasicBlock *BB = II->getParent();
  BasicBlock *UnwindDestBB = II->getUnwindDest();
  UnwindDestBB->removePredecessor(Pred: BB);
  II->eraseFromParent();
  if (DTU)
    DTU->applyUpdates(Updates: {{DominatorTree::Delete, BB, UnwindDestBB}});
  return NewCall;
}
2617
// Convert CI into an invoke that unwinds to UnwindEdge. The block is split
// after CI; the new invoke's normal destination is the split-off block, which
// is returned. The original call is erased.
BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
                                                   BasicBlock *UnwindEdge,
                                                   DomTreeUpdater *DTU) {
  BasicBlock *BB = CI->getParent();

  // Convert this function call into an invoke instruction. First, split the
  // basic block.
  BasicBlock *Split = SplitBlock(Old: BB, SplitPt: CI, DTU, /*LI=*/nullptr, /*MSSAU*/ nullptr,
                                BBName: CI->getName() + ".noexc");

  // Delete the unconditional branch inserted by SplitBlock
  BB->back().eraseFromParent();

  // Create the new invoke instruction.
  SmallVector<Value *, 8> InvokeArgs(CI->args());
  SmallVector<OperandBundleDef, 1> OpBundles;

  CI->getOperandBundlesAsDefs(Defs&: OpBundles);

  // Note: we're round tripping operand bundles through memory here, and that
  // can potentially be avoided with a cleverer API design that we do not have
  // as of this time.

  InvokeInst *II =
      InvokeInst::Create(Ty: CI->getFunctionType(), Func: CI->getCalledOperand(), IfNormal: Split,
                         IfException: UnwindEdge, Args: InvokeArgs, Bundles: OpBundles, NameStr: CI->getName(), InsertBefore: BB);
  II->setDebugLoc(CI->getDebugLoc());
  II->setCallingConv(CI->getCallingConv());
  II->setAttributes(CI->getAttributes());
  II->setMetadata(KindID: LLVMContext::MD_prof, Node: CI->getMetadata(KindID: LLVMContext::MD_prof));

  if (DTU)
    DTU->applyUpdates(Updates: {{DominatorTree::Insert, BB, UnwindEdge}});

  // Make sure that anything using the call now uses the invoke! This also
  // updates the CallGraph if present, because it uses a WeakTrackingVH.
  CI->replaceAllUsesWith(V: II);

  // Delete the original call
  Split->front().eraseFromParent();
  return Split;
}
2660
// Walk the CFG from the entry block, setting Reachable[block-number] for
// every block visited. Along the way, canonicalize obviously-unreachable
// code (false assumes, calls through null/undef, no-return calls, stores to
// null/undef, invokes of nounwind functions, duplicate catchswitch handlers)
// and constant-fold terminators. Returns true if any change was made.
// Reachable must be sized to F's max block number; DTU, if given, receives
// all CFG edge updates.
static bool markAliveBlocks(Function &F, SmallVectorImpl<bool> &Reachable,
                            DomTreeUpdater *DTU = nullptr) {
  SmallVector<BasicBlock*, 128> Worklist;
  BasicBlock *BB = &F.front();
  Worklist.push_back(Elt: BB);
  Reachable[BB->getNumber()] = true;
  bool Changed = false;
  do {
    BB = Worklist.pop_back_val();

    // Do a quick scan of the basic block, turning any obviously unreachable
    // instructions into LLVM unreachable insts. The instruction combining pass
    // canonicalizes unreachable insts into stores to null or undef.
    for (Instruction &I : *BB) {
      if (auto *CI = dyn_cast<CallInst>(Val: &I)) {
        Value *Callee = CI->getCalledOperand();
        // Handle intrinsic calls.
        if (Function *F = dyn_cast<Function>(Val: Callee)) {
          auto IntrinsicID = F->getIntrinsicID();
          // Assumptions that are known to be false are equivalent to
          // unreachable. Also, if the condition is undefined, then we make the
          // choice most beneficial to the optimizer, and choose that to also be
          // unreachable.
          if (IntrinsicID == Intrinsic::assume) {
            if (match(V: CI->getArgOperand(i: 0), P: m_CombineOr(L: m_Zero(), R: m_Undef()))) {
              // Don't insert a call to llvm.trap right before the unreachable.
              changeToUnreachable(I: CI, PreserveLCSSA: false, DTU);
              Changed = true;
              break;
            }
          } else if (IntrinsicID == Intrinsic::experimental_guard) {
            // A call to the guard intrinsic bails out of the current
            // compilation unit if the predicate passed to it is false. If the
            // predicate is a constant false, then we know the guard will bail
            // out of the current compile unconditionally, so all code following
            // it is dead.
            //
            // Note: unlike in llvm.assume, it is not "obviously profitable" for
            // guards to treat `undef` as `false` since a guard on `undef` can
            // still be useful for widening.
            if (match(V: CI->getArgOperand(i: 0), P: m_Zero()))
              if (!isa<UnreachableInst>(Val: CI->getNextNode())) {
                changeToUnreachable(I: CI->getNextNode(), PreserveLCSSA: false, DTU);
                Changed = true;
                break;
              }
          }
        } else if ((isa<ConstantPointerNull>(Val: Callee) &&
                    !NullPointerIsDefined(F: CI->getFunction(),
                                          AS: cast<PointerType>(Val: Callee->getType())
                                              ->getAddressSpace())) ||
                   isa<UndefValue>(Val: Callee)) {
          // Calling null (in an address space where null is undefined) or
          // undef is immediate UB.
          changeToUnreachable(I: CI, PreserveLCSSA: false, DTU);
          Changed = true;
          break;
        }
        if (CI->doesNotReturn() && !CI->isMustTailCall()) {
          // If we found a call to a no-return function, insert an unreachable
          // instruction after it. Make sure there isn't *already* one there
          // though.
          if (!isa<UnreachableInst>(Val: CI->getNextNode())) {
            // Don't insert a call to llvm.trap right before the unreachable.
            changeToUnreachable(I: CI->getNextNode(), PreserveLCSSA: false, DTU);
            Changed = true;
          }
          break;
        }
      } else if (auto *SI = dyn_cast<StoreInst>(Val: &I)) {
        // Store to undef and store to null are undefined and used to signal
        // that they should be changed to unreachable by passes that can't
        // modify the CFG.

        // Don't touch volatile stores.
        if (SI->isVolatile()) continue;

        Value *Ptr = SI->getOperand(i_nocapture: 1);

        if (isa<UndefValue>(Val: Ptr) ||
            (isa<ConstantPointerNull>(Val: Ptr) &&
             !NullPointerIsDefined(F: SI->getFunction(),
                                   AS: SI->getPointerAddressSpace()))) {
          changeToUnreachable(I: SI, PreserveLCSSA: false, DTU);
          Changed = true;
          break;
        }
      }
    }

    Instruction *Terminator = BB->getTerminator();
    if (auto *II = dyn_cast<InvokeInst>(Val: Terminator)) {
      // Turn invokes that call 'nounwind' functions into ordinary calls.
      Value *Callee = II->getCalledOperand();
      if ((isa<ConstantPointerNull>(Val: Callee) &&
           !NullPointerIsDefined(F: BB->getParent())) ||
          isa<UndefValue>(Val: Callee)) {
        changeToUnreachable(I: II, PreserveLCSSA: false, DTU);
        Changed = true;
      } else {
        if (II->doesNotReturn() &&
            !isa<UnreachableInst>(Val: II->getNormalDest()->front())) {
          // If we found an invoke of a no-return function,
          // create a new empty basic block with an `unreachable` terminator,
          // and set it as the normal destination for the invoke,
          // unless that is already the case.
          // Note that the original normal destination could have other uses.
          BasicBlock *OrigNormalDest = II->getNormalDest();
          OrigNormalDest->removePredecessor(Pred: II->getParent());
          LLVMContext &Ctx = II->getContext();
          BasicBlock *UnreachableNormalDest = BasicBlock::Create(
              Context&: Ctx, Name: OrigNormalDest->getName() + ".unreachable",
              Parent: II->getFunction(), InsertBefore: OrigNormalDest);
          // Creating a block may have grown the function's block numbering;
          // keep Reachable in sync so indexing below stays in bounds.
          Reachable.resize(N: II->getFunction()->getMaxBlockNumber());
          auto *UI = new UnreachableInst(Ctx, UnreachableNormalDest);
          UI->setDebugLoc(DebugLoc::getTemporary());
          II->setNormalDest(UnreachableNormalDest);
          if (DTU)
            DTU->applyUpdates(
                Updates: {{DominatorTree::Delete, BB, OrigNormalDest},
                 {DominatorTree::Insert, BB, UnreachableNormalDest}});
          Changed = true;
        }
        if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(F: &F)) {
          if (II->use_empty() && !II->mayHaveSideEffects()) {
            // jump to the normal destination branch.
            BasicBlock *NormalDestBB = II->getNormalDest();
            BasicBlock *UnwindDestBB = II->getUnwindDest();
            UncondBrInst::Create(IfTrue: NormalDestBB, InsertBefore: II->getIterator());
            UnwindDestBB->removePredecessor(Pred: II->getParent());
            II->eraseFromParent();
            if (DTU)
              DTU->applyUpdates(Updates: {{DominatorTree::Delete, BB, UnwindDestBB}});
          } else
            changeToCall(II, DTU);
          Changed = true;
        }
      }
    } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Val: Terminator)) {
      // Remove catchpads which cannot be reached.
      struct CatchPadDenseMapInfo {
        static CatchPadInst *getEmptyKey() {
          return DenseMapInfo<CatchPadInst *>::getEmptyKey();
        }

        static CatchPadInst *getTombstoneKey() {
          return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
        }

        static unsigned getHashValue(CatchPadInst *CatchPad) {
          return static_cast<unsigned>(hash_combine_range(
              first: CatchPad->value_op_begin(), last: CatchPad->value_op_end()));
        }

        static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
          if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
              RHS == getEmptyKey() || RHS == getTombstoneKey())
            return LHS == RHS;
          return LHS->isIdenticalTo(I: RHS);
        }
      };

      SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases;
      // Set of unique CatchPads.
      SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
                    CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
          HandlerSet;
      detail::DenseSetEmpty Empty;
      for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
                                             E = CatchSwitch->handler_end();
           I != E; ++I) {
        BasicBlock *HandlerBB = *I;
        if (DTU)
          ++NumPerSuccessorCases[HandlerBB];
        auto *CatchPad = cast<CatchPadInst>(Val: HandlerBB->getFirstNonPHIIt());
        if (!HandlerSet.insert(KV: {CatchPad, Empty}).second) {
          // Duplicate handler: drop it and adjust the iteration bounds.
          if (DTU)
            --NumPerSuccessorCases[HandlerBB];
          CatchSwitch->removeHandler(HI: I);
          --I;
          --E;
          Changed = true;
        }
      }
      if (DTU) {
        // Only edges whose handler count dropped to zero actually vanished.
        std::vector<DominatorTree::UpdateType> Updates;
        for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
          if (I.second == 0)
            Updates.push_back(x: {DominatorTree::Delete, BB, I.first});
        DTU->applyUpdates(Updates);
      }
    }

    Changed |= ConstantFoldTerminator(BB, DeleteDeadConditions: true, TLI: nullptr, DTU);
    for (BasicBlock *Successor : successors(BB)) {
      if (!Reachable[Successor->getNumber()]) {
        Worklist.push_back(Elt: Successor);
        Reachable[Successor->getNumber()] = true;
      }
    }
  } while (!Worklist.empty());
  return Changed;
}
2862
// Replace BB's terminator with an equivalent one that has no unwind
// successor: an invoke becomes a call, a cleanupret unwinds to caller, and a
// catchswitch loses its unwind destination. PHIs in the old unwind
// destination and the DomTree (via DTU) are updated. Returns the new
// terminator.
Instruction *llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) {
  Instruction *TI = BB->getTerminator();

  if (auto *II = dyn_cast<InvokeInst>(Val: TI))
    return changeToCall(II, DTU);

  Instruction *NewTI;
  BasicBlock *UnwindDest;

  if (auto *CRI = dyn_cast<CleanupReturnInst>(Val: TI)) {
    // Passing a null unwind block makes the cleanupret unwind to the caller.
    NewTI = CleanupReturnInst::Create(CleanupPad: CRI->getCleanupPad(), UnwindBB: nullptr, InsertBefore: CRI->getIterator());
    UnwindDest = CRI->getUnwindDest();
  } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Val: TI)) {
    // Rebuild the catchswitch without an unwind destination, preserving all
    // handlers in order.
    auto *NewCatchSwitch = CatchSwitchInst::Create(
        ParentPad: CatchSwitch->getParentPad(), UnwindDest: nullptr, NumHandlers: CatchSwitch->getNumHandlers(),
        NameStr: CatchSwitch->getName(), InsertBefore: CatchSwitch->getIterator());
    for (BasicBlock *PadBB : CatchSwitch->handlers())
      NewCatchSwitch->addHandler(Dest: PadBB);

    NewTI = NewCatchSwitch;
    UnwindDest = CatchSwitch->getUnwindDest();
  } else {
    llvm_unreachable("Could not find unwind successor");
  }

  NewTI->takeName(V: TI);
  NewTI->setDebugLoc(TI->getDebugLoc());
  UnwindDest->removePredecessor(Pred: BB);
  TI->replaceAllUsesWith(V: NewTI);
  TI->eraseFromParent();
  if (DTU)
    DTU->applyUpdates(Updates: {{DominatorTree::Delete, BB, UnwindDest}});
  return NewTI;
}
2897
/// removeUnreachableBlocks - Remove blocks that are not reachable, even
/// if they are in a dead cycle. Return true if a change was made, false
/// otherwise.
/// MemorySSA (via MSSAU) and the dominator tree (via DTU) are kept in sync
/// with the deletions.
bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU,
                                   MemorySSAUpdater *MSSAU) {
  // Reachability is tracked per block number; markAliveBlocks also performs
  // local canonicalizations and reports whether it changed anything.
  SmallVector<bool, 16> Reachable(F.getMaxBlockNumber());
  bool Changed = markAliveBlocks(F, Reachable, DTU);

  // Are there any blocks left to actually delete?
  SmallSetVector<BasicBlock *, 8> BlocksToRemove;
  for (BasicBlock &BB : F) {
    // Skip reachable basic blocks
    if (Reachable[BB.getNumber()])
      continue;
    // Skip already-deleted blocks
    if (DTU && DTU->isBBPendingDeletion(DelBB: &BB))
      continue;
    BlocksToRemove.insert(X: &BB);
  }

  if (BlocksToRemove.empty())
    return Changed;

  Changed = true;
  NumRemoved += BlocksToRemove.size();

  if (MSSAU)
    MSSAU->removeBlocks(DeadBlocks: BlocksToRemove);

  DeleteDeadBlocks(BBs: BlocksToRemove.takeVector(), DTU);

  return Changed;
}
2931
/// If AAOnly is set, only intersect alias analysis metadata and preserve other
/// known metadata. Unknown metadata is always dropped.
///
/// K is the instruction being kept; J is the instruction being merged into it
/// (and typically deleted by the caller). DoesKMove indicates whether K is
/// moved to J's position, in which case K may only keep metadata that is
/// valid at both program points.
static void combineMetadata(Instruction *K, const Instruction *J,
                            bool DoesKMove, bool AAOnly = false) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  K->getAllMetadataOtherThanDebugLoc(MDs&: Metadata);
  // Iterate over K's attachments: each kind is either merged with J's
  // matching node, kept, or dropped, depending on its semantics.
  for (const auto &MD : Metadata) {
    unsigned Kind = MD.first;
    MDNode *JMD = J->getMetadata(KindID: Kind);
    MDNode *KMD = MD.second;

    // TODO: Assert that this switch is exhaustive for fixed MD kinds.
    switch (Kind) {
    default:
      K->setMetadata(KindID: Kind, Node: nullptr); // Remove unknown metadata
      break;
    case LLVMContext::MD_dbg:
      llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
    case LLVMContext::MD_DIAssignID:
      if (!AAOnly)
        K->mergeDIAssignID(SourceInstructions: J);
      break;
    case LLVMContext::MD_tbaa:
      if (DoesKMove)
        K->setMetadata(KindID: Kind, Node: MDNode::getMostGenericTBAA(A: JMD, B: KMD));
      break;
    case LLVMContext::MD_alias_scope:
      if (DoesKMove)
        K->setMetadata(KindID: Kind, Node: MDNode::getMostGenericAliasScope(A: JMD, B: KMD));
      break;
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_mem_parallel_loop_access:
      if (DoesKMove)
        K->setMetadata(KindID: Kind, Node: MDNode::intersect(A: JMD, B: KMD));
      break;
    case LLVMContext::MD_access_group:
      if (DoesKMove)
        K->setMetadata(KindID: LLVMContext::MD_access_group,
                       Node: intersectAccessGroups(Inst1: K, Inst2: J));
      break;
    case LLVMContext::MD_range:
      // If K has !noundef and does not move, its own range claim stays valid
      // on its own; otherwise take the most generic (widest) range.
      if (!AAOnly && (DoesKMove || !K->hasMetadata(KindID: LLVMContext::MD_noundef)))
        K->setMetadata(KindID: Kind, Node: MDNode::getMostGenericRange(A: JMD, B: KMD));
      break;
    case LLVMContext::MD_nofpclass:
      if (!AAOnly && (DoesKMove || !K->hasMetadata(KindID: LLVMContext::MD_noundef)))
        K->setMetadata(KindID: Kind, Node: MDNode::getMostGenericNoFPClass(A: JMD, B: KMD));
      break;
    case LLVMContext::MD_fpmath:
      if (!AAOnly)
        K->setMetadata(KindID: Kind, Node: MDNode::getMostGenericFPMath(A: JMD, B: KMD));
      break;
    case LLVMContext::MD_invariant_load:
      // If K moves, only set the !invariant.load if it is present in both
      // instructions.
      if (DoesKMove)
        K->setMetadata(KindID: Kind, Node: JMD);
      break;
    case LLVMContext::MD_nonnull:
      if (!AAOnly && (DoesKMove || !K->hasMetadata(KindID: LLVMContext::MD_noundef)))
        K->setMetadata(KindID: Kind, Node: JMD);
      break;
    case LLVMContext::MD_invariant_group:
      // Preserve !invariant.group in K.
      break;
    // Keep empty cases for prof, mmra, memprof, and callsite to prevent them
    // from being removed as unknown metadata. The actual merging is handled
    // separately below.
    case LLVMContext::MD_prof:
    case LLVMContext::MD_mmra:
    case LLVMContext::MD_memprof:
    case LLVMContext::MD_callsite:
      break;
    case LLVMContext::MD_callee_type:
      if (!AAOnly) {
        K->setMetadata(KindID: LLVMContext::MD_callee_type,
                       Node: MDNode::getMergedCalleeTypeMetadata(A: KMD, B: JMD));
      }
      break;
    case LLVMContext::MD_align:
      if (!AAOnly && (DoesKMove || !K->hasMetadata(KindID: LLVMContext::MD_noundef)))
        K->setMetadata(
            KindID: Kind, Node: MDNode::getMostGenericAlignmentOrDereferenceable(A: JMD, B: KMD));
      break;
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      if (!AAOnly && DoesKMove)
        K->setMetadata(KindID: Kind,
                       Node: MDNode::getMostGenericAlignmentOrDereferenceable(A: JMD, B: KMD));
      break;
    case LLVMContext::MD_preserve_access_index:
      // Preserve !preserve.access.index in K.
      break;
    case LLVMContext::MD_noundef:
      // If K does move, keep noundef if it is present in both instructions.
      if (!AAOnly && DoesKMove)
        K->setMetadata(KindID: Kind, Node: JMD);
      break;
    case LLVMContext::MD_nontemporal:
      // Preserve !nontemporal if it is present on both instructions.
      if (!AAOnly)
        K->setMetadata(KindID: Kind, Node: JMD);
      break;
    case LLVMContext::MD_noalias_addrspace:
      if (DoesKMove)
        K->setMetadata(KindID: Kind,
                       Node: MDNode::getMostGenericNoaliasAddrspace(A: JMD, B: KMD));
      break;
    case LLVMContext::MD_nosanitize:
      // Preserve !nosanitize if both K and J have it.
      K->setMetadata(KindID: Kind, Node: JMD);
      break;
    case LLVMContext::MD_captures:
      // Union of captured components: the merged instruction may capture
      // anything either original captured.
      K->setMetadata(
          KindID: Kind, Node: MDNode::fromCaptureComponents(
                    Ctx&: K->getContext(), CC: MDNode::toCaptureComponents(MD: JMD) |
                                          MDNode::toCaptureComponents(MD: KMD)));
      break;
    case LLVMContext::MD_alloc_token:
      // Preserve !alloc_token if both K and J have it, and they are equal.
      if (KMD == JMD)
        K->setMetadata(KindID: Kind, Node: JMD);
      else
        K->setMetadata(KindID: Kind, Node: nullptr);
      break;
    }
  }
  // Set !invariant.group from J if J has it. If both instructions have it
  // then we will just pick it from J - even when they are different.
  // Also make sure that K is load or store - f.e. combining bitcast with load
  // could produce bitcast with invariant.group metadata, which is invalid.
  // FIXME: we should try to preserve both invariant.group md if they are
  // different, but right now instruction can only have one invariant.group.
  if (auto *JMD = J->getMetadata(KindID: LLVMContext::MD_invariant_group))
    if (isa<LoadInst>(Val: K) || isa<StoreInst>(Val: K))
      K->setMetadata(KindID: LLVMContext::MD_invariant_group, Node: JMD);

  // Merge MMRAs.
  // This is handled separately because we also want to handle cases where K
  // doesn't have tags but J does.
  auto JMMRA = J->getMetadata(KindID: LLVMContext::MD_mmra);
  auto KMMRA = K->getMetadata(KindID: LLVMContext::MD_mmra);
  if (JMMRA || KMMRA) {
    K->setMetadata(KindID: LLVMContext::MD_mmra,
                   Node: MMRAMetadata::combine(Ctx&: K->getContext(), A: JMMRA, B: KMMRA));
  }

  // Merge memprof metadata.
  // Handle separately to support cases where only one instruction has the
  // metadata.
  auto *JMemProf = J->getMetadata(KindID: LLVMContext::MD_memprof);
  auto *KMemProf = K->getMetadata(KindID: LLVMContext::MD_memprof);
  if (!AAOnly && (JMemProf || KMemProf)) {
    K->setMetadata(KindID: LLVMContext::MD_memprof,
                   Node: MDNode::getMergedMemProfMetadata(A: KMemProf, B: JMemProf));
  }

  // Merge callsite metadata.
  // Handle separately to support cases where only one instruction has the
  // metadata.
  auto *JCallSite = J->getMetadata(KindID: LLVMContext::MD_callsite);
  auto *KCallSite = K->getMetadata(KindID: LLVMContext::MD_callsite);
  if (!AAOnly && (JCallSite || KCallSite)) {
    K->setMetadata(KindID: LLVMContext::MD_callsite,
                   Node: MDNode::getMergedCallsiteMetadata(A: KCallSite, B: JCallSite));
  }

  // Merge prof metadata.
  // Handle separately to support cases where only one instruction has the
  // metadata.
  auto *JProf = J->getMetadata(KindID: LLVMContext::MD_prof);
  auto *KProf = K->getMetadata(KindID: LLVMContext::MD_prof);
  if (!AAOnly && (JProf || KProf)) {
    K->setMetadata(KindID: LLVMContext::MD_prof,
                   Node: MDNode::getMergedProfMetadata(A: KProf, B: JProf, AInstr: K, BInstr: J));
  }
}
3109
// Public entry point for CSE-style merges: combine all known metadata kinds
// of J into K (see combineMetadata above).
void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
                                 bool DoesKMove) {
  combineMetadata(K, J, DoesKMove);
}
3114
// Intersect only the alias-analysis metadata of K and J, treating K as if it
// moves (the conservative choice); other known kinds on K are preserved.
void llvm::combineAAMetadata(Instruction *K, const Instruction *J) {
  combineMetadata(K, J, /*DoesKMove=*/true, /*AAOnly=*/true);
}
3118
// Copy all load-relevant metadata from Source to Dest, which is assumed to
// be a clone of Source differing only in its loaded type. Kinds whose
// validity depends on the type (!nonnull, !range, pointer-only kinds, ...)
// are translated or conditionally dropped.
void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  Source.getAllMetadata(MDs&: MD);
  MDBuilder MDB(Dest.getContext());
  Type *NewType = Dest.getType();
  const DataLayout &DL = Source.getDataLayout();
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its type*.
    // The only metadata it makes sense to drop is metadata which is invalidated
    // when the pointer type changes. This should essentially never be the case
    // in LLVM, but we explicitly switch over only known metadata to be
    // conservatively correct. If you are adding metadata to LLVM which pertains
    // to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
    case LLVMContext::MD_access_group:
    case LLVMContext::MD_noundef:
    case LLVMContext::MD_noalias_addrspace:
      // All of these directly apply.
      Dest.setMetadata(KindID: ID, Node: N);
      break;

    case LLVMContext::MD_nonnull:
      // !nonnull may have to become !range (or be dropped) when the new type
      // is an integer; the helper handles the conversion.
      copyNonnullMetadata(OldLI: Source, N, NewLI&: Dest);
      break;

    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These only directly apply if the new type is also a pointer.
      if (NewType->isPointerTy())
        Dest.setMetadata(KindID: ID, Node: N);
      break;

    case LLVMContext::MD_range:
      // !range may have to become !nonnull (or be dropped) when the new type
      // is a pointer; the helper handles the conversion.
      copyRangeMetadata(DL, OldLI: Source, N, NewLI&: Dest);
      break;

    case LLVMContext::MD_nofpclass:
      // This only applies if the floating-point type interpretation. This
      // should handle degenerate cases like casting between a scalar and single
      // element vector.
      if (NewType->getScalarType() == Source.getType()->getScalarType())
        Dest.setMetadata(KindID: ID, Node: N);
      break;
    }
  }
}
3179
// Weaken the replacement value Repl so it is no more poisonous or restrictive
// than the instruction I it replaces: drop/intersect IR flags, intersect call
// attributes, and conservatively combine metadata.
void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) {
  auto *ReplInst = dyn_cast<Instruction>(Val: Repl);
  if (!ReplInst)
    return;

  // Patch the replacement so that it is not more restrictive than the value
  // being replaced.
  WithOverflowInst *UnusedWO;
  // When replacing the result of a llvm.*.with.overflow intrinsic with a
  // overflowing binary operator, nuw/nsw flags may no longer hold.
  if (isa<OverflowingBinaryOperator>(Val: ReplInst) &&
      match(V: I, P: m_ExtractValue<0>(V: m_WithOverflowInst(I&: UnusedWO))))
    ReplInst->dropPoisonGeneratingFlags();
  // Note that if 'I' is a load being replaced by some operation,
  // for example, by an arithmetic operation, then andIRFlags()
  // would just erase all math flags from the original arithmetic
  // operation, which is clearly not wanted and not needed.
  else if (!isa<LoadInst>(Val: I))
    ReplInst->andIRFlags(V: I);

  // Handle attributes.
  if (auto *CB1 = dyn_cast<CallBase>(Val: ReplInst)) {
    if (auto *CB2 = dyn_cast<CallBase>(Val: I)) {
      bool Success = CB1->tryIntersectAttributes(Other: CB2);
      assert(Success && "We should not be trying to sink callbases "
                        "with non-intersectable attributes");
      // For NDEBUG Compile.
      (void)Success;
    }
  }

  // FIXME: If both the original and replacement value are part of the
  // same control-flow region (meaning that the execution of one
  // guarantees the execution of the other), then we can combine the
  // noalias scopes here and do better than the general conservative
  // answer used in combineMetadata().

  // In general, GVN unifies expressions over different control-flow
  // regions, and so we need a conservative combination of the noalias
  // scopes.
  combineMetadataForCSE(K: ReplInst, J: I, DoesKMove: false);
}
3222
3223template <typename ShouldReplaceFn>
3224static unsigned replaceDominatedUsesWith(Value *From, Value *To,
3225 const ShouldReplaceFn &ShouldReplace) {
3226 assert(From->getType() == To->getType());
3227
3228 unsigned Count = 0;
3229 for (Use &U : llvm::make_early_inc_range(Range: From->uses())) {
3230 auto *II = dyn_cast<IntrinsicInst>(Val: U.getUser());
3231 if (II && II->getIntrinsicID() == Intrinsic::fake_use)
3232 continue;
3233 if (!ShouldReplace(U))
3234 continue;
3235 LLVM_DEBUG(dbgs() << "Replace dominated use of '";
3236 From->printAsOperand(dbgs());
3237 dbgs() << "' with " << *To << " in " << *U.getUser() << "\n");
3238 U.set(To);
3239 ++Count;
3240 }
3241 return Count;
3242}
3243
3244unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
3245 assert(From->getType() == To->getType());
3246 auto *BB = From->getParent();
3247 unsigned Count = 0;
3248
3249 for (Use &U : llvm::make_early_inc_range(Range: From->uses())) {
3250 auto *I = cast<Instruction>(Val: U.getUser());
3251 if (I->getParent() == BB)
3252 continue;
3253 U.set(To);
3254 ++Count;
3255 }
3256 return Count;
3257}
3258
3259unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
3260 DominatorTree &DT,
3261 const BasicBlockEdge &Root) {
3262 auto Dominates = [&](const Use &U) { return DT.dominates(BBE: Root, U); };
3263 return ::replaceDominatedUsesWith(From, To, ShouldReplace: Dominates);
3264}
3265
3266unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
3267 DominatorTree &DT,
3268 const BasicBlock *BB) {
3269 auto Dominates = [&](const Use &U) { return DT.dominates(BB, U); };
3270 return ::replaceDominatedUsesWith(From, To, ShouldReplace: Dominates);
3271}
3272
3273unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
3274 DominatorTree &DT,
3275 const Instruction *I) {
3276 auto Dominates = [&](const Use &U) { return DT.dominates(Def: I, U); };
3277 return ::replaceDominatedUsesWith(From, To, ShouldReplace: Dominates);
3278}
3279
3280unsigned llvm::replaceDominatedUsesWithIf(
3281 Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Root,
3282 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) {
3283 auto DominatesAndShouldReplace = [&](const Use &U) {
3284 return DT.dominates(BBE: Root, U) && ShouldReplace(U, To);
3285 };
3286 return ::replaceDominatedUsesWith(From, To, ShouldReplace: DominatesAndShouldReplace);
3287}
3288
3289unsigned llvm::replaceDominatedUsesWithIf(
3290 Value *From, Value *To, DominatorTree &DT, const BasicBlock *BB,
3291 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) {
3292 auto DominatesAndShouldReplace = [&](const Use &U) {
3293 return DT.dominates(BB, U) && ShouldReplace(U, To);
3294 };
3295 return ::replaceDominatedUsesWith(From, To, ShouldReplace: DominatesAndShouldReplace);
3296}
3297
3298unsigned llvm::replaceDominatedUsesWithIf(
3299 Value *From, Value *To, DominatorTree &DT, const Instruction *I,
3300 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) {
3301 auto DominatesAndShouldReplace = [&](const Use &U) {
3302 return DT.dominates(Def: I, U) && ShouldReplace(U, To);
3303 };
3304 return ::replaceDominatedUsesWith(From, To, ShouldReplace: DominatesAndShouldReplace);
3305}
3306
/// Return true if \p Call is known not to require a GC safepoint: either it
/// (or its callee) carries the "gc-leaf-function" attribute, it is an
/// intrinsic other than the few explicitly excluded below, or it is a
/// recognized, available library call.
bool llvm::callsGCLeafFunction(const CallBase *Call,
                               const TargetLibraryInfo &TLI) {
  // Check if the function is specifically marked as a gc leaf function.
  if (Call->hasFnAttr(Kind: "gc-leaf-function"))
    return true;
  if (const Function *F = Call->getCalledFunction()) {
    if (F->hasFnAttribute(Kind: "gc-leaf-function"))
      return true;

    if (auto IID = F->getIntrinsicID()) {
      // Most LLVM intrinsics do not take safepoints.
      return IID != Intrinsic::experimental_gc_statepoint &&
             IID != Intrinsic::experimental_deoptimize &&
             IID != Intrinsic::memcpy_element_unordered_atomic &&
             IID != Intrinsic::memmove_element_unordered_atomic;
    }
  }

  // Lib calls can be materialized by some passes, and won't be
  // marked as 'gc-leaf-function.' All available Libcalls are
  // GC-leaf.
  LibFunc LF;
  if (TLI.getLibFunc(CB: *Call, F&: LF)) {
    return TLI.has(F: LF);
  }

  return false;
}
3335
/// Copy !nonnull metadata \p N from \p OldLI onto \p NewLI, translating it
/// when the load's result type has changed (pointer -> integer becomes an
/// equivalent !range annotation).
void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
                               LoadInst &NewLI) {
  auto *NewTy = NewLI.getType();

  // This only directly applies if the new type is also a pointer.
  if (NewTy->isPointerTy()) {
    NewLI.setMetadata(KindID: LLVMContext::MD_nonnull, Node: N);
    return;
  }

  // The only other translation we can do is to integral loads with !range
  // metadata.
  if (!NewTy->isIntegerTy())
    return;

  MDBuilder MDB(NewLI.getContext());
  const Value *Ptr = OldLI.getPointerOperand();
  auto *ITy = cast<IntegerType>(Val: NewTy);
  // Express "non-null" as the wrapped integer range [null+1, null), i.e.
  // every value except the integer representation of the null pointer.
  auto *NullInt = ConstantExpr::getPtrToInt(
      C: ConstantPointerNull::get(T: cast<PointerType>(Val: Ptr->getType())), Ty: ITy);
  auto *NonNullInt = ConstantExpr::getAdd(C1: NullInt, C2: ConstantInt::get(Ty: ITy, V: 1));
  NewLI.setMetadata(KindID: LLVMContext::MD_range,
                    Node: MDB.createRange(Lo: NonNullInt, Hi: NullInt));
}
3360
/// Copy !range metadata \p N from \p OldLI onto \p NewLI, translating it when
/// the load's result type has changed (integer -> pointer becomes !nonnull
/// when the range provably excludes zero).
void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
                             MDNode *N, LoadInst &NewLI) {
  auto *NewTy = NewLI.getType();
  // Simply copy the metadata if the type did not change.
  if (NewTy == OldLI.getType()) {
    NewLI.setMetadata(KindID: LLVMContext::MD_range, Node: N);
    return;
  }

  // Give up unless it is converted to a pointer where there is a single very
  // valuable mapping we can do reliably.
  // FIXME: It would be nice to propagate this in more ways, but the type
  // conversions make it hard.
  if (!NewTy->isPointerTy())
    return;

  // The widths must match and the old integer range must not contain zero
  // for the new pointer load to be known non-null.
  unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
  if (BitWidth == OldLI.getType()->getScalarSizeInBits() &&
      !getConstantRangeFromMetadata(RangeMD: *N).contains(Val: APInt(BitWidth, 0))) {
    MDNode *NN = MDNode::get(Context&: OldLI.getContext(), MDs: {});
    NewLI.setMetadata(KindID: LLVMContext::MD_nonnull, Node: NN);
  }
}
3384
3385void llvm::dropDebugUsers(Instruction &I) {
3386 SmallVector<DbgVariableRecord *, 1> DPUsers;
3387 findDbgUsers(V: &I, DbgVariableRecords&: DPUsers);
3388 for (auto *DVR : DPUsers)
3389 DVR->eraseFromParent();
3390}
3391
/// Hoist all of \p BB's instructions (except its terminator) into \p DomBlock
/// just before \p InsertPt, dropping their debug locations and debug records
/// along the way.
void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
                                    BasicBlock *BB) {
  // Since we are moving the instructions out of its basic block, we do not
  // retain their original debug locations (DILocations) and debug intrinsic
  // instructions.
  //
  // Doing so would degrade the debugging experience.
  //
  // FIXME: Issue #152767: debug info should also be the same as the
  // original branch, **if** the user explicitly indicated that (for sampling
  // PGO)
  //
  // Currently, when hoisting the instructions, we take the following actions:
  // - Remove their debug intrinsic instructions.
  // - Set their debug locations to the values from the insertion point.
  //
  // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
  // need to be deleted, is because there will not be any instructions with a
  // DILocation in either branch left after performing the transformation. We
  // can only insert a dbg.value after the two branches are joined again.
  //
  // See PR38762, PR39243 for more details.
  //
  // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
  // encode predicated DIExpressions that yield different results on different
  // code paths.

  for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
    Instruction *I = &*II;
    I->dropUBImplyingAttrsAndMetadata();
    if (I->isUsedByMetadata())
      dropDebugUsers(I&: *I);
    // RemoveDIs: drop debug-info too as the following code does.
    I->dropDbgRecords();
    if (I->isDebugOrPseudoInst()) {
      // Remove DbgInfo and pseudo probe Intrinsics.
      II = I->eraseFromParent();
      continue;
    }
    // Surviving instructions inherit the insertion point's debug location.
    I->setDebugLoc(InsertPt->getDebugLoc());
    ++II;
  }
  // Finally, move everything except BB's terminator in front of InsertPt.
  DomBlock->splice(ToIt: InsertPt->getIterator(), FromBB: BB, FromBeginIt: BB->begin(),
                   FromEndIt: BB->getTerminator()->getIterator());
}
3437
/// Build a DIExpression describing constant \p C (whose value type is \p Ty)
/// as a debug constant value, or return nullptr when no faithful encoding
/// exists (e.g. oversized floats or unsupported constant kinds).
DIExpression *llvm::getExpressionForConstant(DIBuilder &DIB, const Constant &C,
                                             Type &Ty) {
  // Create integer constant expression.
  auto createIntegerExpression = [&DIB](const Constant &CV) -> DIExpression * {
    const APInt &API = cast<ConstantInt>(Val: &CV)->getValue();
    std::optional<int64_t> InitIntOpt;
    // i1 values are zero-extended; wider integers are sign-extended so that
    // negative constants survive the round-trip through int64_t.
    if (API.getBitWidth() == 1)
      InitIntOpt = API.tryZExtValue();
    else
      InitIntOpt = API.trySExtValue();
    return InitIntOpt ? DIB.createConstantValueExpression(
                            Val: static_cast<uint64_t>(*InitIntOpt))
                      : nullptr;
  };

  if (isa<ConstantInt>(Val: C))
    return createIntegerExpression(C);

  // Floats of at most 64 bits are encoded through their raw bit pattern.
  auto *FP = dyn_cast<ConstantFP>(Val: &C);
  if (FP && Ty.isFloatingPointTy() && Ty.getScalarSizeInBits() <= 64) {
    const APFloat &APF = FP->getValueAPF();
    APInt const &API = APF.bitcastToAPInt();
    if (uint64_t Temp = API.getZExtValue())
      return DIB.createConstantValueExpression(Val: Temp);
    return DIB.createConstantValueExpression(Val: *API.getRawData());
  }

  if (!Ty.isPointerTy())
    return nullptr;

  if (isa<ConstantPointerNull>(Val: C))
    return DIB.createConstantValueExpression(Val: 0);

  // An inttoptr of a constant integer is described by the integer itself.
  if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(Val: &C))
    if (CE->getOpcode() == Instruction::IntToPtr) {
      const Value *V = CE->getOperand(i_nocapture: 0);
      if (auto CI = dyn_cast_or_null<ConstantInt>(Val: V))
        return createIntegerExpression(*CI);
    }
  return nullptr;
}
3479
/// Rewrite, according to \p Mapping, the location operands (and, for
/// dbg.assign records, the address operand) of every debug variable record
/// attached to \p Inst.
void llvm::remapDebugVariable(ValueToValueMapTy &Mapping, Instruction *Inst) {
  // Replace any location operand that has an entry in the mapping.
  auto RemapDebugOperands = [&Mapping](auto *DV, auto Set) {
    for (auto *Op : Set) {
      auto I = Mapping.find(Op);
      if (I != Mapping.end())
        DV->replaceVariableLocationOp(Op, I->second, /*AllowEmpty=*/true);
    }
  };
  // dbg.assign records additionally carry an address operand to remap.
  auto RemapAssignAddress = [&Mapping](auto *DA) {
    auto I = Mapping.find(DA->getAddress());
    if (I != Mapping.end())
      DA->setAddress(I->second);
  };
  for (DbgVariableRecord &DVR : filterDbgVars(R: Inst->getDbgRecordRange())) {
    RemapDebugOperands(&DVR, DVR.location_ops());
    if (DVR.isDbgAssign())
      RemapAssignAddress(&DVR);
  }
}
3499
namespace {

/// A potential constituent of a bitreverse or bswap expression. See
/// collectBitParts for a fuller explanation.
struct BitPart {
  BitPart(Value *P, unsigned BW) : Provider(P) {
    Provenance.resize(N: BW);
  }

  /// The Value that this is a bitreverse/bswap of.
  Value *Provider;

  /// The "provenance" of each bit. Provenance[A] = B means that bit A
  /// in Provider becomes bit B in the result of this expression.
  SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.

  /// Marker for a result bit with no source bit in Provider (e.g. a
  /// shifted-in or masked-off zero).
  enum { Unset = -1 };
};

} // end anonymous namespace
3520
3521/// Analyze the specified subexpression and see if it is capable of providing
3522/// pieces of a bswap or bitreverse. The subexpression provides a potential
3523/// piece of a bswap or bitreverse if it can be proved that each non-zero bit in
3524/// the output of the expression came from a corresponding bit in some other
3525/// value. This function is recursive, and the end result is a mapping of
3526/// bitnumber to bitnumber. It is the caller's responsibility to validate that
3527/// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
3528///
3529/// For example, if the current subexpression if "(shl i32 %X, 24)" then we know
3530/// that the expression deposits the low byte of %X into the high byte of the
3531/// result and that all other bits are zero. This expression is accepted and a
3532/// BitPart is returned with Provider set to %X and Provenance[24-31] set to
3533/// [0-7].
3534///
3535/// For vector types, all analysis is performed at the per-element level. No
3536/// cross-element analysis is supported (shuffle/insertion/reduction), and all
3537/// constant masks must be splatted across all elements.
3538///
3539/// To avoid revisiting values, the BitPart results are memoized into the
3540/// provided map. To avoid unnecessary copying of BitParts, BitParts are
3541/// constructed in-place in the \c BPS map. Because of this \c BPS needs to
3542/// store BitParts objects, not pointers. As we need the concept of a nullptr
3543/// BitParts (Value has been analyzed and the analysis failed), we an Optional
3544/// type instead to provide the same functionality.
3545///
3546/// Because we pass around references into \c BPS, we must use a container that
3547/// does not invalidate internal references (std::map instead of DenseMap).
static const std::optional<BitPart> &
collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
                std::map<Value *, std::optional<BitPart>> &BPS, int Depth,
                bool &FoundRoot) {
  // Memoization: reuse the cached analysis for this value if present. An
  // engaged-but-empty optional records a prior failed analysis.
  auto [I, Inserted] = BPS.try_emplace(k: V);
  if (!Inserted)
    return I->second;

  // Result starts out as std::nullopt; every early return below reports
  // "analysis failed" for this value.
  auto &Result = I->second;
  auto BitWidth = V->getType()->getScalarSizeInBits();

  // Can't do integer/elements > 128 bits.
  if (BitWidth > 128)
    return Result;

  // Prevent stack overflow by limiting the recursion depth
  if (Depth == BitPartRecursionMaxDepth) {
    LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
    return Result;
  }

  if (auto *I = dyn_cast<Instruction>(Val: V)) {
    Value *X, *Y;
    const APInt *C;

    // If this is an or instruction, it may be an inner node of the bswap.
    if (match(V, P: m_Or(L: m_Value(V&: X), R: m_Value(V&: Y)))) {
      // Check we have both sources and they are from the same provider.
      const auto &A = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
                                      Depth: Depth + 1, FoundRoot);
      if (!A || !A->Provider)
        return Result;

      const auto &B = collectBitParts(V: Y, MatchBSwaps, MatchBitReversals, BPS,
                                      Depth: Depth + 1, FoundRoot);
      if (!B || A->Provider != B->Provider)
        return Result;

      // Try and merge the two together.
      Result = BitPart(A->Provider, BitWidth);
      for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) {
        // A bit may be known on either side but not conflicting on both.
        if (A->Provenance[BitIdx] != BitPart::Unset &&
            B->Provenance[BitIdx] != BitPart::Unset &&
            A->Provenance[BitIdx] != B->Provenance[BitIdx])
          return Result = std::nullopt;

        if (A->Provenance[BitIdx] == BitPart::Unset)
          Result->Provenance[BitIdx] = B->Provenance[BitIdx];
        else
          Result->Provenance[BitIdx] = A->Provenance[BitIdx];
      }

      return Result;
    }

    // If this is a logical shift by a constant, recurse then shift the result.
    if (match(V, P: m_LogicalShift(L: m_Value(V&: X), R: m_APInt(Res&: C)))) {
      const APInt &BitShift = *C;

      // Ensure the shift amount is defined.
      if (BitShift.uge(RHS: BitWidth))
        return Result;

      // For bswap-only, limit shift amounts to whole bytes, for an early exit.
      if (!MatchBitReversals && (BitShift.getZExtValue() % 8) != 0)
        return Result;

      const auto &Res = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth: Depth + 1, FoundRoot);
      if (!Res)
        return Result;
      Result = Res;

      // Perform the "shift" on BitProvenance.
      auto &P = Result->Provenance;
      if (I->getOpcode() == Instruction::Shl) {
        P.erase(CS: std::prev(x: P.end(), n: BitShift.getZExtValue()), CE: P.end());
        P.insert(I: P.begin(), NumToInsert: BitShift.getZExtValue(), Elt: BitPart::Unset);
      } else {
        P.erase(CS: P.begin(), CE: std::next(x: P.begin(), n: BitShift.getZExtValue()));
        P.insert(I: P.end(), NumToInsert: BitShift.getZExtValue(), Elt: BitPart::Unset);
      }

      return Result;
    }

    // If this is a logical 'and' with a mask that clears bits, recurse then
    // unset the appropriate bits.
    if (match(V, P: m_And(L: m_Value(V&: X), R: m_APInt(Res&: C)))) {
      const APInt &AndMask = *C;

      // Check that the mask allows a multiple of 8 bits for a bswap, for an
      // early exit.
      unsigned NumMaskedBits = AndMask.popcount();
      if (!MatchBitReversals && (NumMaskedBits % 8) != 0)
        return Result;

      const auto &Res = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth: Depth + 1, FoundRoot);
      if (!Res)
        return Result;
      Result = Res;

      for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
        // If the AndMask is zero for this bit, clear the bit.
        if (AndMask[BitIdx] == 0)
          Result->Provenance[BitIdx] = BitPart::Unset;
      return Result;
    }

    // If this is a zext instruction zero extend the result.
    if (match(V, P: m_ZExt(Op: m_Value(V&: X)))) {
      const auto &Res = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth: Depth + 1, FoundRoot);
      if (!Res)
        return Result;

      Result = BitPart(Res->Provider, BitWidth);
      auto NarrowBitWidth = X->getType()->getScalarSizeInBits();
      for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx)
        Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
      for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx)
        Result->Provenance[BitIdx] = BitPart::Unset;
      return Result;
    }

    // If this is a truncate instruction, extract the lower bits.
    if (match(V, P: m_Trunc(Op: m_Value(V&: X)))) {
      const auto &Res = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth: Depth + 1, FoundRoot);
      if (!Res)
        return Result;

      Result = BitPart(Res->Provider, BitWidth);
      for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
        Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
      return Result;
    }

    // BITREVERSE - most likely due to us previous matching a partial
    // bitreverse.
    if (match(V, P: m_BitReverse(Op0: m_Value(V&: X)))) {
      const auto &Res = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth: Depth + 1, FoundRoot);
      if (!Res)
        return Result;

      Result = BitPart(Res->Provider, BitWidth);
      for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
        Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx];
      return Result;
    }

    // BSWAP - most likely due to us previous matching a partial bswap.
    if (match(V, P: m_BSwap(Op0: m_Value(V&: X)))) {
      const auto &Res = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth: Depth + 1, FoundRoot);
      if (!Res)
        return Result;

      unsigned ByteWidth = BitWidth / 8;
      Result = BitPart(Res->Provider, BitWidth);
      for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) {
        unsigned ByteBitOfs = ByteIdx * 8;
        for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx)
          Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] =
              Res->Provenance[ByteBitOfs + BitIdx];
      }
      return Result;
    }

    // Funnel 'double' shifts take 3 operands, 2 inputs and the shift
    // amount (modulo).
    // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
    // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
    if (match(V, P: m_FShl(Op0: m_Value(V&: X), Op1: m_Value(V&: Y), Op2: m_APInt(Res&: C))) ||
        match(V, P: m_FShr(Op0: m_Value(V&: X), Op1: m_Value(V&: Y), Op2: m_APInt(Res&: C)))) {
      // We can treat fshr as a fshl by flipping the modulo amount.
      unsigned ModAmt = C->urem(RHS: BitWidth);
      if (cast<IntrinsicInst>(Val: I)->getIntrinsicID() == Intrinsic::fshr)
        ModAmt = BitWidth - ModAmt;

      // For bswap-only, limit shift amounts to whole bytes, for an early exit.
      if (!MatchBitReversals && (ModAmt % 8) != 0)
        return Result;

      // Check we have both sources and they are from the same provider.
      const auto &LHS = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth: Depth + 1, FoundRoot);
      if (!LHS || !LHS->Provider)
        return Result;

      const auto &RHS = collectBitParts(V: Y, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth: Depth + 1, FoundRoot);
      if (!RHS || LHS->Provider != RHS->Provider)
        return Result;

      unsigned StartBitRHS = BitWidth - ModAmt;
      Result = BitPart(LHS->Provider, BitWidth);
      for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx)
        Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx];
      for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx)
        Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS];
      return Result;
    }
  }

  // If we've already found a root input value then we're never going to merge
  // these back together.
  if (FoundRoot)
    return Result;

  // Okay, we got to something that isn't a shift, 'or', 'and', etc. This must
  // be the root input value to the bswap/bitreverse.
  FoundRoot = true;
  Result = BitPart(V, BitWidth);
  for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
    Result->Provenance[BitIdx] = BitIdx;
  return Result;
}
3768
/// Return true if moving bit \p From to bit \p To matches the bit movement of
/// a bswap on a \p BitWidth-bit value: the bit keeps its position within its
/// byte while the containing bytes are mirrored.
static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
                                          unsigned BitWidth) {
  // The position inside the byte must be preserved.
  if ((From & 7) != (To & 7))
    return false;
  // The containing byte must land in the mirrored byte slot.
  const unsigned FromByte = From / 8;
  const unsigned ToByte = To / 8;
  const unsigned NumBytes = BitWidth / 8;
  return FromByte + ToByte + 1 == NumBytes;
}
3779
/// Return true if moving bit \p From to bit \p To mirrors the bit across the
/// whole \p BitWidth-bit value, as a bitreverse does.
static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
                                               unsigned BitWidth) {
  // Equivalent to From == BitWidth - To - 1, rearranged to avoid the
  // subtraction.
  return From + To + 1 == BitWidth;
}
3784
/// Match \p I (an 'or', funnel shift, or bswap) against the bswap/bitreverse
/// idiom. On success, build the corresponding intrinsic call (plus any needed
/// trunc/mask/zext instructions) before \p I, record every new instruction in
/// \p InsertedInsts, and return true. \p I itself is not erased here.
bool llvm::recognizeBSwapOrBitReverseIdiom(
    Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
    SmallVectorImpl<Instruction *> &InsertedInsts) {
  if (!match(V: I, P: m_Or(L: m_Value(), R: m_Value())) &&
      !match(V: I, P: m_FShl(Op0: m_Value(), Op1: m_Value(), Op2: m_Value())) &&
      !match(V: I, P: m_FShr(Op0: m_Value(), Op1: m_Value(), Op2: m_Value())) &&
      !match(V: I, P: m_BSwap(Op0: m_Value())))
    return false;
  if (!MatchBSwaps && !MatchBitReversals)
    return false;
  Type *ITy = I->getType();
  if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() == 1 ||
      ITy->getScalarSizeInBits() > 128)
    return false; // Can't do integer/elements > 128 bits.

  // Try to find all the pieces corresponding to the bswap.
  bool FoundRoot = false;
  std::map<Value *, std::optional<BitPart>> BPS;
  const auto &Res =
      collectBitParts(V: I, MatchBSwaps, MatchBitReversals, BPS, Depth: 0, FoundRoot);
  if (!Res)
    return false;
  ArrayRef<int8_t> BitProvenance = Res->Provenance;
  assert(all_of(BitProvenance,
                [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) &&
         "Illegal bit provenance index");

  // If the upper bits are zero, then attempt to perform as a truncated op.
  Type *DemandedTy = ITy;
  if (BitProvenance.back() == BitPart::Unset) {
    while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
      BitProvenance = BitProvenance.drop_back();
    if (BitProvenance.empty())
      return false; // TODO - handle null value?
    DemandedTy = Type::getIntNTy(C&: I->getContext(), N: BitProvenance.size());
    if (auto *IVecTy = dyn_cast<VectorType>(Val: ITy))
      DemandedTy = VectorType::get(ElementType: DemandedTy, Other: IVecTy);
  }

  // Check BitProvenance hasn't found a source larger than the result type.
  unsigned DemandedBW = DemandedTy->getScalarSizeInBits();
  if (DemandedBW > ITy->getScalarSizeInBits())
    return false;

  // Now, is the bit permutation correct for a bswap or a bitreverse? We can
  // only byteswap values with an even number of bytes.
  APInt DemandedMask = APInt::getAllOnes(numBits: DemandedBW);
  bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
  bool OKForBitReverse = MatchBitReversals;
  for (unsigned BitIdx = 0;
       (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) {
    // Unset bits have no source; remember to mask them off afterwards.
    if (BitProvenance[BitIdx] == BitPart::Unset) {
      DemandedMask.clearBit(BitPosition: BitIdx);
      continue;
    }
    OKForBSwap &= bitTransformIsCorrectForBSwap(From: BitProvenance[BitIdx], To: BitIdx,
                                                BitWidth: DemandedBW);
    OKForBitReverse &= bitTransformIsCorrectForBitReverse(From: BitProvenance[BitIdx],
                                                          To: BitIdx, BitWidth: DemandedBW);
  }

  Intrinsic::ID Intrin;
  if (OKForBSwap)
    Intrin = Intrinsic::bswap;
  else if (OKForBitReverse)
    Intrin = Intrinsic::bitreverse;
  else
    return false;

  Function *F =
      Intrinsic::getOrInsertDeclaration(M: I->getModule(), id: Intrin, Tys: DemandedTy);
  Value *Provider = Res->Provider;

  // We may need to truncate the provider.
  if (DemandedTy != Provider->getType()) {
    auto *Trunc =
        CastInst::CreateIntegerCast(S: Provider, Ty: DemandedTy, isSigned: false, Name: "trunc", InsertBefore: I->getIterator());
    InsertedInsts.push_back(Elt: Trunc);
    Provider = Trunc;
  }

  Instruction *Result = CallInst::Create(Func: F, Args: Provider, NameStr: "rev", InsertBefore: I->getIterator());
  InsertedInsts.push_back(Elt: Result);

  // Clear the bits that had no source in the provider.
  if (!DemandedMask.isAllOnes()) {
    auto *Mask = ConstantInt::get(Ty: DemandedTy, V: DemandedMask);
    Result = BinaryOperator::Create(Op: Instruction::And, S1: Result, S2: Mask, Name: "mask", InsertBefore: I->getIterator());
    InsertedInsts.push_back(Elt: Result);
  }

  // We may need to zeroextend back to the result type.
  if (ITy != Result->getType()) {
    auto *ExtInst = CastInst::CreateIntegerCast(S: Result, Ty: ITy, isSigned: false, Name: "zext", InsertBefore: I->getIterator());
    InsertedInsts.push_back(Elt: ExtInst);
  }

  return true;
}
3883
3884// CodeGen has special handling for some string functions that may replace
3885// them with target-specific intrinsics. Since that'd skip our interceptors
3886// in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
3887// we mark affected calls as NoBuiltin, which will disable optimization
3888// in CodeGen.
3889void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
3890 CallInst *CI, const TargetLibraryInfo *TLI) {
3891 Function *F = CI->getCalledFunction();
3892 LibFunc Func;
3893 if (F && !F->hasLocalLinkage() && F->hasName() &&
3894 TLI->getLibFunc(funcName: F->getName(), F&: Func) && TLI->hasOptimizedCodeGen(F: Func) &&
3895 !F->doesNotAccessMemory())
3896 CI->addFnAttr(Kind: Attribute::NoBuiltin);
3897}
3898
/// Return true if operand \p OpIdx of \p I may be replaced with a
/// non-constant value (e.g. the result of a PHI or select) without making
/// \p I invalid.
bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
  const auto *Op = I->getOperand(i: OpIdx);
  // We can't have a PHI with a metadata or token type.
  if (Op->getType()->isMetadataTy() || Op->getType()->isTokenLikeTy())
    return false;

  // swifterror pointers can only be used by a load, store, or as a swifterror
  // argument; swifterror pointers are not allowed to be used in select or phi
  // instructions.
  if (Op->isSwiftError())
    return false;

  // Protected pointer field loads/stores should be paired with the intrinsic
  // to avoid unnecessary address escapes.
  if (auto *II = dyn_cast<IntrinsicInst>(Val: Op))
    if (II->getIntrinsicID() == Intrinsic::protected_field_ptr)
      return false;

  // Cannot replace alloca argument with phi/select.
  if (I->isLifetimeStartOrEnd())
    return false;

  // Early exit: non-constant, non-inline-asm operands are always replaceable.
  if (!isa<Constant, InlineAsm>(Val: Op))
    return true;

  // From here on, Op is a constant (or inline asm); whether it may vary
  // depends on which operand slot of which instruction it occupies.
  switch (I->getOpcode()) {
  default:
    return true;
  case Instruction::Call:
  case Instruction::Invoke: {
    const auto &CB = cast<CallBase>(Val: *I);

    // Can't handle inline asm. Skip it.
    if (CB.isInlineAsm())
      return false;

    // Constant bundle operands may need to retain their constant-ness for
    // correctness.
    if (CB.isBundleOperand(Idx: OpIdx))
      return false;

    if (OpIdx < CB.arg_size()) {
      // Some variadic intrinsics require constants in the variadic arguments,
      // which currently aren't markable as immarg.
      if (isa<IntrinsicInst>(Val: CB) &&
          OpIdx >= CB.getFunctionType()->getNumParams()) {
        // This is known to be OK for stackmap.
        return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
      }

      // gcroot is a special case, since it requires a constant argument which
      // isn't also required to be a simple ConstantInt.
      if (CB.getIntrinsicID() == Intrinsic::gcroot)
        return false;

      // Some intrinsic operands are required to be immediates.
      return !CB.paramHasAttr(ArgNo: OpIdx, Kind: Attribute::ImmArg);
    }

    // It is never allowed to replace the call argument to an intrinsic, but it
    // may be possible for a call.
    return !isa<IntrinsicInst>(Val: CB);
  }
  case Instruction::ShuffleVector:
    // Shufflevector masks are constant.
    return OpIdx != 2;
  case Instruction::Switch:
  case Instruction::ExtractValue:
    // All operands apart from the first are constant.
    return OpIdx == 0;
  case Instruction::InsertValue:
    // All operands apart from the first and the second are constant.
    return OpIdx < 2;
  case Instruction::Alloca:
    // Static allocas (constant size in the entry block) are handled by
    // prologue/epilogue insertion so they're free anyway. We definitely don't
    // want to make them non-constant.
    return !cast<AllocaInst>(Val: I)->isStaticAlloca();
  case Instruction::GetElementPtr:
    if (OpIdx == 0)
      return true;
    // Struct member indices must stay constant; array indices may vary.
    gep_type_iterator It = gep_type_begin(GEP: I);
    for (auto E = std::next(x: It, n: OpIdx); It != E; ++It)
      if (It.isStruct())
        return false;
    return true;
  }
}
3988
/// Return a value that is the logical negation of \p Condition, reusing an
/// existing constant, 'not' operand, or nearby 'not' instruction when
/// possible, and creating a fresh 'not' instruction only as a last resort.
Value *llvm::invertCondition(Value *Condition) {
  // First: Check if it's a constant
  if (Constant *C = dyn_cast<Constant>(Val: Condition))
    return ConstantExpr::getNot(C);

  // Second: If the condition is already inverted, return the original value
  Value *NotCondition;
  if (match(V: Condition, P: m_Not(V: m_Value(V&: NotCondition))))
    return NotCondition;

  // Determine the block in which to search for, or insert, the inversion:
  // the defining block for instructions, the entry block for arguments.
  BasicBlock *Parent = nullptr;
  Instruction *Inst = dyn_cast<Instruction>(Val: Condition);
  if (Inst)
    Parent = Inst->getParent();
  else if (Argument *Arg = dyn_cast<Argument>(Val: Condition))
    Parent = &Arg->getParent()->getEntryBlock();
  assert(Parent && "Unsupported condition to invert");

  // Third: Check all the users for an invert
  for (User *U : Condition->users())
    if (Instruction *I = dyn_cast<Instruction>(Val: U))
      if (I->getParent() == Parent && match(V: I, P: m_Not(V: m_Specific(V: Condition))))
        return I;

  // Last option: Create a new instruction
  auto *Inverted =
      BinaryOperator::CreateNot(Op: Condition, Name: Condition->getName() + ".inv");
  // Place it right after the defining instruction, or at the block's first
  // insertion point for PHIs and arguments.
  if (Inst && !isa<PHINode>(Val: Inst))
    Inverted->insertAfter(InsertPos: Inst->getIterator());
  else
    Inverted->insertBefore(InsertPos: Parent->getFirstInsertionPt());
  return Inverted;
}
4022
4023bool llvm::inferAttributesFromOthers(Function &F) {
4024 // Note: We explicitly check for attributes rather than using cover functions
4025 // because some of the cover functions include the logic being implemented.
4026
4027 bool Changed = false;
4028 // readnone + not convergent implies nosync
4029 if (!F.hasFnAttribute(Kind: Attribute::NoSync) &&
4030 F.doesNotAccessMemory() && !F.isConvergent()) {
4031 F.setNoSync();
4032 Changed = true;
4033 }
4034
4035 // readonly implies nofree
4036 if (!F.hasFnAttribute(Kind: Attribute::NoFree) && F.onlyReadsMemory()) {
4037 F.setDoesNotFreeMemory();
4038 Changed = true;
4039 }
4040
4041 // willreturn implies mustprogress
4042 if (!F.hasFnAttribute(Kind: Attribute::MustProgress) && F.willReturn()) {
4043 F.setMustProgress();
4044 Changed = true;
4045 }
4046
4047 // TODO: There are a bunch of cases of restrictive memory effects we
4048 // can infer by inspecting arguments of argmemonly-ish functions.
4049
4050 return Changed;
4051}
4052
4053void OverflowTracking::mergeFlags(Instruction &I) {
4054#ifndef NDEBUG
4055 if (Opcode)
4056 assert(Opcode == I.getOpcode() &&
4057 "can only use mergeFlags on instructions with matching opcodes");
4058 else
4059 Opcode = I.getOpcode();
4060#endif
4061 if (isa<OverflowingBinaryOperator>(Val: &I)) {
4062 HasNUW &= I.hasNoUnsignedWrap();
4063 HasNSW &= I.hasNoSignedWrap();
4064 }
4065 if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(Val: &I))
4066 IsDisjoint &= DisjointOp->isDisjoint();
4067}
4068
4069void OverflowTracking::applyFlags(Instruction &I) {
4070 I.clearSubclassOptionalData();
4071 if (I.getOpcode() == Instruction::Add ||
4072 (I.getOpcode() == Instruction::Mul && AllKnownNonZero)) {
4073 if (HasNUW)
4074 I.setHasNoUnsignedWrap();
4075 if (HasNSW && (AllKnownNonNegative || HasNUW))
4076 I.setHasNoSignedWrap();
4077 }
4078 if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(Val: &I))
4079 DisjointOp->setIsDisjoint(IsDisjoint);
4080}
4081