//===-- MoveAutoInit.cpp - move auto-init inst closer to their use site----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass moves instructions marked as auto-init closer to the basic blocks
// that use them, eventually removing them from some control paths of the
// function.
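//
// For instance (an illustrative sketch, not taken from any particular test),
// -ftrivial-auto-var-init makes the frontend emit annotated initializations
// in the entry block, such as:
//
//   entry:
//     %buf = alloca [64 x i8]
//     call void @llvm.memset.p0.i64(ptr %buf, i8 0, i64 64, i1 false),
//         !annotation !0              ; !0 = !{!"auto-init"}
//     br i1 %cond, label %use, label %skip
//
// If every access to %buf is dominated by %use, the memset can be sunk into
// %use, so the %skip path no longer pays for the initialization.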
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/MoveAutoInit.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

using namespace llvm;

#define DEBUG_TYPE "move-auto-init"

STATISTIC(NumMoved, "Number of instructions moved");

static cl::opt<unsigned> MoveAutoInitThreshold(
    "move-auto-init-threshold", cl::Hidden, cl::init(128),
    cl::desc("Maximum instructions to analyze per moved initialization"));

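/// Returns true if \p I carries "!annotation" metadata containing an
/// "auto-init" string operand, i.e. it is a compiler-generated automatic
/// variable initialization.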
static bool hasAutoInitMetadata(const Instruction &I) {
  return I.hasMetadata(LLVMContext::MD_annotation) &&
         any_of(I.getMetadata(LLVMContext::MD_annotation)->operands(),
                [](const MDOperand &Op) { return Op.equalsStr("auto-init"); });
}

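/// If \p I is a store or memory intrinsic whose destination is based on an
/// alloca, returns the written MemoryLocation; otherwise returns std::nullopt.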
static std::optional<MemoryLocation> writeToAlloca(const Instruction &I) {
  MemoryLocation ML;
  if (auto *MI = dyn_cast<MemIntrinsic>(&I))
    ML = MemoryLocation::getForDest(MI);
  else if (auto *SI = dyn_cast<StoreInst>(&I))
    ML = MemoryLocation::get(SI);
  else
    return std::nullopt;

  if (isa<AllocaInst>(getUnderlyingObject(ML.Ptr)))
    return ML;
  else
    return {};
}

/// Finds a BasicBlock in the CFG where instruction `I` can be moved to while
/// not changing the Memory SSA ordering and being guarded by at least one
/// condition.
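/// The walk over the MemorySSA use lists is capped by
/// -move-auto-init-threshold to keep compile time bounded; returns nullptr
/// when that cap is hit or when no clobbering user is found.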
static BasicBlock *usersDominator(const MemoryLocation &ML, Instruction *I,
                                  DominatorTree &DT, MemorySSA &MSSA) {
  BasicBlock *CurrentDominator = nullptr;
  MemoryUseOrDef &IMA = *MSSA.getMemoryAccess(I);
  BatchAAResults AA(MSSA.getAA());

  SmallPtrSet<MemoryAccess *, 8> Visited;

  auto AsMemoryAccess = [](User *U) { return cast<MemoryAccess>(U); };
  SmallVector<MemoryAccess *> WorkList(map_range(IMA.users(), AsMemoryAccess));

  while (!WorkList.empty()) {
    MemoryAccess *MA = WorkList.pop_back_val();
    if (!Visited.insert(MA).second)
      continue;

    if (Visited.size() > MoveAutoInitThreshold)
      return nullptr;

    bool FoundClobberingUser = false;
    if (auto *M = dyn_cast<MemoryUseOrDef>(MA)) {
      Instruction *MI = M->getMemoryInst();

      // If this memory instruction may not clobber `I`, we can skip it.
      // LifetimeEnd is a valid user, but we do not want it in the user
      // dominator.
      if (AA.getModRefInfo(MI, ML) != ModRefInfo::NoModRef &&
          !MI->isLifetimeStartOrEnd() && MI != I) {
        FoundClobberingUser = true;
        CurrentDominator = CurrentDominator
                               ? DT.findNearestCommonDominator(CurrentDominator,
                                                               MI->getParent())
                               : MI->getParent();
      }
    }
    if (!FoundClobberingUser) {
      auto UsersAsMemoryAccesses = map_range(MA->users(), AsMemoryAccess);
      append_range(WorkList, UsersAsMemoryAccesses);
    }
  }
  return CurrentDominator;
}

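/// Scans the entry block for auto-init instructions writing to an alloca and
/// sinks each one to the nearest block dominating all of its clobbering users,
/// as long as doing so cannot introduce extra executions (e.g. inside a cycle)
/// and the target block can actually hold the instruction.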
static bool runMoveAutoInit(Function &F, DominatorTree &DT, MemorySSA &MSSA) {
  BasicBlock &EntryBB = F.getEntryBlock();
  SmallVector<std::pair<Instruction *, BasicBlock *>> JobList;

  //
  // Compute movable instructions.
  //
  for (Instruction &I : EntryBB) {
    if (!hasAutoInitMetadata(I))
      continue;

    std::optional<MemoryLocation> ML = writeToAlloca(I);
    if (!ML)
      continue;

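    // A volatile access must stay on its original control path, so never
    // sink it.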
    if (I.isVolatile())
      continue;

    BasicBlock *UsersDominator = usersDominator(ML.value(), &I, DT, MSSA);
    if (!UsersDominator)
      continue;

    if (UsersDominator == &EntryBB)
      continue;

    // Traverse the CFG to detect cycles `UsersDominator` would be part of.
    SmallPtrSet<BasicBlock *, 8> TransitiveSuccessors;
    SmallVector<BasicBlock *> WorkList(successors(UsersDominator));
    bool HasCycle = false;
    while (!WorkList.empty()) {
      BasicBlock *CurrBB = WorkList.pop_back_val();
      if (CurrBB == UsersDominator)
        // No early exit because we want to compute the full set of transitive
        // successors.
        HasCycle = true;
      for (BasicBlock *Successor : successors(CurrBB)) {
        if (!TransitiveSuccessors.insert(Successor).second)
          continue;
        WorkList.push_back(Successor);
      }
    }

    // Don't insert there if that could create multiple executions of I;
    // instead, we can insert it in the non-back-edge predecessor, if it
    // exists.
    if (HasCycle) {
      BasicBlock *UsersDominatorHead = UsersDominator;
      while (BasicBlock *UniquePredecessor =
                 UsersDominatorHead->getUniquePredecessor())
        UsersDominatorHead = UniquePredecessor;

      if (UsersDominatorHead == &EntryBB)
        continue;

      BasicBlock *DominatingPredecessor = nullptr;
      for (BasicBlock *Pred : predecessors(UsersDominatorHead)) {
        // If one of the predecessors of the dominator is also transitively a
        // successor, moving to the dominator would do the inverse of loop
        // hoisting, and we don't want that.
        if (TransitiveSuccessors.count(Pred))
          continue;

        if (!DT.isReachableFromEntry(Pred))
          continue;

        DominatingPredecessor =
            DominatingPredecessor
                ? DT.findNearestCommonDominator(DominatingPredecessor, Pred)
                : Pred;
      }

      if (!DominatingPredecessor || DominatingPredecessor == &EntryBB)
        continue;

      UsersDominator = DominatingPredecessor;
    }

    // CatchSwitchInst blocks can only have one instruction, so they are not
    // good candidates for insertion.
    while (isa<CatchSwitchInst>(UsersDominator->getFirstNonPHIIt())) {
      for (BasicBlock *Pred : predecessors(UsersDominator))
        if (DT.isReachableFromEntry(Pred))
          UsersDominator = DT.findNearestCommonDominator(UsersDominator, Pred);
    }

    // We finally found a place where I can be moved without introducing extra
    // executions, and which is guarded by at least one condition.
    if (UsersDominator != &EntryBB)
      JobList.emplace_back(&I, UsersDominator);
  }

  //
  // Perform the actual substitution.
  //
  if (JobList.empty())
    return false;

  MemorySSAUpdater MSSAU(&MSSA);

  // Reverse insertion to respect relative order between instructions:
  // if two instructions are moved from the same BB to the same BB, we insert
  // the second one in the front, then the first on top of it.
  for (auto &Job : reverse(JobList)) {
    Job.first->moveBefore(*Job.second, Job.second->getFirstInsertionPt());
    MSSAU.moveToPlace(MSSA.getMemoryAccess(Job.first), Job.first->getParent(),
                      MemorySSA::InsertionPlace::Beginning);
  }

  if (VerifyMemorySSA)
    MSSA.verifyMemorySSA();

  NumMoved += JobList.size();

  return true;
}

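// New pass manager entry point. The transformation only moves instructions
// and never changes the CFG, so the dominator tree and MemorySSA are
// preserved when it fires.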
PreservedAnalyses MoveAutoInitPass::run(Function &F,
                                        FunctionAnalysisManager &AM) {

  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
  if (!runMoveAutoInit(F, DT, MSSA))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<MemorySSAAnalysis>();
  PA.preserveSet<CFGAnalyses>();
  return PA;
}