1//===-- MemorySSAUpdater.cpp - Memory SSA Updater -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------===//
8//
9// This file implements the MemorySSAUpdater class.
10//
11//===----------------------------------------------------------------===//
12#include "llvm/Analysis/MemorySSAUpdater.h"
13#include "llvm/ADT/STLExtras.h"
14#include "llvm/ADT/SetVector.h"
15#include "llvm/ADT/SmallPtrSet.h"
16#include "llvm/Analysis/IteratedDominanceFrontier.h"
17#include "llvm/Analysis/LoopIterator.h"
18#include "llvm/Analysis/MemorySSA.h"
19#include "llvm/IR/BasicBlock.h"
20#include "llvm/IR/Dominators.h"
21#include "llvm/Support/Debug.h"
22#include <algorithm>
23
24#define DEBUG_TYPE "memoryssa"
25using namespace llvm;
26
27// This is the marker algorithm from "Simple and Efficient Construction of
28// Static Single Assignment Form"
29// The simple, non-marker algorithm places phi nodes at every join point.
30// Here, we place markers, and only place phi nodes if they end up necessary.
31// They are only necessary if they break a cycle (i.e., we recursively visit
32// ourselves again), or we discover, while getting the value of the operands,
33// that there are two or more definitions needing to be merged.
34// This still will leave non-minimal form in the case of irreducible control
35// flow, where phi nodes may be in cycles with themselves, but unnecessary.
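//
// As a small, purely illustrative example (not taken from any test), consider
// a diamond CFG A -> {B, C} -> D where only B contains a store:
//
//   A: (no defs)           B: 1 = MemoryDef(liveOnEntry)
//   C: (no defs)           D: (asking for the previous def here)
//
// Visiting D marks it visited and recurses into B and C, collecting the
// operands {1, liveOnEntry}. Since they differ, a MemoryPhi is (and must be)
// placed in D. Had B contained no store either, both operands would be
// liveOnEntry and tryRemoveTrivialPhi would fold the marker away, so no phi
// would survive.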
36MemoryAccess *MemorySSAUpdater::getPreviousDefRecursive(
37 BasicBlock *BB,
38 DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
39 // First, do a cache lookup. Without this cache, certain CFG structures
40 // (like a series of if statements) take exponential time to visit.
41 auto Cached = CachedPreviousDef.find(Val: BB);
42 if (Cached != CachedPreviousDef.end())
43 return Cached->second;
44
45 // If this method is called from an unreachable block, return LoE.
46 if (!MSSA->DT->isReachableFromEntry(A: BB))
47 return MSSA->getLiveOnEntryDef();
48
49 if (BasicBlock *Pred = BB->getUniquePredecessor()) {
50 VisitedBlocks.insert(Ptr: BB);
51 // Single predecessor case, just recurse, we can only have one definition.
52 MemoryAccess *Result = getPreviousDefFromEnd(Pred, CachedPreviousDef);
53 CachedPreviousDef.insert(KV: {BB, Result});
54 return Result;
55 }
56
57 if (VisitedBlocks.count(Ptr: BB)) {
58 // We hit our node again, meaning we had a cycle; we must insert a phi
59 // node to break it so we have an operand. The only case where this will
60 // insert useless phis is if we have irreducible control flow.
61 MemoryAccess *Result = MSSA->createMemoryPhi(BB);
62 CachedPreviousDef.insert(KV: {BB, Result});
63 return Result;
64 }
65
66 if (VisitedBlocks.insert(Ptr: BB).second) {
67 // Mark us visited so we can detect a cycle
68 SmallVector<TrackingVH<MemoryAccess>, 8> PhiOps;
69
70 // Recurse to get the values in our predecessors for placement of a
71 // potential phi node. This will insert phi nodes if we cycle in order to
72 // break the cycle and have an operand.
73 bool UniqueIncomingAccess = true;
74 MemoryAccess *SingleAccess = nullptr;
75 for (auto *Pred : predecessors(BB)) {
76 if (MSSA->DT->isReachableFromEntry(A: Pred)) {
77 auto *IncomingAccess = getPreviousDefFromEnd(Pred, CachedPreviousDef);
78 if (!SingleAccess)
79 SingleAccess = IncomingAccess;
80 else if (IncomingAccess != SingleAccess)
81 UniqueIncomingAccess = false;
82 PhiOps.push_back(Elt: IncomingAccess);
83 } else
84 PhiOps.push_back(Elt: MSSA->getLiveOnEntryDef());
85 }
86
87 // Now try to simplify the ops to avoid placing a phi.
88 // This may return null if we have not created a phi yet; that's okay.
89 MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(Val: MSSA->getMemoryAccess(BB));
90
91 // See if we can avoid the phi by simplifying it.
92 auto *Result = tryRemoveTrivialPhi(Phi, Operands&: PhiOps);
93 // If we couldn't simplify, we may have to create a phi
94 if (Result == Phi && UniqueIncomingAccess && SingleAccess) {
95 // A concrete Phi only exists if we created an empty one to break a cycle.
96 if (Phi) {
97 assert(Phi->operands().empty() && "Expected empty Phi");
98 Phi->replaceAllUsesWith(V: SingleAccess);
99 removeMemoryAccess(Phi);
100 }
101 Result = SingleAccess;
102 } else if (Result == Phi && !(UniqueIncomingAccess && SingleAccess)) {
103 if (!Phi)
104 Phi = MSSA->createMemoryPhi(BB);
105
106 // See if the existing phi operands match what we need.
107 // Unlike normal SSA, we only allow one phi node per block, so we can't just
108 // create a new one.
109 if (Phi->getNumOperands() != 0) {
110 // FIXME: Figure out whether this is dead code and if so remove it.
111 if (!std::equal(first1: Phi->op_begin(), last1: Phi->op_end(), first2: PhiOps.begin())) {
112 // These will have been filled in by the recursive read we did above.
113 llvm::copy(Range&: PhiOps, Out: Phi->op_begin());
114 std::copy(first: pred_begin(BB), last: pred_end(BB), result: Phi->block_begin());
115 }
116 } else {
117 unsigned i = 0;
118 for (auto *Pred : predecessors(BB))
119 Phi->addIncoming(V: &*PhiOps[i++], BB: Pred);
120 InsertedPHIs.push_back(Elt: Phi);
121 }
122 Result = Phi;
123 }
124
125 // Set ourselves up for the next variable by resetting visited state.
126 VisitedBlocks.erase(Ptr: BB);
127 CachedPreviousDef.insert(KV: {BB, Result});
128 return Result;
129 }
130 llvm_unreachable("Should have hit one of the three cases above");
131}
132
133// This starts at the memory access, and goes backwards in the block to find
134// the previous definition. If a definition is not found in the block of the
135// access, it continues globally, creating phi nodes to ensure we have a single
136// definition.
137MemoryAccess *MemorySSAUpdater::getPreviousDef(MemoryAccess *MA) {
138 if (auto *LocalResult = getPreviousDefInBlock(MA))
139 return LocalResult;
140 DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
141 return getPreviousDefRecursive(BB: MA->getBlock(), CachedPreviousDef);
142}
143
144// This starts at the memory access, and goes backwards in the block to find
145// the previous definition. If the definition is not found in the block of the
146// access, it returns nullptr.
147MemoryAccess *MemorySSAUpdater::getPreviousDefInBlock(MemoryAccess *MA) {
148 auto *Defs = MSSA->getWritableBlockDefs(BB: MA->getBlock());
149
150 // It's possible there are no defs, or we got handed the first def to start.
151 if (Defs) {
152 // If this is a def, we can just use the def iterators.
153 if (!isa<MemoryUse>(Val: MA)) {
154 auto Iter = MA->getReverseDefsIterator();
155 ++Iter;
156 if (Iter != Defs->rend())
157 return &*Iter;
158 } else {
159 // Otherwise, we have to walk the all-accesses list.
160 auto End = MSSA->getWritableBlockAccesses(BB: MA->getBlock())->rend();
161 for (auto &U : make_range(x: ++MA->getReverseIterator(), y: End))
162 if (!isa<MemoryUse>(Val: U))
163 return cast<MemoryAccess>(Val: &U);
164 // Note that if MA comes before Defs->begin(), we won't hit a def.
165 return nullptr;
166 }
167 }
168 return nullptr;
169}
170
171// This starts at the end of the given block.
172MemoryAccess *MemorySSAUpdater::getPreviousDefFromEnd(
173 BasicBlock *BB,
174 DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
175 auto *Defs = MSSA->getWritableBlockDefs(BB);
176
177 if (Defs) {
178 CachedPreviousDef.insert(KV: {BB, &*Defs->rbegin()});
179 return &*Defs->rbegin();
180 }
181
182 return getPreviousDefRecursive(BB, CachedPreviousDef);
183}
184// Recurse over a set of phi uses to eliminate the trivial ones
185MemoryAccess *MemorySSAUpdater::recursePhi(MemoryAccess *Phi) {
186 if (!Phi)
187 return nullptr;
188 TrackingVH<MemoryAccess> Res(Phi);
189 SmallVector<TrackingVH<Value>, 8> Uses;
190 std::copy(first: Phi->user_begin(), last: Phi->user_end(), result: std::back_inserter(x&: Uses));
191 for (auto &U : Uses)
192 if (MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(Val: &*U))
193 tryRemoveTrivialPhi(Phi: UsePhi);
194 return Res;
195}
196
197// Eliminate trivial phis.
198// Phis are trivial if all their operands are either the phi itself or the
199// same argument.
200// E.g. phi(a, a), b = phi(a, b), or c = phi(a, a, c).
201// We recursively try to remove them.
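//
// A short, hypothetical illustration of the recursion: given b = phi(a, b)
// and c = phi(b, a), removing the trivial phi b (its only non-self operand is
// a) replaces its uses with a, turning c into phi(a, a); recursePhi then
// revisits c and removes it as well, leaving only a.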
202MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi) {
203 assert(Phi && "Can only remove concrete Phi.");
204 auto OperRange = Phi->operands();
205 return tryRemoveTrivialPhi(Phi, Operands&: OperRange);
206}
207template <class RangeType>
208MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi,
209 RangeType &Operands) {
210 // Bail out on non-opt Phis.
211 if (NonOptPhis.count(V: Phi))
212 return Phi;
213
214 // Detect equal or self arguments
215 MemoryAccess *Same = nullptr;
216 for (auto &Op : Operands) {
217 // If the same or self, good so far
218 if (Op == Phi || Op == Same)
219 continue;
220 // Not the same; return the phi since it's not eliminable by us.
221 if (Same)
222 return Phi;
223 Same = cast<MemoryAccess>(&*Op);
224 }
225 // Never found a non-self reference, the phi is undef
226 if (Same == nullptr)
227 return MSSA->getLiveOnEntryDef();
228 if (Phi) {
229 Phi->replaceAllUsesWith(V: Same);
230 removeMemoryAccess(Phi);
231 }
232
233 // We should only end up recursing in case we replaced something, in which
234 // case, we may have made other Phis trivial.
235 return recursePhi(Phi: Same);
236}
237
238void MemorySSAUpdater::insertUse(MemoryUse *MU, bool RenameUses) {
239 VisitedBlocks.clear();
240 InsertedPHIs.clear();
241 MU->setDefiningAccess(DMA: getPreviousDef(MA: MU));
242
243 // In cases without unreachable blocks, because uses do not create new
244 // may-defs, there are only two cases:
245 // 1. There was a def already below us, and therefore, we should not have
246 // created a phi node because it was already needed for the def.
247 //
248 // 2. There is no def below us, and therefore, there is no extra renaming work
249 // to do.
250
251 // In cases with unreachable blocks, where the unnecessary Phis were
252 // optimized out, adding the Use may re-insert those Phis. Hence, when
253 // inserting Uses outside of the MSSA creation process, and new Phis were
254 // added, rename all uses if we are asked.
255
256 if (!RenameUses && !InsertedPHIs.empty()) {
257 auto *Defs = MSSA->getBlockDefs(BB: MU->getBlock());
258 (void)Defs;
259 assert((!Defs || (++Defs->begin() == Defs->end())) &&
260 "Block may have only a Phi or no defs");
261 }
262
263 if (RenameUses && InsertedPHIs.size()) {
264 SmallPtrSet<BasicBlock *, 16> Visited;
265 BasicBlock *StartBlock = MU->getBlock();
266
267 if (auto *Defs = MSSA->getWritableBlockDefs(BB: StartBlock)) {
268 MemoryAccess *FirstDef = &*Defs->begin();
269 // Convert to incoming value if it's a memorydef. A phi *is* already an
270 // incoming value.
271 if (auto *MD = dyn_cast<MemoryDef>(Val: FirstDef))
272 FirstDef = MD->getDefiningAccess();
273
274 MSSA->renamePass(BB: MU->getBlock(), IncomingVal: FirstDef, Visited);
275 }
276 // We just inserted a phi into this block, so the incoming value will
277 // become the phi anyway, so it does not matter what we pass.
278 for (auto &MP : InsertedPHIs)
279 if (MemoryPhi *Phi = cast_or_null<MemoryPhi>(Val&: MP))
280 MSSA->renamePass(BB: Phi->getBlock(), IncomingVal: nullptr, Visited);
281 }
282}
283
284// Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef.
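//
// A hypothetical example of why several operands may need updating: if BB ends
// in a switch with two cases branching to MP->getBlock(), MP has two
// consecutive incoming entries for BB, and both are set to NewDef here.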
285static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB,
286 MemoryAccess *NewDef) {
287 // Replace any operand whose incoming block is BB with the new defining
288 // access.
289 int i = MP->getBasicBlockIndex(BB);
290 assert(i != -1 && "Should have found the basic block in the phi");
291 // We can't just compare i against getNumOperands since one is signed and the
292 // other not. So use it to index into the block iterator.
293 for (const BasicBlock *BlockBB : llvm::drop_begin(RangeOrContainer: MP->blocks(), N: i)) {
294 if (BlockBB != BB)
295 break;
296 MP->setIncomingValue(I: i, V: NewDef);
297 ++i;
298 }
299}
300
301// A brief description of the algorithm:
302// First, we compute what should define the new def, using the SSA
303// construction algorithm.
304// Then, we update the defs below us (and any new phi nodes) in the graph to
305// point to the correct new defs, to ensure we only have one variable, and no
306// disconnected stores.
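//
// A minimal caller sketch (hypothetical names; `NewStore`, `BB`, and `MSSAU`
// belong to the client pass, not to this file; the insertion point is shown
// only for illustration):
//
//   MemoryAccess *NewMA = MSSAU.createMemoryAccessInBB(
//       NewStore, /*Definition=*/nullptr, BB, MemorySSA::BeforeTerminator);
//   if (auto *NewMD = dyn_cast<MemoryDef>(NewMA))
//     MSSAU.insertDef(NewMD, /*RenameUses=*/true);
//   else
//     MSSAU.insertUse(cast<MemoryUse>(NewMA), /*RenameUses=*/true);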
307void MemorySSAUpdater::insertDef(MemoryDef *MD, bool RenameUses) {
308 // Don't bother updating dead code.
309 if (!MSSA->DT->isReachableFromEntry(A: MD->getBlock())) {
310 MD->setDefiningAccess(DMA: MSSA->getLiveOnEntryDef());
311 return;
312 }
313
314 VisitedBlocks.clear();
315 InsertedPHIs.clear();
316
317 // See if we had a local def, and if not, go hunting.
318 MemoryAccess *DefBefore = getPreviousDef(MA: MD);
319 bool DefBeforeSameBlock = false;
320 if (DefBefore->getBlock() == MD->getBlock() &&
321 !(isa<MemoryPhi>(Val: DefBefore) &&
322 llvm::is_contained(Range&: InsertedPHIs, Element: DefBefore)))
323 DefBeforeSameBlock = true;
324
325 // There is a def before us, which means we can replace any store/phi uses
326 // of that thing with us, since we are in the way of whatever was there
327 // before.
328 // We now define that def's memorydefs and memoryphis.
329 if (DefBeforeSameBlock) {
330 DefBefore->replaceUsesWithIf(New: MD, ShouldReplace: [MD](Use &U) {
331 // Leave the MemoryUses alone.
332 // Also make sure we skip ourselves to avoid self references.
333 User *Usr = U.getUser();
334 return !isa<MemoryUse>(Val: Usr) && Usr != MD;
335 // Defs are automatically unoptimized when the user is set to MD below,
336 // because the isOptimized() call will fail to find the same ID.
337 });
338 }
339
340 // and that def is now our defining access.
341 MD->setDefiningAccess(DMA: DefBefore);
342
343 SmallVector<WeakVH, 8> FixupList(InsertedPHIs.begin(), InsertedPHIs.end());
344
345 SmallSet<WeakVH, 8> ExistingPhis;
346
347 // Remember the index where we may insert new phis.
348 unsigned NewPhiIndex = InsertedPHIs.size();
349 if (!DefBeforeSameBlock) {
350 // If there was a local def before us, we must have the same effect it
351 // did. Because every may-def is the same, any phis/etc we would create, it
352 // would also have created. If there was no local def before us, we
353 // performed a global update, and have to search all successors and make
354 // sure we update the first def in each of them (following all paths until
355 // we hit the first def along each path). This may also insert phi nodes.
356 // TODO: There are other cases where we can skip this work, such as when we
357 // have a single successor and only used a straight line of single-pred blocks
358 // backwards to find the def. To make that work, we'd have to track whether
359 // getDefRecursive only ever used the single predecessor case. These types
360 // of paths also only exist in between CFG simplifications.
361
362 // If this is the first def in the block and this insert is in an arbitrary
363 // place, compute IDF and place phis.
364 SmallPtrSet<BasicBlock *, 2> DefiningBlocks;
365
366 // If this is the last Def in the block, we may need additional Phis.
367 // Compute IDF in all cases, as renaming needs to be done even when MD is
368 // not the last access, because it can introduce a new access past which a
369 // previous access was optimized; that access needs to be reoptimized.
370 DefiningBlocks.insert(Ptr: MD->getBlock());
371 for (const auto &VH : InsertedPHIs)
372 if (const auto *RealPHI = cast_or_null<MemoryPhi>(Val: VH))
373 DefiningBlocks.insert(Ptr: RealPHI->getBlock());
374 ForwardIDFCalculator IDFs(*MSSA->DT);
375 SmallVector<BasicBlock *, 32> IDFBlocks;
376 IDFs.setDefiningBlocks(DefiningBlocks);
377 IDFs.calculate(IDFBlocks);
378 SmallVector<AssertingVH<MemoryPhi>, 4> NewInsertedPHIs;
379 for (auto *BBIDF : IDFBlocks) {
380 auto *MPhi = MSSA->getMemoryAccess(BB: BBIDF);
381 if (!MPhi) {
382 MPhi = MSSA->createMemoryPhi(BB: BBIDF);
383 NewInsertedPHIs.push_back(Elt: MPhi);
384 } else {
385 ExistingPhis.insert(V: MPhi);
386 }
387 // Add the phis created into the IDF blocks to NonOptPhis, so they are not
388 // optimized out as trivial by the call to getPreviousDefFromEnd below.
389 // Once they are complete, all these Phis are added to the FixupList, and
390 // removed from NonOptPhis inside fixupDefs(). Existing Phis in IDF may
391 // need fixing as well, and potentially be trivial before this insertion,
392 // hence add all IDF Phis. See PR43044.
393 NonOptPhis.insert(V: MPhi);
394 }
395 for (auto &MPhi : NewInsertedPHIs) {
396 auto *BBIDF = MPhi->getBlock();
397 for (auto *Pred : predecessors(BB: BBIDF)) {
398 DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
399 MPhi->addIncoming(V: getPreviousDefFromEnd(BB: Pred, CachedPreviousDef), BB: Pred);
400 }
401 }
402
403 // Retake the index where we're adding the new phis, because the above call
404 // to getPreviousDefFromEnd may have inserted into InsertedPHIs.
405 NewPhiIndex = InsertedPHIs.size();
406 for (auto &MPhi : NewInsertedPHIs) {
407 InsertedPHIs.push_back(Elt: &*MPhi);
408 FixupList.push_back(Elt: &*MPhi);
409 }
410
411 FixupList.push_back(Elt: MD);
412 }
413
414 // Remember the index where we stopped inserting new phis above, since the
415 // fixupDefs call in the loop below may insert more that are already minimal.
416 unsigned NewPhiIndexEnd = InsertedPHIs.size();
417
418 while (!FixupList.empty()) {
419 unsigned StartingPHISize = InsertedPHIs.size();
420 fixupDefs(FixupList);
421 FixupList.clear();
422 // Put any new phis on the fixup list, and process them
423 FixupList.append(in_start: InsertedPHIs.begin() + StartingPHISize, in_end: InsertedPHIs.end());
424 }
425
426 // Optimize potentially non-minimal phis added in this method.
427 unsigned NewPhiSize = NewPhiIndexEnd - NewPhiIndex;
428 if (NewPhiSize)
429 tryRemoveTrivialPhis(UpdatedPHIs: ArrayRef<WeakVH>(&InsertedPHIs[NewPhiIndex], NewPhiSize));
430
431 // Now that all fixups are done, rename all uses if we are asked. The defs are
432 // guaranteed to be in reachable code due to the check at the method entry.
433 BasicBlock *StartBlock = MD->getBlock();
434 if (RenameUses) {
435 SmallPtrSet<BasicBlock *, 16> Visited;
436 // We are guaranteed there is a def in the block, because we just got it
437 // handed to us in this function.
438 MemoryAccess *FirstDef = &*MSSA->getWritableBlockDefs(BB: StartBlock)->begin();
439 // Convert to incoming value if it's a memorydef. A phi *is* already an
440 // incoming value.
441 if (auto *MD = dyn_cast<MemoryDef>(Val: FirstDef))
442 FirstDef = MD->getDefiningAccess();
443
444 MSSA->renamePass(BB: MD->getBlock(), IncomingVal: FirstDef, Visited);
445 // We just inserted a phi into this block, so the incoming value will become
446 // the phi anyway, so it does not matter what we pass.
447 for (auto &MP : InsertedPHIs) {
448 MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(Val&: MP);
449 if (Phi)
450 MSSA->renamePass(BB: Phi->getBlock(), IncomingVal: nullptr, Visited);
451 }
452 // Existing Phi blocks may need renaming too, if an access was previously
453 // optimized and the inserted Defs "covers" the Optimized value.
454 for (const auto &MP : ExistingPhis) {
455 MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(Val: MP);
456 if (Phi)
457 MSSA->renamePass(BB: Phi->getBlock(), IncomingVal: nullptr, Visited);
458 }
459 }
460}
461
462void MemorySSAUpdater::fixupDefs(const SmallVectorImpl<WeakVH> &Vars) {
463 SmallPtrSet<const BasicBlock *, 8> Seen;
464 SmallVector<const BasicBlock *, 16> Worklist;
465 for (const auto &Var : Vars) {
466 MemoryAccess *NewDef = dyn_cast_or_null<MemoryAccess>(Val: Var);
467 if (!NewDef)
468 continue;
469 // First, see if there is a local def after the operand.
470 auto *Defs = MSSA->getWritableBlockDefs(BB: NewDef->getBlock());
471 auto DefIter = NewDef->getDefsIterator();
472
473 // The temporary Phi is being fixed up; unmark it so it can be optimized.
474 if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(Val: NewDef))
475 NonOptPhis.erase(V: Phi);
476
477 // If there is a local def after us, we only have to rename that.
478 if (++DefIter != Defs->end()) {
479 cast<MemoryDef>(Val&: DefIter)->setDefiningAccess(DMA: NewDef);
480 continue;
481 }
482
483 // Otherwise, we need to search down through the CFG.
484 // For each of our successors, handle it directly if there is a phi, or
485 // place it on the fixup worklist.
486 for (const auto *S : successors(BB: NewDef->getBlock())) {
487 if (auto *MP = MSSA->getMemoryAccess(BB: S))
488 setMemoryPhiValueForBlock(MP, BB: NewDef->getBlock(), NewDef);
489 else
490 Worklist.push_back(Elt: S);
491 }
492
493 while (!Worklist.empty()) {
494 const BasicBlock *FixupBlock = Worklist.pop_back_val();
495
496 // Get the first def in the block that isn't a phi node.
497 if (auto *Defs = MSSA->getWritableBlockDefs(BB: FixupBlock)) {
498 auto *FirstDef = &*Defs->begin();
499 // The loop above and below should have taken care of phi nodes
500 assert(!isa<MemoryPhi>(FirstDef) &&
501 "Should have already handled phi nodes!");
502 // We are now this def's defining access, make sure we actually dominate
503 // it
504 assert(MSSA->dominates(NewDef, FirstDef) &&
505 "Should have dominated the new access");
506
507 // This may insert new phi nodes, because we are not guaranteed the
508 // block we are processing has a single pred, and depending where the
509 // store was inserted, it may require phi nodes below it.
510 cast<MemoryDef>(Val: FirstDef)->setDefiningAccess(DMA: getPreviousDef(MA: FirstDef));
511 return;
512 }
513 // We didn't find a def, so we must continue.
514 for (const auto *S : successors(BB: FixupBlock)) {
515 // If there is a phi node, handle it.
516 // Otherwise, put the block on the worklist
517 if (auto *MP = MSSA->getMemoryAccess(BB: S))
518 setMemoryPhiValueForBlock(MP, BB: FixupBlock, NewDef);
519 else {
520 // If we cycle, we should have ended up at a phi node that we already
521 // processed. FIXME: Double check this
522 if (!Seen.insert(Ptr: S).second)
523 continue;
524 Worklist.push_back(Elt: S);
525 }
526 }
527 }
528 }
529}
530
531void MemorySSAUpdater::removeEdge(BasicBlock *From, BasicBlock *To) {
532 if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB: To)) {
533 MPhi->unorderedDeleteIncomingBlock(BB: From);
534 tryRemoveTrivialPhi(Phi: MPhi);
535 }
536}
537
538void MemorySSAUpdater::removeDuplicatePhiEdgesBetween(const BasicBlock *From,
539 const BasicBlock *To) {
540 if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB: To)) {
541 bool Found = false;
542 MPhi->unorderedDeleteIncomingIf(Pred: [&](const MemoryAccess *, BasicBlock *B) {
543 if (From != B)
544 return false;
545 if (Found)
546 return true;
547 Found = true;
548 return false;
549 });
550 tryRemoveTrivialPhi(Phi: MPhi);
551 }
552}
553
554/// If all arguments of a MemoryPHI are defined by the same incoming
555/// argument, return that argument.
556static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
557 MemoryAccess *MA = nullptr;
558
559 for (auto &Arg : MP->operands()) {
560 if (!MA)
561 MA = cast<MemoryAccess>(Val&: Arg);
562 else if (MA != Arg)
563 return nullptr;
564 }
565 return MA;
566}
567
568static MemoryAccess *getNewDefiningAccessForClone(
569 MemoryAccess *MA, const ValueToValueMapTy &VMap, PhiToDefMap &MPhiMap,
570 MemorySSA *MSSA, function_ref<bool(BasicBlock *BB)> IsInClonedRegion) {
571 MemoryAccess *InsnDefining = MA;
572 if (MemoryDef *DefMUD = dyn_cast<MemoryDef>(Val: InsnDefining)) {
573 if (MSSA->isLiveOnEntryDef(MA: DefMUD))
574 return DefMUD;
575
576 // If the MemoryDef is not part of the cloned region, leave it alone.
577 Instruction *DefMUDI = DefMUD->getMemoryInst();
578 assert(DefMUDI && "Found MemoryUseOrDef with no Instruction.");
579 if (!IsInClonedRegion(DefMUDI->getParent()))
580 return DefMUD;
581
582 auto *NewDefMUDI = cast_or_null<Instruction>(Val: VMap.lookup(Val: DefMUDI));
583 InsnDefining = NewDefMUDI ? MSSA->getMemoryAccess(I: NewDefMUDI) : nullptr;
584 if (!InsnDefining || isa<MemoryUse>(Val: InsnDefining)) {
585 // The clone was simplified and is no longer a MemoryDef; look further up.
586 InsnDefining = getNewDefiningAccessForClone(
587 MA: DefMUD->getDefiningAccess(), VMap, MPhiMap, MSSA, IsInClonedRegion);
588 }
589 } else {
590 MemoryPhi *DefPhi = cast<MemoryPhi>(Val: InsnDefining);
591 if (MemoryAccess *NewDefPhi = MPhiMap.lookup(Val: DefPhi))
592 InsnDefining = NewDefPhi;
593 }
594 assert(InsnDefining && "Defining instruction cannot be nullptr.");
595 return InsnDefining;
596}
597
598void MemorySSAUpdater::cloneUsesAndDefs(
599 BasicBlock *BB, BasicBlock *NewBB, const ValueToValueMapTy &VMap,
600 PhiToDefMap &MPhiMap, function_ref<bool(BasicBlock *)> IsInClonedRegion,
601 bool CloneWasSimplified) {
602 const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
603 if (!Acc)
604 return;
605 for (const MemoryAccess &MA : *Acc) {
606 if (const MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(Val: &MA)) {
607 Instruction *Insn = MUD->getMemoryInst();
608 // Entry does not exist if the clone of the block did not clone all
609 // instructions. This occurs in LoopRotate when cloning instructions
610 // from the old header to the old preheader. The cloned instruction may
611 // also be a simplified Value, not an Instruction (see LoopRotate).
612 // Also in LoopRotate, even when it's an instruction, due to it being
613 // simplified, it may be a Use rather than a Def, so we cannot use MUD as a
614 // template. Calls coming from updateForClonedBlockIntoPred ensure this.
615 if (Instruction *NewInsn =
616 dyn_cast_or_null<Instruction>(Val: VMap.lookup(Val: Insn))) {
617 MemoryAccess *NewUseOrDef = MSSA->createDefinedAccess(
618 NewInsn,
619 getNewDefiningAccessForClone(MA: MUD->getDefiningAccess(), VMap,
620 MPhiMap, MSSA, IsInClonedRegion),
621 /*Template=*/CloneWasSimplified ? nullptr : MUD,
622 /*CreationMustSucceed=*/false);
623 if (NewUseOrDef)
624 MSSA->insertIntoListsForBlock(NewUseOrDef, NewBB, MemorySSA::End);
625 }
626 }
627 }
628}
629
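// A hypothetical illustration for the function below: if the header's phi has
// incoming values { Preheader: X, Latch1: A, Latch2: B } and BEBlock becomes
// the unique backedge block, the result is
//
//   BEBlock: N = MemoryPhi({Latch1, A}, {Latch2, B})
//   Header:  MemoryPhi({Preheader, X}, {BEBlock, N})
//
// and, if A == B, N is trivial and is removed again at the end.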
630void MemorySSAUpdater::updatePhisWhenInsertingUniqueBackedgeBlock(
631 BasicBlock *Header, BasicBlock *Preheader, BasicBlock *BEBlock) {
632 auto *MPhi = MSSA->getMemoryAccess(BB: Header);
633 if (!MPhi)
634 return;
635
636 // Create phi node in the backedge block and populate it with the same
637 // incoming values as MPhi. Skip incoming values coming from Preheader.
638 auto *NewMPhi = MSSA->createMemoryPhi(BB: BEBlock);
639 bool HasUniqueIncomingValue = true;
640 MemoryAccess *UniqueValue = nullptr;
641 for (unsigned I = 0, E = MPhi->getNumIncomingValues(); I != E; ++I) {
642 BasicBlock *IBB = MPhi->getIncomingBlock(I);
643 MemoryAccess *IV = MPhi->getIncomingValue(I);
644 if (IBB != Preheader) {
645 NewMPhi->addIncoming(V: IV, BB: IBB);
646 if (HasUniqueIncomingValue) {
647 if (!UniqueValue)
648 UniqueValue = IV;
649 else if (UniqueValue != IV)
650 HasUniqueIncomingValue = false;
651 }
652 }
653 }
654
655 // Update incoming edges into MPhi. Remove all but the incoming edge from
656 // Preheader. Add an edge from NewMPhi.
657 auto *AccFromPreheader = MPhi->getIncomingValueForBlock(BB: Preheader);
658 MPhi->setIncomingValue(I: 0, V: AccFromPreheader);
659 MPhi->setIncomingBlock(I: 0, BB: Preheader);
660 for (unsigned I = MPhi->getNumIncomingValues() - 1; I >= 1; --I)
661 MPhi->unorderedDeleteIncoming(I);
662 MPhi->addIncoming(V: NewMPhi, BB: BEBlock);
663
664 // If NewMPhi is a trivial phi, remove it. Its use in the header MPhi will be
665 // replaced with the unique value.
666 tryRemoveTrivialPhi(Phi: NewMPhi);
667}
668
669void MemorySSAUpdater::updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
670 ArrayRef<BasicBlock *> ExitBlocks,
671 const ValueToValueMapTy &VMap,
672 bool IgnoreIncomingWithNoClones) {
673 SmallSetVector<BasicBlock *, 16> Blocks(
674 llvm::from_range, concat<BasicBlock *const>(Ranges: LoopBlocks, Ranges&: ExitBlocks));
675
676 auto IsInClonedRegion = [&](BasicBlock *BB) { return Blocks.contains(key: BB); };
677
678 PhiToDefMap MPhiMap;
679 auto FixPhiIncomingValues = [&](MemoryPhi *Phi, MemoryPhi *NewPhi) {
680 assert(Phi && NewPhi && "Invalid Phi nodes.");
681 BasicBlock *NewPhiBB = NewPhi->getBlock();
682 SmallPtrSet<BasicBlock *, 4> NewPhiBBPreds(llvm::from_range,
683 predecessors(BB: NewPhiBB));
684 for (unsigned It = 0, E = Phi->getNumIncomingValues(); It < E; ++It) {
685 MemoryAccess *IncomingAccess = Phi->getIncomingValue(I: It);
686 BasicBlock *IncBB = Phi->getIncomingBlock(I: It);
687
688 if (BasicBlock *NewIncBB = cast_or_null<BasicBlock>(Val: VMap.lookup(Val: IncBB)))
689 IncBB = NewIncBB;
690 else if (IgnoreIncomingWithNoClones)
691 continue;
692
693 // Now we have IncBB, and will need to add incoming from it to NewPhi.
694
695 // If IncBB is not a predecessor of NewPhiBB, then do not add it.
696 // NewPhiBB was cloned without that edge.
697 if (!NewPhiBBPreds.count(Ptr: IncBB))
698 continue;
699
700 // Determine incoming value and add it as incoming from IncBB.
701 NewPhi->addIncoming(V: getNewDefiningAccessForClone(MA: IncomingAccess, VMap,
702 MPhiMap, MSSA,
703 IsInClonedRegion),
704 BB: IncBB);
705 }
706 if (auto *SingleAccess = onlySingleValue(MP: NewPhi)) {
707 MPhiMap[Phi] = SingleAccess;
708 removeMemoryAccess(NewPhi);
709 }
710 };
711
712 auto ProcessBlock = [&](BasicBlock *BB) {
713 BasicBlock *NewBlock = cast_or_null<BasicBlock>(Val: VMap.lookup(Val: BB));
714 if (!NewBlock)
715 return;
716
717 assert(!MSSA->getWritableBlockAccesses(NewBlock) &&
718 "Cloned block should have no accesses");
719
720 // Add MemoryPhi.
721 if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB)) {
722 MemoryPhi *NewPhi = MSSA->createMemoryPhi(BB: NewBlock);
723 MPhiMap[MPhi] = NewPhi;
724 }
725 // Update Uses and Defs.
726 cloneUsesAndDefs(BB, NewBB: NewBlock, VMap, MPhiMap, IsInClonedRegion);
727 };
728
729 for (auto *BB : Blocks)
730 ProcessBlock(BB);
731
732 for (auto *BB : Blocks)
733 if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
734 if (MemoryAccess *NewPhi = MPhiMap.lookup(Val: MPhi))
735 FixPhiIncomingValues(MPhi, cast<MemoryPhi>(Val: NewPhi));
736}
737
738void MemorySSAUpdater::updateForClonedBlockIntoPred(
739 BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM) {
740 // All defs/phis from outside BB that are used in BB are valid uses in P1,
741 // since those defs/phis must have dominated BB, and so also dominate P1.
742 // Defs from BB being used in BB will be replaced with the cloned defs from
743 // VM. The uses of BB's Phi (if it exists) in BB will be replaced by the
744 // incoming def into the Phi from P1.
745 // Instructions cloned into the predecessor are in practice sometimes
746 // simplified, so disable the use of the template, and create an access from
747 // scratch.
748 PhiToDefMap MPhiMap;
749 if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
750 MPhiMap[MPhi] = MPhi->getIncomingValueForBlock(BB: P1);
751 cloneUsesAndDefs(
752 BB, NewBB: P1, VMap: VM, MPhiMap, IsInClonedRegion: [&](BasicBlock *CheckBB) { return BB == CheckBB; },
753 /*CloneWasSimplified=*/true);
754}
755
756template <typename Iter>
757void MemorySSAUpdater::privateUpdateExitBlocksForClonedLoop(
758 ArrayRef<BasicBlock *> ExitBlocks, Iter ValuesBegin, Iter ValuesEnd,
759 DominatorTree &DT) {
760 SmallVector<CFGUpdate, 4> Updates;
761 // Update/insert phis in all successors of exit blocks.
762 for (auto *Exit : ExitBlocks)
763 for (const ValueToValueMapTy *VMap : make_range(ValuesBegin, ValuesEnd))
764 if (BasicBlock *NewExit = cast_or_null<BasicBlock>(Val: VMap->lookup(Val: Exit))) {
765 BasicBlock *ExitSucc = NewExit->getTerminator()->getSuccessor(Idx: 0);
766 Updates.push_back(Elt: {DT.Insert, NewExit, ExitSucc});
767 }
768 applyInsertUpdates(Updates, DT);
769}
770
771void MemorySSAUpdater::updateExitBlocksForClonedLoop(
772 ArrayRef<BasicBlock *> ExitBlocks, const ValueToValueMapTy &VMap,
773 DominatorTree &DT) {
774 const ValueToValueMapTy *const Arr[] = {&VMap};
775 privateUpdateExitBlocksForClonedLoop(ExitBlocks, ValuesBegin: std::begin(arr: Arr),
776 ValuesEnd: std::end(arr: Arr), DT);
777}
778
779void MemorySSAUpdater::updateExitBlocksForClonedLoop(
780 ArrayRef<BasicBlock *> ExitBlocks,
781 ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT) {
782 auto GetPtr = [&](const std::unique_ptr<ValueToValueMapTy> &I) {
783 return I.get();
784 };
785 using MappedIteratorType =
786 mapped_iterator<const std::unique_ptr<ValueToValueMapTy> *,
787 decltype(GetPtr)>;
788 auto MapBegin = MappedIteratorType(VMaps.begin(), GetPtr);
789 auto MapEnd = MappedIteratorType(VMaps.end(), GetPtr);
790 privateUpdateExitBlocksForClonedLoop(ExitBlocks, ValuesBegin: MapBegin, ValuesEnd: MapEnd, DT);
791}
792
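// A minimal caller sketch for the function below (hypothetical blocks A, B, C;
// `MSSAU` and `DT` belong to the client): after redirecting the edge A->B to
// A->C in the IR, the change can be communicated as
//
//   SmallVector<DominatorTree::UpdateType, 2> Updates;
//   Updates.push_back({DominatorTree::Delete, A, B});
//   Updates.push_back({DominatorTree::Insert, A, C});
//   MSSAU.applyUpdates(Updates, DT, /*UpdateDT=*/true);
//
// With UpdateDT set, the DominatorTree is updated here as well; otherwise it
// is assumed to be already up to date.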
793void MemorySSAUpdater::applyUpdates(ArrayRef<CFGUpdate> Updates,
794 DominatorTree &DT, bool UpdateDT) {
795 SmallVector<CFGUpdate, 4> DeleteUpdates;
796 SmallVector<CFGUpdate, 4> RevDeleteUpdates;
797 SmallVector<CFGUpdate, 4> InsertUpdates;
798 for (const auto &Update : Updates) {
799 if (Update.getKind() == DT.Insert)
800 InsertUpdates.push_back(Elt: {DT.Insert, Update.getFrom(), Update.getTo()});
801 else {
802 DeleteUpdates.push_back(Elt: {DT.Delete, Update.getFrom(), Update.getTo()});
803 RevDeleteUpdates.push_back(Elt: {DT.Insert, Update.getFrom(), Update.getTo()});
804 }
805 }
806
807 if (!DeleteUpdates.empty()) {
808 if (!InsertUpdates.empty()) {
809 if (!UpdateDT) {
810 SmallVector<CFGUpdate, 0> Empty;
811 // Deletes are reverse-applied, because this CFG view is pretending the
812 // deletes did not happen yet, hence the edges still exist.
813 DT.applyUpdates(Updates: Empty, PostViewUpdates: RevDeleteUpdates);
814 } else {
815 // Apply all updates, with the RevDeleteUpdates as PostCFGView.
816 DT.applyUpdates(Updates, PostViewUpdates: RevDeleteUpdates);
817 }
818
819 // Note: the MSSA update below doesn't distinguish between a GD with
820 // (RevDelete,false) and (Delete, true), but this matters for the DT
821 // updates above; for "children" purposes they are equivalent; but the
822 // updates themselves convey the desired update, used inside DT only.
823 GraphDiff<BasicBlock *> GD(RevDeleteUpdates);
824 applyInsertUpdates(InsertUpdates, DT, GD: &GD);
825 // Update DT to redelete edges; this matches the real CFG so we can
826 // perform the standard update without a postview of the CFG.
827 DT.applyUpdates(Updates: DeleteUpdates);
828 } else {
829 if (UpdateDT)
830 DT.applyUpdates(Updates: DeleteUpdates);
831 }
832 } else {
833 if (UpdateDT)
834 DT.applyUpdates(Updates);
835 GraphDiff<BasicBlock *> GD;
836 applyInsertUpdates(InsertUpdates, DT, GD: &GD);
837 }
838
839 // Update for deleted edges
840 for (auto &Update : DeleteUpdates)
841 removeEdge(From: Update.getFrom(), To: Update.getTo());
842}
843
844void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
845 DominatorTree &DT) {
846 GraphDiff<BasicBlock *> GD;
847 applyInsertUpdates(Updates, DT, GD: &GD);
848}
849
850void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
851 DominatorTree &DT,
852 const GraphDiff<BasicBlock *> *GD) {
853 // Get recursive last Def, assuming well-formed MSSA and updated DT.
854 auto GetLastDef = [&](BasicBlock *BB) -> MemoryAccess * {
855 while (true) {
856 MemorySSA::DefsList *Defs = MSSA->getWritableBlockDefs(BB);
857 // Return last Def or Phi in BB, if it exists.
858 if (Defs)
859 return &*(--Defs->end());
860
861 // Check number of predecessors, we only care if there's more than one.
862 unsigned Count = 0;
863 BasicBlock *Pred = nullptr;
864 for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(N: BB)) {
865 Pred = Pi;
866 Count++;
867 if (Count == 2)
868 break;
869 }
870
871 // If BB has multiple predecessors, get last definition from IDom.
872 if (Count != 1) {
873 // [SimpleLoopUnswitch] If BB is a dead block, about to be deleted, its
874 // DT node is invalidated. Return LoE as its last def. This will be added to
875 // the MemoryPhi node, and later deleted when the block is deleted.
876 if (!DT.getNode(BB))
877 return MSSA->getLiveOnEntryDef();
878 if (auto *IDom = DT.getNode(BB)->getIDom())
879 if (IDom->getBlock() != BB) {
880 BB = IDom->getBlock();
881 continue;
882 }
883 return MSSA->getLiveOnEntryDef();
884 } else {
885 // Single predecessor, BB cannot be dead. GetLastDef of Pred.
886 assert(Count == 1 && Pred && "Single predecessor expected.");
887 // BB can be unreachable though, return LoE if that is the case.
888 if (!DT.getNode(BB))
889 return MSSA->getLiveOnEntryDef();
890 BB = Pred;
891 }
892 };
893 llvm_unreachable("Unable to get last definition.");
894 };
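  // A small, hypothetical illustration of GetLastDef: for a chain
  // Entry -> A -> BB where the only MemoryDef is in Entry, GetLastDef(BB)
  // finds no local defs, follows the single-predecessor links through A to
  // Entry, and returns Entry's MemoryDef. If BB instead had two predecessors,
  // the walk would restart from BB's immediate dominator.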
895
896 // Get nearest IDom given a set of blocks.
897 // TODO: this can be optimized by starting the search at the node with the
898 // lowest level (highest in the tree).
899 auto FindNearestCommonDominator =
900 [&](const SmallSetVector<BasicBlock *, 2> &BBSet) -> BasicBlock * {
901 BasicBlock *PrevIDom = *BBSet.begin();
902 for (auto *BB : BBSet)
903 PrevIDom = DT.findNearestCommonDominator(A: PrevIDom, B: BB);
904 return PrevIDom;
905 };
906
907 // Get all blocks that dominate PrevIDom, stop when reaching CurrIDom. Do not
908 // include CurrIDom.
909 auto GetNoLongerDomBlocks =
910 [&](BasicBlock *PrevIDom, BasicBlock *CurrIDom,
911 SmallVectorImpl<BasicBlock *> &BlocksPrevDom) {
912 if (PrevIDom == CurrIDom)
913 return;
914 BlocksPrevDom.push_back(Elt: PrevIDom);
915 BasicBlock *NextIDom = PrevIDom;
916 while (BasicBlock *UpIDom =
917 DT.getNode(BB: NextIDom)->getIDom()->getBlock()) {
918 if (UpIDom == CurrIDom)
919 break;
920 BlocksPrevDom.push_back(Elt: UpIDom);
921 NextIDom = UpIDom;
922 }
923 };
924
925 // Map a BB to its predecessors: added + previously existing. To get a
926 // deterministic order, store predecessors as SetVectors. The order in each
927 // will be defined by the order in Updates (fixed) and the order given by
928 // children<> (also fixed). Since we further iterate over these ordered sets,
929 // we lose the information of multiple edges possibly existing between two
930 // blocks, so we'll keep an EdgeCount map for that.
931 // An alternate implementation could keep unordered set for the predecessors,
932 // traverse either Updates or children<> each time to get the deterministic
933 // order, and drop the usage of EdgeCount. This alternate approach would still
934 // require querying the maps for each predecessor, and the children<> call has
935 // additional computation inside for creating the snapshot-graph predecessors.
936 // As such, we favor using a little additional storage and less compute time.
937 // This decision can be revisited if we find the alternative more favorable.
938
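  // A hypothetical illustration: for Updates = {Insert(A, C), Insert(B, C)}
  // where C previously had the single predecessor P, the code below builds
  // PredMap[C].Added = {A, B} and PredMap[C].Prev = {P}, while EdgeCountMap
  // records the multiplicity of each incoming edge (1 for each of {P,C},
  // {A,C} and {B,C} here) so duplicate edges are not lost.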
939 struct PredInfo {
940 SmallSetVector<BasicBlock *, 2> Added;
941 SmallSetVector<BasicBlock *, 2> Prev;
942 };
943 SmallDenseMap<BasicBlock *, PredInfo> PredMap;
944
945 for (const auto &Edge : Updates) {
946 BasicBlock *BB = Edge.getTo();
947 auto &AddedBlockSet = PredMap[BB].Added;
948 AddedBlockSet.insert(X: Edge.getFrom());
949 }
950
951 // Store all existing predecessors for each BB; at least one must exist.
952 SmallDenseMap<std::pair<BasicBlock *, BasicBlock *>, int> EdgeCountMap;
953 SmallPtrSet<BasicBlock *, 2> NewBlocks;
954 for (auto &BBPredPair : PredMap) {
955 auto *BB = BBPredPair.first;
956 const auto &AddedBlockSet = BBPredPair.second.Added;
957 auto &PrevBlockSet = BBPredPair.second.Prev;
958 for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(N: BB)) {
959 if (!AddedBlockSet.count(key: Pi))
960 PrevBlockSet.insert(X: Pi);
961 EdgeCountMap[{Pi, BB}]++;
962 }
963
964 if (PrevBlockSet.empty()) {
965 assert(pred_size(BB) == AddedBlockSet.size() && "Duplicate edges added.");
966 LLVM_DEBUG(
967 dbgs()
968 << "Adding a predecessor to a block with no predecessors. "
969 "This must be an edge added to a new, likely cloned, block. "
970 "Its memory accesses must be already correct, assuming completed "
971 "via the updateExitBlocksForClonedLoop API. "
972 "Assert a single such edge is added so no phi addition or "
973 "additional processing is required.\n");
974 assert(AddedBlockSet.size() == 1 &&
975 "Can only handle adding one predecessor to a new block.");
976 // Need to remove new blocks from PredMap. Remove below to not invalidate
977 // iterator here.
978 NewBlocks.insert(Ptr: BB);
979 }
980 }
981 // Nothing to process for new/cloned blocks.
982 for (auto *BB : NewBlocks)
983 PredMap.erase(Val: BB);
984
985 SmallVector<BasicBlock *, 16> BlocksWithDefsToReplace;
986 SmallVector<WeakVH, 8> InsertedPhis;
987
988 // First create MemoryPhis in all blocks that don't have one. Create in the
989 // order found in Updates, not in PredMap, to get deterministic numbering.
990 for (const auto &Edge : Updates) {
991 BasicBlock *BB = Edge.getTo();
992 if (PredMap.count(Val: BB) && !MSSA->getMemoryAccess(BB))
993 InsertedPhis.push_back(Elt: MSSA->createMemoryPhi(BB));
994 }
995
996 // Now we'll fill in the MemoryPhis with the right incoming values.
997 for (auto &BBPredPair : PredMap) {
998 auto *BB = BBPredPair.first;
999 const auto &PrevBlockSet = BBPredPair.second.Prev;
1000 const auto &AddedBlockSet = BBPredPair.second.Added;
1001 assert(!PrevBlockSet.empty() &&
1002 "At least one previous predecessor must exist.");
1003
1004 // TODO: if this becomes a bottleneck, we can save on GetLastDef calls by
1005 // keeping this map before the loop. We can reuse already populated entries
1006 // if an edge is added from the same predecessor to two different blocks,
1007 // and this does happen in rotate. Note that the map needs to be updated
1008 // when deleting non-necessary phis below, if the phi is in the map by
1009 // replacing the value with DefP1.
1010 SmallDenseMap<BasicBlock *, MemoryAccess *> LastDefAddedPred;
1011 for (auto *AddedPred : AddedBlockSet) {
1012 auto *DefPn = GetLastDef(AddedPred);
1013 assert(DefPn != nullptr && "Unable to find last definition.");
1014 LastDefAddedPred[AddedPred] = DefPn;
1015 }
1016
1017 MemoryPhi *NewPhi = MSSA->getMemoryAccess(BB);
1018 // If Phi is not empty, add an incoming edge from each added pred. Must
1019 // still compute blocks with defs to replace for this block below.
1020 if (NewPhi->getNumOperands()) {
1021 for (auto *Pred : AddedBlockSet) {
1022 auto *LastDefForPred = LastDefAddedPred[Pred];
1023 for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
1024 NewPhi->addIncoming(V: LastDefForPred, BB: Pred);
1025 }
1026 } else {
1027 // Pick any existing predecessor and get its definition. All other
1028 // existing predecessors should have the same one, since no phi existed.
1029 auto *P1 = *PrevBlockSet.begin();
1030 MemoryAccess *DefP1 = GetLastDef(P1);
1031
1032 // Check DefP1 against all Defs in LastDefAddedPred. If they are all the
1033 // same, nothing to add.
1034 bool InsertPhi = false;
1035 for (auto LastDefPredPair : LastDefAddedPred)
1036 if (DefP1 != LastDefPredPair.second) {
1037 InsertPhi = true;
1038 break;
1039 }
1040 if (!InsertPhi) {
1041 // Since NewPhi may be used in other newly added Phis, replace all uses
1042 // of NewPhi with the definition coming from all predecessors (DefP1),
1043 // before deleting it.
1044 NewPhi->replaceAllUsesWith(V: DefP1);
1045 removeMemoryAccess(NewPhi);
1046 continue;
1047 }
1048
1049 // Update Phi with new values for new predecessors and old value for all
1050 // other predecessors. Since AddedBlockSet and PrevBlockSet are ordered
1051 // sets, the order of entries in NewPhi is deterministic.
1052 for (auto *Pred : AddedBlockSet) {
1053 auto *LastDefForPred = LastDefAddedPred[Pred];
1054 for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
1055 NewPhi->addIncoming(V: LastDefForPred, BB: Pred);
1056 }
1057 for (auto *Pred : PrevBlockSet)
1058 for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
1059 NewPhi->addIncoming(V: DefP1, BB: Pred);
1060 }
1061
1062 // Get all blocks that used to dominate BB and no longer do after adding
1063 // AddedBlockSet, where PrevBlockSet are the previously known predecessors.
1064 assert(DT.getNode(BB)->getIDom() && "BB does not have valid idom");
1065 BasicBlock *PrevIDom = FindNearestCommonDominator(PrevBlockSet);
1066 assert(PrevIDom && "Previous IDom should exist");
1067 BasicBlock *NewIDom = DT.getNode(BB)->getIDom()->getBlock();
1068 assert(NewIDom && "BB should have a new valid idom");
1069 assert(DT.dominates(NewIDom, PrevIDom) &&
1070 "New idom should dominate old idom");
1071 GetNoLongerDomBlocks(PrevIDom, NewIDom, BlocksWithDefsToReplace);
1072 }
1073
1074 tryRemoveTrivialPhis(UpdatedPHIs: InsertedPhis);
1075 // Create the set of blocks that now have a definition. We'll use this to
1076 // compute IDF and add Phis there next.
1077 SmallVector<BasicBlock *, 8> BlocksToProcess;
1078 for (auto &VH : InsertedPhis)
1079 if (auto *MPhi = cast_or_null<MemoryPhi>(Val&: VH))
1080 BlocksToProcess.push_back(Elt: MPhi->getBlock());
1081
1082 // Compute IDF and add Phis in all IDF blocks that do not have one.
1083 SmallVector<BasicBlock *, 32> IDFBlocks;
1084 if (!BlocksToProcess.empty()) {
1085 ForwardIDFCalculator IDFs(DT, GD);
1086 SmallPtrSet<BasicBlock *, 16> DefiningBlocks(llvm::from_range,
1087 BlocksToProcess);
1088 IDFs.setDefiningBlocks(DefiningBlocks);
1089 IDFs.calculate(IDFBlocks);
1090
1091 SmallSetVector<MemoryPhi *, 4> PhisToFill;
1092 // First create all needed Phis.
1093 for (auto *BBIDF : IDFBlocks)
1094 if (!MSSA->getMemoryAccess(BB: BBIDF)) {
1095 auto *IDFPhi = MSSA->createMemoryPhi(BB: BBIDF);
1096 InsertedPhis.push_back(Elt: IDFPhi);
1097 PhisToFill.insert(X: IDFPhi);
1098 }
1099 // Then update or insert their correct incoming values.
1100 for (auto *BBIDF : IDFBlocks) {
1101 auto *IDFPhi = MSSA->getMemoryAccess(BB: BBIDF);
1102 assert(IDFPhi && "Phi must exist");
1103 if (!PhisToFill.count(key: IDFPhi)) {
1104 // Update existing Phi.
1105 // FIXME: some updates may be redundant, try to optimize and skip some.
1106 for (unsigned I = 0, E = IDFPhi->getNumIncomingValues(); I < E; ++I)
1107 IDFPhi->setIncomingValue(I, V: GetLastDef(IDFPhi->getIncomingBlock(I)));
1108 } else {
1109 for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(N: BBIDF))
1110 IDFPhi->addIncoming(V: GetLastDef(Pi), BB: Pi);
1111 }
1112 }
1113 }
1114
1115 // Now for all defs in BlocksWithDefsToReplace, if there are uses they no
1116 // longer dominate, replace those with the closest dominating def.
1117 // This will also update optimized accesses, as they're also uses.
1118 for (auto *BlockWithDefsToReplace : BlocksWithDefsToReplace) {
1119 if (auto DefsList = MSSA->getWritableBlockDefs(BB: BlockWithDefsToReplace)) {
1120 for (auto &DefToReplaceUses : *DefsList) {
1121 BasicBlock *DominatingBlock = DefToReplaceUses.getBlock();
1122 // We defer resetting optimized accesses until all uses are replaced, to
1123 // avoid invalidating the iterator.
1124 SmallVector<MemoryUseOrDef *, 4> ResetOptimized;
1125 for (Use &U : llvm::make_early_inc_range(Range: DefToReplaceUses.uses())) {
1126 MemoryAccess *Usr = cast<MemoryAccess>(Val: U.getUser());
1127 if (MemoryPhi *UsrPhi = dyn_cast<MemoryPhi>(Val: Usr)) {
1128 BasicBlock *DominatedBlock = UsrPhi->getIncomingBlock(U);
1129 if (!DT.dominates(A: DominatingBlock, B: DominatedBlock))
1130 U.set(GetLastDef(DominatedBlock));
1131 } else {
1132 BasicBlock *DominatedBlock = Usr->getBlock();
1133 if (!DT.dominates(A: DominatingBlock, B: DominatedBlock)) {
1134 if (auto *DomBlPhi = MSSA->getMemoryAccess(BB: DominatedBlock))
1135 U.set(DomBlPhi);
1136 else {
1137 auto *IDom = DT.getNode(BB: DominatedBlock)->getIDom();
1138 assert(IDom && "Block must have a valid IDom.");
1139 U.set(GetLastDef(IDom->getBlock()));
1140 }
1141 ResetOptimized.push_back(Elt: cast<MemoryUseOrDef>(Val: Usr));
1142 }
1143 }
1144 }
1145
1146 for (auto *Usr : ResetOptimized)
1147 Usr->resetOptimized();
1148 }
1149 }
1150 }
1151 tryRemoveTrivialPhis(UpdatedPHIs: InsertedPhis);
1152}
1153
1154// Move What to the given position Where within block BB in the MemorySSA IR.
1155template <class WhereType>
1156void MemorySSAUpdater::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1157 WhereType Where) {
1158 // Mark MemoryPhi users of What not to be optimized.
1159 for (auto *U : What->users())
1160 if (MemoryPhi *PhiUser = dyn_cast<MemoryPhi>(Val: U))
1161 NonOptPhis.insert(V: PhiUser);
1162
1163 // Replace all of our uses with our defining access.
1164 What->replaceAllUsesWith(V: What->getDefiningAccess());
1165
1166 // Let MemorySSA take care of moving it around in the lists.
1167 MSSA->moveTo(What, BB, Where);
1168
1169 // Now reinsert it into the IR and do whatever fixups needed.
1170 if (auto *MD = dyn_cast<MemoryDef>(Val: What))
1171 insertDef(MD, /*RenameUses=*/true);
1172 else
1173 insertUse(MU: cast<MemoryUse>(Val: What), /*RenameUses=*/true);
1174
1175 // Clear dangling pointers. We added all MemoryPhi users, but not all
1176 // of them are removed by fixupDefs().
1177 NonOptPhis.clear();
1178}
1179
1180// Move What before Where in the MemorySSA IR.
1181void MemorySSAUpdater::moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
1182 moveTo(What, BB: Where->getBlock(), Where: Where->getIterator());
1183}
1184
1185// Move What after Where in the MemorySSA IR.
1186void MemorySSAUpdater::moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
1187 moveTo(What, BB: Where->getBlock(), Where: ++Where->getIterator());
1188}
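// A minimal caller sketch (hypothetical; `StoreA`, `StoreB`, `MSSA` and
// `MSSAU` belong to the client): after moving StoreA right after StoreB in
// the IR, the corresponding MemorySSA update is
//
//   MSSAU.moveAfter(MSSA.getMemoryAccess(StoreA),
//                   MSSA.getMemoryAccess(StoreB));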
1189
1190void MemorySSAUpdater::moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
1191 MemorySSA::InsertionPlace Where) {
1192 if (Where != MemorySSA::InsertionPlace::BeforeTerminator)
1193 return moveTo(What, BB, Where);
1194
1195 if (auto *Where = MSSA->getMemoryAccess(I: BB->getTerminator()))
1196 return moveBefore(What, Where);
1197 else
1198 return moveTo(What, BB, Where: MemorySSA::InsertionPlace::End);
1199}
1200
1201// All accesses in To used to be in From. Move to end and update access lists.
1202void MemorySSAUpdater::moveAllAccesses(BasicBlock *From, BasicBlock *To,
1203 Instruction *Start) {
1204
1205 MemorySSA::AccessList *Accs = MSSA->getWritableBlockAccesses(BB: From);
1206 if (!Accs)
1207 return;
1208
1209 assert(Start->getParent() == To && "Incorrect Start instruction");
1210 MemoryAccess *FirstInNew = nullptr;
1211 for (Instruction &I : make_range(x: Start->getIterator(), y: To->end()))
1212 if ((FirstInNew = MSSA->getMemoryAccess(I: &I)))
1213 break;
1214 if (FirstInNew) {
1215 auto *MUD = cast<MemoryUseOrDef>(Val: FirstInNew);
1216 do {
1217 auto NextIt = ++MUD->getIterator();
1218 MemoryUseOrDef *NextMUD = (!Accs || NextIt == Accs->end())
1219 ? nullptr
1220 : cast<MemoryUseOrDef>(Val: &*NextIt);
1221 MSSA->moveTo(What: MUD, BB: To, Point: MemorySSA::End);
1222 // Moving MUD from Accs in the moveTo above may delete Accs, so we need
1223 // to retrieve it again.
1224 Accs = MSSA->getWritableBlockAccesses(BB: From);
1225 MUD = NextMUD;
1226 } while (MUD);
1227 }
1228
1229 // If all accesses were moved and only a trivial Phi remains, we try to remove
1230 // that Phi. This is needed when From is going to be deleted.
1231 auto *Defs = MSSA->getWritableBlockDefs(BB: From);
1232 if (Defs && !Defs->empty())
1233 if (auto *Phi = dyn_cast<MemoryPhi>(Val: &*Defs->begin()))
1234 tryRemoveTrivialPhi(Phi);
1235}
1236
1237void MemorySSAUpdater::moveAllAfterSpliceBlocks(BasicBlock *From,
1238 BasicBlock *To,
1239 Instruction *Start) {
1240 assert(MSSA->getBlockAccesses(To) == nullptr &&
1241 "To block is expected to be free of MemoryAccesses.");
1242 moveAllAccesses(From, To, Start);
1243 for (BasicBlock *Succ : successors(BB: To))
1244 if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB: Succ))
1245 MPhi->setIncomingBlock(I: MPhi->getBasicBlockIndex(BB: From), BB: To);
1246}
1247
1248void MemorySSAUpdater::moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
1249 Instruction *Start) {
1250 assert(From->getUniquePredecessor() == To &&
1251 "From block is expected to have a single predecessor (To).");
1252 moveAllAccesses(From, To, Start);
1253 for (BasicBlock *Succ : successors(BB: From))
1254 if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB: Succ))
1255 MPhi->setIncomingBlock(I: MPhi->getBasicBlockIndex(BB: From), BB: To);
1256}
1257
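// A hypothetical illustration for the function below: if Old had predecessors
// {P1, P2, P3} and the phi M = MemoryPhi({P1, A}, {P2, B}, {P3, C}), and New
// is inserted as an immediate predecessor taking over Preds = {P1, P2}, the
// result (up to operand order) is
//
//   New: N = MemoryPhi({P1, A}, {P2, B})
//   Old: M = MemoryPhi({P3, C}, {New, N})
//
// and N is removed again as trivial if A == B.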
1258void MemorySSAUpdater::wireOldPredecessorsToNewImmediatePredecessor(
1259 BasicBlock *Old, BasicBlock *New, ArrayRef<BasicBlock *> Preds,
1260 bool IdenticalEdgesWereMerged) {
1261 assert(!MSSA->getWritableBlockAccesses(New) &&
1262 "Access list should be null for a new block.");
1263 MemoryPhi *Phi = MSSA->getMemoryAccess(BB: Old);
1264 if (!Phi)
1265 return;
1266 if (Old->hasNPredecessors(N: 1)) {
1267 assert(pred_size(New) == Preds.size() &&
1268 "Should have moved all predecessors.");
1269 MSSA->moveTo(What: Phi, BB: New, Point: MemorySSA::Beginning);
1270 } else {
1271 assert(!Preds.empty() && "Must be moving at least one predecessor to the "
1272 "new immediate predecessor.");
1273 MemoryPhi *NewPhi = MSSA->createMemoryPhi(BB: New);
1274 SmallPtrSet<BasicBlock *, 16> PredsSet(llvm::from_range, Preds);
1275 // Currently only support the case of removing a single incoming edge when
1276 // identical edges were not merged.
1277 if (!IdenticalEdgesWereMerged)
1278 assert(PredsSet.size() == Preds.size() &&
1279 "If identical edges were not merged, we cannot have duplicate "
1280 "blocks in the predecessors");
1281 Phi->unorderedDeleteIncomingIf(Pred: [&](MemoryAccess *MA, BasicBlock *B) {
1282 if (PredsSet.count(Ptr: B)) {
1283 NewPhi->addIncoming(V: MA, BB: B);
1284 if (!IdenticalEdgesWereMerged)
1285 PredsSet.erase(Ptr: B);
1286 return true;
1287 }
1288 return false;
1289 });
1290 Phi->addIncoming(V: NewPhi, BB: New);
1291 tryRemoveTrivialPhi(Phi: NewPhi);
1292 }
1293}
1294
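// A minimal caller sketch (hypothetical; `I`, `MSSA` and `MSSAU` belong to the
// client pass): drop the MemorySSA information for a dead instruction before
// erasing it from the IR.
//
//   if (MemoryUseOrDef *MA = MSSA.getMemoryAccess(&I))
//     MSSAU.removeMemoryAccess(MA, /*OptimizePhis=*/true);
//   I.eraseFromParent();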
1295void MemorySSAUpdater::removeMemoryAccess(MemoryAccess *MA, bool OptimizePhis) {
1296 assert(!MSSA->isLiveOnEntryDef(MA) &&
1297 "Trying to remove the live on entry def");
1298 // We can only delete phi nodes if they have no uses, or we can replace all
1299 // uses with a single definition.
1300 MemoryAccess *NewDefTarget = nullptr;
1301 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Val: MA)) {
1302 // Note that it is sufficient to know that all edges of the phi node have
1303 // the same argument. If they do, by the definition of dominance frontiers
1304 // (which we used to place this phi), that argument must dominate this phi,
1305 // and thus, must dominate the phi's uses, and so we will not hit the assert
1306 // below.
1307 NewDefTarget = onlySingleValue(MP);
1308 assert((NewDefTarget || MP->use_empty()) &&
1309 "We can't delete this memory phi");
1310 } else {
1311 NewDefTarget = cast<MemoryUseOrDef>(Val: MA)->getDefiningAccess();
1312 }
1313
1314 SmallSetVector<MemoryPhi *, 4> PhisToCheck;
1315
1316 // Re-point the uses at our defining access
1317 if (!isa<MemoryUse>(Val: MA) && !MA->use_empty()) {
1318 // Reset optimized on users of this store, and reset the uses.
1319 // A few notes:
1320 // 1. This is a slightly modified version of RAUW to avoid walking the
1321 // uses twice here.
1322 // 2. If we wanted to be complete, we would have to reset the optimized
1323 // flags on users of phi nodes if doing the below makes a phi node have all
1324 // the same arguments. Instead, we expect callers to call removeMemoryAccess
1325 // on those phi nodes themselves, because doing it here would be N^3.
1326 if (MA->hasValueHandle())
1327 ValueHandleBase::ValueIsRAUWd(Old: MA, New: NewDefTarget);
1328 // Note: We assume MemorySSA is not used in metadata since it's not really
1329 // part of the IR.
1330
1331 assert(NewDefTarget != MA && "Going into an infinite loop");
1332 while (!MA->use_empty()) {
1333 Use &U = *MA->use_begin();
1334 if (auto *MUD = dyn_cast<MemoryUseOrDef>(Val: U.getUser()))
1335 MUD->resetOptimized();
1336 if (OptimizePhis)
1337 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Val: U.getUser()))
1338 PhisToCheck.insert(X: MP);
1339 U.set(NewDefTarget);
1340 }
1341 }
1342
1343 // The calls below will destroy MA, so we can't change the order in which we
1344 // do things here.
1345 MSSA->removeFromLookups(MA);
1346 MSSA->removeFromLists(MA);
1347
1348 // Optionally optimize Phi uses. This will recursively remove trivial phis.
1349 if (!PhisToCheck.empty()) {
1350 SmallVector<WeakVH, 16> PhisToOptimize{PhisToCheck.begin(),
1351 PhisToCheck.end()};
1352 PhisToCheck.clear();
1353
1354 unsigned PhisSize = PhisToOptimize.size();
1355 while (PhisSize-- > 0)
1356 if (MemoryPhi *MP =
1357 cast_or_null<MemoryPhi>(Val: PhisToOptimize.pop_back_val()))
1358 tryRemoveTrivialPhi(Phi: MP);
1359 }
1360}
1361
1362void MemorySSAUpdater::removeBlocks(
1363 const SmallSetVector<BasicBlock *, 8> &DeadBlocks) {
1364 // First delete all uses of BB in MemoryPhis.
1365 for (BasicBlock *BB : DeadBlocks) {
1366 Instruction *TI = BB->getTerminator();
1367 assert(TI && "Basic block expected to have a terminator instruction");
1368 for (BasicBlock *Succ : successors(I: TI))
1369 if (!DeadBlocks.count(key: Succ))
1370 if (MemoryPhi *MP = MSSA->getMemoryAccess(BB: Succ)) {
1371 MP->unorderedDeleteIncomingBlock(BB);
1372 tryRemoveTrivialPhi(Phi: MP);
1373 }
1374 // Drop all references of all accesses in BB
1375 if (MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB))
1376 for (MemoryAccess &MA : *Acc)
1377 MA.dropAllReferences();
1378 }
1379
1380 // Next, delete all memory accesses in each block
1381 for (BasicBlock *BB : DeadBlocks) {
1382 MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB);
1383 if (!Acc)
1384 continue;
1385 for (MemoryAccess &MA : llvm::make_early_inc_range(Range&: *Acc)) {
1386 MSSA->removeFromLookups(&MA);
1387 MSSA->removeFromLists(&MA);
1388 }
1389 }
1390}
1391
1392void MemorySSAUpdater::tryRemoveTrivialPhis(ArrayRef<WeakVH> UpdatedPHIs) {
1393 for (const auto &VH : UpdatedPHIs)
1394 if (auto *MPhi = cast_or_null<MemoryPhi>(Val: VH))
1395 tryRemoveTrivialPhi(Phi: MPhi);
1396}
1397
1398void MemorySSAUpdater::changeToUnreachable(const Instruction *I) {
1399 const BasicBlock *BB = I->getParent();
1400 // Remove memory accesses in BB for I and all following instructions.
1401 auto BBI = I->getIterator(), BBE = BB->end();
1402 // FIXME: If this becomes too expensive, iterate until the first instruction
1403 // with a memory access, then iterate over MemoryAccesses.
1404 while (BBI != BBE)
1405 removeMemoryAccess(I: &*(BBI++));
1406 // Update phis in BB's successors to remove BB.
1407 SmallVector<WeakVH, 16> UpdatedPHIs;
1408 for (const BasicBlock *Successor : successors(BB)) {
1409 removeDuplicatePhiEdgesBetween(From: BB, To: Successor);
1410 if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB: Successor)) {
1411 MPhi->unorderedDeleteIncomingBlock(BB);
1412 UpdatedPHIs.push_back(Elt: MPhi);
1413 }
1414 }
1415 // Optimize trivial phis.
1416 tryRemoveTrivialPhis(UpdatedPHIs);
1417}
1418
1419MemoryAccess *MemorySSAUpdater::createMemoryAccessInBB(
1420 Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
1421 MemorySSA::InsertionPlace Point, bool CreationMustSucceed) {
1422 MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(
1423 I, Definition, /*Template=*/nullptr, CreationMustSucceed);
1424 if (NewAccess)
1425 MSSA->insertIntoListsForBlock(NewAccess, BB, Point);
1426 return NewAccess;
1427}
1428
1429MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessBefore(
1430 Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt) {
1431 assert(I->getParent() == InsertPt->getBlock() &&
1432 "New and old access must be in the same block");
1433 MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
1434 MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
1435 InsertPt->getIterator());
1436 return NewAccess;
1437}
1438
1439MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessAfter(
1440 Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt) {
1441 assert(I->getParent() == InsertPt->getBlock() &&
1442 "New and old access must be in the same block");
1443 MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
1444 MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
1445 ++InsertPt->getIterator());
1446 return NewAccess;
1447}
1448