//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFGPrinter.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Use.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <memory>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memoryssa"

static cl::opt<std::string>
    DotCFGMSSA("dot-cfg-mssa",
               cl::value_desc("file name for generated dot file"),
               cl::desc("file name for generated dot file"), cl::init(""));

INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)

static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));

// Always verify MemorySSA if expensive checking is enabled.
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifyMemorySSA = true;
#else
bool llvm::VerifyMemorySSA = false;
#endif

static cl::opt<bool, true>
    VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
                     cl::Hidden, cl::desc("Enable verification of MemorySSA."));

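// Name printed in place of a defining access for the live-on-entry definition.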
const static char LiveOnEntryStr[] = "liveOnEntry";

namespace {

/// An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};

/// An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAWalkerAnnotatedWriter : public AssemblyAnnotationWriter {
  MemorySSA *MSSA;
  MemorySSAWalker *Walker;
  BatchAAResults BAA;

public:
  MemorySSAWalkerAnnotatedWriter(MemorySSA *M)
      : MSSA(M), Walker(M->getWalker()), BAA(M->getAA()) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I)) {
      MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(MA, BAA);
      OS << "; " << *MA;
      if (Clobber) {
        OS << " - clobbered by ";
        if (MSSA->isLiveOnEntryDef(Clobber))
          OS << LiveOnEntryStr;
        else
          OS << *Clobber;
      }
      OS << "\n";
    }
  }
};

} // namespace

namespace {

/// Our current alias analysis API differentiates heavily between calls and
/// non-calls, and functions called on one usually assert on the other.
/// This class encapsulates the distinction to simplify other code that wants
/// "Memory affecting instructions and related data" to use as a key.
/// For example, this class is used as a densemap key in the use optimizer.
class MemoryLocOrCall {
public:
  bool IsCall = false;

  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (auto *C = dyn_cast<CallBase>(Inst)) {
      IsCall = true;
      Call = C;
    } else {
      IsCall = false;
      // There is no such thing as a memorylocation for a fence inst, and it is
      // unique in that regard.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }

  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}

  const CallBase *getCall() const {
    assert(IsCall);
    return Call;
  }

  MemoryLocation getLoc() const {
    assert(!IsCall);
    return Loc;
  }

  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;

    if (!IsCall)
      return Loc == Other.Loc;

    if (Call->getCalledOperand() != Other.Call->getCalledOperand())
      return false;

    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());
  }

private:
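  // Only one union member is active at a time, selected by IsCall.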
  union {
    const CallBase *Call;
    MemoryLocation Loc;
  };
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<MemoryLocOrCall> {
  static inline MemoryLocOrCall getEmptyKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
  }

  static inline MemoryLocOrCall getTombstoneKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
  }

  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (!MLOC.IsCall)
      return hash_combine(
          MLOC.IsCall,
          DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));

    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCall()->getCalledOperand()));

    for (const Value *Arg : MLOC.getCall()->args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

/// This does one-way checks to see if Use could theoretically be hoisted above
/// MayClobber. This will not check the other way around.
///
/// This assumes that, for the purposes of MemorySSA, Use comes directly after
/// MayClobber, with no potentially clobbering operations in between them.
/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return false;
  // Otherwise, volatile doesn't matter here. From the language reference:
  // 'optimizers may change the order of volatile operations relative to
  // non-volatile operations.'

  // If a load is seq_cst, it cannot be moved above other loads. If its ordering
  // is weaker, it can be moved above other loads. We just need to be sure that
  // MayClobber isn't an acquire load, because loads can't be moved above
  // acquire loads.
  //
  // Note that this explicitly *does* allow the free reordering of monotonic (or
  // weaker) loads of the same address.
  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
}

template <typename AliasAnalysisType>
static bool
instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
                         const Instruction *UseInst, AliasAnalysisType &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers, mostly.
    //
    // FIXME: We probably don't actually want MemorySSA to model these at all
    // (including creating MemoryAccesses for them): we just end up inventing
    // clobbers where they don't really exist at all. Please see D43269 for
    // context.
    switch (II->getIntrinsicID()) {
    case Intrinsic::allow_runtime_check:
    case Intrinsic::allow_ubsan_check:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
    case Intrinsic::experimental_noalias_scope_decl:
    case Intrinsic::pseudoprobe:
      return false;
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_label:
    case Intrinsic::dbg_value:
      llvm_unreachable("debuginfo shouldn't have associated defs!");
    default:
      break;
    }
  }

  if (auto *CB = dyn_cast_or_null<CallBase>(UseInst)) {
    ModRefInfo I = AA.getModRefInfo(DefInst, CB);
    return isModOrRefSet(I);
  }

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast_or_null<LoadInst>(UseInst))
      return !areLoadsReorderable(UseLoad, DefLoad);

  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
  return isModSet(I);
}

template <typename AliasAnalysisType>
static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUseOrDef *MU,
                                     const MemoryLocOrCall &UseMLOC,
                                     AliasAnalysisType &AA) {
  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
  // to exist while MemoryLocOrCall is pushed through places.
  if (UseMLOC.IsCall)
    return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
                                    AA);
  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
                                  AA);
}

// Return true when MD may alias MU, return false otherwise.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                        AliasAnalysis &AA) {
  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA);
}

namespace {

struct UpwardsMemoryQuery {
  // True if our original query started off as a call
  bool IsCall = false;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with, used to test local domination
  const MemoryAccess *OriginalAccess = nullptr;
  bool SkipSelfAccess = false;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};

} // end anonymous namespace

template <typename AliasAnalysisType>
static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
                                                   const Instruction *I) {
  // If the memory can't be changed, then loads of the memory can't be
  // clobbered.
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    return I->hasMetadata(LLVMContext::MD_invariant_load) ||
           !isModSet(AA.getModRefInfoMask(MemoryLocation::get(LI)));
  }
  return false;
}

/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` can clobber `Start`.
///
/// This is meant to be as simple and self-contained as possible. Because it
/// uses no cache, etc., it can be relatively expensive.
///
/// \param Start     The MemoryAccess that we want to walk from.
/// \param ClobberAt A clobber for Start.
/// \param StartLoc  The MemoryLocation for Start.
/// \param MSSA      The MemorySSA instance that Start and ClobberAt belong to.
/// \param Query     The UpwardsMemoryQuery we used for our search.
/// \param AA        The AliasAnalysis we used for our search.
/// \param AllowImpreciseClobber Always false, unless we do relaxed verify.

LLVM_ATTRIBUTE_UNUSED static void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, BatchAAResults &AA,
                   bool AllowImpreciseClobber = false) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  SmallVector<ConstMemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
  // is found, complain.
  while (!Worklist.empty()) {
    auto MAP = Worklist.pop_back_val();
    // All we care about is that nothing from Start to ClobberAt clobbers Start.
    // We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (const auto *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't essentially free, so don't use `|=`,
          // since it won't let us short-circuit.
          //
          // Also, note that this can't be hoisted out of the `Worklist` loop,
          // since MD may only act as a clobber for 1 of N MemoryLocations.
          FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
          if (!FoundClobber) {
            if (instructionClobbersQuery(MD, MAP.second, Query.Inst, AA))
              FoundClobber = true;
          }
        }
        break;
      }

      // We should never hit liveOnEntry, unless it's the clobber.
      assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");

      if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
        // If Start is a Def, skip self.
        if (MD == Start)
          continue;

        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
        (void)MU;
        assert(MU == Start &&
               "Can only find use in def chain if Start is a use");
        continue;
      }

      assert(isa<MemoryPhi>(MA));

      // Add reachable phi predecessors
      for (auto ItB = upward_defs_begin(
                    {const_cast<MemoryAccess *>(MA), MAP.second},
                    MSSA.getDomTree()),
                ItE = upward_defs_end();
           ItB != ItE; ++ItB)
        if (MSSA.getDomTree().isReachableFromEntry(ItB.getPhiArgBlock()))
          Worklist.emplace_back(*ItB);
    }
  }

  // If the verify is done following an optimization, it's possible that
  // ClobberAt was a conservative clobbering, that we can now infer is not a
  // true clobbering access. Don't fail the verify if that's the case.
  // We do have accesses that claim they're optimized, but could be optimized
  // further. Updating all these can be expensive, so allow it for now (FIXME).
  if (AllowImpreciseClobber)
    return;

  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}

namespace {

/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
/// in one class.
class ClobberWalker {
  /// Save a few bytes by using unsigned instead of size_t.
  using ListIndex = unsigned;

  /// Represents a span of contiguous MemoryDefs, potentially ending in a
  /// MemoryPhi.
  struct DefPath {
    MemoryLocation Loc;
    // Note that, because we always walk in reverse, Last will always dominate
    // First. Also note that First and Last are inclusive.
    MemoryAccess *First;
    MemoryAccess *Last;
    std::optional<ListIndex> Previous;

    DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
            std::optional<ListIndex> Previous)
        : Loc(Loc), First(First), Last(Last), Previous(Previous) {}

    DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
            std::optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}
  };

  const MemorySSA &MSSA;
  DominatorTree &DT;
  BatchAAResults *AA;
  UpwardsMemoryQuery *Query;
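  // Remaining number of steps the upward walk may take; points at the limit
  // owned by the caller of findClobber, so all walks for one query share a
  // single budget.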
  unsigned *UpwardWalkLimit;

  // Phi optimization bookkeeping:
  // List of DefPath to process during the current phi optimization walk.
  SmallVector<DefPath, 32> Paths;
  // List of visited <Access, Location> pairs; we can skip paths already
  // visited with the same memory location.
  DenseSet<ConstMemoryAccessPair> VisitedPhis;

  /// Find the nearest def or phi that `From` can legally be optimized to.
  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
    assert(From->getNumOperands() && "Phi with no operands?");

    BasicBlock *BB = From->getBlock();
    MemoryAccess *Result = MSSA.getLiveOnEntryDef();
    DomTreeNode *Node = DT.getNode(BB);
    while ((Node = Node->getIDom())) {
      auto *Defs = MSSA.getBlockDefs(Node->getBlock());
      if (Defs)
        return &*Defs->rbegin();
    }
    return Result;
  }

  /// Result of calling walkToPhiOrClobber.
  struct UpwardsWalkResult {
    /// The "Result" of the walk. Either a clobber, the last thing we walked, or
    /// both. Include alias info when clobber found.
    MemoryAccess *Result;
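    /// True when Result is treated as a clobber: a clobbering MemoryDef,
    /// liveOnEntry, or the access at which the walk limit was exhausted.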
    bool IsKnownClobber;
  };

  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
  /// This will update Desc.Last as it walks. It will (optionally) also stop at
  /// StopAt.
  ///
  /// This does not test for whether StopAt is a clobber
  UpwardsWalkResult
  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
                     const MemoryAccess *SkipStopAt = nullptr) const {
    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
    assert(UpwardWalkLimit && "Need a valid walk limit");
    bool LimitAlreadyReached = false;
    // (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
    // it to 1. This will not do any alias() calls. It either returns in the
    // first iteration in the loop below, or is set back to 0 if all def chains
    // are free of MemoryDefs.
    if (!*UpwardWalkLimit) {
      *UpwardWalkLimit = 1;
      LimitAlreadyReached = true;
    }

    for (MemoryAccess *Current : def_chain(Desc.Last)) {
      Desc.Last = Current;
      if (Current == StopAt || Current == SkipStopAt)
        return {Current, false};

      if (auto *MD = dyn_cast<MemoryDef>(Current)) {
        if (MSSA.isLiveOnEntryDef(MD))
          return {MD, true};

        if (!--*UpwardWalkLimit)
          return {Current, true};

        if (instructionClobbersQuery(MD, Desc.Loc, Query->Inst, *AA))
          return {MD, true};
      }
    }

    if (LimitAlreadyReached)
      *UpwardWalkLimit = 0;

    assert(isa<MemoryPhi>(Desc.Last) &&
           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false};
  }

  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                   ListIndex PriorNode) {
    auto UpwardDefsBegin = upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT);
    auto UpwardDefs = make_range(UpwardDefsBegin, upward_defs_end());
    for (const MemoryAccessPair &P : UpwardDefs) {
      PausedSearches.push_back(Paths.size());
      Paths.emplace_back(P.second, P.first, PriorNode);
    }
  }

  /// Represents a search that terminated after finding a clobber. This clobber
  /// may or may not be present in the path of defs from LastNode..SearchStart,
  /// since it may have been retrieved from cache.
  struct TerminatedPath {
    MemoryAccess *Clobber;
    ListIndex LastNode;
  };

  /// Get an access that keeps us from optimizing to the given phi.
  ///
  /// PausedSearches is an array of indices into the Paths array. Its incoming
  /// value is the indices of searches that stopped at the last phi optimization
  /// target. It's left in an unspecified state.
  ///
  /// If this returns std::nullopt, NewPaused is a vector of searches that
  /// terminated at StopWhere. Otherwise, NewPaused is left in an unspecified
  /// state.
  std::optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");

    // BFS vs DFS really doesn't make a difference here, so just do a DFS with
    // PausedSearches as our stack.
    while (!PausedSearches.empty()) {
      ListIndex PathIndex = PausedSearches.pop_back_val();
      DefPath &Node = Paths[PathIndex];

      // If we've already visited this path with this MemoryLocation, we don't
      // need to do so again.
      //
      // NOTE: That we just drop these paths on the ground makes caching
      // behavior sporadic. e.g. given a diamond:
      //  A
      // B C
      //  D
      //
      // ...If we walk D, B, A, C, we'll only cache the result of phi
      // optimization for A, B, and D; C will be skipped because it dies here.
      // This arguably isn't the worst thing ever, since:
      //   - We generally query things in a top-down order, so if we got below D
      //     without needing cache entries for {C, MemLoc}, then chances are
      //     that those cache entries would end up ultimately unused.
      //   - We still cache things for A, so C only needs to walk up a bit.
      // If this behavior becomes problematic, we can fix without a ton of extra
      // work.
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
        continue;

      const MemoryAccess *SkipStopWhere = nullptr;
      if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
        assert(isa<MemoryDef>(Query->OriginalAccess));
        SkipStopWhere = Query->OriginalAccess;
      }

      UpwardsWalkResult Res = walkToPhiOrClobber(Node,
                                                 /*StopAt=*/StopWhere,
                                                 /*SkipStopAt=*/SkipStopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);

        // If this wasn't a cache hit, we hit a clobber when walking. That's a
        // failure.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))
          return Term;

        // Otherwise, it's a valid thing to potentially optimize to.
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
        // We've hit our target. Save this path off for if we want to continue
        // walking. If we are in the mode of skipping the OriginalAccess, and
        // we've reached back to the OriginalAccess, do not save path, we've
        // just looped back to self.
        if (Res.Result != SkipStopWhere)
          NewPaused.push_back(PathIndex);
        continue;
      }

      assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return std::nullopt;
  }

  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() = default;
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.has_value() != O.N.has_value())
        return false;
      return !N || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }

    Walker *W = nullptr;
    std::optional<ListIndex> N;
  };

  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }

  struct OptznResult {
    /// The path that contains our result.
    TerminatedPath PrimaryClobber;
    /// The paths that we can legally cache back from, but that aren't
    /// necessarily the result of the Phi optimization.
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };

  ListIndex defPathIndex(const DefPath &N) const {
    // The assert looks nicer if we don't need to do &N
    const DefPath *NP = &N;
    assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
           "Out of bounds DefPath!");
    return NP - &Paths.front();
  }

  /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
  /// that act as legal clobbers. Note that this won't return *all* clobbers.
  ///
  /// Phi optimization algorithm tl;dr:
  ///   - Find the earliest def/phi, A, we can optimize to
  ///   - Find if all paths from the starting memory access ultimately reach A
  ///     - If not, optimization isn't possible.
  ///     - Otherwise, walk from A to another clobber or phi, A'.
  ///       - If A' is a def, we're done.
  ///       - If A' is a phi, try to optimize it.
  ///
  /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
  /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
                             const MemoryLocation &Loc) {
    assert(Paths.empty() && VisitedPhis.empty() &&
           "Reset the optimization state.");

    Paths.emplace_back(Loc, Start, Phi, std::nullopt);
    // Stores how many "valid" optimization nodes we had prior to calling
    // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
    auto PriorPathsSize = Paths.size();

    SmallVector<ListIndex, 16> PausedSearches;
    SmallVector<ListIndex, 8> NewPaused;
    SmallVector<TerminatedPath, 4> TerminatedPaths;

    addSearches(Phi, PausedSearches, 0);

    // Moves the TerminatedPath with the "most dominated" Clobber to the end of
    // Paths.
    auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
      assert(!Paths.empty() && "Need a path to move");
      auto Dom = Paths.begin();
      for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
        if (!MSSA.dominates(I->Clobber, Dom->Clobber))
          Dom = I;
      auto Last = Paths.end() - 1;
      if (Last != Dom)
        std::iter_swap(Last, Dom);
    };

    MemoryPhi *Current = Phi;
    while (true) {
      assert(!MSSA.isLiveOnEntryDef(Current) &&
             "liveOnEntry wasn't treated as a clobber?");

      const auto *Target = getWalkTarget(Current);
      // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
      // optimization for the prior phi.
      assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
        return MSSA.dominates(P.Clobber, Target);
      }));

      // FIXME: This is broken, because the Blocker may be reported to be
      // liveOnEntry, and we'll happily wait for that to disappear (read: never)
      // For the moment, this is fine, since we do nothing with blocker info.
      if (std::optional<TerminatedPath> Blocker = getBlockingAccess(
              Target, PausedSearches, NewPaused, TerminatedPaths)) {

        // Find the node we started at. We can't search based on N->Last, since
        // we may have gone around a loop with a different MemoryLocation.
        auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
          return defPathIndex(N) < PriorPathsSize;
        });
        assert(Iter != def_path_iterator());

        DefPath &CurNode = *Iter;
        assert(CurNode.Last == Current);

        // Two things:
        // A. We can't reliably cache all of NewPaused back. Consider a case
        //    where we have two paths in NewPaused; one of which can't optimize
        //    above this phi, whereas the other can. If we cache the second path
        //    back, we'll end up with suboptimal cache entries. We can handle
        //    cases like this a bit better when we either try to find all
        //    clobbers that block phi optimization, or when our cache starts
        //    supporting unfinished searches.
        // B. We can't reliably cache TerminatedPaths back here without doing
        //    extra checks; consider a case like:
        //       T
        //      / \
        //     D   C
        //      \ /
        //       S
        //    Where T is our target, C is a node with a clobber on it, D is a
        //    diamond (with a clobber *only* on the left or right node, N), and
        //    S is our start. Say we walk to D, through the node opposite N
        //    (read: ignoring the clobber), and see a cache entry in the top
        //    node of D. That cache entry gets put into TerminatedPaths. We then
        //    walk up to C (N is later in our worklist), find the clobber, and
        //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
        //    the bottom part of D to the cached clobber, ignoring the clobber
        //    in N. Again, this problem goes away if we start tracking all
        //    blockers for a given phi optimization.
        TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
        return {Result, {}};
      }

      // If there's nothing left to search, then all paths led to valid clobbers
      // that we got from our cache; pick the nearest to the start, and allow
      // the rest to be cached back.
      if (NewPaused.empty()) {
        MoveDominatedPathToEnd(TerminatedPaths);
        TerminatedPath Result = TerminatedPaths.pop_back_val();
        return {Result, std::move(TerminatedPaths)};
      }

      MemoryAccess *DefChainEnd = nullptr;
      SmallVector<TerminatedPath, 4> Clobbers;
      for (ListIndex Paused : NewPaused) {
        UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
        if (WR.IsKnownClobber)
          Clobbers.push_back({WR.Result, Paused});
        else
          // Micro-opt: If we hit the end of the chain, save it.
          DefChainEnd = WR.Result;
      }

      if (!TerminatedPaths.empty()) {
        // If we couldn't find the dominating phi/liveOnEntry in the above loop,
        // do it now.
        if (!DefChainEnd)
          for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
            DefChainEnd = MA;
        assert(DefChainEnd && "Failed to find dominating phi/liveOnEntry");

        // If any of the terminated paths don't dominate the phi we'll try to
        // optimize, we need to figure out what they are and quit.
        const BasicBlock *ChainBB = DefChainEnd->getBlock();
        for (const TerminatedPath &TP : TerminatedPaths) {
          // Because we know that DefChainEnd is as "high" as we can go, we
          // don't need local dominance checks; BB dominance is sufficient.
          if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
            Clobbers.push_back(TP);
        }
      }

      // If we have clobbers in the def chain, find the one closest to Current
      // and quit.
      if (!Clobbers.empty()) {
        MoveDominatedPathToEnd(Clobbers);
        TerminatedPath Result = Clobbers.pop_back_val();
        return {Result, std::move(Clobbers)};
      }

      assert(all_of(NewPaused,
                    [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));

      // Because liveOnEntry is a clobber, this must be a phi.
      auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);

      PriorPathsSize = Paths.size();
      PausedSearches.clear();
      for (ListIndex I : NewPaused)
        addSearches(DefChainPhi, PausedSearches, I);
      NewPaused.clear();

      Current = DefChainPhi;
    }
  }

  void verifyOptResult(const OptznResult &R) const {
    assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
    }));
  }

  void resetPhiOptznState() {
    Paths.clear();
    VisitedPhis.clear();
  }

public:
  ClobberWalker(const MemorySSA &MSSA, DominatorTree &DT)
      : MSSA(MSSA), DT(DT) {}

  /// Finds the nearest clobber for the given query, optimizing phis if
  /// possible.
  MemoryAccess *findClobber(BatchAAResults &BAA, MemoryAccess *Start,
                            UpwardsMemoryQuery &Q, unsigned &UpWalkLimit) {
    AA = &BAA;
    Query = &Q;
    UpwardWalkLimit = &UpWalkLimit;
    // Starting limit must be > 0.
    if (!UpWalkLimit)
      UpWalkLimit++;

    MemoryAccess *Current = Start;
    // This walker pretends uses don't exist. If we're handed one, silently grab
    // its def. (This has the nice side-effect of ensuring we never cache uses)
    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, std::nullopt);
    // Fast path for the overly-common case (no crazy phi optimization
    // necessary)
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
      Result = WalkResult.Result;
    } else {
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;
    }

#ifdef EXPENSIVE_CHECKS
    if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
      checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, BAA);
#endif
    return Result;
  }
};

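/// Worklist state used by MemorySSA::renamePass for its iterative
/// (non-recursive) walk over the dominator tree.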
struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}

  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};

} // end anonymous namespace

namespace llvm {

class MemorySSA::ClobberWalkerBase {
  ClobberWalker Walker;
  MemorySSA *MSSA;

public:
  ClobberWalkerBase(MemorySSA *M, DominatorTree *D) : Walker(*M, *D), MSSA(M) {}

  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
                                              const MemoryLocation &,
                                              BatchAAResults &, unsigned &);
  // Third argument (bool), defines whether the clobber search should skip the
  // original queried access. If true, there will be a follow-up query searching
  // for a clobber access past "self". Note that the Optimized access is not
  // updated if a new clobber is found by this SkipSelf search. If this
  // additional query becomes heavily used we may decide to cache the result.
  // Walker instantiations will decide how to set the SkipSelf bool.
  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, BatchAAResults &,
                                              unsigned &, bool,
                                              bool UseInvariantGroup = true);
};

/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
/// longer does caching on its own, but the name has been retained for the
/// moment.
class MemorySSA::CachingWalker final : public MemorySSAWalker {
  ClobberWalkerBase *Walker;

public:
  CachingWalker(MemorySSA *M, ClobberWalkerBase *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~CachingWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          BatchAAResults &BAA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL);
  }
  // This method is not accessible outside of this file.
  MemoryAccess *getClobberingMemoryAccessWithoutInvariantGroup(
      MemoryAccess *MA, BatchAAResults &BAA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false, false);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          BatchAAResults &BAA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, BAA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          BatchAAResults &BAA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, BAA, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};

class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
  ClobberWalkerBase *Walker;

public:
  SkipSelfWalker(MemorySSA *M, ClobberWalkerBase *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~SkipSelfWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, true);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          BatchAAResults &BAA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          BatchAAResults &BAA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, BAA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          BatchAAResults &BAA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, BAA, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};

} // end namespace llvm

void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
                                    bool RenameAllUses) {
  // Pass through values to our successors
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    if (RenameAllUses) {
      bool ReplacementDone = false;
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
        if (Phi->getIncomingBlock(I) == BB) {
          Phi->setIncomingValue(I, IncomingVal);
          ReplacementDone = true;
        }
      (void)ReplacementDone;
      assert(ReplacementDone && "Incomplete phi during partial rename");
    } else
      Phi->addIncoming(IncomingVal, BB);
  }
}

/// Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \returns The new incoming value.
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
                                     bool RenameAllUses) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
        if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
          MUD->setDefiningAccess(IncomingVal);
        if (isa<MemoryDef>(&L))
          IncomingVal = &L;
      } else {
        IncomingVal = &L;
      }
    }
  }
  return IncomingVal;
}

/// This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  assert(Root && "Trying to rename accesses in an unreachable block");

  SmallVector<RenamePassData, 32> WorkStack;
  // Skip everything if we already renamed this block and we are skipping.
  // Note: You can't sink this into the if, because we need it to occur
  // regardless of whether we skip blocks or not.
  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
  if (SkipVisited && AlreadyVisited)
    return;

  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});

  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      // Note: You can't sink this into the if, because we need it to occur
      // regardless of whether we skip blocks or not.
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
        // We already visited this during our renaming, which can happen when
        // being asked to rename multiple blocks. Figure out the incoming val,
        // which is the last def.
        // Incoming value can only change if there is a block def, and in that
        // case, it's the last block def in the list.
        if (auto *BlockDefs = getWritableBlockDefs(BB))
          IncomingVal = &*BlockDefs->rbegin();
      } else
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}

/// This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccess's as
/// being uses of the live on entry definition.
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  // Make sure phi nodes in our reachable successors end up with a
  // LiveOnEntryDef for our incoming edge, even though our block is forward
  // unreachable. We could just disconnect these blocks from the CFG fully,
  // but we do not right now.
  for (const BasicBlock *S : successors(BB)) {
    if (!DT->isReachableFromEntry(S))
      continue;
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);
  }

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // If we have a phi, just remove it. We are going to replace all
    // users with live on entry.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}

MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : DT(DT), F(&Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr) {
  // Build MemorySSA using a batch alias analysis. This reuses the internal
  // state that AA collects during an alias()/getModRefInfo() call. This is
  // safe because there are no CFG changes while building MemorySSA and can
  // significantly reduce the time spent by the compiler in AA, because we will
  // make queries about all the instructions in the Function.
  assert(AA && "No alias analysis?");
  BatchAAResults BatchAA(*AA);
  buildMemorySSA(BatchAA, iterator_range(F->begin(), F->end()));
  // Intentionally leave AA to nullptr while building so we don't accidentally
  // use non-batch AliasAnalysis.
  this->AA = AA;
  // Also create the walker here.
  getWalker();
}

MemorySSA::MemorySSA(Loop &L, AliasAnalysis *AA, DominatorTree *DT)
    : DT(DT), L(&L), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr) {
  // Build MemorySSA using a batch alias analysis. This reuses the internal
  // state that AA collects during an alias()/getModRefInfo() call. This is
  // safe because there are no CFG changes while building MemorySSA and can
  // significantly reduce the time spent by the compiler in AA, because we will
  // make queries about all the instructions in the Function.
  assert(AA && "No alias analysis?");
  BatchAAResults BatchAA(*AA);
  buildMemorySSA(
      BatchAA, map_range(L.blocks(), [](const BasicBlock *BB) -> BasicBlock & {
        return *const_cast<BasicBlock *>(BB);
      }));
  // Intentionally leave AA to nullptr while building so we don't accidentally
  // use non-batch AliasAnalysis.
  this->AA = AA;
  // Also create the walker here.
  getWalker();
}

MemorySSA::~MemorySSA() {
  // Drop all our references
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}

MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = std::make_unique<AccessList>();
  return Res.first->second.get();
}

MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = std::make_unique<DefsList>();
  return Res.first->second.get();
}

namespace llvm {

/// This class is a batch walker of all MemoryUse's in the program, and points
/// their defining access at the thing that actually clobbers them. Because it
/// is a batch walker that touches everything, it does not operate like the
/// other walkers. This walker is basically performing a top-down SSA renaming
/// pass, where the version stack is used as the cache. This enables it to be
/// significantly more time and memory efficient than using the regular walker,
/// which is walking bottom-up.
class MemorySSA::OptimizeUses {
public:
  OptimizeUses(MemorySSA *MSSA, CachingWalker *Walker, BatchAAResults *BAA,
               DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}

  void optimizeUses();

private:
  /// This represents where a given memorylocation is in the stack.
  struct MemlocStackInfo {
    // This essentially is keeping track of versions of the stack. Whenever
    // the stack changes due to pushes or pops, these versions increase.
    unsigned long StackEpoch;
    unsigned long PopEpoch;
    // This is the lower bound of places on the stack to check. It is equal to
    // the place the last stack walk ended.
    // Note: Correctness depends on this being initialized to 0, which densemap
    // does
    unsigned long LowerBound;
    const BasicBlock *LowerBoundBlock;
    // This is where the last walk for this memory location ended.
    unsigned long LastKill;
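    // Whether LastKill is still valid; cleared when the lower bound is reset
    // or a walk is skipped.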
    bool LastKillValid;
  };

  void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
                           SmallVectorImpl<MemoryAccess *> &,
                           DenseMap<MemoryLocOrCall, MemlocStackInfo> &);

  MemorySSA *MSSA;
  CachingWalker *Walker;
  BatchAAResults *AA;
  DominatorTree *DT;
};

} // end namespace llvm

/// Optimize the uses in a given block. This is basically the SSA renaming
/// algorithm, with one caveat: We are able to use a single stack for all
/// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
/// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
/// going to be some position in that stack of possible ones.
///
/// For each MemoryLocation we track the stack positions it still needs to
/// check, and where its last walk ended, because we only want to check the
/// things that changed since last time. The same MemoryLocation should
/// get clobbered by the same store (getModRefInfo does not use invariantness or
/// things like this, and if they start, we can modify MemoryLocOrCall to
/// include relevant data)
void MemorySSA::OptimizeUses::optimizeUsesInBlock(
    const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
    SmallVectorImpl<MemoryAccess *> &VersionStack,
    DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {

  /// If no accesses, nothing to do.
  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
  if (Accesses == nullptr)
    return;

  // Pop everything that doesn't dominate the current block off the stack,
  // increment the PopEpoch to account for this.
  while (true) {
    assert(
        !VersionStack.empty() &&
        "Version stack should have liveOnEntry sentinel dominating everything");
    BasicBlock *BackBlock = VersionStack.back()->getBlock();
    if (DT->dominates(BackBlock, BB))
      break;
    while (VersionStack.back()->getBlock() == BackBlock)
      VersionStack.pop_back();
    ++PopEpoch;
  }

  for (MemoryAccess &MA : *Accesses) {
    auto *MU = dyn_cast<MemoryUse>(&MA);
    if (!MU) {
      VersionStack.push_back(&MA);
      ++StackEpoch;
      continue;
    }

    if (MU->isOptimized())
      continue;

    MemoryLocOrCall UseMLOC(MU);
    auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, it means we've removed stuff from top of
    // stack due to changing blocks. We may have to reset the lower bound or
    // last kill info.
    if (LocInfo.PopEpoch != PopEpoch) {
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
      // If the lower bound was in something that no longer dominates us, we
      // have to reset it.
      // We can't simply track stack size, because the stack may have had
      // pushes/pops in the meantime.
      // XXX: This is non-optimal, but is only slower in cases with heavily
      // branching dominator trees. To get the optimal number of queries would
      // be to make lowerbound and lastkill a per-loc stack, and pop it until
      // the top of that stack dominates us. This does not seem worth it ATM.
      // A much cheaper optimization would be to always explore the deepest
      // branch of the dominator tree first. This will guarantee this resets on
      // the smallest set of blocks.
      if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
          !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
        // Reset the lower bound of things to check.
        // TODO: Some day we should be able to reset to last kill, rather than
        // 0.
        LocInfo.LowerBound = 0;
        LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
        LocInfo.LastKillValid = false;
      }
    } else if (LocInfo.StackEpoch != StackEpoch) {
      // If all that has changed is the StackEpoch, we only have to check the
      // new things on the stack, because we've checked everything before. In
      // this case, the lower bound of things to check remains the same.
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
    }
    if (!LocInfo.LastKillValid) {
      LocInfo.LastKill = VersionStack.size() - 1;
      LocInfo.LastKillValid = true;
    }

    // At this point, we should have corrected last kill and LowerBound to be
    // in bounds.
    assert(LocInfo.LowerBound < VersionStack.size() &&
           "Lower bound out of range");
    assert(LocInfo.LastKill < VersionStack.size() &&
           "Last kill info out of range");
    // In any case, the new upper bound is the top of the stack.
    unsigned long UpperBound = VersionStack.size() - 1;

    if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
      LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
                        << *(MU->getMemoryInst()) << ")"
                        << " because there are "
                        << UpperBound - LocInfo.LowerBound
                        << " stores to disambiguate\n");
      // Because we did not walk, LastKill is no longer valid, as this may
      // have been a kill.
      LocInfo.LastKillValid = false;
      continue;
    }
    bool FoundClobberResult = false;
    unsigned UpwardWalkLimit = MaxCheckLimit;
    while (UpperBound > LocInfo.LowerBound) {
      if (isa<MemoryPhi>(VersionStack[UpperBound])) {
        // For phis, use the walker, see where we ended up, go there.
        // The invariant.group handling in MemorySSA is ad-hoc and doesn't
        // support updates, so don't use it to optimize uses.
        MemoryAccess *Result =
            Walker->getClobberingMemoryAccessWithoutInvariantGroup(
                MU, *AA, UpwardWalkLimit);
        // We are guaranteed to find it or something is wrong.
        while (VersionStack[UpperBound] != Result) {
          assert(UpperBound != 0);
          --UpperBound;
        }
        FoundClobberResult = true;
        break;
      }

      MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
      if (instructionClobbersQuery(MD, MU, UseMLOC, *AA)) {
        FoundClobberResult = true;
        break;
      }
      --UpperBound;
    }

    // At the end of this loop, UpperBound is either a clobber, or the lower
    // bound. PHI walking may cause it to be < LowerBound, and in fact,
    // < LastKill.
    if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      MU->setDefiningAccess(VersionStack[UpperBound], true);
      LocInfo.LastKill = UpperBound;
    } else {
      // Otherwise, we checked all the new ones, and now we know we can get to
      // LastKill.
      MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true);
    }
    LocInfo.LowerBound = VersionStack.size() - 1;
    LocInfo.LowerBoundBlock = BB;
  }
}
1490
1491/// Optimize uses to point to their actual clobbering definitions.
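/// This is implemented as a single non-recursive top-down walk over the
/// dominator tree (see below), maintaining a stack of the MemoryDefs and
/// MemoryPhis that reach the current block together with per-location
/// bookkeeping, so each use only has to examine the stack entries pushed since
/// its location was last visited.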
1492void MemorySSA::OptimizeUses::optimizeUses() {
1493 SmallVector<MemoryAccess *, 16> VersionStack;
1494 DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
1495 VersionStack.push_back(Elt: MSSA->getLiveOnEntryDef());
1496
1497 unsigned long StackEpoch = 1;
1498 unsigned long PopEpoch = 1;
1499 // We perform a non-recursive top-down dominator tree walk.
1500 for (const auto *DomNode : depth_first(G: DT->getRootNode()))
1501 optimizeUsesInBlock(BB: DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
1502 LocStackInfo);
1503}
1504
1505void MemorySSA::placePHINodes(
1506 const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
1507 // Determine where our MemoryPhis should go
1508 ForwardIDFCalculator IDFs(*DT);
1509 IDFs.setDefiningBlocks(DefiningBlocks);
1510 SmallVector<BasicBlock *, 32> IDFBlocks;
1511 IDFs.calculate(IDFBlocks);
1512
1513 // Now place MemoryPhi nodes.
1514 for (auto &BB : IDFBlocks)
1515 createMemoryPhi(BB);
1516}
1517
1518template <typename IterT>
1519void MemorySSA::buildMemorySSA(BatchAAResults &BAA, IterT Blocks) {
1520 // We create an access to represent "live on entry", for things like
1521 // arguments or users of globals, where the memory they use is defined before
1522 // the beginning of the function. We do not actually insert it into the IR.
1523 // We do not define a live on exit for the immediate uses, and thus our
1524 // semantics do *not* imply that something with no immediate uses can simply
1525 // be removed.
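// For example, a load from a pointer argument that is not preceded by any
// may-write instruction in the function will have liveOnEntry as its defining
// access.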
1526 BasicBlock &StartingPoint = *Blocks.begin();
1527 LiveOnEntryDef.reset(p: new MemoryDef(StartingPoint.getContext(), nullptr,
1528 nullptr, &StartingPoint, NextID++));
1529
1530 // We maintain lists of memory accesses per block, trading memory for time.
1531 // The alternative would be to look up the memory access for every possible
1532 // instruction in the stream.
1533 SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
1534 // Go through each block, figure out where defs occur, and chain together all
1535 // the accesses.
1536 for (BasicBlock &B : Blocks) {
1537 bool InsertIntoDef = false;
1538 AccessList *Accesses = nullptr;
1539 DefsList *Defs = nullptr;
1540 for (Instruction &I : B) {
1541 MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
1542 if (!MUD)
1543 continue;
1544
1545 if (!Accesses)
1546 Accesses = getOrCreateAccessList(BB: &B);
1547 Accesses->push_back(val: MUD);
1548 if (isa<MemoryDef>(Val: MUD)) {
1549 InsertIntoDef = true;
1550 if (!Defs)
1551 Defs = getOrCreateDefsList(BB: &B);
1552 Defs->push_back(Node&: *MUD);
1553 }
1554 }
1555 if (InsertIntoDef)
1556 DefiningBlocks.insert(Ptr: &B);
1557 }
1558 placePHINodes(DefiningBlocks);
1559
1560 // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
1561 // filled in with all blocks.
1562 SmallPtrSet<BasicBlock *, 16> Visited;
1563 if (L) {
1564 // Only building MemorySSA for a single loop. placePHINodes may have
1565 // inserted a MemoryPhi in the loop's preheader. As this is outside the
1566 // scope of the loop, replace its uses with LiveOnEntry and remove it.
1567 if (auto *P = getMemoryAccess(BB: L->getLoopPreheader())) {
1568 for (Use &U : make_early_inc_range(Range: P->uses()))
1569 U.set(LiveOnEntryDef.get());
1570 removeFromLists(P);
1571 }
1572 // Now rename accesses in the loop. Populate Visited with the exit blocks of
1573 // the loop, to limit the scope of the renaming.
1574 SmallVector<BasicBlock *> ExitBlocks;
1575 L->getExitBlocks(ExitBlocks);
1576 Visited.insert(I: ExitBlocks.begin(), E: ExitBlocks.end());
1577 renamePass(Root: DT->getNode(BB: L->getLoopPreheader()), IncomingVal: LiveOnEntryDef.get(),
1578 Visited);
1579 } else {
1580 renamePass(Root: DT->getRootNode(), IncomingVal: LiveOnEntryDef.get(), Visited);
1581 }
1582
1583 // Mark the uses in unreachable blocks as live on entry, so that they go
1584 // somewhere.
1585 for (auto &BB : Blocks)
1586 if (!Visited.count(Ptr: &BB))
1587 markUnreachableAsLiveOnEntry(BB: &BB);
1588}
1589
1590MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
1591
1592MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
1593 if (Walker)
1594 return Walker.get();
1595
1596 if (!WalkerBase)
1597 WalkerBase = std::make_unique<ClobberWalkerBase>(args: this, args&: DT);
1598
1599 Walker = std::make_unique<CachingWalker>(args: this, args: WalkerBase.get());
1600 return Walker.get();
1601}
1602
1603MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
1604 if (SkipWalker)
1605 return SkipWalker.get();
1606
1607 if (!WalkerBase)
1608 WalkerBase = std::make_unique<ClobberWalkerBase>(args: this, args&: DT);
1609
1610 SkipWalker = std::make_unique<SkipSelfWalker>(args: this, args: WalkerBase.get());
1611 return SkipWalker.get();
1612}
1613
1615// This is a helper function used by the creation routines. It places NewAccess
1616// into the access and defs lists for a given basic block, at the given
1617// insertion point.
1618void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1619 const BasicBlock *BB,
1620 InsertionPlace Point) {
1621 auto *Accesses = getOrCreateAccessList(BB);
1622 if (Point == Beginning) {
1623 // If it's a phi node, it goes first; otherwise, it goes after any phi
1624 // nodes.
1625 if (isa<MemoryPhi>(Val: NewAccess)) {
1626 Accesses->push_front(val: NewAccess);
1627 auto *Defs = getOrCreateDefsList(BB);
1628 Defs->push_front(Node&: *NewAccess);
1629 } else {
1630 auto AI = find_if_not(
1631 Range&: *Accesses, P: [](const MemoryAccess &MA) { return isa<MemoryPhi>(Val: MA); });
1632 Accesses->insert(where: AI, New: NewAccess);
1633 if (!isa<MemoryUse>(Val: NewAccess)) {
1634 auto *Defs = getOrCreateDefsList(BB);
1635 auto DI = find_if_not(
1636 Range&: *Defs, P: [](const MemoryAccess &MA) { return isa<MemoryPhi>(Val: MA); });
1637 Defs->insert(I: DI, Node&: *NewAccess);
1638 }
1639 }
1640 } else {
1641 Accesses->push_back(val: NewAccess);
1642 if (!isa<MemoryUse>(Val: NewAccess)) {
1643 auto *Defs = getOrCreateDefsList(BB);
1644 Defs->push_back(Node&: *NewAccess);
1645 }
1646 }
1647 BlockNumberingValid.erase(Ptr: BB);
1648}
1649
1650void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
1651 AccessList::iterator InsertPt) {
1652 auto *Accesses = getWritableBlockAccesses(BB);
1653 bool WasEnd = InsertPt == Accesses->end();
1654 Accesses->insert(where: AccessList::iterator(InsertPt), New: What);
1655 if (!isa<MemoryUse>(Val: What)) {
1656 auto *Defs = getOrCreateDefsList(BB);
1657 // If we got asked to insert at the end, we have an easy job: just append.
1658 // If we got asked to insert before an existing def, the iterator we were
1659 // given already points at a def we can insert before. If we got asked to
1660 // insert before a use, we have to hunt forward for the next def.
1661 if (WasEnd) {
1662 Defs->push_back(Node&: *What);
1663 } else if (isa<MemoryDef>(Val: InsertPt)) {
1664 Defs->insert(I: InsertPt->getDefsIterator(), Node&: *What);
1665 } else {
1666 while (InsertPt != Accesses->end() && !isa<MemoryDef>(Val: InsertPt))
1667 ++InsertPt;
1668 // Either we found a def, or we are inserting at the end
1669 if (InsertPt == Accesses->end())
1670 Defs->push_back(Node&: *What);
1671 else
1672 Defs->insert(I: InsertPt->getDefsIterator(), Node&: *What);
1673 }
1674 }
1675 BlockNumberingValid.erase(Ptr: BB);
1676}
1677
1678void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
1679 // Keep it in the lookup tables, remove from the lists
1680 removeFromLists(What, ShouldDelete: false);
1681
1682 // Note that moving should implicitly invalidate the optimized state of a
1683 // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
1684 // MemoryDef.
1685 if (auto *MD = dyn_cast<MemoryDef>(Val: What))
1686 MD->resetOptimized();
1687 What->setBlock(BB);
1688}
1689
1690// Move What before Where in the IR. The end result is that What will belong to
1691// the right lists and have the right Block set, but will not otherwise be
1692// correct. It will not have the right defining access, and if it is a def,
1693// things below it will not properly be updated.
1694void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1695 AccessList::iterator Where) {
1696 prepareForMoveTo(What, BB);
1697 insertIntoListsBefore(What, BB, InsertPt: Where);
1698}
1699
1700void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
1701 InsertionPlace Point) {
1702 if (isa<MemoryPhi>(Val: What)) {
1703 assert(Point == Beginning &&
1704 "Can only move a Phi at the beginning of the block");
1705 // Update lookup table entry
1706 ValueToMemoryAccess.erase(Val: What->getBlock());
1707 bool Inserted = ValueToMemoryAccess.insert(KV: {BB, What}).second;
1708 (void)Inserted;
1709 assert(Inserted && "Cannot move a Phi to a block that already has one");
1710 }
1711
1712 prepareForMoveTo(What, BB);
1713 insertIntoListsForBlock(NewAccess: What, BB, Point);
1714}
1715
1716MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1717 assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
1718 MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
1719 // Phis are always placed at the front of the block.
1720 insertIntoListsForBlock(NewAccess: Phi, BB, Point: Beginning);
1721 ValueToMemoryAccess[BB] = Phi;
1722 return Phi;
1723}
1724
1725MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1726 MemoryAccess *Definition,
1727 const MemoryUseOrDef *Template,
1728 bool CreationMustSucceed) {
1729 assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
1730 MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
1731 if (CreationMustSucceed)
1732 assert(NewAccess != nullptr && "Tried to create a memory access for a "
1733 "non-memory touching instruction");
1734 if (NewAccess) {
1735 assert((!Definition || !isa<MemoryUse>(Definition)) &&
1736 "A use cannot be a defining access");
1737 NewAccess->setDefiningAccess(DMA: Definition);
1738 }
1739 return NewAccess;
1740}
1741
1742// Return true if the instruction has ordering constraints.
1743// Note specifically that this only considers stores and loads
1744// because others are still considered ModRef by getModRefInfo.
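// For example (illustrative IR), both of these loads are "ordered" and are
// therefore modeled as MemoryDefs even though they only read memory:
//   %v = load volatile i32, ptr %p
//   %w = load atomic i32, ptr %p acquire, align 4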
1745static inline bool isOrdered(const Instruction *I) {
1746 if (auto *SI = dyn_cast<StoreInst>(Val: I)) {
1747 if (!SI->isUnordered())
1748 return true;
1749 } else if (auto *LI = dyn_cast<LoadInst>(Val: I)) {
1750 if (!LI->isUnordered())
1751 return true;
1752 }
1753 return false;
1754}
1755
1756/// Helper function to create new memory accesses
1757template <typename AliasAnalysisType>
1758MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
1759 AliasAnalysisType *AAP,
1760 const MemoryUseOrDef *Template) {
1761 // The assume intrinsic has a control dependency which we model by claiming
1762 // that it writes arbitrarily. Debuginfo intrinsics may be considered
1763 // clobbers when we have a nonstandard AA pipeline. Ignore these fake memory
1764 // dependencies here.
1765 // FIXME: Replace this special casing with a more accurate modelling of
1766 // assume's control dependency.
1767 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Val: I)) {
1768 switch (II->getIntrinsicID()) {
1769 default:
1770 break;
1771 case Intrinsic::allow_runtime_check:
1772 case Intrinsic::allow_ubsan_check:
1773 case Intrinsic::assume:
1774 case Intrinsic::experimental_noalias_scope_decl:
1775 case Intrinsic::pseudoprobe:
1776 return nullptr;
1777 }
1778 }
1779
1780 // Using a nonstandard AA pipeline might leave us with unexpected modref
1781 // results for I, so add a check to avoid modeling instructions that neither
1782 // read from nor write to memory. This is necessary for correctness.
1783 if (!I->mayReadFromMemory() && !I->mayWriteToMemory())
1784 return nullptr;
1785
1786 bool Def, Use;
1787 if (Template) {
1788 Def = isa<MemoryDef>(Val: Template);
1789 Use = isa<MemoryUse>(Val: Template);
1790#if !defined(NDEBUG)
1791 ModRefInfo ModRef = AAP->getModRefInfo(I, std::nullopt);
1792 bool DefCheck, UseCheck;
1793 DefCheck = isModSet(ModRef) || isOrdered(I);
1794 UseCheck = isRefSet(ModRef);
1795 // Memory accesses should only be reduced, never increased, since AA might
1796 // simply return better results as a result of some transformations.
1797 assert((Def == DefCheck || !DefCheck) &&
1798 "Memory accesses should only be reduced");
1799 if (!Def && Use != UseCheck) {
1800 // New Access should not have more power than template access
1801 assert(!UseCheck && "Invalid template");
1802 }
1803#endif
1804 } else {
1805 // Find out what effect this instruction has on memory.
1806 ModRefInfo ModRef = AAP->getModRefInfo(I, std::nullopt);
1807 // The isOrdered check is used to ensure that volatiles end up as defs
1808 // (atomics end up as ModRef right now anyway). Until we separate the
1809 // ordering chain from the memory chain, this enables people to see at least
1810 // some relative ordering to volatiles. Note that getClobberingMemoryAccess
1811 // will still give an answer that bypasses other volatile loads. TODO:
1812 // Separate memory aliasing and ordering into two different chains so that
1813 // we can precisely represent both "what memory will this read/write/is
1814 // clobbered by" and "what instructions can I move this past".
1815 Def = isModSet(MRI: ModRef) || isOrdered(I);
1816 Use = isRefSet(MRI: ModRef);
1817 }
1818
1819 // It's possible for an instruction to not touch memory at all; we ignore
1820 // such instructions during construction.
1821 if (!Def && !Use)
1822 return nullptr;
1823
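  // Note that an access that both reads and writes memory (e.g. a memcpy or a
  // call that may write) becomes a single MemoryDef; only read-only accesses
  // become MemoryUses.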
1824 MemoryUseOrDef *MUD;
1825 if (Def) {
1826 MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1827 } else {
1828 MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1829 if (isUseTriviallyOptimizableToLiveOnEntry(*AAP, I)) {
1830 MemoryAccess *LiveOnEntry = getLiveOnEntryDef();
1831 MUD->setOptimized(LiveOnEntry);
1832 }
1833 }
1834 ValueToMemoryAccess[I] = MUD;
1835 return MUD;
1836}
1837
1838/// Properly remove \p MA from all of MemorySSA's lookup tables.
1839void MemorySSA::removeFromLookups(MemoryAccess *MA) {
1840 assert(MA->use_empty() &&
1841 "Trying to remove memory access that still has uses");
1842 BlockNumbering.erase(Val: MA);
1843 if (auto *MUD = dyn_cast<MemoryUseOrDef>(Val: MA))
1844 MUD->setDefiningAccess(DMA: nullptr);
1845 // Invalidate our walker's cache if necessary
1846 if (!isa<MemoryUse>(Val: MA))
1847 getWalker()->invalidateInfo(MA);
1848
1849 Value *MemoryInst;
1850 if (const auto *MUD = dyn_cast<MemoryUseOrDef>(Val: MA))
1851 MemoryInst = MUD->getMemoryInst();
1852 else
1853 MemoryInst = MA->getBlock();
1854
1855 auto VMA = ValueToMemoryAccess.find(Val: MemoryInst);
1856 if (VMA->second == MA)
1857 ValueToMemoryAccess.erase(I: VMA);
1858}
1859
1860/// Properly remove \p MA from all of MemorySSA's lists.
1861///
1862/// Because of the way the intrusive list and use lists work, it is important to
1863/// do removal in the right order.
1864/// ShouldDelete defaults to true, and will cause the memory access to also be
1865/// deleted, not just removed.
1866void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
1867 BasicBlock *BB = MA->getBlock();
1868 // The access list owns the reference, so we erase it from the non-owning list
1869 // first.
1870 if (!isa<MemoryUse>(Val: MA)) {
1871 auto DefsIt = PerBlockDefs.find(Val: BB);
1872 std::unique_ptr<DefsList> &Defs = DefsIt->second;
1873 Defs->remove(N&: *MA);
1874 if (Defs->empty())
1875 PerBlockDefs.erase(I: DefsIt);
1876 }
1877
1878 // The erase call here will delete it. If we don't want it deleted, we call
1879 // remove instead.
1880 auto AccessIt = PerBlockAccesses.find(Val: BB);
1881 std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1882 if (ShouldDelete)
1883 Accesses->erase(IT: MA);
1884 else
1885 Accesses->remove(IT: MA);
1886
1887 if (Accesses->empty()) {
1888 PerBlockAccesses.erase(I: AccessIt);
1889 BlockNumberingValid.erase(Ptr: BB);
1890 }
1891}
1892
1893void MemorySSA::print(raw_ostream &OS) const {
1894 MemorySSAAnnotatedWriter Writer(this);
1895 Function *F = this->F;
1896 if (L)
1897 F = L->getHeader()->getParent();
1898 F->print(OS, AAW: &Writer);
1899}
1900
1901#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1902LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
1903#endif
1904
1905void MemorySSA::verifyMemorySSA(VerificationLevel VL) const {
1906#if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
1907 VL = VerificationLevel::Full;
1908#endif
1909
1910#ifndef NDEBUG
1911 if (F) {
1912 auto Blocks = iterator_range(F->begin(), F->end());
1913 verifyOrderingDominationAndDefUses(Blocks, VL);
1914 verifyDominationNumbers(Blocks);
1915 if (VL == VerificationLevel::Full)
1916 verifyPrevDefInPhis(Blocks);
1917 } else {
1918 assert(L && "must either have loop or function");
1919 auto Blocks =
1920 map_range(L->blocks(), [](const BasicBlock *BB) -> BasicBlock & {
1921 return *const_cast<BasicBlock *>(BB);
1922 });
1923 verifyOrderingDominationAndDefUses(Blocks, VL);
1924 verifyDominationNumbers(Blocks);
1925 if (VL == VerificationLevel::Full)
1926 verifyPrevDefInPhis(Blocks);
1927 }
1928#endif
1929 // Previously, the verification also checked that the clobbering access
1930 // cached by MemorySSA is the same as the clobbering access found by a later
1931 // query to AA. This does not hold true in general, due to the current
1932 // fragility of BasicAA, which has arbitrary caps on the things it analyzes
1933 // before giving up. As a result, correct transformations can lead to BasicAA
1934 // returning different alias answers before and after that transformation.
1935 // Invalidating MemorySSA is not an option: because the BasicAA results can be
1936 // so unstable, in the worst case we'd need to rebuild MemorySSA from scratch
1937 // after every transformation, which defeats the purpose of using it. For an
1938 // example, see test4 added in D51960.
1939}
1940
1941template <typename IterT>
1942void MemorySSA::verifyPrevDefInPhis(IterT Blocks) const {
1943 for (const BasicBlock &BB : Blocks) {
1944 if (MemoryPhi *Phi = getMemoryAccess(BB: &BB)) {
1945 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1946 auto *Pred = Phi->getIncomingBlock(I);
1947 auto *IncAcc = Phi->getIncomingValue(I);
1948 // If Pred has no unreachable predecessors, get the last def looking at
1949 // IDoms. If, while walking IDoms, any of these has an unreachable
1950 // predecessor, then the incoming def can be any access.
1951 if (auto *DTNode = DT->getNode(BB: Pred)) {
1952 while (DTNode) {
1953 if (auto *DefList = getBlockDefs(BB: DTNode->getBlock())) {
1954 auto *LastAcc = &*(--DefList->end());
1955 assert(LastAcc == IncAcc &&
1956 "Incorrect incoming access into phi.");
1957 (void)IncAcc;
1958 (void)LastAcc;
1959 break;
1960 }
1961 DTNode = DTNode->getIDom();
1962 }
1963 } else {
1964 // If Pred has unreachable predecessors, but has at least a Def, the
1965 // incoming access can be the last Def in Pred, or it could have been
1966 // optimized to LoE. After an update, though, the LoE may have been
1967 // replaced by another access, so IncAcc may be any access.
1968 // If Pred has unreachable predecessors and no Defs, incoming access
1969 // should be LoE; However, after an update, it may be any access.
1970 }
1971 }
1972 }
1973 }
1974}
1975
1976/// Verify that all of the blocks we believe to have valid domination numbers
1977/// actually have valid domination numbers.
1978template <typename IterT>
1979void MemorySSA::verifyDominationNumbers(IterT Blocks) const {
1980 if (BlockNumberingValid.empty())
1981 return;
1982
1983 SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
1984 for (const BasicBlock &BB : Blocks) {
1985 if (!ValidBlocks.count(Ptr: &BB))
1986 continue;
1987
1988 ValidBlocks.erase(Ptr: &BB);
1989
1990 const AccessList *Accesses = getBlockAccesses(BB: &BB);
1991 // It's correct to say an empty block has valid numbering.
1992 if (!Accesses)
1993 continue;
1994
1995 // Block numbering starts at 1.
1996 unsigned long LastNumber = 0;
1997 for (const MemoryAccess &MA : *Accesses) {
1998 auto ThisNumberIter = BlockNumbering.find(Val: &MA);
1999 assert(ThisNumberIter != BlockNumbering.end() &&
2000 "MemoryAccess has no domination number in a valid block!");
2001
2002 unsigned long ThisNumber = ThisNumberIter->second;
2003 assert(ThisNumber > LastNumber &&
2004 "Domination numbers should be strictly increasing!");
2005 (void)LastNumber;
2006 LastNumber = ThisNumber;
2007 }
2008 }
2009
2010 assert(ValidBlocks.empty() &&
2011 "All valid BasicBlocks should exist in F -- dangling pointers?");
2012}
2013
2014/// Verify ordering: the order and existence of MemoryAccesses matches the
2015/// order and existence of memory affecting instructions.
2016/// Verify domination: each definition dominates all of its uses.
2017 /// Verify def-uses: the immediate use information - walk all the memory
2018 /// accesses, verifying that, for each use, it appears in the appropriate
2019 /// def's use list.
2020template <typename IterT>
2021void MemorySSA::verifyOrderingDominationAndDefUses(IterT Blocks,
2022 VerificationLevel VL) const {
2023 // Walk all the blocks, comparing what the lookups think and what the access
2024 // lists think, as well as the order in the blocks vs the order in the access
2025 // lists.
2026 SmallVector<MemoryAccess *, 32> ActualAccesses;
2027 SmallVector<MemoryAccess *, 32> ActualDefs;
2028 for (BasicBlock &B : Blocks) {
2029 const AccessList *AL = getBlockAccesses(BB: &B);
2030 const auto *DL = getBlockDefs(BB: &B);
2031 MemoryPhi *Phi = getMemoryAccess(BB: &B);
2032 if (Phi) {
2033 // Verify ordering.
2034 ActualAccesses.push_back(Elt: Phi);
2035 ActualDefs.push_back(Elt: Phi);
2036 // Verify domination
2037 for (const Use &U : Phi->uses()) {
2038 assert(dominates(Phi, U) && "Memory PHI does not dominate its uses");
2039 (void)U;
2040 }
2041 // Verify def-uses for full verify.
2042 if (VL == VerificationLevel::Full) {
2043 assert(Phi->getNumOperands() == pred_size(&B) &&
2044 "Incomplete MemoryPhi Node");
2045 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
2046 verifyUseInDefs(Phi->getIncomingValue(I), Phi);
2047 assert(is_contained(predecessors(&B), Phi->getIncomingBlock(I)) &&
2048 "Incoming phi block not a block predecessor");
2049 }
2050 }
2051 }
2052
2053 for (Instruction &I : B) {
2054 MemoryUseOrDef *MA = getMemoryAccess(I: &I);
2055 assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
2056 "We have memory affecting instructions "
2057 "in this block but they are not in the "
2058 "access list or defs list");
2059 if (MA) {
2060 // Verify ordering.
2061 ActualAccesses.push_back(Elt: MA);
2062 if (MemoryAccess *MD = dyn_cast<MemoryDef>(Val: MA)) {
2063 // Verify ordering.
2064 ActualDefs.push_back(Elt: MA);
2065 // Verify domination.
2066 for (const Use &U : MD->uses()) {
2067 assert(dominates(MD, U) &&
2068 "Memory Def does not dominate it's uses");
2069 (void)U;
2070 }
2071 }
2072 // Verify def-uses for full verify.
2073 if (VL == VerificationLevel::Full)
2074 verifyUseInDefs(MA->getDefiningAccess(), MA);
2075 }
2076 }
2077 // Either we hit the assert above, we really have no accesses, or we have
2078 // both accesses and an access list. The same holds for defs.
2079 if (!AL && !DL)
2080 continue;
2081 // Verify ordering.
2082 assert(AL->size() == ActualAccesses.size() &&
2083 "We don't have the same number of accesses in the block as on the "
2084 "access list");
2085 assert((DL || ActualDefs.size() == 0) &&
2086 "Either we should have a defs list, or we should have no defs");
2087 assert((!DL || DL->size() == ActualDefs.size()) &&
2088 "We don't have the same number of defs in the block as on the "
2089 "def list");
2090 auto ALI = AL->begin();
2091 auto AAI = ActualAccesses.begin();
2092 while (ALI != AL->end() && AAI != ActualAccesses.end()) {
2093 assert(&*ALI == *AAI && "Not the same accesses in the same order");
2094 ++ALI;
2095 ++AAI;
2096 }
2097 ActualAccesses.clear();
2098 if (DL) {
2099 auto DLI = DL->begin();
2100 auto ADI = ActualDefs.begin();
2101 while (DLI != DL->end() && ADI != ActualDefs.end()) {
2102 assert(&*DLI == *ADI && "Not the same defs in the same order");
2103 ++DLI;
2104 ++ADI;
2105 }
2106 }
2107 ActualDefs.clear();
2108 }
2109}
2110
2111/// Verify the def-use lists in MemorySSA, by verifying that \p Use
2112/// appears in the use list of \p Def.
2113void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
2114 // The live on entry use may cause us to get a NULL def here
2115 if (!Def)
2116 assert(isLiveOnEntryDef(Use) &&
2117 "Null def but use not point to live on entry def");
2118 else
2119 assert(is_contained(Def->users(), Use) &&
2120 "Did not find use in def's use list");
2121}
2122
2123/// Perform a local numbering on blocks so that instruction ordering can be
2124/// determined in constant time.
2125/// TODO: We currently just number in order. If we numbered by N, we could
2126/// allow at least N-1 sequences of insertBefore or insertAfter (and at least
2127/// log2(N) sequences of mixed before and after) without needing to invalidate
2128/// the numbering.
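/// (For example, numbering accesses 10, 20, 30, ... would let a single
/// insertBefore reuse an unused number such as 15 without invalidating the
/// block's numbering. Illustrative only; we do not do this today.)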
2129void MemorySSA::renumberBlock(const BasicBlock *B) const {
2130 // The pre-increment ensures the numbers really start at 1.
2131 unsigned long CurrentNumber = 0;
2132 const AccessList *AL = getBlockAccesses(BB: B);
2133 assert(AL != nullptr && "Asking to renumber an empty block");
2134 for (const auto &I : *AL)
2135 BlockNumbering[&I] = ++CurrentNumber;
2136 BlockNumberingValid.insert(Ptr: B);
2137}
2138
2139/// Determine, for two memory accesses in the same block,
2140/// whether \p Dominator dominates \p Dominatee.
2141/// \returns True if \p Dominator dominates \p Dominatee.
2142bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
2143 const MemoryAccess *Dominatee) const {
2144 const BasicBlock *DominatorBlock = Dominator->getBlock();
2145
2146 assert((DominatorBlock == Dominatee->getBlock()) &&
2147 "Asking for local domination when accesses are in different blocks!");
2148 // A node dominates itself.
2149 if (Dominatee == Dominator)
2150 return true;
2151
2152 // When Dominatee is defined on function entry, it is not dominated by another
2153 // memory access.
2154 if (isLiveOnEntryDef(MA: Dominatee))
2155 return false;
2156
2157 // When Dominator is defined on function entry, it dominates the other memory
2158 // access.
2159 if (isLiveOnEntryDef(MA: Dominator))
2160 return true;
2161
2162 if (!BlockNumberingValid.count(Ptr: DominatorBlock))
2163 renumberBlock(B: DominatorBlock);
2164
2165 unsigned long DominatorNum = BlockNumbering.lookup(Val: Dominator);
2166 // All numbers start at 1
2167 assert(DominatorNum != 0 && "Block was not numbered properly");
2168 unsigned long DominateeNum = BlockNumbering.lookup(Val: Dominatee);
2169 assert(DominateeNum != 0 && "Block was not numbered properly");
2170 return DominatorNum < DominateeNum;
2171}
2172
2173bool MemorySSA::dominates(const MemoryAccess *Dominator,
2174 const MemoryAccess *Dominatee) const {
2175 if (Dominator == Dominatee)
2176 return true;
2177
2178 if (isLiveOnEntryDef(MA: Dominatee))
2179 return false;
2180
2181 if (Dominator->getBlock() != Dominatee->getBlock())
2182 return DT->dominates(A: Dominator->getBlock(), B: Dominatee->getBlock());
2183 return locallyDominates(Dominator, Dominatee);
2184}
2185
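// For a use that is a MemoryPhi operand, domination is checked against the
// operand's incoming block rather than the phi's own block: for example, a
// def in %if.then dominates the {%if.then, ...} operand of a phi in %merge
// even though it may not dominate %merge itself.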
2186bool MemorySSA::dominates(const MemoryAccess *Dominator,
2187 const Use &Dominatee) const {
2188 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Val: Dominatee.getUser())) {
2189 BasicBlock *UseBB = MP->getIncomingBlock(U: Dominatee);
2190 // The def must dominate the incoming block of the phi.
2191 if (UseBB != Dominator->getBlock())
2192 return DT->dominates(A: Dominator->getBlock(), B: UseBB);
2193 // If the UseBB and the DefBB are the same, compare locally.
2194 return locallyDominates(Dominator, Dominatee: cast<MemoryAccess>(Val: Dominatee));
2195 }
2196 // If it's not a PHI node use, the normal dominates can already handle it.
2197 return dominates(Dominator, Dominatee: cast<MemoryAccess>(Val: Dominatee.getUser()));
2198}
2199
2200void MemorySSA::ensureOptimizedUses() {
2201 if (IsOptimized)
2202 return;
2203
2204 BatchAAResults BatchAA(*AA);
2205 ClobberWalkerBase WalkerBase(this, DT);
2206 CachingWalker WalkerLocal(this, &WalkerBase);
2207 OptimizeUses(this, &WalkerLocal, &BatchAA, DT).optimizeUses();
2208 IsOptimized = true;
2209}
2210
2211void MemoryAccess::print(raw_ostream &OS) const {
2212 switch (getValueID()) {
2213 case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
2214 case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
2215 case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
2216 }
2217 llvm_unreachable("invalid value id");
2218}
2219
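// Example printed forms as they appear in the MemorySSA annotations (access
// numbers are illustrative):
//   1 = MemoryDef(liveOnEntry)   ; def whose defining access is liveOnEntry
//   2 = MemoryDef(1)->1          ; optimized def, known clobber is access 1
//   MemoryUse(2)                 ; use whose defining access is access 2
//   3 = MemoryPhi({entry,1},{if.then,2})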
2220void MemoryDef::print(raw_ostream &OS) const {
2221 MemoryAccess *UO = getDefiningAccess();
2222
2223 auto printID = [&OS](MemoryAccess *A) {
2224 if (A && A->getID())
2225 OS << A->getID();
2226 else
2227 OS << LiveOnEntryStr;
2228 };
2229
2230 OS << getID() << " = MemoryDef(";
2231 printID(UO);
2232 OS << ")";
2233
2234 if (isOptimized()) {
2235 OS << "->";
2236 printID(getOptimized());
2237 }
2238}
2239
2240void MemoryPhi::print(raw_ostream &OS) const {
2241 ListSeparator LS(",");
2242 OS << getID() << " = MemoryPhi(";
2243 for (const auto &Op : operands()) {
2244 BasicBlock *BB = getIncomingBlock(U: Op);
2245 MemoryAccess *MA = cast<MemoryAccess>(Val: Op);
2246
2247 OS << LS << '{';
2248 if (BB->hasName())
2249 OS << BB->getName();
2250 else
2251 BB->printAsOperand(O&: OS, PrintType: false);
2252 OS << ',';
2253 if (unsigned ID = MA->getID())
2254 OS << ID;
2255 else
2256 OS << LiveOnEntryStr;
2257 OS << '}';
2258 }
2259 OS << ')';
2260}
2261
2262void MemoryUse::print(raw_ostream &OS) const {
2263 MemoryAccess *UO = getDefiningAccess();
2264 OS << "MemoryUse(";
2265 if (UO && UO->getID())
2266 OS << UO->getID();
2267 else
2268 OS << LiveOnEntryStr;
2269 OS << ')';
2270}
2271
2272void MemoryAccess::dump() const {
2273// Cannot completely remove virtual function even in release mode.
2274#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2275 print(dbgs());
2276 dbgs() << "\n";
2277#endif
2278}
2279
2280class DOTFuncMSSAInfo {
2281private:
2282 const Function &F;
2283 MemorySSAAnnotatedWriter MSSAWriter;
2284
2285public:
2286 DOTFuncMSSAInfo(const Function &F, MemorySSA &MSSA)
2287 : F(F), MSSAWriter(&MSSA) {}
2288
2289 const Function *getFunction() { return &F; }
2290 MemorySSAAnnotatedWriter &getWriter() { return MSSAWriter; }
2291};
2292
2293namespace llvm {
2294
2295template <>
2296struct GraphTraits<DOTFuncMSSAInfo *> : public GraphTraits<const BasicBlock *> {
2297 static NodeRef getEntryNode(DOTFuncMSSAInfo *CFGInfo) {
2298 return &(CFGInfo->getFunction()->getEntryBlock());
2299 }
2300
2301 // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
2302 using nodes_iterator = pointer_iterator<Function::const_iterator>;
2303
2304 static nodes_iterator nodes_begin(DOTFuncMSSAInfo *CFGInfo) {
2305 return nodes_iterator(CFGInfo->getFunction()->begin());
2306 }
2307
2308 static nodes_iterator nodes_end(DOTFuncMSSAInfo *CFGInfo) {
2309 return nodes_iterator(CFGInfo->getFunction()->end());
2310 }
2311
2312 static size_t size(DOTFuncMSSAInfo *CFGInfo) {
2313 return CFGInfo->getFunction()->size();
2314 }
2315};
2316
2317template <>
2318struct DOTGraphTraits<DOTFuncMSSAInfo *> : public DefaultDOTGraphTraits {
2319
2320 DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {}
2321
2322 static std::string getGraphName(DOTFuncMSSAInfo *CFGInfo) {
2323 return "MSSA CFG for '" + CFGInfo->getFunction()->getName().str() +
2324 "' function";
2325 }
2326
2327 std::string getNodeLabel(const BasicBlock *Node, DOTFuncMSSAInfo *CFGInfo) {
2328 return DOTGraphTraits<DOTFuncInfo *>::getCompleteNodeLabel(
2329 Node, nullptr,
2330 HandleBasicBlock: [CFGInfo](raw_string_ostream &OS, const BasicBlock &BB) -> void {
2331 BB.print(OS, AAW: &CFGInfo->getWriter(), ShouldPreserveUseListOrder: true, IsForDebug: true);
2332 },
2333 HandleComment: [](std::string &S, unsigned &I, unsigned Idx) -> void {
2334 std::string Str = S.substr(pos: I, n: Idx - I);
2335 StringRef SR = Str;
2336 if (SR.count(Str: " = MemoryDef(") || SR.count(Str: " = MemoryPhi(") ||
2337 SR.count(Str: "MemoryUse("))
2338 return;
2339 DOTGraphTraits<DOTFuncInfo *>::eraseComment(OutStr&: S, I, Idx);
2340 });
2341 }
2342
2343 static std::string getEdgeSourceLabel(const BasicBlock *Node,
2344 const_succ_iterator I) {
2345 return DOTGraphTraits<DOTFuncInfo *>::getEdgeSourceLabel(Node, I);
2346 }
2347
2348 /// No edge attributes (e.g. raw PGO branch weights) are displayed.
2349 std::string getEdgeAttributes(const BasicBlock *Node, const_succ_iterator I,
2350 DOTFuncMSSAInfo *CFGInfo) {
2351 return "";
2352 }
2353
2354 std::string getNodeAttributes(const BasicBlock *Node,
2355 DOTFuncMSSAInfo *CFGInfo) {
2356 return getNodeLabel(Node, CFGInfo).find(c: ';') != std::string::npos
2357 ? "style=filled, fillcolor=lightpink"
2358 : "";
2359 }
2360};
2361
2362} // namespace llvm
2363
2364AnalysisKey MemorySSAAnalysis::Key;
2365
2366MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
2367 FunctionAnalysisManager &AM) {
2368 auto &DT = AM.getResult<DominatorTreeAnalysis>(IR&: F);
2369 auto &AA = AM.getResult<AAManager>(IR&: F);
2370 return MemorySSAAnalysis::Result(std::make_unique<MemorySSA>(args&: F, args: &AA, args: &DT));
2371}
2372
2373bool MemorySSAAnalysis::Result::invalidate(
2374 Function &F, const PreservedAnalyses &PA,
2375 FunctionAnalysisManager::Invalidator &Inv) {
2376 auto PAC = PA.getChecker<MemorySSAAnalysis>();
2377 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
2378 Inv.invalidate<AAManager>(IR&: F, PA) ||
2379 Inv.invalidate<DominatorTreeAnalysis>(IR&: F, PA);
2380}
2381
2382PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
2383 FunctionAnalysisManager &AM) {
2384 auto &MSSA = AM.getResult<MemorySSAAnalysis>(IR&: F).getMSSA();
2385 if (EnsureOptimizedUses)
2386 MSSA.ensureOptimizedUses();
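  // With -dot-cfg-mssa=<file> set, the annotated CFG is written out as a dot
  // graph instead of being printed to the stream (illustrative invocation:
  // "opt -passes='print<memoryssa>' -dot-cfg-mssa=mssa.dot").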
2387 if (DotCFGMSSA != "") {
2388 DOTFuncMSSAInfo CFGInfo(F, MSSA);
2389 WriteGraph(G: &CFGInfo, Name: "", ShortNames: false, Title: "MSSA", Filename: DotCFGMSSA);
2390 } else {
2391 OS << "MemorySSA for function: " << F.getName() << "\n";
2392 MSSA.print(OS);
2393 }
2394
2395 return PreservedAnalyses::all();
2396}
2397
2398PreservedAnalyses MemorySSAWalkerPrinterPass::run(Function &F,
2399 FunctionAnalysisManager &AM) {
2400 auto &MSSA = AM.getResult<MemorySSAAnalysis>(IR&: F).getMSSA();
2401 OS << "MemorySSA (walker) for function: " << F.getName() << "\n";
2402 MemorySSAWalkerAnnotatedWriter Writer(&MSSA);
2403 F.print(OS, AAW: &Writer);
2404
2405 return PreservedAnalyses::all();
2406}
2407
2408PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
2409 FunctionAnalysisManager &AM) {
2410 AM.getResult<MemorySSAAnalysis>(IR&: F).getMSSA().verifyMemorySSA();
2411
2412 return PreservedAnalyses::all();
2413}
2414
2415char MemorySSAWrapperPass::ID = 0;
2416
2417MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
2418 initializeMemorySSAWrapperPassPass(Registry&: *PassRegistry::getPassRegistry());
2419}
2420
2421void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
2422
2423void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2424 AU.setPreservesAll();
2425 AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2426 AU.addRequiredTransitive<AAResultsWrapperPass>();
2427}
2428
2429bool MemorySSAWrapperPass::runOnFunction(Function &F) {
2430 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2431 auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2432 MSSA.reset(p: new MemorySSA(F, &AA, &DT));
2433 return false;
2434}
2435
2436void MemorySSAWrapperPass::verifyAnalysis() const {
2437 if (VerifyMemorySSA)
2438 MSSA->verifyMemorySSA();
2439}
2440
2441void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
2442 MSSA->print(OS);
2443}
2444
2445MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
2446
2447/// Walk the use-def chains starting at \p StartingAccess and find
2448/// the MemoryAccess that actually clobbers Loc.
2449///
2450/// \returns our clobbering memory access
2451MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
2452 MemoryAccess *StartingAccess, const MemoryLocation &Loc,
2453 BatchAAResults &BAA, unsigned &UpwardWalkLimit) {
2454 assert(!isa<MemoryUse>(StartingAccess) && "Use cannot be defining access");
2455
2456 // If location is undefined, conservatively return starting access.
2457 if (Loc.Ptr == nullptr)
2458 return StartingAccess;
2459
2460 Instruction *I = nullptr;
2461 if (auto *StartingUseOrDef = dyn_cast<MemoryUseOrDef>(Val: StartingAccess)) {
2462 if (MSSA->isLiveOnEntryDef(MA: StartingUseOrDef))
2463 return StartingUseOrDef;
2464
2465 I = StartingUseOrDef->getMemoryInst();
2466
2467 // Conservatively, fences are always clobbers, so don't perform the walk if
2468 // we hit a fence.
2469 if (!isa<CallBase>(Val: I) && I->isFenceLike())
2470 return StartingUseOrDef;
2471 }
2472
2473 UpwardsMemoryQuery Q;
2474 Q.OriginalAccess = StartingAccess;
2475 Q.StartingLoc = Loc;
2476 Q.Inst = nullptr;
2477 Q.IsCall = false;
2478
2479 // Unlike the other function, do not walk to the def of a def, because we are
2480 // handed something we already believe is the clobbering access.
2481 // We never set SkipSelf to true in Q in this method.
2482 MemoryAccess *Clobber =
2483 Walker.findClobber(BAA, Start: StartingAccess, Q, UpWalkLimit&: UpwardWalkLimit);
2484 LLVM_DEBUG({
2485 dbgs() << "Clobber starting at access " << *StartingAccess << "\n";
2486 if (I)
2487 dbgs() << " for instruction " << *I << "\n";
2488 dbgs() << " is " << *Clobber << "\n";
2489 });
2490 return Clobber;
2491}
2492
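// Search for a dominating load/store tagged with !invariant.group on the same
// (cast- and zero-GEP-stripped) pointer as I. invariant.group promises that
// the value at that pointer does not change between such accesses, so the
// dominating instruction can serve as I's clobber. Illustrative IR:
//   store i32 1, ptr %p, !invariant.group !0
//   ...
//   %v = load i32, ptr %p, !invariant.group !0   ; clobbered by the store above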
2493static const Instruction *
2494getInvariantGroupClobberingInstruction(Instruction &I, DominatorTree &DT) {
2495 if (!I.hasMetadata(KindID: LLVMContext::MD_invariant_group) || I.isVolatile())
2496 return nullptr;
2497
2498 // We consider bitcasts and zero GEPs to be the same pointer value. Start by
2499 // stripping bitcasts and zero GEPs, then we will recursively look at loads
2500 // and stores through bitcasts and zero GEPs.
2501 Value *PointerOperand = getLoadStorePointerOperand(V: &I)->stripPointerCasts();
2502
2503 // It's not safe to walk the use list of a global value because function
2504 // passes aren't allowed to look outside their functions.
2505 // FIXME: this could be fixed by filtering instructions from outside of the
2506 // current function.
2507 if (isa<Constant>(Val: PointerOperand))
2508 return nullptr;
2509
2510 // Queue to process all pointers that are equivalent to load operand.
2511 SmallVector<const Value *, 8> PointerUsesQueue;
2512 PointerUsesQueue.push_back(Elt: PointerOperand);
2513
2514 const Instruction *MostDominatingInstruction = &I;
2515
2516 // FIXME: This loop is O(n^2) because dominates can be O(n) and in the worst
2517 // case we will see all the instructions. It may not matter in practice. If
2518 // it does, we will have to support MemorySSA construction and updates.
2519 while (!PointerUsesQueue.empty()) {
2520 const Value *Ptr = PointerUsesQueue.pop_back_val();
2521 assert(Ptr && !isa<GlobalValue>(Ptr) &&
2522 "Null or GlobalValue should not be inserted");
2523
2524 for (const User *Us : Ptr->users()) {
2525 auto *U = dyn_cast<Instruction>(Val: Us);
2526 if (!U || U == &I || !DT.dominates(Def: U, User: MostDominatingInstruction))
2527 continue;
2528
2529 // Add bitcasts and zero GEPs to queue.
2530 if (isa<BitCastInst>(Val: U)) {
2531 PointerUsesQueue.push_back(Elt: U);
2532 continue;
2533 }
2534 if (auto *GEP = dyn_cast<GetElementPtrInst>(Val: U)) {
2535 if (GEP->hasAllZeroIndices())
2536 PointerUsesQueue.push_back(Elt: U);
2537 continue;
2538 }
2539
2540 // If we hit a load/store with invariant.group metadata and the same
2541 // pointer operand, we can assume that the value pointed to by the
2542 // pointer operand didn't change.
2543 if (U->hasMetadata(KindID: LLVMContext::MD_invariant_group) &&
2544 getLoadStorePointerOperand(V: U) == Ptr && !U->isVolatile()) {
2545 MostDominatingInstruction = U;
2546 }
2547 }
2548 }
2549 return MostDominatingInstruction == &I ? nullptr : MostDominatingInstruction;
2550}
2551
2552MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
2553 MemoryAccess *MA, BatchAAResults &BAA, unsigned &UpwardWalkLimit,
2554 bool SkipSelf, bool UseInvariantGroup) {
2555 auto *StartingAccess = dyn_cast<MemoryUseOrDef>(Val: MA);
2556 // If this is a MemoryPhi, we can't do anything.
2557 if (!StartingAccess)
2558 return MA;
2559
2560 if (UseInvariantGroup) {
2561 if (auto *I = getInvariantGroupClobberingInstruction(
2562 I&: *StartingAccess->getMemoryInst(), DT&: MSSA->getDomTree())) {
2563 assert(isa<LoadInst>(I) || isa<StoreInst>(I));
2564
2565 auto *ClobberMA = MSSA->getMemoryAccess(I);
2566 assert(ClobberMA);
2567 if (isa<MemoryUse>(Val: ClobberMA))
2568 return ClobberMA->getDefiningAccess();
2569 return ClobberMA;
2570 }
2571 }
2572
2573 bool IsOptimized = false;
2574
2575 // If this is an already optimized use or def, return the optimized result.
2576 // Note: Currently, we store the optimized def result in a separate field,
2577 // since we can't use the defining access.
2578 if (StartingAccess->isOptimized()) {
2579 if (!SkipSelf || !isa<MemoryDef>(Val: StartingAccess))
2580 return StartingAccess->getOptimized();
2581 IsOptimized = true;
2582 }
2583
2584 const Instruction *I = StartingAccess->getMemoryInst();
2585 // We can't sanely do anything with fences, since they conservatively
2586 // clobber all memory and have no locations to get pointers from to try to
2587 // disambiguate.
2588 if (!isa<CallBase>(Val: I) && I->isFenceLike())
2589 return StartingAccess;
2590
2591 UpwardsMemoryQuery Q(I, StartingAccess);
2592
2593 if (isUseTriviallyOptimizableToLiveOnEntry(AA&: BAA, I)) {
2594 MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2595 StartingAccess->setOptimized(LiveOnEntry);
2596 return LiveOnEntry;
2597 }
2598
2599 MemoryAccess *OptimizedAccess;
2600 if (!IsOptimized) {
2601 // Start with the thing we already think clobbers this location
2602 MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2603
2604 // At this point, DefiningAccess may be the live on entry def.
2605 // If it is, we will not get a better result.
2606 if (MSSA->isLiveOnEntryDef(MA: DefiningAccess)) {
2607 StartingAccess->setOptimized(DefiningAccess);
2608 return DefiningAccess;
2609 }
2610
2611 OptimizedAccess =
2612 Walker.findClobber(BAA, Start: DefiningAccess, Q, UpWalkLimit&: UpwardWalkLimit);
2613 StartingAccess->setOptimized(OptimizedAccess);
2614 } else
2615 OptimizedAccess = StartingAccess->getOptimized();
2616
2617 LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2618 LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
2619 LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
2620 LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");
2621
2622 MemoryAccess *Result;
2623 if (SkipSelf && isa<MemoryPhi>(Val: OptimizedAccess) &&
2624 isa<MemoryDef>(Val: StartingAccess) && UpwardWalkLimit) {
2625 assert(isa<MemoryDef>(Q.OriginalAccess));
2626 Q.SkipSelfAccess = true;
2627 Result = Walker.findClobber(BAA, Start: OptimizedAccess, Q, UpWalkLimit&: UpwardWalkLimit);
2628 } else
2629 Result = OptimizedAccess;
2630
2631 LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
2632 LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");
2633
2634 return Result;
2635}
2636
2637MemoryAccess *
2638DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA,
2639 BatchAAResults &) {
2640 if (auto *Use = dyn_cast<MemoryUseOrDef>(Val: MA))
2641 return Use->getDefiningAccess();
2642 return MA;
2643}
2644
2645MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2646 MemoryAccess *StartingAccess, const MemoryLocation &, BatchAAResults &) {
2647 if (auto *Use = dyn_cast<MemoryUseOrDef>(Val: StartingAccess))
2648 return Use->getDefiningAccess();
2649 return StartingAccess;
2650}
2651
2652void MemoryPhi::deleteMe(DerivedUser *Self) {
2653 delete static_cast<MemoryPhi *>(Self);
2654}
2655
2656void MemoryDef::deleteMe(DerivedUser *Self) {
2657 delete static_cast<MemoryDef *>(Self);
2658}
2659
2660void MemoryUse::deleteMe(DerivedUser *Self) {
2661 delete static_cast<MemoryUse *>(Self);
2662}
2663
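// Roughly: a pointer is treated as guaranteed loop invariant if, after
// stripping pointer casts, it is a non-instruction value, an instruction in
// the entry block, an alloca, or a GEP with all-constant indices over such a
// base. For example, "getelementptr i8, ptr %alloca, i64 16" qualifies, while
// a GEP indexed by a loop-varying value does not.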
2664bool upward_defs_iterator::IsGuaranteedLoopInvariant(const Value *Ptr) const {
2665 auto IsGuaranteedLoopInvariantBase = [](const Value *Ptr) {
2666 Ptr = Ptr->stripPointerCasts();
2667 if (!isa<Instruction>(Val: Ptr))
2668 return true;
2669 return isa<AllocaInst>(Val: Ptr);
2670 };
2671
2672 Ptr = Ptr->stripPointerCasts();
2673 if (auto *I = dyn_cast<Instruction>(Val: Ptr)) {
2674 if (I->getParent()->isEntryBlock())
2675 return true;
2676 }
2677 if (auto *GEP = dyn_cast<GEPOperator>(Val: Ptr)) {
2678 return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
2679 GEP->hasAllConstantIndices();
2680 }
2681 return IsGuaranteedLoopInvariantBase(Ptr);
2682}
2683