//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on. It builds on
// alias analysis information, and tries to provide a lazy, caching interface to
// a common kind of alias information query.
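//
// As a brief illustration (a hypothetical client sketch, where MD is a
// MemoryDependenceResults instance; real clients are passes such as GVN or
// DSE), a local query typically looks like:
//
//   MemDepResult Dep = MD.getDependency(Inst);
//   if (Dep.isDef())
//     ; // Dep.getInst() produces the queried location's value.
//   else if (Dep.isClobber())
//     ; // Dep.getInst() may write the queried location.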
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memdep"

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr, "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");

// Limit for the number of instructions to scan in a block.
static cl::opt<unsigned> BlockScanLimit(
    "memdep-block-scan-limit", cl::Hidden, cl::init(100),
    cl::desc("The number of instructions to scan in a block in memory "
             "dependency analysis (default = 100)"));

static cl::opt<unsigned>
    BlockNumberLimit("memdep-block-number-limit", cl::Hidden, cl::init(200),
                     cl::desc("The number of blocks to scan during memory "
                              "dependency analysis (default = 200)"));

// Limit on the number of memdep results to process.
static const unsigned int NumResultsLimit = 100;

/// This is a helper function that removes Val from 'Inst's set in ReverseMap.
///
/// If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void
RemoveFromReverseMap(DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>> &ReverseMap,
                     Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>>::iterator InstIt =
      ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!");
  (void)Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}

/// If the given instruction references a specific memory location, fill in Loc
/// with the details, otherwise set Loc.Ptr to null.
///
/// Returns a ModRefInfo value describing the general behavior of the
/// instruction.
static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
                              const TargetLibraryInfo &TLI) {
  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    if (LI->isUnordered()) {
      Loc = MemoryLocation::get(LI);
      return ModRefInfo::Ref;
    }
    if (LI->getOrdering() == AtomicOrdering::Monotonic) {
      Loc = MemoryLocation::get(LI);
      return ModRefInfo::ModRef;
    }
    Loc = MemoryLocation();
    return ModRefInfo::ModRef;
  }

  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isUnordered()) {
      Loc = MemoryLocation::get(SI);
      return ModRefInfo::Mod;
    }
    if (SI->getOrdering() == AtomicOrdering::Monotonic) {
      Loc = MemoryLocation::get(SI);
      return ModRefInfo::ModRef;
    }
    Loc = MemoryLocation();
    return ModRefInfo::ModRef;
  }

  if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
    Loc = MemoryLocation::get(V);
    return ModRefInfo::ModRef;
  }

  if (const CallBase *CB = dyn_cast<CallBase>(Inst)) {
    if (Value *FreedOp = getFreedOperand(CB, &TLI)) {
      // Calls to free() deallocate the entire structure.
      Loc = MemoryLocation::getAfter(FreedOp);
      return ModRefInfo::Mod;
    }
  }

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      Loc = MemoryLocation::getForArgument(II, 1, TLI);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return ModRefInfo::Mod;
    case Intrinsic::invariant_end:
      Loc = MemoryLocation::getForArgument(II, 2, TLI);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return ModRefInfo::Mod;
    case Intrinsic::masked_load:
      Loc = MemoryLocation::getForArgument(II, 0, TLI);
      return ModRefInfo::Ref;
    case Intrinsic::masked_store:
      Loc = MemoryLocation::getForArgument(II, 1, TLI);
      return ModRefInfo::Mod;
    default:
      break;
    }
  }

  // Otherwise, just do the coarse-grained thing that always works.
  if (Inst->mayWriteToMemory())
    return ModRefInfo::ModRef;
  if (Inst->mayReadFromMemory())
    return ModRefInfo::Ref;
  return ModRefInfo::NoModRef;
}

/// Private helper for finding the local dependencies of a call site.
MemDepResult MemoryDependenceResults::getCallDependencyFrom(
    CallBase *Call, bool isReadOnlyCall, BasicBlock::iterator ScanIt,
    BasicBlock *BB) {
  unsigned Limit = getDefaultBlockScanLimit();

  // Walk backwards through the block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = &*--ScanIt;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    // If this inst is a memory op, get the pointer it accessed.
    MemoryLocation Loc;
    ModRefInfo MR = GetLocation(Inst, Loc, TLI);
    if (Loc.Ptr) {
      // A simple instruction.
      if (isModOrRefSet(AA.getModRefInfo(Call, Loc)))
        return MemDepResult::getClobber(Inst);
      continue;
    }

    if (auto *CallB = dyn_cast<CallBase>(Inst)) {
      // If these two calls do not interfere, look past it.
      if (isNoModRef(AA.getModRefInfo(Call, CallB))) {
        // If the two calls are the same, return Inst as a Def, so that
        // Call can be found redundant and eliminated.
        if (isReadOnlyCall && !isModSet(MR) &&
            Call->isIdenticalToWhenDefined(CallB))
          return MemDepResult::getDef(Inst);

        // Otherwise if the two calls don't interact (e.g. CallB is readnone)
        // keep scanning.
        continue;
      } else
        return MemDepResult::getClobber(Inst);
    }

    // If we could not obtain a pointer for the instruction and the instruction
    // touches memory then assume that this is a dependency.
    if (isModOrRefSet(MR))
      return MemDepResult::getClobber(Inst);
  }

  // No dependence found. If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}

MemDepResult MemoryDependenceResults::getPointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit,
    BatchAAResults &BatchAA) {
  MemDepResult InvariantGroupDependency = MemDepResult::getUnknown();
  if (QueryInst != nullptr) {
    if (auto *LI = dyn_cast<LoadInst>(QueryInst)) {
      InvariantGroupDependency = getInvariantGroupPointerDependency(LI, BB);

      if (InvariantGroupDependency.isDef())
        return InvariantGroupDependency;
    }
  }
  MemDepResult SimpleDep = getSimplePointerDependencyFrom(
      MemLoc, isLoad, ScanIt, BB, QueryInst, Limit, BatchAA);
  if (SimpleDep.isDef())
    return SimpleDep;
  // A non-local invariant group dependency indicates a non-local Def (it is
  // only returned as non-local when a non-local Def was found), which is
  // better than a local clobber or anything else.
  if (InvariantGroupDependency.isNonLocal())
    return InvariantGroupDependency;

  assert(InvariantGroupDependency.isUnknown() &&
         "InvariantGroupDependency should be only unknown at this point");
  return SimpleDep;
}

MemDepResult MemoryDependenceResults::getPointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {
  BatchAAResults BatchAA(AA, &EEA);
  return getPointerDependencyFrom(MemLoc, isLoad, ScanIt, BB, QueryInst, Limit,
                                  BatchAA);
}

MemDepResult
MemoryDependenceResults::getInvariantGroupPointerDependency(LoadInst *LI,
                                                            BasicBlock *BB) {

  if (!LI->hasMetadata(LLVMContext::MD_invariant_group))
    return MemDepResult::getUnknown();

  // Take the pointer operand after all casts and zero geps. This way we can
  // search only down the cast graph.
  Value *LoadOperand = LI->getPointerOperand()->stripPointerCasts();

  // It is not safe to walk the use list of a global value, because function
  // passes aren't allowed to look outside their functions.
  // FIXME: this could be fixed by filtering instructions from outside
  // of the current function.
  if (isa<GlobalValue>(LoadOperand))
    return MemDepResult::getUnknown();

  Instruction *ClosestDependency = nullptr;
  // The order of instructions in the use list is unpredictable. To always
  // get the same result, we look for the closest dominating candidate.
  auto GetClosestDependency = [this](Instruction *Best, Instruction *Other) {
    assert(Other && "Must be called with a non-null instruction");
    if (Best == nullptr || DT.dominates(Best, Other))
      return Other;
    return Best;
  };

  for (const Use &Us : LoadOperand->uses()) {
    auto *U = dyn_cast<Instruction>(Us.getUser());
    if (!U || U == LI || !DT.dominates(U, LI))
      continue;

    // If we hit a load/store with the same invariant.group metadata (and the
    // same pointer operand), we can assume that the value pointed to by the
    // pointer operand didn't change.
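    // For example (an illustrative IR sketch, not from a real test case):
    //   store i8 42, ptr %ptr, !invariant.group !0
    //   ...
    //   %a = load i8, ptr %ptr, !invariant.group !0  ; the store is a Def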
    if ((isa<LoadInst>(U) ||
         (isa<StoreInst>(U) &&
          cast<StoreInst>(U)->getPointerOperand() == LoadOperand)) &&
        U->hasMetadata(LLVMContext::MD_invariant_group))
      ClosestDependency = GetClosestDependency(ClosestDependency, U);
  }

  if (!ClosestDependency)
    return MemDepResult::getUnknown();
  if (ClosestDependency->getParent() == BB)
    return MemDepResult::getDef(ClosestDependency);
  // Def(U) can't be returned here because it is non-local. If no local
  // dependency is found, return nonLocal, counting on the user to call
  // getNonLocalPointerDependency, which will return the cached result.
  NonLocalDefsCache.try_emplace(
      LI, NonLocalDepResult(ClosestDependency->getParent(),
                            MemDepResult::getDef(ClosestDependency), nullptr));
  ReverseNonLocalDefsCache[ClosestDependency].insert(LI);
  return MemDepResult::getNonLocal();
}

// Check if SI, which may alias with MemLoc, can be safely skipped. This is
// possible when SI can only must-alias or no-alias MemLoc (no partial
// overlap is possible) and it writes the same value that MemLoc already
// contains (the value was loaded before this store and was not modified in
// between).
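// For example (illustrative IR; assumes nothing writes %p in between):
//   %v = load i32, ptr %p
//   ...
//   store i32 %v, ptr %p   ; stores back the same value, so it can be skipped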
static bool canSkipClobberingStore(const StoreInst *SI,
                                   const MemoryLocation &MemLoc,
                                   Align MemLocAlign, BatchAAResults &BatchAA,
                                   unsigned ScanLimit) {
  if (!MemLoc.Size.hasValue())
    return false;
  if (MemoryLocation::get(SI).Size != MemLoc.Size)
    return false;
  if (MemLoc.Size.isScalable())
    return false;
  if (std::min(MemLocAlign, SI->getAlign()).value() <
      MemLoc.Size.getValue().getKnownMinValue())
    return false;

  auto *LI = dyn_cast<LoadInst>(SI->getValueOperand());
  if (!LI || LI->getParent() != SI->getParent())
    return false;
  if (BatchAA.alias(MemoryLocation::get(LI), MemLoc) != AliasResult::MustAlias)
    return false;
  unsigned NumVisitedInsts = 0;
  for (const Instruction *I = LI; I != SI; I = I->getNextNonDebugInstruction())
    if (++NumVisitedInsts > ScanLimit ||
        isModSet(BatchAA.getModRefInfo(I, MemLoc)))
      return false;

  return true;
}

MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit,
    BatchAAResults &BatchAA) {
  bool isInvariantLoad = false;
  Align MemLocAlign =
      MemLoc.Ptr->getPointerAlignment(BB->getDataLayout());

  unsigned DefaultLimit = getDefaultBlockScanLimit();
  if (!Limit)
    Limit = &DefaultLimit;

  // We must be careful with atomic accesses, as they may allow another thread
  // to touch this location, clobbering it. We are conservative: if the
  // QueryInst is not a simple (non-atomic) memory access, we automatically
  // return getClobber.
  // If it is simple, we know based on the results of
  // "Compiler testing via a theory of sound optimisations in the C11/C++11
  // memory model" in PLDI 2013, that a non-atomic location can only be
  // clobbered between a pair of a release and an acquire action, with no
  // access to the location in between.
  // Here is an example for giving the general intuition behind this rule.
  // In the following code:
  //   store x 0;
  //   release action; [1]
  //   acquire action; [4]
  //   %val = load x;
  // It is unsafe to replace %val by 0 because another thread may be running:
  //   acquire action; [2]
  //   store x 42;
  //   release action; [3]
  // with synchronization from 1 to 2 and from 3 to 4, resulting in %val
  // being 42. A key property of this program however is that if either
  // 1 or 4 were missing, there would be a race between the store of 42
  // and either the store of 0 or the load (making the whole program racy).
  // The paper mentioned above shows that the same property is respected
  // by every program that can detect any optimization of that kind: either
  // it is racy (undefined) or there is a release followed by an acquire
  // between the pair of accesses under consideration.

  // If the load is invariant, we "know" that it doesn't alias *any* write. We
  // do want to respect mustalias results since defs are useful for value
  // forwarding, but any mayalias write can be assumed to be noalias.
  // Arguably, this logic should be pushed inside AliasAnalysis itself.
  if (isLoad && QueryInst)
    if (LoadInst *LI = dyn_cast<LoadInst>(QueryInst)) {
      if (LI->hasMetadata(LLVMContext::MD_invariant_load))
        isInvariantLoad = true;
      MemLocAlign = LI->getAlign();
    }

  // Returns true for a volatile instruction. For a load or store, returns
  // true if its atomic ordering is stronger than AO; for any other
  // instruction, returns true if it may read or write memory.
  auto isComplexForReordering = [](Instruction *I, AtomicOrdering AO) -> bool {
    if (I->isVolatile())
      return true;
    if (auto *LI = dyn_cast<LoadInst>(I))
      return isStrongerThan(LI->getOrdering(), AO);
    if (auto *SI = dyn_cast<StoreInst>(I))
      return isStrongerThan(SI->getOrdering(), AO);
    return I->mayReadOrWriteMemory();
  };

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = &*--ScanIt;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --*Limit;
    if (!*Limit)
      return MemDepResult::getUnknown();

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      Intrinsic::ID ID = II->getIntrinsicID();
      switch (ID) {
      case Intrinsic::lifetime_start: {
        // FIXME: This only considers queries directly on the invariant-tagged
        // pointer, not on query pointers that are indexed off of them. It'd
        // be nice to handle that at some point (the right approach is to use
        // GetPointerBaseWithConstantOffset).
        MemoryLocation ArgLoc = MemoryLocation::getAfter(II->getArgOperand(1));
        if (BatchAA.isMustAlias(ArgLoc, MemLoc))
          return MemDepResult::getDef(II);
        continue;
      }
      case Intrinsic::masked_load:
      case Intrinsic::masked_store: {
        MemoryLocation Loc;
        /*ModRefInfo MR =*/ GetLocation(II, Loc, TLI);
        AliasResult R = BatchAA.alias(Loc, MemLoc);
        if (R == AliasResult::NoAlias)
          continue;
        if (R == AliasResult::MustAlias)
          return MemDepResult::getDef(II);
        if (ID == Intrinsic::masked_load)
          continue;
        return MemDepResult::getClobber(II);
      }
      }
    }

    // Values depend on loads if the pointers are must aliased. This means
    // that a load depends on another must aliased load from the same value.
    // One exception is atomic loads: a value can depend on an atomic load that
    // it does not alias with when this atomic load indicates that another
    // thread may be accessing the location.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses, for example, can
      // be safely reordered with volatile accesses.
      if (LI->isVolatile()) {
        if (!QueryInst)
          // Original QueryInst *may* be volatile.
          return MemDepResult::getClobber(LI);
        if (QueryInst->isVolatile())
          // Ordering is required if QueryInst is itself volatile.
          return MemDepResult::getClobber(LI);
        // Otherwise, volatile doesn't imply any special ordering.
      }

      // Atomic loads have complications involved.
      // A Monotonic (or higher) load is OK if the query inst is itself not
      // atomic.
      // FIXME: This is overly conservative.
      if (LI->isAtomic() && isStrongerThanUnordered(LI->getOrdering())) {
        if (!QueryInst ||
            isComplexForReordering(QueryInst, AtomicOrdering::NotAtomic))
          return MemDepResult::getClobber(LI);
        if (LI->getOrdering() != AtomicOrdering::Monotonic)
          return MemDepResult::getClobber(LI);
      }

      MemoryLocation LoadLoc = MemoryLocation::get(LI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = BatchAA.alias(LoadLoc, MemLoc);

      if (R == AliasResult::NoAlias)
        continue;

      if (isLoad) {
        // Must aliased loads are defs of each other.
        if (R == AliasResult::MustAlias)
          return MemDepResult::getDef(Inst);

        // If we have a partial alias, then return this as a clobber for the
        // client to handle.
        if (R == AliasResult::PartialAlias && R.hasOffset()) {
          ClobberOffsets[LI] = R.getOffset();
          return MemDepResult::getClobber(Inst);
        }

        // Random may-alias loads don't depend on each other without a
        // dependence.
        continue;
      }

      // Stores don't alias loads from read-only memory.
      if (!isModSet(BatchAA.getModRefInfoMask(LoadLoc)))
        continue;

      // Stores depend on may/must aliased loads.
      return MemDepResult::getDef(Inst);
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // Atomic stores have complications involved.
      // A Monotonic store is OK if the query inst is itself not atomic.
      // FIXME: This is overly conservative.
      if (!SI->isUnordered() && SI->isAtomic()) {
        if (!QueryInst ||
            isComplexForReordering(QueryInst, AtomicOrdering::Unordered))
          return MemDepResult::getClobber(SI);
        // If we reach here, the guard above guarantees that QueryInst is a
        // non-atomic or unordered load/store.
        // SI is atomic with monotonic or release semantics (seq_cst for a
        // store is actually release semantics plus a total order over other
        // seq_cst instructions; since QueryInst is not seq_cst, we can treat
        // it as simple release semantics).
        // Monotonic and release semantics allow re-ordering before the store,
        // so it is safe to go further and check the aliasing. That will
        // prohibit re-ordering in case the locations may or must alias.
      }

      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses can for example be
      // reordered with volatile accesses.
      if (SI->isVolatile())
        if (!QueryInst || QueryInst->isVolatile())
          return MemDepResult::getClobber(SI);

      // If alias analysis can tell that this store is guaranteed to not modify
      // the query pointer, ignore it. Use getModRefInfo to handle cases where
      // the query pointer points to constant memory etc.
      if (!isModOrRefSet(BatchAA.getModRefInfo(SI, MemLoc)))
        continue;

      // Ok, this store might clobber the query pointer. Check to see if it is
      // a must alias: in this case, we want to return this as a def.
      // FIXME: Use ModRefInfo::Must bit from getModRefInfo call above.
      MemoryLocation StoreLoc = MemoryLocation::get(SI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = BatchAA.alias(StoreLoc, MemLoc);

      if (R == AliasResult::NoAlias)
        continue;
      if (R == AliasResult::MustAlias)
        return MemDepResult::getDef(Inst);
      if (isInvariantLoad)
        continue;
      if (canSkipClobberingStore(SI, MemLoc, MemLocAlign, BatchAA, *Limit))
        continue;
      return MemDepResult::getClobber(Inst);
    }

    // If this is an allocation, and if we know that the accessed pointer is to
    // the allocation, return Def. This means that there is no dependence and
    // the access can be optimized based on that. For example, a load could
    // turn into undef. Note that we can bypass the allocation itself when
    // looking for a clobber in many cases; that's an alias property and is
    // handled by BasicAA.
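    // For example (illustrative IR):
    //   %a = alloca i32
    //   %v = load i32, ptr %a   ; Def on the alloca; %v may fold to undef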
    if (isa<AllocaInst>(Inst) || isNoAliasCall(Inst)) {
      const Value *AccessPtr = getUnderlyingObject(MemLoc.Ptr);
      if (AccessPtr == Inst || BatchAA.isMustAlias(Inst, AccessPtr))
        return MemDepResult::getDef(Inst);
    }

    // If we found a select instruction for the MemLoc pointer, return it as a
    // Def dependency.
    if (isa<SelectInst>(Inst) && MemLoc.Ptr == Inst)
      return MemDepResult::getDef(Inst);

    if (isInvariantLoad)
      continue;

    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads or stores 'before' the
    // fence. As a result, we look past it when finding a dependency for
    // loads. DSE uses this to find preceding stores to delete and thus we
    // can't bypass the fence if the query instruction is a store.
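    // For example (illustrative IR): when querying the load below,
    //   store i32 1, ptr %p
    //   fence release
    //   %v = load i32, ptr %p
    // the fence is looked through and the store is found as the dependency.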
    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
      if (isLoad && FI->getOrdering() == AtomicOrdering::Release)
        continue;

    // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
    switch (BatchAA.getModRefInfo(Inst, MemLoc)) {
    case ModRefInfo::NoModRef:
      // If the call has no effect on the queried pointer, just ignore it.
      continue;
    case ModRefInfo::Mod:
      return MemDepResult::getClobber(Inst);
    case ModRefInfo::Ref:
      // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
      if (isLoad)
        continue;
      [[fallthrough]];
    default:
      // Otherwise, there is a potential dependence. Return a clobber.
      return MemDepResult::getClobber(Inst);
    }
  }

  // No dependence found. If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}

MemDepResult MemoryDependenceResults::getDependency(Instruction *QueryInst) {
  ClobberOffsets.clear();
  Instruction *ScanPos = QueryInst;

  // Check for a cached result.
  MemDepResult &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it. Note that this depends
  // on MemDepResult's default constructing to 'dirty'.
  if (!LocalCache.isDirty())
    return LocalCache;

  // Otherwise, if we have a dirty entry, we know we can start the scan at that
  // instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getInst()) {
    ScanPos = Inst;

    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
  }

  BasicBlock *QueryParent = QueryInst->getParent();

  // Do the scan.
  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found. If this is the entry block of the function, it is
    // unknown, otherwise it is non-local.
    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
      LocalCache = MemDepResult::getNonLocal();
    else
      LocalCache = MemDepResult::getNonFuncLocal();
  } else {
    MemoryLocation MemLoc;
    ModRefInfo MR = GetLocation(QueryInst, MemLoc, TLI);
    if (MemLoc.Ptr) {
      // If we can do a pointer scan, make it happen.
      bool isLoad = !isModSet(MR);
      if (auto *II = dyn_cast<IntrinsicInst>(QueryInst))
        isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;

      LocalCache =
          getPointerDependencyFrom(MemLoc, isLoad, ScanPos->getIterator(),
                                   QueryParent, QueryInst, nullptr);
    } else if (auto *QueryCall = dyn_cast<CallBase>(QueryInst)) {
      bool isReadOnly = AA.onlyReadsMemory(QueryCall);
      LocalCache = getCallDependencyFrom(QueryCall, isReadOnly,
                                         ScanPos->getIterator(), QueryParent);
    } else
      // Non-memory instruction.
      LocalCache = MemDepResult::getUnknown();
  }

  // Remember the result!
  if (Instruction *I = LocalCache.getInst())
    ReverseLocalDeps[I].insert(QueryInst);

  return LocalCache;
}

#ifndef NDEBUG
/// This method is used when -debug is specified to verify that cache arrays
/// are properly kept sorted.
static void AssertSorted(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1)
    Count = Cache.size();
  assert(std::is_sorted(Cache.begin(), Cache.begin() + Count) &&
         "Cache isn't sorted!");
}
#endif

const MemoryDependenceResults::NonLocalDepInfo &
MemoryDependenceResults::getNonLocalCallDependency(CallBase *QueryCall) {
  assert(getDependency(QueryCall).isNonLocal() &&
         "getNonLocalCallDependency should only be used on calls with "
         "non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDepsMap[QueryCall];
  NonLocalDepInfo &Cache = CacheP.first;

  // This is the set of blocks that need to be recomputed. In the cached case,
  // this can happen due to instructions being deleted etc. In the uncached
  // case, this starts out as the set of predecessors we care about.
  SmallVector<BasicBlock *, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // Okay, we have a cache entry. If we know it is not dirty, just return it
    // with no computation.
    if (!CacheP.second) {
      ++NumCacheNonLocal;
      return Cache;
    }

    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    for (auto &Entry : Cache)
      if (Entry.getResult().isDirty())
        DirtyBlocks.push_back(Entry.getBB());

    // Sort the cache so that we can do fast binary search lookups below.
    llvm::sort(Cache);

    ++NumCacheDirtyNonLocal;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryCall's block.
    BasicBlock *QueryBB = QueryCall->getParent();
    append_range(DirtyBlocks, PredCache.get(QueryBB));
    ++NumUncacheNonLocal;
  }

  // isReadonlyCall - If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA.onlyReadsMemory(QueryCall);

  SmallPtrSet<BasicBlock *, 32> Visited;

  unsigned NumSortedEntries = Cache.size();
  LLVM_DEBUG(AssertSorted(Cache));

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.pop_back_val();

    // Already processed this block?
    if (!Visited.insert(DirtyBB).second)
      continue;

    // Do a binary search to see if we already have an entry for this block in
    // the cache set. If so, find it.
    LLVM_DEBUG(AssertSorted(Cache, NumSortedEntries));
    NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.begin() + NumSortedEntries,
                         NonLocalDepEntry(DirtyBB));
    if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
      --Entry;

    NonLocalDepEntry *ExistingResult = nullptr;
    if (Entry != Cache.begin() + NumSortedEntries &&
        Entry->getBB() == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the block
      // is done.
      if (!Entry->getResult().isDirty())
        continue;

      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &*Entry;
    }

    // If the dirty entry has a pointer, start scanning from it so we don't
    // have to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getResult().getInst()) {
        ScanPos = Inst->getIterator();
        // We're removing QueryCall's use of Inst.
        RemoveFromReverseMap<Instruction *>(ReverseNonLocalDeps, Inst,
                                            QueryCall);
      }
    }

    // Find out if this block has a local dependency for QueryCall.
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep = getCallDependencyFrom(QueryCall, isReadonlyCall, ScanPos, DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence found. If this is the entry block of the function, it
      // is unknown, otherwise it is non-local.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getNonFuncLocal();
    }

    // If we had a dirty entry for the block, update it. Otherwise, just add
    // a new entry.
    if (ExistingResult)
      ExistingResult->setResult(Dep);
    else
      Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCall);
    } else {
      // If the block *is* completely transparent to the load, we need to
      // check the predecessors of this block. Add them to our worklist.
      append_range(DirtyBlocks, PredCache.get(DirtyBB));
    }
  }

  return Cache;
}

void MemoryDependenceResults::getNonLocalPointerDependency(
    Instruction *QueryInst, SmallVectorImpl<NonLocalDepResult> &Result) {
  const MemoryLocation Loc = MemoryLocation::get(QueryInst);
  bool isLoad = isa<LoadInst>(QueryInst);
  BasicBlock *FromBB = QueryInst->getParent();
  assert(FromBB);

  assert(Loc.Ptr->getType()->isPointerTy() &&
         "Can't get pointer deps of a non-pointer!");
  Result.clear();
  {
    // Check if there is a cached Def with invariant.group.
    auto NonLocalDefIt = NonLocalDefsCache.find(QueryInst);
    if (NonLocalDefIt != NonLocalDefsCache.end()) {
      Result.push_back(NonLocalDefIt->second);
      ReverseNonLocalDefsCache[NonLocalDefIt->second.getResult().getInst()]
          .erase(QueryInst);
      NonLocalDefsCache.erase(NonLocalDefIt);
      return;
    }
  }
  // This routine does not expect to deal with volatile instructions.
  // Doing so would require piping the QueryInst all the way through.
  // TODO: volatiles can't be elided, but they can be reordered with other
  // non-volatile accesses.

  // We currently give up on any instruction which is ordered, but we do handle
  // atomic instructions which are unordered.
  // TODO: Handle ordered instructions.
  auto isOrdered = [](Instruction *Inst) {
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      return !LI->isUnordered();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      return !SI->isUnordered();
    }
    return false;
  };
  if (QueryInst->isVolatile() || isOrdered(QueryInst)) {
    Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                       const_cast<Value *>(Loc.Ptr)));
    return;
  }
  const DataLayout &DL = FromBB->getDataLayout();
  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, &AC);

  // This is the set of blocks we've inspected, and the pointer we consider in
  // each block. Because of critical edges, we currently bail out if querying
  // a block with multiple different pointers. This can happen during PHI
  // translation.
  SmallDenseMap<BasicBlock *, Value *, 16> Visited;
  if (getNonLocalPointerDepFromBB(QueryInst, Address, Loc, isLoad, FromBB,
                                  Result, Visited, /*SkipFirstBlock=*/true))
    return;
  Result.clear();
  Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                     const_cast<Value *>(Loc.Ptr)));
}

/// Compute the memdep value for BB with Pointer/PointeeSize using either
/// cached information in Cache or by doing a lookup (which may use dirty cache
/// info if available).
///
/// If we do a lookup, add the result to the cache.
MemDepResult MemoryDependenceResults::getNonLocalInfoForBlock(
    Instruction *QueryInst, const MemoryLocation &Loc, bool isLoad,
    BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries,
    BatchAAResults &BatchAA) {

  bool isInvariantLoad = false;

  if (LoadInst *LI = dyn_cast_or_null<LoadInst>(QueryInst))
    isInvariantLoad = LI->getMetadata(LLVMContext::MD_invariant_load);

  // Do a binary search to see if we already have an entry for this block in
  // the cache set. If so, find it.
  NonLocalDepInfo::iterator Entry = std::upper_bound(
      Cache->begin(), Cache->begin() + NumSortedEntries, NonLocalDepEntry(BB));
  if (Entry != Cache->begin() && (Entry - 1)->getBB() == BB)
    --Entry;

  NonLocalDepEntry *ExistingResult = nullptr;
  if (Entry != Cache->begin() + NumSortedEntries && Entry->getBB() == BB)
    ExistingResult = &*Entry;

  // Use a cached result for an invariant load only if there is no dependency
  // for the corresponding non-invariant load. In that case the invariant load
  // cannot have any dependency either.
  if (ExistingResult && isInvariantLoad &&
      !ExistingResult->getResult().isNonFuncLocal())
    ExistingResult = nullptr;

  // If we have a cached entry, and it is non-dirty, use it as the value for
  // this dependency.
  if (ExistingResult && !ExistingResult->getResult().isDirty()) {
    ++NumCacheNonLocalPtr;
    return ExistingResult->getResult();
  }

  // Otherwise, we have to scan for the value. If we have a dirty cache
  // entry, start scanning from its position, otherwise we scan from the end
  // of the block.
  BasicBlock::iterator ScanPos = BB->end();
  if (ExistingResult && ExistingResult->getResult().getInst()) {
    assert(ExistingResult->getResult().getInst()->getParent() == BB &&
           "Instruction invalidated?");
    ++NumCacheDirtyNonLocalPtr;
    ScanPos = ExistingResult->getResult().getInst()->getIterator();

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, &*ScanPos, CacheKey);
  } else {
    ++NumUncacheNonLocalPtr;
  }

  // Scan the block for the dependency.
  MemDepResult Dep =
      getPointerDependencyFrom(Loc, isLoad, ScanPos, BB, QueryInst, nullptr,
                               BatchAA);

  // Don't cache results for invariant load.
  if (isInvariantLoad)
    return Dep;

  // If we had a dirty entry for the block, update it. Otherwise, just add
  // a new entry.
  if (ExistingResult)
    ExistingResult->setResult(Dep);
  else
    Cache->push_back(NonLocalDepEntry(BB, Dep));

  // If the block has a dependency (i.e. it isn't completely transparent to
  // the value), remember the reverse association because we just added it
  // to Cache!
  if (!Dep.isLocal())
    return Dep;

  // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
  // update MemDep when we remove instructions.
  Instruction *Inst = Dep.getInst();
  assert(Inst && "Didn't depend on anything?");
  ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
  ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
  return Dep;
}

/// Sort the NonLocalDepInfo cache, given a certain number of elements in the
/// array that are already properly ordered.
///
/// This is optimized for the case when only a few entries are added.
static void
SortNonLocalDepInfoCache(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         unsigned NumSortedEntries) {
  switch (Cache.size() - NumSortedEntries) {
  case 0:
    // Done, no new entries.
    break;
  case 2: {
    // Two new entries, insert the last one into place.
    NonLocalDepEntry Val = Cache.back();
    Cache.pop_back();
    MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.end() - 1, Val);
    Cache.insert(Entry, Val);
    [[fallthrough]];
  }
  case 1:
    // One new entry, just insert the new value at the appropriate position.
    if (Cache.size() != 1) {
      NonLocalDepEntry Val = Cache.back();
      Cache.pop_back();
      MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
          llvm::upper_bound(Cache, Val);
      Cache.insert(Entry, Val);
    }
    break;
  default:
    // Added many values, do a full scale sort.
    llvm::sort(Cache);
    break;
  }
}

/// Perform a dependency query based on pointer/pointeesize starting at the end
/// of StartBB.
///
/// Add any clobber/def results to the results vector and keep track of which
/// blocks are visited in 'Visited'.
///
/// This has special behavior for the first block queries (when SkipFirstBlock
/// is true). In this special case, it ignores the contents of the specified
/// block and starts returning dependence info for its predecessors.
///
/// This function returns true on success, or false to indicate that it could
/// not compute dependence information for some reason. This should be treated
/// as a clobber dependence on the first instruction in the predecessor block.
bool MemoryDependenceResults::getNonLocalPointerDepFromBB(
    Instruction *QueryInst, const PHITransAddr &Pointer,
    const MemoryLocation &Loc, bool isLoad, BasicBlock *StartBB,
    SmallVectorImpl<NonLocalDepResult> &Result,
    SmallDenseMap<BasicBlock *, Value *, 16> &Visited, bool SkipFirstBlock,
    bool IsIncomplete) {
  // Look up the cached info for Pointer.
  ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);

  // Set up a temporary NLPI value. If the map doesn't yet have an entry for
  // CacheKey, this value will be inserted as the associated value. Otherwise,
  // it'll be ignored, and we'll have to check to see if the cached size and
  // aa tags are consistent with the current query.
  NonLocalPointerInfo InitialNLPI;
  InitialNLPI.Size = Loc.Size;
  InitialNLPI.AATags = Loc.AATags;

  bool isInvariantLoad = false;
  if (LoadInst *LI = dyn_cast_or_null<LoadInst>(QueryInst))
    isInvariantLoad = LI->getMetadata(LLVMContext::MD_invariant_load);

  // Get the NLPI for CacheKey, inserting one into the map if it doesn't
  // already have one.
  std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
      NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
  NonLocalPointerInfo *CacheInfo = &Pair.first->second;

  // If we already have a cache entry for this CacheKey, we may need to do some
  // work to reconcile the cache entry and the current query.
  // Invariant loads don't participate in caching. Thus no need to reconcile.
  if (!isInvariantLoad && !Pair.second) {
    if (CacheInfo->Size != Loc.Size) {
      // The query's Size is not equal to the cached one. Throw out the cached
      // data and proceed with the query with the new size.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      CacheInfo->Size = Loc.Size;
      for (auto &Entry : CacheInfo->NonLocalDeps)
        if (Instruction *Inst = Entry.getResult().getInst())
          RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
      CacheInfo->NonLocalDeps.clear();
      // The cache is cleared (in the above line) so we will have lost
      // information about blocks we have already visited. We therefore must
      // assume that the cache information is incomplete.
      IsIncomplete = true;
    }

    // If the query's AATags are inconsistent with the cached one,
    // conservatively throw out the cached data and restart the query with
    // no tag if needed.
    if (CacheInfo->AATags != Loc.AATags) {
      if (CacheInfo->AATags) {
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->AATags = AAMDNodes();
        for (auto &Entry : CacheInfo->NonLocalDeps)
          if (Instruction *Inst = Entry.getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
        // The cache is cleared (in the above line) so we will have lost
        // information about blocks we have already visited. We therefore must
        // assume that the cache information is incomplete.
        IsIncomplete = true;
      }
      if (Loc.AATags)
        return getNonLocalPointerDepFromBB(
            QueryInst, Pointer, Loc.getWithoutAATags(), isLoad, StartBB,
            Result, Visited, SkipFirstBlock, IsIncomplete);
    }
  }

  NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;

  // If we have valid cached information for exactly the block we are
  // investigating, just return it with no recomputation.
  // Don't use cached information for invariant loads since it is valid for
  // non-invariant loads only.
  if (!IsIncomplete && !isInvariantLoad &&
      CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
    // If we have a fully cached result for this query, we can just return the
    // cached results and populate the visited set. However, we have to verify
    // that we don't already have conflicting results for these blocks. Check
    // to ensure that if a block in the results set is in the visited set that
    // it was for the same pointer query.
    if (!Visited.empty()) {
      for (auto &Entry : *Cache) {
        DenseMap<BasicBlock *, Value *>::iterator VI =
            Visited.find(Entry.getBB());
        if (VI == Visited.end() || VI->second == Pointer.getAddr())
          continue;

        // We have a pointer mismatch in a block. Just return false, saying
        // that something was clobbered in this result. We could also do a
        // non-fully cached query, but there is little point in doing this.
        return false;
      }
    }

    Value *Addr = Pointer.getAddr();
    for (auto &Entry : *Cache) {
      Visited.insert(std::make_pair(Entry.getBB(), Addr));
      if (Entry.getResult().isNonLocal()) {
        continue;
      }

      if (DT.isReachableFromEntry(Entry.getBB())) {
        Result.push_back(
            NonLocalDepResult(Entry.getBB(), Entry.getResult(), Addr));
      }
    }
    ++NumCacheCompleteNonLocalPtr;
    return true;
  }

  // Otherwise, either this is a new block, a block with an invalid cache
  // pointer or one that we're about to invalidate by putting more info into
  // it than its valid cache info. If empty and not explicitly indicated as
  // incomplete, the result will be valid cache info, otherwise it isn't.
  //
  // Invariant loads don't affect the cache in any way, thus there is no need
  // to update CacheInfo either.
  if (!isInvariantLoad) {
    if (!IsIncomplete && Cache->empty())
      CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
    else
      CacheInfo->Pair = BBSkipFirstBlockPair();
  }

  SmallVector<BasicBlock *, 32> Worklist;
  Worklist.push_back(StartBB);

  // PredList used inside loop.
  SmallVector<std::pair<BasicBlock *, PHITransAddr>, 16> PredList;

  // Keep track of the entries that we know are sorted. Previously cached
  // entries will all be sorted. The entries we add we only sort on demand (we
  // don't insert every element into its sorted position). We know that we
  // won't get any reuse from currently inserted values, because we don't
  // revisit blocks after we insert info for them.
  unsigned NumSortedEntries = Cache->size();
  unsigned WorklistEntries = BlockNumberLimit;
  bool GotWorklistLimit = false;
  LLVM_DEBUG(AssertSorted(*Cache));

  BatchAAResults BatchAA(AA, &EEA);
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // Processing a very large number of blocks becomes very expensive and is
    // likely not worthwhile, so bail out once too many results accumulate.
    if (Result.size() > NumResultsLimit) {
      // Sort it now (if needed) so that recursive invocations of
      // getNonLocalPointerDepFromBB and other routines that could reuse the
      // cache value will only see properly sorted cache arrays.
      if (Cache && NumSortedEntries != Cache->size()) {
        SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      }
      // Since we bail out, the "Cache" set won't contain all of the
      // results for the query. This is ok (we can still use it to accelerate
      // specific block queries) but we can't do the fastpath "return all
      // results from the set". Clear out the indicator for this.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      return false;
    }

    // Skip the first block if we have it.
    if (!SkipFirstBlock) {
      // Analyze the dependency of *Pointer in FromBB. See if we already have
      // been here.
      assert(Visited.count(BB) && "Should check 'visited' before adding to WL");

      // Get the dependency info for Pointer in BB. If we have cached
      // information, we will use it, otherwise we compute it.
      LLVM_DEBUG(AssertSorted(*Cache, NumSortedEntries));
      MemDepResult Dep = getNonLocalInfoForBlock(
          QueryInst, Loc, isLoad, BB, Cache, NumSortedEntries, BatchAA);

      // If we got a Def or Clobber, add this to the list of results.
      if (!Dep.isNonLocal()) {
        if (DT.isReachableFromEntry(BB)) {
          Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
          continue;
        }
      }
    }

    // If 'Pointer' is an instruction defined in this block, then we need to do
    // phi translation to change it into a value live in the predecessor block.
    // If not, we just add the predecessors to the worklist and scan them with
    // the same Pointer.
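    // For example (illustrative IR): if Pointer is
    //   %p = phi ptr [ %a, %pred1 ], [ %b, %pred2 ]
    // the query continues with %a in %pred1 and with %b in %pred2.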
1217 | if (!Pointer.needsPHITranslationFromBlock(BB)) { |
1218 | SkipFirstBlock = false; |
1219 | SmallVector<BasicBlock *, 16> NewBlocks; |
1220 | for (BasicBlock *Pred : PredCache.get(BB)) { |
1221 | // Verify that we haven't looked at this block yet. |
1222 | std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes = |
1223 | Visited.insert(KV: std::make_pair(x&: Pred, y: Pointer.getAddr())); |
1224 | if (InsertRes.second) { |
1225 | // First time we've looked at *PI. |
1226 | NewBlocks.push_back(Elt: Pred); |
1227 | continue; |
1228 | } |
1229 | |
1230 | // If we have seen this block before, but it was with a different |
1231 | // pointer then we have a phi translation failure and we have to treat |
1232 | // this as a clobber. |
1233 | if (InsertRes.first->second != Pointer.getAddr()) { |
1234 | // Make sure to clean up the Visited map before continuing on to |
1235 | // PredTranslationFailure. |
1236 | for (auto *NewBlock : NewBlocks) |
1237 | Visited.erase(Val: NewBlock); |
1238 | goto PredTranslationFailure; |
1239 | } |
1240 | } |
1241 | if (NewBlocks.size() > WorklistEntries) { |
1242 | // Make sure to clean up the Visited map before continuing on to |
1243 | // PredTranslationFailure. |
1244 | for (auto *NewBlock : NewBlocks) |
1245 | Visited.erase(Val: NewBlock); |
1246 | GotWorklistLimit = true; |
1247 | goto PredTranslationFailure; |
1248 | } |
1249 | WorklistEntries -= NewBlocks.size(); |
1250 | Worklist.append(in_start: NewBlocks.begin(), in_end: NewBlocks.end()); |
1251 | continue; |
1252 | } |
1253 | |
1254 | // We do need to do phi translation, if we know ahead of time we can't phi |
1255 | // translate this value, don't even try. |
1256 | if (!Pointer.isPotentiallyPHITranslatable()) |
1257 | goto PredTranslationFailure; |
1258 | |
1259 | // We may have added values to the cache list before this PHI translation. |
1260 | // If so, we haven't done anything to ensure that the cache remains sorted. |
1261 | // Sort it now (if needed) so that recursive invocations of |
1262 | // getNonLocalPointerDepFromBB and other routines that could reuse the cache |
1263 | // value will only see properly sorted cache arrays. |
1264 | if (Cache && NumSortedEntries != Cache->size()) { |
      SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
1266 | NumSortedEntries = Cache->size(); |
1267 | } |
1268 | Cache = nullptr; |
1269 | |
1270 | PredList.clear(); |
1271 | for (BasicBlock *Pred : PredCache.get(BB)) { |
      PredList.push_back(std::make_pair(Pred, Pointer));
1273 | |
1274 | // Get the PHI translated pointer in this predecessor. This can fail if |
1275 | // not translatable, in which case the getAddr() returns null. |
1276 | PHITransAddr &PredPointer = PredList.back().second; |
      Value *PredPtrVal =
          PredPointer.translateValue(BB, Pred, &DT, /*MustDominate=*/false);
1279 | |
1280 | // Check to see if we have already visited this pred block with another |
1281 | // pointer. If so, we can't do this lookup. This failure can occur |
1282 | // with PHI translation when a critical edge exists and the PHI node in |
1283 | // the successor translates to a pointer value different than the |
1284 | // pointer the block was first analyzed with. |
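      //
      // A hypothetical example of the failure (made-up names): suppose an
      // earlier step of this query already visited 'Pred' with the
      // untranslated pointer %p, and BB contains
      //   %p = phi ptr [ %a, %Pred ], [ %b, %Other ]
      // Translating through the phi now asks to visit 'Pred' with %a, and
      // the single Visited slot for 'Pred' cannot represent both pointers.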
      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
          Visited.insert(std::make_pair(Pred, PredPtrVal));
1287 | |
1288 | if (!InsertRes.second) { |
1289 | // We found the pred; take it off the list of preds to visit. |
1290 | PredList.pop_back(); |
1291 | |
1292 | // If the predecessor was visited with PredPtr, then we already did |
1293 | // the analysis and can ignore it. |
1294 | if (InsertRes.first->second == PredPtrVal) |
1295 | continue; |
1296 | |
1297 | // Otherwise, the block was previously analyzed with a different |
1298 | // pointer. We can't represent the result of this case, so we just |
1299 | // treat this as a phi translation failure. |
1300 | |
1301 | // Make sure to clean up the Visited map before continuing on to |
1302 | // PredTranslationFailure. |
        for (const auto &Pred : PredList)
          Visited.erase(Pred.first);
1305 | |
1306 | goto PredTranslationFailure; |
1307 | } |
1308 | } |
1309 | |
    // Actually process results here; this needs to be a separate loop to avoid
    // calling getNonLocalPointerDepFromBB for blocks we don't want to return
    // any results for.  (getNonLocalPointerDepFromBB will modify our
    // data structures in ways the code after the PredTranslationFailure label
    // doesn't expect.)
1315 | for (auto &I : PredList) { |
1316 | BasicBlock *Pred = I.first; |
1317 | PHITransAddr &PredPointer = I.second; |
1318 | Value *PredPtrVal = PredPointer.getAddr(); |
1319 | |
1320 | bool CanTranslate = true; |
1321 | // If PHI translation was unable to find an available pointer in this |
1322 | // predecessor, then we have to assume that the pointer is clobbered in |
1323 | // that predecessor. We can still do PRE of the load, which would insert |
1324 | // a computation of the pointer in this predecessor. |
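      // (As a sketch with made-up IR: if no "%p.pred = getelementptr ..."
      // exists in the predecessor for the translated address, PRE can
      // materialize that computation there and insert the reload, instead
      // of abandoning the whole query.)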
1325 | if (!PredPtrVal) |
1326 | CanTranslate = false; |
1327 | |
1328 | // FIXME: it is entirely possible that PHI translating will end up with |
1329 | // the same value. Consider PHI translating something like: |
1330 | // X = phi [x, bb1], [y, bb2]. PHI translating for bb1 doesn't *need* |
1331 | // to recurse here, pedantically speaking. |
1332 | |
1333 | // If getNonLocalPointerDepFromBB fails here, that means the cached |
1334 | // result conflicted with the Visited list; we have to conservatively |
1335 | // assume it is unknown, but this also does not block PRE of the load. |
      if (!CanTranslate ||
          !getNonLocalPointerDepFromBB(QueryInst, PredPointer,
                                       Loc.getWithNewPtr(PredPtrVal), isLoad,
                                       Pred, Result, Visited)) {
1340 | // Add the entry to the Result list. |
1341 | NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal); |
        Result.push_back(Entry);
1343 | |
        // Since we had a phi translation failure, the cache for CacheKey won't
        // include all of the entries that we need to immediately satisfy future
        // queries.  Mark this in NonLocalPointerDeps by setting the
        // BBSkipFirstBlockPair pointer to null.  This forces reuse of the
        // cached value to do more work, but ensures the phi translation
        // failure is not missed.
1349 | NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey]; |
1350 | NLPI.Pair = BBSkipFirstBlockPair(); |
1351 | continue; |
1352 | } |
1353 | } |
1354 | |
1355 | // Refresh the CacheInfo/Cache pointer so that it isn't invalidated. |
1356 | CacheInfo = &NonLocalPointerDeps[CacheKey]; |
1357 | Cache = &CacheInfo->NonLocalDeps; |
1358 | NumSortedEntries = Cache->size(); |
1359 | |
1360 | // Since we did phi translation, the "Cache" set won't contain all of the |
1361 | // results for the query. This is ok (we can still use it to accelerate |
1362 | // specific block queries) but we can't do the fastpath "return all |
    // results from the set".  Clear out the indicator for this.
1364 | CacheInfo->Pair = BBSkipFirstBlockPair(); |
1365 | SkipFirstBlock = false; |
1366 | continue; |
1367 | |
1368 | PredTranslationFailure: |
    // The following code is "failure"; we can't produce a sane translation
    // for the given block.  It assumes that we haven't modified any of
    // our data structures while processing the current block.
1372 | |
1373 | if (!Cache) { |
1374 | // Refresh the CacheInfo/Cache pointer if it got invalidated. |
1375 | CacheInfo = &NonLocalPointerDeps[CacheKey]; |
1376 | Cache = &CacheInfo->NonLocalDeps; |
1377 | NumSortedEntries = Cache->size(); |
1378 | } |
1379 | |
1380 | // Since we failed phi translation, the "Cache" set won't contain all of the |
1381 | // results for the query. This is ok (we can still use it to accelerate |
1382 | // specific block queries) but we can't do the fastpath "return all |
1383 | // results from the set". Clear out the indicator for this. |
1384 | CacheInfo->Pair = BBSkipFirstBlockPair(); |
1385 | |
1386 | // If *nothing* works, mark the pointer as unknown. |
1387 | // |
1388 | // If this is the magic first block, return this as a clobber of the whole |
1389 | // incoming value. Since we can't phi translate to one of the predecessors, |
1390 | // we have to bail out. |
1391 | if (SkipFirstBlock) |
1392 | return false; |
1393 | |
    // Results of invariant loads are not cached, so there is no need to
    // update the cached information.
1396 | if (!isInvariantLoad) { |
      for (NonLocalDepEntry &I : llvm::reverse(*Cache)) {
1398 | if (I.getBB() != BB) |
1399 | continue; |
1400 | |
1401 | assert((GotWorklistLimit || I.getResult().isNonLocal() || |
1402 | !DT.isReachableFromEntry(BB)) && |
1403 | "Should only be here with transparent block" ); |
1404 | |
        I.setResult(MemDepResult::getUnknown());
        break;
1409 | } |
1410 | } |
1411 | (void)GotWorklistLimit; |
1412 | // Go ahead and report unknown dependence. |
    Result.push_back(
        NonLocalDepResult(BB, MemDepResult::getUnknown(), Pointer.getAddr()));
1415 | } |
1416 | |
1417 | // Okay, we're done now. If we added new values to the cache, re-sort it. |
  SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
1419 | LLVM_DEBUG(AssertSorted(*Cache)); |
1420 | return true; |
1421 | } |
1422 | |
1423 | /// If P exists in CachedNonLocalPointerInfo or NonLocalDefsCache, remove it. |
1424 | void MemoryDependenceResults::removeCachedNonLocalPointerDependencies( |
1425 | ValueIsLoadPair P) { |
1426 | |
1427 | // Most of the time this cache is empty. |
1428 | if (!NonLocalDefsCache.empty()) { |
    auto it = NonLocalDefsCache.find(P.getPointer());
    if (it != NonLocalDefsCache.end()) {
      RemoveFromReverseMap(ReverseNonLocalDefsCache,
                           it->second.getResult().getInst(), P.getPointer());
      NonLocalDefsCache.erase(it);
1434 | } |
1435 | |
    if (auto *I = dyn_cast<Instruction>(P.getPointer())) {
      auto toRemoveIt = ReverseNonLocalDefsCache.find(I);
      if (toRemoveIt != ReverseNonLocalDefsCache.end()) {
        for (const auto *entry : toRemoveIt->second)
          NonLocalDefsCache.erase(entry);
        ReverseNonLocalDefsCache.erase(toRemoveIt);
1442 | } |
1443 | } |
1444 | } |
1445 | |
  CachedNonLocalPointerInfo::iterator It = NonLocalPointerDeps.find(P);
1447 | if (It == NonLocalPointerDeps.end()) |
1448 | return; |
1449 | |
1450 | // Remove all of the entries in the BB->val map. This involves removing |
1451 | // instructions from the reverse map. |
1452 | NonLocalDepInfo &PInfo = It->second.NonLocalDeps; |
1453 | |
1454 | for (const NonLocalDepEntry &DE : PInfo) { |
1455 | Instruction *Target = DE.getResult().getInst(); |
1456 | if (!Target) |
1457 | continue; // Ignore non-local dep results. |
1458 | assert(Target->getParent() == DE.getBB()); |
1459 | |
1460 | // Eliminating the dirty entry from 'Cache', so update the reverse info. |
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
1462 | } |
1463 | |
1464 | // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo). |
  NonLocalPointerDeps.erase(It);
1466 | } |
1467 | |
1468 | void MemoryDependenceResults::invalidateCachedPointerInfo(Value *Ptr) { |
1469 | // If Ptr isn't really a pointer, just ignore it. |
1470 | if (!Ptr->getType()->isPointerTy()) |
1471 | return; |
1472 | // Flush store info for the pointer. |
  removeCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
  // Flush load info for the pointer.
  removeCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
1476 | } |
1477 | |
1478 | void MemoryDependenceResults::invalidateCachedPredecessors() { |
1479 | PredCache.clear(); |
1480 | } |
1481 | |
1482 | void MemoryDependenceResults::removeInstruction(Instruction *RemInst) { |
  EEA.removeInstruction(RemInst);
1484 | |
1485 | // Walk through the Non-local dependencies, removing this one as the value |
1486 | // for any cached queries. |
  NonLocalDepMapType::iterator NLDI = NonLocalDepsMap.find(RemInst);
1488 | if (NLDI != NonLocalDepsMap.end()) { |
1489 | NonLocalDepInfo &BlockMap = NLDI->second.first; |
1490 | for (auto &Entry : BlockMap) |
1491 | if (Instruction *Inst = Entry.getResult().getInst()) |
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
    NonLocalDepsMap.erase(NLDI);
1494 | } |
1495 | |
1496 | // If we have a cached local dependence query for this instruction, remove it. |
  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
1498 | if (LocalDepEntry != LocalDeps.end()) { |
1499 | // Remove us from DepInst's reverse set now that the local dep info is gone. |
1500 | if (Instruction *Inst = LocalDepEntry->second.getInst()) |
      RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);
1502 | |
1503 | // Remove this local dependency info. |
    LocalDeps.erase(LocalDepEntry);
1505 | } |
1506 | |
1507 | // If we have any cached dependencies on this instruction, remove |
1508 | // them. |
1509 | |
1510 | // If the instruction is a pointer, remove it from both the load info and the |
1511 | // store info. |
1512 | if (RemInst->getType()->isPointerTy()) { |
    removeCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
    removeCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
1515 | } else { |
    // Otherwise, if the instruction is in the map directly, it must be a load.
    // Remove it.
    auto toRemoveIt = NonLocalDefsCache.find(RemInst);
1519 | if (toRemoveIt != NonLocalDefsCache.end()) { |
1520 | assert(isa<LoadInst>(RemInst) && |
1521 | "only load instructions should be added directly" ); |
1522 | const Instruction *DepV = toRemoveIt->second.getResult().getInst(); |
      ReverseNonLocalDefsCache.find(DepV)->second.erase(RemInst);
      NonLocalDefsCache.erase(toRemoveIt);
1525 | } |
1526 | } |
1527 | |
1528 | // Loop over all of the things that depend on the instruction we're removing. |
1529 | SmallVector<std::pair<Instruction *, Instruction *>, 8> ReverseDepsToAdd; |
1530 | |
1531 | // If we find RemInst as a clobber or Def in any of the maps for other values, |
1532 | // we need to replace its entry with a dirty version of the instruction after |
1533 | // it. If RemInst is a terminator, we use a null dirty value. |
1534 | // |
1535 | // Using a dirty version of the instruction after RemInst saves having to scan |
1536 | // the entire block to get to this point. |
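  //
  // For example (a sketch with made-up instruction names): if a block holds
  //   I1; RemInst; I2; I3
  // and some cached result says "depends on RemInst", we rewrite that entry
  // to a dirty entry pointing at I2.  A later query then resumes its backward
  // scan at I2 instead of starting over from the end of the block.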
1537 | MemDepResult NewDirtyVal; |
1538 | if (!RemInst->isTerminator()) |
    NewDirtyVal = MemDepResult::getDirty(&*++RemInst->getIterator());
1540 | |
  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
1542 | if (ReverseDepIt != ReverseLocalDeps.end()) { |
1543 | // RemInst can't be the terminator if it has local stuff depending on it. |
1544 | assert(!ReverseDepIt->second.empty() && !RemInst->isTerminator() && |
1545 | "Nothing can locally depend on a terminator" ); |
1546 | |
1547 | for (Instruction *InstDependingOnRemInst : ReverseDepIt->second) { |
1548 | assert(InstDependingOnRemInst != RemInst && |
1549 | "Already removed our local dep info" ); |
1550 | |
1551 | LocalDeps[InstDependingOnRemInst] = NewDirtyVal; |
1552 | |
      // Make sure to remember that new things depend on NewDirtyVal's
      // instruction.
      assert(NewDirtyVal.getInst() &&
             "There is no way something else can have "
             "a local dep on this if it is a terminator!");
      ReverseDepsToAdd.push_back(
          std::make_pair(NewDirtyVal.getInst(), InstDependingOnRemInst));
1559 | } |
1560 | |
    ReverseLocalDeps.erase(ReverseDepIt);
1562 | |
1563 | // Add new reverse deps after scanning the set, to avoid invalidating the |
1564 | // 'ReverseDeps' reference. |
1565 | while (!ReverseDepsToAdd.empty()) { |
      ReverseLocalDeps[ReverseDepsToAdd.back().first].insert(
          ReverseDepsToAdd.back().second);
1568 | ReverseDepsToAdd.pop_back(); |
1569 | } |
1570 | } |
1571 | |
  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
1573 | if (ReverseDepIt != ReverseNonLocalDeps.end()) { |
1574 | for (Instruction *I : ReverseDepIt->second) { |
1575 | assert(I != RemInst && "Already removed NonLocalDep info for RemInst" ); |
1576 | |
1577 | PerInstNLInfo &INLD = NonLocalDepsMap[I]; |
1578 | // The information is now dirty! |
1579 | INLD.second = true; |
1580 | |
1581 | for (auto &Entry : INLD.first) { |
1582 | if (Entry.getResult().getInst() != RemInst) |
1583 | continue; |
1584 | |
1585 | // Convert to a dirty entry for the subsequent instruction. |
1586 | Entry.setResult(NewDirtyVal); |
1587 | |
1588 | if (Instruction *NextI = NewDirtyVal.getInst()) |
          ReverseDepsToAdd.push_back(std::make_pair(NextI, I));
1590 | } |
1591 | } |
1592 | |
    ReverseNonLocalDeps.erase(ReverseDepIt);
1594 | |
    // Add new reverse deps after scanning the set, to avoid invalidating the
    // set we are iterating over.
    while (!ReverseDepsToAdd.empty()) {
      ReverseNonLocalDeps[ReverseDepsToAdd.back().first].insert(
          ReverseDepsToAdd.back().second);
1599 | ReverseDepsToAdd.pop_back(); |
1600 | } |
1601 | } |
1602 | |
1603 | // If the instruction is in ReverseNonLocalPtrDeps then it appears as a |
1604 | // value in the NonLocalPointerDeps info. |
1605 | ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt = |
      ReverseNonLocalPtrDeps.find(RemInst);
1607 | if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) { |
1608 | SmallVector<std::pair<Instruction *, ValueIsLoadPair>, 8> |
1609 | ReversePtrDepsToAdd; |
1610 | |
1611 | for (ValueIsLoadPair P : ReversePtrDepIt->second) { |
1612 | assert(P.getPointer() != RemInst && |
1613 | "Already removed NonLocalPointerDeps info for RemInst" ); |
1614 | |
1615 | auto &NLPD = NonLocalPointerDeps[P]; |
1616 | |
1617 | NonLocalDepInfo &NLPDI = NLPD.NonLocalDeps; |
1618 | |
1619 | // The cache is not valid for any specific block anymore. |
1620 | NLPD.Pair = BBSkipFirstBlockPair(); |
1621 | |
1622 | // Update any entries for RemInst to use the instruction after it. |
1623 | for (auto &Entry : NLPDI) { |
1624 | if (Entry.getResult().getInst() != RemInst) |
1625 | continue; |
1626 | |
1627 | // Convert to a dirty entry for the subsequent instruction. |
1628 | Entry.setResult(NewDirtyVal); |
1629 | |
1630 | if (Instruction *NewDirtyInst = NewDirtyVal.getInst()) |
          ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
1632 | } |
1633 | |
1634 | // Re-sort the NonLocalDepInfo. Changing the dirty entry to its |
1635 | // subsequent value may invalidate the sortedness. |
      llvm::sort(NLPDI);
1637 | } |
1638 | |
    ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);
1640 | |
1641 | while (!ReversePtrDepsToAdd.empty()) { |
      ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first].insert(
          ReversePtrDepsToAdd.back().second);
1644 | ReversePtrDepsToAdd.pop_back(); |
1645 | } |
1646 | } |
1647 | |
1648 | assert(!NonLocalDepsMap.count(RemInst) && "RemInst got reinserted?" ); |
1649 | LLVM_DEBUG(verifyRemoved(RemInst)); |
1650 | } |
1651 | |
1652 | /// Verify that the specified instruction does not occur in our internal data |
1653 | /// structures. |
1654 | /// |
1655 | /// This function verifies by asserting in debug builds. |
1656 | void MemoryDependenceResults::verifyRemoved(Instruction *D) const { |
1657 | #ifndef NDEBUG |
1658 | for (const auto &DepKV : LocalDeps) { |
1659 | assert(DepKV.first != D && "Inst occurs in data structures" ); |
1660 | assert(DepKV.second.getInst() != D && "Inst occurs in data structures" ); |
1661 | } |
1662 | |
1663 | for (const auto &DepKV : NonLocalPointerDeps) { |
1664 | assert(DepKV.first.getPointer() != D && "Inst occurs in NLPD map key" ); |
1665 | for (const auto &Entry : DepKV.second.NonLocalDeps) |
1666 | assert(Entry.getResult().getInst() != D && "Inst occurs as NLPD value" ); |
1667 | } |
1668 | |
1669 | for (const auto &DepKV : NonLocalDepsMap) { |
1670 | assert(DepKV.first != D && "Inst occurs in data structures" ); |
1671 | const PerInstNLInfo &INLD = DepKV.second; |
1672 | for (const auto &Entry : INLD.first) |
1673 | assert(Entry.getResult().getInst() != D && |
1674 | "Inst occurs in data structures" ); |
1675 | } |
1676 | |
1677 | for (const auto &DepKV : ReverseLocalDeps) { |
1678 | assert(DepKV.first != D && "Inst occurs in data structures" ); |
1679 | for (Instruction *Inst : DepKV.second) |
1680 | assert(Inst != D && "Inst occurs in data structures" ); |
1681 | } |
1682 | |
1683 | for (const auto &DepKV : ReverseNonLocalDeps) { |
1684 | assert(DepKV.first != D && "Inst occurs in data structures" ); |
1685 | for (Instruction *Inst : DepKV.second) |
1686 | assert(Inst != D && "Inst occurs in data structures" ); |
1687 | } |
1688 | |
1689 | for (const auto &DepKV : ReverseNonLocalPtrDeps) { |
1690 | assert(DepKV.first != D && "Inst occurs in rev NLPD map" ); |
1691 | |
1692 | for (ValueIsLoadPair P : DepKV.second) |
1693 | assert(P != ValueIsLoadPair(D, false) && P != ValueIsLoadPair(D, true) && |
1694 | "Inst occurs in ReverseNonLocalPtrDeps map" ); |
1695 | } |
1696 | #endif |
1697 | } |
1698 | |
1699 | AnalysisKey MemoryDependenceAnalysis::Key; |
1700 | |
1701 | MemoryDependenceAnalysis::MemoryDependenceAnalysis() |
1702 | : DefaultBlockScanLimit(BlockScanLimit) {} |
1703 | |
1704 | MemoryDependenceResults |
1705 | MemoryDependenceAnalysis::run(Function &F, FunctionAnalysisManager &AM) { |
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
1710 | return MemoryDependenceResults(AA, AC, TLI, DT, DefaultBlockScanLimit); |
1711 | } |
1712 | |
1713 | char MemoryDependenceWrapperPass::ID = 0; |
1714 | |
1715 | INITIALIZE_PASS_BEGIN(MemoryDependenceWrapperPass, "memdep" , |
1716 | "Memory Dependence Analysis" , false, true) |
1717 | INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) |
1718 | INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) |
1719 | INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) |
1720 | INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) |
1721 | INITIALIZE_PASS_END(MemoryDependenceWrapperPass, "memdep" , |
1722 | "Memory Dependence Analysis" , false, true) |
1723 | |
1724 | MemoryDependenceWrapperPass::MemoryDependenceWrapperPass() : FunctionPass(ID) {} |
1725 | |
1726 | MemoryDependenceWrapperPass::~MemoryDependenceWrapperPass() = default; |
1727 | |
1728 | void MemoryDependenceWrapperPass::releaseMemory() { |
1729 | MemDep.reset(); |
1730 | } |
1731 | |
1732 | void MemoryDependenceWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { |
1733 | AU.setPreservesAll(); |
1734 | AU.addRequired<AssumptionCacheTracker>(); |
1735 | AU.addRequired<DominatorTreeWrapperPass>(); |
1736 | AU.addRequiredTransitive<AAResultsWrapperPass>(); |
1737 | AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); |
1738 | } |
1739 | |
1740 | bool MemoryDependenceResults::invalidate(Function &F, const PreservedAnalyses &PA, |
1741 | FunctionAnalysisManager::Invalidator &Inv) { |
1742 | // Check whether our analysis is preserved. |
1743 | auto PAC = PA.getChecker<MemoryDependenceAnalysis>(); |
1744 | if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>()) |
1745 | // If not, give up now. |
1746 | return true; |
1747 | |
1748 | // Check whether the analyses we depend on became invalid for any reason. |
  if (Inv.invalidate<AAManager>(F, PA) ||
      Inv.invalidate<AssumptionAnalysis>(F, PA) ||
      Inv.invalidate<DominatorTreeAnalysis>(F, PA))
1752 | return true; |
1753 | |
1754 | // Otherwise this analysis result remains valid. |
1755 | return false; |
1756 | } |
1757 | |
1758 | unsigned MemoryDependenceResults::getDefaultBlockScanLimit() const { |
1759 | return DefaultBlockScanLimit; |
1760 | } |
1761 | |
1762 | bool MemoryDependenceWrapperPass::runOnFunction(Function &F) { |
1763 | auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults(); |
1764 | auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); |
1765 | auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); |
1766 | auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); |
  MemDep.emplace(AA, AC, TLI, DT, BlockScanLimit);
1768 | return false; |
1769 | } |
1770 | |