//===- RewriteStatepointsForGC.cpp - Make GC relocations explicit ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Rewrite call/invoke instructions so as to make potential relocations
// performed by the garbage collector explicit in the IR.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/RewriteStatepointsForGC.h"

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GCStrategy.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
#include <set>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "rewrite-statepoints-for-gc"

using namespace llvm;

// Print the liveset found at the insert location
static cl::opt<bool> PrintLiveSet("spp-print-liveset", cl::Hidden,
                                  cl::init(false));
static cl::opt<bool> PrintLiveSetSize("spp-print-liveset-size", cl::Hidden,
                                      cl::init(false));

// Print out the base pointers for debugging
static cl::opt<bool> PrintBasePointers("spp-print-base-pointers", cl::Hidden,
                                       cl::init(false));

// Cost threshold measuring when it is profitable to rematerialize value instead
// of relocating it
static cl::opt<unsigned>
RematerializationThreshold("spp-rematerialization-threshold", cl::Hidden,
                           cl::init(6));

#ifdef EXPENSIVE_CHECKS
static bool ClobberNonLive = true;
#else
static bool ClobberNonLive = false;
#endif

static cl::opt<bool, true> ClobberNonLiveOverride("rs4gc-clobber-non-live",
                                                  cl::location(ClobberNonLive),
                                                  cl::Hidden);

static cl::opt<bool>
    AllowStatepointWithNoDeoptInfo("rs4gc-allow-statepoint-with-no-deopt-info",
                                   cl::Hidden, cl::init(true));

static cl::opt<bool> RematDerivedAtUses("rs4gc-remat-derived-at-uses",
                                        cl::Hidden, cl::init(true));

/// The IR fed into RewriteStatepointsForGC may have had attributes and
/// metadata implying dereferenceability that are no longer valid/correct after
/// RewriteStatepointsForGC has run. This is because semantically, after
/// RewriteStatepointsForGC runs, all calls to gc.statepoint "free" the entire
/// heap. stripNonValidData (conservatively) restores
/// correctness by erasing all attributes in the module that externally imply
/// dereferenceability. Similar reasoning also applies to the noalias
/// attributes and metadata. gc.statepoint can touch the entire heap including
/// noalias objects.
/// Apart from attributes and metadata, we also remove instructions that imply
/// constant physical memory: llvm.invariant.start.
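///
/// For illustration only (not an exhaustive list), the dropped information
/// includes dereferenceable(N) / dereferenceable_or_null(N) attributes,
/// noalias attributes and related aliasing metadata, and calls to
/// llvm.invariant.start.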
static void stripNonValidData(Module &M);

// Find the GC strategy for a function, or null if it doesn't have one.
static std::unique_ptr<GCStrategy> findGCStrategy(Function &F);

static bool shouldRewriteStatepointsIn(Function &F);

PreservedAnalyses RewriteStatepointsForGC::run(Module &M,
                                               ModuleAnalysisManager &AM) {
  bool Changed = false;
  auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  for (Function &F : M) {
    // Nothing to do for declarations.
    if (F.isDeclaration() || F.empty())
      continue;

    // Policy choice says not to rewrite - the most common reason is that we're
    // compiling code without a GCStrategy.
    if (!shouldRewriteStatepointsIn(F))
      continue;

    auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
    auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
    auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
    Changed |= runOnFunction(F, DT, TTI, TLI);
  }
  if (!Changed)
    return PreservedAnalyses::all();

  // stripNonValidData asserts that shouldRewriteStatepointsIn
  // returns true for at least one function in the module. Since at least
  // one function changed, we know that the precondition is satisfied.
  stripNonValidData(M);

  PreservedAnalyses PA;
  PA.preserve<TargetIRAnalysis>();
  PA.preserve<TargetLibraryAnalysis>();
  return PA;
}

namespace {

struct GCPtrLivenessData {
  /// Values defined in this block.
  MapVector<BasicBlock *, SetVector<Value *>> KillSet;

  /// Values used in this block (and thus live); does not include values
  /// killed within this block.
  MapVector<BasicBlock *, SetVector<Value *>> LiveSet;

  /// Values live into this basic block (i.e. used by any
  /// instruction in this basic block or ones reachable from here)
  MapVector<BasicBlock *, SetVector<Value *>> LiveIn;

  /// Values live out of this basic block (i.e. live into
  /// any successor block)
  MapVector<BasicBlock *, SetVector<Value *>> LiveOut;
};
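
// For orientation, a sketch of the intended invariants (the liveness code
// below is authoritative): the sets above are related by the usual backward
// dataflow equations,
//   LiveOut[BB] = union of LiveIn[Succ] over all successors Succ of BB
//   LiveIn[BB]  = LiveSet[BB] union (LiveOut[BB] \ KillSet[BB])
// which computeLiveInValues iterates to a fixed point.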

// The type of the internal cache used inside the findBasePointers family
// of functions. From the caller's perspective, this is an opaque type and
// should not be inspected.
//
// In the actual implementation this caches two relations:
// - The base relation itself (i.e. this pointer is based on that one)
// - The base defining value relation (i.e. before base_phi insertion)
// Generally, after the execution of a full findBasePointer call, only the
// base relation will remain. Internally, we add a mixture of the two
// types, then update all entries of the second type to the first type.
using DefiningValueMapTy = MapVector<Value *, Value *>;
using IsKnownBaseMapTy = MapVector<Value *, bool>;
using PointerToBaseTy = MapVector<Value *, Value *>;
using StatepointLiveSetTy = SetVector<Value *>;
using RematerializedValueMapTy =
    MapVector<AssertingVH<Instruction>, AssertingVH<Value>>;

struct PartiallyConstructedSafepointRecord {
  /// The set of values known to be live across this safepoint
  StatepointLiveSetTy LiveSet;

  /// The *new* gc.statepoint instruction itself. This produces the token
  /// that normal path gc.relocates and the gc.result are tied to.
  GCStatepointInst *StatepointToken;

  /// Instruction to which exceptional gc relocates are attached
  /// Makes it easier to iterate through them during relocationViaAlloca.
  Instruction *UnwindToken;

  /// Record live values that we rematerialized instead of relocating.
  /// They are not included in the 'LiveSet' field.
  /// Maps each rematerialized copy to its original value.
  RematerializedValueMapTy RematerializedValues;
};

struct RematerizlizationCandidateRecord {
  // Chain from derived pointer to base.
  SmallVector<Instruction *, 3> ChainToBase;
  // Original base.
  Value *RootOfChain;
  // Cost of chain.
  InstructionCost Cost;
};
using RematCandTy = MapVector<Value *, RematerizlizationCandidateRecord>;

} // end anonymous namespace

static ArrayRef<Use> GetDeoptBundleOperands(const CallBase *Call) {
  std::optional<OperandBundleUse> DeoptBundle =
      Call->getOperandBundle(LLVMContext::OB_deopt);

  if (!DeoptBundle) {
    assert(AllowStatepointWithNoDeoptInfo &&
           "Found non-leaf call without deopt info!");
    return std::nullopt;
  }

  return DeoptBundle->Inputs;
}
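
// For reference, a call carrying abstract deopt state looks like this in the
// IR (operand values purely illustrative):
//   call void @foo(i32 %a) [ "deopt"(i32 %bci, i32 %local_0) ]
// The bundle inputs (%bci, %local_0) returned here are forwarded to the
// gc.statepoint as its deopt arguments.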

/// Compute the live-in set for every basic block in the function
static void computeLiveInValues(DominatorTree &DT, Function &F,
                                GCPtrLivenessData &Data, GCStrategy *GC);

/// Given results from the dataflow liveness computation, find the set of live
/// Values at a particular instruction.
static void findLiveSetAtInst(Instruction *inst, GCPtrLivenessData &Data,
                              StatepointLiveSetTy &out, GCStrategy *GC);

static bool isGCPointerType(Type *T, GCStrategy *GC) {
  assert(GC && "GC Strategy for isGCPointerType cannot be null");

  if (!isa<PointerType>(T))
    return false;

  // conservative - same as StatepointLowering
  return GC->isGCManagedPointer(T).value_or(true);
}

// Return true if this type is one which a) is a gc pointer or contains a GC
// pointer and b) is of a type this code expects to encounter as a live value.
// (The insertion code will assert that a type which matches (a) and not (b)
// is not encountered.)
static bool isHandledGCPointerType(Type *T, GCStrategy *GC) {
  // We fully support gc pointers
  if (isGCPointerType(T, GC))
    return true;
  // We partially support vectors of gc pointers. The code will assert if it
  // can't handle something.
  if (auto VT = dyn_cast<VectorType>(T))
    if (isGCPointerType(VT->getElementType(), GC))
      return true;
  return false;
}

#ifndef NDEBUG
/// Returns true if this type contains a gc pointer whether we know how to
/// handle that type or not.
static bool containsGCPtrType(Type *Ty, GCStrategy *GC) {
  if (isGCPointerType(Ty, GC))
    return true;
  if (VectorType *VT = dyn_cast<VectorType>(Ty))
    return isGCPointerType(VT->getScalarType(), GC);
  if (ArrayType *AT = dyn_cast<ArrayType>(Ty))
    return containsGCPtrType(AT->getElementType(), GC);
  if (StructType *ST = dyn_cast<StructType>(Ty))
    return llvm::any_of(ST->elements(),
                        [GC](Type *Ty) { return containsGCPtrType(Ty, GC); });
  return false;
}

// Returns true if this is a type which a) is a gc pointer or contains a GC
// pointer and b) is of a type which the code doesn't expect (i.e. first class
// aggregates). Used to trip assertions.
static bool isUnhandledGCPointerType(Type *Ty, GCStrategy *GC) {
  return containsGCPtrType(Ty, GC) && !isHandledGCPointerType(Ty, GC);
}
#endif

// Return the name of the value suffixed with the provided suffix, or if the
// value doesn't have a name, the specified default name.
static std::string suffixed_name_or(Value *V, StringRef Suffix,
                                    StringRef DefaultName) {
  return V->hasName() ? (V->getName() + Suffix).str() : DefaultName.str();
}

// Conservatively identifies any definitions which might be live at the
// given instruction. The analysis is performed immediately before the
// given instruction. Values defined by that instruction are not considered
// live. Values used by that instruction are considered live.
static void analyzeParsePointLiveness(
    DominatorTree &DT, GCPtrLivenessData &OriginalLivenessData, CallBase *Call,
    PartiallyConstructedSafepointRecord &Result, GCStrategy *GC) {
  StatepointLiveSetTy LiveSet;
  findLiveSetAtInst(Call, OriginalLivenessData, LiveSet, GC);

  if (PrintLiveSet) {
    dbgs() << "Live Variables:\n";
    for (Value *V : LiveSet)
      dbgs() << " " << V->getName() << " " << *V << "\n";
  }
  if (PrintLiveSetSize) {
    dbgs() << "Safepoint For: " << Call->getCalledOperand()->getName() << "\n";
    dbgs() << "Number live values: " << LiveSet.size() << "\n";
  }
  Result.LiveSet = LiveSet;
}

/// Returns true if V is a known base.
static bool isKnownBase(Value *V, const IsKnownBaseMapTy &KnownBases);

/// Caches the IsKnownBase flag for a value and asserts that it wasn't present
/// in the cache before.
static void setKnownBase(Value *V, bool IsKnownBase,
                         IsKnownBaseMapTy &KnownBases);

static Value *findBaseDefiningValue(Value *I, DefiningValueMapTy &Cache,
                                    IsKnownBaseMapTy &KnownBases);

/// Return a base defining value for the 'Index' element of the given vector
/// instruction 'I'. If Index is null, returns a BDV for the entire vector
/// 'I'. As an optimization, this method will try to determine when the
/// element is known to already be a base pointer. If this can be established,
/// the second value in the returned pair will be true. Note that either a
/// vector or a pointer typed value can be returned. For the former, the
/// vector returned is a BDV (and possibly a base) of the entire vector 'I'.
/// If the latter, the return pointer is a BDV (or possibly a base) for the
/// particular element in 'I'.
static Value *findBaseDefiningValueOfVector(Value *I, DefiningValueMapTy &Cache,
                                            IsKnownBaseMapTy &KnownBases) {
  // Each case parallels findBaseDefiningValue below, see that code for
  // detailed motivation.

  auto Cached = Cache.find(I);
  if (Cached != Cache.end())
    return Cached->second;

  if (isa<Argument>(I)) {
    // An incoming argument to the function is a base pointer
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */ true, KnownBases);
    return I;
  }

  if (isa<Constant>(I)) {
    // Base of constant vector consists only of constant null pointers.
    // For reasoning see similar case inside 'findBaseDefiningValue' function.
    auto *CAZ = ConstantAggregateZero::get(I->getType());
    Cache[I] = CAZ;
    setKnownBase(CAZ, /* IsKnownBase */ true, KnownBases);
    return CAZ;
  }

  if (isa<LoadInst>(I)) {
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */ true, KnownBases);
    return I;
  }

  if (isa<InsertElementInst>(I)) {
    // We don't know whether this vector contains entirely base pointers or
    // not. To be conservatively correct, we treat it as a BDV and will
    // duplicate code as needed to construct a parallel vector of bases.
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */ false, KnownBases);
    return I;
  }

  if (isa<ShuffleVectorInst>(I)) {
    // We don't know whether this vector contains entirely base pointers or
    // not. To be conservatively correct, we treat it as a BDV and will
    // duplicate code as needed to construct a parallel vector of bases.
    // TODO: There are a number of local optimizations which could be applied
    // here for particular shufflevector patterns.
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */ false, KnownBases);
    return I;
  }

  // The behavior of getelementptr instructions is the same for vector and
  // non-vector data types.
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *BDV =
        findBaseDefiningValue(GEP->getPointerOperand(), Cache, KnownBases);
    Cache[GEP] = BDV;
    return BDV;
  }

  // The behavior of freeze instructions is the same for vector and
  // non-vector data types.
  if (auto *Freeze = dyn_cast<FreezeInst>(I)) {
    auto *BDV = findBaseDefiningValue(Freeze->getOperand(0), Cache, KnownBases);
    Cache[Freeze] = BDV;
    return BDV;
  }

  // If the pointer comes through a bitcast of a vector of pointers to
  // a vector of another type of pointer, then look through the bitcast
  if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *BDV = findBaseDefiningValue(BC->getOperand(0), Cache, KnownBases);
    Cache[BC] = BDV;
    return BDV;
  }

  // We assume that functions in the source language only return base
  // pointers. This should probably be generalized via attributes to support
  // both source language and internal functions.
  if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */ true, KnownBases);
    return I;
  }

  // A PHI or Select is a base defining value. The outer findBasePointer
  // algorithm is responsible for constructing a base value for this BDV.
  assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
         "unknown vector instruction - no base found for vector element");
  Cache[I] = I;
  setKnownBase(I, /* IsKnownBase */ false, KnownBases);
  return I;
}

/// Helper function for findBasePointer - Will return a value which either a)
/// defines the base pointer for the input, b) blocks the simple search
/// (i.e. a PHI or Select of two derived pointers), or c) involves a change
/// from pointer to vector type or back.
static Value *findBaseDefiningValue(Value *I, DefiningValueMapTy &Cache,
                                    IsKnownBaseMapTy &KnownBases) {
  assert(I->getType()->isPtrOrPtrVectorTy() &&
         "Illegal to ask for the base pointer of a non-pointer type");
  auto Cached = Cache.find(I);
  if (Cached != Cache.end())
    return Cached->second;

  if (I->getType()->isVectorTy())
    return findBaseDefiningValueOfVector(I, Cache, KnownBases);

  if (isa<Argument>(I)) {
    // An incoming argument to the function is a base pointer
    // We should have never reached here if this argument isn't a gc value
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */ true, KnownBases);
    return I;
  }

  if (isa<Constant>(I)) {
    // We assume that objects with a constant base (e.g. a global) can't move
    // and don't need to be reported to the collector because they are always
    // live. Besides global references, all kinds of constants (e.g. undef,
    // constant expressions, null pointers) can be introduced by the inliner or
    // the optimizer, especially on dynamically dead paths.
    // Here we treat all of them as having a single null base. By doing this we
    // are trying to avoid problems reporting various conflicts in a form of
    // "phi (const1, const2)" or "phi (const, regular gc ptr)".
    // See constant.ll file for relevant test cases.

    auto *CPN = ConstantPointerNull::get(cast<PointerType>(I->getType()));
    Cache[I] = CPN;
    setKnownBase(CPN, /* IsKnownBase */ true, KnownBases);
    return CPN;
  }

  // inttoptrs in an integral address space are currently ill-defined. We
  // treat them as defining base pointers here for consistency with the
  // constant rule above and because we don't really have a better semantic
  // to give them. Note that the optimizer is always free to insert undefined
  // behavior on dynamically dead paths as well.
  if (isa<IntToPtrInst>(I)) {
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */ true, KnownBases);
    return I;
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    Value *Def = CI->stripPointerCasts();
    // If stripping pointer casts changes the address space there is an
    // addrspacecast in between.
    assert(cast<PointerType>(Def->getType())->getAddressSpace() ==
               cast<PointerType>(CI->getType())->getAddressSpace() &&
           "unsupported addrspacecast");
    // If we find a cast instruction here, it means we've found a cast which is
    // not simply a pointer cast (i.e. an inttoptr). We don't know how to
    // handle int->ptr conversion.
    assert(!isa<CastInst>(Def) && "shouldn't find another cast here");
    auto *BDV = findBaseDefiningValue(Def, Cache, KnownBases);
    Cache[CI] = BDV;
    return BDV;
  }

  if (isa<LoadInst>(I)) {
    // The value loaded is a gc base itself
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */ true, KnownBases);
    return I;
  }

  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
    // The base of this GEP is the base
    auto *BDV =
        findBaseDefiningValue(GEP->getPointerOperand(), Cache, KnownBases);
    Cache[GEP] = BDV;
    return BDV;
  }

  if (auto *Freeze = dyn_cast<FreezeInst>(I)) {
    auto *BDV = findBaseDefiningValue(Freeze->getOperand(0), Cache, KnownBases);
    Cache[Freeze] = BDV;
    return BDV;
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      // fall through to general call handling
      break;
    case Intrinsic::experimental_gc_statepoint:
      llvm_unreachable("statepoints don't produce pointers");
    case Intrinsic::experimental_gc_relocate:
      // Rerunning safepoint insertion after safepoints are already
      // inserted is not supported. It could probably be made to work,
      // but why are you doing this? There's no good reason.
      llvm_unreachable("repeat safepoint insertion is not supported");
    case Intrinsic::gcroot:
      // Currently, this mechanism hasn't been extended to work with gcroot.
      // There's no reason it couldn't be, but I haven't thought about the
      // implications much.
      llvm_unreachable(
          "interaction with the gcroot mechanism is not supported");
    case Intrinsic::experimental_gc_get_pointer_base:
      auto *BDV = findBaseDefiningValue(II->getOperand(0), Cache, KnownBases);
      Cache[II] = BDV;
      return BDV;
    }
  }
  // We assume that functions in the source language only return base
  // pointers. This should probably be generalized via attributes to support
  // both source language and internal functions.
  if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */ true, KnownBases);
    return I;
  }

  // TODO: I have absolutely no idea how to implement this part yet. It's not
  // necessarily hard, I just haven't really looked at it yet.
  assert(!isa<LandingPadInst>(I) && "Landing Pad is unimplemented");

  if (isa<AtomicCmpXchgInst>(I)) {
    // A CAS is effectively an atomic store and load combined under a
    // predicate. From the perspective of base pointers, we just treat it
    // like a load.
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */ true, KnownBases);
    return I;
  }

  assert(!isa<AtomicRMWInst>(I) && "Xchg handled above, all others are "
                                   "binary ops which don't apply to pointers");

  // The aggregate ops. Aggregates can either be in the heap or on the
  // stack, but in either case, this is simply a field load. As a result,
  // this defines the base just like a load does.
  if (isa<ExtractValueInst>(I)) {
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */ true, KnownBases);
    return I;
  }

  // We should never see an insert vector since that would require we be
  // tracing back a struct value not a pointer value.
  assert(!isa<InsertValueInst>(I) &&
         "Base pointer for a struct is meaningless");

  // This value might have been generated by findBasePointer() called when
  // substituting gc.get.pointer.base() intrinsic.
  bool IsKnownBase =
      isa<Instruction>(I) && cast<Instruction>(I)->getMetadata("is_base_value");
  setKnownBase(I, /* IsKnownBase */ IsKnownBase, KnownBases);
  Cache[I] = I;

  // An extractelement produces a base result exactly when its input does.
  // We may need to insert a parallel instruction to extract the appropriate
  // element out of the base vector corresponding to the input. Given this,
  // it's analogous to the phi and select case even though it's not a merge.
  if (isa<ExtractElementInst>(I))
    // Note: There are a lot of obvious peephole cases here. These are
    // deliberately handled after the main base pointer inference algorithm to
    // make writing test cases to exercise that code easier.
    return I;

  // The last two cases here don't return a base pointer. Instead, they
  // return a value which dynamically selects from among several base
  // derived pointers (each potentially with its own base). It's the job of
  // the caller to resolve these.
  assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
         "missing instruction case in findBaseDefiningValue");
  return I;
}

/// Returns the base defining value for this value.
static Value *findBaseDefiningValueCached(Value *I, DefiningValueMapTy &Cache,
                                          IsKnownBaseMapTy &KnownBases) {
  if (!Cache.contains(I)) {
    auto *BDV = findBaseDefiningValue(I, Cache, KnownBases);
    Cache[I] = BDV;
    LLVM_DEBUG(dbgs() << "fBDV-cached: " << I->getName() << " -> "
                      << Cache[I]->getName() << ", is known base = "
                      << KnownBases[I] << "\n");
  }
  assert(Cache[I] != nullptr);
  assert(KnownBases.contains(Cache[I]) &&
         "Cached value must be present in known bases map");
  return Cache[I];
}

/// Return a base pointer for this value if known. Otherwise, return its
/// base defining value.
static Value *findBaseOrBDV(Value *I, DefiningValueMapTy &Cache,
                            IsKnownBaseMapTy &KnownBases) {
  Value *Def = findBaseDefiningValueCached(I, Cache, KnownBases);
  auto Found = Cache.find(Def);
  if (Found != Cache.end()) {
    // Either a base-of relation, or a self reference. Caller must check.
    return Found->second;
  }
  // Only a BDV available
  return Def;
}

#ifndef NDEBUG
/// This value is a base pointer that is not generated by RS4GC, i.e. it
/// already exists in the code.
static bool isOriginalBaseResult(Value *V) {
  // no recursion possible
  return !isa<PHINode>(V) && !isa<SelectInst>(V) &&
         !isa<ExtractElementInst>(V) && !isa<InsertElementInst>(V) &&
         !isa<ShuffleVectorInst>(V);
}
#endif

static bool isKnownBase(Value *V, const IsKnownBaseMapTy &KnownBases) {
  auto It = KnownBases.find(V);
  assert(It != KnownBases.end() && "Value not present in the map");
  return It->second;
}

static void setKnownBase(Value *V, bool IsKnownBase,
                         IsKnownBaseMapTy &KnownBases) {
#ifndef NDEBUG
  auto It = KnownBases.find(V);
  if (It != KnownBases.end())
    assert(It->second == IsKnownBase && "Changing already present value");
#endif
  KnownBases[V] = IsKnownBase;
}

// Returns true if First and Second values are both scalar or both vector.
static bool areBothVectorOrScalar(Value *First, Value *Second) {
  return isa<VectorType>(First->getType()) ==
         isa<VectorType>(Second->getType());
}

namespace {

/// Models the state of a single base defining value in the findBasePointer
/// algorithm for determining where a new instruction is needed to propagate
/// the base of this BDV.
class BDVState {
public:
  enum StatusTy {
    // Starting state of lattice
    Unknown,
    // Some specific base value -- does *not* mean that instruction
    // propagates the base of the object
    // ex: gep %arg, 16 -> %arg is the base value
    Base,
    // Need to insert a node to represent a merge.
    Conflict
  };

  BDVState() {
    llvm_unreachable("missing state in map");
  }

  explicit BDVState(Value *OriginalValue)
      : OriginalValue(OriginalValue) {}
  explicit BDVState(Value *OriginalValue, StatusTy Status,
                    Value *BaseValue = nullptr)
      : OriginalValue(OriginalValue), Status(Status), BaseValue(BaseValue) {
    assert(Status != Base || BaseValue);
  }

  StatusTy getStatus() const { return Status; }
  Value *getOriginalValue() const { return OriginalValue; }
  Value *getBaseValue() const { return BaseValue; }

  bool isBase() const { return getStatus() == Base; }
  bool isUnknown() const { return getStatus() == Unknown; }
  bool isConflict() const { return getStatus() == Conflict; }

  // Values of type BDVState form a lattice, and this function implements the
  // meet operation.
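  //
  // As a quick reference (informal; the code below is authoritative):
  //   meet(Unknown,  X)        = X
  //   meet(Base(b),  Base(b))  = Base(b)
  //   meet(Base(b1), Base(b2)) = Conflict   (when b1 != b2)
  //   meet(Conflict, X)        = Conflict
  // i.e. Unknown is the top of the lattice and Conflict is the bottom.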
  void meet(const BDVState &Other) {
    auto markConflict = [&]() {
      Status = BDVState::Conflict;
      BaseValue = nullptr;
    };
    // Conflict is a final state.
    if (isConflict())
      return;
    // if we are not known - just take other state.
    if (isUnknown()) {
      Status = Other.getStatus();
      BaseValue = Other.getBaseValue();
      return;
    }
    // We are base.
    assert(isBase() && "Unknown state");
    // If other is unknown - just keep our state.
    if (Other.isUnknown())
      return;
    // If other is conflict - it is a final state.
    if (Other.isConflict())
      return markConflict();
    // Other is base as well.
    assert(Other.isBase() && "Unknown state");
    // If bases are different - Conflict.
    if (getBaseValue() != Other.getBaseValue())
      return markConflict();
    // We are identical, do nothing.
  }

  bool operator==(const BDVState &Other) const {
    return OriginalValue == Other.OriginalValue &&
           BaseValue == Other.BaseValue && Status == Other.Status;
  }

  bool operator!=(const BDVState &other) const { return !(*this == other); }

  LLVM_DUMP_METHOD
  void dump() const {
    print(dbgs());
    dbgs() << '\n';
  }

  void print(raw_ostream &OS) const {
    switch (getStatus()) {
    case Unknown:
      OS << "U";
      break;
    case Base:
      OS << "B";
      break;
    case Conflict:
      OS << "C";
      break;
    }
    OS << " (base " << getBaseValue() << " - "
       << (getBaseValue() ? getBaseValue()->getName() : "nullptr") << ")"
       << " for " << OriginalValue->getName() << ":";
  }

private:
  AssertingVH<Value> OriginalValue; // instruction this state corresponds to
  StatusTy Status = Unknown;
  AssertingVH<Value> BaseValue = nullptr; // Non-null only if Status == Base.
};

} // end anonymous namespace

#ifndef NDEBUG
static raw_ostream &operator<<(raw_ostream &OS, const BDVState &State) {
  State.print(OS);
  return OS;
}
#endif

/// For a given value or instruction, figure out what base ptr it's derived
/// from. For gc objects, this is simply itself. On success, returns a value
/// which is the base pointer. (This is reliable and can be used for
/// relocation.) On failure, returns nullptr.
static Value *findBasePointer(Value *I, DefiningValueMapTy &Cache,
                              IsKnownBaseMapTy &KnownBases) {
  Value *Def = findBaseOrBDV(I, Cache, KnownBases);

  if (isKnownBase(Def, KnownBases) && areBothVectorOrScalar(Def, I))
    return Def;

  // Here's the rough algorithm:
  // - For every SSA value, construct a mapping to either an actual base
  //   pointer or a PHI which obscures the base pointer.
  // - Construct a mapping from PHI to unknown TOP state. Use an
  //   optimistic algorithm to propagate base pointer information. Lattice
  //   looks like:
  //     UNKNOWN
  //     b1 b2 b3 b4
  //     CONFLICT
  //   When the algorithm terminates, all PHIs will either have a single
  //   concrete base or be in a conflict state.
  // - For every conflict, insert a dummy PHI node without arguments. Add
  //   these to the base[Instruction] = BasePtr mapping. For every
  //   non-conflict, add the actual base.
  // - For every conflict, add arguments for the base[a] of each input
  //   argument.
  //
  // Note: A simpler form of this would be to add the conflict form of all
  // PHIs without running the optimistic algorithm. This would be
  // analogous to pessimistic data flow and would likely lead to an
  // overall worse solution.
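  //
  // As a sketch of the intended result (IR names here are illustrative, not
  // drawn from any particular test): given a merge of two derived pointers
  //   %d = phi ptr [ %gep1, %bb1 ], [ %gep2, %bb2 ]
  // where %gep1 is derived from base %b1 and %gep2 from base %b2, %d ends up
  // in the Conflict state and the algorithm materializes a parallel
  //   %d.base = phi ptr [ %b1, %bb1 ], [ %b2, %bb2 ]
  // which is then recorded as the base of %d.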

#ifndef NDEBUG
  auto isExpectedBDVType = [](Value *BDV) {
    return isa<PHINode>(BDV) || isa<SelectInst>(BDV) ||
           isa<ExtractElementInst>(BDV) || isa<InsertElementInst>(BDV) ||
           isa<ShuffleVectorInst>(BDV);
  };
#endif

  // Once populated, will contain a mapping from each potentially non-base BDV
  // to a lattice value (described above) which corresponds to that BDV.
  // We use the order of insertion (DFS over the def/use graph) to provide a
  // stable deterministic ordering for visiting DenseMaps (which are unordered)
  // below. This is important for deterministic compilation.
  MapVector<Value *, BDVState> States;

#ifndef NDEBUG
  auto VerifyStates = [&]() {
    for (auto &Entry : States) {
      assert(Entry.first == Entry.second.getOriginalValue());
    }
  };
#endif

  auto visitBDVOperands = [](Value *BDV, std::function<void (Value*)> F) {
    if (PHINode *PN = dyn_cast<PHINode>(BDV)) {
      for (Value *InVal : PN->incoming_values())
        F(InVal);
    } else if (SelectInst *SI = dyn_cast<SelectInst>(BDV)) {
      F(SI->getTrueValue());
      F(SI->getFalseValue());
    } else if (auto *EE = dyn_cast<ExtractElementInst>(BDV)) {
      F(EE->getVectorOperand());
    } else if (auto *IE = dyn_cast<InsertElementInst>(BDV)) {
      F(IE->getOperand(0));
      F(IE->getOperand(1));
    } else if (auto *SV = dyn_cast<ShuffleVectorInst>(BDV)) {
      // For a canonical broadcast, ignore the undef argument
      // (without this, we insert a parallel base shuffle for every broadcast)
      F(SV->getOperand(0));
      if (!SV->isZeroEltSplat())
        F(SV->getOperand(1));
    } else {
      llvm_unreachable("unexpected BDV type");
    }
  };


  // Recursively fill in all base defining values reachable from the initial
  // one for which we don't already know a definite base value for
  /* scope */ {
    SmallVector<Value*, 16> Worklist;
    Worklist.push_back(Def);
    States.insert({Def, BDVState(Def)});
    while (!Worklist.empty()) {
      Value *Current = Worklist.pop_back_val();
      assert(!isOriginalBaseResult(Current) && "why did it get added?");

      auto visitIncomingValue = [&](Value *InVal) {
        Value *Base = findBaseOrBDV(InVal, Cache, KnownBases);
        if (isKnownBase(Base, KnownBases) && areBothVectorOrScalar(Base, InVal))
          // Known bases won't need new instructions introduced and can be
          // ignored safely. However, this can only be done when InVal and Base
          // are both scalar or both vector. Otherwise, we need to find a
          // correct BDV for InVal, by creating an entry in the lattice
          // (States).
          return;
        assert(isExpectedBDVType(Base) && "the only non-base values "
               "we see should be base defining values");
        if (States.insert(std::make_pair(Base, BDVState(Base))).second)
          Worklist.push_back(Base);
      };

      visitBDVOperands(Current, visitIncomingValue);
    }
  }

#ifndef NDEBUG
  VerifyStates();
  LLVM_DEBUG(dbgs() << "States after initialization:\n");
  for (const auto &Pair : States) {
    LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
  }
#endif

  // Iterate forward through the value graph pruning any node from the state
  // list where all of the inputs are base pointers. The purpose of this is to
  // reuse existing values when the derived pointer we were asked to
  // materialize a base pointer for happens to be a base pointer itself. (Or a
  // sub-graph feeding it does.)
  SmallVector<Value *> ToRemove;
  do {
    ToRemove.clear();
    for (auto Pair : States) {
      Value *BDV = Pair.first;
      auto canPruneInput = [&](Value *V) {
        // If the input of the BDV is the BDV itself we can prune it. This is
        // only possible if the BDV is a PHI node.
        if (V->stripPointerCasts() == BDV)
          return true;
        Value *VBDV = findBaseOrBDV(V, Cache, KnownBases);
        if (V->stripPointerCasts() != VBDV)
          return false;
        // The assumption is that anything not in the state list propagates a
        // base pointer.
        return States.count(VBDV) == 0;
      };

      bool CanPrune = true;
      visitBDVOperands(BDV, [&](Value *Op) {
        CanPrune = CanPrune && canPruneInput(Op);
      });
      if (CanPrune)
        ToRemove.push_back(BDV);
    }
    for (Value *V : ToRemove) {
      States.erase(V);
      // Cache the fact V is its own base for later usage.
      Cache[V] = V;
    }
  } while (!ToRemove.empty());

  // Did we manage to prove that Def itself must be a base pointer?
  if (!States.count(Def))
    return Def;

  // Return a phi state for a base defining value. We'll generate a new
  // base state for known bases and expect to find a cached state otherwise.
  auto GetStateForBDV = [&](Value *BaseValue, Value *Input) {
    auto I = States.find(BaseValue);
    if (I != States.end())
      return I->second;
    assert(areBothVectorOrScalar(BaseValue, Input));
    return BDVState(BaseValue, BDVState::Base, BaseValue);
  };

  // Even though we have identified a concrete base (or a conflict) for all
  // live pointers at this point, there are cases where the base is of an
  // incompatible type compared to the original instruction. We conservatively
  // mark those as conflicts to ensure that corresponding BDVs will be
  // generated in the next steps.

  // This is a rather explicit check for all cases where we should mark the
  // state as a conflict to force the latter stages of the algorithm to emit
  // the BDVs.
  // TODO: in many cases the instructions emitted for the conflicting states
  //   will be identical to the instruction itself (if the instructions operate
  //   on their BDVs directly). We should exploit this, but can't do it here
  //   since it would break the invariant about the BDVs not being known to be
  //   a base.
  // TODO: the code also does not handle constants at all - the algorithm
  //   relies on all constants having the same BDV and therefore constant-only
  //   insns will never be in conflict, but this check is ignored here. If the
  //   constant conflicts will be to BDVs themselves, they will be identical
  //   instructions and will get optimized away (as in the above TODO)
  auto MarkConflict = [&](Instruction *I, Value *BaseValue) {
    // IE and EE mix vector & scalar, so they are always a conflict.
    if (isa<InsertElementInst>(I) || isa<ExtractElementInst>(I))
      return true;
    // Shuffle vector is always a conflict as it creates new vector from
    // existing ones.
    if (isa<ShuffleVectorInst>(I))
      return true;
    // Any instructions where the computed base type differs from the
    // instruction type. An example is where an extract instruction is used by
    // a select. Here the select's BDV is a vector (because of extract's BDV),
    // while the select itself is a scalar type. Note that the IE and EE
    // instruction check is not fully subsumed by the vector<->scalar check at
    // the end, this is due to the BDV algorithm being ignorant of BDV types at
    // this junction.
    if (!areBothVectorOrScalar(BaseValue, I))
      return true;
    return false;
  };

  bool Progress = true;
  while (Progress) {
#ifndef NDEBUG
    const size_t OldSize = States.size();
#endif
    Progress = false;
    // We're only changing values in this loop, thus safe to keep iterators.
    // Since this is computing a fixed point, the order of visit does not
    // affect the result. TODO: We could use a worklist here and make this run
    // much faster.
    for (auto Pair : States) {
      Value *BDV = Pair.first;
      // Only values that do not have known bases or those that have differing
      // type (scalar versus vector) from a possible known base should be in
      // the lattice.
      assert((!isKnownBase(BDV, KnownBases) ||
              !areBothVectorOrScalar(BDV, Pair.second.getBaseValue())) &&
             "why did it get added?");

      BDVState NewState(BDV);
      visitBDVOperands(BDV, [&](Value *Op) {
        Value *BDV = findBaseOrBDV(Op, Cache, KnownBases);
        auto OpState = GetStateForBDV(BDV, Op);
        NewState.meet(OpState);
      });

      // If the instruction has a known base, but should in fact be marked as
      // a conflict because of incompatible in/out types, we mark it as such,
      // ensuring that it will propagate through the fixpoint iteration.
      auto I = cast<Instruction>(BDV);
      auto BV = NewState.getBaseValue();
      if (BV && MarkConflict(I, BV))
        NewState = BDVState(I, BDVState::Conflict);

      BDVState OldState = Pair.second;
      if (OldState != NewState) {
        Progress = true;
        States[BDV] = NewState;
      }
    }

    assert(OldSize == States.size() &&
           "fixed point shouldn't be adding any new nodes to state");
  }

#ifndef NDEBUG
  VerifyStates();
  LLVM_DEBUG(dbgs() << "States after meet iteration:\n");
  for (const auto &Pair : States) {
    LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
  }

  // since we do the conflict marking as part of the fixpoint iteration this
  // loop only asserts that invariants are met
  for (auto Pair : States) {
    Instruction *I = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;
    auto *BaseValue = State.getBaseValue();
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert(
        (!isKnownBase(I, KnownBases) || !areBothVectorOrScalar(I, BaseValue)) &&
        "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");
  }
#endif

  // Insert Phis for all conflicts
  // TODO: adjust naming patterns to avoid this order of iteration dependency
  for (auto Pair : States) {
    Instruction *I = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBase(I, KnownBases) ||
            !areBothVectorOrScalar(I, State.getBaseValue())) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");

    // Since we're joining a vector and scalar base, they can never be the
    // same. As a result, we should always see insert element having reached
    // the conflict state.
    assert(!isa<InsertElementInst>(I) || State.isConflict());

    if (!State.isConflict())
      continue;

    auto getMangledName = [](Instruction *I) -> std::string {
      if (isa<PHINode>(I)) {
        return suffixed_name_or(I, ".base", "base_phi");
      } else if (isa<SelectInst>(I)) {
        return suffixed_name_or(I, ".base", "base_select");
      } else if (isa<ExtractElementInst>(I)) {
        return suffixed_name_or(I, ".base", "base_ee");
      } else if (isa<InsertElementInst>(I)) {
        return suffixed_name_or(I, ".base", "base_ie");
      } else {
        return suffixed_name_or(I, ".base", "base_sv");
      }
    };

    Instruction *BaseInst = I->clone();
    BaseInst->insertBefore(I);
    BaseInst->setName(getMangledName(I));
    // Add metadata marking this as a base value
    BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {}));
    States[I] = BDVState(I, BDVState::Conflict, BaseInst);
    setKnownBase(BaseInst, /* IsKnownBase */ true, KnownBases);
  }

#ifndef NDEBUG
  VerifyStates();
#endif

  // Returns an instruction which produces the base pointer for a given
  // instruction. The instruction is assumed to be an input to one of the BDVs
  // seen in the inference algorithm above. As such, we must either already
  // know its base defining value is a base, or have inserted a new
  // instruction to propagate the base of its BDV and have entered that newly
  // introduced instruction into the state table. In either case, we are
  // assured to be able to determine an instruction which produces its base
  // pointer.
  auto getBaseForInput = [&](Value *Input, Instruction *InsertPt) {
    Value *BDV = findBaseOrBDV(Input, Cache, KnownBases);
    Value *Base = nullptr;
    if (!States.count(BDV)) {
      assert(areBothVectorOrScalar(BDV, Input));
      Base = BDV;
    } else {
      // Either conflict or base.
      assert(States.count(BDV));
      Base = States[BDV].getBaseValue();
    }
    assert(Base && "Can't be null");
    // The cast is needed since base traversal may strip away bitcasts
    if (Base->getType() != Input->getType() && InsertPt)
      Base = new BitCastInst(Base, Input->getType(), "cast",
                             InsertPt->getIterator());
    return Base;
  };

  // Fixup all the inputs of the new PHIs. Visit order needs to be
  // deterministic and predictable because we're naming newly created
  // instructions.
  for (auto Pair : States) {
    Instruction *BDV = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;

    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBase(BDV, KnownBases) ||
            !areBothVectorOrScalar(BDV, State.getBaseValue())) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");
    if (!State.isConflict())
      continue;

    if (PHINode *BasePHI = dyn_cast<PHINode>(State.getBaseValue())) {
      PHINode *PN = cast<PHINode>(BDV);
      const unsigned NumPHIValues = PN->getNumIncomingValues();

      // The IR verifier requires phi nodes with multiple entries from the
      // same basic block to have the same incoming value for each of those
      // entries. Since we're inserting bitcasts in the loop, make sure we
      // do so at least once per incoming block.
      DenseMap<BasicBlock *, Value *> BlockToValue;
      for (unsigned i = 0; i < NumPHIValues; i++) {
        Value *InVal = PN->getIncomingValue(i);
        BasicBlock *InBB = PN->getIncomingBlock(i);
        if (!BlockToValue.count(InBB))
          BlockToValue[InBB] = getBaseForInput(InVal, InBB->getTerminator());
        else {
#ifndef NDEBUG
          Value *OldBase = BlockToValue[InBB];
          Value *Base = getBaseForInput(InVal, nullptr);

          // We can't use `stripPointerCasts` instead of this function because
          // `stripPointerCasts` doesn't handle vectors of pointers.
          auto StripBitCasts = [](Value *V) -> Value * {
            while (auto *BC = dyn_cast<BitCastInst>(V))
              V = BC->getOperand(0);
            return V;
          };
          // In essence this assert states: the only way two values
          // incoming from the same basic block may be different is by
          // being different bitcasts of the same value. A cleanup
          // that remains TODO is changing findBaseOrBDV to return an
          // llvm::Value of the correct type (and still remain pure).
          // This will remove the need to add bitcasts.
          assert(StripBitCasts(Base) == StripBitCasts(OldBase) &&
                 "findBaseOrBDV should be pure!");
#endif
        }
        Value *Base = BlockToValue[InBB];
        BasePHI->setIncomingValue(i, Base);
      }
    } else if (SelectInst *BaseSI =
                   dyn_cast<SelectInst>(State.getBaseValue())) {
      SelectInst *SI = cast<SelectInst>(BDV);

      // Find the instruction which produces the base for each input.
      // We may need to insert a bitcast.
      BaseSI->setTrueValue(getBaseForInput(SI->getTrueValue(), BaseSI));
      BaseSI->setFalseValue(getBaseForInput(SI->getFalseValue(), BaseSI));
    } else if (auto *BaseEE =
                   dyn_cast<ExtractElementInst>(State.getBaseValue())) {
      Value *InVal = cast<ExtractElementInst>(BDV)->getVectorOperand();
      // Find the instruction which produces the base for each input. We may
      // need to insert a bitcast.
      BaseEE->setOperand(0, getBaseForInput(InVal, BaseEE));
    } else if (auto *BaseIE =
                   dyn_cast<InsertElementInst>(State.getBaseValue())) {
      auto *BdvIE = cast<InsertElementInst>(BDV);
      auto UpdateOperand = [&](int OperandIdx) {
        Value *InVal = BdvIE->getOperand(OperandIdx);
        Value *Base = getBaseForInput(InVal, BaseIE);
        BaseIE->setOperand(OperandIdx, Base);
      };
      UpdateOperand(0); // vector operand
      UpdateOperand(1); // scalar operand
    } else {
      auto *BaseSV = cast<ShuffleVectorInst>(State.getBaseValue());
      auto *BdvSV = cast<ShuffleVectorInst>(BDV);
      auto UpdateOperand = [&](int OperandIdx) {
        Value *InVal = BdvSV->getOperand(OperandIdx);
        Value *Base = getBaseForInput(InVal, BaseSV);
        BaseSV->setOperand(OperandIdx, Base);
      };
      UpdateOperand(0); // vector operand
      if (!BdvSV->isZeroEltSplat())
        UpdateOperand(1); // vector operand
      else {
        // Never read, so just use poison
        Value *InVal = BdvSV->getOperand(1);
        BaseSV->setOperand(1, PoisonValue::get(InVal->getType()));
      }
    }
  }

#ifndef NDEBUG
  VerifyStates();
#endif

  // Get the data layout to compare the sizes of base/derived pointer values.
  [[maybe_unused]] auto &DL =
      cast<llvm::Instruction>(Def)->getDataLayout();
  // Cache all of our results so we can cheaply reuse them
  // NOTE: This is actually two caches: one of the base defining value
  // relation and one of the base pointer relation! FIXME
  for (auto Pair : States) {
    auto *BDV = Pair.first;
    Value *Base = Pair.second.getBaseValue();
    assert(BDV && Base);
    // Whenever we have a derived ptr(s), their base
    // ptr(s) must be of the same size, not necessarily the same type
    assert(DL.getTypeAllocSize(BDV->getType()) ==
               DL.getTypeAllocSize(Base->getType()) &&
           "Derived and base values should have same size");
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert(
        (!isKnownBase(BDV, KnownBases) || !areBothVectorOrScalar(BDV, Base)) &&
        "why did it get added?");

    LLVM_DEBUG(
        dbgs() << "Updating base value cache"
               << " for: " << BDV->getName() << " from: "
               << (Cache.count(BDV) ? Cache[BDV]->getName().str() : "none")
               << " to: " << Base->getName() << "\n");

    Cache[BDV] = Base;
  }
  assert(Cache.count(Def));
  return Cache[Def];
}

// For a set of live pointers (base and/or derived), identify the base
// pointer of the object which they are derived from. This routine will
// mutate the IR graph as needed to make the 'base' pointer live at the
// definition site of 'derived'. This ensures that any use of 'derived' can
// also use 'base'. This may involve the insertion of a number of
// additional PHI nodes.
//
// preconditions: live is a set of pointer type Values
//
// side effects: may insert PHI nodes into the existing CFG, will preserve
// CFG, will not remove or mutate any existing nodes
//
// post condition: PointerToBase contains one (derived, base) pair for every
// pointer in live. Note that derived can be equal to base if the original
// pointer was a base pointer.
static void findBasePointers(const StatepointLiveSetTy &live,
                             PointerToBaseTy &PointerToBase, DominatorTree *DT,
                             DefiningValueMapTy &DVCache,
                             IsKnownBaseMapTy &KnownBases) {
  for (Value *ptr : live) {
    Value *base = findBasePointer(ptr, DVCache, KnownBases);
    assert(base && "failed to find base pointer");
    PointerToBase[ptr] = base;
    assert((!isa<Instruction>(base) || !isa<Instruction>(ptr) ||
            DT->dominates(cast<Instruction>(base)->getParent(),
                          cast<Instruction>(ptr)->getParent())) &&
           "The base we found better dominate the derived pointer");
  }
}

/// Find the required base pointers (and adjust the live set) for the given
/// parse point.
static void findBasePointers(DominatorTree &DT, DefiningValueMapTy &DVCache,
                             CallBase *Call,
                             PartiallyConstructedSafepointRecord &result,
                             PointerToBaseTy &PointerToBase,
                             IsKnownBaseMapTy &KnownBases) {
  StatepointLiveSetTy PotentiallyDerivedPointers = result.LiveSet;
  // We assume that all pointers passed to deopt are base pointers; as an
  // optimization, we can use this to avoid separately materializing the base
  // pointer graph. This is only relevant since we're very conservative about
  // generating new conflict nodes during base pointer insertion. If we were
  // smarter there, this would be irrelevant.
  if (auto Opt = Call->getOperandBundle(LLVMContext::OB_deopt))
    for (Value *V : Opt->Inputs) {
      if (!PotentiallyDerivedPointers.count(V))
        continue;
      PotentiallyDerivedPointers.remove(V);
      PointerToBase[V] = V;
    }
  findBasePointers(PotentiallyDerivedPointers, PointerToBase, &DT, DVCache,
                   KnownBases);
}
1340
1341/// Given an updated version of the dataflow liveness results, update the
1342/// liveset and base pointer maps for the call site CS.
1343static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
1344 CallBase *Call,
1345 PartiallyConstructedSafepointRecord &result,
1346 PointerToBaseTy &PointerToBase,
1347 GCStrategy *GC);
1348
1349static void recomputeLiveInValues(
1350 Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate,
1351 MutableArrayRef<struct PartiallyConstructedSafepointRecord> records,
1352 PointerToBaseTy &PointerToBase, GCStrategy *GC) {
1353 // TODO-PERF: reuse the original liveness, then simply run the dataflow
1354 // again. The old values are still live and will help it stabilize quickly.
1355 GCPtrLivenessData RevisedLivenessData;
1356 computeLiveInValues(DT, F, Data&: RevisedLivenessData, GC);
1357 for (size_t i = 0; i < records.size(); i++) {
1358 struct PartiallyConstructedSafepointRecord &info = records[i];
1359 recomputeLiveInValues(RevisedLivenessData, Call: toUpdate[i], result&: info, PointerToBase,
1360 GC);
1361 }
1362}
1363
1364// Utility function which clones all instructions from "ChainToBase"
1365// and inserts them before "InsertBefore". Returns rematerialized value
1366// which should be used after statepoint.
1367static Instruction *rematerializeChain(ArrayRef<Instruction *> ChainToBase,
1368 Instruction *InsertBefore,
1369 Value *RootOfChain,
1370 Value *AlternateLiveBase) {
1371 Instruction *LastClonedValue = nullptr;
1372 Instruction *LastValue = nullptr;
1373 // Walk backwards to visit top-most instructions first.
1374 for (Instruction *Instr :
1375 make_range(x: ChainToBase.rbegin(), y: ChainToBase.rend())) {
1376 // Only GEP's and casts are supported as we need to be careful to not
1377 // introduce any new uses of pointers not in the liveset.
1378 // Note that it's fine to introduce new uses of pointers which were
1379 // otherwise not used after this statepoint.
1380 assert(isa<GetElementPtrInst>(Instr) || isa<CastInst>(Instr));
1381
1382 Instruction *ClonedValue = Instr->clone();
1383 ClonedValue->insertBefore(InsertPos: InsertBefore);
1384 ClonedValue->setName(Instr->getName() + ".remat");
1385
1386 // If it is not first instruction in the chain then it uses previously
1387 // cloned value. We should update it to use cloned value.
1388 if (LastClonedValue) {
1389 assert(LastValue);
1390 ClonedValue->replaceUsesOfWith(From: LastValue, To: LastClonedValue);
1391#ifndef NDEBUG
1392 for (auto *OpValue : ClonedValue->operand_values()) {
1393 // Assert that cloned instruction does not use any instructions from
1394 // this chain other than LastClonedValue
1395 assert(!is_contained(ChainToBase, OpValue) &&
1396 "incorrect use in rematerialization chain");
1397 // Assert that the cloned instruction does not use the RootOfChain
1398 // or the AlternateLiveBase.
1399 assert(OpValue != RootOfChain && OpValue != AlternateLiveBase);
1400 }
1401#endif
1402 } else {
1403 // For the first instruction, replace the use of the unrelocated base, i.e.
1404 // RootOfChain/OrigRootPhi, with the corresponding PHI present in the
1405 // live set. They have been proven to be the same PHI nodes. Note
1406 // that the *only* use of the RootOfChain in the ChainToBase list is
1407 // the first Value in the list.
1408 if (RootOfChain != AlternateLiveBase)
1409 ClonedValue->replaceUsesOfWith(From: RootOfChain, To: AlternateLiveBase);
1410 }
1411
1412 LastClonedValue = ClonedValue;
1413 LastValue = Instr;
1414 }
1415 assert(LastClonedValue);
1416 return LastClonedValue;
1417}
1418
1419// When inserting gc.relocate and gc.result calls, we need to ensure there are
1420// no uses of the original value / return value between the gc.statepoint and
1421// the gc.relocate / gc.result call. One case which can arise is a phi node
1422// at the start of one of the successor blocks. We also need to be able to insert
1423// gc.relocates only on the path which goes through the statepoint. We might
1424// need to split an edge to make this possible.
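// For illustration (names are hypothetical): given
//   invoke void @f() to label %normal unwind label %lpad
// where %normal has a second predecessor, the edge from the invoking block to
// %normal is split so that the gc.relocates inserted later execute only on the
// path coming from this statepoint; any single-entry phi nodes left in the
// block with the unique predecessor are then folded away.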
1425static BasicBlock *
1426normalizeForInvokeSafepoint(BasicBlock *BB, BasicBlock *InvokeParent,
1427 DominatorTree &DT) {
1428 BasicBlock *Ret = BB;
1429 if (!BB->getUniquePredecessor())
1430 Ret = SplitBlockPredecessors(BB, Preds: InvokeParent, Suffix: "", DT: &DT);
1431
1432 // Now that 'Ret' has a unique predecessor we can safely remove all phi nodes
1433 // from it
1434 FoldSingleEntryPHINodes(BB: Ret);
1435 assert(!isa<PHINode>(Ret->begin()) &&
1436 "All PHI nodes should have been removed!");
1437
1438 // At this point, we can safely insert a gc.relocate or gc.result as the first
1439 // instruction in Ret if needed.
1440 return Ret;
1441}
1442
1443// List of all function attributes which must be stripped when lowering from
1444// abstract machine model to physical machine model. Essentially, these are
1445// all the effects a safepoint might have which we ignored in the abstract
1446// machine model for purposes of optimization. We have to strip these on
1447// both function declarations and call sites.
1448static constexpr Attribute::AttrKind FnAttrsToStrip[] =
1449 {Attribute::Memory, Attribute::NoSync, Attribute::NoFree};
1450
1451// Create new attribute set containing only attributes which can be transferred
1452// from the original call to the safepoint.
1453static AttributeList legalizeCallAttributes(CallBase *Call, bool IsMemIntrinsic,
1454 AttributeList StatepointAL) {
1455 AttributeList OrigAL = Call->getAttributes();
1456 if (OrigAL.isEmpty())
1457 return StatepointAL;
1458
1459 // Remove the attributes in FnAttrsToStrip as well as any statepoint directives.
1460 LLVMContext &Ctx = Call->getContext();
1461 AttrBuilder FnAttrs(Ctx, OrigAL.getFnAttrs());
1462 for (auto Attr : FnAttrsToStrip)
1463 FnAttrs.removeAttribute(Val: Attr);
1464
1465 for (Attribute A : OrigAL.getFnAttrs()) {
1466 if (isStatepointDirectiveAttr(Attr: A))
1467 FnAttrs.removeAttribute(A);
1468 }
1469
1470 StatepointAL = StatepointAL.addFnAttributes(C&: Ctx, B: FnAttrs);
1471
1472 // The memory intrinsics do not have a 1:1 correspondence between the original
1473 // call arguments and the produced statepoint arguments. Do not transfer the
1474 // argument attributes to avoid putting them on incorrect arguments.
1475 if (IsMemIntrinsic)
1476 return StatepointAL;
1477
1478 // Attach the argument attributes from the original call at the corresponding
1479 // arguments in the statepoint. Note that any argument attributes that are
1480 // invalid after lowering are stripped in stripNonValidDataFromBody.
1481 for (unsigned I : llvm::seq(Size: Call->arg_size()))
1482 StatepointAL = StatepointAL.addParamAttributes(
1483 C&: Ctx, ArgNo: GCStatepointInst::CallArgsBeginPos + I,
1484 B: AttrBuilder(Ctx, OrigAL.getParamAttrs(ArgNo: I)));
1485
1486 // Return attributes are later attached to the gc.result intrinsic.
1487 return StatepointAL;
1488}
1489
1490/// Helper function to place all gc relocates necessary for the given
1491/// statepoint.
1492/// Inputs:
1493/// liveVariables - list of variables to be relocated.
1494/// basePtrs - base pointers.
1495/// statepointToken - statepoint instruction to which relocates should be
1496/// bound.
1497/// Builder - LLVM IR builder to be used to construct new calls.
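/// For illustration (value names are hypothetical), for a live pointer %p with
/// base %b this emits roughly
///   %p.relocated = call coldcc ptr addrspace(1)
///       @llvm.experimental.gc.relocate.p1(token %statepoint_token,
///                                         i32 <index of %b>, i32 <index of %p>)
/// where both indices refer to positions in the statepoint's gc argument list.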
1498static void CreateGCRelocates(ArrayRef<Value *> LiveVariables,
1499 ArrayRef<Value *> BasePtrs,
1500 Instruction *StatepointToken,
1501 IRBuilder<> &Builder, GCStrategy *GC) {
1502 if (LiveVariables.empty())
1503 return;
1504
1505 auto FindIndex = [](ArrayRef<Value *> LiveVec, Value *Val) {
1506 auto ValIt = llvm::find(Range&: LiveVec, Val);
1507 assert(ValIt != LiveVec.end() && "Val not found in LiveVec!");
1508 size_t Index = std::distance(first: LiveVec.begin(), last: ValIt);
1509 assert(Index < LiveVec.size() && "Bug in std::find?");
1510 return Index;
1511 };
1512 Module *M = StatepointToken->getModule();
1513
1514 // All gc_relocate are generated as i8 addrspace(1)* (or a vector type whose
1515 // element type is i8 addrspace(1)*). We originally generated unique
1516 // declarations for each pointer type, but this proved problematic because
1517 // the intrinsic mangling code is incomplete and fragile. Since we're moving
1518 // towards a single unified pointer type anyways, we can just cast everything
1519 // to an i8* of the right address space. A bitcast is added later to convert
1520 // gc_relocate to the actual value's type.
1521 auto getGCRelocateDecl = [&](Type *Ty) {
1522 assert(isHandledGCPointerType(Ty, GC));
1523 auto AS = Ty->getScalarType()->getPointerAddressSpace();
1524 Type *NewTy = PointerType::get(C&: M->getContext(), AddressSpace: AS);
1525 if (auto *VT = dyn_cast<VectorType>(Val: Ty))
1526 NewTy = FixedVectorType::get(ElementType: NewTy,
1527 NumElts: cast<FixedVectorType>(Val: VT)->getNumElements());
1528 return Intrinsic::getDeclaration(M, id: Intrinsic::experimental_gc_relocate,
1529 Tys: {NewTy});
1530 };
1531
1532 // Lazily populated map from input types to the canonicalized form mentioned
1533 // in the comment above. This should probably be cached somewhere more
1534 // broadly.
1535 DenseMap<Type *, Function *> TypeToDeclMap;
1536
1537 for (unsigned i = 0; i < LiveVariables.size(); i++) {
1538 // Generate the gc.relocate call and save the result
1539 Value *BaseIdx = Builder.getInt32(C: FindIndex(LiveVariables, BasePtrs[i]));
1540 Value *LiveIdx = Builder.getInt32(C: i);
1541
1542 Type *Ty = LiveVariables[i]->getType();
1543 if (!TypeToDeclMap.count(Val: Ty))
1544 TypeToDeclMap[Ty] = getGCRelocateDecl(Ty);
1545 Function *GCRelocateDecl = TypeToDeclMap[Ty];
1546
1547 // only specify a debug name if we can give a useful one
1548 CallInst *Reloc = Builder.CreateCall(
1549 Callee: GCRelocateDecl, Args: {StatepointToken, BaseIdx, LiveIdx},
1550 Name: suffixed_name_or(V: LiveVariables[i], Suffix: ".relocated", DefaultName: ""));
1551 // Trick CodeGen into thinking there are lots of free registers at this
1552 // fake call.
1553 Reloc->setCallingConv(CallingConv::Cold);
1554 }
1555}
1556
1557namespace {
1558
1559/// This class is used to defer RAUWs and `eraseFromParent`s. Using this
1560/// avoids having to worry about keeping around dangling pointers to Values.
1561class DeferredReplacement {
1562 AssertingVH<Instruction> Old;
1563 AssertingVH<Instruction> New;
1564 bool IsDeoptimize = false;
1565
1566 DeferredReplacement() = default;
1567
1568public:
1569 static DeferredReplacement createRAUW(Instruction *Old, Instruction *New) {
1570 assert(Old != New && Old && New &&
1571 "Cannot RAUW equal values or to / from null!");
1572
1573 DeferredReplacement D;
1574 D.Old = Old;
1575 D.New = New;
1576 return D;
1577 }
1578
1579 static DeferredReplacement createDelete(Instruction *ToErase) {
1580 DeferredReplacement D;
1581 D.Old = ToErase;
1582 return D;
1583 }
1584
1585 static DeferredReplacement createDeoptimizeReplacement(Instruction *Old) {
1586#ifndef NDEBUG
1587 auto *F = cast<CallInst>(Old)->getCalledFunction();
1588 assert(F && F->getIntrinsicID() == Intrinsic::experimental_deoptimize &&
1589 "Only way to construct a deoptimize deferred replacement");
1590#endif
1591 DeferredReplacement D;
1592 D.Old = Old;
1593 D.IsDeoptimize = true;
1594 return D;
1595 }
1596
1597 /// Does the task represented by this instance.
1598 void doReplacement() {
1599 Instruction *OldI = Old;
1600 Instruction *NewI = New;
1601
1602 assert(OldI != NewI && "Disallowed at construction?!");
1603 assert((!IsDeoptimize || !New) &&
1604 "Deoptimize intrinsics are not replaced!");
1605
1606 Old = nullptr;
1607 New = nullptr;
1608
1609 if (NewI)
1610 OldI->replaceAllUsesWith(V: NewI);
1611
1612 if (IsDeoptimize) {
1613 // Note: we've inserted instructions, so the call to llvm.deoptimize may
1614 // not necessarily be followed by the matching return.
1615 auto *RI = cast<ReturnInst>(Val: OldI->getParent()->getTerminator());
1616 new UnreachableInst(RI->getContext(), RI->getIterator());
1617 RI->eraseFromParent();
1618 }
1619
1620 OldI->eraseFromParent();
1621 }
1622};
1623
1624} // end anonymous namespace
1625
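// Return the deopt lowering requested via the "deopt-lowering" call-site or
// callee attribute, e.g. "deopt-lowering"="live-in" (illustrative); defaults
// to "live-through".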
1626static StringRef getDeoptLowering(CallBase *Call) {
1627 const char *DeoptLowering = "deopt-lowering";
1628 if (Call->hasFnAttr(Kind: DeoptLowering)) {
1629 // FIXME: Calls have a *really* confusing interface around attributes
1630 // with values.
1631 const AttributeList &CSAS = Call->getAttributes();
1632 if (CSAS.hasFnAttr(Kind: DeoptLowering))
1633 return CSAS.getFnAttr(Kind: DeoptLowering).getValueAsString();
1634 Function *F = Call->getCalledFunction();
1635 assert(F && F->hasFnAttribute(DeoptLowering));
1636 return F->getFnAttribute(Kind: DeoptLowering).getValueAsString();
1637 }
1638 return "live-through";
1639}
1640
1641static void
1642makeStatepointExplicitImpl(CallBase *Call, /* to replace */
1643 const SmallVectorImpl<Value *> &BasePtrs,
1644 const SmallVectorImpl<Value *> &LiveVariables,
1645 PartiallyConstructedSafepointRecord &Result,
1646 std::vector<DeferredReplacement> &Replacements,
1647 const PointerToBaseTy &PointerToBase,
1648 GCStrategy *GC) {
1649 assert(BasePtrs.size() == LiveVariables.size());
1650
1651 // Then go ahead and use the builder to actually do the inserts. We insert
1652 // immediately before the previous instruction under the assumption that all
1653 // arguments will be available here. We can't insert afterwards since we may
1654 // be replacing a terminator.
1655 IRBuilder<> Builder(Call);
1656
1657 ArrayRef<Value *> GCArgs(LiveVariables);
1658 uint64_t StatepointID = StatepointDirectives::DefaultStatepointID;
1659 uint32_t NumPatchBytes = 0;
1660 uint32_t Flags = uint32_t(StatepointFlags::None);
1661
1662 SmallVector<Value *, 8> CallArgs(Call->args());
1663 std::optional<ArrayRef<Use>> DeoptArgs;
1664 if (auto Bundle = Call->getOperandBundle(ID: LLVMContext::OB_deopt))
1665 DeoptArgs = Bundle->Inputs;
1666 std::optional<ArrayRef<Use>> TransitionArgs;
1667 if (auto Bundle = Call->getOperandBundle(ID: LLVMContext::OB_gc_transition)) {
1668 TransitionArgs = Bundle->Inputs;
1669 // TODO: This flag no longer serves a purpose and can be removed later
1670 Flags |= uint32_t(StatepointFlags::GCTransition);
1671 }
1672
1673 // Instead of lowering calls to @llvm.experimental.deoptimize as normal calls
1674 // with a return value, we lower them as never-returning calls to
1675 // __llvm_deoptimize that are followed by unreachable to get better codegen.
1676 bool IsDeoptimize = false;
1677 bool IsMemIntrinsic = false;
1678
1679 StatepointDirectives SD =
1680 parseStatepointDirectivesFromAttrs(AS: Call->getAttributes());
1681 if (SD.NumPatchBytes)
1682 NumPatchBytes = *SD.NumPatchBytes;
1683 if (SD.StatepointID)
1684 StatepointID = *SD.StatepointID;
1685
1686 // Pass through the requested lowering if any. The default is live-through.
1687 StringRef DeoptLowering = getDeoptLowering(Call);
1688 if (DeoptLowering == "live-in")
1689 Flags |= uint32_t(StatepointFlags::DeoptLiveIn);
1690 else {
1691 assert(DeoptLowering == "live-through" && "Unsupported value!");
1692 }
1693
1694 FunctionCallee CallTarget(Call->getFunctionType(), Call->getCalledOperand());
1695 if (Function *F = dyn_cast<Function>(Val: CallTarget.getCallee())) {
1696 auto IID = F->getIntrinsicID();
1697 if (IID == Intrinsic::experimental_deoptimize) {
1698 // Calls to llvm.experimental.deoptimize are lowered to calls to the
1699 // __llvm_deoptimize symbol. We want to resolve this now, since the
1700 // verifier does not allow taking the address of an intrinsic function.
1701
1702 SmallVector<Type *, 8> DomainTy;
1703 for (Value *Arg : CallArgs)
1704 DomainTy.push_back(Elt: Arg->getType());
1705 auto *FTy = FunctionType::get(Result: Type::getVoidTy(C&: F->getContext()), Params: DomainTy,
1706 /* isVarArg = */ false);
1707
1708 // Note: CallTarget can be a bitcast instruction of a symbol if there are
1709 // calls to @llvm.experimental.deoptimize with different argument types in
1710 // the same module. This is fine -- we assume the frontend knew what it
1711 // was doing when generating this kind of IR.
1712 CallTarget = F->getParent()
1713 ->getOrInsertFunction(Name: "__llvm_deoptimize", T: FTy);
1714
1715 IsDeoptimize = true;
1716 } else if (IID == Intrinsic::memcpy_element_unordered_atomic ||
1717 IID == Intrinsic::memmove_element_unordered_atomic) {
1718 IsMemIntrinsic = true;
1719
1720 // Unordered atomic memcpy and memmove intrinsics which are not explicitly
1721 // marked as "gc-leaf-function" should be lowered in a GC parseable way.
1722 // Specifically, these calls should be lowered to the
1723 // __llvm_{memcpy|memmove}_element_unordered_atomic_safepoint symbols.
1724 // Similarly to __llvm_deoptimize we want to resolve this now, since the
1725 // verifier does not allow taking the address of an intrinsic function.
1726 //
1727 // Moreover, we need to shuffle the arguments for the call in order to
1728 // accommodate GC. The underlying source and destination objects might be
1729 // relocated during the copy operation should a GC occur. To relocate the
1730 // derived source and destination pointers the implementation of the
1731 // intrinsic should know the corresponding base pointers.
1732 //
1733 // To make the base pointers available pass them explicitly as arguments:
1734 // memcpy(dest_derived, source_derived, ...) =>
1735 // memcpy(dest_base, dest_offset, source_base, source_offset, ...)
1736 auto &Context = Call->getContext();
1737 auto &DL = Call->getDataLayout();
1738 auto GetBaseAndOffset = [&](Value *Derived) {
1739 Value *Base = nullptr;
1740 // Optimizations in unreachable code might substitute the real pointer
1741 // with undef, poison or null-derived constant. Return null base for
1742 // them to be consistent with the handling in the main algorithm in
1743 // findBaseDefiningValue.
1744 if (isa<Constant>(Val: Derived))
1745 Base =
1746 ConstantPointerNull::get(T: cast<PointerType>(Val: Derived->getType()));
1747 else {
1748 assert(PointerToBase.count(Derived));
1749 Base = PointerToBase.find(Key: Derived)->second;
1750 }
1751 unsigned AddressSpace = Derived->getType()->getPointerAddressSpace();
1752 unsigned IntPtrSize = DL.getPointerSizeInBits(AS: AddressSpace);
1753 Value *Base_int = Builder.CreatePtrToInt(
1754 V: Base, DestTy: Type::getIntNTy(C&: Context, N: IntPtrSize));
1755 Value *Derived_int = Builder.CreatePtrToInt(
1756 V: Derived, DestTy: Type::getIntNTy(C&: Context, N: IntPtrSize));
1757 return std::make_pair(x&: Base, y: Builder.CreateSub(LHS: Derived_int, RHS: Base_int));
1758 };
1759
1760 auto *Dest = CallArgs[0];
1761 Value *DestBase, *DestOffset;
1762 std::tie(args&: DestBase, args&: DestOffset) = GetBaseAndOffset(Dest);
1763
1764 auto *Source = CallArgs[1];
1765 Value *SourceBase, *SourceOffset;
1766 std::tie(args&: SourceBase, args&: SourceOffset) = GetBaseAndOffset(Source);
1767
1768 auto *LengthInBytes = CallArgs[2];
1769 auto *ElementSizeCI = cast<ConstantInt>(Val: CallArgs[3]);
1770
1771 CallArgs.clear();
1772 CallArgs.push_back(Elt: DestBase);
1773 CallArgs.push_back(Elt: DestOffset);
1774 CallArgs.push_back(Elt: SourceBase);
1775 CallArgs.push_back(Elt: SourceOffset);
1776 CallArgs.push_back(Elt: LengthInBytes);
1777
1778 SmallVector<Type *, 8> DomainTy;
1779 for (Value *Arg : CallArgs)
1780 DomainTy.push_back(Elt: Arg->getType());
1781 auto *FTy = FunctionType::get(Result: Type::getVoidTy(C&: F->getContext()), Params: DomainTy,
1782 /* isVarArg = */ false);
1783
1784 auto GetFunctionName = [](Intrinsic::ID IID, ConstantInt *ElementSizeCI) {
1785 uint64_t ElementSize = ElementSizeCI->getZExtValue();
1786 if (IID == Intrinsic::memcpy_element_unordered_atomic) {
1787 switch (ElementSize) {
1788 case 1:
1789 return "__llvm_memcpy_element_unordered_atomic_safepoint_1";
1790 case 2:
1791 return "__llvm_memcpy_element_unordered_atomic_safepoint_2";
1792 case 4:
1793 return "__llvm_memcpy_element_unordered_atomic_safepoint_4";
1794 case 8:
1795 return "__llvm_memcpy_element_unordered_atomic_safepoint_8";
1796 case 16:
1797 return "__llvm_memcpy_element_unordered_atomic_safepoint_16";
1798 default:
1799 llvm_unreachable("unexpected element size!");
1800 }
1801 }
1802 assert(IID == Intrinsic::memmove_element_unordered_atomic);
1803 switch (ElementSize) {
1804 case 1:
1805 return "__llvm_memmove_element_unordered_atomic_safepoint_1";
1806 case 2:
1807 return "__llvm_memmove_element_unordered_atomic_safepoint_2";
1808 case 4:
1809 return "__llvm_memmove_element_unordered_atomic_safepoint_4";
1810 case 8:
1811 return "__llvm_memmove_element_unordered_atomic_safepoint_8";
1812 case 16:
1813 return "__llvm_memmove_element_unordered_atomic_safepoint_16";
1814 default:
1815 llvm_unreachable("unexpected element size!");
1816 }
1817 };
1818
1819 CallTarget =
1820 F->getParent()
1821 ->getOrInsertFunction(Name: GetFunctionName(IID, ElementSizeCI), T: FTy);
1822 }
1823 }
1824
1825 // Create the statepoint given all the arguments
1826 GCStatepointInst *Token = nullptr;
1827 if (auto *CI = dyn_cast<CallInst>(Val: Call)) {
1828 CallInst *SPCall = Builder.CreateGCStatepointCall(
1829 ID: StatepointID, NumPatchBytes, ActualCallee: CallTarget, Flags, CallArgs,
1830 TransitionArgs, DeoptArgs, GCArgs, Name: "safepoint_token");
1831
1832 SPCall->setTailCallKind(CI->getTailCallKind());
1833 SPCall->setCallingConv(CI->getCallingConv());
1834
1835 // Set up function attrs directly on statepoint and return attrs later for
1836 // gc_result intrinsic.
1837 SPCall->setAttributes(
1838 legalizeCallAttributes(Call: CI, IsMemIntrinsic, StatepointAL: SPCall->getAttributes()));
1839
1840 Token = cast<GCStatepointInst>(Val: SPCall);
1841
1842 // Put the following gc_result and gc_relocate calls immediately after
1843 // the old call (which we're about to delete).
1844 assert(CI->getNextNode() && "Not a terminator, must have next!");
1845 Builder.SetInsertPoint(CI->getNextNode());
1846 Builder.SetCurrentDebugLocation(CI->getNextNode()->getDebugLoc());
1847 } else {
1848 auto *II = cast<InvokeInst>(Val: Call);
1849
1850 // Insert the new invoke into the old block. We'll remove the old one in a
1851 // moment at which point this will become the new terminator for the
1852 // original block.
1853 InvokeInst *SPInvoke = Builder.CreateGCStatepointInvoke(
1854 ID: StatepointID, NumPatchBytes, ActualInvokee: CallTarget, NormalDest: II->getNormalDest(),
1855 UnwindDest: II->getUnwindDest(), Flags, InvokeArgs: CallArgs, TransitionArgs, DeoptArgs, GCArgs,
1856 Name: "statepoint_token");
1857
1858 SPInvoke->setCallingConv(II->getCallingConv());
1859
1860 // Set up function attrs directly on statepoint and return attrs later for
1861 // gc_result intrinsic.
1862 SPInvoke->setAttributes(
1863 legalizeCallAttributes(Call: II, IsMemIntrinsic, StatepointAL: SPInvoke->getAttributes()));
1864
1865 Token = cast<GCStatepointInst>(Val: SPInvoke);
1866
1867 // Generate gc relocates in exceptional path
1868 BasicBlock *UnwindBlock = II->getUnwindDest();
1869 assert(!isa<PHINode>(UnwindBlock->begin()) &&
1870 UnwindBlock->getUniquePredecessor() &&
1871 "can't safely insert in this block!");
1872
1873 Builder.SetInsertPoint(TheBB: UnwindBlock, IP: UnwindBlock->getFirstInsertionPt());
1874 Builder.SetCurrentDebugLocation(II->getDebugLoc());
1875
1876 // Attach exceptional gc relocates to the landingpad.
1877 Instruction *ExceptionalToken = UnwindBlock->getLandingPadInst();
1878 Result.UnwindToken = ExceptionalToken;
1879
1880 CreateGCRelocates(LiveVariables, BasePtrs, StatepointToken: ExceptionalToken, Builder, GC);
1881
1882 // Generate gc relocates and returns for normal block
1883 BasicBlock *NormalDest = II->getNormalDest();
1884 assert(!isa<PHINode>(NormalDest->begin()) &&
1885 NormalDest->getUniquePredecessor() &&
1886 "can't safely insert in this block!");
1887
1888 Builder.SetInsertPoint(TheBB: NormalDest, IP: NormalDest->getFirstInsertionPt());
1889
1890 // gc relocates will be generated later as if it were a regular call
1891 // statepoint.
1892 }
1893 assert(Token && "Should be set in one of the above branches!");
1894
1895 if (IsDeoptimize) {
1896 // If we're wrapping an @llvm.experimental.deoptimize in a statepoint, we
1897 // transform the tail-call like structure to a call to a void function
1898 // followed by unreachable to get better codegen.
1899 Replacements.push_back(
1900 x: DeferredReplacement::createDeoptimizeReplacement(Old: Call));
1901 } else {
1902 Token->setName("statepoint_token");
1903 if (!Call->getType()->isVoidTy() && !Call->use_empty()) {
1904 StringRef Name = Call->hasName() ? Call->getName() : "";
1905 CallInst *GCResult = Builder.CreateGCResult(Statepoint: Token, ResultType: Call->getType(), Name);
1906 GCResult->setAttributes(
1907 AttributeList::get(C&: GCResult->getContext(), Index: AttributeList::ReturnIndex,
1908 Attrs: Call->getAttributes().getRetAttrs()));
1909
1910 // We cannot RAUW or delete the original call because it could be in the
1911 // live set of some other safepoint, in which case that safepoint's
1912 // PartiallyConstructedSafepointRecord will hold a raw pointer to this
1913 // llvm::Instruction. Instead, we defer the replacement and deletion to
1914 // after the live sets have been made explicit in the IR, and we no longer
1915 // have raw pointers to worry about.
1916 Replacements.emplace_back(
1917 args: DeferredReplacement::createRAUW(Old: Call, New: GCResult));
1918 } else {
1919 Replacements.emplace_back(args: DeferredReplacement::createDelete(ToErase: Call));
1920 }
1921 }
1922
1923 Result.StatepointToken = Token;
1924
1925 // Second, create a gc.relocate for every live variable
1926 CreateGCRelocates(LiveVariables, BasePtrs, StatepointToken: Token, Builder, GC);
1927}
1928
1929// Replace an existing gc.statepoint with a new one and a set of gc.relocates
1930// which make the relocations happening at this safepoint explicit.
1931//
1932// WARNING: Does not do any fixup to adjust users of the original live
1933// values. That's the caller's responsibility.
1934static void
1935makeStatepointExplicit(DominatorTree &DT, CallBase *Call,
1936 PartiallyConstructedSafepointRecord &Result,
1937 std::vector<DeferredReplacement> &Replacements,
1938 const PointerToBaseTy &PointerToBase, GCStrategy *GC) {
1939 const auto &LiveSet = Result.LiveSet;
1940
1941 // Convert to vector for efficient cross referencing.
1942 SmallVector<Value *, 64> BaseVec, LiveVec;
1943 LiveVec.reserve(N: LiveSet.size());
1944 BaseVec.reserve(N: LiveSet.size());
1945 for (Value *L : LiveSet) {
1946 LiveVec.push_back(Elt: L);
1947 assert(PointerToBase.count(L));
1948 Value *Base = PointerToBase.find(Key: L)->second;
1949 BaseVec.push_back(Elt: Base);
1950 }
1951 assert(LiveVec.size() == BaseVec.size());
1952
1953 // Do the actual rewriting and delete the old statepoint
1954 makeStatepointExplicitImpl(Call, BasePtrs: BaseVec, LiveVariables: LiveVec, Result, Replacements,
1955 PointerToBase, GC);
1956}
1957
1958// Helper function for the relocationViaAlloca.
1959//
1960// It receives an iterator range over the statepoint's gc relocates and emits a
1961// store to the assigned location (via AllocaMap) for each one of them. It adds
1962// the visited values into the VisitedLiveValues set, which we will later use
1963// for validation checking.
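// For illustration (value names are hypothetical), for a relocate
//   %p.relocated = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(...)
// whose derived pointer maps to %p.alloca, this emits roughly
//   store ptr addrspace(1) %p.relocated, ptr %p.alloca
// immediately after the relocate.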
1964static void
1965insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs,
1966 DenseMap<Value *, AllocaInst *> &AllocaMap,
1967 DenseSet<Value *> &VisitedLiveValues) {
1968 for (User *U : GCRelocs) {
1969 GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(Val: U);
1970 if (!Relocate)
1971 continue;
1972
1973 Value *OriginalValue = Relocate->getDerivedPtr();
1974 assert(AllocaMap.count(OriginalValue));
1975 Value *Alloca = AllocaMap[OriginalValue];
1976
1977 // Emit store into the related alloca.
1978 assert(Relocate->getNextNode() &&
1979 "Should always have one since it's not a terminator");
1980 new StoreInst(Relocate, Alloca, std::next(x: Relocate->getIterator()));
1981
1982#ifndef NDEBUG
1983 VisitedLiveValues.insert(OriginalValue);
1984#endif
1985 }
1986}
1987
1988// Helper function for the "relocationViaAlloca". Similar to the
1989// "insertRelocationStores" but works for rematerialized values.
1990static void insertRematerializationStores(
1991 const RematerializedValueMapTy &RematerializedValues,
1992 DenseMap<Value *, AllocaInst *> &AllocaMap,
1993 DenseSet<Value *> &VisitedLiveValues) {
1994 for (auto RematerializedValuePair: RematerializedValues) {
1995 Instruction *RematerializedValue = RematerializedValuePair.first;
1996 Value *OriginalValue = RematerializedValuePair.second;
1997
1998 assert(AllocaMap.count(OriginalValue) &&
1999 "Can not find alloca for rematerialized value");
2000 Value *Alloca = AllocaMap[OriginalValue];
2001
2002 new StoreInst(RematerializedValue, Alloca,
2003 std::next(x: RematerializedValue->getIterator()));
2004
2005#ifndef NDEBUG
2006 VisitedLiveValues.insert(OriginalValue);
2007#endif
2008 }
2009}
2010
2011/// Do all the relocation update via allocas and mem2reg
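/// For each live value this emits an alloca in the entry block, stores the
/// relocated (or rematerialized) value into it after every statepoint, stores
/// the original value after its definition, replaces every use with a load
/// from the alloca, and finally relies on mem2reg to rebuild SSA form.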
2012static void relocationViaAlloca(
2013 Function &F, DominatorTree &DT, ArrayRef<Value *> Live,
2014 ArrayRef<PartiallyConstructedSafepointRecord> Records) {
2015#ifndef NDEBUG
2016 // Record the initial number of (static) allocas; we'll check we have the
2017 // same number when we're done.
2018 int InitialAllocaNum = 0;
2019 for (Instruction &I : F.getEntryBlock())
2020 if (isa<AllocaInst>(I))
2021 InitialAllocaNum++;
2022#endif
2023
2024 // TODO-PERF: change data structures, reserve
2025 DenseMap<Value *, AllocaInst *> AllocaMap;
2026 SmallVector<AllocaInst *, 200> PromotableAllocas;
2027 // Used later to check that we have enough allocas to store all values.
2028 std::size_t NumRematerializedValues = 0;
2029 PromotableAllocas.reserve(N: Live.size());
2030
2031 // Emit alloca for "LiveValue" and record it in "allocaMap" and
2032 // "PromotableAllocas"
2033 const DataLayout &DL = F.getDataLayout();
2034 auto emitAllocaFor = [&](Value *LiveValue) {
2035 AllocaInst *Alloca =
2036 new AllocaInst(LiveValue->getType(), DL.getAllocaAddrSpace(), "",
2037 F.getEntryBlock().getFirstNonPHIIt());
2038 AllocaMap[LiveValue] = Alloca;
2039 PromotableAllocas.push_back(Elt: Alloca);
2040 };
2041
2042 // Emit alloca for each live gc pointer
2043 for (Value *V : Live)
2044 emitAllocaFor(V);
2045
2046 // Emit allocas for rematerialized values
2047 for (const auto &Info : Records)
2048 for (auto RematerializedValuePair : Info.RematerializedValues) {
2049 Value *OriginalValue = RematerializedValuePair.second;
2050 if (AllocaMap.contains(Val: OriginalValue))
2051 continue;
2052
2053 emitAllocaFor(OriginalValue);
2054 ++NumRematerializedValues;
2055 }
2056
2057 // The next two loops are part of the same conceptual operation. We need to
2058 // insert a store to the alloca after the original def and at each
2059 // redefinition. We need to insert a load before each use. These are split
2060 // into distinct loops for performance reasons.
2061
2062 // Update the gc pointer after each statepoint: either store a relocated value
2063 // or null (if no relocated value was found for this gc pointer and it is not a
2064 // gc_result). This must happen before we update the statepoint with a load of
2065 // the alloca, otherwise we lose the link between the statepoint and the old def.
2066 for (const auto &Info : Records) {
2067 Value *Statepoint = Info.StatepointToken;
2068
2069 // This will be used for consistency check
2070 DenseSet<Value *> VisitedLiveValues;
2071
2072 // Insert stores for normal statepoint gc relocates
2073 insertRelocationStores(GCRelocs: Statepoint->users(), AllocaMap, VisitedLiveValues);
2074
2075 // If it was an invoke statepoint,
2076 // we will also insert stores for the exceptional-path gc relocates.
2077 if (isa<InvokeInst>(Val: Statepoint)) {
2078 insertRelocationStores(GCRelocs: Info.UnwindToken->users(), AllocaMap,
2079 VisitedLiveValues);
2080 }
2081
2082 // Do the same for rematerialized values.
2083 insertRematerializationStores(RematerializedValues: Info.RematerializedValues, AllocaMap,
2084 VisitedLiveValues);
2085
2086 if (ClobberNonLive) {
2087 // As a debugging aid, pretend that an unrelocated pointer becomes null at
2088 // the gc.statepoint. This will turn some subtle GC problems into
2089 // slightly easier to debug SEGVs. Note that on large IR files with
2090 // lots of gc.statepoints this is extremely costly in both memory and
2091 // time.
2092 SmallVector<AllocaInst *, 64> ToClobber;
2093 for (auto Pair : AllocaMap) {
2094 Value *Def = Pair.first;
2095 AllocaInst *Alloca = Pair.second;
2096
2097 // This value was relocated
2098 if (VisitedLiveValues.count(V: Def)) {
2099 continue;
2100 }
2101 ToClobber.push_back(Elt: Alloca);
2102 }
2103
2104 auto InsertClobbersAt = [&](BasicBlock::iterator IP) {
2105 for (auto *AI : ToClobber) {
2106 auto AT = AI->getAllocatedType();
2107 Constant *CPN;
2108 if (AT->isVectorTy())
2109 CPN = ConstantAggregateZero::get(Ty: AT);
2110 else
2111 CPN = ConstantPointerNull::get(T: cast<PointerType>(Val: AT));
2112 new StoreInst(CPN, AI, IP);
2113 }
2114 };
2115
2116 // Insert the clobbering stores. These may get intermixed with the
2117 // gc.results and gc.relocates, but that's fine.
2118 if (auto II = dyn_cast<InvokeInst>(Val: Statepoint)) {
2119 InsertClobbersAt(II->getNormalDest()->getFirstInsertionPt());
2120 InsertClobbersAt(II->getUnwindDest()->getFirstInsertionPt());
2121 } else {
2122 InsertClobbersAt(
2123 std::next(x: cast<Instruction>(Val: Statepoint)->getIterator()));
2124 }
2125 }
2126 }
2127
2128 // Update uses with loads from the allocas and add a store of the original def.
2129 for (auto Pair : AllocaMap) {
2130 Value *Def = Pair.first;
2131 AllocaInst *Alloca = Pair.second;
2132
2133 // We pre-record the uses of the def so that we don't have to worry about
2134 // later updates that change the user information.
2135
2136 SmallVector<Instruction *, 20> Uses;
2137 // PERF: trade a linear scan for repeated reallocation
2138 Uses.reserve(N: Def->getNumUses());
2139 for (User *U : Def->users()) {
2140 if (!isa<ConstantExpr>(Val: U)) {
2141 // If the def has a ConstantExpr use, then the def is either a
2142 // ConstantExpr use itself or null. In either case
2143 // (recursively in the first, directly in the second), the oop
2144 // it is ultimately dependent on is null and this particular
2145 // use does not need to be fixed up.
2146 Uses.push_back(Elt: cast<Instruction>(Val: U));
2147 }
2148 }
2149
2150 llvm::sort(C&: Uses);
2151 auto Last = llvm::unique(R&: Uses);
2152 Uses.erase(CS: Last, CE: Uses.end());
2153
2154 for (Instruction *Use : Uses) {
2155 if (isa<PHINode>(Val: Use)) {
2156 PHINode *Phi = cast<PHINode>(Val: Use);
2157 for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) {
2158 if (Def == Phi->getIncomingValue(i)) {
2159 LoadInst *Load = new LoadInst(
2160 Alloca->getAllocatedType(), Alloca, "",
2161 Phi->getIncomingBlock(i)->getTerminator()->getIterator());
2162 Phi->setIncomingValue(i, V: Load);
2163 }
2164 }
2165 } else {
2166 LoadInst *Load = new LoadInst(Alloca->getAllocatedType(), Alloca, "",
2167 Use->getIterator());
2168 Use->replaceUsesOfWith(From: Def, To: Load);
2169 }
2170 }
2171
2172 // Emit the store for the initial gc value. The store must be inserted after
2173 // the loads, otherwise the store would be in the alloca's use list and an
2174 // extra load would be inserted before it.
2175 StoreInst *Store = new StoreInst(Def, Alloca, /*volatile*/ false,
2176 DL.getABITypeAlign(Ty: Def->getType()));
2177 if (Instruction *Inst = dyn_cast<Instruction>(Val: Def)) {
2178 if (InvokeInst *Invoke = dyn_cast<InvokeInst>(Val: Inst)) {
2179 // InvokeInst is a terminator, so the store needs to be inserted into its
2180 // normal destination block.
2181 BasicBlock *NormalDest = Invoke->getNormalDest();
2182 Store->insertBefore(InsertPos: NormalDest->getFirstNonPHI());
2183 } else {
2184 assert(!Inst->isTerminator() &&
2185 "The only terminator that can produce a value is "
2186 "InvokeInst which is handled above.");
2187 Store->insertAfter(InsertPos: Inst);
2188 }
2189 } else {
2190 assert(isa<Argument>(Def));
2191 Store->insertAfter(InsertPos: cast<Instruction>(Val: Alloca));
2192 }
2193 }
2194
2195 assert(PromotableAllocas.size() == Live.size() + NumRematerializedValues &&
2196 "we must have the same allocas with lives");
2197 (void) NumRematerializedValues;
2198 if (!PromotableAllocas.empty()) {
2199 // Apply mem2reg to promote alloca to SSA
2200 PromoteMemToReg(Allocas: PromotableAllocas, DT);
2201 }
2202
2203#ifndef NDEBUG
2204 for (auto &I : F.getEntryBlock())
2205 if (isa<AllocaInst>(I))
2206 InitialAllocaNum--;
2207 assert(InitialAllocaNum == 0 && "We must not introduce any extra allocas");
2208#endif
2209}
2210
2211/// Implement a unique function which doesn't require us to sort the input
2212/// vector. Sorting would have the effect of changing the output of a couple
2213/// of tests in ways which make them less useful in testing fused safepoints.
2214template <typename T> static void unique_unsorted(SmallVectorImpl<T> &Vec) {
2215 SmallSet<T, 8> Seen;
2216 erase_if(Vec, [&](const T &V) { return !Seen.insert(V).second; });
2217}
2218
2219/// Insert holders so that each Value is obviously live through the entire
2220/// lifetime of the call.
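/// This is done with a dummy vararg call, e.g. (illustrative operands)
///   call void (...) @__tmp_use(ptr addrspace(1) %v1, ptr addrspace(1) %v2)
/// inserted right after the safepoint; the holders are erased once liveness
/// has been recomputed.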
2221static void insertUseHolderAfter(CallBase *Call, const ArrayRef<Value *> Values,
2222 SmallVectorImpl<CallInst *> &Holders) {
2223 if (Values.empty())
2224 // No values to hold live, might as well not insert the empty holder
2225 return;
2226
2227 Module *M = Call->getModule();
2228 // Use a dummy vararg function to actually hold the values live
2229 FunctionCallee Func = M->getOrInsertFunction(
2230 Name: "__tmp_use", T: FunctionType::get(Result: Type::getVoidTy(C&: M->getContext()), isVarArg: true));
2231 if (isa<CallInst>(Val: Call)) {
2232 // For call safepoints insert dummy calls right after safepoint
2233 Holders.push_back(
2234 Elt: CallInst::Create(Func, Args: Values, NameStr: "", InsertBefore: std::next(x: Call->getIterator())));
2235 return;
2236 }
2237 // For invoke safepoints insert dummy calls in both the normal and
2238 // exceptional destination blocks.
2239 auto *II = cast<InvokeInst>(Val: Call);
2240 Holders.push_back(Elt: CallInst::Create(
2241 Func, Args: Values, NameStr: "", InsertBefore: II->getNormalDest()->getFirstInsertionPt()));
2242 Holders.push_back(Elt: CallInst::Create(
2243 Func, Args: Values, NameStr: "", InsertBefore: II->getUnwindDest()->getFirstInsertionPt()));
2244}
2245
2246static void findLiveReferences(
2247 Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate,
2248 MutableArrayRef<struct PartiallyConstructedSafepointRecord> records,
2249 GCStrategy *GC) {
2250 GCPtrLivenessData OriginalLivenessData;
2251 computeLiveInValues(DT, F, Data&: OriginalLivenessData, GC);
2252 for (size_t i = 0; i < records.size(); i++) {
2253 struct PartiallyConstructedSafepointRecord &info = records[i];
2254 analyzeParsePointLiveness(DT, OriginalLivenessData, Call: toUpdate[i], Result&: info, GC);
2255 }
2256}
2257
2258// Helper function for rematerializeLiveValues. It walks the use chain
2259// starting from "CurrentValue" until it reaches the root of the chain, i.e.
2260// the base or a value it cannot process. Only "simple" values are processed
2261// (currently GEPs and no-op casts). The returned root is examined by the
2262// callers of findRematerializableChainToBasePointer. Fills the "ChainToBase"
2263// array with all visited values.
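// For illustration (value names are hypothetical), for
//   %gep1 = getelementptr i32, ptr addrspace(1) %base, i64 %idx
//   %gep2 = getelementptr i32, ptr addrspace(1) %gep1, i64 4
// calling this on %gep2 fills ChainToBase with [%gep2, %gep1] and returns
// %base as the root.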
2264static Value* findRematerializableChainToBasePointer(
2265 SmallVectorImpl<Instruction*> &ChainToBase,
2266 Value *CurrentValue) {
2267 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Val: CurrentValue)) {
2268 ChainToBase.push_back(Elt: GEP);
2269 return findRematerializableChainToBasePointer(ChainToBase,
2270 CurrentValue: GEP->getPointerOperand());
2271 }
2272
2273 if (CastInst *CI = dyn_cast<CastInst>(Val: CurrentValue)) {
2274 if (!CI->isNoopCast(DL: CI->getDataLayout()))
2275 return CI;
2276
2277 ChainToBase.push_back(Elt: CI);
2278 return findRematerializableChainToBasePointer(ChainToBase,
2279 CurrentValue: CI->getOperand(i_nocapture: 0));
2280 }
2281
2282 // We have reached the root of the chain, which is either equal to the base or
2283 // is the first unsupported value along the use chain.
2284 return CurrentValue;
2285}
2286
2287// Helper function for the "rematerializeLiveValues". Compute cost of the use
2288// chain we are going to rematerialize.
2289static InstructionCost
2290chainToBasePointerCost(SmallVectorImpl<Instruction *> &Chain,
2291 TargetTransformInfo &TTI) {
2292 InstructionCost Cost = 0;
2293
2294 for (Instruction *Instr : Chain) {
2295 if (CastInst *CI = dyn_cast<CastInst>(Val: Instr)) {
2296 assert(CI->isNoopCast(CI->getDataLayout()) &&
2297 "non noop cast is found during rematerialization");
2298
2299 Type *SrcTy = CI->getOperand(i_nocapture: 0)->getType();
2300 Cost += TTI.getCastInstrCost(Opcode: CI->getOpcode(), Dst: CI->getType(), Src: SrcTy,
2301 CCH: TTI::getCastContextHint(I: CI),
2302 CostKind: TargetTransformInfo::TCK_SizeAndLatency, I: CI);
2303
2304 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Val: Instr)) {
2305 // Cost of the address calculation
2306 Type *ValTy = GEP->getSourceElementType();
2307 Cost += TTI.getAddressComputationCost(Ty: ValTy);
2308
2309 // And cost of the GEP itself
2310 // TODO: Use TTI->getGEPCost here (it exists, but appears to be not
2311 // allowed for the external usage)
2312 if (!GEP->hasAllConstantIndices())
2313 Cost += 2;
2314
2315 } else {
2316 llvm_unreachable("unsupported instruction type during rematerialization");
2317 }
2318 }
2319
2320 return Cost;
2321}
2322
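// Return true if the two PHI nodes live in the same block and have the same
// incoming values coming from the same incoming blocks (not necessarily in
// the same order), e.g. (illustrative)
//   %phi      = phi ptr addrspace(1) [ %a, %bb0 ], [ %b, %bb1 ]
//   %phi.base = phi ptr addrspace(1) [ %a, %bb0 ], [ %b, %bb1 ]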
2323static bool AreEquivalentPhiNodes(PHINode &OrigRootPhi, PHINode &AlternateRootPhi) {
2324 unsigned PhiNum = OrigRootPhi.getNumIncomingValues();
2325 if (PhiNum != AlternateRootPhi.getNumIncomingValues() ||
2326 OrigRootPhi.getParent() != AlternateRootPhi.getParent())
2327 return false;
2328 // Map of incoming values and their corresponding basic blocks of
2329 // OrigRootPhi.
2330 SmallDenseMap<Value *, BasicBlock *, 8> CurrentIncomingValues;
2331 for (unsigned i = 0; i < PhiNum; i++)
2332 CurrentIncomingValues[OrigRootPhi.getIncomingValue(i)] =
2333 OrigRootPhi.getIncomingBlock(i);
2334
2335 // Both current and base PHIs should have same incoming values and
2336 // the same basic blocks corresponding to the incoming values.
2337 for (unsigned i = 0; i < PhiNum; i++) {
2338 auto CIVI =
2339 CurrentIncomingValues.find(Val: AlternateRootPhi.getIncomingValue(i));
2340 if (CIVI == CurrentIncomingValues.end())
2341 return false;
2342 BasicBlock *CurrentIncomingBB = CIVI->second;
2343 if (CurrentIncomingBB != AlternateRootPhi.getIncomingBlock(i))
2344 return false;
2345 }
2346 return true;
2347}
2348
2349// Find derived pointers that can be recomputed cheaply enough and fill
2350// RematerizationCandidates with such candidates.
2351static void
2352findRematerializationCandidates(PointerToBaseTy PointerToBase,
2353 RematCandTy &RematerizationCandidates,
2354 TargetTransformInfo &TTI) {
2355 const unsigned int ChainLengthThreshold = 10;
2356
2357 for (auto P2B : PointerToBase) {
2358 auto *Derived = P2B.first;
2359 auto *Base = P2B.second;
2360 // Consider only derived pointers.
2361 if (Derived == Base)
2362 continue;
2363
2364 // For each live pointer find its defining chain.
2365 SmallVector<Instruction *, 3> ChainToBase;
2366 Value *RootOfChain =
2367 findRematerializableChainToBasePointer(ChainToBase, CurrentValue: Derived);
2368
2369 // Nothing to do, or chain is too long
2370 if (ChainToBase.size() == 0 ||
2371 ChainToBase.size() > ChainLengthThreshold)
2372 continue;
2373
2374 // Handle the scenario where the RootOfChain is not equal to the
2375 // Base Value, but they are essentially the same phi values.
2376 if (RootOfChain != PointerToBase[Derived]) {
2377 PHINode *OrigRootPhi = dyn_cast<PHINode>(Val: RootOfChain);
2378 PHINode *AlternateRootPhi = dyn_cast<PHINode>(Val: PointerToBase[Derived]);
2379 if (!OrigRootPhi || !AlternateRootPhi)
2380 continue;
2381 // PHI nodes that have the same incoming values, and belong to the same
2382 // basic block, are essentially the same SSA value. When the original phi
2383 // has incoming values with different base pointers, the original phi is
2384 // marked as conflict, and an additional `AlternateRootPhi` with the same
2385 // incoming values gets generated by the findBasePointer function. We need
2386 // to verify that the newly generated AlternateRootPhi (the .base version of
2387 // the phi) and RootOfChain (the original phi node itself) are the same, so
2388 // that we can rematerialize the gep and casts. This is a workaround for a
2389 // deficiency in the findBasePointer algorithm.
2390 if (!AreEquivalentPhiNodes(OrigRootPhi&: *OrigRootPhi, AlternateRootPhi&: *AlternateRootPhi))
2391 continue;
2392 }
2393 // Compute cost of this chain.
2394 InstructionCost Cost = chainToBasePointerCost(Chain&: ChainToBase, TTI);
2395 // TODO: We can also account for cases when we will be able to remove some
2396 // of the rematerialized values by later optimization passes. I.e if
2397 // we rematerialized several intersecting chains. Or if original values
2398 // don't have any uses besides this statepoint.
2399
2400 // Ok, there is a candidate.
2401 RematerizlizationCandidateRecord Record;
2402 Record.ChainToBase = ChainToBase;
2403 Record.RootOfChain = RootOfChain;
2404 Record.Cost = Cost;
2405 RematerizationCandidates.insert(KV: { Derived, Record });
2406 }
2407}
2408
2409// Try to rematerialize derived pointers immediately before their uses
2410// (instead of rematerializing after every statepoint it is live through).
2411// This can be beneficial when a derived pointer is live across many
2412// statepoints, but its uses are rare.
2413static void rematerializeLiveValuesAtUses(
2414 RematCandTy &RematerizationCandidates,
2415 MutableArrayRef<PartiallyConstructedSafepointRecord> Records,
2416 PointerToBaseTy &PointerToBase) {
2417 if (!RematDerivedAtUses)
2418 return;
2419
2420 SmallVector<Instruction *, 32> LiveValuesToBeDeleted;
2421
2422 LLVM_DEBUG(dbgs() << "Rematerialize derived pointers at uses, "
2423 << "Num statepoints: " << Records.size() << '\n');
2424
2425 for (auto &It : RematerizationCandidates) {
2426 Instruction *Cand = cast<Instruction>(Val: It.first);
2427 auto &Record = It.second;
2428
2429 if (Record.Cost >= RematerializationThreshold)
2430 continue;
2431
2432 if (Cand->user_empty())
2433 continue;
2434
2435 if (Cand->hasOneUse())
2436 if (auto *U = dyn_cast<Instruction>(Val: Cand->getUniqueUndroppableUser()))
2437 if (U->getParent() == Cand->getParent())
2438 continue;
2439
2440 // Rematerialization before PHI nodes is not implemented.
2441 if (llvm::any_of(Range: Cand->users(),
2442 P: [](const auto *U) { return isa<PHINode>(U); }))
2443 continue;
2444
2445 LLVM_DEBUG(dbgs() << "Trying cand " << *Cand << " ... ");
2446
2447 // The number of rematerialization instructions we introduce equals the number
2448 // of candidate uses.
2449 // The number of rematerialization instructions we eliminate equals the number
2450 // of statepoints the candidate is live through.
2451 // Consider the transformation profitable if the latter is greater than the
2452 // former (in other words, we create fewer than we eliminate).
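// For illustration: a candidate with 2 uses that is live across 3 statepoints
// would introduce 2 rematerialized chains and eliminate 3, so it is
// considered profitable.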
2453 unsigned NumLiveStatepoints = llvm::count_if(
2454 Range&: Records, P: [Cand](const auto &R) { return R.LiveSet.contains(Cand); });
2455 unsigned NumUses = Cand->getNumUses();
2456
2457 LLVM_DEBUG(dbgs() << "Num uses: " << NumUses << " Num live statepoints: "
2458 << NumLiveStatepoints << " ");
2459
2460 if (NumLiveStatepoints < NumUses) {
2461 LLVM_DEBUG(dbgs() << "not profitable\n");
2462 continue;
2463 }
2464
2465 // If rematerialization is 'free', then favor rematerialization at
2466 // uses as it generally shortens live ranges.
2467 // TODO: Short (size ==1) chains only?
2468 if (NumLiveStatepoints == NumUses && Record.Cost > 0) {
2469 LLVM_DEBUG(dbgs() << "not profitable\n");
2470 continue;
2471 }
2472
2473 LLVM_DEBUG(dbgs() << "looks profitable\n");
2474
2475 // ChainToBase may contain another remat candidate (as a sub-chain) which
2476 // has been rewritten by now. We need to recollect the chain to have an
2477 // up-to-date value.
2478 // TODO: sort records in findRematerializationCandidates() in
2479 // decreasing chain size order?
2480 if (Record.ChainToBase.size() > 1) {
2481 Record.ChainToBase.clear();
2482 findRematerializableChainToBasePointer(ChainToBase&: Record.ChainToBase, CurrentValue: Cand);
2483 }
2484
2485 // The current rematerialization algorithm is very simple: we rematerialize
2486 // immediately before EVERY use, even if there are several uses in the same
2487 // block or the use is local to the Cand def. The reason is that this allows
2488 // us to avoid recomputing liveness without complicated analysis:
2489 // - If we did not eliminate all uses of the original candidate, we would not
2490 // know exactly in which BBs it is still live.
2491 // - If we rematerialized once per BB, we would need to find a proper insertion
2492 // place (the first use in the block, but after the def) and analyze whether
2493 // there is a statepoint between the uses in the block.
2494 while (!Cand->user_empty()) {
2495 Instruction *UserI = cast<Instruction>(Val: *Cand->user_begin());
2496 Instruction *RematChain = rematerializeChain(
2497 ChainToBase: Record.ChainToBase, InsertBefore: UserI, RootOfChain: Record.RootOfChain, AlternateLiveBase: PointerToBase[Cand]);
2498 UserI->replaceUsesOfWith(From: Cand, To: RematChain);
2499 PointerToBase[RematChain] = PointerToBase[Cand];
2500 }
2501 LiveValuesToBeDeleted.push_back(Elt: Cand);
2502 }
2503
2504 LLVM_DEBUG(dbgs() << "Rematerialized " << LiveValuesToBeDeleted.size()
2505 << " derived pointers\n");
2506 for (auto *Cand : LiveValuesToBeDeleted) {
2507 assert(Cand->use_empty() && "Unexpected user remain");
2508 RematerizationCandidates.erase(Key: Cand);
2509 for (auto &R : Records) {
2510 assert(!R.LiveSet.contains(Cand) ||
2511 R.LiveSet.contains(PointerToBase[Cand]));
2512 R.LiveSet.remove(X: Cand);
2513 }
2514 }
2515
2516 // Recollect the chains that were not rematerialized - we might have rewritten
2517 // their sub-chains.
2518 if (!LiveValuesToBeDeleted.empty()) {
2519 for (auto &P : RematerizationCandidates) {
2520 auto &R = P.second;
2521 if (R.ChainToBase.size() > 1) {
2522 R.ChainToBase.clear();
2523 findRematerializableChainToBasePointer(ChainToBase&: R.ChainToBase, CurrentValue: P.first);
2524 }
2525 }
2526 }
2527}
2528
2529// From the statepoint live set pick values that are cheaper to recompute than
2530// to relocate. Remove these values from the live set, rematerialize them after
2531// the statepoint and record them in the "Info" structure. Note that, similarly
2532// to relocated values, we don't do any user adjustments here.
2533static void rematerializeLiveValues(CallBase *Call,
2534 PartiallyConstructedSafepointRecord &Info,
2535 PointerToBaseTy &PointerToBase,
2536 RematCandTy &RematerizationCandidates,
2537 TargetTransformInfo &TTI) {
2538 // Record the values we are going to delete from this statepoint's live set.
2539 // We cannot do this in the following loop due to iterator invalidation.
2540 SmallVector<Value *, 32> LiveValuesToBeDeleted;
2541
2542 for (Value *LiveValue : Info.LiveSet) {
2543 auto It = RematerizationCandidates.find(Key: LiveValue);
2544 if (It == RematerizationCandidates.end())
2545 continue;
2546
2547 RematerizlizationCandidateRecord &Record = It->second;
2548
2549 InstructionCost Cost = Record.Cost;
2550 // For invokes we need to rematerialize each chain twice - for normal and
2551 // for unwind basic blocks. Model this by multiplying cost by two.
2552 if (isa<InvokeInst>(Val: Call))
2553 Cost *= 2;
2554
2555 // If it's too expensive - skip it.
2556 if (Cost >= RematerializationThreshold)
2557 continue;
2558
2559 // Remove value from the live set
2560 LiveValuesToBeDeleted.push_back(Elt: LiveValue);
2561
2562 // Clone instructions and record them inside "Info" structure.
2563
2564 // Different cases for calls and invokes. For invokes we need to clone
2565 // instructions both on normal and unwind path.
2566 if (isa<CallInst>(Val: Call)) {
2567 Instruction *InsertBefore = Call->getNextNode();
2568 assert(InsertBefore);
2569 Instruction *RematerializedValue =
2570 rematerializeChain(ChainToBase: Record.ChainToBase, InsertBefore,
2571 RootOfChain: Record.RootOfChain, AlternateLiveBase: PointerToBase[LiveValue]);
2572 Info.RematerializedValues[RematerializedValue] = LiveValue;
2573 } else {
2574 auto *Invoke = cast<InvokeInst>(Val: Call);
2575
2576 Instruction *NormalInsertBefore =
2577 &*Invoke->getNormalDest()->getFirstInsertionPt();
2578 Instruction *UnwindInsertBefore =
2579 &*Invoke->getUnwindDest()->getFirstInsertionPt();
2580
2581 Instruction *NormalRematerializedValue =
2582 rematerializeChain(ChainToBase: Record.ChainToBase, InsertBefore: NormalInsertBefore,
2583 RootOfChain: Record.RootOfChain, AlternateLiveBase: PointerToBase[LiveValue]);
2584 Instruction *UnwindRematerializedValue =
2585 rematerializeChain(ChainToBase: Record.ChainToBase, InsertBefore: UnwindInsertBefore,
2586 RootOfChain: Record.RootOfChain, AlternateLiveBase: PointerToBase[LiveValue]);
2587
2588 Info.RematerializedValues[NormalRematerializedValue] = LiveValue;
2589 Info.RematerializedValues[UnwindRematerializedValue] = LiveValue;
2590 }
2591 }
2592
2593 // Remove rematerialized values from the live set.
2594 for (auto *LiveValue: LiveValuesToBeDeleted) {
2595 Info.LiveSet.remove(X: LiveValue);
2596 }
2597}
2598
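// Replace calls to the @llvm.experimental.gc.get.pointer.base intrinsic with
// the base pointer found for the operand, and calls to
// @llvm.experimental.gc.get.pointer.offset with the byte offset of the derived
// pointer from that base, computed roughly as (value names are hypothetical):
//   %derived.int = ptrtoint ptr addrspace(1) %derived to i64
//   %base.int    = ptrtoint ptr addrspace(1) %base to i64
//   %offset      = sub i64 %derived.int, %base.int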
2599static bool inlineGetBaseAndOffset(Function &F,
2600 SmallVectorImpl<CallInst *> &Intrinsics,
2601 DefiningValueMapTy &DVCache,
2602 IsKnownBaseMapTy &KnownBases) {
2603 auto &Context = F.getContext();
2604 auto &DL = F.getDataLayout();
2605 bool Changed = false;
2606
2607 for (auto *Callsite : Intrinsics)
2608 switch (Callsite->getIntrinsicID()) {
2609 case Intrinsic::experimental_gc_get_pointer_base: {
2610 Changed = true;
2611 Value *Base =
2612 findBasePointer(I: Callsite->getOperand(i_nocapture: 0), Cache&: DVCache, KnownBases);
2613 assert(!DVCache.count(Callsite));
2614 Callsite->replaceAllUsesWith(V: Base);
2615 if (!Base->hasName())
2616 Base->takeName(V: Callsite);
2617 Callsite->eraseFromParent();
2618 break;
2619 }
2620 case Intrinsic::experimental_gc_get_pointer_offset: {
2621 Changed = true;
2622 Value *Derived = Callsite->getOperand(i_nocapture: 0);
2623 Value *Base = findBasePointer(I: Derived, Cache&: DVCache, KnownBases);
2624 assert(!DVCache.count(Callsite));
2625 unsigned AddressSpace = Derived->getType()->getPointerAddressSpace();
2626 unsigned IntPtrSize = DL.getPointerSizeInBits(AS: AddressSpace);
2627 IRBuilder<> Builder(Callsite);
2628 Value *BaseInt =
2629 Builder.CreatePtrToInt(V: Base, DestTy: Type::getIntNTy(C&: Context, N: IntPtrSize),
2630 Name: suffixed_name_or(V: Base, Suffix: ".int", DefaultName: ""));
2631 Value *DerivedInt =
2632 Builder.CreatePtrToInt(V: Derived, DestTy: Type::getIntNTy(C&: Context, N: IntPtrSize),
2633 Name: suffixed_name_or(V: Derived, Suffix: ".int", DefaultName: ""));
2634 Value *Offset = Builder.CreateSub(LHS: DerivedInt, RHS: BaseInt);
2635 Callsite->replaceAllUsesWith(V: Offset);
2636 Offset->takeName(V: Callsite);
2637 Callsite->eraseFromParent();
2638 break;
2639 }
2640 default:
2641 llvm_unreachable("Unknown intrinsic");
2642 }
2643
2644 return Changed;
2645}
2646
2647static bool insertParsePoints(Function &F, DominatorTree &DT,
2648 TargetTransformInfo &TTI,
2649 SmallVectorImpl<CallBase *> &ToUpdate,
2650 DefiningValueMapTy &DVCache,
2651 IsKnownBaseMapTy &KnownBases) {
2652 std::unique_ptr<GCStrategy> GC = findGCStrategy(F);
2653
2654#ifndef NDEBUG
2655 // Validate the input
2656 std::set<CallBase *> Uniqued;
2657 Uniqued.insert(ToUpdate.begin(), ToUpdate.end());
2658 assert(Uniqued.size() == ToUpdate.size() && "no duplicates please!");
2659
2660 for (CallBase *Call : ToUpdate)
2661 assert(Call->getFunction() == &F);
2662#endif
2663
2664 // When inserting gc.relocates for invokes, we need to be able to insert at
2665 // the top of the successor blocks. See the comment on
2666 // normalizeForInvokeSafepoint for exactly what is needed. Note that this step
2667 // may restructure the CFG.
2668 for (CallBase *Call : ToUpdate) {
2669 auto *II = dyn_cast<InvokeInst>(Val: Call);
2670 if (!II)
2671 continue;
2672 normalizeForInvokeSafepoint(BB: II->getNormalDest(), InvokeParent: II->getParent(), DT);
2673 normalizeForInvokeSafepoint(BB: II->getUnwindDest(), InvokeParent: II->getParent(), DT);
2674 }
2675
2676 // A list of dummy calls added to the IR to keep various values obviously
2677 // live in the IR. We'll remove all of these when done.
2678 SmallVector<CallInst *, 64> Holders;
2679
2680 // Insert a dummy call with all of the deopt operands we'll need for the
2681 // actual safepoint insertion as arguments. This ensures reference operands
2682 // in the deopt argument list are considered live through the safepoint (and
2683 // thus makes sure they get relocated).
2684 for (CallBase *Call : ToUpdate) {
2685 SmallVector<Value *, 64> DeoptValues;
2686
2687 for (Value *Arg : GetDeoptBundleOperands(Call)) {
2688 assert(!isUnhandledGCPointerType(Arg->getType(), GC.get()) &&
2689 "support for FCA unimplemented");
2690 if (isHandledGCPointerType(T: Arg->getType(), GC: GC.get()))
2691 DeoptValues.push_back(Elt: Arg);
2692 }
2693
2694 insertUseHolderAfter(Call, Values: DeoptValues, Holders);
2695 }
2696
2697 SmallVector<PartiallyConstructedSafepointRecord, 64> Records(ToUpdate.size());
2698
2699 // A) Identify all gc pointers which are statically live at the given call
2700 // site.
2701 findLiveReferences(F, DT, toUpdate: ToUpdate, records: Records, GC: GC.get());
2702
2703 /// Global mapping from live pointers to a base-defining-value.
2704 PointerToBaseTy PointerToBase;
2705
2706 // B) Find the base pointers for each live pointer
2707 for (size_t i = 0; i < Records.size(); i++) {
2708 PartiallyConstructedSafepointRecord &info = Records[i];
2709 findBasePointers(DT, DVCache, Call: ToUpdate[i], result&: info, PointerToBase, KnownBases);
2710 }
2711 if (PrintBasePointers) {
2712 errs() << "Base Pairs (w/o Relocation):\n";
2713 for (auto &Pair : PointerToBase) {
2714 errs() << " derived ";
2715 Pair.first->printAsOperand(O&: errs(), PrintType: false);
2716 errs() << " base ";
2717 Pair.second->printAsOperand(O&: errs(), PrintType: false);
2718 errs() << "\n";
2720 }
2721 }
2722
2723 // The base phi insertion logic (for any safepoint) may have inserted new
2724 // instructions which are now live at some safepoint. The simplest such
2725 // example is:
2726 // loop:
2727 // phi a <-- will be a new base_phi here
2728 // safepoint 1 <-- that needs to be live here
2729 // gep a + 1
2730 // safepoint 2
2731 // br loop
2732 // We insert some dummy calls after each safepoint to explicitly keep the base
2733 // pointers which were identified for that safepoint live. We'll then
2734 // ask liveness for _every_ base inserted to see what is now live. Then we
2735 // remove the dummy calls.
2736 Holders.reserve(N: Holders.size() + Records.size());
2737 for (size_t i = 0; i < Records.size(); i++) {
2738 PartiallyConstructedSafepointRecord &Info = Records[i];
2739
2740 SmallVector<Value *, 128> Bases;
2741 for (auto *Derived : Info.LiveSet) {
2742 assert(PointerToBase.count(Derived) && "Missed base for derived pointer");
2743 Bases.push_back(Elt: PointerToBase[Derived]);
2744 }
2745
2746 insertUseHolderAfter(Call: ToUpdate[i], Values: Bases, Holders);
2747 }
2748
2749 // By selecting base pointers, we've effectively inserted new uses. Thus, we
2750 // need to rerun liveness. We may *also* have inserted new defs, but that's
2751 // not the key issue.
2752 recomputeLiveInValues(F, DT, toUpdate: ToUpdate, records: Records, PointerToBase, GC: GC.get());
2753
2754 if (PrintBasePointers) {
2755 errs() << "Base Pairs: (w/Relocation)\n";
2756 for (auto Pair : PointerToBase) {
2757 errs() << " derived ";
2758 Pair.first->printAsOperand(O&: errs(), PrintType: false);
2759 errs() << " base ";
2760 Pair.second->printAsOperand(O&: errs(), PrintType: false);
2761 errs() << "\n";
2762 }
2763 }
2764
2765 // It is possible that non-constant live variables have a constant base. For
2766 // example, a GEP with a variable offset from a global. In this case we can
2767 // remove it from the liveset. We already don't add constants to the liveset
2768 // because we assume they won't move at runtime and the GC doesn't need to be
2769 // informed about them. The same reasoning applies if the base is constant.
2770 // Note that the relocation placement code relies on this filtering for
2771 // correctness as it expects the base to be in the liveset, which isn't true
2772 // if the base is constant.
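  //
  // Illustrative example (hypothetical IR) of the case described above:
  //   %d = getelementptr i8, ptr addrspace(1) @G, i64 %idx
  // Here %d is not a constant, but its base @G is. Since @G cannot move, %d
  // needs no relocation and is removed from the live set below.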
2773 for (auto &Info : Records) {
2774 Info.LiveSet.remove_if(P: [&](Value *LiveV) {
2775 assert(PointerToBase.count(LiveV) && "Missed base for derived pointer");
2776 return isa<Constant>(Val: PointerToBase[LiveV]);
2777 });
2778 }
2779
2780 for (CallInst *CI : Holders)
2781 CI->eraseFromParent();
2782
2783 Holders.clear();
2784
2785 // Compute the cost of possible re-materialization of derived pointers.
2786 RematCandTy RematerizationCandidates;
2787 findRematerializationCandidates(PointerToBase, RematerizationCandidates, TTI);
2788
2789 // In order to reduce live set of statepoint we might choose to rematerialize
2790 // some values instead of relocating them. This is purely an optimization and
2791 // does not influence correctness.
2792 // First try rematerialization at uses, then after statepoints.
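  // For example (sketch): a derived pointer that is just a cheap chain of GEPs
  // off its base may be recomputed from the relocated base after the
  // statepoint rather than relocated itself, shrinking the statepoint's live
  // set. The cost cutoff for this choice is the rematerialization threshold
  // option defined near the top of this file.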
2793 rematerializeLiveValuesAtUses(RematerizationCandidates, Records,
2794 PointerToBase);
2795 for (size_t i = 0; i < Records.size(); i++)
2796 rematerializeLiveValues(Call: ToUpdate[i], Info&: Records[i], PointerToBase,
2797 RematerizationCandidates, TTI);
2798
2799 // We need this to safely RAUW and delete call or invoke return values that
2800 // may themselves be live over a statepoint. For details, please see usage in
2801 // makeStatepointExplicitImpl.
2802 std::vector<DeferredReplacement> Replacements;
2803
2804 // Now run through and replace the existing statepoints with new ones with
2805 // the live variables listed. We do not yet update uses of the values being
2806 // relocated. We have references to live variables that need to
2807 // survive to the last iteration of this loop. (By construction, the
  // previous statepoint cannot be a live variable, thus we can and do remove
  // the old statepoint calls as we go.)
2810 for (size_t i = 0; i < Records.size(); i++)
2811 makeStatepointExplicit(DT, Call: ToUpdate[i], Result&: Records[i], Replacements,
2812 PointerToBase, GC: GC.get());
2813
  ToUpdate.clear(); // Prevent accidental use of the now-invalid calls.
2815
2816 for (auto &PR : Replacements)
2817 PR.doReplacement();
2818
2819 Replacements.clear();
2820
2821 for (auto &Info : Records) {
    // These live sets may contain stale Value pointers, since we replaced calls
2823 // with operand bundles with calls wrapped in gc.statepoint, and some of
2824 // those calls may have been def'ing live gc pointers. Clear these out to
2825 // avoid accidentally using them.
2826 //
2827 // TODO: We should create a separate data structure that does not contain
2828 // these live sets, and migrate to using that data structure from this point
2829 // onward.
2830 Info.LiveSet.clear();
2831 }
2832 PointerToBase.clear();
2833
2834 // Do all the fixups of the original live variables to their relocated selves
2835 SmallVector<Value *, 128> Live;
2836 for (const PartiallyConstructedSafepointRecord &Info : Records) {
2837 // We can't simply save the live set from the original insertion. One of
2838 // the live values might be the result of a call which needs a safepoint.
2839 // That Value* no longer exists and we need to use the new gc_result.
2840 // Thankfully, the live set is embedded in the statepoint (and updated), so
2841 // we just grab that.
2842 llvm::append_range(C&: Live, R: Info.StatepointToken->gc_args());
2843#ifndef NDEBUG
2844 // Do some basic validation checking on our liveness results before
2845 // performing relocation. Relocation can and will turn mistakes in liveness
    // results into nonsensical code which is much harder to debug.
2847 // TODO: It would be nice to test consistency as well
2848 assert(DT.isReachableFromEntry(Info.StatepointToken->getParent()) &&
2849 "statepoint must be reachable or liveness is meaningless");
2850 for (Value *V : Info.StatepointToken->gc_args()) {
2851 if (!isa<Instruction>(V))
        // Non-instruction values trivially dominate all possible uses
2853 continue;
2854 auto *LiveInst = cast<Instruction>(V);
2855 assert(DT.isReachableFromEntry(LiveInst->getParent()) &&
2856 "unreachable values should never be live");
2857 assert(DT.dominates(LiveInst, Info.StatepointToken) &&
2858 "basic SSA liveness expectation violated by liveness analysis");
2859 }
2860#endif
2861 }
2862 unique_unsorted(Vec&: Live);
2863
2864#ifndef NDEBUG
2865 // Validation check
2866 for (auto *Ptr : Live)
2867 assert(isHandledGCPointerType(Ptr->getType(), GC.get()) &&
2868 "must be a gc pointer type");
2869#endif
2870
2871 relocationViaAlloca(F, DT, Live, Records);
2872 return !Records.empty();
2873}
2874
2875// List of all parameter and return attributes which must be stripped when
2876// lowering from the abstract machine model. Note that we list attributes
// here which aren't valid as return attributes; that is okay.
2878static AttributeMask getParamAndReturnAttributesToRemove() {
2879 AttributeMask R;
2880 R.addAttribute(Val: Attribute::Dereferenceable);
2881 R.addAttribute(Val: Attribute::DereferenceableOrNull);
2882 R.addAttribute(Val: Attribute::ReadNone);
2883 R.addAttribute(Val: Attribute::ReadOnly);
2884 R.addAttribute(Val: Attribute::WriteOnly);
2885 R.addAttribute(Val: Attribute::NoAlias);
2886 R.addAttribute(Val: Attribute::NoFree);
2887 return R;
2888}
2889
2890static void stripNonValidAttributesFromPrototype(Function &F) {
2891 LLVMContext &Ctx = F.getContext();
2892
  // Intrinsics are very delicate. Lowering sometimes depends on the presence
  // of certain attributes for correctness, but we may have also inferred
  // additional ones in the abstract machine model which need to be stripped.
  // This assumes that the attributes defined in Intrinsic.td are
  // conservatively correct for both the physical and abstract models.
2898 if (Intrinsic::ID id = F.getIntrinsicID()) {
2899 F.setAttributes(Intrinsic::getAttributes(C&: Ctx, id));
2900 return;
2901 }
2902
2903 AttributeMask R = getParamAndReturnAttributesToRemove();
2904 for (Argument &A : F.args())
2905 if (isa<PointerType>(Val: A.getType()))
2906 F.removeParamAttrs(ArgNo: A.getArgNo(), Attrs: R);
2907
2908 if (isa<PointerType>(Val: F.getReturnType()))
2909 F.removeRetAttrs(Attrs: R);
2910
2911 for (auto Attr : FnAttrsToStrip)
2912 F.removeFnAttr(Kind: Attr);
2913}
2914
/// Certain metadata on instructions is invalid after running RS4GC.
/// Optimizations that run after RS4GC can incorrectly use this metadata to
/// optimize functions. We drop such metadata from the instruction.
2918static void stripInvalidMetadataFromInstruction(Instruction &I) {
2919 if (!isa<LoadInst>(Val: I) && !isa<StoreInst>(Val: I))
2920 return;
  // These are the metadata kinds that are still valid on loads and stores
  // after RS4GC.
2923 // The metadata implying dereferenceability and noalias are (conservatively)
2924 // dropped. This is because semantically, after RewriteStatepointsForGC runs,
2925 // all calls to gc.statepoint "free" the entire heap. Also, gc.statepoint can
  // touch the entire heap, including noalias objects. Note: The reasoning is
  // the same as for stripping the dereferenceable and noalias attributes,
  // which are analogous to these metadata counterparts.
2929 // We also drop the invariant.load metadata on the load because that metadata
2930 // implies the address operand to the load points to memory that is never
2931 // changed once it became dereferenceable. This is no longer true after RS4GC.
2932 // Similar reasoning applies to invariant.group metadata, which applies to
2933 // loads within a group.
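  //
  // Illustrative sketch (hypothetical IR): a load such as
  //   %v = load ptr addrspace(1), ptr addrspace(1) %p, !nonnull !0, !invariant.load !1
  // keeps !nonnull (listed below as still valid) but loses !invariant.load,
  // since once the GC may move objects the "never changes once dereferenceable"
  // assumption no longer holds.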
2934 unsigned ValidMetadataAfterRS4GC[] = {LLVMContext::MD_tbaa,
2935 LLVMContext::MD_range,
2936 LLVMContext::MD_alias_scope,
2937 LLVMContext::MD_nontemporal,
2938 LLVMContext::MD_nonnull,
2939 LLVMContext::MD_align,
2940 LLVMContext::MD_type};
2941
2942 // Drops all metadata on the instruction other than ValidMetadataAfterRS4GC.
2943 I.dropUnknownNonDebugMetadata(KnownIDs: ValidMetadataAfterRS4GC);
2944}
2945
2946static void stripNonValidDataFromBody(Function &F) {
2947 if (F.empty())
2948 return;
2949
2950 LLVMContext &Ctx = F.getContext();
2951 MDBuilder Builder(Ctx);
2952
  // Set of invariant.start instructions that we need to remove.
2954 // Use this to avoid invalidating the instruction iterator.
2955 SmallVector<IntrinsicInst*, 12> InvariantStartInstructions;
2956
2957 for (Instruction &I : instructions(F)) {
    // invariant.start on a memory location implies that the referenced memory
    // location is constant and unchanging. This is no longer true after
    // RewriteStatepointsForGC runs because there can be calls to gc.statepoint
    // which "free" the entire heap, and the presence of invariant.start allows
    // the optimizer to sink the load of a memory location past a statepoint,
    // which is incorrect.
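    // Illustrative hazard (sketch, hypothetical IR): leaving something like
    //   %token = call ptr @llvm.invariant.start.p1(i64 8, ptr addrspace(1) %p)
    // in place would permit exactly that kind of sinking across a later
    // gc.statepoint, which is unsound once the GC may relocate the object.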
2964 if (auto *II = dyn_cast<IntrinsicInst>(Val: &I))
2965 if (II->getIntrinsicID() == Intrinsic::invariant_start) {
2966 InvariantStartInstructions.push_back(Elt: II);
2967 continue;
2968 }
2969
2970 if (MDNode *Tag = I.getMetadata(KindID: LLVMContext::MD_tbaa)) {
2971 MDNode *MutableTBAA = Builder.createMutableTBAAAccessTag(Tag);
2972 I.setMetadata(KindID: LLVMContext::MD_tbaa, Node: MutableTBAA);
2973 }
2974
2975 stripInvalidMetadataFromInstruction(I);
2976
2977 AttributeMask R = getParamAndReturnAttributesToRemove();
2978 if (auto *Call = dyn_cast<CallBase>(Val: &I)) {
2979 for (int i = 0, e = Call->arg_size(); i != e; i++)
2980 if (isa<PointerType>(Val: Call->getArgOperand(i)->getType()))
2981 Call->removeParamAttrs(ArgNo: i, AttrsToRemove: R);
2982 if (isa<PointerType>(Val: Call->getType()))
2983 Call->removeRetAttrs(AttrsToRemove: R);
2984 }
2985 }
2986
2987 // Delete the invariant.start instructions and RAUW poison.
2988 for (auto *II : InvariantStartInstructions) {
2989 II->replaceAllUsesWith(V: PoisonValue::get(T: II->getType()));
2990 II->eraseFromParent();
2991 }
2992}
2993
/// Looks up the GC strategy for a given function, returning null if the
/// function doesn't have a GC tag.
2996static std::unique_ptr<GCStrategy> findGCStrategy(Function &F) {
2997 if (!F.hasGC())
2998 return nullptr;
2999
3000 return getGCStrategy(Name: F.getGC());
3001}
3002
3003/// Returns true if this function should be rewritten by this pass. The main
3004/// point of this function is as an extension point for custom logic.
3005static bool shouldRewriteStatepointsIn(Function &F) {
3006 if (!F.hasGC())
3007 return false;
3008
3009 std::unique_ptr<GCStrategy> Strategy = findGCStrategy(F);
3010
3011 assert(Strategy && "GC strategy is required by function, but was not found");
3012
3013 return Strategy->useRS4GC();
3014}
3015
3016static void stripNonValidData(Module &M) {
3017#ifndef NDEBUG
3018 assert(llvm::any_of(M, shouldRewriteStatepointsIn) && "precondition!");
3019#endif
3020
3021 for (Function &F : M)
3022 stripNonValidAttributesFromPrototype(F);
3023
3024 for (Function &F : M)
3025 stripNonValidDataFromBody(F);
3026}
3027
3028bool RewriteStatepointsForGC::runOnFunction(Function &F, DominatorTree &DT,
3029 TargetTransformInfo &TTI,
3030 const TargetLibraryInfo &TLI) {
3031 assert(!F.isDeclaration() && !F.empty() &&
3032 "need function body to rewrite statepoints in");
3033 assert(shouldRewriteStatepointsIn(F) && "mismatch in rewrite decision");
3034
3035 auto NeedsRewrite = [&TLI](Instruction &I) {
3036 if (const auto *Call = dyn_cast<CallBase>(Val: &I)) {
3037 if (isa<GCStatepointInst>(Val: Call))
3038 return false;
3039 if (callsGCLeafFunction(Call, TLI))
3040 return false;
3041
3042 // Normally it's up to the frontend to make sure that non-leaf calls also
3043 // have proper deopt state if it is required. We make an exception for
3044 // element atomic memcpy/memmove intrinsics here. Unlike other intrinsics
3045 // these are non-leaf by default. They might be generated by the optimizer
3046 // which doesn't know how to produce a proper deopt state. So if we see a
3047 // non-leaf memcpy/memmove without deopt state just treat it as a leaf
3048 // copy and don't produce a statepoint.
3049 if (!AllowStatepointWithNoDeoptInfo && !Call->hasDeoptState()) {
3050 assert((isa<AtomicMemCpyInst>(Call) || isa<AtomicMemMoveInst>(Call)) &&
3051 "Don't expect any other calls here!");
3052 return false;
3053 }
3054 return true;
3055 }
3056 return false;
3057 };
3058
3059 // Delete any unreachable statepoints so that we don't have unrewritten
3060 // statepoints surviving this pass. This makes testing easier and the
3061 // resulting IR less confusing to human readers.
3062 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
3063 bool MadeChange = removeUnreachableBlocks(F, DTU: &DTU);
3064 // Flush the Dominator Tree.
3065 DTU.getDomTree();
3066
  // Gather all the statepoints which need to be rewritten. Be careful to only
3068 // consider those in reachable code since we need to ask dominance queries
3069 // when rewriting. We'll delete the unreachable ones in a moment.
3070 SmallVector<CallBase *, 64> ParsePointNeeded;
3071 SmallVector<CallInst *, 64> Intrinsics;
3072 for (Instruction &I : instructions(F)) {
3073 // TODO: only the ones with the flag set!
3074 if (NeedsRewrite(I)) {
3075 // NOTE removeUnreachableBlocks() is stronger than
3076 // DominatorTree::isReachableFromEntry(). In other words
3077 // removeUnreachableBlocks can remove some blocks for which
3078 // isReachableFromEntry() returns true.
3079 assert(DT.isReachableFromEntry(I.getParent()) &&
3080 "no unreachable blocks expected");
3081 ParsePointNeeded.push_back(Elt: cast<CallBase>(Val: &I));
3082 }
3083 if (auto *CI = dyn_cast<CallInst>(Val: &I))
3084 if (CI->getIntrinsicID() == Intrinsic::experimental_gc_get_pointer_base ||
3085 CI->getIntrinsicID() == Intrinsic::experimental_gc_get_pointer_offset)
3086 Intrinsics.emplace_back(Args&: CI);
3087 }
3088
3089 // Return early if no work to do.
3090 if (ParsePointNeeded.empty() && Intrinsics.empty())
3091 return MadeChange;
3092
3093 // As a prepass, go ahead and aggressively destroy single entry phi nodes.
3094 // These are created by LCSSA. They have the effect of increasing the size
3095 // of liveness sets for no good reason. It may be harder to do this post
3096 // insertion since relocations and base phis can confuse things.
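  // For example (sketch, hypothetical IR): an LCSSA phi such as
  //   exit:
  //     %p.lcssa = phi ptr addrspace(1) [ %p, %loop ]
  // is folded to %p here, so only one value (rather than two) appears in the
  // live sets computed later.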
3097 for (BasicBlock &BB : F)
3098 if (BB.getUniquePredecessor())
3099 MadeChange |= FoldSingleEntryPHINodes(BB: &BB);
3100
3101 // Before we start introducing relocations, we want to tweak the IR a bit to
3102 // avoid unfortunate code generation effects. The main example is that we
3103 // want to try to make sure the comparison feeding a branch is after any
3104 // safepoints. Otherwise, we end up with a comparison of pre-relocation
3105 // values feeding a branch after relocation. This is semantically correct,
3106 // but results in extra register pressure since both the pre-relocation and
3107 // post-relocation copies must be available in registers. For code without
3108 // relocations this is handled elsewhere, but teaching the scheduler to
3109 // reverse the transform we're about to do would be slightly complex.
3110 // Note: This may extend the live range of the inputs to the icmp and thus
3111 // increase the liveset of any statepoint we move over. This is profitable
3112 // as long as all statepoints are in rare blocks. If we had in-register
3113 // lowering for live values this would be a much safer transform.
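  //
  // For example (sketch, hypothetical IR): given
  //   %c = icmp eq ptr addrspace(1) %a, %b
  //   call void @foo() [ "deopt"() ]     ; will become a statepoint
  //   br i1 %c, label %t, label %f
  // moving the icmp down to just before the branch lets it compare the
  // relocated values, instead of keeping both the pre- and post-relocation
  // copies of %a and %b alive at once.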
3114 auto getConditionInst = [](Instruction *TI) -> Instruction * {
3115 if (auto *BI = dyn_cast<BranchInst>(Val: TI))
3116 if (BI->isConditional())
3117 return dyn_cast<Instruction>(Val: BI->getCondition());
3118 // TODO: Extend this to handle switches
3119 return nullptr;
3120 };
3121 for (BasicBlock &BB : F) {
3122 Instruction *TI = BB.getTerminator();
3123 if (auto *Cond = getConditionInst(TI))
3124 // TODO: Handle more than just ICmps here. We should be able to move
3125 // most instructions without side effects or memory access.
3126 if (isa<ICmpInst>(Val: Cond) && Cond->hasOneUse()) {
3127 MadeChange = true;
3128 Cond->moveBefore(MovePos: TI);
3129 }
3130 }
3131
3132 // Nasty workaround - The base computation code in the main algorithm doesn't
3133 // consider the fact that a GEP can be used to convert a scalar to a vector.
  // The right fix for this is to integrate GEPs into the base rewriting
  // algorithm properly; this is just a short-term workaround to prevent
  // crashes by canonicalizing such GEPs into fully vector GEPs.
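  //
  // For example (sketch, hypothetical IR): a GEP mixing a scalar pointer with
  // vector indices, such as
  //   %g = getelementptr i8, ptr addrspace(1) %base, <4 x i64> %offs
  // has %base splatted to a <4 x ptr addrspace(1)> first, yielding a fully
  // vector GEP that the base-pointer logic can handle.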
3137 for (Instruction &I : instructions(F)) {
3138 if (!isa<GetElementPtrInst>(Val: I))
3139 continue;
3140
3141 unsigned VF = 0;
3142 for (unsigned i = 0; i < I.getNumOperands(); i++)
3143 if (auto *OpndVTy = dyn_cast<VectorType>(Val: I.getOperand(i)->getType())) {
3144 assert(VF == 0 ||
3145 VF == cast<FixedVectorType>(OpndVTy)->getNumElements());
3146 VF = cast<FixedVectorType>(Val: OpndVTy)->getNumElements();
3147 }
3148
3149 // It's the vector to scalar traversal through the pointer operand which
3150 // confuses base pointer rewriting, so limit ourselves to that case.
3151 if (!I.getOperand(i: 0)->getType()->isVectorTy() && VF != 0) {
3152 IRBuilder<> B(&I);
3153 auto *Splat = B.CreateVectorSplat(NumElts: VF, V: I.getOperand(i: 0));
3154 I.setOperand(i: 0, Val: Splat);
3155 MadeChange = true;
3156 }
3157 }
3158
3159 // Cache the 'defining value' relation used in the computation and
3160 // insertion of base phis and selects. This ensures that we don't insert
3161 // large numbers of duplicate base_phis. Use one cache for both
3162 // inlineGetBaseAndOffset() and insertParsePoints().
3163 DefiningValueMapTy DVCache;
3164
  // Mapping from base values to a flag indicating whether each is a known
  // base or not.
3167 IsKnownBaseMapTy KnownBases;
3168
3169 if (!Intrinsics.empty())
3170 // Inline @gc.get.pointer.base() and @gc.get.pointer.offset() before finding
3171 // live references.
3172 MadeChange |= inlineGetBaseAndOffset(F, Intrinsics, DVCache, KnownBases);
3173
3174 if (!ParsePointNeeded.empty())
3175 MadeChange |=
3176 insertParsePoints(F, DT, TTI, ToUpdate&: ParsePointNeeded, DVCache, KnownBases);
3177
3178 return MadeChange;
3179}
3180
3181// liveness computation via standard dataflow
3182// -------------------------------------------------------------------
3183
3184// TODO: Consider using bitvectors for liveness, the set of potentially
3185// interesting values should be small and easy to pre-compute.
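//
// As a sketch of the standard backward dataflow solved below, per block BB:
//   LiveOut(BB) = PhiUses(BB) | union of LiveIn(S) over successors S
//   LiveIn(BB)  = (LiveSet(BB) | LiveOut(BB)) - KillSet(BB)
// where LiveSet/KillSet are the per-block seeds computed by the helpers below
// and PhiUses are the incoming values contributed by successor phis.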
3186
/// Compute the live-in set for the reverse range [Begin, End), starting from
/// the given live-out set of the basic block.
3189static void computeLiveInValues(BasicBlock::reverse_iterator Begin,
3190 BasicBlock::reverse_iterator End,
3191 SetVector<Value *> &LiveTmp, GCStrategy *GC) {
3192 for (auto &I : make_range(x: Begin, y: End)) {
3193 // KILL/Def - Remove this definition from LiveIn
3194 LiveTmp.remove(X: &I);
3195
3196 // Don't consider *uses* in PHI nodes, we handle their contribution to
3197 // predecessor blocks when we seed the LiveOut sets
3198 if (isa<PHINode>(Val: I))
3199 continue;
3200
3201 // USE - Add to the LiveIn set for this instruction
3202 for (Value *V : I.operands()) {
3203 assert(!isUnhandledGCPointerType(V->getType(), GC) &&
3204 "support for FCA unimplemented");
3205 if (isHandledGCPointerType(T: V->getType(), GC) && !isa<Constant>(Val: V)) {
3206 // The choice to exclude all things constant here is slightly subtle.
3207 // There are two independent reasons:
3208 // - We assume that things which are constant (from LLVM's definition)
        //   do not move at runtime. For example, the address of a global
        //   variable is fixed, even though its contents may not be.
3211 // - Second, we can't disallow arbitrary inttoptr constants even
3212 // if the language frontend does. Optimization passes are free to
3213 // locally exploit facts without respect to global reachability. This
3214 // can create sections of code which are dynamically unreachable and
3215 // contain just about anything. (see constants.ll in tests)
3216 LiveTmp.insert(X: V);
3217 }
3218 }
3219 }
3220}
3221
3222static void computeLiveOutSeed(BasicBlock *BB, SetVector<Value *> &LiveTmp,
3223 GCStrategy *GC) {
3224 for (BasicBlock *Succ : successors(BB)) {
3225 for (auto &I : *Succ) {
3226 PHINode *PN = dyn_cast<PHINode>(Val: &I);
3227 if (!PN)
3228 break;
3229
3230 Value *V = PN->getIncomingValueForBlock(BB);
3231 assert(!isUnhandledGCPointerType(V->getType(), GC) &&
3232 "support for FCA unimplemented");
3233 if (isHandledGCPointerType(T: V->getType(), GC) && !isa<Constant>(Val: V))
3234 LiveTmp.insert(X: V);
3235 }
3236 }
3237}
3238
3239static SetVector<Value *> computeKillSet(BasicBlock *BB, GCStrategy *GC) {
3240 SetVector<Value *> KillSet;
3241 for (Instruction &I : *BB)
3242 if (isHandledGCPointerType(T: I.getType(), GC))
3243 KillSet.insert(X: &I);
3244 return KillSet;
3245}
3246
3247#ifndef NDEBUG
3248/// Check that the items in 'Live' dominate 'TI'. This is used as a basic
3249/// validation check for the liveness computation.
3250static void checkBasicSSA(DominatorTree &DT, SetVector<Value *> &Live,
3251 Instruction *TI, bool TermOkay = false) {
3252 for (Value *V : Live) {
3253 if (auto *I = dyn_cast<Instruction>(V)) {
3254 // The terminator can be a member of the LiveOut set. LLVM's definition
3255 // of instruction dominance states that V does not dominate itself. As
3256 // such, we need to special case this to allow it.
3257 if (TermOkay && TI == I)
3258 continue;
3259 assert(DT.dominates(I, TI) &&
3260 "basic SSA liveness expectation violated by liveness analysis");
3261 }
3262 }
3263}
3264
3265/// Check that all the liveness sets used during the computation of liveness
3266/// obey basic SSA properties. This is useful for finding cases where we miss
3267/// a def.
3268static void checkBasicSSA(DominatorTree &DT, GCPtrLivenessData &Data,
3269 BasicBlock &BB) {
3270 checkBasicSSA(DT, Data.LiveSet[&BB], BB.getTerminator());
3271 checkBasicSSA(DT, Data.LiveOut[&BB], BB.getTerminator(), true);
3272 checkBasicSSA(DT, Data.LiveIn[&BB], BB.getTerminator());
3273}
3274#endif
3275
3276static void computeLiveInValues(DominatorTree &DT, Function &F,
3277 GCPtrLivenessData &Data, GCStrategy *GC) {
3278 SmallSetVector<BasicBlock *, 32> Worklist;
3279
3280 // Seed the liveness for each individual block
3281 for (BasicBlock &BB : F) {
3282 Data.KillSet[&BB] = computeKillSet(BB: &BB, GC);
3283 Data.LiveSet[&BB].clear();
3284 computeLiveInValues(Begin: BB.rbegin(), End: BB.rend(), LiveTmp&: Data.LiveSet[&BB], GC);
3285
3286#ifndef NDEBUG
3287 for (Value *Kill : Data.KillSet[&BB])
3288 assert(!Data.LiveSet[&BB].count(Kill) && "live set contains kill");
3289#endif
3290
3291 Data.LiveOut[&BB] = SetVector<Value *>();
3292 computeLiveOutSeed(BB: &BB, LiveTmp&: Data.LiveOut[&BB], GC);
3293 Data.LiveIn[&BB] = Data.LiveSet[&BB];
3294 Data.LiveIn[&BB].set_union(Data.LiveOut[&BB]);
3295 Data.LiveIn[&BB].set_subtract(Data.KillSet[&BB]);
3296 if (!Data.LiveIn[&BB].empty())
3297 Worklist.insert(Start: pred_begin(BB: &BB), End: pred_end(BB: &BB));
3298 }
3299
3300 // Propagate that liveness until stable
3301 while (!Worklist.empty()) {
3302 BasicBlock *BB = Worklist.pop_back_val();
3303
    // Compute our new liveout set, then exit early if it hasn't changed
    // despite the contributions of our successors.
3306 SetVector<Value *> LiveOut = Data.LiveOut[BB];
3307 const auto OldLiveOutSize = LiveOut.size();
3308 for (BasicBlock *Succ : successors(BB)) {
3309 assert(Data.LiveIn.count(Succ));
3310 LiveOut.set_union(Data.LiveIn[Succ]);
3311 }
    // assert: OldLiveOut is a subset of LiveOut
3313 if (OldLiveOutSize == LiveOut.size()) {
3314 // If the sets are the same size, then we didn't actually add anything
3315 // when unioning our successors LiveIn. Thus, the LiveIn of this block
3316 // hasn't changed.
3317 continue;
3318 }
3319 Data.LiveOut[BB] = LiveOut;
3320
3321 // Apply the effects of this basic block
3322 SetVector<Value *> LiveTmp = LiveOut;
3323 LiveTmp.set_union(Data.LiveSet[BB]);
3324 LiveTmp.set_subtract(Data.KillSet[BB]);
3325
3326 assert(Data.LiveIn.count(BB));
3327 const SetVector<Value *> &OldLiveIn = Data.LiveIn[BB];
3328 // assert: OldLiveIn is a subset of LiveTmp
3329 if (OldLiveIn.size() != LiveTmp.size()) {
3330 Data.LiveIn[BB] = LiveTmp;
3331 Worklist.insert(Start: pred_begin(BB), End: pred_end(BB));
3332 }
3333 } // while (!Worklist.empty())
3334
3335#ifndef NDEBUG
3336 // Verify our output against SSA properties. This helps catch any
3337 // missing kills during the above iteration.
3338 for (BasicBlock &BB : F)
3339 checkBasicSSA(DT, Data, BB);
3340#endif
3341}
3342
3343static void findLiveSetAtInst(Instruction *Inst, GCPtrLivenessData &Data,
3344 StatepointLiveSetTy &Out, GCStrategy *GC) {
3345 BasicBlock *BB = Inst->getParent();
3346
3347 // Note: The copy is intentional and required
3348 assert(Data.LiveOut.count(BB));
3349 SetVector<Value *> LiveOut = Data.LiveOut[BB];
3350
  // We want to handle the statepoint itself oddly. Its call result is not
  // live (normal), nor are its arguments (unless they're used again later).
  // This adjustment is specifically what we need to relocate.
3355 computeLiveInValues(Begin: BB->rbegin(), End: ++Inst->getIterator().getReverse(), LiveTmp&: LiveOut,
3356 GC);
3357 LiveOut.remove(X: Inst);
3358 Out.insert(Start: LiveOut.begin(), End: LiveOut.end());
3359}
3360
3361static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
3362 CallBase *Call,
3363 PartiallyConstructedSafepointRecord &Info,
3364 PointerToBaseTy &PointerToBase,
3365 GCStrategy *GC) {
3366 StatepointLiveSetTy Updated;
3367 findLiveSetAtInst(Inst: Call, Data&: RevisedLivenessData, Out&: Updated, GC);
3368
3369 // We may have base pointers which are now live that weren't before. We need
3370 // to update the PointerToBase structure to reflect this.
3371 for (auto *V : Updated)
3372 PointerToBase.insert(KV: { V, V });
3373
3374 Info.LiveSet = Updated;
3375}
3376