//===-- AMDGPULateCodeGenPrepare.cpp --------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass does misc. AMDGPU optimizations on IR *just* before instruction
/// selection.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/UniformityAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/Utils/Local.h"

#define DEBUG_TYPE "amdgpu-late-codegenprepare"

using namespace llvm;

// Scalar load widening needs to run after the load-store-vectorizer, as that
// pass doesn't handle overlapping cases. In addition, this pass enhances the
// widening to handle cases where scalar sub-dword loads are only naturally
// aligned but not dword aligned.
static cl::opt<bool>
    WidenLoads("amdgpu-late-codegenprepare-widen-constant-loads",
               cl::desc("Widen sub-dword constant address space loads in "
                        "AMDGPULateCodeGenPrepare"),
               cl::ReallyHidden, cl::init(true));

namespace {

class AMDGPULateCodeGenPrepare
    : public InstVisitor<AMDGPULateCodeGenPrepare, bool> {
  Function &F;
  const DataLayout &DL;
  const GCNSubtarget &ST;

  AssumptionCache *const AC;
  UniformityInfo &UA;

  SmallVector<WeakTrackingVH, 8> DeadInsts;

public:
  AMDGPULateCodeGenPrepare(Function &F, const GCNSubtarget &ST,
                           AssumptionCache *AC, UniformityInfo &UA)
      : F(F), DL(F.getDataLayout()), ST(ST), AC(AC), UA(UA) {}
  bool run();
  bool visitInstruction(Instruction &) { return false; }

  // Check if the specified value is at least DWORD aligned.
  bool isDWORDAligned(const Value *V) const {
    KnownBits Known = computeKnownBits(V, DL, AC);
    return Known.countMinTrailingZeros() >= 2;
  }

  bool canWidenScalarExtLoad(LoadInst &LI) const;
  bool visitLoadInst(LoadInst &LI);
};

using ValueToValueMap = DenseMap<const Value *, Value *>;

class LiveRegOptimizer {
private:
  Module &Mod;
  const DataLayout &DL;
  const GCNSubtarget &ST;

  /// The scalar type to convert to.
  Type *const ConvertToScalar;
  /// Map of Value -> Converted Value.
  ValueToValueMap ValMap;
  /// Per-BB map of conversions from the optimal type back to the original
  /// type.
  DenseMap<BasicBlock *, ValueToValueMap> BBUseValMap;

public:
  /// Calculate and return the type to convert to given a problematic \p
  /// OriginalType. In some instances, we may widen the type (e.g. v2i8 ->
  /// i32).
  Type *calculateConvertType(Type *OriginalType);
  /// Convert the virtual register defined by \p V to the compatible vector of
  /// legal type.
  Value *convertToOptType(Instruction *V, BasicBlock::iterator &InstPt);
  /// Convert the virtual register defined by \p V back to the original type
  /// \p ConvertType, stripping away the MSBs in cases where there was an
  /// imperfect fit (e.g. v2i32 -> v7i8).
  Value *convertFromOptType(Type *ConvertType, Instruction *V,
                            BasicBlock::iterator &InstPt,
                            BasicBlock *InsertBlock);
  /// Check for problematic PHI nodes or cross-bb values based on the value
  /// defined by \p I, and coerce to legal types if necessary. For a
  /// problematic PHI node, we coerce all incoming values in a single
  /// invocation.
  bool optimizeLiveType(Instruction *I,
                        SmallVectorImpl<WeakTrackingVH> &DeadInsts);

  // Whether or not the type should be replaced to avoid inefficient
  // legalization code.
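  // For example (illustrative only): a <4 x i8> has an illegal i8 element
  // type and is a candidate for packing into an i32, while a vector whose
  // element type is already legal on the subtarget is left untouched.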
  bool shouldReplace(Type *ITy) {
    FixedVectorType *VTy = dyn_cast<FixedVectorType>(ITy);
    if (!VTy)
      return false;

    const auto *TLI = ST.getTargetLowering();

    Type *EltTy = VTy->getElementType();
    // If the element size is larger than the convert-to scalar size, then we
    // can't do any bit packing.
    if (!EltTy->isIntegerTy() ||
        EltTy->getScalarSizeInBits() > ConvertToScalar->getScalarSizeInBits())
      return false;

    // Only coerce illegal types.
    TargetLoweringBase::LegalizeKind LK =
        TLI->getTypeConversion(EltTy->getContext(), EVT::getEVT(EltTy, false));
    return LK.first != TargetLoweringBase::TypeLegal;
  }

  bool isOpLegal(const Instruction *I) {
    if (isa<IntrinsicInst>(I))
      return true;

    // Any store is a profitable sink (prevents flip-flopping).
    if (isa<StoreInst>(I))
      return true;

    if (auto *BO = dyn_cast<BinaryOperator>(I)) {
      if (auto *VT = dyn_cast<FixedVectorType>(BO->getType())) {
        if (const auto *IT = dyn_cast<IntegerType>(VT->getElementType())) {
          unsigned EB = IT->getBitWidth();
          unsigned EC = VT->getNumElements();
          // Check for an SDWA-compatible operation.
          if ((EB == 8 || EB == 16) && ST.hasSDWA() && EC * EB <= 32) {
            switch (BO->getOpcode()) {
            case Instruction::Add:
            case Instruction::Sub:
            case Instruction::And:
            case Instruction::Or:
            case Instruction::Xor:
              return true;
            default:
              break;
            }
          }
        }
      }
    }

    return false;
  }

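  // Illustrative example: with SDWA available, a <4 x i8> add or a <2 x i16>
  // xor (32 packed bits) is treated as a legal sink by isOpLegal above, so
  // coercing a value that feeds such an operation is considered profitable.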
  bool isCoercionProfitable(Instruction *II) {
    SmallPtrSet<Instruction *, 4> CVisited;
    SmallVector<Instruction *, 4> UserList;

    // Check users for profitable conditions (e.g. a cross-block user which
    // can natively handle the illegal vector).
    for (User *V : II->users())
      if (auto *UseInst = dyn_cast<Instruction>(V))
        UserList.push_back(UseInst);

    auto IsLookThru = [](Instruction *II) {
      if (const auto *Intr = dyn_cast<IntrinsicInst>(II))
        return Intr->getIntrinsicID() == Intrinsic::amdgcn_perm;
      return isa<PHINode, ShuffleVectorInst, InsertElementInst,
                 ExtractElementInst, CastInst>(II);
    };

    while (!UserList.empty()) {
      auto CII = UserList.pop_back_val();
      if (!CVisited.insert(CII).second)
        continue;

      // The same-BB filter must look at the *user*, and we allow
      // non-look-through users when the def is a PHI (loop-header pattern).
      if (CII->getParent() == II->getParent() && !IsLookThru(CII) &&
          !isa<PHINode>(II))
        continue;

      if (isOpLegal(CII))
        return true;

      if (IsLookThru(CII))
        for (User *V : CII->users())
          if (auto *UseInst = dyn_cast<Instruction>(V))
            UserList.push_back(UseInst);
    }
    return false;
  }

  LiveRegOptimizer(Module &Mod, const GCNSubtarget &ST)
      : Mod(Mod), DL(Mod.getDataLayout()), ST(ST),
        ConvertToScalar(Type::getInt32Ty(Mod.getContext())) {}
};

} // end anonymous namespace

bool AMDGPULateCodeGenPrepare::run() {
  // "Optimize" the virtual regs that cross basic block boundaries. When
  // building the SelectionDAG, vectors of illegal types that cross basic
  // blocks will be scalarized and widened, with each scalar living in its
  // own register. To work around this, this optimization converts the
  // vectors to equivalent vectors of legal type (which are converted back
  // before uses in subsequent blocks), to pack the bits into fewer physical
  // registers (used in CopyToReg/CopyFromReg pairs).
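  //
  // Illustrative example (not from the source): a <4 x i8> defined in one
  // block and used in another would otherwise cross the block boundary as
  // four widened 32-bit values; packing it into a single i32 for the
  // cross-block copy and unpacking it before the use keeps it in one
  // register.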
  LiveRegOptimizer LRO(*F.getParent(), ST);

  bool Changed = false;

  bool HasScalarSubwordLoads = ST.hasScalarSubwordLoads();

  for (auto &BB : reverse(F))
    for (Instruction &I : make_early_inc_range(reverse(BB))) {
      Changed |= !HasScalarSubwordLoads && visit(I);
      Changed |= LRO.optimizeLiveType(&I, DeadInsts);
    }

  RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadInsts);
  return Changed;
}

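// A few illustrative results of the logic below: a v3i8 (24 bits) fits in a
// single i32, while a v3i16 (48 bits) needs a <2 x i32>.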
Type *LiveRegOptimizer::calculateConvertType(Type *OriginalType) {
  assert(OriginalType->getScalarSizeInBits() <=
         ConvertToScalar->getScalarSizeInBits());

  FixedVectorType *VTy = cast<FixedVectorType>(OriginalType);

  TypeSize OriginalSize = DL.getTypeSizeInBits(VTy);
  TypeSize ConvertScalarSize = DL.getTypeSizeInBits(ConvertToScalar);
  unsigned ConvertEltCount =
      (OriginalSize + ConvertScalarSize - 1) / ConvertScalarSize;

  if (OriginalSize <= ConvertScalarSize)
    return IntegerType::get(Mod.getContext(), ConvertScalarSize);

  return VectorType::get(Type::getIntNTy(Mod.getContext(), ConvertScalarSize),
                         ConvertEltCount, false);
}

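// Illustrative example derived from the logic below: converting a v3i16
// (48 bits) to a <2 x i32> (64 bits) first pads the vector to <4 x i16> with
// an extra poison lane via shufflevector, then bitcasts it to the wider type.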
Value *LiveRegOptimizer::convertToOptType(Instruction *V,
                                          BasicBlock::iterator &InsertPt) {
  FixedVectorType *VTy = cast<FixedVectorType>(V->getType());
  Type *NewTy = calculateConvertType(V->getType());

  TypeSize OriginalSize = DL.getTypeSizeInBits(VTy);
  TypeSize NewSize = DL.getTypeSizeInBits(NewTy);

  IRBuilder<> Builder(V->getParent(), InsertPt);
  // If there is a bitsize match, we can fit the old vector into a new vector
  // of the desired type.
  if (OriginalSize == NewSize)
    return Builder.CreateBitCast(V, NewTy, V->getName() + ".bc");

  // If there is a bitsize mismatch, we must use a wider vector.
  assert(NewSize > OriginalSize);
  uint64_t ExpandedVecElementCount = NewSize / VTy->getScalarSizeInBits();

  SmallVector<int, 8> ShuffleMask;
  uint64_t OriginalElementCount = VTy->getElementCount().getFixedValue();
  for (unsigned I = 0; I < OriginalElementCount; I++)
    ShuffleMask.push_back(I);

  for (uint64_t I = OriginalElementCount; I < ExpandedVecElementCount; I++)
    ShuffleMask.push_back(OriginalElementCount);

  Value *ExpandedVec = Builder.CreateShuffleVector(V, ShuffleMask);
  return Builder.CreateBitCast(ExpandedVec, NewTy, V->getName() + ".bc");
}

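// Illustrative examples derived from the logic below: an i32 holding a v2i8
// is truncated to i16 and then bitcast back to <2 x i8>; a <2 x i32> holding
// a v3i16 is bitcast to <4 x i16> and the extra high lane is dropped with a
// shufflevector.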
Value *LiveRegOptimizer::convertFromOptType(Type *ConvertType, Instruction *V,
                                            BasicBlock::iterator &InsertPt,
                                            BasicBlock *InsertBB) {
  FixedVectorType *NewVTy = cast<FixedVectorType>(ConvertType);

  TypeSize OriginalSize = DL.getTypeSizeInBits(V->getType());
  TypeSize NewSize = DL.getTypeSizeInBits(NewVTy);

  IRBuilder<> Builder(InsertBB, InsertPt);
  // If there is a bitsize match, we simply convert back to the original type.
  if (OriginalSize == NewSize)
    return Builder.CreateBitCast(V, NewVTy, V->getName() + ".bc");

  // If there is a bitsize mismatch, then we must have used a wider value to
  // hold the bits.
  assert(OriginalSize > NewSize);
  // For wide scalars, we can just truncate the value.
  if (!V->getType()->isVectorTy()) {
    Instruction *Trunc = cast<Instruction>(
        Builder.CreateTrunc(V, IntegerType::get(Mod.getContext(), NewSize)));
    return cast<Instruction>(Builder.CreateBitCast(Trunc, NewVTy));
  }

  // For wider vectors, we must strip the MSBs to convert back to the original
  // type.
  VectorType *ExpandedVT = VectorType::get(
      Type::getIntNTy(Mod.getContext(), NewVTy->getScalarSizeInBits()),
      (OriginalSize / NewVTy->getScalarSizeInBits()), false);
  Instruction *Converted =
      cast<Instruction>(Builder.CreateBitCast(V, ExpandedVT));

  unsigned NarrowElementCount = NewVTy->getElementCount().getFixedValue();
  SmallVector<int, 8> ShuffleMask(NarrowElementCount);
  std::iota(ShuffleMask.begin(), ShuffleMask.end(), 0);

  return Builder.CreateShuffleVector(Converted, ShuffleMask);
}

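// The overall flow below: (1) walk the def-use graph from \p I to collect the
// problematic defs, PHIs, and cross-block uses; (2) coerce the defs to the
// optimal type; (3) rebuild the PHIs in the coerced type; (4) convert back to
// the original type in front of each use.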
bool LiveRegOptimizer::optimizeLiveType(
    Instruction *I, SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
  SmallVector<Instruction *, 4> Worklist;
  SmallPtrSet<PHINode *, 4> PhiNodes;
  SmallPtrSet<Instruction *, 4> Defs;
  SmallPtrSet<Instruction *, 4> Uses;
  SmallPtrSet<Instruction *, 4> Visited;

  Worklist.push_back(cast<Instruction>(I));
  while (!Worklist.empty()) {
    Instruction *II = Worklist.pop_back_val();

    if (!Visited.insert(II).second)
      continue;

    if (!shouldReplace(II->getType()))
      continue;

    if (!isCoercionProfitable(II))
      continue;

    if (PHINode *Phi = dyn_cast<PHINode>(II)) {
      PhiNodes.insert(Phi);
      // Collect all the incoming values of problematic PHI nodes.
      for (Value *V : Phi->incoming_values()) {
        // Repeat the collection process for newly found PHI nodes.
        if (PHINode *OpPhi = dyn_cast<PHINode>(V)) {
          if (!PhiNodes.count(OpPhi) && !Visited.count(OpPhi))
            Worklist.push_back(OpPhi);
          continue;
        }

        Instruction *IncInst = dyn_cast<Instruction>(V);
        // Other incoming value types (e.g. vector literals) are unhandled.
        if (!IncInst && !isa<ConstantAggregateZero>(V))
          return false;

        // Collect all other incoming values for coercion.
        if (IncInst)
          Defs.insert(IncInst);
      }
    }

    // Collect all relevant uses.
    for (User *V : II->users()) {
      // Repeat the collection process for problematic PHI nodes.
      if (PHINode *OpPhi = dyn_cast<PHINode>(V)) {
        if (!PhiNodes.count(OpPhi) && !Visited.count(OpPhi))
          Worklist.push_back(OpPhi);
        continue;
      }

      Instruction *UseInst = cast<Instruction>(V);
      // Collect all uses of PHINodes and any use that crosses BB boundaries.
      if (UseInst->getParent() != II->getParent() || isa<PHINode>(II)) {
        Uses.insert(UseInst);
        if (!isa<PHINode>(II))
          Defs.insert(II);
      }
    }
  }

  // Coerce and track the defs.
  for (Instruction *D : Defs) {
    if (!ValMap.contains(D)) {
      BasicBlock::iterator InsertPt = std::next(D->getIterator());
      Value *ConvertVal = convertToOptType(D, InsertPt);
      assert(ConvertVal);
      ValMap[D] = ConvertVal;
    }
  }

  // Construct new-typed PHI nodes.
  for (PHINode *Phi : PhiNodes) {
    ValMap[Phi] = PHINode::Create(calculateConvertType(Phi->getType()),
                                  Phi->getNumIncomingValues(),
                                  Phi->getName() + ".tc", Phi->getIterator());
  }

  // Connect all the PHI nodes with their new incoming values.
  for (PHINode *Phi : PhiNodes) {
    PHINode *NewPhi = cast<PHINode>(ValMap[Phi]);
    bool MissingIncVal = false;
    for (int I = 0, E = Phi->getNumIncomingValues(); I < E; I++) {
      Value *IncVal = Phi->getIncomingValue(I);
      if (isa<ConstantAggregateZero>(IncVal)) {
        Type *NewType = calculateConvertType(Phi->getType());
        NewPhi->addIncoming(ConstantInt::get(NewType, 0, false),
                            Phi->getIncomingBlock(I));
      } else if (Value *Val = ValMap.lookup(IncVal))
        NewPhi->addIncoming(Val, Phi->getIncomingBlock(I));
      else
        MissingIncVal = true;
    }
    if (MissingIncVal) {
      Value *DeadVal = ValMap[Phi];
      // The coercion chain of the PHI is broken. Delete the Phi
      // from the ValMap and any connected / user Phis.
      SmallVector<Value *, 4> PHIWorklist;
      SmallPtrSet<Value *, 4> VisitedPhis;
      PHIWorklist.push_back(DeadVal);
      while (!PHIWorklist.empty()) {
        Value *NextDeadValue = PHIWorklist.pop_back_val();
        VisitedPhis.insert(NextDeadValue);
        auto OriginalPhi =
            llvm::find_if(PhiNodes, [this, &NextDeadValue](PHINode *CandPhi) {
              return ValMap[CandPhi] == NextDeadValue;
            });
        // This PHI may have already been removed from maps when unwinding a
        // previous Phi.
        if (OriginalPhi != PhiNodes.end())
          ValMap.erase(*OriginalPhi);

        DeadInsts.emplace_back(cast<Instruction>(NextDeadValue));

        for (User *U : NextDeadValue->users()) {
          if (!VisitedPhis.contains(cast<PHINode>(U)))
            PHIWorklist.push_back(U);
        }
      }
    } else {
      DeadInsts.emplace_back(cast<Instruction>(Phi));
    }
  }
  // Coerce back to the original type and replace the uses.
  for (Instruction *U : Uses) {
    // Replace all converted operands for a use.
    for (auto [OpIdx, Op] : enumerate(U->operands())) {
      if (Value *Val = ValMap.lookup(Op)) {
        Value *NewVal = nullptr;
        if (BBUseValMap.contains(U->getParent()) &&
            BBUseValMap[U->getParent()].contains(Val))
          NewVal = BBUseValMap[U->getParent()][Val];
        else {
          BasicBlock::iterator InsertPt = U->getParent()->getFirstNonPHIIt();
          // We may pick up ops that were previously converted for users in
          // other blocks. If there is an originally typed definition of the
          // Op already in this block, simply reuse it.
          if (isa<Instruction>(Op) && !isa<PHINode>(Op) &&
              U->getParent() == cast<Instruction>(Op)->getParent()) {
            NewVal = Op;
          } else {
            NewVal = convertFromOptType(Op->getType(),
                                        cast<Instruction>(ValMap[Op]),
                                        InsertPt, U->getParent());
            BBUseValMap[U->getParent()][ValMap[Op]] = NewVal;
          }
        }
        assert(NewVal);
        U->setOperand(OpIdx, NewVal);
      }
    }
  }

  return true;
}

bool AMDGPULateCodeGenPrepare::canWidenScalarExtLoad(LoadInst &LI) const {
  unsigned AS = LI.getPointerAddressSpace();
  // Skip non-constant address spaces.
  if (AS != AMDGPUAS::CONSTANT_ADDRESS &&
      AS != AMDGPUAS::CONSTANT_ADDRESS_32BIT)
    return false;
  // Skip non-simple loads.
  if (!LI.isSimple())
    return false;
  Type *Ty = LI.getType();
  // Skip aggregate types.
  if (Ty->isAggregateType())
    return false;
  unsigned TySize = DL.getTypeStoreSize(Ty);
  // Only handle sub-DWORD loads.
  if (TySize >= 4)
    return false;
  // The load must be at least naturally aligned.
  if (LI.getAlign() < DL.getABITypeAlign(Ty))
    return false;
  // It should be uniform, i.e. a scalar load.
  return UA.isUniform(&LI);
}

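// Illustrative example of the widening performed below (not taken from the
// source or a test case): given a uniform i16 load whose address is 2 bytes
// past a DWORD-aligned base,
//   %v = load i16, ptr addrspace(4) %p, align 2
// the load is rewritten as a DWORD-aligned load of the enclosing word plus a
// shift and truncate:
//   %w = load i32, ptr addrspace(4) %p.base, align 4
//   %s = lshr i32 %w, 16
//   %v = trunc i32 %s to i16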
bool AMDGPULateCodeGenPrepare::visitLoadInst(LoadInst &LI) {
  if (!WidenLoads)
    return false;

  // Skip if the load is already at least DWORD aligned, as that case is
  // handled in SDAG.
  if (LI.getAlign() >= 4)
    return false;

  if (!canWidenScalarExtLoad(LI))
    return false;

  int64_t Offset = 0;
  auto *Base =
      GetPointerBaseWithConstantOffset(LI.getPointerOperand(), Offset, DL);
  // If the base is not DWORD aligned, it's not safe to perform the following
  // transformation.
  if (!isDWORDAligned(Base))
    return false;

  int64_t Adjust = Offset & 0x3;
  if (Adjust == 0) {
    // With a zero adjust, the original alignment can simply be promoted.
    LI.setAlignment(Align(4));
    return true;
  }

  IRBuilder<> IRB(&LI);
  IRB.SetCurrentDebugLocation(LI.getDebugLoc());

  unsigned LdBits = DL.getTypeStoreSizeInBits(LI.getType());
  auto *IntNTy = Type::getIntNTy(LI.getContext(), LdBits);

  auto *NewPtr = IRB.CreateConstGEP1_64(
      IRB.getInt8Ty(),
      IRB.CreateAddrSpaceCast(Base, LI.getPointerOperand()->getType()),
      Offset - Adjust);

  LoadInst *NewLd = IRB.CreateAlignedLoad(IRB.getInt32Ty(), NewPtr, Align(4));
  NewLd->copyMetadata(LI);
  NewLd->setMetadata(LLVMContext::MD_range, nullptr);

  unsigned ShAmt = Adjust * 8;
  Value *NewVal = IRB.CreateBitCast(
      IRB.CreateTrunc(IRB.CreateLShr(NewLd, ShAmt),
                      DL.typeSizeEqualsStoreSize(LI.getType()) ? IntNTy
                                                               : LI.getType()),
      LI.getType());
  LI.replaceAllUsesWith(NewVal);
  DeadInsts.emplace_back(&LI);

  return true;
}


PreservedAnalyses
AMDGPULateCodeGenPreparePass::run(Function &F, FunctionAnalysisManager &FAM) {
  const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
  AssumptionCache &AC = FAM.getResult<AssumptionAnalysis>(F);
  UniformityInfo &UI = FAM.getResult<UniformityInfoAnalysis>(F);

  bool Changed = AMDGPULateCodeGenPrepare(F, ST, &AC, UI).run();

  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA = PreservedAnalyses::none();
  PA.preserveSet<CFGAnalyses>();
  return PA;
}

class AMDGPULateCodeGenPrepareLegacy : public FunctionPass {
public:
  static char ID;

  AMDGPULateCodeGenPrepareLegacy() : FunctionPass(ID) {}

  StringRef getPassName() const override {
    return "AMDGPU IR late optimizations";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<UniformityInfoWrapperPass>();
    // Invalidates UniformityInfo.
    AU.setPreservesCFG();
  }

  bool runOnFunction(Function &F) override;
};

bool AMDGPULateCodeGenPrepareLegacy::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
  const TargetMachine &TM = TPC.getTM<TargetMachine>();
  const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);

  AssumptionCache &AC =
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  UniformityInfo &UI =
      getAnalysis<UniformityInfoWrapperPass>().getUniformityInfo();

  return AMDGPULateCodeGenPrepare(F, ST, &AC, UI).run();
}

INITIALIZE_PASS_BEGIN(AMDGPULateCodeGenPrepareLegacy, DEBUG_TYPE,
                      "AMDGPU IR late optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(UniformityInfoWrapperPass)
INITIALIZE_PASS_END(AMDGPULateCodeGenPrepareLegacy, DEBUG_TYPE,
                    "AMDGPU IR late optimizations", false, false)

char AMDGPULateCodeGenPrepareLegacy::ID = 0;

FunctionPass *llvm::createAMDGPULateCodeGenPrepareLegacyPass() {
  return new AMDGPULateCodeGenPrepareLegacy();
}