//===-- AMDGPUAtomicOptimizer.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass optimizes atomic operations by using a single lane of a wavefront
/// to perform the atomic operation, thus reducing contention on that memory
/// location.
/// The atomic optimizer uses the following strategies to compute the scan and
/// reduced values:
/// 1. DPP -
///    This is the most efficient implementation for the scan. DPP uses Whole
///    Wave Mode (WWM).
/// 2. Iterative -
///    An alternative implementation that iterates over all active lanes of the
///    wavefront using llvm.cttz and performs the scan using the readlane and
///    writelane intrinsics.
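///
/// As a rough sketch (not the literal IR this pass emits), a wavefront
/// executing `atomicrmw add ptr %p, i32 %v` with a uniform pointer %p and a
/// divergent value %v is rewritten so that:
///   - the active lanes compute %sum, the wavefront-wide sum of %v, and
///     %scan, an exclusive prefix sum of %v over the active lanes;
///   - only the first active lane performs
///     `%old = atomicrmw add ptr %p, i32 %sum`;
///   - every lane then reconstructs its own result as
///     `readfirstlane(%old) + %scan`.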
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/UniformityAnalysis.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/InitializePasses.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

#define DEBUG_TYPE "amdgpu-atomic-optimizer"

using namespace llvm;
using namespace llvm::AMDGPU;

namespace {

struct ReplacementInfo {
  Instruction *I;
  AtomicRMWInst::BinOp Op;
  unsigned ValIdx;
  bool ValDivergent;
};

class AMDGPUAtomicOptimizer : public FunctionPass {
public:
  static char ID;
  ScanOptions ScanImpl;
  AMDGPUAtomicOptimizer(ScanOptions ScanImpl)
      : FunctionPass(ID), ScanImpl(ScanImpl) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<UniformityInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
  }
};

class AMDGPUAtomicOptimizerImpl
    : public InstVisitor<AMDGPUAtomicOptimizerImpl> {
private:
  Function &F;
  SmallVector<ReplacementInfo, 8> ToReplace;
  const UniformityInfo &UA;
  const DataLayout &DL;
  DomTreeUpdater &DTU;
  const GCNSubtarget &ST;
  bool IsPixelShader;
  ScanOptions ScanImpl;

  Value *buildReduction(IRBuilder<> &B, AtomicRMWInst::BinOp Op, Value *V,
                        Value *const Identity) const;
  Value *buildScan(IRBuilder<> &B, AtomicRMWInst::BinOp Op, Value *V,
                   Value *const Identity) const;
  Value *buildShiftRight(IRBuilder<> &B, Value *V, Value *const Identity) const;

  std::pair<Value *, Value *>
  buildScanIteratively(IRBuilder<> &B, AtomicRMWInst::BinOp Op,
                       Value *const Identity, Value *V, Instruction &I,
                       BasicBlock *ComputeLoop, BasicBlock *ComputeEnd) const;

  void optimizeAtomic(Instruction &I, AtomicRMWInst::BinOp Op, unsigned ValIdx,
                      bool ValDivergent) const;

public:
  AMDGPUAtomicOptimizerImpl() = delete;

  AMDGPUAtomicOptimizerImpl(Function &F, const UniformityInfo &UA,
                            DomTreeUpdater &DTU, const GCNSubtarget &ST,
                            ScanOptions ScanImpl)
      : F(F), UA(UA), DL(F.getDataLayout()), DTU(DTU), ST(ST),
        IsPixelShader(F.getCallingConv() == CallingConv::AMDGPU_PS),
        ScanImpl(ScanImpl) {}

  bool run();

  void visitAtomicRMWInst(AtomicRMWInst &I);
  void visitIntrinsicInst(IntrinsicInst &I);
};

} // namespace

char AMDGPUAtomicOptimizer::ID = 0;

char &llvm::AMDGPUAtomicOptimizerID = AMDGPUAtomicOptimizer::ID;

bool AMDGPUAtomicOptimizer::runOnFunction(Function &F) {
  if (skipFunction(F)) {
    return false;
  }

  const UniformityInfo &UA =
      getAnalysis<UniformityInfoWrapperPass>().getUniformityInfo();

  DominatorTreeWrapperPass *DTW =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DomTreeUpdater DTU(DTW ? &DTW->getDomTree() : nullptr,
                     DomTreeUpdater::UpdateStrategy::Lazy);

  const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
  const TargetMachine &TM = TPC.getTM<TargetMachine>();
  const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);

  return AMDGPUAtomicOptimizerImpl(F, UA, DTU, ST, ScanImpl).run();
}

PreservedAnalyses AMDGPUAtomicOptimizerPass::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  const auto &UA = AM.getResult<UniformityInfoAnalysis>(F);

  DomTreeUpdater DTU(&AM.getResult<DominatorTreeAnalysis>(F),
                     DomTreeUpdater::UpdateStrategy::Lazy);
  const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);

  bool IsChanged = AMDGPUAtomicOptimizerImpl(F, UA, DTU, ST, ScanImpl).run();

  if (!IsChanged) {
    return PreservedAnalyses::all();
  }

  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  return PA;
}

bool AMDGPUAtomicOptimizerImpl::run() {
  // Scan option None disables the Pass.
  if (ScanImpl == ScanOptions::None)
    return false;
  if (ST.isSingleLaneExecution(F))
    return false;

  visit(F);
  if (ToReplace.empty())
    return false;

  for (auto &[I, Op, ValIdx, ValDivergent] : ToReplace)
    optimizeAtomic(*I, Op, ValIdx, ValDivergent);
  ToReplace.clear();
  return true;
}

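// The optimizer only scans or reduces divergent operands that can be moved
// between lanes as 32-bit or 64-bit values: readlane/writelane/DPP operate on
// 32-bit registers, and 64-bit values are split into two halves during
// legalization. Anything else is left untouched.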
static bool isLegalCrossLaneType(Type *Ty) {
  switch (Ty->getTypeID()) {
  case Type::FloatTyID:
  case Type::DoubleTyID:
    return true;
  case Type::IntegerTyID: {
    unsigned Size = Ty->getIntegerBitWidth();
    return (Size == 32 || Size == 64);
  }
  default:
    return false;
  }
}

void AMDGPUAtomicOptimizerImpl::visitAtomicRMWInst(AtomicRMWInst &I) {
  // Early exit for unhandled address space atomic instructions.
  switch (I.getPointerAddressSpace()) {
  default:
    return;
  case AMDGPUAS::GLOBAL_ADDRESS:
  case AMDGPUAS::LOCAL_ADDRESS:
    break;
  }

  AtomicRMWInst::BinOp Op = I.getOperation();

  switch (Op) {
  default:
    return;
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
  case AtomicRMWInst::FAdd:
  case AtomicRMWInst::FSub:
  case AtomicRMWInst::FMax:
  case AtomicRMWInst::FMin:
    break;
  }

  // Only 32 and 64 bit floating point atomic ops are supported.
  if (AtomicRMWInst::isFPOperation(Op) &&
      !(I.getType()->isFloatTy() || I.getType()->isDoubleTy())) {
    return;
  }

  const unsigned PtrIdx = 0;
  const unsigned ValIdx = 1;

  // If the pointer operand is divergent, then each lane is doing an atomic
  // operation on a different address, and we cannot optimize that.
  if (UA.isDivergentUse(I.getOperandUse(PtrIdx))) {
    return;
  }

  bool ValDivergent = UA.isDivergentUse(I.getOperandUse(ValIdx));

  // If the value operand is divergent, each lane is contributing a different
  // value to the atomic calculation. We can only optimize divergent values if
  // we have DPP available on our subtarget (for DPP strategy), and the atomic
  // operation is 32 or 64 bits.
  if (ValDivergent) {
    if (ScanImpl == ScanOptions::DPP && !ST.hasDPP())
      return;

    if (!isLegalCrossLaneType(I.getType()))
      return;
  }

  // If we get here, we can optimize the atomic using a single wavefront-wide
  // atomic operation to do the calculation for the entire wavefront, so
  // remember the instruction so we can come back to it.
  ToReplace.push_back({&I, Op, ValIdx, ValDivergent});
}

void AMDGPUAtomicOptimizerImpl::visitIntrinsicInst(IntrinsicInst &I) {
  AtomicRMWInst::BinOp Op;

  switch (I.getIntrinsicID()) {
  default:
    return;
  case Intrinsic::amdgcn_struct_buffer_atomic_add:
  case Intrinsic::amdgcn_struct_ptr_buffer_atomic_add:
  case Intrinsic::amdgcn_raw_buffer_atomic_add:
  case Intrinsic::amdgcn_raw_ptr_buffer_atomic_add:
    Op = AtomicRMWInst::Add;
    break;
  case Intrinsic::amdgcn_struct_buffer_atomic_sub:
  case Intrinsic::amdgcn_struct_ptr_buffer_atomic_sub:
  case Intrinsic::amdgcn_raw_buffer_atomic_sub:
  case Intrinsic::amdgcn_raw_ptr_buffer_atomic_sub:
    Op = AtomicRMWInst::Sub;
    break;
  case Intrinsic::amdgcn_struct_buffer_atomic_and:
  case Intrinsic::amdgcn_struct_ptr_buffer_atomic_and:
  case Intrinsic::amdgcn_raw_buffer_atomic_and:
  case Intrinsic::amdgcn_raw_ptr_buffer_atomic_and:
    Op = AtomicRMWInst::And;
    break;
  case Intrinsic::amdgcn_struct_buffer_atomic_or:
  case Intrinsic::amdgcn_struct_ptr_buffer_atomic_or:
  case Intrinsic::amdgcn_raw_buffer_atomic_or:
  case Intrinsic::amdgcn_raw_ptr_buffer_atomic_or:
    Op = AtomicRMWInst::Or;
    break;
  case Intrinsic::amdgcn_struct_buffer_atomic_xor:
  case Intrinsic::amdgcn_struct_ptr_buffer_atomic_xor:
  case Intrinsic::amdgcn_raw_buffer_atomic_xor:
  case Intrinsic::amdgcn_raw_ptr_buffer_atomic_xor:
    Op = AtomicRMWInst::Xor;
    break;
  case Intrinsic::amdgcn_struct_buffer_atomic_smin:
  case Intrinsic::amdgcn_struct_ptr_buffer_atomic_smin:
  case Intrinsic::amdgcn_raw_buffer_atomic_smin:
  case Intrinsic::amdgcn_raw_ptr_buffer_atomic_smin:
    Op = AtomicRMWInst::Min;
    break;
  case Intrinsic::amdgcn_struct_buffer_atomic_umin:
  case Intrinsic::amdgcn_struct_ptr_buffer_atomic_umin:
  case Intrinsic::amdgcn_raw_buffer_atomic_umin:
  case Intrinsic::amdgcn_raw_ptr_buffer_atomic_umin:
    Op = AtomicRMWInst::UMin;
    break;
  case Intrinsic::amdgcn_struct_buffer_atomic_smax:
  case Intrinsic::amdgcn_struct_ptr_buffer_atomic_smax:
  case Intrinsic::amdgcn_raw_buffer_atomic_smax:
  case Intrinsic::amdgcn_raw_ptr_buffer_atomic_smax:
    Op = AtomicRMWInst::Max;
    break;
  case Intrinsic::amdgcn_struct_buffer_atomic_umax:
  case Intrinsic::amdgcn_struct_ptr_buffer_atomic_umax:
  case Intrinsic::amdgcn_raw_buffer_atomic_umax:
  case Intrinsic::amdgcn_raw_ptr_buffer_atomic_umax:
    Op = AtomicRMWInst::UMax;
    break;
  }

  const unsigned ValIdx = 0;

  const bool ValDivergent = UA.isDivergentUse(I.getOperandUse(ValIdx));

  // If the value operand is divergent, each lane is contributing a different
  // value to the atomic calculation. We can only optimize divergent values if
  // we have DPP available on our subtarget (for DPP strategy), and the atomic
  // operation is 32 or 64 bits.
  if (ValDivergent) {
    if (ScanImpl == ScanOptions::DPP && !ST.hasDPP())
      return;

    if (!isLegalCrossLaneType(I.getType()))
      return;
  }

  // If any of the other arguments to the intrinsic are divergent, we can't
  // optimize the operation.
  for (unsigned Idx = 1; Idx < I.getNumOperands(); Idx++) {
    if (UA.isDivergentUse(I.getOperandUse(Idx)))
      return;
  }

  // If we get here, we can optimize the atomic using a single wavefront-wide
  // atomic operation to do the calculation for the entire wavefront, so
  // remember the instruction so we can come back to it.
  ToReplace.push_back({&I, Op, ValIdx, ValDivergent});
}

// Use the builder to create the non-atomic counterpart of the specified
// atomicrmw binary op.
static Value *buildNonAtomicBinOp(IRBuilder<> &B, AtomicRMWInst::BinOp Op,
                                  Value *LHS, Value *RHS) {
  CmpInst::Predicate Pred;

  switch (Op) {
  default:
    llvm_unreachable("Unhandled atomic op");
  case AtomicRMWInst::Add:
    return B.CreateBinOp(Instruction::Add, LHS, RHS);
  case AtomicRMWInst::FAdd:
    return B.CreateFAdd(LHS, RHS);
  case AtomicRMWInst::Sub:
    return B.CreateBinOp(Instruction::Sub, LHS, RHS);
  case AtomicRMWInst::FSub:
    return B.CreateFSub(LHS, RHS);
  case AtomicRMWInst::And:
    return B.CreateBinOp(Instruction::And, LHS, RHS);
  case AtomicRMWInst::Or:
    return B.CreateBinOp(Instruction::Or, LHS, RHS);
  case AtomicRMWInst::Xor:
    return B.CreateBinOp(Instruction::Xor, LHS, RHS);

  case AtomicRMWInst::Max:
    Pred = CmpInst::ICMP_SGT;
    break;
  case AtomicRMWInst::Min:
    Pred = CmpInst::ICMP_SLT;
    break;
  case AtomicRMWInst::UMax:
    Pred = CmpInst::ICMP_UGT;
    break;
  case AtomicRMWInst::UMin:
    Pred = CmpInst::ICMP_ULT;
    break;
  case AtomicRMWInst::FMax:
    return B.CreateMaxNum(LHS, RHS);
  case AtomicRMWInst::FMin:
    return B.CreateMinNum(LHS, RHS);
  }
  Value *Cond = B.CreateICmp(Pred, LHS, RHS);
  return B.CreateSelect(Cond, LHS, RHS);
}

// Use the builder to create a reduction of V across the wavefront, with all
// lanes active, returning the same result in all lanes.
Value *AMDGPUAtomicOptimizerImpl::buildReduction(IRBuilder<> &B,
                                                 AtomicRMWInst::BinOp Op,
                                                 Value *V,
                                                 Value *const Identity) const {
  Type *AtomicTy = V->getType();
  Module *M = B.GetInsertBlock()->getModule();

  // Reduce within each row of 16 lanes.
  for (unsigned Idx = 0; Idx < 4; Idx++) {
    V = buildNonAtomicBinOp(
        B, Op, V,
        B.CreateIntrinsic(Intrinsic::amdgcn_update_dpp, AtomicTy,
                          {Identity, V, B.getInt32(DPP::ROW_XMASK0 | 1 << Idx),
                           B.getInt32(0xf), B.getInt32(0xf), B.getFalse()}));
  }
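  // After the four ROW_XMASK steps (XOR strides 1, 2, 4 and 8), every lane in
  // a 16-lane row holds the reduction of its whole row; inactive lanes were
  // pre-set to the identity by the caller, so they do not perturb the result.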

  // Reduce within each pair of rows (i.e. 32 lanes).
  assert(ST.hasPermLaneX16());
  Value *Permlanex16Call =
      B.CreateIntrinsic(AtomicTy, Intrinsic::amdgcn_permlanex16,
                        {PoisonValue::get(AtomicTy), V, B.getInt32(0),
                         B.getInt32(0), B.getFalse(), B.getFalse()});
  V = buildNonAtomicBinOp(B, Op, V, Permlanex16Call);
  if (ST.isWave32()) {
    return V;
  }

  if (ST.hasPermLane64()) {
    // Reduce across the upper and lower 32 lanes.
    Value *Permlane64Call =
        B.CreateIntrinsic(AtomicTy, Intrinsic::amdgcn_permlane64, V);
    return buildNonAtomicBinOp(B, Op, V, Permlane64Call);
  }

  // Pick an arbitrary lane from 0..31 and an arbitrary lane from 32..63 and
  // combine them with a scalar operation.
  Function *ReadLane = Intrinsic::getOrInsertDeclaration(
      M, Intrinsic::amdgcn_readlane, AtomicTy);
  Value *Lane0 = B.CreateCall(ReadLane, {V, B.getInt32(0)});
  Value *Lane32 = B.CreateCall(ReadLane, {V, B.getInt32(32)});
  return buildNonAtomicBinOp(B, Op, Lane0, Lane32);
}

// Use the builder to create an inclusive scan of V across the wavefront, with
// all lanes active.
Value *AMDGPUAtomicOptimizerImpl::buildScan(IRBuilder<> &B,
                                            AtomicRMWInst::BinOp Op, Value *V,
                                            Value *Identity) const {
  Type *AtomicTy = V->getType();
  Module *M = B.GetInsertBlock()->getModule();
  Function *UpdateDPP = Intrinsic::getOrInsertDeclaration(
      M, Intrinsic::amdgcn_update_dpp, AtomicTy);

  for (unsigned Idx = 0; Idx < 4; Idx++) {
    V = buildNonAtomicBinOp(
        B, Op, V,
        B.CreateCall(UpdateDPP,
                     {Identity, V, B.getInt32(DPP::ROW_SHR0 | 1 << Idx),
                      B.getInt32(0xf), B.getInt32(0xf), B.getFalse()}));
  }
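  // The four ROW_SHR steps (shifts of 1, 2, 4 and 8 lanes) form a
  // Hillis-Steele style inclusive prefix scan within each row of 16 lanes;
  // lanes whose source would fall before the start of the row receive the
  // identity instead, so each row's scan starts fresh.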
  if (ST.hasDPPBroadcasts()) {
    // GFX9 has DPP row broadcast operations.
    V = buildNonAtomicBinOp(
        B, Op, V,
        B.CreateCall(UpdateDPP,
                     {Identity, V, B.getInt32(DPP::BCAST15), B.getInt32(0xa),
                      B.getInt32(0xf), B.getFalse()}));
    V = buildNonAtomicBinOp(
        B, Op, V,
        B.CreateCall(UpdateDPP,
                     {Identity, V, B.getInt32(DPP::BCAST31), B.getInt32(0xc),
                      B.getInt32(0xf), B.getFalse()}));
  } else {
    // On GFX10 all DPP operations are confined to a single row. To get cross-
    // row operations we have to use permlane or readlane.

    // Combine lane 15 into lanes 16..31 (and, for wave 64, lane 47 into lanes
    // 48..63).
    assert(ST.hasPermLaneX16());
    Value *PermX =
        B.CreateIntrinsic(AtomicTy, Intrinsic::amdgcn_permlanex16,
                          {PoisonValue::get(AtomicTy), V, B.getInt32(-1),
                           B.getInt32(-1), B.getFalse(), B.getFalse()});

    Value *UpdateDPPCall = B.CreateCall(
        UpdateDPP, {Identity, PermX, B.getInt32(DPP::QUAD_PERM_ID),
                    B.getInt32(0xa), B.getInt32(0xf), B.getFalse()});
    V = buildNonAtomicBinOp(B, Op, V, UpdateDPPCall);

    if (!ST.isWave32()) {
      // Combine lane 31 into lanes 32..63.
      Value *const Lane31 = B.CreateIntrinsic(
          AtomicTy, Intrinsic::amdgcn_readlane, {V, B.getInt32(31)});

      Value *UpdateDPPCall = B.CreateCall(
          UpdateDPP, {Identity, Lane31, B.getInt32(DPP::QUAD_PERM_ID),
                      B.getInt32(0xc), B.getInt32(0xf), B.getFalse()});

      V = buildNonAtomicBinOp(B, Op, V, UpdateDPPCall);
    }
  }
  return V;
}

// Use the builder to create a shift right of V across the wavefront, with all
// lanes active, to turn an inclusive scan into an exclusive scan.
Value *AMDGPUAtomicOptimizerImpl::buildShiftRight(IRBuilder<> &B, Value *V,
                                                  Value *Identity) const {
  Type *AtomicTy = V->getType();
  Module *M = B.GetInsertBlock()->getModule();
  Function *UpdateDPP = Intrinsic::getOrInsertDeclaration(
      M, Intrinsic::amdgcn_update_dpp, AtomicTy);
  if (ST.hasDPPWavefrontShifts()) {
    // GFX9 has DPP wavefront shift operations.
    V = B.CreateCall(UpdateDPP,
                     {Identity, V, B.getInt32(DPP::WAVE_SHR1), B.getInt32(0xf),
                      B.getInt32(0xf), B.getFalse()});
  } else {
    Function *ReadLane = Intrinsic::getOrInsertDeclaration(
        M, Intrinsic::amdgcn_readlane, AtomicTy);
    Function *WriteLane = Intrinsic::getOrInsertDeclaration(
        M, Intrinsic::amdgcn_writelane, AtomicTy);

    // On GFX10 all DPP operations are confined to a single row. To get cross-
    // row operations we have to use permlane or readlane.
    Value *Old = V;
    V = B.CreateCall(UpdateDPP,
                     {Identity, V, B.getInt32(DPP::ROW_SHR0 + 1),
                      B.getInt32(0xf), B.getInt32(0xf), B.getFalse()});

    // Copy the old lane 15 to the new lane 16.
    V = B.CreateCall(WriteLane, {B.CreateCall(ReadLane, {Old, B.getInt32(15)}),
                                 B.getInt32(16), V});

    if (!ST.isWave32()) {
      // Copy the old lane 31 to the new lane 32.
      V = B.CreateCall(
          WriteLane,
          {B.CreateCall(ReadLane, {Old, B.getInt32(31)}), B.getInt32(32), V});

      // Copy the old lane 47 to the new lane 48.
      V = B.CreateCall(
          WriteLane,
          {B.CreateCall(ReadLane, {Old, B.getInt32(47)}), B.getInt32(48), V});
    }
  }

  return V;
}

// Use the builder to create an exclusive scan and compute the final reduced
// value using an iterative approach. This provides an alternative to the DPP
// implementation, which uses WWM for its scan computations. This routine
// iterates over the active lanes, reading, accumulating and writing back the
// value with the readlane and writelane intrinsics.
std::pair<Value *, Value *> AMDGPUAtomicOptimizerImpl::buildScanIteratively(
    IRBuilder<> &B, AtomicRMWInst::BinOp Op, Value *const Identity, Value *V,
    Instruction &I, BasicBlock *ComputeLoop, BasicBlock *ComputeEnd) const {
  auto *Ty = I.getType();
  auto *WaveTy = B.getIntNTy(ST.getWavefrontSize());
  auto *EntryBB = I.getParent();
  auto NeedResult = !I.use_empty();

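  // Take a ballot of the currently active lanes in the entry block; the loop
  // below retires one set bit (one lane) per iteration until none remain.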
  auto *Ballot =
      B.CreateIntrinsic(Intrinsic::amdgcn_ballot, WaveTy, B.getTrue());

  // Start inserting instructions for the ComputeLoop block.
  B.SetInsertPoint(ComputeLoop);
  // Phi nodes for the accumulator, the scan result destination, and the
  // remaining active lanes.
  auto *Accumulator = B.CreatePHI(Ty, 2, "Accumulator");
  Accumulator->addIncoming(Identity, EntryBB);
  PHINode *OldValuePhi = nullptr;
  if (NeedResult) {
    OldValuePhi = B.CreatePHI(Ty, 2, "OldValuePhi");
    OldValuePhi->addIncoming(PoisonValue::get(Ty), EntryBB);
  }
  auto *ActiveBits = B.CreatePHI(WaveTy, 2, "ActiveBits");
  ActiveBits->addIncoming(Ballot, EntryBB);

  // Use the llvm.cttz intrinsic to find the lowest remaining active lane.
  auto *FF1 =
      B.CreateIntrinsic(Intrinsic::cttz, WaveTy, {ActiveBits, B.getTrue()});

  auto *LaneIdxInt = B.CreateTrunc(FF1, B.getInt32Ty());

  // Get the value required for the atomic operation.
  Value *LaneValue = B.CreateIntrinsic(V->getType(), Intrinsic::amdgcn_readlane,
                                       {V, LaneIdxInt});

  // Perform a writelane if the intermediate scan results are required later in
  // the kernel computations.
  Value *OldValue = nullptr;
  if (NeedResult) {
    OldValue = B.CreateIntrinsic(V->getType(), Intrinsic::amdgcn_writelane,
                                 {Accumulator, LaneIdxInt, OldValuePhi});
    OldValuePhi->addIncoming(OldValue, ComputeLoop);
  }

  // Accumulate the results.
  auto *NewAccumulator = buildNonAtomicBinOp(B, Op, Accumulator, LaneValue);
  Accumulator->addIncoming(NewAccumulator, ComputeLoop);

  // Clear the current lane's bit so that on the next iteration llvm.cttz
  // returns the next active lane.
  auto *Mask = B.CreateShl(ConstantInt::get(WaveTy, 1), FF1);

  auto *InverseMask = B.CreateXor(Mask, ConstantInt::getAllOnesValue(WaveTy));
  auto *NewActiveBits = B.CreateAnd(ActiveBits, InverseMask);
  ActiveBits->addIncoming(NewActiveBits, ComputeLoop);

  // Branch out of the loop when all lanes are processed.
  auto *IsEnd = B.CreateICmpEQ(NewActiveBits, ConstantInt::get(WaveTy, 0));
  B.CreateCondBr(IsEnd, ComputeEnd, ComputeLoop);

  B.SetInsertPoint(ComputeEnd);

  return {OldValue, NewAccumulator};
}

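// Return the value that leaves any x unchanged when combined with it under Op
// (used to seed the scans and to fill inactive lanes): e.g. 0 for add/or/xor,
// all-ones for and, and the most extreme representable value for the min/max
// variants.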
static Constant *getIdentityValueForAtomicOp(Type *const Ty,
                                             AtomicRMWInst::BinOp Op) {
  LLVMContext &C = Ty->getContext();
  const unsigned BitWidth = Ty->getPrimitiveSizeInBits();
  switch (Op) {
  default:
    llvm_unreachable("Unhandled atomic op");
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::UMax:
    return ConstantInt::get(C, APInt::getMinValue(BitWidth));
  case AtomicRMWInst::And:
  case AtomicRMWInst::UMin:
    return ConstantInt::get(C, APInt::getMaxValue(BitWidth));
  case AtomicRMWInst::Max:
    return ConstantInt::get(C, APInt::getSignedMinValue(BitWidth));
  case AtomicRMWInst::Min:
    return ConstantInt::get(C, APInt::getSignedMaxValue(BitWidth));
  case AtomicRMWInst::FAdd:
    return ConstantFP::get(C, APFloat::getZero(Ty->getFltSemantics(), true));
  case AtomicRMWInst::FSub:
    return ConstantFP::get(C, APFloat::getZero(Ty->getFltSemantics(), false));
  case AtomicRMWInst::FMin:
  case AtomicRMWInst::FMax:
    // FIXME: atomicrmw fmax/fmin behave like llvm.maxnum/minnum so NaN is the
    // closest thing they have to an identity, but it still does not preserve
    // the difference between quiet and signaling NaNs or NaNs with different
    // payloads.
    return ConstantFP::get(C, APFloat::getNaN(Ty->getFltSemantics()));
  }
}

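// Multiply LHS by RHS, folding the common case where LHS is the constant 1
// (e.g. every lane adding 1 to a counter), in which case the scaled value is
// simply RHS, the active-lane count.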
static Value *buildMul(IRBuilder<> &B, Value *LHS, Value *RHS) {
  const ConstantInt *CI = dyn_cast<ConstantInt>(LHS);
  return (CI && CI->isOne()) ? RHS : B.CreateMul(LHS, RHS);
}

void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
                                               AtomicRMWInst::BinOp Op,
                                               unsigned ValIdx,
                                               bool ValDivergent) const {
  // Start building just before the instruction.
  IRBuilder<> B(&I);

  if (AtomicRMWInst::isFPOperation(Op)) {
    B.setIsFPConstrained(I.getFunction()->hasFnAttribute(Attribute::StrictFP));
  }

  // If we are in a pixel shader, because of how we have to mask out helper
  // lane invocations, we need to record the entry and exit BB's.
  BasicBlock *PixelEntryBB = nullptr;
  BasicBlock *PixelExitBB = nullptr;

  // If we're optimizing an atomic within a pixel shader, we need to wrap the
  // entire atomic operation in a helper-lane check. We do not want any helper
  // lanes that are around only for the purposes of derivatives to take part
  // in any cross-lane communication, and we use a branch on whether the lane is
  // live to do this.
  if (IsPixelShader) {
    // Record I's original position as the entry block.
    PixelEntryBB = I.getParent();

    Value *const Cond = B.CreateIntrinsic(Intrinsic::amdgcn_ps_live, {});
    Instruction *const NonHelperTerminator =
        SplitBlockAndInsertIfThen(Cond, &I, false, nullptr, &DTU, nullptr);

    // Record I's new position as the exit block.
    PixelExitBB = I.getParent();

    I.moveBefore(NonHelperTerminator->getIterator());
    B.SetInsertPoint(&I);
  }

  Type *const Ty = I.getType();
  Type *Int32Ty = B.getInt32Ty();
  bool isAtomicFloatingPointTy = Ty->isFloatingPointTy();
  [[maybe_unused]] const unsigned TyBitWidth = DL.getTypeSizeInBits(Ty);

  // This is the value in the atomic operation we need to combine in order to
  // reduce the number of atomic operations.
  Value *V = I.getOperand(ValIdx);

  // We need to know how many lanes are active within the wavefront, and we do
  // this by doing a ballot of active lanes.
  Type *const WaveTy = B.getIntNTy(ST.getWavefrontSize());
  CallInst *const Ballot =
      B.CreateIntrinsic(Intrinsic::amdgcn_ballot, WaveTy, B.getTrue());

  // We need to know how many lanes are active within the wavefront that are
  // below us. If we counted each lane linearly starting from 0, a lane is
  // below us only if its associated index was less than ours. We do this by
  // using the mbcnt intrinsic.
  Value *Mbcnt;
  if (ST.isWave32()) {
    Mbcnt =
        B.CreateIntrinsic(Intrinsic::amdgcn_mbcnt_lo, {Ballot, B.getInt32(0)});
  } else {
    Value *const ExtractLo = B.CreateTrunc(Ballot, Int32Ty);
    Value *const ExtractHi = B.CreateTrunc(B.CreateLShr(Ballot, 32), Int32Ty);
    Mbcnt = B.CreateIntrinsic(Intrinsic::amdgcn_mbcnt_lo,
                              {ExtractLo, B.getInt32(0)});
    Mbcnt = B.CreateIntrinsic(Intrinsic::amdgcn_mbcnt_hi, {ExtractHi, Mbcnt});
  }
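  // Mbcnt now holds, for each lane, the number of active lanes with a lower
  // lane id; in particular the first active lane sees 0, which is later used
  // both to elect the single lane that performs the atomic operation and, for
  // uniform values, to compute each lane's offset into the combined result.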

  Function *F = I.getFunction();
  LLVMContext &C = F->getContext();

  // For atomic sub, perform scan with add operation and allow one lane to
  // subtract the reduced value later.
  AtomicRMWInst::BinOp ScanOp = Op;
  if (Op == AtomicRMWInst::Sub) {
    ScanOp = AtomicRMWInst::Add;
  } else if (Op == AtomicRMWInst::FSub) {
    ScanOp = AtomicRMWInst::FAdd;
  }
  Value *Identity = getIdentityValueForAtomicOp(Ty, ScanOp);

  Value *ExclScan = nullptr;
  Value *NewV = nullptr;

  const bool NeedResult = !I.use_empty();

  BasicBlock *ComputeLoop = nullptr;
  BasicBlock *ComputeEnd = nullptr;
  // If we have a divergent value in each lane, we need to combine the value
  // using DPP.
  if (ValDivergent) {
    if (ScanImpl == ScanOptions::DPP) {
      // First we need to set all inactive invocations to the identity value, so
      // that they can correctly contribute to the final result.
      NewV =
          B.CreateIntrinsic(Intrinsic::amdgcn_set_inactive, Ty, {V, Identity});
      if (!NeedResult && ST.hasPermLaneX16()) {
        // On GFX10 the permlanex16 instruction helps us build a reduction
        // without too many readlanes and writelanes, which are generally bad
        // for performance.
        NewV = buildReduction(B, ScanOp, NewV, Identity);
      } else {
        NewV = buildScan(B, ScanOp, NewV, Identity);
        if (NeedResult)
          ExclScan = buildShiftRight(B, NewV, Identity);
        // Read the value from the last lane, which has accumulated the values
        // of each active lane in the wavefront. This will be our new value
        // which we will provide to the atomic operation.
        Value *const LastLaneIdx = B.getInt32(ST.getWavefrontSize() - 1);
        NewV = B.CreateIntrinsic(Ty, Intrinsic::amdgcn_readlane,
                                 {NewV, LastLaneIdx});
      }
      // Finally mark the readlanes in the WWM section.
      NewV = B.CreateIntrinsic(Intrinsic::amdgcn_strict_wwm, Ty, NewV);
    } else if (ScanImpl == ScanOptions::Iterative) {
      // Alternative implementation for scan
      ComputeLoop = BasicBlock::Create(C, "ComputeLoop", F);
      ComputeEnd = BasicBlock::Create(C, "ComputeEnd", F);
      std::tie(ExclScan, NewV) = buildScanIteratively(B, ScanOp, Identity, V, I,
                                                      ComputeLoop, ComputeEnd);
    } else {
      llvm_unreachable("Atomic Optimizer is disabled for None strategy");
    }
  } else {
    switch (Op) {
    default:
      llvm_unreachable("Unhandled atomic op");

    case AtomicRMWInst::Add:
    case AtomicRMWInst::Sub: {
      // The new value we will be contributing to the atomic operation is the
      // old value times the number of active lanes.
      Value *const Ctpop = B.CreateIntCast(
          B.CreateUnaryIntrinsic(Intrinsic::ctpop, Ballot), Ty, false);
      NewV = buildMul(B, V, Ctpop);
      break;
    }
    case AtomicRMWInst::FAdd:
    case AtomicRMWInst::FSub: {
      Value *const Ctpop = B.CreateIntCast(
          B.CreateUnaryIntrinsic(Intrinsic::ctpop, Ballot), Int32Ty, false);
      Value *const CtpopFP = B.CreateUIToFP(Ctpop, Ty);
      NewV = B.CreateFMul(V, CtpopFP);
      break;
    }
    case AtomicRMWInst::And:
    case AtomicRMWInst::Or:
    case AtomicRMWInst::Max:
    case AtomicRMWInst::Min:
    case AtomicRMWInst::UMax:
    case AtomicRMWInst::UMin:
    case AtomicRMWInst::FMin:
    case AtomicRMWInst::FMax:
      // These operations with a uniform value are idempotent: doing the atomic
      // operation multiple times has the same effect as doing it once.
      NewV = V;
      break;

    case AtomicRMWInst::Xor:
      // The new value we will be contributing to the atomic operation is the
      // old value times the parity of the number of active lanes.
      Value *const Ctpop = B.CreateIntCast(
          B.CreateUnaryIntrinsic(Intrinsic::ctpop, Ballot), Ty, false);
      NewV = buildMul(B, V, B.CreateAnd(Ctpop, 1));
      break;
    }
  }
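  // NewV now holds the wavefront-combined value (a reduction, the value scaled
  // by the active-lane count, or simply V for idempotent ops) that the single
  // elected lane will feed to the memory atomic below.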

  // We only want a single lane to enter our new control flow, and we do this
  // by checking if there are any active lanes below us. Only one lane will
  // have 0 active lanes below us, so that will be the only one to progress.
  Value *const Cond = B.CreateICmpEQ(Mbcnt, B.getInt32(0));

  // Store I's original basic block before we split the block.
  BasicBlock *const OriginalBB = I.getParent();

  // We need to introduce some new control flow to force a single lane to be
  // active. We do this by splitting I's basic block at I, and introducing the
  // new block such that:
  // entry --> single_lane -\
  //       \------------------> exit
  Instruction *const SingleLaneTerminator =
      SplitBlockAndInsertIfThen(Cond, &I, false, nullptr, &DTU, nullptr);
  // At this point we have split I's block so that a single lane can update the
  // precomputed reduced value, and (for the iterative strategy) generated the
  // ComputeLoop/ComputeEnd blocks that perform the reduction and scan.
  // For that new control flow we need to move the branch instruction, i.e. the
  // terminator created by SplitBlockAndInsertIfThen, from I's block into the
  // ComputeEnd block, and make I's block branch to ComputeLoop so the loop runs
  // before the single lane updates the final reduced value.
  BasicBlock *Predecessor = nullptr;
  if (ValDivergent && ScanImpl == ScanOptions::Iterative) {
    // Move the terminator from I's block to the ComputeEnd block.
    //
    // OriginalBB is known to have a branch as terminator because
    // SplitBlockAndInsertIfThen will have inserted one.
    BranchInst *Terminator = cast<BranchInst>(OriginalBB->getTerminator());
    B.SetInsertPoint(ComputeEnd);
    Terminator->removeFromParent();
    B.Insert(Terminator);

    // Branch to the ComputeLoop block unconditionally from I's block for the
    // iterative approach.
    B.SetInsertPoint(OriginalBB);
    B.CreateBr(ComputeLoop);

    // Update the dominator tree for new control flow.
    SmallVector<DominatorTree::UpdateType, 6> DomTreeUpdates(
        {{DominatorTree::Insert, OriginalBB, ComputeLoop},
         {DominatorTree::Insert, ComputeLoop, ComputeEnd}});

    // We're moving the terminator from EntryBB to ComputeEnd, make sure we move
    // the DT edges as well.
    for (auto *Succ : Terminator->successors()) {
      DomTreeUpdates.push_back({DominatorTree::Insert, ComputeEnd, Succ});
      DomTreeUpdates.push_back({DominatorTree::Delete, OriginalBB, Succ});
    }

    DTU.applyUpdates(DomTreeUpdates);

    Predecessor = ComputeEnd;
  } else {
    Predecessor = OriginalBB;
  }
  // Move the IR builder into single_lane next.
  B.SetInsertPoint(SingleLaneTerminator);

  // Clone the original atomic operation into single lane, replacing the
  // original value with our newly created one.
  Instruction *const NewI = I.clone();
  B.Insert(NewI);
  NewI->setOperand(ValIdx, NewV);

  // Move the IR builder into exit next, and start inserting just before the
  // original instruction.
  B.SetInsertPoint(&I);

  if (NeedResult) {
    // Create a PHI node to get our new atomic result into the exit block.
    PHINode *const PHI = B.CreatePHI(Ty, 2);
    PHI->addIncoming(PoisonValue::get(Ty), Predecessor);
    PHI->addIncoming(NewI, SingleLaneTerminator->getParent());

    // We need to broadcast the value from the lowest active lane (the first
    // lane) to all other lanes in the wavefront.

    Value *ReadlaneVal = PHI;
    if (TyBitWidth < 32)
      ReadlaneVal = B.CreateZExt(PHI, B.getInt32Ty());

    Value *BroadcastI = B.CreateIntrinsic(
        ReadlaneVal->getType(), Intrinsic::amdgcn_readfirstlane, ReadlaneVal);
    if (TyBitWidth < 32)
      BroadcastI = B.CreateTrunc(BroadcastI, Ty);

    // Now that we have the result of our single atomic operation, we need to
    // get our individual lane's slice into the result. We use the lane offset
    // we previously calculated combined with the atomic result value we got
    // from the first lane, to get our lane's index into the atomic result.
    Value *LaneOffset = nullptr;
    if (ValDivergent) {
      if (ScanImpl == ScanOptions::DPP) {
        LaneOffset =
            B.CreateIntrinsic(Intrinsic::amdgcn_strict_wwm, Ty, ExclScan);
      } else if (ScanImpl == ScanOptions::Iterative) {
        LaneOffset = ExclScan;
      } else {
        llvm_unreachable("Atomic Optimizer is disabled for None strategy");
      }
    } else {
      Mbcnt = isAtomicFloatingPointTy ? B.CreateUIToFP(Mbcnt, Ty)
                                      : B.CreateIntCast(Mbcnt, Ty, false);
      switch (Op) {
      default:
        llvm_unreachable("Unhandled atomic op");
      case AtomicRMWInst::Add:
      case AtomicRMWInst::Sub:
        LaneOffset = buildMul(B, V, Mbcnt);
        break;
      case AtomicRMWInst::And:
      case AtomicRMWInst::Or:
      case AtomicRMWInst::Max:
      case AtomicRMWInst::Min:
      case AtomicRMWInst::UMax:
      case AtomicRMWInst::UMin:
      case AtomicRMWInst::FMin:
      case AtomicRMWInst::FMax:
        LaneOffset = B.CreateSelect(Cond, Identity, V);
        break;
      case AtomicRMWInst::Xor:
        LaneOffset = buildMul(B, V, B.CreateAnd(Mbcnt, 1));
        break;
      case AtomicRMWInst::FAdd:
      case AtomicRMWInst::FSub: {
        LaneOffset = B.CreateFMul(V, Mbcnt);
        break;
      }
      }
    }
    Value *Result = buildNonAtomicBinOp(B, Op, BroadcastI, LaneOffset);
    if (isAtomicFloatingPointTy) {
      // For fadd/fsub the first active lane of LaneOffset should be the
      // identity (-0.0 for fadd or +0.0 for fsub) but the value we calculated
      // is V * +0.0 which might have the wrong sign or might be nan (if V is
      // inf or nan).
      //
      // For all floating point ops if the in-memory value was a nan then the
      // binop we just built might have quieted it or changed its payload.
      //
      // Correct all these problems by using BroadcastI as the result in the
      // first active lane.
      Result = B.CreateSelect(Cond, BroadcastI, Result);
    }

    if (IsPixelShader) {
      // Need a final PHI to reconverge to above the helper lane branch mask.
      B.SetInsertPoint(PixelExitBB, PixelExitBB->getFirstNonPHIIt());

      PHINode *const PHI = B.CreatePHI(Ty, 2);
      PHI->addIncoming(PoisonValue::get(Ty), PixelEntryBB);
      PHI->addIncoming(Result, I.getParent());
      I.replaceAllUsesWith(PHI);
    } else {
      // Replace the original atomic instruction with the new one.
      I.replaceAllUsesWith(Result);
    }
  }

  // And delete the original.
  I.eraseFromParent();
}

INITIALIZE_PASS_BEGIN(AMDGPUAtomicOptimizer, DEBUG_TYPE,
                      "AMDGPU atomic optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(UniformityInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(AMDGPUAtomicOptimizer, DEBUG_TYPE,
                    "AMDGPU atomic optimizations", false, false)

FunctionPass *llvm::createAMDGPUAtomicOptimizerPass(ScanOptions ScanStrategy) {
  return new AMDGPUAtomicOptimizer(ScanStrategy);
}