1 | //===-- NVPTXTargetTransformInfo.cpp - NVPTX specific TTI -----------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | |
9 | #include "NVPTXTargetTransformInfo.h" |
10 | #include "NVPTXUtilities.h" |
11 | #include "llvm/ADT/STLExtras.h" |
12 | #include "llvm/Analysis/LoopInfo.h" |
13 | #include "llvm/Analysis/TargetTransformInfo.h" |
14 | #include "llvm/Analysis/ValueTracking.h" |
15 | #include "llvm/CodeGen/BasicTTIImpl.h" |
16 | #include "llvm/CodeGen/TargetLowering.h" |
17 | #include "llvm/IR/Constants.h" |
18 | #include "llvm/IR/IntrinsicInst.h" |
19 | #include "llvm/IR/Intrinsics.h" |
20 | #include "llvm/IR/IntrinsicsNVPTX.h" |
21 | #include "llvm/IR/Value.h" |
22 | #include "llvm/Support/Casting.h" |
23 | #include "llvm/Support/ErrorHandling.h" |
24 | #include "llvm/Support/NVPTXAddrSpace.h" |
25 | #include "llvm/Transforms/InstCombine/InstCombiner.h" |
26 | #include <optional> |
27 | using namespace llvm; |
28 | |
29 | #define DEBUG_TYPE "NVPTXtti" |
30 | |
31 | // Whether the given intrinsic reads threadIdx.x/y/z. |
32 | static bool readsThreadIndex(const IntrinsicInst *II) { |
33 | switch (II->getIntrinsicID()) { |
34 | default: return false; |
35 | case Intrinsic::nvvm_read_ptx_sreg_tid_x: |
36 | case Intrinsic::nvvm_read_ptx_sreg_tid_y: |
37 | case Intrinsic::nvvm_read_ptx_sreg_tid_z: |
38 | return true; |
39 | } |
40 | } |
41 | |
42 | static bool readsLaneId(const IntrinsicInst *II) { |
43 | return II->getIntrinsicID() == Intrinsic::nvvm_read_ptx_sreg_laneid; |
44 | } |
45 | |
46 | // Whether the given intrinsic is an atomic instruction in PTX. |
47 | static bool isNVVMAtomic(const IntrinsicInst *II) { |
48 | switch (II->getIntrinsicID()) { |
49 | default: |
50 | return false; |
51 | case Intrinsic::nvvm_atomic_add_gen_f_cta: |
52 | case Intrinsic::nvvm_atomic_add_gen_f_sys: |
53 | case Intrinsic::nvvm_atomic_add_gen_i_cta: |
54 | case Intrinsic::nvvm_atomic_add_gen_i_sys: |
55 | case Intrinsic::nvvm_atomic_and_gen_i_cta: |
56 | case Intrinsic::nvvm_atomic_and_gen_i_sys: |
57 | case Intrinsic::nvvm_atomic_cas_gen_i_cta: |
58 | case Intrinsic::nvvm_atomic_cas_gen_i_sys: |
59 | case Intrinsic::nvvm_atomic_dec_gen_i_cta: |
60 | case Intrinsic::nvvm_atomic_dec_gen_i_sys: |
61 | case Intrinsic::nvvm_atomic_inc_gen_i_cta: |
62 | case Intrinsic::nvvm_atomic_inc_gen_i_sys: |
63 | case Intrinsic::nvvm_atomic_max_gen_i_cta: |
64 | case Intrinsic::nvvm_atomic_max_gen_i_sys: |
65 | case Intrinsic::nvvm_atomic_min_gen_i_cta: |
66 | case Intrinsic::nvvm_atomic_min_gen_i_sys: |
67 | case Intrinsic::nvvm_atomic_or_gen_i_cta: |
68 | case Intrinsic::nvvm_atomic_or_gen_i_sys: |
69 | case Intrinsic::nvvm_atomic_exch_gen_i_cta: |
70 | case Intrinsic::nvvm_atomic_exch_gen_i_sys: |
71 | case Intrinsic::nvvm_atomic_xor_gen_i_cta: |
72 | case Intrinsic::nvvm_atomic_xor_gen_i_sys: |
73 | return true; |
74 | } |
75 | } |
76 | |
77 | bool NVPTXTTIImpl::isSourceOfDivergence(const Value *V) const { |
78 | // Without inter-procedural analysis, we conservatively assume that arguments |
79 | // to __device__ functions are divergent. |
80 | if (const Argument *Arg = dyn_cast<Argument>(V)) |
81 | return !isKernelFunction(*Arg->getParent()); |
82 | |
83 | if (const Instruction *I = dyn_cast<Instruction>(V)) { |
84 | // Without pointer analysis, we conservatively assume values loaded from |
85 | // generic or local address space are divergent. |
86 | if (const LoadInst *LI = dyn_cast<LoadInst>(I)) { |
87 | unsigned AS = LI->getPointerAddressSpace(); |
88 | return AS == ADDRESS_SPACE_GENERIC || AS == ADDRESS_SPACE_LOCAL; |
89 | } |
90 | // Atomic instructions may cause divergence. Atomic instructions are |
91 | // executed sequentially across all threads in a warp. Therefore, an earlier |
92 | // executed thread may see different memory inputs than a later executed |
93 | // thread. For example, suppose *a = 0 initially. |
94 | // |
95 | // atom.global.add.s32 d, [a], 1 |
96 | // |
97 | // returns 0 for the first thread that enters the critical region, and 1 for |
98 | // the second thread. |
99 | if (I->isAtomic()) |
100 | return true; |
101 | if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { |
102 | // Instructions that read threadIdx are obviously divergent. |
103 | if (readsThreadIndex(II) || readsLaneId(II)) |
104 | return true; |
105 | // Handle the NVPTX atomic intrinsics that cannot be represented as an |
106 | // atomic IR instruction. |
107 | if (isNVVMAtomic(II)) |
108 | return true; |
109 | } |
110 | // Conservatively consider the return value of function calls as divergent. |
111 | // We could analyze callees with bodies more precisely using |
112 | // inter-procedural analysis. |
113 | if (isa<CallInst>(I)) |
114 | return true; |
115 | } |
116 | |
117 | return false; |
118 | } |
119 | |
120 | // Convert NVVM intrinsics to target-generic LLVM code where possible. |
121 | static Instruction *convertNvvmIntrinsicToLlvm(InstCombiner &IC, |
122 | IntrinsicInst *II) { |
123 | // Each NVVM intrinsic we can simplify can be replaced with one of: |
124 | // |
125 | // * an LLVM intrinsic, |
126 | // * an LLVM cast operation, |
127 | // * an LLVM binary operation, or |
128 | // * ad-hoc LLVM IR for the particular operation. |
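| // For example, llvm.nvvm.ceil.d maps directly to llvm.ceil, and |
| // llvm.nvvm.d2i.rz becomes a plain fptosi cast. |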
129 | |
130 | // Some transformations are only valid when the module's |
131 | // flush-denormals-to-zero (ftz) setting is true/false, whereas other |
132 | // transformations are valid regardless of the module's ftz setting. |
133 | enum FtzRequirementTy { |
134 | FTZ_Any, // Any ftz setting is ok. |
135 | FTZ_MustBeOn, // Transformation is valid only if ftz is on. |
136 | FTZ_MustBeOff, // Transformation is valid only if ftz is off. |
137 | }; |
138 | // Classes of NVVM intrinsics that can't be replaced one-to-one with a |
139 | // target-generic intrinsic, cast op, or binary op but that we can nonetheless |
140 | // simplify. |
141 | enum SpecialCase { |
142 | SPC_Reciprocal, |
143 | SCP_FunnelShiftClamp, |
144 | }; |
145 | |
146 | // SimplifyAction is a poor-man's variant (plus an additional flag) that |
147 | // represents how to replace an NVVM intrinsic with target-generic LLVM IR. |
148 | struct SimplifyAction { |
149 | // Invariant: At most one of these Optionals has a value. |
150 | std::optional<Intrinsic::ID> IID; |
151 | std::optional<Instruction::CastOps> CastOp; |
152 | std::optional<Instruction::BinaryOps> BinaryOp; |
153 | std::optional<SpecialCase> Special; |
154 | |
155 | FtzRequirementTy FtzRequirement = FTZ_Any; |
156 | // Denormal handling is guarded by different attributes depending on the |
157 | // type (denormal-fp-math vs denormal-fp-math-f32), so take note of half types. |
158 | bool IsHalfTy = false; |
159 | |
160 | SimplifyAction() = default; |
161 | |
162 | SimplifyAction(Intrinsic::ID IID, FtzRequirementTy FtzReq, |
163 | bool IsHalfTy = false) |
164 | : IID(IID), FtzRequirement(FtzReq), IsHalfTy(IsHalfTy) {} |
165 | |
166 | // Cast operations don't have anything to do with FTZ, so we skip that |
167 | // argument. |
168 | SimplifyAction(Instruction::CastOps CastOp) : CastOp(CastOp) {} |
169 | |
170 | SimplifyAction(Instruction::BinaryOps BinaryOp, FtzRequirementTy FtzReq) |
171 | : BinaryOp(BinaryOp), FtzRequirement(FtzReq) {} |
172 | |
173 | SimplifyAction(SpecialCase Special, FtzRequirementTy FtzReq) |
174 | : Special(Special), FtzRequirement(FtzReq) {} |
175 | }; |
176 | |
177 | // Try to generate a SimplifyAction describing how to replace our |
178 | // IntrinsicInstr with target-generic LLVM IR. |
179 | const SimplifyAction Action = [II]() -> SimplifyAction { |
180 | switch (II->getIntrinsicID()) { |
181 | // NVVM intrinsics that map directly to LLVM intrinsics. |
182 | case Intrinsic::nvvm_ceil_d: |
183 | return {Intrinsic::ceil, FTZ_Any}; |
184 | case Intrinsic::nvvm_ceil_f: |
185 | return {Intrinsic::ceil, FTZ_MustBeOff}; |
186 | case Intrinsic::nvvm_ceil_ftz_f: |
187 | return {Intrinsic::ceil, FTZ_MustBeOn}; |
188 | case Intrinsic::nvvm_floor_d: |
189 | return {Intrinsic::floor, FTZ_Any}; |
190 | case Intrinsic::nvvm_floor_f: |
191 | return {Intrinsic::floor, FTZ_MustBeOff}; |
192 | case Intrinsic::nvvm_floor_ftz_f: |
193 | return {Intrinsic::floor, FTZ_MustBeOn}; |
194 | case Intrinsic::nvvm_fma_rn_d: |
195 | return {Intrinsic::fma, FTZ_Any}; |
196 | case Intrinsic::nvvm_fma_rn_f: |
197 | return {Intrinsic::fma, FTZ_MustBeOff}; |
198 | case Intrinsic::nvvm_fma_rn_ftz_f: |
199 | return {Intrinsic::fma, FTZ_MustBeOn}; |
200 | case Intrinsic::nvvm_fma_rn_f16: |
201 | return {Intrinsic::fma, FTZ_MustBeOff, true}; |
202 | case Intrinsic::nvvm_fma_rn_ftz_f16: |
203 | return {Intrinsic::fma, FTZ_MustBeOn, true}; |
204 | case Intrinsic::nvvm_fma_rn_f16x2: |
205 | return {Intrinsic::fma, FTZ_MustBeOff, true}; |
206 | case Intrinsic::nvvm_fma_rn_ftz_f16x2: |
207 | return {Intrinsic::fma, FTZ_MustBeOn, true}; |
208 | case Intrinsic::nvvm_fma_rn_bf16: |
209 | return {Intrinsic::fma, FTZ_MustBeOff, true}; |
210 | case Intrinsic::nvvm_fma_rn_ftz_bf16: |
211 | return {Intrinsic::fma, FTZ_MustBeOn, true}; |
212 | case Intrinsic::nvvm_fma_rn_bf16x2: |
213 | return {Intrinsic::fma, FTZ_MustBeOff, true}; |
214 | case Intrinsic::nvvm_fma_rn_ftz_bf16x2: |
215 | return {Intrinsic::fma, FTZ_MustBeOn, true}; |
216 | case Intrinsic::nvvm_fmax_d: |
217 | return {Intrinsic::maxnum, FTZ_Any}; |
218 | case Intrinsic::nvvm_fmax_f: |
219 | return {Intrinsic::maxnum, FTZ_MustBeOff}; |
220 | case Intrinsic::nvvm_fmax_ftz_f: |
221 | return {Intrinsic::maxnum, FTZ_MustBeOn}; |
222 | case Intrinsic::nvvm_fmax_nan_f: |
223 | return {Intrinsic::maximum, FTZ_MustBeOff}; |
224 | case Intrinsic::nvvm_fmax_ftz_nan_f: |
225 | return {Intrinsic::maximum, FTZ_MustBeOn}; |
226 | case Intrinsic::nvvm_fmax_f16: |
227 | return {Intrinsic::maxnum, FTZ_MustBeOff, true}; |
228 | case Intrinsic::nvvm_fmax_ftz_f16: |
229 | return {Intrinsic::maxnum, FTZ_MustBeOn, true}; |
230 | case Intrinsic::nvvm_fmax_f16x2: |
231 | return {Intrinsic::maxnum, FTZ_MustBeOff, true}; |
232 | case Intrinsic::nvvm_fmax_ftz_f16x2: |
233 | return {Intrinsic::maxnum, FTZ_MustBeOn, true}; |
234 | case Intrinsic::nvvm_fmax_nan_f16: |
235 | return {Intrinsic::maximum, FTZ_MustBeOff, true}; |
236 | case Intrinsic::nvvm_fmax_ftz_nan_f16: |
237 | return {Intrinsic::maximum, FTZ_MustBeOn, true}; |
238 | case Intrinsic::nvvm_fmax_nan_f16x2: |
239 | return {Intrinsic::maximum, FTZ_MustBeOff, true}; |
240 | case Intrinsic::nvvm_fmax_ftz_nan_f16x2: |
241 | return {Intrinsic::maximum, FTZ_MustBeOn, true}; |
242 | case Intrinsic::nvvm_fmin_d: |
243 | return {Intrinsic::minnum, FTZ_Any}; |
244 | case Intrinsic::nvvm_fmin_f: |
245 | return {Intrinsic::minnum, FTZ_MustBeOff}; |
246 | case Intrinsic::nvvm_fmin_ftz_f: |
247 | return {Intrinsic::minnum, FTZ_MustBeOn}; |
248 | case Intrinsic::nvvm_fmin_nan_f: |
249 | return {Intrinsic::minimum, FTZ_MustBeOff}; |
250 | case Intrinsic::nvvm_fmin_ftz_nan_f: |
251 | return {Intrinsic::minimum, FTZ_MustBeOn}; |
252 | case Intrinsic::nvvm_fmin_f16: |
253 | return {Intrinsic::minnum, FTZ_MustBeOff, true}; |
254 | case Intrinsic::nvvm_fmin_ftz_f16: |
255 | return {Intrinsic::minnum, FTZ_MustBeOn, true}; |
256 | case Intrinsic::nvvm_fmin_f16x2: |
257 | return {Intrinsic::minnum, FTZ_MustBeOff, true}; |
258 | case Intrinsic::nvvm_fmin_ftz_f16x2: |
259 | return {Intrinsic::minnum, FTZ_MustBeOn, true}; |
260 | case Intrinsic::nvvm_fmin_nan_f16: |
261 | return {Intrinsic::minimum, FTZ_MustBeOff, true}; |
262 | case Intrinsic::nvvm_fmin_ftz_nan_f16: |
263 | return {Intrinsic::minimum, FTZ_MustBeOn, true}; |
264 | case Intrinsic::nvvm_fmin_nan_f16x2: |
265 | return {Intrinsic::minimum, FTZ_MustBeOff, true}; |
266 | case Intrinsic::nvvm_fmin_ftz_nan_f16x2: |
267 | return {Intrinsic::minimum, FTZ_MustBeOn, true}; |
268 | case Intrinsic::nvvm_sqrt_rn_d: |
269 | return {Intrinsic::sqrt, FTZ_Any}; |
270 | case Intrinsic::nvvm_sqrt_f: |
271 | // nvvm_sqrt_f is a special case. For most intrinsics, foo_ftz_f is the |
272 | // ftz version, and foo_f is the non-ftz version. But nvvm_sqrt_f adopts |
273 | // the ftz-ness of the surrounding code. sqrt_rn_f and sqrt_rn_ftz_f are |
274 | // the versions with explicit ftz-ness. |
275 | return {Intrinsic::sqrt, FTZ_Any}; |
276 | case Intrinsic::nvvm_trunc_d: |
277 | return {Intrinsic::trunc, FTZ_Any}; |
278 | case Intrinsic::nvvm_trunc_f: |
279 | return {Intrinsic::trunc, FTZ_MustBeOff}; |
280 | case Intrinsic::nvvm_trunc_ftz_f: |
281 | return {Intrinsic::trunc, FTZ_MustBeOn}; |
282 | |
283 | // NVVM intrinsics that map to LLVM cast operations. |
284 | // |
285 | // Note that llvm's target-generic conversion operators correspond to the rz |
286 | // (round to zero) versions of the nvvm conversion intrinsics, even though |
287 | // most everything else here uses the rn (round to nearest even) nvvm ops. |
288 | case Intrinsic::nvvm_d2i_rz: |
289 | case Intrinsic::nvvm_f2i_rz: |
290 | case Intrinsic::nvvm_d2ll_rz: |
291 | case Intrinsic::nvvm_f2ll_rz: |
292 | return {Instruction::FPToSI}; |
293 | case Intrinsic::nvvm_d2ui_rz: |
294 | case Intrinsic::nvvm_f2ui_rz: |
295 | case Intrinsic::nvvm_d2ull_rz: |
296 | case Intrinsic::nvvm_f2ull_rz: |
297 | return {Instruction::FPToUI}; |
298 | // Integer to floating-point uses RN rounding, not RZ |
299 | case Intrinsic::nvvm_i2d_rn: |
300 | case Intrinsic::nvvm_i2f_rn: |
301 | case Intrinsic::nvvm_ll2d_rn: |
302 | case Intrinsic::nvvm_ll2f_rn: |
303 | return {Instruction::SIToFP}; |
304 | case Intrinsic::nvvm_ui2d_rn: |
305 | case Intrinsic::nvvm_ui2f_rn: |
306 | case Intrinsic::nvvm_ull2d_rn: |
307 | case Intrinsic::nvvm_ull2f_rn: |
308 | return {Instruction::UIToFP}; |
309 | |
310 | // NVVM intrinsics that map to LLVM binary ops. |
311 | case Intrinsic::nvvm_div_rn_d: |
312 | return {Instruction::FDiv, FTZ_Any}; |
313 | |
314 | // The remainder of cases are NVVM intrinsics that map to LLVM idioms, but |
315 | // need special handling. |
316 | // |
317 | // We seem to be missing intrinsics for rcp.approx.{ftz.}f32, which is just |
318 | // as well. |
319 | case Intrinsic::nvvm_rcp_rn_d: |
320 | return {SPC_Reciprocal, FTZ_Any}; |
321 | |
322 | case Intrinsic::nvvm_fshl_clamp: |
323 | case Intrinsic::nvvm_fshr_clamp: |
324 | return {SCP_FunnelShiftClamp, FTZ_Any}; |
325 | |
326 | // We do not currently simplify intrinsics that give an approximate |
327 | // answer. These include: |
328 | // |
329 | // - nvvm_cos_approx_{f,ftz_f} |
330 | // - nvvm_ex2_approx_{d,f,ftz_f} |
331 | // - nvvm_lg2_approx_{d,f,ftz_f} |
332 | // - nvvm_sin_approx_{f,ftz_f} |
333 | // - nvvm_sqrt_approx_{f,ftz_f} |
334 | // - nvvm_rsqrt_approx_{d,f,ftz_f} |
335 | // - nvvm_div_approx_{ftz_d,ftz_f,f} |
336 | // - nvvm_rcp_approx_ftz_d |
337 | // |
338 | // Ideally we'd encode them as e.g. "fast call @llvm.cos", where "fast" |
339 | // means that fastmath is enabled in the intrinsic. Unfortunately only |
340 | // binary operators (currently) have a fastmath bit in SelectionDAG, so |
341 | // this information gets lost and we can't select on it. |
342 | // |
343 | // TODO: div and rcp are lowered to a binary op, so these we could in |
344 | // theory lower them to "fast fdiv". |
345 | |
346 | default: |
347 | return {}; |
348 | } |
349 | }(); |
350 | |
351 | // If Action.FtzRequirementTy is not satisfied by the module's ftz state, we |
352 | // can bail out now. (Notice that in the case that IID is not an NVVM |
353 | // intrinsic, we don't have to look up any module metadata, as |
354 | // FtzRequirementTy will be FTZ_Any.) |
355 | if (Action.FtzRequirement != FTZ_Any) { |
356 | // FIXME: Broken for f64 |
357 | DenormalMode Mode = II->getFunction()->getDenormalMode( |
358 | Action.IsHalfTy ? APFloat::IEEEhalf() : APFloat::IEEEsingle()); |
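| // PTX's ftz flushes denormal results to sign-preserving zero, so an output |
| // mode of PreserveSign (e.g. "denormal-fp-math-f32"="preserve-sign,preserve-sign") |
| // is what we treat as ftz being enabled. |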
359 | bool FtzEnabled = Mode.Output == DenormalMode::PreserveSign; |
360 | |
361 | if (FtzEnabled != (Action.FtzRequirement == FTZ_MustBeOn)) |
362 | return nullptr; |
363 | } |
364 | |
365 | // Simplify to target-generic intrinsic. |
366 | if (Action.IID) { |
367 | SmallVector<Value *, 4> Args(II->args()); |
368 | // All the target-generic intrinsics currently of interest to us have one |
369 | // type argument, equal to that of the nvvm intrinsic's argument. |
370 | Type *Tys[] = {II->getArgOperand(0)->getType()}; |
371 | return CallInst::Create( |
372 | Intrinsic::getOrInsertDeclaration(II->getModule(), *Action.IID, Tys), |
373 | Args); |
374 | } |
375 | |
376 | // Simplify to target-generic binary op. |
377 | if (Action.BinaryOp) |
378 | return BinaryOperator::Create(*Action.BinaryOp, II->getArgOperand(0), |
379 | II->getArgOperand(1), II->getName()); |
380 | |
381 | // Simplify to target-generic cast op. |
382 | if (Action.CastOp) |
383 | return CastInst::Create(*Action.CastOp, II->getArgOperand(0), II->getType(), |
384 | II->getName()); |
385 | |
386 | // All that's left are the special cases. |
387 | if (!Action.Special) |
388 | return nullptr; |
389 | |
390 | switch (*Action.Special) { |
391 | case SPC_Reciprocal: |
392 | // Simplify reciprocal. |
393 | return BinaryOperator::Create( |
394 | Instruction::FDiv, ConstantFP::get(II->getArgOperand(0)->getType(), 1), |
395 | II->getArgOperand(0), II->getName()); |
396 | |
397 | case SCP_FunnelShiftClamp: { |
398 | // Canonicalize a clamping funnel shift to the generic llvm funnel shift |
399 | // when possible, as this is easier for llvm to optimize further. |
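| // For example, i32 llvm.nvvm.fshl.clamp(%a, %b, 7) becomes |
| // llvm.fshl.i32(%a, %b, 7), while a clamped shift of 32 or more simply |
| // yields %b (%a for fshr.clamp). |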
400 | if (const auto *ShiftConst = dyn_cast<ConstantInt>(II->getArgOperand(2))) { |
401 | const bool IsLeft = II->getIntrinsicID() == Intrinsic::nvvm_fshl_clamp; |
402 | if (ShiftConst->getZExtValue() >= II->getType()->getIntegerBitWidth()) |
403 | return IC.replaceInstUsesWith(*II, II->getArgOperand(IsLeft ? 1 : 0)); |
404 | |
405 | const unsigned FshIID = IsLeft ? Intrinsic::fshl : Intrinsic::fshr; |
406 | return CallInst::Create(Intrinsic::getOrInsertDeclaration( |
407 | II->getModule(), FshIID, II->getType()), |
408 | SmallVector<Value *, 3>(II->args())); |
409 | } |
410 | return nullptr; |
411 | } |
412 | } |
413 | llvm_unreachable("All SpecialCase enumerators should be handled in switch." ); |
414 | } |
415 | |
416 | // Returns true/false when we know the answer, nullopt otherwise. |
417 | static std::optional<bool> evaluateIsSpace(Intrinsic::ID IID, unsigned AS) { |
418 | if (AS == NVPTXAS::ADDRESS_SPACE_GENERIC || |
419 | AS == NVPTXAS::ADDRESS_SPACE_PARAM) |
420 | return std::nullopt; // Got to check at run-time. |
421 | switch (IID) { |
422 | case Intrinsic::nvvm_isspacep_global: |
423 | return AS == NVPTXAS::ADDRESS_SPACE_GLOBAL; |
424 | case Intrinsic::nvvm_isspacep_local: |
425 | return AS == NVPTXAS::ADDRESS_SPACE_LOCAL; |
426 | case Intrinsic::nvvm_isspacep_shared: |
427 | // A shared::cluster pointer may or may not refer to this CTA's own shared |
427 | // memory, so this can't be evaluated at compile time. |
428 | if (AS == NVPTXAS::ADDRESS_SPACE_SHARED_CLUSTER) |
429 | return std::nullopt; |
430 | return AS == NVPTXAS::ADDRESS_SPACE_SHARED; |
431 | case Intrinsic::nvvm_isspacep_shared_cluster: |
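| // A CTA's own shared memory is part of its cluster's shared memory window, |
| // so plain shared pointers satisfy this check as well. |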
432 | return AS == NVPTXAS::ADDRESS_SPACE_SHARED_CLUSTER || |
433 | AS == NVPTXAS::ADDRESS_SPACE_SHARED; |
434 | case Intrinsic::nvvm_isspacep_const: |
435 | return AS == NVPTXAS::ADDRESS_SPACE_CONST; |
436 | default: |
437 | llvm_unreachable("Unexpected intrinsic" ); |
438 | } |
439 | } |
440 | |
441 | // Returns an instruction pointer (may be nullptr if we do not know the answer). |
442 | // Returns nullopt if `II` is not one of the `isspacep` intrinsics. |
443 | // |
444 | // TODO: If InferAddressSpaces were run early enough in the pipeline this could |
445 | // be removed in favor of the constant folding that occurs there through |
446 | // rewriteIntrinsicWithAddressSpace |
447 | static std::optional<Instruction *> |
448 | handleSpaceCheckIntrinsics(InstCombiner &IC, IntrinsicInst &II) { |
449 | |
450 | switch (auto IID = II.getIntrinsicID()) { |
451 | case Intrinsic::nvvm_isspacep_global: |
452 | case Intrinsic::nvvm_isspacep_local: |
453 | case Intrinsic::nvvm_isspacep_shared: |
454 | case Intrinsic::nvvm_isspacep_shared_cluster: |
455 | case Intrinsic::nvvm_isspacep_const: { |
456 | Value *Op0 = II.getArgOperand(0); |
457 | unsigned AS = Op0->getType()->getPointerAddressSpace(); |
458 | // Peek through ASC to generic AS. |
459 | // TODO: we could dig deeper through both ASCs and GEPs. |
460 | if (AS == NVPTXAS::ADDRESS_SPACE_GENERIC) |
461 | if (auto *ASCO = dyn_cast<AddrSpaceCastOperator>(Op0)) |
462 | AS = ASCO->getOperand(0)->getType()->getPointerAddressSpace(); |
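| // For example, if Op0 is an addrspacecast from the global AS, |
| // isspacep.global folds to true (and the other isspacep variants to false). |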
463 | |
464 | if (std::optional<bool> Answer = evaluateIsSpace(IID, AS)) |
465 | return IC.replaceInstUsesWith(II, |
466 | ConstantInt::get(II.getType(), *Answer)); |
467 | return nullptr; // Don't know the answer, got to check at run time. |
468 | } |
469 | default: |
470 | return std::nullopt; |
471 | } |
472 | } |
473 | |
474 | std::optional<Instruction *> |
475 | NVPTXTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const { |
476 | if (std::optional<Instruction *> I = handleSpaceCheckIntrinsics(IC, II)) |
477 | return *I; |
478 | if (Instruction *I = convertNvvmIntrinsicToLlvm(IC, &II)) |
479 | return I; |
480 | |
481 | return std::nullopt; |
482 | } |
483 | |
484 | InstructionCost |
485 | NVPTXTTIImpl::getInstructionCost(const User *U, |
486 | ArrayRef<const Value *> Operands, |
487 | TTI::TargetCostKind CostKind) const { |
488 | if (const auto *CI = dyn_cast<CallInst>(U)) |
489 | if (const auto *IA = dyn_cast<InlineAsm>(CI->getCalledOperand())) { |
490 | // Without this implementation, getCallCost() would return the number of |
491 | // arguments plus one as the cost, because the cost model treats inline asm |
492 | // as an ordinary call (it is classified as a call in the IR). A better |
493 | // cost model is to return the number of asm instructions embedded in the |
494 | // asm string. |
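| // For example, "{ .reg .u32 t; mov.u32 t, %1; add.u32 %0, t, %2; }" counts |
| // as two instructions: the ".reg" declaration and the empty piece after the |
| // closing brace are skipped by the heuristic below. |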
495 | StringRef AsmStr = IA->getAsmString(); |
496 | const unsigned InstCount = |
497 | count_if(split(AsmStr, ';'), [](StringRef AsmInst) { |
498 | // Trim off scopes denoted by '{' and '}' as these can be ignored |
499 | AsmInst = AsmInst.trim().ltrim("{} \t\n\v\f\r"); |
500 | // This is pretty coarse but does a reasonably good job of |
501 | // identifying things that look like instructions, possibly with a |
502 | // predicate ("@"). |
503 | return !AsmInst.empty() && |
504 | (AsmInst[0] == '@' || isAlpha(AsmInst[0]) || |
505 | AsmInst.find(".pragma") != StringRef::npos); |
506 | }); |
507 | return InstCount * TargetTransformInfo::TCC_Basic; |
508 | } |
509 | |
510 | return BaseT::getInstructionCost(U, Operands, CostKind); |
511 | } |
512 | |
513 | InstructionCost NVPTXTTIImpl::getArithmeticInstrCost( |
514 | unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, |
515 | TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info, |
516 | ArrayRef<const Value *> Args, const Instruction *CxtI) const { |
517 | // Legalize the type. |
518 | std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty); |
519 | |
520 | int ISD = TLI->InstructionOpcodeToISD(Opcode); |
521 | |
522 | switch (ISD) { |
523 | default: |
524 | return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, |
525 | Op2Info); |
526 | case ISD::ADD: |
527 | case ISD::MUL: |
528 | case ISD::XOR: |
529 | case ISD::OR: |
530 | case ISD::AND: |
531 | // The machine code (SASS) simulates an i64 with two i32. Therefore, we |
532 | // estimate that arithmetic operations on i64 are twice as expensive as |
533 | // those on types that can fit into one machine register. |
534 | if (LT.second.SimpleTy == MVT::i64) |
535 | return 2 * LT.first; |
536 | // Delegate other cases to the basic TTI. |
537 | return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, |
538 | Op2Info); |
539 | } |
540 | } |
541 | |
542 | void NVPTXTTIImpl::getUnrollingPreferences( |
543 | Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, |
544 | OptimizationRemarkEmitter *ORE) const { |
545 | BaseT::getUnrollingPreferences(L, SE, UP, ORE); |
546 | |
547 | // Enable partial unrolling and runtime unrolling, but reduce the |
548 | // threshold. This partially unrolls small loops, which are often |
549 | // unrolled by the PTX-to-SASS compiler anyway; unrolling them earlier |
550 | // can be beneficial. |
551 | UP.Partial = UP.Runtime = true; |
552 | UP.PartialThreshold = UP.Threshold / 4; |
553 | } |
554 | |
555 | void NVPTXTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE, |
556 | TTI::PeelingPreferences &PP) const { |
557 | BaseT::getPeelingPreferences(L, SE, PP); |
558 | } |
559 | |
560 | bool NVPTXTTIImpl::collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes, |
561 | Intrinsic::ID IID) const { |
562 | switch (IID) { |
563 | case Intrinsic::nvvm_isspacep_const: |
564 | case Intrinsic::nvvm_isspacep_global: |
565 | case Intrinsic::nvvm_isspacep_local: |
566 | case Intrinsic::nvvm_isspacep_shared: |
567 | case Intrinsic::nvvm_isspacep_shared_cluster: { |
568 | OpIndexes.push_back(0); |
569 | return true; |
570 | } |
571 | } |
572 | return false; |
573 | } |
574 | |
575 | Value *NVPTXTTIImpl::rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, |
576 | Value *OldV, |
577 | Value *NewV) const { |
578 | const Intrinsic::ID IID = II->getIntrinsicID(); |
579 | switch (IID) { |
580 | case Intrinsic::nvvm_isspacep_const: |
581 | case Intrinsic::nvvm_isspacep_global: |
582 | case Intrinsic::nvvm_isspacep_local: |
583 | case Intrinsic::nvvm_isspacep_shared: |
584 | case Intrinsic::nvvm_isspacep_shared_cluster: { |
585 | const unsigned NewAS = NewV->getType()->getPointerAddressSpace(); |
586 | if (const auto R = evaluateIsSpace(IID, NewAS)) |
587 | return ConstantInt::get(II->getType(), *R); |
588 | return nullptr; |
589 | } |
590 | } |
591 | return nullptr; |
592 | } |
593 | |
594 | unsigned NVPTXTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const { |
595 | // 256-bit loads/stores are currently only supported for the global address space. |
596 | if (ST->has256BitVectorLoadStore(AddrSpace)) |
597 | return 256; |
598 | return 128; |
599 | } |
600 | |
601 | unsigned NVPTXTTIImpl::getAssumedAddrSpace(const Value *V) const { |
602 | if (isa<AllocaInst>(V)) |
603 | return ADDRESS_SPACE_LOCAL; |
604 | |
605 | if (const Argument *Arg = dyn_cast<Argument>(V)) { |
606 | if (isKernelFunction(*Arg->getParent())) { |
607 | const NVPTXTargetMachine &TM = |
608 | static_cast<const NVPTXTargetMachine &>(getTLI()->getTargetMachine()); |
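| // Under the CUDA driver interface, a kernel's non-byval pointer arguments |
| // are assumed to point to global memory, so they can be given the global AS. |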
609 | if (TM.getDrvInterface() == NVPTX::CUDA && !Arg->hasByValAttr()) |
610 | return ADDRESS_SPACE_GLOBAL; |
611 | } else { |
612 | // We assume that all device parameters that are passed byval will be |
613 | // placed in the local AS. Very simple cases will be updated after ISel to |
614 | // use the device param space where possible. |
615 | if (Arg->hasByValAttr()) |
616 | return ADDRESS_SPACE_LOCAL; |
617 | } |
618 | } |
619 | |
620 | return -1; |
621 | } |
622 | |
623 | void NVPTXTTIImpl::collectKernelLaunchBounds( |
624 | const Function &F, |
625 | SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const { |
626 | if (const auto Val = getMaxClusterRank(F)) |
627 | LB.push_back({"maxclusterrank", *Val}); |
628 | |
629 | const auto MaxNTID = getMaxNTID(F); |
630 | if (MaxNTID.size() > 0) |
631 | LB.push_back({"maxntidx", MaxNTID[0]}); |
632 | if (MaxNTID.size() > 1) |
633 | LB.push_back({"maxntidy", MaxNTID[1]}); |
634 | if (MaxNTID.size() > 2) |
635 | LB.push_back({"maxntidz", MaxNTID[2]}); |
636 | } |
637 | |