//===-- AMDGPUISelLowering.h - AMDGPU Lowering Interface --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Interface definition of the TargetLowering class that is common
/// to all AMD GPUs.
//
//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUISELLOWERING_H
16#define LLVM_LIB_TARGET_AMDGPU_AMDGPUISELLOWERING_H
17
18#include "llvm/CodeGen/CallingConvLower.h"
19#include "llvm/CodeGen/TargetLowering.h"
20
21namespace llvm {
22
23class AMDGPUMachineFunctionInfo;
24class AMDGPUSubtarget;
25struct ArgDescriptor;
26
27class AMDGPUTargetLowering : public TargetLowering {
28private:
29 const AMDGPUSubtarget *Subtarget;
30
31 /// \returns AMDGPUISD::FFBH_U32 node if the incoming \p Op may have been
32 /// legalized from a smaller type VT. Need to match pre-legalized type because
33 /// the generic legalization inserts the add/sub between the select and
34 /// compare.
35 SDValue getFFBX_U32(SelectionDAG &DAG, SDValue Op, const SDLoc &DL, unsigned Opc) const;
36
37public:
38 /// \returns The minimum number of bits needed to store the value of \Op as an
39 /// unsigned integer. Truncating to this size and then zero-extending to the
40 /// original size will not change the value.
41 static unsigned numBitsUnsigned(SDValue Op, SelectionDAG &DAG);
42
43 /// \returns The minimum number of bits needed to store the value of \Op as a
44 /// signed integer. Truncating to this size and then sign-extending to the
45 /// original size will not change the value.
46 static unsigned numBitsSigned(SDValue Op, SelectionDAG &DAG);
47
48protected:
49 SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
50 SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
51 /// Split a vector store into multiple scalar stores.
52 /// \returns The resulting chain.
53
54 SDValue LowerCTLS(SDValue Op, SelectionDAG &DAG) const;
55 SDValue LowerFCEIL(SDValue Op, SelectionDAG &DAG) const;
56 SDValue LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const;
57 SDValue LowerFRINT(SDValue Op, SelectionDAG &DAG) const;
58 SDValue LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const;
59
60 SDValue LowerFROUNDEVEN(SDValue Op, SelectionDAG &DAG) const;
61 SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) const;
62 SDValue LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const;
63
64 static bool allowApproxFunc(const SelectionDAG &DAG, SDNodeFlags Flags);
65 static bool needsDenormHandlingF32(const SelectionDAG &DAG, SDValue Src,
66 SDNodeFlags Flags);
67 SDValue getIsLtSmallestNormal(SelectionDAG &DAG, SDValue Op,
68 SDNodeFlags Flags) const;
69 SDValue getIsFinite(SelectionDAG &DAG, SDValue Op, SDNodeFlags Flags) const;
70 std::pair<SDValue, SDValue> getScaledLogInput(SelectionDAG &DAG,
71 const SDLoc SL, SDValue Op,
72 SDNodeFlags Flags) const;
73
74 SDValue LowerFLOG2(SDValue Op, SelectionDAG &DAG) const;
75 SDValue LowerFLOGCommon(SDValue Op, SelectionDAG &DAG) const;
76 SDValue LowerFLOG10(SDValue Op, SelectionDAG &DAG) const;
77 SDValue LowerFLOGUnsafe(SDValue Op, const SDLoc &SL, SelectionDAG &DAG,
78 bool IsLog10, SDNodeFlags Flags) const;
79 SDValue lowerFEXP2(SDValue Op, SelectionDAG &DAG) const;
80
81 SDValue lowerFEXPUnsafeImpl(SDValue Op, const SDLoc &SL, SelectionDAG &DAG,
82 SDNodeFlags Flags, bool IsExp10) const;
83
84 SDValue lowerFEXPUnsafe(SDValue Op, const SDLoc &SL, SelectionDAG &DAG,
85 SDNodeFlags Flags) const;
86 SDValue lowerFEXP10Unsafe(SDValue Op, const SDLoc &SL, SelectionDAG &DAG,
87 SDNodeFlags Flags) const;
88 SDValue lowerFEXP(SDValue Op, SelectionDAG &DAG) const;
89 SDValue lowerFEXPF64(SDValue Op, SelectionDAG &DAG) const;
90
91 SDValue lowerCTLZResults(SDValue Op, SelectionDAG &DAG) const;
92
93 SDValue LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const;
94
95 SDValue LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG, bool Signed) const;
96 SDValue LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, bool Signed) const;
97 SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
98 SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
99
100 SDValue LowerFP_TO_INT64(SDValue Op, SelectionDAG &DAG, bool Signed) const;
101 SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const;
102 SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
103 SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
104
105 SDValue LowerF64ToF16Safe(SDValue Src, const SDLoc &DL,
106 SelectionDAG &DAG) const;
107
108 SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
109
110protected:
111 /// Check whether value Val can be supported by v_mov_b64, for the current
112 /// target.
113 bool isInt64ImmLegal(SDNode *Val, SelectionDAG &DAG) const;
114 bool shouldCombineMemoryType(EVT VT) const;
115 SDValue performLoadCombine(SDNode *N, DAGCombinerInfo &DCI) const;
116 SDValue performStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const;
117 SDValue performAssertSZExtCombine(SDNode *N, DAGCombinerInfo &DCI) const;
118 SDValue performIntrinsicWOChainCombine(SDNode *N, DAGCombinerInfo &DCI) const;
119
120 SDValue splitBinaryBitConstantOpImpl(DAGCombinerInfo &DCI, const SDLoc &SL,
121 unsigned Opc, SDValue LHS,
122 uint32_t ValLo, uint32_t ValHi) const;
123 SDValue performShlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
124 SDValue performSraCombine(SDNode *N, DAGCombinerInfo &DCI) const;
125 SDValue performSrlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
126 SDValue performTruncateCombine(SDNode *N, DAGCombinerInfo &DCI) const;
127 SDValue performMulCombine(SDNode *N, DAGCombinerInfo &DCI) const;
128 SDValue performMulLoHiCombine(SDNode *N, DAGCombinerInfo &DCI) const;
129 SDValue performMulhsCombine(SDNode *N, DAGCombinerInfo &DCI) const;
130 SDValue performMulhuCombine(SDNode *N, DAGCombinerInfo &DCI) const;
131 SDValue performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond, SDValue LHS,
132 SDValue RHS, DAGCombinerInfo &DCI) const;
133
134 SDValue foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI,
135 SDValue N) const;
136 SDValue performSelectCombine(SDNode *N, DAGCombinerInfo &DCI) const;
137
138 TargetLowering::NegatibleCost
139 getConstantNegateCost(const ConstantFPSDNode *C) const;
140
141 bool isConstantCostlierToNegate(SDValue N) const;
142 bool isConstantCheaperToNegate(SDValue N) const;
143 SDValue performFNegCombine(SDNode *N, DAGCombinerInfo &DCI) const;
144 SDValue performFAbsCombine(SDNode *N, DAGCombinerInfo &DCI) const;
145 SDValue performRcpCombine(SDNode *N, DAGCombinerInfo &DCI) const;
146
147 static EVT getEquivalentMemType(LLVMContext &Context, EVT VT);
148
149 virtual SDValue LowerGlobalAddress(AMDGPUMachineFunctionInfo *MFI, SDValue Op,
150 SelectionDAG &DAG) const;
151
152 /// Return 64-bit value Op as two 32-bit integers.
153 std::pair<SDValue, SDValue> split64BitValue(SDValue Op,
154 SelectionDAG &DAG) const;
155 SDValue getLoHalf64(SDValue Op, SelectionDAG &DAG) const;
156 SDValue getHiHalf64(SDValue Op, SelectionDAG &DAG) const;
157
158 /// Split a vector type into two parts. The first part is a power of two
159 /// vector. The second part is whatever is left over, and is a scalar if it
160 /// would otherwise be a 1-vector.
161 std::pair<EVT, EVT> getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const;
162
163 /// Split a vector value into two parts of types LoVT and HiVT. HiVT could be
164 /// scalar.
165 std::pair<SDValue, SDValue> splitVector(const SDValue &N, const SDLoc &DL,
166 const EVT &LoVT, const EVT &HighVT,
167 SelectionDAG &DAG) const;
168
169 /// Split a vector load into 2 loads of half the vector.
170 SDValue SplitVectorLoad(SDValue Op, SelectionDAG &DAG) const;
171
172 /// Widen a suitably aligned v3 load. For all other cases, split the input
173 /// vector load.
174 SDValue WidenOrSplitVectorLoad(SDValue Op, SelectionDAG &DAG) const;
175
176 /// Split a vector store into 2 stores of half the vector.
177 SDValue SplitVectorStore(SDValue Op, SelectionDAG &DAG) const;
178
179 SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
180 SDValue LowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
181 SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
182 SDValue LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const;
183 void LowerUDIVREM64(SDValue Op, SelectionDAG &DAG,
184 SmallVectorImpl<SDValue> &Results) const;
185
186 void analyzeFormalArgumentsCompute(
187 CCState &State,
188 const SmallVectorImpl<ISD::InputArg> &Ins) const;
189
190public:
191 AMDGPUTargetLowering(const TargetMachine &TM, const TargetSubtargetInfo &STI,
192 const AMDGPUSubtarget &AMDGPUSTI);
193
194 bool mayIgnoreSignedZero(SDValue Op) const;
195
196 static inline SDValue stripBitcast(SDValue Val) {
197 return Val.getOpcode() == ISD::BITCAST ? Val.getOperand(i: 0) : Val;
198 }
199
200 static bool shouldFoldFNegIntoSrc(SDNode *FNeg, SDValue FNegSrc);
201 static bool allUsesHaveSourceMods(const SDNode *N,
202 unsigned CostThreshold = 4);
203 bool isFAbsFree(EVT VT) const override;
204 bool isFNegFree(EVT VT) const override;
205 bool isTruncateFree(EVT Src, EVT Dest) const override;
206 bool isTruncateFree(Type *Src, Type *Dest) const override;
207
208 bool isZExtFree(Type *Src, Type *Dest) const override;
209 bool isZExtFree(EVT Src, EVT Dest) const override;
210
211 SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
212 bool LegalOperations, bool ForCodeSize,
213 NegatibleCost &Cost,
214 unsigned Depth) const override;
215
216 bool isNarrowingProfitable(SDNode *N, EVT SrcVT, EVT DestVT) const override;
217
218 bool isDesirableToCommuteWithShift(const SDNode *N,
219 CombineLevel Level) const override;
220
221 EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
222 ISD::NodeType ExtendKind) const override;
223
224 unsigned getVectorIdxWidth(const DataLayout &) const override;
225 bool isSelectSupported(SelectSupportKind) const override;
226
227 bool isFPImmLegal(const APFloat &Imm, EVT VT,
228 bool ForCodeSize) const override;
229 bool ShouldShrinkFPConstant(EVT VT) const override;
230 bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtType, EVT ExtVT,
231 std::optional<unsigned> ByteOffset) const override;
232
233 bool isLoadBitCastBeneficial(EVT, EVT, const SelectionDAG &DAG,
234 const MachineMemOperand &MMO) const final;
235
236 bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT,
237 unsigned NumElem,
238 unsigned AS) const override;
239 bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override;
240 bool isCheapToSpeculateCttz(Type *Ty) const override;
241 bool isCheapToSpeculateCtlz(Type *Ty) const override;
242
243 bool isSDNodeAlwaysUniform(const SDNode *N) const override;
244
245 // FIXME: This hook should not exist
246 AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const override {
247 return AtomicExpansionKind::None;
248 }
249
250 AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const override {
251 return AtomicExpansionKind::None;
252 }
253
254 AtomicExpansionKind shouldCastAtomicRMWIInIR(AtomicRMWInst *) const override {
255 return AtomicExpansionKind::None;
256 }
257
258 static CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg);
259 static CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg);
260
261 SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
262 const SmallVectorImpl<ISD::OutputArg> &Outs,
263 const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
264 SelectionDAG &DAG) const override;
265
266 SDValue addTokenForArgument(SDValue Chain,
267 SelectionDAG &DAG,
268 MachineFrameInfo &MFI,
269 int ClobberedFI) const;
270
271 SDValue lowerUnhandledCall(CallLoweringInfo &CLI,
272 SmallVectorImpl<SDValue> &InVals,
273 StringRef Reason) const;
274 SDValue LowerCall(CallLoweringInfo &CLI,
275 SmallVectorImpl<SDValue> &InVals) const override;
276
277 SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
278 SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
279 SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
280
281 bool SimplifyDemandedBitsForTargetNode(SDValue Op,
282 const APInt &OriginalDemandedBits,
283 const APInt &OriginalDemandedElts,
284 KnownBits &Known,
285 TargetLoweringOpt &TLO,
286 unsigned Depth) const override;
287
288 void ReplaceNodeResults(SDNode * N,
289 SmallVectorImpl<SDValue> &Results,
290 SelectionDAG &DAG) const override;
291
292 SDValue combineFMinMaxLegacyImpl(const SDLoc &DL, EVT VT, SDValue LHS,
293 SDValue RHS, SDValue True, SDValue False,
294 SDValue CC, DAGCombinerInfo &DCI) const;
295
296 SDValue combineFMinMaxLegacy(const SDLoc &DL, EVT VT, SDValue LHS,
297 SDValue RHS, SDValue True, SDValue False,
298 SDValue CC, DAGCombinerInfo &DCI) const;
299
300 // FIXME: Turn off MergeConsecutiveStores() before Instruction Selection for
301 // AMDGPU. Commit r319036,
302 // (https://github.com/llvm/llvm-project/commit/db77e57ea86d941a4262ef60261692f4cb6893e6)
303 // turned on MergeConsecutiveStores() before Instruction Selection for all
304 // targets. Enough AMDGPU compiles go into an infinite loop (
305 // MergeConsecutiveStores() merges two stores; LegalizeStoreOps() un-merges;
306 // MergeConsecutiveStores() re-merges, etc. ) to warrant turning it off for
307 // now.
308 bool mergeStoresAfterLegalization(EVT) const override { return false; }
309
310 bool isFsqrtCheap(SDValue Operand, SelectionDAG &DAG) const override {
311 return true;
312 }
313 SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
314 int &RefinementSteps, bool &UseOneConstNR,
315 bool Reciprocal) const override;
316 SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
317 int &RefinementSteps) const override;
318
319 virtual SDNode *PostISelFolding(MachineSDNode *N,
320 SelectionDAG &DAG) const = 0;
321
322 /// Determine which of the bits specified in \p Mask are known to be
323 /// either zero or one and return them in the \p KnownZero and \p KnownOne
324 /// bitsets.
325 void computeKnownBitsForTargetNode(const SDValue Op,
326 KnownBits &Known,
327 const APInt &DemandedElts,
328 const SelectionDAG &DAG,
329 unsigned Depth = 0) const override;
330
331 unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts,
332 const SelectionDAG &DAG,
333 unsigned Depth = 0) const override;
334
335 unsigned computeNumSignBitsForTargetInstr(GISelValueTracking &Analysis,
336 Register R,
337 const APInt &DemandedElts,
338 const MachineRegisterInfo &MRI,
339 unsigned Depth = 0) const override;
340
341 bool canCreateUndefOrPoisonForTargetNode(SDValue Op,
342 const APInt &DemandedElts,
343 const SelectionDAG &DAG,
344 bool PoisonOnly, bool ConsiderFlags,
345 unsigned Depth) const override;
346
347 bool isKnownNeverNaNForTargetNode(SDValue Op, const APInt &DemandedElts,
348 const SelectionDAG &DAG, bool SNaN = false,
349 unsigned Depth = 0) const override;
350
351 bool isReassocProfitable(MachineRegisterInfo &MRI, Register N0,
352 Register N1) const override;
353
354 /// Helper function that adds Reg to the LiveIn list of the DAG's
355 /// MachineFunction.
356 ///
357 /// \returns a RegisterSDNode representing Reg if \p RawReg is true, otherwise
358 /// a copy from the register.
359 SDValue CreateLiveInRegister(SelectionDAG &DAG,
360 const TargetRegisterClass *RC,
361 Register Reg, EVT VT,
362 const SDLoc &SL,
363 bool RawReg = false) const;
364 SDValue CreateLiveInRegister(SelectionDAG &DAG,
365 const TargetRegisterClass *RC,
366 Register Reg, EVT VT) const {
367 return CreateLiveInRegister(DAG, RC, Reg, VT, SL: SDLoc(DAG.getEntryNode()));
368 }
369
370 // Returns the raw live in register rather than a copy from it.
371 SDValue CreateLiveInRegisterRaw(SelectionDAG &DAG,
372 const TargetRegisterClass *RC,
373 Register Reg, EVT VT) const {
374 return CreateLiveInRegister(DAG, RC, Reg, VT, SL: SDLoc(DAG.getEntryNode()), RawReg: true);
375 }
376
377 /// Similar to CreateLiveInRegister, except value maybe loaded from a stack
378 /// slot rather than passed in a register.
379 SDValue loadStackInputValue(SelectionDAG &DAG,
380 EVT VT,
381 const SDLoc &SL,
382 int64_t Offset) const;
383
384 SDValue storeStackInputValue(SelectionDAG &DAG,
385 const SDLoc &SL,
386 SDValue Chain,
387 SDValue ArgVal,
388 int64_t Offset) const;
389
390 SDValue loadInputValue(SelectionDAG &DAG,
391 const TargetRegisterClass *RC,
392 EVT VT, const SDLoc &SL,
393 const ArgDescriptor &Arg) const;
394
395 enum ImplicitParameter {
396 FIRST_IMPLICIT,
397 PRIVATE_BASE,
398 SHARED_BASE,
399 QUEUE_PTR,
400 };
401
402 /// Helper function that returns the byte offset of the given
403 /// type of implicit parameter.
404 uint32_t getImplicitParameterOffset(const MachineFunction &MF,
405 const ImplicitParameter Param) const;
406 uint32_t getImplicitParameterOffset(const uint64_t ExplicitKernArgSize,
407 const ImplicitParameter Param) const;
408
409 MVT getFenceOperandTy(const DataLayout &DL) const override {
410 return MVT::i32;
411 }
412
413 bool hasMultipleConditionRegisters(EVT VT) const override {
414 // FIXME: This is only partially true. If we have to do vector compares, any
415 // SGPR pair can be a condition register. If we have a uniform condition, we
416 // are better off doing SALU operations, where there is only one SCC. For
417 // now, we don't have a way of knowing during instruction selection if a
418 // condition will be uniform and we always use vector compares. Assume we
419 // are using vector compares until that is fixed.
420 return true;
421 }
422};
423
424} // End namespace llvm
425
426#endif
427