//===-- SIISelLowering.h - SI DAG Lowering Interface ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// SI DAG Lowering interface definition
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_SIISELLOWERING_H
#define LLVM_LIB_TARGET_AMDGPU_SIISELLOWERING_H

#include "AMDGPUArgumentUsageInfo.h"
#include "AMDGPUISelLowering.h"
#include "SIDefines.h"
#include "llvm/CodeGen/MachineFunction.h"

namespace llvm {

class GCNSubtarget;
class SIMachineFunctionInfo;
class SIRegisterInfo;

namespace AMDGPU {
struct ImageDimIntrinsicInfo;
}

class SITargetLowering final : public AMDGPUTargetLowering {
private:
  const GCNSubtarget *Subtarget;

public:
  MVT getRegisterTypeForCallingConv(LLVMContext &Context,
                                    CallingConv::ID CC,
                                    EVT VT) const override;
  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                         CallingConv::ID CC,
                                         EVT VT) const override;

  unsigned getVectorTypeBreakdownForCallingConv(
      LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
      unsigned &NumIntermediates, MVT &RegisterVT) const override;

  MachinePointerInfo getKernargSegmentPtrInfo(MachineFunction &MF) const;

private:
  SDValue lowerKernArgParameterPtr(SelectionDAG &DAG, const SDLoc &SL,
                                   SDValue Chain, uint64_t Offset) const;
  SDValue getImplicitArgPtr(SelectionDAG &DAG, const SDLoc &SL) const;
  SDValue getLDSKernelId(SelectionDAG &DAG, const SDLoc &SL) const;
  SDValue lowerKernargMemParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
                                   const SDLoc &SL, SDValue Chain,
                                   uint64_t Offset, Align Alignment,
                                   bool Signed,
                                   const ISD::InputArg *Arg = nullptr) const;
  SDValue loadImplicitKernelArgument(SelectionDAG &DAG, MVT VT, const SDLoc &DL,
                                     Align Alignment,
                                     ImplicitParameter Param) const;

  SDValue convertABITypeToValueType(SelectionDAG &DAG, SDValue Val,
                                    CCValAssign &VA, const SDLoc &SL) const;

  SDValue lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
                              const SDLoc &SL, SDValue Chain,
                              const ISD::InputArg &Arg) const;
  SDValue lowerWorkGroupId(
      SelectionDAG &DAG, const SIMachineFunctionInfo &MFI, EVT VT,
      AMDGPUFunctionArgInfo::PreloadedValue ClusterIdPV,
      AMDGPUFunctionArgInfo::PreloadedValue ClusterMaxIdPV,
      AMDGPUFunctionArgInfo::PreloadedValue ClusterWorkGroupIdPV) const;
  SDValue getPreloadedValue(SelectionDAG &DAG,
                            const SIMachineFunctionInfo &MFI,
                            EVT VT,
                            AMDGPUFunctionArgInfo::PreloadedValue) const;

  SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
                             SelectionDAG &DAG) const override;
  SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;

  SDValue lowerImplicitZextParam(SelectionDAG &DAG, SDValue Op,
                                 MVT VT, unsigned Offset) const;
  SDValue lowerImage(SDValue Op, const AMDGPU::ImageDimIntrinsicInfo *Intr,
                     SelectionDAG &DAG, bool WithChain) const;
  SDValue lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc, SDValue Offset,
                       SDValue CachePolicy, SelectionDAG &DAG) const;

  SDValue lowerRawBufferAtomicIntrin(SDValue Op, SelectionDAG &DAG,
                                     unsigned NewOpcode) const;
  SDValue lowerStructBufferAtomicIntrin(SDValue Op, SelectionDAG &DAG,
                                        unsigned NewOpcode) const;

  SDValue lowerWaveID(SelectionDAG &DAG, SDValue Op) const;
  SDValue lowerConstHwRegRead(SelectionDAG &DAG, SDValue Op,
                              AMDGPU::Hwreg::Id HwReg, unsigned LowBit,
                              unsigned Width) const;
  SDValue lowerWorkitemID(SelectionDAG &DAG, SDValue Op, unsigned Dim,
                          const ArgDescriptor &ArgDesc) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;

  // The raw.tbuffer and struct.tbuffer intrinsics have two offset args: offset
  // (the offset that is included in bounds checking and swizzling, to be split
  // between the instruction's voffset and immoffset fields) and soffset (the
  // offset that is excluded from bounds checking and swizzling, to go in the
  // instruction's soffset field). This function takes the first kind of
  // offset and figures out how to split it between voffset and immoffset.
  std::pair<SDValue, SDValue> splitBufferOffsets(SDValue Offset,
                                                 SelectionDAG &DAG) const;
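
  // A minimal usage sketch (hedged: the result names are illustrative, and
  // the immediate field width follows the subtarget's MTBUF encoding,
  // assumed to be 12 bits here):
  //   auto [VOffset, ImmOffset] = splitBufferOffsets(Offset, DAG);
  //   // e.g. a constant combined offset of 4100 would not fit the immediate
  //   // field and could come back as VOffset = 4096, ImmOffset = 4.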

  SDValue widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const;
  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFastUnsafeFDIV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFastUnsafeFDIV64(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFDIV16(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFDIV32(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFDIV64(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFDIV(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFFREXP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerTrig(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFSQRTF16(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFSQRTF32(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFSQRTF64(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue adjustLoadValueType(unsigned Opcode, MemSDNode *M,
                              SelectionDAG &DAG, ArrayRef<SDValue> Ops,
                              bool IsIntrinsic = false) const;

  SDValue lowerIntrinsicLoad(MemSDNode *M, bool IsFormat, SelectionDAG &DAG,
                             ArrayRef<SDValue> Ops) const;

  // Call DAG.getMemIntrinsicNode for a load, but first widen a dwordx3 type to
  // dwordx4 if on SI.
  SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              ArrayRef<SDValue> Ops, EVT MemVT,
                              MachineMemOperand *MMO, SelectionDAG &DAG) const;
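
  // Illustrative (not the exact code): on SI a v3i32 (dwordx3) memory type is
  // first widened to v4i32, DAG.getMemIntrinsicNode is called with the wider
  // VT, and the result is trimmed back to the original three elements via an
  // EXTRACT_SUBVECTOR.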

  SDValue handleD16VData(SDValue VData, SelectionDAG &DAG,
                         bool ImageStore = false) const;

  /// Converts \p Op, which must be of floating point type, to the
  /// floating point type \p VT, by either extending or truncating it.
  SDValue getFPExtOrFPRound(SelectionDAG &DAG,
                            SDValue Op,
                            const SDLoc &DL,
                            EVT VT) const;
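
  // A minimal sketch of the selection logic (assumes both types are floating
  // point; the trailing 0 is the standard "may lose precision" FP_ROUND flag):
  //   return VT.bitsGT(Op.getValueType())
  //              ? DAG.getNode(ISD::FP_EXTEND, DL, VT, Op)
  //              : DAG.getNode(ISD::FP_ROUND, DL, VT, Op,
  //                            DAG.getTargetConstant(0, DL, MVT::i32));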

  SDValue convertArgType(
      SelectionDAG &DAG, EVT VT, EVT MemVT, const SDLoc &SL, SDValue Val,
      bool Signed, const ISD::InputArg *Arg = nullptr) const;

  /// Custom lowering for ISD::FP_ROUND for MVT::f16.
  SDValue lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue splitFP_ROUNDVectorOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFMINNUM_FMAXNUM(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFMINIMUMNUM_FMAXIMUMNUM(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFMINIMUM_FMAXIMUM(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFLDEXP(SDValue Op, SelectionDAG &DAG) const;
  SDValue promoteUniformOpToI32(SDValue Op, DAGCombinerInfo &DCI) const;
  SDValue lowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMUL(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerXMULO(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerXMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;

  SDValue getSegmentAperture(unsigned AS, const SDLoc &DL,
                             SelectionDAG &DAG) const;

  SDValue lowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;

  SDValue lowerTRAP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerTrapEndpgm(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerTrapHsaQueuePtr(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerTrapHsa(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const;

  SDNode *adjustWritemask(MachineSDNode *&N, SelectionDAG &DAG) const;

  SDValue performUCharToFloatCombine(SDNode *N,
                                     DAGCombinerInfo &DCI) const;
  SDValue performFCopySignCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue performSHLPtrCombine(SDNode *N,
                               unsigned AS,
                               EVT MemVT,
                               DAGCombinerInfo &DCI) const;

  SDValue performMemSDNodeCombine(MemSDNode *N, DAGCombinerInfo &DCI) const;

  SDValue splitBinaryBitConstantOp(DAGCombinerInfo &DCI, const SDLoc &SL,
                                   unsigned Opc, SDValue LHS,
                                   const ConstantSDNode *CRHS) const;

  SDValue performAndCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performOrCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performXorCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performZeroOrAnyExtendCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSignExtendInRegCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performClassCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue getCanonicalConstantFP(SelectionDAG &DAG, const SDLoc &SL, EVT VT,
                                 const APFloat &C) const;
  SDValue performFCanonicalizeCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue performFPMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL,
                                  SDValue Op0, SDValue Op1) const;
  SDValue performIntMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL,
                                   SDValue Src, SDValue MinVal, SDValue MaxVal,
                                   bool Signed) const;
  SDValue performMinMaxCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFMed3Combine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performCvtPkRTZCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performExtractVectorEltCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performInsertVectorEltCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFPRoundCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSelectCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue reassociateScalarOps(SDNode *N, SelectionDAG &DAG) const;
  unsigned getFusedOpcode(const SelectionDAG &DAG,
                          const SDNode *N0, const SDNode *N1) const;
  SDValue tryFoldToMad64_32(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue foldAddSub64WithZeroLowBitsTo32(SDNode *N,
                                          DAGCombinerInfo &DCI) const;

  SDValue performAddCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performPtrAddCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performAddCarrySubCarryCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSubCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFAddCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFSubCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFDivCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFMulCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFMACombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSetCCCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performCvtF32UByteNCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performClampCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performRcpCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  bool isLegalMUBUFAddressingMode(const AddrMode &AM) const;

  unsigned isCFIntrinsic(const SDNode *Intr) const;

public:
  /// \returns True if fixup needs to be emitted for given global value \p GV,
  /// false otherwise.
  bool shouldEmitFixup(const GlobalValue *GV) const;

  /// \returns True if GOT relocation needs to be emitted for given global value
  /// \p GV, false otherwise.
  bool shouldEmitGOTReloc(const GlobalValue *GV) const;

  /// \returns True if PC-relative relocation needs to be emitted for given
  /// global value \p GV, false otherwise.
  bool shouldEmitPCReloc(const GlobalValue *GV) const;

  /// \returns true if this should use a literal constant for an LDS address,
  /// and not emit a relocation for an LDS global.
  bool shouldUseLDSConstAddress(const GlobalValue *GV) const;

  /// Check if EXTRACT_VECTOR_ELT/INSERT_VECTOR_ELT (<n x e>, var-idx) should be
  /// expanded into a set of cmp/select instructions.
  static bool shouldExpandVectorDynExt(unsigned EltSize, unsigned NumElem,
                                       bool IsDivergentIdx,
                                       const GCNSubtarget *Subtarget);
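
  // Illustrative expansion for a divergent index Idx into a <4 x i32> vector
  // V (a sketch of the cmp/select chain, not the exact emitted code):
  //   R = select(Idx == 3, V[3],
  //       select(Idx == 2, V[2],
  //       select(Idx == 1, V[1], V[0])));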

  bool shouldExpandVectorDynExt(SDNode *N) const;

  bool shouldPreservePtrArith(const Function &F, EVT PtrVT) const override;

  bool canTransformPtrArithOutOfBounds(const Function &F,
                                       EVT PtrVT) const override;

private:
  /// Returns true if the first real instruction in MBB is 8 bytes and could
  /// be split by a 32-byte fetch window boundary. Used on GFX950 to avoid
  /// instruction fetch delays.
  bool needsFetchWindowAlignment(const MachineBasicBlock &MBB) const;

  // Analyze a combined offset from an amdgcn_s_buffer_load intrinsic and store
  // the three offsets (voffset, soffset and instoffset) into the SDValue[3]
  // array pointed to by Offsets.
  void setBufferOffsets(SDValue CombinedOffset, SelectionDAG &DAG,
                        SDValue *Offsets, Align Alignment = Align(4)) const;
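
  // Illustrative (hedged; the exact split depends on the subtarget's buffer
  // encoding): a small constant CombinedOffset can land entirely in
  // Offsets[2] (instoffset), leaving Offsets[0] (voffset) and Offsets[1]
  // (soffset) as zero, while a variable or oversized offset spills into
  // voffset and/or soffset.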

  // Convert the i128 that an addrspace(8) pointer is natively represented as
  // into the v4i32 that all the buffer intrinsics expect to receive. We can't
  // add register classes for i128 on pain of the promotion logic going haywire,
  // so this slightly ugly hack is what we've got. If passed a non-pointer
  // argument (as would be seen in older buffer intrinsics), does nothing.
  SDValue bufferRsrcPtrToVector(SDValue MaybePointer, SelectionDAG &DAG) const;
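
  // A sketch of the assumed shape of that conversion (not the exact code):
  //   SDValue Rsrc = DAG.getBitcast(MVT::v4i32, MaybePointer);
  // i.e. the i128 pointer value is reinterpreted as the four descriptor
  // words the buffer instructions consume.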

  // Wrap a 64-bit pointer into a v4i32 (which is how all SelectionDAG code
  // represents ptr addrspace(8)) using the flags specified in the intrinsic.
  SDValue lowerPointerAsRsrcIntrin(SDNode *Op, SelectionDAG &DAG) const;
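
  // Illustrative layout of the resulting descriptor (hedged; the exact bit
  // placement follows the hardware's V# buffer-resource format): words 0-1
  // hold the base address and stride bits, word 2 the number of records, and
  // word 3 the flags operand of the intrinsic.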

  // Handle 8-bit and 16-bit buffer loads.
  SDValue handleByteShortBufferLoads(SelectionDAG &DAG, EVT LoadVT, SDLoc DL,
                                     ArrayRef<SDValue> Ops,
                                     MachineMemOperand *MMO,
                                     bool IsTFE = false) const;

  // Handle 8-bit and 16-bit buffer stores.
  SDValue handleByteShortBufferStores(SelectionDAG &DAG, EVT VDataType,
                                      SDLoc DL, SDValue Ops[],
                                      MemSDNode *M) const;

public:
  SITargetLowering(const TargetMachine &tm, const GCNSubtarget &STI);

  const GCNSubtarget *getSubtarget() const;

  ArrayRef<MCPhysReg> getRoundingControlRegisters() const override;

  bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode, EVT DestVT,
                       EVT SrcVT) const override;

  bool isFPExtFoldable(const MachineInstr &MI, unsigned Opcode, LLT DestTy,
                       LLT SrcTy) const override;

  bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const override;

  // While address space 7 should never make it to codegen, it still needs to
  // have an MVT to prevent some analyses that query this function from
  // breaking. We use the custom MVT::amdgpuBufferFatPointer and
  // MVT::amdgpuBufferStridedPointer for this, though we use v8i32 for the
  // memory type (which is probably unused).
  MVT getPointerTy(const DataLayout &DL, unsigned AS) const override;
  MVT getPointerMemTy(const DataLayout &DL, unsigned AS) const override;

  void getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &, const CallBase &,
                          MachineFunction &MF,
                          unsigned IntrinsicID) const override;

  void CollectTargetIntrinsicOperands(const CallInst &I,
                                      SmallVectorImpl<SDValue> &Ops,
                                      SelectionDAG &DAG) const override;

  bool getAddrModeArguments(const IntrinsicInst *I,
                            SmallVectorImpl<Value *> &Ops,
                            Type *&AccessTy) const override;

  bool isLegalFlatAddressingMode(const AddrMode &AM, unsigned AddrSpace) const;
  bool isLegalGlobalAddressingMode(const AddrMode &AM) const;
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;

  bool canMergeStoresTo(unsigned AS, EVT MemVT,
                        const MachineFunction &MF) const override;

  bool allowsMisalignedMemoryAccessesImpl(
      unsigned Size, unsigned AddrSpace, Align Alignment,
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned *IsFast = nullptr) const;

  bool allowsMisalignedMemoryAccesses(
      LLT Ty, unsigned AddrSpace, Align Alignment,
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned *IsFast = nullptr) const override {
    if (IsFast)
      *IsFast = 0;
    return allowsMisalignedMemoryAccessesImpl(Ty.getSizeInBits(), AddrSpace,
                                              Alignment, Flags, IsFast);
  }

  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AS, Align Alignment,
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned *IsFast = nullptr) const override;

  EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;

  bool isMemOpHasNoClobberedMemOperand(const SDNode *N) const;

  static bool isNonGlobalAddrSpace(unsigned AS);

  bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;

  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override;

  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;
  bool isExtractVecEltCheap(EVT VT, unsigned Index) const override;

  bool isTypeDesirableForOp(unsigned Op, EVT VT) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  unsigned combineRepeatedFPDivisors() const override {
    // Combine multiple FDIVs with the same divisor into multiple FMULs by the
    // reciprocal.
    return 2;
  }
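
  // Illustrative rewrite the generic DAG combiner performs once that
  // threshold is met (two or more divisions by the same divisor d):
  //   x / d; y / d  ==>  r = 1.0 / d; x * r; y * r
  // i.e. one expensive division plus cheap multiplies.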

  bool supportSplitCSR(MachineFunction *MF) const override;
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  bool CanLowerReturn(CallingConv::ID CallConv,
                      MachineFunction &MF, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context, const Type *RetTy) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  void passSpecialInputs(
      CallLoweringInfo &CLI,
      CCState &CCInfo,
      const SIMachineFunctionInfo &Info,
      SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
      SmallVectorImpl<SDValue> &MemOpChains,
      SDValue Chain) const;

  SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  bool mayBeEmittedAsTailCall(const CallInst *) const override;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;

  SDValue lowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGET_FPENV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSET_FPENV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerROTR(SDValue Op, SelectionDAG &DAG) const;

  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  MachineBasicBlock *splitKillBlock(MachineInstr &MI,
                                    MachineBasicBlock *BB) const;

  void bundleInstWithWaitcnt(MachineInstr &MI) const;
  MachineBasicBlock *emitGWSMemViolTestLoop(MachineInstr &MI,
                                            MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *BB) const override;

  bool enableAggressiveFMAFusion(EVT VT) const override;
  bool enableAggressiveFMAFusion(LLT Ty) const override;
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;
  MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override;
  LLT getPreferredShiftAmountTy(LLT Ty) const override;

  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  const LLT Ty) const override;
  bool isFMADLegal(const SelectionDAG &DAG, const SDNode *N) const override;
  bool isFMADLegal(const MachineInstr &MI, const LLT Ty) const override;

  SDValue splitUnaryVectorOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue splitBinaryVectorOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue splitTernaryVectorOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
  SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const override;
  void AddMemOpInit(MachineInstr &MI) const;
  void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                     SDNode *Node) const override;

  SDNode *legalizeTargetIndependentNode(SDNode *Node, SelectionDAG &DAG) const;

  MachineSDNode *wrapAddr64Rsrc(SelectionDAG &DAG, const SDLoc &DL,
                                SDValue Ptr) const;
  MachineSDNode *buildRSRC(SelectionDAG &DAG, const SDLoc &DL, SDValue Ptr,
                           uint32_t RsrcDword1, uint64_t RsrcDword2And3) const;
  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;
  ConstraintType getConstraintType(StringRef Constraint) const override;
  void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;
  bool getAsmOperandConstVal(SDValue Op, uint64_t &Val) const;
  bool checkAsmConstraintVal(SDValue Op, StringRef Constraint,
                             uint64_t Val) const;
  bool checkAsmConstraintValA(SDValue Op,
                              uint64_t Val,
                              unsigned MaxSize = 64) const;
  SDValue copyToM0(SelectionDAG &DAG, SDValue Chain, const SDLoc &DL,
                   SDValue V) const;

  void finalizeLowering(MachineFunction &MF) const override;

  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;
  void computeKnownBitsForFrameIndex(int FrameIdx,
                                     KnownBits &Known,
                                     const MachineFunction &MF) const override;
  void computeKnownBitsForTargetInstr(GISelValueTracking &Analysis, Register R,
                                      KnownBits &Known,
                                      const APInt &DemandedElts,
                                      const MachineRegisterInfo &MRI,
                                      unsigned Depth = 0) const override;

  Align computeKnownAlignForTargetInstr(GISelValueTracking &Analysis,
                                        Register R,
                                        const MachineRegisterInfo &MRI,
                                        unsigned Depth = 0) const override;
  bool isSDNodeSourceOfDivergence(const SDNode *N, FunctionLoweringInfo *FLI,
                                  UniformityInfo *UA) const override;

  bool hasMemSDNodeUser(SDNode *N) const;

  bool isReassocProfitable(SelectionDAG &DAG, SDValue N0,
                           SDValue N1) const override;

  bool isReassocProfitable(MachineRegisterInfo &MRI, Register N0,
                           Register N1) const override;

  bool isCanonicalized(SelectionDAG &DAG, SDValue Op,
                       SDNodeFlags UserFlags = {}, unsigned MaxDepth = 5) const;
  bool isCanonicalized(Register Reg, const MachineFunction &MF,
                       unsigned MaxDepth = 5) const;
  bool denormalsEnabledForType(const SelectionDAG &DAG, EVT VT) const;
  bool denormalsEnabledForType(LLT Ty, const MachineFunction &MF) const;

  bool isKnownNeverNaNForTargetNode(SDValue Op, const APInt &DemandedElts,
                                    const SelectionDAG &DAG, bool SNaN = false,
                                    unsigned Depth = 0) const override;
  AtomicExpansionKind
  shouldExpandAtomicRMWInIR(const AtomicRMWInst *) const override;
  AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(const AtomicCmpXchgInst *AI) const override;

  void emitExpandAtomicAddrSpacePredicate(Instruction *AI) const;
  void emitExpandAtomicRMW(AtomicRMWInst *AI) const override;
  void emitExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) const override;
  void emitExpandAtomicLoad(LoadInst *LI) const override;
  void emitExpandAtomicStore(StoreInst *SI) const override;

  LoadInst *
  lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const override;

  const TargetRegisterClass *getRegClassFor(MVT VT,
                                            bool isDivergent) const override;
  bool requiresUniformRegister(MachineFunction &MF,
                               const Value *V) const override;
  Align getPrefLoopAlignment(MachineLoop *ML) const override;
  unsigned
  getMaxPermittedBytesForAlignment(MachineBasicBlock *MBB) const override;

  void allocateHSAUserSGPRs(CCState &CCInfo,
                            MachineFunction &MF,
                            const SIRegisterInfo &TRI,
                            SIMachineFunctionInfo &Info) const;

  void allocatePreloadKernArgSGPRs(CCState &CCInfo,
                                   SmallVectorImpl<CCValAssign> &ArgLocs,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   MachineFunction &MF,
                                   const SIRegisterInfo &TRI,
                                   SIMachineFunctionInfo &Info) const;

  void allocateLDSKernelId(CCState &CCInfo, MachineFunction &MF,
                           const SIRegisterInfo &TRI,
                           SIMachineFunctionInfo &Info) const;

  void allocateSystemSGPRs(CCState &CCInfo,
                           MachineFunction &MF,
                           SIMachineFunctionInfo &Info,
                           CallingConv::ID CallConv,
                           bool IsShader) const;

  void allocateSpecialEntryInputVGPRs(CCState &CCInfo,
                                      MachineFunction &MF,
                                      const SIRegisterInfo &TRI,
                                      SIMachineFunctionInfo &Info) const;
  void allocateSpecialInputSGPRs(
      CCState &CCInfo,
      MachineFunction &MF,
      const SIRegisterInfo &TRI,
      SIMachineFunctionInfo &Info) const;

  void allocateSpecialInputVGPRs(CCState &CCInfo,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) const;
  void allocateSpecialInputVGPRsFixed(CCState &CCInfo,
                                      MachineFunction &MF,
                                      const SIRegisterInfo &TRI,
                                      SIMachineFunctionInfo &Info) const;

  MachineMemOperand::Flags
  getTargetMMOFlags(const Instruction &I) const override;
};

// Returns true if \p V is a boolean value that is never serialized into
// memory or an argument, and so does not require a v_cndmask_b32 to be
// deserialized.
bool isBoolSGPR(SDValue V);
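
// Illustrative (hedged): a SETCC result, or a logical AND/OR/XOR of such
// results, typically qualifies because it already lives in a scalar condition
// register, whereas a boolean loaded from memory does not.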

} // End namespace llvm

#endif