//===-- PPCISelLowering.h - PPC32 DAG Lowering Interface --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that PPC uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
#define LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H

#include "PPCInstrInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include <optional>
#include <utility>

namespace llvm {

/// Define some predicates that are used for node matching.
namespace PPC {

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                          SelectionDAG &DAG);
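// Illustrative only (editor's example, not part of the original interface):
// on a big-endian target, vpkuhum keeps the low-order byte of each halfword
// of the two concatenated inputs, so a qualifying two-input v16i8 mask is
//   <1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31>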

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                          SelectionDAG &DAG);

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction.
bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                          SelectionDAG &DAG);

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                        unsigned ShuffleKind, SelectionDAG &DAG);

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                        unsigned ShuffleKind, SelectionDAG &DAG);

/// isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGEW or VMRGOW instruction.
bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                         unsigned ShuffleKind, SelectionDAG &DAG);

/// isXXSLDWIShuffleMask - Return true if this is a shuffle mask suitable
/// for a XXSLDWI instruction.
bool isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                          bool &Swap, bool IsLE);

/// isXXBRHShuffleMask - Return true if this is a shuffle mask suitable
/// for a XXBRH instruction.
bool isXXBRHShuffleMask(ShuffleVectorSDNode *N);

/// isXXBRWShuffleMask - Return true if this is a shuffle mask suitable
/// for a XXBRW instruction.
bool isXXBRWShuffleMask(ShuffleVectorSDNode *N);

/// isXXBRDShuffleMask - Return true if this is a shuffle mask suitable
/// for a XXBRD instruction.
bool isXXBRDShuffleMask(ShuffleVectorSDNode *N);

/// isXXBRQShuffleMask - Return true if this is a shuffle mask suitable
/// for a XXBRQ instruction.
bool isXXBRQShuffleMask(ShuffleVectorSDNode *N);

/// isXXPERMDIShuffleMask - Return true if this is a shuffle mask suitable
/// for a XXPERMDI instruction.
bool isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                           bool &Swap, bool IsLE);

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the
/// shift amount, otherwise return -1.
int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                        SelectionDAG &DAG);
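// Illustrative only: vsldoi concatenates its two inputs and extracts 16
// consecutive bytes starting at the shift amount, so a v16i8 mask of
// <3, 4, 5, ..., 18> corresponds to a shift amount of 3.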

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);
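// Illustrative only: with EltSize = 4, the v16i8 mask
// <4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7> is a splat of the
// four-byte element at index 1 and is a candidate for vspltw.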

/// isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by
/// the XXINSERTW instruction introduced in ISA 3.0. This is essentially any
/// shuffle of v4f32/v4i32 vectors that just inserts one element from one
/// vector into the other. This function will also set a couple of
/// output parameters for how much the source vector needs to be shifted and
/// what byte number needs to be specified for the instruction to put the
/// element in the desired location of the target vector.
bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                     unsigned &InsertAtByte, bool &Swap, bool IsLE);
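// Illustrative only: the v4i32 mask <0, 1, 7, 3> inserts element 3 of the
// second vector into element 2 of the first, leaving the other elements
// untouched, which is exactly the pattern xxinsertw can match.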

/// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
/// appropriate for PPC mnemonics (which have a big endian bias - namely
/// elements are counted from the left of the vector register).
unsigned getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
                                    SelectionDAG &DAG);
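// Illustrative only: on a little-endian subtarget, a splat of v4i32
// element 0 is element 3 when counted from the left of the register, so
// this would return 3 and the emitted mnemonic would be vspltw vD, vB, 3.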

/// get_VSPLTI_elt - If this is a build_vector of constants which can be
/// formed by using a vspltis[bhw] instruction of the specified element
/// size, return the constant being splatted. The ByteSize field indicates
/// the number of bytes of each element [1,2,4] -> [b,h,w].
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
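// Illustrative only: a v16i8 build_vector of sixteen copies of -1 can be
// materialized as vspltisb vD, -1 (ByteSize = 1), since the immediate is a
// signed 5-bit value splatted into every byte.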

// Flags for computing the optimal addressing mode for loads and stores.
enum MemOpFlags {
  MOF_None = 0,

  // Extension mode for integer loads.
  MOF_SExt = 1,
  MOF_ZExt = 1 << 1,
  MOF_NoExt = 1 << 2,

  // Address computation flags.
  MOF_NotAddNorCst = 1 << 5,      // Not const. or sum of ptr and scalar.
  MOF_RPlusSImm16 = 1 << 6,       // Reg plus signed 16-bit constant.
  MOF_RPlusLo = 1 << 7,           // Reg plus signed 16-bit relocation.
  MOF_RPlusSImm16Mult4 = 1 << 8,  // Reg plus 16-bit signed multiple of 4.
  MOF_RPlusSImm16Mult16 = 1 << 9, // Reg plus 16-bit signed multiple of 16.
  MOF_RPlusSImm34 = 1 << 10,      // Reg plus 34-bit signed constant.
  MOF_RPlusR = 1 << 11,           // Sum of two variables.
  MOF_PCRel = 1 << 12,            // PC-Relative relocation.
  MOF_AddrIsSImm32 = 1 << 13,     // A simple 32-bit constant.

  // The in-memory type.
  MOF_SubWordInt = 1 << 15,
  MOF_WordInt = 1 << 16,
  MOF_DoubleWordInt = 1 << 17,
  MOF_ScalarFloat = 1 << 18, // Scalar single or double precision.
  MOF_Vector = 1 << 19,      // Vector types and quad precision scalars.
  MOF_Vector256 = 1 << 20,

  // Subtarget features.
  MOF_SubtargetBeforeP9 = 1 << 22,
  MOF_SubtargetP9 = 1 << 23,
  MOF_SubtargetP10 = 1 << 24,
  MOF_SubtargetSPE = 1 << 25
};
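
// Illustrative only (a hypothetical flag combination, not taken from the
// implementation): a sign-extending word load whose address is a register
// plus a DS-form immediate on a Power9 subtarget would carry flags like
//   unsigned Flags = PPC::MOF_SExt | PPC::MOF_WordInt |
//                    PPC::MOF_RPlusSImm16Mult4 | PPC::MOF_SubtargetP9;
// which the addressing-mode selection below would map to a DS-form
// instruction such as lwa.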

// The addressing modes for loads and stores.
enum AddrMode {
  AM_None,
  AM_DForm,
  AM_DSForm,
  AM_DQForm,
  AM_PrefixDForm,
  AM_XForm,
  AM_PCRel
};
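
// Illustrative only: these modes correspond to encodings such as lwz
// (D-form, 16-bit displacement), ld/lwa (DS-form, displacement a multiple
// of 4), lxv (DQ-form, displacement a multiple of 16), pld (prefixed
// D-form, 34-bit displacement), and lwzx (X-form, register + register).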
} // end namespace PPC

class PPCTargetLowering : public TargetLowering {
  const PPCSubtarget &Subtarget;

public:
  explicit PPCTargetLowering(const PPCTargetMachine &TM,
                             const PPCSubtarget &STI);

  bool isSelectSupported(SelectSupportKind Kind) const override {
    // PowerPC does not support scalar condition selects on vectors.
    return (Kind != SelectSupportKind::ScalarCondVectorVal);
  }

  /// getPreferredVectorAction - The code we generate when vector types are
  /// legalized by promoting the integer element type is often much worse
  /// than the code we generate if we widen the type for applicable vector
  /// types. The issue with promoting is that the vector is scalarized, the
  /// individual elements are promoted and then the vector is rebuilt. So
  /// say we load a pair of v4i8's and shuffle them. This will turn into a
  /// mess of 8 extending loads, moves back into VSRs (or memory ops if we
  /// don't have moves) and then the VPERM for the shuffle. All in all a
  /// very slow sequence.
  TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT)
      const override {
    // Default handling for scalable and single-element vectors.
    if (VT.isScalableVector() || VT.getVectorNumElements() == 1)
      return TargetLoweringBase::getPreferredVectorAction(VT);

    // Split and promote vNi1 vectors so we don't produce v256i1/v512i1
    // types as those are only for MMA instructions.
    if (VT.getScalarSizeInBits() == 1 && VT.getSizeInBits() > 16)
      return TypeSplitVector;
    if (VT.getScalarSizeInBits() == 1)
      return TypePromoteInteger;

    // Widen vectors that have reasonably sized elements.
    if (VT.getScalarSizeInBits() % 8 == 0)
      return TypeWidenVector;
    return TargetLoweringBase::getPreferredVectorAction(VT);
  }
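
  // Illustrative only: under this policy a v4i8 (scalar size 8) is widened
  // to a wider legal vector, a v8i1 (total size 8 <= 16) is promoted, and
  // a v32i1 (total size 32 > 16) is split.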

  bool useSoftFloat() const override;

  bool hasSPE() const;

  MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
    return MVT::i32;
  }

  bool isCheapToSpeculateCttz(Type *Ty) const override {
    return true;
  }

  bool isCheapToSpeculateCtlz(Type *Ty) const override {
    return true;
  }

  bool
  shallExtractConstSplatVectorElementToStore(Type *VectorTy,
                                             unsigned ElemSizeInBits,
                                             unsigned &Index) const override;

  bool isCtlzFast() const override {
    return true;
  }

  bool isEqualityCmpFoldedWithSignedCmp() const override {
    return false;
  }

  bool hasAndNotCompare(SDValue) const override {
    return true;
  }
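
  // hasAndNotCompare returns true because PowerPC has andc., which computes
  // (and x, (not y)) and sets CR0 in a single instruction.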

  bool preferIncOfAddToSubOfNot(EVT VT) const override;

  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }

  SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps,
                               bool OptForSize, NegatibleCost &Cost,
                               unsigned Depth = 0) const override;

  /// getSetCCResultType - Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  /// Return true if the target always benefits from combining into FMA for
  /// a given value type. This must typically return false on targets where
  /// FMA takes more cycles to execute than FADD.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  /// getPreIndexedAddressParts - Returns true, and sets the base pointer,
  /// offset pointer and addressing mode by reference, if this node's
  /// address can be legally represented as a pre-indexed load/store
  /// address.
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                 SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;

  /// SelectAddressEVXRegReg - Given the specified address, check to see if
  /// it can be more efficiently represented as [r+imm].
  bool SelectAddressEVXRegReg(SDValue N, SDValue &Base, SDValue &Index,
                              SelectionDAG &DAG) const;

  /// SelectAddressRegReg - Given the specified address, check to see if it
  /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment
  /// is non-zero, only accept displacements that are not suitable for
  /// [r+imm]. Returns false if the address can be represented by [r+imm],
  /// which is preferred.
  bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
                           SelectionDAG &DAG,
                           MaybeAlign EncodingAlignment = std::nullopt) const;

  /// SelectAddressRegImm - Returns true if the address N can be represented
  /// by a base register plus a signed 16-bit displacement [r+imm], and if
  /// it is not better represented as reg+reg. If \p EncodingAlignment is
  /// non-zero, only accept displacements suitable for the instruction
  /// encoding requirement, i.e. multiples of 4 for DS form.
  bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
                           SelectionDAG &DAG,
                           MaybeAlign EncodingAlignment) const;
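  // Illustrative only: for a DS-form consumer (EncodingAlignment = 4), an
  // address of the form (add %reg, 32) yields Disp = 32, Base = %reg and
  // returns true, while (add %reg, 34) is rejected because the displacement
  // is not a multiple of 4.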
  bool SelectAddressRegImm34(SDValue N, SDValue &Disp, SDValue &Base,
                             SelectionDAG &DAG) const;

  /// SelectAddressRegRegOnly - Given the specified address, force it to be
  /// represented as an indexed [r+r] operation.
  bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
                               SelectionDAG &DAG) const;

  /// SelectAddressPCRel - Return true if the specified address can be
  /// represented as PC-relative, i.e. as [pc+imm].
  bool SelectAddressPCRel(SDValue N, SDValue &Base) const;

  Sched::Preference getSchedulingPreference(SDNode *N) const override;

  /// LowerOperation - Provide custom lowering hooks for some operations.
  ///
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  /// ReplaceNodeResults - Replace the results of a node with an illegal
  /// result type with new values built out of custom code.
  ///
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;

  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  Align getPrefLoopAlignment(MachineLoop *ML) const override;

  bool shouldInsertFencesForAtomic(const Instruction *I) const override {
    return true;
  }

  Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr,
                        AtomicOrdering Ord) const override;

  Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr,
                              AtomicOrdering Ord) const override;

  Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
                                AtomicOrdering Ord) const override;
  Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;

  bool shouldInlineQuadwordAtomics() const;

  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(const AtomicRMWInst *AI) const override;

  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(const AtomicCmpXchgInst *AI) const override;

  Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder,
                                      AtomicRMWInst *AI, Value *AlignedAddr,
                                      Value *Incr, Value *Mask,
                                      Value *ShiftAmt,
                                      AtomicOrdering Ord) const override;
  Value *emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder,
                                          AtomicCmpXchgInst *CI,
                                          Value *AlignedAddr, Value *CmpVal,
                                          Value *NewVal, Value *Mask,
                                          AtomicOrdering Ord) const override;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;
  MachineBasicBlock *EmitAtomicBinary(MachineInstr &MI,
                                      MachineBasicBlock *MBB,
                                      unsigned AtomicSize,
                                      unsigned BinOpcode,
                                      unsigned CmpOpcode = 0,
                                      unsigned CmpPred = 0) const;
  MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr &MI,
                                              MachineBasicBlock *MBB,
                                              bool is8bit,
                                              unsigned Opcode,
                                              unsigned CmpOpcode = 0,
                                              unsigned CmpPred = 0) const;

  MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
                                      MachineBasicBlock *MBB) const;

  MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const;

  MachineBasicBlock *emitProbedAlloca(MachineInstr &MI,
                                      MachineBasicBlock *MBB) const;

  bool hasInlineStackProbe(const MachineFunction &MF) const override;

  unsigned getStackProbeSize(const MachineFunction &MF) const;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  /// Examine the constraint string and operand type and determine a weight
  /// value. The operand object must already have been set up with the
  /// operand type.
  ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  /// getByValTypeAlignment - Return the desired alignment for ByVal
  /// aggregate function arguments in the caller parameter area.
  Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const override;

  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
  /// vector. If it is invalid, don't add anything to Ops.
  void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  InlineAsm::ConstraintCode
  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "es")
      return InlineAsm::ConstraintCode::es;
    else if (ConstraintCode == "Q")
      return InlineAsm::ConstraintCode::Q;
    else if (ConstraintCode == "Z")
      return InlineAsm::ConstraintCode::Z;
    else if (ConstraintCode == "Zy")
      return InlineAsm::ConstraintCode::Zy;
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }
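
  // Illustrative only: the "Z" constraint is the usual way to feed an
  // address to the indexed load/store instructions from inline asm, e.g.
  //   unsigned V;
  //   __asm__("lwbrx %0, %y1" : "=r"(V) : "Z"(*Ptr));
  // where Ptr is a hypothetical unsigned * in scope and the %y modifier
  // prints the base/index register pair.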

  void CollectTargetIntrinsicOperands(const CallInst &I,
                                      SmallVectorImpl<SDValue> &Ops,
                                      SelectionDAG &DAG) const override;

  /// isLegalAddressingMode - Return true if the addressing mode represented
  /// by AM is legal for this target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                             Type *Ty, unsigned AS,
                             Instruction *I = nullptr) const override;

  /// isLegalICmpImmediate - Return true if the specified immediate is a
  /// legal icmp immediate, that is, the target has icmp instructions which
  /// can compare a register against the immediate without having to
  /// materialize the immediate into a register.
  bool isLegalICmpImmediate(int64_t Imm) const override;

  /// isLegalAddImmediate - Return true if the specified immediate is a
  /// legal add immediate, that is, the target has add instructions which
  /// can add a register and the immediate without having to materialize
  /// the immediate into a register.
  bool isLegalAddImmediate(int64_t Imm) const override;

  /// isTruncateFree - Return true if it's free to truncate a value of
  /// type Ty1 to type Ty2. e.g. on PPC it's free to truncate an i64 value
  /// in register X1 to i32 by referencing its sub-register R1.
  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool isFPExtFree(EVT DestVT, EVT SrcVT) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  bool convertSelectOfConstantsToMath(EVT VT) const override {
    return true;
  }

  bool decomposeMulByConstant(LLVMContext &Context, EVT VT,
                              SDValue C) const override;

  bool isDesirableToTransformToIntegerOp(unsigned Opc,
                                         EVT VT) const override {
    // Only handle float load/store pairs, because float (FPR) load/store
    // instructions have higher latency than integer (GPR) load/store on
    // PPC.
    if (Opc != ISD::LOAD && Opc != ISD::STORE)
      return false;
    if (VT != MVT::f32 && VT != MVT::f64)
      return false;

    return true;
  }
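
  // Illustrative only: an f64 value that is only loaded and stored (never
  // used in FP arithmetic) can then be moved with ld/std through a GPR
  // instead of lfd/stfd through an FPR.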

  // Returns true if the address of the global is stored in a TOC entry.
  bool isAccessedAsGotIndirect(SDValue N) const;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  void getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &Infos,
                          const CallBase &I, MachineFunction &MF,
                          unsigned Intrinsic) const override;

  /// It returns EVT::Other if the type should be determined using generic
  /// target-independent logic.
  EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;

  /// Is unaligned memory access allowed for the given type, and is it fast
  /// relative to software emulation.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned *Fast = nullptr) const override;

  /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
  /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
  /// expanded to FMAs when this method returns true, otherwise fmuladd is
  /// expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;

  bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *Ty) const override;

  /// isProfitableToHoist - Check if it is profitable to hoist instruction
  /// \p I to its dominator block.
  /// For example, it is not profitable if \p I and its only user can form a
  /// FMA instruction, because PowerPC prefers FMADD.
  bool isProfitableToHoist(Instruction *I) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  // Should we expand the build vector with shuffles?
  bool
  shouldExpandBuildVectorWithShuffles(EVT VT,
                                      unsigned DefinedValues) const override;

  // Keep the zero-extensions for arguments to libcalls.
  bool shouldKeepZExtForFP16Conv() const override { return true; }

  /// createFastISel - This method returns a target-specific FastISel object,
  /// or null if the target does not support "fast" instruction selection.
  FastISel *
  createFastISel(FunctionLoweringInfo &FuncInfo,
                 const TargetLibraryInfo *LibInfo,
                 const LibcallLoweringInfo *LibcallLowering) const override;

  /// Returns true if an argument of type Ty needs to be passed in a
  /// contiguous block of registers in calling convention CallConv.
  bool functionArgumentNeedsConsecutiveRegisters(
      Type *Ty, CallingConv::ID CallConv, bool isVarArg,
      const DataLayout &DL) const override {
    // We support any array type as a "consecutive" block in the parameter
    // save area. The element type defines the alignment requirement and
    // whether the argument should go in GPRs, FPRs, or VRs if available.
    //
    // Note that clang uses this capability both to implement the ELFv2
    // homogeneous float/vector aggregate ABI, and to avoid having to use
    // "byval" when passing aggregates that might fully fit in registers.
    return Ty->isArrayTy();
  }
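
  // Illustrative only: under the ELFv2 ABI a homogeneous aggregate such as
  // struct { double x, y, z; }, which clang lowers to an IR array type like
  // [3 x double], is passed in three consecutive FPRs rather than in byval
  // memory.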

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  /// Override to support customized stack guard loading.
  bool useLoadStackGuardNode(const Module &M) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  unsigned getJumpTableEncoding() const override;
  bool isJumpTableRelative() const override;
  SDValue getPICJumpTableRelocBase(SDValue Table,
                                   SelectionDAG &DAG) const override;
  const MCExpr *getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                             unsigned JTI,
                                             MCContext &Ctx) const override;

  /// SelectOptimalAddrMode - Based on a node N and its Parent (a MemSDNode),
  /// compute the address flags of the node, get the optimal address mode
  /// based on the flags, and set the Base and Disp based on the address
  /// mode.
  PPC::AddrMode SelectOptimalAddrMode(const SDNode *Parent, SDValue N,
                                      SDValue &Disp, SDValue &Base,
                                      SelectionDAG &DAG,
                                      MaybeAlign Align) const;
  /// SelectForceXFormMode - Given the specified address, force it to be
  /// represented as an indexed [r+r] operation (an XForm instruction).
  PPC::AddrMode SelectForceXFormMode(SDValue N, SDValue &Disp, SDValue &Base,
                                     SelectionDAG &DAG) const;

  bool splitValueIntoRegisterParts(
      SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
      unsigned NumParts, MVT PartVT,
      std::optional<CallingConv::ID> CC) const override;

  /// Structure that collects some common arguments that get passed around
  /// between the functions for call lowering.
  struct CallFlags {
    const CallingConv::ID CallConv;
    const bool IsTailCall : 1;
    const bool IsVarArg : 1;
    const bool IsPatchPoint : 1;
    const bool IsIndirect : 1;
    const bool HasNest : 1;
    const bool NoMerge : 1;

    CallFlags(CallingConv::ID CC, bool IsTailCall, bool IsVarArg,
              bool IsPatchPoint, bool IsIndirect, bool HasNest, bool NoMerge)
        : CallConv(CC), IsTailCall(IsTailCall), IsVarArg(IsVarArg),
          IsPatchPoint(IsPatchPoint), IsIndirect(IsIndirect),
          HasNest(HasNest), NoMerge(NoMerge) {}
  };
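
  // Illustrative only: a direct, non-variadic C call with no special
  // properties would be described as
  //   CallFlags CFlags(CallingConv::C, /*IsTailCall=*/false,
  //                    /*IsVarArg=*/false, /*IsPatchPoint=*/false,
  //                    /*IsIndirect=*/false, /*HasNest=*/false,
  //                    /*NoMerge=*/false);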

  CCAssignFn *ccAssignFnForCall(CallingConv::ID CC, bool Return,
                                bool IsVarArg) const;
  bool supportsTailCallFor(const CallBase *CB) const;

  bool hasMultipleConditionRegisters(EVT VT) const override;

private:
  struct ReuseLoadInfo {
    SDValue Ptr;
    SDValue Chain;
    SDValue ResChain;
    MachinePointerInfo MPI;
    bool IsDereferenceable = false;
    bool IsInvariant = false;
    Align Alignment;
    AAMDNodes AAInfo;
    const MDNode *Ranges = nullptr;

    ReuseLoadInfo() = default;

    MachineMemOperand::Flags MMOFlags() const {
      MachineMemOperand::Flags F = MachineMemOperand::MONone;
      if (IsDereferenceable)
        F |= MachineMemOperand::MODereferenceable;
      if (IsInvariant)
        F |= MachineMemOperand::MOInvariant;
      return F;
    }
  };

  // Map that relates a set of common address flags to PPC addressing modes.
  std::map<PPC::AddrMode, SmallVector<unsigned, 16>> AddrModesMap;
  void initializeAddrModeMap();

  bool canReuseLoadAddress(SDValue Op, EVT MemVT, ReuseLoadInfo &RLI,
                           SelectionDAG &DAG,
                           ISD::LoadExtType ET = ISD::NON_EXTLOAD) const;

  void LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
                              SelectionDAG &DAG, const SDLoc &dl) const;
  SDValue LowerFP_TO_INTDirectMove(SDValue Op, SelectionDAG &DAG,
                                   const SDLoc &dl) const;

  bool directMoveIsProfitable(const SDValue &Op) const;
  SDValue LowerINT_TO_FPDirectMove(SDValue Op, SelectionDAG &DAG,
                                   const SDLoc &dl) const;

  SDValue LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
                               const SDLoc &dl) const;

  SDValue LowerTRUNCATEVector(SDValue Op, SelectionDAG &DAG) const;

  SDValue getFramePointerFrameIndex(SelectionDAG &DAG) const;
  SDValue getReturnAddrFrameIndex(SelectionDAG &DAG) const;

  bool IsEligibleForTailCallOptimization(
      const GlobalValue *CalleeGV, CallingConv::ID CalleeCC,
      CallingConv::ID CallerCC, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins) const;

  bool IsEligibleForTailCallOptimization_64SVR4(
      const GlobalValue *CalleeGV, CallingConv::ID CalleeCC,
      CallingConv::ID CallerCC, const CallBase *CB, bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<ISD::InputArg> &Ins, const Function *CallerFunc,
      bool isCalleeExternalSymbol) const;

  bool isEligibleForTCO(const GlobalValue *CalleeGV, CallingConv::ID CalleeCC,
                        CallingConv::ID CallerCC, const CallBase *CB,
                        bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        const SmallVectorImpl<ISD::InputArg> &Ins,
                        const Function *CallerFunc,
                        bool isCalleeExternalSymbol) const;

  SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG &DAG, int SPDiff,
                                       SDValue Chain, SDValue &LROpOut,
                                       SDValue &FPOpOut,
                                       const SDLoc &dl) const;

  SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, SDValue GA) const;

  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddressAIX(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddressLinux(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSSUBO(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSADDO(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
                         const SDLoc &dl) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFunnelShift(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVPERM(SDValue Op, SelectionDAG &DAG, ArrayRef<int> PermMask,
                     EVT VT, SDValue V1, SDValue V2) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBSWAP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerADDSUBO_CARRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerADDSUBO(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUCMP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerToLibCall(const char *LibCallName, SDValue Op,
                         SelectionDAG &DAG) const;
  SDValue lowerLibCallBasedOnType(const char *LibCallFloatName,
                                  const char *LibCallDoubleName, SDValue Op,
                                  SelectionDAG &DAG) const;
  bool isLowringToMASSFiniteSafe(SDValue Op) const;
  bool isLowringToMASSSafe(SDValue Op) const;
  bool isScalarMASSConversionEnabled() const;
  SDValue lowerLibCallBase(const char *LibCallDoubleName,
                           const char *LibCallFloatName,
                           const char *LibCallDoubleNameFinite,
                           const char *LibCallFloatNameFinite, SDValue Op,
                           SelectionDAG &DAG) const;
  SDValue lowerPow(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSin(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerCos(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerLog(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerLog10(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerExp(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerVP_LOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVP_STORE(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerVectorLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorStore(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDMFVectorLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDMFVectorStore(SDValue Op, SelectionDAG &DAG) const;
  SDValue DMFInsert1024(const SmallVectorImpl<SDValue> &Pairs,
                        const SDLoc &dl, SelectionDAG &DAG) const;

  SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &dl, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals) const;

  SDValue FinishCall(CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
                     SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
                     SDValue InGlue, SDValue Chain, SDValue CallSeqStart,
                     SDValue &Callee, int SPDiff, unsigned NumBytes,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     SmallVectorImpl<SDValue> &InVals,
                     const CallBase *CB) const;

  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       const SDLoc &dl, SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context, const Type *RetTy) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals,
                      const SDLoc &dl, SelectionDAG &DAG) const override;

  SDValue extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
                            SelectionDAG &DAG, SDValue ArgVal,
                            const SDLoc &dl) const;

  SDValue LowerFormalArguments_AIX(
      SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
      SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
  SDValue LowerFormalArguments_64SVR4(
      SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
      SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
  SDValue LowerFormalArguments_32SVR4(
      SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
      SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;

  SDValue createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
                                     SDValue CallSeqStart,
                                     ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                                     const SDLoc &dl) const;

  SDValue LowerCall_64SVR4(SDValue Chain, SDValue Callee, CallFlags CFlags,
                           const SmallVectorImpl<ISD::OutputArg> &Outs,
                           const SmallVectorImpl<SDValue> &OutVals,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           const SDLoc &dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals,
                           const CallBase *CB) const;
  SDValue LowerCall_32SVR4(SDValue Chain, SDValue Callee, CallFlags CFlags,
                           const SmallVectorImpl<ISD::OutputArg> &Outs,
                           const SmallVectorImpl<SDValue> &OutVals,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           const SDLoc &dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals,
                           const CallBase *CB) const;
  SDValue LowerCall_AIX(SDValue Chain, SDValue Callee, CallFlags CFlags,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        const SmallVectorImpl<SDValue> &OutVals,
                        const SmallVectorImpl<ISD::InputArg> &Ins,
                        const SDLoc &dl, SelectionDAG &DAG,
                        SmallVectorImpl<SDValue> &InVals,
                        const CallBase *CB) const;

  SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;

  SDValue DAGCombineExtBoolTrunc(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue DAGCombineBuildVector(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue DAGCombineTruncBoolExt(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineStoreFPToInt(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineFPToIntToFP(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSHL(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineVectorShift(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSRA(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSRL(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineMUL(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineADD(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineFMALike(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineTRUNCATE(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSetCC(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineVectorShuffle(ShuffleVectorSDNode *SVN,
                               SelectionDAG &DAG) const;
  SDValue combineVReverseMemOP(ShuffleVectorSDNode *SVN, LSBaseSDNode *LSBase,
                               DAGCombinerInfo &DCI) const;

  /// ConvertSETCCToSubtract - Looks at SETCC nodes that compare integers and
  /// replaces the SETCC with an integer subtraction when (1) there is a
  /// legal way of doing it and (2) keeping the result of the comparison in
  /// a GPR has a performance benefit.
  SDValue ConvertSETCCToSubtract(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &RefinementSteps, bool &UseOneConstNR,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &RefinementSteps) const override;
  SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
                           const DenormalMode &Mode) const override;
  SDValue getSqrtResultForDenormInput(SDValue Operand,
                                      SelectionDAG &DAG) const override;
  unsigned combineRepeatedFPDivisors() const override;

  SDValue
  combineElementTruncationToVectorTruncation(SDNode *N,
                                             DAGCombinerInfo &DCI) const;

  SDValue combineBVLoadsSpecialValue(SDValue Operand,
                                     SelectionDAG &DAG) const;

  /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be
  /// handled by the VINSERTH instruction introduced in ISA 3.0. This is
  /// essentially any shuffle of v8i16 vectors that just inserts one element
  /// from one vector into the other.
  SDValue lowerToVINSERTH(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;

  /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be
  /// handled by the VINSERTB instruction introduced in ISA 3.0. This is
  /// essentially the v16i8 vector version of VINSERTH.
  SDValue lowerToVINSERTB(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;

  /// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
  /// handled by the XXSPLTI32DX instruction introduced in ISA 3.1.
  SDValue lowerToXXSPLTI32DX(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;

  // Return whether the call instruction can potentially be optimized to a
  // tail call. This will cause the optimizers to attempt to move, or
  // duplicate, return instructions to help enable tail call optimizations.
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  /// getAddrModeForFlags - Based on the set of address flags, select the
  /// optimal instruction format to match.
  PPC::AddrMode getAddrModeForFlags(unsigned Flags) const;

  /// computeMOFlags - Given a node N and its Parent (a MemSDNode), compute
  /// the address flags of the load/store instruction that is to be matched.
  /// The address flags are stored in a map, which is then searched
  /// through to determine the optimal load/store instruction format.
  unsigned computeMOFlags(const SDNode *Parent, SDValue N,
                          SelectionDAG &DAG) const;
}; // end class PPCTargetLowering

namespace PPC {

FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
                         const TargetLibraryInfo *LibInfo,
                         const LibcallLoweringInfo *LibcallLowering);

} // end namespace PPC

bool isIntS16Immediate(SDNode *N, int16_t &Imm);
bool isIntS16Immediate(SDValue Op, int16_t &Imm);
bool isIntS34Immediate(SDNode *N, int64_t &Imm);
bool isIntS34Immediate(SDValue Op, int64_t &Imm);

bool convertToNonDenormSingle(APInt &ArgAPInt);
bool convertToNonDenormSingle(APFloat &ArgAPFloat);
bool checkConvertToNonDenormSingle(APFloat &ArgAPFloat);

} // end namespace llvm

#endif // LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H