| 1 | //===- ARMISelLowering.cpp - ARM DAG Lowering Implementation --------------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // This file defines the interfaces that ARM uses to lower LLVM code into a |
| 10 | // selection DAG. |
| 11 | // |
| 12 | //===----------------------------------------------------------------------===// |
| 13 | |
| 14 | #include "ARMISelLowering.h" |
| 15 | #include "ARMBaseInstrInfo.h" |
| 16 | #include "ARMBaseRegisterInfo.h" |
| 17 | #include "ARMCallingConv.h" |
| 18 | #include "ARMConstantPoolValue.h" |
| 19 | #include "ARMMachineFunctionInfo.h" |
| 20 | #include "ARMPerfectShuffle.h" |
| 21 | #include "ARMRegisterInfo.h" |
| 22 | #include "ARMSelectionDAGInfo.h" |
| 23 | #include "ARMSubtarget.h" |
| 24 | #include "ARMTargetTransformInfo.h" |
| 25 | #include "MCTargetDesc/ARMAddressingModes.h" |
| 26 | #include "MCTargetDesc/ARMBaseInfo.h" |
| 27 | #include "Utils/ARMBaseInfo.h" |
| 28 | #include "llvm/ADT/APFloat.h" |
| 29 | #include "llvm/ADT/APInt.h" |
| 30 | #include "llvm/ADT/ArrayRef.h" |
| 31 | #include "llvm/ADT/BitVector.h" |
| 32 | #include "llvm/ADT/DenseMap.h" |
| 33 | #include "llvm/ADT/STLExtras.h" |
| 34 | #include "llvm/ADT/SmallPtrSet.h" |
| 35 | #include "llvm/ADT/SmallVector.h" |
| 36 | #include "llvm/ADT/Statistic.h" |
| 37 | #include "llvm/ADT/StringExtras.h" |
| 38 | #include "llvm/ADT/StringRef.h" |
| 39 | #include "llvm/ADT/StringSwitch.h" |
| 40 | #include "llvm/ADT/Twine.h" |
| 41 | #include "llvm/Analysis/VectorUtils.h" |
| 42 | #include "llvm/CodeGen/CallingConvLower.h" |
| 43 | #include "llvm/CodeGen/ComplexDeinterleavingPass.h" |
| 44 | #include "llvm/CodeGen/ISDOpcodes.h" |
| 45 | #include "llvm/CodeGen/IntrinsicLowering.h" |
| 46 | #include "llvm/CodeGen/MachineBasicBlock.h" |
| 47 | #include "llvm/CodeGen/MachineConstantPool.h" |
| 48 | #include "llvm/CodeGen/MachineFrameInfo.h" |
| 49 | #include "llvm/CodeGen/MachineFunction.h" |
| 50 | #include "llvm/CodeGen/MachineInstr.h" |
| 51 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
| 52 | #include "llvm/CodeGen/MachineJumpTableInfo.h" |
| 53 | #include "llvm/CodeGen/MachineMemOperand.h" |
| 54 | #include "llvm/CodeGen/MachineOperand.h" |
| 55 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
| 56 | #include "llvm/CodeGen/RuntimeLibcallUtil.h" |
| 57 | #include "llvm/CodeGen/SelectionDAG.h" |
| 58 | #include "llvm/CodeGen/SelectionDAGAddressAnalysis.h" |
| 59 | #include "llvm/CodeGen/SelectionDAGNodes.h" |
| 60 | #include "llvm/CodeGen/TargetInstrInfo.h" |
| 61 | #include "llvm/CodeGen/TargetLowering.h" |
| 62 | #include "llvm/CodeGen/TargetOpcodes.h" |
| 63 | #include "llvm/CodeGen/TargetRegisterInfo.h" |
| 64 | #include "llvm/CodeGen/TargetSubtargetInfo.h" |
| 65 | #include "llvm/CodeGen/ValueTypes.h" |
| 66 | #include "llvm/CodeGenTypes/MachineValueType.h" |
| 67 | #include "llvm/IR/Attributes.h" |
| 68 | #include "llvm/IR/CallingConv.h" |
| 69 | #include "llvm/IR/Constant.h" |
| 70 | #include "llvm/IR/Constants.h" |
| 71 | #include "llvm/IR/DataLayout.h" |
| 72 | #include "llvm/IR/DebugLoc.h" |
| 73 | #include "llvm/IR/DerivedTypes.h" |
| 74 | #include "llvm/IR/Function.h" |
| 75 | #include "llvm/IR/GlobalAlias.h" |
| 76 | #include "llvm/IR/GlobalValue.h" |
| 77 | #include "llvm/IR/GlobalVariable.h" |
| 78 | #include "llvm/IR/IRBuilder.h" |
| 79 | #include "llvm/IR/InlineAsm.h" |
| 80 | #include "llvm/IR/Instruction.h" |
| 81 | #include "llvm/IR/Instructions.h" |
| 82 | #include "llvm/IR/IntrinsicInst.h" |
| 83 | #include "llvm/IR/Intrinsics.h" |
| 84 | #include "llvm/IR/IntrinsicsARM.h" |
| 85 | #include "llvm/IR/Module.h" |
| 86 | #include "llvm/IR/Type.h" |
| 87 | #include "llvm/IR/User.h" |
| 88 | #include "llvm/IR/Value.h" |
| 89 | #include "llvm/MC/MCInstrDesc.h" |
| 90 | #include "llvm/MC/MCInstrItineraries.h" |
| 91 | #include "llvm/MC/MCSchedule.h" |
| 92 | #include "llvm/Support/AtomicOrdering.h" |
| 93 | #include "llvm/Support/BranchProbability.h" |
| 94 | #include "llvm/Support/Casting.h" |
| 95 | #include "llvm/Support/CodeGen.h" |
| 96 | #include "llvm/Support/CommandLine.h" |
| 97 | #include "llvm/Support/Compiler.h" |
| 98 | #include "llvm/Support/Debug.h" |
| 99 | #include "llvm/Support/ErrorHandling.h" |
| 100 | #include "llvm/Support/KnownBits.h" |
| 101 | #include "llvm/Support/MathExtras.h" |
| 102 | #include "llvm/Support/raw_ostream.h" |
| 103 | #include "llvm/Target/TargetMachine.h" |
| 104 | #include "llvm/Target/TargetOptions.h" |
| 105 | #include "llvm/TargetParser/Triple.h" |
| 106 | #include <algorithm> |
| 107 | #include <cassert> |
| 108 | #include <cstdint> |
| 109 | #include <cstdlib> |
| 110 | #include <iterator> |
| 111 | #include <limits> |
| 112 | #include <optional> |
| 113 | #include <tuple> |
| 114 | #include <utility> |
| 115 | #include <vector> |
| 116 | |
| 117 | using namespace llvm; |
| 118 | using namespace llvm::PatternMatch; |
| 119 | |
| 120 | #define DEBUG_TYPE "arm-isel" |
| 121 | |
| 122 | STATISTIC(NumTailCalls, "Number of tail calls"); |
| 123 | STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt"); |
| 124 | STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments"); |
| 125 | STATISTIC(NumConstpoolPromoted, |
| 126 | "Number of constants with their storage promoted into constant pools"); |
| 127 | |
| 128 | static cl::opt<bool> |
| 129 | ARMInterworking("arm-interworking", cl::Hidden, |
| 130 | cl::desc("Enable / disable ARM interworking (for debugging only)"), |
| 131 | cl::init(Val: true)); |
| 132 | |
| 133 | static cl::opt<bool> EnableConstpoolPromotion( |
| 134 | "arm-promote-constant", cl::Hidden, |
| 135 | cl::desc("Enable / disable promotion of unnamed_addr constants into " |
| 136 | "constant pools"), |
| 137 | cl::init(Val: false)); // FIXME: set to true by default once PR32780 is fixed |
| 138 | static cl::opt<unsigned> ConstpoolPromotionMaxSize( |
| 139 | "arm-promote-constant-max-size", cl::Hidden, |
| 140 | cl::desc("Maximum size of constant to promote into a constant pool"), |
| 141 | cl::init(Val: 64)); |
| 142 | static cl::opt<unsigned> ConstpoolPromotionMaxTotal( |
| 143 | "arm-promote-constant-max-total", cl::Hidden, |
| 144 | cl::desc("Maximum size of ALL constants to promote into a constant pool"), |
| 145 | cl::init(Val: 128)); |
| 146 | |
| 147 | cl::opt<unsigned> |
| 148 | MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor", cl::Hidden, |
| 149 | cl::desc("Maximum interleave factor for MVE VLDn to generate."), |
| 150 | cl::init(Val: 2)); |
| 151 | |
| 152 | cl::opt<unsigned> ArmMaxBaseUpdatesToCheck( |
| 153 | "arm-max-base-updates-to-check", cl::Hidden, |
| 154 | cl::desc("Maximum number of base-updates to check generating postindex."), |
| 155 | cl::init(Val: 64)); |
| 156 | |
| 157 | /// Value type used for "flags" operands / results (either CPSR or FPSCR_NZCV). |
| 158 | constexpr MVT FlagsVT = MVT::i32; |
| 159 | |
| 160 | // The APCS parameter registers. |
| 161 | static const MCPhysReg GPRArgRegs[] = { |
| 162 | ARM::R0, ARM::R1, ARM::R2, ARM::R3 |
| 163 | }; |
| 164 | |
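|  | // Helper for CMSE argument lowering: truncate the incoming value to the |
|  | // declared argument type, then re-extend it to i32 according to the |
|  | // argument's sign/zero-extension flags. |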
| 165 | static SDValue handleCMSEValue(const SDValue &Value, const ISD::InputArg &Arg, |
| 166 | SelectionDAG &DAG, const SDLoc &DL) { |
| 167 | assert(Arg.ArgVT.isScalarInteger()); |
| 168 | assert(Arg.ArgVT.bitsLT(MVT::i32)); |
| 169 | SDValue Trunc = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: Arg.ArgVT, Operand: Value); |
| 170 | SDValue Ext = |
| 171 | DAG.getNode(Opcode: Arg.Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL, |
| 172 | VT: MVT::i32, Operand: Trunc); |
| 173 | return Ext; |
| 174 | } |
| 175 | |
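|  | // Set up the operation actions shared by all NEON vector types. Loads and |
|  | // stores of VT are promoted to PromotedLdStVT when the two types differ. |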
| 176 | void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT) { |
| 177 | if (VT != PromotedLdStVT) { |
| 178 | setOperationAction(Op: ISD::LOAD, VT, Action: Promote); |
| 179 | AddPromotedToType (Opc: ISD::LOAD, OrigVT: VT, DestVT: PromotedLdStVT); |
| 180 | |
| 181 | setOperationAction(Op: ISD::STORE, VT, Action: Promote); |
| 182 | AddPromotedToType (Opc: ISD::STORE, OrigVT: VT, DestVT: PromotedLdStVT); |
| 183 | } |
| 184 | |
| 185 | MVT ElemTy = VT.getVectorElementType(); |
| 186 | if (ElemTy != MVT::f64) |
| 187 | setOperationAction(Op: ISD::SETCC, VT, Action: Custom); |
| 188 | setOperationAction(Op: ISD::INSERT_VECTOR_ELT, VT, Action: Custom); |
| 189 | setOperationAction(Op: ISD::EXTRACT_VECTOR_ELT, VT, Action: Custom); |
| 190 | if (ElemTy == MVT::i32) { |
| 191 | setOperationAction(Op: ISD::SINT_TO_FP, VT, Action: Custom); |
| 192 | setOperationAction(Op: ISD::UINT_TO_FP, VT, Action: Custom); |
| 193 | setOperationAction(Op: ISD::FP_TO_SINT, VT, Action: Custom); |
| 194 | setOperationAction(Op: ISD::FP_TO_UINT, VT, Action: Custom); |
| 195 | } else { |
| 196 | setOperationAction(Op: ISD::SINT_TO_FP, VT, Action: Expand); |
| 197 | setOperationAction(Op: ISD::UINT_TO_FP, VT, Action: Expand); |
| 198 | setOperationAction(Op: ISD::FP_TO_SINT, VT, Action: Expand); |
| 199 | setOperationAction(Op: ISD::FP_TO_UINT, VT, Action: Expand); |
| 200 | } |
| 201 | setOperationAction(Op: ISD::BUILD_VECTOR, VT, Action: Custom); |
| 202 | setOperationAction(Op: ISD::VECTOR_SHUFFLE, VT, Action: Custom); |
| 203 | setOperationAction(Op: ISD::CONCAT_VECTORS, VT, Action: Legal); |
| 204 | setOperationAction(Op: ISD::EXTRACT_SUBVECTOR, VT, Action: Legal); |
| 205 | setOperationAction(Op: ISD::SELECT, VT, Action: Expand); |
| 206 | setOperationAction(Op: ISD::SELECT_CC, VT, Action: Expand); |
| 207 | setOperationAction(Op: ISD::VSELECT, VT, Action: Expand); |
| 208 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT, Action: Expand); |
| 209 | if (VT.isInteger()) { |
| 210 | setOperationAction(Op: ISD::SHL, VT, Action: Custom); |
| 211 | setOperationAction(Op: ISD::SRA, VT, Action: Custom); |
| 212 | setOperationAction(Op: ISD::SRL, VT, Action: Custom); |
| 213 | } |
| 214 | |
| 215 | // Neon does not support vector divide/remainder operations. |
| 216 | setOperationAction(Op: ISD::SDIV, VT, Action: Expand); |
| 217 | setOperationAction(Op: ISD::UDIV, VT, Action: Expand); |
| 218 | setOperationAction(Op: ISD::FDIV, VT, Action: Expand); |
| 219 | setOperationAction(Op: ISD::SREM, VT, Action: Expand); |
| 220 | setOperationAction(Op: ISD::UREM, VT, Action: Expand); |
| 221 | setOperationAction(Op: ISD::FREM, VT, Action: Expand); |
| 222 | setOperationAction(Op: ISD::SDIVREM, VT, Action: Expand); |
| 223 | setOperationAction(Op: ISD::UDIVREM, VT, Action: Expand); |
| 224 | |
| 225 | if (!VT.isFloatingPoint() && VT != MVT::v2i64 && VT != MVT::v1i64) |
| 226 | for (auto Opcode : {ISD::ABS, ISD::ABDS, ISD::ABDU, ISD::SMIN, ISD::SMAX, |
| 227 | ISD::UMIN, ISD::UMAX}) |
| 228 | setOperationAction(Op: Opcode, VT, Action: Legal); |
| 229 | if (!VT.isFloatingPoint()) |
| 230 | for (auto Opcode : {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}) |
| 231 | setOperationAction(Op: Opcode, VT, Action: Legal); |
| 232 | } |
| 233 | |
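|  | // Register VT as a 64-bit NEON vector type living in the D registers. |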
| 234 | void ARMTargetLowering::addDRTypeForNEON(MVT VT) { |
| 235 | addRegisterClass(VT, RC: &ARM::DPRRegClass); |
| 236 | addTypeForNEON(VT, PromotedLdStVT: MVT::f64); |
| 237 | } |
| 238 | |
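|  | // Register VT as a 128-bit NEON vector type, held in pairs of D registers |
|  | // (Q registers). |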
| 239 | void ARMTargetLowering::addQRTypeForNEON(MVT VT) { |
| 240 | addRegisterClass(VT, RC: &ARM::DPairRegClass); |
| 241 | addTypeForNEON(VT, PromotedLdStVT: MVT::v2f64); |
| 242 | } |
| 243 | |
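|  | // Mark every standard operation on VT as Expand; a few trivial operations |
|  | // are re-legalized in the body below. |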
| 244 | void ARMTargetLowering::setAllExpand(MVT VT) { |
| 245 | for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc) |
| 246 | setOperationAction(Op: Opc, VT, Action: Expand); |
| 247 | |
| 248 | // We support these really simple operations even on types where all |
| 249 | // the actual arithmetic has to be broken down into simpler |
| 250 | // operations or turned into library calls. |
| 251 | setOperationAction(Op: ISD::BITCAST, VT, Action: Legal); |
| 252 | setOperationAction(Op: ISD::LOAD, VT, Action: Legal); |
| 253 | setOperationAction(Op: ISD::STORE, VT, Action: Legal); |
| 254 | setOperationAction(Op: ISD::UNDEF, VT, Action: Legal); |
| 255 | } |
| 256 | |
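|  | // Apply Action to EXTLOAD, ZEXTLOAD and SEXTLOAD where 'From' is the result |
|  | // value type and 'To' is the in-memory type. |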
| 257 | void ARMTargetLowering::addAllExtLoads(const MVT From, const MVT To, |
| 258 | LegalizeAction Action) { |
| 259 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: From, MemVT: To, Action); |
| 260 | setLoadExtAction(ExtType: ISD::ZEXTLOAD, ValVT: From, MemVT: To, Action); |
| 261 | setLoadExtAction(ExtType: ISD::SEXTLOAD, ValVT: From, MemVT: To, Action); |
| 262 | } |
| 263 | |
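|  | // Register the MVE vector types and configure their operation actions. |
|  | // HasMVEFP controls whether floating-point arithmetic on the FP vector |
|  | // types is treated as native or expanded. |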
| 264 | void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) { |
| 265 | const MVT IntTypes[] = { MVT::v16i8, MVT::v8i16, MVT::v4i32 }; |
| 266 | |
| 267 | for (auto VT : IntTypes) { |
| 268 | addRegisterClass(VT, RC: &ARM::MQPRRegClass); |
| 269 | setOperationAction(Op: ISD::VECTOR_SHUFFLE, VT, Action: Custom); |
| 270 | setOperationAction(Op: ISD::INSERT_VECTOR_ELT, VT, Action: Custom); |
| 271 | setOperationAction(Op: ISD::EXTRACT_VECTOR_ELT, VT, Action: Custom); |
| 272 | setOperationAction(Op: ISD::BUILD_VECTOR, VT, Action: Custom); |
| 273 | setOperationAction(Op: ISD::SHL, VT, Action: Custom); |
| 274 | setOperationAction(Op: ISD::SRA, VT, Action: Custom); |
| 275 | setOperationAction(Op: ISD::SRL, VT, Action: Custom); |
| 276 | setOperationAction(Op: ISD::SMIN, VT, Action: Legal); |
| 277 | setOperationAction(Op: ISD::SMAX, VT, Action: Legal); |
| 278 | setOperationAction(Op: ISD::UMIN, VT, Action: Legal); |
| 279 | setOperationAction(Op: ISD::UMAX, VT, Action: Legal); |
| 280 | setOperationAction(Op: ISD::ABS, VT, Action: Legal); |
| 281 | setOperationAction(Op: ISD::SETCC, VT, Action: Custom); |
| 282 | setOperationAction(Op: ISD::MLOAD, VT, Action: Custom); |
| 283 | setOperationAction(Op: ISD::MSTORE, VT, Action: Legal); |
| 284 | setOperationAction(Op: ISD::CTLZ, VT, Action: Legal); |
| 285 | setOperationAction(Op: ISD::CTTZ, VT, Action: Custom); |
| 286 | setOperationAction(Op: ISD::BITREVERSE, VT, Action: Legal); |
| 287 | setOperationAction(Op: ISD::BSWAP, VT, Action: Legal); |
| 288 | setOperationAction(Op: ISD::SADDSAT, VT, Action: Legal); |
| 289 | setOperationAction(Op: ISD::UADDSAT, VT, Action: Legal); |
| 290 | setOperationAction(Op: ISD::SSUBSAT, VT, Action: Legal); |
| 291 | setOperationAction(Op: ISD::USUBSAT, VT, Action: Legal); |
| 292 | setOperationAction(Op: ISD::ABDS, VT, Action: Legal); |
| 293 | setOperationAction(Op: ISD::ABDU, VT, Action: Legal); |
| 294 | setOperationAction(Op: ISD::AVGFLOORS, VT, Action: Legal); |
| 295 | setOperationAction(Op: ISD::AVGFLOORU, VT, Action: Legal); |
| 296 | setOperationAction(Op: ISD::AVGCEILS, VT, Action: Legal); |
| 297 | setOperationAction(Op: ISD::AVGCEILU, VT, Action: Legal); |
| 298 | |
| 299 | // No native support for these. |
| 300 | setOperationAction(Op: ISD::UDIV, VT, Action: Expand); |
| 301 | setOperationAction(Op: ISD::SDIV, VT, Action: Expand); |
| 302 | setOperationAction(Op: ISD::UREM, VT, Action: Expand); |
| 303 | setOperationAction(Op: ISD::SREM, VT, Action: Expand); |
| 304 | setOperationAction(Op: ISD::UDIVREM, VT, Action: Expand); |
| 305 | setOperationAction(Op: ISD::SDIVREM, VT, Action: Expand); |
| 306 | setOperationAction(Op: ISD::CTPOP, VT, Action: Expand); |
| 307 | setOperationAction(Op: ISD::SELECT, VT, Action: Expand); |
| 308 | setOperationAction(Op: ISD::SELECT_CC, VT, Action: Expand); |
| 309 | |
| 310 | // Vector reductions |
| 311 | setOperationAction(Op: ISD::VECREDUCE_ADD, VT, Action: Legal); |
| 312 | setOperationAction(Op: ISD::VECREDUCE_SMAX, VT, Action: Legal); |
| 313 | setOperationAction(Op: ISD::VECREDUCE_UMAX, VT, Action: Legal); |
| 314 | setOperationAction(Op: ISD::VECREDUCE_SMIN, VT, Action: Legal); |
| 315 | setOperationAction(Op: ISD::VECREDUCE_UMIN, VT, Action: Legal); |
| 316 | setOperationAction(Op: ISD::VECREDUCE_MUL, VT, Action: Custom); |
| 317 | setOperationAction(Op: ISD::VECREDUCE_AND, VT, Action: Custom); |
| 318 | setOperationAction(Op: ISD::VECREDUCE_OR, VT, Action: Custom); |
| 319 | setOperationAction(Op: ISD::VECREDUCE_XOR, VT, Action: Custom); |
| 320 | |
| 321 | if (!HasMVEFP) { |
| 322 | setOperationAction(Op: ISD::SINT_TO_FP, VT, Action: Expand); |
| 323 | setOperationAction(Op: ISD::UINT_TO_FP, VT, Action: Expand); |
| 324 | setOperationAction(Op: ISD::FP_TO_SINT, VT, Action: Expand); |
| 325 | setOperationAction(Op: ISD::FP_TO_UINT, VT, Action: Expand); |
| 326 | } else { |
| 327 | setOperationAction(Op: ISD::FP_TO_SINT_SAT, VT, Action: Custom); |
| 328 | setOperationAction(Op: ISD::FP_TO_UINT_SAT, VT, Action: Custom); |
| 329 | } |
| 330 | |
| 331 | // Pre and Post inc are supported on loads and stores |
| 332 | for (unsigned im = (unsigned)ISD::PRE_INC; |
| 333 | im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) { |
| 334 | setIndexedLoadAction(IdxModes: im, VT, Action: Legal); |
| 335 | setIndexedStoreAction(IdxModes: im, VT, Action: Legal); |
| 336 | setIndexedMaskedLoadAction(IdxMode: im, VT, Action: Legal); |
| 337 | setIndexedMaskedStoreAction(IdxMode: im, VT, Action: Legal); |
| 338 | } |
| 339 | } |
| 340 | |
| 341 | const MVT FloatTypes[] = { MVT::v8f16, MVT::v4f32 }; |
| 342 | for (auto VT : FloatTypes) { |
| 343 | addRegisterClass(VT, RC: &ARM::MQPRRegClass); |
| 344 | if (!HasMVEFP) |
| 345 | setAllExpand(VT); |
| 346 | |
| 347 | // These are legal or custom whether we have MVE.fp or not |
| 348 | setOperationAction(Op: ISD::VECTOR_SHUFFLE, VT, Action: Custom); |
| 349 | setOperationAction(Op: ISD::INSERT_VECTOR_ELT, VT, Action: Custom); |
| 350 | setOperationAction(Op: ISD::INSERT_VECTOR_ELT, VT: VT.getVectorElementType(), Action: Custom); |
| 351 | setOperationAction(Op: ISD::EXTRACT_VECTOR_ELT, VT, Action: Custom); |
| 352 | setOperationAction(Op: ISD::BUILD_VECTOR, VT, Action: Custom); |
| 353 | setOperationAction(Op: ISD::BUILD_VECTOR, VT: VT.getVectorElementType(), Action: Custom); |
| 354 | setOperationAction(Op: ISD::SCALAR_TO_VECTOR, VT, Action: Legal); |
| 355 | setOperationAction(Op: ISD::SETCC, VT, Action: Custom); |
| 356 | setOperationAction(Op: ISD::MLOAD, VT, Action: Custom); |
| 357 | setOperationAction(Op: ISD::MSTORE, VT, Action: Legal); |
| 358 | setOperationAction(Op: ISD::SELECT, VT, Action: Expand); |
| 359 | setOperationAction(Op: ISD::SELECT_CC, VT, Action: Expand); |
| 360 | |
| 361 | // Pre and Post inc are supported on loads and stores |
| 362 | for (unsigned im = (unsigned)ISD::PRE_INC; |
| 363 | im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) { |
| 364 | setIndexedLoadAction(IdxModes: im, VT, Action: Legal); |
| 365 | setIndexedStoreAction(IdxModes: im, VT, Action: Legal); |
| 366 | setIndexedMaskedLoadAction(IdxMode: im, VT, Action: Legal); |
| 367 | setIndexedMaskedStoreAction(IdxMode: im, VT, Action: Legal); |
| 368 | } |
| 369 | |
| 370 | if (HasMVEFP) { |
| 371 | setOperationAction(Op: ISD::FMINNUM, VT, Action: Legal); |
| 372 | setOperationAction(Op: ISD::FMAXNUM, VT, Action: Legal); |
| 373 | setOperationAction(Op: ISD::FROUND, VT, Action: Legal); |
| 374 | setOperationAction(Op: ISD::VECREDUCE_FADD, VT, Action: Custom); |
| 375 | setOperationAction(Op: ISD::VECREDUCE_FMUL, VT, Action: Custom); |
| 376 | setOperationAction(Op: ISD::VECREDUCE_FMIN, VT, Action: Custom); |
| 377 | setOperationAction(Op: ISD::VECREDUCE_FMAX, VT, Action: Custom); |
| 378 | |
| 379 | // No native support for these. |
| 380 | setOperationAction(Op: ISD::FDIV, VT, Action: Expand); |
| 381 | setOperationAction(Op: ISD::FREM, VT, Action: Expand); |
| 382 | setOperationAction(Op: ISD::FSQRT, VT, Action: Expand); |
| 383 | setOperationAction(Op: ISD::FSIN, VT, Action: Expand); |
| 384 | setOperationAction(Op: ISD::FCOS, VT, Action: Expand); |
| 385 | setOperationAction(Op: ISD::FTAN, VT, Action: Expand); |
| 386 | setOperationAction(Op: ISD::FPOW, VT, Action: Expand); |
| 387 | setOperationAction(Op: ISD::FLOG, VT, Action: Expand); |
| 388 | setOperationAction(Op: ISD::FLOG2, VT, Action: Expand); |
| 389 | setOperationAction(Op: ISD::FLOG10, VT, Action: Expand); |
| 390 | setOperationAction(Op: ISD::FEXP, VT, Action: Expand); |
| 391 | setOperationAction(Op: ISD::FEXP2, VT, Action: Expand); |
| 392 | setOperationAction(Op: ISD::FEXP10, VT, Action: Expand); |
| 393 | setOperationAction(Op: ISD::FNEARBYINT, VT, Action: Expand); |
| 394 | } |
| 395 | } |
| 396 | |
| 397 | // Custom-expand vector reductions on smaller-than-legal types to prevent |
| 398 | // false zero items from being added. |
| 399 | setOperationAction(Op: ISD::VECREDUCE_FADD, VT: MVT::v4f16, Action: Custom); |
| 400 | setOperationAction(Op: ISD::VECREDUCE_FMUL, VT: MVT::v4f16, Action: Custom); |
| 401 | setOperationAction(Op: ISD::VECREDUCE_FMIN, VT: MVT::v4f16, Action: Custom); |
| 402 | setOperationAction(Op: ISD::VECREDUCE_FMAX, VT: MVT::v4f16, Action: Custom); |
| 403 | setOperationAction(Op: ISD::VECREDUCE_FADD, VT: MVT::v2f16, Action: Custom); |
| 404 | setOperationAction(Op: ISD::VECREDUCE_FMUL, VT: MVT::v2f16, Action: Custom); |
| 405 | setOperationAction(Op: ISD::VECREDUCE_FMIN, VT: MVT::v2f16, Action: Custom); |
| 406 | setOperationAction(Op: ISD::VECREDUCE_FMAX, VT: MVT::v2f16, Action: Custom); |
| 407 | |
| 408 | // We 'support' these types up to bitcast/load/store level, regardless of |
| 409 | // MVE integer-only / float support. At the integer-only level, only FP data |
| 410 | // processing on the FP vector types is inhibited. |
| 411 | const MVT LongTypes[] = { MVT::v2i64, MVT::v2f64 }; |
| 412 | for (auto VT : LongTypes) { |
| 413 | addRegisterClass(VT, RC: &ARM::MQPRRegClass); |
| 414 | setAllExpand(VT); |
| 415 | setOperationAction(Op: ISD::INSERT_VECTOR_ELT, VT, Action: Custom); |
| 416 | setOperationAction(Op: ISD::EXTRACT_VECTOR_ELT, VT, Action: Custom); |
| 417 | setOperationAction(Op: ISD::BUILD_VECTOR, VT, Action: Custom); |
| 418 | setOperationAction(Op: ISD::VSELECT, VT, Action: Legal); |
| 419 | setOperationAction(Op: ISD::VECTOR_SHUFFLE, VT, Action: Custom); |
| 420 | } |
| 421 | setOperationAction(Op: ISD::SCALAR_TO_VECTOR, VT: MVT::v2f64, Action: Legal); |
| 422 | |
| 423 | // We can do bitwise operations on v2i64 vectors |
| 424 | setOperationAction(Op: ISD::AND, VT: MVT::v2i64, Action: Legal); |
| 425 | setOperationAction(Op: ISD::OR, VT: MVT::v2i64, Action: Legal); |
| 426 | setOperationAction(Op: ISD::XOR, VT: MVT::v2i64, Action: Legal); |
| 427 | |
| 428 | // It is legal to extload from v4i8 or v4i16 to v4i32, and from v8i8 to v8i16. |
| 429 | addAllExtLoads(From: MVT::v8i16, To: MVT::v8i8, Action: Legal); |
| 430 | addAllExtLoads(From: MVT::v4i32, To: MVT::v4i16, Action: Legal); |
| 431 | addAllExtLoads(From: MVT::v4i32, To: MVT::v4i8, Action: Legal); |
| 432 | |
| 433 | // It is legal to sign extend from v4i8/v4i16 to v4i32 or v8i8 to v8i16. |
| 434 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::v4i8, Action: Legal); |
| 435 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::v4i16, Action: Legal); |
| 436 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::v4i32, Action: Legal); |
| 437 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::v8i8, Action: Legal); |
| 438 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::v8i16, Action: Legal); |
| 439 | |
| 440 | // Some truncating stores are legal too. |
| 441 | setTruncStoreAction(ValVT: MVT::v4i32, MemVT: MVT::v4i16, Action: Legal); |
| 442 | setTruncStoreAction(ValVT: MVT::v4i32, MemVT: MVT::v4i8, Action: Legal); |
| 443 | setTruncStoreAction(ValVT: MVT::v8i16, MemVT: MVT::v8i8, Action: Legal); |
| 444 | |
| 445 | // Pre and Post inc on these are legal, given the correct extends |
| 446 | for (unsigned im = (unsigned)ISD::PRE_INC; |
| 447 | im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) { |
| 448 | for (auto VT : {MVT::v8i8, MVT::v4i8, MVT::v4i16}) { |
| 449 | setIndexedLoadAction(IdxModes: im, VT, Action: Legal); |
| 450 | setIndexedStoreAction(IdxModes: im, VT, Action: Legal); |
| 451 | setIndexedMaskedLoadAction(IdxMode: im, VT, Action: Legal); |
| 452 | setIndexedMaskedStoreAction(IdxMode: im, VT, Action: Legal); |
| 453 | } |
| 454 | } |
| 455 | |
| 456 | // Predicate types |
| 457 | const MVT pTypes[] = {MVT::v16i1, MVT::v8i1, MVT::v4i1, MVT::v2i1}; |
| 458 | for (auto VT : pTypes) { |
| 459 | addRegisterClass(VT, RC: &ARM::VCCRRegClass); |
| 460 | setOperationAction(Op: ISD::BUILD_VECTOR, VT, Action: Custom); |
| 461 | setOperationAction(Op: ISD::VECTOR_SHUFFLE, VT, Action: Custom); |
| 462 | setOperationAction(Op: ISD::EXTRACT_SUBVECTOR, VT, Action: Custom); |
| 463 | setOperationAction(Op: ISD::CONCAT_VECTORS, VT, Action: Custom); |
| 464 | setOperationAction(Op: ISD::INSERT_VECTOR_ELT, VT, Action: Custom); |
| 465 | setOperationAction(Op: ISD::EXTRACT_VECTOR_ELT, VT, Action: Custom); |
| 466 | setOperationAction(Op: ISD::SETCC, VT, Action: Custom); |
| 467 | setOperationAction(Op: ISD::SCALAR_TO_VECTOR, VT, Action: Expand); |
| 468 | setOperationAction(Op: ISD::LOAD, VT, Action: Custom); |
| 469 | setOperationAction(Op: ISD::STORE, VT, Action: Custom); |
| 470 | setOperationAction(Op: ISD::TRUNCATE, VT, Action: Custom); |
| 471 | setOperationAction(Op: ISD::VSELECT, VT, Action: Expand); |
| 472 | setOperationAction(Op: ISD::SELECT, VT, Action: Expand); |
| 473 | setOperationAction(Op: ISD::SELECT_CC, VT, Action: Expand); |
| 474 | |
| 475 | if (!HasMVEFP) { |
| 476 | setOperationAction(Op: ISD::SINT_TO_FP, VT, Action: Expand); |
| 477 | setOperationAction(Op: ISD::UINT_TO_FP, VT, Action: Expand); |
| 478 | setOperationAction(Op: ISD::FP_TO_SINT, VT, Action: Expand); |
| 479 | setOperationAction(Op: ISD::FP_TO_UINT, VT, Action: Expand); |
| 480 | } |
| 481 | } |
| 482 | setOperationAction(Op: ISD::SETCC, VT: MVT::v2i1, Action: Expand); |
| 483 | setOperationAction(Op: ISD::TRUNCATE, VT: MVT::v2i1, Action: Expand); |
| 484 | setOperationAction(Op: ISD::AND, VT: MVT::v2i1, Action: Expand); |
| 485 | setOperationAction(Op: ISD::OR, VT: MVT::v2i1, Action: Expand); |
| 486 | setOperationAction(Op: ISD::XOR, VT: MVT::v2i1, Action: Expand); |
| 487 | setOperationAction(Op: ISD::SINT_TO_FP, VT: MVT::v2i1, Action: Expand); |
| 488 | setOperationAction(Op: ISD::UINT_TO_FP, VT: MVT::v2i1, Action: Expand); |
| 489 | setOperationAction(Op: ISD::FP_TO_SINT, VT: MVT::v2i1, Action: Expand); |
| 490 | setOperationAction(Op: ISD::FP_TO_UINT, VT: MVT::v2i1, Action: Expand); |
| 491 | |
| 492 | setOperationAction(Op: ISD::SIGN_EXTEND, VT: MVT::v8i32, Action: Custom); |
| 493 | setOperationAction(Op: ISD::SIGN_EXTEND, VT: MVT::v16i16, Action: Custom); |
| 494 | setOperationAction(Op: ISD::SIGN_EXTEND, VT: MVT::v16i32, Action: Custom); |
| 495 | setOperationAction(Op: ISD::ZERO_EXTEND, VT: MVT::v8i32, Action: Custom); |
| 496 | setOperationAction(Op: ISD::ZERO_EXTEND, VT: MVT::v16i16, Action: Custom); |
| 497 | setOperationAction(Op: ISD::ZERO_EXTEND, VT: MVT::v16i32, Action: Custom); |
| 498 | setOperationAction(Op: ISD::TRUNCATE, VT: MVT::v8i32, Action: Custom); |
| 499 | setOperationAction(Op: ISD::TRUNCATE, VT: MVT::v16i16, Action: Custom); |
| 500 | } |
| 501 | |
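|  | // Convenience accessor returning the ARM-specific target machine. |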
| 502 | const ARMBaseTargetMachine &ARMTargetLowering::getTM() const { |
| 503 | return static_cast<const ARMBaseTargetMachine &>(getTargetMachine()); |
| 504 | } |
| 505 | |
| 506 | ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM_, |
| 507 | const ARMSubtarget &STI) |
| 508 | : TargetLowering(TM_), Subtarget(&STI), |
| 509 | RegInfo(Subtarget->getRegisterInfo()), |
| 510 | Itins(Subtarget->getInstrItineraryData()) { |
| 511 | const auto &TM = static_cast<const ARMBaseTargetMachine &>(TM_); |
| 512 | |
| 513 | setBooleanContents(ZeroOrOneBooleanContent); |
| 514 | setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); |
| 515 | |
| 516 | const Triple &TT = TM.getTargetTriple(); |
| 517 | |
| 518 | if (TT.isOSBinFormatMachO()) { |
| 519 | // Uses VFP for Thumb libfuncs if available. |
| 520 | if (Subtarget->isThumb() && Subtarget->hasVFP2Base() && |
| 521 | Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) { |
| 522 | // clang-format off |
| 523 | static const struct { |
| 524 | const RTLIB::Libcall Op; |
| 525 | const RTLIB::LibcallImpl Impl; |
| 526 | const CmpInst::Predicate Cond; |
| 527 | } LibraryCalls[] = { |
| 528 | // Single-precision floating-point arithmetic. |
| 529 | { .Op: RTLIB::ADD_F32, .Impl: RTLIB::__addsf3vfp, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 530 | { .Op: RTLIB::SUB_F32, .Impl: RTLIB::__subsf3vfp, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 531 | { .Op: RTLIB::MUL_F32, .Impl: RTLIB::__mulsf3vfp, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 532 | { .Op: RTLIB::DIV_F32, .Impl: RTLIB::__divsf3vfp, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 533 | |
| 534 | // Double-precision floating-point arithmetic. |
| 535 | { .Op: RTLIB::ADD_F64, .Impl: RTLIB::__adddf3vfp, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 536 | { .Op: RTLIB::SUB_F64, .Impl: RTLIB::__subdf3vfp, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 537 | { .Op: RTLIB::MUL_F64, .Impl: RTLIB::__muldf3vfp, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 538 | { .Op: RTLIB::DIV_F64, .Impl: RTLIB::__divdf3vfp, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 539 | |
| 540 | // Single-precision comparisons. |
| 541 | { .Op: RTLIB::OEQ_F32, .Impl: RTLIB::__eqsf2vfp, .Cond: CmpInst::ICMP_NE }, |
| 542 | { .Op: RTLIB::UNE_F32, .Impl: RTLIB::__nesf2vfp, .Cond: CmpInst::ICMP_NE }, |
| 543 | { .Op: RTLIB::OLT_F32, .Impl: RTLIB::__ltsf2vfp, .Cond: CmpInst::ICMP_NE }, |
| 544 | { .Op: RTLIB::OLE_F32, .Impl: RTLIB::__lesf2vfp, .Cond: CmpInst::ICMP_NE }, |
| 545 | { .Op: RTLIB::OGE_F32, .Impl: RTLIB::__gesf2vfp, .Cond: CmpInst::ICMP_NE }, |
| 546 | { .Op: RTLIB::OGT_F32, .Impl: RTLIB::__gtsf2vfp, .Cond: CmpInst::ICMP_NE }, |
| 547 | { .Op: RTLIB::UO_F32, .Impl: RTLIB::__unordsf2vfp, .Cond: CmpInst::ICMP_NE }, |
| 548 | |
| 549 | // Double-precision comparisons. |
| 550 | { .Op: RTLIB::OEQ_F64, .Impl: RTLIB::__eqdf2vfp, .Cond: CmpInst::ICMP_NE }, |
| 551 | { .Op: RTLIB::UNE_F64, .Impl: RTLIB::__nedf2vfp, .Cond: CmpInst::ICMP_NE }, |
| 552 | { .Op: RTLIB::OLT_F64, .Impl: RTLIB::__ltdf2vfp, .Cond: CmpInst::ICMP_NE }, |
| 553 | { .Op: RTLIB::OLE_F64, .Impl: RTLIB::__ledf2vfp, .Cond: CmpInst::ICMP_NE }, |
| 554 | { .Op: RTLIB::OGE_F64, .Impl: RTLIB::__gedf2vfp, .Cond: CmpInst::ICMP_NE }, |
| 555 | { .Op: RTLIB::OGT_F64, .Impl: RTLIB::__gtdf2vfp, .Cond: CmpInst::ICMP_NE }, |
| 556 | { .Op: RTLIB::UO_F64, .Impl: RTLIB::__unorddf2vfp, .Cond: CmpInst::ICMP_NE }, |
| 557 | |
| 558 | // Floating-point to integer conversions. |
| 559 | // i64 conversions are done via library routines even when generating VFP |
| 560 | // instructions, so use the same ones. |
| 561 | { .Op: RTLIB::FPTOSINT_F64_I32, .Impl: RTLIB::__fixdfsivfp, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 562 | { .Op: RTLIB::FPTOUINT_F64_I32, .Impl: RTLIB::__fixunsdfsivfp, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 563 | { .Op: RTLIB::FPTOSINT_F32_I32, .Impl: RTLIB::__fixsfsivfp, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 564 | { .Op: RTLIB::FPTOUINT_F32_I32, .Impl: RTLIB::__fixunssfsivfp, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 565 | |
| 566 | // Conversions between floating types. |
| 567 | { .Op: RTLIB::FPROUND_F64_F32, .Impl: RTLIB::__truncdfsf2vfp, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 568 | { .Op: RTLIB::FPEXT_F32_F64, .Impl: RTLIB::__extendsfdf2vfp, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 569 | |
| 570 | // Integer to floating-point conversions. |
| 571 | // i64 conversions are done via library routines even when generating VFP |
| 572 | // instructions, so use the same ones. |
| 573 | // FIXME: There appears to be some naming inconsistency in ARM libgcc: |
| 574 | // e.g., __floatunsidf vs. __floatunssidfvfp. |
| 575 | { .Op: RTLIB::SINTTOFP_I32_F64, .Impl: RTLIB::__floatsidfvfp, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 576 | { .Op: RTLIB::UINTTOFP_I32_F64, .Impl: RTLIB::__floatunssidfvfp, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 577 | { .Op: RTLIB::SINTTOFP_I32_F32, .Impl: RTLIB::__floatsisfvfp, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 578 | { .Op: RTLIB::UINTTOFP_I32_F32, .Impl: RTLIB::__floatunssisfvfp, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 579 | }; |
| 580 | // clang-format on |
| 581 | |
| 582 | for (const auto &LC : LibraryCalls) { |
| 583 | setLibcallImpl(Call: LC.Op, Impl: LC.Impl); |
| 584 | if (LC.Cond != CmpInst::BAD_ICMP_PREDICATE) |
| 585 | setCmpLibcallCC(Call: LC.Op, Pred: LC.Cond); |
| 586 | } |
| 587 | } |
| 588 | } |
| 589 | |
| 590 | // RTLIB |
| 591 | if (TM.isAAPCS_ABI() && (TT.isTargetAEABI() || TT.isTargetGNUAEABI() || |
| 592 | TT.isTargetMuslAEABI() || TT.isAndroid())) { |
| 593 | // FIXME: This does not depend on the subtarget and should go directly into |
| 594 | // RuntimeLibcalls. This is only here because of missing support for setting |
| 595 | // the calling convention of an implementation. |
| 596 | // clang-format off |
| 597 | static const struct { |
| 598 | const RTLIB::Libcall Op; |
| 599 | const RTLIB::LibcallImpl Impl; |
| 600 | const CallingConv::ID CC; |
| 601 | const CmpInst::Predicate Cond; |
| 602 | } LibraryCalls[] = { |
| 603 | // Double-precision floating-point arithmetic helper functions |
| 604 | // RTABI chapter 4.1.2, Table 2 |
| 605 | { .Op: RTLIB::ADD_F64, .Impl: RTLIB::__aeabi_dadd, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 606 | { .Op: RTLIB::DIV_F64, .Impl: RTLIB::__aeabi_ddiv, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 607 | { .Op: RTLIB::MUL_F64, .Impl: RTLIB::__aeabi_dmul, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 608 | { .Op: RTLIB::SUB_F64, .Impl: RTLIB::__aeabi_dsub, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 609 | |
| 610 | // Double-precision floating-point comparison helper functions |
| 611 | // RTABI chapter 4.1.2, Table 3 |
| 612 | { .Op: RTLIB::OEQ_F64, .Impl: RTLIB::__aeabi_dcmpeq__ne, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::ICMP_NE }, |
| 613 | { .Op: RTLIB::UNE_F64, .Impl: RTLIB::__aeabi_dcmpeq__eq, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::ICMP_EQ }, |
| 614 | { .Op: RTLIB::OLT_F64, .Impl: RTLIB::__aeabi_dcmplt, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::ICMP_NE }, |
| 615 | { .Op: RTLIB::OLE_F64, .Impl: RTLIB::__aeabi_dcmple, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::ICMP_NE }, |
| 616 | { .Op: RTLIB::OGE_F64, .Impl: RTLIB::__aeabi_dcmpge, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::ICMP_NE }, |
| 617 | { .Op: RTLIB::OGT_F64, .Impl: RTLIB::__aeabi_dcmpgt, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::ICMP_NE }, |
| 618 | { .Op: RTLIB::UO_F64, .Impl: RTLIB::__aeabi_dcmpun, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 619 | |
| 620 | // Single-precision floating-point arithmetic helper functions |
| 621 | // RTABI chapter 4.1.2, Table 4 |
| 622 | { .Op: RTLIB::ADD_F32, .Impl: RTLIB::__aeabi_fadd, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 623 | { .Op: RTLIB::DIV_F32, .Impl: RTLIB::__aeabi_fdiv, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 624 | { .Op: RTLIB::MUL_F32, .Impl: RTLIB::__aeabi_fmul, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 625 | { .Op: RTLIB::SUB_F32, .Impl: RTLIB::__aeabi_fsub, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 626 | |
| 627 | // Single-precision floating-point comparison helper functions |
| 628 | // RTABI chapter 4.1.2, Table 5 |
| 629 | { .Op: RTLIB::OEQ_F32, .Impl: RTLIB::__aeabi_fcmpeq__ne, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::ICMP_NE }, |
| 630 | { .Op: RTLIB::UNE_F32, .Impl: RTLIB::__aeabi_fcmpeq__eq, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::ICMP_EQ }, |
| 631 | { .Op: RTLIB::OLT_F32, .Impl: RTLIB::__aeabi_fcmplt, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::ICMP_NE }, |
| 632 | { .Op: RTLIB::OLE_F32, .Impl: RTLIB::__aeabi_fcmple, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::ICMP_NE }, |
| 633 | { .Op: RTLIB::OGE_F32, .Impl: RTLIB::__aeabi_fcmpge, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::ICMP_NE }, |
| 634 | { .Op: RTLIB::OGT_F32, .Impl: RTLIB::__aeabi_fcmpgt, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::ICMP_NE }, |
| 635 | { .Op: RTLIB::UO_F32, .Impl: RTLIB::__aeabi_fcmpun, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 636 | |
| 637 | // Floating-point to integer conversions. |
| 638 | // RTABI chapter 4.1.2, Table 6 |
| 639 | { .Op: RTLIB::FPTOSINT_F64_I32, .Impl: RTLIB::__aeabi_d2iz, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 640 | { .Op: RTLIB::FPTOUINT_F64_I32, .Impl: RTLIB::__aeabi_d2uiz, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 641 | { .Op: RTLIB::FPTOSINT_F64_I64, .Impl: RTLIB::__aeabi_d2lz, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 642 | { .Op: RTLIB::FPTOUINT_F64_I64, .Impl: RTLIB::__aeabi_d2ulz, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 643 | { .Op: RTLIB::FPTOSINT_F32_I32, .Impl: RTLIB::__aeabi_f2iz, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 644 | { .Op: RTLIB::FPTOUINT_F32_I32, .Impl: RTLIB::__aeabi_f2uiz, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 645 | { .Op: RTLIB::FPTOSINT_F32_I64, .Impl: RTLIB::__aeabi_f2lz, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 646 | { .Op: RTLIB::FPTOUINT_F32_I64, .Impl: RTLIB::__aeabi_f2ulz, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 647 | |
| 648 | // Conversions between floating types. |
| 649 | // RTABI chapter 4.1.2, Table 7 |
| 650 | { .Op: RTLIB::FPROUND_F64_F32, .Impl: RTLIB::__aeabi_d2f, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 651 | { .Op: RTLIB::FPROUND_F64_F16, .Impl: RTLIB::__aeabi_d2h, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 652 | { .Op: RTLIB::FPEXT_F32_F64, .Impl: RTLIB::__aeabi_f2d, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 653 | |
| 654 | // Integer to floating-point conversions. |
| 655 | // RTABI chapter 4.1.2, Table 8 |
| 656 | { .Op: RTLIB::SINTTOFP_I32_F64, .Impl: RTLIB::__aeabi_i2d, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 657 | { .Op: RTLIB::UINTTOFP_I32_F64, .Impl: RTLIB::__aeabi_ui2d, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 658 | { .Op: RTLIB::SINTTOFP_I64_F64, .Impl: RTLIB::__aeabi_l2d, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 659 | { .Op: RTLIB::UINTTOFP_I64_F64, .Impl: RTLIB::__aeabi_ul2d, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 660 | { .Op: RTLIB::SINTTOFP_I32_F32, .Impl: RTLIB::__aeabi_i2f, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 661 | { .Op: RTLIB::UINTTOFP_I32_F32, .Impl: RTLIB::__aeabi_ui2f, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 662 | { .Op: RTLIB::SINTTOFP_I64_F32, .Impl: RTLIB::__aeabi_l2f, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 663 | { .Op: RTLIB::UINTTOFP_I64_F32, .Impl: RTLIB::__aeabi_ul2f, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 664 | |
| 665 | // Long long helper functions |
| 666 | // RTABI chapter 4.2, Table 9 |
| 667 | { .Op: RTLIB::MUL_I64, .Impl: RTLIB::__aeabi_lmul, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 668 | { .Op: RTLIB::SHL_I64, .Impl: RTLIB::__aeabi_llsl, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 669 | { .Op: RTLIB::SRL_I64, .Impl: RTLIB::__aeabi_llsr, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 670 | { .Op: RTLIB::SRA_I64, .Impl: RTLIB::__aeabi_lasr, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 671 | |
| 672 | // Integer division functions |
| 673 | // RTABI chapter 4.3.1 |
| 674 | { .Op: RTLIB::SDIV_I8, .Impl: RTLIB::__aeabi_idiv__i8, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 675 | { .Op: RTLIB::SDIV_I16, .Impl: RTLIB::__aeabi_idiv__i16, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 676 | { .Op: RTLIB::SDIV_I32, .Impl: RTLIB::__aeabi_idiv__i32, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 677 | { .Op: RTLIB::SDIV_I64, .Impl: RTLIB::__aeabi_ldivmod, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 678 | { .Op: RTLIB::UDIV_I8, .Impl: RTLIB::__aeabi_uidiv__i8, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 679 | { .Op: RTLIB::UDIV_I16, .Impl: RTLIB::__aeabi_uidiv__i16, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 680 | { .Op: RTLIB::UDIV_I32, .Impl: RTLIB::__aeabi_uidiv__i32, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 681 | { .Op: RTLIB::UDIV_I64, .Impl: RTLIB::__aeabi_uldivmod, .CC: CallingConv::ARM_AAPCS, .Cond: CmpInst::BAD_ICMP_PREDICATE }, |
| 682 | }; |
| 683 | // clang-format on |
| 684 | |
| 685 | for (const auto &LC : LibraryCalls) { |
| 686 | setLibcallImpl(Call: LC.Op, Impl: LC.Impl); |
| 687 | setLibcallCallingConv(Call: LC.Op, CC: LC.CC); |
| 688 | if (LC.Cond != CmpInst::BAD_ICMP_PREDICATE) |
| 689 | setCmpLibcallCC(Call: LC.Op, Pred: LC.Cond); |
| 690 | } |
| 691 | |
| 692 | // EABI dependent RTLIB |
| 693 | if (TM.Options.EABIVersion == EABI::EABI4 || |
| 694 | TM.Options.EABIVersion == EABI::EABI5) { |
| 695 | static const struct { |
| 696 | const RTLIB::Libcall Op; |
| 697 | const RTLIB::LibcallImpl Impl; |
| 698 | const CallingConv::ID CC; |
| 699 | } MemOpsLibraryCalls[] = { |
| 700 | // Memory operations |
| 701 | // RTABI chapter 4.3.4 |
| 702 | {.Op: RTLIB::MEMCPY, .Impl: RTLIB::__aeabi_memcpy, .CC: CallingConv::ARM_AAPCS}, |
| 703 | {.Op: RTLIB::MEMMOVE, .Impl: RTLIB::__aeabi_memmove, .CC: CallingConv::ARM_AAPCS}, |
| 704 | {.Op: RTLIB::MEMSET, .Impl: RTLIB::__aeabi_memset, .CC: CallingConv::ARM_AAPCS}, |
| 705 | {.Op: RTLIB::AEABI_MEMCPY4, .Impl: RTLIB::__aeabi_memcpy4, |
| 706 | .CC: CallingConv::ARM_AAPCS}, |
| 707 | {.Op: RTLIB::AEABI_MEMCPY8, .Impl: RTLIB::__aeabi_memcpy8, |
| 708 | .CC: CallingConv::ARM_AAPCS}, |
| 709 | {.Op: RTLIB::AEABI_MEMMOVE4, .Impl: RTLIB::__aeabi_memmove4, |
| 710 | .CC: CallingConv::ARM_AAPCS}, |
| 711 | {.Op: RTLIB::AEABI_MEMMOVE8, .Impl: RTLIB::__aeabi_memmove8, |
| 712 | .CC: CallingConv::ARM_AAPCS}, |
| 713 | {.Op: RTLIB::AEABI_MEMSET4, .Impl: RTLIB::__aeabi_memset4, |
| 714 | .CC: CallingConv::ARM_AAPCS}, |
| 715 | {.Op: RTLIB::AEABI_MEMSET8, .Impl: RTLIB::__aeabi_memset8, |
| 716 | .CC: CallingConv::ARM_AAPCS}, |
| 717 | {.Op: RTLIB::AEABI_MEMCLR, .Impl: RTLIB::__aeabi_memclr, .CC: CallingConv::ARM_AAPCS}, |
| 718 | {.Op: RTLIB::AEABI_MEMCLR4, .Impl: RTLIB::__aeabi_memclr4, |
| 719 | .CC: CallingConv::ARM_AAPCS}, |
| 720 | {.Op: RTLIB::AEABI_MEMCLR8, .Impl: RTLIB::__aeabi_memclr8, |
| 721 | .CC: CallingConv::ARM_AAPCS}, |
| 722 | }; |
| 723 | |
| 724 | for (const auto &LC : MemOpsLibraryCalls) { |
| 725 | setLibcallImpl(Call: LC.Op, Impl: LC.Impl); |
| 726 | setLibcallCallingConv(Call: LC.Op, CC: LC.CC); |
| 727 | } |
| 728 | } |
| 729 | } |
| 730 | |
| 731 | // The half <-> float conversion functions are always soft-float on |
| 732 | // non-watchos platforms, but are needed for some targets which use a |
| 733 | // hard-float calling convention by default. |
| 734 | if (!TT.isWatchABI()) { |
| 735 | if (TM.isAAPCS_ABI()) { |
| 736 | setLibcallCallingConv(Call: RTLIB::FPROUND_F32_F16, CC: CallingConv::ARM_AAPCS); |
| 737 | setLibcallCallingConv(Call: RTLIB::FPROUND_F64_F16, CC: CallingConv::ARM_AAPCS); |
| 738 | setLibcallCallingConv(Call: RTLIB::FPEXT_F16_F32, CC: CallingConv::ARM_AAPCS); |
| 739 | } else { |
| 740 | setLibcallCallingConv(Call: RTLIB::FPROUND_F32_F16, CC: CallingConv::ARM_APCS); |
| 741 | setLibcallCallingConv(Call: RTLIB::FPROUND_F64_F16, CC: CallingConv::ARM_APCS); |
| 742 | setLibcallCallingConv(Call: RTLIB::FPEXT_F16_F32, CC: CallingConv::ARM_APCS); |
| 743 | } |
| 744 | } |
| 745 | |
| 746 | // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have |
| 747 | // a __gnu_ prefix (which is the default). |
| 748 | if (TT.isTargetAEABI()) { |
| 749 | // FIXME: This does not depend on the subtarget and should go directly into |
| 750 | // RuntimeLibcalls. This is only here because of missing support for setting |
| 751 | // the calling convention of an implementation. |
| 752 | static const struct { |
| 753 | const RTLIB::Libcall Op; |
| 754 | const RTLIB::LibcallImpl Impl; |
| 755 | const CallingConv::ID CC; |
| 756 | } LibraryCalls[] = { |
| 757 | {.Op: RTLIB::FPROUND_F32_F16, .Impl: RTLIB::__aeabi_f2h, .CC: CallingConv::ARM_AAPCS}, |
| 758 | {.Op: RTLIB::FPROUND_F64_F16, .Impl: RTLIB::__aeabi_d2h, .CC: CallingConv::ARM_AAPCS}, |
| 759 | {.Op: RTLIB::FPEXT_F16_F32, .Impl: RTLIB::__aeabi_h2f, .CC: CallingConv::ARM_AAPCS}, |
| 760 | }; |
| 761 | |
| 762 | for (const auto &LC : LibraryCalls) { |
| 763 | setLibcallImpl(Call: LC.Op, Impl: LC.Impl); |
| 764 | setLibcallCallingConv(Call: LC.Op, CC: LC.CC); |
| 765 | } |
| 766 | } else if (!TT.isOSBinFormatMachO()) { |
| 767 | setLibcallImpl(Call: RTLIB::FPROUND_F32_F16, Impl: RTLIB::__gnu_f2h_ieee); |
| 768 | setLibcallImpl(Call: RTLIB::FPEXT_F16_F32, Impl: RTLIB::__gnu_h2f_ieee); |
| 769 | } |
| 770 | |
| 771 | if (Subtarget->isThumb1Only()) |
| 772 | addRegisterClass(VT: MVT::i32, RC: &ARM::tGPRRegClass); |
| 773 | else |
| 774 | addRegisterClass(VT: MVT::i32, RC: &ARM::GPRRegClass); |
| 775 | |
| 776 | if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only() && |
| 777 | Subtarget->hasFPRegs()) { |
| 778 | addRegisterClass(VT: MVT::f32, RC: &ARM::SPRRegClass); |
| 779 | addRegisterClass(VT: MVT::f64, RC: &ARM::DPRRegClass); |
| 780 | |
| 781 | setOperationAction(Op: ISD::FP_TO_SINT_SAT, VT: MVT::i32, Action: Custom); |
| 782 | setOperationAction(Op: ISD::FP_TO_UINT_SAT, VT: MVT::i32, Action: Custom); |
| 783 | setOperationAction(Op: ISD::FP_TO_SINT_SAT, VT: MVT::i64, Action: Custom); |
| 784 | setOperationAction(Op: ISD::FP_TO_UINT_SAT, VT: MVT::i64, Action: Custom); |
| 785 | |
| 786 | if (!Subtarget->hasVFP2Base()) |
| 787 | setAllExpand(MVT::f32); |
| 788 | if (!Subtarget->hasFP64()) |
| 789 | setAllExpand(MVT::f64); |
| 790 | } |
| 791 | |
| 792 | if (Subtarget->hasFullFP16()) { |
| 793 | addRegisterClass(VT: MVT::f16, RC: &ARM::HPRRegClass); |
| 794 | setOperationAction(Op: ISD::BITCAST, VT: MVT::i16, Action: Custom); |
| 795 | setOperationAction(Op: ISD::BITCAST, VT: MVT::f16, Action: Custom); |
| 796 | |
| 797 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::f16, Action: Legal); |
| 798 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::f16, Action: Legal); |
| 799 | } |
| 800 | |
| 801 | if (Subtarget->hasBF16()) { |
| 802 | addRegisterClass(VT: MVT::bf16, RC: &ARM::HPRRegClass); |
| 803 | setAllExpand(MVT::bf16); |
| 804 | if (!Subtarget->hasFullFP16()) |
| 805 | setOperationAction(Op: ISD::BITCAST, VT: MVT::bf16, Action: Custom); |
| 806 | } else { |
| 807 | setOperationAction(Op: ISD::BF16_TO_FP, VT: MVT::f32, Action: Expand); |
| 808 | setOperationAction(Op: ISD::BF16_TO_FP, VT: MVT::f64, Action: Expand); |
| 809 | setOperationAction(Op: ISD::FP_TO_BF16, VT: MVT::f32, Action: Custom); |
| 810 | setOperationAction(Op: ISD::FP_TO_BF16, VT: MVT::f64, Action: Custom); |
| 811 | } |
| 812 | |
| 813 | for (MVT VT : MVT::fixedlen_vector_valuetypes()) { |
| 814 | for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) { |
| 815 | setTruncStoreAction(ValVT: VT, MemVT: InnerVT, Action: Expand); |
| 816 | addAllExtLoads(From: VT, To: InnerVT, Action: Expand); |
| 817 | } |
| 818 | |
| 819 | setOperationAction(Op: ISD::SMUL_LOHI, VT, Action: Expand); |
| 820 | setOperationAction(Op: ISD::UMUL_LOHI, VT, Action: Expand); |
| 821 | |
| 822 | setOperationAction(Op: ISD::BSWAP, VT, Action: Expand); |
| 823 | } |
| 824 | |
| 825 | setOperationAction(Op: ISD::ConstantFP, VT: MVT::f32, Action: Custom); |
| 826 | setOperationAction(Op: ISD::ConstantFP, VT: MVT::f64, Action: Custom); |
| 827 | |
| 828 | setOperationAction(Op: ISD::READ_REGISTER, VT: MVT::i64, Action: Custom); |
| 829 | setOperationAction(Op: ISD::WRITE_REGISTER, VT: MVT::i64, Action: Custom); |
| 830 | |
| 831 | if (Subtarget->hasMVEIntegerOps()) |
| 832 | addMVEVectorTypes(HasMVEFP: Subtarget->hasMVEFloatOps()); |
| 833 | |
| 834 | // Combine low-overhead loop intrinsics so that we can lower i1 types. |
| 835 | if (Subtarget->hasLOB()) { |
| 836 | setTargetDAGCombine({ISD::BRCOND, ISD::BR_CC}); |
| 837 | } |
| 838 | |
| 839 | if (Subtarget->hasNEON()) { |
| 840 | addDRTypeForNEON(VT: MVT::v2f32); |
| 841 | addDRTypeForNEON(VT: MVT::v8i8); |
| 842 | addDRTypeForNEON(VT: MVT::v4i16); |
| 843 | addDRTypeForNEON(VT: MVT::v2i32); |
| 844 | addDRTypeForNEON(VT: MVT::v1i64); |
| 845 | |
| 846 | addQRTypeForNEON(VT: MVT::v4f32); |
| 847 | addQRTypeForNEON(VT: MVT::v2f64); |
| 848 | addQRTypeForNEON(VT: MVT::v16i8); |
| 849 | addQRTypeForNEON(VT: MVT::v8i16); |
| 850 | addQRTypeForNEON(VT: MVT::v4i32); |
| 851 | addQRTypeForNEON(VT: MVT::v2i64); |
| 852 | |
| 853 | if (Subtarget->hasFullFP16()) { |
| 854 | addQRTypeForNEON(VT: MVT::v8f16); |
| 855 | addDRTypeForNEON(VT: MVT::v4f16); |
| 856 | } |
| 857 | |
| 858 | if (Subtarget->hasBF16()) { |
| 859 | addQRTypeForNEON(VT: MVT::v8bf16); |
| 860 | addDRTypeForNEON(VT: MVT::v4bf16); |
| 861 | } |
| 862 | } |
| 863 | |
| 864 | if (Subtarget->hasMVEIntegerOps() || Subtarget->hasNEON()) { |
| 865 | // v2f64 is legal so that QR subregs can be extracted as f64 elements, but |
| 866 | // none of Neon, MVE or VFP supports any arithmetic operations on it. |
| 867 | setOperationAction(Op: ISD::FADD, VT: MVT::v2f64, Action: Expand); |
| 868 | setOperationAction(Op: ISD::FSUB, VT: MVT::v2f64, Action: Expand); |
| 869 | setOperationAction(Op: ISD::FMUL, VT: MVT::v2f64, Action: Expand); |
| 870 | // FIXME: Code duplication: FDIV and FREM are always expanded, see the |
| 871 | // ARMTargetLowering::addTypeForNEON method for details. |
| 872 | setOperationAction(Op: ISD::FDIV, VT: MVT::v2f64, Action: Expand); |
| 873 | setOperationAction(Op: ISD::FREM, VT: MVT::v2f64, Action: Expand); |
| 874 | // FIXME: Create unittest. |
| 875 | // In other words, find a case where "copysign" appears in the DAG with vector |
| 876 | // operands. |
| 877 | setOperationAction(Op: ISD::FCOPYSIGN, VT: MVT::v2f64, Action: Expand); |
| 878 | // FIXME: Code duplication: SETCC has custom operation action, see |
| 879 | // ARMTargetLowering::addTypeForNEON method for details. |
| 880 | setOperationAction(Op: ISD::SETCC, VT: MVT::v2f64, Action: Expand); |
| 881 | // FIXME: Create unittest for FNEG and for FABS. |
| 882 | setOperationAction(Op: ISD::FNEG, VT: MVT::v2f64, Action: Expand); |
| 883 | setOperationAction(Op: ISD::FABS, VT: MVT::v2f64, Action: Expand); |
| 884 | setOperationAction(Op: ISD::FSQRT, VT: MVT::v2f64, Action: Expand); |
| 885 | setOperationAction(Op: ISD::FSIN, VT: MVT::v2f64, Action: Expand); |
| 886 | setOperationAction(Op: ISD::FCOS, VT: MVT::v2f64, Action: Expand); |
| 887 | setOperationAction(Op: ISD::FTAN, VT: MVT::v2f64, Action: Expand); |
| 888 | setOperationAction(Op: ISD::FPOW, VT: MVT::v2f64, Action: Expand); |
| 889 | setOperationAction(Op: ISD::FLOG, VT: MVT::v2f64, Action: Expand); |
| 890 | setOperationAction(Op: ISD::FLOG2, VT: MVT::v2f64, Action: Expand); |
| 891 | setOperationAction(Op: ISD::FLOG10, VT: MVT::v2f64, Action: Expand); |
| 892 | setOperationAction(Op: ISD::FEXP, VT: MVT::v2f64, Action: Expand); |
| 893 | setOperationAction(Op: ISD::FEXP2, VT: MVT::v2f64, Action: Expand); |
| 894 | setOperationAction(Op: ISD::FEXP10, VT: MVT::v2f64, Action: Expand); |
| 895 | setOperationAction(Op: ISD::FCEIL, VT: MVT::v2f64, Action: Expand); |
| 896 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::v2f64, Action: Expand); |
| 897 | setOperationAction(Op: ISD::FRINT, VT: MVT::v2f64, Action: Expand); |
| 898 | setOperationAction(Op: ISD::FROUNDEVEN, VT: MVT::v2f64, Action: Expand); |
| 899 | setOperationAction(Op: ISD::FNEARBYINT, VT: MVT::v2f64, Action: Expand); |
| 900 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::v2f64, Action: Expand); |
| 901 | setOperationAction(Op: ISD::FMA, VT: MVT::v2f64, Action: Expand); |
| 902 | } |
| 903 | |
| 904 | if (Subtarget->hasNEON()) { |
| 905 | // The same goes for v4f32, but keep in mind that vadd, vsub and vmul are |
| 906 | // natively supported for v4f32. |
| 907 | setOperationAction(Op: ISD::FSQRT, VT: MVT::v4f32, Action: Expand); |
| 908 | setOperationAction(Op: ISD::FSIN, VT: MVT::v4f32, Action: Expand); |
| 909 | setOperationAction(Op: ISD::FCOS, VT: MVT::v4f32, Action: Expand); |
| 910 | setOperationAction(Op: ISD::FTAN, VT: MVT::v4f32, Action: Expand); |
| 911 | setOperationAction(Op: ISD::FPOW, VT: MVT::v4f32, Action: Expand); |
| 912 | setOperationAction(Op: ISD::FLOG, VT: MVT::v4f32, Action: Expand); |
| 913 | setOperationAction(Op: ISD::FLOG2, VT: MVT::v4f32, Action: Expand); |
| 914 | setOperationAction(Op: ISD::FLOG10, VT: MVT::v4f32, Action: Expand); |
| 915 | setOperationAction(Op: ISD::FEXP, VT: MVT::v4f32, Action: Expand); |
| 916 | setOperationAction(Op: ISD::FEXP2, VT: MVT::v4f32, Action: Expand); |
| 917 | setOperationAction(Op: ISD::FEXP10, VT: MVT::v4f32, Action: Expand); |
| 918 | setOperationAction(Op: ISD::FCEIL, VT: MVT::v4f32, Action: Expand); |
| 919 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::v4f32, Action: Expand); |
| 920 | setOperationAction(Op: ISD::FRINT, VT: MVT::v4f32, Action: Expand); |
| 921 | setOperationAction(Op: ISD::FROUNDEVEN, VT: MVT::v4f32, Action: Expand); |
| 922 | setOperationAction(Op: ISD::FNEARBYINT, VT: MVT::v4f32, Action: Expand); |
| 923 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::v4f32, Action: Expand); |
| 924 | |
| 925 | // Likewise, expand these operations for v2f32. |
| 926 | setOperationAction(Op: ISD::FSQRT, VT: MVT::v2f32, Action: Expand); |
| 927 | setOperationAction(Op: ISD::FSIN, VT: MVT::v2f32, Action: Expand); |
| 928 | setOperationAction(Op: ISD::FCOS, VT: MVT::v2f32, Action: Expand); |
| 929 | setOperationAction(Op: ISD::FTAN, VT: MVT::v2f32, Action: Expand); |
| 930 | setOperationAction(Op: ISD::FPOW, VT: MVT::v2f32, Action: Expand); |
| 931 | setOperationAction(Op: ISD::FLOG, VT: MVT::v2f32, Action: Expand); |
| 932 | setOperationAction(Op: ISD::FLOG2, VT: MVT::v2f32, Action: Expand); |
| 933 | setOperationAction(Op: ISD::FLOG10, VT: MVT::v2f32, Action: Expand); |
| 934 | setOperationAction(Op: ISD::FEXP, VT: MVT::v2f32, Action: Expand); |
| 935 | setOperationAction(Op: ISD::FEXP2, VT: MVT::v2f32, Action: Expand); |
| 936 | setOperationAction(Op: ISD::FEXP10, VT: MVT::v2f32, Action: Expand); |
| 937 | setOperationAction(Op: ISD::FCEIL, VT: MVT::v2f32, Action: Expand); |
| 938 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::v2f32, Action: Expand); |
| 939 | setOperationAction(Op: ISD::FRINT, VT: MVT::v2f32, Action: Expand); |
| 940 | setOperationAction(Op: ISD::FROUNDEVEN, VT: MVT::v2f32, Action: Expand); |
| 941 | setOperationAction(Op: ISD::FNEARBYINT, VT: MVT::v2f32, Action: Expand); |
| 942 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::v2f32, Action: Expand); |
| 943 | |
| 944 | for (ISD::NodeType Op : {ISD::FFLOOR, ISD::FNEARBYINT, ISD::FCEIL, |
| 945 | ISD::FRINT, ISD::FTRUNC, ISD::FROUNDEVEN}) { |
| 946 | setOperationAction(Op, VT: MVT::v4f16, Action: Expand); |
| 947 | setOperationAction(Op, VT: MVT::v8f16, Action: Expand); |
| 948 | } |
| 949 | |
| 950 | // Neon does not support some operations on v1i64 and v2i64 types. |
| 951 | setOperationAction(Op: ISD::MUL, VT: MVT::v1i64, Action: Expand); |
| 952 | // Custom handling for some quad-vector types to detect VMULL. |
| 953 | setOperationAction(Op: ISD::MUL, VT: MVT::v8i16, Action: Custom); |
| 954 | setOperationAction(Op: ISD::MUL, VT: MVT::v4i32, Action: Custom); |
| 955 | setOperationAction(Op: ISD::MUL, VT: MVT::v2i64, Action: Custom); |
| 956 | // Custom handling for some vector types to avoid expensive expansions |
| 957 | setOperationAction(Op: ISD::SDIV, VT: MVT::v4i16, Action: Custom); |
| 958 | setOperationAction(Op: ISD::SDIV, VT: MVT::v8i8, Action: Custom); |
| 959 | setOperationAction(Op: ISD::UDIV, VT: MVT::v4i16, Action: Custom); |
| 960 | setOperationAction(Op: ISD::UDIV, VT: MVT::v8i8, Action: Custom); |
| 961 | // Neon does not have single-instruction SINT_TO_FP and UINT_TO_FP with |
| 962 | // a destination type that is wider than the source, nor does |
| 963 | // it have a FP_TO_[SU]INT instruction with a narrower destination than |
| 964 | // source. |
| 965 | setOperationAction(Op: ISD::SINT_TO_FP, VT: MVT::v4i16, Action: Custom); |
| 966 | setOperationAction(Op: ISD::SINT_TO_FP, VT: MVT::v8i16, Action: Custom); |
| 967 | setOperationAction(Op: ISD::UINT_TO_FP, VT: MVT::v4i16, Action: Custom); |
| 968 | setOperationAction(Op: ISD::UINT_TO_FP, VT: MVT::v8i16, Action: Custom); |
| 969 | setOperationAction(Op: ISD::FP_TO_UINT, VT: MVT::v4i16, Action: Custom); |
| 970 | setOperationAction(Op: ISD::FP_TO_UINT, VT: MVT::v8i16, Action: Custom); |
| 971 | setOperationAction(Op: ISD::FP_TO_SINT, VT: MVT::v4i16, Action: Custom); |
| 972 | setOperationAction(Op: ISD::FP_TO_SINT, VT: MVT::v8i16, Action: Custom); |
| 973 | |
| 974 | setOperationAction(Op: ISD::FP_ROUND, VT: MVT::v2f32, Action: Expand); |
| 975 | setOperationAction(Op: ISD::FP_EXTEND, VT: MVT::v2f64, Action: Expand); |
| 976 | |
| 977 | // NEON does not have single instruction CTPOP for vectors with element |
| 978 | // types wider than 8 bits. However, custom lowering can leverage the
| 979 | // v8i8/v16i8 vcnt instruction. |
| 980 | setOperationAction(Op: ISD::CTPOP, VT: MVT::v2i32, Action: Custom); |
| 981 | setOperationAction(Op: ISD::CTPOP, VT: MVT::v4i32, Action: Custom); |
| 982 | setOperationAction(Op: ISD::CTPOP, VT: MVT::v4i16, Action: Custom); |
| 983 | setOperationAction(Op: ISD::CTPOP, VT: MVT::v8i16, Action: Custom); |
| 984 | setOperationAction(Op: ISD::CTPOP, VT: MVT::v1i64, Action: Custom); |
| 985 | setOperationAction(Op: ISD::CTPOP, VT: MVT::v2i64, Action: Custom); |
| 986 | |
| 987 | setOperationAction(Op: ISD::CTLZ, VT: MVT::v1i64, Action: Expand); |
| 988 | setOperationAction(Op: ISD::CTLZ, VT: MVT::v2i64, Action: Expand); |
| 989 | |
| 990 | // NEON does not have single instruction CTTZ for vectors. |
| 991 | setOperationAction(Op: ISD::CTTZ, VT: MVT::v8i8, Action: Custom); |
| 992 | setOperationAction(Op: ISD::CTTZ, VT: MVT::v4i16, Action: Custom); |
| 993 | setOperationAction(Op: ISD::CTTZ, VT: MVT::v2i32, Action: Custom); |
| 994 | setOperationAction(Op: ISD::CTTZ, VT: MVT::v1i64, Action: Custom); |
| 995 | |
| 996 | setOperationAction(Op: ISD::CTTZ, VT: MVT::v16i8, Action: Custom); |
| 997 | setOperationAction(Op: ISD::CTTZ, VT: MVT::v8i16, Action: Custom); |
| 998 | setOperationAction(Op: ISD::CTTZ, VT: MVT::v4i32, Action: Custom); |
| 999 | setOperationAction(Op: ISD::CTTZ, VT: MVT::v2i64, Action: Custom); |
| 1000 | |
| 1001 | setOperationAction(Op: ISD::CTTZ_ZERO_UNDEF, VT: MVT::v8i8, Action: Custom); |
| 1002 | setOperationAction(Op: ISD::CTTZ_ZERO_UNDEF, VT: MVT::v4i16, Action: Custom); |
| 1003 | setOperationAction(Op: ISD::CTTZ_ZERO_UNDEF, VT: MVT::v2i32, Action: Custom); |
| 1004 | setOperationAction(Op: ISD::CTTZ_ZERO_UNDEF, VT: MVT::v1i64, Action: Custom); |
| 1005 | |
| 1006 | setOperationAction(Op: ISD::CTTZ_ZERO_UNDEF, VT: MVT::v16i8, Action: Custom); |
| 1007 | setOperationAction(Op: ISD::CTTZ_ZERO_UNDEF, VT: MVT::v8i16, Action: Custom); |
| 1008 | setOperationAction(Op: ISD::CTTZ_ZERO_UNDEF, VT: MVT::v4i32, Action: Custom); |
| 1009 | setOperationAction(Op: ISD::CTTZ_ZERO_UNDEF, VT: MVT::v2i64, Action: Custom); |
| 1010 | |
| 1011 | for (MVT VT : MVT::fixedlen_vector_valuetypes()) { |
| 1012 | setOperationAction(Op: ISD::MULHS, VT, Action: Expand); |
| 1013 | setOperationAction(Op: ISD::MULHU, VT, Action: Expand); |
| 1014 | } |
| 1015 | |
| 1016 | // NEON only has FMA instructions as of VFP4. |
| 1017 | if (!Subtarget->hasVFP4Base()) { |
| 1018 | setOperationAction(Op: ISD::FMA, VT: MVT::v2f32, Action: Expand); |
| 1019 | setOperationAction(Op: ISD::FMA, VT: MVT::v4f32, Action: Expand); |
| 1020 | } |
| 1021 | |
| 1022 | setTargetDAGCombine({ISD::SHL, ISD::SRL, ISD::SRA, ISD::FP_TO_SINT, |
| 1023 | ISD::FP_TO_UINT, ISD::FMUL, ISD::LOAD}); |
| 1024 | |
| 1025 | // It is legal to extload from v4i8 to v4i16 or v4i32. |
| 1026 | for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16, |
| 1027 | MVT::v2i32}) { |
| 1028 | for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) { |
| 1029 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: VT, MemVT: Ty, Action: Legal); |
| 1030 | setLoadExtAction(ExtType: ISD::ZEXTLOAD, ValVT: VT, MemVT: Ty, Action: Legal); |
| 1031 | setLoadExtAction(ExtType: ISD::SEXTLOAD, ValVT: VT, MemVT: Ty, Action: Legal); |
| 1032 | } |
| 1033 | } |
| 1034 | |
| 1035 | for (auto VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v16i8, MVT::v8i16, |
| 1036 | MVT::v4i32}) { |
| 1037 | setOperationAction(Op: ISD::VECREDUCE_SMAX, VT, Action: Custom); |
| 1038 | setOperationAction(Op: ISD::VECREDUCE_UMAX, VT, Action: Custom); |
| 1039 | setOperationAction(Op: ISD::VECREDUCE_SMIN, VT, Action: Custom); |
| 1040 | setOperationAction(Op: ISD::VECREDUCE_UMIN, VT, Action: Custom); |
| 1041 | } |
| 1042 | } |
| 1043 | |
| 1044 | if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) { |
| 1045 | setTargetDAGCombine( |
| 1046 | {ISD::BUILD_VECTOR, ISD::VECTOR_SHUFFLE, ISD::INSERT_SUBVECTOR, |
| 1047 | ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT, |
| 1048 | ISD::SIGN_EXTEND_INREG, ISD::STORE, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, |
| 1049 | ISD::ANY_EXTEND, ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN, |
| 1050 | ISD::INTRINSIC_VOID, ISD::VECREDUCE_ADD, ISD::ADD, ISD::BITCAST}); |
| 1051 | } |
| 1052 | if (Subtarget->hasMVEIntegerOps()) { |
| 1053 | setTargetDAGCombine({ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX, |
| 1054 | ISD::FP_EXTEND, ISD::SELECT, ISD::SELECT_CC, |
| 1055 | ISD::SETCC}); |
| 1056 | } |
| 1057 | if (Subtarget->hasMVEFloatOps()) { |
| 1058 | setTargetDAGCombine(ISD::FADD); |
| 1059 | } |
| 1060 | |
| 1061 | if (!Subtarget->hasFP64()) { |
| 1062 | // When targeting a floating-point unit with only single-precision |
| 1063 | // operations, f64 is legal for the few double-precision instructions which |
| 1064 | // are present. However, no double-precision operations other than moves,
| 1065 | // loads and stores are provided by the hardware. |
| 1066 | setOperationAction(Op: ISD::FADD, VT: MVT::f64, Action: Expand); |
| 1067 | setOperationAction(Op: ISD::FSUB, VT: MVT::f64, Action: Expand); |
| 1068 | setOperationAction(Op: ISD::FMUL, VT: MVT::f64, Action: Expand); |
| 1069 | setOperationAction(Op: ISD::FMA, VT: MVT::f64, Action: Expand); |
| 1070 | setOperationAction(Op: ISD::FDIV, VT: MVT::f64, Action: Expand); |
| 1071 | setOperationAction(Op: ISD::FREM, VT: MVT::f64, Action: Expand); |
| 1072 | setOperationAction(Op: ISD::FCOPYSIGN, VT: MVT::f64, Action: Expand); |
| 1073 | setOperationAction(Op: ISD::FGETSIGN, VT: MVT::f64, Action: Expand); |
| 1074 | setOperationAction(Op: ISD::FNEG, VT: MVT::f64, Action: Expand); |
| 1075 | setOperationAction(Op: ISD::FABS, VT: MVT::f64, Action: Expand); |
| 1076 | setOperationAction(Op: ISD::FSQRT, VT: MVT::f64, Action: Expand); |
| 1077 | setOperationAction(Op: ISD::FSIN, VT: MVT::f64, Action: Expand); |
| 1078 | setOperationAction(Op: ISD::FCOS, VT: MVT::f64, Action: Expand); |
| 1079 | setOperationAction(Op: ISD::FPOW, VT: MVT::f64, Action: Expand); |
| 1080 | setOperationAction(Op: ISD::FLOG, VT: MVT::f64, Action: Expand); |
| 1081 | setOperationAction(Op: ISD::FLOG2, VT: MVT::f64, Action: Expand); |
| 1082 | setOperationAction(Op: ISD::FLOG10, VT: MVT::f64, Action: Expand); |
| 1083 | setOperationAction(Op: ISD::FEXP, VT: MVT::f64, Action: Expand); |
| 1084 | setOperationAction(Op: ISD::FEXP2, VT: MVT::f64, Action: Expand); |
| 1085 | setOperationAction(Op: ISD::FEXP10, VT: MVT::f64, Action: Expand); |
| 1086 | setOperationAction(Op: ISD::FCEIL, VT: MVT::f64, Action: Expand); |
| 1087 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::f64, Action: Expand); |
| 1088 | setOperationAction(Op: ISD::FRINT, VT: MVT::f64, Action: Expand); |
| 1089 | setOperationAction(Op: ISD::FROUNDEVEN, VT: MVT::f64, Action: Expand); |
| 1090 | setOperationAction(Op: ISD::FNEARBYINT, VT: MVT::f64, Action: Expand); |
| 1091 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::f64, Action: Expand); |
| 1092 | setOperationAction(Op: ISD::SINT_TO_FP, VT: MVT::i32, Action: Custom); |
| 1093 | setOperationAction(Op: ISD::UINT_TO_FP, VT: MVT::i32, Action: Custom); |
| 1094 | setOperationAction(Op: ISD::FP_TO_SINT, VT: MVT::i32, Action: Custom); |
| 1095 | setOperationAction(Op: ISD::FP_TO_UINT, VT: MVT::i32, Action: Custom); |
| 1096 | setOperationAction(Op: ISD::FP_TO_SINT, VT: MVT::f64, Action: Custom); |
| 1097 | setOperationAction(Op: ISD::FP_TO_UINT, VT: MVT::f64, Action: Custom); |
| 1098 | setOperationAction(Op: ISD::FP_ROUND, VT: MVT::f32, Action: Custom); |
| 1099 | setOperationAction(Op: ISD::STRICT_FP_TO_SINT, VT: MVT::i32, Action: Custom); |
| 1100 | setOperationAction(Op: ISD::STRICT_FP_TO_UINT, VT: MVT::i32, Action: Custom); |
| 1101 | setOperationAction(Op: ISD::STRICT_FP_TO_SINT, VT: MVT::f64, Action: Custom); |
| 1102 | setOperationAction(Op: ISD::STRICT_FP_TO_UINT, VT: MVT::f64, Action: Custom); |
| 1103 | setOperationAction(Op: ISD::STRICT_FP_ROUND, VT: MVT::f32, Action: Custom); |
| 1104 | } |
| 1105 | |
| 1106 | if (!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) { |
| 1107 | setOperationAction(Op: ISD::FP_EXTEND, VT: MVT::f64, Action: Custom); |
| 1108 | setOperationAction(Op: ISD::STRICT_FP_EXTEND, VT: MVT::f64, Action: Custom); |
| 1109 | if (Subtarget->hasFullFP16()) { |
| 1110 | setOperationAction(Op: ISD::FP_ROUND, VT: MVT::f16, Action: Custom); |
| 1111 | setOperationAction(Op: ISD::STRICT_FP_ROUND, VT: MVT::f16, Action: Custom); |
| 1112 | } |
| 1113 | } |
| 1114 | |
| 1115 | if (!Subtarget->hasFP16()) { |
| 1116 | setOperationAction(Op: ISD::FP_EXTEND, VT: MVT::f32, Action: Custom); |
| 1117 | setOperationAction(Op: ISD::STRICT_FP_EXTEND, VT: MVT::f32, Action: Custom); |
| 1118 | } |
| 1119 | |
| 1120 | computeRegisterProperties(TRI: Subtarget->getRegisterInfo()); |
| 1121 | |
| 1122 | // ARM does not have floating-point extending loads. |
| 1123 | for (MVT VT : MVT::fp_valuetypes()) { |
| 1124 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: VT, MemVT: MVT::f32, Action: Expand); |
| 1125 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: VT, MemVT: MVT::f16, Action: Expand); |
| 1126 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: VT, MemVT: MVT::bf16, Action: Expand); |
| 1127 | } |
| 1128 | |
| 1129 | // ... or truncating stores |
| 1130 | setTruncStoreAction(ValVT: MVT::f64, MemVT: MVT::f32, Action: Expand); |
| 1131 | setTruncStoreAction(ValVT: MVT::f32, MemVT: MVT::f16, Action: Expand); |
| 1132 | setTruncStoreAction(ValVT: MVT::f64, MemVT: MVT::f16, Action: Expand); |
| 1133 | setTruncStoreAction(ValVT: MVT::f32, MemVT: MVT::bf16, Action: Expand); |
| 1134 | setTruncStoreAction(ValVT: MVT::f64, MemVT: MVT::bf16, Action: Expand); |
| 1135 | |
| 1136 | // ARM does not have an i1 sign-extending load.
| 1137 | for (MVT VT : MVT::integer_valuetypes()) |
| 1138 | setLoadExtAction(ExtType: ISD::SEXTLOAD, ValVT: VT, MemVT: MVT::i1, Action: Promote); |
| 1139 | |
| 1140 | // ARM supports all 4 flavors of integer indexed load / store. |
| 1141 | if (!Subtarget->isThumb1Only()) { |
| 1142 | for (unsigned im = (unsigned)ISD::PRE_INC; |
| 1143 | im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) { |
| 1144 | setIndexedLoadAction(IdxModes: im, VT: MVT::i1, Action: Legal); |
| 1145 | setIndexedLoadAction(IdxModes: im, VT: MVT::i8, Action: Legal); |
| 1146 | setIndexedLoadAction(IdxModes: im, VT: MVT::i16, Action: Legal); |
| 1147 | setIndexedLoadAction(IdxModes: im, VT: MVT::i32, Action: Legal); |
| 1148 | setIndexedStoreAction(IdxModes: im, VT: MVT::i1, Action: Legal); |
| 1149 | setIndexedStoreAction(IdxModes: im, VT: MVT::i8, Action: Legal); |
| 1150 | setIndexedStoreAction(IdxModes: im, VT: MVT::i16, Action: Legal); |
| 1151 | setIndexedStoreAction(IdxModes: im, VT: MVT::i32, Action: Legal); |
| 1152 | } |
| 1153 | } else { |
| 1154 | // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}. |
| 1155 | setIndexedLoadAction(IdxModes: ISD::POST_INC, VT: MVT::i32, Action: Legal); |
| 1156 | setIndexedStoreAction(IdxModes: ISD::POST_INC, VT: MVT::i32, Action: Legal); |
| 1157 | } |
| 1158 | |
| 1159 | setOperationAction(Op: ISD::SADDO, VT: MVT::i32, Action: Custom); |
| 1160 | setOperationAction(Op: ISD::UADDO, VT: MVT::i32, Action: Custom); |
| 1161 | setOperationAction(Op: ISD::SSUBO, VT: MVT::i32, Action: Custom); |
| 1162 | setOperationAction(Op: ISD::USUBO, VT: MVT::i32, Action: Custom); |
| 1163 | |
| 1164 | setOperationAction(Op: ISD::UADDO_CARRY, VT: MVT::i32, Action: Custom); |
| 1165 | setOperationAction(Op: ISD::USUBO_CARRY, VT: MVT::i32, Action: Custom); |
| 1166 | if (Subtarget->hasDSP()) { |
| 1167 | setOperationAction(Op: ISD::SADDSAT, VT: MVT::i8, Action: Custom); |
| 1168 | setOperationAction(Op: ISD::SSUBSAT, VT: MVT::i8, Action: Custom); |
| 1169 | setOperationAction(Op: ISD::SADDSAT, VT: MVT::i16, Action: Custom); |
| 1170 | setOperationAction(Op: ISD::SSUBSAT, VT: MVT::i16, Action: Custom); |
| 1171 | setOperationAction(Op: ISD::UADDSAT, VT: MVT::i8, Action: Custom); |
| 1172 | setOperationAction(Op: ISD::USUBSAT, VT: MVT::i8, Action: Custom); |
| 1173 | setOperationAction(Op: ISD::UADDSAT, VT: MVT::i16, Action: Custom); |
| 1174 | setOperationAction(Op: ISD::USUBSAT, VT: MVT::i16, Action: Custom); |
| 1175 | } |
| 1176 | if (Subtarget->hasBaseDSP()) { |
| 1177 | setOperationAction(Op: ISD::SADDSAT, VT: MVT::i32, Action: Legal); |
| 1178 | setOperationAction(Op: ISD::SSUBSAT, VT: MVT::i32, Action: Legal); |
| 1179 | } |
| 1180 | |
| 1181 | // i64 operation support. |
| 1182 | setOperationAction(Op: ISD::MUL, VT: MVT::i64, Action: Expand); |
| 1183 | setOperationAction(Op: ISD::MULHU, VT: MVT::i32, Action: Expand); |
| 1184 | if (Subtarget->isThumb1Only()) { |
| 1185 | setOperationAction(Op: ISD::UMUL_LOHI, VT: MVT::i32, Action: Expand); |
| 1186 | setOperationAction(Op: ISD::SMUL_LOHI, VT: MVT::i32, Action: Expand); |
| 1187 | } |
| 1188 | if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops() ||
| 1189 | (Subtarget->isThumb2() && !Subtarget->hasDSP()))
| 1190 | setOperationAction(Op: ISD::MULHS, VT: MVT::i32, Action: Expand); |
| 1191 | |
| 1192 | setOperationAction(Op: ISD::SHL_PARTS, VT: MVT::i32, Action: Custom); |
| 1193 | setOperationAction(Op: ISD::SRA_PARTS, VT: MVT::i32, Action: Custom); |
| 1194 | setOperationAction(Op: ISD::SRL_PARTS, VT: MVT::i32, Action: Custom); |
| 1195 | setOperationAction(Op: ISD::SRL, VT: MVT::i64, Action: Custom); |
| 1196 | setOperationAction(Op: ISD::SRA, VT: MVT::i64, Action: Custom); |
| 1197 | setOperationAction(Op: ISD::INTRINSIC_VOID, VT: MVT::Other, Action: Custom); |
| 1198 | setOperationAction(Op: ISD::INTRINSIC_WO_CHAIN, VT: MVT::i64, Action: Custom); |
| 1199 | setOperationAction(Op: ISD::LOAD, VT: MVT::i64, Action: Custom); |
| 1200 | setOperationAction(Op: ISD::STORE, VT: MVT::i64, Action: Custom); |
| 1201 | |
| 1202 | // MVE lowers 64-bit shifts to lsll and lsrl,
| 1203 | // assuming that ISD::SRL and ISD::SRA of i64 are already marked Custom.
| 1204 | if (Subtarget->hasMVEIntegerOps()) |
| 1205 | setOperationAction(Op: ISD::SHL, VT: MVT::i64, Action: Custom); |
| 1206 | |
| 1207 | // Expand to __aeabi_l{lsl,lsr,asr} calls for Thumb1. |
| 1208 | if (Subtarget->isThumb1Only()) { |
| 1209 | setOperationAction(Op: ISD::SHL_PARTS, VT: MVT::i32, Action: Expand); |
| 1210 | setOperationAction(Op: ISD::SRA_PARTS, VT: MVT::i32, Action: Expand); |
| 1211 | setOperationAction(Op: ISD::SRL_PARTS, VT: MVT::i32, Action: Expand); |
| 1212 | } |
| 1213 | |
| 1214 | if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) |
| 1215 | setOperationAction(Op: ISD::BITREVERSE, VT: MVT::i32, Action: Legal); |
| 1216 | |
| 1217 | // ARM does not have ROTL. |
| 1218 | setOperationAction(Op: ISD::ROTL, VT: MVT::i32, Action: Expand); |
| 1219 | for (MVT VT : MVT::fixedlen_vector_valuetypes()) { |
| 1220 | setOperationAction(Op: ISD::ROTL, VT, Action: Expand); |
| 1221 | setOperationAction(Op: ISD::ROTR, VT, Action: Expand); |
| 1222 | } |
| 1223 | setOperationAction(Op: ISD::CTTZ, VT: MVT::i32, Action: Custom); |
| 1224 | // TODO: These two should be set to LibCall, but this currently breaks |
| 1225 | // the Linux kernel build. See #101786. |
| 1226 | setOperationAction(Op: ISD::CTPOP, VT: MVT::i32, Action: Expand); |
| 1227 | setOperationAction(Op: ISD::CTPOP, VT: MVT::i64, Action: Expand); |
| 1228 | if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) { |
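| | // Without CLZ (pre-v5T, or Thumb1), plain CTLZ is expanded since it must
| | // handle a zero input, while CTLZ_ZERO_UNDEF can use the __clzsi2 libcall,
| | // whose result is undefined for zero.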
| 1229 | setOperationAction(Op: ISD::CTLZ, VT: MVT::i32, Action: Expand); |
| 1230 | setOperationAction(Op: ISD::CTLZ_ZERO_UNDEF, VT: MVT::i32, Action: LibCall); |
| 1231 | } |
| 1232 | |
| 1233 | // @llvm.readcyclecounter requires the Performance Monitors extension. |
| 1234 | // Default to the 0 expansion on unsupported platforms. |
| 1235 | // FIXME: Technically there are older ARM CPUs that have |
| 1236 | // implementation-specific ways of obtaining this information. |
| 1237 | if (Subtarget->hasPerfMon()) |
| 1238 | setOperationAction(Op: ISD::READCYCLECOUNTER, VT: MVT::i64, Action: Custom); |
| 1239 | |
| 1240 | // BSWAP (the REV instruction) is only available on ARMv6 and later.
| 1241 | if (!Subtarget->hasV6Ops()) |
| 1242 | setOperationAction(Op: ISD::BSWAP, VT: MVT::i32, Action: Expand); |
| 1243 | |
| 1244 | bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode() |
| 1245 | : Subtarget->hasDivideInARMMode(); |
| 1246 | if (!hasDivide) { |
| 1247 | // These are expanded into libcalls if the CPU doesn't have a hardware divider.
| 1248 | setOperationAction(Op: ISD::SDIV, VT: MVT::i32, Action: LibCall); |
| 1249 | setOperationAction(Op: ISD::UDIV, VT: MVT::i32, Action: LibCall); |
| 1250 | } |
| 1251 | |
| 1252 | if (TT.isOSWindows() && !Subtarget->hasDivideInThumbMode()) { |
| 1253 | setOperationAction(Op: ISD::SDIV, VT: MVT::i32, Action: Custom); |
| 1254 | setOperationAction(Op: ISD::UDIV, VT: MVT::i32, Action: Custom); |
| 1255 | |
| 1256 | setOperationAction(Op: ISD::SDIV, VT: MVT::i64, Action: Custom); |
| 1257 | setOperationAction(Op: ISD::UDIV, VT: MVT::i64, Action: Custom); |
| 1258 | } |
| 1259 | |
| 1260 | setOperationAction(Op: ISD::SREM, VT: MVT::i32, Action: Expand); |
| 1261 | setOperationAction(Op: ISD::UREM, VT: MVT::i32, Action: Expand); |
| 1262 | |
| 1263 | // Register-based DivRem for AEABI (RTABI 4.2).
| 1264 | if (TT.isTargetAEABI() || TT.isAndroid() || TT.isTargetGNUAEABI() || |
| 1265 | TT.isTargetMuslAEABI() || TT.isOSWindows()) { |
| 1266 | setOperationAction(Op: ISD::SREM, VT: MVT::i64, Action: Custom); |
| 1267 | setOperationAction(Op: ISD::UREM, VT: MVT::i64, Action: Custom); |
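| | // AEABI provides combined divmod helpers (__aeabi_{i,ui,l,ul}divmod) that
| | // return the quotient and remainder together, so no standalone rem libcall
| | // is needed.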
| 1268 | HasStandaloneRem = false; |
| 1269 | |
| 1270 | setOperationAction(Op: ISD::SDIVREM, VT: MVT::i32, Action: Custom); |
| 1271 | setOperationAction(Op: ISD::UDIVREM, VT: MVT::i32, Action: Custom); |
| 1272 | setOperationAction(Op: ISD::SDIVREM, VT: MVT::i64, Action: Custom); |
| 1273 | setOperationAction(Op: ISD::UDIVREM, VT: MVT::i64, Action: Custom); |
| 1274 | } else { |
| 1275 | setOperationAction(Op: ISD::SDIVREM, VT: MVT::i32, Action: Expand); |
| 1276 | setOperationAction(Op: ISD::UDIVREM, VT: MVT::i32, Action: Expand); |
| 1277 | } |
| 1278 | |
| 1279 | setOperationAction(Op: ISD::GlobalAddress, VT: MVT::i32, Action: Custom); |
| 1280 | setOperationAction(Op: ISD::ConstantPool, VT: MVT::i32, Action: Custom); |
| 1281 | setOperationAction(Op: ISD::GlobalTLSAddress, VT: MVT::i32, Action: Custom); |
| 1282 | setOperationAction(Op: ISD::BlockAddress, VT: MVT::i32, Action: Custom); |
| 1283 | |
| 1284 | setOperationAction(Op: ISD::TRAP, VT: MVT::Other, Action: Legal); |
| 1285 | setOperationAction(Op: ISD::DEBUGTRAP, VT: MVT::Other, Action: Legal); |
| 1286 | |
| 1287 | // Use the default implementation. |
| 1288 | setOperationAction(Op: ISD::VASTART, VT: MVT::Other, Action: Custom); |
| 1289 | setOperationAction(Op: ISD::VAARG, VT: MVT::Other, Action: Expand); |
| 1290 | setOperationAction(Op: ISD::VACOPY, VT: MVT::Other, Action: Expand); |
| 1291 | setOperationAction(Op: ISD::VAEND, VT: MVT::Other, Action: Expand); |
| 1292 | setOperationAction(Op: ISD::STACKSAVE, VT: MVT::Other, Action: Expand); |
| 1293 | setOperationAction(Op: ISD::STACKRESTORE, VT: MVT::Other, Action: Expand); |
| 1294 | |
| 1295 | if (TT.isOSWindows()) |
| 1296 | setOperationAction(Op: ISD::DYNAMIC_STACKALLOC, VT: MVT::i32, Action: Custom); |
| 1297 | else |
| 1298 | setOperationAction(Op: ISD::DYNAMIC_STACKALLOC, VT: MVT::i32, Action: Expand); |
| 1299 | |
| 1300 | // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use |
| 1301 | // the default expansion. |
| 1302 | InsertFencesForAtomic = false; |
| 1303 | if (Subtarget->hasAnyDataBarrier() && |
| 1304 | (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) { |
| 1305 | // ATOMIC_FENCE needs custom lowering; the others should have been expanded |
| 1306 | // to ldrex/strex loops already. |
| 1307 | setOperationAction(Op: ISD::ATOMIC_FENCE, VT: MVT::Other, Action: Custom); |
| 1308 | if (!Subtarget->isThumb() || !Subtarget->isMClass()) |
| 1309 | setOperationAction(Op: ISD::ATOMIC_CMP_SWAP, VT: MVT::i64, Action: Custom); |
| 1310 | |
| 1311 | // On v8, we have particularly efficient implementations of atomic fences |
| 1312 | // if they can be combined with nearby atomic loads and stores. |
| 1313 | if (!Subtarget->hasAcquireRelease() || |
| 1314 | getTargetMachine().getOptLevel() == CodeGenOptLevel::None) { |
| 1315 | // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc. |
| 1316 | InsertFencesForAtomic = true; |
| 1317 | } |
| 1318 | } else { |
| 1319 | // If there's anything we can use as a barrier, go through custom lowering |
| 1320 | // for ATOMIC_FENCE. |
| 1321 | // If the target has DMB in Thumb mode, fences can be inserted.
| 1322 | if (Subtarget->hasDataBarrier()) |
| 1323 | InsertFencesForAtomic = true; |
| 1324 | |
| 1325 | setOperationAction(Op: ISD::ATOMIC_FENCE, VT: MVT::Other, |
| 1326 | Action: Subtarget->hasAnyDataBarrier() ? Custom : Expand); |
| 1327 | |
| 1328 | // Set them all for libcall, which will force libcalls. |
| 1329 | setOperationAction(Op: ISD::ATOMIC_CMP_SWAP, VT: MVT::i32, Action: LibCall); |
| 1330 | setOperationAction(Op: ISD::ATOMIC_SWAP, VT: MVT::i32, Action: LibCall); |
| 1331 | setOperationAction(Op: ISD::ATOMIC_LOAD_ADD, VT: MVT::i32, Action: LibCall); |
| 1332 | setOperationAction(Op: ISD::ATOMIC_LOAD_SUB, VT: MVT::i32, Action: LibCall); |
| 1333 | setOperationAction(Op: ISD::ATOMIC_LOAD_AND, VT: MVT::i32, Action: LibCall); |
| 1334 | setOperationAction(Op: ISD::ATOMIC_LOAD_OR, VT: MVT::i32, Action: LibCall); |
| 1335 | setOperationAction(Op: ISD::ATOMIC_LOAD_XOR, VT: MVT::i32, Action: LibCall); |
| 1336 | setOperationAction(Op: ISD::ATOMIC_LOAD_NAND, VT: MVT::i32, Action: LibCall); |
| 1337 | setOperationAction(Op: ISD::ATOMIC_LOAD_MIN, VT: MVT::i32, Action: LibCall); |
| 1338 | setOperationAction(Op: ISD::ATOMIC_LOAD_MAX, VT: MVT::i32, Action: LibCall); |
| 1339 | setOperationAction(Op: ISD::ATOMIC_LOAD_UMIN, VT: MVT::i32, Action: LibCall); |
| 1340 | setOperationAction(Op: ISD::ATOMIC_LOAD_UMAX, VT: MVT::i32, Action: LibCall); |
| 1341 | // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the |
| 1342 | // Unordered/Monotonic case. |
| 1343 | if (!InsertFencesForAtomic) { |
| 1344 | setOperationAction(Op: ISD::ATOMIC_LOAD, VT: MVT::i32, Action: Custom); |
| 1345 | setOperationAction(Op: ISD::ATOMIC_STORE, VT: MVT::i32, Action: Custom); |
| 1346 | } |
| 1347 | } |
| 1348 | |
| 1349 | // Compute supported atomic widths. |
| 1350 | if (TT.isOSLinux() || (!Subtarget->isMClass() && Subtarget->hasV6Ops())) { |
| 1351 | // For targets where __sync_* routines are reliably available, we use them |
| 1352 | // if necessary. |
| 1353 | // |
| 1354 | // ARM Linux always supports 64-bit atomics through kernel-assisted atomic |
| 1355 | // routines (kernel 3.1 or later). FIXME: Not with compiler-rt? |
| 1356 | // |
| 1357 | // ARMv6 targets have native instructions in ARM mode. For Thumb mode, |
| 1358 | // such targets should provide __sync_* routines, which use the ARM mode |
| 1359 | // instructions. (ARMv6 doesn't have dmb, but it has an equivalent |
| 1360 | // encoding; see ARMISD::MEMBARRIER_MCR.) |
| 1361 | setMaxAtomicSizeInBitsSupported(64); |
| 1362 | } else if ((Subtarget->isMClass() && Subtarget->hasV8MBaselineOps()) || |
| 1363 | Subtarget->hasForced32BitAtomics()) { |
| 1364 | // Cortex-M cores (other than Cortex-M0) have 32-bit atomics.
| 1365 | setMaxAtomicSizeInBitsSupported(32); |
| 1366 | } else { |
| 1367 | // We can't assume anything about other targets; just use libatomic |
| 1368 | // routines. |
| 1369 | setMaxAtomicSizeInBitsSupported(0); |
| 1370 | } |
| 1371 | |
| 1372 | setMaxDivRemBitWidthSupported(64); |
| 1373 | |
| 1374 | setOperationAction(Op: ISD::PREFETCH, VT: MVT::Other, Action: Custom); |
| 1375 | |
| 1376 | // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes. |
| 1377 | if (!Subtarget->hasV6Ops()) { |
| 1378 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::i16, Action: Expand); |
| 1379 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::i8, Action: Expand); |
| 1380 | } |
| 1381 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::i1, Action: Expand); |
| 1382 | |
| 1383 | if (!Subtarget->useSoftFloat() && Subtarget->hasFPRegs() && |
| 1384 | !Subtarget->isThumb1Only()) { |
| 1385 | // Turn f64->i64 into VMOVRRD and i64->f64 into VMOVDRR,
| 1386 | // iff the target supports VFP2.
| 1387 | setOperationAction(Op: ISD::BITCAST, VT: MVT::i64, Action: Custom); |
| 1388 | setOperationAction(Op: ISD::GET_ROUNDING, VT: MVT::i32, Action: Custom); |
| 1389 | setOperationAction(Op: ISD::SET_ROUNDING, VT: MVT::Other, Action: Custom); |
| 1390 | setOperationAction(Op: ISD::GET_FPENV, VT: MVT::i32, Action: Legal); |
| 1391 | setOperationAction(Op: ISD::SET_FPENV, VT: MVT::i32, Action: Legal); |
| 1392 | setOperationAction(Op: ISD::RESET_FPENV, VT: MVT::Other, Action: Legal); |
| 1393 | setOperationAction(Op: ISD::GET_FPMODE, VT: MVT::i32, Action: Legal); |
| 1394 | setOperationAction(Op: ISD::SET_FPMODE, VT: MVT::i32, Action: Custom); |
| 1395 | setOperationAction(Op: ISD::RESET_FPMODE, VT: MVT::Other, Action: Custom); |
| 1396 | } |
| 1397 | |
| 1398 | // We want to custom lower some of our intrinsics. |
| 1399 | setOperationAction(Op: ISD::INTRINSIC_WO_CHAIN, VT: MVT::Other, Action: Custom); |
| 1400 | setOperationAction(Op: ISD::EH_SJLJ_SETJMP, VT: MVT::i32, Action: Custom); |
| 1401 | setOperationAction(Op: ISD::EH_SJLJ_LONGJMP, VT: MVT::Other, Action: Custom); |
| 1402 | setOperationAction(Op: ISD::EH_SJLJ_SETUP_DISPATCH, VT: MVT::Other, Action: Custom); |
| 1403 | if (Subtarget->useSjLjEH()) |
| 1404 | setLibcallImpl(Call: RTLIB::UNWIND_RESUME, Impl: RTLIB::_Unwind_SjLj_Resume); |
| 1405 | |
| 1406 | setOperationAction(Op: ISD::SETCC, VT: MVT::i32, Action: Expand); |
| 1407 | setOperationAction(Op: ISD::SETCC, VT: MVT::f32, Action: Expand); |
| 1408 | setOperationAction(Op: ISD::SETCC, VT: MVT::f64, Action: Expand); |
| 1409 | setOperationAction(Op: ISD::SELECT, VT: MVT::i32, Action: Custom); |
| 1410 | setOperationAction(Op: ISD::SELECT, VT: MVT::f32, Action: Custom); |
| 1411 | setOperationAction(Op: ISD::SELECT, VT: MVT::f64, Action: Custom); |
| 1412 | setOperationAction(Op: ISD::SELECT_CC, VT: MVT::i32, Action: Custom); |
| 1413 | setOperationAction(Op: ISD::SELECT_CC, VT: MVT::f32, Action: Custom); |
| 1414 | setOperationAction(Op: ISD::SELECT_CC, VT: MVT::f64, Action: Custom); |
| 1415 | if (Subtarget->hasFullFP16()) { |
| 1416 | setOperationAction(Op: ISD::SETCC, VT: MVT::f16, Action: Expand); |
| 1417 | setOperationAction(Op: ISD::SELECT, VT: MVT::f16, Action: Custom); |
| 1418 | setOperationAction(Op: ISD::SELECT_CC, VT: MVT::f16, Action: Custom); |
| 1419 | } |
| 1420 | |
| 1421 | setOperationAction(Op: ISD::SETCCCARRY, VT: MVT::i32, Action: Custom); |
| 1422 | |
| 1423 | setOperationAction(Op: ISD::BRCOND, VT: MVT::Other, Action: Custom); |
| 1424 | setOperationAction(Op: ISD::BR_CC, VT: MVT::i32, Action: Custom); |
| 1425 | if (Subtarget->hasFullFP16()) |
| 1426 | setOperationAction(Op: ISD::BR_CC, VT: MVT::f16, Action: Custom); |
| 1427 | setOperationAction(Op: ISD::BR_CC, VT: MVT::f32, Action: Custom); |
| 1428 | setOperationAction(Op: ISD::BR_CC, VT: MVT::f64, Action: Custom); |
| 1429 | setOperationAction(Op: ISD::BR_JT, VT: MVT::Other, Action: Custom); |
| 1430 | |
| 1431 | // We don't support sin/cos/fmod/copysign/pow |
| 1432 | setOperationAction(Op: ISD::FSIN, VT: MVT::f64, Action: Expand); |
| 1433 | setOperationAction(Op: ISD::FSIN, VT: MVT::f32, Action: Expand); |
| 1434 | setOperationAction(Op: ISD::FCOS, VT: MVT::f32, Action: Expand); |
| 1435 | setOperationAction(Op: ISD::FCOS, VT: MVT::f64, Action: Expand); |
| 1436 | setOperationAction(Op: ISD::FSINCOS, VT: MVT::f64, Action: Expand); |
| 1437 | setOperationAction(Op: ISD::FSINCOS, VT: MVT::f32, Action: Expand); |
| 1438 | setOperationAction(Op: ISD::FREM, VT: MVT::f64, Action: Expand); |
| 1439 | setOperationAction(Op: ISD::FREM, VT: MVT::f32, Action: Expand); |
| 1440 | if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() && |
| 1441 | !Subtarget->isThumb1Only()) { |
| 1442 | setOperationAction(Op: ISD::FCOPYSIGN, VT: MVT::f64, Action: Custom); |
| 1443 | setOperationAction(Op: ISD::FCOPYSIGN, VT: MVT::f32, Action: Custom); |
| 1444 | } |
| 1445 | setOperationAction(Op: ISD::FPOW, VT: MVT::f64, Action: Expand); |
| 1446 | setOperationAction(Op: ISD::FPOW, VT: MVT::f32, Action: Expand); |
| 1447 | |
| 1448 | if (!Subtarget->hasVFP4Base()) { |
| 1449 | setOperationAction(Op: ISD::FMA, VT: MVT::f64, Action: Expand); |
| 1450 | setOperationAction(Op: ISD::FMA, VT: MVT::f32, Action: Expand); |
| 1451 | } |
| 1452 | |
| 1453 | // Various VFP goodness |
| 1454 | if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) { |
| 1455 | // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded. |
| 1456 | if (!Subtarget->hasFPARMv8Base() || !Subtarget->hasFP64()) { |
| 1457 | setOperationAction(Op: ISD::FP16_TO_FP, VT: MVT::f64, Action: Expand); |
| 1458 | setOperationAction(Op: ISD::FP_TO_FP16, VT: MVT::f64, Action: Expand); |
| 1459 | } |
| 1460 | |
| 1461 | // fp16 is a special v7 extension that adds f16 <-> f32 conversions. |
| 1462 | if (!Subtarget->hasFP16()) { |
| 1463 | setOperationAction(Op: ISD::FP16_TO_FP, VT: MVT::f32, Action: Expand); |
| 1464 | setOperationAction(Op: ISD::FP_TO_FP16, VT: MVT::f32, Action: Expand); |
| 1465 | } |
| 1466 | |
| 1467 | // Strict floating-point comparisons need custom lowering. |
| 1468 | setOperationAction(Op: ISD::STRICT_FSETCC, VT: MVT::f16, Action: Custom); |
| 1469 | setOperationAction(Op: ISD::STRICT_FSETCCS, VT: MVT::f16, Action: Custom); |
| 1470 | setOperationAction(Op: ISD::STRICT_FSETCC, VT: MVT::f32, Action: Custom); |
| 1471 | setOperationAction(Op: ISD::STRICT_FSETCCS, VT: MVT::f32, Action: Custom); |
| 1472 | setOperationAction(Op: ISD::STRICT_FSETCC, VT: MVT::f64, Action: Custom); |
| 1473 | setOperationAction(Op: ISD::STRICT_FSETCCS, VT: MVT::f64, Action: Custom); |
| 1474 | } |
| 1475 | |
| 1476 | // Use __sincos_stret if available. |
| 1477 | if (getLibcallName(Call: RTLIB::SINCOS_STRET_F32) != nullptr && |
| 1478 | getLibcallName(Call: RTLIB::SINCOS_STRET_F64) != nullptr) { |
| 1479 | setOperationAction(Op: ISD::FSINCOS, VT: MVT::f64, Action: Custom); |
| 1480 | setOperationAction(Op: ISD::FSINCOS, VT: MVT::f32, Action: Custom); |
| 1481 | } |
| 1482 | |
| 1483 | // FP-ARMv8 implements a lot of rounding-like FP operations. |
| 1484 | if (Subtarget->hasFPARMv8Base()) { |
| 1485 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::f32, Action: Legal); |
| 1486 | setOperationAction(Op: ISD::FCEIL, VT: MVT::f32, Action: Legal); |
| 1487 | setOperationAction(Op: ISD::FROUND, VT: MVT::f32, Action: Legal); |
| 1488 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::f32, Action: Legal); |
| 1489 | setOperationAction(Op: ISD::FNEARBYINT, VT: MVT::f32, Action: Legal); |
| 1490 | setOperationAction(Op: ISD::FRINT, VT: MVT::f32, Action: Legal); |
| 1491 | setOperationAction(Op: ISD::FROUNDEVEN, VT: MVT::f32, Action: Legal); |
| 1492 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::f32, Action: Legal); |
| 1493 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::f32, Action: Legal); |
| 1494 | if (Subtarget->hasNEON()) { |
| 1495 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::v2f32, Action: Legal); |
| 1496 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::v2f32, Action: Legal); |
| 1497 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::v4f32, Action: Legal); |
| 1498 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::v4f32, Action: Legal); |
| 1499 | } |
| 1500 | |
| 1501 | if (Subtarget->hasFP64()) { |
| 1502 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::f64, Action: Legal); |
| 1503 | setOperationAction(Op: ISD::FCEIL, VT: MVT::f64, Action: Legal); |
| 1504 | setOperationAction(Op: ISD::FROUND, VT: MVT::f64, Action: Legal); |
| 1505 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::f64, Action: Legal); |
| 1506 | setOperationAction(Op: ISD::FNEARBYINT, VT: MVT::f64, Action: Legal); |
| 1507 | setOperationAction(Op: ISD::FRINT, VT: MVT::f64, Action: Legal); |
| 1508 | setOperationAction(Op: ISD::FROUNDEVEN, VT: MVT::f64, Action: Legal); |
| 1509 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::f64, Action: Legal); |
| 1510 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::f64, Action: Legal); |
| 1511 | } |
| 1512 | } |
| 1513 | |
| 1514 | // FP16 operations often need to be promoted to call library functions.
| 1515 | if (Subtarget->hasFullFP16()) { |
| 1516 | setOperationAction(Op: ISD::FREM, VT: MVT::f16, Action: Promote); |
| 1517 | setOperationAction(Op: ISD::FCOPYSIGN, VT: MVT::f16, Action: Expand); |
| 1518 | setOperationAction(Op: ISD::FSIN, VT: MVT::f16, Action: Promote); |
| 1519 | setOperationAction(Op: ISD::FCOS, VT: MVT::f16, Action: Promote); |
| 1520 | setOperationAction(Op: ISD::FTAN, VT: MVT::f16, Action: Promote); |
| 1521 | setOperationAction(Op: ISD::FSINCOS, VT: MVT::f16, Action: Promote); |
| 1522 | setOperationAction(Op: ISD::FPOWI, VT: MVT::f16, Action: Promote); |
| 1523 | setOperationAction(Op: ISD::FPOW, VT: MVT::f16, Action: Promote); |
| 1524 | setOperationAction(Op: ISD::FEXP, VT: MVT::f16, Action: Promote); |
| 1525 | setOperationAction(Op: ISD::FEXP2, VT: MVT::f16, Action: Promote); |
| 1526 | setOperationAction(Op: ISD::FEXP10, VT: MVT::f16, Action: Promote); |
| 1527 | setOperationAction(Op: ISD::FLOG, VT: MVT::f16, Action: Promote); |
| 1528 | setOperationAction(Op: ISD::FLOG10, VT: MVT::f16, Action: Promote); |
| 1529 | setOperationAction(Op: ISD::FLOG2, VT: MVT::f16, Action: Promote); |
| 1530 | |
| 1531 | setOperationAction(Op: ISD::FROUND, VT: MVT::f16, Action: Legal); |
| 1532 | } |
| 1533 | |
| 1534 | if (Subtarget->hasNEON()) { |
| 1535 | // vmin and vmax aren't available in a scalar form, so we can use |
| 1536 | // a NEON instruction with an undef lane instead. |
| 1537 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::f32, Action: Legal); |
| 1538 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::f32, Action: Legal); |
| 1539 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::f16, Action: Legal); |
| 1540 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::f16, Action: Legal); |
| 1541 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::v2f32, Action: Legal); |
| 1542 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::v2f32, Action: Legal); |
| 1543 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::v4f32, Action: Legal); |
| 1544 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::v4f32, Action: Legal); |
| 1545 | |
| 1546 | if (Subtarget->hasV8Ops()) { |
| 1547 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::v2f32, Action: Legal); |
| 1548 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::v4f32, Action: Legal); |
| 1549 | setOperationAction(Op: ISD::FROUND, VT: MVT::v2f32, Action: Legal); |
| 1550 | setOperationAction(Op: ISD::FROUND, VT: MVT::v4f32, Action: Legal); |
| 1551 | setOperationAction(Op: ISD::FCEIL, VT: MVT::v2f32, Action: Legal); |
| 1552 | setOperationAction(Op: ISD::FCEIL, VT: MVT::v4f32, Action: Legal); |
| 1553 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::v2f32, Action: Legal); |
| 1554 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::v4f32, Action: Legal); |
| 1555 | setOperationAction(Op: ISD::FRINT, VT: MVT::v2f32, Action: Legal); |
| 1556 | setOperationAction(Op: ISD::FRINT, VT: MVT::v4f32, Action: Legal); |
| 1557 | } |
| 1558 | |
| 1559 | if (Subtarget->hasFullFP16()) { |
| 1560 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::v4f16, Action: Legal); |
| 1561 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::v4f16, Action: Legal); |
| 1562 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::v8f16, Action: Legal); |
| 1563 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::v8f16, Action: Legal); |
| 1564 | |
| 1565 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::v4f16, Action: Legal); |
| 1566 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::v4f16, Action: Legal); |
| 1567 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::v8f16, Action: Legal); |
| 1568 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::v8f16, Action: Legal); |
| 1569 | |
| 1570 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::v4f16, Action: Legal); |
| 1571 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::v8f16, Action: Legal); |
| 1572 | setOperationAction(Op: ISD::FROUND, VT: MVT::v4f16, Action: Legal); |
| 1573 | setOperationAction(Op: ISD::FROUND, VT: MVT::v8f16, Action: Legal); |
| 1574 | setOperationAction(Op: ISD::FCEIL, VT: MVT::v4f16, Action: Legal); |
| 1575 | setOperationAction(Op: ISD::FCEIL, VT: MVT::v8f16, Action: Legal); |
| 1576 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::v4f16, Action: Legal); |
| 1577 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::v8f16, Action: Legal); |
| 1578 | setOperationAction(Op: ISD::FRINT, VT: MVT::v4f16, Action: Legal); |
| 1579 | setOperationAction(Op: ISD::FRINT, VT: MVT::v8f16, Action: Legal); |
| 1580 | } |
| 1581 | } |
| 1582 | |
| 1583 | // On MSVC, both 32-bit and 64-bit, ldexpf(f32) is not defined. MinGW has |
| 1584 | // it, but it's just a wrapper around ldexp. |
| 1585 | if (TT.isOSWindows()) { |
| 1586 | for (ISD::NodeType Op : {ISD::FLDEXP, ISD::STRICT_FLDEXP, ISD::FFREXP}) |
| 1587 | if (isOperationExpand(Op, VT: MVT::f32)) |
| 1588 | setOperationAction(Op, VT: MVT::f32, Action: Promote); |
| 1589 | } |
| 1590 | |
| 1591 | // LegalizeDAG currently can't expand fp16 LDEXP/FREXP on targets where i16 |
| 1592 | // isn't legal. |
| 1593 | for (ISD::NodeType Op : {ISD::FLDEXP, ISD::STRICT_FLDEXP, ISD::FFREXP}) |
| 1594 | if (isOperationExpand(Op, VT: MVT::f16)) |
| 1595 | setOperationAction(Op, VT: MVT::f16, Action: Promote); |
| 1596 | |
| 1597 | // We have target-specific DAG combine patterns for the following nodes:
| 1598 | // ARMISD::VMOVRRD - no need to call setTargetDAGCombine for target nodes.
| 1599 | setTargetDAGCombine( |
| 1600 | {ISD::ADD, ISD::SUB, ISD::MUL, ISD::AND, ISD::OR, ISD::XOR}); |
| 1601 | |
| 1602 | if (Subtarget->hasMVEIntegerOps()) |
| 1603 | setTargetDAGCombine(ISD::VSELECT); |
| 1604 | |
| 1605 | if (Subtarget->hasV6Ops()) |
| 1606 | setTargetDAGCombine(ISD::SRL); |
| 1607 | if (Subtarget->isThumb1Only()) |
| 1608 | setTargetDAGCombine(ISD::SHL); |
| 1609 | // Attempt to lower smin/smax to ssat/usat |
| 1610 | if ((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || |
| 1611 | Subtarget->isThumb2()) { |
| 1612 | setTargetDAGCombine({ISD::SMIN, ISD::SMAX}); |
| 1613 | } |
| 1614 | |
| 1615 | setStackPointerRegisterToSaveRestore(ARM::SP); |
| 1616 | |
| 1617 | if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() || |
| 1618 | !Subtarget->hasVFP2Base() || Subtarget->hasMinSize()) |
| 1619 | setSchedulingPreference(Sched::RegPressure); |
| 1620 | else |
| 1621 | setSchedulingPreference(Sched::Hybrid); |
| 1622 | |
| 1623 | //// temporary - rewrite interface to use type |
| 1624 | MaxStoresPerMemset = 8; |
| 1625 | MaxStoresPerMemsetOptSize = 4; |
| 1626 | MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores |
| 1627 | MaxStoresPerMemcpyOptSize = 2; |
| 1628 | MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores |
| 1629 | MaxStoresPerMemmoveOptSize = 2; |
| 1630 | |
| 1631 | // On ARM arguments smaller than 4 bytes are extended, so all arguments |
| 1632 | // are at least 4 bytes aligned. |
| 1633 | setMinStackArgumentAlignment(Align(4)); |
| 1634 | |
| 1635 | // Prefer likely predicted branches to selects on out-of-order cores. |
| 1636 | PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder(); |
| 1637 | |
| 1638 | setPrefLoopAlignment(Align(1ULL << Subtarget->getPreferBranchLogAlignment())); |
| 1639 | setPrefFunctionAlignment( |
| 1640 | Align(1ULL << Subtarget->getPreferBranchLogAlignment())); |
| 1641 | |
| 1642 | setMinFunctionAlignment(Subtarget->isThumb() ? Align(2) : Align(4)); |
| 1643 | } |
| 1644 | |
| 1645 | bool ARMTargetLowering::useSoftFloat() const { |
| 1646 | return Subtarget->useSoftFloat(); |
| 1647 | } |
| 1648 | |
| 1649 | // FIXME: It might make sense to define the representative register class as the |
| 1650 | // nearest super-register that has a non-null superset. For example, DPR_VFP2 is |
| 1651 | // a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
| 1652 | // SPR's representative would be DPR_VFP2. This should work well if register |
| 1653 | // pressure tracking were modified such that a register use would increment the |
| 1654 | // pressure of the register class's representative and all of its super
| 1655 | // classes' representatives transitively. We have not implemented this because |
| 1656 | // of the difficulty, prior to coalescing, of modeling operand register classes
| 1657 | // due to the common occurrence of cross-class copies and subregister insertions
| 1658 | // and extractions. |
| 1659 | std::pair<const TargetRegisterClass *, uint8_t> |
| 1660 | ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI, |
| 1661 | MVT VT) const { |
| 1662 | const TargetRegisterClass *RRC = nullptr; |
| 1663 | uint8_t Cost = 1; |
| 1664 | switch (VT.SimpleTy) { |
| 1665 | default: |
| 1666 | return TargetLowering::findRepresentativeClass(TRI, VT); |
| 1667 | // Use DPR as the representative register class for all floating-point
| 1668 | // and vector types. Since there are 32 SPR registers and 32 DPR registers,
| 1669 | // the cost is 1 for both f32 and f64. |
| 1670 | case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16: |
| 1671 | case MVT::v2i32: case MVT::v1i64: case MVT::v2f32: |
| 1672 | RRC = &ARM::DPRRegClass; |
| 1673 | // When NEON is used for SP, only half of the register file is available |
| 1674 | // because operations that define both SP and DP results will be constrained |
| 1675 | // to the VFP2 class (D0-D15). We currently model this constraint prior to |
| 1676 | // coalescing by double-counting the SP regs. See the FIXME above. |
| 1677 | if (Subtarget->useNEONForSinglePrecisionFP()) |
| 1678 | Cost = 2; |
| 1679 | break; |
| 1680 | case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: |
| 1681 | case MVT::v4f32: case MVT::v2f64: |
| 1682 | RRC = &ARM::DPRRegClass; |
| 1683 | Cost = 2; |
| 1684 | break; |
| 1685 | case MVT::v4i64: |
| 1686 | RRC = &ARM::DPRRegClass; |
| 1687 | Cost = 4; |
| 1688 | break; |
| 1689 | case MVT::v8i64: |
| 1690 | RRC = &ARM::DPRRegClass; |
| 1691 | Cost = 8; |
| 1692 | break; |
| 1693 | } |
| 1694 | return std::make_pair(x&: RRC, y&: Cost); |
| 1695 | } |
| 1696 | |
| 1697 | const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const { |
| 1698 | #define MAKE_CASE(V) \ |
| 1699 | case V: \ |
| 1700 | return #V; |
| 1701 | switch ((ARMISD::NodeType)Opcode) { |
| 1702 | case ARMISD::FIRST_NUMBER: |
| 1703 | break; |
| 1704 | MAKE_CASE(ARMISD::Wrapper) |
| 1705 | MAKE_CASE(ARMISD::WrapperPIC) |
| 1706 | MAKE_CASE(ARMISD::WrapperJT) |
| 1707 | MAKE_CASE(ARMISD::COPY_STRUCT_BYVAL) |
| 1708 | MAKE_CASE(ARMISD::CALL) |
| 1709 | MAKE_CASE(ARMISD::CALL_PRED) |
| 1710 | MAKE_CASE(ARMISD::CALL_NOLINK) |
| 1711 | MAKE_CASE(ARMISD::tSECALL) |
| 1712 | MAKE_CASE(ARMISD::t2CALL_BTI) |
| 1713 | MAKE_CASE(ARMISD::BRCOND) |
| 1714 | MAKE_CASE(ARMISD::BR_JT) |
| 1715 | MAKE_CASE(ARMISD::BR2_JT) |
| 1716 | MAKE_CASE(ARMISD::RET_GLUE) |
| 1717 | MAKE_CASE(ARMISD::SERET_GLUE) |
| 1718 | MAKE_CASE(ARMISD::INTRET_GLUE) |
| 1719 | MAKE_CASE(ARMISD::PIC_ADD) |
| 1720 | MAKE_CASE(ARMISD::CMP) |
| 1721 | MAKE_CASE(ARMISD::CMN) |
| 1722 | MAKE_CASE(ARMISD::CMPZ) |
| 1723 | MAKE_CASE(ARMISD::CMPFP) |
| 1724 | MAKE_CASE(ARMISD::CMPFPE) |
| 1725 | MAKE_CASE(ARMISD::CMPFPw0) |
| 1726 | MAKE_CASE(ARMISD::CMPFPEw0) |
| 1727 | MAKE_CASE(ARMISD::BCC_i64) |
| 1728 | MAKE_CASE(ARMISD::FMSTAT) |
| 1729 | MAKE_CASE(ARMISD::CMOV) |
| 1730 | MAKE_CASE(ARMISD::SSAT) |
| 1731 | MAKE_CASE(ARMISD::USAT) |
| 1732 | MAKE_CASE(ARMISD::ASRL) |
| 1733 | MAKE_CASE(ARMISD::LSRL) |
| 1734 | MAKE_CASE(ARMISD::LSLL) |
| 1735 | MAKE_CASE(ARMISD::LSLS) |
| 1736 | MAKE_CASE(ARMISD::LSRS1) |
| 1737 | MAKE_CASE(ARMISD::ASRS1) |
| 1738 | MAKE_CASE(ARMISD::RRX) |
| 1739 | MAKE_CASE(ARMISD::ADDC) |
| 1740 | MAKE_CASE(ARMISD::ADDE) |
| 1741 | MAKE_CASE(ARMISD::SUBC) |
| 1742 | MAKE_CASE(ARMISD::SUBE) |
| 1743 | MAKE_CASE(ARMISD::VMOVRRD) |
| 1744 | MAKE_CASE(ARMISD::VMOVDRR) |
| 1745 | MAKE_CASE(ARMISD::VMOVhr) |
| 1746 | MAKE_CASE(ARMISD::VMOVrh) |
| 1747 | MAKE_CASE(ARMISD::VMOVSR) |
| 1748 | MAKE_CASE(ARMISD::EH_SJLJ_SETJMP) |
| 1749 | MAKE_CASE(ARMISD::EH_SJLJ_LONGJMP) |
| 1750 | MAKE_CASE(ARMISD::EH_SJLJ_SETUP_DISPATCH) |
| 1751 | MAKE_CASE(ARMISD::TC_RETURN) |
| 1752 | MAKE_CASE(ARMISD::THREAD_POINTER) |
| 1753 | MAKE_CASE(ARMISD::DYN_ALLOC) |
| 1754 | MAKE_CASE(ARMISD::MEMBARRIER_MCR) |
| 1755 | MAKE_CASE(ARMISD::PRELOAD) |
| 1756 | MAKE_CASE(ARMISD::LDRD) |
| 1757 | MAKE_CASE(ARMISD::STRD) |
| 1758 | MAKE_CASE(ARMISD::WIN__CHKSTK) |
| 1759 | MAKE_CASE(ARMISD::WIN__DBZCHK) |
| 1760 | MAKE_CASE(ARMISD::PREDICATE_CAST) |
| 1761 | MAKE_CASE(ARMISD::VECTOR_REG_CAST) |
| 1762 | MAKE_CASE(ARMISD::MVESEXT) |
| 1763 | MAKE_CASE(ARMISD::MVEZEXT) |
| 1764 | MAKE_CASE(ARMISD::MVETRUNC) |
| 1765 | MAKE_CASE(ARMISD::VCMP) |
| 1766 | MAKE_CASE(ARMISD::VCMPZ) |
| 1767 | MAKE_CASE(ARMISD::VTST) |
| 1768 | MAKE_CASE(ARMISD::VSHLs) |
| 1769 | MAKE_CASE(ARMISD::VSHLu) |
| 1770 | MAKE_CASE(ARMISD::VSHLIMM) |
| 1771 | MAKE_CASE(ARMISD::VSHRsIMM) |
| 1772 | MAKE_CASE(ARMISD::VSHRuIMM) |
| 1773 | MAKE_CASE(ARMISD::VRSHRsIMM) |
| 1774 | MAKE_CASE(ARMISD::VRSHRuIMM) |
| 1775 | MAKE_CASE(ARMISD::VRSHRNIMM) |
| 1776 | MAKE_CASE(ARMISD::VQSHLsIMM) |
| 1777 | MAKE_CASE(ARMISD::VQSHLuIMM) |
| 1778 | MAKE_CASE(ARMISD::VQSHLsuIMM) |
| 1779 | MAKE_CASE(ARMISD::VQSHRNsIMM) |
| 1780 | MAKE_CASE(ARMISD::VQSHRNuIMM) |
| 1781 | MAKE_CASE(ARMISD::VQSHRNsuIMM) |
| 1782 | MAKE_CASE(ARMISD::VQRSHRNsIMM) |
| 1783 | MAKE_CASE(ARMISD::VQRSHRNuIMM) |
| 1784 | MAKE_CASE(ARMISD::VQRSHRNsuIMM) |
| 1785 | MAKE_CASE(ARMISD::VSLIIMM) |
| 1786 | MAKE_CASE(ARMISD::VSRIIMM) |
| 1787 | MAKE_CASE(ARMISD::VGETLANEu) |
| 1788 | MAKE_CASE(ARMISD::VGETLANEs) |
| 1789 | MAKE_CASE(ARMISD::VMOVIMM) |
| 1790 | MAKE_CASE(ARMISD::VMVNIMM) |
| 1791 | MAKE_CASE(ARMISD::VMOVFPIMM) |
| 1792 | MAKE_CASE(ARMISD::VDUP) |
| 1793 | MAKE_CASE(ARMISD::VDUPLANE) |
| 1794 | MAKE_CASE(ARMISD::VEXT) |
| 1795 | MAKE_CASE(ARMISD::VREV64) |
| 1796 | MAKE_CASE(ARMISD::VREV32) |
| 1797 | MAKE_CASE(ARMISD::VREV16) |
| 1798 | MAKE_CASE(ARMISD::VZIP) |
| 1799 | MAKE_CASE(ARMISD::VUZP) |
| 1800 | MAKE_CASE(ARMISD::VTRN) |
| 1801 | MAKE_CASE(ARMISD::VTBL1) |
| 1802 | MAKE_CASE(ARMISD::VTBL2) |
| 1803 | MAKE_CASE(ARMISD::VMOVN) |
| 1804 | MAKE_CASE(ARMISD::VQMOVNs) |
| 1805 | MAKE_CASE(ARMISD::VQMOVNu) |
| 1806 | MAKE_CASE(ARMISD::VCVTN) |
| 1807 | MAKE_CASE(ARMISD::VCVTL) |
| 1808 | MAKE_CASE(ARMISD::VIDUP) |
| 1809 | MAKE_CASE(ARMISD::VMULLs) |
| 1810 | MAKE_CASE(ARMISD::VMULLu) |
| 1811 | MAKE_CASE(ARMISD::VQDMULH) |
| 1812 | MAKE_CASE(ARMISD::VADDVs) |
| 1813 | MAKE_CASE(ARMISD::VADDVu) |
| 1814 | MAKE_CASE(ARMISD::VADDVps) |
| 1815 | MAKE_CASE(ARMISD::VADDVpu) |
| 1816 | MAKE_CASE(ARMISD::VADDLVs) |
| 1817 | MAKE_CASE(ARMISD::VADDLVu) |
| 1818 | MAKE_CASE(ARMISD::VADDLVAs) |
| 1819 | MAKE_CASE(ARMISD::VADDLVAu) |
| 1820 | MAKE_CASE(ARMISD::VADDLVps) |
| 1821 | MAKE_CASE(ARMISD::VADDLVpu) |
| 1822 | MAKE_CASE(ARMISD::VADDLVAps) |
| 1823 | MAKE_CASE(ARMISD::VADDLVApu) |
| 1824 | MAKE_CASE(ARMISD::VMLAVs) |
| 1825 | MAKE_CASE(ARMISD::VMLAVu) |
| 1826 | MAKE_CASE(ARMISD::VMLAVps) |
| 1827 | MAKE_CASE(ARMISD::VMLAVpu) |
| 1828 | MAKE_CASE(ARMISD::VMLALVs) |
| 1829 | MAKE_CASE(ARMISD::VMLALVu) |
| 1830 | MAKE_CASE(ARMISD::VMLALVps) |
| 1831 | MAKE_CASE(ARMISD::VMLALVpu) |
| 1832 | MAKE_CASE(ARMISD::VMLALVAs) |
| 1833 | MAKE_CASE(ARMISD::VMLALVAu) |
| 1834 | MAKE_CASE(ARMISD::VMLALVAps) |
| 1835 | MAKE_CASE(ARMISD::VMLALVApu) |
| 1836 | MAKE_CASE(ARMISD::VMINVu) |
| 1837 | MAKE_CASE(ARMISD::VMINVs) |
| 1838 | MAKE_CASE(ARMISD::VMAXVu) |
| 1839 | MAKE_CASE(ARMISD::VMAXVs) |
| 1840 | MAKE_CASE(ARMISD::UMAAL) |
| 1841 | MAKE_CASE(ARMISD::UMLAL) |
| 1842 | MAKE_CASE(ARMISD::SMLAL) |
| 1843 | MAKE_CASE(ARMISD::SMLALBB) |
| 1844 | MAKE_CASE(ARMISD::SMLALBT) |
| 1845 | MAKE_CASE(ARMISD::SMLALTB) |
| 1846 | MAKE_CASE(ARMISD::SMLALTT) |
| 1847 | MAKE_CASE(ARMISD::SMULWB) |
| 1848 | MAKE_CASE(ARMISD::SMULWT) |
| 1849 | MAKE_CASE(ARMISD::SMLALD) |
| 1850 | MAKE_CASE(ARMISD::SMLALDX) |
| 1851 | MAKE_CASE(ARMISD::SMLSLD) |
| 1852 | MAKE_CASE(ARMISD::SMLSLDX) |
| 1853 | MAKE_CASE(ARMISD::SMMLAR) |
| 1854 | MAKE_CASE(ARMISD::SMMLSR) |
| 1855 | MAKE_CASE(ARMISD::QADD16b) |
| 1856 | MAKE_CASE(ARMISD::QSUB16b) |
| 1857 | MAKE_CASE(ARMISD::QADD8b) |
| 1858 | MAKE_CASE(ARMISD::QSUB8b) |
| 1859 | MAKE_CASE(ARMISD::UQADD16b) |
| 1860 | MAKE_CASE(ARMISD::UQSUB16b) |
| 1861 | MAKE_CASE(ARMISD::UQADD8b) |
| 1862 | MAKE_CASE(ARMISD::UQSUB8b) |
| 1863 | MAKE_CASE(ARMISD::BUILD_VECTOR) |
| 1864 | MAKE_CASE(ARMISD::BFI) |
| 1865 | MAKE_CASE(ARMISD::VORRIMM) |
| 1866 | MAKE_CASE(ARMISD::VBICIMM) |
| 1867 | MAKE_CASE(ARMISD::VBSP) |
| 1868 | MAKE_CASE(ARMISD::MEMCPY) |
| 1869 | MAKE_CASE(ARMISD::VLD1DUP) |
| 1870 | MAKE_CASE(ARMISD::VLD2DUP) |
| 1871 | MAKE_CASE(ARMISD::VLD3DUP) |
| 1872 | MAKE_CASE(ARMISD::VLD4DUP) |
| 1873 | MAKE_CASE(ARMISD::VLD1_UPD) |
| 1874 | MAKE_CASE(ARMISD::VLD2_UPD) |
| 1875 | MAKE_CASE(ARMISD::VLD3_UPD) |
| 1876 | MAKE_CASE(ARMISD::VLD4_UPD) |
| 1877 | MAKE_CASE(ARMISD::VLD1x2_UPD) |
| 1878 | MAKE_CASE(ARMISD::VLD1x3_UPD) |
| 1879 | MAKE_CASE(ARMISD::VLD1x4_UPD) |
| 1880 | MAKE_CASE(ARMISD::VLD2LN_UPD) |
| 1881 | MAKE_CASE(ARMISD::VLD3LN_UPD) |
| 1882 | MAKE_CASE(ARMISD::VLD4LN_UPD) |
| 1883 | MAKE_CASE(ARMISD::VLD1DUP_UPD) |
| 1884 | MAKE_CASE(ARMISD::VLD2DUP_UPD) |
| 1885 | MAKE_CASE(ARMISD::VLD3DUP_UPD) |
| 1886 | MAKE_CASE(ARMISD::VLD4DUP_UPD) |
| 1887 | MAKE_CASE(ARMISD::VST1_UPD) |
| 1888 | MAKE_CASE(ARMISD::VST2_UPD) |
| 1889 | MAKE_CASE(ARMISD::VST3_UPD) |
| 1890 | MAKE_CASE(ARMISD::VST4_UPD) |
| 1891 | MAKE_CASE(ARMISD::VST1x2_UPD) |
| 1892 | MAKE_CASE(ARMISD::VST1x3_UPD) |
| 1893 | MAKE_CASE(ARMISD::VST1x4_UPD) |
| 1894 | MAKE_CASE(ARMISD::VST2LN_UPD) |
| 1895 | MAKE_CASE(ARMISD::VST3LN_UPD) |
| 1896 | MAKE_CASE(ARMISD::VST4LN_UPD) |
| 1897 | MAKE_CASE(ARMISD::WLS) |
| 1898 | MAKE_CASE(ARMISD::WLSSETUP) |
| 1899 | MAKE_CASE(ARMISD::LE) |
| 1900 | MAKE_CASE(ARMISD::LOOP_DEC) |
| 1901 | MAKE_CASE(ARMISD::CSINV) |
| 1902 | MAKE_CASE(ARMISD::CSNEG) |
| 1903 | MAKE_CASE(ARMISD::CSINC) |
| 1904 | MAKE_CASE(ARMISD::MEMCPYLOOP) |
| 1905 | MAKE_CASE(ARMISD::MEMSETLOOP) |
| 1906 | #undef MAKE_CASE |
| 1907 | } |
| 1908 | return nullptr; |
| 1909 | } |
| 1910 | |
| 1911 | EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &, |
| 1912 | EVT VT) const { |
| 1913 | if (!VT.isVector()) |
| 1914 | return getPointerTy(DL); |
| 1915 | |
| 1916 | // MVE has a predicate register. |
| 1917 | if ((Subtarget->hasMVEIntegerOps() && |
| 1918 | (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 || |
| 1919 | VT == MVT::v16i8)) || |
| 1920 | (Subtarget->hasMVEFloatOps() && |
| 1921 | (VT == MVT::v2f64 || VT == MVT::v4f32 || VT == MVT::v8f16))) |
| 1922 | return MVT::getVectorVT(VT: MVT::i1, EC: VT.getVectorElementCount()); |
| 1923 | return VT.changeVectorElementTypeToInteger(); |
| 1924 | } |
| 1925 | |
| 1926 | /// getRegClassFor - Return the register class that should be used for the |
| 1927 | /// specified value type. |
| 1928 | const TargetRegisterClass * |
| 1929 | ARMTargetLowering::getRegClassFor(MVT VT, bool isDivergent) const { |
| 1930 | (void)isDivergent; |
| 1931 | // Map v4i64 to QQ registers but do not make the type legal. Similarly map |
| 1932 | // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to |
| 1933 | // load / store 4 to 8 consecutive NEON D registers, or 2 to 4 consecutive |
| 1934 | // MVE Q registers. |
| 1935 | if (Subtarget->hasNEON()) { |
| 1936 | if (VT == MVT::v4i64) |
| 1937 | return &ARM::QQPRRegClass; |
| 1938 | if (VT == MVT::v8i64) |
| 1939 | return &ARM::QQQQPRRegClass; |
| 1940 | } |
| 1941 | if (Subtarget->hasMVEIntegerOps()) { |
| 1942 | if (VT == MVT::v4i64) |
| 1943 | return &ARM::MQQPRRegClass; |
| 1944 | if (VT == MVT::v8i64) |
| 1945 | return &ARM::MQQQQPRRegClass; |
| 1946 | } |
| 1947 | return TargetLowering::getRegClassFor(VT); |
| 1948 | } |
| 1949 | |
| 1950 | // memcpy and other memory intrinsics typically try to use LDM/STM if the
| 1951 | // source/dest is aligned and the copy size is large enough. We therefore want |
| 1952 | // to align such objects passed to memory intrinsics. |
| 1953 | bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, |
| 1954 | Align &PrefAlign) const { |
| 1955 | if (!isa<MemIntrinsic>(Val: CI)) |
| 1956 | return false; |
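| | // Only copies of at least 8 bytes benefit from the extra alignment.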
| 1957 | MinSize = 8; |
| 1958 | // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1 |
| 1959 | // cycle faster than 4-byte aligned LDM. |
| 1960 | PrefAlign = |
| 1961 | (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? Align(8) : Align(4)); |
| 1962 | return true; |
| 1963 | } |
| 1964 | |
| 1965 | // Create a fast isel object. |
| 1966 | FastISel * |
| 1967 | ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo, |
| 1968 | const TargetLibraryInfo *libInfo) const { |
| 1969 | return ARM::createFastISel(funcInfo, libInfo); |
| 1970 | } |
| 1971 | |
| 1972 | Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const { |
| 1973 | unsigned NumVals = N->getNumValues(); |
| 1974 | if (!NumVals) |
| 1975 | return Sched::RegPressure; |
| 1976 | |
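| | // Prefer ILP scheduling for nodes that produce floating-point or vector
| | // values; these typically have longer latencies.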
| 1977 | for (unsigned i = 0; i != NumVals; ++i) { |
| 1978 | EVT VT = N->getValueType(ResNo: i); |
| 1979 | if (VT == MVT::Glue || VT == MVT::Other) |
| 1980 | continue; |
| 1981 | if (VT.isFloatingPoint() || VT.isVector()) |
| 1982 | return Sched::ILP; |
| 1983 | } |
| 1984 | |
| 1985 | if (!N->isMachineOpcode()) |
| 1986 | return Sched::RegPressure; |
| 1987 | |
| 1988 | // Loads are scheduled for latency even if the instruction itinerary
| 1989 | // is not available. |
| 1990 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 1991 | const MCInstrDesc &MCID = TII->get(Opcode: N->getMachineOpcode()); |
| 1992 | |
| 1993 | if (MCID.getNumDefs() == 0) |
| 1994 | return Sched::RegPressure; |
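| | // Schedule for ILP when the itinerary says the first result is not
| | // available for more than two cycles.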
| 1995 | if (!Itins->isEmpty() && |
| 1996 | Itins->getOperandCycle(ItinClassIndx: MCID.getSchedClass(), OperandIdx: 0) > 2U) |
| 1997 | return Sched::ILP; |
| 1998 | |
| 1999 | return Sched::RegPressure; |
| 2000 | } |
| 2001 | |
| 2002 | //===----------------------------------------------------------------------===// |
| 2003 | // Lowering Code |
| 2004 | //===----------------------------------------------------------------------===// |
| 2005 | |
| 2006 | static bool isSRL16(const SDValue &Op) { |
| 2007 | if (Op.getOpcode() != ISD::SRL) |
| 2008 | return false; |
| 2009 | if (auto Const = dyn_cast<ConstantSDNode>(Val: Op.getOperand(i: 1))) |
| 2010 | return Const->getZExtValue() == 16; |
| 2011 | return false; |
| 2012 | } |
| 2013 | |
| 2014 | static bool isSRA16(const SDValue &Op) { |
| 2015 | if (Op.getOpcode() != ISD::SRA) |
| 2016 | return false; |
| 2017 | if (auto Const = dyn_cast<ConstantSDNode>(Val: Op.getOperand(i: 1))) |
| 2018 | return Const->getZExtValue() == 16; |
| 2019 | return false; |
| 2020 | } |
| 2021 | |
| 2022 | static bool isSHL16(const SDValue &Op) { |
| 2023 | if (Op.getOpcode() != ISD::SHL) |
| 2024 | return false; |
| 2025 | if (auto Const = dyn_cast<ConstantSDNode>(Val: Op.getOperand(i: 1))) |
| 2026 | return Const->getZExtValue() == 16; |
| 2027 | return false; |
| 2028 | } |
| 2029 | |
| 2030 | // Check for a signed 16-bit value. We special-case SRA because it makes it
| 2031 | // simpler when also looking for SRAs that aren't sign-extending a
| 2032 | // smaller value. Without the check, we'd need to take extra care with |
| 2033 | // checking order for some operations. |
| 2034 | static bool isS16(const SDValue &Op, SelectionDAG &DAG) { |
| 2035 | if (isSRA16(Op)) |
| 2036 | return isSHL16(Op: Op.getOperand(i: 0)); |
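| | // A 32-bit value with 17 known sign bits is a sign-extended 16-bit value.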
| 2037 | return DAG.ComputeNumSignBits(Op) == 17; |
| 2038 | } |
| 2039 | |
| 2040 | /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC |
| 2041 | static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) { |
| 2042 | switch (CC) { |
| 2043 | default: llvm_unreachable("Unknown condition code!" ); |
| 2044 | case ISD::SETNE: return ARMCC::NE; |
| 2045 | case ISD::SETEQ: return ARMCC::EQ; |
| 2046 | case ISD::SETGT: return ARMCC::GT; |
| 2047 | case ISD::SETGE: return ARMCC::GE; |
| 2048 | case ISD::SETLT: return ARMCC::LT; |
| 2049 | case ISD::SETLE: return ARMCC::LE; |
| 2050 | case ISD::SETUGT: return ARMCC::HI; |
| 2051 | case ISD::SETUGE: return ARMCC::HS; |
| 2052 | case ISD::SETULT: return ARMCC::LO; |
| 2053 | case ISD::SETULE: return ARMCC::LS; |
| 2054 | } |
| 2055 | } |
| 2056 | |
| 2057 | /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC. |
| 2058 | static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, |
| 2059 | ARMCC::CondCodes &CondCode2) { |
| 2060 | CondCode2 = ARMCC::AL; |
| 2061 | switch (CC) { |
| 2062 | default: llvm_unreachable("Unknown FP condition!" ); |
| 2063 | case ISD::SETEQ: |
| 2064 | case ISD::SETOEQ: CondCode = ARMCC::EQ; break; |
| 2065 | case ISD::SETGT: |
| 2066 | case ISD::SETOGT: CondCode = ARMCC::GT; break; |
| 2067 | case ISD::SETGE: |
| 2068 | case ISD::SETOGE: CondCode = ARMCC::GE; break; |
| 2069 | case ISD::SETOLT: CondCode = ARMCC::MI; break; |
| 2070 | case ISD::SETOLE: CondCode = ARMCC::LS; break; |
| 2071 | case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break; |
| 2072 | case ISD::SETO: CondCode = ARMCC::VC; break; |
| 2073 | case ISD::SETUO: CondCode = ARMCC::VS; break; |
| 2074 | case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break; |
| 2075 | case ISD::SETUGT: CondCode = ARMCC::HI; break; |
| 2076 | case ISD::SETUGE: CondCode = ARMCC::PL; break; |
| 2077 | case ISD::SETLT: |
| 2078 | case ISD::SETULT: CondCode = ARMCC::LT; break; |
| 2079 | case ISD::SETLE: |
| 2080 | case ISD::SETULE: CondCode = ARMCC::LE; break; |
| 2081 | case ISD::SETNE: |
| 2082 | case ISD::SETUNE: CondCode = ARMCC::NE; break; |
| 2083 | } |
| 2084 | } |
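| | // For example, FPCCToARMCC maps an ordered-not-equal (SETONE) comparison to |
| | // MI with a second condition of GT, since no single ARM condition covers it. |
| | // The sequence eventually emitted looks roughly like (ARM mode, with r0 |
| | // pre-set to 0; illustrative sketch only): |
| | //   vcmp.f32 s0, s1 |
| | //   vmrs    APSR_nzcv, fpscr |
| | //   movmi   r0, #1    @ a < b |
| | //   movgt   r0, #1    @ a > b |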
| 2085 | |
| 2086 | //===----------------------------------------------------------------------===// |
| 2087 | // Calling Convention Implementation |
| 2088 | //===----------------------------------------------------------------------===// |
| 2089 | |
| 2090 | /// getEffectiveCallingConv - Get the effective calling convention, taking into |
| 2091 | /// account presence of floating point hardware and calling convention |
| 2092 | /// limitations, such as support for variadic functions. |
| 2093 | CallingConv::ID |
| 2094 | ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC, |
| 2095 | bool isVarArg) const { |
| 2096 | switch (CC) { |
| 2097 | default: |
| 2098 | report_fatal_error(reason: "Unsupported calling convention" ); |
| 2099 | case CallingConv::ARM_AAPCS: |
| 2100 | case CallingConv::ARM_APCS: |
| 2101 | case CallingConv::GHC: |
| 2102 | case CallingConv::CFGuard_Check: |
| 2103 | return CC; |
| 2104 | case CallingConv::PreserveMost: |
| 2105 | return CallingConv::PreserveMost; |
| 2106 | case CallingConv::PreserveAll: |
| 2107 | return CallingConv::PreserveAll; |
| 2108 | case CallingConv::ARM_AAPCS_VFP: |
| 2109 | case CallingConv::Swift: |
| 2110 | case CallingConv::SwiftTail: |
| 2111 | return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP; |
| 2112 | case CallingConv::C: |
| 2113 | case CallingConv::Tail: |
| 2114 | if (!getTM().isAAPCS_ABI()) |
| 2115 | return CallingConv::ARM_APCS; |
| 2116 | else if (Subtarget->hasFPRegs() && !Subtarget->isThumb1Only() && |
| 2117 | getTargetMachine().Options.FloatABIType == FloatABI::Hard && |
| 2118 | !isVarArg) |
| 2119 | return CallingConv::ARM_AAPCS_VFP; |
| 2120 | else |
| 2121 | return CallingConv::ARM_AAPCS; |
| 2122 | case CallingConv::Fast: |
| 2123 | case CallingConv::CXX_FAST_TLS: |
| 2124 | if (!getTM().isAAPCS_ABI()) { |
| 2125 | if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && !isVarArg) |
| 2126 | return CallingConv::Fast; |
| 2127 | return CallingConv::ARM_APCS; |
| 2128 | } else if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && |
| 2129 | !isVarArg) |
| 2130 | return CallingConv::ARM_AAPCS_VFP; |
| 2131 | else |
| 2132 | return CallingConv::ARM_AAPCS; |
| 2133 | } |
| 2134 | } |
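| | // For example, getEffectiveCallingConv maps a plain CallingConv::C call on an |
| | // AAPCS hard-float target with FP registers (and not Thumb1-only) to |
| | // ARM_AAPCS_VFP, while the same call made variadic falls back to ARM_AAPCS, |
| | // since the VFP variant is never used for varargs. |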
| 2135 | |
| 2136 | CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC, |
| 2137 | bool isVarArg) const { |
| 2138 | return CCAssignFnForNode(CC, Return: false, isVarArg); |
| 2139 | } |
| 2140 | |
| 2141 | CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC, |
| 2142 | bool isVarArg) const { |
| 2143 | return CCAssignFnForNode(CC, Return: true, isVarArg); |
| 2144 | } |
| 2145 | |
| 2146 | /// CCAssignFnForNode - Selects the correct CCAssignFn for the given |
| 2147 | /// CallingConvention. |
| 2148 | CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC, |
| 2149 | bool Return, |
| 2150 | bool isVarArg) const { |
| 2151 | switch (getEffectiveCallingConv(CC, isVarArg)) { |
| 2152 | default: |
| 2153 | report_fatal_error(reason: "Unsupported calling convention" ); |
| 2154 | case CallingConv::ARM_APCS: |
| 2155 | return (Return ? RetCC_ARM_APCS : CC_ARM_APCS); |
| 2156 | case CallingConv::ARM_AAPCS: |
| 2157 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); |
| 2158 | case CallingConv::ARM_AAPCS_VFP: |
| 2159 | return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); |
| 2160 | case CallingConv::Fast: |
| 2161 | return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); |
| 2162 | case CallingConv::GHC: |
| 2163 | return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC); |
| 2164 | case CallingConv::PreserveMost: |
| 2165 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); |
| 2166 | case CallingConv::PreserveAll: |
| 2167 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); |
| 2168 | case CallingConv::CFGuard_Check: |
| 2169 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_Win32_CFGuard_Check); |
| 2170 | } |
| 2171 | } |
| 2172 | |
| 2173 | SDValue ARMTargetLowering::MoveToHPR(const SDLoc &dl, SelectionDAG &DAG, |
| 2174 | MVT LocVT, MVT ValVT, SDValue Val) const { |
| 2175 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocVT.getSizeInBits()), |
| 2176 | Operand: Val); |
| 2177 | if (Subtarget->hasFullFP16()) { |
| 2178 | Val = DAG.getNode(Opcode: ARMISD::VMOVhr, DL: dl, VT: ValVT, Operand: Val); |
| 2179 | } else { |
| 2180 | Val = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, |
| 2181 | VT: MVT::getIntegerVT(BitWidth: ValVT.getSizeInBits()), Operand: Val); |
| 2182 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: ValVT, Operand: Val); |
| 2183 | } |
| 2184 | return Val; |
| 2185 | } |
| 2186 | |
| 2187 | SDValue ARMTargetLowering::MoveFromHPR(const SDLoc &dl, SelectionDAG &DAG, |
| 2188 | MVT LocVT, MVT ValVT, |
| 2189 | SDValue Val) const { |
| 2190 | if (Subtarget->hasFullFP16()) { |
| 2191 | Val = DAG.getNode(Opcode: ARMISD::VMOVrh, DL: dl, |
| 2192 | VT: MVT::getIntegerVT(BitWidth: LocVT.getSizeInBits()), Operand: Val); |
| 2193 | } else { |
| 2194 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, |
| 2195 | VT: MVT::getIntegerVT(BitWidth: ValVT.getSizeInBits()), Operand: Val); |
| 2196 | Val = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, |
| 2197 | VT: MVT::getIntegerVT(BitWidth: LocVT.getSizeInBits()), Operand: Val); |
| 2198 | } |
| 2199 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: LocVT, Operand: Val); |
| 2200 | } |
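| | // Illustrative sketch: with +fullfp16, MoveToHPR/MoveFromHPR use a single |
| | // VMOVhr/VMOVrh to move the half-precision value to or from its i32/f32 |
| | // location; without it the value is shuffled through integer types, e.g. for |
| | // the hard-float direction f16 -> (bitcast) i16 -> (zext) i32 -> (bitcast) f32. |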
| 2201 | |
| 2202 | /// LowerCallResult - Lower the result values of a call into the |
| 2203 | /// appropriate copies out of appropriate physical registers. |
| 2204 | SDValue ARMTargetLowering::LowerCallResult( |
| 2205 | SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg, |
| 2206 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
| 2207 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn, |
| 2208 | SDValue ThisVal, bool isCmseNSCall) const { |
| 2209 | // Assign locations to each value returned by this call. |
| 2210 | SmallVector<CCValAssign, 16> RVLocs; |
| 2211 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, |
| 2212 | *DAG.getContext()); |
| 2213 | CCInfo.AnalyzeCallResult(Ins, Fn: CCAssignFnForReturn(CC: CallConv, isVarArg)); |
| 2214 | |
| 2215 | // Copy all of the result registers out of their specified physreg. |
| 2216 | for (unsigned i = 0; i != RVLocs.size(); ++i) { |
| 2217 | CCValAssign VA = RVLocs[i]; |
| 2218 | |
| 2219 | // Pass 'this' value directly from the argument to return value, to avoid |
| 2220 | // reg unit interference |
| 2221 | if (i == 0 && isThisReturn) { |
| 2222 | assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 && |
| 2223 | "unexpected return calling convention register assignment" ); |
| 2224 | InVals.push_back(Elt: ThisVal); |
| 2225 | continue; |
| 2226 | } |
| 2227 | |
| 2228 | SDValue Val; |
| 2229 | if (VA.needsCustom() && |
| 2230 | (VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2f64)) { |
| 2231 | // Handle f64 or half of a v2f64. |
| 2232 | SDValue Lo = DAG.getCopyFromReg(Chain, dl, Reg: VA.getLocReg(), VT: MVT::i32, |
| 2233 | Glue: InGlue); |
| 2234 | Chain = Lo.getValue(R: 1); |
| 2235 | InGlue = Lo.getValue(R: 2); |
| 2236 | VA = RVLocs[++i]; // skip ahead to next loc |
| 2237 | SDValue Hi = DAG.getCopyFromReg(Chain, dl, Reg: VA.getLocReg(), VT: MVT::i32, |
| 2238 | Glue: InGlue); |
| 2239 | Chain = Hi.getValue(R: 1); |
| 2240 | InGlue = Hi.getValue(R: 2); |
| 2241 | if (!Subtarget->isLittle()) |
| 2242 | std::swap (a&: Lo, b&: Hi); |
| 2243 | Val = DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: Lo, N2: Hi); |
| 2244 | |
| 2245 | if (VA.getLocVT() == MVT::v2f64) { |
| 2246 | SDValue Vec = DAG.getNode(Opcode: ISD::UNDEF, DL: dl, VT: MVT::v2f64); |
| 2247 | Vec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: Vec, N2: Val, |
| 2248 | N3: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 2249 | |
| 2250 | VA = RVLocs[++i]; // skip ahead to next loc |
| 2251 | Lo = DAG.getCopyFromReg(Chain, dl, Reg: VA.getLocReg(), VT: MVT::i32, Glue: InGlue); |
| 2252 | Chain = Lo.getValue(R: 1); |
| 2253 | InGlue = Lo.getValue(R: 2); |
| 2254 | VA = RVLocs[++i]; // skip ahead to next loc |
| 2255 | Hi = DAG.getCopyFromReg(Chain, dl, Reg: VA.getLocReg(), VT: MVT::i32, Glue: InGlue); |
| 2256 | Chain = Hi.getValue(R: 1); |
| 2257 | InGlue = Hi.getValue(R: 2); |
| 2258 | if (!Subtarget->isLittle()) |
| 2259 | std::swap (a&: Lo, b&: Hi); |
| 2260 | Val = DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: Lo, N2: Hi); |
| 2261 | Val = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: Vec, N2: Val, |
| 2262 | N3: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
| 2263 | } |
| 2264 | } else { |
| 2265 | Val = DAG.getCopyFromReg(Chain, dl, Reg: VA.getLocReg(), VT: VA.getLocVT(), |
| 2266 | Glue: InGlue); |
| 2267 | Chain = Val.getValue(R: 1); |
| 2268 | InGlue = Val.getValue(R: 2); |
| 2269 | } |
| 2270 | |
| 2271 | switch (VA.getLocInfo()) { |
| 2272 | default: llvm_unreachable("Unknown loc info!" ); |
| 2273 | case CCValAssign::Full: break; |
| 2274 | case CCValAssign::BCvt: |
| 2275 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getValVT(), Operand: Val); |
| 2276 | break; |
| 2277 | } |
| 2278 | |
| 2279 | // f16 arguments have their size extended to 4 bytes and are passed as if |
| 2280 | // they had been copied to the LSBs of a 32-bit register. |
| 2281 | // For that, they are passed extended to i32 (soft ABI) or to f32 (hard ABI). |
| 2282 | if (VA.needsCustom() && |
| 2283 | (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) |
| 2284 | Val = MoveToHPR(dl, DAG, LocVT: VA.getLocVT(), ValVT: VA.getValVT(), Val); |
| 2285 | |
| 2286 | // On CMSE Non-secure Calls, call results (returned values) whose bitwidth |
| 2287 | // is less than 32 bits must be sign- or zero-extended after the call for |
| 2288 | // security reasons. Although the ABI mandates an extension done by the |
| 2289 | // callee, the latter cannot be trusted to follow the rules of the ABI. |
| 2290 | const ISD::InputArg &Arg = Ins[VA.getValNo()]; |
| 2291 | if (isCmseNSCall && Arg.ArgVT.isScalarInteger() && |
| 2292 | VA.getLocVT().isScalarInteger() && Arg.ArgVT.bitsLT(VT: MVT::i32)) |
| 2293 | Val = handleCMSEValue(Value: Val, Arg, DAG, DL: dl); |
| 2294 | |
| 2295 | InVals.push_back(Elt: Val); |
| 2296 | } |
| 2297 | |
| 2298 | return Chain; |
| 2299 | } |
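| | // For example, in LowerCallResult an f64 returned under the base AAPCS |
| | // arrives in r0/r1; the two CopyFromReg results are recombined with VMOVDRR |
| | // (with the pair swapped on big-endian targets), and a v2f64 result is |
| | // rebuilt by inserting two such f64 halves into an initially undef vector. |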
| 2300 | |
| 2301 | std::pair<SDValue, MachinePointerInfo> ARMTargetLowering::computeAddrForCallArg( |
| 2302 | const SDLoc &dl, SelectionDAG &DAG, const CCValAssign &VA, SDValue StackPtr, |
| 2303 | bool IsTailCall, int SPDiff) const { |
| 2304 | SDValue DstAddr; |
| 2305 | MachinePointerInfo DstInfo; |
| 2306 | int32_t Offset = VA.getLocMemOffset(); |
| 2307 | MachineFunction &MF = DAG.getMachineFunction(); |
| 2308 | |
| 2309 | if (IsTailCall) { |
| 2310 | Offset += SPDiff; |
| 2311 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 2312 | int Size = VA.getLocVT().getFixedSizeInBits() / 8; |
| 2313 | int FI = MF.getFrameInfo().CreateFixedObject(Size, SPOffset: Offset, IsImmutable: true); |
| 2314 | DstAddr = DAG.getFrameIndex(FI, VT: PtrVT); |
| 2315 | DstInfo = |
| 2316 | MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI); |
| 2317 | } else { |
| 2318 | SDValue PtrOff = DAG.getIntPtrConstant(Val: Offset, DL: dl); |
| 2319 | DstAddr = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: getPointerTy(DL: DAG.getDataLayout()), |
| 2320 | N1: StackPtr, N2: PtrOff); |
| 2321 | DstInfo = |
| 2322 | MachinePointerInfo::getStack(MF&: DAG.getMachineFunction(), Offset); |
| 2323 | } |
| 2324 | |
| 2325 | return std::make_pair(x&: DstAddr, y&: DstInfo); |
| 2326 | } |
| 2327 | |
| 2328 | // Returns the type of copying which is required to set up a byval argument to |
| 2329 | // a tail-called function. This isn't needed for non-tail calls, because they |
| 2330 | // always need the equivalent of CopyOnce, but tail-calls sometimes need two |
| 2331 | // copies to avoid clobbering another argument (CopyViaTemp), and sometimes |
| 2332 | // can be optimised to zero copies when forwarding an argument from the caller's |
| 2333 | // caller (NoCopy). |
| 2334 | ARMTargetLowering::ByValCopyKind ARMTargetLowering::ByValNeedsCopyForTailCall( |
| 2335 | SelectionDAG &DAG, SDValue Src, SDValue Dst, ISD::ArgFlagsTy Flags) const { |
| 2336 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); |
| 2337 | ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); |
| 2338 | |
| 2339 | // Globals are always safe to copy from. |
| 2340 | if (isa<GlobalAddressSDNode>(Val: Src) || isa<ExternalSymbolSDNode>(Val: Src)) |
| 2341 | return CopyOnce; |
| 2342 | |
| 2343 | // Can only analyse frame index nodes, conservatively assume we need a |
| 2344 | // temporary. |
| 2345 | auto *SrcFrameIdxNode = dyn_cast<FrameIndexSDNode>(Val&: Src); |
| 2346 | auto *DstFrameIdxNode = dyn_cast<FrameIndexSDNode>(Val&: Dst); |
| 2347 | if (!SrcFrameIdxNode || !DstFrameIdxNode) |
| 2348 | return CopyViaTemp; |
| 2349 | |
| 2350 | int SrcFI = SrcFrameIdxNode->getIndex(); |
| 2351 | int DstFI = DstFrameIdxNode->getIndex(); |
| 2352 | assert(MFI.isFixedObjectIndex(DstFI) && |
| 2353 | "byval passed in non-fixed stack slot" ); |
| 2354 | |
| 2355 | int64_t SrcOffset = MFI.getObjectOffset(ObjectIdx: SrcFI); |
| 2356 | int64_t DstOffset = MFI.getObjectOffset(ObjectIdx: DstFI); |
| 2357 | |
| 2358 | // If the source is in the local frame, then the copy to the argument memory |
| 2359 | // is always valid. |
| 2360 | bool FixedSrc = MFI.isFixedObjectIndex(ObjectIdx: SrcFI); |
| 2361 | if (!FixedSrc || |
| 2362 | (FixedSrc && SrcOffset < -(int64_t)AFI->getArgRegsSaveSize())) |
| 2363 | return CopyOnce; |
| 2364 | |
| 2365 | // In the case of byval arguments split between registers and the stack, |
| 2366 | // computeAddrForCallArg returns a FrameIndex which corresponds only to the |
| 2367 | // stack portion, but the Src SDValue will refer to the full value, including |
| 2368 | // the local stack memory that the register portion gets stored into. We only |
| 2369 | // need to compare them for equality, so normalise on the full value version. |
| 2370 | uint64_t RegSize = Flags.getByValSize() - MFI.getObjectSize(ObjectIdx: DstFI); |
| 2371 | DstOffset -= RegSize; |
| 2372 | |
| 2373 | // If the value is already in the correct location, then no copying is |
| 2374 | // needed. If not, then we need to copy via a temporary. |
| 2375 | if (SrcOffset == DstOffset) |
| 2376 | return NoCopy; |
| 2377 | else |
| 2378 | return CopyViaTemp; |
| 2379 | } |
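| | // Worked example for ByValNeedsCopyForTailCall: if Src is the caller's own |
| | // incoming byval argument sitting at the same fixed offset as Dst, no copy is |
| | // needed (NoCopy). If Src is a global or lives in the caller's local frame, |
| | // nothing else being set up for the call can clobber it, so a single late |
| | // copy suffices (CopyOnce). Otherwise, e.g. an incoming argument at a |
| | // different offset, the value is staged through a temporary (CopyViaTemp). |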
| 2380 | |
| 2381 | void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, |
| 2382 | SDValue Chain, SDValue &Arg, |
| 2383 | RegsToPassVector &RegsToPass, |
| 2384 | CCValAssign &VA, CCValAssign &NextVA, |
| 2385 | SDValue &StackPtr, |
| 2386 | SmallVectorImpl<SDValue> &MemOpChains, |
| 2387 | bool IsTailCall, |
| 2388 | int SPDiff) const { |
| 2389 | SDValue fmrrd = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
| 2390 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: Arg); |
| 2391 | unsigned id = Subtarget->isLittle() ? 0 : 1; |
| 2392 | RegsToPass.push_back(Elt: std::make_pair(x: VA.getLocReg(), y: fmrrd.getValue(R: id))); |
| 2393 | |
| 2394 | if (NextVA.isRegLoc()) |
| 2395 | RegsToPass.push_back(Elt: std::make_pair(x: NextVA.getLocReg(), y: fmrrd.getValue(R: 1-id))); |
| 2396 | else { |
| 2397 | assert(NextVA.isMemLoc()); |
| 2398 | if (!StackPtr.getNode()) |
| 2399 | StackPtr = DAG.getCopyFromReg(Chain, dl, Reg: ARM::SP, |
| 2400 | VT: getPointerTy(DL: DAG.getDataLayout())); |
| 2401 | |
| 2402 | SDValue DstAddr; |
| 2403 | MachinePointerInfo DstInfo; |
| 2404 | std::tie(args&: DstAddr, args&: DstInfo) = |
| 2405 | computeAddrForCallArg(dl, DAG, VA: NextVA, StackPtr, IsTailCall, SPDiff); |
| 2406 | MemOpChains.push_back( |
| 2407 | Elt: DAG.getStore(Chain, dl, Val: fmrrd.getValue(R: 1 - id), Ptr: DstAddr, PtrInfo: DstInfo)); |
| 2408 | } |
| 2409 | } |
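| | // For example, PassF64ArgInRegs splits an f64 assigned to r0/r1 with |
| | // VMOVRRD; the 'id' selection above sends the low word to the first register |
| | // on little-endian targets and swaps the pair on big-endian ones. If only one |
| | // GPR is left, the remaining word is stored to the stack slot computed by |
| | // computeAddrForCallArg. |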
| 2410 | |
| 2411 | static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) { |
| 2412 | return (CC == CallingConv::Fast && GuaranteeTailCalls) || |
| 2413 | CC == CallingConv::Tail || CC == CallingConv::SwiftTail; |
| 2414 | } |
| 2415 | |
| 2416 | /// LowerCall - Lowering a call into a callseq_start <- |
| 2417 | /// ARMISD:CALL <- callseq_end chain. Also add input and output parameter |
| 2418 | /// nodes. |
| 2419 | SDValue |
| 2420 | ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, |
| 2421 | SmallVectorImpl<SDValue> &InVals) const { |
| 2422 | SelectionDAG &DAG = CLI.DAG; |
| 2423 | SDLoc &dl = CLI.DL; |
| 2424 | SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; |
| 2425 | SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; |
| 2426 | SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; |
| 2427 | SDValue Chain = CLI.Chain; |
| 2428 | SDValue Callee = CLI.Callee; |
| 2429 | bool &isTailCall = CLI.IsTailCall; |
| 2430 | CallingConv::ID CallConv = CLI.CallConv; |
| 2431 | bool doesNotRet = CLI.DoesNotReturn; |
| 2432 | bool isVarArg = CLI.IsVarArg; |
| 2433 | |
| 2434 | MachineFunction &MF = DAG.getMachineFunction(); |
| 2435 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 2436 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); |
| 2437 | MachineFunction::CallSiteInfo CSInfo; |
| 2438 | bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet(); |
| 2439 | bool isThisReturn = false; |
| 2440 | bool isCmseNSCall = false; |
| 2441 | bool isSibCall = false; |
| 2442 | bool PreferIndirect = false; |
| 2443 | bool GuardWithBTI = false; |
| 2444 | |
| 2445 | // Analyze operands of the call, assigning locations to each operand. |
| 2446 | SmallVector<CCValAssign, 16> ArgLocs; |
| 2447 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, |
| 2448 | *DAG.getContext()); |
| 2449 | CCInfo.AnalyzeCallOperands(Outs, Fn: CCAssignFnForCall(CC: CallConv, isVarArg)); |
| 2450 | |
| 2451 | // Lower 'returns_twice' calls to a pseudo-instruction. |
| 2452 | if (CLI.CB && CLI.CB->getAttributes().hasFnAttr(Kind: Attribute::ReturnsTwice) && |
| 2453 | !Subtarget->noBTIAtReturnTwice()) |
| 2454 | GuardWithBTI = AFI->branchTargetEnforcement(); |
| 2455 | |
| 2456 | // Determine whether this is a non-secure function call. |
| 2457 | if (CLI.CB && CLI.CB->getAttributes().hasFnAttr(Kind: "cmse_nonsecure_call" )) |
| 2458 | isCmseNSCall = true; |
| 2459 | |
| 2460 | // Disable tail calls if they're not supported. |
| 2461 | if (!Subtarget->supportsTailCall()) |
| 2462 | isTailCall = false; |
| 2463 | |
| 2464 | // For both the non-secure calls and the returns from a CMSE entry function, |
| 2465 | // the function needs to do some extra work after the call, or before the |
| 2466 | // return, respectively; thus it cannot end with a tail call. |
| 2467 | if (isCmseNSCall || AFI->isCmseNSEntryFunction()) |
| 2468 | isTailCall = false; |
| 2469 | |
| 2470 | if (isa<GlobalAddressSDNode>(Val: Callee)) { |
| 2471 | // If we're optimizing for minimum size and the function is called three or |
| 2472 | // more times in this block, we can improve code size by calling indirectly |
| 2473 | // as BLXr has a 16-bit encoding. |
| 2474 | auto *GV = cast<GlobalAddressSDNode>(Val&: Callee)->getGlobal(); |
| 2475 | if (CLI.CB) { |
| 2476 | auto *BB = CLI.CB->getParent(); |
| 2477 | PreferIndirect = Subtarget->isThumb() && Subtarget->hasMinSize() && |
| 2478 | count_if(Range: GV->users(), P: [&BB](const User *U) { |
| 2479 | return isa<Instruction>(Val: U) && |
| 2480 | cast<Instruction>(Val: U)->getParent() == BB; |
| 2481 | }) > 2; |
| 2482 | } |
| 2483 | } |
| 2484 | if (isTailCall) { |
| 2485 | // Check if it's really possible to do a tail call. |
| 2486 | isTailCall = |
| 2487 | IsEligibleForTailCallOptimization(CLI, CCInfo, ArgLocs, isIndirect: PreferIndirect); |
| 2488 | |
| 2489 | if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt && |
| 2490 | CallConv != CallingConv::Tail && CallConv != CallingConv::SwiftTail) |
| 2491 | isSibCall = true; |
| 2492 | |
| 2493 | // We don't support GuaranteedTailCallOpt for ARM, only automatically |
| 2494 | // detected sibcalls. |
| 2495 | if (isTailCall) |
| 2496 | ++NumTailCalls; |
| 2497 | } |
| 2498 | |
| 2499 | if (!isTailCall && CLI.CB && CLI.CB->isMustTailCall()) |
| 2500 | report_fatal_error(reason: "failed to perform tail call elimination on a call " |
| 2501 | "site marked musttail" ); |
| 2502 | |
| 2503 | // Get a count of how many bytes are to be pushed on the stack. |
| 2504 | unsigned NumBytes = CCInfo.getStackSize(); |
| 2505 | |
| 2506 | // SPDiff is the byte offset of the call's argument area from the callee's. |
| 2507 | // Stores to callee stack arguments will be placed in FixedStackSlots offset |
| 2508 | // by this amount for a tail call. In a sibling call it must be 0 because the |
| 2509 | // caller will deallocate the entire stack and the callee still expects its |
| 2510 | // arguments to begin at SP+0. Completely unused for non-tail calls. |
| 2511 | int SPDiff = 0; |
| 2512 | |
| 2513 | if (isTailCall && !isSibCall) { |
| 2514 | auto FuncInfo = MF.getInfo<ARMFunctionInfo>(); |
| 2515 | unsigned NumReusableBytes = FuncInfo->getArgumentStackSize(); |
| 2516 | |
| 2517 | // Since callee will pop argument stack as a tail call, we must keep the |
| 2518 | // popped size 16-byte aligned. |
| 2519 | MaybeAlign StackAlign = DAG.getDataLayout().getStackAlignment(); |
| 2520 | assert(StackAlign && "data layout string is missing stack alignment" ); |
| 2521 | NumBytes = alignTo(Size: NumBytes, A: *StackAlign); |
| 2522 | |
| 2523 | // SPDiff will be negative if this tail call requires more space than we |
| 2524 | // would automatically have in our incoming argument space. Positive if we |
| 2525 | // can actually shrink the stack. |
| 2526 | SPDiff = NumReusableBytes - NumBytes; |
| 2527 | |
| 2528 | // If this call requires more stack than we have available from |
| 2529 | // LowerFormalArguments, tell FrameLowering to reserve space for it. |
| 2530 | if (SPDiff < 0 && AFI->getArgRegsSaveSize() < (unsigned)-SPDiff) |
| 2531 | AFI->setArgRegsSaveSize(-SPDiff); |
| 2532 | } |
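| | // Worked example: if the caller received 16 bytes of stack arguments |
| | // (NumReusableBytes == 16) but this tail call needs 32 bytes after alignment |
| | // (NumBytes == 32), then SPDiff == -16 and 16 extra bytes are reserved via |
| | // setArgRegsSaveSize so frame lowering leaves room for the outgoing |
| | // arguments. |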
| 2533 | |
| 2534 | if (isSibCall) { |
| 2535 | // For sibling tail calls, memory operands are available in our caller's stack. |
| 2536 | NumBytes = 0; |
| 2537 | } else { |
| 2538 | // Adjust the stack pointer for the new arguments... |
| 2539 | // These operations are automatically eliminated by the prolog/epilog pass |
| 2540 | Chain = DAG.getCALLSEQ_START(Chain, InSize: isTailCall ? 0 : NumBytes, OutSize: 0, DL: dl); |
| 2541 | } |
| 2542 | |
| 2543 | SDValue StackPtr = |
| 2544 | DAG.getCopyFromReg(Chain, dl, Reg: ARM::SP, VT: getPointerTy(DL: DAG.getDataLayout())); |
| 2545 | |
| 2546 | RegsToPassVector RegsToPass; |
| 2547 | SmallVector<SDValue, 8> MemOpChains; |
| 2548 | |
| 2549 | // If we are doing a tail-call, any byval arguments will be written to stack |
| 2550 | // space which was used for incoming arguments. If any of the values being |
| 2551 | // used are incoming byval arguments to this function, then they might be |
| 2552 | // overwritten by the stores of the outgoing arguments. To avoid this, we |
| 2553 | // need to make a temporary copy of them in local stack space, then copy back |
| 2554 | // to the argument area. |
| 2555 | DenseMap<unsigned, SDValue> ByValTemporaries; |
| 2556 | SDValue ByValTempChain; |
| 2557 | if (isTailCall) { |
| 2558 | SmallVector<SDValue, 8> ByValCopyChains; |
| 2559 | for (const CCValAssign &VA : ArgLocs) { |
| 2560 | unsigned ArgIdx = VA.getValNo(); |
| 2561 | SDValue Src = OutVals[ArgIdx]; |
| 2562 | ISD::ArgFlagsTy Flags = Outs[ArgIdx].Flags; |
| 2563 | |
| 2564 | if (!Flags.isByVal()) |
| 2565 | continue; |
| 2566 | |
| 2567 | SDValue Dst; |
| 2568 | MachinePointerInfo DstInfo; |
| 2569 | std::tie(args&: Dst, args&: DstInfo) = |
| 2570 | computeAddrForCallArg(dl, DAG, VA, StackPtr: SDValue(), IsTailCall: true, SPDiff); |
| 2571 | ByValCopyKind Copy = ByValNeedsCopyForTailCall(DAG, Src, Dst, Flags); |
| 2572 | |
| 2573 | if (Copy == NoCopy) { |
| 2574 | // If the argument is already at the correct offset on the stack |
| 2575 | // (because we are forwarding a byval argument from our caller), we |
| 2576 | // don't need any copying. |
| 2577 | continue; |
| 2578 | } else if (Copy == CopyOnce) { |
| 2579 | // If the argument is in our local stack frame, no other argument |
| 2580 | // preparation can clobber it, so we can copy it to the final location |
| 2581 | // later. |
| 2582 | ByValTemporaries[ArgIdx] = Src; |
| 2583 | } else { |
| 2584 | assert(Copy == CopyViaTemp && "unexpected enum value" ); |
| 2585 | // If we might be copying this argument from the outgoing argument |
| 2586 | // stack area, we need to copy via a temporary in the local stack |
| 2587 | // frame. |
| 2588 | int TempFrameIdx = MFI.CreateStackObject( |
| 2589 | Size: Flags.getByValSize(), Alignment: Flags.getNonZeroByValAlign(), isSpillSlot: false); |
| 2590 | SDValue Temp = |
| 2591 | DAG.getFrameIndex(FI: TempFrameIdx, VT: getPointerTy(DL: DAG.getDataLayout())); |
| 2592 | |
| 2593 | SDValue SizeNode = DAG.getConstant(Val: Flags.getByValSize(), DL: dl, VT: MVT::i32); |
| 2594 | SDValue AlignNode = |
| 2595 | DAG.getConstant(Val: Flags.getNonZeroByValAlign().value(), DL: dl, VT: MVT::i32); |
| 2596 | |
| 2597 | SDVTList VTs = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue); |
| 2598 | SDValue Ops[] = {Chain, Temp, Src, SizeNode, AlignNode}; |
| 2599 | ByValCopyChains.push_back( |
| 2600 | Elt: DAG.getNode(Opcode: ARMISD::COPY_STRUCT_BYVAL, DL: dl, VTList: VTs, Ops)); |
| 2601 | ByValTemporaries[ArgIdx] = Temp; |
| 2602 | } |
| 2603 | } |
| 2604 | if (!ByValCopyChains.empty()) |
| 2605 | ByValTempChain = |
| 2606 | DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, Ops: ByValCopyChains); |
| 2607 | } |
| 2608 | |
| 2609 | // During a tail call, stores to the argument area must happen after all of |
| 2610 | // the function's incoming arguments have been loaded because they may alias. |
| 2611 | // This is done by folding in a TokenFactor from LowerFormalArguments, but |
| 2612 | // there's no point in doing so repeatedly so this tracks whether that's |
| 2613 | // happened yet. |
| 2614 | bool AfterFormalArgLoads = false; |
| 2615 | |
| 2616 | // Walk the register/memloc assignments, inserting copies/loads. In the case |
| 2617 | // of tail call optimization, arguments are handled later. |
| 2618 | for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); |
| 2619 | i != e; |
| 2620 | ++i, ++realArgIdx) { |
| 2621 | CCValAssign &VA = ArgLocs[i]; |
| 2622 | SDValue Arg = OutVals[realArgIdx]; |
| 2623 | ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; |
| 2624 | bool isByVal = Flags.isByVal(); |
| 2625 | |
| 2626 | // Promote the value if needed. |
| 2627 | switch (VA.getLocInfo()) { |
| 2628 | default: llvm_unreachable("Unknown loc info!" ); |
| 2629 | case CCValAssign::Full: break; |
| 2630 | case CCValAssign::SExt: |
| 2631 | Arg = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
| 2632 | break; |
| 2633 | case CCValAssign::ZExt: |
| 2634 | Arg = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
| 2635 | break; |
| 2636 | case CCValAssign::AExt: |
| 2637 | Arg = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
| 2638 | break; |
| 2639 | case CCValAssign::BCvt: |
| 2640 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
| 2641 | break; |
| 2642 | } |
| 2643 | |
| 2644 | if (isTailCall && VA.isMemLoc() && !AfterFormalArgLoads) { |
| 2645 | Chain = DAG.getStackArgumentTokenFactor(Chain); |
| 2646 | if (ByValTempChain) |
| 2647 | Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, N1: Chain, |
| 2648 | N2: ByValTempChain); |
| 2649 | AfterFormalArgLoads = true; |
| 2650 | } |
| 2651 | |
| 2652 | // f16 arguments have their size extended to 4 bytes and are passed as if |
| 2653 | // they had been copied to the LSBs of a 32-bit register. |
| 2654 | // For that, they are passed extended to i32 (soft ABI) or to f32 (hard ABI). |
| 2655 | if (VA.needsCustom() && |
| 2656 | (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) { |
| 2657 | Arg = MoveFromHPR(dl, DAG, LocVT: VA.getLocVT(), ValVT: VA.getValVT(), Val: Arg); |
| 2658 | } else { |
| 2659 | // f16 arguments could have been extended prior to argument lowering. |
| 2660 | // Mask those arguments if this is a CMSE nonsecure call. |
| 2661 | auto ArgVT = Outs[realArgIdx].ArgVT; |
| 2662 | if (isCmseNSCall && (ArgVT == MVT::f16)) { |
| 2663 | auto LocBits = VA.getLocVT().getSizeInBits(); |
| 2664 | auto MaskValue = APInt::getLowBitsSet(numBits: LocBits, loBitsSet: ArgVT.getSizeInBits()); |
| 2665 | SDValue Mask = |
| 2666 | DAG.getConstant(Val: MaskValue, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits)); |
| 2667 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits), Operand: Arg); |
| 2668 | Arg = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits), N1: Arg, N2: Mask); |
| 2669 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
| 2670 | } |
| 2671 | } |
| 2672 | |
| 2673 | // f64 and v2f64 might be passed in i32 pairs and must be split into pieces |
| 2674 | if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) { |
| 2675 | SDValue Op0 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, N1: Arg, |
| 2676 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 2677 | SDValue Op1 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, N1: Arg, |
| 2678 | N2: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
| 2679 | |
| 2680 | PassF64ArgInRegs(dl, DAG, Chain, Arg&: Op0, RegsToPass, VA, NextVA&: ArgLocs[++i], |
| 2681 | StackPtr, MemOpChains, IsTailCall: isTailCall, SPDiff); |
| 2682 | |
| 2683 | VA = ArgLocs[++i]; // skip ahead to next loc |
| 2684 | if (VA.isRegLoc()) { |
| 2685 | PassF64ArgInRegs(dl, DAG, Chain, Arg&: Op1, RegsToPass, VA, NextVA&: ArgLocs[++i], |
| 2686 | StackPtr, MemOpChains, IsTailCall: isTailCall, SPDiff); |
| 2687 | } else { |
| 2688 | assert(VA.isMemLoc()); |
| 2689 | SDValue DstAddr; |
| 2690 | MachinePointerInfo DstInfo; |
| 2691 | std::tie(args&: DstAddr, args&: DstInfo) = |
| 2692 | computeAddrForCallArg(dl, DAG, VA, StackPtr, IsTailCall: isTailCall, SPDiff); |
| 2693 | MemOpChains.push_back(Elt: DAG.getStore(Chain, dl, Val: Op1, Ptr: DstAddr, PtrInfo: DstInfo)); |
| 2694 | } |
| 2695 | } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) { |
| 2696 | PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, NextVA&: ArgLocs[++i], |
| 2697 | StackPtr, MemOpChains, IsTailCall: isTailCall, SPDiff); |
| 2698 | } else if (VA.isRegLoc()) { |
| 2699 | if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() && |
| 2700 | Outs[0].VT == MVT::i32) { |
| 2701 | assert(VA.getLocVT() == MVT::i32 && |
| 2702 | "unexpected calling convention register assignment" ); |
| 2703 | assert(!Ins.empty() && Ins[0].VT == MVT::i32 && |
| 2704 | "unexpected use of 'returned'" ); |
| 2705 | isThisReturn = true; |
| 2706 | } |
| 2707 | const TargetOptions &Options = DAG.getTarget().Options; |
| 2708 | if (Options.EmitCallSiteInfo) |
| 2709 | CSInfo.ArgRegPairs.emplace_back(Args: VA.getLocReg(), Args&: i); |
| 2710 | RegsToPass.push_back(Elt: std::make_pair(x: VA.getLocReg(), y&: Arg)); |
| 2711 | } else if (isByVal) { |
| 2712 | assert(VA.isMemLoc()); |
| 2713 | unsigned offset = 0; |
| 2714 | |
| 2715 | // True if this byval aggregate will be split between registers |
| 2716 | // and memory. |
| 2717 | unsigned ByValArgsCount = CCInfo.getInRegsParamsCount(); |
| 2718 | unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed(); |
| 2719 | |
| 2720 | SDValue ByValSrc; |
| 2721 | bool NeedsStackCopy; |
| 2722 | if (auto It = ByValTemporaries.find(Val: realArgIdx); |
| 2723 | It != ByValTemporaries.end()) { |
| 2724 | ByValSrc = It->second; |
| 2725 | NeedsStackCopy = true; |
| 2726 | } else { |
| 2727 | ByValSrc = Arg; |
| 2728 | NeedsStackCopy = !isTailCall; |
| 2729 | } |
| 2730 | |
| 2731 | // If part of the argument is in registers, load them. |
| 2732 | if (CurByValIdx < ByValArgsCount) { |
| 2733 | unsigned RegBegin, RegEnd; |
| 2734 | CCInfo.getInRegsParamInfo(InRegsParamRecordIndex: CurByValIdx, BeginReg&: RegBegin, EndReg&: RegEnd); |
| 2735 | |
| 2736 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 2737 | unsigned int i, j; |
| 2738 | for (i = 0, j = RegBegin; j < RegEnd; i++, j++) { |
| 2739 | SDValue Const = DAG.getConstant(Val: 4*i, DL: dl, VT: MVT::i32); |
| 2740 | SDValue AddArg = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: ByValSrc, N2: Const); |
| 2741 | SDValue Load = |
| 2742 | DAG.getLoad(VT: PtrVT, dl, Chain, Ptr: AddArg, PtrInfo: MachinePointerInfo(), |
| 2743 | Alignment: DAG.InferPtrAlign(Ptr: AddArg)); |
| 2744 | MemOpChains.push_back(Elt: Load.getValue(R: 1)); |
| 2745 | RegsToPass.push_back(Elt: std::make_pair(x&: j, y&: Load)); |
| 2746 | } |
| 2747 | |
| 2748 | // If the parameter size extends beyond the register area, the "offset" |
| 2749 | // value helps us calculate the stack slot for the remaining part properly. |
| 2750 | offset = RegEnd - RegBegin; |
| 2751 | |
| 2752 | CCInfo.nextInRegsParam(); |
| 2753 | } |
| 2754 | |
| 2755 | // If the memory part of the argument isn't already in the correct place |
| 2756 | // (which can happen with tail calls), copy it into the argument area. |
| 2757 | if (NeedsStackCopy && Flags.getByValSize() > 4 * offset) { |
| 2758 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 2759 | SDValue Dst; |
| 2760 | MachinePointerInfo DstInfo; |
| 2761 | std::tie(args&: Dst, args&: DstInfo) = |
| 2762 | computeAddrForCallArg(dl, DAG, VA, StackPtr, IsTailCall: isTailCall, SPDiff); |
| 2763 | SDValue SrcOffset = DAG.getIntPtrConstant(Val: 4*offset, DL: dl); |
| 2764 | SDValue Src = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: ByValSrc, N2: SrcOffset); |
| 2765 | SDValue SizeNode = DAG.getConstant(Val: Flags.getByValSize() - 4*offset, DL: dl, |
| 2766 | VT: MVT::i32); |
| 2767 | SDValue AlignNode = |
| 2768 | DAG.getConstant(Val: Flags.getNonZeroByValAlign().value(), DL: dl, VT: MVT::i32); |
| 2769 | |
| 2770 | SDVTList VTs = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue); |
| 2771 | SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode}; |
| 2772 | MemOpChains.push_back(Elt: DAG.getNode(Opcode: ARMISD::COPY_STRUCT_BYVAL, DL: dl, VTList: VTs, |
| 2773 | Ops)); |
| 2774 | } |
| 2775 | } else { |
| 2776 | assert(VA.isMemLoc()); |
| 2777 | SDValue DstAddr; |
| 2778 | MachinePointerInfo DstInfo; |
| 2779 | std::tie(args&: DstAddr, args&: DstInfo) = |
| 2780 | computeAddrForCallArg(dl, DAG, VA, StackPtr, IsTailCall: isTailCall, SPDiff); |
| 2781 | |
| 2782 | SDValue Store = DAG.getStore(Chain, dl, Val: Arg, Ptr: DstAddr, PtrInfo: DstInfo); |
| 2783 | MemOpChains.push_back(Elt: Store); |
| 2784 | } |
| 2785 | } |
| 2786 | |
| 2787 | if (!MemOpChains.empty()) |
| 2788 | Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, Ops: MemOpChains); |
| 2789 | |
| 2790 | // Build a sequence of copy-to-reg nodes chained together with token chain |
| 2791 | // and flag operands which copy the outgoing args into the appropriate regs. |
| 2792 | SDValue InGlue; |
| 2793 | for (const auto &[Reg, N] : RegsToPass) { |
| 2794 | Chain = DAG.getCopyToReg(Chain, dl, Reg, N, Glue: InGlue); |
| 2795 | InGlue = Chain.getValue(R: 1); |
| 2796 | } |
| 2797 | |
| 2798 | // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every |
| 2799 | // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol |
| 2800 | // node so that legalize doesn't hack it. |
| 2801 | bool isDirect = false; |
| 2802 | |
| 2803 | const TargetMachine &TM = getTargetMachine(); |
| 2804 | const GlobalValue *GVal = nullptr; |
| 2805 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Val&: Callee)) |
| 2806 | GVal = G->getGlobal(); |
| 2807 | bool isStub = !TM.shouldAssumeDSOLocal(GV: GVal) && Subtarget->isTargetMachO(); |
| 2808 | |
| 2809 | bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass()); |
| 2810 | bool isLocalARMFunc = false; |
| 2811 | auto PtrVt = getPointerTy(DL: DAG.getDataLayout()); |
| 2812 | |
| 2813 | if (Subtarget->genLongCalls()) { |
| 2814 | assert((!isPositionIndependent() || Subtarget->isTargetWindows()) && |
| 2815 | "long-calls codegen is not position independent!" ); |
| 2816 | // Handle a global address or an external symbol. If it's not one of |
| 2817 | // those, the target's already in a register, so we don't need to do |
| 2818 | // anything extra. |
| 2819 | if (isa<GlobalAddressSDNode>(Val: Callee)) { |
| 2820 | if (Subtarget->genExecuteOnly()) { |
| 2821 | if (Subtarget->useMovt()) |
| 2822 | ++NumMovwMovt; |
| 2823 | Callee = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: PtrVt, |
| 2824 | Operand: DAG.getTargetGlobalAddress(GV: GVal, DL: dl, VT: PtrVt)); |
| 2825 | } else { |
| 2826 | // Create a constant pool entry for the callee address |
| 2827 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 2828 | ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create( |
| 2829 | C: GVal, ID: ARMPCLabelIndex, Kind: ARMCP::CPValue, PCAdj: 0); |
| 2830 | |
| 2831 | // Get the address of the callee into a register |
| 2832 | SDValue Addr = DAG.getTargetConstantPool(C: CPV, VT: PtrVt, Align: Align(4)); |
| 2833 | Addr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Addr); |
| 2834 | Callee = DAG.getLoad( |
| 2835 | VT: PtrVt, dl, Chain: DAG.getEntryNode(), Ptr: Addr, |
| 2836 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 2837 | } |
| 2838 | } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Val&: Callee)) { |
| 2839 | const char *Sym = S->getSymbol(); |
| 2840 | |
| 2841 | if (Subtarget->genExecuteOnly()) { |
| 2842 | if (Subtarget->useMovt()) |
| 2843 | ++NumMovwMovt; |
| 2844 | Callee = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: PtrVt, |
| 2845 | Operand: DAG.getTargetGlobalAddress(GV: GVal, DL: dl, VT: PtrVt)); |
| 2846 | } else { |
| 2847 | // Create a constant pool entry for the callee address |
| 2848 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 2849 | ARMConstantPoolValue *CPV = ARMConstantPoolSymbol::Create( |
| 2850 | C&: *DAG.getContext(), s: Sym, ID: ARMPCLabelIndex, PCAdj: 0); |
| 2851 | |
| 2852 | // Get the address of the callee into a register |
| 2853 | SDValue Addr = DAG.getTargetConstantPool(C: CPV, VT: PtrVt, Align: Align(4)); |
| 2854 | Addr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Addr); |
| 2855 | Callee = DAG.getLoad( |
| 2856 | VT: PtrVt, dl, Chain: DAG.getEntryNode(), Ptr: Addr, |
| 2857 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 2858 | } |
| 2859 | } |
| 2860 | } else if (isa<GlobalAddressSDNode>(Val: Callee)) { |
| 2861 | if (!PreferIndirect) { |
| 2862 | isDirect = true; |
| 2863 | bool isDef = GVal->isStrongDefinitionForLinker(); |
| 2864 | |
| 2865 | // ARM call to a local ARM function is predicable. |
| 2866 | isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking); |
| 2867 | // tBX takes a register source operand. |
| 2868 | if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { |
| 2869 | assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?" ); |
| 2870 | Callee = DAG.getNode( |
| 2871 | Opcode: ARMISD::WrapperPIC, DL: dl, VT: PtrVt, |
| 2872 | Operand: DAG.getTargetGlobalAddress(GV: GVal, DL: dl, VT: PtrVt, offset: 0, TargetFlags: ARMII::MO_NONLAZY)); |
| 2873 | Callee = DAG.getLoad( |
| 2874 | VT: PtrVt, dl, Chain: DAG.getEntryNode(), Ptr: Callee, |
| 2875 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction()), Alignment: MaybeAlign(), |
| 2876 | MMOFlags: MachineMemOperand::MODereferenceable | |
| 2877 | MachineMemOperand::MOInvariant); |
| 2878 | } else if (Subtarget->isTargetCOFF()) { |
| 2879 | assert(Subtarget->isTargetWindows() && |
| 2880 | "Windows is the only supported COFF target" ); |
| 2881 | unsigned TargetFlags = ARMII::MO_NO_FLAG; |
| 2882 | if (GVal->hasDLLImportStorageClass()) |
| 2883 | TargetFlags = ARMII::MO_DLLIMPORT; |
| 2884 | else if (!TM.shouldAssumeDSOLocal(GV: GVal)) |
| 2885 | TargetFlags = ARMII::MO_COFFSTUB; |
| 2886 | Callee = DAG.getTargetGlobalAddress(GV: GVal, DL: dl, VT: PtrVt, /*offset=*/0, |
| 2887 | TargetFlags); |
| 2888 | if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB)) |
| 2889 | Callee = |
| 2890 | DAG.getLoad(VT: PtrVt, dl, Chain: DAG.getEntryNode(), |
| 2891 | Ptr: DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: PtrVt, Operand: Callee), |
| 2892 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction())); |
| 2893 | } else { |
| 2894 | Callee = DAG.getTargetGlobalAddress(GV: GVal, DL: dl, VT: PtrVt, offset: 0, TargetFlags: 0); |
| 2895 | } |
| 2896 | } |
| 2897 | } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Val&: Callee)) { |
| 2898 | isDirect = true; |
| 2899 | // tBX takes a register source operand. |
| 2900 | const char *Sym = S->getSymbol(); |
| 2901 | if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { |
| 2902 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 2903 | ARMConstantPoolValue *CPV = |
| 2904 | ARMConstantPoolSymbol::Create(C&: *DAG.getContext(), s: Sym, |
| 2905 | ID: ARMPCLabelIndex, PCAdj: 4); |
| 2906 | SDValue CPAddr = DAG.getTargetConstantPool(C: CPV, VT: PtrVt, Align: Align(4)); |
| 2907 | CPAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: CPAddr); |
| 2908 | Callee = DAG.getLoad( |
| 2909 | VT: PtrVt, dl, Chain: DAG.getEntryNode(), Ptr: CPAddr, |
| 2910 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 2911 | SDValue PICLabel = DAG.getConstant(Val: ARMPCLabelIndex, DL: dl, VT: MVT::i32); |
| 2912 | Callee = DAG.getNode(Opcode: ARMISD::PIC_ADD, DL: dl, VT: PtrVt, N1: Callee, N2: PICLabel); |
| 2913 | } else { |
| 2914 | Callee = DAG.getTargetExternalSymbol(Sym, VT: PtrVt, TargetFlags: 0); |
| 2915 | } |
| 2916 | } |
| 2917 | |
| 2918 | if (isCmseNSCall) { |
| 2919 | assert(!isARMFunc && !isDirect && |
| 2920 | "Cannot handle call to ARM function or direct call" ); |
| 2921 | if (NumBytes > 0) { |
| 2922 | DAG.getContext()->diagnose( |
| 2923 | DI: DiagnosticInfoUnsupported(DAG.getMachineFunction().getFunction(), |
| 2924 | "call to non-secure function would require " |
| 2925 | "passing arguments on stack" , |
| 2926 | dl.getDebugLoc())); |
| 2927 | } |
| 2928 | if (isStructRet) { |
| 2929 | DAG.getContext()->diagnose(DI: DiagnosticInfoUnsupported( |
| 2930 | DAG.getMachineFunction().getFunction(), |
| 2931 | "call to non-secure function would return value through pointer" , |
| 2932 | dl.getDebugLoc())); |
| 2933 | } |
| 2934 | } |
| 2935 | |
| 2936 | // FIXME: handle tail calls differently. |
| 2937 | unsigned CallOpc; |
| 2938 | if (Subtarget->isThumb()) { |
| 2939 | if (GuardWithBTI) |
| 2940 | CallOpc = ARMISD::t2CALL_BTI; |
| 2941 | else if (isCmseNSCall) |
| 2942 | CallOpc = ARMISD::tSECALL; |
| 2943 | else if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) |
| 2944 | CallOpc = ARMISD::CALL_NOLINK; |
| 2945 | else |
| 2946 | CallOpc = ARMISD::CALL; |
| 2947 | } else { |
| 2948 | if (!isDirect && !Subtarget->hasV5TOps()) |
| 2949 | CallOpc = ARMISD::CALL_NOLINK; |
| 2950 | else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() && |
| 2951 | // Emit regular call when code size is the priority |
| 2952 | !Subtarget->hasMinSize()) |
| 2953 | // "mov lr, pc; b _foo" to avoid confusing the return stack predictor |
| 2954 | CallOpc = ARMISD::CALL_NOLINK; |
| 2955 | else |
| 2956 | CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL; |
| 2957 | } |
| 2958 | |
| 2959 | // We don't usually want to end the call-sequence here because we would tidy |
| 2960 | // the frame up *after* the call; however, in the ABI-changing tail-call case |
| 2961 | // we've carefully laid out the parameters so that when sp is reset they'll be |
| 2962 | // in the correct location. |
| 2963 | if (isTailCall && !isSibCall) { |
| 2964 | Chain = DAG.getCALLSEQ_END(Chain, Size1: 0, Size2: 0, Glue: InGlue, DL: dl); |
| 2965 | InGlue = Chain.getValue(R: 1); |
| 2966 | } |
| 2967 | |
| 2968 | std::vector<SDValue> Ops; |
| 2969 | Ops.push_back(x: Chain); |
| 2970 | Ops.push_back(x: Callee); |
| 2971 | |
| 2972 | if (isTailCall) { |
| 2973 | Ops.push_back(x: DAG.getSignedTargetConstant(Val: SPDiff, DL: dl, VT: MVT::i32)); |
| 2974 | } |
| 2975 | |
| 2976 | // Add argument registers to the end of the list so that they are known live |
| 2977 | // into the call. |
| 2978 | for (const auto &[Reg, N] : RegsToPass) |
| 2979 | Ops.push_back(x: DAG.getRegister(Reg, VT: N.getValueType())); |
| 2980 | |
| 2981 | // Add a register mask operand representing the call-preserved registers. |
| 2982 | const uint32_t *Mask; |
| 2983 | const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo(); |
| 2984 | if (isThisReturn) { |
| 2985 | // For 'this' returns, use the R0-preserving mask if applicable |
| 2986 | Mask = ARI->getThisReturnPreservedMask(MF, CallConv); |
| 2987 | if (!Mask) { |
| 2988 | // Set isThisReturn to false if the calling convention is not one that |
| 2989 | // allows 'returned' to be modeled in this way, so LowerCallResult does |
| 2990 | // not try to pass 'this' straight through |
| 2991 | isThisReturn = false; |
| 2992 | Mask = ARI->getCallPreservedMask(MF, CallConv); |
| 2993 | } |
| 2994 | } else |
| 2995 | Mask = ARI->getCallPreservedMask(MF, CallConv); |
| 2996 | |
| 2997 | assert(Mask && "Missing call preserved mask for calling convention" ); |
| 2998 | Ops.push_back(x: DAG.getRegisterMask(RegMask: Mask)); |
| 2999 | |
| 3000 | if (InGlue.getNode()) |
| 3001 | Ops.push_back(x: InGlue); |
| 3002 | |
| 3003 | if (isTailCall) { |
| 3004 | MF.getFrameInfo().setHasTailCall(); |
| 3005 | SDValue Ret = DAG.getNode(Opcode: ARMISD::TC_RETURN, DL: dl, VT: MVT::Other, Ops); |
| 3006 | DAG.addNoMergeSiteInfo(Node: Ret.getNode(), NoMerge: CLI.NoMerge); |
| 3007 | DAG.addCallSiteInfo(Node: Ret.getNode(), CallInfo: std::move(CSInfo)); |
| 3008 | return Ret; |
| 3009 | } |
| 3010 | |
| 3011 | // Returns a chain and a flag for retval copy to use. |
| 3012 | Chain = DAG.getNode(Opcode: CallOpc, DL: dl, ResultTys: {MVT::Other, MVT::Glue}, Ops); |
| 3013 | DAG.addNoMergeSiteInfo(Node: Chain.getNode(), NoMerge: CLI.NoMerge); |
| 3014 | InGlue = Chain.getValue(R: 1); |
| 3015 | DAG.addCallSiteInfo(Node: Chain.getNode(), CallInfo: std::move(CSInfo)); |
| 3016 | |
| 3017 | // If we're guaranteeing tail-calls will be honoured, the callee must |
| 3018 | // pop its own argument stack on return. But this call is *not* a tail call so |
| 3019 | // we need to undo that after it returns to restore the status-quo. |
| 3020 | bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt; |
| 3021 | uint64_t CalleePopBytes = |
| 3022 | canGuaranteeTCO(CC: CallConv, GuaranteeTailCalls: TailCallOpt) ? alignTo(Value: NumBytes, Align: 16) : -1U; |
| 3023 | |
| 3024 | Chain = DAG.getCALLSEQ_END(Chain, Size1: NumBytes, Size2: CalleePopBytes, Glue: InGlue, DL: dl); |
| 3025 | if (!Ins.empty()) |
| 3026 | InGlue = Chain.getValue(R: 1); |
| 3027 | |
| 3028 | // Handle result values, copying them out of physregs into vregs that we |
| 3029 | // return. |
| 3030 | return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, dl, DAG, |
| 3031 | InVals, isThisReturn, |
| 3032 | ThisVal: isThisReturn ? OutVals[0] : SDValue(), isCmseNSCall); |
| 3033 | } |
| 3034 | |
| 3035 | /// HandleByVal - Every parameter *after* a byval parameter is passed |
| 3036 | /// on the stack. Remember the next parameter register to allocate, |
| 3037 | /// and then confiscate the rest of the parameter registers to ensure |
| 3038 | /// this. |
| 3039 | void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size, |
| 3040 | Align Alignment) const { |
| 3041 | // Byval (as with any stack) slots are always at least 4 byte aligned. |
| 3042 | Alignment = std::max(a: Alignment, b: Align(4)); |
| 3043 | |
| 3044 | MCRegister Reg = State->AllocateReg(Regs: GPRArgRegs); |
| 3045 | if (!Reg) |
| 3046 | return; |
| 3047 | |
| 3048 | unsigned AlignInRegs = Alignment.value() / 4; |
| 3049 | unsigned Waste = (ARM::R4 - Reg) % AlignInRegs; |
| 3050 | for (unsigned i = 0; i < Waste; ++i) |
| 3051 | Reg = State->AllocateReg(Regs: GPRArgRegs); |
| 3052 | |
| 3053 | if (!Reg) |
| 3054 | return; |
| 3055 | |
| 3056 | unsigned Excess = 4 * (ARM::R4 - Reg); |
| 3057 | |
| 3058 | // Special case when NSAA != SP and the parameter size is greater than the |
| 3059 | // size of all remaining GPR regs. In that case we can't split the |
| 3060 | // parameter, we must send it to the stack. We also must set NCRN to R4, |
| 3061 | // so waste all remaining registers. |
| 3062 | const unsigned NSAAOffset = State->getStackSize(); |
| 3063 | if (NSAAOffset != 0 && Size > Excess) { |
| 3064 | while (State->AllocateReg(Regs: GPRArgRegs)) |
| 3065 | ; |
| 3066 | return; |
| 3067 | } |
| 3068 | |
| 3069 | // The first register for the byval parameter is the first register that |
| 3070 | // wasn't allocated before this method call, so it would be "reg". |
| 3071 | // If the parameter is small enough to be saved in the range [reg, r4), then |
| 3072 | // the end (one past the last) register would be reg + param-size-in-regs; |
| 3073 | // otherwise the parameter is split between registers and the stack, and the |
| 3074 | // end register is r4 in that case. |
| 3075 | unsigned ByValRegBegin = Reg; |
| 3076 | unsigned ByValRegEnd = std::min<unsigned>(a: Reg + Size / 4, b: ARM::R4); |
| 3077 | State->addInRegsParamInfo(RegBegin: ByValRegBegin, RegEnd: ByValRegEnd); |
| 3078 | // Note, the first register was already allocated at the beginning of this |
| 3079 | // function; allocate the remaining registers we need. |
| 3080 | for (unsigned i = Reg + 1; i != ByValRegEnd; ++i) |
| 3081 | State->AllocateReg(Regs: GPRArgRegs); |
| 3082 | // A byval parameter that is split between registers and memory needs its |
| 3083 | // size truncated here. |
| 3084 | // In the case where the entire structure fits in registers, we set the |
| 3085 | // size in memory to zero. |
| 3086 | Size = std::max<int>(a: Size - Excess, b: 0); |
| 3087 | } |
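| | // Worked example for HandleByVal: a 12-byte byval with 8-byte alignment when |
| | // r0 is already taken (and no stack arguments allocated yet): r1 is wasted to |
| | // reach an aligned register pair, the first 8 bytes are passed in r2-r3, and |
| | // Size is reduced to 4, the portion that still goes on the stack. |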
| 3088 | |
| 3089 | /// IsEligibleForTailCallOptimization - Check whether the call is eligible |
| 3090 | /// for tail call optimization. Targets which want to do tail call |
| 3091 | /// optimization should implement this function. Note that this function also |
| 3092 | /// processes musttail calls, so when this function returns false on a valid |
| 3093 | /// musttail call, a fatal backend error occurs. |
| 3094 | bool ARMTargetLowering::IsEligibleForTailCallOptimization( |
| 3095 | TargetLowering::CallLoweringInfo &CLI, CCState &CCInfo, |
| 3096 | SmallVectorImpl<CCValAssign> &ArgLocs, const bool isIndirect) const { |
| 3097 | CallingConv::ID CalleeCC = CLI.CallConv; |
| 3098 | SDValue Callee = CLI.Callee; |
| 3099 | bool isVarArg = CLI.IsVarArg; |
| 3100 | const SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; |
| 3101 | const SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; |
| 3102 | const SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; |
| 3103 | const SelectionDAG &DAG = CLI.DAG; |
| 3104 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3105 | const Function &CallerF = MF.getFunction(); |
| 3106 | CallingConv::ID CallerCC = CallerF.getCallingConv(); |
| 3107 | |
| 3108 | assert(Subtarget->supportsTailCall()); |
| 3109 | |
| 3110 | // Indirect tail-calls require a register to hold the target address. That |
| 3111 | // register must be: |
| 3112 | // * Allocatable (i.e. r0-r7 if the target is Thumb1). |
| 3113 | // * Not callee-saved, so must be one of r0-r3 or r12. |
| 3114 | // * Not used to hold an argument to the tail-called function, which might be |
| 3115 | // in r0-r3. |
| 3116 | // * Not used to hold the return address authentication code, which is in r12 |
| 3117 | // if enabled. |
| 3118 | // Sometimes, no register matches all of these conditions, so we can't do a |
| 3119 | // tail-call. |
| 3120 | if (!isa<GlobalAddressSDNode>(Val: Callee.getNode()) || isIndirect) { |
| 3121 | SmallSet<MCPhysReg, 5> AddressRegisters = {ARM::R0, ARM::R1, ARM::R2, |
| 3122 | ARM::R3}; |
| 3123 | if (!(Subtarget->isThumb1Only() || |
| 3124 | MF.getInfo<ARMFunctionInfo>()->shouldSignReturnAddress(SpillsLR: true))) |
| 3125 | AddressRegisters.insert(V: ARM::R12); |
| 3126 | for (const CCValAssign &AL : ArgLocs) |
| 3127 | if (AL.isRegLoc()) |
| 3128 | AddressRegisters.erase(V: AL.getLocReg()); |
| 3129 | if (AddressRegisters.empty()) { |
| 3130 | LLVM_DEBUG(dbgs() << "false (no reg to hold function pointer)\n" ); |
| 3131 | return false; |
| 3132 | } |
| 3133 | } |
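| | // For example, an indirect call in Thumb1 code where r0-r3 all carry |
| | // arguments leaves no allocatable register for the target address (r12 is |
| | // not usable there), so the tail call is rejected up front. |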
| 3134 | |
| 3135 | // Look for obvious safe cases to perform tail call optimization that do not |
| 3136 | // require ABI changes. This is what gcc calls sibcall. |
| 3137 | |
| 3138 | // Exception-handling functions need a special set of instructions to indicate |
| 3139 | // a return to the hardware. Tail-calling another function would probably |
| 3140 | // break this. |
| 3141 | if (CallerF.hasFnAttribute(Kind: "interrupt" )) { |
| 3142 | LLVM_DEBUG(dbgs() << "false (interrupt attribute)\n" ); |
| 3143 | return false; |
| 3144 | } |
| 3145 | |
| 3146 | if (canGuaranteeTCO(CC: CalleeCC, |
| 3147 | GuaranteeTailCalls: getTargetMachine().Options.GuaranteedTailCallOpt)) { |
| 3148 | LLVM_DEBUG(dbgs() << (CalleeCC == CallerCC ? "true" : "false" ) |
| 3149 | << " (guaranteed tail-call CC)\n" ); |
| 3150 | return CalleeCC == CallerCC; |
| 3151 | } |
| 3152 | |
| 3153 | // Also avoid sibcall optimization if either caller or callee uses struct |
| 3154 | // return semantics. |
| 3155 | bool isCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet(); |
| 3156 | bool isCallerStructRet = MF.getFunction().hasStructRetAttr(); |
| 3157 | if (isCalleeStructRet != isCallerStructRet) { |
| 3158 | LLVM_DEBUG(dbgs() << "false (struct-ret)\n" ); |
| 3159 | return false; |
| 3160 | } |
| 3161 | |
| 3162 | // Externally-defined functions with weak linkage should not be |
| 3163 | // tail-called on ARM when the OS does not support dynamic |
| 3164 | // pre-emption of symbols, as the AAELF spec requires normal calls |
| 3165 | // to undefined weak functions to be replaced with a NOP or jump to the |
| 3166 | // next instruction. The behaviour of branch instructions in this |
| 3167 | // situation (as used for tail calls) is implementation-defined, so we |
| 3168 | // cannot rely on the linker replacing the tail call with a return. |
| 3169 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Val&: Callee)) { |
| 3170 | const GlobalValue *GV = G->getGlobal(); |
| 3171 | const Triple &TT = getTargetMachine().getTargetTriple(); |
| 3172 | if (GV->hasExternalWeakLinkage() && |
| 3173 | (!TT.isOSWindows() || TT.isOSBinFormatELF() || |
| 3174 | TT.isOSBinFormatMachO())) { |
| 3175 | LLVM_DEBUG(dbgs() << "false (external weak linkage)\n" ); |
| 3176 | return false; |
| 3177 | } |
| 3178 | } |
| 3179 | |
| 3180 | // Check that the call results are passed in the same way. |
| 3181 | LLVMContext &C = *DAG.getContext(); |
| 3182 | if (!CCState::resultsCompatible( |
| 3183 | CalleeCC: getEffectiveCallingConv(CC: CalleeCC, isVarArg), |
| 3184 | CallerCC: getEffectiveCallingConv(CC: CallerCC, isVarArg: CallerF.isVarArg()), MF, C, Ins, |
| 3185 | CalleeFn: CCAssignFnForReturn(CC: CalleeCC, isVarArg), |
| 3186 | CallerFn: CCAssignFnForReturn(CC: CallerCC, isVarArg: CallerF.isVarArg()))) { |
| 3187 | LLVM_DEBUG(dbgs() << "false (incompatible results)\n" ); |
| 3188 | return false; |
| 3189 | } |
| 3190 | // The callee has to preserve all registers the caller needs to preserve. |
| 3191 | const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 3192 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); |
| 3193 | if (CalleeCC != CallerCC) { |
| 3194 | const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); |
| 3195 | if (!TRI->regmaskSubsetEqual(mask0: CallerPreserved, mask1: CalleePreserved)) { |
| 3196 | LLVM_DEBUG(dbgs() << "false (not all registers preserved)\n" ); |
| 3197 | return false; |
| 3198 | } |
| 3199 | } |
| 3200 | |
| 3201 | // If Caller's vararg argument has been split between registers and stack, do |
| 3202 | // not perform tail call, since part of the argument is in caller's local |
| 3203 | // frame. |
| 3204 | const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>(); |
| 3205 | if (CLI.IsVarArg && AFI_Caller->getArgRegsSaveSize()) { |
| 3206 | LLVM_DEBUG(dbgs() << "false (arg reg save area)\n" ); |
| 3207 | return false; |
| 3208 | } |
| 3209 | |
| 3210 | // If the callee takes no arguments then go on to check the results of the |
| 3211 | // call. |
| 3212 | const MachineRegisterInfo &MRI = MF.getRegInfo(); |
| 3213 | if (!parametersInCSRMatch(MRI, CallerPreservedMask: CallerPreserved, ArgLocs, OutVals)) { |
| 3214 | LLVM_DEBUG(dbgs() << "false (parameters in CSRs do not match)\n" ); |
| 3215 | return false; |
| 3216 | } |
| 3217 | |
| 3218 | // If the stack arguments for this call do not fit into our own save area then |
| 3219 | // the call cannot be made tail. |
| 3220 | if (CCInfo.getStackSize() > AFI_Caller->getArgumentStackSize()) |
| 3221 | return false; |
| 3222 | |
| 3223 | LLVM_DEBUG(dbgs() << "true\n" ); |
| 3224 | return true; |
| 3225 | } |
| 3226 | |
| 3227 | bool |
| 3228 | ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv, |
| 3229 | MachineFunction &MF, bool isVarArg, |
| 3230 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
| 3231 | LLVMContext &Context, const Type *RetTy) const { |
| 3232 | SmallVector<CCValAssign, 16> RVLocs; |
| 3233 | CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); |
| 3234 | return CCInfo.CheckReturn(Outs, Fn: CCAssignFnForReturn(CC: CallConv, isVarArg)); |
| 3235 | } |
| 3236 | |
| 3237 | static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps, |
| 3238 | const SDLoc &DL, SelectionDAG &DAG) { |
| 3239 | const MachineFunction &MF = DAG.getMachineFunction(); |
| 3240 | const Function &F = MF.getFunction(); |
| 3241 | |
| 3242 | StringRef IntKind = F.getFnAttribute(Kind: "interrupt" ).getValueAsString(); |
| 3243 | |
| 3244 | // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset |
| 3245 | // version of the "preferred return address". These offsets affect the return |
| 3246 | // instruction if this is a return from PL1 without hypervisor extensions. |
| 3247 | // IRQ/FIQ: +4 "subs pc, lr, #4" |
| 3248 | // SWI: 0 "subs pc, lr, #0" |
| 3249 | // ABORT: +4 "subs pc, lr, #4" |
| 3250 | // UNDEF: +4/+2 "subs pc, lr, #0" |
| 3251 | // The UNDEF offset varies depending on whether the exception came from ARM or |
| 3252 | // Thumb mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0. |
| 3253 | |
| 3254 | int64_t LROffset; |
| 3255 | if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" || |
| 3256 | IntKind == "ABORT" ) |
| 3257 | LROffset = 4; |
| 3258 | else if (IntKind == "SWI" || IntKind == "UNDEF" ) |
| 3259 | LROffset = 0; |
| 3260 | else |
| 3261 | report_fatal_error(reason: "Unsupported interrupt attribute. If present, value " |
| 3262 | "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF" ); |
| 3263 | |
| 3264 | RetOps.insert(I: RetOps.begin() + 1, |
| 3265 | Elt: DAG.getConstant(Val: LROffset, DL, VT: MVT::i32, isTarget: false)); |
| 3266 | |
| 3267 | return DAG.getNode(Opcode: ARMISD::INTRET_GLUE, DL, VT: MVT::Other, Ops: RetOps); |
| 3268 | } |
| 3269 | |
| 3270 | SDValue |
| 3271 | ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, |
| 3272 | bool isVarArg, |
| 3273 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
| 3274 | const SmallVectorImpl<SDValue> &OutVals, |
| 3275 | const SDLoc &dl, SelectionDAG &DAG) const { |
| 3276 | // CCValAssign - represent the assignment of the return value to a location. |
| 3277 | SmallVector<CCValAssign, 16> RVLocs; |
| 3278 | |
| 3279 | // CCState - Info about the registers and stack slots. |
| 3280 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, |
| 3281 | *DAG.getContext()); |
| 3282 | |
| 3283 | // Analyze outgoing return values. |
| 3284 | CCInfo.AnalyzeReturn(Outs, Fn: CCAssignFnForReturn(CC: CallConv, isVarArg)); |
| 3285 | |
| 3286 | SDValue Glue; |
| 3287 | SmallVector<SDValue, 4> RetOps; |
| 3288 | RetOps.push_back(Elt: Chain); // Operand #0 = Chain (updated below) |
| 3289 | bool isLittleEndian = Subtarget->isLittle(); |
| 3290 | |
| 3291 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3292 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3293 | AFI->setReturnRegsCount(RVLocs.size()); |
| 3294 | |
| 3295 | // Report error if cmse entry function returns structure through first ptr arg. |
| 3296 | if (AFI->isCmseNSEntryFunction() && MF.getFunction().hasStructRetAttr()) { |
| 3297 | // Note: using an empty SDLoc(), as the first line of the function is a |
| 3298 | // better place to report than the last line. |
| 3299 | DAG.getContext()->diagnose(DI: DiagnosticInfoUnsupported( |
| 3300 | DAG.getMachineFunction().getFunction(), |
| 3301 | "secure entry function would return value through pointer" , |
| 3302 | SDLoc().getDebugLoc())); |
| 3303 | } |
| 3304 | |
| 3305 | // Copy the result values into the output registers. |
| 3306 | for (unsigned i = 0, realRVLocIdx = 0; |
| 3307 | i != RVLocs.size(); |
| 3308 | ++i, ++realRVLocIdx) { |
| 3309 | CCValAssign &VA = RVLocs[i]; |
| 3310 | assert(VA.isRegLoc() && "Can only return in registers!" ); |
| 3311 | |
| 3312 | SDValue Arg = OutVals[realRVLocIdx]; |
| 3313 | bool ReturnF16 = false; |
| 3314 | |
| 3315 | if (Subtarget->hasFullFP16() && getTM().isTargetHardFloat()) { |
| 3316 | // Half-precision return values can be returned like this: |
| 3317 | // |
| 3318 | // t11: f16 = fadd ... |
| 3319 | // t12: i16 = bitcast t11 |
| 3320 | // t13: i32 = zero_extend t12 |
| 3321 | // t14: f32 = bitcast t13 <~~~~~~~ Arg |
| 3322 | // |
| 3323 | // to avoid code generation for bitcasts, we simply set Arg to the node |
| 3324 | // that produces the f16 value, t11 in this case. |
| 3325 | // |
| 3326 | if (Arg.getValueType() == MVT::f32 && Arg.getOpcode() == ISD::BITCAST) { |
| 3327 | SDValue ZE = Arg.getOperand(i: 0); |
| 3328 | if (ZE.getOpcode() == ISD::ZERO_EXTEND && ZE.getValueType() == MVT::i32) { |
| 3329 | SDValue BC = ZE.getOperand(i: 0); |
| 3330 | if (BC.getOpcode() == ISD::BITCAST && BC.getValueType() == MVT::i16) { |
| 3331 | Arg = BC.getOperand(i: 0); |
| 3332 | ReturnF16 = true; |
| 3333 | } |
| 3334 | } |
| 3335 | } |
| 3336 | } |
| 3337 | |
| 3338 | switch (VA.getLocInfo()) { |
| 3339 | default: llvm_unreachable("Unknown loc info!" ); |
| 3340 | case CCValAssign::Full: break; |
| 3341 | case CCValAssign::BCvt: |
| 3342 | if (!ReturnF16) |
| 3343 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
| 3344 | break; |
| 3345 | } |
| 3346 | |
| 3347 | // Mask f16 arguments if this is a CMSE nonsecure entry. |
| 3348 | auto RetVT = Outs[realRVLocIdx].ArgVT; |
| 3349 | if (AFI->isCmseNSEntryFunction() && (RetVT == MVT::f16)) { |
| 3350 | if (VA.needsCustom() && VA.getValVT() == MVT::f16) { |
| 3351 | Arg = MoveFromHPR(dl, DAG, LocVT: VA.getLocVT(), ValVT: VA.getValVT(), Val: Arg); |
| 3352 | } else { |
| 3353 | auto LocBits = VA.getLocVT().getSizeInBits(); |
| 3354 | auto MaskValue = APInt::getLowBitsSet(numBits: LocBits, loBitsSet: RetVT.getSizeInBits()); |
| 3355 | SDValue Mask = |
| 3356 | DAG.getConstant(Val: MaskValue, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits)); |
| 3357 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits), Operand: Arg); |
| 3358 | Arg = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits), N1: Arg, N2: Mask); |
| 3359 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
| 3360 | } |
| 3361 | } |
| 3362 | |
| 3363 | if (VA.needsCustom() && |
| 3364 | (VA.getLocVT() == MVT::v2f64 || VA.getLocVT() == MVT::f64)) { |
| 3365 | if (VA.getLocVT() == MVT::v2f64) { |
| 3366 | // Extract the first half and return it in two registers. |
| 3367 | SDValue Half = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, N1: Arg, |
| 3368 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 3369 | SDValue HalfGPRs = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
| 3370 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: Half); |
| 3371 | |
| 3372 | Chain = |
| 3373 | DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), |
| 3374 | N: HalfGPRs.getValue(R: isLittleEndian ? 0 : 1), Glue); |
| 3375 | Glue = Chain.getValue(R: 1); |
| 3376 | RetOps.push_back(Elt: DAG.getRegister(Reg: VA.getLocReg(), VT: VA.getLocVT())); |
| 3377 | VA = RVLocs[++i]; // skip ahead to next loc |
| 3378 | Chain = |
| 3379 | DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), |
| 3380 | N: HalfGPRs.getValue(R: isLittleEndian ? 1 : 0), Glue); |
| 3381 | Glue = Chain.getValue(R: 1); |
| 3382 | RetOps.push_back(Elt: DAG.getRegister(Reg: VA.getLocReg(), VT: VA.getLocVT())); |
| 3383 | VA = RVLocs[++i]; // skip ahead to next loc |
| 3384 | |
| 3385 | // Extract the 2nd half and fall through to handle it as an f64 value. |
| 3386 | Arg = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, N1: Arg, |
| 3387 | N2: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
| 3388 | } |
| 3389 | // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is |
| 3390 | // available. |
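| | // For example (illustrative), a soft-float-ABI return of an f64 value held |
| | // in d0 ends up as "vmov r0, r1, d0", with the two halves swapped when |
| | // targeting big-endian. |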
| 3391 | SDValue fmrrd = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
| 3392 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: Arg); |
| 3393 | Chain = DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), |
| 3394 | N: fmrrd.getValue(R: isLittleEndian ? 0 : 1), Glue); |
| 3395 | Glue = Chain.getValue(R: 1); |
| 3396 | RetOps.push_back(Elt: DAG.getRegister(Reg: VA.getLocReg(), VT: VA.getLocVT())); |
| 3397 | VA = RVLocs[++i]; // skip ahead to next loc |
| 3398 | Chain = DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), |
| 3399 | N: fmrrd.getValue(R: isLittleEndian ? 1 : 0), Glue); |
| 3400 | } else |
| 3401 | Chain = DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), N: Arg, Glue); |
| 3402 | |
| 3403 | // Glue all emitted copies together so that nothing else can be scheduled |
| 3404 | // between them and clobber the return registers before the return. |
| 3405 | Glue = Chain.getValue(R: 1); |
| 3406 | RetOps.push_back(Elt: DAG.getRegister( |
| 3407 | Reg: VA.getLocReg(), VT: ReturnF16 ? Arg.getValueType() : VA.getLocVT())); |
| 3408 | } |
| 3409 | const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 3410 | const MCPhysReg *I = |
| 3411 | TRI->getCalleeSavedRegsViaCopy(MF: &DAG.getMachineFunction()); |
| 3412 | if (I) { |
| 3413 | for (; *I; ++I) { |
| 3414 | if (ARM::GPRRegClass.contains(Reg: *I)) |
| 3415 | RetOps.push_back(Elt: DAG.getRegister(Reg: *I, VT: MVT::i32)); |
| 3416 | else if (ARM::DPRRegClass.contains(Reg: *I)) |
| 3417 | RetOps.push_back(Elt: DAG.getRegister(Reg: *I, VT: MVT::getFloatingPointVT(BitWidth: 64))); |
| 3418 | else |
| 3419 | llvm_unreachable("Unexpected register class in CSRsViaCopy!" ); |
| 3420 | } |
| 3421 | } |
| 3422 | |
| 3423 | // Update chain and glue. |
| 3424 | RetOps[0] = Chain; |
| 3425 | if (Glue.getNode()) |
| 3426 | RetOps.push_back(Elt: Glue); |
| 3427 | |
| 3428 | // CPUs which aren't M-class use a special sequence to return from |
| 3429 | // exceptions (roughly, any instruction setting pc and cpsr simultaneously, |
| 3430 | // though we use "subs pc, lr, #N"). |
| 3431 | // |
| 3432 | // M-class CPUs actually use a normal return sequence with a special |
| 3433 | // (hardware-provided) value in LR, so the normal code path works. |
| 3434 | if (DAG.getMachineFunction().getFunction().hasFnAttribute(Kind: "interrupt" ) && |
| 3435 | !Subtarget->isMClass()) { |
| 3436 | if (Subtarget->isThumb1Only()) |
| 3437 | report_fatal_error(reason: "interrupt attribute is not supported in Thumb1" ); |
| 3438 | return LowerInterruptReturn(RetOps, DL: dl, DAG); |
| 3439 | } |
| 3440 | |
| 3441 | ARMISD::NodeType RetNode = AFI->isCmseNSEntryFunction() ? ARMISD::SERET_GLUE : |
| 3442 | ARMISD::RET_GLUE; |
| 3443 | return DAG.getNode(Opcode: RetNode, DL: dl, VT: MVT::Other, Ops: RetOps); |
| 3444 | } |
| 3445 | |
| 3446 | bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { |
| 3447 | if (N->getNumValues() != 1) |
| 3448 | return false; |
| 3449 | if (!N->hasNUsesOfValue(NUses: 1, Value: 0)) |
| 3450 | return false; |
| 3451 | |
| 3452 | SDValue TCChain = Chain; |
| 3453 | SDNode *Copy = *N->user_begin(); |
| 3454 | if (Copy->getOpcode() == ISD::CopyToReg) { |
| 3455 | // If the copy has a glue operand, we conservatively assume it isn't safe to |
| 3456 | // perform a tail call. |
| 3457 | if (Copy->getOperand(Num: Copy->getNumOperands()-1).getValueType() == MVT::Glue) |
| 3458 | return false; |
| 3459 | TCChain = Copy->getOperand(Num: 0); |
| 3460 | } else if (Copy->getOpcode() == ARMISD::VMOVRRD) { |
| 3461 | SDNode *VMov = Copy; |
| 3462 | // f64 returned in a pair of GPRs. |
| 3463 | SmallPtrSet<SDNode*, 2> Copies; |
| 3464 | for (SDNode *U : VMov->users()) { |
| 3465 | if (U->getOpcode() != ISD::CopyToReg) |
| 3466 | return false; |
| 3467 | Copies.insert(Ptr: U); |
| 3468 | } |
| 3469 | if (Copies.size() > 2) |
| 3470 | return false; |
| 3471 | |
| 3472 | for (SDNode *U : VMov->users()) { |
| 3473 | SDValue UseChain = U->getOperand(Num: 0); |
| 3474 | if (Copies.count(Ptr: UseChain.getNode())) |
| 3475 | // Second CopyToReg |
| 3476 | Copy = U; |
| 3477 | else { |
| 3478 | // We are at the top of this chain. |
| 3479 | // If the copy has a glue operand, we conservatively assume it |
| 3480 | // isn't safe to perform a tail call. |
| 3481 | if (U->getOperand(Num: U->getNumOperands() - 1).getValueType() == MVT::Glue) |
| 3482 | return false; |
| 3483 | // First CopyToReg |
| 3484 | TCChain = UseChain; |
| 3485 | } |
| 3486 | } |
| 3487 | } else if (Copy->getOpcode() == ISD::BITCAST) { |
| 3488 | // f32 returned in a single GPR. |
| 3489 | if (!Copy->hasOneUse()) |
| 3490 | return false; |
| 3491 | Copy = *Copy->user_begin(); |
| 3492 | if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(NUses: 1, Value: 0)) |
| 3493 | return false; |
| 3494 | // If the copy has a glue operand, we conservatively assume it isn't safe to |
| 3495 | // perform a tail call. |
| 3496 | if (Copy->getOperand(Num: Copy->getNumOperands()-1).getValueType() == MVT::Glue) |
| 3497 | return false; |
| 3498 | TCChain = Copy->getOperand(Num: 0); |
| 3499 | } else { |
| 3500 | return false; |
| 3501 | } |
| 3502 | |
| 3503 | bool HasRet = false; |
| 3504 | for (const SDNode *U : Copy->users()) { |
| 3505 | if (U->getOpcode() != ARMISD::RET_GLUE && |
| 3506 | U->getOpcode() != ARMISD::INTRET_GLUE) |
| 3507 | return false; |
| 3508 | HasRet = true; |
| 3509 | } |
| 3510 | |
| 3511 | if (!HasRet) |
| 3512 | return false; |
| 3513 | |
| 3514 | Chain = TCChain; |
| 3515 | return true; |
| 3516 | } |
| 3517 | |
| 3518 | bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { |
| 3519 | if (!Subtarget->supportsTailCall()) |
| 3520 | return false; |
| 3521 | |
| 3522 | if (!CI->isTailCall()) |
| 3523 | return false; |
| 3524 | |
| 3525 | return true; |
| 3526 | } |
| 3527 | |
| 3528 | // Writing a 64-bit value, so split it into two 32-bit values first and pass |
| 3529 | // the low and high parts through. |
| 3530 | static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) { |
| 3531 | SDLoc DL(Op); |
| 3532 | SDValue WriteValue = Op->getOperand(Num: 2); |
| 3533 | |
| 3534 | // This function is only supposed to be called for i64 type argument. |
| 3535 | assert(WriteValue.getValueType() == MVT::i64 |
| 3536 | && "LowerWRITE_REGISTER called for non-i64 type argument." ); |
| 3537 | |
| 3538 | SDValue Lo, Hi; |
| 3539 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: WriteValue, DL, LoVT: MVT::i32, HiVT: MVT::i32); |
| 3540 | SDValue Ops[] = { Op->getOperand(Num: 0), Op->getOperand(Num: 1), Lo, Hi }; |
| 3541 | return DAG.getNode(Opcode: ISD::WRITE_REGISTER, DL, VT: MVT::Other, Ops); |
| 3542 | } |
| 3543 | |
| 3544 | // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as |
| 3545 | // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is |
| 3546 | // one of the above mentioned nodes. It has to be wrapped because otherwise |
| 3547 | // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only |
| 3548 | // be used to form an addressing mode. These wrapped nodes will be selected |
| 3549 | // into MOVi. |
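| | // For example (illustrative), lowering a ConstantPool node produces |
| | // (ARMISD::Wrapper (TargetConstantPool <cst>, i32)), and it is this Wrapper |
| | // node, not the raw TargetConstantPool, that the instruction selector |
| | // matches when forming the address. |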
| 3550 | SDValue ARMTargetLowering::LowerConstantPool(SDValue Op, |
| 3551 | SelectionDAG &DAG) const { |
| 3552 | EVT PtrVT = Op.getValueType(); |
| 3553 | // FIXME there is no actual debug info here |
| 3554 | SDLoc dl(Op); |
| 3555 | ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Val&: Op); |
| 3556 | SDValue Res; |
| 3557 | |
| 3558 | // When generating execute-only code Constant Pools must be promoted to the |
| 3559 | // global data section. It's a bit ugly that we can't share them across basic |
| 3560 | // blocks, but this way we guarantee that execute-only behaves correctly with |
| 3561 | // position-independent addressing modes. |
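| | // The promoted global gets a function-local name built from the private |
| | // prefix below, e.g. (illustrative, assuming the usual ELF ".L" prefix) |
| | // ".LCP1_0" for the first such entry in function number 1. |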
| 3562 | if (Subtarget->genExecuteOnly()) { |
| 3563 | auto AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); |
| 3564 | auto T = const_cast<Type*>(CP->getType()); |
| 3565 | auto C = const_cast<Constant*>(CP->getConstVal()); |
| 3566 | auto M = const_cast<Module*>(DAG.getMachineFunction(). |
| 3567 | getFunction().getParent()); |
| 3568 | auto GV = new GlobalVariable( |
| 3569 | *M, T, /*isConstant=*/true, GlobalVariable::InternalLinkage, C, |
| 3570 | Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" + |
| 3571 | Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" + |
| 3572 | Twine(AFI->createPICLabelUId()) |
| 3573 | ); |
| 3574 | SDValue GA = DAG.getTargetGlobalAddress(GV: dyn_cast<GlobalValue>(Val: GV), |
| 3575 | DL: dl, VT: PtrVT); |
| 3576 | return LowerGlobalAddress(Op: GA, DAG); |
| 3577 | } |
| 3578 | |
| 3579 | // The 16-bit ADR instruction can only encode offsets that are multiples of 4, |
| 3580 | // so we need to align to at least 4 bytes when we don't have 32-bit ADR. |
| 3581 | Align CPAlign = CP->getAlign(); |
| 3582 | if (Subtarget->isThumb1Only()) |
| 3583 | CPAlign = std::max(a: CPAlign, b: Align(4)); |
| 3584 | if (CP->isMachineConstantPoolEntry()) |
| 3585 | Res = |
| 3586 | DAG.getTargetConstantPool(C: CP->getMachineCPVal(), VT: PtrVT, Align: CPAlign); |
| 3587 | else |
| 3588 | Res = DAG.getTargetConstantPool(C: CP->getConstVal(), VT: PtrVT, Align: CPAlign); |
| 3589 | return DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Res); |
| 3590 | } |
| 3591 | |
| 3592 | unsigned ARMTargetLowering::getJumpTableEncoding() const { |
| 3593 | // If we don't have a 32-bit pc-relative branch instruction then the jump |
| 3594 | // table consists of block addresses. Usually this is inline, but for |
| 3595 | // execute-only it must be placed out-of-line. |
| 3596 | if (Subtarget->genExecuteOnly() && !Subtarget->hasV8MBaselineOps()) |
| 3597 | return MachineJumpTableInfo::EK_BlockAddress; |
| 3598 | return MachineJumpTableInfo::EK_Inline; |
| 3599 | } |
| 3600 | |
| 3601 | SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, |
| 3602 | SelectionDAG &DAG) const { |
| 3603 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3604 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3605 | unsigned ARMPCLabelIndex = 0; |
| 3606 | SDLoc DL(Op); |
| 3607 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3608 | const BlockAddress *BA = cast<BlockAddressSDNode>(Val&: Op)->getBlockAddress(); |
| 3609 | SDValue CPAddr; |
| 3610 | bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI(); |
| 3611 | if (!IsPositionIndependent) { |
| 3612 | CPAddr = DAG.getTargetConstantPool(C: BA, VT: PtrVT, Align: Align(4)); |
| 3613 | } else { |
| 3614 | unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; |
| 3615 | ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 3616 | ARMConstantPoolValue *CPV = |
| 3617 | ARMConstantPoolConstant::Create(C: BA, ID: ARMPCLabelIndex, |
| 3618 | Kind: ARMCP::CPBlockAddress, PCAdj); |
| 3619 | CPAddr = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
| 3620 | } |
| 3621 | CPAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL, VT: PtrVT, Operand: CPAddr); |
| 3622 | SDValue Result = DAG.getLoad( |
| 3623 | VT: PtrVT, dl: DL, Chain: DAG.getEntryNode(), Ptr: CPAddr, |
| 3624 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3625 | if (!IsPositionIndependent) |
| 3626 | return Result; |
| 3627 | SDValue PICLabel = DAG.getConstant(Val: ARMPCLabelIndex, DL, VT: MVT::i32); |
| 3628 | return DAG.getNode(Opcode: ARMISD::PIC_ADD, DL, VT: PtrVT, N1: Result, N2: PICLabel); |
| 3629 | } |
| 3630 | |
| 3631 | /// Convert a TLS address reference into the correct sequence of loads |
| 3632 | /// and calls to compute the variable's address for Darwin, and return an |
| 3633 | /// SDValue containing the final node. |
| 3634 | |
| 3635 | /// Darwin only has one TLS scheme which must be capable of dealing with the |
| 3636 | /// fully general situation, in the worst case. This means: |
| 3637 | /// + "extern __thread" declaration. |
| 3638 | /// + Defined in a possibly unknown dynamic library. |
| 3639 | /// |
| 3640 | /// The general system is that each __thread variable has a [3 x i32] descriptor |
| 3641 | /// which contains information used by the runtime to calculate the address. The |
| 3642 | /// only part of this the compiler needs to know about is the first word, which |
| 3643 | /// contains a function pointer that must be called with the address of the |
| 3644 | /// entire descriptor in "r0". |
| 3645 | /// |
| 3646 | /// Since this descriptor may be in a different unit, in general access must |
| 3647 | /// proceed along the usual ARM rules. A common sequence to produce is: |
| 3648 | /// |
| 3649 | /// movw rT1, :lower16:_var$non_lazy_ptr |
| 3650 | /// movt rT1, :upper16:_var$non_lazy_ptr |
| 3651 | /// ldr r0, [rT1] |
| 3652 | /// ldr rT2, [r0] |
| 3653 | /// blx rT2 |
| 3654 | /// [...address now in r0...] |
| 3655 | SDValue |
| 3656 | ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op, |
| 3657 | SelectionDAG &DAG) const { |
| 3658 | assert(Subtarget->isTargetDarwin() && |
| 3659 | "This function expects a Darwin target" ); |
| 3660 | SDLoc DL(Op); |
| 3661 | |
| 3662 | // The first step is to get the address of the actual global symbol. This is |
| 3663 | // where the TLS descriptor lives. |
| 3664 | SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG); |
| 3665 | |
| 3666 | // The first entry in the descriptor is a function pointer that we must call |
| 3667 | // to obtain the address of the variable. |
| 3668 | SDValue Chain = DAG.getEntryNode(); |
| 3669 | SDValue FuncTLVGet = DAG.getLoad( |
| 3670 | VT: MVT::i32, dl: DL, Chain, Ptr: DescAddr, |
| 3671 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction()), Alignment: Align(4), |
| 3672 | MMOFlags: MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable | |
| 3673 | MachineMemOperand::MOInvariant); |
| 3674 | Chain = FuncTLVGet.getValue(R: 1); |
| 3675 | |
| 3676 | MachineFunction &F = DAG.getMachineFunction(); |
| 3677 | MachineFrameInfo &MFI = F.getFrameInfo(); |
| 3678 | MFI.setAdjustsStack(true); |
| 3679 | |
| 3680 | // TLS calls preserve all registers except those that absolutely must be |
| 3681 | // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be |
| 3682 | // silly). |
| 3683 | auto TRI = |
| 3684 | getTargetMachine().getSubtargetImpl(F.getFunction())->getRegisterInfo(); |
| 3685 | auto ARI = static_cast<const ARMRegisterInfo *>(TRI); |
| 3686 | const uint32_t *Mask = ARI->getTLSCallPreservedMask(MF: DAG.getMachineFunction()); |
| 3687 | |
| 3688 | // Finally, we can make the call. This is just a degenerate version of a |
| 3689 | // normal ARM call node: r0 takes the address of the descriptor, and |
| 3690 | // returns the address of the variable in this thread. |
| 3691 | Chain = DAG.getCopyToReg(Chain, dl: DL, Reg: ARM::R0, N: DescAddr, Glue: SDValue()); |
| 3692 | Chain = |
| 3693 | DAG.getNode(Opcode: ARMISD::CALL, DL, VTList: DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue), |
| 3694 | N1: Chain, N2: FuncTLVGet, N3: DAG.getRegister(Reg: ARM::R0, VT: MVT::i32), |
| 3695 | N4: DAG.getRegisterMask(RegMask: Mask), N5: Chain.getValue(R: 1)); |
| 3696 | return DAG.getCopyFromReg(Chain, dl: DL, Reg: ARM::R0, VT: MVT::i32, Glue: Chain.getValue(R: 1)); |
| 3697 | } |
| 3698 | |
| 3699 | SDValue |
| 3700 | ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op, |
| 3701 | SelectionDAG &DAG) const { |
| 3702 | assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering" ); |
| 3703 | |
| 3704 | SDValue Chain = DAG.getEntryNode(); |
| 3705 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3706 | SDLoc DL(Op); |
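| | // The sequence built below is roughly (illustrative; the actual registers |
| | // are chosen by the register allocator): |
| | //   mrc   p15, #0, rT, c13, c0, #2   @ TEB from the CP15 thread-ID register |
| | //   ldr   rA, [rT, #0x2c]            @ ThreadLocalStoragePointer |
| | //   movw  rI, :lower16:_tls_index |
| | //   movt  rI, :upper16:_tls_index |
| | //   ldr   rI, [rI]                   @ this module's TLS index |
| | //   ldr   rB, [rA, rI, lsl #2]       @ this thread's TLS block |
| | //   ldr   rO, .LCPI0_0               @ SECREL offset of the variable |
| | //   add   r0, rB, rO                 @ address of the variable |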
| 3707 | |
| 3708 | // Load the current TEB (thread environment block) |
| 3709 | SDValue Ops[] = {Chain, |
| 3710 | DAG.getTargetConstant(Val: Intrinsic::arm_mrc, DL, VT: MVT::i32), |
| 3711 | DAG.getTargetConstant(Val: 15, DL, VT: MVT::i32), |
| 3712 | DAG.getTargetConstant(Val: 0, DL, VT: MVT::i32), |
| 3713 | DAG.getTargetConstant(Val: 13, DL, VT: MVT::i32), |
| 3714 | DAG.getTargetConstant(Val: 0, DL, VT: MVT::i32), |
| 3715 | DAG.getTargetConstant(Val: 2, DL, VT: MVT::i32)}; |
| 3716 | SDValue CurrentTEB = DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL, |
| 3717 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::Other), Ops); |
| 3718 | |
| 3719 | SDValue TEB = CurrentTEB.getValue(R: 0); |
| 3720 | Chain = CurrentTEB.getValue(R: 1); |
| 3721 | |
| 3722 | // Load the ThreadLocalStoragePointer from the TEB |
| 3723 | // A pointer to the TLS array is located at offset 0x2c from the TEB. |
| 3724 | SDValue TLSArray = |
| 3725 | DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: TEB, N2: DAG.getIntPtrConstant(Val: 0x2c, DL)); |
| 3726 | TLSArray = DAG.getLoad(VT: PtrVT, dl: DL, Chain, Ptr: TLSArray, PtrInfo: MachinePointerInfo()); |
| 3727 | |
| 3728 | // The pointer to the thread's TLS data area is found in the TLSArray at an |
| 3729 | // offset of (TLS index * 4). |
| 3730 | |
| 3731 | // Load the TLS index from the C runtime |
| 3732 | SDValue TLSIndex = |
| 3733 | DAG.getTargetExternalSymbol(Sym: "_tls_index" , VT: PtrVT, TargetFlags: ARMII::MO_NO_FLAG); |
| 3734 | TLSIndex = DAG.getNode(Opcode: ARMISD::Wrapper, DL, VT: PtrVT, Operand: TLSIndex); |
| 3735 | TLSIndex = DAG.getLoad(VT: PtrVT, dl: DL, Chain, Ptr: TLSIndex, PtrInfo: MachinePointerInfo()); |
| 3736 | |
| 3737 | SDValue Slot = DAG.getNode(Opcode: ISD::SHL, DL, VT: PtrVT, N1: TLSIndex, |
| 3738 | N2: DAG.getConstant(Val: 2, DL, VT: MVT::i32)); |
| 3739 | SDValue TLS = DAG.getLoad(VT: PtrVT, dl: DL, Chain, |
| 3740 | Ptr: DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: TLSArray, N2: Slot), |
| 3741 | PtrInfo: MachinePointerInfo()); |
| 3742 | |
| 3743 | // Get the offset of the start of the .tls section (section base) |
| 3744 | const auto *GA = cast<GlobalAddressSDNode>(Val&: Op); |
| 3745 | auto *CPV = ARMConstantPoolConstant::Create(GV: GA->getGlobal(), Modifier: ARMCP::SECREL); |
| 3746 | SDValue Offset = DAG.getLoad( |
| 3747 | VT: PtrVT, dl: DL, Chain, |
| 3748 | Ptr: DAG.getNode(Opcode: ARMISD::Wrapper, DL, VT: MVT::i32, |
| 3749 | Operand: DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4))), |
| 3750 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3751 | |
| 3752 | return DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: TLS, N2: Offset); |
| 3753 | } |
| 3754 | |
| 3755 | // Lower ISD::GlobalTLSAddress using the "general dynamic" model |
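| | // A typical sequence for this model is (illustrative): |
| | // |
| | //   ldr   r0, .LCPI0_0          @ entry holds var(TLSGD) plus a PC bias |
| | // .LPC0_0: |
| | //   add   r0, pc, r0            @ r0 = address of the TLS argument block |
| | //   bl    __tls_get_addr |
| | //   @ ...address of var now in r0... |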
| 3756 | SDValue |
| 3757 | ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, |
| 3758 | SelectionDAG &DAG) const { |
| 3759 | SDLoc dl(GA); |
| 3760 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3761 | unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; |
| 3762 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3763 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3764 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 3765 | ARMConstantPoolValue *CPV = |
| 3766 | ARMConstantPoolConstant::Create(C: GA->getGlobal(), ID: ARMPCLabelIndex, |
| 3767 | Kind: ARMCP::CPValue, PCAdj, Modifier: ARMCP::TLSGD, AddCurrentAddress: true); |
| 3768 | SDValue Argument = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
| 3769 | Argument = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Argument); |
| 3770 | Argument = DAG.getLoad( |
| 3771 | VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: Argument, |
| 3772 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3773 | SDValue Chain = Argument.getValue(R: 1); |
| 3774 | |
| 3775 | SDValue PICLabel = DAG.getConstant(Val: ARMPCLabelIndex, DL: dl, VT: MVT::i32); |
| 3776 | Argument = DAG.getNode(Opcode: ARMISD::PIC_ADD, DL: dl, VT: PtrVT, N1: Argument, N2: PICLabel); |
| 3777 | |
| 3778 | // call __tls_get_addr. |
| 3779 | ArgListTy Args; |
| 3780 | ArgListEntry Entry; |
| 3781 | Entry.Node = Argument; |
| 3782 | Entry.Ty = (Type *) Type::getInt32Ty(C&: *DAG.getContext()); |
| 3783 | Args.push_back(x: Entry); |
| 3784 | |
| 3785 | // FIXME: is there useful debug info available here? |
| 3786 | TargetLowering::CallLoweringInfo CLI(DAG); |
| 3787 | CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( |
| 3788 | CC: CallingConv::C, ResultType: Type::getInt32Ty(C&: *DAG.getContext()), |
| 3789 | Target: DAG.getExternalSymbol(Sym: "__tls_get_addr" , VT: PtrVT), ArgsList: std::move(Args)); |
| 3790 | |
| 3791 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); |
| 3792 | return CallResult.first; |
| 3793 | } |
| 3794 | |
| 3795 | // Lower ISD::GlobalTLSAddress using the "initial exec" or |
| 3796 | // "local exec" model. |
| 3797 | SDValue |
| 3798 | ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, |
| 3799 | SelectionDAG &DAG, |
| 3800 | TLSModel::Model model) const { |
| 3801 | const GlobalValue *GV = GA->getGlobal(); |
| 3802 | SDLoc dl(GA); |
| 3803 | SDValue Offset; |
| 3804 | SDValue Chain = DAG.getEntryNode(); |
| 3805 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3806 | // Get the Thread Pointer |
| 3807 | SDValue ThreadPointer = DAG.getNode(Opcode: ARMISD::THREAD_POINTER, DL: dl, VT: PtrVT); |
| 3808 | |
| 3809 | if (model == TLSModel::InitialExec) { |
| 3810 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3811 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3812 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 3813 | // Initial exec model. |
| 3814 | unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; |
| 3815 | ARMConstantPoolValue *CPV = |
| 3816 | ARMConstantPoolConstant::Create(C: GA->getGlobal(), ID: ARMPCLabelIndex, |
| 3817 | Kind: ARMCP::CPValue, PCAdj, Modifier: ARMCP::GOTTPOFF, |
| 3818 | AddCurrentAddress: true); |
| 3819 | Offset = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
| 3820 | Offset = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Offset); |
| 3821 | Offset = DAG.getLoad( |
| 3822 | VT: PtrVT, dl, Chain, Ptr: Offset, |
| 3823 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3824 | Chain = Offset.getValue(R: 1); |
| 3825 | |
| 3826 | SDValue PICLabel = DAG.getConstant(Val: ARMPCLabelIndex, DL: dl, VT: MVT::i32); |
| 3827 | Offset = DAG.getNode(Opcode: ARMISD::PIC_ADD, DL: dl, VT: PtrVT, N1: Offset, N2: PICLabel); |
| 3828 | |
| 3829 | Offset = DAG.getLoad( |
| 3830 | VT: PtrVT, dl, Chain, Ptr: Offset, |
| 3831 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3832 | } else { |
| 3833 | // local exec model |
| 3834 | assert(model == TLSModel::LocalExec); |
| 3835 | ARMConstantPoolValue *CPV = |
| 3836 | ARMConstantPoolConstant::Create(GV, Modifier: ARMCP::TPOFF); |
| 3837 | Offset = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
| 3838 | Offset = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Offset); |
| 3839 | Offset = DAG.getLoad( |
| 3840 | VT: PtrVT, dl, Chain, Ptr: Offset, |
| 3841 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3842 | } |
| 3843 | |
| 3844 | // The address of the thread local variable is the add of the thread |
| 3845 | // pointer with the offset of the variable. |
| 3846 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: ThreadPointer, N2: Offset); |
| 3847 | } |
| 3848 | |
| 3849 | SDValue |
| 3850 | ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { |
| 3851 | GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Val&: Op); |
| 3852 | if (DAG.getTarget().useEmulatedTLS()) |
| 3853 | return LowerToTLSEmulatedModel(GA, DAG); |
| 3854 | |
| 3855 | if (Subtarget->isTargetDarwin()) |
| 3856 | return LowerGlobalTLSAddressDarwin(Op, DAG); |
| 3857 | |
| 3858 | if (Subtarget->isTargetWindows()) |
| 3859 | return LowerGlobalTLSAddressWindows(Op, DAG); |
| 3860 | |
| 3861 | // TODO: implement the "local dynamic" model |
| 3862 | assert(Subtarget->isTargetELF() && "Only ELF implemented here" ); |
| 3863 | TLSModel::Model model = getTargetMachine().getTLSModel(GV: GA->getGlobal()); |
| 3864 | |
| 3865 | switch (model) { |
| 3866 | case TLSModel::GeneralDynamic: |
| 3867 | case TLSModel::LocalDynamic: |
| 3868 | return LowerToTLSGeneralDynamicModel(GA, DAG); |
| 3869 | case TLSModel::InitialExec: |
| 3870 | case TLSModel::LocalExec: |
| 3871 | return LowerToTLSExecModels(GA, DAG, model); |
| 3872 | } |
| 3873 | llvm_unreachable("bogus TLS model" ); |
| 3874 | } |
| 3875 | |
| 3876 | /// Return true if all users of V are within function F, looking through |
| 3877 | /// ConstantExprs. |
| 3878 | static bool allUsersAreInFunction(const Value *V, const Function *F) { |
| 3879 | SmallVector<const User*,4> Worklist(V->users()); |
| 3880 | while (!Worklist.empty()) { |
| 3881 | auto *U = Worklist.pop_back_val(); |
| 3882 | if (isa<ConstantExpr>(Val: U)) { |
| 3883 | append_range(C&: Worklist, R: U->users()); |
| 3884 | continue; |
| 3885 | } |
| 3886 | |
| 3887 | auto *I = dyn_cast<Instruction>(Val: U); |
| 3888 | if (!I || I->getParent()->getParent() != F) |
| 3889 | return false; |
| 3890 | } |
| 3891 | return true; |
| 3892 | } |
| 3893 | |
| 3894 | static SDValue promoteToConstantPool(const ARMTargetLowering *TLI, |
| 3895 | const GlobalValue *GV, SelectionDAG &DAG, |
| 3896 | EVT PtrVT, const SDLoc &dl) { |
| 3897 | // If we're creating a pool entry for a constant global with unnamed address, |
| 3898 | // and the global is small enough, we can emit it inline into the constant pool |
| 3899 | // to save ourselves an indirection. |
| 3900 | // |
| 3901 | // This is a win if the constant is only used in one function (so it doesn't |
| 3902 | // need to be duplicated) or duplicating the constant wouldn't increase code |
| 3903 | // size (implying the constant is no larger than 4 bytes). |
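| | // For example (illustrative), a small constant such as |
| | //   static const char Tag[] = "ok"; |
| | // that is local, unnamed_addr and only referenced from one function can have |
| | // its bytes emitted straight into that function's constant pool, so its |
| | // address is formed with a single pc-relative address computation (e.g. an |
| | // "adr") instead of first loading the global's address from a literal pool. |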
| 3904 | const Function &F = DAG.getMachineFunction().getFunction(); |
| 3905 | |
| 3906 | // We rely on this decision to inline being idempotent and unrelated to the |
| 3907 | // use-site. We know that if we inline a variable at one use site, we'll |
| 3908 | // inline it elsewhere too (and reuse the constant pool entry). Fast-isel |
| 3909 | // doesn't know about this optimization, so bail out if it's enabled; |
| 3910 | // otherwise we could decide to inline here (and thus never emit the GV) |
| 3911 | // while fast-isel-generated code still requires the GV. |
| 3912 | if (!EnableConstpoolPromotion || |
| 3913 | DAG.getMachineFunction().getTarget().Options.EnableFastISel) |
| 3914 | return SDValue(); |
| 3915 | |
| 3916 | auto *GVar = dyn_cast<GlobalVariable>(Val: GV); |
| 3917 | if (!GVar || !GVar->hasInitializer() || |
| 3918 | !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() || |
| 3919 | !GVar->hasLocalLinkage()) |
| 3920 | return SDValue(); |
| 3921 | |
| 3922 | // If we inline a value that contains relocations, we move the relocations |
| 3923 | // from .data to .text. This is not allowed in position-independent code. |
| 3924 | auto *Init = GVar->getInitializer(); |
| 3925 | if ((TLI->isPositionIndependent() || TLI->getSubtarget()->isROPI()) && |
| 3926 | Init->needsDynamicRelocation()) |
| 3927 | return SDValue(); |
| 3928 | |
| 3929 | // The constant islands pass can only really deal with alignment requests |
| 3930 | // <= 4 bytes and cannot pad constants itself. Therefore we cannot promote |
| 3931 | // any global whose alignment requirement is greater than 4 bytes. We also |
| 3932 | // can only promote constants that are multiples of 4 bytes in size or |
| 3933 | // are paddable to a multiple of 4. Currently we only try to pad constants |
| 3934 | // that are strings, for simplicity. |
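| | // For example (illustrative), a 6-byte string initializer gets |
| | // RequiredPadding == 2 and is emitted as PaddedSize == 8 with two trailing |
| | // zero bytes appended, while an initializer whose size is already a multiple |
| | // of 4 computes RequiredPadding == 4 and is left unpadded. |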
| 3935 | auto *CDAInit = dyn_cast<ConstantDataArray>(Val: Init); |
| 3936 | unsigned Size = DAG.getDataLayout().getTypeAllocSize(Ty: Init->getType()); |
| 3937 | Align PrefAlign = DAG.getDataLayout().getPreferredAlign(GV: GVar); |
| 3938 | unsigned RequiredPadding = 4 - (Size % 4); |
| 3939 | bool PaddingPossible = |
| 3940 | RequiredPadding == 4 || (CDAInit && CDAInit->isString()); |
| 3941 | if (!PaddingPossible || PrefAlign > 4 || Size > ConstpoolPromotionMaxSize || |
| 3942 | Size == 0) |
| 3943 | return SDValue(); |
| 3944 | |
| 3945 | unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding); |
| 3946 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3947 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3948 | |
| 3949 | // We can't bloat the constant pool too much, else the ConstantIslands pass |
| 3950 | // may fail to converge. If we haven't promoted this global yet (it may have |
| 3951 | // multiple uses), and promoting it would increase the constant pool size (Sz |
| 3952 | // > 4), ensure we have space to do so up to MaxTotal. |
| 3953 | if (!AFI->getGlobalsPromotedToConstantPool().count(Ptr: GVar) && Size > 4) |
| 3954 | if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >= |
| 3955 | ConstpoolPromotionMaxTotal) |
| 3956 | return SDValue(); |
| 3957 | |
| 3958 | // This is only valid if all users are in a single function; we can't clone |
| 3959 | // the constant in general. The LLVM IR unnamed_addr allows merging |
| 3960 | // constants, but not cloning them. |
| 3961 | // |
| 3962 | // We could potentially allow cloning if we could prove all uses of the |
| 3963 | // constant in the current function don't care about the address, like |
| 3964 | // printf format strings. But that isn't implemented for now. |
| 3965 | if (!allUsersAreInFunction(V: GVar, F: &F)) |
| 3966 | return SDValue(); |
| 3967 | |
| 3968 | // We're going to inline this global. Pad it out if needed. |
| 3969 | if (RequiredPadding != 4) { |
| 3970 | StringRef S = CDAInit->getAsString(); |
| 3971 | |
| 3972 | SmallVector<uint8_t,16> V(S.size()); |
| 3973 | std::copy(first: S.bytes_begin(), last: S.bytes_end(), result: V.begin()); |
| 3974 | while (RequiredPadding--) |
| 3975 | V.push_back(Elt: 0); |
| 3976 | Init = ConstantDataArray::get(Context&: *DAG.getContext(), Elts&: V); |
| 3977 | } |
| 3978 | |
| 3979 | auto CPVal = ARMConstantPoolConstant::Create(GV: GVar, Initializer: Init); |
| 3980 | SDValue CPAddr = DAG.getTargetConstantPool(C: CPVal, VT: PtrVT, Align: Align(4)); |
| 3981 | if (!AFI->getGlobalsPromotedToConstantPool().count(Ptr: GVar)) { |
| 3982 | AFI->markGlobalAsPromotedToConstantPool(GV: GVar); |
| 3983 | AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() + |
| 3984 | PaddedSize - 4); |
| 3985 | } |
| 3986 | ++NumConstpoolPromoted; |
| 3987 | return DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: CPAddr); |
| 3988 | } |
| 3989 | |
| 3990 | bool ARMTargetLowering::isReadOnly(const GlobalValue *GV) const { |
| 3991 | if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(Val: GV)) |
| 3992 | if (!(GV = GA->getAliaseeObject())) |
| 3993 | return false; |
| 3994 | if (const auto *V = dyn_cast<GlobalVariable>(Val: GV)) |
| 3995 | return V->isConstant(); |
| 3996 | return isa<Function>(Val: GV); |
| 3997 | } |
| 3998 | |
| 3999 | SDValue ARMTargetLowering::LowerGlobalAddress(SDValue Op, |
| 4000 | SelectionDAG &DAG) const { |
| 4001 | switch (Subtarget->getTargetTriple().getObjectFormat()) { |
| 4002 | default: llvm_unreachable("unknown object format" ); |
| 4003 | case Triple::COFF: |
| 4004 | return LowerGlobalAddressWindows(Op, DAG); |
| 4005 | case Triple::ELF: |
| 4006 | return LowerGlobalAddressELF(Op, DAG); |
| 4007 | case Triple::MachO: |
| 4008 | return LowerGlobalAddressDarwin(Op, DAG); |
| 4009 | } |
| 4010 | } |
| 4011 | |
| 4012 | SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, |
| 4013 | SelectionDAG &DAG) const { |
| 4014 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 4015 | SDLoc dl(Op); |
| 4016 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Val&: Op)->getGlobal(); |
| 4017 | bool IsRO = isReadOnly(GV); |
| 4018 | |
| 4019 | // promoteToConstantPool only if not generating XO text section |
| 4020 | if (GV->isDSOLocal() && !Subtarget->genExecuteOnly()) |
| 4021 | if (SDValue V = promoteToConstantPool(TLI: this, GV, DAG, PtrVT, dl)) |
| 4022 | return V; |
| 4023 | |
| 4024 | if (isPositionIndependent()) { |
| 4025 | SDValue G = DAG.getTargetGlobalAddress( |
| 4026 | GV, DL: dl, VT: PtrVT, offset: 0, TargetFlags: GV->isDSOLocal() ? 0 : ARMII::MO_GOT); |
| 4027 | SDValue Result = DAG.getNode(Opcode: ARMISD::WrapperPIC, DL: dl, VT: PtrVT, Operand: G); |
| 4028 | if (!GV->isDSOLocal()) |
| 4029 | Result = |
| 4030 | DAG.getLoad(VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: Result, |
| 4031 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction())); |
| 4032 | return Result; |
| 4033 | } else if (Subtarget->isROPI() && IsRO) { |
| 4034 | // PC-relative. |
| 4035 | SDValue G = DAG.getTargetGlobalAddress(GV, DL: dl, VT: PtrVT); |
| 4036 | SDValue Result = DAG.getNode(Opcode: ARMISD::WrapperPIC, DL: dl, VT: PtrVT, Operand: G); |
| 4037 | return Result; |
| 4038 | } else if (Subtarget->isRWPI() && !IsRO) { |
| 4039 | // SB-relative. |
| 4040 | SDValue RelAddr; |
| 4041 | if (Subtarget->useMovt()) { |
| 4042 | ++NumMovwMovt; |
| 4043 | SDValue G = DAG.getTargetGlobalAddress(GV, DL: dl, VT: PtrVT, offset: 0, TargetFlags: ARMII::MO_SBREL); |
| 4044 | RelAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: PtrVT, Operand: G); |
| 4045 | } else { // use literal pool for address constant |
| 4046 | ARMConstantPoolValue *CPV = |
| 4047 | ARMConstantPoolConstant::Create(GV, Modifier: ARMCP::SBREL); |
| 4048 | SDValue CPAddr = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
| 4049 | CPAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: CPAddr); |
| 4050 | RelAddr = DAG.getLoad( |
| 4051 | VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: CPAddr, |
| 4052 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 4053 | } |
| 4054 | SDValue SB = DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg: ARM::R9, VT: PtrVT); |
| 4055 | SDValue Result = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: SB, N2: RelAddr); |
| 4056 | return Result; |
| 4057 | } |
| 4058 | |
| 4059 | // If we have T2 ops, we can materialize the address directly via a movw/movt |
| 4060 | // pair. This is always cheaper. If we need to generate execute-only code and |
| 4061 | // only have Thumb1 available, we can't use a constant pool and are forced to |
| 4062 | // use immediate relocations. |
| 4063 | if (Subtarget->useMovt() || Subtarget->genExecuteOnly()) { |
| 4064 | if (Subtarget->useMovt()) |
| 4065 | ++NumMovwMovt; |
| 4066 | // FIXME: Once remat is capable of dealing with instructions with register |
| 4067 | // operands, expand this into two nodes. |
| 4068 | return DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: PtrVT, |
| 4069 | Operand: DAG.getTargetGlobalAddress(GV, DL: dl, VT: PtrVT)); |
| 4070 | } else { |
| 4071 | SDValue CPAddr = DAG.getTargetConstantPool(C: GV, VT: PtrVT, Align: Align(4)); |
| 4072 | CPAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: CPAddr); |
| 4073 | return DAG.getLoad( |
| 4074 | VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: CPAddr, |
| 4075 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 4076 | } |
| 4077 | } |
| 4078 | |
| 4079 | SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, |
| 4080 | SelectionDAG &DAG) const { |
| 4081 | assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && |
| 4082 | "ROPI/RWPI not currently supported for Darwin" ); |
| 4083 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 4084 | SDLoc dl(Op); |
| 4085 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Val&: Op)->getGlobal(); |
| 4086 | |
| 4087 | if (Subtarget->useMovt()) |
| 4088 | ++NumMovwMovt; |
| 4089 | |
| 4090 | // FIXME: Once remat is capable of dealing with instructions with register |
| 4091 | // operands, expand this into multiple nodes |
| 4092 | unsigned Wrapper = |
| 4093 | isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper; |
| 4094 | |
| 4095 | SDValue G = DAG.getTargetGlobalAddress(GV, DL: dl, VT: PtrVT, offset: 0, TargetFlags: ARMII::MO_NONLAZY); |
| 4096 | SDValue Result = DAG.getNode(Opcode: Wrapper, DL: dl, VT: PtrVT, Operand: G); |
| 4097 | |
| 4098 | if (Subtarget->isGVIndirectSymbol(GV)) |
| 4099 | Result = DAG.getLoad(VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: Result, |
| 4100 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction())); |
| 4101 | return Result; |
| 4102 | } |
| 4103 | |
| 4104 | SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op, |
| 4105 | SelectionDAG &DAG) const { |
| 4106 | assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported" ); |
| 4107 | assert(Subtarget->useMovt() && |
| 4108 | "Windows on ARM expects to use movw/movt" ); |
| 4109 | assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && |
| 4110 | "ROPI/RWPI not currently supported for Windows" ); |
| 4111 | |
| 4112 | const TargetMachine &TM = getTargetMachine(); |
| 4113 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Val&: Op)->getGlobal(); |
| 4114 | ARMII::TOF TargetFlags = ARMII::MO_NO_FLAG; |
| 4115 | if (GV->hasDLLImportStorageClass()) |
| 4116 | TargetFlags = ARMII::MO_DLLIMPORT; |
| 4117 | else if (!TM.shouldAssumeDSOLocal(GV)) |
| 4118 | TargetFlags = ARMII::MO_COFFSTUB; |
| 4119 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 4120 | SDValue Result; |
| 4121 | SDLoc DL(Op); |
| 4122 | |
| 4123 | ++NumMovwMovt; |
| 4124 | |
| 4125 | // FIXME: Once remat is capable of dealing with instructions with register |
| 4126 | // operands, expand this into two nodes. |
| 4127 | Result = DAG.getNode(Opcode: ARMISD::Wrapper, DL, VT: PtrVT, |
| 4128 | Operand: DAG.getTargetGlobalAddress(GV, DL, VT: PtrVT, /*offset=*/0, |
| 4129 | TargetFlags)); |
| 4130 | if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB)) |
| 4131 | Result = DAG.getLoad(VT: PtrVT, dl: DL, Chain: DAG.getEntryNode(), Ptr: Result, |
| 4132 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction())); |
| 4133 | return Result; |
| 4134 | } |
| 4135 | |
| 4136 | SDValue |
| 4137 | ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { |
| 4138 | SDLoc dl(Op); |
| 4139 | SDValue Val = DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32); |
| 4140 | return DAG.getNode(Opcode: ARMISD::EH_SJLJ_SETJMP, DL: dl, |
| 4141 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::Other), N1: Op.getOperand(i: 0), |
| 4142 | N2: Op.getOperand(i: 1), N3: Val); |
| 4143 | } |
| 4144 | |
| 4145 | SDValue |
| 4146 | ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { |
| 4147 | SDLoc dl(Op); |
| 4148 | return DAG.getNode(Opcode: ARMISD::EH_SJLJ_LONGJMP, DL: dl, VT: MVT::Other, N1: Op.getOperand(i: 0), |
| 4149 | N2: Op.getOperand(i: 1), N3: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 4150 | } |
| 4151 | |
| 4152 | SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, |
| 4153 | SelectionDAG &DAG) const { |
| 4154 | SDLoc dl(Op); |
| 4155 | return DAG.getNode(Opcode: ARMISD::EH_SJLJ_SETUP_DISPATCH, DL: dl, VT: MVT::Other, |
| 4156 | Operand: Op.getOperand(i: 0)); |
| 4157 | } |
| 4158 | |
| 4159 | SDValue ARMTargetLowering::LowerINTRINSIC_VOID( |
| 4160 | SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget) const { |
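| | // Operand 0 is normally the chain (MVT::Other), in which case the intrinsic |
| | // ID is operand 1; the comparison below selects the right operand index |
| | // either way. |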
| 4161 | unsigned IntNo = |
| 4162 | Op.getConstantOperandVal(i: Op.getOperand(i: 0).getValueType() == MVT::Other); |
| 4163 | switch (IntNo) { |
| 4164 | default: |
| 4165 | return SDValue(); // Don't custom lower most intrinsics. |
| 4166 | case Intrinsic::arm_gnu_eabi_mcount: { |
| 4167 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4168 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 4169 | SDLoc dl(Op); |
| 4170 | SDValue Chain = Op.getOperand(i: 0); |
| 4171 | // call "\01__gnu_mcount_nc" |
| 4172 | const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo(); |
| 4173 | const uint32_t *Mask = |
| 4174 | ARI->getCallPreservedMask(MF: DAG.getMachineFunction(), CallingConv::C); |
| 4175 | assert(Mask && "Missing call preserved mask for calling convention" ); |
| 4176 | // Mark LR an implicit live-in. |
| 4177 | Register Reg = MF.addLiveIn(PReg: ARM::LR, RC: getRegClassFor(VT: MVT::i32)); |
| 4178 | SDValue ReturnAddress = |
| 4179 | DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg, VT: PtrVT); |
| 4180 | constexpr EVT ResultTys[] = {MVT::Other, MVT::Glue}; |
| 4181 | SDValue Callee = |
| 4182 | DAG.getTargetExternalSymbol(Sym: "\01__gnu_mcount_nc" , VT: PtrVT, TargetFlags: 0); |
| 4183 | SDValue RegisterMask = DAG.getRegisterMask(RegMask: Mask); |
| 4184 | if (Subtarget->isThumb()) |
| 4185 | return SDValue( |
| 4186 | DAG.getMachineNode( |
| 4187 | Opcode: ARM::tBL_PUSHLR, dl, ResultTys, |
| 4188 | Ops: {ReturnAddress, DAG.getTargetConstant(Val: ARMCC::AL, DL: dl, VT: PtrVT), |
| 4189 | DAG.getRegister(Reg: 0, VT: PtrVT), Callee, RegisterMask, Chain}), |
| 4190 | 0); |
| 4191 | return SDValue( |
| 4192 | DAG.getMachineNode(Opcode: ARM::BL_PUSHLR, dl, ResultTys, |
| 4193 | Ops: {ReturnAddress, Callee, RegisterMask, Chain}), |
| 4194 | 0); |
| 4195 | } |
| 4196 | } |
| 4197 | } |
| 4198 | |
| 4199 | SDValue |
| 4200 | ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, |
| 4201 | const ARMSubtarget *Subtarget) const { |
| 4202 | unsigned IntNo = Op.getConstantOperandVal(i: 0); |
| 4203 | SDLoc dl(Op); |
| 4204 | switch (IntNo) { |
| 4205 | default: return SDValue(); // Don't custom lower most intrinsics. |
| 4206 | case Intrinsic::thread_pointer: { |
| 4207 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 4208 | return DAG.getNode(Opcode: ARMISD::THREAD_POINTER, DL: dl, VT: PtrVT); |
| 4209 | } |
| 4210 | case Intrinsic::arm_cls: { |
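| | // cls(x) counts how many bits below the sign bit are copies of it. It can |
| | // be computed as ctlz((((x >> 31) ^ x) << 1) | 1): the arithmetic-shift/xor |
| | // turns leading sign-bit copies into leading zeros, and the shift-left/or-1 |
| | // make the value non-zero with a leading-zero count of exactly cls(x). |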
| 4211 | const SDValue &Operand = Op.getOperand(i: 1); |
| 4212 | const EVT VTy = Op.getValueType(); |
| 4213 | SDValue SRA = |
| 4214 | DAG.getNode(Opcode: ISD::SRA, DL: dl, VT: VTy, N1: Operand, N2: DAG.getConstant(Val: 31, DL: dl, VT: VTy)); |
| 4215 | SDValue XOR = DAG.getNode(Opcode: ISD::XOR, DL: dl, VT: VTy, N1: SRA, N2: Operand); |
| 4216 | SDValue SHL = |
| 4217 | DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: VTy, N1: XOR, N2: DAG.getConstant(Val: 1, DL: dl, VT: VTy)); |
| 4218 | SDValue OR = |
| 4219 | DAG.getNode(Opcode: ISD::OR, DL: dl, VT: VTy, N1: SHL, N2: DAG.getConstant(Val: 1, DL: dl, VT: VTy)); |
| 4220 | SDValue Result = DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT: VTy, Operand: OR); |
| 4221 | return Result; |
| 4222 | } |
| 4223 | case Intrinsic::arm_cls64: { |
| 4224 | // cls(x) = if cls(hi(x)) != 31 then cls(hi(x)) |
| 4225 | // else 31 + clz(if hi(x) == 0 then lo(x) else not(lo(x))) |
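| | // For example (illustrative), x = 0x0000000080000000 has hi(x) == 0, so |
| | // cls(hi) == 31 and the result is 31 + clz(0x80000000) = 31 + 0 = 31: the |
| | // 31 bits below the (zero) sign bit down to bit 32 all match it, and bit 31 |
| | // does not. |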
| 4226 | const SDValue &Operand = Op.getOperand(i: 1); |
| 4227 | const EVT VTy = Op.getValueType(); |
| 4228 | SDValue Lo, Hi; |
| 4229 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: Operand, DL: dl, LoVT: VTy, HiVT: VTy); |
| 4230 | SDValue Constant0 = DAG.getConstant(Val: 0, DL: dl, VT: VTy); |
| 4231 | SDValue Constant1 = DAG.getConstant(Val: 1, DL: dl, VT: VTy); |
| 4232 | SDValue Constant31 = DAG.getConstant(Val: 31, DL: dl, VT: VTy); |
| 4233 | SDValue SRAHi = DAG.getNode(Opcode: ISD::SRA, DL: dl, VT: VTy, N1: Hi, N2: Constant31); |
| 4234 | SDValue XORHi = DAG.getNode(Opcode: ISD::XOR, DL: dl, VT: VTy, N1: SRAHi, N2: Hi); |
| 4235 | SDValue SHLHi = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: VTy, N1: XORHi, N2: Constant1); |
| 4236 | SDValue ORHi = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: VTy, N1: SHLHi, N2: Constant1); |
| 4237 | SDValue CLSHi = DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT: VTy, Operand: ORHi); |
| 4238 | SDValue CheckLo = |
| 4239 | DAG.getSetCC(DL: dl, VT: MVT::i1, LHS: CLSHi, RHS: Constant31, Cond: ISD::CondCode::SETEQ); |
| 4240 | SDValue HiIsZero = |
| 4241 | DAG.getSetCC(DL: dl, VT: MVT::i1, LHS: Hi, RHS: Constant0, Cond: ISD::CondCode::SETEQ); |
| 4242 | SDValue AdjustedLo = |
| 4243 | DAG.getSelect(DL: dl, VT: VTy, Cond: HiIsZero, LHS: Lo, RHS: DAG.getNOT(DL: dl, Val: Lo, VT: VTy)); |
| 4244 | SDValue CLZAdjustedLo = DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT: VTy, Operand: AdjustedLo); |
| 4245 | SDValue Result = |
| 4246 | DAG.getSelect(DL: dl, VT: VTy, Cond: CheckLo, |
| 4247 | LHS: DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: VTy, N1: CLZAdjustedLo, N2: Constant31), RHS: CLSHi); |
| 4248 | return Result; |
| 4249 | } |
| 4250 | case Intrinsic::eh_sjlj_lsda: { |
| 4251 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4252 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 4253 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 4254 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 4255 | SDValue CPAddr; |
| 4256 | bool IsPositionIndependent = isPositionIndependent(); |
| 4257 | unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0; |
| 4258 | ARMConstantPoolValue *CPV = |
| 4259 | ARMConstantPoolConstant::Create(C: &MF.getFunction(), ID: ARMPCLabelIndex, |
| 4260 | Kind: ARMCP::CPLSDA, PCAdj); |
| 4261 | CPAddr = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
| 4262 | CPAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: CPAddr); |
| 4263 | SDValue Result = DAG.getLoad( |
| 4264 | VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: CPAddr, |
| 4265 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 4266 | |
| 4267 | if (IsPositionIndependent) { |
| 4268 | SDValue PICLabel = DAG.getConstant(Val: ARMPCLabelIndex, DL: dl, VT: MVT::i32); |
| 4269 | Result = DAG.getNode(Opcode: ARMISD::PIC_ADD, DL: dl, VT: PtrVT, N1: Result, N2: PICLabel); |
| 4270 | } |
| 4271 | return Result; |
| 4272 | } |
| 4273 | case Intrinsic::arm_neon_vabs: |
| 4274 | return DAG.getNode(Opcode: ISD::ABS, DL: SDLoc(Op), VT: Op.getValueType(), |
| 4275 | Operand: Op.getOperand(i: 1)); |
| 4276 | case Intrinsic::arm_neon_vabds: |
| 4277 | if (Op.getValueType().isInteger()) |
| 4278 | return DAG.getNode(Opcode: ISD::ABDS, DL: SDLoc(Op), VT: Op.getValueType(), |
| 4279 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 4280 | return SDValue(); |
| 4281 | case Intrinsic::arm_neon_vabdu: |
| 4282 | return DAG.getNode(Opcode: ISD::ABDU, DL: SDLoc(Op), VT: Op.getValueType(), |
| 4283 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 4284 | case Intrinsic::arm_neon_vmulls: |
| 4285 | case Intrinsic::arm_neon_vmullu: { |
| 4286 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) |
| 4287 | ? ARMISD::VMULLs : ARMISD::VMULLu; |
| 4288 | return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: Op.getValueType(), |
| 4289 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 4290 | } |
| 4291 | case Intrinsic::arm_neon_vminnm: |
| 4292 | case Intrinsic::arm_neon_vmaxnm: { |
| 4293 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm) |
| 4294 | ? ISD::FMINNUM : ISD::FMAXNUM; |
| 4295 | return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: Op.getValueType(), |
| 4296 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 4297 | } |
| 4298 | case Intrinsic::arm_neon_vminu: |
| 4299 | case Intrinsic::arm_neon_vmaxu: { |
| 4300 | if (Op.getValueType().isFloatingPoint()) |
| 4301 | return SDValue(); |
| 4302 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu) |
| 4303 | ? ISD::UMIN : ISD::UMAX; |
| 4304 | return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: Op.getValueType(), |
| 4305 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 4306 | } |
| 4307 | case Intrinsic::arm_neon_vmins: |
| 4308 | case Intrinsic::arm_neon_vmaxs: { |
| 4309 | // v{min,max}s is overloaded between signed integers and floats. |
| 4310 | if (!Op.getValueType().isFloatingPoint()) { |
| 4311 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) |
| 4312 | ? ISD::SMIN : ISD::SMAX; |
| 4313 | return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: Op.getValueType(), |
| 4314 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 4315 | } |
| 4316 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) |
| 4317 | ? ISD::FMINIMUM : ISD::FMAXIMUM; |
| 4318 | return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: Op.getValueType(), |
| 4319 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 4320 | } |
| 4321 | case Intrinsic::arm_neon_vtbl1: |
| 4322 | return DAG.getNode(Opcode: ARMISD::VTBL1, DL: SDLoc(Op), VT: Op.getValueType(), |
| 4323 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 4324 | case Intrinsic::arm_neon_vtbl2: |
| 4325 | return DAG.getNode(Opcode: ARMISD::VTBL2, DL: SDLoc(Op), VT: Op.getValueType(), |
| 4326 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2), N3: Op.getOperand(i: 3)); |
| 4327 | case Intrinsic::arm_mve_pred_i2v: |
| 4328 | case Intrinsic::arm_mve_pred_v2i: |
| 4329 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: SDLoc(Op), VT: Op.getValueType(), |
| 4330 | Operand: Op.getOperand(i: 1)); |
| 4331 | case Intrinsic::arm_mve_vreinterpretq: |
| 4332 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: SDLoc(Op), VT: Op.getValueType(), |
| 4333 | Operand: Op.getOperand(i: 1)); |
| 4334 | case Intrinsic::arm_mve_lsll: |
| 4335 | return DAG.getNode(Opcode: ARMISD::LSLL, DL: SDLoc(Op), VTList: Op->getVTList(), |
| 4336 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2), N3: Op.getOperand(i: 3)); |
| 4337 | case Intrinsic::arm_mve_asrl: |
| 4338 | return DAG.getNode(Opcode: ARMISD::ASRL, DL: SDLoc(Op), VTList: Op->getVTList(), |
| 4339 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2), N3: Op.getOperand(i: 3)); |
| 4340 | } |
| 4341 | } |
| 4342 | |
| 4343 | static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, |
| 4344 | const ARMSubtarget *Subtarget) { |
| 4345 | SDLoc dl(Op); |
| 4346 | auto SSID = static_cast<SyncScope::ID>(Op.getConstantOperandVal(i: 2)); |
| 4347 | if (SSID == SyncScope::SingleThread) |
| 4348 | return Op; |
| 4349 | |
| 4350 | if (!Subtarget->hasDataBarrier()) { |
| 4351 | // Some ARMv6 cpus can support data barriers with an mcr instruction. |
| 4352 | // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get |
| 4353 | // here. |
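| | // (On those cores this typically ends up as a CP15 barrier such as |
| | // "mcr p15, 0, <Rd>, c7, c10, 5"; see the MEMBARRIER_MCR node below.) |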
| 4354 | assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && |
| 4355 | "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!"); |
| 4356 | return DAG.getNode(Opcode: ARMISD::MEMBARRIER_MCR, DL: dl, VT: MVT::Other, N1: Op.getOperand(i: 0), |
| 4357 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 4358 | } |
| 4359 | |
| 4360 | AtomicOrdering Ord = |
| 4361 | static_cast<AtomicOrdering>(Op.getConstantOperandVal(i: 1)); |
| 4362 | ARM_MB::MemBOpt Domain = ARM_MB::ISH; |
| 4363 | if (Subtarget->isMClass()) { |
| 4364 | // Only a full system barrier exists in the M-class architectures. |
| 4365 | Domain = ARM_MB::SY; |
| 4366 | } else if (Subtarget->preferISHSTBarriers() && |
| 4367 | Ord == AtomicOrdering::Release) { |
| 4368 | // Swift happens to implement ISHST barriers in a way that's compatible with |
| 4369 | // Release semantics but weaker than ISH so we'd be fools not to use |
| 4370 | // it. Beware: other processors probably don't! |
| 4371 | Domain = ARM_MB::ISHST; |
| 4372 | } |
| 4373 | |
| 4374 | return DAG.getNode(Opcode: ISD::INTRINSIC_VOID, DL: dl, VT: MVT::Other, N1: Op.getOperand(i: 0), |
| 4375 | N2: DAG.getConstant(Val: Intrinsic::arm_dmb, DL: dl, VT: MVT::i32), |
| 4376 | N3: DAG.getConstant(Val: Domain, DL: dl, VT: MVT::i32)); |
| 4377 | } |
| 4378 | |
| 4379 | static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, |
| 4380 | const ARMSubtarget *Subtarget) { |
| 4381 | // ARM prior to v5TE and Thumb1 do not have preload instructions. |
| 4382 | if (!(Subtarget->isThumb2() || |
| 4383 | (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps()))) |
| 4384 | // Just preserve the chain. |
| 4385 | return Op.getOperand(i: 0); |
| 4386 | |
| 4387 | SDLoc dl(Op); |
| 4388 | unsigned isRead = ~Op.getConstantOperandVal(i: 2) & 1; |
| 4389 | if (!isRead && |
| 4390 | (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) |
| 4391 | // Only ARMv7 with the MP extension has PLDW; without it, drop write prefetches. |
| 4392 | return Op.getOperand(i: 0); |
| 4393 | |
| 4394 | unsigned isData = Op.getConstantOperandVal(i: 4); |
| 4395 | if (Subtarget->isThumb()) { |
| 4396 | // Invert the bits. |
| 4397 | isRead = ~isRead & 1; |
| 4398 | isData = ~isData & 1; |
| 4399 | } |
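| | // The PRELOAD node eventually selects to one of the preload instructions, |
| | // roughly pld (data read), pldw (data write, MP extension) or pli |
| | // (instruction prefetch), depending on the isRead/isData operands. |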
| 4400 | |
| 4401 | return DAG.getNode(Opcode: ARMISD::PRELOAD, DL: dl, VT: MVT::Other, N1: Op.getOperand(i: 0), |
| 4402 | N2: Op.getOperand(i: 1), N3: DAG.getConstant(Val: isRead, DL: dl, VT: MVT::i32), |
| 4403 | N4: DAG.getConstant(Val: isData, DL: dl, VT: MVT::i32)); |
| 4404 | } |
| 4405 | |
| 4406 | static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { |
| 4407 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4408 | ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); |
| 4409 | |
| 4410 | // vastart just stores the address of the VarArgsFrameIndex slot into the |
| 4411 | // memory location argument. |
| 4412 | SDLoc dl(Op); |
| 4413 | EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DL: DAG.getDataLayout()); |
| 4414 | SDValue FR = DAG.getFrameIndex(FI: FuncInfo->getVarArgsFrameIndex(), VT: PtrVT); |
| 4415 | const Value *SV = cast<SrcValueSDNode>(Val: Op.getOperand(i: 2))->getValue(); |
| 4416 | return DAG.getStore(Chain: Op.getOperand(i: 0), dl, Val: FR, Ptr: Op.getOperand(i: 1), |
| 4417 | PtrInfo: MachinePointerInfo(SV)); |
| 4418 | } |
| 4419 | |
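| | // Reassemble an f64 formal argument from two 32-bit locations. A minimal |
| | // summary of the code below: the first half always arrives in a GPR, the |
| | // second half arrives either in the next GPR or in a fixed stack slot, and |
| | // the two i32 halves are glued back together with ARMISD::VMOVDRR (with the |
| | // halves swapped on big-endian targets). |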
| 4420 | SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, |
| 4421 | CCValAssign &NextVA, |
| 4422 | SDValue &Root, |
| 4423 | SelectionDAG &DAG, |
| 4424 | const SDLoc &dl) const { |
| 4425 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4426 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 4427 | |
| 4428 | const TargetRegisterClass *RC; |
| 4429 | if (AFI->isThumb1OnlyFunction()) |
| 4430 | RC = &ARM::tGPRRegClass; |
| 4431 | else |
| 4432 | RC = &ARM::GPRRegClass; |
| 4433 | |
| 4434 | // Transform the arguments stored in physical registers into virtual ones. |
| 4435 | Register Reg = MF.addLiveIn(PReg: VA.getLocReg(), RC); |
| 4436 | SDValue ArgValue = DAG.getCopyFromReg(Chain: Root, dl, Reg, VT: MVT::i32); |
| 4437 | |
| 4438 | SDValue ArgValue2; |
| 4439 | if (NextVA.isMemLoc()) { |
| 4440 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 4441 | int FI = MFI.CreateFixedObject(Size: 4, SPOffset: NextVA.getLocMemOffset(), IsImmutable: true); |
| 4442 | |
| 4443 | // Create load node to retrieve arguments from the stack. |
| 4444 | SDValue FIN = DAG.getFrameIndex(FI, VT: getPointerTy(DL: DAG.getDataLayout())); |
| 4445 | ArgValue2 = DAG.getLoad( |
| 4446 | VT: MVT::i32, dl, Chain: Root, Ptr: FIN, |
| 4447 | PtrInfo: MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI)); |
| 4448 | } else { |
| 4449 | Reg = MF.addLiveIn(PReg: NextVA.getLocReg(), RC); |
| 4450 | ArgValue2 = DAG.getCopyFromReg(Chain: Root, dl, Reg, VT: MVT::i32); |
| 4451 | } |
| 4452 | if (!Subtarget->isLittle()) |
| 4453 | std::swap (a&: ArgValue, b&: ArgValue2); |
| 4454 | return DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: ArgValue, N2: ArgValue2); |
| 4455 | } |
| 4456 | |
| 4457 | // The remaining GPRs hold either the beginning of variable-argument |
| 4458 | // data, or the beginning of an aggregate passed by value (usually |
| 4459 | // byval). Either way, we allocate stack slots adjacent to the data |
| 4460 | // provided by our caller, and store the unallocated registers there. |
| 4461 | // If this is a variadic function, the va_list pointer will begin with |
| 4462 | // these values; otherwise, this reassembles a (byval) structure that |
| 4463 | // was split between registers and memory. |
| 4464 | // Return: The frame index registers were stored into. |
| 4465 | int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, |
| 4466 | const SDLoc &dl, SDValue &Chain, |
| 4467 | const Value *OrigArg, |
| 4468 | unsigned InRegsParamRecordIdx, |
| 4469 | int ArgOffset, unsigned ArgSize) const { |
| 4470 | // Currently, two use cases are possible: |
| 4471 | // Case #1. Non-varargs function, and we meet the first byval parameter. |
| 4472 | //          Set up the first unallocated register as the first byval register; |
| 4473 | //          consume all remaining registers |
| 4474 | //          (these two actions are performed by the HandleByVal method). |
| 4475 | //          Then, here, we initialize the stack frame with |
| 4476 | //          "store-reg" instructions. |
| 4477 | // Case #2. Varargs function that doesn't contain byval parameters. |
| 4478 | //          The same: consume all remaining unallocated registers and |
| 4479 | //          initialize the stack frame. |
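| | // Rough example (illustrative only): for a 12-byte byval argument whose |
| | // first 8 bytes were assigned to r2 and r3, a fixed stack object is created |
| | // immediately below the caller-provided 4 bytes, r2 and r3 are stored into |
| | // it, and its frame index marks the start of the reassembled argument. |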
| 4480 | |
| 4481 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4482 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 4483 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 4484 | unsigned RBegin, REnd; |
| 4485 | if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) { |
| 4486 | CCInfo.getInRegsParamInfo(InRegsParamRecordIndex: InRegsParamRecordIdx, BeginReg&: RBegin, EndReg&: REnd); |
| 4487 | } else { |
| 4488 | unsigned RBeginIdx = CCInfo.getFirstUnallocated(Regs: GPRArgRegs); |
| 4489 | RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx]; |
| 4490 | REnd = ARM::R4; |
| 4491 | } |
| 4492 | |
| 4493 | if (REnd != RBegin) |
| 4494 | ArgOffset = -4 * (ARM::R4 - RBegin); |
| 4495 | |
| 4496 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 4497 | int FrameIndex = MFI.CreateFixedObject(Size: ArgSize, SPOffset: ArgOffset, IsImmutable: false); |
| 4498 | SDValue FIN = DAG.getFrameIndex(FI: FrameIndex, VT: PtrVT); |
| 4499 | |
| 4500 | SmallVector<SDValue, 4> MemOps; |
| 4501 | const TargetRegisterClass *RC = |
| 4502 | AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass; |
| 4503 | |
| 4504 | for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) { |
| 4505 | Register VReg = MF.addLiveIn(PReg: Reg, RC); |
| 4506 | SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg: VReg, VT: MVT::i32); |
| 4507 | SDValue Store = DAG.getStore(Chain: Val.getValue(R: 1), dl, Val, Ptr: FIN, |
| 4508 | PtrInfo: MachinePointerInfo(OrigArg, 4 * i)); |
| 4509 | MemOps.push_back(Elt: Store); |
| 4510 | FIN = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: FIN, N2: DAG.getConstant(Val: 4, DL: dl, VT: PtrVT)); |
| 4511 | } |
| 4512 | |
| 4513 | if (!MemOps.empty()) |
| 4514 | Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, Ops: MemOps); |
| 4515 | return FrameIndex; |
| 4516 | } |
| 4517 | |
| 4518 | // Set up the stack frame that the va_list pointer will start from. |
| 4519 | void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, |
| 4520 | const SDLoc &dl, SDValue &Chain, |
| 4521 | unsigned ArgOffset, |
| 4522 | unsigned TotalArgRegsSaveSize, |
| 4523 | bool ForceMutable) const { |
| 4524 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4525 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 4526 | |
| 4527 | // Try to store any remaining integer argument regs |
| 4528 | // to their spots on the stack so that they may be loaded by dereferencing |
| 4529 | // the result of va_next. |
| 4530 | // If there are no regs to be stored, just point the address after the last |
| 4531 | // argument passed via the stack. |
| 4532 | int FrameIndex = StoreByValRegs( |
| 4533 | CCInfo, DAG, dl, Chain, OrigArg: nullptr, InRegsParamRecordIdx: CCInfo.getInRegsParamsCount(), |
| 4534 | ArgOffset: CCInfo.getStackSize(), ArgSize: std::max(a: 4U, b: TotalArgRegsSaveSize)); |
| 4535 | AFI->setVarArgsFrameIndex(FrameIndex); |
| 4536 | } |
| 4537 | |
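| | // Half-precision values passed in a single f32 part (hard-float ABI): the |
| | // code below keeps the f16/bf16 bits in the low 16 bits of the register by |
| | // bitcasting to i16, any-extending to i32 and bitcasting to f32. |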
| 4538 | bool ARMTargetLowering::splitValueIntoRegisterParts( |
| 4539 | SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, |
| 4540 | unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const { |
| 4541 | EVT ValueVT = Val.getValueType(); |
| 4542 | if ((ValueVT == MVT::f16 || ValueVT == MVT::bf16) && PartVT == MVT::f32) { |
| 4543 | unsigned ValueBits = ValueVT.getSizeInBits(); |
| 4544 | unsigned PartBits = PartVT.getSizeInBits(); |
| 4545 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::getIntegerVT(BitWidth: ValueBits), Operand: Val); |
| 4546 | Val = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::getIntegerVT(BitWidth: PartBits), Operand: Val); |
| 4547 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: PartVT, Operand: Val); |
| 4548 | Parts[0] = Val; |
| 4549 | return true; |
| 4550 | } |
| 4551 | return false; |
| 4552 | } |
| 4553 | |
| 4554 | SDValue ARMTargetLowering::joinRegisterPartsIntoValue( |
| 4555 | SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, |
| 4556 | MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const { |
| 4557 | if ((ValueVT == MVT::f16 || ValueVT == MVT::bf16) && PartVT == MVT::f32) { |
| 4558 | unsigned ValueBits = ValueVT.getSizeInBits(); |
| 4559 | unsigned PartBits = PartVT.getSizeInBits(); |
| 4560 | SDValue Val = Parts[0]; |
| 4561 | |
| 4562 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::getIntegerVT(BitWidth: PartBits), Operand: Val); |
| 4563 | Val = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::getIntegerVT(BitWidth: ValueBits), Operand: Val); |
| 4564 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: ValueVT, Operand: Val); |
| 4565 | return Val; |
| 4566 | } |
| 4567 | return SDValue(); |
| 4568 | } |
| 4569 | |
| 4570 | SDValue ARMTargetLowering::LowerFormalArguments( |
| 4571 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
| 4572 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
| 4573 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { |
| 4574 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4575 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 4576 | |
| 4577 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 4578 | |
| 4579 | // Assign locations to all of the incoming arguments. |
| 4580 | SmallVector<CCValAssign, 16> ArgLocs; |
| 4581 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, |
| 4582 | *DAG.getContext()); |
| 4583 | CCInfo.AnalyzeFormalArguments(Ins, Fn: CCAssignFnForCall(CC: CallConv, isVarArg)); |
| 4584 | |
| 4585 | Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin(); |
| 4586 | unsigned CurArgIdx = 0; |
| 4587 | |
| 4588 | // Initially ArgRegsSaveSize is zero. |
| 4589 | // Then we increase this value each time we meet a byval parameter. |
| 4590 | // We also increase this value in the case of a varargs function. |
| 4591 | AFI->setArgRegsSaveSize(0); |
| 4592 | |
| 4593 | // Calculate the amount of stack space that we need to allocate to store |
| 4594 | // byval and variadic arguments that are passed in registers. |
| 4595 | // We need to know this before we allocate the first byval or variadic |
| 4596 | // argument, as they will be allocated a stack slot below the CFA (Canonical |
| 4597 | // Frame Address, the stack pointer at entry to the function). |
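| | // For example (illustrative): if the lowest register holding byval or |
| | // variadic data is r2, ArgRegBegin is r2 and 8 bytes (for r2 and r3) are |
| | // reserved below the CFA. |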
| 4598 | unsigned ArgRegBegin = ARM::R4; |
| 4599 | for (const CCValAssign &VA : ArgLocs) { |
| 4600 | if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount()) |
| 4601 | break; |
| 4602 | |
| 4603 | unsigned Index = VA.getValNo(); |
| 4604 | ISD::ArgFlagsTy Flags = Ins[Index].Flags; |
| 4605 | if (!Flags.isByVal()) |
| 4606 | continue; |
| 4607 | |
| 4608 | assert(VA.isMemLoc() && "unexpected byval pointer in reg"); |
| 4609 | unsigned RBegin, REnd; |
| 4610 | CCInfo.getInRegsParamInfo(InRegsParamRecordIndex: CCInfo.getInRegsParamsProcessed(), BeginReg&: RBegin, EndReg&: REnd); |
| 4611 | ArgRegBegin = std::min(a: ArgRegBegin, b: RBegin); |
| 4612 | |
| 4613 | CCInfo.nextInRegsParam(); |
| 4614 | } |
| 4615 | CCInfo.rewindByValRegsInfo(); |
| 4616 | |
| 4617 | int lastInsIndex = -1; |
| 4618 | if (isVarArg && MFI.hasVAStart()) { |
| 4619 | unsigned RegIdx = CCInfo.getFirstUnallocated(Regs: GPRArgRegs); |
| 4620 | if (RegIdx != std::size(GPRArgRegs)) |
| 4621 | ArgRegBegin = std::min(a: ArgRegBegin, b: (unsigned)GPRArgRegs[RegIdx]); |
| 4622 | } |
| 4623 | |
| 4624 | unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin); |
| 4625 | AFI->setArgRegsSaveSize(TotalArgRegsSaveSize); |
| 4626 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 4627 | |
| 4628 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { |
| 4629 | CCValAssign &VA = ArgLocs[i]; |
| 4630 | if (Ins[VA.getValNo()].isOrigArg()) { |
| 4631 | std::advance(i&: CurOrigArg, |
| 4632 | n: Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx); |
| 4633 | CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex(); |
| 4634 | } |
| 4635 | // Arguments stored in registers. |
| 4636 | if (VA.isRegLoc()) { |
| 4637 | EVT RegVT = VA.getLocVT(); |
| 4638 | SDValue ArgValue; |
| 4639 | |
| 4640 | if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) { |
| 4641 | // f64 and vector types are split up into multiple registers or |
| 4642 | // combinations of registers and stack slots. |
| 4643 | SDValue ArgValue1 = |
| 4644 | GetF64FormalArgument(VA, NextVA&: ArgLocs[++i], Root&: Chain, DAG, dl); |
| 4645 | VA = ArgLocs[++i]; // skip ahead to next loc |
| 4646 | SDValue ArgValue2; |
| 4647 | if (VA.isMemLoc()) { |
| 4648 | int FI = MFI.CreateFixedObject(Size: 8, SPOffset: VA.getLocMemOffset(), IsImmutable: true); |
| 4649 | SDValue FIN = DAG.getFrameIndex(FI, VT: PtrVT); |
| 4650 | ArgValue2 = DAG.getLoad( |
| 4651 | VT: MVT::f64, dl, Chain, Ptr: FIN, |
| 4652 | PtrInfo: MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI)); |
| 4653 | } else { |
| 4654 | ArgValue2 = GetF64FormalArgument(VA, NextVA&: ArgLocs[++i], Root&: Chain, DAG, dl); |
| 4655 | } |
| 4656 | ArgValue = DAG.getNode(Opcode: ISD::UNDEF, DL: dl, VT: MVT::v2f64); |
| 4657 | ArgValue = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: ArgValue, |
| 4658 | N2: ArgValue1, N3: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
| 4659 | ArgValue = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: ArgValue, |
| 4660 | N2: ArgValue2, N3: DAG.getIntPtrConstant(Val: 1, DL: dl)); |
| 4661 | } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) { |
| 4662 | ArgValue = GetF64FormalArgument(VA, NextVA&: ArgLocs[++i], Root&: Chain, DAG, dl); |
| 4663 | } else { |
| 4664 | const TargetRegisterClass *RC; |
| 4665 | |
| 4666 | if (RegVT == MVT::f16 || RegVT == MVT::bf16) |
| 4667 | RC = &ARM::HPRRegClass; |
| 4668 | else if (RegVT == MVT::f32) |
| 4669 | RC = &ARM::SPRRegClass; |
| 4670 | else if (RegVT == MVT::f64 || RegVT == MVT::v4f16 || |
| 4671 | RegVT == MVT::v4bf16) |
| 4672 | RC = &ARM::DPRRegClass; |
| 4673 | else if (RegVT == MVT::v2f64 || RegVT == MVT::v8f16 || |
| 4674 | RegVT == MVT::v8bf16) |
| 4675 | RC = &ARM::QPRRegClass; |
| 4676 | else if (RegVT == MVT::i32) |
| 4677 | RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass |
| 4678 | : &ARM::GPRRegClass; |
| 4679 | else |
| 4680 | llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); |
| 4681 | |
| 4682 | // Transform the arguments in physical registers into virtual ones. |
| 4683 | Register Reg = MF.addLiveIn(PReg: VA.getLocReg(), RC); |
| 4684 | ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, VT: RegVT); |
| 4685 | |
| 4686 | // If this value is passed in r0 and has the returned attribute (e.g. |
| 4687 | // C++ 'structors), record this fact for later use. |
| 4688 | if (VA.getLocReg() == ARM::R0 && Ins[VA.getValNo()].Flags.isReturned()) { |
| 4689 | AFI->setPreservesR0(); |
| 4690 | } |
| 4691 | } |
| 4692 | |
| 4693 | // If this is an 8 or 16-bit value, it is really passed promoted |
| 4694 | // to 32 bits. Insert an assert[sz]ext to capture this, then |
| 4695 | // truncate to the right size. |
| 4696 | switch (VA.getLocInfo()) { |
| 4697 | default: llvm_unreachable("Unknown loc info!"); |
| 4698 | case CCValAssign::Full: break; |
| 4699 | case CCValAssign::BCvt: |
| 4700 | ArgValue = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getValVT(), Operand: ArgValue); |
| 4701 | break; |
| 4702 | } |
| 4703 | |
| 4704 | // f16 arguments have their size extended to 4 bytes and are passed as if |
| 4705 | // they had been copied to the LSBs of a 32-bit register. |
| 4706 | // For that, they are passed extended to i32 (soft ABI) or to f32 (hard ABI). |
| 4707 | if (VA.needsCustom() && |
| 4708 | (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) |
| 4709 | ArgValue = MoveToHPR(dl, DAG, LocVT: VA.getLocVT(), ValVT: VA.getValVT(), Val: ArgValue); |
| 4710 | |
| 4711 | // On CMSE Entry Functions, formal integer arguments whose bitwidth is |
| 4712 | // less than 32 bits must be sign- or zero-extended in the callee for |
| 4713 | // security reasons. Although the ABI mandates an extension done by the |
| 4714 | // caller, the latter cannot be trusted to follow the rules of the ABI. |
| 4715 | const ISD::InputArg &Arg = Ins[VA.getValNo()]; |
| 4716 | if (AFI->isCmseNSEntryFunction() && Arg.ArgVT.isScalarInteger() && |
| 4717 | RegVT.isScalarInteger() && Arg.ArgVT.bitsLT(VT: MVT::i32)) |
| 4718 | ArgValue = handleCMSEValue(Value: ArgValue, Arg, DAG, DL: dl); |
| 4719 | |
| 4720 | InVals.push_back(Elt: ArgValue); |
| 4721 | } else { // !VA.isRegLoc() |
| 4722 | // Only arguments passed on the stack should make it here. |
| 4723 | assert(VA.isMemLoc()); |
| 4724 | assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered"); |
| 4725 | |
| 4726 | int index = VA.getValNo(); |
| 4727 | |
| 4728 | // Some Ins[] entries become multiple ArgLoc[] entries. |
| 4729 | // Process them only once. |
| 4730 | if (index != lastInsIndex) |
| 4731 | { |
| 4732 | ISD::ArgFlagsTy Flags = Ins[index].Flags; |
| 4733 | // FIXME: For now, all byval parameter objects are marked mutable. |
| 4734 | // This can be changed with more analysis. |
| 4735 | // In the case of tail call optimization, mark all arguments mutable, |
| 4736 | // since they could be overwritten when the arguments of a tail call |
| 4737 | // are lowered. |
| 4738 | if (Flags.isByVal()) { |
| 4739 | assert(Ins[index].isOrigArg() && |
| 4740 | "Byval arguments cannot be implicit"); |
| 4741 | unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed(); |
| 4742 | |
| 4743 | int FrameIndex = StoreByValRegs( |
| 4744 | CCInfo, DAG, dl, Chain, OrigArg: &*CurOrigArg, InRegsParamRecordIdx: CurByValIndex, |
| 4745 | ArgOffset: VA.getLocMemOffset(), ArgSize: Flags.getByValSize()); |
| 4746 | InVals.push_back(Elt: DAG.getFrameIndex(FI: FrameIndex, VT: PtrVT)); |
| 4747 | CCInfo.nextInRegsParam(); |
| 4748 | } else if (VA.needsCustom() && (VA.getValVT() == MVT::f16 || |
| 4749 | VA.getValVT() == MVT::bf16)) { |
| 4750 | // f16 and bf16 values are passed in the least-significant half of |
| 4751 | // a 4-byte stack slot. This is done as if the extension were done |
| 4752 | // in a 32-bit register, so the actual bytes used for the value |
| 4753 | // differ between little and big endian. |
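| | // For example, on a big-endian target the two meaningful bytes sit at |
| | // offset 2 within the 4-byte slot, hence the pointer adjustment below. |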
| 4754 | assert(VA.getLocVT().getSizeInBits() == 32); |
| 4755 | unsigned FIOffset = VA.getLocMemOffset(); |
| 4756 | int FI = MFI.CreateFixedObject(Size: VA.getLocVT().getSizeInBits() / 8, |
| 4757 | SPOffset: FIOffset, IsImmutable: true); |
| 4758 | |
| 4759 | SDValue Addr = DAG.getFrameIndex(FI, VT: PtrVT); |
| 4760 | if (DAG.getDataLayout().isBigEndian()) |
| 4761 | Addr = DAG.getObjectPtrOffset(SL: dl, Ptr: Addr, Offset: TypeSize::getFixed(ExactSize: 2)); |
| 4762 | |
| 4763 | InVals.push_back(Elt: DAG.getLoad(VT: VA.getValVT(), dl, Chain, Ptr: Addr, |
| 4764 | PtrInfo: MachinePointerInfo::getFixedStack( |
| 4765 | MF&: DAG.getMachineFunction(), FI))); |
| 4766 | |
| 4767 | } else { |
| 4768 | unsigned FIOffset = VA.getLocMemOffset(); |
| 4769 | int FI = MFI.CreateFixedObject(Size: VA.getLocVT().getSizeInBits()/8, |
| 4770 | SPOffset: FIOffset, IsImmutable: true); |
| 4771 | |
| 4772 | // Create load nodes to retrieve arguments from the stack. |
| 4773 | SDValue FIN = DAG.getFrameIndex(FI, VT: PtrVT); |
| 4774 | InVals.push_back(Elt: DAG.getLoad(VT: VA.getValVT(), dl, Chain, Ptr: FIN, |
| 4775 | PtrInfo: MachinePointerInfo::getFixedStack( |
| 4776 | MF&: DAG.getMachineFunction(), FI))); |
| 4777 | } |
| 4778 | lastInsIndex = index; |
| 4779 | } |
| 4780 | } |
| 4781 | } |
| 4782 | |
| 4783 | // varargs |
| 4784 | if (isVarArg && MFI.hasVAStart()) { |
| 4785 | VarArgStyleRegisters(CCInfo, DAG, dl, Chain, ArgOffset: CCInfo.getStackSize(), |
| 4786 | TotalArgRegsSaveSize); |
| 4787 | if (AFI->isCmseNSEntryFunction()) { |
| 4788 | DAG.getContext()->diagnose(DI: DiagnosticInfoUnsupported( |
| 4789 | DAG.getMachineFunction().getFunction(), |
| 4790 | "secure entry function must not be variadic", dl.getDebugLoc())); |
| 4791 | } |
| 4792 | } |
| 4793 | |
| 4794 | unsigned StackArgSize = CCInfo.getStackSize(); |
| 4795 | bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; |
| 4796 | if (canGuaranteeTCO(CC: CallConv, GuaranteeTailCalls: TailCallOpt)) { |
| 4797 | // The only way to guarantee a tail call is if the callee restores its |
| 4798 | // argument area, but it must also keep the stack aligned when doing so. |
| 4799 | MaybeAlign StackAlign = DAG.getDataLayout().getStackAlignment(); |
| 4800 | assert(StackAlign && "data layout string is missing stack alignment"); |
| 4801 | StackArgSize = alignTo(Size: StackArgSize, A: *StackAlign); |
| 4802 | |
| 4803 | AFI->setArgumentStackToRestore(StackArgSize); |
| 4804 | } |
| 4805 | AFI->setArgumentStackSize(StackArgSize); |
| 4806 | |
| 4807 | if (CCInfo.getStackSize() > 0 && AFI->isCmseNSEntryFunction()) { |
| 4808 | DAG.getContext()->diagnose(DI: DiagnosticInfoUnsupported( |
| 4809 | DAG.getMachineFunction().getFunction(), |
| 4810 | "secure entry function requires arguments on stack", dl.getDebugLoc())); |
| 4811 | } |
| 4812 | |
| 4813 | return Chain; |
| 4814 | } |
| 4815 | |
| 4816 | /// isFloatingPointZero - Return true if this is +0.0. |
| 4817 | static bool isFloatingPointZero(SDValue Op) { |
| 4818 | if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Val&: Op)) |
| 4819 | return CFP->getValueAPF().isPosZero(); |
| 4820 | else if (ISD::isEXTLoad(N: Op.getNode()) || ISD::isNON_EXTLoad(N: Op.getNode())) { |
| 4821 | // Maybe this has already been legalized into the constant pool? |
| 4822 | if (Op.getOperand(i: 1).getOpcode() == ARMISD::Wrapper) { |
| 4823 | SDValue WrapperOp = Op.getOperand(i: 1).getOperand(i: 0); |
| 4824 | if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Val&: WrapperOp)) |
| 4825 | if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Val: CP->getConstVal())) |
| 4826 | return CFP->getValueAPF().isPosZero(); |
| 4827 | } |
| 4828 | } else if (Op->getOpcode() == ISD::BITCAST && |
| 4829 | Op->getValueType(ResNo: 0) == MVT::f64) { |
| 4830 | // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64) |
| 4831 | // created by LowerConstantFP(). |
| 4832 | SDValue BitcastOp = Op->getOperand(Num: 0); |
| 4833 | if (BitcastOp->getOpcode() == ARMISD::VMOVIMM && |
| 4834 | isNullConstant(V: BitcastOp->getOperand(Num: 0))) |
| 4835 | return true; |
| 4836 | } |
| 4837 | return false; |
| 4838 | } |
| 4839 | |
| 4840 | /// Returns appropriate ARM CMP (cmp) and corresponding condition code for |
| 4841 | /// the given operands. |
| 4842 | SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, |
| 4843 | SDValue &ARMcc, SelectionDAG &DAG, |
| 4844 | const SDLoc &dl) const { |
| 4845 | if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Val: RHS.getNode())) { |
| 4846 | unsigned C = RHSC->getZExtValue(); |
| 4847 | if (!isLegalICmpImmediate(Imm: (int32_t)C)) { |
| 4848 | // Constant does not fit, try adjusting it by one. |
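| | // For example (Thumb1, illustrative): "x < 256" does not fit a cmp |
| | // immediate, but the equivalent "x <= 255" does. |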
| 4849 | switch (CC) { |
| 4850 | default: break; |
| 4851 | case ISD::SETLT: |
| 4852 | case ISD::SETGE: |
| 4853 | if (C != 0x80000000 && isLegalICmpImmediate(Imm: C-1)) { |
| 4854 | CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; |
| 4855 | RHS = DAG.getConstant(Val: C - 1, DL: dl, VT: MVT::i32); |
| 4856 | } |
| 4857 | break; |
| 4858 | case ISD::SETULT: |
| 4859 | case ISD::SETUGE: |
| 4860 | if (C != 0 && isLegalICmpImmediate(Imm: C-1)) { |
| 4861 | CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; |
| 4862 | RHS = DAG.getConstant(Val: C - 1, DL: dl, VT: MVT::i32); |
| 4863 | } |
| 4864 | break; |
| 4865 | case ISD::SETLE: |
| 4866 | case ISD::SETGT: |
| 4867 | if (C != 0x7fffffff && isLegalICmpImmediate(Imm: C+1)) { |
| 4868 | CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; |
| 4869 | RHS = DAG.getConstant(Val: C + 1, DL: dl, VT: MVT::i32); |
| 4870 | } |
| 4871 | break; |
| 4872 | case ISD::SETULE: |
| 4873 | case ISD::SETUGT: |
| 4874 | if (C != 0xffffffff && isLegalICmpImmediate(Imm: C+1)) { |
| 4875 | CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; |
| 4876 | RHS = DAG.getConstant(Val: C + 1, DL: dl, VT: MVT::i32); |
| 4877 | } |
| 4878 | break; |
| 4879 | } |
| 4880 | } |
| 4881 | } else if ((ARM_AM::getShiftOpcForNode(Opcode: LHS.getOpcode()) != ARM_AM::no_shift) && |
| 4882 | (ARM_AM::getShiftOpcForNode(Opcode: RHS.getOpcode()) == ARM_AM::no_shift)) { |
| 4883 | // In ARM and Thumb-2, the compare instructions can shift their second |
| 4884 | // operand. |
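| | // (e.g. an ARM compare can be "cmp r0, r1, lsl #2", so moving the shifted |
| | // value to the second operand lets the shift fold into the compare.) |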
| 4885 | CC = ISD::getSetCCSwappedOperands(Operation: CC); |
| 4886 | std::swap(a&: LHS, b&: RHS); |
| 4887 | } |
| 4888 | |
| 4889 | // Thumb1 has very limited immediate modes, so turning an "and" into a |
| 4890 | // shift can save multiple instructions. |
| 4891 | // |
| 4892 | // If we have (x & C1), and C1 is an appropriate mask, we can transform it |
| 4893 | // into "((x << n) >> n)". But that isn't necessarily profitable on its |
| 4894 | // own. If it's the operand to an unsigned comparison with an immediate, |
| 4895 | // we can eliminate one of the shifts: we transform |
| 4896 | // "((x << n) >> n) == C2" to "(x << n) == (C2 << n)". |
| 4897 | // |
| 4898 | // We avoid transforming cases which aren't profitable due to encoding |
| 4899 | // details: |
| 4900 | // |
| 4901 | // 1. C2 fits into the immediate field of a cmp, and the transformed version |
| 4902 | // would not; in that case, we're essentially trading one immediate load for |
| 4903 | // another. |
| 4904 | // 2. C1 is 255 or 65535, so we can use uxtb or uxth. |
| 4905 | // 3. C2 is zero; we have other code for this special case. |
| 4906 | // |
| 4907 | // FIXME: Figure out profitability for Thumb2; we usually can't save an |
| 4908 | // instruction, since the AND is always one instruction anyway, but we could |
| 4909 | // use narrow instructions in some cases. |
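| | // Illustrative instance of the transform described above: with n = 20 and |
| | // C2 = 0x400, "((x << 20) >> 20) == 0x400" becomes |
| | // "(x << 20) == 0x40000000", saving the logical shift right. |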
| 4910 | if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::AND && |
| 4911 | LHS->hasOneUse() && isa<ConstantSDNode>(Val: LHS.getOperand(i: 1)) && |
| 4912 | LHS.getValueType() == MVT::i32 && isa<ConstantSDNode>(Val: RHS) && |
| 4913 | !isSignedIntSetCC(Code: CC)) { |
| 4914 | unsigned Mask = LHS.getConstantOperandVal(i: 1); |
| 4915 | auto *RHSC = cast<ConstantSDNode>(Val: RHS.getNode()); |
| 4916 | uint64_t RHSV = RHSC->getZExtValue(); |
| 4917 | if (isMask_32(Value: Mask) && (RHSV & ~Mask) == 0 && Mask != 255 && Mask != 65535) { |
| 4918 | unsigned ShiftBits = llvm::countl_zero(Val: Mask); |
| 4919 | if (RHSV && (RHSV > 255 || (RHSV << ShiftBits) <= 255)) { |
| 4920 | SDValue ShiftAmt = DAG.getConstant(Val: ShiftBits, DL: dl, VT: MVT::i32); |
| 4921 | LHS = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: MVT::i32, N1: LHS.getOperand(i: 0), N2: ShiftAmt); |
| 4922 | RHS = DAG.getConstant(Val: RHSV << ShiftBits, DL: dl, VT: MVT::i32); |
| 4923 | } |
| 4924 | } |
| 4925 | } |
| 4926 | |
| 4927 | // The specific comparison "(x<<c) > 0x80000000U" can be optimized to a |
| 4928 | // single "lsls x, c+1". The shift sets the "C" and "Z" flags the same |
| 4929 | // way a cmp would. |
| 4930 | // FIXME: Add support for ARM/Thumb2; this would need isel patterns, and |
| 4931 | // some tweaks to the heuristics for the previous and->shift transform. |
| 4932 | // FIXME: Optimize cases where the LHS isn't a shift. |
| 4933 | if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::SHL && |
| 4934 | isa<ConstantSDNode>(Val: RHS) && RHS->getAsZExtVal() == 0x80000000U && |
| 4935 | CC == ISD::SETUGT && isa<ConstantSDNode>(Val: LHS.getOperand(i: 1)) && |
| 4936 | LHS.getConstantOperandVal(i: 1) < 31) { |
| 4937 | unsigned ShiftAmt = LHS.getConstantOperandVal(i: 1) + 1; |
| 4938 | SDValue Shift = |
| 4939 | DAG.getNode(Opcode: ARMISD::LSLS, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: FlagsVT), |
| 4940 | N1: LHS.getOperand(i: 0), N2: DAG.getConstant(Val: ShiftAmt, DL: dl, VT: MVT::i32)); |
| 4941 | ARMcc = DAG.getConstant(Val: ARMCC::HI, DL: dl, VT: MVT::i32); |
| 4942 | return Shift.getValue(R: 1); |
| 4943 | } |
| 4944 | |
| 4945 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); |
| 4946 | |
| 4947 | // If the RHS is a constant zero then the V (overflow) flag will never be |
| 4948 | // set. This can allow us to simplify GE to PL or LT to MI, which can be |
| 4949 | // simpler for other passes (like the peephole optimiser) to deal with. |
| 4950 | if (isNullConstant(V: RHS)) { |
| 4951 | switch (CondCode) { |
| 4952 | default: break; |
| 4953 | case ARMCC::GE: |
| 4954 | CondCode = ARMCC::PL; |
| 4955 | break; |
| 4956 | case ARMCC::LT: |
| 4957 | CondCode = ARMCC::MI; |
| 4958 | break; |
| 4959 | } |
| 4960 | } |
| 4961 | |
| 4962 | ARMISD::NodeType CompareType; |
| 4963 | switch (CondCode) { |
| 4964 | default: |
| 4965 | CompareType = ARMISD::CMP; |
| 4966 | break; |
| 4967 | case ARMCC::EQ: |
| 4968 | case ARMCC::NE: |
| 4969 | // Uses only Z Flag |
| 4970 | CompareType = ARMISD::CMPZ; |
| 4971 | break; |
| 4972 | } |
| 4973 | ARMcc = DAG.getConstant(Val: CondCode, DL: dl, VT: MVT::i32); |
| 4974 | return DAG.getNode(Opcode: CompareType, DL: dl, VT: FlagsVT, N1: LHS, N2: RHS); |
| 4975 | } |
| 4976 | |
| 4977 | /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands. |
| 4978 | SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, |
| 4979 | SelectionDAG &DAG, const SDLoc &dl, |
| 4980 | bool Signaling) const { |
| 4981 | assert(Subtarget->hasFP64() || RHS.getValueType() != MVT::f64); |
| 4982 | SDValue Flags; |
| 4983 | if (!isFloatingPointZero(Op: RHS)) |
| 4984 | Flags = DAG.getNode(Opcode: Signaling ? ARMISD::CMPFPE : ARMISD::CMPFP, DL: dl, VT: FlagsVT, |
| 4985 | N1: LHS, N2: RHS); |
| 4986 | else |
| 4987 | Flags = DAG.getNode(Opcode: Signaling ? ARMISD::CMPFPEw0 : ARMISD::CMPFPw0, DL: dl, |
| 4988 | VT: FlagsVT, Operand: LHS); |
| 4989 | return DAG.getNode(Opcode: ARMISD::FMSTAT, DL: dl, VT: FlagsVT, Operand: Flags); |
| 4990 | } |
| 4991 | |
| 4992 | // This function returns three things: the arithmetic computation itself |
| 4993 | // (Value), a comparison (OverflowCmp), and a condition code (ARMcc). The |
| 4994 | // comparison and the condition code define the case in which the arithmetic |
| 4995 | // computation *does not* overflow. |
| 4996 | std::pair<SDValue, SDValue> |
| 4997 | ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG, |
| 4998 | SDValue &ARMcc) const { |
| 4999 | assert(Op.getValueType() == MVT::i32 && "Unsupported value type"); |
| 5000 | |
| 5001 | SDValue Value, OverflowCmp; |
| 5002 | SDValue LHS = Op.getOperand(i: 0); |
| 5003 | SDValue RHS = Op.getOperand(i: 1); |
| 5004 | SDLoc dl(Op); |
| 5005 | |
| 5006 | // FIXME: We are currently always generating CMPs because we don't support |
| 5007 | // generating CMN through the backend. This is not as good as the natural |
| 5008 | // CMP case because it causes a register dependency and cannot be folded |
| 5009 | // later. |
| 5010 | |
| 5011 | switch (Op.getOpcode()) { |
| 5012 | default: |
| 5013 | llvm_unreachable("Unknown overflow instruction!"); |
| 5014 | case ISD::SADDO: |
| 5015 | ARMcc = DAG.getConstant(Val: ARMCC::VC, DL: dl, VT: MVT::i32); |
| 5016 | Value = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: Op.getValueType(), N1: LHS, N2: RHS); |
| 5017 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: FlagsVT, N1: Value, N2: LHS); |
| 5018 | break; |
| 5019 | case ISD::UADDO: |
| 5020 | ARMcc = DAG.getConstant(Val: ARMCC::HS, DL: dl, VT: MVT::i32); |
| 5021 | // We use ADDC here to correspond to its use in LowerUnsignedALUO. |
| 5022 | // We do not use it in the USUBO case as Value may not be used. |
| 5023 | Value = DAG.getNode(Opcode: ARMISD::ADDC, DL: dl, |
| 5024 | VTList: DAG.getVTList(VT1: Op.getValueType(), VT2: MVT::i32), N1: LHS, N2: RHS) |
| 5025 | .getValue(R: 0); |
| 5026 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: FlagsVT, N1: Value, N2: LHS); |
| 5027 | break; |
| 5028 | case ISD::SSUBO: |
| 5029 | ARMcc = DAG.getConstant(Val: ARMCC::VC, DL: dl, VT: MVT::i32); |
| 5030 | Value = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: Op.getValueType(), N1: LHS, N2: RHS); |
| 5031 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: FlagsVT, N1: LHS, N2: RHS); |
| 5032 | break; |
| 5033 | case ISD::USUBO: |
| 5034 | ARMcc = DAG.getConstant(Val: ARMCC::HS, DL: dl, VT: MVT::i32); |
| 5035 | Value = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: Op.getValueType(), N1: LHS, N2: RHS); |
| 5036 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: FlagsVT, N1: LHS, N2: RHS); |
| 5037 | break; |
| 5038 | case ISD::UMULO: |
| 5039 | // We generate a UMUL_LOHI and then check if the high word is 0. |
| 5040 | ARMcc = DAG.getConstant(Val: ARMCC::EQ, DL: dl, VT: MVT::i32); |
| 5041 | Value = DAG.getNode(Opcode: ISD::UMUL_LOHI, DL: dl, |
| 5042 | VTList: DAG.getVTList(VT1: Op.getValueType(), VT2: Op.getValueType()), |
| 5043 | N1: LHS, N2: RHS); |
| 5044 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: FlagsVT, N1: Value.getValue(R: 1), |
| 5045 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 5046 | Value = Value.getValue(R: 0); // We only want the low 32 bits for the result. |
| 5047 | break; |
| 5048 | case ISD::SMULO: |
| 5049 | // We generate a SMUL_LOHI and then check if all the bits of the high word |
| 5050 | // are the same as the sign bit of the low word. |
| 5051 | ARMcc = DAG.getConstant(Val: ARMCC::EQ, DL: dl, VT: MVT::i32); |
| 5052 | Value = DAG.getNode(Opcode: ISD::SMUL_LOHI, DL: dl, |
| 5053 | VTList: DAG.getVTList(VT1: Op.getValueType(), VT2: Op.getValueType()), |
| 5054 | N1: LHS, N2: RHS); |
| 5055 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: FlagsVT, N1: Value.getValue(R: 1), |
| 5056 | N2: DAG.getNode(Opcode: ISD::SRA, DL: dl, VT: Op.getValueType(), |
| 5057 | N1: Value.getValue(R: 0), |
| 5058 | N2: DAG.getConstant(Val: 31, DL: dl, VT: MVT::i32))); |
| 5059 | Value = Value.getValue(R: 0); // We only want the low 32 bits for the result. |
| 5060 | break; |
| 5061 | } // switch (...) |
| 5062 | |
| 5063 | return std::make_pair(x&: Value, y&: OverflowCmp); |
| 5064 | } |
| 5065 | |
| 5066 | SDValue |
| 5067 | ARMTargetLowering::LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const { |
| 5068 | // Let legalize expand this if it isn't a legal type yet. |
| 5069 | if (!isTypeLegal(VT: Op.getValueType())) |
| 5070 | return SDValue(); |
| 5071 | |
| 5072 | SDValue Value, OverflowCmp; |
| 5073 | SDValue ARMcc; |
| 5074 | std::tie(args&: Value, args&: OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc); |
| 5075 | SDLoc dl(Op); |
| 5076 | // We use 0 and 1 as false and true values. |
| 5077 | SDValue TVal = DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32); |
| 5078 | SDValue FVal = DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32); |
| 5079 | EVT VT = Op.getValueType(); |
| 5080 | |
| 5081 | SDValue Overflow = |
| 5082 | DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: TVal, N2: FVal, N3: ARMcc, N4: OverflowCmp); |
| 5083 | |
| 5084 | SDVTList VTs = DAG.getVTList(VT1: Op.getValueType(), VT2: MVT::i32); |
| 5085 | return DAG.getNode(Opcode: ISD::MERGE_VALUES, DL: dl, VTList: VTs, N1: Value, N2: Overflow); |
| 5086 | } |
| 5087 | |
| 5088 | static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry, |
| 5089 | SelectionDAG &DAG) { |
| 5090 | SDLoc DL(BoolCarry); |
| 5091 | EVT CarryVT = BoolCarry.getValueType(); |
| 5092 | |
| 5093 | // This converts the boolean value carry into the carry flag by doing |
| 5094 | // ARMISD::SUBC Carry, 1 |
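| | // (Subtracting 1 sets the ARM carry flag, which means "no borrow", exactly |
| | // when the boolean carry was 1, so the flag ends up equal to BoolCarry.) |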
| 5095 | SDValue Carry = DAG.getNode(Opcode: ARMISD::SUBC, DL, |
| 5096 | VTList: DAG.getVTList(VT1: CarryVT, VT2: MVT::i32), |
| 5097 | N1: BoolCarry, N2: DAG.getConstant(Val: 1, DL, VT: CarryVT)); |
| 5098 | return Carry.getValue(R: 1); |
| 5099 | } |
| 5100 | |
| 5101 | static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT, |
| 5102 | SelectionDAG &DAG) { |
| 5103 | SDLoc DL(Flags); |
| 5104 | |
| 5105 | // Now convert the carry flag into a boolean carry. We do this |
| 5106 | // using ARMISD::ADDE 0, 0, Carry |
| 5107 | return DAG.getNode(Opcode: ARMISD::ADDE, DL, VTList: DAG.getVTList(VT1: VT, VT2: MVT::i32), |
| 5108 | N1: DAG.getConstant(Val: 0, DL, VT: MVT::i32), |
| 5109 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32), N3: Flags); |
| 5110 | } |
| 5111 | |
| 5112 | SDValue ARMTargetLowering::LowerUnsignedALUO(SDValue Op, |
| 5113 | SelectionDAG &DAG) const { |
| 5114 | // Let legalize expand this if it isn't a legal type yet. |
| 5115 | if (!isTypeLegal(VT: Op.getValueType())) |
| 5116 | return SDValue(); |
| 5117 | |
| 5118 | SDValue LHS = Op.getOperand(i: 0); |
| 5119 | SDValue RHS = Op.getOperand(i: 1); |
| 5120 | SDLoc dl(Op); |
| 5121 | |
| 5122 | EVT VT = Op.getValueType(); |
| 5123 | SDVTList VTs = DAG.getVTList(VT1: VT, VT2: MVT::i32); |
| 5124 | SDValue Value; |
| 5125 | SDValue Overflow; |
| 5126 | switch (Op.getOpcode()) { |
| 5127 | default: |
| 5128 | llvm_unreachable("Unknown overflow instruction!"); |
| 5129 | case ISD::UADDO: |
| 5130 | Value = DAG.getNode(Opcode: ARMISD::ADDC, DL: dl, VTList: VTs, N1: LHS, N2: RHS); |
| 5131 | // Convert the carry flag into a boolean value. |
| 5132 | Overflow = ConvertCarryFlagToBooleanCarry(Flags: Value.getValue(R: 1), VT, DAG); |
| 5133 | break; |
| 5134 | case ISD::USUBO: { |
| 5135 | Value = DAG.getNode(Opcode: ARMISD::SUBC, DL: dl, VTList: VTs, N1: LHS, N2: RHS); |
| 5136 | // Convert the carry flag into a boolean value. |
| 5137 | Overflow = ConvertCarryFlagToBooleanCarry(Flags: Value.getValue(R: 1), VT, DAG); |
| 5138 | // ARMISD::SUBC returns 0 when we have to borrow, so to turn it into an |
| 5139 | // overflow value we compute 1 - C. |
| 5140 | Overflow = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
| 5141 | N1: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32), N2: Overflow); |
| 5142 | break; |
| 5143 | } |
| 5144 | } |
| 5145 | |
| 5146 | return DAG.getNode(Opcode: ISD::MERGE_VALUES, DL: dl, VTList: VTs, N1: Value, N2: Overflow); |
| 5147 | } |
| 5148 | |
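| | // Lower i8/i16 saturating add/sub on targets with the DSP extension. A |
| | // rough outline of the code below: the operands are sign-extended to i32, |
| | // the scalar form of the corresponding Q/UQ add/sub node is emitted, and |
| | // the result is truncated back to the original type. |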
| 5149 | static SDValue LowerADDSUBSAT(SDValue Op, SelectionDAG &DAG, |
| 5150 | const ARMSubtarget *Subtarget) { |
| 5151 | EVT VT = Op.getValueType(); |
| 5152 | if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP() || Subtarget->isThumb1Only()) |
| 5153 | return SDValue(); |
| 5154 | if (!VT.isSimple()) |
| 5155 | return SDValue(); |
| 5156 | |
| 5157 | unsigned NewOpcode; |
| 5158 | switch (VT.getSimpleVT().SimpleTy) { |
| 5159 | default: |
| 5160 | return SDValue(); |
| 5161 | case MVT::i8: |
| 5162 | switch (Op->getOpcode()) { |
| 5163 | case ISD::UADDSAT: |
| 5164 | NewOpcode = ARMISD::UQADD8b; |
| 5165 | break; |
| 5166 | case ISD::SADDSAT: |
| 5167 | NewOpcode = ARMISD::QADD8b; |
| 5168 | break; |
| 5169 | case ISD::USUBSAT: |
| 5170 | NewOpcode = ARMISD::UQSUB8b; |
| 5171 | break; |
| 5172 | case ISD::SSUBSAT: |
| 5173 | NewOpcode = ARMISD::QSUB8b; |
| 5174 | break; |
| 5175 | } |
| 5176 | break; |
| 5177 | case MVT::i16: |
| 5178 | switch (Op->getOpcode()) { |
| 5179 | case ISD::UADDSAT: |
| 5180 | NewOpcode = ARMISD::UQADD16b; |
| 5181 | break; |
| 5182 | case ISD::SADDSAT: |
| 5183 | NewOpcode = ARMISD::QADD16b; |
| 5184 | break; |
| 5185 | case ISD::USUBSAT: |
| 5186 | NewOpcode = ARMISD::UQSUB16b; |
| 5187 | break; |
| 5188 | case ISD::SSUBSAT: |
| 5189 | NewOpcode = ARMISD::QSUB16b; |
| 5190 | break; |
| 5191 | } |
| 5192 | break; |
| 5193 | } |
| 5194 | |
| 5195 | SDLoc dl(Op); |
| 5196 | SDValue Add = |
| 5197 | DAG.getNode(Opcode: NewOpcode, DL: dl, VT: MVT::i32, |
| 5198 | N1: DAG.getSExtOrTrunc(Op: Op->getOperand(Num: 0), DL: dl, VT: MVT::i32), |
| 5199 | N2: DAG.getSExtOrTrunc(Op: Op->getOperand(Num: 1), DL: dl, VT: MVT::i32)); |
| 5200 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT, Operand: Add); |
| 5201 | } |
| 5202 | |
| 5203 | SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { |
| 5204 | SDValue Cond = Op.getOperand(i: 0); |
| 5205 | SDValue SelectTrue = Op.getOperand(i: 1); |
| 5206 | SDValue SelectFalse = Op.getOperand(i: 2); |
| 5207 | SDLoc dl(Op); |
| 5208 | unsigned Opc = Cond.getOpcode(); |
| 5209 | |
| 5210 | if (Cond.getResNo() == 1 && |
| 5211 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || |
| 5212 | Opc == ISD::USUBO)) { |
| 5213 | if (!isTypeLegal(VT: Cond->getValueType(ResNo: 0))) |
| 5214 | return SDValue(); |
| 5215 | |
| 5216 | SDValue Value, OverflowCmp; |
| 5217 | SDValue ARMcc; |
| 5218 | std::tie(args&: Value, args&: OverflowCmp) = getARMXALUOOp(Op: Cond, DAG, ARMcc); |
| 5219 | EVT VT = Op.getValueType(); |
| 5220 | |
| 5221 | return getCMOV(dl, VT, FalseVal: SelectTrue, TrueVal: SelectFalse, ARMcc, Flags: OverflowCmp, DAG); |
| 5222 | } |
| 5223 | |
| 5224 | // Convert: |
| 5225 | // |
| 5226 | // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) |
| 5227 | // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) |
| 5228 | // |
| 5229 | if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { |
| 5230 | const ConstantSDNode *CMOVTrue = |
| 5231 | dyn_cast<ConstantSDNode>(Val: Cond.getOperand(i: 0)); |
| 5232 | const ConstantSDNode *CMOVFalse = |
| 5233 | dyn_cast<ConstantSDNode>(Val: Cond.getOperand(i: 1)); |
| 5234 | |
| 5235 | if (CMOVTrue && CMOVFalse) { |
| 5236 | unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); |
| 5237 | unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); |
| 5238 | |
| 5239 | SDValue True; |
| 5240 | SDValue False; |
| 5241 | if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { |
| 5242 | True = SelectTrue; |
| 5243 | False = SelectFalse; |
| 5244 | } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { |
| 5245 | True = SelectFalse; |
| 5246 | False = SelectTrue; |
| 5247 | } |
| 5248 | |
| 5249 | if (True.getNode() && False.getNode()) |
| 5250 | return getCMOV(dl, VT: Op.getValueType(), FalseVal: True, TrueVal: False, ARMcc: Cond.getOperand(i: 2), |
| 5251 | Flags: Cond.getOperand(i: 3), DAG); |
| 5252 | } |
| 5253 | } |
| 5254 | |
| 5255 | // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the |
| 5256 | // undefined bits before doing a full-word comparison with zero. |
| 5257 | Cond = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: Cond.getValueType(), N1: Cond, |
| 5258 | N2: DAG.getConstant(Val: 1, DL: dl, VT: Cond.getValueType())); |
| 5259 | |
| 5260 | return DAG.getSelectCC(DL: dl, LHS: Cond, |
| 5261 | RHS: DAG.getConstant(Val: 0, DL: dl, VT: Cond.getValueType()), |
| 5262 | True: SelectTrue, False: SelectFalse, Cond: ISD::SETNE); |
| 5263 | } |
| 5264 | |
| 5265 | static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode, |
| 5266 | bool &swpCmpOps, bool &swpVselOps) { |
| 5267 | // Start by selecting the GE condition code for opcodes that return true for |
| 5268 | // 'equality' |
| 5269 | if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE || |
| 5270 | CC == ISD::SETULE || CC == ISD::SETGE || CC == ISD::SETLE) |
| 5271 | CondCode = ARMCC::GE; |
| 5272 | |
| 5273 | // and GT for opcodes that return false for 'equality'. |
| 5274 | else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT || |
| 5275 | CC == ISD::SETULT || CC == ISD::SETGT || CC == ISD::SETLT) |
| 5276 | CondCode = ARMCC::GT; |
| 5277 | |
| 5278 | // Since we are constrained to GE/GT, if the opcode contains 'less', we need |
| 5279 | // to swap the compare operands. |
| 5280 | if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT || |
| 5281 | CC == ISD::SETULT || CC == ISD::SETLE || CC == ISD::SETLT) |
| 5282 | swpCmpOps = true; |
| 5283 | |
| 5284 | // Both GT and GE are ordered comparisons, and return false for 'unordered'. |
| 5285 | // If we have an unordered opcode, we need to swap the operands to the VSEL |
| 5286 | // instruction (effectively negating the condition). |
| 5287 | // |
| 5288 | // This also has the effect of swapping which one of 'less' or 'greater' |
| 5289 | // returns true, so we also swap the compare operands. It also switches |
| 5290 | // whether we return true for 'equality', so we compensate by picking the |
| 5291 | // opposite condition code to our original choice. |
| 5292 | if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE || |
| 5293 | CC == ISD::SETUGT) { |
| 5294 | swpCmpOps = !swpCmpOps; |
| 5295 | swpVselOps = !swpVselOps; |
| 5296 | CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT; |
| 5297 | } |
| 5298 | |
| 5299 | // 'ordered' is 'anything but unordered', so use the VS condition code and |
| 5300 | // swap the VSEL operands. |
| 5301 | if (CC == ISD::SETO) { |
| 5302 | CondCode = ARMCC::VS; |
| 5303 | swpVselOps = true; |
| 5304 | } |
| 5305 | |
| 5306 | // 'unordered or not equal' is 'anything but equal', so use the EQ condition |
| 5307 | // code and swap the VSEL operands. Also do this if we don't care about the |
| 5308 | // unordered case. |
| 5309 | if (CC == ISD::SETUNE || CC == ISD::SETNE) { |
| 5310 | CondCode = ARMCC::EQ; |
| 5311 | swpVselOps = true; |
| 5312 | } |
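| | // Worked example (illustrative): for CC == SETULT we start with GT, the |
| | // "less" check requests a compare-operand swap, and the "unordered" check |
| | // then undoes that swap, swaps the VSEL operands instead and relaxes GT to |
| | // GE, so "a <u b ? x : y" becomes a compare of (a, b) with a GE VSEL |
| | // selecting between (y, x). |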
| 5313 | } |
| 5314 | |
| 5315 | SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, |
| 5316 | SDValue TrueVal, SDValue ARMcc, |
| 5317 | SDValue Flags, SelectionDAG &DAG) const { |
| 5318 | if (!Subtarget->hasFP64() && VT == MVT::f64) { |
| 5319 | FalseVal = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
| 5320 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: FalseVal); |
| 5321 | TrueVal = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
| 5322 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: TrueVal); |
| 5323 | |
| 5324 | SDValue TrueLow = TrueVal.getValue(R: 0); |
| 5325 | SDValue TrueHigh = TrueVal.getValue(R: 1); |
| 5326 | SDValue FalseLow = FalseVal.getValue(R: 0); |
| 5327 | SDValue FalseHigh = FalseVal.getValue(R: 1); |
| 5328 | |
| 5329 | SDValue Low = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT: MVT::i32, N1: FalseLow, N2: TrueLow, |
| 5330 | N3: ARMcc, N4: Flags); |
| 5331 | SDValue High = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT: MVT::i32, N1: FalseHigh, N2: TrueHigh, |
| 5332 | N3: ARMcc, N4: Flags); |
| 5333 | |
| 5334 | return DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: Low, N2: High); |
| 5335 | } |
| 5336 | return DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: FalseVal, N2: TrueVal, N3: ARMcc, N4: Flags); |
| 5337 | } |
| 5338 | |
| 5339 | static bool isGTorGE(ISD::CondCode CC) { |
| 5340 | return CC == ISD::SETGT || CC == ISD::SETGE; |
| 5341 | } |
| 5342 | |
| 5343 | static bool isLTorLE(ISD::CondCode CC) { |
| 5344 | return CC == ISD::SETLT || CC == ISD::SETLE; |
| 5345 | } |
| 5346 | |
| 5347 | // See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating. |
| 5348 | // All of these conditions (and their <= and >= counterparts) will do: |
| 5349 | // x < k ? k : x |
| 5350 | // x > k ? x : k |
| 5351 | // k < x ? x : k |
| 5352 | // k > x ? k : x |
| 5353 | static bool isLowerSaturate(const SDValue LHS, const SDValue RHS, |
| 5354 | const SDValue TrueVal, const SDValue FalseVal, |
| 5355 | const ISD::CondCode CC, const SDValue K) { |
| 5356 | return (isGTorGE(CC) && |
| 5357 | ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) || |
| 5358 | (isLTorLE(CC) && |
| 5359 | ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))); |
| 5360 | } |
| 5361 | |
| 5362 | // Check if two chained conditionals could be converted into SSAT or USAT. |
| 5363 | // |
| 5364 | // SSAT can replace a set of two conditional selects that bound a number to the |
| 5365 | // interval [~k, k] when k + 1 is a power of 2. Here are some examples: |
| 5366 | // |
| 5367 | // x < -k ? -k : (x > k ? k : x) |
| 5368 | // x < -k ? -k : (x < k ? x : k) |
| 5369 | // x > -k ? (x > k ? k : x) : -k |
| 5370 | // x < k ? (x < -k ? -k : x) : k |
| 5371 | // etc. |
| 5372 | // |
| 5373 | // LLVM canonicalizes these to either a min(max()) or a max(min()) |
| 5374 | // pattern. This function tries to match one of these and will return a SSAT |
| 5375 | // node if successful. |
| 5376 | // |
| 5377 | // USAT works similarly to SSAT but bounds to the interval [0, k], where k + 1 |
| 5378 | // is a power of 2. |
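| | // For instance (illustrative), "x < -128 ? -128 : (x > 127 ? 127 : x)" |
| | // clamps to the signed 8-bit range and maps to SSAT, while |
| | // "x < 0 ? 0 : (x > 255 ? 255 : x)" clamps to [0, 255] and maps to USAT. |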
| 5379 | static SDValue LowerSaturatingConditional(SDValue Op, SelectionDAG &DAG) { |
| 5380 | EVT VT = Op.getValueType(); |
| 5381 | SDValue V1 = Op.getOperand(i: 0); |
| 5382 | SDValue K1 = Op.getOperand(i: 1); |
| 5383 | SDValue TrueVal1 = Op.getOperand(i: 2); |
| 5384 | SDValue FalseVal1 = Op.getOperand(i: 3); |
| 5385 | ISD::CondCode CC1 = cast<CondCodeSDNode>(Val: Op.getOperand(i: 4))->get(); |
| 5386 | |
| 5387 | const SDValue Op2 = isa<ConstantSDNode>(Val: TrueVal1) ? FalseVal1 : TrueVal1; |
| 5388 | if (Op2.getOpcode() != ISD::SELECT_CC) |
| 5389 | return SDValue(); |
| 5390 | |
| 5391 | SDValue V2 = Op2.getOperand(i: 0); |
| 5392 | SDValue K2 = Op2.getOperand(i: 1); |
| 5393 | SDValue TrueVal2 = Op2.getOperand(i: 2); |
| 5394 | SDValue FalseVal2 = Op2.getOperand(i: 3); |
| 5395 | ISD::CondCode CC2 = cast<CondCodeSDNode>(Val: Op2.getOperand(i: 4))->get(); |
| 5396 | |
| 5397 | SDValue V1Tmp = V1; |
| 5398 | SDValue V2Tmp = V2; |
| 5399 | |
| 5400 | // Check that the registers and the constants match a max(min()) or min(max()) |
| 5401 | // pattern |
| 5402 | if (V1Tmp != TrueVal1 || V2Tmp != TrueVal2 || K1 != FalseVal1 || |
| 5403 | K2 != FalseVal2 || |
| 5404 | !((isGTorGE(CC: CC1) && isLTorLE(CC: CC2)) || (isLTorLE(CC: CC1) && isGTorGE(CC: CC2)))) |
| 5405 | return SDValue(); |
| 5406 | |
| 5407 | // Check that the constant in the lower-bound check is |
| 5408 | // the opposite of the constant in the upper-bound check |
| 5409 | // in 1's complement. |
| 5410 | if (!isa<ConstantSDNode>(Val: K1) || !isa<ConstantSDNode>(Val: K2)) |
| 5411 | return SDValue(); |
| 5412 | |
| 5413 | int64_t Val1 = cast<ConstantSDNode>(Val&: K1)->getSExtValue(); |
| 5414 | int64_t Val2 = cast<ConstantSDNode>(Val&: K2)->getSExtValue(); |
| 5415 | int64_t PosVal = std::max(a: Val1, b: Val2); |
| 5416 | int64_t NegVal = std::min(a: Val1, b: Val2); |
| 5417 | |
| 5418 | if (!((Val1 > Val2 && isLTorLE(CC: CC1)) || (Val1 < Val2 && isLTorLE(CC: CC2))) || |
| 5419 | !isPowerOf2_64(Value: PosVal + 1)) |
| 5420 | return SDValue(); |
| 5421 | |
| 5422 | // Handle the difference between USAT (unsigned) and SSAT (signed) |
| 5423 | // saturation |
| 5424 | // At this point, PosVal is guaranteed to be positive |
| 5425 | uint64_t K = PosVal; |
| 5426 | SDLoc dl(Op); |
| 5427 | if (Val1 == ~Val2) |
| 5428 | return DAG.getNode(Opcode: ARMISD::SSAT, DL: dl, VT, N1: V2Tmp, |
| 5429 | N2: DAG.getConstant(Val: llvm::countr_one(Value: K), DL: dl, VT)); |
| 5430 | if (NegVal == 0) |
| 5431 | return DAG.getNode(Opcode: ARMISD::USAT, DL: dl, VT, N1: V2Tmp, |
| 5432 | N2: DAG.getConstant(Val: llvm::countr_one(Value: K), DL: dl, VT)); |
| 5433 | |
| 5434 | return SDValue(); |
| 5435 | } |
| 5436 | |
| 5437 | // Check if a condition of the type x < k ? k : x can be converted into a |
| 5438 | // bit operation instead of conditional moves. |
| 5439 | // Currently this is allowed given: |
| 5440 | // - The conditions and values match up |
| 5441 | // - k is 0 or -1 (all ones) |
| 5442 | // This function will not check the last condition; that's up to the caller.
| 5443 | // It returns true if the transformation can be made, and in such case |
| 5444 | // returns x in V, and k in SatK. |
| 5445 | static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V, |
| 5446 | SDValue &SatK) |
| 5447 | { |
| 5448 | SDValue LHS = Op.getOperand(i: 0); |
| 5449 | SDValue RHS = Op.getOperand(i: 1); |
| 5450 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 4))->get(); |
| 5451 | SDValue TrueVal = Op.getOperand(i: 2); |
| 5452 | SDValue FalseVal = Op.getOperand(i: 3); |
| 5453 | |
| 5454 | SDValue *K = isa<ConstantSDNode>(Val: LHS) ? &LHS : isa<ConstantSDNode>(Val: RHS) |
| 5455 | ? &RHS |
| 5456 | : nullptr; |
| 5457 | |
| 5458 | // No constant operation in comparison, early out |
| 5459 | if (!K) |
| 5460 | return false; |
| 5461 | |
| 5462 | SDValue KTmp = isa<ConstantSDNode>(Val: TrueVal) ? TrueVal : FalseVal; |
| 5463 | V = (KTmp == TrueVal) ? FalseVal : TrueVal; |
| 5464 | SDValue VTmp = (K && *K == LHS) ? RHS : LHS; |
| 5465 | |
| 5466 | // If the constant in the compare does not match the one in the select, or the
| 5467 | // variable in the compare does not match the one in the select, early out
| 5468 | if (*K != KTmp || V != VTmp) |
| 5469 | return false; |
| 5470 | |
| 5471 | if (isLowerSaturate(LHS, RHS, TrueVal, FalseVal, CC, K: *K)) { |
| 5472 | SatK = *K; |
| 5473 | return true; |
| 5474 | } |
| 5475 | |
| 5476 | return false; |
| 5477 | } |
| 5478 | |
| 5479 | bool ARMTargetLowering::isUnsupportedFloatingType(EVT VT) const { |
| 5480 | if (VT == MVT::f32) |
| 5481 | return !Subtarget->hasVFP2Base(); |
| 5482 | if (VT == MVT::f64) |
| 5483 | return !Subtarget->hasFP64(); |
| 5484 | if (VT == MVT::f16) |
| 5485 | return !Subtarget->hasFullFP16(); |
| 5486 | return false; |
| 5487 | } |
| 5488 | |
| 5489 | SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { |
| 5490 | EVT VT = Op.getValueType(); |
| 5491 | SDLoc dl(Op); |
| 5492 | |
| 5493 | // Try to convert two saturating conditional selects into a single SSAT |
| 5494 | if ((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || Subtarget->isThumb2()) |
| 5495 | if (SDValue SatValue = LowerSaturatingConditional(Op, DAG)) |
| 5496 | return SatValue; |
| 5497 | |
| 5498 | // Try to convert expressions of the form x < k ? k : x (and similar forms) |
| 5499 | // into more efficient bit operations, which is possible when k is 0 or -1 |
| 5500 | // On ARM and Thumb-2 which have flexible operand 2 this will result in |
| 5501 | // single instructions. On Thumb the shift and the bit operation will be two |
| 5502 | // instructions. |
| 5503 | // Only allow this transformation on full-width (32-bit) operations |
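// A sketch of the intended rewrites (assuming i32 values):
//   x < 0  ? 0  : x   ->   x & ~(x >> 31)
//   x < -1 ? -1 : x   ->   x | (x >> 31)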
| 5504 | SDValue LowerSatConstant; |
| 5505 | SDValue SatValue; |
| 5506 | if (VT == MVT::i32 && |
| 5507 | isLowerSaturatingConditional(Op, V&: SatValue, SatK&: LowerSatConstant)) { |
| 5508 | SDValue ShiftV = DAG.getNode(Opcode: ISD::SRA, DL: dl, VT, N1: SatValue, |
| 5509 | N2: DAG.getConstant(Val: 31, DL: dl, VT)); |
| 5510 | if (isNullConstant(V: LowerSatConstant)) { |
| 5511 | SDValue NotShiftV = DAG.getNode(Opcode: ISD::XOR, DL: dl, VT, N1: ShiftV, |
| 5512 | N2: DAG.getAllOnesConstant(DL: dl, VT)); |
| 5513 | return DAG.getNode(Opcode: ISD::AND, DL: dl, VT, N1: SatValue, N2: NotShiftV); |
| 5514 | } else if (isAllOnesConstant(V: LowerSatConstant)) |
| 5515 | return DAG.getNode(Opcode: ISD::OR, DL: dl, VT, N1: SatValue, N2: ShiftV); |
| 5516 | } |
| 5517 | |
| 5518 | SDValue LHS = Op.getOperand(i: 0); |
| 5519 | SDValue RHS = Op.getOperand(i: 1); |
| 5520 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 4))->get(); |
| 5521 | SDValue TrueVal = Op.getOperand(i: 2); |
| 5522 | SDValue FalseVal = Op.getOperand(i: 3); |
| 5523 | ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(Val&: FalseVal); |
| 5524 | ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(Val&: TrueVal); |
| 5525 | |
| 5526 | if (Subtarget->hasV8_1MMainlineOps() && CFVal && CTVal && |
| 5527 | LHS.getValueType() == MVT::i32 && RHS.getValueType() == MVT::i32) { |
| 5528 | unsigned TVal = CTVal->getZExtValue(); |
| 5529 | unsigned FVal = CFVal->getZExtValue(); |
| 5530 | unsigned Opcode = 0; |
| 5531 | |
| 5532 | if (TVal == ~FVal) { |
| 5533 | Opcode = ARMISD::CSINV; |
| 5534 | } else if (TVal == ~FVal + 1) { |
| 5535 | Opcode = ARMISD::CSNEG; |
| 5536 | } else if (TVal + 1 == FVal) { |
| 5537 | Opcode = ARMISD::CSINC; |
| 5538 | } else if (TVal == FVal + 1) { |
| 5539 | Opcode = ARMISD::CSINC; |
| 5540 | std::swap(a&: TrueVal, b&: FalseVal); |
| 5541 | std::swap(a&: TVal, b&: FVal); |
| 5542 | CC = ISD::getSetCCInverse(Operation: CC, Type: LHS.getValueType()); |
| 5543 | } |
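// Illustrative examples of the cases above:
//   TVal = 7, FVal = -8 : TVal == ~FVal    -> CSINV
//   TVal = 3, FVal = -3 : TVal == -FVal    -> CSNEG
//   TVal = 5, FVal = 4  : TVal == FVal + 1 -> CSINC, after swapping the values
//                         and inverting the condition so that TVal + 1 == FVal.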
| 5544 | |
| 5545 | if (Opcode) { |
| 5546 | // If one of the constants is cheaper than another, materialise the |
| 5547 | // cheaper one and let the csel generate the other. |
| 5548 | if (Opcode != ARMISD::CSINC && |
| 5549 | HasLowerConstantMaterializationCost(Val1: FVal, Val2: TVal, Subtarget)) { |
| 5550 | std::swap(a&: TrueVal, b&: FalseVal); |
| 5551 | std::swap(a&: TVal, b&: FVal); |
| 5552 | CC = ISD::getSetCCInverse(Operation: CC, Type: LHS.getValueType()); |
| 5553 | } |
| 5554 | |
| 5555 | // Attempt to use ZR, checking whether TVal is 0 and possibly inverting the
| 5556 | // condition to get there. CSINC is not invertible like the other two
| 5557 | // (~(~a) == a, -(-a) == a, but (a+1)+1 != a).
| 5558 | if (FVal == 0 && Opcode != ARMISD::CSINC) { |
| 5559 | std::swap(a&: TrueVal, b&: FalseVal); |
| 5560 | std::swap(a&: TVal, b&: FVal); |
| 5561 | CC = ISD::getSetCCInverse(Operation: CC, Type: LHS.getValueType()); |
| 5562 | } |
| 5563 | |
| 5564 | // Drops F's value because we can get it by inverting/negating TVal. |
| 5565 | FalseVal = TrueVal; |
| 5566 | |
| 5567 | SDValue ARMcc; |
| 5568 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
| 5569 | EVT VT = TrueVal.getValueType(); |
| 5570 | return DAG.getNode(Opcode, DL: dl, VT, N1: TrueVal, N2: FalseVal, N3: ARMcc, N4: Cmp); |
| 5571 | } |
| 5572 | } |
| 5573 | |
| 5574 | if (isUnsupportedFloatingType(VT: LHS.getValueType())) { |
| 5575 | softenSetCCOperands(DAG, VT: LHS.getValueType(), NewLHS&: LHS, NewRHS&: RHS, CCCode&: CC, DL: dl, OldLHS: LHS, OldRHS: RHS); |
| 5576 | |
| 5577 | // If softenSetCCOperands only returned one value, we should compare it to |
| 5578 | // zero. |
| 5579 | if (!RHS.getNode()) { |
| 5580 | RHS = DAG.getConstant(Val: 0, DL: dl, VT: LHS.getValueType()); |
| 5581 | CC = ISD::SETNE; |
| 5582 | } |
| 5583 | } |
| 5584 | |
| 5585 | if (LHS.getValueType() == MVT::i32) { |
| 5586 | // Try to generate VSEL on ARMv8. |
| 5587 | // The VSEL instruction can't use all the usual ARM condition |
| 5588 | // codes: it only has two bits to select the condition code, so it's |
| 5589 | // constrained to use only GE, GT, VS and EQ. |
| 5590 | // |
| 5591 | // To implement all the various ISD::SETXXX opcodes, we sometimes need to |
| 5592 | // swap the operands of the previous compare instruction (effectively |
| 5593 | // inverting the compare condition, swapping 'less' and 'greater') and |
| 5594 | // sometimes need to swap the operands to the VSEL (which inverts the |
| 5595 | // condition in the sense of firing whenever the previous condition didn't) |
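// For example (a sketch): a SETLT select becomes a SETGE one by inverting the
// condition and swapping TrueVal/FalseVal, so the VSEL below only ever has to
// encode GE, GT, EQ or VS.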
| 5596 | if (Subtarget->hasFPARMv8Base() && (TrueVal.getValueType() == MVT::f16 || |
| 5597 | TrueVal.getValueType() == MVT::f32 || |
| 5598 | TrueVal.getValueType() == MVT::f64)) { |
| 5599 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); |
| 5600 | if (CondCode == ARMCC::LT || CondCode == ARMCC::LE || |
| 5601 | CondCode == ARMCC::VC || CondCode == ARMCC::NE) { |
| 5602 | CC = ISD::getSetCCInverse(Operation: CC, Type: LHS.getValueType()); |
| 5603 | std::swap(a&: TrueVal, b&: FalseVal); |
| 5604 | } |
| 5605 | } |
| 5606 | |
| 5607 | SDValue ARMcc; |
| 5608 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
| 5609 | // Choose GE over PL, which vsel does not support
| 5610 | if (ARMcc->getAsZExtVal() == ARMCC::PL) |
| 5611 | ARMcc = DAG.getConstant(Val: ARMCC::GE, DL: dl, VT: MVT::i32); |
| 5612 | return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, Flags: Cmp, DAG); |
| 5613 | } |
| 5614 | |
| 5615 | ARMCC::CondCodes CondCode, CondCode2; |
| 5616 | FPCCToARMCC(CC, CondCode, CondCode2); |
| 5617 | |
| 5618 | // Normalize the fp compare. If RHS is zero we prefer to keep it there so we |
| 5619 | // match CMPFPw0 instead of CMPFP, though we don't do this for f16 because we |
| 5620 | // must use VSEL (limited condition codes), due to not having conditional f16 |
| 5621 | // moves. |
| 5622 | if (Subtarget->hasFPARMv8Base() && |
| 5623 | !(isFloatingPointZero(Op: RHS) && TrueVal.getValueType() != MVT::f16) && |
| 5624 | (TrueVal.getValueType() == MVT::f16 || |
| 5625 | TrueVal.getValueType() == MVT::f32 || |
| 5626 | TrueVal.getValueType() == MVT::f64)) { |
| 5627 | bool swpCmpOps = false; |
| 5628 | bool swpVselOps = false; |
| 5629 | checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps); |
| 5630 | |
| 5631 | if (CondCode == ARMCC::GT || CondCode == ARMCC::GE || |
| 5632 | CondCode == ARMCC::VS || CondCode == ARMCC::EQ) { |
| 5633 | if (swpCmpOps) |
| 5634 | std::swap(a&: LHS, b&: RHS); |
| 5635 | if (swpVselOps) |
| 5636 | std::swap(a&: TrueVal, b&: FalseVal); |
| 5637 | } |
| 5638 | } |
| 5639 | |
| 5640 | SDValue ARMcc = DAG.getConstant(Val: CondCode, DL: dl, VT: MVT::i32); |
| 5641 | SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); |
| 5642 | SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, Flags: Cmp, DAG); |
| 5643 | if (CondCode2 != ARMCC::AL) { |
| 5644 | SDValue ARMcc2 = DAG.getConstant(Val: CondCode2, DL: dl, VT: MVT::i32); |
| 5645 | Result = getCMOV(dl, VT, FalseVal: Result, TrueVal, ARMcc: ARMcc2, Flags: Cmp, DAG); |
| 5646 | } |
| 5647 | return Result; |
| 5648 | } |
| 5649 | |
| 5650 | /// canChangeToInt - Given the fp compare operand, return true if it is suitable |
| 5651 | /// to morph to an integer compare sequence. |
| 5652 | static bool canChangeToInt(SDValue Op, bool &SeenZero, |
| 5653 | const ARMSubtarget *Subtarget) { |
| 5654 | SDNode *N = Op.getNode(); |
| 5655 | if (!N->hasOneUse()) |
| 5656 | // Otherwise it requires moving the value from fp to integer registers. |
| 5657 | return false; |
| 5658 | if (!N->getNumValues()) |
| 5659 | return false; |
| 5660 | EVT VT = Op.getValueType(); |
| 5661 | if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) |
| 5662 | // f32 case is generally profitable. f64 case only makes sense when vcmpe + |
| 5663 | // vmrs are very slow, e.g. cortex-a8. |
| 5664 | return false; |
| 5665 | |
| 5666 | if (isFloatingPointZero(Op)) { |
| 5667 | SeenZero = true; |
| 5668 | return true; |
| 5669 | } |
| 5670 | return ISD::isNormalLoad(N); |
| 5671 | } |
| 5672 | |
| 5673 | static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { |
| 5674 | if (isFloatingPointZero(Op)) |
| 5675 | return DAG.getConstant(Val: 0, DL: SDLoc(Op), VT: MVT::i32); |
| 5676 | |
| 5677 | if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Val&: Op)) |
| 5678 | return DAG.getLoad(VT: MVT::i32, dl: SDLoc(Op), Chain: Ld->getChain(), Ptr: Ld->getBasePtr(), |
| 5679 | PtrInfo: Ld->getPointerInfo(), Alignment: Ld->getAlign(), |
| 5680 | MMOFlags: Ld->getMemOperand()->getFlags()); |
| 5681 | |
| 5682 | llvm_unreachable("Unknown VFP cmp argument!" ); |
| 5683 | } |
| 5684 | |
| 5685 | static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, |
| 5686 | SDValue &RetVal1, SDValue &RetVal2) { |
| 5687 | SDLoc dl(Op); |
| 5688 | |
| 5689 | if (isFloatingPointZero(Op)) { |
| 5690 | RetVal1 = DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32); |
| 5691 | RetVal2 = DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32); |
| 5692 | return; |
| 5693 | } |
| 5694 | |
| 5695 | if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Val&: Op)) { |
| 5696 | SDValue Ptr = Ld->getBasePtr(); |
| 5697 | RetVal1 = |
| 5698 | DAG.getLoad(VT: MVT::i32, dl, Chain: Ld->getChain(), Ptr, PtrInfo: Ld->getPointerInfo(), |
| 5699 | Alignment: Ld->getAlign(), MMOFlags: Ld->getMemOperand()->getFlags()); |
| 5700 | |
| 5701 | EVT PtrType = Ptr.getValueType(); |
| 5702 | SDValue NewPtr = DAG.getNode(Opcode: ISD::ADD, DL: dl, |
| 5703 | VT: PtrType, N1: Ptr, N2: DAG.getConstant(Val: 4, DL: dl, VT: PtrType)); |
| 5704 | RetVal2 = DAG.getLoad(VT: MVT::i32, dl, Chain: Ld->getChain(), Ptr: NewPtr, |
| 5705 | PtrInfo: Ld->getPointerInfo().getWithOffset(O: 4), |
| 5706 | Alignment: commonAlignment(A: Ld->getAlign(), Offset: 4), |
| 5707 | MMOFlags: Ld->getMemOperand()->getFlags()); |
| 5708 | return; |
| 5709 | } |
| 5710 | |
| 5711 | llvm_unreachable("Unknown VFP cmp argument!" ); |
| 5712 | } |
| 5713 | |
| 5714 | /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some |
| 5715 | /// f32 and even f64 comparisons to integer ones. |
| 5716 | SDValue |
| 5717 | ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { |
| 5718 | SDValue Chain = Op.getOperand(i: 0); |
| 5719 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 1))->get(); |
| 5720 | SDValue LHS = Op.getOperand(i: 2); |
| 5721 | SDValue RHS = Op.getOperand(i: 3); |
| 5722 | SDValue Dest = Op.getOperand(i: 4); |
| 5723 | SDLoc dl(Op); |
| 5724 | |
| 5725 | bool LHSSeenZero = false; |
| 5726 | bool LHSOk = canChangeToInt(Op: LHS, SeenZero&: LHSSeenZero, Subtarget); |
| 5727 | bool RHSSeenZero = false; |
| 5728 | bool RHSOk = canChangeToInt(Op: RHS, SeenZero&: RHSSeenZero, Subtarget); |
| 5729 | if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) { |
| 5730 | // If unsafe fp math optimization is enabled and there are no other uses of |
| 5731 | // the CMP operands, and the condition code is EQ or NE, we can optimize it |
| 5732 | // to an integer comparison. |
| 5733 | if (CC == ISD::SETOEQ) |
| 5734 | CC = ISD::SETEQ; |
| 5735 | else if (CC == ISD::SETUNE) |
| 5736 | CC = ISD::SETNE; |
| 5737 | |
| 5738 | SDValue Mask = DAG.getConstant(Val: 0x7fffffff, DL: dl, VT: MVT::i32); |
| 5739 | SDValue ARMcc; |
| 5740 | if (LHS.getValueType() == MVT::f32) { |
| 5741 | LHS = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, |
| 5742 | N1: bitcastf32Toi32(Op: LHS, DAG), N2: Mask); |
| 5743 | RHS = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, |
| 5744 | N1: bitcastf32Toi32(Op: RHS, DAG), N2: Mask); |
| 5745 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
| 5746 | return DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, N1: Chain, N2: Dest, N3: ARMcc, |
| 5747 | N4: Cmp); |
| 5748 | } |
| 5749 | |
| 5750 | SDValue LHS1, LHS2; |
| 5751 | SDValue RHS1, RHS2; |
| 5752 | expandf64Toi32(Op: LHS, DAG, RetVal1&: LHS1, RetVal2&: LHS2); |
| 5753 | expandf64Toi32(Op: RHS, DAG, RetVal1&: RHS1, RetVal2&: RHS2); |
| 5754 | LHS2 = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, N1: LHS2, N2: Mask); |
| 5755 | RHS2 = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, N1: RHS2, N2: Mask); |
| 5756 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); |
| 5757 | ARMcc = DAG.getConstant(Val: CondCode, DL: dl, VT: MVT::i32); |
| 5758 | SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; |
| 5759 | return DAG.getNode(Opcode: ARMISD::BCC_i64, DL: dl, VT: MVT::Other, Ops); |
| 5760 | } |
| 5761 | |
| 5762 | return SDValue(); |
| 5763 | } |
| 5764 | |
| 5765 | SDValue ARMTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { |
| 5766 | SDValue Chain = Op.getOperand(i: 0); |
| 5767 | SDValue Cond = Op.getOperand(i: 1); |
| 5768 | SDValue Dest = Op.getOperand(i: 2); |
| 5769 | SDLoc dl(Op); |
| 5770 | |
| 5771 | // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch |
| 5772 | // instruction. |
| 5773 | unsigned Opc = Cond.getOpcode(); |
| 5774 | bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) && |
| 5775 | !Subtarget->isThumb1Only(); |
| 5776 | if (Cond.getResNo() == 1 && |
| 5777 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || |
| 5778 | Opc == ISD::USUBO || OptimizeMul)) { |
| 5779 | // Only lower legal XALUO ops. |
| 5780 | if (!isTypeLegal(VT: Cond->getValueType(ResNo: 0))) |
| 5781 | return SDValue(); |
| 5782 | |
| 5783 | // The actual operation with overflow check. |
| 5784 | SDValue Value, OverflowCmp; |
| 5785 | SDValue ARMcc; |
| 5786 | std::tie(args&: Value, args&: OverflowCmp) = getARMXALUOOp(Op: Cond, DAG, ARMcc); |
| 5787 | |
| 5788 | // Reverse the condition code. |
| 5789 | ARMCC::CondCodes CondCode = |
| 5790 | (ARMCC::CondCodes)cast<const ConstantSDNode>(Val&: ARMcc)->getZExtValue(); |
| 5791 | CondCode = ARMCC::getOppositeCondition(CC: CondCode); |
| 5792 | ARMcc = DAG.getConstant(Val: CondCode, DL: SDLoc(ARMcc), VT: MVT::i32); |
| 5793 | |
| 5794 | return DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, N1: Chain, N2: Dest, N3: ARMcc, |
| 5795 | N4: OverflowCmp); |
| 5796 | } |
| 5797 | |
| 5798 | return SDValue(); |
| 5799 | } |
| 5800 | |
| 5801 | SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { |
| 5802 | SDValue Chain = Op.getOperand(i: 0); |
| 5803 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 1))->get(); |
| 5804 | SDValue LHS = Op.getOperand(i: 2); |
| 5805 | SDValue RHS = Op.getOperand(i: 3); |
| 5806 | SDValue Dest = Op.getOperand(i: 4); |
| 5807 | SDLoc dl(Op); |
| 5808 | |
| 5809 | if (isUnsupportedFloatingType(VT: LHS.getValueType())) { |
| 5810 | softenSetCCOperands(DAG, VT: LHS.getValueType(), NewLHS&: LHS, NewRHS&: RHS, CCCode&: CC, DL: dl, OldLHS: LHS, OldRHS: RHS); |
| 5811 | |
| 5812 | // If softenSetCCOperands only returned one value, we should compare it to |
| 5813 | // zero. |
| 5814 | if (!RHS.getNode()) { |
| 5815 | RHS = DAG.getConstant(Val: 0, DL: dl, VT: LHS.getValueType()); |
| 5816 | CC = ISD::SETNE; |
| 5817 | } |
| 5818 | } |
| 5819 | |
| 5820 | // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch |
| 5821 | // instruction. |
| 5822 | unsigned Opc = LHS.getOpcode(); |
| 5823 | bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) && |
| 5824 | !Subtarget->isThumb1Only(); |
| 5825 | if (LHS.getResNo() == 1 && (isOneConstant(V: RHS) || isNullConstant(V: RHS)) && |
| 5826 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || |
| 5827 | Opc == ISD::USUBO || OptimizeMul) && |
| 5828 | (CC == ISD::SETEQ || CC == ISD::SETNE)) { |
| 5829 | // Only lower legal XALUO ops. |
| 5830 | if (!isTypeLegal(VT: LHS->getValueType(ResNo: 0))) |
| 5831 | return SDValue(); |
| 5832 | |
| 5833 | // The actual operation with overflow check. |
| 5834 | SDValue Value, OverflowCmp; |
| 5835 | SDValue ARMcc; |
| 5836 | std::tie(args&: Value, args&: OverflowCmp) = getARMXALUOOp(Op: LHS.getValue(R: 0), DAG, ARMcc); |
| 5837 | |
| 5838 | if ((CC == ISD::SETNE) != isOneConstant(V: RHS)) { |
| 5839 | // Reverse the condition code. |
| 5840 | ARMCC::CondCodes CondCode = |
| 5841 | (ARMCC::CondCodes)cast<const ConstantSDNode>(Val&: ARMcc)->getZExtValue(); |
| 5842 | CondCode = ARMCC::getOppositeCondition(CC: CondCode); |
| 5843 | ARMcc = DAG.getConstant(Val: CondCode, DL: SDLoc(ARMcc), VT: MVT::i32); |
| 5844 | } |
| 5845 | |
| 5846 | return DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, N1: Chain, N2: Dest, N3: ARMcc, |
| 5847 | N4: OverflowCmp); |
| 5848 | } |
| 5849 | |
| 5850 | if (LHS.getValueType() == MVT::i32) { |
| 5851 | SDValue ARMcc; |
| 5852 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
| 5853 | return DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, N1: Chain, N2: Dest, N3: ARMcc, N4: Cmp); |
| 5854 | } |
| 5855 | |
| 5856 | if (getTargetMachine().Options.UnsafeFPMath && |
| 5857 | (CC == ISD::SETEQ || CC == ISD::SETOEQ || |
| 5858 | CC == ISD::SETNE || CC == ISD::SETUNE)) { |
| 5859 | if (SDValue Result = OptimizeVFPBrcond(Op, DAG)) |
| 5860 | return Result; |
| 5861 | } |
| 5862 | |
| 5863 | ARMCC::CondCodes CondCode, CondCode2; |
| 5864 | FPCCToARMCC(CC, CondCode, CondCode2); |
| 5865 | |
| 5866 | SDValue ARMcc = DAG.getConstant(Val: CondCode, DL: dl, VT: MVT::i32); |
| 5867 | SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); |
| 5868 | SDValue Ops[] = {Chain, Dest, ARMcc, Cmp}; |
| 5869 | SDValue Res = DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, Ops); |
| 5870 | if (CondCode2 != ARMCC::AL) { |
| 5871 | ARMcc = DAG.getConstant(Val: CondCode2, DL: dl, VT: MVT::i32); |
| 5872 | SDValue Ops[] = {Res, Dest, ARMcc, Cmp}; |
| 5873 | Res = DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, Ops); |
| 5874 | } |
| 5875 | return Res; |
| 5876 | } |
| 5877 | |
| 5878 | SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { |
| 5879 | SDValue Chain = Op.getOperand(i: 0); |
| 5880 | SDValue Table = Op.getOperand(i: 1); |
| 5881 | SDValue Index = Op.getOperand(i: 2); |
| 5882 | SDLoc dl(Op); |
| 5883 | |
| 5884 | EVT PTy = getPointerTy(DL: DAG.getDataLayout()); |
| 5885 | JumpTableSDNode *JT = cast<JumpTableSDNode>(Val&: Table); |
| 5886 | SDValue JTI = DAG.getTargetJumpTable(JTI: JT->getIndex(), VT: PTy); |
| 5887 | Table = DAG.getNode(Opcode: ARMISD::WrapperJT, DL: dl, VT: MVT::i32, Operand: JTI); |
| 5888 | Index = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: PTy, N1: Index, N2: DAG.getConstant(Val: 4, DL: dl, VT: PTy)); |
| 5889 | SDValue Addr = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PTy, N1: Table, N2: Index); |
| 5890 | if (Subtarget->isThumb2() || (Subtarget->hasV8MBaselineOps() && Subtarget->isThumb())) { |
| 5891 | // Thumb2 and ARMv8-M use a two-level jump: the branch jumps into the jump
| 5892 | // table, which does another jump to the destination. This also makes it easier
| 5893 | // to translate it to TBB / TBH later (Thumb2 only). |
| 5894 | // FIXME: This might not work if the function is extremely large. |
| 5895 | return DAG.getNode(Opcode: ARMISD::BR2_JT, DL: dl, VT: MVT::Other, N1: Chain, |
| 5896 | N2: Addr, N3: Op.getOperand(i: 2), N4: JTI); |
| 5897 | } |
| 5898 | if (isPositionIndependent() || Subtarget->isROPI()) { |
| 5899 | Addr = |
| 5900 | DAG.getLoad(VT: (EVT)MVT::i32, dl, Chain, Ptr: Addr, |
| 5901 | PtrInfo: MachinePointerInfo::getJumpTable(MF&: DAG.getMachineFunction())); |
| 5902 | Chain = Addr.getValue(R: 1); |
| 5903 | Addr = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PTy, N1: Table, N2: Addr); |
| 5904 | return DAG.getNode(Opcode: ARMISD::BR_JT, DL: dl, VT: MVT::Other, N1: Chain, N2: Addr, N3: JTI); |
| 5905 | } else { |
| 5906 | Addr = |
| 5907 | DAG.getLoad(VT: PTy, dl, Chain, Ptr: Addr, |
| 5908 | PtrInfo: MachinePointerInfo::getJumpTable(MF&: DAG.getMachineFunction())); |
| 5909 | Chain = Addr.getValue(R: 1); |
| 5910 | return DAG.getNode(Opcode: ARMISD::BR_JT, DL: dl, VT: MVT::Other, N1: Chain, N2: Addr, N3: JTI); |
| 5911 | } |
| 5912 | } |
| 5913 | |
| 5914 | static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) { |
| 5915 | EVT VT = Op.getValueType(); |
| 5916 | SDLoc dl(Op); |
| 5917 | |
| 5918 | if (Op.getValueType().getVectorElementType() == MVT::i32) { |
| 5919 | if (Op.getOperand(i: 0).getValueType().getVectorElementType() == MVT::f32) |
| 5920 | return Op; |
| 5921 | return DAG.UnrollVectorOp(N: Op.getNode()); |
| 5922 | } |
| 5923 | |
| 5924 | const bool HasFullFP16 = DAG.getSubtarget<ARMSubtarget>().hasFullFP16(); |
| 5925 | |
| 5926 | EVT NewTy; |
| 5927 | const EVT OpTy = Op.getOperand(i: 0).getValueType(); |
| 5928 | if (OpTy == MVT::v4f32) |
| 5929 | NewTy = MVT::v4i32; |
| 5930 | else if (OpTy == MVT::v4f16 && HasFullFP16) |
| 5931 | NewTy = MVT::v4i16; |
| 5932 | else if (OpTy == MVT::v8f16 && HasFullFP16) |
| 5933 | NewTy = MVT::v8i16; |
| 5934 | else |
| 5935 | llvm_unreachable("Invalid type for custom lowering!" ); |
| 5936 | |
| 5937 | if (VT != MVT::v4i16 && VT != MVT::v8i16) |
| 5938 | return DAG.UnrollVectorOp(N: Op.getNode()); |
| 5939 | |
| 5940 | Op = DAG.getNode(Opcode: Op.getOpcode(), DL: dl, VT: NewTy, Operand: Op.getOperand(i: 0)); |
| 5941 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT, Operand: Op); |
| 5942 | } |
| 5943 | |
| 5944 | SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const { |
| 5945 | EVT VT = Op.getValueType(); |
| 5946 | if (VT.isVector()) |
| 5947 | return LowerVectorFP_TO_INT(Op, DAG); |
| 5948 | |
| 5949 | bool IsStrict = Op->isStrictFPOpcode(); |
| 5950 | SDValue SrcVal = Op.getOperand(i: IsStrict ? 1 : 0); |
| 5951 | |
| 5952 | if (isUnsupportedFloatingType(VT: SrcVal.getValueType())) { |
| 5953 | RTLIB::Libcall LC; |
| 5954 | if (Op.getOpcode() == ISD::FP_TO_SINT || |
| 5955 | Op.getOpcode() == ISD::STRICT_FP_TO_SINT) |
| 5956 | LC = RTLIB::getFPTOSINT(OpVT: SrcVal.getValueType(), |
| 5957 | RetVT: Op.getValueType()); |
| 5958 | else |
| 5959 | LC = RTLIB::getFPTOUINT(OpVT: SrcVal.getValueType(), |
| 5960 | RetVT: Op.getValueType()); |
| 5961 | SDLoc Loc(Op); |
| 5962 | MakeLibCallOptions CallOptions; |
| 5963 | SDValue Chain = IsStrict ? Op.getOperand(i: 0) : SDValue(); |
| 5964 | SDValue Result; |
| 5965 | std::tie(args&: Result, args&: Chain) = makeLibCall(DAG, LC, RetVT: Op.getValueType(), Ops: SrcVal, |
| 5966 | CallOptions, dl: Loc, Chain); |
| 5967 | return IsStrict ? DAG.getMergeValues(Ops: {Result, Chain}, dl: Loc) : Result; |
| 5968 | } |
| 5969 | |
| 5970 | // FIXME: Remove this when we have strict fp instruction selection patterns |
| 5971 | if (IsStrict) { |
| 5972 | SDLoc Loc(Op); |
| 5973 | SDValue Result = |
| 5974 | DAG.getNode(Opcode: Op.getOpcode() == ISD::STRICT_FP_TO_SINT ? ISD::FP_TO_SINT |
| 5975 | : ISD::FP_TO_UINT, |
| 5976 | DL: Loc, VT: Op.getValueType(), Operand: SrcVal); |
| 5977 | return DAG.getMergeValues(Ops: {Result, Op.getOperand(i: 0)}, dl: Loc); |
| 5978 | } |
| 5979 | |
| 5980 | return Op; |
| 5981 | } |
| 5982 | |
| 5983 | static SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG, |
| 5984 | const ARMSubtarget *Subtarget) { |
| 5985 | EVT VT = Op.getValueType(); |
| 5986 | EVT ToVT = cast<VTSDNode>(Val: Op.getOperand(i: 1))->getVT(); |
| 5987 | EVT FromVT = Op.getOperand(i: 0).getValueType(); |
| 5988 | |
| 5989 | if (VT == MVT::i32 && ToVT == MVT::i32 && FromVT == MVT::f32) |
| 5990 | return Op; |
| 5991 | if (VT == MVT::i32 && ToVT == MVT::i32 && FromVT == MVT::f64 && |
| 5992 | Subtarget->hasFP64()) |
| 5993 | return Op; |
| 5994 | if (VT == MVT::i32 && ToVT == MVT::i32 && FromVT == MVT::f16 && |
| 5995 | Subtarget->hasFullFP16()) |
| 5996 | return Op; |
| 5997 | if (VT == MVT::v4i32 && ToVT == MVT::i32 && FromVT == MVT::v4f32 && |
| 5998 | Subtarget->hasMVEFloatOps()) |
| 5999 | return Op; |
| 6000 | if (VT == MVT::v8i16 && ToVT == MVT::i16 && FromVT == MVT::v8f16 && |
| 6001 | Subtarget->hasMVEFloatOps()) |
| 6002 | return Op; |
| 6003 | |
| 6004 | if (FromVT != MVT::v4f32 && FromVT != MVT::v8f16) |
| 6005 | return SDValue(); |
| 6006 | |
| 6007 | SDLoc DL(Op); |
| 6008 | bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT; |
| 6009 | unsigned BW = ToVT.getScalarSizeInBits() - IsSigned; |
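// BW is the number of magnitude bits in the saturated result (the sign bit is
// excluded for the signed case), so the clamp below saturates to
// [-(1 << BW), (1 << BW) - 1] for signed and [0, (1 << BW) - 1] for unsigned.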
| 6010 | SDValue CVT = DAG.getNode(Opcode: Op.getOpcode(), DL, VT, N1: Op.getOperand(i: 0), |
| 6011 | N2: DAG.getValueType(VT.getScalarType())); |
| 6012 | SDValue Max = DAG.getNode(Opcode: IsSigned ? ISD::SMIN : ISD::UMIN, DL, VT, N1: CVT, |
| 6013 | N2: DAG.getConstant(Val: (1 << BW) - 1, DL, VT)); |
| 6014 | if (IsSigned) |
| 6015 | Max = DAG.getNode(Opcode: ISD::SMAX, DL, VT, N1: Max, |
| 6016 | N2: DAG.getSignedConstant(Val: -(1 << BW), DL, VT)); |
| 6017 | return Max; |
| 6018 | } |
| 6019 | |
| 6020 | static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { |
| 6021 | EVT VT = Op.getValueType(); |
| 6022 | SDLoc dl(Op); |
| 6023 | |
| 6024 | if (Op.getOperand(i: 0).getValueType().getVectorElementType() == MVT::i32) { |
| 6025 | if (VT.getVectorElementType() == MVT::f32) |
| 6026 | return Op; |
| 6027 | return DAG.UnrollVectorOp(N: Op.getNode()); |
| 6028 | } |
| 6029 | |
| 6030 | assert((Op.getOperand(0).getValueType() == MVT::v4i16 || |
| 6031 | Op.getOperand(0).getValueType() == MVT::v8i16) && |
| 6032 | "Invalid type for custom lowering!" ); |
| 6033 | |
| 6034 | const bool HasFullFP16 = DAG.getSubtarget<ARMSubtarget>().hasFullFP16(); |
| 6035 | |
| 6036 | EVT DestVecType; |
| 6037 | if (VT == MVT::v4f32) |
| 6038 | DestVecType = MVT::v4i32; |
| 6039 | else if (VT == MVT::v4f16 && HasFullFP16) |
| 6040 | DestVecType = MVT::v4i16; |
| 6041 | else if (VT == MVT::v8f16 && HasFullFP16) |
| 6042 | DestVecType = MVT::v8i16; |
| 6043 | else |
| 6044 | return DAG.UnrollVectorOp(N: Op.getNode()); |
| 6045 | |
| 6046 | unsigned CastOpc; |
| 6047 | unsigned Opc; |
| 6048 | switch (Op.getOpcode()) { |
| 6049 | default: llvm_unreachable("Invalid opcode!" ); |
| 6050 | case ISD::SINT_TO_FP: |
| 6051 | CastOpc = ISD::SIGN_EXTEND; |
| 6052 | Opc = ISD::SINT_TO_FP; |
| 6053 | break; |
| 6054 | case ISD::UINT_TO_FP: |
| 6055 | CastOpc = ISD::ZERO_EXTEND; |
| 6056 | Opc = ISD::UINT_TO_FP; |
| 6057 | break; |
| 6058 | } |
| 6059 | |
| 6060 | Op = DAG.getNode(Opcode: CastOpc, DL: dl, VT: DestVecType, Operand: Op.getOperand(i: 0)); |
| 6061 | return DAG.getNode(Opcode: Opc, DL: dl, VT, Operand: Op); |
| 6062 | } |
| 6063 | |
| 6064 | SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const { |
| 6065 | EVT VT = Op.getValueType(); |
| 6066 | if (VT.isVector()) |
| 6067 | return LowerVectorINT_TO_FP(Op, DAG); |
| 6068 | if (isUnsupportedFloatingType(VT)) { |
| 6069 | RTLIB::Libcall LC; |
| 6070 | if (Op.getOpcode() == ISD::SINT_TO_FP) |
| 6071 | LC = RTLIB::getSINTTOFP(OpVT: Op.getOperand(i: 0).getValueType(), |
| 6072 | RetVT: Op.getValueType()); |
| 6073 | else |
| 6074 | LC = RTLIB::getUINTTOFP(OpVT: Op.getOperand(i: 0).getValueType(), |
| 6075 | RetVT: Op.getValueType()); |
| 6076 | MakeLibCallOptions CallOptions; |
| 6077 | return makeLibCall(DAG, LC, RetVT: Op.getValueType(), Ops: Op.getOperand(i: 0), |
| 6078 | CallOptions, dl: SDLoc(Op)).first; |
| 6079 | } |
| 6080 | |
| 6081 | return Op; |
| 6082 | } |
| 6083 | |
| 6084 | SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { |
| 6085 | // Implement fcopysign with a fabs and a conditional fneg. |
| 6086 | SDValue Tmp0 = Op.getOperand(i: 0); |
| 6087 | SDValue Tmp1 = Op.getOperand(i: 1); |
| 6088 | SDLoc dl(Op); |
| 6089 | EVT VT = Op.getValueType(); |
| 6090 | EVT SrcVT = Tmp1.getValueType(); |
| 6091 | bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || |
| 6092 | Tmp0.getOpcode() == ARMISD::VMOVDRR; |
| 6093 | bool UseNEON = !InGPR && Subtarget->hasNEON(); |
| 6094 | |
| 6095 | if (UseNEON) { |
| 6096 | // Use VBSL to copy the sign bit. |
| 6097 | unsigned EncodedVal = ARM_AM::createVMOVModImm(OpCmode: 0x6, Val: 0x80); |
| 6098 | SDValue Mask = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: MVT::v2i32, |
| 6099 | Operand: DAG.getTargetConstant(Val: EncodedVal, DL: dl, VT: MVT::i32)); |
| 6100 | EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64; |
| 6101 | if (VT == MVT::f64) |
| 6102 | Mask = DAG.getNode(Opcode: ARMISD::VSHLIMM, DL: dl, VT: OpVT, |
| 6103 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: OpVT, Operand: Mask), |
| 6104 | N2: DAG.getConstant(Val: 32, DL: dl, VT: MVT::i32)); |
| 6105 | else /*if (VT == MVT::f32)*/ |
| 6106 | Tmp0 = DAG.getNode(Opcode: ISD::SCALAR_TO_VECTOR, DL: dl, VT: MVT::v2f32, Operand: Tmp0); |
| 6107 | if (SrcVT == MVT::f32) { |
| 6108 | Tmp1 = DAG.getNode(Opcode: ISD::SCALAR_TO_VECTOR, DL: dl, VT: MVT::v2f32, Operand: Tmp1); |
| 6109 | if (VT == MVT::f64) |
| 6110 | Tmp1 = DAG.getNode(Opcode: ARMISD::VSHLIMM, DL: dl, VT: OpVT, |
| 6111 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: OpVT, Operand: Tmp1), |
| 6112 | N2: DAG.getConstant(Val: 32, DL: dl, VT: MVT::i32)); |
| 6113 | } else if (VT == MVT::f32) |
| 6114 | Tmp1 = DAG.getNode(Opcode: ARMISD::VSHRuIMM, DL: dl, VT: MVT::v1i64, |
| 6115 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v1i64, Operand: Tmp1), |
| 6116 | N2: DAG.getConstant(Val: 32, DL: dl, VT: MVT::i32)); |
| 6117 | Tmp0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: OpVT, Operand: Tmp0); |
| 6118 | Tmp1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: OpVT, Operand: Tmp1); |
| 6119 | |
| 6120 | SDValue AllOnes = DAG.getTargetConstant(Val: ARM_AM::createVMOVModImm(OpCmode: 0xe, Val: 0xff), |
| 6121 | DL: dl, VT: MVT::i32); |
| 6122 | AllOnes = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: MVT::v8i8, Operand: AllOnes); |
| 6123 | SDValue MaskNot = DAG.getNode(Opcode: ISD::XOR, DL: dl, VT: OpVT, N1: Mask, |
| 6124 | N2: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: OpVT, Operand: AllOnes)); |
| 6125 | |
| 6126 | SDValue Res = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: OpVT, |
| 6127 | N1: DAG.getNode(Opcode: ISD::AND, DL: dl, VT: OpVT, N1: Tmp1, N2: Mask), |
| 6128 | N2: DAG.getNode(Opcode: ISD::AND, DL: dl, VT: OpVT, N1: Tmp0, N2: MaskNot)); |
| 6129 | if (VT == MVT::f32) { |
| 6130 | Res = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v2f32, Operand: Res); |
| 6131 | Res = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f32, N1: Res, |
| 6132 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 6133 | } else { |
| 6134 | Res = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f64, Operand: Res); |
| 6135 | } |
| 6136 | |
| 6137 | return Res; |
| 6138 | } |
| 6139 | |
| 6140 | // Bitcast operand 1 to i32. |
| 6141 | if (SrcVT == MVT::f64) |
| 6142 | Tmp1 = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
| 6143 | N: Tmp1).getValue(R: 1); |
| 6144 | Tmp1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::i32, Operand: Tmp1); |
| 6145 | |
| 6146 | // Or in the signbit with integer operations. |
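// That is (a sketch, for the f32 case):
//   copysign(x, y) == bitcast((bits(x) & 0x7fffffff) | (bits(y) & 0x80000000))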
| 6147 | SDValue Mask1 = DAG.getConstant(Val: 0x80000000, DL: dl, VT: MVT::i32); |
| 6148 | SDValue Mask2 = DAG.getConstant(Val: 0x7fffffff, DL: dl, VT: MVT::i32); |
| 6149 | Tmp1 = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, N1: Tmp1, N2: Mask1); |
| 6150 | if (VT == MVT::f32) { |
| 6151 | Tmp0 = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, |
| 6152 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::i32, Operand: Tmp0), N2: Mask2); |
| 6153 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f32, |
| 6154 | Operand: DAG.getNode(Opcode: ISD::OR, DL: dl, VT: MVT::i32, N1: Tmp0, N2: Tmp1)); |
| 6155 | } |
| 6156 | |
| 6157 | // f64: Or the high part with signbit and then combine two parts. |
| 6158 | Tmp0 = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
| 6159 | N: Tmp0); |
| 6160 | SDValue Lo = Tmp0.getValue(R: 0); |
| 6161 | SDValue Hi = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, N1: Tmp0.getValue(R: 1), N2: Mask2); |
| 6162 | Hi = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: MVT::i32, N1: Hi, N2: Tmp1); |
| 6163 | return DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: Lo, N2: Hi); |
| 6164 | } |
| 6165 | |
| 6166 | SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ |
| 6167 | MachineFunction &MF = DAG.getMachineFunction(); |
| 6168 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 6169 | MFI.setReturnAddressIsTaken(true); |
| 6170 | |
| 6171 | if (verifyReturnAddressArgumentIsConstant(Op, DAG)) |
| 6172 | return SDValue(); |
| 6173 | |
| 6174 | EVT VT = Op.getValueType(); |
| 6175 | SDLoc dl(Op); |
| 6176 | unsigned Depth = Op.getConstantOperandVal(i: 0); |
| 6177 | if (Depth) { |
| 6178 | SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); |
| 6179 | SDValue Offset = DAG.getConstant(Val: 4, DL: dl, VT: MVT::i32); |
| 6180 | return DAG.getLoad(VT, dl, Chain: DAG.getEntryNode(), |
| 6181 | Ptr: DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: FrameAddr, N2: Offset), |
| 6182 | PtrInfo: MachinePointerInfo()); |
| 6183 | } |
| 6184 | |
| 6185 | // Return LR, which contains the return address. Mark it an implicit live-in. |
| 6186 | Register Reg = MF.addLiveIn(PReg: ARM::LR, RC: getRegClassFor(VT: MVT::i32)); |
| 6187 | return DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg, VT); |
| 6188 | } |
| 6189 | |
| 6190 | SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { |
| 6191 | const ARMBaseRegisterInfo &ARI = |
| 6192 | *static_cast<const ARMBaseRegisterInfo*>(RegInfo); |
| 6193 | MachineFunction &MF = DAG.getMachineFunction(); |
| 6194 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 6195 | MFI.setFrameAddressIsTaken(true); |
| 6196 | |
| 6197 | EVT VT = Op.getValueType(); |
| 6198 | SDLoc dl(Op); // FIXME probably not meaningful |
| 6199 | unsigned Depth = Op.getConstantOperandVal(i: 0); |
| 6200 | Register FrameReg = ARI.getFrameRegister(MF); |
| 6201 | SDValue FrameAddr = DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg: FrameReg, VT); |
| 6202 | while (Depth--) |
| 6203 | FrameAddr = DAG.getLoad(VT, dl, Chain: DAG.getEntryNode(), Ptr: FrameAddr, |
| 6204 | PtrInfo: MachinePointerInfo()); |
| 6205 | return FrameAddr; |
| 6206 | } |
| 6207 | |
| 6208 | // FIXME? Maybe this could be a TableGen attribute on some registers and |
| 6209 | // this table could be generated automatically from RegInfo. |
| 6210 | Register ARMTargetLowering::getRegisterByName(const char* RegName, LLT VT, |
| 6211 | const MachineFunction &MF) const { |
| 6212 | return StringSwitch<Register>(RegName) |
| 6213 | .Case(S: "sp" , Value: ARM::SP) |
| 6214 | .Default(Value: Register()); |
| 6215 | } |
| 6216 | |
| 6217 | // The result is a 64-bit value, so split it into two 32-bit values and return
| 6218 | // them as a pair of values.
| 6219 | static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results, |
| 6220 | SelectionDAG &DAG) { |
| 6221 | SDLoc DL(N); |
| 6222 | |
| 6223 | // This function is only supposed to be called for i64 type destination. |
| 6224 | assert(N->getValueType(0) == MVT::i64 |
| 6225 | && "ExpandREAD_REGISTER called for non-i64 type result." ); |
| 6226 | |
| 6227 | SDValue Read = DAG.getNode(Opcode: ISD::READ_REGISTER, DL, |
| 6228 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32, VT3: MVT::Other), |
| 6229 | N1: N->getOperand(Num: 0), |
| 6230 | N2: N->getOperand(Num: 1)); |
| 6231 | |
| 6232 | Results.push_back(Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL, VT: MVT::i64, N1: Read.getValue(R: 0), |
| 6233 | N2: Read.getValue(R: 1))); |
| 6234 | Results.push_back(Elt: Read.getValue(R: 2)); // Chain |
| 6235 | } |
| 6236 | |
| 6237 | /// \p BC is a bitcast that is about to be turned into a VMOVDRR. |
| 6238 | /// When \p DstVT, the destination type of \p BC, is on the vector |
| 6239 | /// register bank and the source of bitcast, \p Op, operates on the same bank, |
| 6240 | /// it might be possible to combine them, such that everything stays on the |
| 6241 | /// vector register bank. |
| 6242 | /// \return The node that would replace \p BC, if the combine
| 6243 | /// is possible. |
| 6244 | static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC, |
| 6245 | SelectionDAG &DAG) { |
| 6246 | SDValue Op = BC->getOperand(Num: 0); |
| 6247 | EVT DstVT = BC->getValueType(ResNo: 0); |
| 6248 | |
| 6249 | // The only vector instruction that can produce a scalar (remember, |
| 6250 | // since the bitcast was about to be turned into VMOVDRR, the source |
| 6251 | // type is i64) from a vector is EXTRACT_VECTOR_ELT. |
| 6252 | // Moreover, we can do this combine only if there is one use. |
| 6253 | // Finally, if the destination type is not a vector, there is not
| 6254 | // much point in forcing everything onto the vector bank.
| 6255 | if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
| 6256 | !Op.hasOneUse()) |
| 6257 | return SDValue(); |
| 6258 | |
| 6259 | // If the index is not constant, we will introduce an additional |
| 6260 | // multiply that will stick. |
| 6261 | // Give up in that case. |
| 6262 | ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Val: Op.getOperand(i: 1)); |
| 6263 | if (!Index) |
| 6264 | return SDValue(); |
| 6265 | unsigned DstNumElt = DstVT.getVectorNumElements(); |
| 6266 | |
| 6267 | // Compute the new index. |
| 6268 | const APInt &APIntIndex = Index->getAPIntValue(); |
| 6269 | APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt); |
| 6270 | NewIndex *= APIntIndex; |
| 6271 | // Check if the new constant index fits into i32. |
| 6272 | if (NewIndex.getBitWidth() > 32) |
| 6273 | return SDValue(); |
| 6274 | |
| 6275 | // vMTy bitcast(i64 extractelt vNi64 src, i32 index) -> |
| 6276 | // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M) |
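// For example (a sketch): with DstVT == v2f32 and index == 1,
//   v2f32 (bitcast (i64 extractelt (v2i64 src), 1))
// becomes
//   v2f32 (extractsubvector (v4f32 (bitcast (v2i64 src))), 2)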
| 6277 | SDLoc dl(Op); |
| 6278 | SDValue ExtractSrc = Op.getOperand(i: 0);
| 6279 | EVT VecVT = EVT::getVectorVT( |
| 6280 | Context&: *DAG.getContext(), VT: DstVT.getScalarType(), |
| 6281 | NumElements: ExtractSrc.getValueType().getVectorNumElements() * DstNumElt); |
| 6282 | SDValue BitCast = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecVT, Operand: ExtractSrc); |
| 6283 | return DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: DstVT, N1: BitCast, |
| 6284 | N2: DAG.getConstant(Val: NewIndex.getZExtValue(), DL: dl, VT: MVT::i32)); |
| 6285 | } |
| 6286 | |
| 6287 | /// ExpandBITCAST - If the target supports VFP, this function is called to |
| 6288 | /// expand a bit convert where either the source or destination type is i64 to |
| 6289 | /// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 |
| 6290 | /// operand type is illegal (e.g., v2f32 for a target that doesn't support |
| 6291 | /// vectors), since the legalizer won't know what to do with that. |
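/// For example (a sketch): an i64 -> f64 bitcast becomes
///   (f64 (ARMISD::VMOVDRR lo32, hi32))
/// while an f64 -> i64 bitcast becomes
///   (i64 (ISD::BUILD_PAIR (ARMISD::VMOVRRD f64)))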
| 6292 | SDValue ARMTargetLowering::ExpandBITCAST(SDNode *N, SelectionDAG &DAG, |
| 6293 | const ARMSubtarget *Subtarget) const { |
| 6294 | SDLoc dl(N); |
| 6295 | SDValue Op = N->getOperand(Num: 0); |
| 6296 | |
| 6297 | // This function is only supposed to be called for i16 and i64 types, either |
| 6298 | // as the source or destination of the bit convert. |
| 6299 | EVT SrcVT = Op.getValueType(); |
| 6300 | EVT DstVT = N->getValueType(ResNo: 0); |
| 6301 | |
| 6302 | if ((SrcVT == MVT::i16 || SrcVT == MVT::i32) && |
| 6303 | (DstVT == MVT::f16 || DstVT == MVT::bf16)) |
| 6304 | return MoveToHPR(dl: SDLoc(N), DAG, LocVT: MVT::i32, ValVT: DstVT.getSimpleVT(), |
| 6305 | Val: DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: SDLoc(N), VT: MVT::i32, Operand: Op)); |
| 6306 | |
| 6307 | if ((DstVT == MVT::i16 || DstVT == MVT::i32) && |
| 6308 | (SrcVT == MVT::f16 || SrcVT == MVT::bf16)) { |
| 6309 | if (Subtarget->hasFullFP16() && !Subtarget->hasBF16()) |
| 6310 | Op = DAG.getBitcast(VT: MVT::f16, V: Op); |
| 6311 | return DAG.getNode( |
| 6312 | Opcode: ISD::TRUNCATE, DL: SDLoc(N), VT: DstVT, |
| 6313 | Operand: MoveFromHPR(dl: SDLoc(N), DAG, LocVT: MVT::i32, ValVT: SrcVT.getSimpleVT(), Val: Op)); |
| 6314 | } |
| 6315 | |
| 6316 | if (!(SrcVT == MVT::i64 || DstVT == MVT::i64)) |
| 6317 | return SDValue(); |
| 6318 | |
| 6319 | // Turn i64->f64 into VMOVDRR. |
| 6320 | if (SrcVT == MVT::i64 && isTypeLegal(VT: DstVT)) { |
| 6321 | // Do not force values to GPRs (this is what VMOVDRR does for the inputs) |
| 6322 | // if we can combine the bitcast with its source. |
| 6323 | if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(BC: N, DAG)) |
| 6324 | return Val; |
| 6325 | SDValue Lo, Hi; |
| 6326 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: Op, DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
| 6327 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: DstVT, |
| 6328 | Operand: DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: Lo, N2: Hi)); |
| 6329 | } |
| 6330 | |
| 6331 | // Turn f64->i64 into VMOVRRD. |
| 6332 | if (DstVT == MVT::i64 && isTypeLegal(VT: SrcVT)) { |
| 6333 | SDValue Cvt; |
| 6334 | if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() && |
| 6335 | SrcVT.getVectorNumElements() > 1) |
| 6336 | Cvt = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
| 6337 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
| 6338 | N: DAG.getNode(Opcode: ARMISD::VREV64, DL: dl, VT: SrcVT, Operand: Op)); |
| 6339 | else |
| 6340 | Cvt = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
| 6341 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: Op); |
| 6342 | // Merge the pieces into a single i64 value. |
| 6343 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Cvt, N2: Cvt.getValue(R: 1)); |
| 6344 | } |
| 6345 | |
| 6346 | return SDValue(); |
| 6347 | } |
| 6348 | |
| 6349 | /// getZeroVector - Returns a vector of specified type with all zero elements. |
| 6350 | /// Zero vectors are used to represent vector negation and in those cases |
| 6351 | /// will be implemented with the NEON VNEG instruction. However, VNEG does |
| 6352 | /// not support i64 elements, so sometimes the zero vectors will need to be |
| 6353 | /// explicitly constructed. Regardless, use a canonical VMOV to create the |
| 6354 | /// zero vector. |
| 6355 | static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) { |
| 6356 | assert(VT.isVector() && "Expected a vector type" ); |
| 6357 | // The canonical modified immediate encoding of a zero vector is....0! |
| 6358 | SDValue EncodedVal = DAG.getTargetConstant(Val: 0, DL: dl, VT: MVT::i32); |
| 6359 | EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; |
| 6360 | SDValue Vmov = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: VmovVT, Operand: EncodedVal); |
| 6361 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Vmov); |
| 6362 | } |
| 6363 | |
| 6364 | /// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two
| 6365 | /// i32 values and take a 2 x i32 value to shift plus a shift amount.
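/// A sketch of the expansion (VTBits == 32, ">>" is SRA or SRL as appropriate):
///   if (ShAmt < 32)  Lo = (ShOpLo >>u ShAmt) | (ShOpHi << (32 - ShAmt));
///                    Hi = ShOpHi >> ShAmt;
///   else             Lo = ShOpHi >> (ShAmt - 32);
///                    Hi = (SRA ? ShOpHi >> 31 : 0);
/// Both arms are computed and then selected with CMOV on (ShAmt - 32) >= 0.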
| 6366 | SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, |
| 6367 | SelectionDAG &DAG) const { |
| 6368 | assert(Op.getNumOperands() == 3 && "Not a double-shift!" ); |
| 6369 | EVT VT = Op.getValueType(); |
| 6370 | unsigned VTBits = VT.getSizeInBits(); |
| 6371 | SDLoc dl(Op); |
| 6372 | SDValue ShOpLo = Op.getOperand(i: 0); |
| 6373 | SDValue ShOpHi = Op.getOperand(i: 1); |
| 6374 | SDValue ShAmt = Op.getOperand(i: 2); |
| 6375 | SDValue ARMcc; |
| 6376 | unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; |
| 6377 | |
| 6378 | assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); |
| 6379 | |
| 6380 | SDValue RevShAmt = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
| 6381 | N1: DAG.getConstant(Val: VTBits, DL: dl, VT: MVT::i32), N2: ShAmt); |
| 6382 | SDValue Tmp1 = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT, N1: ShOpLo, N2: ShAmt); |
| 6383 | SDValue ExtraShAmt = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, N1: ShAmt,
| 6384 | N2: DAG.getConstant(Val: VTBits, DL: dl, VT: MVT::i32)); |
| 6385 | SDValue Tmp2 = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT, N1: ShOpHi, N2: RevShAmt); |
| 6386 | SDValue LoSmallShift = DAG.getNode(Opcode: ISD::OR, DL: dl, VT, N1: Tmp1, N2: Tmp2); |
| 6387 | SDValue LoBigShift = DAG.getNode(Opcode: Opc, DL: dl, VT, N1: ShOpHi, N2: ExtraShAmt); |
| 6388 | SDValue CmpLo = getARMCmp(LHS: ExtraShAmt, RHS: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), |
| 6389 | CC: ISD::SETGE, ARMcc, DAG, dl); |
| 6390 | SDValue Lo = |
| 6391 | DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: LoSmallShift, N2: LoBigShift, N3: ARMcc, N4: CmpLo); |
| 6392 | |
| 6393 | SDValue HiSmallShift = DAG.getNode(Opcode: Opc, DL: dl, VT, N1: ShOpHi, N2: ShAmt); |
| 6394 | SDValue HiBigShift = Opc == ISD::SRA |
| 6395 | ? DAG.getNode(Opcode: Opc, DL: dl, VT, N1: ShOpHi, |
| 6396 | N2: DAG.getConstant(Val: VTBits - 1, DL: dl, VT)) |
| 6397 | : DAG.getConstant(Val: 0, DL: dl, VT); |
| 6398 | SDValue CmpHi = getARMCmp(LHS: ExtraShAmt, RHS: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), |
| 6399 | CC: ISD::SETGE, ARMcc, DAG, dl); |
| 6400 | SDValue Hi = |
| 6401 | DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: HiSmallShift, N2: HiBigShift, N3: ARMcc, N4: CmpHi); |
| 6402 | |
| 6403 | SDValue Ops[2] = { Lo, Hi }; |
| 6404 | return DAG.getMergeValues(Ops, dl); |
| 6405 | } |
| 6406 | |
| 6407 | /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
| 6408 | /// i32 values and takes a 2 x i32 value to shift plus a shift amount.
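/// A sketch of the expansion (VTBits == 32):
///   if (ShAmt < 32)  Hi = (ShOpHi << ShAmt) | (ShOpLo >>u (32 - ShAmt));
///                    Lo = ShOpLo << ShAmt;
///   else             Hi = ShOpLo << (ShAmt - 32);
///                    Lo = 0;
/// selected with CMOV on (ShAmt - 32) >= 0, mirroring LowerShiftRightParts.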
| 6409 | SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, |
| 6410 | SelectionDAG &DAG) const { |
| 6411 | assert(Op.getNumOperands() == 3 && "Not a double-shift!" ); |
| 6412 | EVT VT = Op.getValueType(); |
| 6413 | unsigned VTBits = VT.getSizeInBits(); |
| 6414 | SDLoc dl(Op); |
| 6415 | SDValue ShOpLo = Op.getOperand(i: 0); |
| 6416 | SDValue ShOpHi = Op.getOperand(i: 1); |
| 6417 | SDValue ShAmt = Op.getOperand(i: 2); |
| 6418 | SDValue ARMcc; |
| 6419 | |
| 6420 | assert(Op.getOpcode() == ISD::SHL_PARTS); |
| 6421 | SDValue RevShAmt = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
| 6422 | N1: DAG.getConstant(Val: VTBits, DL: dl, VT: MVT::i32), N2: ShAmt); |
| 6423 | SDValue Tmp1 = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT, N1: ShOpLo, N2: RevShAmt); |
| 6424 | SDValue Tmp2 = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT, N1: ShOpHi, N2: ShAmt); |
| 6425 | SDValue HiSmallShift = DAG.getNode(Opcode: ISD::OR, DL: dl, VT, N1: Tmp1, N2: Tmp2); |
| 6426 | |
| 6427 | SDValue ExtraShAmt = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, N1: ShAmt,
| 6428 | N2: DAG.getConstant(Val: VTBits, DL: dl, VT: MVT::i32)); |
| 6429 | SDValue HiBigShift = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT, N1: ShOpLo, N2: ExtraShAmt); |
| 6430 | SDValue CmpHi = getARMCmp(LHS: ExtraShAmt, RHS: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), |
| 6431 | CC: ISD::SETGE, ARMcc, DAG, dl); |
| 6432 | SDValue Hi = |
| 6433 | DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: HiSmallShift, N2: HiBigShift, N3: ARMcc, N4: CmpHi); |
| 6434 | |
| 6435 | SDValue CmpLo = getARMCmp(LHS: ExtraShAmt, RHS: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), |
| 6436 | CC: ISD::SETGE, ARMcc, DAG, dl); |
| 6437 | SDValue LoSmallShift = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT, N1: ShOpLo, N2: ShAmt); |
| 6438 | SDValue Lo = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: LoSmallShift, |
| 6439 | N2: DAG.getConstant(Val: 0, DL: dl, VT), N3: ARMcc, N4: CmpLo); |
| 6440 | |
| 6441 | SDValue Ops[2] = { Lo, Hi }; |
| 6442 | return DAG.getMergeValues(Ops, dl); |
| 6443 | } |
| 6444 | |
| 6445 | SDValue ARMTargetLowering::LowerGET_ROUNDING(SDValue Op, |
| 6446 | SelectionDAG &DAG) const { |
| 6447 | // The rounding mode is in bits 23:22 of the FPSCR. |
| 6448 | // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0 |
| 6449 | // The formula we use to implement this is ((FPSCR + (1 << 22)) >> 22) & 3,
| 6450 | // so that the shift and the AND can be folded into a bitfield extract.
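// Worked example (ignoring the unrelated FPSCR bits): FPSCR[23:22] == 0b11
// (round towards zero) gives ((3 << 22) + (1 << 22)) >> 22 == 4 and 4 & 3 == 0,
// matching the 3->0 entry of the mapping above.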
| 6451 | SDLoc dl(Op); |
| 6452 | SDValue Chain = Op.getOperand(i: 0); |
| 6453 | SDValue Ops[] = {Chain, |
| 6454 | DAG.getConstant(Val: Intrinsic::arm_get_fpscr, DL: dl, VT: MVT::i32)}; |
| 6455 | |
| 6456 | SDValue FPSCR = |
| 6457 | DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL: dl, ResultTys: {MVT::i32, MVT::Other}, Ops); |
| 6458 | Chain = FPSCR.getValue(R: 1); |
| 6459 | SDValue FltRounds = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::i32, N1: FPSCR, |
| 6460 | N2: DAG.getConstant(Val: 1U << 22, DL: dl, VT: MVT::i32)); |
| 6461 | SDValue RMODE = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i32, N1: FltRounds, |
| 6462 | N2: DAG.getConstant(Val: 22, DL: dl, VT: MVT::i32)); |
| 6463 | SDValue And = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, N1: RMODE, |
| 6464 | N2: DAG.getConstant(Val: 3, DL: dl, VT: MVT::i32)); |
| 6465 | return DAG.getMergeValues(Ops: {And, Chain}, dl); |
| 6466 | } |
| 6467 | |
| 6468 | SDValue ARMTargetLowering::LowerSET_ROUNDING(SDValue Op, |
| 6469 | SelectionDAG &DAG) const { |
| 6470 | SDLoc DL(Op); |
| 6471 | SDValue Chain = Op->getOperand(Num: 0); |
| 6472 | SDValue RMValue = Op->getOperand(Num: 1); |
| 6473 | |
| 6474 | // The rounding mode is in bits 23:22 of the FPSCR. |
| 6475 | // The llvm.set.rounding argument value to ARM rounding mode value mapping |
| 6476 | // is 0->3, 1->0, 2->1, 3->2. The formula we use to implement this is |
| 6477 | // ((arg - 1) & 3) << 22.
| 6478 | // |
| 6479 | // It is expected that the argument of llvm.set.rounding is within the |
| 6480 | // segment [0, 3], so NearestTiesToAway (4) is not handled here. It is the
| 6481 | // responsibility of the code that generated llvm.set.rounding to ensure this
| 6482 | // condition.
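// Worked example (illustrative): an argument of 0 (round towards zero) gives
// ((0 - 1) & 3) == 3, i.e. FPSCR[23:22] == 0b11, the RZ encoding, while an
// argument of 1 (round to nearest) gives 0, the RN encoding.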
| 6483 | |
| 6484 | // Calculate new value of FPSCR[23:22]. |
| 6485 | RMValue = DAG.getNode(Opcode: ISD::SUB, DL, VT: MVT::i32, N1: RMValue, |
| 6486 | N2: DAG.getConstant(Val: 1, DL, VT: MVT::i32)); |
| 6487 | RMValue = DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: RMValue, |
| 6488 | N2: DAG.getConstant(Val: 0x3, DL, VT: MVT::i32)); |
| 6489 | RMValue = DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: RMValue, |
| 6490 | N2: DAG.getConstant(Val: ARM::RoundingBitsPos, DL, VT: MVT::i32)); |
| 6491 | |
| 6492 | // Get current value of FPSCR. |
| 6493 | SDValue Ops[] = {Chain, |
| 6494 | DAG.getConstant(Val: Intrinsic::arm_get_fpscr, DL, VT: MVT::i32)}; |
| 6495 | SDValue FPSCR = |
| 6496 | DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL, ResultTys: {MVT::i32, MVT::Other}, Ops); |
| 6497 | Chain = FPSCR.getValue(R: 1); |
| 6498 | FPSCR = FPSCR.getValue(R: 0); |
| 6499 | |
| 6500 | // Put new rounding mode into FPSCR[23:22]. |
| 6501 | const unsigned RMMask = ~(ARM::Rounding::rmMask << ARM::RoundingBitsPos); |
| 6502 | FPSCR = DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: FPSCR, |
| 6503 | N2: DAG.getConstant(Val: RMMask, DL, VT: MVT::i32)); |
| 6504 | FPSCR = DAG.getNode(Opcode: ISD::OR, DL, VT: MVT::i32, N1: FPSCR, N2: RMValue); |
| 6505 | SDValue Ops2[] = { |
| 6506 | Chain, DAG.getConstant(Val: Intrinsic::arm_set_fpscr, DL, VT: MVT::i32), FPSCR}; |
| 6507 | return DAG.getNode(Opcode: ISD::INTRINSIC_VOID, DL, VT: MVT::Other, Ops: Ops2); |
| 6508 | } |
| 6509 | |
| 6510 | SDValue ARMTargetLowering::LowerSET_FPMODE(SDValue Op, |
| 6511 | SelectionDAG &DAG) const { |
| 6512 | SDLoc DL(Op); |
| 6513 | SDValue Chain = Op->getOperand(Num: 0); |
| 6514 | SDValue Mode = Op->getOperand(Num: 1); |
| 6515 | |
| 6516 | // Generate nodes to build: |
| 6517 | // FPSCR = (FPSCR & FPStatusBits) | (Mode & ~FPStatusBits) |
| 6518 | SDValue Ops[] = {Chain, |
| 6519 | DAG.getConstant(Val: Intrinsic::arm_get_fpscr, DL, VT: MVT::i32)}; |
| 6520 | SDValue FPSCR = |
| 6521 | DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL, ResultTys: {MVT::i32, MVT::Other}, Ops); |
| 6522 | Chain = FPSCR.getValue(R: 1); |
| 6523 | FPSCR = FPSCR.getValue(R: 0); |
| 6524 | |
| 6525 | SDValue FPSCRMasked = |
| 6526 | DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: FPSCR, |
| 6527 | N2: DAG.getConstant(Val: ARM::FPStatusBits, DL, VT: MVT::i32)); |
| 6528 | SDValue InputMasked = |
| 6529 | DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: Mode, |
| 6530 | N2: DAG.getConstant(Val: ~ARM::FPStatusBits, DL, VT: MVT::i32)); |
| 6531 | FPSCR = DAG.getNode(Opcode: ISD::OR, DL, VT: MVT::i32, N1: FPSCRMasked, N2: InputMasked); |
| 6532 | |
| 6533 | SDValue Ops2[] = { |
| 6534 | Chain, DAG.getConstant(Val: Intrinsic::arm_set_fpscr, DL, VT: MVT::i32), FPSCR}; |
| 6535 | return DAG.getNode(Opcode: ISD::INTRINSIC_VOID, DL, VT: MVT::Other, Ops: Ops2); |
| 6536 | } |
| 6537 | |
| 6538 | SDValue ARMTargetLowering::LowerRESET_FPMODE(SDValue Op, |
| 6539 | SelectionDAG &DAG) const { |
| 6540 | SDLoc DL(Op); |
| 6541 | SDValue Chain = Op->getOperand(Num: 0); |
| 6542 | |
| 6543 | // To get the default FP mode all control bits are cleared: |
| 6544 | // FPSCR = FPSCR & (FPStatusBits | FPReservedBits) |
| 6545 | SDValue Ops[] = {Chain, |
| 6546 | DAG.getConstant(Val: Intrinsic::arm_get_fpscr, DL, VT: MVT::i32)}; |
| 6547 | SDValue FPSCR = |
| 6548 | DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL, ResultTys: {MVT::i32, MVT::Other}, Ops); |
| 6549 | Chain = FPSCR.getValue(R: 1); |
| 6550 | FPSCR = FPSCR.getValue(R: 0); |
| 6551 | |
| 6552 | SDValue FPSCRMasked = DAG.getNode( |
| 6553 | Opcode: ISD::AND, DL, VT: MVT::i32, N1: FPSCR, |
| 6554 | N2: DAG.getConstant(Val: ARM::FPStatusBits | ARM::FPReservedBits, DL, VT: MVT::i32)); |
| 6555 | SDValue Ops2[] = {Chain, |
| 6556 | DAG.getConstant(Val: Intrinsic::arm_set_fpscr, DL, VT: MVT::i32), |
| 6557 | FPSCRMasked}; |
| 6558 | return DAG.getNode(Opcode: ISD::INTRINSIC_VOID, DL, VT: MVT::Other, Ops: Ops2); |
| 6559 | } |
| 6560 | |
| 6561 | static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, |
| 6562 | const ARMSubtarget *ST) { |
| 6563 | SDLoc dl(N); |
| 6564 | EVT VT = N->getValueType(ResNo: 0); |
| 6565 | if (VT.isVector() && ST->hasNEON()) { |
| 6566 | |
| 6567 | // Compute the least significant set bit: LSB = X & -X |
| 6568 | SDValue X = N->getOperand(Num: 0); |
| 6569 | SDValue NX = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: getZeroVector(VT, DAG, dl), N2: X); |
| 6570 | SDValue LSB = DAG.getNode(Opcode: ISD::AND, DL: dl, VT, N1: X, N2: NX); |
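// Worked example (illustrative): for an i8 lane X = 0b01101000,
// -X = 0b10011000, so LSB = X & -X = 0b00001000, and
// cttz(X) = ctpop(LSB - 1) = ctpop(0b00000111) = 3.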
| 6571 | |
| 6572 | EVT ElemTy = VT.getVectorElementType(); |
| 6573 | |
| 6574 | if (ElemTy == MVT::i8) { |
| 6575 | // Compute with: cttz(x) = ctpop(lsb - 1) |
| 6576 | SDValue One = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT, |
| 6577 | Operand: DAG.getTargetConstant(Val: 1, DL: dl, VT: ElemTy)); |
| 6578 | SDValue Bits = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: LSB, N2: One); |
| 6579 | return DAG.getNode(Opcode: ISD::CTPOP, DL: dl, VT, Operand: Bits); |
| 6580 | } |
| 6581 | |
| 6582 | if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) && |
| 6583 | (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) { |
| 6584 | // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0 |
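// Worked example (illustrative): for an i16 lane x = 0x0008, lsb = 0x0008,
// ctlz(lsb) = 12, so cttz(x) = 15 - 12 = 3.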
| 6585 | unsigned NumBits = ElemTy.getSizeInBits(); |
| 6586 | SDValue WidthMinus1 = |
| 6587 | DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT, |
| 6588 | Operand: DAG.getTargetConstant(Val: NumBits - 1, DL: dl, VT: ElemTy)); |
| 6589 | SDValue CTLZ = DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT, Operand: LSB); |
| 6590 | return DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: WidthMinus1, N2: CTLZ); |
| 6591 | } |
| 6592 | |
| 6593 | // Compute with: cttz(x) = ctpop(lsb - 1) |
| 6594 | |
| 6595 | // Compute LSB - 1. |
| 6596 | SDValue Bits; |
| 6597 | if (ElemTy == MVT::i64) { |
| 6598 | // Load constant 0xffff'ffff'ffff'ffff to register. |
| 6599 | SDValue FF = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT, |
| 6600 | Operand: DAG.getTargetConstant(Val: 0x1eff, DL: dl, VT: MVT::i32)); |
| 6601 | Bits = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: LSB, N2: FF); |
| 6602 | } else { |
| 6603 | SDValue One = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT, |
| 6604 | Operand: DAG.getTargetConstant(Val: 1, DL: dl, VT: ElemTy)); |
| 6605 | Bits = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: LSB, N2: One); |
| 6606 | } |
| 6607 | return DAG.getNode(Opcode: ISD::CTPOP, DL: dl, VT, Operand: Bits); |
| 6608 | } |
| 6609 | |
| 6610 | if (!ST->hasV6T2Ops()) |
| 6611 | return SDValue(); |
| 6612 | |
| 6613 | SDValue rbit = DAG.getNode(Opcode: ISD::BITREVERSE, DL: dl, VT, Operand: N->getOperand(Num: 0)); |
| 6614 | return DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT, Operand: rbit); |
| 6615 | } |
| 6616 | |
| 6617 | static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG, |
| 6618 | const ARMSubtarget *ST) { |
| 6619 | EVT VT = N->getValueType(ResNo: 0); |
| 6620 | SDLoc DL(N); |
| 6621 | |
| 6622 | assert(ST->hasNEON() && "Custom ctpop lowering requires NEON." ); |
| 6623 | assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 || |
| 6624 | VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) && |
| 6625 | "Unexpected type for custom ctpop lowering" ); |
| 6626 | |
| 6627 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 6628 | EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8; |
| 6629 | SDValue Res = DAG.getBitcast(VT: VT8Bit, V: N->getOperand(Num: 0)); |
| 6630 | Res = DAG.getNode(Opcode: ISD::CTPOP, DL, VT: VT8Bit, Operand: Res); |
| 6631 | |
| 6632 | // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds. |
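// Illustrative sketch of the widening chain for a v4i32 result:
//   v16i8 ctpop --vpaddl.u8--> v8i16 --vpaddl.u16--> v4i32
// Each step halves the element count and doubles the element width while
// summing adjacent pairs.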
| 6633 | unsigned EltSize = 8; |
| 6634 | unsigned NumElts = VT.is64BitVector() ? 8 : 16; |
| 6635 | while (EltSize != VT.getScalarSizeInBits()) { |
| 6636 | SmallVector<SDValue, 8> Ops; |
| 6637 | Ops.push_back(Elt: DAG.getConstant(Val: Intrinsic::arm_neon_vpaddlu, DL, |
| 6638 | VT: TLI.getPointerTy(DL: DAG.getDataLayout()))); |
| 6639 | Ops.push_back(Elt: Res); |
| 6640 | |
| 6641 | EltSize *= 2; |
| 6642 | NumElts /= 2; |
| 6643 | MVT WidenVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: EltSize), NumElements: NumElts); |
| 6644 | Res = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT: WidenVT, Ops); |
| 6645 | } |
| 6646 | |
| 6647 | return Res; |
| 6648 | } |
| 6649 | |
/// getVShiftImm - Check if this is a valid build_vector for the immediate
| 6651 | /// operand of a vector shift operation, where all the elements of the |
| 6652 | /// build_vector must have the same constant integer value. |
| 6653 | static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { |
| 6654 | // Ignore bit_converts. |
| 6655 | while (Op.getOpcode() == ISD::BITCAST) |
| 6656 | Op = Op.getOperand(i: 0); |
| 6657 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Val: Op.getNode()); |
| 6658 | APInt SplatBits, SplatUndef; |
| 6659 | unsigned SplatBitSize; |
| 6660 | bool HasAnyUndefs; |
| 6661 | if (!BVN || |
| 6662 | !BVN->isConstantSplat(SplatValue&: SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, |
| 6663 | MinSplatBits: ElementBits) || |
| 6664 | SplatBitSize > ElementBits) |
| 6665 | return false; |
| 6666 | Cnt = SplatBits.getSExtValue(); |
| 6667 | return true; |
| 6668 | } |
| 6669 | |
| 6670 | /// isVShiftLImm - Check if this is a valid build_vector for the immediate |
| 6671 | /// operand of a vector shift left operation. That value must be in the range: |
| 6672 | /// 0 <= Value < ElementBits for a left shift; or |
| 6673 | /// 0 <= Value <= ElementBits for a long left shift. |
| 6674 | static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { |
| 6675 | assert(VT.isVector() && "vector shift count is not a vector type" ); |
| 6676 | int64_t ElementBits = VT.getScalarSizeInBits(); |
| 6677 | if (!getVShiftImm(Op, ElementBits, Cnt)) |
| 6678 | return false; |
| 6679 | return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits); |
| 6680 | } |
| 6681 | |
| 6682 | /// isVShiftRImm - Check if this is a valid build_vector for the immediate |
| 6683 | /// operand of a vector shift right operation. For a shift opcode, the value |
| 6684 | /// is positive, but for an intrinsic the value count must be negative. The |
| 6685 | /// absolute value must be in the range: |
| 6686 | /// 1 <= |Value| <= ElementBits for a right shift; or |
| 6687 | /// 1 <= |Value| <= ElementBits/2 for a narrow right shift. |
| 6688 | static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, |
| 6689 | int64_t &Cnt) { |
| 6690 | assert(VT.isVector() && "vector shift count is not a vector type" ); |
| 6691 | int64_t ElementBits = VT.getScalarSizeInBits(); |
| 6692 | if (!getVShiftImm(Op, ElementBits, Cnt)) |
| 6693 | return false; |
| 6694 | if (!isIntrinsic) |
| 6695 | return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits)); |
| 6696 | if (Cnt >= -(isNarrow ? ElementBits / 2 : ElementBits) && Cnt <= -1) { |
| 6697 | Cnt = -Cnt; |
| 6698 | return true; |
| 6699 | } |
| 6700 | return false; |
| 6701 | } |
| 6702 | |
| 6703 | static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, |
| 6704 | const ARMSubtarget *ST) { |
| 6705 | EVT VT = N->getValueType(ResNo: 0); |
| 6706 | SDLoc dl(N); |
| 6707 | int64_t Cnt; |
| 6708 | |
| 6709 | if (!VT.isVector()) |
| 6710 | return SDValue(); |
| 6711 | |
| 6712 | // We essentially have two forms here. Shift by an immediate and shift by a |
// vector register (there is also shift by a GPR, but that is just handled
| 6714 | // with a tablegen pattern). We cannot easily match shift by an immediate in |
| 6715 | // tablegen so we do that here and generate a VSHLIMM/VSHRsIMM/VSHRuIMM. |
| 6716 | // For shifting by a vector, we don't have VSHR, only VSHL (which can be |
| 6717 | // signed or unsigned, and a negative shift indicates a shift right). |
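// Illustrative examples of the three cases handled below:
//   shl v4i32 x, <3,3,3,3>  -> VSHLIMM  x, #3
//   srl v4i32 x, <3,3,3,3>  -> VSHRuIMM x, #3
//   srl v4i32 x, y          -> VSHLu    x, (0 - y)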
| 6718 | if (N->getOpcode() == ISD::SHL) { |
| 6719 | if (isVShiftLImm(Op: N->getOperand(Num: 1), VT, isLong: false, Cnt)) |
| 6720 | return DAG.getNode(Opcode: ARMISD::VSHLIMM, DL: dl, VT, N1: N->getOperand(Num: 0), |
| 6721 | N2: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
| 6722 | return DAG.getNode(Opcode: ARMISD::VSHLu, DL: dl, VT, N1: N->getOperand(Num: 0), |
| 6723 | N2: N->getOperand(Num: 1)); |
| 6724 | } |
| 6725 | |
| 6726 | assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) && |
| 6727 | "unexpected vector shift opcode" ); |
| 6728 | |
| 6729 | if (isVShiftRImm(Op: N->getOperand(Num: 1), VT, isNarrow: false, isIntrinsic: false, Cnt)) { |
| 6730 | unsigned VShiftOpc = |
| 6731 | (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM); |
| 6732 | return DAG.getNode(Opcode: VShiftOpc, DL: dl, VT, N1: N->getOperand(Num: 0), |
| 6733 | N2: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
| 6734 | } |
| 6735 | |
| 6736 | // Other right shifts we don't have operations for (we use a shift left by a |
| 6737 | // negative number). |
| 6738 | EVT ShiftVT = N->getOperand(Num: 1).getValueType(); |
| 6739 | SDValue NegatedCount = DAG.getNode( |
| 6740 | Opcode: ISD::SUB, DL: dl, VT: ShiftVT, N1: getZeroVector(VT: ShiftVT, DAG, dl), N2: N->getOperand(Num: 1)); |
| 6741 | unsigned VShiftOpc = |
| 6742 | (N->getOpcode() == ISD::SRA ? ARMISD::VSHLs : ARMISD::VSHLu); |
| 6743 | return DAG.getNode(Opcode: VShiftOpc, DL: dl, VT, N1: N->getOperand(Num: 0), N2: NegatedCount); |
| 6744 | } |
| 6745 | |
| 6746 | static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, |
| 6747 | const ARMSubtarget *ST) { |
| 6748 | EVT VT = N->getValueType(ResNo: 0); |
| 6749 | SDLoc dl(N); |
| 6750 | |
| 6751 | // We can get here for a node like i32 = ISD::SHL i32, i64 |
| 6752 | if (VT != MVT::i64) |
| 6753 | return SDValue(); |
| 6754 | |
| 6755 | assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA || |
| 6756 | N->getOpcode() == ISD::SHL) && |
| 6757 | "Unknown shift to lower!" ); |
| 6758 | |
| 6759 | unsigned ShOpc = N->getOpcode(); |
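// Illustrative example for the MVE path below: an i64 logical shift right by
// a constant 5 is split into i32 halves and emitted as LSRL Lo, Hi, #5,
// followed by a BUILD_PAIR of the two halves.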
| 6760 | if (ST->hasMVEIntegerOps()) { |
| 6761 | SDValue ShAmt = N->getOperand(Num: 1); |
| 6762 | unsigned ShPartsOpc = ARMISD::LSLL; |
| 6763 | ConstantSDNode *Con = dyn_cast<ConstantSDNode>(Val&: ShAmt); |
| 6764 | |
// If the shift amount is zero, is 32 or greater, or is wider than 64 bits,
// then fall back to the default lowering.
| 6767 | if ((!Con && ShAmt->getValueType(ResNo: 0).getSizeInBits() > 64) || |
| 6768 | (Con && (Con->getAPIntValue() == 0 || Con->getAPIntValue().uge(RHS: 32)))) |
| 6769 | return SDValue(); |
| 6770 | |
| 6771 | // Extract the lower 32 bits of the shift amount if it's not an i32 |
| 6772 | if (ShAmt->getValueType(ResNo: 0) != MVT::i32) |
| 6773 | ShAmt = DAG.getZExtOrTrunc(Op: ShAmt, DL: dl, VT: MVT::i32); |
| 6774 | |
| 6775 | if (ShOpc == ISD::SRL) { |
| 6776 | if (!Con) |
| 6777 | // There is no t2LSRLr instruction so negate and perform an lsll if the |
| 6778 | // shift amount is in a register, emulating a right shift. |
| 6779 | ShAmt = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
| 6780 | N1: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), N2: ShAmt); |
| 6781 | else |
| 6782 | // Else generate an lsrl on the immediate shift amount |
| 6783 | ShPartsOpc = ARMISD::LSRL; |
| 6784 | } else if (ShOpc == ISD::SRA) |
| 6785 | ShPartsOpc = ARMISD::ASRL; |
| 6786 | |
| 6787 | // Split Lower/Upper 32 bits of the destination/source |
| 6788 | SDValue Lo, Hi; |
| 6789 | std::tie(args&: Lo, args&: Hi) = |
| 6790 | DAG.SplitScalar(N: N->getOperand(Num: 0), DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
| 6791 | // Generate the shift operation as computed above |
| 6792 | Lo = DAG.getNode(Opcode: ShPartsOpc, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N1: Lo, N2: Hi, |
| 6793 | N3: ShAmt); |
| 6794 | // The upper 32 bits come from the second return value of lsll |
| 6795 | Hi = SDValue(Lo.getNode(), 1); |
| 6796 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Lo, N2: Hi); |
| 6797 | } |
| 6798 | |
| 6799 | // We only lower SRA, SRL of 1 here, all others use generic lowering. |
| 6800 | if (!isOneConstant(V: N->getOperand(Num: 1)) || N->getOpcode() == ISD::SHL) |
| 6801 | return SDValue(); |
| 6802 | |
| 6803 | // If we are in thumb mode, we don't have RRX. |
| 6804 | if (ST->isThumb1Only()) |
| 6805 | return SDValue(); |
| 6806 | |
| 6807 | // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. |
| 6808 | SDValue Lo, Hi; |
| 6809 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: N->getOperand(Num: 0), DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
| 6810 | |
| 6811 | // First, build a LSRS1/ASRS1 op, which shifts the top part by one and |
| 6812 | // captures the shifted out bit into a carry flag. |
| 6813 | unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::LSRS1 : ARMISD::ASRS1; |
| 6814 | Hi = DAG.getNode(Opcode: Opc, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: FlagsVT), N: Hi); |
| 6815 | |
| 6816 | // The low part is an ARMISD::RRX operand, which shifts the carry in. |
| 6817 | Lo = DAG.getNode(Opcode: ARMISD::RRX, DL: dl, VT: MVT::i32, N1: Lo, N2: Hi.getValue(R: 1)); |
| 6818 | |
| 6819 | // Merge the pieces into a single i64 value. |
| 6820 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Lo, N2: Hi); |
| 6821 | } |
| 6822 | |
| 6823 | static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG, |
| 6824 | const ARMSubtarget *ST) { |
| 6825 | bool Invert = false; |
| 6826 | bool Swap = false; |
| 6827 | unsigned Opc = ARMCC::AL; |
| 6828 | |
| 6829 | SDValue Op0 = Op.getOperand(i: 0); |
| 6830 | SDValue Op1 = Op.getOperand(i: 1); |
| 6831 | SDValue CC = Op.getOperand(i: 2); |
| 6832 | EVT VT = Op.getValueType(); |
| 6833 | ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(Val&: CC)->get(); |
| 6834 | SDLoc dl(Op); |
| 6835 | |
| 6836 | EVT CmpVT; |
| 6837 | if (ST->hasNEON()) |
| 6838 | CmpVT = Op0.getValueType().changeVectorElementTypeToInteger(); |
| 6839 | else { |
| 6840 | assert(ST->hasMVEIntegerOps() && |
| 6841 | "No hardware support for integer vector comparison!" ); |
| 6842 | |
| 6843 | if (Op.getValueType().getVectorElementType() != MVT::i1) |
| 6844 | return SDValue(); |
| 6845 | |
| 6846 | // Make sure we expand floating point setcc to scalar if we do not have |
| 6847 | // mve.fp, so that we can handle them from there. |
| 6848 | if (Op0.getValueType().isFloatingPoint() && !ST->hasMVEFloatOps()) |
| 6849 | return SDValue(); |
| 6850 | |
| 6851 | CmpVT = VT; |
| 6852 | } |
| 6853 | |
| 6854 | if (Op0.getValueType().getVectorElementType() == MVT::i64 && |
| 6855 | (SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE)) { |
| 6856 | // Special-case integer 64-bit equality comparisons. They aren't legal, |
| 6857 | // but they can be lowered with a few vector instructions. |
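// Roughly (illustrative), for v2i64 a == b:
//   cmp    = setcc eq (v4i32 bitcast a), (v4i32 bitcast b)
//   merged = and cmp, (vrev64.32 cmp)
// so each i64 lane is all-ones only if both of its i32 halves compared equal.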
| 6858 | unsigned CmpElements = CmpVT.getVectorNumElements() * 2; |
| 6859 | EVT SplitVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::i32, NumElements: CmpElements); |
| 6860 | SDValue CastOp0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: SplitVT, Operand: Op0); |
| 6861 | SDValue CastOp1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: SplitVT, Operand: Op1); |
| 6862 | SDValue Cmp = DAG.getNode(Opcode: ISD::SETCC, DL: dl, VT: SplitVT, N1: CastOp0, N2: CastOp1, |
| 6863 | N3: DAG.getCondCode(Cond: ISD::SETEQ)); |
| 6864 | SDValue Reversed = DAG.getNode(Opcode: ARMISD::VREV64, DL: dl, VT: SplitVT, Operand: Cmp); |
| 6865 | SDValue Merged = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: SplitVT, N1: Cmp, N2: Reversed); |
| 6866 | Merged = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: CmpVT, Operand: Merged); |
| 6867 | if (SetCCOpcode == ISD::SETNE) |
| 6868 | Merged = DAG.getNOT(DL: dl, Val: Merged, VT: CmpVT); |
| 6869 | Merged = DAG.getSExtOrTrunc(Op: Merged, DL: dl, VT); |
| 6870 | return Merged; |
| 6871 | } |
| 6872 | |
| 6873 | if (CmpVT.getVectorElementType() == MVT::i64) |
| 6874 | // 64-bit comparisons are not legal in general. |
| 6875 | return SDValue(); |
| 6876 | |
| 6877 | if (Op1.getValueType().isFloatingPoint()) { |
| 6878 | switch (SetCCOpcode) { |
| 6879 | default: llvm_unreachable("Illegal FP comparison" ); |
| 6880 | case ISD::SETUNE: |
| 6881 | case ISD::SETNE: |
| 6882 | if (ST->hasMVEFloatOps()) { |
| 6883 | Opc = ARMCC::NE; break; |
| 6884 | } else { |
| 6885 | Invert = true; [[fallthrough]]; |
| 6886 | } |
| 6887 | case ISD::SETOEQ: |
| 6888 | case ISD::SETEQ: Opc = ARMCC::EQ; break; |
| 6889 | case ISD::SETOLT: |
| 6890 | case ISD::SETLT: Swap = true; [[fallthrough]]; |
| 6891 | case ISD::SETOGT: |
| 6892 | case ISD::SETGT: Opc = ARMCC::GT; break; |
| 6893 | case ISD::SETOLE: |
| 6894 | case ISD::SETLE: Swap = true; [[fallthrough]]; |
| 6895 | case ISD::SETOGE: |
| 6896 | case ISD::SETGE: Opc = ARMCC::GE; break; |
| 6897 | case ISD::SETUGE: Swap = true; [[fallthrough]]; |
| 6898 | case ISD::SETULE: Invert = true; Opc = ARMCC::GT; break; |
| 6899 | case ISD::SETUGT: Swap = true; [[fallthrough]]; |
| 6900 | case ISD::SETULT: Invert = true; Opc = ARMCC::GE; break; |
| 6901 | case ISD::SETUEQ: Invert = true; [[fallthrough]]; |
| 6902 | case ISD::SETONE: { |
| 6903 | // Expand this to (OLT | OGT). |
| 6904 | SDValue TmpOp0 = DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT: CmpVT, N1: Op1, N2: Op0, |
| 6905 | N3: DAG.getConstant(Val: ARMCC::GT, DL: dl, VT: MVT::i32)); |
| 6906 | SDValue TmpOp1 = DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT: CmpVT, N1: Op0, N2: Op1, |
| 6907 | N3: DAG.getConstant(Val: ARMCC::GT, DL: dl, VT: MVT::i32)); |
| 6908 | SDValue Result = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: CmpVT, N1: TmpOp0, N2: TmpOp1); |
| 6909 | if (Invert) |
| 6910 | Result = DAG.getNOT(DL: dl, Val: Result, VT); |
| 6911 | return Result; |
| 6912 | } |
| 6913 | case ISD::SETUO: Invert = true; [[fallthrough]]; |
| 6914 | case ISD::SETO: { |
| 6915 | // Expand this to (OLT | OGE). |
| 6916 | SDValue TmpOp0 = DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT: CmpVT, N1: Op1, N2: Op0, |
| 6917 | N3: DAG.getConstant(Val: ARMCC::GT, DL: dl, VT: MVT::i32)); |
| 6918 | SDValue TmpOp1 = DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT: CmpVT, N1: Op0, N2: Op1, |
| 6919 | N3: DAG.getConstant(Val: ARMCC::GE, DL: dl, VT: MVT::i32)); |
| 6920 | SDValue Result = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: CmpVT, N1: TmpOp0, N2: TmpOp1); |
| 6921 | if (Invert) |
| 6922 | Result = DAG.getNOT(DL: dl, Val: Result, VT); |
| 6923 | return Result; |
| 6924 | } |
| 6925 | } |
| 6926 | } else { |
| 6927 | // Integer comparisons. |
| 6928 | switch (SetCCOpcode) { |
| 6929 | default: llvm_unreachable("Illegal integer comparison" ); |
| 6930 | case ISD::SETNE: |
| 6931 | if (ST->hasMVEIntegerOps()) { |
| 6932 | Opc = ARMCC::NE; break; |
| 6933 | } else { |
| 6934 | Invert = true; [[fallthrough]]; |
| 6935 | } |
| 6936 | case ISD::SETEQ: Opc = ARMCC::EQ; break; |
| 6937 | case ISD::SETLT: Swap = true; [[fallthrough]]; |
| 6938 | case ISD::SETGT: Opc = ARMCC::GT; break; |
| 6939 | case ISD::SETLE: Swap = true; [[fallthrough]]; |
| 6940 | case ISD::SETGE: Opc = ARMCC::GE; break; |
| 6941 | case ISD::SETULT: Swap = true; [[fallthrough]]; |
| 6942 | case ISD::SETUGT: Opc = ARMCC::HI; break; |
| 6943 | case ISD::SETULE: Swap = true; [[fallthrough]]; |
| 6944 | case ISD::SETUGE: Opc = ARMCC::HS; break; |
| 6945 | } |
| 6946 | |
| 6947 | // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). |
| 6948 | if (ST->hasNEON() && Opc == ARMCC::EQ) { |
| 6949 | SDValue AndOp; |
| 6950 | if (ISD::isBuildVectorAllZeros(N: Op1.getNode())) |
| 6951 | AndOp = Op0; |
| 6952 | else if (ISD::isBuildVectorAllZeros(N: Op0.getNode())) |
| 6953 | AndOp = Op1; |
| 6954 | |
| 6955 | // Ignore bitconvert. |
| 6956 | if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) |
| 6957 | AndOp = AndOp.getOperand(i: 0); |
| 6958 | |
| 6959 | if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { |
| 6960 | Op0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: CmpVT, Operand: AndOp.getOperand(i: 0)); |
| 6961 | Op1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: CmpVT, Operand: AndOp.getOperand(i: 1)); |
| 6962 | SDValue Result = DAG.getNode(Opcode: ARMISD::VTST, DL: dl, VT: CmpVT, N1: Op0, N2: Op1); |
| 6963 | if (!Invert) |
| 6964 | Result = DAG.getNOT(DL: dl, Val: Result, VT); |
| 6965 | return Result; |
| 6966 | } |
| 6967 | } |
| 6968 | } |
| 6969 | |
| 6970 | if (Swap) |
| 6971 | std::swap(a&: Op0, b&: Op1); |
| 6972 | |
| 6973 | // If one of the operands is a constant vector zero, attempt to fold the |
| 6974 | // comparison to a specialized compare-against-zero form. |
| 6975 | if (ISD::isBuildVectorAllZeros(N: Op0.getNode()) && |
| 6976 | (Opc == ARMCC::GE || Opc == ARMCC::GT || Opc == ARMCC::EQ || |
| 6977 | Opc == ARMCC::NE)) { |
| 6978 | if (Opc == ARMCC::GE) |
| 6979 | Opc = ARMCC::LE; |
| 6980 | else if (Opc == ARMCC::GT) |
| 6981 | Opc = ARMCC::LT; |
| 6982 | std::swap(a&: Op0, b&: Op1); |
| 6983 | } |
| 6984 | |
| 6985 | SDValue Result; |
| 6986 | if (ISD::isBuildVectorAllZeros(N: Op1.getNode()) && |
| 6987 | (Opc == ARMCC::GE || Opc == ARMCC::GT || Opc == ARMCC::LE || |
| 6988 | Opc == ARMCC::LT || Opc == ARMCC::NE || Opc == ARMCC::EQ)) |
| 6989 | Result = DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT: CmpVT, N1: Op0, |
| 6990 | N2: DAG.getConstant(Val: Opc, DL: dl, VT: MVT::i32)); |
| 6991 | else |
| 6992 | Result = DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT: CmpVT, N1: Op0, N2: Op1, |
| 6993 | N3: DAG.getConstant(Val: Opc, DL: dl, VT: MVT::i32)); |
| 6994 | |
| 6995 | Result = DAG.getSExtOrTrunc(Op: Result, DL: dl, VT); |
| 6996 | |
| 6997 | if (Invert) |
| 6998 | Result = DAG.getNOT(DL: dl, Val: Result, VT); |
| 6999 | |
| 7000 | return Result; |
| 7001 | } |
| 7002 | |
| 7003 | static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) { |
| 7004 | SDValue LHS = Op.getOperand(i: 0); |
| 7005 | SDValue RHS = Op.getOperand(i: 1); |
| 7006 | SDValue Carry = Op.getOperand(i: 2); |
| 7007 | SDValue Cond = Op.getOperand(i: 3); |
| 7008 | SDLoc DL(Op); |
| 7009 | |
| 7010 | assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only." ); |
| 7011 | |
| 7012 | // ARMISD::SUBE expects a carry not a borrow like ISD::USUBO_CARRY so we |
| 7013 | // have to invert the carry first. |
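// Illustrative: ISD::USUBO_CARRY's borrow == 1 means "subtract one more",
// whereas ARMISD::SUBE's carry == 0 means the same thing, hence the
// Carry = 1 - Borrow conversion below.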
| 7014 | Carry = DAG.getNode(Opcode: ISD::SUB, DL, VT: MVT::i32, |
| 7015 | N1: DAG.getConstant(Val: 1, DL, VT: MVT::i32), N2: Carry); |
| 7016 | // This converts the boolean value carry into the carry flag. |
| 7017 | Carry = ConvertBooleanCarryToCarryFlag(BoolCarry: Carry, DAG); |
| 7018 | |
| 7019 | SDVTList VTs = DAG.getVTList(VT1: LHS.getValueType(), VT2: MVT::i32); |
| 7020 | SDValue Cmp = DAG.getNode(Opcode: ARMISD::SUBE, DL, VTList: VTs, N1: LHS, N2: RHS, N3: Carry); |
| 7021 | |
| 7022 | SDValue FVal = DAG.getConstant(Val: 0, DL, VT: MVT::i32); |
| 7023 | SDValue TVal = DAG.getConstant(Val: 1, DL, VT: MVT::i32); |
| 7024 | SDValue ARMcc = DAG.getConstant( |
| 7025 | Val: IntCCToARMCC(CC: cast<CondCodeSDNode>(Val&: Cond)->get()), DL, VT: MVT::i32); |
| 7026 | return DAG.getNode(Opcode: ARMISD::CMOV, DL, VT: Op.getValueType(), N1: FVal, N2: TVal, N3: ARMcc, |
| 7027 | N4: Cmp.getValue(R: 1)); |
| 7028 | } |
| 7029 | |
| 7030 | /// isVMOVModifiedImm - Check if the specified splat value corresponds to a |
| 7031 | /// valid vector constant for a NEON or MVE instruction with a "modified |
| 7032 | /// immediate" operand (e.g., VMOV). If so, return the encoded value. |
| 7033 | static SDValue isVMOVModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, |
| 7034 | unsigned SplatBitSize, SelectionDAG &DAG, |
| 7035 | const SDLoc &dl, EVT &VT, EVT VectorVT, |
| 7036 | VMOVModImmType type) { |
| 7037 | unsigned OpCmode, Imm; |
| 7038 | bool is128Bits = VectorVT.is128BitVector(); |
| 7039 | |
| 7040 | // SplatBitSize is set to the smallest size that splats the vector, so a |
| 7041 | // zero vector will always have SplatBitSize == 8. However, NEON modified |
// immediate instructions other than VMOV do not support the 8-bit encoding
| 7043 | // of a zero vector, and the default encoding of zero is supposed to be the |
| 7044 | // 32-bit version. |
| 7045 | if (SplatBits == 0) |
| 7046 | SplatBitSize = 32; |
| 7047 | |
| 7048 | switch (SplatBitSize) { |
| 7049 | case 8: |
| 7050 | if (type != VMOVModImm) |
| 7051 | return SDValue(); |
| 7052 | // Any 1-byte value is OK. Op=0, Cmode=1110. |
| 7053 | assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big" ); |
| 7054 | OpCmode = 0xe; |
| 7055 | Imm = SplatBits; |
| 7056 | VT = is128Bits ? MVT::v16i8 : MVT::v8i8; |
| 7057 | break; |
| 7058 | |
| 7059 | case 16: |
| 7060 | // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. |
| 7061 | VT = is128Bits ? MVT::v8i16 : MVT::v4i16; |
| 7062 | if ((SplatBits & ~0xff) == 0) { |
| 7063 | // Value = 0x00nn: Op=x, Cmode=100x. |
| 7064 | OpCmode = 0x8; |
| 7065 | Imm = SplatBits; |
| 7066 | break; |
| 7067 | } |
| 7068 | if ((SplatBits & ~0xff00) == 0) { |
| 7069 | // Value = 0xnn00: Op=x, Cmode=101x. |
| 7070 | OpCmode = 0xa; |
| 7071 | Imm = SplatBits >> 8; |
| 7072 | break; |
| 7073 | } |
| 7074 | return SDValue(); |
| 7075 | |
| 7076 | case 32: |
| 7077 | // NEON's 32-bit VMOV supports splat values where: |
| 7078 | // * only one byte is nonzero, or |
| 7079 | // * the least significant byte is 0xff and the second byte is nonzero, or |
| 7080 | // * the least significant 2 bytes are 0xff and the third is nonzero. |
| 7081 | VT = is128Bits ? MVT::v4i32 : MVT::v2i32; |
| 7082 | if ((SplatBits & ~0xff) == 0) { |
| 7083 | // Value = 0x000000nn: Op=x, Cmode=000x. |
| 7084 | OpCmode = 0; |
| 7085 | Imm = SplatBits; |
| 7086 | break; |
| 7087 | } |
| 7088 | if ((SplatBits & ~0xff00) == 0) { |
| 7089 | // Value = 0x0000nn00: Op=x, Cmode=001x. |
| 7090 | OpCmode = 0x2; |
| 7091 | Imm = SplatBits >> 8; |
| 7092 | break; |
| 7093 | } |
| 7094 | if ((SplatBits & ~0xff0000) == 0) { |
| 7095 | // Value = 0x00nn0000: Op=x, Cmode=010x. |
| 7096 | OpCmode = 0x4; |
| 7097 | Imm = SplatBits >> 16; |
| 7098 | break; |
| 7099 | } |
| 7100 | if ((SplatBits & ~0xff000000) == 0) { |
| 7101 | // Value = 0xnn000000: Op=x, Cmode=011x. |
| 7102 | OpCmode = 0x6; |
| 7103 | Imm = SplatBits >> 24; |
| 7104 | break; |
| 7105 | } |
| 7106 | |
| 7107 | // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC |
| 7108 | if (type == OtherModImm) return SDValue(); |
| 7109 | |
| 7110 | if ((SplatBits & ~0xffff) == 0 && |
| 7111 | ((SplatBits | SplatUndef) & 0xff) == 0xff) { |
| 7112 | // Value = 0x0000nnff: Op=x, Cmode=1100. |
| 7113 | OpCmode = 0xc; |
| 7114 | Imm = SplatBits >> 8; |
| 7115 | break; |
| 7116 | } |
| 7117 | |
| 7118 | // cmode == 0b1101 is not supported for MVE VMVN |
| 7119 | if (type == MVEVMVNModImm) |
| 7120 | return SDValue(); |
| 7121 | |
| 7122 | if ((SplatBits & ~0xffffff) == 0 && |
| 7123 | ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { |
| 7124 | // Value = 0x00nnffff: Op=x, Cmode=1101. |
| 7125 | OpCmode = 0xd; |
| 7126 | Imm = SplatBits >> 16; |
| 7127 | break; |
| 7128 | } |
| 7129 | |
| 7130 | // Note: there are a few 32-bit splat values (specifically: 00ffff00, |
| 7131 | // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not |
| 7132 | // VMOV.I32. A (very) minor optimization would be to replicate the value |
| 7133 | // and fall through here to test for a valid 64-bit splat. But, then the |
| 7134 | // caller would also need to check and handle the change in size. |
| 7135 | return SDValue(); |
| 7136 | |
| 7137 | case 64: { |
| 7138 | if (type != VMOVModImm) |
| 7139 | return SDValue(); |
| 7140 | // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. |
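// Worked example (illustrative): SplatBits = 0x00ff00ff00ff00ff encodes as
// Imm = 0b01010101, where each immediate bit selects whether the
// corresponding byte of the splat is 0xff or 0x00.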
| 7141 | uint64_t BitMask = 0xff; |
| 7142 | unsigned ImmMask = 1; |
| 7143 | Imm = 0; |
| 7144 | for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { |
| 7145 | if (((SplatBits | SplatUndef) & BitMask) == BitMask) { |
| 7146 | Imm |= ImmMask; |
| 7147 | } else if ((SplatBits & BitMask) != 0) { |
| 7148 | return SDValue(); |
| 7149 | } |
| 7150 | BitMask <<= 8; |
| 7151 | ImmMask <<= 1; |
| 7152 | } |
| 7153 | |
| 7154 | // Op=1, Cmode=1110. |
| 7155 | OpCmode = 0x1e; |
| 7156 | VT = is128Bits ? MVT::v2i64 : MVT::v1i64; |
| 7157 | break; |
| 7158 | } |
| 7159 | |
| 7160 | default: |
| 7161 | llvm_unreachable("unexpected size for isVMOVModifiedImm" ); |
| 7162 | } |
| 7163 | |
| 7164 | unsigned EncodedVal = ARM_AM::createVMOVModImm(OpCmode, Val: Imm); |
| 7165 | return DAG.getTargetConstant(Val: EncodedVal, DL: dl, VT: MVT::i32); |
| 7166 | } |
| 7167 | |
| 7168 | SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, |
| 7169 | const ARMSubtarget *ST) const { |
| 7170 | EVT VT = Op.getValueType(); |
| 7171 | bool IsDouble = (VT == MVT::f64); |
| 7172 | ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Val&: Op); |
| 7173 | const APFloat &FPVal = CFP->getValueAPF(); |
| 7174 | |
| 7175 | // Prevent floating-point constants from using literal loads |
| 7176 | // when execute-only is enabled. |
| 7177 | if (ST->genExecuteOnly()) { |
| 7178 | // We shouldn't trigger this for v6m execute-only |
| 7179 | assert((!ST->isThumb1Only() || ST->hasV8MBaselineOps()) && |
| 7180 | "Unexpected architecture" ); |
| 7181 | |
| 7182 | // If we can represent the constant as an immediate, don't lower it |
| 7183 | if (isFPImmLegal(Imm: FPVal, VT)) |
| 7184 | return Op; |
| 7185 | // Otherwise, construct as integer, and move to float register |
| 7186 | APInt INTVal = FPVal.bitcastToAPInt(); |
| 7187 | SDLoc DL(CFP); |
| 7188 | switch (VT.getSimpleVT().SimpleTy) { |
| 7189 | default: |
| 7190 | llvm_unreachable("Unknown floating point type!" ); |
| 7191 | break; |
| 7192 | case MVT::f64: { |
| 7193 | SDValue Lo = DAG.getConstant(Val: INTVal.trunc(width: 32), DL, VT: MVT::i32); |
| 7194 | SDValue Hi = DAG.getConstant(Val: INTVal.lshr(shiftAmt: 32).trunc(width: 32), DL, VT: MVT::i32); |
| 7195 | return DAG.getNode(Opcode: ARMISD::VMOVDRR, DL, VT: MVT::f64, N1: Lo, N2: Hi); |
| 7196 | } |
| 7197 | case MVT::f32: |
| 7198 | return DAG.getNode(Opcode: ARMISD::VMOVSR, DL, VT, |
| 7199 | Operand: DAG.getConstant(Val: INTVal, DL, VT: MVT::i32)); |
| 7200 | } |
| 7201 | } |
| 7202 | |
| 7203 | if (!ST->hasVFP3Base()) |
| 7204 | return SDValue(); |
| 7205 | |
| 7206 | // Use the default (constant pool) lowering for double constants when we have |
| 7207 | // an SP-only FPU |
| 7208 | if (IsDouble && !Subtarget->hasFP64()) |
| 7209 | return SDValue(); |
| 7210 | |
| 7211 | // Try splatting with a VMOV.f32... |
| 7212 | int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPImm: FPVal) : ARM_AM::getFP32Imm(FPImm: FPVal); |
| 7213 | |
| 7214 | if (ImmVal != -1) { |
| 7215 | if (IsDouble || !ST->useNEONForSinglePrecisionFP()) { |
| 7216 | // We have code in place to select a valid ConstantFP already, no need to |
| 7217 | // do any mangling. |
| 7218 | return Op; |
| 7219 | } |
| 7220 | |
| 7221 | // It's a float and we are trying to use NEON operations where |
| 7222 | // possible. Lower it to a splat followed by an extract. |
| 7223 | SDLoc DL(Op); |
| 7224 | SDValue NewVal = DAG.getTargetConstant(Val: ImmVal, DL, VT: MVT::i32); |
| 7225 | SDValue VecConstant = DAG.getNode(Opcode: ARMISD::VMOVFPIMM, DL, VT: MVT::v2f32, |
| 7226 | Operand: NewVal); |
| 7227 | return DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: MVT::f32, N1: VecConstant, |
| 7228 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
| 7229 | } |
| 7230 | |
| 7231 | // The rest of our options are NEON only, make sure that's allowed before |
| 7232 | // proceeding.. |
| 7233 | if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP())) |
| 7234 | return SDValue(); |
| 7235 | |
| 7236 | EVT VMovVT; |
| 7237 | uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue(); |
| 7238 | |
| 7239 | // It wouldn't really be worth bothering for doubles except for one very |
| 7240 | // important value, which does happen to match: 0.0. So make sure we don't do |
| 7241 | // anything stupid. |
| 7242 | if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32)) |
| 7243 | return SDValue(); |
| 7244 | |
| 7245 | // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too). |
| 7246 | SDValue NewVal = isVMOVModifiedImm(SplatBits: iVal & 0xffffffffU, SplatUndef: 0, SplatBitSize: 32, DAG, dl: SDLoc(Op), |
| 7247 | VT&: VMovVT, VectorVT: VT, type: VMOVModImm); |
| 7248 | if (NewVal != SDValue()) { |
| 7249 | SDLoc DL(Op); |
| 7250 | SDValue VecConstant = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL, VT: VMovVT, |
| 7251 | Operand: NewVal); |
| 7252 | if (IsDouble) |
| 7253 | return DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::f64, Operand: VecConstant); |
| 7254 | |
| 7255 | // It's a float: cast and extract a vector element. |
| 7256 | SDValue VecFConstant = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::v2f32, |
| 7257 | Operand: VecConstant); |
| 7258 | return DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: MVT::f32, N1: VecFConstant, |
| 7259 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
| 7260 | } |
| 7261 | |
| 7262 | // Finally, try a VMVN.i32 |
| 7263 | NewVal = isVMOVModifiedImm(SplatBits: ~iVal & 0xffffffffU, SplatUndef: 0, SplatBitSize: 32, DAG, dl: SDLoc(Op), VT&: VMovVT, |
| 7264 | VectorVT: VT, type: VMVNModImm); |
| 7265 | if (NewVal != SDValue()) { |
| 7266 | SDLoc DL(Op); |
| 7267 | SDValue VecConstant = DAG.getNode(Opcode: ARMISD::VMVNIMM, DL, VT: VMovVT, Operand: NewVal); |
| 7268 | |
| 7269 | if (IsDouble) |
| 7270 | return DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::f64, Operand: VecConstant); |
| 7271 | |
| 7272 | // It's a float: cast and extract a vector element. |
| 7273 | SDValue VecFConstant = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::v2f32, |
| 7274 | Operand: VecConstant); |
| 7275 | return DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: MVT::f32, N1: VecFConstant, |
| 7276 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
| 7277 | } |
| 7278 | |
| 7279 | return SDValue(); |
| 7280 | } |
| 7281 | |
// Check if a VEXT instruction can handle the shuffle mask when the
| 7283 | // vector sources of the shuffle are the same. |
| 7284 | static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) { |
| 7285 | unsigned NumElts = VT.getVectorNumElements(); |
| 7286 | |
| 7287 | // Assume that the first shuffle index is not UNDEF. Fail if it is. |
| 7288 | if (M[0] < 0) |
| 7289 | return false; |
| 7290 | |
| 7291 | Imm = M[0]; |
| 7292 | |
| 7293 | // If this is a VEXT shuffle, the immediate value is the index of the first |
| 7294 | // element. The other shuffle indices must be the successive elements after |
| 7295 | // the first one. |
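// For example (illustrative): for a single-source v8i8 shuffle, the mask
// <3, 4, 5, 6, 7, 0, 1, 2> is a VEXT with Imm = 3, since the indices wrap
// back to element 0 after reaching the end of the vector.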
| 7296 | unsigned ExpectedElt = Imm; |
| 7297 | for (unsigned i = 1; i < NumElts; ++i) { |
| 7298 | // Increment the expected index. If it wraps around, just follow it |
| 7299 | // back to index zero and keep going. |
| 7300 | ++ExpectedElt; |
| 7301 | if (ExpectedElt == NumElts) |
| 7302 | ExpectedElt = 0; |
| 7303 | |
| 7304 | if (M[i] < 0) continue; // ignore UNDEF indices |
| 7305 | if (ExpectedElt != static_cast<unsigned>(M[i])) |
| 7306 | return false; |
| 7307 | } |
| 7308 | |
| 7309 | return true; |
| 7310 | } |
| 7311 | |
| 7312 | static bool isVEXTMask(ArrayRef<int> M, EVT VT, |
| 7313 | bool &ReverseVEXT, unsigned &Imm) { |
| 7314 | unsigned NumElts = VT.getVectorNumElements(); |
| 7315 | ReverseVEXT = false; |
| 7316 | |
| 7317 | // Assume that the first shuffle index is not UNDEF. Fail if it is. |
| 7318 | if (M[0] < 0) |
| 7319 | return false; |
| 7320 | |
| 7321 | Imm = M[0]; |
| 7322 | |
| 7323 | // If this is a VEXT shuffle, the immediate value is the index of the first |
| 7324 | // element. The other shuffle indices must be the successive elements after |
| 7325 | // the first one. |
| 7326 | unsigned ExpectedElt = Imm; |
| 7327 | for (unsigned i = 1; i < NumElts; ++i) { |
| 7328 | // Increment the expected index. If it wraps around, it may still be |
| 7329 | // a VEXT but the source vectors must be swapped. |
| 7330 | ExpectedElt += 1; |
| 7331 | if (ExpectedElt == NumElts * 2) { |
| 7332 | ExpectedElt = 0; |
| 7333 | ReverseVEXT = true; |
| 7334 | } |
| 7335 | |
| 7336 | if (M[i] < 0) continue; // ignore UNDEF indices |
| 7337 | if (ExpectedElt != static_cast<unsigned>(M[i])) |
| 7338 | return false; |
| 7339 | } |
| 7340 | |
| 7341 | // Adjust the index value if the source operands will be swapped. |
| 7342 | if (ReverseVEXT) |
| 7343 | Imm -= NumElts; |
| 7344 | |
| 7345 | return true; |
| 7346 | } |
| 7347 | |
| 7348 | static bool isVTBLMask(ArrayRef<int> M, EVT VT) { |
| 7349 | // We can handle <8 x i8> vector shuffles. If the index in the mask is out of |
| 7350 | // range, then 0 is placed into the resulting vector. So pretty much any mask |
| 7351 | // of 8 elements can work here. |
| 7352 | return VT == MVT::v8i8 && M.size() == 8; |
| 7353 | } |
| 7354 | |
| 7355 | static unsigned SelectPairHalf(unsigned Elements, ArrayRef<int> Mask, |
| 7356 | unsigned Index) { |
| 7357 | if (Mask.size() == Elements * 2) |
| 7358 | return Index / Elements; |
| 7359 | return Mask[Index] == 0 ? 0 : 1; |
| 7360 | } |
| 7361 | |
| 7362 | // Checks whether the shuffle mask represents a vector transpose (VTRN) by |
| 7363 | // checking that pairs of elements in the shuffle mask represent the same index |
| 7364 | // in each vector, incrementing the expected index by 2 at each step. |
| 7365 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6] |
| 7366 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g} |
| 7367 | // v2={e,f,g,h} |
| 7368 | // WhichResult gives the offset for each element in the mask based on which |
| 7369 | // of the two results it belongs to. |
| 7370 | // |
| 7371 | // The transpose can be represented either as: |
| 7372 | // result1 = shufflevector v1, v2, result1_shuffle_mask |
| 7373 | // result2 = shufflevector v1, v2, result2_shuffle_mask |
| 7374 | // where v1/v2 and the shuffle masks have the same number of elements |
| 7375 | // (here WhichResult (see below) indicates which result is being checked) |
| 7376 | // |
| 7377 | // or as: |
| 7378 | // results = shufflevector v1, v2, shuffle_mask |
| 7379 | // where both results are returned in one vector and the shuffle mask has twice |
// as many elements as v1/v2 (here WhichResult will always be 0 if true); in
// that case we check the low half and the high half of the shuffle mask as if
// it were the two-result form described above.
| 7383 | static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { |
| 7384 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7385 | if (EltSz == 64) |
| 7386 | return false; |
| 7387 | |
| 7388 | unsigned NumElts = VT.getVectorNumElements(); |
| 7389 | if (M.size() != NumElts && M.size() != NumElts*2) |
| 7390 | return false; |
| 7391 | |
| 7392 | // If the mask is twice as long as the input vector then we need to check the |
| 7393 | // upper and lower parts of the mask with a matching value for WhichResult |
| 7394 | // FIXME: A mask with only even values will be rejected in case the first |
| 7395 | // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only |
| 7396 | // M[0] is used to determine WhichResult |
| 7397 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7398 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
| 7399 | for (unsigned j = 0; j < NumElts; j += 2) { |
| 7400 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || |
| 7401 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult)) |
| 7402 | return false; |
| 7403 | } |
| 7404 | } |
| 7405 | |
| 7406 | if (M.size() == NumElts*2) |
| 7407 | WhichResult = 0; |
| 7408 | |
| 7409 | return true; |
| 7410 | } |
| 7411 | |
| 7412 | /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of |
| 7413 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". |
| 7414 | /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. |
| 7415 | static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ |
| 7416 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7417 | if (EltSz == 64) |
| 7418 | return false; |
| 7419 | |
| 7420 | unsigned NumElts = VT.getVectorNumElements(); |
| 7421 | if (M.size() != NumElts && M.size() != NumElts*2) |
| 7422 | return false; |
| 7423 | |
| 7424 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7425 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
| 7426 | for (unsigned j = 0; j < NumElts; j += 2) { |
| 7427 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || |
| 7428 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult)) |
| 7429 | return false; |
| 7430 | } |
| 7431 | } |
| 7432 | |
| 7433 | if (M.size() == NumElts*2) |
| 7434 | WhichResult = 0; |
| 7435 | |
| 7436 | return true; |
| 7437 | } |
| 7438 | |
| 7439 | // Checks whether the shuffle mask represents a vector unzip (VUZP) by checking |
| 7440 | // that the mask elements are either all even and in steps of size 2 or all odd |
| 7441 | // and in steps of size 2. |
| 7442 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6] |
| 7443 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g} |
| 7444 | // v2={e,f,g,h} |
// Requires similar checks to those of isVTRNMask with respect to how the
// results are returned.
| 7447 | static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { |
| 7448 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7449 | if (EltSz == 64) |
| 7450 | return false; |
| 7451 | |
| 7452 | unsigned NumElts = VT.getVectorNumElements(); |
| 7453 | if (M.size() != NumElts && M.size() != NumElts*2) |
| 7454 | return false; |
| 7455 | |
| 7456 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7457 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
| 7458 | for (unsigned j = 0; j < NumElts; ++j) { |
| 7459 | if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult) |
| 7460 | return false; |
| 7461 | } |
| 7462 | } |
| 7463 | |
| 7464 | if (M.size() == NumElts*2) |
| 7465 | WhichResult = 0; |
| 7466 | |
| 7467 | // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
| 7468 | if (VT.is64BitVector() && EltSz == 32) |
| 7469 | return false; |
| 7470 | |
| 7471 | return true; |
| 7472 | } |
| 7473 | |
| 7474 | /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of |
| 7475 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". |
| 7476 | /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, |
| 7477 | static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ |
| 7478 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7479 | if (EltSz == 64) |
| 7480 | return false; |
| 7481 | |
| 7482 | unsigned NumElts = VT.getVectorNumElements(); |
| 7483 | if (M.size() != NumElts && M.size() != NumElts*2) |
| 7484 | return false; |
| 7485 | |
| 7486 | unsigned Half = NumElts / 2; |
| 7487 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7488 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
| 7489 | for (unsigned j = 0; j < NumElts; j += Half) { |
| 7490 | unsigned Idx = WhichResult; |
| 7491 | for (unsigned k = 0; k < Half; ++k) { |
| 7492 | int MIdx = M[i + j + k]; |
| 7493 | if (MIdx >= 0 && (unsigned) MIdx != Idx) |
| 7494 | return false; |
| 7495 | Idx += 2; |
| 7496 | } |
| 7497 | } |
| 7498 | } |
| 7499 | |
| 7500 | if (M.size() == NumElts*2) |
| 7501 | WhichResult = 0; |
| 7502 | |
| 7503 | // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
| 7504 | if (VT.is64BitVector() && EltSz == 32) |
| 7505 | return false; |
| 7506 | |
| 7507 | return true; |
| 7508 | } |
| 7509 | |
| 7510 | // Checks whether the shuffle mask represents a vector zip (VZIP) by checking |
| 7511 | // that pairs of elements of the shufflemask represent the same index in each |
| 7512 | // vector incrementing sequentially through the vectors. |
| 7513 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5] |
| 7514 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f} |
| 7515 | // v2={e,f,g,h} |
// Requires similar checks to those of isVTRNMask with respect to how the
// results are returned.
| 7518 | static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { |
| 7519 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7520 | if (EltSz == 64) |
| 7521 | return false; |
| 7522 | |
| 7523 | unsigned NumElts = VT.getVectorNumElements(); |
| 7524 | if (M.size() != NumElts && M.size() != NumElts*2) |
| 7525 | return false; |
| 7526 | |
| 7527 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7528 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
| 7529 | unsigned Idx = WhichResult * NumElts / 2; |
| 7530 | for (unsigned j = 0; j < NumElts; j += 2) { |
| 7531 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || |
| 7532 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts)) |
| 7533 | return false; |
| 7534 | Idx += 1; |
| 7535 | } |
| 7536 | } |
| 7537 | |
| 7538 | if (M.size() == NumElts*2) |
| 7539 | WhichResult = 0; |
| 7540 | |
| 7541 | // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
| 7542 | if (VT.is64BitVector() && EltSz == 32) |
| 7543 | return false; |
| 7544 | |
| 7545 | return true; |
| 7546 | } |
| 7547 | |
| 7548 | /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of |
| 7549 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". |
| 7550 | /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. |
| 7551 | static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ |
| 7552 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7553 | if (EltSz == 64) |
| 7554 | return false; |
| 7555 | |
| 7556 | unsigned NumElts = VT.getVectorNumElements(); |
| 7557 | if (M.size() != NumElts && M.size() != NumElts*2) |
| 7558 | return false; |
| 7559 | |
| 7560 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7561 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
| 7562 | unsigned Idx = WhichResult * NumElts / 2; |
| 7563 | for (unsigned j = 0; j < NumElts; j += 2) { |
| 7564 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || |
| 7565 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx)) |
| 7566 | return false; |
| 7567 | Idx += 1; |
| 7568 | } |
| 7569 | } |
| 7570 | |
| 7571 | if (M.size() == NumElts*2) |
| 7572 | WhichResult = 0; |
| 7573 | |
| 7574 | // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
| 7575 | if (VT.is64BitVector() && EltSz == 32) |
| 7576 | return false; |
| 7577 | |
| 7578 | return true; |
| 7579 | } |
| 7580 | |
| 7581 | /// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN), |
| 7582 | /// and return the corresponding ARMISD opcode if it is, or 0 if it isn't. |
| 7583 | static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT, |
| 7584 | unsigned &WhichResult, |
| 7585 | bool &isV_UNDEF) { |
| 7586 | isV_UNDEF = false; |
| 7587 | if (isVTRNMask(M: ShuffleMask, VT, WhichResult)) |
| 7588 | return ARMISD::VTRN; |
| 7589 | if (isVUZPMask(M: ShuffleMask, VT, WhichResult)) |
| 7590 | return ARMISD::VUZP; |
| 7591 | if (isVZIPMask(M: ShuffleMask, VT, WhichResult)) |
| 7592 | return ARMISD::VZIP; |
| 7593 | |
| 7594 | isV_UNDEF = true; |
| 7595 | if (isVTRN_v_undef_Mask(M: ShuffleMask, VT, WhichResult)) |
| 7596 | return ARMISD::VTRN; |
| 7597 | if (isVUZP_v_undef_Mask(M: ShuffleMask, VT, WhichResult)) |
| 7598 | return ARMISD::VUZP; |
| 7599 | if (isVZIP_v_undef_Mask(M: ShuffleMask, VT, WhichResult)) |
| 7600 | return ARMISD::VZIP; |
| 7601 | |
| 7602 | return 0; |
| 7603 | } |
| 7604 | |
/// \return true if this is a reverse operation on a vector.
| 7606 | static bool isReverseMask(ArrayRef<int> M, EVT VT) { |
| 7607 | unsigned NumElts = VT.getVectorNumElements(); |
| 7608 | // Make sure the mask has the right size. |
| 7609 | if (NumElts != M.size()) |
| 7610 | return false; |
| 7611 | |
| 7612 | // Look for <15, ..., 3, -1, 1, 0>. |
| 7613 | for (unsigned i = 0; i != NumElts; ++i) |
| 7614 | if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i)) |
| 7615 | return false; |
| 7616 | |
| 7617 | return true; |
| 7618 | } |
| 7619 | |
| 7620 | static bool isTruncMask(ArrayRef<int> M, EVT VT, bool Top, bool SingleSource) { |
| 7621 | unsigned NumElts = VT.getVectorNumElements(); |
| 7622 | // Make sure the mask has the right size. |
| 7623 | if (NumElts != M.size() || (VT != MVT::v8i16 && VT != MVT::v16i8)) |
| 7624 | return false; |
| 7625 | |
| 7626 | // Half-width truncation patterns (e.g. v4i32 -> v8i16): |
| 7627 | // !Top && SingleSource: <0, 2, 4, 6, 0, 2, 4, 6> |
| 7628 | // !Top && !SingleSource: <0, 2, 4, 6, 8, 10, 12, 14> |
| 7629 | // Top && SingleSource: <1, 3, 5, 7, 1, 3, 5, 7> |
| 7630 | // Top && !SingleSource: <1, 3, 5, 7, 9, 11, 13, 15> |
| 7631 | int Ofs = Top ? 1 : 0; |
| 7632 | int Upper = SingleSource ? 0 : NumElts; |
| 7633 | for (int i = 0, e = NumElts / 2; i != e; ++i) { |
| 7634 | if (M[i] >= 0 && M[i] != (i * 2) + Ofs) |
| 7635 | return false; |
| 7636 | if (M[i + e] >= 0 && M[i + e] != (i * 2) + Ofs + Upper) |
| 7637 | return false; |
| 7638 | } |
| 7639 | return true; |
| 7640 | } |
| 7641 | |
| 7642 | static bool isVMOVNMask(ArrayRef<int> M, EVT VT, bool Top, bool SingleSource) { |
| 7643 | unsigned NumElts = VT.getVectorNumElements(); |
| 7644 | // Make sure the mask has the right size. |
| 7645 | if (NumElts != M.size() || (VT != MVT::v8i16 && VT != MVT::v16i8)) |
| 7646 | return false; |
| 7647 | |
| 7648 | // If Top |
| 7649 | // Look for <0, N, 2, N+2, 4, N+4, ..>. |
| 7650 | // This inserts Input2 into Input1 |
| 7651 | // else if not Top |
| 7652 | // Look for <0, N+1, 2, N+3, 4, N+5, ..> |
| 7653 | // This inserts Input1 into Input2 |
| 7654 | unsigned Offset = Top ? 0 : 1; |
| 7655 | unsigned N = SingleSource ? 0 : NumElts; |
| 7656 | for (unsigned i = 0; i < NumElts; i += 2) { |
| 7657 | if (M[i] >= 0 && M[i] != (int)i) |
| 7658 | return false; |
| 7659 | if (M[i + 1] >= 0 && M[i + 1] != (int)(N + i + Offset)) |
| 7660 | return false; |
| 7661 | } |
| 7662 | |
| 7663 | return true; |
| 7664 | } |
| 7665 | |
| 7666 | static bool isVMOVNTruncMask(ArrayRef<int> M, EVT ToVT, bool rev) { |
| 7667 | unsigned NumElts = ToVT.getVectorNumElements(); |
| 7668 | if (NumElts != M.size()) |
| 7669 | return false; |
| 7670 | |
// Test if the Trunc is convertible to a VMOVN with this shuffle. We are
| 7672 | // looking for patterns of: |
| 7673 | // !rev: 0 N/2 1 N/2+1 2 N/2+2 ... |
| 7674 | // rev: N/2 0 N/2+1 1 N/2+2 2 ... |
| 7675 | |
| 7676 | unsigned Off0 = rev ? NumElts / 2 : 0; |
| 7677 | unsigned Off1 = rev ? 0 : NumElts / 2; |
| 7678 | for (unsigned i = 0; i < NumElts; i += 2) { |
| 7679 | if (M[i] >= 0 && M[i] != (int)(Off0 + i / 2)) |
| 7680 | return false; |
| 7681 | if (M[i + 1] >= 0 && M[i + 1] != (int)(Off1 + i / 2)) |
| 7682 | return false; |
| 7683 | } |
| 7684 | |
| 7685 | return true; |
| 7686 | } |
| 7687 | |
| 7688 | // Reconstruct an MVE VCVT from a BuildVector of scalar fptrunc, all extracted |
| 7689 | // from a pair of inputs. For example: |
| 7690 | // BUILDVECTOR(FP_ROUND(EXTRACT_ELT(X, 0), |
| 7691 | // FP_ROUND(EXTRACT_ELT(Y, 0), |
| 7692 | // FP_ROUND(EXTRACT_ELT(X, 1), |
| 7693 | // FP_ROUND(EXTRACT_ELT(Y, 1), ...) |
| 7694 | static SDValue LowerBuildVectorOfFPTrunc(SDValue BV, SelectionDAG &DAG, |
| 7695 | const ARMSubtarget *ST) { |
| 7696 | assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!" ); |
| 7697 | if (!ST->hasMVEFloatOps()) |
| 7698 | return SDValue(); |
| 7699 | |
| 7700 | SDLoc dl(BV); |
| 7701 | EVT VT = BV.getValueType(); |
| 7702 | if (VT != MVT::v8f16) |
| 7703 | return SDValue(); |
| 7704 | |
| 7705 | // We are looking for a buildvector of fptrunc elements, where all the |
| 7706 | // elements are interleavingly extracted from two sources. Check the first two |
| 7707 | // items are valid enough and extract some info from them (they are checked |
| 7708 | // properly in the loop below). |
| 7709 | if (BV.getOperand(i: 0).getOpcode() != ISD::FP_ROUND || |
| 7710 | BV.getOperand(i: 0).getOperand(i: 0).getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
| 7711 | BV.getOperand(i: 0).getOperand(i: 0).getConstantOperandVal(i: 1) != 0) |
| 7712 | return SDValue(); |
| 7713 | if (BV.getOperand(i: 1).getOpcode() != ISD::FP_ROUND || |
| 7714 | BV.getOperand(i: 1).getOperand(i: 0).getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
| 7715 | BV.getOperand(i: 1).getOperand(i: 0).getConstantOperandVal(i: 1) != 0) |
| 7716 | return SDValue(); |
| 7717 | SDValue Op0 = BV.getOperand(i: 0).getOperand(i: 0).getOperand(i: 0); |
| 7718 | SDValue Op1 = BV.getOperand(i: 1).getOperand(i: 0).getOperand(i: 0); |
| 7719 | if (Op0.getValueType() != MVT::v4f32 || Op1.getValueType() != MVT::v4f32) |
| 7720 | return SDValue(); |
| 7721 | |
| 7722 | // Check all the values in the BuildVector line up with our expectations. |
| 7723 | for (unsigned i = 1; i < 4; i++) { |
| 7724 | auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) { |
| 7725 | return Trunc.getOpcode() == ISD::FP_ROUND && |
| 7726 | Trunc.getOperand(i: 0).getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 7727 | Trunc.getOperand(i: 0).getOperand(i: 0) == Op && |
| 7728 | Trunc.getOperand(i: 0).getConstantOperandVal(i: 1) == Idx; |
| 7729 | }; |
| 7730 | if (!Check(BV.getOperand(i: i * 2 + 0), Op0, i)) |
| 7731 | return SDValue(); |
| 7732 | if (!Check(BV.getOperand(i: i * 2 + 1), Op1, i)) |
| 7733 | return SDValue(); |
| 7734 | } |
| 7735 | |
| 7736 | SDValue N1 = DAG.getNode(Opcode: ARMISD::VCVTN, DL: dl, VT, N1: DAG.getUNDEF(VT), N2: Op0, |
| 7737 | N3: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 7738 | return DAG.getNode(Opcode: ARMISD::VCVTN, DL: dl, VT, N1, N2: Op1, |
| 7739 | N3: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
| 7740 | } |
| 7741 | |
| 7742 | // Reconstruct an MVE VCVT from a BuildVector of scalar fpext, all extracted |
| 7743 | // from a single input on alternating lanes. For example: |
| 7744 | // BUILDVECTOR(FP_ROUND(EXTRACT_ELT(X, 0), |
| 7745 | // FP_ROUND(EXTRACT_ELT(X, 2), |
| 7746 | // FP_ROUND(EXTRACT_ELT(X, 4), ...) |
| 7747 | static SDValue LowerBuildVectorOfFPExt(SDValue BV, SelectionDAG &DAG, |
| 7748 | const ARMSubtarget *ST) { |
| 7749 | assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!" ); |
| 7750 | if (!ST->hasMVEFloatOps()) |
| 7751 | return SDValue(); |
| 7752 | |
| 7753 | SDLoc dl(BV); |
| 7754 | EVT VT = BV.getValueType(); |
| 7755 | if (VT != MVT::v4f32) |
| 7756 | return SDValue(); |
| 7757 | |
// We are looking for a buildvector of fpext elements, where all the
| 7759 | // elements are alternating lanes from a single source. For example <0,2,4,6> |
| 7760 | // or <1,3,5,7>. Check the first two items are valid enough and extract some |
| 7761 | // info from them (they are checked properly in the loop below). |
| 7762 | if (BV.getOperand(i: 0).getOpcode() != ISD::FP_EXTEND || |
| 7763 | BV.getOperand(i: 0).getOperand(i: 0).getOpcode() != ISD::EXTRACT_VECTOR_ELT) |
| 7764 | return SDValue(); |
| 7765 | SDValue Op0 = BV.getOperand(i: 0).getOperand(i: 0).getOperand(i: 0); |
| 7766 | int Offset = BV.getOperand(i: 0).getOperand(i: 0).getConstantOperandVal(i: 1); |
| 7767 | if (Op0.getValueType() != MVT::v8f16 || (Offset != 0 && Offset != 1)) |
| 7768 | return SDValue(); |
| 7769 | |
| 7770 | // Check all the values in the BuildVector line up with our expectations. |
| 7771 | for (unsigned i = 1; i < 4; i++) { |
| 7772 | auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) { |
| 7773 | return Trunc.getOpcode() == ISD::FP_EXTEND && |
| 7774 | Trunc.getOperand(i: 0).getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 7775 | Trunc.getOperand(i: 0).getOperand(i: 0) == Op && |
| 7776 | Trunc.getOperand(i: 0).getConstantOperandVal(i: 1) == Idx; |
| 7777 | }; |
| 7778 | if (!Check(BV.getOperand(i), Op0, 2 * i + Offset)) |
| 7779 | return SDValue(); |
| 7780 | } |
| 7781 | |
| 7782 | return DAG.getNode(Opcode: ARMISD::VCVTL, DL: dl, VT, N1: Op0, |
| 7783 | N2: DAG.getConstant(Val: Offset, DL: dl, VT: MVT::i32)); |
| 7784 | } |
| 7785 | |
| 7786 | // If N is an integer constant that can be moved into a register in one |
| 7787 | // instruction, return an SDValue of such a constant (will become a MOV |
| 7788 | // instruction). Otherwise return null. |
| 7789 | static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, |
| 7790 | const ARMSubtarget *ST, const SDLoc &dl) { |
| 7791 | uint64_t Val; |
| 7792 | if (!isa<ConstantSDNode>(Val: N)) |
| 7793 | return SDValue(); |
| 7794 | Val = N->getAsZExtVal(); |
| 7795 | |
| 7796 | if (ST->isThumb1Only()) { |
| 7797 | if (Val <= 255 || ~Val <= 255) |
| 7798 | return DAG.getConstant(Val, DL: dl, VT: MVT::i32); |
| 7799 | } else { |
| 7800 | if (ARM_AM::getSOImmVal(Arg: Val) != -1 || ARM_AM::getSOImmVal(Arg: ~Val) != -1) |
| 7801 | return DAG.getConstant(Val, DL: dl, VT: MVT::i32); |
| 7802 | } |
| 7803 | return SDValue(); |
| 7804 | } |
| 7805 | |
| 7806 | static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG, |
| 7807 | const ARMSubtarget *ST) { |
| 7808 | SDLoc dl(Op); |
| 7809 | EVT VT = Op.getValueType(); |
| 7810 | |
  assert(ST->hasMVEIntegerOps() && "LowerBUILD_VECTOR_i1 called without MVE!");
| 7812 | |
| 7813 | unsigned NumElts = VT.getVectorNumElements(); |
| 7814 | unsigned BoolMask; |
| 7815 | unsigned BitsPerBool; |
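  // MVE predicates are held in the 16-bit VPR.P0 register, so each of the
  // NumElts boolean lanes occupies a field of 16 / NumElts bits; a true lane
  // sets every bit of its field.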
| 7816 | if (NumElts == 2) { |
| 7817 | BitsPerBool = 8; |
| 7818 | BoolMask = 0xff; |
| 7819 | } else if (NumElts == 4) { |
| 7820 | BitsPerBool = 4; |
| 7821 | BoolMask = 0xf; |
| 7822 | } else if (NumElts == 8) { |
| 7823 | BitsPerBool = 2; |
| 7824 | BoolMask = 0x3; |
| 7825 | } else if (NumElts == 16) { |
| 7826 | BitsPerBool = 1; |
| 7827 | BoolMask = 0x1; |
| 7828 | } else |
| 7829 | return SDValue(); |
| 7830 | |
| 7831 | // If this is a single value copied into all lanes (a splat), we can just sign |
| 7832 | // extend that single value |
| 7833 | SDValue FirstOp = Op.getOperand(i: 0); |
| 7834 | if (!isa<ConstantSDNode>(Val: FirstOp) && |
| 7835 | llvm::all_of(Range: llvm::drop_begin(RangeOrContainer: Op->ops()), P: [&FirstOp](const SDUse &U) { |
| 7836 | return U.get().isUndef() || U.get() == FirstOp; |
| 7837 | })) { |
| 7838 | SDValue Ext = DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL: dl, VT: MVT::i32, N1: FirstOp, |
| 7839 | N2: DAG.getValueType(MVT::i1)); |
| 7840 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: Op.getValueType(), Operand: Ext); |
| 7841 | } |
| 7842 | |
| 7843 | // First create base with bits set where known |
| 7844 | unsigned Bits32 = 0; |
| 7845 | for (unsigned i = 0; i < NumElts; ++i) { |
| 7846 | SDValue V = Op.getOperand(i); |
| 7847 | if (!isa<ConstantSDNode>(Val: V) && !V.isUndef()) |
| 7848 | continue; |
| 7849 | bool BitSet = V.isUndef() ? false : V->getAsZExtVal(); |
| 7850 | if (BitSet) |
| 7851 | Bits32 |= BoolMask << (i * BitsPerBool); |
| 7852 | } |
| 7853 | |
| 7854 | // Add in unknown nodes |
| 7855 | SDValue Base = DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT, |
| 7856 | Operand: DAG.getConstant(Val: Bits32, DL: dl, VT: MVT::i32)); |
| 7857 | for (unsigned i = 0; i < NumElts; ++i) { |
| 7858 | SDValue V = Op.getOperand(i); |
| 7859 | if (isa<ConstantSDNode>(Val: V) || V.isUndef()) |
| 7860 | continue; |
| 7861 | Base = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT, N1: Base, N2: V, |
| 7862 | N3: DAG.getConstant(Val: i, DL: dl, VT: MVT::i32)); |
| 7863 | } |
| 7864 | |
| 7865 | return Base; |
| 7866 | } |
| 7867 | |
| 7868 | static SDValue LowerBUILD_VECTORToVIDUP(SDValue Op, SelectionDAG &DAG, |
| 7869 | const ARMSubtarget *ST) { |
| 7870 | if (!ST->hasMVEIntegerOps()) |
| 7871 | return SDValue(); |
| 7872 | |
| 7873 | // We are looking for a buildvector where each element is Op[0] + i*N |
| 7874 | EVT VT = Op.getValueType(); |
| 7875 | SDValue Op0 = Op.getOperand(i: 0); |
| 7876 | unsigned NumElts = VT.getVectorNumElements(); |
| 7877 | |
| 7878 | // Get the increment value from operand 1 |
| 7879 | SDValue Op1 = Op.getOperand(i: 1); |
| 7880 | if (Op1.getOpcode() != ISD::ADD || Op1.getOperand(i: 0) != Op0 || |
| 7881 | !isa<ConstantSDNode>(Val: Op1.getOperand(i: 1))) |
| 7882 | return SDValue(); |
| 7883 | unsigned N = Op1.getConstantOperandVal(i: 1); |
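  // The VIDUP increment is encoded as an immediate and can only be 1, 2, 4 or 8.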
| 7884 | if (N != 1 && N != 2 && N != 4 && N != 8) |
| 7885 | return SDValue(); |
| 7886 | |
| 7887 | // Check that each other operand matches |
| 7888 | for (unsigned I = 2; I < NumElts; I++) { |
| 7889 | SDValue OpI = Op.getOperand(i: I); |
| 7890 | if (OpI.getOpcode() != ISD::ADD || OpI.getOperand(i: 0) != Op0 || |
| 7891 | !isa<ConstantSDNode>(Val: OpI.getOperand(i: 1)) || |
| 7892 | OpI.getConstantOperandVal(i: 1) != I * N) |
| 7893 | return SDValue(); |
| 7894 | } |
| 7895 | |
| 7896 | SDLoc DL(Op); |
| 7897 | return DAG.getNode(Opcode: ARMISD::VIDUP, DL, VTList: DAG.getVTList(VT1: VT, VT2: MVT::i32), N1: Op0, |
| 7898 | N2: DAG.getConstant(Val: N, DL, VT: MVT::i32)); |
| 7899 | } |
| 7900 | |
// Returns true if the operation N can be treated as a qr instruction variant
// at operand Op.
| 7903 | static bool IsQRMVEInstruction(const SDNode *N, const SDNode *Op) { |
| 7904 | switch (N->getOpcode()) { |
| 7905 | case ISD::ADD: |
| 7906 | case ISD::MUL: |
| 7907 | case ISD::SADDSAT: |
| 7908 | case ISD::UADDSAT: |
| 7909 | case ISD::AVGFLOORS: |
| 7910 | case ISD::AVGFLOORU: |
| 7911 | return true; |
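  // Subtraction-like operations only have a qr form when the scalar is the
  // second operand, so Op must be operand 1 here.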
| 7912 | case ISD::SUB: |
| 7913 | case ISD::SSUBSAT: |
| 7914 | case ISD::USUBSAT: |
| 7915 | return N->getOperand(Num: 1).getNode() == Op; |
| 7916 | case ISD::INTRINSIC_WO_CHAIN: |
| 7917 | switch (N->getConstantOperandVal(Num: 0)) { |
| 7918 | case Intrinsic::arm_mve_add_predicated: |
| 7919 | case Intrinsic::arm_mve_mul_predicated: |
| 7920 | case Intrinsic::arm_mve_qadd_predicated: |
| 7921 | case Intrinsic::arm_mve_vhadd: |
| 7922 | case Intrinsic::arm_mve_hadd_predicated: |
| 7923 | case Intrinsic::arm_mve_vqdmulh: |
| 7924 | case Intrinsic::arm_mve_qdmulh_predicated: |
| 7925 | case Intrinsic::arm_mve_vqrdmulh: |
| 7926 | case Intrinsic::arm_mve_qrdmulh_predicated: |
| 7927 | case Intrinsic::arm_mve_vqdmull: |
| 7928 | case Intrinsic::arm_mve_vqdmull_predicated: |
| 7929 | return true; |
| 7930 | case Intrinsic::arm_mve_sub_predicated: |
| 7931 | case Intrinsic::arm_mve_qsub_predicated: |
| 7932 | case Intrinsic::arm_mve_vhsub: |
| 7933 | case Intrinsic::arm_mve_hsub_predicated: |
| 7934 | return N->getOperand(Num: 2).getNode() == Op; |
| 7935 | default: |
| 7936 | return false; |
| 7937 | } |
| 7938 | default: |
| 7939 | return false; |
| 7940 | } |
| 7941 | } |
| 7942 | |
| 7943 | // If this is a case we can't handle, return null and let the default |
| 7944 | // expansion code take care of it. |
| 7945 | SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, |
| 7946 | const ARMSubtarget *ST) const { |
| 7947 | BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Val: Op.getNode()); |
| 7948 | SDLoc dl(Op); |
| 7949 | EVT VT = Op.getValueType(); |
| 7950 | |
| 7951 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) |
| 7952 | return LowerBUILD_VECTOR_i1(Op, DAG, ST); |
| 7953 | |
| 7954 | if (SDValue R = LowerBUILD_VECTORToVIDUP(Op, DAG, ST)) |
| 7955 | return R; |
| 7956 | |
| 7957 | APInt SplatBits, SplatUndef; |
| 7958 | unsigned SplatBitSize; |
| 7959 | bool HasAnyUndefs; |
| 7960 | if (BVN->isConstantSplat(SplatValue&: SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { |
| 7961 | if (SplatUndef.isAllOnes()) |
| 7962 | return DAG.getUNDEF(VT); |
| 7963 | |
| 7964 | // If all the users of this constant splat are qr instruction variants, |
| 7965 | // generate a vdup of the constant. |
| 7966 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == SplatBitSize && |
| 7967 | (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32) && |
| 7968 | all_of(Range: BVN->users(), |
| 7969 | P: [BVN](const SDNode *U) { return IsQRMVEInstruction(N: U, Op: BVN); })) { |
| 7970 | EVT DupVT = SplatBitSize == 32 ? MVT::v4i32 |
| 7971 | : SplatBitSize == 16 ? MVT::v8i16 |
| 7972 | : MVT::v16i8; |
| 7973 | SDValue Const = DAG.getConstant(Val: SplatBits.getZExtValue(), DL: dl, VT: MVT::i32); |
| 7974 | SDValue VDup = DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT: DupVT, Operand: Const); |
| 7975 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: VDup); |
| 7976 | } |
| 7977 | |
| 7978 | if ((ST->hasNEON() && SplatBitSize <= 64) || |
| 7979 | (ST->hasMVEIntegerOps() && SplatBitSize <= 64)) { |
| 7980 | // Check if an immediate VMOV works. |
| 7981 | EVT VmovVT; |
| 7982 | SDValue Val = |
| 7983 | isVMOVModifiedImm(SplatBits: SplatBits.getZExtValue(), SplatUndef: SplatUndef.getZExtValue(), |
| 7984 | SplatBitSize, DAG, dl, VT&: VmovVT, VectorVT: VT, type: VMOVModImm); |
| 7985 | |
| 7986 | if (Val.getNode()) { |
| 7987 | SDValue Vmov = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: VmovVT, Operand: Val); |
| 7988 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Vmov); |
| 7989 | } |
| 7990 | |
| 7991 | // Try an immediate VMVN. |
| 7992 | uint64_t NegatedImm = (~SplatBits).getZExtValue(); |
| 7993 | Val = isVMOVModifiedImm( |
| 7994 | SplatBits: NegatedImm, SplatUndef: SplatUndef.getZExtValue(), SplatBitSize, DAG, dl, VT&: VmovVT, |
| 7995 | VectorVT: VT, type: ST->hasMVEIntegerOps() ? MVEVMVNModImm : VMVNModImm); |
| 7996 | if (Val.getNode()) { |
| 7997 | SDValue Vmov = DAG.getNode(Opcode: ARMISD::VMVNIMM, DL: dl, VT: VmovVT, Operand: Val); |
| 7998 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Vmov); |
| 7999 | } |
| 8000 | |
| 8001 | // Use vmov.f32 to materialize other v2f32 and v4f32 splats. |
| 8002 | if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) { |
| 8003 | int ImmVal = ARM_AM::getFP32Imm(Imm: SplatBits); |
| 8004 | if (ImmVal != -1) { |
| 8005 | SDValue Val = DAG.getTargetConstant(Val: ImmVal, DL: dl, VT: MVT::i32); |
| 8006 | return DAG.getNode(Opcode: ARMISD::VMOVFPIMM, DL: dl, VT, Operand: Val); |
| 8007 | } |
| 8008 | } |
| 8009 | |
| 8010 | // If we are under MVE, generate a VDUP(constant), bitcast to the original |
| 8011 | // type. |
| 8012 | if (ST->hasMVEIntegerOps() && |
| 8013 | (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32)) { |
| 8014 | EVT DupVT = SplatBitSize == 32 ? MVT::v4i32 |
| 8015 | : SplatBitSize == 16 ? MVT::v8i16 |
| 8016 | : MVT::v16i8; |
| 8017 | SDValue Const = DAG.getConstant(Val: SplatBits.getZExtValue(), DL: dl, VT: MVT::i32); |
| 8018 | SDValue VDup = DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT: DupVT, Operand: Const); |
| 8019 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: VDup); |
| 8020 | } |
| 8021 | } |
| 8022 | } |
| 8023 | |
| 8024 | // Scan through the operands to see if only one value is used. |
| 8025 | // |
| 8026 | // As an optimisation, even if more than one value is used it may be more |
| 8027 | // profitable to splat with one value then change some lanes. |
| 8028 | // |
| 8029 | // Heuristically we decide to do this if the vector has a "dominant" value, |
| 8030 | // defined as splatted to more than half of the lanes. |
| 8031 | unsigned NumElts = VT.getVectorNumElements(); |
| 8032 | bool isOnlyLowElement = true; |
| 8033 | bool usesOnlyOneValue = true; |
| 8034 | bool hasDominantValue = false; |
| 8035 | bool isConstant = true; |
| 8036 | |
| 8037 | // Map of the number of times a particular SDValue appears in the |
| 8038 | // element list. |
| 8039 | DenseMap<SDValue, unsigned> ValueCounts; |
| 8040 | SDValue Value; |
| 8041 | for (unsigned i = 0; i < NumElts; ++i) { |
| 8042 | SDValue V = Op.getOperand(i); |
| 8043 | if (V.isUndef()) |
| 8044 | continue; |
| 8045 | if (i > 0) |
| 8046 | isOnlyLowElement = false; |
| 8047 | if (!isa<ConstantFPSDNode>(Val: V) && !isa<ConstantSDNode>(Val: V)) |
| 8048 | isConstant = false; |
| 8049 | |
| 8050 | unsigned &Count = ValueCounts[V]; |
| 8051 | |
| 8052 | // Is this value dominant? (takes up more than half of the lanes) |
| 8053 | if (++Count > (NumElts / 2)) { |
| 8054 | hasDominantValue = true; |
| 8055 | Value = V; |
| 8056 | } |
| 8057 | } |
| 8058 | if (ValueCounts.size() != 1) |
| 8059 | usesOnlyOneValue = false; |
| 8060 | if (!Value.getNode() && !ValueCounts.empty()) |
| 8061 | Value = ValueCounts.begin()->first; |
| 8062 | |
| 8063 | if (ValueCounts.empty()) |
| 8064 | return DAG.getUNDEF(VT); |
| 8065 | |
| 8066 | // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR. |
| 8067 | // Keep going if we are hitting this case. |
| 8068 | if (isOnlyLowElement && !ISD::isNormalLoad(N: Value.getNode())) |
| 8069 | return DAG.getNode(Opcode: ISD::SCALAR_TO_VECTOR, DL: dl, VT, Operand: Value); |
| 8070 | |
| 8071 | unsigned EltSize = VT.getScalarSizeInBits(); |
| 8072 | |
  // Use VDUP for non-constant splats. For f32/f16 constant splats, reduce to
  // the equivalent integer type and try again.
| 8075 | if (hasDominantValue && EltSize <= 32) { |
| 8076 | if (!isConstant) { |
| 8077 | SDValue N; |
| 8078 | |
| 8079 | // If we are VDUPing a value that comes directly from a vector, that will |
| 8080 | // cause an unnecessary move to and from a GPR, where instead we could |
| 8081 | // just use VDUPLANE. We can only do this if the lane being extracted |
| 8082 | // is at a constant index, as the VDUP from lane instructions only have |
| 8083 | // constant-index forms. |
| 8084 | ConstantSDNode *constIndex; |
| 8085 | if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 8086 | (constIndex = dyn_cast<ConstantSDNode>(Val: Value->getOperand(Num: 1)))) { |
| 8087 | // We need to create a new undef vector to use for the VDUPLANE if the |
| 8088 | // size of the vector from which we get the value is different than the |
| 8089 | // size of the vector that we need to create. We will insert the element |
| 8090 | // such that the register coalescer will remove unnecessary copies. |
| 8091 | if (VT != Value->getOperand(Num: 0).getValueType()) { |
| 8092 | unsigned index = constIndex->getAPIntValue().getLimitedValue() % |
| 8093 | VT.getVectorNumElements(); |
| 8094 | N = DAG.getNode(Opcode: ARMISD::VDUPLANE, DL: dl, VT, |
| 8095 | N1: DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT, N1: DAG.getUNDEF(VT), |
| 8096 | N2: Value, N3: DAG.getConstant(Val: index, DL: dl, VT: MVT::i32)), |
| 8097 | N2: DAG.getConstant(Val: index, DL: dl, VT: MVT::i32)); |
| 8098 | } else |
| 8099 | N = DAG.getNode(Opcode: ARMISD::VDUPLANE, DL: dl, VT, |
| 8100 | N1: Value->getOperand(Num: 0), N2: Value->getOperand(Num: 1)); |
| 8101 | } else |
| 8102 | N = DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT, Operand: Value); |
| 8103 | |
| 8104 | if (!usesOnlyOneValue) { |
| 8105 | // The dominant value was splatted as 'N', but we now have to insert |
| 8106 | // all differing elements. |
| 8107 | for (unsigned I = 0; I < NumElts; ++I) { |
| 8108 | if (Op.getOperand(i: I) == Value) |
| 8109 | continue; |
| 8110 | SmallVector<SDValue, 3> Ops; |
| 8111 | Ops.push_back(Elt: N); |
| 8112 | Ops.push_back(Elt: Op.getOperand(i: I)); |
| 8113 | Ops.push_back(Elt: DAG.getConstant(Val: I, DL: dl, VT: MVT::i32)); |
| 8114 | N = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT, Ops); |
| 8115 | } |
| 8116 | } |
| 8117 | return N; |
| 8118 | } |
| 8119 | if (VT.getVectorElementType().isFloatingPoint()) { |
| 8120 | SmallVector<SDValue, 8> Ops; |
| 8121 | MVT FVT = VT.getVectorElementType().getSimpleVT(); |
| 8122 | assert(FVT == MVT::f32 || FVT == MVT::f16); |
| 8123 | MVT IVT = (FVT == MVT::f32) ? MVT::i32 : MVT::i16; |
| 8124 | for (unsigned i = 0; i < NumElts; ++i) |
| 8125 | Ops.push_back(Elt: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: IVT, |
| 8126 | Operand: Op.getOperand(i))); |
| 8127 | EVT VecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: IVT, NumElements: NumElts); |
| 8128 | SDValue Val = DAG.getBuildVector(VT: VecVT, DL: dl, Ops); |
| 8129 | Val = LowerBUILD_VECTOR(Op: Val, DAG, ST); |
| 8130 | if (Val.getNode()) |
| 8131 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Val); |
| 8132 | } |
| 8133 | if (usesOnlyOneValue) { |
| 8134 | SDValue Val = IsSingleInstrConstant(N: Value, DAG, ST, dl); |
| 8135 | if (isConstant && Val.getNode()) |
| 8136 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT, Operand: Val); |
| 8137 | } |
| 8138 | } |
| 8139 | |
| 8140 | // If all elements are constants and the case above didn't get hit, fall back |
| 8141 | // to the default expansion, which will generate a load from the constant |
| 8142 | // pool. |
| 8143 | if (isConstant) |
| 8144 | return SDValue(); |
| 8145 | |
| 8146 | // Reconstruct the BUILDVECTOR to one of the legal shuffles (such as vext and |
| 8147 | // vmovn). Empirical tests suggest this is rarely worth it for vectors of |
| 8148 | // length <= 2. |
| 8149 | if (NumElts >= 4) |
| 8150 | if (SDValue shuffle = ReconstructShuffle(Op, DAG)) |
| 8151 | return shuffle; |
| 8152 | |
| 8153 | // Attempt to turn a buildvector of scalar fptrunc's or fpext's back into |
| 8154 | // VCVT's |
| 8155 | if (SDValue VCVT = LowerBuildVectorOfFPTrunc(BV: Op, DAG, ST: Subtarget)) |
| 8156 | return VCVT; |
| 8157 | if (SDValue VCVT = LowerBuildVectorOfFPExt(BV: Op, DAG, ST: Subtarget)) |
| 8158 | return VCVT; |
| 8159 | |
| 8160 | if (ST->hasNEON() && VT.is128BitVector() && VT != MVT::v2f64 && VT != MVT::v4f32) { |
| 8161 | // If we haven't found an efficient lowering, try splitting a 128-bit vector |
| 8162 | // into two 64-bit vectors; we might discover a better way to lower it. |
| 8163 | SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElts); |
| 8164 | EVT ExtVT = VT.getVectorElementType(); |
| 8165 | EVT HVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: ExtVT, NumElements: NumElts / 2); |
| 8166 | SDValue Lower = DAG.getBuildVector(VT: HVT, DL: dl, Ops: ArrayRef(&Ops[0], NumElts / 2)); |
| 8167 | if (Lower.getOpcode() == ISD::BUILD_VECTOR) |
| 8168 | Lower = LowerBUILD_VECTOR(Op: Lower, DAG, ST); |
| 8169 | SDValue Upper = |
| 8170 | DAG.getBuildVector(VT: HVT, DL: dl, Ops: ArrayRef(&Ops[NumElts / 2], NumElts / 2)); |
| 8171 | if (Upper.getOpcode() == ISD::BUILD_VECTOR) |
| 8172 | Upper = LowerBUILD_VECTOR(Op: Upper, DAG, ST); |
| 8173 | if (Lower && Upper) |
| 8174 | return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT, N1: Lower, N2: Upper); |
| 8175 | } |
| 8176 | |
| 8177 | // Vectors with 32- or 64-bit elements can be built by directly assigning |
| 8178 | // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands |
| 8179 | // will be legalized. |
| 8180 | if (EltSize >= 32) { |
| 8181 | // Do the expansion with floating-point types, since that is what the VFP |
| 8182 | // registers are defined to use, and since i64 is not legal. |
| 8183 | EVT EltVT = EVT::getFloatingPointVT(BitWidth: EltSize); |
| 8184 | EVT VecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: EltVT, NumElements: NumElts); |
| 8185 | SmallVector<SDValue, 8> Ops; |
| 8186 | for (unsigned i = 0; i < NumElts; ++i) |
| 8187 | Ops.push_back(Elt: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: EltVT, Operand: Op.getOperand(i))); |
| 8188 | SDValue Val = DAG.getNode(Opcode: ARMISD::BUILD_VECTOR, DL: dl, VT: VecVT, Ops); |
| 8189 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Val); |
| 8190 | } |
| 8191 | |
  // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
  // know the default expansion would otherwise fall back on something even
  // worse. For a vector with one or two non-undef values, that's
  // scalar_to_vector for the elements followed by a shuffle (provided the
  // shuffle is valid for the target); for everything else, it is
  // materialization element by element on the stack followed by a load.
| 8198 | if (!isConstant && !usesOnlyOneValue) { |
| 8199 | SDValue Vec = DAG.getUNDEF(VT); |
| 8200 | for (unsigned i = 0 ; i < NumElts; ++i) { |
| 8201 | SDValue V = Op.getOperand(i); |
| 8202 | if (V.isUndef()) |
| 8203 | continue; |
| 8204 | SDValue LaneIdx = DAG.getConstant(Val: i, DL: dl, VT: MVT::i32); |
| 8205 | Vec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT, N1: Vec, N2: V, N3: LaneIdx); |
| 8206 | } |
| 8207 | return Vec; |
| 8208 | } |
| 8209 | |
| 8210 | return SDValue(); |
| 8211 | } |
| 8212 | |
| 8213 | // Gather data to see if the operation can be modelled as a |
| 8214 | // shuffle in combination with VEXTs. |
| 8215 | SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, |
| 8216 | SelectionDAG &DAG) const { |
  assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
| 8218 | SDLoc dl(Op); |
| 8219 | EVT VT = Op.getValueType(); |
| 8220 | unsigned NumElts = VT.getVectorNumElements(); |
| 8221 | |
| 8222 | struct ShuffleSourceInfo { |
| 8223 | SDValue Vec; |
| 8224 | unsigned MinElt = std::numeric_limits<unsigned>::max(); |
| 8225 | unsigned MaxElt = 0; |
| 8226 | |
| 8227 | // We may insert some combination of BITCASTs and VEXT nodes to force Vec to |
| 8228 | // be compatible with the shuffle we intend to construct. As a result |
| 8229 | // ShuffleVec will be some sliding window into the original Vec. |
| 8230 | SDValue ShuffleVec; |
| 8231 | |
    // Code should guarantee that element i in Vec starts at element
    // "WindowBase + i * WindowScale" in ShuffleVec.
| 8234 | int WindowBase = 0; |
| 8235 | int WindowScale = 1; |
| 8236 | |
| 8237 | ShuffleSourceInfo(SDValue Vec) : Vec(Vec), ShuffleVec(Vec) {} |
| 8238 | |
| 8239 | bool operator ==(SDValue OtherVec) { return Vec == OtherVec; } |
| 8240 | }; |
| 8241 | |
| 8242 | // First gather all vectors used as an immediate source for this BUILD_VECTOR |
| 8243 | // node. |
| 8244 | SmallVector<ShuffleSourceInfo, 2> Sources; |
| 8245 | for (unsigned i = 0; i < NumElts; ++i) { |
| 8246 | SDValue V = Op.getOperand(i); |
| 8247 | if (V.isUndef()) |
| 8248 | continue; |
| 8249 | else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { |
| 8250 | // A shuffle can only come from building a vector from various |
| 8251 | // elements of other vectors. |
| 8252 | return SDValue(); |
| 8253 | } else if (!isa<ConstantSDNode>(Val: V.getOperand(i: 1))) { |
| 8254 | // Furthermore, shuffles require a constant mask, whereas extractelts |
| 8255 | // accept variable indices. |
| 8256 | return SDValue(); |
| 8257 | } |
| 8258 | |
| 8259 | // Add this element source to the list if it's not already there. |
| 8260 | SDValue SourceVec = V.getOperand(i: 0); |
| 8261 | auto Source = llvm::find(Range&: Sources, Val: SourceVec); |
| 8262 | if (Source == Sources.end()) |
| 8263 | Source = Sources.insert(I: Sources.end(), Elt: ShuffleSourceInfo(SourceVec)); |
| 8264 | |
| 8265 | // Update the minimum and maximum lane number seen. |
| 8266 | unsigned EltNo = V.getConstantOperandVal(i: 1); |
| 8267 | Source->MinElt = std::min(a: Source->MinElt, b: EltNo); |
| 8268 | Source->MaxElt = std::max(a: Source->MaxElt, b: EltNo); |
| 8269 | } |
| 8270 | |
| 8271 | // Currently only do something sane when at most two source vectors |
| 8272 | // are involved. |
| 8273 | if (Sources.size() > 2) |
| 8274 | return SDValue(); |
| 8275 | |
| 8276 | // Find out the smallest element size among result and two sources, and use |
| 8277 | // it as element size to build the shuffle_vector. |
| 8278 | EVT SmallestEltTy = VT.getVectorElementType(); |
| 8279 | for (auto &Source : Sources) { |
| 8280 | EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType(); |
| 8281 | if (SrcEltTy.bitsLT(VT: SmallestEltTy)) |
| 8282 | SmallestEltTy = SrcEltTy; |
| 8283 | } |
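  // Each output element will cover ResMultiplier lanes of the shuffle once
  // everything is expressed in units of the smallest element type.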
| 8284 | unsigned ResMultiplier = |
| 8285 | VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits(); |
| 8286 | NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits(); |
| 8287 | EVT ShuffleVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: SmallestEltTy, NumElements: NumElts); |
| 8288 | |
| 8289 | // If the source vector is too wide or too narrow, we may nevertheless be able |
| 8290 | // to construct a compatible shuffle either by concatenating it with UNDEF or |
| 8291 | // extracting a suitable range of elements. |
| 8292 | for (auto &Src : Sources) { |
| 8293 | EVT SrcVT = Src.ShuffleVec.getValueType(); |
| 8294 | |
| 8295 | uint64_t SrcVTSize = SrcVT.getFixedSizeInBits(); |
| 8296 | uint64_t VTSize = VT.getFixedSizeInBits(); |
| 8297 | if (SrcVTSize == VTSize) |
| 8298 | continue; |
| 8299 | |
| 8300 | // This stage of the search produces a source with the same element type as |
| 8301 | // the original, but with a total width matching the BUILD_VECTOR output. |
| 8302 | EVT EltVT = SrcVT.getVectorElementType(); |
| 8303 | unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits(); |
| 8304 | EVT DestVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: EltVT, NumElements: NumSrcElts); |
| 8305 | |
| 8306 | if (SrcVTSize < VTSize) { |
| 8307 | if (2 * SrcVTSize != VTSize) |
| 8308 | return SDValue(); |
      // We can pad out the smaller vector for free by concatenating it with
      // UNDEF, so widen it to the destination width.
| 8311 | Src.ShuffleVec = |
| 8312 | DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT: DestVT, N1: Src.ShuffleVec, |
| 8313 | N2: DAG.getUNDEF(VT: Src.ShuffleVec.getValueType())); |
| 8314 | continue; |
| 8315 | } |
| 8316 | |
| 8317 | if (SrcVTSize != 2 * VTSize) |
| 8318 | return SDValue(); |
| 8319 | |
| 8320 | if (Src.MaxElt - Src.MinElt >= NumSrcElts) { |
| 8321 | // Span too large for a VEXT to cope |
| 8322 | return SDValue(); |
| 8323 | } |
| 8324 | |
| 8325 | if (Src.MinElt >= NumSrcElts) { |
| 8326 | // The extraction can just take the second half |
| 8327 | Src.ShuffleVec = |
| 8328 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: DestVT, N1: Src.ShuffleVec, |
| 8329 | N2: DAG.getConstant(Val: NumSrcElts, DL: dl, VT: MVT::i32)); |
| 8330 | Src.WindowBase = -NumSrcElts; |
| 8331 | } else if (Src.MaxElt < NumSrcElts) { |
| 8332 | // The extraction can just take the first half |
| 8333 | Src.ShuffleVec = |
| 8334 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: DestVT, N1: Src.ShuffleVec, |
| 8335 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 8336 | } else { |
| 8337 | // An actual VEXT is needed |
| 8338 | SDValue VEXTSrc1 = |
| 8339 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: DestVT, N1: Src.ShuffleVec, |
| 8340 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 8341 | SDValue VEXTSrc2 = |
| 8342 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: DestVT, N1: Src.ShuffleVec, |
| 8343 | N2: DAG.getConstant(Val: NumSrcElts, DL: dl, VT: MVT::i32)); |
| 8344 | |
| 8345 | Src.ShuffleVec = DAG.getNode(Opcode: ARMISD::VEXT, DL: dl, VT: DestVT, N1: VEXTSrc1, |
| 8346 | N2: VEXTSrc2, |
| 8347 | N3: DAG.getConstant(Val: Src.MinElt, DL: dl, VT: MVT::i32)); |
| 8348 | Src.WindowBase = -Src.MinElt; |
| 8349 | } |
| 8350 | } |
| 8351 | |
| 8352 | // Another possible incompatibility occurs from the vector element types. We |
| 8353 | // can fix this by bitcasting the source vectors to the same type we intend |
| 8354 | // for the shuffle. |
| 8355 | for (auto &Src : Sources) { |
| 8356 | EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType(); |
| 8357 | if (SrcEltTy == SmallestEltTy) |
| 8358 | continue; |
| 8359 | assert(ShuffleVT.getVectorElementType() == SmallestEltTy); |
| 8360 | Src.ShuffleVec = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: ShuffleVT, Operand: Src.ShuffleVec); |
| 8361 | Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits(); |
| 8362 | Src.WindowBase *= Src.WindowScale; |
| 8363 | } |
| 8364 | |
| 8365 | // Final check before we try to actually produce a shuffle. |
| 8366 | LLVM_DEBUG({ |
| 8367 | for (auto Src : Sources) |
| 8368 | assert(Src.ShuffleVec.getValueType() == ShuffleVT); |
| 8369 | }); |
| 8370 | |
| 8371 | // The stars all align, our next step is to produce the mask for the shuffle. |
| 8372 | SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1); |
| 8373 | int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits(); |
| 8374 | for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { |
| 8375 | SDValue Entry = Op.getOperand(i); |
| 8376 | if (Entry.isUndef()) |
| 8377 | continue; |
| 8378 | |
| 8379 | auto Src = llvm::find(Range&: Sources, Val: Entry.getOperand(i: 0)); |
| 8380 | int EltNo = cast<ConstantSDNode>(Val: Entry.getOperand(i: 1))->getSExtValue(); |
| 8381 | |
| 8382 | // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit |
| 8383 | // trunc. So only std::min(SrcBits, DestBits) actually get defined in this |
| 8384 | // segment. |
| 8385 | EVT OrigEltTy = Entry.getOperand(i: 0).getValueType().getVectorElementType(); |
| 8386 | int BitsDefined = std::min(a: OrigEltTy.getScalarSizeInBits(), |
| 8387 | b: VT.getScalarSizeInBits()); |
| 8388 | int LanesDefined = BitsDefined / BitsPerShuffleLane; |
| 8389 | |
| 8390 | // This source is expected to fill ResMultiplier lanes of the final shuffle, |
| 8391 | // starting at the appropriate offset. |
| 8392 | int *LaneMask = &Mask[i * ResMultiplier]; |
| 8393 | |
    int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
| 8395 | ExtractBase += NumElts * (Src - Sources.begin()); |
| 8396 | for (int j = 0; j < LanesDefined; ++j) |
| 8397 | LaneMask[j] = ExtractBase + j; |
| 8398 | } |
| 8399 | |
| 8400 | |
| 8401 | // We can't handle more than two sources. This should have already |
| 8402 | // been checked before this point. |
  assert(Sources.size() <= 2 && "Too many sources!");
| 8404 | |
| 8405 | SDValue ShuffleOps[] = { DAG.getUNDEF(VT: ShuffleVT), DAG.getUNDEF(VT: ShuffleVT) }; |
| 8406 | for (unsigned i = 0; i < Sources.size(); ++i) |
| 8407 | ShuffleOps[i] = Sources[i].ShuffleVec; |
| 8408 | |
| 8409 | SDValue Shuffle = buildLegalVectorShuffle(VT: ShuffleVT, DL: dl, N0: ShuffleOps[0], |
| 8410 | N1: ShuffleOps[1], Mask, DAG); |
| 8411 | if (!Shuffle) |
| 8412 | return SDValue(); |
| 8413 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Shuffle); |
| 8414 | } |
| 8415 | |
| 8416 | enum ShuffleOpCodes { |
| 8417 | OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> |
| 8418 | OP_VREV, |
| 8419 | OP_VDUP0, |
| 8420 | OP_VDUP1, |
| 8421 | OP_VDUP2, |
| 8422 | OP_VDUP3, |
| 8423 | OP_VEXT1, |
| 8424 | OP_VEXT2, |
| 8425 | OP_VEXT3, |
| 8426 | OP_VUZPL, // VUZP, left result |
| 8427 | OP_VUZPR, // VUZP, right result |
| 8428 | OP_VZIPL, // VZIP, left result |
| 8429 | OP_VZIPR, // VZIP, right result |
| 8430 | OP_VTRNL, // VTRN, left result |
| 8431 | OP_VTRNR // VTRN, right result |
| 8432 | }; |
| 8433 | |
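// MVE lacks the NEON permute instructions (VEXT, VUZP, VZIP and VTRN), so only
// the copy, reverse and duplicate entries of the perfect shuffle table can be
// used directly.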
| 8434 | static bool isLegalMVEShuffleOp(unsigned PFEntry) { |
| 8435 | unsigned OpNum = (PFEntry >> 26) & 0x0F; |
| 8436 | switch (OpNum) { |
| 8437 | case OP_COPY: |
| 8438 | case OP_VREV: |
| 8439 | case OP_VDUP0: |
| 8440 | case OP_VDUP1: |
| 8441 | case OP_VDUP2: |
| 8442 | case OP_VDUP3: |
| 8443 | return true; |
| 8444 | } |
| 8445 | return false; |
| 8446 | } |
| 8447 | |
| 8448 | /// isShuffleMaskLegal - Targets can use this to indicate that they only |
| 8449 | /// support *some* VECTOR_SHUFFLE operations, those with specific masks. |
| 8450 | /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values |
| 8451 | /// are assumed to be legal. |
| 8452 | bool ARMTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const { |
| 8453 | if (VT.getVectorNumElements() == 4 && |
| 8454 | (VT.is128BitVector() || VT.is64BitVector())) { |
| 8455 | unsigned PFIndexes[4]; |
| 8456 | for (unsigned i = 0; i != 4; ++i) { |
| 8457 | if (M[i] < 0) |
| 8458 | PFIndexes[i] = 8; |
| 8459 | else |
| 8460 | PFIndexes[i] = M[i]; |
| 8461 | } |
| 8462 | |
| 8463 | // Compute the index in the perfect shuffle table. |
| 8464 | unsigned PFTableIndex = |
| 8465 | PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; |
| 8466 | unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; |
| 8467 | unsigned Cost = (PFEntry >> 30); |
| 8468 | |
| 8469 | if (Cost <= 4 && (Subtarget->hasNEON() || isLegalMVEShuffleOp(PFEntry))) |
| 8470 | return true; |
| 8471 | } |
| 8472 | |
| 8473 | bool ReverseVEXT, isV_UNDEF; |
| 8474 | unsigned Imm, WhichResult; |
| 8475 | |
| 8476 | unsigned EltSize = VT.getScalarSizeInBits(); |
| 8477 | if (EltSize >= 32 || |
| 8478 | ShuffleVectorSDNode::isSplatMask(Mask: M) || |
| 8479 | ShuffleVectorInst::isIdentityMask(Mask: M, NumSrcElts: M.size()) || |
| 8480 | isVREVMask(M, VT, BlockSize: 64) || |
| 8481 | isVREVMask(M, VT, BlockSize: 32) || |
| 8482 | isVREVMask(M, VT, BlockSize: 16)) |
| 8483 | return true; |
| 8484 | else if (Subtarget->hasNEON() && |
| 8485 | (isVEXTMask(M, VT, ReverseVEXT, Imm) || |
| 8486 | isVTBLMask(M, VT) || |
| 8487 | isNEONTwoResultShuffleMask(ShuffleMask: M, VT, WhichResult, isV_UNDEF))) |
| 8488 | return true; |
| 8489 | else if ((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) && |
| 8490 | isReverseMask(M, VT)) |
| 8491 | return true; |
| 8492 | else if (Subtarget->hasMVEIntegerOps() && |
| 8493 | (isVMOVNMask(M, VT, Top: true, SingleSource: false) || |
| 8494 | isVMOVNMask(M, VT, Top: false, SingleSource: false) || isVMOVNMask(M, VT, Top: true, SingleSource: true))) |
| 8495 | return true; |
| 8496 | else if (Subtarget->hasMVEIntegerOps() && |
| 8497 | (isTruncMask(M, VT, Top: false, SingleSource: false) || |
| 8498 | isTruncMask(M, VT, Top: false, SingleSource: true) || |
| 8499 | isTruncMask(M, VT, Top: true, SingleSource: false) || isTruncMask(M, VT, Top: true, SingleSource: true))) |
| 8500 | return true; |
| 8501 | else |
| 8502 | return false; |
| 8503 | } |
| 8504 | |
| 8505 | /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit |
| 8506 | /// the specified operations to build the shuffle. |
| 8507 | static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, |
| 8508 | SDValue RHS, SelectionDAG &DAG, |
| 8509 | const SDLoc &dl) { |
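  // A perfect-shuffle table entry packs the cost into bits [31:30], the opcode
  // into bits [29:26] and the two 13-bit operand entries below that.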
| 8510 | unsigned OpNum = (PFEntry >> 26) & 0x0F; |
| 8511 | unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); |
| 8512 | unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); |
| 8513 | |
| 8514 | if (OpNum == OP_COPY) { |
| 8515 | if (LHSID == (1*9+2)*9+3) return LHS; |
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
| 8517 | return RHS; |
| 8518 | } |
| 8519 | |
| 8520 | SDValue OpLHS, OpRHS; |
| 8521 | OpLHS = GeneratePerfectShuffle(PFEntry: PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); |
| 8522 | OpRHS = GeneratePerfectShuffle(PFEntry: PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); |
| 8523 | EVT VT = OpLHS.getValueType(); |
| 8524 | |
| 8525 | switch (OpNum) { |
  default: llvm_unreachable("Unknown shuffle opcode!");
| 8527 | case OP_VREV: |
| 8528 | // VREV divides the vector in half and swaps within the half. |
| 8529 | if (VT.getScalarSizeInBits() == 32) |
| 8530 | return DAG.getNode(Opcode: ARMISD::VREV64, DL: dl, VT, Operand: OpLHS); |
| 8531 | // vrev <4 x i16> -> VREV32 |
| 8532 | if (VT.getScalarSizeInBits() == 16) |
| 8533 | return DAG.getNode(Opcode: ARMISD::VREV32, DL: dl, VT, Operand: OpLHS); |
| 8534 | // vrev <4 x i8> -> VREV16 |
| 8535 | assert(VT.getScalarSizeInBits() == 8); |
| 8536 | return DAG.getNode(Opcode: ARMISD::VREV16, DL: dl, VT, Operand: OpLHS); |
| 8537 | case OP_VDUP0: |
| 8538 | case OP_VDUP1: |
| 8539 | case OP_VDUP2: |
| 8540 | case OP_VDUP3: |
| 8541 | return DAG.getNode(Opcode: ARMISD::VDUPLANE, DL: dl, VT, |
| 8542 | N1: OpLHS, N2: DAG.getConstant(Val: OpNum-OP_VDUP0, DL: dl, VT: MVT::i32)); |
| 8543 | case OP_VEXT1: |
| 8544 | case OP_VEXT2: |
| 8545 | case OP_VEXT3: |
| 8546 | return DAG.getNode(Opcode: ARMISD::VEXT, DL: dl, VT, |
| 8547 | N1: OpLHS, N2: OpRHS, |
| 8548 | N3: DAG.getConstant(Val: OpNum - OP_VEXT1 + 1, DL: dl, VT: MVT::i32)); |
| 8549 | case OP_VUZPL: |
| 8550 | case OP_VUZPR: |
| 8551 | return DAG.getNode(Opcode: ARMISD::VUZP, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT), |
| 8552 | N1: OpLHS, N2: OpRHS).getValue(R: OpNum-OP_VUZPL); |
| 8553 | case OP_VZIPL: |
| 8554 | case OP_VZIPR: |
| 8555 | return DAG.getNode(Opcode: ARMISD::VZIP, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT), |
| 8556 | N1: OpLHS, N2: OpRHS).getValue(R: OpNum-OP_VZIPL); |
| 8557 | case OP_VTRNL: |
| 8558 | case OP_VTRNR: |
| 8559 | return DAG.getNode(Opcode: ARMISD::VTRN, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT), |
| 8560 | N1: OpLHS, N2: OpRHS).getValue(R: OpNum-OP_VTRNL); |
| 8561 | } |
| 8562 | } |
| 8563 | |
| 8564 | static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, |
| 8565 | ArrayRef<int> ShuffleMask, |
| 8566 | SelectionDAG &DAG) { |
| 8567 | // Check to see if we can use the VTBL instruction. |
| 8568 | SDValue V1 = Op.getOperand(i: 0); |
| 8569 | SDValue V2 = Op.getOperand(i: 1); |
| 8570 | SDLoc DL(Op); |
| 8571 | |
| 8572 | SmallVector<SDValue, 8> VTBLMask; |
| 8573 | for (int I : ShuffleMask) |
| 8574 | VTBLMask.push_back(Elt: DAG.getSignedConstant(Val: I, DL, VT: MVT::i32)); |
| 8575 | |
| 8576 | if (V2.getNode()->isUndef()) |
| 8577 | return DAG.getNode(Opcode: ARMISD::VTBL1, DL, VT: MVT::v8i8, N1: V1, |
| 8578 | N2: DAG.getBuildVector(VT: MVT::v8i8, DL, Ops: VTBLMask)); |
| 8579 | |
| 8580 | return DAG.getNode(Opcode: ARMISD::VTBL2, DL, VT: MVT::v8i8, N1: V1, N2: V2, |
| 8581 | N3: DAG.getBuildVector(VT: MVT::v8i8, DL, Ops: VTBLMask)); |
| 8582 | } |
| 8583 | |
| 8584 | static SDValue LowerReverse_VECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { |
| 8585 | SDLoc DL(Op); |
| 8586 | EVT VT = Op.getValueType(); |
| 8587 | |
  assert((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) &&
         "Expect a v8i16, v8f16 or v16i8 type");
| 8590 | SDValue OpLHS = DAG.getNode(Opcode: ARMISD::VREV64, DL, VT, Operand: Op.getOperand(i: 0)); |
| 8591 | // For a v16i8 type: After the VREV, we have got <7, ..., 0, 15, ..., 8>. Now, |
| 8592 | // extract the first 8 bytes into the top double word and the last 8 bytes |
| 8593 | // into the bottom double word, through a new vector shuffle that will be |
| 8594 | // turned into a VEXT on Neon, or a couple of VMOVDs on MVE. |
| 8595 | std::vector<int> NewMask; |
| 8596 | for (unsigned i = 0; i < VT.getVectorNumElements() / 2; i++) |
| 8597 | NewMask.push_back(x: VT.getVectorNumElements() / 2 + i); |
| 8598 | for (unsigned i = 0; i < VT.getVectorNumElements() / 2; i++) |
| 8599 | NewMask.push_back(x: i); |
| 8600 | return DAG.getVectorShuffle(VT, dl: DL, N1: OpLHS, N2: OpLHS, Mask: NewMask); |
| 8601 | } |
| 8602 | |
| 8603 | static EVT getVectorTyFromPredicateVector(EVT VT) { |
| 8604 | switch (VT.getSimpleVT().SimpleTy) { |
| 8605 | case MVT::v2i1: |
| 8606 | return MVT::v2f64; |
| 8607 | case MVT::v4i1: |
| 8608 | return MVT::v4i32; |
| 8609 | case MVT::v8i1: |
| 8610 | return MVT::v8i16; |
| 8611 | case MVT::v16i1: |
| 8612 | return MVT::v16i8; |
| 8613 | default: |
| 8614 | llvm_unreachable("Unexpected vector predicate type" ); |
| 8615 | } |
| 8616 | } |
| 8617 | |
| 8618 | static SDValue PromoteMVEPredVector(SDLoc dl, SDValue Pred, EVT VT, |
| 8619 | SelectionDAG &DAG) { |
| 8620 | // Converting from boolean predicates to integers involves creating a vector |
| 8621 | // of all ones or all zeroes and selecting the lanes based upon the real |
| 8622 | // predicate. |
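  // cmode 0xe is the per-byte splat encoding, giving all-ones and all-zeroes
  // v16i8 immediates.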
| 8623 | SDValue AllOnes = |
| 8624 | DAG.getTargetConstant(Val: ARM_AM::createVMOVModImm(OpCmode: 0xe, Val: 0xff), DL: dl, VT: MVT::i32); |
| 8625 | AllOnes = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: MVT::v16i8, Operand: AllOnes); |
| 8626 | |
| 8627 | SDValue AllZeroes = |
| 8628 | DAG.getTargetConstant(Val: ARM_AM::createVMOVModImm(OpCmode: 0xe, Val: 0x0), DL: dl, VT: MVT::i32); |
| 8629 | AllZeroes = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: MVT::v16i8, Operand: AllZeroes); |
| 8630 | |
| 8631 | // Get full vector type from predicate type |
| 8632 | EVT NewVT = getVectorTyFromPredicateVector(VT); |
| 8633 | |
| 8634 | SDValue RecastV1; |
| 8635 | // If the real predicate is an v8i1 or v4i1 (not v16i1) then we need to recast |
| 8636 | // this to a v16i1. This cannot be done with an ordinary bitcast because the |
| 8637 | // sizes are not the same. We have to use a MVE specific PREDICATE_CAST node, |
| 8638 | // since we know in hardware the sizes are really the same. |
| 8639 | if (VT != MVT::v16i1) |
| 8640 | RecastV1 = DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::v16i1, Operand: Pred); |
| 8641 | else |
| 8642 | RecastV1 = Pred; |
| 8643 | |
| 8644 | // Select either all ones or zeroes depending upon the real predicate bits. |
| 8645 | SDValue PredAsVector = |
| 8646 | DAG.getNode(Opcode: ISD::VSELECT, DL: dl, VT: MVT::v16i8, N1: RecastV1, N2: AllOnes, N3: AllZeroes); |
| 8647 | |
| 8648 | // Recast our new predicate-as-integer v16i8 vector into something |
| 8649 | // appropriate for the shuffle, i.e. v4i32 for a real v4i1 predicate. |
| 8650 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: NewVT, Operand: PredAsVector); |
| 8651 | } |
| 8652 | |
| 8653 | static SDValue LowerVECTOR_SHUFFLE_i1(SDValue Op, SelectionDAG &DAG, |
| 8654 | const ARMSubtarget *ST) { |
| 8655 | EVT VT = Op.getValueType(); |
| 8656 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Val: Op.getNode()); |
| 8657 | ArrayRef<int> ShuffleMask = SVN->getMask(); |
| 8658 | |
  assert(ST->hasMVEIntegerOps() &&
         "No support for vector shuffle of boolean predicates");
| 8661 | |
| 8662 | SDValue V1 = Op.getOperand(i: 0); |
| 8663 | SDValue V2 = Op.getOperand(i: 1); |
| 8664 | SDLoc dl(Op); |
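  // A lane reversal of a predicate is a bit reversal of the 16 predicate bits
  // (every lane's bits are identical), realigned by shifting back down from
  // the top half of the i32.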
| 8665 | if (isReverseMask(M: ShuffleMask, VT)) { |
| 8666 | SDValue cast = DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::i32, Operand: V1); |
| 8667 | SDValue rbit = DAG.getNode(Opcode: ISD::BITREVERSE, DL: dl, VT: MVT::i32, Operand: cast); |
| 8668 | SDValue srl = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i32, N1: rbit, |
| 8669 | N2: DAG.getConstant(Val: 16, DL: dl, VT: MVT::i32)); |
| 8670 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT, Operand: srl); |
| 8671 | } |
| 8672 | |
| 8673 | // Until we can come up with optimised cases for every single vector |
| 8674 | // shuffle in existence we have chosen the least painful strategy. This is |
  // to essentially promote the boolean predicate to an 8-bit integer, where
| 8676 | // each predicate represents a byte. Then we fall back on a normal integer |
| 8677 | // vector shuffle and convert the result back into a predicate vector. In |
| 8678 | // many cases the generated code might be even better than scalar code |
| 8679 | // operating on bits. Just imagine trying to shuffle 8 arbitrary 2-bit |
| 8680 | // fields in a register into 8 other arbitrary 2-bit fields! |
| 8681 | SDValue PredAsVector1 = PromoteMVEPredVector(dl, Pred: V1, VT, DAG); |
| 8682 | EVT NewVT = PredAsVector1.getValueType(); |
| 8683 | SDValue PredAsVector2 = V2.isUndef() ? DAG.getUNDEF(VT: NewVT) |
| 8684 | : PromoteMVEPredVector(dl, Pred: V2, VT, DAG); |
  assert(PredAsVector2.getValueType() == NewVT &&
         "Expected identical vector type in expanded i1 shuffle!");
| 8687 | |
| 8688 | // Do the shuffle! |
| 8689 | SDValue Shuffled = DAG.getVectorShuffle(VT: NewVT, dl, N1: PredAsVector1, |
| 8690 | N2: PredAsVector2, Mask: ShuffleMask); |
| 8691 | |
| 8692 | // Now return the result of comparing the shuffled vector with zero, |
| 8693 | // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1. For a v2i1 |
| 8694 | // we convert to a v4i1 compare to fill in the two halves of the i64 as i32s. |
| 8695 | if (VT == MVT::v2i1) { |
| 8696 | SDValue BC = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: Shuffled); |
| 8697 | SDValue Cmp = DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT: MVT::v4i1, N1: BC, |
| 8698 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
| 8699 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::v2i1, Operand: Cmp); |
| 8700 | } |
| 8701 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: Shuffled, |
| 8702 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
| 8703 | } |
| 8704 | |
| 8705 | static SDValue LowerVECTOR_SHUFFLEUsingMovs(SDValue Op, |
| 8706 | ArrayRef<int> ShuffleMask, |
| 8707 | SelectionDAG &DAG) { |
| 8708 | // Attempt to lower the vector shuffle using as many whole register movs as |
  // possible. This is useful for types smaller than 32 bits, which would
  // otherwise often become a series of GPR movs.
| 8711 | SDLoc dl(Op); |
| 8712 | EVT VT = Op.getValueType(); |
| 8713 | if (VT.getScalarSizeInBits() >= 32) |
| 8714 | return SDValue(); |
| 8715 | |
  assert((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) &&
         "Unexpected vector type");
| 8718 | int NumElts = VT.getVectorNumElements(); |
| 8719 | int QuarterSize = NumElts / 4; |
| 8720 | // The four final parts of the vector, as i32's |
| 8721 | SDValue Parts[4]; |
| 8722 | |
| 8723 | // Look for full lane vmovs like <0,1,2,3> or <u,5,6,7> etc, (but not |
| 8724 | // <u,u,u,u>), returning the vmov lane index |
| 8725 | auto getMovIdx = [](ArrayRef<int> ShuffleMask, int Start, int Length) { |
| 8726 | // Detect which mov lane this would be from the first non-undef element. |
| 8727 | int MovIdx = -1; |
| 8728 | for (int i = 0; i < Length; i++) { |
| 8729 | if (ShuffleMask[Start + i] >= 0) { |
| 8730 | if (ShuffleMask[Start + i] % Length != i) |
| 8731 | return -1; |
| 8732 | MovIdx = ShuffleMask[Start + i] / Length; |
| 8733 | break; |
| 8734 | } |
| 8735 | } |
| 8736 | // If all items are undef, leave this for other combines |
| 8737 | if (MovIdx == -1) |
| 8738 | return -1; |
| 8739 | // Check the remaining values are the correct part of the same mov |
| 8740 | for (int i = 1; i < Length; i++) { |
| 8741 | if (ShuffleMask[Start + i] >= 0 && |
| 8742 | (ShuffleMask[Start + i] / Length != MovIdx || |
| 8743 | ShuffleMask[Start + i] % Length != i)) |
| 8744 | return -1; |
| 8745 | } |
| 8746 | return MovIdx; |
| 8747 | }; |
| 8748 | |
| 8749 | for (int Part = 0; Part < 4; ++Part) { |
| 8750 | // Does this part look like a mov |
| 8751 | int Elt = getMovIdx(ShuffleMask, Part * QuarterSize, QuarterSize); |
| 8752 | if (Elt != -1) { |
| 8753 | SDValue Input = Op->getOperand(Num: 0); |
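      // Mov indices 0-3 select a 32-bit lane of the first operand, 4-7 of the
      // second.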
| 8754 | if (Elt >= 4) { |
| 8755 | Input = Op->getOperand(Num: 1); |
| 8756 | Elt -= 4; |
| 8757 | } |
| 8758 | SDValue BitCast = DAG.getBitcast(VT: MVT::v4f32, V: Input); |
| 8759 | Parts[Part] = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f32, N1: BitCast, |
| 8760 | N2: DAG.getConstant(Val: Elt, DL: dl, VT: MVT::i32)); |
| 8761 | } |
| 8762 | } |
| 8763 | |
| 8764 | // Nothing interesting found, just return |
| 8765 | if (!Parts[0] && !Parts[1] && !Parts[2] && !Parts[3]) |
| 8766 | return SDValue(); |
| 8767 | |
  // The other parts need to be built with the old shuffle vector, cast to a
  // v4f32 and extract_vector_elts
| 8770 | if (!Parts[0] || !Parts[1] || !Parts[2] || !Parts[3]) { |
| 8771 | SmallVector<int, 16> NewShuffleMask; |
| 8772 | for (int Part = 0; Part < 4; ++Part) |
| 8773 | for (int i = 0; i < QuarterSize; i++) |
| 8774 | NewShuffleMask.push_back( |
| 8775 | Elt: Parts[Part] ? -1 : ShuffleMask[Part * QuarterSize + i]); |
| 8776 | SDValue NewShuffle = DAG.getVectorShuffle( |
| 8777 | VT, dl, N1: Op->getOperand(Num: 0), N2: Op->getOperand(Num: 1), Mask: NewShuffleMask); |
| 8778 | SDValue BitCast = DAG.getBitcast(VT: MVT::v4f32, V: NewShuffle); |
| 8779 | |
| 8780 | for (int Part = 0; Part < 4; ++Part) |
| 8781 | if (!Parts[Part]) |
| 8782 | Parts[Part] = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f32, |
| 8783 | N1: BitCast, N2: DAG.getConstant(Val: Part, DL: dl, VT: MVT::i32)); |
| 8784 | } |
| 8785 | // Build a vector out of the various parts and bitcast it back to the original |
| 8786 | // type. |
| 8787 | SDValue NewVec = DAG.getNode(Opcode: ARMISD::BUILD_VECTOR, DL: dl, VT: MVT::v4f32, Ops: Parts); |
| 8788 | return DAG.getBitcast(VT, V: NewVec); |
| 8789 | } |
| 8790 | |
| 8791 | static SDValue LowerVECTOR_SHUFFLEUsingOneOff(SDValue Op, |
| 8792 | ArrayRef<int> ShuffleMask, |
| 8793 | SelectionDAG &DAG) { |
| 8794 | SDValue V1 = Op.getOperand(i: 0); |
| 8795 | SDValue V2 = Op.getOperand(i: 1); |
| 8796 | EVT VT = Op.getValueType(); |
| 8797 | unsigned NumElts = VT.getVectorNumElements(); |
| 8798 | |
  // A One-Off Identity mask is one that is mostly an identity mask from a
  // single source but contains a single element out-of-place, either from a
  // different vector or from another position in the same vector. As opposed to
| 8802 | // lowering this via a ARMISD::BUILD_VECTOR we can generate an extract/insert |
| 8803 | // pair directly. |
| 8804 | auto isOneOffIdentityMask = [](ArrayRef<int> Mask, EVT VT, int BaseOffset, |
| 8805 | int &OffElement) { |
| 8806 | OffElement = -1; |
| 8807 | int NonUndef = 0; |
| 8808 | for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) { |
| 8809 | if (Mask[i] == -1) |
| 8810 | continue; |
| 8811 | NonUndef++; |
| 8812 | if (Mask[i] != i + BaseOffset) { |
| 8813 | if (OffElement == -1) |
| 8814 | OffElement = i; |
| 8815 | else |
| 8816 | return false; |
| 8817 | } |
| 8818 | } |
| 8819 | return NonUndef > 2 && OffElement != -1; |
| 8820 | }; |
| 8821 | int OffElement; |
| 8822 | SDValue VInput; |
| 8823 | if (isOneOffIdentityMask(ShuffleMask, VT, 0, OffElement)) |
| 8824 | VInput = V1; |
| 8825 | else if (isOneOffIdentityMask(ShuffleMask, VT, NumElts, OffElement)) |
| 8826 | VInput = V2; |
| 8827 | else |
| 8828 | return SDValue(); |
| 8829 | |
| 8830 | SDLoc dl(Op); |
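  // Extract i8/i16 lanes as i32, since those scalar types are not legal; the
  // INSERT_VECTOR_ELT will implicitly truncate the value back down.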
| 8831 | EVT SVT = VT.getScalarType() == MVT::i8 || VT.getScalarType() == MVT::i16 |
| 8832 | ? MVT::i32 |
| 8833 | : VT.getScalarType(); |
| 8834 | SDValue Elt = DAG.getNode( |
| 8835 | Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: SVT, |
| 8836 | N1: ShuffleMask[OffElement] < (int)NumElts ? V1 : V2, |
| 8837 | N2: DAG.getVectorIdxConstant(Val: ShuffleMask[OffElement] % NumElts, DL: dl)); |
| 8838 | return DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT, N1: VInput, N2: Elt, |
| 8839 | N3: DAG.getVectorIdxConstant(Val: OffElement % NumElts, DL: dl)); |
| 8840 | } |
| 8841 | |
| 8842 | static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG, |
| 8843 | const ARMSubtarget *ST) { |
| 8844 | SDValue V1 = Op.getOperand(i: 0); |
| 8845 | SDValue V2 = Op.getOperand(i: 1); |
| 8846 | SDLoc dl(Op); |
| 8847 | EVT VT = Op.getValueType(); |
| 8848 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Val: Op.getNode()); |
| 8849 | unsigned EltSize = VT.getScalarSizeInBits(); |
| 8850 | |
| 8851 | if (ST->hasMVEIntegerOps() && EltSize == 1) |
| 8852 | return LowerVECTOR_SHUFFLE_i1(Op, DAG, ST); |
| 8853 | |
| 8854 | // Convert shuffles that are directly supported on NEON to target-specific |
| 8855 | // DAG nodes, instead of keeping them as shuffles and matching them again |
| 8856 | // during code selection. This is more efficient and avoids the possibility |
| 8857 | // of inconsistencies between legalization and selection. |
  // FIXME: floating-point vectors should be canonicalized to integer vectors
  // of the same size so that they get CSEd properly.
| 8860 | ArrayRef<int> ShuffleMask = SVN->getMask(); |
| 8861 | |
| 8862 | if (EltSize <= 32) { |
| 8863 | if (SVN->isSplat()) { |
| 8864 | int Lane = SVN->getSplatIndex(); |
| 8865 | // If this is undef splat, generate it via "just" vdup, if possible. |
| 8866 | if (Lane == -1) Lane = 0; |
| 8867 | |
| 8868 | // Test if V1 is a SCALAR_TO_VECTOR. |
| 8869 | if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { |
| 8870 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT, Operand: V1.getOperand(i: 0)); |
| 8871 | } |
| 8872 | // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR |
| 8873 | // (and probably will turn into a SCALAR_TO_VECTOR once legalization |
| 8874 | // reaches it). |
| 8875 | if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR && |
| 8876 | !isa<ConstantSDNode>(Val: V1.getOperand(i: 0))) { |
| 8877 | bool IsScalarToVector = true; |
| 8878 | for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i) |
| 8879 | if (!V1.getOperand(i).isUndef()) { |
| 8880 | IsScalarToVector = false; |
| 8881 | break; |
| 8882 | } |
| 8883 | if (IsScalarToVector) |
| 8884 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT, Operand: V1.getOperand(i: 0)); |
| 8885 | } |
| 8886 | return DAG.getNode(Opcode: ARMISD::VDUPLANE, DL: dl, VT, N1: V1, |
| 8887 | N2: DAG.getConstant(Val: Lane, DL: dl, VT: MVT::i32)); |
| 8888 | } |
| 8889 | |
| 8890 | bool ReverseVEXT = false; |
| 8891 | unsigned Imm = 0; |
| 8892 | if (ST->hasNEON() && isVEXTMask(M: ShuffleMask, VT, ReverseVEXT, Imm)) { |
| 8893 | if (ReverseVEXT) |
| 8894 | std::swap(a&: V1, b&: V2); |
| 8895 | return DAG.getNode(Opcode: ARMISD::VEXT, DL: dl, VT, N1: V1, N2: V2, |
| 8896 | N3: DAG.getConstant(Val: Imm, DL: dl, VT: MVT::i32)); |
| 8897 | } |
| 8898 | |
| 8899 | if (isVREVMask(M: ShuffleMask, VT, BlockSize: 64)) |
| 8900 | return DAG.getNode(Opcode: ARMISD::VREV64, DL: dl, VT, Operand: V1); |
| 8901 | if (isVREVMask(M: ShuffleMask, VT, BlockSize: 32)) |
| 8902 | return DAG.getNode(Opcode: ARMISD::VREV32, DL: dl, VT, Operand: V1); |
| 8903 | if (isVREVMask(M: ShuffleMask, VT, BlockSize: 16)) |
| 8904 | return DAG.getNode(Opcode: ARMISD::VREV16, DL: dl, VT, Operand: V1); |
| 8905 | |
| 8906 | if (ST->hasNEON() && V2->isUndef() && isSingletonVEXTMask(M: ShuffleMask, VT, Imm)) { |
| 8907 | return DAG.getNode(Opcode: ARMISD::VEXT, DL: dl, VT, N1: V1, N2: V1, |
| 8908 | N3: DAG.getConstant(Val: Imm, DL: dl, VT: MVT::i32)); |
| 8909 | } |
| 8910 | |
| 8911 | // Check for Neon shuffles that modify both input vectors in place. |
| 8912 | // If both results are used, i.e., if there are two shuffles with the same |
| 8913 | // source operands and with masks corresponding to both results of one of |
| 8914 | // these operations, DAG memoization will ensure that a single node is |
| 8915 | // used for both shuffles. |
| 8916 | unsigned WhichResult = 0; |
| 8917 | bool isV_UNDEF = false; |
| 8918 | if (ST->hasNEON()) { |
| 8919 | if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( |
| 8920 | ShuffleMask, VT, WhichResult, isV_UNDEF)) { |
| 8921 | if (isV_UNDEF) |
| 8922 | V2 = V1; |
| 8923 | return DAG.getNode(Opcode: ShuffleOpc, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT), N1: V1, N2: V2) |
| 8924 | .getValue(R: WhichResult); |
| 8925 | } |
| 8926 | } |
| 8927 | if (ST->hasMVEIntegerOps()) { |
| 8928 | if (isVMOVNMask(M: ShuffleMask, VT, Top: false, SingleSource: false)) |
| 8929 | return DAG.getNode(Opcode: ARMISD::VMOVN, DL: dl, VT, N1: V2, N2: V1, |
| 8930 | N3: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 8931 | if (isVMOVNMask(M: ShuffleMask, VT, Top: true, SingleSource: false)) |
| 8932 | return DAG.getNode(Opcode: ARMISD::VMOVN, DL: dl, VT, N1: V1, N2: V2, |
| 8933 | N3: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
| 8934 | if (isVMOVNMask(M: ShuffleMask, VT, Top: true, SingleSource: true)) |
| 8935 | return DAG.getNode(Opcode: ARMISD::VMOVN, DL: dl, VT, N1: V1, N2: V1, |
| 8936 | N3: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
| 8937 | } |
| 8938 | |
| 8939 | // Also check for these shuffles through CONCAT_VECTORS: we canonicalize |
| 8940 | // shuffles that produce a result larger than their operands with: |
| 8941 | // shuffle(concat(v1, undef), concat(v2, undef)) |
| 8942 | // -> |
| 8943 | // shuffle(concat(v1, v2), undef) |
| 8944 | // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine). |
| 8945 | // |
| 8946 | // This is useful in the general case, but there are special cases where |
| 8947 | // native shuffles produce larger results: the two-result ops. |
| 8948 | // |
| 8949 | // Look through the concat when lowering them: |
| 8950 | // shuffle(concat(v1, v2), undef) |
| 8951 | // -> |
| 8952 | // concat(VZIP(v1, v2):0, :1) |
| 8953 | // |
| 8954 | if (ST->hasNEON() && V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) { |
| 8955 | SDValue SubV1 = V1->getOperand(Num: 0); |
| 8956 | SDValue SubV2 = V1->getOperand(Num: 1); |
| 8957 | EVT SubVT = SubV1.getValueType(); |
| 8958 | |
| 8959 | // We expect these to have been canonicalized to -1. |
| 8960 | assert(llvm::all_of(ShuffleMask, [&](int i) { |
| 8961 | return i < (int)VT.getVectorNumElements(); |
| 8962 | }) && "Unexpected shuffle index into UNDEF operand!" ); |
| 8963 | |
| 8964 | if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( |
| 8965 | ShuffleMask, VT: SubVT, WhichResult, isV_UNDEF)) { |
| 8966 | if (isV_UNDEF) |
| 8967 | SubV2 = SubV1; |
| 8968 | assert((WhichResult == 0) && |
| 8969 | "In-place shuffle of concat can only have one result!" ); |
| 8970 | SDValue Res = DAG.getNode(Opcode: ShuffleOpc, DL: dl, VTList: DAG.getVTList(VT1: SubVT, VT2: SubVT), |
| 8971 | N1: SubV1, N2: SubV2); |
| 8972 | return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT, N1: Res.getValue(R: 0), |
| 8973 | N2: Res.getValue(R: 1)); |
| 8974 | } |
| 8975 | } |
| 8976 | } |
| 8977 | |
| 8978 | if (ST->hasMVEIntegerOps() && EltSize <= 32) { |
| 8979 | if (SDValue V = LowerVECTOR_SHUFFLEUsingOneOff(Op, ShuffleMask, DAG)) |
| 8980 | return V; |
| 8981 | |
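    // A trunc-like mask selects alternating halves of double-width elements.
    // Reinterpret the inputs as vectors of elements twice as wide, shift right
    // by EltSize if the top halves are wanted, and narrow with MVETRUNC.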
| 8982 | for (bool Top : {false, true}) { |
| 8983 | for (bool SingleSource : {false, true}) { |
| 8984 | if (isTruncMask(M: ShuffleMask, VT, Top, SingleSource)) { |
| 8985 | MVT FromSVT = MVT::getIntegerVT(BitWidth: EltSize * 2); |
| 8986 | MVT FromVT = MVT::getVectorVT(VT: FromSVT, NumElements: ShuffleMask.size() / 2); |
| 8987 | SDValue Lo = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: FromVT, Operand: V1); |
| 8988 | SDValue Hi = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: FromVT, |
| 8989 | Operand: SingleSource ? V1 : V2); |
| 8990 | if (Top) { |
| 8991 | SDValue Amt = DAG.getConstant(Val: EltSize, DL: dl, VT: FromVT); |
| 8992 | Lo = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: FromVT, N1: Lo, N2: Amt); |
| 8993 | Hi = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: FromVT, N1: Hi, N2: Amt); |
| 8994 | } |
| 8995 | return DAG.getNode(Opcode: ARMISD::MVETRUNC, DL: dl, VT, N1: Lo, N2: Hi); |
| 8996 | } |
| 8997 | } |
| 8998 | } |
| 8999 | } |
| 9000 | |
| 9001 | // If the shuffle is not directly supported and it has 4 elements, use |
| 9002 | // the PerfectShuffle-generated table to synthesize it from other shuffles. |
| 9003 | unsigned NumElts = VT.getVectorNumElements(); |
| 9004 | if (NumElts == 4) { |
| 9005 | unsigned PFIndexes[4]; |
| 9006 | for (unsigned i = 0; i != 4; ++i) { |
| 9007 | if (ShuffleMask[i] < 0) |
| 9008 | PFIndexes[i] = 8; |
| 9009 | else |
| 9010 | PFIndexes[i] = ShuffleMask[i]; |
| 9011 | } |
| 9012 | |
| 9013 | // Compute the index in the perfect shuffle table. |
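|      | // Each PFIndex is a digit in the range 0-8 (0-7 select a source lane, 8 means
|      | // undef), so this is simply the four-digit base-9 number formed from the mask.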
| 9014 | unsigned PFTableIndex = |
| 9015 | PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; |
| 9016 | unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; |
| 9017 | unsigned Cost = (PFEntry >> 30); |
| 9018 | |
| 9019 | if (Cost <= 4) { |
| 9020 | if (ST->hasNEON()) |
| 9021 | return GeneratePerfectShuffle(PFEntry, LHS: V1, RHS: V2, DAG, dl); |
| 9022 | else if (isLegalMVEShuffleOp(PFEntry)) { |
| 9023 | unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); |
| 9024 | unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); |
| 9025 | unsigned PFEntryLHS = PerfectShuffleTable[LHSID]; |
| 9026 | unsigned PFEntryRHS = PerfectShuffleTable[RHSID]; |
| 9027 | if (isLegalMVEShuffleOp(PFEntry: PFEntryLHS) && isLegalMVEShuffleOp(PFEntry: PFEntryRHS)) |
| 9028 | return GeneratePerfectShuffle(PFEntry, LHS: V1, RHS: V2, DAG, dl); |
| 9029 | } |
| 9030 | } |
| 9031 | } |
| 9032 | |
| 9033 | // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. |
| 9034 | if (EltSize >= 32) { |
| 9035 | // Do the expansion with floating-point types, since that is what the VFP |
| 9036 | // registers are defined to use, and since i64 is not legal. |
| 9037 | EVT EltVT = EVT::getFloatingPointVT(BitWidth: EltSize); |
| 9038 | EVT VecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: EltVT, NumElements: NumElts); |
| 9039 | V1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecVT, Operand: V1); |
| 9040 | V2 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecVT, Operand: V2); |
| 9041 | SmallVector<SDValue, 8> Ops; |
| 9042 | for (unsigned i = 0; i < NumElts; ++i) { |
| 9043 | if (ShuffleMask[i] < 0) |
| 9044 | Ops.push_back(Elt: DAG.getUNDEF(VT: EltVT)); |
| 9045 | else |
| 9046 | Ops.push_back(Elt: DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, |
| 9047 | N1: ShuffleMask[i] < (int)NumElts ? V1 : V2, |
| 9048 | N2: DAG.getConstant(Val: ShuffleMask[i] & (NumElts-1), |
| 9049 | DL: dl, VT: MVT::i32))); |
| 9050 | } |
| 9051 | SDValue Val = DAG.getNode(Opcode: ARMISD::BUILD_VECTOR, DL: dl, VT: VecVT, Ops); |
| 9052 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Val); |
| 9053 | } |
| 9054 | |
| 9055 | if ((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) && |
| 9056 | isReverseMask(M: ShuffleMask, VT)) |
| 9057 | return LowerReverse_VECTOR_SHUFFLE(Op, DAG); |
| 9058 | |
| 9059 | if (ST->hasNEON() && VT == MVT::v8i8) |
| 9060 | if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG)) |
| 9061 | return NewOp; |
| 9062 | |
| 9063 | if (ST->hasMVEIntegerOps()) |
| 9064 | if (SDValue NewOp = LowerVECTOR_SHUFFLEUsingMovs(Op, ShuffleMask, DAG)) |
| 9065 | return NewOp; |
| 9066 | |
| 9067 | return SDValue(); |
| 9068 | } |
| 9069 | |
| 9070 | static SDValue LowerINSERT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG, |
| 9071 | const ARMSubtarget *ST) { |
| 9072 | EVT VecVT = Op.getOperand(i: 0).getValueType(); |
| 9073 | SDLoc dl(Op); |
| 9074 | |
| 9075 | assert(ST->hasMVEIntegerOps() && |
| 9076 | "LowerINSERT_VECTOR_ELT_i1 called without MVE!" ); |
| 9077 | |
| 9078 | SDValue Conv = |
| 9079 | DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::i32, Operand: Op->getOperand(Num: 0)); |
| 9080 | unsigned Lane = Op.getConstantOperandVal(i: 2); |
| 9081 | unsigned LaneWidth = |
| 9082 | getVectorTyFromPredicateVector(VT: VecVT).getScalarSizeInBits() / 8; |
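|      | // For example (illustrative): inserting into lane 2 of a v4i1 predicate gives
|      | // LaneWidth == 4 (four predicate bits per lane), so Mask == 0xF00 and the BFI
|      | // below overwrites exactly those four bits of the 16-bit predicate.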
| 9083 | unsigned Mask = ((1 << LaneWidth) - 1) << Lane * LaneWidth; |
| 9084 | SDValue Ext = DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL: dl, VT: MVT::i32, |
| 9085 | N1: Op.getOperand(i: 1), N2: DAG.getValueType(MVT::i1)); |
| 9086 | SDValue BFI = DAG.getNode(Opcode: ARMISD::BFI, DL: dl, VT: MVT::i32, N1: Conv, N2: Ext, |
| 9087 | N3: DAG.getConstant(Val: ~Mask, DL: dl, VT: MVT::i32)); |
| 9088 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: Op.getValueType(), Operand: BFI); |
| 9089 | } |
| 9090 | |
| 9091 | SDValue ARMTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, |
| 9092 | SelectionDAG &DAG) const { |
| 9093 | // INSERT_VECTOR_ELT is legal only for immediate indexes. |
| 9094 | SDValue Lane = Op.getOperand(i: 2); |
| 9095 | if (!isa<ConstantSDNode>(Val: Lane)) |
| 9096 | return SDValue(); |
| 9097 | |
| 9098 | SDValue Elt = Op.getOperand(i: 1); |
| 9099 | EVT EltVT = Elt.getValueType(); |
| 9100 | |
| 9101 | if (Subtarget->hasMVEIntegerOps() && |
| 9102 | Op.getValueType().getScalarSizeInBits() == 1) |
| 9103 | return LowerINSERT_VECTOR_ELT_i1(Op, DAG, ST: Subtarget); |
| 9104 | |
| 9105 | if (getTypeAction(Context&: *DAG.getContext(), VT: EltVT) == |
| 9106 | TargetLowering::TypeSoftPromoteHalf) { |
| 9107 | // INSERT_VECTOR_ELT doesn't want f16 operands promoting to f32, |
| 9108 | // but the type system will try to do that if we don't intervene. |
| 9109 | // Reinterpret any such vector-element insertion as one with the |
| 9110 | // corresponding integer types. |
| 9111 | |
| 9112 | SDLoc dl(Op); |
| 9113 | |
| 9114 | EVT IEltVT = MVT::getIntegerVT(BitWidth: EltVT.getScalarSizeInBits()); |
| 9115 | assert(getTypeAction(*DAG.getContext(), IEltVT) != |
| 9116 | TargetLowering::TypeSoftPromoteHalf); |
| 9117 | |
| 9118 | SDValue VecIn = Op.getOperand(i: 0); |
| 9119 | EVT VecVT = VecIn.getValueType(); |
| 9120 | EVT IVecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: IEltVT, |
| 9121 | NumElements: VecVT.getVectorNumElements()); |
| 9122 | |
| 9123 | SDValue IElt = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: IEltVT, Operand: Elt); |
| 9124 | SDValue IVecIn = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: IVecVT, Operand: VecIn); |
| 9125 | SDValue IVecOut = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: IVecVT, |
| 9126 | N1: IVecIn, N2: IElt, N3: Lane); |
| 9127 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecVT, Operand: IVecOut); |
| 9128 | } |
| 9129 | |
| 9130 | return Op; |
| 9131 | } |
| 9132 | |
| 9133 | static SDValue LowerEXTRACT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
| 9134 | const ARMSubtarget *ST) { |
| 9135 | EVT VecVT = Op.getOperand(i: 0).getValueType(); |
| 9136 | SDLoc dl(Op); |
| 9137 | |
| 9138 | assert(ST->hasMVEIntegerOps() && |
| 9139 | "LowerINSERT_VECTOR_ELT_i1 called without MVE!" ); |
| 9140 | |
| 9141 | SDValue Conv = |
| 9142 | DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::i32, Operand: Op->getOperand(Num: 0)); |
| 9143 | unsigned Lane = Op.getConstantOperandVal(i: 1); |
| 9144 | unsigned LaneWidth = |
| 9145 | getVectorTyFromPredicateVector(VT: VecVT).getScalarSizeInBits() / 8; |
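|      | // For example (illustrative): extracting lane 3 of a v8i1 predicate gives
|      | // LaneWidth == 2, so the 16-bit predicate value is shifted right by 6 and the
|      | // requested lane ends up in bit 0 of the result.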
| 9146 | SDValue Shift = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i32, N1: Conv, |
| 9147 | N2: DAG.getConstant(Val: Lane * LaneWidth, DL: dl, VT: MVT::i32)); |
| 9148 | return Shift; |
| 9149 | } |
| 9150 | |
| 9151 | static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG,
| 9152 | const ARMSubtarget *ST) { |
| 9153 | // EXTRACT_VECTOR_ELT is legal only for immediate indexes. |
| 9154 | SDValue Lane = Op.getOperand(i: 1); |
| 9155 | if (!isa<ConstantSDNode>(Val: Lane)) |
| 9156 | return SDValue(); |
| 9157 | |
| 9158 | SDValue Vec = Op.getOperand(i: 0); |
| 9159 | EVT VT = Vec.getValueType(); |
| 9160 | |
| 9161 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) |
| 9162 | return LowerEXTRACT_VECTOR_ELT_i1(Op, DAG, ST); |
| 9163 | |
| 9164 | if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) { |
| 9165 | SDLoc dl(Op); |
| 9166 | return DAG.getNode(Opcode: ARMISD::VGETLANEu, DL: dl, VT: MVT::i32, N1: Vec, N2: Lane); |
| 9167 | } |
| 9168 | |
| 9169 | return Op; |
| 9170 | } |
| 9171 | |
| 9172 | static SDValue LowerCONCAT_VECTORS_i1(SDValue Op, SelectionDAG &DAG, |
| 9173 | const ARMSubtarget *ST) { |
| 9174 | SDLoc dl(Op); |
| 9175 | assert(Op.getValueType().getScalarSizeInBits() == 1 && |
| 9176 | "Unexpected custom CONCAT_VECTORS lowering" ); |
| 9177 | assert(isPowerOf2_32(Op.getNumOperands()) && |
| 9178 | "Unexpected custom CONCAT_VECTORS lowering" ); |
| 9179 | assert(ST->hasMVEIntegerOps() && |
| 9180 | "CONCAT_VECTORS lowering only supported for MVE" ); |
| 9181 | |
| 9182 | auto ConcatPair = [&](SDValue V1, SDValue V2) { |
| 9183 | EVT Op1VT = V1.getValueType(); |
| 9184 | EVT Op2VT = V2.getValueType(); |
| 9185 | assert(Op1VT == Op2VT && "Operand types don't match!" ); |
| 9186 | assert((Op1VT == MVT::v2i1 || Op1VT == MVT::v4i1 || Op1VT == MVT::v8i1) && |
| 9187 | "Unexpected i1 concat operations!" ); |
| 9188 | EVT VT = Op1VT.getDoubleNumVectorElementsVT(Context&: *DAG.getContext()); |
| 9189 | |
| 9190 | SDValue NewV1 = PromoteMVEPredVector(dl, Pred: V1, VT: Op1VT, DAG); |
| 9191 | SDValue NewV2 = PromoteMVEPredVector(dl, Pred: V2, VT: Op2VT, DAG); |
| 9192 | |
| 9193 | // We now have Op1 + Op2 promoted to vectors of integers, where v8i1 gets |
| 9194 | // promoted to v8i16, etc. |
| 9195 | MVT ElType = |
| 9196 | getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT(); |
| 9197 | unsigned NumElts = 2 * Op1VT.getVectorNumElements(); |
| 9198 | |
| 9199 | EVT ConcatVT = MVT::getVectorVT(VT: ElType, NumElements: NumElts); |
| 9200 | if (Op1VT == MVT::v4i1 || Op1VT == MVT::v8i1) { |
| 9201 | // Use MVETRUNC to truncate the combined NewV1::NewV2 into the smaller |
| 9202 | // ConcatVT. |
| 9203 | SDValue ConVec = |
| 9204 | DAG.getNode(Opcode: ARMISD::MVETRUNC, DL: dl, VT: ConcatVT, N1: NewV1, N2: NewV2); |
| 9205 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: ConVec, |
| 9206 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
| 9207 | } |
| 9208 | |
| 9209 | // Extract the vector elements from Op1 and Op2 one by one and truncate them |
| 9210 | // to be the right size for the destination. For example, if Op1 is v4i1 |
| 9211 | // then the promoted vector is v4i32. The result of concatenation gives a |
| 9212 | // v8i1, which when promoted is v8i16. That means each i32 element from Op1 |
| 9213 | // needs truncating to i16 and inserting in the result. |
| 9214 | auto = [&DAG, &dl](SDValue NewV, SDValue ConVec, unsigned &j) { |
| 9215 | EVT NewVT = NewV.getValueType(); |
| 9216 | EVT ConcatVT = ConVec.getValueType(); |
| 9217 | unsigned ExtScale = 1; |
| 9218 | if (NewVT == MVT::v2f64) { |
| 9219 | NewV = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: NewV); |
| 9220 | ExtScale = 2; |
| 9221 | } |
| 9222 | for (unsigned i = 0, e = NewVT.getVectorNumElements(); i < e; i++, j++) { |
| 9223 | SDValue Elt = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::i32, N1: NewV, |
| 9224 | N2: DAG.getIntPtrConstant(Val: i * ExtScale, DL: dl)); |
| 9225 | ConVec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: ConcatVT, N1: ConVec, N2: Elt, |
| 9226 | N3: DAG.getConstant(Val: j, DL: dl, VT: MVT::i32)); |
| 9227 | } |
| 9228 | return ConVec; |
| 9229 | }; |
| 9230 | unsigned j = 0; |
| 9231 | SDValue ConVec = DAG.getNode(Opcode: ISD::UNDEF, DL: dl, VT: ConcatVT); |
| 9232 | ConVec = ExtractInto(NewV1, ConVec, j); |
| 9233 | ConVec = ExtractInto(NewV2, ConVec, j); |
| 9234 | |
| 9235 | // Now return the result of comparing the subvector with zero, which will |
| 9236 | // generate a real predicate, i.e. v4i1, v8i1 or v16i1. |
| 9237 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: ConVec, |
| 9238 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
| 9239 | }; |
| 9240 | |
| 9241 | // Concat each pair of subvectors and pack into the lower half of the array. |
| 9242 | SmallVector<SDValue> ConcatOps(Op->ops()); |
| 9243 | while (ConcatOps.size() > 1) { |
| 9244 | for (unsigned I = 0, E = ConcatOps.size(); I != E; I += 2) { |
| 9245 | SDValue V1 = ConcatOps[I]; |
| 9246 | SDValue V2 = ConcatOps[I + 1]; |
| 9247 | ConcatOps[I / 2] = ConcatPair(V1, V2); |
| 9248 | } |
| 9249 | ConcatOps.resize(N: ConcatOps.size() / 2); |
| 9250 | } |
| 9251 | return ConcatOps[0]; |
| 9252 | } |
| 9253 | |
| 9254 | static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG, |
| 9255 | const ARMSubtarget *ST) { |
| 9256 | EVT VT = Op->getValueType(ResNo: 0); |
| 9257 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) |
| 9258 | return LowerCONCAT_VECTORS_i1(Op, DAG, ST); |
| 9259 | |
| 9260 | // The only time a CONCAT_VECTORS operation can have legal types is when |
| 9261 | // two 64-bit vectors are concatenated to a 128-bit vector. |
| 9262 | assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && |
| 9263 | "unexpected CONCAT_VECTORS" ); |
| 9264 | SDLoc dl(Op); |
| 9265 | SDValue Val = DAG.getUNDEF(VT: MVT::v2f64); |
| 9266 | SDValue Op0 = Op.getOperand(i: 0); |
| 9267 | SDValue Op1 = Op.getOperand(i: 1); |
| 9268 | if (!Op0.isUndef()) |
| 9269 | Val = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: Val, |
| 9270 | N2: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f64, Operand: Op0), |
| 9271 | N3: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
| 9272 | if (!Op1.isUndef()) |
| 9273 | Val = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: Val, |
| 9274 | N2: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f64, Operand: Op1), |
| 9275 | N3: DAG.getIntPtrConstant(Val: 1, DL: dl)); |
| 9276 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: Op.getValueType(), Operand: Val); |
| 9277 | } |
| 9278 | |
| 9279 | static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG,
| 9280 | const ARMSubtarget *ST) { |
| 9281 | SDValue V1 = Op.getOperand(i: 0); |
| 9282 | SDValue V2 = Op.getOperand(i: 1); |
| 9283 | SDLoc dl(Op); |
| 9284 | EVT VT = Op.getValueType(); |
| 9285 | EVT Op1VT = V1.getValueType(); |
| 9286 | unsigned NumElts = VT.getVectorNumElements(); |
| 9287 | unsigned Index = V2->getAsZExtVal(); |
| 9288 | |
| 9289 | assert(VT.getScalarSizeInBits() == 1 && |
| 9290 | "Unexpected custom EXTRACT_SUBVECTOR lowering" ); |
| 9291 | assert(ST->hasMVEIntegerOps() && |
| 9292 | "EXTRACT_SUBVECTOR lowering only supported for MVE" ); |
| 9293 | |
| 9294 | SDValue NewV1 = PromoteMVEPredVector(dl, Pred: V1, VT: Op1VT, DAG); |
| 9295 | |
| 9296 | // We now have Op1 promoted to a vector of integers, where v8i1 gets |
| 9297 | // promoted to v8i16, etc. |
| 9298 | |
| 9299 | MVT ElType = getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT(); |
| 9300 | |
| 9301 | if (NumElts == 2) { |
| 9302 | EVT SubVT = MVT::v4i32; |
| 9303 | SDValue SubVec = DAG.getNode(Opcode: ISD::UNDEF, DL: dl, VT: SubVT); |
| 9304 | for (unsigned i = Index, j = 0; i < (Index + NumElts); i++, j += 2) { |
| 9305 | SDValue Elt = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::i32, N1: NewV1, |
| 9306 | N2: DAG.getIntPtrConstant(Val: i, DL: dl)); |
| 9307 | SubVec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: SubVT, N1: SubVec, N2: Elt, |
| 9308 | N3: DAG.getConstant(Val: j, DL: dl, VT: MVT::i32)); |
| 9309 | SubVec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: SubVT, N1: SubVec, N2: Elt, |
| 9310 | N3: DAG.getConstant(Val: j + 1, DL: dl, VT: MVT::i32)); |
| 9311 | } |
| 9312 | SDValue Cmp = DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT: MVT::v4i1, N1: SubVec, |
| 9313 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
| 9314 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::v2i1, Operand: Cmp); |
| 9315 | } |
| 9316 | |
| 9317 | EVT SubVT = MVT::getVectorVT(VT: ElType, NumElements: NumElts); |
| 9318 | SDValue SubVec = DAG.getNode(Opcode: ISD::UNDEF, DL: dl, VT: SubVT); |
| 9319 | for (unsigned i = Index, j = 0; i < (Index + NumElts); i++, j++) { |
| 9320 | SDValue Elt = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::i32, N1: NewV1, |
| 9321 | N2: DAG.getIntPtrConstant(Val: i, DL: dl)); |
| 9322 | SubVec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: SubVT, N1: SubVec, N2: Elt, |
| 9323 | N3: DAG.getConstant(Val: j, DL: dl, VT: MVT::i32)); |
| 9324 | } |
| 9325 | |
| 9326 | // Now return the result of comparing the subvector with zero, |
| 9327 | // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1. |
| 9328 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: SubVec, |
| 9329 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
| 9330 | } |
| 9331 | |
| 9332 | // Turn a truncate into a predicate (an i1 vector) into icmp(and(x, 1), 0). |
| 9333 | static SDValue LowerTruncatei1(SDNode *N, SelectionDAG &DAG, |
| 9334 | const ARMSubtarget *ST) { |
| 9335 | assert(ST->hasMVEIntegerOps() && "Expected MVE!" ); |
| 9336 | EVT VT = N->getValueType(ResNo: 0); |
| 9337 | assert((VT == MVT::v16i1 || VT == MVT::v8i1 || VT == MVT::v4i1) && |
| 9338 | "Expected a vector i1 type!" ); |
| 9339 | SDValue Op = N->getOperand(Num: 0); |
| 9340 | EVT FromVT = Op.getValueType(); |
| 9341 | SDLoc DL(N); |
| 9342 | |
| 9343 | SDValue And = |
| 9344 | DAG.getNode(Opcode: ISD::AND, DL, VT: FromVT, N1: Op, N2: DAG.getConstant(Val: 1, DL, VT: FromVT)); |
| 9345 | return DAG.getNode(Opcode: ISD::SETCC, DL, VT, N1: And, N2: DAG.getConstant(Val: 0, DL, VT: FromVT), |
| 9346 | N3: DAG.getCondCode(Cond: ISD::SETNE)); |
| 9347 | } |
| 9348 | |
| 9349 | static SDValue LowerTruncate(SDNode *N, SelectionDAG &DAG, |
| 9350 | const ARMSubtarget *Subtarget) { |
| 9351 | if (!Subtarget->hasMVEIntegerOps()) |
| 9352 | return SDValue(); |
| 9353 | |
| 9354 | EVT ToVT = N->getValueType(ResNo: 0); |
| 9355 | if (ToVT.getScalarType() == MVT::i1) |
| 9356 | return LowerTruncatei1(N, DAG, ST: Subtarget); |
| 9357 | |
| 9358 | // MVE does not have a single instruction to perform the truncation of a v4i32 |
| 9359 | // into the lower half of a v8i16, in the same way that a NEON vmovn would. |
| 9360 | // Most of the instructions in MVE follow the 'Beats' system, where moving |
| 9361 | // values from different lanes is usually something that the instructions |
| 9362 | // avoid. |
| 9363 | // |
| 9364 | // Instead it has top/bottom instructions such as VMOVLT/B and VMOVNT/B, |
| 9365 | // which take the top/bottom half of a larger lane and extend it (or do the
| 9366 | // opposite, truncating into the top/bottom lane from a larger lane). Note |
| 9367 | // that because of the way we widen lanes, a v4i16 is really a v4i32 using the |
| 9368 | // bottom 16bits from each vector lane. This works really well with T/B |
| 9369 | // instructions, but that doesn't extend to v8i32->v8i16 where the lanes need |
| 9370 | // to move order. |
| 9371 | // |
| 9372 | // But truncates and sext/zext are always going to be fairly common from llvm. |
| 9373 | // We have several options for how to deal with them: |
| 9374 | // - Wherever possible combine them into an instruction that makes them |
| 9375 | // "free". This includes loads/stores, which can perform the trunc as part |
| 9376 | // of the memory operation. Or certain shuffles that can be turned into |
| 9377 | // VMOVN/VMOVL. |
| 9378 | // - Lane Interleaving to transform blocks surrounded by ext/trunc. So |
| 9379 | // trunc(mul(sext(a), sext(b))) may become |
| 9380 | // VMOVNT(VMUL(VMOVLB(a), VMOVLB(b)), VMUL(VMOVLT(a), VMOVLT(b))). (Which in |
| 9381 | // this case can use VMULL). This is performed in the |
| 9382 | // MVELaneInterleavingPass. |
| 9383 | // - Otherwise we have an option. By default we would expand the |
| 9384 | // zext/sext/trunc into a series of lane extract/inserts going via GPR |
| 9385 | // registers. One for each vector lane in the vector. This can obviously be |
| 9386 | // very expensive. |
| 9387 | // - The other option is to use the fact that loads/store can extend/truncate |
| 9388 | // to turn a trunc into two truncating stack stores and a stack reload. This |
| 9389 | // becomes 3 back-to-back memory operations, but at least that is less than |
| 9390 | // all the insert/extracts. |
| 9391 | // |
| 9392 | // In order to do the last, we convert certain trunc's into MVETRUNC, which |
| 9393 | // are either optimized where they can be, or eventually lowered into stack |
| 9394 | // stores/loads. This prevents us from splitting a v8i16 trunc into two stores |
| 9395 | // too early, where other instructions would be better, and stops us from
| 9396 | // having to reconstruct multiple buildvector shuffles into loads/stores. |
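|      | //
|      | // For example (illustrative): a trunc of v8i32 to v8i16 is split below into its
|      | // two v4i32 halves and emitted as a single MVETRUNC(lo, hi) node, which is
|      | // later either folded into neighbouring operations or expanded via the stack.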
| 9397 | if (ToVT != MVT::v8i16 && ToVT != MVT::v16i8) |
| 9398 | return SDValue(); |
| 9399 | EVT FromVT = N->getOperand(Num: 0).getValueType(); |
| 9400 | if (FromVT != MVT::v8i32 && FromVT != MVT::v16i16) |
| 9401 | return SDValue(); |
| 9402 | |
| 9403 | SDValue Lo, Hi; |
| 9404 | std::tie(args&: Lo, args&: Hi) = DAG.SplitVectorOperand(N, OpNo: 0); |
| 9405 | SDLoc DL(N); |
| 9406 | return DAG.getNode(Opcode: ARMISD::MVETRUNC, DL, VT: ToVT, N1: Lo, N2: Hi); |
| 9407 | } |
| 9408 | |
| 9409 | static SDValue LowerVectorExtend(SDNode *N, SelectionDAG &DAG, |
| 9410 | const ARMSubtarget *Subtarget) { |
| 9411 | if (!Subtarget->hasMVEIntegerOps()) |
| 9412 | return SDValue(); |
| 9413 | |
| 9414 | // See LowerTruncate above for an explanation of MVEEXT/MVETRUNC. |
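|      | //
|      | // For example (illustrative): a sext of v16i8 to v16i32 becomes an
|      | // ARMISD::MVESEXT producing two v8i16 halves, each of which is sign-extended
|      | // again to v8i32 before being concatenated back into a v16i32.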
| 9415 | |
| 9416 | EVT ToVT = N->getValueType(ResNo: 0); |
| 9417 | if (ToVT != MVT::v16i32 && ToVT != MVT::v8i32 && ToVT != MVT::v16i16) |
| 9418 | return SDValue(); |
| 9419 | SDValue Op = N->getOperand(Num: 0); |
| 9420 | EVT FromVT = Op.getValueType(); |
| 9421 | if (FromVT != MVT::v8i16 && FromVT != MVT::v16i8) |
| 9422 | return SDValue(); |
| 9423 | |
| 9424 | SDLoc DL(N); |
| 9425 | EVT ExtVT = ToVT.getHalfNumVectorElementsVT(Context&: *DAG.getContext()); |
| 9426 | if (ToVT.getScalarType() == MVT::i32 && FromVT.getScalarType() == MVT::i8) |
| 9427 | ExtVT = MVT::v8i16; |
| 9428 | |
| 9429 | unsigned Opcode = |
| 9430 | N->getOpcode() == ISD::SIGN_EXTEND ? ARMISD::MVESEXT : ARMISD::MVEZEXT; |
| 9431 | SDValue Ext = DAG.getNode(Opcode, DL, VTList: DAG.getVTList(VT1: ExtVT, VT2: ExtVT), N: Op); |
| 9432 | SDValue Ext1 = Ext.getValue(R: 1); |
| 9433 | |
| 9434 | if (ToVT.getScalarType() == MVT::i32 && FromVT.getScalarType() == MVT::i8) { |
| 9435 | Ext = DAG.getNode(Opcode: N->getOpcode(), DL, VT: MVT::v8i32, Operand: Ext); |
| 9436 | Ext1 = DAG.getNode(Opcode: N->getOpcode(), DL, VT: MVT::v8i32, Operand: Ext1); |
| 9437 | } |
| 9438 | |
| 9439 | return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: ToVT, N1: Ext, N2: Ext1); |
| 9440 | } |
| 9441 | |
| 9442 | /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each |
| 9443 | /// element has been zero/sign-extended, depending on the isSigned parameter, |
| 9444 | /// from an integer type half its size. |
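|      | /// For example (illustrative), a v4i32 BUILD_VECTOR of constants
|      | /// <0, 100, 27, 42> qualifies: every element fits in the low 16 bits, so it
|      | /// can be treated as extended from v4i16 when forming a VMULL.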
| 9445 | static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, |
| 9446 | bool isSigned) { |
| 9447 | // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. |
| 9448 | EVT VT = N->getValueType(ResNo: 0); |
| 9449 | if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { |
| 9450 | SDNode *BVN = N->getOperand(Num: 0).getNode(); |
| 9451 | if (BVN->getValueType(ResNo: 0) != MVT::v4i32 || |
| 9452 | BVN->getOpcode() != ISD::BUILD_VECTOR) |
| 9453 | return false; |
| 9454 | unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; |
| 9455 | unsigned HiElt = 1 - LoElt; |
| 9456 | ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(Val: BVN->getOperand(Num: LoElt)); |
| 9457 | ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(Val: BVN->getOperand(Num: HiElt)); |
| 9458 | ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(Val: BVN->getOperand(Num: LoElt+2)); |
| 9459 | ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(Val: BVN->getOperand(Num: HiElt+2)); |
| 9460 | if (!Lo0 || !Hi0 || !Lo1 || !Hi1) |
| 9461 | return false; |
| 9462 | if (isSigned) { |
| 9463 | if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && |
| 9464 | Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) |
| 9465 | return true; |
| 9466 | } else { |
| 9467 | if (Hi0->isZero() && Hi1->isZero()) |
| 9468 | return true; |
| 9469 | } |
| 9470 | return false; |
| 9471 | } |
| 9472 | |
| 9473 | if (N->getOpcode() != ISD::BUILD_VECTOR) |
| 9474 | return false; |
| 9475 | |
| 9476 | for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { |
| 9477 | SDNode *Elt = N->getOperand(Num: i).getNode(); |
| 9478 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val: Elt)) { |
| 9479 | unsigned EltSize = VT.getScalarSizeInBits(); |
| 9480 | unsigned HalfSize = EltSize / 2; |
| 9481 | if (isSigned) { |
| 9482 | if (!isIntN(N: HalfSize, x: C->getSExtValue())) |
| 9483 | return false; |
| 9484 | } else { |
| 9485 | if (!isUIntN(N: HalfSize, x: C->getZExtValue())) |
| 9486 | return false; |
| 9487 | } |
| 9488 | continue; |
| 9489 | } |
| 9490 | return false; |
| 9491 | } |
| 9492 | |
| 9493 | return true; |
| 9494 | } |
| 9495 | |
| 9496 | /// isSignExtended - Check if a node is a vector value that is sign-extended |
| 9497 | /// or a constant BUILD_VECTOR with sign-extended elements. |
| 9498 | static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { |
| 9499 | if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) |
| 9500 | return true; |
| 9501 | if (isExtendedBUILD_VECTOR(N, DAG, isSigned: true)) |
| 9502 | return true; |
| 9503 | return false; |
| 9504 | } |
| 9505 | |
| 9506 | /// isZeroExtended - Check if a node is a vector value that is zero-extended (or |
| 9507 | /// any-extended) or a constant BUILD_VECTOR with zero-extended elements. |
| 9508 | static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { |
| 9509 | if (N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND || |
| 9510 | ISD::isZEXTLoad(N)) |
| 9511 | return true; |
| 9512 | if (isExtendedBUILD_VECTOR(N, DAG, isSigned: false)) |
| 9513 | return true; |
| 9514 | return false; |
| 9515 | } |
| 9516 | |
| 9517 | static EVT getExtensionTo64Bits(const EVT &OrigVT) { |
| 9518 | if (OrigVT.getSizeInBits() >= 64) |
| 9519 | return OrigVT; |
| 9520 | |
| 9521 | assert(OrigVT.isSimple() && "Expecting a simple value type" ); |
| 9522 | |
| 9523 | MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy; |
| 9524 | switch (OrigSimpleTy) { |
| 9525 | default: llvm_unreachable("Unexpected Vector Type" ); |
| 9526 | case MVT::v2i8: |
| 9527 | case MVT::v2i16: |
| 9528 | return MVT::v2i32; |
| 9529 | case MVT::v4i8: |
| 9530 | return MVT::v4i16; |
| 9531 | } |
| 9532 | } |
| 9533 | |
| 9534 | /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total |
| 9535 | /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL. |
| 9536 | /// We insert the required extension here to get the vector to fill a D register. |
| 9537 | static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG, |
| 9538 | const EVT &OrigTy, |
| 9539 | const EVT &ExtTy, |
| 9540 | unsigned ExtOpcode) { |
| 9541 | // The vector originally had a size of OrigTy. It was then extended to ExtTy. |
| 9542 | // We expect the ExtTy to be 128-bits total. If the OrigTy is less than |
| 9543 | // 64-bits we need to insert a new extension so that it will be 64-bits. |
| 9544 | assert(ExtTy.is128BitVector() && "Unexpected extension size" ); |
| 9545 | if (OrigTy.getSizeInBits() >= 64) |
| 9546 | return N; |
| 9547 | |
| 9548 | // Must extend size to at least 64 bits to be used as an operand for VMULL. |
| 9549 | EVT NewVT = getExtensionTo64Bits(OrigVT: OrigTy); |
| 9550 | |
| 9551 | return DAG.getNode(Opcode: ExtOpcode, DL: SDLoc(N), VT: NewVT, Operand: N); |
| 9552 | } |
| 9553 | |
| 9554 | /// SkipLoadExtensionForVMULL - return a load of the original vector size that |
| 9555 | /// does not do any sign/zero extension. If the original vector is less |
| 9556 | /// than 64 bits, an appropriate extension will be added after the load to |
| 9557 | /// reach a total size of 64 bits. We have to add the extension separately |
| 9558 | /// because ARM does not have a sign/zero extending load for vectors. |
| 9559 | static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) { |
| 9560 | EVT ExtendedTy = getExtensionTo64Bits(OrigVT: LD->getMemoryVT()); |
| 9561 | |
| 9562 | // The load already has the right type. |
| 9563 | if (ExtendedTy == LD->getMemoryVT()) |
| 9564 | return DAG.getLoad(VT: LD->getMemoryVT(), dl: SDLoc(LD), Chain: LD->getChain(), |
| 9565 | Ptr: LD->getBasePtr(), PtrInfo: LD->getPointerInfo(), Alignment: LD->getAlign(), |
| 9566 | MMOFlags: LD->getMemOperand()->getFlags()); |
| 9567 | |
| 9568 | // We need to create a zextload/sextload. We cannot just create a load |
| 9569 | // followed by a zext/sext node because LowerMUL is also run during normal
| 9570 | // operation legalization where we can't create illegal types. |
| 9571 | return DAG.getExtLoad(ExtType: LD->getExtensionType(), dl: SDLoc(LD), VT: ExtendedTy, |
| 9572 | Chain: LD->getChain(), Ptr: LD->getBasePtr(), PtrInfo: LD->getPointerInfo(), |
| 9573 | MemVT: LD->getMemoryVT(), Alignment: LD->getAlign(), |
| 9574 | MMOFlags: LD->getMemOperand()->getFlags()); |
| 9575 | } |
| 9576 | |
| 9577 | /// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND, |
| 9578 | /// ANY_EXTEND, extending load, or BUILD_VECTOR with extended elements, return |
| 9579 | /// the unextended value. The unextended vector should be 64 bits so that it can |
| 9580 | /// be used as an operand to a VMULL instruction. If the original vector size |
| 9581 | /// before extension is less than 64 bits we add an extension to resize
| 9582 | /// the vector to 64 bits. |
| 9583 | static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) { |
| 9584 | if (N->getOpcode() == ISD::SIGN_EXTEND || |
| 9585 | N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND) |
| 9586 | return AddRequiredExtensionForVMULL(N: N->getOperand(Num: 0), DAG, |
| 9587 | OrigTy: N->getOperand(Num: 0)->getValueType(ResNo: 0), |
| 9588 | ExtTy: N->getValueType(ResNo: 0), |
| 9589 | ExtOpcode: N->getOpcode()); |
| 9590 | |
| 9591 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: N)) { |
| 9592 | assert((ISD::isSEXTLoad(LD) || ISD::isZEXTLoad(LD)) && |
| 9593 | "Expected extending load" ); |
| 9594 | |
| 9595 | SDValue newLoad = SkipLoadExtensionForVMULL(LD, DAG); |
| 9596 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 1), To: newLoad.getValue(R: 1)); |
| 9597 | unsigned Opcode = ISD::isSEXTLoad(N: LD) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; |
| 9598 | SDValue extLoad = |
| 9599 | DAG.getNode(Opcode, DL: SDLoc(newLoad), VT: LD->getValueType(ResNo: 0), Operand: newLoad); |
| 9600 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 0), To: extLoad); |
| 9601 | |
| 9602 | return newLoad; |
| 9603 | } |
| 9604 | |
| 9605 | // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will |
| 9606 | // have been legalized as a BITCAST from v4i32. |
| 9607 | if (N->getOpcode() == ISD::BITCAST) { |
| 9608 | SDNode *BVN = N->getOperand(Num: 0).getNode(); |
| 9609 | assert(BVN->getOpcode() == ISD::BUILD_VECTOR && |
| 9610 | BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR" ); |
| 9611 | unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; |
| 9612 | return DAG.getBuildVector( |
| 9613 | VT: MVT::v2i32, DL: SDLoc(N), |
| 9614 | Ops: {BVN->getOperand(Num: LowElt), BVN->getOperand(Num: LowElt + 2)}); |
| 9615 | } |
| 9616 | // Construct a new BUILD_VECTOR with elements truncated to half the size. |
| 9617 | assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR" ); |
| 9618 | EVT VT = N->getValueType(ResNo: 0); |
| 9619 | unsigned EltSize = VT.getScalarSizeInBits() / 2; |
| 9620 | unsigned NumElts = VT.getVectorNumElements(); |
| 9621 | MVT TruncVT = MVT::getIntegerVT(BitWidth: EltSize); |
| 9622 | SmallVector<SDValue, 8> Ops; |
| 9623 | SDLoc dl(N); |
| 9624 | for (unsigned i = 0; i != NumElts; ++i) { |
| 9625 | const APInt &CInt = N->getConstantOperandAPInt(Num: i); |
| 9626 | // Element types smaller than 32 bits are not legal, so use i32 elements. |
| 9627 | // The values are implicitly truncated so sext vs. zext doesn't matter. |
| 9628 | Ops.push_back(Elt: DAG.getConstant(Val: CInt.zextOrTrunc(width: 32), DL: dl, VT: MVT::i32)); |
| 9629 | } |
| 9630 | return DAG.getBuildVector(VT: MVT::getVectorVT(VT: TruncVT, NumElements: NumElts), DL: dl, Ops); |
| 9631 | } |
| 9632 | |
| 9633 | static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { |
| 9634 | unsigned Opcode = N->getOpcode(); |
| 9635 | if (Opcode == ISD::ADD || Opcode == ISD::SUB) { |
| 9636 | SDNode *N0 = N->getOperand(Num: 0).getNode(); |
| 9637 | SDNode *N1 = N->getOperand(Num: 1).getNode(); |
| 9638 | return N0->hasOneUse() && N1->hasOneUse() && |
| 9639 | isSignExtended(N: N0, DAG) && isSignExtended(N: N1, DAG); |
| 9640 | } |
| 9641 | return false; |
| 9642 | } |
| 9643 | |
| 9644 | static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { |
| 9645 | unsigned Opcode = N->getOpcode(); |
| 9646 | if (Opcode == ISD::ADD || Opcode == ISD::SUB) { |
| 9647 | SDNode *N0 = N->getOperand(Num: 0).getNode(); |
| 9648 | SDNode *N1 = N->getOperand(Num: 1).getNode(); |
| 9649 | return N0->hasOneUse() && N1->hasOneUse() && |
| 9650 | isZeroExtended(N: N0, DAG) && isZeroExtended(N: N1, DAG); |
| 9651 | } |
| 9652 | return false; |
| 9653 | } |
| 9654 | |
| 9655 | static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { |
| 9656 | // Multiplications are only custom-lowered for 128-bit vectors so that |
| 9657 | // VMULL can be detected. Otherwise v2i64 multiplications are not legal. |
| 9658 | EVT VT = Op.getValueType(); |
| 9659 | assert(VT.is128BitVector() && VT.isInteger() && |
| 9660 | "unexpected type for custom-lowering ISD::MUL" ); |
| 9661 | SDNode *N0 = Op.getOperand(i: 0).getNode(); |
| 9662 | SDNode *N1 = Op.getOperand(i: 1).getNode(); |
| 9663 | unsigned NewOpc = 0; |
| 9664 | bool isMLA = false; |
| 9665 | bool isN0SExt = isSignExtended(N: N0, DAG); |
| 9666 | bool isN1SExt = isSignExtended(N: N1, DAG); |
| 9667 | if (isN0SExt && isN1SExt) |
| 9668 | NewOpc = ARMISD::VMULLs; |
| 9669 | else { |
| 9670 | bool isN0ZExt = isZeroExtended(N: N0, DAG); |
| 9671 | bool isN1ZExt = isZeroExtended(N: N1, DAG); |
| 9672 | if (isN0ZExt && isN1ZExt) |
| 9673 | NewOpc = ARMISD::VMULLu; |
| 9674 | else if (isN1SExt || isN1ZExt) { |
| 9675 | // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these |
| 9676 | // into (s/zext A * s/zext C) + (s/zext B * s/zext C) |
| 9677 | if (isN1SExt && isAddSubSExt(N: N0, DAG)) { |
| 9678 | NewOpc = ARMISD::VMULLs; |
| 9679 | isMLA = true; |
| 9680 | } else if (isN1ZExt && isAddSubZExt(N: N0, DAG)) { |
| 9681 | NewOpc = ARMISD::VMULLu; |
| 9682 | isMLA = true; |
| 9683 | } else if (isN0ZExt && isAddSubZExt(N: N1, DAG)) { |
| 9684 | std::swap(a&: N0, b&: N1); |
| 9685 | NewOpc = ARMISD::VMULLu; |
| 9686 | isMLA = true; |
| 9687 | } |
| 9688 | } |
| 9689 | |
| 9690 | if (!NewOpc) { |
| 9691 | if (VT == MVT::v2i64) |
| 9692 | // Fall through to expand this. It is not legal. |
| 9693 | return SDValue(); |
| 9694 | else |
| 9695 | // Other vector multiplications are legal. |
| 9696 | return Op; |
| 9697 | } |
| 9698 | } |
| 9699 | |
| 9700 | // Legalize to a VMULL instruction. |
| 9701 | SDLoc DL(Op); |
| 9702 | SDValue Op0; |
| 9703 | SDValue Op1 = SkipExtensionForVMULL(N: N1, DAG); |
| 9704 | if (!isMLA) { |
| 9705 | Op0 = SkipExtensionForVMULL(N: N0, DAG); |
| 9706 | assert(Op0.getValueType().is64BitVector() && |
| 9707 | Op1.getValueType().is64BitVector() && |
| 9708 | "unexpected types for extended operands to VMULL" ); |
| 9709 | return DAG.getNode(Opcode: NewOpc, DL, VT, N1: Op0, N2: Op1); |
| 9710 | } |
| 9711 | |
| 9712 | // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during |
| 9713 | // isel lowering to take advantage of no-stall back to back vmul + vmla. |
| 9714 | // vmull q0, d4, d6 |
| 9715 | // vmlal q0, d5, d6 |
| 9716 | // is faster than |
| 9717 | // vaddl q0, d4, d5 |
| 9718 | // vmovl q1, d6 |
| 9719 | // vmul q0, q0, q1 |
| 9720 | SDValue N00 = SkipExtensionForVMULL(N: N0->getOperand(Num: 0).getNode(), DAG); |
| 9721 | SDValue N01 = SkipExtensionForVMULL(N: N0->getOperand(Num: 1).getNode(), DAG); |
| 9722 | EVT Op1VT = Op1.getValueType(); |
| 9723 | return DAG.getNode(Opcode: N0->getOpcode(), DL, VT, |
| 9724 | N1: DAG.getNode(Opcode: NewOpc, DL, VT, |
| 9725 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL, VT: Op1VT, Operand: N00), N2: Op1), |
| 9726 | N2: DAG.getNode(Opcode: NewOpc, DL, VT, |
| 9727 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL, VT: Op1VT, Operand: N01), N2: Op1)); |
| 9728 | } |
| 9729 | |
| 9730 | static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl, |
| 9731 | SelectionDAG &DAG) { |
| 9732 | // TODO: Should this propagate fast-math-flags? |
| 9733 | |
| 9734 | // Convert to float |
| 9735 | // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); |
| 9736 | // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); |
| 9737 | X = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v4i32, Operand: X); |
| 9738 | Y = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v4i32, Operand: Y); |
| 9739 | X = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: X); |
| 9740 | Y = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: Y); |
| 9741 | // Get reciprocal estimate. |
| 9742 | // float4 recip = vrecpeq_f32(yf); |
| 9743 | Y = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
| 9744 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecpe, DL: dl, VT: MVT::i32), |
| 9745 | N2: Y); |
| 9746 | // Because char has a smaller range than uchar, we can actually get away |
| 9747 | // without any newton steps. This requires that we use a weird bias |
| 9748 | // of 0xb000, however (again, this has been exhaustively tested). |
| 9749 | // float4 result = as_float4(as_int4(xf*recip) + 0xb000); |
| 9750 | X = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1: X, N2: Y); |
| 9751 | X = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4i32, Operand: X); |
| 9752 | Y = DAG.getConstant(Val: 0xb000, DL: dl, VT: MVT::v4i32); |
| 9753 | X = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::v4i32, N1: X, N2: Y); |
| 9754 | X = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4f32, Operand: X); |
| 9755 | // Convert back to short. |
| 9756 | X = DAG.getNode(Opcode: ISD::FP_TO_SINT, DL: dl, VT: MVT::v4i32, Operand: X); |
| 9757 | X = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::v4i16, Operand: X); |
| 9758 | return X; |
| 9759 | } |
| 9760 | |
| 9761 | static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl, |
| 9762 | SelectionDAG &DAG) { |
| 9763 | // TODO: Should this propagate fast-math-flags? |
| 9764 | |
| 9765 | SDValue N2; |
| 9766 | // Convert to float. |
| 9767 | // float4 yf = vcvt_f32_s32(vmovl_s16(y)); |
| 9768 | // float4 xf = vcvt_f32_s32(vmovl_s16(x)); |
| 9769 | N0 = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v4i32, Operand: N0); |
| 9770 | N1 = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v4i32, Operand: N1); |
| 9771 | N0 = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: N0); |
| 9772 | N1 = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: N1); |
| 9773 | |
| 9774 | // Use reciprocal estimate and one refinement step. |
| 9775 | // float4 recip = vrecpeq_f32(yf); |
| 9776 | // recip *= vrecpsq_f32(yf, recip); |
| 9777 | N2 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
| 9778 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecpe, DL: dl, VT: MVT::i32), |
| 9779 | N2: N1); |
| 9780 | N1 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
| 9781 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecps, DL: dl, VT: MVT::i32), |
| 9782 | N2: N1, N3: N2); |
| 9783 | N2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1, N2); |
| 9784 | // Because short has a smaller range than ushort, we can actually get away |
| 9785 | // with only a single newton step. This requires that we use a weird bias |
| 9786 | // of 0x89, however (again, this has been exhaustively tested).
| 9787 | // float4 result = as_float4(as_int4(xf*recip) + 0x89); |
| 9788 | N0 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1: N0, N2); |
| 9789 | N0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4i32, Operand: N0); |
| 9790 | N1 = DAG.getConstant(Val: 0x89, DL: dl, VT: MVT::v4i32); |
| 9791 | N0 = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::v4i32, N1: N0, N2: N1); |
| 9792 | N0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4f32, Operand: N0); |
| 9793 | // Convert back to integer and return. |
| 9794 | // return vmovn_s32(vcvt_s32_f32(result)); |
| 9795 | N0 = DAG.getNode(Opcode: ISD::FP_TO_SINT, DL: dl, VT: MVT::v4i32, Operand: N0); |
| 9796 | N0 = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::v4i16, Operand: N0); |
| 9797 | return N0; |
| 9798 | } |
| 9799 | |
| 9800 | static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG, |
| 9801 | const ARMSubtarget *ST) { |
| 9802 | EVT VT = Op.getValueType(); |
| 9803 | assert((VT == MVT::v4i16 || VT == MVT::v8i8) && |
| 9804 | "unexpected type for custom-lowering ISD::SDIV" ); |
| 9805 | |
| 9806 | SDLoc dl(Op); |
| 9807 | SDValue N0 = Op.getOperand(i: 0); |
| 9808 | SDValue N1 = Op.getOperand(i: 1); |
| 9809 | SDValue N2, N3; |
| 9810 | |
| 9811 | if (VT == MVT::v8i8) { |
| 9812 | N0 = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v8i16, Operand: N0); |
| 9813 | N1 = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v8i16, Operand: N1); |
| 9814 | |
| 9815 | N2 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1: N0, |
| 9816 | N2: DAG.getIntPtrConstant(Val: 4, DL: dl)); |
| 9817 | N3 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1, |
| 9818 | N2: DAG.getIntPtrConstant(Val: 4, DL: dl)); |
| 9819 | N0 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1: N0, |
| 9820 | N2: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
| 9821 | N1 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1, |
| 9822 | N2: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
| 9823 | |
| 9824 | N0 = LowerSDIV_v4i8(X: N0, Y: N1, dl, DAG); // v4i16 |
| 9825 | N2 = LowerSDIV_v4i8(X: N2, Y: N3, dl, DAG); // v4i16 |
| 9826 | |
| 9827 | N0 = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT: MVT::v8i16, N1: N0, N2); |
| 9828 | N0 = LowerCONCAT_VECTORS(Op: N0, DAG, ST); |
| 9829 | |
| 9830 | N0 = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::v8i8, Operand: N0); |
| 9831 | return N0; |
| 9832 | } |
| 9833 | return LowerSDIV_v4i16(N0, N1, dl, DAG); |
| 9834 | } |
| 9835 | |
| 9836 | static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG, |
| 9837 | const ARMSubtarget *ST) { |
| 9838 | // TODO: Should this propagate fast-math-flags? |
| 9839 | EVT VT = Op.getValueType(); |
| 9840 | assert((VT == MVT::v4i16 || VT == MVT::v8i8) && |
| 9841 | "unexpected type for custom-lowering ISD::UDIV" ); |
| 9842 | |
| 9843 | SDLoc dl(Op); |
| 9844 | SDValue N0 = Op.getOperand(i: 0); |
| 9845 | SDValue N1 = Op.getOperand(i: 1); |
| 9846 | SDValue N2, N3; |
| 9847 | |
| 9848 | if (VT == MVT::v8i8) { |
| 9849 | N0 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, VT: MVT::v8i16, Operand: N0); |
| 9850 | N1 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, VT: MVT::v8i16, Operand: N1); |
| 9851 | |
| 9852 | N2 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1: N0, |
| 9853 | N2: DAG.getIntPtrConstant(Val: 4, DL: dl)); |
| 9854 | N3 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1, |
| 9855 | N2: DAG.getIntPtrConstant(Val: 4, DL: dl)); |
| 9856 | N0 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1: N0, |
| 9857 | N2: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
| 9858 | N1 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1, |
| 9859 | N2: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
| 9860 | |
| 9861 | N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 |
| 9862 | N2 = LowerSDIV_v4i16(N0: N2, N1: N3, dl, DAG); // v4i16 |
| 9863 | |
| 9864 | N0 = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT: MVT::v8i16, N1: N0, N2); |
| 9865 | N0 = LowerCONCAT_VECTORS(Op: N0, DAG, ST); |
| 9866 | |
| 9867 | N0 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v8i8, |
| 9868 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vqmovnsu, DL: dl, |
| 9869 | VT: MVT::i32), |
| 9870 | N2: N0); |
| 9871 | return N0; |
| 9872 | } |
| 9873 | |
| 9874 | // v4i16 udiv ... Convert to float.
| 9875 | // float4 yf = vcvt_f32_s32(vmovl_u16(y)); |
| 9876 | // float4 xf = vcvt_f32_s32(vmovl_u16(x)); |
| 9877 | N0 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, VT: MVT::v4i32, Operand: N0); |
| 9878 | N1 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, VT: MVT::v4i32, Operand: N1); |
| 9879 | N0 = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: N0); |
| 9880 | SDValue BN1 = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: N1); |
| 9881 | |
| 9882 | // Use reciprocal estimate and two refinement steps. |
| 9883 | // float4 recip = vrecpeq_f32(yf); |
| 9884 | // recip *= vrecpsq_f32(yf, recip); |
| 9885 | // recip *= vrecpsq_f32(yf, recip); |
| 9886 | N2 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
| 9887 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecpe, DL: dl, VT: MVT::i32), |
| 9888 | N2: BN1); |
| 9889 | N1 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
| 9890 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecps, DL: dl, VT: MVT::i32), |
| 9891 | N2: BN1, N3: N2); |
| 9892 | N2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1, N2); |
| 9893 | N1 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
| 9894 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecps, DL: dl, VT: MVT::i32), |
| 9895 | N2: BN1, N3: N2); |
| 9896 | N2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1, N2); |
| 9897 | // Simply multiplying by the reciprocal estimate can leave us a few ulps |
| 9898 | // too low, so we add 2 ulps (exhaustive testing shows that this is enough, |
| 9899 | // and that it will never cause us to return an answer too large). |
| 9900 | // float4 result = as_float4(as_int4(xf*recip) + 2); |
| 9901 | N0 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1: N0, N2); |
| 9902 | N0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4i32, Operand: N0); |
| 9903 | N1 = DAG.getConstant(Val: 2, DL: dl, VT: MVT::v4i32); |
| 9904 | N0 = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::v4i32, N1: N0, N2: N1); |
| 9905 | N0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4f32, Operand: N0); |
| 9906 | // Convert back to integer and return. |
| 9907 | // return vmovn_u32(vcvt_s32_f32(result)); |
| 9908 | N0 = DAG.getNode(Opcode: ISD::FP_TO_SINT, DL: dl, VT: MVT::v4i32, Operand: N0); |
| 9909 | N0 = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::v4i16, Operand: N0); |
| 9910 | return N0; |
| 9911 | } |
| 9912 | |
| 9913 | static SDValue LowerUADDSUBO_CARRY(SDValue Op, SelectionDAG &DAG) { |
| 9914 | SDNode *N = Op.getNode(); |
| 9915 | EVT VT = N->getValueType(ResNo: 0); |
| 9916 | SDVTList VTs = DAG.getVTList(VT1: VT, VT2: MVT::i32); |
| 9917 | |
| 9918 | SDValue Carry = Op.getOperand(i: 2); |
| 9919 | |
| 9920 | SDLoc DL(Op); |
| 9921 | |
| 9922 | SDValue Result; |
| 9923 | if (Op.getOpcode() == ISD::UADDO_CARRY) { |
| 9924 | // This converts the boolean value carry into the carry flag. |
| 9925 | Carry = ConvertBooleanCarryToCarryFlag(BoolCarry: Carry, DAG); |
| 9926 | |
| 9927 | // Do the addition proper using the carry flag we wanted. |
| 9928 | Result = DAG.getNode(Opcode: ARMISD::ADDE, DL, VTList: VTs, N1: Op.getOperand(i: 0), |
| 9929 | N2: Op.getOperand(i: 1), N3: Carry); |
| 9930 | |
| 9931 | // Now convert the carry flag into a boolean value. |
| 9932 | Carry = ConvertCarryFlagToBooleanCarry(Flags: Result.getValue(R: 1), VT, DAG); |
| 9933 | } else { |
| 9934 | // ARMISD::SUBE expects a carry not a borrow like ISD::USUBO_CARRY so we |
| 9935 | // have to invert the carry first. |
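|      | // E.g. (illustrative): an incoming borrow of 1 becomes a carry of 0 and vice
|      | // versa, matching the ARM convention where SBC subtracts the complement of
|      | // the carry flag.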
| 9936 | Carry = DAG.getNode(Opcode: ISD::SUB, DL, VT: MVT::i32, |
| 9937 | N1: DAG.getConstant(Val: 1, DL, VT: MVT::i32), N2: Carry); |
| 9938 | // This converts the boolean value carry into the carry flag. |
| 9939 | Carry = ConvertBooleanCarryToCarryFlag(BoolCarry: Carry, DAG); |
| 9940 | |
| 9941 | // Do the subtraction proper using the carry flag we wanted. |
| 9942 | Result = DAG.getNode(Opcode: ARMISD::SUBE, DL, VTList: VTs, N1: Op.getOperand(i: 0), |
| 9943 | N2: Op.getOperand(i: 1), N3: Carry); |
| 9944 | |
| 9945 | // Now convert the carry flag into a boolean value. |
| 9946 | Carry = ConvertCarryFlagToBooleanCarry(Flags: Result.getValue(R: 1), VT, DAG); |
| 9947 | // But the carry returned by ARMISD::SUBE is not a borrow as expected |
| 9948 | // by ISD::USUBO_CARRY, so compute 1 - C. |
| 9949 | Carry = DAG.getNode(Opcode: ISD::SUB, DL, VT: MVT::i32, |
| 9950 | N1: DAG.getConstant(Val: 1, DL, VT: MVT::i32), N2: Carry); |
| 9951 | } |
| 9952 | |
| 9953 | // Return both values. |
| 9954 | return DAG.getNode(Opcode: ISD::MERGE_VALUES, DL, VTList: N->getVTList(), N1: Result, N2: Carry); |
| 9955 | } |
| 9956 | |
| 9957 | SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const { |
| 9958 | assert(Subtarget->isTargetDarwin()); |
| 9959 | |
| 9960 | // For iOS, we want to call an alternative entry point: __sincos_stret, |
| 9961 | // return values are passed via sret. |
| 9962 | SDLoc dl(Op); |
| 9963 | SDValue Arg = Op.getOperand(i: 0); |
| 9964 | EVT ArgVT = Arg.getValueType(); |
| 9965 | Type *ArgTy = ArgVT.getTypeForEVT(Context&: *DAG.getContext()); |
| 9966 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 9967 | |
| 9968 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); |
| 9969 | |
| 9970 | // Pair of floats / doubles used to pass the result. |
| 9971 | Type *RetTy = StructType::get(elt1: ArgTy, elts: ArgTy); |
| 9972 | auto &DL = DAG.getDataLayout(); |
| 9973 | |
| 9974 | ArgListTy Args; |
| 9975 | bool ShouldUseSRet = getTM().isAPCS_ABI(); |
| 9976 | SDValue SRet; |
| 9977 | if (ShouldUseSRet) { |
| 9978 | // Create stack object for sret. |
| 9979 | const uint64_t ByteSize = DL.getTypeAllocSize(Ty: RetTy); |
| 9980 | const Align StackAlign = DL.getPrefTypeAlign(Ty: RetTy); |
| 9981 | int FrameIdx = MFI.CreateStackObject(Size: ByteSize, Alignment: StackAlign, isSpillSlot: false); |
| 9982 | SRet = DAG.getFrameIndex(FI: FrameIdx, VT: getPointerTy(DL)); |
| 9983 | |
| 9984 | ArgListEntry Entry; |
| 9985 | Entry.Node = SRet; |
| 9986 | Entry.Ty = PointerType::getUnqual(C&: RetTy->getContext()); |
| 9987 | Entry.IsSExt = false; |
| 9988 | Entry.IsZExt = false; |
| 9989 | Entry.IsSRet = true; |
| 9990 | Args.push_back(x: Entry); |
| 9991 | RetTy = Type::getVoidTy(C&: *DAG.getContext()); |
| 9992 | } |
| 9993 | |
| 9994 | ArgListEntry Entry; |
| 9995 | Entry.Node = Arg; |
| 9996 | Entry.Ty = ArgTy; |
| 9997 | Entry.IsSExt = false; |
| 9998 | Entry.IsZExt = false; |
| 9999 | Args.push_back(x: Entry); |
| 10000 | |
| 10001 | RTLIB::Libcall LC = |
| 10002 | (ArgVT == MVT::f64) ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32; |
| 10003 | const char *LibcallName = getLibcallName(Call: LC); |
| 10004 | CallingConv::ID CC = getLibcallCallingConv(Call: LC); |
| 10005 | SDValue Callee = DAG.getExternalSymbol(Sym: LibcallName, VT: getPointerTy(DL)); |
| 10006 | |
| 10007 | TargetLowering::CallLoweringInfo CLI(DAG); |
| 10008 | CLI.setDebugLoc(dl) |
| 10009 | .setChain(DAG.getEntryNode()) |
| 10010 | .setCallee(CC, ResultType: RetTy, Target: Callee, ArgsList: std::move(Args)) |
| 10011 | .setDiscardResult(ShouldUseSRet); |
| 10012 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); |
| 10013 | |
| 10014 | if (!ShouldUseSRet) |
| 10015 | return CallResult.first; |
| 10016 | |
| 10017 | SDValue LoadSin = |
| 10018 | DAG.getLoad(VT: ArgVT, dl, Chain: CallResult.second, Ptr: SRet, PtrInfo: MachinePointerInfo()); |
| 10019 | |
| 10020 | // Address of cos field. |
| 10021 | SDValue Add = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: SRet, |
| 10022 | N2: DAG.getIntPtrConstant(Val: ArgVT.getStoreSize(), DL: dl)); |
| 10023 | SDValue LoadCos = |
| 10024 | DAG.getLoad(VT: ArgVT, dl, Chain: LoadSin.getValue(R: 1), Ptr: Add, PtrInfo: MachinePointerInfo()); |
| 10025 | |
| 10026 | SDVTList Tys = DAG.getVTList(VT1: ArgVT, VT2: ArgVT); |
| 10027 | return DAG.getNode(Opcode: ISD::MERGE_VALUES, DL: dl, VTList: Tys, |
| 10028 | N1: LoadSin.getValue(R: 0), N2: LoadCos.getValue(R: 0)); |
| 10029 | } |
| 10030 | |
| 10031 | SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, |
| 10032 | bool Signed, |
| 10033 | SDValue &Chain) const { |
| 10034 | EVT VT = Op.getValueType(); |
| 10035 | assert((VT == MVT::i32 || VT == MVT::i64) && |
| 10036 | "unexpected type for custom lowering DIV" ); |
| 10037 | SDLoc dl(Op); |
| 10038 | |
| 10039 | const auto &DL = DAG.getDataLayout(); |
| 10040 | RTLIB::Libcall LC; |
| 10041 | if (Signed) |
| 10042 | LC = VT == MVT::i32 ? RTLIB::SDIVREM_I32 : RTLIB::SDIVREM_I64; |
| 10043 | else |
| 10044 | LC = VT == MVT::i32 ? RTLIB::UDIVREM_I32 : RTLIB::UDIVREM_I64; |
| 10045 | |
| 10046 | const char *Name = getLibcallName(Call: LC); |
| 10047 | SDValue ES = DAG.getExternalSymbol(Sym: Name, VT: getPointerTy(DL)); |
| 10048 | |
| 10049 | ARMTargetLowering::ArgListTy Args; |
| 10050 | |
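|      | // The Windows runtime division helpers take the divisor as their first
|      | // argument (assumed from the operand order used here), so push operand 1
|      | // (the divisor) before operand 0 (the dividend).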
| 10051 | for (auto AI : {1, 0}) { |
| 10052 | ArgListEntry Arg; |
| 10053 | Arg.Node = Op.getOperand(i: AI); |
| 10054 | Arg.Ty = Arg.Node.getValueType().getTypeForEVT(Context&: *DAG.getContext()); |
| 10055 | Args.push_back(x: Arg); |
| 10056 | } |
| 10057 | |
| 10058 | CallLoweringInfo CLI(DAG); |
| 10059 | CLI.setDebugLoc(dl) |
| 10060 | .setChain(Chain) |
| 10061 | .setCallee(CC: CallingConv::ARM_AAPCS_VFP, ResultType: VT.getTypeForEVT(Context&: *DAG.getContext()), |
| 10062 | Target: ES, ArgsList: std::move(Args)); |
| 10063 | |
| 10064 | return LowerCallTo(CLI).first; |
| 10065 | } |
| 10066 | |
| 10067 | // This is a code size optimisation: return the original SDIV node to |
| 10068 | // DAGCombiner when we don't want to expand SDIV into a sequence of |
| 10069 | // instructions, and an empty node otherwise which will cause the |
| 10070 | // SDIV to be expanded in DAGCombine. |
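|      | // For example (illustrative): at minsize on a core with hardware divide,
|      | // "sdiv i32 %x, 8" is kept as a single SDIV instruction here rather than
|      | // being expanded by DAGCombine into the longer shift/add sequence used for
|      | // signed powers of two.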
| 10071 | SDValue |
| 10072 | ARMTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, |
| 10073 | SelectionDAG &DAG, |
| 10074 | SmallVectorImpl<SDNode *> &Created) const { |
| 10075 | // TODO: Support SREM |
| 10076 | if (N->getOpcode() != ISD::SDIV) |
| 10077 | return SDValue(); |
| 10078 | |
| 10079 | const auto &ST = DAG.getSubtarget<ARMSubtarget>(); |
| 10080 | const bool MinSize = ST.hasMinSize(); |
| 10081 | const bool HasDivide = ST.isThumb() ? ST.hasDivideInThumbMode() |
| 10082 | : ST.hasDivideInARMMode(); |
| 10083 | |
| 10084 | // Don't touch vector types; rewriting this may lead to scalarizing |
| 10085 | // the int divs. |
| 10086 | if (N->getOperand(Num: 0).getValueType().isVector()) |
| 10087 | return SDValue(); |
| 10088 | |
| 10089 | // Bail if MinSize is not set, and also for both ARM and Thumb mode we need |
| 10090 | // hwdiv support for this to be really profitable. |
| 10091 | if (!(MinSize && HasDivide)) |
| 10092 | return SDValue(); |
| 10093 | |
| 10094 | // ARM mode is a bit simpler than Thumb: we can handle large power |
| 10095 | // of 2 immediates with 1 mov instruction; no further checks required, |
| 10096 | // just return the sdiv node. |
| 10097 | if (!ST.isThumb()) |
| 10098 | return SDValue(N, 0); |
| 10099 | |
| 10100 | // In Thumb mode, immediates larger than 128 need a wide 4-byte MOV, |
| 10101 | // and thus lose the code size benefits of a MOVS that requires only 2. |
| 10102 | // TargetTransformInfo and 'getIntImmCodeSizeCost' could be helpful here, |
| 10103 | // but as it's doing exactly this, it's not worth the trouble to get TTI. |
| 10104 | if (Divisor.sgt(RHS: 128)) |
| 10105 | return SDValue(); |
| 10106 | |
| 10107 | return SDValue(N, 0); |
| 10108 | } |
| 10109 | |
| 10110 | SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, |
| 10111 | bool Signed) const { |
| 10112 | assert(Op.getValueType() == MVT::i32 && |
| 10113 | "unexpected type for custom lowering DIV" ); |
| 10114 | SDLoc dl(Op); |
| 10115 | |
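|       | // Windows expects integer division to be guarded by an explicit
|       | // divide-by-zero check; WIN__DBZCHK traps when the divisor (operand 1) is
|       | // zero and produces the chain used to order the check before the libcall.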
| 10116 | SDValue DBZCHK = DAG.getNode(Opcode: ARMISD::WIN__DBZCHK, DL: dl, VT: MVT::Other, |
| 10117 | N1: DAG.getEntryNode(), N2: Op.getOperand(i: 1)); |
| 10118 | |
| 10119 | return LowerWindowsDIVLibCall(Op, DAG, Signed, Chain&: DBZCHK); |
| 10120 | } |
| 10121 | |
| 10122 | static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N, SDValue InChain) { |
| 10123 | SDLoc DL(N); |
| 10124 | SDValue Op = N->getOperand(Num: 1); |
| 10125 | if (N->getValueType(ResNo: 0) == MVT::i32) |
| 10126 | return DAG.getNode(Opcode: ARMISD::WIN__DBZCHK, DL, VT: MVT::Other, N1: InChain, N2: Op); |
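|       | // For a 64-bit divisor, OR the two 32-bit halves together so the check
|       | // only trips when the whole value is zero.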
| 10127 | SDValue Lo, Hi; |
| 10128 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: Op, DL, LoVT: MVT::i32, HiVT: MVT::i32); |
| 10129 | return DAG.getNode(Opcode: ARMISD::WIN__DBZCHK, DL, VT: MVT::Other, N1: InChain, |
| 10130 | N2: DAG.getNode(Opcode: ISD::OR, DL, VT: MVT::i32, N1: Lo, N2: Hi)); |
| 10131 | } |
| 10132 | |
| 10133 | void ARMTargetLowering::ExpandDIV_Windows( |
| 10134 | SDValue Op, SelectionDAG &DAG, bool Signed, |
| 10135 | SmallVectorImpl<SDValue> &Results) const { |
| 10136 | const auto &DL = DAG.getDataLayout(); |
| 10137 | |
| 10138 | assert(Op.getValueType() == MVT::i64 && |
| 10139 | "unexpected type for custom lowering DIV" ); |
| 10140 | SDLoc dl(Op); |
| 10141 | |
| 10142 | SDValue DBZCHK = WinDBZCheckDenominator(DAG, N: Op.getNode(), InChain: DAG.getEntryNode()); |
| 10143 | |
| 10144 | SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, Chain&: DBZCHK); |
| 10145 | |
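|       | // The libcall returns a single i64 value; split it back into two i32
|       | // halves and repack them with BUILD_PAIR so type legalization can use the
|       | // result in place of the original i64 node.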
| 10146 | SDValue Lower = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::i32, Operand: Result); |
| 10147 | SDValue Upper = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i64, N1: Result, |
| 10148 | N2: DAG.getConstant(Val: 32, DL: dl, VT: getPointerTy(DL))); |
| 10149 | Upper = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::i32, Operand: Upper); |
| 10150 | |
| 10151 | Results.push_back(Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Lower, N2: Upper)); |
| 10152 | } |
| 10153 | |
| 10154 | static SDValue LowerPredicateLoad(SDValue Op, SelectionDAG &DAG) { |
| 10155 | LoadSDNode *LD = cast<LoadSDNode>(Val: Op.getNode()); |
| 10156 | EVT MemVT = LD->getMemoryVT(); |
| 10157 | assert((MemVT == MVT::v2i1 || MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || |
| 10158 | MemVT == MVT::v16i1) && |
| 10159 | "Expected a predicate type!" ); |
| 10160 | assert(MemVT == Op.getValueType()); |
| 10161 | assert(LD->getExtensionType() == ISD::NON_EXTLOAD && |
| 10162 | "Expected a non-extending load" ); |
| 10163 | assert(LD->isUnindexed() && "Expected an unindexed load");
| 10164 | |
| 10165 | // The basic MVE VLDR on a v2i1/v4i1/v8i1 actually loads the entire 16-bit
| 10166 | // predicate, with the "v4i1" bits spread out over the 16 bits loaded. We
| 10167 | // need to make sure that the 8/4/2 bits are actually loaded into the correct
| 10168 | // place, which means loading the value and then shuffling the values into
| 10169 | // the bottom bits of the predicate.
| 10170 | // Equally, a VLDR for a v16i1 will actually load 32 bits (and so will be
| 10171 | // incorrect for BE).
| 10172 | // Also for BE, the rest of LLVM assumes the opposite bit order to what a
| 10173 | // natural VMSR(load) would produce, so the value needs to be bit-reversed.
| 10174 | |
| 10175 | SDLoc dl(Op); |
| 10176 | SDValue Load = DAG.getExtLoad( |
| 10177 | ExtType: ISD::EXTLOAD, dl, VT: MVT::i32, Chain: LD->getChain(), Ptr: LD->getBasePtr(), |
| 10178 | MemVT: EVT::getIntegerVT(Context&: *DAG.getContext(), BitWidth: MemVT.getSizeInBits()), |
| 10179 | MMO: LD->getMemOperand()); |
| 10180 | SDValue Val = Load; |
| 10181 | if (DAG.getDataLayout().isBigEndian()) |
| 10182 | Val = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i32, |
| 10183 | N1: DAG.getNode(Opcode: ISD::BITREVERSE, DL: dl, VT: MVT::i32, Operand: Load), |
| 10184 | N2: DAG.getConstant(Val: 32 - MemVT.getSizeInBits(), DL: dl, VT: MVT::i32)); |
| 10185 | SDValue Pred = DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::v16i1, Operand: Val); |
| 10186 | if (MemVT != MVT::v16i1) |
| 10187 | Pred = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MemVT, N1: Pred, |
| 10188 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 10189 | return DAG.getMergeValues(Ops: {Pred, Load.getValue(R: 1)}, dl); |
| 10190 | } |
| 10191 | |
| 10192 | void ARMTargetLowering::LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results, |
| 10193 | SelectionDAG &DAG) const { |
| 10194 | LoadSDNode *LD = cast<LoadSDNode>(Val: N); |
| 10195 | EVT MemVT = LD->getMemoryVT(); |
| 10196 | assert(LD->isUnindexed() && "Loads should be unindexed at this point." ); |
| 10197 | |
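|       | // A volatile i64 load with sufficient alignment can be turned into a single
|       | // LDRD on targets with the v5TE dual load/store instructions, so the access
|       | // is performed as one dual-register load rather than two separate LDRs.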
| 10198 | if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() && |
| 10199 | !Subtarget->isThumb1Only() && LD->isVolatile() && |
| 10200 | LD->getAlign() >= Subtarget->getDualLoadStoreAlignment()) { |
| 10201 | SDLoc dl(N); |
| 10202 | SDValue Result = DAG.getMemIntrinsicNode( |
| 10203 | Opcode: ARMISD::LDRD, dl, VTList: DAG.getVTList(VTs: {MVT::i32, MVT::i32, MVT::Other}), |
| 10204 | Ops: {LD->getChain(), LD->getBasePtr()}, MemVT, MMO: LD->getMemOperand()); |
| 10205 | SDValue Lo = Result.getValue(R: DAG.getDataLayout().isLittleEndian() ? 0 : 1); |
| 10206 | SDValue Hi = Result.getValue(R: DAG.getDataLayout().isLittleEndian() ? 1 : 0); |
| 10207 | SDValue Pair = DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Lo, N2: Hi); |
| 10208 | Results.append(IL: {Pair, Result.getValue(R: 2)}); |
| 10209 | } |
| 10210 | } |
| 10211 | |
| 10212 | static SDValue LowerPredicateStore(SDValue Op, SelectionDAG &DAG) { |
| 10213 | StoreSDNode *ST = cast<StoreSDNode>(Val: Op.getNode()); |
| 10214 | EVT MemVT = ST->getMemoryVT(); |
| 10215 | assert((MemVT == MVT::v2i1 || MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || |
| 10216 | MemVT == MVT::v16i1) && |
| 10217 | "Expected a predicate type!" ); |
| 10218 | assert(MemVT == ST->getValue().getValueType()); |
| 10219 | assert(!ST->isTruncatingStore() && "Expected a non-truncating store");
| 10220 | assert(ST->isUnindexed() && "Expected an unindexed store");
| 10221 | |
| 10222 | // Only store the v2i1 or v4i1 or v8i1 worth of bits, via a buildvector with |
| 10223 | // top bits unset and a scalar store. |
| 10224 | SDLoc dl(Op); |
| 10225 | SDValue Build = ST->getValue(); |
| 10226 | if (MemVT != MVT::v16i1) { |
| 10227 | SmallVector<SDValue, 16> Ops; |
| 10228 | for (unsigned I = 0; I < MemVT.getVectorNumElements(); I++) { |
| 10229 | unsigned Elt = DAG.getDataLayout().isBigEndian() |
| 10230 | ? MemVT.getVectorNumElements() - I - 1 |
| 10231 | : I; |
| 10232 | Ops.push_back(Elt: DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::i32, N1: Build, |
| 10233 | N2: DAG.getConstant(Val: Elt, DL: dl, VT: MVT::i32))); |
| 10234 | } |
| 10235 | for (unsigned I = MemVT.getVectorNumElements(); I < 16; I++) |
| 10236 | Ops.push_back(Elt: DAG.getUNDEF(VT: MVT::i32)); |
| 10237 | Build = DAG.getNode(Opcode: ISD::BUILD_VECTOR, DL: dl, VT: MVT::v16i1, Ops); |
| 10238 | } |
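|       | // Move the predicate into a GPR. On big-endian targets a full v16i1 ends up
|       | // with its bits in the reverse order, so bit-reverse the 32-bit value and
|       | // shift the 16 interesting bits back down before storing.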
| 10239 | SDValue GRP = DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::i32, Operand: Build); |
| 10240 | if (MemVT == MVT::v16i1 && DAG.getDataLayout().isBigEndian()) |
| 10241 | GRP = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i32, |
| 10242 | N1: DAG.getNode(Opcode: ISD::BITREVERSE, DL: dl, VT: MVT::i32, Operand: GRP), |
| 10243 | N2: DAG.getConstant(Val: 16, DL: dl, VT: MVT::i32)); |
| 10244 | return DAG.getTruncStore( |
| 10245 | Chain: ST->getChain(), dl, Val: GRP, Ptr: ST->getBasePtr(), |
| 10246 | SVT: EVT::getIntegerVT(Context&: *DAG.getContext(), BitWidth: MemVT.getSizeInBits()), |
| 10247 | MMO: ST->getMemOperand()); |
| 10248 | } |
| 10249 | |
| 10250 | static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG, |
| 10251 | const ARMSubtarget *Subtarget) { |
| 10252 | StoreSDNode *ST = cast<StoreSDNode>(Val: Op.getNode()); |
| 10253 | EVT MemVT = ST->getMemoryVT(); |
| 10254 | assert(ST->isUnindexed() && "Stores should be unindexed at this point." ); |
| 10255 | |
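|       | // Mirror the LDRD lowering of volatile i64 loads above: a sufficiently
|       | // aligned volatile i64 store becomes a single STRD of the two halves.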
| 10256 | if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() && |
| 10257 | !Subtarget->isThumb1Only() && ST->isVolatile() && |
| 10258 | ST->getAlign() >= Subtarget->getDualLoadStoreAlignment()) { |
| 10259 | SDNode *N = Op.getNode(); |
| 10260 | SDLoc dl(N); |
| 10261 | |
| 10262 | SDValue Lo = DAG.getNode( |
| 10263 | Opcode: ISD::EXTRACT_ELEMENT, DL: dl, VT: MVT::i32, N1: ST->getValue(), |
| 10264 | N2: DAG.getTargetConstant(Val: DAG.getDataLayout().isLittleEndian() ? 0 : 1, DL: dl, |
| 10265 | VT: MVT::i32)); |
| 10266 | SDValue Hi = DAG.getNode( |
| 10267 | Opcode: ISD::EXTRACT_ELEMENT, DL: dl, VT: MVT::i32, N1: ST->getValue(), |
| 10268 | N2: DAG.getTargetConstant(Val: DAG.getDataLayout().isLittleEndian() ? 1 : 0, DL: dl, |
| 10269 | VT: MVT::i32)); |
| 10270 | |
| 10271 | return DAG.getMemIntrinsicNode(Opcode: ARMISD::STRD, dl, VTList: DAG.getVTList(VT: MVT::Other), |
| 10272 | Ops: {ST->getChain(), Lo, Hi, ST->getBasePtr()}, |
| 10273 | MemVT, MMO: ST->getMemOperand()); |
| 10274 | } else if (Subtarget->hasMVEIntegerOps() && |
| 10275 | ((MemVT == MVT::v2i1 || MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || |
| 10276 | MemVT == MVT::v16i1))) { |
| 10277 | return LowerPredicateStore(Op, DAG); |
| 10278 | } |
| 10279 | |
| 10280 | return SDValue(); |
| 10281 | } |
| 10282 | |
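|       | // Return true if N is known to be a vector of all zeros, either as a
|       | // build_vector of zeros or as a VMOVIMM with a zero immediate.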
| 10283 | static bool isZeroVector(SDValue N) { |
| 10284 | return (ISD::isBuildVectorAllZeros(N: N.getNode()) || |
| 10285 | (N->getOpcode() == ARMISD::VMOVIMM && |
| 10286 | isNullConstant(V: N->getOperand(Num: 0)))); |
| 10287 | } |
| 10288 | |
| 10289 | static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) { |
| 10290 | MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Val: Op.getNode()); |
| 10291 | MVT VT = Op.getSimpleValueType(); |
| 10292 | SDValue Mask = N->getMask(); |
| 10293 | SDValue PassThru = N->getPassThru(); |
| 10294 | SDLoc dl(Op); |
| 10295 | |
| 10296 | if (isZeroVector(N: PassThru)) |
| 10297 | return Op; |
| 10298 | |
| 10299 | // MVE Masked loads use zero as the passthru value. Here we convert undef to |
| 10300 | // zero too, and other values are lowered to a select. |
| 10301 | SDValue ZeroVec = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT, |
| 10302 | Operand: DAG.getTargetConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 10303 | SDValue NewLoad = DAG.getMaskedLoad( |
| 10304 | VT, dl, Chain: N->getChain(), Base: N->getBasePtr(), Offset: N->getOffset(), Mask, Src0: ZeroVec, |
| 10305 | MemVT: N->getMemoryVT(), MMO: N->getMemOperand(), AM: N->getAddressingMode(), |
| 10306 | N->getExtensionType(), IsExpanding: N->isExpandingLoad()); |
| 10307 | SDValue Combo = NewLoad; |
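|       | // A pass-thru that is merely a bitcast or register cast of a zero vector is
|       | // already handled by the zeroing load, so no select is needed for it either.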
| 10308 | bool PassThruIsCastZero = (PassThru.getOpcode() == ISD::BITCAST || |
| 10309 | PassThru.getOpcode() == ARMISD::VECTOR_REG_CAST) && |
| 10310 | isZeroVector(N: PassThru->getOperand(Num: 0)); |
| 10311 | if (!PassThru.isUndef() && !PassThruIsCastZero) |
| 10312 | Combo = DAG.getNode(Opcode: ISD::VSELECT, DL: dl, VT, N1: Mask, N2: NewLoad, N3: PassThru); |
| 10313 | return DAG.getMergeValues(Ops: {Combo, NewLoad.getValue(R: 1)}, dl); |
| 10314 | } |
| 10315 | |
| 10316 | static SDValue LowerVecReduce(SDValue Op, SelectionDAG &DAG, |
| 10317 | const ARMSubtarget *ST) { |
| 10318 | if (!ST->hasMVEIntegerOps()) |
| 10319 | return SDValue(); |
| 10320 | |
| 10321 | SDLoc dl(Op); |
| 10322 | unsigned BaseOpcode = 0; |
| 10323 | switch (Op->getOpcode()) { |
| 10324 | default: llvm_unreachable("Expected VECREDUCE opcode" ); |
| 10325 | case ISD::VECREDUCE_FADD: BaseOpcode = ISD::FADD; break; |
| 10326 | case ISD::VECREDUCE_FMUL: BaseOpcode = ISD::FMUL; break; |
| 10327 | case ISD::VECREDUCE_MUL: BaseOpcode = ISD::MUL; break; |
| 10328 | case ISD::VECREDUCE_AND: BaseOpcode = ISD::AND; break; |
| 10329 | case ISD::VECREDUCE_OR: BaseOpcode = ISD::OR; break; |
| 10330 | case ISD::VECREDUCE_XOR: BaseOpcode = ISD::XOR; break; |
| 10331 | case ISD::VECREDUCE_FMAX: BaseOpcode = ISD::FMAXNUM; break; |
| 10332 | case ISD::VECREDUCE_FMIN: BaseOpcode = ISD::FMINNUM; break; |
| 10333 | } |
| 10334 | |
| 10335 | SDValue Op0 = Op->getOperand(Num: 0); |
| 10336 | EVT VT = Op0.getValueType(); |
| 10337 | EVT EltVT = VT.getVectorElementType(); |
| 10338 | unsigned NumElts = VT.getVectorNumElements(); |
| 10339 | unsigned NumActiveLanes = NumElts; |
| 10340 | |
| 10341 | assert((NumActiveLanes == 16 || NumActiveLanes == 8 || NumActiveLanes == 4 || |
| 10342 | NumActiveLanes == 2) && |
| 10343 | "Only expected a power of 2 vector size");
| 10344 | |
| 10345 | // Use Op(X, Rev(X)) until 4 items remain. Going down to 4 vector elements
| 10346 | // allows us to easily extract the elements from the remaining lanes.
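|       | // For example, a v16i8 reduction does Op(X, VREV16(X)) and then
|       | // Op(X, VREV32(X)), leaving the four partial results in lanes 0, 4, 8 and
|       | // 12, which are then extracted and combined as scalars below.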
| 10347 | while (NumActiveLanes > 4) { |
| 10348 | unsigned RevOpcode = NumActiveLanes == 16 ? ARMISD::VREV16 : ARMISD::VREV32; |
| 10349 | SDValue Rev = DAG.getNode(Opcode: RevOpcode, DL: dl, VT, Operand: Op0); |
| 10350 | Op0 = DAG.getNode(Opcode: BaseOpcode, DL: dl, VT, N1: Op0, N2: Rev); |
| 10351 | NumActiveLanes /= 2; |
| 10352 | } |
| 10353 | |
| 10354 | SDValue Res; |
| 10355 | if (NumActiveLanes == 4) { |
| 10356 | // The remaining 4 elements are reduced sequentially with the base operation.
| 10357 | SDValue Ext0 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
| 10358 | N2: DAG.getConstant(Val: 0 * NumElts / 4, DL: dl, VT: MVT::i32)); |
| 10359 | SDValue Ext1 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
| 10360 | N2: DAG.getConstant(Val: 1 * NumElts / 4, DL: dl, VT: MVT::i32)); |
| 10361 | SDValue Ext2 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
| 10362 | N2: DAG.getConstant(Val: 2 * NumElts / 4, DL: dl, VT: MVT::i32)); |
| 10363 | SDValue Ext3 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
| 10364 | N2: DAG.getConstant(Val: 3 * NumElts / 4, DL: dl, VT: MVT::i32)); |
| 10365 | SDValue Res0 = DAG.getNode(Opcode: BaseOpcode, DL: dl, VT: EltVT, N1: Ext0, N2: Ext1, Flags: Op->getFlags()); |
| 10366 | SDValue Res1 = DAG.getNode(Opcode: BaseOpcode, DL: dl, VT: EltVT, N1: Ext2, N2: Ext3, Flags: Op->getFlags()); |
| 10367 | Res = DAG.getNode(Opcode: BaseOpcode, DL: dl, VT: EltVT, N1: Res0, N2: Res1, Flags: Op->getFlags()); |
| 10368 | } else { |
| 10369 | SDValue Ext0 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
| 10370 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 10371 | SDValue Ext1 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
| 10372 | N2: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
| 10373 | Res = DAG.getNode(Opcode: BaseOpcode, DL: dl, VT: EltVT, N1: Ext0, N2: Ext1, Flags: Op->getFlags()); |
| 10374 | } |
| 10375 | |
| 10376 | // Result type may be wider than element type. |
| 10377 | if (EltVT != Op->getValueType(ResNo: 0)) |
| 10378 | Res = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL: dl, VT: Op->getValueType(ResNo: 0), Operand: Res); |
| 10379 | return Res; |
| 10380 | } |
| 10381 | |
| 10382 | static SDValue LowerVecReduceF(SDValue Op, SelectionDAG &DAG, |
| 10383 | const ARMSubtarget *ST) { |
| 10384 | if (!ST->hasMVEFloatOps()) |
| 10385 | return SDValue(); |
| 10386 | return LowerVecReduce(Op, DAG, ST); |
| 10387 | } |
| 10388 | |
| 10389 | static SDValue LowerVecReduceMinMax(SDValue Op, SelectionDAG &DAG, |
| 10390 | const ARMSubtarget *ST) { |
| 10391 | if (!ST->hasNEON()) |
| 10392 | return SDValue(); |
| 10393 | |
| 10394 | SDLoc dl(Op); |
| 10395 | SDValue Op0 = Op->getOperand(Num: 0); |
| 10396 | EVT VT = Op0.getValueType(); |
| 10397 | EVT EltVT = VT.getVectorElementType(); |
| 10398 | |
| 10399 | unsigned PairwiseIntrinsic = 0; |
| 10400 | switch (Op->getOpcode()) { |
| 10401 | default: |
| 10402 | llvm_unreachable("Expected VECREDUCE opcode" ); |
| 10403 | case ISD::VECREDUCE_UMIN: |
| 10404 | PairwiseIntrinsic = Intrinsic::arm_neon_vpminu; |
| 10405 | break; |
| 10406 | case ISD::VECREDUCE_UMAX: |
| 10407 | PairwiseIntrinsic = Intrinsic::arm_neon_vpmaxu; |
| 10408 | break; |
| 10409 | case ISD::VECREDUCE_SMIN: |
| 10410 | PairwiseIntrinsic = Intrinsic::arm_neon_vpmins; |
| 10411 | break; |
| 10412 | case ISD::VECREDUCE_SMAX: |
| 10413 | PairwiseIntrinsic = Intrinsic::arm_neon_vpmaxs; |
| 10414 | break; |
| 10415 | } |
| 10416 | SDValue PairwiseOp = DAG.getConstant(Val: PairwiseIntrinsic, DL: dl, VT: MVT::i32); |
| 10417 | |
| 10418 | unsigned NumElts = VT.getVectorNumElements(); |
| 10419 | unsigned NumActiveLanes = NumElts; |
| 10420 | |
| 10421 | assert((NumActiveLanes == 16 || NumActiveLanes == 8 || NumActiveLanes == 4 || |
| 10422 | NumActiveLanes == 2) && |
| 10423 | "Only expected a power of 2 vector size");
| 10424 | |
| 10425 | // Split 128-bit vectors, since vpmin/max takes 2 64-bit vectors. |
| 10426 | if (VT.is128BitVector()) { |
| 10427 | SDValue Lo, Hi; |
| 10428 | std::tie(args&: Lo, args&: Hi) = DAG.SplitVector(N: Op0, DL: dl); |
| 10429 | VT = Lo.getValueType(); |
| 10430 | Op0 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT, Ops: {PairwiseOp, Lo, Hi}); |
| 10431 | NumActiveLanes /= 2; |
| 10432 | } |
| 10433 | |
| 10434 | // Use pairwise reductions until one lane remains |
| 10435 | while (NumActiveLanes > 1) { |
| 10436 | Op0 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT, Ops: {PairwiseOp, Op0, Op0}); |
| 10437 | NumActiveLanes /= 2; |
| 10438 | } |
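|       | // After the pairwise steps lane 0 holds the overall result; the remaining
|       | // lanes only contain duplicated partial results and are ignored.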
| 10439 | |
| 10440 | SDValue Res = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
| 10441 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 10442 | |
| 10443 | // Result type may be wider than element type. |
| 10444 | if (EltVT != Op.getValueType()) { |
| 10445 | unsigned Extend = 0; |
| 10446 | switch (Op->getOpcode()) { |
| 10447 | default: |
| 10448 | llvm_unreachable("Expected VECREDUCE opcode" ); |
| 10449 | case ISD::VECREDUCE_UMIN: |
| 10450 | case ISD::VECREDUCE_UMAX: |
| 10451 | Extend = ISD::ZERO_EXTEND; |
| 10452 | break; |
| 10453 | case ISD::VECREDUCE_SMIN: |
| 10454 | case ISD::VECREDUCE_SMAX: |
| 10455 | Extend = ISD::SIGN_EXTEND; |
| 10456 | break; |
| 10457 | } |
| 10458 | Res = DAG.getNode(Opcode: Extend, DL: dl, VT: Op.getValueType(), Operand: Res); |
| 10459 | } |
| 10460 | return Res; |
| 10461 | } |
| 10462 | |
| 10463 | static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) { |
| 10464 | if (isStrongerThanMonotonic(AO: cast<AtomicSDNode>(Val&: Op)->getSuccessOrdering())) |
| 10465 | // Acquire/Release load/store is not legal for targets without a dmb or |
| 10466 | // equivalent available. |
| 10467 | return SDValue(); |
| 10468 | |
| 10469 | // Monotonic load/store is legal for all targets. |
| 10470 | return Op; |
| 10471 | } |
| 10472 | |
| 10473 | static void ReplaceREADCYCLECOUNTER(SDNode *N, |
| 10474 | SmallVectorImpl<SDValue> &Results, |
| 10475 | SelectionDAG &DAG, |
| 10476 | const ARMSubtarget *Subtarget) { |
| 10477 | SDLoc DL(N); |
| 10478 | // Under Power Management extensions, the cycle-count is: |
| 10479 | // mrc p15, #0, <Rt>, c9, c13, #0 |
| 10480 | SDValue Ops[] = { N->getOperand(Num: 0), // Chain |
| 10481 | DAG.getTargetConstant(Val: Intrinsic::arm_mrc, DL, VT: MVT::i32), |
| 10482 | DAG.getTargetConstant(Val: 15, DL, VT: MVT::i32), |
| 10483 | DAG.getTargetConstant(Val: 0, DL, VT: MVT::i32), |
| 10484 | DAG.getTargetConstant(Val: 9, DL, VT: MVT::i32), |
| 10485 | DAG.getTargetConstant(Val: 13, DL, VT: MVT::i32), |
| 10486 | DAG.getTargetConstant(Val: 0, DL, VT: MVT::i32) |
| 10487 | }; |
| 10488 | |
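|       | // The cycle counter read by the MRC is only 32 bits wide, so the i64
|       | // result is formed by pairing it with a zero high word.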
| 10489 | SDValue Cycles32 = DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL, |
| 10490 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::Other), Ops); |
| 10491 | Results.push_back(Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL, VT: MVT::i64, N1: Cycles32, |
| 10492 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32))); |
| 10493 | Results.push_back(Elt: Cycles32.getValue(R: 1)); |
| 10494 | } |
| 10495 | |
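|       | // Build an untyped GPRPair REG_SEQUENCE from two i32 values; this is used
|       | // to feed pseudo instructions such as CMP_SWAP_64 that take a register
|       | // pair operand.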
| 10496 | static SDValue createGPRPairNode2xi32(SelectionDAG &DAG, SDValue V0, |
| 10497 | SDValue V1) { |
| 10498 | SDLoc dl(V0.getNode()); |
| 10499 | SDValue RegClass = |
| 10500 | DAG.getTargetConstant(Val: ARM::GPRPairRegClassID, DL: dl, VT: MVT::i32); |
| 10501 | SDValue SubReg0 = DAG.getTargetConstant(Val: ARM::gsub_0, DL: dl, VT: MVT::i32); |
| 10502 | SDValue SubReg1 = DAG.getTargetConstant(Val: ARM::gsub_1, DL: dl, VT: MVT::i32); |
| 10503 | const SDValue Ops[] = {RegClass, V0, SubReg0, V1, SubReg1}; |
| 10504 | return SDValue( |
| 10505 | DAG.getMachineNode(Opcode: TargetOpcode::REG_SEQUENCE, dl, VT: MVT::Untyped, Ops), 0); |
| 10506 | } |
| 10507 | |
| 10508 | static SDValue createGPRPairNodei64(SelectionDAG &DAG, SDValue V) { |
| 10509 | SDLoc dl(V.getNode()); |
| 10510 | auto [VLo, VHi] = DAG.SplitScalar(N: V, DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
| 10511 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); |
| 10512 | if (isBigEndian) |
| 10513 | std::swap(a&: VLo, b&: VHi); |
| 10514 | return createGPRPairNode2xi32(DAG, V0: VLo, V1: VHi); |
| 10515 | } |
| 10516 | |
| 10517 | static void ReplaceCMP_SWAP_64Results(SDNode *N, |
| 10518 | SmallVectorImpl<SDValue> &Results, |
| 10519 | SelectionDAG &DAG) { |
| 10520 | assert(N->getValueType(0) == MVT::i64 && |
| 10521 | "AtomicCmpSwap on types smaller than 64 bits should be legal");
| 10522 | SDValue Ops[] = { |
| 10523 | createGPRPairNode2xi32(DAG, V0: N->getOperand(Num: 1), |
| 10524 | V1: DAG.getUNDEF(VT: MVT::i32)), // pointer, temp |
| 10525 | createGPRPairNodei64(DAG, V: N->getOperand(Num: 2)), // expected |
| 10526 | createGPRPairNodei64(DAG, V: N->getOperand(Num: 3)), // new |
| 10527 | N->getOperand(Num: 0), // chain in |
| 10528 | }; |
| 10529 | SDNode *CmpSwap = DAG.getMachineNode( |
| 10530 | Opcode: ARM::CMP_SWAP_64, dl: SDLoc(N), |
| 10531 | VTs: DAG.getVTList(VT1: MVT::Untyped, VT2: MVT::Untyped, VT3: MVT::Other), Ops); |
| 10532 | |
| 10533 | MachineMemOperand *MemOp = cast<MemSDNode>(Val: N)->getMemOperand(); |
| 10534 | DAG.setNodeMemRefs(N: cast<MachineSDNode>(Val: CmpSwap), NewMemRefs: {MemOp}); |
| 10535 | |
| 10536 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); |
| 10537 | |
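|       | // Extract the two halves of the result from the returned register pair,
|       | // swapping which subregister is the low word on big-endian targets.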
| 10538 | SDValue Lo = |
| 10539 | DAG.getTargetExtractSubreg(SRIdx: isBigEndian ? ARM::gsub_1 : ARM::gsub_0, |
| 10540 | DL: SDLoc(N), VT: MVT::i32, Operand: SDValue(CmpSwap, 0)); |
| 10541 | SDValue Hi = |
| 10542 | DAG.getTargetExtractSubreg(SRIdx: isBigEndian ? ARM::gsub_0 : ARM::gsub_1, |
| 10543 | DL: SDLoc(N), VT: MVT::i32, Operand: SDValue(CmpSwap, 0)); |
| 10544 | Results.push_back(Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: SDLoc(N), VT: MVT::i64, N1: Lo, N2: Hi)); |
| 10545 | Results.push_back(Elt: SDValue(CmpSwap, 2)); |
| 10546 | } |
| 10547 | |
| 10548 | SDValue ARMTargetLowering::LowerFSETCC(SDValue Op, SelectionDAG &DAG) const { |
| 10549 | SDLoc dl(Op); |
| 10550 | EVT VT = Op.getValueType(); |
| 10551 | SDValue Chain = Op.getOperand(i: 0); |
| 10552 | SDValue LHS = Op.getOperand(i: 1); |
| 10553 | SDValue RHS = Op.getOperand(i: 2); |
| 10554 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 3))->get(); |
| 10555 | bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS; |
| 10556 | |
| 10557 | // If we don't have instructions for this float type then soften to a
| 10558 | // libcall and use SETCC instead.
| 10559 | if (isUnsupportedFloatingType(VT: LHS.getValueType())) { |
| 10560 | softenSetCCOperands(DAG, VT: LHS.getValueType(), NewLHS&: LHS, NewRHS&: RHS, CCCode&: CC, DL: dl, OldLHS: LHS, OldRHS: RHS, |
| 10561 | Chain, IsSignaling); |
| 10562 | if (!RHS.getNode()) { |
| 10563 | RHS = DAG.getConstant(Val: 0, DL: dl, VT: LHS.getValueType()); |
| 10564 | CC = ISD::SETNE; |
| 10565 | } |
| 10566 | SDValue Result = DAG.getNode(Opcode: ISD::SETCC, DL: dl, VT, N1: LHS, N2: RHS, |
| 10567 | N3: DAG.getCondCode(Cond: CC)); |
| 10568 | return DAG.getMergeValues(Ops: {Result, Chain}, dl); |
| 10569 | } |
| 10570 | |
| 10571 | ARMCC::CondCodes CondCode, CondCode2; |
| 10572 | FPCCToARMCC(CC, CondCode, CondCode2); |
| 10573 | |
| 10574 | SDValue True = DAG.getConstant(Val: 1, DL: dl, VT); |
| 10575 | SDValue False = DAG.getConstant(Val: 0, DL: dl, VT); |
| 10576 | SDValue ARMcc = DAG.getConstant(Val: CondCode, DL: dl, VT: MVT::i32); |
| 10577 | SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, Signaling: IsSignaling); |
| 10578 | SDValue Result = getCMOV(dl, VT, FalseVal: False, TrueVal: True, ARMcc, Flags: Cmp, DAG); |
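|       | // Some IEEE predicates map to two ARM condition codes (e.g. "one" and
|       | // "ueq"); in that case fold the second condition in with another CMOV on
|       | // the same flags.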
| 10579 | if (CondCode2 != ARMCC::AL) { |
| 10580 | ARMcc = DAG.getConstant(Val: CondCode2, DL: dl, VT: MVT::i32); |
| 10581 | Result = getCMOV(dl, VT, FalseVal: Result, TrueVal: True, ARMcc, Flags: Cmp, DAG); |
| 10582 | } |
| 10583 | return DAG.getMergeValues(Ops: {Result, Chain}, dl); |
| 10584 | } |
| 10585 | |
| 10586 | SDValue ARMTargetLowering::LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const { |
| 10587 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); |
| 10588 | |
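|       | // SPONENTRY is the value of SP on entry to the function; model it as the
|       | // address of a fixed 4-byte object at offset 0 from the incoming SP.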
| 10589 | EVT VT = getPointerTy(DL: DAG.getDataLayout()); |
| 10590 | int FI = MFI.CreateFixedObject(Size: 4, SPOffset: 0, IsImmutable: false); |
| 10591 | return DAG.getFrameIndex(FI, VT); |
| 10592 | } |
| 10593 | |
| 10594 | SDValue ARMTargetLowering::LowerFP_TO_BF16(SDValue Op, |
| 10595 | SelectionDAG &DAG) const { |
| 10596 | SDLoc DL(Op); |
| 10597 | MakeLibCallOptions CallOptions; |
| 10598 | MVT SVT = Op.getOperand(i: 0).getSimpleValueType(); |
| 10599 | RTLIB::Libcall LC = RTLIB::getFPROUND(OpVT: SVT, RetVT: MVT::bf16); |
| 10600 | SDValue Res = |
| 10601 | makeLibCall(DAG, LC, RetVT: MVT::f32, Ops: Op.getOperand(i: 0), CallOptions, dl: DL).first; |
| 10602 | return DAG.getBitcast(VT: MVT::i32, V: Res); |
| 10603 | } |
| 10604 | |
| 10605 | SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { |
| 10606 | LLVM_DEBUG(dbgs() << "Lowering node: "; Op.dump());
| 10607 | switch (Op.getOpcode()) { |
| 10608 | default: llvm_unreachable("Don't know how to custom lower this!" ); |
| 10609 | case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG); |
| 10610 | case ISD::ConstantPool: return LowerConstantPool(Op, DAG); |
| 10611 | case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); |
| 10612 | case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); |
| 10613 | case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); |
| 10614 | case ISD::SELECT: return LowerSELECT(Op, DAG); |
| 10615 | case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); |
| 10616 | case ISD::BRCOND: return LowerBRCOND(Op, DAG); |
| 10617 | case ISD::BR_CC: return LowerBR_CC(Op, DAG); |
| 10618 | case ISD::BR_JT: return LowerBR_JT(Op, DAG); |
| 10619 | case ISD::VASTART: return LowerVASTART(Op, DAG); |
| 10620 | case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget); |
| 10621 | case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); |
| 10622 | case ISD::SINT_TO_FP: |
| 10623 | case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); |
| 10624 | case ISD::STRICT_FP_TO_SINT: |
| 10625 | case ISD::STRICT_FP_TO_UINT: |
| 10626 | case ISD::FP_TO_SINT: |
| 10627 | case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); |
| 10628 | case ISD::FP_TO_SINT_SAT: |
| 10629 | case ISD::FP_TO_UINT_SAT: return LowerFP_TO_INT_SAT(Op, DAG, Subtarget); |
| 10630 | case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); |
| 10631 | case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); |
| 10632 | case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); |
| 10633 | case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); |
| 10634 | case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); |
| 10635 | case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG); |
| 10636 | case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG, Subtarget); |
| 10637 | case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, |
| 10638 | Subtarget); |
| 10639 | case ISD::BITCAST: return ExpandBITCAST(N: Op.getNode(), DAG, Subtarget); |
| 10640 | case ISD::SHL: |
| 10641 | case ISD::SRL: |
| 10642 | case ISD::SRA: return LowerShift(N: Op.getNode(), DAG, ST: Subtarget); |
| 10643 | case ISD::SREM: return LowerREM(N: Op.getNode(), DAG); |
| 10644 | case ISD::UREM: return LowerREM(N: Op.getNode(), DAG); |
| 10645 | case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); |
| 10646 | case ISD::SRL_PARTS: |
| 10647 | case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); |
| 10648 | case ISD::CTTZ: |
| 10649 | case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(N: Op.getNode(), DAG, ST: Subtarget); |
| 10650 | case ISD::CTPOP: return LowerCTPOP(N: Op.getNode(), DAG, ST: Subtarget); |
| 10651 | case ISD::SETCC: return LowerVSETCC(Op, DAG, ST: Subtarget); |
| 10652 | case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG); |
| 10653 | case ISD::ConstantFP: return LowerConstantFP(Op, DAG, ST: Subtarget); |
| 10654 | case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, ST: Subtarget); |
| 10655 | case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG, ST: Subtarget); |
| 10656 | case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG, ST: Subtarget); |
| 10657 | case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); |
| 10658 | case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG, ST: Subtarget); |
| 10659 | case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG, ST: Subtarget); |
| 10660 | case ISD::TRUNCATE: return LowerTruncate(N: Op.getNode(), DAG, Subtarget); |
| 10661 | case ISD::SIGN_EXTEND: |
| 10662 | case ISD::ZERO_EXTEND: return LowerVectorExtend(N: Op.getNode(), DAG, Subtarget); |
| 10663 | case ISD::GET_ROUNDING: return LowerGET_ROUNDING(Op, DAG); |
| 10664 | case ISD::SET_ROUNDING: return LowerSET_ROUNDING(Op, DAG); |
| 10665 | case ISD::SET_FPMODE: |
| 10666 | return LowerSET_FPMODE(Op, DAG); |
| 10667 | case ISD::RESET_FPMODE: |
| 10668 | return LowerRESET_FPMODE(Op, DAG); |
| 10669 | case ISD::MUL: return LowerMUL(Op, DAG); |
| 10670 | case ISD::SDIV: |
| 10671 | if (Subtarget->isTargetWindows() && !Op.getValueType().isVector()) |
| 10672 | return LowerDIV_Windows(Op, DAG, /* Signed */ true); |
| 10673 | return LowerSDIV(Op, DAG, ST: Subtarget); |
| 10674 | case ISD::UDIV: |
| 10675 | if (Subtarget->isTargetWindows() && !Op.getValueType().isVector()) |
| 10676 | return LowerDIV_Windows(Op, DAG, /* Signed */ false); |
| 10677 | return LowerUDIV(Op, DAG, ST: Subtarget); |
| 10678 | case ISD::UADDO_CARRY: |
| 10679 | case ISD::USUBO_CARRY: |
| 10680 | return LowerUADDSUBO_CARRY(Op, DAG); |
| 10681 | case ISD::SADDO: |
| 10682 | case ISD::SSUBO: |
| 10683 | return LowerSignedALUO(Op, DAG); |
| 10684 | case ISD::UADDO: |
| 10685 | case ISD::USUBO: |
| 10686 | return LowerUnsignedALUO(Op, DAG); |
| 10687 | case ISD::SADDSAT: |
| 10688 | case ISD::SSUBSAT: |
| 10689 | case ISD::UADDSAT: |
| 10690 | case ISD::USUBSAT: |
| 10691 | return LowerADDSUBSAT(Op, DAG, Subtarget); |
| 10692 | case ISD::LOAD: |
| 10693 | return LowerPredicateLoad(Op, DAG); |
| 10694 | case ISD::STORE: |
| 10695 | return LowerSTORE(Op, DAG, Subtarget); |
| 10696 | case ISD::MLOAD: |
| 10697 | return LowerMLOAD(Op, DAG); |
| 10698 | case ISD::VECREDUCE_MUL: |
| 10699 | case ISD::VECREDUCE_AND: |
| 10700 | case ISD::VECREDUCE_OR: |
| 10701 | case ISD::VECREDUCE_XOR: |
| 10702 | return LowerVecReduce(Op, DAG, ST: Subtarget); |
| 10703 | case ISD::VECREDUCE_FADD: |
| 10704 | case ISD::VECREDUCE_FMUL: |
| 10705 | case ISD::VECREDUCE_FMIN: |
| 10706 | case ISD::VECREDUCE_FMAX: |
| 10707 | return LowerVecReduceF(Op, DAG, ST: Subtarget); |
| 10708 | case ISD::VECREDUCE_UMIN: |
| 10709 | case ISD::VECREDUCE_UMAX: |
| 10710 | case ISD::VECREDUCE_SMIN: |
| 10711 | case ISD::VECREDUCE_SMAX: |
| 10712 | return LowerVecReduceMinMax(Op, DAG, ST: Subtarget); |
| 10713 | case ISD::ATOMIC_LOAD: |
| 10714 | case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG); |
| 10715 | case ISD::FSINCOS: return LowerFSINCOS(Op, DAG); |
| 10716 | case ISD::SDIVREM: |
| 10717 | case ISD::UDIVREM: return LowerDivRem(Op, DAG); |
| 10718 | case ISD::DYNAMIC_STACKALLOC: |
| 10719 | if (Subtarget->isTargetWindows()) |
| 10720 | return LowerDYNAMIC_STACKALLOC(Op, DAG); |
| 10721 | llvm_unreachable("Don't know how to custom lower this!" ); |
| 10722 | case ISD::STRICT_FP_ROUND: |
| 10723 | case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG); |
| 10724 | case ISD::STRICT_FP_EXTEND: |
| 10725 | case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); |
| 10726 | case ISD::STRICT_FSETCC: |
| 10727 | case ISD::STRICT_FSETCCS: return LowerFSETCC(Op, DAG); |
| 10728 | case ISD::SPONENTRY: |
| 10729 | return LowerSPONENTRY(Op, DAG); |
| 10730 | case ISD::FP_TO_BF16: |
| 10731 | return LowerFP_TO_BF16(Op, DAG); |
| 10732 | case ARMISD::WIN__DBZCHK: return SDValue(); |
| 10733 | } |
| 10734 | } |
| 10735 | |
| 10736 | static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results, |
| 10737 | SelectionDAG &DAG) { |
| 10738 | unsigned IntNo = N->getConstantOperandVal(Num: 0); |
| 10739 | unsigned Opc = 0; |
| 10740 | if (IntNo == Intrinsic::arm_smlald) |
| 10741 | Opc = ARMISD::SMLALD; |
| 10742 | else if (IntNo == Intrinsic::arm_smlaldx) |
| 10743 | Opc = ARMISD::SMLALDX; |
| 10744 | else if (IntNo == Intrinsic::arm_smlsld) |
| 10745 | Opc = ARMISD::SMLSLD; |
| 10746 | else if (IntNo == Intrinsic::arm_smlsldx) |
| 10747 | Opc = ARMISD::SMLSLDX; |
| 10748 | else |
| 10749 | return; |
| 10750 | |
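|       | // These intrinsics accumulate into a 64-bit value that the ARMISD nodes
|       | // model as two i32 results: split the incoming i64 accumulator, emit the
|       | // node, and pair the two result halves back into an i64 with BUILD_PAIR.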
| 10751 | SDLoc dl(N); |
| 10752 | SDValue Lo, Hi; |
| 10753 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: N->getOperand(Num: 3), DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
| 10754 | |
| 10755 | SDValue LongMul = DAG.getNode(Opcode: Opc, DL: dl, |
| 10756 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
| 10757 | N1: N->getOperand(Num: 1), N2: N->getOperand(Num: 2), |
| 10758 | N3: Lo, N4: Hi); |
| 10759 | Results.push_back(Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, |
| 10760 | N1: LongMul.getValue(R: 0), N2: LongMul.getValue(R: 1))); |
| 10761 | } |
| 10762 | |
| 10763 | /// ReplaceNodeResults - Replace the results of node with an illegal result |
| 10764 | /// type with new values built out of custom code. |
| 10765 | void ARMTargetLowering::ReplaceNodeResults(SDNode *N, |
| 10766 | SmallVectorImpl<SDValue> &Results, |
| 10767 | SelectionDAG &DAG) const { |
| 10768 | SDValue Res; |
| 10769 | switch (N->getOpcode()) { |
| 10770 | default: |
| 10771 | llvm_unreachable("Don't know how to custom expand this!" ); |
| 10772 | case ISD::READ_REGISTER: |
| 10773 | ExpandREAD_REGISTER(N, Results, DAG); |
| 10774 | break; |
| 10775 | case ISD::BITCAST: |
| 10776 | Res = ExpandBITCAST(N, DAG, Subtarget); |
| 10777 | break; |
| 10778 | case ISD::SRL: |
| 10779 | case ISD::SRA: |
| 10780 | case ISD::SHL: |
| 10781 | Res = Expand64BitShift(N, DAG, ST: Subtarget); |
| 10782 | break; |
| 10783 | case ISD::SREM: |
| 10784 | case ISD::UREM: |
| 10785 | Res = LowerREM(N, DAG); |
| 10786 | break; |
| 10787 | case ISD::SDIVREM: |
| 10788 | case ISD::UDIVREM: |
| 10789 | Res = LowerDivRem(Op: SDValue(N, 0), DAG); |
| 10790 | assert(Res.getNumOperands() == 2 && "DivRem needs two values" ); |
| 10791 | Results.push_back(Elt: Res.getValue(R: 0)); |
| 10792 | Results.push_back(Elt: Res.getValue(R: 1)); |
| 10793 | return; |
| 10794 | case ISD::SADDSAT: |
| 10795 | case ISD::SSUBSAT: |
| 10796 | case ISD::UADDSAT: |
| 10797 | case ISD::USUBSAT: |
| 10798 | Res = LowerADDSUBSAT(Op: SDValue(N, 0), DAG, Subtarget); |
| 10799 | break; |
| 10800 | case ISD::READCYCLECOUNTER: |
| 10801 | ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget); |
| 10802 | return; |
| 10803 | case ISD::UDIV: |
| 10804 | case ISD::SDIV: |
| 10805 | assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows" ); |
| 10806 | return ExpandDIV_Windows(Op: SDValue(N, 0), DAG, Signed: N->getOpcode() == ISD::SDIV, |
| 10807 | Results); |
| 10808 | case ISD::ATOMIC_CMP_SWAP: |
| 10809 | ReplaceCMP_SWAP_64Results(N, Results, DAG); |
| 10810 | return; |
| 10811 | case ISD::INTRINSIC_WO_CHAIN: |
| 10812 | return ReplaceLongIntrinsic(N, Results, DAG); |
| 10813 | case ISD::LOAD: |
| 10814 | LowerLOAD(N, Results, DAG); |
| 10815 | break; |
| 10816 | case ISD::TRUNCATE: |
| 10817 | Res = LowerTruncate(N, DAG, Subtarget); |
| 10818 | break; |
| 10819 | case ISD::SIGN_EXTEND: |
| 10820 | case ISD::ZERO_EXTEND: |
| 10821 | Res = LowerVectorExtend(N, DAG, Subtarget); |
| 10822 | break; |
| 10823 | case ISD::FP_TO_SINT_SAT: |
| 10824 | case ISD::FP_TO_UINT_SAT: |
| 10825 | Res = LowerFP_TO_INT_SAT(Op: SDValue(N, 0), DAG, Subtarget); |
| 10826 | break; |
| 10827 | } |
| 10828 | if (Res.getNode()) |
| 10829 | Results.push_back(Elt: Res); |
| 10830 | } |
| 10831 | |
| 10832 | //===----------------------------------------------------------------------===// |
| 10833 | // ARM Scheduler Hooks |
| 10834 | //===----------------------------------------------------------------------===// |
| 10835 | |
| 10836 | /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and |
| 10837 | /// registers the function context. |
| 10838 | void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI, |
| 10839 | MachineBasicBlock *MBB, |
| 10840 | MachineBasicBlock *DispatchBB, |
| 10841 | int FI) const { |
| 10842 | assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && |
| 10843 | "ROPI/RWPI not currently supported with SjLj" ); |
| 10844 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 10845 | DebugLoc dl = MI.getDebugLoc(); |
| 10846 | MachineFunction *MF = MBB->getParent(); |
| 10847 | MachineRegisterInfo *MRI = &MF->getRegInfo(); |
| 10848 | MachineConstantPool *MCP = MF->getConstantPool(); |
| 10849 | ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); |
| 10850 | const Function &F = MF->getFunction(); |
| 10851 | |
| 10852 | bool isThumb = Subtarget->isThumb(); |
| 10853 | bool isThumb2 = Subtarget->isThumb2(); |
| 10854 | |
| 10855 | unsigned PCLabelId = AFI->createPICLabelUId(); |
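|       | // The PC reads 8 bytes past the current instruction in ARM mode and 4 in
|       | // Thumb, so bias the PIC label offset accordingly.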
| 10856 | unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8; |
| 10857 | ARMConstantPoolValue *CPV = |
| 10858 | ARMConstantPoolMBB::Create(C&: F.getContext(), mbb: DispatchBB, ID: PCLabelId, PCAdj); |
| 10859 | unsigned CPI = MCP->getConstantPoolIndex(V: CPV, Alignment: Align(4)); |
| 10860 | |
| 10861 | const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass |
| 10862 | : &ARM::GPRRegClass; |
| 10863 | |
| 10864 | // Grab constant pool and fixed stack memory operands. |
| 10865 | MachineMemOperand *CPMMO = |
| 10866 | MF->getMachineMemOperand(PtrInfo: MachinePointerInfo::getConstantPool(MF&: *MF), |
| 10867 | F: MachineMemOperand::MOLoad, Size: 4, BaseAlignment: Align(4)); |
| 10868 | |
| 10869 | MachineMemOperand *FIMMOSt = |
| 10870 | MF->getMachineMemOperand(PtrInfo: MachinePointerInfo::getFixedStack(MF&: *MF, FI), |
| 10871 | F: MachineMemOperand::MOStore, Size: 4, BaseAlignment: Align(4)); |
| 10872 | |
| 10873 | // Load the address of the dispatch MBB into the jump buffer. |
| 10874 | if (isThumb2) { |
| 10875 | // Incoming value: jbuf |
| 10876 | // ldr.n r5, LCPI1_1 |
| 10877 | // orr r5, r5, #1 |
| 10878 | // add r5, pc |
| 10879 | // str r5, [$jbuf, #+4] ; &jbuf[1] |
| 10880 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 10881 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::t2LDRpci), DestReg: NewVReg1) |
| 10882 | .addConstantPoolIndex(Idx: CPI) |
| 10883 | .addMemOperand(MMO: CPMMO) |
| 10884 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10885 | // Set the low bit because of thumb mode. |
| 10886 | Register NewVReg2 = MRI->createVirtualRegister(RegClass: TRC); |
| 10887 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::t2ORRri), DestReg: NewVReg2) |
| 10888 | .addReg(RegNo: NewVReg1, flags: RegState::Kill) |
| 10889 | .addImm(Val: 0x01) |
| 10890 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 10891 | .add(MO: condCodeOp()); |
| 10892 | Register NewVReg3 = MRI->createVirtualRegister(RegClass: TRC); |
| 10893 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tPICADD), DestReg: NewVReg3) |
| 10894 | .addReg(RegNo: NewVReg2, flags: RegState::Kill) |
| 10895 | .addImm(Val: PCLabelId); |
| 10896 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::t2STRi12)) |
| 10897 | .addReg(RegNo: NewVReg3, flags: RegState::Kill) |
| 10898 | .addFrameIndex(Idx: FI) |
| 10899 | .addImm(Val: 36) // &jbuf[1] :: pc |
| 10900 | .addMemOperand(MMO: FIMMOSt) |
| 10901 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10902 | } else if (isThumb) { |
| 10903 | // Incoming value: jbuf |
| 10904 | // ldr.n r1, LCPI1_4 |
| 10905 | // add r1, pc |
| 10906 | // mov r2, #1 |
| 10907 | // orrs r1, r2 |
| 10908 | // add r2, $jbuf, #+4 ; &jbuf[1] |
| 10909 | // str r1, [r2] |
| 10910 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 10911 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDRpci), DestReg: NewVReg1) |
| 10912 | .addConstantPoolIndex(Idx: CPI) |
| 10913 | .addMemOperand(MMO: CPMMO) |
| 10914 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10915 | Register NewVReg2 = MRI->createVirtualRegister(RegClass: TRC); |
| 10916 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tPICADD), DestReg: NewVReg2) |
| 10917 | .addReg(RegNo: NewVReg1, flags: RegState::Kill) |
| 10918 | .addImm(Val: PCLabelId); |
| 10919 | // Set the low bit because of thumb mode. |
| 10920 | Register NewVReg3 = MRI->createVirtualRegister(RegClass: TRC); |
| 10921 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tMOVi8), DestReg: NewVReg3) |
| 10922 | .addReg(RegNo: ARM::CPSR, flags: RegState::Define) |
| 10923 | .addImm(Val: 1) |
| 10924 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10925 | Register NewVReg4 = MRI->createVirtualRegister(RegClass: TRC); |
| 10926 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tORR), DestReg: NewVReg4) |
| 10927 | .addReg(RegNo: ARM::CPSR, flags: RegState::Define) |
| 10928 | .addReg(RegNo: NewVReg2, flags: RegState::Kill) |
| 10929 | .addReg(RegNo: NewVReg3, flags: RegState::Kill) |
| 10930 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10931 | Register NewVReg5 = MRI->createVirtualRegister(RegClass: TRC); |
| 10932 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tADDframe), DestReg: NewVReg5) |
| 10933 | .addFrameIndex(Idx: FI) |
| 10934 | .addImm(Val: 36); // &jbuf[1] :: pc |
| 10935 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tSTRi)) |
| 10936 | .addReg(RegNo: NewVReg4, flags: RegState::Kill) |
| 10937 | .addReg(RegNo: NewVReg5, flags: RegState::Kill) |
| 10938 | .addImm(Val: 0) |
| 10939 | .addMemOperand(MMO: FIMMOSt) |
| 10940 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10941 | } else { |
| 10942 | // Incoming value: jbuf |
| 10943 | // ldr r1, LCPI1_1 |
| 10944 | // add r1, pc, r1 |
| 10945 | // str r1, [$jbuf, #+4] ; &jbuf[1] |
| 10946 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 10947 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::LDRi12), DestReg: NewVReg1) |
| 10948 | .addConstantPoolIndex(Idx: CPI) |
| 10949 | .addImm(Val: 0) |
| 10950 | .addMemOperand(MMO: CPMMO) |
| 10951 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10952 | Register NewVReg2 = MRI->createVirtualRegister(RegClass: TRC); |
| 10953 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::PICADD), DestReg: NewVReg2) |
| 10954 | .addReg(RegNo: NewVReg1, flags: RegState::Kill) |
| 10955 | .addImm(Val: PCLabelId) |
| 10956 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10957 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::STRi12)) |
| 10958 | .addReg(RegNo: NewVReg2, flags: RegState::Kill) |
| 10959 | .addFrameIndex(Idx: FI) |
| 10960 | .addImm(Val: 36) // &jbuf[1] :: pc |
| 10961 | .addMemOperand(MMO: FIMMOSt) |
| 10962 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10963 | } |
| 10964 | } |
| 10965 | |
| 10966 | void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, |
| 10967 | MachineBasicBlock *MBB) const { |
| 10968 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 10969 | DebugLoc dl = MI.getDebugLoc(); |
| 10970 | MachineFunction *MF = MBB->getParent(); |
| 10971 | MachineRegisterInfo *MRI = &MF->getRegInfo(); |
| 10972 | MachineFrameInfo &MFI = MF->getFrameInfo(); |
| 10973 | int FI = MFI.getFunctionContextIndex(); |
| 10974 | |
| 10975 | const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass |
| 10976 | : &ARM::GPRnopcRegClass; |
| 10977 | |
| 10978 | // Get a mapping of the call site numbers to all of the landing pads they're |
| 10979 | // associated with. |
| 10980 | DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2>> CallSiteNumToLPad; |
| 10981 | unsigned MaxCSNum = 0; |
| 10982 | for (MachineBasicBlock &BB : *MF) { |
| 10983 | if (!BB.isEHPad()) |
| 10984 | continue; |
| 10985 | |
| 10986 | // FIXME: We should assert that the EH_LABEL is the first MI in the landing |
| 10987 | // pad. |
| 10988 | for (MachineInstr &II : BB) { |
| 10989 | if (!II.isEHLabel()) |
| 10990 | continue; |
| 10991 | |
| 10992 | MCSymbol *Sym = II.getOperand(i: 0).getMCSymbol(); |
| 10993 | if (!MF->hasCallSiteLandingPad(Sym)) continue; |
| 10994 | |
| 10995 | SmallVectorImpl<unsigned> &CallSiteIdxs = MF->getCallSiteLandingPad(Sym); |
| 10996 | for (unsigned Idx : CallSiteIdxs) { |
| 10997 | CallSiteNumToLPad[Idx].push_back(Elt: &BB); |
| 10998 | MaxCSNum = std::max(a: MaxCSNum, b: Idx); |
| 10999 | } |
| 11000 | break; |
| 11001 | } |
| 11002 | } |
| 11003 | |
| 11004 | // Get an ordered list of the machine basic blocks for the jump table. |
| 11005 | std::vector<MachineBasicBlock*> LPadList; |
| 11006 | SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs; |
| 11007 | LPadList.reserve(n: CallSiteNumToLPad.size()); |
| 11008 | for (unsigned I = 1; I <= MaxCSNum; ++I) { |
| 11009 | SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I]; |
| 11010 | for (MachineBasicBlock *MBB : MBBList) { |
| 11011 | LPadList.push_back(x: MBB); |
| 11012 | InvokeBBs.insert_range(R: MBB->predecessors()); |
| 11013 | } |
| 11014 | } |
| 11015 | |
| 11016 | assert(!LPadList.empty() && |
| 11017 | "No landing pad destinations for the dispatch jump table!" ); |
| 11018 | |
| 11019 | // Create the jump table and associated information. |
| 11020 | MachineJumpTableInfo *JTI = |
| 11021 | MF->getOrCreateJumpTableInfo(JTEntryKind: MachineJumpTableInfo::EK_Inline); |
| 11022 | unsigned MJTI = JTI->createJumpTableIndex(DestBBs: LPadList); |
| 11023 | |
| 11024 | // Create the MBBs for the dispatch code. |
| 11025 | |
| 11026 | // Shove the dispatch's address into the return slot in the function context. |
| 11027 | MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock(); |
| 11028 | DispatchBB->setIsEHPad(); |
| 11029 | |
| 11030 | MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); |
| 11031 | unsigned trap_opcode; |
| 11032 | if (Subtarget->isThumb()) |
| 11033 | trap_opcode = ARM::tTRAP; |
| 11034 | else |
| 11035 | trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP; |
| 11036 | |
| 11037 | BuildMI(BB: TrapBB, MIMD: dl, MCID: TII->get(Opcode: trap_opcode)); |
| 11038 | DispatchBB->addSuccessor(Succ: TrapBB); |
| 11039 | |
| 11040 | MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock(); |
| 11041 | DispatchBB->addSuccessor(Succ: DispContBB); |
| 11042 | |
| 11043 | // Insert the MBBs.
| 11044 | MF->insert(MBBI: MF->end(), MBB: DispatchBB); |
| 11045 | MF->insert(MBBI: MF->end(), MBB: DispContBB); |
| 11046 | MF->insert(MBBI: MF->end(), MBB: TrapBB); |
| 11047 | |
| 11048 | // Insert code into the entry block that creates and registers the function |
| 11049 | // context. |
| 11050 | SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI); |
| 11051 | |
| 11052 | MachineMemOperand *FIMMOLd = MF->getMachineMemOperand( |
| 11053 | PtrInfo: MachinePointerInfo::getFixedStack(MF&: *MF, FI), |
| 11054 | F: MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, Size: 4, BaseAlignment: Align(4)); |
| 11055 | |
| 11056 | MachineInstrBuilder MIB; |
| 11057 | MIB = BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::Int_eh_sjlj_dispatchsetup)); |
| 11058 | |
| 11059 | const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII); |
| 11060 | const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); |
| 11061 | |
| 11062 | // Add a register mask with no preserved registers. This results in all |
| 11063 | // registers being marked as clobbered. This can't work if the dispatch block |
| 11064 | // is in a Thumb1 function and is linked with ARM code which uses the FP |
| 11065 | // registers, as there is no way to preserve the FP registers in Thumb1 mode. |
| 11066 | MIB.addRegMask(Mask: RI.getSjLjDispatchPreservedMask(MF: *MF)); |
| 11067 | |
| 11068 | bool IsPositionIndependent = isPositionIndependent(); |
| 11069 | unsigned NumLPads = LPadList.size(); |
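|       | // Each of the three encodings below does the same thing: load the call site
|       | // index stored into the function context by the SjLj runtime, trap if it is
|       | // larger than the number of landing pads, and otherwise use it to index the
|       | // jump table of landing pads.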
| 11070 | if (Subtarget->isThumb2()) { |
| 11071 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 11072 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2LDRi12), DestReg: NewVReg1) |
| 11073 | .addFrameIndex(Idx: FI) |
| 11074 | .addImm(Val: 4) |
| 11075 | .addMemOperand(MMO: FIMMOLd) |
| 11076 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11077 | |
| 11078 | if (NumLPads < 256) { |
| 11079 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2CMPri)) |
| 11080 | .addReg(RegNo: NewVReg1) |
| 11081 | .addImm(Val: LPadList.size()) |
| 11082 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11083 | } else { |
| 11084 | Register VReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 11085 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2MOVi16), DestReg: VReg1) |
| 11086 | .addImm(Val: NumLPads & 0xFFFF) |
| 11087 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11088 | |
| 11089 | unsigned VReg2 = VReg1; |
| 11090 | if ((NumLPads & 0xFFFF0000) != 0) { |
| 11091 | VReg2 = MRI->createVirtualRegister(RegClass: TRC); |
| 11092 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2MOVTi16), DestReg: VReg2) |
| 11093 | .addReg(RegNo: VReg1) |
| 11094 | .addImm(Val: NumLPads >> 16) |
| 11095 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11096 | } |
| 11097 | |
| 11098 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2CMPrr)) |
| 11099 | .addReg(RegNo: NewVReg1) |
| 11100 | .addReg(RegNo: VReg2) |
| 11101 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11102 | } |
| 11103 | |
| 11104 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2Bcc)) |
| 11105 | .addMBB(MBB: TrapBB) |
| 11106 | .addImm(Val: ARMCC::HI) |
| 11107 | .addReg(RegNo: ARM::CPSR); |
| 11108 | |
| 11109 | Register NewVReg3 = MRI->createVirtualRegister(RegClass: TRC); |
| 11110 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2LEApcrelJT), DestReg: NewVReg3) |
| 11111 | .addJumpTableIndex(Idx: MJTI) |
| 11112 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11113 | |
| 11114 | Register NewVReg4 = MRI->createVirtualRegister(RegClass: TRC); |
| 11115 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2ADDrs), DestReg: NewVReg4) |
| 11116 | .addReg(RegNo: NewVReg3, flags: RegState::Kill) |
| 11117 | .addReg(RegNo: NewVReg1) |
| 11118 | .addImm(Val: ARM_AM::getSORegOpc(ShOp: ARM_AM::lsl, Imm: 2)) |
| 11119 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11120 | .add(MO: condCodeOp()); |
| 11121 | |
| 11122 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2BR_JT)) |
| 11123 | .addReg(RegNo: NewVReg4, flags: RegState::Kill) |
| 11124 | .addReg(RegNo: NewVReg1) |
| 11125 | .addJumpTableIndex(Idx: MJTI); |
| 11126 | } else if (Subtarget->isThumb()) { |
| 11127 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 11128 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDRspi), DestReg: NewVReg1) |
| 11129 | .addFrameIndex(Idx: FI) |
| 11130 | .addImm(Val: 1) |
| 11131 | .addMemOperand(MMO: FIMMOLd) |
| 11132 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11133 | |
| 11134 | if (NumLPads < 256) { |
| 11135 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tCMPi8)) |
| 11136 | .addReg(RegNo: NewVReg1) |
| 11137 | .addImm(Val: NumLPads) |
| 11138 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11139 | } else { |
| 11140 | MachineConstantPool *ConstantPool = MF->getConstantPool(); |
| 11141 | Type *Int32Ty = Type::getInt32Ty(C&: MF->getFunction().getContext()); |
| 11142 | const Constant *C = ConstantInt::get(Ty: Int32Ty, V: NumLPads); |
| 11143 | |
| 11144 | // MachineConstantPool wants an explicit alignment. |
| 11145 | Align Alignment = MF->getDataLayout().getPrefTypeAlign(Ty: Int32Ty); |
| 11146 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment); |
| 11147 | |
| 11148 | Register VReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 11149 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDRpci)) |
| 11150 | .addReg(RegNo: VReg1, flags: RegState::Define) |
| 11151 | .addConstantPoolIndex(Idx) |
| 11152 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11153 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tCMPr)) |
| 11154 | .addReg(RegNo: NewVReg1) |
| 11155 | .addReg(RegNo: VReg1) |
| 11156 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11157 | } |
| 11158 | |
| 11159 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tBcc)) |
| 11160 | .addMBB(MBB: TrapBB) |
| 11161 | .addImm(Val: ARMCC::HI) |
| 11162 | .addReg(RegNo: ARM::CPSR); |
| 11163 | |
| 11164 | Register NewVReg2 = MRI->createVirtualRegister(RegClass: TRC); |
| 11165 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tLSLri), DestReg: NewVReg2) |
| 11166 | .addReg(RegNo: ARM::CPSR, flags: RegState::Define) |
| 11167 | .addReg(RegNo: NewVReg1) |
| 11168 | .addImm(Val: 2) |
| 11169 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11170 | |
| 11171 | Register NewVReg3 = MRI->createVirtualRegister(RegClass: TRC); |
| 11172 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tLEApcrelJT), DestReg: NewVReg3) |
| 11173 | .addJumpTableIndex(Idx: MJTI) |
| 11174 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11175 | |
| 11176 | Register NewVReg4 = MRI->createVirtualRegister(RegClass: TRC); |
| 11177 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tADDrr), DestReg: NewVReg4) |
| 11178 | .addReg(RegNo: ARM::CPSR, flags: RegState::Define) |
| 11179 | .addReg(RegNo: NewVReg2, flags: RegState::Kill) |
| 11180 | .addReg(RegNo: NewVReg3) |
| 11181 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11182 | |
| 11183 | MachineMemOperand *JTMMOLd = |
| 11184 | MF->getMachineMemOperand(PtrInfo: MachinePointerInfo::getJumpTable(MF&: *MF), |
| 11185 | F: MachineMemOperand::MOLoad, Size: 4, BaseAlignment: Align(4)); |
| 11186 | |
| 11187 | Register NewVReg5 = MRI->createVirtualRegister(RegClass: TRC); |
| 11188 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDRi), DestReg: NewVReg5) |
| 11189 | .addReg(RegNo: NewVReg4, flags: RegState::Kill) |
| 11190 | .addImm(Val: 0) |
| 11191 | .addMemOperand(MMO: JTMMOLd) |
| 11192 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11193 | |
| 11194 | unsigned NewVReg6 = NewVReg5; |
| 11195 | if (IsPositionIndependent) { |
| 11196 | NewVReg6 = MRI->createVirtualRegister(RegClass: TRC); |
| 11197 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tADDrr), DestReg: NewVReg6) |
| 11198 | .addReg(RegNo: ARM::CPSR, flags: RegState::Define) |
| 11199 | .addReg(RegNo: NewVReg5, flags: RegState::Kill) |
| 11200 | .addReg(RegNo: NewVReg3) |
| 11201 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11202 | } |
| 11203 | |
| 11204 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tBR_JTr)) |
| 11205 | .addReg(RegNo: NewVReg6, flags: RegState::Kill) |
| 11206 | .addJumpTableIndex(Idx: MJTI); |
| 11207 | } else { |
| 11208 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 11209 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::LDRi12), DestReg: NewVReg1) |
| 11210 | .addFrameIndex(Idx: FI) |
| 11211 | .addImm(Val: 4) |
| 11212 | .addMemOperand(MMO: FIMMOLd) |
| 11213 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11214 | |
| 11215 | if (NumLPads < 256) { |
| 11216 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::CMPri)) |
| 11217 | .addReg(RegNo: NewVReg1) |
| 11218 | .addImm(Val: NumLPads) |
| 11219 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11220 | } else if (Subtarget->hasV6T2Ops() && isUInt<16>(x: NumLPads)) { |
| 11221 | Register VReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 11222 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::MOVi16), DestReg: VReg1) |
| 11223 | .addImm(Val: NumLPads & 0xFFFF) |
| 11224 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11225 | |
| 11226 | unsigned VReg2 = VReg1; |
| 11227 | if ((NumLPads & 0xFFFF0000) != 0) { |
| 11228 | VReg2 = MRI->createVirtualRegister(RegClass: TRC); |
| 11229 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::MOVTi16), DestReg: VReg2) |
| 11230 | .addReg(RegNo: VReg1) |
| 11231 | .addImm(Val: NumLPads >> 16) |
| 11232 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11233 | } |
| 11234 | |
| 11235 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::CMPrr)) |
| 11236 | .addReg(RegNo: NewVReg1) |
| 11237 | .addReg(RegNo: VReg2) |
| 11238 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11239 | } else { |
| 11240 | MachineConstantPool *ConstantPool = MF->getConstantPool(); |
| 11241 | Type *Int32Ty = Type::getInt32Ty(C&: MF->getFunction().getContext()); |
| 11242 | const Constant *C = ConstantInt::get(Ty: Int32Ty, V: NumLPads); |
| 11243 | |
| 11244 | // MachineConstantPool wants an explicit alignment. |
| 11245 | Align Alignment = MF->getDataLayout().getPrefTypeAlign(Ty: Int32Ty); |
| 11246 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment); |
| 11247 | |
| 11248 | Register VReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 11249 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::LDRcp)) |
| 11250 | .addReg(RegNo: VReg1, flags: RegState::Define) |
| 11251 | .addConstantPoolIndex(Idx) |
| 11252 | .addImm(Val: 0) |
| 11253 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11254 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::CMPrr)) |
| 11255 | .addReg(RegNo: NewVReg1) |
| 11256 | .addReg(RegNo: VReg1, flags: RegState::Kill) |
| 11257 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11258 | } |
| 11259 | |
| 11260 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::Bcc)) |
| 11261 | .addMBB(MBB: TrapBB) |
| 11262 | .addImm(Val: ARMCC::HI) |
| 11263 | .addReg(RegNo: ARM::CPSR); |
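
// Schematically, the ARM-mode dispatch sequence built below is roughly:
//   mov   rScaled, rIdx, lsl #2    ; MOVsi: scale the call-site index by 4
//   adr   rJT, <jump-table>        ; LEApcrelJT
//   ldr   rDest, [rScaled, rJT]    ; load the jump-table entry
//   add   pc, rDest, rJT           ; BR_JTadd for PIC, or
//   mov   pc, rDest                ; BR_JTr otherwise
// (Mnemonics and register names are illustrative only.)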
| 11264 | |
| 11265 | Register NewVReg3 = MRI->createVirtualRegister(RegClass: TRC); |
| 11266 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::MOVsi), DestReg: NewVReg3) |
| 11267 | .addReg(RegNo: NewVReg1) |
| 11268 | .addImm(Val: ARM_AM::getSORegOpc(ShOp: ARM_AM::lsl, Imm: 2)) |
| 11269 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11270 | .add(MO: condCodeOp()); |
| 11271 | Register NewVReg4 = MRI->createVirtualRegister(RegClass: TRC); |
| 11272 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::LEApcrelJT), DestReg: NewVReg4) |
| 11273 | .addJumpTableIndex(Idx: MJTI) |
| 11274 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11275 | |
| 11276 | MachineMemOperand *JTMMOLd = |
| 11277 | MF->getMachineMemOperand(PtrInfo: MachinePointerInfo::getJumpTable(MF&: *MF), |
| 11278 | F: MachineMemOperand::MOLoad, Size: 4, BaseAlignment: Align(4)); |
| 11279 | Register NewVReg5 = MRI->createVirtualRegister(RegClass: TRC); |
| 11280 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::LDRrs), DestReg: NewVReg5) |
| 11281 | .addReg(RegNo: NewVReg3, flags: RegState::Kill) |
| 11282 | .addReg(RegNo: NewVReg4) |
| 11283 | .addImm(Val: 0) |
| 11284 | .addMemOperand(MMO: JTMMOLd) |
| 11285 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11286 | |
| 11287 | if (IsPositionIndependent) { |
| 11288 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::BR_JTadd)) |
| 11289 | .addReg(RegNo: NewVReg5, flags: RegState::Kill) |
| 11290 | .addReg(RegNo: NewVReg4) |
| 11291 | .addJumpTableIndex(Idx: MJTI); |
| 11292 | } else { |
| 11293 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::BR_JTr)) |
| 11294 | .addReg(RegNo: NewVReg5, flags: RegState::Kill) |
| 11295 | .addJumpTableIndex(Idx: MJTI); |
| 11296 | } |
| 11297 | } |
| 11298 | |
| 11299 | // Add the jump table entries as successors to the MBB. |
| 11300 | SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs; |
| 11301 | for (MachineBasicBlock *CurMBB : LPadList) { |
| 11302 | if (SeenMBBs.insert(Ptr: CurMBB).second) |
| 11303 | DispContBB->addSuccessor(Succ: CurMBB); |
| 11304 | } |
| 11305 | |
| 11306 | // N.B. the order the invoke BBs are processed in doesn't matter here. |
| 11307 | const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF); |
| 11308 | SmallVector<MachineBasicBlock*, 64> MBBLPads; |
| 11309 | for (MachineBasicBlock *BB : InvokeBBs) { |
| 11310 | |
| 11311 | // Remove the landing pad successor from the invoke block and replace it |
| 11312 | // with the new dispatch block. |
| 11313 | SmallVector<MachineBasicBlock*, 4> Successors(BB->successors()); |
| 11314 | while (!Successors.empty()) { |
| 11315 | MachineBasicBlock *SMBB = Successors.pop_back_val(); |
| 11316 | if (SMBB->isEHPad()) { |
| 11317 | BB->removeSuccessor(Succ: SMBB); |
| 11318 | MBBLPads.push_back(Elt: SMBB); |
| 11319 | } |
| 11320 | } |
| 11321 | |
| 11322 | BB->addSuccessor(Succ: DispatchBB, Prob: BranchProbability::getZero()); |
| 11323 | BB->normalizeSuccProbs(); |
| 11324 | |
| 11325 | // Find the invoke call and mark all of the callee-saved registers as |
| 11326 | // 'implicit defined' so that they're spilled. This prevents code from |
| 11327 | // moving instructions to before the EH block, where they will never be |
| 11328 | // executed. |
| 11329 | for (MachineBasicBlock::reverse_iterator |
| 11330 | II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) { |
| 11331 | if (!II->isCall()) continue; |
| 11332 | |
| 11333 | DenseSet<unsigned> DefRegs; |
| 11334 | for (MachineInstr::mop_iterator |
| 11335 | OI = II->operands_begin(), OE = II->operands_end(); |
| 11336 | OI != OE; ++OI) { |
| 11337 | if (!OI->isReg()) continue; |
| 11338 | DefRegs.insert(V: OI->getReg()); |
| 11339 | } |
| 11340 | |
| 11341 | MachineInstrBuilder MIB(*MF, &*II); |
| 11342 | |
| 11343 | for (unsigned i = 0; SavedRegs[i] != 0; ++i) { |
| 11344 | unsigned Reg = SavedRegs[i]; |
| 11345 | if (Subtarget->isThumb2() && |
| 11346 | !ARM::tGPRRegClass.contains(Reg) && |
| 11347 | !ARM::hGPRRegClass.contains(Reg)) |
| 11348 | continue; |
| 11349 | if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg)) |
| 11350 | continue; |
| 11351 | if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg)) |
| 11352 | continue; |
| 11353 | if (!DefRegs.contains(V: Reg)) |
| 11354 | MIB.addReg(RegNo: Reg, flags: RegState::ImplicitDefine | RegState::Dead); |
| 11355 | } |
| 11356 | |
| 11357 | break; |
| 11358 | } |
| 11359 | } |
| 11360 | |
| 11361 | // Mark all former landing pads as non-landing pads. The dispatch is the only |
| 11362 | // landing pad now. |
| 11363 | for (MachineBasicBlock *MBBLPad : MBBLPads) |
| 11364 | MBBLPad->setIsEHPad(false); |
| 11365 | |
| 11366 | // The instruction is gone now. |
| 11367 | MI.eraseFromParent(); |
| 11368 | } |
| 11369 | |
| 11370 | static |
| 11371 | MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { |
| 11372 | for (MachineBasicBlock *S : MBB->successors()) |
| 11373 | if (S != Succ) |
| 11374 | return S; |
| 11375 | llvm_unreachable("Expecting a BB with two successors!" ); |
| 11376 | } |
| 11377 | |
/// Return the load opcode for a given load size. If the load size is >= 8,
/// a NEON opcode will be returned.
| 11380 | static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) { |
| 11381 | if (LdSize >= 8) |
| 11382 | return LdSize == 16 ? ARM::VLD1q32wb_fixed |
| 11383 | : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0; |
| 11384 | if (IsThumb1) |
| 11385 | return LdSize == 4 ? ARM::tLDRi |
| 11386 | : LdSize == 2 ? ARM::tLDRHi |
| 11387 | : LdSize == 1 ? ARM::tLDRBi : 0; |
| 11388 | if (IsThumb2) |
| 11389 | return LdSize == 4 ? ARM::t2LDR_POST |
| 11390 | : LdSize == 2 ? ARM::t2LDRH_POST |
| 11391 | : LdSize == 1 ? ARM::t2LDRB_POST : 0; |
| 11392 | return LdSize == 4 ? ARM::LDR_POST_IMM |
| 11393 | : LdSize == 2 ? ARM::LDRH_POST |
| 11394 | : LdSize == 1 ? ARM::LDRB_POST_IMM : 0; |
| 11395 | } |
| 11396 | |
/// Return the store opcode for a given store size. If the store size is >= 8,
/// a NEON opcode will be returned.
| 11399 | static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) { |
| 11400 | if (StSize >= 8) |
| 11401 | return StSize == 16 ? ARM::VST1q32wb_fixed |
| 11402 | : StSize == 8 ? ARM::VST1d32wb_fixed : 0; |
| 11403 | if (IsThumb1) |
| 11404 | return StSize == 4 ? ARM::tSTRi |
| 11405 | : StSize == 2 ? ARM::tSTRHi |
| 11406 | : StSize == 1 ? ARM::tSTRBi : 0; |
| 11407 | if (IsThumb2) |
| 11408 | return StSize == 4 ? ARM::t2STR_POST |
| 11409 | : StSize == 2 ? ARM::t2STRH_POST |
| 11410 | : StSize == 1 ? ARM::t2STRB_POST : 0; |
| 11411 | return StSize == 4 ? ARM::STR_POST_IMM |
| 11412 | : StSize == 2 ? ARM::STRH_POST |
| 11413 | : StSize == 1 ? ARM::STRB_POST_IMM : 0; |
| 11414 | } |
| 11415 | |
/// Emit a post-increment load operation with the given size. The instructions
| 11417 | /// will be added to BB at Pos. |
| 11418 | static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, |
| 11419 | const TargetInstrInfo *TII, const DebugLoc &dl, |
| 11420 | unsigned LdSize, unsigned Data, unsigned AddrIn, |
| 11421 | unsigned AddrOut, bool IsThumb1, bool IsThumb2) { |
| 11422 | unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2); |
assert(LdOpc != 0 && "Should have a load opcode");
| 11424 | if (LdSize >= 8) { |
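// NEON load with address writeback: AddrOut receives the post-incremented
// address.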
| 11425 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: LdOpc), DestReg: Data) |
| 11426 | .addReg(RegNo: AddrOut, flags: RegState::Define) |
| 11427 | .addReg(RegNo: AddrIn) |
| 11428 | .addImm(Val: 0) |
| 11429 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11430 | } else if (IsThumb1) { |
| 11431 | // load + update AddrIn |
| 11432 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: LdOpc), DestReg: Data) |
| 11433 | .addReg(RegNo: AddrIn) |
| 11434 | .addImm(Val: 0) |
| 11435 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11436 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: ARM::tADDi8), DestReg: AddrOut) |
| 11437 | .add(MO: t1CondCodeOp()) |
| 11438 | .addReg(RegNo: AddrIn) |
| 11439 | .addImm(Val: LdSize) |
| 11440 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11441 | } else if (IsThumb2) { |
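// Thumb2 post-indexed load: the incremented address is written back to
// AddrOut.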
| 11442 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: LdOpc), DestReg: Data) |
| 11443 | .addReg(RegNo: AddrOut, flags: RegState::Define) |
| 11444 | .addReg(RegNo: AddrIn) |
| 11445 | .addImm(Val: LdSize) |
| 11446 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11447 | } else { // arm |
| 11448 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: LdOpc), DestReg: Data) |
| 11449 | .addReg(RegNo: AddrOut, flags: RegState::Define) |
| 11450 | .addReg(RegNo: AddrIn) |
| 11451 | .addReg(RegNo: 0) |
| 11452 | .addImm(Val: LdSize) |
| 11453 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11454 | } |
| 11455 | } |
| 11456 | |
/// Emit a post-increment store operation with the given size. The instructions
| 11458 | /// will be added to BB at Pos. |
| 11459 | static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, |
| 11460 | const TargetInstrInfo *TII, const DebugLoc &dl, |
| 11461 | unsigned StSize, unsigned Data, unsigned AddrIn, |
| 11462 | unsigned AddrOut, bool IsThumb1, bool IsThumb2) { |
| 11463 | unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2); |
assert(StOpc != 0 && "Should have a store opcode");
| 11465 | if (StSize >= 8) { |
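// NEON store with address writeback: AddrOut receives the post-incremented
// address.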
| 11466 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: StOpc), DestReg: AddrOut) |
| 11467 | .addReg(RegNo: AddrIn) |
| 11468 | .addImm(Val: 0) |
| 11469 | .addReg(RegNo: Data) |
| 11470 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11471 | } else if (IsThumb1) { |
| 11472 | // store + update AddrIn |
| 11473 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: StOpc)) |
| 11474 | .addReg(RegNo: Data) |
| 11475 | .addReg(RegNo: AddrIn) |
| 11476 | .addImm(Val: 0) |
| 11477 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11478 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: ARM::tADDi8), DestReg: AddrOut) |
| 11479 | .add(MO: t1CondCodeOp()) |
| 11480 | .addReg(RegNo: AddrIn) |
| 11481 | .addImm(Val: StSize) |
| 11482 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11483 | } else if (IsThumb2) { |
| 11484 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: StOpc), DestReg: AddrOut) |
| 11485 | .addReg(RegNo: Data) |
| 11486 | .addReg(RegNo: AddrIn) |
| 11487 | .addImm(Val: StSize) |
| 11488 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11489 | } else { // arm |
| 11490 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: StOpc), DestReg: AddrOut) |
| 11491 | .addReg(RegNo: Data) |
| 11492 | .addReg(RegNo: AddrIn) |
| 11493 | .addReg(RegNo: 0) |
| 11494 | .addImm(Val: StSize) |
| 11495 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11496 | } |
| 11497 | } |
| 11498 | |
| 11499 | MachineBasicBlock * |
| 11500 | ARMTargetLowering::EmitStructByval(MachineInstr &MI, |
| 11501 | MachineBasicBlock *BB) const { |
// This pseudo instruction has 4 operands: dst, src, size, alignment
| 11503 | // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold(). |
| 11504 | // Otherwise, we will generate unrolled scalar copies. |
| 11505 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 11506 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); |
| 11507 | MachineFunction::iterator It = ++BB->getIterator(); |
| 11508 | |
| 11509 | Register dest = MI.getOperand(i: 0).getReg(); |
| 11510 | Register src = MI.getOperand(i: 1).getReg(); |
| 11511 | unsigned SizeVal = MI.getOperand(i: 2).getImm(); |
| 11512 | unsigned Alignment = MI.getOperand(i: 3).getImm(); |
| 11513 | DebugLoc dl = MI.getDebugLoc(); |
| 11514 | |
| 11515 | MachineFunction *MF = BB->getParent(); |
| 11516 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
| 11517 | unsigned UnitSize = 0; |
| 11518 | const TargetRegisterClass *TRC = nullptr; |
| 11519 | const TargetRegisterClass *VecTRC = nullptr; |
| 11520 | |
| 11521 | bool IsThumb1 = Subtarget->isThumb1Only(); |
| 11522 | bool IsThumb2 = Subtarget->isThumb2(); |
| 11523 | bool IsThumb = Subtarget->isThumb(); |
| 11524 | |
| 11525 | if (Alignment & 1) { |
| 11526 | UnitSize = 1; |
| 11527 | } else if (Alignment & 2) { |
| 11528 | UnitSize = 2; |
| 11529 | } else { |
| 11530 | // Check whether we can use NEON instructions. |
| 11531 | if (!MF->getFunction().hasFnAttribute(Kind: Attribute::NoImplicitFloat) && |
| 11532 | Subtarget->hasNEON()) { |
| 11533 | if ((Alignment % 16 == 0) && SizeVal >= 16) |
| 11534 | UnitSize = 16; |
| 11535 | else if ((Alignment % 8 == 0) && SizeVal >= 8) |
| 11536 | UnitSize = 8; |
| 11537 | } |
| 11538 | // Can't use NEON instructions. |
| 11539 | if (UnitSize == 0) |
| 11540 | UnitSize = 4; |
| 11541 | } |
| 11542 | |
| 11543 | // Select the correct opcode and register class for unit size load/store |
| 11544 | bool IsNeon = UnitSize >= 8; |
| 11545 | TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass; |
| 11546 | if (IsNeon) |
| 11547 | VecTRC = UnitSize == 16 ? &ARM::DPairRegClass |
| 11548 | : UnitSize == 8 ? &ARM::DPRRegClass |
| 11549 | : nullptr; |
| 11550 | |
| 11551 | unsigned BytesLeft = SizeVal % UnitSize; |
| 11552 | unsigned LoopSize = SizeVal - BytesLeft; |
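// For example, SizeVal == 70 with UnitSize == 16 gives LoopSize == 64 (four
// unit-sized copies) and BytesLeft == 6 (copied byte-by-byte afterwards).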
| 11553 | |
| 11554 | if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) { |
| 11555 | // Use LDR and STR to copy. |
| 11556 | // [scratch, srcOut] = LDR_POST(srcIn, UnitSize) |
| 11557 | // [destOut] = STR_POST(scratch, destIn, UnitSize) |
| 11558 | unsigned srcIn = src; |
| 11559 | unsigned destIn = dest; |
| 11560 | for (unsigned i = 0; i < LoopSize; i+=UnitSize) { |
| 11561 | Register srcOut = MRI.createVirtualRegister(RegClass: TRC); |
| 11562 | Register destOut = MRI.createVirtualRegister(RegClass: TRC); |
| 11563 | Register scratch = MRI.createVirtualRegister(RegClass: IsNeon ? VecTRC : TRC); |
| 11564 | emitPostLd(BB, Pos: MI, TII, dl, LdSize: UnitSize, Data: scratch, AddrIn: srcIn, AddrOut: srcOut, |
| 11565 | IsThumb1, IsThumb2); |
| 11566 | emitPostSt(BB, Pos: MI, TII, dl, StSize: UnitSize, Data: scratch, AddrIn: destIn, AddrOut: destOut, |
| 11567 | IsThumb1, IsThumb2); |
| 11568 | srcIn = srcOut; |
| 11569 | destIn = destOut; |
| 11570 | } |
| 11571 | |
| 11572 | // Handle the leftover bytes with LDRB and STRB. |
| 11573 | // [scratch, srcOut] = LDRB_POST(srcIn, 1) |
| 11574 | // [destOut] = STRB_POST(scratch, destIn, 1) |
| 11575 | for (unsigned i = 0; i < BytesLeft; i++) { |
| 11576 | Register srcOut = MRI.createVirtualRegister(RegClass: TRC); |
| 11577 | Register destOut = MRI.createVirtualRegister(RegClass: TRC); |
| 11578 | Register scratch = MRI.createVirtualRegister(RegClass: TRC); |
| 11579 | emitPostLd(BB, Pos: MI, TII, dl, LdSize: 1, Data: scratch, AddrIn: srcIn, AddrOut: srcOut, |
| 11580 | IsThumb1, IsThumb2); |
| 11581 | emitPostSt(BB, Pos: MI, TII, dl, StSize: 1, Data: scratch, AddrIn: destIn, AddrOut: destOut, |
| 11582 | IsThumb1, IsThumb2); |
| 11583 | srcIn = srcOut; |
| 11584 | destIn = destOut; |
| 11585 | } |
| 11586 | MI.eraseFromParent(); // The instruction is gone now. |
| 11587 | return BB; |
| 11588 | } |
| 11589 | |
| 11590 | // Expand the pseudo op to a loop. |
| 11591 | // thisMBB: |
| 11592 | // ... |
| 11593 | // movw varEnd, # --> with thumb2 |
| 11594 | // movt varEnd, # |
| 11595 | // ldrcp varEnd, idx --> without thumb2 |
| 11596 | // fallthrough --> loopMBB |
| 11597 | // loopMBB: |
| 11598 | // PHI varPhi, varEnd, varLoop |
| 11599 | // PHI srcPhi, src, srcLoop |
| 11600 | // PHI destPhi, dst, destLoop |
| 11601 | // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) |
| 11602 | // [destLoop] = STR_POST(scratch, destPhi, UnitSize) |
| 11603 | // subs varLoop, varPhi, #UnitSize |
| 11604 | // bne loopMBB |
| 11605 | // fallthrough --> exitMBB |
| 11606 | // exitMBB: |
| 11607 | // epilogue to handle left-over bytes |
| 11608 | // [scratch, srcOut] = LDRB_POST(srcLoop, 1) |
| 11609 | // [destOut] = STRB_POST(scratch, destLoop, 1) |
| 11610 | MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(BB: LLVM_BB); |
| 11611 | MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(BB: LLVM_BB); |
| 11612 | MF->insert(MBBI: It, MBB: loopMBB); |
| 11613 | MF->insert(MBBI: It, MBB: exitMBB); |
| 11614 | |
| 11615 | // Set the call frame size on entry to the new basic blocks. |
| 11616 | unsigned CallFrameSize = TII->getCallFrameSizeAt(MI); |
| 11617 | loopMBB->setCallFrameSize(CallFrameSize); |
| 11618 | exitMBB->setCallFrameSize(CallFrameSize); |
| 11619 | |
| 11620 | // Transfer the remainder of BB and its successor edges to exitMBB. |
| 11621 | exitMBB->splice(Where: exitMBB->begin(), Other: BB, |
| 11622 | From: std::next(x: MachineBasicBlock::iterator(MI)), To: BB->end()); |
| 11623 | exitMBB->transferSuccessorsAndUpdatePHIs(FromMBB: BB); |
| 11624 | |
| 11625 | // Load an immediate to varEnd. |
| 11626 | Register varEnd = MRI.createVirtualRegister(RegClass: TRC); |
| 11627 | if (Subtarget->useMovt()) { |
| 11628 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: IsThumb ? ARM::t2MOVi32imm : ARM::MOVi32imm), |
| 11629 | DestReg: varEnd) |
| 11630 | .addImm(Val: LoopSize); |
| 11631 | } else if (Subtarget->genExecuteOnly()) { |
assert(IsThumb && "Non-thumb expected to have used movt");
| 11633 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::tMOVi32imm), DestReg: varEnd).addImm(Val: LoopSize); |
| 11634 | } else { |
| 11635 | MachineConstantPool *ConstantPool = MF->getConstantPool(); |
| 11636 | Type *Int32Ty = Type::getInt32Ty(C&: MF->getFunction().getContext()); |
| 11637 | const Constant *C = ConstantInt::get(Ty: Int32Ty, V: LoopSize); |
| 11638 | |
| 11639 | // MachineConstantPool wants an explicit alignment. |
| 11640 | Align Alignment = MF->getDataLayout().getPrefTypeAlign(Ty: Int32Ty); |
| 11641 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment); |
| 11642 | MachineMemOperand *CPMMO = |
| 11643 | MF->getMachineMemOperand(PtrInfo: MachinePointerInfo::getConstantPool(MF&: *MF), |
| 11644 | F: MachineMemOperand::MOLoad, Size: 4, BaseAlignment: Align(4)); |
| 11645 | |
| 11646 | if (IsThumb) |
| 11647 | BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDRpci)) |
| 11648 | .addReg(RegNo: varEnd, flags: RegState::Define) |
| 11649 | .addConstantPoolIndex(Idx) |
| 11650 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11651 | .addMemOperand(MMO: CPMMO); |
| 11652 | else |
| 11653 | BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::LDRcp)) |
| 11654 | .addReg(RegNo: varEnd, flags: RegState::Define) |
| 11655 | .addConstantPoolIndex(Idx) |
| 11656 | .addImm(Val: 0) |
| 11657 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11658 | .addMemOperand(MMO: CPMMO); |
| 11659 | } |
| 11660 | BB->addSuccessor(Succ: loopMBB); |
| 11661 | |
| 11662 | // Generate the loop body: |
| 11663 | // varPhi = PHI(varLoop, varEnd) |
| 11664 | // srcPhi = PHI(srcLoop, src) |
| 11665 | // destPhi = PHI(destLoop, dst) |
| 11666 | MachineBasicBlock *entryBB = BB; |
| 11667 | BB = loopMBB; |
| 11668 | Register varLoop = MRI.createVirtualRegister(RegClass: TRC); |
| 11669 | Register varPhi = MRI.createVirtualRegister(RegClass: TRC); |
| 11670 | Register srcLoop = MRI.createVirtualRegister(RegClass: TRC); |
| 11671 | Register srcPhi = MRI.createVirtualRegister(RegClass: TRC); |
| 11672 | Register destLoop = MRI.createVirtualRegister(RegClass: TRC); |
| 11673 | Register destPhi = MRI.createVirtualRegister(RegClass: TRC); |
| 11674 | |
| 11675 | BuildMI(BB&: *BB, I: BB->begin(), MIMD: dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: varPhi) |
| 11676 | .addReg(RegNo: varLoop).addMBB(MBB: loopMBB) |
| 11677 | .addReg(RegNo: varEnd).addMBB(MBB: entryBB); |
| 11678 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: srcPhi) |
| 11679 | .addReg(RegNo: srcLoop).addMBB(MBB: loopMBB) |
| 11680 | .addReg(RegNo: src).addMBB(MBB: entryBB); |
| 11681 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: destPhi) |
| 11682 | .addReg(RegNo: destLoop).addMBB(MBB: loopMBB) |
| 11683 | .addReg(RegNo: dest).addMBB(MBB: entryBB); |
| 11684 | |
| 11685 | // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) |
// [destLoop] = STR_POST(scratch, destPhi, UnitSize)
| 11687 | Register scratch = MRI.createVirtualRegister(RegClass: IsNeon ? VecTRC : TRC); |
| 11688 | emitPostLd(BB, Pos: BB->end(), TII, dl, LdSize: UnitSize, Data: scratch, AddrIn: srcPhi, AddrOut: srcLoop, |
| 11689 | IsThumb1, IsThumb2); |
| 11690 | emitPostSt(BB, Pos: BB->end(), TII, dl, StSize: UnitSize, Data: scratch, AddrIn: destPhi, AddrOut: destLoop, |
| 11691 | IsThumb1, IsThumb2); |
| 11692 | |
| 11693 | // Decrement loop variable by UnitSize. |
| 11694 | if (IsThumb1) { |
| 11695 | BuildMI(BB&: *BB, I: BB->end(), MIMD: dl, MCID: TII->get(Opcode: ARM::tSUBi8), DestReg: varLoop) |
| 11696 | .add(MO: t1CondCodeOp()) |
| 11697 | .addReg(RegNo: varPhi) |
| 11698 | .addImm(Val: UnitSize) |
| 11699 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11700 | } else { |
| 11701 | MachineInstrBuilder MIB = |
| 11702 | BuildMI(BB&: *BB, I: BB->end(), MIMD: dl, |
| 11703 | MCID: TII->get(Opcode: IsThumb2 ? ARM::t2SUBri : ARM::SUBri), DestReg: varLoop); |
| 11704 | MIB.addReg(RegNo: varPhi) |
| 11705 | .addImm(Val: UnitSize) |
| 11706 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11707 | .add(MO: condCodeOp()); |
| 11708 | MIB->getOperand(i: 5).setReg(ARM::CPSR); |
| 11709 | MIB->getOperand(i: 5).setIsDef(true); |
| 11710 | } |
| 11711 | BuildMI(BB&: *BB, I: BB->end(), MIMD: dl, |
| 11712 | MCID: TII->get(Opcode: IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc)) |
| 11713 | .addMBB(MBB: loopMBB).addImm(Val: ARMCC::NE).addReg(RegNo: ARM::CPSR); |
| 11714 | |
| 11715 | // loopMBB can loop back to loopMBB or fall through to exitMBB. |
| 11716 | BB->addSuccessor(Succ: loopMBB); |
| 11717 | BB->addSuccessor(Succ: exitMBB); |
| 11718 | |
| 11719 | // Add epilogue to handle BytesLeft. |
| 11720 | BB = exitMBB; |
| 11721 | auto StartOfExit = exitMBB->begin(); |
| 11722 | |
| 11723 | // [scratch, srcOut] = LDRB_POST(srcLoop, 1) |
| 11724 | // [destOut] = STRB_POST(scratch, destLoop, 1) |
| 11725 | unsigned srcIn = srcLoop; |
| 11726 | unsigned destIn = destLoop; |
| 11727 | for (unsigned i = 0; i < BytesLeft; i++) { |
| 11728 | Register srcOut = MRI.createVirtualRegister(RegClass: TRC); |
| 11729 | Register destOut = MRI.createVirtualRegister(RegClass: TRC); |
| 11730 | Register scratch = MRI.createVirtualRegister(RegClass: TRC); |
| 11731 | emitPostLd(BB, Pos: StartOfExit, TII, dl, LdSize: 1, Data: scratch, AddrIn: srcIn, AddrOut: srcOut, |
| 11732 | IsThumb1, IsThumb2); |
| 11733 | emitPostSt(BB, Pos: StartOfExit, TII, dl, StSize: 1, Data: scratch, AddrIn: destIn, AddrOut: destOut, |
| 11734 | IsThumb1, IsThumb2); |
| 11735 | srcIn = srcOut; |
| 11736 | destIn = destOut; |
| 11737 | } |
| 11738 | |
| 11739 | MI.eraseFromParent(); // The instruction is gone now. |
| 11740 | return BB; |
| 11741 | } |
| 11742 | |
| 11743 | MachineBasicBlock * |
| 11744 | ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI, |
| 11745 | MachineBasicBlock *MBB) const { |
| 11746 | const TargetMachine &TM = getTargetMachine(); |
| 11747 | const TargetInstrInfo &TII = *Subtarget->getInstrInfo(); |
| 11748 | DebugLoc DL = MI.getDebugLoc(); |
| 11749 | |
| 11750 | assert(Subtarget->isTargetWindows() && |
| 11751 | "__chkstk is only supported on Windows" ); |
| 11752 | assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode" ); |
| 11753 | |
// __chkstk takes the number of words to allocate on the stack in R4, and
// returns the stack adjustment in bytes in R4. This will not clobber any
// other registers (other than the obvious lr).
| 11757 | // |
| 11758 | // Although, technically, IP should be considered a register which may be |
| 11759 | // clobbered, the call itself will not touch it. Windows on ARM is a pure |
// Thumb-2 environment, so there is no interworking required. As a result, we
| 11761 | // do not expect a veneer to be emitted by the linker, clobbering IP. |
| 11762 | // |
| 11763 | // Each module receives its own copy of __chkstk, so no import thunk is |
| 11764 | // required, again, ensuring that IP is not clobbered. |
| 11765 | // |
| 11766 | // Finally, although some linkers may theoretically provide a trampoline for |
| 11767 | // out of range calls (which is quite common due to a 32M range limitation of |
| 11768 | // branches for Thumb), we can generate the long-call version via |
| 11769 | // -mcmodel=large, alleviating the need for the trampoline which may clobber |
| 11770 | // IP. |
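//
// Schematically, the sequence emitted below is roughly:
//   bl    __chkstk                 ; small/medium/kernel code models
// or
//   movw  rTmp, :lower16:__chkstk  ; large code model (t2MOVi32imm)
//   movt  rTmp, :upper16:__chkstk
//   blx   rTmp
// followed by
//   sub.w sp, sp, r4
// (Mnemonics and register names are illustrative only.)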
| 11771 | |
| 11772 | switch (TM.getCodeModel()) { |
| 11773 | case CodeModel::Tiny: |
| 11774 | llvm_unreachable("Tiny code model not available on ARM." ); |
| 11775 | case CodeModel::Small: |
| 11776 | case CodeModel::Medium: |
| 11777 | case CodeModel::Kernel: |
| 11778 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: ARM::tBL)) |
| 11779 | .add(MOs: predOps(Pred: ARMCC::AL)) |
.addExternalSymbol(FnName: "__chkstk")
| 11781 | .addReg(RegNo: ARM::R4, flags: RegState::Implicit | RegState::Kill) |
| 11782 | .addReg(RegNo: ARM::R4, flags: RegState::Implicit | RegState::Define) |
| 11783 | .addReg(RegNo: ARM::R12, |
| 11784 | flags: RegState::Implicit | RegState::Define | RegState::Dead) |
| 11785 | .addReg(RegNo: ARM::CPSR, |
| 11786 | flags: RegState::Implicit | RegState::Define | RegState::Dead); |
| 11787 | break; |
| 11788 | case CodeModel::Large: { |
| 11789 | MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); |
| 11790 | Register Reg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11791 | |
| 11792 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: ARM::t2MOVi32imm), DestReg: Reg) |
| 11793 | .addExternalSymbol(FnName: "__chkstk" ); |
| 11794 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: gettBLXrOpcode(MF: *MBB->getParent()))) |
| 11795 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11796 | .addReg(RegNo: Reg, flags: RegState::Kill) |
| 11797 | .addReg(RegNo: ARM::R4, flags: RegState::Implicit | RegState::Kill) |
| 11798 | .addReg(RegNo: ARM::R4, flags: RegState::Implicit | RegState::Define) |
| 11799 | .addReg(RegNo: ARM::R12, |
| 11800 | flags: RegState::Implicit | RegState::Define | RegState::Dead) |
| 11801 | .addReg(RegNo: ARM::CPSR, |
| 11802 | flags: RegState::Implicit | RegState::Define | RegState::Dead); |
| 11803 | break; |
| 11804 | } |
| 11805 | } |
| 11806 | |
| 11807 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: ARM::t2SUBrr), DestReg: ARM::SP) |
| 11808 | .addReg(RegNo: ARM::SP, flags: RegState::Kill) |
| 11809 | .addReg(RegNo: ARM::R4, flags: RegState::Kill) |
| 11810 | .setMIFlags(MachineInstr::FrameSetup) |
| 11811 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11812 | .add(MO: condCodeOp()); |
| 11813 | |
| 11814 | MI.eraseFromParent(); |
| 11815 | return MBB; |
| 11816 | } |
| 11817 | |
| 11818 | MachineBasicBlock * |
| 11819 | ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI, |
| 11820 | MachineBasicBlock *MBB) const { |
| 11821 | DebugLoc DL = MI.getDebugLoc(); |
| 11822 | MachineFunction *MF = MBB->getParent(); |
| 11823 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
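
// Schematically, the check emitted below is roughly:
//   cmp   rDivisor, #0
//   beq   TrapBB                   ; TrapBB contains __brkdiv0
//   ; otherwise fall through into ContBB
// (Mnemonics are illustrative only.)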
| 11824 | |
| 11825 | MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock(); |
| 11826 | MF->insert(MBBI: ++MBB->getIterator(), MBB: ContBB); |
| 11827 | ContBB->splice(Where: ContBB->begin(), Other: MBB, |
| 11828 | From: std::next(x: MachineBasicBlock::iterator(MI)), To: MBB->end()); |
| 11829 | ContBB->transferSuccessorsAndUpdatePHIs(FromMBB: MBB); |
| 11830 | MBB->addSuccessor(Succ: ContBB); |
| 11831 | |
| 11832 | MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); |
| 11833 | BuildMI(BB: TrapBB, MIMD: DL, MCID: TII->get(Opcode: ARM::t__brkdiv0)); |
| 11834 | MF->push_back(MBB: TrapBB); |
| 11835 | MBB->addSuccessor(Succ: TrapBB); |
| 11836 | |
| 11837 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII->get(Opcode: ARM::tCMPi8)) |
| 11838 | .addReg(RegNo: MI.getOperand(i: 0).getReg()) |
| 11839 | .addImm(Val: 0) |
| 11840 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11841 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII->get(Opcode: ARM::t2Bcc)) |
| 11842 | .addMBB(MBB: TrapBB) |
| 11843 | .addImm(Val: ARMCC::EQ) |
| 11844 | .addReg(RegNo: ARM::CPSR); |
| 11845 | |
| 11846 | MI.eraseFromParent(); |
| 11847 | return ContBB; |
| 11848 | } |
| 11849 | |
| 11850 | // The CPSR operand of SelectItr might be missing a kill marker |
| 11851 | // because there were multiple uses of CPSR, and ISel didn't know |
| 11852 | // which to mark. Figure out whether SelectItr should have had a |
| 11853 | // kill marker, and set it if it should. Returns the correct kill |
| 11854 | // marker value. |
| 11855 | static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr, |
| 11856 | MachineBasicBlock* BB, |
| 11857 | const TargetRegisterInfo* TRI) { |
| 11858 | // Scan forward through BB for a use/def of CPSR. |
| 11859 | MachineBasicBlock::iterator miI(std::next(x: SelectItr)); |
| 11860 | for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) { |
| 11861 | const MachineInstr& mi = *miI; |
| 11862 | if (mi.readsRegister(Reg: ARM::CPSR, /*TRI=*/nullptr)) |
| 11863 | return false; |
| 11864 | if (mi.definesRegister(Reg: ARM::CPSR, /*TRI=*/nullptr)) |
| 11865 | break; // Should have kill-flag - update below. |
| 11866 | } |
| 11867 | |
| 11868 | // If we hit the end of the block, check whether CPSR is live into a |
| 11869 | // successor. |
| 11870 | if (miI == BB->end()) { |
| 11871 | for (MachineBasicBlock *Succ : BB->successors()) |
| 11872 | if (Succ->isLiveIn(Reg: ARM::CPSR)) |
| 11873 | return false; |
| 11874 | } |
| 11875 | |
| 11876 | // We found a def, or hit the end of the basic block and CPSR wasn't live |
| 11877 | // out. SelectMI should have a kill flag on CPSR. |
| 11878 | SelectItr->addRegisterKilled(IncomingReg: ARM::CPSR, RegInfo: TRI); |
| 11879 | return true; |
| 11880 | } |
| 11881 | |
/// Adds logic in the loop entry MBB to calculate the loop iteration count and
/// adds t2WhileLoopSetup and t2WhileLoopStart to generate a WLS loop.
| 11884 | static Register genTPEntry(MachineBasicBlock *TpEntry, |
| 11885 | MachineBasicBlock *TpLoopBody, |
| 11886 | MachineBasicBlock *TpExit, Register OpSizeReg, |
| 11887 | const TargetInstrInfo *TII, DebugLoc Dl, |
| 11888 | MachineRegisterInfo &MRI) { |
| 11889 | // Calculates loop iteration count = ceil(n/16) = (n + 15) >> 4. |
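// Schematically, roughly:
//   add.w  rTmp, rSize, #15
//   lsr.w  rCnt, rTmp, #4
//   wls    lr, rCnt, TpExit        ; t2WhileLoopSetup + t2WhileLoopStart
//   b      TpLoopBody
// (Illustrative only; the WLS pseudos are resolved by later passes.)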
| 11890 | Register AddDestReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11891 | BuildMI(BB: TpEntry, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2ADDri), DestReg: AddDestReg) |
| 11892 | .addUse(RegNo: OpSizeReg) |
| 11893 | .addImm(Val: 15) |
| 11894 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11895 | .addReg(RegNo: 0); |
| 11896 | |
| 11897 | Register LsrDestReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11898 | BuildMI(BB: TpEntry, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2LSRri), DestReg: LsrDestReg) |
| 11899 | .addUse(RegNo: AddDestReg, Flags: RegState::Kill) |
| 11900 | .addImm(Val: 4) |
| 11901 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11902 | .addReg(RegNo: 0); |
| 11903 | |
| 11904 | Register TotalIterationsReg = MRI.createVirtualRegister(RegClass: &ARM::GPRlrRegClass); |
| 11905 | BuildMI(BB: TpEntry, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2WhileLoopSetup), DestReg: TotalIterationsReg) |
| 11906 | .addUse(RegNo: LsrDestReg, Flags: RegState::Kill); |
| 11907 | |
| 11908 | BuildMI(BB: TpEntry, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2WhileLoopStart)) |
| 11909 | .addUse(RegNo: TotalIterationsReg) |
| 11910 | .addMBB(MBB: TpExit); |
| 11911 | |
| 11912 | BuildMI(BB: TpEntry, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2B)) |
| 11913 | .addMBB(MBB: TpLoopBody) |
| 11914 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11915 | |
| 11916 | return TotalIterationsReg; |
| 11917 | } |
| 11918 | |
/// Adds logic in the loopBody MBB to generate MVE_VCTP, t2LoopDec and
/// t2LoopEnd. These are used by later passes to generate tail-predicated
/// loops.
| 11922 | static void genTPLoopBody(MachineBasicBlock *TpLoopBody, |
| 11923 | MachineBasicBlock *TpEntry, MachineBasicBlock *TpExit, |
| 11924 | const TargetInstrInfo *TII, DebugLoc Dl, |
| 11925 | MachineRegisterInfo &MRI, Register OpSrcReg, |
| 11926 | Register OpDestReg, Register ElementCountReg, |
| 11927 | Register TotalIterationsReg, bool IsMemcpy) { |
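// Schematically, the loop body built below is roughly:
//   <PHIs for src (memcpy only), dst, loop counter, element counter>
//   vctp.8  vpr, rElems            ; MVE_VCTP8
//   sub.w   rElems, rElems, #16
//   vpst ; vldrbt.u8 qTmp, [rSrc], #16   ; memcpy only
//   vpst ; vstrbt.8  qTmp, [rDst], #16
//   <t2LoopDec / t2LoopEnd branch back to TpLoopBody, else to TpExit>
// (Mnemonics and register names are illustrative only.)
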
| 11928 | // First insert 4 PHI nodes for: Current pointer to Src (if memcpy), Dest |
| 11929 | // array, loop iteration counter, predication counter. |
| 11930 | |
| 11931 | Register SrcPhiReg, CurrSrcReg; |
| 11932 | if (IsMemcpy) { |
| 11933 | // Current position in the src array |
| 11934 | SrcPhiReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11935 | CurrSrcReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11936 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: SrcPhiReg) |
| 11937 | .addUse(RegNo: OpSrcReg) |
| 11938 | .addMBB(MBB: TpEntry) |
| 11939 | .addUse(RegNo: CurrSrcReg) |
| 11940 | .addMBB(MBB: TpLoopBody); |
| 11941 | } |
| 11942 | |
| 11943 | // Current position in the dest array |
| 11944 | Register DestPhiReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11945 | Register CurrDestReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11946 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: DestPhiReg) |
| 11947 | .addUse(RegNo: OpDestReg) |
| 11948 | .addMBB(MBB: TpEntry) |
| 11949 | .addUse(RegNo: CurrDestReg) |
| 11950 | .addMBB(MBB: TpLoopBody); |
| 11951 | |
| 11952 | // Current loop counter |
| 11953 | Register LoopCounterPhiReg = MRI.createVirtualRegister(RegClass: &ARM::GPRlrRegClass); |
| 11954 | Register RemainingLoopIterationsReg = |
| 11955 | MRI.createVirtualRegister(RegClass: &ARM::GPRlrRegClass); |
| 11956 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: LoopCounterPhiReg) |
| 11957 | .addUse(RegNo: TotalIterationsReg) |
| 11958 | .addMBB(MBB: TpEntry) |
| 11959 | .addUse(RegNo: RemainingLoopIterationsReg) |
| 11960 | .addMBB(MBB: TpLoopBody); |
| 11961 | |
| 11962 | // Predication counter |
| 11963 | Register PredCounterPhiReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11964 | Register RemainingElementsReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11965 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: PredCounterPhiReg) |
| 11966 | .addUse(RegNo: ElementCountReg) |
| 11967 | .addMBB(MBB: TpEntry) |
| 11968 | .addUse(RegNo: RemainingElementsReg) |
| 11969 | .addMBB(MBB: TpLoopBody); |
| 11970 | |
| 11971 | // Pass predication counter to VCTP |
| 11972 | Register VccrReg = MRI.createVirtualRegister(RegClass: &ARM::VCCRRegClass); |
| 11973 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::MVE_VCTP8), DestReg: VccrReg) |
| 11974 | .addUse(RegNo: PredCounterPhiReg) |
| 11975 | .addImm(Val: ARMVCC::None) |
| 11976 | .addReg(RegNo: 0) |
| 11977 | .addReg(RegNo: 0); |
| 11978 | |
| 11979 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2SUBri), DestReg: RemainingElementsReg) |
| 11980 | .addUse(RegNo: PredCounterPhiReg) |
| 11981 | .addImm(Val: 16) |
| 11982 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11983 | .addReg(RegNo: 0); |
| 11984 | |
| 11985 | // VLDRB (only if memcpy) and VSTRB instructions, predicated using VPR |
| 11986 | Register SrcValueReg; |
| 11987 | if (IsMemcpy) { |
| 11988 | SrcValueReg = MRI.createVirtualRegister(RegClass: &ARM::MQPRRegClass); |
| 11989 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::MVE_VLDRBU8_post)) |
| 11990 | .addDef(RegNo: CurrSrcReg) |
| 11991 | .addDef(RegNo: SrcValueReg) |
| 11992 | .addReg(RegNo: SrcPhiReg) |
| 11993 | .addImm(Val: 16) |
| 11994 | .addImm(Val: ARMVCC::Then) |
| 11995 | .addUse(RegNo: VccrReg) |
| 11996 | .addReg(RegNo: 0); |
| 11997 | } else |
| 11998 | SrcValueReg = OpSrcReg; |
| 11999 | |
| 12000 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::MVE_VSTRBU8_post)) |
| 12001 | .addDef(RegNo: CurrDestReg) |
| 12002 | .addUse(RegNo: SrcValueReg) |
| 12003 | .addReg(RegNo: DestPhiReg) |
| 12004 | .addImm(Val: 16) |
| 12005 | .addImm(Val: ARMVCC::Then) |
| 12006 | .addUse(RegNo: VccrReg) |
| 12007 | .addReg(RegNo: 0); |
| 12008 | |
// Add the pseudo instructions for decrementing the loop counter and marking
// the end: t2LoopDec and t2LoopEnd.
| 12011 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2LoopDec), DestReg: RemainingLoopIterationsReg) |
| 12012 | .addUse(RegNo: LoopCounterPhiReg) |
| 12013 | .addImm(Val: 1); |
| 12014 | |
| 12015 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2LoopEnd)) |
| 12016 | .addUse(RegNo: RemainingLoopIterationsReg) |
| 12017 | .addMBB(MBB: TpLoopBody); |
| 12018 | |
| 12019 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2B)) |
| 12020 | .addMBB(MBB: TpExit) |
| 12021 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 12022 | } |
| 12023 | |
| 12024 | MachineBasicBlock * |
| 12025 | ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, |
| 12026 | MachineBasicBlock *BB) const { |
| 12027 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 12028 | DebugLoc dl = MI.getDebugLoc(); |
| 12029 | bool isThumb2 = Subtarget->isThumb2(); |
| 12030 | switch (MI.getOpcode()) { |
| 12031 | default: { |
| 12032 | MI.print(OS&: errs()); |
| 12033 | llvm_unreachable("Unexpected instr type to insert" ); |
| 12034 | } |
| 12035 | |
| 12036 | // Thumb1 post-indexed loads are really just single-register LDMs. |
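// e.g., roughly:  ldr r0, [r1], #4  ==>  ldm r1!, {r0}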
| 12037 | case ARM::tLDR_postidx: { |
| 12038 | MachineOperand Def(MI.getOperand(i: 1)); |
| 12039 | BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDMIA_UPD)) |
| 12040 | .add(MO: Def) // Rn_wb |
| 12041 | .add(MO: MI.getOperand(i: 2)) // Rn |
| 12042 | .add(MO: MI.getOperand(i: 3)) // PredImm |
| 12043 | .add(MO: MI.getOperand(i: 4)) // PredReg |
| 12044 | .add(MO: MI.getOperand(i: 0)) // Rt |
| 12045 | .cloneMemRefs(OtherMI: MI); |
| 12046 | MI.eraseFromParent(); |
| 12047 | return BB; |
| 12048 | } |
| 12049 | |
| 12050 | case ARM::MVE_MEMCPYLOOPINST: |
| 12051 | case ARM::MVE_MEMSETLOOPINST: { |
| 12052 | |
// The transformation below expands the MVE_MEMCPYLOOPINST/MVE_MEMSETLOOPINST
// pseudo into a tail-predicated (TP) loop. It adds the instructions to
// calculate the iteration count = ceil(size_in_bytes/16) in the TP entry
// block and adds the relevant instructions in the TP loop body to generate a
// WLSTP loop.
| 12058 | |
// Below is the relevant portion of the CFG after the transformation.
| 12060 | // The Machine Basic Blocks are shown along with branch conditions (in |
| 12061 | // brackets). Note that TP entry/exit MBBs depict the entry/exit of this |
| 12062 | // portion of the CFG and may not necessarily be the entry/exit of the |
| 12063 | // function. |
| 12064 | |
| 12065 | // (Relevant) CFG after transformation: |
| 12066 | // TP entry MBB |
| 12067 | // | |
| 12068 | // |-----------------| |
| 12069 | // (n <= 0) (n > 0) |
| 12070 | // | | |
| 12071 | // | TP loop Body MBB<--| |
| 12072 | // | | | |
| 12073 | // \ |___________| |
| 12074 | // \ / |
| 12075 | // TP exit MBB |
| 12076 | |
| 12077 | MachineFunction *MF = BB->getParent(); |
| 12078 | MachineFunctionProperties &Properties = MF->getProperties(); |
| 12079 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
| 12080 | |
| 12081 | Register OpDestReg = MI.getOperand(i: 0).getReg(); |
| 12082 | Register OpSrcReg = MI.getOperand(i: 1).getReg(); |
| 12083 | Register OpSizeReg = MI.getOperand(i: 2).getReg(); |
| 12084 | |
| 12085 | // Allocate the required MBBs and add to parent function. |
| 12086 | MachineBasicBlock *TpEntry = BB; |
| 12087 | MachineBasicBlock *TpLoopBody = MF->CreateMachineBasicBlock(); |
| 12088 | MachineBasicBlock *TpExit; |
| 12089 | |
| 12090 | MF->push_back(MBB: TpLoopBody); |
| 12091 | |
| 12092 | // If any instructions are present in the current block after |
| 12093 | // MVE_MEMCPYLOOPINST or MVE_MEMSETLOOPINST, split the current block and |
| 12094 | // move the instructions into the newly created exit block. If there are no |
| 12095 | // instructions add an explicit branch to the FallThrough block and then |
| 12096 | // split. |
| 12097 | // |
| 12098 | // The split is required for two reasons: |
// 1) A terminator (t2WhileLoopStart) will be placed at that site.
// 2) Since a TPLoopBody will be added later, any phis in successor blocks
| 12101 | // need to be updated. splitAt() already handles this. |
| 12102 | TpExit = BB->splitAt(SplitInst&: MI, UpdateLiveIns: false); |
| 12103 | if (TpExit == BB) { |
| 12104 | assert(BB->canFallThrough() && "Exit Block must be Fallthrough of the " |
| 12105 | "block containing memcpy/memset Pseudo" ); |
| 12106 | TpExit = BB->getFallThrough(); |
| 12107 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2B)) |
| 12108 | .addMBB(MBB: TpExit) |
| 12109 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 12110 | TpExit = BB->splitAt(SplitInst&: MI, UpdateLiveIns: false); |
| 12111 | } |
| 12112 | |
| 12113 | // Add logic for iteration count |
| 12114 | Register TotalIterationsReg = |
| 12115 | genTPEntry(TpEntry, TpLoopBody, TpExit, OpSizeReg, TII, Dl: dl, MRI); |
| 12116 | |
| 12117 | // Add the vectorized (and predicated) loads/store instructions |
| 12118 | bool IsMemcpy = MI.getOpcode() == ARM::MVE_MEMCPYLOOPINST; |
| 12119 | genTPLoopBody(TpLoopBody, TpEntry, TpExit, TII, Dl: dl, MRI, OpSrcReg, |
| 12120 | OpDestReg, ElementCountReg: OpSizeReg, TotalIterationsReg, IsMemcpy); |
| 12121 | |
| 12122 | // Required to avoid conflict with the MachineVerifier during testing. |
| 12123 | Properties.resetNoPHIs(); |
| 12124 | |
| 12125 | // Connect the blocks |
| 12126 | TpEntry->addSuccessor(Succ: TpLoopBody); |
| 12127 | TpLoopBody->addSuccessor(Succ: TpLoopBody); |
| 12128 | TpLoopBody->addSuccessor(Succ: TpExit); |
| 12129 | |
| 12130 | // Reorder for a more natural layout |
| 12131 | TpLoopBody->moveAfter(NewBefore: TpEntry); |
| 12132 | TpExit->moveAfter(NewBefore: TpLoopBody); |
| 12133 | |
// Finally, remove the memcpy/memset pseudo instruction
| 12135 | MI.eraseFromParent(); |
| 12136 | |
| 12137 | // Return the exit block as it may contain other instructions requiring a |
| 12138 | // custom inserter |
| 12139 | return TpExit; |
| 12140 | } |
| 12141 | |
// The Thumb2 pre-indexed stores have the same MI operands; they are just
// defined differently in the .td files than in the isel patterns, so
// they need pseudos.
| 12145 | case ARM::t2STR_preidx: |
| 12146 | MI.setDesc(TII->get(Opcode: ARM::t2STR_PRE)); |
| 12147 | return BB; |
| 12148 | case ARM::t2STRB_preidx: |
| 12149 | MI.setDesc(TII->get(Opcode: ARM::t2STRB_PRE)); |
| 12150 | return BB; |
| 12151 | case ARM::t2STRH_preidx: |
| 12152 | MI.setDesc(TII->get(Opcode: ARM::t2STRH_PRE)); |
| 12153 | return BB; |
| 12154 | |
| 12155 | case ARM::STRi_preidx: |
| 12156 | case ARM::STRBi_preidx: { |
| 12157 | unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM |
| 12158 | : ARM::STRB_PRE_IMM; |
| 12159 | // Decode the offset. |
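// The AM2 immediate packs the add/sub flag together with the 12-bit offset;
// recover the signed byte offset expected by the *_PRE_IMM form.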
| 12160 | unsigned Offset = MI.getOperand(i: 4).getImm(); |
| 12161 | bool isSub = ARM_AM::getAM2Op(AM2Opc: Offset) == ARM_AM::sub; |
| 12162 | Offset = ARM_AM::getAM2Offset(AM2Opc: Offset); |
| 12163 | if (isSub) |
| 12164 | Offset = -Offset; |
| 12165 | |
| 12166 | MachineMemOperand *MMO = *MI.memoperands_begin(); |
| 12167 | BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: NewOpc)) |
| 12168 | .add(MO: MI.getOperand(i: 0)) // Rn_wb |
| 12169 | .add(MO: MI.getOperand(i: 1)) // Rt |
| 12170 | .add(MO: MI.getOperand(i: 2)) // Rn |
| 12171 | .addImm(Val: Offset) // offset (skip GPR==zero_reg) |
| 12172 | .add(MO: MI.getOperand(i: 5)) // pred |
| 12173 | .add(MO: MI.getOperand(i: 6)) |
| 12174 | .addMemOperand(MMO); |
| 12175 | MI.eraseFromParent(); |
| 12176 | return BB; |
| 12177 | } |
| 12178 | case ARM::STRr_preidx: |
| 12179 | case ARM::STRBr_preidx: |
| 12180 | case ARM::STRH_preidx: { |
| 12181 | unsigned NewOpc; |
| 12182 | switch (MI.getOpcode()) { |
| 12183 | default: llvm_unreachable("unexpected opcode!" ); |
| 12184 | case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break; |
| 12185 | case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break; |
| 12186 | case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break; |
| 12187 | } |
| 12188 | MachineInstrBuilder MIB = BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: NewOpc)); |
| 12189 | for (const MachineOperand &MO : MI.operands()) |
| 12190 | MIB.add(MO); |
| 12191 | MI.eraseFromParent(); |
| 12192 | return BB; |
| 12193 | } |
| 12194 | |
| 12195 | case ARM::tMOVCCr_pseudo: { |
| 12196 | // To "insert" a SELECT_CC instruction, we actually have to insert the |
| 12197 | // diamond control-flow pattern. The incoming instruction knows the |
| 12198 | // destination vreg to set, the condition code register to branch on, the |
| 12199 | // true/false values to select between, and a branch opcode to use. |
| 12200 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); |
| 12201 | MachineFunction::iterator It = ++BB->getIterator(); |
| 12202 | |
| 12203 | // thisMBB: |
| 12204 | // ... |
| 12205 | // TrueVal = ... |
| 12206 | // cmpTY ccX, r1, r2 |
| 12207 | // bCC copy1MBB |
| 12208 | // fallthrough --> copy0MBB |
| 12209 | MachineBasicBlock *thisMBB = BB; |
| 12210 | MachineFunction *F = BB->getParent(); |
| 12211 | MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(BB: LLVM_BB); |
| 12212 | MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(BB: LLVM_BB); |
| 12213 | F->insert(MBBI: It, MBB: copy0MBB); |
| 12214 | F->insert(MBBI: It, MBB: sinkMBB); |
| 12215 | |
| 12216 | // Set the call frame size on entry to the new basic blocks. |
| 12217 | unsigned CallFrameSize = TII->getCallFrameSizeAt(MI); |
| 12218 | copy0MBB->setCallFrameSize(CallFrameSize); |
| 12219 | sinkMBB->setCallFrameSize(CallFrameSize); |
| 12220 | |
| 12221 | // Check whether CPSR is live past the tMOVCCr_pseudo. |
| 12222 | const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 12223 | if (!MI.killsRegister(Reg: ARM::CPSR, /*TRI=*/nullptr) && |
| 12224 | !checkAndUpdateCPSRKill(SelectItr: MI, BB: thisMBB, TRI)) { |
| 12225 | copy0MBB->addLiveIn(PhysReg: ARM::CPSR); |
| 12226 | sinkMBB->addLiveIn(PhysReg: ARM::CPSR); |
| 12227 | } |
| 12228 | |
| 12229 | // Transfer the remainder of BB and its successor edges to sinkMBB. |
| 12230 | sinkMBB->splice(Where: sinkMBB->begin(), Other: BB, |
| 12231 | From: std::next(x: MachineBasicBlock::iterator(MI)), To: BB->end()); |
| 12232 | sinkMBB->transferSuccessorsAndUpdatePHIs(FromMBB: BB); |
| 12233 | |
| 12234 | BB->addSuccessor(Succ: copy0MBB); |
| 12235 | BB->addSuccessor(Succ: sinkMBB); |
| 12236 | |
| 12237 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::tBcc)) |
| 12238 | .addMBB(MBB: sinkMBB) |
| 12239 | .addImm(Val: MI.getOperand(i: 3).getImm()) |
| 12240 | .addReg(RegNo: MI.getOperand(i: 4).getReg()); |
| 12241 | |
| 12242 | // copy0MBB: |
| 12243 | // %FalseValue = ... |
| 12244 | // # fallthrough to sinkMBB |
| 12245 | BB = copy0MBB; |
| 12246 | |
| 12247 | // Update machine-CFG edges |
| 12248 | BB->addSuccessor(Succ: sinkMBB); |
| 12249 | |
| 12250 | // sinkMBB: |
| 12251 | // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] |
| 12252 | // ... |
| 12253 | BB = sinkMBB; |
| 12254 | BuildMI(BB&: *BB, I: BB->begin(), MIMD: dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: MI.getOperand(i: 0).getReg()) |
| 12255 | .addReg(RegNo: MI.getOperand(i: 1).getReg()) |
| 12256 | .addMBB(MBB: copy0MBB) |
| 12257 | .addReg(RegNo: MI.getOperand(i: 2).getReg()) |
| 12258 | .addMBB(MBB: thisMBB); |
| 12259 | |
| 12260 | MI.eraseFromParent(); // The pseudo instruction is gone now. |
| 12261 | return BB; |
| 12262 | } |
| 12263 | |
| 12264 | case ARM::BCCi64: |
| 12265 | case ARM::BCCZi64: { |
| 12266 | // If there is an unconditional branch to the other successor, remove it. |
| 12267 | BB->erase(I: std::next(x: MachineBasicBlock::iterator(MI)), E: BB->end()); |
| 12268 | |
| 12269 | // Compare both parts that make up the double comparison separately for |
| 12270 | // equality. |
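// Schematically (register-RHS form; destMBB and exitMBB may be swapped
// below when the pseudo's condition is NE):
//   cmp   lhs_lo, rhs_lo
//   cmpeq lhs_hi, rhs_hi           ; only compared if the low halves matched
//   beq   destMBB
//   b     exitMBB
// (Mnemonics are illustrative only.)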
| 12271 | bool RHSisZero = MI.getOpcode() == ARM::BCCZi64; |
| 12272 | |
| 12273 | Register LHS1 = MI.getOperand(i: 1).getReg(); |
| 12274 | Register LHS2 = MI.getOperand(i: 2).getReg(); |
| 12275 | if (RHSisZero) { |
| 12276 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: isThumb2 ? ARM::t2CMPri : ARM::CMPri)) |
| 12277 | .addReg(RegNo: LHS1) |
| 12278 | .addImm(Val: 0) |
| 12279 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 12280 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: isThumb2 ? ARM::t2CMPri : ARM::CMPri)) |
| 12281 | .addReg(RegNo: LHS2).addImm(Val: 0) |
| 12282 | .addImm(Val: ARMCC::EQ).addReg(RegNo: ARM::CPSR); |
| 12283 | } else { |
| 12284 | Register RHS1 = MI.getOperand(i: 3).getReg(); |
| 12285 | Register RHS2 = MI.getOperand(i: 4).getReg(); |
| 12286 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) |
| 12287 | .addReg(RegNo: LHS1) |
| 12288 | .addReg(RegNo: RHS1) |
| 12289 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 12290 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) |
| 12291 | .addReg(RegNo: LHS2).addReg(RegNo: RHS2) |
| 12292 | .addImm(Val: ARMCC::EQ).addReg(RegNo: ARM::CPSR); |
| 12293 | } |
| 12294 | |
| 12295 | MachineBasicBlock *destMBB = MI.getOperand(i: RHSisZero ? 3 : 5).getMBB(); |
| 12296 | MachineBasicBlock *exitMBB = OtherSucc(MBB: BB, Succ: destMBB); |
| 12297 | if (MI.getOperand(i: 0).getImm() == ARMCC::NE) |
| 12298 | std::swap(a&: destMBB, b&: exitMBB); |
| 12299 | |
| 12300 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: isThumb2 ? ARM::t2Bcc : ARM::Bcc)) |
| 12301 | .addMBB(MBB: destMBB).addImm(Val: ARMCC::EQ).addReg(RegNo: ARM::CPSR); |
| 12302 | if (isThumb2) |
| 12303 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2B)) |
| 12304 | .addMBB(MBB: exitMBB) |
| 12305 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 12306 | else |
BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::B)).addMBB(MBB: exitMBB);
| 12308 | |
| 12309 | MI.eraseFromParent(); // The pseudo instruction is gone now. |
| 12310 | return BB; |
| 12311 | } |
| 12312 | |
| 12313 | case ARM::Int_eh_sjlj_setjmp: |
| 12314 | case ARM::Int_eh_sjlj_setjmp_nofp: |
| 12315 | case ARM::tInt_eh_sjlj_setjmp: |
| 12316 | case ARM::t2Int_eh_sjlj_setjmp: |
| 12317 | case ARM::t2Int_eh_sjlj_setjmp_nofp: |
| 12318 | return BB; |
| 12319 | |
| 12320 | case ARM::Int_eh_sjlj_setup_dispatch: |
| 12321 | EmitSjLjDispatchBlock(MI, MBB: BB); |
| 12322 | return BB; |
| 12323 | |
| 12324 | case ARM::ABS: |
| 12325 | case ARM::t2ABS: { |
// To insert an ABS instruction, we have to insert the
// diamond control-flow pattern. The incoming instruction knows the
// source vreg to test against 0 and the destination vreg to set.
| 12331 | // It transforms |
| 12332 | // V1 = ABS V0 |
| 12333 | // into |
| 12334 | // V2 = MOVS V0 |
| 12335 | // BCC (branch to SinkBB if V0 >= 0) |
| 12336 | // RSBBB: V3 = RSBri V2, 0 (compute ABS if V2 < 0) |
| 12337 | // SinkBB: V1 = PHI(V2, V3) |
| 12338 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); |
| 12339 | MachineFunction::iterator BBI = ++BB->getIterator(); |
| 12340 | MachineFunction *Fn = BB->getParent(); |
| 12341 | MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(BB: LLVM_BB); |
| 12342 | MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(BB: LLVM_BB); |
| 12343 | Fn->insert(MBBI: BBI, MBB: RSBBB); |
| 12344 | Fn->insert(MBBI: BBI, MBB: SinkBB); |
| 12345 | |
| 12346 | Register ABSSrcReg = MI.getOperand(i: 1).getReg(); |
| 12347 | Register ABSDstReg = MI.getOperand(i: 0).getReg(); |
bool ABSSrcKill = MI.getOperand(i: 1).isKill();
| 12349 | bool isThumb2 = Subtarget->isThumb2(); |
| 12350 | MachineRegisterInfo &MRI = Fn->getRegInfo(); |
| 12351 | // In Thumb mode S must not be specified if source register is the SP or |
| 12352 | // PC and if destination register is the SP, so restrict register class |
| 12353 | Register NewRsbDstReg = MRI.createVirtualRegister( |
| 12354 | RegClass: isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass); |
| 12355 | |
| 12356 | // Transfer the remainder of BB and its successor edges to sinkMBB. |
| 12357 | SinkBB->splice(Where: SinkBB->begin(), Other: BB, |
| 12358 | From: std::next(x: MachineBasicBlock::iterator(MI)), To: BB->end()); |
| 12359 | SinkBB->transferSuccessorsAndUpdatePHIs(FromMBB: BB); |
| 12360 | |
| 12361 | BB->addSuccessor(Succ: RSBBB); |
| 12362 | BB->addSuccessor(Succ: SinkBB); |
| 12363 | |
// RSBBB falls through to SinkBB
| 12365 | RSBBB->addSuccessor(Succ: SinkBB); |
| 12366 | |
| 12367 | // insert a cmp at the end of BB |
| 12368 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: isThumb2 ? ARM::t2CMPri : ARM::CMPri)) |
| 12369 | .addReg(RegNo: ABSSrcReg) |
| 12370 | .addImm(Val: 0) |
| 12371 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 12372 | |
| 12373 | // insert a bcc with opposite CC to ARMCC::MI at the end of BB |
| 12374 | BuildMI(BB, MIMD: dl, |
| 12375 | MCID: TII->get(Opcode: isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(MBB: SinkBB) |
| 12376 | .addImm(Val: ARMCC::getOppositeCondition(CC: ARMCC::MI)).addReg(RegNo: ARM::CPSR); |
| 12377 | |
| 12378 | // insert rsbri in RSBBB |
| 12379 | // Note: BCC and rsbri will be converted into predicated rsbmi |
| 12380 | // by if-conversion pass |
| 12381 | BuildMI(BB&: *RSBBB, I: RSBBB->begin(), MIMD: dl, |
| 12382 | MCID: TII->get(Opcode: isThumb2 ? ARM::t2RSBri : ARM::RSBri), DestReg: NewRsbDstReg) |
.addReg(RegNo: ABSSrcReg, flags: ABSSrcKill ? RegState::Kill : 0)
| 12384 | .addImm(Val: 0) |
| 12385 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 12386 | .add(MO: condCodeOp()); |
| 12387 | |
| 12388 | // insert PHI in SinkBB, |
| 12389 | // reuse ABSDstReg to not change uses of ABS instruction |
| 12390 | BuildMI(BB&: *SinkBB, I: SinkBB->begin(), MIMD: dl, |
| 12391 | MCID: TII->get(Opcode: ARM::PHI), DestReg: ABSDstReg) |
| 12392 | .addReg(RegNo: NewRsbDstReg).addMBB(MBB: RSBBB) |
| 12393 | .addReg(RegNo: ABSSrcReg).addMBB(MBB: BB); |
| 12394 | |
| 12395 | // remove ABS instruction |
| 12396 | MI.eraseFromParent(); |
| 12397 | |
| 12398 | // return last added BB |
| 12399 | return SinkBB; |
| 12400 | } |
| 12401 | case ARM::COPY_STRUCT_BYVAL_I32: |
| 12402 | ++NumLoopByVals; |
| 12403 | return EmitStructByval(MI, BB); |
| 12404 | case ARM::WIN__CHKSTK: |
| 12405 | return EmitLowered__chkstk(MI, MBB: BB); |
| 12406 | case ARM::WIN__DBZCHK: |
| 12407 | return EmitLowered__dbzchk(MI, MBB: BB); |
| 12408 | } |
| 12409 | } |
| 12410 | |
| 12411 | /// Attaches vregs to MEMCPY that it will use as scratch registers |
| 12412 | /// when it is expanded into LDM/STM. This is done as a post-isel lowering |
| 12413 | /// instead of as a custom inserter because we need the use list from the SDNode. |
| 12414 | static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget, |
| 12415 | MachineInstr &MI, const SDNode *Node) { |
| 12416 | bool isThumb1 = Subtarget->isThumb1Only(); |
| 12417 | |
| 12418 | MachineFunction *MF = MI.getParent()->getParent(); |
| 12419 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
| 12420 | MachineInstrBuilder MIB(*MF, MI); |
| 12421 | |
| 12422 | // If the new dst/src is unused mark it as dead. |
| 12423 | if (!Node->hasAnyUseOfValue(Value: 0)) { |
| 12424 | MI.getOperand(i: 0).setIsDead(true); |
| 12425 | } |
| 12426 | if (!Node->hasAnyUseOfValue(Value: 1)) { |
| 12427 | MI.getOperand(i: 1).setIsDead(true); |
| 12428 | } |
| 12429 | |
| 12430 | // The MEMCPY both defines and kills the scratch registers. |
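// Operand 4 of the MEMCPY pseudo is an immediate giving how many scratch
// registers to attach; each is added as a dead def that the later LDM/STM
// expansion can use as a temporary.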
| 12431 | for (unsigned I = 0; I != MI.getOperand(i: 4).getImm(); ++I) { |
| 12432 | Register TmpReg = MRI.createVirtualRegister(RegClass: isThumb1 ? &ARM::tGPRRegClass |
| 12433 | : &ARM::GPRRegClass); |
| 12434 | MIB.addReg(RegNo: TmpReg, flags: RegState::Define|RegState::Dead); |
| 12435 | } |
| 12436 | } |
| 12437 | |
| 12438 | void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, |
| 12439 | SDNode *Node) const { |
| 12440 | if (MI.getOpcode() == ARM::MEMCPY) { |
| 12441 | attachMEMCPYScratchRegs(Subtarget, MI, Node); |
| 12442 | return; |
| 12443 | } |
| 12444 | |
| 12445 | const MCInstrDesc *MCID = &MI.getDesc(); |
// Adjust instructions that potentially set the 's' bit after isel, i.e. ADC,
// SBC, RSB, RSC. Coming out of isel, they have an implicit CPSR def, but the
// optional operand is still set to noreg. If needed, set the optional
// operand's register to CPSR and remove the redundant implicit def.
| 12450 | // |
| 12451 | // e.g. ADCS (..., implicit-def CPSR) -> ADC (... opt:def CPSR). |
| 12452 | |
| 12453 | // Rename pseudo opcodes. |
| 12454 | unsigned NewOpc = convertAddSubFlagsOpcode(OldOpc: MI.getOpcode()); |
| 12455 | unsigned ccOutIdx; |
| 12456 | if (NewOpc) { |
| 12457 | const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo(); |
| 12458 | MCID = &TII->get(Opcode: NewOpc); |
| 12459 | |
| 12460 | assert(MCID->getNumOperands() == |
| 12461 | MI.getDesc().getNumOperands() + 5 - MI.getDesc().getSize() |
| 12462 | && "converted opcode should be the same except for cc_out" |
| 12463 | " (and, on Thumb1, pred)" ); |
| 12464 | |
| 12465 | MI.setDesc(*MCID); |
| 12466 | |
| 12467 | // Add the optional cc_out operand |
| 12468 | MI.addOperand(Op: MachineOperand::CreateReg(Reg: 0, /*isDef=*/true)); |
| 12469 | |
| 12470 | // On Thumb1, move all input operands to the end, then add the predicate |
| 12471 | if (Subtarget->isThumb1Only()) { |
| 12472 | for (unsigned c = MCID->getNumOperands() - 4; c--;) { |
| 12473 | MI.addOperand(Op: MI.getOperand(i: 1)); |
| 12474 | MI.removeOperand(OpNo: 1); |
| 12475 | } |
| 12476 | |
| 12477 | // Restore the ties |
| 12478 | for (unsigned i = MI.getNumOperands(); i--;) { |
| 12479 | const MachineOperand& op = MI.getOperand(i); |
| 12480 | if (op.isReg() && op.isUse()) { |
| 12481 | int DefIdx = MCID->getOperandConstraint(OpNum: i, Constraint: MCOI::TIED_TO); |
| 12482 | if (DefIdx != -1) |
| 12483 | MI.tieOperands(DefIdx, UseIdx: i); |
| 12484 | } |
| 12485 | } |
| 12486 | |
| 12487 | MI.addOperand(Op: MachineOperand::CreateImm(Val: ARMCC::AL)); |
| 12488 | MI.addOperand(Op: MachineOperand::CreateReg(Reg: 0, /*isDef=*/false)); |
| 12489 | ccOutIdx = 1; |
| 12490 | } else |
| 12491 | ccOutIdx = MCID->getNumOperands() - 1; |
| 12492 | } else |
| 12493 | ccOutIdx = MCID->getNumOperands() - 1; |
| 12494 | |
| 12495 | // Any ARM instruction that sets the 's' bit should specify an optional |
| 12496 | // "cc_out" operand in the last operand position. |
| 12497 | if (!MI.hasOptionalDef() || !MCID->operands()[ccOutIdx].isOptionalDef()) { |
| 12498 | assert(!NewOpc && "Optional cc_out operand required" ); |
| 12499 | return; |
| 12500 | } |
| 12501 | // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it |
| 12502 | // since we already have an optional CPSR def. |
| 12503 | bool definesCPSR = false; |
| 12504 | bool deadCPSR = false; |
| 12505 | for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e; |
| 12506 | ++i) { |
| 12507 | const MachineOperand &MO = MI.getOperand(i); |
| 12508 | if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) { |
| 12509 | definesCPSR = true; |
| 12510 | if (MO.isDead()) |
| 12511 | deadCPSR = true; |
| 12512 | MI.removeOperand(OpNo: i); |
| 12513 | break; |
| 12514 | } |
| 12515 | } |
| 12516 | if (!definesCPSR) { |
| 12517 | assert(!NewOpc && "Optional cc_out operand required" ); |
| 12518 | return; |
| 12519 | } |
| 12520 | assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag" ); |
| 12521 | if (deadCPSR) { |
| 12522 | assert(!MI.getOperand(ccOutIdx).getReg() && |
| 12523 | "expect uninitialized optional cc_out operand" ); |
| 12524 | // Thumb1 instructions must have the S bit even if the CPSR is dead. |
| 12525 | if (!Subtarget->isThumb1Only()) |
| 12526 | return; |
| 12527 | } |
| 12528 | |
| 12529 | // If this instruction was defined with an optional CPSR def and its dag node |
| 12530 | // had a live implicit CPSR def, then activate the optional CPSR def. |
| 12531 | MachineOperand &MO = MI.getOperand(i: ccOutIdx); |
| 12532 | MO.setReg(ARM::CPSR); |
| 12533 | MO.setIsDef(true); |
| 12534 | } |
| 12535 | |
| 12536 | //===----------------------------------------------------------------------===// |
| 12537 | // ARM Optimization Hooks |
| 12538 | //===----------------------------------------------------------------------===// |
| 12539 | |
| 12540 | // Helper function that checks if N is a null or all ones constant. |
| 12541 | static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) { |
| 12542 | return AllOnes ? isAllOnesConstant(V: N) : isNullConstant(V: N); |
| 12543 | } |
| 12544 | |
| 12545 | // Return true if N is conditionally 0 or all ones. |
| 12546 | // Detects these expressions where cc is an i1 value: |
| 12547 | // |
| 12548 | // (select cc 0, y) [AllOnes=0] |
| 12549 | // (select cc y, 0) [AllOnes=0] |
| 12550 | // (zext cc) [AllOnes=0] |
| 12551 | // (sext cc) [AllOnes=0/1] |
| 12552 | // (select cc -1, y) [AllOnes=1] |
| 12553 | // (select cc y, -1) [AllOnes=1] |
| 12554 | // |
| 12555 | // Invert is set when N is the null/all ones constant when CC is false. |
| 12556 | // OtherOp is set to the alternative value of N. |
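// For example (illustrative), with AllOnes == false:
//   N = (select cc, 0, y)  gives Invert = false, OtherOp = y
//   N = (select cc, y, 0)  gives Invert = true,  OtherOp = y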
| 12557 | static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, |
| 12558 | SDValue &CC, bool &Invert, |
| 12559 | SDValue &OtherOp, |
| 12560 | SelectionDAG &DAG) { |
| 12561 | switch (N->getOpcode()) { |
| 12562 | default: return false; |
| 12563 | case ISD::SELECT: { |
| 12564 | CC = N->getOperand(Num: 0); |
| 12565 | SDValue N1 = N->getOperand(Num: 1); |
| 12566 | SDValue N2 = N->getOperand(Num: 2); |
| 12567 | if (isZeroOrAllOnes(N: N1, AllOnes)) { |
| 12568 | Invert = false; |
| 12569 | OtherOp = N2; |
| 12570 | return true; |
| 12571 | } |
| 12572 | if (isZeroOrAllOnes(N: N2, AllOnes)) { |
| 12573 | Invert = true; |
| 12574 | OtherOp = N1; |
| 12575 | return true; |
| 12576 | } |
| 12577 | return false; |
| 12578 | } |
| 12579 | case ISD::ZERO_EXTEND: |
| 12580 | // (zext cc) can never be the all ones value. |
| 12581 | if (AllOnes) |
| 12582 | return false; |
| 12583 | [[fallthrough]]; |
| 12584 | case ISD::SIGN_EXTEND: { |
| 12585 | SDLoc dl(N); |
| 12586 | EVT VT = N->getValueType(ResNo: 0); |
| 12587 | CC = N->getOperand(Num: 0); |
| 12588 | if (CC.getValueType() != MVT::i1 || CC.getOpcode() != ISD::SETCC) |
| 12589 | return false; |
| 12590 | Invert = !AllOnes; |
| 12591 | if (AllOnes) |
| 12592 | // When looking for an AllOnes constant, N is an sext, and the 'other' |
| 12593 | // value is 0. |
| 12594 | OtherOp = DAG.getConstant(Val: 0, DL: dl, VT); |
| 12595 | else if (N->getOpcode() == ISD::ZERO_EXTEND) |
| 12596 | // When looking for a 0 constant, N can be zext or sext. |
| 12597 | OtherOp = DAG.getConstant(Val: 1, DL: dl, VT); |
| 12598 | else |
| 12599 | OtherOp = DAG.getAllOnesConstant(DL: dl, VT); |
| 12600 | return true; |
| 12601 | } |
| 12602 | } |
| 12603 | } |
| 12604 | |
| 12605 | // Combine a constant select operand into its use: |
| 12606 | // |
| 12607 | // (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) |
| 12608 | // (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) |
| 12609 | // (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) [AllOnes=1] |
| 12610 | // (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) |
| 12611 | // (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) |
| 12612 | // |
| 12613 | // The transform is rejected if the select doesn't have a constant operand that |
| 12614 | // is null, or all ones when AllOnes is set. |
| 12615 | // |
| 12616 | // Also recognize sext/zext from i1: |
| 12617 | // |
| 12618 | // (add (zext cc), x) -> (select cc (add x, 1), x) |
| 12619 | // (add (sext cc), x) -> (select cc (add x, -1), x) |
| 12620 | // |
| 12621 | // These transformations eventually create predicated instructions. |
| 12622 | // |
| 12623 | // @param N The node to transform. |
| 12624 | // @param Slct The N operand that is a select. |
| 12625 | // @param OtherOp The other N operand (x above). |
| 12626 | // @param DCI Context. |
| 12627 | // @param AllOnes Require the select constant to be all ones instead of null. |
| 12628 | // @returns The new node, or SDValue() on failure. |
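// For example (illustrative), with AllOnes == false:
//   N    = (add (select cc, 0, 7), x)
//   Slct = (select cc, 0, 7), OtherOp = x
// is rewritten to (select cc, x, (add x, 7)), which can later be emitted as a
// predicated add.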
| 12629 | static |
| 12630 | SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, |
| 12631 | TargetLowering::DAGCombinerInfo &DCI, |
| 12632 | bool AllOnes = false) { |
| 12633 | SelectionDAG &DAG = DCI.DAG; |
| 12634 | EVT VT = N->getValueType(ResNo: 0); |
| 12635 | SDValue NonConstantVal; |
| 12636 | SDValue CCOp; |
| 12637 | bool SwapSelectOps; |
| 12638 | if (!isConditionalZeroOrAllOnes(N: Slct.getNode(), AllOnes, CC&: CCOp, Invert&: SwapSelectOps, |
| 12639 | OtherOp&: NonConstantVal, DAG)) |
| 12640 | return SDValue(); |
| 12641 | |
// Slct is now known to be the desired identity constant when CC is true.
| 12643 | SDValue TrueVal = OtherOp; |
| 12644 | SDValue FalseVal = DAG.getNode(Opcode: N->getOpcode(), DL: SDLoc(N), VT, |
| 12645 | N1: OtherOp, N2: NonConstantVal); |
| 12646 | // Unless SwapSelectOps says CC should be false. |
| 12647 | if (SwapSelectOps) |
| 12648 | std::swap(a&: TrueVal, b&: FalseVal); |
| 12649 | |
| 12650 | return DAG.getNode(Opcode: ISD::SELECT, DL: SDLoc(N), VT, |
| 12651 | N1: CCOp, N2: TrueVal, N3: FalseVal); |
| 12652 | } |
| 12653 | |
| 12654 | // Attempt combineSelectAndUse on each operand of a commutative operator N. |
| 12655 | static |
| 12656 | SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes, |
| 12657 | TargetLowering::DAGCombinerInfo &DCI) { |
| 12658 | SDValue N0 = N->getOperand(Num: 0); |
| 12659 | SDValue N1 = N->getOperand(Num: 1); |
| 12660 | if (N0.getNode()->hasOneUse()) |
| 12661 | if (SDValue Result = combineSelectAndUse(N, Slct: N0, OtherOp: N1, DCI, AllOnes)) |
| 12662 | return Result; |
| 12663 | if (N1.getNode()->hasOneUse()) |
| 12664 | if (SDValue Result = combineSelectAndUse(N, Slct: N1, OtherOp: N0, DCI, AllOnes)) |
| 12665 | return Result; |
| 12666 | return SDValue(); |
| 12667 | } |
| 12668 | |
| 12669 | static bool IsVUZPShuffleNode(SDNode *N) { |
| 12670 | // VUZP shuffle node. |
| 12671 | if (N->getOpcode() == ARMISD::VUZP) |
| 12672 | return true; |
| 12673 | |
| 12674 | // "VUZP" on i32 is an alias for VTRN. |
| 12675 | if (N->getOpcode() == ARMISD::VTRN && N->getValueType(ResNo: 0) == MVT::v2i32) |
| 12676 | return true; |
| 12677 | |
| 12678 | return false; |
| 12679 | } |
| 12680 | |
| 12681 | static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1, |
| 12682 | TargetLowering::DAGCombinerInfo &DCI, |
| 12683 | const ARMSubtarget *Subtarget) { |
| 12684 | // Look for ADD(VUZP.0, VUZP.1). |
| 12685 | if (!IsVUZPShuffleNode(N: N0.getNode()) || N0.getNode() != N1.getNode() || |
| 12686 | N0 == N1) |
| 12687 | return SDValue(); |
| 12688 | |
| 12689 | // Make sure the ADD is a 64-bit add; there is no 128-bit VPADD. |
| 12690 | if (!N->getValueType(ResNo: 0).is64BitVector()) |
| 12691 | return SDValue(); |
| 12692 | |
| 12693 | // Generate vpadd. |
| 12694 | SelectionDAG &DAG = DCI.DAG; |
| 12695 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 12696 | SDLoc dl(N); |
| 12697 | SDNode *Unzip = N0.getNode(); |
| 12698 | EVT VT = N->getValueType(ResNo: 0); |
| 12699 | |
| 12700 | SmallVector<SDValue, 8> Ops; |
| 12701 | Ops.push_back(Elt: DAG.getConstant(Val: Intrinsic::arm_neon_vpadd, DL: dl, |
| 12702 | VT: TLI.getPointerTy(DL: DAG.getDataLayout()))); |
| 12703 | Ops.push_back(Elt: Unzip->getOperand(Num: 0)); |
| 12704 | Ops.push_back(Elt: Unzip->getOperand(Num: 1)); |
| 12705 | |
| 12706 | return DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT, Ops); |
| 12707 | } |
| 12708 | |
| 12709 | static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1, |
| 12710 | TargetLowering::DAGCombinerInfo &DCI, |
| 12711 | const ARMSubtarget *Subtarget) { |
| 12712 | // Check for two extended operands. |
| 12713 | if (!(N0.getOpcode() == ISD::SIGN_EXTEND && |
| 12714 | N1.getOpcode() == ISD::SIGN_EXTEND) && |
| 12715 | !(N0.getOpcode() == ISD::ZERO_EXTEND && |
| 12716 | N1.getOpcode() == ISD::ZERO_EXTEND)) |
| 12717 | return SDValue(); |
| 12718 | |
| 12719 | SDValue N00 = N0.getOperand(i: 0); |
| 12720 | SDValue N10 = N1.getOperand(i: 0); |
| 12721 | |
| 12722 | // Look for ADD(SEXT(VUZP.0), SEXT(VUZP.1)) |
| 12723 | if (!IsVUZPShuffleNode(N: N00.getNode()) || N00.getNode() != N10.getNode() || |
| 12724 | N00 == N10) |
| 12725 | return SDValue(); |
| 12726 | |
| 12727 | // We only recognize Q register paddl here; this can't be reached until |
| 12728 | // after type legalization. |
| 12729 | if (!N00.getValueType().is64BitVector() || |
| 12730 | !N0.getValueType().is128BitVector()) |
| 12731 | return SDValue(); |
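
// At this point the pattern is, illustratively,
//   add(sext(VUZP(a, b).0), sext(VUZP(a, b).1))
// which is a pairwise add-long of the concatenation of a and b, i.e.
//   vpaddl.{s,u}N(concat(a, b))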
| 12732 | |
| 12733 | // Generate vpaddl. |
| 12734 | SelectionDAG &DAG = DCI.DAG; |
| 12735 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 12736 | SDLoc dl(N); |
| 12737 | EVT VT = N->getValueType(ResNo: 0); |
| 12738 | |
| 12739 | SmallVector<SDValue, 8> Ops; |
| 12740 | // Form vpaddl.sN or vpaddl.uN depending on the kind of extension. |
| 12741 | unsigned Opcode; |
| 12742 | if (N0.getOpcode() == ISD::SIGN_EXTEND) |
| 12743 | Opcode = Intrinsic::arm_neon_vpaddls; |
| 12744 | else |
| 12745 | Opcode = Intrinsic::arm_neon_vpaddlu; |
| 12746 | Ops.push_back(Elt: DAG.getConstant(Val: Opcode, DL: dl, |
| 12747 | VT: TLI.getPointerTy(DL: DAG.getDataLayout()))); |
| 12748 | EVT ElemTy = N00.getValueType().getVectorElementType(); |
| 12749 | unsigned NumElts = VT.getVectorNumElements(); |
| 12750 | EVT ConcatVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: ElemTy, NumElements: NumElts * 2); |
| 12751 | SDValue Concat = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: SDLoc(N), VT: ConcatVT, |
| 12752 | N1: N00.getOperand(i: 0), N2: N00.getOperand(i: 1)); |
| 12753 | Ops.push_back(Elt: Concat); |
| 12754 | |
| 12755 | return DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT, Ops); |
| 12756 | } |
| 12757 | |
| 12758 | // FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in |
| 12759 | // an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is |
| 12760 | // much easier to match. |
| 12761 | static SDValue |
| 12762 | AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1, |
| 12763 | TargetLowering::DAGCombinerInfo &DCI, |
| 12764 | const ARMSubtarget *Subtarget) { |
// Only perform the optimization after legalization and if NEON is available.
// We also expect both operands to be BUILD_VECTORs.
| 12767 | if (DCI.isBeforeLegalize() || !Subtarget->hasNEON() |
| 12768 | || N0.getOpcode() != ISD::BUILD_VECTOR |
| 12769 | || N1.getOpcode() != ISD::BUILD_VECTOR) |
| 12770 | return SDValue(); |
| 12771 | |
| 12772 | // Check output type since VPADDL operand elements can only be 8, 16, or 32. |
| 12773 | EVT VT = N->getValueType(ResNo: 0); |
| 12774 | if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64) |
| 12775 | return SDValue(); |
| 12776 | |
// Check that the vector operands are of the right form.
// N0 and N1 are BUILD_VECTOR nodes with N EXTRACT_VECTOR_ELT operands, where
// N is the number of elements in the formed vector.
// Each EXTRACT_VECTOR_ELT should have the same input vector and an odd or
// even index such that we have a pairwise add pattern.
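// Illustratively, for a pairwise add over an input vector V we expect:
//   N0 = BUILD_VECTOR (extractelt V, 0), (extractelt V, 2), ...
//   N1 = BUILD_VECTOR (extractelt V, 1), (extractelt V, 3), ...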
| 12782 | |
| 12783 | // Grab the vector that all EXTRACT_VECTOR nodes should be referencing. |
| 12784 | if (N0->getOperand(Num: 0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT) |
| 12785 | return SDValue(); |
| 12786 | SDValue Vec = N0->getOperand(Num: 0)->getOperand(Num: 0); |
| 12787 | SDNode *V = Vec.getNode(); |
| 12788 | unsigned nextIndex = 0; |
| 12789 | |
// For each operand of the ADD (both are BUILD_VECTORs), check that each of
// its operands is an EXTRACT_VECTOR_ELT with the same input vector and the
// appropriate index.
| 12793 | for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) { |
| 12794 | if (N0->getOperand(Num: i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT |
| 12795 | && N1->getOperand(Num: i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { |
| 12796 | |
| 12797 | SDValue ExtVec0 = N0->getOperand(Num: i); |
| 12798 | SDValue ExtVec1 = N1->getOperand(Num: i); |
| 12799 | |
// The first operand is the vector; verify it's the same.
| 12801 | if (V != ExtVec0->getOperand(Num: 0).getNode() || |
| 12802 | V != ExtVec1->getOperand(Num: 0).getNode()) |
| 12803 | return SDValue(); |
| 12804 | |
// The second operand is the constant index; verify it's correct.
| 12806 | ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(Val: ExtVec0->getOperand(Num: 1)); |
| 12807 | ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Val: ExtVec1->getOperand(Num: 1)); |
| 12808 | |
| 12809 | // For the constant, we want to see all the even or all the odd. |
| 12810 | if (!C0 || !C1 || C0->getZExtValue() != nextIndex |
| 12811 | || C1->getZExtValue() != nextIndex+1) |
| 12812 | return SDValue(); |
| 12813 | |
| 12814 | // Increment index. |
| 12815 | nextIndex+=2; |
| 12816 | } else |
| 12817 | return SDValue(); |
| 12818 | } |
| 12819 | |
| 12820 | // Don't generate vpaddl+vmovn; we'll match it to vpadd later. Also make sure |
| 12821 | // we're using the entire input vector, otherwise there's a size/legality |
| 12822 | // mismatch somewhere. |
| 12823 | if (nextIndex != Vec.getValueType().getVectorNumElements() || |
| 12824 | Vec.getValueType().getVectorElementType() == VT.getVectorElementType()) |
| 12825 | return SDValue(); |
| 12826 | |
| 12827 | // Create VPADDL node. |
| 12828 | SelectionDAG &DAG = DCI.DAG; |
| 12829 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 12830 | |
| 12831 | SDLoc dl(N); |
| 12832 | |
| 12833 | // Build operand list. |
| 12834 | SmallVector<SDValue, 8> Ops; |
| 12835 | Ops.push_back(Elt: DAG.getConstant(Val: Intrinsic::arm_neon_vpaddls, DL: dl, |
| 12836 | VT: TLI.getPointerTy(DL: DAG.getDataLayout()))); |
| 12837 | |
| 12838 | // Input is the vector. |
| 12839 | Ops.push_back(Elt: Vec); |
| 12840 | |
| 12841 | // Get widened type and narrowed type. |
| 12842 | MVT widenType; |
| 12843 | unsigned numElem = VT.getVectorNumElements(); |
| 12844 | |
| 12845 | EVT inputLaneType = Vec.getValueType().getVectorElementType(); |
| 12846 | switch (inputLaneType.getSimpleVT().SimpleTy) { |
| 12847 | case MVT::i8: widenType = MVT::getVectorVT(VT: MVT::i16, NumElements: numElem); break; |
| 12848 | case MVT::i16: widenType = MVT::getVectorVT(VT: MVT::i32, NumElements: numElem); break; |
| 12849 | case MVT::i32: widenType = MVT::getVectorVT(VT: MVT::i64, NumElements: numElem); break; |
| 12850 | default: |
| 12851 | llvm_unreachable("Invalid vector element type for padd optimization." ); |
| 12852 | } |
| 12853 | |
| 12854 | SDValue tmp = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: widenType, Ops); |
| 12855 | unsigned ExtOp = VT.bitsGT(VT: tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE; |
| 12856 | return DAG.getNode(Opcode: ExtOp, DL: dl, VT, Operand: tmp); |
| 12857 | } |
| 12858 | |
| 12859 | static SDValue findMUL_LOHI(SDValue V) { |
| 12860 | if (V->getOpcode() == ISD::UMUL_LOHI || |
| 12861 | V->getOpcode() == ISD::SMUL_LOHI) |
| 12862 | return V; |
| 12863 | return SDValue(); |
| 12864 | } |
| 12865 | |
| 12866 | static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode, |
| 12867 | TargetLowering::DAGCombinerInfo &DCI, |
| 12868 | const ARMSubtarget *Subtarget) { |
| 12869 | if (!Subtarget->hasBaseDSP()) |
| 12870 | return SDValue(); |
| 12871 | |
// SMLALBB, SMLALBT, SMLALTB, SMLALTT multiply two 16-bit values and
// accumulate the product into a 64-bit value. The 16-bit values will
// be sign-extended somehow or SRA'd into 32-bit values:
| 12875 | // (addc (adde (mul 16bit, 16bit), lo), hi) |
| 12876 | SDValue Mul = AddcNode->getOperand(Num: 0); |
| 12877 | SDValue Lo = AddcNode->getOperand(Num: 1); |
| 12878 | if (Mul.getOpcode() != ISD::MUL) { |
| 12879 | Lo = AddcNode->getOperand(Num: 0); |
| 12880 | Mul = AddcNode->getOperand(Num: 1); |
| 12881 | if (Mul.getOpcode() != ISD::MUL) |
| 12882 | return SDValue(); |
| 12883 | } |
| 12884 | |
| 12885 | SDValue SRA = AddeNode->getOperand(Num: 0); |
| 12886 | SDValue Hi = AddeNode->getOperand(Num: 1); |
| 12887 | if (SRA.getOpcode() != ISD::SRA) { |
| 12888 | SRA = AddeNode->getOperand(Num: 1); |
| 12889 | Hi = AddeNode->getOperand(Num: 0); |
| 12890 | if (SRA.getOpcode() != ISD::SRA) |
| 12891 | return SDValue(); |
| 12892 | } |
| 12893 | if (auto Const = dyn_cast<ConstantSDNode>(Val: SRA.getOperand(i: 1))) { |
| 12894 | if (Const->getZExtValue() != 31) |
| 12895 | return SDValue(); |
| 12896 | } else |
| 12897 | return SDValue(); |
| 12898 | |
| 12899 | if (SRA.getOperand(i: 0) != Mul) |
| 12900 | return SDValue(); |
| 12901 | |
| 12902 | SelectionDAG &DAG = DCI.DAG; |
| 12903 | SDLoc dl(AddcNode); |
| 12904 | unsigned Opcode = 0; |
| 12905 | SDValue Op0; |
| 12906 | SDValue Op1; |
| 12907 | |
| 12908 | if (isS16(Op: Mul.getOperand(i: 0), DAG) && isS16(Op: Mul.getOperand(i: 1), DAG)) { |
| 12909 | Opcode = ARMISD::SMLALBB; |
| 12910 | Op0 = Mul.getOperand(i: 0); |
| 12911 | Op1 = Mul.getOperand(i: 1); |
| 12912 | } else if (isS16(Op: Mul.getOperand(i: 0), DAG) && isSRA16(Op: Mul.getOperand(i: 1))) { |
| 12913 | Opcode = ARMISD::SMLALBT; |
| 12914 | Op0 = Mul.getOperand(i: 0); |
| 12915 | Op1 = Mul.getOperand(i: 1).getOperand(i: 0); |
| 12916 | } else if (isSRA16(Op: Mul.getOperand(i: 0)) && isS16(Op: Mul.getOperand(i: 1), DAG)) { |
| 12917 | Opcode = ARMISD::SMLALTB; |
| 12918 | Op0 = Mul.getOperand(i: 0).getOperand(i: 0); |
| 12919 | Op1 = Mul.getOperand(i: 1); |
| 12920 | } else if (isSRA16(Op: Mul.getOperand(i: 0)) && isSRA16(Op: Mul.getOperand(i: 1))) { |
| 12921 | Opcode = ARMISD::SMLALTT; |
| 12922 | Op0 = Mul->getOperand(Num: 0).getOperand(i: 0); |
| 12923 | Op1 = Mul->getOperand(Num: 1).getOperand(i: 0); |
| 12924 | } |
| 12925 | |
| 12926 | if (!Op0 || !Op1) |
| 12927 | return SDValue(); |
| 12928 | |
| 12929 | SDValue SMLAL = DAG.getNode(Opcode, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
| 12930 | N1: Op0, N2: Op1, N3: Lo, N4: Hi); |
// Replace the ADD nodes' uses with the SMLAL node's values.
| 12932 | SDValue HiMLALResult(SMLAL.getNode(), 1); |
| 12933 | SDValue LoMLALResult(SMLAL.getNode(), 0); |
| 12934 | |
| 12935 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddcNode, 0), To: LoMLALResult); |
| 12936 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddeNode, 0), To: HiMLALResult); |
| 12937 | |
| 12938 | // Return original node to notify the driver to stop replacing. |
| 12939 | SDValue resNode(AddcNode, 0); |
| 12940 | return resNode; |
| 12941 | } |
| 12942 | |
| 12943 | static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode, |
| 12944 | TargetLowering::DAGCombinerInfo &DCI, |
| 12945 | const ARMSubtarget *Subtarget) { |
| 12946 | // Look for multiply add opportunities. |
// The pattern is an ISD::UMUL_LOHI followed by two add nodes, where
// each add node consumes a value from the ISD::UMUL_LOHI and there is
// a glue link from the first add to the second add.
| 12950 | // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by |
| 12951 | // a S/UMLAL instruction. |
| 12952 | // UMUL_LOHI |
| 12953 | // / :lo \ :hi |
| 12954 | // V \ [no multiline comment] |
| 12955 | // loAdd -> ADDC | |
| 12956 | // \ :carry / |
| 12957 | // V V |
| 12958 | // ADDE <- hiAdd |
| 12959 | // |
// In the special case where only the higher part of a signed result is used,
// and the add to the low part of the result of ISD::UMUL_LOHI adds or subtracts
// a constant with the exact value of 0x80000000, we recognize we are dealing
// with a "rounded multiply and add" (or subtract) and transform it into
// either an ARMISD::SMMLAR or ARMISD::SMMLSR respectively.
| 12965 | |
| 12966 | assert((AddeSubeNode->getOpcode() == ARMISD::ADDE || |
| 12967 | AddeSubeNode->getOpcode() == ARMISD::SUBE) && |
| 12968 | "Expect an ADDE or SUBE" ); |
| 12969 | |
| 12970 | assert(AddeSubeNode->getNumOperands() == 3 && |
| 12971 | AddeSubeNode->getOperand(2).getValueType() == MVT::i32 && |
| 12972 | "ADDE node has the wrong inputs" ); |
| 12973 | |
| 12974 | // Check that we are chained to the right ADDC or SUBC node. |
| 12975 | SDNode *AddcSubcNode = AddeSubeNode->getOperand(Num: 2).getNode(); |
| 12976 | if ((AddeSubeNode->getOpcode() == ARMISD::ADDE && |
| 12977 | AddcSubcNode->getOpcode() != ARMISD::ADDC) || |
| 12978 | (AddeSubeNode->getOpcode() == ARMISD::SUBE && |
| 12979 | AddcSubcNode->getOpcode() != ARMISD::SUBC)) |
| 12980 | return SDValue(); |
| 12981 | |
| 12982 | SDValue AddcSubcOp0 = AddcSubcNode->getOperand(Num: 0); |
| 12983 | SDValue AddcSubcOp1 = AddcSubcNode->getOperand(Num: 1); |
| 12984 | |
| 12985 | // Check if the two operands are from the same mul_lohi node. |
| 12986 | if (AddcSubcOp0.getNode() == AddcSubcOp1.getNode()) |
| 12987 | return SDValue(); |
| 12988 | |
| 12989 | assert(AddcSubcNode->getNumValues() == 2 && |
| 12990 | AddcSubcNode->getValueType(0) == MVT::i32 && |
| 12991 | "Expect ADDC with two result values. First: i32" ); |
| 12992 | |
// Check that the ADDC adds the low result of the S/UMUL_LOHI. If not, it
// may be an SMLAL which multiplies two 16-bit values.
| 12995 | if (AddeSubeNode->getOpcode() == ARMISD::ADDE && |
| 12996 | AddcSubcOp0->getOpcode() != ISD::UMUL_LOHI && |
| 12997 | AddcSubcOp0->getOpcode() != ISD::SMUL_LOHI && |
| 12998 | AddcSubcOp1->getOpcode() != ISD::UMUL_LOHI && |
| 12999 | AddcSubcOp1->getOpcode() != ISD::SMUL_LOHI) |
| 13000 | return AddCombineTo64BitSMLAL16(AddcNode: AddcSubcNode, AddeNode: AddeSubeNode, DCI, Subtarget); |
| 13001 | |
| 13002 | // Check for the triangle shape. |
| 13003 | SDValue AddeSubeOp0 = AddeSubeNode->getOperand(Num: 0); |
| 13004 | SDValue AddeSubeOp1 = AddeSubeNode->getOperand(Num: 1); |
| 13005 | |
| 13006 | // Make sure that the ADDE/SUBE operands are not coming from the same node. |
| 13007 | if (AddeSubeOp0.getNode() == AddeSubeOp1.getNode()) |
| 13008 | return SDValue(); |
| 13009 | |
| 13010 | // Find the MUL_LOHI node walking up ADDE/SUBE's operands. |
| 13011 | bool IsLeftOperandMUL = false; |
| 13012 | SDValue MULOp = findMUL_LOHI(V: AddeSubeOp0); |
| 13013 | if (MULOp == SDValue()) |
| 13014 | MULOp = findMUL_LOHI(V: AddeSubeOp1); |
| 13015 | else |
| 13016 | IsLeftOperandMUL = true; |
| 13017 | if (MULOp == SDValue()) |
| 13018 | return SDValue(); |
| 13019 | |
| 13020 | // Figure out the right opcode. |
| 13021 | unsigned Opc = MULOp->getOpcode(); |
| 13022 | unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL; |
| 13023 | |
| 13024 | // Figure out the high and low input values to the MLAL node. |
| 13025 | SDValue *HiAddSub = nullptr; |
| 13026 | SDValue *LoMul = nullptr; |
| 13027 | SDValue *LowAddSub = nullptr; |
| 13028 | |
| 13029 | // Ensure that ADDE/SUBE is from high result of ISD::xMUL_LOHI. |
| 13030 | if ((AddeSubeOp0 != MULOp.getValue(R: 1)) && (AddeSubeOp1 != MULOp.getValue(R: 1))) |
| 13031 | return SDValue(); |
| 13032 | |
| 13033 | if (IsLeftOperandMUL) |
| 13034 | HiAddSub = &AddeSubeOp1; |
| 13035 | else |
| 13036 | HiAddSub = &AddeSubeOp0; |
| 13037 | |
// Ensure that LoMul and LowAddSub are taken from the correct ISD::xMUL_LOHI
// node whose low result feeds the ADDC/SUBC we are checking.
| 13040 | |
| 13041 | if (AddcSubcOp0 == MULOp.getValue(R: 0)) { |
| 13042 | LoMul = &AddcSubcOp0; |
| 13043 | LowAddSub = &AddcSubcOp1; |
| 13044 | } |
| 13045 | if (AddcSubcOp1 == MULOp.getValue(R: 0)) { |
| 13046 | LoMul = &AddcSubcOp1; |
| 13047 | LowAddSub = &AddcSubcOp0; |
| 13048 | } |
| 13049 | |
| 13050 | if (!LoMul) |
| 13051 | return SDValue(); |
| 13052 | |
| 13053 | // If HiAddSub is the same node as ADDC/SUBC or is a predecessor of ADDC/SUBC |
| 13054 | // the replacement below will create a cycle. |
| 13055 | if (AddcSubcNode == HiAddSub->getNode() || |
| 13056 | AddcSubcNode->isPredecessorOf(N: HiAddSub->getNode())) |
| 13057 | return SDValue(); |
| 13058 | |
| 13059 | // Create the merged node. |
| 13060 | SelectionDAG &DAG = DCI.DAG; |
| 13061 | |
| 13062 | // Start building operand list. |
| 13063 | SmallVector<SDValue, 8> Ops; |
| 13064 | Ops.push_back(Elt: LoMul->getOperand(i: 0)); |
| 13065 | Ops.push_back(Elt: LoMul->getOperand(i: 1)); |
| 13066 | |
// Check whether we can use SMMLAR, SMMLSR or SMMULR instead. For this to be
// the case, we must be doing signed multiplication and only use the higher
// part of the result of the MLAL; furthermore, the LowAddSub must be a
// constant addition or subtraction with the value 0x80000000.
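// Illustratively, SMMLAR conceptually computes
//   Rd = ((Ra << 32) + Rn * Rm + 0x80000000) >> 32
// i.e. the rounded high word of the multiply-accumulate, which is exactly the
// pattern being matched here.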
| 13071 | if (Subtarget->hasV6Ops() && Subtarget->hasDSP() && Subtarget->useMulOps() && |
| 13072 | FinalOpc == ARMISD::SMLAL && !AddeSubeNode->hasAnyUseOfValue(Value: 1) && |
| 13073 | LowAddSub->getNode()->getOpcode() == ISD::Constant && |
| 13074 | static_cast<ConstantSDNode *>(LowAddSub->getNode())->getZExtValue() == |
| 13075 | 0x80000000) { |
| 13076 | Ops.push_back(Elt: *HiAddSub); |
| 13077 | if (AddcSubcNode->getOpcode() == ARMISD::SUBC) { |
| 13078 | FinalOpc = ARMISD::SMMLSR; |
| 13079 | } else { |
| 13080 | FinalOpc = ARMISD::SMMLAR; |
| 13081 | } |
| 13082 | SDValue NewNode = DAG.getNode(Opcode: FinalOpc, DL: SDLoc(AddcSubcNode), VT: MVT::i32, Ops); |
| 13083 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddeSubeNode, 0), To: NewNode); |
| 13084 | |
| 13085 | return SDValue(AddeSubeNode, 0); |
| 13086 | } else if (AddcSubcNode->getOpcode() == ARMISD::SUBC) |
| 13087 | // SMMLS is generated during instruction selection and the rest of this |
| 13088 | // function can not handle the case where AddcSubcNode is a SUBC. |
| 13089 | return SDValue(); |
| 13090 | |
| 13091 | // Finish building the operand list for {U/S}MLAL |
| 13092 | Ops.push_back(Elt: *LowAddSub); |
| 13093 | Ops.push_back(Elt: *HiAddSub); |
| 13094 | |
| 13095 | SDValue MLALNode = DAG.getNode(Opcode: FinalOpc, DL: SDLoc(AddcSubcNode), |
| 13096 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), Ops); |
| 13097 | |
// Replace the ADD nodes' uses with the MLAL node's values.
| 13099 | SDValue HiMLALResult(MLALNode.getNode(), 1); |
| 13100 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddeSubeNode, 0), To: HiMLALResult); |
| 13101 | |
| 13102 | SDValue LoMLALResult(MLALNode.getNode(), 0); |
| 13103 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddcSubcNode, 0), To: LoMLALResult); |
| 13104 | |
| 13105 | // Return original node to notify the driver to stop replacing. |
| 13106 | return SDValue(AddeSubeNode, 0); |
| 13107 | } |
| 13108 | |
| 13109 | static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode, |
| 13110 | TargetLowering::DAGCombinerInfo &DCI, |
| 13111 | const ARMSubtarget *Subtarget) { |
| 13112 | // UMAAL is similar to UMLAL except that it adds two unsigned values. |
| 13113 | // While trying to combine for the other MLAL nodes, first search for the |
| 13114 | // chance to use UMAAL. Check if Addc uses a node which has already |
| 13115 | // been combined into a UMLAL. The other pattern is UMLAL using Addc/Adde |
| 13116 | // as the addend, and it's handled in PerformUMLALCombine. |
| 13117 | |
| 13118 | if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP()) |
| 13119 | return AddCombineTo64bitMLAL(AddeSubeNode: AddeNode, DCI, Subtarget); |
| 13120 | |
| 13121 | // Check that we have a glued ADDC node. |
| 13122 | SDNode* AddcNode = AddeNode->getOperand(Num: 2).getNode(); |
| 13123 | if (AddcNode->getOpcode() != ARMISD::ADDC) |
| 13124 | return SDValue(); |
| 13125 | |
| 13126 | // Find the converted UMAAL or quit if it doesn't exist. |
| 13127 | SDNode *UmlalNode = nullptr; |
| 13128 | SDValue AddHi; |
| 13129 | if (AddcNode->getOperand(Num: 0).getOpcode() == ARMISD::UMLAL) { |
| 13130 | UmlalNode = AddcNode->getOperand(Num: 0).getNode(); |
| 13131 | AddHi = AddcNode->getOperand(Num: 1); |
| 13132 | } else if (AddcNode->getOperand(Num: 1).getOpcode() == ARMISD::UMLAL) { |
| 13133 | UmlalNode = AddcNode->getOperand(Num: 1).getNode(); |
| 13134 | AddHi = AddcNode->getOperand(Num: 0); |
| 13135 | } else { |
| 13136 | return AddCombineTo64bitMLAL(AddeSubeNode: AddeNode, DCI, Subtarget); |
| 13137 | } |
| 13138 | |
| 13139 | // The ADDC should be glued to an ADDE node, which uses the same UMLAL as |
| 13140 | // the ADDC as well as Zero. |
| 13141 | if (!isNullConstant(V: UmlalNode->getOperand(Num: 3))) |
| 13142 | return SDValue(); |
| 13143 | |
| 13144 | if ((isNullConstant(V: AddeNode->getOperand(Num: 0)) && |
| 13145 | AddeNode->getOperand(Num: 1).getNode() == UmlalNode) || |
| 13146 | (AddeNode->getOperand(Num: 0).getNode() == UmlalNode && |
| 13147 | isNullConstant(V: AddeNode->getOperand(Num: 1)))) { |
| 13148 | SelectionDAG &DAG = DCI.DAG; |
| 13149 | SDValue Ops[] = { UmlalNode->getOperand(Num: 0), UmlalNode->getOperand(Num: 1), |
| 13150 | UmlalNode->getOperand(Num: 2), AddHi }; |
| 13151 | SDValue UMAAL = DAG.getNode(Opcode: ARMISD::UMAAL, DL: SDLoc(AddcNode), |
| 13152 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), Ops); |
| 13153 | |
// Replace the ADD nodes' uses with the UMAAL node's values.
| 13155 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddeNode, 0), To: SDValue(UMAAL.getNode(), 1)); |
| 13156 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddcNode, 0), To: SDValue(UMAAL.getNode(), 0)); |
| 13157 | |
| 13158 | // Return original node to notify the driver to stop replacing. |
| 13159 | return SDValue(AddeNode, 0); |
| 13160 | } |
| 13161 | return SDValue(); |
| 13162 | } |
| 13163 | |
| 13164 | static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG, |
| 13165 | const ARMSubtarget *Subtarget) { |
| 13166 | if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP()) |
| 13167 | return SDValue(); |
| 13168 | |
| 13169 | // Check that we have a pair of ADDC and ADDE as operands. |
| 13170 | // Both addends of the ADDE must be zero. |
| 13171 | SDNode* AddcNode = N->getOperand(Num: 2).getNode(); |
| 13172 | SDNode* AddeNode = N->getOperand(Num: 3).getNode(); |
| 13173 | if ((AddcNode->getOpcode() == ARMISD::ADDC) && |
| 13174 | (AddeNode->getOpcode() == ARMISD::ADDE) && |
| 13175 | isNullConstant(V: AddeNode->getOperand(Num: 0)) && |
| 13176 | isNullConstant(V: AddeNode->getOperand(Num: 1)) && |
| 13177 | (AddeNode->getOperand(Num: 2).getNode() == AddcNode)) |
| 13178 | return DAG.getNode(Opcode: ARMISD::UMAAL, DL: SDLoc(N), |
| 13179 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
| 13180 | Ops: {N->getOperand(Num: 0), N->getOperand(Num: 1), |
| 13181 | AddcNode->getOperand(Num: 0), AddcNode->getOperand(Num: 1)}); |
| 13182 | else |
| 13183 | return SDValue(); |
| 13184 | } |
| 13185 | |
| 13186 | static SDValue PerformAddcSubcCombine(SDNode *N, |
| 13187 | TargetLowering::DAGCombinerInfo &DCI, |
| 13188 | const ARMSubtarget *Subtarget) { |
| 13189 | SelectionDAG &DAG(DCI.DAG); |
| 13190 | |
| 13191 | if (N->getOpcode() == ARMISD::SUBC && N->hasAnyUseOfValue(Value: 1)) { |
| 13192 | // (SUBC (ADDE 0, 0, C), 1) -> C |
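// ADDE(0, 0, C) materialises the incoming carry C as 0 or 1; subtracting 1
// from it borrows exactly when C is 0, so the carry produced by the SUBC
// equals C and the original carry can simply be forwarded.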
| 13193 | SDValue LHS = N->getOperand(Num: 0); |
| 13194 | SDValue RHS = N->getOperand(Num: 1); |
| 13195 | if (LHS->getOpcode() == ARMISD::ADDE && |
| 13196 | isNullConstant(V: LHS->getOperand(Num: 0)) && |
| 13197 | isNullConstant(V: LHS->getOperand(Num: 1)) && isOneConstant(V: RHS)) { |
| 13198 | return DCI.CombineTo(N, Res0: SDValue(N, 0), Res1: LHS->getOperand(Num: 2)); |
| 13199 | } |
| 13200 | } |
| 13201 | |
| 13202 | if (Subtarget->isThumb1Only()) { |
| 13203 | SDValue RHS = N->getOperand(Num: 1); |
| 13204 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: RHS)) { |
| 13205 | int32_t imm = C->getSExtValue(); |
| 13206 | if (imm < 0 && imm > std::numeric_limits<int>::min()) { |
| 13207 | SDLoc DL(N); |
| 13208 | RHS = DAG.getConstant(Val: -imm, DL, VT: MVT::i32); |
| 13209 | unsigned Opcode = (N->getOpcode() == ARMISD::ADDC) ? ARMISD::SUBC |
| 13210 | : ARMISD::ADDC; |
| 13211 | return DAG.getNode(Opcode, DL, VTList: N->getVTList(), N1: N->getOperand(Num: 0), N2: RHS); |
| 13212 | } |
| 13213 | } |
| 13214 | } |
| 13215 | |
| 13216 | return SDValue(); |
| 13217 | } |
| 13218 | |
| 13219 | static SDValue PerformAddeSubeCombine(SDNode *N, |
| 13220 | TargetLowering::DAGCombinerInfo &DCI, |
| 13221 | const ARMSubtarget *Subtarget) { |
| 13222 | if (Subtarget->isThumb1Only()) { |
| 13223 | SelectionDAG &DAG = DCI.DAG; |
| 13224 | SDValue RHS = N->getOperand(Num: 1); |
| 13225 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: RHS)) { |
| 13226 | int64_t imm = C->getSExtValue(); |
| 13227 | if (imm < 0) { |
| 13228 | SDLoc DL(N); |
| 13229 | |
| 13230 | // The with-carry-in form matches bitwise not instead of the negation. |
| 13231 | // Effectively, the inverse interpretation of the carry flag already |
| 13232 | // accounts for part of the negation. |
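// For example (illustrative): (ADDE x, -5, carry) becomes (SUBE x, 4, carry),
// since ~(-5) == 4 and x + (-5) + carry == x - 4 - (1 - carry).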
| 13233 | RHS = DAG.getConstant(Val: ~imm, DL, VT: MVT::i32); |
| 13234 | |
| 13235 | unsigned Opcode = (N->getOpcode() == ARMISD::ADDE) ? ARMISD::SUBE |
| 13236 | : ARMISD::ADDE; |
| 13237 | return DAG.getNode(Opcode, DL, VTList: N->getVTList(), |
| 13238 | N1: N->getOperand(Num: 0), N2: RHS, N3: N->getOperand(Num: 2)); |
| 13239 | } |
| 13240 | } |
| 13241 | } else if (N->getOperand(Num: 1)->getOpcode() == ISD::SMUL_LOHI) { |
| 13242 | return AddCombineTo64bitMLAL(AddeSubeNode: N, DCI, Subtarget); |
| 13243 | } |
| 13244 | return SDValue(); |
| 13245 | } |
| 13246 | |
| 13247 | static SDValue PerformSELECTCombine(SDNode *N, |
| 13248 | TargetLowering::DAGCombinerInfo &DCI, |
| 13249 | const ARMSubtarget *Subtarget) { |
| 13250 | if (!Subtarget->hasMVEIntegerOps()) |
| 13251 | return SDValue(); |
| 13252 | |
| 13253 | SDLoc dl(N); |
| 13254 | SDValue SetCC; |
| 13255 | SDValue LHS; |
| 13256 | SDValue RHS; |
| 13257 | ISD::CondCode CC; |
| 13258 | SDValue TrueVal; |
| 13259 | SDValue FalseVal; |
| 13260 | |
| 13261 | if (N->getOpcode() == ISD::SELECT && |
| 13262 | N->getOperand(Num: 0)->getOpcode() == ISD::SETCC) { |
| 13263 | SetCC = N->getOperand(Num: 0); |
| 13264 | LHS = SetCC->getOperand(Num: 0); |
| 13265 | RHS = SetCC->getOperand(Num: 1); |
| 13266 | CC = cast<CondCodeSDNode>(Val: SetCC->getOperand(Num: 2))->get(); |
| 13267 | TrueVal = N->getOperand(Num: 1); |
| 13268 | FalseVal = N->getOperand(Num: 2); |
| 13269 | } else if (N->getOpcode() == ISD::SELECT_CC) { |
| 13270 | LHS = N->getOperand(Num: 0); |
| 13271 | RHS = N->getOperand(Num: 1); |
| 13272 | CC = cast<CondCodeSDNode>(Val: N->getOperand(Num: 4))->get(); |
| 13273 | TrueVal = N->getOperand(Num: 2); |
| 13274 | FalseVal = N->getOperand(Num: 3); |
| 13275 | } else { |
| 13276 | return SDValue(); |
| 13277 | } |
| 13278 | |
| 13279 | unsigned int Opcode = 0; |
| 13280 | if ((TrueVal->getOpcode() == ISD::VECREDUCE_UMIN || |
| 13281 | FalseVal->getOpcode() == ISD::VECREDUCE_UMIN) && |
| 13282 | (CC == ISD::SETULT || CC == ISD::SETUGT)) { |
| 13283 | Opcode = ARMISD::VMINVu; |
| 13284 | if (CC == ISD::SETUGT) |
| 13285 | std::swap(a&: TrueVal, b&: FalseVal); |
| 13286 | } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_SMIN || |
| 13287 | FalseVal->getOpcode() == ISD::VECREDUCE_SMIN) && |
| 13288 | (CC == ISD::SETLT || CC == ISD::SETGT)) { |
| 13289 | Opcode = ARMISD::VMINVs; |
| 13290 | if (CC == ISD::SETGT) |
| 13291 | std::swap(a&: TrueVal, b&: FalseVal); |
| 13292 | } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_UMAX || |
| 13293 | FalseVal->getOpcode() == ISD::VECREDUCE_UMAX) && |
| 13294 | (CC == ISD::SETUGT || CC == ISD::SETULT)) { |
| 13295 | Opcode = ARMISD::VMAXVu; |
| 13296 | if (CC == ISD::SETULT) |
| 13297 | std::swap(a&: TrueVal, b&: FalseVal); |
| 13298 | } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_SMAX || |
| 13299 | FalseVal->getOpcode() == ISD::VECREDUCE_SMAX) && |
| 13300 | (CC == ISD::SETGT || CC == ISD::SETLT)) { |
| 13301 | Opcode = ARMISD::VMAXVs; |
| 13302 | if (CC == ISD::SETLT) |
| 13303 | std::swap(a&: TrueVal, b&: FalseVal); |
| 13304 | } else |
| 13305 | return SDValue(); |
| 13306 | |
| 13307 | // Normalise to the right hand side being the vector reduction |
| 13308 | switch (TrueVal->getOpcode()) { |
| 13309 | case ISD::VECREDUCE_UMIN: |
| 13310 | case ISD::VECREDUCE_SMIN: |
| 13311 | case ISD::VECREDUCE_UMAX: |
| 13312 | case ISD::VECREDUCE_SMAX: |
| 13313 | std::swap(a&: LHS, b&: RHS); |
| 13314 | std::swap(a&: TrueVal, b&: FalseVal); |
| 13315 | break; |
| 13316 | } |
| 13317 | |
| 13318 | EVT VectorType = FalseVal->getOperand(Num: 0).getValueType(); |
| 13319 | |
| 13320 | if (VectorType != MVT::v16i8 && VectorType != MVT::v8i16 && |
| 13321 | VectorType != MVT::v4i32) |
| 13322 | return SDValue(); |
| 13323 | |
| 13324 | EVT VectorScalarType = VectorType.getVectorElementType(); |
| 13325 | |
| 13326 | // The values being selected must also be the ones being compared |
| 13327 | if (TrueVal != LHS || FalseVal != RHS) |
| 13328 | return SDValue(); |
| 13329 | |
| 13330 | EVT LeftType = LHS->getValueType(ResNo: 0); |
| 13331 | EVT RightType = RHS->getValueType(ResNo: 0); |
| 13332 | |
| 13333 | // The types must match the reduced type too |
| 13334 | if (LeftType != VectorScalarType || RightType != VectorScalarType) |
| 13335 | return SDValue(); |
| 13336 | |
| 13337 | // Legalise the scalar to an i32 |
| 13338 | if (VectorScalarType != MVT::i32) |
| 13339 | LHS = DCI.DAG.getNode(Opcode: ISD::ANY_EXTEND, DL: dl, VT: MVT::i32, Operand: LHS); |
| 13340 | |
| 13341 | // Generate the reduction as an i32 for legalisation purposes |
| 13342 | auto Reduction = |
| 13343 | DCI.DAG.getNode(Opcode, DL: dl, VT: MVT::i32, N1: LHS, N2: RHS->getOperand(Num: 0)); |
| 13344 | |
| 13345 | // The result isn't actually an i32 so truncate it back to its original type |
| 13346 | if (VectorScalarType != MVT::i32) |
| 13347 | Reduction = DCI.DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: VectorScalarType, Operand: Reduction); |
| 13348 | |
| 13349 | return Reduction; |
| 13350 | } |
| 13351 | |
// A special combine for the vqdmulh family of instructions. This is one of
// the potential set of patterns that could match this instruction. The base
// pattern you would expect is min(max(ashr(mul(mul(sext(x), 2), sext(y)), 16))).
// This matches the slightly different min(max(ashr(mul(mul(sext(x), sext(y)), 2), 16))),
// which llvm will have optimized to min(ashr(mul(sext(x), sext(y)), 15)) as
// the max is unnecessary.
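// For example (an illustrative i16 instance): the clamped pattern
//   smin(ashr(mul(sext(x), sext(y)), 15), 32767)
// is rewritten to an ARMISD::VQDMULH node, which computes the saturating
// doubling-multiply-high, i.e. sat((2 * x * y) >> 16) == sat((x * y) >> 15).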
| 13358 | static SDValue PerformVQDMULHCombine(SDNode *N, SelectionDAG &DAG) { |
| 13359 | EVT VT = N->getValueType(ResNo: 0); |
| 13360 | SDValue Shft; |
| 13361 | ConstantSDNode *Clamp; |
| 13362 | |
| 13363 | if (!VT.isVector() || VT.getScalarSizeInBits() > 64) |
| 13364 | return SDValue(); |
| 13365 | |
| 13366 | if (N->getOpcode() == ISD::SMIN) { |
| 13367 | Shft = N->getOperand(Num: 0); |
| 13368 | Clamp = isConstOrConstSplat(N: N->getOperand(Num: 1)); |
| 13369 | } else if (N->getOpcode() == ISD::VSELECT) { |
| 13370 | // Detect a SMIN, which for an i64 node will be a vselect/setcc, not a smin. |
| 13371 | SDValue Cmp = N->getOperand(Num: 0); |
| 13372 | if (Cmp.getOpcode() != ISD::SETCC || |
| 13373 | cast<CondCodeSDNode>(Val: Cmp.getOperand(i: 2))->get() != ISD::SETLT || |
| 13374 | Cmp.getOperand(i: 0) != N->getOperand(Num: 1) || |
| 13375 | Cmp.getOperand(i: 1) != N->getOperand(Num: 2)) |
| 13376 | return SDValue(); |
| 13377 | Shft = N->getOperand(Num: 1); |
| 13378 | Clamp = isConstOrConstSplat(N: N->getOperand(Num: 2)); |
| 13379 | } else |
| 13380 | return SDValue(); |
| 13381 | |
| 13382 | if (!Clamp) |
| 13383 | return SDValue(); |
| 13384 | |
| 13385 | MVT ScalarType; |
| 13386 | int ShftAmt = 0; |
| 13387 | switch (Clamp->getSExtValue()) { |
| 13388 | case (1 << 7) - 1: |
| 13389 | ScalarType = MVT::i8; |
| 13390 | ShftAmt = 7; |
| 13391 | break; |
| 13392 | case (1 << 15) - 1: |
| 13393 | ScalarType = MVT::i16; |
| 13394 | ShftAmt = 15; |
| 13395 | break; |
| 13396 | case (1ULL << 31) - 1: |
| 13397 | ScalarType = MVT::i32; |
| 13398 | ShftAmt = 31; |
| 13399 | break; |
| 13400 | default: |
| 13401 | return SDValue(); |
| 13402 | } |
| 13403 | |
| 13404 | if (Shft.getOpcode() != ISD::SRA) |
| 13405 | return SDValue(); |
| 13406 | ConstantSDNode *N1 = isConstOrConstSplat(N: Shft.getOperand(i: 1)); |
| 13407 | if (!N1 || N1->getSExtValue() != ShftAmt) |
| 13408 | return SDValue(); |
| 13409 | |
| 13410 | SDValue Mul = Shft.getOperand(i: 0); |
| 13411 | if (Mul.getOpcode() != ISD::MUL) |
| 13412 | return SDValue(); |
| 13413 | |
| 13414 | SDValue Ext0 = Mul.getOperand(i: 0); |
| 13415 | SDValue Ext1 = Mul.getOperand(i: 1); |
| 13416 | if (Ext0.getOpcode() != ISD::SIGN_EXTEND || |
| 13417 | Ext1.getOpcode() != ISD::SIGN_EXTEND) |
| 13418 | return SDValue(); |
| 13419 | EVT VecVT = Ext0.getOperand(i: 0).getValueType(); |
| 13420 | if (!VecVT.isPow2VectorType() || VecVT.getVectorNumElements() == 1) |
| 13421 | return SDValue(); |
| 13422 | if (Ext1.getOperand(i: 0).getValueType() != VecVT || |
| 13423 | VecVT.getScalarType() != ScalarType || |
| 13424 | VT.getScalarSizeInBits() < ScalarType.getScalarSizeInBits() * 2) |
| 13425 | return SDValue(); |
| 13426 | |
| 13427 | SDLoc DL(Mul); |
| 13428 | unsigned LegalLanes = 128 / (ShftAmt + 1); |
| 13429 | EVT LegalVecVT = MVT::getVectorVT(VT: ScalarType, NumElements: LegalLanes); |
// For types smaller than legal vectors, extend to be legal and only use the
// needed lanes.
| 13432 | if (VecVT.getSizeInBits() < 128) { |
| 13433 | EVT ExtVecVT = |
| 13434 | MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: 128 / VecVT.getVectorNumElements()), |
| 13435 | NumElements: VecVT.getVectorNumElements()); |
| 13436 | SDValue Inp0 = |
| 13437 | DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: ExtVecVT, Operand: Ext0.getOperand(i: 0)); |
| 13438 | SDValue Inp1 = |
| 13439 | DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: ExtVecVT, Operand: Ext1.getOperand(i: 0)); |
| 13440 | Inp0 = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT: LegalVecVT, Operand: Inp0); |
| 13441 | Inp1 = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT: LegalVecVT, Operand: Inp1); |
| 13442 | SDValue VQDMULH = DAG.getNode(Opcode: ARMISD::VQDMULH, DL, VT: LegalVecVT, N1: Inp0, N2: Inp1); |
| 13443 | SDValue Trunc = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT: ExtVecVT, Operand: VQDMULH); |
| 13444 | Trunc = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: VecVT, Operand: Trunc); |
| 13445 | return DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL, VT, Operand: Trunc); |
| 13446 | } |
| 13447 | |
| 13448 | // For larger types, split into legal sized chunks. |
| 13449 | assert(VecVT.getSizeInBits() % 128 == 0 && "Expected a power2 type" ); |
| 13450 | unsigned NumParts = VecVT.getSizeInBits() / 128; |
| 13451 | SmallVector<SDValue> Parts; |
| 13452 | for (unsigned I = 0; I < NumParts; ++I) { |
| 13453 | SDValue Inp0 = |
| 13454 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT: LegalVecVT, N1: Ext0.getOperand(i: 0), |
| 13455 | N2: DAG.getVectorIdxConstant(Val: I * LegalLanes, DL)); |
| 13456 | SDValue Inp1 = |
| 13457 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT: LegalVecVT, N1: Ext1.getOperand(i: 0), |
| 13458 | N2: DAG.getVectorIdxConstant(Val: I * LegalLanes, DL)); |
| 13459 | SDValue VQDMULH = DAG.getNode(Opcode: ARMISD::VQDMULH, DL, VT: LegalVecVT, N1: Inp0, N2: Inp1); |
| 13460 | Parts.push_back(Elt: VQDMULH); |
| 13461 | } |
| 13462 | return DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL, VT, |
| 13463 | Operand: DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: VecVT, Ops: Parts)); |
| 13464 | } |
| 13465 | |
| 13466 | static SDValue PerformVSELECTCombine(SDNode *N, |
| 13467 | TargetLowering::DAGCombinerInfo &DCI, |
| 13468 | const ARMSubtarget *Subtarget) { |
| 13469 | if (!Subtarget->hasMVEIntegerOps()) |
| 13470 | return SDValue(); |
| 13471 | |
| 13472 | if (SDValue V = PerformVQDMULHCombine(N, DAG&: DCI.DAG)) |
| 13473 | return V; |
| 13474 | |
| 13475 | // Transforms vselect(not(cond), lhs, rhs) into vselect(cond, rhs, lhs). |
| 13476 | // |
| 13477 | // We need to re-implement this optimization here as the implementation in the |
| 13478 | // Target-Independent DAGCombiner does not handle the kind of constant we make |
| 13479 | // (it calls isConstOrConstSplat with AllowTruncation set to false - and for |
| 13480 | // good reason, allowing truncation there would break other targets). |
| 13481 | // |
| 13482 | // Currently, this is only done for MVE, as it's the only target that benefits |
| 13483 | // from this transformation (e.g. VPNOT+VPSEL becomes a single VPSEL). |
| 13484 | if (N->getOperand(Num: 0).getOpcode() != ISD::XOR) |
| 13485 | return SDValue(); |
| 13486 | SDValue XOR = N->getOperand(Num: 0); |
| 13487 | |
| 13488 | // Check if the XOR's RHS is either a 1, or a BUILD_VECTOR of 1s. |
| 13489 | // It is important to check with truncation allowed as the BUILD_VECTORs we |
| 13490 | // generate in those situations will truncate their operands. |
| 13491 | ConstantSDNode *Const = |
| 13492 | isConstOrConstSplat(N: XOR->getOperand(Num: 1), /*AllowUndefs*/ false, |
| 13493 | /*AllowTruncation*/ true); |
| 13494 | if (!Const || !Const->isOne()) |
| 13495 | return SDValue(); |
| 13496 | |
| 13497 | // Rewrite into vselect(cond, rhs, lhs). |
| 13498 | SDValue Cond = XOR->getOperand(Num: 0); |
| 13499 | SDValue LHS = N->getOperand(Num: 1); |
| 13500 | SDValue RHS = N->getOperand(Num: 2); |
| 13501 | EVT Type = N->getValueType(ResNo: 0); |
| 13502 | return DCI.DAG.getNode(Opcode: ISD::VSELECT, DL: SDLoc(N), VT: Type, N1: Cond, N2: RHS, N3: LHS); |
| 13503 | } |
| 13504 | |
| 13505 | // Convert vsetcc([0,1,2,..], splat(n), ult) -> vctp n |
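// For example (illustrative), with n == 2 and 4 lanes:
//   setcc(<0,1,2,3>, splat(2), ult) == <1,1,0,0>, which is exactly vctp32(2).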
| 13506 | static SDValue PerformVSetCCToVCTPCombine(SDNode *N, |
| 13507 | TargetLowering::DAGCombinerInfo &DCI, |
| 13508 | const ARMSubtarget *Subtarget) { |
| 13509 | SDValue Op0 = N->getOperand(Num: 0); |
| 13510 | SDValue Op1 = N->getOperand(Num: 1); |
| 13511 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: N->getOperand(Num: 2))->get(); |
| 13512 | EVT VT = N->getValueType(ResNo: 0); |
| 13513 | |
| 13514 | if (!Subtarget->hasMVEIntegerOps() || |
| 13515 | !DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
| 13516 | return SDValue(); |
| 13517 | |
| 13518 | if (CC == ISD::SETUGE) { |
| 13519 | std::swap(a&: Op0, b&: Op1); |
| 13520 | CC = ISD::SETULT; |
| 13521 | } |
| 13522 | |
| 13523 | if (CC != ISD::SETULT || VT.getScalarSizeInBits() != 1 || |
| 13524 | Op0.getOpcode() != ISD::BUILD_VECTOR) |
| 13525 | return SDValue(); |
| 13526 | |
| 13527 | // Check first operand is BuildVector of 0,1,2,... |
| 13528 | for (unsigned I = 0; I < VT.getVectorNumElements(); I++) { |
| 13529 | if (!Op0.getOperand(i: I).isUndef() && |
| 13530 | !(isa<ConstantSDNode>(Val: Op0.getOperand(i: I)) && |
| 13531 | Op0.getConstantOperandVal(i: I) == I)) |
| 13532 | return SDValue(); |
| 13533 | } |
| 13534 | |
| 13535 | // The second is a Splat of Op1S |
| 13536 | SDValue Op1S = DCI.DAG.getSplatValue(V: Op1); |
| 13537 | if (!Op1S) |
| 13538 | return SDValue(); |
| 13539 | |
| 13540 | unsigned Opc; |
| 13541 | switch (VT.getVectorNumElements()) { |
| 13542 | case 2: |
| 13543 | Opc = Intrinsic::arm_mve_vctp64; |
| 13544 | break; |
| 13545 | case 4: |
| 13546 | Opc = Intrinsic::arm_mve_vctp32; |
| 13547 | break; |
| 13548 | case 8: |
| 13549 | Opc = Intrinsic::arm_mve_vctp16; |
| 13550 | break; |
| 13551 | case 16: |
| 13552 | Opc = Intrinsic::arm_mve_vctp8; |
| 13553 | break; |
| 13554 | default: |
| 13555 | return SDValue(); |
| 13556 | } |
| 13557 | |
| 13558 | SDLoc DL(N); |
| 13559 | return DCI.DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT, |
| 13560 | N1: DCI.DAG.getConstant(Val: Opc, DL, VT: MVT::i32), |
| 13561 | N2: DCI.DAG.getZExtOrTrunc(Op: Op1S, DL, VT: MVT::i32)); |
| 13562 | } |
| 13563 | |
/// PerformADDECombine - Target-specific dag combine transform from
/// ARMISD::ADDC, ARMISD::ADDE, and ISD::{U/S}MUL_LOHI to MLAL or
/// ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL
| 13567 | static SDValue PerformADDECombine(SDNode *N, |
| 13568 | TargetLowering::DAGCombinerInfo &DCI, |
| 13569 | const ARMSubtarget *Subtarget) { |
| 13570 | // Only ARM and Thumb2 support UMLAL/SMLAL. |
| 13571 | if (Subtarget->isThumb1Only()) |
| 13572 | return PerformAddeSubeCombine(N, DCI, Subtarget); |
| 13573 | |
| 13574 | // Only perform the checks after legalize when the pattern is available. |
| 13575 | if (DCI.isBeforeLegalize()) return SDValue(); |
| 13576 | |
| 13577 | return AddCombineTo64bitUMAAL(AddeNode: N, DCI, Subtarget); |
| 13578 | } |
| 13579 | |
| 13580 | /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with |
| 13581 | /// operands N0 and N1. This is a helper for PerformADDCombine that is |
| 13582 | /// called with the default operands, and if that fails, with commuted |
| 13583 | /// operands. |
| 13584 | static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, |
| 13585 | TargetLowering::DAGCombinerInfo &DCI, |
| 13586 | const ARMSubtarget *Subtarget){ |
| 13587 | // Attempt to create vpadd for this add. |
| 13588 | if (SDValue Result = AddCombineToVPADD(N, N0, N1, DCI, Subtarget)) |
| 13589 | return Result; |
| 13590 | |
| 13591 | // Attempt to create vpaddl for this add. |
| 13592 | if (SDValue Result = AddCombineVUZPToVPADDL(N, N0, N1, DCI, Subtarget)) |
| 13593 | return Result; |
| 13594 | if (SDValue Result = AddCombineBUILD_VECTORToVPADDL(N, N0, N1, DCI, |
| 13595 | Subtarget)) |
| 13596 | return Result; |
| 13597 | |
| 13598 | // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) |
| 13599 | if (N0.getNode()->hasOneUse()) |
| 13600 | if (SDValue Result = combineSelectAndUse(N, Slct: N0, OtherOp: N1, DCI)) |
| 13601 | return Result; |
| 13602 | return SDValue(); |
| 13603 | } |
| 13604 | |
| 13605 | static SDValue TryDistrubutionADDVecReduce(SDNode *N, SelectionDAG &DAG) { |
| 13606 | EVT VT = N->getValueType(ResNo: 0); |
| 13607 | SDValue N0 = N->getOperand(Num: 0); |
| 13608 | SDValue N1 = N->getOperand(Num: 1); |
| 13609 | SDLoc dl(N); |
| 13610 | |
| 13611 | auto IsVecReduce = [](SDValue Op) { |
| 13612 | switch (Op.getOpcode()) { |
| 13613 | case ISD::VECREDUCE_ADD: |
| 13614 | case ARMISD::VADDVs: |
| 13615 | case ARMISD::VADDVu: |
| 13616 | case ARMISD::VMLAVs: |
| 13617 | case ARMISD::VMLAVu: |
| 13618 | return true; |
| 13619 | } |
| 13620 | return false; |
| 13621 | }; |
| 13622 | |
| 13623 | auto DistrubuteAddAddVecReduce = [&](SDValue N0, SDValue N1) { |
| 13624 | // Distribute add(X, add(vecreduce(Y), vecreduce(Z))) -> |
| 13625 | // add(add(X, vecreduce(Y)), vecreduce(Z)) |
| 13626 | // to make better use of vaddva style instructions. |
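// (Illustrative: a VECREDUCE_ADD/VADDV whose result feeds a scalar add can be
// selected as the accumulating VADDVA form, so pairing each reduction with a
// single scalar addend keeps both adds foldable.)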
| 13627 | if (VT == MVT::i32 && N1.getOpcode() == ISD::ADD && !IsVecReduce(N0) && |
| 13628 | IsVecReduce(N1.getOperand(i: 0)) && IsVecReduce(N1.getOperand(i: 1)) && |
| 13629 | !isa<ConstantSDNode>(Val: N0) && N1->hasOneUse()) { |
| 13630 | SDValue Add0 = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: N0, N2: N1.getOperand(i: 0)); |
| 13631 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: Add0, N2: N1.getOperand(i: 1)); |
| 13632 | } |
| 13633 | // And turn add(add(A, reduce(B)), add(C, reduce(D))) -> |
| 13634 | // add(add(add(A, C), reduce(B)), reduce(D)) |
| 13635 | if (VT == MVT::i32 && N0.getOpcode() == ISD::ADD && |
| 13636 | N1.getOpcode() == ISD::ADD && N0->hasOneUse() && N1->hasOneUse()) { |
| 13637 | unsigned N0RedOp = 0; |
| 13638 | if (!IsVecReduce(N0.getOperand(i: N0RedOp))) { |
| 13639 | N0RedOp = 1; |
| 13640 | if (!IsVecReduce(N0.getOperand(i: N0RedOp))) |
| 13641 | return SDValue(); |
| 13642 | } |
| 13643 | |
| 13644 | unsigned N1RedOp = 0; |
| 13645 | if (!IsVecReduce(N1.getOperand(i: N1RedOp))) |
| 13646 | N1RedOp = 1; |
| 13647 | if (!IsVecReduce(N1.getOperand(i: N1RedOp))) |
| 13648 | return SDValue(); |
| 13649 | |
| 13650 | SDValue Add0 = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: N0.getOperand(i: 1 - N0RedOp), |
| 13651 | N2: N1.getOperand(i: 1 - N1RedOp)); |
| 13652 | SDValue Add1 = |
| 13653 | DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: Add0, N2: N0.getOperand(i: N0RedOp)); |
| 13654 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: Add1, N2: N1.getOperand(i: N1RedOp)); |
| 13655 | } |
| 13656 | return SDValue(); |
| 13657 | }; |
| 13658 | if (SDValue R = DistrubuteAddAddVecReduce(N0, N1)) |
| 13659 | return R; |
| 13660 | if (SDValue R = DistrubuteAddAddVecReduce(N1, N0)) |
| 13661 | return R; |
| 13662 | |
| 13663 | // Distribute add(vecreduce(load(Y)), vecreduce(load(Z))) |
| 13664 | // Or add(add(X, vecreduce(load(Y))), vecreduce(load(Z))) |
| 13665 | // by ascending load offsets. This can help cores prefetch if the order of |
| 13666 | // loads is more predictable. |
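// For example, add(vecreduce(load [p]), vecreduce(load [p+16])) is rewritten
// as add(vecreduce(load [p+16]), vecreduce(load [p])), so that a later
// combine can form VADDV(load [p]) followed by VADDVA(load [p+16]), keeping
// the loads in ascending address order.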
| 13667 | auto DistrubuteVecReduceLoad = [&](SDValue N0, SDValue N1, bool IsForward) { |
| 13668 | // Check if two reductions are known to load data where one is before/after |
| 13669 | // another. Return negative if N0 loads data before N1, positive if N1 is |
// before N0, and 0 if nothing is known.
| 13671 | auto IsKnownOrderedLoad = [&](SDValue N0, SDValue N1) { |
| 13672 | // Look through to the first operand of a MUL, for the VMLA case. |
| 13673 | // Currently only looks at the first operand, in the hope they are equal. |
| 13674 | if (N0.getOpcode() == ISD::MUL) |
| 13675 | N0 = N0.getOperand(i: 0); |
| 13676 | if (N1.getOpcode() == ISD::MUL) |
| 13677 | N1 = N1.getOperand(i: 0); |
| 13678 | |
| 13679 | // Return true if the two operands are loads to the same object and the |
| 13680 | // offset of the first is known to be less than the offset of the second. |
| 13681 | LoadSDNode *Load0 = dyn_cast<LoadSDNode>(Val&: N0); |
| 13682 | LoadSDNode *Load1 = dyn_cast<LoadSDNode>(Val&: N1); |
| 13683 | if (!Load0 || !Load1 || Load0->getChain() != Load1->getChain() || |
| 13684 | !Load0->isSimple() || !Load1->isSimple() || Load0->isIndexed() || |
| 13685 | Load1->isIndexed()) |
| 13686 | return 0; |
| 13687 | |
| 13688 | auto BaseLocDecomp0 = BaseIndexOffset::match(N: Load0, DAG); |
| 13689 | auto BaseLocDecomp1 = BaseIndexOffset::match(N: Load1, DAG); |
| 13690 | |
| 13691 | if (!BaseLocDecomp0.getBase() || |
| 13692 | BaseLocDecomp0.getBase() != BaseLocDecomp1.getBase() || |
| 13693 | !BaseLocDecomp0.hasValidOffset() || !BaseLocDecomp1.hasValidOffset()) |
| 13694 | return 0; |
| 13695 | if (BaseLocDecomp0.getOffset() < BaseLocDecomp1.getOffset()) |
| 13696 | return -1; |
| 13697 | if (BaseLocDecomp0.getOffset() > BaseLocDecomp1.getOffset()) |
| 13698 | return 1; |
| 13699 | return 0; |
| 13700 | }; |
| 13701 | |
| 13702 | SDValue X; |
| 13703 | if (N0.getOpcode() == ISD::ADD && N0->hasOneUse()) { |
| 13704 | if (IsVecReduce(N0.getOperand(i: 0)) && IsVecReduce(N0.getOperand(i: 1))) { |
| 13705 | int IsBefore = IsKnownOrderedLoad(N0.getOperand(i: 0).getOperand(i: 0), |
| 13706 | N0.getOperand(i: 1).getOperand(i: 0)); |
| 13707 | if (IsBefore < 0) { |
| 13708 | X = N0.getOperand(i: 0); |
| 13709 | N0 = N0.getOperand(i: 1); |
| 13710 | } else if (IsBefore > 0) { |
| 13711 | X = N0.getOperand(i: 1); |
| 13712 | N0 = N0.getOperand(i: 0); |
| 13713 | } else |
| 13714 | return SDValue(); |
| 13715 | } else if (IsVecReduce(N0.getOperand(i: 0))) { |
| 13716 | X = N0.getOperand(i: 1); |
| 13717 | N0 = N0.getOperand(i: 0); |
| 13718 | } else if (IsVecReduce(N0.getOperand(i: 1))) { |
| 13719 | X = N0.getOperand(i: 0); |
| 13720 | N0 = N0.getOperand(i: 1); |
| 13721 | } else |
| 13722 | return SDValue(); |
| 13723 | } else if (IsForward && IsVecReduce(N0) && IsVecReduce(N1) && |
| 13724 | IsKnownOrderedLoad(N0.getOperand(i: 0), N1.getOperand(i: 0)) < 0) { |
| 13725 | // Note this is backward to how you would expect. We create |
| 13726 | // add(reduce(load + 16), reduce(load + 0)) so that the |
// add(reduce(load+16), X) is combined into VADDVA(X, load+16), leaving
// the X as VADDV(load + 0).
| 13729 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1, N2: N0); |
| 13730 | } else |
| 13731 | return SDValue(); |
| 13732 | |
| 13733 | if (!IsVecReduce(N0) || !IsVecReduce(N1)) |
| 13734 | return SDValue(); |
| 13735 | |
| 13736 | if (IsKnownOrderedLoad(N1.getOperand(i: 0), N0.getOperand(i: 0)) >= 0) |
| 13737 | return SDValue(); |
| 13738 | |
| 13739 | // Switch from add(add(X, N0), N1) to add(add(X, N1), N0) |
| 13740 | SDValue Add0 = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: X, N2: N1); |
| 13741 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: Add0, N2: N0); |
| 13742 | }; |
| 13743 | if (SDValue R = DistrubuteVecReduceLoad(N0, N1, true)) |
| 13744 | return R; |
| 13745 | if (SDValue R = DistrubuteVecReduceLoad(N1, N0, false)) |
| 13746 | return R; |
| 13747 | return SDValue(); |
| 13748 | } |
| 13749 | |
| 13750 | static SDValue PerformADDVecReduce(SDNode *N, SelectionDAG &DAG, |
| 13751 | const ARMSubtarget *Subtarget) { |
| 13752 | if (!Subtarget->hasMVEIntegerOps()) |
| 13753 | return SDValue(); |
| 13754 | |
| 13755 | if (SDValue R = TryDistrubutionADDVecReduce(N, DAG)) |
| 13756 | return R; |
| 13757 | |
| 13758 | EVT VT = N->getValueType(ResNo: 0); |
| 13759 | SDValue N0 = N->getOperand(Num: 0); |
| 13760 | SDValue N1 = N->getOperand(Num: 1); |
| 13761 | SDLoc dl(N); |
| 13762 | |
| 13763 | if (VT != MVT::i64) |
| 13764 | return SDValue(); |
| 13765 | |
// We are looking for an i64 add of a VADDLVx. Due to these being i64's, this
| 13767 | // will look like: |
| 13768 | // t1: i32,i32 = ARMISD::VADDLVs x |
| 13769 | // t2: i64 = build_pair t1, t1:1 |
| 13770 | // t3: i64 = add t2, y |
| 13771 | // Otherwise we try to push the add up above VADDLVAx, to potentially allow |
| 13772 | // the add to be simplified separately. |
// We also need to check for sext / zext and commutative adds.
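// For the pattern above, MakeVecReduce rewrites t3 into (roughly):
//   t4: i32,i32 = ARMISD::VADDLVAs y_lo, y_hi, x
//   t5: i64 = build_pair t4, t4:1
// i.e. the scalar add is folded into the accumulating form of the reduction.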
| 13774 | auto MakeVecReduce = [&](unsigned Opcode, unsigned OpcodeA, SDValue NA, |
| 13775 | SDValue NB) { |
| 13776 | if (NB->getOpcode() != ISD::BUILD_PAIR) |
| 13777 | return SDValue(); |
| 13778 | SDValue VecRed = NB->getOperand(Num: 0); |
| 13779 | if ((VecRed->getOpcode() != Opcode && VecRed->getOpcode() != OpcodeA) || |
| 13780 | VecRed.getResNo() != 0 || |
| 13781 | NB->getOperand(Num: 1) != SDValue(VecRed.getNode(), 1)) |
| 13782 | return SDValue(); |
| 13783 | |
| 13784 | if (VecRed->getOpcode() == OpcodeA) { |
// add(NA, VADDLVA(Inp, Y)) -> VADDLVA(add(NA, Inp), Y)
| 13786 | SDValue Inp = DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, |
| 13787 | N1: VecRed.getOperand(i: 0), N2: VecRed.getOperand(i: 1)); |
| 13788 | NA = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::i64, N1: Inp, N2: NA); |
| 13789 | } |
| 13790 | |
| 13791 | SmallVector<SDValue, 4> Ops(2); |
| 13792 | std::tie(args&: Ops[0], args&: Ops[1]) = DAG.SplitScalar(N: NA, DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
| 13793 | |
| 13794 | unsigned S = VecRed->getOpcode() == OpcodeA ? 2 : 0; |
| 13795 | for (unsigned I = S, E = VecRed.getNumOperands(); I < E; I++) |
| 13796 | Ops.push_back(Elt: VecRed->getOperand(Num: I)); |
| 13797 | SDValue Red = |
| 13798 | DAG.getNode(Opcode: OpcodeA, DL: dl, VTList: DAG.getVTList(VTs: {MVT::i32, MVT::i32}), Ops); |
| 13799 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Red, |
| 13800 | N2: SDValue(Red.getNode(), 1)); |
| 13801 | }; |
| 13802 | |
| 13803 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N0, N1)) |
| 13804 | return M; |
| 13805 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N0, N1)) |
| 13806 | return M; |
| 13807 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N1, N0)) |
| 13808 | return M; |
| 13809 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N1, N0)) |
| 13810 | return M; |
| 13811 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N0, N1)) |
| 13812 | return M; |
| 13813 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N0, N1)) |
| 13814 | return M; |
| 13815 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N1, N0)) |
| 13816 | return M; |
| 13817 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N1, N0)) |
| 13818 | return M; |
| 13819 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N0, N1)) |
| 13820 | return M; |
| 13821 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N0, N1)) |
| 13822 | return M; |
| 13823 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N1, N0)) |
| 13824 | return M; |
| 13825 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N1, N0)) |
| 13826 | return M; |
| 13827 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVps, ARMISD::VMLALVAps, N0, N1)) |
| 13828 | return M; |
| 13829 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVpu, ARMISD::VMLALVApu, N0, N1)) |
| 13830 | return M; |
| 13831 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVps, ARMISD::VMLALVAps, N1, N0)) |
| 13832 | return M; |
| 13833 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVpu, ARMISD::VMLALVApu, N1, N0)) |
| 13834 | return M; |
| 13835 | return SDValue(); |
| 13836 | } |
| 13837 | |
| 13838 | bool |
| 13839 | ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N, |
| 13840 | CombineLevel Level) const { |
| 13841 | assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA || |
| 13842 | N->getOpcode() == ISD::SRL) && |
| 13843 | "Expected shift op" ); |
| 13844 | |
| 13845 | SDValue ShiftLHS = N->getOperand(Num: 0); |
| 13846 | if (!ShiftLHS->hasOneUse()) |
| 13847 | return false; |
| 13848 | |
| 13849 | if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND && |
| 13850 | !ShiftLHS.getOperand(i: 0)->hasOneUse()) |
| 13851 | return false; |
| 13852 | |
| 13853 | if (Level == BeforeLegalizeTypes) |
| 13854 | return true; |
| 13855 | |
| 13856 | if (N->getOpcode() != ISD::SHL) |
| 13857 | return true; |
| 13858 | |
| 13859 | if (Subtarget->isThumb1Only()) { |
| 13860 | // Avoid making expensive immediates by commuting shifts. (This logic |
| 13861 | // only applies to Thumb1 because ARM and Thumb2 immediates can be shifted |
| 13862 | // for free.) |
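// For example, (shl (add x, 255), 1) is left alone rather than commuted to
// (add (shl x, 1), 510): 255 fits an 8-bit immediate, while 510 would need
// an extra constant materialization on Thumb1.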
| 13863 | if (N->getOpcode() != ISD::SHL) |
| 13864 | return true; |
| 13865 | SDValue N1 = N->getOperand(Num: 0); |
| 13866 | if (N1->getOpcode() != ISD::ADD && N1->getOpcode() != ISD::AND && |
| 13867 | N1->getOpcode() != ISD::OR && N1->getOpcode() != ISD::XOR) |
| 13868 | return true; |
| 13869 | if (auto *Const = dyn_cast<ConstantSDNode>(Val: N1->getOperand(Num: 1))) { |
| 13870 | if (Const->getAPIntValue().ult(RHS: 256)) |
| 13871 | return false; |
| 13872 | if (N1->getOpcode() == ISD::ADD && Const->getAPIntValue().slt(RHS: 0) && |
| 13873 | Const->getAPIntValue().sgt(RHS: -256)) |
| 13874 | return false; |
| 13875 | } |
| 13876 | return true; |
| 13877 | } |
| 13878 | |
| 13879 | // Turn off commute-with-shift transform after legalization, so it doesn't |
| 13880 | // conflict with PerformSHLSimplify. (We could try to detect when |
| 13881 | // PerformSHLSimplify would trigger more precisely, but it isn't |
| 13882 | // really necessary.) |
| 13883 | return false; |
| 13884 | } |
| 13885 | |
| 13886 | bool ARMTargetLowering::isDesirableToCommuteXorWithShift( |
| 13887 | const SDNode *N) const { |
| 13888 | assert(N->getOpcode() == ISD::XOR && |
| 13889 | (N->getOperand(0).getOpcode() == ISD::SHL || |
| 13890 | N->getOperand(0).getOpcode() == ISD::SRL) && |
| 13891 | "Expected XOR(SHIFT) pattern" ); |
| 13892 | |
| 13893 | // Only commute if the entire NOT mask is a hidden shifted mask. |
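// For example, with i32 (xor (shl x, 8), 0xffffff00) the xor constant is a
// shifted mask of 24 bits starting at bit 8, matching the shl amount, so the
// commute is allowed.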
| 13894 | auto *XorC = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1)); |
| 13895 | auto *ShiftC = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 0).getOperand(i: 1)); |
| 13896 | if (XorC && ShiftC) { |
| 13897 | unsigned MaskIdx, MaskLen; |
| 13898 | if (XorC->getAPIntValue().isShiftedMask(MaskIdx, MaskLen)) { |
| 13899 | unsigned ShiftAmt = ShiftC->getZExtValue(); |
| 13900 | unsigned BitWidth = N->getValueType(ResNo: 0).getScalarSizeInBits(); |
| 13901 | if (N->getOperand(Num: 0).getOpcode() == ISD::SHL) |
| 13902 | return MaskIdx == ShiftAmt && MaskLen == (BitWidth - ShiftAmt); |
| 13903 | return MaskIdx == 0 && MaskLen == (BitWidth - ShiftAmt); |
| 13904 | } |
| 13905 | } |
| 13906 | |
| 13907 | return false; |
| 13908 | } |
| 13909 | |
| 13910 | bool ARMTargetLowering::shouldFoldConstantShiftPairToMask( |
| 13911 | const SDNode *N, CombineLevel Level) const { |
| 13912 | assert(((N->getOpcode() == ISD::SHL && |
| 13913 | N->getOperand(0).getOpcode() == ISD::SRL) || |
| 13914 | (N->getOpcode() == ISD::SRL && |
| 13915 | N->getOperand(0).getOpcode() == ISD::SHL)) && |
| 13916 | "Expected shift-shift mask" ); |
| 13917 | |
| 13918 | if (!Subtarget->isThumb1Only()) |
| 13919 | return true; |
| 13920 | |
| 13921 | if (Level == BeforeLegalizeTypes) |
| 13922 | return true; |
| 13923 | |
| 13924 | return false; |
| 13925 | } |
| 13926 | |
| 13927 | bool ARMTargetLowering::shouldFoldSelectWithIdentityConstant( |
| 13928 | unsigned BinOpcode, EVT VT, unsigned SelectOpcode, SDValue X, |
| 13929 | SDValue Y) const { |
| 13930 | return Subtarget->hasMVEIntegerOps() && isTypeLegal(VT) && |
| 13931 | SelectOpcode == ISD::VSELECT; |
| 13932 | } |
| 13933 | |
| 13934 | bool ARMTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const { |
| 13935 | if (!Subtarget->hasNEON()) { |
| 13936 | if (Subtarget->isThumb1Only()) |
| 13937 | return VT.getScalarSizeInBits() <= 32; |
| 13938 | return true; |
| 13939 | } |
| 13940 | return VT.isScalarInteger(); |
| 13941 | } |
| 13942 | |
| 13943 | bool ARMTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT, |
| 13944 | EVT VT) const { |
| 13945 | if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple()) |
| 13946 | return false; |
| 13947 | |
| 13948 | switch (FPVT.getSimpleVT().SimpleTy) { |
| 13949 | case MVT::f16: |
| 13950 | return Subtarget->hasVFP2Base(); |
| 13951 | case MVT::f32: |
| 13952 | return Subtarget->hasVFP2Base(); |
| 13953 | case MVT::f64: |
| 13954 | return Subtarget->hasFP64(); |
| 13955 | case MVT::v4f32: |
| 13956 | case MVT::v8f16: |
| 13957 | return Subtarget->hasMVEFloatOps(); |
| 13958 | default: |
| 13959 | return false; |
| 13960 | } |
| 13961 | } |
| 13962 | |
| 13963 | static SDValue PerformSHLSimplify(SDNode *N, |
| 13964 | TargetLowering::DAGCombinerInfo &DCI, |
| 13965 | const ARMSubtarget *ST) { |
| 13966 | // Allow the generic combiner to identify potential bswaps. |
| 13967 | if (DCI.isBeforeLegalize()) |
| 13968 | return SDValue(); |
| 13969 | |
| 13970 | // DAG combiner will fold: |
| 13971 | // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2) |
// (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
// Other code patterns that can also be modified have the following form:
| 13974 | // b + ((a << 1) | 510) |
| 13975 | // b + ((a << 1) & 510) |
| 13976 | // b + ((a << 1) ^ 510) |
| 13977 | // b + ((a << 1) + 510) |
| 13978 | |
// Many instructions can perform the shift for free, but it requires both
// the operands to be registers. If c1 << c2 is too large, a mov immediate
// instruction will be needed. So, unfold back to the original pattern if:
// - c1 and c2 are small enough that they don't require mov imms.
// - the user(s) of the node can perform the shl.
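// For example, ((a << 1) | 510) can be unfolded back to ((a | 255) << 1):
// 510 >> 1 == 255 still encodes as a rotated 8-bit immediate, and the users
// checked below can fold the outer shl into their shifted-operand form.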
| 13984 | |
| 13985 | // No shifted operands for 16-bit instructions. |
| 13986 | if (ST->isThumb() && ST->isThumb1Only()) |
| 13987 | return SDValue(); |
| 13988 | |
| 13989 | // Check that all the users could perform the shl themselves. |
| 13990 | for (auto *U : N->users()) { |
| 13991 | switch(U->getOpcode()) { |
| 13992 | default: |
| 13993 | return SDValue(); |
| 13994 | case ISD::SUB: |
| 13995 | case ISD::ADD: |
| 13996 | case ISD::AND: |
| 13997 | case ISD::OR: |
| 13998 | case ISD::XOR: |
| 13999 | case ISD::SETCC: |
| 14000 | case ARMISD::CMP: |
| 14001 | // Check that the user isn't already using a constant because there |
| 14002 | // aren't any instructions that support an immediate operand and a |
| 14003 | // shifted operand. |
| 14004 | if (isa<ConstantSDNode>(Val: U->getOperand(Num: 0)) || |
| 14005 | isa<ConstantSDNode>(Val: U->getOperand(Num: 1))) |
| 14006 | return SDValue(); |
| 14007 | |
| 14008 | // Check that it's not already using a shift. |
| 14009 | if (U->getOperand(Num: 0).getOpcode() == ISD::SHL || |
| 14010 | U->getOperand(Num: 1).getOpcode() == ISD::SHL) |
| 14011 | return SDValue(); |
| 14012 | break; |
| 14013 | } |
| 14014 | } |
| 14015 | |
| 14016 | if (N->getOpcode() != ISD::ADD && N->getOpcode() != ISD::OR && |
| 14017 | N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND) |
| 14018 | return SDValue(); |
| 14019 | |
| 14020 | if (N->getOperand(Num: 0).getOpcode() != ISD::SHL) |
| 14021 | return SDValue(); |
| 14022 | |
| 14023 | SDValue SHL = N->getOperand(Num: 0); |
| 14024 | |
| 14025 | auto *C1ShlC2 = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1)); |
| 14026 | auto *C2 = dyn_cast<ConstantSDNode>(Val: SHL.getOperand(i: 1)); |
| 14027 | if (!C1ShlC2 || !C2) |
| 14028 | return SDValue(); |
| 14029 | |
| 14030 | APInt C2Int = C2->getAPIntValue(); |
| 14031 | APInt C1Int = C1ShlC2->getAPIntValue(); |
| 14032 | unsigned C2Width = C2Int.getBitWidth(); |
| 14033 | if (C2Int.uge(RHS: C2Width)) |
| 14034 | return SDValue(); |
| 14035 | uint64_t C2Value = C2Int.getZExtValue(); |
| 14036 | |
| 14037 | // Check that performing a lshr will not lose any information. |
| 14038 | APInt Mask = APInt::getHighBitsSet(numBits: C2Width, hiBitsSet: C2Width - C2Value); |
| 14039 | if ((C1Int & Mask) != C1Int) |
| 14040 | return SDValue(); |
| 14041 | |
| 14042 | // Shift the first constant. |
| 14043 | C1Int.lshrInPlace(ShiftAmt: C2Int); |
| 14044 | |
| 14045 | // The immediates are encoded as an 8-bit value that can be rotated. |
| 14046 | auto LargeImm = [](const APInt &Imm) { |
| 14047 | unsigned Zeros = Imm.countl_zero() + Imm.countr_zero(); |
| 14048 | return Imm.getBitWidth() - Zeros > 8; |
| 14049 | }; |
| 14050 | |
| 14051 | if (LargeImm(C1Int) || LargeImm(C2Int)) |
| 14052 | return SDValue(); |
| 14053 | |
| 14054 | SelectionDAG &DAG = DCI.DAG; |
| 14055 | SDLoc dl(N); |
| 14056 | SDValue X = SHL.getOperand(i: 0); |
| 14057 | SDValue BinOp = DAG.getNode(Opcode: N->getOpcode(), DL: dl, VT: MVT::i32, N1: X, |
| 14058 | N2: DAG.getConstant(Val: C1Int, DL: dl, VT: MVT::i32)); |
| 14059 | // Shift left to compensate for the lshr of C1Int. |
| 14060 | SDValue Res = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: MVT::i32, N1: BinOp, N2: SHL.getOperand(i: 1)); |
| 14061 | |
LLVM_DEBUG(dbgs() << "Simplify shl use:\n"; SHL.getOperand(0).dump();
| 14063 | SHL.dump(); N->dump()); |
LLVM_DEBUG(dbgs() << "Into:\n"; X.dump(); BinOp.dump(); Res.dump());
| 14065 | return Res; |
| 14066 | } |
| 14067 | |
| 14068 | |
| 14069 | /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. |
| 14070 | /// |
| 14071 | static SDValue PerformADDCombine(SDNode *N, |
| 14072 | TargetLowering::DAGCombinerInfo &DCI, |
| 14073 | const ARMSubtarget *Subtarget) { |
| 14074 | SDValue N0 = N->getOperand(Num: 0); |
| 14075 | SDValue N1 = N->getOperand(Num: 1); |
| 14076 | |
| 14077 | // Only works one way, because it needs an immediate operand. |
| 14078 | if (SDValue Result = PerformSHLSimplify(N, DCI, ST: Subtarget)) |
| 14079 | return Result; |
| 14080 | |
| 14081 | if (SDValue Result = PerformADDVecReduce(N, DAG&: DCI.DAG, Subtarget)) |
| 14082 | return Result; |
| 14083 | |
| 14084 | // First try with the default operand order. |
| 14085 | if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget)) |
| 14086 | return Result; |
| 14087 | |
| 14088 | // If that didn't work, try again with the operands commuted. |
| 14089 | return PerformADDCombineWithOperands(N, N0: N1, N1: N0, DCI, Subtarget); |
| 14090 | } |
| 14091 | |
| 14092 | // Combine (sub 0, (csinc X, Y, CC)) -> (csinv -X, Y, CC) |
| 14093 | // providing -X is as cheap as X (currently, just a constant). |
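// For example, (sub 0, (csinc 1, y, cc, flags)) becomes
// (csinv -1, y, cc, flags), using the identity -(y + 1) == ~y.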
| 14094 | static SDValue PerformSubCSINCCombine(SDNode *N, SelectionDAG &DAG) { |
| 14095 | if (N->getValueType(ResNo: 0) != MVT::i32 || !isNullConstant(V: N->getOperand(Num: 0))) |
| 14096 | return SDValue(); |
| 14097 | SDValue CSINC = N->getOperand(Num: 1); |
| 14098 | if (CSINC.getOpcode() != ARMISD::CSINC || !CSINC.hasOneUse()) |
| 14099 | return SDValue(); |
| 14100 | |
| 14101 | ConstantSDNode *X = dyn_cast<ConstantSDNode>(Val: CSINC.getOperand(i: 0)); |
| 14102 | if (!X) |
| 14103 | return SDValue(); |
| 14104 | |
| 14105 | return DAG.getNode(Opcode: ARMISD::CSINV, DL: SDLoc(N), VT: MVT::i32, |
| 14106 | N1: DAG.getNode(Opcode: ISD::SUB, DL: SDLoc(N), VT: MVT::i32, N1: N->getOperand(Num: 0), |
| 14107 | N2: CSINC.getOperand(i: 0)), |
| 14108 | N2: CSINC.getOperand(i: 1), N3: CSINC.getOperand(i: 2), |
| 14109 | N4: CSINC.getOperand(i: 3)); |
| 14110 | } |
| 14111 | |
| 14112 | /// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. |
| 14113 | /// |
| 14114 | static SDValue PerformSUBCombine(SDNode *N, |
| 14115 | TargetLowering::DAGCombinerInfo &DCI, |
| 14116 | const ARMSubtarget *Subtarget) { |
| 14117 | SDValue N0 = N->getOperand(Num: 0); |
| 14118 | SDValue N1 = N->getOperand(Num: 1); |
| 14119 | |
// fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c))
| 14121 | if (N1.getNode()->hasOneUse()) |
| 14122 | if (SDValue Result = combineSelectAndUse(N, Slct: N1, OtherOp: N0, DCI)) |
| 14123 | return Result; |
| 14124 | |
| 14125 | if (SDValue R = PerformSubCSINCCombine(N, DAG&: DCI.DAG)) |
| 14126 | return R; |
| 14127 | |
| 14128 | if (!Subtarget->hasMVEIntegerOps() || !N->getValueType(ResNo: 0).isVector()) |
| 14129 | return SDValue(); |
| 14130 | |
| 14131 | // Fold (sub (ARMvmovImm 0), (ARMvdup x)) -> (ARMvdup (sub 0, x)) |
| 14132 | // so that we can readily pattern match more mve instructions which can use |
| 14133 | // a scalar operand. |
| 14134 | SDValue VDup = N->getOperand(Num: 1); |
| 14135 | if (VDup->getOpcode() != ARMISD::VDUP) |
| 14136 | return SDValue(); |
| 14137 | |
| 14138 | SDValue VMov = N->getOperand(Num: 0); |
| 14139 | if (VMov->getOpcode() == ISD::BITCAST) |
| 14140 | VMov = VMov->getOperand(Num: 0); |
| 14141 | |
| 14142 | if (VMov->getOpcode() != ARMISD::VMOVIMM || !isZeroVector(N: VMov)) |
| 14143 | return SDValue(); |
| 14144 | |
| 14145 | SDLoc dl(N); |
| 14146 | SDValue Negate = DCI.DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
| 14147 | N1: DCI.DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), |
| 14148 | N2: VDup->getOperand(Num: 0)); |
| 14149 | return DCI.DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT: N->getValueType(ResNo: 0), Operand: Negate); |
| 14150 | } |
| 14151 | |
| 14152 | /// PerformVMULCombine |
| 14153 | /// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the |
| 14154 | /// special multiplier accumulator forwarding. |
| 14155 | /// vmul d3, d0, d2 |
| 14156 | /// vmla d3, d1, d2 |
| 14157 | /// is faster than |
| 14158 | /// vadd d3, d0, d1 |
| 14159 | /// vmul d3, d3, d2 |
| 14160 | // However, for (A + B) * (A + B), |
| 14161 | // vadd d2, d0, d1 |
| 14162 | // vmul d3, d0, d2 |
| 14163 | // vmla d3, d1, d2 |
| 14164 | // is slower than |
| 14165 | // vadd d2, d0, d1 |
| 14166 | // vmul d3, d2, d2 |
| 14167 | static SDValue PerformVMULCombine(SDNode *N, |
| 14168 | TargetLowering::DAGCombinerInfo &DCI, |
| 14169 | const ARMSubtarget *Subtarget) { |
| 14170 | if (!Subtarget->hasVMLxForwarding()) |
| 14171 | return SDValue(); |
| 14172 | |
| 14173 | SelectionDAG &DAG = DCI.DAG; |
| 14174 | SDValue N0 = N->getOperand(Num: 0); |
| 14175 | SDValue N1 = N->getOperand(Num: 1); |
| 14176 | unsigned Opcode = N0.getOpcode(); |
| 14177 | if (Opcode != ISD::ADD && Opcode != ISD::SUB && |
| 14178 | Opcode != ISD::FADD && Opcode != ISD::FSUB) { |
| 14179 | Opcode = N1.getOpcode(); |
| 14180 | if (Opcode != ISD::ADD && Opcode != ISD::SUB && |
| 14181 | Opcode != ISD::FADD && Opcode != ISD::FSUB) |
| 14182 | return SDValue(); |
| 14183 | std::swap(a&: N0, b&: N1); |
| 14184 | } |
| 14185 | |
| 14186 | if (N0 == N1) |
| 14187 | return SDValue(); |
| 14188 | |
| 14189 | EVT VT = N->getValueType(ResNo: 0); |
| 14190 | SDLoc DL(N); |
| 14191 | SDValue N00 = N0->getOperand(Num: 0); |
| 14192 | SDValue N01 = N0->getOperand(Num: 1); |
| 14193 | return DAG.getNode(Opcode, DL, VT, |
| 14194 | N1: DAG.getNode(Opcode: ISD::MUL, DL, VT, N1: N00, N2: N1), |
| 14195 | N2: DAG.getNode(Opcode: ISD::MUL, DL, VT, N1: N01, N2: N1)); |
| 14196 | } |
| 14197 | |
| 14198 | static SDValue PerformMVEVMULLCombine(SDNode *N, SelectionDAG &DAG, |
| 14199 | const ARMSubtarget *Subtarget) { |
| 14200 | EVT VT = N->getValueType(ResNo: 0); |
| 14201 | if (VT != MVT::v2i64) |
| 14202 | return SDValue(); |
| 14203 | |
| 14204 | SDValue N0 = N->getOperand(Num: 0); |
| 14205 | SDValue N1 = N->getOperand(Num: 1); |
| 14206 | |
| 14207 | auto IsSignExt = [&](SDValue Op) { |
| 14208 | if (Op->getOpcode() != ISD::SIGN_EXTEND_INREG) |
| 14209 | return SDValue(); |
| 14210 | EVT VT = cast<VTSDNode>(Val: Op->getOperand(Num: 1))->getVT(); |
| 14211 | if (VT.getScalarSizeInBits() == 32) |
| 14212 | return Op->getOperand(Num: 0); |
| 14213 | return SDValue(); |
| 14214 | }; |
| 14215 | auto IsZeroExt = [&](SDValue Op) { |
| 14216 | // Zero extends are a little more awkward. At the point we are matching |
| 14217 | // this, we are looking for an AND with a (-1, 0, -1, 0) buildvector mask. |
// That might be before or after a bitcast depending on how the and is
| 14219 | // placed. Because this has to look through bitcasts, it is currently only |
| 14220 | // supported on LE. |
| 14221 | if (!Subtarget->isLittle()) |
| 14222 | return SDValue(); |
| 14223 | |
| 14224 | SDValue And = Op; |
| 14225 | if (And->getOpcode() == ISD::BITCAST) |
| 14226 | And = And->getOperand(Num: 0); |
| 14227 | if (And->getOpcode() != ISD::AND) |
| 14228 | return SDValue(); |
| 14229 | SDValue Mask = And->getOperand(Num: 1); |
| 14230 | if (Mask->getOpcode() == ISD::BITCAST) |
| 14231 | Mask = Mask->getOperand(Num: 0); |
| 14232 | |
| 14233 | if (Mask->getOpcode() != ISD::BUILD_VECTOR || |
| 14234 | Mask.getValueType() != MVT::v4i32) |
| 14235 | return SDValue(); |
| 14236 | if (isAllOnesConstant(V: Mask->getOperand(Num: 0)) && |
| 14237 | isNullConstant(V: Mask->getOperand(Num: 1)) && |
| 14238 | isAllOnesConstant(V: Mask->getOperand(Num: 2)) && |
| 14239 | isNullConstant(V: Mask->getOperand(Num: 3))) |
| 14240 | return And->getOperand(Num: 0); |
| 14241 | return SDValue(); |
| 14242 | }; |
| 14243 | |
| 14244 | SDLoc dl(N); |
| 14245 | if (SDValue Op0 = IsSignExt(N0)) { |
| 14246 | if (SDValue Op1 = IsSignExt(N1)) { |
| 14247 | SDValue New0a = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: Op0); |
| 14248 | SDValue New1a = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: Op1); |
| 14249 | return DAG.getNode(Opcode: ARMISD::VMULLs, DL: dl, VT, N1: New0a, N2: New1a); |
| 14250 | } |
| 14251 | } |
| 14252 | if (SDValue Op0 = IsZeroExt(N0)) { |
| 14253 | if (SDValue Op1 = IsZeroExt(N1)) { |
| 14254 | SDValue New0a = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: Op0); |
| 14255 | SDValue New1a = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: Op1); |
| 14256 | return DAG.getNode(Opcode: ARMISD::VMULLu, DL: dl, VT, N1: New0a, N2: New1a); |
| 14257 | } |
| 14258 | } |
| 14259 | |
| 14260 | return SDValue(); |
| 14261 | } |
| 14262 | |
| 14263 | static SDValue PerformMULCombine(SDNode *N, |
| 14264 | TargetLowering::DAGCombinerInfo &DCI, |
| 14265 | const ARMSubtarget *Subtarget) { |
| 14266 | SelectionDAG &DAG = DCI.DAG; |
| 14267 | |
| 14268 | EVT VT = N->getValueType(ResNo: 0); |
| 14269 | if (Subtarget->hasMVEIntegerOps() && VT == MVT::v2i64) |
| 14270 | return PerformMVEVMULLCombine(N, DAG, Subtarget); |
| 14271 | |
| 14272 | if (Subtarget->isThumb1Only()) |
| 14273 | return SDValue(); |
| 14274 | |
| 14275 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 14276 | return SDValue(); |
| 14277 | |
| 14278 | if (VT.is64BitVector() || VT.is128BitVector()) |
| 14279 | return PerformVMULCombine(N, DCI, Subtarget); |
| 14280 | if (VT != MVT::i32) |
| 14281 | return SDValue(); |
| 14282 | |
| 14283 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1)); |
| 14284 | if (!C) |
| 14285 | return SDValue(); |
| 14286 | |
| 14287 | int64_t MulAmt = C->getSExtValue(); |
| 14288 | unsigned ShiftAmt = llvm::countr_zero<uint64_t>(Val: MulAmt); |
| 14289 | |
| 14290 | ShiftAmt = ShiftAmt & (32 - 1); |
| 14291 | SDValue V = N->getOperand(Num: 0); |
| 14292 | SDLoc DL(N); |
| 14293 | |
| 14294 | SDValue Res; |
| 14295 | MulAmt >>= ShiftAmt; |
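// For example, MulAmt == 20: ShiftAmt == 2 and the remaining factor is
// 5 == 2^2 + 1, so (mul x, 20) becomes (shl (add x, (shl x, 2)), 2).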
| 14296 | |
| 14297 | if (MulAmt >= 0) { |
| 14298 | if (llvm::has_single_bit<uint32_t>(Value: MulAmt - 1)) { |
| 14299 | // (mul x, 2^N + 1) => (add (shl x, N), x) |
| 14300 | Res = DAG.getNode(Opcode: ISD::ADD, DL, VT, |
| 14301 | N1: V, |
| 14302 | N2: DAG.getNode(Opcode: ISD::SHL, DL, VT, |
| 14303 | N1: V, |
| 14304 | N2: DAG.getConstant(Val: Log2_32(Value: MulAmt - 1), DL, |
| 14305 | VT: MVT::i32))); |
| 14306 | } else if (llvm::has_single_bit<uint32_t>(Value: MulAmt + 1)) { |
| 14307 | // (mul x, 2^N - 1) => (sub (shl x, N), x) |
| 14308 | Res = DAG.getNode(Opcode: ISD::SUB, DL, VT, |
| 14309 | N1: DAG.getNode(Opcode: ISD::SHL, DL, VT, |
| 14310 | N1: V, |
| 14311 | N2: DAG.getConstant(Val: Log2_32(Value: MulAmt + 1), DL, |
| 14312 | VT: MVT::i32)), |
| 14313 | N2: V); |
| 14314 | } else |
| 14315 | return SDValue(); |
| 14316 | } else { |
| 14317 | uint64_t MulAmtAbs = -MulAmt; |
| 14318 | if (llvm::has_single_bit<uint32_t>(Value: MulAmtAbs + 1)) { |
| 14319 | // (mul x, -(2^N - 1)) => (sub x, (shl x, N)) |
| 14320 | Res = DAG.getNode(Opcode: ISD::SUB, DL, VT, |
| 14321 | N1: V, |
| 14322 | N2: DAG.getNode(Opcode: ISD::SHL, DL, VT, |
| 14323 | N1: V, |
| 14324 | N2: DAG.getConstant(Val: Log2_32(Value: MulAmtAbs + 1), DL, |
| 14325 | VT: MVT::i32))); |
| 14326 | } else if (llvm::has_single_bit<uint32_t>(Value: MulAmtAbs - 1)) { |
| 14327 | // (mul x, -(2^N + 1)) => - (add (shl x, N), x) |
| 14328 | Res = DAG.getNode(Opcode: ISD::ADD, DL, VT, |
| 14329 | N1: V, |
| 14330 | N2: DAG.getNode(Opcode: ISD::SHL, DL, VT, |
| 14331 | N1: V, |
| 14332 | N2: DAG.getConstant(Val: Log2_32(Value: MulAmtAbs - 1), DL, |
| 14333 | VT: MVT::i32))); |
| 14334 | Res = DAG.getNode(Opcode: ISD::SUB, DL, VT, |
| 14335 | N1: DAG.getConstant(Val: 0, DL, VT: MVT::i32), N2: Res); |
| 14336 | } else |
| 14337 | return SDValue(); |
| 14338 | } |
| 14339 | |
| 14340 | if (ShiftAmt != 0) |
| 14341 | Res = DAG.getNode(Opcode: ISD::SHL, DL, VT, |
| 14342 | N1: Res, N2: DAG.getConstant(Val: ShiftAmt, DL, VT: MVT::i32)); |
| 14343 | |
| 14344 | // Do not add new nodes to DAG combiner worklist. |
| 14345 | DCI.CombineTo(N, Res, AddTo: false); |
| 14346 | return SDValue(); |
| 14347 | } |
| 14348 | |
| 14349 | static SDValue CombineANDShift(SDNode *N, |
| 14350 | TargetLowering::DAGCombinerInfo &DCI, |
| 14351 | const ARMSubtarget *Subtarget) { |
| 14352 | // Allow DAGCombine to pattern-match before we touch the canonical form. |
| 14353 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 14354 | return SDValue(); |
| 14355 | |
| 14356 | if (N->getValueType(ResNo: 0) != MVT::i32) |
| 14357 | return SDValue(); |
| 14358 | |
| 14359 | ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1)); |
| 14360 | if (!N1C) |
| 14361 | return SDValue(); |
| 14362 | |
| 14363 | uint32_t C1 = (uint32_t)N1C->getZExtValue(); |
| 14364 | // Don't transform uxtb/uxth. |
| 14365 | if (C1 == 255 || C1 == 65535) |
| 14366 | return SDValue(); |
| 14367 | |
| 14368 | SDNode *N0 = N->getOperand(Num: 0).getNode(); |
| 14369 | if (!N0->hasOneUse()) |
| 14370 | return SDValue(); |
| 14371 | |
| 14372 | if (N0->getOpcode() != ISD::SHL && N0->getOpcode() != ISD::SRL) |
| 14373 | return SDValue(); |
| 14374 | |
| 14375 | bool LeftShift = N0->getOpcode() == ISD::SHL; |
| 14376 | |
| 14377 | ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(Val: N0->getOperand(Num: 1)); |
| 14378 | if (!N01C) |
| 14379 | return SDValue(); |
| 14380 | |
| 14381 | uint32_t C2 = (uint32_t)N01C->getZExtValue(); |
| 14382 | if (!C2 || C2 >= 32) |
| 14383 | return SDValue(); |
| 14384 | |
| 14385 | // Clear irrelevant bits in the mask. |
| 14386 | if (LeftShift) |
| 14387 | C1 &= (-1U << C2); |
| 14388 | else |
| 14389 | C1 &= (-1U >> C2); |
| 14390 | |
| 14391 | SelectionDAG &DAG = DCI.DAG; |
| 14392 | SDLoc DL(N); |
| 14393 | |
| 14394 | // We have a pattern of the form "(and (shl x, c2) c1)" or |
| 14395 | // "(and (srl x, c2) c1)", where c1 is a shifted mask. Try to |
| 14396 | // transform to a pair of shifts, to save materializing c1. |
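// For example, (and (srl x, 3), 0x1f) extracts bits [7:3]; it can instead be
// computed as (srl (shl x, 24), 27), avoiding materializing the 0x1f mask.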
| 14397 | |
| 14398 | // First pattern: right shift, then mask off leading bits. |
| 14399 | // FIXME: Use demanded bits? |
| 14400 | if (!LeftShift && isMask_32(Value: C1)) { |
| 14401 | uint32_t C3 = llvm::countl_zero(Val: C1); |
| 14402 | if (C2 < C3) { |
| 14403 | SDValue SHL = DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
| 14404 | N2: DAG.getConstant(Val: C3 - C2, DL, VT: MVT::i32)); |
| 14405 | return DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i32, N1: SHL, |
| 14406 | N2: DAG.getConstant(Val: C3, DL, VT: MVT::i32)); |
| 14407 | } |
| 14408 | } |
| 14409 | |
| 14410 | // First pattern, reversed: left shift, then mask off trailing bits. |
| 14411 | if (LeftShift && isMask_32(Value: ~C1)) { |
| 14412 | uint32_t C3 = llvm::countr_zero(Val: C1); |
| 14413 | if (C2 < C3) { |
| 14414 | SDValue SHL = DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
| 14415 | N2: DAG.getConstant(Val: C3 - C2, DL, VT: MVT::i32)); |
| 14416 | return DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: SHL, |
| 14417 | N2: DAG.getConstant(Val: C3, DL, VT: MVT::i32)); |
| 14418 | } |
| 14419 | } |
| 14420 | |
| 14421 | // Second pattern: left shift, then mask off leading bits. |
| 14422 | // FIXME: Use demanded bits? |
| 14423 | if (LeftShift && isShiftedMask_32(Value: C1)) { |
| 14424 | uint32_t Trailing = llvm::countr_zero(Val: C1); |
| 14425 | uint32_t C3 = llvm::countl_zero(Val: C1); |
| 14426 | if (Trailing == C2 && C2 + C3 < 32) { |
| 14427 | SDValue SHL = DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
| 14428 | N2: DAG.getConstant(Val: C2 + C3, DL, VT: MVT::i32)); |
| 14429 | return DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i32, N1: SHL, |
| 14430 | N2: DAG.getConstant(Val: C3, DL, VT: MVT::i32)); |
| 14431 | } |
| 14432 | } |
| 14433 | |
| 14434 | // Second pattern, reversed: right shift, then mask off trailing bits. |
| 14435 | // FIXME: Handle other patterns of known/demanded bits. |
| 14436 | if (!LeftShift && isShiftedMask_32(Value: C1)) { |
| 14437 | uint32_t Leading = llvm::countl_zero(Val: C1); |
| 14438 | uint32_t C3 = llvm::countr_zero(Val: C1); |
| 14439 | if (Leading == C2 && C2 + C3 < 32) { |
| 14440 | SDValue SHL = DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
| 14441 | N2: DAG.getConstant(Val: C2 + C3, DL, VT: MVT::i32)); |
| 14442 | return DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: SHL, |
| 14443 | N2: DAG.getConstant(Val: C3, DL, VT: MVT::i32)); |
| 14444 | } |
| 14445 | } |
| 14446 | |
| 14447 | // Transform "(and (shl x, c2) c1)" into "(shl (and x, c1>>c2), c2)" |
| 14448 | // if "c1 >> c2" is a cheaper immediate than "c1" |
| 14449 | if (LeftShift && |
| 14450 | HasLowerConstantMaterializationCost(Val1: C1 >> C2, Val2: C1, Subtarget)) { |
| 14451 | |
| 14452 | SDValue And = DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
| 14453 | N2: DAG.getConstant(Val: C1 >> C2, DL, VT: MVT::i32)); |
| 14454 | return DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: And, |
| 14455 | N2: DAG.getConstant(Val: C2, DL, VT: MVT::i32)); |
| 14456 | } |
| 14457 | |
| 14458 | return SDValue(); |
| 14459 | } |
| 14460 | |
| 14461 | static SDValue PerformANDCombine(SDNode *N, |
| 14462 | TargetLowering::DAGCombinerInfo &DCI, |
| 14463 | const ARMSubtarget *Subtarget) { |
| 14464 | // Attempt to use immediate-form VBIC |
| 14465 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Val: N->getOperand(Num: 1)); |
| 14466 | SDLoc dl(N); |
| 14467 | EVT VT = N->getValueType(ResNo: 0); |
| 14468 | SelectionDAG &DAG = DCI.DAG; |
| 14469 | |
| 14470 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT) || VT == MVT::v2i1 || |
| 14471 | VT == MVT::v4i1 || VT == MVT::v8i1 || VT == MVT::v16i1) |
| 14472 | return SDValue(); |
| 14473 | |
| 14474 | APInt SplatBits, SplatUndef; |
| 14475 | unsigned SplatBitSize; |
| 14476 | bool HasAnyUndefs; |
| 14477 | if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) && |
| 14478 | BVN->isConstantSplat(SplatValue&: SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { |
| 14479 | if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 || |
| 14480 | SplatBitSize == 64) { |
| 14481 | EVT VbicVT; |
| 14482 | SDValue Val = isVMOVModifiedImm(SplatBits: (~SplatBits).getZExtValue(), |
| 14483 | SplatUndef: SplatUndef.getZExtValue(), SplatBitSize, |
| 14484 | DAG, dl, VT&: VbicVT, VectorVT: VT, type: OtherModImm); |
| 14485 | if (Val.getNode()) { |
| 14486 | SDValue Input = |
| 14487 | DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: VbicVT, Operand: N->getOperand(Num: 0)); |
| 14488 | SDValue Vbic = DAG.getNode(Opcode: ARMISD::VBICIMM, DL: dl, VT: VbicVT, N1: Input, N2: Val); |
| 14489 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Vbic); |
| 14490 | } |
| 14491 | } |
| 14492 | } |
| 14493 | |
| 14494 | if (!Subtarget->isThumb1Only()) { |
// fold (and (select cc, -1, c), x) -> (select cc, x, (and x, c))
| 14496 | if (SDValue Result = combineSelectAndUseCommutative(N, AllOnes: true, DCI)) |
| 14497 | return Result; |
| 14498 | |
| 14499 | if (SDValue Result = PerformSHLSimplify(N, DCI, ST: Subtarget)) |
| 14500 | return Result; |
| 14501 | } |
| 14502 | |
| 14503 | if (Subtarget->isThumb1Only()) |
| 14504 | if (SDValue Result = CombineANDShift(N, DCI, Subtarget)) |
| 14505 | return Result; |
| 14506 | |
| 14507 | return SDValue(); |
| 14508 | } |
| 14509 | |
| 14510 | // Try combining OR nodes to SMULWB, SMULWT. |
| 14511 | static SDValue PerformORCombineToSMULWBT(SDNode *OR, |
| 14512 | TargetLowering::DAGCombinerInfo &DCI, |
| 14513 | const ARMSubtarget *Subtarget) { |
| 14514 | if (!Subtarget->hasV6Ops() || |
| 14515 | (Subtarget->isThumb() && |
| 14516 | (!Subtarget->hasThumb2() || !Subtarget->hasDSP()))) |
| 14517 | return SDValue(); |
| 14518 | |
| 14519 | SDValue SRL = OR->getOperand(Num: 0); |
| 14520 | SDValue SHL = OR->getOperand(Num: 1); |
| 14521 | |
| 14522 | if (SRL.getOpcode() != ISD::SRL || SHL.getOpcode() != ISD::SHL) { |
| 14523 | SRL = OR->getOperand(Num: 1); |
| 14524 | SHL = OR->getOperand(Num: 0); |
| 14525 | } |
| 14526 | if (!isSRL16(Op: SRL) || !isSHL16(Op: SHL)) |
| 14527 | return SDValue(); |
| 14528 | |
| 14529 | // The first operands to the shifts need to be the two results from the |
| 14530 | // same smul_lohi node. |
| 14531 | if ((SRL.getOperand(i: 0).getNode() != SHL.getOperand(i: 0).getNode()) || |
| 14532 | SRL.getOperand(i: 0).getOpcode() != ISD::SMUL_LOHI) |
| 14533 | return SDValue(); |
| 14534 | |
| 14535 | SDNode *SMULLOHI = SRL.getOperand(i: 0).getNode(); |
| 14536 | if (SRL.getOperand(i: 0) != SDValue(SMULLOHI, 0) || |
| 14537 | SHL.getOperand(i: 0) != SDValue(SMULLOHI, 1)) |
| 14538 | return SDValue(); |
| 14539 | |
| 14540 | // Now we have: |
// (or (srl (smul_lohi ?, ?), 16), (shl (smul_lohi ?, ?), 16))
// For SMULW[B|T] smul_lohi will take a 32-bit and a 16-bit argument.
// For SMULWB the 16-bit value will be sign extended somehow.
| 14544 | // For SMULWT only the SRA is required. |
| 14545 | // Check both sides of SMUL_LOHI |
| 14546 | SDValue OpS16 = SMULLOHI->getOperand(Num: 0); |
| 14547 | SDValue OpS32 = SMULLOHI->getOperand(Num: 1); |
| 14548 | |
| 14549 | SelectionDAG &DAG = DCI.DAG; |
| 14550 | if (!isS16(Op: OpS16, DAG) && !isSRA16(Op: OpS16)) { |
| 14551 | OpS16 = OpS32; |
| 14552 | OpS32 = SMULLOHI->getOperand(Num: 0); |
| 14553 | } |
| 14554 | |
| 14555 | SDLoc dl(OR); |
| 14556 | unsigned Opcode = 0; |
| 14557 | if (isS16(Op: OpS16, DAG)) |
| 14558 | Opcode = ARMISD::SMULWB; |
| 14559 | else if (isSRA16(Op: OpS16)) { |
| 14560 | Opcode = ARMISD::SMULWT; |
| 14561 | OpS16 = OpS16->getOperand(Num: 0); |
| 14562 | } |
| 14563 | else |
| 14564 | return SDValue(); |
| 14565 | |
| 14566 | SDValue Res = DAG.getNode(Opcode, DL: dl, VT: MVT::i32, N1: OpS32, N2: OpS16); |
| 14567 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(OR, 0), To: Res); |
| 14568 | return SDValue(OR, 0); |
| 14569 | } |
| 14570 | |
| 14571 | static SDValue PerformORCombineToBFI(SDNode *N, |
| 14572 | TargetLowering::DAGCombinerInfo &DCI, |
| 14573 | const ARMSubtarget *Subtarget) { |
| 14574 | // BFI is only available on V6T2+ |
| 14575 | if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) |
| 14576 | return SDValue(); |
| 14577 | |
| 14578 | EVT VT = N->getValueType(ResNo: 0); |
| 14579 | SDValue N0 = N->getOperand(Num: 0); |
| 14580 | SDValue N1 = N->getOperand(Num: 1); |
| 14581 | SelectionDAG &DAG = DCI.DAG; |
| 14582 | SDLoc DL(N); |
| 14583 | // 1) or (and A, mask), val => ARMbfi A, val, mask |
// iff (val & ~mask) == val
| 14585 | // |
| 14586 | // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask |
| 14587 | // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) |
| 14588 | // && mask == ~mask2 |
| 14589 | // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) |
| 14590 | // && ~mask == mask2 |
| 14591 | // (i.e., copy a bitfield value into another bitfield of the same width) |
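// For example, case (1): (or (and A, 0xffff00ff), 0x00005500) becomes
// (ARMbfi A, 0x55, 0xffff00ff), inserting the value 0x55 into bits [15:8]
// of A.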
| 14592 | |
| 14593 | if (VT != MVT::i32) |
| 14594 | return SDValue(); |
| 14595 | |
| 14596 | SDValue N00 = N0.getOperand(i: 0); |
| 14597 | |
| 14598 | // The value and the mask need to be constants so we can verify this is |
| 14599 | // actually a bitfield set. If the mask is 0xffff, we can do better |
| 14600 | // via a movt instruction, so don't use BFI in that case. |
| 14601 | SDValue MaskOp = N0.getOperand(i: 1); |
| 14602 | ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Val&: MaskOp); |
| 14603 | if (!MaskC) |
| 14604 | return SDValue(); |
| 14605 | unsigned Mask = MaskC->getZExtValue(); |
| 14606 | if (Mask == 0xffff) |
| 14607 | return SDValue(); |
| 14608 | SDValue Res; |
| 14609 | // Case (1): or (and A, mask), val => ARMbfi A, val, mask |
| 14610 | ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Val&: N1); |
| 14611 | if (N1C) { |
| 14612 | unsigned Val = N1C->getZExtValue(); |
| 14613 | if ((Val & ~Mask) != Val) |
| 14614 | return SDValue(); |
| 14615 | |
| 14616 | if (ARM::isBitFieldInvertedMask(v: Mask)) { |
| 14617 | Val >>= llvm::countr_zero(Val: ~Mask); |
| 14618 | |
| 14619 | Res = DAG.getNode(Opcode: ARMISD::BFI, DL, VT, N1: N00, |
| 14620 | N2: DAG.getConstant(Val, DL, VT: MVT::i32), |
| 14621 | N3: DAG.getConstant(Val: Mask, DL, VT: MVT::i32)); |
| 14622 | |
| 14623 | DCI.CombineTo(N, Res, AddTo: false); |
// Return value from the original node to inform the combiner that N is
| 14625 | // now dead. |
| 14626 | return SDValue(N, 0); |
| 14627 | } |
| 14628 | } else if (N1.getOpcode() == ISD::AND) { |
| 14629 | // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask |
| 14630 | ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(Val: N1.getOperand(i: 1)); |
| 14631 | if (!N11C) |
| 14632 | return SDValue(); |
| 14633 | unsigned Mask2 = N11C->getZExtValue(); |
| 14634 | |
// Mask and ~Mask2 (or vice versa) must be equivalent for the BFI pattern
// as-is to match.
| 14637 | if (ARM::isBitFieldInvertedMask(v: Mask) && |
| 14638 | (Mask == ~Mask2)) { |
| 14639 | // The pack halfword instruction works better for masks that fit it, |
| 14640 | // so use that when it's available. |
| 14641 | if (Subtarget->hasDSP() && |
| 14642 | (Mask == 0xffff || Mask == 0xffff0000)) |
| 14643 | return SDValue(); |
| 14644 | // 2a |
| 14645 | unsigned amt = llvm::countr_zero(Val: Mask2); |
| 14646 | Res = DAG.getNode(Opcode: ISD::SRL, DL, VT, N1: N1.getOperand(i: 0), |
| 14647 | N2: DAG.getConstant(Val: amt, DL, VT: MVT::i32)); |
| 14648 | Res = DAG.getNode(Opcode: ARMISD::BFI, DL, VT, N1: N00, N2: Res, |
| 14649 | N3: DAG.getConstant(Val: Mask, DL, VT: MVT::i32)); |
| 14650 | DCI.CombineTo(N, Res, AddTo: false); |
// Return value from the original node to inform the combiner that N is
| 14652 | // now dead. |
| 14653 | return SDValue(N, 0); |
| 14654 | } else if (ARM::isBitFieldInvertedMask(v: ~Mask) && |
| 14655 | (~Mask == Mask2)) { |
| 14656 | // The pack halfword instruction works better for masks that fit it, |
| 14657 | // so use that when it's available. |
| 14658 | if (Subtarget->hasDSP() && |
| 14659 | (Mask2 == 0xffff || Mask2 == 0xffff0000)) |
| 14660 | return SDValue(); |
| 14661 | // 2b |
| 14662 | unsigned lsb = llvm::countr_zero(Val: Mask); |
| 14663 | Res = DAG.getNode(Opcode: ISD::SRL, DL, VT, N1: N00, |
| 14664 | N2: DAG.getConstant(Val: lsb, DL, VT: MVT::i32)); |
| 14665 | Res = DAG.getNode(Opcode: ARMISD::BFI, DL, VT, N1: N1.getOperand(i: 0), N2: Res, |
| 14666 | N3: DAG.getConstant(Val: Mask2, DL, VT: MVT::i32)); |
| 14667 | DCI.CombineTo(N, Res, AddTo: false); |
// Return value from the original node to inform the combiner that N is
| 14669 | // now dead. |
| 14670 | return SDValue(N, 0); |
| 14671 | } |
| 14672 | } |
| 14673 | |
| 14674 | if (DAG.MaskedValueIsZero(Op: N1, Mask: MaskC->getAPIntValue()) && |
| 14675 | N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(Val: N00.getOperand(i: 1)) && |
| 14676 | ARM::isBitFieldInvertedMask(v: ~Mask)) { |
| 14677 | // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask |
| 14678 | // where lsb(mask) == #shamt and masked bits of B are known zero. |
| 14679 | SDValue ShAmt = N00.getOperand(i: 1); |
| 14680 | unsigned ShAmtC = ShAmt->getAsZExtVal(); |
| 14681 | unsigned LSB = llvm::countr_zero(Val: Mask); |
| 14682 | if (ShAmtC != LSB) |
| 14683 | return SDValue(); |
| 14684 | |
| 14685 | Res = DAG.getNode(Opcode: ARMISD::BFI, DL, VT, N1, N2: N00.getOperand(i: 0), |
| 14686 | N3: DAG.getConstant(Val: ~Mask, DL, VT: MVT::i32)); |
| 14687 | |
| 14688 | DCI.CombineTo(N, Res, AddTo: false); |
// Return value from the original node to inform the combiner that N is
| 14690 | // now dead. |
| 14691 | return SDValue(N, 0); |
| 14692 | } |
| 14693 | |
| 14694 | return SDValue(); |
| 14695 | } |
| 14696 | |
| 14697 | static bool isValidMVECond(unsigned CC, bool IsFloat) { |
| 14698 | switch (CC) { |
| 14699 | case ARMCC::EQ: |
| 14700 | case ARMCC::NE: |
| 14701 | case ARMCC::LE: |
| 14702 | case ARMCC::GT: |
| 14703 | case ARMCC::GE: |
| 14704 | case ARMCC::LT: |
| 14705 | return true; |
| 14706 | case ARMCC::HS: |
| 14707 | case ARMCC::HI: |
| 14708 | return !IsFloat; |
| 14709 | default: |
| 14710 | return false; |
| 14711 | }; |
| 14712 | } |
| 14713 | |
| 14714 | static ARMCC::CondCodes getVCMPCondCode(SDValue N) { |
| 14715 | if (N->getOpcode() == ARMISD::VCMP) |
| 14716 | return (ARMCC::CondCodes)N->getConstantOperandVal(Num: 2); |
| 14717 | else if (N->getOpcode() == ARMISD::VCMPZ) |
| 14718 | return (ARMCC::CondCodes)N->getConstantOperandVal(Num: 1); |
| 14719 | else |
llvm_unreachable("Not a VCMP/VCMPZ!");
| 14721 | } |
| 14722 | |
| 14723 | static bool CanInvertMVEVCMP(SDValue N) { |
| 14724 | ARMCC::CondCodes CC = ARMCC::getOppositeCondition(CC: getVCMPCondCode(N)); |
| 14725 | return isValidMVECond(CC, IsFloat: N->getOperand(Num: 0).getValueType().isFloatingPoint()); |
| 14726 | } |
| 14727 | |
| 14728 | static SDValue PerformORCombine_i1(SDNode *N, SelectionDAG &DAG, |
| 14729 | const ARMSubtarget *Subtarget) { |
| 14730 | // Try to invert "or A, B" -> "and ~A, ~B", as the "and" is easier to chain |
| 14731 | // together with predicates |
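// i.e. (or A, B) is rewritten as (not (and (not A), (not B))); when A and B
// are invertible VCMP/VCMPZ nodes the inner nots later fold into compares
// with the opposite condition (see PerformXORCombine).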
| 14732 | EVT VT = N->getValueType(ResNo: 0); |
| 14733 | SDLoc DL(N); |
| 14734 | SDValue N0 = N->getOperand(Num: 0); |
| 14735 | SDValue N1 = N->getOperand(Num: 1); |
| 14736 | |
| 14737 | auto IsFreelyInvertable = [&](SDValue V) { |
| 14738 | if (V->getOpcode() == ARMISD::VCMP || V->getOpcode() == ARMISD::VCMPZ) |
| 14739 | return CanInvertMVEVCMP(N: V); |
| 14740 | return false; |
| 14741 | }; |
| 14742 | |
| 14743 | // At least one operand must be freely invertable. |
| 14744 | if (!(IsFreelyInvertable(N0) || IsFreelyInvertable(N1))) |
| 14745 | return SDValue(); |
| 14746 | |
| 14747 | SDValue NewN0 = DAG.getLogicalNOT(DL, Val: N0, VT); |
| 14748 | SDValue NewN1 = DAG.getLogicalNOT(DL, Val: N1, VT); |
| 14749 | SDValue And = DAG.getNode(Opcode: ISD::AND, DL, VT, N1: NewN0, N2: NewN1); |
| 14750 | return DAG.getLogicalNOT(DL, Val: And, VT); |
| 14751 | } |
| 14752 | |
| 14753 | /// PerformORCombine - Target-specific dag combine xforms for ISD::OR |
| 14754 | static SDValue PerformORCombine(SDNode *N, |
| 14755 | TargetLowering::DAGCombinerInfo &DCI, |
| 14756 | const ARMSubtarget *Subtarget) { |
| 14757 | // Attempt to use immediate-form VORR |
| 14758 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Val: N->getOperand(Num: 1)); |
| 14759 | SDLoc dl(N); |
| 14760 | EVT VT = N->getValueType(ResNo: 0); |
| 14761 | SelectionDAG &DAG = DCI.DAG; |
| 14762 | |
| 14763 | if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
| 14764 | return SDValue(); |
| 14765 | |
| 14766 | if (Subtarget->hasMVEIntegerOps() && (VT == MVT::v2i1 || VT == MVT::v4i1 || |
| 14767 | VT == MVT::v8i1 || VT == MVT::v16i1)) |
| 14768 | return PerformORCombine_i1(N, DAG, Subtarget); |
| 14769 | |
| 14770 | APInt SplatBits, SplatUndef; |
| 14771 | unsigned SplatBitSize; |
| 14772 | bool HasAnyUndefs; |
| 14773 | if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) && |
| 14774 | BVN->isConstantSplat(SplatValue&: SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { |
| 14775 | if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 || |
| 14776 | SplatBitSize == 64) { |
| 14777 | EVT VorrVT; |
| 14778 | SDValue Val = |
| 14779 | isVMOVModifiedImm(SplatBits: SplatBits.getZExtValue(), SplatUndef: SplatUndef.getZExtValue(), |
| 14780 | SplatBitSize, DAG, dl, VT&: VorrVT, VectorVT: VT, type: OtherModImm); |
| 14781 | if (Val.getNode()) { |
| 14782 | SDValue Input = |
| 14783 | DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: VorrVT, Operand: N->getOperand(Num: 0)); |
| 14784 | SDValue Vorr = DAG.getNode(Opcode: ARMISD::VORRIMM, DL: dl, VT: VorrVT, N1: Input, N2: Val); |
| 14785 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Vorr); |
| 14786 | } |
| 14787 | } |
| 14788 | } |
| 14789 | |
| 14790 | if (!Subtarget->isThumb1Only()) { |
// fold (or (select cc, 0, c), x) -> (select cc, x, (or x, c))
| 14792 | if (SDValue Result = combineSelectAndUseCommutative(N, AllOnes: false, DCI)) |
| 14793 | return Result; |
| 14794 | if (SDValue Result = PerformORCombineToSMULWBT(OR: N, DCI, Subtarget)) |
| 14795 | return Result; |
| 14796 | } |
| 14797 | |
| 14798 | SDValue N0 = N->getOperand(Num: 0); |
| 14799 | SDValue N1 = N->getOperand(Num: 1); |
| 14800 | |
| 14801 | // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. |
| 14802 | if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && |
| 14803 | DAG.getTargetLoweringInfo().isTypeLegal(VT)) { |
| 14804 | |
| 14805 | // The code below optimizes (or (and X, Y), Z). |
| 14806 | // The AND operand needs to have a single user to make these optimizations |
| 14807 | // profitable. |
| 14808 | if (N0.getOpcode() != ISD::AND || !N0.hasOneUse()) |
| 14809 | return SDValue(); |
| 14810 | |
| 14811 | APInt SplatUndef; |
| 14812 | unsigned SplatBitSize; |
| 14813 | bool HasAnyUndefs; |
| 14814 | |
| 14815 | APInt SplatBits0, SplatBits1; |
| 14816 | BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(Val: N0->getOperand(Num: 1)); |
| 14817 | BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(Val: N1->getOperand(Num: 1)); |
// Ensure that the second operand of both ands is a constant splat.
| 14819 | if (BVN0 && BVN0->isConstantSplat(SplatValue&: SplatBits0, SplatUndef, SplatBitSize, |
| 14820 | HasAnyUndefs) && !HasAnyUndefs) { |
| 14821 | if (BVN1 && BVN1->isConstantSplat(SplatValue&: SplatBits1, SplatUndef, SplatBitSize, |
| 14822 | HasAnyUndefs) && !HasAnyUndefs) { |
| 14823 | // Ensure that the bit width of the constants are the same and that |
| 14824 | // the splat arguments are logical inverses as per the pattern we |
| 14825 | // are trying to simplify. |
| 14826 | if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() && |
| 14827 | SplatBits0 == ~SplatBits1) { |
| 14828 | // Canonicalize the vector type to make instruction selection |
| 14829 | // simpler. |
| 14830 | EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; |
| 14831 | SDValue Result = DAG.getNode(Opcode: ARMISD::VBSP, DL: dl, VT: CanonicalVT, |
| 14832 | N1: N0->getOperand(Num: 1), |
| 14833 | N2: N0->getOperand(Num: 0), |
| 14834 | N3: N1->getOperand(Num: 0)); |
| 14835 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Result); |
| 14836 | } |
| 14837 | } |
| 14838 | } |
| 14839 | } |
| 14840 | |
| 14841 | // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when |
| 14842 | // reasonable. |
| 14843 | if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) { |
| 14844 | if (SDValue Res = PerformORCombineToBFI(N, DCI, Subtarget)) |
| 14845 | return Res; |
| 14846 | } |
| 14847 | |
| 14848 | if (SDValue Result = PerformSHLSimplify(N, DCI, ST: Subtarget)) |
| 14849 | return Result; |
| 14850 | |
| 14851 | return SDValue(); |
| 14852 | } |
| 14853 | |
| 14854 | static SDValue PerformXORCombine(SDNode *N, |
| 14855 | TargetLowering::DAGCombinerInfo &DCI, |
| 14856 | const ARMSubtarget *Subtarget) { |
| 14857 | EVT VT = N->getValueType(ResNo: 0); |
| 14858 | SelectionDAG &DAG = DCI.DAG; |
| 14859 | |
| 14860 | if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
| 14861 | return SDValue(); |
| 14862 | |
| 14863 | if (!Subtarget->isThumb1Only()) { |
// fold (xor (select cc, 0, c), x) -> (select cc, x, (xor x, c))
| 14865 | if (SDValue Result = combineSelectAndUseCommutative(N, AllOnes: false, DCI)) |
| 14866 | return Result; |
| 14867 | |
| 14868 | if (SDValue Result = PerformSHLSimplify(N, DCI, ST: Subtarget)) |
| 14869 | return Result; |
| 14870 | } |
| 14871 | |
| 14872 | if (Subtarget->hasMVEIntegerOps()) { |
| 14873 | // fold (xor(vcmp/z, 1)) into a vcmp with the opposite condition. |
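// e.g. (xor (vcmp a, b, eq), alltrue) -> (vcmp a, b, ne).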
| 14874 | SDValue N0 = N->getOperand(Num: 0); |
| 14875 | SDValue N1 = N->getOperand(Num: 1); |
| 14876 | const TargetLowering *TLI = Subtarget->getTargetLowering(); |
| 14877 | if (TLI->isConstTrueVal(N: N1) && |
| 14878 | (N0->getOpcode() == ARMISD::VCMP || N0->getOpcode() == ARMISD::VCMPZ)) { |
| 14879 | if (CanInvertMVEVCMP(N: N0)) { |
| 14880 | SDLoc DL(N0); |
| 14881 | ARMCC::CondCodes CC = ARMCC::getOppositeCondition(CC: getVCMPCondCode(N: N0)); |
| 14882 | |
| 14883 | SmallVector<SDValue, 4> Ops; |
| 14884 | Ops.push_back(Elt: N0->getOperand(Num: 0)); |
| 14885 | if (N0->getOpcode() == ARMISD::VCMP) |
| 14886 | Ops.push_back(Elt: N0->getOperand(Num: 1)); |
| 14887 | Ops.push_back(Elt: DAG.getConstant(Val: CC, DL, VT: MVT::i32)); |
| 14888 | return DAG.getNode(Opcode: N0->getOpcode(), DL, VT: N0->getValueType(ResNo: 0), Ops); |
| 14889 | } |
| 14890 | } |
| 14891 | } |
| 14892 | |
| 14893 | return SDValue(); |
| 14894 | } |
| 14895 | |
// ParseBFI - given a BFI instruction in N, extract the "from" value (Rn)
// and return it, and fill in FromMask and ToMask with (consecutive) bits in
// "from" to be extracted and their position in "to" (Rd).
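// For example, a BFI whose third operand (the inverted mask) is 0xFFFF00FF
// yields ToMask = 0x0000FF00 and FromMask = 0x000000FF; if the "from" value
// was produced by (srl X, 8), FromMask is shifted up to 0x0000FF00 and X is
// returned instead.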
| 14899 | static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) { |
| 14900 | assert(N->getOpcode() == ARMISD::BFI); |
| 14901 | |
| 14902 | SDValue From = N->getOperand(Num: 1); |
| 14903 | ToMask = ~N->getConstantOperandAPInt(Num: 2); |
| 14904 | FromMask = APInt::getLowBitsSet(numBits: ToMask.getBitWidth(), loBitsSet: ToMask.popcount()); |
| 14905 | |
| 14906 | // If the Base came from a SHR #C, we can deduce that it is really testing bit |
| 14907 | // #C in the base of the SHR. |
| 14908 | if (From->getOpcode() == ISD::SRL && |
| 14909 | isa<ConstantSDNode>(Val: From->getOperand(Num: 1))) { |
| 14910 | APInt Shift = From->getConstantOperandAPInt(Num: 1); |
assert(Shift.getLimitedValue() < 32 && "Shift too large!");
| 14912 | FromMask <<= Shift.getLimitedValue(Limit: 31); |
| 14913 | From = From->getOperand(Num: 0); |
| 14914 | } |
| 14915 | |
| 14916 | return From; |
| 14917 | } |
| 14918 | |
| 14919 | // If A and B contain one contiguous set of bits, does A | B == A . B? |
| 14920 | // |
// Neither A nor B may be zero.
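// For example, with 32-bit values A = 0x0000FF00 and B = 0x000000FF:
// LastActiveBitInA (the lowest set bit of A) is 8 and FirstActiveBitInB (the
// highest set bit of B) is 7, so the two runs join into one contiguous run
// 0x0000FFFF and the function returns true.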
| 14922 | static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) { |
| 14923 | unsigned LastActiveBitInA = A.countr_zero(); |
| 14924 | unsigned FirstActiveBitInB = B.getBitWidth() - B.countl_zero() - 1; |
| 14925 | return LastActiveBitInA - 1 == FirstActiveBitInB; |
| 14926 | } |
| 14927 | |
| 14928 | static SDValue FindBFIToCombineWith(SDNode *N) { |
| 14929 | // We have a BFI in N. Find a BFI it can combine with, if one exists. |
| 14930 | APInt ToMask, FromMask; |
| 14931 | SDValue From = ParseBFI(N, ToMask, FromMask); |
| 14932 | SDValue To = N->getOperand(Num: 0); |
| 14933 | |
| 14934 | SDValue V = To; |
| 14935 | if (V.getOpcode() != ARMISD::BFI) |
| 14936 | return SDValue(); |
| 14937 | |
| 14938 | APInt NewToMask, NewFromMask; |
| 14939 | SDValue NewFrom = ParseBFI(N: V.getNode(), ToMask&: NewToMask, FromMask&: NewFromMask); |
| 14940 | if (NewFrom != From) |
| 14941 | return SDValue(); |
| 14942 | |
| 14943 | // Do the written bits conflict with any we've seen so far? |
| 14944 | if ((NewToMask & ToMask).getBoolValue()) |
| 14945 | // Conflicting bits. |
| 14946 | return SDValue(); |
| 14947 | |
| 14948 | // Are the new bits contiguous when combined with the old bits? |
| 14949 | if (BitsProperlyConcatenate(A: ToMask, B: NewToMask) && |
| 14950 | BitsProperlyConcatenate(A: FromMask, B: NewFromMask)) |
| 14951 | return V; |
| 14952 | if (BitsProperlyConcatenate(A: NewToMask, B: ToMask) && |
| 14953 | BitsProperlyConcatenate(A: NewFromMask, B: FromMask)) |
| 14954 | return V; |
| 14955 | |
| 14956 | return SDValue(); |
| 14957 | } |
| 14958 | |
| 14959 | static SDValue PerformBFICombine(SDNode *N, SelectionDAG &DAG) { |
| 14960 | SDValue N0 = N->getOperand(Num: 0); |
| 14961 | SDValue N1 = N->getOperand(Num: 1); |
| 14962 | |
| 14963 | if (N1.getOpcode() == ISD::AND) { |
| 14964 | // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff |
| 14965 | // the bits being cleared by the AND are not demanded by the BFI. |
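// For example, (bfi A, (and B, 0xFF), 0xFFFFFF00) only inserts the low 8
// bits of its second operand, which the AND already preserves, so it can be
// rewritten as (bfi A, B, 0xFFFFFF00).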
| 14966 | ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(Val: N1.getOperand(i: 1)); |
| 14967 | if (!N11C) |
| 14968 | return SDValue(); |
| 14969 | unsigned InvMask = N->getConstantOperandVal(Num: 2); |
| 14970 | unsigned LSB = llvm::countr_zero(Val: ~InvMask); |
| 14971 | unsigned Width = llvm::bit_width<unsigned>(Value: ~InvMask) - LSB; |
| 14972 | assert(Width < |
| 14973 | static_cast<unsigned>(std::numeric_limits<unsigned>::digits) && |
| 14974 | "undefined behavior" ); |
| 14975 | unsigned Mask = (1u << Width) - 1; |
| 14976 | unsigned Mask2 = N11C->getZExtValue(); |
| 14977 | if ((Mask & (~Mask2)) == 0) |
| 14978 | return DAG.getNode(Opcode: ARMISD::BFI, DL: SDLoc(N), VT: N->getValueType(ResNo: 0), |
| 14979 | N1: N->getOperand(Num: 0), N2: N1.getOperand(i: 0), N3: N->getOperand(Num: 2)); |
| 14980 | return SDValue(); |
| 14981 | } |
| 14982 | |
| 14983 | // Look for another BFI to combine with. |
| 14984 | if (SDValue CombineBFI = FindBFIToCombineWith(N)) { |
| 14985 | // We've found a BFI. |
| 14986 | APInt ToMask1, FromMask1; |
| 14987 | SDValue From1 = ParseBFI(N, ToMask&: ToMask1, FromMask&: FromMask1); |
| 14988 | |
| 14989 | APInt ToMask2, FromMask2; |
| 14990 | SDValue From2 = ParseBFI(N: CombineBFI.getNode(), ToMask&: ToMask2, FromMask&: FromMask2); |
| 14991 | assert(From1 == From2); |
| 14992 | (void)From2; |
| 14993 | |
| 14994 | // Create a new BFI, combining the two together. |
| 14995 | APInt NewFromMask = FromMask1 | FromMask2; |
| 14996 | APInt NewToMask = ToMask1 | ToMask2; |
| 14997 | |
| 14998 | EVT VT = N->getValueType(ResNo: 0); |
| 14999 | SDLoc dl(N); |
| 15000 | |
| 15001 | if (NewFromMask[0] == 0) |
| 15002 | From1 = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT, N1: From1, |
| 15003 | N2: DAG.getConstant(Val: NewFromMask.countr_zero(), DL: dl, VT)); |
| 15004 | return DAG.getNode(Opcode: ARMISD::BFI, DL: dl, VT, N1: CombineBFI.getOperand(i: 0), N2: From1, |
| 15005 | N3: DAG.getConstant(Val: ~NewToMask, DL: dl, VT)); |
| 15006 | } |
| 15007 | |
| 15008 | // Reassociate BFI(BFI (A, B, M1), C, M2) to BFI(BFI (A, C, M2), B, M1) so |
// that lower bit insertions are performed first, provided that M1 and M2
// do not overlap. This can allow multiple BFI instructions to be combined
| 15011 | // together by the other folds above. |
| 15012 | if (N->getOperand(Num: 0).getOpcode() == ARMISD::BFI) { |
| 15013 | APInt ToMask1 = ~N->getConstantOperandAPInt(Num: 2); |
| 15014 | APInt ToMask2 = ~N0.getConstantOperandAPInt(i: 2); |
| 15015 | |
| 15016 | if (!N0.hasOneUse() || (ToMask1 & ToMask2) != 0 || |
| 15017 | ToMask1.countl_zero() < ToMask2.countl_zero()) |
| 15018 | return SDValue(); |
| 15019 | |
| 15020 | EVT VT = N->getValueType(ResNo: 0); |
| 15021 | SDLoc dl(N); |
| 15022 | SDValue BFI1 = DAG.getNode(Opcode: ARMISD::BFI, DL: dl, VT, N1: N0.getOperand(i: 0), |
| 15023 | N2: N->getOperand(Num: 1), N3: N->getOperand(Num: 2)); |
| 15024 | return DAG.getNode(Opcode: ARMISD::BFI, DL: dl, VT, N1: BFI1, N2: N0.getOperand(i: 1), |
| 15025 | N3: N0.getOperand(i: 2)); |
| 15026 | } |
| 15027 | |
| 15028 | return SDValue(); |
| 15029 | } |
| 15030 | |
// Check that N is CMPZ(CSINC(0, 0, CC, X))
// or CMPZ(CMOV(1, 0, CC, X)), returning X if valid.
| 15034 | static SDValue IsCMPZCSINC(SDNode *Cmp, ARMCC::CondCodes &CC) { |
| 15035 | if (Cmp->getOpcode() != ARMISD::CMPZ || !isNullConstant(V: Cmp->getOperand(Num: 1))) |
| 15036 | return SDValue(); |
| 15037 | SDValue CSInc = Cmp->getOperand(Num: 0); |
| 15038 | |
| 15039 | // Ignore any `And 1` nodes that may not yet have been removed. We are |
| 15040 | // looking for a value that produces 1/0, so these have no effect on the |
| 15041 | // code. |
| 15042 | while (CSInc.getOpcode() == ISD::AND && |
| 15043 | isa<ConstantSDNode>(Val: CSInc.getOperand(i: 1)) && |
| 15044 | CSInc.getConstantOperandVal(i: 1) == 1 && CSInc->hasOneUse()) |
| 15045 | CSInc = CSInc.getOperand(i: 0); |
| 15046 | |
| 15047 | if (CSInc.getOpcode() == ARMISD::CSINC && |
| 15048 | isNullConstant(V: CSInc.getOperand(i: 0)) && |
| 15049 | isNullConstant(V: CSInc.getOperand(i: 1)) && CSInc->hasOneUse()) { |
| 15050 | CC = (ARMCC::CondCodes)CSInc.getConstantOperandVal(i: 2); |
| 15051 | return CSInc.getOperand(i: 3); |
| 15052 | } |
| 15053 | if (CSInc.getOpcode() == ARMISD::CMOV && isOneConstant(V: CSInc.getOperand(i: 0)) && |
| 15054 | isNullConstant(V: CSInc.getOperand(i: 1)) && CSInc->hasOneUse()) { |
| 15055 | CC = (ARMCC::CondCodes)CSInc.getConstantOperandVal(i: 2); |
| 15056 | return CSInc.getOperand(i: 3); |
| 15057 | } |
| 15058 | if (CSInc.getOpcode() == ARMISD::CMOV && isOneConstant(V: CSInc.getOperand(i: 1)) && |
| 15059 | isNullConstant(V: CSInc.getOperand(i: 0)) && CSInc->hasOneUse()) { |
| 15060 | CC = ARMCC::getOppositeCondition( |
| 15061 | CC: (ARMCC::CondCodes)CSInc.getConstantOperandVal(i: 2)); |
| 15062 | return CSInc.getOperand(i: 3); |
| 15063 | } |
| 15064 | return SDValue(); |
| 15065 | } |
| 15066 | |
| 15067 | static SDValue PerformCMPZCombine(SDNode *N, SelectionDAG &DAG) { |
| 15068 | // Given CMPZ(CSINC(C, 0, 0, EQ), 0), we can just use C directly. As in |
| 15069 | // t92: flags = ARMISD::CMPZ t74, 0 |
| 15070 | // t93: i32 = ARMISD::CSINC 0, 0, 1, t92 |
| 15071 | // t96: flags = ARMISD::CMPZ t93, 0 |
| 15072 | // t114: i32 = ARMISD::CSINV 0, 0, 0, t96 |
| 15073 | ARMCC::CondCodes Cond; |
| 15074 | if (SDValue C = IsCMPZCSINC(Cmp: N, CC&: Cond)) |
| 15075 | if (Cond == ARMCC::EQ) |
| 15076 | return C; |
| 15077 | return SDValue(); |
| 15078 | } |
| 15079 | |
| 15080 | static SDValue PerformCSETCombine(SDNode *N, SelectionDAG &DAG) { |
// Fold away an unnecessary CMPZ/CSINC
| 15082 | // CSXYZ A, B, C1 (CMPZ (CSINC 0, 0, C2, D), 0) -> |
| 15083 | // if C1==EQ -> CSXYZ A, B, C2, D |
| 15084 | // if C1==NE -> CSXYZ A, B, NOT(C2), D |
| 15085 | ARMCC::CondCodes Cond; |
| 15086 | if (SDValue C = IsCMPZCSINC(Cmp: N->getOperand(Num: 3).getNode(), CC&: Cond)) { |
| 15087 | if (N->getConstantOperandVal(Num: 2) == ARMCC::EQ) |
| 15088 | return DAG.getNode(Opcode: N->getOpcode(), DL: SDLoc(N), VT: MVT::i32, N1: N->getOperand(Num: 0), |
| 15089 | N2: N->getOperand(Num: 1), |
| 15090 | N3: DAG.getConstant(Val: Cond, DL: SDLoc(N), VT: MVT::i32), N4: C); |
| 15091 | if (N->getConstantOperandVal(Num: 2) == ARMCC::NE) |
| 15092 | return DAG.getNode( |
| 15093 | Opcode: N->getOpcode(), DL: SDLoc(N), VT: MVT::i32, N1: N->getOperand(Num: 0), |
| 15094 | N2: N->getOperand(Num: 1), |
| 15095 | N3: DAG.getConstant(Val: ARMCC::getOppositeCondition(CC: Cond), DL: SDLoc(N), VT: MVT::i32), N4: C); |
| 15096 | } |
| 15097 | return SDValue(); |
| 15098 | } |
| 15099 | |
| 15100 | /// PerformVMOVRRDCombine - Target-specific dag combine xforms for |
| 15101 | /// ARMISD::VMOVRRD. |
| 15102 | static SDValue PerformVMOVRRDCombine(SDNode *N, |
| 15103 | TargetLowering::DAGCombinerInfo &DCI, |
| 15104 | const ARMSubtarget *Subtarget) { |
| 15105 | // vmovrrd(vmovdrr x, y) -> x,y |
| 15106 | SDValue InDouble = N->getOperand(Num: 0); |
| 15107 | if (InDouble.getOpcode() == ARMISD::VMOVDRR && Subtarget->hasFP64()) |
| 15108 | return DCI.CombineTo(N, Res0: InDouble.getOperand(i: 0), Res1: InDouble.getOperand(i: 1)); |
| 15109 | |
| 15110 | // vmovrrd(load f64) -> (load i32), (load i32) |
| 15111 | SDNode *InNode = InDouble.getNode(); |
| 15112 | if (ISD::isNormalLoad(N: InNode) && InNode->hasOneUse() && |
| 15113 | InNode->getValueType(ResNo: 0) == MVT::f64 && |
| 15114 | InNode->getOperand(Num: 1).getOpcode() == ISD::FrameIndex && |
| 15115 | !cast<LoadSDNode>(Val: InNode)->isVolatile()) { |
| 15116 | // TODO: Should this be done for non-FrameIndex operands? |
| 15117 | LoadSDNode *LD = cast<LoadSDNode>(Val: InNode); |
| 15118 | |
| 15119 | SelectionDAG &DAG = DCI.DAG; |
| 15120 | SDLoc DL(LD); |
| 15121 | SDValue BasePtr = LD->getBasePtr(); |
| 15122 | SDValue NewLD1 = |
| 15123 | DAG.getLoad(VT: MVT::i32, dl: DL, Chain: LD->getChain(), Ptr: BasePtr, PtrInfo: LD->getPointerInfo(), |
| 15124 | Alignment: LD->getAlign(), MMOFlags: LD->getMemOperand()->getFlags()); |
| 15125 | |
| 15126 | SDValue OffsetPtr = DAG.getNode(Opcode: ISD::ADD, DL, VT: MVT::i32, N1: BasePtr, |
| 15127 | N2: DAG.getConstant(Val: 4, DL, VT: MVT::i32)); |
| 15128 | |
| 15129 | SDValue NewLD2 = DAG.getLoad(VT: MVT::i32, dl: DL, Chain: LD->getChain(), Ptr: OffsetPtr, |
| 15130 | PtrInfo: LD->getPointerInfo().getWithOffset(O: 4), |
| 15131 | Alignment: commonAlignment(A: LD->getAlign(), Offset: 4), |
| 15132 | MMOFlags: LD->getMemOperand()->getFlags()); |
| 15133 | |
| 15134 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 1), To: NewLD2.getValue(R: 1)); |
| 15135 | if (DCI.DAG.getDataLayout().isBigEndian()) |
| 15136 | std::swap (a&: NewLD1, b&: NewLD2); |
| 15137 | SDValue Result = DCI.CombineTo(N, Res0: NewLD1, Res1: NewLD2); |
| 15138 | return Result; |
| 15139 | } |
| 15140 | |
| 15141 | // VMOVRRD(extract(..(build_vector(a, b, c, d)))) -> a,b or c,d |
| 15142 | // VMOVRRD(extract(insert_vector(insert_vector(.., a, l1), b, l2))) -> a,b |
| 15143 | if (InDouble.getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 15144 | isa<ConstantSDNode>(Val: InDouble.getOperand(i: 1))) { |
| 15145 | SDValue BV = InDouble.getOperand(i: 0); |
// Look up through any nop bitcasts and vector_reg_casts. Bitcasts may
// change lane order under big endian.
| 15148 | bool BVSwap = BV.getOpcode() == ISD::BITCAST; |
| 15149 | while ( |
| 15150 | (BV.getOpcode() == ISD::BITCAST || |
| 15151 | BV.getOpcode() == ARMISD::VECTOR_REG_CAST) && |
| 15152 | (BV.getValueType() == MVT::v2f64 || BV.getValueType() == MVT::v2i64)) { |
| 15153 | BVSwap = BV.getOpcode() == ISD::BITCAST; |
| 15154 | BV = BV.getOperand(i: 0); |
| 15155 | } |
| 15156 | if (BV.getValueType() != MVT::v4i32) |
| 15157 | return SDValue(); |
| 15158 | |
| 15159 | // Handle buildvectors, pulling out the correct lane depending on |
| 15160 | // endianness. |
| 15161 | unsigned Offset = InDouble.getConstantOperandVal(i: 1) == 1 ? 2 : 0; |
| 15162 | if (BV.getOpcode() == ISD::BUILD_VECTOR) { |
| 15163 | SDValue Op0 = BV.getOperand(i: Offset); |
| 15164 | SDValue Op1 = BV.getOperand(i: Offset + 1); |
| 15165 | if (!Subtarget->isLittle() && BVSwap) |
| 15166 | std::swap(a&: Op0, b&: Op1); |
| 15167 | |
| 15168 | return DCI.DAG.getMergeValues(Ops: {Op0, Op1}, dl: SDLoc(N)); |
| 15169 | } |
| 15170 | |
| 15171 | // A chain of insert_vectors, grabbing the correct value of the chain of |
| 15172 | // inserts. |
| 15173 | SDValue Op0, Op1; |
| 15174 | while (BV.getOpcode() == ISD::INSERT_VECTOR_ELT) { |
| 15175 | if (isa<ConstantSDNode>(Val: BV.getOperand(i: 2))) { |
| 15176 | if (BV.getConstantOperandVal(i: 2) == Offset && !Op0) |
| 15177 | Op0 = BV.getOperand(i: 1); |
| 15178 | if (BV.getConstantOperandVal(i: 2) == Offset + 1 && !Op1) |
| 15179 | Op1 = BV.getOperand(i: 1); |
| 15180 | } |
| 15181 | BV = BV.getOperand(i: 0); |
| 15182 | } |
| 15183 | if (!Subtarget->isLittle() && BVSwap) |
| 15184 | std::swap(a&: Op0, b&: Op1); |
| 15185 | if (Op0 && Op1) |
| 15186 | return DCI.DAG.getMergeValues(Ops: {Op0, Op1}, dl: SDLoc(N)); |
| 15187 | } |
| 15188 | |
| 15189 | return SDValue(); |
| 15190 | } |
| 15191 | |
| 15192 | /// PerformVMOVDRRCombine - Target-specific dag combine xforms for |
| 15193 | /// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. |
| 15194 | static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { |
| 15195 | // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) |
| 15196 | SDValue Op0 = N->getOperand(Num: 0); |
| 15197 | SDValue Op1 = N->getOperand(Num: 1); |
| 15198 | if (Op0.getOpcode() == ISD::BITCAST) |
| 15199 | Op0 = Op0.getOperand(i: 0); |
| 15200 | if (Op1.getOpcode() == ISD::BITCAST) |
| 15201 | Op1 = Op1.getOperand(i: 0); |
| 15202 | if (Op0.getOpcode() == ARMISD::VMOVRRD && |
| 15203 | Op0.getNode() == Op1.getNode() && |
| 15204 | Op0.getResNo() == 0 && Op1.getResNo() == 1) |
| 15205 | return DAG.getNode(Opcode: ISD::BITCAST, DL: SDLoc(N), |
| 15206 | VT: N->getValueType(ResNo: 0), Operand: Op0.getOperand(i: 0)); |
| 15207 | return SDValue(); |
| 15208 | } |
| 15209 | |
| 15210 | static SDValue PerformVMOVhrCombine(SDNode *N, |
| 15211 | TargetLowering::DAGCombinerInfo &DCI) { |
| 15212 | SDValue Op0 = N->getOperand(Num: 0); |
| 15213 | |
| 15214 | // VMOVhr (VMOVrh (X)) -> X |
| 15215 | if (Op0->getOpcode() == ARMISD::VMOVrh) |
| 15216 | return Op0->getOperand(Num: 0); |
| 15217 | |
| 15218 | // FullFP16: half values are passed in S-registers, and we don't |
// need any of the bitcasts and moves:
| 15220 | // |
| 15221 | // t2: f32,ch1,gl1? = CopyFromReg ch, Register:f32 %0, gl? |
| 15222 | // t5: i32 = bitcast t2 |
| 15223 | // t18: f16 = ARMISD::VMOVhr t5 |
| 15224 | // => |
// tN: f16,ch2,gl2? = CopyFromReg ch, Register:f32 %0, gl?
| 15226 | if (Op0->getOpcode() == ISD::BITCAST) { |
| 15227 | SDValue Copy = Op0->getOperand(Num: 0); |
| 15228 | if (Copy.getValueType() == MVT::f32 && |
| 15229 | Copy->getOpcode() == ISD::CopyFromReg) { |
| 15230 | bool HasGlue = Copy->getNumOperands() == 3; |
| 15231 | SDValue Ops[] = {Copy->getOperand(Num: 0), Copy->getOperand(Num: 1), |
| 15232 | HasGlue ? Copy->getOperand(Num: 2) : SDValue()}; |
| 15233 | EVT OutTys[] = {N->getValueType(ResNo: 0), MVT::Other, MVT::Glue}; |
| 15234 | SDValue NewCopy = |
| 15235 | DCI.DAG.getNode(Opcode: ISD::CopyFromReg, DL: SDLoc(N), |
| 15236 | VTList: DCI.DAG.getVTList(VTs: ArrayRef(OutTys, HasGlue ? 3 : 2)), |
| 15237 | Ops: ArrayRef(Ops, HasGlue ? 3 : 2)); |
| 15238 | |
| 15239 | // Update Users, Chains, and Potential Glue. |
| 15240 | DCI.DAG.ReplaceAllUsesOfValueWith(From: SDValue(N, 0), To: NewCopy.getValue(R: 0)); |
| 15241 | DCI.DAG.ReplaceAllUsesOfValueWith(From: Copy.getValue(R: 1), To: NewCopy.getValue(R: 1)); |
| 15242 | if (HasGlue) |
| 15243 | DCI.DAG.ReplaceAllUsesOfValueWith(From: Copy.getValue(R: 2), |
| 15244 | To: NewCopy.getValue(R: 2)); |
| 15245 | |
| 15246 | return NewCopy; |
| 15247 | } |
| 15248 | } |
| 15249 | |
| 15250 | // fold (VMOVhr (load x)) -> (load (f16*)x) |
| 15251 | if (LoadSDNode *LN0 = dyn_cast<LoadSDNode>(Val&: Op0)) { |
| 15252 | if (LN0->hasOneUse() && LN0->isUnindexed() && |
| 15253 | LN0->getMemoryVT() == MVT::i16) { |
| 15254 | SDValue Load = |
| 15255 | DCI.DAG.getLoad(VT: N->getValueType(ResNo: 0), dl: SDLoc(N), Chain: LN0->getChain(), |
| 15256 | Ptr: LN0->getBasePtr(), MMO: LN0->getMemOperand()); |
| 15257 | DCI.DAG.ReplaceAllUsesOfValueWith(From: SDValue(N, 0), To: Load.getValue(R: 0)); |
| 15258 | DCI.DAG.ReplaceAllUsesOfValueWith(From: Op0.getValue(R: 1), To: Load.getValue(R: 1)); |
| 15259 | return Load; |
| 15260 | } |
| 15261 | } |
| 15262 | |
| 15263 | // Only the bottom 16 bits of the source register are used. |
| 15264 | APInt DemandedMask = APInt::getLowBitsSet(numBits: 32, loBitsSet: 16); |
| 15265 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); |
| 15266 | if (TLI.SimplifyDemandedBits(Op: Op0, DemandedBits: DemandedMask, DCI)) |
| 15267 | return SDValue(N, 0); |
| 15268 | |
| 15269 | return SDValue(); |
| 15270 | } |
| 15271 | |
| 15272 | static SDValue PerformVMOVrhCombine(SDNode *N, SelectionDAG &DAG) { |
| 15273 | SDValue N0 = N->getOperand(Num: 0); |
| 15274 | EVT VT = N->getValueType(ResNo: 0); |
| 15275 | |
| 15276 | // fold (VMOVrh (fpconst x)) -> const x |
| 15277 | if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Val&: N0)) { |
| 15278 | APFloat V = C->getValueAPF(); |
| 15279 | return DAG.getConstant(Val: V.bitcastToAPInt().getZExtValue(), DL: SDLoc(N), VT); |
| 15280 | } |
| 15281 | |
| 15282 | // fold (VMOVrh (load x)) -> (zextload (i16*)x) |
| 15283 | if (ISD::isNormalLoad(N: N0.getNode()) && N0.hasOneUse()) { |
| 15284 | LoadSDNode *LN0 = cast<LoadSDNode>(Val&: N0); |
| 15285 | |
| 15286 | SDValue Load = |
| 15287 | DAG.getExtLoad(ExtType: ISD::ZEXTLOAD, dl: SDLoc(N), VT, Chain: LN0->getChain(), |
| 15288 | Ptr: LN0->getBasePtr(), MemVT: MVT::i16, MMO: LN0->getMemOperand()); |
| 15289 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(N, 0), To: Load.getValue(R: 0)); |
| 15290 | DAG.ReplaceAllUsesOfValueWith(From: N0.getValue(R: 1), To: Load.getValue(R: 1)); |
| 15291 | return Load; |
| 15292 | } |
| 15293 | |
| 15294 | // Fold VMOVrh(extract(x, n)) -> vgetlaneu(x, n) |
| 15295 | if (N0->getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 15296 | isa<ConstantSDNode>(Val: N0->getOperand(Num: 1))) |
| 15297 | return DAG.getNode(Opcode: ARMISD::VGETLANEu, DL: SDLoc(N), VT, N1: N0->getOperand(Num: 0), |
| 15298 | N2: N0->getOperand(Num: 1)); |
| 15299 | |
| 15300 | return SDValue(); |
| 15301 | } |
| 15302 | |
| 15303 | /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node |
| 15304 | /// are normal, non-volatile loads. If so, it is profitable to bitcast an |
| 15305 | /// i64 vector to have f64 elements, since the value can then be loaded |
| 15306 | /// directly into a VFP register. |
| 15307 | static bool hasNormalLoadOperand(SDNode *N) { |
| 15308 | unsigned NumElts = N->getValueType(ResNo: 0).getVectorNumElements(); |
| 15309 | for (unsigned i = 0; i < NumElts; ++i) { |
| 15310 | SDNode *Elt = N->getOperand(Num: i).getNode(); |
| 15311 | if (ISD::isNormalLoad(N: Elt) && !cast<LoadSDNode>(Val: Elt)->isVolatile()) |
| 15312 | return true; |
| 15313 | } |
| 15314 | return false; |
| 15315 | } |
| 15316 | |
| 15317 | /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for |
| 15318 | /// ISD::BUILD_VECTOR. |
| 15319 | static SDValue PerformBUILD_VECTORCombine(SDNode *N, |
| 15320 | TargetLowering::DAGCombinerInfo &DCI, |
| 15321 | const ARMSubtarget *Subtarget) { |
| 15322 | // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): |
| 15323 | // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value |
| 15324 | // into a pair of GPRs, which is fine when the value is used as a scalar, |
| 15325 | // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. |
| 15326 | SelectionDAG &DAG = DCI.DAG; |
| 15327 | if (N->getNumOperands() == 2) |
| 15328 | if (SDValue RV = PerformVMOVDRRCombine(N, DAG)) |
| 15329 | return RV; |
| 15330 | |
| 15331 | // Load i64 elements as f64 values so that type legalization does not split |
| 15332 | // them up into i32 values. |
| 15333 | EVT VT = N->getValueType(ResNo: 0); |
| 15334 | if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) |
| 15335 | return SDValue(); |
| 15336 | SDLoc dl(N); |
| 15337 | SmallVector<SDValue, 8> Ops; |
| 15338 | unsigned NumElts = VT.getVectorNumElements(); |
| 15339 | for (unsigned i = 0; i < NumElts; ++i) { |
| 15340 | SDValue V = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f64, Operand: N->getOperand(Num: i)); |
| 15341 | Ops.push_back(Elt: V); |
| 15342 | // Make the DAGCombiner fold the bitcast. |
| 15343 | DCI.AddToWorklist(N: V.getNode()); |
| 15344 | } |
| 15345 | EVT FloatVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::f64, NumElements: NumElts); |
| 15346 | SDValue BV = DAG.getBuildVector(VT: FloatVT, DL: dl, Ops); |
| 15347 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: BV); |
| 15348 | } |
| 15349 | |
| 15350 | /// Target-specific dag combine xforms for ARMISD::BUILD_VECTOR. |
| 15351 | static SDValue |
| 15352 | PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
| 15353 | // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR. |
| 15354 | // At that time, we may have inserted bitcasts from integer to float. |
| 15355 | // If these bitcasts have survived DAGCombine, change the lowering of this |
// BUILD_VECTOR into something more vector friendly, i.e., something that
// does not force the use of floating point types.
| 15358 | |
| 15359 | // Make sure we can change the type of the vector. |
| 15360 | // This is possible iff: |
// 1. The vector is only used in a bitcast to an integer type. I.e.,
| 15362 | // 1.1. Vector is used only once. |
| 15363 | // 1.2. Use is a bit convert to an integer type. |
// 2. The size of its operands is 32 bits (64-bit elements are not legal).
| 15365 | EVT VT = N->getValueType(ResNo: 0); |
| 15366 | EVT EltVT = VT.getVectorElementType(); |
| 15367 | |
| 15368 | // Check 1.1. and 2. |
| 15369 | if (EltVT.getSizeInBits() != 32 || !N->hasOneUse()) |
| 15370 | return SDValue(); |
| 15371 | |
| 15372 | // By construction, the input type must be float. |
assert(EltVT == MVT::f32 && "Unexpected type!");
| 15374 | |
| 15375 | // Check 1.2. |
| 15376 | SDNode *Use = *N->user_begin(); |
| 15377 | if (Use->getOpcode() != ISD::BITCAST || |
| 15378 | Use->getValueType(ResNo: 0).isFloatingPoint()) |
| 15379 | return SDValue(); |
| 15380 | |
| 15381 | // Check profitability. |
| 15382 | // Model is, if more than half of the relevant operands are bitcast from |
| 15383 | // i32, turn the build_vector into a sequence of insert_vector_elt. |
| 15384 | // Relevant operands are everything that is not statically |
| 15385 | // (i.e., at compile time) bitcasted. |
| 15386 | unsigned NumOfBitCastedElts = 0; |
| 15387 | unsigned NumElts = VT.getVectorNumElements(); |
| 15388 | unsigned NumOfRelevantElts = NumElts; |
| 15389 | for (unsigned Idx = 0; Idx < NumElts; ++Idx) { |
| 15390 | SDValue Elt = N->getOperand(Num: Idx); |
| 15391 | if (Elt->getOpcode() == ISD::BITCAST) { |
| 15392 | // Assume only bit cast to i32 will go away. |
| 15393 | if (Elt->getOperand(Num: 0).getValueType() == MVT::i32) |
| 15394 | ++NumOfBitCastedElts; |
| 15395 | } else if (Elt.isUndef() || isa<ConstantSDNode>(Val: Elt)) |
| 15396 | // Constants are statically casted, thus do not count them as |
| 15397 | // relevant operands. |
| 15398 | --NumOfRelevantElts; |
| 15399 | } |
| 15400 | |
| 15401 | // Check if more than half of the elements require a non-free bitcast. |
| 15402 | if (NumOfBitCastedElts <= NumOfRelevantElts / 2) |
| 15403 | return SDValue(); |
| 15404 | |
| 15405 | SelectionDAG &DAG = DCI.DAG; |
| 15406 | // Create the new vector type. |
| 15407 | EVT VecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::i32, NumElements: NumElts); |
| 15408 | // Check if the type is legal. |
| 15409 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 15410 | if (!TLI.isTypeLegal(VT: VecVT)) |
| 15411 | return SDValue(); |
| 15412 | |
| 15413 | // Combine: |
| 15414 | // ARMISD::BUILD_VECTOR E1, E2, ..., EN. |
| 15415 | // => BITCAST INSERT_VECTOR_ELT |
| 15416 | // (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1), |
| 15417 | // (BITCAST EN), N. |
| 15418 | SDValue Vec = DAG.getUNDEF(VT: VecVT); |
| 15419 | SDLoc dl(N); |
| 15420 | for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) { |
| 15421 | SDValue V = N->getOperand(Num: Idx); |
| 15422 | if (V.isUndef()) |
| 15423 | continue; |
| 15424 | if (V.getOpcode() == ISD::BITCAST && |
| 15425 | V->getOperand(Num: 0).getValueType() == MVT::i32) |
| 15426 | // Fold obvious case. |
| 15427 | V = V.getOperand(i: 0); |
| 15428 | else { |
| 15429 | V = DAG.getNode(Opcode: ISD::BITCAST, DL: SDLoc(V), VT: MVT::i32, Operand: V); |
| 15430 | // Make the DAGCombiner fold the bitcasts. |
| 15431 | DCI.AddToWorklist(N: V.getNode()); |
| 15432 | } |
| 15433 | SDValue LaneIdx = DAG.getConstant(Val: Idx, DL: dl, VT: MVT::i32); |
| 15434 | Vec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: VecVT, N1: Vec, N2: V, N3: LaneIdx); |
| 15435 | } |
| 15436 | Vec = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Vec); |
| 15437 | // Make the DAGCombiner fold the bitcasts. |
| 15438 | DCI.AddToWorklist(N: Vec.getNode()); |
| 15439 | return Vec; |
| 15440 | } |
| 15441 | |
| 15442 | static SDValue |
| 15443 | PerformPREDICATE_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
| 15444 | EVT VT = N->getValueType(ResNo: 0); |
| 15445 | SDValue Op = N->getOperand(Num: 0); |
| 15446 | SDLoc dl(N); |
| 15447 | |
| 15448 | // PREDICATE_CAST(PREDICATE_CAST(x)) == PREDICATE_CAST(x) |
| 15449 | if (Op->getOpcode() == ARMISD::PREDICATE_CAST) { |
| 15450 | // If the valuetypes are the same, we can remove the cast entirely. |
| 15451 | if (Op->getOperand(Num: 0).getValueType() == VT) |
| 15452 | return Op->getOperand(Num: 0); |
| 15453 | return DCI.DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT, Operand: Op->getOperand(Num: 0)); |
| 15454 | } |
| 15455 | |
| 15456 | // Turn pred_cast(xor x, -1) into xor(pred_cast x, -1), in order to produce |
| 15457 | // more VPNOT which might get folded as else predicates. |
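// The all-ones predicate is materialized as 65535 because only the bottom
// 16 bits of the i32 are meaningful for an MVE predicate (see the
// demanded-bits fold below).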
| 15458 | if (Op.getValueType() == MVT::i32 && isBitwiseNot(V: Op)) { |
| 15459 | SDValue X = |
| 15460 | DCI.DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT, Operand: Op->getOperand(Num: 0)); |
| 15461 | SDValue C = DCI.DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT, |
| 15462 | Operand: DCI.DAG.getConstant(Val: 65535, DL: dl, VT: MVT::i32)); |
| 15463 | return DCI.DAG.getNode(Opcode: ISD::XOR, DL: dl, VT, N1: X, N2: C); |
| 15464 | } |
| 15465 | |
| 15466 | // Only the bottom 16 bits of the source register are used. |
| 15467 | if (Op.getValueType() == MVT::i32) { |
| 15468 | APInt DemandedMask = APInt::getLowBitsSet(numBits: 32, loBitsSet: 16); |
| 15469 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); |
| 15470 | if (TLI.SimplifyDemandedBits(Op, DemandedBits: DemandedMask, DCI)) |
| 15471 | return SDValue(N, 0); |
| 15472 | } |
| 15473 | return SDValue(); |
| 15474 | } |
| 15475 | |
| 15476 | static SDValue PerformVECTOR_REG_CASTCombine(SDNode *N, SelectionDAG &DAG, |
| 15477 | const ARMSubtarget *ST) { |
| 15478 | EVT VT = N->getValueType(ResNo: 0); |
| 15479 | SDValue Op = N->getOperand(Num: 0); |
| 15480 | SDLoc dl(N); |
| 15481 | |
| 15482 | // Under Little endian, a VECTOR_REG_CAST is equivalent to a BITCAST |
| 15483 | if (ST->isLittle()) |
| 15484 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Op); |
| 15485 | |
| 15486 | // VT VECTOR_REG_CAST (VT Op) -> Op |
| 15487 | if (Op.getValueType() == VT) |
| 15488 | return Op; |
| 15489 | // VECTOR_REG_CAST undef -> undef |
| 15490 | if (Op.isUndef()) |
| 15491 | return DAG.getUNDEF(VT); |
| 15492 | |
| 15493 | // VECTOR_REG_CAST(VECTOR_REG_CAST(x)) == VECTOR_REG_CAST(x) |
| 15494 | if (Op->getOpcode() == ARMISD::VECTOR_REG_CAST) { |
| 15495 | // If the valuetypes are the same, we can remove the cast entirely. |
| 15496 | if (Op->getOperand(Num: 0).getValueType() == VT) |
| 15497 | return Op->getOperand(Num: 0); |
| 15498 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Op->getOperand(Num: 0)); |
| 15499 | } |
| 15500 | |
| 15501 | return SDValue(); |
| 15502 | } |
| 15503 | |
| 15504 | static SDValue PerformVCMPCombine(SDNode *N, SelectionDAG &DAG, |
| 15505 | const ARMSubtarget *Subtarget) { |
| 15506 | if (!Subtarget->hasMVEIntegerOps()) |
| 15507 | return SDValue(); |
| 15508 | |
| 15509 | EVT VT = N->getValueType(ResNo: 0); |
| 15510 | SDValue Op0 = N->getOperand(Num: 0); |
| 15511 | SDValue Op1 = N->getOperand(Num: 1); |
| 15512 | ARMCC::CondCodes Cond = (ARMCC::CondCodes)N->getConstantOperandVal(Num: 2); |
| 15513 | SDLoc dl(N); |
| 15514 | |
| 15515 | // vcmp X, 0, cc -> vcmpz X, cc |
| 15516 | if (isZeroVector(N: Op1)) |
| 15517 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: Op0, N2: N->getOperand(Num: 2)); |
| 15518 | |
| 15519 | unsigned SwappedCond = getSwappedCondition(CC: Cond); |
| 15520 | if (isValidMVECond(CC: SwappedCond, IsFloat: VT.isFloatingPoint())) { |
| 15521 | // vcmp 0, X, cc -> vcmpz X, reversed(cc) |
| 15522 | if (isZeroVector(N: Op0)) |
| 15523 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: Op1, |
| 15524 | N2: DAG.getConstant(Val: SwappedCond, DL: dl, VT: MVT::i32)); |
| 15525 | // vcmp vdup(Y), X, cc -> vcmp X, vdup(Y), reversed(cc) |
| 15526 | if (Op0->getOpcode() == ARMISD::VDUP && Op1->getOpcode() != ARMISD::VDUP) |
| 15527 | return DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT, N1: Op1, N2: Op0, |
| 15528 | N3: DAG.getConstant(Val: SwappedCond, DL: dl, VT: MVT::i32)); |
| 15529 | } |
| 15530 | |
| 15531 | return SDValue(); |
| 15532 | } |
| 15533 | |
| 15534 | /// PerformInsertEltCombine - Target-specific dag combine xforms for |
| 15535 | /// ISD::INSERT_VECTOR_ELT. |
| 15536 | static SDValue PerformInsertEltCombine(SDNode *N, |
| 15537 | TargetLowering::DAGCombinerInfo &DCI) { |
| 15538 | // Bitcast an i64 load inserted into a vector to f64. |
| 15539 | // Otherwise, the i64 value will be legalized to a pair of i32 values. |
| 15540 | EVT VT = N->getValueType(ResNo: 0); |
| 15541 | SDNode *Elt = N->getOperand(Num: 1).getNode(); |
| 15542 | if (VT.getVectorElementType() != MVT::i64 || |
| 15543 | !ISD::isNormalLoad(N: Elt) || cast<LoadSDNode>(Val: Elt)->isVolatile()) |
| 15544 | return SDValue(); |
| 15545 | |
| 15546 | SelectionDAG &DAG = DCI.DAG; |
| 15547 | SDLoc dl(N); |
| 15548 | EVT FloatVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::f64, |
| 15549 | NumElements: VT.getVectorNumElements()); |
| 15550 | SDValue Vec = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: FloatVT, Operand: N->getOperand(Num: 0)); |
| 15551 | SDValue V = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f64, Operand: N->getOperand(Num: 1)); |
| 15552 | // Make the DAGCombiner fold the bitcasts. |
| 15553 | DCI.AddToWorklist(N: Vec.getNode()); |
| 15554 | DCI.AddToWorklist(N: V.getNode()); |
| 15555 | SDValue InsElt = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: FloatVT, |
| 15556 | N1: Vec, N2: V, N3: N->getOperand(Num: 2)); |
| 15557 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: InsElt); |
| 15558 | } |
| 15559 | |
| 15560 | // Convert a pair of extracts from the same base vector to a VMOVRRD. Either |
| 15561 | // directly or bitcast to an integer if the original is a float vector. |
| 15562 | // extract(x, n); extract(x, n+1) -> VMOVRRD(extract v2f64 x, n/2) |
| 15563 | // bitcast(extract(x, n)); bitcast(extract(x, n+1)) -> VMOVRRD(extract x, n/2) |
| 15564 | static SDValue |
PerformExtractEltToVMOVRRD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
| 15566 | EVT VT = N->getValueType(ResNo: 0); |
| 15567 | SDLoc dl(N); |
| 15568 | |
| 15569 | if (!DCI.isAfterLegalizeDAG() || VT != MVT::i32 || |
| 15570 | !DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT: MVT::f64)) |
| 15571 | return SDValue(); |
| 15572 | |
| 15573 | SDValue Ext = SDValue(N, 0); |
| 15574 | if (Ext.getOpcode() == ISD::BITCAST && |
| 15575 | Ext.getOperand(i: 0).getValueType() == MVT::f32) |
| 15576 | Ext = Ext.getOperand(i: 0); |
| 15577 | if (Ext.getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
| 15578 | !isa<ConstantSDNode>(Val: Ext.getOperand(i: 1)) || |
| 15579 | Ext.getConstantOperandVal(i: 1) % 2 != 0) |
| 15580 | return SDValue(); |
| 15581 | if (Ext->hasOneUse() && (Ext->user_begin()->getOpcode() == ISD::SINT_TO_FP || |
| 15582 | Ext->user_begin()->getOpcode() == ISD::UINT_TO_FP)) |
| 15583 | return SDValue(); |
| 15584 | |
| 15585 | SDValue Op0 = Ext.getOperand(i: 0); |
| 15586 | EVT VecVT = Op0.getValueType(); |
| 15587 | unsigned ResNo = Op0.getResNo(); |
| 15588 | unsigned Lane = Ext.getConstantOperandVal(i: 1); |
| 15589 | if (VecVT.getVectorNumElements() != 4) |
| 15590 | return SDValue(); |
| 15591 | |
| 15592 | // Find another extract, of Lane + 1 |
| 15593 | auto OtherIt = find_if(Range: Op0->users(), P: [&](SDNode *V) { |
| 15594 | return V->getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 15595 | isa<ConstantSDNode>(Val: V->getOperand(Num: 1)) && |
| 15596 | V->getConstantOperandVal(Num: 1) == Lane + 1 && |
| 15597 | V->getOperand(Num: 0).getResNo() == ResNo; |
| 15598 | }); |
| 15599 | if (OtherIt == Op0->users().end()) |
| 15600 | return SDValue(); |
| 15601 | |
// For float extracts, we need both vector lanes to be converted to i32.
| 15604 | SDValue OtherExt(*OtherIt, 0); |
| 15605 | if (OtherExt.getValueType() != MVT::i32) { |
| 15606 | if (!OtherExt->hasOneUse() || |
| 15607 | OtherExt->user_begin()->getOpcode() != ISD::BITCAST || |
| 15608 | OtherExt->user_begin()->getValueType(ResNo: 0) != MVT::i32) |
| 15609 | return SDValue(); |
| 15610 | OtherExt = SDValue(*OtherExt->user_begin(), 0); |
| 15611 | } |
| 15612 | |
| 15613 | // Convert the type to a f64 and extract with a VMOVRRD. |
| 15614 | SDValue F64 = DCI.DAG.getNode( |
| 15615 | Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, |
| 15616 | N1: DCI.DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v2f64, Operand: Op0), |
| 15617 | N2: DCI.DAG.getConstant(Val: Ext.getConstantOperandVal(i: 1) / 2, DL: dl, VT: MVT::i32)); |
| 15618 | SDValue VMOVRRD = |
| 15619 | DCI.DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, ResultTys: {MVT::i32, MVT::i32}, Ops: F64); |
| 15620 | |
| 15621 | DCI.CombineTo(N: OtherExt.getNode(), Res: SDValue(VMOVRRD.getNode(), 1)); |
| 15622 | return VMOVRRD; |
| 15623 | } |
| 15624 | |
static SDValue PerformExtractEltCombine(SDNode *N,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const ARMSubtarget *ST) {
| 15628 | SDValue Op0 = N->getOperand(Num: 0); |
| 15629 | EVT VT = N->getValueType(ResNo: 0); |
| 15630 | SDLoc dl(N); |
| 15631 | |
| 15632 | // extract (vdup x) -> x |
| 15633 | if (Op0->getOpcode() == ARMISD::VDUP) { |
| 15634 | SDValue X = Op0->getOperand(Num: 0); |
| 15635 | if (VT == MVT::f16 && X.getValueType() == MVT::i32) |
| 15636 | return DCI.DAG.getNode(Opcode: ARMISD::VMOVhr, DL: dl, VT, Operand: X); |
| 15637 | if (VT == MVT::i32 && X.getValueType() == MVT::f16) |
| 15638 | return DCI.DAG.getNode(Opcode: ARMISD::VMOVrh, DL: dl, VT, Operand: X); |
| 15639 | if (VT == MVT::f32 && X.getValueType() == MVT::i32) |
| 15640 | return DCI.DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: X); |
| 15641 | |
| 15642 | while (X.getValueType() != VT && X->getOpcode() == ISD::BITCAST) |
| 15643 | X = X->getOperand(Num: 0); |
| 15644 | if (X.getValueType() == VT) |
| 15645 | return X; |
| 15646 | } |
| 15647 | |
| 15648 | // extract ARM_BUILD_VECTOR -> x |
| 15649 | if (Op0->getOpcode() == ARMISD::BUILD_VECTOR && |
| 15650 | isa<ConstantSDNode>(Val: N->getOperand(Num: 1)) && |
| 15651 | N->getConstantOperandVal(Num: 1) < Op0.getNumOperands()) { |
| 15652 | return Op0.getOperand(i: N->getConstantOperandVal(Num: 1)); |
| 15653 | } |
| 15654 | |
| 15655 | // extract(bitcast(BUILD_VECTOR(VMOVDRR(a, b), ..))) -> a or b |
| 15656 | if (Op0.getValueType() == MVT::v4i32 && |
| 15657 | isa<ConstantSDNode>(Val: N->getOperand(Num: 1)) && |
| 15658 | Op0.getOpcode() == ISD::BITCAST && |
| 15659 | Op0.getOperand(i: 0).getOpcode() == ISD::BUILD_VECTOR && |
| 15660 | Op0.getOperand(i: 0).getValueType() == MVT::v2f64) { |
| 15661 | SDValue BV = Op0.getOperand(i: 0); |
| 15662 | unsigned Offset = N->getConstantOperandVal(Num: 1); |
| 15663 | SDValue MOV = BV.getOperand(i: Offset < 2 ? 0 : 1); |
| 15664 | if (MOV.getOpcode() == ARMISD::VMOVDRR) |
| 15665 | return MOV.getOperand(i: ST->isLittle() ? Offset % 2 : 1 - Offset % 2); |
| 15666 | } |
| 15667 | |
| 15668 | // extract x, n; extract x, n+1 -> VMOVRRD x |
| 15669 | if (SDValue R = PerformExtractEltToVMOVRRD(N, DCI)) |
| 15670 | return R; |
| 15671 | |
| 15672 | // extract (MVETrunc(x)) -> extract x |
| 15673 | if (Op0->getOpcode() == ARMISD::MVETRUNC) { |
| 15674 | unsigned Idx = N->getConstantOperandVal(Num: 1); |
| 15675 | unsigned Vec = |
| 15676 | Idx / Op0->getOperand(Num: 0).getValueType().getVectorNumElements(); |
| 15677 | unsigned SubIdx = |
| 15678 | Idx % Op0->getOperand(Num: 0).getValueType().getVectorNumElements(); |
| 15679 | return DCI.DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT, N1: Op0.getOperand(i: Vec), |
| 15680 | N2: DCI.DAG.getConstant(Val: SubIdx, DL: dl, VT: MVT::i32)); |
| 15681 | } |
| 15682 | |
| 15683 | return SDValue(); |
| 15684 | } |
| 15685 | |
| 15686 | static SDValue PerformSignExtendInregCombine(SDNode *N, SelectionDAG &DAG) { |
| 15687 | SDValue Op = N->getOperand(Num: 0); |
| 15688 | EVT VT = N->getValueType(ResNo: 0); |
| 15689 | |
| 15690 | // sext_inreg(VGETLANEu) -> VGETLANEs |
| 15691 | if (Op.getOpcode() == ARMISD::VGETLANEu && |
| 15692 | cast<VTSDNode>(Val: N->getOperand(Num: 1))->getVT() == |
| 15693 | Op.getOperand(i: 0).getValueType().getScalarType()) |
| 15694 | return DAG.getNode(Opcode: ARMISD::VGETLANEs, DL: SDLoc(N), VT, N1: Op.getOperand(i: 0), |
| 15695 | N2: Op.getOperand(i: 1)); |
| 15696 | |
| 15697 | return SDValue(); |
| 15698 | } |
| 15699 | |
| 15700 | static SDValue |
| 15701 | PerformInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
| 15702 | SDValue Vec = N->getOperand(Num: 0); |
| 15703 | SDValue SubVec = N->getOperand(Num: 1); |
| 15704 | uint64_t IdxVal = N->getConstantOperandVal(Num: 2); |
| 15705 | EVT VecVT = Vec.getValueType(); |
| 15706 | EVT SubVT = SubVec.getValueType(); |
| 15707 | |
| 15708 | // Only do this for legal fixed vector types. |
| 15709 | if (!VecVT.isFixedLengthVector() || |
| 15710 | !DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT: VecVT) || |
| 15711 | !DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT: SubVT)) |
| 15712 | return SDValue(); |
| 15713 | |
| 15714 | // Ignore widening patterns. |
| 15715 | if (IdxVal == 0 && Vec.isUndef()) |
| 15716 | return SDValue(); |
| 15717 | |
| 15718 | // Subvector must be half the width and an "aligned" insertion. |
| 15719 | unsigned NumSubElts = SubVT.getVectorNumElements(); |
| 15720 | if ((SubVT.getSizeInBits() * 2) != VecVT.getSizeInBits() || |
| 15721 | (IdxVal != 0 && IdxVal != NumSubElts)) |
| 15722 | return SDValue(); |
| 15723 | |
| 15724 | // Fold insert_subvector -> concat_vectors |
| 15725 | // insert_subvector(Vec,Sub,lo) -> concat_vectors(Sub,extract(Vec,hi)) |
| 15726 | // insert_subvector(Vec,Sub,hi) -> concat_vectors(extract(Vec,lo),Sub) |
| 15727 | SDLoc DL(N); |
| 15728 | SDValue Lo, Hi; |
| 15729 | if (IdxVal == 0) { |
| 15730 | Lo = SubVec; |
| 15731 | Hi = DCI.DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT: SubVT, N1: Vec, |
| 15732 | N2: DCI.DAG.getVectorIdxConstant(Val: NumSubElts, DL)); |
| 15733 | } else { |
| 15734 | Lo = DCI.DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT: SubVT, N1: Vec, |
| 15735 | N2: DCI.DAG.getVectorIdxConstant(Val: 0, DL)); |
| 15736 | Hi = SubVec; |
| 15737 | } |
| 15738 | return DCI.DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: VecVT, N1: Lo, N2: Hi); |
| 15739 | } |
| 15740 | |
| 15741 | // shuffle(MVETrunc(x, y)) -> VMOVN(x, y) |
| 15742 | static SDValue PerformShuffleVMOVNCombine(ShuffleVectorSDNode *N, |
| 15743 | SelectionDAG &DAG) { |
| 15744 | SDValue Trunc = N->getOperand(Num: 0); |
| 15745 | EVT VT = Trunc.getValueType(); |
| 15746 | if (Trunc.getOpcode() != ARMISD::MVETRUNC || !N->getOperand(Num: 1).isUndef()) |
| 15747 | return SDValue(); |
| 15748 | |
| 15749 | SDLoc DL(Trunc); |
| 15750 | if (isVMOVNTruncMask(M: N->getMask(), ToVT: VT, rev: false)) |
| 15751 | return DAG.getNode( |
| 15752 | Opcode: ARMISD::VMOVN, DL, VT, |
| 15753 | N1: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: Trunc.getOperand(i: 0)), |
| 15754 | N2: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: Trunc.getOperand(i: 1)), |
| 15755 | N3: DAG.getConstant(Val: 1, DL, VT: MVT::i32)); |
| 15756 | else if (isVMOVNTruncMask(M: N->getMask(), ToVT: VT, rev: true)) |
| 15757 | return DAG.getNode( |
| 15758 | Opcode: ARMISD::VMOVN, DL, VT, |
| 15759 | N1: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: Trunc.getOperand(i: 1)), |
| 15760 | N2: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: Trunc.getOperand(i: 0)), |
| 15761 | N3: DAG.getConstant(Val: 1, DL, VT: MVT::i32)); |
| 15762 | return SDValue(); |
| 15763 | } |
| 15764 | |
| 15765 | /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for |
| 15766 | /// ISD::VECTOR_SHUFFLE. |
| 15767 | static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { |
| 15768 | if (SDValue R = PerformShuffleVMOVNCombine(N: cast<ShuffleVectorSDNode>(Val: N), DAG)) |
| 15769 | return R; |
| 15770 | |
| 15771 | // The LLVM shufflevector instruction does not require the shuffle mask |
| 15772 | // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does |
| 15773 | // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the |
| 15774 | // operands do not match the mask length, they are extended by concatenating |
| 15775 | // them with undef vectors. That is probably the right thing for other |
| 15776 | // targets, but for NEON it is better to concatenate two double-register |
| 15777 | // size vector operands into a single quad-register size vector. Do that |
| 15778 | // transformation here: |
| 15779 | // shuffle(concat(v1, undef), concat(v2, undef)) -> |
| 15780 | // shuffle(concat(v1, v2), undef) |
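// For example, with v8i16 operands built as concat(v1, undef) and
// concat(v2, undef), a mask element of 8 (lane 0 of the second operand)
// becomes 4, selecting v2's lane 0 inside the new concat(v1, v2); mask
// elements that referred to the undef halves become -1.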
| 15781 | SDValue Op0 = N->getOperand(Num: 0); |
| 15782 | SDValue Op1 = N->getOperand(Num: 1); |
| 15783 | if (Op0.getOpcode() != ISD::CONCAT_VECTORS || |
| 15784 | Op1.getOpcode() != ISD::CONCAT_VECTORS || |
| 15785 | Op0.getNumOperands() != 2 || |
| 15786 | Op1.getNumOperands() != 2) |
| 15787 | return SDValue(); |
| 15788 | SDValue Concat0Op1 = Op0.getOperand(i: 1); |
| 15789 | SDValue Concat1Op1 = Op1.getOperand(i: 1); |
| 15790 | if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef()) |
| 15791 | return SDValue(); |
| 15792 | // Skip the transformation if any of the types are illegal. |
| 15793 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 15794 | EVT VT = N->getValueType(ResNo: 0); |
| 15795 | if (!TLI.isTypeLegal(VT) || |
| 15796 | !TLI.isTypeLegal(VT: Concat0Op1.getValueType()) || |
| 15797 | !TLI.isTypeLegal(VT: Concat1Op1.getValueType())) |
| 15798 | return SDValue(); |
| 15799 | |
| 15800 | SDValue NewConcat = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: SDLoc(N), VT, |
| 15801 | N1: Op0.getOperand(i: 0), N2: Op1.getOperand(i: 0)); |
| 15802 | // Translate the shuffle mask. |
| 15803 | SmallVector<int, 16> NewMask; |
| 15804 | unsigned NumElts = VT.getVectorNumElements(); |
| 15805 | unsigned HalfElts = NumElts/2; |
| 15806 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Val: N); |
| 15807 | for (unsigned n = 0; n < NumElts; ++n) { |
| 15808 | int MaskElt = SVN->getMaskElt(Idx: n); |
| 15809 | int NewElt = -1; |
| 15810 | if (MaskElt < (int)HalfElts) |
| 15811 | NewElt = MaskElt; |
| 15812 | else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) |
| 15813 | NewElt = HalfElts + MaskElt - NumElts; |
| 15814 | NewMask.push_back(Elt: NewElt); |
| 15815 | } |
| 15816 | return DAG.getVectorShuffle(VT, dl: SDLoc(N), N1: NewConcat, |
| 15817 | N2: DAG.getUNDEF(VT), Mask: NewMask); |
| 15818 | } |
| 15819 | |
| 15820 | /// Load/store instruction that can be merged with a base address |
| 15821 | /// update |
| 15822 | struct BaseUpdateTarget { |
| 15823 | SDNode *N; |
| 15824 | bool isIntrinsic; |
| 15825 | bool isStore; |
| 15826 | unsigned AddrOpIdx; |
| 15827 | }; |
| 15828 | |
| 15829 | struct BaseUpdateUser { |
| 15830 | /// Instruction that updates a pointer |
| 15831 | SDNode *N; |
| 15832 | /// Pointer increment operand |
| 15833 | SDValue Inc; |
| 15834 | /// Pointer increment value if it is a constant, or 0 otherwise |
| 15835 | unsigned ConstInc; |
| 15836 | }; |
| 15837 | |
| 15838 | static bool isValidBaseUpdate(SDNode *N, SDNode *User) { |
| 15839 | // Check that the add is independent of the load/store. |
| 15840 | // Otherwise, folding it would create a cycle. Search through Addr |
| 15841 | // as well, since the User may not be a direct user of Addr and |
| 15842 | // only share a base pointer. |
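// For example, if the pointer increment is itself computed from the value
// loaded by N, folding it into a pre/post-indexed access would make the
// load depend on its own result and create a cycle in the DAG.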
| 15843 | SmallPtrSet<const SDNode *, 32> Visited; |
| 15844 | SmallVector<const SDNode *, 16> Worklist; |
| 15845 | Worklist.push_back(Elt: N); |
| 15846 | Worklist.push_back(Elt: User); |
| 15847 | const unsigned MaxSteps = 1024; |
| 15848 | if (SDNode::hasPredecessorHelper(N, Visited, Worklist, MaxSteps) || |
| 15849 | SDNode::hasPredecessorHelper(N: User, Visited, Worklist, MaxSteps)) |
| 15850 | return false; |
| 15851 | return true; |
| 15852 | } |
| 15853 | |
| 15854 | static bool TryCombineBaseUpdate(struct BaseUpdateTarget &Target, |
| 15855 | struct BaseUpdateUser &User, |
| 15856 | bool SimpleConstIncOnly, |
| 15857 | TargetLowering::DAGCombinerInfo &DCI) { |
| 15858 | SelectionDAG &DAG = DCI.DAG; |
| 15859 | SDNode *N = Target.N; |
| 15860 | MemSDNode *MemN = cast<MemSDNode>(Val: N); |
| 15861 | SDLoc dl(N); |
| 15862 | |
| 15863 | // Find the new opcode for the updating load/store. |
| 15864 | bool isLoadOp = true; |
| 15865 | bool isLaneOp = false; |
| 15866 | // Workaround for vst1x and vld1x intrinsics which do not have alignment |
| 15867 | // as an operand. |
| 15868 | bool hasAlignment = true; |
| 15869 | unsigned NewOpc = 0; |
| 15870 | unsigned NumVecs = 0; |
| 15871 | if (Target.isIntrinsic) { |
| 15872 | unsigned IntNo = N->getConstantOperandVal(Num: 1); |
| 15873 | switch (IntNo) { |
| 15874 | default: |
| 15875 | llvm_unreachable("unexpected intrinsic for Neon base update" ); |
| 15876 | case Intrinsic::arm_neon_vld1: |
| 15877 | NewOpc = ARMISD::VLD1_UPD; |
| 15878 | NumVecs = 1; |
| 15879 | break; |
| 15880 | case Intrinsic::arm_neon_vld2: |
| 15881 | NewOpc = ARMISD::VLD2_UPD; |
| 15882 | NumVecs = 2; |
| 15883 | break; |
| 15884 | case Intrinsic::arm_neon_vld3: |
| 15885 | NewOpc = ARMISD::VLD3_UPD; |
| 15886 | NumVecs = 3; |
| 15887 | break; |
| 15888 | case Intrinsic::arm_neon_vld4: |
| 15889 | NewOpc = ARMISD::VLD4_UPD; |
| 15890 | NumVecs = 4; |
| 15891 | break; |
| 15892 | case Intrinsic::arm_neon_vld1x2: |
| 15893 | NewOpc = ARMISD::VLD1x2_UPD; |
| 15894 | NumVecs = 2; |
| 15895 | hasAlignment = false; |
| 15896 | break; |
| 15897 | case Intrinsic::arm_neon_vld1x3: |
| 15898 | NewOpc = ARMISD::VLD1x3_UPD; |
| 15899 | NumVecs = 3; |
| 15900 | hasAlignment = false; |
| 15901 | break; |
| 15902 | case Intrinsic::arm_neon_vld1x4: |
| 15903 | NewOpc = ARMISD::VLD1x4_UPD; |
| 15904 | NumVecs = 4; |
| 15905 | hasAlignment = false; |
| 15906 | break; |
| 15907 | case Intrinsic::arm_neon_vld2dup: |
| 15908 | NewOpc = ARMISD::VLD2DUP_UPD; |
| 15909 | NumVecs = 2; |
| 15910 | break; |
| 15911 | case Intrinsic::arm_neon_vld3dup: |
| 15912 | NewOpc = ARMISD::VLD3DUP_UPD; |
| 15913 | NumVecs = 3; |
| 15914 | break; |
| 15915 | case Intrinsic::arm_neon_vld4dup: |
| 15916 | NewOpc = ARMISD::VLD4DUP_UPD; |
| 15917 | NumVecs = 4; |
| 15918 | break; |
| 15919 | case Intrinsic::arm_neon_vld2lane: |
| 15920 | NewOpc = ARMISD::VLD2LN_UPD; |
| 15921 | NumVecs = 2; |
| 15922 | isLaneOp = true; |
| 15923 | break; |
| 15924 | case Intrinsic::arm_neon_vld3lane: |
| 15925 | NewOpc = ARMISD::VLD3LN_UPD; |
| 15926 | NumVecs = 3; |
| 15927 | isLaneOp = true; |
| 15928 | break; |
| 15929 | case Intrinsic::arm_neon_vld4lane: |
| 15930 | NewOpc = ARMISD::VLD4LN_UPD; |
| 15931 | NumVecs = 4; |
| 15932 | isLaneOp = true; |
| 15933 | break; |
| 15934 | case Intrinsic::arm_neon_vst1: |
| 15935 | NewOpc = ARMISD::VST1_UPD; |
| 15936 | NumVecs = 1; |
| 15937 | isLoadOp = false; |
| 15938 | break; |
| 15939 | case Intrinsic::arm_neon_vst2: |
| 15940 | NewOpc = ARMISD::VST2_UPD; |
| 15941 | NumVecs = 2; |
| 15942 | isLoadOp = false; |
| 15943 | break; |
| 15944 | case Intrinsic::arm_neon_vst3: |
| 15945 | NewOpc = ARMISD::VST3_UPD; |
| 15946 | NumVecs = 3; |
| 15947 | isLoadOp = false; |
| 15948 | break; |
| 15949 | case Intrinsic::arm_neon_vst4: |
| 15950 | NewOpc = ARMISD::VST4_UPD; |
| 15951 | NumVecs = 4; |
| 15952 | isLoadOp = false; |
| 15953 | break; |
| 15954 | case Intrinsic::arm_neon_vst2lane: |
| 15955 | NewOpc = ARMISD::VST2LN_UPD; |
| 15956 | NumVecs = 2; |
| 15957 | isLoadOp = false; |
| 15958 | isLaneOp = true; |
| 15959 | break; |
| 15960 | case Intrinsic::arm_neon_vst3lane: |
| 15961 | NewOpc = ARMISD::VST3LN_UPD; |
| 15962 | NumVecs = 3; |
| 15963 | isLoadOp = false; |
| 15964 | isLaneOp = true; |
| 15965 | break; |
| 15966 | case Intrinsic::arm_neon_vst4lane: |
| 15967 | NewOpc = ARMISD::VST4LN_UPD; |
| 15968 | NumVecs = 4; |
| 15969 | isLoadOp = false; |
| 15970 | isLaneOp = true; |
| 15971 | break; |
| 15972 | case Intrinsic::arm_neon_vst1x2: |
| 15973 | NewOpc = ARMISD::VST1x2_UPD; |
| 15974 | NumVecs = 2; |
| 15975 | isLoadOp = false; |
| 15976 | hasAlignment = false; |
| 15977 | break; |
| 15978 | case Intrinsic::arm_neon_vst1x3: |
| 15979 | NewOpc = ARMISD::VST1x3_UPD; |
| 15980 | NumVecs = 3; |
| 15981 | isLoadOp = false; |
| 15982 | hasAlignment = false; |
| 15983 | break; |
| 15984 | case Intrinsic::arm_neon_vst1x4: |
| 15985 | NewOpc = ARMISD::VST1x4_UPD; |
| 15986 | NumVecs = 4; |
| 15987 | isLoadOp = false; |
| 15988 | hasAlignment = false; |
| 15989 | break; |
| 15990 | } |
| 15991 | } else { |
| 15992 | isLaneOp = true; |
| 15993 | switch (N->getOpcode()) { |
| 15994 | default: |
| 15995 | llvm_unreachable("unexpected opcode for Neon base update" ); |
| 15996 | case ARMISD::VLD1DUP: |
| 15997 | NewOpc = ARMISD::VLD1DUP_UPD; |
| 15998 | NumVecs = 1; |
| 15999 | break; |
| 16000 | case ARMISD::VLD2DUP: |
| 16001 | NewOpc = ARMISD::VLD2DUP_UPD; |
| 16002 | NumVecs = 2; |
| 16003 | break; |
| 16004 | case ARMISD::VLD3DUP: |
| 16005 | NewOpc = ARMISD::VLD3DUP_UPD; |
| 16006 | NumVecs = 3; |
| 16007 | break; |
| 16008 | case ARMISD::VLD4DUP: |
| 16009 | NewOpc = ARMISD::VLD4DUP_UPD; |
| 16010 | NumVecs = 4; |
| 16011 | break; |
| 16012 | case ISD::LOAD: |
| 16013 | NewOpc = ARMISD::VLD1_UPD; |
| 16014 | NumVecs = 1; |
| 16015 | isLaneOp = false; |
| 16016 | break; |
| 16017 | case ISD::STORE: |
| 16018 | NewOpc = ARMISD::VST1_UPD; |
| 16019 | NumVecs = 1; |
| 16020 | isLaneOp = false; |
| 16021 | isLoadOp = false; |
| 16022 | break; |
| 16023 | } |
| 16024 | } |
| 16025 | |
| 16026 | // Find the size of memory referenced by the load/store. |
| 16027 | EVT VecTy; |
| 16028 | if (isLoadOp) { |
| 16029 | VecTy = N->getValueType(ResNo: 0); |
| 16030 | } else if (Target.isIntrinsic) { |
| 16031 | VecTy = N->getOperand(Num: Target.AddrOpIdx + 1).getValueType(); |
| 16032 | } else { |
| 16033 | assert(Target.isStore && |
| 16034 | "Node has to be a load, a store, or an intrinsic!" ); |
| 16035 | VecTy = N->getOperand(Num: 1).getValueType(); |
| 16036 | } |
| 16037 | |
| 16038 | bool isVLDDUPOp = |
| 16039 | NewOpc == ARMISD::VLD1DUP_UPD || NewOpc == ARMISD::VLD2DUP_UPD || |
| 16040 | NewOpc == ARMISD::VLD3DUP_UPD || NewOpc == ARMISD::VLD4DUP_UPD; |
| 16041 | |
| 16042 | unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; |
| 16043 | if (isLaneOp || isVLDDUPOp) |
| 16044 | NumBytes /= VecTy.getVectorNumElements(); |
| 16045 | |
| 16046 | if (NumBytes >= 3 * 16 && User.ConstInc != NumBytes) { |
| 16047 | // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two |
| 16048 | // separate instructions that make it harder to use a non-constant update. |
| 16049 | return false; |
| 16050 | } |
| 16051 | |
| 16052 | if (SimpleConstIncOnly && User.ConstInc != NumBytes) |
| 16053 | return false; |
| 16054 | |
| 16055 | if (!isValidBaseUpdate(N, User: User.N)) |
| 16056 | return false; |
| 16057 | |
| 16058 | // OK, we found an ADD we can fold into the base update. |
| 16059 | // Now, create a _UPD node, taking care of not breaking alignment. |
| 16060 | |
| 16061 | EVT AlignedVecTy = VecTy; |
| 16062 | Align Alignment = MemN->getAlign(); |
| 16063 | |
| 16064 | // If this is a less-than-standard-aligned load/store, change the type to |
| 16065 | // match the standard alignment. |
| 16066 | // The alignment is overlooked when selecting _UPD variants; and it's |
| 16067 | // easier to introduce bitcasts here than fix that. |
| 16068 | // There are 3 ways to get to this base-update combine: |
| 16069 | // - intrinsics: they are assumed to be properly aligned (to the standard |
| 16070 | // alignment of the memory type), so we don't need to do anything. |
| 16071 | // - ARMISD::VLDx nodes: they are only generated from the aforementioned |
| 16072 | // intrinsics, so, likewise, there's nothing to do. |
| 16073 | // - generic load/store instructions: the alignment is specified as an |
| 16074 | // explicit operand, rather than implicitly as the standard alignment |
// of the memory type (like the intrinsics). We need to change the
| 16076 | // memory type to match the explicit alignment. That way, we don't |
| 16077 | // generate non-standard-aligned ARMISD::VLDx nodes. |
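// For example, a plain 8-byte v2i32 store with only 2-byte alignment is
// rebuilt here as a v4i16 store, so the resulting VST1_UPD still uses a
// standard-aligned memory type (its alignment operand, added below, is 1).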
| 16078 | if (isa<LSBaseSDNode>(Val: N)) { |
| 16079 | if (Alignment.value() < VecTy.getScalarSizeInBits() / 8) { |
| 16080 | MVT EltTy = MVT::getIntegerVT(BitWidth: Alignment.value() * 8); |
assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
assert(!isLaneOp && "Unexpected generic load/store lane.");
| 16083 | unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8); |
| 16084 | AlignedVecTy = MVT::getVectorVT(VT: EltTy, NumElements: NumElts); |
| 16085 | } |
| 16086 | // Don't set an explicit alignment on regular load/stores that we want |
| 16087 | // to transform to VLD/VST 1_UPD nodes. |
| 16088 | // This matches the behavior of regular load/stores, which only get an |
| 16089 | // explicit alignment if the MMO alignment is larger than the standard |
| 16090 | // alignment of the memory type. |
| 16091 | // Intrinsics, however, always get an explicit alignment, set to the |
| 16092 | // alignment of the MMO. |
| 16093 | Alignment = Align(1); |
| 16094 | } |
| 16095 | |
| 16096 | // Create the new updating load/store node. |
| 16097 | // First, create an SDVTList for the new updating node's results. |
| 16098 | EVT Tys[6]; |
| 16099 | unsigned NumResultVecs = (isLoadOp ? NumVecs : 0); |
| 16100 | unsigned n; |
| 16101 | for (n = 0; n < NumResultVecs; ++n) |
| 16102 | Tys[n] = AlignedVecTy; |
| 16103 | Tys[n++] = MVT::i32; |
| 16104 | Tys[n] = MVT::Other; |
| 16105 | SDVTList SDTys = DAG.getVTList(VTs: ArrayRef(Tys, NumResultVecs + 2)); |
| 16106 | |
| 16107 | // Then, gather the new node's operands. |
| 16108 | SmallVector<SDValue, 8> Ops; |
| 16109 | Ops.push_back(Elt: N->getOperand(Num: 0)); // incoming chain |
| 16110 | Ops.push_back(Elt: N->getOperand(Num: Target.AddrOpIdx)); |
| 16111 | Ops.push_back(Elt: User.Inc); |
| 16112 | |
| 16113 | if (StoreSDNode *StN = dyn_cast<StoreSDNode>(Val: N)) { |
| 16114 | // Try to match the intrinsic's signature |
| 16115 | Ops.push_back(Elt: StN->getValue()); |
| 16116 | } else { |
| 16117 | // Loads (and of course intrinsics) match the intrinsics' signature, |
| 16118 | // so just add all but the alignment operand. |
| 16119 | unsigned LastOperand = |
| 16120 | hasAlignment ? N->getNumOperands() - 1 : N->getNumOperands(); |
| 16121 | for (unsigned i = Target.AddrOpIdx + 1; i < LastOperand; ++i) |
| 16122 | Ops.push_back(Elt: N->getOperand(Num: i)); |
| 16123 | } |
| 16124 | |
| 16125 | // For all node types, the alignment operand is always the last one. |
| 16126 | Ops.push_back(Elt: DAG.getConstant(Val: Alignment.value(), DL: dl, VT: MVT::i32)); |
| 16127 | |
| 16128 | // If this is a non-standard-aligned STORE, the penultimate operand is the |
| 16129 | // stored value. Bitcast it to the aligned type. |
| 16130 | if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) { |
| 16131 | SDValue &StVal = Ops[Ops.size() - 2]; |
| 16132 | StVal = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: AlignedVecTy, Operand: StVal); |
| 16133 | } |
| 16134 | |
| 16135 | EVT LoadVT = isLaneOp ? VecTy.getVectorElementType() : AlignedVecTy; |
| 16136 | SDValue UpdN = DAG.getMemIntrinsicNode(Opcode: NewOpc, dl, VTList: SDTys, Ops, MemVT: LoadVT, |
| 16137 | MMO: MemN->getMemOperand()); |
| 16138 | |
| 16139 | // Update the uses. |
| 16140 | SmallVector<SDValue, 5> NewResults; |
| 16141 | for (unsigned i = 0; i < NumResultVecs; ++i) |
| 16142 | NewResults.push_back(Elt: SDValue(UpdN.getNode(), i)); |
| 16143 | |
| 16144 | // If this is an non-standard-aligned LOAD, the first result is the loaded |
| 16145 | // value. Bitcast it to the expected result type. |
| 16146 | if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) { |
| 16147 | SDValue &LdVal = NewResults[0]; |
| 16148 | LdVal = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecTy, Operand: LdVal); |
| 16149 | } |
| 16150 | |
| 16151 | NewResults.push_back(Elt: SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain |
| 16152 | DCI.CombineTo(N, To: NewResults); |
| 16153 | DCI.CombineTo(N: User.N, Res: SDValue(UpdN.getNode(), NumResultVecs)); |
| 16154 | |
| 16155 | return true; |
| 16156 | } |
| 16157 | |
| 16158 | // If (opcode ptr inc) is an ADD-like instruction, return the
| 16159 | // increment value. Otherwise return 0.
| 16160 | static unsigned getPointerConstIncrement(unsigned Opcode, SDValue Ptr, |
| 16161 | SDValue Inc, const SelectionDAG &DAG) { |
| 16162 | ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Val: Inc.getNode()); |
| 16163 | if (!CInc) |
| 16164 | return 0; |
| 16165 | |
| 16166 | switch (Opcode) { |
| 16167 | case ARMISD::VLD1_UPD: |
| 16168 | case ISD::ADD: |
| 16169 | return CInc->getZExtValue(); |
| 16170 | case ISD::OR: { |
| 16171 | if (DAG.haveNoCommonBitsSet(A: Ptr, B: Inc)) { |
| 16172 | // (OR ptr inc) is the same as (ADD ptr inc) |
| 16173 | return CInc->getZExtValue(); |
| 16174 | } |
| 16175 | return 0; |
| 16176 | } |
| 16177 | default: |
| 16178 | return 0; |
| 16179 | } |
| 16180 | } |
| 16181 | |
| 16182 | static bool findPointerConstIncrement(SDNode *N, SDValue *Ptr, SDValue *CInc) { |
| 16183 | switch (N->getOpcode()) { |
| 16184 | case ISD::ADD: |
| 16185 | case ISD::OR: { |
| 16186 | if (isa<ConstantSDNode>(Val: N->getOperand(Num: 1))) { |
| 16187 | *Ptr = N->getOperand(Num: 0); |
| 16188 | *CInc = N->getOperand(Num: 1); |
| 16189 | return true; |
| 16190 | } |
| 16191 | return false; |
| 16192 | } |
| 16193 | case ARMISD::VLD1_UPD: { |
| 16194 | if (isa<ConstantSDNode>(Val: N->getOperand(Num: 2))) { |
| 16195 | *Ptr = N->getOperand(Num: 1); |
| 16196 | *CInc = N->getOperand(Num: 2); |
| 16197 | return true; |
| 16198 | } |
| 16199 | return false; |
| 16200 | } |
| 16201 | default: |
| 16202 | return false; |
| 16203 | } |
| 16204 | } |
| 16205 | |
| 16206 | /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP, |
| 16207 | /// NEON load/store intrinsics, and generic vector load/stores, to merge |
| 16208 | /// base address updates. |
| 16209 | /// For generic load/stores, the memory type is assumed to be a vector. |
| 16210 | /// The caller is assumed to have checked legality. |
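|  | /// For example (an illustrative sketch): a VLD1 of a v4i32 from %ptr plus a
|  | /// separate (add %ptr, 16) can be folded into a single VLD1_UPD node that
|  | /// produces both the loaded value and the post-incremented address.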
| 16211 | static SDValue CombineBaseUpdate(SDNode *N, |
| 16212 | TargetLowering::DAGCombinerInfo &DCI) { |
| 16213 | const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || |
| 16214 | N->getOpcode() == ISD::INTRINSIC_W_CHAIN); |
| 16215 | const bool isStore = N->getOpcode() == ISD::STORE; |
| 16216 | const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1); |
| 16217 | BaseUpdateTarget Target = {.N: N, .isIntrinsic: isIntrinsic, .isStore: isStore, .AddrOpIdx: AddrOpIdx}; |
| 16218 | |
| 16219 | // Limit the number of possible base-updates we look at to prevent degenerate |
| 16220 | // cases. |
| 16221 | unsigned MaxBaseUpdates = ArmMaxBaseUpdatesToCheck; |
| 16222 | |
| 16223 | SDValue Addr = N->getOperand(Num: AddrOpIdx); |
| 16224 | |
| 16225 | SmallVector<BaseUpdateUser, 8> BaseUpdates; |
| 16226 | |
| 16227 | // Search for a use of the address operand that is an increment. |
| 16228 | for (SDUse &Use : Addr->uses()) { |
| 16229 | SDNode *User = Use.getUser(); |
| 16230 | if (Use.getResNo() != Addr.getResNo() || User->getNumOperands() != 2) |
| 16231 | continue; |
| 16232 | |
| 16233 | SDValue Inc = User->getOperand(Num: Use.getOperandNo() == 1 ? 0 : 1); |
| 16234 | unsigned ConstInc = |
| 16235 | getPointerConstIncrement(Opcode: User->getOpcode(), Ptr: Addr, Inc, DAG: DCI.DAG); |
| 16236 | |
| 16237 | if (ConstInc || User->getOpcode() == ISD::ADD) { |
| 16238 | BaseUpdates.push_back(Elt: {.N: User, .Inc: Inc, .ConstInc: ConstInc}); |
| 16239 | if (BaseUpdates.size() >= MaxBaseUpdates) |
| 16240 | break; |
| 16241 | } |
| 16242 | } |
| 16243 | |
| 16244 | // If the address is a constant pointer increment itself, find |
| 16245 | // another constant increment that has the same base operand |
| 16246 | SDValue Base; |
| 16247 | SDValue CInc; |
| 16248 | if (findPointerConstIncrement(N: Addr.getNode(), Ptr: &Base, CInc: &CInc)) { |
| 16249 | unsigned Offset = |
| 16250 | getPointerConstIncrement(Opcode: Addr->getOpcode(), Ptr: Base, Inc: CInc, DAG: DCI.DAG); |
| 16251 | for (SDUse &Use : Base->uses()) { |
| 16252 | |
| 16253 | SDNode *User = Use.getUser(); |
| 16254 | if (Use.getResNo() != Base.getResNo() || User == Addr.getNode() || |
| 16255 | User->getNumOperands() != 2) |
| 16256 | continue; |
| 16257 | |
| 16258 | SDValue UserInc = User->getOperand(Num: Use.getOperandNo() == 0 ? 1 : 0); |
| 16259 | unsigned UserOffset = |
| 16260 | getPointerConstIncrement(Opcode: User->getOpcode(), Ptr: Base, Inc: UserInc, DAG: DCI.DAG); |
| 16261 | |
| 16262 | if (!UserOffset || UserOffset <= Offset) |
| 16263 | continue; |
| 16264 | |
| 16265 | unsigned NewConstInc = UserOffset - Offset; |
| 16266 | SDValue NewInc = DCI.DAG.getConstant(Val: NewConstInc, DL: SDLoc(N), VT: MVT::i32); |
| 16267 | BaseUpdates.push_back(Elt: {.N: User, .Inc: NewInc, .ConstInc: NewConstInc}); |
| 16268 | if (BaseUpdates.size() >= MaxBaseUpdates) |
| 16269 | break; |
| 16270 | } |
| 16271 | } |
| 16272 | |
| 16273 | // Try to fold the load/store with an update that matches memory |
| 16274 | // access size. This should work well for sequential loads. |
| 16275 | unsigned NumValidUpd = BaseUpdates.size(); |
| 16276 | for (unsigned I = 0; I < NumValidUpd; I++) { |
| 16277 | BaseUpdateUser &User = BaseUpdates[I]; |
| 16278 | if (TryCombineBaseUpdate(Target, User, /*SimpleConstIncOnly=*/true, DCI)) |
| 16279 | return SDValue(); |
| 16280 | } |
| 16281 | |
| 16282 | // Try to fold with other users. Non-constant updates are considered |
| 16283 | // first, and constant updates are sorted to not break a sequence of |
| 16284 | // strided accesses (if there is any). |
| 16285 | llvm::stable_sort(Range&: BaseUpdates, |
| 16286 | C: [](const BaseUpdateUser &LHS, const BaseUpdateUser &RHS) { |
| 16287 | return LHS.ConstInc < RHS.ConstInc; |
| 16288 | }); |
| 16289 | for (BaseUpdateUser &User : BaseUpdates) { |
| 16290 | if (TryCombineBaseUpdate(Target, User, /*SimpleConstIncOnly=*/false, DCI)) |
| 16291 | return SDValue(); |
| 16292 | } |
| 16293 | return SDValue(); |
| 16294 | } |
| 16295 | |
| 16296 | static SDValue PerformVLDCombine(SDNode *N, |
| 16297 | TargetLowering::DAGCombinerInfo &DCI) { |
| 16298 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 16299 | return SDValue(); |
| 16300 | |
| 16301 | return CombineBaseUpdate(N, DCI); |
| 16302 | } |
| 16303 | |
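|  | /// PerformMVEVLDCombine - Target-specific DAG combine for the MVE VLD2/VLD4
|  | /// and VST2/VST4 intrinsics: fold a constant address increment that matches
|  | /// the memory access size into the corresponding updating (_UPD) node.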
| 16304 | static SDValue PerformMVEVLDCombine(SDNode *N, |
| 16305 | TargetLowering::DAGCombinerInfo &DCI) { |
| 16306 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 16307 | return SDValue(); |
| 16308 | |
| 16309 | SelectionDAG &DAG = DCI.DAG; |
| 16310 | SDValue Addr = N->getOperand(Num: 2); |
| 16311 | MemSDNode *MemN = cast<MemSDNode>(Val: N); |
| 16312 | SDLoc dl(N); |
| 16313 | |
| 16314 | // For the stores, where there are multiple intrinsics we only actually want
| 16315 | // to post-inc the last of them.
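|  | // The trailing constant operand of the intrinsic (operand 5 for vst2q,
|  | // operand 7 for vst4q) selects which part of the interleaved store this
|  | // call writes, so only the final part (1 or 3 respectively) is considered.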
| 16316 | unsigned IntNo = N->getConstantOperandVal(Num: 1); |
| 16317 | if (IntNo == Intrinsic::arm_mve_vst2q && N->getConstantOperandVal(Num: 5) != 1) |
| 16318 | return SDValue(); |
| 16319 | if (IntNo == Intrinsic::arm_mve_vst4q && N->getConstantOperandVal(Num: 7) != 3) |
| 16320 | return SDValue(); |
| 16321 | |
| 16322 | // Search for a use of the address operand that is an increment. |
| 16323 | for (SDUse &Use : Addr->uses()) { |
| 16324 | SDNode *User = Use.getUser(); |
| 16325 | if (User->getOpcode() != ISD::ADD || Use.getResNo() != Addr.getResNo()) |
| 16326 | continue; |
| 16327 | |
| 16328 | // Check that the add is independent of the load/store. Otherwise, folding |
| 16329 | // it would create a cycle. We can avoid searching through Addr as it's a |
| 16330 | // predecessor to both. |
| 16331 | SmallPtrSet<const SDNode *, 32> Visited; |
| 16332 | SmallVector<const SDNode *, 16> Worklist; |
| 16333 | Visited.insert(Ptr: Addr.getNode()); |
| 16334 | Worklist.push_back(Elt: N); |
| 16335 | Worklist.push_back(Elt: User); |
| 16336 | const unsigned MaxSteps = 1024; |
| 16337 | if (SDNode::hasPredecessorHelper(N, Visited, Worklist, MaxSteps) || |
| 16338 | SDNode::hasPredecessorHelper(N: User, Visited, Worklist, MaxSteps)) |
| 16339 | continue; |
| 16340 | |
| 16341 | // Find the new opcode for the updating load/store. |
| 16342 | bool isLoadOp = true; |
| 16343 | unsigned NewOpc = 0; |
| 16344 | unsigned NumVecs = 0; |
| 16345 | switch (IntNo) { |
| 16346 | default: |
| 16347 | llvm_unreachable("unexpected intrinsic for MVE VLDn combine");
| 16348 | case Intrinsic::arm_mve_vld2q: |
| 16349 | NewOpc = ARMISD::VLD2_UPD; |
| 16350 | NumVecs = 2; |
| 16351 | break; |
| 16352 | case Intrinsic::arm_mve_vld4q: |
| 16353 | NewOpc = ARMISD::VLD4_UPD; |
| 16354 | NumVecs = 4; |
| 16355 | break; |
| 16356 | case Intrinsic::arm_mve_vst2q: |
| 16357 | NewOpc = ARMISD::VST2_UPD; |
| 16358 | NumVecs = 2; |
| 16359 | isLoadOp = false; |
| 16360 | break; |
| 16361 | case Intrinsic::arm_mve_vst4q: |
| 16362 | NewOpc = ARMISD::VST4_UPD; |
| 16363 | NumVecs = 4; |
| 16364 | isLoadOp = false; |
| 16365 | break; |
| 16366 | } |
| 16367 | |
| 16368 | // Find the size of memory referenced by the load/store. |
| 16369 | EVT VecTy; |
| 16370 | if (isLoadOp) { |
| 16371 | VecTy = N->getValueType(ResNo: 0); |
| 16372 | } else { |
| 16373 | VecTy = N->getOperand(Num: 3).getValueType(); |
| 16374 | } |
| 16375 | |
| 16376 | unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; |
| 16377 | |
| 16378 | // If the increment is a constant, it must match the memory ref size. |
| 16379 | SDValue Inc = User->getOperand(Num: User->getOperand(Num: 0) == Addr ? 1 : 0); |
| 16380 | ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Val: Inc.getNode()); |
| 16381 | if (!CInc || CInc->getZExtValue() != NumBytes) |
| 16382 | continue; |
| 16383 | |
| 16384 | // Create the new updating load/store node. |
| 16385 | // First, create an SDVTList for the new updating node's results. |
| 16386 | EVT Tys[6]; |
| 16387 | unsigned NumResultVecs = (isLoadOp ? NumVecs : 0); |
| 16388 | unsigned n; |
| 16389 | for (n = 0; n < NumResultVecs; ++n) |
| 16390 | Tys[n] = VecTy; |
| 16391 | Tys[n++] = MVT::i32; |
| 16392 | Tys[n] = MVT::Other; |
| 16393 | SDVTList SDTys = DAG.getVTList(VTs: ArrayRef(Tys, NumResultVecs + 2)); |
| 16394 | |
| 16395 | // Then, gather the new node's operands. |
| 16396 | SmallVector<SDValue, 8> Ops; |
| 16397 | Ops.push_back(Elt: N->getOperand(Num: 0)); // incoming chain |
| 16398 | Ops.push_back(Elt: N->getOperand(Num: 2)); // ptr |
| 16399 | Ops.push_back(Elt: Inc); |
| 16400 | |
| 16401 | for (unsigned i = 3; i < N->getNumOperands(); ++i) |
| 16402 | Ops.push_back(Elt: N->getOperand(Num: i)); |
| 16403 | |
| 16404 | SDValue UpdN = DAG.getMemIntrinsicNode(Opcode: NewOpc, dl, VTList: SDTys, Ops, MemVT: VecTy, |
| 16405 | MMO: MemN->getMemOperand()); |
| 16406 | |
| 16407 | // Update the uses. |
| 16408 | SmallVector<SDValue, 5> NewResults; |
| 16409 | for (unsigned i = 0; i < NumResultVecs; ++i) |
| 16410 | NewResults.push_back(Elt: SDValue(UpdN.getNode(), i)); |
| 16411 | |
| 16412 | NewResults.push_back(Elt: SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain |
| 16413 | DCI.CombineTo(N, To: NewResults); |
| 16414 | DCI.CombineTo(N: User, Res: SDValue(UpdN.getNode(), NumResultVecs)); |
| 16415 | |
| 16416 | break; |
| 16417 | } |
| 16418 | |
| 16419 | return SDValue(); |
| 16420 | } |
| 16421 | |
| 16422 | /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a |
| 16423 | /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic |
| 16424 | /// are also VDUPLANEs. If so, combine them to a vldN-dup operation and |
| 16425 | /// return true. |
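|  | /// For example (illustrative): if both vector results of a vld2lane intrinsic
|  | /// are used only by VDUPLANEs of the same lane, the group is replaced by a
|  | /// single VLD2DUP that loads and duplicates the elements directly.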
| 16426 | static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
| 16427 | SelectionDAG &DAG = DCI.DAG; |
| 16428 | EVT VT = N->getValueType(ResNo: 0); |
| 16429 | // vldN-dup instructions only support 64-bit vectors for N > 1. |
| 16430 | if (!VT.is64BitVector()) |
| 16431 | return false; |
| 16432 | |
| 16433 | // Check if the VDUPLANE operand is a vldN-dup intrinsic. |
| 16434 | SDNode *VLD = N->getOperand(Num: 0).getNode(); |
| 16435 | if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) |
| 16436 | return false; |
| 16437 | unsigned NumVecs = 0; |
| 16438 | unsigned NewOpc = 0; |
| 16439 | unsigned IntNo = VLD->getConstantOperandVal(Num: 1); |
| 16440 | if (IntNo == Intrinsic::arm_neon_vld2lane) { |
| 16441 | NumVecs = 2; |
| 16442 | NewOpc = ARMISD::VLD2DUP; |
| 16443 | } else if (IntNo == Intrinsic::arm_neon_vld3lane) { |
| 16444 | NumVecs = 3; |
| 16445 | NewOpc = ARMISD::VLD3DUP; |
| 16446 | } else if (IntNo == Intrinsic::arm_neon_vld4lane) { |
| 16447 | NumVecs = 4; |
| 16448 | NewOpc = ARMISD::VLD4DUP; |
| 16449 | } else { |
| 16450 | return false; |
| 16451 | } |
| 16452 | |
| 16453 | // First check that all the vldN-lane uses are VDUPLANEs and that the lane |
| 16454 | // numbers match the load. |
| 16455 | unsigned VLDLaneNo = VLD->getConstantOperandVal(Num: NumVecs + 3); |
| 16456 | for (SDUse &Use : VLD->uses()) { |
| 16457 | // Ignore uses of the chain result. |
| 16458 | if (Use.getResNo() == NumVecs) |
| 16459 | continue; |
| 16460 | SDNode *User = Use.getUser(); |
| 16461 | if (User->getOpcode() != ARMISD::VDUPLANE || |
| 16462 | VLDLaneNo != User->getConstantOperandVal(Num: 1)) |
| 16463 | return false; |
| 16464 | } |
| 16465 | |
| 16466 | // Create the vldN-dup node. |
| 16467 | EVT Tys[5]; |
| 16468 | unsigned n; |
| 16469 | for (n = 0; n < NumVecs; ++n) |
| 16470 | Tys[n] = VT; |
| 16471 | Tys[n] = MVT::Other; |
| 16472 | SDVTList SDTys = DAG.getVTList(VTs: ArrayRef(Tys, NumVecs + 1)); |
| 16473 | SDValue Ops[] = { VLD->getOperand(Num: 0), VLD->getOperand(Num: 2) }; |
| 16474 | MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(Val: VLD); |
| 16475 | SDValue VLDDup = DAG.getMemIntrinsicNode(Opcode: NewOpc, dl: SDLoc(VLD), VTList: SDTys, |
| 16476 | Ops, MemVT: VLDMemInt->getMemoryVT(), |
| 16477 | MMO: VLDMemInt->getMemOperand()); |
| 16478 | |
| 16479 | // Update the uses. |
| 16480 | for (SDUse &Use : VLD->uses()) { |
| 16481 | unsigned ResNo = Use.getResNo(); |
| 16482 | // Ignore uses of the chain result. |
| 16483 | if (ResNo == NumVecs) |
| 16484 | continue; |
| 16485 | DCI.CombineTo(N: Use.getUser(), Res: SDValue(VLDDup.getNode(), ResNo)); |
| 16486 | } |
| 16487 | |
| 16488 | // Now the vldN-lane intrinsic is dead except for its chain result. |
| 16489 | // Update uses of the chain. |
| 16490 | std::vector<SDValue> VLDDupResults; |
| 16491 | for (unsigned n = 0; n < NumVecs; ++n) |
| 16492 | VLDDupResults.push_back(x: SDValue(VLDDup.getNode(), n)); |
| 16493 | VLDDupResults.push_back(x: SDValue(VLDDup.getNode(), NumVecs)); |
| 16494 | DCI.CombineTo(N: VLD, To: VLDDupResults); |
| 16495 | |
| 16496 | return true; |
| 16497 | } |
| 16498 | |
| 16499 | /// PerformVDUPLANECombine - Target-specific dag combine xforms for |
| 16500 | /// ARMISD::VDUPLANE. |
| 16501 | static SDValue PerformVDUPLANECombine(SDNode *N, |
| 16502 | TargetLowering::DAGCombinerInfo &DCI, |
| 16503 | const ARMSubtarget *Subtarget) { |
| 16504 | SDValue Op = N->getOperand(Num: 0); |
| 16505 | EVT VT = N->getValueType(ResNo: 0); |
| 16506 | |
| 16507 | // On MVE, we just convert the VDUPLANE to a VDUP with an extract. |
| 16508 | if (Subtarget->hasMVEIntegerOps()) { |
| 16509 | EVT ExtractVT = VT.getVectorElementType();
| 16510 | // We need to ensure we are creating a legal type. |
| 16511 | if (!DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT: ExtractVT)) |
| 16512 | ExtractVT = MVT::i32; |
| 16513 | SDValue Extract = DCI.DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: SDLoc(N), VT: ExtractVT,
| 16514 | N1: N->getOperand(Num: 0), N2: N->getOperand(Num: 1)); |
| 16515 | return DCI.DAG.getNode(Opcode: ARMISD::VDUP, DL: SDLoc(N), VT, Operand: Extract); |
| 16516 | } |
| 16517 | |
| 16518 | // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses |
| 16519 | // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. |
| 16520 | if (CombineVLDDUP(N, DCI)) |
| 16521 | return SDValue(N, 0); |
| 16522 | |
| 16523 | // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is |
| 16524 | // redundant. Ignore bit_converts for now; element sizes are checked below. |
| 16525 | while (Op.getOpcode() == ISD::BITCAST) |
| 16526 | Op = Op.getOperand(i: 0); |
| 16527 | if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) |
| 16528 | return SDValue(); |
| 16529 | |
| 16530 | // Make sure the VMOV element size is not bigger than the VDUPLANE elements. |
| 16531 | unsigned EltSize = Op.getScalarValueSizeInBits(); |
| 16532 | // The canonical VMOV for a zero vector uses a 32-bit element size. |
| 16533 | unsigned Imm = Op.getConstantOperandVal(i: 0); |
| 16534 | unsigned EltBits; |
| 16535 | if (ARM_AM::decodeVMOVModImm(ModImm: Imm, EltBits) == 0) |
| 16536 | EltSize = 8; |
| 16537 | if (EltSize > VT.getScalarSizeInBits()) |
| 16538 | return SDValue(); |
| 16539 | |
| 16540 | return DCI.DAG.getNode(Opcode: ISD::BITCAST, DL: SDLoc(N), VT, Operand: Op); |
| 16541 | } |
| 16542 | |
| 16543 | /// PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP. |
| 16544 | static SDValue PerformVDUPCombine(SDNode *N, SelectionDAG &DAG, |
| 16545 | const ARMSubtarget *Subtarget) { |
| 16546 | SDValue Op = N->getOperand(Num: 0); |
| 16547 | SDLoc dl(N); |
| 16548 | |
| 16549 | if (Subtarget->hasMVEIntegerOps()) { |
| 16550 | // Convert VDUP f32 -> VDUP BITCAST i32 under MVE, as we know the value will |
| 16551 | // need to come from a GPR. |
| 16552 | if (Op.getValueType() == MVT::f32) |
| 16553 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT: N->getValueType(ResNo: 0), |
| 16554 | Operand: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::i32, Operand: Op)); |
| 16555 | else if (Op.getValueType() == MVT::f16) |
| 16556 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT: N->getValueType(ResNo: 0), |
| 16557 | Operand: DAG.getNode(Opcode: ARMISD::VMOVrh, DL: dl, VT: MVT::i32, Operand: Op)); |
| 16558 | } |
| 16559 | |
| 16560 | if (!Subtarget->hasNEON()) |
| 16561 | return SDValue(); |
| 16562 | |
| 16563 | // Match VDUP(LOAD) -> VLD1DUP. |
| 16564 | // We match this pattern here rather than waiting for isel because the |
| 16565 | // transform is only legal for unindexed loads. |
| 16566 | LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: Op.getNode()); |
| 16567 | if (LD && Op.hasOneUse() && LD->isUnindexed() && |
| 16568 | LD->getMemoryVT() == N->getValueType(ResNo: 0).getVectorElementType()) { |
| 16569 | SDValue Ops[] = {LD->getOperand(Num: 0), LD->getOperand(Num: 1), |
| 16570 | DAG.getConstant(Val: LD->getAlign().value(), DL: SDLoc(N), VT: MVT::i32)}; |
| 16571 | SDVTList SDTys = DAG.getVTList(VT1: N->getValueType(ResNo: 0), VT2: MVT::Other); |
| 16572 | SDValue VLDDup = |
| 16573 | DAG.getMemIntrinsicNode(Opcode: ARMISD::VLD1DUP, dl: SDLoc(N), VTList: SDTys, Ops, |
| 16574 | MemVT: LD->getMemoryVT(), MMO: LD->getMemOperand()); |
| 16575 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 1), To: VLDDup.getValue(R: 1)); |
| 16576 | return VLDDup; |
| 16577 | } |
| 16578 | |
| 16579 | return SDValue(); |
| 16580 | } |
| 16581 | |
| 16582 | static SDValue PerformLOADCombine(SDNode *N, |
| 16583 | TargetLowering::DAGCombinerInfo &DCI, |
| 16584 | const ARMSubtarget *Subtarget) { |
| 16585 | EVT VT = N->getValueType(ResNo: 0); |
| 16586 | |
| 16587 | // If this is a legal vector load, try to combine it into a VLD1_UPD. |
| 16588 | if (Subtarget->hasNEON() && ISD::isNormalLoad(N) && VT.isVector() && |
| 16589 | DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
| 16590 | return CombineBaseUpdate(N, DCI); |
| 16591 | |
| 16592 | return SDValue(); |
| 16593 | } |
| 16594 | |
| 16595 | // Optimize trunc store (of multiple scalars) to shuffle and store. First, |
| 16596 | // pack all of the elements in one place. Next, store to memory in fewer |
| 16597 | // chunks. |
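|  | // For example (illustrative, little-endian): a truncating store of v4i32 to
|  | // v4i8 becomes a v16i8 shuffle that packs the four low bytes together,
|  | // followed by a single i32 store of the packed value.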
| 16598 | static SDValue PerformTruncatingStoreCombine(StoreSDNode *St, |
| 16599 | SelectionDAG &DAG) { |
| 16600 | SDValue StVal = St->getValue(); |
| 16601 | EVT VT = StVal.getValueType(); |
| 16602 | if (!St->isTruncatingStore() || !VT.isVector()) |
| 16603 | return SDValue(); |
| 16604 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 16605 | EVT StVT = St->getMemoryVT(); |
| 16606 | unsigned NumElems = VT.getVectorNumElements(); |
| 16607 | assert(StVT != VT && "Cannot truncate to the same type");
| 16608 | unsigned FromEltSz = VT.getScalarSizeInBits(); |
| 16609 | unsigned ToEltSz = StVT.getScalarSizeInBits(); |
| 16610 | |
| 16611 | // From, To sizes and ElemCount must be pow of two |
| 16612 | if (!isPowerOf2_32(Value: NumElems * FromEltSz * ToEltSz)) |
| 16613 | return SDValue(); |
| 16614 | |
| 16615 | // We are going to use the original vector elt for storing. |
| 16616 | // Accumulated smaller vector elements must be a multiple of the store size. |
| 16617 | if (0 != (NumElems * FromEltSz) % ToEltSz) |
| 16618 | return SDValue(); |
| 16619 | |
| 16620 | unsigned SizeRatio = FromEltSz / ToEltSz; |
| 16621 | assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits()); |
| 16622 | |
| 16623 | // Create a type on which we perform the shuffle. |
| 16624 | EVT WideVecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: StVT.getScalarType(), |
| 16625 | NumElements: NumElems * SizeRatio); |
| 16626 | assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); |
| 16627 | |
| 16628 | SDLoc DL(St); |
| 16629 | SDValue WideVec = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: WideVecVT, Operand: StVal); |
| 16630 | SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); |
| 16631 | for (unsigned i = 0; i < NumElems; ++i) |
| 16632 | ShuffleVec[i] = DAG.getDataLayout().isBigEndian() ? (i + 1) * SizeRatio - 1 |
| 16633 | : i * SizeRatio; |
| 16634 | |
| 16635 | // Can't shuffle using an illegal type. |
| 16636 | if (!TLI.isTypeLegal(VT: WideVecVT)) |
| 16637 | return SDValue(); |
| 16638 | |
| 16639 | SDValue Shuff = DAG.getVectorShuffle( |
| 16640 | VT: WideVecVT, dl: DL, N1: WideVec, N2: DAG.getUNDEF(VT: WideVec.getValueType()), Mask: ShuffleVec); |
| 16641 | // At this point all of the data is stored at the bottom of the |
| 16642 | // register. We now need to save it to mem. |
| 16643 | |
| 16644 | // Find the largest store unit |
| 16645 | MVT StoreType = MVT::i8; |
| 16646 | for (MVT Tp : MVT::integer_valuetypes()) { |
| 16647 | if (TLI.isTypeLegal(VT: Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz) |
| 16648 | StoreType = Tp; |
| 16649 | } |
| 16650 | // Didn't find a legal store type. |
| 16651 | if (!TLI.isTypeLegal(VT: StoreType)) |
| 16652 | return SDValue(); |
| 16653 | |
| 16654 | // Bitcast the original vector into a vector of store-size units |
| 16655 | EVT StoreVecVT = |
| 16656 | EVT::getVectorVT(Context&: *DAG.getContext(), VT: StoreType, |
| 16657 | NumElements: VT.getSizeInBits() / EVT(StoreType).getSizeInBits()); |
| 16658 | assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); |
| 16659 | SDValue ShuffWide = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: StoreVecVT, Operand: Shuff); |
| 16660 | SmallVector<SDValue, 8> Chains; |
| 16661 | SDValue Increment = DAG.getConstant(Val: StoreType.getSizeInBits() / 8, DL, |
| 16662 | VT: TLI.getPointerTy(DL: DAG.getDataLayout())); |
| 16663 | SDValue BasePtr = St->getBasePtr(); |
| 16664 | |
| 16665 | // Perform one or more big stores into memory. |
| 16666 | unsigned E = (ToEltSz * NumElems) / StoreType.getSizeInBits(); |
| 16667 | for (unsigned I = 0; I < E; I++) { |
| 16668 | SDValue SubVec = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: StoreType, |
| 16669 | N1: ShuffWide, N2: DAG.getIntPtrConstant(Val: I, DL)); |
| 16670 | SDValue Ch = |
| 16671 | DAG.getStore(Chain: St->getChain(), dl: DL, Val: SubVec, Ptr: BasePtr, PtrInfo: St->getPointerInfo(), |
| 16672 | Alignment: St->getAlign(), MMOFlags: St->getMemOperand()->getFlags()); |
| 16673 | BasePtr = |
| 16674 | DAG.getNode(Opcode: ISD::ADD, DL, VT: BasePtr.getValueType(), N1: BasePtr, N2: Increment); |
| 16675 | Chains.push_back(Elt: Ch); |
| 16676 | } |
| 16677 | return DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Chains); |
| 16678 | } |
| 16679 | |
| 16680 | // Try taking a single vector store from an fpround (which would otherwise turn |
| 16681 | // into an expensive buildvector) and splitting it into a series of narrowing |
| 16682 | // stores. |
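|  | // For example (illustrative): a store of (fpround v8f32 to v8f16) is split
|  | // into two v4f32 halves; each half is narrowed with VCVTN and written with a
|  | // 64-bit truncating store, avoiding the expensive buildvector.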
| 16683 | static SDValue PerformSplittingToNarrowingStores(StoreSDNode *St, |
| 16684 | SelectionDAG &DAG) { |
| 16685 | if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed()) |
| 16686 | return SDValue(); |
| 16687 | SDValue Trunc = St->getValue(); |
| 16688 | if (Trunc->getOpcode() != ISD::FP_ROUND) |
| 16689 | return SDValue(); |
| 16690 | EVT FromVT = Trunc->getOperand(Num: 0).getValueType(); |
| 16691 | EVT ToVT = Trunc.getValueType(); |
| 16692 | if (!ToVT.isVector()) |
| 16693 | return SDValue(); |
| 16694 | assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements()); |
| 16695 | EVT ToEltVT = ToVT.getVectorElementType(); |
| 16696 | EVT FromEltVT = FromVT.getVectorElementType(); |
| 16697 | |
| 16698 | if (FromEltVT != MVT::f32 || ToEltVT != MVT::f16) |
| 16699 | return SDValue(); |
| 16700 | |
| 16701 | unsigned NumElements = 4; |
| 16702 | if (FromVT.getVectorNumElements() % NumElements != 0) |
| 16703 | return SDValue(); |
| 16704 | |
| 16705 | // Test if the Trunc will be convertible to a VMOVN with a shuffle, and if so
| 16706 | // use the VMOVN over splitting the store. We are looking for patterns of: |
| 16707 | // !rev: 0 N 1 N+1 2 N+2 ... |
| 16708 | // rev: N 0 N+1 1 N+2 2 ... |
| 16709 | // The shuffle may either be a single source (in which case N = NumElts/2) or |
| 16710 | // two inputs extended with concat to the same size (in which case N = |
| 16711 | // NumElts). |
| 16712 | auto isVMOVNShuffle = [&](ShuffleVectorSDNode *SVN, bool Rev) { |
| 16713 | ArrayRef<int> M = SVN->getMask(); |
| 16714 | unsigned NumElts = ToVT.getVectorNumElements(); |
| 16715 | if (SVN->getOperand(Num: 1).isUndef()) |
| 16716 | NumElts /= 2; |
| 16717 | |
| 16718 | unsigned Off0 = Rev ? NumElts : 0; |
| 16719 | unsigned Off1 = Rev ? 0 : NumElts; |
| 16720 | |
| 16721 | for (unsigned I = 0; I < NumElts; I += 2) { |
| 16722 | if (M[I] >= 0 && M[I] != (int)(Off0 + I / 2)) |
| 16723 | return false; |
| 16724 | if (M[I + 1] >= 0 && M[I + 1] != (int)(Off1 + I / 2)) |
| 16725 | return false; |
| 16726 | } |
| 16727 | |
| 16728 | return true; |
| 16729 | }; |
| 16730 | |
| 16731 | if (auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Val: Trunc.getOperand(i: 0))) |
| 16732 | if (isVMOVNShuffle(Shuffle, false) || isVMOVNShuffle(Shuffle, true)) |
| 16733 | return SDValue(); |
| 16734 | |
| 16735 | LLVMContext &C = *DAG.getContext(); |
| 16736 | SDLoc DL(St); |
| 16737 | // Details about the old store |
| 16738 | SDValue Ch = St->getChain(); |
| 16739 | SDValue BasePtr = St->getBasePtr(); |
| 16740 | Align Alignment = St->getBaseAlign(); |
| 16741 | MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags(); |
| 16742 | AAMDNodes AAInfo = St->getAAInfo(); |
| 16743 | |
| 16744 | // We split the store into slices of NumElements. Each slice is converted to
| 16745 | // fp16 with a vcvt and then stored as a truncating integer store.
| 16746 | EVT NewFromVT = EVT::getVectorVT(Context&: C, VT: FromEltVT, NumElements); |
| 16747 | EVT NewToVT = EVT::getVectorVT( |
| 16748 | Context&: C, VT: EVT::getIntegerVT(Context&: C, BitWidth: ToEltVT.getSizeInBits()), NumElements); |
| 16749 | |
| 16750 | SmallVector<SDValue, 4> Stores; |
| 16751 | for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) { |
| 16752 | unsigned NewOffset = i * NumElements * ToEltVT.getSizeInBits() / 8; |
| 16753 | SDValue NewPtr = |
| 16754 | DAG.getObjectPtrOffset(SL: DL, Ptr: BasePtr, Offset: TypeSize::getFixed(ExactSize: NewOffset)); |
| 16755 | |
| 16756 | SDValue Extract =
| 16757 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT: NewFromVT, N1: Trunc.getOperand(i: 0), |
| 16758 | N2: DAG.getConstant(Val: i * NumElements, DL, VT: MVT::i32)); |
| 16759 | |
| 16760 | SDValue FPTrunc = |
| 16761 | DAG.getNode(Opcode: ARMISD::VCVTN, DL, VT: MVT::v8f16, N1: DAG.getUNDEF(VT: MVT::v8f16), |
| 16762 | N2: Extract, N3: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
| 16763 | Extract = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT: MVT::v4i32, Operand: FPTrunc); |
| 16764 | |
| 16765 | SDValue Store = DAG.getTruncStore( |
| 16766 | Chain: Ch, dl: DL, Val: Extract, Ptr: NewPtr, PtrInfo: St->getPointerInfo().getWithOffset(O: NewOffset), |
| 16767 | SVT: NewToVT, Alignment, MMOFlags, AAInfo); |
| 16768 | Stores.push_back(Elt: Store); |
| 16769 | } |
| 16770 | return DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Stores); |
| 16771 | } |
| 16772 | |
| 16773 | // Try taking a single vector store from an MVETRUNC (which would otherwise turn |
| 16774 | // into an expensive buildvector) and splitting it into a series of narrowing |
| 16775 | // stores. |
| 16776 | static SDValue PerformSplittingMVETruncToNarrowingStores(StoreSDNode *St, |
| 16777 | SelectionDAG &DAG) { |
| 16778 | if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed()) |
| 16779 | return SDValue(); |
| 16780 | SDValue Trunc = St->getValue(); |
| 16781 | if (Trunc->getOpcode() != ARMISD::MVETRUNC) |
| 16782 | return SDValue(); |
| 16783 | EVT FromVT = Trunc->getOperand(Num: 0).getValueType(); |
| 16784 | EVT ToVT = Trunc.getValueType(); |
| 16785 | |
| 16786 | LLVMContext &C = *DAG.getContext(); |
| 16787 | SDLoc DL(St); |
| 16788 | // Details about the old store |
| 16789 | SDValue Ch = St->getChain(); |
| 16790 | SDValue BasePtr = St->getBasePtr(); |
| 16791 | Align Alignment = St->getBaseAlign(); |
| 16792 | MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags(); |
| 16793 | AAMDNodes AAInfo = St->getAAInfo(); |
| 16794 | |
| 16795 | EVT NewToVT = EVT::getVectorVT(Context&: C, VT: ToVT.getVectorElementType(), |
| 16796 | NumElements: FromVT.getVectorNumElements()); |
| 16797 | |
| 16798 | SmallVector<SDValue, 4> Stores; |
| 16799 | for (unsigned i = 0; i < Trunc.getNumOperands(); i++) { |
| 16800 | unsigned NewOffset = |
| 16801 | i * FromVT.getVectorNumElements() * ToVT.getScalarSizeInBits() / 8; |
| 16802 | SDValue NewPtr = |
| 16803 | DAG.getObjectPtrOffset(SL: DL, Ptr: BasePtr, Offset: TypeSize::getFixed(ExactSize: NewOffset)); |
| 16804 | |
| 16805 | SDValue Extract = Trunc.getOperand(i);
| 16806 | SDValue Store = DAG.getTruncStore( |
| 16807 | Chain: Ch, dl: DL, Val: Extract, Ptr: NewPtr, PtrInfo: St->getPointerInfo().getWithOffset(O: NewOffset), |
| 16808 | SVT: NewToVT, Alignment, MMOFlags, AAInfo); |
| 16809 | Stores.push_back(Elt: Store); |
| 16810 | } |
| 16811 | return DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Stores); |
| 16812 | } |
| 16813 | |
| 16814 | // Given a floating point store from an extracted vector, with an integer |
| 16815 | // VGETLANE that already exists, store the existing VGETLANEu directly. This can |
| 16816 | // help reduce fp register pressure, doesn't require the fp extract and allows |
| 16817 | // use of more integer post-inc stores not available with vstr. |
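|  | // For example (illustrative): for store(f16 (extract_elt %v, %lane)) where the
|  | // DAG already has (VGETLANEu %v, %lane), the i32 lane value is stored with a
|  | // 16-bit truncating integer store instead of going through an fp register.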
| 16818 | static SDValue PerformExtractFpToIntStores(StoreSDNode *St, SelectionDAG &DAG) {
| 16819 | if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed()) |
| 16820 | return SDValue(); |
| 16821 | SDValue Extract = St->getValue();
| 16822 | EVT VT = Extract.getValueType(); |
| 16823 | // For now only uses f16. This may be useful for f32 too, but that will |
| 16824 | // be bitcast(extract), not the VGETLANEu we currently check here. |
| 16825 | if (VT != MVT::f16 || Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT) |
| 16826 | return SDValue(); |
| 16827 | |
| 16828 | SDNode *GetLane = |
| 16829 | DAG.getNodeIfExists(Opcode: ARMISD::VGETLANEu, VTList: DAG.getVTList(VT: MVT::i32), |
| 16830 | Ops: {Extract.getOperand(i: 0), Extract.getOperand(i: 1)}); |
| 16831 | if (!GetLane) |
| 16832 | return SDValue(); |
| 16833 | |
| 16834 | LLVMContext &C = *DAG.getContext(); |
| 16835 | SDLoc DL(St); |
| 16836 | // Create a new integer store to replace the existing floating point version. |
| 16837 | SDValue Ch = St->getChain(); |
| 16838 | SDValue BasePtr = St->getBasePtr(); |
| 16839 | Align Alignment = St->getBaseAlign(); |
| 16840 | MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags(); |
| 16841 | AAMDNodes AAInfo = St->getAAInfo(); |
| 16842 | EVT NewToVT = EVT::getIntegerVT(Context&: C, BitWidth: VT.getSizeInBits()); |
| 16843 | SDValue Store = DAG.getTruncStore(Chain: Ch, dl: DL, Val: SDValue(GetLane, 0), Ptr: BasePtr, |
| 16844 | PtrInfo: St->getPointerInfo(), SVT: NewToVT, Alignment, |
| 16845 | MMOFlags, AAInfo); |
| 16846 | |
| 16847 | return Store; |
| 16848 | } |
| 16849 | |
| 16850 | /// PerformSTORECombine - Target-specific dag combine xforms for |
| 16851 | /// ISD::STORE. |
| 16852 | static SDValue PerformSTORECombine(SDNode *N, |
| 16853 | TargetLowering::DAGCombinerInfo &DCI, |
| 16854 | const ARMSubtarget *Subtarget) { |
| 16855 | StoreSDNode *St = cast<StoreSDNode>(Val: N); |
| 16856 | if (St->isVolatile()) |
| 16857 | return SDValue(); |
| 16858 | SDValue StVal = St->getValue(); |
| 16859 | EVT VT = StVal.getValueType(); |
| 16860 | |
| 16861 | if (Subtarget->hasNEON()) |
| 16862 | if (SDValue Store = PerformTruncatingStoreCombine(St, DAG&: DCI.DAG)) |
| 16863 | return Store; |
| 16864 | |
| 16865 | if (Subtarget->hasMVEFloatOps()) |
| 16866 | if (SDValue NewToken = PerformSplittingToNarrowingStores(St, DAG&: DCI.DAG)) |
| 16867 | return NewToken; |
| 16868 | |
| 16869 | if (Subtarget->hasMVEIntegerOps()) { |
| 16870 | if (SDValue NewChain = PerformExtractFpToIntStores(St, DAG&: DCI.DAG)) |
| 16871 | return NewChain; |
| 16872 | if (SDValue NewToken = |
| 16873 | PerformSplittingMVETruncToNarrowingStores(St, DAG&: DCI.DAG)) |
| 16874 | return NewToken; |
| 16875 | } |
| 16876 | |
| 16877 | if (!ISD::isNormalStore(N: St)) |
| 16878 | return SDValue(); |
| 16879 | |
| 16880 | // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and |
| 16881 | // ARM stores of arguments in the same cache line. |
| 16882 | if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && |
| 16883 | StVal.getNode()->hasOneUse()) { |
| 16884 | SelectionDAG &DAG = DCI.DAG; |
| 16885 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); |
| 16886 | SDLoc DL(St); |
| 16887 | SDValue BasePtr = St->getBasePtr(); |
| 16888 | SDValue NewST1 = DAG.getStore( |
| 16889 | Chain: St->getChain(), dl: DL, Val: StVal.getNode()->getOperand(Num: isBigEndian ? 1 : 0), |
| 16890 | Ptr: BasePtr, PtrInfo: St->getPointerInfo(), Alignment: St->getBaseAlign(), |
| 16891 | MMOFlags: St->getMemOperand()->getFlags()); |
| 16892 | |
| 16893 | SDValue OffsetPtr = DAG.getNode(Opcode: ISD::ADD, DL, VT: MVT::i32, N1: BasePtr, |
| 16894 | N2: DAG.getConstant(Val: 4, DL, VT: MVT::i32)); |
| 16895 | return DAG.getStore(Chain: NewST1.getValue(R: 0), dl: DL, |
| 16896 | Val: StVal.getNode()->getOperand(Num: isBigEndian ? 0 : 1), |
| 16897 | Ptr: OffsetPtr, PtrInfo: St->getPointerInfo().getWithOffset(O: 4), |
| 16898 | Alignment: St->getBaseAlign(), MMOFlags: St->getMemOperand()->getFlags()); |
| 16899 | } |
| 16900 | |
| 16901 | if (StVal.getValueType() == MVT::i64 && |
| 16902 | StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { |
| 16903 | |
| 16904 | // Bitcast an i64 store extracted from a vector to f64. |
| 16905 | // Otherwise, the i64 value will be legalized to a pair of i32 values. |
| 16906 | SelectionDAG &DAG = DCI.DAG; |
| 16907 | SDLoc dl(StVal); |
| 16908 | SDValue IntVec = StVal.getOperand(i: 0); |
| 16909 | EVT FloatVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::f64, |
| 16910 | NumElements: IntVec.getValueType().getVectorNumElements()); |
| 16911 | SDValue Vec = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: FloatVT, Operand: IntVec); |
| 16912 | SDValue ExtElt = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, |
| 16913 | N1: Vec, N2: StVal.getOperand(i: 1)); |
| 16914 | dl = SDLoc(N); |
| 16915 | SDValue V = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::i64, Operand: ExtElt); |
| 16916 | // Make the DAGCombiner fold the bitcasts. |
| 16917 | DCI.AddToWorklist(N: Vec.getNode()); |
| 16918 | DCI.AddToWorklist(N: ExtElt.getNode()); |
| 16919 | DCI.AddToWorklist(N: V.getNode()); |
| 16920 | return DAG.getStore(Chain: St->getChain(), dl, Val: V, Ptr: St->getBasePtr(), |
| 16921 | PtrInfo: St->getPointerInfo(), Alignment: St->getAlign(), |
| 16922 | MMOFlags: St->getMemOperand()->getFlags(), AAInfo: St->getAAInfo()); |
| 16923 | } |
| 16924 | |
| 16925 | // If this is a legal vector store, try to combine it into a VST1_UPD. |
| 16926 | if (Subtarget->hasNEON() && ISD::isNormalStore(N) && VT.isVector() && |
| 16927 | DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
| 16928 | return CombineBaseUpdate(N, DCI); |
| 16929 | |
| 16930 | return SDValue(); |
| 16931 | } |
| 16932 | |
| 16933 | /// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) |
| 16934 | /// can replace combinations of VMUL and VCVT (floating-point to integer) |
| 16935 | /// when the VMUL has a constant operand that is a power of 2. |
| 16936 | /// |
| 16937 | /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): |
| 16938 | /// vmul.f32 d16, d17, d16 |
| 16939 | /// vcvt.s32.f32 d16, d16 |
| 16940 | /// becomes: |
| 16941 | /// vcvt.s32.f32 d16, d16, #3 |
| 16942 | static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG, |
| 16943 | const ARMSubtarget *Subtarget) { |
| 16944 | if (!Subtarget->hasNEON()) |
| 16945 | return SDValue(); |
| 16946 | |
| 16947 | SDValue Op = N->getOperand(Num: 0); |
| 16948 | if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() || |
| 16949 | Op.getOpcode() != ISD::FMUL) |
| 16950 | return SDValue(); |
| 16951 | |
| 16952 | SDValue ConstVec = Op->getOperand(Num: 1); |
| 16953 | if (!isa<BuildVectorSDNode>(Val: ConstVec)) |
| 16954 | return SDValue(); |
| 16955 | |
| 16956 | MVT FloatTy = Op.getSimpleValueType().getVectorElementType(); |
| 16957 | uint32_t FloatBits = FloatTy.getSizeInBits(); |
| 16958 | MVT IntTy = N->getSimpleValueType(ResNo: 0).getVectorElementType(); |
| 16959 | uint32_t IntBits = IntTy.getSizeInBits(); |
| 16960 | unsigned NumLanes = Op.getValueType().getVectorNumElements(); |
| 16961 | if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) { |
| 16962 | // These instructions only exist converting from f32 to i32. We can handle |
| 16963 | // smaller integers by generating an extra truncate, but larger ones would |
| 16964 | // be lossy. We also can't handle anything other than 2 or 4 lanes, since |
| 16965 | // these instructions only support v2i32/v4i32 types.
| 16966 | return SDValue(); |
| 16967 | } |
| 16968 | |
| 16969 | BitVector UndefElements; |
| 16970 | BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Val&: ConstVec); |
| 16971 | int32_t C = BV->getConstantFPSplatPow2ToLog2Int(UndefElements: &UndefElements, BitWidth: 33); |
| 16972 | if (C == -1 || C == 0 || C > 32) |
| 16973 | return SDValue(); |
| 16974 | |
| 16975 | SDLoc dl(N); |
| 16976 | bool isSigned = N->getOpcode() == ISD::FP_TO_SINT; |
| 16977 | unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs : |
| 16978 | Intrinsic::arm_neon_vcvtfp2fxu; |
| 16979 | SDValue FixConv = DAG.getNode( |
| 16980 | Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: NumLanes == 2 ? MVT::v2i32 : MVT::v4i32, |
| 16981 | N1: DAG.getConstant(Val: IntrinsicOpcode, DL: dl, VT: MVT::i32), N2: Op->getOperand(Num: 0), |
| 16982 | N3: DAG.getConstant(Val: C, DL: dl, VT: MVT::i32)); |
| 16983 | |
| 16984 | if (IntBits < FloatBits) |
| 16985 | FixConv = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: N->getValueType(ResNo: 0), Operand: FixConv); |
| 16986 | |
| 16987 | return FixConv; |
| 16988 | } |
| 16989 | |
| 16990 | static SDValue PerformFAddVSelectCombine(SDNode *N, SelectionDAG &DAG, |
| 16991 | const ARMSubtarget *Subtarget) { |
| 16992 | if (!Subtarget->hasMVEFloatOps()) |
| 16993 | return SDValue(); |
| 16994 | |
| 16995 | // Turn (fadd x, (vselect c, y, -0.0)) into (vselect c, (fadd x, y), x) |
| 16996 | // The second form can be more easily turned into a predicated vadd, and |
| 16997 | // possibly combined into a fma to become a predicated vfma. |
| 16998 | SDValue Op0 = N->getOperand(Num: 0); |
| 16999 | SDValue Op1 = N->getOperand(Num: 1); |
| 17000 | EVT VT = N->getValueType(ResNo: 0); |
| 17001 | SDLoc DL(N); |
| 17002 | |
| 17003 | // The identity element for a fadd is -0.0 or +0.0 when the nsz flag is set, |
| 17004 | // which these VMOV's represent. |
| 17005 | auto isIdentitySplat = [&](SDValue Op, bool NSZ) { |
| 17006 | if (Op.getOpcode() != ISD::BITCAST || |
| 17007 | Op.getOperand(i: 0).getOpcode() != ARMISD::VMOVIMM) |
| 17008 | return false; |
| 17009 | uint64_t ImmVal = Op.getOperand(i: 0).getConstantOperandVal(i: 0); |
| 17010 | if (VT == MVT::v4f32 && (ImmVal == 1664 || (ImmVal == 0 && NSZ))) |
| 17011 | return true; |
| 17012 | if (VT == MVT::v8f16 && (ImmVal == 2688 || (ImmVal == 0 && NSZ))) |
| 17013 | return true; |
| 17014 | return false; |
| 17015 | }; |
| 17016 | |
| 17017 | if (Op0.getOpcode() == ISD::VSELECT && Op1.getOpcode() != ISD::VSELECT) |
| 17018 | std::swap(a&: Op0, b&: Op1); |
| 17019 | |
| 17020 | if (Op1.getOpcode() != ISD::VSELECT) |
| 17021 | return SDValue(); |
| 17022 | |
| 17023 | SDNodeFlags FaddFlags = N->getFlags(); |
| 17024 | bool NSZ = FaddFlags.hasNoSignedZeros(); |
| 17025 | if (!isIdentitySplat(Op1.getOperand(i: 2), NSZ)) |
| 17026 | return SDValue(); |
| 17027 | |
| 17028 | SDValue FAdd = |
| 17029 | DAG.getNode(Opcode: ISD::FADD, DL, VT, N1: Op0, N2: Op1.getOperand(i: 1), Flags: FaddFlags); |
| 17030 | return DAG.getNode(Opcode: ISD::VSELECT, DL, VT, N1: Op1.getOperand(i: 0), N2: FAdd, N3: Op0, Flags: FaddFlags); |
| 17031 | } |
| 17032 | |
| 17033 | static SDValue PerformFADDVCMLACombine(SDNode *N, SelectionDAG &DAG) { |
| 17034 | SDValue LHS = N->getOperand(Num: 0); |
| 17035 | SDValue RHS = N->getOperand(Num: 1); |
| 17036 | EVT VT = N->getValueType(ResNo: 0); |
| 17037 | SDLoc DL(N); |
| 17038 | |
| 17039 | if (!N->getFlags().hasAllowReassociation()) |
| 17040 | return SDValue(); |
| 17041 | |
| 17042 | // Combine fadd(a, vcmla(b, c, d)) -> vcmla(fadd(a, b), c, d)
| 17043 | auto ReassocComplex = [&](SDValue A, SDValue B) { |
| 17044 | if (A.getOpcode() != ISD::INTRINSIC_WO_CHAIN) |
| 17045 | return SDValue(); |
| 17046 | unsigned Opc = A.getConstantOperandVal(i: 0); |
| 17047 | if (Opc != Intrinsic::arm_mve_vcmlaq) |
| 17048 | return SDValue(); |
| 17049 | SDValue VCMLA = DAG.getNode( |
| 17050 | Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT, N1: A.getOperand(i: 0), N2: A.getOperand(i: 1), |
| 17051 | N3: DAG.getNode(Opcode: ISD::FADD, DL, VT, N1: A.getOperand(i: 2), N2: B, Flags: N->getFlags()), |
| 17052 | N4: A.getOperand(i: 3), N5: A.getOperand(i: 4)); |
| 17053 | VCMLA->setFlags(A->getFlags()); |
| 17054 | return VCMLA; |
| 17055 | }; |
| 17056 | if (SDValue R = ReassocComplex(LHS, RHS)) |
| 17057 | return R; |
| 17058 | if (SDValue R = ReassocComplex(RHS, LHS)) |
| 17059 | return R; |
| 17060 | |
| 17061 | return SDValue(); |
| 17062 | } |
| 17063 | |
| 17064 | static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, |
| 17065 | const ARMSubtarget *Subtarget) { |
| 17066 | if (SDValue S = PerformFAddVSelectCombine(N, DAG, Subtarget)) |
| 17067 | return S; |
| 17068 | if (SDValue S = PerformFADDVCMLACombine(N, DAG)) |
| 17069 | return S; |
| 17070 | return SDValue(); |
| 17071 | } |
| 17072 | |
| 17073 | /// PerformVMulVCTPCombine - VCVT (fixed-point to floating-point, Advanced SIMD) |
| 17074 | /// can replace combinations of VCVT (integer to floating-point) and VMUL |
| 17075 | /// when the VMUL has a constant operand that is a power of 2. |
| 17076 | /// |
| 17077 | /// Example (assume d17 = <float 0.125, float 0.125>): |
| 17078 | /// vcvt.f32.s32 d16, d16 |
| 17079 | /// vmul.f32 d16, d16, d17 |
| 17080 | /// becomes: |
| 17081 | /// vcvt.f32.s32 d16, d16, #3 |
| 17082 | static SDValue PerformVMulVCTPCombine(SDNode *N, SelectionDAG &DAG, |
| 17083 | const ARMSubtarget *Subtarget) { |
| 17084 | if (!Subtarget->hasNEON()) |
| 17085 | return SDValue(); |
| 17086 | |
| 17087 | SDValue Op = N->getOperand(Num: 0); |
| 17088 | unsigned OpOpcode = Op.getNode()->getOpcode(); |
| 17089 | if (!N->getValueType(ResNo: 0).isVector() || !N->getValueType(ResNo: 0).isSimple() || |
| 17090 | (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP)) |
| 17091 | return SDValue(); |
| 17092 | |
| 17093 | SDValue ConstVec = N->getOperand(Num: 1); |
| 17094 | if (!isa<BuildVectorSDNode>(Val: ConstVec)) |
| 17095 | return SDValue(); |
| 17096 | |
| 17097 | MVT FloatTy = N->getSimpleValueType(ResNo: 0).getVectorElementType(); |
| 17098 | uint32_t FloatBits = FloatTy.getSizeInBits(); |
| 17099 | MVT IntTy = Op.getOperand(i: 0).getSimpleValueType().getVectorElementType(); |
| 17100 | uint32_t IntBits = IntTy.getSizeInBits(); |
| 17101 | unsigned NumLanes = Op.getValueType().getVectorNumElements(); |
| 17102 | if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) { |
| 17103 | // These instructions only exist converting from i32 to f32. We can handle |
| 17104 | // smaller integers by generating an extra extend, but larger ones would |
| 17105 | // be lossy. We also can't handle anything other than 2 or 4 lanes, since |
| 17106 | // these instructions only support v2i32/v4i32 types.
| 17107 | return SDValue(); |
| 17108 | } |
| 17109 | |
| 17110 | ConstantFPSDNode *CN = isConstOrConstSplatFP(N: ConstVec, AllowUndefs: true); |
| 17111 | APFloat Recip(0.0f); |
| 17112 | if (!CN || !CN->getValueAPF().getExactInverse(inv: &Recip)) |
| 17113 | return SDValue(); |
| 17114 | |
| 17115 | bool IsExact; |
| 17116 | APSInt IntVal(33); |
| 17117 | if (Recip.convertToInteger(Result&: IntVal, RM: APFloat::rmTowardZero, IsExact: &IsExact) != |
| 17118 | APFloat::opOK || |
| 17119 | !IsExact) |
| 17120 | return SDValue(); |
| 17121 | |
| 17122 | int32_t C = IntVal.exactLogBase2(); |
| 17123 | if (C == -1 || C == 0 || C > 32) |
| 17124 | return SDValue(); |
| 17125 | |
| 17126 | SDLoc DL(N); |
| 17127 | bool isSigned = OpOpcode == ISD::SINT_TO_FP; |
| 17128 | SDValue ConvInput = Op.getOperand(i: 0); |
| 17129 | if (IntBits < FloatBits) |
| 17130 | ConvInput = DAG.getNode(Opcode: isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL, |
| 17131 | VT: NumLanes == 2 ? MVT::v2i32 : MVT::v4i32, Operand: ConvInput); |
| 17132 | |
| 17133 | unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp |
| 17134 | : Intrinsic::arm_neon_vcvtfxu2fp; |
| 17135 | return DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT: Op.getValueType(), |
| 17136 | N1: DAG.getConstant(Val: IntrinsicOpcode, DL, VT: MVT::i32), N2: ConvInput, |
| 17137 | N3: DAG.getConstant(Val: C, DL, VT: MVT::i32)); |
| 17138 | } |
| 17139 | |
| 17140 | static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG, |
| 17141 | const ARMSubtarget *ST) { |
| 17142 | if (!ST->hasMVEIntegerOps()) |
| 17143 | return SDValue(); |
| 17144 | |
| 17145 | assert(N->getOpcode() == ISD::VECREDUCE_ADD); |
| 17146 | EVT ResVT = N->getValueType(ResNo: 0); |
| 17147 | SDValue N0 = N->getOperand(Num: 0); |
| 17148 | SDLoc dl(N); |
| 17149 | |
| 17150 | // Try to turn vecreduce_add(add(x, y)) into vecreduce(x) + vecreduce(y) |
| 17151 | if (ResVT == MVT::i32 && N0.getOpcode() == ISD::ADD && |
| 17152 | (N0.getValueType() == MVT::v4i32 || N0.getValueType() == MVT::v8i16 || |
| 17153 | N0.getValueType() == MVT::v16i8)) { |
| 17154 | SDValue Red0 = DAG.getNode(Opcode: ISD::VECREDUCE_ADD, DL: dl, VT: ResVT, Operand: N0.getOperand(i: 0)); |
| 17155 | SDValue Red1 = DAG.getNode(Opcode: ISD::VECREDUCE_ADD, DL: dl, VT: ResVT, Operand: N0.getOperand(i: 1)); |
| 17156 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: ResVT, N1: Red0, N2: Red1); |
| 17157 | } |
| 17158 | |
| 17159 | // We are looking for something that will have illegal types if left alone, |
| 17160 | // but that we can convert to a single instruction under MVE. For example |
| 17161 | // vecreduce_add(sext(A, v8i32)) => VADDV.s16 A |
| 17162 | // or |
| 17163 | // vecreduce_add(mul(zext(A, v16i32), zext(B, v16i32))) => VMLADAV.u8 A, B |
| 17164 | |
| 17165 | // The legal cases are: |
| 17166 | // VADDV u/s 8/16/32 |
| 17167 | // VMLAV u/s 8/16/32 |
| 17168 | // VADDLV u/s 32 |
| 17169 | // VMLALV u/s 16/32 |
| 17170 | |
| 17171 | // If the input vector is smaller than legal (v4i8/v4i16 for example) we can |
| 17172 | // extend it and use v4i32 instead. |
| 17173 | auto ExtTypeMatches = [](SDValue A, ArrayRef<MVT> ExtTypes) { |
| 17174 | EVT AVT = A.getValueType(); |
| 17175 | return any_of(Range&: ExtTypes, P: [&](MVT Ty) { |
| 17176 | return AVT.getVectorNumElements() == Ty.getVectorNumElements() && |
| 17177 | AVT.bitsLE(VT: Ty); |
| 17178 | }); |
| 17179 | }; |
| 17180 | auto ExtendIfNeeded = [&](SDValue A, unsigned ExtendCode) { |
| 17181 | EVT AVT = A.getValueType(); |
| 17182 | if (!AVT.is128BitVector()) |
| 17183 | A = DAG.getNode(Opcode: ExtendCode, DL: dl, |
| 17184 | VT: AVT.changeVectorElementType(EltVT: MVT::getIntegerVT( |
| 17185 | BitWidth: 128 / AVT.getVectorMinNumElements())), |
| 17186 | Operand: A); |
| 17187 | return A; |
| 17188 | }; |
| 17189 | auto IsVADDV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes) { |
| 17190 | if (ResVT != RetTy || N0->getOpcode() != ExtendCode) |
| 17191 | return SDValue(); |
| 17192 | SDValue A = N0->getOperand(Num: 0); |
| 17193 | if (ExtTypeMatches(A, ExtTypes)) |
| 17194 | return ExtendIfNeeded(A, ExtendCode); |
| 17195 | return SDValue(); |
| 17196 | }; |
| 17197 | auto IsPredVADDV = [&](MVT RetTy, unsigned ExtendCode, |
| 17198 | ArrayRef<MVT> ExtTypes, SDValue &Mask) { |
| 17199 | if (ResVT != RetTy || N0->getOpcode() != ISD::VSELECT || |
| 17200 | !ISD::isBuildVectorAllZeros(N: N0->getOperand(Num: 2).getNode())) |
| 17201 | return SDValue(); |
| 17202 | Mask = N0->getOperand(Num: 0); |
| 17203 | SDValue Ext = N0->getOperand(Num: 1); |
| 17204 | if (Ext->getOpcode() != ExtendCode) |
| 17205 | return SDValue(); |
| 17206 | SDValue A = Ext->getOperand(Num: 0); |
| 17207 | if (ExtTypeMatches(A, ExtTypes)) |
| 17208 | return ExtendIfNeeded(A, ExtendCode); |
| 17209 | return SDValue(); |
| 17210 | }; |
| 17211 | auto IsVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes, |
| 17212 | SDValue &A, SDValue &B) { |
| 17213 | // For a vmla we are trying to match a larger pattern: |
| 17214 | // ExtA = sext/zext A |
| 17215 | // ExtB = sext/zext B |
| 17216 | // Mul = mul ExtA, ExtB |
| 17217 | // vecreduce.add Mul |
| 17218 | // There might also be an extra extend between the mul and the addreduce, so
| 17219 | // long as the bitwidth is high enough to make them equivalent (for example |
| 17220 | // original v8i16 might be mul at v8i32 and the reduce happens at v8i64). |
| 17221 | if (ResVT != RetTy) |
| 17222 | return false; |
| 17223 | SDValue Mul = N0; |
| 17224 | if (Mul->getOpcode() == ExtendCode && |
| 17225 | Mul->getOperand(Num: 0).getScalarValueSizeInBits() * 2 >= |
| 17226 | ResVT.getScalarSizeInBits()) |
| 17227 | Mul = Mul->getOperand(Num: 0); |
| 17228 | if (Mul->getOpcode() != ISD::MUL) |
| 17229 | return false; |
| 17230 | SDValue ExtA = Mul->getOperand(Num: 0); |
| 17231 | SDValue ExtB = Mul->getOperand(Num: 1); |
| 17232 | if (ExtA->getOpcode() != ExtendCode || ExtB->getOpcode() != ExtendCode) |
| 17233 | return false; |
| 17234 | A = ExtA->getOperand(Num: 0); |
| 17235 | B = ExtB->getOperand(Num: 0); |
| 17236 | if (ExtTypeMatches(A, ExtTypes) && ExtTypeMatches(B, ExtTypes)) { |
| 17237 | A = ExtendIfNeeded(A, ExtendCode); |
| 17238 | B = ExtendIfNeeded(B, ExtendCode); |
| 17239 | return true; |
| 17240 | } |
| 17241 | return false; |
| 17242 | }; |
| 17243 | auto IsPredVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes, |
| 17244 | SDValue &A, SDValue &B, SDValue &Mask) { |
| 17245 | // Same as the pattern above with a select for the zero predicated lanes |
| 17246 | // ExtA = sext/zext A |
| 17247 | // ExtB = sext/zext B |
| 17248 | // Mul = mul ExtA, ExtB |
| 17249 | // N0 = select Mask, Mul, 0 |
| 17250 | // vecreduce.add N0 |
| 17251 | if (ResVT != RetTy || N0->getOpcode() != ISD::VSELECT || |
| 17252 | !ISD::isBuildVectorAllZeros(N: N0->getOperand(Num: 2).getNode())) |
| 17253 | return false; |
| 17254 | Mask = N0->getOperand(Num: 0); |
| 17255 | SDValue Mul = N0->getOperand(Num: 1); |
| 17256 | if (Mul->getOpcode() == ExtendCode && |
| 17257 | Mul->getOperand(Num: 0).getScalarValueSizeInBits() * 2 >= |
| 17258 | ResVT.getScalarSizeInBits()) |
| 17259 | Mul = Mul->getOperand(Num: 0); |
| 17260 | if (Mul->getOpcode() != ISD::MUL) |
| 17261 | return false; |
| 17262 | SDValue ExtA = Mul->getOperand(Num: 0); |
| 17263 | SDValue ExtB = Mul->getOperand(Num: 1); |
| 17264 | if (ExtA->getOpcode() != ExtendCode || ExtB->getOpcode() != ExtendCode) |
| 17265 | return false; |
| 17266 | A = ExtA->getOperand(Num: 0); |
| 17267 | B = ExtB->getOperand(Num: 0); |
| 17268 | if (ExtTypeMatches(A, ExtTypes) && ExtTypeMatches(B, ExtTypes)) { |
| 17269 | A = ExtendIfNeeded(A, ExtendCode); |
| 17270 | B = ExtendIfNeeded(B, ExtendCode); |
| 17271 | return true; |
| 17272 | } |
| 17273 | return false; |
| 17274 | }; |
| 17275 | auto Create64bitNode = [&](unsigned Opcode, ArrayRef<SDValue> Ops) { |
| 17276 | // Split illegal MVT::v16i8->i64 vector reductions into two legal v8i16->i64 |
| 17277 | // reductions. The operands are extended with MVEEXT, but as they are |
| 17278 | // reductions the lane orders do not matter. MVEEXT may be combined with |
| 17279 | // loads to produce two extending loads, or else they will be expanded to |
| 17280 | // VREV/VMOVL. |
| 17281 | EVT VT = Ops[0].getValueType(); |
| 17282 | if (VT == MVT::v16i8) { |
| 17283 | assert((Opcode == ARMISD::VMLALVs || Opcode == ARMISD::VMLALVu) && |
| 17284 | "Unexpected illegal long reduction opcode" ); |
| 17285 | bool IsUnsigned = Opcode == ARMISD::VMLALVu; |
| 17286 | |
| 17287 | SDValue Ext0 = |
| 17288 | DAG.getNode(Opcode: IsUnsigned ? ARMISD::MVEZEXT : ARMISD::MVESEXT, DL: dl, |
| 17289 | VTList: DAG.getVTList(VT1: MVT::v8i16, VT2: MVT::v8i16), N: Ops[0]); |
| 17290 | SDValue Ext1 = |
| 17291 | DAG.getNode(Opcode: IsUnsigned ? ARMISD::MVEZEXT : ARMISD::MVESEXT, DL: dl, |
| 17292 | VTList: DAG.getVTList(VT1: MVT::v8i16, VT2: MVT::v8i16), N: Ops[1]); |
| 17293 | |
| 17294 | SDValue MLA0 = DAG.getNode(Opcode, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
| 17295 | N1: Ext0, N2: Ext1); |
| 17296 | SDValue MLA1 = |
| 17297 | DAG.getNode(Opcode: IsUnsigned ? ARMISD::VMLALVAu : ARMISD::VMLALVAs, DL: dl, |
| 17298 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N1: MLA0, N2: MLA0.getValue(R: 1), |
| 17299 | N3: Ext0.getValue(R: 1), N4: Ext1.getValue(R: 1)); |
| 17300 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: MLA1, N2: MLA1.getValue(R: 1)); |
| 17301 | } |
| 17302 | SDValue Node = DAG.getNode(Opcode, DL: dl, ResultTys: {MVT::i32, MVT::i32}, Ops); |
| 17303 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Node, |
| 17304 | N2: SDValue(Node.getNode(), 1)); |
| 17305 | }; |
| 17306 | |
| 17307 | SDValue A, B; |
| 17308 | SDValue Mask; |
| 17309 | if (IsVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B)) |
| 17310 | return DAG.getNode(Opcode: ARMISD::VMLAVs, DL: dl, VT: ResVT, N1: A, N2: B); |
| 17311 | if (IsVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B)) |
| 17312 | return DAG.getNode(Opcode: ARMISD::VMLAVu, DL: dl, VT: ResVT, N1: A, N2: B); |
| 17313 | if (IsVMLAV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v16i8, MVT::v8i16, MVT::v4i32}, |
| 17314 | A, B)) |
| 17315 | return Create64bitNode(ARMISD::VMLALVs, {A, B}); |
| 17316 | if (IsVMLAV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v16i8, MVT::v8i16, MVT::v4i32}, |
| 17317 | A, B)) |
| 17318 | return Create64bitNode(ARMISD::VMLALVu, {A, B}); |
| 17319 | if (IsVMLAV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, A, B)) |
| 17320 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17321 | Operand: DAG.getNode(Opcode: ARMISD::VMLAVs, DL: dl, VT: MVT::i32, N1: A, N2: B)); |
| 17322 | if (IsVMLAV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, A, B)) |
| 17323 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17324 | Operand: DAG.getNode(Opcode: ARMISD::VMLAVu, DL: dl, VT: MVT::i32, N1: A, N2: B)); |
| 17325 | |
| 17326 | if (IsPredVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B, |
| 17327 | Mask)) |
| 17328 | return DAG.getNode(Opcode: ARMISD::VMLAVps, DL: dl, VT: ResVT, N1: A, N2: B, N3: Mask); |
| 17329 | if (IsPredVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B, |
| 17330 | Mask)) |
| 17331 | return DAG.getNode(Opcode: ARMISD::VMLAVpu, DL: dl, VT: ResVT, N1: A, N2: B, N3: Mask); |
| 17332 | if (IsPredVMLAV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B, |
| 17333 | Mask)) |
| 17334 | return Create64bitNode(ARMISD::VMLALVps, {A, B, Mask}); |
| 17335 | if (IsPredVMLAV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B, |
| 17336 | Mask)) |
| 17337 | return Create64bitNode(ARMISD::VMLALVpu, {A, B, Mask}); |
| 17338 | if (IsPredVMLAV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, A, B, Mask)) |
| 17339 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17340 | Operand: DAG.getNode(Opcode: ARMISD::VMLAVps, DL: dl, VT: MVT::i32, N1: A, N2: B, N3: Mask)); |
| 17341 | if (IsPredVMLAV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, A, B, Mask)) |
| 17342 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17343 | Operand: DAG.getNode(Opcode: ARMISD::VMLAVpu, DL: dl, VT: MVT::i32, N1: A, N2: B, N3: Mask)); |
| 17344 | |
| 17345 | if (SDValue A = IsVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8})) |
| 17346 | return DAG.getNode(Opcode: ARMISD::VADDVs, DL: dl, VT: ResVT, Operand: A); |
| 17347 | if (SDValue A = IsVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8})) |
| 17348 | return DAG.getNode(Opcode: ARMISD::VADDVu, DL: dl, VT: ResVT, Operand: A); |
| 17349 | if (SDValue A = IsVADDV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v4i32})) |
| 17350 | return Create64bitNode(ARMISD::VADDLVs, {A}); |
| 17351 | if (SDValue A = IsVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32})) |
| 17352 | return Create64bitNode(ARMISD::VADDLVu, {A}); |
| 17353 | if (SDValue A = IsVADDV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8})) |
| 17354 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17355 | Operand: DAG.getNode(Opcode: ARMISD::VADDVs, DL: dl, VT: MVT::i32, Operand: A)); |
| 17356 | if (SDValue A = IsVADDV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8})) |
| 17357 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17358 | Operand: DAG.getNode(Opcode: ARMISD::VADDVu, DL: dl, VT: MVT::i32, Operand: A)); |
| 17359 | |
| 17360 | if (SDValue A = IsPredVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, Mask)) |
| 17361 | return DAG.getNode(Opcode: ARMISD::VADDVps, DL: dl, VT: ResVT, N1: A, N2: Mask); |
| 17362 | if (SDValue A = IsPredVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, Mask)) |
| 17363 | return DAG.getNode(Opcode: ARMISD::VADDVpu, DL: dl, VT: ResVT, N1: A, N2: Mask); |
| 17364 | if (SDValue A = IsPredVADDV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v4i32}, Mask)) |
| 17365 | return Create64bitNode(ARMISD::VADDLVps, {A, Mask}); |
| 17366 | if (SDValue A = IsPredVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32}, Mask)) |
| 17367 | return Create64bitNode(ARMISD::VADDLVpu, {A, Mask}); |
| 17368 | if (SDValue A = IsPredVADDV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, Mask)) |
| 17369 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17370 | Operand: DAG.getNode(Opcode: ARMISD::VADDVps, DL: dl, VT: MVT::i32, N1: A, N2: Mask)); |
| 17371 | if (SDValue A = IsPredVADDV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, Mask)) |
| 17372 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17373 | Operand: DAG.getNode(Opcode: ARMISD::VADDVpu, DL: dl, VT: MVT::i32, N1: A, N2: Mask)); |
| 17374 | |
| 17375 | // Some complications. We can get a case where the two inputs of the mul are |
| 17376 | // the same; the output sext will then have been helpfully converted to a |
| 17377 | // zext. Turn it back. |
| 17378 | SDValue Op = N0; |
| 17379 | if (Op->getOpcode() == ISD::VSELECT) |
| 17380 | Op = Op->getOperand(Num: 1); |
| 17381 | if (Op->getOpcode() == ISD::ZERO_EXTEND && |
| 17382 | Op->getOperand(Num: 0)->getOpcode() == ISD::MUL) { |
| 17383 | SDValue Mul = Op->getOperand(Num: 0); |
| 17384 | if (Mul->getOperand(Num: 0) == Mul->getOperand(Num: 1) && |
| 17385 | Mul->getOperand(Num: 0)->getOpcode() == ISD::SIGN_EXTEND) { |
| 17386 | SDValue Ext = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: N0->getValueType(ResNo: 0), Operand: Mul); |
| 17387 | if (Op != N0) |
| 17388 | Ext = DAG.getNode(Opcode: ISD::VSELECT, DL: dl, VT: N0->getValueType(ResNo: 0), |
| 17389 | N1: N0->getOperand(Num: 0), N2: Ext, N3: N0->getOperand(Num: 2)); |
| 17390 | return DAG.getNode(Opcode: ISD::VECREDUCE_ADD, DL: dl, VT: ResVT, Operand: Ext); |
| 17391 | } |
| 17392 | } |
| 17393 | |
| 17394 | return SDValue(); |
| 17395 | } |
| 17396 | |
| 17397 | // Looks for vaddv(shuffle) or vmlav(shuffle, shuffle), with a shuffle where all |
| 17398 | // the lanes are used. Due to the reduction being commutative the shuffle can be |
| 17399 | // removed. |
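| | // For example, vaddv (vector_shuffle x, undef, <3,2,1,0,7,6,5,4>) uses every |
| | // lane of x exactly once, so it can be rewritten as vaddv x. |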
| 17400 | static SDValue PerformReduceShuffleCombine(SDNode *N, SelectionDAG &DAG) { |
| 17401 | unsigned VecOp = N->getOperand(Num: 0).getValueType().isVector() ? 0 : 2; |
| 17402 | auto *Shuf = dyn_cast<ShuffleVectorSDNode>(Val: N->getOperand(Num: VecOp)); |
| 17403 | if (!Shuf || !Shuf->getOperand(Num: 1).isUndef()) |
| 17404 | return SDValue(); |
| 17405 | |
| 17406 | // Check all elements are used once in the mask. |
| 17407 | ArrayRef<int> Mask = Shuf->getMask(); |
| 17408 | APInt SetElts(Mask.size(), 0); |
| 17409 | for (int E : Mask) { |
| 17410 | if (E < 0 || E >= (int)Mask.size()) |
| 17411 | return SDValue(); |
| 17412 | SetElts.setBit(E); |
| 17413 | } |
| 17414 | if (!SetElts.isAllOnes()) |
| 17415 | return SDValue(); |
| 17416 | |
| 17417 | if (N->getNumOperands() != VecOp + 1) { |
| 17418 | auto *Shuf2 = dyn_cast<ShuffleVectorSDNode>(Val: N->getOperand(Num: VecOp + 1)); |
| 17419 | if (!Shuf2 || !Shuf2->getOperand(Num: 1).isUndef() || Shuf2->getMask() != Mask) |
| 17420 | return SDValue(); |
| 17421 | } |
| 17422 | |
| 17423 | SmallVector<SDValue> Ops; |
| 17424 | for (SDValue Op : N->ops()) { |
| 17425 | if (Op.getValueType().isVector()) |
| 17426 | Ops.push_back(Elt: Op.getOperand(i: 0)); |
| 17427 | else |
| 17428 | Ops.push_back(Elt: Op); |
| 17429 | } |
| 17430 | return DAG.getNode(Opcode: N->getOpcode(), DL: SDLoc(N), VTList: N->getVTList(), Ops); |
| 17431 | } |
| 17432 | |
| 17433 | static SDValue PerformVMOVNCombine(SDNode *N, |
| 17434 | TargetLowering::DAGCombinerInfo &DCI) { |
| 17435 | SDValue Op0 = N->getOperand(Num: 0); |
| 17436 | SDValue Op1 = N->getOperand(Num: 1); |
| 17437 | unsigned IsTop = N->getConstantOperandVal(Num: 2); |
| 17438 | |
| 17439 | // VMOVNT a undef -> a |
| 17440 | // VMOVNB a undef -> a |
| 17441 | // VMOVNB undef a -> a |
| 17442 | if (Op1->isUndef()) |
| 17443 | return Op0; |
| 17444 | if (Op0->isUndef() && !IsTop) |
| 17445 | return Op1; |
| 17446 | |
| 17447 | // VMOVNt(c, VQMOVNb(a, b)) => VQMOVNt(c, b) |
| 17448 | // VMOVNb(c, VQMOVNb(a, b)) => VQMOVNb(c, b) |
| 17449 | if ((Op1->getOpcode() == ARMISD::VQMOVNs || |
| 17450 | Op1->getOpcode() == ARMISD::VQMOVNu) && |
| 17451 | Op1->getConstantOperandVal(Num: 2) == 0) |
| 17452 | return DCI.DAG.getNode(Opcode: Op1->getOpcode(), DL: SDLoc(Op1), VT: N->getValueType(ResNo: 0), |
| 17453 | N1: Op0, N2: Op1->getOperand(Num: 1), N3: N->getOperand(Num: 2)); |
| 17454 | |
| 17455 | // Only the bottom lanes from Qm (Op1) and either the top or bottom lanes from |
| 17456 | // Qd (Op0) are demanded from a VMOVN, depending on whether we are inserting |
| 17457 | // into the top or bottom lanes. |
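| | // For example, for a v8i16 VMOVNT only lanes 0,2,4,6 of both Qd and Qm are |
| | // demanded; for a v8i16 VMOVNB lanes 1,3,5,7 of Qd and 0,2,4,6 of Qm are. |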
| 17458 | unsigned NumElts = N->getValueType(ResNo: 0).getVectorNumElements(); |
| 17459 | APInt Op1DemandedElts = APInt::getSplat(NewLen: NumElts, V: APInt::getLowBitsSet(numBits: 2, loBitsSet: 1)); |
| 17460 | APInt Op0DemandedElts = |
| 17461 | IsTop ? Op1DemandedElts |
| 17462 | : APInt::getSplat(NewLen: NumElts, V: APInt::getHighBitsSet(numBits: 2, hiBitsSet: 1)); |
| 17463 | |
| 17464 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); |
| 17465 | if (TLI.SimplifyDemandedVectorElts(Op: Op0, DemandedElts: Op0DemandedElts, DCI)) |
| 17466 | return SDValue(N, 0); |
| 17467 | if (TLI.SimplifyDemandedVectorElts(Op: Op1, DemandedElts: Op1DemandedElts, DCI)) |
| 17468 | return SDValue(N, 0); |
| 17469 | |
| 17470 | return SDValue(); |
| 17471 | } |
| 17472 | |
| 17473 | static SDValue PerformVQMOVNCombine(SDNode *N, |
| 17474 | TargetLowering::DAGCombinerInfo &DCI) { |
| 17475 | SDValue Op0 = N->getOperand(Num: 0); |
| 17476 | unsigned IsTop = N->getConstantOperandVal(Num: 2); |
| 17477 | |
| 17478 | unsigned NumElts = N->getValueType(ResNo: 0).getVectorNumElements(); |
| 17479 | APInt Op0DemandedElts = |
| 17480 | APInt::getSplat(NewLen: NumElts, V: IsTop ? APInt::getLowBitsSet(numBits: 2, loBitsSet: 1) |
| 17481 | : APInt::getHighBitsSet(numBits: 2, hiBitsSet: 1)); |
| 17482 | |
| 17483 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); |
| 17484 | if (TLI.SimplifyDemandedVectorElts(Op: Op0, DemandedElts: Op0DemandedElts, DCI)) |
| 17485 | return SDValue(N, 0); |
| 17486 | return SDValue(); |
| 17487 | } |
| 17488 | |
| 17489 | static SDValue PerformVQDMULHCombine(SDNode *N, |
| 17490 | TargetLowering::DAGCombinerInfo &DCI) { |
| 17491 | EVT VT = N->getValueType(ResNo: 0); |
| 17492 | SDValue LHS = N->getOperand(Num: 0); |
| 17493 | SDValue RHS = N->getOperand(Num: 1); |
| 17494 | |
| 17495 | auto *Shuf0 = dyn_cast<ShuffleVectorSDNode>(Val&: LHS); |
| 17496 | auto *Shuf1 = dyn_cast<ShuffleVectorSDNode>(Val&: RHS); |
| 17497 | // Turn VQDMULH(shuffle, shuffle) -> shuffle(VQDMULH) |
| 17498 | if (Shuf0 && Shuf1 && Shuf0->getMask().equals(RHS: Shuf1->getMask()) && |
| 17499 | LHS.getOperand(i: 1).isUndef() && RHS.getOperand(i: 1).isUndef() && |
| 17500 | (LHS.hasOneUse() || RHS.hasOneUse() || LHS == RHS)) { |
| 17501 | SDLoc DL(N); |
| 17502 | SDValue NewBinOp = DCI.DAG.getNode(Opcode: N->getOpcode(), DL, VT, |
| 17503 | N1: LHS.getOperand(i: 0), N2: RHS.getOperand(i: 0)); |
| 17504 | SDValue UndefV = LHS.getOperand(i: 1); |
| 17505 | return DCI.DAG.getVectorShuffle(VT, dl: DL, N1: NewBinOp, N2: UndefV, Mask: Shuf0->getMask()); |
| 17506 | } |
| 17507 | return SDValue(); |
| 17508 | } |
| 17509 | |
| 17510 | static SDValue PerformLongShiftCombine(SDNode *N, SelectionDAG &DAG) { |
| 17511 | SDLoc DL(N); |
| 17512 | SDValue Op0 = N->getOperand(Num: 0); |
| 17513 | SDValue Op1 = N->getOperand(Num: 1); |
| 17514 | |
| 17515 | // Turn X << -C -> X >> C and vice versa. The negative shifts can come up from |
| 17516 | // uses of the intrinsics. |
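| | // For example, LSLL(lo, hi, -4) is rewritten as LSRL(lo, hi, 4), and a shift |
| | // by 0 simply forwards the two input halves unchanged. |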
| 17517 | if (auto C = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 2))) { |
| 17518 | int ShiftAmt = C->getSExtValue(); |
| 17519 | if (ShiftAmt == 0) { |
| 17520 | SDValue Merge = DAG.getMergeValues(Ops: {Op0, Op1}, dl: DL); |
| 17521 | DAG.ReplaceAllUsesWith(From: N, To: Merge.getNode()); |
| 17522 | return SDValue(); |
| 17523 | } |
| 17524 | |
| 17525 | if (ShiftAmt >= -32 && ShiftAmt < 0) { |
| 17526 | unsigned NewOpcode = |
| 17527 | N->getOpcode() == ARMISD::LSLL ? ARMISD::LSRL : ARMISD::LSLL; |
| 17528 | SDValue NewShift = DAG.getNode(Opcode: NewOpcode, DL, VTList: N->getVTList(), N1: Op0, N2: Op1, |
| 17529 | N3: DAG.getConstant(Val: -ShiftAmt, DL, VT: MVT::i32)); |
| 17530 | DAG.ReplaceAllUsesWith(From: N, To: NewShift.getNode()); |
| 17531 | return NewShift; |
| 17532 | } |
| 17533 | } |
| 17534 | |
| 17535 | return SDValue(); |
| 17536 | } |
| 17537 | |
| 17538 | /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. |
| 17539 | SDValue ARMTargetLowering::PerformIntrinsicCombine(SDNode *N, |
| 17540 | DAGCombinerInfo &DCI) const { |
| 17541 | SelectionDAG &DAG = DCI.DAG; |
| 17542 | unsigned IntNo = N->getConstantOperandVal(Num: 0); |
| 17543 | switch (IntNo) { |
| 17544 | default: |
| 17545 | // Don't do anything for most intrinsics. |
| 17546 | break; |
| 17547 | |
| 17548 | // Vector shifts: check for immediate versions and lower them. |
| 17549 | // Note: This is done during DAG combining instead of DAG legalizing because |
| 17550 | // the build_vectors for 64-bit vector element shift counts are generally |
| 17551 | // not legal, and it is hard to see their values after they get legalized to |
| 17552 | // loads from a constant pool. |
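| | // For example, a vshifts/vshiftu by a splatted constant +3 becomes VSHLIMM |
| | // with immediate 3, while a splatted constant -3 (these intrinsics encode |
| | // right shifts as negative shift amounts) becomes VSHRsIMM/VSHRuIMM by 3. |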
| 17553 | case Intrinsic::arm_neon_vshifts: |
| 17554 | case Intrinsic::arm_neon_vshiftu: |
| 17555 | case Intrinsic::arm_neon_vrshifts: |
| 17556 | case Intrinsic::arm_neon_vrshiftu: |
| 17557 | case Intrinsic::arm_neon_vrshiftn: |
| 17558 | case Intrinsic::arm_neon_vqshifts: |
| 17559 | case Intrinsic::arm_neon_vqshiftu: |
| 17560 | case Intrinsic::arm_neon_vqshiftsu: |
| 17561 | case Intrinsic::arm_neon_vqshiftns: |
| 17562 | case Intrinsic::arm_neon_vqshiftnu: |
| 17563 | case Intrinsic::arm_neon_vqshiftnsu: |
| 17564 | case Intrinsic::arm_neon_vqrshiftns: |
| 17565 | case Intrinsic::arm_neon_vqrshiftnu: |
| 17566 | case Intrinsic::arm_neon_vqrshiftnsu: { |
| 17567 | EVT VT = N->getOperand(Num: 1).getValueType(); |
| 17568 | int64_t Cnt; |
| 17569 | unsigned VShiftOpc = 0; |
| 17570 | |
| 17571 | switch (IntNo) { |
| 17572 | case Intrinsic::arm_neon_vshifts: |
| 17573 | case Intrinsic::arm_neon_vshiftu: |
| 17574 | if (isVShiftLImm(Op: N->getOperand(Num: 2), VT, isLong: false, Cnt)) { |
| 17575 | VShiftOpc = ARMISD::VSHLIMM; |
| 17576 | break; |
| 17577 | } |
| 17578 | if (isVShiftRImm(Op: N->getOperand(Num: 2), VT, isNarrow: false, isIntrinsic: true, Cnt)) { |
| 17579 | VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? ARMISD::VSHRsIMM |
| 17580 | : ARMISD::VSHRuIMM); |
| 17581 | break; |
| 17582 | } |
| 17583 | return SDValue(); |
| 17584 | |
| 17585 | case Intrinsic::arm_neon_vrshifts: |
| 17586 | case Intrinsic::arm_neon_vrshiftu: |
| 17587 | if (isVShiftRImm(Op: N->getOperand(Num: 2), VT, isNarrow: false, isIntrinsic: true, Cnt)) |
| 17588 | break; |
| 17589 | return SDValue(); |
| 17590 | |
| 17591 | case Intrinsic::arm_neon_vqshifts: |
| 17592 | case Intrinsic::arm_neon_vqshiftu: |
| 17593 | if (isVShiftLImm(Op: N->getOperand(Num: 2), VT, isLong: false, Cnt)) |
| 17594 | break; |
| 17595 | return SDValue(); |
| 17596 | |
| 17597 | case Intrinsic::arm_neon_vqshiftsu: |
| 17598 | if (isVShiftLImm(Op: N->getOperand(Num: 2), VT, isLong: false, Cnt)) |
| 17599 | break; |
| 17600 | llvm_unreachable("invalid shift count for vqshlu intrinsic"); |
| 17601 | |
| 17602 | case Intrinsic::arm_neon_vrshiftn: |
| 17603 | case Intrinsic::arm_neon_vqshiftns: |
| 17604 | case Intrinsic::arm_neon_vqshiftnu: |
| 17605 | case Intrinsic::arm_neon_vqshiftnsu: |
| 17606 | case Intrinsic::arm_neon_vqrshiftns: |
| 17607 | case Intrinsic::arm_neon_vqrshiftnu: |
| 17608 | case Intrinsic::arm_neon_vqrshiftnsu: |
| 17609 | // Narrowing shifts require an immediate right shift. |
| 17610 | if (isVShiftRImm(Op: N->getOperand(Num: 2), VT, isNarrow: true, isIntrinsic: true, Cnt)) |
| 17611 | break; |
| 17612 | llvm_unreachable("invalid shift count for narrowing vector shift " |
| 17613 | "intrinsic" ); |
| 17614 | |
| 17615 | default: |
| 17616 | llvm_unreachable("unhandled vector shift"); |
| 17617 | } |
| 17618 | |
| 17619 | switch (IntNo) { |
| 17620 | case Intrinsic::arm_neon_vshifts: |
| 17621 | case Intrinsic::arm_neon_vshiftu: |
| 17622 | // Opcode already set above. |
| 17623 | break; |
| 17624 | case Intrinsic::arm_neon_vrshifts: |
| 17625 | VShiftOpc = ARMISD::VRSHRsIMM; |
| 17626 | break; |
| 17627 | case Intrinsic::arm_neon_vrshiftu: |
| 17628 | VShiftOpc = ARMISD::VRSHRuIMM; |
| 17629 | break; |
| 17630 | case Intrinsic::arm_neon_vrshiftn: |
| 17631 | VShiftOpc = ARMISD::VRSHRNIMM; |
| 17632 | break; |
| 17633 | case Intrinsic::arm_neon_vqshifts: |
| 17634 | VShiftOpc = ARMISD::VQSHLsIMM; |
| 17635 | break; |
| 17636 | case Intrinsic::arm_neon_vqshiftu: |
| 17637 | VShiftOpc = ARMISD::VQSHLuIMM; |
| 17638 | break; |
| 17639 | case Intrinsic::arm_neon_vqshiftsu: |
| 17640 | VShiftOpc = ARMISD::VQSHLsuIMM; |
| 17641 | break; |
| 17642 | case Intrinsic::arm_neon_vqshiftns: |
| 17643 | VShiftOpc = ARMISD::VQSHRNsIMM; |
| 17644 | break; |
| 17645 | case Intrinsic::arm_neon_vqshiftnu: |
| 17646 | VShiftOpc = ARMISD::VQSHRNuIMM; |
| 17647 | break; |
| 17648 | case Intrinsic::arm_neon_vqshiftnsu: |
| 17649 | VShiftOpc = ARMISD::VQSHRNsuIMM; |
| 17650 | break; |
| 17651 | case Intrinsic::arm_neon_vqrshiftns: |
| 17652 | VShiftOpc = ARMISD::VQRSHRNsIMM; |
| 17653 | break; |
| 17654 | case Intrinsic::arm_neon_vqrshiftnu: |
| 17655 | VShiftOpc = ARMISD::VQRSHRNuIMM; |
| 17656 | break; |
| 17657 | case Intrinsic::arm_neon_vqrshiftnsu: |
| 17658 | VShiftOpc = ARMISD::VQRSHRNsuIMM; |
| 17659 | break; |
| 17660 | } |
| 17661 | |
| 17662 | SDLoc dl(N); |
| 17663 | return DAG.getNode(Opcode: VShiftOpc, DL: dl, VT: N->getValueType(ResNo: 0), |
| 17664 | N1: N->getOperand(Num: 1), N2: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
| 17665 | } |
| 17666 | |
| 17667 | case Intrinsic::arm_neon_vshiftins: { |
| 17668 | EVT VT = N->getOperand(Num: 1).getValueType(); |
| 17669 | int64_t Cnt; |
| 17670 | unsigned VShiftOpc = 0; |
| 17671 | |
| 17672 | if (isVShiftLImm(Op: N->getOperand(Num: 3), VT, isLong: false, Cnt)) |
| 17673 | VShiftOpc = ARMISD::VSLIIMM; |
| 17674 | else if (isVShiftRImm(Op: N->getOperand(Num: 3), VT, isNarrow: false, isIntrinsic: true, Cnt)) |
| 17675 | VShiftOpc = ARMISD::VSRIIMM; |
| 17676 | else { |
| 17677 | llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); |
| 17678 | } |
| 17679 | |
| 17680 | SDLoc dl(N); |
| 17681 | return DAG.getNode(Opcode: VShiftOpc, DL: dl, VT: N->getValueType(ResNo: 0), |
| 17682 | N1: N->getOperand(Num: 1), N2: N->getOperand(Num: 2), |
| 17683 | N3: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
| 17684 | } |
| 17685 | |
| 17686 | case Intrinsic::arm_neon_vqrshifts: |
| 17687 | case Intrinsic::arm_neon_vqrshiftu: |
| 17688 | // No immediate versions of these to check for. |
| 17689 | break; |
| 17690 | |
| 17691 | case Intrinsic::arm_neon_vbsl: { |
| 17692 | SDLoc dl(N); |
| 17693 | return DAG.getNode(Opcode: ARMISD::VBSP, DL: dl, VT: N->getValueType(ResNo: 0), N1: N->getOperand(Num: 1), |
| 17694 | N2: N->getOperand(Num: 2), N3: N->getOperand(Num: 3)); |
| 17695 | } |
| 17696 | case Intrinsic::arm_mve_vqdmlah: |
| 17697 | case Intrinsic::arm_mve_vqdmlash: |
| 17698 | case Intrinsic::arm_mve_vqrdmlah: |
| 17699 | case Intrinsic::arm_mve_vqrdmlash: |
| 17700 | case Intrinsic::arm_mve_vmla_n_predicated: |
| 17701 | case Intrinsic::arm_mve_vmlas_n_predicated: |
| 17702 | case Intrinsic::arm_mve_vqdmlah_predicated: |
| 17703 | case Intrinsic::arm_mve_vqdmlash_predicated: |
| 17704 | case Intrinsic::arm_mve_vqrdmlah_predicated: |
| 17705 | case Intrinsic::arm_mve_vqrdmlash_predicated: { |
| 17706 | // These intrinsics all take an i32 scalar operand which is narrowed to the |
| 17707 | // size of a single lane of the vector type they return. So we don't need |
| 17708 | // any bits of that operand above that point, which allows us to eliminate |
| 17709 | // uxth/sxth. |
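| | // For example, for a v8i16 vqdmlah only the low 16 bits of the i32 scalar |
| | // operand are used, so a preceding sxth/uxth of that scalar can be dropped. |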
| 17710 | unsigned BitWidth = N->getValueType(ResNo: 0).getScalarSizeInBits(); |
| 17711 | APInt DemandedMask = APInt::getLowBitsSet(numBits: 32, loBitsSet: BitWidth); |
| 17712 | if (SimplifyDemandedBits(Op: N->getOperand(Num: 3), DemandedBits: DemandedMask, DCI)) |
| 17713 | return SDValue(); |
| 17714 | break; |
| 17715 | } |
| 17716 | |
| 17717 | case Intrinsic::arm_mve_minv: |
| 17718 | case Intrinsic::arm_mve_maxv: |
| 17719 | case Intrinsic::arm_mve_minav: |
| 17720 | case Intrinsic::arm_mve_maxav: |
| 17721 | case Intrinsic::arm_mve_minv_predicated: |
| 17722 | case Intrinsic::arm_mve_maxv_predicated: |
| 17723 | case Intrinsic::arm_mve_minav_predicated: |
| 17724 | case Intrinsic::arm_mve_maxav_predicated: { |
| 17725 | // These intrinsics all take an i32 scalar operand which is narrowed to the |
| 17726 | // size of a single lane of the vector type they take as the other input. |
| 17727 | unsigned BitWidth = N->getOperand(Num: 2)->getValueType(ResNo: 0).getScalarSizeInBits(); |
| 17728 | APInt DemandedMask = APInt::getLowBitsSet(numBits: 32, loBitsSet: BitWidth); |
| 17729 | if (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI)) |
| 17730 | return SDValue(); |
| 17731 | break; |
| 17732 | } |
| 17733 | |
| 17734 | case Intrinsic::arm_mve_addv: { |
| 17735 | // Turn this intrinsic straight into the appropriate ARMISD::VADDV node, |
| 17736 | // which allows PerformADDVecReduce to turn it into VADDLV when possible. |
| 17737 | bool Unsigned = N->getConstantOperandVal(Num: 2); |
| 17738 | unsigned Opc = Unsigned ? ARMISD::VADDVu : ARMISD::VADDVs; |
| 17739 | return DAG.getNode(Opcode: Opc, DL: SDLoc(N), VTList: N->getVTList(), N: N->getOperand(Num: 1)); |
| 17740 | } |
| 17741 | |
| 17742 | case Intrinsic::arm_mve_addlv: |
| 17743 | case Intrinsic::arm_mve_addlv_predicated: { |
| 17744 | // Same for these, but ARMISD::VADDLV has to be followed by a BUILD_PAIR |
| 17745 | // which recombines the two outputs into an i64 |
| 17746 | bool Unsigned = N->getConstantOperandVal(Num: 2); |
| 17747 | unsigned Opc = IntNo == Intrinsic::arm_mve_addlv ? |
| 17748 | (Unsigned ? ARMISD::VADDLVu : ARMISD::VADDLVs) : |
| 17749 | (Unsigned ? ARMISD::VADDLVpu : ARMISD::VADDLVps); |
| 17750 | |
| 17751 | SmallVector<SDValue, 4> Ops; |
| 17752 | for (unsigned i = 1, e = N->getNumOperands(); i < e; i++) |
| 17753 | if (i != 2) // skip the unsigned flag |
| 17754 | Ops.push_back(Elt: N->getOperand(Num: i)); |
| 17755 | |
| 17756 | SDLoc dl(N); |
| 17757 | SDValue val = DAG.getNode(Opcode: Opc, DL: dl, ResultTys: {MVT::i32, MVT::i32}, Ops); |
| 17758 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: val.getValue(R: 0), |
| 17759 | N2: val.getValue(R: 1)); |
| 17760 | } |
| 17761 | } |
| 17762 | |
| 17763 | return SDValue(); |
| 17764 | } |
| 17765 | |
| 17766 | /// PerformShiftCombine - Checks for immediate versions of vector shifts and |
| 17767 | /// lowers them. As with the vector shift intrinsics, this is done during DAG |
| 17768 | /// combining instead of DAG legalizing because the build_vectors for 64-bit |
| 17769 | /// vector element shift counts are generally not legal, and it is hard to see |
| 17770 | /// their values after they get legalized to loads from a constant pool. |
| 17771 | static SDValue PerformShiftCombine(SDNode *N, |
| 17772 | TargetLowering::DAGCombinerInfo &DCI, |
| 17773 | const ARMSubtarget *ST) { |
| 17774 | SelectionDAG &DAG = DCI.DAG; |
| 17775 | EVT VT = N->getValueType(ResNo: 0); |
| 17776 | |
| 17777 | if (ST->isThumb1Only() && N->getOpcode() == ISD::SHL && VT == MVT::i32 && |
| 17778 | N->getOperand(Num: 0)->getOpcode() == ISD::AND && |
| 17779 | N->getOperand(Num: 0)->hasOneUse()) { |
| 17780 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 17781 | return SDValue(); |
| 17782 | // Look for the pattern (shl (and x, AndMask), ShiftAmt). This doesn't |
| 17783 | // usually show up because instcombine prefers to canonicalize it to |
| 17784 | // (and (shl x, ShiftAmt) (shl AndMask, ShiftAmt)), but the shift can come |
| 17785 | // out of GEP lowering in some cases. |
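| | // For example, with AndMask = 0x7ff (MaskedBits = 21) and ShiftAmt = 2: |
| | //   (shl (and x, 0x7ff), 2) -> (srl (shl x, 21), 19) |
| | // which avoids materializing the mask constant on Thumb1. |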
| 17786 | SDValue N0 = N->getOperand(Num: 0); |
| 17787 | ConstantSDNode *ShiftAmtNode = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1)); |
| 17788 | if (!ShiftAmtNode) |
| 17789 | return SDValue(); |
| 17790 | uint32_t ShiftAmt = static_cast<uint32_t>(ShiftAmtNode->getZExtValue()); |
| 17791 | ConstantSDNode *AndMaskNode = dyn_cast<ConstantSDNode>(Val: N0->getOperand(Num: 1)); |
| 17792 | if (!AndMaskNode) |
| 17793 | return SDValue(); |
| 17794 | uint32_t AndMask = static_cast<uint32_t>(AndMaskNode->getZExtValue()); |
| 17795 | // Don't transform uxtb/uxth. |
| 17796 | if (AndMask == 255 || AndMask == 65535) |
| 17797 | return SDValue(); |
| 17798 | if (isMask_32(Value: AndMask)) { |
| 17799 | uint32_t MaskedBits = llvm::countl_zero(Val: AndMask); |
| 17800 | if (MaskedBits > ShiftAmt) { |
| 17801 | SDLoc DL(N); |
| 17802 | SDValue SHL = DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
| 17803 | N2: DAG.getConstant(Val: MaskedBits, DL, VT: MVT::i32)); |
| 17804 | return DAG.getNode( |
| 17805 | Opcode: ISD::SRL, DL, VT: MVT::i32, N1: SHL, |
| 17806 | N2: DAG.getConstant(Val: MaskedBits - ShiftAmt, DL, VT: MVT::i32)); |
| 17807 | } |
| 17808 | } |
| 17809 | } |
| 17810 | |
| 17811 | // Nothing to be done for scalar shifts. |
| 17812 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 17813 | if (!VT.isVector() || !TLI.isTypeLegal(VT)) |
| 17814 | return SDValue(); |
| 17815 | if (ST->hasMVEIntegerOps()) |
| 17816 | return SDValue(); |
| 17817 | |
| 17818 | int64_t Cnt; |
| 17819 | |
| 17820 | switch (N->getOpcode()) { |
| 17821 | default: llvm_unreachable("unexpected shift opcode"); |
| 17822 | |
| 17823 | case ISD::SHL: |
| 17824 | if (isVShiftLImm(Op: N->getOperand(Num: 1), VT, isLong: false, Cnt)) { |
| 17825 | SDLoc dl(N); |
| 17826 | return DAG.getNode(Opcode: ARMISD::VSHLIMM, DL: dl, VT, N1: N->getOperand(Num: 0), |
| 17827 | N2: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
| 17828 | } |
| 17829 | break; |
| 17830 | |
| 17831 | case ISD::SRA: |
| 17832 | case ISD::SRL: |
| 17833 | if (isVShiftRImm(Op: N->getOperand(Num: 1), VT, isNarrow: false, isIntrinsic: false, Cnt)) { |
| 17834 | unsigned VShiftOpc = |
| 17835 | (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM); |
| 17836 | SDLoc dl(N); |
| 17837 | return DAG.getNode(Opcode: VShiftOpc, DL: dl, VT, N1: N->getOperand(Num: 0), |
| 17838 | N2: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
| 17839 | } |
| 17840 | } |
| 17841 | return SDValue(); |
| 17842 | } |
| 17843 | |
| 17844 | // Look for a sign/zero/fp extend of a larger than legal load. This can be |
| 17845 | // split into multiple extending loads, which are simpler to deal with than an |
| 17846 | // arbitrary extend. For fp extends we use an integer extending load and a VCVTL |
| 17847 | // to convert the type to an f32. |
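| | // For example, (zext (load <8 x i8>) to <8 x i32>) is split below into two |
| | // v4i8->v4i32 zero-extending loads (at byte offsets 0 and 4) that are then |
| | // concatenated, and (fpext (load <8 x f16>) to <8 x f32>) becomes two loads |
| | // each followed by a VCVTL. |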
| 17848 | static SDValue PerformSplittingToWideningLoad(SDNode *N, SelectionDAG &DAG) { |
| 17849 | SDValue N0 = N->getOperand(Num: 0); |
| 17850 | if (N0.getOpcode() != ISD::LOAD) |
| 17851 | return SDValue(); |
| 17852 | LoadSDNode *LD = cast<LoadSDNode>(Val: N0.getNode()); |
| 17853 | if (!LD->isSimple() || !N0.hasOneUse() || LD->isIndexed() || |
| 17854 | LD->getExtensionType() != ISD::NON_EXTLOAD) |
| 17855 | return SDValue(); |
| 17856 | EVT FromVT = LD->getValueType(ResNo: 0); |
| 17857 | EVT ToVT = N->getValueType(ResNo: 0); |
| 17858 | if (!ToVT.isVector()) |
| 17859 | return SDValue(); |
| 17860 | assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements()); |
| 17861 | EVT ToEltVT = ToVT.getVectorElementType(); |
| 17862 | EVT FromEltVT = FromVT.getVectorElementType(); |
| 17863 | |
| 17864 | unsigned NumElements = 0; |
| 17865 | if (ToEltVT == MVT::i32 && FromEltVT == MVT::i8) |
| 17866 | NumElements = 4; |
| 17867 | if (ToEltVT == MVT::f32 && FromEltVT == MVT::f16) |
| 17868 | NumElements = 4; |
| 17869 | if (NumElements == 0 || |
| 17870 | (FromEltVT != MVT::f16 && FromVT.getVectorNumElements() == NumElements) || |
| 17871 | FromVT.getVectorNumElements() % NumElements != 0 || |
| 17872 | !isPowerOf2_32(Value: NumElements)) |
| 17873 | return SDValue(); |
| 17874 | |
| 17875 | LLVMContext &C = *DAG.getContext(); |
| 17876 | SDLoc DL(LD); |
| 17877 | // Details about the old load |
| 17878 | SDValue Ch = LD->getChain(); |
| 17879 | SDValue BasePtr = LD->getBasePtr(); |
| 17880 | Align Alignment = LD->getBaseAlign(); |
| 17881 | MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags(); |
| 17882 | AAMDNodes AAInfo = LD->getAAInfo(); |
| 17883 | |
| 17884 | ISD::LoadExtType NewExtType = |
| 17885 | N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SEXTLOAD : ISD::ZEXTLOAD; |
| 17886 | SDValue Offset = DAG.getUNDEF(VT: BasePtr.getValueType()); |
| 17887 | EVT NewFromVT = EVT::getVectorVT( |
| 17888 | Context&: C, VT: EVT::getIntegerVT(Context&: C, BitWidth: FromEltVT.getScalarSizeInBits()), NumElements); |
| 17889 | EVT NewToVT = EVT::getVectorVT( |
| 17890 | Context&: C, VT: EVT::getIntegerVT(Context&: C, BitWidth: ToEltVT.getScalarSizeInBits()), NumElements); |
| 17891 | |
| 17892 | SmallVector<SDValue, 4> Loads; |
| 17893 | SmallVector<SDValue, 4> Chains; |
| 17894 | for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) { |
| 17895 | unsigned NewOffset = (i * NewFromVT.getSizeInBits()) / 8; |
| 17896 | SDValue NewPtr = |
| 17897 | DAG.getObjectPtrOffset(SL: DL, Ptr: BasePtr, Offset: TypeSize::getFixed(ExactSize: NewOffset)); |
| 17898 | |
| 17899 | SDValue NewLoad = |
| 17900 | DAG.getLoad(AM: ISD::UNINDEXED, ExtType: NewExtType, VT: NewToVT, dl: DL, Chain: Ch, Ptr: NewPtr, Offset, |
| 17901 | PtrInfo: LD->getPointerInfo().getWithOffset(O: NewOffset), MemVT: NewFromVT, |
| 17902 | Alignment, MMOFlags, AAInfo); |
| 17903 | Loads.push_back(Elt: NewLoad); |
| 17904 | Chains.push_back(Elt: SDValue(NewLoad.getNode(), 1)); |
| 17905 | } |
| 17906 | |
| 17907 | // Float truncs need to be extended with VCVTB's into their floating point types. |
| 17908 | if (FromEltVT == MVT::f16) { |
| 17909 | SmallVector<SDValue, 4> Extends; |
| 17910 | |
| 17911 | for (unsigned i = 0; i < Loads.size(); i++) { |
| 17912 | SDValue LoadBC = |
| 17913 | DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT: MVT::v8f16, Operand: Loads[i]); |
| 17914 | SDValue FPExt = DAG.getNode(Opcode: ARMISD::VCVTL, DL, VT: MVT::v4f32, N1: LoadBC, |
| 17915 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
| 17916 | Extends.push_back(Elt: FPExt); |
| 17917 | } |
| 17918 | |
| 17919 | Loads = Extends; |
| 17920 | } |
| 17921 | |
| 17922 | SDValue NewChain = DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Chains); |
| 17923 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 1), To: NewChain); |
| 17924 | return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: ToVT, Ops: Loads); |
| 17925 | } |
| 17926 | |
| 17927 | /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, |
| 17928 | /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. |
| 17929 | static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, |
| 17930 | const ARMSubtarget *ST) { |
| 17931 | SDValue N0 = N->getOperand(Num: 0); |
| 17932 | |
| 17933 | // Check for sign- and zero-extensions of vector extract operations of 8- and |
| 17934 | // 16-bit vector elements. NEON and MVE support these directly. They are |
| 17935 | // handled during DAG combining because type legalization will promote them |
| 17936 | // to 32-bit types and it is messy to recognize the operations after that. |
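| | // For example, (sext (extract_vector_elt v8i16:x, 3) to i32) becomes |
| | // VGETLANEs x, 3, and the zext/anyext forms become VGETLANEu. |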
| 17937 | if ((ST->hasNEON() || ST->hasMVEIntegerOps()) && |
| 17938 | N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { |
| 17939 | SDValue Vec = N0.getOperand(i: 0); |
| 17940 | SDValue Lane = N0.getOperand(i: 1); |
| 17941 | EVT VT = N->getValueType(ResNo: 0); |
| 17942 | EVT EltVT = N0.getValueType(); |
| 17943 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 17944 | |
| 17945 | if (VT == MVT::i32 && |
| 17946 | (EltVT == MVT::i8 || EltVT == MVT::i16) && |
| 17947 | TLI.isTypeLegal(VT: Vec.getValueType()) && |
| 17948 | isa<ConstantSDNode>(Val: Lane)) { |
| 17949 | |
| 17950 | unsigned Opc = 0; |
| 17951 | switch (N->getOpcode()) { |
| 17952 | default: llvm_unreachable("unexpected opcode"); |
| 17953 | case ISD::SIGN_EXTEND: |
| 17954 | Opc = ARMISD::VGETLANEs; |
| 17955 | break; |
| 17956 | case ISD::ZERO_EXTEND: |
| 17957 | case ISD::ANY_EXTEND: |
| 17958 | Opc = ARMISD::VGETLANEu; |
| 17959 | break; |
| 17960 | } |
| 17961 | return DAG.getNode(Opcode: Opc, DL: SDLoc(N), VT, N1: Vec, N2: Lane); |
| 17962 | } |
| 17963 | } |
| 17964 | |
| 17965 | if (ST->hasMVEIntegerOps()) |
| 17966 | if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG)) |
| 17967 | return NewLoad; |
| 17968 | |
| 17969 | return SDValue(); |
| 17970 | } |
| 17971 | |
| 17972 | static SDValue PerformFPExtendCombine(SDNode *N, SelectionDAG &DAG, |
| 17973 | const ARMSubtarget *ST) { |
| 17974 | if (ST->hasMVEFloatOps()) |
| 17975 | if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG)) |
| 17976 | return NewLoad; |
| 17977 | |
| 17978 | return SDValue(); |
| 17979 | } |
| 17980 | |
| 17981 | // Lower smin(smax(x, C1), C2) to ssat or usat, if they have saturating |
| 17982 | // constant bounds. |
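| | // For example, smin(smax(x, -128), 127) becomes an SSAT node clamping x to |
| | // the signed 8-bit range, and smin(smax(x, 0), 255) becomes a USAT node |
| | // clamping x to the unsigned 8-bit range. |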
| 17983 | static SDValue PerformMinMaxToSatCombine(SDValue Op, SelectionDAG &DAG, |
| 17984 | const ARMSubtarget *Subtarget) { |
| 17985 | if ((Subtarget->isThumb() || !Subtarget->hasV6Ops()) && |
| 17986 | !Subtarget->isThumb2()) |
| 17987 | return SDValue(); |
| 17988 | |
| 17989 | EVT VT = Op.getValueType(); |
| 17990 | SDValue Op0 = Op.getOperand(i: 0); |
| 17991 | |
| 17992 | if (VT != MVT::i32 || |
| 17993 | (Op0.getOpcode() != ISD::SMIN && Op0.getOpcode() != ISD::SMAX) || |
| 17994 | !isa<ConstantSDNode>(Val: Op.getOperand(i: 1)) || |
| 17995 | !isa<ConstantSDNode>(Val: Op0.getOperand(i: 1))) |
| 17996 | return SDValue(); |
| 17997 | |
| 17998 | SDValue Min = Op; |
| 17999 | SDValue Max = Op0; |
| 18000 | SDValue Input = Op0.getOperand(i: 0); |
| 18001 | if (Min.getOpcode() == ISD::SMAX) |
| 18002 | std::swap(a&: Min, b&: Max); |
| 18003 | |
| 18004 | APInt MinC = Min.getConstantOperandAPInt(i: 1); |
| 18005 | APInt MaxC = Max.getConstantOperandAPInt(i: 1); |
| 18006 | |
| 18007 | if (Min.getOpcode() != ISD::SMIN || Max.getOpcode() != ISD::SMAX || |
| 18008 | !(MinC + 1).isPowerOf2()) |
| 18009 | return SDValue(); |
| 18010 | |
| 18011 | SDLoc DL(Op); |
| 18012 | if (MinC == ~MaxC) |
| 18013 | return DAG.getNode(Opcode: ARMISD::SSAT, DL, VT, N1: Input, |
| 18014 | N2: DAG.getConstant(Val: MinC.countr_one(), DL, VT)); |
| 18015 | if (MaxC == 0) |
| 18016 | return DAG.getNode(Opcode: ARMISD::USAT, DL, VT, N1: Input, |
| 18017 | N2: DAG.getConstant(Val: MinC.countr_one(), DL, VT)); |
| 18018 | |
| 18019 | return SDValue(); |
| 18020 | } |
| 18021 | |
| 18022 | /// PerformMinMaxCombine - Target-specific DAG combining for creating truncating |
| 18023 | /// saturates. |
| 18024 | static SDValue PerformMinMaxCombine(SDNode *N, SelectionDAG &DAG, |
| 18025 | const ARMSubtarget *ST) { |
| 18026 | EVT VT = N->getValueType(ResNo: 0); |
| 18027 | SDValue N0 = N->getOperand(Num: 0); |
| 18028 | |
| 18029 | if (VT == MVT::i32) |
| 18030 | return PerformMinMaxToSatCombine(Op: SDValue(N, 0), DAG, Subtarget: ST); |
| 18031 | |
| 18032 | if (!ST->hasMVEIntegerOps()) |
| 18033 | return SDValue(); |
| 18034 | |
| 18035 | if (SDValue V = PerformVQDMULHCombine(N, DAG)) |
| 18036 | return V; |
| 18037 | |
| 18038 | if (VT != MVT::v4i32 && VT != MVT::v8i16) |
| 18039 | return SDValue(); |
| 18040 | |
| 18041 | auto IsSignedSaturate = [&](SDNode *Min, SDNode *Max) { |
| 18042 | // Check one is a smin and the other is a smax |
| 18043 | if (Min->getOpcode() != ISD::SMIN) |
| 18044 | std::swap(a&: Min, b&: Max); |
| 18045 | if (Min->getOpcode() != ISD::SMIN || Max->getOpcode() != ISD::SMAX) |
| 18046 | return false; |
| 18047 | |
| 18048 | APInt SaturateC; |
| 18049 | if (VT == MVT::v4i32) |
| 18050 | SaturateC = APInt(32, (1 << 15) - 1, true); |
| 18051 | else //if (VT == MVT::v8i16) |
| 18052 | SaturateC = APInt(16, (1 << 7) - 1, true); |
| 18053 | |
| 18054 | APInt MinC, MaxC; |
| 18055 | if (!ISD::isConstantSplatVector(N: Min->getOperand(Num: 1).getNode(), SplatValue&: MinC) || |
| 18056 | MinC != SaturateC) |
| 18057 | return false; |
| 18058 | if (!ISD::isConstantSplatVector(N: Max->getOperand(Num: 1).getNode(), SplatValue&: MaxC) || |
| 18059 | MaxC != ~SaturateC) |
| 18060 | return false; |
| 18061 | return true; |
| 18062 | }; |
| 18063 | |
| 18064 | if (IsSignedSaturate(N, N0.getNode())) { |
| 18065 | SDLoc DL(N); |
| 18066 | MVT ExtVT, HalfVT; |
| 18067 | if (VT == MVT::v4i32) { |
| 18068 | HalfVT = MVT::v8i16; |
| 18069 | ExtVT = MVT::v4i16; |
| 18070 | } else { // if (VT == MVT::v8i16) |
| 18071 | HalfVT = MVT::v16i8; |
| 18072 | ExtVT = MVT::v8i8; |
| 18073 | } |
| 18074 | |
| 18075 | // Create a VQMOVNB with undef top lanes, then sign extend it into the top |
| 18076 | // half. That extend will hopefully be removed if only the bottom bits are |
| 18077 | // demanded (through a truncating store, for example). |
| 18078 | SDValue VQMOVN = |
| 18079 | DAG.getNode(Opcode: ARMISD::VQMOVNs, DL, VT: HalfVT, N1: DAG.getUNDEF(VT: HalfVT), |
| 18080 | N2: N0->getOperand(Num: 0), N3: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
| 18081 | SDValue Bitcast = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: VQMOVN); |
| 18082 | return DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL, VT, N1: Bitcast, |
| 18083 | N2: DAG.getValueType(ExtVT)); |
| 18084 | } |
| 18085 | |
| 18086 | auto IsUnsignedSaturate = [&](SDNode *Min) { |
| 18087 | // For unsigned, we just need to check for <= 0xffff |
| 18088 | if (Min->getOpcode() != ISD::UMIN) |
| 18089 | return false; |
| 18090 | |
| 18091 | APInt SaturateC; |
| 18092 | if (VT == MVT::v4i32) |
| 18093 | SaturateC = APInt(32, (1 << 16) - 1, true); |
| 18094 | else //if (VT == MVT::v8i16) |
| 18095 | SaturateC = APInt(16, (1 << 8) - 1, true); |
| 18096 | |
| 18097 | APInt MinC; |
| 18098 | if (!ISD::isConstantSplatVector(N: Min->getOperand(Num: 1).getNode(), SplatValue&: MinC) || |
| 18099 | MinC != SaturateC) |
| 18100 | return false; |
| 18101 | return true; |
| 18102 | }; |
| 18103 | |
| 18104 | if (IsUnsignedSaturate(N)) { |
| 18105 | SDLoc DL(N); |
| 18106 | MVT HalfVT; |
| 18107 | unsigned ExtConst; |
| 18108 | if (VT == MVT::v4i32) { |
| 18109 | HalfVT = MVT::v8i16; |
| 18110 | ExtConst = 0x0000FFFF; |
| 18111 | } else { //if (VT == MVT::v8i16) |
| 18112 | HalfVT = MVT::v16i8; |
| 18113 | ExtConst = 0x00FF; |
| 18114 | } |
| 18115 | |
| 18116 | // Create a VQMOVNB with undef top lanes, then ZExt into the top half with |
| 18117 | // an AND. That extend will hopefully be removed if only the bottom bits are |
| 18118 | // demanded (through a truncating store, for example). |
| 18119 | SDValue VQMOVN = |
| 18120 | DAG.getNode(Opcode: ARMISD::VQMOVNu, DL, VT: HalfVT, N1: DAG.getUNDEF(VT: HalfVT), N2: N0, |
| 18121 | N3: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
| 18122 | SDValue Bitcast = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: VQMOVN); |
| 18123 | return DAG.getNode(Opcode: ISD::AND, DL, VT, N1: Bitcast, |
| 18124 | N2: DAG.getConstant(Val: ExtConst, DL, VT)); |
| 18125 | } |
| 18126 | |
| 18127 | return SDValue(); |
| 18128 | } |
| 18129 | |
| 18130 | static const APInt *isPowerOf2Constant(SDValue V) { |
| 18131 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: V); |
| 18132 | if (!C) |
| 18133 | return nullptr; |
| 18134 | const APInt *CV = &C->getAPIntValue(); |
| 18135 | return CV->isPowerOf2() ? CV : nullptr; |
| 18136 | } |
| 18137 | |
| 18138 | SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const { |
| 18139 | // If we have a CMOV, OR and AND combination such as: |
| 18140 | // if (x & CN) |
| 18141 | // y |= CM; |
| 18142 | // |
| 18143 | // And: |
| 18144 | // * CN is a single bit; |
| 18145 | // * All bits covered by CM are known zero in y |
| 18146 | // |
| 18147 | // Then we can convert this into a sequence of BFI instructions. This will |
| 18148 | // always be a win if CM is a single bit, will always be no worse than the |
| 18149 | // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is |
| 18150 | // three bits (due to the extra IT instruction). |
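| | // For example, with CN = 0x4, CM = 0x30 and bits 4-5 of y known zero, this |
| | // becomes a shift of x right by 2 followed by two single-bit BFIs copying |
| | // the tested bit into bits 4 and 5 of y. |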
| 18151 | |
| 18152 | SDValue Op0 = CMOV->getOperand(Num: 0); |
| 18153 | SDValue Op1 = CMOV->getOperand(Num: 1); |
| 18154 | auto CC = CMOV->getConstantOperandAPInt(Num: 2).getLimitedValue(); |
| 18155 | SDValue CmpZ = CMOV->getOperand(Num: 3); |
| 18156 | |
| 18157 | // The compare must be against zero. |
| 18158 | if (!isNullConstant(V: CmpZ->getOperand(Num: 1))) |
| 18159 | return SDValue(); |
| 18160 | |
| 18161 | assert(CmpZ->getOpcode() == ARMISD::CMPZ); |
| 18162 | SDValue And = CmpZ->getOperand(Num: 0); |
| 18163 | if (And->getOpcode() != ISD::AND) |
| 18164 | return SDValue(); |
| 18165 | const APInt *AndC = isPowerOf2Constant(V: And->getOperand(Num: 1)); |
| 18166 | if (!AndC) |
| 18167 | return SDValue(); |
| 18168 | SDValue X = And->getOperand(Num: 0); |
| 18169 | |
| 18170 | if (CC == ARMCC::EQ) { |
| 18171 | // We're performing an "equal to zero" compare. Swap the CMOV operands so we |
| 18172 | // canonicalize on a "not equal to zero" compare. |
| 18173 | std::swap(a&: Op0, b&: Op1); |
| 18174 | } else { |
| 18175 | assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?"); |
| 18176 | } |
| 18177 | |
| 18178 | if (Op1->getOpcode() != ISD::OR) |
| 18179 | return SDValue(); |
| 18180 | |
| 18181 | ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Val: Op1->getOperand(Num: 1)); |
| 18182 | if (!OrC) |
| 18183 | return SDValue(); |
| 18184 | SDValue Y = Op1->getOperand(Num: 0); |
| 18185 | |
| 18186 | if (Op0 != Y) |
| 18187 | return SDValue(); |
| 18188 | |
| 18189 | // Now, is it profitable to continue? |
| 18190 | APInt OrCI = OrC->getAPIntValue(); |
| 18191 | unsigned Heuristic = Subtarget->isThumb() ? 3 : 2; |
| 18192 | if (OrCI.popcount() > Heuristic) |
| 18193 | return SDValue(); |
| 18194 | |
| 18195 | // Lastly, can we determine that the bits defined by OrCI |
| 18196 | // are zero in Y? |
| 18197 | KnownBits Known = DAG.computeKnownBits(Op: Y); |
| 18198 | if ((OrCI & Known.Zero) != OrCI) |
| 18199 | return SDValue(); |
| 18200 | |
| 18201 | // OK, we can do the combine. |
| 18202 | SDValue V = Y; |
| 18203 | SDLoc dl(X); |
| 18204 | EVT VT = X.getValueType(); |
| 18205 | unsigned BitInX = AndC->logBase2(); |
| 18206 | |
| 18207 | if (BitInX != 0) { |
| 18208 | // We must shift X first. |
| 18209 | X = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT, N1: X, |
| 18210 | N2: DAG.getConstant(Val: BitInX, DL: dl, VT)); |
| 18211 | } |
| 18212 | |
| 18213 | for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits(); |
| 18214 | BitInY < NumActiveBits; ++BitInY) { |
| 18215 | if (OrCI[BitInY] == 0) |
| 18216 | continue; |
| 18217 | APInt Mask(VT.getSizeInBits(), 0); |
| 18218 | Mask.setBit(BitInY); |
| 18219 | V = DAG.getNode(Opcode: ARMISD::BFI, DL: dl, VT, N1: V, N2: X, |
| 18220 | // Confusingly, the operand is an *inverted* mask. |
| 18221 | N3: DAG.getConstant(Val: ~Mask, DL: dl, VT)); |
| 18222 | } |
| 18223 | |
| 18224 | return V; |
| 18225 | } |
| 18226 | |
| 18227 | // Given N, the value controlling the conditional branch, search for the loop |
| 18228 | // intrinsic, returning it, along with how the value is used. We need to handle |
| 18229 | // patterns such as the following: |
| 18230 | // (brcond (xor (setcc (loop.decrement), 0, ne), 1), exit) |
| 18231 | // (brcond (setcc (loop.decrement), 0, eq), exit) |
| 18232 | // (brcond (setcc (loop.decrement), 0, ne), header) |
| 18233 | static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm, |
| 18234 | bool &Negate) { |
| 18235 | switch (N->getOpcode()) { |
| 18236 | default: |
| 18237 | break; |
| 18238 | case ISD::XOR: { |
| 18239 | if (!isa<ConstantSDNode>(Val: N.getOperand(i: 1))) |
| 18240 | return SDValue(); |
| 18241 | if (!cast<ConstantSDNode>(Val: N.getOperand(i: 1))->isOne()) |
| 18242 | return SDValue(); |
| 18243 | Negate = !Negate; |
| 18244 | return SearchLoopIntrinsic(N: N.getOperand(i: 0), CC, Imm, Negate); |
| 18245 | } |
| 18246 | case ISD::SETCC: { |
| 18247 | auto *Const = dyn_cast<ConstantSDNode>(Val: N.getOperand(i: 1)); |
| 18248 | if (!Const) |
| 18249 | return SDValue(); |
| 18250 | if (Const->isZero()) |
| 18251 | Imm = 0; |
| 18252 | else if (Const->isOne()) |
| 18253 | Imm = 1; |
| 18254 | else |
| 18255 | return SDValue(); |
| 18256 | CC = cast<CondCodeSDNode>(Val: N.getOperand(i: 2))->get(); |
| 18257 | return SearchLoopIntrinsic(N: N->getOperand(Num: 0), CC, Imm, Negate); |
| 18258 | } |
| 18259 | case ISD::INTRINSIC_W_CHAIN: { |
| 18260 | unsigned IntOp = N.getConstantOperandVal(i: 1); |
| 18261 | if (IntOp != Intrinsic::test_start_loop_iterations && |
| 18262 | IntOp != Intrinsic::loop_decrement_reg) |
| 18263 | return SDValue(); |
| 18264 | return N; |
| 18265 | } |
| 18266 | } |
| 18267 | return SDValue(); |
| 18268 | } |
| 18269 | |
| 18270 | static SDValue PerformHWLoopCombine(SDNode *N, |
| 18271 | TargetLowering::DAGCombinerInfo &DCI, |
| 18272 | const ARMSubtarget *ST) { |
| 18273 | |
| 18274 | // The hwloop intrinsics that we're interested are used for control-flow, |
| 18275 | // either for entering or exiting the loop: |
| 18276 | // - test.start.loop.iterations will test whether its operand is zero. If it |
| 18277 | // is zero, the proceeding branch should not enter the loop. |
| 18278 | // - loop.decrement.reg also tests whether its operand is zero. If it is |
| 18279 | // zero, the proceeding branch should not branch back to the beginning of |
| 18280 | // the loop. |
| 18281 | // So here, we need to check how the brcond is using the result of each |
| 18282 | // of the intrinsics to ensure that we're branching to the right place at the |
| 18283 | // right time. |
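| | // For example, for a typical loop-entry check: |
| | //   (brcond (setcc (test.start.loop.iterations %n), 0, eq), %exit) |
| | // becomes t = WLSSETUP %n followed by (WLS t, %exit), which branches to |
| | // %exit when the trip count is zero. |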
| 18284 | |
| 18285 | ISD::CondCode CC; |
| 18286 | SDValue Cond; |
| 18287 | int Imm = 1; |
| 18288 | bool Negate = false; |
| 18289 | SDValue Chain = N->getOperand(Num: 0); |
| 18290 | SDValue Dest; |
| 18291 | |
| 18292 | if (N->getOpcode() == ISD::BRCOND) { |
| 18293 | CC = ISD::SETEQ; |
| 18294 | Cond = N->getOperand(Num: 1); |
| 18295 | Dest = N->getOperand(Num: 2); |
| 18296 | } else { |
| 18297 | assert(N->getOpcode() == ISD::BR_CC && "Expected BRCOND or BR_CC!"); |
| 18298 | CC = cast<CondCodeSDNode>(Val: N->getOperand(Num: 1))->get(); |
| 18299 | Cond = N->getOperand(Num: 2); |
| 18300 | Dest = N->getOperand(Num: 4); |
| 18301 | if (auto *Const = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 3))) { |
| 18302 | if (!Const->isOne() && !Const->isZero()) |
| 18303 | return SDValue(); |
| 18304 | Imm = Const->getZExtValue(); |
| 18305 | } else |
| 18306 | return SDValue(); |
| 18307 | } |
| 18308 | |
| 18309 | SDValue Int = SearchLoopIntrinsic(N: Cond, CC, Imm, Negate); |
| 18310 | if (!Int) |
| 18311 | return SDValue(); |
| 18312 | |
| 18313 | if (Negate) |
| 18314 | CC = ISD::getSetCCInverse(Operation: CC, /* Integer inverse */ Type: MVT::i32); |
| 18315 | |
| 18316 | auto IsTrueIfZero = [](ISD::CondCode CC, int Imm) { |
| 18317 | return (CC == ISD::SETEQ && Imm == 0) || |
| 18318 | (CC == ISD::SETNE && Imm == 1) || |
| 18319 | (CC == ISD::SETLT && Imm == 1) || |
| 18320 | (CC == ISD::SETULT && Imm == 1); |
| 18321 | }; |
| 18322 | |
| 18323 | auto IsFalseIfZero = [](ISD::CondCode CC, int Imm) { |
| 18324 | return (CC == ISD::SETEQ && Imm == 1) || |
| 18325 | (CC == ISD::SETNE && Imm == 0) || |
| 18326 | (CC == ISD::SETGT && Imm == 0) || |
| 18327 | (CC == ISD::SETUGT && Imm == 0) || |
| 18328 | (CC == ISD::SETGE && Imm == 1) || |
| 18329 | (CC == ISD::SETUGE && Imm == 1); |
| 18330 | }; |
| 18331 | |
| 18332 | assert((IsTrueIfZero(CC, Imm) || IsFalseIfZero(CC, Imm)) && |
| 18333 | "unsupported condition" ); |
| 18334 | |
| 18335 | SDLoc dl(Int); |
| 18336 | SelectionDAG &DAG = DCI.DAG; |
| 18337 | SDValue Elements = Int.getOperand(i: 2); |
| 18338 | unsigned IntOp = Int->getConstantOperandVal(Num: 1); |
| 18339 | assert((N->hasOneUse() && N->user_begin()->getOpcode() == ISD::BR) && |
| 18340 | "expected single br user" ); |
| 18341 | SDNode *Br = *N->user_begin(); |
| 18342 | SDValue OtherTarget = Br->getOperand(Num: 1); |
| 18343 | |
| 18344 | // Update the unconditional branch to branch to the given Dest. |
| 18345 | auto UpdateUncondBr = [](SDNode *Br, SDValue Dest, SelectionDAG &DAG) { |
| 18346 | SDValue NewBrOps[] = { Br->getOperand(Num: 0), Dest }; |
| 18347 | SDValue NewBr = DAG.getNode(Opcode: ISD::BR, DL: SDLoc(Br), VT: MVT::Other, Ops: NewBrOps); |
| 18348 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(Br, 0), To: NewBr); |
| 18349 | }; |
| 18350 | |
| 18351 | if (IntOp == Intrinsic::test_start_loop_iterations) { |
| 18352 | SDValue Res; |
| 18353 | SDValue Setup = DAG.getNode(Opcode: ARMISD::WLSSETUP, DL: dl, VT: MVT::i32, Operand: Elements); |
| 18354 | // We expect this 'instruction' to branch when the counter is zero. |
| 18355 | if (IsTrueIfZero(CC, Imm)) { |
| 18356 | SDValue Ops[] = {Chain, Setup, Dest}; |
| 18357 | Res = DAG.getNode(Opcode: ARMISD::WLS, DL: dl, VT: MVT::Other, Ops); |
| 18358 | } else { |
| 18359 | // The logic is the reverse of what we need for WLS, so find the other |
| 18360 | // basic block target: the target of the proceeding br. |
| 18361 | UpdateUncondBr(Br, Dest, DAG); |
| 18362 | |
| 18363 | SDValue Ops[] = {Chain, Setup, OtherTarget}; |
| 18364 | Res = DAG.getNode(Opcode: ARMISD::WLS, DL: dl, VT: MVT::Other, Ops); |
| 18365 | } |
| 18366 | // Update LR count to the new value |
| 18367 | DAG.ReplaceAllUsesOfValueWith(From: Int.getValue(R: 0), To: Setup); |
| 18368 | // Update chain |
| 18369 | DAG.ReplaceAllUsesOfValueWith(From: Int.getValue(R: 2), To: Int.getOperand(i: 0)); |
| 18370 | return Res; |
| 18371 | } else { |
| 18372 | SDValue Size = |
| 18373 | DAG.getTargetConstant(Val: Int.getConstantOperandVal(i: 3), DL: dl, VT: MVT::i32); |
| 18374 | SDValue Args[] = { Int.getOperand(i: 0), Elements, Size, }; |
| 18375 | SDValue LoopDec = DAG.getNode(Opcode: ARMISD::LOOP_DEC, DL: dl, |
| 18376 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::Other), Ops: Args); |
| 18377 | DAG.ReplaceAllUsesWith(From: Int.getNode(), To: LoopDec.getNode()); |
| 18378 | |
| 18379 | // We expect this instruction to branch when the count is not zero. |
| 18380 | SDValue Target = IsFalseIfZero(CC, Imm) ? Dest : OtherTarget; |
| 18381 | |
| 18382 | // Update the unconditional branch to target the loop preheader if we've |
| 18383 | // found the condition has been reversed. |
| 18384 | if (Target == OtherTarget) |
| 18385 | UpdateUncondBr(Br, Dest, DAG); |
| 18386 | |
| 18387 | Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, |
| 18388 | N1: SDValue(LoopDec.getNode(), 1), N2: Chain); |
| 18389 | |
| 18390 | SDValue EndArgs[] = { Chain, SDValue(LoopDec.getNode(), 0), Target }; |
| 18391 | return DAG.getNode(Opcode: ARMISD::LE, DL: dl, VT: MVT::Other, Ops: EndArgs); |
| 18392 | } |
| 18393 | return SDValue(); |
| 18394 | } |
| 18395 | |
| 18396 | /// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND. |
| 18397 | SDValue |
| 18398 | ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const { |
| 18399 | SDValue Cmp = N->getOperand(Num: 3); |
| 18400 | if (Cmp.getOpcode() != ARMISD::CMPZ) |
| 18401 | // Only looking at NE cases. |
| 18402 | return SDValue(); |
| 18403 | |
| 18404 | SDLoc dl(N); |
| 18405 | SDValue LHS = Cmp.getOperand(i: 0); |
| 18406 | SDValue RHS = Cmp.getOperand(i: 1); |
| 18407 | SDValue Chain = N->getOperand(Num: 0); |
| 18408 | SDValue BB = N->getOperand(Num: 1); |
| 18409 | SDValue ARMcc = N->getOperand(Num: 2); |
| 18410 | ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsZExtVal(); |
| 18411 | |
| 18412 | // (brcond Chain BB ne (cmpz (and (cmov 0 1 CC Flags) 1) 0)) |
| 18413 | // -> (brcond Chain BB CC Flags) |
| 18414 | if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() && |
| 18415 | LHS->getOperand(Num: 0)->getOpcode() == ARMISD::CMOV && |
| 18416 | LHS->getOperand(Num: 0)->hasOneUse() && |
| 18417 | isNullConstant(V: LHS->getOperand(Num: 0)->getOperand(Num: 0)) && |
| 18418 | isOneConstant(V: LHS->getOperand(Num: 0)->getOperand(Num: 1)) && |
| 18419 | isOneConstant(V: LHS->getOperand(Num: 1)) && isNullConstant(V: RHS)) { |
| 18420 | return DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, N1: Chain, N2: BB, |
| 18421 | N3: LHS->getOperand(Num: 0)->getOperand(Num: 2), |
| 18422 | N4: LHS->getOperand(Num: 0)->getOperand(Num: 3)); |
| 18423 | } |
| 18424 | |
| 18425 | return SDValue(); |
| 18426 | } |
| 18427 | |
| 18428 | /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV. |
| 18429 | SDValue |
| 18430 | ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { |
| 18431 | SDValue Cmp = N->getOperand(Num: 3); |
| 18432 | if (Cmp.getOpcode() != ARMISD::CMPZ) |
| 18433 | // Only looking at EQ and NE cases. |
| 18434 | return SDValue(); |
| 18435 | |
| 18436 | EVT VT = N->getValueType(ResNo: 0); |
| 18437 | SDLoc dl(N); |
| 18438 | SDValue LHS = Cmp.getOperand(i: 0); |
| 18439 | SDValue RHS = Cmp.getOperand(i: 1); |
| 18440 | SDValue FalseVal = N->getOperand(Num: 0); |
| 18441 | SDValue TrueVal = N->getOperand(Num: 1); |
| 18442 | SDValue ARMcc = N->getOperand(Num: 2); |
| 18443 | ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsZExtVal(); |
| 18444 | |
| 18445 | // BFI is only available on V6T2+. |
| 18446 | if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) { |
| 18447 | SDValue R = PerformCMOVToBFICombine(CMOV: N, DAG); |
| 18448 | if (R) |
| 18449 | return R; |
| 18450 | } |
| 18451 | |
| 18452 | // Simplify |
| 18453 | // mov r1, r0 |
| 18454 | // cmp r1, x |
| 18455 | // mov r0, y |
| 18456 | // moveq r0, x |
| 18457 | // to |
| 18458 | // cmp r0, x |
| 18459 | // movne r0, y |
| 18460 | // |
| 18461 | // mov r1, r0 |
| 18462 | // cmp r1, x |
| 18463 | // mov r0, x |
| 18464 | // movne r0, y |
| 18465 | // to |
| 18466 | // cmp r0, x |
| 18467 | // movne r0, y |
| 18468 | /// FIXME: Turn this into a target neutral optimization? |
| 18469 | SDValue Res; |
| 18470 | if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) { |
| 18471 | Res = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: LHS, N2: TrueVal, N3: ARMcc, N4: Cmp); |
| 18472 | } else if (CC == ARMCC::EQ && TrueVal == RHS) { |
| 18473 | SDValue ARMcc; |
| 18474 | SDValue NewCmp = getARMCmp(LHS, RHS, CC: ISD::SETNE, ARMcc, DAG, dl); |
| 18475 | Res = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: LHS, N2: FalseVal, N3: ARMcc, N4: NewCmp); |
| 18476 | } |
| 18477 | |
| 18478 | // (cmov F T ne (cmpz (cmov 0 1 CC Flags) 0)) |
| 18479 | // -> (cmov F T CC Flags) |
| 18480 | if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse() && |
| 18481 | isNullConstant(V: LHS->getOperand(Num: 0)) && isOneConstant(V: LHS->getOperand(Num: 1)) && |
| 18482 | isNullConstant(V: RHS)) { |
| 18483 | return DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: FalseVal, N2: TrueVal, |
| 18484 | N3: LHS->getOperand(Num: 2), N4: LHS->getOperand(Num: 3)); |
| 18485 | } |
| 18486 | |
| 18487 | if (!VT.isInteger()) |
| 18488 | return SDValue(); |
| 18489 | |
| 18490 |   // Fold away an unnecessary CMPZ/CMOV |
| 18491 | // CMOV A, B, C1, (CMPZ (CMOV 1, 0, C2, D), 0) -> |
| 18492 | // if C1==EQ -> CMOV A, B, C2, D |
| 18493 | // if C1==NE -> CMOV A, B, NOT(C2), D |
| 18494 | if (N->getConstantOperandVal(Num: 2) == ARMCC::EQ || |
| 18495 | N->getConstantOperandVal(Num: 2) == ARMCC::NE) { |
| 18496 | ARMCC::CondCodes Cond; |
| 18497 | if (SDValue C = IsCMPZCSINC(Cmp: N->getOperand(Num: 3).getNode(), CC&: Cond)) { |
| 18498 | if (N->getConstantOperandVal(Num: 2) == ARMCC::NE) |
| 18499 | Cond = ARMCC::getOppositeCondition(CC: Cond); |
| 18500 | return DAG.getNode(Opcode: N->getOpcode(), DL: SDLoc(N), VT: MVT::i32, N1: N->getOperand(Num: 0), |
| 18501 | N2: N->getOperand(Num: 1), |
| 18502 | N3: DAG.getConstant(Val: Cond, DL: SDLoc(N), VT: MVT::i32), N4: C); |
| 18503 | } |
| 18504 | } |
| 18505 | |
| 18506 | // Materialize a boolean comparison for integers so we can avoid branching. |
| 18507 | if (isNullConstant(V: FalseVal)) { |
| 18508 | if (CC == ARMCC::EQ && isOneConstant(V: TrueVal)) { |
| 18509 | if (!Subtarget->isThumb1Only() && Subtarget->hasV5TOps()) { |
| 18510 | // If x == y then x - y == 0 and ARM's CLZ will return 32, shifting it |
| 18511 | // right 5 bits will make that 32 be 1, otherwise it will be 0. |
| 18512 | // CMOV 0, 1, ==, (CMPZ x, y) -> SRL (CTLZ (SUB x, y)), 5 |
| 18513 | SDValue Sub = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: LHS, N2: RHS); |
| 18514 | Res = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT, N1: DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT, Operand: Sub), |
| 18515 | N2: DAG.getConstant(Val: 5, DL: dl, VT: MVT::i32)); |
| 18516 | } else { |
| 18517 | // CMOV 0, 1, ==, (CMPZ x, y) -> |
| 18518 | // (UADDO_CARRY (SUB x, y), t:0, t:1) |
| 18519 | // where t = (USUBO_CARRY 0, (SUB x, y), 0) |
| 18520 | // |
| 18521 | // The USUBO_CARRY computes 0 - (x - y) and this will give a borrow when |
| 18522 | // x != y. In other words, a carry C == 1 when x == y, C == 0 |
| 18523 | // otherwise. |
| 18524 | // The final UADDO_CARRY computes |
| 18525 | // x - y + (0 - (x - y)) + C == C |
| 18526 | SDValue Sub = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: LHS, N2: RHS); |
| 18527 | SDVTList VTs = DAG.getVTList(VT1: VT, VT2: MVT::i32); |
| 18528 | SDValue Neg = DAG.getNode(Opcode: ISD::USUBO, DL: dl, VTList: VTs, N1: FalseVal, N2: Sub); |
| 18529 | // ISD::USUBO_CARRY returns a borrow but we want the carry here |
| 18530 | // actually. |
| 18531 | SDValue Carry = |
| 18532 | DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
| 18533 | N1: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32), N2: Neg.getValue(R: 1)); |
| 18534 | Res = DAG.getNode(Opcode: ISD::UADDO_CARRY, DL: dl, VTList: VTs, N1: Sub, N2: Neg, N3: Carry); |
| 18535 | } |
| 18536 | } else if (CC == ARMCC::NE && !isNullConstant(V: RHS) && |
| 18537 | (!Subtarget->isThumb1Only() || isPowerOf2Constant(V: TrueVal))) { |
| 18538 | // This seems pointless but will allow us to combine it further below. |
| 18539 | // CMOV 0, z, !=, (CMPZ x, y) -> CMOV (SUBC x, y), z, !=, (SUBC x, y):1 |
| 18540 | SDValue Sub = |
| 18541 | DAG.getNode(Opcode: ARMISD::SUBC, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: MVT::i32), N1: LHS, N2: RHS); |
| 18542 | Res = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: Sub, N2: TrueVal, N3: ARMcc, |
| 18543 | N4: Sub.getValue(R: 1)); |
| 18544 | FalseVal = Sub; |
| 18545 | } |
| 18546 | } else if (isNullConstant(V: TrueVal)) { |
| 18547 | if (CC == ARMCC::EQ && !isNullConstant(V: RHS) && |
| 18548 | (!Subtarget->isThumb1Only() || isPowerOf2Constant(V: FalseVal))) { |
| 18549 | // This seems pointless but will allow us to combine it further below |
| 18550 | // Note that we change == for != as this is the dual for the case above. |
| 18551 | // CMOV z, 0, ==, (CMPZ x, y) -> CMOV (SUBC x, y), z, !=, (SUBC x, y):1 |
| 18552 | SDValue Sub = |
| 18553 | DAG.getNode(Opcode: ARMISD::SUBC, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: MVT::i32), N1: LHS, N2: RHS); |
| 18554 | Res = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: Sub, N2: FalseVal, |
| 18555 | N3: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32), |
| 18556 | N4: Sub.getValue(R: 1)); |
| 18557 | FalseVal = Sub; |
| 18558 | } |
| 18559 | } |
| 18560 | |
| 18561 | // On Thumb1, the DAG above may be further combined if z is a power of 2 |
| 18562 | // (z == 2 ^ K). |
| 18563 | // CMOV (SUBC x, y), z, !=, (SUBC x, y):1 -> |
| 18564 | // t1 = (USUBO (SUB x, y), 1) |
| 18565 | // t2 = (USUBO_CARRY (SUB x, y), t1:0, t1:1) |
| 18566 | // Result = if K != 0 then (SHL t2:0, K) else t2:0 |
| 18567 | // |
| 18568 | // This also handles the special case of comparing against zero; it's |
| 18569 |   // essentially the same pattern, except there's no SUBC: |
| 18570 | // CMOV x, z, !=, (CMPZ x, 0) -> |
| 18571 | // t1 = (USUBO x, 1) |
| 18572 | // t2 = (USUBO_CARRY x, t1:0, t1:1) |
| 18573 | // Result = if K != 0 then (SHL t2:0, K) else t2:0 |
| 18574 | const APInt *TrueConst; |
| 18575 | if (Subtarget->isThumb1Only() && CC == ARMCC::NE && |
| 18576 | ((FalseVal.getOpcode() == ARMISD::SUBC && FalseVal.getOperand(i: 0) == LHS && |
| 18577 | FalseVal.getOperand(i: 1) == RHS) || |
| 18578 | (FalseVal == LHS && isNullConstant(V: RHS))) && |
| 18579 | (TrueConst = isPowerOf2Constant(V: TrueVal))) { |
| 18580 | SDVTList VTs = DAG.getVTList(VT1: VT, VT2: MVT::i32); |
| 18581 | unsigned ShiftAmount = TrueConst->logBase2(); |
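| |     // If z == 2^K with K != 0, subtract against 1 here; the SHL emitted below |
| |     // restores the factor of 2^K. |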
| 18582 | if (ShiftAmount) |
| 18583 | TrueVal = DAG.getConstant(Val: 1, DL: dl, VT); |
| 18584 | SDValue Subc = DAG.getNode(Opcode: ISD::USUBO, DL: dl, VTList: VTs, N1: FalseVal, N2: TrueVal); |
| 18585 | Res = DAG.getNode(Opcode: ISD::USUBO_CARRY, DL: dl, VTList: VTs, N1: FalseVal, N2: Subc, |
| 18586 | N3: Subc.getValue(R: 1)); |
| 18587 | |
| 18588 | if (ShiftAmount) |
| 18589 | Res = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT, N1: Res, |
| 18590 | N2: DAG.getConstant(Val: ShiftAmount, DL: dl, VT: MVT::i32)); |
| 18591 | } |
| 18592 | |
| 18593 | if (Res.getNode()) { |
| 18594 | KnownBits Known = DAG.computeKnownBits(Op: SDValue(N,0)); |
| 18595 | // Capture demanded bits information that would be otherwise lost. |
| 18596 | if (Known.Zero == 0xfffffffe) |
| 18597 | Res = DAG.getNode(Opcode: ISD::AssertZext, DL: dl, VT: MVT::i32, N1: Res, |
| 18598 | N2: DAG.getValueType(MVT::i1)); |
| 18599 | else if (Known.Zero == 0xffffff00) |
| 18600 | Res = DAG.getNode(Opcode: ISD::AssertZext, DL: dl, VT: MVT::i32, N1: Res, |
| 18601 | N2: DAG.getValueType(MVT::i8)); |
| 18602 | else if (Known.Zero == 0xffff0000) |
| 18603 | Res = DAG.getNode(Opcode: ISD::AssertZext, DL: dl, VT: MVT::i32, N1: Res, |
| 18604 | N2: DAG.getValueType(MVT::i16)); |
| 18605 | } |
| 18606 | |
| 18607 | return Res; |
| 18608 | } |
| 18609 | |
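| | // Target-specific DAG combining for ISD::BITCAST nodes. |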
| 18610 | static SDValue PerformBITCASTCombine(SDNode *N, |
| 18611 | TargetLowering::DAGCombinerInfo &DCI, |
| 18612 | const ARMSubtarget *ST) { |
| 18613 | SelectionDAG &DAG = DCI.DAG; |
| 18614 | SDValue Src = N->getOperand(Num: 0); |
| 18615 | EVT DstVT = N->getValueType(ResNo: 0); |
| 18616 | |
| 18617 | // Convert v4f32 bitcast (v4i32 vdup (i32)) -> v4f32 vdup (i32) under MVE. |
| 18618 | if (ST->hasMVEIntegerOps() && Src.getOpcode() == ARMISD::VDUP) { |
| 18619 | EVT SrcVT = Src.getValueType(); |
| 18620 | if (SrcVT.getScalarSizeInBits() == DstVT.getScalarSizeInBits()) |
| 18621 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: SDLoc(N), VT: DstVT, Operand: Src.getOperand(i: 0)); |
| 18622 | } |
| 18623 | |
| 18624 | // We may have a bitcast of something that has already had this bitcast |
| 18625 | // combine performed on it, so skip past any VECTOR_REG_CASTs. |
| 18626 | if (Src.getOpcode() == ARMISD::VECTOR_REG_CAST && |
| 18627 | Src.getOperand(i: 0).getValueType().getScalarSizeInBits() <= |
| 18628 | Src.getValueType().getScalarSizeInBits()) |
| 18629 | Src = Src.getOperand(i: 0); |
| 18630 | |
| 18631 | // Bitcast from element-wise VMOV or VMVN doesn't need VREV if the VREV that |
| 18632 | // would be generated is at least the width of the element type. |
| 18633 | EVT SrcVT = Src.getValueType(); |
| 18634 | if ((Src.getOpcode() == ARMISD::VMOVIMM || |
| 18635 | Src.getOpcode() == ARMISD::VMVNIMM || |
| 18636 | Src.getOpcode() == ARMISD::VMOVFPIMM) && |
| 18637 | SrcVT.getScalarSizeInBits() <= DstVT.getScalarSizeInBits() && |
| 18638 | DAG.getDataLayout().isBigEndian()) |
| 18639 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: SDLoc(N), VT: DstVT, Operand: Src); |
| 18640 | |
| 18641 | // bitcast(extract(x, n)); bitcast(extract(x, n+1)) -> VMOVRRD x |
| 18642 | if (SDValue R = PerformExtractEltToVMOVRRD(N, DCI)) |
| 18643 | return R; |
| 18644 | |
| 18645 | return SDValue(); |
| 18646 | } |
| 18647 | |
| 18648 | // Some combines for the MVETrunc truncation legalizer helper. Also lowers the |
| 18649 | // node into stack operations after DAG legalization. |
| 18650 | SDValue ARMTargetLowering::PerformMVETruncCombine( |
| 18651 | SDNode *N, TargetLowering::DAGCombinerInfo &DCI) const { |
| 18652 | SelectionDAG &DAG = DCI.DAG; |
| 18653 | EVT VT = N->getValueType(ResNo: 0); |
| 18654 | SDLoc DL(N); |
| 18655 | |
| 18656 | // MVETrunc(Undef, Undef) -> Undef |
| 18657 | if (all_of(Range: N->ops(), P: [](SDValue Op) { return Op.isUndef(); })) |
| 18658 | return DAG.getUNDEF(VT); |
| 18659 | |
| 18660 | // MVETrunc(MVETrunc a b, MVETrunc c, d) -> MVETrunc |
| 18661 | if (N->getNumOperands() == 2 && |
| 18662 | N->getOperand(Num: 0).getOpcode() == ARMISD::MVETRUNC && |
| 18663 | N->getOperand(Num: 1).getOpcode() == ARMISD::MVETRUNC) |
| 18664 | return DAG.getNode(Opcode: ARMISD::MVETRUNC, DL, VT, N1: N->getOperand(Num: 0).getOperand(i: 0), |
| 18665 | N2: N->getOperand(Num: 0).getOperand(i: 1), |
| 18666 | N3: N->getOperand(Num: 1).getOperand(i: 0), |
| 18667 | N4: N->getOperand(Num: 1).getOperand(i: 1)); |
| 18668 | |
| 18669 | // MVETrunc(shuffle, shuffle) -> VMOVN |
| 18670 | if (N->getNumOperands() == 2 && |
| 18671 | N->getOperand(Num: 0).getOpcode() == ISD::VECTOR_SHUFFLE && |
| 18672 | N->getOperand(Num: 1).getOpcode() == ISD::VECTOR_SHUFFLE) { |
| 18673 | auto *S0 = cast<ShuffleVectorSDNode>(Val: N->getOperand(Num: 0).getNode()); |
| 18674 | auto *S1 = cast<ShuffleVectorSDNode>(Val: N->getOperand(Num: 1).getNode()); |
| 18675 | |
| 18676 | if (S0->getOperand(Num: 0) == S1->getOperand(Num: 0) && |
| 18677 | S0->getOperand(Num: 1) == S1->getOperand(Num: 1)) { |
| 18678 | // Construct complete shuffle mask |
| 18679 | SmallVector<int, 8> Mask(S0->getMask()); |
| 18680 | Mask.append(in_start: S1->getMask().begin(), in_end: S1->getMask().end()); |
| 18681 | |
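| |       // If the combined mask matches a VMOVN-style truncating interleave, emit |
| |       // the VMOVN directly (operands swapped for the reversed form). |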
| 18682 | if (isVMOVNTruncMask(M: Mask, ToVT: VT, rev: false)) |
| 18683 | return DAG.getNode( |
| 18684 | Opcode: ARMISD::VMOVN, DL, VT, |
| 18685 | N1: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: S0->getOperand(Num: 0)), |
| 18686 | N2: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: S0->getOperand(Num: 1)), |
| 18687 | N3: DAG.getConstant(Val: 1, DL, VT: MVT::i32)); |
| 18688 | if (isVMOVNTruncMask(M: Mask, ToVT: VT, rev: true)) |
| 18689 | return DAG.getNode( |
| 18690 | Opcode: ARMISD::VMOVN, DL, VT, |
| 18691 | N1: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: S0->getOperand(Num: 1)), |
| 18692 | N2: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: S0->getOperand(Num: 0)), |
| 18693 | N3: DAG.getConstant(Val: 1, DL, VT: MVT::i32)); |
| 18694 | } |
| 18695 | } |
| 18696 | |
| 18697 | // For MVETrunc of a buildvector or shuffle, it can be beneficial to lower the |
| 18698 | // truncate to a buildvector to allow the generic optimisations to kick in. |
| 18699 | if (all_of(Range: N->ops(), P: [](SDValue Op) { |
| 18700 | return Op.getOpcode() == ISD::BUILD_VECTOR || |
| 18701 | Op.getOpcode() == ISD::VECTOR_SHUFFLE || |
| 18702 | (Op.getOpcode() == ISD::BITCAST && |
| 18703 | Op.getOperand(i: 0).getOpcode() == ISD::BUILD_VECTOR); |
| 18704 | })) { |
| 18705 |     SmallVector<SDValue, 8> Extracts; |
| 18706 | for (unsigned Op = 0; Op < N->getNumOperands(); Op++) { |
| 18707 | SDValue O = N->getOperand(Num: Op); |
| 18708 | for (unsigned i = 0; i < O.getValueType().getVectorNumElements(); i++) { |
| 18709 | SDValue Ext = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: MVT::i32, N1: O, |
| 18710 | N2: DAG.getConstant(Val: i, DL, VT: MVT::i32)); |
| 18711 | Extracts.push_back(Elt: Ext); |
| 18712 | } |
| 18713 | } |
| 18714 | return DAG.getBuildVector(VT, DL, Ops: Extracts); |
| 18715 | } |
| 18716 | |
| 18717 | // If we are late in the legalization process and nothing has optimised |
| 18718 | // the trunc to anything better, lower it to a stack store and reload, |
| 18719 | // performing the truncation whilst keeping the lanes in the correct order: |
| 18720 | // VSTRH.32 a, stack; VSTRH.32 b, stack+8; VLDRW.32 stack; |
| 18721 | if (!DCI.isAfterLegalizeDAG()) |
| 18722 | return SDValue(); |
| 18723 | |
| 18724 | SDValue StackPtr = DAG.CreateStackTemporary(Bytes: TypeSize::getFixed(ExactSize: 16), Alignment: Align(4)); |
| 18725 | int SPFI = cast<FrameIndexSDNode>(Val: StackPtr.getNode())->getIndex(); |
| 18726 | int NumIns = N->getNumOperands(); |
| 18727 | assert((NumIns == 2 || NumIns == 4) && |
| 18728 |          "Expected 2 or 4 inputs to an MVETrunc"); |
| 18729 | EVT StoreVT = VT.getHalfNumVectorElementsVT(Context&: *DAG.getContext()); |
| 18730 | if (N->getNumOperands() == 4) |
| 18731 | StoreVT = StoreVT.getHalfNumVectorElementsVT(Context&: *DAG.getContext()); |
| 18732 | |
| 18733 | SmallVector<SDValue> Chains; |
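| |   // Truncating-store each input into its portion of the 16-byte stack slot. |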
| 18734 | for (int I = 0; I < NumIns; I++) { |
| 18735 | SDValue Ptr = DAG.getNode( |
| 18736 | Opcode: ISD::ADD, DL, VT: StackPtr.getValueType(), N1: StackPtr, |
| 18737 | N2: DAG.getConstant(Val: I * 16 / NumIns, DL, VT: StackPtr.getValueType())); |
| 18738 | MachinePointerInfo MPI = MachinePointerInfo::getFixedStack( |
| 18739 | MF&: DAG.getMachineFunction(), FI: SPFI, Offset: I * 16 / NumIns); |
| 18740 | SDValue Ch = DAG.getTruncStore(Chain: DAG.getEntryNode(), dl: DL, Val: N->getOperand(Num: I), |
| 18741 | Ptr, PtrInfo: MPI, SVT: StoreVT, Alignment: Align(4)); |
| 18742 | Chains.push_back(Elt: Ch); |
| 18743 | } |
| 18744 | |
| 18745 | SDValue Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Chains); |
| 18746 | MachinePointerInfo MPI = |
| 18747 | MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI: SPFI, Offset: 0); |
| 18748 | return DAG.getLoad(VT, dl: DL, Chain, Ptr: StackPtr, PtrInfo: MPI, Alignment: Align(4)); |
| 18749 | } |
| 18750 | |
| 18751 | // Take an MVEEXT(load x) and split it into (extload x, extload x+8) |
| 18752 | static SDValue PerformSplittingMVEEXTToWideningLoad(SDNode *N, |
| 18753 | SelectionDAG &DAG) { |
| 18754 | SDValue N0 = N->getOperand(Num: 0); |
| 18755 | LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: N0.getNode()); |
| 18756 | if (!LD || !LD->isSimple() || !N0.hasOneUse() || LD->isIndexed()) |
| 18757 | return SDValue(); |
| 18758 | |
| 18759 | EVT FromVT = LD->getMemoryVT(); |
| 18760 | EVT ToVT = N->getValueType(ResNo: 0); |
| 18761 | if (!ToVT.isVector()) |
| 18762 | return SDValue(); |
| 18763 | assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements() * 2); |
| 18764 | EVT ToEltVT = ToVT.getVectorElementType(); |
| 18765 | EVT FromEltVT = FromVT.getVectorElementType(); |
| 18766 | |
| 18767 | unsigned NumElements = 0; |
| 18768 | if (ToEltVT == MVT::i32 && (FromEltVT == MVT::i16 || FromEltVT == MVT::i8)) |
| 18769 | NumElements = 4; |
| 18770 | if (ToEltVT == MVT::i16 && FromEltVT == MVT::i8) |
| 18771 | NumElements = 8; |
| 18772 | assert(NumElements != 0); |
| 18773 | |
| 18774 | ISD::LoadExtType NewExtType = |
| 18775 | N->getOpcode() == ARMISD::MVESEXT ? ISD::SEXTLOAD : ISD::ZEXTLOAD; |
| 18776 | if (LD->getExtensionType() != ISD::NON_EXTLOAD && |
| 18777 | LD->getExtensionType() != ISD::EXTLOAD && |
| 18778 | LD->getExtensionType() != NewExtType) |
| 18779 | return SDValue(); |
| 18780 | |
| 18781 | LLVMContext &C = *DAG.getContext(); |
| 18782 | SDLoc DL(LD); |
| 18783 | // Details about the old load |
| 18784 | SDValue Ch = LD->getChain(); |
| 18785 | SDValue BasePtr = LD->getBasePtr(); |
| 18786 | Align Alignment = LD->getBaseAlign(); |
| 18787 | MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags(); |
| 18788 | AAMDNodes AAInfo = LD->getAAInfo(); |
| 18789 | |
| 18790 | SDValue Offset = DAG.getUNDEF(VT: BasePtr.getValueType()); |
| 18791 | EVT NewFromVT = EVT::getVectorVT( |
| 18792 | Context&: C, VT: EVT::getIntegerVT(Context&: C, BitWidth: FromEltVT.getScalarSizeInBits()), NumElements); |
| 18793 | EVT NewToVT = EVT::getVectorVT( |
| 18794 | Context&: C, VT: EVT::getIntegerVT(Context&: C, BitWidth: ToEltVT.getScalarSizeInBits()), NumElements); |
| 18795 | |
| 18796 | SmallVector<SDValue, 4> Loads; |
| 18797 | SmallVector<SDValue, 4> Chains; |
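| |   // Split into NumElements-wide extending loads at increasing offsets from the |
| |   // original base pointer. |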
| 18798 | for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) { |
| 18799 | unsigned NewOffset = (i * NewFromVT.getSizeInBits()) / 8; |
| 18800 | SDValue NewPtr = |
| 18801 | DAG.getObjectPtrOffset(SL: DL, Ptr: BasePtr, Offset: TypeSize::getFixed(ExactSize: NewOffset)); |
| 18802 | |
| 18803 | SDValue NewLoad = |
| 18804 | DAG.getLoad(AM: ISD::UNINDEXED, ExtType: NewExtType, VT: NewToVT, dl: DL, Chain: Ch, Ptr: NewPtr, Offset, |
| 18805 | PtrInfo: LD->getPointerInfo().getWithOffset(O: NewOffset), MemVT: NewFromVT, |
| 18806 | Alignment, MMOFlags, AAInfo); |
| 18807 | Loads.push_back(Elt: NewLoad); |
| 18808 | Chains.push_back(Elt: SDValue(NewLoad.getNode(), 1)); |
| 18809 | } |
| 18810 | |
| 18811 | SDValue NewChain = DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Chains); |
| 18812 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 1), To: NewChain); |
| 18813 | return DAG.getMergeValues(Ops: Loads, dl: DL); |
| 18814 | } |
| 18815 | |
| 18816 | // Perform combines for MVEEXT. If it has not been optimized to anything better |
| 18817 | // before lowering, it gets converted to a stack store and extloads performing |
| 18818 | // the extend whilst still keeping the same lane ordering. |
| 18819 | SDValue ARMTargetLowering::PerformMVEExtCombine( |
| 18820 | SDNode *N, TargetLowering::DAGCombinerInfo &DCI) const { |
| 18821 | SelectionDAG &DAG = DCI.DAG; |
| 18822 | EVT VT = N->getValueType(ResNo: 0); |
| 18823 | SDLoc DL(N); |
| 18824 |   assert(N->getNumValues() == 2 && "Expected MVEEXT with 2 elements"); |
| 18825 |   assert((VT == MVT::v4i32 || VT == MVT::v8i16) && "Unexpected MVEEXT type"); |
| 18826 | |
| 18827 | EVT ExtVT = N->getOperand(Num: 0).getValueType().getHalfNumVectorElementsVT( |
| 18828 | Context&: *DAG.getContext()); |
| 18829 | auto Extend = [&](SDValue V) { |
| 18830 | SDValue VVT = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: V); |
| 18831 | return N->getOpcode() == ARMISD::MVESEXT |
| 18832 | ? DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL, VT, N1: VVT, |
| 18833 | N2: DAG.getValueType(ExtVT)) |
| 18834 | : DAG.getZeroExtendInReg(Op: VVT, DL, VT: ExtVT); |
| 18835 | }; |
| 18836 | |
| 18837 | // MVEEXT(VDUP) -> SIGN_EXTEND_INREG(VDUP) |
| 18838 | if (N->getOperand(Num: 0).getOpcode() == ARMISD::VDUP) { |
| 18839 | SDValue Ext = Extend(N->getOperand(Num: 0)); |
| 18840 | return DAG.getMergeValues(Ops: {Ext, Ext}, dl: DL); |
| 18841 | } |
| 18842 | |
| 18843 | // MVEEXT(shuffle) -> SIGN_EXTEND_INREG/ZERO_EXTEND_INREG |
| 18844 | if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Val: N->getOperand(Num: 0))) { |
| 18845 | ArrayRef<int> Mask = SVN->getMask(); |
| 18846 | assert(Mask.size() == 2 * VT.getVectorNumElements()); |
| 18847 | assert(Mask.size() == SVN->getValueType(0).getVectorNumElements()); |
| 18848 | unsigned Rev = VT == MVT::v4i32 ? ARMISD::VREV32 : ARMISD::VREV16; |
| 18849 | SDValue Op0 = SVN->getOperand(Num: 0); |
| 18850 | SDValue Op1 = SVN->getOperand(Num: 1); |
| 18851 | |
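| |     // Returns true if the mask half starting at Start selects every second lane |
| |     // of the concatenated inputs beginning at Offset (undef lanes are allowed). |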
| 18852 | auto CheckInregMask = [&](int Start, int Offset) { |
| 18853 | for (int Idx = 0, E = VT.getVectorNumElements(); Idx < E; ++Idx) |
| 18854 | if (Mask[Start + Idx] >= 0 && Mask[Start + Idx] != Idx * 2 + Offset) |
| 18855 | return false; |
| 18856 | return true; |
| 18857 | }; |
| 18858 | SDValue V0 = SDValue(N, 0); |
| 18859 | SDValue V1 = SDValue(N, 1); |
| 18860 | if (CheckInregMask(0, 0)) |
| 18861 | V0 = Extend(Op0); |
| 18862 | else if (CheckInregMask(0, 1)) |
| 18863 | V0 = Extend(DAG.getNode(Opcode: Rev, DL, VT: SVN->getValueType(ResNo: 0), Operand: Op0)); |
| 18864 | else if (CheckInregMask(0, Mask.size())) |
| 18865 | V0 = Extend(Op1); |
| 18866 | else if (CheckInregMask(0, Mask.size() + 1)) |
| 18867 | V0 = Extend(DAG.getNode(Opcode: Rev, DL, VT: SVN->getValueType(ResNo: 0), Operand: Op1)); |
| 18868 | |
| 18869 | if (CheckInregMask(VT.getVectorNumElements(), Mask.size())) |
| 18870 | V1 = Extend(Op1); |
| 18871 | else if (CheckInregMask(VT.getVectorNumElements(), Mask.size() + 1)) |
| 18872 | V1 = Extend(DAG.getNode(Opcode: Rev, DL, VT: SVN->getValueType(ResNo: 0), Operand: Op1)); |
| 18873 | else if (CheckInregMask(VT.getVectorNumElements(), 0)) |
| 18874 | V1 = Extend(Op0); |
| 18875 | else if (CheckInregMask(VT.getVectorNumElements(), 1)) |
| 18876 | V1 = Extend(DAG.getNode(Opcode: Rev, DL, VT: SVN->getValueType(ResNo: 0), Operand: Op0)); |
| 18877 | |
| 18878 | if (V0.getNode() != N || V1.getNode() != N) |
| 18879 | return DAG.getMergeValues(Ops: {V0, V1}, dl: DL); |
| 18880 | } |
| 18881 | |
| 18882 | // MVEEXT(load) -> extload, extload |
| 18883 | if (N->getOperand(Num: 0)->getOpcode() == ISD::LOAD) |
| 18884 | if (SDValue L = PerformSplittingMVEEXTToWideningLoad(N, DAG)) |
| 18885 | return L; |
| 18886 | |
| 18887 | if (!DCI.isAfterLegalizeDAG()) |
| 18888 | return SDValue(); |
| 18889 | |
| 18890 | // Lower to a stack store and reload: |
| 18891 | // VSTRW.32 a, stack; VLDRH.32 stack; VLDRH.32 stack+8; |
| 18892 | SDValue StackPtr = DAG.CreateStackTemporary(Bytes: TypeSize::getFixed(ExactSize: 16), Alignment: Align(4)); |
| 18893 | int SPFI = cast<FrameIndexSDNode>(Val: StackPtr.getNode())->getIndex(); |
| 18894 | int NumOuts = N->getNumValues(); |
| 18895 | assert((NumOuts == 2 || NumOuts == 4) && |
| 18896 |          "Expected 2 or 4 outputs to an MVEEXT"); |
| 18897 | EVT LoadVT = N->getOperand(Num: 0).getValueType().getHalfNumVectorElementsVT( |
| 18898 | Context&: *DAG.getContext()); |
| 18899 | if (N->getNumOperands() == 4) |
| 18900 | LoadVT = LoadVT.getHalfNumVectorElementsVT(Context&: *DAG.getContext()); |
| 18901 | |
| 18902 | MachinePointerInfo MPI = |
| 18903 | MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI: SPFI, Offset: 0); |
| 18904 | SDValue Chain = DAG.getStore(Chain: DAG.getEntryNode(), dl: DL, Val: N->getOperand(Num: 0), |
| 18905 | Ptr: StackPtr, PtrInfo: MPI, Alignment: Align(4)); |
| 18906 | |
| 18907 | SmallVector<SDValue> Loads; |
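| |   // Each result is an extending load of its part of the stored vector. |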
| 18908 | for (int I = 0; I < NumOuts; I++) { |
| 18909 | SDValue Ptr = DAG.getNode( |
| 18910 | Opcode: ISD::ADD, DL, VT: StackPtr.getValueType(), N1: StackPtr, |
| 18911 | N2: DAG.getConstant(Val: I * 16 / NumOuts, DL, VT: StackPtr.getValueType())); |
| 18912 | MachinePointerInfo MPI = MachinePointerInfo::getFixedStack( |
| 18913 | MF&: DAG.getMachineFunction(), FI: SPFI, Offset: I * 16 / NumOuts); |
| 18914 | SDValue Load = DAG.getExtLoad( |
| 18915 | ExtType: N->getOpcode() == ARMISD::MVESEXT ? ISD::SEXTLOAD : ISD::ZEXTLOAD, dl: DL, |
| 18916 | VT, Chain, Ptr, PtrInfo: MPI, MemVT: LoadVT, Alignment: Align(4)); |
| 18917 | Loads.push_back(Elt: Load); |
| 18918 | } |
| 18919 | |
| 18920 | return DAG.getMergeValues(Ops: Loads, dl: DL); |
| 18921 | } |
| 18922 | |
| 18923 | SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, |
| 18924 | DAGCombinerInfo &DCI) const { |
| 18925 | switch (N->getOpcode()) { |
| 18926 | default: break; |
| 18927 | case ISD::SELECT_CC: |
| 18928 | case ISD::SELECT: return PerformSELECTCombine(N, DCI, Subtarget); |
| 18929 | case ISD::VSELECT: return PerformVSELECTCombine(N, DCI, Subtarget); |
| 18930 | case ISD::SETCC: return PerformVSetCCToVCTPCombine(N, DCI, Subtarget); |
| 18931 | case ARMISD::ADDE: return PerformADDECombine(N, DCI, Subtarget); |
| 18932 | case ARMISD::UMLAL: return PerformUMLALCombine(N, DAG&: DCI.DAG, Subtarget); |
| 18933 | case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget); |
| 18934 | case ISD::SUB: return PerformSUBCombine(N, DCI, Subtarget); |
| 18935 | case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); |
| 18936 | case ISD::OR: return PerformORCombine(N, DCI, Subtarget); |
| 18937 | case ISD::XOR: return PerformXORCombine(N, DCI, Subtarget); |
| 18938 | case ISD::AND: return PerformANDCombine(N, DCI, Subtarget); |
| 18939 | case ISD::BRCOND: |
| 18940 | case ISD::BR_CC: return PerformHWLoopCombine(N, DCI, ST: Subtarget); |
| 18941 | case ARMISD::ADDC: |
| 18942 | case ARMISD::SUBC: return PerformAddcSubcCombine(N, DCI, Subtarget); |
| 18943 | case ARMISD::SUBE: return PerformAddeSubeCombine(N, DCI, Subtarget); |
| 18944 | case ARMISD::BFI: return PerformBFICombine(N, DAG&: DCI.DAG); |
| 18945 | case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget); |
| 18946 | case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DAG&: DCI.DAG); |
| 18947 | case ARMISD::VMOVhr: return PerformVMOVhrCombine(N, DCI); |
| 18948 | case ARMISD::VMOVrh: return PerformVMOVrhCombine(N, DAG&: DCI.DAG); |
| 18949 | case ISD::STORE: return PerformSTORECombine(N, DCI, Subtarget); |
| 18950 | case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget); |
| 18951 | case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); |
| 18952 | case ISD::EXTRACT_VECTOR_ELT: |
| 18953 | return PerformExtractEltCombine(N, DCI, ST: Subtarget); |
| 18954 | case ISD::SIGN_EXTEND_INREG: return PerformSignExtendInregCombine(N, DAG&: DCI.DAG); |
| 18955 | case ISD::INSERT_SUBVECTOR: return PerformInsertSubvectorCombine(N, DCI); |
| 18956 | case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DAG&: DCI.DAG); |
| 18957 | case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI, Subtarget); |
| 18958 | case ARMISD::VDUP: return PerformVDUPCombine(N, DAG&: DCI.DAG, Subtarget); |
| 18959 | case ISD::FP_TO_SINT: |
| 18960 | case ISD::FP_TO_UINT: |
| 18961 | return PerformVCVTCombine(N, DAG&: DCI.DAG, Subtarget); |
| 18962 | case ISD::FADD: |
| 18963 | return PerformFADDCombine(N, DAG&: DCI.DAG, Subtarget); |
| 18964 | case ISD::FMUL: |
| 18965 | return PerformVMulVCTPCombine(N, DAG&: DCI.DAG, Subtarget); |
| 18966 | case ISD::INTRINSIC_WO_CHAIN: |
| 18967 | return PerformIntrinsicCombine(N, DCI); |
| 18968 | case ISD::SHL: |
| 18969 | case ISD::SRA: |
| 18970 | case ISD::SRL: |
| 18971 | return PerformShiftCombine(N, DCI, ST: Subtarget); |
| 18972 | case ISD::SIGN_EXTEND: |
| 18973 | case ISD::ZERO_EXTEND: |
| 18974 | case ISD::ANY_EXTEND: |
| 18975 | return PerformExtendCombine(N, DAG&: DCI.DAG, ST: Subtarget); |
| 18976 | case ISD::FP_EXTEND: |
| 18977 | return PerformFPExtendCombine(N, DAG&: DCI.DAG, ST: Subtarget); |
| 18978 | case ISD::SMIN: |
| 18979 | case ISD::UMIN: |
| 18980 | case ISD::SMAX: |
| 18981 | case ISD::UMAX: |
| 18982 | return PerformMinMaxCombine(N, DAG&: DCI.DAG, ST: Subtarget); |
| 18983 | case ARMISD::CMOV: |
| 18984 | return PerformCMOVCombine(N, DAG&: DCI.DAG); |
| 18985 | case ARMISD::BRCOND: |
| 18986 | return PerformBRCONDCombine(N, DAG&: DCI.DAG); |
| 18987 | case ARMISD::CMPZ: |
| 18988 | return PerformCMPZCombine(N, DAG&: DCI.DAG); |
| 18989 | case ARMISD::CSINC: |
| 18990 | case ARMISD::CSINV: |
| 18991 | case ARMISD::CSNEG: |
| 18992 | return PerformCSETCombine(N, DAG&: DCI.DAG); |
| 18993 | case ISD::LOAD: |
| 18994 | return PerformLOADCombine(N, DCI, Subtarget); |
| 18995 | case ARMISD::VLD1DUP: |
| 18996 | case ARMISD::VLD2DUP: |
| 18997 | case ARMISD::VLD3DUP: |
| 18998 | case ARMISD::VLD4DUP: |
| 18999 | return PerformVLDCombine(N, DCI); |
| 19000 | case ARMISD::BUILD_VECTOR: |
| 19001 | return PerformARMBUILD_VECTORCombine(N, DCI); |
| 19002 | case ISD::BITCAST: |
| 19003 | return PerformBITCASTCombine(N, DCI, ST: Subtarget); |
| 19004 | case ARMISD::PREDICATE_CAST: |
| 19005 | return PerformPREDICATE_CASTCombine(N, DCI); |
| 19006 | case ARMISD::VECTOR_REG_CAST: |
| 19007 | return PerformVECTOR_REG_CASTCombine(N, DAG&: DCI.DAG, ST: Subtarget); |
| 19008 | case ARMISD::MVETRUNC: |
| 19009 | return PerformMVETruncCombine(N, DCI); |
| 19010 | case ARMISD::MVESEXT: |
| 19011 | case ARMISD::MVEZEXT: |
| 19012 | return PerformMVEExtCombine(N, DCI); |
| 19013 | case ARMISD::VCMP: |
| 19014 | return PerformVCMPCombine(N, DAG&: DCI.DAG, Subtarget); |
| 19015 | case ISD::VECREDUCE_ADD: |
| 19016 | return PerformVECREDUCE_ADDCombine(N, DAG&: DCI.DAG, ST: Subtarget); |
| 19017 | case ARMISD::VADDVs: |
| 19018 | case ARMISD::VADDVu: |
| 19019 | case ARMISD::VADDLVs: |
| 19020 | case ARMISD::VADDLVu: |
| 19021 | case ARMISD::VADDLVAs: |
| 19022 | case ARMISD::VADDLVAu: |
| 19023 | case ARMISD::VMLAVs: |
| 19024 | case ARMISD::VMLAVu: |
| 19025 | case ARMISD::VMLALVs: |
| 19026 | case ARMISD::VMLALVu: |
| 19027 | case ARMISD::VMLALVAs: |
| 19028 | case ARMISD::VMLALVAu: |
| 19029 | return PerformReduceShuffleCombine(N, DAG&: DCI.DAG); |
| 19030 | case ARMISD::VMOVN: |
| 19031 | return PerformVMOVNCombine(N, DCI); |
| 19032 | case ARMISD::VQMOVNs: |
| 19033 | case ARMISD::VQMOVNu: |
| 19034 | return PerformVQMOVNCombine(N, DCI); |
| 19035 | case ARMISD::VQDMULH: |
| 19036 | return PerformVQDMULHCombine(N, DCI); |
| 19037 | case ARMISD::ASRL: |
| 19038 | case ARMISD::LSRL: |
| 19039 | case ARMISD::LSLL: |
| 19040 | return PerformLongShiftCombine(N, DAG&: DCI.DAG); |
| 19041 | case ARMISD::SMULWB: { |
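| |     // SMULWB only uses the bottom 16 bits of its second operand. |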
| 19042 | unsigned BitWidth = N->getValueType(ResNo: 0).getSizeInBits(); |
| 19043 | APInt DemandedMask = APInt::getLowBitsSet(numBits: BitWidth, loBitsSet: 16); |
| 19044 | if (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI)) |
| 19045 | return SDValue(); |
| 19046 | break; |
| 19047 | } |
| 19048 | case ARMISD::SMULWT: { |
| 19049 | unsigned BitWidth = N->getValueType(ResNo: 0).getSizeInBits(); |
| 19050 | APInt DemandedMask = APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: 16); |
| 19051 | if (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI)) |
| 19052 | return SDValue(); |
| 19053 | break; |
| 19054 | } |
| 19055 | case ARMISD::SMLALBB: |
| 19056 | case ARMISD::QADD16b: |
| 19057 | case ARMISD::QSUB16b: |
| 19058 | case ARMISD::UQADD16b: |
| 19059 | case ARMISD::UQSUB16b: { |
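| |     // Only the bottom 16 bits of each operand are used. |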
| 19060 | unsigned BitWidth = N->getValueType(ResNo: 0).getSizeInBits(); |
| 19061 | APInt DemandedMask = APInt::getLowBitsSet(numBits: BitWidth, loBitsSet: 16); |
| 19062 | if ((SimplifyDemandedBits(Op: N->getOperand(Num: 0), DemandedBits: DemandedMask, DCI)) || |
| 19063 | (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI))) |
| 19064 | return SDValue(); |
| 19065 | break; |
| 19066 | } |
| 19067 | case ARMISD::SMLALBT: { |
| 19068 | unsigned LowWidth = N->getOperand(Num: 0).getValueType().getSizeInBits(); |
| 19069 | APInt LowMask = APInt::getLowBitsSet(numBits: LowWidth, loBitsSet: 16); |
| 19070 | unsigned HighWidth = N->getOperand(Num: 1).getValueType().getSizeInBits(); |
| 19071 | APInt HighMask = APInt::getHighBitsSet(numBits: HighWidth, hiBitsSet: 16); |
| 19072 | if ((SimplifyDemandedBits(Op: N->getOperand(Num: 0), DemandedBits: LowMask, DCI)) || |
| 19073 | (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: HighMask, DCI))) |
| 19074 | return SDValue(); |
| 19075 | break; |
| 19076 | } |
| 19077 | case ARMISD::SMLALTB: { |
| 19078 | unsigned HighWidth = N->getOperand(Num: 0).getValueType().getSizeInBits(); |
| 19079 | APInt HighMask = APInt::getHighBitsSet(numBits: HighWidth, hiBitsSet: 16); |
| 19080 | unsigned LowWidth = N->getOperand(Num: 1).getValueType().getSizeInBits(); |
| 19081 | APInt LowMask = APInt::getLowBitsSet(numBits: LowWidth, loBitsSet: 16); |
| 19082 | if ((SimplifyDemandedBits(Op: N->getOperand(Num: 0), DemandedBits: HighMask, DCI)) || |
| 19083 | (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: LowMask, DCI))) |
| 19084 | return SDValue(); |
| 19085 | break; |
| 19086 | } |
| 19087 | case ARMISD::SMLALTT: { |
| 19088 | unsigned BitWidth = N->getValueType(ResNo: 0).getSizeInBits(); |
| 19089 | APInt DemandedMask = APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: 16); |
| 19090 | if ((SimplifyDemandedBits(Op: N->getOperand(Num: 0), DemandedBits: DemandedMask, DCI)) || |
| 19091 | (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI))) |
| 19092 | return SDValue(); |
| 19093 | break; |
| 19094 | } |
| 19095 | case ARMISD::QADD8b: |
| 19096 | case ARMISD::QSUB8b: |
| 19097 | case ARMISD::UQADD8b: |
| 19098 | case ARMISD::UQSUB8b: { |
| 19099 | unsigned BitWidth = N->getValueType(ResNo: 0).getSizeInBits(); |
| 19100 | APInt DemandedMask = APInt::getLowBitsSet(numBits: BitWidth, loBitsSet: 8); |
| 19101 | if ((SimplifyDemandedBits(Op: N->getOperand(Num: 0), DemandedBits: DemandedMask, DCI)) || |
| 19102 | (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI))) |
| 19103 | return SDValue(); |
| 19104 | break; |
| 19105 | } |
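| |   // VBSP selects between its second and third operands; if they are identical |
| |   // the result is that value regardless of the mask. |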
| 19106 | case ARMISD::VBSP: |
| 19107 | if (N->getOperand(Num: 1) == N->getOperand(Num: 2)) |
| 19108 | return N->getOperand(Num: 1); |
| 19109 | return SDValue(); |
| 19110 | case ISD::INTRINSIC_VOID: |
| 19111 | case ISD::INTRINSIC_W_CHAIN: |
| 19112 | switch (N->getConstantOperandVal(Num: 1)) { |
| 19113 | case Intrinsic::arm_neon_vld1: |
| 19114 | case Intrinsic::arm_neon_vld1x2: |
| 19115 | case Intrinsic::arm_neon_vld1x3: |
| 19116 | case Intrinsic::arm_neon_vld1x4: |
| 19117 | case Intrinsic::arm_neon_vld2: |
| 19118 | case Intrinsic::arm_neon_vld3: |
| 19119 | case Intrinsic::arm_neon_vld4: |
| 19120 | case Intrinsic::arm_neon_vld2lane: |
| 19121 | case Intrinsic::arm_neon_vld3lane: |
| 19122 | case Intrinsic::arm_neon_vld4lane: |
| 19123 | case Intrinsic::arm_neon_vld2dup: |
| 19124 | case Intrinsic::arm_neon_vld3dup: |
| 19125 | case Intrinsic::arm_neon_vld4dup: |
| 19126 | case Intrinsic::arm_neon_vst1: |
| 19127 | case Intrinsic::arm_neon_vst1x2: |
| 19128 | case Intrinsic::arm_neon_vst1x3: |
| 19129 | case Intrinsic::arm_neon_vst1x4: |
| 19130 | case Intrinsic::arm_neon_vst2: |
| 19131 | case Intrinsic::arm_neon_vst3: |
| 19132 | case Intrinsic::arm_neon_vst4: |
| 19133 | case Intrinsic::arm_neon_vst2lane: |
| 19134 | case Intrinsic::arm_neon_vst3lane: |
| 19135 | case Intrinsic::arm_neon_vst4lane: |
| 19136 | return PerformVLDCombine(N, DCI); |
| 19137 | case Intrinsic::arm_mve_vld2q: |
| 19138 | case Intrinsic::arm_mve_vld4q: |
| 19139 | case Intrinsic::arm_mve_vst2q: |
| 19140 | case Intrinsic::arm_mve_vst4q: |
| 19141 | return PerformMVEVLDCombine(N, DCI); |
| 19142 | default: break; |
| 19143 | } |
| 19144 | break; |
| 19145 | } |
| 19146 | return SDValue(); |
| 19147 | } |
| 19148 | |
| 19149 | bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc, |
| 19150 | EVT VT) const { |
| 19151 | return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE); |
| 19152 | } |
| 19153 | |
| 19154 | bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned, |
| 19155 | Align Alignment, |
| 19156 | MachineMemOperand::Flags, |
| 19157 | unsigned *Fast) const { |
| 19158 | // Depends what it gets converted into if the type is weird. |
| 19159 | if (!VT.isSimple()) |
| 19160 | return false; |
| 19161 | |
| 19162 | // The AllowsUnaligned flag models the SCTLR.A setting in ARM cpus |
| 19163 | bool AllowsUnaligned = Subtarget->allowsUnalignedMem(); |
| 19164 | auto Ty = VT.getSimpleVT().SimpleTy; |
| 19165 | |
| 19166 | if (Ty == MVT::i8 || Ty == MVT::i16 || Ty == MVT::i32) { |
| 19167 |     // Unaligned access can use (for example) LDRB, LDRH, LDR |
| 19168 | if (AllowsUnaligned) { |
| 19169 | if (Fast) |
| 19170 | *Fast = Subtarget->hasV7Ops(); |
| 19171 | return true; |
| 19172 | } |
| 19173 | } |
| 19174 | |
| 19175 | if (Ty == MVT::f64 || Ty == MVT::v2f64) { |
| 19176 | // For any little-endian targets with neon, we can support unaligned ld/st |
| 19177 | // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8. |
| 19178 | // A big-endian target may also explicitly support unaligned accesses |
| 19179 | if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) { |
| 19180 | if (Fast) |
| 19181 | *Fast = 1; |
| 19182 | return true; |
| 19183 | } |
| 19184 | } |
| 19185 | |
| 19186 | if (!Subtarget->hasMVEIntegerOps()) |
| 19187 | return false; |
| 19188 | |
| 19189 | // These are for predicates |
| 19190 | if ((Ty == MVT::v16i1 || Ty == MVT::v8i1 || Ty == MVT::v4i1 || |
| 19191 | Ty == MVT::v2i1)) { |
| 19192 | if (Fast) |
| 19193 | *Fast = 1; |
| 19194 | return true; |
| 19195 | } |
| 19196 | |
| 19197 | // These are for truncated stores/narrowing loads. They are fine so long as |
| 19198 | // the alignment is at least the size of the item being loaded |
| 19199 | if ((Ty == MVT::v4i8 || Ty == MVT::v8i8 || Ty == MVT::v4i16) && |
| 19200 | Alignment >= VT.getScalarSizeInBits() / 8) { |
| 19201 | if (Fast) |
| 19202 | *Fast = true; |
| 19203 | return true; |
| 19204 | } |
| 19205 | |
| 19206 | // In little-endian MVE, the store instructions VSTRB.U8, VSTRH.U16 and |
| 19207 | // VSTRW.U32 all store the vector register in exactly the same format, and |
| 19208 | // differ only in the range of their immediate offset field and the required |
| 19209 | // alignment. So there is always a store that can be used, regardless of |
| 19210 | // actual type. |
| 19211 | // |
| 19212 |   // For big endian, that is not the case, but we can still emit a (VSTRB.U8; |
| 19213 | // VREV64.8) pair and get the same effect. This will likely be better than |
| 19214 | // aligning the vector through the stack. |
| 19215 | if (Ty == MVT::v16i8 || Ty == MVT::v8i16 || Ty == MVT::v8f16 || |
| 19216 | Ty == MVT::v4i32 || Ty == MVT::v4f32 || Ty == MVT::v2i64 || |
| 19217 | Ty == MVT::v2f64) { |
| 19218 | if (Fast) |
| 19219 | *Fast = 1; |
| 19220 | return true; |
| 19221 | } |
| 19222 | |
| 19223 | return false; |
| 19224 | } |
| 19225 | |
| 19226 | |
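| | // Choose a wide type for inline memcpy / zero-memset expansion when NEON is |
| | // usable; returning MVT::Other defers the choice to the generic lowering. |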
| 19227 | EVT ARMTargetLowering::getOptimalMemOpType( |
| 19228 | const MemOp &Op, const AttributeList &FuncAttributes) const { |
| 19229 | // See if we can use NEON instructions for this... |
| 19230 | if ((Op.isMemcpy() || Op.isZeroMemset()) && Subtarget->hasNEON() && |
| 19231 | !FuncAttributes.hasFnAttr(Kind: Attribute::NoImplicitFloat)) { |
| 19232 | unsigned Fast; |
| 19233 | if (Op.size() >= 16 && |
| 19234 | (Op.isAligned(AlignCheck: Align(16)) || |
| 19235 | (allowsMisalignedMemoryAccesses(VT: MVT::v2f64, 0, Alignment: Align(1), |
| 19236 | MachineMemOperand::MONone, Fast: &Fast) && |
| 19237 | Fast))) { |
| 19238 | return MVT::v2f64; |
| 19239 | } else if (Op.size() >= 8 && |
| 19240 | (Op.isAligned(AlignCheck: Align(8)) || |
| 19241 | (allowsMisalignedMemoryAccesses( |
| 19242 | VT: MVT::f64, 0, Alignment: Align(1), MachineMemOperand::MONone, Fast: &Fast) && |
| 19243 | Fast))) { |
| 19244 | return MVT::f64; |
| 19245 | } |
| 19246 | } |
| 19247 | |
| 19248 | // Let the target-independent logic figure it out. |
| 19249 | return MVT::Other; |
| 19250 | } |
| 19251 | |
| 19252 | // 64-bit integers are split into their high and low parts and held in two |
| 19253 | // different registers, so the trunc is free since the low register can just |
| 19254 | // be used. |
| 19255 | bool ARMTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const { |
| 19256 | if (!SrcTy->isIntegerTy() || !DstTy->isIntegerTy()) |
| 19257 | return false; |
| 19258 | unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); |
| 19259 | unsigned DestBits = DstTy->getPrimitiveSizeInBits(); |
| 19260 | return (SrcBits == 64 && DestBits == 32); |
| 19261 | } |
| 19262 | |
| 19263 | bool ARMTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const { |
| 19264 | if (SrcVT.isVector() || DstVT.isVector() || !SrcVT.isInteger() || |
| 19265 | !DstVT.isInteger()) |
| 19266 | return false; |
| 19267 | unsigned SrcBits = SrcVT.getSizeInBits(); |
| 19268 | unsigned DestBits = DstVT.getSizeInBits(); |
| 19269 | return (SrcBits == 64 && DestBits == 32); |
| 19270 | } |
| 19271 | |
| 19272 | bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { |
| 19273 | if (Val.getOpcode() != ISD::LOAD) |
| 19274 | return false; |
| 19275 | |
| 19276 | EVT VT1 = Val.getValueType(); |
| 19277 | if (!VT1.isSimple() || !VT1.isInteger() || |
| 19278 | !VT2.isSimple() || !VT2.isInteger()) |
| 19279 | return false; |
| 19280 | |
| 19281 | switch (VT1.getSimpleVT().SimpleTy) { |
| 19282 | default: break; |
| 19283 | case MVT::i1: |
| 19284 | case MVT::i8: |
| 19285 | case MVT::i16: |
| 19286 | // 8-bit and 16-bit loads implicitly zero-extend to 32-bits. |
| 19287 | return true; |
| 19288 | } |
| 19289 | |
| 19290 | return false; |
| 19291 | } |
| 19292 | |
| 19293 | bool ARMTargetLowering::isFNegFree(EVT VT) const { |
| 19294 | if (!VT.isSimple()) |
| 19295 | return false; |
| 19296 | |
| 19297 | // There are quite a few FP16 instructions (e.g. VNMLA, VNMLS, etc.) that |
| 19298 | // negate values directly (fneg is free). So, we don't want to let the DAG |
| 19299 | // combiner rewrite fneg into xors and some other instructions. For f16 and |
| 19300 | // FullFP16 argument passing, some bitcast nodes may be introduced, |
| 19301 | // triggering this DAG combine rewrite, so we are avoiding that with this. |
| 19302 | switch (VT.getSimpleVT().SimpleTy) { |
| 19303 | default: break; |
| 19304 | case MVT::f16: |
| 19305 | return Subtarget->hasFullFP16(); |
| 19306 | } |
| 19307 | |
| 19308 | return false; |
| 19309 | } |
| 19310 | |
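| | // MVE's VDUP takes a GPR source, so splats of f32/f16 vectors are preferably |
| | // treated as the equivalent integer type. |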
| 19311 | Type *ARMTargetLowering::shouldConvertSplatType(ShuffleVectorInst *SVI) const { |
| 19312 | if (!Subtarget->hasMVEIntegerOps()) |
| 19313 | return nullptr; |
| 19314 | Type *SVIType = SVI->getType(); |
| 19315 | Type *ScalarType = SVIType->getScalarType(); |
| 19316 | |
| 19317 | if (ScalarType->isFloatTy()) |
| 19318 | return Type::getInt32Ty(C&: SVIType->getContext()); |
| 19319 | if (ScalarType->isHalfTy()) |
| 19320 | return Type::getInt16Ty(C&: SVIType->getContext()); |
| 19321 | return nullptr; |
| 19322 | } |
| 19323 | |
| 19324 | bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const { |
| 19325 | EVT VT = ExtVal.getValueType(); |
| 19326 | |
| 19327 | if (!isTypeLegal(VT)) |
| 19328 | return false; |
| 19329 | |
| 19330 | if (auto *Ld = dyn_cast<MaskedLoadSDNode>(Val: ExtVal.getOperand(i: 0))) { |
| 19331 | if (Ld->isExpandingLoad()) |
| 19332 | return false; |
| 19333 | } |
| 19334 | |
| 19335 | if (Subtarget->hasMVEIntegerOps()) |
| 19336 | return true; |
| 19337 | |
| 19338 | // Don't create a loadext if we can fold the extension into a wide/long |
| 19339 | // instruction. |
| 19340 | // If there's more than one user instruction, the loadext is desirable no |
| 19341 | // matter what. There can be two uses by the same instruction. |
| 19342 | if (ExtVal->use_empty() || |
| 19343 | !ExtVal->user_begin()->isOnlyUserOf(N: ExtVal.getNode())) |
| 19344 | return true; |
| 19345 | |
| 19346 | SDNode *U = *ExtVal->user_begin(); |
| 19347 | if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB || |
| 19348 | U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHLIMM)) |
| 19349 | return false; |
| 19350 | |
| 19351 | return true; |
| 19352 | } |
| 19353 | |
| 19354 | bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { |
| 19355 | if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) |
| 19356 | return false; |
| 19357 | |
| 19358 | if (!isTypeLegal(VT: EVT::getEVT(Ty: Ty1))) |
| 19359 | return false; |
| 19360 | |
| 19361 |   assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop"); |
| 19362 | |
| 19363 | // Assuming the caller doesn't have a zeroext or signext return parameter, |
| 19364 | // truncation all the way down to i1 is valid. |
| 19365 | return true; |
| 19366 | } |
| 19367 | |
| 19368 | /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster |
| 19369 | /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be |
| 19370 | /// expanded to FMAs when this method returns true, otherwise fmuladd is |
| 19371 | /// expanded to fmul + fadd. |
| 19372 | /// |
| 19373 | /// ARM supports both fused and unfused multiply-add operations; we already |
| 19374 | /// lower a pair of fmul and fadd to the latter so it's not clear that there |
| 19375 | /// would be a gain or that the gain would be worthwhile enough to risk |
| 19376 | /// correctness bugs. |
| 19377 | /// |
| 19378 | /// For MVE, we set this to true as it helps simplify the need for some |
| 19379 | /// patterns (and we don't have the non-fused floating point instruction). |
| 19380 | bool ARMTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, |
| 19381 | EVT VT) const { |
| 19382 | if (Subtarget->useSoftFloat()) |
| 19383 | return false; |
| 19384 | |
| 19385 | if (!VT.isSimple()) |
| 19386 | return false; |
| 19387 | |
| 19388 | switch (VT.getSimpleVT().SimpleTy) { |
| 19389 | case MVT::v4f32: |
| 19390 | case MVT::v8f16: |
| 19391 | return Subtarget->hasMVEFloatOps(); |
| 19392 | case MVT::f16: |
| 19393 | return Subtarget->useFPVFMx16(); |
| 19394 | case MVT::f32: |
| 19395 | return Subtarget->useFPVFMx(); |
| 19396 | case MVT::f64: |
| 19397 | return Subtarget->useFPVFMx64(); |
| 19398 | default: |
| 19399 | break; |
| 19400 | } |
| 19401 | |
| 19402 | return false; |
| 19403 | } |
| 19404 | |
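| | // Thumb1 load/store offsets are unsigned 5-bit immediates scaled by the access |
| | // size. |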
| 19405 | static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { |
| 19406 | if (V < 0) |
| 19407 | return false; |
| 19408 | |
| 19409 | unsigned Scale = 1; |
| 19410 | switch (VT.getSimpleVT().SimpleTy) { |
| 19411 | case MVT::i1: |
| 19412 | case MVT::i8: |
| 19413 | // Scale == 1; |
| 19414 | break; |
| 19415 | case MVT::i16: |
| 19416 | // Scale == 2; |
| 19417 | Scale = 2; |
| 19418 | break; |
| 19419 | default: |
| 19420 |     // On Thumb1 we load most things (i32, i64, floats, etc.) with an LDR |
| 19421 | // Scale == 4; |
| 19422 | Scale = 4; |
| 19423 | break; |
| 19424 | } |
| 19425 | |
| 19426 | if ((V & (Scale - 1)) != 0) |
| 19427 | return false; |
| 19428 | return isUInt<5>(x: V / Scale); |
| 19429 | } |
| 19430 | |
| 19431 | static bool isLegalT2AddressImmediate(int64_t V, EVT VT, |
| 19432 | const ARMSubtarget *Subtarget) { |
| 19433 | if (!VT.isInteger() && !VT.isFloatingPoint()) |
| 19434 | return false; |
| 19435 | if (VT.isVector() && Subtarget->hasNEON()) |
| 19436 | return false; |
| 19437 | if (VT.isVector() && VT.isFloatingPoint() && Subtarget->hasMVEIntegerOps() && |
| 19438 | !Subtarget->hasMVEFloatOps()) |
| 19439 | return false; |
| 19440 | |
| 19441 | bool IsNeg = false; |
| 19442 | if (V < 0) { |
| 19443 | IsNeg = true; |
| 19444 | V = -V; |
| 19445 | } |
| 19446 | |
| 19447 | unsigned NumBytes = std::max(a: (unsigned)VT.getSizeInBits() / 8, b: 1U); |
| 19448 | |
| 19449 | // MVE: size * imm7 |
| 19450 | if (VT.isVector() && Subtarget->hasMVEIntegerOps()) { |
| 19451 | switch (VT.getSimpleVT().getVectorElementType().SimpleTy) { |
| 19452 | case MVT::i32: |
| 19453 | case MVT::f32: |
| 19454 | return isShiftedUInt<7,2>(x: V); |
| 19455 | case MVT::i16: |
| 19456 | case MVT::f16: |
| 19457 | return isShiftedUInt<7,1>(x: V); |
| 19458 | case MVT::i8: |
| 19459 | return isUInt<7>(x: V); |
| 19460 | default: |
| 19461 | return false; |
| 19462 | } |
| 19463 | } |
| 19464 | |
| 19465 | // half VLDR: 2 * imm8 |
| 19466 | if (VT.isFloatingPoint() && NumBytes == 2 && Subtarget->hasFPRegs16()) |
| 19467 | return isShiftedUInt<8, 1>(x: V); |
| 19468 | // VLDR and LDRD: 4 * imm8 |
| 19469 | if ((VT.isFloatingPoint() && Subtarget->hasVFP2Base()) || NumBytes == 8) |
| 19470 | return isShiftedUInt<8, 2>(x: V); |
| 19471 | |
| 19472 | if (NumBytes == 1 || NumBytes == 2 || NumBytes == 4) { |
| 19473 | // + imm12 or - imm8 |
| 19474 | if (IsNeg) |
| 19475 | return isUInt<8>(x: V); |
| 19476 | return isUInt<12>(x: V); |
| 19477 | } |
| 19478 | |
| 19479 | return false; |
| 19480 | } |
| 19481 | |
| 19482 | /// isLegalAddressImmediate - Return true if the integer value can be used |
| 19483 | /// as the offset of the target addressing mode for load / store of the |
| 19484 | /// given type. |
| 19485 | static bool isLegalAddressImmediate(int64_t V, EVT VT, |
| 19486 | const ARMSubtarget *Subtarget) { |
| 19487 | if (V == 0) |
| 19488 | return true; |
| 19489 | |
| 19490 | if (!VT.isSimple()) |
| 19491 | return false; |
| 19492 | |
| 19493 | if (Subtarget->isThumb1Only()) |
| 19494 | return isLegalT1AddressImmediate(V, VT); |
| 19495 | else if (Subtarget->isThumb2()) |
| 19496 | return isLegalT2AddressImmediate(V, VT, Subtarget); |
| 19497 | |
| 19498 | // ARM mode. |
| 19499 | if (V < 0) |
| 19500 | V = - V; |
| 19501 | switch (VT.getSimpleVT().SimpleTy) { |
| 19502 | default: return false; |
| 19503 | case MVT::i1: |
| 19504 | case MVT::i8: |
| 19505 | case MVT::i32: |
| 19506 | // +- imm12 |
| 19507 | return isUInt<12>(x: V); |
| 19508 | case MVT::i16: |
| 19509 | // +- imm8 |
| 19510 | return isUInt<8>(x: V); |
| 19511 | case MVT::f32: |
| 19512 | case MVT::f64: |
| 19513 | if (!Subtarget->hasVFP2Base()) // FIXME: NEON? |
| 19514 | return false; |
| 19515 | return isShiftedUInt<8, 2>(x: V); |
| 19516 | } |
| 19517 | } |
| 19518 | |
| 19519 | bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, |
| 19520 | EVT VT) const { |
| 19521 | int Scale = AM.Scale; |
| 19522 | if (Scale < 0) |
| 19523 | return false; |
| 19524 | |
| 19525 | switch (VT.getSimpleVT().SimpleTy) { |
| 19526 | default: return false; |
| 19527 | case MVT::i1: |
| 19528 | case MVT::i8: |
| 19529 | case MVT::i16: |
| 19530 | case MVT::i32: |
| 19531 | if (Scale == 1) |
| 19532 | return true; |
| 19533 | // r + r << imm |
| 19534 | Scale = Scale & ~1; |
| 19535 | return Scale == 2 || Scale == 4 || Scale == 8; |
| 19536 | case MVT::i64: |
| 19537 | // FIXME: What are we trying to model here? ldrd doesn't have an r + r |
| 19538 | // version in Thumb mode. |
| 19539 | // r + r |
| 19540 | if (Scale == 1) |
| 19541 | return true; |
| 19542 | // r * 2 (this can be lowered to r + r). |
| 19543 | if (!AM.HasBaseReg && Scale == 2) |
| 19544 | return true; |
| 19545 | return false; |
| 19546 | case MVT::isVoid: |
| 19547 | // Note, we allow "void" uses (basically, uses that aren't loads or |
| 19548 | // stores), because arm allows folding a scale into many arithmetic |
| 19549 | // operations. This should be made more precise and revisited later. |
| 19550 | |
| 19551 | // Allow r << imm, but the imm has to be a multiple of two. |
| 19552 | if (Scale & 1) return false; |
| 19553 | return isPowerOf2_32(Value: Scale); |
| 19554 | } |
| 19555 | } |
| 19556 | |
| 19557 | bool ARMTargetLowering::isLegalT1ScaledAddressingMode(const AddrMode &AM, |
| 19558 | EVT VT) const { |
| 19559 | const int Scale = AM.Scale; |
| 19560 | |
| 19561 | // Negative scales are not supported in Thumb1. |
| 19562 | if (Scale < 0) |
| 19563 | return false; |
| 19564 | |
| 19565 | // Thumb1 addressing modes do not support register scaling excepting the |
| 19566 | // following cases: |
| 19567 | // 1. Scale == 1 means no scaling. |
| 19568 | // 2. Scale == 2 this can be lowered to r + r if there is no base register. |
| 19569 | return (Scale == 1) || (!AM.HasBaseReg && Scale == 2); |
| 19570 | } |
| 19571 | |
| 19572 | /// isLegalAddressingMode - Return true if the addressing mode represented |
| 19573 | /// by AM is legal for this target, for a load/store of the specified type. |
| 19574 | bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL, |
| 19575 | const AddrMode &AM, Type *Ty, |
| 19576 | unsigned AS, Instruction *I) const { |
| 19577 | EVT VT = getValueType(DL, Ty, AllowUnknown: true); |
| 19578 | if (!isLegalAddressImmediate(V: AM.BaseOffs, VT, Subtarget)) |
| 19579 | return false; |
| 19580 | |
| 19581 | // Can never fold addr of global into load/store. |
| 19582 | if (AM.BaseGV) |
| 19583 | return false; |
| 19584 | |
| 19585 | switch (AM.Scale) { |
| 19586 | case 0: // no scale reg, must be "r+i" or "r", or "i". |
| 19587 | break; |
| 19588 | default: |
| 19589 | // ARM doesn't support any R+R*scale+imm addr modes. |
| 19590 | if (AM.BaseOffs) |
| 19591 | return false; |
| 19592 | |
| 19593 | if (!VT.isSimple()) |
| 19594 | return false; |
| 19595 | |
| 19596 | if (Subtarget->isThumb1Only()) |
| 19597 | return isLegalT1ScaledAddressingMode(AM, VT); |
| 19598 | |
| 19599 | if (Subtarget->isThumb2()) |
| 19600 | return isLegalT2ScaledAddressingMode(AM, VT); |
| 19601 | |
| 19602 | int Scale = AM.Scale; |
| 19603 | switch (VT.getSimpleVT().SimpleTy) { |
| 19604 | default: return false; |
| 19605 | case MVT::i1: |
| 19606 | case MVT::i8: |
| 19607 | case MVT::i32: |
| 19608 | if (Scale < 0) Scale = -Scale; |
| 19609 | if (Scale == 1) |
| 19610 | return true; |
| 19611 | // r + r << imm |
| 19612 | return isPowerOf2_32(Value: Scale & ~1); |
| 19613 | case MVT::i16: |
| 19614 | case MVT::i64: |
| 19615 | // r +/- r |
| 19616 | if (Scale == 1 || (AM.HasBaseReg && Scale == -1)) |
| 19617 | return true; |
| 19618 | // r * 2 (this can be lowered to r + r). |
| 19619 | if (!AM.HasBaseReg && Scale == 2) |
| 19620 | return true; |
| 19621 | return false; |
| 19622 | |
| 19623 | case MVT::isVoid: |
| 19624 | // Note, we allow "void" uses (basically, uses that aren't loads or |
| 19625 | // stores), because arm allows folding a scale into many arithmetic |
| 19626 | // operations. This should be made more precise and revisited later. |
| 19627 | |
| 19628 | // Allow r << imm, but the imm has to be a multiple of two. |
| 19629 | if (Scale & 1) return false; |
| 19630 | return isPowerOf2_32(Value: Scale); |
| 19631 | } |
| 19632 | } |
| 19633 | return true; |
| 19634 | } |
| 19635 | |
| 19636 | /// isLegalICmpImmediate - Return true if the specified immediate is legal |
| 19637 | /// icmp immediate, that is the target has icmp instructions which can compare |
| 19638 | /// a register against the immediate without having to materialize the |
| 19639 | /// immediate into a register. |
| 19640 | bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { |
| 19641 | // Thumb2 and ARM modes can use cmn for negative immediates. |
| 19642 | if (!Subtarget->isThumb()) |
| 19643 | return ARM_AM::getSOImmVal(Arg: (uint32_t)Imm) != -1 || |
| 19644 | ARM_AM::getSOImmVal(Arg: -(uint32_t)Imm) != -1; |
| 19645 | if (Subtarget->isThumb2()) |
| 19646 | return ARM_AM::getT2SOImmVal(Arg: (uint32_t)Imm) != -1 || |
| 19647 | ARM_AM::getT2SOImmVal(Arg: -(uint32_t)Imm) != -1; |
| 19648 | // Thumb1 doesn't have cmn, and only 8-bit immediates. |
| 19649 | return Imm >= 0 && Imm <= 255; |
| 19650 | } |
| 19651 | |
| 19652 | /// isLegalAddImmediate - Return true if the specified immediate is a legal add |
| 19653 | /// *or sub* immediate, that is the target has add or sub instructions which can |
| 19654 | /// add a register with the immediate without having to materialize the |
| 19655 | /// immediate into a register. |
| 19656 | bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const { |
| 19657 | // Same encoding for add/sub, just flip the sign. |
| 19658 | uint64_t AbsImm = AbsoluteValue(X: Imm); |
| 19659 | if (!Subtarget->isThumb()) |
| 19660 | return ARM_AM::getSOImmVal(Arg: AbsImm) != -1; |
| 19661 | if (Subtarget->isThumb2()) |
| 19662 | return ARM_AM::getT2SOImmVal(Arg: AbsImm) != -1; |
| 19663 | // Thumb1 only has 8-bit unsigned immediate. |
| 19664 | return AbsImm <= 255; |
| 19665 | } |
| 19666 | |
| 19667 | // Return false to prevent folding |
| 19668 | // (mul (add r, c0), c1) -> (add (mul r, c1), c0*c1) in DAGCombine, |
| 19669 | // if the folding leads to worse code. |
| 19670 | bool ARMTargetLowering::isMulAddWithConstProfitable(SDValue AddNode, |
| 19671 | SDValue ConstNode) const { |
| 19672 | // Let the DAGCombiner decide for vector types and large types. |
| 19673 | const EVT VT = AddNode.getValueType(); |
| 19674 | if (VT.isVector() || VT.getScalarSizeInBits() > 32) |
| 19675 | return true; |
| 19676 | |
| 19677 | // It is worse if c0 is legal add immediate, while c1*c0 is not |
| 19678 | // and has to be composed by at least two instructions. |
| 19679 | const ConstantSDNode *C0Node = cast<ConstantSDNode>(Val: AddNode.getOperand(i: 1)); |
| 19680 | const ConstantSDNode *C1Node = cast<ConstantSDNode>(Val&: ConstNode); |
| 19681 | const int64_t C0 = C0Node->getSExtValue(); |
| 19682 | APInt CA = C0Node->getAPIntValue() * C1Node->getAPIntValue(); |
| 19683 | if (!isLegalAddImmediate(Imm: C0) || isLegalAddImmediate(Imm: CA.getSExtValue())) |
| 19684 | return true; |
| 19685 | if (ConstantMaterializationCost(Val: (unsigned)CA.getZExtValue(), Subtarget) > 1) |
| 19686 | return false; |
| 19687 | |
| 19688 | // Default to true and let the DAGCombiner decide. |
| 19689 | return true; |
| 19690 | } |
| 19691 | |
| 19692 | static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, |
| 19693 | bool isSEXTLoad, SDValue &Base, |
| 19694 | SDValue &Offset, bool &isInc, |
| 19695 | SelectionDAG &DAG) { |
| 19696 | if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) |
| 19697 | return false; |
| 19698 | |
| 19699 | if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { |
| 19700 | // AddressingMode 3 |
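|       | // AddressingMode 3 (LDRH / LDRSH / LDRSB / STRH) takes a base register plus
|       | // or minus either an 8-bit immediate or a register offset.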
| 19701 | Base = Ptr->getOperand(Num: 0); |
| 19702 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Val: Ptr->getOperand(Num: 1))) { |
| 19703 | int RHSC = (int)RHS->getZExtValue(); |
| 19704 | if (RHSC < 0 && RHSC > -256) { |
| 19705 | assert(Ptr->getOpcode() == ISD::ADD); |
| 19706 | isInc = false; |
| 19707 | Offset = DAG.getConstant(Val: -RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
| 19708 | return true; |
| 19709 | } |
| 19710 | } |
| 19711 | isInc = (Ptr->getOpcode() == ISD::ADD); |
| 19712 | Offset = Ptr->getOperand(Num: 1); |
| 19713 | return true; |
| 19714 | } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { |
| 19715 | // AddressingMode 2 |
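|       | // AddressingMode 2 (LDR / LDRB / STR / STRB) takes a base register plus or
|       | // minus a 12-bit immediate or an (optionally shifted) register offset.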
| 19716 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Val: Ptr->getOperand(Num: 1))) { |
| 19717 | int RHSC = (int)RHS->getZExtValue(); |
| 19718 | if (RHSC < 0 && RHSC > -0x1000) { |
| 19719 | assert(Ptr->getOpcode() == ISD::ADD); |
| 19720 | isInc = false; |
| 19721 | Offset = DAG.getConstant(Val: -RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
| 19722 | Base = Ptr->getOperand(Num: 0); |
| 19723 | return true; |
| 19724 | } |
| 19725 | } |
| 19726 | |
| 19727 | if (Ptr->getOpcode() == ISD::ADD) { |
| 19728 | isInc = true; |
| 19729 | ARM_AM::ShiftOpc ShOpcVal =
| 19730 | ARM_AM::getShiftOpcForNode(Opcode: Ptr->getOperand(Num: 0).getOpcode()); |
| 19731 | if (ShOpcVal != ARM_AM::no_shift) { |
| 19732 | Base = Ptr->getOperand(Num: 1); |
| 19733 | Offset = Ptr->getOperand(Num: 0); |
| 19734 | } else { |
| 19735 | Base = Ptr->getOperand(Num: 0); |
| 19736 | Offset = Ptr->getOperand(Num: 1); |
| 19737 | } |
| 19738 | return true; |
| 19739 | } |
| 19740 | |
| 19741 | isInc = (Ptr->getOpcode() == ISD::ADD); |
| 19742 | Base = Ptr->getOperand(Num: 0); |
| 19743 | Offset = Ptr->getOperand(Num: 1); |
| 19744 | return true; |
| 19745 | } |
| 19746 | |
| 19747 | // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. |
| 19748 | return false; |
| 19749 | } |
| 19750 | |
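|       | // Thumb-2 pre/post-indexed loads and stores only accept an 8-bit immediate
|       | // offset, so only constant offsets in the range (-256, 256) are matched here.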
| 19751 | static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, |
| 19752 | bool isSEXTLoad, SDValue &Base, |
| 19753 | SDValue &Offset, bool &isInc, |
| 19754 | SelectionDAG &DAG) { |
| 19755 | if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) |
| 19756 | return false; |
| 19757 | |
| 19758 | Base = Ptr->getOperand(Num: 0); |
| 19759 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Val: Ptr->getOperand(Num: 1))) { |
| 19760 | int RHSC = (int)RHS->getZExtValue(); |
| 19761 | if (RHSC < 0 && RHSC > -0x100) { // 8 bits. |
| 19762 | assert(Ptr->getOpcode() == ISD::ADD); |
| 19763 | isInc = false; |
| 19764 | Offset = DAG.getConstant(Val: -RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
| 19765 | return true; |
| 19766 | } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. |
| 19767 | isInc = Ptr->getOpcode() == ISD::ADD; |
| 19768 | Offset = DAG.getConstant(Val: RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
| 19769 | return true; |
| 19770 | } |
| 19771 | } |
| 19772 | |
| 19773 | return false; |
| 19774 | } |
| 19775 | |
| 19776 | static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, Align Alignment, |
| 19777 | bool isSEXTLoad, bool IsMasked, bool isLE, |
| 19778 | SDValue &Base, SDValue &Offset, |
| 19779 | bool &isInc, SelectionDAG &DAG) { |
| 19780 | if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) |
| 19781 | return false; |
| 19782 | if (!isa<ConstantSDNode>(Val: Ptr->getOperand(Num: 1))) |
| 19783 | return false; |
| 19784 | |
| 19785 | // We allow LE non-masked loads to change the type (for example use a vldrb.8 |
| 19786 | // as opposed to a vldrw.32). This can allow extra addressing modes or |
| 19787 | // alignments for what is otherwise an equivalent instruction. |
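|       | // For example, on little-endian a non-masked v4i32 load at a +5 byte offset
|       | // cannot use vldrw.32 (the offset must be a multiple of 4), but the same
|       | // bytes can be loaded with vldrb.8, so the offset is still accepted.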
| 19788 | bool CanChangeType = isLE && !IsMasked; |
| 19789 | |
| 19790 | ConstantSDNode *RHS = cast<ConstantSDNode>(Val: Ptr->getOperand(Num: 1)); |
| 19791 | int RHSC = (int)RHS->getZExtValue(); |
| 19792 | |
| 19793 | auto IsInRange = [&](int RHSC, int Limit, int Scale) { |
| 19794 | if (RHSC < 0 && RHSC > -Limit * Scale && RHSC % Scale == 0) { |
| 19795 | assert(Ptr->getOpcode() == ISD::ADD); |
| 19796 | isInc = false; |
| 19797 | Offset = DAG.getConstant(Val: -RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
| 19798 | return true; |
| 19799 | } else if (RHSC > 0 && RHSC < Limit * Scale && RHSC % Scale == 0) { |
| 19800 | isInc = Ptr->getOpcode() == ISD::ADD; |
| 19801 | Offset = DAG.getConstant(Val: RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
| 19802 | return true; |
| 19803 | } |
| 19804 | return false; |
| 19805 | }; |
| 19806 | |
| 19807 | // Try to find a matching instruction based on s/zext, Alignment, Offset and |
| 19808 | // (in BE/masked) type. |
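|       | // The 0x80 limit reflects the 7-bit immediate offset of the MVE pre/post-
|       | // indexed VLDR/VSTR forms, which is scaled by the access size below.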
| 19809 | Base = Ptr->getOperand(Num: 0); |
| 19810 | if (VT == MVT::v4i16) { |
| 19811 | if (Alignment >= 2 && IsInRange(RHSC, 0x80, 2)) |
| 19812 | return true; |
| 19813 | } else if (VT == MVT::v4i8 || VT == MVT::v8i8) { |
| 19814 | if (IsInRange(RHSC, 0x80, 1)) |
| 19815 | return true; |
| 19816 | } else if (Alignment >= 4 && |
| 19817 | (CanChangeType || VT == MVT::v4i32 || VT == MVT::v4f32) && |
| 19818 | IsInRange(RHSC, 0x80, 4)) |
| 19819 | return true; |
| 19820 | else if (Alignment >= 2 && |
| 19821 | (CanChangeType || VT == MVT::v8i16 || VT == MVT::v8f16) && |
| 19822 | IsInRange(RHSC, 0x80, 2)) |
| 19823 | return true; |
| 19824 | else if ((CanChangeType || VT == MVT::v16i8) && IsInRange(RHSC, 0x80, 1)) |
| 19825 | return true; |
| 19826 | return false; |
| 19827 | } |
| 19828 | |
| 19829 | /// getPreIndexedAddressParts - Returns true by value, and sets the base
| 19830 | /// pointer, offset pointer, and addressing mode by reference, if the node's
| 19831 | /// address can be legally represented as a pre-indexed load / store address.
| 19832 | bool |
| 19833 | ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, |
| 19834 | SDValue &Offset, |
| 19835 | ISD::MemIndexedMode &AM, |
| 19836 | SelectionDAG &DAG) const { |
| 19837 | if (Subtarget->isThumb1Only()) |
| 19838 | return false; |
| 19839 | |
| 19840 | EVT VT; |
| 19841 | SDValue Ptr; |
| 19842 | Align Alignment; |
| 19843 | bool isSEXTLoad = false; |
| 19844 | bool IsMasked = false; |
| 19845 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: N)) { |
| 19846 | Ptr = LD->getBasePtr(); |
| 19847 | VT = LD->getMemoryVT(); |
| 19848 | Alignment = LD->getAlign(); |
| 19849 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
| 19850 | } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Val: N)) { |
| 19851 | Ptr = ST->getBasePtr(); |
| 19852 | VT = ST->getMemoryVT(); |
| 19853 | Alignment = ST->getAlign(); |
| 19854 | } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(Val: N)) { |
| 19855 | Ptr = LD->getBasePtr(); |
| 19856 | VT = LD->getMemoryVT(); |
| 19857 | Alignment = LD->getAlign(); |
| 19858 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
| 19859 | IsMasked = true; |
| 19860 | } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(Val: N)) { |
| 19861 | Ptr = ST->getBasePtr(); |
| 19862 | VT = ST->getMemoryVT(); |
| 19863 | Alignment = ST->getAlign(); |
| 19864 | IsMasked = true; |
| 19865 | } else |
| 19866 | return false; |
| 19867 | |
| 19868 | bool isInc; |
| 19869 | bool isLegal = false; |
| 19870 | if (VT.isVector()) |
| 19871 | isLegal = Subtarget->hasMVEIntegerOps() && |
| 19872 | getMVEIndexedAddressParts( |
| 19873 | Ptr: Ptr.getNode(), VT, Alignment, isSEXTLoad, IsMasked, |
| 19874 | isLE: Subtarget->isLittle(), Base, Offset, isInc, DAG); |
| 19875 | else { |
| 19876 | if (Subtarget->isThumb2()) |
| 19877 | isLegal = getT2IndexedAddressParts(Ptr: Ptr.getNode(), VT, isSEXTLoad, Base, |
| 19878 | Offset, isInc, DAG); |
| 19879 | else |
| 19880 | isLegal = getARMIndexedAddressParts(Ptr: Ptr.getNode(), VT, isSEXTLoad, Base, |
| 19881 | Offset, isInc, DAG); |
| 19882 | } |
| 19883 | if (!isLegal) |
| 19884 | return false; |
| 19885 | |
| 19886 | AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; |
| 19887 | return true; |
| 19888 | } |
| 19889 | |
| 19890 | /// getPostIndexedAddressParts - Returns true by value, and sets the base
| 19891 | /// pointer, offset pointer, and addressing mode by reference, if this node can
| 19892 | /// be combined with a load / store to form a post-indexed load / store.
| 19893 | bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, |
| 19894 | SDValue &Base, |
| 19895 | SDValue &Offset, |
| 19896 | ISD::MemIndexedMode &AM, |
| 19897 | SelectionDAG &DAG) const { |
| 19898 | EVT VT; |
| 19899 | SDValue Ptr; |
| 19900 | Align Alignment; |
| 19901 | bool isSEXTLoad = false, isNonExt; |
| 19902 | bool IsMasked = false; |
| 19903 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: N)) { |
| 19904 | VT = LD->getMemoryVT(); |
| 19905 | Ptr = LD->getBasePtr(); |
| 19906 | Alignment = LD->getAlign(); |
| 19907 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
| 19908 | isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD; |
| 19909 | } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Val: N)) { |
| 19910 | VT = ST->getMemoryVT(); |
| 19911 | Ptr = ST->getBasePtr(); |
| 19912 | Alignment = ST->getAlign(); |
| 19913 | isNonExt = !ST->isTruncatingStore(); |
| 19914 | } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(Val: N)) { |
| 19915 | VT = LD->getMemoryVT(); |
| 19916 | Ptr = LD->getBasePtr(); |
| 19917 | Alignment = LD->getAlign(); |
| 19918 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
| 19919 | isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD; |
| 19920 | IsMasked = true; |
| 19921 | } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(Val: N)) { |
| 19922 | VT = ST->getMemoryVT(); |
| 19923 | Ptr = ST->getBasePtr(); |
| 19924 | Alignment = ST->getAlign(); |
| 19925 | isNonExt = !ST->isTruncatingStore(); |
| 19926 | IsMasked = true; |
| 19927 | } else |
| 19928 | return false; |
| 19929 | |
| 19930 | if (Subtarget->isThumb1Only()) { |
| 19931 | // Thumb-1 can do a limited post-inc load or store as an updating LDM. It |
| 19932 | // must be non-extending/truncating, i32, with an offset of 4. |
| 19933 | assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!" ); |
| 19934 | if (Op->getOpcode() != ISD::ADD || !isNonExt) |
| 19935 | return false; |
| 19936 | auto *RHS = dyn_cast<ConstantSDNode>(Val: Op->getOperand(Num: 1)); |
| 19937 | if (!RHS || RHS->getZExtValue() != 4) |
| 19938 | return false; |
| 19939 | if (Alignment < Align(4)) |
| 19940 | return false; |
| 19941 | |
| 19942 | Offset = Op->getOperand(Num: 1); |
| 19943 | Base = Op->getOperand(Num: 0); |
| 19944 | AM = ISD::POST_INC; |
| 19945 | return true; |
| 19946 | } |
| 19947 | |
| 19948 | bool isInc; |
| 19949 | bool isLegal = false; |
| 19950 | if (VT.isVector()) |
| 19951 | isLegal = Subtarget->hasMVEIntegerOps() && |
| 19952 | getMVEIndexedAddressParts(Ptr: Op, VT, Alignment, isSEXTLoad, IsMasked, |
| 19953 | isLE: Subtarget->isLittle(), Base, Offset, |
| 19954 | isInc, DAG); |
| 19955 | else { |
| 19956 | if (Subtarget->isThumb2()) |
| 19957 | isLegal = getT2IndexedAddressParts(Ptr: Op, VT, isSEXTLoad, Base, Offset, |
| 19958 | isInc, DAG); |
| 19959 | else |
| 19960 | isLegal = getARMIndexedAddressParts(Ptr: Op, VT, isSEXTLoad, Base, Offset, |
| 19961 | isInc, DAG); |
| 19962 | } |
| 19963 | if (!isLegal) |
| 19964 | return false; |
| 19965 | |
| 19966 | if (Ptr != Base) { |
| 19967 | // Swap base ptr and offset to catch more post-index load / store when |
| 19968 | // it's legal. In Thumb2 mode, offset must be an immediate. |
| 19969 | if (Ptr == Offset && Op->getOpcode() == ISD::ADD && |
| 19970 | !Subtarget->isThumb2()) |
| 19971 | std::swap(a&: Base, b&: Offset); |
| 19972 | |
| 19973 | // Post-indexed load / store update the base pointer. |
| 19974 | if (Ptr != Base) |
| 19975 | return false; |
| 19976 | } |
| 19977 | |
| 19978 | AM = isInc ? ISD::POST_INC : ISD::POST_DEC; |
| 19979 | return true; |
| 19980 | } |
| 19981 | |
| 19982 | void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, |
| 19983 | KnownBits &Known, |
| 19984 | const APInt &DemandedElts, |
| 19985 | const SelectionDAG &DAG, |
| 19986 | unsigned Depth) const { |
| 19987 | unsigned BitWidth = Known.getBitWidth(); |
| 19988 | Known.resetAll(); |
| 19989 | switch (Op.getOpcode()) { |
| 19990 | default: break; |
| 19991 | case ARMISD::ADDC: |
| 19992 | case ARMISD::ADDE: |
| 19993 | case ARMISD::SUBC: |
| 19994 | case ARMISD::SUBE: |
| 19995 | // Special cases when we convert a carry to a boolean. |
| 19996 | if (Op.getResNo() == 0) { |
| 19997 | SDValue LHS = Op.getOperand(i: 0); |
| 19998 | SDValue RHS = Op.getOperand(i: 1); |
| 19999 | // (ADDE 0, 0, C) will give us a single bit. |
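|       | // The result is just the incoming carry zero-extended to the full width, so
|       | // every bit above bit 0 is known to be zero.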
| 20000 | if (Op->getOpcode() == ARMISD::ADDE && isNullConstant(V: LHS) && |
| 20001 | isNullConstant(V: RHS)) { |
| 20002 | Known.Zero |= APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - 1); |
| 20003 | return; |
| 20004 | } |
| 20005 | } |
| 20006 | break; |
| 20007 | case ARMISD::CMOV: { |
| 20008 | // Bits are known zero/one if known on the LHS and RHS. |
| 20009 | Known = DAG.computeKnownBits(Op: Op.getOperand(i: 0), Depth: Depth+1); |
| 20010 | if (Known.isUnknown()) |
| 20011 | return; |
| 20012 | |
| 20013 | KnownBits KnownRHS = DAG.computeKnownBits(Op: Op.getOperand(i: 1), Depth: Depth+1); |
| 20014 | Known = Known.intersectWith(RHS: KnownRHS); |
| 20015 | return; |
| 20016 | } |
| 20017 | case ISD::INTRINSIC_W_CHAIN: { |
| 20018 | Intrinsic::ID IntID = |
| 20019 | static_cast<Intrinsic::ID>(Op->getConstantOperandVal(Num: 1)); |
| 20020 | switch (IntID) { |
| 20021 | default: return; |
| 20022 | case Intrinsic::arm_ldaex: |
| 20023 | case Intrinsic::arm_ldrex: { |
| 20024 | EVT VT = cast<MemIntrinsicSDNode>(Val: Op)->getMemoryVT(); |
| 20025 | unsigned MemBits = VT.getScalarSizeInBits(); |
| 20026 | Known.Zero |= APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - MemBits); |
| 20027 | return; |
| 20028 | } |
| 20029 | } |
| 20030 | } |
| 20031 | case ARMISD::BFI: { |
| 20032 | // Conservatively, we can recurse down the first operand |
| 20033 | // and just mask out all affected bits. |
| 20034 | Known = DAG.computeKnownBits(Op: Op.getOperand(i: 0), Depth: Depth + 1); |
| 20035 | |
| 20036 | // The operand to BFI is already a mask suitable for removing the bits it |
| 20037 | // sets. |
| 20038 | const APInt &Mask = Op.getConstantOperandAPInt(i: 2); |
| 20039 | Known.Zero &= Mask; |
| 20040 | Known.One &= Mask; |
| 20041 | return; |
| 20042 | } |
| 20043 | case ARMISD::VGETLANEs: |
| 20044 | case ARMISD::VGETLANEu: { |
| 20045 | const SDValue &SrcSV = Op.getOperand(i: 0); |
| 20046 | EVT VecVT = SrcSV.getValueType(); |
| 20047 | assert(VecVT.isVector() && "VGETLANE expected a vector type" ); |
| 20048 | const unsigned NumSrcElts = VecVT.getVectorNumElements(); |
| 20049 | ConstantSDNode *Pos = cast<ConstantSDNode>(Val: Op.getOperand(i: 1).getNode()); |
| 20050 | assert(Pos->getAPIntValue().ult(NumSrcElts) && |
| 20051 | "VGETLANE index out of bounds" ); |
| 20052 | unsigned Idx = Pos->getZExtValue(); |
| 20053 | APInt DemandedElt = APInt::getOneBitSet(numBits: NumSrcElts, BitNo: Idx); |
| 20054 | Known = DAG.computeKnownBits(Op: SrcSV, DemandedElts: DemandedElt, Depth: Depth + 1); |
| 20055 | |
| 20056 | EVT VT = Op.getValueType(); |
| 20057 | const unsigned DstSz = VT.getScalarSizeInBits(); |
| 20058 | const unsigned SrcSz = VecVT.getVectorElementType().getSizeInBits(); |
| 20059 | (void)SrcSz; |
| 20060 | assert(SrcSz == Known.getBitWidth()); |
| 20061 | assert(DstSz > SrcSz); |
| 20062 | if (Op.getOpcode() == ARMISD::VGETLANEs) |
| 20063 | Known = Known.sext(BitWidth: DstSz); |
| 20064 | else { |
| 20065 | Known = Known.zext(BitWidth: DstSz); |
| 20066 | } |
| 20067 | assert(DstSz == Known.getBitWidth()); |
| 20068 | break; |
| 20069 | } |
| 20070 | case ARMISD::VMOVrh: { |
| 20071 | KnownBits KnownOp = DAG.computeKnownBits(Op: Op->getOperand(Num: 0), Depth: Depth + 1); |
| 20072 | assert(KnownOp.getBitWidth() == 16); |
| 20073 | Known = KnownOp.zext(BitWidth: 32); |
| 20074 | break; |
| 20075 | } |
| 20076 | case ARMISD::CSINC: |
| 20077 | case ARMISD::CSINV: |
| 20078 | case ARMISD::CSNEG: { |
| 20079 | KnownBits KnownOp0 = DAG.computeKnownBits(Op: Op->getOperand(Num: 0), Depth: Depth + 1); |
| 20080 | KnownBits KnownOp1 = DAG.computeKnownBits(Op: Op->getOperand(Num: 1), Depth: Depth + 1); |
| 20081 | |
| 20082 | // The result is either: |
| 20083 | // CSINC: KnownOp0 or KnownOp1 + 1 |
| 20084 | // CSINV: KnownOp0 or ~KnownOp1 |
| 20085 | // CSNEG: KnownOp0 or KnownOp1 * -1 |
| 20086 | if (Op.getOpcode() == ARMISD::CSINC) |
| 20087 | KnownOp1 = |
| 20088 | KnownBits::add(LHS: KnownOp1, RHS: KnownBits::makeConstant(C: APInt(32, 1))); |
| 20089 | else if (Op.getOpcode() == ARMISD::CSINV) |
| 20090 | std::swap(a&: KnownOp1.Zero, b&: KnownOp1.One); |
| 20091 | else if (Op.getOpcode() == ARMISD::CSNEG) |
| 20092 | KnownOp1 = KnownBits::mul(LHS: KnownOp1, |
| 20093 | RHS: KnownBits::makeConstant(C: APInt::getAllOnes(numBits: 32))); |
| 20094 | |
| 20095 | Known = KnownOp0.intersectWith(RHS: KnownOp1); |
| 20096 | break; |
| 20097 | } |
| 20098 | } |
| 20099 | } |
| 20100 | |
| 20101 | bool ARMTargetLowering::targetShrinkDemandedConstant( |
| 20102 | SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, |
| 20103 | TargetLoweringOpt &TLO) const { |
| 20104 | // Delay this optimization, so we don't have to deal with illegal types, or
| 20105 | // block other optimizations.
| 20106 | if (!TLO.LegalOps) |
| 20107 | return false; |
| 20108 | |
| 20109 | // Only optimize AND for now. |
| 20110 | if (Op.getOpcode() != ISD::AND) |
| 20111 | return false; |
| 20112 | |
| 20113 | EVT VT = Op.getValueType(); |
| 20114 | |
| 20115 | // Ignore vectors. |
| 20116 | if (VT.isVector()) |
| 20117 | return false; |
| 20118 | |
| 20119 | assert(VT == MVT::i32 && "Unexpected integer type" ); |
| 20120 | |
| 20121 | // Make sure the RHS really is a constant. |
| 20122 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val: Op.getOperand(i: 1)); |
| 20123 | if (!C) |
| 20124 | return false; |
| 20125 | |
| 20126 | unsigned Mask = C->getZExtValue(); |
| 20127 | |
| 20128 | unsigned Demanded = DemandedBits.getZExtValue(); |
| 20129 | unsigned ShrunkMask = Mask & Demanded; |
| 20130 | unsigned ExpandedMask = Mask | ~Demanded; |
| 20131 | |
| 20132 | // If the mask is all zeros, let the target-independent code replace the |
| 20133 | // result with zero. |
| 20134 | if (ShrunkMask == 0) |
| 20135 | return false; |
| 20136 | |
| 20137 | // If the mask is all ones, erase the AND. (Currently, the target-independent |
| 20138 | // code won't do this, so we have to do it explicitly to avoid an infinite |
| 20139 | // loop in obscure cases.) |
| 20140 | if (ExpandedMask == ~0U) |
| 20141 | return TLO.CombineTo(O: Op, N: Op.getOperand(i: 0)); |
| 20142 | |
| 20143 | auto IsLegalMask = [ShrunkMask, ExpandedMask](unsigned Mask) -> bool { |
| 20144 | return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0; |
| 20145 | }; |
| 20146 | auto UseMask = [Mask, Op, VT, &TLO](unsigned NewMask) -> bool { |
| 20147 | if (NewMask == Mask) |
| 20148 | return true; |
| 20149 | SDLoc DL(Op); |
| 20150 | SDValue NewC = TLO.DAG.getConstant(Val: NewMask, DL, VT); |
| 20151 | SDValue NewOp = TLO.DAG.getNode(Opcode: ISD::AND, DL, VT, N1: Op.getOperand(i: 0), N2: NewC); |
| 20152 | return TLO.CombineTo(O: Op, N: NewOp); |
| 20153 | }; |
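|       | // For example, if the AND mask is 0x800000FF and bit 31 is not demanded,
|       | // ShrunkMask is 0xFF; 0xFF both covers ShrunkMask and stays within
|       | // ExpandedMask, so the AND can be rewritten to use 0xFF (a UXTB).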
| 20154 | |
| 20155 | // Prefer uxtb mask. |
| 20156 | if (IsLegalMask(0xFF)) |
| 20157 | return UseMask(0xFF); |
| 20158 | |
| 20159 | // Prefer uxth mask. |
| 20160 | if (IsLegalMask(0xFFFF)) |
| 20161 | return UseMask(0xFFFF); |
| 20162 | |
| 20163 | // [1, 255] is Thumb1 movs+ands, legal immediate for ARM/Thumb2. |
| 20164 | // FIXME: Prefer a contiguous sequence of bits for other optimizations. |
| 20165 | if (ShrunkMask < 256) |
| 20166 | return UseMask(ShrunkMask); |
| 20167 | |
| 20168 | // [-256, -2] is Thumb1 movs+bics, legal immediate for ARM/Thumb2. |
| 20169 | // FIXME: Prefer a contiguous sequence of bits for other optimizations. |
| 20170 | if ((int)ExpandedMask <= -2 && (int)ExpandedMask >= -256) |
| 20171 | return UseMask(ExpandedMask); |
| 20172 | |
| 20173 | // Potential improvements: |
| 20174 | // |
| 20175 | // We could try to recognize lsls+lsrs or lsrs+lsls pairs here. |
| 20176 | // We could try to prefer Thumb1 immediates which can be lowered to a |
| 20177 | // two-instruction sequence. |
| 20178 | // We could try to recognize more legal ARM/Thumb2 immediates here. |
| 20179 | |
| 20180 | return false; |
| 20181 | } |
| 20182 | |
| 20183 | bool ARMTargetLowering::SimplifyDemandedBitsForTargetNode( |
| 20184 | SDValue Op, const APInt &OriginalDemandedBits, |
| 20185 | const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, |
| 20186 | unsigned Depth) const { |
| 20187 | unsigned Opc = Op.getOpcode(); |
| 20188 | |
| 20189 | switch (Opc) { |
| 20190 | case ARMISD::ASRL: |
| 20191 | case ARMISD::LSRL: { |
| 20192 | // If this is result 0 and the other result is unused, see if the demanded
| 20193 | // bits allow us to shrink this long shift into a standard small shift in |
| 20194 | // the opposite direction. |
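|       | // For example, for a long shift right by 24 where only bits [8,31] of the
|       | // low result are demanded, those bits come entirely from the high input, so
|       | // the node can be replaced by (shl hi, 8).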
| 20195 | if (Op.getResNo() == 0 && !Op->hasAnyUseOfValue(Value: 1) && |
| 20196 | isa<ConstantSDNode>(Val: Op->getOperand(Num: 2))) { |
| 20197 | unsigned ShAmt = Op->getConstantOperandVal(Num: 2); |
| 20198 | if (ShAmt < 32 && OriginalDemandedBits.isSubsetOf(RHS: APInt::getAllOnes(numBits: 32) |
| 20199 | << (32 - ShAmt))) |
| 20200 | return TLO.CombineTo( |
| 20201 | O: Op, N: TLO.DAG.getNode( |
| 20202 | Opcode: ISD::SHL, DL: SDLoc(Op), VT: MVT::i32, N1: Op.getOperand(i: 1), |
| 20203 | N2: TLO.DAG.getConstant(Val: 32 - ShAmt, DL: SDLoc(Op), VT: MVT::i32))); |
| 20204 | } |
| 20205 | break; |
| 20206 | } |
| 20207 | case ARMISD::VBICIMM: { |
| 20208 | SDValue Op0 = Op.getOperand(i: 0); |
| 20209 | unsigned ModImm = Op.getConstantOperandVal(i: 1); |
| 20210 | unsigned EltBits = 0; |
| 20211 | uint64_t Mask = ARM_AM::decodeVMOVModImm(ModImm, EltBits); |
| 20212 | if ((OriginalDemandedBits & Mask) == 0) |
| 20213 | return TLO.CombineTo(O: Op, N: Op0); |
| 20214 | } |
| 20215 | } |
| 20216 | |
| 20217 | return TargetLowering::SimplifyDemandedBitsForTargetNode( |
| 20218 | Op, DemandedBits: OriginalDemandedBits, DemandedElts: OriginalDemandedElts, Known, TLO, Depth); |
| 20219 | } |
| 20220 | |
| 20221 | //===----------------------------------------------------------------------===// |
| 20222 | // ARM Inline Assembly Support |
| 20223 | //===----------------------------------------------------------------------===// |
| 20224 | |
| 20225 | bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const { |
| 20226 | // Looking for "rev", which requires V6 or later.
| 20227 | if (!Subtarget->hasV6Ops()) |
| 20228 | return false; |
| 20229 | |
| 20230 | InlineAsm *IA = cast<InlineAsm>(Val: CI->getCalledOperand()); |
| 20231 | StringRef AsmStr = IA->getAsmString(); |
| 20232 | SmallVector<StringRef, 4> AsmPieces; |
| 20233 | SplitString(Source: AsmStr, OutFragments&: AsmPieces, Delimiters: ";\n" ); |
| 20234 | |
| 20235 | switch (AsmPieces.size()) { |
| 20236 | default: return false; |
| 20237 | case 1: |
| 20238 | AsmStr = AsmPieces[0]; |
| 20239 | AsmPieces.clear(); |
| 20240 | SplitString(Source: AsmStr, OutFragments&: AsmPieces, Delimiters: " \t," ); |
| 20241 | |
| 20242 | // rev $0, $1 |
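|       | // For example, inline asm "rev $0, $1" with an "=l,l" constraint string on
|       | // an i32 value is replaced with a call to the llvm.bswap intrinsic.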
| 20243 | if (AsmPieces.size() == 3 && AsmPieces[0] == "rev" && |
| 20244 | AsmPieces[1] == "$0" && AsmPieces[2] == "$1" && |
| 20245 | IA->getConstraintString().starts_with(Prefix: "=l,l" )) { |
| 20246 | IntegerType *Ty = dyn_cast<IntegerType>(Val: CI->getType()); |
| 20247 | if (Ty && Ty->getBitWidth() == 32) |
| 20248 | return IntrinsicLowering::LowerToByteSwap(CI); |
| 20249 | } |
| 20250 | break; |
| 20251 | } |
| 20252 | |
| 20253 | return false; |
| 20254 | } |
| 20255 | |
| 20256 | const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const { |
| 20257 | // At this point, we have to lower this constraint to something else, so we |
| 20258 | // lower it to an "r" or "w". However, by doing this we will force the result |
| 20259 | // to be in register, while the X constraint is much more permissive. |
| 20260 | // |
| 20261 | // Although we are correct (we are free to emit anything, without |
| 20262 | // constraints), we might break use cases that would expect us to be more |
| 20263 | // efficient and emit something else. |
| 20264 | if (!Subtarget->hasVFP2Base()) |
| 20265 | return "r" ; |
| 20266 | if (ConstraintVT.isFloatingPoint()) |
| 20267 | return "w" ; |
| 20268 | if (ConstraintVT.isVector() && Subtarget->hasNEON() && |
| 20269 | (ConstraintVT.getSizeInBits() == 64 || |
| 20270 | ConstraintVT.getSizeInBits() == 128)) |
| 20271 | return "w" ; |
| 20272 | |
| 20273 | return "r" ; |
| 20274 | } |
| 20275 | |
| 20276 | /// getConstraintType - Given a constraint letter, return the type of |
| 20277 | /// constraint it is for this target. |
| 20278 | ARMTargetLowering::ConstraintType |
| 20279 | ARMTargetLowering::getConstraintType(StringRef Constraint) const { |
| 20280 | unsigned S = Constraint.size(); |
| 20281 | if (S == 1) { |
| 20282 | switch (Constraint[0]) { |
| 20283 | default: break; |
| 20284 | case 'l': return C_RegisterClass; |
| 20285 | case 'w': return C_RegisterClass; |
| 20286 | case 'h': return C_RegisterClass; |
| 20287 | case 'x': return C_RegisterClass; |
| 20288 | case 't': return C_RegisterClass; |
| 20289 | case 'j': return C_Immediate; // Constant for movw. |
| 20290 | // An address with a single base register. Due to the way we |
| 20291 | // currently handle addresses it is the same as an 'r' memory constraint. |
| 20292 | case 'Q': return C_Memory; |
| 20293 | } |
| 20294 | } else if (S == 2) { |
| 20295 | switch (Constraint[0]) { |
| 20296 | default: break; |
| 20297 | case 'T': return C_RegisterClass; |
| 20298 | // All 'U+' constraints are addresses. |
| 20299 | case 'U': return C_Memory; |
| 20300 | } |
| 20301 | } |
| 20302 | return TargetLowering::getConstraintType(Constraint); |
| 20303 | } |
| 20304 | |
| 20305 | /// Examine constraint type and operand type and determine a weight value. |
| 20306 | /// This object must already have been set up with the operand type |
| 20307 | /// and the current alternative constraint selected. |
| 20308 | TargetLowering::ConstraintWeight |
| 20309 | ARMTargetLowering::getSingleConstraintMatchWeight( |
| 20310 | AsmOperandInfo &info, const char *constraint) const { |
| 20311 | ConstraintWeight weight = CW_Invalid; |
| 20312 | Value *CallOperandVal = info.CallOperandVal; |
| 20313 | // If we don't have a value, we can't do a match, |
| 20314 | // but allow it at the lowest weight. |
| 20315 | if (!CallOperandVal) |
| 20316 | return CW_Default; |
| 20317 | Type *type = CallOperandVal->getType(); |
| 20318 | // Look at the constraint type. |
| 20319 | switch (*constraint) { |
| 20320 | default: |
| 20321 | weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); |
| 20322 | break; |
| 20323 | case 'l': |
| 20324 | if (type->isIntegerTy()) { |
| 20325 | if (Subtarget->isThumb()) |
| 20326 | weight = CW_SpecificReg; |
| 20327 | else |
| 20328 | weight = CW_Register; |
| 20329 | } |
| 20330 | break; |
| 20331 | case 'w': |
| 20332 | if (type->isFloatingPointTy()) |
| 20333 | weight = CW_Register; |
| 20334 | break; |
| 20335 | } |
| 20336 | return weight; |
| 20337 | } |
| 20338 | |
| 20339 | using RCPair = std::pair<unsigned, const TargetRegisterClass *>; |
| 20340 | |
| 20341 | RCPair ARMTargetLowering::getRegForInlineAsmConstraint( |
| 20342 | const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const { |
| 20343 | switch (Constraint.size()) { |
| 20344 | case 1: |
| 20345 | // GCC ARM Constraint Letters |
| 20346 | switch (Constraint[0]) { |
| 20347 | case 'l': // Low regs or general regs. |
| 20348 | if (Subtarget->isThumb()) |
| 20349 | return RCPair(0U, &ARM::tGPRRegClass); |
| 20350 | return RCPair(0U, &ARM::GPRRegClass); |
| 20351 | case 'h': // High regs or no regs. |
| 20352 | if (Subtarget->isThumb()) |
| 20353 | return RCPair(0U, &ARM::hGPRRegClass); |
| 20354 | break; |
| 20355 | case 'r': |
| 20356 | if (Subtarget->isThumb1Only()) |
| 20357 | return RCPair(0U, &ARM::tGPRRegClass); |
| 20358 | return RCPair(0U, &ARM::GPRRegClass); |
| 20359 | case 'w': |
| 20360 | if (VT == MVT::Other) |
| 20361 | break; |
| 20362 | if (VT == MVT::f32 || VT == MVT::f16 || VT == MVT::bf16) |
| 20363 | return RCPair(0U, &ARM::SPRRegClass); |
| 20364 | if (VT.getSizeInBits() == 64) |
| 20365 | return RCPair(0U, &ARM::DPRRegClass); |
| 20366 | if (VT.getSizeInBits() == 128) |
| 20367 | return RCPair(0U, &ARM::QPRRegClass); |
| 20368 | break; |
| 20369 | case 'x': |
| 20370 | if (VT == MVT::Other) |
| 20371 | break; |
| 20372 | if (VT == MVT::f32 || VT == MVT::f16 || VT == MVT::bf16) |
| 20373 | return RCPair(0U, &ARM::SPR_8RegClass); |
| 20374 | if (VT.getSizeInBits() == 64) |
| 20375 | return RCPair(0U, &ARM::DPR_8RegClass); |
| 20376 | if (VT.getSizeInBits() == 128) |
| 20377 | return RCPair(0U, &ARM::QPR_8RegClass); |
| 20378 | break; |
| 20379 | case 't': |
| 20380 | if (VT == MVT::Other) |
| 20381 | break; |
| 20382 | if (VT == MVT::f32 || VT == MVT::i32 || VT == MVT::f16 || VT == MVT::bf16) |
| 20383 | return RCPair(0U, &ARM::SPRRegClass); |
| 20384 | if (VT.getSizeInBits() == 64) |
| 20385 | return RCPair(0U, &ARM::DPR_VFP2RegClass); |
| 20386 | if (VT.getSizeInBits() == 128) |
| 20387 | return RCPair(0U, &ARM::QPR_VFP2RegClass); |
| 20388 | break; |
| 20389 | } |
| 20390 | break; |
| 20391 | |
| 20392 | case 2: |
| 20393 | if (Constraint[0] == 'T') { |
| 20394 | switch (Constraint[1]) { |
| 20395 | default: |
| 20396 | break; |
| 20397 | case 'e': |
| 20398 | return RCPair(0U, &ARM::tGPREvenRegClass); |
| 20399 | case 'o': |
| 20400 | return RCPair(0U, &ARM::tGPROddRegClass); |
| 20401 | } |
| 20402 | } |
| 20403 | break; |
| 20404 | |
| 20405 | default: |
| 20406 | break; |
| 20407 | } |
| 20408 | |
| 20409 | if (StringRef("{cc}" ).equals_insensitive(RHS: Constraint)) |
| 20410 | return std::make_pair(x: unsigned(ARM::CPSR), y: &ARM::CCRRegClass); |
| 20411 | |
| 20412 | return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); |
| 20413 | } |
| 20414 | |
| 20415 | /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops |
| 20416 | /// vector. If it is invalid, don't add anything to Ops. |
| 20417 | void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, |
| 20418 | StringRef Constraint, |
| 20419 | std::vector<SDValue> &Ops, |
| 20420 | SelectionDAG &DAG) const { |
| 20421 | SDValue Result; |
| 20422 | |
| 20423 | // Currently only support length 1 constraints. |
| 20424 | if (Constraint.size() != 1) |
| 20425 | return; |
| 20426 | |
| 20427 | char ConstraintLetter = Constraint[0]; |
| 20428 | switch (ConstraintLetter) { |
| 20429 | default: break; |
| 20430 | case 'j': |
| 20431 | case 'I': case 'J': case 'K': case 'L': |
| 20432 | case 'M': case 'N': case 'O': |
| 20433 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: Op); |
| 20434 | if (!C) |
| 20435 | return; |
| 20436 | |
| 20437 | int64_t CVal64 = C->getSExtValue(); |
| 20438 | int CVal = (int) CVal64; |
| 20439 | // None of these constraints allow values larger than 32 bits. Check |
| 20440 | // that the value fits in an int. |
| 20441 | if (CVal != CVal64) |
| 20442 | return; |
| 20443 | |
| 20444 | switch (ConstraintLetter) { |
| 20445 | case 'j': |
| 20446 | // Constant suitable for movw, must be between 0 and |
| 20447 | // 65535. |
| 20448 | if (Subtarget->hasV6T2Ops() || Subtarget->hasV8MBaselineOps())
| 20449 | if (CVal >= 0 && CVal <= 65535) |
| 20450 | break; |
| 20451 | return; |
| 20452 | case 'I': |
| 20453 | if (Subtarget->isThumb1Only()) { |
| 20454 | // This must be a constant between 0 and 255, for ADD |
| 20455 | // immediates. |
| 20456 | if (CVal >= 0 && CVal <= 255) |
| 20457 | break; |
| 20458 | } else if (Subtarget->isThumb2()) { |
| 20459 | // A constant that can be used as an immediate value in a |
| 20460 | // data-processing instruction. |
| 20461 | if (ARM_AM::getT2SOImmVal(Arg: CVal) != -1) |
| 20462 | break; |
| 20463 | } else { |
| 20464 | // A constant that can be used as an immediate value in a |
| 20465 | // data-processing instruction. |
| 20466 | if (ARM_AM::getSOImmVal(Arg: CVal) != -1) |
| 20467 | break; |
| 20468 | } |
| 20469 | return; |
| 20470 | |
| 20471 | case 'J': |
| 20472 | if (Subtarget->isThumb1Only()) { |
| 20473 | // This must be a constant between -255 and -1, for negated ADD |
| 20474 | // immediates. This can be used in GCC with an "n" modifier that |
| 20475 | // prints the negated value, for use with SUB instructions. It is |
| 20476 | // not useful otherwise but is implemented for compatibility. |
| 20477 | if (CVal >= -255 && CVal <= -1) |
| 20478 | break; |
| 20479 | } else { |
| 20480 | // This must be a constant between -4095 and 4095. It is not clear |
| 20481 | // what this constraint is intended for. Implemented for |
| 20482 | // compatibility with GCC. |
| 20483 | if (CVal >= -4095 && CVal <= 4095) |
| 20484 | break; |
| 20485 | } |
| 20486 | return; |
| 20487 | |
| 20488 | case 'K': |
| 20489 | if (Subtarget->isThumb1Only()) { |
| 20490 | // A 32-bit value where only one byte has a nonzero value. Exclude |
| 20491 | // zero to match GCC. This constraint is used by GCC internally for |
| 20492 | // constants that can be loaded with a move/shift combination. |
| 20493 | // It is not useful otherwise but is implemented for compatibility. |
| 20494 | if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(V: CVal)) |
| 20495 | break; |
| 20496 | } else if (Subtarget->isThumb2()) { |
| 20497 | // A constant whose bitwise inverse can be used as an immediate |
| 20498 | // value in a data-processing instruction. This can be used in GCC |
| 20499 | // with a "B" modifier that prints the inverted value, for use with |
| 20500 | // BIC and MVN instructions. It is not useful otherwise but is |
| 20501 | // implemented for compatibility. |
| 20502 | if (ARM_AM::getT2SOImmVal(Arg: ~CVal) != -1) |
| 20503 | break; |
| 20504 | } else { |
| 20505 | // A constant whose bitwise inverse can be used as an immediate |
| 20506 | // value in a data-processing instruction. This can be used in GCC |
| 20507 | // with a "B" modifier that prints the inverted value, for use with |
| 20508 | // BIC and MVN instructions. It is not useful otherwise but is |
| 20509 | // implemented for compatibility. |
| 20510 | if (ARM_AM::getSOImmVal(Arg: ~CVal) != -1) |
| 20511 | break; |
| 20512 | } |
| 20513 | return; |
| 20514 | |
| 20515 | case 'L': |
| 20516 | if (Subtarget->isThumb1Only()) { |
| 20517 | // This must be a constant between -7 and 7, |
| 20518 | // for 3-operand ADD/SUB immediate instructions. |
| 20519 | if (CVal >= -7 && CVal < 7) |
| 20520 | break; |
| 20521 | } else if (Subtarget->isThumb2()) { |
| 20522 | // A constant whose negation can be used as an immediate value in a |
| 20523 | // data-processing instruction. This can be used in GCC with an "n" |
| 20524 | // modifier that prints the negated value, for use with SUB |
| 20525 | // instructions. It is not useful otherwise but is implemented for |
| 20526 | // compatibility. |
| 20527 | if (ARM_AM::getT2SOImmVal(Arg: -CVal) != -1) |
| 20528 | break; |
| 20529 | } else { |
| 20530 | // A constant whose negation can be used as an immediate value in a |
| 20531 | // data-processing instruction. This can be used in GCC with an "n" |
| 20532 | // modifier that prints the negated value, for use with SUB |
| 20533 | // instructions. It is not useful otherwise but is implemented for |
| 20534 | // compatibility. |
| 20535 | if (ARM_AM::getSOImmVal(Arg: -CVal) != -1) |
| 20536 | break; |
| 20537 | } |
| 20538 | return; |
| 20539 | |
| 20540 | case 'M': |
| 20541 | if (Subtarget->isThumb1Only()) { |
| 20542 | // This must be a multiple of 4 between 0 and 1020, for |
| 20543 | // ADD sp + immediate. |
| 20544 | if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) |
| 20545 | break; |
| 20546 | } else { |
| 20547 | // A power of two or a constant between 0 and 32. This is used in |
| 20548 | // GCC for the shift amount on shifted register operands, but it is |
| 20549 | // useful in general for any shift amounts. |
| 20550 | if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) |
| 20551 | break; |
| 20552 | } |
| 20553 | return; |
| 20554 | |
| 20555 | case 'N': |
| 20556 | if (Subtarget->isThumb1Only()) { |
| 20557 | // This must be a constant between 0 and 31, for shift amounts. |
| 20558 | if (CVal >= 0 && CVal <= 31) |
| 20559 | break; |
| 20560 | } |
| 20561 | return; |
| 20562 | |
| 20563 | case 'O': |
| 20564 | if (Subtarget->isThumb1Only()) { |
| 20565 | // This must be a multiple of 4 between -508 and 508, for |
| 20566 | // ADD/SUB sp = sp + immediate. |
| 20567 | if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) |
| 20568 | break; |
| 20569 | } |
| 20570 | return; |
| 20571 | } |
| 20572 | Result = DAG.getSignedTargetConstant(Val: CVal, DL: SDLoc(Op), VT: Op.getValueType()); |
| 20573 | break; |
| 20574 | } |
| 20575 | |
| 20576 | if (Result.getNode()) { |
| 20577 | Ops.push_back(x: Result); |
| 20578 | return; |
| 20579 | } |
| 20580 | return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); |
| 20581 | } |
| 20582 | |
| 20583 | static RTLIB::Libcall getDivRemLibcall( |
| 20584 | const SDNode *N, MVT::SimpleValueType SVT) { |
| 20585 | assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM || |
| 20586 | N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) && |
| 20587 | "Unhandled Opcode in getDivRemLibcall" ); |
| 20588 | bool isSigned = N->getOpcode() == ISD::SDIVREM || |
| 20589 | N->getOpcode() == ISD::SREM; |
| 20590 | RTLIB::Libcall LC; |
| 20591 | switch (SVT) { |
| 20592 | default: llvm_unreachable("Unexpected request for libcall!" ); |
| 20593 | case MVT::i8: LC = isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; |
| 20594 | case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; |
| 20595 | case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; |
| 20596 | case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; |
| 20597 | } |
| 20598 | return LC; |
| 20599 | } |
| 20600 | |
| 20601 | static TargetLowering::ArgListTy getDivRemArgList( |
| 20602 | const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) { |
| 20603 | assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM || |
| 20604 | N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) && |
| 20605 | "Unhandled Opcode in getDivRemArgList" ); |
| 20606 | bool isSigned = N->getOpcode() == ISD::SDIVREM || |
| 20607 | N->getOpcode() == ISD::SREM; |
| 20608 | TargetLowering::ArgListTy Args; |
| 20609 | TargetLowering::ArgListEntry Entry; |
| 20610 | for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { |
| 20611 | EVT ArgVT = N->getOperand(Num: i).getValueType(); |
| 20612 | Type *ArgTy = ArgVT.getTypeForEVT(Context&: *Context); |
| 20613 | Entry.Node = N->getOperand(Num: i); |
| 20614 | Entry.Ty = ArgTy; |
| 20615 | Entry.IsSExt = isSigned; |
| 20616 | Entry.IsZExt = !isSigned; |
| 20617 | Args.push_back(x: Entry); |
| 20618 | } |
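|       | // The Windows RT ABI division helpers take the divisor as their first
|       | // argument, so swap the dividend and divisor for those targets.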
| 20619 | if (Subtarget->isTargetWindows() && Args.size() >= 2) |
| 20620 | std::swap(a&: Args[0], b&: Args[1]); |
| 20621 | return Args; |
| 20622 | } |
| 20623 | |
| 20624 | SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const { |
| 20625 | assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() || |
| 20626 | Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() || |
| 20627 | Subtarget->isTargetWindows()) && |
| 20628 | "Register-based DivRem lowering only" ); |
| 20629 | unsigned Opcode = Op->getOpcode(); |
| 20630 | assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) && |
| 20631 | "Invalid opcode for Div/Rem lowering" ); |
| 20632 | bool isSigned = (Opcode == ISD::SDIVREM); |
| 20633 | EVT VT = Op->getValueType(ResNo: 0); |
| 20634 | SDLoc dl(Op); |
| 20635 | |
| 20636 | if (VT == MVT::i64 && isa<ConstantSDNode>(Val: Op.getOperand(i: 1))) { |
| 20637 | SmallVector<SDValue> Result; |
| 20638 | if (expandDIVREMByConstant(N: Op.getNode(), Result, HiLoVT: MVT::i32, DAG)) { |
| 20639 | SDValue Res0 = |
| 20640 | DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT, N1: Result[0], N2: Result[1]); |
| 20641 | SDValue Res1 = |
| 20642 | DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT, N1: Result[2], N2: Result[3]); |
| 20643 | return DAG.getNode(Opcode: ISD::MERGE_VALUES, DL: dl, VTList: Op->getVTList(), |
| 20644 | Ops: {Res0, Res1}); |
| 20645 | } |
| 20646 | } |
| 20647 | |
| 20648 | Type *Ty = VT.getTypeForEVT(Context&: *DAG.getContext()); |
| 20649 | |
| 20650 | // If the target has hardware divide, use divide + multiply + subtract: |
| 20651 | // div = a / b |
| 20652 | // rem = a - b * div |
| 20653 | // return {div, rem} |
| 20654 | // This should be lowered into UDIV/SDIV + MLS later on. |
| 20655 | bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode() |
| 20656 | : Subtarget->hasDivideInARMMode(); |
| 20657 | if (hasDivide && Op->getValueType(ResNo: 0).isSimple() && |
| 20658 | Op->getSimpleValueType(ResNo: 0) == MVT::i32) { |
| 20659 | unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV; |
| 20660 | const SDValue Dividend = Op->getOperand(Num: 0); |
| 20661 | const SDValue Divisor = Op->getOperand(Num: 1); |
| 20662 | SDValue Div = DAG.getNode(Opcode: DivOpcode, DL: dl, VT, N1: Dividend, N2: Divisor); |
| 20663 | SDValue Mul = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT, N1: Div, N2: Divisor); |
| 20664 | SDValue Rem = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: Dividend, N2: Mul); |
| 20665 | |
| 20666 | SDValue Values[2] = {Div, Rem}; |
| 20667 | return DAG.getNode(Opcode: ISD::MERGE_VALUES, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT), Ops: Values); |
| 20668 | } |
| 20669 | |
| 20670 | RTLIB::Libcall LC = getDivRemLibcall(N: Op.getNode(), |
| 20671 | SVT: VT.getSimpleVT().SimpleTy); |
| 20672 | SDValue InChain = DAG.getEntryNode(); |
| 20673 | |
| 20674 | TargetLowering::ArgListTy Args = getDivRemArgList(N: Op.getNode(), |
| 20675 | Context: DAG.getContext(), |
| 20676 | Subtarget); |
| 20677 | |
| 20678 | SDValue Callee = DAG.getExternalSymbol(Sym: getLibcallName(Call: LC), |
| 20679 | VT: getPointerTy(DL: DAG.getDataLayout())); |
| 20680 | |
| 20681 | Type *RetTy = StructType::get(elt1: Ty, elts: Ty); |
| 20682 | |
| 20683 | if (Subtarget->isTargetWindows()) |
| 20684 | InChain = WinDBZCheckDenominator(DAG, N: Op.getNode(), InChain); |
| 20685 | |
| 20686 | TargetLowering::CallLoweringInfo CLI(DAG); |
| 20687 | CLI.setDebugLoc(dl).setChain(InChain) |
| 20688 | .setCallee(CC: getLibcallCallingConv(Call: LC), ResultType: RetTy, Target: Callee, ArgsList: std::move(Args)) |
| 20689 | .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned); |
| 20690 | |
| 20691 | std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); |
| 20692 | return CallInfo.first; |
| 20693 | } |
| 20694 | |
| 20695 | // Lowers REM using divmod helpers.
| 20696 | // See RTABI sections 4.2 / 4.3.
| 20697 | SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const { |
| 20698 | EVT VT = N->getValueType(ResNo: 0); |
| 20699 | |
| 20700 | if (VT == MVT::i64 && isa<ConstantSDNode>(Val: N->getOperand(Num: 1))) { |
| 20701 | SmallVector<SDValue> Result; |
| 20702 | if (expandDIVREMByConstant(N, Result, HiLoVT: MVT::i32, DAG)) |
| 20703 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: SDLoc(N), VT: N->getValueType(ResNo: 0), |
| 20704 | N1: Result[0], N2: Result[1]); |
| 20705 | } |
| 20706 | |
| 20707 | // Build return types (div and rem) |
| 20708 | std::vector<Type*> RetTyParams; |
| 20709 | Type *RetTyElement; |
| 20710 | |
| 20711 | switch (VT.getSimpleVT().SimpleTy) { |
| 20712 | default: llvm_unreachable("Unexpected request for libcall!" ); |
| 20713 | case MVT::i8: RetTyElement = Type::getInt8Ty(C&: *DAG.getContext()); break; |
| 20714 | case MVT::i16: RetTyElement = Type::getInt16Ty(C&: *DAG.getContext()); break; |
| 20715 | case MVT::i32: RetTyElement = Type::getInt32Ty(C&: *DAG.getContext()); break; |
| 20716 | case MVT::i64: RetTyElement = Type::getInt64Ty(C&: *DAG.getContext()); break; |
| 20717 | } |
| 20718 | |
| 20719 | RetTyParams.push_back(x: RetTyElement); |
| 20720 | RetTyParams.push_back(x: RetTyElement); |
| 20721 | ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams); |
| 20722 | Type *RetTy = StructType::get(Context&: *DAG.getContext(), Elements: ret); |
| 20723 | |
| 20724 | RTLIB::Libcall LC = getDivRemLibcall(N, SVT: N->getValueType(ResNo: 0).getSimpleVT(). |
| 20725 | SimpleTy); |
| 20726 | SDValue InChain = DAG.getEntryNode(); |
| 20727 | TargetLowering::ArgListTy Args = getDivRemArgList(N, Context: DAG.getContext(), |
| 20728 | Subtarget); |
| 20729 | bool isSigned = N->getOpcode() == ISD::SREM; |
| 20730 | SDValue Callee = DAG.getExternalSymbol(Sym: getLibcallName(Call: LC), |
| 20731 | VT: getPointerTy(DL: DAG.getDataLayout())); |
| 20732 | |
| 20733 | if (Subtarget->isTargetWindows()) |
| 20734 | InChain = WinDBZCheckDenominator(DAG, N, InChain); |
| 20735 | |
| 20736 | // Lower call |
| 20737 | CallLoweringInfo CLI(DAG); |
| 20738 | CLI.setChain(InChain) |
| 20739 | .setCallee(CC: CallingConv::ARM_AAPCS, ResultType: RetTy, Target: Callee, ArgsList: std::move(Args)) |
| 20740 | .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N)); |
| 20741 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); |
| 20742 | |
| 20743 | // Return second (rem) result operand (first contains div) |
| 20744 | SDNode *ResNode = CallResult.first.getNode(); |
| 20745 | assert(ResNode->getNumOperands() == 2 && "divmod should return two operands" ); |
| 20746 | return ResNode->getOperand(Num: 1); |
| 20747 | } |
| 20748 | |
| 20749 | SDValue |
| 20750 | ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { |
| 20751 | assert(Subtarget->isTargetWindows() && "unsupported target platform" ); |
| 20752 | SDLoc DL(Op); |
| 20753 | |
| 20754 | // Get the inputs. |
| 20755 | SDValue Chain = Op.getOperand(i: 0); |
| 20756 | SDValue Size = Op.getOperand(i: 1); |
| 20757 | |
| 20758 | if (DAG.getMachineFunction().getFunction().hasFnAttribute( |
| 20759 | Kind: "no-stack-arg-probe" )) { |
| 20760 | MaybeAlign Align = |
| 20761 | cast<ConstantSDNode>(Val: Op.getOperand(i: 2))->getMaybeAlignValue(); |
| 20762 | SDValue SP = DAG.getCopyFromReg(Chain, dl: DL, Reg: ARM::SP, VT: MVT::i32); |
| 20763 | Chain = SP.getValue(R: 1); |
| 20764 | SP = DAG.getNode(Opcode: ISD::SUB, DL, VT: MVT::i32, N1: SP, N2: Size); |
| 20765 | if (Align) |
| 20766 | SP = DAG.getNode( |
| 20767 | Opcode: ISD::AND, DL, VT: MVT::i32, N1: SP.getValue(R: 0), |
| 20768 | N2: DAG.getSignedConstant(Val: -(uint64_t)Align->value(), DL, VT: MVT::i32)); |
| 20769 | Chain = DAG.getCopyToReg(Chain, dl: DL, Reg: ARM::SP, N: SP); |
| 20770 | SDValue Ops[2] = { SP, Chain }; |
| 20771 | return DAG.getMergeValues(Ops, dl: DL); |
| 20772 | } |
| 20773 | |
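|       | // On Windows the stack probe is done by __chkstk: the allocation size in
|       | // words is passed in R4, and the WIN__CHKSTK pseudo expands to the call plus
|       | // the SP adjustment, so the updated SP is simply read back afterwards.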
| 20774 | SDValue Words = DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i32, N1: Size, |
| 20775 | N2: DAG.getConstant(Val: 2, DL, VT: MVT::i32)); |
| 20776 | |
| 20777 | SDValue Glue; |
| 20778 | Chain = DAG.getCopyToReg(Chain, dl: DL, Reg: ARM::R4, N: Words, Glue); |
| 20779 | Glue = Chain.getValue(R: 1); |
| 20780 | |
| 20781 | SDVTList NodeTys = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue); |
| 20782 | Chain = DAG.getNode(Opcode: ARMISD::WIN__CHKSTK, DL, VTList: NodeTys, N1: Chain, N2: Glue); |
| 20783 | |
| 20784 | SDValue NewSP = DAG.getCopyFromReg(Chain, dl: DL, Reg: ARM::SP, VT: MVT::i32); |
| 20785 | Chain = NewSP.getValue(R: 1); |
| 20786 | |
| 20787 | SDValue Ops[2] = { NewSP, Chain }; |
| 20788 | return DAG.getMergeValues(Ops, dl: DL); |
| 20789 | } |
| 20790 | |
| 20791 | SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { |
| 20792 | bool IsStrict = Op->isStrictFPOpcode(); |
| 20793 | SDValue SrcVal = Op.getOperand(i: IsStrict ? 1 : 0); |
| 20794 | const unsigned DstSz = Op.getValueType().getSizeInBits(); |
| 20795 | const unsigned SrcSz = SrcVal.getValueType().getSizeInBits(); |
| 20796 | assert(DstSz > SrcSz && DstSz <= 64 && SrcSz >= 16 && |
| 20797 | "Unexpected type for custom-lowering FP_EXTEND" ); |
| 20798 | |
| 20799 | assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) && |
| 20800 | "With both FP DP and 16, any FP conversion is legal!" ); |
| 20801 | |
| 20802 | assert(!(DstSz == 32 && Subtarget->hasFP16()) && |
| 20803 | "With FP16, 16 to 32 conversion is legal!" ); |
| 20804 | |
| 20805 | // Converting from 32 -> 64 is valid if we have FP64. |
| 20806 | if (SrcSz == 32 && DstSz == 64 && Subtarget->hasFP64()) { |
| 20807 | // FIXME: Remove this when we have strict fp instruction selection patterns |
| 20808 | if (IsStrict) { |
| 20809 | SDLoc Loc(Op); |
| 20810 | SDValue Result = DAG.getNode(Opcode: ISD::FP_EXTEND, |
| 20811 | DL: Loc, VT: Op.getValueType(), Operand: SrcVal); |
| 20812 | return DAG.getMergeValues(Ops: {Result, Op.getOperand(i: 0)}, dl: Loc); |
| 20813 | } |
| 20814 | return Op; |
| 20815 | } |
| 20816 | |
| 20817 | // Either we are converting from 16 -> 64, without FP16 and/or
| 20818 | // double-precision FP, or without Armv8 FP, so we must do it in two
| 20819 | // steps.
| 20820 | // Or we are converting from 32 -> 64 without double-precision FP, or 16 -> 32
| 20821 | // without FP16, so we must make a libcall.
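|       | // For example, extending f16 to f64 without FP16 but with FP64 first makes a
|       | // libcall to extend f16 to f32, then uses a native f32 -> f64 extension;
|       | // without FP64 the second step is a libcall as well.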
| 20822 | SDLoc Loc(Op); |
| 20823 | RTLIB::Libcall LC; |
| 20824 | MakeLibCallOptions CallOptions; |
| 20825 | SDValue Chain = IsStrict ? Op.getOperand(i: 0) : SDValue(); |
| 20826 | for (unsigned Sz = SrcSz; Sz <= 32 && Sz < DstSz; Sz *= 2) { |
| 20827 | bool Supported = (Sz == 16 ? Subtarget->hasFP16() : Subtarget->hasFP64()); |
| 20828 | MVT SrcVT = (Sz == 16 ? MVT::f16 : MVT::f32); |
| 20829 | MVT DstVT = (Sz == 16 ? MVT::f32 : MVT::f64); |
| 20830 | if (Supported) { |
| 20831 | if (IsStrict) { |
| 20832 | SrcVal = DAG.getNode(Opcode: ISD::STRICT_FP_EXTEND, DL: Loc, |
| 20833 | ResultTys: {DstVT, MVT::Other}, Ops: {Chain, SrcVal}); |
| 20834 | Chain = SrcVal.getValue(R: 1); |
| 20835 | } else { |
| 20836 | SrcVal = DAG.getNode(Opcode: ISD::FP_EXTEND, DL: Loc, VT: DstVT, Operand: SrcVal); |
| 20837 | } |
| 20838 | } else { |
| 20839 | LC = RTLIB::getFPEXT(OpVT: SrcVT, RetVT: DstVT); |
| 20840 | assert(LC != RTLIB::UNKNOWN_LIBCALL && |
| 20841 | "Unexpected type for custom-lowering FP_EXTEND" ); |
| 20842 | std::tie(args&: SrcVal, args&: Chain) = makeLibCall(DAG, LC, RetVT: DstVT, Ops: SrcVal, CallOptions, |
| 20843 | dl: Loc, Chain); |
| 20844 | } |
| 20845 | } |
| 20846 | |
| 20847 | return IsStrict ? DAG.getMergeValues(Ops: {SrcVal, Chain}, dl: Loc) : SrcVal; |
| 20848 | } |
| 20849 | |
| 20850 | SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { |
| 20851 | bool IsStrict = Op->isStrictFPOpcode(); |
| 20852 | |
| 20853 | SDValue SrcVal = Op.getOperand(i: IsStrict ? 1 : 0); |
| 20854 | EVT SrcVT = SrcVal.getValueType(); |
| 20855 | EVT DstVT = Op.getValueType(); |
| 20856 | const unsigned DstSz = Op.getValueType().getSizeInBits(); |
| 20857 | const unsigned SrcSz = SrcVT.getSizeInBits(); |
| 20858 | (void)DstSz; |
| 20859 | assert(DstSz < SrcSz && SrcSz <= 64 && DstSz >= 16 && |
| 20860 | "Unexpected type for custom-lowering FP_ROUND" ); |
| 20861 | |
| 20862 | assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) && |
| 20863 | "With both FP DP and 16, any FP conversion is legal!" ); |
| 20864 | |
| 20865 | SDLoc Loc(Op); |
| 20866 | |
| 20867 | // A single instruction handles 32 -> 16 if the target has FP16.
| 20868 | if (SrcSz == 32 && Subtarget->hasFP16()) |
| 20869 | return Op; |
| 20870 | |
| 20871 | // Lib call from 32 -> 16 / 64 -> [32, 16] |
| 20872 | RTLIB::Libcall LC = RTLIB::getFPROUND(OpVT: SrcVT, RetVT: DstVT); |
| 20873 | assert(LC != RTLIB::UNKNOWN_LIBCALL && |
| 20874 | "Unexpected type for custom-lowering FP_ROUND" ); |
| 20875 | MakeLibCallOptions CallOptions; |
| 20876 | SDValue Chain = IsStrict ? Op.getOperand(i: 0) : SDValue(); |
| 20877 | SDValue Result; |
| 20878 | std::tie(args&: Result, args&: Chain) = makeLibCall(DAG, LC, RetVT: DstVT, Ops: SrcVal, CallOptions, |
| 20879 | dl: Loc, Chain); |
| 20880 | return IsStrict ? DAG.getMergeValues(Ops: {Result, Chain}, dl: Loc) : Result; |
| 20881 | } |
| 20882 | |
| 20883 | bool |
| 20884 | ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { |
| 20885 | // The ARM target isn't yet aware of offsets. |
| 20886 | return false; |
| 20887 | } |
| 20888 | |
| 20889 | bool ARM::isBitFieldInvertedMask(unsigned v) { |
| 20890 | if (v == 0xffffffff) |
| 20891 | return false; |
| 20892 | |
| 20893 | // There can be 1's on either or both "outsides"; all the "inside"
| 20894 | // bits must be 0's.
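|       | // For example, 0xF000000F is a valid inverted mask (~v = 0x0FFFFFF0 is a
|       | // single contiguous run of ones), while 0x0FF00FF0 is not.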
| 20895 | return isShiftedMask_32(Value: ~v); |
| 20896 | } |
| 20897 | |
| 20898 | /// isFPImmLegal - Returns true if the target can instruction select the |
| 20899 | /// specified FP immediate natively. If false, the legalizer will |
| 20900 | /// materialize the FP immediate as a load from a constant pool. |
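|       | /// For example, 1.0, -2.0 and 0.5 can be encoded directly as VFP/NEON
|       | /// immediates, while 0.1 cannot and is loaded from the constant pool.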
| 20901 | bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, |
| 20902 | bool ForCodeSize) const { |
| 20903 | if (!Subtarget->hasVFP3Base()) |
| 20904 | return false; |
| 20905 | if (VT == MVT::f16 && Subtarget->hasFullFP16()) |
| 20906 | return ARM_AM::getFP16Imm(FPImm: Imm) != -1; |
| 20907 | if (VT == MVT::f32 && Subtarget->hasFullFP16() && |
| 20908 | ARM_AM::getFP32FP16Imm(FPImm: Imm) != -1) |
| 20909 | return true; |
| 20910 | if (VT == MVT::f32) |
| 20911 | return ARM_AM::getFP32Imm(FPImm: Imm) != -1; |
| 20912 | if (VT == MVT::f64 && Subtarget->hasFP64()) |
| 20913 | return ARM_AM::getFP64Imm(FPImm: Imm) != -1; |
| 20914 | return false; |
| 20915 | } |
| 20916 | |
| 20917 | /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as |
| 20918 | /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment |
| 20919 | /// specified in the intrinsic calls. |
| 20920 | bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, |
| 20921 | const CallInst &I, |
| 20922 | MachineFunction &MF, |
| 20923 | unsigned Intrinsic) const { |
| 20924 | switch (Intrinsic) { |
| 20925 | case Intrinsic::arm_neon_vld1: |
| 20926 | case Intrinsic::arm_neon_vld2: |
| 20927 | case Intrinsic::arm_neon_vld3: |
| 20928 | case Intrinsic::arm_neon_vld4: |
| 20929 | case Intrinsic::arm_neon_vld2lane: |
| 20930 | case Intrinsic::arm_neon_vld3lane: |
| 20931 | case Intrinsic::arm_neon_vld4lane: |
| 20932 | case Intrinsic::arm_neon_vld2dup: |
| 20933 | case Intrinsic::arm_neon_vld3dup: |
| 20934 | case Intrinsic::arm_neon_vld4dup: { |
| 20935 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 20936 | // Conservatively set memVT to the entire set of vectors loaded. |
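|       | // For example, a vld4 returning four 128-bit vectors is recorded as a single
|       | // 512-bit (v8i64) memory access.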
| 20937 | auto &DL = I.getDataLayout(); |
| 20938 | uint64_t NumElts = DL.getTypeSizeInBits(Ty: I.getType()) / 64; |
| 20939 | Info.memVT = EVT::getVectorVT(Context&: I.getType()->getContext(), VT: MVT::i64, NumElements: NumElts); |
| 20940 | Info.ptrVal = I.getArgOperand(i: 0); |
| 20941 | Info.offset = 0; |
| 20942 | Value *AlignArg = I.getArgOperand(i: I.arg_size() - 1); |
| 20943 | Info.align = cast<ConstantInt>(Val: AlignArg)->getMaybeAlignValue(); |
| 20944 | // volatile loads with NEON intrinsics not supported |
| 20945 | Info.flags = MachineMemOperand::MOLoad; |
| 20946 | return true; |
| 20947 | } |
| 20948 | case Intrinsic::arm_neon_vld1x2: |
| 20949 | case Intrinsic::arm_neon_vld1x3: |
| 20950 | case Intrinsic::arm_neon_vld1x4: { |
| 20951 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 20952 | // Conservatively set memVT to the entire set of vectors loaded. |
| 20953 | auto &DL = I.getDataLayout(); |
| 20954 | uint64_t NumElts = DL.getTypeSizeInBits(Ty: I.getType()) / 64; |
| 20955 | Info.memVT = EVT::getVectorVT(Context&: I.getType()->getContext(), VT: MVT::i64, NumElements: NumElts); |
| 20956 | Info.ptrVal = I.getArgOperand(i: I.arg_size() - 1); |
| 20957 | Info.offset = 0; |
| 20958 | Info.align = I.getParamAlign(ArgNo: I.arg_size() - 1).valueOrOne(); |
| 20959 | // volatile loads with NEON intrinsics not supported |
| 20960 | Info.flags = MachineMemOperand::MOLoad; |
| 20961 | return true; |
| 20962 | } |
| 20963 | case Intrinsic::arm_neon_vst1: |
| 20964 | case Intrinsic::arm_neon_vst2: |
| 20965 | case Intrinsic::arm_neon_vst3: |
| 20966 | case Intrinsic::arm_neon_vst4: |
| 20967 | case Intrinsic::arm_neon_vst2lane: |
| 20968 | case Intrinsic::arm_neon_vst3lane: |
| 20969 | case Intrinsic::arm_neon_vst4lane: { |
| 20970 | Info.opc = ISD::INTRINSIC_VOID; |
| 20971 | // Conservatively set memVT to the entire set of vectors stored. |
| 20972 | auto &DL = I.getDataLayout(); |
| 20973 | unsigned NumElts = 0; |
| 20974 | for (unsigned ArgI = 1, ArgE = I.arg_size(); ArgI < ArgE; ++ArgI) { |
| 20975 | Type *ArgTy = I.getArgOperand(i: ArgI)->getType(); |
| 20976 | if (!ArgTy->isVectorTy()) |
| 20977 | break; |
| 20978 | NumElts += DL.getTypeSizeInBits(Ty: ArgTy) / 64; |
| 20979 | } |
| 20980 | Info.memVT = EVT::getVectorVT(Context&: I.getType()->getContext(), VT: MVT::i64, NumElements: NumElts); |
| 20981 | Info.ptrVal = I.getArgOperand(i: 0); |
| 20982 | Info.offset = 0; |
| 20983 | Value *AlignArg = I.getArgOperand(i: I.arg_size() - 1); |
| 20984 | Info.align = cast<ConstantInt>(Val: AlignArg)->getMaybeAlignValue(); |
| 20985 | // volatile stores with NEON intrinsics not supported |
| 20986 | Info.flags = MachineMemOperand::MOStore; |
| 20987 | return true; |
| 20988 | } |
| 20989 | case Intrinsic::arm_neon_vst1x2: |
| 20990 | case Intrinsic::arm_neon_vst1x3: |
| 20991 | case Intrinsic::arm_neon_vst1x4: { |
| 20992 | Info.opc = ISD::INTRINSIC_VOID; |
| 20993 | // Conservatively set memVT to the entire set of vectors stored. |
| 20994 | auto &DL = I.getDataLayout(); |
| 20995 | unsigned NumElts = 0; |
| 20996 | for (unsigned ArgI = 1, ArgE = I.arg_size(); ArgI < ArgE; ++ArgI) { |
| 20997 | Type *ArgTy = I.getArgOperand(i: ArgI)->getType(); |
| 20998 | if (!ArgTy->isVectorTy()) |
| 20999 | break; |
| 21000 | NumElts += DL.getTypeSizeInBits(Ty: ArgTy) / 64; |
| 21001 | } |
| 21002 | Info.memVT = EVT::getVectorVT(Context&: I.getType()->getContext(), VT: MVT::i64, NumElements: NumElts); |
| 21003 | Info.ptrVal = I.getArgOperand(i: 0); |
| 21004 | Info.offset = 0; |
| 21005 | Info.align = I.getParamAlign(ArgNo: 0).valueOrOne(); |
| 21006 | // volatile stores with NEON intrinsics not supported |
| 21007 | Info.flags = MachineMemOperand::MOStore; |
| 21008 | return true; |
| 21009 | } |
| 21010 | case Intrinsic::arm_mve_vld2q: |
| 21011 | case Intrinsic::arm_mve_vld4q: { |
| 21012 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 21013 | // Conservatively set memVT to the entire set of vectors loaded. |
| 21014 | Type *VecTy = cast<StructType>(Val: I.getType())->getElementType(N: 1); |
| 21015 | unsigned Factor = Intrinsic == Intrinsic::arm_mve_vld2q ? 2 : 4; |
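| | // Each MVE vector is 128 bits (two i64 elements), so Factor vectors cover |
| | // Factor * 2 i64 elements. |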
| 21016 | Info.memVT = EVT::getVectorVT(Context&: VecTy->getContext(), VT: MVT::i64, NumElements: Factor * 2); |
| 21017 | Info.ptrVal = I.getArgOperand(i: 0); |
| 21018 | Info.offset = 0; |
| 21019 | Info.align = Align(VecTy->getScalarSizeInBits() / 8); |
| 21020 | // volatile loads with MVE intrinsics not supported |
| 21021 | Info.flags = MachineMemOperand::MOLoad; |
| 21022 | return true; |
| 21023 | } |
| 21024 | case Intrinsic::arm_mve_vst2q: |
| 21025 | case Intrinsic::arm_mve_vst4q: { |
| 21026 | Info.opc = ISD::INTRINSIC_VOID; |
| 21027 | // Conservatively set memVT to the entire set of vectors stored. |
| 21028 | Type *VecTy = I.getArgOperand(i: 1)->getType(); |
| 21029 | unsigned Factor = Intrinsic == Intrinsic::arm_mve_vst2q ? 2 : 4; |
| 21030 | Info.memVT = EVT::getVectorVT(Context&: VecTy->getContext(), VT: MVT::i64, NumElements: Factor * 2); |
| 21031 | Info.ptrVal = I.getArgOperand(i: 0); |
| 21032 | Info.offset = 0; |
| 21033 | Info.align = Align(VecTy->getScalarSizeInBits() / 8); |
| 21034 | // volatile stores with MVE intrinsics not supported |
| 21035 | Info.flags = MachineMemOperand::MOStore; |
| 21036 | return true; |
| 21037 | } |
| 21038 | case Intrinsic::arm_mve_vldr_gather_base: |
| 21039 | case Intrinsic::arm_mve_vldr_gather_base_predicated: { |
| 21040 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 21041 | Info.ptrVal = nullptr; |
| 21042 | Info.memVT = MVT::getVT(Ty: I.getType()); |
| 21043 | Info.align = Align(1); |
| 21044 | Info.flags |= MachineMemOperand::MOLoad; |
| 21045 | return true; |
| 21046 | } |
| 21047 | case Intrinsic::arm_mve_vldr_gather_base_wb: |
| 21048 | case Intrinsic::arm_mve_vldr_gather_base_wb_predicated: { |
| 21049 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 21050 | Info.ptrVal = nullptr; |
| 21051 | Info.memVT = MVT::getVT(Ty: I.getType()->getContainedType(i: 0)); |
| 21052 | Info.align = Align(1); |
| 21053 | Info.flags |= MachineMemOperand::MOLoad; |
| 21054 | return true; |
| 21055 | } |
| 21056 | case Intrinsic::arm_mve_vldr_gather_offset: |
| 21057 | case Intrinsic::arm_mve_vldr_gather_offset_predicated: { |
| 21058 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 21059 | Info.ptrVal = nullptr; |
| 21060 | MVT DataVT = MVT::getVT(Ty: I.getType()); |
| 21061 | unsigned MemSize = cast<ConstantInt>(Val: I.getArgOperand(i: 2))->getZExtValue(); |
| 21062 | Info.memVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: MemSize), |
| 21063 | NumElements: DataVT.getVectorNumElements()); |
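| | // For example, a gather producing <4 x i32> from 16-bit memory elements is |
| | // modelled as a v4i16 access. |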
| 21064 | Info.align = Align(1); |
| 21065 | Info.flags |= MachineMemOperand::MOLoad; |
| 21066 | return true; |
| 21067 | } |
| 21068 | case Intrinsic::arm_mve_vstr_scatter_base: |
| 21069 | case Intrinsic::arm_mve_vstr_scatter_base_predicated: { |
| 21070 | Info.opc = ISD::INTRINSIC_VOID; |
| 21071 | Info.ptrVal = nullptr; |
| 21072 | Info.memVT = MVT::getVT(Ty: I.getArgOperand(i: 2)->getType()); |
| 21073 | Info.align = Align(1); |
| 21074 | Info.flags |= MachineMemOperand::MOStore; |
| 21075 | return true; |
| 21076 | } |
| 21077 | case Intrinsic::arm_mve_vstr_scatter_base_wb: |
| 21078 | case Intrinsic::arm_mve_vstr_scatter_base_wb_predicated: { |
| 21079 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 21080 | Info.ptrVal = nullptr; |
| 21081 | Info.memVT = MVT::getVT(Ty: I.getArgOperand(i: 2)->getType()); |
| 21082 | Info.align = Align(1); |
| 21083 | Info.flags |= MachineMemOperand::MOStore; |
| 21084 | return true; |
| 21085 | } |
| 21086 | case Intrinsic::arm_mve_vstr_scatter_offset: |
| 21087 | case Intrinsic::arm_mve_vstr_scatter_offset_predicated: { |
| 21088 | Info.opc = ISD::INTRINSIC_VOID; |
| 21089 | Info.ptrVal = nullptr; |
| 21090 | MVT DataVT = MVT::getVT(Ty: I.getArgOperand(i: 2)->getType()); |
| 21091 | unsigned MemSize = cast<ConstantInt>(Val: I.getArgOperand(i: 3))->getZExtValue(); |
| 21092 | Info.memVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: MemSize), |
| 21093 | NumElements: DataVT.getVectorNumElements()); |
| 21094 | Info.align = Align(1); |
| 21095 | Info.flags |= MachineMemOperand::MOStore; |
| 21096 | return true; |
| 21097 | } |
| 21098 | case Intrinsic::arm_ldaex: |
| 21099 | case Intrinsic::arm_ldrex: { |
| 21100 | auto &DL = I.getDataLayout(); |
| 21101 | Type *ValTy = I.getParamElementType(ArgNo: 0); |
| 21102 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 21103 | Info.memVT = MVT::getVT(Ty: ValTy); |
| 21104 | Info.ptrVal = I.getArgOperand(i: 0); |
| 21105 | Info.offset = 0; |
| 21106 | Info.align = DL.getABITypeAlign(Ty: ValTy); |
| 21107 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; |
| 21108 | return true; |
| 21109 | } |
| 21110 | case Intrinsic::arm_stlex: |
| 21111 | case Intrinsic::arm_strex: { |
| 21112 | auto &DL = I.getDataLayout(); |
| 21113 | Type *ValTy = I.getParamElementType(ArgNo: 1); |
| 21114 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 21115 | Info.memVT = MVT::getVT(Ty: ValTy); |
| 21116 | Info.ptrVal = I.getArgOperand(i: 1); |
| 21117 | Info.offset = 0; |
| 21118 | Info.align = DL.getABITypeAlign(Ty: ValTy); |
| 21119 | Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; |
| 21120 | return true; |
| 21121 | } |
| 21122 | case Intrinsic::arm_stlexd: |
| 21123 | case Intrinsic::arm_strexd: |
| 21124 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 21125 | Info.memVT = MVT::i64; |
| 21126 | Info.ptrVal = I.getArgOperand(i: 2); |
| 21127 | Info.offset = 0; |
| 21128 | Info.align = Align(8); |
| 21129 | Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; |
| 21130 | return true; |
| 21131 | |
| 21132 | case Intrinsic::arm_ldaexd: |
| 21133 | case Intrinsic::arm_ldrexd: |
| 21134 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 21135 | Info.memVT = MVT::i64; |
| 21136 | Info.ptrVal = I.getArgOperand(i: 0); |
| 21137 | Info.offset = 0; |
| 21138 | Info.align = Align(8); |
| 21139 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; |
| 21140 | return true; |
| 21141 | |
| 21142 | default: |
| 21143 | break; |
| 21144 | } |
| 21145 | |
| 21146 | return false; |
| 21147 | } |
| 21148 | |
| 21149 | /// Returns true if it is beneficial to convert a load of a constant |
| 21150 | /// to just the constant itself. |
| 21151 | bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, |
| 21152 | Type *Ty) const { |
| 21153 | assert(Ty->isIntegerTy()); |
| 21154 | |
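| | // Constants of 32 bits or fewer can usually be materialized with a short |
| | // mov/orr or movw/movt sequence, which is generally cheaper than a load |
| | // from the constant pool. |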
| 21155 | unsigned Bits = Ty->getPrimitiveSizeInBits(); |
| 21156 | if (Bits == 0 || Bits > 32) |
| 21157 | return false; |
| 21158 | return true; |
| 21159 | } |
| 21160 | |
| 21161 | bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, |
| 21162 | unsigned Index) const { |
| 21163 | if (!isOperationLegalOrCustom(Op: ISD::EXTRACT_SUBVECTOR, VT: ResVT)) |
| 21164 | return false; |
| 21165 | |
| 21166 | return (Index == 0 || Index == ResVT.getVectorNumElements()); |
| 21167 | } |
| 21168 | |
| 21169 | Instruction *ARMTargetLowering::makeDMB(IRBuilderBase &Builder, |
| 21170 | ARM_MB::MemBOpt Domain) const { |
| 21171 | // First, if the target has no DMB, see what fallback we can use. |
| 21172 | if (!Subtarget->hasDataBarrier()) { |
| 21173 | // Some ARMv6 cpus can support data barriers with an mcr instruction. |
| 21174 | // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get |
| 21175 | // here. |
| 21176 | if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) { |
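| | // These operands encode "mcr p15, 0, <Rt>, c7, c10, 5", the CP15 data |
| | // memory barrier operation available on ARMv6. |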
| 21177 | Value* args[6] = {Builder.getInt32(C: 15), Builder.getInt32(C: 0), |
| 21178 | Builder.getInt32(C: 0), Builder.getInt32(C: 7), |
| 21179 | Builder.getInt32(C: 10), Builder.getInt32(C: 5)}; |
| 21180 | return Builder.CreateIntrinsic(ID: Intrinsic::arm_mcr, Args: args); |
| 21181 | } else { |
| 21182 | // Instead of using barriers, atomic accesses on these subtargets use |
| 21183 | // libcalls. |
| 21184 | llvm_unreachable("makeDMB on a target so old that it has no barriers" ); |
| 21185 | } |
| 21186 | } else { |
| 21187 | // Only a full system barrier exists in the M-class architectures. |
| 21188 | Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain; |
| 21189 | Constant *CDomain = Builder.getInt32(C: Domain); |
| 21190 | return Builder.CreateIntrinsic(ID: Intrinsic::arm_dmb, Args: CDomain); |
| 21191 | } |
| 21192 | } |
| 21193 | |
| 21194 | // Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html |
| 21195 | Instruction *ARMTargetLowering::emitLeadingFence(IRBuilderBase &Builder, |
| 21196 | Instruction *Inst, |
| 21197 | AtomicOrdering Ord) const { |
| 21198 | switch (Ord) { |
| 21199 | case AtomicOrdering::NotAtomic: |
| 21200 | case AtomicOrdering::Unordered: |
| 21201 | llvm_unreachable("Invalid fence: unordered/non-atomic" ); |
| 21202 | case AtomicOrdering::Monotonic: |
| 21203 | case AtomicOrdering::Acquire: |
| 21204 | return nullptr; // Nothing to do |
| 21205 | case AtomicOrdering::SequentiallyConsistent: |
| 21206 | if (!Inst->hasAtomicStore()) |
| 21207 | return nullptr; // Nothing to do |
| 21208 | [[fallthrough]]; |
| 21209 | case AtomicOrdering::Release: |
| 21210 | case AtomicOrdering::AcquireRelease: |
| 21211 | if (Subtarget->preferISHSTBarriers()) |
| 21212 | return makeDMB(Builder, Domain: ARM_MB::ISHST); |
| 21213 | // FIXME: add a comment with a link to documentation justifying this. |
| 21214 | else |
| 21215 | return makeDMB(Builder, Domain: ARM_MB::ISH); |
| 21216 | } |
| 21217 | llvm_unreachable("Unknown fence ordering in emitLeadingFence" ); |
| 21218 | } |
| 21219 | |
| 21220 | Instruction *ARMTargetLowering::emitTrailingFence(IRBuilderBase &Builder, |
| 21221 | Instruction *Inst, |
| 21222 | AtomicOrdering Ord) const { |
| 21223 | switch (Ord) { |
| 21224 | case AtomicOrdering::NotAtomic: |
| 21225 | case AtomicOrdering::Unordered: |
| 21226 | llvm_unreachable("Invalid fence: unordered/not-atomic" ); |
| 21227 | case AtomicOrdering::Monotonic: |
| 21228 | case AtomicOrdering::Release: |
| 21229 | return nullptr; // Nothing to do |
| 21230 | case AtomicOrdering::Acquire: |
| 21231 | case AtomicOrdering::AcquireRelease: |
| 21232 | case AtomicOrdering::SequentiallyConsistent: |
| 21233 | return makeDMB(Builder, Domain: ARM_MB::ISH); |
| 21234 | } |
| 21235 | llvm_unreachable("Unknown fence ordering in emitTrailingFence" ); |
| 21236 | } |
| 21237 | |
| 21238 | // Loads and stores less than 64 bits are already atomic; ones above that |
| 21239 | // are doomed anyway, so defer to the default libcall and blame the OS when |
| 21240 | // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit |
| 21241 | // anything for those. |
| 21242 | TargetLoweringBase::AtomicExpansionKind |
| 21243 | ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { |
| 21244 | bool has64BitAtomicStore; |
| 21245 | if (Subtarget->isMClass()) |
| 21246 | has64BitAtomicStore = false; |
| 21247 | else if (Subtarget->isThumb()) |
| 21248 | has64BitAtomicStore = Subtarget->hasV7Ops(); |
| 21249 | else |
| 21250 | has64BitAtomicStore = Subtarget->hasV6Ops(); |
| 21251 | |
| 21252 | unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits(); |
| 21253 | return Size == 64 && has64BitAtomicStore ? AtomicExpansionKind::Expand |
| 21254 | : AtomicExpansionKind::None; |
| 21255 | } |
| 21256 | |
| 21257 | // Loads and stores less than 64 bits are already atomic; ones above that |
| 21258 | // are doomed anyway, so defer to the default libcall and blame the OS when |
| 21259 | // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit |
| 21260 | // anything for those. |
| 21261 | // FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that |
| 21262 | // guarantee, see DDI0406C ARM architecture reference manual, |
| 21263 | // sections A8.8.72-74 LDRD) |
| 21264 | TargetLowering::AtomicExpansionKind |
| 21265 | ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { |
| 21266 | bool has64BitAtomicLoad; |
| 21267 | if (Subtarget->isMClass()) |
| 21268 | has64BitAtomicLoad = false; |
| 21269 | else if (Subtarget->isThumb()) |
| 21270 | has64BitAtomicLoad = Subtarget->hasV7Ops(); |
| 21271 | else |
| 21272 | has64BitAtomicLoad = Subtarget->hasV6Ops(); |
| 21273 | |
| 21274 | unsigned Size = LI->getType()->getPrimitiveSizeInBits(); |
| 21275 | return (Size == 64 && has64BitAtomicLoad) ? AtomicExpansionKind::LLOnly |
| 21276 | : AtomicExpansionKind::None; |
| 21277 | } |
| 21278 | |
| 21279 | // For the real atomic operations, we have ldrex/strex up to 32 bits, |
| 21280 | // and up to 64 bits on the non-M profiles |
| 21281 | TargetLowering::AtomicExpansionKind |
| 21282 | ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { |
| 21283 | if (AI->isFloatingPointOperation()) |
| 21284 | return AtomicExpansionKind::CmpXChg; |
| 21285 | |
| 21286 | unsigned Size = AI->getType()->getPrimitiveSizeInBits(); |
| 21287 | bool hasAtomicRMW; |
| 21288 | if (Subtarget->isMClass()) |
| 21289 | hasAtomicRMW = Subtarget->hasV8MBaselineOps(); |
| 21290 | else if (Subtarget->isThumb()) |
| 21291 | hasAtomicRMW = Subtarget->hasV7Ops(); |
| 21292 | else |
| 21293 | hasAtomicRMW = Subtarget->hasV6Ops(); |
| 21294 | if (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW) { |
| 21295 | // At -O0, fast-regalloc cannot cope with the live vregs necessary to |
| 21296 | // implement atomicrmw without spilling. If the target address is also on |
| 21297 | // the stack and close enough to the spill slot, this can lead to a |
| 21298 | // situation where the monitor always gets cleared and the atomic operation |
| 21299 | // can never succeed. So at -O0 lower this operation to a CAS loop. |
| 21300 | if (getTargetMachine().getOptLevel() == CodeGenOptLevel::None) |
| 21301 | return AtomicExpansionKind::CmpXChg; |
| 21302 | return AtomicExpansionKind::LLSC; |
| 21303 | } |
| 21304 | return AtomicExpansionKind::None; |
| 21305 | } |
| 21306 | |
| 21307 | // Similar to shouldExpandAtomicRMWInIR, ldrex/strex can be used up to 32 |
| 21308 | // bits, and up to 64 bits on the non-M profiles. |
| 21309 | TargetLowering::AtomicExpansionKind |
| 21310 | ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const { |
| 21311 | // At -O0, fast-regalloc cannot cope with the live vregs necessary to |
| 21312 | // implement cmpxchg without spilling. If the address being exchanged is also |
| 21313 | // on the stack and close enough to the spill slot, this can lead to a |
| 21314 | // situation where the monitor always gets cleared and the atomic operation |
| 21315 | // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead. |
| 21316 | unsigned Size = AI->getOperand(i_nocapture: 1)->getType()->getPrimitiveSizeInBits(); |
| 21317 | bool HasAtomicCmpXchg; |
| 21318 | if (Subtarget->isMClass()) |
| 21319 | HasAtomicCmpXchg = Subtarget->hasV8MBaselineOps(); |
| 21320 | else if (Subtarget->isThumb()) |
| 21321 | HasAtomicCmpXchg = Subtarget->hasV7Ops(); |
| 21322 | else |
| 21323 | HasAtomicCmpXchg = Subtarget->hasV6Ops(); |
| 21324 | if (getTargetMachine().getOptLevel() != CodeGenOptLevel::None && |
| 21325 | HasAtomicCmpXchg && Size <= (Subtarget->isMClass() ? 32U : 64U)) |
| 21326 | return AtomicExpansionKind::LLSC; |
| 21327 | return AtomicExpansionKind::None; |
| 21328 | } |
| 21329 | |
| 21330 | bool ARMTargetLowering::shouldInsertFencesForAtomic( |
| 21331 | const Instruction *I) const { |
| 21332 | return InsertFencesForAtomic; |
| 21333 | } |
| 21334 | |
| 21335 | bool ARMTargetLowering::useLoadStackGuardNode(const Module &M) const { |
| 21336 | // ROPI/RWPI are not supported currently. |
| 21337 | return !Subtarget->isROPI() && !Subtarget->isRWPI(); |
| 21338 | } |
| 21339 | |
| 21340 | void ARMTargetLowering::insertSSPDeclarations(Module &M) const { |
| 21341 | if (!Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) |
| 21342 | return TargetLowering::insertSSPDeclarations(M); |
| 21343 | |
| 21344 | // MSVC CRT has a global variable holding security cookie. |
| 21345 | M.getOrInsertGlobal(Name: "__security_cookie" , |
| 21346 | Ty: PointerType::getUnqual(C&: M.getContext())); |
| 21347 | |
| 21348 | // MSVC CRT has a function to validate security cookie. |
| 21349 | FunctionCallee SecurityCheckCookie = M.getOrInsertFunction( |
| 21350 | Name: "__security_check_cookie" , RetTy: Type::getVoidTy(C&: M.getContext()), |
| 21351 | Args: PointerType::getUnqual(C&: M.getContext())); |
| 21352 | if (Function *F = dyn_cast<Function>(Val: SecurityCheckCookie.getCallee())) |
| 21353 | F->addParamAttr(ArgNo: 0, Kind: Attribute::AttrKind::InReg); |
| 21354 | } |
| 21355 | |
| 21356 | Value *ARMTargetLowering::getSDagStackGuard(const Module &M) const { |
| 21357 | // MSVC CRT has a global variable holding security cookie. |
| 21358 | if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) |
| 21359 | return M.getGlobalVariable(Name: "__security_cookie" ); |
| 21360 | return TargetLowering::getSDagStackGuard(M); |
| 21361 | } |
| 21362 | |
| 21363 | Function *ARMTargetLowering::getSSPStackGuardCheck(const Module &M) const { |
| 21364 | // MSVC CRT has a function to validate security cookie. |
| 21365 | if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) |
| 21366 | return M.getFunction(Name: "__security_check_cookie" ); |
| 21367 | return TargetLowering::getSSPStackGuardCheck(M); |
| 21368 | } |
| 21369 | |
| 21370 | bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx, |
| 21371 | unsigned &Cost) const { |
| 21372 | // If we do not have NEON, vector types are not natively supported. |
| 21373 | if (!Subtarget->hasNEON()) |
| 21374 | return false; |
| 21375 | |
| 21376 | // Floating point values and vector values map to the same register file. |
| 21377 | // Therefore, although we could do a store + extract of a vector type, it is |
| 21378 | // better to leave this as float, since we have more freedom in the addressing |
| 21379 | // modes for float loads and stores. |
| 21380 | if (VectorTy->isFPOrFPVectorTy()) |
| 21381 | return false; |
| 21382 | |
| 21383 | // If the index is unknown at compile time, this is very expensive to lower |
| 21384 | // and it is not possible to combine the store with the extract. |
| 21385 | if (!isa<ConstantInt>(Val: Idx)) |
| 21386 | return false; |
| 21387 | |
| 21388 | assert(VectorTy->isVectorTy() && "VectorTy is not a vector type" ); |
| 21389 | unsigned BitWidth = VectorTy->getPrimitiveSizeInBits().getFixedValue(); |
| 21390 | // We can do a store + vector extract on any vector that fits perfectly in a D |
| 21391 | // or Q register. |
| 21392 | if (BitWidth == 64 || BitWidth == 128) { |
| 21393 | Cost = 0; |
| 21394 | return true; |
| 21395 | } |
| 21396 | return false; |
| 21397 | } |
| 21398 | |
| 21399 | bool ARMTargetLowering::isCheapToSpeculateCttz(Type *Ty) const { |
| 21400 | return Subtarget->hasV6T2Ops(); |
| 21401 | } |
| 21402 | |
| 21403 | bool ARMTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const { |
| 21404 | return Subtarget->hasV6T2Ops(); |
| 21405 | } |
| 21406 | |
| 21407 | bool ARMTargetLowering::isMaskAndCmp0FoldingBeneficial( |
| 21408 | const Instruction &AndI) const { |
| 21409 | if (!Subtarget->hasV7Ops()) |
| 21410 | return false; |
| 21411 | |
| 21412 | // Sink the `and` instruction only if the mask would fit into a modified |
| 21413 | // immediate operand. |
| 21414 | ConstantInt *Mask = dyn_cast<ConstantInt>(Val: AndI.getOperand(i: 1)); |
| 21415 | if (!Mask || Mask->getValue().getBitWidth() > 32u) |
| 21416 | return false; |
| 21417 | auto MaskVal = unsigned(Mask->getValue().getZExtValue()); |
| 21418 | return (Subtarget->isThumb2() ? ARM_AM::getT2SOImmVal(Arg: MaskVal) |
| 21419 | : ARM_AM::getSOImmVal(Arg: MaskVal)) != -1; |
| 21420 | } |
| 21421 | |
| 21422 | TargetLowering::ShiftLegalizationStrategy |
| 21423 | ARMTargetLowering::preferredShiftLegalizationStrategy( |
| 21424 | SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const { |
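| | // Under minsize, a libcall (e.g. the AEABI helper __aeabi_llsl) is typically |
| | // smaller than an expanded wide-shift sequence. Windows targets are excluded, |
| | // presumably because they do not provide these AEABI helpers. |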
| 21425 | if (Subtarget->hasMinSize() && !Subtarget->isTargetWindows()) |
| 21426 | return ShiftLegalizationStrategy::LowerToLibcall; |
| 21427 | return TargetLowering::preferredShiftLegalizationStrategy(DAG, N, |
| 21428 | ExpansionFactor); |
| 21429 | } |
| 21430 | |
| 21431 | Value *ARMTargetLowering::emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, |
| 21432 | Value *Addr, |
| 21433 | AtomicOrdering Ord) const { |
| 21434 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); |
| 21435 | bool IsAcquire = isAcquireOrStronger(AO: Ord); |
| 21436 | |
| 21437 | // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd |
| 21438 | // intrinsic must return {i32, i32} and we have to recombine them into a |
| 21439 | // single i64 here. |
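| | // Roughly, for a 64-bit acquire load this emits: |
| | //   %lohi = call { i32, i32 } @llvm.arm.ldaexd(ptr %addr) |
| | //   %lo64 = zext i32 %lo to i64 ; %hi64 = zext i32 %hi to i64 |
| | //   %val64 = or i64 %lo64, (%hi64 shifted left by 32) |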
| 21440 | if (ValueTy->getPrimitiveSizeInBits() == 64) { |
| 21441 | Intrinsic::ID Int = |
| 21442 | IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd; |
| 21443 | |
| 21444 | Value *LoHi = |
| 21445 | Builder.CreateIntrinsic(ID: Int, Args: Addr, /*FMFSource=*/nullptr, Name: "lohi" ); |
| 21446 | |
| 21447 | Value *Lo = Builder.CreateExtractValue(Agg: LoHi, Idxs: 0, Name: "lo" ); |
| 21448 | Value *Hi = Builder.CreateExtractValue(Agg: LoHi, Idxs: 1, Name: "hi" ); |
| 21449 | if (!Subtarget->isLittle()) |
| 21450 | std::swap (a&: Lo, b&: Hi); |
| 21451 | Lo = Builder.CreateZExt(V: Lo, DestTy: ValueTy, Name: "lo64" ); |
| 21452 | Hi = Builder.CreateZExt(V: Hi, DestTy: ValueTy, Name: "hi64" ); |
| 21453 | return Builder.CreateOr( |
| 21454 | LHS: Lo, RHS: Builder.CreateShl(LHS: Hi, RHS: ConstantInt::get(Ty: ValueTy, V: 32)), Name: "val64" ); |
| 21455 | } |
| 21456 | |
| 21457 | Type *Tys[] = { Addr->getType() }; |
| 21458 | Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex; |
| 21459 | CallInst *CI = Builder.CreateIntrinsic(ID: Int, Types: Tys, Args: Addr); |
| 21460 | |
| 21461 | CI->addParamAttr( |
| 21462 | ArgNo: 0, Attr: Attribute::get(Context&: M->getContext(), Kind: Attribute::ElementType, Ty: ValueTy)); |
| 21463 | return Builder.CreateTruncOrBitCast(V: CI, DestTy: ValueTy); |
| 21464 | } |
| 21465 | |
| 21466 | void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance( |
| 21467 | IRBuilderBase &Builder) const { |
| 21468 | if (!Subtarget->hasV7Ops()) |
| 21469 | return; |
| 21470 | Builder.CreateIntrinsic(ID: Intrinsic::arm_clrex, Args: {}); |
| 21471 | } |
| 21472 | |
| 21473 | Value *ARMTargetLowering::emitStoreConditional(IRBuilderBase &Builder, |
| 21474 | Value *Val, Value *Addr, |
| 21475 | AtomicOrdering Ord) const { |
| 21476 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); |
| 21477 | bool IsRelease = isReleaseOrStronger(AO: Ord); |
| 21478 | |
| 21479 | // Since the intrinsics must have legal type, the i64 intrinsics take two |
| 21480 | // parameters: "i32, i32". We must marshal Val into the appropriate form |
| 21481 | // before the call. |
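| | // Roughly, a 64-bit release store becomes: |
| | //   %r = call i32 @llvm.arm.stlexd(i32 %lo, i32 %hi, ptr %addr) |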
| 21482 | if (Val->getType()->getPrimitiveSizeInBits() == 64) { |
| 21483 | Intrinsic::ID Int = |
| 21484 | IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd; |
| 21485 | Type *Int32Ty = Type::getInt32Ty(C&: M->getContext()); |
| 21486 | |
| 21487 | Value *Lo = Builder.CreateTrunc(V: Val, DestTy: Int32Ty, Name: "lo" ); |
| 21488 | Value *Hi = Builder.CreateTrunc(V: Builder.CreateLShr(LHS: Val, RHS: 32), DestTy: Int32Ty, Name: "hi" ); |
| 21489 | if (!Subtarget->isLittle()) |
| 21490 | std::swap(a&: Lo, b&: Hi); |
| 21491 | return Builder.CreateIntrinsic(ID: Int, Args: {Lo, Hi, Addr}); |
| 21492 | } |
| 21493 | |
| 21494 | Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex; |
| 21495 | Type *Tys[] = { Addr->getType() }; |
| 21496 | Function *Strex = Intrinsic::getOrInsertDeclaration(M, id: Int, Tys); |
| 21497 | |
| 21498 | CallInst *CI = Builder.CreateCall( |
| 21499 | Callee: Strex, Args: {Builder.CreateZExtOrBitCast( |
| 21500 | V: Val, DestTy: Strex->getFunctionType()->getParamType(i: 0)), |
| 21501 | Addr}); |
| 21502 | CI->addParamAttr(ArgNo: 1, Attr: Attribute::get(Context&: M->getContext(), Kind: Attribute::ElementType, |
| 21503 | Ty: Val->getType())); |
| 21504 | return CI; |
| 21505 | } |
| 21506 | |
| 21507 | |
| 21508 | bool ARMTargetLowering::alignLoopsWithOptSize() const { |
| 21509 | return Subtarget->isMClass(); |
| 21510 | } |
| 21511 | |
| 21512 | /// A helper function for determining the number of interleaved accesses we |
| 21513 | /// will generate when lowering accesses of the given type. |
| 21514 | unsigned |
| 21515 | ARMTargetLowering::getNumInterleavedAccesses(VectorType *VecTy, |
| 21516 | const DataLayout &DL) const { |
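| | // Round up to whole 128-bit accesses: e.g. a 512-bit <16 x i32> needs 4, |
| | // while a 64-bit <8 x i8> needs 1. |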
| 21517 | return (DL.getTypeSizeInBits(Ty: VecTy) + 127) / 128; |
| 21518 | } |
| 21519 | |
| 21520 | bool ARMTargetLowering::isLegalInterleavedAccessType( |
| 21521 | unsigned Factor, FixedVectorType *VecTy, Align Alignment, |
| 21522 | const DataLayout &DL) const { |
| 21523 | |
| 21524 | unsigned VecSize = DL.getTypeSizeInBits(Ty: VecTy); |
| 21525 | unsigned ElSize = DL.getTypeSizeInBits(Ty: VecTy->getElementType()); |
| 21526 | |
| 21527 | if (!Subtarget->hasNEON() && !Subtarget->hasMVEIntegerOps()) |
| 21528 | return false; |
| 21529 | |
| 21530 | // Ensure the vector doesn't have f16 elements. Even though we could do an |
| 21531 | // i16 vldN, we can't hold the f16 vectors and will end up converting via |
| 21532 | // f32. |
| 21533 | if (Subtarget->hasNEON() && VecTy->getElementType()->isHalfTy()) |
| 21534 | return false; |
| 21535 | if (Subtarget->hasMVEIntegerOps() && Factor == 3) |
| 21536 | return false; |
| 21537 | |
| 21538 | // Ensure the number of vector elements is greater than 1. |
| 21539 | if (VecTy->getNumElements() < 2) |
| 21540 | return false; |
| 21541 | |
| 21542 | // Ensure the element type is legal. |
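| | // For example, <2 x i64> is rejected here: the vldN/vstN instructions have |
| | // no 64-bit element forms. |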
| 21543 | if (ElSize != 8 && ElSize != 16 && ElSize != 32) |
| 21544 | return false; |
| 21545 | // And check that the alignment is high enough under MVE. |
| 21546 | if (Subtarget->hasMVEIntegerOps() && Alignment < ElSize / 8) |
| 21547 | return false; |
| 21548 | |
| 21549 | // Ensure the total vector size is 64 or a multiple of 128. Types larger than |
| 21550 | // 128 will be split into multiple interleaved accesses. |
| 21551 | if (Subtarget->hasNEON() && VecSize == 64) |
| 21552 | return true; |
| 21553 | return VecSize % 128 == 0; |
| 21554 | } |
| 21555 | |
| 21556 | unsigned ARMTargetLowering::getMaxSupportedInterleaveFactor() const { |
| 21557 | if (Subtarget->hasNEON()) |
| 21558 | return 4; |
| 21559 | if (Subtarget->hasMVEIntegerOps()) |
| 21560 | return MVEMaxSupportedInterleaveFactor; |
| 21561 | return TargetLoweringBase::getMaxSupportedInterleaveFactor(); |
| 21562 | } |
| 21563 | |
| 21564 | /// Lower an interleaved load into a vldN intrinsic. |
| 21565 | /// |
| 21566 | /// E.g. Lower an interleaved load (Factor = 2): |
| 21567 | /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4 |
| 21568 | /// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6> ; Extract even elements |
| 21569 | /// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7> ; Extract odd elements |
| 21570 | /// |
| 21571 | /// Into: |
| 21572 | /// %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4) |
| 21573 | /// %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0 |
| 21574 | /// %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1 |
| 21575 | bool ARMTargetLowering::lowerInterleavedLoad( |
| 21576 | LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles, |
| 21577 | ArrayRef<unsigned> Indices, unsigned Factor) const { |
| 21578 | assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && |
| 21579 | "Invalid interleave factor" ); |
| 21580 | assert(!Shuffles.empty() && "Empty shufflevector input" ); |
| 21581 | assert(Shuffles.size() == Indices.size() && |
| 21582 | "Unmatched number of shufflevectors and indices" ); |
| 21583 | |
| 21584 | auto *VecTy = cast<FixedVectorType>(Val: Shuffles[0]->getType()); |
| 21585 | Type *EltTy = VecTy->getElementType(); |
| 21586 | |
| 21587 | const DataLayout &DL = LI->getDataLayout(); |
| 21588 | Align Alignment = LI->getAlign(); |
| 21589 | |
| 21590 | // Skip if we do not have NEON/MVE, and skip illegal vector types. We can |
| 21591 | // "legalize" wide vector types into multiple interleaved accesses as long as |
| 21592 | // the vector types are divisible by 128. |
| 21593 | if (!isLegalInterleavedAccessType(Factor, VecTy, Alignment, DL)) |
| 21594 | return false; |
| 21595 | |
| 21596 | unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL); |
| 21597 | |
| 21598 | // A pointer vector cannot be the return type of the ldN intrinsics. Need to |
| 21599 | // load integer vectors first and then convert to pointer vectors. |
| 21600 | if (EltTy->isPointerTy()) |
| 21601 | VecTy = FixedVectorType::get(ElementType: DL.getIntPtrType(EltTy), FVTy: VecTy); |
| 21602 | |
| 21603 | IRBuilder<> Builder(LI); |
| 21604 | |
| 21605 | // The base address of the load. |
| 21606 | Value *BaseAddr = LI->getPointerOperand(); |
| 21607 | |
| 21608 | if (NumLoads > 1) { |
| 21609 | // If we're going to generate more than one load, reset the sub-vector type |
| 21610 | // to something legal. |
| 21611 | VecTy = FixedVectorType::get(ElementType: VecTy->getElementType(), |
| 21612 | NumElts: VecTy->getNumElements() / NumLoads); |
| 21613 | } |
| 21614 | |
| 21615 | assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!" ); |
| 21616 | |
| 21617 | auto createLoadIntrinsic = [&](Value *BaseAddr) { |
| 21618 | if (Subtarget->hasNEON()) { |
| 21619 | Type *PtrTy = Builder.getPtrTy(AddrSpace: LI->getPointerAddressSpace()); |
| 21620 | Type *Tys[] = {VecTy, PtrTy}; |
| 21621 | static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2, |
| 21622 | Intrinsic::arm_neon_vld3, |
| 21623 | Intrinsic::arm_neon_vld4}; |
| 21624 | |
| 21625 | SmallVector<Value *, 2> Ops; |
| 21626 | Ops.push_back(Elt: BaseAddr); |
| 21627 | Ops.push_back(Elt: Builder.getInt32(C: LI->getAlign().value())); |
| 21628 | |
| 21629 | return Builder.CreateIntrinsic(ID: LoadInts[Factor - 2], Types: Tys, Args: Ops, |
| 21630 | /*FMFSource=*/nullptr, Name: "vldN" ); |
| 21631 | } else { |
| 21632 | assert((Factor == 2 || Factor == 4) && |
| 21633 | "expected interleave factor of 2 or 4 for MVE" ); |
| 21634 | Intrinsic::ID LoadInts = |
| 21635 | Factor == 2 ? Intrinsic::arm_mve_vld2q : Intrinsic::arm_mve_vld4q; |
| 21636 | Type *PtrTy = Builder.getPtrTy(AddrSpace: LI->getPointerAddressSpace()); |
| 21637 | Type *Tys[] = {VecTy, PtrTy}; |
| 21638 | |
| 21639 | SmallVector<Value *, 2> Ops; |
| 21640 | Ops.push_back(Elt: BaseAddr); |
| 21641 | return Builder.CreateIntrinsic(ID: LoadInts, Types: Tys, Args: Ops, /*FMFSource=*/nullptr, |
| 21642 | Name: "vldN" ); |
| 21643 | } |
| 21644 | }; |
| 21645 | |
| 21646 | // Holds sub-vectors extracted from the load intrinsic return values. The |
| 21647 | // sub-vectors are associated with the shufflevector instructions they will |
| 21648 | // replace. |
| 21649 | DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs; |
| 21650 | |
| 21651 | for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) { |
| 21652 | // If we're generating more than one load, compute the base address of |
| 21653 | // subsequent loads as an offset from the previous. |
| 21654 | if (LoadCount > 0) |
| 21655 | BaseAddr = Builder.CreateConstGEP1_32(Ty: VecTy->getElementType(), Ptr: BaseAddr, |
| 21656 | Idx0: VecTy->getNumElements() * Factor); |
| 21657 | |
| 21658 | CallInst *VldN = createLoadIntrinsic(BaseAddr); |
| 21659 | |
| 21660 | // Replace uses of each shufflevector with the corresponding vector loaded |
| 21661 | // by ldN. |
| 21662 | for (unsigned i = 0; i < Shuffles.size(); i++) { |
| 21663 | ShuffleVectorInst *SV = Shuffles[i]; |
| 21664 | unsigned Index = Indices[i]; |
| 21665 | |
| 21666 | Value *SubVec = Builder.CreateExtractValue(Agg: VldN, Idxs: Index); |
| 21667 | |
| 21668 | // Convert the integer vector to pointer vector if the element is pointer. |
| 21669 | if (EltTy->isPointerTy()) |
| 21670 | SubVec = Builder.CreateIntToPtr( |
| 21671 | V: SubVec, |
| 21672 | DestTy: FixedVectorType::get(ElementType: SV->getType()->getElementType(), FVTy: VecTy)); |
| 21673 | |
| 21674 | SubVecs[SV].push_back(Elt: SubVec); |
| 21675 | } |
| 21676 | } |
| 21677 | |
| 21678 | // Replace uses of the shufflevector instructions with the sub-vectors |
| 21679 | // returned by the load intrinsic. If a shufflevector instruction is |
| 21680 | // associated with more than one sub-vector, those sub-vectors will be |
| 21681 | // concatenated into a single wide vector. |
| 21682 | for (ShuffleVectorInst *SVI : Shuffles) { |
| 21683 | auto &SubVec = SubVecs[SVI]; |
| 21684 | auto *WideVec = |
| 21685 | SubVec.size() > 1 ? concatenateVectors(Builder, Vecs: SubVec) : SubVec[0]; |
| 21686 | SVI->replaceAllUsesWith(V: WideVec); |
| 21687 | } |
| 21688 | |
| 21689 | return true; |
| 21690 | } |
| 21691 | |
| 21692 | /// Lower an interleaved store into a vstN intrinsic. |
| 21693 | /// |
| 21694 | /// E.g. Lower an interleaved store (Factor = 3): |
| 21695 | /// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1, |
| 21696 | /// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> |
| 21697 | /// store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4 |
| 21698 | /// |
| 21699 | /// Into: |
| 21700 | /// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3> |
| 21701 | /// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7> |
| 21702 | /// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11> |
| 21703 | /// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4) |
| 21704 | /// |
| 21705 | /// Note that the new shufflevectors will be removed and we'll only generate one |
| 21706 | /// vst3 instruction in CodeGen. |
| 21707 | /// |
| 21708 | /// Example for a more general valid mask (Factor 3). Lower: |
| 21709 | /// %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1, |
| 21710 | /// <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19> |
| 21711 | /// store <12 x i32> %i.vec, <12 x i32>* %ptr |
| 21712 | /// |
| 21713 | /// Into: |
| 21714 | /// %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7> |
| 21715 | /// %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35> |
| 21716 | /// %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19> |
| 21717 | /// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4) |
| 21718 | bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI, |
| 21719 | ShuffleVectorInst *SVI, |
| 21720 | unsigned Factor) const { |
| 21721 | assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && |
| 21722 | "Invalid interleave factor" ); |
| 21723 | |
| 21724 | auto *VecTy = cast<FixedVectorType>(Val: SVI->getType()); |
| 21725 | assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store" ); |
| 21726 | |
| 21727 | unsigned LaneLen = VecTy->getNumElements() / Factor; |
| 21728 | Type *EltTy = VecTy->getElementType(); |
| 21729 | auto *SubVecTy = FixedVectorType::get(ElementType: EltTy, NumElts: LaneLen); |
| 21730 | |
| 21731 | const DataLayout &DL = SI->getDataLayout(); |
| 21732 | Align Alignment = SI->getAlign(); |
| 21733 | |
| 21734 | // Skip if we do not have NEON/MVE, and skip illegal vector types. We can |
| 21735 | // "legalize" wide vector types into multiple interleaved accesses as long as |
| 21736 | // the vector types are divisible by 128. |
| 21737 | if (!isLegalInterleavedAccessType(Factor, VecTy: SubVecTy, Alignment, DL)) |
| 21738 | return false; |
| 21739 | |
| 21740 | unsigned NumStores = getNumInterleavedAccesses(VecTy: SubVecTy, DL); |
| 21741 | |
| 21742 | Value *Op0 = SVI->getOperand(i_nocapture: 0); |
| 21743 | Value *Op1 = SVI->getOperand(i_nocapture: 1); |
| 21744 | IRBuilder<> Builder(SI); |
| 21745 | |
| 21746 | // StN intrinsics don't support pointer vectors as arguments. Convert pointer |
| 21747 | // vectors to integer vectors. |
| 21748 | if (EltTy->isPointerTy()) { |
| 21749 | Type *IntTy = DL.getIntPtrType(EltTy); |
| 21750 | |
| 21751 | // Convert to the corresponding integer vector. |
| 21752 | auto *IntVecTy = |
| 21753 | FixedVectorType::get(ElementType: IntTy, FVTy: cast<FixedVectorType>(Val: Op0->getType())); |
| 21754 | Op0 = Builder.CreatePtrToInt(V: Op0, DestTy: IntVecTy); |
| 21755 | Op1 = Builder.CreatePtrToInt(V: Op1, DestTy: IntVecTy); |
| 21756 | |
| 21757 | SubVecTy = FixedVectorType::get(ElementType: IntTy, NumElts: LaneLen); |
| 21758 | } |
| 21759 | |
| 21760 | // The base address of the store. |
| 21761 | Value *BaseAddr = SI->getPointerOperand(); |
| 21762 | |
| 21763 | if (NumStores > 1) { |
| 21764 | // If we're going to generate more than one store, reset the lane length |
| 21765 | // and sub-vector type to something legal. |
| 21766 | LaneLen /= NumStores; |
| 21767 | SubVecTy = FixedVectorType::get(ElementType: SubVecTy->getElementType(), NumElts: LaneLen); |
| 21768 | } |
| 21769 | |
| 21770 | assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!" ); |
| 21771 | |
| 21772 | auto Mask = SVI->getShuffleMask(); |
| 21773 | |
| 21774 | auto createStoreIntrinsic = [&](Value *BaseAddr, |
| 21775 | SmallVectorImpl<Value *> &Shuffles) { |
| 21776 | if (Subtarget->hasNEON()) { |
| 21777 | static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2, |
| 21778 | Intrinsic::arm_neon_vst3, |
| 21779 | Intrinsic::arm_neon_vst4}; |
| 21780 | Type *PtrTy = Builder.getPtrTy(AddrSpace: SI->getPointerAddressSpace()); |
| 21781 | Type *Tys[] = {PtrTy, SubVecTy}; |
| 21782 | |
| 21783 | SmallVector<Value *, 6> Ops; |
| 21784 | Ops.push_back(Elt: BaseAddr); |
| 21785 | append_range(C&: Ops, R&: Shuffles); |
| 21786 | Ops.push_back(Elt: Builder.getInt32(C: SI->getAlign().value())); |
| 21787 | Builder.CreateIntrinsic(ID: StoreInts[Factor - 2], Types: Tys, Args: Ops); |
| 21788 | } else { |
| 21789 | assert((Factor == 2 || Factor == 4) && |
| 21790 | "expected interleave factor of 2 or 4 for MVE" ); |
| 21791 | Intrinsic::ID StoreInts = |
| 21792 | Factor == 2 ? Intrinsic::arm_mve_vst2q : Intrinsic::arm_mve_vst4q; |
| 21793 | Type *PtrTy = Builder.getPtrTy(AddrSpace: SI->getPointerAddressSpace()); |
| 21794 | Type *Tys[] = {PtrTy, SubVecTy}; |
| 21795 | |
| 21796 | SmallVector<Value *, 6> Ops; |
| 21797 | Ops.push_back(Elt: BaseAddr); |
| 21798 | append_range(C&: Ops, R&: Shuffles); |
| 21799 | for (unsigned F = 0; F < Factor; F++) { |
| 21800 | Ops.push_back(Elt: Builder.getInt32(C: F)); |
| 21801 | Builder.CreateIntrinsic(ID: StoreInts, Types: Tys, Args: Ops); |
| 21802 | Ops.pop_back(); |
| 21803 | } |
| 21804 | } |
| 21805 | }; |
| 21806 | |
| 21807 | for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) { |
| 21808 | // If we're generating more than one store, compute the base address of |
| 21809 | // subsequent stores as an offset from the previous. |
| 21810 | if (StoreCount > 0) |
| 21811 | BaseAddr = Builder.CreateConstGEP1_32(Ty: SubVecTy->getElementType(), |
| 21812 | Ptr: BaseAddr, Idx0: LaneLen * Factor); |
| 21813 | |
| 21814 | SmallVector<Value *, 4> Shuffles; |
| 21815 | |
| 21816 | // Split the shufflevector operands into sub vectors for the new vstN call. |
| 21817 | for (unsigned i = 0; i < Factor; i++) { |
| 21818 | unsigned IdxI = StoreCount * LaneLen * Factor + i; |
| 21819 | if (Mask[IdxI] >= 0) { |
| 21820 | Shuffles.push_back(Elt: Builder.CreateShuffleVector( |
| 21821 | V1: Op0, V2: Op1, Mask: createSequentialMask(Start: Mask[IdxI], NumInts: LaneLen, NumUndefs: 0))); |
| 21822 | } else { |
| 21823 | unsigned StartMask = 0; |
| 21824 | for (unsigned j = 1; j < LaneLen; j++) { |
| 21825 | unsigned IdxJ = StoreCount * LaneLen * Factor + j; |
| 21826 | if (Mask[IdxJ * Factor + IdxI] >= 0) { |
| 21827 | StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ; |
| 21828 | break; |
| 21829 | } |
| 21830 | } |
| 21831 | // Note: If all elements in a chunk are undefs, StartMask=0! |
| 21832 | // Note: Filling undef gaps with random elements is ok, since |
| 21833 | // those elements were being written anyway (with undefs). |
| 21834 | // In the case of all undefs we default to using elements from 0. |
| 21835 | // Note: StartMask cannot be negative; it's checked in |
| 21836 | // isReInterleaveMask. |
| 21837 | Shuffles.push_back(Elt: Builder.CreateShuffleVector( |
| 21838 | V1: Op0, V2: Op1, Mask: createSequentialMask(Start: StartMask, NumInts: LaneLen, NumUndefs: 0))); |
| 21839 | } |
| 21840 | } |
| 21841 | |
| 21842 | createStoreIntrinsic(BaseAddr, Shuffles); |
| 21843 | } |
| 21844 | return true; |
| 21845 | } |
| 21846 | |
| 21847 | enum HABaseType { |
| 21848 | HA_UNKNOWN = 0, |
| 21849 | HA_FLOAT, |
| 21850 | HA_DOUBLE, |
| 21851 | HA_VECT64, |
| 21852 | HA_VECT128 |
| 21853 | }; |
| 21854 | |
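| | /// Recursively check whether \p Ty is an AAPCS-VFP homogeneous aggregate: up |
| | /// to four members, all of the same float, double, or 64/128-bit vector base |
| | /// type. For example, "struct { float x, y, z; }" is an HA of three floats. |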
| 21855 | static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base, |
| 21856 | uint64_t &Members) { |
| 21857 | if (auto *ST = dyn_cast<StructType>(Val: Ty)) { |
| 21858 | for (unsigned i = 0; i < ST->getNumElements(); ++i) { |
| 21859 | uint64_t SubMembers = 0; |
| 21860 | if (!isHomogeneousAggregate(Ty: ST->getElementType(N: i), Base, Members&: SubMembers)) |
| 21861 | return false; |
| 21862 | Members += SubMembers; |
| 21863 | } |
| 21864 | } else if (auto *AT = dyn_cast<ArrayType>(Val: Ty)) { |
| 21865 | uint64_t SubMembers = 0; |
| 21866 | if (!isHomogeneousAggregate(Ty: AT->getElementType(), Base, Members&: SubMembers)) |
| 21867 | return false; |
| 21868 | Members += SubMembers * AT->getNumElements(); |
| 21869 | } else if (Ty->isFloatTy()) { |
| 21870 | if (Base != HA_UNKNOWN && Base != HA_FLOAT) |
| 21871 | return false; |
| 21872 | Members = 1; |
| 21873 | Base = HA_FLOAT; |
| 21874 | } else if (Ty->isDoubleTy()) { |
| 21875 | if (Base != HA_UNKNOWN && Base != HA_DOUBLE) |
| 21876 | return false; |
| 21877 | Members = 1; |
| 21878 | Base = HA_DOUBLE; |
| 21879 | } else if (auto *VT = dyn_cast<VectorType>(Val: Ty)) { |
| 21880 | Members = 1; |
| 21881 | switch (Base) { |
| 21882 | case HA_FLOAT: |
| 21883 | case HA_DOUBLE: |
| 21884 | return false; |
| 21885 | case HA_VECT64: |
| 21886 | return VT->getPrimitiveSizeInBits().getFixedValue() == 64; |
| 21887 | case HA_VECT128: |
| 21888 | return VT->getPrimitiveSizeInBits().getFixedValue() == 128; |
| 21889 | case HA_UNKNOWN: |
| 21890 | switch (VT->getPrimitiveSizeInBits().getFixedValue()) { |
| 21891 | case 64: |
| 21892 | Base = HA_VECT64; |
| 21893 | return true; |
| 21894 | case 128: |
| 21895 | Base = HA_VECT128; |
| 21896 | return true; |
| 21897 | default: |
| 21898 | return false; |
| 21899 | } |
| 21900 | } |
| 21901 | } |
| 21902 | |
| 21903 | return (Members > 0 && Members <= 4); |
| 21904 | } |
| 21905 | |
| 21906 | /// Return the correct alignment for the current calling convention. |
| 21907 | Align ARMTargetLowering::getABIAlignmentForCallingConv( |
| 21908 | Type *ArgTy, const DataLayout &DL) const { |
| 21909 | const Align ABITypeAlign = DL.getABITypeAlign(Ty: ArgTy); |
| 21910 | if (!ArgTy->isVectorTy()) |
| 21911 | return ABITypeAlign; |
| 21912 | |
| 21913 | // Avoid over-aligning vector parameters. It would require realigning the |
| 21914 | // stack and waste space for no real benefit. |
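| | // For example, on AAPCS targets with an 8-byte stack alignment, a <4 x i32> |
| | // argument (16-byte ABI alignment) is passed with only 8-byte alignment. |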
| 21915 | MaybeAlign StackAlign = DL.getStackAlignment(); |
| 21916 | assert(StackAlign && "data layout string is missing stack alignment" ); |
| 21917 | return std::min(a: ABITypeAlign, b: *StackAlign); |
| 21918 | } |
| 21919 | |
| 21920 | /// Return true if a type is an AAPCS-VFP homogeneous aggregate or one of |
| 21921 | /// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when |
| 21922 | /// passing according to AAPCS rules. |
| 21923 | bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters( |
| 21924 | Type *Ty, CallingConv::ID CallConv, bool isVarArg, |
| 21925 | const DataLayout &DL) const { |
| 21926 | if (getEffectiveCallingConv(CC: CallConv, isVarArg) != |
| 21927 | CallingConv::ARM_AAPCS_VFP) |
| 21928 | return false; |
| 21929 | |
| 21930 | HABaseType Base = HA_UNKNOWN; |
| 21931 | uint64_t Members = 0; |
| 21932 | bool IsHA = isHomogeneousAggregate(Ty, Base, Members); |
| 21933 | LLVM_DEBUG(dbgs() << "isHA: " << IsHA << " " ; Ty->dump()); |
| 21934 | |
| 21935 | bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy(); |
| 21936 | return IsHA || IsIntArray; |
| 21937 | } |
| 21938 | |
| 21939 | Register ARMTargetLowering::getExceptionPointerRegister( |
| 21940 | const Constant *PersonalityFn) const { |
| 21941 | // Platforms which do not use SjLj EH may return values in these registers |
| 21942 | // via the personality function. |
| 21943 | return Subtarget->useSjLjEH() ? Register() : ARM::R0; |
| 21944 | } |
| 21945 | |
| 21946 | Register ARMTargetLowering::getExceptionSelectorRegister( |
| 21947 | const Constant *PersonalityFn) const { |
| 21948 | // Platforms which do not use SjLj EH may return values in these registers |
| 21949 | // via the personality function. |
| 21950 | return Subtarget->useSjLjEH() ? Register() : ARM::R1; |
| 21951 | } |
| 21952 | |
| 21953 | void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { |
| 21954 | // Update IsSplitCSR in ARMFunctionInfo. |
| 21955 | ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>(); |
| 21956 | AFI->setIsSplitCSR(true); |
| 21957 | } |
| 21958 | |
| 21959 | void ARMTargetLowering::insertCopiesSplitCSR( |
| 21960 | MachineBasicBlock *Entry, |
| 21961 | const SmallVectorImpl<MachineBasicBlock *> &Exits) const { |
| 21962 | const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 21963 | const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(MF: Entry->getParent()); |
| 21964 | if (!IStart) |
| 21965 | return; |
| 21966 | |
| 21967 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 21968 | MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); |
| 21969 | MachineBasicBlock::iterator MBBI = Entry->begin(); |
| 21970 | for (const MCPhysReg *I = IStart; *I; ++I) { |
| 21971 | const TargetRegisterClass *RC = nullptr; |
| 21972 | if (ARM::GPRRegClass.contains(Reg: *I)) |
| 21973 | RC = &ARM::GPRRegClass; |
| 21974 | else if (ARM::DPRRegClass.contains(Reg: *I)) |
| 21975 | RC = &ARM::DPRRegClass; |
| 21976 | else |
| 21977 | llvm_unreachable("Unexpected register class in CSRsViaCopy!" ); |
| 21978 | |
| 21979 | Register NewVR = MRI->createVirtualRegister(RegClass: RC); |
| 21980 | // Create copy from CSR to a virtual register. |
| 21981 | // FIXME: this currently does not emit CFI pseudo-instructions, it works |
| 21982 | // fine for CXX_FAST_TLS since the C++-style TLS access functions should be |
| 21983 | // nounwind. If we want to generalize this later, we may need to emit |
| 21984 | // CFI pseudo-instructions. |
| 21985 | assert(Entry->getParent()->getFunction().hasFnAttribute( |
| 21986 | Attribute::NoUnwind) && |
| 21987 | "Function should be nounwind in insertCopiesSplitCSR!" ); |
| 21988 | Entry->addLiveIn(PhysReg: *I); |
| 21989 | BuildMI(BB&: *Entry, I: MBBI, MIMD: DebugLoc(), MCID: TII->get(Opcode: TargetOpcode::COPY), DestReg: NewVR) |
| 21990 | .addReg(RegNo: *I); |
| 21991 | |
| 21992 | // Insert the copy-back instructions right before the terminator. |
| 21993 | for (auto *Exit : Exits) |
| 21994 | BuildMI(BB&: *Exit, I: Exit->getFirstTerminator(), MIMD: DebugLoc(), |
| 21995 | MCID: TII->get(Opcode: TargetOpcode::COPY), DestReg: *I) |
| 21996 | .addReg(RegNo: NewVR); |
| 21997 | } |
| 21998 | } |
| 21999 | |
| 22000 | void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const { |
| 22001 | MF.getFrameInfo().computeMaxCallFrameSize(MF); |
| 22002 | TargetLoweringBase::finalizeLowering(MF); |
| 22003 | } |
| 22004 | |
| 22005 | bool ARMTargetLowering::isComplexDeinterleavingSupported() const { |
| 22006 | return Subtarget->hasMVEIntegerOps(); |
| 22007 | } |
| 22008 | |
| 22009 | bool ARMTargetLowering::isComplexDeinterleavingOperationSupported( |
| 22010 | ComplexDeinterleavingOperation Operation, Type *Ty) const { |
| 22011 | auto *VTy = dyn_cast<FixedVectorType>(Val: Ty); |
| 22012 | if (!VTy) |
| 22013 | return false; |
| 22014 | |
| 22015 | auto *ScalarTy = VTy->getScalarType(); |
| 22016 | unsigned NumElements = VTy->getNumElements(); |
| 22017 | |
| 22018 | unsigned VTyWidth = VTy->getScalarSizeInBits() * NumElements; |
| 22019 | if (VTyWidth < 128 || !llvm::isPowerOf2_32(Value: VTyWidth)) |
| 22020 | return false; |
| 22021 | |
| 22022 | // Both VCADD and VCMUL/VCMLA support the same types, F16 and F32 |
| 22023 | if (ScalarTy->isHalfTy() || ScalarTy->isFloatTy()) |
| 22024 | return Subtarget->hasMVEFloatOps(); |
| 22025 | |
| 22026 | if (Operation != ComplexDeinterleavingOperation::CAdd) |
| 22027 | return false; |
| 22028 | |
| 22029 | return Subtarget->hasMVEIntegerOps() && |
| 22030 | (ScalarTy->isIntegerTy(Bitwidth: 8) || ScalarTy->isIntegerTy(Bitwidth: 16) || |
| 22031 | ScalarTy->isIntegerTy(Bitwidth: 32)); |
| 22032 | } |
| 22033 | |
| 22034 | Value *ARMTargetLowering::createComplexDeinterleavingIR( |
| 22035 | IRBuilderBase &B, ComplexDeinterleavingOperation OperationType, |
| 22036 | ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB, |
| 22037 | Value *Accumulator) const { |
| 22038 | |
| 22039 | FixedVectorType *Ty = cast<FixedVectorType>(Val: InputA->getType()); |
| 22040 | |
| 22041 | unsigned TyWidth = Ty->getScalarSizeInBits() * Ty->getNumElements(); |
| 22042 | |
| 22043 | assert(TyWidth >= 128 && "Width of vector type must be at least 128 bits" ); |
| 22044 | |
| 22045 | if (TyWidth > 128) { |
| 22046 | int Stride = Ty->getNumElements() / 2; |
| 22047 | auto SplitSeq = llvm::seq<int>(Begin: 0, End: Ty->getNumElements()); |
| 22048 | auto SplitSeqVec = llvm::to_vector(Range&: SplitSeq); |
| 22049 | ArrayRef<int> LowerSplitMask(&SplitSeqVec[0], Stride); |
| 22050 | ArrayRef<int> UpperSplitMask(&SplitSeqVec[Stride], Stride); |
| 22051 | |
| 22052 | auto *LowerSplitA = B.CreateShuffleVector(V: InputA, Mask: LowerSplitMask); |
| 22053 | auto *LowerSplitB = B.CreateShuffleVector(V: InputB, Mask: LowerSplitMask); |
| 22054 | auto *UpperSplitA = B.CreateShuffleVector(V: InputA, Mask: UpperSplitMask); |
| 22055 | auto *UpperSplitB = B.CreateShuffleVector(V: InputB, Mask: UpperSplitMask); |
| 22056 | Value *LowerSplitAcc = nullptr; |
| 22057 | Value *UpperSplitAcc = nullptr; |
| 22058 | |
| 22059 | if (Accumulator) { |
| 22060 | LowerSplitAcc = B.CreateShuffleVector(V: Accumulator, Mask: LowerSplitMask); |
| 22061 | UpperSplitAcc = B.CreateShuffleVector(V: Accumulator, Mask: UpperSplitMask); |
| 22062 | } |
| 22063 | |
| 22064 | auto *LowerSplitInt = createComplexDeinterleavingIR( |
| 22065 | B, OperationType, Rotation, InputA: LowerSplitA, InputB: LowerSplitB, Accumulator: LowerSplitAcc); |
| 22066 | auto *UpperSplitInt = createComplexDeinterleavingIR( |
| 22067 | B, OperationType, Rotation, InputA: UpperSplitA, InputB: UpperSplitB, Accumulator: UpperSplitAcc); |
| 22068 | |
| 22069 | ArrayRef<int> JoinMask(&SplitSeqVec[0], Ty->getNumElements()); |
| 22070 | return B.CreateShuffleVector(V1: LowerSplitInt, V2: UpperSplitInt, Mask: JoinMask); |
| 22071 | } |
| 22072 | |
| 22073 | auto *IntTy = Type::getInt32Ty(C&: B.getContext()); |
| 22074 | |
| 22075 | ConstantInt *ConstRotation = nullptr; |
| 22076 | if (OperationType == ComplexDeinterleavingOperation::CMulPartial) { |
| 22077 | ConstRotation = ConstantInt::get(Ty: IntTy, V: (int)Rotation); |
| 22078 | |
| 22079 | if (Accumulator) |
| 22080 | return B.CreateIntrinsic(ID: Intrinsic::arm_mve_vcmlaq, Types: Ty, |
| 22081 | Args: {ConstRotation, Accumulator, InputB, InputA}); |
| 22082 | return B.CreateIntrinsic(ID: Intrinsic::arm_mve_vcmulq, Types: Ty, |
| 22083 | Args: {ConstRotation, InputB, InputA}); |
| 22084 | } |
| 22085 | |
| 22086 | if (OperationType == ComplexDeinterleavingOperation::CAdd) { |
| 22087 | // 1 means the value is not halved. |
| 22088 | auto *ConstHalving = ConstantInt::get(Ty: IntTy, V: 1); |
| 22089 | |
| 22090 | if (Rotation == ComplexDeinterleavingRotation::Rotation_90) |
| 22091 | ConstRotation = ConstantInt::get(Ty: IntTy, V: 0); |
| 22092 | else if (Rotation == ComplexDeinterleavingRotation::Rotation_270) |
| 22093 | ConstRotation = ConstantInt::get(Ty: IntTy, V: 1); |
| 22094 | |
| 22095 | if (!ConstRotation) |
| 22096 | return nullptr; // Invalid rotation for arm_mve_vcaddq |
| 22097 | |
| 22098 | return B.CreateIntrinsic(ID: Intrinsic::arm_mve_vcaddq, Types: Ty, |
| 22099 | Args: {ConstHalving, ConstRotation, InputA, InputB}); |
| 22100 | } |
| 22101 | |
| 22102 | return nullptr; |
| 22103 | } |
| 22104 | |