//===- ARMISelLowering.cpp - ARM DAG Lowering Implementation --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "ARMISelLowering.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSelectionDAGInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetTransformInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ComplexDeinterleavingPass.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "arm-isel"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
STATISTIC(NumConstpoolPromoted,
          "Number of constants with their storage promoted into constant pools");

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

static cl::opt<bool> EnableConstpoolPromotion(
    "arm-promote-constant", cl::Hidden,
    cl::desc("Enable / disable promotion of unnamed_addr constants into "
             "constant pools"),
    cl::init(false)); // FIXME: set to true by default once PR32780 is fixed
static cl::opt<unsigned> ConstpoolPromotionMaxSize(
    "arm-promote-constant-max-size", cl::Hidden,
    cl::desc("Maximum size of constant to promote into a constant pool"),
    cl::init(64));
static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
    "arm-promote-constant-max-total", cl::Hidden,
    cl::desc("Maximum size of ALL constants to promote into a constant pool"),
    cl::init(128));

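// MVE provides only VLD2/VST2 and VLD4/VST4 interleaving memory operations,
// so the default factor of 2 effectively limits codegen to the VLD2/VST2
// forms.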
cl::opt<unsigned>
MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor", cl::Hidden,
  cl::desc("Maximum interleave factor for MVE VLDn to generate."),
  cl::init(2));

cl::opt<unsigned> ArmMaxBaseUpdatesToCheck(
    "arm-max-base-updates-to-check", cl::Hidden,
    cl::desc("Maximum number of base-updates to check generating postindex."),
    cl::init(64));

/// Value type used for "flags" operands / results (either CPSR or FPSCR_NZCV).
constexpr MVT FlagsVT = MVT::i32;

// The APCS parameter registers.
static const MCPhysReg GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

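// For values passed across a CMSE (Armv8-M Security Extensions) boundary, the
// extension of a sub-i32 integer to 32 bits cannot be trusted from the other
// security state, so it is redone locally: truncate back to the declared
// argument type, then sign- or zero-extend to i32 per the argument's flags.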
static SDValue handleCMSEValue(const SDValue &Value, const ISD::InputArg &Arg,
                               SelectionDAG &DAG, const SDLoc &DL) {
  assert(Arg.ArgVT.isScalarInteger());
  assert(Arg.ArgVT.bitsLT(MVT::i32));
  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, Arg.ArgVT, Value);
  SDValue Ext =
      DAG.getNode(Arg.Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL,
                  MVT::i32, Trunc);
  return Ext;
}

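// Register a NEON vector type and configure its operations. Loads and stores
// of the D-register types are promoted to f64, and those of the Q-register
// types to v2f64, so a whole register can be moved with a single memory
// operation regardless of the element type.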
void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
  }

  MVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  if (ElemTy == MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT, Custom);
    setOperationAction(ISD::UINT_TO_FP, VT, Custom);
    setOperationAction(ISD::FP_TO_SINT, VT, Custom);
    setOperationAction(ISD::FP_TO_UINT, VT, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
  setOperationAction(ISD::SELECT, VT, Expand);
  setOperationAction(ISD::SELECT_CC, VT, Expand);
  setOperationAction(ISD::VSELECT, VT, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::FDIV, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);
  setOperationAction(ISD::SDIVREM, VT, Expand);
  setOperationAction(ISD::UDIVREM, VT, Expand);

  if (!VT.isFloatingPoint() && VT != MVT::v2i64 && VT != MVT::v1i64)
    for (auto Opcode : {ISD::ABS, ISD::ABDS, ISD::ABDU, ISD::SMIN, ISD::SMAX,
                        ISD::UMIN, ISD::UMAX, ISD::CTLS})
      setOperationAction(Opcode, VT, Legal);
  if (!VT.isFloatingPoint())
    for (auto Opcode : {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT})
      setOperationAction(Opcode, VT, Legal);
}

void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPRRegClass);
  addTypeForNEON(VT, MVT::f64);
}

void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPairRegClass);
  addTypeForNEON(VT, MVT::v2f64);
}

void ARMTargetLowering::setAllExpand(MVT VT) {
  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
    setOperationAction(Opc, VT, Expand);

  // We support these really simple operations even on types where all
  // the actual arithmetic has to be broken down into simpler
  // operations or turned into library calls.
  setOperationAction(ISD::BITCAST, VT, Legal);
  setOperationAction(ISD::LOAD, VT, Legal);
  setOperationAction(ISD::STORE, VT, Legal);
  setOperationAction(ISD::UNDEF, VT, Legal);
}

void ARMTargetLowering::addAllExtLoads(const MVT From, const MVT To,
                                       LegalizeAction Action) {
  setLoadExtAction(ISD::EXTLOAD, From, To, Action);
  setLoadExtAction(ISD::ZEXTLOAD, From, To, Action);
  setLoadExtAction(ISD::SEXTLOAD, From, To, Action);
}

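// Configure all of the 128-bit MVE vector types: the integer, floating-point,
// "long" (64-bit element) and predicate types. MVE keeps every vector in a
// 128-bit Q register and its predicates in VPR, so these map onto the MQPR
// and VCCR register classes below.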
void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) {
  const MVT IntTypes[] = { MVT::v16i8, MVT::v8i16, MVT::v4i32 };

  for (auto VT : IntTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
    setOperationAction(ISD::SMIN, VT, Legal);
    setOperationAction(ISD::SMAX, VT, Legal);
    setOperationAction(ISD::UMIN, VT, Legal);
    setOperationAction(ISD::UMAX, VT, Legal);
    setOperationAction(ISD::ABS, VT, Legal);
    setOperationAction(ISD::CTLS, VT, Legal);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::MLOAD, VT, Custom);
    setOperationAction(ISD::MSTORE, VT, Legal);
    setOperationAction(ISD::CTLZ, VT, Legal);
    setOperationAction(ISD::CTTZ, VT, Custom);
    setOperationAction(ISD::BITREVERSE, VT, Legal);
    setOperationAction(ISD::BSWAP, VT, Legal);
    setOperationAction(ISD::SADDSAT, VT, Legal);
    setOperationAction(ISD::UADDSAT, VT, Legal);
    setOperationAction(ISD::SSUBSAT, VT, Legal);
    setOperationAction(ISD::USUBSAT, VT, Legal);
    setOperationAction(ISD::ABDS, VT, Legal);
    setOperationAction(ISD::ABDU, VT, Legal);
    setOperationAction(ISD::AVGFLOORS, VT, Legal);
    setOperationAction(ISD::AVGFLOORU, VT, Legal);
    setOperationAction(ISD::AVGCEILS, VT, Legal);
    setOperationAction(ISD::AVGCEILU, VT, Legal);

    // No native support for these.
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);

    // Vector reductions
    setOperationAction(ISD::VECREDUCE_ADD, VT, Legal);
    setOperationAction(ISD::VECREDUCE_SMAX, VT, Legal);
    setOperationAction(ISD::VECREDUCE_UMAX, VT, Legal);
    setOperationAction(ISD::VECREDUCE_SMIN, VT, Legal);
    setOperationAction(ISD::VECREDUCE_UMIN, VT, Legal);
    setOperationAction(ISD::VECREDUCE_MUL, VT, Custom);
    setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
    setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
    setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);

    if (!HasMVEFP) {
      setOperationAction(ISD::SINT_TO_FP, VT, Expand);
      setOperationAction(ISD::UINT_TO_FP, VT, Expand);
      setOperationAction(ISD::FP_TO_SINT, VT, Expand);
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    } else {
      setOperationAction(ISD::FP_TO_SINT_SAT, VT, Custom);
      setOperationAction(ISD::FP_TO_UINT_SAT, VT, Custom);
    }

    // Pre- and post-increment are supported on loads and stores.
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }
  }

  const MVT FloatTypes[] = { MVT::v8f16, MVT::v4f32 };
  for (auto VT : FloatTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    if (!HasMVEFP)
      setAllExpand(VT);

    // These are legal or custom whether or not we have MVE.fp.
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getVectorElementType(), Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT.getVectorElementType(), Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::MLOAD, VT, Custom);
    setOperationAction(ISD::MSTORE, VT, Legal);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);

    // Pre- and post-increment are supported on loads and stores.
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }

    if (HasMVEFP) {
      setOperationAction(ISD::FMINNUM, VT, Legal);
      setOperationAction(ISD::FMAXNUM, VT, Legal);
      for (auto Op : {ISD::FROUND, ISD::STRICT_FROUND, ISD::FROUNDEVEN,
                      ISD::STRICT_FROUNDEVEN, ISD::FTRUNC, ISD::STRICT_FTRUNC,
                      ISD::FRINT, ISD::STRICT_FRINT, ISD::FFLOOR,
                      ISD::STRICT_FFLOOR, ISD::FCEIL, ISD::STRICT_FCEIL}) {
        setOperationAction(Op, VT, Legal);
      }
      setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMUL, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);

      // No native support for these.
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FTAN, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FEXP10, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
    }
  }

  // Custom-expand vector reductions that are smaller than a legal vector, to
  // prevent false zero elements from being introduced when they are widened.
  setOperationAction(ISD::VECREDUCE_FADD, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMUL, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMIN, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMAX, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FADD, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMUL, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMIN, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMAX, MVT::v2f16, Custom);

  // We 'support' these types up to bitcast/load/store level, regardless of
  // MVE integer-only / float support. Only FP data processing on the FP
  // vector types is inhibited at the integer-only level.
  const MVT LongTypes[] = { MVT::v2i64, MVT::v2f64 };
  for (auto VT : LongTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    setAllExpand(VT);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::VSELECT, VT, Legal);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  }
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);

  // We can do bitwise operations on v2i64 vectors.
  setOperationAction(ISD::AND, MVT::v2i64, Legal);
  setOperationAction(ISD::OR, MVT::v2i64, Legal);
  setOperationAction(ISD::XOR, MVT::v2i64, Legal);

  // It is legal to extload from v8i8 to v8i16, and from v4i8/v4i16 to v4i32.
  addAllExtLoads(MVT::v8i16, MVT::v8i8, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i16, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i8, Legal);

  // It is legal to sign-extend from v4i8/v4i16 to v4i32, or from v8i8 to
  // v8i16.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i16, Legal);

  // Some truncating stores are legal too.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
  setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);

  // Pre- and post-increment on these are legal, given the correct extends.
  for (unsigned im = (unsigned)ISD::PRE_INC;
       im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
    for (auto VT : {MVT::v8i8, MVT::v4i8, MVT::v4i16}) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }
  }

  // Predicate types
  const MVT pTypes[] = {MVT::v16i1, MVT::v8i1, MVT::v4i1, MVT::v2i1};
  for (auto VT : pTypes) {
    addRegisterClass(VT, &ARM::VCCRRegClass);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);
    setOperationAction(ISD::TRUNCATE, VT, Custom);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);

    if (!HasMVEFP) {
      setOperationAction(ISD::SINT_TO_FP, VT, Expand);
      setOperationAction(ISD::UINT_TO_FP, VT, Expand);
      setOperationAction(ISD::FP_TO_SINT, VT, Expand);
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::TRUNCATE, MVT::v2i1, Expand);
  setOperationAction(ISD::AND, MVT::v2i1, Expand);
  setOperationAction(ISD::OR, MVT::v2i1, Expand);
  setOperationAction(ISD::XOR, MVT::v2i1, Expand);
  setOperationAction(ISD::SINT_TO_FP, MVT::v2i1, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::v2i1, Expand);
  setOperationAction(ISD::FP_TO_SINT, MVT::v2i1, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::v2i1, Expand);

  setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
  setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
  setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
  setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
  setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
  setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
  setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
}

const ARMBaseTargetMachine &ARMTargetLowering::getTM() const {
  return static_cast<const ARMBaseTargetMachine &>(getTargetMachine());
}

ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM_,
                                     const ARMSubtarget &STI)
    : TargetLowering(TM_, STI), Subtarget(&STI),
      RegInfo(Subtarget->getRegisterInfo()),
      Itins(Subtarget->getInstrItineraryData()) {
  const auto &TM = static_cast<const ARMBaseTargetMachine &>(TM_);

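  // Scalar booleans are 0 / 1 values; vector booleans are all-zeros or
  // all-ones lanes, matching what the NEON and MVE vector compare
  // instructions produce.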
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  const Triple &TT = TM.getTargetTriple();

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
  else
    addRegisterClass(MVT::i32, &ARM::GPRRegClass);

  if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only() &&
      Subtarget->hasFPRegs()) {
    addRegisterClass(MVT::f32, &ARM::SPRRegClass);
    addRegisterClass(MVT::f64, &ARM::DPRRegClass);

    setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom);

    if (!Subtarget->hasVFP2Base()) {
      setAllExpand(MVT::f32);
    } else {
      for (auto Op : {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
                      ISD::STRICT_FDIV, ISD::STRICT_FMA, ISD::STRICT_FSQRT})
        setOperationAction(Op, MVT::f32, Legal);
    }
    if (!Subtarget->hasFP64()) {
      setAllExpand(MVT::f64);
    } else {
      for (auto Op : {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
                      ISD::STRICT_FDIV, ISD::STRICT_FMA, ISD::STRICT_FSQRT})
        setOperationAction(Op, MVT::f64, Legal);

      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
    }
  }

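  // FullFP16 (the Armv8.2-A half-precision extension) provides native f16
  // arithmetic, so f16 gets its own register class and legal strict FP
  // operations.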
  if (Subtarget->hasFullFP16()) {
    for (auto Op : {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
                    ISD::STRICT_FDIV, ISD::STRICT_FMA, ISD::STRICT_FSQRT})
      setOperationAction(Op, MVT::f16, Legal);

    addRegisterClass(MVT::f16, &ARM::HPRRegClass);
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    setOperationAction(ISD::STRICT_FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::STRICT_FMAXNUM, MVT::f16, Legal);
  }

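  // Without hardware bf16 instructions, bf16 -> f32 can be expanded to a
  // 16-bit shift into the high half of an f32, while f32/f64 -> bf16 needs
  // rounding and is therefore custom-lowered.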
  if (Subtarget->hasBF16()) {
    addRegisterClass(MVT::bf16, &ARM::HPRRegClass);
    setAllExpand(MVT::bf16);
    if (!Subtarget->hasFullFP16())
      setOperationAction(ISD::BITCAST, MVT::bf16, Custom);
  } else {
    setOperationAction(ISD::BF16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::BF16_TO_FP, MVT::f64, Expand);
    setOperationAction(ISD::FP_TO_BF16, MVT::f32, Custom);
    setOperationAction(ISD::FP_TO_BF16, MVT::f64, Custom);
  }

  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      addAllExtLoads(VT, InnerVT, Expand);
    }

    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
  }

  if (!Subtarget->isThumb1Only() && !Subtarget->hasV8_1MMainlineOps())
    setOperationAction(ISD::SCMP, MVT::i32, Custom);

  if (!Subtarget->hasV8_1MMainlineOps())
    setOperationAction(ISD::UCMP, MVT::i32, Custom);
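  // (ISD::SCMP / ISD::UCMP are the three-way comparisons, llvm.scmp and
  // llvm.ucmp, which return -1, 0 or 1.)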

  if (!Subtarget->isThumb1Only())
    setOperationAction(ISD::ABS, MVT::i32, Custom);

  setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);

  setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom);
  setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom);

  if (Subtarget->hasMVEIntegerOps())
    addMVEVectorTypes(Subtarget->hasMVEFloatOps());

  // Combine low-overhead loop intrinsics so that we can lower i1 types.
  if (Subtarget->hasLOB()) {
    setTargetDAGCombine({ISD::BRCOND, ISD::BR_CC});
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    if (Subtarget->hasFullFP16()) {
      addQRTypeForNEON(MVT::v8f16);
      addDRTypeForNEON(MVT::v4f16);
    }

    if (Subtarget->hasBF16()) {
      addQRTypeForNEON(MVT::v8bf16);
      addDRTypeForNEON(MVT::v4bf16);
    }
  }

  if (Subtarget->hasMVEIntegerOps() || Subtarget->hasNEON()) {
    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // none of Neon, MVE or VFP supports any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    // FIXME: Code duplication: FDIV and FREM are always expanded, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    // FIXME: Create unittest.
    // In other words, find a case where "copysign" appears in the DAG with
    // vector operands.
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    // FIXME: Code duplication: SETCC has custom operation action, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    // FIXME: Create unittest for FNEG and for FABS.
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FTAN, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP10, MVT::v2f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FROUNDEVEN, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
    setOperationAction(ISD::FMA, MVT::v2f64, Expand);
  }

  if (Subtarget->hasNEON()) {
    // The same goes for v4f32, though vadd, vsub and vmul are natively
    // supported for it.
    setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FTAN, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP10, MVT::v4f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FROUNDEVEN, MVT::v4f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);

    // Likewise for the v2f32 versions.
    setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
    setOperationAction(ISD::FTAN, MVT::v2f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP10, MVT::v2f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FROUNDEVEN, MVT::v2f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);

    for (ISD::NodeType Op : {ISD::FFLOOR, ISD::FNEARBYINT, ISD::FCEIL,
                             ISD::FRINT, ISD::FTRUNC, ISD::FROUNDEVEN}) {
      setOperationAction(Op, MVT::v4f16, Expand);
      setOperationAction(Op, MVT::v8f16, Expand);
    }

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
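    // For example, a v4i32 multiply whose operands are sign-extended from
    // v4i16 can be selected as a single VMULL.S16 rather than two extends
    // and a full-width multiply.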
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
    // a destination type that is wider than the source, nor does it have a
    // FP_TO_[SU]INT instruction with a narrower destination than source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);

    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand);

    // NEON does not have single instruction CTPOP for vectors with element
    // types wider than 8-bits. However, custom lowering can leverage the
    // v8i8/v16i8 vcnt instruction.
    setOperationAction(ISD::CTPOP, MVT::v2i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v8i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v1i64, Custom);
    setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);

    setOperationAction(ISD::CTLZ, MVT::v1i64, Expand);
    setOperationAction(ISD::CTLZ, MVT::v2i64, Expand);

    // NEON does not have single instruction CTTZ for vectors.
    setOperationAction(ISD::CTTZ, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);

    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
    }

    // NEON only has FMA instructions as of VFP4.
    if (!Subtarget->hasVFP4Base()) {
      setOperationAction(ISD::FMA, MVT::v2f32, Expand);
      setOperationAction(ISD::FMA, MVT::v4f32, Expand);
    }

    setTargetDAGCombine({ISD::SHL, ISD::SRL, ISD::SRA, ISD::FP_TO_SINT,
                         ISD::FP_TO_UINT, ISD::FMUL, ISD::LOAD});

    // It is legal to extload from the narrow vector memory types below
    // (e.g. v4i8) to any wider integer vector type.
    for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
                   MVT::v2i32}) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
        setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
      }
    }

    for (auto VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v16i8, MVT::v8i16,
                    MVT::v4i32}) {
      setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
    }
  }

  if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {
    setTargetDAGCombine(
        {ISD::BUILD_VECTOR, ISD::VECTOR_SHUFFLE, ISD::INSERT_SUBVECTOR,
         ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
         ISD::SIGN_EXTEND_INREG, ISD::STORE, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND,
         ISD::ANY_EXTEND, ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN,
         ISD::INTRINSIC_VOID, ISD::VECREDUCE_ADD, ISD::ADD, ISD::BITCAST});
  }
  if (Subtarget->hasMVEIntegerOps()) {
    setTargetDAGCombine({ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX,
                         ISD::FP_EXTEND, ISD::SELECT, ISD::SELECT_CC,
                         ISD::SETCC});
  }
  if (Subtarget->hasMVEFloatOps()) {
    setTargetDAGCombine(ISD::FADD);
  }

  if (!Subtarget->hasFP64()) {
    // When targeting a floating-point unit with only single-precision
    // operations, f64 is legal for the few double-precision instructions
    // which are present. However, no double-precision operations other than
    // moves, loads and stores are provided by the hardware.
    setOperationAction(ISD::FADD, MVT::f64, Expand);
    setOperationAction(ISD::FSUB, MVT::f64, Expand);
    setOperationAction(ISD::FMUL, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FDIV, MVT::f64, Expand);
    setOperationAction(ISD::FREM, MVT::f64, LibCall);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FGETSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FNEG, MVT::f64, Expand);
    setOperationAction(ISD::FABS, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FPOW, MVT::f64, Expand);
    setOperationAction(ISD::FLOG, MVT::f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::f64, Expand);
    setOperationAction(ISD::FEXP, MVT::f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::f64, Expand);
    setOperationAction(ISD::FEXP10, MVT::f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f64, Expand);
    setOperationAction(ISD::FRINT, MVT::f64, Expand);
    setOperationAction(ISD::FROUNDEVEN, MVT::f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::f64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::f64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::f64, Custom);
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
  }

  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);

  if (!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) {
    setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom);
    if (Subtarget->hasFullFP16()) {
      setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
    }
  } else {
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
  }

  if (!Subtarget->hasFP16()) {
    setOperationAction(ISD::FP_EXTEND, MVT::f32, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Custom);
  } else {
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
  }

  computeRegisterProperties(Subtarget->getRegisterInfo());

  // ARM does not have floating-point extending loads.
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::bf16, Expand);
  }

  // ... or truncating stores.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::bf16, Expand);
  setTruncStoreAction(MVT::f64, MVT::bf16, Expand);

  // ARM does not have i1 sign extending load.
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, MVT::i1, Legal);
      setIndexedLoadAction(im, MVT::i8, Legal);
      setIndexedLoadAction(im, MVT::i16, Legal);
      setIndexedLoadAction(im, MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1, Legal);
      setIndexedStoreAction(im, MVT::i8, Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  } else {
    // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}.
    setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
    setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
  }

  // Custom loads/stores to possibly use __aeabi_uread/write*.
  if (TT.isTargetAEABI() && !Subtarget->allowsUnalignedMem()) {
    setOperationAction(ISD::STORE, MVT::i32, Custom);
    setOperationAction(ISD::STORE, MVT::i64, Custom);
    setOperationAction(ISD::LOAD, MVT::i32, Custom);
    setOperationAction(ISD::LOAD, MVT::i64, Custom);
  }

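  // The i32 overflow-checked operations (e.g. llvm.sadd.with.overflow) are
  // custom-lowered so the overflow result can be taken from the CPSR flags
  // rather than recomputed with an explicit comparison.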
  setOperationAction(ISD::SADDO, MVT::i32, Custom);
  setOperationAction(ISD::UADDO, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO, MVT::i32, Custom);
  setOperationAction(ISD::USUBO, MVT::i32, Custom);

  if (!Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMULO, MVT::i32, Custom);
    setOperationAction(ISD::SMULO, MVT::i32, Custom);
  }

  setOperationAction(ISD::UADDO_CARRY, MVT::i32, Custom);
  setOperationAction(ISD::USUBO_CARRY, MVT::i32, Custom);
  if (Subtarget->hasDSP()) {
    setOperationAction(ISD::SADDSAT, MVT::i8, Custom);
    setOperationAction(ISD::SSUBSAT, MVT::i8, Custom);
    setOperationAction(ISD::SADDSAT, MVT::i16, Custom);
    setOperationAction(ISD::SSUBSAT, MVT::i16, Custom);
    setOperationAction(ISD::UADDSAT, MVT::i8, Custom);
    setOperationAction(ISD::USUBSAT, MVT::i8, Custom);
    setOperationAction(ISD::UADDSAT, MVT::i16, Custom);
    setOperationAction(ISD::USUBSAT, MVT::i16, Custom);
  }
  if (Subtarget->hasBaseDSP()) {
    setOperationAction(ISD::SADDSAT, MVT::i32, Legal);
    setOperationAction(ISD::SSUBSAT, MVT::i32, Legal);
  }

  // i64 operation support.
  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasDSP()))
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
  setOperationAction(ISD::LOAD, MVT::i64, Custom);
  setOperationAction(ISD::STORE, MVT::i64, Custom);

  // MVE lowers 64-bit shifts to lsll and lsrl,
  // assuming that ISD::SRL and SRA of i64 are already marked custom.
  if (Subtarget->hasMVEIntegerOps())
    setOperationAction(ISD::SHL, MVT::i64, Custom);
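  // For example, "shl i64 %x, %y" can then stay in a GPR pair and use a
  // single LSLL, instead of being expanded or turned into a libcall.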
| 1003 | |
| 1004 | // Expand to __aeabi_l{lsl,lsr,asr} calls for Thumb1. |
| 1005 | if (Subtarget->isThumb1Only()) { |
| 1006 | setOperationAction(Op: ISD::SHL_PARTS, VT: MVT::i32, Action: Expand); |
| 1007 | setOperationAction(Op: ISD::SRA_PARTS, VT: MVT::i32, Action: Expand); |
| 1008 | setOperationAction(Op: ISD::SRL_PARTS, VT: MVT::i32, Action: Expand); |
| 1009 | } |
| 1010 | |
| 1011 | if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) |
| 1012 | setOperationAction(Op: ISD::BITREVERSE, VT: MVT::i32, Action: Legal); |
| 1013 | |
| 1014 | // ARM does not have ROTL. |
| 1015 | setOperationAction(Op: ISD::ROTL, VT: MVT::i32, Action: Expand); |
| 1016 | for (MVT VT : MVT::fixedlen_vector_valuetypes()) { |
| 1017 | setOperationAction(Op: ISD::ROTL, VT, Action: Expand); |
| 1018 | setOperationAction(Op: ISD::ROTR, VT, Action: Expand); |
| 1019 | } |
| 1020 | setOperationAction(Op: ISD::CTTZ, VT: MVT::i32, Action: Custom); |
| 1021 | // TODO: These two should be set to LibCall, but this currently breaks |
| 1022 | // the Linux kernel build. See #101786. |
| 1023 | setOperationAction(Op: ISD::CTPOP, VT: MVT::i32, Action: Expand); |
| 1024 | setOperationAction(Op: ISD::CTPOP, VT: MVT::i64, Action: Expand); |
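| | // CLZ was introduced in ARMv5T and has no Thumb1 encoding, so these
| | // subtargets have no cheap CTLZ and must expand or call out for it.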
| 1025 | if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) { |
| 1026 | setOperationAction(Op: ISD::CTLZ, VT: MVT::i32, Action: Expand); |
| 1027 | setOperationAction(Op: ISD::CTLZ_ZERO_UNDEF, VT: MVT::i32, Action: LibCall); |
| 1028 | } |
| 1029 | |
| 1030 | // @llvm.readcyclecounter requires the Performance Monitors extension. |
| 1031 | // Default to the 0 expansion on unsupported platforms. |
| 1032 | // FIXME: Technically there are older ARM CPUs that have |
| 1033 | // implementation-specific ways of obtaining this information. |
| 1034 | if (Subtarget->hasPerfMon()) |
| 1035 | setOperationAction(Op: ISD::READCYCLECOUNTER, VT: MVT::i64, Action: Custom); |
| 1036 | |
| 1037 | // Only ARMv6 has BSWAP. |
| 1038 | if (!Subtarget->hasV6Ops()) |
| 1039 | setOperationAction(Op: ISD::BSWAP, VT: MVT::i32, Action: Expand); |
| 1040 | |
| 1041 | bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode() |
| 1042 | : Subtarget->hasDivideInARMMode(); |
| 1043 | if (!hasDivide) { |
| 1044 | // These are expanded into libcalls if the cpu doesn't have HW divider. |
| 1045 | setOperationAction(Op: ISD::SDIV, VT: MVT::i32, Action: LibCall); |
| 1046 | setOperationAction(Op: ISD::UDIV, VT: MVT::i32, Action: LibCall); |
| 1047 | } |
| 1048 | |
| 1049 | if (TT.isOSWindows() && !Subtarget->hasDivideInThumbMode()) { |
| 1050 | setOperationAction(Op: ISD::SDIV, VT: MVT::i32, Action: Custom); |
| 1051 | setOperationAction(Op: ISD::UDIV, VT: MVT::i32, Action: Custom); |
| 1052 | |
| 1053 | setOperationAction(Op: ISD::SDIV, VT: MVT::i64, Action: Custom); |
| 1054 | setOperationAction(Op: ISD::UDIV, VT: MVT::i64, Action: Custom); |
| 1055 | } |
| 1056 | |
| 1057 | setOperationAction(Op: ISD::SREM, VT: MVT::i32, Action: Expand); |
| 1058 | setOperationAction(Op: ISD::UREM, VT: MVT::i32, Action: Expand); |
| 1059 | |
| 1060 | // Register based DivRem for AEABI (RTABI 4.2) |
| 1061 | if (TT.isTargetAEABI() || TT.isAndroid() || TT.isTargetGNUAEABI() || |
| 1062 | TT.isTargetMuslAEABI() || TT.isOSFuchsia() || TT.isOSWindows()) { |
| 1063 | setOperationAction(Op: ISD::SREM, VT: MVT::i64, Action: Custom); |
| 1064 | setOperationAction(Op: ISD::UREM, VT: MVT::i64, Action: Custom); |
| 1065 | HasStandaloneRem = false; |
| 1066 | |
| 1067 | setOperationAction(Op: ISD::SDIVREM, VT: MVT::i32, Action: Custom); |
| 1068 | setOperationAction(Op: ISD::UDIVREM, VT: MVT::i32, Action: Custom); |
| 1069 | setOperationAction(Op: ISD::SDIVREM, VT: MVT::i64, Action: Custom); |
| 1070 | setOperationAction(Op: ISD::UDIVREM, VT: MVT::i64, Action: Custom); |
| 1071 | } else { |
| 1072 | setOperationAction(Op: ISD::SDIVREM, VT: MVT::i32, Action: Expand); |
| 1073 | setOperationAction(Op: ISD::UDIVREM, VT: MVT::i32, Action: Expand); |
| 1074 | } |
| 1075 | |
| 1076 | setOperationAction(Op: ISD::GlobalAddress, VT: MVT::i32, Action: Custom); |
| 1077 | setOperationAction(Op: ISD::ConstantPool, VT: MVT::i32, Action: Custom); |
| 1078 | setOperationAction(Op: ISD::GlobalTLSAddress, VT: MVT::i32, Action: Custom); |
| 1079 | setOperationAction(Op: ISD::BlockAddress, VT: MVT::i32, Action: Custom); |
| 1080 | |
| 1081 | setOperationAction(Op: ISD::TRAP, VT: MVT::Other, Action: Legal); |
| 1082 | setOperationAction(Op: ISD::DEBUGTRAP, VT: MVT::Other, Action: Legal); |
| 1083 | |
| 1084 | // Use the default implementation. |
| 1085 | setOperationAction(Op: ISD::VASTART, VT: MVT::Other, Action: Custom); |
| 1086 | setOperationAction(Op: ISD::VAARG, VT: MVT::Other, Action: Expand); |
| 1087 | setOperationAction(Op: ISD::VACOPY, VT: MVT::Other, Action: Expand); |
| 1088 | setOperationAction(Op: ISD::VAEND, VT: MVT::Other, Action: Expand); |
| 1089 | setOperationAction(Op: ISD::STACKSAVE, VT: MVT::Other, Action: Expand); |
| 1090 | setOperationAction(Op: ISD::STACKRESTORE, VT: MVT::Other, Action: Expand); |
| 1091 | |
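| | // Windows requires the stack to be probed as it grows, so dynamic allocas
| | // are custom-lowered there (with a __chkstk-style probe); everywhere else
| | // the default expansion suffices.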
| 1092 | if (TT.isOSWindows()) |
| 1093 | setOperationAction(Op: ISD::DYNAMIC_STACKALLOC, VT: MVT::i32, Action: Custom); |
| 1094 | else |
| 1095 | setOperationAction(Op: ISD::DYNAMIC_STACKALLOC, VT: MVT::i32, Action: Expand); |
| 1096 | |
| 1097 | // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use |
| 1098 | // the default expansion. |
| 1099 | InsertFencesForAtomic = false; |
| 1100 | if (Subtarget->hasAnyDataBarrier() && |
| 1101 | (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) { |
| 1102 | // ATOMIC_FENCE needs custom lowering; the others should have been expanded |
| 1103 | // to ldrex/strex loops already. |
| 1104 | setOperationAction(Op: ISD::ATOMIC_FENCE, VT: MVT::Other, Action: Custom); |
| 1105 | if (!Subtarget->isThumb() || !Subtarget->isMClass()) |
| 1106 | setOperationAction(Op: ISD::ATOMIC_CMP_SWAP, VT: MVT::i64, Action: Custom); |
| 1107 | |
| 1108 | // On v8, we have particularly efficient implementations of atomic fences |
| 1109 | // if they can be combined with nearby atomic loads and stores. |
| 1110 | if (!Subtarget->hasAcquireRelease() || |
| 1111 | getTargetMachine().getOptLevel() == CodeGenOptLevel::None) { |
| 1112 | // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc. |
| 1113 | InsertFencesForAtomic = true; |
| 1114 | } |
| 1115 | } else { |
| 1116 | // If there's anything we can use as a barrier, go through custom lowering |
| 1117 | // for ATOMIC_FENCE. |
| 1118 | // If the target has DMB available in Thumb mode, fences can be inserted.
| 1119 | if (Subtarget->hasDataBarrier()) |
| 1120 | InsertFencesForAtomic = true; |
| 1121 | |
| 1122 | setOperationAction(Op: ISD::ATOMIC_FENCE, VT: MVT::Other, |
| 1123 | Action: Subtarget->hasAnyDataBarrier() ? Custom : Expand); |
| 1124 | |
| 1125 | // Set them all to LibCall, which will force libcalls.
| 1126 | setOperationAction(Op: ISD::ATOMIC_CMP_SWAP, VT: MVT::i32, Action: LibCall); |
| 1127 | setOperationAction(Op: ISD::ATOMIC_SWAP, VT: MVT::i32, Action: LibCall); |
| 1128 | setOperationAction(Op: ISD::ATOMIC_LOAD_ADD, VT: MVT::i32, Action: LibCall); |
| 1129 | setOperationAction(Op: ISD::ATOMIC_LOAD_SUB, VT: MVT::i32, Action: LibCall); |
| 1130 | setOperationAction(Op: ISD::ATOMIC_LOAD_AND, VT: MVT::i32, Action: LibCall); |
| 1131 | setOperationAction(Op: ISD::ATOMIC_LOAD_OR, VT: MVT::i32, Action: LibCall); |
| 1132 | setOperationAction(Op: ISD::ATOMIC_LOAD_XOR, VT: MVT::i32, Action: LibCall); |
| 1133 | setOperationAction(Op: ISD::ATOMIC_LOAD_NAND, VT: MVT::i32, Action: LibCall); |
| 1134 | setOperationAction(Op: ISD::ATOMIC_LOAD_MIN, VT: MVT::i32, Action: LibCall); |
| 1135 | setOperationAction(Op: ISD::ATOMIC_LOAD_MAX, VT: MVT::i32, Action: LibCall); |
| 1136 | setOperationAction(Op: ISD::ATOMIC_LOAD_UMIN, VT: MVT::i32, Action: LibCall); |
| 1137 | setOperationAction(Op: ISD::ATOMIC_LOAD_UMAX, VT: MVT::i32, Action: LibCall); |
| 1138 | // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the |
| 1139 | // Unordered/Monotonic case. |
| 1140 | if (!InsertFencesForAtomic) { |
| 1141 | setOperationAction(Op: ISD::ATOMIC_LOAD, VT: MVT::i32, Action: Custom); |
| 1142 | setOperationAction(Op: ISD::ATOMIC_STORE, VT: MVT::i32, Action: Custom); |
| 1143 | } |
| 1144 | } |
| 1145 | |
| 1146 | // Compute supported atomic widths. |
| 1147 | if (TT.isOSLinux() || (!Subtarget->isMClass() && Subtarget->hasV6Ops())) { |
| 1148 | // For targets where __sync_* routines are reliably available, we use them |
| 1149 | // if necessary. |
| 1150 | // |
| 1151 | // ARM Linux always supports 64-bit atomics through kernel-assisted atomic |
| 1152 | // routines (kernel 3.1 or later). FIXME: Not with compiler-rt? |
| 1153 | // |
| 1154 | // ARMv6 targets have native instructions in ARM mode. For Thumb mode, |
| 1155 | // such targets should provide __sync_* routines, which use the ARM mode |
| 1156 | // instructions. (ARMv6 doesn't have dmb, but it has an equivalent |
| 1157 | // encoding; see ARMISD::MEMBARRIER_MCR.) |
| 1158 | setMaxAtomicSizeInBitsSupported(64); |
| 1159 | } else if ((Subtarget->isMClass() && Subtarget->hasV8MBaselineOps()) || |
| 1160 | Subtarget->hasForced32BitAtomics()) { |
| 1161 | // Cortex-M cores (other than Cortex-M0) have 32-bit atomics.
| 1162 | setMaxAtomicSizeInBitsSupported(32); |
| 1163 | } else { |
| 1164 | // We can't assume anything about other targets; just use libatomic |
| 1165 | // routines. |
| 1166 | setMaxAtomicSizeInBitsSupported(0); |
| 1167 | } |
| 1168 | |
| 1169 | setMaxDivRemBitWidthSupported(64); |
| 1170 | |
| 1171 | setOperationAction(Op: ISD::PREFETCH, VT: MVT::Other, Action: Custom); |
| 1172 | |
| 1173 | // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes. |
| 1174 | if (!Subtarget->hasV6Ops()) { |
| 1175 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::i16, Action: Expand); |
| 1176 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::i8, Action: Expand); |
| 1177 | } |
| 1178 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::i1, Action: Expand); |
| 1179 | |
| 1180 | if (!Subtarget->useSoftFloat() && Subtarget->hasFPRegs() && |
| 1181 | !Subtarget->isThumb1Only()) { |
| 1182 | // Turn f64 -> i64 into VMOVRRD and i64 -> f64 into VMOVDRR,
| 1183 | // iff the target supports VFP2.
| 1184 | setOperationAction(Op: ISD::BITCAST, VT: MVT::i64, Action: Custom); |
| 1185 | setOperationAction(Op: ISD::GET_ROUNDING, VT: MVT::i32, Action: Custom); |
| 1186 | setOperationAction(Op: ISD::SET_ROUNDING, VT: MVT::Other, Action: Custom); |
| 1187 | setOperationAction(Op: ISD::GET_FPENV, VT: MVT::i32, Action: Legal); |
| 1188 | setOperationAction(Op: ISD::SET_FPENV, VT: MVT::i32, Action: Legal); |
| 1189 | setOperationAction(Op: ISD::RESET_FPENV, VT: MVT::Other, Action: Legal); |
| 1190 | setOperationAction(Op: ISD::GET_FPMODE, VT: MVT::i32, Action: Legal); |
| 1191 | setOperationAction(Op: ISD::SET_FPMODE, VT: MVT::i32, Action: Custom); |
| 1192 | setOperationAction(Op: ISD::RESET_FPMODE, VT: MVT::Other, Action: Custom); |
| 1193 | } |
| 1194 | |
| 1195 | // We want to custom lower some of our intrinsics. |
| 1196 | setOperationAction(Op: ISD::INTRINSIC_WO_CHAIN, VT: MVT::Other, Action: Custom); |
| 1197 | setOperationAction(Op: ISD::EH_SJLJ_SETJMP, VT: MVT::i32, Action: Custom); |
| 1198 | setOperationAction(Op: ISD::EH_SJLJ_LONGJMP, VT: MVT::Other, Action: Custom); |
| 1199 | setOperationAction(Op: ISD::EH_SJLJ_SETUP_DISPATCH, VT: MVT::Other, Action: Custom); |
| 1200 | |
| 1201 | setOperationAction(Op: ISD::SETCC, VT: MVT::i32, Action: Expand); |
| 1202 | setOperationAction(Op: ISD::SETCC, VT: MVT::f32, Action: Expand); |
| 1203 | setOperationAction(Op: ISD::SETCC, VT: MVT::f64, Action: Expand); |
| 1204 | setOperationAction(Op: ISD::SELECT, VT: MVT::i32, Action: Custom); |
| 1205 | setOperationAction(Op: ISD::SELECT, VT: MVT::f32, Action: Custom); |
| 1206 | setOperationAction(Op: ISD::SELECT, VT: MVT::f64, Action: Custom); |
| 1207 | setOperationAction(Op: ISD::SELECT_CC, VT: MVT::i32, Action: Custom); |
| 1208 | setOperationAction(Op: ISD::SELECT_CC, VT: MVT::f32, Action: Custom); |
| 1209 | setOperationAction(Op: ISD::SELECT_CC, VT: MVT::f64, Action: Custom); |
| 1210 | if (Subtarget->hasFullFP16()) { |
| 1211 | setOperationAction(Op: ISD::SETCC, VT: MVT::f16, Action: Expand); |
| 1212 | setOperationAction(Op: ISD::SELECT, VT: MVT::f16, Action: Custom); |
| 1213 | setOperationAction(Op: ISD::SELECT_CC, VT: MVT::f16, Action: Custom); |
| 1214 | } |
| 1215 | |
| 1216 | setOperationAction(Op: ISD::SETCCCARRY, VT: MVT::i32, Action: Custom); |
| 1217 | |
| 1218 | setOperationAction(Op: ISD::BRCOND, VT: MVT::Other, Action: Custom); |
| 1219 | setOperationAction(Op: ISD::BR_CC, VT: MVT::i32, Action: Custom); |
| 1220 | if (Subtarget->hasFullFP16()) |
| 1221 | setOperationAction(Op: ISD::BR_CC, VT: MVT::f16, Action: Custom); |
| 1222 | setOperationAction(Op: ISD::BR_CC, VT: MVT::f32, Action: Custom); |
| 1223 | setOperationAction(Op: ISD::BR_CC, VT: MVT::f64, Action: Custom); |
| 1224 | setOperationAction(Op: ISD::BR_JT, VT: MVT::Other, Action: Custom); |
| 1225 | |
| 1226 | // We don't support sin/cos/fmod/copysign/pow |
| 1227 | setOperationAction(Op: ISD::FSIN, VT: MVT::f64, Action: Expand); |
| 1228 | setOperationAction(Op: ISD::FSIN, VT: MVT::f32, Action: Expand); |
| 1229 | setOperationAction(Op: ISD::FCOS, VT: MVT::f32, Action: Expand); |
| 1230 | setOperationAction(Op: ISD::FCOS, VT: MVT::f64, Action: Expand); |
| 1231 | setOperationAction(Op: ISD::FSINCOS, VT: MVT::f64, Action: Expand); |
| 1232 | setOperationAction(Op: ISD::FSINCOS, VT: MVT::f32, Action: Expand); |
| 1233 | setOperationAction(Op: ISD::FREM, VT: MVT::f64, Action: LibCall); |
| 1234 | setOperationAction(Op: ISD::FREM, VT: MVT::f32, Action: LibCall); |
| 1235 | if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() && |
| 1236 | !Subtarget->isThumb1Only()) { |
| 1237 | setOperationAction(Op: ISD::FCOPYSIGN, VT: MVT::f64, Action: Custom); |
| 1238 | setOperationAction(Op: ISD::FCOPYSIGN, VT: MVT::f32, Action: Custom); |
| 1239 | } |
| 1240 | setOperationAction(Op: ISD::FPOW, VT: MVT::f64, Action: Expand); |
| 1241 | setOperationAction(Op: ISD::FPOW, VT: MVT::f32, Action: Expand); |
| 1242 | |
| 1243 | if (!Subtarget->hasVFP4Base()) { |
| 1244 | setOperationAction(Op: ISD::FMA, VT: MVT::f64, Action: Expand); |
| 1245 | setOperationAction(Op: ISD::FMA, VT: MVT::f32, Action: Expand); |
| 1246 | } |
| 1247 | |
| 1248 | // Various VFP goodness |
| 1249 | if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) { |
| 1250 | // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded. |
| 1251 | if (!Subtarget->hasFPARMv8Base() || !Subtarget->hasFP64()) { |
| 1252 | setOperationAction(Op: ISD::FP16_TO_FP, VT: MVT::f64, Action: Expand); |
| 1253 | setOperationAction(Op: ISD::FP_TO_FP16, VT: MVT::f64, Action: Expand); |
| 1254 | setOperationAction(Op: ISD::STRICT_FP16_TO_FP, VT: MVT::f64, Action: Expand); |
| 1255 | setOperationAction(Op: ISD::STRICT_FP_TO_FP16, VT: MVT::f64, Action: Expand); |
| 1256 | } |
| 1257 | |
| 1258 | // fp16 is a special v7 extension that adds f16 <-> f32 conversions. |
| 1259 | if (!Subtarget->hasFP16()) { |
| 1260 | setOperationAction(Op: ISD::FP16_TO_FP, VT: MVT::f32, Action: Expand); |
| 1261 | setOperationAction(Op: ISD::FP_TO_FP16, VT: MVT::f32, Action: Expand); |
| 1262 | setOperationAction(Op: ISD::STRICT_FP16_TO_FP, VT: MVT::f32, Action: Expand); |
| 1263 | setOperationAction(Op: ISD::STRICT_FP_TO_FP16, VT: MVT::f32, Action: Expand); |
| 1264 | } |
| 1265 | |
| 1266 | // Strict floating-point comparisons need custom lowering. |
| 1267 | setOperationAction(Op: ISD::STRICT_FSETCC, VT: MVT::f16, Action: Custom); |
| 1268 | setOperationAction(Op: ISD::STRICT_FSETCCS, VT: MVT::f16, Action: Custom); |
| 1269 | setOperationAction(Op: ISD::STRICT_FSETCC, VT: MVT::f32, Action: Custom); |
| 1270 | setOperationAction(Op: ISD::STRICT_FSETCCS, VT: MVT::f32, Action: Custom); |
| 1271 | setOperationAction(Op: ISD::STRICT_FSETCC, VT: MVT::f64, Action: Custom); |
| 1272 | setOperationAction(Op: ISD::STRICT_FSETCCS, VT: MVT::f64, Action: Custom); |
| 1273 | } |
| 1274 | |
| 1275 | setOperationAction(Op: ISD::FSINCOS, VT: MVT::f64, Action: Expand); |
| 1276 | setOperationAction(Op: ISD::FSINCOS, VT: MVT::f32, Action: Expand); |
| 1277 | |
| 1278 | // FP-ARMv8 implements a lot of rounding-like FP operations. |
| 1279 | if (Subtarget->hasFPARMv8Base()) { |
| 1280 | for (auto Op : |
| 1281 | {ISD::FFLOOR, ISD::FCEIL, ISD::FROUND, |
| 1282 | ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT, |
| 1283 | ISD::FROUNDEVEN, ISD::FMINNUM, ISD::FMAXNUM, |
| 1284 | ISD::STRICT_FFLOOR, ISD::STRICT_FCEIL, ISD::STRICT_FROUND, |
| 1285 | ISD::STRICT_FTRUNC, ISD::STRICT_FNEARBYINT, ISD::STRICT_FRINT, |
| 1286 | ISD::STRICT_FROUNDEVEN, ISD::STRICT_FMINNUM, ISD::STRICT_FMAXNUM}) { |
| 1287 | setOperationAction(Op, VT: MVT::f32, Action: Legal); |
| 1288 | |
| 1289 | if (Subtarget->hasFP64()) |
| 1290 | setOperationAction(Op, VT: MVT::f64, Action: Legal); |
| 1291 | } |
| 1292 | |
| 1293 | if (Subtarget->hasNEON()) { |
| 1294 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::v2f32, Action: Legal); |
| 1295 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::v2f32, Action: Legal); |
| 1296 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::v4f32, Action: Legal); |
| 1297 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::v4f32, Action: Legal); |
| 1298 | } |
| 1299 | } |
| 1300 | |
| 1301 | // FP16 operations often need to be promoted to call lib functions
| 1302 | // clang-format off |
| 1303 | if (Subtarget->hasFullFP16()) { |
| 1304 | setOperationAction(Op: ISD::LRINT, VT: MVT::f16, Action: Expand); |
| 1305 | setOperationAction(Op: ISD::LROUND, VT: MVT::f16, Action: Expand); |
| 1306 | setOperationAction(Op: ISD::FCOPYSIGN, VT: MVT::f16, Action: Expand); |
| 1307 | |
| 1308 | for (auto Op : {ISD::FREM, ISD::FPOW, ISD::FPOWI, |
| 1309 | ISD::FCOS, ISD::FSIN, ISD::FSINCOS, |
| 1310 | ISD::FSINCOSPI, ISD::FMODF, ISD::FACOS, |
| 1311 | ISD::FASIN, ISD::FATAN, ISD::FATAN2, |
| 1312 | ISD::FCOSH, ISD::FSINH, ISD::FTANH, |
| 1313 | ISD::FTAN, ISD::FEXP, ISD::FEXP2, |
| 1314 | ISD::FEXP10, ISD::FLOG, ISD::FLOG2, |
| 1315 | ISD::FLOG10, ISD::STRICT_FREM, ISD::STRICT_FPOW, |
| 1316 | ISD::STRICT_FPOWI, ISD::STRICT_FCOS, ISD::STRICT_FSIN, |
| 1317 | ISD::STRICT_FACOS, ISD::STRICT_FASIN, ISD::STRICT_FATAN, |
| 1318 | ISD::STRICT_FATAN2, ISD::STRICT_FCOSH, ISD::STRICT_FSINH, |
| 1319 | ISD::STRICT_FTANH, ISD::STRICT_FEXP, ISD::STRICT_FEXP2, |
| 1320 | ISD::STRICT_FLOG, ISD::STRICT_FLOG2, ISD::STRICT_FLOG10, |
| 1321 | ISD::STRICT_FTAN}) { |
| 1322 | setOperationAction(Op, VT: MVT::f16, Action: Promote); |
| 1323 | } |
| 1324 | |
| 1325 | // Round-to-integer operations need custom lowering for fp16, as Promote
| 1326 | // doesn't work because the result type is integer.
| 1327 | for (auto Op : {ISD::STRICT_LROUND, ISD::STRICT_LLROUND, ISD::STRICT_LRINT, ISD::STRICT_LLRINT}) |
| 1328 | setOperationAction(Op, VT: MVT::f16, Action: Custom); |
| 1329 | |
| 1330 | for (auto Op : {ISD::FROUND, ISD::FROUNDEVEN, ISD::FTRUNC, |
| 1331 | ISD::FNEARBYINT, ISD::FRINT, ISD::FFLOOR, |
| 1332 | ISD::FCEIL, ISD::STRICT_FROUND, ISD::STRICT_FROUNDEVEN, |
| 1333 | ISD::STRICT_FTRUNC, ISD::STRICT_FNEARBYINT, ISD::STRICT_FRINT, |
| 1334 | ISD::STRICT_FFLOOR, ISD::STRICT_FCEIL}) { |
| 1335 | setOperationAction(Op, VT: MVT::f16, Action: Legal); |
| 1336 | } |
| 1337 | // clang-format on |
| 1338 | } |
| 1339 | |
| 1340 | if (Subtarget->hasNEON()) { |
| 1341 | // vmin and vmax aren't available in a scalar form, so we can use |
| 1342 | // a NEON instruction with an undef lane instead. |
| 1343 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::f32, Action: Legal); |
| 1344 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::f32, Action: Legal); |
| 1345 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::f16, Action: Legal); |
| 1346 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::f16, Action: Legal); |
| 1347 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::v2f32, Action: Legal); |
| 1348 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::v2f32, Action: Legal); |
| 1349 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::v4f32, Action: Legal); |
| 1350 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::v4f32, Action: Legal); |
| 1351 | |
| 1352 | if (Subtarget->hasV8Ops()) { |
| 1353 | for (auto Op : {ISD::FROUND, ISD::STRICT_FROUND, ISD::FROUNDEVEN, |
| 1354 | ISD::STRICT_FROUNDEVEN, ISD::FTRUNC, ISD::STRICT_FTRUNC, |
| 1355 | ISD::FRINT, ISD::STRICT_FRINT, ISD::FFLOOR, |
| 1356 | ISD::STRICT_FFLOOR, ISD::FCEIL, ISD::STRICT_FCEIL}) { |
| 1357 | setOperationAction(Op, VT: MVT::v2f32, Action: Legal); |
| 1358 | setOperationAction(Op, VT: MVT::v4f32, Action: Legal); |
| 1359 | } |
| 1360 | } |
| 1361 | |
| 1362 | if (Subtarget->hasFullFP16()) { |
| 1363 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::v4f16, Action: Legal); |
| 1364 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::v4f16, Action: Legal); |
| 1365 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::v8f16, Action: Legal); |
| 1366 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::v8f16, Action: Legal); |
| 1367 | |
| 1368 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::v4f16, Action: Legal); |
| 1369 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::v4f16, Action: Legal); |
| 1370 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::v8f16, Action: Legal); |
| 1371 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::v8f16, Action: Legal); |
| 1372 | |
| 1373 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::v4f16, Action: Legal); |
| 1374 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::v8f16, Action: Legal); |
| 1375 | setOperationAction(Op: ISD::FROUND, VT: MVT::v4f16, Action: Legal); |
| 1376 | setOperationAction(Op: ISD::FROUND, VT: MVT::v8f16, Action: Legal); |
| 1377 | setOperationAction(Op: ISD::FROUNDEVEN, VT: MVT::v4f16, Action: Legal); |
| 1378 | setOperationAction(Op: ISD::FROUNDEVEN, VT: MVT::v8f16, Action: Legal); |
| 1379 | setOperationAction(Op: ISD::FCEIL, VT: MVT::v4f16, Action: Legal); |
| 1380 | setOperationAction(Op: ISD::FCEIL, VT: MVT::v8f16, Action: Legal); |
| 1381 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::v4f16, Action: Legal); |
| 1382 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::v8f16, Action: Legal); |
| 1383 | setOperationAction(Op: ISD::FRINT, VT: MVT::v4f16, Action: Legal); |
| 1384 | setOperationAction(Op: ISD::FRINT, VT: MVT::v8f16, Action: Legal); |
| 1385 | } |
| 1386 | } |
| 1387 | |
| 1388 | // On MSVC, both 32-bit and 64-bit, ldexpf(f32) is not defined. MinGW has |
| 1389 | // it, but it's just a wrapper around ldexp. |
| 1390 | if (TT.isOSWindows()) { |
| 1391 | for (ISD::NodeType Op : {ISD::FLDEXP, ISD::STRICT_FLDEXP, ISD::FFREXP}) |
| 1392 | if (isOperationExpand(Op, VT: MVT::f32)) |
| 1393 | setOperationAction(Op, VT: MVT::f32, Action: Promote); |
| 1394 | } |
| 1395 | |
| 1396 | // LegalizeDAG currently can't expand fp16 LDEXP/FREXP on targets where i16 |
| 1397 | // isn't legal. |
| 1398 | for (ISD::NodeType Op : {ISD::FLDEXP, ISD::STRICT_FLDEXP, ISD::FFREXP}) |
| 1399 | if (isOperationExpand(Op, VT: MVT::f16)) |
| 1400 | setOperationAction(Op, VT: MVT::f16, Action: Promote); |
| 1401 | |
| 1402 | // We have target-specific dag combine patterns for the following nodes.
| 1403 | // (Combines on target-specific nodes such as ARMISD::VMOVRRD run without
| | // needing a setTargetDAGCombine call.)
| 1404 | setTargetDAGCombine( |
| 1405 | {ISD::ADD, ISD::SUB, ISD::MUL, ISD::AND, ISD::OR, ISD::XOR}); |
| 1406 | |
| 1407 | if (Subtarget->hasMVEIntegerOps()) |
| 1408 | setTargetDAGCombine(ISD::VSELECT); |
| 1409 | |
| 1410 | if (Subtarget->hasV6Ops()) |
| 1411 | setTargetDAGCombine(ISD::SRL); |
| 1412 | if (Subtarget->isThumb1Only()) |
| 1413 | setTargetDAGCombine(ISD::SHL); |
| 1414 | // Attempt to lower smin/smax to ssat/usat |
| 1415 | if ((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || |
| 1416 | Subtarget->isThumb2()) { |
| 1417 | setTargetDAGCombine({ISD::SMIN, ISD::SMAX}); |
| 1418 | } |
| 1419 | |
| 1420 | setStackPointerRegisterToSaveRestore(ARM::SP); |
| 1421 | |
| 1422 | if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() || |
| 1423 | !Subtarget->hasVFP2Base() || Subtarget->hasMinSize()) |
| 1424 | setSchedulingPreference(Sched::RegPressure); |
| 1425 | else |
| 1426 | setSchedulingPreference(Sched::Hybrid); |
| 1427 | |
| 1428 | // Temporary - rewrite interface to use type.
| 1429 | MaxStoresPerMemset = 8; |
| 1430 | MaxStoresPerMemsetOptSize = 4; |
| 1431 | MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores |
| 1432 | MaxStoresPerMemcpyOptSize = 2; |
| 1433 | MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores |
| 1434 | MaxStoresPerMemmoveOptSize = 2; |
| 1435 | |
| 1436 | // On ARM arguments smaller than 4 bytes are extended, so all arguments |
| 1437 | // are at least 4 bytes aligned. |
| 1438 | setMinStackArgumentAlignment(Align(4)); |
| 1439 | |
| 1440 | // Prefer likely predicted branches to selects on out-of-order cores. |
| 1441 | PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder(); |
| 1442 | |
| 1443 | setPrefLoopAlignment(Align(1ULL << Subtarget->getPreferBranchLogAlignment())); |
| 1444 | setPrefFunctionAlignment( |
| 1445 | Align(1ULL << Subtarget->getPreferBranchLogAlignment())); |
| 1446 | |
| 1447 | setMinFunctionAlignment(Subtarget->isThumb() ? Align(2) : Align(4)); |
| 1448 | |
| 1449 | IsStrictFPEnabled = true; |
| 1450 | } |
| 1451 | |
| 1452 | bool ARMTargetLowering::useSoftFloat() const { |
| 1453 | return Subtarget->useSoftFloat(); |
| 1454 | } |
| 1455 | |
| 1456 | bool ARMTargetLowering::preferSelectsOverBooleanArithmetic(EVT VT) const { |
| 1457 | return !Subtarget->isThumb1Only() && VT.getSizeInBits() <= 32; |
| 1458 | } |
| 1459 | |
| 1460 | // FIXME: It might make sense to define the representative register class as the |
| 1461 | // nearest super-register that has a non-null superset. For example, DPR_VFP2 is |
| 1462 | // a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
| 1463 | // SPR's representative would be DPR_VFP2. This should work well if register |
| 1464 | // pressure tracking were modified such that a register use would increment the |
| 1465 | // pressure of the register class's representative and all of its super
| 1466 | // classes' representatives transitively. We have not implemented this because |
| 1467 | // of the difficulty prior to coalescing of modeling operand register classes |
| 1468 | // due to the common occurrence of cross class copies and subregister insertions |
| 1469 | // and extractions. |
| 1470 | std::pair<const TargetRegisterClass *, uint8_t> |
| 1471 | ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI, |
| 1472 | MVT VT) const { |
| 1473 | const TargetRegisterClass *RRC = nullptr; |
| 1474 | uint8_t Cost = 1; |
| 1475 | switch (VT.SimpleTy) { |
| 1476 | default: |
| 1477 | return TargetLowering::findRepresentativeClass(TRI, VT); |
| 1478 | // Use DPR as representative register class for all floating point |
| 1479 | // and vector types. Since there are 32 SPR registers and 32 DPR registers,
| 1480 | // the cost is 1 for both f32 and f64.
| 1481 | case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16: |
| 1482 | case MVT::v2i32: case MVT::v1i64: case MVT::v2f32: |
| 1483 | RRC = &ARM::DPRRegClass; |
| 1484 | // When NEON is used for SP, only half of the register file is available |
| 1485 | // because operations that define both SP and DP results will be constrained |
| 1486 | // to the VFP2 class (D0-D15). We currently model this constraint prior to |
| 1487 | // coalescing by double-counting the SP regs. See the FIXME above. |
| 1488 | if (Subtarget->useNEONForSinglePrecisionFP()) |
| 1489 | Cost = 2; |
| 1490 | break; |
| 1491 | case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: |
| 1492 | case MVT::v4f32: case MVT::v2f64: |
| 1493 | RRC = &ARM::DPRRegClass; |
| 1494 | Cost = 2; |
| 1495 | break; |
| 1496 | case MVT::v4i64: |
| 1497 | RRC = &ARM::DPRRegClass; |
| 1498 | Cost = 4; |
| 1499 | break; |
| 1500 | case MVT::v8i64: |
| 1501 | RRC = &ARM::DPRRegClass; |
| 1502 | Cost = 8; |
| 1503 | break; |
| 1504 | } |
| 1505 | return std::make_pair(x&: RRC, y&: Cost); |
| 1506 | } |
| 1507 | |
| 1508 | EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &, |
| 1509 | EVT VT) const { |
| 1510 | if (!VT.isVector()) |
| 1511 | return getPointerTy(DL); |
| 1512 | |
| 1513 | // MVE has a predicate register. |
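| | // For example, a setcc on v4i32 yields v4i1, which is carried in the VPR
| | // predicate register rather than in an ordinary vector register.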
| 1514 | if ((Subtarget->hasMVEIntegerOps() && |
| 1515 | (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 || |
| 1516 | VT == MVT::v16i8)) || |
| 1517 | (Subtarget->hasMVEFloatOps() && |
| 1518 | (VT == MVT::v2f64 || VT == MVT::v4f32 || VT == MVT::v8f16))) |
| 1519 | return MVT::getVectorVT(VT: MVT::i1, EC: VT.getVectorElementCount()); |
| 1520 | return VT.changeVectorElementTypeToInteger(); |
| 1521 | } |
| 1522 | |
| 1523 | /// getRegClassFor - Return the register class that should be used for the |
| 1524 | /// specified value type. |
| 1525 | const TargetRegisterClass * |
| 1526 | ARMTargetLowering::getRegClassFor(MVT VT, bool isDivergent) const { |
| 1527 | (void)isDivergent; |
| 1528 | // Map v4i64 to QQ registers but do not make the type legal. Similarly map |
| 1529 | // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to |
| 1530 | // load / store 4 to 8 consecutive NEON D registers, or 2 to 4 consecutive |
| 1531 | // MVE Q registers. |
| 1532 | if (Subtarget->hasNEON()) { |
| 1533 | if (VT == MVT::v4i64) |
| 1534 | return &ARM::QQPRRegClass; |
| 1535 | if (VT == MVT::v8i64) |
| 1536 | return &ARM::QQQQPRRegClass; |
| 1537 | } |
| 1538 | if (Subtarget->hasMVEIntegerOps()) { |
| 1539 | if (VT == MVT::v4i64) |
| 1540 | return &ARM::MQQPRRegClass; |
| 1541 | if (VT == MVT::v8i64) |
| 1542 | return &ARM::MQQQQPRRegClass; |
| 1543 | } |
| 1544 | return TargetLowering::getRegClassFor(VT); |
| 1545 | } |
| 1546 | |
| 1547 | // memcpy and other memory intrinsics typically try to use LDM/STM if the
| 1548 | // source/dest is aligned and the copy size is large enough. We therefore want
| 1549 | // to align such objects passed to memory intrinsics. |
| 1550 | bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, |
| 1551 | Align &PrefAlign) const { |
| 1552 | if (!isa<MemIntrinsic>(Val: CI)) |
| 1553 | return false; |
| 1554 | MinSize = 8; |
| 1555 | // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1 |
| 1556 | // cycle faster than 4-byte aligned LDM. |
| 1557 | PrefAlign = |
| 1558 | (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? Align(8) : Align(4)); |
| 1559 | return true; |
| 1560 | } |
| 1561 | |
| 1562 | // Create a fast isel object. |
| 1563 | FastISel *ARMTargetLowering::createFastISel( |
| 1564 | FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo, |
| 1565 | const LibcallLoweringInfo *libcallLowering) const { |
| 1566 | return ARM::createFastISel(funcInfo, libInfo, libcallLowering); |
| 1567 | } |
| 1568 | |
| 1569 | Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const { |
| 1570 | unsigned NumVals = N->getNumValues(); |
| 1571 | if (!NumVals) |
| 1572 | return Sched::RegPressure; |
| 1573 | |
| 1574 | for (unsigned i = 0; i != NumVals; ++i) { |
| 1575 | EVT VT = N->getValueType(ResNo: i); |
| 1576 | if (VT == MVT::Glue || VT == MVT::Other) |
| 1577 | continue; |
| 1578 | if (VT.isFloatingPoint() || VT.isVector()) |
| 1579 | return Sched::ILP; |
| 1580 | } |
| 1581 | |
| 1582 | if (!N->isMachineOpcode()) |
| 1583 | return Sched::RegPressure; |
| 1584 | |
| 1585 | // Loads are scheduled for latency even if the instruction itinerary
| 1586 | // is not available.
| 1587 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 1588 | const MCInstrDesc &MCID = TII->get(Opcode: N->getMachineOpcode()); |
| 1589 | |
| 1590 | if (MCID.getNumDefs() == 0) |
| 1591 | return Sched::RegPressure; |
| 1592 | if (!Itins->isEmpty() && |
| 1593 | Itins->getOperandCycle(ItinClassIndx: MCID.getSchedClass(), OperandIdx: 0) > 2U) |
| 1594 | return Sched::ILP; |
| 1595 | |
| 1596 | return Sched::RegPressure; |
| 1597 | } |
| 1598 | |
| 1599 | //===----------------------------------------------------------------------===// |
| 1600 | // Lowering Code |
| 1601 | //===----------------------------------------------------------------------===// |
| 1602 | |
| 1603 | static bool isSRL16(const SDValue &Op) { |
| 1604 | if (Op.getOpcode() != ISD::SRL) |
| 1605 | return false; |
| 1606 | if (auto Const = dyn_cast<ConstantSDNode>(Val: Op.getOperand(i: 1))) |
| 1607 | return Const->getZExtValue() == 16; |
| 1608 | return false; |
| 1609 | } |
| 1610 | |
| 1611 | static bool isSRA16(const SDValue &Op) { |
| 1612 | if (Op.getOpcode() != ISD::SRA) |
| 1613 | return false; |
| 1614 | if (auto Const = dyn_cast<ConstantSDNode>(Val: Op.getOperand(i: 1))) |
| 1615 | return Const->getZExtValue() == 16; |
| 1616 | return false; |
| 1617 | } |
| 1618 | |
| 1619 | static bool isSHL16(const SDValue &Op) { |
| 1620 | if (Op.getOpcode() != ISD::SHL) |
| 1621 | return false; |
| 1622 | if (auto Const = dyn_cast<ConstantSDNode>(Val: Op.getOperand(i: 1))) |
| 1623 | return Const->getZExtValue() == 16; |
| 1624 | return false; |
| 1625 | } |
| 1626 | |
| 1627 | // Check for a signed 16-bit value. We special case SRA because it makes
| 1628 | // things simpler when also looking for SRAs that aren't sign extending a
| 1629 | // smaller value. Without the check, we'd need to take extra care with |
| 1630 | // checking order for some operations. |
| 1631 | static bool isS16(const SDValue &Op, SelectionDAG &DAG) { |
| 1632 | if (isSRA16(Op)) |
| 1633 | return isSHL16(Op: Op.getOperand(i: 0)); |
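| | // A value sign-extended from i16 to i32 has at least 17 identical top bits
| | // (bit 15 replicated through bit 31); checking for exactly 17 conservatively
| | // matches only values that use the full 16-bit range.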
| 1634 | return DAG.ComputeNumSignBits(Op) == 17; |
| 1635 | } |
| 1636 | |
| 1637 | /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC |
| 1638 | static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) { |
| 1639 | switch (CC) { |
| 1640 | default: llvm_unreachable("Unknown condition code!" ); |
| 1641 | case ISD::SETNE: return ARMCC::NE; |
| 1642 | case ISD::SETEQ: return ARMCC::EQ; |
| 1643 | case ISD::SETGT: return ARMCC::GT; |
| 1644 | case ISD::SETGE: return ARMCC::GE; |
| 1645 | case ISD::SETLT: return ARMCC::LT; |
| 1646 | case ISD::SETLE: return ARMCC::LE; |
| 1647 | case ISD::SETUGT: return ARMCC::HI; |
| 1648 | case ISD::SETUGE: return ARMCC::HS; |
| 1649 | case ISD::SETULT: return ARMCC::LO; |
| 1650 | case ISD::SETULE: return ARMCC::LS; |
| 1651 | } |
| 1652 | } |
| 1653 | |
| 1654 | /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC. |
| 1655 | static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, |
| 1656 | ARMCC::CondCodes &CondCode2) { |
| 1657 | CondCode2 = ARMCC::AL; |
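| | // CondCode2 is only meaningful for the conditions (SETONE, SETUEQ) that
| | // need two ARM condition checks; ARMCC::AL here means "unused".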
| 1658 | switch (CC) { |
| 1659 | default: llvm_unreachable("Unknown FP condition!" ); |
| 1660 | case ISD::SETEQ: |
| 1661 | case ISD::SETOEQ: CondCode = ARMCC::EQ; break; |
| 1662 | case ISD::SETGT: |
| 1663 | case ISD::SETOGT: CondCode = ARMCC::GT; break; |
| 1664 | case ISD::SETGE: |
| 1665 | case ISD::SETOGE: CondCode = ARMCC::GE; break; |
| 1666 | case ISD::SETOLT: CondCode = ARMCC::MI; break; |
| 1667 | case ISD::SETOLE: CondCode = ARMCC::LS; break; |
| 1668 | case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break; |
| 1669 | case ISD::SETO: CondCode = ARMCC::VC; break; |
| 1670 | case ISD::SETUO: CondCode = ARMCC::VS; break; |
| 1671 | case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break; |
| 1672 | case ISD::SETUGT: CondCode = ARMCC::HI; break; |
| 1673 | case ISD::SETUGE: CondCode = ARMCC::PL; break; |
| 1674 | case ISD::SETLT: |
| 1675 | case ISD::SETULT: CondCode = ARMCC::LT; break; |
| 1676 | case ISD::SETLE: |
| 1677 | case ISD::SETULE: CondCode = ARMCC::LE; break; |
| 1678 | case ISD::SETNE: |
| 1679 | case ISD::SETUNE: CondCode = ARMCC::NE; break; |
| 1680 | } |
| 1681 | } |
| 1682 | |
| 1683 | //===----------------------------------------------------------------------===// |
| 1684 | // Calling Convention Implementation |
| 1685 | //===----------------------------------------------------------------------===// |
| 1686 | |
| 1687 | /// getEffectiveCallingConv - Get the effective calling convention, taking into |
| 1688 | /// account presence of floating point hardware and calling convention |
| 1689 | /// limitations, such as support for variadic functions. |
| 1690 | CallingConv::ID |
| 1691 | ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC, |
| 1692 | bool isVarArg) const { |
| 1693 | switch (CC) { |
| 1694 | default: |
| 1695 | report_fatal_error(reason: "Unsupported calling convention" ); |
| 1696 | case CallingConv::ARM_AAPCS: |
| 1697 | case CallingConv::ARM_APCS: |
| 1698 | case CallingConv::GHC: |
| 1699 | case CallingConv::CFGuard_Check: |
| 1700 | return CC; |
| 1701 | case CallingConv::PreserveMost: |
| 1702 | return CallingConv::PreserveMost; |
| 1703 | case CallingConv::PreserveAll: |
| 1704 | return CallingConv::PreserveAll; |
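| | // The VFP variant of AAPCS cannot be used for variadic functions, whose
| | // arguments are always marshalled according to the base AAPCS.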
| 1705 | case CallingConv::ARM_AAPCS_VFP: |
| 1706 | case CallingConv::Swift: |
| 1707 | case CallingConv::SwiftTail: |
| 1708 | return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP; |
| 1709 | case CallingConv::C: |
| 1710 | case CallingConv::Tail: |
| 1711 | if (!getTM().isAAPCS_ABI()) |
| 1712 | return CallingConv::ARM_APCS; |
| 1713 | else if (Subtarget->hasFPRegs() && !Subtarget->isThumb1Only() && |
| 1714 | getTargetMachine().Options.FloatABIType == FloatABI::Hard && |
| 1715 | !isVarArg) |
| 1716 | return CallingConv::ARM_AAPCS_VFP; |
| 1717 | else |
| 1718 | return CallingConv::ARM_AAPCS; |
| 1719 | case CallingConv::Fast: |
| 1720 | case CallingConv::CXX_FAST_TLS: |
| 1721 | if (!getTM().isAAPCS_ABI()) { |
| 1722 | if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && !isVarArg) |
| 1723 | return CallingConv::Fast; |
| 1724 | return CallingConv::ARM_APCS; |
| 1725 | } else if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && |
| 1726 | !isVarArg) |
| 1727 | return CallingConv::ARM_AAPCS_VFP; |
| 1728 | else |
| 1729 | return CallingConv::ARM_AAPCS; |
| 1730 | } |
| 1731 | } |
| 1732 | |
| 1733 | CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC, |
| 1734 | bool isVarArg) const { |
| 1735 | return CCAssignFnForNode(CC, Return: false, isVarArg); |
| 1736 | } |
| 1737 | |
| 1738 | CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC, |
| 1739 | bool isVarArg) const { |
| 1740 | return CCAssignFnForNode(CC, Return: true, isVarArg); |
| 1741 | } |
| 1742 | |
| 1743 | /// CCAssignFnForNode - Selects the correct CCAssignFn for the given |
| 1744 | /// CallingConvention. |
| 1745 | CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC, |
| 1746 | bool Return, |
| 1747 | bool isVarArg) const { |
| 1748 | switch (getEffectiveCallingConv(CC, isVarArg)) { |
| 1749 | default: |
| 1750 | report_fatal_error(reason: "Unsupported calling convention" ); |
| 1751 | case CallingConv::ARM_APCS: |
| 1752 | return (Return ? RetCC_ARM_APCS : CC_ARM_APCS); |
| 1753 | case CallingConv::ARM_AAPCS: |
| 1754 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); |
| 1755 | case CallingConv::ARM_AAPCS_VFP: |
| 1756 | return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); |
| 1757 | case CallingConv::Fast: |
| 1758 | return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); |
| 1759 | case CallingConv::GHC: |
| 1760 | return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC); |
| 1761 | case CallingConv::PreserveMost: |
| 1762 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); |
| 1763 | case CallingConv::PreserveAll: |
| 1764 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); |
| 1765 | case CallingConv::CFGuard_Check: |
| 1766 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_Win32_CFGuard_Check); |
| 1767 | } |
| 1768 | } |
| 1769 | |
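| | /// MoveToHPR - Move a half-precision value that was passed in the low 16
| | /// bits of a 32-bit location (LocVT) into an f16/bf16 value (ValVT). With
| | /// full fp16 support this is a single VMOVhr; otherwise it goes via an
| | /// integer truncate and bitcast.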
| 1770 | SDValue ARMTargetLowering::MoveToHPR(const SDLoc &dl, SelectionDAG &DAG, |
| 1771 | MVT LocVT, MVT ValVT, SDValue Val) const { |
| 1772 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocVT.getSizeInBits()), |
| 1773 | Operand: Val); |
| 1774 | if (Subtarget->hasFullFP16()) { |
| 1775 | Val = DAG.getNode(Opcode: ARMISD::VMOVhr, DL: dl, VT: ValVT, Operand: Val); |
| 1776 | } else { |
| 1777 | Val = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, |
| 1778 | VT: MVT::getIntegerVT(BitWidth: ValVT.getSizeInBits()), Operand: Val); |
| 1779 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: ValVT, Operand: Val); |
| 1780 | } |
| 1781 | return Val; |
| 1782 | } |
| 1783 | |
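| | /// MoveFromHPR - The inverse of MoveToHPR: widen a half-precision value
| | /// (ValVT) into the low 16 bits of a 32-bit location (LocVT), using VMOVrh
| | /// when full fp16 is available and an integer zero-extend otherwise.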
| 1784 | SDValue ARMTargetLowering::MoveFromHPR(const SDLoc &dl, SelectionDAG &DAG, |
| 1785 | MVT LocVT, MVT ValVT, |
| 1786 | SDValue Val) const { |
| 1787 | if (Subtarget->hasFullFP16()) { |
| 1788 | Val = DAG.getNode(Opcode: ARMISD::VMOVrh, DL: dl, |
| 1789 | VT: MVT::getIntegerVT(BitWidth: LocVT.getSizeInBits()), Operand: Val); |
| 1790 | } else { |
| 1791 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, |
| 1792 | VT: MVT::getIntegerVT(BitWidth: ValVT.getSizeInBits()), Operand: Val); |
| 1793 | Val = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, |
| 1794 | VT: MVT::getIntegerVT(BitWidth: LocVT.getSizeInBits()), Operand: Val); |
| 1795 | } |
| 1796 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: LocVT, Operand: Val); |
| 1797 | } |
| 1798 | |
| 1799 | /// LowerCallResult - Lower the result values of a call into the |
| 1800 | /// appropriate copies out of appropriate physical registers. |
| 1801 | SDValue ARMTargetLowering::LowerCallResult( |
| 1802 | SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg, |
| 1803 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
| 1804 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn, |
| 1805 | SDValue ThisVal, bool isCmseNSCall) const { |
| 1806 | // Assign locations to each value returned by this call. |
| 1807 | SmallVector<CCValAssign, 16> RVLocs; |
| 1808 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, |
| 1809 | *DAG.getContext()); |
| 1810 | CCInfo.AnalyzeCallResult(Ins, Fn: CCAssignFnForReturn(CC: CallConv, isVarArg)); |
| 1811 | |
| 1812 | // Copy all of the result registers out of their specified physreg. |
| 1813 | for (unsigned i = 0; i != RVLocs.size(); ++i) { |
| 1814 | CCValAssign VA = RVLocs[i]; |
| 1815 | |
| 1816 | // Pass 'this' value directly from the argument to return value, to avoid |
| 1817 | // reg unit interference |
| 1818 | if (i == 0 && isThisReturn) { |
| 1819 | assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 && |
| 1820 | "unexpected return calling convention register assignment" ); |
| 1821 | InVals.push_back(Elt: ThisVal); |
| 1822 | continue; |
| 1823 | } |
| 1824 | |
| 1825 | SDValue Val; |
| 1826 | if (VA.needsCustom() && |
| 1827 | (VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2f64)) { |
| 1828 | // Handle f64 or half of a v2f64. |
| 1829 | SDValue Lo = DAG.getCopyFromReg(Chain, dl, Reg: VA.getLocReg(), VT: MVT::i32, |
| 1830 | Glue: InGlue); |
| 1831 | Chain = Lo.getValue(R: 1); |
| 1832 | InGlue = Lo.getValue(R: 2); |
| 1833 | VA = RVLocs[++i]; // skip ahead to next loc |
| 1834 | SDValue Hi = DAG.getCopyFromReg(Chain, dl, Reg: VA.getLocReg(), VT: MVT::i32, |
| 1835 | Glue: InGlue); |
| 1836 | Chain = Hi.getValue(R: 1); |
| 1837 | InGlue = Hi.getValue(R: 2); |
| 1838 | if (!Subtarget->isLittle()) |
| 1839 | std::swap (a&: Lo, b&: Hi); |
| 1840 | Val = DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: Lo, N2: Hi); |
| 1841 | |
| 1842 | if (VA.getLocVT() == MVT::v2f64) { |
| 1843 | SDValue Vec = DAG.getNode(Opcode: ISD::UNDEF, DL: dl, VT: MVT::v2f64); |
| 1844 | Vec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: Vec, N2: Val, |
| 1845 | N3: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 1846 | |
| 1847 | VA = RVLocs[++i]; // skip ahead to next loc |
| 1848 | Lo = DAG.getCopyFromReg(Chain, dl, Reg: VA.getLocReg(), VT: MVT::i32, Glue: InGlue); |
| 1849 | Chain = Lo.getValue(R: 1); |
| 1850 | InGlue = Lo.getValue(R: 2); |
| 1851 | VA = RVLocs[++i]; // skip ahead to next loc |
| 1852 | Hi = DAG.getCopyFromReg(Chain, dl, Reg: VA.getLocReg(), VT: MVT::i32, Glue: InGlue); |
| 1853 | Chain = Hi.getValue(R: 1); |
| 1854 | InGlue = Hi.getValue(R: 2); |
| 1855 | if (!Subtarget->isLittle()) |
| 1856 | std::swap (a&: Lo, b&: Hi); |
| 1857 | Val = DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: Lo, N2: Hi); |
| 1858 | Val = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: Vec, N2: Val, |
| 1859 | N3: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
| 1860 | } |
| 1861 | } else { |
| 1862 | Val = DAG.getCopyFromReg(Chain, dl, Reg: VA.getLocReg(), VT: VA.getLocVT(), |
| 1863 | Glue: InGlue); |
| 1864 | Chain = Val.getValue(R: 1); |
| 1865 | InGlue = Val.getValue(R: 2); |
| 1866 | } |
| 1867 | |
| 1868 | switch (VA.getLocInfo()) { |
| 1869 | default: llvm_unreachable("Unknown loc info!" ); |
| 1870 | case CCValAssign::Full: break; |
| 1871 | case CCValAssign::BCvt: |
| 1872 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getValVT(), Operand: Val); |
| 1873 | break; |
| 1874 | } |
| 1875 | |
| 1876 | // f16 arguments have their size extended to 4 bytes and passed as if they |
| 1877 | // had been copied to the LSBs of a 32-bit register. |
| 1878 | // For that, they are passed extended to i32 (soft ABI) or to f32 (hard ABI).
| 1879 | if (VA.needsCustom() && |
| 1880 | (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) |
| 1881 | Val = MoveToHPR(dl, DAG, LocVT: VA.getLocVT(), ValVT: VA.getValVT(), Val); |
| 1882 | |
| 1883 | // On CMSE Non-secure Calls, call results (returned values) whose bitwidth |
| 1884 | // is less than 32 bits must be sign- or zero-extended after the call for |
| 1885 | // security reasons. Although the ABI mandates an extension done by the |
| 1886 | // callee, the latter cannot be trusted to follow the rules of the ABI. |
| 1887 | const ISD::InputArg &Arg = Ins[VA.getValNo()]; |
| 1888 | if (isCmseNSCall && Arg.ArgVT.isScalarInteger() && |
| 1889 | VA.getLocVT().isScalarInteger() && Arg.ArgVT.bitsLT(VT: MVT::i32)) |
| 1890 | Val = handleCMSEValue(Value: Val, Arg, DAG, DL: dl); |
| 1891 | |
| 1892 | InVals.push_back(Elt: Val); |
| 1893 | } |
| 1894 | |
| 1895 | return Chain; |
| 1896 | } |
| 1897 | |
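| | // Compute the address and MachinePointerInfo of the stack slot for an
| | // outgoing call argument. For tail calls the slot lies in the caller's own
| | // incoming-argument area (a fixed frame object, offset by SPDiff); for
| | // normal calls it is addressed as SP plus the location's memory offset.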
| 1898 | std::pair<SDValue, MachinePointerInfo> ARMTargetLowering::computeAddrForCallArg( |
| 1899 | const SDLoc &dl, SelectionDAG &DAG, const CCValAssign &VA, SDValue StackPtr, |
| 1900 | bool IsTailCall, int SPDiff) const { |
| 1901 | SDValue DstAddr; |
| 1902 | MachinePointerInfo DstInfo; |
| 1903 | int32_t Offset = VA.getLocMemOffset(); |
| 1904 | MachineFunction &MF = DAG.getMachineFunction(); |
| 1905 | |
| 1906 | if (IsTailCall) { |
| 1907 | Offset += SPDiff; |
| 1908 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 1909 | int Size = VA.getLocVT().getFixedSizeInBits() / 8; |
| 1910 | int FI = MF.getFrameInfo().CreateFixedObject(Size, SPOffset: Offset, IsImmutable: true); |
| 1911 | DstAddr = DAG.getFrameIndex(FI, VT: PtrVT); |
| 1912 | DstInfo = |
| 1913 | MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI); |
| 1914 | } else { |
| 1915 | SDValue PtrOff = DAG.getIntPtrConstant(Val: Offset, DL: dl); |
| 1916 | DstAddr = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: getPointerTy(DL: DAG.getDataLayout()), |
| 1917 | N1: StackPtr, N2: PtrOff); |
| 1918 | DstInfo = |
| 1919 | MachinePointerInfo::getStack(MF&: DAG.getMachineFunction(), Offset); |
| 1920 | } |
| 1921 | |
| 1922 | return std::make_pair(x&: DstAddr, y&: DstInfo); |
| 1923 | } |
| 1924 | |
| 1925 | // Returns the type of copying which is required to set up a byval argument to |
| 1926 | // a tail-called function. This isn't needed for non-tail calls, because they |
| 1927 | // always need the equivalent of CopyOnce, but tail-calls sometimes need two to |
| 1928 | // avoid clobbering another argument (CopyViaTemp), and sometimes can be |
| 1929 | // optimised to zero copies when forwarding an argument from the caller's |
| 1930 | // caller (NoCopy). |
| 1931 | ARMTargetLowering::ByValCopyKind ARMTargetLowering::ByValNeedsCopyForTailCall( |
| 1932 | SelectionDAG &DAG, SDValue Src, SDValue Dst, ISD::ArgFlagsTy Flags) const { |
| 1933 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); |
| 1934 | ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); |
| 1935 | |
| 1936 | // Globals are always safe to copy from. |
| 1937 | if (isa<GlobalAddressSDNode>(Val: Src) || isa<ExternalSymbolSDNode>(Val: Src)) |
| 1938 | return CopyOnce; |
| 1939 | |
| 1940 | // We can only analyse frame index nodes; conservatively assume we need a
| 1941 | // temporary for anything else.
| 1942 | auto *SrcFrameIdxNode = dyn_cast<FrameIndexSDNode>(Val&: Src); |
| 1943 | auto *DstFrameIdxNode = dyn_cast<FrameIndexSDNode>(Val&: Dst); |
| 1944 | if (!SrcFrameIdxNode || !DstFrameIdxNode) |
| 1945 | return CopyViaTemp; |
| 1946 | |
| 1947 | int SrcFI = SrcFrameIdxNode->getIndex(); |
| 1948 | int DstFI = DstFrameIdxNode->getIndex(); |
| 1949 | assert(MFI.isFixedObjectIndex(DstFI) && |
| 1950 | "byval passed in non-fixed stack slot" ); |
| 1951 | |
| 1952 | int64_t SrcOffset = MFI.getObjectOffset(ObjectIdx: SrcFI); |
| 1953 | int64_t DstOffset = MFI.getObjectOffset(ObjectIdx: DstFI); |
| 1954 | |
| 1955 | // If the source is in the local frame, then the copy to the argument memory |
| 1956 | // is always valid. |
| 1957 | bool FixedSrc = MFI.isFixedObjectIndex(ObjectIdx: SrcFI); |
| 1958 | if (!FixedSrc || SrcOffset < -(int64_t)AFI->getArgRegsSaveSize())
| 1960 | return CopyOnce; |
| 1961 | |
| 1962 | // In the case of byval arguments split between registers and the stack, |
| 1963 | // computeAddrForCallArg returns a FrameIndex which corresponds only to the |
| 1964 | // stack portion, but the Src SDValue will refer to the full value, including |
| 1965 | // the local stack memory that the register portion gets stored into. We only |
| 1966 | // need to compare them for equality, so normalise on the full value version. |
| 1967 | uint64_t RegSize = Flags.getByValSize() - MFI.getObjectSize(ObjectIdx: DstFI); |
| 1968 | DstOffset -= RegSize; |
| 1969 | |
| 1970 | // If the value is already in the correct location, then no copying is |
| 1971 | // needed. If not, then we need to copy via a temporary. |
| 1972 | if (SrcOffset == DstOffset) |
| 1973 | return NoCopy; |
| 1974 | else |
| 1975 | return CopyViaTemp; |
| 1976 | } |
| 1977 | |
| 1978 | void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, |
| 1979 | SDValue Chain, SDValue &Arg, |
| 1980 | RegsToPassVector &RegsToPass, |
| 1981 | CCValAssign &VA, CCValAssign &NextVA, |
| 1982 | SDValue &StackPtr, |
| 1983 | SmallVectorImpl<SDValue> &MemOpChains, |
| 1984 | bool IsTailCall, |
| 1985 | int SPDiff) const { |
| 1986 | SDValue fmrrd = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
| 1987 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: Arg); |
| 1988 | unsigned id = Subtarget->isLittle() ? 0 : 1; |
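| | // VMOVRRD returns the two halves of the f64; on big-endian targets the
| | // register order is swapped, so 'id' selects which half goes in the first
| | // register.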
| 1989 | RegsToPass.push_back(Elt: std::make_pair(x: VA.getLocReg(), y: fmrrd.getValue(R: id))); |
| 1990 | |
| 1991 | if (NextVA.isRegLoc()) |
| 1992 | RegsToPass.push_back(Elt: std::make_pair(x: NextVA.getLocReg(), y: fmrrd.getValue(R: 1-id))); |
| 1993 | else { |
| 1994 | assert(NextVA.isMemLoc()); |
| 1995 | if (!StackPtr.getNode()) |
| 1996 | StackPtr = DAG.getCopyFromReg(Chain, dl, Reg: ARM::SP, |
| 1997 | VT: getPointerTy(DL: DAG.getDataLayout())); |
| 1998 | |
| 1999 | SDValue DstAddr; |
| 2000 | MachinePointerInfo DstInfo; |
| 2001 | std::tie(args&: DstAddr, args&: DstInfo) = |
| 2002 | computeAddrForCallArg(dl, DAG, VA: NextVA, StackPtr, IsTailCall, SPDiff); |
| 2003 | MemOpChains.push_back( |
| 2004 | Elt: DAG.getStore(Chain, dl, Val: fmrrd.getValue(R: 1 - id), Ptr: DstAddr, PtrInfo: DstInfo)); |
| 2005 | } |
| 2006 | } |
| 2007 | |
| 2008 | static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) { |
| 2009 | return (CC == CallingConv::Fast && GuaranteeTailCalls) || |
| 2010 | CC == CallingConv::Tail || CC == CallingConv::SwiftTail; |
| 2011 | } |
| 2012 | |
| 2013 | /// LowerCall - Lower a call into a callseq_start <-
| 2014 | /// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
| 2015 | /// nodes. |
| 2016 | SDValue |
| 2017 | ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, |
| 2018 | SmallVectorImpl<SDValue> &InVals) const { |
| 2019 | SelectionDAG &DAG = CLI.DAG; |
| 2020 | SDLoc &dl = CLI.DL; |
| 2021 | SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; |
| 2022 | SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; |
| 2023 | SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; |
| 2024 | SDValue Chain = CLI.Chain; |
| 2025 | SDValue Callee = CLI.Callee; |
| 2026 | bool &isTailCall = CLI.IsTailCall; |
| 2027 | CallingConv::ID CallConv = CLI.CallConv; |
| 2028 | bool doesNotRet = CLI.DoesNotReturn; |
| 2029 | bool isVarArg = CLI.IsVarArg; |
| 2030 | const CallBase *CB = CLI.CB; |
| 2031 | |
| 2032 | MachineFunction &MF = DAG.getMachineFunction(); |
| 2033 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 2034 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); |
| 2035 | MachineFunction::CallSiteInfo CSInfo; |
| 2036 | bool isStructRet = !Outs.empty() && Outs[0].Flags.isSRet();
| 2037 | bool isThisReturn = false; |
| 2038 | bool isCmseNSCall = false; |
| 2039 | bool isSibCall = false; |
| 2040 | bool PreferIndirect = false; |
| 2041 | bool GuardWithBTI = false; |
| 2042 | |
| 2043 | // Analyze operands of the call, assigning locations to each operand. |
| 2044 | SmallVector<CCValAssign, 16> ArgLocs; |
| 2045 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, |
| 2046 | *DAG.getContext()); |
| 2047 | CCInfo.AnalyzeCallOperands(Outs, Fn: CCAssignFnForCall(CC: CallConv, isVarArg)); |
| 2048 | |
| 2049 | // Lower 'returns_twice' calls to a pseudo-instruction. |
| 2050 | if (CLI.CB && CLI.CB->getAttributes().hasFnAttr(Kind: Attribute::ReturnsTwice) && |
| 2051 | !Subtarget->noBTIAtReturnTwice()) |
| 2052 | GuardWithBTI = AFI->branchTargetEnforcement(); |
| 2053 | |
| 2054 | // Set type id for call site info. |
| 2055 | if (MF.getTarget().Options.EmitCallGraphSection && CB && CB->isIndirectCall()) |
| 2056 | CSInfo = MachineFunction::CallSiteInfo(*CB); |
| 2057 | |
| 2058 | // Determine whether this is a non-secure function call. |
| 2059 | if (CLI.CB && CLI.CB->getAttributes().hasFnAttr(Kind: "cmse_nonsecure_call" )) |
| 2060 | isCmseNSCall = true; |
| 2061 | |
| 2062 | // Disable tail calls if they're not supported. |
| 2063 | if (!Subtarget->supportsTailCall()) |
| 2064 | isTailCall = false; |
| 2065 | |
| 2066 | // For both the non-secure calls and the returns from a CMSE entry function, |
| 2067 | // the function needs to do some extra work after the call, or before the |
| 2068 | // return, respectively, so it cannot end with a tail call.
| 2069 | if (isCmseNSCall || AFI->isCmseNSEntryFunction()) |
| 2070 | isTailCall = false; |
| 2071 | |
| 2072 | if (isa<GlobalAddressSDNode>(Val: Callee)) { |
| 2073 | // If we're optimizing for minimum size and the function is called three or |
| 2074 | // more times in this block, we can improve codesize by calling indirectly |
| 2075 | // as BLXr has a 16-bit encoding. |
| 2076 | auto *GV = cast<GlobalAddressSDNode>(Val&: Callee)->getGlobal(); |
| 2077 | if (CLI.CB) { |
| 2078 | auto *BB = CLI.CB->getParent(); |
| 2079 | PreferIndirect = Subtarget->isThumb() && Subtarget->hasMinSize() && |
| 2080 | count_if(Range: GV->users(), P: [&BB](const User *U) { |
| 2081 | return isa<Instruction>(Val: U) && |
| 2082 | cast<Instruction>(Val: U)->getParent() == BB; |
| 2083 | }) > 2; |
| 2084 | } |
| 2085 | } |
| 2086 | if (isTailCall) { |
| 2087 | // Check if it's really possible to do a tail call. |
| 2088 | isTailCall = |
| 2089 | IsEligibleForTailCallOptimization(CLI, CCInfo, ArgLocs, isIndirect: PreferIndirect); |
| 2090 | |
| 2091 | if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt && |
| 2092 | CallConv != CallingConv::Tail && CallConv != CallingConv::SwiftTail) |
| 2093 | isSibCall = true; |
| 2094 | |
| 2095 | // We don't support GuaranteedTailCallOpt for ARM, only automatically |
| 2096 | // detected sibcalls. |
| 2097 | if (isTailCall) |
| 2098 | ++NumTailCalls; |
| 2099 | } |
| 2100 | |
| 2101 | if (!isTailCall && CLI.CB && CLI.CB->isMustTailCall()) |
| 2102 | report_fatal_error(reason: "failed to perform tail call elimination on a call " |
| 2103 | "site marked musttail" ); |
| 2104 | |
| 2105 | // Get a count of how many bytes are to be pushed on the stack. |
| 2106 | unsigned NumBytes = CCInfo.getStackSize(); |
| 2107 | |
| 2108 | // SPDiff is the byte offset of the call's argument area from the callee's. |
| 2109 | // Stores to callee stack arguments will be placed in FixedStackSlots offset |
| 2110 | // by this amount for a tail call. In a sibling call it must be 0 because the |
| 2111 | // caller will deallocate the entire stack and the callee still expects its |
| 2112 | // arguments to begin at SP+0. Completely unused for non-tail calls. |
| 2113 | int SPDiff = 0; |
| 2114 | |
| 2115 | if (isTailCall && !isSibCall) { |
| 2116 | auto FuncInfo = MF.getInfo<ARMFunctionInfo>(); |
| 2117 | unsigned NumReusableBytes = FuncInfo->getArgumentStackSize(); |
| 2118 | |
| 2119 | // Since callee will pop argument stack as a tail call, we must keep the |
| 2120 | // popped size 16-byte aligned. |
| 2121 | MaybeAlign StackAlign = DAG.getDataLayout().getStackAlignment(); |
    assert(StackAlign && "data layout string is missing stack alignment");
| 2123 | NumBytes = alignTo(Size: NumBytes, A: *StackAlign); |
| 2124 | |
| 2125 | // SPDiff will be negative if this tail call requires more space than we |
| 2126 | // would automatically have in our incoming argument space. Positive if we |
| 2127 | // can actually shrink the stack. |
| 2128 | SPDiff = NumReusableBytes - NumBytes; |
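    // e.g. NumReusableBytes == 8 with NumBytes == 16 gives SPDiff == -8: this
    // tail call needs 8 bytes more than the incoming argument area provides.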
| 2129 | |
| 2130 | // If this call requires more stack than we have available from |
| 2131 | // LowerFormalArguments, tell FrameLowering to reserve space for it. |
| 2132 | if (SPDiff < 0 && AFI->getArgRegsSaveSize() < (unsigned)-SPDiff) |
| 2133 | AFI->setArgRegsSaveSize(-SPDiff); |
| 2134 | } |
| 2135 | |
| 2136 | if (isSibCall) { |
| 2137 | // For sibling tail calls, memory operands are available in our caller's stack. |
| 2138 | NumBytes = 0; |
| 2139 | } else { |
| 2140 | // Adjust the stack pointer for the new arguments... |
| 2141 | // These operations are automatically eliminated by the prolog/epilog pass |
| 2142 | Chain = DAG.getCALLSEQ_START(Chain, InSize: isTailCall ? 0 : NumBytes, OutSize: 0, DL: dl); |
| 2143 | } |
| 2144 | |
| 2145 | SDValue StackPtr = |
| 2146 | DAG.getCopyFromReg(Chain, dl, Reg: ARM::SP, VT: getPointerTy(DL: DAG.getDataLayout())); |
| 2147 | |
| 2148 | RegsToPassVector RegsToPass; |
| 2149 | SmallVector<SDValue, 8> MemOpChains; |
| 2150 | |
| 2151 | // If we are doing a tail-call, any byval arguments will be written to stack |
  // space which was used for incoming arguments. If any of the values being
  // used are incoming byval arguments to this function, then they might be
| 2154 | // overwritten by the stores of the outgoing arguments. To avoid this, we |
| 2155 | // need to make a temporary copy of them in local stack space, then copy back |
| 2156 | // to the argument area. |
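  // For example, if the caller received a byval struct at [sp, #0] and this
  // tail call passes it at [sp, #4], storing the outgoing copy directly would
  // clobber part of the source before it had been read.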
| 2157 | DenseMap<unsigned, SDValue> ByValTemporaries; |
| 2158 | SDValue ByValTempChain; |
| 2159 | if (isTailCall) { |
| 2160 | SmallVector<SDValue, 8> ByValCopyChains; |
| 2161 | for (const CCValAssign &VA : ArgLocs) { |
| 2162 | unsigned ArgIdx = VA.getValNo(); |
| 2163 | SDValue Src = OutVals[ArgIdx]; |
| 2164 | ISD::ArgFlagsTy Flags = Outs[ArgIdx].Flags; |
| 2165 | |
| 2166 | if (!Flags.isByVal()) |
| 2167 | continue; |
| 2168 | |
| 2169 | SDValue Dst; |
| 2170 | MachinePointerInfo DstInfo; |
| 2171 | std::tie(args&: Dst, args&: DstInfo) = |
| 2172 | computeAddrForCallArg(dl, DAG, VA, StackPtr: SDValue(), IsTailCall: true, SPDiff); |
| 2173 | ByValCopyKind Copy = ByValNeedsCopyForTailCall(DAG, Src, Dst, Flags); |
| 2174 | |
| 2175 | if (Copy == NoCopy) { |
| 2176 | // If the argument is already at the correct offset on the stack |
| 2177 | // (because we are forwarding a byval argument from our caller), we |
| 2178 | // don't need any copying. |
| 2179 | continue; |
| 2180 | } else if (Copy == CopyOnce) { |
| 2181 | // If the argument is in our local stack frame, no other argument |
| 2182 | // preparation can clobber it, so we can copy it to the final location |
| 2183 | // later. |
| 2184 | ByValTemporaries[ArgIdx] = Src; |
| 2185 | } else { |
        assert(Copy == CopyViaTemp && "unexpected enum value");
| 2187 | // If we might be copying this argument from the outgoing argument |
| 2188 | // stack area, we need to copy via a temporary in the local stack |
| 2189 | // frame. |
| 2190 | int TempFrameIdx = MFI.CreateStackObject( |
| 2191 | Size: Flags.getByValSize(), Alignment: Flags.getNonZeroByValAlign(), isSpillSlot: false); |
| 2192 | SDValue Temp = |
| 2193 | DAG.getFrameIndex(FI: TempFrameIdx, VT: getPointerTy(DL: DAG.getDataLayout())); |
| 2194 | |
| 2195 | SDValue SizeNode = DAG.getConstant(Val: Flags.getByValSize(), DL: dl, VT: MVT::i32); |
| 2196 | SDValue AlignNode = |
| 2197 | DAG.getConstant(Val: Flags.getNonZeroByValAlign().value(), DL: dl, VT: MVT::i32); |
| 2198 | |
| 2199 | SDVTList VTs = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue); |
| 2200 | SDValue Ops[] = {Chain, Temp, Src, SizeNode, AlignNode}; |
| 2201 | ByValCopyChains.push_back( |
| 2202 | Elt: DAG.getNode(Opcode: ARMISD::COPY_STRUCT_BYVAL, DL: dl, VTList: VTs, Ops)); |
| 2203 | ByValTemporaries[ArgIdx] = Temp; |
| 2204 | } |
| 2205 | } |
| 2206 | if (!ByValCopyChains.empty()) |
| 2207 | ByValTempChain = |
| 2208 | DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, Ops: ByValCopyChains); |
| 2209 | } |
| 2210 | |
| 2211 | // During a tail call, stores to the argument area must happen after all of |
| 2212 | // the function's incoming arguments have been loaded because they may alias. |
| 2213 | // This is done by folding in a TokenFactor from LowerFormalArguments, but |
| 2214 | // there's no point in doing so repeatedly so this tracks whether that's |
| 2215 | // happened yet. |
| 2216 | bool AfterFormalArgLoads = false; |
| 2217 | |
| 2218 | // Walk the register/memloc assignments, inserting copies/loads. In the case |
| 2219 | // of tail call optimization, arguments are handled later. |
| 2220 | for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); |
| 2221 | i != e; |
| 2222 | ++i, ++realArgIdx) { |
| 2223 | CCValAssign &VA = ArgLocs[i]; |
| 2224 | SDValue Arg = OutVals[realArgIdx]; |
| 2225 | ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; |
| 2226 | bool isByVal = Flags.isByVal(); |
| 2227 | |
| 2228 | // Promote the value if needed. |
| 2229 | switch (VA.getLocInfo()) { |
    default: llvm_unreachable("Unknown loc info!");
| 2231 | case CCValAssign::Full: break; |
| 2232 | case CCValAssign::SExt: |
| 2233 | Arg = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
| 2234 | break; |
| 2235 | case CCValAssign::ZExt: |
| 2236 | Arg = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
| 2237 | break; |
| 2238 | case CCValAssign::AExt: |
| 2239 | Arg = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
| 2240 | break; |
| 2241 | case CCValAssign::BCvt: |
| 2242 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
| 2243 | break; |
| 2244 | } |
| 2245 | |
| 2246 | if (isTailCall && VA.isMemLoc() && !AfterFormalArgLoads) { |
| 2247 | Chain = DAG.getStackArgumentTokenFactor(Chain); |
| 2248 | if (ByValTempChain) { |
| 2249 | // In case of large byval copies, re-using the stackframe for tail-calls |
| 2250 | // can lead to overwriting incoming arguments on the stack. Force |
| 2251 | // loading these stack arguments before the copy to avoid that. |
| 2252 | SmallVector<SDValue, 8> IncomingLoad; |
| 2253 | for (unsigned I = 0; I < OutVals.size(); ++I) { |
| 2254 | if (Outs[I].Flags.isByVal()) |
| 2255 | continue; |
| 2256 | |
| 2257 | SDValue OutVal = OutVals[I]; |
| 2258 | LoadSDNode *OutLN = dyn_cast_or_null<LoadSDNode>(Val&: OutVal); |
| 2259 | if (!OutLN) |
| 2260 | continue; |
| 2261 | |
| 2262 | FrameIndexSDNode *FIN = |
| 2263 | dyn_cast_or_null<FrameIndexSDNode>(Val: OutLN->getBasePtr()); |
| 2264 | if (!FIN) |
| 2265 | continue; |
| 2266 | |
| 2267 | if (!MFI.isFixedObjectIndex(ObjectIdx: FIN->getIndex())) |
| 2268 | continue; |
| 2269 | |
| 2270 | for (const CCValAssign &VA : ArgLocs) { |
| 2271 | if (VA.isMemLoc()) |
| 2272 | IncomingLoad.push_back(Elt: OutVal.getValue(R: 1)); |
| 2273 | } |
| 2274 | } |
| 2275 | |
| 2276 | // Update the chain to force loads for potentially clobbered argument |
| 2277 | // loads to happen before the byval copy. |
| 2278 | if (!IncomingLoad.empty()) { |
| 2279 | IncomingLoad.push_back(Elt: Chain); |
| 2280 | Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, Ops: IncomingLoad); |
| 2281 | } |
| 2282 | |
| 2283 | Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, N1: Chain, |
| 2284 | N2: ByValTempChain); |
| 2285 | } |
| 2286 | AfterFormalArgLoads = true; |
| 2287 | } |
| 2288 | |
| 2289 | // f16 arguments have their size extended to 4 bytes and passed as if they |
| 2290 | // had been copied to the LSBs of a 32-bit register. |
| 2291 | // For that, it's passed extended to i32 (soft ABI) or to f32 (hard ABI) |
| 2292 | if (VA.needsCustom() && |
| 2293 | (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) { |
| 2294 | Arg = MoveFromHPR(dl, DAG, LocVT: VA.getLocVT(), ValVT: VA.getValVT(), Val: Arg); |
| 2295 | } else { |
      // f16 arguments could have been extended prior to argument lowering.
      // Mask these arguments if this is a CMSE nonsecure call.
| 2298 | auto ArgVT = Outs[realArgIdx].ArgVT; |
| 2299 | if (isCmseNSCall && (ArgVT == MVT::f16)) { |
| 2300 | auto LocBits = VA.getLocVT().getSizeInBits(); |
| 2301 | auto MaskValue = APInt::getLowBitsSet(numBits: LocBits, loBitsSet: ArgVT.getSizeInBits()); |
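        // e.g. for an f16 value in a 32-bit location, MaskValue is 0x0000FFFF,
        // so stale data in the top half cannot leak to the non-secure callee.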
| 2302 | SDValue Mask = |
| 2303 | DAG.getConstant(Val: MaskValue, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits)); |
| 2304 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits), Operand: Arg); |
| 2305 | Arg = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits), N1: Arg, N2: Mask); |
| 2306 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
| 2307 | } |
| 2308 | } |
| 2309 | |
| 2310 | // f64 and v2f64 might be passed in i32 pairs and must be split into pieces |
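    // e.g. under the soft-float AAPCS an f64 travels in a GPR pair such as
    // {r0,r1}, and a v2f64 in two such pairs.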
| 2311 | if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) { |
| 2312 | SDValue Op0 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, N1: Arg, |
| 2313 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 2314 | SDValue Op1 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, N1: Arg, |
| 2315 | N2: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
| 2316 | |
| 2317 | PassF64ArgInRegs(dl, DAG, Chain, Arg&: Op0, RegsToPass, VA, NextVA&: ArgLocs[++i], |
| 2318 | StackPtr, MemOpChains, IsTailCall: isTailCall, SPDiff); |
| 2319 | |
| 2320 | VA = ArgLocs[++i]; // skip ahead to next loc |
| 2321 | if (VA.isRegLoc()) { |
| 2322 | PassF64ArgInRegs(dl, DAG, Chain, Arg&: Op1, RegsToPass, VA, NextVA&: ArgLocs[++i], |
| 2323 | StackPtr, MemOpChains, IsTailCall: isTailCall, SPDiff); |
| 2324 | } else { |
| 2325 | assert(VA.isMemLoc()); |
| 2326 | SDValue DstAddr; |
| 2327 | MachinePointerInfo DstInfo; |
| 2328 | std::tie(args&: DstAddr, args&: DstInfo) = |
| 2329 | computeAddrForCallArg(dl, DAG, VA, StackPtr, IsTailCall: isTailCall, SPDiff); |
| 2330 | MemOpChains.push_back(Elt: DAG.getStore(Chain, dl, Val: Op1, Ptr: DstAddr, PtrInfo: DstInfo)); |
| 2331 | } |
| 2332 | } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) { |
| 2333 | PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, NextVA&: ArgLocs[++i], |
| 2334 | StackPtr, MemOpChains, IsTailCall: isTailCall, SPDiff); |
| 2335 | } else if (VA.isRegLoc()) { |
| 2336 | if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() && |
| 2337 | Outs[0].VT == MVT::i32) { |
        assert(VA.getLocVT() == MVT::i32 &&
               "unexpected calling convention register assignment");
        assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
               "unexpected use of 'returned'");
| 2342 | isThisReturn = true; |
| 2343 | } |
| 2344 | const TargetOptions &Options = DAG.getTarget().Options; |
| 2345 | if (Options.EmitCallSiteInfo) |
| 2346 | CSInfo.ArgRegPairs.emplace_back(Args: VA.getLocReg(), Args&: i); |
| 2347 | RegsToPass.push_back(Elt: std::make_pair(x: VA.getLocReg(), y&: Arg)); |
| 2348 | } else if (isByVal) { |
| 2349 | assert(VA.isMemLoc()); |
| 2350 | unsigned offset = 0; |
| 2351 | |
      // This byval aggregate may be split between registers and memory; the
      // in-regs parameter info below tells us which part, if any, is in
      // registers.
| 2354 | unsigned ByValArgsCount = CCInfo.getInRegsParamsCount(); |
| 2355 | unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed(); |
| 2356 | |
| 2357 | SDValue ByValSrc; |
| 2358 | bool NeedsStackCopy; |
| 2359 | if (auto It = ByValTemporaries.find(Val: realArgIdx); |
| 2360 | It != ByValTemporaries.end()) { |
| 2361 | ByValSrc = It->second; |
| 2362 | NeedsStackCopy = true; |
| 2363 | } else { |
| 2364 | ByValSrc = Arg; |
| 2365 | NeedsStackCopy = !isTailCall; |
| 2366 | } |
| 2367 | |
| 2368 | // If part of the argument is in registers, load them. |
| 2369 | if (CurByValIdx < ByValArgsCount) { |
| 2370 | unsigned RegBegin, RegEnd; |
| 2371 | CCInfo.getInRegsParamInfo(InRegsParamRecordIndex: CurByValIdx, BeginReg&: RegBegin, EndReg&: RegEnd); |
| 2372 | |
| 2373 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 2374 | unsigned int i, j; |
| 2375 | for (i = 0, j = RegBegin; j < RegEnd; i++, j++) { |
| 2376 | SDValue Const = DAG.getConstant(Val: 4*i, DL: dl, VT: MVT::i32); |
| 2377 | SDValue AddArg = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: ByValSrc, N2: Const); |
| 2378 | SDValue Load = |
| 2379 | DAG.getLoad(VT: PtrVT, dl, Chain, Ptr: AddArg, PtrInfo: MachinePointerInfo(), |
| 2380 | Alignment: DAG.InferPtrAlign(Ptr: AddArg)); |
| 2381 | MemOpChains.push_back(Elt: Load.getValue(R: 1)); |
| 2382 | RegsToPass.push_back(Elt: std::make_pair(x&: j, y&: Load)); |
| 2383 | } |
| 2384 | |
        // If the parameter size exceeds the register area, the "offset" value
        // helps us calculate the stack slot for the remaining part properly.
| 2387 | offset = RegEnd - RegBegin; |
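        // e.g. a 12-byte byval with RegBegin == r2 and RegEnd == r4 passes 8
        // bytes in {r2,r3} (offset == 2 registers) and 4 bytes on the stack.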
| 2388 | |
| 2389 | CCInfo.nextInRegsParam(); |
| 2390 | } |
| 2391 | |
| 2392 | // If the memory part of the argument isn't already in the correct place |
| 2393 | // (which can happen with tail calls), copy it into the argument area. |
| 2394 | if (NeedsStackCopy && Flags.getByValSize() > 4 * offset) { |
| 2395 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 2396 | SDValue Dst; |
| 2397 | MachinePointerInfo DstInfo; |
| 2398 | std::tie(args&: Dst, args&: DstInfo) = |
| 2399 | computeAddrForCallArg(dl, DAG, VA, StackPtr, IsTailCall: isTailCall, SPDiff); |
| 2400 | SDValue SrcOffset = DAG.getIntPtrConstant(Val: 4*offset, DL: dl); |
| 2401 | SDValue Src = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: ByValSrc, N2: SrcOffset); |
| 2402 | SDValue SizeNode = DAG.getConstant(Val: Flags.getByValSize() - 4*offset, DL: dl, |
| 2403 | VT: MVT::i32); |
| 2404 | SDValue AlignNode = |
| 2405 | DAG.getConstant(Val: Flags.getNonZeroByValAlign().value(), DL: dl, VT: MVT::i32); |
| 2406 | |
| 2407 | SDVTList VTs = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue); |
        SDValue Ops[] = {Chain, Dst, Src, SizeNode, AlignNode};
| 2409 | MemOpChains.push_back(Elt: DAG.getNode(Opcode: ARMISD::COPY_STRUCT_BYVAL, DL: dl, VTList: VTs, |
| 2410 | Ops)); |
| 2411 | } |
| 2412 | } else { |
| 2413 | assert(VA.isMemLoc()); |
| 2414 | SDValue DstAddr; |
| 2415 | MachinePointerInfo DstInfo; |
| 2416 | std::tie(args&: DstAddr, args&: DstInfo) = |
| 2417 | computeAddrForCallArg(dl, DAG, VA, StackPtr, IsTailCall: isTailCall, SPDiff); |
| 2418 | |
| 2419 | SDValue Store = DAG.getStore(Chain, dl, Val: Arg, Ptr: DstAddr, PtrInfo: DstInfo); |
| 2420 | MemOpChains.push_back(Elt: Store); |
| 2421 | } |
| 2422 | } |
| 2423 | |
| 2424 | if (!MemOpChains.empty()) |
| 2425 | Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, Ops: MemOpChains); |
| 2426 | |
| 2427 | // Build a sequence of copy-to-reg nodes chained together with token chain |
| 2428 | // and flag operands which copy the outgoing args into the appropriate regs. |
| 2429 | SDValue InGlue; |
| 2430 | for (const auto &[Reg, N] : RegsToPass) { |
| 2431 | Chain = DAG.getCopyToReg(Chain, dl, Reg, N, Glue: InGlue); |
| 2432 | InGlue = Chain.getValue(R: 1); |
| 2433 | } |
| 2434 | |
| 2435 | // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every |
| 2436 | // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol |
| 2437 | // node so that legalize doesn't hack it. |
| 2438 | bool isDirect = false; |
| 2439 | |
| 2440 | const TargetMachine &TM = getTargetMachine(); |
| 2441 | const Triple &TT = TM.getTargetTriple(); |
| 2442 | const GlobalValue *GVal = nullptr; |
| 2443 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Val&: Callee)) |
| 2444 | GVal = G->getGlobal(); |
| 2445 | bool isStub = !TM.shouldAssumeDSOLocal(GV: GVal) && TT.isOSBinFormatMachO(); |
| 2446 | |
| 2447 | bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass()); |
| 2448 | bool isLocalARMFunc = false; |
| 2449 | auto PtrVt = getPointerTy(DL: DAG.getDataLayout()); |
| 2450 | |
| 2451 | if (Subtarget->genLongCalls()) { |
| 2452 | assert((!isPositionIndependent() || TT.isOSWindows()) && |
| 2453 | "long-calls codegen is not position independent!" ); |
| 2454 | // Handle a global address or an external symbol. If it's not one of |
| 2455 | // those, the target's already in a register, so we don't need to do |
| 2456 | // anything extra. |
| 2457 | if (isa<GlobalAddressSDNode>(Val: Callee)) { |
| 2458 | if (Subtarget->genExecuteOnly()) { |
| 2459 | if (Subtarget->useMovt()) |
| 2460 | ++NumMovwMovt; |
| 2461 | Callee = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: PtrVt, |
| 2462 | Operand: DAG.getTargetGlobalAddress(GV: GVal, DL: dl, VT: PtrVt)); |
| 2463 | } else { |
| 2464 | // Create a constant pool entry for the callee address |
| 2465 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 2466 | ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create( |
| 2467 | C: GVal, ID: ARMPCLabelIndex, Kind: ARMCP::CPValue, PCAdj: 0); |
| 2468 | |
| 2469 | // Get the address of the callee into a register |
| 2470 | SDValue Addr = DAG.getTargetConstantPool(C: CPV, VT: PtrVt, Align: Align(4)); |
| 2471 | Addr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Addr); |
| 2472 | Callee = DAG.getLoad( |
| 2473 | VT: PtrVt, dl, Chain: DAG.getEntryNode(), Ptr: Addr, |
| 2474 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 2475 | } |
    } else if (ExternalSymbolSDNode *S =
                   dyn_cast<ExternalSymbolSDNode>(Val&: Callee)) {
| 2477 | const char *Sym = S->getSymbol(); |
| 2478 | |
| 2479 | if (Subtarget->genExecuteOnly()) { |
| 2480 | if (Subtarget->useMovt()) |
| 2481 | ++NumMovwMovt; |
        // Wrap the external symbol directly; there is no GlobalValue here.
        Callee = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: PtrVt,
                             Operand: DAG.getTargetExternalSymbol(Sym, VT: PtrVt, TargetFlags: 0));
| 2484 | } else { |
| 2485 | // Create a constant pool entry for the callee address |
| 2486 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 2487 | ARMConstantPoolValue *CPV = ARMConstantPoolSymbol::Create( |
| 2488 | C&: *DAG.getContext(), s: Sym, ID: ARMPCLabelIndex, PCAdj: 0); |
| 2489 | |
| 2490 | // Get the address of the callee into a register |
| 2491 | SDValue Addr = DAG.getTargetConstantPool(C: CPV, VT: PtrVt, Align: Align(4)); |
| 2492 | Addr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Addr); |
| 2493 | Callee = DAG.getLoad( |
| 2494 | VT: PtrVt, dl, Chain: DAG.getEntryNode(), Ptr: Addr, |
| 2495 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 2496 | } |
| 2497 | } |
| 2498 | } else if (isa<GlobalAddressSDNode>(Val: Callee)) { |
| 2499 | if (!PreferIndirect) { |
| 2500 | isDirect = true; |
| 2501 | bool isDef = GVal->isStrongDefinitionForLinker(); |
| 2502 | |
| 2503 | // ARM call to a local ARM function is predicable. |
| 2504 | isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking); |
| 2505 | // tBX takes a register source operand. |
| 2506 | if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { |
        assert(TT.isOSBinFormatMachO() && "WrapperPIC use on non-MachO?");
| 2508 | Callee = DAG.getNode( |
| 2509 | Opcode: ARMISD::WrapperPIC, DL: dl, VT: PtrVt, |
| 2510 | Operand: DAG.getTargetGlobalAddress(GV: GVal, DL: dl, VT: PtrVt, offset: 0, TargetFlags: ARMII::MO_NONLAZY)); |
| 2511 | Callee = DAG.getLoad( |
| 2512 | VT: PtrVt, dl, Chain: DAG.getEntryNode(), Ptr: Callee, |
| 2513 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction()), Alignment: MaybeAlign(), |
| 2514 | MMOFlags: MachineMemOperand::MODereferenceable | |
| 2515 | MachineMemOperand::MOInvariant); |
| 2516 | } else if (Subtarget->isTargetCOFF()) { |
| 2517 | assert(Subtarget->isTargetWindows() && |
| 2518 | "Windows is the only supported COFF target" ); |
| 2519 | unsigned TargetFlags = ARMII::MO_NO_FLAG; |
| 2520 | if (GVal->hasDLLImportStorageClass()) |
| 2521 | TargetFlags = ARMII::MO_DLLIMPORT; |
| 2522 | else if (!TM.shouldAssumeDSOLocal(GV: GVal)) |
| 2523 | TargetFlags = ARMII::MO_COFFSTUB; |
| 2524 | Callee = DAG.getTargetGlobalAddress(GV: GVal, DL: dl, VT: PtrVt, /*offset=*/0, |
| 2525 | TargetFlags); |
| 2526 | if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB)) |
| 2527 | Callee = |
| 2528 | DAG.getLoad(VT: PtrVt, dl, Chain: DAG.getEntryNode(), |
| 2529 | Ptr: DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: PtrVt, Operand: Callee), |
| 2530 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction())); |
| 2531 | } else { |
| 2532 | Callee = DAG.getTargetGlobalAddress(GV: GVal, DL: dl, VT: PtrVt, offset: 0, TargetFlags: 0); |
| 2533 | } |
| 2534 | } |
| 2535 | } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Val&: Callee)) { |
| 2536 | isDirect = true; |
| 2537 | // tBX takes a register source operand. |
| 2538 | const char *Sym = S->getSymbol(); |
| 2539 | if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { |
| 2540 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 2541 | ARMConstantPoolValue *CPV = |
| 2542 | ARMConstantPoolSymbol::Create(C&: *DAG.getContext(), s: Sym, |
| 2543 | ID: ARMPCLabelIndex, PCAdj: 4); |
| 2544 | SDValue CPAddr = DAG.getTargetConstantPool(C: CPV, VT: PtrVt, Align: Align(4)); |
| 2545 | CPAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: CPAddr); |
| 2546 | Callee = DAG.getLoad( |
| 2547 | VT: PtrVt, dl, Chain: DAG.getEntryNode(), Ptr: CPAddr, |
| 2548 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 2549 | SDValue PICLabel = DAG.getConstant(Val: ARMPCLabelIndex, DL: dl, VT: MVT::i32); |
| 2550 | Callee = DAG.getNode(Opcode: ARMISD::PIC_ADD, DL: dl, VT: PtrVt, N1: Callee, N2: PICLabel); |
| 2551 | } else { |
| 2552 | Callee = DAG.getTargetExternalSymbol(Sym, VT: PtrVt, TargetFlags: 0); |
| 2553 | } |
| 2554 | } |
| 2555 | |
| 2556 | if (isCmseNSCall) { |
    assert(!isARMFunc && !isDirect &&
           "Cannot handle call to ARM function or direct call");
| 2559 | if (NumBytes > 0) { |
| 2560 | DAG.getContext()->diagnose( |
| 2561 | DI: DiagnosticInfoUnsupported(DAG.getMachineFunction().getFunction(), |
| 2562 | "call to non-secure function would require " |
| 2563 | "passing arguments on stack" , |
| 2564 | dl.getDebugLoc())); |
| 2565 | } |
| 2566 | if (isStructRet) { |
| 2567 | DAG.getContext()->diagnose(DI: DiagnosticInfoUnsupported( |
| 2568 | DAG.getMachineFunction().getFunction(), |
| 2569 | "call to non-secure function would return value through pointer" , |
| 2570 | dl.getDebugLoc())); |
| 2571 | } |
| 2572 | } |
| 2573 | |
| 2574 | // FIXME: handle tail calls differently. |
| 2575 | unsigned CallOpc; |
| 2576 | if (Subtarget->isThumb()) { |
| 2577 | if (GuardWithBTI) |
| 2578 | CallOpc = ARMISD::t2CALL_BTI; |
| 2579 | else if (isCmseNSCall) |
| 2580 | CallOpc = ARMISD::tSECALL; |
| 2581 | else if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) |
| 2582 | CallOpc = ARMISD::CALL_NOLINK; |
| 2583 | else |
| 2584 | CallOpc = ARMISD::CALL; |
| 2585 | } else { |
| 2586 | if (!isDirect && !Subtarget->hasV5TOps()) |
| 2587 | CallOpc = ARMISD::CALL_NOLINK; |
| 2588 | else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() && |
| 2589 | // Emit regular call when code size is the priority |
| 2590 | !Subtarget->hasMinSize()) |
| 2591 | // "mov lr, pc; b _foo" to avoid confusing the RSP |
| 2592 | CallOpc = ARMISD::CALL_NOLINK; |
| 2593 | else |
| 2594 | CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL; |
| 2595 | } |
| 2596 | |
| 2597 | // We don't usually want to end the call-sequence here because we would tidy |
| 2598 | // the frame up *after* the call, however in the ABI-changing tail-call case |
| 2599 | // we've carefully laid out the parameters so that when sp is reset they'll be |
| 2600 | // in the correct location. |
| 2601 | if (isTailCall && !isSibCall) { |
| 2602 | Chain = DAG.getCALLSEQ_END(Chain, Size1: 0, Size2: 0, Glue: InGlue, DL: dl); |
| 2603 | InGlue = Chain.getValue(R: 1); |
| 2604 | } |
| 2605 | |
| 2606 | std::vector<SDValue> Ops; |
| 2607 | Ops.push_back(x: Chain); |
| 2608 | Ops.push_back(x: Callee); |
| 2609 | |
| 2610 | if (isTailCall) { |
| 2611 | Ops.push_back(x: DAG.getSignedTargetConstant(Val: SPDiff, DL: dl, VT: MVT::i32)); |
| 2612 | } |
| 2613 | |
| 2614 | // Add argument registers to the end of the list so that they are known live |
| 2615 | // into the call. |
| 2616 | for (const auto &[Reg, N] : RegsToPass) |
| 2617 | Ops.push_back(x: DAG.getRegister(Reg, VT: N.getValueType())); |
| 2618 | |
| 2619 | // Add a register mask operand representing the call-preserved registers. |
| 2620 | const uint32_t *Mask; |
| 2621 | const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo(); |
| 2622 | if (isThisReturn) { |
| 2623 | // For 'this' returns, use the R0-preserving mask if applicable |
| 2624 | Mask = ARI->getThisReturnPreservedMask(MF, CallConv); |
| 2625 | if (!Mask) { |
| 2626 | // Set isThisReturn to false if the calling convention is not one that |
| 2627 | // allows 'returned' to be modeled in this way, so LowerCallResult does |
| 2628 | // not try to pass 'this' straight through |
| 2629 | isThisReturn = false; |
| 2630 | Mask = ARI->getCallPreservedMask(MF, CallConv); |
| 2631 | } |
| 2632 | } else |
| 2633 | Mask = ARI->getCallPreservedMask(MF, CallConv); |
| 2634 | |
| 2635 | assert(Mask && "Missing call preserved mask for calling convention" ); |
| 2636 | Ops.push_back(x: DAG.getRegisterMask(RegMask: Mask)); |
| 2637 | |
| 2638 | if (InGlue.getNode()) |
| 2639 | Ops.push_back(x: InGlue); |
| 2640 | |
| 2641 | if (isTailCall) { |
| 2642 | MF.getFrameInfo().setHasTailCall(); |
| 2643 | SDValue Ret = DAG.getNode(Opcode: ARMISD::TC_RETURN, DL: dl, VT: MVT::Other, Ops); |
| 2644 | if (CLI.CFIType) |
| 2645 | Ret.getNode()->setCFIType(CLI.CFIType->getZExtValue()); |
| 2646 | DAG.addNoMergeSiteInfo(Node: Ret.getNode(), NoMerge: CLI.NoMerge); |
| 2647 | DAG.addCallSiteInfo(Node: Ret.getNode(), CallInfo: std::move(CSInfo)); |
| 2648 | return Ret; |
| 2649 | } |
| 2650 | |
| 2651 | // Returns a chain and a flag for retval copy to use. |
| 2652 | Chain = DAG.getNode(Opcode: CallOpc, DL: dl, ResultTys: {MVT::Other, MVT::Glue}, Ops); |
| 2653 | if (CLI.CFIType) |
| 2654 | Chain.getNode()->setCFIType(CLI.CFIType->getZExtValue()); |
| 2655 | DAG.addNoMergeSiteInfo(Node: Chain.getNode(), NoMerge: CLI.NoMerge); |
| 2656 | InGlue = Chain.getValue(R: 1); |
| 2657 | DAG.addCallSiteInfo(Node: Chain.getNode(), CallInfo: std::move(CSInfo)); |
| 2658 | |
| 2659 | // If we're guaranteeing tail-calls will be honoured, the callee must |
| 2660 | // pop its own argument stack on return. But this call is *not* a tail call so |
  // we need to undo that after it returns to restore the status quo.
| 2662 | bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt; |
| 2663 | uint64_t CalleePopBytes = |
| 2664 | canGuaranteeTCO(CC: CallConv, GuaranteeTailCalls: TailCallOpt) ? alignTo(Value: NumBytes, Align: 16) : -1U; |
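  // e.g. with GuaranteedTailCallOpt and a tail-call CC, 20 bytes of arguments
  // round up to a 32-byte callee pop; otherwise -1U means the callee pops
  // nothing.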
| 2665 | |
| 2666 | Chain = DAG.getCALLSEQ_END(Chain, Size1: NumBytes, Size2: CalleePopBytes, Glue: InGlue, DL: dl); |
| 2667 | if (!Ins.empty()) |
| 2668 | InGlue = Chain.getValue(R: 1); |
| 2669 | |
| 2670 | // Handle result values, copying them out of physregs into vregs that we |
| 2671 | // return. |
| 2672 | return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, dl, DAG, |
| 2673 | InVals, isThisReturn, |
| 2674 | ThisVal: isThisReturn ? OutVals[0] : SDValue(), isCmseNSCall); |
| 2675 | } |
| 2676 | |
| 2677 | /// HandleByVal - Every parameter *after* a byval parameter is passed |
| 2678 | /// on the stack. Remember the next parameter register to allocate, |
/// and then confiscate the rest of the parameter registers to ensure
| 2680 | /// this. |
| 2681 | void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size, |
| 2682 | Align Alignment) const { |
| 2683 | // Byval (as with any stack) slots are always at least 4 byte aligned. |
| 2684 | Alignment = std::max(a: Alignment, b: Align(4)); |
| 2685 | |
| 2686 | MCRegister Reg = State->AllocateReg(Regs: GPRArgRegs); |
| 2687 | if (!Reg) |
| 2688 | return; |
| 2689 | |
| 2690 | unsigned AlignInRegs = Alignment.value() / 4; |
| 2691 | unsigned Waste = (ARM::R4 - Reg) % AlignInRegs; |
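  // e.g. an 8-byte-aligned byval arriving when r1 is the next free register
  // wastes r1 so the data itself starts in the aligned pair {r2,r3}.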
| 2692 | for (unsigned i = 0; i < Waste; ++i) |
| 2693 | Reg = State->AllocateReg(Regs: GPRArgRegs); |
| 2694 | |
| 2695 | if (!Reg) |
| 2696 | return; |
| 2697 | |
| 2698 | unsigned Excess = 4 * (ARM::R4 - Reg); |
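  // e.g. Reg == r2 leaves {r2,r3}, i.e. Excess == 8 bytes of register space.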
| 2699 | |
  // Special case when NSAA != SP and the parameter size is greater than the
  // size of all remaining GPR regs. In that case we can't split the
  // parameter, we must send it all to the stack. We also must set the NCRN
  // (Next Core Register Number) to R4, so all remaining registers are wasted.
| 2704 | const unsigned NSAAOffset = State->getStackSize(); |
| 2705 | if (NSAAOffset != 0 && Size > Excess) { |
| 2706 | while (State->AllocateReg(Regs: GPRArgRegs)) |
| 2707 | ; |
| 2708 | return; |
| 2709 | } |
| 2710 | |
  // The first register for the byval parameter is the first register that
  // wasn't allocated before this method call, so it would be "reg".
  // If the parameter is small enough to be saved in the range [reg, r4), then
  // the end (first-past-the-last) register would be reg + param-size-in-regs;
  // otherwise the parameter is split between registers and stack, and the
  // end register would be r4 in that case.
| 2717 | unsigned ByValRegBegin = Reg; |
| 2718 | unsigned ByValRegEnd = std::min<unsigned>(a: Reg + Size / 4, b: ARM::R4); |
| 2719 | State->addInRegsParamInfo(RegBegin: ByValRegBegin, RegEnd: ByValRegEnd); |
  // Note, the first register was already allocated at the beginning of this
  // function; allocate the remaining registers we need.
| 2722 | for (unsigned i = Reg + 1; i != ByValRegEnd; ++i) |
| 2723 | State->AllocateReg(Regs: GPRArgRegs); |
| 2724 | // A byval parameter that is split between registers and memory needs its |
| 2725 | // size truncated here. |
| 2726 | // In the case where the entire structure fits in registers, we set the |
| 2727 | // size in memory to zero. |
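  // e.g. a 20-byte byval starting at r2 (with nothing on the stack yet) keeps
  // 8 bytes in {r2,r3} and leaves Size == 12 bytes to pass in memory.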
| 2728 | Size = std::max<int>(a: Size - Excess, b: 0); |
| 2729 | } |
| 2730 | |
| 2731 | /// IsEligibleForTailCallOptimization - Check whether the call is eligible |
| 2732 | /// for tail call optimization. Targets which want to do tail call |
| 2733 | /// optimization should implement this function. Note that this function also |
| 2734 | /// processes musttail calls, so when this function returns false on a valid |
| 2735 | /// musttail call, a fatal backend error occurs. |
| 2736 | bool ARMTargetLowering::IsEligibleForTailCallOptimization( |
| 2737 | TargetLowering::CallLoweringInfo &CLI, CCState &CCInfo, |
| 2738 | SmallVectorImpl<CCValAssign> &ArgLocs, const bool isIndirect) const { |
| 2739 | CallingConv::ID CalleeCC = CLI.CallConv; |
| 2740 | SDValue Callee = CLI.Callee; |
| 2741 | bool isVarArg = CLI.IsVarArg; |
| 2742 | const SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; |
| 2743 | const SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; |
| 2744 | const SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; |
| 2745 | const SelectionDAG &DAG = CLI.DAG; |
| 2746 | MachineFunction &MF = DAG.getMachineFunction(); |
| 2747 | const Function &CallerF = MF.getFunction(); |
| 2748 | CallingConv::ID CallerCC = CallerF.getCallingConv(); |
| 2749 | |
| 2750 | assert(Subtarget->supportsTailCall()); |
| 2751 | |
| 2752 | // Indirect tail-calls require a register to hold the target address. That |
| 2753 | // register must be: |
| 2754 | // * Allocatable (i.e. r0-r7 if the target is Thumb1). |
| 2755 | // * Not callee-saved, so must be one of r0-r3 or r12. |
| 2756 | // * Not used to hold an argument to the tail-called function, which might be |
| 2757 | // in r0-r3. |
| 2758 | // * Not used to hold the return address authentication code, which is in r12 |
| 2759 | // if enabled. |
| 2760 | // Sometimes, no register matches all of these conditions, so we can't do a |
| 2761 | // tail-call. |
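  // For example, a Thumb1 function whose arguments occupy r0-r3 leaves no
  // allocatable, non-callee-saved register free for the target address.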
| 2762 | if (!isa<GlobalAddressSDNode>(Val: Callee.getNode()) || isIndirect) { |
| 2763 | SmallSet<MCPhysReg, 5> AddressRegisters = {ARM::R0, ARM::R1, ARM::R2, |
| 2764 | ARM::R3}; |
| 2765 | if (!(Subtarget->isThumb1Only() || |
| 2766 | MF.getInfo<ARMFunctionInfo>()->shouldSignReturnAddress(SpillsLR: true))) |
| 2767 | AddressRegisters.insert(V: ARM::R12); |
| 2768 | for (const CCValAssign &AL : ArgLocs) |
| 2769 | if (AL.isRegLoc()) |
| 2770 | AddressRegisters.erase(V: AL.getLocReg()); |
| 2771 | if (AddressRegisters.empty()) { |
| 2772 | LLVM_DEBUG(dbgs() << "false (no reg to hold function pointer)\n" ); |
| 2773 | return false; |
| 2774 | } |
| 2775 | } |
| 2776 | |
| 2777 | // Look for obvious safe cases to perform tail call optimization that do not |
| 2778 | // require ABI changes. This is what gcc calls sibcall. |
| 2779 | |
| 2780 | // Exception-handling functions need a special set of instructions to indicate |
| 2781 | // a return to the hardware. Tail-calling another function would probably |
| 2782 | // break this. |
  if (CallerF.hasFnAttribute(Kind: "interrupt")) {
    LLVM_DEBUG(dbgs() << "false (interrupt attribute)\n");
| 2785 | return false; |
| 2786 | } |
| 2787 | |
| 2788 | if (canGuaranteeTCO(CC: CalleeCC, |
| 2789 | GuaranteeTailCalls: getTargetMachine().Options.GuaranteedTailCallOpt)) { |
    LLVM_DEBUG(dbgs() << (CalleeCC == CallerCC ? "true" : "false")
                      << " (guaranteed tail-call CC)\n");
| 2792 | return CalleeCC == CallerCC; |
| 2793 | } |
| 2794 | |
| 2795 | // Also avoid sibcall optimization if either caller or callee uses struct |
| 2796 | // return semantics. |
| 2797 | bool isCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet(); |
| 2798 | bool isCallerStructRet = MF.getFunction().hasStructRetAttr(); |
| 2799 | if (isCalleeStructRet != isCallerStructRet) { |
| 2800 | LLVM_DEBUG(dbgs() << "false (struct-ret)\n" ); |
| 2801 | return false; |
| 2802 | } |
| 2803 | |
| 2804 | // Externally-defined functions with weak linkage should not be |
| 2805 | // tail-called on ARM when the OS does not support dynamic |
| 2806 | // pre-emption of symbols, as the AAELF spec requires normal calls |
| 2807 | // to undefined weak functions to be replaced with a NOP or jump to the |
| 2808 | // next instruction. The behaviour of branch instructions in this |
| 2809 | // situation (as used for tail calls) is implementation-defined, so we |
| 2810 | // cannot rely on the linker replacing the tail call with a return. |
| 2811 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Val&: Callee)) { |
| 2812 | const GlobalValue *GV = G->getGlobal(); |
| 2813 | const Triple &TT = getTargetMachine().getTargetTriple(); |
| 2814 | if (GV->hasExternalWeakLinkage() && |
| 2815 | (!TT.isOSWindows() || TT.isOSBinFormatELF() || |
| 2816 | TT.isOSBinFormatMachO())) { |
| 2817 | LLVM_DEBUG(dbgs() << "false (external weak linkage)\n" ); |
| 2818 | return false; |
| 2819 | } |
| 2820 | } |
| 2821 | |
| 2822 | // Check that the call results are passed in the same way. |
| 2823 | LLVMContext &C = *DAG.getContext(); |
| 2824 | if (!CCState::resultsCompatible( |
| 2825 | CalleeCC: getEffectiveCallingConv(CC: CalleeCC, isVarArg), |
| 2826 | CallerCC: getEffectiveCallingConv(CC: CallerCC, isVarArg: CallerF.isVarArg()), MF, C, Ins, |
| 2827 | CalleeFn: CCAssignFnForReturn(CC: CalleeCC, isVarArg), |
| 2828 | CallerFn: CCAssignFnForReturn(CC: CallerCC, isVarArg: CallerF.isVarArg()))) { |
| 2829 | LLVM_DEBUG(dbgs() << "false (incompatible results)\n" ); |
| 2830 | return false; |
| 2831 | } |
| 2832 | // The callee has to preserve all registers the caller needs to preserve. |
| 2833 | const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 2834 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); |
| 2835 | if (CalleeCC != CallerCC) { |
| 2836 | const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); |
| 2837 | if (!TRI->regmaskSubsetEqual(mask0: CallerPreserved, mask1: CalleePreserved)) { |
| 2838 | LLVM_DEBUG(dbgs() << "false (not all registers preserved)\n" ); |
| 2839 | return false; |
| 2840 | } |
| 2841 | } |
| 2842 | |
| 2843 | // If Caller's vararg argument has been split between registers and stack, do |
| 2844 | // not perform tail call, since part of the argument is in caller's local |
| 2845 | // frame. |
| 2846 | const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>(); |
| 2847 | if (CLI.IsVarArg && AFI_Caller->getArgRegsSaveSize()) { |
| 2848 | LLVM_DEBUG(dbgs() << "false (arg reg save area)\n" ); |
| 2849 | return false; |
| 2850 | } |
| 2851 | |
| 2852 | // If the callee takes no arguments then go on to check the results of the |
| 2853 | // call. |
| 2854 | const MachineRegisterInfo &MRI = MF.getRegInfo(); |
| 2855 | if (!parametersInCSRMatch(MRI, CallerPreservedMask: CallerPreserved, ArgLocs, OutVals)) { |
| 2856 | LLVM_DEBUG(dbgs() << "false (parameters in CSRs do not match)\n" ); |
| 2857 | return false; |
| 2858 | } |
| 2859 | |
| 2860 | // If the stack arguments for this call do not fit into our own save area then |
| 2861 | // the call cannot be made tail. |
| 2862 | if (CCInfo.getStackSize() > AFI_Caller->getArgumentStackSize()) |
| 2863 | return false; |
| 2864 | |
| 2865 | LLVM_DEBUG(dbgs() << "true\n" ); |
| 2866 | return true; |
| 2867 | } |
| 2868 | |
| 2869 | bool |
| 2870 | ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv, |
| 2871 | MachineFunction &MF, bool isVarArg, |
| 2872 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
| 2873 | LLVMContext &Context, const Type *RetTy) const { |
| 2874 | SmallVector<CCValAssign, 16> RVLocs; |
| 2875 | CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); |
| 2876 | return CCInfo.CheckReturn(Outs, Fn: CCAssignFnForReturn(CC: CallConv, isVarArg)); |
| 2877 | } |
| 2878 | |
| 2879 | static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps, |
| 2880 | const SDLoc &DL, SelectionDAG &DAG) { |
| 2881 | const MachineFunction &MF = DAG.getMachineFunction(); |
| 2882 | const Function &F = MF.getFunction(); |
| 2883 | |
  StringRef IntKind = F.getFnAttribute(Kind: "interrupt").getValueAsString();
| 2885 | |
| 2886 | // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset |
| 2887 | // version of the "preferred return address". These offsets affect the return |
| 2888 | // instruction if this is a return from PL1 without hypervisor extensions. |
| 2889 | // IRQ/FIQ: +4 "subs pc, lr, #4" |
| 2890 | // SWI: 0 "subs pc, lr, #0" |
| 2891 | // ABORT: +4 "subs pc, lr, #4" |
| 2892 | // UNDEF: +4/+2 "subs pc, lr, #0" |
| 2893 | // UNDEF varies depending on where the exception came from ARM or Thumb |
| 2894 | // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0. |
| 2895 | |
| 2896 | int64_t LROffset; |
| 2897 | if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" || |
| 2898 | IntKind == "ABORT" ) |
| 2899 | LROffset = 4; |
| 2900 | else if (IntKind == "SWI" || IntKind == "UNDEF" ) |
| 2901 | LROffset = 0; |
| 2902 | else |
| 2903 | report_fatal_error(reason: "Unsupported interrupt attribute. If present, value " |
| 2904 | "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF" ); |
| 2905 | |
| 2906 | RetOps.insert(I: RetOps.begin() + 1, |
| 2907 | Elt: DAG.getConstant(Val: LROffset, DL, VT: MVT::i32, isTarget: false)); |
| 2908 | |
| 2909 | return DAG.getNode(Opcode: ARMISD::INTRET_GLUE, DL, VT: MVT::Other, Ops: RetOps); |
| 2910 | } |
| 2911 | |
| 2912 | SDValue |
| 2913 | ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, |
| 2914 | bool isVarArg, |
| 2915 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
| 2916 | const SmallVectorImpl<SDValue> &OutVals, |
| 2917 | const SDLoc &dl, SelectionDAG &DAG) const { |
| 2918 | // CCValAssign - represent the assignment of the return value to a location. |
| 2919 | SmallVector<CCValAssign, 16> RVLocs; |
| 2920 | |
| 2921 | // CCState - Info about the registers and stack slots. |
| 2922 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, |
| 2923 | *DAG.getContext()); |
| 2924 | |
| 2925 | // Analyze outgoing return values. |
| 2926 | CCInfo.AnalyzeReturn(Outs, Fn: CCAssignFnForReturn(CC: CallConv, isVarArg)); |
| 2927 | |
| 2928 | SDValue Glue; |
| 2929 | SmallVector<SDValue, 4> RetOps; |
| 2930 | RetOps.push_back(Elt: Chain); // Operand #0 = Chain (updated below) |
| 2931 | bool isLittleEndian = Subtarget->isLittle(); |
| 2932 | |
| 2933 | MachineFunction &MF = DAG.getMachineFunction(); |
| 2934 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 2935 | AFI->setReturnRegsCount(RVLocs.size()); |
| 2936 | |
  // Report an error if a CMSE entry function returns a structure through the
  // first pointer argument.
| 2938 | if (AFI->isCmseNSEntryFunction() && MF.getFunction().hasStructRetAttr()) { |
| 2939 | // Note: using an empty SDLoc(), as the first line of the function is a |
| 2940 | // better place to report than the last line. |
| 2941 | DAG.getContext()->diagnose(DI: DiagnosticInfoUnsupported( |
| 2942 | DAG.getMachineFunction().getFunction(), |
| 2943 | "secure entry function would return value through pointer" , |
| 2944 | SDLoc().getDebugLoc())); |
| 2945 | } |
| 2946 | |
| 2947 | // Copy the result values into the output registers. |
| 2948 | for (unsigned i = 0, realRVLocIdx = 0; |
| 2949 | i != RVLocs.size(); |
| 2950 | ++i, ++realRVLocIdx) { |
| 2951 | CCValAssign &VA = RVLocs[i]; |
    assert(VA.isRegLoc() && "Can only return in registers!");
| 2953 | |
| 2954 | SDValue Arg = OutVals[realRVLocIdx]; |
| 2955 | bool ReturnF16 = false; |
| 2956 | |
| 2957 | if (Subtarget->hasFullFP16() && getTM().isTargetHardFloat()) { |
| 2958 | // Half-precision return values can be returned like this: |
| 2959 | // |
      // t11: f16 = fadd ...
| 2961 | // t12: i16 = bitcast t11 |
| 2962 | // t13: i32 = zero_extend t12 |
| 2963 | // t14: f32 = bitcast t13 <~~~~~~~ Arg |
| 2964 | // |
| 2965 | // to avoid code generation for bitcasts, we simply set Arg to the node |
| 2966 | // that produces the f16 value, t11 in this case. |
| 2967 | // |
| 2968 | if (Arg.getValueType() == MVT::f32 && Arg.getOpcode() == ISD::BITCAST) { |
| 2969 | SDValue ZE = Arg.getOperand(i: 0); |
| 2970 | if (ZE.getOpcode() == ISD::ZERO_EXTEND && ZE.getValueType() == MVT::i32) { |
| 2971 | SDValue BC = ZE.getOperand(i: 0); |
| 2972 | if (BC.getOpcode() == ISD::BITCAST && BC.getValueType() == MVT::i16) { |
| 2973 | Arg = BC.getOperand(i: 0); |
| 2974 | ReturnF16 = true; |
| 2975 | } |
| 2976 | } |
| 2977 | } |
| 2978 | } |
| 2979 | |
| 2980 | switch (VA.getLocInfo()) { |
    default: llvm_unreachable("Unknown loc info!");
| 2982 | case CCValAssign::Full: break; |
| 2983 | case CCValAssign::BCvt: |
| 2984 | if (!ReturnF16) |
| 2985 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
| 2986 | break; |
| 2987 | } |
| 2988 | |
| 2989 | // Mask f16 arguments if this is a CMSE nonsecure entry. |
| 2990 | auto RetVT = Outs[realRVLocIdx].ArgVT; |
| 2991 | if (AFI->isCmseNSEntryFunction() && (RetVT == MVT::f16)) { |
| 2992 | if (VA.needsCustom() && VA.getValVT() == MVT::f16) { |
| 2993 | Arg = MoveFromHPR(dl, DAG, LocVT: VA.getLocVT(), ValVT: VA.getValVT(), Val: Arg); |
| 2994 | } else { |
| 2995 | auto LocBits = VA.getLocVT().getSizeInBits(); |
| 2996 | auto MaskValue = APInt::getLowBitsSet(numBits: LocBits, loBitsSet: RetVT.getSizeInBits()); |
| 2997 | SDValue Mask = |
| 2998 | DAG.getConstant(Val: MaskValue, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits)); |
| 2999 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits), Operand: Arg); |
| 3000 | Arg = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits), N1: Arg, N2: Mask); |
| 3001 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
| 3002 | } |
| 3003 | } |
| 3004 | |
| 3005 | if (VA.needsCustom() && |
| 3006 | (VA.getLocVT() == MVT::v2f64 || VA.getLocVT() == MVT::f64)) { |
| 3007 | if (VA.getLocVT() == MVT::v2f64) { |
| 3008 | // Extract the first half and return it in two registers. |
| 3009 | SDValue Half = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, N1: Arg, |
| 3010 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 3011 | SDValue HalfGPRs = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
| 3012 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: Half); |
| 3013 | |
| 3014 | Chain = |
| 3015 | DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), |
| 3016 | N: HalfGPRs.getValue(R: isLittleEndian ? 0 : 1), Glue); |
| 3017 | Glue = Chain.getValue(R: 1); |
| 3018 | RetOps.push_back(Elt: DAG.getRegister(Reg: VA.getLocReg(), VT: VA.getLocVT())); |
| 3019 | VA = RVLocs[++i]; // skip ahead to next loc |
| 3020 | Chain = |
| 3021 | DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), |
| 3022 | N: HalfGPRs.getValue(R: isLittleEndian ? 1 : 0), Glue); |
| 3023 | Glue = Chain.getValue(R: 1); |
| 3024 | RetOps.push_back(Elt: DAG.getRegister(Reg: VA.getLocReg(), VT: VA.getLocVT())); |
| 3025 | VA = RVLocs[++i]; // skip ahead to next loc |
| 3026 | |
| 3027 | // Extract the 2nd half and fall through to handle it as an f64 value. |
| 3028 | Arg = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, N1: Arg, |
| 3029 | N2: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
| 3030 | } |
| 3031 | // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is |
| 3032 | // available. |
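      // e.g. on a little-endian target the f64's low word is copied into the
      // first return register and the high word into the second.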
| 3033 | SDValue fmrrd = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
| 3034 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: Arg); |
| 3035 | Chain = DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), |
| 3036 | N: fmrrd.getValue(R: isLittleEndian ? 0 : 1), Glue); |
| 3037 | Glue = Chain.getValue(R: 1); |
| 3038 | RetOps.push_back(Elt: DAG.getRegister(Reg: VA.getLocReg(), VT: VA.getLocVT())); |
| 3039 | VA = RVLocs[++i]; // skip ahead to next loc |
| 3040 | Chain = DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), |
| 3041 | N: fmrrd.getValue(R: isLittleEndian ? 1 : 0), Glue); |
| 3042 | } else |
| 3043 | Chain = DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), N: Arg, Glue); |
| 3044 | |
    // Guarantee that all emitted copies are stuck together so the glued
    // sequence cannot be separated by scheduling.
| 3047 | Glue = Chain.getValue(R: 1); |
| 3048 | RetOps.push_back(Elt: DAG.getRegister( |
| 3049 | Reg: VA.getLocReg(), VT: ReturnF16 ? Arg.getValueType() : VA.getLocVT())); |
| 3050 | } |
| 3051 | const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 3052 | const MCPhysReg *I = |
| 3053 | TRI->getCalleeSavedRegsViaCopy(MF: &DAG.getMachineFunction()); |
| 3054 | if (I) { |
| 3055 | for (; *I; ++I) { |
| 3056 | if (ARM::GPRRegClass.contains(Reg: *I)) |
| 3057 | RetOps.push_back(Elt: DAG.getRegister(Reg: *I, VT: MVT::i32)); |
| 3058 | else if (ARM::DPRRegClass.contains(Reg: *I)) |
| 3059 | RetOps.push_back(Elt: DAG.getRegister(Reg: *I, VT: MVT::getFloatingPointVT(BitWidth: 64))); |
| 3060 | else |
| 3061 | llvm_unreachable("Unexpected register class in CSRsViaCopy!" ); |
| 3062 | } |
| 3063 | } |
| 3064 | |
| 3065 | // Update chain and glue. |
| 3066 | RetOps[0] = Chain; |
| 3067 | if (Glue.getNode()) |
| 3068 | RetOps.push_back(Elt: Glue); |
| 3069 | |
| 3070 | // CPUs which aren't M-class use a special sequence to return from |
| 3071 | // exceptions (roughly, any instruction setting pc and cpsr simultaneously, |
| 3072 | // though we use "subs pc, lr, #N"). |
| 3073 | // |
| 3074 | // M-class CPUs actually use a normal return sequence with a special |
| 3075 | // (hardware-provided) value in LR, so the normal code path works. |
  if (DAG.getMachineFunction().getFunction().hasFnAttribute(Kind: "interrupt") &&
| 3077 | !Subtarget->isMClass()) { |
| 3078 | if (Subtarget->isThumb1Only()) |
| 3079 | report_fatal_error(reason: "interrupt attribute is not supported in Thumb1" ); |
| 3080 | return LowerInterruptReturn(RetOps, DL: dl, DAG); |
| 3081 | } |
| 3082 | |
| 3083 | unsigned RetNode = |
| 3084 | AFI->isCmseNSEntryFunction() ? ARMISD::SERET_GLUE : ARMISD::RET_GLUE; |
| 3085 | return DAG.getNode(Opcode: RetNode, DL: dl, VT: MVT::Other, Ops: RetOps); |
| 3086 | } |
| 3087 | |
| 3088 | bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { |
| 3089 | if (N->getNumValues() != 1) |
| 3090 | return false; |
| 3091 | if (!N->hasNUsesOfValue(NUses: 1, Value: 0)) |
| 3092 | return false; |
| 3093 | |
| 3094 | SDValue TCChain = Chain; |
| 3095 | SDNode *Copy = *N->user_begin(); |
| 3096 | if (Copy->getOpcode() == ISD::CopyToReg) { |
| 3097 | // If the copy has a glue operand, we conservatively assume it isn't safe to |
| 3098 | // perform a tail call. |
| 3099 | if (Copy->getOperand(Num: Copy->getNumOperands()-1).getValueType() == MVT::Glue) |
| 3100 | return false; |
| 3101 | TCChain = Copy->getOperand(Num: 0); |
| 3102 | } else if (Copy->getOpcode() == ARMISD::VMOVRRD) { |
| 3103 | SDNode *VMov = Copy; |
| 3104 | // f64 returned in a pair of GPRs. |
| 3105 | SmallPtrSet<SDNode*, 2> Copies; |
| 3106 | for (SDNode *U : VMov->users()) { |
| 3107 | if (U->getOpcode() != ISD::CopyToReg) |
| 3108 | return false; |
| 3109 | Copies.insert(Ptr: U); |
| 3110 | } |
| 3111 | if (Copies.size() > 2) |
| 3112 | return false; |
| 3113 | |
| 3114 | for (SDNode *U : VMov->users()) { |
| 3115 | SDValue UseChain = U->getOperand(Num: 0); |
| 3116 | if (Copies.count(Ptr: UseChain.getNode())) |
| 3117 | // Second CopyToReg |
| 3118 | Copy = U; |
| 3119 | else { |
| 3120 | // We are at the top of this chain. |
| 3121 | // If the copy has a glue operand, we conservatively assume it |
| 3122 | // isn't safe to perform a tail call. |
| 3123 | if (U->getOperand(Num: U->getNumOperands() - 1).getValueType() == MVT::Glue) |
| 3124 | return false; |
| 3125 | // First CopyToReg |
| 3126 | TCChain = UseChain; |
| 3127 | } |
| 3128 | } |
| 3129 | } else if (Copy->getOpcode() == ISD::BITCAST) { |
| 3130 | // f32 returned in a single GPR. |
| 3131 | if (!Copy->hasOneUse()) |
| 3132 | return false; |
| 3133 | Copy = *Copy->user_begin(); |
| 3134 | if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(NUses: 1, Value: 0)) |
| 3135 | return false; |
| 3136 | // If the copy has a glue operand, we conservatively assume it isn't safe to |
| 3137 | // perform a tail call. |
| 3138 | if (Copy->getOperand(Num: Copy->getNumOperands()-1).getValueType() == MVT::Glue) |
| 3139 | return false; |
| 3140 | TCChain = Copy->getOperand(Num: 0); |
| 3141 | } else { |
| 3142 | return false; |
| 3143 | } |
| 3144 | |
| 3145 | bool HasRet = false; |
| 3146 | for (const SDNode *U : Copy->users()) { |
| 3147 | if (U->getOpcode() != ARMISD::RET_GLUE && |
| 3148 | U->getOpcode() != ARMISD::INTRET_GLUE) |
| 3149 | return false; |
| 3150 | HasRet = true; |
| 3151 | } |
| 3152 | |
| 3153 | if (!HasRet) |
| 3154 | return false; |
| 3155 | |
| 3156 | Chain = TCChain; |
| 3157 | return true; |
| 3158 | } |
| 3159 | |
| 3160 | bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { |
| 3161 | if (!Subtarget->supportsTailCall()) |
| 3162 | return false; |
| 3163 | |
| 3164 | if (!CI->isTailCall()) |
| 3165 | return false; |
| 3166 | |
| 3167 | return true; |
| 3168 | } |
| 3169 | |
// To write a 64-bit value we need to split it into two 32-bit values first,
// then pass the low and high parts through.
| 3172 | static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) { |
| 3173 | SDLoc DL(Op); |
| 3174 | SDValue WriteValue = Op->getOperand(Num: 2); |
| 3175 | |
| 3176 | // This function is only supposed to be called for i64 type argument. |
  assert(WriteValue.getValueType() == MVT::i64 &&
         "LowerWRITE_REGISTER called for non-i64 type argument.");
| 3179 | |
| 3180 | SDValue Lo, Hi; |
| 3181 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: WriteValue, DL, LoVT: MVT::i32, HiVT: MVT::i32); |
| 3182 | SDValue Ops[] = { Op->getOperand(Num: 0), Op->getOperand(Num: 1), Lo, Hi }; |
| 3183 | return DAG.getNode(Opcode: ISD::WRITE_REGISTER, DL, VT: MVT::Other, Ops); |
| 3184 | } |
| 3185 | |
| 3186 | // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as |
| 3187 | // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is |
| 3188 | // one of the above mentioned nodes. It has to be wrapped because otherwise |
| 3189 | // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only |
| 3190 | // be used to form addressing mode. These wrapped nodes will be selected |
| 3191 | // into MOVi. |
| 3192 | SDValue ARMTargetLowering::LowerConstantPool(SDValue Op, |
| 3193 | SelectionDAG &DAG) const { |
| 3194 | EVT PtrVT = Op.getValueType(); |
| 3195 | // FIXME there is no actual debug info here |
| 3196 | SDLoc dl(Op); |
| 3197 | ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Val&: Op); |
| 3198 | SDValue Res; |
| 3199 | |
| 3200 | // When generating execute-only code Constant Pools must be promoted to the |
| 3201 | // global data section. It's a bit ugly that we can't share them across basic |
// blocks, but this way we guarantee that execute-only behaves correctly with
| 3203 | // position-independent addressing modes. |
| 3204 | if (Subtarget->genExecuteOnly()) { |
| 3205 | auto AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); |
| 3206 | auto *T = CP->getType(); |
| 3207 | auto C = const_cast<Constant*>(CP->getConstVal()); |
| 3208 | auto M = DAG.getMachineFunction().getFunction().getParent(); |
| 3209 | auto GV = new GlobalVariable( |
| 3210 | *M, T, /*isConstant=*/true, GlobalVariable::InternalLinkage, C, |
| 3211 | Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" + |
| 3212 | Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" + |
| 3213 | Twine(AFI->createPICLabelUId()) |
| 3214 | ); |
| 3215 | SDValue GA = DAG.getTargetGlobalAddress(GV: dyn_cast<GlobalValue>(Val: GV), |
| 3216 | DL: dl, VT: PtrVT); |
| 3217 | return LowerGlobalAddress(Op: GA, DAG); |
| 3218 | } |
| 3219 | |
| 3220 | // The 16-bit ADR instruction can only encode offsets that are multiples of 4, |
| 3221 | // so we need to align to at least 4 bytes when we don't have 32-bit ADR. |
| 3222 | Align CPAlign = CP->getAlign(); |
| 3223 | if (Subtarget->isThumb1Only()) |
| 3224 | CPAlign = std::max(a: CPAlign, b: Align(4)); |
| 3225 | if (CP->isMachineConstantPoolEntry()) |
| 3226 | Res = |
| 3227 | DAG.getTargetConstantPool(C: CP->getMachineCPVal(), VT: PtrVT, Align: CPAlign); |
| 3228 | else |
| 3229 | Res = DAG.getTargetConstantPool(C: CP->getConstVal(), VT: PtrVT, Align: CPAlign); |
| 3230 | return DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Res); |
| 3231 | } |
| 3232 | |
| 3233 | unsigned ARMTargetLowering::getJumpTableEncoding() const { |
| 3234 | // If we don't have a 32-bit pc-relative branch instruction then the jump |
| 3235 | // table consists of block addresses. Usually this is inline, but for |
| 3236 | // execute-only it must be placed out-of-line. |
| 3237 | if (Subtarget->genExecuteOnly() && !Subtarget->hasV8MBaselineOps()) |
| 3238 | return MachineJumpTableInfo::EK_BlockAddress; |
| 3239 | return MachineJumpTableInfo::EK_Inline; |
| 3240 | } |
| 3241 | |
| 3242 | SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, |
| 3243 | SelectionDAG &DAG) const { |
| 3244 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3245 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3246 | unsigned ARMPCLabelIndex = 0; |
| 3247 | SDLoc DL(Op); |
| 3248 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3249 | const BlockAddress *BA = cast<BlockAddressSDNode>(Val&: Op)->getBlockAddress(); |
| 3250 | SDValue CPAddr; |
| 3251 | bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI(); |
| 3252 | if (!IsPositionIndependent) { |
| 3253 | CPAddr = DAG.getTargetConstantPool(C: BA, VT: PtrVT, Align: Align(4)); |
| 3254 | } else { |
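// A PC read yields the address of the reading instruction plus 8 in ARM
// state and plus 4 in Thumb state, so bias the constant-pool entry by the
// same amount.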
| 3255 | unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; |
| 3256 | ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 3257 | ARMConstantPoolValue *CPV = |
| 3258 | ARMConstantPoolConstant::Create(C: BA, ID: ARMPCLabelIndex, |
| 3259 | Kind: ARMCP::CPBlockAddress, PCAdj); |
| 3260 | CPAddr = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
| 3261 | } |
| 3262 | CPAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL, VT: PtrVT, Operand: CPAddr); |
| 3263 | SDValue Result = DAG.getLoad( |
| 3264 | VT: PtrVT, dl: DL, Chain: DAG.getEntryNode(), Ptr: CPAddr, |
| 3265 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3266 | if (!IsPositionIndependent) |
| 3267 | return Result; |
| 3268 | SDValue PICLabel = DAG.getConstant(Val: ARMPCLabelIndex, DL, VT: MVT::i32); |
| 3269 | return DAG.getNode(Opcode: ARMISD::PIC_ADD, DL, VT: PtrVT, N1: Result, N2: PICLabel); |
| 3270 | } |
| 3271 | |
| 3272 | /// Convert a TLS address reference into the correct sequence of loads |
| 3273 | /// and calls to compute the variable's address for Darwin, and return an |
| 3274 | /// SDValue containing the final node. |
| 3275 | |
| 3276 | /// Darwin only has one TLS scheme which must be capable of dealing with the |
| 3277 | /// fully general situation, in the worst case. This means: |
| 3278 | /// + "extern __thread" declaration. |
| 3279 | /// + Defined in a possibly unknown dynamic library. |
| 3280 | /// |
| 3281 | /// The general system is that each __thread variable has a [3 x i32] descriptor |
| 3282 | /// which contains information used by the runtime to calculate the address. The |
| 3283 | /// only part of this the compiler needs to know about is the first word, which |
| 3284 | /// contains a function pointer that must be called with the address of the |
| 3285 | /// entire descriptor in "r0". |
| 3286 | /// |
| 3287 | /// Since this descriptor may be in a different unit, in general access must |
| 3288 | /// proceed along the usual ARM rules. A common sequence to produce is: |
| 3289 | /// |
| 3290 | /// movw rT1, :lower16:_var$non_lazy_ptr |
| 3291 | /// movt rT1, :upper16:_var$non_lazy_ptr |
| 3292 | /// ldr r0, [rT1] |
| 3293 | /// ldr rT2, [r0] |
| 3294 | /// blx rT2 |
| 3295 | /// [...address now in r0...] |
| 3296 | SDValue |
| 3297 | ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op, |
| 3298 | SelectionDAG &DAG) const { |
| 3299 | assert(getTargetMachine().getTargetTriple().isOSDarwin() && |
| 3300 | "This function expects a Darwin target" ); |
| 3301 | SDLoc DL(Op); |
| 3302 | |
// The first step is to get the address of the actual global symbol. This is where
| 3304 | // the TLS descriptor lives. |
| 3305 | SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG); |
| 3306 | |
| 3307 | // The first entry in the descriptor is a function pointer that we must call |
| 3308 | // to obtain the address of the variable. |
| 3309 | SDValue Chain = DAG.getEntryNode(); |
| 3310 | SDValue FuncTLVGet = DAG.getLoad( |
| 3311 | VT: MVT::i32, dl: DL, Chain, Ptr: DescAddr, |
| 3312 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction()), Alignment: Align(4), |
| 3313 | MMOFlags: MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable | |
| 3314 | MachineMemOperand::MOInvariant); |
| 3315 | Chain = FuncTLVGet.getValue(R: 1); |
| 3316 | |
| 3317 | MachineFunction &F = DAG.getMachineFunction(); |
| 3318 | MachineFrameInfo &MFI = F.getFrameInfo(); |
| 3319 | MFI.setAdjustsStack(true); |
| 3320 | |
| 3321 | // TLS calls preserve all registers except those that absolutely must be |
| 3322 | // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be |
| 3323 | // silly). |
| 3324 | auto TRI = |
| 3325 | getTargetMachine().getSubtargetImpl(F.getFunction())->getRegisterInfo(); |
| 3326 | auto ARI = static_cast<const ARMRegisterInfo *>(TRI); |
| 3327 | const uint32_t *Mask = ARI->getTLSCallPreservedMask(MF: DAG.getMachineFunction()); |
| 3328 | |
// Finally, we can make the call. This is just a degenerate version of a
// normal ARM call node: r0 takes the address of the descriptor, and the
// call returns the address of the variable in this thread.
| 3332 | Chain = DAG.getCopyToReg(Chain, dl: DL, Reg: ARM::R0, N: DescAddr, Glue: SDValue()); |
| 3333 | Chain = |
| 3334 | DAG.getNode(Opcode: ARMISD::CALL, DL, VTList: DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue), |
| 3335 | N1: Chain, N2: FuncTLVGet, N3: DAG.getRegister(Reg: ARM::R0, VT: MVT::i32), |
| 3336 | N4: DAG.getRegisterMask(RegMask: Mask), N5: Chain.getValue(R: 1)); |
| 3337 | return DAG.getCopyFromReg(Chain, dl: DL, Reg: ARM::R0, VT: MVT::i32, Glue: Chain.getValue(R: 1)); |
| 3338 | } |
| 3339 | |
| 3340 | SDValue |
| 3341 | ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op, |
| 3342 | SelectionDAG &DAG) const { |
| 3343 | assert(getTargetMachine().getTargetTriple().isOSWindows() && |
| 3344 | "Windows specific TLS lowering" ); |
| 3345 | |
| 3346 | SDValue Chain = DAG.getEntryNode(); |
| 3347 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3348 | SDLoc DL(Op); |
| 3349 | |
| 3350 | // Load the current TEB (thread environment block) |
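// This is "mrc p15, #0, <Rt>, c13, c0, #2", reading TPIDRURW, the register
// Windows uses to hold the TEB pointer.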
| 3351 | SDValue Ops[] = {Chain, |
| 3352 | DAG.getTargetConstant(Val: Intrinsic::arm_mrc, DL, VT: MVT::i32), |
| 3353 | DAG.getTargetConstant(Val: 15, DL, VT: MVT::i32), |
| 3354 | DAG.getTargetConstant(Val: 0, DL, VT: MVT::i32), |
| 3355 | DAG.getTargetConstant(Val: 13, DL, VT: MVT::i32), |
| 3356 | DAG.getTargetConstant(Val: 0, DL, VT: MVT::i32), |
| 3357 | DAG.getTargetConstant(Val: 2, DL, VT: MVT::i32)}; |
| 3358 | SDValue CurrentTEB = DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL, |
| 3359 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::Other), Ops); |
| 3360 | |
| 3361 | SDValue TEB = CurrentTEB.getValue(R: 0); |
| 3362 | Chain = CurrentTEB.getValue(R: 1); |
| 3363 | |
| 3364 | // Load the ThreadLocalStoragePointer from the TEB |
| 3365 | // A pointer to the TLS array is located at offset 0x2c from the TEB. |
| 3366 | SDValue TLSArray = |
| 3367 | DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: TEB, N2: DAG.getIntPtrConstant(Val: 0x2c, DL)); |
| 3368 | TLSArray = DAG.getLoad(VT: PtrVT, dl: DL, Chain, Ptr: TLSArray, PtrInfo: MachinePointerInfo()); |
| 3369 | |
// The pointer to the thread's TLS data area lives in the TLS array at the
// slot given by the TLS index, i.e. at byte offset TLSIndex * 4.
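// Overall, the address computed below is
//   TEB->ThreadLocalStoragePointer[_tls_index] + SECREL32(var).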
| 3372 | |
| 3373 | // Load the TLS index from the C runtime |
| 3374 | SDValue TLSIndex = |
| 3375 | DAG.getTargetExternalSymbol(Sym: "_tls_index" , VT: PtrVT, TargetFlags: ARMII::MO_NO_FLAG); |
| 3376 | TLSIndex = DAG.getNode(Opcode: ARMISD::Wrapper, DL, VT: PtrVT, Operand: TLSIndex); |
| 3377 | TLSIndex = DAG.getLoad(VT: PtrVT, dl: DL, Chain, Ptr: TLSIndex, PtrInfo: MachinePointerInfo()); |
| 3378 | |
| 3379 | SDValue Slot = DAG.getNode(Opcode: ISD::SHL, DL, VT: PtrVT, N1: TLSIndex, |
| 3380 | N2: DAG.getConstant(Val: 2, DL, VT: MVT::i32)); |
| 3381 | SDValue TLS = DAG.getLoad(VT: PtrVT, dl: DL, Chain, |
| 3382 | Ptr: DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: TLSArray, N2: Slot), |
| 3383 | PtrInfo: MachinePointerInfo()); |
| 3384 | |
| 3385 | // Get the offset of the start of the .tls section (section base) |
| 3386 | const auto *GA = cast<GlobalAddressSDNode>(Val&: Op); |
| 3387 | auto *CPV = ARMConstantPoolConstant::Create(GV: GA->getGlobal(), Modifier: ARMCP::SECREL); |
| 3388 | SDValue Offset = DAG.getLoad( |
| 3389 | VT: PtrVT, dl: DL, Chain, |
| 3390 | Ptr: DAG.getNode(Opcode: ARMISD::Wrapper, DL, VT: MVT::i32, |
| 3391 | Operand: DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4))), |
| 3392 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3393 | |
| 3394 | return DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: TLS, N2: Offset); |
| 3395 | } |
| 3396 | |
| 3397 | // Lower ISD::GlobalTLSAddress using the "general dynamic" model |
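// A typical sequence this produces for PIC (ARM state) looks roughly like:
//   ldr  r0, .LCPI0_0          @ tlsgd(var) - (.LPC0_0 + 8)
// .LPC0_0:
//   add  r0, pc, r0
//   bl   __tls_get_addr(PLT)
// leaving the variable's address in r0.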
| 3398 | SDValue |
| 3399 | ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, |
| 3400 | SelectionDAG &DAG) const { |
| 3401 | SDLoc dl(GA); |
| 3402 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3403 | unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; |
| 3404 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3405 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3406 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 3407 | ARMConstantPoolValue *CPV = |
| 3408 | ARMConstantPoolConstant::Create(C: GA->getGlobal(), ID: ARMPCLabelIndex, |
| 3409 | Kind: ARMCP::CPValue, PCAdj, Modifier: ARMCP::TLSGD, AddCurrentAddress: true); |
| 3410 | SDValue Argument = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
| 3411 | Argument = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Argument); |
| 3412 | Argument = DAG.getLoad( |
| 3413 | VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: Argument, |
| 3414 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3415 | SDValue Chain = Argument.getValue(R: 1); |
| 3416 | |
| 3417 | SDValue PICLabel = DAG.getConstant(Val: ARMPCLabelIndex, DL: dl, VT: MVT::i32); |
| 3418 | Argument = DAG.getNode(Opcode: ARMISD::PIC_ADD, DL: dl, VT: PtrVT, N1: Argument, N2: PICLabel); |
| 3419 | |
| 3420 | // call __tls_get_addr. |
| 3421 | ArgListTy Args; |
| 3422 | Args.emplace_back(args&: Argument, args: Type::getInt32Ty(C&: *DAG.getContext())); |
| 3423 | |
| 3424 | // FIXME: is there useful debug info available here? |
| 3425 | TargetLowering::CallLoweringInfo CLI(DAG); |
| 3426 | CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( |
| 3427 | CC: CallingConv::C, ResultType: Type::getInt32Ty(C&: *DAG.getContext()), |
| 3428 | Target: DAG.getExternalSymbol(Sym: "__tls_get_addr" , VT: PtrVT), ArgsList: std::move(Args)); |
| 3429 | |
| 3430 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); |
| 3431 | return CallResult.first; |
| 3432 | } |
| 3433 | |
| 3434 | // Lower ISD::GlobalTLSAddress using the "initial exec" or |
| 3435 | // "local exec" model. |
| 3436 | SDValue |
| 3437 | ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, |
| 3438 | SelectionDAG &DAG, |
| 3439 | TLSModel::Model model) const { |
| 3440 | const GlobalValue *GV = GA->getGlobal(); |
| 3441 | SDLoc dl(GA); |
| 3442 | SDValue Offset; |
| 3443 | SDValue Chain = DAG.getEntryNode(); |
| 3444 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3445 | // Get the Thread Pointer |
| 3446 | SDValue ThreadPointer = DAG.getNode(Opcode: ARMISD::THREAD_POINTER, DL: dl, VT: PtrVT); |
| 3447 | |
| 3448 | if (model == TLSModel::InitialExec) { |
| 3449 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3450 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3451 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 3452 | // Initial exec model. |
| 3453 | unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; |
| 3454 | ARMConstantPoolValue *CPV = |
| 3455 | ARMConstantPoolConstant::Create(C: GA->getGlobal(), ID: ARMPCLabelIndex, |
| 3456 | Kind: ARMCP::CPValue, PCAdj, Modifier: ARMCP::GOTTPOFF, |
| 3457 | AddCurrentAddress: true); |
| 3458 | Offset = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
| 3459 | Offset = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Offset); |
| 3460 | Offset = DAG.getLoad( |
| 3461 | VT: PtrVT, dl, Chain, Ptr: Offset, |
| 3462 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3463 | Chain = Offset.getValue(R: 1); |
| 3464 | |
| 3465 | SDValue PICLabel = DAG.getConstant(Val: ARMPCLabelIndex, DL: dl, VT: MVT::i32); |
| 3466 | Offset = DAG.getNode(Opcode: ARMISD::PIC_ADD, DL: dl, VT: PtrVT, N1: Offset, N2: PICLabel); |
| 3467 | |
| 3468 | Offset = DAG.getLoad( |
| 3469 | VT: PtrVT, dl, Chain, Ptr: Offset, |
| 3470 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3471 | } else { |
| 3472 | // local exec model |
| 3473 | assert(model == TLSModel::LocalExec); |
| 3474 | ARMConstantPoolValue *CPV = |
| 3475 | ARMConstantPoolConstant::Create(GV, Modifier: ARMCP::TPOFF); |
| 3476 | Offset = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
| 3477 | Offset = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Offset); |
| 3478 | Offset = DAG.getLoad( |
| 3479 | VT: PtrVT, dl, Chain, Ptr: Offset, |
| 3480 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3481 | } |
| 3482 | |
// The address of the thread-local variable is the sum of the thread
// pointer and the variable's offset.
| 3485 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: ThreadPointer, N2: Offset); |
| 3486 | } |
| 3487 | |
| 3488 | SDValue |
| 3489 | ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { |
| 3490 | GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Val&: Op); |
| 3491 | if (DAG.getTarget().useEmulatedTLS()) |
| 3492 | return LowerToTLSEmulatedModel(GA, DAG); |
| 3493 | |
| 3494 | const Triple &TT = getTargetMachine().getTargetTriple(); |
| 3495 | if (TT.isOSDarwin()) |
| 3496 | return LowerGlobalTLSAddressDarwin(Op, DAG); |
| 3497 | |
| 3498 | if (TT.isOSWindows()) |
| 3499 | return LowerGlobalTLSAddressWindows(Op, DAG); |
| 3500 | |
| 3501 | // TODO: implement the "local dynamic" model |
| 3502 | assert(TT.isOSBinFormatELF() && "Only ELF implemented here" ); |
| 3503 | TLSModel::Model model = getTargetMachine().getTLSModel(GV: GA->getGlobal()); |
| 3504 | |
| 3505 | switch (model) { |
| 3506 | case TLSModel::GeneralDynamic: |
| 3507 | case TLSModel::LocalDynamic: |
| 3508 | return LowerToTLSGeneralDynamicModel(GA, DAG); |
| 3509 | case TLSModel::InitialExec: |
| 3510 | case TLSModel::LocalExec: |
| 3511 | return LowerToTLSExecModels(GA, DAG, model); |
| 3512 | } |
| 3513 | llvm_unreachable("bogus TLS model" ); |
| 3514 | } |
| 3515 | |
| 3516 | /// Return true if all users of V are within function F, looking through |
| 3517 | /// ConstantExprs. |
| 3518 | static bool allUsersAreInFunction(const Value *V, const Function *F) { |
| 3519 | SmallVector<const User*,4> Worklist(V->users()); |
| 3520 | while (!Worklist.empty()) { |
| 3521 | auto *U = Worklist.pop_back_val(); |
| 3522 | if (isa<ConstantExpr>(Val: U)) { |
| 3523 | append_range(C&: Worklist, R: U->users()); |
| 3524 | continue; |
| 3525 | } |
| 3526 | |
| 3527 | auto *I = dyn_cast<Instruction>(Val: U); |
| 3528 | if (!I || I->getParent()->getParent() != F) |
| 3529 | return false; |
| 3530 | } |
| 3531 | return true; |
| 3532 | } |
| 3533 | |
| 3534 | static SDValue promoteToConstantPool(const ARMTargetLowering *TLI, |
| 3535 | const GlobalValue *GV, SelectionDAG &DAG, |
| 3536 | EVT PtrVT, const SDLoc &dl) { |
| 3537 | // If we're creating a pool entry for a constant global with unnamed address, |
| 3538 | // and the global is small enough, we can emit it inline into the constant pool |
| 3539 | // to save ourselves an indirection. |
| 3540 | // |
| 3541 | // This is a win if the constant is only used in one function (so it doesn't |
| 3542 | // need to be duplicated) or duplicating the constant wouldn't increase code |
| 3543 | // size (implying the constant is no larger than 4 bytes). |
| 3544 | const Function &F = DAG.getMachineFunction().getFunction(); |
| 3545 | |
// We rely on this decision to inline being idempotent and unrelated to the
// use-site. We know that if we inline a variable at one use site, we'll
// inline it elsewhere too (and reuse the constant pool entry). Fast-isel
// doesn't know about this optimization, so bail out if it's enabled, else
// we could decide to inline here (and thus never emit the GV) while
// fast-isel generated code still requires the GV.
| 3552 | if (!EnableConstpoolPromotion || |
| 3553 | DAG.getMachineFunction().getTarget().Options.EnableFastISel) |
| 3554 | return SDValue(); |
| 3555 | |
| 3556 | auto *GVar = dyn_cast<GlobalVariable>(Val: GV); |
| 3557 | if (!GVar || !GVar->hasInitializer() || |
| 3558 | !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() || |
| 3559 | !GVar->hasLocalLinkage()) |
| 3560 | return SDValue(); |
| 3561 | |
| 3562 | // If we inline a value that contains relocations, we move the relocations |
| 3563 | // from .data to .text. This is not allowed in position-independent code. |
| 3564 | auto *Init = GVar->getInitializer(); |
| 3565 | if ((TLI->isPositionIndependent() || TLI->getSubtarget()->isROPI()) && |
| 3566 | Init->needsDynamicRelocation()) |
| 3567 | return SDValue(); |
| 3568 | |
| 3569 | // The constant islands pass can only really deal with alignment requests |
| 3570 | // <= 4 bytes and cannot pad constants itself. Therefore we cannot promote |
| 3571 | // any type wanting greater alignment requirements than 4 bytes. We also |
| 3572 | // can only promote constants that are multiples of 4 bytes in size or |
| 3573 | // are paddable to a multiple of 4. Currently we only try and pad constants |
| 3574 | // that are strings for simplicity. |
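// For example, a 6-byte string initializer gets RequiredPadding == 2 and is
// emitted below as an 8-byte array with two trailing zero bytes.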
| 3575 | auto *CDAInit = dyn_cast<ConstantDataArray>(Val: Init); |
| 3576 | unsigned Size = DAG.getDataLayout().getTypeAllocSize(Ty: Init->getType()); |
| 3577 | Align PrefAlign = DAG.getDataLayout().getPreferredAlign(GV: GVar); |
| 3578 | unsigned RequiredPadding = 4 - (Size % 4); |
| 3579 | bool PaddingPossible = |
| 3580 | RequiredPadding == 4 || (CDAInit && CDAInit->isString()); |
| 3581 | if (!PaddingPossible || PrefAlign > 4 || Size > ConstpoolPromotionMaxSize || |
| 3582 | Size == 0) |
| 3583 | return SDValue(); |
| 3584 | |
| 3585 | unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding); |
| 3586 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3587 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3588 | |
| 3589 | // We can't bloat the constant pool too much, else the ConstantIslands pass |
| 3590 | // may fail to converge. If we haven't promoted this global yet (it may have |
// multiple uses), and promoting it would increase the constant pool size
// (Size > 4), ensure we have space to do so up to ConstpoolPromotionMaxTotal.
| 3593 | if (!AFI->getGlobalsPromotedToConstantPool().count(Ptr: GVar) && Size > 4) |
| 3594 | if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >= |
| 3595 | ConstpoolPromotionMaxTotal) |
| 3596 | return SDValue(); |
| 3597 | |
| 3598 | // This is only valid if all users are in a single function; we can't clone |
| 3599 | // the constant in general. The LLVM IR unnamed_addr allows merging |
| 3600 | // constants, but not cloning them. |
| 3601 | // |
| 3602 | // We could potentially allow cloning if we could prove all uses of the |
| 3603 | // constant in the current function don't care about the address, like |
| 3604 | // printf format strings. But that isn't implemented for now. |
| 3605 | if (!allUsersAreInFunction(V: GVar, F: &F)) |
| 3606 | return SDValue(); |
| 3607 | |
| 3608 | // We're going to inline this global. Pad it out if needed. |
| 3609 | if (RequiredPadding != 4) { |
| 3610 | StringRef S = CDAInit->getAsString(); |
| 3611 | |
| 3612 | SmallVector<uint8_t,16> V(S.size()); |
| 3613 | std::copy(first: S.bytes_begin(), last: S.bytes_end(), result: V.begin()); |
| 3614 | while (RequiredPadding--) |
| 3615 | V.push_back(Elt: 0); |
| 3616 | Init = ConstantDataArray::get(Context&: *DAG.getContext(), Elts&: V); |
| 3617 | } |
| 3618 | |
| 3619 | auto CPVal = ARMConstantPoolConstant::Create(GV: GVar, Initializer: Init); |
| 3620 | SDValue CPAddr = DAG.getTargetConstantPool(C: CPVal, VT: PtrVT, Align: Align(4)); |
| 3621 | if (!AFI->getGlobalsPromotedToConstantPool().count(Ptr: GVar)) { |
| 3622 | AFI->markGlobalAsPromotedToConstantPool(GV: GVar); |
| 3623 | AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() + |
| 3624 | PaddedSize - 4); |
| 3625 | } |
| 3626 | ++NumConstpoolPromoted; |
| 3627 | return DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: CPAddr); |
| 3628 | } |
| 3629 | |
| 3630 | bool ARMTargetLowering::isReadOnly(const GlobalValue *GV) const { |
| 3631 | if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(Val: GV)) |
| 3632 | if (!(GV = GA->getAliaseeObject())) |
| 3633 | return false; |
| 3634 | if (const auto *V = dyn_cast<GlobalVariable>(Val: GV)) |
| 3635 | return V->isConstant(); |
| 3636 | return isa<Function>(Val: GV); |
| 3637 | } |
| 3638 | |
| 3639 | SDValue ARMTargetLowering::LowerGlobalAddress(SDValue Op, |
| 3640 | SelectionDAG &DAG) const { |
| 3641 | switch (Subtarget->getTargetTriple().getObjectFormat()) { |
| 3642 | default: llvm_unreachable("unknown object format" ); |
| 3643 | case Triple::COFF: |
| 3644 | return LowerGlobalAddressWindows(Op, DAG); |
| 3645 | case Triple::ELF: |
| 3646 | return LowerGlobalAddressELF(Op, DAG); |
| 3647 | case Triple::MachO: |
| 3648 | return LowerGlobalAddressDarwin(Op, DAG); |
| 3649 | } |
| 3650 | } |
| 3651 | |
| 3652 | SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, |
| 3653 | SelectionDAG &DAG) const { |
| 3654 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3655 | SDLoc dl(Op); |
| 3656 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Val&: Op)->getGlobal(); |
| 3657 | bool IsRO = isReadOnly(GV); |
| 3658 | |
// Try promoteToConstantPool only if we are not generating an execute-only (XO)
// text section.
| 3660 | if (GV->isDSOLocal() && !Subtarget->genExecuteOnly()) |
| 3661 | if (SDValue V = promoteToConstantPool(TLI: this, GV, DAG, PtrVT, dl)) |
| 3662 | return V; |
| 3663 | |
| 3664 | if (isPositionIndependent()) { |
| 3665 | SDValue G = DAG.getTargetGlobalAddress( |
| 3666 | GV, DL: dl, VT: PtrVT, offset: 0, TargetFlags: GV->isDSOLocal() ? 0 : ARMII::MO_GOT); |
| 3667 | SDValue Result = DAG.getNode(Opcode: ARMISD::WrapperPIC, DL: dl, VT: PtrVT, Operand: G); |
| 3668 | if (!GV->isDSOLocal()) |
| 3669 | Result = |
| 3670 | DAG.getLoad(VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: Result, |
| 3671 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction())); |
| 3672 | return Result; |
| 3673 | } else if (Subtarget->isROPI() && IsRO) { |
| 3674 | // PC-relative. |
| 3675 | SDValue G = DAG.getTargetGlobalAddress(GV, DL: dl, VT: PtrVT); |
| 3676 | SDValue Result = DAG.getNode(Opcode: ARMISD::WrapperPIC, DL: dl, VT: PtrVT, Operand: G); |
| 3677 | return Result; |
| 3678 | } else if (Subtarget->isRWPI() && !IsRO) { |
| 3679 | // SB-relative. |
| 3680 | SDValue RelAddr; |
| 3681 | if (Subtarget->useMovt()) { |
| 3682 | ++NumMovwMovt; |
| 3683 | SDValue G = DAG.getTargetGlobalAddress(GV, DL: dl, VT: PtrVT, offset: 0, TargetFlags: ARMII::MO_SBREL); |
| 3684 | RelAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: PtrVT, Operand: G); |
| 3685 | } else { // use literal pool for address constant |
| 3686 | ARMConstantPoolValue *CPV = |
| 3687 | ARMConstantPoolConstant::Create(GV, Modifier: ARMCP::SBREL); |
| 3688 | SDValue CPAddr = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
| 3689 | CPAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: CPAddr); |
| 3690 | RelAddr = DAG.getLoad( |
| 3691 | VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: CPAddr, |
| 3692 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3693 | } |
| 3694 | SDValue SB = DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg: ARM::R9, VT: PtrVT); |
| 3695 | SDValue Result = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: SB, N2: RelAddr); |
| 3696 | return Result; |
| 3697 | } |
| 3698 | |
// If we have T2 ops, we can materialize the address directly via a movt/movw
// pair. This is always cheaper. If we need to generate execute-only code and
// only have Thumb1 available, we can't use a constant pool and are forced to
// use immediate relocations.
| 3703 | if (Subtarget->useMovt() || Subtarget->genExecuteOnly()) { |
| 3704 | if (Subtarget->useMovt()) |
| 3705 | ++NumMovwMovt; |
| 3706 | // FIXME: Once remat is capable of dealing with instructions with register |
| 3707 | // operands, expand this into two nodes. |
| 3708 | return DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: PtrVT, |
| 3709 | Operand: DAG.getTargetGlobalAddress(GV, DL: dl, VT: PtrVT)); |
| 3710 | } else { |
| 3711 | SDValue CPAddr = DAG.getTargetConstantPool(C: GV, VT: PtrVT, Align: Align(4)); |
| 3712 | CPAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: CPAddr); |
| 3713 | return DAG.getLoad( |
| 3714 | VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: CPAddr, |
| 3715 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3716 | } |
| 3717 | } |
| 3718 | |
| 3719 | SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, |
| 3720 | SelectionDAG &DAG) const { |
| 3721 | assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && |
| 3722 | "ROPI/RWPI not currently supported for Darwin" ); |
| 3723 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3724 | SDLoc dl(Op); |
| 3725 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Val&: Op)->getGlobal(); |
| 3726 | |
| 3727 | if (Subtarget->useMovt()) |
| 3728 | ++NumMovwMovt; |
| 3729 | |
| 3730 | // FIXME: Once remat is capable of dealing with instructions with register |
| 3731 | // operands, expand this into multiple nodes |
| 3732 | unsigned Wrapper = |
| 3733 | isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper; |
| 3734 | |
| 3735 | SDValue G = DAG.getTargetGlobalAddress(GV, DL: dl, VT: PtrVT, offset: 0, TargetFlags: ARMII::MO_NONLAZY); |
| 3736 | SDValue Result = DAG.getNode(Opcode: Wrapper, DL: dl, VT: PtrVT, Operand: G); |
| 3737 | |
| 3738 | if (Subtarget->isGVIndirectSymbol(GV)) |
| 3739 | Result = DAG.getLoad(VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: Result, |
| 3740 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction())); |
| 3741 | return Result; |
| 3742 | } |
| 3743 | |
| 3744 | SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op, |
| 3745 | SelectionDAG &DAG) const { |
| 3746 | assert(getTargetMachine().getTargetTriple().isOSWindows() && |
| 3747 | "non-Windows COFF is not supported" ); |
| 3748 | assert(Subtarget->useMovt() && |
| 3749 | "Windows on ARM expects to use movw/movt" ); |
| 3750 | assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && |
| 3751 | "ROPI/RWPI not currently supported for Windows" ); |
| 3752 | |
| 3753 | const TargetMachine &TM = getTargetMachine(); |
| 3754 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Val&: Op)->getGlobal(); |
| 3755 | ARMII::TOF TargetFlags = ARMII::MO_NO_FLAG; |
| 3756 | if (GV->hasDLLImportStorageClass()) |
| 3757 | TargetFlags = ARMII::MO_DLLIMPORT; |
| 3758 | else if (!TM.shouldAssumeDSOLocal(GV)) |
| 3759 | TargetFlags = ARMII::MO_COFFSTUB; |
| 3760 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3761 | SDValue Result; |
| 3762 | SDLoc DL(Op); |
| 3763 | |
| 3764 | ++NumMovwMovt; |
| 3765 | |
| 3766 | // FIXME: Once remat is capable of dealing with instructions with register |
| 3767 | // operands, expand this into two nodes. |
| 3768 | Result = DAG.getNode(Opcode: ARMISD::Wrapper, DL, VT: PtrVT, |
| 3769 | Operand: DAG.getTargetGlobalAddress(GV, DL, VT: PtrVT, /*offset=*/0, |
| 3770 | TargetFlags)); |
| 3771 | if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB)) |
| 3772 | Result = DAG.getLoad(VT: PtrVT, dl: DL, Chain: DAG.getEntryNode(), Ptr: Result, |
| 3773 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction())); |
| 3774 | return Result; |
| 3775 | } |
| 3776 | |
| 3777 | SDValue |
| 3778 | ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { |
| 3779 | SDLoc dl(Op); |
| 3780 | SDValue Val = DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32); |
| 3781 | return DAG.getNode(Opcode: ARMISD::EH_SJLJ_SETJMP, DL: dl, |
| 3782 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::Other), N1: Op.getOperand(i: 0), |
| 3783 | N2: Op.getOperand(i: 1), N3: Val); |
| 3784 | } |
| 3785 | |
| 3786 | SDValue |
| 3787 | ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { |
| 3788 | SDLoc dl(Op); |
| 3789 | return DAG.getNode(Opcode: ARMISD::EH_SJLJ_LONGJMP, DL: dl, VT: MVT::Other, N1: Op.getOperand(i: 0), |
| 3790 | N2: Op.getOperand(i: 1), N3: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 3791 | } |
| 3792 | |
| 3793 | SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, |
| 3794 | SelectionDAG &DAG) const { |
| 3795 | SDLoc dl(Op); |
| 3796 | return DAG.getNode(Opcode: ARMISD::EH_SJLJ_SETUP_DISPATCH, DL: dl, VT: MVT::Other, |
| 3797 | Operand: Op.getOperand(i: 0)); |
| 3798 | } |
| 3799 | |
| 3800 | SDValue ARMTargetLowering::LowerINTRINSIC_VOID( |
| 3801 | SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget) const { |
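// The intrinsic ID follows the chain operand when one is present, so compute
// its index instead of hard-coding it.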
| 3802 | unsigned IntNo = |
| 3803 | Op.getConstantOperandVal(i: Op.getOperand(i: 0).getValueType() == MVT::Other); |
| 3804 | switch (IntNo) { |
| 3805 | default: |
| 3806 | return SDValue(); // Don't custom lower most intrinsics. |
| 3807 | case Intrinsic::arm_gnu_eabi_mcount: { |
| 3808 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3809 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3810 | SDLoc dl(Op); |
| 3811 | SDValue Chain = Op.getOperand(i: 0); |
| 3812 | // call "\01__gnu_mcount_nc" |
| 3813 | const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo(); |
| 3814 | const uint32_t *Mask = |
| 3815 | ARI->getCallPreservedMask(MF: DAG.getMachineFunction(), CallingConv::C); |
| 3816 | assert(Mask && "Missing call preserved mask for calling convention" ); |
// Mark LR as an implicit live-in.
| 3818 | Register Reg = MF.addLiveIn(PReg: ARM::LR, RC: getRegClassFor(VT: MVT::i32)); |
| 3819 | SDValue ReturnAddress = |
| 3820 | DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg, VT: PtrVT); |
| 3821 | constexpr EVT ResultTys[] = {MVT::Other, MVT::Glue}; |
| 3822 | SDValue Callee = |
| 3823 | DAG.getTargetExternalSymbol(Sym: "\01__gnu_mcount_nc" , VT: PtrVT, TargetFlags: 0); |
| 3824 | SDValue RegisterMask = DAG.getRegisterMask(RegMask: Mask); |
| 3825 | if (Subtarget->isThumb()) |
| 3826 | return SDValue( |
| 3827 | DAG.getMachineNode( |
| 3828 | Opcode: ARM::tBL_PUSHLR, dl, ResultTys, |
| 3829 | Ops: {ReturnAddress, DAG.getTargetConstant(Val: ARMCC::AL, DL: dl, VT: PtrVT), |
| 3830 | DAG.getRegister(Reg: 0, VT: PtrVT), Callee, RegisterMask, Chain}), |
| 3831 | 0); |
| 3832 | return SDValue( |
| 3833 | DAG.getMachineNode(Opcode: ARM::BL_PUSHLR, dl, ResultTys, |
| 3834 | Ops: {ReturnAddress, Callee, RegisterMask, Chain}), |
| 3835 | 0); |
| 3836 | } |
| 3837 | } |
| 3838 | } |
| 3839 | |
| 3840 | SDValue |
| 3841 | ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, |
| 3842 | const ARMSubtarget *Subtarget) const { |
| 3843 | unsigned IntNo = Op.getConstantOperandVal(i: 0); |
| 3844 | SDLoc dl(Op); |
| 3845 | switch (IntNo) { |
| 3846 | default: return SDValue(); // Don't custom lower most intrinsics. |
| 3847 | case Intrinsic::thread_pointer: { |
| 3848 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3849 | return DAG.getNode(Opcode: ARMISD::THREAD_POINTER, DL: dl, VT: PtrVT); |
| 3850 | } |
case Intrinsic::arm_cls: {
// There is no scalar CLS instruction selectable from a generic node, so
// expand cls(x) to clz(((x ^ (x >> 31)) << 1) | 1): xoring with the sign
// turns leading sign bits into leading zeroes, and the shift/or drops the
// sign bit itself from the count.
const SDValue &Operand = Op.getOperand(i: 1);
const EVT VTy = Op.getValueType();
SDValue SRA = DAG.getNode(Opcode: ISD::SRA, DL: dl, VT: VTy, N1: Operand,
N2: DAG.getConstant(Val: 31, DL: dl, VT: VTy));
SDValue XOR = DAG.getNode(Opcode: ISD::XOR, DL: dl, VT: VTy, N1: SRA, N2: Operand);
SDValue SHL = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: VTy, N1: XOR,
N2: DAG.getConstant(Val: 1, DL: dl, VT: VTy));
SDValue OR = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: VTy, N1: SHL,
N2: DAG.getConstant(Val: 1, DL: dl, VT: VTy));
return DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT: VTy, Operand: OR);
}
case Intrinsic::arm_cls64: {
// arm_cls64 returns i32 but takes i64 input; compute it from the halves:
//   cls(x) = if cls(hi(x)) != 31 then cls(hi(x))
//            else 31 + clz(if hi(x) == 0 then lo(x) else not(lo(x)))
const SDValue &Operand = Op.getOperand(i: 1);
const EVT VTy = Op.getValueType();
SDValue Lo, Hi;
std::tie(Lo, Hi) = DAG.SplitScalar(N: Operand, DL: dl, LoVT: VTy, HiVT: VTy);
SDValue Constant0 = DAG.getConstant(Val: 0, DL: dl, VT: VTy);
SDValue Constant1 = DAG.getConstant(Val: 1, DL: dl, VT: VTy);
SDValue Constant31 = DAG.getConstant(Val: 31, DL: dl, VT: VTy);
SDValue SRAHi = DAG.getNode(Opcode: ISD::SRA, DL: dl, VT: VTy, N1: Hi, N2: Constant31);
SDValue XORHi = DAG.getNode(Opcode: ISD::XOR, DL: dl, VT: VTy, N1: SRAHi, N2: Hi);
SDValue SHLHi = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: VTy, N1: XORHi, N2: Constant1);
SDValue ORHi = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: VTy, N1: SHLHi, N2: Constant1);
SDValue CLSHi = DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT: VTy, Operand: ORHi);
SDValue CheckLo =
DAG.getSetCC(DL: dl, VT: MVT::i1, LHS: CLSHi, RHS: Constant31, Cond: ISD::SETEQ);
SDValue HiIsZero =
DAG.getSetCC(DL: dl, VT: MVT::i1, LHS: Hi, RHS: Constant0, Cond: ISD::SETEQ);
SDValue AdjustedLo =
DAG.getSelect(DL: dl, VT: VTy, Cond: HiIsZero, LHS: Lo, RHS: DAG.getNOT(DL: dl, Val: Lo, VT: VTy));
SDValue CLZAdjustedLo = DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT: VTy, Operand: AdjustedLo);
return DAG.getSelect(
DL: dl, VT: VTy, Cond: CheckLo,
LHS: DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: VTy, N1: CLZAdjustedLo, N2: Constant31),
RHS: CLSHi);
}
| 3872 | case Intrinsic::eh_sjlj_lsda: { |
| 3873 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3874 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3875 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 3876 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3877 | SDValue CPAddr; |
| 3878 | bool IsPositionIndependent = isPositionIndependent(); |
| 3879 | unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0; |
| 3880 | ARMConstantPoolValue *CPV = |
| 3881 | ARMConstantPoolConstant::Create(C: &MF.getFunction(), ID: ARMPCLabelIndex, |
| 3882 | Kind: ARMCP::CPLSDA, PCAdj); |
| 3883 | CPAddr = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
| 3884 | CPAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: CPAddr); |
| 3885 | SDValue Result = DAG.getLoad( |
| 3886 | VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: CPAddr, |
| 3887 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3888 | |
| 3889 | if (IsPositionIndependent) { |
| 3890 | SDValue PICLabel = DAG.getConstant(Val: ARMPCLabelIndex, DL: dl, VT: MVT::i32); |
| 3891 | Result = DAG.getNode(Opcode: ARMISD::PIC_ADD, DL: dl, VT: PtrVT, N1: Result, N2: PICLabel); |
| 3892 | } |
| 3893 | return Result; |
| 3894 | } |
| 3895 | case Intrinsic::arm_neon_vabs: |
| 3896 | return DAG.getNode(Opcode: ISD::ABS, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3897 | Operand: Op.getOperand(i: 1)); |
| 3898 | case Intrinsic::arm_neon_vabds: |
| 3899 | if (Op.getValueType().isInteger()) |
| 3900 | return DAG.getNode(Opcode: ISD::ABDS, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3901 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 3902 | return SDValue(); |
| 3903 | case Intrinsic::arm_neon_vabdu: |
| 3904 | return DAG.getNode(Opcode: ISD::ABDU, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3905 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 3906 | case Intrinsic::arm_neon_vmulls: |
| 3907 | case Intrinsic::arm_neon_vmullu: { |
| 3908 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) |
| 3909 | ? ARMISD::VMULLs : ARMISD::VMULLu; |
| 3910 | return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3911 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 3912 | } |
| 3913 | case Intrinsic::arm_neon_vminnm: |
| 3914 | case Intrinsic::arm_neon_vmaxnm: { |
| 3915 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm) |
| 3916 | ? ISD::FMINNUM : ISD::FMAXNUM; |
| 3917 | return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3918 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 3919 | } |
| 3920 | case Intrinsic::arm_neon_vminu: |
| 3921 | case Intrinsic::arm_neon_vmaxu: { |
| 3922 | if (Op.getValueType().isFloatingPoint()) |
| 3923 | return SDValue(); |
| 3924 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu) |
| 3925 | ? ISD::UMIN : ISD::UMAX; |
| 3926 | return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3927 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 3928 | } |
| 3929 | case Intrinsic::arm_neon_vmins: |
| 3930 | case Intrinsic::arm_neon_vmaxs: { |
| 3931 | // v{min,max}s is overloaded between signed integers and floats. |
| 3932 | if (!Op.getValueType().isFloatingPoint()) { |
| 3933 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) |
| 3934 | ? ISD::SMIN : ISD::SMAX; |
| 3935 | return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3936 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 3937 | } |
| 3938 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) |
| 3939 | ? ISD::FMINIMUM : ISD::FMAXIMUM; |
| 3940 | return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3941 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 3942 | } |
| 3943 | case Intrinsic::arm_neon_vtbl1: |
| 3944 | return DAG.getNode(Opcode: ARMISD::VTBL1, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3945 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 3946 | case Intrinsic::arm_neon_vtbl2: |
| 3947 | return DAG.getNode(Opcode: ARMISD::VTBL2, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3948 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2), N3: Op.getOperand(i: 3)); |
| 3949 | case Intrinsic::arm_mve_pred_i2v: |
| 3950 | case Intrinsic::arm_mve_pred_v2i: |
| 3951 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3952 | Operand: Op.getOperand(i: 1)); |
| 3953 | case Intrinsic::arm_mve_vreinterpretq: |
| 3954 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3955 | Operand: Op.getOperand(i: 1)); |
| 3956 | case Intrinsic::arm_mve_lsll: |
| 3957 | return DAG.getNode(Opcode: ARMISD::LSLL, DL: SDLoc(Op), VTList: Op->getVTList(), |
| 3958 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2), N3: Op.getOperand(i: 3)); |
| 3959 | case Intrinsic::arm_mve_asrl: |
| 3960 | return DAG.getNode(Opcode: ARMISD::ASRL, DL: SDLoc(Op), VTList: Op->getVTList(), |
| 3961 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2), N3: Op.getOperand(i: 3)); |
| 3962 | } |
| 3963 | } |
| 3964 | |
| 3965 | static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, |
| 3966 | const ARMSubtarget *Subtarget) { |
| 3967 | SDLoc dl(Op); |
| 3968 | auto SSID = static_cast<SyncScope::ID>(Op.getConstantOperandVal(i: 2)); |
| 3969 | if (SSID == SyncScope::SingleThread) |
| 3970 | return Op; |
| 3971 | |
| 3972 | if (!Subtarget->hasDataBarrier()) { |
// Some ARMv6 CPUs can support data barriers with an mcr instruction.
| 3974 | // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get |
| 3975 | // here. |
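// (On those CPUs the barrier is "mcr p15, 0, <Rd>, c7, c10, 5", the CP15
// data memory barrier form that ARMISD::MEMBARRIER_MCR expands to.)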
| 3976 | assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && |
| 3977 | "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!" ); |
| 3978 | return DAG.getNode(Opcode: ARMISD::MEMBARRIER_MCR, DL: dl, VT: MVT::Other, N1: Op.getOperand(i: 0), |
| 3979 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 3980 | } |
| 3981 | |
| 3982 | AtomicOrdering Ord = |
| 3983 | static_cast<AtomicOrdering>(Op.getConstantOperandVal(i: 1)); |
| 3984 | ARM_MB::MemBOpt Domain = ARM_MB::ISH; |
| 3985 | if (Subtarget->isMClass()) { |
| 3986 | // Only a full system barrier exists in the M-class architectures. |
| 3987 | Domain = ARM_MB::SY; |
| 3988 | } else if (Subtarget->preferISHSTBarriers() && |
| 3989 | Ord == AtomicOrdering::Release) { |
| 3990 | // Swift happens to implement ISHST barriers in a way that's compatible with |
| 3991 | // Release semantics but weaker than ISH so we'd be fools not to use |
| 3992 | // it. Beware: other processors probably don't! |
| 3993 | Domain = ARM_MB::ISHST; |
| 3994 | } |
| 3995 | |
| 3996 | return DAG.getNode(Opcode: ISD::INTRINSIC_VOID, DL: dl, VT: MVT::Other, N1: Op.getOperand(i: 0), |
| 3997 | N2: DAG.getConstant(Val: Intrinsic::arm_dmb, DL: dl, VT: MVT::i32), |
| 3998 | N3: DAG.getConstant(Val: Domain, DL: dl, VT: MVT::i32)); |
| 3999 | } |
| 4000 | |
| 4001 | static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, |
| 4002 | const ARMSubtarget *Subtarget) { |
// Pre-v5TE ARM and Thumb1 do not have preload instructions.
| 4004 | if (!(Subtarget->isThumb2() || |
| 4005 | (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps()))) |
| 4006 | // Just preserve the chain. |
| 4007 | return Op.getOperand(i: 0); |
| 4008 | |
| 4009 | SDLoc dl(Op); |
| 4010 | unsigned isRead = ~Op.getConstantOperandVal(i: 2) & 1; |
| 4011 | if (!isRead && |
| 4012 | (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) |
| 4013 | // ARMv7 with MP extension has PLDW. |
| 4014 | return Op.getOperand(i: 0); |
| 4015 | |
| 4016 | unsigned isData = Op.getConstantOperandVal(i: 4); |
| 4017 | if (Subtarget->isThumb()) { |
| 4018 | // Invert the bits. |
| 4019 | isRead = ~isRead & 1; |
| 4020 | isData = ~isData & 1; |
| 4021 | } |
| 4022 | |
| 4023 | return DAG.getNode(Opcode: ARMISD::PRELOAD, DL: dl, VT: MVT::Other, N1: Op.getOperand(i: 0), |
| 4024 | N2: Op.getOperand(i: 1), N3: DAG.getConstant(Val: isRead, DL: dl, VT: MVT::i32), |
| 4025 | N4: DAG.getConstant(Val: isData, DL: dl, VT: MVT::i32)); |
| 4026 | } |
| 4027 | |
| 4028 | static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { |
| 4029 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4030 | ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); |
| 4031 | |
| 4032 | // vastart just stores the address of the VarArgsFrameIndex slot into the |
| 4033 | // memory location argument. |
| 4034 | SDLoc dl(Op); |
| 4035 | EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DL: DAG.getDataLayout()); |
| 4036 | SDValue FR = DAG.getFrameIndex(FI: FuncInfo->getVarArgsFrameIndex(), VT: PtrVT); |
| 4037 | const Value *SV = cast<SrcValueSDNode>(Val: Op.getOperand(i: 2))->getValue(); |
| 4038 | return DAG.getStore(Chain: Op.getOperand(i: 0), dl, Val: FR, Ptr: Op.getOperand(i: 1), |
| 4039 | PtrInfo: MachinePointerInfo(SV)); |
| 4040 | } |
| 4041 | |
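// Reassemble an f64 formal argument that the calling convention split into
// two i32 locations (a register pair, or a register plus a stack slot) into
// a single f64 using VMOVDRR, swapping the halves on big-endian targets.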
| 4042 | SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, |
| 4043 | CCValAssign &NextVA, |
| 4044 | SDValue &Root, |
| 4045 | SelectionDAG &DAG, |
| 4046 | const SDLoc &dl) const { |
| 4047 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4048 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 4049 | |
| 4050 | const TargetRegisterClass *RC; |
| 4051 | if (AFI->isThumb1OnlyFunction()) |
| 4052 | RC = &ARM::tGPRRegClass; |
| 4053 | else |
| 4054 | RC = &ARM::GPRRegClass; |
| 4055 | |
| 4056 | // Transform the arguments stored in physical registers into virtual ones. |
| 4057 | Register Reg = MF.addLiveIn(PReg: VA.getLocReg(), RC); |
| 4058 | SDValue ArgValue = DAG.getCopyFromReg(Chain: Root, dl, Reg, VT: MVT::i32); |
| 4059 | |
| 4060 | SDValue ArgValue2; |
| 4061 | if (NextVA.isMemLoc()) { |
| 4062 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 4063 | int FI = MFI.CreateFixedObject(Size: 4, SPOffset: NextVA.getLocMemOffset(), IsImmutable: true); |
| 4064 | |
| 4065 | // Create load node to retrieve arguments from the stack. |
| 4066 | SDValue FIN = DAG.getFrameIndex(FI, VT: getPointerTy(DL: DAG.getDataLayout())); |
| 4067 | ArgValue2 = DAG.getLoad( |
| 4068 | VT: MVT::i32, dl, Chain: Root, Ptr: FIN, |
| 4069 | PtrInfo: MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI)); |
| 4070 | } else { |
| 4071 | Reg = MF.addLiveIn(PReg: NextVA.getLocReg(), RC); |
| 4072 | ArgValue2 = DAG.getCopyFromReg(Chain: Root, dl, Reg, VT: MVT::i32); |
| 4073 | } |
| 4074 | if (!Subtarget->isLittle()) |
| 4075 | std::swap (a&: ArgValue, b&: ArgValue2); |
| 4076 | return DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: ArgValue, N2: ArgValue2); |
| 4077 | } |
| 4078 | |
| 4079 | // The remaining GPRs hold either the beginning of variable-argument |
| 4080 | // data, or the beginning of an aggregate passed by value (usually |
| 4081 | // byval). Either way, we allocate stack slots adjacent to the data |
| 4082 | // provided by our caller, and store the unallocated registers there. |
| 4083 | // If this is a variadic function, the va_list pointer will begin with |
| 4084 | // these values; otherwise, this reassembles a (byval) structure that |
| 4085 | // was split between registers and memory. |
// Return: the frame index that the registers were stored into.
| 4087 | int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, |
| 4088 | const SDLoc &dl, SDValue &Chain, |
| 4089 | const Value *OrigArg, |
| 4090 | unsigned InRegsParamRecordIdx, |
| 4091 | int ArgOffset, unsigned ArgSize) const { |
// Currently, two use-cases are possible:
// Case #1. Non-var-args function, and we meet the first byval parameter.
//          Set up the first unallocated register as the first byval
//          register; eat all remaining registers
//          (these two actions are performed by the HandleByVal method).
//          Then, here, we initialize the stack frame with
//          "store-reg" instructions.
// Case #2. Var-args function that doesn't contain byval parameters.
//          The same: eat all remaining unallocated registers and
//          initialize the stack frame.
| 4102 | |
| 4103 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4104 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 4105 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 4106 | unsigned RBegin, REnd; |
| 4107 | if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) { |
| 4108 | CCInfo.getInRegsParamInfo(InRegsParamRecordIndex: InRegsParamRecordIdx, BeginReg&: RBegin, EndReg&: REnd); |
| 4109 | } else { |
| 4110 | unsigned RBeginIdx = CCInfo.getFirstUnallocated(Regs: GPRArgRegs); |
| 4111 | RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx]; |
| 4112 | REnd = ARM::R4; |
| 4113 | } |
| 4114 | |
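// The saved registers occupy the bytes immediately below the first stack
// argument; e.g. saving r2 and r3 places them at offsets -8 and -4.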
| 4115 | if (REnd != RBegin) |
| 4116 | ArgOffset = -4 * (ARM::R4 - RBegin); |
| 4117 | |
| 4118 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 4119 | int FrameIndex = MFI.CreateFixedObject(Size: ArgSize, SPOffset: ArgOffset, IsImmutable: false); |
| 4120 | SDValue FIN = DAG.getFrameIndex(FI: FrameIndex, VT: PtrVT); |
| 4121 | |
| 4122 | SmallVector<SDValue, 4> MemOps; |
| 4123 | const TargetRegisterClass *RC = |
| 4124 | AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass; |
| 4125 | |
| 4126 | for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) { |
| 4127 | Register VReg = MF.addLiveIn(PReg: Reg, RC); |
| 4128 | SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg: VReg, VT: MVT::i32); |
| 4129 | SDValue Store = DAG.getStore(Chain: Val.getValue(R: 1), dl, Val, Ptr: FIN, |
| 4130 | PtrInfo: MachinePointerInfo(OrigArg, 4 * i)); |
| 4131 | MemOps.push_back(Elt: Store); |
| 4132 | FIN = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: FIN, N2: DAG.getConstant(Val: 4, DL: dl, VT: PtrVT)); |
| 4133 | } |
| 4134 | |
| 4135 | if (!MemOps.empty()) |
| 4136 | Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, Ops: MemOps); |
| 4137 | return FrameIndex; |
| 4138 | } |
| 4139 | |
// Set up the stack frame that the va_list pointer will start from.
| 4141 | void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, |
| 4142 | const SDLoc &dl, SDValue &Chain, |
| 4143 | unsigned ArgOffset, |
| 4144 | unsigned TotalArgRegsSaveSize, |
| 4145 | bool ForceMutable) const { |
| 4146 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4147 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 4148 | |
| 4149 | // Try to store any remaining integer argument regs |
| 4150 | // to their spots on the stack so that they may be loaded by dereferencing |
// the result of va_arg.
// If there are no regs to be stored, just point the va_list address past the
// last argument passed on the stack.
| 4154 | int FrameIndex = StoreByValRegs( |
| 4155 | CCInfo, DAG, dl, Chain, OrigArg: nullptr, InRegsParamRecordIdx: CCInfo.getInRegsParamsCount(), |
| 4156 | ArgOffset: CCInfo.getStackSize(), ArgSize: std::max(a: 4U, b: TotalArgRegsSaveSize)); |
| 4157 | AFI->setVarArgsFrameIndex(FrameIndex); |
| 4158 | } |
| 4159 | |
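// An f16/bf16 value passed in an f32 register occupies the low 16 bits of
// the s-register. Model this by bitcasting to i16, any-extending to i32 and
// bitcasting to f32; joinRegisterPartsIntoValue below performs the inverse.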
| 4160 | bool ARMTargetLowering::splitValueIntoRegisterParts( |
| 4161 | SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, |
| 4162 | unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const { |
| 4163 | EVT ValueVT = Val.getValueType(); |
| 4164 | if ((ValueVT == MVT::f16 || ValueVT == MVT::bf16) && PartVT == MVT::f32) { |
| 4165 | unsigned ValueBits = ValueVT.getSizeInBits(); |
| 4166 | unsigned PartBits = PartVT.getSizeInBits(); |
| 4167 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::getIntegerVT(BitWidth: ValueBits), Operand: Val); |
| 4168 | Val = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::getIntegerVT(BitWidth: PartBits), Operand: Val); |
| 4169 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: PartVT, Operand: Val); |
| 4170 | Parts[0] = Val; |
| 4171 | return true; |
| 4172 | } |
| 4173 | return false; |
| 4174 | } |
| 4175 | |
| 4176 | SDValue ARMTargetLowering::joinRegisterPartsIntoValue( |
| 4177 | SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, |
| 4178 | MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const { |
| 4179 | if ((ValueVT == MVT::f16 || ValueVT == MVT::bf16) && PartVT == MVT::f32) { |
| 4180 | unsigned ValueBits = ValueVT.getSizeInBits(); |
| 4181 | unsigned PartBits = PartVT.getSizeInBits(); |
| 4182 | SDValue Val = Parts[0]; |
| 4183 | |
| 4184 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::getIntegerVT(BitWidth: PartBits), Operand: Val); |
| 4185 | Val = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::getIntegerVT(BitWidth: ValueBits), Operand: Val); |
| 4186 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: ValueVT, Operand: Val); |
| 4187 | return Val; |
| 4188 | } |
| 4189 | return SDValue(); |
| 4190 | } |
| 4191 | |
| 4192 | SDValue ARMTargetLowering::LowerFormalArguments( |
| 4193 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
| 4194 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
| 4195 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { |
| 4196 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4197 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 4198 | |
| 4199 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 4200 | |
| 4201 | // Assign locations to all of the incoming arguments. |
| 4202 | SmallVector<CCValAssign, 16> ArgLocs; |
| 4203 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, |
| 4204 | *DAG.getContext()); |
| 4205 | CCInfo.AnalyzeFormalArguments(Ins, Fn: CCAssignFnForCall(CC: CallConv, isVarArg)); |
| 4206 | |
| 4207 | Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin(); |
| 4208 | unsigned CurArgIdx = 0; |
| 4209 | |
// Initially ArgRegsSaveSize is zero; we then increase this value each time
// we meet a byval parameter, and also in the case of a varargs function.
| 4213 | AFI->setArgRegsSaveSize(0); |
| 4214 | |
| 4215 | // Calculate the amount of stack space that we need to allocate to store |
| 4216 | // byval and variadic arguments that are passed in registers. |
| 4217 | // We need to know this before we allocate the first byval or variadic |
| 4218 | // argument, as they will be allocated a stack slot below the CFA (Canonical |
| 4219 | // Frame Address, the stack pointer at entry to the function). |
| 4220 | unsigned ArgRegBegin = ARM::R4; |
| 4221 | for (const CCValAssign &VA : ArgLocs) { |
| 4222 | if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount()) |
| 4223 | break; |
| 4224 | |
| 4225 | unsigned Index = VA.getValNo(); |
| 4226 | ISD::ArgFlagsTy Flags = Ins[Index].Flags; |
| 4227 | if (!Flags.isByVal()) |
| 4228 | continue; |
| 4229 | |
| 4230 | assert(VA.isMemLoc() && "unexpected byval pointer in reg" ); |
| 4231 | unsigned RBegin, REnd; |
| 4232 | CCInfo.getInRegsParamInfo(InRegsParamRecordIndex: CCInfo.getInRegsParamsProcessed(), BeginReg&: RBegin, EndReg&: REnd); |
| 4233 | ArgRegBegin = std::min(a: ArgRegBegin, b: RBegin); |
| 4234 | |
| 4235 | CCInfo.nextInRegsParam(); |
| 4236 | } |
| 4237 | CCInfo.rewindByValRegsInfo(); |
| 4238 | |
| 4239 | int lastInsIndex = -1; |
| 4240 | if (isVarArg && MFI.hasVAStart()) { |
| 4241 | unsigned RegIdx = CCInfo.getFirstUnallocated(Regs: GPRArgRegs); |
| 4242 | if (RegIdx != std::size(GPRArgRegs)) |
| 4243 | ArgRegBegin = std::min(a: ArgRegBegin, b: (unsigned)GPRArgRegs[RegIdx]); |
| 4244 | } |
| 4245 | |
| 4246 | unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin); |
| 4247 | AFI->setArgRegsSaveSize(TotalArgRegsSaveSize); |
| 4248 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 4249 | |
| 4250 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { |
| 4251 | CCValAssign &VA = ArgLocs[i]; |
| 4252 | if (Ins[VA.getValNo()].isOrigArg()) { |
| 4253 | std::advance(i&: CurOrigArg, |
| 4254 | n: Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx); |
| 4255 | CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex(); |
| 4256 | } |
| 4257 | // Arguments stored in registers. |
| 4258 | if (VA.isRegLoc()) { |
| 4259 | EVT RegVT = VA.getLocVT(); |
| 4260 | SDValue ArgValue; |
| 4261 | |
| 4262 | if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) { |
| 4263 | // f64 and vector types are split up into multiple registers or |
| 4264 | // combinations of registers and stack slots. |
| 4265 | SDValue ArgValue1 = |
| 4266 | GetF64FormalArgument(VA, NextVA&: ArgLocs[++i], Root&: Chain, DAG, dl); |
| 4267 | VA = ArgLocs[++i]; // skip ahead to next loc |
| 4268 | SDValue ArgValue2; |
| 4269 | if (VA.isMemLoc()) { |
| 4270 | int FI = MFI.CreateFixedObject(Size: 8, SPOffset: VA.getLocMemOffset(), IsImmutable: true); |
| 4271 | SDValue FIN = DAG.getFrameIndex(FI, VT: PtrVT); |
| 4272 | ArgValue2 = DAG.getLoad( |
| 4273 | VT: MVT::f64, dl, Chain, Ptr: FIN, |
| 4274 | PtrInfo: MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI)); |
| 4275 | } else { |
| 4276 | ArgValue2 = GetF64FormalArgument(VA, NextVA&: ArgLocs[++i], Root&: Chain, DAG, dl); |
| 4277 | } |
| 4278 | ArgValue = DAG.getNode(Opcode: ISD::UNDEF, DL: dl, VT: MVT::v2f64); |
| 4279 | ArgValue = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: ArgValue, |
| 4280 | N2: ArgValue1, N3: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
| 4281 | ArgValue = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: ArgValue, |
| 4282 | N2: ArgValue2, N3: DAG.getIntPtrConstant(Val: 1, DL: dl)); |
| 4283 | } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) { |
| 4284 | ArgValue = GetF64FormalArgument(VA, NextVA&: ArgLocs[++i], Root&: Chain, DAG, dl); |
| 4285 | } else { |
| 4286 | const TargetRegisterClass *RC; |
| 4287 | |
| 4288 | if (RegVT == MVT::f16 || RegVT == MVT::bf16) |
| 4289 | RC = &ARM::HPRRegClass; |
| 4290 | else if (RegVT == MVT::f32) |
| 4291 | RC = &ARM::SPRRegClass; |
| 4292 | else if (RegVT == MVT::f64 || RegVT == MVT::v4f16 || |
| 4293 | RegVT == MVT::v4bf16) |
| 4294 | RC = &ARM::DPRRegClass; |
| 4295 | else if (RegVT == MVT::v2f64 || RegVT == MVT::v8f16 || |
| 4296 | RegVT == MVT::v8bf16) |
| 4297 | RC = &ARM::QPRRegClass; |
| 4298 | else if (RegVT == MVT::i32) |
| 4299 | RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass |
| 4300 | : &ARM::GPRRegClass; |
| 4301 | else |
| 4302 | llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering" ); |
| 4303 | |
| 4304 | // Transform the arguments in physical registers into virtual ones. |
| 4305 | Register Reg = MF.addLiveIn(PReg: VA.getLocReg(), RC); |
| 4306 | ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, VT: RegVT); |
| 4307 | |
| 4308 | // If this value is passed in r0 and has the returned attribute (e.g. |
| 4309 | // C++ 'structors), record this fact for later use. |
| 4310 | if (VA.getLocReg() == ARM::R0 && Ins[VA.getValNo()].Flags.isReturned()) { |
| 4311 | AFI->setPreservesR0(); |
| 4312 | } |
| 4313 | } |
| 4314 | |
| 4315 | // If this is an 8 or 16-bit value, it is really passed promoted |
| 4316 | // to 32 bits. Insert an assert[sz]ext to capture this, then |
| 4317 | // truncate to the right size. |
| 4318 | switch (VA.getLocInfo()) { |
| 4319 | default: llvm_unreachable("Unknown loc info!" ); |
| 4320 | case CCValAssign::Full: break; |
| 4321 | case CCValAssign::BCvt: |
| 4322 | ArgValue = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getValVT(), Operand: ArgValue); |
| 4323 | break; |
| 4324 | } |
| 4325 | |
| 4326 | // f16 arguments have their size extended to 4 bytes and passed as if they |
| 4327 | // had been copied to the LSBs of a 32-bit register. |
| 4328 | // For that, it's passed extended to i32 (soft ABI) or to f32 (hard ABI) |
| 4329 | if (VA.needsCustom() && |
| 4330 | (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) |
| 4331 | ArgValue = MoveToHPR(dl, DAG, LocVT: VA.getLocVT(), ValVT: VA.getValVT(), Val: ArgValue); |
| 4332 | |
| 4333 | // On CMSE Entry Functions, formal integer arguments whose bitwidth is |
| 4334 | // less than 32 bits must be sign- or zero-extended in the callee for |
| 4335 | // security reasons. Although the ABI mandates an extension done by the |
| 4336 | // caller, the latter cannot be trusted to follow the rules of the ABI. |
| 4337 | const ISD::InputArg &Arg = Ins[VA.getValNo()]; |
| 4338 | if (AFI->isCmseNSEntryFunction() && Arg.ArgVT.isScalarInteger() && |
| 4339 | RegVT.isScalarInteger() && Arg.ArgVT.bitsLT(VT: MVT::i32)) |
| 4340 | ArgValue = handleCMSEValue(Value: ArgValue, Arg, DAG, DL: dl); |
| 4341 | |
| 4342 | InVals.push_back(Elt: ArgValue); |
    } else { // !VA.isRegLoc()
| 4344 | // Only arguments passed on the stack should make it here. |
| 4345 | assert(VA.isMemLoc()); |
      assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
| 4347 | |
| 4348 | int index = VA.getValNo(); |
| 4349 | |
| 4350 | // Some Ins[] entries become multiple ArgLoc[] entries. |
| 4351 | // Process them only once. |
      if (index != lastInsIndex) {
| 4354 | ISD::ArgFlagsTy Flags = Ins[index].Flags; |
| 4355 | // FIXME: For now, all byval parameter objects are marked mutable. |
| 4356 | // This can be changed with more analysis. |
        // In case of tail call optimization, mark all arguments mutable,
        // since they could be overwritten by lowering of arguments in case
        // of a tail call.
| 4360 | if (Flags.isByVal()) { |
| 4361 | assert(Ins[index].isOrigArg() && |
| 4362 | "Byval arguments cannot be implicit" ); |
| 4363 | unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed(); |
| 4364 | |
| 4365 | int FrameIndex = StoreByValRegs( |
| 4366 | CCInfo, DAG, dl, Chain, OrigArg: &*CurOrigArg, InRegsParamRecordIdx: CurByValIndex, |
| 4367 | ArgOffset: VA.getLocMemOffset(), ArgSize: Flags.getByValSize()); |
| 4368 | InVals.push_back(Elt: DAG.getFrameIndex(FI: FrameIndex, VT: PtrVT)); |
| 4369 | CCInfo.nextInRegsParam(); |
| 4370 | } else if (VA.needsCustom() && (VA.getValVT() == MVT::f16 || |
| 4371 | VA.getValVT() == MVT::bf16)) { |
| 4372 | // f16 and bf16 values are passed in the least-significant half of |
| 4373 | // a 4 byte stack slot. This is done as-if the extension was done |
| 4374 | // in a 32-bit register, so the actual bytes used for the value |
| 4375 | // differ between little and big endian. |
| 4376 | assert(VA.getLocVT().getSizeInBits() == 32); |
| 4377 | unsigned FIOffset = VA.getLocMemOffset(); |
| 4378 | int FI = MFI.CreateFixedObject(Size: VA.getLocVT().getSizeInBits() / 8, |
| 4379 | SPOffset: FIOffset, IsImmutable: true); |
| 4380 | |
| 4381 | SDValue Addr = DAG.getFrameIndex(FI, VT: PtrVT); |
| 4382 | if (DAG.getDataLayout().isBigEndian()) |
| 4383 | Addr = DAG.getObjectPtrOffset(SL: dl, Ptr: Addr, Offset: TypeSize::getFixed(ExactSize: 2)); |
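          // Illustrative example: a value whose meaningful bits are 0xABCD
          // sits in the slot as-if a 32-bit register held 0x0000ABCD. On a
          // big-endian target memory lays that out as 00 00 AB CD, so the
          // halfword we want starts at byte offset 2, matching the
          // adjustment above.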
| 4384 | |
| 4385 | InVals.push_back(Elt: DAG.getLoad(VT: VA.getValVT(), dl, Chain, Ptr: Addr, |
| 4386 | PtrInfo: MachinePointerInfo::getFixedStack( |
| 4387 | MF&: DAG.getMachineFunction(), FI))); |
| 4388 | |
| 4389 | } else { |
| 4390 | unsigned FIOffset = VA.getLocMemOffset(); |
| 4391 | int FI = MFI.CreateFixedObject(Size: VA.getLocVT().getSizeInBits()/8, |
| 4392 | SPOffset: FIOffset, IsImmutable: true); |
| 4393 | |
| 4394 | // Create load nodes to retrieve arguments from the stack. |
| 4395 | SDValue FIN = DAG.getFrameIndex(FI, VT: PtrVT); |
| 4396 | InVals.push_back(Elt: DAG.getLoad(VT: VA.getValVT(), dl, Chain, Ptr: FIN, |
| 4397 | PtrInfo: MachinePointerInfo::getFixedStack( |
| 4398 | MF&: DAG.getMachineFunction(), FI))); |
| 4399 | } |
| 4400 | lastInsIndex = index; |
| 4401 | } |
| 4402 | } |
| 4403 | } |
| 4404 | |
| 4405 | // varargs |
| 4406 | if (isVarArg && MFI.hasVAStart()) { |
| 4407 | VarArgStyleRegisters(CCInfo, DAG, dl, Chain, ArgOffset: CCInfo.getStackSize(), |
| 4408 | TotalArgRegsSaveSize); |
| 4409 | if (AFI->isCmseNSEntryFunction()) { |
| 4410 | DAG.getContext()->diagnose(DI: DiagnosticInfoUnsupported( |
| 4411 | DAG.getMachineFunction().getFunction(), |
| 4412 | "secure entry function must not be variadic" , dl.getDebugLoc())); |
| 4413 | } |
| 4414 | } |
| 4415 | |
| 4416 | unsigned StackArgSize = CCInfo.getStackSize(); |
| 4417 | bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; |
| 4418 | if (canGuaranteeTCO(CC: CallConv, GuaranteeTailCalls: TailCallOpt)) { |
| 4419 | // The only way to guarantee a tail call is if the callee restores its |
| 4420 | // argument area, but it must also keep the stack aligned when doing so. |
| 4421 | MaybeAlign StackAlign = DAG.getDataLayout().getStackAlignment(); |
    assert(StackAlign && "data layout string is missing stack alignment");
| 4423 | StackArgSize = alignTo(Size: StackArgSize, A: *StackAlign); |
| 4424 | |
| 4425 | AFI->setArgumentStackToRestore(StackArgSize); |
| 4426 | } |
| 4427 | AFI->setArgumentStackSize(StackArgSize); |
| 4428 | |
| 4429 | if (CCInfo.getStackSize() > 0 && AFI->isCmseNSEntryFunction()) { |
| 4430 | DAG.getContext()->diagnose(DI: DiagnosticInfoUnsupported( |
| 4431 | DAG.getMachineFunction().getFunction(), |
| 4432 | "secure entry function requires arguments on stack" , dl.getDebugLoc())); |
| 4433 | } |
| 4434 | |
| 4435 | return Chain; |
| 4436 | } |
| 4437 | |
| 4438 | /// isFloatingPointZero - Return true if this is +0.0. |
| 4439 | static bool isFloatingPointZero(SDValue Op) { |
| 4440 | if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Val&: Op)) |
| 4441 | return CFP->getValueAPF().isPosZero(); |
| 4442 | else if (ISD::isEXTLoad(N: Op.getNode()) || ISD::isNON_EXTLoad(N: Op.getNode())) { |
| 4443 | // Maybe this has already been legalized into the constant pool? |
| 4444 | if (Op.getOperand(i: 1).getOpcode() == ARMISD::Wrapper) { |
| 4445 | SDValue WrapperOp = Op.getOperand(i: 1).getOperand(i: 0); |
| 4446 | if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Val&: WrapperOp)) |
| 4447 | if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Val: CP->getConstVal())) |
| 4448 | return CFP->getValueAPF().isPosZero(); |
| 4449 | } |
| 4450 | } else if (Op->getOpcode() == ISD::BITCAST && |
| 4451 | Op->getValueType(ResNo: 0) == MVT::f64) { |
| 4452 | // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64) |
| 4453 | // created by LowerConstantFP(). |
| 4454 | SDValue BitcastOp = Op->getOperand(Num: 0); |
| 4455 | if (BitcastOp->getOpcode() == ARMISD::VMOVIMM && |
| 4456 | isNullConstant(V: BitcastOp->getOperand(Num: 0))) |
| 4457 | return true; |
| 4458 | } |
| 4459 | return false; |
| 4460 | } |
| 4461 | |
| 4462 | /// Returns appropriate ARM CMP (cmp) and corresponding condition code for |
| 4463 | /// the given operands. |
| 4464 | SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, |
| 4465 | SDValue &ARMcc, SelectionDAG &DAG, |
| 4466 | const SDLoc &dl) const { |
| 4467 | if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Val: RHS.getNode())) { |
| 4468 | unsigned C = RHSC->getZExtValue(); |
| 4469 | if (!isLegalICmpImmediate(Imm: (int32_t)C)) { |
| 4470 | // Constant does not fit, try adjusting it by one. |
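      // Worked example (illustrative): "x s< 0xFF01" cannot encode 0xFF01 as
      // a modified immediate, but C-1 = 0xFF00 can be, so the compare becomes
      // "x s<= 0xFF00". The guards against 0x80000000, 0, 0x7fffffff and
      // 0xffffffff below keep C-1 / C+1 from wrapping past the ends of the
      // signed/unsigned ranges, which would change the comparison's meaning.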
| 4471 | switch (CC) { |
| 4472 | default: break; |
| 4473 | case ISD::SETLT: |
| 4474 | case ISD::SETGE: |
| 4475 | if (C != 0x80000000 && isLegalICmpImmediate(Imm: C-1)) { |
| 4476 | CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; |
| 4477 | RHS = DAG.getConstant(Val: C - 1, DL: dl, VT: MVT::i32); |
| 4478 | } |
| 4479 | break; |
| 4480 | case ISD::SETULT: |
| 4481 | case ISD::SETUGE: |
| 4482 | if (C != 0 && isLegalICmpImmediate(Imm: C-1)) { |
| 4483 | CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; |
| 4484 | RHS = DAG.getConstant(Val: C - 1, DL: dl, VT: MVT::i32); |
| 4485 | } |
| 4486 | break; |
| 4487 | case ISD::SETLE: |
| 4488 | case ISD::SETGT: |
| 4489 | if (C != 0x7fffffff && isLegalICmpImmediate(Imm: C+1)) { |
| 4490 | CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; |
| 4491 | RHS = DAG.getConstant(Val: C + 1, DL: dl, VT: MVT::i32); |
| 4492 | } |
| 4493 | break; |
| 4494 | case ISD::SETULE: |
| 4495 | case ISD::SETUGT: |
| 4496 | if (C != 0xffffffff && isLegalICmpImmediate(Imm: C+1)) { |
| 4497 | CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; |
| 4498 | RHS = DAG.getConstant(Val: C + 1, DL: dl, VT: MVT::i32); |
| 4499 | } |
| 4500 | break; |
| 4501 | } |
| 4502 | } |
| 4503 | } else if ((ARM_AM::getShiftOpcForNode(Opcode: LHS.getOpcode()) != ARM_AM::no_shift) && |
| 4504 | (ARM_AM::getShiftOpcForNode(Opcode: RHS.getOpcode()) == ARM_AM::no_shift)) { |
| 4505 | // In ARM and Thumb-2, the compare instructions can shift their second |
| 4506 | // operand. |
| 4507 | CC = ISD::getSetCCSwappedOperands(Operation: CC); |
| 4508 | std::swap(a&: LHS, b&: RHS); |
| 4509 | } |
| 4510 | |
| 4511 | // Thumb1 has very limited immediate modes, so turning an "and" into a |
| 4512 | // shift can save multiple instructions. |
| 4513 | // |
| 4514 | // If we have (x & C1), and C1 is an appropriate mask, we can transform it |
| 4515 | // into "((x << n) >> n)". But that isn't necessarily profitable on its |
| 4516 | // own. If it's the operand to an unsigned comparison with an immediate, |
| 4517 | // we can eliminate one of the shifts: we transform |
| 4518 | // "((x << n) >> n) == C2" to "(x << n) == (C2 << n)". |
| 4519 | // |
| 4520 | // We avoid transforming cases which aren't profitable due to encoding |
| 4521 | // details: |
| 4522 | // |
| 4523 | // 1. C2 fits into the immediate field of a cmp, and the transformed version |
| 4524 | // would not; in that case, we're essentially trading one immediate load for |
| 4525 | // another. |
| 4526 | // 2. C1 is 255 or 65535, so we can use uxtb or uxth. |
| 4527 | // 3. C2 is zero; we have other code for this special case. |
| 4528 | // |
| 4529 | // FIXME: Figure out profitability for Thumb2; we usually can't save an |
| 4530 | // instruction, since the AND is always one instruction anyway, but we could |
| 4531 | // use narrow instructions in some cases. |
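  // Worked example (illustrative): for "(x & 0x3ff) == 0x300" on Thumb1,
  // both 0x3ff and 0x300 exceed the 8-bit cmp immediate and would each need
  // to be materialized. Rewriting to "(x << 22) == (0x300 << 22)" leaves a
  // single materialized constant plus one shift; countl_zero(0x3ff) == 22
  // supplies the shift amount used below.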
| 4532 | if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::AND && |
| 4533 | LHS->hasOneUse() && isa<ConstantSDNode>(Val: LHS.getOperand(i: 1)) && |
| 4534 | LHS.getValueType() == MVT::i32 && isa<ConstantSDNode>(Val: RHS) && |
| 4535 | !isSignedIntSetCC(Code: CC)) { |
| 4536 | unsigned Mask = LHS.getConstantOperandVal(i: 1); |
| 4537 | auto *RHSC = cast<ConstantSDNode>(Val: RHS.getNode()); |
| 4538 | uint64_t RHSV = RHSC->getZExtValue(); |
| 4539 | if (isMask_32(Value: Mask) && (RHSV & ~Mask) == 0 && Mask != 255 && Mask != 65535) { |
| 4540 | unsigned ShiftBits = llvm::countl_zero(Val: Mask); |
| 4541 | if (RHSV && (RHSV > 255 || (RHSV << ShiftBits) <= 255)) { |
| 4542 | SDValue ShiftAmt = DAG.getConstant(Val: ShiftBits, DL: dl, VT: MVT::i32); |
| 4543 | LHS = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: MVT::i32, N1: LHS.getOperand(i: 0), N2: ShiftAmt); |
| 4544 | RHS = DAG.getConstant(Val: RHSV << ShiftBits, DL: dl, VT: MVT::i32); |
| 4545 | } |
| 4546 | } |
| 4547 | } |
| 4548 | |
| 4549 | // The specific comparison "(x<<c) > 0x80000000U" can be optimized to a |
| 4550 | // single "lsls x, c+1". The shift sets the "C" and "Z" flags the same |
| 4551 | // way a cmp would. |
| 4552 | // FIXME: Add support for ARM/Thumb2; this would need isel patterns, and |
| 4553 | // some tweaks to the heuristics for the previous and->shift transform. |
| 4554 | // FIXME: Optimize cases where the LHS isn't a shift. |
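  // Worked example (illustrative): "(x << 2) u> 0x80000000" holds exactly
  // when "lsls r, x, #3" shifts a one out into the carry and leaves a
  // non-zero result, so the HI condition (C set, Z clear) on the lsls
  // reproduces the cmp's answer with a single instruction.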
| 4555 | if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::SHL && |
| 4556 | isa<ConstantSDNode>(Val: RHS) && RHS->getAsZExtVal() == 0x80000000U && |
| 4557 | CC == ISD::SETUGT && isa<ConstantSDNode>(Val: LHS.getOperand(i: 1)) && |
| 4558 | LHS.getConstantOperandVal(i: 1) < 31) { |
| 4559 | unsigned ShiftAmt = LHS.getConstantOperandVal(i: 1) + 1; |
| 4560 | SDValue Shift = |
| 4561 | DAG.getNode(Opcode: ARMISD::LSLS, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: FlagsVT), |
| 4562 | N1: LHS.getOperand(i: 0), N2: DAG.getConstant(Val: ShiftAmt, DL: dl, VT: MVT::i32)); |
| 4563 | ARMcc = DAG.getConstant(Val: ARMCC::HI, DL: dl, VT: MVT::i32); |
| 4564 | return Shift.getValue(R: 1); |
| 4565 | } |
| 4566 | |
| 4567 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); |
| 4568 | |
| 4569 | // If the RHS is a constant zero then the V (overflow) flag will never be |
| 4570 | // set. This can allow us to simplify GE to PL or LT to MI, which can be |
| 4571 | // simpler for other passes (like the peephole optimiser) to deal with. |
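  // E.g. "x s>= 0" compares against 0, which can never set V, so GE (N == V)
  // reduces to PL (N clear) and LT (N != V) to MI (N set).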
| 4572 | if (isNullConstant(V: RHS)) { |
| 4573 | switch (CondCode) { |
| 4574 | default: break; |
| 4575 | case ARMCC::GE: |
| 4576 | CondCode = ARMCC::PL; |
| 4577 | break; |
| 4578 | case ARMCC::LT: |
| 4579 | CondCode = ARMCC::MI; |
| 4580 | break; |
| 4581 | } |
| 4582 | } |
| 4583 | |
| 4584 | unsigned CompareType; |
| 4585 | switch (CondCode) { |
| 4586 | default: |
| 4587 | CompareType = ARMISD::CMP; |
| 4588 | break; |
| 4589 | case ARMCC::EQ: |
| 4590 | case ARMCC::NE: |
| 4591 | // Uses only Z Flag |
| 4592 | CompareType = ARMISD::CMPZ; |
| 4593 | break; |
| 4594 | } |
| 4595 | ARMcc = DAG.getConstant(Val: CondCode, DL: dl, VT: MVT::i32); |
| 4596 | return DAG.getNode(Opcode: CompareType, DL: dl, VT: FlagsVT, N1: LHS, N2: RHS); |
| 4597 | } |
| 4598 | |
/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
| 4600 | SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, |
| 4601 | SelectionDAG &DAG, const SDLoc &dl, |
| 4602 | bool Signaling) const { |
| 4603 | assert(Subtarget->hasFP64() || RHS.getValueType() != MVT::f64); |
| 4604 | SDValue Flags; |
| 4605 | if (!isFloatingPointZero(Op: RHS)) |
| 4606 | Flags = DAG.getNode(Opcode: Signaling ? ARMISD::CMPFPE : ARMISD::CMPFP, DL: dl, VT: FlagsVT, |
| 4607 | N1: LHS, N2: RHS); |
| 4608 | else |
| 4609 | Flags = DAG.getNode(Opcode: Signaling ? ARMISD::CMPFPEw0 : ARMISD::CMPFPw0, DL: dl, |
| 4610 | VT: FlagsVT, Operand: LHS); |
| 4611 | return DAG.getNode(Opcode: ARMISD::FMSTAT, DL: dl, VT: FlagsVT, Operand: Flags); |
| 4612 | } |
| 4613 | |
| 4614 | // This function returns three things: the arithmetic computation itself |
| 4615 | // (Value), a comparison (OverflowCmp), and a condition code (ARMcc). The |
| 4616 | // comparison and the condition code define the case in which the arithmetic |
| 4617 | // computation *does not* overflow. |
| 4618 | std::pair<SDValue, SDValue> |
| 4619 | ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG, |
| 4620 | SDValue &ARMcc) const { |
  assert(Op.getValueType() == MVT::i32 && "Unsupported value type");
| 4622 | |
| 4623 | SDValue Value, OverflowCmp; |
| 4624 | SDValue LHS = Op.getOperand(i: 0); |
| 4625 | SDValue RHS = Op.getOperand(i: 1); |
| 4626 | SDLoc dl(Op); |
| 4627 | |
| 4628 | // FIXME: We are currently always generating CMPs because we don't support |
| 4629 | // generating CMN through the backend. This is not as good as the natural |
| 4630 | // CMP case because it causes a register dependency and cannot be folded |
| 4631 | // later. |
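  // Worked example (illustrative) of the compare-against-an-operand trick
  // below: for SADDO with LHS = 0x7fffffff and RHS = 1, Value wraps to
  // 0x80000000, and "cmp Value, LHS" overflows in the same way the addition
  // did, so V is set and VC ("no overflow") fails. For UADDO the cmp's carry
  // answers "Value u>= LHS"; an unsigned add wraps exactly when the result
  // is smaller than an operand, so HS means no overflow.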
| 4632 | |
| 4633 | switch (Op.getOpcode()) { |
| 4634 | default: |
| 4635 | llvm_unreachable("Unknown overflow instruction!" ); |
| 4636 | case ISD::SADDO: |
| 4637 | ARMcc = DAG.getConstant(Val: ARMCC::VC, DL: dl, VT: MVT::i32); |
| 4638 | Value = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: Op.getValueType(), N1: LHS, N2: RHS); |
| 4639 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: FlagsVT, N1: Value, N2: LHS); |
| 4640 | break; |
| 4641 | case ISD::UADDO: |
| 4642 | ARMcc = DAG.getConstant(Val: ARMCC::HS, DL: dl, VT: MVT::i32); |
| 4643 | // We use ADDC here to correspond to its use in LowerALUO. |
| 4644 | // We do not use it in the USUBO case as Value may not be used. |
| 4645 | Value = DAG.getNode(Opcode: ARMISD::ADDC, DL: dl, |
| 4646 | VTList: DAG.getVTList(VT1: Op.getValueType(), VT2: MVT::i32), N1: LHS, N2: RHS) |
| 4647 | .getValue(R: 0); |
| 4648 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: FlagsVT, N1: Value, N2: LHS); |
| 4649 | break; |
| 4650 | case ISD::SSUBO: |
| 4651 | ARMcc = DAG.getConstant(Val: ARMCC::VC, DL: dl, VT: MVT::i32); |
| 4652 | Value = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: Op.getValueType(), N1: LHS, N2: RHS); |
| 4653 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: FlagsVT, N1: LHS, N2: RHS); |
| 4654 | break; |
| 4655 | case ISD::USUBO: |
| 4656 | ARMcc = DAG.getConstant(Val: ARMCC::HS, DL: dl, VT: MVT::i32); |
| 4657 | Value = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: Op.getValueType(), N1: LHS, N2: RHS); |
| 4658 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: FlagsVT, N1: LHS, N2: RHS); |
| 4659 | break; |
| 4660 | case ISD::UMULO: |
| 4661 | // We generate a UMUL_LOHI and then check if the high word is 0. |
| 4662 | ARMcc = DAG.getConstant(Val: ARMCC::EQ, DL: dl, VT: MVT::i32); |
| 4663 | Value = DAG.getNode(Opcode: ISD::UMUL_LOHI, DL: dl, |
| 4664 | VTList: DAG.getVTList(VT1: Op.getValueType(), VT2: Op.getValueType()), |
| 4665 | N1: LHS, N2: RHS); |
| 4666 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMPZ, DL: dl, VT: FlagsVT, N1: Value.getValue(R: 1), |
| 4667 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 4668 | Value = Value.getValue(R: 0); // We only want the low 32 bits for the result. |
| 4669 | break; |
| 4670 | case ISD::SMULO: |
| 4671 | // We generate a SMUL_LOHI and then check if all the bits of the high word |
| 4672 | // are the same as the sign bit of the low word. |
| 4673 | ARMcc = DAG.getConstant(Val: ARMCC::EQ, DL: dl, VT: MVT::i32); |
| 4674 | Value = DAG.getNode(Opcode: ISD::SMUL_LOHI, DL: dl, |
| 4675 | VTList: DAG.getVTList(VT1: Op.getValueType(), VT2: Op.getValueType()), |
| 4676 | N1: LHS, N2: RHS); |
| 4677 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMPZ, DL: dl, VT: FlagsVT, N1: Value.getValue(R: 1), |
| 4678 | N2: DAG.getNode(Opcode: ISD::SRA, DL: dl, VT: Op.getValueType(), |
| 4679 | N1: Value.getValue(R: 0), |
| 4680 | N2: DAG.getConstant(Val: 31, DL: dl, VT: MVT::i32))); |
| 4681 | Value = Value.getValue(R: 0); // We only want the low 32 bits for the result. |
| 4682 | break; |
| 4683 | } // switch (...) |
| 4684 | |
| 4685 | return std::make_pair(x&: Value, y&: OverflowCmp); |
| 4686 | } |
| 4687 | |
| 4688 | static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry, |
| 4689 | SelectionDAG &DAG) { |
| 4690 | SDLoc DL(BoolCarry); |
| 4691 | EVT CarryVT = BoolCarry.getValueType(); |
| 4692 | |
| 4693 | // This converts the boolean value carry into the carry flag by doing |
| 4694 | // ARMISD::SUBC Carry, 1 |
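  // E.g. BoolCarry = 1 gives 1 - 1 = 0 with no borrow, which ARM encodes as
  // C = 1; BoolCarry = 0 gives 0 - 1, which borrows and clears C.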
| 4695 | SDValue Carry = DAG.getNode(Opcode: ARMISD::SUBC, DL, |
| 4696 | VTList: DAG.getVTList(VT1: CarryVT, VT2: MVT::i32), |
| 4697 | N1: BoolCarry, N2: DAG.getConstant(Val: 1, DL, VT: CarryVT)); |
| 4698 | return Carry.getValue(R: 1); |
| 4699 | } |
| 4700 | |
| 4701 | static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT, |
| 4702 | SelectionDAG &DAG) { |
| 4703 | SDLoc DL(Flags); |
| 4704 | |
| 4705 | // Now convert the carry flag into a boolean carry. We do this |
| 4706 | // using ARMISD:ADDE 0, 0, Carry |
| 4707 | return DAG.getNode(Opcode: ARMISD::ADDE, DL, VTList: DAG.getVTList(VT1: VT, VT2: MVT::i32), |
| 4708 | N1: DAG.getConstant(Val: 0, DL, VT: MVT::i32), |
| 4709 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32), N3: Flags); |
| 4710 | } |
| 4711 | |
| 4712 | SDValue ARMTargetLowering::LowerALUO(SDValue Op, SelectionDAG &DAG) const { |
| 4713 | // Let legalize expand this if it isn't a legal type yet. |
| 4714 | if (!isTypeLegal(VT: Op.getValueType())) |
| 4715 | return SDValue(); |
| 4716 | |
| 4717 | SDValue LHS = Op.getOperand(i: 0); |
| 4718 | SDValue RHS = Op.getOperand(i: 1); |
| 4719 | SDLoc dl(Op); |
| 4720 | |
| 4721 | EVT VT = Op.getValueType(); |
| 4722 | SDVTList VTs = DAG.getVTList(VT1: VT, VT2: MVT::i32); |
| 4723 | SDValue Value; |
| 4724 | SDValue Overflow; |
| 4725 | switch (Op.getOpcode()) { |
| 4726 | case ISD::UADDO: |
| 4727 | Value = DAG.getNode(Opcode: ARMISD::ADDC, DL: dl, VTList: VTs, N1: LHS, N2: RHS); |
| 4728 | // Convert the carry flag into a boolean value. |
| 4729 | Overflow = ConvertCarryFlagToBooleanCarry(Flags: Value.getValue(R: 1), VT, DAG); |
| 4730 | break; |
| 4731 | case ISD::USUBO: |
| 4732 | Value = DAG.getNode(Opcode: ARMISD::SUBC, DL: dl, VTList: VTs, N1: LHS, N2: RHS); |
| 4733 | // Convert the carry flag into a boolean value. |
| 4734 | Overflow = ConvertCarryFlagToBooleanCarry(Flags: Value.getValue(R: 1), VT, DAG); |
| 4735 | // ARMISD::SUBC returns 0 when we have to borrow, so make it an overflow |
| 4736 | // value. So compute 1 - C. |
| 4737 | Overflow = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
| 4738 | N1: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32), N2: Overflow); |
| 4739 | break; |
| 4740 | default: { |
| 4741 | // Handle other operations with getARMXALUOOp |
| 4742 | SDValue OverflowCmp, ARMcc; |
| 4743 | std::tie(args&: Value, args&: OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc); |
| 4744 | // We use 0 and 1 as false and true values. |
| 4745 | // ARMcc represents the "no overflow" condition (e.g., VC for signed ops). |
| 4746 | // CMOV operand order is (FalseVal, TrueVal), so we put 1 in FalseVal |
| 4747 | // position to get Overflow=1 when the "no overflow" condition is false. |
| 4748 | Overflow = |
| 4749 | DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT: MVT::i32, |
| 4750 | N1: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32), // FalseVal: overflow |
| 4751 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), // TrueVal: no overflow |
| 4752 | N3: ARMcc, N4: OverflowCmp); |
| 4753 | break; |
| 4754 | } |
| 4755 | } |
| 4756 | |
| 4757 | return DAG.getNode(Opcode: ISD::MERGE_VALUES, DL: dl, VTList: VTs, N1: Value, N2: Overflow); |
| 4758 | } |
| 4759 | |
| 4760 | static SDValue LowerADDSUBSAT(SDValue Op, SelectionDAG &DAG, |
| 4761 | const ARMSubtarget *Subtarget) { |
| 4762 | EVT VT = Op.getValueType(); |
| 4763 | if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP() || Subtarget->isThumb1Only()) |
| 4764 | return SDValue(); |
| 4765 | if (!VT.isSimple()) |
| 4766 | return SDValue(); |
| 4767 | |
| 4768 | unsigned NewOpcode; |
| 4769 | switch (VT.getSimpleVT().SimpleTy) { |
| 4770 | default: |
| 4771 | return SDValue(); |
| 4772 | case MVT::i8: |
| 4773 | switch (Op->getOpcode()) { |
| 4774 | case ISD::UADDSAT: |
| 4775 | NewOpcode = ARMISD::UQADD8b; |
| 4776 | break; |
| 4777 | case ISD::SADDSAT: |
| 4778 | NewOpcode = ARMISD::QADD8b; |
| 4779 | break; |
| 4780 | case ISD::USUBSAT: |
| 4781 | NewOpcode = ARMISD::UQSUB8b; |
| 4782 | break; |
| 4783 | case ISD::SSUBSAT: |
| 4784 | NewOpcode = ARMISD::QSUB8b; |
| 4785 | break; |
| 4786 | } |
| 4787 | break; |
| 4788 | case MVT::i16: |
| 4789 | switch (Op->getOpcode()) { |
| 4790 | case ISD::UADDSAT: |
| 4791 | NewOpcode = ARMISD::UQADD16b; |
| 4792 | break; |
| 4793 | case ISD::SADDSAT: |
| 4794 | NewOpcode = ARMISD::QADD16b; |
| 4795 | break; |
| 4796 | case ISD::USUBSAT: |
| 4797 | NewOpcode = ARMISD::UQSUB16b; |
| 4798 | break; |
| 4799 | case ISD::SSUBSAT: |
| 4800 | NewOpcode = ARMISD::QSUB16b; |
| 4801 | break; |
| 4802 | } |
| 4803 | break; |
| 4804 | } |
| 4805 | |
| 4806 | SDLoc dl(Op); |
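  // Only lane 0 of the packed operation carries the result we want: the
  // sign-extended inputs make the upper lanes copies of the sign bit, and
  // the TRUNCATE below discards whatever those lanes saturate to.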
| 4807 | SDValue Add = |
| 4808 | DAG.getNode(Opcode: NewOpcode, DL: dl, VT: MVT::i32, |
| 4809 | N1: DAG.getSExtOrTrunc(Op: Op->getOperand(Num: 0), DL: dl, VT: MVT::i32), |
| 4810 | N2: DAG.getSExtOrTrunc(Op: Op->getOperand(Num: 1), DL: dl, VT: MVT::i32)); |
| 4811 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT, Operand: Add); |
| 4812 | } |
| 4813 | |
| 4814 | SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { |
| 4815 | SDValue Cond = Op.getOperand(i: 0); |
| 4816 | SDValue SelectTrue = Op.getOperand(i: 1); |
| 4817 | SDValue SelectFalse = Op.getOperand(i: 2); |
| 4818 | SDLoc dl(Op); |
| 4819 | unsigned Opc = Cond.getOpcode(); |
| 4820 | |
| 4821 | if (Cond.getResNo() == 1 && |
| 4822 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || |
| 4823 | Opc == ISD::USUBO)) { |
| 4824 | if (!isTypeLegal(VT: Cond->getValueType(ResNo: 0))) |
| 4825 | return SDValue(); |
| 4826 | |
| 4827 | SDValue Value, OverflowCmp; |
| 4828 | SDValue ARMcc; |
| 4829 | std::tie(args&: Value, args&: OverflowCmp) = getARMXALUOOp(Op: Cond, DAG, ARMcc); |
| 4830 | EVT VT = Op.getValueType(); |
| 4831 | |
| 4832 | return getCMOV(dl, VT, FalseVal: SelectTrue, TrueVal: SelectFalse, ARMcc, Flags: OverflowCmp, DAG); |
| 4833 | } |
| 4834 | |
| 4835 | // Convert: |
| 4836 | // |
| 4837 | // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) |
| 4838 | // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) |
| 4839 | // |
| 4840 | if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { |
| 4841 | const ConstantSDNode *CMOVTrue = |
| 4842 | dyn_cast<ConstantSDNode>(Val: Cond.getOperand(i: 0)); |
| 4843 | const ConstantSDNode *CMOVFalse = |
| 4844 | dyn_cast<ConstantSDNode>(Val: Cond.getOperand(i: 1)); |
| 4845 | |
| 4846 | if (CMOVTrue && CMOVFalse) { |
| 4847 | unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); |
| 4848 | unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); |
| 4849 | |
| 4850 | SDValue True; |
| 4851 | SDValue False; |
| 4852 | if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { |
| 4853 | True = SelectTrue; |
| 4854 | False = SelectFalse; |
| 4855 | } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { |
| 4856 | True = SelectFalse; |
| 4857 | False = SelectTrue; |
| 4858 | } |
| 4859 | |
| 4860 | if (True.getNode() && False.getNode()) |
| 4861 | return getCMOV(dl, VT: Op.getValueType(), FalseVal: True, TrueVal: False, ARMcc: Cond.getOperand(i: 2), |
| 4862 | Flags: Cond.getOperand(i: 3), DAG); |
| 4863 | } |
| 4864 | } |
| 4865 | |
| 4866 | // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the |
| 4867 | // undefined bits before doing a full-word comparison with zero. |
| 4868 | Cond = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: Cond.getValueType(), N1: Cond, |
| 4869 | N2: DAG.getConstant(Val: 1, DL: dl, VT: Cond.getValueType())); |
| 4870 | |
| 4871 | return DAG.getSelectCC(DL: dl, LHS: Cond, |
| 4872 | RHS: DAG.getConstant(Val: 0, DL: dl, VT: Cond.getValueType()), |
| 4873 | True: SelectTrue, False: SelectFalse, Cond: ISD::SETNE); |
| 4874 | } |
| 4875 | |
| 4876 | static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode, |
| 4877 | bool &swpCmpOps, bool &swpVselOps) { |
| 4878 | // Start by selecting the GE condition code for opcodes that return true for |
| 4879 | // 'equality' |
| 4880 | if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE || |
| 4881 | CC == ISD::SETULE || CC == ISD::SETGE || CC == ISD::SETLE) |
| 4882 | CondCode = ARMCC::GE; |
| 4883 | |
| 4884 | // and GT for opcodes that return false for 'equality'. |
| 4885 | else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT || |
| 4886 | CC == ISD::SETULT || CC == ISD::SETGT || CC == ISD::SETLT) |
| 4887 | CondCode = ARMCC::GT; |
| 4888 | |
| 4889 | // Since we are constrained to GE/GT, if the opcode contains 'less', we need |
| 4890 | // to swap the compare operands. |
| 4891 | if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT || |
| 4892 | CC == ISD::SETULT || CC == ISD::SETLE || CC == ISD::SETLT) |
| 4893 | swpCmpOps = true; |
| 4894 | |
| 4895 | // Both GT and GE are ordered comparisons, and return false for 'unordered'. |
| 4896 | // If we have an unordered opcode, we need to swap the operands to the VSEL |
| 4897 | // instruction (effectively negating the condition). |
| 4898 | // |
| 4899 | // This also has the effect of swapping which one of 'less' or 'greater' |
| 4900 | // returns true, so we also swap the compare operands. It also switches |
| 4901 | // whether we return true for 'equality', so we compensate by picking the |
| 4902 | // opposite condition code to our original choice. |
| 4903 | if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE || |
| 4904 | CC == ISD::SETUGT) { |
| 4905 | swpCmpOps = !swpCmpOps; |
| 4906 | swpVselOps = !swpVselOps; |
| 4907 | CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT; |
| 4908 | } |
| 4909 | |
| 4910 | // 'ordered' is 'anything but unordered', so use the VS condition code and |
| 4911 | // swap the VSEL operands. |
| 4912 | if (CC == ISD::SETO) { |
| 4913 | CondCode = ARMCC::VS; |
| 4914 | swpVselOps = true; |
| 4915 | } |
| 4916 | |
| 4917 | // 'unordered or not equal' is 'anything but equal', so use the EQ condition |
| 4918 | // code and swap the VSEL operands. Also do this if we don't care about the |
| 4919 | // unordered case. |
| 4920 | if (CC == ISD::SETUNE || CC == ISD::SETNE) { |
| 4921 | CondCode = ARMCC::EQ; |
| 4922 | swpVselOps = true; |
| 4923 | } |
| 4924 | } |
| 4925 | |
| 4926 | SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, |
| 4927 | SDValue TrueVal, SDValue ARMcc, |
| 4928 | SDValue Flags, SelectionDAG &DAG) const { |
| 4929 | if (!Subtarget->hasFP64() && VT == MVT::f64) { |
| 4930 | FalseVal = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
| 4931 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: FalseVal); |
| 4932 | TrueVal = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
| 4933 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: TrueVal); |
| 4934 | |
| 4935 | SDValue TrueLow = TrueVal.getValue(R: 0); |
| 4936 | SDValue TrueHigh = TrueVal.getValue(R: 1); |
| 4937 | SDValue FalseLow = FalseVal.getValue(R: 0); |
| 4938 | SDValue FalseHigh = FalseVal.getValue(R: 1); |
| 4939 | |
| 4940 | SDValue Low = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT: MVT::i32, N1: FalseLow, N2: TrueLow, |
| 4941 | N3: ARMcc, N4: Flags); |
| 4942 | SDValue High = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT: MVT::i32, N1: FalseHigh, N2: TrueHigh, |
| 4943 | N3: ARMcc, N4: Flags); |
| 4944 | |
| 4945 | return DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: Low, N2: High); |
| 4946 | } |
| 4947 | return DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: FalseVal, N2: TrueVal, N3: ARMcc, N4: Flags); |
| 4948 | } |
| 4949 | |
| 4950 | static bool isGTorGE(ISD::CondCode CC) { |
| 4951 | return CC == ISD::SETGT || CC == ISD::SETGE; |
| 4952 | } |
| 4953 | |
| 4954 | static bool isLTorLE(ISD::CondCode CC) { |
| 4955 | return CC == ISD::SETLT || CC == ISD::SETLE; |
| 4956 | } |
| 4957 | |
| 4958 | // See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating. |
| 4959 | // All of these conditions (and their <= and >= counterparts) will do: |
| 4960 | // x < k ? k : x |
| 4961 | // x > k ? x : k |
| 4962 | // k < x ? x : k |
| 4963 | // k > x ? k : x |
| 4964 | static bool isLowerSaturate(const SDValue LHS, const SDValue RHS, |
| 4965 | const SDValue TrueVal, const SDValue FalseVal, |
| 4966 | const ISD::CondCode CC, const SDValue K) { |
| 4967 | return (isGTorGE(CC) && |
| 4968 | ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) || |
| 4969 | (isLTorLE(CC) && |
| 4970 | ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))); |
| 4971 | } |
| 4972 | |
| 4973 | // Check if two chained conditionals could be converted into SSAT or USAT. |
| 4974 | // |
| 4975 | // SSAT can replace a set of two conditional selectors that bound a number to an |
| 4976 | // interval of type [k, ~k] when k + 1 is a power of 2. Here are some examples: |
| 4977 | // |
| 4978 | // x < -k ? -k : (x > k ? k : x) |
| 4979 | // x < -k ? -k : (x < k ? x : k) |
| 4980 | // x > -k ? (x > k ? k : x) : -k |
| 4981 | // x < k ? (x < -k ? -k : x) : k |
| 4982 | // etc. |
| 4983 | // |
| 4984 | // LLVM canonicalizes these to either a min(max()) or a max(min()) |
| 4985 | // pattern. This function tries to match one of these and will return a SSAT |
| 4986 | // node if successful. |
| 4987 | // |
// USAT works similarly to SSAT, but bounds the value to the interval [0, k],
// where k + 1 is a power of 2.
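//
// Worked example (illustrative): with k = 127 (so -k-1 = -128 = ~127),
// "x < -128 ? -128 : (x > 127 ? 127 : x)" clamps x to [-128, 127], i.e. a
// signed saturate to 8 bits; countr_one(127) == 7 is the operand given to
// the ARMISD::SSAT node below. For bounds [0, 255] the same shape becomes
// USAT with countr_one(255) == 8.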
| 4990 | static SDValue LowerSaturatingConditional(SDValue Op, SelectionDAG &DAG) { |
| 4991 | EVT VT = Op.getValueType(); |
| 4992 | SDValue V1 = Op.getOperand(i: 0); |
| 4993 | SDValue K1 = Op.getOperand(i: 1); |
| 4994 | SDValue TrueVal1 = Op.getOperand(i: 2); |
| 4995 | SDValue FalseVal1 = Op.getOperand(i: 3); |
| 4996 | ISD::CondCode CC1 = cast<CondCodeSDNode>(Val: Op.getOperand(i: 4))->get(); |
| 4997 | |
| 4998 | const SDValue Op2 = isa<ConstantSDNode>(Val: TrueVal1) ? FalseVal1 : TrueVal1; |
| 4999 | if (Op2.getOpcode() != ISD::SELECT_CC) |
| 5000 | return SDValue(); |
| 5001 | |
| 5002 | SDValue V2 = Op2.getOperand(i: 0); |
| 5003 | SDValue K2 = Op2.getOperand(i: 1); |
| 5004 | SDValue TrueVal2 = Op2.getOperand(i: 2); |
| 5005 | SDValue FalseVal2 = Op2.getOperand(i: 3); |
| 5006 | ISD::CondCode CC2 = cast<CondCodeSDNode>(Val: Op2.getOperand(i: 4))->get(); |
| 5007 | |
| 5008 | SDValue V1Tmp = V1; |
| 5009 | SDValue V2Tmp = V2; |
| 5010 | |
| 5011 | // Check that the registers and the constants match a max(min()) or min(max()) |
| 5012 | // pattern |
| 5013 | if (V1Tmp != TrueVal1 || V2Tmp != TrueVal2 || K1 != FalseVal1 || |
| 5014 | K2 != FalseVal2 || |
| 5015 | !((isGTorGE(CC: CC1) && isLTorLE(CC: CC2)) || (isLTorLE(CC: CC1) && isGTorGE(CC: CC2)))) |
| 5016 | return SDValue(); |
| 5017 | |
| 5018 | // Check that the constant in the lower-bound check is |
| 5019 | // the opposite of the constant in the upper-bound check |
| 5020 | // in 1's complement. |
| 5021 | if (!isa<ConstantSDNode>(Val: K1) || !isa<ConstantSDNode>(Val: K2)) |
| 5022 | return SDValue(); |
| 5023 | |
| 5024 | int64_t Val1 = cast<ConstantSDNode>(Val&: K1)->getSExtValue(); |
| 5025 | int64_t Val2 = cast<ConstantSDNode>(Val&: K2)->getSExtValue(); |
| 5026 | int64_t PosVal = std::max(a: Val1, b: Val2); |
| 5027 | int64_t NegVal = std::min(a: Val1, b: Val2); |
| 5028 | |
| 5029 | if (!((Val1 > Val2 && isLTorLE(CC: CC1)) || (Val1 < Val2 && isLTorLE(CC: CC2))) || |
| 5030 | !isPowerOf2_64(Value: PosVal + 1)) |
| 5031 | return SDValue(); |
| 5032 | |
| 5033 | // Handle the difference between USAT (unsigned) and SSAT (signed) |
| 5034 | // saturation |
| 5035 | // At this point, PosVal is guaranteed to be positive |
| 5036 | uint64_t K = PosVal; |
| 5037 | SDLoc dl(Op); |
| 5038 | if (Val1 == ~Val2) |
| 5039 | return DAG.getNode(Opcode: ARMISD::SSAT, DL: dl, VT, N1: V2Tmp, |
| 5040 | N2: DAG.getConstant(Val: llvm::countr_one(Value: K), DL: dl, VT)); |
| 5041 | if (NegVal == 0) |
| 5042 | return DAG.getNode(Opcode: ARMISD::USAT, DL: dl, VT, N1: V2Tmp, |
| 5043 | N2: DAG.getConstant(Val: llvm::countr_one(Value: K), DL: dl, VT)); |
| 5044 | |
| 5045 | return SDValue(); |
| 5046 | } |
| 5047 | |
| 5048 | // Check if a condition of the type x < k ? k : x can be converted into a |
| 5049 | // bit operation instead of conditional moves. |
| 5050 | // Currently this is allowed given: |
| 5051 | // - The conditions and values match up |
| 5052 | // - k is 0 or -1 (all ones) |
// This function will not check the last condition; that's up to the caller.
| 5054 | // It returns true if the transformation can be made, and in such case |
| 5055 | // returns x in V, and k in SatK. |
| 5056 | static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V, |
| 5057 | SDValue &SatK) |
| 5058 | { |
| 5059 | SDValue LHS = Op.getOperand(i: 0); |
| 5060 | SDValue RHS = Op.getOperand(i: 1); |
| 5061 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 4))->get(); |
| 5062 | SDValue TrueVal = Op.getOperand(i: 2); |
| 5063 | SDValue FalseVal = Op.getOperand(i: 3); |
| 5064 | |
| 5065 | SDValue *K = isa<ConstantSDNode>(Val: LHS) ? &LHS : isa<ConstantSDNode>(Val: RHS) |
| 5066 | ? &RHS |
| 5067 | : nullptr; |
| 5068 | |
| 5069 | // No constant operation in comparison, early out |
| 5070 | if (!K) |
| 5071 | return false; |
| 5072 | |
| 5073 | SDValue KTmp = isa<ConstantSDNode>(Val: TrueVal) ? TrueVal : FalseVal; |
| 5074 | V = (KTmp == TrueVal) ? FalseVal : TrueVal; |
| 5075 | SDValue VTmp = (K && *K == LHS) ? RHS : LHS; |
| 5076 | |
| 5077 | // If the constant on left and right side, or variable on left and right, |
| 5078 | // does not match, early out |
| 5079 | if (*K != KTmp || V != VTmp) |
| 5080 | return false; |
| 5081 | |
| 5082 | if (isLowerSaturate(LHS, RHS, TrueVal, FalseVal, CC, K: *K)) { |
| 5083 | SatK = *K; |
| 5084 | return true; |
| 5085 | } |
| 5086 | |
| 5087 | return false; |
| 5088 | } |
| 5089 | |
| 5090 | bool ARMTargetLowering::isUnsupportedFloatingType(EVT VT) const { |
| 5091 | if (VT == MVT::f32) |
| 5092 | return !Subtarget->hasVFP2Base(); |
| 5093 | if (VT == MVT::f64) |
| 5094 | return !Subtarget->hasFP64(); |
| 5095 | if (VT == MVT::f16) |
| 5096 | return !Subtarget->hasFullFP16(); |
| 5097 | return false; |
| 5098 | } |
| 5099 | |
| 5100 | SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { |
| 5101 | EVT VT = Op.getValueType(); |
| 5102 | SDLoc dl(Op); |
| 5103 | |
| 5104 | // Try to convert two saturating conditional selects into a single SSAT |
| 5105 | if ((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || Subtarget->isThumb2()) |
| 5106 | if (SDValue SatValue = LowerSaturatingConditional(Op, DAG)) |
| 5107 | return SatValue; |
| 5108 | |
| 5109 | // Try to convert expressions of the form x < k ? k : x (and similar forms) |
| 5110 | // into more efficient bit operations, which is possible when k is 0 or -1 |
| 5111 | // On ARM and Thumb-2 which have flexible operand 2 this will result in |
| 5112 | // single instructions. On Thumb the shift and the bit operation will be two |
| 5113 | // instructions. |
| 5114 | // Only allow this transformation on full-width (32-bit) operations |
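  //
  // Worked example (illustrative): for k = 0, "x < 0 ? 0 : x" becomes
  // "x & ~(x >> 31)"; the arithmetic shift smears the sign bit, so negative
  // x is masked to 0 and non-negative x passes through unchanged. For
  // k = -1, "x < -1 ? -1 : x" becomes "x | (x >> 31)".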
| 5115 | SDValue LowerSatConstant; |
| 5116 | SDValue SatValue; |
| 5117 | if (VT == MVT::i32 && |
| 5118 | isLowerSaturatingConditional(Op, V&: SatValue, SatK&: LowerSatConstant)) { |
| 5119 | SDValue ShiftV = DAG.getNode(Opcode: ISD::SRA, DL: dl, VT, N1: SatValue, |
| 5120 | N2: DAG.getConstant(Val: 31, DL: dl, VT)); |
| 5121 | if (isNullConstant(V: LowerSatConstant)) { |
| 5122 | SDValue NotShiftV = DAG.getNode(Opcode: ISD::XOR, DL: dl, VT, N1: ShiftV, |
| 5123 | N2: DAG.getAllOnesConstant(DL: dl, VT)); |
| 5124 | return DAG.getNode(Opcode: ISD::AND, DL: dl, VT, N1: SatValue, N2: NotShiftV); |
| 5125 | } else if (isAllOnesConstant(V: LowerSatConstant)) |
| 5126 | return DAG.getNode(Opcode: ISD::OR, DL: dl, VT, N1: SatValue, N2: ShiftV); |
| 5127 | } |
| 5128 | |
| 5129 | SDValue LHS = Op.getOperand(i: 0); |
| 5130 | SDValue RHS = Op.getOperand(i: 1); |
| 5131 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 4))->get(); |
| 5132 | SDValue TrueVal = Op.getOperand(i: 2); |
| 5133 | SDValue FalseVal = Op.getOperand(i: 3); |
| 5134 | ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(Val&: FalseVal); |
| 5135 | ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(Val&: TrueVal); |
| 5136 | ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Val&: RHS); |
| 5137 | if (Op.getValueType().isInteger()) { |
| 5138 | |
| 5139 | // Check for SMAX(lhs, 0) and SMIN(lhs, 0) patterns. |
| 5140 | // (SELECT_CC setgt, lhs, 0, lhs, 0) -> (BIC lhs, (SRA lhs, typesize-1)) |
| 5141 | // (SELECT_CC setlt, lhs, 0, lhs, 0) -> (AND lhs, (SRA lhs, typesize-1)) |
| 5142 | // Both require less instructions than compare and conditional select. |
| 5143 | if ((CC == ISD::SETGT || CC == ISD::SETLT) && LHS == TrueVal && RHSC && |
| 5144 | RHSC->isZero() && CFVal && CFVal->isZero() && |
| 5145 | LHS.getValueType() == RHS.getValueType()) { |
| 5146 | EVT VT = LHS.getValueType(); |
| 5147 | SDValue Shift = |
| 5148 | DAG.getNode(Opcode: ISD::SRA, DL: dl, VT, N1: LHS, |
| 5149 | N2: DAG.getConstant(Val: VT.getSizeInBits() - 1, DL: dl, VT)); |
| 5150 | |
| 5151 | if (CC == ISD::SETGT) |
| 5152 | Shift = DAG.getNOT(DL: dl, Val: Shift, VT); |
| 5153 | |
| 5154 | return DAG.getNode(Opcode: ISD::AND, DL: dl, VT, N1: LHS, N2: Shift); |
| 5155 | } |
| 5156 | } |
| 5157 | |
| 5158 | if (Subtarget->hasV8_1MMainlineOps() && CFVal && CTVal && |
| 5159 | LHS.getValueType() == MVT::i32 && RHS.getValueType() == MVT::i32) { |
| 5160 | unsigned TVal = CTVal->getZExtValue(); |
| 5161 | unsigned FVal = CFVal->getZExtValue(); |
| 5162 | unsigned Opcode = 0; |
| 5163 | |
| 5164 | if (TVal == ~FVal) { |
| 5165 | Opcode = ARMISD::CSINV; |
| 5166 | } else if (TVal == ~FVal + 1) { |
| 5167 | Opcode = ARMISD::CSNEG; |
| 5168 | } else if (TVal + 1 == FVal) { |
| 5169 | Opcode = ARMISD::CSINC; |
| 5170 | } else if (TVal == FVal + 1) { |
| 5171 | Opcode = ARMISD::CSINC; |
| 5172 | std::swap(a&: TrueVal, b&: FalseVal); |
| 5173 | std::swap(a&: TVal, b&: FVal); |
| 5174 | CC = ISD::getSetCCInverse(Operation: CC, Type: LHS.getValueType()); |
| 5175 | } |
| 5176 | |
| 5177 | if (Opcode) { |
| 5178 | // If one of the constants is cheaper than another, materialise the |
| 5179 | // cheaper one and let the csel generate the other. |
| 5180 | if (Opcode != ARMISD::CSINC && |
| 5181 | HasLowerConstantMaterializationCost(Val1: FVal, Val2: TVal, Subtarget)) { |
| 5182 | std::swap(a&: TrueVal, b&: FalseVal); |
| 5183 | std::swap(a&: TVal, b&: FVal); |
| 5184 | CC = ISD::getSetCCInverse(Operation: CC, Type: LHS.getValueType()); |
| 5185 | } |
| 5186 | |
| 5187 | // Attempt to use ZR checking TVal is 0, possibly inverting the condition |
      // to get there. CSINC is not invertible like the other two
      // (~(~a) == a, -(-a) == a, but (a+1)+1 != a).
| 5190 | if (FVal == 0 && Opcode != ARMISD::CSINC) { |
| 5191 | std::swap(a&: TrueVal, b&: FalseVal); |
| 5192 | std::swap(a&: TVal, b&: FVal); |
| 5193 | CC = ISD::getSetCCInverse(Operation: CC, Type: LHS.getValueType()); |
| 5194 | } |
| 5195 | |
| 5196 | // Drops F's value because we can get it by inverting/negating TVal. |
| 5197 | FalseVal = TrueVal; |
| 5198 | |
| 5199 | SDValue ARMcc; |
| 5200 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
| 5201 | EVT VT = TrueVal.getValueType(); |
| 5202 | return DAG.getNode(Opcode, DL: dl, VT, N1: TrueVal, N2: FalseVal, N3: ARMcc, N4: Cmp); |
| 5203 | } |
| 5204 | } |
| 5205 | |
| 5206 | if (isUnsupportedFloatingType(VT: LHS.getValueType())) { |
| 5207 | softenSetCCOperands(DAG, VT: LHS.getValueType(), NewLHS&: LHS, NewRHS&: RHS, CCCode&: CC, DL: dl, OldLHS: LHS, OldRHS: RHS); |
| 5208 | |
| 5209 | // If softenSetCCOperands only returned one value, we should compare it to |
| 5210 | // zero. |
| 5211 | if (!RHS.getNode()) { |
| 5212 | RHS = DAG.getConstant(Val: 0, DL: dl, VT: LHS.getValueType()); |
| 5213 | CC = ISD::SETNE; |
| 5214 | } |
| 5215 | } |
| 5216 | |
| 5217 | if (LHS.getValueType() == MVT::i32) { |
| 5218 | // Try to generate VSEL on ARMv8. |
| 5219 | // The VSEL instruction can't use all the usual ARM condition |
| 5220 | // codes: it only has two bits to select the condition code, so it's |
| 5221 | // constrained to use only GE, GT, VS and EQ. |
| 5222 | // |
| 5223 | // To implement all the various ISD::SETXXX opcodes, we sometimes need to |
| 5224 | // swap the operands of the previous compare instruction (effectively |
| 5225 | // inverting the compare condition, swapping 'less' and 'greater') and |
| 5226 | // sometimes need to swap the operands to the VSEL (which inverts the |
| 5227 | // condition in the sense of firing whenever the previous condition didn't) |
| 5228 | if (Subtarget->hasFPARMv8Base() && (TrueVal.getValueType() == MVT::f16 || |
| 5229 | TrueVal.getValueType() == MVT::f32 || |
| 5230 | TrueVal.getValueType() == MVT::f64)) { |
| 5231 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); |
| 5232 | if (CondCode == ARMCC::LT || CondCode == ARMCC::LE || |
| 5233 | CondCode == ARMCC::VC || CondCode == ARMCC::NE) { |
| 5234 | CC = ISD::getSetCCInverse(Operation: CC, Type: LHS.getValueType()); |
| 5235 | std::swap(a&: TrueVal, b&: FalseVal); |
| 5236 | } |
| 5237 | } |
| 5238 | |
| 5239 | SDValue ARMcc; |
| 5240 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
    // Choose GE over PL, since vsel does not support PL.
| 5242 | if (ARMcc->getAsZExtVal() == ARMCC::PL) |
| 5243 | ARMcc = DAG.getConstant(Val: ARMCC::GE, DL: dl, VT: MVT::i32); |
| 5244 | return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, Flags: Cmp, DAG); |
| 5245 | } |
| 5246 | |
| 5247 | ARMCC::CondCodes CondCode, CondCode2; |
| 5248 | FPCCToARMCC(CC, CondCode, CondCode2); |
| 5249 | |
| 5250 | // Normalize the fp compare. If RHS is zero we prefer to keep it there so we |
| 5251 | // match CMPFPw0 instead of CMPFP, though we don't do this for f16 because we |
| 5252 | // must use VSEL (limited condition codes), due to not having conditional f16 |
| 5253 | // moves. |
| 5254 | if (Subtarget->hasFPARMv8Base() && |
| 5255 | !(isFloatingPointZero(Op: RHS) && TrueVal.getValueType() != MVT::f16) && |
| 5256 | (TrueVal.getValueType() == MVT::f16 || |
| 5257 | TrueVal.getValueType() == MVT::f32 || |
| 5258 | TrueVal.getValueType() == MVT::f64)) { |
| 5259 | bool swpCmpOps = false; |
| 5260 | bool swpVselOps = false; |
| 5261 | checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps); |
| 5262 | |
| 5263 | if (CondCode == ARMCC::GT || CondCode == ARMCC::GE || |
| 5264 | CondCode == ARMCC::VS || CondCode == ARMCC::EQ) { |
| 5265 | if (swpCmpOps) |
| 5266 | std::swap(a&: LHS, b&: RHS); |
| 5267 | if (swpVselOps) |
| 5268 | std::swap(a&: TrueVal, b&: FalseVal); |
| 5269 | } |
| 5270 | } |
| 5271 | |
| 5272 | SDValue ARMcc = DAG.getConstant(Val: CondCode, DL: dl, VT: MVT::i32); |
| 5273 | SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); |
| 5274 | SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, Flags: Cmp, DAG); |
| 5275 | if (CondCode2 != ARMCC::AL) { |
| 5276 | SDValue ARMcc2 = DAG.getConstant(Val: CondCode2, DL: dl, VT: MVT::i32); |
| 5277 | Result = getCMOV(dl, VT, FalseVal: Result, TrueVal, ARMcc: ARMcc2, Flags: Cmp, DAG); |
| 5278 | } |
| 5279 | return Result; |
| 5280 | } |
| 5281 | |
| 5282 | /// canChangeToInt - Given the fp compare operand, return true if it is suitable |
| 5283 | /// to morph to an integer compare sequence. |
| 5284 | static bool canChangeToInt(SDValue Op, bool &SeenZero, |
| 5285 | const ARMSubtarget *Subtarget) { |
| 5286 | SDNode *N = Op.getNode(); |
| 5287 | if (!N->hasOneUse()) |
| 5288 | // Otherwise it requires moving the value from fp to integer registers. |
| 5289 | return false; |
| 5290 | if (!N->getNumValues()) |
| 5291 | return false; |
| 5292 | EVT VT = Op.getValueType(); |
| 5293 | if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) |
| 5294 | // f32 case is generally profitable. f64 case only makes sense when vcmpe + |
| 5295 | // vmrs are very slow, e.g. cortex-a8. |
| 5296 | return false; |
| 5297 | |
| 5298 | if (isFloatingPointZero(Op)) { |
| 5299 | SeenZero = true; |
| 5300 | return true; |
| 5301 | } |
| 5302 | return ISD::isNormalLoad(N); |
| 5303 | } |
| 5304 | |
| 5305 | static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { |
| 5306 | if (isFloatingPointZero(Op)) |
| 5307 | return DAG.getConstant(Val: 0, DL: SDLoc(Op), VT: MVT::i32); |
| 5308 | |
| 5309 | if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Val&: Op)) |
| 5310 | return DAG.getLoad(VT: MVT::i32, dl: SDLoc(Op), Chain: Ld->getChain(), Ptr: Ld->getBasePtr(), |
| 5311 | PtrInfo: Ld->getPointerInfo(), Alignment: Ld->getAlign(), |
| 5312 | MMOFlags: Ld->getMemOperand()->getFlags()); |
| 5313 | |
| 5314 | llvm_unreachable("Unknown VFP cmp argument!" ); |
| 5315 | } |
| 5316 | |
| 5317 | static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, |
| 5318 | SDValue &RetVal1, SDValue &RetVal2) { |
| 5319 | SDLoc dl(Op); |
| 5320 | |
| 5321 | if (isFloatingPointZero(Op)) { |
| 5322 | RetVal1 = DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32); |
| 5323 | RetVal2 = DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32); |
| 5324 | return; |
| 5325 | } |
| 5326 | |
| 5327 | if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Val&: Op)) { |
| 5328 | SDValue Ptr = Ld->getBasePtr(); |
| 5329 | RetVal1 = |
| 5330 | DAG.getLoad(VT: MVT::i32, dl, Chain: Ld->getChain(), Ptr, PtrInfo: Ld->getPointerInfo(), |
| 5331 | Alignment: Ld->getAlign(), MMOFlags: Ld->getMemOperand()->getFlags()); |
| 5332 | |
| 5333 | EVT PtrType = Ptr.getValueType(); |
| 5334 | SDValue NewPtr = DAG.getNode(Opcode: ISD::ADD, DL: dl, |
| 5335 | VT: PtrType, N1: Ptr, N2: DAG.getConstant(Val: 4, DL: dl, VT: PtrType)); |
| 5336 | RetVal2 = DAG.getLoad(VT: MVT::i32, dl, Chain: Ld->getChain(), Ptr: NewPtr, |
| 5337 | PtrInfo: Ld->getPointerInfo().getWithOffset(O: 4), |
| 5338 | Alignment: commonAlignment(A: Ld->getAlign(), Offset: 4), |
| 5339 | MMOFlags: Ld->getMemOperand()->getFlags()); |
| 5340 | return; |
| 5341 | } |
| 5342 | |
| 5343 | llvm_unreachable("Unknown VFP cmp argument!" ); |
| 5344 | } |
| 5345 | |
| 5346 | /// OptimizeVFPBrcond - With nnan and without daz, it's legal to optimize some |
| 5347 | /// f32 and even f64 comparisons to integer ones. |
| 5348 | SDValue |
| 5349 | ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { |
| 5350 | SDValue Chain = Op.getOperand(i: 0); |
| 5351 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 1))->get(); |
| 5352 | SDValue LHS = Op.getOperand(i: 2); |
| 5353 | SDValue RHS = Op.getOperand(i: 3); |
| 5354 | SDValue Dest = Op.getOperand(i: 4); |
| 5355 | SDLoc dl(Op); |
| 5356 | |
| 5357 | bool LHSSeenZero = false; |
| 5358 | bool LHSOk = canChangeToInt(Op: LHS, SeenZero&: LHSSeenZero, Subtarget); |
| 5359 | bool RHSSeenZero = false; |
| 5360 | bool RHSOk = canChangeToInt(Op: RHS, SeenZero&: RHSSeenZero, Subtarget); |
| 5361 | if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) { |
    // Given the nnan precondition above and no other uses of the CMP
    // operands, an EQ or NE fp comparison can be optimized to an integer
    // comparison.
| 5365 | if (CC == ISD::SETOEQ) |
| 5366 | CC = ISD::SETEQ; |
| 5367 | else if (CC == ISD::SETUNE) |
| 5368 | CC = ISD::SETNE; |
| 5369 | |
| 5370 | SDValue Mask = DAG.getConstant(Val: 0x7fffffff, DL: dl, VT: MVT::i32); |
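    // Clearing bit 31 makes +0.0 (0x00000000) and -0.0 (0x80000000) compare
    // equal as integers; nnan (checked by the caller) excludes the NaN cases
    // where bitwise and fp equality would disagree.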
| 5371 | SDValue ARMcc; |
| 5372 | if (LHS.getValueType() == MVT::f32) { |
| 5373 | LHS = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, |
| 5374 | N1: bitcastf32Toi32(Op: LHS, DAG), N2: Mask); |
| 5375 | RHS = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, |
| 5376 | N1: bitcastf32Toi32(Op: RHS, DAG), N2: Mask); |
| 5377 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
| 5378 | return DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, N1: Chain, N2: Dest, N3: ARMcc, |
| 5379 | N4: Cmp); |
| 5380 | } |
| 5381 | |
| 5382 | SDValue LHS1, LHS2; |
| 5383 | SDValue RHS1, RHS2; |
| 5384 | expandf64Toi32(Op: LHS, DAG, RetVal1&: LHS1, RetVal2&: LHS2); |
| 5385 | expandf64Toi32(Op: RHS, DAG, RetVal1&: RHS1, RetVal2&: RHS2); |
| 5386 | LHS2 = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, N1: LHS2, N2: Mask); |
| 5387 | RHS2 = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, N1: RHS2, N2: Mask); |
| 5388 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); |
| 5389 | ARMcc = DAG.getConstant(Val: CondCode, DL: dl, VT: MVT::i32); |
| 5390 | SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; |
| 5391 | return DAG.getNode(Opcode: ARMISD::BCC_i64, DL: dl, VT: MVT::Other, Ops); |
| 5392 | } |
| 5393 | |
| 5394 | return SDValue(); |
| 5395 | } |
| 5396 | |
| 5397 | // Generate CMP + CMOV for integer abs. |
| 5398 | SDValue ARMTargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const { |
| 5399 | SDLoc DL(Op); |
| 5400 | |
| 5401 | SDValue Neg = DAG.getNegative(Val: Op.getOperand(i: 0), DL, VT: MVT::i32); |
| 5402 | |
| 5403 | // Generate CMP & CMOV. |
| 5404 | SDValue Cmp = DAG.getNode(Opcode: ARMISD::CMP, DL, VT: FlagsVT, N1: Op.getOperand(i: 0), |
| 5405 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
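  // E.g. x = -5: the compare sets N, MI holds, and the CMOV below selects
  // Neg = 5; for x = 3, MI is false and the original value is kept.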
| 5406 | return DAG.getNode(Opcode: ARMISD::CMOV, DL, VT: MVT::i32, N1: Op.getOperand(i: 0), N2: Neg, |
| 5407 | N3: DAG.getConstant(Val: ARMCC::MI, DL, VT: MVT::i32), N4: Cmp); |
| 5408 | } |
| 5409 | |
| 5410 | SDValue ARMTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { |
| 5411 | SDValue Chain = Op.getOperand(i: 0); |
| 5412 | SDValue Cond = Op.getOperand(i: 1); |
| 5413 | SDValue Dest = Op.getOperand(i: 2); |
| 5414 | SDLoc dl(Op); |
| 5415 | |
| 5416 | // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch |
| 5417 | // instruction. |
| 5418 | unsigned Opc = Cond.getOpcode(); |
| 5419 | bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) && |
| 5420 | !Subtarget->isThumb1Only(); |
| 5421 | if (Cond.getResNo() == 1 && |
| 5422 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || |
| 5423 | Opc == ISD::USUBO || OptimizeMul)) { |
| 5424 | // Only lower legal XALUO ops. |
| 5425 | if (!isTypeLegal(VT: Cond->getValueType(ResNo: 0))) |
| 5426 | return SDValue(); |
| 5427 | |
| 5428 | // The actual operation with overflow check. |
| 5429 | SDValue Value, OverflowCmp; |
| 5430 | SDValue ARMcc; |
| 5431 | std::tie(args&: Value, args&: OverflowCmp) = getARMXALUOOp(Op: Cond, DAG, ARMcc); |
| 5432 | |
| 5433 | // Reverse the condition code. |
| 5434 | ARMCC::CondCodes CondCode = |
| 5435 | (ARMCC::CondCodes)cast<const ConstantSDNode>(Val&: ARMcc)->getZExtValue(); |
| 5436 | CondCode = ARMCC::getOppositeCondition(CC: CondCode); |
| 5437 | ARMcc = DAG.getConstant(Val: CondCode, DL: SDLoc(ARMcc), VT: MVT::i32); |
| 5438 | |
| 5439 | return DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, N1: Chain, N2: Dest, N3: ARMcc, |
| 5440 | N4: OverflowCmp); |
| 5441 | } |
| 5442 | |
| 5443 | return SDValue(); |
| 5444 | } |
| 5445 | |
| 5446 | SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { |
| 5447 | SDValue Chain = Op.getOperand(i: 0); |
| 5448 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 1))->get(); |
| 5449 | SDValue LHS = Op.getOperand(i: 2); |
| 5450 | SDValue RHS = Op.getOperand(i: 3); |
| 5451 | SDValue Dest = Op.getOperand(i: 4); |
| 5452 | SDLoc dl(Op); |
| 5453 | |
| 5454 | if (isUnsupportedFloatingType(VT: LHS.getValueType())) { |
| 5455 | softenSetCCOperands(DAG, VT: LHS.getValueType(), NewLHS&: LHS, NewRHS&: RHS, CCCode&: CC, DL: dl, OldLHS: LHS, OldRHS: RHS); |
| 5456 | |
| 5457 | // If softenSetCCOperands only returned one value, we should compare it to |
| 5458 | // zero. |
| 5459 | if (!RHS.getNode()) { |
| 5460 | RHS = DAG.getConstant(Val: 0, DL: dl, VT: LHS.getValueType()); |
| 5461 | CC = ISD::SETNE; |
| 5462 | } |
| 5463 | } |
| 5464 | |
| 5465 | // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch |
| 5466 | // instruction. |
| 5467 | unsigned Opc = LHS.getOpcode(); |
| 5468 | bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) && |
| 5469 | !Subtarget->isThumb1Only(); |
| 5470 | if (LHS.getResNo() == 1 && (isOneConstant(V: RHS) || isNullConstant(V: RHS)) && |
| 5471 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || |
| 5472 | Opc == ISD::USUBO || OptimizeMul) && |
| 5473 | (CC == ISD::SETEQ || CC == ISD::SETNE)) { |
| 5474 | // Only lower legal XALUO ops. |
| 5475 | if (!isTypeLegal(VT: LHS->getValueType(ResNo: 0))) |
| 5476 | return SDValue(); |
| 5477 | |
| 5478 | // The actual operation with overflow check. |
| 5479 | SDValue Value, OverflowCmp; |
| 5480 | SDValue ARMcc; |
| 5481 | std::tie(args&: Value, args&: OverflowCmp) = getARMXALUOOp(Op: LHS.getValue(R: 0), DAG, ARMcc); |
| 5482 | |
| 5483 | if ((CC == ISD::SETNE) != isOneConstant(V: RHS)) { |
| 5484 | // Reverse the condition code. |
| 5485 | ARMCC::CondCodes CondCode = |
| 5486 | (ARMCC::CondCodes)cast<const ConstantSDNode>(Val&: ARMcc)->getZExtValue(); |
| 5487 | CondCode = ARMCC::getOppositeCondition(CC: CondCode); |
| 5488 | ARMcc = DAG.getConstant(Val: CondCode, DL: SDLoc(ARMcc), VT: MVT::i32); |
| 5489 | } |
| 5490 | |
| 5491 | return DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, N1: Chain, N2: Dest, N3: ARMcc, |
| 5492 | N4: OverflowCmp); |
| 5493 | } |
| 5494 | |
| 5495 | if (LHS.getValueType() == MVT::i32) { |
| 5496 | SDValue ARMcc; |
| 5497 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
| 5498 | return DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, N1: Chain, N2: Dest, N3: ARMcc, N4: Cmp); |
| 5499 | } |
| 5500 | |
| 5501 | SDNodeFlags Flags = Op->getFlags(); |
| 5502 | if (Flags.hasNoNaNs() && |
| 5503 | DAG.getDenormalMode(VT: MVT::f32) == DenormalMode::getIEEE() && |
| 5504 | DAG.getDenormalMode(VT: MVT::f64) == DenormalMode::getIEEE() && |
| 5505 | (CC == ISD::SETEQ || CC == ISD::SETOEQ || CC == ISD::SETNE || |
| 5506 | CC == ISD::SETUNE)) { |
| 5507 | if (SDValue Result = OptimizeVFPBrcond(Op, DAG)) |
| 5508 | return Result; |
| 5509 | } |
| 5510 | |
| 5511 | ARMCC::CondCodes CondCode, CondCode2; |
| 5512 | FPCCToARMCC(CC, CondCode, CondCode2); |
| 5513 | |
| 5514 | SDValue ARMcc = DAG.getConstant(Val: CondCode, DL: dl, VT: MVT::i32); |
| 5515 | SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); |
| 5516 | SDValue Ops[] = {Chain, Dest, ARMcc, Cmp}; |
| 5517 | SDValue Res = DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, Ops); |
| 5518 | if (CondCode2 != ARMCC::AL) { |
| 5519 | ARMcc = DAG.getConstant(Val: CondCode2, DL: dl, VT: MVT::i32); |
| 5520 | SDValue Ops[] = {Res, Dest, ARMcc, Cmp}; |
| 5521 | Res = DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, Ops); |
| 5522 | } |
| 5523 | return Res; |
| 5524 | } |
| 5525 | |
| 5526 | SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { |
| 5527 | SDValue Chain = Op.getOperand(i: 0); |
| 5528 | SDValue Table = Op.getOperand(i: 1); |
| 5529 | SDValue Index = Op.getOperand(i: 2); |
| 5530 | SDLoc dl(Op); |
| 5531 | |
| 5532 | EVT PTy = getPointerTy(DL: DAG.getDataLayout()); |
| 5533 | JumpTableSDNode *JT = cast<JumpTableSDNode>(Val&: Table); |
| 5534 | SDValue JTI = DAG.getTargetJumpTable(JTI: JT->getIndex(), VT: PTy); |
| 5535 | Table = DAG.getNode(Opcode: ARMISD::WrapperJT, DL: dl, VT: MVT::i32, Operand: JTI); |
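  // Each jump table entry is 4 bytes wide, so scale the index to a byte
  // offset before adding it to the table base.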
| 5536 | Index = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: PTy, N1: Index, N2: DAG.getConstant(Val: 4, DL: dl, VT: PTy)); |
| 5537 | SDValue Addr = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PTy, N1: Table, N2: Index); |
  if (Subtarget->isThumb2() ||
      (Subtarget->hasV8MBaselineOps() && Subtarget->isThumb())) {
    // Thumb2 and ARMv8-M use a two-level jump: the branch jumps into the
    // jump table, which in turn jumps to the destination. This also makes it
    // easier to translate to TBB / TBH later (Thumb2 only).
| 5542 | // FIXME: This might not work if the function is extremely large. |
| 5543 | return DAG.getNode(Opcode: ARMISD::BR2_JT, DL: dl, VT: MVT::Other, N1: Chain, |
| 5544 | N2: Addr, N3: Op.getOperand(i: 2), N4: JTI); |
| 5545 | } |
| 5546 | if (isPositionIndependent() || Subtarget->isROPI()) { |
| 5547 | Addr = |
| 5548 | DAG.getLoad(VT: (EVT)MVT::i32, dl, Chain, Ptr: Addr, |
| 5549 | PtrInfo: MachinePointerInfo::getJumpTable(MF&: DAG.getMachineFunction())); |
| 5550 | Chain = Addr.getValue(R: 1); |
| 5551 | Addr = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PTy, N1: Table, N2: Addr); |
| 5552 | return DAG.getNode(Opcode: ARMISD::BR_JT, DL: dl, VT: MVT::Other, N1: Chain, N2: Addr, N3: JTI); |
| 5553 | } else { |
| 5554 | Addr = |
| 5555 | DAG.getLoad(VT: PTy, dl, Chain, Ptr: Addr, |
| 5556 | PtrInfo: MachinePointerInfo::getJumpTable(MF&: DAG.getMachineFunction())); |
| 5557 | Chain = Addr.getValue(R: 1); |
| 5558 | return DAG.getNode(Opcode: ARMISD::BR_JT, DL: dl, VT: MVT::Other, N1: Chain, N2: Addr, N3: JTI); |
| 5559 | } |
| 5560 | } |
| 5561 | |
| 5562 | static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) { |
| 5563 | EVT VT = Op.getValueType(); |
| 5564 | SDLoc dl(Op); |
| 5565 | |
| 5566 | if (Op.getValueType().getVectorElementType() == MVT::i32) { |
| 5567 | if (Op.getOperand(i: 0).getValueType().getVectorElementType() == MVT::f32) |
| 5568 | return Op; |
| 5569 | return DAG.UnrollVectorOp(N: Op.getNode()); |
| 5570 | } |
| 5571 | |
| 5572 | const bool HasFullFP16 = DAG.getSubtarget<ARMSubtarget>().hasFullFP16(); |
| 5573 | |
| 5574 | EVT NewTy; |
| 5575 | const EVT OpTy = Op.getOperand(i: 0).getValueType(); |
| 5576 | if (OpTy == MVT::v4f32) |
| 5577 | NewTy = MVT::v4i32; |
| 5578 | else if (OpTy == MVT::v4f16 && HasFullFP16) |
| 5579 | NewTy = MVT::v4i16; |
| 5580 | else if (OpTy == MVT::v8f16 && HasFullFP16) |
| 5581 | NewTy = MVT::v8i16; |
| 5582 | else |
| 5583 | llvm_unreachable("Invalid type for custom lowering!" ); |
| 5584 | |
| 5585 | if (VT != MVT::v4i16 && VT != MVT::v8i16) |
| 5586 | return DAG.UnrollVectorOp(N: Op.getNode()); |
| 5587 | |
| 5588 | Op = DAG.getNode(Opcode: Op.getOpcode(), DL: dl, VT: NewTy, Operand: Op.getOperand(i: 0)); |
| 5589 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT, Operand: Op); |
| 5590 | } |
| 5591 | |
| 5592 | SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const { |
| 5593 | EVT VT = Op.getValueType(); |
| 5594 | if (VT.isVector()) |
| 5595 | return LowerVectorFP_TO_INT(Op, DAG); |
| 5596 | |
| 5597 | bool IsStrict = Op->isStrictFPOpcode(); |
| 5598 | SDValue SrcVal = Op.getOperand(i: IsStrict ? 1 : 0); |
| 5599 | |
| 5600 | if (isUnsupportedFloatingType(VT: SrcVal.getValueType())) { |
| 5601 | RTLIB::Libcall LC; |
| 5602 | if (Op.getOpcode() == ISD::FP_TO_SINT || |
| 5603 | Op.getOpcode() == ISD::STRICT_FP_TO_SINT) |
| 5604 | LC = RTLIB::getFPTOSINT(OpVT: SrcVal.getValueType(), |
| 5605 | RetVT: Op.getValueType()); |
| 5606 | else |
| 5607 | LC = RTLIB::getFPTOUINT(OpVT: SrcVal.getValueType(), |
| 5608 | RetVT: Op.getValueType()); |
| 5609 | SDLoc Loc(Op); |
| 5610 | MakeLibCallOptions CallOptions; |
| 5611 | SDValue Chain = IsStrict ? Op.getOperand(i: 0) : SDValue(); |
| 5612 | SDValue Result; |
| 5613 | std::tie(args&: Result, args&: Chain) = makeLibCall(DAG, LC, RetVT: Op.getValueType(), Ops: SrcVal, |
| 5614 | CallOptions, dl: Loc, Chain); |
| 5615 | return IsStrict ? DAG.getMergeValues(Ops: {Result, Chain}, dl: Loc) : Result; |
| 5616 | } |
| 5617 | |
| 5618 | // FIXME: Remove this when we have strict fp instruction selection patterns |
| 5619 | if (IsStrict) { |
| 5620 | SDLoc Loc(Op); |
| 5621 | SDValue Result = |
| 5622 | DAG.getNode(Opcode: Op.getOpcode() == ISD::STRICT_FP_TO_SINT ? ISD::FP_TO_SINT |
| 5623 | : ISD::FP_TO_UINT, |
| 5624 | DL: Loc, VT: Op.getValueType(), Operand: SrcVal); |
| 5625 | return DAG.getMergeValues(Ops: {Result, Op.getOperand(i: 0)}, dl: Loc); |
| 5626 | } |
| 5627 | |
| 5628 | return Op; |
| 5629 | } |
| 5630 | |
| 5631 | static SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG, |
| 5632 | const ARMSubtarget *Subtarget) { |
| 5633 | EVT VT = Op.getValueType(); |
| 5634 | EVT ToVT = cast<VTSDNode>(Val: Op.getOperand(i: 1))->getVT(); |
| 5635 | EVT FromVT = Op.getOperand(i: 0).getValueType(); |
| 5636 | |
| 5637 | if (VT == MVT::i32 && ToVT == MVT::i32 && FromVT == MVT::f32) |
| 5638 | return Op; |
| 5639 | if (VT == MVT::i32 && ToVT == MVT::i32 && FromVT == MVT::f64 && |
| 5640 | Subtarget->hasFP64()) |
| 5641 | return Op; |
| 5642 | if (VT == MVT::i32 && ToVT == MVT::i32 && FromVT == MVT::f16 && |
| 5643 | Subtarget->hasFullFP16()) |
| 5644 | return Op; |
| 5645 | if (VT == MVT::v4i32 && ToVT == MVT::i32 && FromVT == MVT::v4f32 && |
| 5646 | Subtarget->hasMVEFloatOps()) |
| 5647 | return Op; |
| 5648 | if (VT == MVT::v8i16 && ToVT == MVT::i16 && FromVT == MVT::v8f16 && |
| 5649 | Subtarget->hasMVEFloatOps()) |
| 5650 | return Op; |
| 5651 | |
| 5652 | if (FromVT != MVT::v4f32 && FromVT != MVT::v8f16) |
| 5653 | return SDValue(); |
| 5654 | |
| 5655 | SDLoc DL(Op); |
| 5656 | bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT; |
| 5657 | unsigned BW = ToVT.getScalarSizeInBits() - IsSigned; |
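  // The SMIN/UMIN and SMAX nodes below clamp the converted value to the ToVT
  // range. For example, for a signed i16 saturate, BW is 15 and the clamp
  // range is [-(1 << 15), (1 << 15) - 1] = [-32768, 32767]; for an unsigned
  // saturate the lower bound is implicitly 0.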
| 5658 | SDValue CVT = DAG.getNode(Opcode: Op.getOpcode(), DL, VT, N1: Op.getOperand(i: 0), |
| 5659 | N2: DAG.getValueType(VT.getScalarType())); |
| 5660 | SDValue Max = DAG.getNode(Opcode: IsSigned ? ISD::SMIN : ISD::UMIN, DL, VT, N1: CVT, |
| 5661 | N2: DAG.getConstant(Val: (1 << BW) - 1, DL, VT)); |
| 5662 | if (IsSigned) |
| 5663 | Max = DAG.getNode(Opcode: ISD::SMAX, DL, VT, N1: Max, |
| 5664 | N2: DAG.getSignedConstant(Val: -(1 << BW), DL, VT)); |
| 5665 | return Max; |
| 5666 | } |
| 5667 | |
| 5668 | static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { |
| 5669 | EVT VT = Op.getValueType(); |
| 5670 | SDLoc dl(Op); |
| 5671 | |
| 5672 | if (Op.getOperand(i: 0).getValueType().getVectorElementType() == MVT::i32) { |
| 5673 | if (VT.getVectorElementType() == MVT::f32) |
| 5674 | return Op; |
| 5675 | return DAG.UnrollVectorOp(N: Op.getNode()); |
| 5676 | } |
| 5677 | |
| 5678 | assert((Op.getOperand(0).getValueType() == MVT::v4i16 || |
| 5679 | Op.getOperand(0).getValueType() == MVT::v8i16) && |
| 5680 | "Invalid type for custom lowering!" ); |
| 5681 | |
| 5682 | const bool HasFullFP16 = DAG.getSubtarget<ARMSubtarget>().hasFullFP16(); |
| 5683 | |
| 5684 | EVT DestVecType; |
| 5685 | if (VT == MVT::v4f32) |
| 5686 | DestVecType = MVT::v4i32; |
| 5687 | else if (VT == MVT::v4f16 && HasFullFP16) |
| 5688 | DestVecType = MVT::v4i16; |
| 5689 | else if (VT == MVT::v8f16 && HasFullFP16) |
| 5690 | DestVecType = MVT::v8i16; |
| 5691 | else |
| 5692 | return DAG.UnrollVectorOp(N: Op.getNode()); |
| 5693 | |
| 5694 | unsigned CastOpc; |
| 5695 | unsigned Opc; |
| 5696 | switch (Op.getOpcode()) { |
| 5697 | default: llvm_unreachable("Invalid opcode!" ); |
| 5698 | case ISD::SINT_TO_FP: |
| 5699 | CastOpc = ISD::SIGN_EXTEND; |
| 5700 | Opc = ISD::SINT_TO_FP; |
| 5701 | break; |
| 5702 | case ISD::UINT_TO_FP: |
| 5703 | CastOpc = ISD::ZERO_EXTEND; |
| 5704 | Opc = ISD::UINT_TO_FP; |
| 5705 | break; |
| 5706 | } |
| 5707 | |
| 5708 | Op = DAG.getNode(Opcode: CastOpc, DL: dl, VT: DestVecType, Operand: Op.getOperand(i: 0)); |
| 5709 | return DAG.getNode(Opcode: Opc, DL: dl, VT, Operand: Op); |
| 5710 | } |
| 5711 | |
| 5712 | SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const { |
| 5713 | EVT VT = Op.getValueType(); |
| 5714 | if (VT.isVector()) |
| 5715 | return LowerVectorINT_TO_FP(Op, DAG); |
| 5716 | if (isUnsupportedFloatingType(VT)) { |
| 5717 | RTLIB::Libcall LC; |
| 5718 | if (Op.getOpcode() == ISD::SINT_TO_FP) |
| 5719 | LC = RTLIB::getSINTTOFP(OpVT: Op.getOperand(i: 0).getValueType(), |
| 5720 | RetVT: Op.getValueType()); |
| 5721 | else |
| 5722 | LC = RTLIB::getUINTTOFP(OpVT: Op.getOperand(i: 0).getValueType(), |
| 5723 | RetVT: Op.getValueType()); |
| 5724 | MakeLibCallOptions CallOptions; |
| 5725 | return makeLibCall(DAG, LC, RetVT: Op.getValueType(), Ops: Op.getOperand(i: 0), |
| 5726 | CallOptions, dl: SDLoc(Op)).first; |
| 5727 | } |
| 5728 | |
| 5729 | return Op; |
| 5730 | } |
| 5731 | |
| 5732 | SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { |
| 5733 | // Implement fcopysign with a fabs and a conditional fneg. |
| 5734 | SDValue Tmp0 = Op.getOperand(i: 0); |
| 5735 | SDValue Tmp1 = Op.getOperand(i: 1); |
| 5736 | SDLoc dl(Op); |
| 5737 | EVT VT = Op.getValueType(); |
| 5738 | EVT SrcVT = Tmp1.getValueType(); |
| 5739 | bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || |
| 5740 | Tmp0.getOpcode() == ARMISD::VMOVDRR; |
| 5741 | bool UseNEON = !InGPR && Subtarget->hasNEON(); |
| 5742 | |
| 5743 | if (UseNEON) { |
| 5744 | // Use VBSL to copy the sign bit. |
| 5745 | unsigned EncodedVal = ARM_AM::createVMOVModImm(OpCmode: 0x6, Val: 0x80); |
| 5746 | SDValue Mask = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: MVT::v2i32, |
| 5747 | Operand: DAG.getTargetConstant(Val: EncodedVal, DL: dl, VT: MVT::i32)); |
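    // Mask is a v2i32 splat of 0x80000000 (the f32 sign bit in each lane);
    // for f64 it is bitcast to v1i64 and shifted left by 32 below, leaving
    // only bit 63 (the f64 sign bit) set.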
| 5748 | EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64; |
| 5749 | if (VT == MVT::f64) |
| 5750 | Mask = DAG.getNode(Opcode: ARMISD::VSHLIMM, DL: dl, VT: OpVT, |
| 5751 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: OpVT, Operand: Mask), |
| 5752 | N2: DAG.getConstant(Val: 32, DL: dl, VT: MVT::i32)); |
| 5753 | else /*if (VT == MVT::f32)*/ |
| 5754 | Tmp0 = DAG.getNode(Opcode: ISD::SCALAR_TO_VECTOR, DL: dl, VT: MVT::v2f32, Operand: Tmp0); |
| 5755 | if (SrcVT == MVT::f32) { |
| 5756 | Tmp1 = DAG.getNode(Opcode: ISD::SCALAR_TO_VECTOR, DL: dl, VT: MVT::v2f32, Operand: Tmp1); |
| 5757 | if (VT == MVT::f64) |
| 5758 | Tmp1 = DAG.getNode(Opcode: ARMISD::VSHLIMM, DL: dl, VT: OpVT, |
| 5759 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: OpVT, Operand: Tmp1), |
| 5760 | N2: DAG.getConstant(Val: 32, DL: dl, VT: MVT::i32)); |
| 5761 | } else if (VT == MVT::f32) |
| 5762 | Tmp1 = DAG.getNode(Opcode: ARMISD::VSHRuIMM, DL: dl, VT: MVT::v1i64, |
| 5763 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v1i64, Operand: Tmp1), |
| 5764 | N2: DAG.getConstant(Val: 32, DL: dl, VT: MVT::i32)); |
| 5765 | Tmp0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: OpVT, Operand: Tmp0); |
| 5766 | Tmp1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: OpVT, Operand: Tmp1); |
| 5767 | |
| 5768 | SDValue AllOnes = DAG.getTargetConstant(Val: ARM_AM::createVMOVModImm(OpCmode: 0xe, Val: 0xff), |
| 5769 | DL: dl, VT: MVT::i32); |
| 5770 | AllOnes = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: MVT::v8i8, Operand: AllOnes); |
| 5771 | SDValue MaskNot = DAG.getNode(Opcode: ISD::XOR, DL: dl, VT: OpVT, N1: Mask, |
| 5772 | N2: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: OpVT, Operand: AllOnes)); |
| 5773 | |
| 5774 | SDValue Res = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: OpVT, |
| 5775 | N1: DAG.getNode(Opcode: ISD::AND, DL: dl, VT: OpVT, N1: Tmp1, N2: Mask), |
| 5776 | N2: DAG.getNode(Opcode: ISD::AND, DL: dl, VT: OpVT, N1: Tmp0, N2: MaskNot)); |
| 5777 | if (VT == MVT::f32) { |
| 5778 | Res = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v2f32, Operand: Res); |
| 5779 | Res = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f32, N1: Res, |
| 5780 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 5781 | } else { |
| 5782 | Res = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f64, Operand: Res); |
| 5783 | } |
| 5784 | |
| 5785 | return Res; |
| 5786 | } |
| 5787 | |
| 5788 | // Bitcast operand 1 to i32. |
| 5789 | if (SrcVT == MVT::f64) |
| 5790 | Tmp1 = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
| 5791 | N: Tmp1).getValue(R: 1); |
| 5792 | Tmp1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::i32, Operand: Tmp1); |
| 5793 | |
| 5794 | // Or in the signbit with integer operations. |
| 5795 | SDValue Mask1 = DAG.getConstant(Val: 0x80000000, DL: dl, VT: MVT::i32); |
| 5796 | SDValue Mask2 = DAG.getConstant(Val: 0x7fffffff, DL: dl, VT: MVT::i32); |
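  // The result is assembled as (Tmp0 & 0x7fffffff) | (Tmp1 & 0x80000000),
  // i.e. the magnitude of operand 0 combined with the sign of operand 1.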
| 5797 | Tmp1 = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, N1: Tmp1, N2: Mask1); |
| 5798 | if (VT == MVT::f32) { |
| 5799 | Tmp0 = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, |
| 5800 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::i32, Operand: Tmp0), N2: Mask2); |
| 5801 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f32, |
| 5802 | Operand: DAG.getNode(Opcode: ISD::OR, DL: dl, VT: MVT::i32, N1: Tmp0, N2: Tmp1)); |
| 5803 | } |
| 5804 | |
| 5805 | // f64: Or the high part with signbit and then combine two parts. |
| 5806 | Tmp0 = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
| 5807 | N: Tmp0); |
| 5808 | SDValue Lo = Tmp0.getValue(R: 0); |
| 5809 | SDValue Hi = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, N1: Tmp0.getValue(R: 1), N2: Mask2); |
| 5810 | Hi = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: MVT::i32, N1: Hi, N2: Tmp1); |
| 5811 | return DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: Lo, N2: Hi); |
| 5812 | } |
| 5813 | |
| 5814 | SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ |
| 5815 | MachineFunction &MF = DAG.getMachineFunction(); |
| 5816 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 5817 | MFI.setReturnAddressIsTaken(true); |
| 5818 | |
| 5819 | EVT VT = Op.getValueType(); |
| 5820 | SDLoc dl(Op); |
| 5821 | unsigned Depth = Op.getConstantOperandVal(i: 0); |
| 5822 | if (Depth) { |
| 5823 | SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); |
| 5824 | SDValue Offset = DAG.getConstant(Val: 4, DL: dl, VT: MVT::i32); |
| 5825 | return DAG.getLoad(VT, dl, Chain: DAG.getEntryNode(), |
| 5826 | Ptr: DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: FrameAddr, N2: Offset), |
| 5827 | PtrInfo: MachinePointerInfo()); |
| 5828 | } |
| 5829 | |
| 5830 | // Return LR, which contains the return address. Mark it an implicit live-in. |
| 5831 | Register Reg = MF.addLiveIn(PReg: ARM::LR, RC: getRegClassFor(VT: MVT::i32)); |
| 5832 | return DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg, VT); |
| 5833 | } |
| 5834 | |
| 5835 | SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { |
| 5836 | const ARMBaseRegisterInfo &ARI = |
| 5837 | *static_cast<const ARMBaseRegisterInfo*>(RegInfo); |
| 5838 | MachineFunction &MF = DAG.getMachineFunction(); |
| 5839 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 5840 | MFI.setFrameAddressIsTaken(true); |
| 5841 | |
| 5842 | EVT VT = Op.getValueType(); |
| 5843 | SDLoc dl(Op); // FIXME probably not meaningful |
| 5844 | unsigned Depth = Op.getConstantOperandVal(i: 0); |
| 5845 | Register FrameReg = ARI.getFrameRegister(MF); |
| 5846 | SDValue FrameAddr = DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg: FrameReg, VT); |
| 5847 | while (Depth--) |
| 5848 | FrameAddr = DAG.getLoad(VT, dl, Chain: DAG.getEntryNode(), Ptr: FrameAddr, |
| 5849 | PtrInfo: MachinePointerInfo()); |
| 5850 | return FrameAddr; |
| 5851 | } |
| 5852 | |
| 5853 | // FIXME? Maybe this could be a TableGen attribute on some registers and |
| 5854 | // this table could be generated automatically from RegInfo. |
| 5855 | Register ARMTargetLowering::getRegisterByName(const char* RegName, LLT VT, |
| 5856 | const MachineFunction &MF) const { |
| 5857 | return StringSwitch<Register>(RegName) |
| 5858 | .Case(S: "sp" , Value: ARM::SP) |
| 5859 | .Default(Value: Register()); |
| 5860 | } |
| 5861 | |
// The result is a 64-bit value, so split it into two 32-bit values and
// return them as a pair of values.
| 5864 | static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results, |
| 5865 | SelectionDAG &DAG) { |
| 5866 | SDLoc DL(N); |
| 5867 | |
  // This function is only supposed to be called for an i64 result type.
| 5869 | assert(N->getValueType(0) == MVT::i64 |
| 5870 | && "ExpandREAD_REGISTER called for non-i64 type result." ); |
| 5871 | |
| 5872 | SDValue Read = DAG.getNode(Opcode: ISD::READ_REGISTER, DL, |
| 5873 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32, VT3: MVT::Other), |
| 5874 | N1: N->getOperand(Num: 0), |
| 5875 | N2: N->getOperand(Num: 1)); |
| 5876 | |
| 5877 | Results.push_back(Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL, VT: MVT::i64, N1: Read.getValue(R: 0), |
| 5878 | N2: Read.getValue(R: 1))); |
| 5879 | Results.push_back(Elt: Read.getValue(R: 2)); // Chain |
| 5880 | } |
| 5881 | |
| 5882 | /// \p BC is a bitcast that is about to be turned into a VMOVDRR. |
| 5883 | /// When \p DstVT, the destination type of \p BC, is on the vector |
| 5884 | /// register bank and the source of bitcast, \p Op, operates on the same bank, |
| 5885 | /// it might be possible to combine them, such that everything stays on the |
| 5886 | /// vector register bank. |
/// \return The node that would replace \p BC, if the combine
/// is possible.
| 5889 | static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC, |
| 5890 | SelectionDAG &DAG) { |
| 5891 | SDValue Op = BC->getOperand(Num: 0); |
| 5892 | EVT DstVT = BC->getValueType(ResNo: 0); |
| 5893 | |
| 5894 | // The only vector instruction that can produce a scalar (remember, |
| 5895 | // since the bitcast was about to be turned into VMOVDRR, the source |
| 5896 | // type is i64) from a vector is EXTRACT_VECTOR_ELT. |
| 5897 | // Moreover, we can do this combine only if there is one use. |
| 5898 | // Finally, if the destination type is not a vector, there is not |
// much point in forcing everything on the vector bank.
| 5900 | if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
| 5901 | !Op.hasOneUse()) |
| 5902 | return SDValue(); |
| 5903 | |
| 5904 | // If the index is not constant, we will introduce an additional |
| 5905 | // multiply that will stick. |
| 5906 | // Give up in that case. |
| 5907 | ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Val: Op.getOperand(i: 1)); |
| 5908 | if (!Index) |
| 5909 | return SDValue(); |
| 5910 | unsigned DstNumElt = DstVT.getVectorNumElements(); |
| 5911 | |
| 5912 | // Compute the new index. |
| 5913 | const APInt &APIntIndex = Index->getAPIntValue(); |
| 5914 | APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt); |
| 5915 | NewIndex *= APIntIndex; |
| 5916 | // Check if the new constant index fits into i32. |
| 5917 | if (NewIndex.getBitWidth() > 32) |
| 5918 | return SDValue(); |
| 5919 | |
| 5920 | // vMTy bitcast(i64 extractelt vNi64 src, i32 index) -> |
| 5921 | // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M) |
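  // For example, with DstVT == v2f32:
  //   (v2f32 bitcast (i64 extractelt v2i64 src, i32 1))
  //     -> (v2f32 extract_subvector (v4f32 bitcast src), i32 2)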
| 5922 | SDLoc dl(Op); |
  SDValue ExtractSrc = Op.getOperand(i: 0);
| 5924 | EVT VecVT = EVT::getVectorVT( |
| 5925 | Context&: *DAG.getContext(), VT: DstVT.getScalarType(), |
| 5926 | NumElements: ExtractSrc.getValueType().getVectorNumElements() * DstNumElt); |
| 5927 | SDValue BitCast = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecVT, Operand: ExtractSrc); |
| 5928 | return DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: DstVT, N1: BitCast, |
| 5929 | N2: DAG.getConstant(Val: NewIndex.getZExtValue(), DL: dl, VT: MVT::i32)); |
| 5930 | } |
| 5931 | |
| 5932 | /// ExpandBITCAST - If the target supports VFP, this function is called to |
| 5933 | /// expand a bit convert where either the source or destination type is i64 to |
| 5934 | /// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 |
| 5935 | /// operand type is illegal (e.g., v2f32 for a target that doesn't support |
| 5936 | /// vectors), since the legalizer won't know what to do with that. |
| 5937 | SDValue ARMTargetLowering::ExpandBITCAST(SDNode *N, SelectionDAG &DAG, |
| 5938 | const ARMSubtarget *Subtarget) const { |
| 5939 | SDLoc dl(N); |
| 5940 | SDValue Op = N->getOperand(Num: 0); |
| 5941 | |
| 5942 | // This function is only supposed to be called for i16 and i64 types, either |
| 5943 | // as the source or destination of the bit convert. |
| 5944 | EVT SrcVT = Op.getValueType(); |
| 5945 | EVT DstVT = N->getValueType(ResNo: 0); |
| 5946 | |
| 5947 | if ((SrcVT == MVT::i16 || SrcVT == MVT::i32) && |
| 5948 | (DstVT == MVT::f16 || DstVT == MVT::bf16)) |
| 5949 | return MoveToHPR(dl: SDLoc(N), DAG, LocVT: MVT::i32, ValVT: DstVT.getSimpleVT(), |
| 5950 | Val: DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: SDLoc(N), VT: MVT::i32, Operand: Op)); |
| 5951 | |
| 5952 | if ((DstVT == MVT::i16 || DstVT == MVT::i32) && |
| 5953 | (SrcVT == MVT::f16 || SrcVT == MVT::bf16)) { |
| 5954 | if (Subtarget->hasFullFP16() && !Subtarget->hasBF16()) |
| 5955 | Op = DAG.getBitcast(VT: MVT::f16, V: Op); |
| 5956 | return DAG.getNode( |
| 5957 | Opcode: ISD::TRUNCATE, DL: SDLoc(N), VT: DstVT, |
| 5958 | Operand: MoveFromHPR(dl: SDLoc(N), DAG, LocVT: MVT::i32, ValVT: SrcVT.getSimpleVT(), Val: Op)); |
| 5959 | } |
| 5960 | |
| 5961 | if (!(SrcVT == MVT::i64 || DstVT == MVT::i64)) |
| 5962 | return SDValue(); |
| 5963 | |
| 5964 | // Turn i64->f64 into VMOVDRR. |
| 5965 | if (SrcVT == MVT::i64 && isTypeLegal(VT: DstVT)) { |
| 5966 | // Do not force values to GPRs (this is what VMOVDRR does for the inputs) |
| 5967 | // if we can combine the bitcast with its source. |
| 5968 | if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(BC: N, DAG)) |
| 5969 | return Val; |
| 5970 | SDValue Lo, Hi; |
| 5971 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: Op, DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
| 5972 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: DstVT, |
| 5973 | Operand: DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: Lo, N2: Hi)); |
| 5974 | } |
| 5975 | |
| 5976 | // Turn f64->i64 into VMOVRRD. |
| 5977 | if (DstVT == MVT::i64 && isTypeLegal(VT: SrcVT)) { |
| 5978 | SDValue Cvt; |
| 5979 | if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() && |
| 5980 | SrcVT.getVectorNumElements() > 1) |
| 5981 | Cvt = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
| 5982 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
| 5983 | N: DAG.getNode(Opcode: ARMISD::VREV64, DL: dl, VT: SrcVT, Operand: Op)); |
| 5984 | else |
| 5985 | Cvt = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
| 5986 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: Op); |
| 5987 | // Merge the pieces into a single i64 value. |
| 5988 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Cvt, N2: Cvt.getValue(R: 1)); |
| 5989 | } |
| 5990 | |
| 5991 | return SDValue(); |
| 5992 | } |
| 5993 | |
| 5994 | /// getZeroVector - Returns a vector of specified type with all zero elements. |
| 5995 | /// Zero vectors are used to represent vector negation and in those cases |
| 5996 | /// will be implemented with the NEON VNEG instruction. However, VNEG does |
| 5997 | /// not support i64 elements, so sometimes the zero vectors will need to be |
| 5998 | /// explicitly constructed. Regardless, use a canonical VMOV to create the |
| 5999 | /// zero vector. |
| 6000 | static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) { |
| 6001 | assert(VT.isVector() && "Expected a vector type" ); |
| 6002 | // The canonical modified immediate encoding of a zero vector is....0! |
| 6003 | SDValue EncodedVal = DAG.getTargetConstant(Val: 0, DL: dl, VT: MVT::i32); |
| 6004 | EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; |
| 6005 | SDValue Vmov = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: VmovVT, Operand: EncodedVal); |
| 6006 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Vmov); |
| 6007 | } |
| 6008 | |
/// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two
/// i32 values and take a 2 x i32 value to shift plus a shift amount.
| 6011 | SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, |
| 6012 | SelectionDAG &DAG) const { |
| 6013 | assert(Op.getNumOperands() == 3 && "Not a double-shift!" ); |
| 6014 | EVT VT = Op.getValueType(); |
| 6015 | unsigned VTBits = VT.getSizeInBits(); |
| 6016 | SDLoc dl(Op); |
| 6017 | SDValue ShOpLo = Op.getOperand(i: 0); |
| 6018 | SDValue ShOpHi = Op.getOperand(i: 1); |
| 6019 | SDValue ShAmt = Op.getOperand(i: 2); |
| 6020 | SDValue ARMcc; |
| 6021 | unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; |
| 6022 | |
| 6023 | assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); |
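  // Conceptually, for a 64-bit value split into (Lo, Hi) with VTBits == 32:
  //   Lo = ShAmt < 32 ? (Lo >> ShAmt) | (Hi << (32 - ShAmt))
  //                   : Hi >> (ShAmt - 32)   (arithmetic or logical)
  //   Hi = ShAmt < 32 ? Hi >> ShAmt : (sign bits for SRA, 0 for SRL)
  // The two cases are selected below with CMOVs on the sign of ShAmt - 32.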
| 6024 | |
| 6025 | SDValue RevShAmt = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
| 6026 | N1: DAG.getConstant(Val: VTBits, DL: dl, VT: MVT::i32), N2: ShAmt); |
| 6027 | SDValue Tmp1 = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT, N1: ShOpLo, N2: ShAmt); |
  SDValue ExtraShAmt = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, N1: ShAmt,
| 6029 | N2: DAG.getConstant(Val: VTBits, DL: dl, VT: MVT::i32)); |
| 6030 | SDValue Tmp2 = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT, N1: ShOpHi, N2: RevShAmt); |
| 6031 | SDValue LoSmallShift = DAG.getNode(Opcode: ISD::OR, DL: dl, VT, N1: Tmp1, N2: Tmp2); |
| 6032 | SDValue LoBigShift = DAG.getNode(Opcode: Opc, DL: dl, VT, N1: ShOpHi, N2: ExtraShAmt); |
| 6033 | SDValue CmpLo = getARMCmp(LHS: ExtraShAmt, RHS: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), |
| 6034 | CC: ISD::SETGE, ARMcc, DAG, dl); |
| 6035 | SDValue Lo = |
| 6036 | DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: LoSmallShift, N2: LoBigShift, N3: ARMcc, N4: CmpLo); |
| 6037 | |
| 6038 | SDValue HiSmallShift = DAG.getNode(Opcode: Opc, DL: dl, VT, N1: ShOpHi, N2: ShAmt); |
| 6039 | SDValue HiBigShift = Opc == ISD::SRA |
| 6040 | ? DAG.getNode(Opcode: Opc, DL: dl, VT, N1: ShOpHi, |
| 6041 | N2: DAG.getConstant(Val: VTBits - 1, DL: dl, VT)) |
| 6042 | : DAG.getConstant(Val: 0, DL: dl, VT); |
| 6043 | SDValue CmpHi = getARMCmp(LHS: ExtraShAmt, RHS: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), |
| 6044 | CC: ISD::SETGE, ARMcc, DAG, dl); |
| 6045 | SDValue Hi = |
| 6046 | DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: HiSmallShift, N2: HiBigShift, N3: ARMcc, N4: CmpHi); |
| 6047 | |
| 6048 | SDValue Ops[2] = { Lo, Hi }; |
| 6049 | return DAG.getMergeValues(Ops, dl); |
| 6050 | } |
| 6051 | |
/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
| 6054 | SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, |
| 6055 | SelectionDAG &DAG) const { |
| 6056 | assert(Op.getNumOperands() == 3 && "Not a double-shift!" ); |
| 6057 | EVT VT = Op.getValueType(); |
| 6058 | unsigned VTBits = VT.getSizeInBits(); |
| 6059 | SDLoc dl(Op); |
| 6060 | SDValue ShOpLo = Op.getOperand(i: 0); |
| 6061 | SDValue ShOpHi = Op.getOperand(i: 1); |
| 6062 | SDValue ShAmt = Op.getOperand(i: 2); |
| 6063 | SDValue ARMcc; |
| 6064 | |
| 6065 | assert(Op.getOpcode() == ISD::SHL_PARTS); |
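  // Mirror image of LowerShiftRightParts:
  //   Hi = ShAmt < 32 ? (Hi << ShAmt) | (Lo >> (32 - ShAmt))
  //                   : Lo << (ShAmt - 32)
  //   Lo = ShAmt < 32 ? Lo << ShAmt : 0
  // again selected with CMOVs on the sign of ShAmt - 32.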
| 6066 | SDValue RevShAmt = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
| 6067 | N1: DAG.getConstant(Val: VTBits, DL: dl, VT: MVT::i32), N2: ShAmt); |
| 6068 | SDValue Tmp1 = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT, N1: ShOpLo, N2: RevShAmt); |
| 6069 | SDValue Tmp2 = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT, N1: ShOpHi, N2: ShAmt); |
| 6070 | SDValue HiSmallShift = DAG.getNode(Opcode: ISD::OR, DL: dl, VT, N1: Tmp1, N2: Tmp2); |
| 6071 | |
  SDValue ExtraShAmt = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, N1: ShAmt,
| 6073 | N2: DAG.getConstant(Val: VTBits, DL: dl, VT: MVT::i32)); |
| 6074 | SDValue HiBigShift = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT, N1: ShOpLo, N2: ExtraShAmt); |
| 6075 | SDValue CmpHi = getARMCmp(LHS: ExtraShAmt, RHS: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), |
| 6076 | CC: ISD::SETGE, ARMcc, DAG, dl); |
| 6077 | SDValue Hi = |
| 6078 | DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: HiSmallShift, N2: HiBigShift, N3: ARMcc, N4: CmpHi); |
| 6079 | |
| 6080 | SDValue CmpLo = getARMCmp(LHS: ExtraShAmt, RHS: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), |
| 6081 | CC: ISD::SETGE, ARMcc, DAG, dl); |
| 6082 | SDValue LoSmallShift = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT, N1: ShOpLo, N2: ShAmt); |
| 6083 | SDValue Lo = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: LoSmallShift, |
| 6084 | N2: DAG.getConstant(Val: 0, DL: dl, VT), N3: ARMcc, N4: CmpLo); |
| 6085 | |
| 6086 | SDValue Ops[2] = { Lo, Hi }; |
| 6087 | return DAG.getMergeValues(Ops, dl); |
| 6088 | } |
| 6089 | |
| 6090 | SDValue ARMTargetLowering::LowerGET_ROUNDING(SDValue Op, |
| 6091 | SelectionDAG &DAG) const { |
| 6092 | // The rounding mode is in bits 23:22 of the FPSCR. |
| 6093 | // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0 |
| 6094 | // The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3) |
| 6095 | // so that the shift + and get folded into a bitfield extract. |
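  // For example, FPSCR[23:22] == 0b11 (RM 3): adding 1 << 22 carries out of
  // the two-bit field, bits 23:22 of the sum become 0b00, and the returned
  // FLT_ROUNDS value is 0, matching the 3->0 case of the mapping above.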
| 6096 | SDLoc dl(Op); |
| 6097 | SDValue Chain = Op.getOperand(i: 0); |
| 6098 | SDValue Ops[] = {Chain, |
| 6099 | DAG.getConstant(Val: Intrinsic::arm_get_fpscr, DL: dl, VT: MVT::i32)}; |
| 6100 | |
| 6101 | SDValue FPSCR = |
| 6102 | DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL: dl, ResultTys: {MVT::i32, MVT::Other}, Ops); |
| 6103 | Chain = FPSCR.getValue(R: 1); |
| 6104 | SDValue FltRounds = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::i32, N1: FPSCR, |
| 6105 | N2: DAG.getConstant(Val: 1U << 22, DL: dl, VT: MVT::i32)); |
| 6106 | SDValue RMODE = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i32, N1: FltRounds, |
| 6107 | N2: DAG.getConstant(Val: 22, DL: dl, VT: MVT::i32)); |
| 6108 | SDValue And = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, N1: RMODE, |
| 6109 | N2: DAG.getConstant(Val: 3, DL: dl, VT: MVT::i32)); |
| 6110 | return DAG.getMergeValues(Ops: {And, Chain}, dl); |
| 6111 | } |
| 6112 | |
| 6113 | SDValue ARMTargetLowering::LowerSET_ROUNDING(SDValue Op, |
| 6114 | SelectionDAG &DAG) const { |
| 6115 | SDLoc DL(Op); |
| 6116 | SDValue Chain = Op->getOperand(Num: 0); |
| 6117 | SDValue RMValue = Op->getOperand(Num: 1); |
| 6118 | |
| 6119 | // The rounding mode is in bits 23:22 of the FPSCR. |
| 6120 | // The llvm.set.rounding argument value to ARM rounding mode value mapping |
| 6121 | // is 0->3, 1->0, 2->1, 3->2. The formula we use to implement this is |
| 6122 | // ((arg - 1) & 3) << 22). |
| 6123 | // |
| 6124 | // It is expected that the argument of llvm.set.rounding is within the |
| 6125 | // segment [0, 3], so NearestTiesToAway (4) is not handled here. It is |
| 6126 | // responsibility of the code generated llvm.set.rounding to ensure this |
| 6127 | // condition. |
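  // For example, llvm.set.rounding(0) (round toward zero) computes
  // ((0 - 1) & 3) == 3, the ARM encoding for round-toward-zero, while
  // llvm.set.rounding(1) (round to nearest) computes 0, the ARM default.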
| 6128 | |
| 6129 | // Calculate new value of FPSCR[23:22]. |
| 6130 | RMValue = DAG.getNode(Opcode: ISD::SUB, DL, VT: MVT::i32, N1: RMValue, |
| 6131 | N2: DAG.getConstant(Val: 1, DL, VT: MVT::i32)); |
| 6132 | RMValue = DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: RMValue, |
| 6133 | N2: DAG.getConstant(Val: 0x3, DL, VT: MVT::i32)); |
| 6134 | RMValue = DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: RMValue, |
| 6135 | N2: DAG.getConstant(Val: ARM::RoundingBitsPos, DL, VT: MVT::i32)); |
| 6136 | |
| 6137 | // Get current value of FPSCR. |
| 6138 | SDValue Ops[] = {Chain, |
| 6139 | DAG.getConstant(Val: Intrinsic::arm_get_fpscr, DL, VT: MVT::i32)}; |
| 6140 | SDValue FPSCR = |
| 6141 | DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL, ResultTys: {MVT::i32, MVT::Other}, Ops); |
| 6142 | Chain = FPSCR.getValue(R: 1); |
| 6143 | FPSCR = FPSCR.getValue(R: 0); |
| 6144 | |
| 6145 | // Put new rounding mode into FPSCR[23:22]. |
| 6146 | const unsigned RMMask = ~(ARM::Rounding::rmMask << ARM::RoundingBitsPos); |
| 6147 | FPSCR = DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: FPSCR, |
| 6148 | N2: DAG.getConstant(Val: RMMask, DL, VT: MVT::i32)); |
| 6149 | FPSCR = DAG.getNode(Opcode: ISD::OR, DL, VT: MVT::i32, N1: FPSCR, N2: RMValue); |
| 6150 | SDValue Ops2[] = { |
| 6151 | Chain, DAG.getConstant(Val: Intrinsic::arm_set_fpscr, DL, VT: MVT::i32), FPSCR}; |
| 6152 | return DAG.getNode(Opcode: ISD::INTRINSIC_VOID, DL, VT: MVT::Other, Ops: Ops2); |
| 6153 | } |
| 6154 | |
| 6155 | SDValue ARMTargetLowering::LowerSET_FPMODE(SDValue Op, |
| 6156 | SelectionDAG &DAG) const { |
| 6157 | SDLoc DL(Op); |
| 6158 | SDValue Chain = Op->getOperand(Num: 0); |
| 6159 | SDValue Mode = Op->getOperand(Num: 1); |
| 6160 | |
| 6161 | // Generate nodes to build: |
| 6162 | // FPSCR = (FPSCR & FPStatusBits) | (Mode & ~FPStatusBits) |
| 6163 | SDValue Ops[] = {Chain, |
| 6164 | DAG.getConstant(Val: Intrinsic::arm_get_fpscr, DL, VT: MVT::i32)}; |
| 6165 | SDValue FPSCR = |
| 6166 | DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL, ResultTys: {MVT::i32, MVT::Other}, Ops); |
| 6167 | Chain = FPSCR.getValue(R: 1); |
| 6168 | FPSCR = FPSCR.getValue(R: 0); |
| 6169 | |
| 6170 | SDValue FPSCRMasked = |
| 6171 | DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: FPSCR, |
| 6172 | N2: DAG.getConstant(Val: ARM::FPStatusBits, DL, VT: MVT::i32)); |
| 6173 | SDValue InputMasked = |
| 6174 | DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: Mode, |
| 6175 | N2: DAG.getConstant(Val: ~ARM::FPStatusBits, DL, VT: MVT::i32)); |
| 6176 | FPSCR = DAG.getNode(Opcode: ISD::OR, DL, VT: MVT::i32, N1: FPSCRMasked, N2: InputMasked); |
| 6177 | |
| 6178 | SDValue Ops2[] = { |
| 6179 | Chain, DAG.getConstant(Val: Intrinsic::arm_set_fpscr, DL, VT: MVT::i32), FPSCR}; |
| 6180 | return DAG.getNode(Opcode: ISD::INTRINSIC_VOID, DL, VT: MVT::Other, Ops: Ops2); |
| 6181 | } |
| 6182 | |
| 6183 | SDValue ARMTargetLowering::LowerRESET_FPMODE(SDValue Op, |
| 6184 | SelectionDAG &DAG) const { |
| 6185 | SDLoc DL(Op); |
| 6186 | SDValue Chain = Op->getOperand(Num: 0); |
| 6187 | |
| 6188 | // To get the default FP mode all control bits are cleared: |
| 6189 | // FPSCR = FPSCR & (FPStatusBits | FPReservedBits) |
| 6190 | SDValue Ops[] = {Chain, |
| 6191 | DAG.getConstant(Val: Intrinsic::arm_get_fpscr, DL, VT: MVT::i32)}; |
| 6192 | SDValue FPSCR = |
| 6193 | DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL, ResultTys: {MVT::i32, MVT::Other}, Ops); |
| 6194 | Chain = FPSCR.getValue(R: 1); |
| 6195 | FPSCR = FPSCR.getValue(R: 0); |
| 6196 | |
| 6197 | SDValue FPSCRMasked = DAG.getNode( |
| 6198 | Opcode: ISD::AND, DL, VT: MVT::i32, N1: FPSCR, |
| 6199 | N2: DAG.getConstant(Val: ARM::FPStatusBits | ARM::FPReservedBits, DL, VT: MVT::i32)); |
| 6200 | SDValue Ops2[] = {Chain, |
| 6201 | DAG.getConstant(Val: Intrinsic::arm_set_fpscr, DL, VT: MVT::i32), |
| 6202 | FPSCRMasked}; |
| 6203 | return DAG.getNode(Opcode: ISD::INTRINSIC_VOID, DL, VT: MVT::Other, Ops: Ops2); |
| 6204 | } |
| 6205 | |
| 6206 | static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, |
| 6207 | const ARMSubtarget *ST) { |
| 6208 | SDLoc dl(N); |
| 6209 | EVT VT = N->getValueType(ResNo: 0); |
| 6210 | if (VT.isVector() && ST->hasNEON()) { |
| 6211 | |
| 6212 | // Compute the least significant set bit: LSB = X & -X |
| 6213 | SDValue X = N->getOperand(Num: 0); |
| 6214 | SDValue NX = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: getZeroVector(VT, DAG, dl), N2: X); |
| 6215 | SDValue LSB = DAG.getNode(Opcode: ISD::AND, DL: dl, VT, N1: X, N2: NX); |
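    // For example, x = 0b0110'1000 gives LSB = x & -x = 0b0000'1000; the
    // branches below then compute cttz from LSB, e.g.
    // ctpop(LSB - 1) = ctpop(0b0000'0111) = 3 = cttz(x).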
| 6216 | |
| 6217 | EVT ElemTy = VT.getVectorElementType(); |
| 6218 | |
| 6219 | if (ElemTy == MVT::i8) { |
| 6220 | // Compute with: cttz(x) = ctpop(lsb - 1) |
| 6221 | SDValue One = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT, |
| 6222 | Operand: DAG.getTargetConstant(Val: 1, DL: dl, VT: ElemTy)); |
| 6223 | SDValue Bits = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: LSB, N2: One); |
| 6224 | return DAG.getNode(Opcode: ISD::CTPOP, DL: dl, VT, Operand: Bits); |
| 6225 | } |
| 6226 | |
| 6227 | if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) && |
| 6228 | (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) { |
| 6229 | // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0 |
| 6230 | unsigned NumBits = ElemTy.getSizeInBits(); |
| 6231 | SDValue WidthMinus1 = |
| 6232 | DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT, |
| 6233 | Operand: DAG.getTargetConstant(Val: NumBits - 1, DL: dl, VT: ElemTy)); |
| 6234 | SDValue CTLZ = DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT, Operand: LSB); |
| 6235 | return DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: WidthMinus1, N2: CTLZ); |
| 6236 | } |
| 6237 | |
| 6238 | // Compute with: cttz(x) = ctpop(lsb - 1) |
| 6239 | |
| 6240 | // Compute LSB - 1. |
| 6241 | SDValue Bits; |
| 6242 | if (ElemTy == MVT::i64) { |
| 6243 | // Load constant 0xffff'ffff'ffff'ffff to register. |
| 6244 | SDValue FF = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT, |
| 6245 | Operand: DAG.getTargetConstant(Val: 0x1eff, DL: dl, VT: MVT::i32)); |
| 6246 | Bits = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: LSB, N2: FF); |
| 6247 | } else { |
| 6248 | SDValue One = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT, |
| 6249 | Operand: DAG.getTargetConstant(Val: 1, DL: dl, VT: ElemTy)); |
| 6250 | Bits = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: LSB, N2: One); |
| 6251 | } |
| 6252 | return DAG.getNode(Opcode: ISD::CTPOP, DL: dl, VT, Operand: Bits); |
| 6253 | } |
| 6254 | |
| 6255 | if (!ST->hasV6T2Ops()) |
| 6256 | return SDValue(); |
| 6257 | |
| 6258 | SDValue rbit = DAG.getNode(Opcode: ISD::BITREVERSE, DL: dl, VT, Operand: N->getOperand(Num: 0)); |
| 6259 | return DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT, Operand: rbit); |
| 6260 | } |
| 6261 | |
| 6262 | static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG, |
| 6263 | const ARMSubtarget *ST) { |
| 6264 | EVT VT = N->getValueType(ResNo: 0); |
| 6265 | SDLoc DL(N); |
| 6266 | |
| 6267 | assert(ST->hasNEON() && "Custom ctpop lowering requires NEON." ); |
| 6268 | assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 || |
| 6269 | VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) && |
| 6270 | "Unexpected type for custom ctpop lowering" ); |
| 6271 | |
| 6272 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 6273 | EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8; |
| 6274 | SDValue Res = DAG.getBitcast(VT: VT8Bit, V: N->getOperand(Num: 0)); |
| 6275 | Res = DAG.getNode(Opcode: ISD::CTPOP, DL, VT: VT8Bit, Operand: Res); |
| 6276 | |
| 6277 | // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds. |
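  // For example, a v4i32 ctpop becomes a v16i8 ctpop followed by two vpaddlu
  // steps (v16i8 -> v8i16 -> v4i32), each adding adjacent lanes into a lane
  // of twice the width.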
| 6278 | unsigned EltSize = 8; |
| 6279 | unsigned NumElts = VT.is64BitVector() ? 8 : 16; |
| 6280 | while (EltSize != VT.getScalarSizeInBits()) { |
| 6281 | SmallVector<SDValue, 8> Ops; |
| 6282 | Ops.push_back(Elt: DAG.getConstant(Val: Intrinsic::arm_neon_vpaddlu, DL, |
| 6283 | VT: TLI.getPointerTy(DL: DAG.getDataLayout()))); |
| 6284 | Ops.push_back(Elt: Res); |
| 6285 | |
| 6286 | EltSize *= 2; |
| 6287 | NumElts /= 2; |
| 6288 | MVT WidenVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: EltSize), NumElements: NumElts); |
| 6289 | Res = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT: WidenVT, Ops); |
| 6290 | } |
| 6291 | |
| 6292 | return Res; |
| 6293 | } |
| 6294 | |
/// getVShiftImm - Check if this is a valid build_vector for the immediate
| 6296 | /// operand of a vector shift operation, where all the elements of the |
| 6297 | /// build_vector must have the same constant integer value. |
| 6298 | static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { |
| 6299 | // Ignore bit_converts. |
| 6300 | while (Op.getOpcode() == ISD::BITCAST) |
| 6301 | Op = Op.getOperand(i: 0); |
| 6302 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Val: Op.getNode()); |
| 6303 | APInt SplatBits, SplatUndef; |
| 6304 | unsigned SplatBitSize; |
| 6305 | bool HasAnyUndefs; |
| 6306 | if (!BVN || |
| 6307 | !BVN->isConstantSplat(SplatValue&: SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, |
| 6308 | MinSplatBits: ElementBits) || |
| 6309 | SplatBitSize > ElementBits) |
| 6310 | return false; |
| 6311 | Cnt = SplatBits.getSExtValue(); |
| 6312 | return true; |
| 6313 | } |
| 6314 | |
| 6315 | /// isVShiftLImm - Check if this is a valid build_vector for the immediate |
| 6316 | /// operand of a vector shift left operation. That value must be in the range: |
| 6317 | /// 0 <= Value < ElementBits for a left shift; or |
| 6318 | /// 0 <= Value <= ElementBits for a long left shift. |
| 6319 | static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { |
| 6320 | assert(VT.isVector() && "vector shift count is not a vector type" ); |
| 6321 | int64_t ElementBits = VT.getScalarSizeInBits(); |
| 6322 | if (!getVShiftImm(Op, ElementBits, Cnt)) |
| 6323 | return false; |
| 6324 | return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits); |
| 6325 | } |
| 6326 | |
| 6327 | /// isVShiftRImm - Check if this is a valid build_vector for the immediate |
| 6328 | /// operand of a vector shift right operation. For a shift opcode, the value |
/// is positive, but for an intrinsic the value must be negative. The
| 6330 | /// absolute value must be in the range: |
| 6331 | /// 1 <= |Value| <= ElementBits for a right shift; or |
| 6332 | /// 1 <= |Value| <= ElementBits/2 for a narrow right shift. |
| 6333 | static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, |
| 6334 | int64_t &Cnt) { |
| 6335 | assert(VT.isVector() && "vector shift count is not a vector type" ); |
| 6336 | int64_t ElementBits = VT.getScalarSizeInBits(); |
| 6337 | if (!getVShiftImm(Op, ElementBits, Cnt)) |
| 6338 | return false; |
| 6339 | if (!isIntrinsic) |
| 6340 | return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits)); |
| 6341 | if (Cnt >= -(isNarrow ? ElementBits / 2 : ElementBits) && Cnt <= -1) { |
| 6342 | Cnt = -Cnt; |
| 6343 | return true; |
| 6344 | } |
| 6345 | return false; |
| 6346 | } |
| 6347 | |
| 6348 | static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, |
| 6349 | const ARMSubtarget *ST) { |
| 6350 | EVT VT = N->getValueType(ResNo: 0); |
| 6351 | SDLoc dl(N); |
| 6352 | int64_t Cnt; |
| 6353 | |
| 6354 | if (!VT.isVector()) |
| 6355 | return SDValue(); |
| 6356 | |
  // We essentially have two forms here: shift by an immediate and shift by a
  // vector register (there are also shifts by a GPR, but those are just
  // handled with a tablegen pattern). We cannot easily match shift by an
  // immediate in tablegen, so we do that here and generate a
  // VSHLIMM/VSHRsIMM/VSHRuIMM. For shifting by a vector, we don't have VSHR,
  // only VSHL (which can be signed or unsigned, and a negative shift
  // indicates a shift right).
| 6363 | if (N->getOpcode() == ISD::SHL) { |
| 6364 | if (isVShiftLImm(Op: N->getOperand(Num: 1), VT, isLong: false, Cnt)) |
| 6365 | return DAG.getNode(Opcode: ARMISD::VSHLIMM, DL: dl, VT, N1: N->getOperand(Num: 0), |
| 6366 | N2: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
| 6367 | return DAG.getNode(Opcode: ARMISD::VSHLu, DL: dl, VT, N1: N->getOperand(Num: 0), |
| 6368 | N2: N->getOperand(Num: 1)); |
| 6369 | } |
| 6370 | |
| 6371 | assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) && |
| 6372 | "unexpected vector shift opcode" ); |
| 6373 | |
| 6374 | if (isVShiftRImm(Op: N->getOperand(Num: 1), VT, isNarrow: false, isIntrinsic: false, Cnt)) { |
| 6375 | unsigned VShiftOpc = |
| 6376 | (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM); |
| 6377 | return DAG.getNode(Opcode: VShiftOpc, DL: dl, VT, N1: N->getOperand(Num: 0), |
| 6378 | N2: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
| 6379 | } |
| 6380 | |
  // We have no operations for the remaining right-shift forms, so emulate
  // them with a shift left by a negated shift amount.
| 6383 | EVT ShiftVT = N->getOperand(Num: 1).getValueType(); |
| 6384 | SDValue NegatedCount = DAG.getNode( |
| 6385 | Opcode: ISD::SUB, DL: dl, VT: ShiftVT, N1: getZeroVector(VT: ShiftVT, DAG, dl), N2: N->getOperand(Num: 1)); |
| 6386 | unsigned VShiftOpc = |
| 6387 | (N->getOpcode() == ISD::SRA ? ARMISD::VSHLs : ARMISD::VSHLu); |
| 6388 | return DAG.getNode(Opcode: VShiftOpc, DL: dl, VT, N1: N->getOperand(Num: 0), N2: NegatedCount); |
| 6389 | } |
| 6390 | |
| 6391 | static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, |
| 6392 | const ARMSubtarget *ST) { |
| 6393 | EVT VT = N->getValueType(ResNo: 0); |
| 6394 | SDLoc dl(N); |
| 6395 | |
| 6396 | // We can get here for a node like i32 = ISD::SHL i32, i64 |
| 6397 | if (VT != MVT::i64) |
| 6398 | return SDValue(); |
| 6399 | |
| 6400 | assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA || |
| 6401 | N->getOpcode() == ISD::SHL) && |
| 6402 | "Unknown shift to lower!" ); |
| 6403 | |
| 6404 | unsigned ShOpc = N->getOpcode(); |
| 6405 | if (ST->hasMVEIntegerOps()) { |
| 6406 | SDValue ShAmt = N->getOperand(Num: 1); |
| 6407 | unsigned ShPartsOpc = ARMISD::LSLL; |
| 6408 | ConstantSDNode *Con = dyn_cast<ConstantSDNode>(Val&: ShAmt); |
| 6409 | |
    // If the shift amount is zero, is 32 or more, or (when non-constant) is
    // wider than 64 bits, fall back to the default expansion.
| 6412 | if ((!Con && ShAmt->getValueType(ResNo: 0).getSizeInBits() > 64) || |
| 6413 | (Con && (Con->getAPIntValue() == 0 || Con->getAPIntValue().uge(RHS: 32)))) |
| 6414 | return SDValue(); |
| 6415 | |
| 6416 | // Extract the lower 32 bits of the shift amount if it's not an i32 |
| 6417 | if (ShAmt->getValueType(ResNo: 0) != MVT::i32) |
| 6418 | ShAmt = DAG.getZExtOrTrunc(Op: ShAmt, DL: dl, VT: MVT::i32); |
| 6419 | |
| 6420 | if (ShOpc == ISD::SRL) { |
| 6421 | if (!Con) |
| 6422 | // There is no t2LSRLr instruction so negate and perform an lsll if the |
| 6423 | // shift amount is in a register, emulating a right shift. |
| 6424 | ShAmt = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
| 6425 | N1: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), N2: ShAmt); |
| 6426 | else |
| 6427 | // Else generate an lsrl on the immediate shift amount |
| 6428 | ShPartsOpc = ARMISD::LSRL; |
| 6429 | } else if (ShOpc == ISD::SRA) |
| 6430 | ShPartsOpc = ARMISD::ASRL; |
| 6431 | |
| 6432 | // Split Lower/Upper 32 bits of the destination/source |
| 6433 | SDValue Lo, Hi; |
| 6434 | std::tie(args&: Lo, args&: Hi) = |
| 6435 | DAG.SplitScalar(N: N->getOperand(Num: 0), DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
| 6436 | // Generate the shift operation as computed above |
| 6437 | Lo = DAG.getNode(Opcode: ShPartsOpc, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N1: Lo, N2: Hi, |
| 6438 | N3: ShAmt); |
| 6439 | // The upper 32 bits come from the second return value of lsll |
| 6440 | Hi = SDValue(Lo.getNode(), 1); |
| 6441 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Lo, N2: Hi); |
| 6442 | } |
| 6443 | |
| 6444 | // We only lower SRA, SRL of 1 here, all others use generic lowering. |
| 6445 | if (!isOneConstant(V: N->getOperand(Num: 1)) || N->getOpcode() == ISD::SHL) |
| 6446 | return SDValue(); |
| 6447 | |
  // If we are in Thumb1 mode, we don't have RRX.
| 6449 | if (ST->isThumb1Only()) |
| 6450 | return SDValue(); |
| 6451 | |
| 6452 | // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. |
| 6453 | SDValue Lo, Hi; |
| 6454 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: N->getOperand(Num: 0), DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
| 6455 | |
| 6456 | // First, build a LSRS1/ASRS1 op, which shifts the top part by one and |
| 6457 | // captures the shifted out bit into a carry flag. |
| 6458 | unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::LSRS1 : ARMISD::ASRS1; |
| 6459 | Hi = DAG.getNode(Opcode: Opc, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: FlagsVT), N: Hi); |
| 6460 | |
| 6461 | // The low part is an ARMISD::RRX operand, which shifts the carry in. |
| 6462 | Lo = DAG.getNode(Opcode: ARMISD::RRX, DL: dl, VT: MVT::i32, N1: Lo, N2: Hi.getValue(R: 1)); |
| 6463 | |
| 6464 | // Merge the pieces into a single i64 value. |
| 6465 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Lo, N2: Hi); |
| 6466 | } |
| 6467 | |
| 6468 | static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG, |
| 6469 | const ARMSubtarget *ST) { |
| 6470 | bool Invert = false; |
| 6471 | bool Swap = false; |
| 6472 | unsigned Opc = ARMCC::AL; |
| 6473 | |
| 6474 | SDValue Op0 = Op.getOperand(i: 0); |
| 6475 | SDValue Op1 = Op.getOperand(i: 1); |
| 6476 | SDValue CC = Op.getOperand(i: 2); |
| 6477 | EVT VT = Op.getValueType(); |
| 6478 | ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(Val&: CC)->get(); |
| 6479 | SDLoc dl(Op); |
| 6480 | |
| 6481 | EVT CmpVT; |
| 6482 | if (ST->hasNEON()) |
| 6483 | CmpVT = Op0.getValueType().changeVectorElementTypeToInteger(); |
| 6484 | else { |
| 6485 | assert(ST->hasMVEIntegerOps() && |
| 6486 | "No hardware support for integer vector comparison!" ); |
| 6487 | |
| 6488 | if (Op.getValueType().getVectorElementType() != MVT::i1) |
| 6489 | return SDValue(); |
| 6490 | |
| 6491 | // Make sure we expand floating point setcc to scalar if we do not have |
| 6492 | // mve.fp, so that we can handle them from there. |
| 6493 | if (Op0.getValueType().isFloatingPoint() && !ST->hasMVEFloatOps()) |
| 6494 | return SDValue(); |
| 6495 | |
| 6496 | CmpVT = VT; |
| 6497 | } |
| 6498 | |
| 6499 | if (Op0.getValueType().getVectorElementType() == MVT::i64 && |
| 6500 | (SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE)) { |
| 6501 | // Special-case integer 64-bit equality comparisons. They aren't legal, |
| 6502 | // but they can be lowered with a few vector instructions. |
| 6503 | unsigned CmpElements = CmpVT.getVectorNumElements() * 2; |
| 6504 | EVT SplitVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::i32, NumElements: CmpElements); |
| 6505 | SDValue CastOp0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: SplitVT, Operand: Op0); |
| 6506 | SDValue CastOp1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: SplitVT, Operand: Op1); |
| 6507 | SDValue Cmp = DAG.getNode(Opcode: ISD::SETCC, DL: dl, VT: SplitVT, N1: CastOp0, N2: CastOp1, |
| 6508 | N3: DAG.getCondCode(Cond: ISD::SETEQ)); |
| 6509 | SDValue Reversed = DAG.getNode(Opcode: ARMISD::VREV64, DL: dl, VT: SplitVT, Operand: Cmp); |
| 6510 | SDValue Merged = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: SplitVT, N1: Cmp, N2: Reversed); |
| 6511 | Merged = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: CmpVT, Operand: Merged); |
| 6512 | if (SetCCOpcode == ISD::SETNE) |
| 6513 | Merged = DAG.getNOT(DL: dl, Val: Merged, VT: CmpVT); |
| 6514 | Merged = DAG.getSExtOrTrunc(Op: Merged, DL: dl, VT); |
| 6515 | return Merged; |
| 6516 | } |
| 6517 | |
| 6518 | if (CmpVT.getVectorElementType() == MVT::i64) |
| 6519 | // 64-bit comparisons are not legal in general. |
| 6520 | return SDValue(); |
| 6521 | |
| 6522 | if (Op1.getValueType().isFloatingPoint()) { |
| 6523 | switch (SetCCOpcode) { |
| 6524 | default: llvm_unreachable("Illegal FP comparison" ); |
| 6525 | case ISD::SETUNE: |
| 6526 | case ISD::SETNE: |
| 6527 | if (ST->hasMVEFloatOps()) { |
| 6528 | Opc = ARMCC::NE; break; |
| 6529 | } else { |
| 6530 | Invert = true; [[fallthrough]]; |
| 6531 | } |
| 6532 | case ISD::SETOEQ: |
| 6533 | case ISD::SETEQ: Opc = ARMCC::EQ; break; |
| 6534 | case ISD::SETOLT: |
| 6535 | case ISD::SETLT: Swap = true; [[fallthrough]]; |
| 6536 | case ISD::SETOGT: |
| 6537 | case ISD::SETGT: Opc = ARMCC::GT; break; |
| 6538 | case ISD::SETOLE: |
| 6539 | case ISD::SETLE: Swap = true; [[fallthrough]]; |
| 6540 | case ISD::SETOGE: |
| 6541 | case ISD::SETGE: Opc = ARMCC::GE; break; |
| 6542 | case ISD::SETUGE: Swap = true; [[fallthrough]]; |
| 6543 | case ISD::SETULE: Invert = true; Opc = ARMCC::GT; break; |
| 6544 | case ISD::SETUGT: Swap = true; [[fallthrough]]; |
| 6545 | case ISD::SETULT: Invert = true; Opc = ARMCC::GE; break; |
| 6546 | case ISD::SETUEQ: Invert = true; [[fallthrough]]; |
| 6547 | case ISD::SETONE: { |
| 6548 | // Expand this to (OLT | OGT). |
| 6549 | SDValue TmpOp0 = DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT: CmpVT, N1: Op1, N2: Op0, |
| 6550 | N3: DAG.getConstant(Val: ARMCC::GT, DL: dl, VT: MVT::i32)); |
| 6551 | SDValue TmpOp1 = DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT: CmpVT, N1: Op0, N2: Op1, |
| 6552 | N3: DAG.getConstant(Val: ARMCC::GT, DL: dl, VT: MVT::i32)); |
| 6553 | SDValue Result = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: CmpVT, N1: TmpOp0, N2: TmpOp1); |
| 6554 | if (Invert) |
| 6555 | Result = DAG.getNOT(DL: dl, Val: Result, VT); |
| 6556 | return Result; |
| 6557 | } |
| 6558 | case ISD::SETUO: Invert = true; [[fallthrough]]; |
| 6559 | case ISD::SETO: { |
| 6560 | // Expand this to (OLT | OGE). |
| 6561 | SDValue TmpOp0 = DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT: CmpVT, N1: Op1, N2: Op0, |
| 6562 | N3: DAG.getConstant(Val: ARMCC::GT, DL: dl, VT: MVT::i32)); |
| 6563 | SDValue TmpOp1 = DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT: CmpVT, N1: Op0, N2: Op1, |
| 6564 | N3: DAG.getConstant(Val: ARMCC::GE, DL: dl, VT: MVT::i32)); |
| 6565 | SDValue Result = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: CmpVT, N1: TmpOp0, N2: TmpOp1); |
| 6566 | if (Invert) |
| 6567 | Result = DAG.getNOT(DL: dl, Val: Result, VT); |
| 6568 | return Result; |
| 6569 | } |
| 6570 | } |
| 6571 | } else { |
| 6572 | // Integer comparisons. |
| 6573 | switch (SetCCOpcode) { |
| 6574 | default: llvm_unreachable("Illegal integer comparison" ); |
| 6575 | case ISD::SETNE: |
| 6576 | if (ST->hasMVEIntegerOps()) { |
| 6577 | Opc = ARMCC::NE; break; |
| 6578 | } else { |
| 6579 | Invert = true; [[fallthrough]]; |
| 6580 | } |
| 6581 | case ISD::SETEQ: Opc = ARMCC::EQ; break; |
| 6582 | case ISD::SETLT: Swap = true; [[fallthrough]]; |
| 6583 | case ISD::SETGT: Opc = ARMCC::GT; break; |
| 6584 | case ISD::SETLE: Swap = true; [[fallthrough]]; |
| 6585 | case ISD::SETGE: Opc = ARMCC::GE; break; |
| 6586 | case ISD::SETULT: Swap = true; [[fallthrough]]; |
| 6587 | case ISD::SETUGT: Opc = ARMCC::HI; break; |
| 6588 | case ISD::SETULE: Swap = true; [[fallthrough]]; |
| 6589 | case ISD::SETUGE: Opc = ARMCC::HS; break; |
| 6590 | } |
| 6591 | |
| 6592 | // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). |
| 6593 | if (ST->hasNEON() && Opc == ARMCC::EQ) { |
| 6594 | SDValue AndOp; |
| 6595 | if (ISD::isBuildVectorAllZeros(N: Op1.getNode())) |
| 6596 | AndOp = Op0; |
| 6597 | else if (ISD::isBuildVectorAllZeros(N: Op0.getNode())) |
| 6598 | AndOp = Op1; |
| 6599 | |
| 6600 | // Ignore bitconvert. |
| 6601 | if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) |
| 6602 | AndOp = AndOp.getOperand(i: 0); |
| 6603 | |
| 6604 | if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { |
| 6605 | Op0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: CmpVT, Operand: AndOp.getOperand(i: 0)); |
| 6606 | Op1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: CmpVT, Operand: AndOp.getOperand(i: 1)); |
| 6607 | SDValue Result = DAG.getNode(Opcode: ARMISD::VTST, DL: dl, VT: CmpVT, N1: Op0, N2: Op1); |
| 6608 | if (!Invert) |
| 6609 | Result = DAG.getNOT(DL: dl, Val: Result, VT); |
| 6610 | return Result; |
| 6611 | } |
| 6612 | } |
| 6613 | } |
| 6614 | |
| 6615 | if (Swap) |
| 6616 | std::swap(a&: Op0, b&: Op1); |
| 6617 | |
| 6618 | // If one of the operands is a constant vector zero, attempt to fold the |
| 6619 | // comparison to a specialized compare-against-zero form. |
| 6620 | if (ISD::isBuildVectorAllZeros(N: Op0.getNode()) && |
| 6621 | (Opc == ARMCC::GE || Opc == ARMCC::GT || Opc == ARMCC::EQ || |
| 6622 | Opc == ARMCC::NE)) { |
| 6623 | if (Opc == ARMCC::GE) |
| 6624 | Opc = ARMCC::LE; |
| 6625 | else if (Opc == ARMCC::GT) |
| 6626 | Opc = ARMCC::LT; |
| 6627 | std::swap(a&: Op0, b&: Op1); |
| 6628 | } |
| 6629 | |
| 6630 | SDValue Result; |
| 6631 | if (ISD::isBuildVectorAllZeros(N: Op1.getNode()) && |
| 6632 | (Opc == ARMCC::GE || Opc == ARMCC::GT || Opc == ARMCC::LE || |
| 6633 | Opc == ARMCC::LT || Opc == ARMCC::NE || Opc == ARMCC::EQ)) |
| 6634 | Result = DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT: CmpVT, N1: Op0, |
| 6635 | N2: DAG.getConstant(Val: Opc, DL: dl, VT: MVT::i32)); |
| 6636 | else |
| 6637 | Result = DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT: CmpVT, N1: Op0, N2: Op1, |
| 6638 | N3: DAG.getConstant(Val: Opc, DL: dl, VT: MVT::i32)); |
| 6639 | |
| 6640 | Result = DAG.getSExtOrTrunc(Op: Result, DL: dl, VT); |
| 6641 | |
| 6642 | if (Invert) |
| 6643 | Result = DAG.getNOT(DL: dl, Val: Result, VT); |
| 6644 | |
| 6645 | return Result; |
| 6646 | } |
| 6647 | |
| 6648 | static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) { |
| 6649 | SDValue LHS = Op.getOperand(i: 0); |
| 6650 | SDValue RHS = Op.getOperand(i: 1); |
| 6651 | SDValue Carry = Op.getOperand(i: 2); |
| 6652 | SDValue Cond = Op.getOperand(i: 3); |
| 6653 | SDLoc DL(Op); |
| 6654 | |
assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
| 6656 | |
// ARMISD::SUBE expects a carry, not a borrow as ISD::USUBO_CARRY produces,
// so we have to invert the carry first.
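// For example, a borrow of 1 from ISD::USUBO_CARRY corresponds to a cleared
// carry flag in ARM's subtract-with-carry semantics, so for a boolean input
// of 0 or 1 the "1 - Carry" below performs the conversion.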
| 6659 | Carry = DAG.getNode(Opcode: ISD::SUB, DL, VT: MVT::i32, |
| 6660 | N1: DAG.getConstant(Val: 1, DL, VT: MVT::i32), N2: Carry); |
| 6661 | // This converts the boolean value carry into the carry flag. |
| 6662 | Carry = ConvertBooleanCarryToCarryFlag(BoolCarry: Carry, DAG); |
| 6663 | |
| 6664 | SDVTList VTs = DAG.getVTList(VT1: LHS.getValueType(), VT2: MVT::i32); |
| 6665 | SDValue Cmp = DAG.getNode(Opcode: ARMISD::SUBE, DL, VTList: VTs, N1: LHS, N2: RHS, N3: Carry); |
| 6666 | |
| 6667 | SDValue FVal = DAG.getConstant(Val: 0, DL, VT: MVT::i32); |
| 6668 | SDValue TVal = DAG.getConstant(Val: 1, DL, VT: MVT::i32); |
| 6669 | SDValue ARMcc = DAG.getConstant( |
| 6670 | Val: IntCCToARMCC(CC: cast<CondCodeSDNode>(Val&: Cond)->get()), DL, VT: MVT::i32); |
| 6671 | return DAG.getNode(Opcode: ARMISD::CMOV, DL, VT: Op.getValueType(), N1: FVal, N2: TVal, N3: ARMcc, |
| 6672 | N4: Cmp.getValue(R: 1)); |
| 6673 | } |
| 6674 | |
| 6675 | /// isVMOVModifiedImm - Check if the specified splat value corresponds to a |
| 6676 | /// valid vector constant for a NEON or MVE instruction with a "modified |
| 6677 | /// immediate" operand (e.g., VMOV). If so, return the encoded value. |
| 6678 | static SDValue isVMOVModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, |
| 6679 | unsigned SplatBitSize, SelectionDAG &DAG, |
| 6680 | const SDLoc &dl, EVT &VT, EVT VectorVT, |
| 6681 | VMOVModImmType type) { |
| 6682 | unsigned OpCmode, Imm; |
| 6683 | bool is128Bits = VectorVT.is128BitVector(); |
| 6684 | |
| 6685 | // SplatBitSize is set to the smallest size that splats the vector, so a |
| 6686 | // zero vector will always have SplatBitSize == 8. However, NEON modified |
// immediate instructions other than VMOV do not support the 8-bit encoding
| 6688 | // of a zero vector, and the default encoding of zero is supposed to be the |
| 6689 | // 32-bit version. |
| 6690 | if (SplatBits == 0) |
| 6691 | SplatBitSize = 32; |
| 6692 | |
| 6693 | switch (SplatBitSize) { |
| 6694 | case 8: |
| 6695 | if (type != VMOVModImm) |
| 6696 | return SDValue(); |
| 6697 | // Any 1-byte value is OK. Op=0, Cmode=1110. |
| 6698 | assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big" ); |
| 6699 | OpCmode = 0xe; |
| 6700 | Imm = SplatBits; |
| 6701 | VT = is128Bits ? MVT::v16i8 : MVT::v8i8; |
| 6702 | break; |
| 6703 | |
| 6704 | case 16: |
| 6705 | // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. |
| 6706 | VT = is128Bits ? MVT::v8i16 : MVT::v4i16; |
| 6707 | if ((SplatBits & ~0xff) == 0) { |
| 6708 | // Value = 0x00nn: Op=x, Cmode=100x. |
| 6709 | OpCmode = 0x8; |
| 6710 | Imm = SplatBits; |
| 6711 | break; |
| 6712 | } |
| 6713 | if ((SplatBits & ~0xff00) == 0) { |
| 6714 | // Value = 0xnn00: Op=x, Cmode=101x. |
| 6715 | OpCmode = 0xa; |
| 6716 | Imm = SplatBits >> 8; |
| 6717 | break; |
| 6718 | } |
| 6719 | return SDValue(); |
| 6720 | |
| 6721 | case 32: |
| 6722 | // NEON's 32-bit VMOV supports splat values where: |
| 6723 | // * only one byte is nonzero, or |
| 6724 | // * the least significant byte is 0xff and the second byte is nonzero, or |
| 6725 | // * the least significant 2 bytes are 0xff and the third is nonzero. |
| 6726 | VT = is128Bits ? MVT::v4i32 : MVT::v2i32; |
| 6727 | if ((SplatBits & ~0xff) == 0) { |
| 6728 | // Value = 0x000000nn: Op=x, Cmode=000x. |
| 6729 | OpCmode = 0; |
| 6730 | Imm = SplatBits; |
| 6731 | break; |
| 6732 | } |
| 6733 | if ((SplatBits & ~0xff00) == 0) { |
| 6734 | // Value = 0x0000nn00: Op=x, Cmode=001x. |
| 6735 | OpCmode = 0x2; |
| 6736 | Imm = SplatBits >> 8; |
| 6737 | break; |
| 6738 | } |
| 6739 | if ((SplatBits & ~0xff0000) == 0) { |
| 6740 | // Value = 0x00nn0000: Op=x, Cmode=010x. |
| 6741 | OpCmode = 0x4; |
| 6742 | Imm = SplatBits >> 16; |
| 6743 | break; |
| 6744 | } |
| 6745 | if ((SplatBits & ~0xff000000) == 0) { |
| 6746 | // Value = 0xnn000000: Op=x, Cmode=011x. |
| 6747 | OpCmode = 0x6; |
| 6748 | Imm = SplatBits >> 24; |
| 6749 | break; |
| 6750 | } |
| 6751 | |
| 6752 | // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC |
| 6753 | if (type == OtherModImm) return SDValue(); |
| 6754 | |
| 6755 | if ((SplatBits & ~0xffff) == 0 && |
| 6756 | ((SplatBits | SplatUndef) & 0xff) == 0xff) { |
| 6757 | // Value = 0x0000nnff: Op=x, Cmode=1100. |
| 6758 | OpCmode = 0xc; |
| 6759 | Imm = SplatBits >> 8; |
| 6760 | break; |
| 6761 | } |
| 6762 | |
| 6763 | // cmode == 0b1101 is not supported for MVE VMVN |
| 6764 | if (type == MVEVMVNModImm) |
| 6765 | return SDValue(); |
| 6766 | |
| 6767 | if ((SplatBits & ~0xffffff) == 0 && |
| 6768 | ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { |
| 6769 | // Value = 0x00nnffff: Op=x, Cmode=1101. |
| 6770 | OpCmode = 0xd; |
| 6771 | Imm = SplatBits >> 16; |
| 6772 | break; |
| 6773 | } |
| 6774 | |
| 6775 | // Note: there are a few 32-bit splat values (specifically: 00ffff00, |
| 6776 | // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not |
| 6777 | // VMOV.I32. A (very) minor optimization would be to replicate the value |
| 6778 | // and fall through here to test for a valid 64-bit splat. But, then the |
| 6779 | // caller would also need to check and handle the change in size. |
| 6780 | return SDValue(); |
| 6781 | |
| 6782 | case 64: { |
| 6783 | if (type != VMOVModImm) |
| 6784 | return SDValue(); |
| 6785 | // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. |
| 6786 | uint64_t BitMask = 0xff; |
| 6787 | unsigned ImmMask = 1; |
| 6788 | Imm = 0; |
| 6789 | for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { |
| 6790 | if (((SplatBits | SplatUndef) & BitMask) == BitMask) { |
| 6791 | Imm |= ImmMask; |
| 6792 | } else if ((SplatBits & BitMask) != 0) { |
| 6793 | return SDValue(); |
| 6794 | } |
| 6795 | BitMask <<= 8; |
| 6796 | ImmMask <<= 1; |
| 6797 | } |
| 6798 | |
| 6799 | // Op=1, Cmode=1110. |
| 6800 | OpCmode = 0x1e; |
| 6801 | VT = is128Bits ? MVT::v2i64 : MVT::v1i64; |
| 6802 | break; |
| 6803 | } |
| 6804 | |
| 6805 | default: |
| 6806 | llvm_unreachable("unexpected size for isVMOVModifiedImm" ); |
| 6807 | } |
| 6808 | |
| 6809 | unsigned EncodedVal = ARM_AM::createVMOVModImm(OpCmode, Val: Imm); |
| 6810 | return DAG.getTargetConstant(Val: EncodedVal, DL: dl, VT: MVT::i32); |
| 6811 | } |
| 6812 | |
| 6813 | SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, |
| 6814 | const ARMSubtarget *ST) const { |
| 6815 | EVT VT = Op.getValueType(); |
| 6816 | bool IsDouble = (VT == MVT::f64); |
| 6817 | ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Val&: Op); |
| 6818 | const APFloat &FPVal = CFP->getValueAPF(); |
| 6819 | |
| 6820 | // Prevent floating-point constants from using literal loads |
| 6821 | // when execute-only is enabled. |
| 6822 | if (ST->genExecuteOnly()) { |
| 6823 | // We shouldn't trigger this for v6m execute-only |
| 6824 | assert((!ST->isThumb1Only() || ST->hasV8MBaselineOps()) && |
| 6825 | "Unexpected architecture" ); |
| 6826 | |
| 6827 | // If we can represent the constant as an immediate, don't lower it |
| 6828 | if (isFPImmLegal(Imm: FPVal, VT)) |
| 6829 | return Op; |
| 6830 | // Otherwise, construct as integer, and move to float register |
| 6831 | APInt INTVal = FPVal.bitcastToAPInt(); |
| 6832 | SDLoc DL(CFP); |
| 6833 | switch (VT.getSimpleVT().SimpleTy) { |
| 6834 | default: |
| 6835 | llvm_unreachable("Unknown floating point type!" ); |
| 6836 | break; |
| 6837 | case MVT::f64: { |
| 6838 | SDValue Lo = DAG.getConstant(Val: INTVal.trunc(width: 32), DL, VT: MVT::i32); |
| 6839 | SDValue Hi = DAG.getConstant(Val: INTVal.lshr(shiftAmt: 32).trunc(width: 32), DL, VT: MVT::i32); |
| 6840 | return DAG.getNode(Opcode: ARMISD::VMOVDRR, DL, VT: MVT::f64, N1: Lo, N2: Hi); |
| 6841 | } |
| 6842 | case MVT::f32: |
| 6843 | return DAG.getNode(Opcode: ARMISD::VMOVSR, DL, VT, |
| 6844 | Operand: DAG.getConstant(Val: INTVal, DL, VT: MVT::i32)); |
| 6845 | } |
| 6846 | } |
| 6847 | |
| 6848 | if (!ST->hasVFP3Base()) |
| 6849 | return SDValue(); |
| 6850 | |
| 6851 | // Use the default (constant pool) lowering for double constants when we have |
| 6852 | // an SP-only FPU |
| 6853 | if (IsDouble && !Subtarget->hasFP64()) |
| 6854 | return SDValue(); |
| 6855 | |
| 6856 | // Try splatting with a VMOV.f32... |
| 6857 | int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPImm: FPVal) : ARM_AM::getFP32Imm(FPImm: FPVal); |
| 6858 | |
| 6859 | if (ImmVal != -1) { |
| 6860 | if (IsDouble || !ST->useNEONForSinglePrecisionFP()) { |
| 6861 | // We have code in place to select a valid ConstantFP already, no need to |
| 6862 | // do any mangling. |
| 6863 | return Op; |
| 6864 | } |
| 6865 | |
| 6866 | // It's a float and we are trying to use NEON operations where |
| 6867 | // possible. Lower it to a splat followed by an extract. |
| 6868 | SDLoc DL(Op); |
| 6869 | SDValue NewVal = DAG.getTargetConstant(Val: ImmVal, DL, VT: MVT::i32); |
| 6870 | SDValue VecConstant = DAG.getNode(Opcode: ARMISD::VMOVFPIMM, DL, VT: MVT::v2f32, |
| 6871 | Operand: NewVal); |
| 6872 | return DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: MVT::f32, N1: VecConstant, |
| 6873 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
| 6874 | } |
| 6875 | |
// The rest of our options are NEON-only; make sure that's allowed before
// proceeding.
| 6878 | if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP())) |
| 6879 | return SDValue(); |
| 6880 | |
| 6881 | EVT VMovVT; |
| 6882 | uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue(); |
| 6883 | |
| 6884 | // It wouldn't really be worth bothering for doubles except for one very |
| 6885 | // important value, which does happen to match: 0.0. So make sure we don't do |
| 6886 | // anything stupid. |
| 6887 | if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32)) |
| 6888 | return SDValue(); |
| 6889 | |
| 6890 | // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too). |
| 6891 | SDValue NewVal = isVMOVModifiedImm(SplatBits: iVal & 0xffffffffU, SplatUndef: 0, SplatBitSize: 32, DAG, dl: SDLoc(Op), |
| 6892 | VT&: VMovVT, VectorVT: VT, type: VMOVModImm); |
| 6893 | if (NewVal != SDValue()) { |
| 6894 | SDLoc DL(Op); |
| 6895 | SDValue VecConstant = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL, VT: VMovVT, |
| 6896 | Operand: NewVal); |
| 6897 | if (IsDouble) |
| 6898 | return DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::f64, Operand: VecConstant); |
| 6899 | |
| 6900 | // It's a float: cast and extract a vector element. |
| 6901 | SDValue VecFConstant = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::v2f32, |
| 6902 | Operand: VecConstant); |
| 6903 | return DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: MVT::f32, N1: VecFConstant, |
| 6904 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
| 6905 | } |
| 6906 | |
| 6907 | // Finally, try a VMVN.i32 |
| 6908 | NewVal = isVMOVModifiedImm(SplatBits: ~iVal & 0xffffffffU, SplatUndef: 0, SplatBitSize: 32, DAG, dl: SDLoc(Op), VT&: VMovVT, |
| 6909 | VectorVT: VT, type: VMVNModImm); |
| 6910 | if (NewVal != SDValue()) { |
| 6911 | SDLoc DL(Op); |
| 6912 | SDValue VecConstant = DAG.getNode(Opcode: ARMISD::VMVNIMM, DL, VT: VMovVT, Operand: NewVal); |
| 6913 | |
| 6914 | if (IsDouble) |
| 6915 | return DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::f64, Operand: VecConstant); |
| 6916 | |
| 6917 | // It's a float: cast and extract a vector element. |
| 6918 | SDValue VecFConstant = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::v2f32, |
| 6919 | Operand: VecConstant); |
| 6920 | return DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: MVT::f32, N1: VecFConstant, |
| 6921 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
| 6922 | } |
| 6923 | |
| 6924 | return SDValue(); |
| 6925 | } |
| 6926 | |
// Check if a VEXT instruction can handle the shuffle mask when the
// vector sources of the shuffle are the same.
| 6929 | static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) { |
| 6930 | unsigned NumElts = VT.getVectorNumElements(); |
| 6931 | |
| 6932 | // Assume that the first shuffle index is not UNDEF. Fail if it is. |
| 6933 | if (M[0] < 0) |
| 6934 | return false; |
| 6935 | |
| 6936 | Imm = M[0]; |
| 6937 | |
| 6938 | // If this is a VEXT shuffle, the immediate value is the index of the first |
| 6939 | // element. The other shuffle indices must be the successive elements after |
| 6940 | // the first one. |
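// For example, for a single-source v4i32 shuffle the mask <2, 3, 0, 1> is
// accepted with Imm == 2; the wrap back to index zero is fine because both
// VEXT operands are the same vector.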
| 6941 | unsigned ExpectedElt = Imm; |
| 6942 | for (unsigned i = 1; i < NumElts; ++i) { |
| 6943 | // Increment the expected index. If it wraps around, just follow it |
| 6944 | // back to index zero and keep going. |
| 6945 | ++ExpectedElt; |
| 6946 | if (ExpectedElt == NumElts) |
| 6947 | ExpectedElt = 0; |
| 6948 | |
| 6949 | if (M[i] < 0) continue; // ignore UNDEF indices |
| 6950 | if (ExpectedElt != static_cast<unsigned>(M[i])) |
| 6951 | return false; |
| 6952 | } |
| 6953 | |
| 6954 | return true; |
| 6955 | } |
| 6956 | |
| 6957 | static bool isVEXTMask(ArrayRef<int> M, EVT VT, |
| 6958 | bool &ReverseVEXT, unsigned &Imm) { |
| 6959 | unsigned NumElts = VT.getVectorNumElements(); |
| 6960 | ReverseVEXT = false; |
| 6961 | |
| 6962 | // Assume that the first shuffle index is not UNDEF. Fail if it is. |
| 6963 | if (M[0] < 0) |
| 6964 | return false; |
| 6965 | |
| 6966 | Imm = M[0]; |
| 6967 | |
| 6968 | // If this is a VEXT shuffle, the immediate value is the index of the first |
| 6969 | // element. The other shuffle indices must be the successive elements after |
| 6970 | // the first one. |
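// For example, for v4i32 the mask <3, 4, 5, 6> gives Imm == 3 in the
// original operand order, while <7, 0, 1, 2> wraps past both vectors, so
// ReverseVEXT is set and Imm is later adjusted to 7 - 4 == 3.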
| 6971 | unsigned ExpectedElt = Imm; |
| 6972 | for (unsigned i = 1; i < NumElts; ++i) { |
| 6973 | // Increment the expected index. If it wraps around, it may still be |
| 6974 | // a VEXT but the source vectors must be swapped. |
| 6975 | ExpectedElt += 1; |
| 6976 | if (ExpectedElt == NumElts * 2) { |
| 6977 | ExpectedElt = 0; |
| 6978 | ReverseVEXT = true; |
| 6979 | } |
| 6980 | |
| 6981 | if (M[i] < 0) continue; // ignore UNDEF indices |
| 6982 | if (ExpectedElt != static_cast<unsigned>(M[i])) |
| 6983 | return false; |
| 6984 | } |
| 6985 | |
| 6986 | // Adjust the index value if the source operands will be swapped. |
| 6987 | if (ReverseVEXT) |
| 6988 | Imm -= NumElts; |
| 6989 | |
| 6990 | return true; |
| 6991 | } |
| 6992 | |
| 6993 | static bool isVTBLMask(ArrayRef<int> M, EVT VT) { |
| 6994 | // We can handle <8 x i8> vector shuffles. If the index in the mask is out of |
| 6995 | // range, then 0 is placed into the resulting vector. So pretty much any mask |
| 6996 | // of 8 elements can work here. |
| 6997 | return VT == MVT::v8i8 && M.size() == 8; |
| 6998 | } |
| 6999 | |
| 7000 | static unsigned SelectPairHalf(unsigned Elements, ArrayRef<int> Mask, |
| 7001 | unsigned Index) { |
| 7002 | if (Mask.size() == Elements * 2) |
| 7003 | return Index / Elements; |
| 7004 | return Mask[Index] == 0 ? 0 : 1; |
| 7005 | } |
| 7006 | |
| 7007 | // Checks whether the shuffle mask represents a vector transpose (VTRN) by |
| 7008 | // checking that pairs of elements in the shuffle mask represent the same index |
| 7009 | // in each vector, incrementing the expected index by 2 at each step. |
| 7010 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6] |
| 7011 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g} |
| 7012 | // v2={e,f,g,h} |
| 7013 | // WhichResult gives the offset for each element in the mask based on which |
| 7014 | // of the two results it belongs to. |
| 7015 | // |
| 7016 | // The transpose can be represented either as: |
| 7017 | // result1 = shufflevector v1, v2, result1_shuffle_mask |
| 7018 | // result2 = shufflevector v1, v2, result2_shuffle_mask |
| 7019 | // where v1/v2 and the shuffle masks have the same number of elements |
| 7020 | // (here WhichResult (see below) indicates which result is being checked) |
| 7021 | // |
| 7022 | // or as: |
| 7023 | // results = shufflevector v1, v2, shuffle_mask |
| 7024 | // where both results are returned in one vector and the shuffle mask has twice |
| 7025 | // as many elements as v1/v2 (here WhichResult will always be 0 if true) here we |
| 7026 | // want to check the low half and high half of the shuffle mask as if it were |
| 7027 | // the other case |
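// For example, the v4i32 transpose expressed as one 8-element mask is
// <0, 4, 2, 6, 1, 5, 3, 7>: indices 0-3 are checked as result 0 and indices
// 4-7 as result 1 (SelectPairHalf picks the half from Index / Elements), and
// WhichResult is reset to 0 before returning.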
| 7028 | static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { |
| 7029 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7030 | if (EltSz == 64) |
| 7031 | return false; |
| 7032 | |
| 7033 | unsigned NumElts = VT.getVectorNumElements(); |
| 7034 | if ((M.size() != NumElts && M.size() != NumElts * 2) || NumElts % 2 != 0) |
| 7035 | return false; |
| 7036 | |
| 7037 | // If the mask is twice as long as the input vector then we need to check the |
| 7038 | // upper and lower parts of the mask with a matching value for WhichResult |
| 7039 | // FIXME: A mask with only even values will be rejected in case the first |
| 7040 | // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only |
| 7041 | // M[0] is used to determine WhichResult |
| 7042 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7043 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
| 7044 | for (unsigned j = 0; j < NumElts; j += 2) { |
| 7045 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || |
| 7046 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult)) |
| 7047 | return false; |
| 7048 | } |
| 7049 | } |
| 7050 | |
| 7051 | if (M.size() == NumElts*2) |
| 7052 | WhichResult = 0; |
| 7053 | |
| 7054 | return true; |
| 7055 | } |
| 7056 | |
| 7057 | /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of |
| 7058 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". |
| 7059 | /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. |
| 7060 | static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ |
| 7061 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7062 | if (EltSz == 64) |
| 7063 | return false; |
| 7064 | |
| 7065 | unsigned NumElts = VT.getVectorNumElements(); |
| 7066 | if ((M.size() != NumElts && M.size() != NumElts * 2) || NumElts % 2 != 0) |
| 7067 | return false; |
| 7068 | |
| 7069 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7070 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
| 7071 | for (unsigned j = 0; j < NumElts; j += 2) { |
| 7072 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || |
| 7073 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult)) |
| 7074 | return false; |
| 7075 | } |
| 7076 | } |
| 7077 | |
| 7078 | if (M.size() == NumElts*2) |
| 7079 | WhichResult = 0; |
| 7080 | |
| 7081 | return true; |
| 7082 | } |
| 7083 | |
| 7084 | // Checks whether the shuffle mask represents a vector unzip (VUZP) by checking |
| 7085 | // that the mask elements are either all even and in steps of size 2 or all odd |
| 7086 | // and in steps of size 2. |
| 7087 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6] |
| 7088 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g} |
| 7089 | // v2={e,f,g,h} |
// Requires similar checks to those of isVTRNMask with
// respect to how the results are returned.
| 7092 | static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { |
| 7093 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7094 | if (EltSz == 64) |
| 7095 | return false; |
| 7096 | |
| 7097 | unsigned NumElts = VT.getVectorNumElements(); |
| 7098 | if (M.size() != NumElts && M.size() != NumElts*2) |
| 7099 | return false; |
| 7100 | |
| 7101 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7102 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
| 7103 | for (unsigned j = 0; j < NumElts; ++j) { |
| 7104 | if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult) |
| 7105 | return false; |
| 7106 | } |
| 7107 | } |
| 7108 | |
| 7109 | if (M.size() == NumElts*2) |
| 7110 | WhichResult = 0; |
| 7111 | |
| 7112 | // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
| 7113 | if (VT.is64BitVector() && EltSz == 32) |
| 7114 | return false; |
| 7115 | |
| 7116 | return true; |
| 7117 | } |
| 7118 | |
| 7119 | /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of |
| 7120 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". |
| 7121 | /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, |
| 7122 | static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ |
| 7123 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7124 | if (EltSz == 64) |
| 7125 | return false; |
| 7126 | |
| 7127 | unsigned NumElts = VT.getVectorNumElements(); |
| 7128 | if (M.size() != NumElts && M.size() != NumElts*2) |
| 7129 | return false; |
| 7130 | |
| 7131 | unsigned Half = NumElts / 2; |
| 7132 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7133 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
| 7134 | for (unsigned j = 0; j < NumElts; j += Half) { |
| 7135 | unsigned Idx = WhichResult; |
| 7136 | for (unsigned k = 0; k < Half; ++k) { |
| 7137 | int MIdx = M[i + j + k]; |
| 7138 | if (MIdx >= 0 && (unsigned) MIdx != Idx) |
| 7139 | return false; |
| 7140 | Idx += 2; |
| 7141 | } |
| 7142 | } |
| 7143 | } |
| 7144 | |
| 7145 | if (M.size() == NumElts*2) |
| 7146 | WhichResult = 0; |
| 7147 | |
| 7148 | // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
| 7149 | if (VT.is64BitVector() && EltSz == 32) |
| 7150 | return false; |
| 7151 | |
| 7152 | return true; |
| 7153 | } |
| 7154 | |
| 7155 | // Checks whether the shuffle mask represents a vector zip (VZIP) by checking |
| 7156 | // that pairs of elements of the shufflemask represent the same index in each |
| 7157 | // vector incrementing sequentially through the vectors. |
| 7158 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5] |
| 7159 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f} |
| 7160 | // v2={e,f,g,h} |
// Requires similar checks to those of isVTRNMask with respect to how the
// results are returned.
| 7163 | static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { |
| 7164 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7165 | if (EltSz == 64) |
| 7166 | return false; |
| 7167 | |
| 7168 | unsigned NumElts = VT.getVectorNumElements(); |
| 7169 | if ((M.size() != NumElts && M.size() != NumElts * 2) || NumElts % 2 != 0) |
| 7170 | return false; |
| 7171 | |
| 7172 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7173 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
| 7174 | unsigned Idx = WhichResult * NumElts / 2; |
| 7175 | for (unsigned j = 0; j < NumElts; j += 2) { |
| 7176 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || |
| 7177 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts)) |
| 7178 | return false; |
| 7179 | Idx += 1; |
| 7180 | } |
| 7181 | } |
| 7182 | |
| 7183 | if (M.size() == NumElts*2) |
| 7184 | WhichResult = 0; |
| 7185 | |
| 7186 | // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
| 7187 | if (VT.is64BitVector() && EltSz == 32) |
| 7188 | return false; |
| 7189 | |
| 7190 | return true; |
| 7191 | } |
| 7192 | |
| 7193 | /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of |
| 7194 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". |
| 7195 | /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. |
| 7196 | static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ |
| 7197 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7198 | if (EltSz == 64) |
| 7199 | return false; |
| 7200 | |
| 7201 | unsigned NumElts = VT.getVectorNumElements(); |
| 7202 | if ((M.size() != NumElts && M.size() != NumElts * 2) || NumElts % 2 != 0) |
| 7203 | return false; |
| 7204 | |
| 7205 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7206 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
| 7207 | unsigned Idx = WhichResult * NumElts / 2; |
| 7208 | for (unsigned j = 0; j < NumElts; j += 2) { |
| 7209 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || |
| 7210 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx)) |
| 7211 | return false; |
| 7212 | Idx += 1; |
| 7213 | } |
| 7214 | } |
| 7215 | |
| 7216 | if (M.size() == NumElts*2) |
| 7217 | WhichResult = 0; |
| 7218 | |
| 7219 | // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
| 7220 | if (VT.is64BitVector() && EltSz == 32) |
| 7221 | return false; |
| 7222 | |
| 7223 | return true; |
| 7224 | } |
| 7225 | |
| 7226 | /// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN), |
| 7227 | /// and return the corresponding ARMISD opcode if it is, or 0 if it isn't. |
| 7228 | static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT, |
| 7229 | unsigned &WhichResult, |
| 7230 | bool &isV_UNDEF) { |
| 7231 | isV_UNDEF = false; |
| 7232 | if (isVTRNMask(M: ShuffleMask, VT, WhichResult)) |
| 7233 | return ARMISD::VTRN; |
| 7234 | if (isVUZPMask(M: ShuffleMask, VT, WhichResult)) |
| 7235 | return ARMISD::VUZP; |
| 7236 | if (isVZIPMask(M: ShuffleMask, VT, WhichResult)) |
| 7237 | return ARMISD::VZIP; |
| 7238 | |
| 7239 | isV_UNDEF = true; |
| 7240 | if (isVTRN_v_undef_Mask(M: ShuffleMask, VT, WhichResult)) |
| 7241 | return ARMISD::VTRN; |
| 7242 | if (isVUZP_v_undef_Mask(M: ShuffleMask, VT, WhichResult)) |
| 7243 | return ARMISD::VUZP; |
| 7244 | if (isVZIP_v_undef_Mask(M: ShuffleMask, VT, WhichResult)) |
| 7245 | return ARMISD::VZIP; |
| 7246 | |
| 7247 | return 0; |
| 7248 | } |
| 7249 | |
/// \return true if this is a reverse operation on a vector.
| 7251 | static bool isReverseMask(ArrayRef<int> M, EVT VT) { |
| 7252 | unsigned NumElts = VT.getVectorNumElements(); |
| 7253 | // Make sure the mask has the right size. |
| 7254 | if (NumElts != M.size()) |
| 7255 | return false; |
| 7256 | |
| 7257 | // Look for <15, ..., 3, -1, 1, 0>. |
| 7258 | for (unsigned i = 0; i != NumElts; ++i) |
| 7259 | if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i)) |
| 7260 | return false; |
| 7261 | |
| 7262 | return true; |
| 7263 | } |
| 7264 | |
| 7265 | static bool isTruncMask(ArrayRef<int> M, EVT VT, bool Top, bool SingleSource) { |
| 7266 | unsigned NumElts = VT.getVectorNumElements(); |
| 7267 | // Make sure the mask has the right size. |
| 7268 | if (NumElts != M.size() || (VT != MVT::v8i16 && VT != MVT::v16i8)) |
| 7269 | return false; |
| 7270 | |
| 7271 | // Half-width truncation patterns (e.g. v4i32 -> v8i16): |
| 7272 | // !Top && SingleSource: <0, 2, 4, 6, 0, 2, 4, 6> |
| 7273 | // !Top && !SingleSource: <0, 2, 4, 6, 8, 10, 12, 14> |
| 7274 | // Top && SingleSource: <1, 3, 5, 7, 1, 3, 5, 7> |
| 7275 | // Top && !SingleSource: <1, 3, 5, 7, 9, 11, 13, 15> |
| 7276 | int Ofs = Top ? 1 : 0; |
| 7277 | int Upper = SingleSource ? 0 : NumElts; |
| 7278 | for (int i = 0, e = NumElts / 2; i != e; ++i) { |
| 7279 | if (M[i] >= 0 && M[i] != (i * 2) + Ofs) |
| 7280 | return false; |
| 7281 | if (M[i + e] >= 0 && M[i + e] != (i * 2) + Ofs + Upper) |
| 7282 | return false; |
| 7283 | } |
| 7284 | return true; |
| 7285 | } |
| 7286 | |
| 7287 | static bool isVMOVNMask(ArrayRef<int> M, EVT VT, bool Top, bool SingleSource) { |
| 7288 | unsigned NumElts = VT.getVectorNumElements(); |
| 7289 | // Make sure the mask has the right size. |
| 7290 | if (NumElts != M.size() || (VT != MVT::v8i16 && VT != MVT::v16i8)) |
| 7291 | return false; |
| 7292 | |
| 7293 | // If Top |
| 7294 | // Look for <0, N, 2, N+2, 4, N+4, ..>. |
| 7295 | // This inserts Input2 into Input1 |
| 7296 | // else if not Top |
| 7297 | // Look for <0, N+1, 2, N+3, 4, N+5, ..> |
| 7298 | // This inserts Input1 into Input2 |
| 7299 | unsigned Offset = Top ? 0 : 1; |
| 7300 | unsigned N = SingleSource ? 0 : NumElts; |
| 7301 | for (unsigned i = 0; i < NumElts; i += 2) { |
| 7302 | if (M[i] >= 0 && M[i] != (int)i) |
| 7303 | return false; |
| 7304 | if (M[i + 1] >= 0 && M[i + 1] != (int)(N + i + Offset)) |
| 7305 | return false; |
| 7306 | } |
| 7307 | |
| 7308 | return true; |
| 7309 | } |
| 7310 | |
| 7311 | static bool isVMOVNTruncMask(ArrayRef<int> M, EVT ToVT, bool rev) { |
| 7312 | unsigned NumElts = ToVT.getVectorNumElements(); |
| 7313 | if (NumElts != M.size()) |
| 7314 | return false; |
| 7315 | |
// Test if the Trunc can be converted to a VMOVN with this shuffle. We are
| 7317 | // looking for patterns of: |
| 7318 | // !rev: 0 N/2 1 N/2+1 2 N/2+2 ... |
| 7319 | // rev: N/2 0 N/2+1 1 N/2+2 2 ... |
| 7320 | |
| 7321 | unsigned Off0 = rev ? NumElts / 2 : 0; |
| 7322 | unsigned Off1 = rev ? 0 : NumElts / 2; |
| 7323 | for (unsigned i = 0; i < NumElts; i += 2) { |
| 7324 | if (M[i] >= 0 && M[i] != (int)(Off0 + i / 2)) |
| 7325 | return false; |
| 7326 | if (M[i + 1] >= 0 && M[i + 1] != (int)(Off1 + i / 2)) |
| 7327 | return false; |
| 7328 | } |
| 7329 | |
| 7330 | return true; |
| 7331 | } |
| 7332 | |
| 7333 | // Reconstruct an MVE VCVT from a BuildVector of scalar fptrunc, all extracted |
| 7334 | // from a pair of inputs. For example: |
// BUILDVECTOR(FP_ROUND(EXTRACT_ELT(X, 0)),
// FP_ROUND(EXTRACT_ELT(Y, 0)),
// FP_ROUND(EXTRACT_ELT(X, 1)),
// FP_ROUND(EXTRACT_ELT(Y, 1)), ...)
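// The match is emitted below as two ARMISD::VCVTN nodes: the first narrows
// Op0 into the even f16 lanes and the second narrows Op1 into the odd lanes
// (the trailing i32 operand selects bottom/top), reproducing the interleaved
// extracts.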
| 7339 | static SDValue LowerBuildVectorOfFPTrunc(SDValue BV, SelectionDAG &DAG, |
| 7340 | const ARMSubtarget *ST) { |
assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
| 7342 | if (!ST->hasMVEFloatOps()) |
| 7343 | return SDValue(); |
| 7344 | |
| 7345 | SDLoc dl(BV); |
| 7346 | EVT VT = BV.getValueType(); |
| 7347 | if (VT != MVT::v8f16) |
| 7348 | return SDValue(); |
| 7349 | |
| 7350 | // We are looking for a buildvector of fptrunc elements, where all the |
| 7351 | // elements are interleavingly extracted from two sources. Check the first two |
| 7352 | // items are valid enough and extract some info from them (they are checked |
| 7353 | // properly in the loop below). |
| 7354 | if (BV.getOperand(i: 0).getOpcode() != ISD::FP_ROUND || |
| 7355 | BV.getOperand(i: 0).getOperand(i: 0).getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
| 7356 | BV.getOperand(i: 0).getOperand(i: 0).getConstantOperandVal(i: 1) != 0) |
| 7357 | return SDValue(); |
| 7358 | if (BV.getOperand(i: 1).getOpcode() != ISD::FP_ROUND || |
| 7359 | BV.getOperand(i: 1).getOperand(i: 0).getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
| 7360 | BV.getOperand(i: 1).getOperand(i: 0).getConstantOperandVal(i: 1) != 0) |
| 7361 | return SDValue(); |
| 7362 | SDValue Op0 = BV.getOperand(i: 0).getOperand(i: 0).getOperand(i: 0); |
| 7363 | SDValue Op1 = BV.getOperand(i: 1).getOperand(i: 0).getOperand(i: 0); |
| 7364 | if (Op0.getValueType() != MVT::v4f32 || Op1.getValueType() != MVT::v4f32) |
| 7365 | return SDValue(); |
| 7366 | |
| 7367 | // Check all the values in the BuildVector line up with our expectations. |
| 7368 | for (unsigned i = 1; i < 4; i++) { |
| 7369 | auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) { |
| 7370 | return Trunc.getOpcode() == ISD::FP_ROUND && |
| 7371 | Trunc.getOperand(i: 0).getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 7372 | Trunc.getOperand(i: 0).getOperand(i: 0) == Op && |
| 7373 | Trunc.getOperand(i: 0).getConstantOperandVal(i: 1) == Idx; |
| 7374 | }; |
| 7375 | if (!Check(BV.getOperand(i: i * 2 + 0), Op0, i)) |
| 7376 | return SDValue(); |
| 7377 | if (!Check(BV.getOperand(i: i * 2 + 1), Op1, i)) |
| 7378 | return SDValue(); |
| 7379 | } |
| 7380 | |
| 7381 | SDValue N1 = DAG.getNode(Opcode: ARMISD::VCVTN, DL: dl, VT, N1: DAG.getUNDEF(VT), N2: Op0, |
| 7382 | N3: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 7383 | return DAG.getNode(Opcode: ARMISD::VCVTN, DL: dl, VT, N1, N2: Op1, |
| 7384 | N3: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
| 7385 | } |
| 7386 | |
| 7387 | // Reconstruct an MVE VCVT from a BuildVector of scalar fpext, all extracted |
| 7388 | // from a single input on alternating lanes. For example: |
// BUILDVECTOR(FP_EXTEND(EXTRACT_ELT(X, 0)),
// FP_EXTEND(EXTRACT_ELT(X, 2)),
// FP_EXTEND(EXTRACT_ELT(X, 4)), ...)
| 7392 | static SDValue LowerBuildVectorOfFPExt(SDValue BV, SelectionDAG &DAG, |
| 7393 | const ARMSubtarget *ST) { |
assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
| 7395 | if (!ST->hasMVEFloatOps()) |
| 7396 | return SDValue(); |
| 7397 | |
| 7398 | SDLoc dl(BV); |
| 7399 | EVT VT = BV.getValueType(); |
| 7400 | if (VT != MVT::v4f32) |
| 7401 | return SDValue(); |
| 7402 | |
// We are looking for a buildvector of fpext elements, where all the
| 7404 | // elements are alternating lanes from a single source. For example <0,2,4,6> |
| 7405 | // or <1,3,5,7>. Check the first two items are valid enough and extract some |
| 7406 | // info from them (they are checked properly in the loop below). |
| 7407 | if (BV.getOperand(i: 0).getOpcode() != ISD::FP_EXTEND || |
| 7408 | BV.getOperand(i: 0).getOperand(i: 0).getOpcode() != ISD::EXTRACT_VECTOR_ELT) |
| 7409 | return SDValue(); |
| 7410 | SDValue Op0 = BV.getOperand(i: 0).getOperand(i: 0).getOperand(i: 0); |
| 7411 | int Offset = BV.getOperand(i: 0).getOperand(i: 0).getConstantOperandVal(i: 1); |
| 7412 | if (Op0.getValueType() != MVT::v8f16 || (Offset != 0 && Offset != 1)) |
| 7413 | return SDValue(); |
| 7414 | |
| 7415 | // Check all the values in the BuildVector line up with our expectations. |
| 7416 | for (unsigned i = 1; i < 4; i++) { |
| 7417 | auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) { |
| 7418 | return Trunc.getOpcode() == ISD::FP_EXTEND && |
| 7419 | Trunc.getOperand(i: 0).getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 7420 | Trunc.getOperand(i: 0).getOperand(i: 0) == Op && |
| 7421 | Trunc.getOperand(i: 0).getConstantOperandVal(i: 1) == Idx; |
| 7422 | }; |
| 7423 | if (!Check(BV.getOperand(i), Op0, 2 * i + Offset)) |
| 7424 | return SDValue(); |
| 7425 | } |
| 7426 | |
| 7427 | return DAG.getNode(Opcode: ARMISD::VCVTL, DL: dl, VT, N1: Op0, |
| 7428 | N2: DAG.getConstant(Val: Offset, DL: dl, VT: MVT::i32)); |
| 7429 | } |
| 7430 | |
| 7431 | // If N is an integer constant that can be moved into a register in one |
| 7432 | // instruction, return an SDValue of such a constant (will become a MOV |
| 7433 | // instruction). Otherwise return null. |
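// For example, in ARM/Thumb2 mode 0x00ff0000 qualifies (an 8-bit value
// rotated right by an even amount, so a single MOV), as does any value whose
// bitwise complement is such an immediate (a single MVN).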
| 7434 | static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, |
| 7435 | const ARMSubtarget *ST, const SDLoc &dl) { |
| 7436 | uint64_t Val; |
| 7437 | if (!isa<ConstantSDNode>(Val: N)) |
| 7438 | return SDValue(); |
| 7439 | Val = N->getAsZExtVal(); |
| 7440 | |
| 7441 | if (ST->isThumb1Only()) { |
| 7442 | if (Val <= 255 || ~Val <= 255) |
| 7443 | return DAG.getConstant(Val, DL: dl, VT: MVT::i32); |
| 7444 | } else { |
| 7445 | if (ARM_AM::getSOImmVal(Arg: Val) != -1 || ARM_AM::getSOImmVal(Arg: ~Val) != -1) |
| 7446 | return DAG.getConstant(Val, DL: dl, VT: MVT::i32); |
| 7447 | } |
| 7448 | return SDValue(); |
| 7449 | } |
| 7450 | |
| 7451 | static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG, |
| 7452 | const ARMSubtarget *ST) { |
| 7453 | SDLoc dl(Op); |
| 7454 | EVT VT = Op.getValueType(); |
| 7455 | |
| 7456 | assert(ST->hasMVEIntegerOps() && "LowerBUILD_VECTOR_i1 called without MVE!" ); |
| 7457 | |
| 7458 | unsigned NumElts = VT.getVectorNumElements(); |
| 7459 | unsigned BoolMask; |
| 7460 | unsigned BitsPerBool; |
| 7461 | if (NumElts == 2) { |
| 7462 | BitsPerBool = 8; |
| 7463 | BoolMask = 0xff; |
| 7464 | } else if (NumElts == 4) { |
| 7465 | BitsPerBool = 4; |
| 7466 | BoolMask = 0xf; |
| 7467 | } else if (NumElts == 8) { |
| 7468 | BitsPerBool = 2; |
| 7469 | BoolMask = 0x3; |
| 7470 | } else if (NumElts == 16) { |
| 7471 | BitsPerBool = 1; |
| 7472 | BoolMask = 0x1; |
| 7473 | } else |
| 7474 | return SDValue(); |
| 7475 | |
| 7476 | // If this is a single value copied into all lanes (a splat), we can just sign |
| 7477 | // extend that single value |
| 7478 | SDValue FirstOp = Op.getOperand(i: 0); |
| 7479 | if (!isa<ConstantSDNode>(Val: FirstOp) && |
| 7480 | llvm::all_of(Range: llvm::drop_begin(RangeOrContainer: Op->ops()), P: [&FirstOp](const SDUse &U) { |
| 7481 | return U.get().isUndef() || U.get() == FirstOp; |
| 7482 | })) { |
| 7483 | SDValue Ext = DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL: dl, VT: MVT::i32, N1: FirstOp, |
| 7484 | N2: DAG.getValueType(MVT::i1)); |
| 7485 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: Op.getValueType(), Operand: Ext); |
| 7486 | } |
| 7487 | |
| 7488 | // First create base with bits set where known |
| 7489 | unsigned Bits32 = 0; |
| 7490 | for (unsigned i = 0; i < NumElts; ++i) { |
| 7491 | SDValue V = Op.getOperand(i); |
| 7492 | if (!isa<ConstantSDNode>(Val: V) && !V.isUndef()) |
| 7493 | continue; |
| 7494 | bool BitSet = V.isUndef() ? false : V->getAsZExtVal(); |
| 7495 | if (BitSet) |
| 7496 | Bits32 |= BoolMask << (i * BitsPerBool); |
| 7497 | } |
| 7498 | |
| 7499 | // Add in unknown nodes |
| 7500 | SDValue Base = DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT, |
| 7501 | Operand: DAG.getConstant(Val: Bits32, DL: dl, VT: MVT::i32)); |
| 7502 | for (unsigned i = 0; i < NumElts; ++i) { |
| 7503 | SDValue V = Op.getOperand(i); |
| 7504 | if (isa<ConstantSDNode>(Val: V) || V.isUndef()) |
| 7505 | continue; |
| 7506 | Base = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT, N1: Base, N2: V, |
| 7507 | N3: DAG.getConstant(Val: i, DL: dl, VT: MVT::i32)); |
| 7508 | } |
| 7509 | |
| 7510 | return Base; |
| 7511 | } |
| 7512 | |
| 7513 | static SDValue LowerBUILD_VECTORToVIDUP(SDValue Op, SelectionDAG &DAG, |
| 7514 | const ARMSubtarget *ST) { |
| 7515 | if (!ST->hasMVEIntegerOps()) |
| 7516 | return SDValue(); |
| 7517 | |
| 7518 | // We are looking for a buildvector where each element is Op[0] + i*N |
| 7519 | EVT VT = Op.getValueType(); |
| 7520 | SDValue Op0 = Op.getOperand(i: 0); |
| 7521 | unsigned NumElts = VT.getVectorNumElements(); |
| 7522 | |
| 7523 | // Get the increment value from operand 1 |
| 7524 | SDValue Op1 = Op.getOperand(i: 1); |
| 7525 | if (Op1.getOpcode() != ISD::ADD || Op1.getOperand(i: 0) != Op0 || |
| 7526 | !isa<ConstantSDNode>(Val: Op1.getOperand(i: 1))) |
| 7527 | return SDValue(); |
| 7528 | unsigned N = Op1.getConstantOperandVal(i: 1); |
| 7529 | if (N != 1 && N != 2 && N != 4 && N != 8) |
| 7530 | return SDValue(); |
| 7531 | |
| 7532 | // Check that each other operand matches |
| 7533 | for (unsigned I = 2; I < NumElts; I++) { |
| 7534 | SDValue OpI = Op.getOperand(i: I); |
| 7535 | if (OpI.getOpcode() != ISD::ADD || OpI.getOperand(i: 0) != Op0 || |
| 7536 | !isa<ConstantSDNode>(Val: OpI.getOperand(i: 1)) || |
| 7537 | OpI.getConstantOperandVal(i: 1) != I * N) |
| 7538 | return SDValue(); |
| 7539 | } |
| 7540 | |
| 7541 | SDLoc DL(Op); |
| 7542 | return DAG.getNode(Opcode: ARMISD::VIDUP, DL, VTList: DAG.getVTList(VT1: VT, VT2: MVT::i32), N1: Op0, |
| 7543 | N2: DAG.getConstant(Val: N, DL, VT: MVT::i32)); |
| 7544 | } |
| 7545 | |
// Returns true if the operation N can be treated as a qr instruction variant
// at operand Op.
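// For example, MVE's VADD has a "vadd.i32 qd, qn, rm" form where the scalar
// in rm is implicitly splat across the lanes, so a constant splat feeding
// only such users is cheaper as a VDUP of a GPR. Non-commutative operations
// like VSUB only take the scalar on the right, hence the operand-position
// checks below.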
| 7548 | static bool IsQRMVEInstruction(const SDNode *N, const SDNode *Op) { |
| 7549 | switch (N->getOpcode()) { |
| 7550 | case ISD::ADD: |
| 7551 | case ISD::MUL: |
| 7552 | case ISD::SADDSAT: |
| 7553 | case ISD::UADDSAT: |
| 7554 | case ISD::AVGFLOORS: |
| 7555 | case ISD::AVGFLOORU: |
| 7556 | return true; |
| 7557 | case ISD::SUB: |
| 7558 | case ISD::SSUBSAT: |
| 7559 | case ISD::USUBSAT: |
| 7560 | return N->getOperand(Num: 1).getNode() == Op; |
| 7561 | case ISD::INTRINSIC_WO_CHAIN: |
| 7562 | switch (N->getConstantOperandVal(Num: 0)) { |
| 7563 | case Intrinsic::arm_mve_add_predicated: |
| 7564 | case Intrinsic::arm_mve_mul_predicated: |
| 7565 | case Intrinsic::arm_mve_qadd_predicated: |
| 7566 | case Intrinsic::arm_mve_vhadd: |
| 7567 | case Intrinsic::arm_mve_hadd_predicated: |
| 7568 | case Intrinsic::arm_mve_vqdmulh: |
| 7569 | case Intrinsic::arm_mve_qdmulh_predicated: |
| 7570 | case Intrinsic::arm_mve_vqrdmulh: |
| 7571 | case Intrinsic::arm_mve_qrdmulh_predicated: |
| 7572 | case Intrinsic::arm_mve_vqdmull: |
| 7573 | case Intrinsic::arm_mve_vqdmull_predicated: |
| 7574 | return true; |
| 7575 | case Intrinsic::arm_mve_sub_predicated: |
| 7576 | case Intrinsic::arm_mve_qsub_predicated: |
| 7577 | case Intrinsic::arm_mve_vhsub: |
| 7578 | case Intrinsic::arm_mve_hsub_predicated: |
| 7579 | return N->getOperand(Num: 2).getNode() == Op; |
| 7580 | default: |
| 7581 | return false; |
| 7582 | } |
| 7583 | default: |
| 7584 | return false; |
| 7585 | } |
| 7586 | } |
| 7587 | |
| 7588 | // If this is a case we can't handle, return null and let the default |
| 7589 | // expansion code take care of it. |
| 7590 | SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, |
| 7591 | const ARMSubtarget *ST) const { |
| 7592 | BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Val: Op.getNode()); |
| 7593 | SDLoc dl(Op); |
| 7594 | EVT VT = Op.getValueType(); |
| 7595 | |
| 7596 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) |
| 7597 | return LowerBUILD_VECTOR_i1(Op, DAG, ST); |
| 7598 | |
| 7599 | if (SDValue R = LowerBUILD_VECTORToVIDUP(Op, DAG, ST)) |
| 7600 | return R; |
| 7601 | |
| 7602 | APInt SplatBits, SplatUndef; |
| 7603 | unsigned SplatBitSize; |
| 7604 | bool HasAnyUndefs; |
| 7605 | if (BVN->isConstantSplat(SplatValue&: SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { |
| 7606 | if (SplatUndef.isAllOnes()) |
| 7607 | return DAG.getUNDEF(VT); |
| 7608 | |
| 7609 | // If all the users of this constant splat are qr instruction variants, |
| 7610 | // generate a vdup of the constant. |
| 7611 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == SplatBitSize && |
| 7612 | (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32) && |
| 7613 | all_of(Range: BVN->users(), |
| 7614 | P: [BVN](const SDNode *U) { return IsQRMVEInstruction(N: U, Op: BVN); })) { |
| 7615 | EVT DupVT = SplatBitSize == 32 ? MVT::v4i32 |
| 7616 | : SplatBitSize == 16 ? MVT::v8i16 |
| 7617 | : MVT::v16i8; |
| 7618 | SDValue Const = DAG.getConstant(Val: SplatBits.getZExtValue(), DL: dl, VT: MVT::i32); |
| 7619 | SDValue VDup = DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT: DupVT, Operand: Const); |
| 7620 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: VDup); |
| 7621 | } |
| 7622 | |
| 7623 | if ((ST->hasNEON() && SplatBitSize <= 64) || |
| 7624 | (ST->hasMVEIntegerOps() && SplatBitSize <= 64)) { |
| 7625 | // Check if an immediate VMOV works. |
| 7626 | EVT VmovVT; |
| 7627 | SDValue Val = |
| 7628 | isVMOVModifiedImm(SplatBits: SplatBits.getZExtValue(), SplatUndef: SplatUndef.getZExtValue(), |
| 7629 | SplatBitSize, DAG, dl, VT&: VmovVT, VectorVT: VT, type: VMOVModImm); |
| 7630 | |
| 7631 | if (Val.getNode()) { |
| 7632 | SDValue Vmov = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: VmovVT, Operand: Val); |
| 7633 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Vmov); |
| 7634 | } |
| 7635 | |
| 7636 | // Try an immediate VMVN. |
| 7637 | uint64_t NegatedImm = (~SplatBits).getZExtValue(); |
| 7638 | Val = isVMOVModifiedImm( |
| 7639 | SplatBits: NegatedImm, SplatUndef: SplatUndef.getZExtValue(), SplatBitSize, DAG, dl, VT&: VmovVT, |
| 7640 | VectorVT: VT, type: ST->hasMVEIntegerOps() ? MVEVMVNModImm : VMVNModImm); |
| 7641 | if (Val.getNode()) { |
| 7642 | SDValue Vmov = DAG.getNode(Opcode: ARMISD::VMVNIMM, DL: dl, VT: VmovVT, Operand: Val); |
| 7643 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Vmov); |
| 7644 | } |
| 7645 | |
| 7646 | // Use vmov.f32 to materialize other v2f32 and v4f32 splats. |
| 7647 | if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) { |
| 7648 | int ImmVal = ARM_AM::getFP32Imm(Imm: SplatBits); |
| 7649 | if (ImmVal != -1) { |
| 7650 | SDValue Val = DAG.getTargetConstant(Val: ImmVal, DL: dl, VT: MVT::i32); |
| 7651 | return DAG.getNode(Opcode: ARMISD::VMOVFPIMM, DL: dl, VT, Operand: Val); |
| 7652 | } |
| 7653 | } |
| 7654 | |
| 7655 | // If we are under MVE, generate a VDUP(constant), bitcast to the original |
| 7656 | // type. |
| 7657 | if (ST->hasMVEIntegerOps() && |
| 7658 | (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32)) { |
| 7659 | EVT DupVT = SplatBitSize == 32 ? MVT::v4i32 |
| 7660 | : SplatBitSize == 16 ? MVT::v8i16 |
| 7661 | : MVT::v16i8; |
| 7662 | SDValue Const = DAG.getConstant(Val: SplatBits.getZExtValue(), DL: dl, VT: MVT::i32); |
| 7663 | SDValue VDup = DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT: DupVT, Operand: Const); |
| 7664 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: VDup); |
| 7665 | } |
| 7666 | } |
| 7667 | } |
| 7668 | |
| 7669 | // Scan through the operands to see if only one value is used. |
| 7670 | // |
| 7671 | // As an optimisation, even if more than one value is used it may be more |
| 7672 | // profitable to splat with one value then change some lanes. |
| 7673 | // |
| 7674 | // Heuristically we decide to do this if the vector has a "dominant" value, |
| 7675 | // defined as splatted to more than half of the lanes. |
| 7676 | unsigned NumElts = VT.getVectorNumElements(); |
| 7677 | bool isOnlyLowElement = true; |
| 7678 | bool usesOnlyOneValue = true; |
| 7679 | bool hasDominantValue = false; |
| 7680 | bool isConstant = true; |
| 7681 | |
| 7682 | // Map of the number of times a particular SDValue appears in the |
| 7683 | // element list. |
| 7684 | DenseMap<SDValue, unsigned> ValueCounts; |
| 7685 | SDValue Value; |
| 7686 | for (unsigned i = 0; i < NumElts; ++i) { |
| 7687 | SDValue V = Op.getOperand(i); |
| 7688 | if (V.isUndef()) |
| 7689 | continue; |
| 7690 | if (i > 0) |
| 7691 | isOnlyLowElement = false; |
| 7692 | if (!isa<ConstantFPSDNode>(Val: V) && !isa<ConstantSDNode>(Val: V)) |
| 7693 | isConstant = false; |
| 7694 | |
| 7695 | unsigned &Count = ValueCounts[V]; |
| 7696 | |
| 7697 | // Is this value dominant? (takes up more than half of the lanes) |
| 7698 | if (++Count > (NumElts / 2)) { |
| 7699 | hasDominantValue = true; |
| 7700 | Value = V; |
| 7701 | } |
| 7702 | } |
| 7703 | if (ValueCounts.size() != 1) |
| 7704 | usesOnlyOneValue = false; |
| 7705 | if (!Value.getNode() && !ValueCounts.empty()) |
| 7706 | Value = ValueCounts.begin()->first; |
| 7707 | |
| 7708 | if (ValueCounts.empty()) |
| 7709 | return DAG.getUNDEF(VT); |
| 7710 | |
| 7711 | // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR. |
| 7712 | // Keep going if we are hitting this case. |
| 7713 | if (isOnlyLowElement && !ISD::isNormalLoad(N: Value.getNode())) |
| 7714 | return DAG.getNode(Opcode: ISD::SCALAR_TO_VECTOR, DL: dl, VT, Operand: Value); |
| 7715 | |
| 7716 | unsigned EltSize = VT.getScalarSizeInBits(); |
| 7717 | |
| 7718 | // Use VDUP for non-constant splats. For f32 constant splats, reduce to |
| 7719 | // i32 and try again. |
| 7720 | if (hasDominantValue && EltSize <= 32) { |
| 7721 | if (!isConstant) { |
| 7722 | SDValue N; |
| 7723 | |
| 7724 | // If we are VDUPing a value that comes directly from a vector, that will |
| 7725 | // cause an unnecessary move to and from a GPR, where instead we could |
| 7726 | // just use VDUPLANE. We can only do this if the lane being extracted |
| 7727 | // is at a constant index, as the VDUP from lane instructions only have |
| 7728 | // constant-index forms. |
| 7729 | ConstantSDNode *constIndex; |
| 7730 | if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 7731 | (constIndex = dyn_cast<ConstantSDNode>(Val: Value->getOperand(Num: 1)))) { |
| 7732 | // We need to create a new undef vector to use for the VDUPLANE if the |
| 7733 | // size of the vector from which we get the value is different than the |
| 7734 | // size of the vector that we need to create. We will insert the element |
| 7735 | // such that the register coalescer will remove unnecessary copies. |
| 7736 | if (VT != Value->getOperand(Num: 0).getValueType()) { |
| 7737 | unsigned index = constIndex->getAPIntValue().getLimitedValue() % |
| 7738 | VT.getVectorNumElements(); |
| 7739 | N = DAG.getNode(Opcode: ARMISD::VDUPLANE, DL: dl, VT, |
| 7740 | N1: DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT, N1: DAG.getUNDEF(VT), |
| 7741 | N2: Value, N3: DAG.getConstant(Val: index, DL: dl, VT: MVT::i32)), |
| 7742 | N2: DAG.getConstant(Val: index, DL: dl, VT: MVT::i32)); |
| 7743 | } else |
| 7744 | N = DAG.getNode(Opcode: ARMISD::VDUPLANE, DL: dl, VT, |
| 7745 | N1: Value->getOperand(Num: 0), N2: Value->getOperand(Num: 1)); |
| 7746 | } else |
| 7747 | N = DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT, Operand: Value); |
| 7748 | |
| 7749 | if (!usesOnlyOneValue) { |
| 7750 | // The dominant value was splatted as 'N', but we now have to insert |
| 7751 | // all differing elements. |
| 7752 | for (unsigned I = 0; I < NumElts; ++I) { |
| 7753 | if (Op.getOperand(i: I) == Value) |
| 7754 | continue; |
| 7755 | SmallVector<SDValue, 3> Ops; |
| 7756 | Ops.push_back(Elt: N); |
| 7757 | Ops.push_back(Elt: Op.getOperand(i: I)); |
| 7758 | Ops.push_back(Elt: DAG.getConstant(Val: I, DL: dl, VT: MVT::i32)); |
| 7759 | N = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT, Ops); |
| 7760 | } |
| 7761 | } |
| 7762 | return N; |
| 7763 | } |
| 7764 | if (VT.getVectorElementType().isFloatingPoint()) { |
| 7765 | SmallVector<SDValue, 8> Ops; |
| 7766 | MVT FVT = VT.getVectorElementType().getSimpleVT(); |
| 7767 | assert(FVT == MVT::f32 || FVT == MVT::f16); |
| 7768 | MVT IVT = (FVT == MVT::f32) ? MVT::i32 : MVT::i16; |
| 7769 | for (unsigned i = 0; i < NumElts; ++i) |
| 7770 | Ops.push_back(Elt: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: IVT, |
| 7771 | Operand: Op.getOperand(i))); |
| 7772 | EVT VecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: IVT, NumElements: NumElts); |
| 7773 | SDValue Val = DAG.getBuildVector(VT: VecVT, DL: dl, Ops); |
| 7774 | Val = LowerBUILD_VECTOR(Op: Val, DAG, ST); |
| 7775 | if (Val.getNode()) |
| 7776 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Val); |
| 7777 | } |
| 7778 | if (usesOnlyOneValue) { |
| 7779 | SDValue Val = IsSingleInstrConstant(N: Value, DAG, ST, dl); |
| 7780 | if (isConstant && Val.getNode()) |
| 7781 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT, Operand: Val); |
| 7782 | } |
| 7783 | } |
| 7784 | |
| 7785 | // If all elements are constants and the case above didn't get hit, fall back |
| 7786 | // to the default expansion, which will generate a load from the constant |
| 7787 | // pool. |
| 7788 | if (isConstant) |
| 7789 | return SDValue(); |
| 7790 | |
| 7791 | // Reconstruct the BUILDVECTOR to one of the legal shuffles (such as vext and |
| 7792 | // vmovn). Empirical tests suggest this is rarely worth it for vectors of |
| 7793 | // length <= 2. |
| 7794 | if (NumElts >= 4) |
| 7795 | if (SDValue shuffle = ReconstructShuffle(Op, DAG)) |
| 7796 | return shuffle; |
| 7797 | |
| 7798 | // Attempt to turn a buildvector of scalar fptrunc's or fpext's back into |
| 7799 | // VCVT's |
| 7800 | if (SDValue VCVT = LowerBuildVectorOfFPTrunc(BV: Op, DAG, ST: Subtarget)) |
| 7801 | return VCVT; |
| 7802 | if (SDValue VCVT = LowerBuildVectorOfFPExt(BV: Op, DAG, ST: Subtarget)) |
| 7803 | return VCVT; |
| 7804 | |
| 7805 | if (ST->hasNEON() && VT.is128BitVector() && VT != MVT::v2f64 && VT != MVT::v4f32) { |
| 7806 | // If we haven't found an efficient lowering, try splitting a 128-bit vector |
| 7807 | // into two 64-bit vectors; we might discover a better way to lower it. |
| 7808 | SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElts); |
| 7809 | EVT ExtVT = VT.getVectorElementType(); |
| 7810 | EVT HVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: ExtVT, NumElements: NumElts / 2); |
| 7811 | SDValue Lower = DAG.getBuildVector(VT: HVT, DL: dl, Ops: ArrayRef(&Ops[0], NumElts / 2)); |
| 7812 | if (Lower.getOpcode() == ISD::BUILD_VECTOR) |
| 7813 | Lower = LowerBUILD_VECTOR(Op: Lower, DAG, ST); |
| 7814 | SDValue Upper = |
| 7815 | DAG.getBuildVector(VT: HVT, DL: dl, Ops: ArrayRef(&Ops[NumElts / 2], NumElts / 2)); |
| 7816 | if (Upper.getOpcode() == ISD::BUILD_VECTOR) |
| 7817 | Upper = LowerBUILD_VECTOR(Op: Upper, DAG, ST); |
| 7818 | if (Lower && Upper) |
| 7819 | return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT, N1: Lower, N2: Upper); |
| 7820 | } |
| 7821 | |
| 7822 | // Vectors with 32- or 64-bit elements can be built by directly assigning |
| 7823 | // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands |
| 7824 | // will be legalized. |
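// For example, a v2i64 build_vector becomes an ARMISD::BUILD_VECTOR of two
// f64 values (bitcasts of the i64 elements), which can then be assembled by
// writing the D subregisters of a Q register directly.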
| 7825 | if (EltSize >= 32) { |
| 7826 | // Do the expansion with floating-point types, since that is what the VFP |
| 7827 | // registers are defined to use, and since i64 is not legal. |
| 7828 | EVT EltVT = EVT::getFloatingPointVT(BitWidth: EltSize); |
| 7829 | EVT VecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: EltVT, NumElements: NumElts); |
| 7830 | SmallVector<SDValue, 8> Ops; |
| 7831 | for (unsigned i = 0; i < NumElts; ++i) |
| 7832 | Ops.push_back(Elt: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: EltVT, Operand: Op.getOperand(i))); |
| 7833 | SDValue Val = DAG.getNode(Opcode: ARMISD::BUILD_VECTOR, DL: dl, VT: VecVT, Ops); |
| 7834 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Val); |
| 7835 | } |
| 7836 | |
| 7837 | // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we |
| 7838 | // know the default expansion would otherwise fall back on something even |
| 7839 | // worse. For a vector with one or two non-undef values, that's |
| 7840 | // scalar_to_vector for the elements followed by a shuffle (provided the |
| 7841 | // shuffle is valid for the target) and materialization element by element |
| 7842 | // on the stack followed by a load for everything else. |
| 7843 | if (!isConstant && !usesOnlyOneValue) { |
| 7844 | SDValue Vec = DAG.getUNDEF(VT); |
| 7845 | for (unsigned i = 0 ; i < NumElts; ++i) { |
| 7846 | SDValue V = Op.getOperand(i); |
| 7847 | if (V.isUndef()) |
| 7848 | continue; |
| 7849 | SDValue LaneIdx = DAG.getConstant(Val: i, DL: dl, VT: MVT::i32); |
| 7850 | Vec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT, N1: Vec, N2: V, N3: LaneIdx); |
| 7851 | } |
| 7852 | return Vec; |
| 7853 | } |
| 7854 | |
| 7855 | return SDValue(); |
| 7856 | } |
| 7857 | |
| 7858 | // Gather data to see if the operation can be modelled as a |
| 7859 | // shuffle in combination with VEXTs. |
| 7860 | SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, |
| 7861 | SelectionDAG &DAG) const { |
assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
| 7863 | SDLoc dl(Op); |
| 7864 | EVT VT = Op.getValueType(); |
| 7865 | unsigned NumElts = VT.getVectorNumElements(); |
| 7866 | |
| 7867 | struct ShuffleSourceInfo { |
| 7868 | SDValue Vec; |
| 7869 | unsigned MinElt = std::numeric_limits<unsigned>::max(); |
| 7870 | unsigned MaxElt = 0; |
| 7871 | |
| 7872 | // We may insert some combination of BITCASTs and VEXT nodes to force Vec to |
| 7873 | // be compatible with the shuffle we intend to construct. As a result |
| 7874 | // ShuffleVec will be some sliding window into the original Vec. |
| 7875 | SDValue ShuffleVec; |
| 7876 | |
| 7877 | // Code should guarantee that element i in Vec starts at element "WindowBase |
// + i * WindowScale" in ShuffleVec.
| 7879 | int WindowBase = 0; |
| 7880 | int WindowScale = 1; |
| 7881 | |
| 7882 | ShuffleSourceInfo(SDValue Vec) : Vec(Vec), ShuffleVec(Vec) {} |
| 7883 | |
| 7884 | bool operator ==(SDValue OtherVec) { return Vec == OtherVec; } |
| 7885 | }; |
| 7886 | |
| 7887 | // First gather all vectors used as an immediate source for this BUILD_VECTOR |
| 7888 | // node. |
| 7889 | SmallVector<ShuffleSourceInfo, 2> Sources; |
| 7890 | for (unsigned i = 0; i < NumElts; ++i) { |
| 7891 | SDValue V = Op.getOperand(i); |
| 7892 | if (V.isUndef()) |
| 7893 | continue; |
| 7894 | else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { |
| 7895 | // A shuffle can only come from building a vector from various |
| 7896 | // elements of other vectors. |
| 7897 | return SDValue(); |
| 7898 | } else if (!isa<ConstantSDNode>(Val: V.getOperand(i: 1))) { |
| 7899 | // Furthermore, shuffles require a constant mask, whereas extractelts |
| 7900 | // accept variable indices. |
| 7901 | return SDValue(); |
| 7902 | } |
| 7903 | |
| 7904 | // Add this element source to the list if it's not already there. |
| 7905 | SDValue SourceVec = V.getOperand(i: 0); |
| 7906 | auto Source = llvm::find(Range&: Sources, Val: SourceVec); |
| 7907 | if (Source == Sources.end()) |
| 7908 | Source = Sources.insert(I: Sources.end(), Elt: ShuffleSourceInfo(SourceVec)); |
| 7909 | |
| 7910 | // Update the minimum and maximum lane number seen. |
| 7911 | unsigned EltNo = V.getConstantOperandVal(i: 1); |
| 7912 | Source->MinElt = std::min(a: Source->MinElt, b: EltNo); |
| 7913 | Source->MaxElt = std::max(a: Source->MaxElt, b: EltNo); |
| 7914 | } |
| 7915 | |
| 7916 | // Currently only do something sane when at most two source vectors |
| 7917 | // are involved. |
| 7918 | if (Sources.size() > 2) |
| 7919 | return SDValue(); |
| 7920 | |
| 7921 | // Find out the smallest element size among result and two sources, and use |
| 7922 | // it as element size to build the shuffle_vector. |
| 7923 | EVT SmallestEltTy = VT.getVectorElementType(); |
| 7924 | for (auto &Source : Sources) { |
| 7925 | EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType(); |
| 7926 | if (SrcEltTy.bitsLT(VT: SmallestEltTy)) |
| 7927 | SmallestEltTy = SrcEltTy; |
| 7928 | } |
| 7929 | unsigned ResMultiplier = |
| 7930 | VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits(); |
| 7931 | NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits(); |
| 7932 | EVT ShuffleVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: SmallestEltTy, NumElements: NumElts); |
| 7933 | |
| 7934 | // If the source vector is too wide or too narrow, we may nevertheless be able |
| 7935 | // to construct a compatible shuffle either by concatenating it with UNDEF or |
| 7936 | // extracting a suitable range of elements. |
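|      | // For example, a v4i16 source feeding a v8i16 result is padded out with
|      | // UNDEF below, while a double-width source is narrowed with an
|      | // EXTRACT_SUBVECTOR, or with a VEXT when the used span straddles its halves.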
| 7937 | for (auto &Src : Sources) { |
| 7938 | EVT SrcVT = Src.ShuffleVec.getValueType(); |
| 7939 | |
| 7940 | uint64_t SrcVTSize = SrcVT.getFixedSizeInBits(); |
| 7941 | uint64_t VTSize = VT.getFixedSizeInBits(); |
| 7942 | if (SrcVTSize == VTSize) |
| 7943 | continue; |
| 7944 | |
| 7945 | // This stage of the search produces a source with the same element type as |
| 7946 | // the original, but with a total width matching the BUILD_VECTOR output. |
| 7947 | EVT EltVT = SrcVT.getVectorElementType(); |
| 7948 | unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits(); |
| 7949 | EVT DestVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: EltVT, NumElements: NumSrcElts); |
| 7950 | |
| 7951 | if (SrcVTSize < VTSize) { |
| 7952 | if (2 * SrcVTSize != VTSize) |
| 7953 | return SDValue(); |
| 7954 | // We can pad out the smaller vector for free, so if it's part of a |
| 7955 | // shuffle... |
| 7956 | Src.ShuffleVec = |
| 7957 | DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT: DestVT, N1: Src.ShuffleVec, |
| 7958 | N2: DAG.getUNDEF(VT: Src.ShuffleVec.getValueType())); |
| 7959 | continue; |
| 7960 | } |
| 7961 | |
| 7962 | if (SrcVTSize != 2 * VTSize) |
| 7963 | return SDValue(); |
| 7964 | |
| 7965 | if (Src.MaxElt - Src.MinElt >= NumSrcElts) { |
| 7966 | // Span too large for a VEXT to cope |
| 7967 | return SDValue(); |
| 7968 | } |
| 7969 | |
| 7970 | if (Src.MinElt >= NumSrcElts) { |
| 7971 | // The extraction can just take the second half |
| 7972 | Src.ShuffleVec = |
| 7973 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: DestVT, N1: Src.ShuffleVec, |
| 7974 | N2: DAG.getConstant(Val: NumSrcElts, DL: dl, VT: MVT::i32)); |
| 7975 | Src.WindowBase = -NumSrcElts; |
| 7976 | } else if (Src.MaxElt < NumSrcElts) { |
| 7977 | // The extraction can just take the first half |
| 7978 | Src.ShuffleVec = |
| 7979 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: DestVT, N1: Src.ShuffleVec, |
| 7980 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 7981 | } else { |
| 7982 | // An actual VEXT is needed |
| 7983 | SDValue VEXTSrc1 = |
| 7984 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: DestVT, N1: Src.ShuffleVec, |
| 7985 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 7986 | SDValue VEXTSrc2 = |
| 7987 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: DestVT, N1: Src.ShuffleVec, |
| 7988 | N2: DAG.getConstant(Val: NumSrcElts, DL: dl, VT: MVT::i32)); |
| 7989 | |
| 7990 | Src.ShuffleVec = DAG.getNode(Opcode: ARMISD::VEXT, DL: dl, VT: DestVT, N1: VEXTSrc1, |
| 7991 | N2: VEXTSrc2, |
| 7992 | N3: DAG.getConstant(Val: Src.MinElt, DL: dl, VT: MVT::i32)); |
| 7993 | Src.WindowBase = -Src.MinElt; |
| 7994 | } |
| 7995 | } |
| 7996 | |
| 7997 | // Another possible incompatibility occurs from the vector element types. We |
| 7998 | // can fix this by bitcasting the source vectors to the same type we intend |
| 7999 | // for the shuffle. |
| 8000 | for (auto &Src : Sources) { |
| 8001 | EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType(); |
| 8002 | if (SrcEltTy == SmallestEltTy) |
| 8003 | continue; |
| 8004 | assert(ShuffleVT.getVectorElementType() == SmallestEltTy); |
| 8005 | Src.ShuffleVec = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: ShuffleVT, Operand: Src.ShuffleVec); |
| 8006 | Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits(); |
| 8007 | Src.WindowBase *= Src.WindowScale; |
| 8008 | } |
| 8009 | |
| 8010 | // Final check before we try to actually produce a shuffle. |
| 8011 | LLVM_DEBUG({ |
| 8012 | for (auto Src : Sources) |
| 8013 | assert(Src.ShuffleVec.getValueType() == ShuffleVT); |
| 8014 | }); |
| 8015 | |
| 8016 | // The stars all align; our next step is to produce the mask for the shuffle.
| 8017 | SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1); |
| 8018 | int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits(); |
| 8019 | for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { |
| 8020 | SDValue Entry = Op.getOperand(i); |
| 8021 | if (Entry.isUndef()) |
| 8022 | continue; |
| 8023 | |
| 8024 | auto Src = llvm::find(Range&: Sources, Val: Entry.getOperand(i: 0)); |
| 8025 | int EltNo = cast<ConstantSDNode>(Val: Entry.getOperand(i: 1))->getSExtValue(); |
| 8026 | |
| 8027 | // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit |
| 8028 | // trunc. So only std::min(SrcBits, DestBits) actually get defined in this |
| 8029 | // segment. |
| 8030 | EVT OrigEltTy = Entry.getOperand(i: 0).getValueType().getVectorElementType(); |
| 8031 | int BitsDefined = std::min(a: OrigEltTy.getScalarSizeInBits(), |
| 8032 | b: VT.getScalarSizeInBits()); |
| 8033 | int LanesDefined = BitsDefined / BitsPerShuffleLane; |
| 8034 | |
| 8035 | // This source is expected to fill ResMultiplier lanes of the final shuffle, |
| 8036 | // starting at the appropriate offset. |
| 8037 | int *LaneMask = &Mask[i * ResMultiplier]; |
| 8038 | |
| 8039 | int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
| 8040 | ExtractBase += NumElts * (Src - Sources.begin()); |
| 8041 | for (int j = 0; j < LanesDefined; ++j) |
| 8042 | LaneMask[j] = ExtractBase + j; |
| 8043 | } |
| 8044 | |
| 8045 | |
| 8046 | // We can't handle more than two sources. This should have already |
| 8047 | // been checked before this point. |
| 8048 | assert(Sources.size() <= 2 && "Too many sources!");
| 8049 | |
| 8050 | SDValue ShuffleOps[] = { DAG.getUNDEF(VT: ShuffleVT), DAG.getUNDEF(VT: ShuffleVT) }; |
| 8051 | for (unsigned i = 0; i < Sources.size(); ++i) |
| 8052 | ShuffleOps[i] = Sources[i].ShuffleVec; |
| 8053 | |
| 8054 | SDValue Shuffle = buildLegalVectorShuffle(VT: ShuffleVT, DL: dl, N0: ShuffleOps[0], |
| 8055 | N1: ShuffleOps[1], Mask, DAG); |
| 8056 | if (!Shuffle) |
| 8057 | return SDValue(); |
| 8058 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Shuffle); |
| 8059 | } |
| 8060 | |
| 8061 | enum ShuffleOpCodes { |
| 8062 | OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> |
| 8063 | OP_VREV, |
| 8064 | OP_VDUP0, |
| 8065 | OP_VDUP1, |
| 8066 | OP_VDUP2, |
| 8067 | OP_VDUP3, |
| 8068 | OP_VEXT1, |
| 8069 | OP_VEXT2, |
| 8070 | OP_VEXT3, |
| 8071 | OP_VUZPL, // VUZP, left result |
| 8072 | OP_VUZPR, // VUZP, right result |
| 8073 | OP_VZIPL, // VZIP, left result |
| 8074 | OP_VZIPR, // VZIP, right result |
| 8075 | OP_VTRNL, // VTRN, left result |
| 8076 | OP_VTRNR // VTRN, right result |
| 8077 | }; |
| 8078 | |
| 8079 | static bool isLegalMVEShuffleOp(unsigned PFEntry) { |
| 8080 | unsigned OpNum = (PFEntry >> 26) & 0x0F; |
| 8081 | switch (OpNum) { |
| 8082 | case OP_COPY: |
| 8083 | case OP_VREV: |
| 8084 | case OP_VDUP0: |
| 8085 | case OP_VDUP1: |
| 8086 | case OP_VDUP2: |
| 8087 | case OP_VDUP3: |
| 8088 | return true; |
| 8089 | } |
| 8090 | return false; |
| 8091 | } |
| 8092 | |
| 8093 | /// isShuffleMaskLegal - Targets can use this to indicate that they only |
| 8094 | /// support *some* VECTOR_SHUFFLE operations, those with specific masks. |
| 8095 | /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values |
| 8096 | /// are assumed to be legal. |
| 8097 | bool ARMTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const { |
| 8098 | if (VT.getVectorNumElements() == 4 && |
| 8099 | (VT.is128BitVector() || VT.is64BitVector())) { |
| 8100 | unsigned PFIndexes[4]; |
| 8101 | for (unsigned i = 0; i != 4; ++i) { |
| 8102 | if (M[i] < 0) |
| 8103 | PFIndexes[i] = 8; |
| 8104 | else |
| 8105 | PFIndexes[i] = M[i]; |
| 8106 | } |
| 8107 | |
| 8108 | // Compute the index in the perfect shuffle table. |
| 8109 | unsigned PFTableIndex = |
| 8110 | PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; |
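|      | // (Each PFIndexes entry is a digit in [0, 8], with 8 encoding an undef
|      | // lane, so the four entries form a base-9 index into the table.)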
| 8111 | unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; |
| 8112 | unsigned Cost = (PFEntry >> 30); |
| 8113 | |
| 8114 | if (Cost <= 4 && (Subtarget->hasNEON() || isLegalMVEShuffleOp(PFEntry))) |
| 8115 | return true; |
| 8116 | } |
| 8117 | |
| 8118 | bool ReverseVEXT, isV_UNDEF; |
| 8119 | unsigned Imm, WhichResult; |
| 8120 | |
| 8121 | unsigned EltSize = VT.getScalarSizeInBits(); |
| 8122 | if (EltSize >= 32 || |
| 8123 | ShuffleVectorSDNode::isSplatMask(Mask: M) || |
| 8124 | ShuffleVectorInst::isIdentityMask(Mask: M, NumSrcElts: M.size()) || |
| 8125 | isVREVMask(M, VT, BlockSize: 64) || |
| 8126 | isVREVMask(M, VT, BlockSize: 32) || |
| 8127 | isVREVMask(M, VT, BlockSize: 16)) |
| 8128 | return true; |
| 8129 | else if (Subtarget->hasNEON() && |
| 8130 | (isVEXTMask(M, VT, ReverseVEXT, Imm) || |
| 8131 | isVTBLMask(M, VT) || |
| 8132 | isNEONTwoResultShuffleMask(ShuffleMask: M, VT, WhichResult, isV_UNDEF))) |
| 8133 | return true; |
| 8134 | else if ((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) && |
| 8135 | isReverseMask(M, VT)) |
| 8136 | return true; |
| 8137 | else if (Subtarget->hasMVEIntegerOps() && |
| 8138 | (isVMOVNMask(M, VT, Top: true, SingleSource: false) || |
| 8139 | isVMOVNMask(M, VT, Top: false, SingleSource: false) || isVMOVNMask(M, VT, Top: true, SingleSource: true))) |
| 8140 | return true; |
| 8141 | else if (Subtarget->hasMVEIntegerOps() && |
| 8142 | (isTruncMask(M, VT, Top: false, SingleSource: false) || |
| 8143 | isTruncMask(M, VT, Top: false, SingleSource: true) || |
| 8144 | isTruncMask(M, VT, Top: true, SingleSource: false) || isTruncMask(M, VT, Top: true, SingleSource: true))) |
| 8145 | return true; |
| 8146 | else |
| 8147 | return false; |
| 8148 | } |
| 8149 | |
| 8150 | /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit |
| 8151 | /// the specified operations to build the shuffle. |
| 8152 | static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, |
| 8153 | SDValue RHS, SelectionDAG &DAG, |
| 8154 | const SDLoc &dl) { |
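|      | // Each PFEntry packs the cost into bits [31:30], the shuffle opcode into
|      | // bits [29:26], and the LHS/RHS operand IDs into bits [25:13] and [12:0].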
| 8155 | unsigned OpNum = (PFEntry >> 26) & 0x0F; |
| 8156 | unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); |
| 8157 | unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); |
| 8158 | |
| 8159 | if (OpNum == OP_COPY) { |
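|      | // The operand IDs are base-9 digit strings: (1*9+2)*9+3 encodes the
|      | // identity mask <0,1,2,3> (a copy of LHS) and ((4*9+5)*9+6)*9+7 encodes
|      | // <4,5,6,7> (a copy of RHS).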
| 8160 | if (LHSID == (1*9+2)*9+3) return LHS; |
| 8161 | assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
| 8162 | return RHS; |
| 8163 | } |
| 8164 | |
| 8165 | SDValue OpLHS, OpRHS; |
| 8166 | OpLHS = GeneratePerfectShuffle(PFEntry: PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); |
| 8167 | OpRHS = GeneratePerfectShuffle(PFEntry: PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); |
| 8168 | EVT VT = OpLHS.getValueType(); |
| 8169 | |
| 8170 | switch (OpNum) { |
| 8171 | default: llvm_unreachable("Unknown shuffle opcode!");
| 8172 | case OP_VREV: |
| 8173 | // VREV divides the vector in half and swaps within the half. |
| 8174 | if (VT.getScalarSizeInBits() == 32) |
| 8175 | return DAG.getNode(Opcode: ARMISD::VREV64, DL: dl, VT, Operand: OpLHS); |
| 8176 | // vrev <4 x i16> -> VREV32 |
| 8177 | if (VT.getScalarSizeInBits() == 16) |
| 8178 | return DAG.getNode(Opcode: ARMISD::VREV32, DL: dl, VT, Operand: OpLHS); |
| 8179 | // vrev <4 x i8> -> VREV16 |
| 8180 | assert(VT.getScalarSizeInBits() == 8); |
| 8181 | return DAG.getNode(Opcode: ARMISD::VREV16, DL: dl, VT, Operand: OpLHS); |
| 8182 | case OP_VDUP0: |
| 8183 | case OP_VDUP1: |
| 8184 | case OP_VDUP2: |
| 8185 | case OP_VDUP3: |
| 8186 | return DAG.getNode(Opcode: ARMISD::VDUPLANE, DL: dl, VT, |
| 8187 | N1: OpLHS, N2: DAG.getConstant(Val: OpNum-OP_VDUP0, DL: dl, VT: MVT::i32)); |
| 8188 | case OP_VEXT1: |
| 8189 | case OP_VEXT2: |
| 8190 | case OP_VEXT3: |
| 8191 | return DAG.getNode(Opcode: ARMISD::VEXT, DL: dl, VT, |
| 8192 | N1: OpLHS, N2: OpRHS, |
| 8193 | N3: DAG.getConstant(Val: OpNum - OP_VEXT1 + 1, DL: dl, VT: MVT::i32)); |
| 8194 | case OP_VUZPL: |
| 8195 | case OP_VUZPR: |
| 8196 | return DAG.getNode(Opcode: ARMISD::VUZP, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT), |
| 8197 | N1: OpLHS, N2: OpRHS).getValue(R: OpNum-OP_VUZPL); |
| 8198 | case OP_VZIPL: |
| 8199 | case OP_VZIPR: |
| 8200 | return DAG.getNode(Opcode: ARMISD::VZIP, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT), |
| 8201 | N1: OpLHS, N2: OpRHS).getValue(R: OpNum-OP_VZIPL); |
| 8202 | case OP_VTRNL: |
| 8203 | case OP_VTRNR: |
| 8204 | return DAG.getNode(Opcode: ARMISD::VTRN, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT), |
| 8205 | N1: OpLHS, N2: OpRHS).getValue(R: OpNum-OP_VTRNL); |
| 8206 | } |
| 8207 | } |
| 8208 | |
| 8209 | static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, |
| 8210 | ArrayRef<int> ShuffleMask, |
| 8211 | SelectionDAG &DAG) { |
| 8212 | // Check to see if we can use the VTBL instruction. |
| 8213 | SDValue V1 = Op.getOperand(i: 0); |
| 8214 | SDValue V2 = Op.getOperand(i: 1); |
| 8215 | SDLoc DL(Op); |
| 8216 | |
| 8217 | SmallVector<SDValue, 8> VTBLMask; |
| 8218 | for (int I : ShuffleMask) |
| 8219 | VTBLMask.push_back(Elt: DAG.getSignedConstant(Val: I, DL, VT: MVT::i32)); |
| 8220 | |
| 8221 | if (V2.getNode()->isUndef()) |
| 8222 | return DAG.getNode(Opcode: ARMISD::VTBL1, DL, VT: MVT::v8i8, N1: V1, |
| 8223 | N2: DAG.getBuildVector(VT: MVT::v8i8, DL, Ops: VTBLMask)); |
| 8224 | |
| 8225 | return DAG.getNode(Opcode: ARMISD::VTBL2, DL, VT: MVT::v8i8, N1: V1, N2: V2, |
| 8226 | N3: DAG.getBuildVector(VT: MVT::v8i8, DL, Ops: VTBLMask)); |
| 8227 | } |
| 8228 | |
| 8229 | static SDValue LowerReverse_VECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { |
| 8230 | SDLoc DL(Op); |
| 8231 | EVT VT = Op.getValueType(); |
| 8232 | |
| 8233 | assert((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) &&
| 8234 | "Expect a v8i16/v8f16/v16i8 type");
| 8235 | SDValue OpLHS = DAG.getNode(Opcode: ARMISD::VREV64, DL, VT, Operand: Op.getOperand(i: 0)); |
| 8236 | // For a v16i8 type: After the VREV, we have got <7, ..., 0, 15, ..., 8>. Now, |
| 8237 | // extract the first 8 bytes into the top double word and the last 8 bytes |
| 8238 | // into the bottom double word, through a new vector shuffle that will be |
| 8239 | // turned into a VEXT on Neon, or a couple of VMOVDs on MVE. |
| 8240 | std::vector<int> NewMask; |
| 8241 | for (unsigned i = 0; i < VT.getVectorNumElements() / 2; i++) |
| 8242 | NewMask.push_back(x: VT.getVectorNumElements() / 2 + i); |
| 8243 | for (unsigned i = 0; i < VT.getVectorNumElements() / 2; i++) |
| 8244 | NewMask.push_back(x: i); |
| 8245 | return DAG.getVectorShuffle(VT, dl: DL, N1: OpLHS, N2: OpLHS, Mask: NewMask); |
| 8246 | } |
| 8247 | |
| 8248 | static EVT getVectorTyFromPredicateVector(EVT VT) { |
| 8249 | switch (VT.getSimpleVT().SimpleTy) { |
| 8250 | case MVT::v2i1: |
| 8251 | return MVT::v2f64; |
| 8252 | case MVT::v4i1: |
| 8253 | return MVT::v4i32; |
| 8254 | case MVT::v8i1: |
| 8255 | return MVT::v8i16; |
| 8256 | case MVT::v16i1: |
| 8257 | return MVT::v16i8; |
| 8258 | default: |
| 8259 | llvm_unreachable("Unexpected vector predicate type");
| 8260 | } |
| 8261 | } |
| 8262 | |
| 8263 | static SDValue PromoteMVEPredVector(SDLoc dl, SDValue Pred, EVT VT, |
| 8264 | SelectionDAG &DAG) { |
| 8265 | // Converting from boolean predicates to integers involves creating a vector |
| 8266 | // of all ones or all zeroes and selecting the lanes based upon the real |
| 8267 | // predicate. |
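|      | // For example, a v4i1 predicate becomes a v16i8 select between 0xff and
|      | // 0x00 bytes, which is then bitcast to v4i32 so that each true lane reads
|      | // back as all-ones.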
| 8268 | SDValue AllOnes = |
| 8269 | DAG.getTargetConstant(Val: ARM_AM::createVMOVModImm(OpCmode: 0xe, Val: 0xff), DL: dl, VT: MVT::i32); |
| 8270 | AllOnes = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: MVT::v16i8, Operand: AllOnes); |
| 8271 | |
| 8272 | SDValue AllZeroes = |
| 8273 | DAG.getTargetConstant(Val: ARM_AM::createVMOVModImm(OpCmode: 0xe, Val: 0x0), DL: dl, VT: MVT::i32); |
| 8274 | AllZeroes = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: MVT::v16i8, Operand: AllZeroes); |
| 8275 | |
| 8276 | // Get full vector type from predicate type |
| 8277 | EVT NewVT = getVectorTyFromPredicateVector(VT); |
| 8278 | |
| 8279 | SDValue RecastV1; |
| 8280 | // If the real predicate is a v8i1 or v4i1 (not v16i1) then we need to recast
| 8281 | // this to a v16i1. This cannot be done with an ordinary bitcast because the
| 8282 | // sizes are not the same. We have to use an MVE-specific PREDICATE_CAST node,
| 8283 | // since we know in hardware the sizes are really the same.
| 8284 | if (VT != MVT::v16i1) |
| 8285 | RecastV1 = DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::v16i1, Operand: Pred); |
| 8286 | else |
| 8287 | RecastV1 = Pred; |
| 8288 | |
| 8289 | // Select either all ones or zeroes depending upon the real predicate bits. |
| 8290 | SDValue PredAsVector = |
| 8291 | DAG.getNode(Opcode: ISD::VSELECT, DL: dl, VT: MVT::v16i8, N1: RecastV1, N2: AllOnes, N3: AllZeroes); |
| 8292 | |
| 8293 | // Recast our new predicate-as-integer v16i8 vector into something |
| 8294 | // appropriate for the shuffle, i.e. v4i32 for a real v4i1 predicate. |
| 8295 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: NewVT, Operand: PredAsVector); |
| 8296 | } |
| 8297 | |
| 8298 | static SDValue LowerVECTOR_SHUFFLE_i1(SDValue Op, SelectionDAG &DAG, |
| 8299 | const ARMSubtarget *ST) { |
| 8300 | EVT VT = Op.getValueType(); |
| 8301 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Val: Op.getNode()); |
| 8302 | ArrayRef<int> ShuffleMask = SVN->getMask(); |
| 8303 | |
| 8304 | assert(ST->hasMVEIntegerOps() &&
| 8305 | "No support for vector shuffle of boolean predicates");
| 8306 | |
| 8307 | SDValue V1 = Op.getOperand(i: 0); |
| 8308 | SDValue V2 = Op.getOperand(i: 1); |
| 8309 | SDLoc dl(Op); |
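|      | // A reversed mask can be computed on the 16 predicate bits directly: view
|      | // the predicate as an i32, bit-reverse it, and shift the reversed bits
|      | // back down from the top half of the register.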
| 8310 | if (isReverseMask(M: ShuffleMask, VT)) { |
| 8311 | SDValue cast = DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::i32, Operand: V1); |
| 8312 | SDValue rbit = DAG.getNode(Opcode: ISD::BITREVERSE, DL: dl, VT: MVT::i32, Operand: cast); |
| 8313 | SDValue srl = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i32, N1: rbit, |
| 8314 | N2: DAG.getConstant(Val: 16, DL: dl, VT: MVT::i32)); |
| 8315 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT, Operand: srl); |
| 8316 | } |
| 8317 | |
| 8318 | // Until we can come up with optimised cases for every single vector |
| 8319 | // shuffle in existence we have chosen the least painful strategy. This is |
| 8320 | // to essentially promote the boolean predicate to an 8-bit integer, where
| 8321 | // each predicate represents a byte. Then we fall back on a normal integer |
| 8322 | // vector shuffle and convert the result back into a predicate vector. In |
| 8323 | // many cases the generated code might be even better than scalar code |
| 8324 | // operating on bits. Just imagine trying to shuffle 8 arbitrary 2-bit |
| 8325 | // fields in a register into 8 other arbitrary 2-bit fields! |
| 8326 | SDValue PredAsVector1 = PromoteMVEPredVector(dl, Pred: V1, VT, DAG); |
| 8327 | EVT NewVT = PredAsVector1.getValueType(); |
| 8328 | SDValue PredAsVector2 = V2.isUndef() ? DAG.getUNDEF(VT: NewVT) |
| 8329 | : PromoteMVEPredVector(dl, Pred: V2, VT, DAG); |
| 8330 | assert(PredAsVector2.getValueType() == NewVT &&
| 8331 | "Expected identical vector type in expanded i1 shuffle!");
| 8332 | |
| 8333 | // Do the shuffle! |
| 8334 | SDValue Shuffled = DAG.getVectorShuffle(VT: NewVT, dl, N1: PredAsVector1, |
| 8335 | N2: PredAsVector2, Mask: ShuffleMask); |
| 8336 | |
| 8337 | // Now return the result of comparing the shuffled vector with zero, |
| 8338 | // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1. For a v2i1 |
| 8339 | // we convert to a v4i1 compare to fill in the two halves of the i64 as i32s. |
| 8340 | if (VT == MVT::v2i1) { |
| 8341 | SDValue BC = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: Shuffled); |
| 8342 | SDValue Cmp = DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT: MVT::v4i1, N1: BC, |
| 8343 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
| 8344 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::v2i1, Operand: Cmp); |
| 8345 | } |
| 8346 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: Shuffled, |
| 8347 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
| 8348 | } |
| 8349 | |
| 8350 | static SDValue LowerVECTOR_SHUFFLEUsingMovs(SDValue Op, |
| 8351 | ArrayRef<int> ShuffleMask, |
| 8352 | SelectionDAG &DAG) { |
| 8353 | // Attempt to lower the vector shuffle using as many whole register movs as |
| 8354 | // possible. This is useful for types smaller than 32 bits, which would
| 8355 | // otherwise often become a series of GPR movs.
| 8356 | SDLoc dl(Op); |
| 8357 | EVT VT = Op.getValueType(); |
| 8358 | if (VT.getScalarSizeInBits() >= 32) |
| 8359 | return SDValue(); |
| 8360 | |
| 8361 | assert((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) &&
| 8362 | "Unexpected vector type");
| 8363 | int NumElts = VT.getVectorNumElements(); |
| 8364 | int QuarterSize = NumElts / 4; |
| 8365 | // The four final parts of the vector, as i32's |
| 8366 | SDValue Parts[4]; |
| 8367 | |
| 8368 | // Look for full lane vmovs like <0,1,2,3> or <u,5,6,7> etc. (but not
| 8369 | // <u,u,u,u>), returning the vmov lane index.
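|      | // For example, with a v16i8 shuffle each part covers 4 bytes, so a part
|      | // mask of <4,5,6,7> (or <u,5,6,7>) is a whole-register mov from i32 lane 1.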
| 8370 | auto getMovIdx = [](ArrayRef<int> ShuffleMask, int Start, int Length) { |
| 8371 | // Detect which mov lane this would be from the first non-undef element. |
| 8372 | int MovIdx = -1; |
| 8373 | for (int i = 0; i < Length; i++) { |
| 8374 | if (ShuffleMask[Start + i] >= 0) { |
| 8375 | if (ShuffleMask[Start + i] % Length != i) |
| 8376 | return -1; |
| 8377 | MovIdx = ShuffleMask[Start + i] / Length; |
| 8378 | break; |
| 8379 | } |
| 8380 | } |
| 8381 | // If all items are undef, leave this for other combines |
| 8382 | if (MovIdx == -1) |
| 8383 | return -1; |
| 8384 | // Check the remaining values are the correct part of the same mov |
| 8385 | for (int i = 1; i < Length; i++) { |
| 8386 | if (ShuffleMask[Start + i] >= 0 && |
| 8387 | (ShuffleMask[Start + i] / Length != MovIdx || |
| 8388 | ShuffleMask[Start + i] % Length != i)) |
| 8389 | return -1; |
| 8390 | } |
| 8391 | return MovIdx; |
| 8392 | }; |
| 8393 | |
| 8394 | for (int Part = 0; Part < 4; ++Part) { |
| 8395 | // Does this part look like a mov |
| 8396 | int Elt = getMovIdx(ShuffleMask, Part * QuarterSize, QuarterSize); |
| 8397 | if (Elt != -1) { |
| 8398 | SDValue Input = Op->getOperand(Num: 0); |
| 8399 | if (Elt >= 4) { |
| 8400 | Input = Op->getOperand(Num: 1); |
| 8401 | Elt -= 4; |
| 8402 | } |
| 8403 | SDValue BitCast = DAG.getBitcast(VT: MVT::v4f32, V: Input); |
| 8404 | Parts[Part] = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f32, N1: BitCast, |
| 8405 | N2: DAG.getConstant(Val: Elt, DL: dl, VT: MVT::i32)); |
| 8406 | } |
| 8407 | } |
| 8408 | |
| 8409 | // Nothing interesting found, just return |
| 8410 | if (!Parts[0] && !Parts[1] && !Parts[2] && !Parts[3]) |
| 8411 | return SDValue(); |
| 8412 | |
| 8413 | // The other parts need to be built with the old shuffle vector, cast to a |
| 8414 | // v4i32 and extract_vector_elts |
| 8415 | if (!Parts[0] || !Parts[1] || !Parts[2] || !Parts[3]) { |
| 8416 | SmallVector<int, 16> NewShuffleMask; |
| 8417 | for (int Part = 0; Part < 4; ++Part) |
| 8418 | for (int i = 0; i < QuarterSize; i++) |
| 8419 | NewShuffleMask.push_back( |
| 8420 | Elt: Parts[Part] ? -1 : ShuffleMask[Part * QuarterSize + i]); |
| 8421 | SDValue NewShuffle = DAG.getVectorShuffle( |
| 8422 | VT, dl, N1: Op->getOperand(Num: 0), N2: Op->getOperand(Num: 1), Mask: NewShuffleMask); |
| 8423 | SDValue BitCast = DAG.getBitcast(VT: MVT::v4f32, V: NewShuffle); |
| 8424 | |
| 8425 | for (int Part = 0; Part < 4; ++Part) |
| 8426 | if (!Parts[Part]) |
| 8427 | Parts[Part] = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f32, |
| 8428 | N1: BitCast, N2: DAG.getConstant(Val: Part, DL: dl, VT: MVT::i32)); |
| 8429 | } |
| 8430 | // Build a vector out of the various parts and bitcast it back to the original |
| 8431 | // type. |
| 8432 | SDValue NewVec = DAG.getNode(Opcode: ARMISD::BUILD_VECTOR, DL: dl, VT: MVT::v4f32, Ops: Parts); |
| 8433 | return DAG.getBitcast(VT, V: NewVec); |
| 8434 | } |
| 8435 | |
| 8436 | static SDValue LowerVECTOR_SHUFFLEUsingOneOff(SDValue Op, |
| 8437 | ArrayRef<int> ShuffleMask, |
| 8438 | SelectionDAG &DAG) { |
| 8439 | SDValue V1 = Op.getOperand(i: 0); |
| 8440 | SDValue V2 = Op.getOperand(i: 1); |
| 8441 | EVT VT = Op.getValueType(); |
| 8442 | unsigned NumElts = VT.getVectorNumElements(); |
| 8443 | |
| 8444 | // A one-off identity mask is one that is mostly an identity mask from a
| 8445 | // single source but contains a single element out-of-place, either from a
| 8446 | // different vector or from another position in the same vector. As opposed
| 8447 | // to lowering this via an ARMISD::BUILD_VECTOR we can generate an
| 8448 | // extract/insert pair directly.
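|      | // For example, for a v4i32 shuffle the mask <0,1,2,7> is an identity of V1
|      | // with element 3 out of place, so we extract lane 3 of V2 and insert it
|      | // into lane 3 of V1.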
| 8449 | auto isOneOffIdentityMask = [](ArrayRef<int> Mask, EVT VT, int BaseOffset, |
| 8450 | int &OffElement) { |
| 8451 | OffElement = -1; |
| 8452 | int NonUndef = 0; |
| 8453 | for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) { |
| 8454 | if (Mask[i] == -1) |
| 8455 | continue; |
| 8456 | NonUndef++; |
| 8457 | if (Mask[i] != i + BaseOffset) { |
| 8458 | if (OffElement == -1) |
| 8459 | OffElement = i; |
| 8460 | else |
| 8461 | return false; |
| 8462 | } |
| 8463 | } |
| 8464 | return NonUndef > 2 && OffElement != -1; |
| 8465 | }; |
| 8466 | int OffElement; |
| 8467 | SDValue VInput; |
| 8468 | if (isOneOffIdentityMask(ShuffleMask, VT, 0, OffElement)) |
| 8469 | VInput = V1; |
| 8470 | else if (isOneOffIdentityMask(ShuffleMask, VT, NumElts, OffElement)) |
| 8471 | VInput = V2; |
| 8472 | else |
| 8473 | return SDValue(); |
| 8474 | |
| 8475 | SDLoc dl(Op); |
| 8476 | EVT SVT = VT.getScalarType() == MVT::i8 || VT.getScalarType() == MVT::i16 |
| 8477 | ? MVT::i32 |
| 8478 | : VT.getScalarType(); |
| 8479 | SDValue Elt = DAG.getNode( |
| 8480 | Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: SVT, |
| 8481 | N1: ShuffleMask[OffElement] < (int)NumElts ? V1 : V2, |
| 8482 | N2: DAG.getVectorIdxConstant(Val: ShuffleMask[OffElement] % NumElts, DL: dl)); |
| 8483 | return DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT, N1: VInput, N2: Elt, |
| 8484 | N3: DAG.getVectorIdxConstant(Val: OffElement % NumElts, DL: dl)); |
| 8485 | } |
| 8486 | |
| 8487 | static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG, |
| 8488 | const ARMSubtarget *ST) { |
| 8489 | SDValue V1 = Op.getOperand(i: 0); |
| 8490 | SDValue V2 = Op.getOperand(i: 1); |
| 8491 | SDLoc dl(Op); |
| 8492 | EVT VT = Op.getValueType(); |
| 8493 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Val: Op.getNode()); |
| 8494 | unsigned EltSize = VT.getScalarSizeInBits(); |
| 8495 | |
| 8496 | if (ST->hasMVEIntegerOps() && EltSize == 1) |
| 8497 | return LowerVECTOR_SHUFFLE_i1(Op, DAG, ST); |
| 8498 | |
| 8499 | // Convert shuffles that are directly supported on NEON to target-specific |
| 8500 | // DAG nodes, instead of keeping them as shuffles and matching them again |
| 8501 | // during code selection. This is more efficient and avoids the possibility |
| 8502 | // of inconsistencies between legalization and selection. |
| 8503 | // FIXME: floating-point vectors should be canonicalized to integer vectors
| 8504 | // of the same size so that they get CSEd properly.
| 8505 | ArrayRef<int> ShuffleMask = SVN->getMask(); |
| 8506 | |
| 8507 | if (EltSize <= 32) { |
| 8508 | if (SVN->isSplat()) { |
| 8509 | int Lane = SVN->getSplatIndex(); |
| 8510 | // If this is undef splat, generate it via "just" vdup, if possible. |
| 8511 | if (Lane == -1) Lane = 0; |
| 8512 | |
| 8513 | // Test if V1 is a SCALAR_TO_VECTOR. |
| 8514 | if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { |
| 8515 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT, Operand: V1.getOperand(i: 0)); |
| 8516 | } |
| 8517 | // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR |
| 8518 | // (and probably will turn into a SCALAR_TO_VECTOR once legalization |
| 8519 | // reaches it). |
| 8520 | if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR && |
| 8521 | !isa<ConstantSDNode>(Val: V1.getOperand(i: 0))) { |
| 8522 | bool IsScalarToVector = true; |
| 8523 | for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i) |
| 8524 | if (!V1.getOperand(i).isUndef()) { |
| 8525 | IsScalarToVector = false; |
| 8526 | break; |
| 8527 | } |
| 8528 | if (IsScalarToVector) |
| 8529 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT, Operand: V1.getOperand(i: 0)); |
| 8530 | } |
| 8531 | return DAG.getNode(Opcode: ARMISD::VDUPLANE, DL: dl, VT, N1: V1, |
| 8532 | N2: DAG.getConstant(Val: Lane, DL: dl, VT: MVT::i32)); |
| 8533 | } |
| 8534 | |
| 8535 | bool ReverseVEXT = false; |
| 8536 | unsigned Imm = 0; |
| 8537 | if (ST->hasNEON() && isVEXTMask(M: ShuffleMask, VT, ReverseVEXT, Imm)) { |
| 8538 | if (ReverseVEXT) |
| 8539 | std::swap(a&: V1, b&: V2); |
| 8540 | return DAG.getNode(Opcode: ARMISD::VEXT, DL: dl, VT, N1: V1, N2: V2, |
| 8541 | N3: DAG.getConstant(Val: Imm, DL: dl, VT: MVT::i32)); |
| 8542 | } |
| 8543 | |
| 8544 | if (isVREVMask(M: ShuffleMask, VT, BlockSize: 64)) |
| 8545 | return DAG.getNode(Opcode: ARMISD::VREV64, DL: dl, VT, Operand: V1); |
| 8546 | if (isVREVMask(M: ShuffleMask, VT, BlockSize: 32)) |
| 8547 | return DAG.getNode(Opcode: ARMISD::VREV32, DL: dl, VT, Operand: V1); |
| 8548 | if (isVREVMask(M: ShuffleMask, VT, BlockSize: 16)) |
| 8549 | return DAG.getNode(Opcode: ARMISD::VREV16, DL: dl, VT, Operand: V1); |
| 8550 | |
| 8551 | if (ST->hasNEON() && V2->isUndef() && isSingletonVEXTMask(M: ShuffleMask, VT, Imm)) { |
| 8552 | return DAG.getNode(Opcode: ARMISD::VEXT, DL: dl, VT, N1: V1, N2: V1, |
| 8553 | N3: DAG.getConstant(Val: Imm, DL: dl, VT: MVT::i32)); |
| 8554 | } |
| 8555 | |
| 8556 | // Check for Neon shuffles that modify both input vectors in place. |
| 8557 | // If both results are used, i.e., if there are two shuffles with the same |
| 8558 | // source operands and with masks corresponding to both results of one of |
| 8559 | // these operations, DAG memoization will ensure that a single node is |
| 8560 | // used for both shuffles. |
| 8561 | unsigned WhichResult = 0; |
| 8562 | bool isV_UNDEF = false; |
| 8563 | if (ST->hasNEON()) { |
| 8564 | if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( |
| 8565 | ShuffleMask, VT, WhichResult, isV_UNDEF)) { |
| 8566 | if (isV_UNDEF) |
| 8567 | V2 = V1; |
| 8568 | return DAG.getNode(Opcode: ShuffleOpc, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT), N1: V1, N2: V2) |
| 8569 | .getValue(R: WhichResult); |
| 8570 | } |
| 8571 | } |
| 8572 | if (ST->hasMVEIntegerOps()) { |
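|      | // The trailing constant on ARMISD::VMOVN selects whether the narrowed
|      | // lanes are written to the bottom (0) or top (1) half-lanes, corresponding
|      | // to the MVE VMOVNB/VMOVNT instructions.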
| 8573 | if (isVMOVNMask(M: ShuffleMask, VT, Top: false, SingleSource: false)) |
| 8574 | return DAG.getNode(Opcode: ARMISD::VMOVN, DL: dl, VT, N1: V2, N2: V1, |
| 8575 | N3: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 8576 | if (isVMOVNMask(M: ShuffleMask, VT, Top: true, SingleSource: false)) |
| 8577 | return DAG.getNode(Opcode: ARMISD::VMOVN, DL: dl, VT, N1: V1, N2: V2, |
| 8578 | N3: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
| 8579 | if (isVMOVNMask(M: ShuffleMask, VT, Top: true, SingleSource: true)) |
| 8580 | return DAG.getNode(Opcode: ARMISD::VMOVN, DL: dl, VT, N1: V1, N2: V1, |
| 8581 | N3: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
| 8582 | } |
| 8583 | |
| 8584 | // Also check for these shuffles through CONCAT_VECTORS: we canonicalize |
| 8585 | // shuffles that produce a result larger than their operands with: |
| 8586 | // shuffle(concat(v1, undef), concat(v2, undef)) |
| 8587 | // -> |
| 8588 | // shuffle(concat(v1, v2), undef) |
| 8589 | // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine). |
| 8590 | // |
| 8591 | // This is useful in the general case, but there are special cases where |
| 8592 | // native shuffles produce larger results: the two-result ops. |
| 8593 | // |
| 8594 | // Look through the concat when lowering them: |
| 8595 | // shuffle(concat(v1, v2), undef) |
| 8596 | // -> |
| 8597 | // concat(VZIP(v1, v2):0, :1) |
| 8598 | // |
| 8599 | if (ST->hasNEON() && V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) { |
| 8600 | SDValue SubV1 = V1->getOperand(Num: 0); |
| 8601 | SDValue SubV2 = V1->getOperand(Num: 1); |
| 8602 | EVT SubVT = SubV1.getValueType(); |
| 8603 | |
| 8604 | // We expect these to have been canonicalized to -1. |
| 8605 | assert(llvm::all_of(ShuffleMask, [&](int i) { |
| 8606 | return i < (int)VT.getVectorNumElements(); |
| 8607 | }) && "Unexpected shuffle index into UNDEF operand!");
| 8608 | |
| 8609 | if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( |
| 8610 | ShuffleMask, VT: SubVT, WhichResult, isV_UNDEF)) { |
| 8611 | if (isV_UNDEF) |
| 8612 | SubV2 = SubV1; |
| 8613 | assert((WhichResult == 0) &&
| 8614 | "In-place shuffle of concat can only have one result!");
| 8615 | SDValue Res = DAG.getNode(Opcode: ShuffleOpc, DL: dl, VTList: DAG.getVTList(VT1: SubVT, VT2: SubVT), |
| 8616 | N1: SubV1, N2: SubV2); |
| 8617 | return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT, N1: Res.getValue(R: 0), |
| 8618 | N2: Res.getValue(R: 1)); |
| 8619 | } |
| 8620 | } |
| 8621 | } |
| 8622 | |
| 8623 | if (ST->hasMVEIntegerOps() && EltSize <= 32) { |
| 8624 | if (SDValue V = LowerVECTOR_SHUFFLEUsingOneOff(Op, ShuffleMask, DAG)) |
| 8625 | return V; |
| 8626 | |
| 8627 | for (bool Top : {false, true}) { |
| 8628 | for (bool SingleSource : {false, true}) { |
| 8629 | if (isTruncMask(M: ShuffleMask, VT, Top, SingleSource)) { |
| 8630 | MVT FromSVT = MVT::getIntegerVT(BitWidth: EltSize * 2); |
| 8631 | MVT FromVT = MVT::getVectorVT(VT: FromSVT, NumElements: ShuffleMask.size() / 2); |
| 8632 | SDValue Lo = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: FromVT, Operand: V1); |
| 8633 | SDValue Hi = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: FromVT, |
| 8634 | Operand: SingleSource ? V1 : V2); |
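|      | // For a "Top" mask, shifting each wide lane right by EltSize makes
|      | // MVETRUNC keep the top halves of the lanes instead of the bottoms.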
| 8635 | if (Top) { |
| 8636 | SDValue Amt = DAG.getConstant(Val: EltSize, DL: dl, VT: FromVT); |
| 8637 | Lo = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: FromVT, N1: Lo, N2: Amt); |
| 8638 | Hi = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: FromVT, N1: Hi, N2: Amt); |
| 8639 | } |
| 8640 | return DAG.getNode(Opcode: ARMISD::MVETRUNC, DL: dl, VT, N1: Lo, N2: Hi); |
| 8641 | } |
| 8642 | } |
| 8643 | } |
| 8644 | } |
| 8645 | |
| 8646 | // If the shuffle is not directly supported and it has 4 elements, use |
| 8647 | // the PerfectShuffle-generated table to synthesize it from other shuffles. |
| 8648 | unsigned NumElts = VT.getVectorNumElements(); |
| 8649 | if (NumElts == 4) { |
| 8650 | unsigned PFIndexes[4]; |
| 8651 | for (unsigned i = 0; i != 4; ++i) { |
| 8652 | if (ShuffleMask[i] < 0) |
| 8653 | PFIndexes[i] = 8; |
| 8654 | else |
| 8655 | PFIndexes[i] = ShuffleMask[i]; |
| 8656 | } |
| 8657 | |
| 8658 | // Compute the index in the perfect shuffle table. |
| 8659 | unsigned PFTableIndex = |
| 8660 | PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; |
| 8661 | unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; |
| 8662 | unsigned Cost = (PFEntry >> 30); |
| 8663 | |
| 8664 | if (Cost <= 4) { |
| 8665 | if (ST->hasNEON()) |
| 8666 | return GeneratePerfectShuffle(PFEntry, LHS: V1, RHS: V2, DAG, dl); |
| 8667 | else if (isLegalMVEShuffleOp(PFEntry)) { |
| 8668 | unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); |
| 8669 | unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); |
| 8670 | unsigned PFEntryLHS = PerfectShuffleTable[LHSID]; |
| 8671 | unsigned PFEntryRHS = PerfectShuffleTable[RHSID]; |
| 8672 | if (isLegalMVEShuffleOp(PFEntry: PFEntryLHS) && isLegalMVEShuffleOp(PFEntry: PFEntryRHS)) |
| 8673 | return GeneratePerfectShuffle(PFEntry, LHS: V1, RHS: V2, DAG, dl); |
| 8674 | } |
| 8675 | } |
| 8676 | } |
| 8677 | |
| 8678 | // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. |
| 8679 | if (EltSize >= 32) { |
| 8680 | // Do the expansion with floating-point types, since that is what the VFP |
| 8681 | // registers are defined to use, and since i64 is not legal. |
| 8682 | EVT EltVT = EVT::getFloatingPointVT(BitWidth: EltSize); |
| 8683 | EVT VecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: EltVT, NumElements: NumElts); |
| 8684 | V1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecVT, Operand: V1); |
| 8685 | V2 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecVT, Operand: V2); |
| 8686 | SmallVector<SDValue, 8> Ops; |
| 8687 | for (unsigned i = 0; i < NumElts; ++i) { |
| 8688 | if (ShuffleMask[i] < 0) |
| 8689 | Ops.push_back(Elt: DAG.getUNDEF(VT: EltVT)); |
| 8690 | else |
| 8691 | Ops.push_back(Elt: DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, |
| 8692 | N1: ShuffleMask[i] < (int)NumElts ? V1 : V2, |
| 8693 | N2: DAG.getConstant(Val: ShuffleMask[i] & (NumElts-1), |
| 8694 | DL: dl, VT: MVT::i32))); |
| 8695 | } |
| 8696 | SDValue Val = DAG.getNode(Opcode: ARMISD::BUILD_VECTOR, DL: dl, VT: VecVT, Ops); |
| 8697 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Val); |
| 8698 | } |
| 8699 | |
| 8700 | if ((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) && |
| 8701 | isReverseMask(M: ShuffleMask, VT)) |
| 8702 | return LowerReverse_VECTOR_SHUFFLE(Op, DAG); |
| 8703 | |
| 8704 | if (ST->hasNEON() && VT == MVT::v8i8) |
| 8705 | if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG)) |
| 8706 | return NewOp; |
| 8707 | |
| 8708 | if (ST->hasMVEIntegerOps()) |
| 8709 | if (SDValue NewOp = LowerVECTOR_SHUFFLEUsingMovs(Op, ShuffleMask, DAG)) |
| 8710 | return NewOp; |
| 8711 | |
| 8712 | return SDValue(); |
| 8713 | } |
| 8714 | |
| 8715 | static SDValue LowerINSERT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG, |
| 8716 | const ARMSubtarget *ST) { |
| 8717 | EVT VecVT = Op.getOperand(i: 0).getValueType(); |
| 8718 | SDLoc dl(Op); |
| 8719 | |
| 8720 | assert(ST->hasMVEIntegerOps() &&
| 8721 | "LowerINSERT_VECTOR_ELT_i1 called without MVE!");
| 8722 | |
| 8723 | SDValue Conv = |
| 8724 | DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::i32, Operand: Op->getOperand(Num: 0)); |
| 8725 | unsigned Lane = Op.getConstantOperandVal(i: 2); |
| 8726 | unsigned LaneWidth = |
| 8727 | getVectorTyFromPredicateVector(VT: VecVT).getScalarSizeInBits() / 8; |
| 8728 | unsigned Mask = ((1 << LaneWidth) - 1) << Lane * LaneWidth; |
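|      | // For example, with a v4i1 predicate LaneWidth is 4, so lane L occupies
|      | // the four bits [4*L+3 : 4*L] of the i32 predicate value, and Mask covers
|      | // exactly those bits.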
| 8729 | SDValue Ext = DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL: dl, VT: MVT::i32, |
| 8730 | N1: Op.getOperand(i: 1), N2: DAG.getValueType(MVT::i1)); |
| 8731 | SDValue BFI = DAG.getNode(Opcode: ARMISD::BFI, DL: dl, VT: MVT::i32, N1: Conv, N2: Ext, |
| 8732 | N3: DAG.getConstant(Val: ~Mask, DL: dl, VT: MVT::i32)); |
| 8733 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: Op.getValueType(), Operand: BFI); |
| 8734 | } |
| 8735 | |
| 8736 | SDValue ARMTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, |
| 8737 | SelectionDAG &DAG) const { |
| 8738 | // INSERT_VECTOR_ELT is legal only for immediate indexes. |
| 8739 | SDValue Lane = Op.getOperand(i: 2); |
| 8740 | if (!isa<ConstantSDNode>(Val: Lane)) |
| 8741 | return SDValue(); |
| 8742 | |
| 8743 | SDValue Elt = Op.getOperand(i: 1); |
| 8744 | EVT EltVT = Elt.getValueType(); |
| 8745 | |
| 8746 | if (Subtarget->hasMVEIntegerOps() && |
| 8747 | Op.getValueType().getScalarSizeInBits() == 1) |
| 8748 | return LowerINSERT_VECTOR_ELT_i1(Op, DAG, ST: Subtarget); |
| 8749 | |
| 8750 | if (getTypeAction(Context&: *DAG.getContext(), VT: EltVT) == |
| 8751 | TargetLowering::TypeSoftPromoteHalf) { |
| 8752 | // INSERT_VECTOR_ELT doesn't want f16 operands promoting to f32, |
| 8753 | // but the type system will try to do that if we don't intervene. |
| 8754 | // Reinterpret any such vector-element insertion as one with the |
| 8755 | // corresponding integer types. |
| 8756 | |
| 8757 | SDLoc dl(Op); |
| 8758 | |
| 8759 | EVT IEltVT = MVT::getIntegerVT(BitWidth: EltVT.getScalarSizeInBits()); |
| 8760 | assert(getTypeAction(*DAG.getContext(), IEltVT) != |
| 8761 | TargetLowering::TypeSoftPromoteHalf); |
| 8762 | |
| 8763 | SDValue VecIn = Op.getOperand(i: 0); |
| 8764 | EVT VecVT = VecIn.getValueType(); |
| 8765 | EVT IVecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: IEltVT, |
| 8766 | NumElements: VecVT.getVectorNumElements()); |
| 8767 | |
| 8768 | SDValue IElt = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: IEltVT, Operand: Elt); |
| 8769 | SDValue IVecIn = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: IVecVT, Operand: VecIn); |
| 8770 | SDValue IVecOut = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: IVecVT, |
| 8771 | N1: IVecIn, N2: IElt, N3: Lane); |
| 8772 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecVT, Operand: IVecOut); |
| 8773 | } |
| 8774 | |
| 8775 | return Op; |
| 8776 | } |
| 8777 | |
| 8778 | static SDValue LowerEXTRACT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
| 8779 | const ARMSubtarget *ST) { |
| 8780 | EVT VecVT = Op.getOperand(i: 0).getValueType(); |
| 8781 | SDLoc dl(Op); |
| 8782 | |
| 8783 | assert(ST->hasMVEIntegerOps() &&
| 8784 | "LowerEXTRACT_VECTOR_ELT_i1 called without MVE!");
| 8785 | |
| 8786 | SDValue Conv = |
| 8787 | DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::i32, Operand: Op->getOperand(Num: 0)); |
| 8788 | unsigned Lane = Op.getConstantOperandVal(i: 1); |
| 8789 | unsigned LaneWidth = |
| 8790 | getVectorTyFromPredicateVector(VT: VecVT).getScalarSizeInBits() / 8; |
| 8791 | SDValue Shift = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i32, N1: Conv, |
| 8792 | N2: DAG.getConstant(Val: Lane * LaneWidth, DL: dl, VT: MVT::i32)); |
| 8793 | return Shift; |
| 8794 | } |
| 8795 | |
| 8796 | static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG,
| 8797 | const ARMSubtarget *ST) { |
| 8798 | // EXTRACT_VECTOR_ELT is legal only for immediate indexes. |
| 8799 | SDValue Lane = Op.getOperand(i: 1); |
| 8800 | if (!isa<ConstantSDNode>(Val: Lane)) |
| 8801 | return SDValue(); |
| 8802 | |
| 8803 | SDValue Vec = Op.getOperand(i: 0); |
| 8804 | EVT VT = Vec.getValueType(); |
| 8805 | |
| 8806 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) |
| 8807 | return LowerEXTRACT_VECTOR_ELT_i1(Op, DAG, ST); |
| 8808 | |
| 8809 | if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) { |
| 8810 | SDLoc dl(Op); |
| 8811 | return DAG.getNode(Opcode: ARMISD::VGETLANEu, DL: dl, VT: MVT::i32, N1: Vec, N2: Lane); |
| 8812 | } |
| 8813 | |
| 8814 | return Op; |
| 8815 | } |
| 8816 | |
| 8817 | static SDValue LowerCONCAT_VECTORS_i1(SDValue Op, SelectionDAG &DAG, |
| 8818 | const ARMSubtarget *ST) { |
| 8819 | SDLoc dl(Op); |
| 8820 | assert(Op.getValueType().getScalarSizeInBits() == 1 &&
| 8821 | "Unexpected custom CONCAT_VECTORS lowering");
| 8822 | assert(isPowerOf2_32(Op.getNumOperands()) &&
| 8823 | "Unexpected custom CONCAT_VECTORS lowering");
| 8824 | assert(ST->hasMVEIntegerOps() &&
| 8825 | "CONCAT_VECTORS lowering only supported for MVE");
| 8826 | |
| 8827 | auto ConcatPair = [&](SDValue V1, SDValue V2) { |
| 8828 | EVT Op1VT = V1.getValueType(); |
| 8829 | EVT Op2VT = V2.getValueType(); |
| 8830 | assert(Op1VT == Op2VT && "Operand types don't match!");
| 8831 | assert((Op1VT == MVT::v2i1 || Op1VT == MVT::v4i1 || Op1VT == MVT::v8i1) &&
| 8832 | "Unexpected i1 concat operations!");
| 8833 | EVT VT = Op1VT.getDoubleNumVectorElementsVT(Context&: *DAG.getContext()); |
| 8834 | |
| 8835 | SDValue NewV1 = PromoteMVEPredVector(dl, Pred: V1, VT: Op1VT, DAG); |
| 8836 | SDValue NewV2 = PromoteMVEPredVector(dl, Pred: V2, VT: Op2VT, DAG); |
| 8837 | |
| 8838 | // We now have Op1 + Op2 promoted to vectors of integers, where v8i1 gets |
| 8839 | // promoted to v8i16, etc. |
| 8840 | MVT ElType = |
| 8841 | getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT(); |
| 8842 | unsigned NumElts = 2 * Op1VT.getVectorNumElements(); |
| 8843 | |
| 8844 | EVT ConcatVT = MVT::getVectorVT(VT: ElType, NumElements: NumElts); |
| 8845 | if (Op1VT == MVT::v4i1 || Op1VT == MVT::v8i1) { |
| 8846 | // Use MVETRUNC to truncate the combined NewV1::NewV2 into the smaller |
| 8847 | // ConcatVT. |
| 8848 | SDValue ConVec = |
| 8849 | DAG.getNode(Opcode: ARMISD::MVETRUNC, DL: dl, VT: ConcatVT, N1: NewV1, N2: NewV2); |
| 8850 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: ConVec, |
| 8851 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
| 8852 | } |
| 8853 | |
| 8854 | // Extract the vector elements from Op1 and Op2 one by one and truncate them |
| 8855 | // to be the right size for the destination. For example, if Op1 is v4i1 |
| 8856 | // then the promoted vector is v4i32. The result of concatenation gives a |
| 8857 | // v8i1, which when promoted is v8i16. That means each i32 element from Op1 |
| 8858 | // needs truncating to i16 and inserting in the result. |
| 8859 | auto ExtractInto = [&DAG, &dl](SDValue NewV, SDValue ConVec, unsigned &j) {
| 8860 | EVT NewVT = NewV.getValueType(); |
| 8861 | EVT ConcatVT = ConVec.getValueType(); |
| 8862 | unsigned ExtScale = 1; |
| 8863 | if (NewVT == MVT::v2f64) { |
| 8864 | NewV = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: NewV); |
| 8865 | ExtScale = 2; |
| 8866 | } |
| 8867 | for (unsigned i = 0, e = NewVT.getVectorNumElements(); i < e; i++, j++) { |
| 8868 | SDValue Elt = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::i32, N1: NewV, |
| 8869 | N2: DAG.getIntPtrConstant(Val: i * ExtScale, DL: dl)); |
| 8870 | ConVec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: ConcatVT, N1: ConVec, N2: Elt, |
| 8871 | N3: DAG.getConstant(Val: j, DL: dl, VT: MVT::i32)); |
| 8872 | } |
| 8873 | return ConVec; |
| 8874 | }; |
| 8875 | unsigned j = 0; |
| 8876 | SDValue ConVec = DAG.getNode(Opcode: ISD::UNDEF, DL: dl, VT: ConcatVT); |
| 8877 | ConVec = ExtractInto(NewV1, ConVec, j); |
| 8878 | ConVec = ExtractInto(NewV2, ConVec, j); |
| 8879 | |
| 8880 | // Now return the result of comparing the subvector with zero, which will |
| 8881 | // generate a real predicate, i.e. v4i1, v8i1 or v16i1. |
| 8882 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: ConVec, |
| 8883 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
| 8884 | }; |
| 8885 | |
| 8886 | // Concat each pair of subvectors and pack into the lower half of the array. |
| 8887 | SmallVector<SDValue> ConcatOps(Op->ops()); |
| 8888 | while (ConcatOps.size() > 1) { |
| 8889 | for (unsigned I = 0, E = ConcatOps.size(); I != E; I += 2) { |
| 8890 | SDValue V1 = ConcatOps[I]; |
| 8891 | SDValue V2 = ConcatOps[I + 1]; |
| 8892 | ConcatOps[I / 2] = ConcatPair(V1, V2); |
| 8893 | } |
| 8894 | ConcatOps.resize(N: ConcatOps.size() / 2); |
| 8895 | } |
| 8896 | return ConcatOps[0]; |
| 8897 | } |
| 8898 | |
| 8899 | static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG, |
| 8900 | const ARMSubtarget *ST) { |
| 8901 | EVT VT = Op->getValueType(ResNo: 0); |
| 8902 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) |
| 8903 | return LowerCONCAT_VECTORS_i1(Op, DAG, ST); |
| 8904 | |
| 8905 | // The only time a CONCAT_VECTORS operation can have legal types is when |
| 8906 | // two 64-bit vectors are concatenated to a 128-bit vector. |
| 8907 | assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
| 8908 | "unexpected CONCAT_VECTORS");
| 8909 | SDLoc dl(Op); |
| 8910 | SDValue Val = DAG.getUNDEF(VT: MVT::v2f64); |
| 8911 | SDValue Op0 = Op.getOperand(i: 0); |
| 8912 | SDValue Op1 = Op.getOperand(i: 1); |
| 8913 | if (!Op0.isUndef()) |
| 8914 | Val = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: Val, |
| 8915 | N2: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f64, Operand: Op0), |
| 8916 | N3: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
| 8917 | if (!Op1.isUndef()) |
| 8918 | Val = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: Val, |
| 8919 | N2: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f64, Operand: Op1), |
| 8920 | N3: DAG.getIntPtrConstant(Val: 1, DL: dl)); |
| 8921 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: Op.getValueType(), Operand: Val); |
| 8922 | } |
| 8923 | |
| 8924 | static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG,
| 8925 | const ARMSubtarget *ST) { |
| 8926 | SDValue V1 = Op.getOperand(i: 0); |
| 8927 | SDValue V2 = Op.getOperand(i: 1); |
| 8928 | SDLoc dl(Op); |
| 8929 | EVT VT = Op.getValueType(); |
| 8930 | EVT Op1VT = V1.getValueType(); |
| 8931 | unsigned NumElts = VT.getVectorNumElements(); |
| 8932 | unsigned Index = V2->getAsZExtVal(); |
| 8933 | |
| 8934 | assert(VT.getScalarSizeInBits() == 1 &&
| 8935 | "Unexpected custom EXTRACT_SUBVECTOR lowering");
| 8936 | assert(ST->hasMVEIntegerOps() &&
| 8937 | "EXTRACT_SUBVECTOR lowering only supported for MVE");
| 8938 | |
| 8939 | SDValue NewV1 = PromoteMVEPredVector(dl, Pred: V1, VT: Op1VT, DAG); |
| 8940 | |
| 8941 | // We now have Op1 promoted to a vector of integers, where v8i1 gets |
| 8942 | // promoted to v8i16, etc. |
| 8943 | |
| 8944 | MVT ElType = getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT(); |
| 8945 | |
| 8946 | if (NumElts == 2) { |
| 8947 | EVT SubVT = MVT::v4i32; |
| 8948 | SDValue SubVec = DAG.getNode(Opcode: ISD::UNDEF, DL: dl, VT: SubVT); |
| 8949 | for (unsigned i = Index, j = 0; i < (Index + NumElts); i++, j += 2) { |
| 8950 | SDValue Elt = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::i32, N1: NewV1, |
| 8951 | N2: DAG.getIntPtrConstant(Val: i, DL: dl)); |
| 8952 | SubVec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: SubVT, N1: SubVec, N2: Elt, |
| 8953 | N3: DAG.getConstant(Val: j, DL: dl, VT: MVT::i32)); |
| 8954 | SubVec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: SubVT, N1: SubVec, N2: Elt, |
| 8955 | N3: DAG.getConstant(Val: j + 1, DL: dl, VT: MVT::i32)); |
| 8956 | } |
| 8957 | SDValue Cmp = DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT: MVT::v4i1, N1: SubVec, |
| 8958 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
| 8959 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::v2i1, Operand: Cmp); |
| 8960 | } |
| 8961 | |
| 8962 | EVT SubVT = MVT::getVectorVT(VT: ElType, NumElements: NumElts); |
| 8963 | SDValue SubVec = DAG.getNode(Opcode: ISD::UNDEF, DL: dl, VT: SubVT); |
| 8964 | for (unsigned i = Index, j = 0; i < (Index + NumElts); i++, j++) { |
| 8965 | SDValue Elt = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::i32, N1: NewV1, |
| 8966 | N2: DAG.getIntPtrConstant(Val: i, DL: dl)); |
| 8967 | SubVec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: SubVT, N1: SubVec, N2: Elt, |
| 8968 | N3: DAG.getConstant(Val: j, DL: dl, VT: MVT::i32)); |
| 8969 | } |
| 8970 | |
| 8971 | // Now return the result of comparing the subvector with zero, |
| 8972 | // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1. |
| 8973 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: SubVec, |
| 8974 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
| 8975 | } |
| 8976 | |
| 8977 | // Turn a truncate into a predicate (an i1 vector) into icmp(and(x, 1), 0). |
| 8978 | static SDValue LowerTruncatei1(SDNode *N, SelectionDAG &DAG, |
| 8979 | const ARMSubtarget *ST) { |
| 8980 | assert(ST->hasMVEIntegerOps() && "Expected MVE!");
| 8981 | EVT VT = N->getValueType(ResNo: 0); |
| 8982 | assert((VT == MVT::v16i1 || VT == MVT::v8i1 || VT == MVT::v4i1) &&
| 8983 | "Expected a vector i1 type!");
| 8984 | SDValue Op = N->getOperand(Num: 0); |
| 8985 | EVT FromVT = Op.getValueType(); |
| 8986 | SDLoc DL(N); |
| 8987 | |
| 8988 | SDValue And = |
| 8989 | DAG.getNode(Opcode: ISD::AND, DL, VT: FromVT, N1: Op, N2: DAG.getConstant(Val: 1, DL, VT: FromVT)); |
| 8990 | return DAG.getNode(Opcode: ISD::SETCC, DL, VT, N1: And, N2: DAG.getConstant(Val: 0, DL, VT: FromVT), |
| 8991 | N3: DAG.getCondCode(Cond: ISD::SETNE)); |
| 8992 | } |
| 8993 | |
| 8994 | static SDValue LowerTruncate(SDNode *N, SelectionDAG &DAG, |
| 8995 | const ARMSubtarget *Subtarget) { |
| 8996 | if (!Subtarget->hasMVEIntegerOps()) |
| 8997 | return SDValue(); |
| 8998 | |
| 8999 | EVT ToVT = N->getValueType(ResNo: 0); |
| 9000 | if (ToVT.getScalarType() == MVT::i1) |
| 9001 | return LowerTruncatei1(N, DAG, ST: Subtarget); |
| 9002 | |
| 9003 | // MVE does not have a single instruction to perform the truncation of a v4i32 |
| 9004 | // into the lower half of a v8i16, in the same way that a NEON vmovn would. |
| 9005 | // Most of the instructions in MVE follow the 'Beats' system, where moving |
| 9006 | // values from different lanes is usually something that the instructions |
| 9007 | // avoid. |
| 9008 | // |
| 9009 | // Instead it has top/bottom instructions such as VMOVLT/B and VMOVNT/B,
| 9010 | // which take the top/bottom half of a larger lane and extend it (or do the
| 9011 | // opposite, truncating into the top/bottom lane from a larger lane). Note
| 9012 | // that because of the way we widen lanes, a v4i16 is really a v4i32 using
| 9013 | // the bottom 16 bits of each vector lane. This works really well with T/B
| 9014 | // instructions, but that doesn't extend to v8i32->v8i16, where the lanes
| 9015 | // need to be reordered.
| 9016 | // |
| 9017 | // But truncates and sext/zext are always going to be fairly common from llvm. |
| 9018 | // We have several options for how to deal with them: |
| 9019 | // - Wherever possible combine them into an instruction that makes them |
| 9020 | // "free". This includes loads/stores, which can perform the trunc as part |
| 9021 | // of the memory operation. Or certain shuffles that can be turned into |
| 9022 | // VMOVN/VMOVL. |
| 9023 | // - Lane Interleaving to transform blocks surrounded by ext/trunc. So |
| 9024 | // trunc(mul(sext(a), sext(b))) may become |
| 9025 | // VMOVNT(VMUL(VMOVLB(a), VMOVLB(b)), VMUL(VMOVLT(a), VMOVLT(b))). (Which in |
| 9026 | // this case can use VMULL). This is performed in the |
| 9027 | // MVELaneInterleavingPass. |
| 9028 | // - Otherwise we have an option. By default we would expand the |
| 9029 | // zext/sext/trunc into a series of lane extract/inserts going via GPR |
| 9030 | // registers. One for each vector lane in the vector. This can obviously be |
| 9031 | // very expensive. |
| 9032 | // - The other option is to use the fact that loads/stores can extend/truncate
| 9033 | // to turn a trunc into two truncating stack stores and a stack reload. This |
| 9034 | // becomes 3 back-to-back memory operations, but at least that is less than |
| 9035 | // all the insert/extracts. |
| 9036 | // |
| 9037 | // In order to do the last, we convert certain truncs into MVETRUNC, which
| 9038 | // are either optimized where they can be, or eventually lowered into stack
| 9039 | // stores/loads. This prevents us from splitting a v8i16 trunc into two
| 9040 | // stores too early, where other instructions would be better, and stops us
| 9041 | // from having to reconstruct multiple buildvector shuffles into loads/stores.
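|      | // For example, a v8i32->v8i16 trunc is split below into its two v4i32
|      | // halves and emitted as a single MVETRUNC(Lo, Hi) node.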
| 9042 | if (ToVT != MVT::v8i16 && ToVT != MVT::v16i8) |
| 9043 | return SDValue(); |
| 9044 | EVT FromVT = N->getOperand(Num: 0).getValueType(); |
| 9045 | if (FromVT != MVT::v8i32 && FromVT != MVT::v16i16) |
| 9046 | return SDValue(); |
| 9047 | |
| 9048 | SDValue Lo, Hi; |
| 9049 | std::tie(args&: Lo, args&: Hi) = DAG.SplitVectorOperand(N, OpNo: 0); |
| 9050 | SDLoc DL(N); |
| 9051 | return DAG.getNode(Opcode: ARMISD::MVETRUNC, DL, VT: ToVT, N1: Lo, N2: Hi); |
| 9052 | } |
| 9053 | |
| 9054 | static SDValue LowerVectorExtend(SDNode *N, SelectionDAG &DAG, |
| 9055 | const ARMSubtarget *Subtarget) { |
| 9056 | if (!Subtarget->hasMVEIntegerOps()) |
| 9057 | return SDValue(); |
| 9058 | |
| 9059 | // See LowerTruncate above for an explanation of MVEEXT/MVETRUNC. |
| 9060 | |
| 9061 | EVT ToVT = N->getValueType(ResNo: 0); |
| 9062 | if (ToVT != MVT::v16i32 && ToVT != MVT::v8i32 && ToVT != MVT::v16i16) |
| 9063 | return SDValue(); |
| 9064 | SDValue Op = N->getOperand(Num: 0); |
| 9065 | EVT FromVT = Op.getValueType(); |
| 9066 | if (FromVT != MVT::v8i16 && FromVT != MVT::v16i8) |
| 9067 | return SDValue(); |
| 9068 | |
| 9069 | SDLoc DL(N); |
| 9070 | EVT ExtVT = ToVT.getHalfNumVectorElementsVT(Context&: *DAG.getContext()); |
| 9071 | if (ToVT.getScalarType() == MVT::i32 && FromVT.getScalarType() == MVT::i8) |
| 9072 | ExtVT = MVT::v8i16; |
| 9073 | |
| 9074 | unsigned Opcode = |
| 9075 | N->getOpcode() == ISD::SIGN_EXTEND ? ARMISD::MVESEXT : ARMISD::MVEZEXT; |
| 9076 | SDValue Ext = DAG.getNode(Opcode, DL, VTList: DAG.getVTList(VT1: ExtVT, VT2: ExtVT), N: Op); |
| 9077 | SDValue Ext1 = Ext.getValue(R: 1); |
| 9078 | |
| 9079 | if (ToVT.getScalarType() == MVT::i32 && FromVT.getScalarType() == MVT::i8) { |
| 9080 | Ext = DAG.getNode(Opcode: N->getOpcode(), DL, VT: MVT::v8i32, Operand: Ext); |
| 9081 | Ext1 = DAG.getNode(Opcode: N->getOpcode(), DL, VT: MVT::v8i32, Operand: Ext1); |
| 9082 | } |
| 9083 | |
| 9084 | return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: ToVT, N1: Ext, N2: Ext1); |
| 9085 | } |
| 9086 | |
| 9087 | /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each |
| 9088 | /// element has been zero/sign-extended, depending on the isSigned parameter, |
| 9089 | /// from an integer type half its size. |
| 9090 | static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, |
| 9091 | bool isSigned) { |
| 9092 | // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. |
| 9093 | EVT VT = N->getValueType(ResNo: 0); |
| 9094 | if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { |
| 9095 | SDNode *BVN = N->getOperand(Num: 0).getNode(); |
| 9096 | if (BVN->getValueType(ResNo: 0) != MVT::v4i32 || |
| 9097 | BVN->getOpcode() != ISD::BUILD_VECTOR) |
| 9098 | return false; |
| 9099 | unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; |
| 9100 | unsigned HiElt = 1 - LoElt; |
| 9101 | ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(Val: BVN->getOperand(Num: LoElt)); |
| 9102 | ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(Val: BVN->getOperand(Num: HiElt)); |
| 9103 | ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(Val: BVN->getOperand(Num: LoElt+2)); |
| 9104 | ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(Val: BVN->getOperand(Num: HiElt+2)); |
| 9105 | if (!Lo0 || !Hi0 || !Lo1 || !Hi1) |
| 9106 | return false; |
| 9107 | if (isSigned) { |
| 9108 | if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && |
| 9109 | Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) |
| 9110 | return true; |
| 9111 | } else { |
| 9112 | if (Hi0->isZero() && Hi1->isZero()) |
| 9113 | return true; |
| 9114 | } |
| 9115 | return false; |
| 9116 | } |
| 9117 | |
| 9118 | if (N->getOpcode() != ISD::BUILD_VECTOR) |
| 9119 | return false; |
| 9120 | |
| 9121 | for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { |
| 9122 | SDNode *Elt = N->getOperand(Num: i).getNode(); |
| 9123 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val: Elt)) { |
| 9124 | unsigned EltSize = VT.getScalarSizeInBits(); |
| 9125 | unsigned HalfSize = EltSize / 2; |
| 9126 | if (isSigned) { |
| 9127 | if (!isIntN(N: HalfSize, x: C->getSExtValue())) |
| 9128 | return false; |
| 9129 | } else { |
| 9130 | if (!isUIntN(N: HalfSize, x: C->getZExtValue())) |
| 9131 | return false; |
| 9132 | } |
| 9133 | continue; |
| 9134 | } |
| 9135 | return false; |
| 9136 | } |
| 9137 | |
| 9138 | return true; |
| 9139 | } |
| 9140 | |
| 9141 | /// isSignExtended - Check if a node is a vector value that is sign-extended |
| 9142 | /// or a constant BUILD_VECTOR with sign-extended elements. |
| 9143 | static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { |
| 9144 | if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) |
| 9145 | return true; |
| 9146 | if (isExtendedBUILD_VECTOR(N, DAG, isSigned: true)) |
| 9147 | return true; |
| 9148 | return false; |
| 9149 | } |
| 9150 | |
| 9151 | /// isZeroExtended - Check if a node is a vector value that is zero-extended (or |
| 9152 | /// any-extended) or a constant BUILD_VECTOR with zero-extended elements. |
| 9153 | static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { |
| 9154 | if (N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND || |
| 9155 | ISD::isZEXTLoad(N)) |
| 9156 | return true; |
| 9157 | if (isExtendedBUILD_VECTOR(N, DAG, isSigned: false)) |
| 9158 | return true; |
| 9159 | return false; |
| 9160 | } |
| 9161 | |
| 9162 | static EVT getExtensionTo64Bits(const EVT &OrigVT) { |
| 9163 | if (OrigVT.getSizeInBits() >= 64) |
| 9164 | return OrigVT; |
| 9165 | |
assert(OrigVT.isSimple() && "Expecting a simple value type");
| 9167 | |
| 9168 | MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy; |
| 9169 | switch (OrigSimpleTy) { |
| 9170 | default: llvm_unreachable("Unexpected Vector Type" ); |
| 9171 | case MVT::v2i8: |
| 9172 | case MVT::v2i16: |
| 9173 | return MVT::v2i32; |
| 9174 | case MVT::v4i8: |
| 9175 | return MVT::v4i16; |
| 9176 | } |
| 9177 | } |
| 9178 | |
| 9179 | /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total |
| 9180 | /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL. |
| 9181 | /// We insert the required extension here to get the vector to fill a D register. |
| 9182 | static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG, |
| 9183 | const EVT &OrigTy, |
| 9184 | const EVT &ExtTy, |
| 9185 | unsigned ExtOpcode) { |
| 9186 | // The vector originally had a size of OrigTy. It was then extended to ExtTy. |
| 9187 | // We expect the ExtTy to be 128-bits total. If the OrigTy is less than |
| 9188 | // 64-bits we need to insert a new extension so that it will be 64-bits. |
assert(ExtTy.is128BitVector() && "Unexpected extension size");
| 9190 | if (OrigTy.getSizeInBits() >= 64) |
| 9191 | return N; |
| 9192 | |
| 9193 | // Must extend size to at least 64 bits to be used as an operand for VMULL. |
| 9194 | EVT NewVT = getExtensionTo64Bits(OrigVT: OrigTy); |
| 9195 | |
| 9196 | return DAG.getNode(Opcode: ExtOpcode, DL: SDLoc(N), VT: NewVT, Operand: N); |
| 9197 | } |
| 9198 | |
| 9199 | /// SkipLoadExtensionForVMULL - return a load of the original vector size that |
| 9200 | /// does not do any sign/zero extension. If the original vector is less |
| 9201 | /// than 64 bits, an appropriate extension will be added after the load to |
| 9202 | /// reach a total size of 64 bits. We have to add the extension separately |
| 9203 | /// because ARM does not have a sign/zero extending load for vectors. |
| 9204 | static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) { |
| 9205 | EVT ExtendedTy = getExtensionTo64Bits(OrigVT: LD->getMemoryVT()); |
| 9206 | |
| 9207 | // The load already has the right type. |
| 9208 | if (ExtendedTy == LD->getMemoryVT()) |
| 9209 | return DAG.getLoad(VT: LD->getMemoryVT(), dl: SDLoc(LD), Chain: LD->getChain(), |
| 9210 | Ptr: LD->getBasePtr(), PtrInfo: LD->getPointerInfo(), Alignment: LD->getAlign(), |
| 9211 | MMOFlags: LD->getMemOperand()->getFlags()); |
| 9212 | |
// We need to create a zextload/sextload. We cannot just create a load
// followed by a sext/zext node because LowerMUL is also run during normal
// operation legalization, where we can't create illegal types.
| 9216 | return DAG.getExtLoad(ExtType: LD->getExtensionType(), dl: SDLoc(LD), VT: ExtendedTy, |
| 9217 | Chain: LD->getChain(), Ptr: LD->getBasePtr(), PtrInfo: LD->getPointerInfo(), |
| 9218 | MemVT: LD->getMemoryVT(), Alignment: LD->getAlign(), |
| 9219 | MMOFlags: LD->getMemOperand()->getFlags()); |
| 9220 | } |
| 9221 | |
| 9222 | /// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND, |
| 9223 | /// ANY_EXTEND, extending load, or BUILD_VECTOR with extended elements, return |
| 9224 | /// the unextended value. The unextended vector should be 64 bits so that it can |
| 9225 | /// be used as an operand to a VMULL instruction. If the original vector size |
/// before extension is less than 64 bits, we add an extension to resize
| 9227 | /// the vector to 64 bits. |
| 9228 | static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) { |
| 9229 | if (N->getOpcode() == ISD::SIGN_EXTEND || |
| 9230 | N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND) |
| 9231 | return AddRequiredExtensionForVMULL(N: N->getOperand(Num: 0), DAG, |
| 9232 | OrigTy: N->getOperand(Num: 0)->getValueType(ResNo: 0), |
| 9233 | ExtTy: N->getValueType(ResNo: 0), |
| 9234 | ExtOpcode: N->getOpcode()); |
| 9235 | |
| 9236 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: N)) { |
| 9237 | assert((ISD::isSEXTLoad(LD) || ISD::isZEXTLoad(LD)) && |
| 9238 | "Expected extending load" ); |
| 9239 | |
| 9240 | SDValue newLoad = SkipLoadExtensionForVMULL(LD, DAG); |
| 9241 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 1), To: newLoad.getValue(R: 1)); |
| 9242 | unsigned Opcode = ISD::isSEXTLoad(N: LD) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; |
| 9243 | SDValue extLoad = |
| 9244 | DAG.getNode(Opcode, DL: SDLoc(newLoad), VT: LD->getValueType(ResNo: 0), Operand: newLoad); |
| 9245 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 0), To: extLoad); |
| 9246 | |
| 9247 | return newLoad; |
| 9248 | } |
| 9249 | |
| 9250 | // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will |
| 9251 | // have been legalized as a BITCAST from v4i32. |
| 9252 | if (N->getOpcode() == ISD::BITCAST) { |
| 9253 | SDNode *BVN = N->getOperand(Num: 0).getNode(); |
| 9254 | assert(BVN->getOpcode() == ISD::BUILD_VECTOR && |
BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
| 9256 | unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; |
| 9257 | return DAG.getBuildVector( |
| 9258 | VT: MVT::v2i32, DL: SDLoc(N), |
| 9259 | Ops: {BVN->getOperand(Num: LowElt), BVN->getOperand(Num: LowElt + 2)}); |
| 9260 | } |
| 9261 | // Construct a new BUILD_VECTOR with elements truncated to half the size. |
assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
| 9263 | EVT VT = N->getValueType(ResNo: 0); |
| 9264 | unsigned EltSize = VT.getScalarSizeInBits() / 2; |
| 9265 | unsigned NumElts = VT.getVectorNumElements(); |
| 9266 | MVT TruncVT = MVT::getIntegerVT(BitWidth: EltSize); |
| 9267 | SmallVector<SDValue, 8> Ops; |
| 9268 | SDLoc dl(N); |
| 9269 | for (unsigned i = 0; i != NumElts; ++i) { |
| 9270 | const APInt &CInt = N->getConstantOperandAPInt(Num: i); |
| 9271 | // Element types smaller than 32 bits are not legal, so use i32 elements. |
| 9272 | // The values are implicitly truncated so sext vs. zext doesn't matter. |
| 9273 | Ops.push_back(Elt: DAG.getConstant(Val: CInt.zextOrTrunc(width: 32), DL: dl, VT: MVT::i32)); |
| 9274 | } |
| 9275 | return DAG.getBuildVector(VT: MVT::getVectorVT(VT: TruncVT, NumElements: NumElts), DL: dl, Ops); |
| 9276 | } |
| 9277 | |
| 9278 | static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { |
| 9279 | unsigned Opcode = N->getOpcode(); |
| 9280 | if (Opcode == ISD::ADD || Opcode == ISD::SUB) { |
| 9281 | SDNode *N0 = N->getOperand(Num: 0).getNode(); |
| 9282 | SDNode *N1 = N->getOperand(Num: 1).getNode(); |
| 9283 | return N0->hasOneUse() && N1->hasOneUse() && |
| 9284 | isSignExtended(N: N0, DAG) && isSignExtended(N: N1, DAG); |
| 9285 | } |
| 9286 | return false; |
| 9287 | } |
| 9288 | |
| 9289 | static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { |
| 9290 | unsigned Opcode = N->getOpcode(); |
| 9291 | if (Opcode == ISD::ADD || Opcode == ISD::SUB) { |
| 9292 | SDNode *N0 = N->getOperand(Num: 0).getNode(); |
| 9293 | SDNode *N1 = N->getOperand(Num: 1).getNode(); |
| 9294 | return N0->hasOneUse() && N1->hasOneUse() && |
| 9295 | isZeroExtended(N: N0, DAG) && isZeroExtended(N: N1, DAG); |
| 9296 | } |
| 9297 | return false; |
| 9298 | } |
| 9299 | |
| 9300 | static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { |
| 9301 | // Multiplications are only custom-lowered for 128-bit vectors so that |
| 9302 | // VMULL can be detected. Otherwise v2i64 multiplications are not legal. |
| 9303 | EVT VT = Op.getValueType(); |
| 9304 | assert(VT.is128BitVector() && VT.isInteger() && |
| 9305 | "unexpected type for custom-lowering ISD::MUL" ); |
| 9306 | SDNode *N0 = Op.getOperand(i: 0).getNode(); |
| 9307 | SDNode *N1 = Op.getOperand(i: 1).getNode(); |
| 9308 | unsigned NewOpc = 0; |
| 9309 | bool isMLA = false; |
| 9310 | bool isN0SExt = isSignExtended(N: N0, DAG); |
| 9311 | bool isN1SExt = isSignExtended(N: N1, DAG); |
| 9312 | if (isN0SExt && isN1SExt) |
| 9313 | NewOpc = ARMISD::VMULLs; |
| 9314 | else { |
| 9315 | bool isN0ZExt = isZeroExtended(N: N0, DAG); |
| 9316 | bool isN1ZExt = isZeroExtended(N: N1, DAG); |
| 9317 | if (isN0ZExt && isN1ZExt) |
| 9318 | NewOpc = ARMISD::VMULLu; |
| 9319 | else if (isN1SExt || isN1ZExt) { |
| 9320 | // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these |
| 9321 | // into (s/zext A * s/zext C) + (s/zext B * s/zext C) |
| 9322 | if (isN1SExt && isAddSubSExt(N: N0, DAG)) { |
| 9323 | NewOpc = ARMISD::VMULLs; |
| 9324 | isMLA = true; |
| 9325 | } else if (isN1ZExt && isAddSubZExt(N: N0, DAG)) { |
| 9326 | NewOpc = ARMISD::VMULLu; |
| 9327 | isMLA = true; |
| 9328 | } else if (isN0ZExt && isAddSubZExt(N: N1, DAG)) { |
| 9329 | std::swap(a&: N0, b&: N1); |
| 9330 | NewOpc = ARMISD::VMULLu; |
| 9331 | isMLA = true; |
| 9332 | } |
| 9333 | } |
| 9334 | |
| 9335 | if (!NewOpc) { |
| 9336 | if (VT == MVT::v2i64) |
| 9337 | // Fall through to expand this. It is not legal. |
| 9338 | return SDValue(); |
| 9339 | else |
| 9340 | // Other vector multiplications are legal. |
| 9341 | return Op; |
| 9342 | } |
| 9343 | } |
| 9344 | |
| 9345 | // Legalize to a VMULL instruction. |
| 9346 | SDLoc DL(Op); |
| 9347 | SDValue Op0; |
| 9348 | SDValue Op1 = SkipExtensionForVMULL(N: N1, DAG); |
| 9349 | if (!isMLA) { |
| 9350 | Op0 = SkipExtensionForVMULL(N: N0, DAG); |
| 9351 | assert(Op0.getValueType().is64BitVector() && |
| 9352 | Op1.getValueType().is64BitVector() && |
| 9353 | "unexpected types for extended operands to VMULL" ); |
| 9354 | return DAG.getNode(Opcode: NewOpc, DL, VT, N1: Op0, N2: Op1); |
| 9355 | } |
| 9356 | |
// Optimize (zext A + zext B) * C to (VMULL A, C) + (VMULL B, C) during
// isel lowering to take advantage of no-stall back-to-back vmul + vmla.
| 9359 | // vmull q0, d4, d6 |
| 9360 | // vmlal q0, d5, d6 |
| 9361 | // is faster than |
| 9362 | // vaddl q0, d4, d5 |
| 9363 | // vmovl q1, d6 |
| 9364 | // vmul q0, q0, q1 |
| 9365 | SDValue N00 = SkipExtensionForVMULL(N: N0->getOperand(Num: 0).getNode(), DAG); |
| 9366 | SDValue N01 = SkipExtensionForVMULL(N: N0->getOperand(Num: 1).getNode(), DAG); |
| 9367 | EVT Op1VT = Op1.getValueType(); |
| 9368 | return DAG.getNode(Opcode: N0->getOpcode(), DL, VT, |
| 9369 | N1: DAG.getNode(Opcode: NewOpc, DL, VT, |
| 9370 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL, VT: Op1VT, Operand: N00), N2: Op1), |
| 9371 | N2: DAG.getNode(Opcode: NewOpc, DL, VT, |
| 9372 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL, VT: Op1VT, Operand: N01), N2: Op1)); |
| 9373 | } |
| 9374 | |
| 9375 | static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl, |
| 9376 | SelectionDAG &DAG) { |
| 9377 | // TODO: Should this propagate fast-math-flags? |
| 9378 | |
| 9379 | // Convert to float |
| 9380 | // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); |
| 9381 | // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); |
| 9382 | X = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v4i32, Operand: X); |
| 9383 | Y = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v4i32, Operand: Y); |
| 9384 | X = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: X); |
| 9385 | Y = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: Y); |
| 9386 | // Get reciprocal estimate. |
| 9387 | // float4 recip = vrecpeq_f32(yf); |
| 9388 | Y = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
| 9389 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecpe, DL: dl, VT: MVT::i32), |
| 9390 | N2: Y); |
// Because char has a smaller range than uchar, we can actually get away
// without any Newton steps. This requires that we use a weird bias
// of 0xb000, however (again, this has been exhaustively tested).
| 9394 | // float4 result = as_float4(as_int4(xf*recip) + 0xb000); |
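// (Adding an integer bias to the bit pattern moves the float up by that
// many ulps while no exponent boundary is crossed, and stays monotonic
// across boundaries, which is why a fixed integer bias works here.)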
| 9395 | X = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1: X, N2: Y); |
| 9396 | X = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4i32, Operand: X); |
| 9397 | Y = DAG.getConstant(Val: 0xb000, DL: dl, VT: MVT::v4i32); |
| 9398 | X = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::v4i32, N1: X, N2: Y); |
| 9399 | X = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4f32, Operand: X); |
| 9400 | // Convert back to short. |
| 9401 | X = DAG.getNode(Opcode: ISD::FP_TO_SINT, DL: dl, VT: MVT::v4i32, Operand: X); |
| 9402 | X = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::v4i16, Operand: X); |
| 9403 | return X; |
| 9404 | } |
| 9405 | |
| 9406 | static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl, |
| 9407 | SelectionDAG &DAG) { |
| 9408 | // TODO: Should this propagate fast-math-flags? |
| 9409 | |
| 9410 | SDValue N2; |
| 9411 | // Convert to float. |
| 9412 | // float4 yf = vcvt_f32_s32(vmovl_s16(y)); |
| 9413 | // float4 xf = vcvt_f32_s32(vmovl_s16(x)); |
| 9414 | N0 = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v4i32, Operand: N0); |
| 9415 | N1 = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v4i32, Operand: N1); |
| 9416 | N0 = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: N0); |
| 9417 | N1 = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: N1); |
| 9418 | |
| 9419 | // Use reciprocal estimate and one refinement step. |
| 9420 | // float4 recip = vrecpeq_f32(yf); |
| 9421 | // recip *= vrecpsq_f32(yf, recip); |
| 9422 | N2 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
| 9423 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecpe, DL: dl, VT: MVT::i32), |
| 9424 | N2: N1); |
| 9425 | N1 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
| 9426 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecps, DL: dl, VT: MVT::i32), |
| 9427 | N2: N1, N3: N2); |
| 9428 | N2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1, N2); |
// Because short has a smaller range than ushort, we can actually get away
// with only a single Newton step. This requires that we use a weird bias
// of 0x89, however (again, this has been exhaustively tested).
| 9432 | // float4 result = as_float4(as_int4(xf*recip) + 0x89); |
| 9433 | N0 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1: N0, N2); |
| 9434 | N0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4i32, Operand: N0); |
| 9435 | N1 = DAG.getConstant(Val: 0x89, DL: dl, VT: MVT::v4i32); |
| 9436 | N0 = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::v4i32, N1: N0, N2: N1); |
| 9437 | N0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4f32, Operand: N0); |
| 9438 | // Convert back to integer and return. |
| 9439 | // return vmovn_s32(vcvt_s32_f32(result)); |
| 9440 | N0 = DAG.getNode(Opcode: ISD::FP_TO_SINT, DL: dl, VT: MVT::v4i32, Operand: N0); |
| 9441 | N0 = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::v4i16, Operand: N0); |
| 9442 | return N0; |
| 9443 | } |
| 9444 | |
| 9445 | static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG, |
| 9446 | const ARMSubtarget *ST) { |
| 9447 | EVT VT = Op.getValueType(); |
| 9448 | assert((VT == MVT::v4i16 || VT == MVT::v8i8) && |
| 9449 | "unexpected type for custom-lowering ISD::SDIV" ); |
| 9450 | |
| 9451 | SDLoc dl(Op); |
| 9452 | SDValue N0 = Op.getOperand(i: 0); |
| 9453 | SDValue N1 = Op.getOperand(i: 1); |
| 9454 | SDValue N2, N3; |
| 9455 | |
| 9456 | if (VT == MVT::v8i8) { |
| 9457 | N0 = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v8i16, Operand: N0); |
| 9458 | N1 = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v8i16, Operand: N1); |
| 9459 | |
| 9460 | N2 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1: N0, |
| 9461 | N2: DAG.getIntPtrConstant(Val: 4, DL: dl)); |
| 9462 | N3 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1, |
| 9463 | N2: DAG.getIntPtrConstant(Val: 4, DL: dl)); |
| 9464 | N0 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1: N0, |
| 9465 | N2: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
| 9466 | N1 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1, |
| 9467 | N2: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
| 9468 | |
| 9469 | N0 = LowerSDIV_v4i8(X: N0, Y: N1, dl, DAG); // v4i16 |
| 9470 | N2 = LowerSDIV_v4i8(X: N2, Y: N3, dl, DAG); // v4i16 |
| 9471 | |
| 9472 | N0 = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT: MVT::v8i16, N1: N0, N2); |
| 9473 | N0 = LowerCONCAT_VECTORS(Op: N0, DAG, ST); |
| 9474 | |
| 9475 | N0 = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::v8i8, Operand: N0); |
| 9476 | return N0; |
| 9477 | } |
| 9478 | return LowerSDIV_v4i16(N0, N1, dl, DAG); |
| 9479 | } |
| 9480 | |
| 9481 | static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG, |
| 9482 | const ARMSubtarget *ST) { |
| 9483 | // TODO: Should this propagate fast-math-flags? |
| 9484 | EVT VT = Op.getValueType(); |
| 9485 | assert((VT == MVT::v4i16 || VT == MVT::v8i8) && |
| 9486 | "unexpected type for custom-lowering ISD::UDIV" ); |
| 9487 | |
| 9488 | SDLoc dl(Op); |
| 9489 | SDValue N0 = Op.getOperand(i: 0); |
| 9490 | SDValue N1 = Op.getOperand(i: 1); |
| 9491 | SDValue N2, N3; |
| 9492 | |
| 9493 | if (VT == MVT::v8i8) { |
| 9494 | N0 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, VT: MVT::v8i16, Operand: N0); |
| 9495 | N1 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, VT: MVT::v8i16, Operand: N1); |
| 9496 | |
| 9497 | N2 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1: N0, |
| 9498 | N2: DAG.getIntPtrConstant(Val: 4, DL: dl)); |
| 9499 | N3 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1, |
| 9500 | N2: DAG.getIntPtrConstant(Val: 4, DL: dl)); |
| 9501 | N0 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1: N0, |
| 9502 | N2: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
| 9503 | N1 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1, |
| 9504 | N2: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
| 9505 | |
| 9506 | N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 |
| 9507 | N2 = LowerSDIV_v4i16(N0: N2, N1: N3, dl, DAG); // v4i16 |
| 9508 | |
| 9509 | N0 = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT: MVT::v8i16, N1: N0, N2); |
| 9510 | N0 = LowerCONCAT_VECTORS(Op: N0, DAG, ST); |
| 9511 | |
| 9512 | N0 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v8i8, |
| 9513 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vqmovnsu, DL: dl, |
| 9514 | VT: MVT::i32), |
| 9515 | N2: N0); |
| 9516 | return N0; |
| 9517 | } |
| 9518 | |
// v4i16 udiv ... Convert to float.
| 9520 | // float4 yf = vcvt_f32_s32(vmovl_u16(y)); |
| 9521 | // float4 xf = vcvt_f32_s32(vmovl_u16(x)); |
| 9522 | N0 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, VT: MVT::v4i32, Operand: N0); |
| 9523 | N1 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, VT: MVT::v4i32, Operand: N1); |
| 9524 | N0 = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: N0); |
| 9525 | SDValue BN1 = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: N1); |
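
// Note that SINT_TO_FP is fine here even though the inputs are unsigned:
// after the ZERO_EXTEND every value fits in 31 bits, so the signed
// conversion is exact for them.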
| 9526 | |
| 9527 | // Use reciprocal estimate and two refinement steps. |
| 9528 | // float4 recip = vrecpeq_f32(yf); |
| 9529 | // recip *= vrecpsq_f32(yf, recip); |
| 9530 | // recip *= vrecpsq_f32(yf, recip); |
| 9531 | N2 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
| 9532 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecpe, DL: dl, VT: MVT::i32), |
| 9533 | N2: BN1); |
| 9534 | N1 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
| 9535 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecps, DL: dl, VT: MVT::i32), |
| 9536 | N2: BN1, N3: N2); |
| 9537 | N2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1, N2); |
| 9538 | N1 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
| 9539 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecps, DL: dl, VT: MVT::i32), |
| 9540 | N2: BN1, N3: N2); |
| 9541 | N2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1, N2); |
| 9542 | // Simply multiplying by the reciprocal estimate can leave us a few ulps |
| 9543 | // too low, so we add 2 ulps (exhaustive testing shows that this is enough, |
| 9544 | // and that it will never cause us to return an answer too large). |
| 9545 | // float4 result = as_float4(as_int4(xf*recip) + 2); |
| 9546 | N0 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1: N0, N2); |
| 9547 | N0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4i32, Operand: N0); |
| 9548 | N1 = DAG.getConstant(Val: 2, DL: dl, VT: MVT::v4i32); |
| 9549 | N0 = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::v4i32, N1: N0, N2: N1); |
| 9550 | N0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4f32, Operand: N0); |
| 9551 | // Convert back to integer and return. |
| 9552 | // return vmovn_u32(vcvt_s32_f32(result)); |
| 9553 | N0 = DAG.getNode(Opcode: ISD::FP_TO_SINT, DL: dl, VT: MVT::v4i32, Operand: N0); |
| 9554 | N0 = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::v4i16, Operand: N0); |
| 9555 | return N0; |
| 9556 | } |
| 9557 | |
| 9558 | static SDValue LowerUADDSUBO_CARRY(SDValue Op, SelectionDAG &DAG) { |
| 9559 | SDNode *N = Op.getNode(); |
| 9560 | EVT VT = N->getValueType(ResNo: 0); |
| 9561 | SDVTList VTs = DAG.getVTList(VT1: VT, VT2: MVT::i32); |
| 9562 | |
| 9563 | SDValue Carry = Op.getOperand(i: 2); |
| 9564 | |
| 9565 | SDLoc DL(Op); |
| 9566 | |
| 9567 | SDValue Result; |
| 9568 | if (Op.getOpcode() == ISD::UADDO_CARRY) { |
| 9569 | // This converts the boolean value carry into the carry flag. |
| 9570 | Carry = ConvertBooleanCarryToCarryFlag(BoolCarry: Carry, DAG); |
| 9571 | |
| 9572 | // Do the addition proper using the carry flag we wanted. |
| 9573 | Result = DAG.getNode(Opcode: ARMISD::ADDE, DL, VTList: VTs, N1: Op.getOperand(i: 0), |
| 9574 | N2: Op.getOperand(i: 1), N3: Carry); |
| 9575 | |
| 9576 | // Now convert the carry flag into a boolean value. |
| 9577 | Carry = ConvertCarryFlagToBooleanCarry(Flags: Result.getValue(R: 1), VT, DAG); |
| 9578 | } else { |
// ARMISD::SUBE expects a carry, not a borrow as in ISD::USUBO_CARRY, so we
// have to invert the carry first.
| 9581 | Carry = DAG.getNode(Opcode: ISD::SUB, DL, VT: MVT::i32, |
| 9582 | N1: DAG.getConstant(Val: 1, DL, VT: MVT::i32), N2: Carry); |
| 9583 | // This converts the boolean value carry into the carry flag. |
| 9584 | Carry = ConvertBooleanCarryToCarryFlag(BoolCarry: Carry, DAG); |
| 9585 | |
| 9586 | // Do the subtraction proper using the carry flag we wanted. |
| 9587 | Result = DAG.getNode(Opcode: ARMISD::SUBE, DL, VTList: VTs, N1: Op.getOperand(i: 0), |
| 9588 | N2: Op.getOperand(i: 1), N3: Carry); |
| 9589 | |
| 9590 | // Now convert the carry flag into a boolean value. |
| 9591 | Carry = ConvertCarryFlagToBooleanCarry(Flags: Result.getValue(R: 1), VT, DAG); |
| 9592 | // But the carry returned by ARMISD::SUBE is not a borrow as expected |
| 9593 | // by ISD::USUBO_CARRY, so compute 1 - C. |
| 9594 | Carry = DAG.getNode(Opcode: ISD::SUB, DL, VT: MVT::i32, |
| 9595 | N1: DAG.getConstant(Val: 1, DL, VT: MVT::i32), N2: Carry); |
| 9596 | } |
| 9597 | |
| 9598 | // Return both values. |
| 9599 | return DAG.getNode(Opcode: ISD::MERGE_VALUES, DL, VTList: N->getVTList(), N1: Result, N2: Carry); |
| 9600 | } |
| 9601 | |
| 9602 | SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, |
| 9603 | bool Signed, |
| 9604 | SDValue &Chain) const { |
| 9605 | EVT VT = Op.getValueType(); |
| 9606 | assert((VT == MVT::i32 || VT == MVT::i64) && |
| 9607 | "unexpected type for custom lowering DIV" ); |
| 9608 | SDLoc dl(Op); |
| 9609 | |
| 9610 | const auto &DL = DAG.getDataLayout(); |
| 9611 | RTLIB::Libcall LC; |
| 9612 | if (Signed) |
| 9613 | LC = VT == MVT::i32 ? RTLIB::SDIVREM_I32 : RTLIB::SDIVREM_I64; |
| 9614 | else |
| 9615 | LC = VT == MVT::i32 ? RTLIB::UDIVREM_I32 : RTLIB::UDIVREM_I64; |
| 9616 | |
| 9617 | RTLIB::LibcallImpl LCImpl = DAG.getLibcalls().getLibcallImpl(Call: LC); |
| 9618 | SDValue ES = DAG.getExternalSymbol(LCImpl, VT: getPointerTy(DL)); |
| 9619 | |
| 9620 | ARMTargetLowering::ArgListTy Args; |
| 9621 | |
| 9622 | for (auto AI : {1, 0}) { |
| 9623 | SDValue Operand = Op.getOperand(i: AI); |
| 9624 | Args.emplace_back(args&: Operand, |
| 9625 | args: Operand.getValueType().getTypeForEVT(Context&: *DAG.getContext())); |
| 9626 | } |
| 9627 | |
| 9628 | CallLoweringInfo CLI(DAG); |
| 9629 | CLI.setDebugLoc(dl).setChain(Chain).setCallee( |
| 9630 | CC: DAG.getLibcalls().getLibcallImplCallingConv(Call: LCImpl), |
| 9631 | ResultType: VT.getTypeForEVT(Context&: *DAG.getContext()), Target: ES, ArgsList: std::move(Args)); |
| 9632 | |
| 9633 | return LowerCallTo(CLI).first; |
| 9634 | } |
| 9635 | |
// This is a code size optimisation: return the original SDIV node to
// DAGCombiner when we don't want to expand SDIV into a sequence of
// instructions, and an empty node otherwise, which will cause the SDIV
// to be expanded in DAGCombine.
| 9640 | SDValue |
| 9641 | ARMTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, |
| 9642 | SelectionDAG &DAG, |
| 9643 | SmallVectorImpl<SDNode *> &Created) const { |
| 9644 | // TODO: Support SREM |
| 9645 | if (N->getOpcode() != ISD::SDIV) |
| 9646 | return SDValue(); |
| 9647 | |
| 9648 | const auto &ST = DAG.getSubtarget<ARMSubtarget>(); |
| 9649 | const bool MinSize = ST.hasMinSize(); |
| 9650 | const bool HasDivide = ST.isThumb() ? ST.hasDivideInThumbMode() |
| 9651 | : ST.hasDivideInARMMode(); |
| 9652 | |
| 9653 | // Don't touch vector types; rewriting this may lead to scalarizing |
| 9654 | // the int divs. |
| 9655 | if (N->getOperand(Num: 0).getValueType().isVector()) |
| 9656 | return SDValue(); |
| 9657 | |
// Bail unless MinSize is set; additionally, for both ARM and Thumb mode we
// need hwdiv support for this to be really profitable.
| 9660 | if (!(MinSize && HasDivide)) |
| 9661 | return SDValue(); |
| 9662 | |
// ARM mode is a bit simpler than Thumb: we can handle large power-of-2
// immediates with 1 mov instruction; no further checks required,
// just return the sdiv node.
| 9666 | if (!ST.isThumb()) |
| 9667 | return SDValue(N, 0); |
| 9668 | |
// In Thumb mode, immediates larger than 128 need a wide 4-byte MOV,
// and thus lose the code size benefit of a MOVS that requires only 2 bytes.
// TargetTransformInfo and 'getIntImmCodeSizeCost' could be helpful here,
// but as this check does exactly that already, it's not worth the trouble
// to query TTI.
| 9673 | if (Divisor.sgt(RHS: 128)) |
| 9674 | return SDValue(); |
| 9675 | |
| 9676 | return SDValue(N, 0); |
| 9677 | } |
| 9678 | |
| 9679 | SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, |
| 9680 | bool Signed) const { |
| 9681 | assert(Op.getValueType() == MVT::i32 && |
| 9682 | "unexpected type for custom lowering DIV" ); |
| 9683 | SDLoc dl(Op); |
| 9684 | |
| 9685 | SDValue DBZCHK = DAG.getNode(Opcode: ARMISD::WIN__DBZCHK, DL: dl, VT: MVT::Other, |
| 9686 | N1: DAG.getEntryNode(), N2: Op.getOperand(i: 1)); |
| 9687 | |
| 9688 | return LowerWindowsDIVLibCall(Op, DAG, Signed, Chain&: DBZCHK); |
| 9689 | } |
| 9690 | |
| 9691 | static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N, SDValue InChain) { |
| 9692 | SDLoc DL(N); |
| 9693 | SDValue Op = N->getOperand(Num: 1); |
| 9694 | if (N->getValueType(ResNo: 0) == MVT::i32) |
| 9695 | return DAG.getNode(Opcode: ARMISD::WIN__DBZCHK, DL, VT: MVT::Other, N1: InChain, N2: Op); |
| 9696 | SDValue Lo, Hi; |
| 9697 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: Op, DL, LoVT: MVT::i32, HiVT: MVT::i32); |
| 9698 | return DAG.getNode(Opcode: ARMISD::WIN__DBZCHK, DL, VT: MVT::Other, N1: InChain, |
| 9699 | N2: DAG.getNode(Opcode: ISD::OR, DL, VT: MVT::i32, N1: Lo, N2: Hi)); |
| 9700 | } |
| 9701 | |
| 9702 | void ARMTargetLowering::ExpandDIV_Windows( |
| 9703 | SDValue Op, SelectionDAG &DAG, bool Signed, |
| 9704 | SmallVectorImpl<SDValue> &Results) const { |
| 9705 | const auto &DL = DAG.getDataLayout(); |
| 9706 | |
| 9707 | assert(Op.getValueType() == MVT::i64 && |
| 9708 | "unexpected type for custom lowering DIV" ); |
| 9709 | SDLoc dl(Op); |
| 9710 | |
| 9711 | SDValue DBZCHK = WinDBZCheckDenominator(DAG, N: Op.getNode(), InChain: DAG.getEntryNode()); |
| 9712 | |
| 9713 | SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, Chain&: DBZCHK); |
| 9714 | |
| 9715 | SDValue Lower = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::i32, Operand: Result); |
| 9716 | SDValue Upper = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i64, N1: Result, |
| 9717 | N2: DAG.getConstant(Val: 32, DL: dl, VT: getPointerTy(DL))); |
| 9718 | Upper = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::i32, Operand: Upper); |
| 9719 | |
| 9720 | Results.push_back(Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Lower, N2: Upper)); |
| 9721 | } |
| 9722 | |
| 9723 | std::pair<SDValue, SDValue> |
| 9724 | ARMTargetLowering::LowerAEABIUnalignedLoad(SDValue Op, |
| 9725 | SelectionDAG &DAG) const { |
// If we have an unaligned load from an i32 or i64 that would normally be
// split into separate ldrb's, we can use the __aeabi_uread4/__aeabi_uread8
// functions instead.
| 9729 | LoadSDNode *LD = cast<LoadSDNode>(Val: Op.getNode()); |
| 9730 | EVT MemVT = LD->getMemoryVT(); |
| 9731 | if (MemVT != MVT::i32 && MemVT != MVT::i64) |
| 9732 | return std::make_pair(x: SDValue(), y: SDValue()); |
| 9733 | |
| 9734 | const auto &MF = DAG.getMachineFunction(); |
| 9735 | unsigned AS = LD->getAddressSpace(); |
| 9736 | Align Alignment = LD->getAlign(); |
| 9737 | const DataLayout &DL = DAG.getDataLayout(); |
| 9738 | bool AllowsUnaligned = Subtarget->allowsUnalignedMem(); |
| 9739 | |
| 9740 | if (MF.getFunction().hasMinSize() && !AllowsUnaligned && |
| 9741 | Alignment <= llvm::Align(2)) { |
| 9742 | |
| 9743 | RTLIB::Libcall LC = |
| 9744 | (MemVT == MVT::i32) ? RTLIB::AEABI_UREAD4 : RTLIB::AEABI_UREAD8; |
| 9745 | |
| 9746 | MakeLibCallOptions Opts; |
| 9747 | SDLoc dl(Op); |
| 9748 | |
| 9749 | auto Pair = makeLibCall(DAG, LC, RetVT: MemVT.getSimpleVT(), Ops: LD->getBasePtr(), |
| 9750 | CallOptions: Opts, dl, Chain: LD->getChain()); |
| 9751 | |
// If necessary, extend the result to 64 bits.
| 9753 | if (LD->getExtensionType() != ISD::NON_EXTLOAD) { |
| 9754 | unsigned ExtType = LD->getExtensionType() == ISD::SEXTLOAD |
| 9755 | ? ISD::SIGN_EXTEND |
| 9756 | : ISD::ZERO_EXTEND; |
| 9757 | SDValue EN = DAG.getNode(Opcode: ExtType, DL: dl, VT: LD->getValueType(ResNo: 0), Operand: Pair.first); |
| 9758 | Pair.first = EN; |
| 9759 | } |
| 9760 | return Pair; |
| 9761 | } |
| 9762 | |
| 9763 | // Default expand to individual loads |
| 9764 | if (!allowsMemoryAccess(Context&: *DAG.getContext(), DL, VT: MemVT, AddrSpace: AS, Alignment)) |
| 9765 | return expandUnalignedLoad(LD, DAG); |
| 9766 | return std::make_pair(x: SDValue(), y: SDValue()); |
| 9767 | } |
| 9768 | |
| 9769 | SDValue ARMTargetLowering::LowerAEABIUnalignedStore(SDValue Op, |
| 9770 | SelectionDAG &DAG) const { |
// If we have an unaligned store to an i32 or i64 that would normally be
// split into separate strb's, we can use the __aeabi_uwrite4/__aeabi_uwrite8
// functions instead.
| 9774 | StoreSDNode *ST = cast<StoreSDNode>(Val: Op.getNode()); |
| 9775 | EVT MemVT = ST->getMemoryVT(); |
| 9776 | if (MemVT != MVT::i32 && MemVT != MVT::i64) |
| 9777 | return SDValue(); |
| 9778 | |
| 9779 | const auto &MF = DAG.getMachineFunction(); |
| 9780 | unsigned AS = ST->getAddressSpace(); |
| 9781 | Align Alignment = ST->getAlign(); |
| 9782 | const DataLayout &DL = DAG.getDataLayout(); |
| 9783 | bool AllowsUnaligned = Subtarget->allowsUnalignedMem(); |
| 9784 | |
| 9785 | if (MF.getFunction().hasMinSize() && !AllowsUnaligned && |
| 9786 | Alignment <= llvm::Align(2)) { |
| 9787 | |
| 9788 | SDLoc dl(Op); |
| 9789 | |
// If necessary, truncate the value to the memory type.
| 9791 | SDValue StoreVal = ST->getOperand(Num: 1); |
| 9792 | if (ST->isTruncatingStore()) |
| 9793 | StoreVal = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MemVT, Operand: ST->getOperand(Num: 1)); |
| 9794 | |
| 9795 | RTLIB::Libcall LC = |
| 9796 | (MemVT == MVT::i32) ? RTLIB::AEABI_UWRITE4 : RTLIB::AEABI_UWRITE8; |
| 9797 | |
| 9798 | MakeLibCallOptions Opts; |
| 9799 | auto CallResult = |
| 9800 | makeLibCall(DAG, LC, RetVT: MVT::isVoid, Ops: {StoreVal, ST->getBasePtr()}, CallOptions: Opts, |
| 9801 | dl, Chain: ST->getChain()); |
| 9802 | |
| 9803 | return CallResult.second; |
| 9804 | } |
| 9805 | |
| 9806 | // Default expand to individual stores |
| 9807 | if (!allowsMemoryAccess(Context&: *DAG.getContext(), DL, VT: MemVT, AddrSpace: AS, Alignment)) |
| 9808 | return expandUnalignedStore(ST, DAG); |
| 9809 | return SDValue(); |
| 9810 | } |
| 9811 | |
| 9812 | static SDValue LowerPredicateLoad(SDValue Op, SelectionDAG &DAG) { |
| 9813 | LoadSDNode *LD = cast<LoadSDNode>(Val: Op.getNode()); |
| 9814 | EVT MemVT = LD->getMemoryVT(); |
| 9815 | assert((MemVT == MVT::v2i1 || MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || |
| 9816 | MemVT == MVT::v16i1) && |
| 9817 | "Expected a predicate type!" ); |
| 9818 | assert(MemVT == Op.getValueType()); |
| 9819 | assert(LD->getExtensionType() == ISD::NON_EXTLOAD && |
| 9820 | "Expected a non-extending load" ); |
| 9821 | assert(LD->isUnindexed() && "Expected a unindexed load" ); |
| 9822 | |
// The basic MVE VLDR on a v2i1/v4i1/v8i1 actually loads the entire 16-bit
// predicate, with the "v4i1" bits spread out over the 16 bits loaded. We
// need to make sure that the 8/4/2 bits are actually loaded into the
// correct place, which means loading the value and then shuffling the
// values into the bottom bits of the predicate.
// Equally, a VLDR for a v16i1 will actually load 32 bits (so it would be
// incorrect for BE).
// Speaking of BE, the rest of llvm assumes the reverse order to a natural
// VMSR(load), so the loaded bits need to be reversed.
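//
// As a concrete little-endian example: a v4i1 load below reads the 4
// predicate bits with a small integer extload zero-extended into an i32,
// PREDICATE_CAST treats that i32 as a v16i1, and EXTRACT_SUBVECTOR keeps
// lanes 0..3, leaving the loaded bits in the bottom of the predicate.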
| 9832 | |
| 9833 | SDLoc dl(Op); |
| 9834 | SDValue Load = DAG.getExtLoad( |
| 9835 | ExtType: ISD::EXTLOAD, dl, VT: MVT::i32, Chain: LD->getChain(), Ptr: LD->getBasePtr(), |
| 9836 | MemVT: EVT::getIntegerVT(Context&: *DAG.getContext(), BitWidth: MemVT.getSizeInBits()), |
| 9837 | MMO: LD->getMemOperand()); |
| 9838 | SDValue Val = Load; |
| 9839 | if (DAG.getDataLayout().isBigEndian()) |
| 9840 | Val = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i32, |
| 9841 | N1: DAG.getNode(Opcode: ISD::BITREVERSE, DL: dl, VT: MVT::i32, Operand: Load), |
| 9842 | N2: DAG.getConstant(Val: 32 - MemVT.getSizeInBits(), DL: dl, VT: MVT::i32)); |
| 9843 | SDValue Pred = DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::v16i1, Operand: Val); |
| 9844 | if (MemVT != MVT::v16i1) |
| 9845 | Pred = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MemVT, N1: Pred, |
| 9846 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 9847 | return DAG.getMergeValues(Ops: {Pred, Load.getValue(R: 1)}, dl); |
| 9848 | } |
| 9849 | |
| 9850 | void ARMTargetLowering::LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results, |
| 9851 | SelectionDAG &DAG) const { |
| 9852 | LoadSDNode *LD = cast<LoadSDNode>(Val: N); |
| 9853 | EVT MemVT = LD->getMemoryVT(); |
| 9854 | |
| 9855 | if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() && |
| 9856 | !Subtarget->isThumb1Only() && LD->isVolatile() && |
| 9857 | LD->getAlign() >= Subtarget->getDualLoadStoreAlignment()) { |
| 9858 | assert(LD->isUnindexed() && "Loads should be unindexed at this point." ); |
| 9859 | SDLoc dl(N); |
| 9860 | SDValue Result = DAG.getMemIntrinsicNode( |
| 9861 | Opcode: ARMISD::LDRD, dl, VTList: DAG.getVTList(VTs: {MVT::i32, MVT::i32, MVT::Other}), |
| 9862 | Ops: {LD->getChain(), LD->getBasePtr()}, MemVT, MMO: LD->getMemOperand()); |
| 9863 | SDValue Lo = Result.getValue(R: DAG.getDataLayout().isLittleEndian() ? 0 : 1); |
| 9864 | SDValue Hi = Result.getValue(R: DAG.getDataLayout().isLittleEndian() ? 1 : 0); |
| 9865 | SDValue Pair = DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Lo, N2: Hi); |
| 9866 | Results.append(IL: {Pair, Result.getValue(R: 2)}); |
| 9867 | } else if (MemVT == MVT::i32 || MemVT == MVT::i64) { |
| 9868 | auto Pair = LowerAEABIUnalignedLoad(Op: SDValue(N, 0), DAG); |
| 9869 | if (Pair.first) { |
| 9870 | Results.push_back(Elt: Pair.first); |
| 9871 | Results.push_back(Elt: Pair.second); |
| 9872 | } |
| 9873 | } |
| 9874 | } |
| 9875 | |
| 9876 | static SDValue LowerPredicateStore(SDValue Op, SelectionDAG &DAG) { |
| 9877 | StoreSDNode *ST = cast<StoreSDNode>(Val: Op.getNode()); |
| 9878 | EVT MemVT = ST->getMemoryVT(); |
| 9879 | assert((MemVT == MVT::v2i1 || MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || |
| 9880 | MemVT == MVT::v16i1) && |
| 9881 | "Expected a predicate type!" ); |
| 9882 | assert(MemVT == ST->getValue().getValueType()); |
assert(!ST->isTruncatingStore() && "Expected a non-truncating store");
assert(ST->isUnindexed() && "Expected an unindexed store");
| 9885 | |
// Store only the v2i1/v4i1/v8i1 worth of bits, via a buildvector with the
// top bits unset and a scalar store.
| 9888 | SDLoc dl(Op); |
| 9889 | SDValue Build = ST->getValue(); |
| 9890 | if (MemVT != MVT::v16i1) { |
| 9891 | SmallVector<SDValue, 16> Ops; |
| 9892 | for (unsigned I = 0; I < MemVT.getVectorNumElements(); I++) { |
| 9893 | unsigned Elt = DAG.getDataLayout().isBigEndian() |
| 9894 | ? MemVT.getVectorNumElements() - I - 1 |
| 9895 | : I; |
| 9896 | Ops.push_back(Elt: DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::i32, N1: Build, |
| 9897 | N2: DAG.getConstant(Val: Elt, DL: dl, VT: MVT::i32))); |
| 9898 | } |
| 9899 | for (unsigned I = MemVT.getVectorNumElements(); I < 16; I++) |
| 9900 | Ops.push_back(Elt: DAG.getUNDEF(VT: MVT::i32)); |
| 9901 | Build = DAG.getNode(Opcode: ISD::BUILD_VECTOR, DL: dl, VT: MVT::v16i1, Ops); |
| 9902 | } |
| 9903 | SDValue GRP = DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::i32, Operand: Build); |
| 9904 | if (MemVT == MVT::v16i1 && DAG.getDataLayout().isBigEndian()) |
| 9905 | GRP = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i32, |
| 9906 | N1: DAG.getNode(Opcode: ISD::BITREVERSE, DL: dl, VT: MVT::i32, Operand: GRP), |
| 9907 | N2: DAG.getConstant(Val: 16, DL: dl, VT: MVT::i32)); |
| 9908 | return DAG.getTruncStore( |
| 9909 | Chain: ST->getChain(), dl, Val: GRP, Ptr: ST->getBasePtr(), |
| 9910 | SVT: EVT::getIntegerVT(Context&: *DAG.getContext(), BitWidth: MemVT.getSizeInBits()), |
| 9911 | MMO: ST->getMemOperand()); |
| 9912 | } |
| 9913 | |
| 9914 | SDValue ARMTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG, |
| 9915 | const ARMSubtarget *Subtarget) const { |
| 9916 | StoreSDNode *ST = cast<StoreSDNode>(Val: Op.getNode()); |
| 9917 | EVT MemVT = ST->getMemoryVT(); |
| 9918 | |
| 9919 | if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() && |
| 9920 | !Subtarget->isThumb1Only() && ST->isVolatile() && |
| 9921 | ST->getAlign() >= Subtarget->getDualLoadStoreAlignment()) { |
| 9922 | assert(ST->isUnindexed() && "Stores should be unindexed at this point." ); |
| 9923 | SDNode *N = Op.getNode(); |
| 9924 | SDLoc dl(N); |
| 9925 | |
| 9926 | SDValue Lo = DAG.getNode( |
| 9927 | Opcode: ISD::EXTRACT_ELEMENT, DL: dl, VT: MVT::i32, N1: ST->getValue(), |
| 9928 | N2: DAG.getTargetConstant(Val: DAG.getDataLayout().isLittleEndian() ? 0 : 1, DL: dl, |
| 9929 | VT: MVT::i32)); |
| 9930 | SDValue Hi = DAG.getNode( |
| 9931 | Opcode: ISD::EXTRACT_ELEMENT, DL: dl, VT: MVT::i32, N1: ST->getValue(), |
| 9932 | N2: DAG.getTargetConstant(Val: DAG.getDataLayout().isLittleEndian() ? 1 : 0, DL: dl, |
| 9933 | VT: MVT::i32)); |
| 9934 | |
| 9935 | return DAG.getMemIntrinsicNode(Opcode: ARMISD::STRD, dl, VTList: DAG.getVTList(VT: MVT::Other), |
| 9936 | Ops: {ST->getChain(), Lo, Hi, ST->getBasePtr()}, |
| 9937 | MemVT, MMO: ST->getMemOperand()); |
| 9938 | } else if (Subtarget->hasMVEIntegerOps() && |
| 9939 | ((MemVT == MVT::v2i1 || MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || |
| 9940 | MemVT == MVT::v16i1))) { |
| 9941 | return LowerPredicateStore(Op, DAG); |
| 9942 | } else if (MemVT == MVT::i32 || MemVT == MVT::i64) { |
| 9943 | return LowerAEABIUnalignedStore(Op, DAG); |
| 9944 | } |
| 9945 | return SDValue(); |
| 9946 | } |
| 9947 | |
| 9948 | static bool isZeroVector(SDValue N) { |
| 9949 | return (ISD::isBuildVectorAllZeros(N: N.getNode()) || |
| 9950 | (N->getOpcode() == ARMISD::VMOVIMM && |
| 9951 | isNullConstant(V: N->getOperand(Num: 0)))); |
| 9952 | } |
| 9953 | |
| 9954 | static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) { |
| 9955 | MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Val: Op.getNode()); |
| 9956 | MVT VT = Op.getSimpleValueType(); |
| 9957 | SDValue Mask = N->getMask(); |
| 9958 | SDValue PassThru = N->getPassThru(); |
| 9959 | SDLoc dl(Op); |
| 9960 | |
| 9961 | if (isZeroVector(N: PassThru)) |
| 9962 | return Op; |
| 9963 | |
| 9964 | // MVE Masked loads use zero as the passthru value. Here we convert undef to |
| 9965 | // zero too, and other values are lowered to a select. |
| 9966 | SDValue ZeroVec = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT, |
| 9967 | Operand: DAG.getTargetConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 9968 | SDValue NewLoad = DAG.getMaskedLoad( |
| 9969 | VT, dl, Chain: N->getChain(), Base: N->getBasePtr(), Offset: N->getOffset(), Mask, Src0: ZeroVec, |
| 9970 | MemVT: N->getMemoryVT(), MMO: N->getMemOperand(), AM: N->getAddressingMode(), |
| 9971 | N->getExtensionType(), IsExpanding: N->isExpandingLoad()); |
| 9972 | SDValue Combo = NewLoad; |
| 9973 | bool PassThruIsCastZero = (PassThru.getOpcode() == ISD::BITCAST || |
| 9974 | PassThru.getOpcode() == ARMISD::VECTOR_REG_CAST) && |
| 9975 | isZeroVector(N: PassThru->getOperand(Num: 0)); |
| 9976 | if (!PassThru.isUndef() && !PassThruIsCastZero) |
| 9977 | Combo = DAG.getNode(Opcode: ISD::VSELECT, DL: dl, VT, N1: Mask, N2: NewLoad, N3: PassThru); |
| 9978 | return DAG.getMergeValues(Ops: {Combo, NewLoad.getValue(R: 1)}, dl); |
| 9979 | } |
| 9980 | |
| 9981 | static SDValue LowerVecReduce(SDValue Op, SelectionDAG &DAG, |
| 9982 | const ARMSubtarget *ST) { |
| 9983 | if (!ST->hasMVEIntegerOps()) |
| 9984 | return SDValue(); |
| 9985 | |
| 9986 | SDLoc dl(Op); |
| 9987 | unsigned BaseOpcode = 0; |
| 9988 | switch (Op->getOpcode()) { |
| 9989 | default: llvm_unreachable("Expected VECREDUCE opcode" ); |
| 9990 | case ISD::VECREDUCE_FADD: BaseOpcode = ISD::FADD; break; |
| 9991 | case ISD::VECREDUCE_FMUL: BaseOpcode = ISD::FMUL; break; |
| 9992 | case ISD::VECREDUCE_MUL: BaseOpcode = ISD::MUL; break; |
| 9993 | case ISD::VECREDUCE_AND: BaseOpcode = ISD::AND; break; |
| 9994 | case ISD::VECREDUCE_OR: BaseOpcode = ISD::OR; break; |
| 9995 | case ISD::VECREDUCE_XOR: BaseOpcode = ISD::XOR; break; |
| 9996 | case ISD::VECREDUCE_FMAX: BaseOpcode = ISD::FMAXNUM; break; |
| 9997 | case ISD::VECREDUCE_FMIN: BaseOpcode = ISD::FMINNUM; break; |
| 9998 | } |
| 9999 | |
| 10000 | SDValue Op0 = Op->getOperand(Num: 0); |
| 10001 | EVT VT = Op0.getValueType(); |
| 10002 | EVT EltVT = VT.getVectorElementType(); |
| 10003 | unsigned NumElts = VT.getVectorNumElements(); |
| 10004 | unsigned NumActiveLanes = NumElts; |
| 10005 | |
assert((NumActiveLanes == 16 || NumActiveLanes == 8 || NumActiveLanes == 4 ||
NumActiveLanes == 2) &&
"Only expected a power-of-2 vector size");
| 10009 | |
// Use BaseOpcode(X, Rev(X)) until 4 items remain. Going down to 4 vector
// elements allows us to easily extract vector elements from the lanes.
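// For example, a VECREDUCE_MUL of v8i16 performs one X = MUL(X, VREV32(X))
// step (leaving 4 active lanes), then extracts lanes 0, 2, 4 and 6 and
// combines them with three scalar multiplies.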
| 10012 | while (NumActiveLanes > 4) { |
| 10013 | unsigned RevOpcode = NumActiveLanes == 16 ? ARMISD::VREV16 : ARMISD::VREV32; |
| 10014 | SDValue Rev = DAG.getNode(Opcode: RevOpcode, DL: dl, VT, Operand: Op0); |
| 10015 | Op0 = DAG.getNode(Opcode: BaseOpcode, DL: dl, VT, N1: Op0, N2: Rev); |
| 10016 | NumActiveLanes /= 2; |
| 10017 | } |
| 10018 | |
| 10019 | SDValue Res; |
| 10020 | if (NumActiveLanes == 4) { |
// The remaining 4 elements are reduced sequentially.
| 10022 | SDValue Ext0 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
| 10023 | N2: DAG.getConstant(Val: 0 * NumElts / 4, DL: dl, VT: MVT::i32)); |
| 10024 | SDValue Ext1 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
| 10025 | N2: DAG.getConstant(Val: 1 * NumElts / 4, DL: dl, VT: MVT::i32)); |
| 10026 | SDValue Ext2 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
| 10027 | N2: DAG.getConstant(Val: 2 * NumElts / 4, DL: dl, VT: MVT::i32)); |
| 10028 | SDValue Ext3 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
| 10029 | N2: DAG.getConstant(Val: 3 * NumElts / 4, DL: dl, VT: MVT::i32)); |
| 10030 | SDValue Res0 = DAG.getNode(Opcode: BaseOpcode, DL: dl, VT: EltVT, N1: Ext0, N2: Ext1, Flags: Op->getFlags()); |
| 10031 | SDValue Res1 = DAG.getNode(Opcode: BaseOpcode, DL: dl, VT: EltVT, N1: Ext2, N2: Ext3, Flags: Op->getFlags()); |
| 10032 | Res = DAG.getNode(Opcode: BaseOpcode, DL: dl, VT: EltVT, N1: Res0, N2: Res1, Flags: Op->getFlags()); |
| 10033 | } else { |
| 10034 | SDValue Ext0 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
| 10035 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 10036 | SDValue Ext1 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
| 10037 | N2: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
| 10038 | Res = DAG.getNode(Opcode: BaseOpcode, DL: dl, VT: EltVT, N1: Ext0, N2: Ext1, Flags: Op->getFlags()); |
| 10039 | } |
| 10040 | |
| 10041 | // Result type may be wider than element type. |
| 10042 | if (EltVT != Op->getValueType(ResNo: 0)) |
| 10043 | Res = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL: dl, VT: Op->getValueType(ResNo: 0), Operand: Res); |
| 10044 | return Res; |
| 10045 | } |
| 10046 | |
| 10047 | static SDValue LowerVecReduceF(SDValue Op, SelectionDAG &DAG, |
| 10048 | const ARMSubtarget *ST) { |
| 10049 | if (!ST->hasMVEFloatOps()) |
| 10050 | return SDValue(); |
| 10051 | return LowerVecReduce(Op, DAG, ST); |
| 10052 | } |
| 10053 | |
| 10054 | static SDValue LowerVecReduceMinMax(SDValue Op, SelectionDAG &DAG, |
| 10055 | const ARMSubtarget *ST) { |
| 10056 | if (!ST->hasNEON()) |
| 10057 | return SDValue(); |
| 10058 | |
| 10059 | SDLoc dl(Op); |
| 10060 | SDValue Op0 = Op->getOperand(Num: 0); |
| 10061 | EVT VT = Op0.getValueType(); |
| 10062 | EVT EltVT = VT.getVectorElementType(); |
| 10063 | |
| 10064 | unsigned PairwiseIntrinsic = 0; |
| 10065 | switch (Op->getOpcode()) { |
| 10066 | default: |
| 10067 | llvm_unreachable("Expected VECREDUCE opcode" ); |
| 10068 | case ISD::VECREDUCE_UMIN: |
| 10069 | PairwiseIntrinsic = Intrinsic::arm_neon_vpminu; |
| 10070 | break; |
| 10071 | case ISD::VECREDUCE_UMAX: |
| 10072 | PairwiseIntrinsic = Intrinsic::arm_neon_vpmaxu; |
| 10073 | break; |
| 10074 | case ISD::VECREDUCE_SMIN: |
| 10075 | PairwiseIntrinsic = Intrinsic::arm_neon_vpmins; |
| 10076 | break; |
| 10077 | case ISD::VECREDUCE_SMAX: |
| 10078 | PairwiseIntrinsic = Intrinsic::arm_neon_vpmaxs; |
| 10079 | break; |
| 10080 | } |
| 10081 | SDValue PairwiseOp = DAG.getConstant(Val: PairwiseIntrinsic, DL: dl, VT: MVT::i32); |
| 10082 | |
| 10083 | unsigned NumElts = VT.getVectorNumElements(); |
| 10084 | unsigned NumActiveLanes = NumElts; |
| 10085 | |
assert((NumActiveLanes == 16 || NumActiveLanes == 8 || NumActiveLanes == 4 ||
NumActiveLanes == 2) &&
"Only expected a power-of-2 vector size");
| 10089 | |
// Split 128-bit vectors, since vpmin/max take two 64-bit vectors.
| 10091 | if (VT.is128BitVector()) { |
| 10092 | SDValue Lo, Hi; |
| 10093 | std::tie(args&: Lo, args&: Hi) = DAG.SplitVector(N: Op0, DL: dl); |
| 10094 | VT = Lo.getValueType(); |
| 10095 | Op0 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT, Ops: {PairwiseOp, Lo, Hi}); |
| 10096 | NumActiveLanes /= 2; |
| 10097 | } |
| 10098 | |
| 10099 | // Use pairwise reductions until one lane remains |
| 10100 | while (NumActiveLanes > 1) { |
| 10101 | Op0 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT, Ops: {PairwiseOp, Op0, Op0}); |
| 10102 | NumActiveLanes /= 2; |
| 10103 | } |
| 10104 | |
| 10105 | SDValue Res = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
| 10106 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 10107 | |
| 10108 | // Result type may be wider than element type. |
| 10109 | if (EltVT != Op.getValueType()) { |
| 10110 | unsigned Extend = 0; |
| 10111 | switch (Op->getOpcode()) { |
| 10112 | default: |
| 10113 | llvm_unreachable("Expected VECREDUCE opcode" ); |
| 10114 | case ISD::VECREDUCE_UMIN: |
| 10115 | case ISD::VECREDUCE_UMAX: |
| 10116 | Extend = ISD::ZERO_EXTEND; |
| 10117 | break; |
| 10118 | case ISD::VECREDUCE_SMIN: |
| 10119 | case ISD::VECREDUCE_SMAX: |
| 10120 | Extend = ISD::SIGN_EXTEND; |
| 10121 | break; |
| 10122 | } |
| 10123 | Res = DAG.getNode(Opcode: Extend, DL: dl, VT: Op.getValueType(), Operand: Res); |
| 10124 | } |
| 10125 | return Res; |
| 10126 | } |
| 10127 | |
| 10128 | static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) { |
| 10129 | if (isStrongerThanMonotonic(AO: cast<AtomicSDNode>(Val&: Op)->getSuccessOrdering())) |
| 10130 | // Acquire/Release load/store is not legal for targets without a dmb or |
| 10131 | // equivalent available. |
| 10132 | return SDValue(); |
| 10133 | |
| 10134 | // Monotonic load/store is legal for all targets. |
| 10135 | return Op; |
| 10136 | } |
| 10137 | |
| 10138 | static void ReplaceREADCYCLECOUNTER(SDNode *N, |
| 10139 | SmallVectorImpl<SDValue> &Results, |
| 10140 | SelectionDAG &DAG, |
| 10141 | const ARMSubtarget *Subtarget) { |
| 10142 | SDLoc DL(N); |
| 10143 | // Under Power Management extensions, the cycle-count is: |
| 10144 | // mrc p15, #0, <Rt>, c9, c13, #0 |
| 10145 | SDValue Ops[] = { N->getOperand(Num: 0), // Chain |
| 10146 | DAG.getTargetConstant(Val: Intrinsic::arm_mrc, DL, VT: MVT::i32), |
| 10147 | DAG.getTargetConstant(Val: 15, DL, VT: MVT::i32), |
| 10148 | DAG.getTargetConstant(Val: 0, DL, VT: MVT::i32), |
| 10149 | DAG.getTargetConstant(Val: 9, DL, VT: MVT::i32), |
| 10150 | DAG.getTargetConstant(Val: 13, DL, VT: MVT::i32), |
| 10151 | DAG.getTargetConstant(Val: 0, DL, VT: MVT::i32) |
| 10152 | }; |
| 10153 | |
| 10154 | SDValue Cycles32 = DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL, |
| 10155 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::Other), Ops); |
| 10156 | Results.push_back(Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL, VT: MVT::i64, N1: Cycles32, |
| 10157 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32))); |
| 10158 | Results.push_back(Elt: Cycles32.getValue(R: 1)); |
| 10159 | } |
| 10160 | |
| 10161 | static SDValue createGPRPairNode2xi32(SelectionDAG &DAG, SDValue V0, |
| 10162 | SDValue V1) { |
| 10163 | SDLoc dl(V0.getNode()); |
| 10164 | SDValue RegClass = |
| 10165 | DAG.getTargetConstant(Val: ARM::GPRPairRegClassID, DL: dl, VT: MVT::i32); |
| 10166 | SDValue SubReg0 = DAG.getTargetConstant(Val: ARM::gsub_0, DL: dl, VT: MVT::i32); |
| 10167 | SDValue SubReg1 = DAG.getTargetConstant(Val: ARM::gsub_1, DL: dl, VT: MVT::i32); |
| 10168 | const SDValue Ops[] = {RegClass, V0, SubReg0, V1, SubReg1}; |
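|  | // Tie the two i32 values into an untyped GPRPair (an even/odd core-register |
|  | // pair such as r0/r1); pseudos like CMP_SWAP_64 take their operands in this |
|  | // form. |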
| 10169 | return SDValue( |
| 10170 | DAG.getMachineNode(Opcode: TargetOpcode::REG_SEQUENCE, dl, VT: MVT::Untyped, Ops), 0); |
| 10171 | } |
| 10172 | |
| 10173 | static SDValue createGPRPairNodei64(SelectionDAG &DAG, SDValue V) { |
| 10174 | SDLoc dl(V.getNode()); |
| 10175 | auto [VLo, VHi] = DAG.SplitScalar(N: V, DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
| 10176 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); |
| 10177 | if (isBigEndian) |
| 10178 | std::swap(a&: VLo, b&: VHi); |
| 10179 | return createGPRPairNode2xi32(DAG, V0: VLo, V1: VHi); |
| 10180 | } |
| 10181 | |
| 10182 | static void ReplaceCMP_SWAP_64Results(SDNode *N, |
| 10183 | SmallVectorImpl<SDValue> &Results, |
| 10184 | SelectionDAG &DAG) { |
| 10185 | assert(N->getValueType(0) == MVT::i64 && |
| 10186 | "AtomicCmpSwap on types less than 64 should be legal" ); |
| 10187 | SDValue Ops[] = { |
| 10188 | createGPRPairNode2xi32(DAG, V0: N->getOperand(Num: 1), |
| 10189 | V1: DAG.getUNDEF(VT: MVT::i32)), // pointer, temp |
| 10190 | createGPRPairNodei64(DAG, V: N->getOperand(Num: 2)), // expected |
| 10191 | createGPRPairNodei64(DAG, V: N->getOperand(Num: 3)), // new |
| 10192 | N->getOperand(Num: 0), // chain in |
| 10193 | }; |
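|  | // CMP_SWAP_64 is a pseudo that is expanded after register allocation into an |
|  | // ldrexd/strexd loop, which is why all three value operands are passed as |
|  | // register pairs. |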
| 10194 | SDNode *CmpSwap = DAG.getMachineNode( |
| 10195 | Opcode: ARM::CMP_SWAP_64, dl: SDLoc(N), |
| 10196 | VTs: DAG.getVTList(VT1: MVT::Untyped, VT2: MVT::Untyped, VT3: MVT::Other), Ops); |
| 10197 | |
| 10198 | MachineMemOperand *MemOp = cast<MemSDNode>(Val: N)->getMemOperand(); |
| 10199 | DAG.setNodeMemRefs(N: cast<MachineSDNode>(Val: CmpSwap), NewMemRefs: {MemOp}); |
| 10200 | |
| 10201 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); |
| 10202 | |
| 10203 | SDValue Lo = |
| 10204 | DAG.getTargetExtractSubreg(SRIdx: isBigEndian ? ARM::gsub_1 : ARM::gsub_0, |
| 10205 | DL: SDLoc(N), VT: MVT::i32, Operand: SDValue(CmpSwap, 0)); |
| 10206 | SDValue Hi = |
| 10207 | DAG.getTargetExtractSubreg(SRIdx: isBigEndian ? ARM::gsub_0 : ARM::gsub_1, |
| 10208 | DL: SDLoc(N), VT: MVT::i32, Operand: SDValue(CmpSwap, 0)); |
| 10209 | Results.push_back(Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: SDLoc(N), VT: MVT::i64, N1: Lo, N2: Hi)); |
| 10210 | Results.push_back(Elt: SDValue(CmpSwap, 2)); |
| 10211 | } |
| 10212 | |
| 10213 | SDValue ARMTargetLowering::LowerFSETCC(SDValue Op, SelectionDAG &DAG) const { |
| 10214 | SDLoc dl(Op); |
| 10215 | EVT VT = Op.getValueType(); |
| 10216 | SDValue Chain = Op.getOperand(i: 0); |
| 10217 | SDValue LHS = Op.getOperand(i: 1); |
| 10218 | SDValue RHS = Op.getOperand(i: 2); |
| 10219 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 3))->get(); |
| 10220 | bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS; |
| 10221 | |
| 10222 | // If we don't have instructions for this float type then soften to a libcall |
| 10223 | // and use SETCC instead. |
| 10224 | if (isUnsupportedFloatingType(VT: LHS.getValueType())) { |
| 10225 | softenSetCCOperands(DAG, VT: LHS.getValueType(), NewLHS&: LHS, NewRHS&: RHS, CCCode&: CC, DL: dl, OldLHS: LHS, OldRHS: RHS, |
| 10226 | Chain, IsSignaling); |
| 10227 | if (!RHS.getNode()) { |
| 10228 | RHS = DAG.getConstant(Val: 0, DL: dl, VT: LHS.getValueType()); |
| 10229 | CC = ISD::SETNE; |
| 10230 | } |
| 10231 | SDValue Result = DAG.getNode(Opcode: ISD::SETCC, DL: dl, VT, N1: LHS, N2: RHS, |
| 10232 | N3: DAG.getCondCode(Cond: CC)); |
| 10233 | return DAG.getMergeValues(Ops: {Result, Chain}, dl); |
| 10234 | } |
| 10235 | |
| 10236 | ARMCC::CondCodes CondCode, CondCode2; |
| 10237 | FPCCToARMCC(CC, CondCode, CondCode2); |
| 10238 | |
| 10239 | SDValue True = DAG.getConstant(Val: 1, DL: dl, VT); |
| 10240 | SDValue False = DAG.getConstant(Val: 0, DL: dl, VT); |
| 10241 | SDValue ARMcc = DAG.getConstant(Val: CondCode, DL: dl, VT: MVT::i32); |
| 10242 | SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, Signaling: IsSignaling); |
| 10243 | SDValue Result = getCMOV(dl, VT, FalseVal: False, TrueVal: True, ARMcc, Flags: Cmp, DAG); |
| 10244 | if (CondCode2 != ARMCC::AL) { |
| 10245 | ARMcc = DAG.getConstant(Val: CondCode2, DL: dl, VT: MVT::i32); |
| 10246 | Result = getCMOV(dl, VT, FalseVal: Result, TrueVal: True, ARMcc, Flags: Cmp, DAG); |
| 10247 | } |
| 10248 | return DAG.getMergeValues(Ops: {Result, Chain}, dl); |
| 10249 | } |
| 10250 | |
| 10251 | SDValue ARMTargetLowering::LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const { |
| 10252 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); |
| 10253 | |
| 10254 | EVT VT = getPointerTy(DL: DAG.getDataLayout()); |
| 10255 | int FI = MFI.CreateFixedObject(Size: 4, SPOffset: 0, IsImmutable: false); |
| 10256 | return DAG.getFrameIndex(FI, VT); |
| 10257 | } |
| 10258 | |
| 10259 | SDValue ARMTargetLowering::LowerFP_TO_BF16(SDValue Op, |
| 10260 | SelectionDAG &DAG) const { |
| 10261 | SDLoc DL(Op); |
| 10262 | MakeLibCallOptions CallOptions; |
| 10263 | MVT SVT = Op.getOperand(i: 0).getSimpleValueType(); |
| 10264 | RTLIB::Libcall LC = RTLIB::getFPROUND(OpVT: SVT, RetVT: MVT::bf16); |
| 10265 | SDValue Res = |
| 10266 | makeLibCall(DAG, LC, RetVT: MVT::f32, Ops: Op.getOperand(i: 0), CallOptions, dl: DL).first; |
| 10267 | return DAG.getBitcast(VT: MVT::i32, V: Res); |
| 10268 | } |
| 10269 | |
| 10270 | SDValue ARMTargetLowering::LowerCMP(SDValue Op, SelectionDAG &DAG) const { |
| 10271 | SDLoc dl(Op); |
| 10272 | SDValue LHS = Op.getOperand(i: 0); |
| 10273 | SDValue RHS = Op.getOperand(i: 1); |
| 10274 | |
| 10275 | // Determine whether this is a signed or an unsigned comparison. |
| 10276 | bool IsSigned = (Op.getOpcode() == ISD::SCMP); |
| 10277 | |
| 10278 | // Special case for Thumb1 UCMP only |
| 10279 | if (!IsSigned && Subtarget->isThumb1Only()) { |
| 10280 | // For Thumb unsigned comparison, use this sequence: |
| 10281 | // subs r2, r0, r1 ; r2 = LHS - RHS, sets flags |
| 10282 | // sbc r2, r2 ; r2 = r2 - r2 - !carry |
| 10283 | // cmp r1, r0 ; compare RHS with LHS |
| 10284 | // sbc r1, r1 ; r1 = r1 - r1 - !carry |
| 10285 | // subs r0, r2, r1 ; r0 = r2 - r1 (final result) |
| 10286 | |
| 10287 | // First subtraction: LHS - RHS |
| 10288 | SDValue Sub1WithFlags = DAG.getNode( |
| 10289 | Opcode: ARMISD::SUBC, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: FlagsVT), N1: LHS, N2: RHS); |
| 10290 | SDValue Sub1Result = Sub1WithFlags.getValue(R: 0); |
| 10291 | SDValue Flags1 = Sub1WithFlags.getValue(R: 1); |
| 10292 | |
| 10293 | // SUBE: Sub1Result - Sub1Result - !carry |
| 10294 | // This gives 0 if LHS >= RHS (unsigned), -1 if LHS < RHS (unsigned) |
| 10295 | SDValue Sbc1 = |
| 10296 | DAG.getNode(Opcode: ARMISD::SUBE, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: FlagsVT), |
| 10297 | N1: Sub1Result, N2: Sub1Result, N3: Flags1); |
| 10298 | SDValue Sbc1Result = Sbc1.getValue(R: 0); |
| 10299 | |
| 10300 | // Second comparison: RHS vs LHS (reverse comparison) |
| 10301 | SDValue CmpFlags = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: FlagsVT, N1: RHS, N2: LHS); |
| 10302 | |
| 10303 | // SUBE: RHS - RHS - !carry |
| 10304 | // This gives 0 if RHS <= LHS (unsigned), -1 if RHS > LHS (unsigned) |
| 10305 | SDValue Sbc2 = DAG.getNode( |
| 10306 | Opcode: ARMISD::SUBE, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: FlagsVT), N1: RHS, N2: RHS, N3: CmpFlags); |
| 10307 | SDValue Sbc2Result = Sbc2.getValue(R: 0); |
| 10308 | |
| 10309 | // Final subtraction: Sbc1Result - Sbc2Result (no flags needed) |
| 10310 | SDValue Result = |
| 10311 | DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, N1: Sbc1Result, N2: Sbc2Result); |
| 10312 | if (Op.getValueType() != MVT::i32) |
| 10313 | Result = DAG.getSExtOrTrunc(Op: Result, DL: dl, VT: Op.getValueType()); |
| 10314 | |
| 10315 | return Result; |
| 10316 | } |
| 10317 | |
| 10318 | // For the ARM assembly pattern: |
| 10319 | //   subs r0, r0, r1   ; subtract RHS from LHS and set flags |
| 10320 | //   movgt r0, #1      ; if LHS > RHS, set result to 1 (GT for signed, HI for unsigned) |
| 10321 | //   mvnlt r0, #0      ; if LHS < RHS, set result to -1 (LT for signed, LO for unsigned) |
| 10322 | //                     ; if LHS == RHS, result remains 0 from the subs |
| 10324 | |
| 10325 | // Optimization: if RHS is a subtraction against 0, use ADDC instead of SUBC |
| 10326 | unsigned Opcode = ARMISD::SUBC; |
| 10327 | |
| 10328 | // Check if RHS is a subtraction against 0: (0 - X) |
| 10329 | if (RHS.getOpcode() == ISD::SUB) { |
| 10330 | SDValue SubLHS = RHS.getOperand(i: 0); |
| 10331 | SDValue SubRHS = RHS.getOperand(i: 1); |
| 10332 | |
| 10333 | // Check if it's 0 - X |
| 10334 | if (isNullConstant(V: SubLHS)) { |
| 10335 | bool CanUseAdd = false; |
| 10336 | if (IsSigned) { |
| 10337 | // For SCMP: only if X is known to never be INT_MIN (to avoid overflow) |
| 10338 | if (RHS->getFlags().hasNoSignedWrap() || !DAG.computeKnownBits(Op: SubRHS) |
| 10339 | .getSignedMinValue() |
| 10340 | .isMinSignedValue()) { |
| 10341 | CanUseAdd = true; |
| 10342 | } |
| 10343 | } else { |
| 10344 | // For UCMP: only if X is known to never be zero |
| 10345 | if (DAG.isKnownNeverZero(Op: SubRHS)) { |
| 10346 | CanUseAdd = true; |
| 10347 | } |
| 10348 | } |
| 10349 | |
| 10350 | if (CanUseAdd) { |
| 10351 | Opcode = ARMISD::ADDC; |
| 10352 | RHS = SubRHS; // Replace RHS with X, so we do LHS + X instead of |
| 10353 | // LHS - (0 - X) |
| 10354 | } |
| 10355 | } |
| 10356 | } |
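|  | // For example (illustrative), scmp(a, 0 - b) with b known not to be INT_MIN |
|  | // can use 'adds r0, r0, r1' rather than materializing the negation first. |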
| 10357 | |
| 10358 | // Generate the operation with flags |
| 10359 | SDValue OpWithFlags = |
| 10360 | DAG.getNode(Opcode, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: FlagsVT), N1: LHS, N2: RHS); |
| 10361 | |
| 10362 | SDValue OpResult = OpWithFlags.getValue(R: 0); |
| 10363 | SDValue Flags = OpWithFlags.getValue(R: 1); |
| 10364 | |
| 10365 | // Constants for conditional moves |
| 10366 | SDValue One = DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32); |
| 10367 | SDValue MinusOne = DAG.getAllOnesConstant(DL: dl, VT: MVT::i32); |
| 10368 | |
| 10369 | // Select condition codes based on signed vs unsigned |
| 10370 | ARMCC::CondCodes GTCond = IsSigned ? ARMCC::GT : ARMCC::HI; |
| 10371 | ARMCC::CondCodes LTCond = IsSigned ? ARMCC::LT : ARMCC::LO; |
| 10372 | |
| 10373 | // First conditional move: if greater than, set to 1 |
| 10374 | SDValue GTCondValue = DAG.getConstant(Val: GTCond, DL: dl, VT: MVT::i32); |
| 10375 | SDValue Result1 = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT: MVT::i32, N1: OpResult, N2: One, |
| 10376 | N3: GTCondValue, N4: Flags); |
| 10377 | |
| 10378 | // Second conditional move: if less than, set to -1 |
| 10379 | SDValue LTCondValue = DAG.getConstant(Val: LTCond, DL: dl, VT: MVT::i32); |
| 10380 | SDValue Result2 = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT: MVT::i32, N1: Result1, N2: MinusOne, |
| 10381 | N3: LTCondValue, N4: Flags); |
| 10382 | |
| 10383 | if (Op.getValueType() != MVT::i32) |
| 10384 | Result2 = DAG.getSExtOrTrunc(Op: Result2, DL: dl, VT: Op.getValueType()); |
| 10385 | |
| 10386 | return Result2; |
| 10387 | } |
| 10388 | |
| 10389 | SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { |
| 10390 | LLVM_DEBUG(dbgs() << "Lowering node: "; Op.dump()); |
| 10391 | switch (Op.getOpcode()) { |
| 10392 | default: llvm_unreachable("Don't know how to custom lower this!"); |
| 10393 | case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG); |
| 10394 | case ISD::ConstantPool: return LowerConstantPool(Op, DAG); |
| 10395 | case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); |
| 10396 | case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); |
| 10397 | case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); |
| 10398 | case ISD::SELECT: return LowerSELECT(Op, DAG); |
| 10399 | case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); |
| 10400 | case ISD::BRCOND: return LowerBRCOND(Op, DAG); |
| 10401 | case ISD::BR_CC: return LowerBR_CC(Op, DAG); |
| 10402 | case ISD::BR_JT: return LowerBR_JT(Op, DAG); |
| 10403 | case ISD::VASTART: return LowerVASTART(Op, DAG); |
| 10404 | case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget); |
| 10405 | case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); |
| 10406 | case ISD::SINT_TO_FP: |
| 10407 | case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); |
| 10408 | case ISD::STRICT_FP_TO_SINT: |
| 10409 | case ISD::STRICT_FP_TO_UINT: |
| 10410 | case ISD::FP_TO_SINT: |
| 10411 | case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); |
| 10412 | case ISD::FP_TO_SINT_SAT: |
| 10413 | case ISD::FP_TO_UINT_SAT: return LowerFP_TO_INT_SAT(Op, DAG, Subtarget); |
| 10414 | case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); |
| 10415 | case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); |
| 10416 | case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); |
| 10417 | case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); |
| 10418 | case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); |
| 10419 | case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG); |
| 10420 | case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG, Subtarget); |
| 10421 | case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, |
| 10422 | Subtarget); |
| 10423 | case ISD::BITCAST: return ExpandBITCAST(N: Op.getNode(), DAG, Subtarget); |
| 10424 | case ISD::SHL: |
| 10425 | case ISD::SRL: |
| 10426 | case ISD::SRA: return LowerShift(N: Op.getNode(), DAG, ST: Subtarget); |
| 10427 | case ISD::SREM: return LowerREM(N: Op.getNode(), DAG); |
| 10428 | case ISD::UREM: return LowerREM(N: Op.getNode(), DAG); |
| 10429 | case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); |
| 10430 | case ISD::SRL_PARTS: |
| 10431 | case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); |
| 10432 | case ISD::CTTZ: |
| 10433 | case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(N: Op.getNode(), DAG, ST: Subtarget); |
| 10434 | case ISD::CTPOP: return LowerCTPOP(N: Op.getNode(), DAG, ST: Subtarget); |
| 10435 | case ISD::SETCC: return LowerVSETCC(Op, DAG, ST: Subtarget); |
| 10436 | case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG); |
| 10437 | case ISD::ConstantFP: return LowerConstantFP(Op, DAG, ST: Subtarget); |
| 10438 | case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, ST: Subtarget); |
| 10439 | case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG, ST: Subtarget); |
| 10440 | case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG, ST: Subtarget); |
| 10441 | case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); |
| 10442 | case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG, ST: Subtarget); |
| 10443 | case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG, ST: Subtarget); |
| 10444 | case ISD::TRUNCATE: return LowerTruncate(N: Op.getNode(), DAG, Subtarget); |
| 10445 | case ISD::SIGN_EXTEND: |
| 10446 | case ISD::ZERO_EXTEND: return LowerVectorExtend(N: Op.getNode(), DAG, Subtarget); |
| 10447 | case ISD::GET_ROUNDING: return LowerGET_ROUNDING(Op, DAG); |
| 10448 | case ISD::SET_ROUNDING: return LowerSET_ROUNDING(Op, DAG); |
| 10449 | case ISD::SET_FPMODE: |
| 10450 | return LowerSET_FPMODE(Op, DAG); |
| 10451 | case ISD::RESET_FPMODE: |
| 10452 | return LowerRESET_FPMODE(Op, DAG); |
| 10453 | case ISD::MUL: return LowerMUL(Op, DAG); |
| 10454 | case ISD::SDIV: |
| 10455 | if (getTargetMachine().getTargetTriple().isOSWindows() && |
| 10456 | !Op.getValueType().isVector()) |
| 10457 | return LowerDIV_Windows(Op, DAG, /* Signed */ true); |
| 10458 | return LowerSDIV(Op, DAG, ST: Subtarget); |
| 10459 | case ISD::UDIV: |
| 10460 | if (getTargetMachine().getTargetTriple().isOSWindows() && |
| 10461 | !Op.getValueType().isVector()) |
| 10462 | return LowerDIV_Windows(Op, DAG, /* Signed */ false); |
| 10463 | return LowerUDIV(Op, DAG, ST: Subtarget); |
| 10464 | case ISD::UADDO_CARRY: |
| 10465 | case ISD::USUBO_CARRY: |
| 10466 | return LowerUADDSUBO_CARRY(Op, DAG); |
| 10467 | case ISD::UADDO: |
| 10468 | case ISD::USUBO: |
| 10469 | case ISD::UMULO: |
| 10470 | case ISD::SADDO: |
| 10471 | case ISD::SSUBO: |
| 10472 | case ISD::SMULO: |
| 10473 | return LowerALUO(Op, DAG); |
| 10474 | case ISD::SADDSAT: |
| 10475 | case ISD::SSUBSAT: |
| 10476 | case ISD::UADDSAT: |
| 10477 | case ISD::USUBSAT: |
| 10478 | return LowerADDSUBSAT(Op, DAG, Subtarget); |
| 10479 | case ISD::LOAD: { |
| 10480 | auto *LD = cast<LoadSDNode>(Val&: Op); |
| 10481 | EVT MemVT = LD->getMemoryVT(); |
| 10482 | if (Subtarget->hasMVEIntegerOps() && |
| 10483 | (MemVT == MVT::v2i1 || MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || |
| 10484 | MemVT == MVT::v16i1)) |
| 10485 | return LowerPredicateLoad(Op, DAG); |
| 10486 | |
| 10487 | auto Pair = LowerAEABIUnalignedLoad(Op, DAG); |
| 10488 | if (Pair.first) |
| 10489 | return DAG.getMergeValues(Ops: {Pair.first, Pair.second}, dl: SDLoc(Pair.first)); |
| 10490 | return SDValue(); |
| 10491 | } |
| 10492 | case ISD::STORE: |
| 10493 | return LowerSTORE(Op, DAG, Subtarget); |
| 10494 | case ISD::MLOAD: |
| 10495 | return LowerMLOAD(Op, DAG); |
| 10496 | case ISD::VECREDUCE_MUL: |
| 10497 | case ISD::VECREDUCE_AND: |
| 10498 | case ISD::VECREDUCE_OR: |
| 10499 | case ISD::VECREDUCE_XOR: |
| 10500 | return LowerVecReduce(Op, DAG, ST: Subtarget); |
| 10501 | case ISD::VECREDUCE_FADD: |
| 10502 | case ISD::VECREDUCE_FMUL: |
| 10503 | case ISD::VECREDUCE_FMIN: |
| 10504 | case ISD::VECREDUCE_FMAX: |
| 10505 | return LowerVecReduceF(Op, DAG, ST: Subtarget); |
| 10506 | case ISD::VECREDUCE_UMIN: |
| 10507 | case ISD::VECREDUCE_UMAX: |
| 10508 | case ISD::VECREDUCE_SMIN: |
| 10509 | case ISD::VECREDUCE_SMAX: |
| 10510 | return LowerVecReduceMinMax(Op, DAG, ST: Subtarget); |
| 10511 | case ISD::ATOMIC_LOAD: |
| 10512 | case ISD::ATOMIC_STORE: |
| 10513 | return LowerAtomicLoadStore(Op, DAG); |
| 10514 | case ISD::SDIVREM: |
| 10515 | case ISD::UDIVREM: return LowerDivRem(Op, DAG); |
| 10516 | case ISD::DYNAMIC_STACKALLOC: |
| 10517 | if (getTargetMachine().getTargetTriple().isOSWindows()) |
| 10518 | return LowerDYNAMIC_STACKALLOC(Op, DAG); |
| 10519 | llvm_unreachable("Don't know how to custom lower this!" ); |
| 10520 | case ISD::STRICT_FP_ROUND: |
| 10521 | case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG); |
| 10522 | case ISD::STRICT_FP_EXTEND: |
| 10523 | case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); |
| 10524 | case ISD::STRICT_FSETCC: |
| 10525 | case ISD::STRICT_FSETCCS: return LowerFSETCC(Op, DAG); |
| 10526 | case ISD::SPONENTRY: |
| 10527 | return LowerSPONENTRY(Op, DAG); |
| 10528 | case ISD::FP_TO_BF16: |
| 10529 | return LowerFP_TO_BF16(Op, DAG); |
| 10530 | case ARMISD::WIN__DBZCHK: return SDValue(); |
| 10531 | case ISD::UCMP: |
| 10532 | case ISD::SCMP: |
| 10533 | return LowerCMP(Op, DAG); |
| 10534 | case ISD::ABS: |
| 10535 | return LowerABS(Op, DAG); |
| 10536 | case ISD::STRICT_LROUND: |
| 10537 | case ISD::STRICT_LLROUND: |
| 10538 | case ISD::STRICT_LRINT: |
| 10539 | case ISD::STRICT_LLRINT: { |
| 10540 | assert((Op.getOperand(1).getValueType() == MVT::f16 || |
| 10541 | Op.getOperand(1).getValueType() == MVT::bf16) && |
| 10542 | "Expected custom lowering of rounding operations only for f16" ); |
| 10543 | SDLoc DL(Op); |
| 10544 | SDValue Ext = DAG.getNode(Opcode: ISD::STRICT_FP_EXTEND, DL, ResultTys: {MVT::f32, MVT::Other}, |
| 10545 | Ops: {Op.getOperand(i: 0), Op.getOperand(i: 1)}); |
| 10546 | return DAG.getNode(Opcode: Op.getOpcode(), DL, ResultTys: {Op.getValueType(), MVT::Other}, |
| 10547 | Ops: {Ext.getValue(R: 1), Ext.getValue(R: 0)}); |
| 10548 | } |
| 10549 | } |
| 10550 | } |
| 10551 | |
| 10552 | static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results, |
| 10553 | SelectionDAG &DAG) { |
| 10554 | unsigned IntNo = N->getConstantOperandVal(Num: 0); |
| 10555 | unsigned Opc = 0; |
| 10556 | if (IntNo == Intrinsic::arm_smlald) |
| 10557 | Opc = ARMISD::SMLALD; |
| 10558 | else if (IntNo == Intrinsic::arm_smlaldx) |
| 10559 | Opc = ARMISD::SMLALDX; |
| 10560 | else if (IntNo == Intrinsic::arm_smlsld) |
| 10561 | Opc = ARMISD::SMLSLD; |
| 10562 | else if (IntNo == Intrinsic::arm_smlsldx) |
| 10563 | Opc = ARMISD::SMLSLDX; |
| 10564 | else |
| 10565 | return; |
| 10566 | |
| 10567 | SDLoc dl(N); |
| 10568 | SDValue Lo, Hi; |
| 10569 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: N->getOperand(Num: 3), DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
| 10570 | |
| 10571 | SDValue LongMul = DAG.getNode(Opcode: Opc, DL: dl, |
| 10572 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
| 10573 | N1: N->getOperand(Num: 1), N2: N->getOperand(Num: 2), |
| 10574 | N3: Lo, N4: Hi); |
| 10575 | Results.push_back(Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, |
| 10576 | N1: LongMul.getValue(R: 0), N2: LongMul.getValue(R: 1))); |
| 10577 | } |
| 10578 | |
| 10579 | /// ReplaceNodeResults - Replace the results of a node with an illegal result |
| 10580 | /// type with new values built out of custom code. |
| 10581 | void ARMTargetLowering::ReplaceNodeResults(SDNode *N, |
| 10582 | SmallVectorImpl<SDValue> &Results, |
| 10583 | SelectionDAG &DAG) const { |
| 10584 | SDValue Res; |
| 10585 | switch (N->getOpcode()) { |
| 10586 | default: |
| 10587 | llvm_unreachable("Don't know how to custom expand this!" ); |
| 10588 | case ISD::READ_REGISTER: |
| 10589 | ExpandREAD_REGISTER(N, Results, DAG); |
| 10590 | break; |
| 10591 | case ISD::BITCAST: |
| 10592 | Res = ExpandBITCAST(N, DAG, Subtarget); |
| 10593 | break; |
| 10594 | case ISD::SRL: |
| 10595 | case ISD::SRA: |
| 10596 | case ISD::SHL: |
| 10597 | Res = Expand64BitShift(N, DAG, ST: Subtarget); |
| 10598 | break; |
| 10599 | case ISD::SREM: |
| 10600 | case ISD::UREM: |
| 10601 | Res = LowerREM(N, DAG); |
| 10602 | break; |
| 10603 | case ISD::SDIVREM: |
| 10604 | case ISD::UDIVREM: |
| 10605 | Res = LowerDivRem(Op: SDValue(N, 0), DAG); |
| 10606 | assert(Res.getNumOperands() == 2 && "DivRem needs two values"); |
| 10607 | Results.push_back(Elt: Res.getValue(R: 0)); |
| 10608 | Results.push_back(Elt: Res.getValue(R: 1)); |
| 10609 | return; |
| 10610 | case ISD::SADDSAT: |
| 10611 | case ISD::SSUBSAT: |
| 10612 | case ISD::UADDSAT: |
| 10613 | case ISD::USUBSAT: |
| 10614 | Res = LowerADDSUBSAT(Op: SDValue(N, 0), DAG, Subtarget); |
| 10615 | break; |
| 10616 | case ISD::READCYCLECOUNTER: |
| 10617 | ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget); |
| 10618 | return; |
| 10619 | case ISD::UDIV: |
| 10620 | case ISD::SDIV: |
| 10621 | assert(getTargetMachine().getTargetTriple().isOSWindows() && |
| 10622 | "can only expand DIV on Windows" ); |
| 10623 | return ExpandDIV_Windows(Op: SDValue(N, 0), DAG, Signed: N->getOpcode() == ISD::SDIV, |
| 10624 | Results); |
| 10625 | case ISD::ATOMIC_CMP_SWAP: |
| 10626 | ReplaceCMP_SWAP_64Results(N, Results, DAG); |
| 10627 | return; |
| 10628 | case ISD::INTRINSIC_WO_CHAIN: |
| 10629 | return ReplaceLongIntrinsic(N, Results, DAG); |
| 10630 | case ISD::LOAD: |
| 10631 | LowerLOAD(N, Results, DAG); |
| 10632 | break; |
| 10633 | case ISD::STORE: |
| 10634 | Res = LowerAEABIUnalignedStore(Op: SDValue(N, 0), DAG); |
| 10635 | break; |
| 10636 | case ISD::TRUNCATE: |
| 10637 | Res = LowerTruncate(N, DAG, Subtarget); |
| 10638 | break; |
| 10639 | case ISD::SIGN_EXTEND: |
| 10640 | case ISD::ZERO_EXTEND: |
| 10641 | Res = LowerVectorExtend(N, DAG, Subtarget); |
| 10642 | break; |
| 10643 | case ISD::FP_TO_SINT_SAT: |
| 10644 | case ISD::FP_TO_UINT_SAT: |
| 10645 | Res = LowerFP_TO_INT_SAT(Op: SDValue(N, 0), DAG, Subtarget); |
| 10646 | break; |
| 10647 | } |
| 10648 | if (Res.getNode()) |
| 10649 | Results.push_back(Elt: Res); |
| 10650 | } |
| 10651 | |
| 10652 | //===----------------------------------------------------------------------===// |
| 10653 | // ARM Scheduler Hooks |
| 10654 | //===----------------------------------------------------------------------===// |
| 10655 | |
| 10656 | /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and |
| 10657 | /// registers the function context. |
| 10658 | void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI, |
| 10659 | MachineBasicBlock *MBB, |
| 10660 | MachineBasicBlock *DispatchBB, |
| 10661 | int FI) const { |
| 10662 | assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && |
| 10663 | "ROPI/RWPI not currently supported with SjLj" ); |
| 10664 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 10665 | DebugLoc dl = MI.getDebugLoc(); |
| 10666 | MachineFunction *MF = MBB->getParent(); |
| 10667 | MachineRegisterInfo *MRI = &MF->getRegInfo(); |
| 10668 | MachineConstantPool *MCP = MF->getConstantPool(); |
| 10669 | ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); |
| 10670 | const Function &F = MF->getFunction(); |
| 10671 | |
| 10672 | bool isThumb = Subtarget->isThumb(); |
| 10673 | bool isThumb2 = Subtarget->isThumb2(); |
| 10674 | |
| 10675 | unsigned PCLabelId = AFI->createPICLabelUId(); |
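|  | // The PC reads ahead of the executing instruction: 4 bytes in Thumb mode and |
|  | // 8 bytes in ARM mode, hence the differing adjustment. |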
| 10676 | unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8; |
| 10677 | ARMConstantPoolValue *CPV = |
| 10678 | ARMConstantPoolMBB::Create(C&: F.getContext(), mbb: DispatchBB, ID: PCLabelId, PCAdj); |
| 10679 | unsigned CPI = MCP->getConstantPoolIndex(V: CPV, Alignment: Align(4)); |
| 10680 | |
| 10681 | const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass |
| 10682 | : &ARM::GPRRegClass; |
| 10683 | |
| 10684 | // Grab constant pool and fixed stack memory operands. |
| 10685 | MachineMemOperand *CPMMO = |
| 10686 | MF->getMachineMemOperand(PtrInfo: MachinePointerInfo::getConstantPool(MF&: *MF), |
| 10687 | F: MachineMemOperand::MOLoad, Size: 4, BaseAlignment: Align(4)); |
| 10688 | |
| 10689 | MachineMemOperand *FIMMOSt = |
| 10690 | MF->getMachineMemOperand(PtrInfo: MachinePointerInfo::getFixedStack(MF&: *MF, FI), |
| 10691 | F: MachineMemOperand::MOStore, Size: 4, BaseAlignment: Align(4)); |
| 10692 | |
| 10693 | // Load the address of the dispatch MBB into the jump buffer. |
| 10694 | if (isThumb2) { |
| 10695 | // Incoming value: jbuf |
| 10696 | // ldr.n r5, LCPI1_1 |
| 10697 | // orr r5, r5, #1 |
| 10698 | // add r5, pc |
| 10699 | // str r5, [$jbuf, #+4] ; &jbuf[1] |
| 10700 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 10701 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::t2LDRpci), DestReg: NewVReg1) |
| 10702 | .addConstantPoolIndex(Idx: CPI) |
| 10703 | .addMemOperand(MMO: CPMMO) |
| 10704 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10705 | // Set the low bit because of thumb mode. |
| 10706 | Register NewVReg2 = MRI->createVirtualRegister(RegClass: TRC); |
| 10707 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::t2ORRri), DestReg: NewVReg2) |
| 10708 | .addReg(RegNo: NewVReg1, Flags: RegState::Kill) |
| 10709 | .addImm(Val: 0x01) |
| 10710 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 10711 | .add(MO: condCodeOp()); |
| 10712 | Register NewVReg3 = MRI->createVirtualRegister(RegClass: TRC); |
| 10713 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tPICADD), DestReg: NewVReg3) |
| 10714 | .addReg(RegNo: NewVReg2, Flags: RegState::Kill) |
| 10715 | .addImm(Val: PCLabelId); |
| 10716 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::t2STRi12)) |
| 10717 | .addReg(RegNo: NewVReg3, Flags: RegState::Kill) |
| 10718 | .addFrameIndex(Idx: FI) |
| 10719 | .addImm(Val: 36) // &jbuf[1] :: pc |
| 10720 | .addMemOperand(MMO: FIMMOSt) |
| 10721 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10722 | } else if (isThumb) { |
| 10723 | // Incoming value: jbuf |
| 10724 | // ldr.n r1, LCPI1_4 |
| 10725 | // add r1, pc |
| 10726 | // mov r2, #1 |
| 10727 | // orrs r1, r2 |
| 10728 | // add r2, $jbuf, #+4 ; &jbuf[1] |
| 10729 | // str r1, [r2] |
| 10730 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 10731 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDRpci), DestReg: NewVReg1) |
| 10732 | .addConstantPoolIndex(Idx: CPI) |
| 10733 | .addMemOperand(MMO: CPMMO) |
| 10734 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10735 | Register NewVReg2 = MRI->createVirtualRegister(RegClass: TRC); |
| 10736 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tPICADD), DestReg: NewVReg2) |
| 10737 | .addReg(RegNo: NewVReg1, Flags: RegState::Kill) |
| 10738 | .addImm(Val: PCLabelId); |
| 10739 | // Set the low bit because of thumb mode. |
| 10740 | Register NewVReg3 = MRI->createVirtualRegister(RegClass: TRC); |
| 10741 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tMOVi8), DestReg: NewVReg3) |
| 10742 | .addReg(RegNo: ARM::CPSR, Flags: RegState::Define) |
| 10743 | .addImm(Val: 1) |
| 10744 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10745 | Register NewVReg4 = MRI->createVirtualRegister(RegClass: TRC); |
| 10746 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tORR), DestReg: NewVReg4) |
| 10747 | .addReg(RegNo: ARM::CPSR, Flags: RegState::Define) |
| 10748 | .addReg(RegNo: NewVReg2, Flags: RegState::Kill) |
| 10749 | .addReg(RegNo: NewVReg3, Flags: RegState::Kill) |
| 10750 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10751 | Register NewVReg5 = MRI->createVirtualRegister(RegClass: TRC); |
| 10752 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tADDframe), DestReg: NewVReg5) |
| 10753 | .addFrameIndex(Idx: FI) |
| 10754 | .addImm(Val: 36); // &jbuf[1] :: pc |
| 10755 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tSTRi)) |
| 10756 | .addReg(RegNo: NewVReg4, Flags: RegState::Kill) |
| 10757 | .addReg(RegNo: NewVReg5, Flags: RegState::Kill) |
| 10758 | .addImm(Val: 0) |
| 10759 | .addMemOperand(MMO: FIMMOSt) |
| 10760 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10761 | } else { |
| 10762 | // Incoming value: jbuf |
| 10763 | // ldr r1, LCPI1_1 |
| 10764 | // add r1, pc, r1 |
| 10765 | // str r1, [$jbuf, #+4] ; &jbuf[1] |
| 10766 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 10767 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::LDRi12), DestReg: NewVReg1) |
| 10768 | .addConstantPoolIndex(Idx: CPI) |
| 10769 | .addImm(Val: 0) |
| 10770 | .addMemOperand(MMO: CPMMO) |
| 10771 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10772 | Register NewVReg2 = MRI->createVirtualRegister(RegClass: TRC); |
| 10773 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::PICADD), DestReg: NewVReg2) |
| 10774 | .addReg(RegNo: NewVReg1, Flags: RegState::Kill) |
| 10775 | .addImm(Val: PCLabelId) |
| 10776 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10777 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::STRi12)) |
| 10778 | .addReg(RegNo: NewVReg2, Flags: RegState::Kill) |
| 10779 | .addFrameIndex(Idx: FI) |
| 10780 | .addImm(Val: 36) // &jbuf[1] :: pc |
| 10781 | .addMemOperand(MMO: FIMMOSt) |
| 10782 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10783 | } |
| 10784 | } |
| 10785 | |
| 10786 | void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, |
| 10787 | MachineBasicBlock *MBB) const { |
| 10788 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 10789 | DebugLoc dl = MI.getDebugLoc(); |
| 10790 | MachineFunction *MF = MBB->getParent(); |
| 10791 | MachineRegisterInfo *MRI = &MF->getRegInfo(); |
| 10792 | MachineFrameInfo &MFI = MF->getFrameInfo(); |
| 10793 | int FI = MFI.getFunctionContextIndex(); |
| 10794 | |
| 10795 | const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass |
| 10796 | : &ARM::GPRnopcRegClass; |
| 10797 | |
| 10798 | // Get a mapping of the call site numbers to all of the landing pads they're |
| 10799 | // associated with. |
| 10800 | DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2>> CallSiteNumToLPad; |
| 10801 | unsigned MaxCSNum = 0; |
| 10802 | for (MachineBasicBlock &BB : *MF) { |
| 10803 | if (!BB.isEHPad()) |
| 10804 | continue; |
| 10805 | |
| 10806 | // FIXME: We should assert that the EH_LABEL is the first MI in the landing |
| 10807 | // pad. |
| 10808 | for (MachineInstr &II : BB) { |
| 10809 | if (!II.isEHLabel()) |
| 10810 | continue; |
| 10811 | |
| 10812 | MCSymbol *Sym = II.getOperand(i: 0).getMCSymbol(); |
| 10813 | if (!MF->hasCallSiteLandingPad(Sym)) continue; |
| 10814 | |
| 10815 | SmallVectorImpl<unsigned> &CallSiteIdxs = MF->getCallSiteLandingPad(Sym); |
| 10816 | for (unsigned Idx : CallSiteIdxs) { |
| 10817 | CallSiteNumToLPad[Idx].push_back(Elt: &BB); |
| 10818 | MaxCSNum = std::max(a: MaxCSNum, b: Idx); |
| 10819 | } |
| 10820 | break; |
| 10821 | } |
| 10822 | } |
| 10823 | |
| 10824 | // Get an ordered list of the machine basic blocks for the jump table. |
| 10825 | std::vector<MachineBasicBlock*> LPadList; |
| 10826 | SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs; |
| 10827 | LPadList.reserve(n: CallSiteNumToLPad.size()); |
| 10828 | for (unsigned I = 1; I <= MaxCSNum; ++I) { |
| 10829 | SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I]; |
| 10830 | for (MachineBasicBlock *MBB : MBBList) { |
| 10831 | LPadList.push_back(x: MBB); |
| 10832 | InvokeBBs.insert_range(R: MBB->predecessors()); |
| 10833 | } |
| 10834 | } |
| 10835 | |
| 10836 | assert(!LPadList.empty() && |
| 10837 | "No landing pad destinations for the dispatch jump table!" ); |
| 10838 | |
| 10839 | // Create the jump table and associated information. |
| 10840 | MachineJumpTableInfo *JTI = |
| 10841 | MF->getOrCreateJumpTableInfo(JTEntryKind: MachineJumpTableInfo::EK_Inline); |
| 10842 | unsigned MJTI = JTI->createJumpTableIndex(DestBBs: LPadList); |
| 10843 | |
| 10844 | // Create the MBBs for the dispatch code. |
| 10845 | |
| 10846 | // Shove the dispatch's address into the return slot in the function context. |
| 10847 | MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock(); |
| 10848 | DispatchBB->setIsEHPad(); |
| 10849 | |
| 10850 | MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); |
| 10851 | |
| 10852 | BuildMI(BB: TrapBB, MIMD: dl, MCID: TII->get(Opcode: Subtarget->isThumb() ? ARM::tTRAP : ARM::TRAP)); |
| 10853 | DispatchBB->addSuccessor(Succ: TrapBB); |
| 10854 | |
| 10855 | MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock(); |
| 10856 | DispatchBB->addSuccessor(Succ: DispContBB); |
| 10857 | |
| 10858 | // Insert the MBBs into the function. |
| 10859 | MF->insert(MBBI: MF->end(), MBB: DispatchBB); |
| 10860 | MF->insert(MBBI: MF->end(), MBB: DispContBB); |
| 10861 | MF->insert(MBBI: MF->end(), MBB: TrapBB); |
| 10862 | |
| 10863 | // Insert code into the entry block that creates and registers the function |
| 10864 | // context. |
| 10865 | SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI); |
| 10866 | |
| 10867 | MachineMemOperand *FIMMOLd = MF->getMachineMemOperand( |
| 10868 | PtrInfo: MachinePointerInfo::getFixedStack(MF&: *MF, FI), |
| 10869 | F: MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, Size: 4, BaseAlignment: Align(4)); |
| 10870 | |
| 10871 | MachineInstrBuilder MIB; |
| 10872 | MIB = BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::Int_eh_sjlj_dispatchsetup)); |
| 10873 | |
| 10874 | const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII); |
| 10875 | const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); |
| 10876 | |
| 10877 | // Add a register mask with no preserved registers. This results in all |
| 10878 | // registers being marked as clobbered. This can't work if the dispatch block |
| 10879 | // is in a Thumb1 function and is linked with ARM code which uses the FP |
| 10880 | // registers, as there is no way to preserve the FP registers in Thumb1 mode. |
| 10881 | MIB.addRegMask(Mask: RI.getSjLjDispatchPreservedMask(MF: *MF)); |
| 10882 | |
| 10883 | bool IsPositionIndependent = isPositionIndependent(); |
| 10884 | unsigned NumLPads = LPadList.size(); |
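|  | // Each of the three arms below (Thumb2, Thumb1, ARM) emits the same logic: |
|  | // load the call-site index from the function context, compare it against the |
|  | // number of landing pads (trapping if it is out of range), then index into |
|  | // the jump table. |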
| 10885 | if (Subtarget->isThumb2()) { |
| 10886 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 10887 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2LDRi12), DestReg: NewVReg1) |
| 10888 | .addFrameIndex(Idx: FI) |
| 10889 | .addImm(Val: 4) |
| 10890 | .addMemOperand(MMO: FIMMOLd) |
| 10891 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10892 | |
| 10893 | if (NumLPads < 256) { |
| 10894 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2CMPri)) |
| 10895 | .addReg(RegNo: NewVReg1) |
| 10896 | .addImm(Val: LPadList.size()) |
| 10897 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10898 | } else { |
| 10899 | Register VReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 10900 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2MOVi16), DestReg: VReg1) |
| 10901 | .addImm(Val: NumLPads & 0xFFFF) |
| 10902 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10903 | |
| 10904 | unsigned VReg2 = VReg1; |
| 10905 | if ((NumLPads & 0xFFFF0000) != 0) { |
| 10906 | VReg2 = MRI->createVirtualRegister(RegClass: TRC); |
| 10907 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2MOVTi16), DestReg: VReg2) |
| 10908 | .addReg(RegNo: VReg1) |
| 10909 | .addImm(Val: NumLPads >> 16) |
| 10910 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10911 | } |
| 10912 | |
| 10913 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2CMPrr)) |
| 10914 | .addReg(RegNo: NewVReg1) |
| 10915 | .addReg(RegNo: VReg2) |
| 10916 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10917 | } |
| 10918 | |
| 10919 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2Bcc)) |
| 10920 | .addMBB(MBB: TrapBB) |
| 10921 | .addImm(Val: ARMCC::HI) |
| 10922 | .addReg(RegNo: ARM::CPSR); |
| 10923 | |
| 10924 | Register NewVReg3 = MRI->createVirtualRegister(RegClass: TRC); |
| 10925 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2LEApcrelJT), DestReg: NewVReg3) |
| 10926 | .addJumpTableIndex(Idx: MJTI) |
| 10927 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10928 | |
| 10929 | Register NewVReg4 = MRI->createVirtualRegister(RegClass: TRC); |
| 10930 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2ADDrs), DestReg: NewVReg4) |
| 10931 | .addReg(RegNo: NewVReg3, Flags: RegState::Kill) |
| 10932 | .addReg(RegNo: NewVReg1) |
| 10933 | .addImm(Val: ARM_AM::getSORegOpc(ShOp: ARM_AM::lsl, Imm: 2)) |
| 10934 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 10935 | .add(MO: condCodeOp()); |
| 10936 | |
| 10937 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2BR_JT)) |
| 10938 | .addReg(RegNo: NewVReg4, Flags: RegState::Kill) |
| 10939 | .addReg(RegNo: NewVReg1) |
| 10940 | .addJumpTableIndex(Idx: MJTI); |
| 10941 | } else if (Subtarget->isThumb()) { |
| 10942 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 10943 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDRspi), DestReg: NewVReg1) |
| 10944 | .addFrameIndex(Idx: FI) |
| 10945 | .addImm(Val: 1) |
| 10946 | .addMemOperand(MMO: FIMMOLd) |
| 10947 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10948 | |
| 10949 | if (NumLPads < 256) { |
| 10950 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tCMPi8)) |
| 10951 | .addReg(RegNo: NewVReg1) |
| 10952 | .addImm(Val: NumLPads) |
| 10953 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10954 | } else { |
| 10955 | MachineConstantPool *ConstantPool = MF->getConstantPool(); |
| 10956 | Type *Int32Ty = Type::getInt32Ty(C&: MF->getFunction().getContext()); |
| 10957 | const Constant *C = ConstantInt::get(Ty: Int32Ty, V: NumLPads); |
| 10958 | |
| 10959 | // MachineConstantPool wants an explicit alignment. |
| 10960 | Align Alignment = MF->getDataLayout().getPrefTypeAlign(Ty: Int32Ty); |
| 10961 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment); |
| 10962 | |
| 10963 | Register VReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 10964 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDRpci)) |
| 10965 | .addReg(RegNo: VReg1, Flags: RegState::Define) |
| 10966 | .addConstantPoolIndex(Idx) |
| 10967 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10968 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tCMPr)) |
| 10969 | .addReg(RegNo: NewVReg1) |
| 10970 | .addReg(RegNo: VReg1) |
| 10971 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10972 | } |
| 10973 | |
| 10974 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tBcc)) |
| 10975 | .addMBB(MBB: TrapBB) |
| 10976 | .addImm(Val: ARMCC::HI) |
| 10977 | .addReg(RegNo: ARM::CPSR); |
| 10978 | |
| 10979 | Register NewVReg2 = MRI->createVirtualRegister(RegClass: TRC); |
| 10980 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tLSLri), DestReg: NewVReg2) |
| 10981 | .addReg(RegNo: ARM::CPSR, Flags: RegState::Define) |
| 10982 | .addReg(RegNo: NewVReg1) |
| 10983 | .addImm(Val: 2) |
| 10984 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10985 | |
| 10986 | Register NewVReg3 = MRI->createVirtualRegister(RegClass: TRC); |
| 10987 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tLEApcrelJT), DestReg: NewVReg3) |
| 10988 | .addJumpTableIndex(Idx: MJTI) |
| 10989 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10990 | |
| 10991 | Register NewVReg4 = MRI->createVirtualRegister(RegClass: TRC); |
| 10992 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tADDrr), DestReg: NewVReg4) |
| 10993 | .addReg(RegNo: ARM::CPSR, Flags: RegState::Define) |
| 10994 | .addReg(RegNo: NewVReg2, Flags: RegState::Kill) |
| 10995 | .addReg(RegNo: NewVReg3) |
| 10996 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10997 | |
| 10998 | MachineMemOperand *JTMMOLd = |
| 10999 | MF->getMachineMemOperand(PtrInfo: MachinePointerInfo::getJumpTable(MF&: *MF), |
| 11000 | F: MachineMemOperand::MOLoad, Size: 4, BaseAlignment: Align(4)); |
| 11001 | |
| 11002 | Register NewVReg5 = MRI->createVirtualRegister(RegClass: TRC); |
| 11003 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDRi), DestReg: NewVReg5) |
| 11004 | .addReg(RegNo: NewVReg4, Flags: RegState::Kill) |
| 11005 | .addImm(Val: 0) |
| 11006 | .addMemOperand(MMO: JTMMOLd) |
| 11007 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11008 | |
| 11009 | unsigned NewVReg6 = NewVReg5; |
| 11010 | if (IsPositionIndependent) { |
| 11011 | NewVReg6 = MRI->createVirtualRegister(RegClass: TRC); |
| 11012 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tADDrr), DestReg: NewVReg6) |
| 11013 | .addReg(RegNo: ARM::CPSR, Flags: RegState::Define) |
| 11014 | .addReg(RegNo: NewVReg5, Flags: RegState::Kill) |
| 11015 | .addReg(RegNo: NewVReg3) |
| 11016 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11017 | } |
| 11018 | |
| 11019 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tBR_JTr)) |
| 11020 | .addReg(RegNo: NewVReg6, Flags: RegState::Kill) |
| 11021 | .addJumpTableIndex(Idx: MJTI); |
| 11022 | } else { |
| 11023 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 11024 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::LDRi12), DestReg: NewVReg1) |
| 11025 | .addFrameIndex(Idx: FI) |
| 11026 | .addImm(Val: 4) |
| 11027 | .addMemOperand(MMO: FIMMOLd) |
| 11028 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11029 | |
| 11030 | if (NumLPads < 256) { |
| 11031 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::CMPri)) |
| 11032 | .addReg(RegNo: NewVReg1) |
| 11033 | .addImm(Val: NumLPads) |
| 11034 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11035 | } else if (Subtarget->hasV6T2Ops() && isUInt<16>(x: NumLPads)) { |
| 11036 | Register VReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 11037 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::MOVi16), DestReg: VReg1) |
| 11038 | .addImm(Val: NumLPads & 0xFFFF) |
| 11039 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11040 | |
| 11041 | unsigned VReg2 = VReg1; |
| 11042 | if ((NumLPads & 0xFFFF0000) != 0) { |
| 11043 | VReg2 = MRI->createVirtualRegister(RegClass: TRC); |
| 11044 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::MOVTi16), DestReg: VReg2) |
| 11045 | .addReg(RegNo: VReg1) |
| 11046 | .addImm(Val: NumLPads >> 16) |
| 11047 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11048 | } |
| 11049 | |
| 11050 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::CMPrr)) |
| 11051 | .addReg(RegNo: NewVReg1) |
| 11052 | .addReg(RegNo: VReg2) |
| 11053 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11054 | } else { |
| 11055 | MachineConstantPool *ConstantPool = MF->getConstantPool(); |
| 11056 | Type *Int32Ty = Type::getInt32Ty(C&: MF->getFunction().getContext()); |
| 11057 | const Constant *C = ConstantInt::get(Ty: Int32Ty, V: NumLPads); |
| 11058 | |
| 11059 | // MachineConstantPool wants an explicit alignment. |
| 11060 | Align Alignment = MF->getDataLayout().getPrefTypeAlign(Ty: Int32Ty); |
| 11061 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment); |
| 11062 | |
| 11063 | Register VReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 11064 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::LDRcp)) |
| 11065 | .addReg(RegNo: VReg1, Flags: RegState::Define) |
| 11066 | .addConstantPoolIndex(Idx) |
| 11067 | .addImm(Val: 0) |
| 11068 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11069 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::CMPrr)) |
| 11070 | .addReg(RegNo: NewVReg1) |
| 11071 | .addReg(RegNo: VReg1, Flags: RegState::Kill) |
| 11072 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11073 | } |
| 11074 | |
| 11075 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::Bcc)) |
| 11076 | .addMBB(MBB: TrapBB) |
| 11077 | .addImm(Val: ARMCC::HI) |
| 11078 | .addReg(RegNo: ARM::CPSR); |
| 11079 | |
| 11080 | Register NewVReg3 = MRI->createVirtualRegister(RegClass: TRC); |
| 11081 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::MOVsi), DestReg: NewVReg3) |
| 11082 | .addReg(RegNo: NewVReg1) |
| 11083 | .addImm(Val: ARM_AM::getSORegOpc(ShOp: ARM_AM::lsl, Imm: 2)) |
| 11084 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11085 | .add(MO: condCodeOp()); |
| 11086 | Register NewVReg4 = MRI->createVirtualRegister(RegClass: TRC); |
| 11087 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::LEApcrelJT), DestReg: NewVReg4) |
| 11088 | .addJumpTableIndex(Idx: MJTI) |
| 11089 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11090 | |
| 11091 | MachineMemOperand *JTMMOLd = |
| 11092 | MF->getMachineMemOperand(PtrInfo: MachinePointerInfo::getJumpTable(MF&: *MF), |
| 11093 | F: MachineMemOperand::MOLoad, Size: 4, BaseAlignment: Align(4)); |
| 11094 | Register NewVReg5 = MRI->createVirtualRegister(RegClass: TRC); |
| 11095 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::LDRrs), DestReg: NewVReg5) |
| 11096 | .addReg(RegNo: NewVReg3, Flags: RegState::Kill) |
| 11097 | .addReg(RegNo: NewVReg4) |
| 11098 | .addImm(Val: 0) |
| 11099 | .addMemOperand(MMO: JTMMOLd) |
| 11100 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11101 | |
| 11102 | if (IsPositionIndependent) { |
| 11103 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::BR_JTadd)) |
| 11104 | .addReg(RegNo: NewVReg5, Flags: RegState::Kill) |
| 11105 | .addReg(RegNo: NewVReg4) |
| 11106 | .addJumpTableIndex(Idx: MJTI); |
| 11107 | } else { |
| 11108 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::BR_JTr)) |
| 11109 | .addReg(RegNo: NewVReg5, Flags: RegState::Kill) |
| 11110 | .addJumpTableIndex(Idx: MJTI); |
| 11111 | } |
| 11112 | } |
| 11113 | |
| 11114 | // Add the jump table entries as successors to the MBB. |
| 11115 | SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs; |
| 11116 | for (MachineBasicBlock *CurMBB : LPadList) { |
| 11117 | if (SeenMBBs.insert(Ptr: CurMBB).second) |
| 11118 | DispContBB->addSuccessor(Succ: CurMBB); |
| 11119 | } |
| 11120 | |
| 11121 | // N.B. the order the invoke BBs are processed in doesn't matter here. |
| 11122 | const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF); |
| 11123 | SmallVector<MachineBasicBlock*, 64> MBBLPads; |
| 11124 | for (MachineBasicBlock *BB : InvokeBBs) { |
| 11125 | |
| 11126 | // Remove the landing pad successor from the invoke block and replace it |
| 11127 | // with the new dispatch block. |
| 11128 | SmallVector<MachineBasicBlock*, 4> Successors(BB->successors()); |
| 11129 | while (!Successors.empty()) { |
| 11130 | MachineBasicBlock *SMBB = Successors.pop_back_val(); |
| 11131 | if (SMBB->isEHPad()) { |
| 11132 | BB->removeSuccessor(Succ: SMBB); |
| 11133 | MBBLPads.push_back(Elt: SMBB); |
| 11134 | } |
| 11135 | } |
| 11136 | |
| 11137 | BB->addSuccessor(Succ: DispatchBB, Prob: BranchProbability::getZero()); |
| 11138 | BB->normalizeSuccProbs(); |
| 11139 | |
| 11140 | // Find the invoke call and mark all of the callee-saved registers as |
| 11141 | // 'implicit defined' so that they're spilled. This prevents code from |
| 11142 | // moving instructions to before the EH block, where they will never be |
| 11143 | // executed. |
| 11144 | for (MachineBasicBlock::reverse_iterator |
| 11145 | II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) { |
| 11146 | if (!II->isCall()) continue; |
| 11147 | |
| 11148 | DenseSet<unsigned> DefRegs; |
| 11149 | for (MachineInstr::mop_iterator |
| 11150 | OI = II->operands_begin(), OE = II->operands_end(); |
| 11151 | OI != OE; ++OI) { |
| 11152 | if (!OI->isReg()) continue; |
| 11153 | DefRegs.insert(V: OI->getReg()); |
| 11154 | } |
| 11155 | |
| 11156 | MachineInstrBuilder MIB(*MF, &*II); |
| 11157 | |
| 11158 | for (unsigned i = 0; SavedRegs[i] != 0; ++i) { |
| 11159 | unsigned Reg = SavedRegs[i]; |
| 11160 | if (Subtarget->isThumb2() && |
| 11161 | !ARM::tGPRRegClass.contains(Reg) && |
| 11162 | !ARM::hGPRRegClass.contains(Reg)) |
| 11163 | continue; |
| 11164 | if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg)) |
| 11165 | continue; |
| 11166 | if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg)) |
| 11167 | continue; |
| 11168 | if (!DefRegs.contains(V: Reg)) |
| 11169 | MIB.addReg(RegNo: Reg, Flags: RegState::ImplicitDefine | RegState::Dead); |
| 11170 | } |
| 11171 | |
| 11172 | break; |
| 11173 | } |
| 11174 | } |
| 11175 | |
| 11176 | // Mark all former landing pads as non-landing pads. The dispatch is the only |
| 11177 | // landing pad now. |
| 11178 | for (MachineBasicBlock *MBBLPad : MBBLPads) |
| 11179 | MBBLPad->setIsEHPad(false); |
| 11180 | |
| 11181 | // The instruction is gone now. |
| 11182 | MI.eraseFromParent(); |
| 11183 | } |
| 11184 | |
| 11185 | static |
| 11186 | MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { |
| 11187 | for (MachineBasicBlock *S : MBB->successors()) |
| 11188 | if (S != Succ) |
| 11189 | return S; |
| 11190 | llvm_unreachable("Expecting a BB with two successors!" ); |
| 11191 | } |
| 11192 | |
| 11193 | /// Return the load opcode for a given load size. If load size >= 8, |
| 11194 | /// a NEON opcode will be returned. |
| 11195 | static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) { |
| 11196 | if (LdSize >= 8) |
| 11197 | return LdSize == 16 ? ARM::VLD1q32wb_fixed |
| 11198 | : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0; |
| 11199 | if (IsThumb1) |
| 11200 | return LdSize == 4 ? ARM::tLDRi |
| 11201 | : LdSize == 2 ? ARM::tLDRHi |
| 11202 | : LdSize == 1 ? ARM::tLDRBi : 0; |
| 11203 | if (IsThumb2) |
| 11204 | return LdSize == 4 ? ARM::t2LDR_POST |
| 11205 | : LdSize == 2 ? ARM::t2LDRH_POST |
| 11206 | : LdSize == 1 ? ARM::t2LDRB_POST : 0; |
| 11207 | return LdSize == 4 ? ARM::LDR_POST_IMM |
| 11208 | : LdSize == 2 ? ARM::LDRH_POST |
| 11209 | : LdSize == 1 ? ARM::LDRB_POST_IMM : 0; |
| 11210 | } |
| 11211 | |
| 11212 | /// Return the store opcode for a given store size. If store size >= 8, |
| 11213 | /// a NEON opcode will be returned. |
| 11214 | static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) { |
| 11215 | if (StSize >= 8) |
| 11216 | return StSize == 16 ? ARM::VST1q32wb_fixed |
| 11217 | : StSize == 8 ? ARM::VST1d32wb_fixed : 0; |
| 11218 | if (IsThumb1) |
| 11219 | return StSize == 4 ? ARM::tSTRi |
| 11220 | : StSize == 2 ? ARM::tSTRHi |
| 11221 | : StSize == 1 ? ARM::tSTRBi : 0; |
| 11222 | if (IsThumb2) |
| 11223 | return StSize == 4 ? ARM::t2STR_POST |
| 11224 | : StSize == 2 ? ARM::t2STRH_POST |
| 11225 | : StSize == 1 ? ARM::t2STRB_POST : 0; |
| 11226 | return StSize == 4 ? ARM::STR_POST_IMM |
| 11227 | : StSize == 2 ? ARM::STRH_POST |
| 11228 | : StSize == 1 ? ARM::STRB_POST_IMM : 0; |
| 11229 | } |
| 11230 | |
| 11231 | /// Emit a post-increment load operation with given size. The instructions |
| 11232 | /// will be added to BB at Pos. |
| 11233 | static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, |
| 11234 | const TargetInstrInfo *TII, const DebugLoc &dl, |
| 11235 | unsigned LdSize, unsigned Data, unsigned AddrIn, |
| 11236 | unsigned AddrOut, bool IsThumb1, bool IsThumb2) { |
| 11237 | unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2); |
| 11238 | assert(LdOpc != 0 && "Should have a load opcode"); |
| 11239 | if (LdSize >= 8) { |
| 11240 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: LdOpc), DestReg: Data) |
| 11241 | .addReg(RegNo: AddrOut, Flags: RegState::Define) |
| 11242 | .addReg(RegNo: AddrIn) |
| 11243 | .addImm(Val: 0) |
| 11244 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11245 | } else if (IsThumb1) { |
| 11246 | // load + update AddrIn |
| 11247 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: LdOpc), DestReg: Data) |
| 11248 | .addReg(RegNo: AddrIn) |
| 11249 | .addImm(Val: 0) |
| 11250 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11251 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: ARM::tADDi8), DestReg: AddrOut) |
| 11252 | .add(MO: t1CondCodeOp()) |
| 11253 | .addReg(RegNo: AddrIn) |
| 11254 | .addImm(Val: LdSize) |
| 11255 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11256 | } else if (IsThumb2) { |
| 11257 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: LdOpc), DestReg: Data) |
| 11258 | .addReg(RegNo: AddrOut, Flags: RegState::Define) |
| 11259 | .addReg(RegNo: AddrIn) |
| 11260 | .addImm(Val: LdSize) |
| 11261 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11262 | } else { // arm |
| 11263 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: LdOpc), DestReg: Data) |
| 11264 | .addReg(RegNo: AddrOut, Flags: RegState::Define) |
| 11265 | .addReg(RegNo: AddrIn) |
| 11266 | .addReg(RegNo: 0) |
| 11267 | .addImm(Val: LdSize) |
| 11268 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11269 | } |
| 11270 | } |
| 11271 | |
| 11272 | /// Emit a post-increment store operation with given size. The instructions |
| 11273 | /// will be added to BB at Pos. |
| 11274 | static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, |
| 11275 | const TargetInstrInfo *TII, const DebugLoc &dl, |
| 11276 | unsigned StSize, unsigned Data, unsigned AddrIn, |
| 11277 | unsigned AddrOut, bool IsThumb1, bool IsThumb2) { |
| 11278 | unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2); |
| 11279 | assert(StOpc != 0 && "Should have a store opcode"); |
| 11280 | if (StSize >= 8) { |
| 11281 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: StOpc), DestReg: AddrOut) |
| 11282 | .addReg(RegNo: AddrIn) |
| 11283 | .addImm(Val: 0) |
| 11284 | .addReg(RegNo: Data) |
| 11285 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11286 | } else if (IsThumb1) { |
| 11287 | // store + update AddrIn |
| 11288 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: StOpc)) |
| 11289 | .addReg(RegNo: Data) |
| 11290 | .addReg(RegNo: AddrIn) |
| 11291 | .addImm(Val: 0) |
| 11292 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11293 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: ARM::tADDi8), DestReg: AddrOut) |
| 11294 | .add(MO: t1CondCodeOp()) |
| 11295 | .addReg(RegNo: AddrIn) |
| 11296 | .addImm(Val: StSize) |
| 11297 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11298 | } else if (IsThumb2) { |
| 11299 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: StOpc), DestReg: AddrOut) |
| 11300 | .addReg(RegNo: Data) |
| 11301 | .addReg(RegNo: AddrIn) |
| 11302 | .addImm(Val: StSize) |
| 11303 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11304 | } else { // arm |
| 11305 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: StOpc), DestReg: AddrOut) |
| 11306 | .addReg(RegNo: Data) |
| 11307 | .addReg(RegNo: AddrIn) |
| 11308 | .addReg(RegNo: 0) |
| 11309 | .addImm(Val: StSize) |
| 11310 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11311 | } |
| 11312 | } |
| 11313 | |
| 11314 | MachineBasicBlock * |
| 11315 | ARMTargetLowering::EmitStructByval(MachineInstr &MI, |
| 11316 | MachineBasicBlock *BB) const { |
| 11317 | // This pseudo instruction has 4 operands: dst, src, size, alignment. |
| 11318 | // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold(). |
| 11319 | // Otherwise, we will generate unrolled scalar copies. |
| 11320 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 11321 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); |
| 11322 | MachineFunction::iterator It = ++BB->getIterator(); |
| 11323 | |
| 11324 | Register dest = MI.getOperand(i: 0).getReg(); |
| 11325 | Register src = MI.getOperand(i: 1).getReg(); |
| 11326 | unsigned SizeVal = MI.getOperand(i: 2).getImm(); |
| 11327 | unsigned Alignment = MI.getOperand(i: 3).getImm(); |
| 11328 | DebugLoc dl = MI.getDebugLoc(); |
| 11329 | |
| 11330 | MachineFunction *MF = BB->getParent(); |
| 11331 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
| 11332 | unsigned UnitSize = 0; |
| 11333 | const TargetRegisterClass *TRC = nullptr; |
| 11334 | const TargetRegisterClass *VecTRC = nullptr; |
| 11335 | |
| 11336 | bool IsThumb1 = Subtarget->isThumb1Only(); |
| 11337 | bool IsThumb2 = Subtarget->isThumb2(); |
| 11338 | bool IsThumb = Subtarget->isThumb(); |
| 11339 | |
| 11340 | if (Alignment & 1) { |
| 11341 | UnitSize = 1; |
| 11342 | } else if (Alignment & 2) { |
| 11343 | UnitSize = 2; |
| 11344 | } else { |
| 11345 | // Check whether we can use NEON instructions. |
| 11346 | if (!MF->getFunction().hasFnAttribute(Kind: Attribute::NoImplicitFloat) && |
| 11347 | Subtarget->hasNEON()) { |
| 11348 | if ((Alignment % 16 == 0) && SizeVal >= 16) |
| 11349 | UnitSize = 16; |
| 11350 | else if ((Alignment % 8 == 0) && SizeVal >= 8) |
| 11351 | UnitSize = 8; |
| 11352 | } |
| 11353 | // Can't use NEON instructions. |
| 11354 | if (UnitSize == 0) |
| 11355 | UnitSize = 4; |
| 11356 | } |
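// For example, Alignment = 8 with NEON available and SizeVal >= 8 selects
// UnitSize = 8, while Alignment = 2 selects UnitSize = 2.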
| 11357 | |
| 11358 | // Select the correct opcode and register class for unit size load/store |
| 11359 | bool IsNeon = UnitSize >= 8; |
| 11360 | TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass; |
| 11361 | if (IsNeon) |
| 11362 | VecTRC = UnitSize == 16 ? &ARM::DPairRegClass |
| 11363 | : UnitSize == 8 ? &ARM::DPRRegClass |
| 11364 | : nullptr; |
| 11365 | |
| 11366 | unsigned BytesLeft = SizeVal % UnitSize; |
| 11367 | unsigned LoopSize = SizeVal - BytesLeft; |
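// For example, SizeVal = 10 with UnitSize = 4 gives LoopSize = 8 and
// BytesLeft = 2; the two trailing bytes are handled byte-by-byte below.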
| 11368 | |
| 11369 | if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) { |
| 11370 | // Use LDR and STR to copy. |
| 11371 | // [scratch, srcOut] = LDR_POST(srcIn, UnitSize) |
| 11372 | // [destOut] = STR_POST(scratch, destIn, UnitSize) |
| 11373 | unsigned srcIn = src; |
| 11374 | unsigned destIn = dest; |
| 11375 | for (unsigned i = 0; i < LoopSize; i+=UnitSize) { |
| 11376 | Register srcOut = MRI.createVirtualRegister(RegClass: TRC); |
| 11377 | Register destOut = MRI.createVirtualRegister(RegClass: TRC); |
| 11378 | Register scratch = MRI.createVirtualRegister(RegClass: IsNeon ? VecTRC : TRC); |
| 11379 | emitPostLd(BB, Pos: MI, TII, dl, LdSize: UnitSize, Data: scratch, AddrIn: srcIn, AddrOut: srcOut, |
| 11380 | IsThumb1, IsThumb2); |
| 11381 | emitPostSt(BB, Pos: MI, TII, dl, StSize: UnitSize, Data: scratch, AddrIn: destIn, AddrOut: destOut, |
| 11382 | IsThumb1, IsThumb2); |
| 11383 | srcIn = srcOut; |
| 11384 | destIn = destOut; |
| 11385 | } |
| 11386 | |
| 11387 | // Handle the leftover bytes with LDRB and STRB. |
| 11388 | // [scratch, srcOut] = LDRB_POST(srcIn, 1) |
| 11389 | // [destOut] = STRB_POST(scratch, destIn, 1) |
| 11390 | for (unsigned i = 0; i < BytesLeft; i++) { |
| 11391 | Register srcOut = MRI.createVirtualRegister(RegClass: TRC); |
| 11392 | Register destOut = MRI.createVirtualRegister(RegClass: TRC); |
| 11393 | Register scratch = MRI.createVirtualRegister(RegClass: TRC); |
| 11394 | emitPostLd(BB, Pos: MI, TII, dl, LdSize: 1, Data: scratch, AddrIn: srcIn, AddrOut: srcOut, |
| 11395 | IsThumb1, IsThumb2); |
| 11396 | emitPostSt(BB, Pos: MI, TII, dl, StSize: 1, Data: scratch, AddrIn: destIn, AddrOut: destOut, |
| 11397 | IsThumb1, IsThumb2); |
| 11398 | srcIn = srcOut; |
| 11399 | destIn = destOut; |
| 11400 | } |
| 11401 | MI.eraseFromParent(); // The instruction is gone now. |
| 11402 | return BB; |
| 11403 | } |
| 11404 | |
| 11405 | // Expand the pseudo op to a loop. |
| 11406 | // thisMBB: |
| 11407 | // ... |
| 11408 | // movw varEnd, # --> with thumb2 |
| 11409 | // movt varEnd, # |
| 11410 | // ldrcp varEnd, idx --> without thumb2 |
| 11411 | // fallthrough --> loopMBB |
| 11412 | // loopMBB: |
| 11413 | // PHI varPhi, varEnd, varLoop |
| 11414 | // PHI srcPhi, src, srcLoop |
| 11415 | // PHI destPhi, dst, destLoop |
| 11416 | // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) |
| 11417 | // [destLoop] = STR_POST(scratch, destPhi, UnitSize) |
| 11418 | // subs varLoop, varPhi, #UnitSize |
| 11419 | // bne loopMBB |
| 11420 | // fallthrough --> exitMBB |
| 11421 | // exitMBB: |
| 11422 | // epilogue to handle left-over bytes |
| 11423 | // [scratch, srcOut] = LDRB_POST(srcLoop, 1) |
| 11424 | // [destOut] = STRB_POST(scratch, destLoop, 1) |
| 11425 | MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(BB: LLVM_BB); |
| 11426 | MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(BB: LLVM_BB); |
| 11427 | MF->insert(MBBI: It, MBB: loopMBB); |
| 11428 | MF->insert(MBBI: It, MBB: exitMBB); |
| 11429 | |
| 11430 | // Set the call frame size on entry to the new basic blocks. |
| 11431 | unsigned CallFrameSize = TII->getCallFrameSizeAt(MI); |
| 11432 | loopMBB->setCallFrameSize(CallFrameSize); |
| 11433 | exitMBB->setCallFrameSize(CallFrameSize); |
| 11434 | |
| 11435 | // Transfer the remainder of BB and its successor edges to exitMBB. |
| 11436 | exitMBB->splice(Where: exitMBB->begin(), Other: BB, |
| 11437 | From: std::next(x: MachineBasicBlock::iterator(MI)), To: BB->end()); |
| 11438 | exitMBB->transferSuccessorsAndUpdatePHIs(FromMBB: BB); |
| 11439 | |
| 11440 | // Load an immediate to varEnd. |
| 11441 | Register varEnd = MRI.createVirtualRegister(RegClass: TRC); |
| 11442 | if (Subtarget->useMovt()) { |
| 11443 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: IsThumb ? ARM::t2MOVi32imm : ARM::MOVi32imm), |
| 11444 | DestReg: varEnd) |
| 11445 | .addImm(Val: LoopSize); |
| 11446 | } else if (Subtarget->genExecuteOnly()) { |
assert(IsThumb && "Non-thumb expected to have used movt");
| 11448 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::tMOVi32imm), DestReg: varEnd).addImm(Val: LoopSize); |
| 11449 | } else { |
| 11450 | MachineConstantPool *ConstantPool = MF->getConstantPool(); |
| 11451 | Type *Int32Ty = Type::getInt32Ty(C&: MF->getFunction().getContext()); |
| 11452 | const Constant *C = ConstantInt::get(Ty: Int32Ty, V: LoopSize); |
| 11453 | |
| 11454 | // MachineConstantPool wants an explicit alignment. |
| 11455 | Align Alignment = MF->getDataLayout().getPrefTypeAlign(Ty: Int32Ty); |
| 11456 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment); |
| 11457 | MachineMemOperand *CPMMO = |
| 11458 | MF->getMachineMemOperand(PtrInfo: MachinePointerInfo::getConstantPool(MF&: *MF), |
| 11459 | F: MachineMemOperand::MOLoad, Size: 4, BaseAlignment: Align(4)); |
| 11460 | |
| 11461 | if (IsThumb) |
| 11462 | BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDRpci)) |
| 11463 | .addReg(RegNo: varEnd, Flags: RegState::Define) |
| 11464 | .addConstantPoolIndex(Idx) |
| 11465 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11466 | .addMemOperand(MMO: CPMMO); |
| 11467 | else |
| 11468 | BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::LDRcp)) |
| 11469 | .addReg(RegNo: varEnd, Flags: RegState::Define) |
| 11470 | .addConstantPoolIndex(Idx) |
| 11471 | .addImm(Val: 0) |
| 11472 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11473 | .addMemOperand(MMO: CPMMO); |
| 11474 | } |
| 11475 | BB->addSuccessor(Succ: loopMBB); |
| 11476 | |
| 11477 | // Generate the loop body: |
| 11478 | // varPhi = PHI(varLoop, varEnd) |
| 11479 | // srcPhi = PHI(srcLoop, src) |
| 11480 | // destPhi = PHI(destLoop, dst) |
| 11481 | MachineBasicBlock *entryBB = BB; |
| 11482 | BB = loopMBB; |
| 11483 | Register varLoop = MRI.createVirtualRegister(RegClass: TRC); |
| 11484 | Register varPhi = MRI.createVirtualRegister(RegClass: TRC); |
| 11485 | Register srcLoop = MRI.createVirtualRegister(RegClass: TRC); |
| 11486 | Register srcPhi = MRI.createVirtualRegister(RegClass: TRC); |
| 11487 | Register destLoop = MRI.createVirtualRegister(RegClass: TRC); |
| 11488 | Register destPhi = MRI.createVirtualRegister(RegClass: TRC); |
| 11489 | |
| 11490 | BuildMI(BB&: *BB, I: BB->begin(), MIMD: dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: varPhi) |
| 11491 | .addReg(RegNo: varLoop).addMBB(MBB: loopMBB) |
| 11492 | .addReg(RegNo: varEnd).addMBB(MBB: entryBB); |
| 11493 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: srcPhi) |
| 11494 | .addReg(RegNo: srcLoop).addMBB(MBB: loopMBB) |
| 11495 | .addReg(RegNo: src).addMBB(MBB: entryBB); |
| 11496 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: destPhi) |
| 11497 | .addReg(RegNo: destLoop).addMBB(MBB: loopMBB) |
| 11498 | .addReg(RegNo: dest).addMBB(MBB: entryBB); |
| 11499 | |
// [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
// [destLoop] = STR_POST(scratch, destPhi, UnitSize)
| 11502 | Register scratch = MRI.createVirtualRegister(RegClass: IsNeon ? VecTRC : TRC); |
| 11503 | emitPostLd(BB, Pos: BB->end(), TII, dl, LdSize: UnitSize, Data: scratch, AddrIn: srcPhi, AddrOut: srcLoop, |
| 11504 | IsThumb1, IsThumb2); |
| 11505 | emitPostSt(BB, Pos: BB->end(), TII, dl, StSize: UnitSize, Data: scratch, AddrIn: destPhi, AddrOut: destLoop, |
| 11506 | IsThumb1, IsThumb2); |
| 11507 | |
| 11508 | // Decrement loop variable by UnitSize. |
| 11509 | if (IsThumb1) { |
| 11510 | BuildMI(BB&: *BB, I: BB->end(), MIMD: dl, MCID: TII->get(Opcode: ARM::tSUBi8), DestReg: varLoop) |
| 11511 | .add(MO: t1CondCodeOp()) |
| 11512 | .addReg(RegNo: varPhi) |
| 11513 | .addImm(Val: UnitSize) |
| 11514 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11515 | } else { |
| 11516 | MachineInstrBuilder MIB = |
| 11517 | BuildMI(BB&: *BB, I: BB->end(), MIMD: dl, |
| 11518 | MCID: TII->get(Opcode: IsThumb2 ? ARM::t2SUBri : ARM::SUBri), DestReg: varLoop); |
| 11519 | MIB.addReg(RegNo: varPhi) |
| 11520 | .addImm(Val: UnitSize) |
| 11521 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11522 | .add(MO: condCodeOp()); |
| 11523 | MIB->getOperand(i: 5).setReg(ARM::CPSR); |
| 11524 | MIB->getOperand(i: 5).setIsDef(true); |
| 11525 | } |
| 11526 | BuildMI(BB&: *BB, I: BB->end(), MIMD: dl, |
| 11527 | MCID: TII->get(Opcode: IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc)) |
| 11528 | .addMBB(MBB: loopMBB).addImm(Val: ARMCC::NE).addReg(RegNo: ARM::CPSR); |
| 11529 | |
| 11530 | // loopMBB can loop back to loopMBB or fall through to exitMBB. |
| 11531 | BB->addSuccessor(Succ: loopMBB); |
| 11532 | BB->addSuccessor(Succ: exitMBB); |
| 11533 | |
| 11534 | // Add epilogue to handle BytesLeft. |
| 11535 | BB = exitMBB; |
| 11536 | auto StartOfExit = exitMBB->begin(); |
| 11537 | |
| 11538 | // [scratch, srcOut] = LDRB_POST(srcLoop, 1) |
| 11539 | // [destOut] = STRB_POST(scratch, destLoop, 1) |
| 11540 | unsigned srcIn = srcLoop; |
| 11541 | unsigned destIn = destLoop; |
| 11542 | for (unsigned i = 0; i < BytesLeft; i++) { |
| 11543 | Register srcOut = MRI.createVirtualRegister(RegClass: TRC); |
| 11544 | Register destOut = MRI.createVirtualRegister(RegClass: TRC); |
| 11545 | Register scratch = MRI.createVirtualRegister(RegClass: TRC); |
| 11546 | emitPostLd(BB, Pos: StartOfExit, TII, dl, LdSize: 1, Data: scratch, AddrIn: srcIn, AddrOut: srcOut, |
| 11547 | IsThumb1, IsThumb2); |
| 11548 | emitPostSt(BB, Pos: StartOfExit, TII, dl, StSize: 1, Data: scratch, AddrIn: destIn, AddrOut: destOut, |
| 11549 | IsThumb1, IsThumb2); |
| 11550 | srcIn = srcOut; |
| 11551 | destIn = destOut; |
| 11552 | } |
| 11553 | |
| 11554 | MI.eraseFromParent(); // The instruction is gone now. |
| 11555 | return BB; |
| 11556 | } |
| 11557 | |
| 11558 | MachineBasicBlock * |
| 11559 | ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI, |
| 11560 | MachineBasicBlock *MBB) const { |
| 11561 | const TargetMachine &TM = getTargetMachine(); |
| 11562 | const TargetInstrInfo &TII = *Subtarget->getInstrInfo(); |
| 11563 | DebugLoc DL = MI.getDebugLoc(); |
| 11564 | |
assert(TM.getTargetTriple().isOSWindows() &&
"__chkstk is only supported on Windows");
assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode");
| 11568 | |
// __chkstk takes the number of words to allocate on the stack in R4, and
// returns the stack adjustment in number of bytes in R4. This will not
// clobber any other registers (other than the obvious lr).
//
// Although, technically, IP should be considered a register which may be
// clobbered, the call itself will not touch it. Windows on ARM is a pure
// Thumb-2 environment, so there is no interworking required. As a result, we
// do not expect a veneer to be emitted by the linker, clobbering IP.
| 11577 | // |
| 11578 | // Each module receives its own copy of __chkstk, so no import thunk is |
| 11579 | // required, again, ensuring that IP is not clobbered. |
| 11580 | // |
// Finally, although some linkers may theoretically provide a trampoline for
// out-of-range calls (which are quite common due to the 32M range limitation
// of Thumb branches), we can generate the long-call version via
// -mcmodel=large, alleviating the need for the trampoline, which may clobber
// IP.
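//
// As an illustrative sketch of the protocol: to probe 4096 bytes, the caller
// places 1024 (the word count) in R4; on return, R4 holds the byte
// adjustment that the t2SUBrr emitted below subtracts from SP.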
| 11586 | |
| 11587 | RTLIB::LibcallImpl ChkStkLibcall = getLibcallImpl(Call: RTLIB::STACK_PROBE); |
| 11588 | if (ChkStkLibcall == RTLIB::Unsupported) |
reportFatalUsageError(reason: "no available implementation of __chkstk");
| 11590 | |
| 11591 | const char *ChkStk = getLibcallImplName(Call: ChkStkLibcall).data(); |
| 11592 | switch (TM.getCodeModel()) { |
| 11593 | case CodeModel::Tiny: |
| 11594 | llvm_unreachable("Tiny code model not available on ARM." ); |
| 11595 | case CodeModel::Small: |
| 11596 | case CodeModel::Medium: |
| 11597 | case CodeModel::Kernel: |
| 11598 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: ARM::tBL)) |
| 11599 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11600 | .addExternalSymbol(FnName: ChkStk) |
| 11601 | .addReg(RegNo: ARM::R4, Flags: RegState::Implicit | RegState::Kill) |
| 11602 | .addReg(RegNo: ARM::R4, Flags: RegState::Implicit | RegState::Define) |
| 11603 | .addReg(RegNo: ARM::R12, |
| 11604 | Flags: RegState::Implicit | RegState::Define | RegState::Dead) |
| 11605 | .addReg(RegNo: ARM::CPSR, |
| 11606 | Flags: RegState::Implicit | RegState::Define | RegState::Dead); |
| 11607 | break; |
| 11608 | case CodeModel::Large: { |
| 11609 | MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); |
| 11610 | Register Reg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11611 | |
| 11612 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: ARM::t2MOVi32imm), DestReg: Reg) |
| 11613 | .addExternalSymbol(FnName: ChkStk); |
| 11614 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: gettBLXrOpcode(MF: *MBB->getParent()))) |
| 11615 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11616 | .addReg(RegNo: Reg, Flags: RegState::Kill) |
| 11617 | .addReg(RegNo: ARM::R4, Flags: RegState::Implicit | RegState::Kill) |
| 11618 | .addReg(RegNo: ARM::R4, Flags: RegState::Implicit | RegState::Define) |
| 11619 | .addReg(RegNo: ARM::R12, |
| 11620 | Flags: RegState::Implicit | RegState::Define | RegState::Dead) |
| 11621 | .addReg(RegNo: ARM::CPSR, |
| 11622 | Flags: RegState::Implicit | RegState::Define | RegState::Dead); |
| 11623 | break; |
| 11624 | } |
| 11625 | } |
| 11626 | |
| 11627 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: ARM::t2SUBrr), DestReg: ARM::SP) |
| 11628 | .addReg(RegNo: ARM::SP, Flags: RegState::Kill) |
| 11629 | .addReg(RegNo: ARM::R4, Flags: RegState::Kill) |
| 11630 | .setMIFlags(MachineInstr::FrameSetup) |
| 11631 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11632 | .add(MO: condCodeOp()); |
| 11633 | |
| 11634 | MI.eraseFromParent(); |
| 11635 | return MBB; |
| 11636 | } |
| 11637 | |
| 11638 | MachineBasicBlock * |
| 11639 | ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI, |
| 11640 | MachineBasicBlock *MBB) const { |
| 11641 | DebugLoc DL = MI.getDebugLoc(); |
| 11642 | MachineFunction *MF = MBB->getParent(); |
| 11643 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 11644 | |
| 11645 | MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock(); |
| 11646 | MF->insert(MBBI: ++MBB->getIterator(), MBB: ContBB); |
| 11647 | ContBB->splice(Where: ContBB->begin(), Other: MBB, |
| 11648 | From: std::next(x: MachineBasicBlock::iterator(MI)), To: MBB->end()); |
| 11649 | ContBB->transferSuccessorsAndUpdatePHIs(FromMBB: MBB); |
| 11650 | MBB->addSuccessor(Succ: ContBB); |
| 11651 | |
| 11652 | MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); |
| 11653 | BuildMI(BB: TrapBB, MIMD: DL, MCID: TII->get(Opcode: ARM::t__brkdiv0)); |
| 11654 | MF->push_back(MBB: TrapBB); |
| 11655 | MBB->addSuccessor(Succ: TrapBB); |
| 11656 | |
| 11657 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII->get(Opcode: ARM::tCMPi8)) |
| 11658 | .addReg(RegNo: MI.getOperand(i: 0).getReg()) |
| 11659 | .addImm(Val: 0) |
| 11660 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11661 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII->get(Opcode: ARM::t2Bcc)) |
| 11662 | .addMBB(MBB: TrapBB) |
| 11663 | .addImm(Val: ARMCC::EQ) |
| 11664 | .addReg(RegNo: ARM::CPSR); |
| 11665 | |
| 11666 | MI.eraseFromParent(); |
| 11667 | return ContBB; |
| 11668 | } |
| 11669 | |
| 11670 | // The CPSR operand of SelectItr might be missing a kill marker |
| 11671 | // because there were multiple uses of CPSR, and ISel didn't know |
| 11672 | // which to mark. Figure out whether SelectItr should have had a |
| 11673 | // kill marker, and set it if it should. Returns the correct kill |
| 11674 | // marker value. |
| 11675 | static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr, |
| 11676 | MachineBasicBlock* BB, |
| 11677 | const TargetRegisterInfo* TRI) { |
| 11678 | // Scan forward through BB for a use/def of CPSR. |
| 11679 | MachineBasicBlock::iterator miI(std::next(x: SelectItr)); |
| 11680 | for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) { |
| 11681 | const MachineInstr& mi = *miI; |
| 11682 | if (mi.readsRegister(Reg: ARM::CPSR, /*TRI=*/nullptr)) |
| 11683 | return false; |
| 11684 | if (mi.definesRegister(Reg: ARM::CPSR, /*TRI=*/nullptr)) |
| 11685 | break; // Should have kill-flag - update below. |
| 11686 | } |
| 11687 | |
| 11688 | // If we hit the end of the block, check whether CPSR is live into a |
| 11689 | // successor. |
| 11690 | if (miI == BB->end()) { |
| 11691 | for (MachineBasicBlock *Succ : BB->successors()) |
| 11692 | if (Succ->isLiveIn(Reg: ARM::CPSR)) |
| 11693 | return false; |
| 11694 | } |
| 11695 | |
// We found a def, or hit the end of the basic block and CPSR wasn't live
// out. SelectItr should have a kill flag on CPSR.
| 11698 | SelectItr->addRegisterKilled(IncomingReg: ARM::CPSR, RegInfo: TRI); |
| 11699 | return true; |
| 11700 | } |
| 11701 | |
/// Adds logic in the loop entry MBB to calculate the loop iteration count
/// and emits t2WhileLoopSetup and t2WhileLoopStart to generate a WLS loop.
| 11704 | static Register genTPEntry(MachineBasicBlock *TpEntry, |
| 11705 | MachineBasicBlock *TpLoopBody, |
| 11706 | MachineBasicBlock *TpExit, Register OpSizeReg, |
| 11707 | const TargetInstrInfo *TII, DebugLoc Dl, |
| 11708 | MachineRegisterInfo &MRI) { |
| 11709 | // Calculates loop iteration count = ceil(n/16) = (n + 15) >> 4. |
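// For example, a size of 37 bytes yields (37 + 15) >> 4 = 3 iterations: two
// full 16-byte vectors plus a predicated 5-byte tail.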
| 11710 | Register AddDestReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11711 | BuildMI(BB: TpEntry, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2ADDri), DestReg: AddDestReg) |
| 11712 | .addUse(RegNo: OpSizeReg) |
| 11713 | .addImm(Val: 15) |
| 11714 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11715 | .addReg(RegNo: 0); |
| 11716 | |
| 11717 | Register LsrDestReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11718 | BuildMI(BB: TpEntry, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2LSRri), DestReg: LsrDestReg) |
| 11719 | .addUse(RegNo: AddDestReg, Flags: RegState::Kill) |
| 11720 | .addImm(Val: 4) |
| 11721 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11722 | .addReg(RegNo: 0); |
| 11723 | |
| 11724 | Register TotalIterationsReg = MRI.createVirtualRegister(RegClass: &ARM::GPRlrRegClass); |
| 11725 | BuildMI(BB: TpEntry, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2WhileLoopSetup), DestReg: TotalIterationsReg) |
| 11726 | .addUse(RegNo: LsrDestReg, Flags: RegState::Kill); |
| 11727 | |
| 11728 | BuildMI(BB: TpEntry, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2WhileLoopStart)) |
| 11729 | .addUse(RegNo: TotalIterationsReg) |
| 11730 | .addMBB(MBB: TpExit); |
| 11731 | |
| 11732 | BuildMI(BB: TpEntry, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2B)) |
| 11733 | .addMBB(MBB: TpLoopBody) |
| 11734 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11735 | |
| 11736 | return TotalIterationsReg; |
| 11737 | } |
| 11738 | |
/// Adds logic in the loop body MBB to generate MVE_VCTP, t2LoopDec and
/// t2LoopEnd. These pseudos are used by later passes to generate
/// tail-predicated loops.
| 11742 | static void genTPLoopBody(MachineBasicBlock *TpLoopBody, |
| 11743 | MachineBasicBlock *TpEntry, MachineBasicBlock *TpExit, |
| 11744 | const TargetInstrInfo *TII, DebugLoc Dl, |
| 11745 | MachineRegisterInfo &MRI, Register OpSrcReg, |
| 11746 | Register OpDestReg, Register ElementCountReg, |
| 11747 | Register TotalIterationsReg, bool IsMemcpy) { |
// First insert the PHI nodes: the current pointer into the src array (memcpy
// only), the current pointer into the dest array, the loop iteration
// counter, and the predication counter.
| 11750 | |
| 11751 | Register SrcPhiReg, CurrSrcReg; |
| 11752 | if (IsMemcpy) { |
| 11753 | // Current position in the src array |
| 11754 | SrcPhiReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11755 | CurrSrcReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11756 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: SrcPhiReg) |
| 11757 | .addUse(RegNo: OpSrcReg) |
| 11758 | .addMBB(MBB: TpEntry) |
| 11759 | .addUse(RegNo: CurrSrcReg) |
| 11760 | .addMBB(MBB: TpLoopBody); |
| 11761 | } |
| 11762 | |
| 11763 | // Current position in the dest array |
| 11764 | Register DestPhiReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11765 | Register CurrDestReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11766 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: DestPhiReg) |
| 11767 | .addUse(RegNo: OpDestReg) |
| 11768 | .addMBB(MBB: TpEntry) |
| 11769 | .addUse(RegNo: CurrDestReg) |
| 11770 | .addMBB(MBB: TpLoopBody); |
| 11771 | |
| 11772 | // Current loop counter |
| 11773 | Register LoopCounterPhiReg = MRI.createVirtualRegister(RegClass: &ARM::GPRlrRegClass); |
| 11774 | Register RemainingLoopIterationsReg = |
| 11775 | MRI.createVirtualRegister(RegClass: &ARM::GPRlrRegClass); |
| 11776 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: LoopCounterPhiReg) |
| 11777 | .addUse(RegNo: TotalIterationsReg) |
| 11778 | .addMBB(MBB: TpEntry) |
| 11779 | .addUse(RegNo: RemainingLoopIterationsReg) |
| 11780 | .addMBB(MBB: TpLoopBody); |
| 11781 | |
| 11782 | // Predication counter |
| 11783 | Register PredCounterPhiReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11784 | Register RemainingElementsReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11785 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: PredCounterPhiReg) |
| 11786 | .addUse(RegNo: ElementCountReg) |
| 11787 | .addMBB(MBB: TpEntry) |
| 11788 | .addUse(RegNo: RemainingElementsReg) |
| 11789 | .addMBB(MBB: TpLoopBody); |
| 11790 | |
| 11791 | // Pass predication counter to VCTP |
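// (VCTP8 enables the first min(remaining elements, 16) byte lanes of the
// predicate, so the final, partial iteration is masked off automatically.)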
| 11792 | Register VccrReg = MRI.createVirtualRegister(RegClass: &ARM::VCCRRegClass); |
| 11793 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::MVE_VCTP8), DestReg: VccrReg) |
| 11794 | .addUse(RegNo: PredCounterPhiReg) |
| 11795 | .addImm(Val: ARMVCC::None) |
| 11796 | .addReg(RegNo: 0) |
| 11797 | .addReg(RegNo: 0); |
| 11798 | |
| 11799 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2SUBri), DestReg: RemainingElementsReg) |
| 11800 | .addUse(RegNo: PredCounterPhiReg) |
| 11801 | .addImm(Val: 16) |
| 11802 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11803 | .addReg(RegNo: 0); |
| 11804 | |
| 11805 | // VLDRB (only if memcpy) and VSTRB instructions, predicated using VPR |
| 11806 | Register SrcValueReg; |
| 11807 | if (IsMemcpy) { |
| 11808 | SrcValueReg = MRI.createVirtualRegister(RegClass: &ARM::MQPRRegClass); |
| 11809 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::MVE_VLDRBU8_post)) |
| 11810 | .addDef(RegNo: CurrSrcReg) |
| 11811 | .addDef(RegNo: SrcValueReg) |
| 11812 | .addReg(RegNo: SrcPhiReg) |
| 11813 | .addImm(Val: 16) |
| 11814 | .addImm(Val: ARMVCC::Then) |
| 11815 | .addUse(RegNo: VccrReg) |
| 11816 | .addReg(RegNo: 0); |
| 11817 | } else |
| 11818 | SrcValueReg = OpSrcReg; |
| 11819 | |
| 11820 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::MVE_VSTRBU8_post)) |
| 11821 | .addDef(RegNo: CurrDestReg) |
| 11822 | .addUse(RegNo: SrcValueReg) |
| 11823 | .addReg(RegNo: DestPhiReg) |
| 11824 | .addImm(Val: 16) |
| 11825 | .addImm(Val: ARMVCC::Then) |
| 11826 | .addUse(RegNo: VccrReg) |
| 11827 | .addReg(RegNo: 0); |
| 11828 | |
// Add the pseudo instructions for decrementing the loop counter and marking
// the end: t2LoopDec and t2LoopEnd.
| 11831 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2LoopDec), DestReg: RemainingLoopIterationsReg) |
| 11832 | .addUse(RegNo: LoopCounterPhiReg) |
| 11833 | .addImm(Val: 1); |
| 11834 | |
| 11835 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2LoopEnd)) |
| 11836 | .addUse(RegNo: RemainingLoopIterationsReg) |
| 11837 | .addMBB(MBB: TpLoopBody); |
| 11838 | |
| 11839 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2B)) |
| 11840 | .addMBB(MBB: TpExit) |
| 11841 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11842 | } |
| 11843 | |
| 11844 | bool ARMTargetLowering::supportKCFIBundles() const { |
| 11845 | // KCFI is supported in all ARM/Thumb modes |
| 11846 | return true; |
| 11847 | } |
| 11848 | |
| 11849 | MachineInstr * |
| 11850 | ARMTargetLowering::EmitKCFICheck(MachineBasicBlock &MBB, |
| 11851 | MachineBasicBlock::instr_iterator &MBBI, |
| 11852 | const TargetInstrInfo *TII) const { |
| 11853 | assert(MBBI->isCall() && MBBI->getCFIType() && |
| 11854 | "Invalid call instruction for a KCFI check" ); |
| 11855 | |
| 11856 | MachineOperand *TargetOp = nullptr; |
| 11857 | switch (MBBI->getOpcode()) { |
| 11858 | // ARM mode opcodes |
| 11859 | case ARM::BLX: |
| 11860 | case ARM::BLX_pred: |
| 11861 | case ARM::BLX_noip: |
| 11862 | case ARM::BLX_pred_noip: |
| 11863 | case ARM::BX_CALL: |
| 11864 | TargetOp = &MBBI->getOperand(i: 0); |
| 11865 | break; |
| 11866 | case ARM::TCRETURNri: |
| 11867 | case ARM::TCRETURNrinotr12: |
| 11868 | case ARM::TAILJMPr: |
| 11869 | case ARM::TAILJMPr4: |
| 11870 | TargetOp = &MBBI->getOperand(i: 0); |
| 11871 | break; |
| 11872 | // Thumb mode opcodes (Thumb1 and Thumb2) |
// Note: Most Thumb call instructions have predicate operands before the
// target register. Format: tBLXr pred, predreg, target_register, ...
| 11875 | case ARM::tBLXr: // Thumb1/Thumb2: BLX register (requires V5T) |
| 11876 | case ARM::tBLXr_noip: // Thumb1/Thumb2: BLX register, no IP clobber |
| 11877 | case ARM::tBX_CALL: // Thumb1 only: BX call (push LR, BX) |
| 11878 | TargetOp = &MBBI->getOperand(i: 2); |
| 11879 | break; |
// Tail call instructions don't have predicates; the target is operand 0.
| 11881 | case ARM::tTAILJMPr: // Thumb1/Thumb2: Tail call via register |
| 11882 | TargetOp = &MBBI->getOperand(i: 0); |
| 11883 | break; |
| 11884 | default: |
| 11885 | llvm_unreachable("Unexpected CFI call opcode" ); |
| 11886 | } |
| 11887 | |
assert(TargetOp && TargetOp->isReg() && "Invalid target operand");
| 11889 | TargetOp->setIsRenamable(false); |
| 11890 | |
| 11891 | // Select the appropriate KCFI_CHECK variant based on the instruction set |
| 11892 | unsigned KCFICheckOpcode; |
| 11893 | if (Subtarget->isThumb()) { |
| 11894 | if (Subtarget->isThumb2()) { |
| 11895 | KCFICheckOpcode = ARM::KCFI_CHECK_Thumb2; |
| 11896 | } else { |
| 11897 | KCFICheckOpcode = ARM::KCFI_CHECK_Thumb1; |
| 11898 | } |
| 11899 | } else { |
| 11900 | KCFICheckOpcode = ARM::KCFI_CHECK_ARM; |
| 11901 | } |
| 11902 | |
| 11903 | return BuildMI(BB&: MBB, I: MBBI, MIMD: MBBI->getDebugLoc(), MCID: TII->get(Opcode: KCFICheckOpcode)) |
| 11904 | .addReg(RegNo: TargetOp->getReg()) |
| 11905 | .addImm(Val: MBBI->getCFIType()) |
| 11906 | .getInstr(); |
| 11907 | } |
| 11908 | |
| 11909 | MachineBasicBlock * |
| 11910 | ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, |
| 11911 | MachineBasicBlock *BB) const { |
| 11912 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 11913 | DebugLoc dl = MI.getDebugLoc(); |
| 11914 | bool isThumb2 = Subtarget->isThumb2(); |
| 11915 | switch (MI.getOpcode()) { |
| 11916 | default: { |
| 11917 | MI.print(OS&: errs()); |
| 11918 | llvm_unreachable("Unexpected instr type to insert" ); |
| 11919 | } |
| 11920 | |
| 11921 | // Thumb1 post-indexed loads are really just single-register LDMs. |
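// For example, "ldr r0, [r1], #4" becomes "ldm r1!, {r0}", which loads r0
// and writes the incremented base back to r1 in a single instruction.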
| 11922 | case ARM::tLDR_postidx: { |
| 11923 | MachineOperand Def(MI.getOperand(i: 1)); |
| 11924 | BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDMIA_UPD)) |
| 11925 | .add(MO: Def) // Rn_wb |
| 11926 | .add(MO: MI.getOperand(i: 2)) // Rn |
| 11927 | .add(MO: MI.getOperand(i: 3)) // PredImm |
| 11928 | .add(MO: MI.getOperand(i: 4)) // PredReg |
| 11929 | .add(MO: MI.getOperand(i: 0)) // Rt |
| 11930 | .cloneMemRefs(OtherMI: MI); |
| 11931 | MI.eraseFromParent(); |
| 11932 | return BB; |
| 11933 | } |
| 11934 | |
| 11935 | case ARM::MVE_MEMCPYLOOPINST: |
| 11936 | case ARM::MVE_MEMSETLOOPINST: { |
| 11937 | |
// The transformation below expands the MVE_MEMCPYLOOPINST/MVE_MEMSETLOOPINST
// pseudo into a Tail Predicated (TP) loop. It adds the instructions that
// calculate the iteration count (= ceil(size_in_bytes / 16)) in the TP entry
// block and adds the relevant instructions in the TP loop body for the
// generation of a WLSTP loop.

// Below is the relevant portion of the CFG after the transformation.
// The machine basic blocks are shown along with the branch conditions (in
// brackets). Note that the TP entry/exit MBBs depict the entry/exit of this
// portion of the CFG and may not necessarily be the entry/exit of the
// function.
| 11949 | |
| 11950 | // (Relevant) CFG after transformation: |
| 11951 | // TP entry MBB |
| 11952 | // | |
| 11953 | // |-----------------| |
| 11954 | // (n <= 0) (n > 0) |
| 11955 | // | | |
| 11956 | // | TP loop Body MBB<--| |
| 11957 | // | | | |
| 11958 | // \ |___________| |
| 11959 | // \ / |
| 11960 | // TP exit MBB |
| 11961 | |
| 11962 | MachineFunction *MF = BB->getParent(); |
| 11963 | MachineFunctionProperties &Properties = MF->getProperties(); |
| 11964 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
| 11965 | |
| 11966 | Register OpDestReg = MI.getOperand(i: 0).getReg(); |
| 11967 | Register OpSrcReg = MI.getOperand(i: 1).getReg(); |
| 11968 | Register OpSizeReg = MI.getOperand(i: 2).getReg(); |
| 11969 | |
| 11970 | // Allocate the required MBBs and add to parent function. |
| 11971 | MachineBasicBlock *TpEntry = BB; |
| 11972 | MachineBasicBlock *TpLoopBody = MF->CreateMachineBasicBlock(); |
| 11973 | MachineBasicBlock *TpExit; |
| 11974 | |
| 11975 | MF->push_back(MBB: TpLoopBody); |
| 11976 | |
// If any instructions are present in the current block after
// MVE_MEMCPYLOOPINST or MVE_MEMSETLOOPINST, split the current block and
// move the instructions into the newly created exit block. If there are no
// instructions, add an explicit branch to the fall-through block and then
// split.
//
// The split is required for two reasons:
// 1) A terminator (t2WhileLoopStart) will be placed at that site.
// 2) Since a TPLoopBody will be added later, any PHIs in successor blocks
//    need to be updated. splitAt() already handles this.
| 11987 | TpExit = BB->splitAt(SplitInst&: MI, UpdateLiveIns: false); |
| 11988 | if (TpExit == BB) { |
| 11989 | assert(BB->canFallThrough() && "Exit Block must be Fallthrough of the " |
| 11990 | "block containing memcpy/memset Pseudo" ); |
| 11991 | TpExit = BB->getFallThrough(); |
| 11992 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2B)) |
| 11993 | .addMBB(MBB: TpExit) |
| 11994 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11995 | TpExit = BB->splitAt(SplitInst&: MI, UpdateLiveIns: false); |
| 11996 | } |
| 11997 | |
| 11998 | // Add logic for iteration count |
| 11999 | Register TotalIterationsReg = |
| 12000 | genTPEntry(TpEntry, TpLoopBody, TpExit, OpSizeReg, TII, Dl: dl, MRI); |
| 12001 | |
// Add the vectorized (and predicated) load/store instructions.
| 12003 | bool IsMemcpy = MI.getOpcode() == ARM::MVE_MEMCPYLOOPINST; |
| 12004 | genTPLoopBody(TpLoopBody, TpEntry, TpExit, TII, Dl: dl, MRI, OpSrcReg, |
| 12005 | OpDestReg, ElementCountReg: OpSizeReg, TotalIterationsReg, IsMemcpy); |
| 12006 | |
| 12007 | // Required to avoid conflict with the MachineVerifier during testing. |
| 12008 | Properties.resetNoPHIs(); |
| 12009 | |
| 12010 | // Connect the blocks |
| 12011 | TpEntry->addSuccessor(Succ: TpLoopBody); |
| 12012 | TpLoopBody->addSuccessor(Succ: TpLoopBody); |
| 12013 | TpLoopBody->addSuccessor(Succ: TpExit); |
| 12014 | |
| 12015 | // Reorder for a more natural layout |
| 12016 | TpLoopBody->moveAfter(NewBefore: TpEntry); |
| 12017 | TpExit->moveAfter(NewBefore: TpLoopBody); |
| 12018 | |
// Finally, remove the memcpy/memset pseudo instruction.
| 12020 | MI.eraseFromParent(); |
| 12021 | |
| 12022 | // Return the exit block as it may contain other instructions requiring a |
| 12023 | // custom inserter |
| 12024 | return TpExit; |
| 12025 | } |
| 12026 | |
// The Thumb2 pre-indexed stores have the same MI operands; they just
// define them differently in the .td files from the isel patterns, so
// they need pseudos.
| 12030 | case ARM::t2STR_preidx: |
| 12031 | MI.setDesc(TII->get(Opcode: ARM::t2STR_PRE)); |
| 12032 | return BB; |
| 12033 | case ARM::t2STRB_preidx: |
| 12034 | MI.setDesc(TII->get(Opcode: ARM::t2STRB_PRE)); |
| 12035 | return BB; |
| 12036 | case ARM::t2STRH_preidx: |
| 12037 | MI.setDesc(TII->get(Opcode: ARM::t2STRH_PRE)); |
| 12038 | return BB; |
| 12039 | |
| 12040 | case ARM::STRi_preidx: |
| 12041 | case ARM::STRBi_preidx: { |
| 12042 | unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM |
| 12043 | : ARM::STRB_PRE_IMM; |
| 12044 | // Decode the offset. |
| 12045 | unsigned Offset = MI.getOperand(i: 4).getImm(); |
| 12046 | bool isSub = ARM_AM::getAM2Op(AM2Opc: Offset) == ARM_AM::sub; |
| 12047 | Offset = ARM_AM::getAM2Offset(AM2Opc: Offset); |
| 12048 | if (isSub) |
| 12049 | Offset = -Offset; |
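// For example, an addrmode2 immediate encoding "sub #4" decodes to
// Offset = -4, which the *_PRE_IMM instruction treats as a pre-decrement.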
| 12050 | |
| 12051 | MachineMemOperand *MMO = *MI.memoperands_begin(); |
| 12052 | BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: NewOpc)) |
| 12053 | .add(MO: MI.getOperand(i: 0)) // Rn_wb |
| 12054 | .add(MO: MI.getOperand(i: 1)) // Rt |
| 12055 | .add(MO: MI.getOperand(i: 2)) // Rn |
| 12056 | .addImm(Val: Offset) // offset (skip GPR==zero_reg) |
| 12057 | .add(MO: MI.getOperand(i: 5)) // pred |
| 12058 | .add(MO: MI.getOperand(i: 6)) |
| 12059 | .addMemOperand(MMO); |
| 12060 | MI.eraseFromParent(); |
| 12061 | return BB; |
| 12062 | } |
| 12063 | case ARM::STRr_preidx: |
| 12064 | case ARM::STRBr_preidx: |
| 12065 | case ARM::STRH_preidx: { |
| 12066 | unsigned NewOpc; |
| 12067 | switch (MI.getOpcode()) { |
default: llvm_unreachable("unexpected opcode!");
| 12069 | case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break; |
| 12070 | case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break; |
| 12071 | case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break; |
| 12072 | } |
| 12073 | MachineInstrBuilder MIB = BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: NewOpc)); |
| 12074 | for (const MachineOperand &MO : MI.operands()) |
| 12075 | MIB.add(MO); |
| 12076 | MI.eraseFromParent(); |
| 12077 | return BB; |
| 12078 | } |
| 12079 | |
| 12080 | case ARM::tMOVCCr_pseudo: { |
| 12081 | // To "insert" a SELECT_CC instruction, we actually have to insert the |
| 12082 | // diamond control-flow pattern. The incoming instruction knows the |
| 12083 | // destination vreg to set, the condition code register to branch on, the |
| 12084 | // true/false values to select between, and a branch opcode to use. |
| 12085 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); |
| 12086 | MachineFunction::iterator It = ++BB->getIterator(); |
| 12087 | |
| 12088 | // thisMBB: |
| 12089 | // ... |
| 12090 | // TrueVal = ... |
| 12091 | // cmpTY ccX, r1, r2 |
| 12092 | // bCC copy1MBB |
| 12093 | // fallthrough --> copy0MBB |
| 12094 | MachineBasicBlock *thisMBB = BB; |
| 12095 | MachineFunction *F = BB->getParent(); |
| 12096 | MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(BB: LLVM_BB); |
| 12097 | MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(BB: LLVM_BB); |
| 12098 | F->insert(MBBI: It, MBB: copy0MBB); |
| 12099 | F->insert(MBBI: It, MBB: sinkMBB); |
| 12100 | |
| 12101 | // Set the call frame size on entry to the new basic blocks. |
| 12102 | unsigned CallFrameSize = TII->getCallFrameSizeAt(MI); |
| 12103 | copy0MBB->setCallFrameSize(CallFrameSize); |
| 12104 | sinkMBB->setCallFrameSize(CallFrameSize); |
| 12105 | |
| 12106 | // Check whether CPSR is live past the tMOVCCr_pseudo. |
| 12107 | const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 12108 | if (!MI.killsRegister(Reg: ARM::CPSR, /*TRI=*/nullptr) && |
| 12109 | !checkAndUpdateCPSRKill(SelectItr: MI, BB: thisMBB, TRI)) { |
| 12110 | copy0MBB->addLiveIn(PhysReg: ARM::CPSR); |
| 12111 | sinkMBB->addLiveIn(PhysReg: ARM::CPSR); |
| 12112 | } |
| 12113 | |
| 12114 | // Transfer the remainder of BB and its successor edges to sinkMBB. |
| 12115 | sinkMBB->splice(Where: sinkMBB->begin(), Other: BB, |
| 12116 | From: std::next(x: MachineBasicBlock::iterator(MI)), To: BB->end()); |
| 12117 | sinkMBB->transferSuccessorsAndUpdatePHIs(FromMBB: BB); |
| 12118 | |
| 12119 | BB->addSuccessor(Succ: copy0MBB); |
| 12120 | BB->addSuccessor(Succ: sinkMBB); |
| 12121 | |
| 12122 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::tBcc)) |
| 12123 | .addMBB(MBB: sinkMBB) |
| 12124 | .addImm(Val: MI.getOperand(i: 3).getImm()) |
| 12125 | .addReg(RegNo: MI.getOperand(i: 4).getReg()); |
| 12126 | |
| 12127 | // copy0MBB: |
| 12128 | // %FalseValue = ... |
| 12129 | // # fallthrough to sinkMBB |
| 12130 | BB = copy0MBB; |
| 12131 | |
| 12132 | // Update machine-CFG edges |
| 12133 | BB->addSuccessor(Succ: sinkMBB); |
| 12134 | |
| 12135 | // sinkMBB: |
| 12136 | // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] |
| 12137 | // ... |
| 12138 | BB = sinkMBB; |
| 12139 | BuildMI(BB&: *BB, I: BB->begin(), MIMD: dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: MI.getOperand(i: 0).getReg()) |
| 12140 | .addReg(RegNo: MI.getOperand(i: 1).getReg()) |
| 12141 | .addMBB(MBB: copy0MBB) |
| 12142 | .addReg(RegNo: MI.getOperand(i: 2).getReg()) |
| 12143 | .addMBB(MBB: thisMBB); |
| 12144 | |
| 12145 | MI.eraseFromParent(); // The pseudo instruction is gone now. |
| 12146 | return BB; |
| 12147 | } |
| 12148 | |
| 12149 | case ARM::BCCi64: |
| 12150 | case ARM::BCCZi64: { |
| 12151 | // If there is an unconditional branch to the other successor, remove it. |
| 12152 | BB->erase(I: std::next(x: MachineBasicBlock::iterator(MI)), E: BB->end()); |
| 12153 | |
| 12154 | // Compare both parts that make up the double comparison separately for |
| 12155 | // equality. |
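// The second CMP below is predicated on EQ, so CPSR ends up EQ only if both
// halves compare equal.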
| 12156 | bool RHSisZero = MI.getOpcode() == ARM::BCCZi64; |
| 12157 | |
| 12158 | Register LHS1 = MI.getOperand(i: 1).getReg(); |
| 12159 | Register LHS2 = MI.getOperand(i: 2).getReg(); |
| 12160 | if (RHSisZero) { |
| 12161 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: isThumb2 ? ARM::t2CMPri : ARM::CMPri)) |
| 12162 | .addReg(RegNo: LHS1) |
| 12163 | .addImm(Val: 0) |
| 12164 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 12165 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: isThumb2 ? ARM::t2CMPri : ARM::CMPri)) |
| 12166 | .addReg(RegNo: LHS2).addImm(Val: 0) |
| 12167 | .addImm(Val: ARMCC::EQ).addReg(RegNo: ARM::CPSR); |
| 12168 | } else { |
| 12169 | Register RHS1 = MI.getOperand(i: 3).getReg(); |
| 12170 | Register RHS2 = MI.getOperand(i: 4).getReg(); |
| 12171 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) |
| 12172 | .addReg(RegNo: LHS1) |
| 12173 | .addReg(RegNo: RHS1) |
| 12174 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 12175 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) |
| 12176 | .addReg(RegNo: LHS2).addReg(RegNo: RHS2) |
| 12177 | .addImm(Val: ARMCC::EQ).addReg(RegNo: ARM::CPSR); |
| 12178 | } |
| 12179 | |
| 12180 | MachineBasicBlock *destMBB = MI.getOperand(i: RHSisZero ? 3 : 5).getMBB(); |
| 12181 | MachineBasicBlock *exitMBB = OtherSucc(MBB: BB, Succ: destMBB); |
| 12182 | if (MI.getOperand(i: 0).getImm() == ARMCC::NE) |
| 12183 | std::swap(a&: destMBB, b&: exitMBB); |
| 12184 | |
| 12185 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: isThumb2 ? ARM::t2Bcc : ARM::Bcc)) |
| 12186 | .addMBB(MBB: destMBB).addImm(Val: ARMCC::EQ).addReg(RegNo: ARM::CPSR); |
| 12187 | if (isThumb2) |
| 12188 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2B)) |
| 12189 | .addMBB(MBB: exitMBB) |
| 12190 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 12191 | else |
BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::B)).addMBB(MBB: exitMBB);
| 12193 | |
| 12194 | MI.eraseFromParent(); // The pseudo instruction is gone now. |
| 12195 | return BB; |
| 12196 | } |
| 12197 | |
| 12198 | case ARM::Int_eh_sjlj_setjmp: |
| 12199 | case ARM::Int_eh_sjlj_setjmp_nofp: |
| 12200 | case ARM::tInt_eh_sjlj_setjmp: |
| 12201 | case ARM::t2Int_eh_sjlj_setjmp: |
| 12202 | case ARM::t2Int_eh_sjlj_setjmp_nofp: |
| 12203 | return BB; |
| 12204 | |
| 12205 | case ARM::Int_eh_sjlj_setup_dispatch: |
| 12206 | EmitSjLjDispatchBlock(MI, MBB: BB); |
| 12207 | return BB; |
| 12208 | case ARM::COPY_STRUCT_BYVAL_I32: |
| 12209 | ++NumLoopByVals; |
| 12210 | return EmitStructByval(MI, BB); |
| 12211 | case ARM::WIN__CHKSTK: |
| 12212 | return EmitLowered__chkstk(MI, MBB: BB); |
| 12213 | case ARM::WIN__DBZCHK: |
| 12214 | return EmitLowered__dbzchk(MI, MBB: BB); |
| 12215 | } |
| 12216 | } |
| 12217 | |
| 12218 | /// Attaches vregs to MEMCPY that it will use as scratch registers |
| 12219 | /// when it is expanded into LDM/STM. This is done as a post-isel lowering |
| 12220 | /// instead of as a custom inserter because we need the use list from the SDNode. |
| 12221 | static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget, |
| 12222 | MachineInstr &MI, const SDNode *Node) { |
| 12223 | bool isThumb1 = Subtarget->isThumb1Only(); |
| 12224 | |
| 12225 | MachineFunction *MF = MI.getParent()->getParent(); |
| 12226 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
| 12227 | MachineInstrBuilder MIB(*MF, MI); |
| 12228 | |
// If the new dst/src is unused, mark it as dead.
| 12230 | if (!Node->hasAnyUseOfValue(Value: 0)) { |
| 12231 | MI.getOperand(i: 0).setIsDead(true); |
| 12232 | } |
| 12233 | if (!Node->hasAnyUseOfValue(Value: 1)) { |
| 12234 | MI.getOperand(i: 1).setIsDead(true); |
| 12235 | } |
| 12236 | |
| 12237 | // The MEMCPY both defines and kills the scratch registers. |
| 12238 | for (unsigned I = 0; I != MI.getOperand(i: 4).getImm(); ++I) { |
| 12239 | Register TmpReg = MRI.createVirtualRegister(RegClass: isThumb1 ? &ARM::tGPRRegClass |
| 12240 | : &ARM::GPRRegClass); |
| 12241 | MIB.addReg(RegNo: TmpReg, Flags: RegState::Define|RegState::Dead); |
| 12242 | } |
| 12243 | } |
| 12244 | |
| 12245 | void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, |
| 12246 | SDNode *Node) const { |
| 12247 | if (MI.getOpcode() == ARM::MEMCPY) { |
| 12248 | attachMEMCPYScratchRegs(Subtarget, MI, Node); |
| 12249 | return; |
| 12250 | } |
| 12251 | |
| 12252 | const MCInstrDesc *MCID = &MI.getDesc(); |
// Adjust instructions that potentially set the 's' bit after isel, i.e. ADC,
// SBC, RSB, and RSC. Coming out of isel, they have an implicit CPSR def, but
// the optional operand is still set to noreg. If needed, set the optional
// operand's register to CPSR, and remove the redundant implicit def.
| 12257 | // |
| 12258 | // e.g. ADCS (..., implicit-def CPSR) -> ADC (... opt:def CPSR). |
| 12259 | |
| 12260 | // Rename pseudo opcodes. |
| 12261 | unsigned NewOpc = convertAddSubFlagsOpcode(OldOpc: MI.getOpcode()); |
| 12262 | unsigned ccOutIdx; |
| 12263 | if (NewOpc) { |
| 12264 | const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo(); |
| 12265 | MCID = &TII->get(Opcode: NewOpc); |
| 12266 | |
| 12267 | assert(MCID->getNumOperands() == |
| 12268 | MI.getDesc().getNumOperands() + 5 - MI.getDesc().getSize() |
| 12269 | && "converted opcode should be the same except for cc_out" |
| 12270 | " (and, on Thumb1, pred)" ); |
| 12271 | |
| 12272 | MI.setDesc(*MCID); |
| 12273 | |
| 12274 | // Add the optional cc_out operand |
| 12275 | MI.addOperand(Op: MachineOperand::CreateReg(Reg: 0, /*isDef=*/true)); |
| 12276 | |
| 12277 | // On Thumb1, move all input operands to the end, then add the predicate |
| 12278 | if (Subtarget->isThumb1Only()) { |
| 12279 | for (unsigned c = MCID->getNumOperands() - 4; c--;) { |
| 12280 | MI.addOperand(Op: MI.getOperand(i: 1)); |
| 12281 | MI.removeOperand(OpNo: 1); |
| 12282 | } |
| 12283 | |
| 12284 | // Restore the ties |
| 12285 | for (unsigned i = MI.getNumOperands(); i--;) { |
| 12286 | const MachineOperand& op = MI.getOperand(i); |
| 12287 | if (op.isReg() && op.isUse()) { |
| 12288 | int DefIdx = MCID->getOperandConstraint(OpNum: i, Constraint: MCOI::TIED_TO); |
| 12289 | if (DefIdx != -1) |
| 12290 | MI.tieOperands(DefIdx, UseIdx: i); |
| 12291 | } |
| 12292 | } |
| 12293 | |
| 12294 | MI.addOperand(Op: MachineOperand::CreateImm(Val: ARMCC::AL)); |
| 12295 | MI.addOperand(Op: MachineOperand::CreateReg(Reg: 0, /*isDef=*/false)); |
| 12296 | ccOutIdx = 1; |
| 12297 | } else |
| 12298 | ccOutIdx = MCID->getNumOperands() - 1; |
| 12299 | } else |
| 12300 | ccOutIdx = MCID->getNumOperands() - 1; |
| 12301 | |
| 12302 | // Any ARM instruction that sets the 's' bit should specify an optional |
| 12303 | // "cc_out" operand in the last operand position. |
| 12304 | if (!MI.hasOptionalDef() || !MCID->operands()[ccOutIdx].isOptionalDef()) { |
assert(!NewOpc && "Optional cc_out operand required");
| 12306 | return; |
| 12307 | } |
// Look for an implicit def of CPSR added by the MachineInstr ctor. Remove
// it since we already have an optional CPSR def.
| 12310 | bool definesCPSR = false; |
| 12311 | bool deadCPSR = false; |
| 12312 | for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e; |
| 12313 | ++i) { |
| 12314 | const MachineOperand &MO = MI.getOperand(i); |
| 12315 | if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) { |
| 12316 | definesCPSR = true; |
| 12317 | if (MO.isDead()) |
| 12318 | deadCPSR = true; |
| 12319 | MI.removeOperand(OpNo: i); |
| 12320 | break; |
| 12321 | } |
| 12322 | } |
| 12323 | if (!definesCPSR) { |
assert(!NewOpc && "Optional cc_out operand required");
| 12325 | return; |
| 12326 | } |
assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
| 12328 | if (deadCPSR) { |
| 12329 | assert(!MI.getOperand(ccOutIdx).getReg() && |
| 12330 | "expect uninitialized optional cc_out operand" ); |
| 12331 | // Thumb1 instructions must have the S bit even if the CPSR is dead. |
| 12332 | if (!Subtarget->isThumb1Only()) |
| 12333 | return; |
| 12334 | } |
| 12335 | |
| 12336 | // If this instruction was defined with an optional CPSR def and its dag node |
| 12337 | // had a live implicit CPSR def, then activate the optional CPSR def. |
| 12338 | MachineOperand &MO = MI.getOperand(i: ccOutIdx); |
| 12339 | MO.setReg(ARM::CPSR); |
| 12340 | MO.setIsDef(true); |
| 12341 | } |
| 12342 | |
| 12343 | //===----------------------------------------------------------------------===// |
| 12344 | // ARM Optimization Hooks |
| 12345 | //===----------------------------------------------------------------------===// |
| 12346 | |
| 12347 | // Helper function that checks if N is a null or all ones constant. |
| 12348 | static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) { |
| 12349 | return AllOnes ? isAllOnesConstant(V: N) : isNullConstant(V: N); |
| 12350 | } |
| 12351 | |
| 12352 | // Return true if N is conditionally 0 or all ones. |
| 12353 | // Detects these expressions where cc is an i1 value: |
| 12354 | // |
| 12355 | // (select cc 0, y) [AllOnes=0] |
| 12356 | // (select cc y, 0) [AllOnes=0] |
| 12357 | // (zext cc) [AllOnes=0] |
| 12358 | // (sext cc) [AllOnes=0/1] |
| 12359 | // (select cc -1, y) [AllOnes=1] |
| 12360 | // (select cc y, -1) [AllOnes=1] |
| 12361 | // |
| 12362 | // Invert is set when N is the null/all ones constant when CC is false. |
| 12363 | // OtherOp is set to the alternative value of N. |
| 12364 | static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, |
| 12365 | SDValue &CC, bool &Invert, |
| 12366 | SDValue &OtherOp, |
| 12367 | SelectionDAG &DAG) { |
| 12368 | switch (N->getOpcode()) { |
| 12369 | default: return false; |
| 12370 | case ISD::SELECT: { |
| 12371 | CC = N->getOperand(Num: 0); |
| 12372 | SDValue N1 = N->getOperand(Num: 1); |
| 12373 | SDValue N2 = N->getOperand(Num: 2); |
| 12374 | if (isZeroOrAllOnes(N: N1, AllOnes)) { |
| 12375 | Invert = false; |
| 12376 | OtherOp = N2; |
| 12377 | return true; |
| 12378 | } |
| 12379 | if (isZeroOrAllOnes(N: N2, AllOnes)) { |
| 12380 | Invert = true; |
| 12381 | OtherOp = N1; |
| 12382 | return true; |
| 12383 | } |
| 12384 | return false; |
| 12385 | } |
| 12386 | case ISD::ZERO_EXTEND: |
| 12387 | // (zext cc) can never be the all ones value. |
| 12388 | if (AllOnes) |
| 12389 | return false; |
| 12390 | [[fallthrough]]; |
| 12391 | case ISD::SIGN_EXTEND: { |
| 12392 | SDLoc dl(N); |
| 12393 | EVT VT = N->getValueType(ResNo: 0); |
| 12394 | CC = N->getOperand(Num: 0); |
| 12395 | if (CC.getValueType() != MVT::i1 || CC.getOpcode() != ISD::SETCC) |
| 12396 | return false; |
| 12397 | Invert = !AllOnes; |
| 12398 | if (AllOnes) |
| 12399 | // When looking for an AllOnes constant, N is an sext, and the 'other' |
| 12400 | // value is 0. |
| 12401 | OtherOp = DAG.getConstant(Val: 0, DL: dl, VT); |
| 12402 | else if (N->getOpcode() == ISD::ZERO_EXTEND) |
| 12403 | // When looking for a 0 constant, N can be zext or sext. |
| 12404 | OtherOp = DAG.getConstant(Val: 1, DL: dl, VT); |
| 12405 | else |
| 12406 | OtherOp = DAG.getAllOnesConstant(DL: dl, VT); |
| 12407 | return true; |
| 12408 | } |
| 12409 | } |
| 12410 | } |
| 12411 | |
| 12412 | // Combine a constant select operand into its use: |
| 12413 | // |
// (add (select cc, 0, c), x) -> (select cc, x, (add x, c))
// (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c))
// (and (select cc, -1, c), x) -> (select cc, x, (and x, c)) [AllOnes=1]
// (or (select cc, 0, c), x) -> (select cc, x, (or x, c))
// (xor (select cc, 0, c), x) -> (select cc, x, (xor x, c))
| 12419 | // |
| 12420 | // The transform is rejected if the select doesn't have a constant operand that |
| 12421 | // is null, or all ones when AllOnes is set. |
| 12422 | // |
| 12423 | // Also recognize sext/zext from i1: |
| 12424 | // |
// (add (zext cc), x) -> (select cc, (add x, 1), x)
// (add (sext cc), x) -> (select cc, (add x, -1), x)
| 12427 | // |
| 12428 | // These transformations eventually create predicated instructions. |
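// For example (illustrative), (add (select cc, 0, c), x) becomes
// (select cc, x, (add x, c)), which can then be emitted as a single
// conditionally executed ADD.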
| 12429 | // |
| 12430 | // @param N The node to transform. |
| 12431 | // @param Slct The N operand that is a select. |
| 12432 | // @param OtherOp The other N operand (x above). |
| 12433 | // @param DCI Context. |
| 12434 | // @param AllOnes Require the select constant to be all ones instead of null. |
| 12435 | // @returns The new node, or SDValue() on failure. |
| 12436 | static |
| 12437 | SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, |
| 12438 | TargetLowering::DAGCombinerInfo &DCI, |
| 12439 | bool AllOnes = false) { |
| 12440 | SelectionDAG &DAG = DCI.DAG; |
| 12441 | EVT VT = N->getValueType(ResNo: 0); |
| 12442 | SDValue NonConstantVal; |
| 12443 | SDValue CCOp; |
| 12444 | bool SwapSelectOps; |
| 12445 | if (!isConditionalZeroOrAllOnes(N: Slct.getNode(), AllOnes, CC&: CCOp, Invert&: SwapSelectOps, |
| 12446 | OtherOp&: NonConstantVal, DAG)) |
| 12447 | return SDValue(); |
| 12448 | |
// Slct is now known to be the desired identity constant when CC is true.
| 12450 | SDValue TrueVal = OtherOp; |
| 12451 | SDValue FalseVal = DAG.getNode(Opcode: N->getOpcode(), DL: SDLoc(N), VT, |
| 12452 | N1: OtherOp, N2: NonConstantVal); |
| 12453 | // Unless SwapSelectOps says CC should be false. |
| 12454 | if (SwapSelectOps) |
| 12455 | std::swap(a&: TrueVal, b&: FalseVal); |
| 12456 | |
| 12457 | return DAG.getNode(Opcode: ISD::SELECT, DL: SDLoc(N), VT, |
| 12458 | N1: CCOp, N2: TrueVal, N3: FalseVal); |
| 12459 | } |
| 12460 | |
| 12461 | // Attempt combineSelectAndUse on each operand of a commutative operator N. |
| 12462 | static |
| 12463 | SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes, |
| 12464 | TargetLowering::DAGCombinerInfo &DCI) { |
| 12465 | SDValue N0 = N->getOperand(Num: 0); |
| 12466 | SDValue N1 = N->getOperand(Num: 1); |
| 12467 | if (N0.getNode()->hasOneUse()) |
| 12468 | if (SDValue Result = combineSelectAndUse(N, Slct: N0, OtherOp: N1, DCI, AllOnes)) |
| 12469 | return Result; |
| 12470 | if (N1.getNode()->hasOneUse()) |
| 12471 | if (SDValue Result = combineSelectAndUse(N, Slct: N1, OtherOp: N0, DCI, AllOnes)) |
| 12472 | return Result; |
| 12473 | return SDValue(); |
| 12474 | } |
| 12475 | |
| 12476 | static bool IsVUZPShuffleNode(SDNode *N) { |
| 12477 | // VUZP shuffle node. |
| 12478 | if (N->getOpcode() == ARMISD::VUZP) |
| 12479 | return true; |
| 12480 | |
| 12481 | // "VUZP" on i32 is an alias for VTRN. |
| 12482 | if (N->getOpcode() == ARMISD::VTRN && N->getValueType(ResNo: 0) == MVT::v2i32) |
| 12483 | return true; |
| 12484 | |
| 12485 | return false; |
| 12486 | } |
| 12487 | |
| 12488 | static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1, |
| 12489 | TargetLowering::DAGCombinerInfo &DCI, |
| 12490 | const ARMSubtarget *Subtarget) { |
| 12491 | // Look for ADD(VUZP.0, VUZP.1). |
| 12492 | if (!IsVUZPShuffleNode(N: N0.getNode()) || N0.getNode() != N1.getNode() || |
| 12493 | N0 == N1) |
| 12494 | return SDValue(); |
| 12495 | |
| 12496 | // Make sure the ADD is a 64-bit add; there is no 128-bit VPADD. |
| 12497 | if (!N->getValueType(ResNo: 0).is64BitVector()) |
| 12498 | return SDValue(); |
| 12499 | |
| 12500 | // Generate vpadd. |
| 12501 | SelectionDAG &DAG = DCI.DAG; |
| 12502 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 12503 | SDLoc dl(N); |
| 12504 | SDNode *Unzip = N0.getNode(); |
| 12505 | EVT VT = N->getValueType(ResNo: 0); |
| 12506 | |
| 12507 | SmallVector<SDValue, 8> Ops; |
| 12508 | Ops.push_back(Elt: DAG.getConstant(Val: Intrinsic::arm_neon_vpadd, DL: dl, |
| 12509 | VT: TLI.getPointerTy(DL: DAG.getDataLayout()))); |
| 12510 | Ops.push_back(Elt: Unzip->getOperand(Num: 0)); |
| 12511 | Ops.push_back(Elt: Unzip->getOperand(Num: 1)); |
| 12512 | |
| 12513 | return DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT, Ops); |
| 12514 | } |
| 12515 | |
| 12516 | static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1, |
| 12517 | TargetLowering::DAGCombinerInfo &DCI, |
| 12518 | const ARMSubtarget *Subtarget) { |
| 12519 | // Check for two extended operands. |
| 12520 | if (!(N0.getOpcode() == ISD::SIGN_EXTEND && |
| 12521 | N1.getOpcode() == ISD::SIGN_EXTEND) && |
| 12522 | !(N0.getOpcode() == ISD::ZERO_EXTEND && |
| 12523 | N1.getOpcode() == ISD::ZERO_EXTEND)) |
| 12524 | return SDValue(); |
| 12525 | |
| 12526 | SDValue N00 = N0.getOperand(i: 0); |
| 12527 | SDValue N10 = N1.getOperand(i: 0); |
| 12528 | |
| 12529 | // Look for ADD(SEXT(VUZP.0), SEXT(VUZP.1)) |
| 12530 | if (!IsVUZPShuffleNode(N: N00.getNode()) || N00.getNode() != N10.getNode() || |
| 12531 | N00 == N10) |
| 12532 | return SDValue(); |
| 12533 | |
| 12534 | // We only recognize Q register paddl here; this can't be reached until |
| 12535 | // after type legalization. |
| 12536 | if (!N00.getValueType().is64BitVector() || |
| 12537 | !N0.getValueType().is128BitVector()) |
| 12538 | return SDValue(); |
| 12539 | |
| 12540 | // Generate vpaddl. |
| 12541 | SelectionDAG &DAG = DCI.DAG; |
| 12542 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 12543 | SDLoc dl(N); |
| 12544 | EVT VT = N->getValueType(ResNo: 0); |
| 12545 | |
| 12546 | SmallVector<SDValue, 8> Ops; |
| 12547 | // Form vpaddl.sN or vpaddl.uN depending on the kind of extension. |
| 12548 | unsigned Opcode; |
| 12549 | if (N0.getOpcode() == ISD::SIGN_EXTEND) |
| 12550 | Opcode = Intrinsic::arm_neon_vpaddls; |
| 12551 | else |
| 12552 | Opcode = Intrinsic::arm_neon_vpaddlu; |
| 12553 | Ops.push_back(Elt: DAG.getConstant(Val: Opcode, DL: dl, |
| 12554 | VT: TLI.getPointerTy(DL: DAG.getDataLayout()))); |
| 12555 | EVT ElemTy = N00.getValueType().getVectorElementType(); |
| 12556 | unsigned NumElts = VT.getVectorNumElements(); |
| 12557 | EVT ConcatVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: ElemTy, NumElements: NumElts * 2); |
| 12558 | SDValue Concat = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: SDLoc(N), VT: ConcatVT, |
| 12559 | N1: N00.getOperand(i: 0), N2: N00.getOperand(i: 1)); |
| 12560 | Ops.push_back(Elt: Concat); |
| 12561 | |
| 12562 | return DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT, Ops); |
| 12563 | } |
| 12564 | |
| 12565 | // FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in |
| 12566 | // an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is |
| 12567 | // much easier to match. |
| 12568 | static SDValue |
| 12569 | AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1, |
| 12570 | TargetLowering::DAGCombinerInfo &DCI, |
| 12571 | const ARMSubtarget *Subtarget) { |
// Only perform the optimization after legalization and only if NEON is
// available. We also expect both operands to be BUILD_VECTORs.
| 12574 | if (DCI.isBeforeLegalize() || !Subtarget->hasNEON() |
| 12575 | || N0.getOpcode() != ISD::BUILD_VECTOR |
| 12576 | || N1.getOpcode() != ISD::BUILD_VECTOR) |
| 12577 | return SDValue(); |
| 12578 | |
| 12579 | // Check output type since VPADDL operand elements can only be 8, 16, or 32. |
| 12580 | EVT VT = N->getValueType(ResNo: 0); |
| 12581 | if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64) |
| 12582 | return SDValue(); |
| 12583 | |
| 12584 | // Check that the vector operands are of the right form. |
// N0 and N1 are BUILD_VECTOR nodes made up of N EXTRACT_VECTOR_ELT
// operands, where N is the number of elements in the formed vector.
// Each EXTRACT_VECTOR_ELT should read the same input vector, at an even or
// odd index respectively, so that we have a pairwise add pattern.
| 12589 | |
| 12590 | // Grab the vector that all EXTRACT_VECTOR nodes should be referencing. |
| 12591 | if (N0->getOperand(Num: 0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT) |
| 12592 | return SDValue(); |
| 12593 | SDValue Vec = N0->getOperand(Num: 0)->getOperand(Num: 0); |
| 12594 | SDNode *V = Vec.getNode(); |
| 12595 | unsigned nextIndex = 0; |
| 12596 | |
// For each operand of the ADD, which are BUILD_VECTORs, check that each of
// their operands is an EXTRACT_VECTOR_ELT with the same input vector and an
// appropriate index.
| 12600 | for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) { |
| 12601 | if (N0->getOperand(Num: i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT |
| 12602 | && N1->getOperand(Num: i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { |
| 12603 | |
| 12604 | SDValue ExtVec0 = N0->getOperand(Num: i); |
| 12605 | SDValue ExtVec1 = N1->getOperand(Num: i); |
| 12606 | |
// First operand is the vector; verify it's the same.
| 12608 | if (V != ExtVec0->getOperand(Num: 0).getNode() || |
| 12609 | V != ExtVec1->getOperand(Num: 0).getNode()) |
| 12610 | return SDValue(); |
| 12611 | |
// Second operand is the constant index; verify it's correct.
| 12613 | ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(Val: ExtVec0->getOperand(Num: 1)); |
| 12614 | ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Val: ExtVec1->getOperand(Num: 1)); |
| 12615 | |
// For the constants, N0's indices must all be even and N1's all odd.
| 12617 | if (!C0 || !C1 || C0->getZExtValue() != nextIndex |
| 12618 | || C1->getZExtValue() != nextIndex+1) |
| 12619 | return SDValue(); |
| 12620 | |
| 12621 | // Increment index. |
| 12622 | nextIndex+=2; |
| 12623 | } else |
| 12624 | return SDValue(); |
| 12625 | } |
| 12626 | |
| 12627 | // Don't generate vpaddl+vmovn; we'll match it to vpadd later. Also make sure |
| 12628 | // we're using the entire input vector, otherwise there's a size/legality |
| 12629 | // mismatch somewhere. |
| 12630 | if (nextIndex != Vec.getValueType().getVectorNumElements() || |
| 12631 | Vec.getValueType().getVectorElementType() == VT.getVectorElementType()) |
| 12632 | return SDValue(); |
| 12633 | |
| 12634 | // Create VPADDL node. |
| 12635 | SelectionDAG &DAG = DCI.DAG; |
| 12636 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 12637 | |
| 12638 | SDLoc dl(N); |
| 12639 | |
| 12640 | // Build operand list. |
| 12641 | SmallVector<SDValue, 8> Ops; |
| 12642 | Ops.push_back(Elt: DAG.getConstant(Val: Intrinsic::arm_neon_vpaddls, DL: dl, |
| 12643 | VT: TLI.getPointerTy(DL: DAG.getDataLayout()))); |
| 12644 | |
| 12645 | // Input is the vector. |
| 12646 | Ops.push_back(Elt: Vec); |
| 12647 | |
// Compute the widened type for the vpaddl result.
| 12649 | MVT widenType; |
| 12650 | unsigned numElem = VT.getVectorNumElements(); |
| 12651 | |
| 12652 | EVT inputLaneType = Vec.getValueType().getVectorElementType(); |
| 12653 | switch (inputLaneType.getSimpleVT().SimpleTy) { |
| 12654 | case MVT::i8: widenType = MVT::getVectorVT(VT: MVT::i16, NumElements: numElem); break; |
| 12655 | case MVT::i16: widenType = MVT::getVectorVT(VT: MVT::i32, NumElements: numElem); break; |
| 12656 | case MVT::i32: widenType = MVT::getVectorVT(VT: MVT::i64, NumElements: numElem); break; |
| 12657 | default: |
llvm_unreachable("Invalid vector element type for padd optimization.");
| 12659 | } |
| 12660 | |
| 12661 | SDValue tmp = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: widenType, Ops); |
| 12662 | unsigned ExtOp = VT.bitsGT(VT: tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE; |
| 12663 | return DAG.getNode(Opcode: ExtOp, DL: dl, VT, Operand: tmp); |
| 12664 | } |
| 12665 | |
| 12666 | static SDValue findMUL_LOHI(SDValue V) { |
| 12667 | if (V->getOpcode() == ISD::UMUL_LOHI || |
| 12668 | V->getOpcode() == ISD::SMUL_LOHI) |
| 12669 | return V; |
| 12670 | return SDValue(); |
| 12671 | } |
| 12672 | |
| 12673 | static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode, |
| 12674 | TargetLowering::DAGCombinerInfo &DCI, |
| 12675 | const ARMSubtarget *Subtarget) { |
| 12676 | if (!Subtarget->hasBaseDSP()) |
| 12677 | return SDValue(); |
| 12678 | |
// SMLALBB, SMLALBT, SMLALTB and SMLALTT multiply two 16-bit values and
// accumulate the product into a 64-bit value. The 16-bit values will
// be sign-extended somehow or SRA'd into 32-bit values:
// (addc (adde (mul 16bit, 16bit), lo), hi)
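// For example (illustrative sketch), the SMLALBT form is matched when
//   Mul = (mul sext16(a), (sra b, 16))
// and the 32-bit product is widened to 64 bits by feeding Mul into the ADDC
// (low half) and (sra Mul, 31) into the ADDE (high half).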
| 12683 | SDValue Mul = AddcNode->getOperand(Num: 0); |
| 12684 | SDValue Lo = AddcNode->getOperand(Num: 1); |
| 12685 | if (Mul.getOpcode() != ISD::MUL) { |
| 12686 | Lo = AddcNode->getOperand(Num: 0); |
| 12687 | Mul = AddcNode->getOperand(Num: 1); |
| 12688 | if (Mul.getOpcode() != ISD::MUL) |
| 12689 | return SDValue(); |
| 12690 | } |
| 12691 | |
| 12692 | SDValue SRA = AddeNode->getOperand(Num: 0); |
| 12693 | SDValue Hi = AddeNode->getOperand(Num: 1); |
| 12694 | if (SRA.getOpcode() != ISD::SRA) { |
| 12695 | SRA = AddeNode->getOperand(Num: 1); |
| 12696 | Hi = AddeNode->getOperand(Num: 0); |
| 12697 | if (SRA.getOpcode() != ISD::SRA) |
| 12698 | return SDValue(); |
| 12699 | } |
| 12700 | if (auto Const = dyn_cast<ConstantSDNode>(Val: SRA.getOperand(i: 1))) { |
| 12701 | if (Const->getZExtValue() != 31) |
| 12702 | return SDValue(); |
| 12703 | } else |
| 12704 | return SDValue(); |
| 12705 | |
| 12706 | if (SRA.getOperand(i: 0) != Mul) |
| 12707 | return SDValue(); |
| 12708 | |
| 12709 | SelectionDAG &DAG = DCI.DAG; |
| 12710 | SDLoc dl(AddcNode); |
| 12711 | unsigned Opcode = 0; |
| 12712 | SDValue Op0; |
| 12713 | SDValue Op1; |
| 12714 | |
| 12715 | if (isS16(Op: Mul.getOperand(i: 0), DAG) && isS16(Op: Mul.getOperand(i: 1), DAG)) { |
| 12716 | Opcode = ARMISD::SMLALBB; |
| 12717 | Op0 = Mul.getOperand(i: 0); |
| 12718 | Op1 = Mul.getOperand(i: 1); |
| 12719 | } else if (isS16(Op: Mul.getOperand(i: 0), DAG) && isSRA16(Op: Mul.getOperand(i: 1))) { |
| 12720 | Opcode = ARMISD::SMLALBT; |
| 12721 | Op0 = Mul.getOperand(i: 0); |
| 12722 | Op1 = Mul.getOperand(i: 1).getOperand(i: 0); |
| 12723 | } else if (isSRA16(Op: Mul.getOperand(i: 0)) && isS16(Op: Mul.getOperand(i: 1), DAG)) { |
| 12724 | Opcode = ARMISD::SMLALTB; |
| 12725 | Op0 = Mul.getOperand(i: 0).getOperand(i: 0); |
| 12726 | Op1 = Mul.getOperand(i: 1); |
| 12727 | } else if (isSRA16(Op: Mul.getOperand(i: 0)) && isSRA16(Op: Mul.getOperand(i: 1))) { |
| 12728 | Opcode = ARMISD::SMLALTT; |
| 12729 | Op0 = Mul->getOperand(Num: 0).getOperand(i: 0); |
| 12730 | Op1 = Mul->getOperand(Num: 1).getOperand(i: 0); |
| 12731 | } |
| 12732 | |
| 12733 | if (!Op0 || !Op1) |
| 12734 | return SDValue(); |
| 12735 | |
| 12736 | SDValue SMLAL = DAG.getNode(Opcode, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
| 12737 | N1: Op0, N2: Op1, N3: Lo, N4: Hi); |
| 12738 | // Replace the ADDs' nodes uses by the MLA node's values. |
| 12739 | SDValue HiMLALResult(SMLAL.getNode(), 1); |
| 12740 | SDValue LoMLALResult(SMLAL.getNode(), 0); |
| 12741 | |
| 12742 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddcNode, 0), To: LoMLALResult); |
| 12743 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddeNode, 0), To: HiMLALResult); |
| 12744 | |
| 12745 | // Return original node to notify the driver to stop replacing. |
| 12746 | SDValue resNode(AddcNode, 0); |
| 12747 | return resNode; |
| 12748 | } |
| 12749 | |
| 12750 | static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode, |
| 12751 | TargetLowering::DAGCombinerInfo &DCI, |
| 12752 | const ARMSubtarget *Subtarget) { |
| 12753 | // Look for multiply add opportunities. |
// The pattern is an ISD::UMUL_LOHI followed by two add nodes, where
// each add node consumes a value from ISD::UMUL_LOHI and there is
// a glue link from the first add to the second add.
| 12757 | // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by |
| 12758 | // a S/UMLAL instruction. |
//              UMUL_LOHI
//             / :lo     \ :hi
//            V           \          [no multiline comment]
//    loAdd -> ADDC        |
//             \ :carry   /
//              V        V
//               ADDE <- hiAdd
| 12766 | // |
// In the special case where only the higher part of a signed result is used,
// and the add to the low part of the result of ISD::UMUL_LOHI adds or
// subtracts a constant with the exact value 0x80000000, we recognize we are
// dealing with a "rounded multiply and add" (or subtract) and transform it
// into either an ARMISD::SMMLAR or an ARMISD::SMMLSR, respectively.
| 12772 | |
| 12773 | assert((AddeSubeNode->getOpcode() == ARMISD::ADDE || |
| 12774 | AddeSubeNode->getOpcode() == ARMISD::SUBE) && |
| 12775 | "Expect an ADDE or SUBE" ); |
| 12776 | |
| 12777 | assert(AddeSubeNode->getNumOperands() == 3 && |
| 12778 | AddeSubeNode->getOperand(2).getValueType() == MVT::i32 && |
| 12779 | "ADDE node has the wrong inputs" ); |
| 12780 | |
| 12781 | // Check that we are chained to the right ADDC or SUBC node. |
| 12782 | SDNode *AddcSubcNode = AddeSubeNode->getOperand(Num: 2).getNode(); |
| 12783 | if ((AddeSubeNode->getOpcode() == ARMISD::ADDE && |
| 12784 | AddcSubcNode->getOpcode() != ARMISD::ADDC) || |
| 12785 | (AddeSubeNode->getOpcode() == ARMISD::SUBE && |
| 12786 | AddcSubcNode->getOpcode() != ARMISD::SUBC)) |
| 12787 | return SDValue(); |
| 12788 | |
| 12789 | SDValue AddcSubcOp0 = AddcSubcNode->getOperand(Num: 0); |
| 12790 | SDValue AddcSubcOp1 = AddcSubcNode->getOperand(Num: 1); |
| 12791 | |
| 12792 | // Check if the two operands are from the same mul_lohi node. |
| 12793 | if (AddcSubcOp0.getNode() == AddcSubcOp1.getNode()) |
| 12794 | return SDValue(); |
| 12795 | |
| 12796 | assert(AddcSubcNode->getNumValues() == 2 && |
| 12797 | AddcSubcNode->getValueType(0) == MVT::i32 && |
| 12798 | "Expect ADDC with two result values. First: i32" ); |
| 12799 | |
// Check that the ADDC adds the low result of the S/UMUL_LOHI. If not, it
// may be an SMLAL which multiplies two 16-bit values.
| 12802 | if (AddeSubeNode->getOpcode() == ARMISD::ADDE && |
| 12803 | AddcSubcOp0->getOpcode() != ISD::UMUL_LOHI && |
| 12804 | AddcSubcOp0->getOpcode() != ISD::SMUL_LOHI && |
| 12805 | AddcSubcOp1->getOpcode() != ISD::UMUL_LOHI && |
| 12806 | AddcSubcOp1->getOpcode() != ISD::SMUL_LOHI) |
| 12807 | return AddCombineTo64BitSMLAL16(AddcNode: AddcSubcNode, AddeNode: AddeSubeNode, DCI, Subtarget); |
| 12808 | |
| 12809 | // Check for the triangle shape. |
| 12810 | SDValue AddeSubeOp0 = AddeSubeNode->getOperand(Num: 0); |
| 12811 | SDValue AddeSubeOp1 = AddeSubeNode->getOperand(Num: 1); |
| 12812 | |
| 12813 | // Make sure that the ADDE/SUBE operands are not coming from the same node. |
| 12814 | if (AddeSubeOp0.getNode() == AddeSubeOp1.getNode()) |
| 12815 | return SDValue(); |
| 12816 | |
| 12817 | // Find the MUL_LOHI node walking up ADDE/SUBE's operands. |
| 12818 | bool IsLeftOperandMUL = false; |
| 12819 | SDValue MULOp = findMUL_LOHI(V: AddeSubeOp0); |
| 12820 | if (MULOp == SDValue()) |
| 12821 | MULOp = findMUL_LOHI(V: AddeSubeOp1); |
| 12822 | else |
| 12823 | IsLeftOperandMUL = true; |
| 12824 | if (MULOp == SDValue()) |
| 12825 | return SDValue(); |
| 12826 | |
| 12827 | // Figure out the right opcode. |
| 12828 | unsigned Opc = MULOp->getOpcode(); |
| 12829 | unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL; |
| 12830 | |
| 12831 | // Figure out the high and low input values to the MLAL node. |
| 12832 | SDValue *HiAddSub = nullptr; |
| 12833 | SDValue *LoMul = nullptr; |
| 12834 | SDValue *LowAddSub = nullptr; |
| 12835 | |
| 12836 | // Ensure that ADDE/SUBE is from high result of ISD::xMUL_LOHI. |
| 12837 | if ((AddeSubeOp0 != MULOp.getValue(R: 1)) && (AddeSubeOp1 != MULOp.getValue(R: 1))) |
| 12838 | return SDValue(); |
| 12839 | |
| 12840 | if (IsLeftOperandMUL) |
| 12841 | HiAddSub = &AddeSubeOp1; |
| 12842 | else |
| 12843 | HiAddSub = &AddeSubeOp0; |
| 12844 | |
// Ensure that LoMul and LowAddSub are taken from the correct ISD::SMUL_LOHI
// node whose low result is fed to the ADDC/SUBC we are checking.
| 12847 | |
| 12848 | if (AddcSubcOp0 == MULOp.getValue(R: 0)) { |
| 12849 | LoMul = &AddcSubcOp0; |
| 12850 | LowAddSub = &AddcSubcOp1; |
| 12851 | } |
| 12852 | if (AddcSubcOp1 == MULOp.getValue(R: 0)) { |
| 12853 | LoMul = &AddcSubcOp1; |
| 12854 | LowAddSub = &AddcSubcOp0; |
| 12855 | } |
| 12856 | |
| 12857 | if (!LoMul) |
| 12858 | return SDValue(); |
| 12859 | |
| 12860 | // If HiAddSub is the same node as ADDC/SUBC or is a predecessor of ADDC/SUBC |
| 12861 | // the replacement below will create a cycle. |
| 12862 | if (AddcSubcNode == HiAddSub->getNode() || |
| 12863 | AddcSubcNode->isPredecessorOf(N: HiAddSub->getNode())) |
| 12864 | return SDValue(); |
| 12865 | |
| 12866 | // Create the merged node. |
| 12867 | SelectionDAG &DAG = DCI.DAG; |
| 12868 | |
| 12869 | // Start building operand list. |
| 12870 | SmallVector<SDValue, 8> Ops; |
| 12871 | Ops.push_back(Elt: LoMul->getOperand(i: 0)); |
| 12872 | Ops.push_back(Elt: LoMul->getOperand(i: 1)); |
| 12873 | |
// Check whether we can use SMMLAR, SMMLSR or SMMULR instead. For this to be
// the case, we must be doing signed multiplication and only use the higher
// part of the result of the MLAL; furthermore, the LowAddSub must be a
// constant addition or subtraction with the exact value 0x80000000.
| 12878 | if (Subtarget->hasV6Ops() && Subtarget->hasDSP() && Subtarget->useMulOps() && |
| 12879 | FinalOpc == ARMISD::SMLAL && !AddeSubeNode->hasAnyUseOfValue(Value: 1) && |
| 12880 | LowAddSub->getNode()->getOpcode() == ISD::Constant && |
| 12881 | static_cast<ConstantSDNode *>(LowAddSub->getNode())->getZExtValue() == |
| 12882 | 0x80000000) { |
| 12883 | Ops.push_back(Elt: *HiAddSub); |
| 12884 | if (AddcSubcNode->getOpcode() == ARMISD::SUBC) { |
| 12885 | FinalOpc = ARMISD::SMMLSR; |
| 12886 | } else { |
| 12887 | FinalOpc = ARMISD::SMMLAR; |
| 12888 | } |
| 12889 | SDValue NewNode = DAG.getNode(Opcode: FinalOpc, DL: SDLoc(AddcSubcNode), VT: MVT::i32, Ops); |
| 12890 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddeSubeNode, 0), To: NewNode); |
| 12891 | |
| 12892 | return SDValue(AddeSubeNode, 0); |
| 12893 | } else if (AddcSubcNode->getOpcode() == ARMISD::SUBC) |
// SMMLS is generated during instruction selection and the rest of this
// function cannot handle the case where AddcSubcNode is a SUBC.
| 12896 | return SDValue(); |
| 12897 | |
| 12898 | // Finish building the operand list for {U/S}MLAL |
| 12899 | Ops.push_back(Elt: *LowAddSub); |
| 12900 | Ops.push_back(Elt: *HiAddSub); |
| 12901 | |
| 12902 | SDValue MLALNode = DAG.getNode(Opcode: FinalOpc, DL: SDLoc(AddcSubcNode), |
| 12903 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), Ops); |
| 12904 | |
| 12905 | // Replace the ADDs' nodes uses by the MLA node's values. |
| 12906 | SDValue HiMLALResult(MLALNode.getNode(), 1); |
| 12907 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddeSubeNode, 0), To: HiMLALResult); |
| 12908 | |
| 12909 | SDValue LoMLALResult(MLALNode.getNode(), 0); |
| 12910 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddcSubcNode, 0), To: LoMLALResult); |
| 12911 | |
| 12912 | // Return original node to notify the driver to stop replacing. |
| 12913 | return SDValue(AddeSubeNode, 0); |
| 12914 | } |
| 12915 | |
| 12916 | static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode, |
| 12917 | TargetLowering::DAGCombinerInfo &DCI, |
| 12918 | const ARMSubtarget *Subtarget) { |
| 12919 | // UMAAL is similar to UMLAL except that it adds two unsigned values. |
| 12920 | // While trying to combine for the other MLAL nodes, first search for the |
| 12921 | // chance to use UMAAL. Check if Addc uses a node which has already |
| 12922 | // been combined into a UMLAL. The other pattern is UMLAL using Addc/Adde |
| 12923 | // as the addend, and it's handled in PerformUMLALCombine. |
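// For reference (sketch): UMAAL computes a*b + x + y as a 64-bit result from
// 32-bit operands, so a UMLAL whose high addend is zero, plus one extra
// 32-bit value added through the glued ADDC/ADDE pair, can fold into a
// single UMAAL.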
| 12924 | |
| 12925 | if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP()) |
| 12926 | return AddCombineTo64bitMLAL(AddeSubeNode: AddeNode, DCI, Subtarget); |
| 12927 | |
| 12928 | // Check that we have a glued ADDC node. |
| 12929 | SDNode* AddcNode = AddeNode->getOperand(Num: 2).getNode(); |
| 12930 | if (AddcNode->getOpcode() != ARMISD::ADDC) |
| 12931 | return SDValue(); |
| 12932 | |
| 12933 | // Find the converted UMAAL or quit if it doesn't exist. |
| 12934 | SDNode *UmlalNode = nullptr; |
| 12935 | SDValue AddHi; |
| 12936 | if (AddcNode->getOperand(Num: 0).getOpcode() == ARMISD::UMLAL) { |
| 12937 | UmlalNode = AddcNode->getOperand(Num: 0).getNode(); |
| 12938 | AddHi = AddcNode->getOperand(Num: 1); |
| 12939 | } else if (AddcNode->getOperand(Num: 1).getOpcode() == ARMISD::UMLAL) { |
| 12940 | UmlalNode = AddcNode->getOperand(Num: 1).getNode(); |
| 12941 | AddHi = AddcNode->getOperand(Num: 0); |
| 12942 | } else { |
| 12943 | return AddCombineTo64bitMLAL(AddeSubeNode: AddeNode, DCI, Subtarget); |
| 12944 | } |
| 12945 | |
| 12946 | // The ADDC should be glued to an ADDE node, which uses the same UMLAL as |
| 12947 | // the ADDC as well as Zero. |
| 12948 | if (!isNullConstant(V: UmlalNode->getOperand(Num: 3))) |
| 12949 | return SDValue(); |
| 12950 | |
| 12951 | if ((isNullConstant(V: AddeNode->getOperand(Num: 0)) && |
| 12952 | AddeNode->getOperand(Num: 1).getNode() == UmlalNode) || |
| 12953 | (AddeNode->getOperand(Num: 0).getNode() == UmlalNode && |
| 12954 | isNullConstant(V: AddeNode->getOperand(Num: 1)))) { |
| 12955 | SelectionDAG &DAG = DCI.DAG; |
| 12956 | SDValue Ops[] = { UmlalNode->getOperand(Num: 0), UmlalNode->getOperand(Num: 1), |
| 12957 | UmlalNode->getOperand(Num: 2), AddHi }; |
| 12958 | SDValue UMAAL = DAG.getNode(Opcode: ARMISD::UMAAL, DL: SDLoc(AddcNode), |
| 12959 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), Ops); |
| 12960 | |
| 12961 | // Replace the ADDs' nodes uses by the UMAAL node's values. |
| 12962 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddeNode, 0), To: SDValue(UMAAL.getNode(), 1)); |
| 12963 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddcNode, 0), To: SDValue(UMAAL.getNode(), 0)); |
| 12964 | |
| 12965 | // Return original node to notify the driver to stop replacing. |
| 12966 | return SDValue(AddeNode, 0); |
| 12967 | } |
| 12968 | return SDValue(); |
| 12969 | } |
| 12970 | |
| 12971 | static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG, |
| 12972 | const ARMSubtarget *Subtarget) { |
| 12973 | if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP()) |
| 12974 | return SDValue(); |
| 12975 | |
| 12976 | // Check that we have a pair of ADDC and ADDE as operands. |
| 12977 | // Both addends of the ADDE must be zero. |
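// For example (illustrative sketch):
//   (UMLAL a, b, (ADDC x, y):0, (ADDE 0, 0, (ADDC x, y):1):0)
//     --> (UMAAL a, b, x, y)
// since a*b plus the carry-extended sum x + y is exactly what UMAAL computes.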
| 12978 | SDNode* AddcNode = N->getOperand(Num: 2).getNode(); |
| 12979 | SDNode* AddeNode = N->getOperand(Num: 3).getNode(); |
| 12980 | if ((AddcNode->getOpcode() == ARMISD::ADDC) && |
| 12981 | (AddeNode->getOpcode() == ARMISD::ADDE) && |
| 12982 | isNullConstant(V: AddeNode->getOperand(Num: 0)) && |
| 12983 | isNullConstant(V: AddeNode->getOperand(Num: 1)) && |
| 12984 | (AddeNode->getOperand(Num: 2).getNode() == AddcNode)) |
| 12985 | return DAG.getNode(Opcode: ARMISD::UMAAL, DL: SDLoc(N), |
| 12986 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
| 12987 | Ops: {N->getOperand(Num: 0), N->getOperand(Num: 1), |
| 12988 | AddcNode->getOperand(Num: 0), AddcNode->getOperand(Num: 1)}); |
| 12989 | else |
| 12990 | return SDValue(); |
| 12991 | } |
| 12992 | |
| 12993 | static SDValue PerformAddcSubcCombine(SDNode *N, |
| 12994 | TargetLowering::DAGCombinerInfo &DCI, |
| 12995 | const ARMSubtarget *Subtarget) { |
| 12996 | SelectionDAG &DAG(DCI.DAG); |
| 12997 | |
| 12998 | if (N->getOpcode() == ARMISD::SUBC && N->hasAnyUseOfValue(Value: 1)) { |
| 12999 | // (SUBC (ADDE 0, 0, C), 1) -> C |
| 13000 | SDValue LHS = N->getOperand(Num: 0); |
| 13001 | SDValue RHS = N->getOperand(Num: 1); |
| 13002 | if (LHS->getOpcode() == ARMISD::ADDE && |
| 13003 | isNullConstant(V: LHS->getOperand(Num: 0)) && |
| 13004 | isNullConstant(V: LHS->getOperand(Num: 1)) && isOneConstant(V: RHS)) { |
| 13005 | return DCI.CombineTo(N, Res0: SDValue(N, 0), Res1: LHS->getOperand(Num: 2)); |
| 13006 | } |
| 13007 | } |
| 13008 | |
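// Thumb1 cannot materialize negative immediates cheaply, so flip the
// operation instead: for example, (ARMISD::ADDC x, -4) becomes
// (ARMISD::SUBC x, 4), and vice versa.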
| 13009 | if (Subtarget->isThumb1Only()) { |
| 13010 | SDValue RHS = N->getOperand(Num: 1); |
| 13011 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: RHS)) { |
| 13012 | int32_t imm = C->getSExtValue(); |
| 13013 | if (imm < 0 && imm > std::numeric_limits<int>::min()) { |
| 13014 | SDLoc DL(N); |
| 13015 | RHS = DAG.getConstant(Val: -imm, DL, VT: MVT::i32); |
| 13016 | unsigned Opcode = (N->getOpcode() == ARMISD::ADDC) ? ARMISD::SUBC |
| 13017 | : ARMISD::ADDC; |
| 13018 | return DAG.getNode(Opcode, DL, VTList: N->getVTList(), N1: N->getOperand(Num: 0), N2: RHS); |
| 13019 | } |
| 13020 | } |
| 13021 | } |
| 13022 | |
| 13023 | return SDValue(); |
| 13024 | } |
| 13025 | |
| 13026 | static SDValue PerformAddeSubeCombine(SDNode *N, |
| 13027 | TargetLowering::DAGCombinerInfo &DCI, |
| 13028 | const ARMSubtarget *Subtarget) { |
| 13029 | if (Subtarget->isThumb1Only()) { |
| 13030 | SelectionDAG &DAG = DCI.DAG; |
| 13031 | SDValue RHS = N->getOperand(Num: 1); |
| 13032 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: RHS)) { |
| 13033 | int64_t imm = C->getSExtValue(); |
| 13034 | if (imm < 0) { |
| 13035 | SDLoc DL(N); |
| 13036 | |
| 13037 | // The with-carry-in form matches bitwise not instead of the negation. |
| 13038 | // Effectively, the inverse interpretation of the carry flag already |
| 13039 | // accounts for part of the negation. |
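// For example, (ARMISD::ADDE x, -1, carry) becomes
// (ARMISD::SUBE x, 0, carry): x + (-1) + carry equals
// x - 0 - (1 - carry) under the inverted borrow convention, and ~(-1) == 0.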
| 13040 | RHS = DAG.getConstant(Val: ~imm, DL, VT: MVT::i32); |
| 13041 | |
| 13042 | unsigned Opcode = (N->getOpcode() == ARMISD::ADDE) ? ARMISD::SUBE |
| 13043 | : ARMISD::ADDE; |
| 13044 | return DAG.getNode(Opcode, DL, VTList: N->getVTList(), |
| 13045 | N1: N->getOperand(Num: 0), N2: RHS, N3: N->getOperand(Num: 2)); |
| 13046 | } |
| 13047 | } |
| 13048 | } else if (N->getOperand(Num: 1)->getOpcode() == ISD::SMUL_LOHI) { |
| 13049 | return AddCombineTo64bitMLAL(AddeSubeNode: N, DCI, Subtarget); |
| 13050 | } |
| 13051 | return SDValue(); |
| 13052 | } |
| 13053 | |
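// Lower select(setcc(x, vecreduce(v)), x, vecreduce(v)) style patterns into
// the MVE VMINV/VMAXV reductions. For example (illustrative sketch):
//   (select (setult x, (vecreduce_umin v)), x, (vecreduce_umin v))
//     --> (ARMISD::VMINVu x, v)
// since VMINV folds x into the minimum across all lanes of v.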
| 13054 | static SDValue PerformSELECTCombine(SDNode *N, |
| 13055 | TargetLowering::DAGCombinerInfo &DCI, |
| 13056 | const ARMSubtarget *Subtarget) { |
| 13057 | if (!Subtarget->hasMVEIntegerOps()) |
| 13058 | return SDValue(); |
| 13059 | |
| 13060 | SDLoc dl(N); |
| 13061 | SDValue SetCC; |
| 13062 | SDValue LHS; |
| 13063 | SDValue RHS; |
| 13064 | ISD::CondCode CC; |
| 13065 | SDValue TrueVal; |
| 13066 | SDValue FalseVal; |
| 13067 | |
| 13068 | if (N->getOpcode() == ISD::SELECT && |
| 13069 | N->getOperand(Num: 0)->getOpcode() == ISD::SETCC) { |
| 13070 | SetCC = N->getOperand(Num: 0); |
| 13071 | LHS = SetCC->getOperand(Num: 0); |
| 13072 | RHS = SetCC->getOperand(Num: 1); |
| 13073 | CC = cast<CondCodeSDNode>(Val: SetCC->getOperand(Num: 2))->get(); |
| 13074 | TrueVal = N->getOperand(Num: 1); |
| 13075 | FalseVal = N->getOperand(Num: 2); |
| 13076 | } else if (N->getOpcode() == ISD::SELECT_CC) { |
| 13077 | LHS = N->getOperand(Num: 0); |
| 13078 | RHS = N->getOperand(Num: 1); |
| 13079 | CC = cast<CondCodeSDNode>(Val: N->getOperand(Num: 4))->get(); |
| 13080 | TrueVal = N->getOperand(Num: 2); |
| 13081 | FalseVal = N->getOperand(Num: 3); |
| 13082 | } else { |
| 13083 | return SDValue(); |
| 13084 | } |
| 13085 | |
| 13086 | unsigned int Opcode = 0; |
| 13087 | if ((TrueVal->getOpcode() == ISD::VECREDUCE_UMIN || |
| 13088 | FalseVal->getOpcode() == ISD::VECREDUCE_UMIN) && |
| 13089 | (CC == ISD::SETULT || CC == ISD::SETUGT)) { |
| 13090 | Opcode = ARMISD::VMINVu; |
| 13091 | if (CC == ISD::SETUGT) |
| 13092 | std::swap(a&: TrueVal, b&: FalseVal); |
| 13093 | } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_SMIN || |
| 13094 | FalseVal->getOpcode() == ISD::VECREDUCE_SMIN) && |
| 13095 | (CC == ISD::SETLT || CC == ISD::SETGT)) { |
| 13096 | Opcode = ARMISD::VMINVs; |
| 13097 | if (CC == ISD::SETGT) |
| 13098 | std::swap(a&: TrueVal, b&: FalseVal); |
| 13099 | } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_UMAX || |
| 13100 | FalseVal->getOpcode() == ISD::VECREDUCE_UMAX) && |
| 13101 | (CC == ISD::SETUGT || CC == ISD::SETULT)) { |
| 13102 | Opcode = ARMISD::VMAXVu; |
| 13103 | if (CC == ISD::SETULT) |
| 13104 | std::swap(a&: TrueVal, b&: FalseVal); |
| 13105 | } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_SMAX || |
| 13106 | FalseVal->getOpcode() == ISD::VECREDUCE_SMAX) && |
| 13107 | (CC == ISD::SETGT || CC == ISD::SETLT)) { |
| 13108 | Opcode = ARMISD::VMAXVs; |
| 13109 | if (CC == ISD::SETLT) |
| 13110 | std::swap(a&: TrueVal, b&: FalseVal); |
| 13111 | } else |
| 13112 | return SDValue(); |
| 13113 | |
| 13114 | // Normalise to the right hand side being the vector reduction |
| 13115 | switch (TrueVal->getOpcode()) { |
| 13116 | case ISD::VECREDUCE_UMIN: |
| 13117 | case ISD::VECREDUCE_SMIN: |
| 13118 | case ISD::VECREDUCE_UMAX: |
| 13119 | case ISD::VECREDUCE_SMAX: |
| 13120 | std::swap(a&: LHS, b&: RHS); |
| 13121 | std::swap(a&: TrueVal, b&: FalseVal); |
| 13122 | break; |
| 13123 | } |
| 13124 | |
| 13125 | EVT VectorType = FalseVal->getOperand(Num: 0).getValueType(); |
| 13126 | |
| 13127 | if (VectorType != MVT::v16i8 && VectorType != MVT::v8i16 && |
| 13128 | VectorType != MVT::v4i32) |
| 13129 | return SDValue(); |
| 13130 | |
| 13131 | EVT VectorScalarType = VectorType.getVectorElementType(); |
| 13132 | |
| 13133 | // The values being selected must also be the ones being compared |
| 13134 | if (TrueVal != LHS || FalseVal != RHS) |
| 13135 | return SDValue(); |
| 13136 | |
| 13137 | EVT LeftType = LHS->getValueType(ResNo: 0); |
| 13138 | EVT RightType = RHS->getValueType(ResNo: 0); |
| 13139 | |
| 13140 | // The types must match the reduced type too |
| 13141 | if (LeftType != VectorScalarType || RightType != VectorScalarType) |
| 13142 | return SDValue(); |
| 13143 | |
| 13144 | // Legalise the scalar to an i32 |
| 13145 | if (VectorScalarType != MVT::i32) |
| 13146 | LHS = DCI.DAG.getNode(Opcode: ISD::ANY_EXTEND, DL: dl, VT: MVT::i32, Operand: LHS); |
| 13147 | |
| 13148 | // Generate the reduction as an i32 for legalisation purposes |
| 13149 | auto Reduction = |
| 13150 | DCI.DAG.getNode(Opcode, DL: dl, VT: MVT::i32, N1: LHS, N2: RHS->getOperand(Num: 0)); |
| 13151 | |
| 13152 | // The result isn't actually an i32 so truncate it back to its original type |
| 13153 | if (VectorScalarType != MVT::i32) |
| 13154 | Reduction = DCI.DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: VectorScalarType, Operand: Reduction); |
| 13155 | |
| 13156 | return Reduction; |
| 13157 | } |
| 13158 | |
// A special combine for the vqdmulh family of instructions. This is one of
// the potential set of patterns that could match this instruction. The base
// pattern you would expect is min(max(ashr(mul(mul(sext(x), 2), sext(y)), 16))).
// This matches the slightly different min(max(ashr(mul(mul(sext(x), sext(y)), 2), 16))),
// which llvm will have optimized to min(ashr(mul(sext(x), sext(y)), 15)) as
// the max is unnecessary.
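// For example (illustrative sketch), for i16 lanes:
//   (smin (sra (mul (sext a), (sext b)), 15), 32767) --> vqdmulh.s16 a, b
// because vqdmulh computes saturate((2 * a * b) >> 16), which is
// (a * b) >> 15 clamped to the signed 16-bit range.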
| 13165 | static SDValue PerformVQDMULHCombine(SDNode *N, SelectionDAG &DAG) { |
| 13166 | EVT VT = N->getValueType(ResNo: 0); |
| 13167 | SDValue Shft; |
| 13168 | ConstantSDNode *Clamp; |
| 13169 | |
| 13170 | if (!VT.isVector() || VT.getScalarSizeInBits() > 64) |
| 13171 | return SDValue(); |
| 13172 | |
| 13173 | if (N->getOpcode() == ISD::SMIN) { |
| 13174 | Shft = N->getOperand(Num: 0); |
| 13175 | Clamp = isConstOrConstSplat(N: N->getOperand(Num: 1)); |
| 13176 | } else if (N->getOpcode() == ISD::VSELECT) { |
// Detect an SMIN, which for an i64 node will be a vselect/setcc, not a smin.
| 13178 | SDValue Cmp = N->getOperand(Num: 0); |
| 13179 | if (Cmp.getOpcode() != ISD::SETCC || |
| 13180 | cast<CondCodeSDNode>(Val: Cmp.getOperand(i: 2))->get() != ISD::SETLT || |
| 13181 | Cmp.getOperand(i: 0) != N->getOperand(Num: 1) || |
| 13182 | Cmp.getOperand(i: 1) != N->getOperand(Num: 2)) |
| 13183 | return SDValue(); |
| 13184 | Shft = N->getOperand(Num: 1); |
| 13185 | Clamp = isConstOrConstSplat(N: N->getOperand(Num: 2)); |
| 13186 | } else |
| 13187 | return SDValue(); |
| 13188 | |
| 13189 | if (!Clamp) |
| 13190 | return SDValue(); |
| 13191 | |
| 13192 | MVT ScalarType; |
| 13193 | int ShftAmt = 0; |
| 13194 | switch (Clamp->getSExtValue()) { |
| 13195 | case (1 << 7) - 1: |
| 13196 | ScalarType = MVT::i8; |
| 13197 | ShftAmt = 7; |
| 13198 | break; |
| 13199 | case (1 << 15) - 1: |
| 13200 | ScalarType = MVT::i16; |
| 13201 | ShftAmt = 15; |
| 13202 | break; |
| 13203 | case (1ULL << 31) - 1: |
| 13204 | ScalarType = MVT::i32; |
| 13205 | ShftAmt = 31; |
| 13206 | break; |
| 13207 | default: |
| 13208 | return SDValue(); |
| 13209 | } |
| 13210 | |
| 13211 | if (Shft.getOpcode() != ISD::SRA) |
| 13212 | return SDValue(); |
| 13213 | ConstantSDNode *N1 = isConstOrConstSplat(N: Shft.getOperand(i: 1)); |
| 13214 | if (!N1 || N1->getSExtValue() != ShftAmt) |
| 13215 | return SDValue(); |
| 13216 | |
| 13217 | SDValue Mul = Shft.getOperand(i: 0); |
| 13218 | if (Mul.getOpcode() != ISD::MUL) |
| 13219 | return SDValue(); |
| 13220 | |
| 13221 | SDValue Ext0 = Mul.getOperand(i: 0); |
| 13222 | SDValue Ext1 = Mul.getOperand(i: 1); |
| 13223 | if (Ext0.getOpcode() != ISD::SIGN_EXTEND || |
| 13224 | Ext1.getOpcode() != ISD::SIGN_EXTEND) |
| 13225 | return SDValue(); |
| 13226 | EVT VecVT = Ext0.getOperand(i: 0).getValueType(); |
| 13227 | if (!VecVT.isPow2VectorType() || VecVT.getVectorNumElements() == 1) |
| 13228 | return SDValue(); |
| 13229 | if (Ext1.getOperand(i: 0).getValueType() != VecVT || |
| 13230 | VecVT.getScalarType() != ScalarType || |
| 13231 | VT.getScalarSizeInBits() < ScalarType.getScalarSizeInBits() * 2) |
| 13232 | return SDValue(); |
| 13233 | |
| 13234 | SDLoc DL(Mul); |
| 13235 | unsigned LegalLanes = 128 / (ShftAmt + 1); |
| 13236 | EVT LegalVecVT = MVT::getVectorVT(VT: ScalarType, NumElements: LegalLanes); |
// For types smaller than legal vectors, extend to a legal type and use only
// the needed lanes.
| 13239 | if (VecVT.getSizeInBits() < 128) { |
| 13240 | EVT ExtVecVT = |
| 13241 | MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: 128 / VecVT.getVectorNumElements()), |
| 13242 | NumElements: VecVT.getVectorNumElements()); |
| 13243 | SDValue Inp0 = |
| 13244 | DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: ExtVecVT, Operand: Ext0.getOperand(i: 0)); |
| 13245 | SDValue Inp1 = |
| 13246 | DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: ExtVecVT, Operand: Ext1.getOperand(i: 0)); |
| 13247 | Inp0 = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT: LegalVecVT, Operand: Inp0); |
| 13248 | Inp1 = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT: LegalVecVT, Operand: Inp1); |
| 13249 | SDValue VQDMULH = DAG.getNode(Opcode: ARMISD::VQDMULH, DL, VT: LegalVecVT, N1: Inp0, N2: Inp1); |
| 13250 | SDValue Trunc = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT: ExtVecVT, Operand: VQDMULH); |
| 13251 | Trunc = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: VecVT, Operand: Trunc); |
| 13252 | return DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL, VT, Operand: Trunc); |
| 13253 | } |
| 13254 | |
| 13255 | // For larger types, split into legal sized chunks. |
assert(VecVT.getSizeInBits() % 128 == 0 && "Expected a power2 type");
| 13257 | unsigned NumParts = VecVT.getSizeInBits() / 128; |
| 13258 | SmallVector<SDValue> Parts; |
| 13259 | for (unsigned I = 0; I < NumParts; ++I) { |
| 13260 | SDValue Inp0 = |
| 13261 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT: LegalVecVT, N1: Ext0.getOperand(i: 0), |
| 13262 | N2: DAG.getVectorIdxConstant(Val: I * LegalLanes, DL)); |
| 13263 | SDValue Inp1 = |
| 13264 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT: LegalVecVT, N1: Ext1.getOperand(i: 0), |
| 13265 | N2: DAG.getVectorIdxConstant(Val: I * LegalLanes, DL)); |
| 13266 | SDValue VQDMULH = DAG.getNode(Opcode: ARMISD::VQDMULH, DL, VT: LegalVecVT, N1: Inp0, N2: Inp1); |
| 13267 | Parts.push_back(Elt: VQDMULH); |
| 13268 | } |
| 13269 | return DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL, VT, |
| 13270 | Operand: DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: VecVT, Ops: Parts)); |
| 13271 | } |
| 13272 | |
| 13273 | static SDValue PerformVSELECTCombine(SDNode *N, |
| 13274 | TargetLowering::DAGCombinerInfo &DCI, |
| 13275 | const ARMSubtarget *Subtarget) { |
| 13276 | if (!Subtarget->hasMVEIntegerOps()) |
| 13277 | return SDValue(); |
| 13278 | |
| 13279 | if (SDValue V = PerformVQDMULHCombine(N, DAG&: DCI.DAG)) |
| 13280 | return V; |
| 13281 | |
| 13282 | // Transforms vselect(not(cond), lhs, rhs) into vselect(cond, rhs, lhs). |
| 13283 | // |
| 13284 | // We need to re-implement this optimization here as the implementation in the |
| 13285 | // Target-Independent DAGCombiner does not handle the kind of constant we make |
| 13286 | // (it calls isConstOrConstSplat with AllowTruncation set to false - and for |
| 13287 | // good reason, allowing truncation there would break other targets). |
| 13288 | // |
| 13289 | // Currently, this is only done for MVE, as it's the only target that benefits |
| 13290 | // from this transformation (e.g. VPNOT+VPSEL becomes a single VPSEL). |
| 13291 | if (N->getOperand(Num: 0).getOpcode() != ISD::XOR) |
| 13292 | return SDValue(); |
| 13293 | SDValue XOR = N->getOperand(Num: 0); |
| 13294 | |
| 13295 | // Check if the XOR's RHS is either a 1, or a BUILD_VECTOR of 1s. |
| 13296 | // It is important to check with truncation allowed as the BUILD_VECTORs we |
| 13297 | // generate in those situations will truncate their operands. |
| 13298 | ConstantSDNode *Const = |
| 13299 | isConstOrConstSplat(N: XOR->getOperand(Num: 1), /*AllowUndefs*/ false, |
| 13300 | /*AllowTruncation*/ true); |
| 13301 | if (!Const || !Const->isOne()) |
| 13302 | return SDValue(); |
| 13303 | |
| 13304 | // Rewrite into vselect(cond, rhs, lhs). |
| 13305 | SDValue Cond = XOR->getOperand(Num: 0); |
| 13306 | SDValue LHS = N->getOperand(Num: 1); |
| 13307 | SDValue RHS = N->getOperand(Num: 2); |
| 13308 | EVT Type = N->getValueType(ResNo: 0); |
| 13309 | return DCI.DAG.getNode(Opcode: ISD::VSELECT, DL: SDLoc(N), VT: Type, N1: Cond, N2: RHS, N3: LHS); |
| 13310 | } |
| 13311 | |
| 13312 | // Convert vsetcc([0,1,2,..], splat(n), ult) -> vctp n |
| 13313 | static SDValue PerformVSetCCToVCTPCombine(SDNode *N, |
| 13314 | TargetLowering::DAGCombinerInfo &DCI, |
| 13315 | const ARMSubtarget *Subtarget) { |
| 13316 | SDValue Op0 = N->getOperand(Num: 0); |
| 13317 | SDValue Op1 = N->getOperand(Num: 1); |
| 13318 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: N->getOperand(Num: 2))->get(); |
| 13319 | EVT VT = N->getValueType(ResNo: 0); |
| 13320 | |
| 13321 | if (!Subtarget->hasMVEIntegerOps() || |
| 13322 | !DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
| 13323 | return SDValue(); |
| 13324 | |
| 13325 | if (CC == ISD::SETUGE) { |
| 13326 | std::swap(a&: Op0, b&: Op1); |
| 13327 | CC = ISD::SETULT; |
| 13328 | } |
| 13329 | |
| 13330 | if (CC != ISD::SETULT || VT.getScalarSizeInBits() != 1 || |
| 13331 | Op0.getOpcode() != ISD::BUILD_VECTOR) |
| 13332 | return SDValue(); |
| 13333 | |
| 13334 | // Check first operand is BuildVector of 0,1,2,... |
| 13335 | for (unsigned I = 0; I < VT.getVectorNumElements(); I++) { |
| 13336 | if (!Op0.getOperand(i: I).isUndef() && |
| 13337 | !(isa<ConstantSDNode>(Val: Op0.getOperand(i: I)) && |
| 13338 | Op0.getConstantOperandVal(i: I) == I)) |
| 13339 | return SDValue(); |
| 13340 | } |
| 13341 | |
// The second operand must be a splat of some value Op1S.
| 13343 | SDValue Op1S = DCI.DAG.getSplatValue(V: Op1); |
| 13344 | if (!Op1S) |
| 13345 | return SDValue(); |
| 13346 | |
| 13347 | unsigned Opc; |
| 13348 | switch (VT.getVectorNumElements()) { |
| 13349 | case 2: |
| 13350 | Opc = Intrinsic::arm_mve_vctp64; |
| 13351 | break; |
| 13352 | case 4: |
| 13353 | Opc = Intrinsic::arm_mve_vctp32; |
| 13354 | break; |
| 13355 | case 8: |
| 13356 | Opc = Intrinsic::arm_mve_vctp16; |
| 13357 | break; |
| 13358 | case 16: |
| 13359 | Opc = Intrinsic::arm_mve_vctp8; |
| 13360 | break; |
| 13361 | default: |
| 13362 | return SDValue(); |
| 13363 | } |
| 13364 | |
| 13365 | SDLoc DL(N); |
| 13366 | return DCI.DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT, |
| 13367 | N1: DCI.DAG.getConstant(Val: Opc, DL, VT: MVT::i32), |
| 13368 | N2: DCI.DAG.getZExtOrTrunc(Op: Op1S, DL, VT: MVT::i32)); |
| 13369 | } |
| 13370 | |
/// PerformADDECombine - Target-specific dag combine transform from
/// ARMISD::ADDC, ARMISD::ADDE, and ISD::[SU]MUL_LOHI to MLAL, or from
/// ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL.
| 13374 | static SDValue PerformADDECombine(SDNode *N, |
| 13375 | TargetLowering::DAGCombinerInfo &DCI, |
| 13376 | const ARMSubtarget *Subtarget) { |
| 13377 | // Only ARM and Thumb2 support UMLAL/SMLAL. |
| 13378 | if (Subtarget->isThumb1Only()) |
| 13379 | return PerformAddeSubeCombine(N, DCI, Subtarget); |
| 13380 | |
| 13381 | // Only perform the checks after legalize when the pattern is available. |
| 13382 | if (DCI.isBeforeLegalize()) return SDValue(); |
| 13383 | |
| 13384 | return AddCombineTo64bitUMAAL(AddeNode: N, DCI, Subtarget); |
| 13385 | } |
| 13386 | |
| 13387 | /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with |
| 13388 | /// operands N0 and N1. This is a helper for PerformADDCombine that is |
| 13389 | /// called with the default operands, and if that fails, with commuted |
| 13390 | /// operands. |
| 13391 | static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, |
| 13392 | TargetLowering::DAGCombinerInfo &DCI, |
| 13393 | const ARMSubtarget *Subtarget){ |
| 13394 | // Attempt to create vpadd for this add. |
| 13395 | if (SDValue Result = AddCombineToVPADD(N, N0, N1, DCI, Subtarget)) |
| 13396 | return Result; |
| 13397 | |
| 13398 | // Attempt to create vpaddl for this add. |
| 13399 | if (SDValue Result = AddCombineVUZPToVPADDL(N, N0, N1, DCI, Subtarget)) |
| 13400 | return Result; |
| 13401 | if (SDValue Result = AddCombineBUILD_VECTORToVPADDL(N, N0, N1, DCI, |
| 13402 | Subtarget)) |
| 13403 | return Result; |
| 13404 | |
// fold (add (select cc, 0, c), x) -> (select cc, x, (add x, c))
| 13406 | if (N0.getNode()->hasOneUse()) |
| 13407 | if (SDValue Result = combineSelectAndUse(N, Slct: N0, OtherOp: N1, DCI)) |
| 13408 | return Result; |
| 13409 | return SDValue(); |
| 13410 | } |
| 13411 | |
static SDValue TryDistributionADDVecReduce(SDNode *N, SelectionDAG &DAG) {
| 13413 | EVT VT = N->getValueType(ResNo: 0); |
| 13414 | SDValue N0 = N->getOperand(Num: 0); |
| 13415 | SDValue N1 = N->getOperand(Num: 1); |
| 13416 | SDLoc dl(N); |
| 13417 | |
| 13418 | auto IsVecReduce = [](SDValue Op) { |
| 13419 | switch (Op.getOpcode()) { |
| 13420 | case ISD::VECREDUCE_ADD: |
| 13421 | case ARMISD::VADDVs: |
| 13422 | case ARMISD::VADDVu: |
| 13423 | case ARMISD::VMLAVs: |
| 13424 | case ARMISD::VMLAVu: |
| 13425 | return true; |
| 13426 | } |
| 13427 | return false; |
| 13428 | }; |
| 13429 | |
auto DistributeAddAddVecReduce = [&](SDValue N0, SDValue N1) {
| 13431 | // Distribute add(X, add(vecreduce(Y), vecreduce(Z))) -> |
| 13432 | // add(add(X, vecreduce(Y)), vecreduce(Z)) |
| 13433 | // to make better use of vaddva style instructions. |
| 13434 | if (VT == MVT::i32 && N1.getOpcode() == ISD::ADD && !IsVecReduce(N0) && |
| 13435 | IsVecReduce(N1.getOperand(i: 0)) && IsVecReduce(N1.getOperand(i: 1)) && |
| 13436 | !isa<ConstantSDNode>(Val: N0) && N1->hasOneUse()) { |
| 13437 | SDValue Add0 = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: N0, N2: N1.getOperand(i: 0)); |
| 13438 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: Add0, N2: N1.getOperand(i: 1)); |
| 13439 | } |
| 13440 | // And turn add(add(A, reduce(B)), add(C, reduce(D))) -> |
| 13441 | // add(add(add(A, C), reduce(B)), reduce(D)) |
| 13442 | if (VT == MVT::i32 && N0.getOpcode() == ISD::ADD && |
| 13443 | N1.getOpcode() == ISD::ADD && N0->hasOneUse() && N1->hasOneUse()) { |
| 13444 | unsigned N0RedOp = 0; |
| 13445 | if (!IsVecReduce(N0.getOperand(i: N0RedOp))) { |
| 13446 | N0RedOp = 1; |
| 13447 | if (!IsVecReduce(N0.getOperand(i: N0RedOp))) |
| 13448 | return SDValue(); |
| 13449 | } |
| 13450 | |
| 13451 | unsigned N1RedOp = 0; |
| 13452 | if (!IsVecReduce(N1.getOperand(i: N1RedOp))) |
| 13453 | N1RedOp = 1; |
| 13454 | if (!IsVecReduce(N1.getOperand(i: N1RedOp))) |
| 13455 | return SDValue(); |
| 13456 | |
| 13457 | SDValue Add0 = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: N0.getOperand(i: 1 - N0RedOp), |
| 13458 | N2: N1.getOperand(i: 1 - N1RedOp)); |
| 13459 | SDValue Add1 = |
| 13460 | DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: Add0, N2: N0.getOperand(i: N0RedOp)); |
| 13461 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: Add1, N2: N1.getOperand(i: N1RedOp)); |
| 13462 | } |
| 13463 | return SDValue(); |
| 13464 | }; |
if (SDValue R = DistributeAddAddVecReduce(N0, N1))
return R;
if (SDValue R = DistributeAddAddVecReduce(N1, N0))
return R;
| 13469 | |
| 13470 | // Distribute add(vecreduce(load(Y)), vecreduce(load(Z))) |
| 13471 | // Or add(add(X, vecreduce(load(Y))), vecreduce(load(Z))) |
| 13472 | // by ascending load offsets. This can help cores prefetch if the order of |
| 13473 | // loads is more predictable. |
auto DistributeVecReduceLoad = [&](SDValue N0, SDValue N1, bool IsForward) {
// Check if two reductions are known to load data where one is before/after
// another. Return negative if N0 loads data before N1, positive if N1 is
// before N0, and 0 if nothing is known.
| 13478 | auto IsKnownOrderedLoad = [&](SDValue N0, SDValue N1) { |
| 13479 | // Look through to the first operand of a MUL, for the VMLA case. |
| 13480 | // Currently only looks at the first operand, in the hope they are equal. |
| 13481 | if (N0.getOpcode() == ISD::MUL) |
| 13482 | N0 = N0.getOperand(i: 0); |
| 13483 | if (N1.getOpcode() == ISD::MUL) |
| 13484 | N1 = N1.getOperand(i: 0); |
| 13485 | |
| 13486 | // Return true if the two operands are loads to the same object and the |
| 13487 | // offset of the first is known to be less than the offset of the second. |
| 13488 | LoadSDNode *Load0 = dyn_cast<LoadSDNode>(Val&: N0); |
| 13489 | LoadSDNode *Load1 = dyn_cast<LoadSDNode>(Val&: N1); |
| 13490 | if (!Load0 || !Load1 || Load0->getChain() != Load1->getChain() || |
| 13491 | !Load0->isSimple() || !Load1->isSimple() || Load0->isIndexed() || |
| 13492 | Load1->isIndexed()) |
| 13493 | return 0; |
| 13494 | |
| 13495 | auto BaseLocDecomp0 = BaseIndexOffset::match(N: Load0, DAG); |
| 13496 | auto BaseLocDecomp1 = BaseIndexOffset::match(N: Load1, DAG); |
| 13497 | |
| 13498 | if (!BaseLocDecomp0.getBase() || |
| 13499 | BaseLocDecomp0.getBase() != BaseLocDecomp1.getBase() || |
| 13500 | !BaseLocDecomp0.hasValidOffset() || !BaseLocDecomp1.hasValidOffset()) |
| 13501 | return 0; |
| 13502 | if (BaseLocDecomp0.getOffset() < BaseLocDecomp1.getOffset()) |
| 13503 | return -1; |
| 13504 | if (BaseLocDecomp0.getOffset() > BaseLocDecomp1.getOffset()) |
| 13505 | return 1; |
| 13506 | return 0; |
| 13507 | }; |
| 13508 | |
| 13509 | SDValue X; |
| 13510 | if (N0.getOpcode() == ISD::ADD && N0->hasOneUse()) { |
| 13511 | if (IsVecReduce(N0.getOperand(i: 0)) && IsVecReduce(N0.getOperand(i: 1))) { |
| 13512 | int IsBefore = IsKnownOrderedLoad(N0.getOperand(i: 0).getOperand(i: 0), |
| 13513 | N0.getOperand(i: 1).getOperand(i: 0)); |
| 13514 | if (IsBefore < 0) { |
| 13515 | X = N0.getOperand(i: 0); |
| 13516 | N0 = N0.getOperand(i: 1); |
| 13517 | } else if (IsBefore > 0) { |
| 13518 | X = N0.getOperand(i: 1); |
| 13519 | N0 = N0.getOperand(i: 0); |
| 13520 | } else |
| 13521 | return SDValue(); |
| 13522 | } else if (IsVecReduce(N0.getOperand(i: 0))) { |
| 13523 | X = N0.getOperand(i: 1); |
| 13524 | N0 = N0.getOperand(i: 0); |
| 13525 | } else if (IsVecReduce(N0.getOperand(i: 1))) { |
| 13526 | X = N0.getOperand(i: 0); |
| 13527 | N0 = N0.getOperand(i: 1); |
| 13528 | } else |
| 13529 | return SDValue(); |
| 13530 | } else if (IsForward && IsVecReduce(N0) && IsVecReduce(N1) && |
| 13531 | IsKnownOrderedLoad(N0.getOperand(i: 0), N1.getOperand(i: 0)) < 0) { |
// Note this is backward to how you would expect. We create
// add(reduce(load + 16), reduce(load + 0)) so that the
// add(reduce(load + 16), X) is combined into VADDVA(X, load + 16), leaving
// the X as VADDV(load + 0).
| 13536 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1, N2: N0); |
| 13537 | } else |
| 13538 | return SDValue(); |
| 13539 | |
| 13540 | if (!IsVecReduce(N0) || !IsVecReduce(N1)) |
| 13541 | return SDValue(); |
| 13542 | |
| 13543 | if (IsKnownOrderedLoad(N1.getOperand(i: 0), N0.getOperand(i: 0)) >= 0) |
| 13544 | return SDValue(); |
| 13545 | |
| 13546 | // Switch from add(add(X, N0), N1) to add(add(X, N1), N0) |
| 13547 | SDValue Add0 = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: X, N2: N1); |
| 13548 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: Add0, N2: N0); |
| 13549 | }; |
if (SDValue R = DistributeVecReduceLoad(N0, N1, true))
return R;
if (SDValue R = DistributeVecReduceLoad(N1, N0, false))
return R;
| 13554 | return SDValue(); |
| 13555 | } |
| 13556 | |
| 13557 | static SDValue PerformADDVecReduce(SDNode *N, SelectionDAG &DAG, |
| 13558 | const ARMSubtarget *Subtarget) { |
| 13559 | if (!Subtarget->hasMVEIntegerOps()) |
| 13560 | return SDValue(); |
| 13561 | |
if (SDValue R = TryDistributionADDVecReduce(N, DAG))
| 13563 | return R; |
| 13564 | |
| 13565 | EVT VT = N->getValueType(ResNo: 0); |
| 13566 | SDValue N0 = N->getOperand(Num: 0); |
| 13567 | SDValue N1 = N->getOperand(Num: 1); |
| 13568 | SDLoc dl(N); |
| 13569 | |
| 13570 | if (VT != MVT::i64) |
| 13571 | return SDValue(); |
| 13572 | |
// We are looking for an i64 add of a VADDLVx. Due to these being i64's, this
| 13574 | // will look like: |
| 13575 | // t1: i32,i32 = ARMISD::VADDLVs x |
| 13576 | // t2: i64 = build_pair t1, t1:1 |
| 13577 | // t3: i64 = add t2, y |
| 13578 | // Otherwise we try to push the add up above VADDLVAx, to potentially allow |
| 13579 | // the add to be simplified separately. |
// We also need to check for sext / zext and commutative adds.
| 13581 | auto MakeVecReduce = [&](unsigned Opcode, unsigned OpcodeA, SDValue NA, |
| 13582 | SDValue NB) { |
| 13583 | if (NB->getOpcode() != ISD::BUILD_PAIR) |
| 13584 | return SDValue(); |
| 13585 | SDValue VecRed = NB->getOperand(Num: 0); |
| 13586 | if ((VecRed->getOpcode() != Opcode && VecRed->getOpcode() != OpcodeA) || |
| 13587 | VecRed.getResNo() != 0 || |
| 13588 | NB->getOperand(Num: 1) != SDValue(VecRed.getNode(), 1)) |
| 13589 | return SDValue(); |
| 13590 | |
| 13591 | if (VecRed->getOpcode() == OpcodeA) { |
// add(NA, VADDLVA(Inp, Y)) -> VADDLVA(add(NA, Inp), Y)
| 13593 | SDValue Inp = DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, |
| 13594 | N1: VecRed.getOperand(i: 0), N2: VecRed.getOperand(i: 1)); |
| 13595 | NA = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::i64, N1: Inp, N2: NA); |
| 13596 | } |
| 13597 | |
| 13598 | SmallVector<SDValue, 4> Ops(2); |
| 13599 | std::tie(args&: Ops[0], args&: Ops[1]) = DAG.SplitScalar(N: NA, DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
| 13600 | |
| 13601 | unsigned S = VecRed->getOpcode() == OpcodeA ? 2 : 0; |
| 13602 | for (unsigned I = S, E = VecRed.getNumOperands(); I < E; I++) |
| 13603 | Ops.push_back(Elt: VecRed->getOperand(Num: I)); |
| 13604 | SDValue Red = |
| 13605 | DAG.getNode(Opcode: OpcodeA, DL: dl, VTList: DAG.getVTList(VTs: {MVT::i32, MVT::i32}), Ops); |
| 13606 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Red, |
| 13607 | N2: SDValue(Red.getNode(), 1)); |
| 13608 | }; |
| 13609 | |
| 13610 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N0, N1)) |
| 13611 | return M; |
| 13612 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N0, N1)) |
| 13613 | return M; |
| 13614 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N1, N0)) |
| 13615 | return M; |
| 13616 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N1, N0)) |
| 13617 | return M; |
| 13618 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N0, N1)) |
| 13619 | return M; |
| 13620 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N0, N1)) |
| 13621 | return M; |
| 13622 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N1, N0)) |
| 13623 | return M; |
| 13624 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N1, N0)) |
| 13625 | return M; |
| 13626 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N0, N1)) |
| 13627 | return M; |
| 13628 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N0, N1)) |
| 13629 | return M; |
| 13630 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N1, N0)) |
| 13631 | return M; |
| 13632 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N1, N0)) |
| 13633 | return M; |
| 13634 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVps, ARMISD::VMLALVAps, N0, N1)) |
| 13635 | return M; |
| 13636 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVpu, ARMISD::VMLALVApu, N0, N1)) |
| 13637 | return M; |
| 13638 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVps, ARMISD::VMLALVAps, N1, N0)) |
| 13639 | return M; |
| 13640 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVpu, ARMISD::VMLALVApu, N1, N0)) |
| 13641 | return M; |
| 13642 | return SDValue(); |
| 13643 | } |
| 13644 | |
| 13645 | bool |
| 13646 | ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N, |
| 13647 | CombineLevel Level) const { |
| 13648 | assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA || |
| 13649 | N->getOpcode() == ISD::SRL) && |
| 13650 | "Expected shift op" ); |
| 13651 | |
| 13652 | SDValue ShiftLHS = N->getOperand(Num: 0); |
| 13653 | if (!ShiftLHS->hasOneUse()) |
| 13654 | return false; |
| 13655 | |
| 13656 | if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND && |
| 13657 | !ShiftLHS.getOperand(i: 0)->hasOneUse()) |
| 13658 | return false; |
| 13659 | |
| 13660 | if (Level == BeforeLegalizeTypes) |
| 13661 | return true; |
| 13662 | |
| 13663 | if (N->getOpcode() != ISD::SHL) |
| 13664 | return true; |
| 13665 | |
| 13666 | if (Subtarget->isThumb1Only()) { |
| 13667 | // Avoid making expensive immediates by commuting shifts. (This logic |
| 13668 | // only applies to Thumb1 because ARM and Thumb2 immediates can be shifted |
| 13669 | // for free.) |
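// For example, commuting (shl (add x, 200), 2) would require
// materializing the constant 800, which does not fit in the 8-bit
// immediate of a Thumb1 add.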
| 13670 | if (N->getOpcode() != ISD::SHL) |
| 13671 | return true; |
| 13672 | SDValue N1 = N->getOperand(Num: 0); |
| 13673 | if (N1->getOpcode() != ISD::ADD && N1->getOpcode() != ISD::AND && |
| 13674 | N1->getOpcode() != ISD::OR && N1->getOpcode() != ISD::XOR) |
| 13675 | return true; |
| 13676 | if (auto *Const = dyn_cast<ConstantSDNode>(Val: N1->getOperand(Num: 1))) { |
| 13677 | if (Const->getAPIntValue().ult(RHS: 256)) |
| 13678 | return false; |
| 13679 | if (N1->getOpcode() == ISD::ADD && Const->getAPIntValue().slt(RHS: 0) && |
| 13680 | Const->getAPIntValue().sgt(RHS: -256)) |
| 13681 | return false; |
| 13682 | } |
| 13683 | return true; |
| 13684 | } |
| 13685 | |
| 13686 | // Turn off commute-with-shift transform after legalization, so it doesn't |
| 13687 | // conflict with PerformSHLSimplify. (We could try to detect when |
| 13688 | // PerformSHLSimplify would trigger more precisely, but it isn't |
| 13689 | // really necessary.) |
| 13690 | return false; |
| 13691 | } |
| 13692 | |
| 13693 | bool ARMTargetLowering::isDesirableToCommuteXorWithShift( |
| 13694 | const SDNode *N) const { |
| 13695 | assert(N->getOpcode() == ISD::XOR && |
| 13696 | (N->getOperand(0).getOpcode() == ISD::SHL || |
| 13697 | N->getOperand(0).getOpcode() == ISD::SRL) && |
| 13698 | "Expected XOR(SHIFT) pattern" ); |
| 13699 | |
| 13700 | // Only commute if the entire NOT mask is a hidden shifted mask. |
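// For example (sketch): for (xor (shl x, 8), 0xFFFFFF00) on i32, the NOT
// mask 0xFFFFFF00 is a shifted mask with MaskIdx == 8 and MaskLen == 24,
// so the commute is allowed.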
| 13701 | auto *XorC = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1)); |
| 13702 | auto *ShiftC = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 0).getOperand(i: 1)); |
| 13703 | if (XorC && ShiftC) { |
| 13704 | unsigned MaskIdx, MaskLen; |
| 13705 | if (XorC->getAPIntValue().isShiftedMask(MaskIdx, MaskLen)) { |
| 13706 | unsigned ShiftAmt = ShiftC->getZExtValue(); |
| 13707 | unsigned BitWidth = N->getValueType(ResNo: 0).getScalarSizeInBits(); |
| 13708 | if (N->getOperand(Num: 0).getOpcode() == ISD::SHL) |
| 13709 | return MaskIdx == ShiftAmt && MaskLen == (BitWidth - ShiftAmt); |
| 13710 | return MaskIdx == 0 && MaskLen == (BitWidth - ShiftAmt); |
| 13711 | } |
| 13712 | } |
| 13713 | |
| 13714 | return false; |
| 13715 | } |
| 13716 | |
| 13717 | bool ARMTargetLowering::shouldFoldConstantShiftPairToMask( |
| 13718 | const SDNode *N) const { |
| 13719 | assert(((N->getOpcode() == ISD::SHL && |
| 13720 | N->getOperand(0).getOpcode() == ISD::SRL) || |
| 13721 | (N->getOpcode() == ISD::SRL && |
| 13722 | N->getOperand(0).getOpcode() == ISD::SHL)) && |
| 13723 | "Expected shift-shift mask" ); |
| 13724 | |
| 13725 | if (!Subtarget->isThumb1Only()) |
| 13726 | return true; |
| 13727 | |
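// On Thumb1, a pair of immediate shifts is usually cheaper than an AND with
// a mask constant that would first have to be materialized.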
| 13728 | EVT VT = N->getValueType(ResNo: 0); |
| 13729 | if (VT.getScalarSizeInBits() > 32) |
| 13730 | return true; |
| 13731 | |
| 13732 | return false; |
| 13733 | } |
| 13734 | |
| 13735 | bool ARMTargetLowering::shouldFoldSelectWithIdentityConstant( |
| 13736 | unsigned BinOpcode, EVT VT, unsigned SelectOpcode, SDValue X, |
| 13737 | SDValue Y) const { |
| 13738 | return Subtarget->hasMVEIntegerOps() && isTypeLegal(VT) && |
| 13739 | SelectOpcode == ISD::VSELECT; |
| 13740 | } |
| 13741 | |
| 13742 | bool ARMTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const { |
| 13743 | if (!Subtarget->hasNEON()) { |
| 13744 | if (Subtarget->isThumb1Only()) |
| 13745 | return VT.getScalarSizeInBits() <= 32; |
| 13746 | return true; |
| 13747 | } |
| 13748 | return VT.isScalarInteger(); |
| 13749 | } |
| 13750 | |
| 13751 | bool ARMTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT, |
| 13752 | EVT VT) const { |
| 13753 | if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple()) |
| 13754 | return false; |
| 13755 | |
| 13756 | switch (FPVT.getSimpleVT().SimpleTy) { |
case MVT::f16:
case MVT::f32:
return Subtarget->hasVFP2Base();
| 13761 | case MVT::f64: |
| 13762 | return Subtarget->hasFP64(); |
| 13763 | case MVT::v4f32: |
| 13764 | case MVT::v8f16: |
| 13765 | return Subtarget->hasMVEFloatOps(); |
| 13766 | default: |
| 13767 | return false; |
| 13768 | } |
| 13769 | } |
| 13770 | |
| 13771 | static SDValue PerformSHLSimplify(SDNode *N, |
| 13772 | TargetLowering::DAGCombinerInfo &DCI, |
| 13773 | const ARMSubtarget *ST) { |
| 13774 | // Allow the generic combiner to identify potential bswaps. |
| 13775 | if (DCI.isBeforeLegalize()) |
| 13776 | return SDValue(); |
| 13777 | |
// DAG combiner will fold:
// (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
// (shl (or x, c1), c2)  -> (or (shl x, c2), c1 << c2)
// Other code patterns that can also be modified have the following form:
// b + ((a << 1) | 510)
// b + ((a << 1) & 510)
// b + ((a << 1) ^ 510)
// b + ((a << 1) + 510)

// Many instructions can perform the shift for free, but that requires both
// operands to be registers. If c1 << c2 is too large, a mov immediate
// instruction will be needed. So, unfold back to the original pattern if:
// - c1 and c2 are small enough that they don't require mov imms.
// - the user(s) of the node can perform a shl themselves.
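//
// Illustrative sketch: with c1 << c2 == 510 and c2 == 1,
// (add (shl x, 1), 510) is unfolded into (shl (add x, 255), 1), since 255
// passes the 8-bit rotated-immediate check below while 510 may not encode
// as cheaply.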
| 13792 | |
| 13793 | // No shifted operands for 16-bit instructions. |
| 13794 | if (ST->isThumb() && ST->isThumb1Only()) |
| 13795 | return SDValue(); |
| 13796 | |
| 13797 | // Check that all the users could perform the shl themselves. |
| 13798 | for (auto *U : N->users()) { |
| 13799 | switch(U->getOpcode()) { |
| 13800 | default: |
| 13801 | return SDValue(); |
| 13802 | case ISD::SUB: |
| 13803 | case ISD::ADD: |
| 13804 | case ISD::AND: |
| 13805 | case ISD::OR: |
| 13806 | case ISD::XOR: |
| 13807 | case ISD::SETCC: |
| 13808 | case ARMISD::CMP: |
| 13809 | // Check that the user isn't already using a constant because there |
| 13810 | // aren't any instructions that support an immediate operand and a |
| 13811 | // shifted operand. |
| 13812 | if (isa<ConstantSDNode>(Val: U->getOperand(Num: 0)) || |
| 13813 | isa<ConstantSDNode>(Val: U->getOperand(Num: 1))) |
| 13814 | return SDValue(); |
| 13815 | |
| 13816 | // Check that it's not already using a shift. |
| 13817 | if (U->getOperand(Num: 0).getOpcode() == ISD::SHL || |
| 13818 | U->getOperand(Num: 1).getOpcode() == ISD::SHL) |
| 13819 | return SDValue(); |
| 13820 | break; |
| 13821 | } |
| 13822 | } |
| 13823 | |
| 13824 | if (N->getOpcode() != ISD::ADD && N->getOpcode() != ISD::OR && |
| 13825 | N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND) |
| 13826 | return SDValue(); |
| 13827 | |
| 13828 | if (N->getOperand(Num: 0).getOpcode() != ISD::SHL) |
| 13829 | return SDValue(); |
| 13830 | |
| 13831 | SDValue SHL = N->getOperand(Num: 0); |
| 13832 | |
| 13833 | auto *C1ShlC2 = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1)); |
| 13834 | auto *C2 = dyn_cast<ConstantSDNode>(Val: SHL.getOperand(i: 1)); |
| 13835 | if (!C1ShlC2 || !C2) |
| 13836 | return SDValue(); |
| 13837 | |
| 13838 | APInt C2Int = C2->getAPIntValue(); |
| 13839 | APInt C1Int = C1ShlC2->getAPIntValue(); |
| 13840 | unsigned C2Width = C2Int.getBitWidth(); |
| 13841 | if (C2Int.uge(RHS: C2Width)) |
| 13842 | return SDValue(); |
| 13843 | uint64_t C2Value = C2Int.getZExtValue(); |
| 13844 | |
| 13845 | // Check that performing a lshr will not lose any information. |
| 13846 | APInt Mask = APInt::getHighBitsSet(numBits: C2Width, hiBitsSet: C2Width - C2Value); |
| 13847 | if ((C1Int & Mask) != C1Int) |
| 13848 | return SDValue(); |
| 13849 | |
| 13850 | // Shift the first constant. |
| 13851 | C1Int.lshrInPlace(ShiftAmt: C2Int); |
| 13852 | |
| 13853 | // The immediates are encoded as an 8-bit value that can be rotated. |
| 13854 | auto LargeImm = [](const APInt &Imm) { |
| 13855 | unsigned Zeros = Imm.countl_zero() + Imm.countr_zero(); |
| 13856 | return Imm.getBitWidth() - Zeros > 8; |
| 13857 | }; |
| 13858 | |
| 13859 | if (LargeImm(C1Int) || LargeImm(C2Int)) |
| 13860 | return SDValue(); |
| 13861 | |
| 13862 | SelectionDAG &DAG = DCI.DAG; |
| 13863 | SDLoc dl(N); |
| 13864 | SDValue X = SHL.getOperand(i: 0); |
| 13865 | SDValue BinOp = DAG.getNode(Opcode: N->getOpcode(), DL: dl, VT: MVT::i32, N1: X, |
| 13866 | N2: DAG.getConstant(Val: C1Int, DL: dl, VT: MVT::i32)); |
| 13867 | // Shift left to compensate for the lshr of C1Int. |
| 13868 | SDValue Res = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: MVT::i32, N1: BinOp, N2: SHL.getOperand(i: 1)); |
| 13869 | |
LLVM_DEBUG(dbgs() << "Simplify shl use:\n"; SHL.getOperand(0).dump();
SHL.dump(); N->dump());
LLVM_DEBUG(dbgs() << "Into:\n"; X.dump(); BinOp.dump(); Res.dump());
| 13873 | return Res; |
| 13874 | } |
| 13875 | |
| 13876 | |
| 13877 | /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. |
| 13878 | /// |
| 13879 | static SDValue PerformADDCombine(SDNode *N, |
| 13880 | TargetLowering::DAGCombinerInfo &DCI, |
| 13881 | const ARMSubtarget *Subtarget) { |
| 13882 | SDValue N0 = N->getOperand(Num: 0); |
| 13883 | SDValue N1 = N->getOperand(Num: 1); |
| 13884 | |
| 13885 | // Only works one way, because it needs an immediate operand. |
| 13886 | if (SDValue Result = PerformSHLSimplify(N, DCI, ST: Subtarget)) |
| 13887 | return Result; |
| 13888 | |
| 13889 | if (SDValue Result = PerformADDVecReduce(N, DAG&: DCI.DAG, Subtarget)) |
| 13890 | return Result; |
| 13891 | |
| 13892 | // First try with the default operand order. |
| 13893 | if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget)) |
| 13894 | return Result; |
| 13895 | |
| 13896 | // If that didn't work, try again with the operands commuted. |
| 13897 | return PerformADDCombineWithOperands(N, N0: N1, N1: N0, DCI, Subtarget); |
| 13898 | } |
| 13899 | |
// Combine (sub 0, (csinc X, Y, CC)) -> (csinv -X, Y, CC),
// provided -X is as cheap as X (currently, just a constant).
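// Illustrative example: (sub 0, (csinc 1, Y, CC)) becomes
// (csinv -1, Y, CC), folding the negation into the constant.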
| 13902 | static SDValue PerformSubCSINCCombine(SDNode *N, SelectionDAG &DAG) { |
| 13903 | if (N->getValueType(ResNo: 0) != MVT::i32 || !isNullConstant(V: N->getOperand(Num: 0))) |
| 13904 | return SDValue(); |
| 13905 | SDValue CSINC = N->getOperand(Num: 1); |
| 13906 | if (CSINC.getOpcode() != ARMISD::CSINC || !CSINC.hasOneUse()) |
| 13907 | return SDValue(); |
| 13908 | |
| 13909 | ConstantSDNode *X = dyn_cast<ConstantSDNode>(Val: CSINC.getOperand(i: 0)); |
| 13910 | if (!X) |
| 13911 | return SDValue(); |
| 13912 | |
| 13913 | return DAG.getNode(Opcode: ARMISD::CSINV, DL: SDLoc(N), VT: MVT::i32, |
| 13914 | N1: DAG.getNode(Opcode: ISD::SUB, DL: SDLoc(N), VT: MVT::i32, N1: N->getOperand(Num: 0), |
| 13915 | N2: CSINC.getOperand(i: 0)), |
| 13916 | N2: CSINC.getOperand(i: 1), N3: CSINC.getOperand(i: 2), |
| 13917 | N4: CSINC.getOperand(i: 3)); |
| 13918 | } |
| 13919 | |
| 13920 | static bool isNegatedInteger(SDValue Op) { |
| 13921 | return Op.getOpcode() == ISD::SUB && isNullConstant(V: Op.getOperand(i: 0)); |
| 13922 | } |
| 13923 | |
| 13924 | // Try to fold |
| 13925 | // |
| 13926 | // (neg (cmov X, Y)) -> (cmov (neg X), (neg Y)) |
| 13927 | // |
| 13928 | // The folding helps cmov to be matched with csneg without generating |
| 13929 | // redundant neg instruction. |
| 13930 | static SDValue performNegCMovCombine(SDNode *N, SelectionDAG &DAG) { |
| 13931 | if (!isNegatedInteger(Op: SDValue(N, 0))) |
| 13932 | return SDValue(); |
| 13933 | |
| 13934 | SDValue CMov = N->getOperand(Num: 1); |
| 13935 | if (CMov.getOpcode() != ARMISD::CMOV || !CMov->hasOneUse()) |
| 13936 | return SDValue(); |
| 13937 | |
| 13938 | SDValue N0 = CMov.getOperand(i: 0); |
| 13939 | SDValue N1 = CMov.getOperand(i: 1); |
| 13940 | |
// If neither operand is a negation, the fold isn't worthwhile: it would
// introduce two additional negations while removing only one.
| 13943 | if (!isNegatedInteger(Op: N0) && !isNegatedInteger(Op: N1)) |
| 13944 | return SDValue(); |
| 13945 | |
| 13946 | SDLoc DL(N); |
| 13947 | EVT VT = CMov.getValueType(); |
| 13948 | |
| 13949 | SDValue N0N = DAG.getNegative(Val: N0, DL, VT); |
| 13950 | SDValue N1N = DAG.getNegative(Val: N1, DL, VT); |
| 13951 | return DAG.getNode(Opcode: ARMISD::CMOV, DL, VT, N1: N0N, N2: N1N, N3: CMov.getOperand(i: 2), |
| 13952 | N4: CMov.getOperand(i: 3)); |
| 13953 | } |
| 13954 | |
| 13955 | /// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. |
| 13956 | /// |
| 13957 | static SDValue PerformSUBCombine(SDNode *N, |
| 13958 | TargetLowering::DAGCombinerInfo &DCI, |
| 13959 | const ARMSubtarget *Subtarget) { |
| 13960 | SDValue N0 = N->getOperand(Num: 0); |
| 13961 | SDValue N1 = N->getOperand(Num: 1); |
| 13962 | |
// fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c))
| 13964 | if (N1.getNode()->hasOneUse()) |
| 13965 | if (SDValue Result = combineSelectAndUse(N, Slct: N1, OtherOp: N0, DCI)) |
| 13966 | return Result; |
| 13967 | |
| 13968 | if (SDValue R = PerformSubCSINCCombine(N, DAG&: DCI.DAG)) |
| 13969 | return R; |
| 13970 | |
| 13971 | if (SDValue Val = performNegCMovCombine(N, DAG&: DCI.DAG)) |
| 13972 | return Val; |
| 13973 | |
| 13974 | if (!Subtarget->hasMVEIntegerOps() || !N->getValueType(ResNo: 0).isVector()) |
| 13975 | return SDValue(); |
| 13976 | |
// Fold (sub (ARMvmovImm 0), (ARMvdup x)) -> (ARMvdup (sub 0, x))
// so that we can readily pattern match more MVE instructions which can use
// a scalar operand.
| 13980 | SDValue VDup = N->getOperand(Num: 1); |
| 13981 | if (VDup->getOpcode() != ARMISD::VDUP) |
| 13982 | return SDValue(); |
| 13983 | |
| 13984 | SDValue VMov = N->getOperand(Num: 0); |
| 13985 | if (VMov->getOpcode() == ISD::BITCAST) |
| 13986 | VMov = VMov->getOperand(Num: 0); |
| 13987 | |
| 13988 | if (VMov->getOpcode() != ARMISD::VMOVIMM || !isZeroVector(N: VMov)) |
| 13989 | return SDValue(); |
| 13990 | |
| 13991 | SDLoc dl(N); |
| 13992 | SDValue Negate = DCI.DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
| 13993 | N1: DCI.DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), |
| 13994 | N2: VDup->getOperand(Num: 0)); |
| 13995 | return DCI.DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT: N->getValueType(ResNo: 0), Operand: Negate); |
| 13996 | } |
| 13997 | |
| 13998 | /// PerformVMULCombine |
| 13999 | /// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the |
| 14000 | /// special multiplier accumulator forwarding. |
| 14001 | /// vmul d3, d0, d2 |
| 14002 | /// vmla d3, d1, d2 |
| 14003 | /// is faster than |
| 14004 | /// vadd d3, d0, d1 |
| 14005 | /// vmul d3, d3, d2 |
| 14006 | // However, for (A + B) * (A + B), |
| 14007 | // vadd d2, d0, d1 |
| 14008 | // vmul d3, d0, d2 |
| 14009 | // vmla d3, d1, d2 |
| 14010 | // is slower than |
| 14011 | // vadd d2, d0, d1 |
| 14012 | // vmul d3, d2, d2 |
| 14013 | static SDValue PerformVMULCombine(SDNode *N, |
| 14014 | TargetLowering::DAGCombinerInfo &DCI, |
| 14015 | const ARMSubtarget *Subtarget) { |
| 14016 | if (!Subtarget->hasVMLxForwarding()) |
| 14017 | return SDValue(); |
| 14018 | |
| 14019 | SelectionDAG &DAG = DCI.DAG; |
| 14020 | SDValue N0 = N->getOperand(Num: 0); |
| 14021 | SDValue N1 = N->getOperand(Num: 1); |
| 14022 | unsigned Opcode = N0.getOpcode(); |
| 14023 | if (Opcode != ISD::ADD && Opcode != ISD::SUB && |
| 14024 | Opcode != ISD::FADD && Opcode != ISD::FSUB) { |
| 14025 | Opcode = N1.getOpcode(); |
| 14026 | if (Opcode != ISD::ADD && Opcode != ISD::SUB && |
| 14027 | Opcode != ISD::FADD && Opcode != ISD::FSUB) |
| 14028 | return SDValue(); |
| 14029 | std::swap(a&: N0, b&: N1); |
| 14030 | } |
| 14031 | |
| 14032 | if (N0 == N1) |
| 14033 | return SDValue(); |
| 14034 | |
| 14035 | EVT VT = N->getValueType(ResNo: 0); |
| 14036 | SDLoc DL(N); |
| 14037 | SDValue N00 = N0->getOperand(Num: 0); |
| 14038 | SDValue N01 = N0->getOperand(Num: 1); |
| 14039 | return DAG.getNode(Opcode, DL, VT, |
| 14040 | N1: DAG.getNode(Opcode: ISD::MUL, DL, VT, N1: N00, N2: N1), |
| 14041 | N2: DAG.getNode(Opcode: ISD::MUL, DL, VT, N1: N01, N2: N1)); |
| 14042 | } |
| 14043 | |
| 14044 | static SDValue PerformMVEVMULLCombine(SDNode *N, SelectionDAG &DAG, |
| 14045 | const ARMSubtarget *Subtarget) { |
| 14046 | EVT VT = N->getValueType(ResNo: 0); |
| 14047 | if (VT != MVT::v2i64) |
| 14048 | return SDValue(); |
| 14049 | |
| 14050 | SDValue N0 = N->getOperand(Num: 0); |
| 14051 | SDValue N1 = N->getOperand(Num: 1); |
| 14052 | |
| 14053 | auto IsSignExt = [&](SDValue Op) { |
| 14054 | if (Op->getOpcode() != ISD::SIGN_EXTEND_INREG) |
| 14055 | return SDValue(); |
| 14056 | EVT VT = cast<VTSDNode>(Val: Op->getOperand(Num: 1))->getVT(); |
| 14057 | if (VT.getScalarSizeInBits() == 32) |
| 14058 | return Op->getOperand(Num: 0); |
| 14059 | return SDValue(); |
| 14060 | }; |
| 14061 | auto IsZeroExt = [&](SDValue Op) { |
// Zero extends are a little more awkward. At the point we are matching
// this, we are looking for an AND with a (-1, 0, -1, 0) buildvector mask.
// That might be before or after a bitcast, depending on how the AND is
// placed. Because this has to look through bitcasts, it is currently only
// supported on LE.
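//
// Illustrative example (little-endian): (and (v2i64 bitcast x),
// (bitcast (v4i32 build_vector -1, 0, -1, 0))) zeroes the high half of
// each i64 lane, which is exactly a zext of the low i32.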
| 14067 | if (!Subtarget->isLittle()) |
| 14068 | return SDValue(); |
| 14069 | |
| 14070 | SDValue And = Op; |
| 14071 | if (And->getOpcode() == ISD::BITCAST) |
| 14072 | And = And->getOperand(Num: 0); |
| 14073 | if (And->getOpcode() != ISD::AND) |
| 14074 | return SDValue(); |
| 14075 | SDValue Mask = And->getOperand(Num: 1); |
| 14076 | if (Mask->getOpcode() == ISD::BITCAST) |
| 14077 | Mask = Mask->getOperand(Num: 0); |
| 14078 | |
| 14079 | if (Mask->getOpcode() != ISD::BUILD_VECTOR || |
| 14080 | Mask.getValueType() != MVT::v4i32) |
| 14081 | return SDValue(); |
| 14082 | if (isAllOnesConstant(V: Mask->getOperand(Num: 0)) && |
| 14083 | isNullConstant(V: Mask->getOperand(Num: 1)) && |
| 14084 | isAllOnesConstant(V: Mask->getOperand(Num: 2)) && |
| 14085 | isNullConstant(V: Mask->getOperand(Num: 3))) |
| 14086 | return And->getOperand(Num: 0); |
| 14087 | return SDValue(); |
| 14088 | }; |
| 14089 | |
| 14090 | SDLoc dl(N); |
| 14091 | if (SDValue Op0 = IsSignExt(N0)) { |
| 14092 | if (SDValue Op1 = IsSignExt(N1)) { |
| 14093 | SDValue New0a = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: Op0); |
| 14094 | SDValue New1a = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: Op1); |
| 14095 | return DAG.getNode(Opcode: ARMISD::VMULLs, DL: dl, VT, N1: New0a, N2: New1a); |
| 14096 | } |
| 14097 | } |
| 14098 | if (SDValue Op0 = IsZeroExt(N0)) { |
| 14099 | if (SDValue Op1 = IsZeroExt(N1)) { |
| 14100 | SDValue New0a = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: Op0); |
| 14101 | SDValue New1a = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: Op1); |
| 14102 | return DAG.getNode(Opcode: ARMISD::VMULLu, DL: dl, VT, N1: New0a, N2: New1a); |
| 14103 | } |
| 14104 | } |
| 14105 | |
| 14106 | return SDValue(); |
| 14107 | } |
| 14108 | |
| 14109 | static SDValue PerformMULCombine(SDNode *N, |
| 14110 | TargetLowering::DAGCombinerInfo &DCI, |
| 14111 | const ARMSubtarget *Subtarget) { |
| 14112 | SelectionDAG &DAG = DCI.DAG; |
| 14113 | |
| 14114 | EVT VT = N->getValueType(ResNo: 0); |
| 14115 | if (Subtarget->hasMVEIntegerOps() && VT == MVT::v2i64) |
| 14116 | return PerformMVEVMULLCombine(N, DAG, Subtarget); |
| 14117 | |
| 14118 | if (Subtarget->isThumb1Only()) |
| 14119 | return SDValue(); |
| 14120 | |
| 14121 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 14122 | return SDValue(); |
| 14123 | |
| 14124 | if (VT.is64BitVector() || VT.is128BitVector()) |
| 14125 | return PerformVMULCombine(N, DCI, Subtarget); |
| 14126 | if (VT != MVT::i32) |
| 14127 | return SDValue(); |
| 14128 | |
| 14129 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1)); |
| 14130 | if (!C) |
| 14131 | return SDValue(); |
| 14132 | |
| 14133 | int64_t MulAmt = C->getSExtValue(); |
| 14134 | unsigned ShiftAmt = llvm::countr_zero<uint64_t>(Val: MulAmt); |
| 14135 | |
| 14136 | ShiftAmt = ShiftAmt & (32 - 1); |
| 14137 | SDValue V = N->getOperand(Num: 0); |
| 14138 | SDLoc DL(N); |
| 14139 | |
| 14140 | SDValue Res; |
| 14141 | MulAmt >>= ShiftAmt; |
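// Illustrative sketch: MulAmt == 10 gives ShiftAmt == 1 and a residual
// multiplier of 5 == 2^2 + 1, so below we emit (shl (add x, (shl x, 2)), 1).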
| 14142 | |
| 14143 | if (MulAmt >= 0) { |
| 14144 | if (llvm::has_single_bit<uint32_t>(Value: MulAmt - 1)) { |
| 14145 | // (mul x, 2^N + 1) => (add (shl x, N), x) |
| 14146 | Res = DAG.getNode(Opcode: ISD::ADD, DL, VT, |
| 14147 | N1: V, |
| 14148 | N2: DAG.getNode(Opcode: ISD::SHL, DL, VT, |
| 14149 | N1: V, |
| 14150 | N2: DAG.getConstant(Val: Log2_32(Value: MulAmt - 1), DL, |
| 14151 | VT: MVT::i32))); |
| 14152 | } else if (llvm::has_single_bit<uint32_t>(Value: MulAmt + 1)) { |
| 14153 | // (mul x, 2^N - 1) => (sub (shl x, N), x) |
| 14154 | Res = DAG.getNode(Opcode: ISD::SUB, DL, VT, |
| 14155 | N1: DAG.getNode(Opcode: ISD::SHL, DL, VT, |
| 14156 | N1: V, |
| 14157 | N2: DAG.getConstant(Val: Log2_32(Value: MulAmt + 1), DL, |
| 14158 | VT: MVT::i32)), |
| 14159 | N2: V); |
| 14160 | } else |
| 14161 | return SDValue(); |
| 14162 | } else { |
| 14163 | uint64_t MulAmtAbs = -MulAmt; |
| 14164 | if (llvm::has_single_bit<uint32_t>(Value: MulAmtAbs + 1)) { |
| 14165 | // (mul x, -(2^N - 1)) => (sub x, (shl x, N)) |
| 14166 | Res = DAG.getNode(Opcode: ISD::SUB, DL, VT, |
| 14167 | N1: V, |
| 14168 | N2: DAG.getNode(Opcode: ISD::SHL, DL, VT, |
| 14169 | N1: V, |
| 14170 | N2: DAG.getConstant(Val: Log2_32(Value: MulAmtAbs + 1), DL, |
| 14171 | VT: MVT::i32))); |
| 14172 | } else if (llvm::has_single_bit<uint32_t>(Value: MulAmtAbs - 1)) { |
| 14173 | // (mul x, -(2^N + 1)) => - (add (shl x, N), x) |
| 14174 | Res = DAG.getNode(Opcode: ISD::ADD, DL, VT, |
| 14175 | N1: V, |
| 14176 | N2: DAG.getNode(Opcode: ISD::SHL, DL, VT, |
| 14177 | N1: V, |
| 14178 | N2: DAG.getConstant(Val: Log2_32(Value: MulAmtAbs - 1), DL, |
| 14179 | VT: MVT::i32))); |
| 14180 | Res = DAG.getNode(Opcode: ISD::SUB, DL, VT, |
| 14181 | N1: DAG.getConstant(Val: 0, DL, VT: MVT::i32), N2: Res); |
| 14182 | } else |
| 14183 | return SDValue(); |
| 14184 | } |
| 14185 | |
| 14186 | if (ShiftAmt != 0) |
| 14187 | Res = DAG.getNode(Opcode: ISD::SHL, DL, VT, |
| 14188 | N1: Res, N2: DAG.getConstant(Val: ShiftAmt, DL, VT: MVT::i32)); |
| 14189 | |
| 14190 | // Do not add new nodes to DAG combiner worklist. |
| 14191 | DCI.CombineTo(N, Res, AddTo: false); |
| 14192 | return SDValue(); |
| 14193 | } |
| 14194 | |
| 14195 | static SDValue CombineANDShift(SDNode *N, |
| 14196 | TargetLowering::DAGCombinerInfo &DCI, |
| 14197 | const ARMSubtarget *Subtarget) { |
| 14198 | // Allow DAGCombine to pattern-match before we touch the canonical form. |
| 14199 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 14200 | return SDValue(); |
| 14201 | |
| 14202 | if (N->getValueType(ResNo: 0) != MVT::i32) |
| 14203 | return SDValue(); |
| 14204 | |
| 14205 | ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1)); |
| 14206 | if (!N1C) |
| 14207 | return SDValue(); |
| 14208 | |
| 14209 | uint32_t C1 = (uint32_t)N1C->getZExtValue(); |
| 14210 | // Don't transform uxtb/uxth. |
| 14211 | if (C1 == 255 || C1 == 65535) |
| 14212 | return SDValue(); |
| 14213 | |
| 14214 | SDNode *N0 = N->getOperand(Num: 0).getNode(); |
| 14215 | if (!N0->hasOneUse()) |
| 14216 | return SDValue(); |
| 14217 | |
| 14218 | if (N0->getOpcode() != ISD::SHL && N0->getOpcode() != ISD::SRL) |
| 14219 | return SDValue(); |
| 14220 | |
| 14221 | bool LeftShift = N0->getOpcode() == ISD::SHL; |
| 14222 | |
| 14223 | ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(Val: N0->getOperand(Num: 1)); |
| 14224 | if (!N01C) |
| 14225 | return SDValue(); |
| 14226 | |
| 14227 | uint32_t C2 = (uint32_t)N01C->getZExtValue(); |
| 14228 | if (!C2 || C2 >= 32) |
| 14229 | return SDValue(); |
| 14230 | |
| 14231 | // Clear irrelevant bits in the mask. |
| 14232 | if (LeftShift) |
| 14233 | C1 &= (-1U << C2); |
| 14234 | else |
| 14235 | C1 &= (-1U >> C2); |
| 14236 | |
| 14237 | SelectionDAG &DAG = DCI.DAG; |
| 14238 | SDLoc DL(N); |
| 14239 | |
| 14240 | // We have a pattern of the form "(and (shl x, c2) c1)" or |
| 14241 | // "(and (srl x, c2) c1)", where c1 is a shifted mask. Try to |
| 14242 | // transform to a pair of shifts, to save materializing c1. |
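//
// For instance (illustrative): (and (srl x, 1), 0x3FFFFFFF) can become
// (srl (shl x, 1), 2), avoiding the materialization of 0x3FFFFFFF.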
| 14243 | |
| 14244 | // First pattern: right shift, then mask off leading bits. |
| 14245 | // FIXME: Use demanded bits? |
| 14246 | if (!LeftShift && isMask_32(Value: C1)) { |
| 14247 | uint32_t C3 = llvm::countl_zero(Val: C1); |
| 14248 | if (C2 < C3) { |
| 14249 | SDValue SHL = DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
| 14250 | N2: DAG.getConstant(Val: C3 - C2, DL, VT: MVT::i32)); |
| 14251 | return DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i32, N1: SHL, |
| 14252 | N2: DAG.getConstant(Val: C3, DL, VT: MVT::i32)); |
| 14253 | } |
| 14254 | } |
| 14255 | |
| 14256 | // First pattern, reversed: left shift, then mask off trailing bits. |
| 14257 | if (LeftShift && isMask_32(Value: ~C1)) { |
| 14258 | uint32_t C3 = llvm::countr_zero(Val: C1); |
| 14259 | if (C2 < C3) { |
| 14260 | SDValue SHL = DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
| 14261 | N2: DAG.getConstant(Val: C3 - C2, DL, VT: MVT::i32)); |
| 14262 | return DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: SHL, |
| 14263 | N2: DAG.getConstant(Val: C3, DL, VT: MVT::i32)); |
| 14264 | } |
| 14265 | } |
| 14266 | |
| 14267 | // Second pattern: left shift, then mask off leading bits. |
| 14268 | // FIXME: Use demanded bits? |
| 14269 | if (LeftShift && isShiftedMask_32(Value: C1)) { |
| 14270 | uint32_t Trailing = llvm::countr_zero(Val: C1); |
| 14271 | uint32_t C3 = llvm::countl_zero(Val: C1); |
| 14272 | if (Trailing == C2 && C2 + C3 < 32) { |
| 14273 | SDValue SHL = DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
| 14274 | N2: DAG.getConstant(Val: C2 + C3, DL, VT: MVT::i32)); |
| 14275 | return DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i32, N1: SHL, |
| 14276 | N2: DAG.getConstant(Val: C3, DL, VT: MVT::i32)); |
| 14277 | } |
| 14278 | } |
| 14279 | |
| 14280 | // Second pattern, reversed: right shift, then mask off trailing bits. |
| 14281 | // FIXME: Handle other patterns of known/demanded bits. |
| 14282 | if (!LeftShift && isShiftedMask_32(Value: C1)) { |
| 14283 | uint32_t Leading = llvm::countl_zero(Val: C1); |
| 14284 | uint32_t C3 = llvm::countr_zero(Val: C1); |
| 14285 | if (Leading == C2 && C2 + C3 < 32) { |
| 14286 | SDValue SHL = DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
| 14287 | N2: DAG.getConstant(Val: C2 + C3, DL, VT: MVT::i32)); |
| 14288 | return DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: SHL, |
| 14289 | N2: DAG.getConstant(Val: C3, DL, VT: MVT::i32)); |
| 14290 | } |
| 14291 | } |
| 14292 | |
| 14293 | // Transform "(and (shl x, c2) c1)" into "(shl (and x, c1>>c2), c2)" |
| 14294 | // if "c1 >> c2" is a cheaper immediate than "c1" |
| 14295 | if (LeftShift && |
| 14296 | HasLowerConstantMaterializationCost(Val1: C1 >> C2, Val2: C1, Subtarget)) { |
| 14297 | |
| 14298 | SDValue And = DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
| 14299 | N2: DAG.getConstant(Val: C1 >> C2, DL, VT: MVT::i32)); |
| 14300 | return DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: And, |
| 14301 | N2: DAG.getConstant(Val: C2, DL, VT: MVT::i32)); |
| 14302 | } |
| 14303 | |
| 14304 | return SDValue(); |
| 14305 | } |
| 14306 | |
| 14307 | static SDValue PerformANDCombine(SDNode *N, |
| 14308 | TargetLowering::DAGCombinerInfo &DCI, |
| 14309 | const ARMSubtarget *Subtarget) { |
| 14310 | // Attempt to use immediate-form VBIC |
| 14311 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Val: N->getOperand(Num: 1)); |
| 14312 | SDLoc dl(N); |
| 14313 | EVT VT = N->getValueType(ResNo: 0); |
| 14314 | SelectionDAG &DAG = DCI.DAG; |
| 14315 | |
| 14316 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT) || VT == MVT::v2i1 || |
| 14317 | VT == MVT::v4i1 || VT == MVT::v8i1 || VT == MVT::v16i1) |
| 14318 | return SDValue(); |
| 14319 | |
| 14320 | APInt SplatBits, SplatUndef; |
| 14321 | unsigned SplatBitSize; |
| 14322 | bool HasAnyUndefs; |
| 14323 | if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) && |
| 14324 | BVN->isConstantSplat(SplatValue&: SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { |
| 14325 | if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 || |
| 14326 | SplatBitSize == 64) { |
| 14327 | EVT VbicVT; |
| 14328 | SDValue Val = isVMOVModifiedImm(SplatBits: (~SplatBits).getZExtValue(), |
| 14329 | SplatUndef: SplatUndef.getZExtValue(), SplatBitSize, |
| 14330 | DAG, dl, VT&: VbicVT, VectorVT: VT, type: OtherModImm); |
| 14331 | if (Val.getNode()) { |
| 14332 | SDValue Input = |
| 14333 | DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: VbicVT, Operand: N->getOperand(Num: 0)); |
| 14334 | SDValue Vbic = DAG.getNode(Opcode: ARMISD::VBICIMM, DL: dl, VT: VbicVT, N1: Input, N2: Val); |
| 14335 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Vbic); |
| 14336 | } |
| 14337 | } |
| 14338 | } |
| 14339 | |
| 14340 | if (!Subtarget->isThumb1Only()) { |
// fold (and (select cc, -1, c), x) -> (select cc, x, (and x, c))
| 14342 | if (SDValue Result = combineSelectAndUseCommutative(N, AllOnes: true, DCI)) |
| 14343 | return Result; |
| 14344 | |
| 14345 | if (SDValue Result = PerformSHLSimplify(N, DCI, ST: Subtarget)) |
| 14346 | return Result; |
| 14347 | } |
| 14348 | |
| 14349 | if (Subtarget->isThumb1Only()) |
| 14350 | if (SDValue Result = CombineANDShift(N, DCI, Subtarget)) |
| 14351 | return Result; |
| 14352 | |
| 14353 | return SDValue(); |
| 14354 | } |
| 14355 | |
| 14356 | // Try combining OR nodes to SMULWB, SMULWT. |
| 14357 | static SDValue PerformORCombineToSMULWBT(SDNode *OR, |
| 14358 | TargetLowering::DAGCombinerInfo &DCI, |
| 14359 | const ARMSubtarget *Subtarget) { |
| 14360 | if (!Subtarget->hasV6Ops() || |
| 14361 | (Subtarget->isThumb() && |
| 14362 | (!Subtarget->hasThumb2() || !Subtarget->hasDSP()))) |
| 14363 | return SDValue(); |
| 14364 | |
| 14365 | SDValue SRL = OR->getOperand(Num: 0); |
| 14366 | SDValue SHL = OR->getOperand(Num: 1); |
| 14367 | |
| 14368 | if (SRL.getOpcode() != ISD::SRL || SHL.getOpcode() != ISD::SHL) { |
| 14369 | SRL = OR->getOperand(Num: 1); |
| 14370 | SHL = OR->getOperand(Num: 0); |
| 14371 | } |
| 14372 | if (!isSRL16(Op: SRL) || !isSHL16(Op: SHL)) |
| 14373 | return SDValue(); |
| 14374 | |
| 14375 | // The first operands to the shifts need to be the two results from the |
| 14376 | // same smul_lohi node. |
| 14377 | if ((SRL.getOperand(i: 0).getNode() != SHL.getOperand(i: 0).getNode()) || |
| 14378 | SRL.getOperand(i: 0).getOpcode() != ISD::SMUL_LOHI) |
| 14379 | return SDValue(); |
| 14380 | |
| 14381 | SDNode *SMULLOHI = SRL.getOperand(i: 0).getNode(); |
| 14382 | if (SRL.getOperand(i: 0) != SDValue(SMULLOHI, 0) || |
| 14383 | SHL.getOperand(i: 0) != SDValue(SMULLOHI, 1)) |
| 14384 | return SDValue(); |
| 14385 | |
// Now we have:
// (or (srl (smul_lohi ?, ?), 16), (shl (smul_lohi ?, ?), 16))
// For SMULW[B|T], smul_lohi takes a 32-bit and a 16-bit argument.
// For SMULWB the 16-bit value will be sign-extended somehow.
// For SMULWT only the SRA is required.
// Check both sides of SMUL_LOHI.
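// (SMULWB/SMULWT compute the top 32 bits of a 32x16-bit product, using
// the bottom/top halfword of the second operand respectively.)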
| 14392 | SDValue OpS16 = SMULLOHI->getOperand(Num: 0); |
| 14393 | SDValue OpS32 = SMULLOHI->getOperand(Num: 1); |
| 14394 | |
| 14395 | SelectionDAG &DAG = DCI.DAG; |
| 14396 | if (!isS16(Op: OpS16, DAG) && !isSRA16(Op: OpS16)) { |
| 14397 | OpS16 = OpS32; |
| 14398 | OpS32 = SMULLOHI->getOperand(Num: 0); |
| 14399 | } |
| 14400 | |
| 14401 | SDLoc dl(OR); |
| 14402 | unsigned Opcode = 0; |
| 14403 | if (isS16(Op: OpS16, DAG)) |
| 14404 | Opcode = ARMISD::SMULWB; |
| 14405 | else if (isSRA16(Op: OpS16)) { |
| 14406 | Opcode = ARMISD::SMULWT; |
| 14407 | OpS16 = OpS16->getOperand(Num: 0); |
| 14408 | } |
| 14409 | else |
| 14410 | return SDValue(); |
| 14411 | |
| 14412 | SDValue Res = DAG.getNode(Opcode, DL: dl, VT: MVT::i32, N1: OpS32, N2: OpS16); |
| 14413 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(OR, 0), To: Res); |
| 14414 | return SDValue(OR, 0); |
| 14415 | } |
| 14416 | |
| 14417 | static SDValue PerformORCombineToBFI(SDNode *N, |
| 14418 | TargetLowering::DAGCombinerInfo &DCI, |
| 14419 | const ARMSubtarget *Subtarget) { |
| 14420 | // BFI is only available on V6T2+ |
| 14421 | if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) |
| 14422 | return SDValue(); |
| 14423 | |
| 14424 | EVT VT = N->getValueType(ResNo: 0); |
| 14425 | SDValue N0 = N->getOperand(Num: 0); |
| 14426 | SDValue N1 = N->getOperand(Num: 1); |
| 14427 | SelectionDAG &DAG = DCI.DAG; |
| 14428 | SDLoc DL(N); |
| 14429 | // 1) or (and A, mask), val => ARMbfi A, val, mask |
| 14430 | // iff (val & mask) == val |
| 14431 | // |
| 14432 | // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask |
| 14433 | // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) |
| 14434 | // && mask == ~mask2 |
| 14435 | // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) |
| 14436 | // && ~mask == mask2 |
| 14437 | // (i.e., copy a bitfield value into another bitfield of the same width) |
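//
// Illustrative example of case (1): (or (and A, 0xffff00ff), 0x4400)
// becomes (ARMbfi A, 0x44, 0xffff00ff), inserting 0x44 into bits 8-15.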
| 14438 | |
| 14439 | if (VT != MVT::i32) |
| 14440 | return SDValue(); |
| 14441 | |
| 14442 | SDValue N00 = N0.getOperand(i: 0); |
| 14443 | |
| 14444 | // The value and the mask need to be constants so we can verify this is |
| 14445 | // actually a bitfield set. If the mask is 0xffff, we can do better |
| 14446 | // via a movt instruction, so don't use BFI in that case. |
| 14447 | SDValue MaskOp = N0.getOperand(i: 1); |
| 14448 | ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Val&: MaskOp); |
| 14449 | if (!MaskC) |
| 14450 | return SDValue(); |
| 14451 | unsigned Mask = MaskC->getZExtValue(); |
| 14452 | if (Mask == 0xffff) |
| 14453 | return SDValue(); |
| 14454 | SDValue Res; |
| 14455 | // Case (1): or (and A, mask), val => ARMbfi A, val, mask |
| 14456 | ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Val&: N1); |
| 14457 | if (N1C) { |
| 14458 | unsigned Val = N1C->getZExtValue(); |
| 14459 | if ((Val & ~Mask) != Val) |
| 14460 | return SDValue(); |
| 14461 | |
| 14462 | if (ARM::isBitFieldInvertedMask(v: Mask)) { |
| 14463 | Val >>= llvm::countr_zero(Val: ~Mask); |
| 14464 | |
| 14465 | Res = DAG.getNode(Opcode: ARMISD::BFI, DL, VT, N1: N00, |
| 14466 | N2: DAG.getConstant(Val, DL, VT: MVT::i32), |
| 14467 | N3: DAG.getConstant(Val: Mask, DL, VT: MVT::i32)); |
| 14468 | |
| 14469 | DCI.CombineTo(N, Res, AddTo: false); |
// Return value from the original node to inform the combiner that N is
// now dead.
| 14472 | return SDValue(N, 0); |
| 14473 | } |
| 14474 | } else if (N1.getOpcode() == ISD::AND) { |
| 14475 | // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask |
| 14476 | ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(Val: N1.getOperand(i: 1)); |
| 14477 | if (!N11C) |
| 14478 | return SDValue(); |
| 14479 | unsigned Mask2 = N11C->getZExtValue(); |
| 14480 | |
// Mask and ~Mask2 (or the reverse) must be equivalent for the BFI
// pattern to match as-is.
| 14483 | if (ARM::isBitFieldInvertedMask(v: Mask) && |
| 14484 | (Mask == ~Mask2)) { |
| 14485 | // The pack halfword instruction works better for masks that fit it, |
| 14486 | // so use that when it's available. |
| 14487 | if (Subtarget->hasDSP() && |
| 14488 | (Mask == 0xffff || Mask == 0xffff0000)) |
| 14489 | return SDValue(); |
| 14490 | // 2a |
| 14491 | unsigned amt = llvm::countr_zero(Val: Mask2); |
| 14492 | Res = DAG.getNode(Opcode: ISD::SRL, DL, VT, N1: N1.getOperand(i: 0), |
| 14493 | N2: DAG.getConstant(Val: amt, DL, VT: MVT::i32)); |
| 14494 | Res = DAG.getNode(Opcode: ARMISD::BFI, DL, VT, N1: N00, N2: Res, |
| 14495 | N3: DAG.getConstant(Val: Mask, DL, VT: MVT::i32)); |
| 14496 | DCI.CombineTo(N, Res, AddTo: false); |
// Return value from the original node to inform the combiner that N is
// now dead.
| 14499 | return SDValue(N, 0); |
| 14500 | } else if (ARM::isBitFieldInvertedMask(v: ~Mask) && |
| 14501 | (~Mask == Mask2)) { |
| 14502 | // The pack halfword instruction works better for masks that fit it, |
| 14503 | // so use that when it's available. |
| 14504 | if (Subtarget->hasDSP() && |
| 14505 | (Mask2 == 0xffff || Mask2 == 0xffff0000)) |
| 14506 | return SDValue(); |
| 14507 | // 2b |
| 14508 | unsigned lsb = llvm::countr_zero(Val: Mask); |
| 14509 | Res = DAG.getNode(Opcode: ISD::SRL, DL, VT, N1: N00, |
| 14510 | N2: DAG.getConstant(Val: lsb, DL, VT: MVT::i32)); |
| 14511 | Res = DAG.getNode(Opcode: ARMISD::BFI, DL, VT, N1: N1.getOperand(i: 0), N2: Res, |
| 14512 | N3: DAG.getConstant(Val: Mask2, DL, VT: MVT::i32)); |
| 14513 | DCI.CombineTo(N, Res, AddTo: false); |
// Return value from the original node to inform the combiner that N is
// now dead.
| 14516 | return SDValue(N, 0); |
| 14517 | } |
| 14518 | } |
| 14519 | |
| 14520 | if (DAG.MaskedValueIsZero(Op: N1, Mask: MaskC->getAPIntValue()) && |
| 14521 | N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(Val: N00.getOperand(i: 1)) && |
| 14522 | ARM::isBitFieldInvertedMask(v: ~Mask)) { |
| 14523 | // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask |
| 14524 | // where lsb(mask) == #shamt and masked bits of B are known zero. |
| 14525 | SDValue ShAmt = N00.getOperand(i: 1); |
| 14526 | unsigned ShAmtC = ShAmt->getAsZExtVal(); |
| 14527 | unsigned LSB = llvm::countr_zero(Val: Mask); |
| 14528 | if (ShAmtC != LSB) |
| 14529 | return SDValue(); |
| 14530 | |
| 14531 | Res = DAG.getNode(Opcode: ARMISD::BFI, DL, VT, N1, N2: N00.getOperand(i: 0), |
| 14532 | N3: DAG.getConstant(Val: ~Mask, DL, VT: MVT::i32)); |
| 14533 | |
| 14534 | DCI.CombineTo(N, Res, AddTo: false); |
// Return value from the original node to inform the combiner that N is
// now dead.
| 14537 | return SDValue(N, 0); |
| 14538 | } |
| 14539 | |
| 14540 | return SDValue(); |
| 14541 | } |
| 14542 | |
| 14543 | static bool isValidMVECond(unsigned CC, bool IsFloat) { |
| 14544 | switch (CC) { |
| 14545 | case ARMCC::EQ: |
| 14546 | case ARMCC::NE: |
| 14547 | case ARMCC::LE: |
| 14548 | case ARMCC::GT: |
| 14549 | case ARMCC::GE: |
| 14550 | case ARMCC::LT: |
| 14551 | return true; |
| 14552 | case ARMCC::HS: |
| 14553 | case ARMCC::HI: |
| 14554 | return !IsFloat; |
| 14555 | default: |
| 14556 | return false; |
| 14557 | }; |
| 14558 | } |
| 14559 | |
| 14560 | static ARMCC::CondCodes getVCMPCondCode(SDValue N) { |
| 14561 | if (N->getOpcode() == ARMISD::VCMP) |
| 14562 | return (ARMCC::CondCodes)N->getConstantOperandVal(Num: 2); |
| 14563 | else if (N->getOpcode() == ARMISD::VCMPZ) |
| 14564 | return (ARMCC::CondCodes)N->getConstantOperandVal(Num: 1); |
| 14565 | else |
| 14566 | llvm_unreachable("Not a VCMP/VCMPZ!" ); |
| 14567 | } |
| 14568 | |
| 14569 | static bool CanInvertMVEVCMP(SDValue N) { |
| 14570 | ARMCC::CondCodes CC = ARMCC::getOppositeCondition(CC: getVCMPCondCode(N)); |
| 14571 | return isValidMVECond(CC, IsFloat: N->getOperand(Num: 0).getValueType().isFloatingPoint()); |
| 14572 | } |
| 14573 | |
| 14574 | static SDValue PerformORCombine_i1(SDNode *N, SelectionDAG &DAG, |
| 14575 | const ARMSubtarget *Subtarget) { |
| 14576 | // Try to invert "or A, B" -> "and ~A, ~B", as the "and" is easier to chain |
| 14577 | // together with predicates |
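// (This is just De Morgan: or(A, B) == ~(and(~A, ~B)). Inverting a
// VCMP/VCMPZ is free, as only its condition code needs to change.)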
| 14578 | EVT VT = N->getValueType(ResNo: 0); |
| 14579 | SDLoc DL(N); |
| 14580 | SDValue N0 = N->getOperand(Num: 0); |
| 14581 | SDValue N1 = N->getOperand(Num: 1); |
| 14582 | |
auto IsFreelyInvertible = [&](SDValue V) {
| 14584 | if (V->getOpcode() == ARMISD::VCMP || V->getOpcode() == ARMISD::VCMPZ) |
| 14585 | return CanInvertMVEVCMP(N: V); |
| 14586 | return false; |
| 14587 | }; |
| 14588 | |
// At least one operand must be freely invertible.
if (!(IsFreelyInvertible(N0) || IsFreelyInvertible(N1)))
| 14591 | return SDValue(); |
| 14592 | |
| 14593 | SDValue NewN0 = DAG.getLogicalNOT(DL, Val: N0, VT); |
| 14594 | SDValue NewN1 = DAG.getLogicalNOT(DL, Val: N1, VT); |
| 14595 | SDValue And = DAG.getNode(Opcode: ISD::AND, DL, VT, N1: NewN0, N2: NewN1); |
| 14596 | return DAG.getLogicalNOT(DL, Val: And, VT); |
| 14597 | } |
| 14598 | |
| 14599 | /// PerformORCombine - Target-specific dag combine xforms for ISD::OR |
| 14600 | static SDValue PerformORCombine(SDNode *N, |
| 14601 | TargetLowering::DAGCombinerInfo &DCI, |
| 14602 | const ARMSubtarget *Subtarget) { |
| 14603 | // Attempt to use immediate-form VORR |
| 14604 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Val: N->getOperand(Num: 1)); |
| 14605 | SDLoc dl(N); |
| 14606 | EVT VT = N->getValueType(ResNo: 0); |
| 14607 | SelectionDAG &DAG = DCI.DAG; |
| 14608 | |
if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
| 14610 | return SDValue(); |
| 14611 | |
| 14612 | if (Subtarget->hasMVEIntegerOps() && (VT == MVT::v2i1 || VT == MVT::v4i1 || |
| 14613 | VT == MVT::v8i1 || VT == MVT::v16i1)) |
| 14614 | return PerformORCombine_i1(N, DAG, Subtarget); |
| 14615 | |
| 14616 | APInt SplatBits, SplatUndef; |
| 14617 | unsigned SplatBitSize; |
| 14618 | bool HasAnyUndefs; |
| 14619 | if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) && |
| 14620 | BVN->isConstantSplat(SplatValue&: SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { |
| 14621 | if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 || |
| 14622 | SplatBitSize == 64) { |
| 14623 | EVT VorrVT; |
| 14624 | SDValue Val = |
| 14625 | isVMOVModifiedImm(SplatBits: SplatBits.getZExtValue(), SplatUndef: SplatUndef.getZExtValue(), |
| 14626 | SplatBitSize, DAG, dl, VT&: VorrVT, VectorVT: VT, type: OtherModImm); |
| 14627 | if (Val.getNode()) { |
| 14628 | SDValue Input = |
| 14629 | DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: VorrVT, Operand: N->getOperand(Num: 0)); |
| 14630 | SDValue Vorr = DAG.getNode(Opcode: ARMISD::VORRIMM, DL: dl, VT: VorrVT, N1: Input, N2: Val); |
| 14631 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Vorr); |
| 14632 | } |
| 14633 | } |
| 14634 | } |
| 14635 | |
| 14636 | if (!Subtarget->isThumb1Only()) { |
// fold (or (select cc, 0, c), x) -> (select cc, x, (or x, c))
| 14638 | if (SDValue Result = combineSelectAndUseCommutative(N, AllOnes: false, DCI)) |
| 14639 | return Result; |
| 14640 | if (SDValue Result = PerformORCombineToSMULWBT(OR: N, DCI, Subtarget)) |
| 14641 | return Result; |
| 14642 | } |
| 14643 | |
| 14644 | SDValue N0 = N->getOperand(Num: 0); |
| 14645 | SDValue N1 = N->getOperand(Num: 1); |
| 14646 | |
| 14647 | // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. |
| 14648 | if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && |
| 14649 | DAG.getTargetLoweringInfo().isTypeLegal(VT)) { |
| 14650 | |
| 14651 | // The code below optimizes (or (and X, Y), Z). |
| 14652 | // The AND operand needs to have a single user to make these optimizations |
| 14653 | // profitable. |
| 14654 | if (N0.getOpcode() != ISD::AND || !N0.hasOneUse()) |
| 14655 | return SDValue(); |
| 14656 | |
| 14657 | APInt SplatUndef; |
| 14658 | unsigned SplatBitSize; |
| 14659 | bool HasAnyUndefs; |
| 14660 | |
| 14661 | APInt SplatBits0, SplatBits1; |
| 14662 | BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(Val: N0->getOperand(Num: 1)); |
| 14663 | BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(Val: N1->getOperand(Num: 1)); |
// Ensure that the second operand of each AND is a constant.
| 14665 | if (BVN0 && BVN0->isConstantSplat(SplatValue&: SplatBits0, SplatUndef, SplatBitSize, |
| 14666 | HasAnyUndefs) && !HasAnyUndefs) { |
| 14667 | if (BVN1 && BVN1->isConstantSplat(SplatValue&: SplatBits1, SplatUndef, SplatBitSize, |
| 14668 | HasAnyUndefs) && !HasAnyUndefs) { |
// Ensure that the constants have the same bit width and that the
// splat arguments are logical inverses, as per the pattern we are
// trying to simplify.
| 14672 | if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() && |
| 14673 | SplatBits0 == ~SplatBits1) { |
| 14674 | // Canonicalize the vector type to make instruction selection |
| 14675 | // simpler. |
| 14676 | EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; |
| 14677 | SDValue Result = DAG.getNode(Opcode: ARMISD::VBSP, DL: dl, VT: CanonicalVT, |
| 14678 | N1: N0->getOperand(Num: 1), |
| 14679 | N2: N0->getOperand(Num: 0), |
| 14680 | N3: N1->getOperand(Num: 0)); |
| 14681 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Result); |
| 14682 | } |
| 14683 | } |
| 14684 | } |
| 14685 | } |
| 14686 | |
| 14687 | // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when |
| 14688 | // reasonable. |
| 14689 | if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) { |
| 14690 | if (SDValue Res = PerformORCombineToBFI(N, DCI, Subtarget)) |
| 14691 | return Res; |
| 14692 | } |
| 14693 | |
| 14694 | if (SDValue Result = PerformSHLSimplify(N, DCI, ST: Subtarget)) |
| 14695 | return Result; |
| 14696 | |
| 14697 | return SDValue(); |
| 14698 | } |
| 14699 | |
| 14700 | static SDValue PerformXORCombine(SDNode *N, |
| 14701 | TargetLowering::DAGCombinerInfo &DCI, |
| 14702 | const ARMSubtarget *Subtarget) { |
| 14703 | EVT VT = N->getValueType(ResNo: 0); |
| 14704 | SelectionDAG &DAG = DCI.DAG; |
| 14705 | |
if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
| 14707 | return SDValue(); |
| 14708 | |
| 14709 | if (!Subtarget->isThumb1Only()) { |
// fold (xor (select cc, 0, c), x) -> (select cc, x, (xor x, c))
| 14711 | if (SDValue Result = combineSelectAndUseCommutative(N, AllOnes: false, DCI)) |
| 14712 | return Result; |
| 14713 | |
| 14714 | if (SDValue Result = PerformSHLSimplify(N, DCI, ST: Subtarget)) |
| 14715 | return Result; |
| 14716 | } |
| 14717 | |
| 14718 | if (Subtarget->hasMVEIntegerOps()) { |
| 14719 | // fold (xor(vcmp/z, 1)) into a vcmp with the opposite condition. |
| 14720 | SDValue N0 = N->getOperand(Num: 0); |
| 14721 | SDValue N1 = N->getOperand(Num: 1); |
| 14722 | const TargetLowering *TLI = Subtarget->getTargetLowering(); |
| 14723 | if (TLI->isConstTrueVal(N: N1) && |
| 14724 | (N0->getOpcode() == ARMISD::VCMP || N0->getOpcode() == ARMISD::VCMPZ)) { |
| 14725 | if (CanInvertMVEVCMP(N: N0)) { |
| 14726 | SDLoc DL(N0); |
| 14727 | ARMCC::CondCodes CC = ARMCC::getOppositeCondition(CC: getVCMPCondCode(N: N0)); |
| 14728 | |
| 14729 | SmallVector<SDValue, 4> Ops; |
| 14730 | Ops.push_back(Elt: N0->getOperand(Num: 0)); |
| 14731 | if (N0->getOpcode() == ARMISD::VCMP) |
| 14732 | Ops.push_back(Elt: N0->getOperand(Num: 1)); |
| 14733 | Ops.push_back(Elt: DAG.getConstant(Val: CC, DL, VT: MVT::i32)); |
| 14734 | return DAG.getNode(Opcode: N0->getOpcode(), DL, VT: N0->getValueType(ResNo: 0), Ops); |
| 14735 | } |
| 14736 | } |
| 14737 | } |
| 14738 | |
| 14739 | return SDValue(); |
| 14740 | } |
| 14741 | |
// ParseBFI - Given a BFI instruction in N, extract the "from" value (Rn)
// and return it, filling in FromMask with the (consecutive) bits of "from"
// to be extracted and ToMask with their position in "to" (Rd).
| 14745 | static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) { |
| 14746 | assert(N->getOpcode() == ARMISD::BFI); |
| 14747 | |
| 14748 | SDValue From = N->getOperand(Num: 1); |
| 14749 | ToMask = ~N->getConstantOperandAPInt(Num: 2); |
| 14750 | FromMask = APInt::getLowBitsSet(numBits: ToMask.getBitWidth(), loBitsSet: ToMask.popcount()); |
| 14751 | |
// If the "from" value came from a SHR #C, we can deduce that the BFI is
// really extracting bits starting at bit #C of the base of the SHR.
| 14754 | if (From->getOpcode() == ISD::SRL && |
| 14755 | isa<ConstantSDNode>(Val: From->getOperand(Num: 1))) { |
| 14756 | APInt Shift = From->getConstantOperandAPInt(Num: 1); |
assert(Shift.getLimitedValue() < 32 && "Shift too large!");
| 14758 | FromMask <<= Shift.getLimitedValue(Limit: 31); |
| 14759 | From = From->getOperand(Num: 0); |
| 14760 | } |
| 14761 | |
| 14762 | return From; |
| 14763 | } |
| 14764 | |
// If A and B each contain a single contiguous set of bits, does A | B
// equal the concatenation of A and B (with A occupying the higher bits)?
//
// Neither A nor B may be zero.
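// Illustrative example: A == 0b1100 and B == 0b0011 concatenate properly,
// since the lowest set bit of A (bit 2) sits directly above the highest
// set bit of B (bit 1).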
| 14768 | static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) { |
| 14769 | unsigned LastActiveBitInA = A.countr_zero(); |
| 14770 | unsigned FirstActiveBitInB = B.getBitWidth() - B.countl_zero() - 1; |
| 14771 | return LastActiveBitInA - 1 == FirstActiveBitInB; |
| 14772 | } |
| 14773 | |
| 14774 | static SDValue FindBFIToCombineWith(SDNode *N) { |
| 14775 | // We have a BFI in N. Find a BFI it can combine with, if one exists. |
| 14776 | APInt ToMask, FromMask; |
| 14777 | SDValue From = ParseBFI(N, ToMask, FromMask); |
| 14778 | SDValue To = N->getOperand(Num: 0); |
| 14779 | |
| 14780 | SDValue V = To; |
| 14781 | if (V.getOpcode() != ARMISD::BFI) |
| 14782 | return SDValue(); |
| 14783 | |
| 14784 | APInt NewToMask, NewFromMask; |
| 14785 | SDValue NewFrom = ParseBFI(N: V.getNode(), ToMask&: NewToMask, FromMask&: NewFromMask); |
| 14786 | if (NewFrom != From) |
| 14787 | return SDValue(); |
| 14788 | |
| 14789 | // Do the written bits conflict with any we've seen so far? |
| 14790 | if ((NewToMask & ToMask).getBoolValue()) |
| 14791 | // Conflicting bits. |
| 14792 | return SDValue(); |
| 14793 | |
| 14794 | // Are the new bits contiguous when combined with the old bits? |
| 14795 | if (BitsProperlyConcatenate(A: ToMask, B: NewToMask) && |
| 14796 | BitsProperlyConcatenate(A: FromMask, B: NewFromMask)) |
| 14797 | return V; |
| 14798 | if (BitsProperlyConcatenate(A: NewToMask, B: ToMask) && |
| 14799 | BitsProperlyConcatenate(A: NewFromMask, B: FromMask)) |
| 14800 | return V; |
| 14801 | |
| 14802 | return SDValue(); |
| 14803 | } |
| 14804 | |
| 14805 | static SDValue PerformBFICombine(SDNode *N, SelectionDAG &DAG) { |
| 14806 | SDValue N0 = N->getOperand(Num: 0); |
| 14807 | SDValue N1 = N->getOperand(Num: 1); |
| 14808 | |
| 14809 | if (N1.getOpcode() == ISD::AND) { |
| 14810 | // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff |
| 14811 | // the bits being cleared by the AND are not demanded by the BFI. |
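// Illustrative example: (bfi A, (and B, 0xff), 0xffffff0f) only demands
// bits 0-3 of B, all of which survive the AND with 0xff, so it can be
// simplified to (bfi A, B, 0xffffff0f).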
| 14812 | ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(Val: N1.getOperand(i: 1)); |
| 14813 | if (!N11C) |
| 14814 | return SDValue(); |
| 14815 | unsigned InvMask = N->getConstantOperandVal(Num: 2); |
| 14816 | unsigned LSB = llvm::countr_zero(Val: ~InvMask); |
| 14817 | unsigned Width = llvm::bit_width<unsigned>(Value: ~InvMask) - LSB; |
| 14818 | assert(Width < |
| 14819 | static_cast<unsigned>(std::numeric_limits<unsigned>::digits) && |
| 14820 | "undefined behavior" ); |
| 14821 | unsigned Mask = (1u << Width) - 1; |
| 14822 | unsigned Mask2 = N11C->getZExtValue(); |
| 14823 | if ((Mask & (~Mask2)) == 0) |
| 14824 | return DAG.getNode(Opcode: ARMISD::BFI, DL: SDLoc(N), VT: N->getValueType(ResNo: 0), |
| 14825 | N1: N->getOperand(Num: 0), N2: N1.getOperand(i: 0), N3: N->getOperand(Num: 2)); |
| 14826 | return SDValue(); |
| 14827 | } |
| 14828 | |
| 14829 | // Look for another BFI to combine with. |
| 14830 | if (SDValue CombineBFI = FindBFIToCombineWith(N)) { |
| 14831 | // We've found a BFI. |
| 14832 | APInt ToMask1, FromMask1; |
| 14833 | SDValue From1 = ParseBFI(N, ToMask&: ToMask1, FromMask&: FromMask1); |
| 14834 | |
| 14835 | APInt ToMask2, FromMask2; |
| 14836 | SDValue From2 = ParseBFI(N: CombineBFI.getNode(), ToMask&: ToMask2, FromMask&: FromMask2); |
| 14837 | assert(From1 == From2); |
| 14838 | (void)From2; |
| 14839 | |
| 14840 | // Create a new BFI, combining the two together. |
| 14841 | APInt NewFromMask = FromMask1 | FromMask2; |
| 14842 | APInt NewToMask = ToMask1 | ToMask2; |
| 14843 | |
| 14844 | EVT VT = N->getValueType(ResNo: 0); |
| 14845 | SDLoc dl(N); |
| 14846 | |
| 14847 | if (NewFromMask[0] == 0) |
| 14848 | From1 = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT, N1: From1, |
| 14849 | N2: DAG.getConstant(Val: NewFromMask.countr_zero(), DL: dl, VT)); |
| 14850 | return DAG.getNode(Opcode: ARMISD::BFI, DL: dl, VT, N1: CombineBFI.getOperand(i: 0), N2: From1, |
| 14851 | N3: DAG.getConstant(Val: ~NewToMask, DL: dl, VT)); |
| 14852 | } |
| 14853 | |
// Reassociate BFI(BFI(A, B, M1), C, M2) to BFI(BFI(A, C, M2), B, M1) so
// that lower bit insertions are performed first, provided that M1 and M2
// do not overlap. This can allow multiple BFI instructions to be combined
// together by the other folds above.
| 14858 | if (N->getOperand(Num: 0).getOpcode() == ARMISD::BFI) { |
| 14859 | APInt ToMask1 = ~N->getConstantOperandAPInt(Num: 2); |
| 14860 | APInt ToMask2 = ~N0.getConstantOperandAPInt(i: 2); |
| 14861 | |
| 14862 | if (!N0.hasOneUse() || (ToMask1 & ToMask2) != 0 || |
| 14863 | ToMask1.countl_zero() < ToMask2.countl_zero()) |
| 14864 | return SDValue(); |
| 14865 | |
| 14866 | EVT VT = N->getValueType(ResNo: 0); |
| 14867 | SDLoc dl(N); |
| 14868 | SDValue BFI1 = DAG.getNode(Opcode: ARMISD::BFI, DL: dl, VT, N1: N0.getOperand(i: 0), |
| 14869 | N2: N->getOperand(Num: 1), N3: N->getOperand(Num: 2)); |
| 14870 | return DAG.getNode(Opcode: ARMISD::BFI, DL: dl, VT, N1: BFI1, N2: N0.getOperand(i: 1), |
| 14871 | N3: N0.getOperand(i: 2)); |
| 14872 | } |
| 14873 | |
| 14874 | return SDValue(); |
| 14875 | } |
| 14876 | |
// Check that N is CMPZ(CSINC(0, 0, CC, X))
// or CMPZ(CMOV(1, 0, CC, X));
// return X if valid.
| 14880 | static SDValue IsCMPZCSINC(SDNode *Cmp, ARMCC::CondCodes &CC) { |
| 14881 | if (Cmp->getOpcode() != ARMISD::CMPZ || !isNullConstant(V: Cmp->getOperand(Num: 1))) |
| 14882 | return SDValue(); |
| 14883 | SDValue CSInc = Cmp->getOperand(Num: 0); |
| 14884 | |
| 14885 | // Ignore any `And 1` nodes that may not yet have been removed. We are |
| 14886 | // looking for a value that produces 1/0, so these have no effect on the |
| 14887 | // code. |
| 14888 | while (CSInc.getOpcode() == ISD::AND && |
| 14889 | isa<ConstantSDNode>(Val: CSInc.getOperand(i: 1)) && |
| 14890 | CSInc.getConstantOperandVal(i: 1) == 1 && CSInc->hasOneUse()) |
| 14891 | CSInc = CSInc.getOperand(i: 0); |
| 14892 | |
| 14893 | if (CSInc.getOpcode() == ARMISD::CSINC && |
| 14894 | isNullConstant(V: CSInc.getOperand(i: 0)) && |
| 14895 | isNullConstant(V: CSInc.getOperand(i: 1)) && CSInc->hasOneUse()) { |
| 14896 | CC = (ARMCC::CondCodes)CSInc.getConstantOperandVal(i: 2); |
| 14897 | return CSInc.getOperand(i: 3); |
| 14898 | } |
| 14899 | if (CSInc.getOpcode() == ARMISD::CMOV && isOneConstant(V: CSInc.getOperand(i: 0)) && |
| 14900 | isNullConstant(V: CSInc.getOperand(i: 1)) && CSInc->hasOneUse()) { |
| 14901 | CC = (ARMCC::CondCodes)CSInc.getConstantOperandVal(i: 2); |
| 14902 | return CSInc.getOperand(i: 3); |
| 14903 | } |
| 14904 | if (CSInc.getOpcode() == ARMISD::CMOV && isOneConstant(V: CSInc.getOperand(i: 1)) && |
| 14905 | isNullConstant(V: CSInc.getOperand(i: 0)) && CSInc->hasOneUse()) { |
| 14906 | CC = ARMCC::getOppositeCondition( |
| 14907 | CC: (ARMCC::CondCodes)CSInc.getConstantOperandVal(i: 2)); |
| 14908 | return CSInc.getOperand(i: 3); |
| 14909 | } |
| 14910 | return SDValue(); |
| 14911 | } |
| 14912 | |
| 14913 | static SDValue PerformCMPZCombine(SDNode *N, SelectionDAG &DAG) { |
| 14914 | // Given CMPZ(CSINC(C, 0, 0, EQ), 0), we can just use C directly. As in |
| 14915 | // t92: flags = ARMISD::CMPZ t74, 0 |
| 14916 | // t93: i32 = ARMISD::CSINC 0, 0, 1, t92 |
| 14917 | // t96: flags = ARMISD::CMPZ t93, 0 |
| 14918 | // t114: i32 = ARMISD::CSINV 0, 0, 0, t96 |
| 14919 | ARMCC::CondCodes Cond; |
| 14920 | if (SDValue C = IsCMPZCSINC(Cmp: N, CC&: Cond)) |
| 14921 | if (Cond == ARMCC::EQ) |
| 14922 | return C; |
| 14923 | return SDValue(); |
| 14924 | } |
| 14925 | |
| 14926 | static SDValue PerformCSETCombine(SDNode *N, SelectionDAG &DAG) { |
// Fold away an unnecessary CMPZ/CSINC
| 14928 | // CSXYZ A, B, C1 (CMPZ (CSINC 0, 0, C2, D), 0) -> |
| 14929 | // if C1==EQ -> CSXYZ A, B, C2, D |
| 14930 | // if C1==NE -> CSXYZ A, B, NOT(C2), D |
| 14931 | ARMCC::CondCodes Cond; |
| 14932 | if (SDValue C = IsCMPZCSINC(Cmp: N->getOperand(Num: 3).getNode(), CC&: Cond)) { |
| 14933 | if (N->getConstantOperandVal(Num: 2) == ARMCC::EQ) |
| 14934 | return DAG.getNode(Opcode: N->getOpcode(), DL: SDLoc(N), VT: MVT::i32, N1: N->getOperand(Num: 0), |
| 14935 | N2: N->getOperand(Num: 1), |
| 14936 | N3: DAG.getConstant(Val: Cond, DL: SDLoc(N), VT: MVT::i32), N4: C); |
| 14937 | if (N->getConstantOperandVal(Num: 2) == ARMCC::NE) |
| 14938 | return DAG.getNode( |
| 14939 | Opcode: N->getOpcode(), DL: SDLoc(N), VT: MVT::i32, N1: N->getOperand(Num: 0), |
| 14940 | N2: N->getOperand(Num: 1), |
| 14941 | N3: DAG.getConstant(Val: ARMCC::getOppositeCondition(CC: Cond), DL: SDLoc(N), VT: MVT::i32), N4: C); |
| 14942 | } |
| 14943 | return SDValue(); |
| 14944 | } |
| 14945 | |
| 14946 | /// PerformVMOVRRDCombine - Target-specific dag combine xforms for |
| 14947 | /// ARMISD::VMOVRRD. |
| 14948 | static SDValue PerformVMOVRRDCombine(SDNode *N, |
| 14949 | TargetLowering::DAGCombinerInfo &DCI, |
| 14950 | const ARMSubtarget *Subtarget) { |
| 14951 | // vmovrrd(vmovdrr x, y) -> x,y |
| 14952 | SDValue InDouble = N->getOperand(Num: 0); |
| 14953 | if (InDouble.getOpcode() == ARMISD::VMOVDRR && Subtarget->hasFP64()) |
| 14954 | return DCI.CombineTo(N, Res0: InDouble.getOperand(i: 0), Res1: InDouble.getOperand(i: 1)); |
| 14955 | |
| 14956 | // vmovrrd(load f64) -> (load i32), (load i32) |
| 14957 | SDNode *InNode = InDouble.getNode(); |
| 14958 | if (ISD::isNormalLoad(N: InNode) && InNode->hasOneUse() && |
| 14959 | InNode->getValueType(ResNo: 0) == MVT::f64 && |
| 14960 | InNode->getOperand(Num: 1).getOpcode() == ISD::FrameIndex && |
| 14961 | !cast<LoadSDNode>(Val: InNode)->isVolatile()) { |
| 14962 | // TODO: Should this be done for non-FrameIndex operands? |
| 14963 | LoadSDNode *LD = cast<LoadSDNode>(Val: InNode); |
| 14964 | |
| 14965 | SelectionDAG &DAG = DCI.DAG; |
| 14966 | SDLoc DL(LD); |
| 14967 | SDValue BasePtr = LD->getBasePtr(); |
| 14968 | SDValue NewLD1 = |
| 14969 | DAG.getLoad(VT: MVT::i32, dl: DL, Chain: LD->getChain(), Ptr: BasePtr, PtrInfo: LD->getPointerInfo(), |
| 14970 | Alignment: LD->getAlign(), MMOFlags: LD->getMemOperand()->getFlags()); |
| 14971 | |
| 14972 | SDValue OffsetPtr = DAG.getNode(Opcode: ISD::ADD, DL, VT: MVT::i32, N1: BasePtr, |
| 14973 | N2: DAG.getConstant(Val: 4, DL, VT: MVT::i32)); |
| 14974 | |
| 14975 | SDValue NewLD2 = DAG.getLoad(VT: MVT::i32, dl: DL, Chain: LD->getChain(), Ptr: OffsetPtr, |
| 14976 | PtrInfo: LD->getPointerInfo().getWithOffset(O: 4), |
| 14977 | Alignment: commonAlignment(A: LD->getAlign(), Offset: 4), |
| 14978 | MMOFlags: LD->getMemOperand()->getFlags()); |
| 14979 | |
| 14980 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 1), To: NewLD2.getValue(R: 1)); |
| 14981 | if (DCI.DAG.getDataLayout().isBigEndian()) |
std::swap(a&: NewLD1, b&: NewLD2);
| 14983 | SDValue Result = DCI.CombineTo(N, Res0: NewLD1, Res1: NewLD2); |
| 14984 | return Result; |
| 14985 | } |
| 14986 | |
| 14987 | // VMOVRRD(extract(..(build_vector(a, b, c, d)))) -> a,b or c,d |
| 14988 | // VMOVRRD(extract(insert_vector(insert_vector(.., a, l1), b, l2))) -> a,b |
| 14989 | if (InDouble.getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 14990 | isa<ConstantSDNode>(Val: InDouble.getOperand(i: 1))) { |
| 14991 | SDValue BV = InDouble.getOperand(i: 0); |
| 14992 | // Look up through any nop bitcasts and vector_reg_casts. Bitcasts may
| 14993 | // change lane order under big endian.
| 14994 | bool BVSwap = BV.getOpcode() == ISD::BITCAST; |
| 14995 | while ( |
| 14996 | (BV.getOpcode() == ISD::BITCAST || |
| 14997 | BV.getOpcode() == ARMISD::VECTOR_REG_CAST) && |
| 14998 | (BV.getValueType() == MVT::v2f64 || BV.getValueType() == MVT::v2i64)) { |
| 14999 | BVSwap = BV.getOpcode() == ISD::BITCAST; |
| 15000 | BV = BV.getOperand(i: 0); |
| 15001 | } |
| 15002 | if (BV.getValueType() != MVT::v4i32) |
| 15003 | return SDValue(); |
| 15004 | |
| 15005 | // Handle buildvectors, pulling out the correct lane depending on |
| 15006 | // endianness. |
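| | // E.g. lane 1 of the double-wide value corresponds to elements 2 and 3 of
| | // the underlying v4i32, hence an Offset of 2 below.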
| 15007 | unsigned Offset = InDouble.getConstantOperandVal(i: 1) == 1 ? 2 : 0; |
| 15008 | if (BV.getOpcode() == ISD::BUILD_VECTOR) { |
| 15009 | SDValue Op0 = BV.getOperand(i: Offset); |
| 15010 | SDValue Op1 = BV.getOperand(i: Offset + 1); |
| 15011 | if (!Subtarget->isLittle() && BVSwap) |
| 15012 | std::swap(a&: Op0, b&: Op1); |
| 15013 | |
| 15014 | return DCI.DAG.getMergeValues(Ops: {Op0, Op1}, dl: SDLoc(N)); |
| 15015 | } |
| 15016 | |
| 15017 | // A chain of insert_vectors, grabbing the correct value of the chain of |
| 15018 | // inserts. |
| 15019 | SDValue Op0, Op1; |
| 15020 | while (BV.getOpcode() == ISD::INSERT_VECTOR_ELT) { |
| 15021 | if (isa<ConstantSDNode>(Val: BV.getOperand(i: 2))) { |
| 15022 | if (BV.getConstantOperandVal(i: 2) == Offset && !Op0) |
| 15023 | Op0 = BV.getOperand(i: 1); |
| 15024 | if (BV.getConstantOperandVal(i: 2) == Offset + 1 && !Op1) |
| 15025 | Op1 = BV.getOperand(i: 1); |
| 15026 | } |
| 15027 | BV = BV.getOperand(i: 0); |
| 15028 | } |
| 15029 | if (!Subtarget->isLittle() && BVSwap) |
| 15030 | std::swap(a&: Op0, b&: Op1); |
| 15031 | if (Op0 && Op1) |
| 15032 | return DCI.DAG.getMergeValues(Ops: {Op0, Op1}, dl: SDLoc(N)); |
| 15033 | } |
| 15034 | |
| 15035 | return SDValue(); |
| 15036 | } |
| 15037 | |
| 15038 | /// PerformVMOVDRRCombine - Target-specific dag combine xforms for |
| 15039 | /// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. |
| 15040 | static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { |
| 15041 | // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) |
| 15042 | SDValue Op0 = N->getOperand(Num: 0); |
| 15043 | SDValue Op1 = N->getOperand(Num: 1); |
| 15044 | if (Op0.getOpcode() == ISD::BITCAST) |
| 15045 | Op0 = Op0.getOperand(i: 0); |
| 15046 | if (Op1.getOpcode() == ISD::BITCAST) |
| 15047 | Op1 = Op1.getOperand(i: 0); |
| 15048 | if (Op0.getOpcode() == ARMISD::VMOVRRD && |
| 15049 | Op0.getNode() == Op1.getNode() && |
| 15050 | Op0.getResNo() == 0 && Op1.getResNo() == 1) |
| 15051 | return DAG.getNode(Opcode: ISD::BITCAST, DL: SDLoc(N), |
| 15052 | VT: N->getValueType(ResNo: 0), Operand: Op0.getOperand(i: 0)); |
| 15053 | return SDValue(); |
| 15054 | } |
| 15055 | |
| 15056 | static SDValue PerformVMOVhrCombine(SDNode *N, |
| 15057 | TargetLowering::DAGCombinerInfo &DCI) { |
| 15058 | SDValue Op0 = N->getOperand(Num: 0); |
| 15059 | |
| 15060 | // VMOVhr (VMOVrh (X)) -> X |
| 15061 | if (Op0->getOpcode() == ARMISD::VMOVrh) |
| 15062 | return Op0->getOperand(Num: 0); |
| 15063 | |
| 15064 | // FullFP16: half values are passed in S-registers, and we don't |
| 15065 | // need any of the bitcasts and moves:
| 15066 | // |
| 15067 | // t2: f32,ch1,gl1? = CopyFromReg ch, Register:f32 %0, gl? |
| 15068 | // t5: i32 = bitcast t2 |
| 15069 | // t18: f16 = ARMISD::VMOVhr t5 |
| 15070 | // => |
| 15071 | // tN: f16,ch2,gl2? = CopyFromReg ch, Register:f32 %0, gl?
| 15072 | if (Op0->getOpcode() == ISD::BITCAST) { |
| 15073 | SDValue Copy = Op0->getOperand(Num: 0); |
| 15074 | if (Copy.getValueType() == MVT::f32 && |
| 15075 | Copy->getOpcode() == ISD::CopyFromReg) { |
| 15076 | bool HasGlue = Copy->getNumOperands() == 3; |
| 15077 | SDValue Ops[] = {Copy->getOperand(Num: 0), Copy->getOperand(Num: 1), |
| 15078 | HasGlue ? Copy->getOperand(Num: 2) : SDValue()}; |
| 15079 | EVT OutTys[] = {N->getValueType(ResNo: 0), MVT::Other, MVT::Glue}; |
| 15080 | SDValue NewCopy = |
| 15081 | DCI.DAG.getNode(Opcode: ISD::CopyFromReg, DL: SDLoc(N), |
| 15082 | VTList: DCI.DAG.getVTList(VTs: ArrayRef(OutTys, HasGlue ? 3 : 2)), |
| 15083 | Ops: ArrayRef(Ops, HasGlue ? 3 : 2)); |
| 15084 | |
| 15085 | // Update Users, Chains, and Potential Glue. |
| 15086 | DCI.DAG.ReplaceAllUsesOfValueWith(From: SDValue(N, 0), To: NewCopy.getValue(R: 0)); |
| 15087 | DCI.DAG.ReplaceAllUsesOfValueWith(From: Copy.getValue(R: 1), To: NewCopy.getValue(R: 1)); |
| 15088 | if (HasGlue) |
| 15089 | DCI.DAG.ReplaceAllUsesOfValueWith(From: Copy.getValue(R: 2), |
| 15090 | To: NewCopy.getValue(R: 2)); |
| 15091 | |
| 15092 | return NewCopy; |
| 15093 | } |
| 15094 | } |
| 15095 | |
| 15096 | // fold (VMOVhr (load x)) -> (load (f16*)x) |
| 15097 | if (LoadSDNode *LN0 = dyn_cast<LoadSDNode>(Val&: Op0)) { |
| 15098 | if (LN0->hasOneUse() && LN0->isUnindexed() && |
| 15099 | LN0->getMemoryVT() == MVT::i16) { |
| 15100 | SDValue Load = |
| 15101 | DCI.DAG.getLoad(VT: N->getValueType(ResNo: 0), dl: SDLoc(N), Chain: LN0->getChain(), |
| 15102 | Ptr: LN0->getBasePtr(), MMO: LN0->getMemOperand()); |
| 15103 | DCI.DAG.ReplaceAllUsesOfValueWith(From: SDValue(N, 0), To: Load.getValue(R: 0)); |
| 15104 | DCI.DAG.ReplaceAllUsesOfValueWith(From: Op0.getValue(R: 1), To: Load.getValue(R: 1)); |
| 15105 | return Load; |
| 15106 | } |
| 15107 | } |
| 15108 | |
| 15109 | // Only the bottom 16 bits of the source register are used. |
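| | // E.g. an AND with 0xFFFF feeding this node is redundant and can be
| | // removed by SimplifyDemandedBits.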
| 15110 | APInt DemandedMask = APInt::getLowBitsSet(numBits: 32, loBitsSet: 16); |
| 15111 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); |
| 15112 | if (TLI.SimplifyDemandedBits(Op: Op0, DemandedBits: DemandedMask, DCI)) |
| 15113 | return SDValue(N, 0); |
| 15114 | |
| 15115 | return SDValue(); |
| 15116 | } |
| 15117 | |
| 15118 | static SDValue PerformVMOVrhCombine(SDNode *N, SelectionDAG &DAG) { |
| 15119 | SDValue N0 = N->getOperand(Num: 0); |
| 15120 | EVT VT = N->getValueType(ResNo: 0); |
| 15121 | |
| 15122 | // fold (VMOVrh (fpconst x)) -> const x |
| 15123 | if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Val&: N0)) { |
| 15124 | APFloat V = C->getValueAPF(); |
| 15125 | return DAG.getConstant(Val: V.bitcastToAPInt().getZExtValue(), DL: SDLoc(N), VT); |
| 15126 | } |
| 15127 | |
| 15128 | // fold (VMOVrh (load x)) -> (zextload (i16*)x) |
| 15129 | if (ISD::isNormalLoad(N: N0.getNode()) && N0.hasOneUse()) { |
| 15130 | LoadSDNode *LN0 = cast<LoadSDNode>(Val&: N0); |
| 15131 | |
| 15132 | SDValue Load = |
| 15133 | DAG.getExtLoad(ExtType: ISD::ZEXTLOAD, dl: SDLoc(N), VT, Chain: LN0->getChain(), |
| 15134 | Ptr: LN0->getBasePtr(), MemVT: MVT::i16, MMO: LN0->getMemOperand()); |
| 15135 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(N, 0), To: Load.getValue(R: 0)); |
| 15136 | DAG.ReplaceAllUsesOfValueWith(From: N0.getValue(R: 1), To: Load.getValue(R: 1)); |
| 15137 | return Load; |
| 15138 | } |
| 15139 | |
| 15140 | // Fold VMOVrh(extract(x, n)) -> vgetlaneu(x, n) |
| 15141 | if (N0->getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 15142 | isa<ConstantSDNode>(Val: N0->getOperand(Num: 1))) |
| 15143 | return DAG.getNode(Opcode: ARMISD::VGETLANEu, DL: SDLoc(N), VT, N1: N0->getOperand(Num: 0), |
| 15144 | N2: N0->getOperand(Num: 1)); |
| 15145 | |
| 15146 | return SDValue(); |
| 15147 | } |
| 15148 | |
| 15149 | /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node |
| 15150 | /// are normal, non-volatile loads. If so, it is profitable to bitcast an |
| 15151 | /// i64 vector to have f64 elements, since the value can then be loaded |
| 15152 | /// directly into a VFP register. |
| 15153 | static bool hasNormalLoadOperand(SDNode *N) { |
| 15154 | unsigned NumElts = N->getValueType(ResNo: 0).getVectorNumElements(); |
| 15155 | for (unsigned i = 0; i < NumElts; ++i) { |
| 15156 | SDNode *Elt = N->getOperand(Num: i).getNode(); |
| 15157 | if (ISD::isNormalLoad(N: Elt) && !cast<LoadSDNode>(Val: Elt)->isVolatile()) |
| 15158 | return true; |
| 15159 | } |
| 15160 | return false; |
| 15161 | } |
| 15162 | |
| 15163 | /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for |
| 15164 | /// ISD::BUILD_VECTOR. |
| 15165 | static SDValue PerformBUILD_VECTORCombine(SDNode *N, |
| 15166 | TargetLowering::DAGCombinerInfo &DCI, |
| 15167 | const ARMSubtarget *Subtarget) { |
| 15168 | // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): |
| 15169 | // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value |
| 15170 | // into a pair of GPRs, which is fine when the value is used as a scalar, |
| 15171 | // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. |
| 15172 | SelectionDAG &DAG = DCI.DAG; |
| 15173 | if (N->getNumOperands() == 2) |
| 15174 | if (SDValue RV = PerformVMOVDRRCombine(N, DAG)) |
| 15175 | return RV; |
| 15176 | |
| 15177 | // Load i64 elements as f64 values so that type legalization does not split |
| 15178 | // them up into i32 values. |
| 15179 | EVT VT = N->getValueType(ResNo: 0); |
| 15180 | if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) |
| 15181 | return SDValue(); |
| 15182 | SDLoc dl(N); |
| 15183 | SmallVector<SDValue, 8> Ops; |
| 15184 | unsigned NumElts = VT.getVectorNumElements(); |
| 15185 | for (unsigned i = 0; i < NumElts; ++i) { |
| 15186 | SDValue V = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f64, Operand: N->getOperand(Num: i)); |
| 15187 | Ops.push_back(Elt: V); |
| 15188 | // Make the DAGCombiner fold the bitcast. |
| 15189 | DCI.AddToWorklist(N: V.getNode()); |
| 15190 | } |
| 15191 | EVT FloatVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::f64, NumElements: NumElts); |
| 15192 | SDValue BV = DAG.getBuildVector(VT: FloatVT, DL: dl, Ops); |
| 15193 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: BV); |
| 15194 | } |
| 15195 | |
| 15196 | /// Target-specific dag combine xforms for ARMISD::BUILD_VECTOR. |
| 15197 | static SDValue |
| 15198 | PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
| 15199 | // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR. |
| 15200 | // At that time, we may have inserted bitcasts from integer to float. |
| 15201 | // If these bitcasts have survived DAGCombine, change the lowering of this |
| 15202 | // BUILD_VECTOR into something more vector friendly, i.e., something that
| 15203 | // does not force the use of floating point types.
| 15204 | |
| 15205 | // Make sure we can change the type of the vector. |
| 15206 | // This is possible iff: |
| 15207 | // 1. The vector is only used in a bitcast to an integer type. I.e.,
| 15208 | // 1.1. Vector is used only once. |
| 15209 | // 1.2. Use is a bit convert to an integer type. |
| 15210 | // 2. Its operands are 32 bits in size (64-bit operands are not legal).
| 15211 | EVT VT = N->getValueType(ResNo: 0); |
| 15212 | EVT EltVT = VT.getVectorElementType(); |
| 15213 | |
| 15214 | // Check 1.1. and 2. |
| 15215 | if (EltVT.getSizeInBits() != 32 || !N->hasOneUse()) |
| 15216 | return SDValue(); |
| 15217 | |
| 15218 | // By construction, the input type must be float. |
| 15219 | assert(EltVT == MVT::f32 && "Unexpected type!");
| 15220 | |
| 15221 | // Check 1.2. |
| 15222 | SDNode *Use = *N->user_begin(); |
| 15223 | if (Use->getOpcode() != ISD::BITCAST || |
| 15224 | Use->getValueType(ResNo: 0).isFloatingPoint()) |
| 15225 | return SDValue(); |
| 15226 | |
| 15227 | // Check profitability. |
| 15228 | // The model is: if more than half of the relevant operands are bitcast from
| 15229 | // i32, turn the build_vector into a sequence of insert_vector_elt. |
| 15230 | // Relevant operands are everything that is not statically |
| 15231 | // (i.e., at compile time) bitcasted. |
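| | // E.g. with a 4-element vector containing one constant element, there are
| | // 3 relevant operands, so at least 2 of them must be bitcasts from i32 for
| | // the rewrite to be considered profitable.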
| 15232 | unsigned NumOfBitCastedElts = 0; |
| 15233 | unsigned NumElts = VT.getVectorNumElements(); |
| 15234 | unsigned NumOfRelevantElts = NumElts; |
| 15235 | for (unsigned Idx = 0; Idx < NumElts; ++Idx) { |
| 15236 | SDValue Elt = N->getOperand(Num: Idx); |
| 15237 | if (Elt->getOpcode() == ISD::BITCAST) { |
| 15238 | // Assume only bitcasts from i32 will go away.
| 15239 | if (Elt->getOperand(Num: 0).getValueType() == MVT::i32) |
| 15240 | ++NumOfBitCastedElts; |
| 15241 | } else if (Elt.isUndef() || isa<ConstantSDNode>(Val: Elt)) |
| 15242 | // Constants are statically casted, thus do not count them as |
| 15243 | // relevant operands. |
| 15244 | --NumOfRelevantElts; |
| 15245 | } |
| 15246 | |
| 15247 | // Check if more than half of the elements require a non-free bitcast. |
| 15248 | if (NumOfBitCastedElts <= NumOfRelevantElts / 2) |
| 15249 | return SDValue(); |
| 15250 | |
| 15251 | SelectionDAG &DAG = DCI.DAG; |
| 15252 | // Create the new vector type. |
| 15253 | EVT VecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::i32, NumElements: NumElts); |
| 15254 | // Check if the type is legal. |
| 15255 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 15256 | if (!TLI.isTypeLegal(VT: VecVT)) |
| 15257 | return SDValue(); |
| 15258 | |
| 15259 | // Combine: |
| 15260 | // ARMISD::BUILD_VECTOR E1, E2, ..., EN. |
| 15261 | // => BITCAST INSERT_VECTOR_ELT |
| 15262 | // (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1), |
| 15263 | // (BITCAST EN), N. |
| 15264 | SDValue Vec = DAG.getUNDEF(VT: VecVT); |
| 15265 | SDLoc dl(N); |
| 15266 | for (unsigned Idx = 0; Idx < NumElts; ++Idx) {
| 15267 | SDValue V = N->getOperand(Num: Idx); |
| 15268 | if (V.isUndef()) |
| 15269 | continue; |
| 15270 | if (V.getOpcode() == ISD::BITCAST && |
| 15271 | V->getOperand(Num: 0).getValueType() == MVT::i32) |
| 15272 | // Fold obvious case. |
| 15273 | V = V.getOperand(i: 0); |
| 15274 | else { |
| 15275 | V = DAG.getNode(Opcode: ISD::BITCAST, DL: SDLoc(V), VT: MVT::i32, Operand: V); |
| 15276 | // Make the DAGCombiner fold the bitcasts. |
| 15277 | DCI.AddToWorklist(N: V.getNode()); |
| 15278 | } |
| 15279 | SDValue LaneIdx = DAG.getConstant(Val: Idx, DL: dl, VT: MVT::i32); |
| 15280 | Vec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: VecVT, N1: Vec, N2: V, N3: LaneIdx); |
| 15281 | } |
| 15282 | Vec = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Vec); |
| 15283 | // Make the DAGCombiner fold the bitcasts. |
| 15284 | DCI.AddToWorklist(N: Vec.getNode()); |
| 15285 | return Vec; |
| 15286 | } |
| 15287 | |
| 15288 | static SDValue |
| 15289 | PerformPREDICATE_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
| 15290 | EVT VT = N->getValueType(ResNo: 0); |
| 15291 | SDValue Op = N->getOperand(Num: 0); |
| 15292 | SDLoc dl(N); |
| 15293 | |
| 15294 | // PREDICATE_CAST(PREDICATE_CAST(x)) == PREDICATE_CAST(x) |
| 15295 | if (Op->getOpcode() == ARMISD::PREDICATE_CAST) { |
| 15296 | // If the valuetypes are the same, we can remove the cast entirely. |
| 15297 | if (Op->getOperand(Num: 0).getValueType() == VT) |
| 15298 | return Op->getOperand(Num: 0); |
| 15299 | return DCI.DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT, Operand: Op->getOperand(Num: 0)); |
| 15300 | } |
| 15301 | |
| 15302 | // Turn pred_cast(xor x, -1) into xor(pred_cast x, -1), in order to produce
| 15303 | // more VPNOTs, which might get folded as else predicates.
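| | // MVE predicates are 16 bits wide, so the NOT is materialised as an XOR
| | // with a predicate-cast of 0xFFFF (all predicate lanes set).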
| 15304 | if (Op.getValueType() == MVT::i32 && isBitwiseNot(V: Op)) { |
| 15305 | SDValue X = |
| 15306 | DCI.DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT, Operand: Op->getOperand(Num: 0)); |
| 15307 | SDValue C = DCI.DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT, |
| 15308 | Operand: DCI.DAG.getConstant(Val: 65535, DL: dl, VT: MVT::i32)); |
| 15309 | return DCI.DAG.getNode(Opcode: ISD::XOR, DL: dl, VT, N1: X, N2: C); |
| 15310 | } |
| 15311 | |
| 15312 | // Only the bottom 16 bits of the source register are used. |
| 15313 | if (Op.getValueType() == MVT::i32) { |
| 15314 | APInt DemandedMask = APInt::getLowBitsSet(numBits: 32, loBitsSet: 16); |
| 15315 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); |
| 15316 | if (TLI.SimplifyDemandedBits(Op, DemandedBits: DemandedMask, DCI)) |
| 15317 | return SDValue(N, 0); |
| 15318 | } |
| 15319 | return SDValue(); |
| 15320 | } |
| 15321 | |
| 15322 | static SDValue PerformVECTOR_REG_CASTCombine(SDNode *N, SelectionDAG &DAG, |
| 15323 | const ARMSubtarget *ST) { |
| 15324 | EVT VT = N->getValueType(ResNo: 0); |
| 15325 | SDValue Op = N->getOperand(Num: 0); |
| 15326 | SDLoc dl(N); |
| 15327 | |
| 15328 | // Under little endian, a VECTOR_REG_CAST is equivalent to a BITCAST.
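| | // A VECTOR_REG_CAST merely reinterprets the register contents; with
| | // little-endian lane numbering that matches BITCAST semantics exactly.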
| 15329 | if (ST->isLittle()) |
| 15330 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Op); |
| 15331 | |
| 15332 | // VT VECTOR_REG_CAST (VT Op) -> Op |
| 15333 | if (Op.getValueType() == VT) |
| 15334 | return Op; |
| 15335 | // VECTOR_REG_CAST undef -> undef |
| 15336 | if (Op.isUndef()) |
| 15337 | return DAG.getUNDEF(VT); |
| 15338 | |
| 15339 | // VECTOR_REG_CAST(VECTOR_REG_CAST(x)) == VECTOR_REG_CAST(x) |
| 15340 | if (Op->getOpcode() == ARMISD::VECTOR_REG_CAST) { |
| 15341 | // If the valuetypes are the same, we can remove the cast entirely. |
| 15342 | if (Op->getOperand(Num: 0).getValueType() == VT) |
| 15343 | return Op->getOperand(Num: 0); |
| 15344 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Op->getOperand(Num: 0)); |
| 15345 | } |
| 15346 | |
| 15347 | return SDValue(); |
| 15348 | } |
| 15349 | |
| 15350 | static SDValue PerformVCMPCombine(SDNode *N, SelectionDAG &DAG, |
| 15351 | const ARMSubtarget *Subtarget) { |
| 15352 | if (!Subtarget->hasMVEIntegerOps()) |
| 15353 | return SDValue(); |
| 15354 | |
| 15355 | EVT VT = N->getValueType(ResNo: 0); |
| 15356 | SDValue Op0 = N->getOperand(Num: 0); |
| 15357 | SDValue Op1 = N->getOperand(Num: 1); |
| 15358 | ARMCC::CondCodes Cond = (ARMCC::CondCodes)N->getConstantOperandVal(Num: 2); |
| 15359 | SDLoc dl(N); |
| 15360 | |
| 15361 | // vcmp X, 0, cc -> vcmpz X, cc |
| 15362 | if (isZeroVector(N: Op1)) |
| 15363 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: Op0, N2: N->getOperand(Num: 2)); |
| 15364 | |
| 15365 | unsigned SwappedCond = getSwappedCondition(CC: Cond); |
| 15366 | if (isValidMVECond(CC: SwappedCond, IsFloat: VT.isFloatingPoint())) { |
| 15367 | // vcmp 0, X, cc -> vcmpz X, reversed(cc) |
| 15368 | if (isZeroVector(N: Op0)) |
| 15369 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: Op1, |
| 15370 | N2: DAG.getConstant(Val: SwappedCond, DL: dl, VT: MVT::i32)); |
| 15371 | // vcmp vdup(Y), X, cc -> vcmp X, vdup(Y), reversed(cc) |
| 15372 | if (Op0->getOpcode() == ARMISD::VDUP && Op1->getOpcode() != ARMISD::VDUP) |
| 15373 | return DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT, N1: Op1, N2: Op0, |
| 15374 | N3: DAG.getConstant(Val: SwappedCond, DL: dl, VT: MVT::i32)); |
| 15375 | } |
| 15376 | |
| 15377 | return SDValue(); |
| 15378 | } |
| 15379 | |
| 15380 | /// PerformInsertEltCombine - Target-specific dag combine xforms for |
| 15381 | /// ISD::INSERT_VECTOR_ELT. |
| 15382 | static SDValue PerformInsertEltCombine(SDNode *N, |
| 15383 | TargetLowering::DAGCombinerInfo &DCI) { |
| 15384 | // Bitcast an i64 load inserted into a vector to f64. |
| 15385 | // Otherwise, the i64 value will be legalized to a pair of i32 values. |
| 15386 | EVT VT = N->getValueType(ResNo: 0); |
| 15387 | SDNode *Elt = N->getOperand(Num: 1).getNode(); |
| 15388 | if (VT.getVectorElementType() != MVT::i64 || |
| 15389 | !ISD::isNormalLoad(N: Elt) || cast<LoadSDNode>(Val: Elt)->isVolatile()) |
| 15390 | return SDValue(); |
| 15391 | |
| 15392 | SelectionDAG &DAG = DCI.DAG; |
| 15393 | SDLoc dl(N); |
| 15394 | EVT FloatVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::f64, |
| 15395 | NumElements: VT.getVectorNumElements()); |
| 15396 | SDValue Vec = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: FloatVT, Operand: N->getOperand(Num: 0)); |
| 15397 | SDValue V = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f64, Operand: N->getOperand(Num: 1)); |
| 15398 | // Make the DAGCombiner fold the bitcasts. |
| 15399 | DCI.AddToWorklist(N: Vec.getNode()); |
| 15400 | DCI.AddToWorklist(N: V.getNode()); |
| 15401 | SDValue InsElt = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: FloatVT, |
| 15402 | N1: Vec, N2: V, N3: N->getOperand(Num: 2)); |
| 15403 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: InsElt); |
| 15404 | } |
| 15405 | |
| 15406 | // Convert a pair of extracts from the same base vector to a VMOVRRD, either
| 15407 | // directly or via a bitcast to an integer if the original is a float vector.
| 15408 | // extract(x, n); extract(x, n+1) -> VMOVRRD(extract v2f64 x, n/2) |
| 15409 | // bitcast(extract(x, n)); bitcast(extract(x, n+1)) -> VMOVRRD(extract x, n/2) |
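| | // E.g. (illustrative) extract(v4i32 x, 2) and extract(v4i32 x, 3) become
| | // the two i32 results of VMOVRRD(extract(v2f64 vector_reg_cast(x), 1)),
| | // moving both values out of the vector in a single operation.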
| 15410 | static SDValue |
| 15411 | PerformExtractEltToVMOVRRD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
| 15412 | EVT VT = N->getValueType(ResNo: 0); |
| 15413 | SDLoc dl(N); |
| 15414 | |
| 15415 | if (!DCI.isAfterLegalizeDAG() || VT != MVT::i32 || |
| 15416 | !DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT: MVT::f64)) |
| 15417 | return SDValue(); |
| 15418 | |
| 15419 | SDValue Ext = SDValue(N, 0); |
| 15420 | if (Ext.getOpcode() == ISD::BITCAST && |
| 15421 | Ext.getOperand(i: 0).getValueType() == MVT::f32) |
| 15422 | Ext = Ext.getOperand(i: 0); |
| 15423 | if (Ext.getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
| 15424 | !isa<ConstantSDNode>(Val: Ext.getOperand(i: 1)) || |
| 15425 | Ext.getConstantOperandVal(i: 1) % 2 != 0) |
| 15426 | return SDValue(); |
| 15427 | if (Ext->hasOneUse() && (Ext->user_begin()->getOpcode() == ISD::SINT_TO_FP || |
| 15428 | Ext->user_begin()->getOpcode() == ISD::UINT_TO_FP)) |
| 15429 | return SDValue(); |
| 15430 | |
| 15431 | SDValue Op0 = Ext.getOperand(i: 0); |
| 15432 | EVT VecVT = Op0.getValueType(); |
| 15433 | unsigned ResNo = Op0.getResNo(); |
| 15434 | unsigned Lane = Ext.getConstantOperandVal(i: 1); |
| 15435 | if (VecVT.getVectorNumElements() != 4) |
| 15436 | return SDValue(); |
| 15437 | |
| 15438 | // Find another extract, of Lane + 1 |
| 15439 | auto OtherIt = find_if(Range: Op0->users(), P: [&](SDNode *V) { |
| 15440 | return V->getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 15441 | isa<ConstantSDNode>(Val: V->getOperand(Num: 1)) && |
| 15442 | V->getConstantOperandVal(Num: 1) == Lane + 1 && |
| 15443 | V->getOperand(Num: 0).getResNo() == ResNo; |
| 15444 | }); |
| 15445 | if (OtherIt == Op0->users().end()) |
| 15446 | return SDValue(); |
| 15447 | |
| 15448 | // For float extracts, we need to be converting to an i32 for both vector
| 15449 | // lanes.
| 15450 | SDValue OtherExt(*OtherIt, 0); |
| 15451 | if (OtherExt.getValueType() != MVT::i32) { |
| 15452 | if (!OtherExt->hasOneUse() || |
| 15453 | OtherExt->user_begin()->getOpcode() != ISD::BITCAST || |
| 15454 | OtherExt->user_begin()->getValueType(ResNo: 0) != MVT::i32) |
| 15455 | return SDValue(); |
| 15456 | OtherExt = SDValue(*OtherExt->user_begin(), 0); |
| 15457 | } |
| 15458 | |
| 15459 | // Convert the type to a f64 and extract with a VMOVRRD. |
| 15460 | SDValue F64 = DCI.DAG.getNode( |
| 15461 | Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, |
| 15462 | N1: DCI.DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v2f64, Operand: Op0), |
| 15463 | N2: DCI.DAG.getConstant(Val: Ext.getConstantOperandVal(i: 1) / 2, DL: dl, VT: MVT::i32)); |
| 15464 | SDValue VMOVRRD = |
| 15465 | DCI.DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, ResultTys: {MVT::i32, MVT::i32}, Ops: F64); |
| 15466 | |
| 15467 | DCI.CombineTo(N: OtherExt.getNode(), Res: SDValue(VMOVRRD.getNode(), 1)); |
| 15468 | return VMOVRRD; |
| 15469 | } |
| 15470 | |
| 15471 | static SDValue PerformExtractEltCombine(SDNode *N,
| 15472 | TargetLowering::DAGCombinerInfo &DCI, |
| 15473 | const ARMSubtarget *ST) { |
| 15474 | SDValue Op0 = N->getOperand(Num: 0); |
| 15475 | EVT VT = N->getValueType(ResNo: 0); |
| 15476 | SDLoc dl(N); |
| 15477 | |
| 15478 | // extract (vdup x) -> x |
| 15479 | if (Op0->getOpcode() == ARMISD::VDUP) { |
| 15480 | SDValue X = Op0->getOperand(Num: 0); |
| 15481 | if (VT == MVT::f16 && X.getValueType() == MVT::i32) |
| 15482 | return DCI.DAG.getNode(Opcode: ARMISD::VMOVhr, DL: dl, VT, Operand: X); |
| 15483 | if (VT == MVT::i32 && X.getValueType() == MVT::f16) |
| 15484 | return DCI.DAG.getNode(Opcode: ARMISD::VMOVrh, DL: dl, VT, Operand: X); |
| 15485 | if (VT == MVT::f32 && X.getValueType() == MVT::i32) |
| 15486 | return DCI.DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: X); |
| 15487 | |
| 15488 | while (X.getValueType() != VT && X->getOpcode() == ISD::BITCAST) |
| 15489 | X = X->getOperand(Num: 0); |
| 15490 | if (X.getValueType() == VT) |
| 15491 | return X; |
| 15492 | } |
| 15493 | |
| 15494 | // extract ARM_BUILD_VECTOR -> x |
| 15495 | if (Op0->getOpcode() == ARMISD::BUILD_VECTOR && |
| 15496 | isa<ConstantSDNode>(Val: N->getOperand(Num: 1)) && |
| 15497 | N->getConstantOperandVal(Num: 1) < Op0.getNumOperands()) { |
| 15498 | return Op0.getOperand(i: N->getConstantOperandVal(Num: 1)); |
| 15499 | } |
| 15500 | |
| 15501 | // extract(bitcast(BUILD_VECTOR(VMOVDRR(a, b), ..))) -> a or b |
| 15502 | if (Op0.getValueType() == MVT::v4i32 && |
| 15503 | isa<ConstantSDNode>(Val: N->getOperand(Num: 1)) && |
| 15504 | Op0.getOpcode() == ISD::BITCAST && |
| 15505 | Op0.getOperand(i: 0).getOpcode() == ISD::BUILD_VECTOR && |
| 15506 | Op0.getOperand(i: 0).getValueType() == MVT::v2f64) { |
| 15507 | SDValue BV = Op0.getOperand(i: 0); |
| 15508 | unsigned Offset = N->getConstantOperandVal(Num: 1); |
| 15509 | SDValue MOV = BV.getOperand(i: Offset < 2 ? 0 : 1); |
| 15510 | if (MOV.getOpcode() == ARMISD::VMOVDRR) |
| 15511 | return MOV.getOperand(i: ST->isLittle() ? Offset % 2 : 1 - Offset % 2); |
| 15512 | } |
| 15513 | |
| 15514 | // extract x, n; extract x, n+1 -> VMOVRRD x |
| 15515 | if (SDValue R = PerformExtractEltToVMOVRRD(N, DCI)) |
| 15516 | return R; |
| 15517 | |
| 15518 | // extract (MVETrunc(x)) -> extract x |
| 15519 | if (Op0->getOpcode() == ARMISD::MVETRUNC) { |
| 15520 | unsigned Idx = N->getConstantOperandVal(Num: 1); |
| 15521 | unsigned Vec = |
| 15522 | Idx / Op0->getOperand(Num: 0).getValueType().getVectorNumElements(); |
| 15523 | unsigned SubIdx = |
| 15524 | Idx % Op0->getOperand(Num: 0).getValueType().getVectorNumElements(); |
| 15525 | return DCI.DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT, N1: Op0.getOperand(i: Vec), |
| 15526 | N2: DCI.DAG.getConstant(Val: SubIdx, DL: dl, VT: MVT::i32)); |
| 15527 | } |
| 15528 | |
| 15529 | return SDValue(); |
| 15530 | } |
| 15531 | |
| 15532 | static SDValue PerformSignExtendInregCombine(SDNode *N, SelectionDAG &DAG) { |
| 15533 | SDValue Op = N->getOperand(Num: 0); |
| 15534 | EVT VT = N->getValueType(ResNo: 0); |
| 15535 | |
| 15536 | // sext_inreg(VGETLANEu) -> VGETLANEs |
| 15537 | if (Op.getOpcode() == ARMISD::VGETLANEu && |
| 15538 | cast<VTSDNode>(Val: N->getOperand(Num: 1))->getVT() == |
| 15539 | Op.getOperand(i: 0).getValueType().getScalarType()) |
| 15540 | return DAG.getNode(Opcode: ARMISD::VGETLANEs, DL: SDLoc(N), VT, N1: Op.getOperand(i: 0), |
| 15541 | N2: Op.getOperand(i: 1)); |
| 15542 | |
| 15543 | return SDValue(); |
| 15544 | } |
| 15545 | |
| 15546 | static SDValue |
| 15547 | PerformInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
| 15548 | SDValue Vec = N->getOperand(Num: 0); |
| 15549 | SDValue SubVec = N->getOperand(Num: 1); |
| 15550 | uint64_t IdxVal = N->getConstantOperandVal(Num: 2); |
| 15551 | EVT VecVT = Vec.getValueType(); |
| 15552 | EVT SubVT = SubVec.getValueType(); |
| 15553 | |
| 15554 | // Only do this for legal fixed vector types. |
| 15555 | if (!VecVT.isFixedLengthVector() || |
| 15556 | !DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT: VecVT) || |
| 15557 | !DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT: SubVT)) |
| 15558 | return SDValue(); |
| 15559 | |
| 15560 | // Ignore widening patterns. |
| 15561 | if (IdxVal == 0 && Vec.isUndef()) |
| 15562 | return SDValue(); |
| 15563 | |
| 15564 | // Subvector must be half the width and an "aligned" insertion. |
| 15565 | unsigned NumSubElts = SubVT.getVectorNumElements(); |
| 15566 | if ((SubVT.getSizeInBits() * 2) != VecVT.getSizeInBits() || |
| 15567 | (IdxVal != 0 && IdxVal != NumSubElts)) |
| 15568 | return SDValue(); |
| 15569 | |
| 15570 | // Fold insert_subvector -> concat_vectors |
| 15571 | // insert_subvector(Vec,Sub,lo) -> concat_vectors(Sub,extract(Vec,hi)) |
| 15572 | // insert_subvector(Vec,Sub,hi) -> concat_vectors(extract(Vec,lo),Sub) |
| 15573 | SDLoc DL(N); |
| 15574 | SDValue Lo, Hi; |
| 15575 | if (IdxVal == 0) { |
| 15576 | Lo = SubVec; |
| 15577 | Hi = DCI.DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT: SubVT, N1: Vec, |
| 15578 | N2: DCI.DAG.getVectorIdxConstant(Val: NumSubElts, DL)); |
| 15579 | } else { |
| 15580 | Lo = DCI.DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT: SubVT, N1: Vec, |
| 15581 | N2: DCI.DAG.getVectorIdxConstant(Val: 0, DL)); |
| 15582 | Hi = SubVec; |
| 15583 | } |
| 15584 | return DCI.DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: VecVT, N1: Lo, N2: Hi); |
| 15585 | } |
| 15586 | |
| 15587 | // shuffle(MVETrunc(x, y)) -> VMOVN(x, y) |
| 15588 | static SDValue PerformShuffleVMOVNCombine(ShuffleVectorSDNode *N, |
| 15589 | SelectionDAG &DAG) { |
| 15590 | SDValue Trunc = N->getOperand(Num: 0); |
| 15591 | EVT VT = Trunc.getValueType(); |
| 15592 | if (Trunc.getOpcode() != ARMISD::MVETRUNC || !N->getOperand(Num: 1).isUndef()) |
| 15593 | return SDValue(); |
| 15594 | |
| 15595 | SDLoc DL(Trunc); |
| 15596 | if (isVMOVNTruncMask(M: N->getMask(), ToVT: VT, rev: false)) |
| 15597 | return DAG.getNode( |
| 15598 | Opcode: ARMISD::VMOVN, DL, VT, |
| 15599 | N1: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: Trunc.getOperand(i: 0)), |
| 15600 | N2: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: Trunc.getOperand(i: 1)), |
| 15601 | N3: DAG.getConstant(Val: 1, DL, VT: MVT::i32)); |
| 15602 | else if (isVMOVNTruncMask(M: N->getMask(), ToVT: VT, rev: true)) |
| 15603 | return DAG.getNode( |
| 15604 | Opcode: ARMISD::VMOVN, DL, VT, |
| 15605 | N1: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: Trunc.getOperand(i: 1)), |
| 15606 | N2: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: Trunc.getOperand(i: 0)), |
| 15607 | N3: DAG.getConstant(Val: 1, DL, VT: MVT::i32)); |
| 15608 | return SDValue(); |
| 15609 | } |
| 15610 | |
| 15611 | /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for |
| 15612 | /// ISD::VECTOR_SHUFFLE. |
| 15613 | static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { |
| 15614 | if (SDValue R = PerformShuffleVMOVNCombine(N: cast<ShuffleVectorSDNode>(Val: N), DAG)) |
| 15615 | return R; |
| 15616 | |
| 15617 | // The LLVM shufflevector instruction does not require the shuffle mask |
| 15618 | // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does |
| 15619 | // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the |
| 15620 | // operands do not match the mask length, they are extended by concatenating |
| 15621 | // them with undef vectors. That is probably the right thing for other |
| 15622 | // targets, but for NEON it is better to concatenate two double-register |
| 15623 | // size vector operands into a single quad-register size vector. Do that |
| 15624 | // transformation here: |
| 15625 | // shuffle(concat(v1, undef), concat(v2, undef)) -> |
| 15626 | // shuffle(concat(v1, v2), undef) |
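| | // E.g. for a v8i16 shuffle (HalfElts == 4), a mask element of 9 selects
| | // lane 1 of v2, which becomes lane 5 of the combined concat(v1, v2).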
| 15627 | SDValue Op0 = N->getOperand(Num: 0); |
| 15628 | SDValue Op1 = N->getOperand(Num: 1); |
| 15629 | if (Op0.getOpcode() != ISD::CONCAT_VECTORS || |
| 15630 | Op1.getOpcode() != ISD::CONCAT_VECTORS || |
| 15631 | Op0.getNumOperands() != 2 || |
| 15632 | Op1.getNumOperands() != 2) |
| 15633 | return SDValue(); |
| 15634 | SDValue Concat0Op1 = Op0.getOperand(i: 1); |
| 15635 | SDValue Concat1Op1 = Op1.getOperand(i: 1); |
| 15636 | if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef()) |
| 15637 | return SDValue(); |
| 15638 | // Skip the transformation if any of the types are illegal. |
| 15639 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 15640 | EVT VT = N->getValueType(ResNo: 0); |
| 15641 | if (!TLI.isTypeLegal(VT) || |
| 15642 | !TLI.isTypeLegal(VT: Concat0Op1.getValueType()) || |
| 15643 | !TLI.isTypeLegal(VT: Concat1Op1.getValueType())) |
| 15644 | return SDValue(); |
| 15645 | |
| 15646 | SDValue NewConcat = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: SDLoc(N), VT, |
| 15647 | N1: Op0.getOperand(i: 0), N2: Op1.getOperand(i: 0)); |
| 15648 | // Translate the shuffle mask. |
| 15649 | SmallVector<int, 16> NewMask; |
| 15650 | unsigned NumElts = VT.getVectorNumElements(); |
| 15651 | unsigned HalfElts = NumElts/2; |
| 15652 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Val: N); |
| 15653 | for (unsigned n = 0; n < NumElts; ++n) { |
| 15654 | int MaskElt = SVN->getMaskElt(Idx: n); |
| 15655 | int NewElt = -1; |
| 15656 | if (MaskElt < (int)HalfElts) |
| 15657 | NewElt = MaskElt; |
| 15658 | else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) |
| 15659 | NewElt = HalfElts + MaskElt - NumElts; |
| 15660 | NewMask.push_back(Elt: NewElt); |
| 15661 | } |
| 15662 | return DAG.getVectorShuffle(VT, dl: SDLoc(N), N1: NewConcat, |
| 15663 | N2: DAG.getUNDEF(VT), Mask: NewMask); |
| 15664 | } |
| 15665 | |
| 15666 | /// Load/store instruction that can be merged with a base address |
| 15667 | /// update |
| 15668 | struct BaseUpdateTarget { |
| 15669 | SDNode *N; |
| 15670 | bool isIntrinsic; |
| 15671 | bool isStore; |
| 15672 | unsigned AddrOpIdx; |
| 15673 | }; |
| 15674 | |
| 15675 | struct BaseUpdateUser { |
| 15676 | /// Instruction that updates a pointer |
| 15677 | SDNode *N; |
| 15678 | /// Pointer increment operand |
| 15679 | SDValue Inc; |
| 15680 | /// Pointer increment value if it is a constant, or 0 otherwise |
| 15681 | unsigned ConstInc; |
| 15682 | }; |
| 15683 | |
| 15684 | static bool isValidBaseUpdate(SDNode *N, SDNode *User) { |
| 15685 | // Check that the add is independent of the load/store. |
| 15686 | // Otherwise, folding it would create a cycle. Search through Addr |
| 15687 | // as well, since the User may not be a direct user of Addr and |
| 15688 | // only share a base pointer. |
| 15689 | SmallPtrSet<const SDNode *, 32> Visited; |
| 15690 | SmallVector<const SDNode *, 16> Worklist; |
| 15691 | Worklist.push_back(Elt: N); |
| 15692 | Worklist.push_back(Elt: User); |
| 15693 | const unsigned MaxSteps = 1024; |
| 15694 | if (SDNode::hasPredecessorHelper(N, Visited, Worklist, MaxSteps) || |
| 15695 | SDNode::hasPredecessorHelper(N: User, Visited, Worklist, MaxSteps)) |
| 15696 | return false; |
| 15697 | return true; |
| 15698 | } |
| 15699 | |
| 15700 | static bool TryCombineBaseUpdate(struct BaseUpdateTarget &Target, |
| 15701 | struct BaseUpdateUser &User, |
| 15702 | bool SimpleConstIncOnly, |
| 15703 | TargetLowering::DAGCombinerInfo &DCI) { |
| 15704 | SelectionDAG &DAG = DCI.DAG; |
| 15705 | SDNode *N = Target.N; |
| 15706 | MemSDNode *MemN = cast<MemSDNode>(Val: N); |
| 15707 | SDLoc dl(N); |
| 15708 | |
| 15709 | // Find the new opcode for the updating load/store. |
| 15710 | bool isLoadOp = true; |
| 15711 | bool isLaneOp = false; |
| 15712 | // Workaround for vst1x and vld1x intrinsics which do not have alignment |
| 15713 | // as an operand. |
| 15714 | bool hasAlignment = true; |
| 15715 | unsigned NewOpc = 0; |
| 15716 | unsigned NumVecs = 0; |
| 15717 | if (Target.isIntrinsic) { |
| 15718 | unsigned IntNo = N->getConstantOperandVal(Num: 1); |
| 15719 | switch (IntNo) { |
| 15720 | default: |
| 15721 | llvm_unreachable("unexpected intrinsic for Neon base update" ); |
| 15722 | case Intrinsic::arm_neon_vld1: |
| 15723 | NewOpc = ARMISD::VLD1_UPD; |
| 15724 | NumVecs = 1; |
| 15725 | break; |
| 15726 | case Intrinsic::arm_neon_vld2: |
| 15727 | NewOpc = ARMISD::VLD2_UPD; |
| 15728 | NumVecs = 2; |
| 15729 | break; |
| 15730 | case Intrinsic::arm_neon_vld3: |
| 15731 | NewOpc = ARMISD::VLD3_UPD; |
| 15732 | NumVecs = 3; |
| 15733 | break; |
| 15734 | case Intrinsic::arm_neon_vld4: |
| 15735 | NewOpc = ARMISD::VLD4_UPD; |
| 15736 | NumVecs = 4; |
| 15737 | break; |
| 15738 | case Intrinsic::arm_neon_vld1x2: |
| 15739 | NewOpc = ARMISD::VLD1x2_UPD; |
| 15740 | NumVecs = 2; |
| 15741 | hasAlignment = false; |
| 15742 | break; |
| 15743 | case Intrinsic::arm_neon_vld1x3: |
| 15744 | NewOpc = ARMISD::VLD1x3_UPD; |
| 15745 | NumVecs = 3; |
| 15746 | hasAlignment = false; |
| 15747 | break; |
| 15748 | case Intrinsic::arm_neon_vld1x4: |
| 15749 | NewOpc = ARMISD::VLD1x4_UPD; |
| 15750 | NumVecs = 4; |
| 15751 | hasAlignment = false; |
| 15752 | break; |
| 15753 | case Intrinsic::arm_neon_vld2dup: |
| 15754 | NewOpc = ARMISD::VLD2DUP_UPD; |
| 15755 | NumVecs = 2; |
| 15756 | break; |
| 15757 | case Intrinsic::arm_neon_vld3dup: |
| 15758 | NewOpc = ARMISD::VLD3DUP_UPD; |
| 15759 | NumVecs = 3; |
| 15760 | break; |
| 15761 | case Intrinsic::arm_neon_vld4dup: |
| 15762 | NewOpc = ARMISD::VLD4DUP_UPD; |
| 15763 | NumVecs = 4; |
| 15764 | break; |
| 15765 | case Intrinsic::arm_neon_vld2lane: |
| 15766 | NewOpc = ARMISD::VLD2LN_UPD; |
| 15767 | NumVecs = 2; |
| 15768 | isLaneOp = true; |
| 15769 | break; |
| 15770 | case Intrinsic::arm_neon_vld3lane: |
| 15771 | NewOpc = ARMISD::VLD3LN_UPD; |
| 15772 | NumVecs = 3; |
| 15773 | isLaneOp = true; |
| 15774 | break; |
| 15775 | case Intrinsic::arm_neon_vld4lane: |
| 15776 | NewOpc = ARMISD::VLD4LN_UPD; |
| 15777 | NumVecs = 4; |
| 15778 | isLaneOp = true; |
| 15779 | break; |
| 15780 | case Intrinsic::arm_neon_vst1: |
| 15781 | NewOpc = ARMISD::VST1_UPD; |
| 15782 | NumVecs = 1; |
| 15783 | isLoadOp = false; |
| 15784 | break; |
| 15785 | case Intrinsic::arm_neon_vst2: |
| 15786 | NewOpc = ARMISD::VST2_UPD; |
| 15787 | NumVecs = 2; |
| 15788 | isLoadOp = false; |
| 15789 | break; |
| 15790 | case Intrinsic::arm_neon_vst3: |
| 15791 | NewOpc = ARMISD::VST3_UPD; |
| 15792 | NumVecs = 3; |
| 15793 | isLoadOp = false; |
| 15794 | break; |
| 15795 | case Intrinsic::arm_neon_vst4: |
| 15796 | NewOpc = ARMISD::VST4_UPD; |
| 15797 | NumVecs = 4; |
| 15798 | isLoadOp = false; |
| 15799 | break; |
| 15800 | case Intrinsic::arm_neon_vst2lane: |
| 15801 | NewOpc = ARMISD::VST2LN_UPD; |
| 15802 | NumVecs = 2; |
| 15803 | isLoadOp = false; |
| 15804 | isLaneOp = true; |
| 15805 | break; |
| 15806 | case Intrinsic::arm_neon_vst3lane: |
| 15807 | NewOpc = ARMISD::VST3LN_UPD; |
| 15808 | NumVecs = 3; |
| 15809 | isLoadOp = false; |
| 15810 | isLaneOp = true; |
| 15811 | break; |
| 15812 | case Intrinsic::arm_neon_vst4lane: |
| 15813 | NewOpc = ARMISD::VST4LN_UPD; |
| 15814 | NumVecs = 4; |
| 15815 | isLoadOp = false; |
| 15816 | isLaneOp = true; |
| 15817 | break; |
| 15818 | case Intrinsic::arm_neon_vst1x2: |
| 15819 | NewOpc = ARMISD::VST1x2_UPD; |
| 15820 | NumVecs = 2; |
| 15821 | isLoadOp = false; |
| 15822 | hasAlignment = false; |
| 15823 | break; |
| 15824 | case Intrinsic::arm_neon_vst1x3: |
| 15825 | NewOpc = ARMISD::VST1x3_UPD; |
| 15826 | NumVecs = 3; |
| 15827 | isLoadOp = false; |
| 15828 | hasAlignment = false; |
| 15829 | break; |
| 15830 | case Intrinsic::arm_neon_vst1x4: |
| 15831 | NewOpc = ARMISD::VST1x4_UPD; |
| 15832 | NumVecs = 4; |
| 15833 | isLoadOp = false; |
| 15834 | hasAlignment = false; |
| 15835 | break; |
| 15836 | } |
| 15837 | } else { |
| 15838 | isLaneOp = true; |
| 15839 | switch (N->getOpcode()) { |
| 15840 | default: |
| 15841 | llvm_unreachable("unexpected opcode for Neon base update" ); |
| 15842 | case ARMISD::VLD1DUP: |
| 15843 | NewOpc = ARMISD::VLD1DUP_UPD; |
| 15844 | NumVecs = 1; |
| 15845 | break; |
| 15846 | case ARMISD::VLD2DUP: |
| 15847 | NewOpc = ARMISD::VLD2DUP_UPD; |
| 15848 | NumVecs = 2; |
| 15849 | break; |
| 15850 | case ARMISD::VLD3DUP: |
| 15851 | NewOpc = ARMISD::VLD3DUP_UPD; |
| 15852 | NumVecs = 3; |
| 15853 | break; |
| 15854 | case ARMISD::VLD4DUP: |
| 15855 | NewOpc = ARMISD::VLD4DUP_UPD; |
| 15856 | NumVecs = 4; |
| 15857 | break; |
| 15858 | case ISD::LOAD: |
| 15859 | NewOpc = ARMISD::VLD1_UPD; |
| 15860 | NumVecs = 1; |
| 15861 | isLaneOp = false; |
| 15862 | break; |
| 15863 | case ISD::STORE: |
| 15864 | NewOpc = ARMISD::VST1_UPD; |
| 15865 | NumVecs = 1; |
| 15866 | isLaneOp = false; |
| 15867 | isLoadOp = false; |
| 15868 | break; |
| 15869 | } |
| 15870 | } |
| 15871 | |
| 15872 | // Find the size of memory referenced by the load/store. |
| 15873 | EVT VecTy; |
| 15874 | if (isLoadOp) { |
| 15875 | VecTy = N->getValueType(ResNo: 0); |
| 15876 | } else if (Target.isIntrinsic) { |
| 15877 | VecTy = N->getOperand(Num: Target.AddrOpIdx + 1).getValueType(); |
| 15878 | } else { |
| 15879 | assert(Target.isStore && |
| 15880 | "Node has to be a load, a store, or an intrinsic!" ); |
| 15881 | VecTy = N->getOperand(Num: 1).getValueType(); |
| 15882 | } |
| 15883 | |
| 15884 | bool isVLDDUPOp = |
| 15885 | NewOpc == ARMISD::VLD1DUP_UPD || NewOpc == ARMISD::VLD2DUP_UPD || |
| 15886 | NewOpc == ARMISD::VLD3DUP_UPD || NewOpc == ARMISD::VLD4DUP_UPD; |
| 15887 | |
| 15888 | unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; |
| 15889 | if (isLaneOp || isVLDDUPOp) |
| 15890 | NumBytes /= VecTy.getVectorNumElements(); |
| 15891 | |
| 15892 | if (NumBytes >= 3 * 16 && User.ConstInc != NumBytes) { |
| 15893 | // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two |
| 15894 | // separate instructions that make it harder to use a non-constant update. |
| 15895 | return false; |
| 15896 | } |
| 15897 | |
| 15898 | if (SimpleConstIncOnly && User.ConstInc != NumBytes) |
| 15899 | return false; |
| 15900 | |
| 15901 | if (!isValidBaseUpdate(N, User: User.N)) |
| 15902 | return false; |
| 15903 | |
| 15904 | // OK, we found an ADD we can fold into the base update. |
| 15905 | // Now, create a _UPD node, taking care of not breaking alignment. |
| 15906 | |
| 15907 | EVT AlignedVecTy = VecTy; |
| 15908 | Align Alignment = MemN->getAlign(); |
| 15909 | |
| 15910 | // If this is a less-than-standard-aligned load/store, change the type to |
| 15911 | // match the standard alignment. |
| 15912 | // The alignment is overlooked when selecting _UPD variants; and it's |
| 15913 | // easier to introduce bitcasts here than fix that. |
| 15914 | // There are 3 ways to get to this base-update combine: |
| 15915 | // - intrinsics: they are assumed to be properly aligned (to the standard |
| 15916 | // alignment of the memory type), so we don't need to do anything. |
| 15917 | // - ARMISD::VLDx nodes: they are only generated from the aforementioned |
| 15918 | // intrinsics, so, likewise, there's nothing to do. |
| 15919 | // - generic load/store instructions: the alignment is specified as an |
| 15920 | // explicit operand, rather than implicitly as the standard alignment |
| 15921 | // of the memory type (like the intrinsics). We need to change the
| 15922 | // memory type to match the explicit alignment. That way, we don't |
| 15923 | // generate non-standard-aligned ARMISD::VLDx nodes. |
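| | // E.g. (illustrative) a 16-byte v2i64 load that is only 2-byte aligned is
| | // rebuilt as a v8i16 VLD1_UPD, with bitcasts inserted around it to recover
| | // the original type.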
| 15924 | if (isa<LSBaseSDNode>(Val: N)) { |
| 15925 | if (Alignment.value() < VecTy.getScalarSizeInBits() / 8) { |
| 15926 | MVT EltTy = MVT::getIntegerVT(BitWidth: Alignment.value() * 8); |
| 15927 | assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
| 15928 | assert(!isLaneOp && "Unexpected generic load/store lane.");
| 15929 | unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8); |
| 15930 | AlignedVecTy = MVT::getVectorVT(VT: EltTy, NumElements: NumElts); |
| 15931 | } |
| 15932 | // Don't set an explicit alignment on regular load/stores that we want |
| 15933 | // to transform to VLD/VST 1_UPD nodes. |
| 15934 | // This matches the behavior of regular load/stores, which only get an |
| 15935 | // explicit alignment if the MMO alignment is larger than the standard |
| 15936 | // alignment of the memory type. |
| 15937 | // Intrinsics, however, always get an explicit alignment, set to the |
| 15938 | // alignment of the MMO. |
| 15939 | Alignment = Align(1); |
| 15940 | } |
| 15941 | |
| 15942 | // Create the new updating load/store node. |
| 15943 | // First, create an SDVTList for the new updating node's results. |
| 15944 | EVT Tys[6]; |
| 15945 | unsigned NumResultVecs = (isLoadOp ? NumVecs : 0); |
| 15946 | unsigned n; |
| 15947 | for (n = 0; n < NumResultVecs; ++n) |
| 15948 | Tys[n] = AlignedVecTy; |
| 15949 | Tys[n++] = MVT::i32; |
| 15950 | Tys[n] = MVT::Other; |
| 15951 | SDVTList SDTys = DAG.getVTList(VTs: ArrayRef(Tys, NumResultVecs + 2)); |
| 15952 | |
| 15953 | // Then, gather the new node's operands. |
| 15954 | SmallVector<SDValue, 8> Ops; |
| 15955 | Ops.push_back(Elt: N->getOperand(Num: 0)); // incoming chain |
| 15956 | Ops.push_back(Elt: N->getOperand(Num: Target.AddrOpIdx)); |
| 15957 | Ops.push_back(Elt: User.Inc); |
| 15958 | |
| 15959 | if (StoreSDNode *StN = dyn_cast<StoreSDNode>(Val: N)) { |
| 15960 | // Try to match the intrinsic's signature |
| 15961 | Ops.push_back(Elt: StN->getValue()); |
| 15962 | } else { |
| 15963 | // Loads (and of course intrinsics) match the intrinsics' signature, |
| 15964 | // so just add all but the alignment operand. |
| 15965 | unsigned LastOperand = |
| 15966 | hasAlignment ? N->getNumOperands() - 1 : N->getNumOperands(); |
| 15967 | for (unsigned i = Target.AddrOpIdx + 1; i < LastOperand; ++i) |
| 15968 | Ops.push_back(Elt: N->getOperand(Num: i)); |
| 15969 | } |
| 15970 | |
| 15971 | // For all node types, the alignment operand is always the last one. |
| 15972 | Ops.push_back(Elt: DAG.getConstant(Val: Alignment.value(), DL: dl, VT: MVT::i32)); |
| 15973 | |
| 15974 | // If this is a non-standard-aligned STORE, the penultimate operand is the |
| 15975 | // stored value. Bitcast it to the aligned type. |
| 15976 | if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) { |
| 15977 | SDValue &StVal = Ops[Ops.size() - 2]; |
| 15978 | StVal = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: AlignedVecTy, Operand: StVal); |
| 15979 | } |
| 15980 | |
| 15981 | EVT LoadVT = isLaneOp ? VecTy.getVectorElementType() : AlignedVecTy; |
| 15982 | SDValue UpdN = DAG.getMemIntrinsicNode(Opcode: NewOpc, dl, VTList: SDTys, Ops, MemVT: LoadVT, |
| 15983 | MMO: MemN->getMemOperand()); |
| 15984 | |
| 15985 | // Update the uses. |
| 15986 | SmallVector<SDValue, 5> NewResults; |
| 15987 | for (unsigned i = 0; i < NumResultVecs; ++i) |
| 15988 | NewResults.push_back(Elt: SDValue(UpdN.getNode(), i)); |
| 15989 | |
| 15990 | // If this is a non-standard-aligned LOAD, the first result is the loaded
| 15991 | // value. Bitcast it to the expected result type. |
| 15992 | if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) { |
| 15993 | SDValue &LdVal = NewResults[0]; |
| 15994 | LdVal = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecTy, Operand: LdVal); |
| 15995 | } |
| 15996 | |
| 15997 | NewResults.push_back(Elt: SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain |
| 15998 | DCI.CombineTo(N, To: NewResults); |
| 15999 | DCI.CombineTo(N: User.N, Res: SDValue(UpdN.getNode(), NumResultVecs)); |
| 16000 | |
| 16001 | return true; |
| 16002 | } |
| 16003 | |
| 16004 | // If (opcode ptr inc) is an ADD-like instruction, return the
| 16005 | // increment value. Otherwise return 0. |
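| | // The OR case covers pointers whose low bits are known to be zero, e.g.
| | // (or ptr, 4) with ptr known 8-byte aligned is equivalent to (add ptr, 4).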
| 16006 | static unsigned getPointerConstIncrement(unsigned Opcode, SDValue Ptr, |
| 16007 | SDValue Inc, const SelectionDAG &DAG) { |
| 16008 | ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Val: Inc.getNode()); |
| 16009 | if (!CInc) |
| 16010 | return 0; |
| 16011 | |
| 16012 | switch (Opcode) { |
| 16013 | case ARMISD::VLD1_UPD: |
| 16014 | case ISD::ADD: |
| 16015 | return CInc->getZExtValue(); |
| 16016 | case ISD::OR: { |
| 16017 | if (DAG.haveNoCommonBitsSet(A: Ptr, B: Inc)) { |
| 16018 | // (OR ptr inc) is the same as (ADD ptr inc) |
| 16019 | return CInc->getZExtValue(); |
| 16020 | } |
| 16021 | return 0; |
| 16022 | } |
| 16023 | default: |
| 16024 | return 0; |
| 16025 | } |
| 16026 | } |
| 16027 | |
| 16028 | static bool findPointerConstIncrement(SDNode *N, SDValue *Ptr, SDValue *CInc) { |
| 16029 | switch (N->getOpcode()) { |
| 16030 | case ISD::ADD: |
| 16031 | case ISD::OR: { |
| 16032 | if (isa<ConstantSDNode>(Val: N->getOperand(Num: 1))) { |
| 16033 | *Ptr = N->getOperand(Num: 0); |
| 16034 | *CInc = N->getOperand(Num: 1); |
| 16035 | return true; |
| 16036 | } |
| 16037 | return false; |
| 16038 | } |
| 16039 | case ARMISD::VLD1_UPD: { |
| 16040 | if (isa<ConstantSDNode>(Val: N->getOperand(Num: 2))) { |
| 16041 | *Ptr = N->getOperand(Num: 1); |
| 16042 | *CInc = N->getOperand(Num: 2); |
| 16043 | return true; |
| 16044 | } |
| 16045 | return false; |
| 16046 | } |
| 16047 | default: |
| 16048 | return false; |
| 16049 | } |
| 16050 | } |
| 16051 | |
| 16052 | /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP, |
| 16053 | /// NEON load/store intrinsics, and generic vector load/stores, to merge |
| 16054 | /// base address updates. |
| 16055 | /// For generic load/stores, the memory type is assumed to be a vector. |
| 16056 | /// The caller is assumed to have checked legality. |
| 16057 | static SDValue CombineBaseUpdate(SDNode *N, |
| 16058 | TargetLowering::DAGCombinerInfo &DCI) { |
| 16059 | const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || |
| 16060 | N->getOpcode() == ISD::INTRINSIC_W_CHAIN); |
| 16061 | const bool isStore = N->getOpcode() == ISD::STORE; |
| 16062 | const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1); |
| 16063 | BaseUpdateTarget Target = {.N: N, .isIntrinsic: isIntrinsic, .isStore: isStore, .AddrOpIdx: AddrOpIdx}; |
| 16064 | |
| 16065 | // Limit the number of possible base-updates we look at to prevent degenerate |
| 16066 | // cases. |
| 16067 | unsigned MaxBaseUpdates = ArmMaxBaseUpdatesToCheck; |
| 16068 | |
| 16069 | SDValue Addr = N->getOperand(Num: AddrOpIdx); |
| 16070 | |
| 16071 | SmallVector<BaseUpdateUser, 8> BaseUpdates; |
| 16072 | |
| 16073 | // Search for a use of the address operand that is an increment. |
| 16074 | for (SDUse &Use : Addr->uses()) { |
| 16075 | SDNode *User = Use.getUser(); |
| 16076 | if (Use.getResNo() != Addr.getResNo() || User->getNumOperands() != 2) |
| 16077 | continue; |
| 16078 | |
| 16079 | SDValue Inc = User->getOperand(Num: Use.getOperandNo() == 1 ? 0 : 1); |
| 16080 | unsigned ConstInc = |
| 16081 | getPointerConstIncrement(Opcode: User->getOpcode(), Ptr: Addr, Inc, DAG: DCI.DAG); |
| 16082 | |
| 16083 | if (ConstInc || User->getOpcode() == ISD::ADD) { |
| 16084 | BaseUpdates.push_back(Elt: {.N: User, .Inc: Inc, .ConstInc: ConstInc}); |
| 16085 | if (BaseUpdates.size() >= MaxBaseUpdates) |
| 16086 | break; |
| 16087 | } |
| 16088 | } |
| 16089 | |
| 16090 | // If the address is a constant pointer increment itself, find |
| 16091 | // another constant increment that has the same base operand.
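| | // E.g. if Addr is (add base, 4) and another user computes (add base, 20),
| | // that user can be treated as incrementing Addr itself by 16.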
| 16092 | SDValue Base; |
| 16093 | SDValue CInc; |
| 16094 | if (findPointerConstIncrement(N: Addr.getNode(), Ptr: &Base, CInc: &CInc)) { |
| 16095 | unsigned Offset = |
| 16096 | getPointerConstIncrement(Opcode: Addr->getOpcode(), Ptr: Base, Inc: CInc, DAG: DCI.DAG); |
| 16097 | for (SDUse &Use : Base->uses()) { |
| 16098 | |
| 16099 | SDNode *User = Use.getUser(); |
| 16100 | if (Use.getResNo() != Base.getResNo() || User == Addr.getNode() || |
| 16101 | User->getNumOperands() != 2) |
| 16102 | continue; |
| 16103 | |
| 16104 | SDValue UserInc = User->getOperand(Num: Use.getOperandNo() == 0 ? 1 : 0); |
| 16105 | unsigned UserOffset = |
| 16106 | getPointerConstIncrement(Opcode: User->getOpcode(), Ptr: Base, Inc: UserInc, DAG: DCI.DAG); |
| 16107 | |
| 16108 | if (!UserOffset || UserOffset <= Offset) |
| 16109 | continue; |
| 16110 | |
| 16111 | unsigned NewConstInc = UserOffset - Offset; |
| 16112 | SDValue NewInc = DCI.DAG.getConstant(Val: NewConstInc, DL: SDLoc(N), VT: MVT::i32); |
| 16113 | BaseUpdates.push_back(Elt: {.N: User, .Inc: NewInc, .ConstInc: NewConstInc}); |
| 16114 | if (BaseUpdates.size() >= MaxBaseUpdates) |
| 16115 | break; |
| 16116 | } |
| 16117 | } |
| 16118 | |
| 16119 | // Try to fold the load/store with an update that matches memory |
| 16120 | // access size. This should work well for sequential loads. |
| 16121 | unsigned NumValidUpd = BaseUpdates.size(); |
| 16122 | for (unsigned I = 0; I < NumValidUpd; I++) { |
| 16123 | BaseUpdateUser &User = BaseUpdates[I]; |
| 16124 | if (TryCombineBaseUpdate(Target, User, /*SimpleConstIncOnly=*/true, DCI)) |
| 16125 | return SDValue(); |
| 16126 | } |
| 16127 | |
| 16128 | // Try to fold with other users. Non-constant updates are considered |
| 16129 | // first, and constant updates are sorted to not break a sequence of |
| 16130 | // strided accesses (if there is any). |
| 16131 | llvm::stable_sort(Range&: BaseUpdates, |
| 16132 | C: [](const BaseUpdateUser &LHS, const BaseUpdateUser &RHS) { |
| 16133 | return LHS.ConstInc < RHS.ConstInc; |
| 16134 | }); |
| 16135 | for (BaseUpdateUser &User : BaseUpdates) { |
| 16136 | if (TryCombineBaseUpdate(Target, User, /*SimpleConstIncOnly=*/false, DCI)) |
| 16137 | return SDValue(); |
| 16138 | } |
| 16139 | return SDValue(); |
| 16140 | } |
| 16141 | |
| 16142 | static SDValue PerformVLDCombine(SDNode *N, |
| 16143 | TargetLowering::DAGCombinerInfo &DCI) { |
| 16144 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 16145 | return SDValue(); |
| 16146 | |
| 16147 | return CombineBaseUpdate(N, DCI); |
| 16148 | } |
| 16149 | |
| 16150 | static SDValue PerformMVEVLDCombine(SDNode *N, |
| 16151 | TargetLowering::DAGCombinerInfo &DCI) { |
| 16152 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 16153 | return SDValue(); |
| 16154 | |
| 16155 | SelectionDAG &DAG = DCI.DAG; |
| 16156 | SDValue Addr = N->getOperand(Num: 2); |
| 16157 | MemSDNode *MemN = cast<MemSDNode>(Val: N); |
| 16158 | SDLoc dl(N); |
| 16159 | |
| 16160 | // For stores, where there are multiple intrinsics, we only actually want
| 16161 | // to post-inc the last of them.
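| | // vst2q is emitted as two intrinsic calls (stage operand 0..1) and vst4q
| | // as four (stage operand 0..3); the checks below bail out on every stage
| | // but the final one.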
| 16162 | unsigned IntNo = N->getConstantOperandVal(Num: 1); |
| 16163 | if (IntNo == Intrinsic::arm_mve_vst2q && N->getConstantOperandVal(Num: 5) != 1) |
| 16164 | return SDValue(); |
| 16165 | if (IntNo == Intrinsic::arm_mve_vst4q && N->getConstantOperandVal(Num: 7) != 3) |
| 16166 | return SDValue(); |
| 16167 | |
| 16168 | // Search for a use of the address operand that is an increment. |
| 16169 | for (SDUse &Use : Addr->uses()) { |
| 16170 | SDNode *User = Use.getUser(); |
| 16171 | if (User->getOpcode() != ISD::ADD || Use.getResNo() != Addr.getResNo()) |
| 16172 | continue; |
| 16173 | |
| 16174 | // Check that the add is independent of the load/store. Otherwise, folding |
| 16175 | // it would create a cycle. We can avoid searching through Addr as it's a |
| 16176 | // predecessor to both. |
| 16177 | SmallPtrSet<const SDNode *, 32> Visited; |
| 16178 | SmallVector<const SDNode *, 16> Worklist; |
| 16179 | Visited.insert(Ptr: Addr.getNode()); |
| 16180 | Worklist.push_back(Elt: N); |
| 16181 | Worklist.push_back(Elt: User); |
| 16182 | const unsigned MaxSteps = 1024; |
| 16183 | if (SDNode::hasPredecessorHelper(N, Visited, Worklist, MaxSteps) || |
| 16184 | SDNode::hasPredecessorHelper(N: User, Visited, Worklist, MaxSteps)) |
| 16185 | continue; |
| 16186 | |
| 16187 | // Find the new opcode for the updating load/store. |
| 16188 | bool isLoadOp = true; |
| 16189 | unsigned NewOpc = 0; |
| 16190 | unsigned NumVecs = 0; |
| 16191 | switch (IntNo) { |
| 16192 | default: |
| 16193 | llvm_unreachable("unexpected intrinsic for MVE VLDn combine" ); |
| 16194 | case Intrinsic::arm_mve_vld2q: |
| 16195 | NewOpc = ARMISD::VLD2_UPD; |
| 16196 | NumVecs = 2; |
| 16197 | break; |
| 16198 | case Intrinsic::arm_mve_vld4q: |
| 16199 | NewOpc = ARMISD::VLD4_UPD; |
| 16200 | NumVecs = 4; |
| 16201 | break; |
| 16202 | case Intrinsic::arm_mve_vst2q: |
| 16203 | NewOpc = ARMISD::VST2_UPD; |
| 16204 | NumVecs = 2; |
| 16205 | isLoadOp = false; |
| 16206 | break; |
| 16207 | case Intrinsic::arm_mve_vst4q: |
| 16208 | NewOpc = ARMISD::VST4_UPD; |
| 16209 | NumVecs = 4; |
| 16210 | isLoadOp = false; |
| 16211 | break; |
| 16212 | } |
| 16213 | |
| 16214 | // Find the size of memory referenced by the load/store. |
| 16215 | EVT VecTy; |
| 16216 | if (isLoadOp) { |
| 16217 | VecTy = N->getValueType(ResNo: 0); |
| 16218 | } else { |
| 16219 | VecTy = N->getOperand(Num: 3).getValueType(); |
| 16220 | } |
| 16221 | |
| 16222 | unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; |
| 16223 | |
| 16224 | // If the increment is a constant, it must match the memory ref size. |
| 16225 | SDValue Inc = User->getOperand(Num: User->getOperand(Num: 0) == Addr ? 1 : 0); |
| 16226 | ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Val: Inc.getNode()); |
| 16227 | if (!CInc || CInc->getZExtValue() != NumBytes) |
| 16228 | continue; |
| 16229 | |
| 16230 | // Create the new updating load/store node. |
| 16231 | // First, create an SDVTList for the new updating node's results. |
| 16232 | EVT Tys[6]; |
| 16233 | unsigned NumResultVecs = (isLoadOp ? NumVecs : 0); |
| 16234 | unsigned n; |
| 16235 | for (n = 0; n < NumResultVecs; ++n) |
| 16236 | Tys[n] = VecTy; |
| 16237 | Tys[n++] = MVT::i32; |
| 16238 | Tys[n] = MVT::Other; |
| 16239 | SDVTList SDTys = DAG.getVTList(VTs: ArrayRef(Tys, NumResultVecs + 2)); |
| 16240 | |
| 16241 | // Then, gather the new node's operands. |
| 16242 | SmallVector<SDValue, 8> Ops; |
| 16243 | Ops.push_back(Elt: N->getOperand(Num: 0)); // incoming chain |
| 16244 | Ops.push_back(Elt: N->getOperand(Num: 2)); // ptr |
| 16245 | Ops.push_back(Elt: Inc); |
| 16246 | |
| 16247 | for (unsigned i = 3; i < N->getNumOperands(); ++i) |
| 16248 | Ops.push_back(Elt: N->getOperand(Num: i)); |
| 16249 | |
| 16250 | SDValue UpdN = DAG.getMemIntrinsicNode(Opcode: NewOpc, dl, VTList: SDTys, Ops, MemVT: VecTy, |
| 16251 | MMO: MemN->getMemOperand()); |
| 16252 | |
| 16253 | // Update the uses. |
| 16254 | SmallVector<SDValue, 5> NewResults; |
| 16255 | for (unsigned i = 0; i < NumResultVecs; ++i) |
| 16256 | NewResults.push_back(Elt: SDValue(UpdN.getNode(), i)); |
| 16257 | |
| 16258 | NewResults.push_back(Elt: SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain |
| 16259 | DCI.CombineTo(N, To: NewResults); |
| 16260 | DCI.CombineTo(N: User, Res: SDValue(UpdN.getNode(), NumResultVecs)); |
| 16261 | |
| 16262 | break; |
| 16263 | } |
| 16264 | |
| 16265 | return SDValue(); |
| 16266 | } |
| 16267 | |
| 16268 | /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a |
| 16269 | /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic |
| 16270 | /// are also VDUPLANEs. If so, combine them to a vldN-dup operation and |
| 16271 | /// return true. |
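| | /// For example (illustrative): if every use of a vld2lane loading lane 1 is a
| | /// VDUPLANE of lane 1, the lane load plus the duplications collapse into a
| | /// single VLD2DUP that loads and splats the two elements directly.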
| 16272 | static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
| 16273 | SelectionDAG &DAG = DCI.DAG; |
| 16274 | EVT VT = N->getValueType(ResNo: 0); |
| 16275 | // vldN-dup instructions only support 64-bit vectors for N > 1. |
| 16276 | if (!VT.is64BitVector()) |
| 16277 | return false; |
| 16278 | |
| 16279 | // Check if the VDUPLANE operand is a vldN-dup intrinsic. |
| 16280 | SDNode *VLD = N->getOperand(Num: 0).getNode(); |
| 16281 | if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) |
| 16282 | return false; |
| 16283 | unsigned NumVecs = 0; |
| 16284 | unsigned NewOpc = 0; |
| 16285 | unsigned IntNo = VLD->getConstantOperandVal(Num: 1); |
| 16286 | if (IntNo == Intrinsic::arm_neon_vld2lane) { |
| 16287 | NumVecs = 2; |
| 16288 | NewOpc = ARMISD::VLD2DUP; |
| 16289 | } else if (IntNo == Intrinsic::arm_neon_vld3lane) { |
| 16290 | NumVecs = 3; |
| 16291 | NewOpc = ARMISD::VLD3DUP; |
| 16292 | } else if (IntNo == Intrinsic::arm_neon_vld4lane) { |
| 16293 | NumVecs = 4; |
| 16294 | NewOpc = ARMISD::VLD4DUP; |
| 16295 | } else { |
| 16296 | return false; |
| 16297 | } |
| 16298 | |
| 16299 | // First check that all the vldN-lane uses are VDUPLANEs and that the lane |
| 16300 | // numbers match the load. |
| 16301 | unsigned VLDLaneNo = VLD->getConstantOperandVal(Num: NumVecs + 3); |
| 16302 | for (SDUse &Use : VLD->uses()) { |
| 16303 | // Ignore uses of the chain result. |
| 16304 | if (Use.getResNo() == NumVecs) |
| 16305 | continue; |
| 16306 | SDNode *User = Use.getUser(); |
| 16307 | if (User->getOpcode() != ARMISD::VDUPLANE || |
| 16308 | VLDLaneNo != User->getConstantOperandVal(Num: 1)) |
| 16309 | return false; |
| 16310 | } |
| 16311 | |
| 16312 | // Create the vldN-dup node. |
| 16313 | EVT Tys[5]; |
| 16314 | unsigned n; |
| 16315 | for (n = 0; n < NumVecs; ++n) |
| 16316 | Tys[n] = VT; |
| 16317 | Tys[n] = MVT::Other; |
| 16318 | SDVTList SDTys = DAG.getVTList(VTs: ArrayRef(Tys, NumVecs + 1)); |
| 16319 | SDValue Ops[] = { VLD->getOperand(Num: 0), VLD->getOperand(Num: 2) }; |
| 16320 | MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(Val: VLD); |
| 16321 | SDValue VLDDup = DAG.getMemIntrinsicNode(Opcode: NewOpc, dl: SDLoc(VLD), VTList: SDTys, |
| 16322 | Ops, MemVT: VLDMemInt->getMemoryVT(), |
| 16323 | MMO: VLDMemInt->getMemOperand()); |
| 16324 | |
| 16325 | // Update the uses. |
| 16326 | for (SDUse &Use : VLD->uses()) { |
| 16327 | unsigned ResNo = Use.getResNo(); |
| 16328 | // Ignore uses of the chain result. |
| 16329 | if (ResNo == NumVecs) |
| 16330 | continue; |
| 16331 | DCI.CombineTo(N: Use.getUser(), Res: SDValue(VLDDup.getNode(), ResNo)); |
| 16332 | } |
| 16333 | |
| 16334 | // Now the vldN-lane intrinsic is dead except for its chain result. |
| 16335 | // Update uses of the chain. |
| 16336 | std::vector<SDValue> VLDDupResults; |
| 16337 | for (unsigned n = 0; n < NumVecs; ++n) |
| 16338 | VLDDupResults.push_back(x: SDValue(VLDDup.getNode(), n)); |
| 16339 | VLDDupResults.push_back(x: SDValue(VLDDup.getNode(), NumVecs)); |
| 16340 | DCI.CombineTo(N: VLD, To: VLDDupResults); |
| 16341 | |
| 16342 | return true; |
| 16343 | } |
| 16344 | |
| 16345 | /// PerformVDUPLANECombine - Target-specific dag combine xforms for |
| 16346 | /// ARMISD::VDUPLANE. |
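| | /// On MVE, for example (illustrative), VDUPLANE(%vec, lane) is rewritten as
| | /// VDUP(EXTRACT_VECTOR_ELT(%vec, lane)), since the MVE VDUP takes its input
| | /// from a GPR rather than from a vector lane.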
| 16347 | static SDValue PerformVDUPLANECombine(SDNode *N, |
| 16348 | TargetLowering::DAGCombinerInfo &DCI, |
| 16349 | const ARMSubtarget *Subtarget) { |
| 16350 | SDValue Op = N->getOperand(Num: 0); |
| 16351 | EVT VT = N->getValueType(ResNo: 0); |
| 16352 | |
| 16353 | // On MVE, we just convert the VDUPLANE to a VDUP with an extract. |
| 16354 | if (Subtarget->hasMVEIntegerOps()) { |
| 16355 | EVT ExtractVT = VT.getVectorElementType();
| 16356 | // We need to ensure we are creating a legal type. |
| 16357 | if (!DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT: ExtractVT)) |
| 16358 | ExtractVT = MVT::i32; |
| 16359 | SDValue Extract = DCI.DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: SDLoc(N), VT: ExtractVT,
| 16360 | N1: N->getOperand(Num: 0), N2: N->getOperand(Num: 1)); |
| 16361 | return DCI.DAG.getNode(Opcode: ARMISD::VDUP, DL: SDLoc(N), VT, Operand: Extract); |
| 16362 | } |
| 16363 | |
| 16364 | // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses |
| 16365 | // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. |
| 16366 | if (CombineVLDDUP(N, DCI)) |
| 16367 | return SDValue(N, 0); |
| 16368 | |
| 16369 | // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is |
| 16370 | // redundant. Ignore bit_converts for now; element sizes are checked below. |
| 16371 | while (Op.getOpcode() == ISD::BITCAST) |
| 16372 | Op = Op.getOperand(i: 0); |
| 16373 | if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) |
| 16374 | return SDValue(); |
| 16375 | |
| 16376 | // Make sure the VMOV element size is not bigger than the VDUPLANE elements. |
| 16377 | unsigned EltSize = Op.getScalarValueSizeInBits(); |
| 16378 | // The canonical VMOV for a zero vector uses a 32-bit element size. |
| 16379 | unsigned Imm = Op.getConstantOperandVal(i: 0); |
| 16380 | unsigned EltBits; |
| 16381 | if (ARM_AM::decodeVMOVModImm(ModImm: Imm, EltBits) == 0) |
| 16382 | EltSize = 8; |
| 16383 | if (EltSize > VT.getScalarSizeInBits()) |
| 16384 | return SDValue(); |
| 16385 | |
| 16386 | return DCI.DAG.getNode(Opcode: ISD::BITCAST, DL: SDLoc(N), VT, Operand: Op); |
| 16387 | } |
| 16388 | |
| 16389 | /// PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP. |
| 16390 | static SDValue PerformVDUPCombine(SDNode *N, SelectionDAG &DAG, |
| 16391 | const ARMSubtarget *Subtarget) { |
| 16392 | SDValue Op = N->getOperand(Num: 0); |
| 16393 | SDLoc dl(N); |
| 16394 | |
| 16395 | if (Subtarget->hasMVEIntegerOps()) { |
| 16396 | // Convert VDUP f32 -> VDUP BITCAST i32 under MVE, as we know the value will |
| 16397 | // need to come from a GPR. |
| 16398 | if (Op.getValueType() == MVT::f32) |
| 16399 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT: N->getValueType(ResNo: 0), |
| 16400 | Operand: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::i32, Operand: Op)); |
| 16401 | else if (Op.getValueType() == MVT::f16) |
| 16402 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT: N->getValueType(ResNo: 0), |
| 16403 | Operand: DAG.getNode(Opcode: ARMISD::VMOVrh, DL: dl, VT: MVT::i32, Operand: Op)); |
| 16404 | } |
| 16405 | |
| 16406 | if (!Subtarget->hasNEON()) |
| 16407 | return SDValue(); |
| 16408 | |
| 16409 | // Match VDUP(LOAD) -> VLD1DUP. |
| 16410 | // We match this pattern here rather than waiting for isel because the |
| 16411 | // transform is only legal for unindexed loads. |
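| | // For example (schematically): VDUP(load i16, %ptr) becomes a single
| | // VLD1DUP of %ptr, with the load's chain uses rerouted to the VLD1DUP chain.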
| 16412 | LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: Op.getNode()); |
| 16413 | if (LD && Op.hasOneUse() && LD->isUnindexed() && |
| 16414 | LD->getMemoryVT() == N->getValueType(ResNo: 0).getVectorElementType()) { |
| 16415 | SDValue Ops[] = {LD->getOperand(Num: 0), LD->getOperand(Num: 1), |
| 16416 | DAG.getConstant(Val: LD->getAlign().value(), DL: SDLoc(N), VT: MVT::i32)}; |
| 16417 | SDVTList SDTys = DAG.getVTList(VT1: N->getValueType(ResNo: 0), VT2: MVT::Other); |
| 16418 | SDValue VLDDup = |
| 16419 | DAG.getMemIntrinsicNode(Opcode: ARMISD::VLD1DUP, dl: SDLoc(N), VTList: SDTys, Ops, |
| 16420 | MemVT: LD->getMemoryVT(), MMO: LD->getMemOperand()); |
| 16421 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 1), To: VLDDup.getValue(R: 1)); |
| 16422 | return VLDDup; |
| 16423 | } |
| 16424 | |
| 16425 | return SDValue(); |
| 16426 | } |
| 16427 | |
| 16428 | static SDValue PerformLOADCombine(SDNode *N, |
| 16429 | TargetLowering::DAGCombinerInfo &DCI, |
| 16430 | const ARMSubtarget *Subtarget) { |
| 16431 | EVT VT = N->getValueType(ResNo: 0); |
| 16432 | |
| 16433 | // If this is a legal vector load, try to combine it into a VLD1_UPD. |
| 16434 | if (Subtarget->hasNEON() && ISD::isNormalLoad(N) && VT.isVector() && |
| 16435 | DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
| 16436 | return CombineBaseUpdate(N, DCI); |
| 16437 | |
| 16438 | return SDValue(); |
| 16439 | } |
| 16440 | |
| 16441 | // Optimize trunc store (of multiple scalars) to shuffle and store. First, |
| 16442 | // pack all of the elements in one place. Next, store to memory in fewer |
| 16443 | // chunks. |
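| | // For example (illustrative): truncating a v8i16 value to v8i8 for a store
| | // shuffles the eight bytes into the low half of the register, which is then
| | // written out as a small number of i32-sized stores.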
| 16444 | static SDValue PerformTruncatingStoreCombine(StoreSDNode *St, |
| 16445 | SelectionDAG &DAG) { |
| 16446 | SDValue StVal = St->getValue(); |
| 16447 | EVT VT = StVal.getValueType(); |
| 16448 | if (!St->isTruncatingStore() || !VT.isVector()) |
| 16449 | return SDValue(); |
| 16450 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 16451 | EVT StVT = St->getMemoryVT(); |
| 16452 | unsigned NumElems = VT.getVectorNumElements(); |
| 16453 | assert(StVT != VT && "Cannot truncate to the same type");
| 16454 | unsigned FromEltSz = VT.getScalarSizeInBits(); |
| 16455 | unsigned ToEltSz = StVT.getScalarSizeInBits(); |
| 16456 | |
| 16457 | // The From and To element sizes and the element count must be powers of two.
| 16458 | if (!isPowerOf2_32(Value: NumElems * FromEltSz * ToEltSz)) |
| 16459 | return SDValue(); |
| 16460 | |
| 16461 | // We are going to use the original vector elt for storing. |
| 16462 | // Accumulated smaller vector elements must be a multiple of the store size. |
| 16463 | if (0 != (NumElems * FromEltSz) % ToEltSz) |
| 16464 | return SDValue(); |
| 16465 | |
| 16466 | unsigned SizeRatio = FromEltSz / ToEltSz; |
| 16467 | assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits()); |
| 16468 | |
| 16469 | // Create a type on which we perform the shuffle. |
| 16470 | EVT WideVecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: StVT.getScalarType(), |
| 16471 | NumElements: NumElems * SizeRatio); |
| 16472 | assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); |
| 16473 | |
| 16474 | SDLoc DL(St); |
| 16475 | SDValue WideVec = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: WideVecVT, Operand: StVal); |
| 16476 | SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); |
| 16477 | for (unsigned i = 0; i < NumElems; ++i) |
| 16478 | ShuffleVec[i] = DAG.getDataLayout().isBigEndian() ? (i + 1) * SizeRatio - 1 |
| 16479 | : i * SizeRatio; |
| 16480 | |
| 16481 | // Can't shuffle using an illegal type. |
| 16482 | if (!TLI.isTypeLegal(VT: WideVecVT)) |
| 16483 | return SDValue(); |
| 16484 | |
| 16485 | SDValue Shuff = DAG.getVectorShuffle( |
| 16486 | VT: WideVecVT, dl: DL, N1: WideVec, N2: DAG.getUNDEF(VT: WideVec.getValueType()), Mask: ShuffleVec); |
| 16487 | // At this point all of the data is stored at the bottom of the
| 16488 | // register. We now need to save it to memory.
| 16489 | |
| 16490 | // Find the largest store unit |
| 16491 | MVT StoreType = MVT::i8; |
| 16492 | for (MVT Tp : MVT::integer_valuetypes()) { |
| 16493 | if (TLI.isTypeLegal(VT: Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz) |
| 16494 | StoreType = Tp; |
| 16495 | } |
| 16496 | // Didn't find a legal store type. |
| 16497 | if (!TLI.isTypeLegal(VT: StoreType)) |
| 16498 | return SDValue(); |
| 16499 | |
| 16500 | // Bitcast the original vector into a vector of store-size units |
| 16501 | EVT StoreVecVT = |
| 16502 | EVT::getVectorVT(Context&: *DAG.getContext(), VT: StoreType, |
| 16503 | NumElements: VT.getSizeInBits() / EVT(StoreType).getSizeInBits()); |
| 16504 | assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); |
| 16505 | SDValue ShuffWide = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: StoreVecVT, Operand: Shuff); |
| 16506 | SmallVector<SDValue, 8> Chains; |
| 16507 | SDValue Increment = DAG.getConstant(Val: StoreType.getSizeInBits() / 8, DL, |
| 16508 | VT: TLI.getPointerTy(DL: DAG.getDataLayout())); |
| 16509 | SDValue BasePtr = St->getBasePtr(); |
| 16510 | |
| 16511 | // Perform one or more big stores into memory. |
| 16512 | unsigned E = (ToEltSz * NumElems) / StoreType.getSizeInBits(); |
| 16513 | for (unsigned I = 0; I < E; I++) { |
| 16514 | SDValue SubVec = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: StoreType, |
| 16515 | N1: ShuffWide, N2: DAG.getIntPtrConstant(Val: I, DL)); |
| 16516 | SDValue Ch = |
| 16517 | DAG.getStore(Chain: St->getChain(), dl: DL, Val: SubVec, Ptr: BasePtr, PtrInfo: St->getPointerInfo(), |
| 16518 | Alignment: St->getAlign(), MMOFlags: St->getMemOperand()->getFlags()); |
| 16519 | BasePtr = |
| 16520 | DAG.getNode(Opcode: ISD::ADD, DL, VT: BasePtr.getValueType(), N1: BasePtr, N2: Increment); |
| 16521 | Chains.push_back(Elt: Ch); |
| 16522 | } |
| 16523 | return DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Chains); |
| 16524 | } |
| 16525 | |
| 16526 | // Try taking a single vector store from an fpround (which would otherwise turn |
| 16527 | // into an expensive buildvector) and splitting it into a series of narrowing |
| 16528 | // stores. |
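| | // For example (illustrative): a store of (fpround v8f32 to v8f16) is split
| | // into two v4f32 subvectors, each converted with VCVTN and truncating-stored
| | // as a v4i16 block at consecutive offsets.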
| 16529 | static SDValue PerformSplittingToNarrowingStores(StoreSDNode *St, |
| 16530 | SelectionDAG &DAG) { |
| 16531 | if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed()) |
| 16532 | return SDValue(); |
| 16533 | SDValue Trunc = St->getValue(); |
| 16534 | if (Trunc->getOpcode() != ISD::FP_ROUND) |
| 16535 | return SDValue(); |
| 16536 | EVT FromVT = Trunc->getOperand(Num: 0).getValueType(); |
| 16537 | EVT ToVT = Trunc.getValueType(); |
| 16538 | if (!ToVT.isVector()) |
| 16539 | return SDValue(); |
| 16540 | assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements()); |
| 16541 | EVT ToEltVT = ToVT.getVectorElementType(); |
| 16542 | EVT FromEltVT = FromVT.getVectorElementType(); |
| 16543 | |
| 16544 | if (FromEltVT != MVT::f32 || ToEltVT != MVT::f16) |
| 16545 | return SDValue(); |
| 16546 | |
| 16547 | unsigned NumElements = 4; |
| 16548 | if (FromVT.getVectorNumElements() % NumElements != 0) |
| 16549 | return SDValue(); |
| 16550 | |
| 16551 | // Test if the Trunc will be convertible to a VMOVN with a shuffle, and if so
| 16552 | // use the VMOVN over splitting the store. We are looking for patterns of: |
| 16553 | // !rev: 0 N 1 N+1 2 N+2 ... |
| 16554 | // rev: N 0 N+1 1 N+2 2 ... |
| 16555 | // The shuffle may either be a single source (in which case N = NumElts/2) or |
| 16556 | // two inputs extended with concat to the same size (in which case N = |
| 16557 | // NumElts). |
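| | // For example, with a single 8-element source (so N = 4) the !rev pattern
| | // requires the mask to begin <0, 4, 1, 5, ...>.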
| 16558 | auto isVMOVNShuffle = [&](ShuffleVectorSDNode *SVN, bool Rev) { |
| 16559 | ArrayRef<int> M = SVN->getMask(); |
| 16560 | unsigned NumElts = ToVT.getVectorNumElements(); |
| 16561 | if (SVN->getOperand(Num: 1).isUndef()) |
| 16562 | NumElts /= 2; |
| 16563 | |
| 16564 | unsigned Off0 = Rev ? NumElts : 0; |
| 16565 | unsigned Off1 = Rev ? 0 : NumElts; |
| 16566 | |
| 16567 | for (unsigned I = 0; I < NumElts; I += 2) { |
| 16568 | if (M[I] >= 0 && M[I] != (int)(Off0 + I / 2)) |
| 16569 | return false; |
| 16570 | if (M[I + 1] >= 0 && M[I + 1] != (int)(Off1 + I / 2)) |
| 16571 | return false; |
| 16572 | } |
| 16573 | |
| 16574 | return true; |
| 16575 | }; |
| 16576 | |
| 16577 | if (auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Val: Trunc.getOperand(i: 0))) |
| 16578 | if (isVMOVNShuffle(Shuffle, false) || isVMOVNShuffle(Shuffle, true)) |
| 16579 | return SDValue(); |
| 16580 | |
| 16581 | LLVMContext &C = *DAG.getContext(); |
| 16582 | SDLoc DL(St); |
| 16583 | // Details about the old store |
| 16584 | SDValue Ch = St->getChain(); |
| 16585 | SDValue BasePtr = St->getBasePtr(); |
| 16586 | Align Alignment = St->getBaseAlign(); |
| 16587 | MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags(); |
| 16588 | AAMDNodes AAInfo = St->getAAInfo(); |
| 16589 | |
| 16590 | // We split the store into slices of NumElements. The fp16 conversions are
| 16591 | // done with vcvt and then stored as truncating integer stores.
| 16592 | EVT NewFromVT = EVT::getVectorVT(Context&: C, VT: FromEltVT, NumElements); |
| 16593 | EVT NewToVT = EVT::getVectorVT( |
| 16594 | Context&: C, VT: EVT::getIntegerVT(Context&: C, BitWidth: ToEltVT.getSizeInBits()), NumElements); |
| 16595 | |
| 16596 | SmallVector<SDValue, 4> Stores; |
| 16597 | for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) { |
| 16598 | unsigned NewOffset = i * NumElements * ToEltVT.getSizeInBits() / 8; |
| 16599 | SDValue NewPtr = |
| 16600 | DAG.getObjectPtrOffset(SL: DL, Ptr: BasePtr, Offset: TypeSize::getFixed(ExactSize: NewOffset)); |
| 16601 | |
| 16602 | SDValue Extract =
| 16603 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT: NewFromVT, N1: Trunc.getOperand(i: 0), |
| 16604 | N2: DAG.getConstant(Val: i * NumElements, DL, VT: MVT::i32)); |
| 16605 | |
| 16606 | SDValue FPTrunc = |
| 16607 | DAG.getNode(Opcode: ARMISD::VCVTN, DL, VT: MVT::v8f16, N1: DAG.getUNDEF(VT: MVT::v8f16), |
| 16608 | N2: Extract, N3: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
| 16609 | Extract = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT: MVT::v4i32, Operand: FPTrunc); |
| 16610 | |
| 16611 | SDValue Store = DAG.getTruncStore( |
| 16612 | Chain: Ch, dl: DL, Val: Extract, Ptr: NewPtr, PtrInfo: St->getPointerInfo().getWithOffset(O: NewOffset), |
| 16613 | SVT: NewToVT, Alignment, MMOFlags, AAInfo); |
| 16614 | Stores.push_back(Elt: Store); |
| 16615 | } |
| 16616 | return DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Stores); |
| 16617 | } |
| 16618 | |
| 16619 | // Try taking a single vector store from an MVETRUNC (which would otherwise turn |
| 16620 | // into an expensive buildvector) and splitting it into a series of narrowing |
| 16621 | // stores. |
| 16622 | static SDValue PerformSplittingMVETruncToNarrowingStores(StoreSDNode *St, |
| 16623 | SelectionDAG &DAG) { |
| 16624 | if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed()) |
| 16625 | return SDValue(); |
| 16626 | SDValue Trunc = St->getValue(); |
| 16627 | if (Trunc->getOpcode() != ARMISD::MVETRUNC) |
| 16628 | return SDValue(); |
| 16629 | EVT FromVT = Trunc->getOperand(Num: 0).getValueType(); |
| 16630 | EVT ToVT = Trunc.getValueType(); |
| 16631 | |
| 16632 | LLVMContext &C = *DAG.getContext(); |
| 16633 | SDLoc DL(St); |
| 16634 | // Details about the old store |
| 16635 | SDValue Ch = St->getChain(); |
| 16636 | SDValue BasePtr = St->getBasePtr(); |
| 16637 | Align Alignment = St->getBaseAlign(); |
| 16638 | MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags(); |
| 16639 | AAMDNodes AAInfo = St->getAAInfo(); |
| 16640 | |
| 16641 | EVT NewToVT = EVT::getVectorVT(Context&: C, VT: ToVT.getVectorElementType(), |
| 16642 | NumElements: FromVT.getVectorNumElements()); |
| 16643 | |
| 16644 | SmallVector<SDValue, 4> Stores; |
| 16645 | for (unsigned i = 0; i < Trunc.getNumOperands(); i++) { |
| 16646 | unsigned NewOffset = |
| 16647 | i * FromVT.getVectorNumElements() * ToVT.getScalarSizeInBits() / 8; |
| 16648 | SDValue NewPtr = |
| 16649 | DAG.getObjectPtrOffset(SL: DL, Ptr: BasePtr, Offset: TypeSize::getFixed(ExactSize: NewOffset)); |
| 16650 | |
| 16651 | SDValue Extract = Trunc.getOperand(i);
| 16652 | SDValue Store = DAG.getTruncStore( |
| 16653 | Chain: Ch, dl: DL, Val: Extract, Ptr: NewPtr, PtrInfo: St->getPointerInfo().getWithOffset(O: NewOffset), |
| 16654 | SVT: NewToVT, Alignment, MMOFlags, AAInfo); |
| 16655 | Stores.push_back(Elt: Store); |
| 16656 | } |
| 16657 | return DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Stores); |
| 16658 | } |
| 16659 | |
| 16660 | // Given a floating point store from an extracted vector, with an integer
| 16661 | // VGETLANE that already exists, store the existing VGETLANEu directly. This
| 16662 | // can help reduce FP register pressure, avoids the FP extract, and allows the
| 16663 | // use of integer post-inc stores, which are not available with vstr.
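| | // For example (schematically): a store of (f16 extract_elt %v, 2), where a
| | // VGETLANEu(%v, 2) node already exists, becomes an i16 truncating store of
| | // that VGETLANEu result.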
| 16664 | static SDValue PerformExtractFpToIntStores(StoreSDNode *St, SelectionDAG &DAG) {
| 16665 | if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed()) |
| 16666 | return SDValue(); |
| 16667 | SDValue Extract = St->getValue();
| 16668 | EVT VT = Extract.getValueType(); |
| 16669 | // For now only uses f16. This may be useful for f32 too, but that will |
| 16670 | // be bitcast(extract), not the VGETLANEu we currently check here. |
| 16671 | if (VT != MVT::f16 || Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT) |
| 16672 | return SDValue(); |
| 16673 | |
| 16674 | SDNode *GetLane = |
| 16675 | DAG.getNodeIfExists(Opcode: ARMISD::VGETLANEu, VTList: DAG.getVTList(VT: MVT::i32), |
| 16676 | Ops: {Extract.getOperand(i: 0), Extract.getOperand(i: 1)}); |
| 16677 | if (!GetLane) |
| 16678 | return SDValue(); |
| 16679 | |
| 16680 | LLVMContext &C = *DAG.getContext(); |
| 16681 | SDLoc DL(St); |
| 16682 | // Create a new integer store to replace the existing floating point version. |
| 16683 | SDValue Ch = St->getChain(); |
| 16684 | SDValue BasePtr = St->getBasePtr(); |
| 16685 | Align Alignment = St->getBaseAlign(); |
| 16686 | MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags(); |
| 16687 | AAMDNodes AAInfo = St->getAAInfo(); |
| 16688 | EVT NewToVT = EVT::getIntegerVT(Context&: C, BitWidth: VT.getSizeInBits()); |
| 16689 | SDValue Store = DAG.getTruncStore(Chain: Ch, dl: DL, Val: SDValue(GetLane, 0), Ptr: BasePtr, |
| 16690 | PtrInfo: St->getPointerInfo(), SVT: NewToVT, Alignment, |
| 16691 | MMOFlags, AAInfo); |
| 16692 | |
| 16693 | return Store; |
| 16694 | } |
| 16695 | |
| 16696 | /// PerformSTORECombine - Target-specific dag combine xforms for |
| 16697 | /// ISD::STORE. |
| 16698 | static SDValue PerformSTORECombine(SDNode *N, |
| 16699 | TargetLowering::DAGCombinerInfo &DCI, |
| 16700 | const ARMSubtarget *Subtarget) { |
| 16701 | StoreSDNode *St = cast<StoreSDNode>(Val: N); |
| 16702 | if (St->isVolatile()) |
| 16703 | return SDValue(); |
| 16704 | SDValue StVal = St->getValue(); |
| 16705 | EVT VT = StVal.getValueType(); |
| 16706 | |
| 16707 | if (Subtarget->hasNEON()) |
| 16708 | if (SDValue Store = PerformTruncatingStoreCombine(St, DAG&: DCI.DAG)) |
| 16709 | return Store; |
| 16710 | |
| 16711 | if (Subtarget->hasMVEFloatOps()) |
| 16712 | if (SDValue NewToken = PerformSplittingToNarrowingStores(St, DAG&: DCI.DAG)) |
| 16713 | return NewToken; |
| 16714 | |
| 16715 | if (Subtarget->hasMVEIntegerOps()) { |
| 16716 | if (SDValue NewChain = PerformExtractFpToIntStores(St, DAG&: DCI.DAG)) |
| 16717 | return NewChain; |
| 16718 | if (SDValue NewToken = |
| 16719 | PerformSplittingMVETruncToNarrowingStores(St, DAG&: DCI.DAG)) |
| 16720 | return NewToken; |
| 16721 | } |
| 16722 | |
| 16723 | if (!ISD::isNormalStore(N: St)) |
| 16724 | return SDValue(); |
| 16725 | |
| 16726 | // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and |
| 16727 | // ARM stores of arguments in the same cache line. |
| 16728 | if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && |
| 16729 | StVal.getNode()->hasOneUse()) { |
| 16730 | SelectionDAG &DAG = DCI.DAG; |
| 16731 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); |
| 16732 | SDLoc DL(St); |
| 16733 | SDValue BasePtr = St->getBasePtr(); |
| 16734 | SDValue NewST1 = DAG.getStore( |
| 16735 | Chain: St->getChain(), dl: DL, Val: StVal.getNode()->getOperand(Num: isBigEndian ? 1 : 0), |
| 16736 | Ptr: BasePtr, PtrInfo: St->getPointerInfo(), Alignment: St->getBaseAlign(), |
| 16737 | MMOFlags: St->getMemOperand()->getFlags()); |
| 16738 | |
| 16739 | SDValue OffsetPtr = DAG.getNode(Opcode: ISD::ADD, DL, VT: MVT::i32, N1: BasePtr, |
| 16740 | N2: DAG.getConstant(Val: 4, DL, VT: MVT::i32)); |
| 16741 | return DAG.getStore(Chain: NewST1.getValue(R: 0), dl: DL, |
| 16742 | Val: StVal.getNode()->getOperand(Num: isBigEndian ? 0 : 1), |
| 16743 | Ptr: OffsetPtr, PtrInfo: St->getPointerInfo().getWithOffset(O: 4), |
| 16744 | Alignment: St->getBaseAlign(), MMOFlags: St->getMemOperand()->getFlags()); |
| 16745 | } |
| 16746 | |
| 16747 | if (StVal.getValueType() == MVT::i64 && |
| 16748 | StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { |
| 16749 | |
| 16750 | // Bitcast an i64 store extracted from a vector to f64. |
| 16751 | // Otherwise, the i64 value will be legalized to a pair of i32 values. |
| 16752 | SelectionDAG &DAG = DCI.DAG; |
| 16753 | SDLoc dl(StVal); |
| 16754 | SDValue IntVec = StVal.getOperand(i: 0); |
| 16755 | EVT FloatVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::f64, |
| 16756 | NumElements: IntVec.getValueType().getVectorNumElements()); |
| 16757 | SDValue Vec = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: FloatVT, Operand: IntVec); |
| 16758 | SDValue ExtElt = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, |
| 16759 | N1: Vec, N2: StVal.getOperand(i: 1)); |
| 16760 | dl = SDLoc(N); |
| 16761 | SDValue V = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::i64, Operand: ExtElt); |
| 16762 | // Make the DAGCombiner fold the bitcasts. |
| 16763 | DCI.AddToWorklist(N: Vec.getNode()); |
| 16764 | DCI.AddToWorklist(N: ExtElt.getNode()); |
| 16765 | DCI.AddToWorklist(N: V.getNode()); |
| 16766 | return DAG.getStore(Chain: St->getChain(), dl, Val: V, Ptr: St->getBasePtr(), |
| 16767 | PtrInfo: St->getPointerInfo(), Alignment: St->getAlign(), |
| 16768 | MMOFlags: St->getMemOperand()->getFlags(), AAInfo: St->getAAInfo()); |
| 16769 | } |
| 16770 | |
| 16771 | // If this is a legal vector store, try to combine it into a VST1_UPD. |
| 16772 | if (Subtarget->hasNEON() && ISD::isNormalStore(N) && VT.isVector() && |
| 16773 | DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
| 16774 | return CombineBaseUpdate(N, DCI); |
| 16775 | |
| 16776 | return SDValue(); |
| 16777 | } |
| 16778 | |
| 16779 | /// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) |
| 16780 | /// can replace combinations of VMUL and VCVT (floating-point to integer) |
| 16781 | /// when the VMUL has a constant operand that is a power of 2. |
| 16782 | /// |
| 16783 | /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): |
| 16784 | /// vmul.f32 d16, d17, d16 |
| 16785 | /// vcvt.s32.f32 d16, d16 |
| 16786 | /// becomes: |
| 16787 | /// vcvt.s32.f32 d16, d16, #3 |
| 16788 | static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG, |
| 16789 | const ARMSubtarget *Subtarget) { |
| 16790 | if (!Subtarget->hasNEON()) |
| 16791 | return SDValue(); |
| 16792 | |
| 16793 | SDValue Op = N->getOperand(Num: 0); |
| 16794 | if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() || |
| 16795 | Op.getOpcode() != ISD::FMUL) |
| 16796 | return SDValue(); |
| 16797 | |
| 16798 | SDValue ConstVec = Op->getOperand(Num: 1); |
| 16799 | if (!isa<BuildVectorSDNode>(Val: ConstVec)) |
| 16800 | return SDValue(); |
| 16801 | |
| 16802 | MVT FloatTy = Op.getSimpleValueType().getVectorElementType(); |
| 16803 | uint32_t FloatBits = FloatTy.getSizeInBits(); |
| 16804 | MVT IntTy = N->getSimpleValueType(ResNo: 0).getVectorElementType(); |
| 16805 | uint32_t IntBits = IntTy.getSizeInBits(); |
| 16806 | unsigned NumLanes = Op.getValueType().getVectorNumElements(); |
| 16807 | if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) { |
| 16808 | // These instructions only exist converting from f32 to i32. We can handle |
| 16809 | // smaller integers by generating an extra truncate, but larger ones would |
| 16810 | // be lossy. We also can't handle anything other than 2 or 4 lanes, since |
| 16811 | // these instructions only support v2i32/v4i32 types.
| 16812 | return SDValue(); |
| 16813 | } |
| 16814 | |
| 16815 | BitVector UndefElements; |
| 16816 | BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Val&: ConstVec); |
| 16817 | int32_t C = BV->getConstantFPSplatPow2ToLog2Int(UndefElements: &UndefElements, BitWidth: 33); |
| 16818 | if (C == -1 || C == 0 || C > 32) |
| 16819 | return SDValue(); |
| 16820 | |
| 16821 | SDLoc dl(N); |
| 16822 | bool isSigned = N->getOpcode() == ISD::FP_TO_SINT; |
| 16823 | unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs : |
| 16824 | Intrinsic::arm_neon_vcvtfp2fxu; |
| 16825 | SDValue FixConv = DAG.getNode( |
| 16826 | Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: NumLanes == 2 ? MVT::v2i32 : MVT::v4i32, |
| 16827 | N1: DAG.getConstant(Val: IntrinsicOpcode, DL: dl, VT: MVT::i32), N2: Op->getOperand(Num: 0), |
| 16828 | N3: DAG.getConstant(Val: C, DL: dl, VT: MVT::i32)); |
| 16829 | |
| 16830 | if (IntBits < FloatBits) |
| 16831 | FixConv = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: N->getValueType(ResNo: 0), Operand: FixConv); |
| 16832 | |
| 16833 | return FixConv; |
| 16834 | } |
| 16835 | |
| 16836 | static SDValue PerformFAddVSelectCombine(SDNode *N, SelectionDAG &DAG, |
| 16837 | const ARMSubtarget *Subtarget) { |
| 16838 | if (!Subtarget->hasMVEFloatOps()) |
| 16839 | return SDValue(); |
| 16840 | |
| 16841 | // Turn (fadd x, (vselect c, y, -0.0)) into (vselect c, (fadd x, y), x) |
| 16842 | // The second form can be more easily turned into a predicated vadd, and |
| 16843 | // possibly combined into a fma to become a predicated vfma. |
| 16844 | SDValue Op0 = N->getOperand(Num: 0); |
| 16845 | SDValue Op1 = N->getOperand(Num: 1); |
| 16846 | EVT VT = N->getValueType(ResNo: 0); |
| 16847 | SDLoc DL(N); |
| 16848 | |
| 16849 | // The identity element for an fadd is -0.0 (or +0.0 when the nsz flag is
| 16850 | // set), which is what these VMOVs represent.
| 16851 | auto isIdentitySplat = [&](SDValue Op, bool NSZ) { |
| 16852 | if (Op.getOpcode() != ISD::BITCAST || |
| 16853 | Op.getOperand(i: 0).getOpcode() != ARMISD::VMOVIMM) |
| 16854 | return false; |
| 16855 | uint64_t ImmVal = Op.getOperand(i: 0).getConstantOperandVal(i: 0); |
| 16856 | if (VT == MVT::v4f32 && (ImmVal == 1664 || (ImmVal == 0 && NSZ))) |
| 16857 | return true; |
| 16858 | if (VT == MVT::v8f16 && (ImmVal == 2688 || (ImmVal == 0 && NSZ))) |
| 16859 | return true; |
| 16860 | return false; |
| 16861 | }; |
| 16862 | |
| 16863 | if (Op0.getOpcode() == ISD::VSELECT && Op1.getOpcode() != ISD::VSELECT) |
| 16864 | std::swap(a&: Op0, b&: Op1); |
| 16865 | |
| 16866 | if (Op1.getOpcode() != ISD::VSELECT) |
| 16867 | return SDValue(); |
| 16868 | |
| 16869 | SDNodeFlags FaddFlags = N->getFlags(); |
| 16870 | bool NSZ = FaddFlags.hasNoSignedZeros(); |
| 16871 | if (!isIdentitySplat(Op1.getOperand(i: 2), NSZ)) |
| 16872 | return SDValue(); |
| 16873 | |
| 16874 | SDValue FAdd = |
| 16875 | DAG.getNode(Opcode: ISD::FADD, DL, VT, N1: Op0, N2: Op1.getOperand(i: 1), Flags: FaddFlags); |
| 16876 | return DAG.getNode(Opcode: ISD::VSELECT, DL, VT, N1: Op1.getOperand(i: 0), N2: FAdd, N3: Op0, Flags: FaddFlags); |
| 16877 | } |
| 16878 | |
| 16879 | static SDValue PerformFADDVCMLACombine(SDNode *N, SelectionDAG &DAG) { |
| 16880 | SDValue LHS = N->getOperand(Num: 0); |
| 16881 | SDValue RHS = N->getOperand(Num: 1); |
| 16882 | EVT VT = N->getValueType(ResNo: 0); |
| 16883 | SDLoc DL(N); |
| 16884 | |
| 16885 | if (!N->getFlags().hasAllowReassociation()) |
| 16886 | return SDValue(); |
| 16887 | |
| 16888 | // Combine fadd(a, vcmla(b, c, d)) -> vcmla(fadd(a, b), c, d)
| 16889 | auto ReassocComplex = [&](SDValue A, SDValue B) { |
| 16890 | if (A.getOpcode() != ISD::INTRINSIC_WO_CHAIN) |
| 16891 | return SDValue(); |
| 16892 | unsigned Opc = A.getConstantOperandVal(i: 0); |
| 16893 | if (Opc != Intrinsic::arm_mve_vcmlaq) |
| 16894 | return SDValue(); |
| 16895 | SDValue VCMLA = DAG.getNode( |
| 16896 | Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT, N1: A.getOperand(i: 0), N2: A.getOperand(i: 1), |
| 16897 | N3: DAG.getNode(Opcode: ISD::FADD, DL, VT, N1: A.getOperand(i: 2), N2: B, Flags: N->getFlags()), |
| 16898 | N4: A.getOperand(i: 3), N5: A.getOperand(i: 4)); |
| 16899 | VCMLA->setFlags(A->getFlags()); |
| 16900 | return VCMLA; |
| 16901 | }; |
| 16902 | if (SDValue R = ReassocComplex(LHS, RHS)) |
| 16903 | return R; |
| 16904 | if (SDValue R = ReassocComplex(RHS, LHS)) |
| 16905 | return R; |
| 16906 | |
| 16907 | return SDValue(); |
| 16908 | } |
| 16909 | |
| 16910 | static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, |
| 16911 | const ARMSubtarget *Subtarget) { |
| 16912 | if (SDValue S = PerformFAddVSelectCombine(N, DAG, Subtarget)) |
| 16913 | return S; |
| 16914 | if (SDValue S = PerformFADDVCMLACombine(N, DAG)) |
| 16915 | return S; |
| 16916 | return SDValue(); |
| 16917 | } |
| 16918 | |
| 16919 | /// PerformVMulVCTPCombine - VCVT (fixed-point to floating-point, Advanced SIMD) |
| 16920 | /// can replace combinations of VCVT (integer to floating-point) and VMUL |
| 16921 | /// when the VMUL has a constant operand that is a power of 2. |
| 16922 | /// |
| 16923 | /// Example (assume d17 = <float 0.125, float 0.125>): |
| 16924 | /// vcvt.f32.s32 d16, d16 |
| 16925 | /// vmul.f32 d16, d16, d17 |
| 16926 | /// becomes: |
| 16927 | /// vcvt.f32.s32 d16, d16, #3 |
| 16928 | static SDValue PerformVMulVCTPCombine(SDNode *N, SelectionDAG &DAG, |
| 16929 | const ARMSubtarget *Subtarget) { |
| 16930 | if (!Subtarget->hasNEON()) |
| 16931 | return SDValue(); |
| 16932 | |
| 16933 | SDValue Op = N->getOperand(Num: 0); |
| 16934 | unsigned OpOpcode = Op.getNode()->getOpcode(); |
| 16935 | if (!N->getValueType(ResNo: 0).isVector() || !N->getValueType(ResNo: 0).isSimple() || |
| 16936 | (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP)) |
| 16937 | return SDValue(); |
| 16938 | |
| 16939 | SDValue ConstVec = N->getOperand(Num: 1); |
| 16940 | if (!isa<BuildVectorSDNode>(Val: ConstVec)) |
| 16941 | return SDValue(); |
| 16942 | |
| 16943 | MVT FloatTy = N->getSimpleValueType(ResNo: 0).getVectorElementType(); |
| 16944 | uint32_t FloatBits = FloatTy.getSizeInBits(); |
| 16945 | MVT IntTy = Op.getOperand(i: 0).getSimpleValueType().getVectorElementType(); |
| 16946 | uint32_t IntBits = IntTy.getSizeInBits(); |
| 16947 | unsigned NumLanes = Op.getValueType().getVectorNumElements(); |
| 16948 | if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) { |
| 16949 | // These instructions only exist converting from i32 to f32. We can handle |
| 16950 | // smaller integers by generating an extra extend, but larger ones would |
| 16951 | // be lossy. We also can't handle anything other than 2 or 4 lanes, since |
| 16952 | // these instructions only support v2i32/v4i32 types.
| 16953 | return SDValue(); |
| 16954 | } |
| 16955 | |
| 16956 | ConstantFPSDNode *CN = isConstOrConstSplatFP(N: ConstVec, AllowUndefs: true); |
| 16957 | APFloat Recip(0.0f); |
| 16958 | if (!CN || !CN->getValueAPF().getExactInverse(Inv: &Recip)) |
| 16959 | return SDValue(); |
| 16960 | |
| 16961 | bool IsExact; |
| 16962 | APSInt IntVal(33); |
| 16963 | if (Recip.convertToInteger(Result&: IntVal, RM: APFloat::rmTowardZero, IsExact: &IsExact) != |
| 16964 | APFloat::opOK || |
| 16965 | !IsExact) |
| 16966 | return SDValue(); |
| 16967 | |
| 16968 | int32_t C = IntVal.exactLogBase2(); |
| 16969 | if (C == -1 || C == 0 || C > 32) |
| 16970 | return SDValue(); |
| 16971 | |
| 16972 | SDLoc DL(N); |
| 16973 | bool isSigned = OpOpcode == ISD::SINT_TO_FP; |
| 16974 | SDValue ConvInput = Op.getOperand(i: 0); |
| 16975 | if (IntBits < FloatBits) |
| 16976 | ConvInput = DAG.getNode(Opcode: isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL, |
| 16977 | VT: NumLanes == 2 ? MVT::v2i32 : MVT::v4i32, Operand: ConvInput); |
| 16978 | |
| 16979 | unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp |
| 16980 | : Intrinsic::arm_neon_vcvtfxu2fp; |
| 16981 | return DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT: Op.getValueType(), |
| 16982 | N1: DAG.getConstant(Val: IntrinsicOpcode, DL, VT: MVT::i32), N2: ConvInput, |
| 16983 | N3: DAG.getConstant(Val: C, DL, VT: MVT::i32)); |
| 16984 | } |
| 16985 | |
| 16986 | static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG, |
| 16987 | const ARMSubtarget *ST) { |
| 16988 | if (!ST->hasMVEIntegerOps()) |
| 16989 | return SDValue(); |
| 16990 | |
| 16991 | assert(N->getOpcode() == ISD::VECREDUCE_ADD); |
| 16992 | EVT ResVT = N->getValueType(ResNo: 0); |
| 16993 | SDValue N0 = N->getOperand(Num: 0); |
| 16994 | SDLoc dl(N); |
| 16995 | |
| 16996 | // Try to turn vecreduce_add(add(x, y)) into vecreduce(x) + vecreduce(y) |
| 16997 | if (ResVT == MVT::i32 && N0.getOpcode() == ISD::ADD && |
| 16998 | (N0.getValueType() == MVT::v4i32 || N0.getValueType() == MVT::v8i16 || |
| 16999 | N0.getValueType() == MVT::v16i8)) { |
| 17000 | SDValue Red0 = DAG.getNode(Opcode: ISD::VECREDUCE_ADD, DL: dl, VT: ResVT, Operand: N0.getOperand(i: 0)); |
| 17001 | SDValue Red1 = DAG.getNode(Opcode: ISD::VECREDUCE_ADD, DL: dl, VT: ResVT, Operand: N0.getOperand(i: 1)); |
| 17002 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: ResVT, N1: Red0, N2: Red1); |
| 17003 | } |
| 17004 | |
| 17005 | // We are looking for something that will have illegal types if left alone, |
| 17006 | // but that we can convert to a single instruction under MVE. For example |
| 17007 | // vecreduce_add(sext(A, v8i32)) => VADDV.s16 A |
| 17008 | // or |
| 17009 | // vecreduce_add(mul(zext(A, v16i32), zext(B, v16i32))) => VMLADAV.u8 A, B |
| 17010 | |
| 17011 | // The legal cases are: |
| 17012 | // VADDV u/s 8/16/32 |
| 17013 | // VMLAV u/s 8/16/32 |
| 17014 | // VADDLV u/s 32 |
| 17015 | // VMLALV u/s 16/32 |
| 17016 | |
| 17017 | // If the input vector is smaller than legal (v4i8/v4i16 for example) we can |
| 17018 | // extend it and use v4i32 instead. |
| 17019 | auto ExtTypeMatches = [](SDValue A, ArrayRef<MVT> ExtTypes) { |
| 17020 | EVT AVT = A.getValueType(); |
| 17021 | return any_of(Range&: ExtTypes, P: [&](MVT Ty) { |
| 17022 | return AVT.getVectorNumElements() == Ty.getVectorNumElements() && |
| 17023 | AVT.bitsLE(VT: Ty); |
| 17024 | }); |
| 17025 | }; |
| 17026 | auto ExtendIfNeeded = [&](SDValue A, unsigned ExtendCode) { |
| 17027 | EVT AVT = A.getValueType(); |
| 17028 | if (!AVT.is128BitVector()) |
| 17029 | A = DAG.getNode( |
| 17030 | Opcode: ExtendCode, DL: dl, |
| 17031 | VT: AVT.changeVectorElementType( |
| 17032 | Context&: *DAG.getContext(), |
| 17033 | EltVT: MVT::getIntegerVT(BitWidth: 128 / AVT.getVectorMinNumElements())), |
| 17034 | Operand: A); |
| 17035 | return A; |
| 17036 | }; |
| 17037 | auto IsVADDV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes) { |
| 17038 | if (ResVT != RetTy || N0->getOpcode() != ExtendCode) |
| 17039 | return SDValue(); |
| 17040 | SDValue A = N0->getOperand(Num: 0); |
| 17041 | if (ExtTypeMatches(A, ExtTypes)) |
| 17042 | return ExtendIfNeeded(A, ExtendCode); |
| 17043 | return SDValue(); |
| 17044 | }; |
| 17045 | auto IsPredVADDV = [&](MVT RetTy, unsigned ExtendCode, |
| 17046 | ArrayRef<MVT> ExtTypes, SDValue &Mask) { |
| 17047 | if (ResVT != RetTy || N0->getOpcode() != ISD::VSELECT || |
| 17048 | !ISD::isBuildVectorAllZeros(N: N0->getOperand(Num: 2).getNode())) |
| 17049 | return SDValue(); |
| 17050 | Mask = N0->getOperand(Num: 0); |
| 17051 | SDValue Ext = N0->getOperand(Num: 1); |
| 17052 | if (Ext->getOpcode() != ExtendCode) |
| 17053 | return SDValue(); |
| 17054 | SDValue A = Ext->getOperand(Num: 0); |
| 17055 | if (ExtTypeMatches(A, ExtTypes)) |
| 17056 | return ExtendIfNeeded(A, ExtendCode); |
| 17057 | return SDValue(); |
| 17058 | }; |
| 17059 | auto IsVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes, |
| 17060 | SDValue &A, SDValue &B) { |
| 17061 | // For a vmla we are trying to match a larger pattern: |
| 17062 | // ExtA = sext/zext A |
| 17063 | // ExtB = sext/zext B |
| 17064 | // Mul = mul ExtA, ExtB |
| 17065 | // vecreduce.add Mul |
| 17066 | // There might also be an extra extend between the mul and the addreduce, so
| 17067 | // long as the bitwidth is high enough to make them equivalent (for example |
| 17068 | // original v8i16 might be mul at v8i32 and the reduce happens at v8i64). |
| 17069 | if (ResVT != RetTy) |
| 17070 | return false; |
| 17071 | SDValue Mul = N0; |
| 17072 | if (Mul->getOpcode() == ExtendCode && |
| 17073 | Mul->getOperand(Num: 0).getScalarValueSizeInBits() * 2 >= |
| 17074 | ResVT.getScalarSizeInBits()) |
| 17075 | Mul = Mul->getOperand(Num: 0); |
| 17076 | if (Mul->getOpcode() != ISD::MUL) |
| 17077 | return false; |
| 17078 | SDValue ExtA = Mul->getOperand(Num: 0); |
| 17079 | SDValue ExtB = Mul->getOperand(Num: 1); |
| 17080 | if (ExtA->getOpcode() != ExtendCode || ExtB->getOpcode() != ExtendCode) |
| 17081 | return false; |
| 17082 | A = ExtA->getOperand(Num: 0); |
| 17083 | B = ExtB->getOperand(Num: 0); |
| 17084 | if (ExtTypeMatches(A, ExtTypes) && ExtTypeMatches(B, ExtTypes)) { |
| 17085 | A = ExtendIfNeeded(A, ExtendCode); |
| 17086 | B = ExtendIfNeeded(B, ExtendCode); |
| 17087 | return true; |
| 17088 | } |
| 17089 | return false; |
| 17090 | }; |
| 17091 | auto IsPredVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes, |
| 17092 | SDValue &A, SDValue &B, SDValue &Mask) { |
| 17093 | // Same as the pattern above with a select for the zero predicated lanes |
| 17094 | // ExtA = sext/zext A |
| 17095 | // ExtB = sext/zext B |
| 17096 | // Mul = mul ExtA, ExtB |
| 17097 | // N0 = select Mask, Mul, 0 |
| 17098 | // vecreduce.add N0 |
| 17099 | if (ResVT != RetTy || N0->getOpcode() != ISD::VSELECT || |
| 17100 | !ISD::isBuildVectorAllZeros(N: N0->getOperand(Num: 2).getNode())) |
| 17101 | return false; |
| 17102 | Mask = N0->getOperand(Num: 0); |
| 17103 | SDValue Mul = N0->getOperand(Num: 1); |
| 17104 | if (Mul->getOpcode() == ExtendCode && |
| 17105 | Mul->getOperand(Num: 0).getScalarValueSizeInBits() * 2 >= |
| 17106 | ResVT.getScalarSizeInBits()) |
| 17107 | Mul = Mul->getOperand(Num: 0); |
| 17108 | if (Mul->getOpcode() != ISD::MUL) |
| 17109 | return false; |
| 17110 | SDValue ExtA = Mul->getOperand(Num: 0); |
| 17111 | SDValue ExtB = Mul->getOperand(Num: 1); |
| 17112 | if (ExtA->getOpcode() != ExtendCode || ExtB->getOpcode() != ExtendCode) |
| 17113 | return false; |
| 17114 | A = ExtA->getOperand(Num: 0); |
| 17115 | B = ExtB->getOperand(Num: 0); |
| 17116 | if (ExtTypeMatches(A, ExtTypes) && ExtTypeMatches(B, ExtTypes)) { |
| 17117 | A = ExtendIfNeeded(A, ExtendCode); |
| 17118 | B = ExtendIfNeeded(B, ExtendCode); |
| 17119 | return true; |
| 17120 | } |
| 17121 | return false; |
| 17122 | }; |
| 17123 | auto Create64bitNode = [&](unsigned Opcode, ArrayRef<SDValue> Ops) { |
| 17124 | // Split illegal MVT::v16i8->i64 vector reductions into two legal v8i16->i64 |
| 17125 | // reductions. The operands are extended with MVEEXT, but as they are |
| 17126 | // reductions the lane orders do not matter. MVEEXT may be combined with |
| 17127 | // loads to produce two extending loads, or else they will be expanded to |
| 17128 | // VREV/VMOVL. |
| 17129 | EVT VT = Ops[0].getValueType(); |
| 17130 | if (VT == MVT::v16i8) { |
| 17131 | assert((Opcode == ARMISD::VMLALVs || Opcode == ARMISD::VMLALVu) && |
| 17132 | "Unexpected illegal long reduction opcode" ); |
| 17133 | bool IsUnsigned = Opcode == ARMISD::VMLALVu; |
| 17134 | |
| 17135 | SDValue Ext0 = |
| 17136 | DAG.getNode(Opcode: IsUnsigned ? ARMISD::MVEZEXT : ARMISD::MVESEXT, DL: dl, |
| 17137 | VTList: DAG.getVTList(VT1: MVT::v8i16, VT2: MVT::v8i16), N: Ops[0]); |
| 17138 | SDValue Ext1 = |
| 17139 | DAG.getNode(Opcode: IsUnsigned ? ARMISD::MVEZEXT : ARMISD::MVESEXT, DL: dl, |
| 17140 | VTList: DAG.getVTList(VT1: MVT::v8i16, VT2: MVT::v8i16), N: Ops[1]); |
| 17141 | |
| 17142 | SDValue MLA0 = DAG.getNode(Opcode, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
| 17143 | N1: Ext0, N2: Ext1); |
| 17144 | SDValue MLA1 = |
| 17145 | DAG.getNode(Opcode: IsUnsigned ? ARMISD::VMLALVAu : ARMISD::VMLALVAs, DL: dl, |
| 17146 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N1: MLA0, N2: MLA0.getValue(R: 1), |
| 17147 | N3: Ext0.getValue(R: 1), N4: Ext1.getValue(R: 1)); |
| 17148 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: MLA1, N2: MLA1.getValue(R: 1)); |
| 17149 | } |
| 17150 | SDValue Node = DAG.getNode(Opcode, DL: dl, ResultTys: {MVT::i32, MVT::i32}, Ops); |
| 17151 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Node, |
| 17152 | N2: SDValue(Node.getNode(), 1)); |
| 17153 | }; |
| 17154 | |
| 17155 | SDValue A, B; |
| 17156 | SDValue Mask; |
| 17157 | if (IsVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B)) |
| 17158 | return DAG.getNode(Opcode: ARMISD::VMLAVs, DL: dl, VT: ResVT, N1: A, N2: B); |
| 17159 | if (IsVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B)) |
| 17160 | return DAG.getNode(Opcode: ARMISD::VMLAVu, DL: dl, VT: ResVT, N1: A, N2: B); |
| 17161 | if (IsVMLAV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v16i8, MVT::v8i16, MVT::v4i32}, |
| 17162 | A, B)) |
| 17163 | return Create64bitNode(ARMISD::VMLALVs, {A, B}); |
| 17164 | if (IsVMLAV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v16i8, MVT::v8i16, MVT::v4i32}, |
| 17165 | A, B)) |
| 17166 | return Create64bitNode(ARMISD::VMLALVu, {A, B}); |
| 17167 | if (IsVMLAV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, A, B)) |
| 17168 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17169 | Operand: DAG.getNode(Opcode: ARMISD::VMLAVs, DL: dl, VT: MVT::i32, N1: A, N2: B)); |
| 17170 | if (IsVMLAV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, A, B)) |
| 17171 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17172 | Operand: DAG.getNode(Opcode: ARMISD::VMLAVu, DL: dl, VT: MVT::i32, N1: A, N2: B)); |
| 17173 | |
| 17174 | if (IsPredVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B, |
| 17175 | Mask)) |
| 17176 | return DAG.getNode(Opcode: ARMISD::VMLAVps, DL: dl, VT: ResVT, N1: A, N2: B, N3: Mask); |
| 17177 | if (IsPredVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B, |
| 17178 | Mask)) |
| 17179 | return DAG.getNode(Opcode: ARMISD::VMLAVpu, DL: dl, VT: ResVT, N1: A, N2: B, N3: Mask); |
| 17180 | if (IsPredVMLAV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B, |
| 17181 | Mask)) |
| 17182 | return Create64bitNode(ARMISD::VMLALVps, {A, B, Mask}); |
| 17183 | if (IsPredVMLAV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B, |
| 17184 | Mask)) |
| 17185 | return Create64bitNode(ARMISD::VMLALVpu, {A, B, Mask}); |
| 17186 | if (IsPredVMLAV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, A, B, Mask)) |
| 17187 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17188 | Operand: DAG.getNode(Opcode: ARMISD::VMLAVps, DL: dl, VT: MVT::i32, N1: A, N2: B, N3: Mask)); |
| 17189 | if (IsPredVMLAV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, A, B, Mask)) |
| 17190 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17191 | Operand: DAG.getNode(Opcode: ARMISD::VMLAVpu, DL: dl, VT: MVT::i32, N1: A, N2: B, N3: Mask)); |
| 17192 | |
| 17193 | if (SDValue A = IsVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8})) |
| 17194 | return DAG.getNode(Opcode: ARMISD::VADDVs, DL: dl, VT: ResVT, Operand: A); |
| 17195 | if (SDValue A = IsVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8})) |
| 17196 | return DAG.getNode(Opcode: ARMISD::VADDVu, DL: dl, VT: ResVT, Operand: A); |
| 17197 | if (SDValue A = IsVADDV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v4i32})) |
| 17198 | return Create64bitNode(ARMISD::VADDLVs, {A}); |
| 17199 | if (SDValue A = IsVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32})) |
| 17200 | return Create64bitNode(ARMISD::VADDLVu, {A}); |
| 17201 | if (SDValue A = IsVADDV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8})) |
| 17202 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17203 | Operand: DAG.getNode(Opcode: ARMISD::VADDVs, DL: dl, VT: MVT::i32, Operand: A)); |
| 17204 | if (SDValue A = IsVADDV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8})) |
| 17205 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17206 | Operand: DAG.getNode(Opcode: ARMISD::VADDVu, DL: dl, VT: MVT::i32, Operand: A)); |
| 17207 | |
| 17208 | if (SDValue A = IsPredVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, Mask)) |
| 17209 | return DAG.getNode(Opcode: ARMISD::VADDVps, DL: dl, VT: ResVT, N1: A, N2: Mask); |
| 17210 | if (SDValue A = IsPredVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, Mask)) |
| 17211 | return DAG.getNode(Opcode: ARMISD::VADDVpu, DL: dl, VT: ResVT, N1: A, N2: Mask); |
| 17212 | if (SDValue A = IsPredVADDV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v4i32}, Mask)) |
| 17213 | return Create64bitNode(ARMISD::VADDLVps, {A, Mask}); |
| 17214 | if (SDValue A = IsPredVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32}, Mask)) |
| 17215 | return Create64bitNode(ARMISD::VADDLVpu, {A, Mask}); |
| 17216 | if (SDValue A = IsPredVADDV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, Mask)) |
| 17217 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17218 | Operand: DAG.getNode(Opcode: ARMISD::VADDVps, DL: dl, VT: MVT::i32, N1: A, N2: Mask)); |
| 17219 | if (SDValue A = IsPredVADDV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, Mask)) |
| 17220 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17221 | Operand: DAG.getNode(Opcode: ARMISD::VADDVpu, DL: dl, VT: MVT::i32, N1: A, N2: Mask)); |
| 17222 | |
| 17223 | // Some complications. We can get a case where the two inputs of the mul are |
| 17224 | // the same, then the output sext will have been helpfully converted to a |
| 17225 | // zext. Turn it back. |
| 17226 | SDValue Op = N0; |
| 17227 | if (Op->getOpcode() == ISD::VSELECT) |
| 17228 | Op = Op->getOperand(Num: 1); |
| 17229 | if (Op->getOpcode() == ISD::ZERO_EXTEND && |
| 17230 | Op->getOperand(Num: 0)->getOpcode() == ISD::MUL) { |
| 17231 | SDValue Mul = Op->getOperand(Num: 0); |
| 17232 | if (Mul->getOperand(Num: 0) == Mul->getOperand(Num: 1) && |
| 17233 | Mul->getOperand(Num: 0)->getOpcode() == ISD::SIGN_EXTEND) { |
| 17234 | SDValue Ext = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: N0->getValueType(ResNo: 0), Operand: Mul); |
| 17235 | if (Op != N0) |
| 17236 | Ext = DAG.getNode(Opcode: ISD::VSELECT, DL: dl, VT: N0->getValueType(ResNo: 0), |
| 17237 | N1: N0->getOperand(Num: 0), N2: Ext, N3: N0->getOperand(Num: 2)); |
| 17238 | return DAG.getNode(Opcode: ISD::VECREDUCE_ADD, DL: dl, VT: ResVT, Operand: Ext); |
| 17239 | } |
| 17240 | } |
| 17241 | |
| 17242 | return SDValue(); |
| 17243 | } |
| 17244 | |
| 17245 | // Looks for vaddv(shuffle) or vmlav(shuffle, shuffle), with a shuffle where all |
| 17246 | // the lanes are used. Due to the reduction being commutative the shuffle can be |
| 17247 | // removed. |
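| | // For example (illustrative):
| | //   vaddv(shuffle %x, undef, <3, 2, 1, 0>) -> vaddv(%x)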
| 17248 | static SDValue PerformReduceShuffleCombine(SDNode *N, SelectionDAG &DAG) { |
| 17249 | unsigned VecOp = N->getOperand(Num: 0).getValueType().isVector() ? 0 : 2; |
| 17250 | auto *Shuf = dyn_cast<ShuffleVectorSDNode>(Val: N->getOperand(Num: VecOp)); |
| 17251 | if (!Shuf || !Shuf->getOperand(Num: 1).isUndef()) |
| 17252 | return SDValue(); |
| 17253 | |
| 17254 | // Check all elements are used once in the mask. |
| 17255 | ArrayRef<int> Mask = Shuf->getMask(); |
| 17256 | APInt SetElts(Mask.size(), 0); |
| 17257 | for (int E : Mask) { |
| 17258 | if (E < 0 || E >= (int)Mask.size()) |
| 17259 | return SDValue(); |
| 17260 | SetElts.setBit(E); |
| 17261 | } |
| 17262 | if (!SetElts.isAllOnes()) |
| 17263 | return SDValue(); |
| 17264 | |
| 17265 | if (N->getNumOperands() != VecOp + 1) { |
| 17266 | auto *Shuf2 = dyn_cast<ShuffleVectorSDNode>(Val: N->getOperand(Num: VecOp + 1)); |
| 17267 | if (!Shuf2 || !Shuf2->getOperand(Num: 1).isUndef() || Shuf2->getMask() != Mask) |
| 17268 | return SDValue(); |
| 17269 | } |
| 17270 | |
| 17271 | SmallVector<SDValue> Ops; |
| 17272 | for (SDValue Op : N->ops()) { |
| 17273 | if (Op.getValueType().isVector()) |
| 17274 | Ops.push_back(Elt: Op.getOperand(i: 0)); |
| 17275 | else |
| 17276 | Ops.push_back(Elt: Op); |
| 17277 | } |
| 17278 | return DAG.getNode(Opcode: N->getOpcode(), DL: SDLoc(N), VTList: N->getVTList(), Ops); |
| 17279 | } |
| 17280 | |
| 17281 | static SDValue PerformVMOVNCombine(SDNode *N, |
| 17282 | TargetLowering::DAGCombinerInfo &DCI) { |
| 17283 | SDValue Op0 = N->getOperand(Num: 0); |
| 17284 | SDValue Op1 = N->getOperand(Num: 1); |
| 17285 | unsigned IsTop = N->getConstantOperandVal(Num: 2); |
| 17286 | |
| 17287 | // VMOVNT a undef -> a |
| 17288 | // VMOVNB a undef -> a |
| 17289 | // VMOVNB undef a -> a |
| 17290 | if (Op1->isUndef()) |
| 17291 | return Op0; |
| 17292 | if (Op0->isUndef() && !IsTop) |
| 17293 | return Op1; |
| 17294 | |
| 17295 | // VMOVNt(c, VQMOVNb(a, b)) => VQMOVNt(c, b) |
| 17296 | // VMOVNb(c, VQMOVNb(a, b)) => VQMOVNb(c, b) |
| 17297 | if ((Op1->getOpcode() == ARMISD::VQMOVNs || |
| 17298 | Op1->getOpcode() == ARMISD::VQMOVNu) && |
| 17299 | Op1->getConstantOperandVal(Num: 2) == 0) |
| 17300 | return DCI.DAG.getNode(Opcode: Op1->getOpcode(), DL: SDLoc(Op1), VT: N->getValueType(ResNo: 0), |
| 17301 | N1: Op0, N2: Op1->getOperand(Num: 1), N3: N->getOperand(Num: 2)); |
| 17302 | |
| 17303 | // Only the bottom lanes from Qm (Op1) and either the top or bottom lanes from |
| 17304 | // Qd (Op0) are demanded from a VMOVN, depending on whether we are inserting |
| 17305 | // into the top or bottom lanes. |
| 17306 | unsigned NumElts = N->getValueType(ResNo: 0).getVectorNumElements(); |
| 17307 | APInt Op1DemandedElts = APInt::getSplat(NewLen: NumElts, V: APInt::getLowBitsSet(numBits: 2, loBitsSet: 1)); |
| 17308 | APInt Op0DemandedElts = |
| 17309 | IsTop ? Op1DemandedElts |
| 17310 | : APInt::getSplat(NewLen: NumElts, V: APInt::getHighBitsSet(numBits: 2, hiBitsSet: 1)); |
| 17311 | |
| 17312 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); |
| 17313 | if (TLI.SimplifyDemandedVectorElts(Op: Op0, DemandedElts: Op0DemandedElts, DCI)) |
| 17314 | return SDValue(N, 0); |
| 17315 | if (TLI.SimplifyDemandedVectorElts(Op: Op1, DemandedElts: Op1DemandedElts, DCI)) |
| 17316 | return SDValue(N, 0); |
| 17317 | |
| 17318 | return SDValue(); |
| 17319 | } |
| 17320 | |
| 17321 | static SDValue PerformVQMOVNCombine(SDNode *N, |
| 17322 | TargetLowering::DAGCombinerInfo &DCI) { |
| 17323 | SDValue Op0 = N->getOperand(Num: 0); |
| 17324 | unsigned IsTop = N->getConstantOperandVal(Num: 2); |
| 17325 | |
| 17326 | unsigned NumElts = N->getValueType(ResNo: 0).getVectorNumElements(); |
| 17327 | APInt Op0DemandedElts = |
| 17328 | APInt::getSplat(NewLen: NumElts, V: IsTop ? APInt::getLowBitsSet(numBits: 2, loBitsSet: 1) |
| 17329 | : APInt::getHighBitsSet(numBits: 2, hiBitsSet: 1)); |
| 17330 | |
| 17331 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); |
| 17332 | if (TLI.SimplifyDemandedVectorElts(Op: Op0, DemandedElts: Op0DemandedElts, DCI)) |
| 17333 | return SDValue(N, 0); |
| 17334 | return SDValue(); |
| 17335 | } |
| 17336 | |
| 17337 | static SDValue PerformVQDMULHCombine(SDNode *N, |
| 17338 | TargetLowering::DAGCombinerInfo &DCI) { |
| 17339 | EVT VT = N->getValueType(ResNo: 0); |
| 17340 | SDValue LHS = N->getOperand(Num: 0); |
| 17341 | SDValue RHS = N->getOperand(Num: 1); |
| 17342 | |
| 17343 | auto *Shuf0 = dyn_cast<ShuffleVectorSDNode>(Val&: LHS); |
| 17344 | auto *Shuf1 = dyn_cast<ShuffleVectorSDNode>(Val&: RHS); |
| 17345 | // Turn VQDMULH(shuffle, shuffle) -> shuffle(VQDMULH) |
| 17346 | if (Shuf0 && Shuf1 && Shuf0->getMask().equals(RHS: Shuf1->getMask()) && |
| 17347 | LHS.getOperand(i: 1).isUndef() && RHS.getOperand(i: 1).isUndef() && |
| 17348 | (LHS.hasOneUse() || RHS.hasOneUse() || LHS == RHS)) { |
| 17349 | SDLoc DL(N); |
| 17350 | SDValue NewBinOp = DCI.DAG.getNode(Opcode: N->getOpcode(), DL, VT, |
| 17351 | N1: LHS.getOperand(i: 0), N2: RHS.getOperand(i: 0)); |
| 17352 | SDValue UndefV = LHS.getOperand(i: 1); |
| 17353 | return DCI.DAG.getVectorShuffle(VT, dl: DL, N1: NewBinOp, N2: UndefV, Mask: Shuf0->getMask()); |
| 17354 | } |
| 17355 | return SDValue(); |
| 17356 | } |
| 17357 | |
| 17358 | static SDValue PerformLongShiftCombine(SDNode *N, SelectionDAG &DAG) { |
| 17359 | SDLoc DL(N); |
| 17360 | SDValue Op0 = N->getOperand(Num: 0); |
| 17361 | SDValue Op1 = N->getOperand(Num: 1); |
| 17362 | |
  // Turn X << -C -> X >> C and vice versa. The negative shifts can come up
  // from uses of the intrinsics.
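  // For example, an LSLL of x by -3 is rewritten here as an LSRL of x by 3,
  // and vice versa; only the sign of the constant distinguishes the two.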
| 17365 | if (auto C = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 2))) { |
| 17366 | int ShiftAmt = C->getSExtValue(); |
| 17367 | if (ShiftAmt == 0) { |
| 17368 | SDValue Merge = DAG.getMergeValues(Ops: {Op0, Op1}, dl: DL); |
| 17369 | DAG.ReplaceAllUsesWith(From: N, To: Merge.getNode()); |
| 17370 | return SDValue(); |
| 17371 | } |
| 17372 | |
| 17373 | if (ShiftAmt >= -32 && ShiftAmt < 0) { |
| 17374 | unsigned NewOpcode = |
| 17375 | N->getOpcode() == ARMISD::LSLL ? ARMISD::LSRL : ARMISD::LSLL; |
| 17376 | SDValue NewShift = DAG.getNode(Opcode: NewOpcode, DL, VTList: N->getVTList(), N1: Op0, N2: Op1, |
| 17377 | N3: DAG.getConstant(Val: -ShiftAmt, DL, VT: MVT::i32)); |
| 17378 | DAG.ReplaceAllUsesWith(From: N, To: NewShift.getNode()); |
| 17379 | return NewShift; |
| 17380 | } |
| 17381 | } |
| 17382 | |
| 17383 | return SDValue(); |
| 17384 | } |
| 17385 | |
| 17386 | /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. |
| 17387 | SDValue ARMTargetLowering::PerformIntrinsicCombine(SDNode *N, |
| 17388 | DAGCombinerInfo &DCI) const { |
| 17389 | SelectionDAG &DAG = DCI.DAG; |
| 17390 | unsigned IntNo = N->getConstantOperandVal(Num: 0); |
| 17391 | switch (IntNo) { |
| 17392 | default: |
| 17393 | // Don't do anything for most intrinsics. |
| 17394 | break; |
| 17395 | |
| 17396 | // Vector shifts: check for immediate versions and lower them. |
| 17397 | // Note: This is done during DAG combining instead of DAG legalizing because |
| 17398 | // the build_vectors for 64-bit vector element shift counts are generally |
| 17399 | // not legal, and it is hard to see their values after they get legalized to |
| 17400 | // loads from a constant pool. |
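  // For example, an arm_neon_vshifts whose shift operand is a build_vector
  // splat of 3 becomes a VSHLIMM by 3, while a splat of -3 becomes a
  // VSHRsIMM by 3.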
| 17401 | case Intrinsic::arm_neon_vshifts: |
| 17402 | case Intrinsic::arm_neon_vshiftu: |
| 17403 | case Intrinsic::arm_neon_vrshifts: |
| 17404 | case Intrinsic::arm_neon_vrshiftu: |
| 17405 | case Intrinsic::arm_neon_vrshiftn: |
| 17406 | case Intrinsic::arm_neon_vqshifts: |
| 17407 | case Intrinsic::arm_neon_vqshiftu: |
| 17408 | case Intrinsic::arm_neon_vqshiftsu: |
| 17409 | case Intrinsic::arm_neon_vqshiftns: |
| 17410 | case Intrinsic::arm_neon_vqshiftnu: |
| 17411 | case Intrinsic::arm_neon_vqshiftnsu: |
| 17412 | case Intrinsic::arm_neon_vqrshiftns: |
| 17413 | case Intrinsic::arm_neon_vqrshiftnu: |
| 17414 | case Intrinsic::arm_neon_vqrshiftnsu: { |
| 17415 | EVT VT = N->getOperand(Num: 1).getValueType(); |
| 17416 | int64_t Cnt; |
| 17417 | unsigned VShiftOpc = 0; |
| 17418 | |
| 17419 | switch (IntNo) { |
| 17420 | case Intrinsic::arm_neon_vshifts: |
| 17421 | case Intrinsic::arm_neon_vshiftu: |
| 17422 | if (isVShiftLImm(Op: N->getOperand(Num: 2), VT, isLong: false, Cnt)) { |
| 17423 | VShiftOpc = ARMISD::VSHLIMM; |
| 17424 | break; |
| 17425 | } |
| 17426 | if (isVShiftRImm(Op: N->getOperand(Num: 2), VT, isNarrow: false, isIntrinsic: true, Cnt)) { |
| 17427 | VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? ARMISD::VSHRsIMM |
| 17428 | : ARMISD::VSHRuIMM); |
| 17429 | break; |
| 17430 | } |
| 17431 | return SDValue(); |
| 17432 | |
| 17433 | case Intrinsic::arm_neon_vrshifts: |
| 17434 | case Intrinsic::arm_neon_vrshiftu: |
| 17435 | if (isVShiftRImm(Op: N->getOperand(Num: 2), VT, isNarrow: false, isIntrinsic: true, Cnt)) |
| 17436 | break; |
| 17437 | return SDValue(); |
| 17438 | |
| 17439 | case Intrinsic::arm_neon_vqshifts: |
| 17440 | case Intrinsic::arm_neon_vqshiftu: |
| 17441 | if (isVShiftLImm(Op: N->getOperand(Num: 2), VT, isLong: false, Cnt)) |
| 17442 | break; |
| 17443 | return SDValue(); |
| 17444 | |
| 17445 | case Intrinsic::arm_neon_vqshiftsu: |
| 17446 | if (isVShiftLImm(Op: N->getOperand(Num: 2), VT, isLong: false, Cnt)) |
| 17447 | break; |
| 17448 | llvm_unreachable("invalid shift count for vqshlu intrinsic" ); |
| 17449 | |
| 17450 | case Intrinsic::arm_neon_vrshiftn: |
| 17451 | case Intrinsic::arm_neon_vqshiftns: |
| 17452 | case Intrinsic::arm_neon_vqshiftnu: |
| 17453 | case Intrinsic::arm_neon_vqshiftnsu: |
| 17454 | case Intrinsic::arm_neon_vqrshiftns: |
| 17455 | case Intrinsic::arm_neon_vqrshiftnu: |
| 17456 | case Intrinsic::arm_neon_vqrshiftnsu: |
| 17457 | // Narrowing shifts require an immediate right shift. |
| 17458 | if (isVShiftRImm(Op: N->getOperand(Num: 2), VT, isNarrow: true, isIntrinsic: true, Cnt)) |
| 17459 | break; |
| 17460 | llvm_unreachable("invalid shift count for narrowing vector shift " |
| 17461 | "intrinsic" ); |
| 17462 | |
| 17463 | default: |
| 17464 | llvm_unreachable("unhandled vector shift" ); |
| 17465 | } |
| 17466 | |
| 17467 | switch (IntNo) { |
| 17468 | case Intrinsic::arm_neon_vshifts: |
| 17469 | case Intrinsic::arm_neon_vshiftu: |
| 17470 | // Opcode already set above. |
| 17471 | break; |
| 17472 | case Intrinsic::arm_neon_vrshifts: |
| 17473 | VShiftOpc = ARMISD::VRSHRsIMM; |
| 17474 | break; |
| 17475 | case Intrinsic::arm_neon_vrshiftu: |
| 17476 | VShiftOpc = ARMISD::VRSHRuIMM; |
| 17477 | break; |
| 17478 | case Intrinsic::arm_neon_vrshiftn: |
| 17479 | VShiftOpc = ARMISD::VRSHRNIMM; |
| 17480 | break; |
| 17481 | case Intrinsic::arm_neon_vqshifts: |
| 17482 | VShiftOpc = ARMISD::VQSHLsIMM; |
| 17483 | break; |
| 17484 | case Intrinsic::arm_neon_vqshiftu: |
| 17485 | VShiftOpc = ARMISD::VQSHLuIMM; |
| 17486 | break; |
| 17487 | case Intrinsic::arm_neon_vqshiftsu: |
| 17488 | VShiftOpc = ARMISD::VQSHLsuIMM; |
| 17489 | break; |
| 17490 | case Intrinsic::arm_neon_vqshiftns: |
| 17491 | VShiftOpc = ARMISD::VQSHRNsIMM; |
| 17492 | break; |
| 17493 | case Intrinsic::arm_neon_vqshiftnu: |
| 17494 | VShiftOpc = ARMISD::VQSHRNuIMM; |
| 17495 | break; |
| 17496 | case Intrinsic::arm_neon_vqshiftnsu: |
| 17497 | VShiftOpc = ARMISD::VQSHRNsuIMM; |
| 17498 | break; |
| 17499 | case Intrinsic::arm_neon_vqrshiftns: |
| 17500 | VShiftOpc = ARMISD::VQRSHRNsIMM; |
| 17501 | break; |
| 17502 | case Intrinsic::arm_neon_vqrshiftnu: |
| 17503 | VShiftOpc = ARMISD::VQRSHRNuIMM; |
| 17504 | break; |
| 17505 | case Intrinsic::arm_neon_vqrshiftnsu: |
| 17506 | VShiftOpc = ARMISD::VQRSHRNsuIMM; |
| 17507 | break; |
| 17508 | } |
| 17509 | |
| 17510 | SDLoc dl(N); |
| 17511 | return DAG.getNode(Opcode: VShiftOpc, DL: dl, VT: N->getValueType(ResNo: 0), |
| 17512 | N1: N->getOperand(Num: 1), N2: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
| 17513 | } |
| 17514 | |
| 17515 | case Intrinsic::arm_neon_vshiftins: { |
| 17516 | EVT VT = N->getOperand(Num: 1).getValueType(); |
| 17517 | int64_t Cnt; |
| 17518 | unsigned VShiftOpc = 0; |
| 17519 | |
| 17520 | if (isVShiftLImm(Op: N->getOperand(Num: 3), VT, isLong: false, Cnt)) |
| 17521 | VShiftOpc = ARMISD::VSLIIMM; |
| 17522 | else if (isVShiftRImm(Op: N->getOperand(Num: 3), VT, isNarrow: false, isIntrinsic: true, Cnt)) |
| 17523 | VShiftOpc = ARMISD::VSRIIMM; |
| 17524 | else { |
| 17525 | llvm_unreachable("invalid shift count for vsli/vsri intrinsic" ); |
| 17526 | } |
| 17527 | |
| 17528 | SDLoc dl(N); |
| 17529 | return DAG.getNode(Opcode: VShiftOpc, DL: dl, VT: N->getValueType(ResNo: 0), |
| 17530 | N1: N->getOperand(Num: 1), N2: N->getOperand(Num: 2), |
| 17531 | N3: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
| 17532 | } |
| 17533 | |
| 17534 | case Intrinsic::arm_neon_vqrshifts: |
| 17535 | case Intrinsic::arm_neon_vqrshiftu: |
| 17536 | // No immediate versions of these to check for. |
| 17537 | break; |
| 17538 | |
| 17539 | case Intrinsic::arm_neon_vbsl: { |
| 17540 | SDLoc dl(N); |
| 17541 | return DAG.getNode(Opcode: ARMISD::VBSP, DL: dl, VT: N->getValueType(ResNo: 0), N1: N->getOperand(Num: 1), |
| 17542 | N2: N->getOperand(Num: 2), N3: N->getOperand(Num: 3)); |
| 17543 | } |
| 17544 | case Intrinsic::arm_mve_vqdmlah: |
| 17545 | case Intrinsic::arm_mve_vqdmlash: |
| 17546 | case Intrinsic::arm_mve_vqrdmlah: |
| 17547 | case Intrinsic::arm_mve_vqrdmlash: |
| 17548 | case Intrinsic::arm_mve_vmla_n_predicated: |
| 17549 | case Intrinsic::arm_mve_vmlas_n_predicated: |
| 17550 | case Intrinsic::arm_mve_vqdmlah_predicated: |
| 17551 | case Intrinsic::arm_mve_vqdmlash_predicated: |
| 17552 | case Intrinsic::arm_mve_vqrdmlah_predicated: |
| 17553 | case Intrinsic::arm_mve_vqrdmlash_predicated: { |
| 17554 | // These intrinsics all take an i32 scalar operand which is narrowed to the |
| 17555 | // size of a single lane of the vector type they return. So we don't need |
| 17556 | // any bits of that operand above that point, which allows us to eliminate |
| 17557 | // uxth/sxth. |
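    // For example, for a v8i16 result only the low 16 bits of the i32 scalar
    // are demanded, so an (and x, 0xffff) or sext_inreg feeding it can be
    // dropped.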
| 17558 | unsigned BitWidth = N->getValueType(ResNo: 0).getScalarSizeInBits(); |
| 17559 | APInt DemandedMask = APInt::getLowBitsSet(numBits: 32, loBitsSet: BitWidth); |
| 17560 | if (SimplifyDemandedBits(Op: N->getOperand(Num: 3), DemandedBits: DemandedMask, DCI)) |
| 17561 | return SDValue(); |
| 17562 | break; |
| 17563 | } |
| 17564 | |
| 17565 | case Intrinsic::arm_mve_minv: |
| 17566 | case Intrinsic::arm_mve_maxv: |
| 17567 | case Intrinsic::arm_mve_minav: |
| 17568 | case Intrinsic::arm_mve_maxav: |
| 17569 | case Intrinsic::arm_mve_minv_predicated: |
| 17570 | case Intrinsic::arm_mve_maxv_predicated: |
| 17571 | case Intrinsic::arm_mve_minav_predicated: |
| 17572 | case Intrinsic::arm_mve_maxav_predicated: { |
| 17573 | // These intrinsics all take an i32 scalar operand which is narrowed to the |
| 17574 | // size of a single lane of the vector type they take as the other input. |
| 17575 | unsigned BitWidth = N->getOperand(Num: 2)->getValueType(ResNo: 0).getScalarSizeInBits(); |
| 17576 | APInt DemandedMask = APInt::getLowBitsSet(numBits: 32, loBitsSet: BitWidth); |
| 17577 | if (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI)) |
| 17578 | return SDValue(); |
| 17579 | break; |
| 17580 | } |
| 17581 | |
| 17582 | case Intrinsic::arm_mve_addv: { |
| 17583 | // Turn this intrinsic straight into the appropriate ARMISD::VADDV node, |
    // which allows PerformADDVecReduce to turn it into VADDLV when possible.
| 17585 | bool Unsigned = N->getConstantOperandVal(Num: 2); |
| 17586 | unsigned Opc = Unsigned ? ARMISD::VADDVu : ARMISD::VADDVs; |
| 17587 | return DAG.getNode(Opcode: Opc, DL: SDLoc(N), VTList: N->getVTList(), N: N->getOperand(Num: 1)); |
| 17588 | } |
| 17589 | |
| 17590 | case Intrinsic::arm_mve_addlv: |
| 17591 | case Intrinsic::arm_mve_addlv_predicated: { |
| 17592 | // Same for these, but ARMISD::VADDLV has to be followed by a BUILD_PAIR |
    // which recombines the two outputs into an i64.
| 17594 | bool Unsigned = N->getConstantOperandVal(Num: 2); |
| 17595 | unsigned Opc = IntNo == Intrinsic::arm_mve_addlv ? |
| 17596 | (Unsigned ? ARMISD::VADDLVu : ARMISD::VADDLVs) : |
| 17597 | (Unsigned ? ARMISD::VADDLVpu : ARMISD::VADDLVps); |
| 17598 | |
| 17599 | SmallVector<SDValue, 4> Ops; |
| 17600 | for (unsigned i = 1, e = N->getNumOperands(); i < e; i++) |
| 17601 | if (i != 2) // skip the unsigned flag |
| 17602 | Ops.push_back(Elt: N->getOperand(Num: i)); |
| 17603 | |
| 17604 | SDLoc dl(N); |
| 17605 | SDValue val = DAG.getNode(Opcode: Opc, DL: dl, ResultTys: {MVT::i32, MVT::i32}, Ops); |
| 17606 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: val.getValue(R: 0), |
| 17607 | N2: val.getValue(R: 1)); |
| 17608 | } |
| 17609 | } |
| 17610 | |
| 17611 | return SDValue(); |
| 17612 | } |
| 17613 | |
| 17614 | /// PerformShiftCombine - Checks for immediate versions of vector shifts and |
| 17615 | /// lowers them. As with the vector shift intrinsics, this is done during DAG |
| 17616 | /// combining instead of DAG legalizing because the build_vectors for 64-bit |
| 17617 | /// vector element shift counts are generally not legal, and it is hard to see |
| 17618 | /// their values after they get legalized to loads from a constant pool. |
| 17619 | static SDValue PerformShiftCombine(SDNode *N, |
| 17620 | TargetLowering::DAGCombinerInfo &DCI, |
| 17621 | const ARMSubtarget *ST) { |
| 17622 | SelectionDAG &DAG = DCI.DAG; |
| 17623 | EVT VT = N->getValueType(ResNo: 0); |
| 17624 | |
| 17625 | if (ST->isThumb1Only() && N->getOpcode() == ISD::SHL && VT == MVT::i32 && |
| 17626 | N->getOperand(Num: 0)->getOpcode() == ISD::AND && |
| 17627 | N->getOperand(Num: 0)->hasOneUse()) { |
| 17628 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 17629 | return SDValue(); |
| 17630 | // Look for the pattern (shl (and x, AndMask), ShiftAmt). This doesn't |
| 17631 | // usually show up because instcombine prefers to canonicalize it to |
| 17632 | // (and (shl x, ShiftAmt) (shl AndMask, ShiftAmt)), but the shift can come |
| 17633 | // out of GEP lowering in some cases. |
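    // For example, (shl (and x, 0x3ff), 2) is rewritten below as
    // (srl (shl x, 22), 20), avoiding the need to materialize the mask in a
    // register.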
| 17634 | SDValue N0 = N->getOperand(Num: 0); |
| 17635 | ConstantSDNode *ShiftAmtNode = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1)); |
| 17636 | if (!ShiftAmtNode) |
| 17637 | return SDValue(); |
| 17638 | uint32_t ShiftAmt = static_cast<uint32_t>(ShiftAmtNode->getZExtValue()); |
| 17639 | ConstantSDNode *AndMaskNode = dyn_cast<ConstantSDNode>(Val: N0->getOperand(Num: 1)); |
| 17640 | if (!AndMaskNode) |
| 17641 | return SDValue(); |
| 17642 | uint32_t AndMask = static_cast<uint32_t>(AndMaskNode->getZExtValue()); |
| 17643 | // Don't transform uxtb/uxth. |
| 17644 | if (AndMask == 255 || AndMask == 65535) |
| 17645 | return SDValue(); |
| 17646 | if (isMask_32(Value: AndMask)) { |
| 17647 | uint32_t MaskedBits = llvm::countl_zero(Val: AndMask); |
| 17648 | if (MaskedBits > ShiftAmt) { |
| 17649 | SDLoc DL(N); |
| 17650 | SDValue SHL = DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
| 17651 | N2: DAG.getConstant(Val: MaskedBits, DL, VT: MVT::i32)); |
| 17652 | return DAG.getNode( |
| 17653 | Opcode: ISD::SRL, DL, VT: MVT::i32, N1: SHL, |
| 17654 | N2: DAG.getConstant(Val: MaskedBits - ShiftAmt, DL, VT: MVT::i32)); |
| 17655 | } |
| 17656 | } |
| 17657 | } |
| 17658 | |
| 17659 | // Nothing to be done for scalar shifts. |
| 17660 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 17661 | if (!VT.isVector() || !TLI.isTypeLegal(VT)) |
| 17662 | return SDValue(); |
| 17663 | if (ST->hasMVEIntegerOps()) |
| 17664 | return SDValue(); |
| 17665 | |
| 17666 | int64_t Cnt; |
| 17667 | |
| 17668 | switch (N->getOpcode()) { |
  default: llvm_unreachable("unexpected shift opcode");
| 17670 | |
| 17671 | case ISD::SHL: |
| 17672 | if (isVShiftLImm(Op: N->getOperand(Num: 1), VT, isLong: false, Cnt)) { |
| 17673 | SDLoc dl(N); |
| 17674 | return DAG.getNode(Opcode: ARMISD::VSHLIMM, DL: dl, VT, N1: N->getOperand(Num: 0), |
| 17675 | N2: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
| 17676 | } |
| 17677 | break; |
| 17678 | |
| 17679 | case ISD::SRA: |
| 17680 | case ISD::SRL: |
| 17681 | if (isVShiftRImm(Op: N->getOperand(Num: 1), VT, isNarrow: false, isIntrinsic: false, Cnt)) { |
| 17682 | unsigned VShiftOpc = |
| 17683 | (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM); |
| 17684 | SDLoc dl(N); |
| 17685 | return DAG.getNode(Opcode: VShiftOpc, DL: dl, VT, N1: N->getOperand(Num: 0), |
| 17686 | N2: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
| 17687 | } |
| 17688 | } |
| 17689 | return SDValue(); |
| 17690 | } |
| 17691 | |
// Look for a sign, zero or fp extend of a larger-than-legal load. This can be
// split into multiple extending loads, which are simpler to deal with than an
// arbitrary extend. For fp extends we use an integer extending load and a
// VCVTL to convert the type to an f32.
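// For example, a (sext (v8i8 load)) extended to v8i32 is split here into two
// v4i8->v4i32 SEXTLOADs at adjacent offsets, re-joined with a CONCAT_VECTORS.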
| 17696 | static SDValue PerformSplittingToWideningLoad(SDNode *N, SelectionDAG &DAG) { |
| 17697 | SDValue N0 = N->getOperand(Num: 0); |
| 17698 | if (N0.getOpcode() != ISD::LOAD) |
| 17699 | return SDValue(); |
| 17700 | LoadSDNode *LD = cast<LoadSDNode>(Val: N0.getNode()); |
| 17701 | if (!LD->isSimple() || !N0.hasOneUse() || LD->isIndexed() || |
| 17702 | LD->getExtensionType() != ISD::NON_EXTLOAD) |
| 17703 | return SDValue(); |
| 17704 | EVT FromVT = LD->getValueType(ResNo: 0); |
| 17705 | EVT ToVT = N->getValueType(ResNo: 0); |
| 17706 | if (!ToVT.isVector()) |
| 17707 | return SDValue(); |
| 17708 | assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements()); |
| 17709 | EVT ToEltVT = ToVT.getVectorElementType(); |
| 17710 | EVT FromEltVT = FromVT.getVectorElementType(); |
| 17711 | |
| 17712 | unsigned NumElements = 0; |
| 17713 | if (ToEltVT == MVT::i32 && FromEltVT == MVT::i8) |
| 17714 | NumElements = 4; |
| 17715 | if (ToEltVT == MVT::f32 && FromEltVT == MVT::f16) |
| 17716 | NumElements = 4; |
| 17717 | if (NumElements == 0 || |
| 17718 | (FromEltVT != MVT::f16 && FromVT.getVectorNumElements() == NumElements) || |
| 17719 | FromVT.getVectorNumElements() % NumElements != 0 || |
| 17720 | !isPowerOf2_32(Value: NumElements)) |
| 17721 | return SDValue(); |
| 17722 | |
| 17723 | LLVMContext &C = *DAG.getContext(); |
| 17724 | SDLoc DL(LD); |
| 17725 | // Details about the old load |
| 17726 | SDValue Ch = LD->getChain(); |
| 17727 | SDValue BasePtr = LD->getBasePtr(); |
| 17728 | Align Alignment = LD->getBaseAlign(); |
| 17729 | MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags(); |
| 17730 | AAMDNodes AAInfo = LD->getAAInfo(); |
| 17731 | |
| 17732 | ISD::LoadExtType NewExtType = |
| 17733 | N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SEXTLOAD : ISD::ZEXTLOAD; |
| 17734 | SDValue Offset = DAG.getUNDEF(VT: BasePtr.getValueType()); |
| 17735 | EVT NewFromVT = EVT::getVectorVT( |
| 17736 | Context&: C, VT: EVT::getIntegerVT(Context&: C, BitWidth: FromEltVT.getScalarSizeInBits()), NumElements); |
| 17737 | EVT NewToVT = EVT::getVectorVT( |
| 17738 | Context&: C, VT: EVT::getIntegerVT(Context&: C, BitWidth: ToEltVT.getScalarSizeInBits()), NumElements); |
| 17739 | |
| 17740 | SmallVector<SDValue, 4> Loads; |
| 17741 | SmallVector<SDValue, 4> Chains; |
| 17742 | for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) { |
| 17743 | unsigned NewOffset = (i * NewFromVT.getSizeInBits()) / 8; |
| 17744 | SDValue NewPtr = |
| 17745 | DAG.getObjectPtrOffset(SL: DL, Ptr: BasePtr, Offset: TypeSize::getFixed(ExactSize: NewOffset)); |
| 17746 | |
| 17747 | SDValue NewLoad = |
| 17748 | DAG.getLoad(AM: ISD::UNINDEXED, ExtType: NewExtType, VT: NewToVT, dl: DL, Chain: Ch, Ptr: NewPtr, Offset, |
| 17749 | PtrInfo: LD->getPointerInfo().getWithOffset(O: NewOffset), MemVT: NewFromVT, |
| 17750 | Alignment, MMOFlags, AAInfo); |
| 17751 | Loads.push_back(Elt: NewLoad); |
| 17752 | Chains.push_back(Elt: SDValue(NewLoad.getNode(), 1)); |
| 17753 | } |
| 17754 | |
  // Float truncs need to be extended with VCVTBs into their floating point
  // types.
| 17756 | if (FromEltVT == MVT::f16) { |
| 17757 | SmallVector<SDValue, 4> Extends; |
| 17758 | |
| 17759 | for (unsigned i = 0; i < Loads.size(); i++) { |
| 17760 | SDValue LoadBC = |
| 17761 | DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT: MVT::v8f16, Operand: Loads[i]); |
| 17762 | SDValue FPExt = DAG.getNode(Opcode: ARMISD::VCVTL, DL, VT: MVT::v4f32, N1: LoadBC, |
| 17763 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
| 17764 | Extends.push_back(Elt: FPExt); |
| 17765 | } |
| 17766 | |
| 17767 | Loads = Extends; |
| 17768 | } |
| 17769 | |
| 17770 | SDValue NewChain = DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Chains); |
| 17771 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 1), To: NewChain); |
| 17772 | return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: ToVT, Ops: Loads); |
| 17773 | } |
| 17774 | |
| 17775 | /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, |
| 17776 | /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. |
| 17777 | static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, |
| 17778 | const ARMSubtarget *ST) { |
| 17779 | SDValue N0 = N->getOperand(Num: 0); |
| 17780 | |
| 17781 | // Check for sign- and zero-extensions of vector extract operations of 8- and |
| 17782 | // 16-bit vector elements. NEON and MVE support these directly. They are |
| 17783 | // handled during DAG combining because type legalization will promote them |
| 17784 | // to 32-bit types and it is messy to recognize the operations after that. |
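  // For example, (i32 (sext (extract_vector_elt v8i16:v, 3))) becomes a single
  // VGETLANEs of lane 3, avoiding a separate extend of the extracted value.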
| 17785 | if ((ST->hasNEON() || ST->hasMVEIntegerOps()) && |
| 17786 | N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { |
| 17787 | SDValue Vec = N0.getOperand(i: 0); |
| 17788 | SDValue Lane = N0.getOperand(i: 1); |
| 17789 | EVT VT = N->getValueType(ResNo: 0); |
| 17790 | EVT EltVT = N0.getValueType(); |
| 17791 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 17792 | |
| 17793 | if (VT == MVT::i32 && |
| 17794 | (EltVT == MVT::i8 || EltVT == MVT::i16) && |
| 17795 | TLI.isTypeLegal(VT: Vec.getValueType()) && |
| 17796 | isa<ConstantSDNode>(Val: Lane)) { |
| 17797 | |
| 17798 | unsigned Opc = 0; |
| 17799 | switch (N->getOpcode()) { |
| 17800 | default: llvm_unreachable("unexpected opcode" ); |
| 17801 | case ISD::SIGN_EXTEND: |
| 17802 | Opc = ARMISD::VGETLANEs; |
| 17803 | break; |
| 17804 | case ISD::ZERO_EXTEND: |
| 17805 | case ISD::ANY_EXTEND: |
| 17806 | Opc = ARMISD::VGETLANEu; |
| 17807 | break; |
| 17808 | } |
| 17809 | return DAG.getNode(Opcode: Opc, DL: SDLoc(N), VT, N1: Vec, N2: Lane); |
| 17810 | } |
| 17811 | } |
| 17812 | |
| 17813 | if (ST->hasMVEIntegerOps()) |
| 17814 | if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG)) |
| 17815 | return NewLoad; |
| 17816 | |
| 17817 | return SDValue(); |
| 17818 | } |
| 17819 | |
| 17820 | static SDValue PerformFPExtendCombine(SDNode *N, SelectionDAG &DAG, |
| 17821 | const ARMSubtarget *ST) { |
| 17822 | if (ST->hasMVEFloatOps()) |
| 17823 | if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG)) |
| 17824 | return NewLoad; |
| 17825 | |
| 17826 | return SDValue(); |
| 17827 | } |
| 17828 | |
| 17829 | // Lower smin(smax(x, C1), C2) to ssat or usat, if they have saturating |
| 17830 | // constant bounds. |
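// For example, smin(smax(x, -128), 127) becomes an SSAT clamping x to the
// signed range [-128, 127], and smin(smax(x, 0), 255) becomes a USAT clamping
// x to the unsigned range [0, 255].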
| 17831 | static SDValue PerformMinMaxToSatCombine(SDValue Op, SelectionDAG &DAG, |
| 17832 | const ARMSubtarget *Subtarget) { |
| 17833 | if ((Subtarget->isThumb() || !Subtarget->hasV6Ops()) && |
| 17834 | !Subtarget->isThumb2()) |
| 17835 | return SDValue(); |
| 17836 | |
| 17837 | EVT VT = Op.getValueType(); |
| 17838 | SDValue Op0 = Op.getOperand(i: 0); |
| 17839 | |
| 17840 | if (VT != MVT::i32 || |
| 17841 | (Op0.getOpcode() != ISD::SMIN && Op0.getOpcode() != ISD::SMAX) || |
| 17842 | !isa<ConstantSDNode>(Val: Op.getOperand(i: 1)) || |
| 17843 | !isa<ConstantSDNode>(Val: Op0.getOperand(i: 1))) |
| 17844 | return SDValue(); |
| 17845 | |
| 17846 | SDValue Min = Op; |
| 17847 | SDValue Max = Op0; |
| 17848 | SDValue Input = Op0.getOperand(i: 0); |
| 17849 | if (Min.getOpcode() == ISD::SMAX) |
| 17850 | std::swap(a&: Min, b&: Max); |
| 17851 | |
| 17852 | APInt MinC = Min.getConstantOperandAPInt(i: 1); |
| 17853 | APInt MaxC = Max.getConstantOperandAPInt(i: 1); |
| 17854 | |
| 17855 | if (Min.getOpcode() != ISD::SMIN || Max.getOpcode() != ISD::SMAX || |
| 17856 | !(MinC + 1).isPowerOf2()) |
| 17857 | return SDValue(); |
| 17858 | |
| 17859 | SDLoc DL(Op); |
| 17860 | if (MinC == ~MaxC) |
| 17861 | return DAG.getNode(Opcode: ARMISD::SSAT, DL, VT, N1: Input, |
| 17862 | N2: DAG.getConstant(Val: MinC.countr_one(), DL, VT)); |
| 17863 | if (MaxC == 0) |
| 17864 | return DAG.getNode(Opcode: ARMISD::USAT, DL, VT, N1: Input, |
| 17865 | N2: DAG.getConstant(Val: MinC.countr_one(), DL, VT)); |
| 17866 | |
| 17867 | return SDValue(); |
| 17868 | } |
| 17869 | |
| 17870 | /// PerformMinMaxCombine - Target-specific DAG combining for creating truncating |
| 17871 | /// saturates. |
| 17872 | static SDValue PerformMinMaxCombine(SDNode *N, SelectionDAG &DAG, |
| 17873 | const ARMSubtarget *ST) { |
| 17874 | EVT VT = N->getValueType(ResNo: 0); |
| 17875 | SDValue N0 = N->getOperand(Num: 0); |
| 17876 | |
| 17877 | if (VT == MVT::i32) |
| 17878 | return PerformMinMaxToSatCombine(Op: SDValue(N, 0), DAG, Subtarget: ST); |
| 17879 | |
| 17880 | if (!ST->hasMVEIntegerOps()) |
| 17881 | return SDValue(); |
| 17882 | |
| 17883 | if (SDValue V = PerformVQDMULHCombine(N, DAG)) |
| 17884 | return V; |
| 17885 | |
| 17886 | if (VT != MVT::v4i32 && VT != MVT::v8i16) |
| 17887 | return SDValue(); |
| 17888 | |
| 17889 | auto IsSignedSaturate = [&](SDNode *Min, SDNode *Max) { |
| 17890 | // Check one is a smin and the other is a smax |
| 17891 | if (Min->getOpcode() != ISD::SMIN) |
| 17892 | std::swap(a&: Min, b&: Max); |
| 17893 | if (Min->getOpcode() != ISD::SMIN || Max->getOpcode() != ISD::SMAX) |
| 17894 | return false; |
| 17895 | |
| 17896 | APInt SaturateC; |
| 17897 | if (VT == MVT::v4i32) |
| 17898 | SaturateC = APInt(32, (1 << 15) - 1, true); |
| 17899 | else //if (VT == MVT::v8i16) |
| 17900 | SaturateC = APInt(16, (1 << 7) - 1, true); |
| 17901 | |
| 17902 | APInt MinC, MaxC; |
| 17903 | if (!ISD::isConstantSplatVector(N: Min->getOperand(Num: 1).getNode(), SplatValue&: MinC) || |
| 17904 | MinC != SaturateC) |
| 17905 | return false; |
| 17906 | if (!ISD::isConstantSplatVector(N: Max->getOperand(Num: 1).getNode(), SplatValue&: MaxC) || |
| 17907 | MaxC != ~SaturateC) |
| 17908 | return false; |
| 17909 | return true; |
| 17910 | }; |
| 17911 | |
| 17912 | if (IsSignedSaturate(N, N0.getNode())) { |
| 17913 | SDLoc DL(N); |
| 17914 | MVT ExtVT, HalfVT; |
| 17915 | if (VT == MVT::v4i32) { |
| 17916 | HalfVT = MVT::v8i16; |
| 17917 | ExtVT = MVT::v4i16; |
| 17918 | } else { // if (VT == MVT::v8i16) |
| 17919 | HalfVT = MVT::v16i8; |
| 17920 | ExtVT = MVT::v8i8; |
| 17921 | } |
| 17922 | |
    // Create a VQMOVNB with undef top lanes, then sign extend it into the top
    // half. That extend will hopefully be removed if only the bottom bits are
    // demanded (through a truncating store, for example).
| 17926 | SDValue VQMOVN = |
| 17927 | DAG.getNode(Opcode: ARMISD::VQMOVNs, DL, VT: HalfVT, N1: DAG.getUNDEF(VT: HalfVT), |
| 17928 | N2: N0->getOperand(Num: 0), N3: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
| 17929 | SDValue Bitcast = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: VQMOVN); |
| 17930 | return DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL, VT, N1: Bitcast, |
| 17931 | N2: DAG.getValueType(ExtVT)); |
| 17932 | } |
| 17933 | |
| 17934 | auto IsUnsignedSaturate = [&](SDNode *Min) { |
| 17935 | // For unsigned, we just need to check for <= 0xffff |
| 17936 | if (Min->getOpcode() != ISD::UMIN) |
| 17937 | return false; |
| 17938 | |
| 17939 | APInt SaturateC; |
| 17940 | if (VT == MVT::v4i32) |
| 17941 | SaturateC = APInt(32, (1 << 16) - 1, true); |
| 17942 | else //if (VT == MVT::v8i16) |
| 17943 | SaturateC = APInt(16, (1 << 8) - 1, true); |
| 17944 | |
| 17945 | APInt MinC; |
| 17946 | if (!ISD::isConstantSplatVector(N: Min->getOperand(Num: 1).getNode(), SplatValue&: MinC) || |
| 17947 | MinC != SaturateC) |
| 17948 | return false; |
| 17949 | return true; |
| 17950 | }; |
| 17951 | |
| 17952 | if (IsUnsignedSaturate(N)) { |
| 17953 | SDLoc DL(N); |
| 17954 | MVT HalfVT; |
| 17955 | unsigned ExtConst; |
| 17956 | if (VT == MVT::v4i32) { |
| 17957 | HalfVT = MVT::v8i16; |
| 17958 | ExtConst = 0x0000FFFF; |
| 17959 | } else { //if (VT == MVT::v8i16) |
| 17960 | HalfVT = MVT::v16i8; |
| 17961 | ExtConst = 0x00FF; |
| 17962 | } |
| 17963 | |
| 17964 | // Create a VQMOVNB with undef top lanes, then ZExt into the top half with |
| 17965 | // an AND. That extend will hopefully be removed if only the bottom bits are |
    // demanded (through a truncating store, for example).
| 17967 | SDValue VQMOVN = |
| 17968 | DAG.getNode(Opcode: ARMISD::VQMOVNu, DL, VT: HalfVT, N1: DAG.getUNDEF(VT: HalfVT), N2: N0, |
| 17969 | N3: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
| 17970 | SDValue Bitcast = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: VQMOVN); |
| 17971 | return DAG.getNode(Opcode: ISD::AND, DL, VT, N1: Bitcast, |
| 17972 | N2: DAG.getConstant(Val: ExtConst, DL, VT)); |
| 17973 | } |
| 17974 | |
| 17975 | return SDValue(); |
| 17976 | } |
| 17977 | |
| 17978 | static const APInt *isPowerOf2Constant(SDValue V) { |
| 17979 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: V); |
| 17980 | if (!C) |
| 17981 | return nullptr; |
| 17982 | const APInt *CV = &C->getAPIntValue(); |
| 17983 | return CV->isPowerOf2() ? CV : nullptr; |
| 17984 | } |
| 17985 | |
SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV,
                                                   SelectionDAG &DAG) const {
| 17987 | // If we have a CMOV, OR and AND combination such as: |
| 17988 | // if (x & CN) |
| 17989 | // y |= CM; |
| 17990 | // |
| 17991 | // And: |
| 17992 | // * CN is a single bit; |
| 17993 | // * All bits covered by CM are known zero in y |
| 17994 | // |
| 17995 | // Then we can convert this into a sequence of BFI instructions. This will |
| 17996 | // always be a win if CM is a single bit, will always be no worse than the |
| 17997 | // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is |
| 17998 | // three bits (due to the extra IT instruction). |
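  //
  // For example, with CN == 4 and CM == 0x90 (two set bits), the code below
  // emits one SRL of x followed by two BFIs, which is no worse than the
  // TST&OR sequence it replaces.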
| 17999 | |
| 18000 | SDValue Op0 = CMOV->getOperand(Num: 0); |
| 18001 | SDValue Op1 = CMOV->getOperand(Num: 1); |
| 18002 | auto CC = CMOV->getConstantOperandAPInt(Num: 2).getLimitedValue(); |
| 18003 | SDValue CmpZ = CMOV->getOperand(Num: 3); |
| 18004 | |
| 18005 | // The compare must be against zero. |
| 18006 | if (!isNullConstant(V: CmpZ->getOperand(Num: 1))) |
| 18007 | return SDValue(); |
| 18008 | |
| 18009 | assert(CmpZ->getOpcode() == ARMISD::CMPZ); |
| 18010 | SDValue And = CmpZ->getOperand(Num: 0); |
| 18011 | if (And->getOpcode() != ISD::AND) |
| 18012 | return SDValue(); |
| 18013 | const APInt *AndC = isPowerOf2Constant(V: And->getOperand(Num: 1)); |
| 18014 | if (!AndC) |
| 18015 | return SDValue(); |
| 18016 | SDValue X = And->getOperand(Num: 0); |
| 18017 | |
| 18018 | if (CC == ARMCC::EQ) { |
| 18019 | // We're performing an "equal to zero" compare. Swap the operands so we |
| 18020 | // canonicalize on a "not equal to zero" compare. |
| 18021 | std::swap(a&: Op0, b&: Op1); |
| 18022 | } else { |
    assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?");
| 18024 | } |
| 18025 | |
| 18026 | if (Op1->getOpcode() != ISD::OR) |
| 18027 | return SDValue(); |
| 18028 | |
| 18029 | ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Val: Op1->getOperand(Num: 1)); |
| 18030 | if (!OrC) |
| 18031 | return SDValue(); |
| 18032 | SDValue Y = Op1->getOperand(Num: 0); |
| 18033 | |
| 18034 | if (Op0 != Y) |
| 18035 | return SDValue(); |
| 18036 | |
| 18037 | // Now, is it profitable to continue? |
| 18038 | APInt OrCI = OrC->getAPIntValue(); |
| 18039 | unsigned Heuristic = Subtarget->isThumb() ? 3 : 2; |
| 18040 | if (OrCI.popcount() > Heuristic) |
| 18041 | return SDValue(); |
| 18042 | |
| 18043 | // Lastly, can we determine that the bits defined by OrCI |
| 18044 | // are zero in Y? |
| 18045 | KnownBits Known = DAG.computeKnownBits(Op: Y); |
| 18046 | if ((OrCI & Known.Zero) != OrCI) |
| 18047 | return SDValue(); |
| 18048 | |
| 18049 | // OK, we can do the combine. |
| 18050 | SDValue V = Y; |
| 18051 | SDLoc dl(X); |
| 18052 | EVT VT = X.getValueType(); |
| 18053 | unsigned BitInX = AndC->logBase2(); |
| 18054 | |
| 18055 | if (BitInX != 0) { |
| 18056 | // We must shift X first. |
| 18057 | X = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT, N1: X, |
| 18058 | N2: DAG.getConstant(Val: BitInX, DL: dl, VT)); |
| 18059 | } |
| 18060 | |
| 18061 | for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits(); |
| 18062 | BitInY < NumActiveBits; ++BitInY) { |
| 18063 | if (OrCI[BitInY] == 0) |
| 18064 | continue; |
| 18065 | APInt Mask(VT.getSizeInBits(), 0); |
| 18066 | Mask.setBit(BitInY); |
| 18067 | V = DAG.getNode(Opcode: ARMISD::BFI, DL: dl, VT, N1: V, N2: X, |
| 18068 | // Confusingly, the operand is an *inverted* mask. |
| 18069 | N3: DAG.getConstant(Val: ~Mask, DL: dl, VT)); |
| 18070 | } |
| 18071 | |
| 18072 | return V; |
| 18073 | } |
| 18074 | |
| 18075 | // Given N, the value controlling the conditional branch, search for the loop |
| 18076 | // intrinsic, returning it, along with how the value is used. We need to handle |
| 18077 | // patterns such as the following: |
| 18078 | // (brcond (xor (setcc (loop.decrement), 0, ne), 1), exit) |
| 18079 | // (brcond (setcc (loop.decrement), 0, eq), exit) |
| 18080 | // (brcond (setcc (loop.decrement), 0, ne), header) |
| 18081 | static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm, |
| 18082 | bool &Negate) { |
| 18083 | switch (N->getOpcode()) { |
| 18084 | default: |
| 18085 | break; |
| 18086 | case ISD::XOR: { |
| 18087 | if (!isa<ConstantSDNode>(Val: N.getOperand(i: 1))) |
| 18088 | return SDValue(); |
| 18089 | if (!cast<ConstantSDNode>(Val: N.getOperand(i: 1))->isOne()) |
| 18090 | return SDValue(); |
| 18091 | Negate = !Negate; |
| 18092 | return SearchLoopIntrinsic(N: N.getOperand(i: 0), CC, Imm, Negate); |
| 18093 | } |
| 18094 | case ISD::SETCC: { |
| 18095 | auto *Const = dyn_cast<ConstantSDNode>(Val: N.getOperand(i: 1)); |
| 18096 | if (!Const) |
| 18097 | return SDValue(); |
| 18098 | if (Const->isZero()) |
| 18099 | Imm = 0; |
| 18100 | else if (Const->isOne()) |
| 18101 | Imm = 1; |
| 18102 | else |
| 18103 | return SDValue(); |
| 18104 | CC = cast<CondCodeSDNode>(Val: N.getOperand(i: 2))->get(); |
| 18105 | return SearchLoopIntrinsic(N: N->getOperand(Num: 0), CC, Imm, Negate); |
| 18106 | } |
| 18107 | case ISD::INTRINSIC_W_CHAIN: { |
| 18108 | unsigned IntOp = N.getConstantOperandVal(i: 1); |
| 18109 | if (IntOp != Intrinsic::test_start_loop_iterations && |
| 18110 | IntOp != Intrinsic::loop_decrement_reg) |
| 18111 | return SDValue(); |
| 18112 | return N; |
| 18113 | } |
| 18114 | } |
| 18115 | return SDValue(); |
| 18116 | } |
| 18117 | |
| 18118 | static SDValue PerformHWLoopCombine(SDNode *N, |
| 18119 | TargetLowering::DAGCombinerInfo &DCI, |
| 18120 | const ARMSubtarget *ST) { |
| 18121 | |
  // The hwloop intrinsics that we're interested in are used for control-flow,
| 18123 | // either for entering or exiting the loop: |
| 18124 | // - test.start.loop.iterations will test whether its operand is zero. If it |
| 18125 | // is zero, the proceeding branch should not enter the loop. |
| 18126 | // - loop.decrement.reg also tests whether its operand is zero. If it is |
| 18127 | // zero, the proceeding branch should not branch back to the beginning of |
| 18128 | // the loop. |
  // So here, we need to check how the brcond is using the result of each
| 18130 | // of the intrinsics to ensure that we're branching to the right place at the |
| 18131 | // right time. |
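  //
  // For example, (brcond (setcc (loop.decrement.reg ...), 0, ne), header)
  // branches back to the header while the count is non-zero, so it is lowered
  // below to an ARMISD::LE targeting the header.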
| 18132 | |
| 18133 | ISD::CondCode CC; |
| 18134 | SDValue Cond; |
| 18135 | int Imm = 1; |
| 18136 | bool Negate = false; |
| 18137 | SDValue Chain = N->getOperand(Num: 0); |
| 18138 | SDValue Dest; |
| 18139 | |
| 18140 | if (N->getOpcode() == ISD::BRCOND) { |
| 18141 | CC = ISD::SETEQ; |
| 18142 | Cond = N->getOperand(Num: 1); |
| 18143 | Dest = N->getOperand(Num: 2); |
| 18144 | } else { |
    assert(N->getOpcode() == ISD::BR_CC && "Expected BRCOND or BR_CC!");
| 18146 | CC = cast<CondCodeSDNode>(Val: N->getOperand(Num: 1))->get(); |
| 18147 | Cond = N->getOperand(Num: 2); |
| 18148 | Dest = N->getOperand(Num: 4); |
| 18149 | if (auto *Const = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 3))) { |
| 18150 | if (!Const->isOne() && !Const->isZero()) |
| 18151 | return SDValue(); |
| 18152 | Imm = Const->getZExtValue(); |
| 18153 | } else |
| 18154 | return SDValue(); |
| 18155 | } |
| 18156 | |
| 18157 | SDValue Int = SearchLoopIntrinsic(N: Cond, CC, Imm, Negate); |
| 18158 | if (!Int) |
| 18159 | return SDValue(); |
| 18160 | |
| 18161 | if (Negate) |
| 18162 | CC = ISD::getSetCCInverse(Operation: CC, /* Integer inverse */ Type: MVT::i32); |
| 18163 | |
| 18164 | auto IsTrueIfZero = [](ISD::CondCode CC, int Imm) { |
| 18165 | return (CC == ISD::SETEQ && Imm == 0) || |
| 18166 | (CC == ISD::SETNE && Imm == 1) || |
| 18167 | (CC == ISD::SETLT && Imm == 1) || |
| 18168 | (CC == ISD::SETULT && Imm == 1); |
| 18169 | }; |
| 18170 | |
| 18171 | auto IsFalseIfZero = [](ISD::CondCode CC, int Imm) { |
| 18172 | return (CC == ISD::SETEQ && Imm == 1) || |
| 18173 | (CC == ISD::SETNE && Imm == 0) || |
| 18174 | (CC == ISD::SETGT && Imm == 0) || |
| 18175 | (CC == ISD::SETUGT && Imm == 0) || |
| 18176 | (CC == ISD::SETGE && Imm == 1) || |
| 18177 | (CC == ISD::SETUGE && Imm == 1); |
| 18178 | }; |
| 18179 | |
| 18180 | assert((IsTrueIfZero(CC, Imm) || IsFalseIfZero(CC, Imm)) && |
| 18181 | "unsupported condition" ); |
| 18182 | |
| 18183 | SDLoc dl(Int); |
| 18184 | SelectionDAG &DAG = DCI.DAG; |
| 18185 | SDValue Elements = Int.getOperand(i: 2); |
| 18186 | unsigned IntOp = Int->getConstantOperandVal(Num: 1); |
| 18187 | assert((N->hasOneUse() && N->user_begin()->getOpcode() == ISD::BR) && |
| 18188 | "expected single br user" ); |
| 18189 | SDNode *Br = *N->user_begin(); |
| 18190 | SDValue OtherTarget = Br->getOperand(Num: 1); |
| 18191 | |
| 18192 | // Update the unconditional branch to branch to the given Dest. |
| 18193 | auto UpdateUncondBr = [](SDNode *Br, SDValue Dest, SelectionDAG &DAG) { |
| 18194 | SDValue NewBrOps[] = { Br->getOperand(Num: 0), Dest }; |
| 18195 | SDValue NewBr = DAG.getNode(Opcode: ISD::BR, DL: SDLoc(Br), VT: MVT::Other, Ops: NewBrOps); |
| 18196 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(Br, 0), To: NewBr); |
| 18197 | }; |
| 18198 | |
| 18199 | if (IntOp == Intrinsic::test_start_loop_iterations) { |
| 18200 | SDValue Res; |
| 18201 | SDValue Setup = DAG.getNode(Opcode: ARMISD::WLSSETUP, DL: dl, VT: MVT::i32, Operand: Elements); |
| 18202 | // We expect this 'instruction' to branch when the counter is zero. |
| 18203 | if (IsTrueIfZero(CC, Imm)) { |
| 18204 | SDValue Ops[] = {Chain, Setup, Dest}; |
| 18205 | Res = DAG.getNode(Opcode: ARMISD::WLS, DL: dl, VT: MVT::Other, Ops); |
| 18206 | } else { |
| 18207 | // The logic is the reverse of what we need for WLS, so find the other |
| 18208 | // basic block target: the target of the proceeding br. |
| 18209 | UpdateUncondBr(Br, Dest, DAG); |
| 18210 | |
| 18211 | SDValue Ops[] = {Chain, Setup, OtherTarget}; |
| 18212 | Res = DAG.getNode(Opcode: ARMISD::WLS, DL: dl, VT: MVT::Other, Ops); |
| 18213 | } |
| 18214 | // Update LR count to the new value |
| 18215 | DAG.ReplaceAllUsesOfValueWith(From: Int.getValue(R: 0), To: Setup); |
| 18216 | // Update chain |
| 18217 | DAG.ReplaceAllUsesOfValueWith(From: Int.getValue(R: 2), To: Int.getOperand(i: 0)); |
| 18218 | return Res; |
| 18219 | } else { |
| 18220 | SDValue Size = |
| 18221 | DAG.getTargetConstant(Val: Int.getConstantOperandVal(i: 3), DL: dl, VT: MVT::i32); |
| 18222 | SDValue Args[] = { Int.getOperand(i: 0), Elements, Size, }; |
| 18223 | SDValue LoopDec = DAG.getNode(Opcode: ARMISD::LOOP_DEC, DL: dl, |
| 18224 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::Other), Ops: Args); |
| 18225 | DAG.ReplaceAllUsesWith(From: Int.getNode(), To: LoopDec.getNode()); |
| 18226 | |
| 18227 | // We expect this instruction to branch when the count is not zero. |
| 18228 | SDValue Target = IsFalseIfZero(CC, Imm) ? Dest : OtherTarget; |
| 18229 | |
| 18230 | // Update the unconditional branch to target the loop preheader if we've |
| 18231 | // found the condition has been reversed. |
| 18232 | if (Target == OtherTarget) |
| 18233 | UpdateUncondBr(Br, Dest, DAG); |
| 18234 | |
| 18235 | Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, |
| 18236 | N1: SDValue(LoopDec.getNode(), 1), N2: Chain); |
| 18237 | |
| 18238 | SDValue EndArgs[] = { Chain, SDValue(LoopDec.getNode(), 0), Target }; |
| 18239 | return DAG.getNode(Opcode: ARMISD::LE, DL: dl, VT: MVT::Other, Ops: EndArgs); |
| 18240 | } |
| 18241 | return SDValue(); |
| 18242 | } |
| 18243 | |
| 18244 | /// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND. |
| 18245 | SDValue |
| 18246 | ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const { |
| 18247 | SDValue Cmp = N->getOperand(Num: 3); |
| 18248 | if (Cmp.getOpcode() != ARMISD::CMPZ) |
| 18249 | // Only looking at NE cases. |
| 18250 | return SDValue(); |
| 18251 | |
| 18252 | SDLoc dl(N); |
| 18253 | SDValue LHS = Cmp.getOperand(i: 0); |
| 18254 | SDValue RHS = Cmp.getOperand(i: 1); |
| 18255 | SDValue Chain = N->getOperand(Num: 0); |
| 18256 | SDValue BB = N->getOperand(Num: 1); |
| 18257 | SDValue ARMcc = N->getOperand(Num: 2); |
| 18258 | ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsZExtVal(); |
| 18259 | |
| 18260 | // (brcond Chain BB ne (cmpz (and (cmov 0 1 CC Flags) 1) 0)) |
| 18261 | // -> (brcond Chain BB CC Flags) |
| 18262 | if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() && |
| 18263 | LHS->getOperand(Num: 0)->getOpcode() == ARMISD::CMOV && |
| 18264 | LHS->getOperand(Num: 0)->hasOneUse() && |
| 18265 | isNullConstant(V: LHS->getOperand(Num: 0)->getOperand(Num: 0)) && |
| 18266 | isOneConstant(V: LHS->getOperand(Num: 0)->getOperand(Num: 1)) && |
| 18267 | isOneConstant(V: LHS->getOperand(Num: 1)) && isNullConstant(V: RHS)) { |
| 18268 | return DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, N1: Chain, N2: BB, |
| 18269 | N3: LHS->getOperand(Num: 0)->getOperand(Num: 2), |
| 18270 | N4: LHS->getOperand(Num: 0)->getOperand(Num: 3)); |
| 18271 | } |
| 18272 | |
| 18273 | return SDValue(); |
| 18274 | } |
| 18275 | |
| 18276 | /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV. |
| 18277 | SDValue |
| 18278 | ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { |
| 18279 | SDValue Cmp = N->getOperand(Num: 3); |
| 18280 | if (Cmp.getOpcode() != ARMISD::CMPZ) |
| 18281 | // Only looking at EQ and NE cases. |
| 18282 | return SDValue(); |
| 18283 | |
| 18284 | EVT VT = N->getValueType(ResNo: 0); |
| 18285 | SDLoc dl(N); |
| 18286 | SDValue LHS = Cmp.getOperand(i: 0); |
| 18287 | SDValue RHS = Cmp.getOperand(i: 1); |
| 18288 | SDValue FalseVal = N->getOperand(Num: 0); |
| 18289 | SDValue TrueVal = N->getOperand(Num: 1); |
| 18290 | SDValue ARMcc = N->getOperand(Num: 2); |
| 18291 | ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsZExtVal(); |
| 18292 | |
| 18293 | // BFI is only available on V6T2+. |
| 18294 | if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) { |
| 18295 | SDValue R = PerformCMOVToBFICombine(CMOV: N, DAG); |
| 18296 | if (R) |
| 18297 | return R; |
| 18298 | } |
| 18299 | |
| 18300 | // Simplify |
| 18301 | // mov r1, r0 |
| 18302 | // cmp r1, x |
| 18303 | // mov r0, y |
| 18304 | // moveq r0, x |
| 18305 | // to |
| 18306 | // cmp r0, x |
| 18307 | // movne r0, y |
| 18308 | // |
| 18309 | // mov r1, r0 |
| 18310 | // cmp r1, x |
| 18311 | // mov r0, x |
| 18312 | // movne r0, y |
| 18313 | // to |
| 18314 | // cmp r0, x |
| 18315 | // movne r0, y |
| 18316 | /// FIXME: Turn this into a target neutral optimization? |
| 18317 | SDValue Res; |
| 18318 | if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) { |
| 18319 | Res = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: LHS, N2: TrueVal, N3: ARMcc, N4: Cmp); |
| 18320 | } else if (CC == ARMCC::EQ && TrueVal == RHS) { |
| 18321 | SDValue ARMcc; |
| 18322 | SDValue NewCmp = getARMCmp(LHS, RHS, CC: ISD::SETNE, ARMcc, DAG, dl); |
| 18323 | Res = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: LHS, N2: FalseVal, N3: ARMcc, N4: NewCmp); |
| 18324 | } |
| 18325 | |
| 18326 | // (cmov F T ne (cmpz (cmov 0 1 CC Flags) 0)) |
| 18327 | // -> (cmov F T CC Flags) |
| 18328 | if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse() && |
| 18329 | isNullConstant(V: LHS->getOperand(Num: 0)) && isOneConstant(V: LHS->getOperand(Num: 1)) && |
| 18330 | isNullConstant(V: RHS)) { |
| 18331 | return DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: FalseVal, N2: TrueVal, |
| 18332 | N3: LHS->getOperand(Num: 2), N4: LHS->getOperand(Num: 3)); |
| 18333 | } |
| 18334 | |
| 18335 | if (!VT.isInteger()) |
| 18336 | return SDValue(); |
| 18337 | |
  // Fold away an unnecessary CMPZ/CMOV
| 18339 | // CMOV A, B, C1, (CMPZ (CMOV 1, 0, C2, D), 0) -> |
| 18340 | // if C1==EQ -> CMOV A, B, C2, D |
| 18341 | // if C1==NE -> CMOV A, B, NOT(C2), D |
| 18342 | if (N->getConstantOperandVal(Num: 2) == ARMCC::EQ || |
| 18343 | N->getConstantOperandVal(Num: 2) == ARMCC::NE) { |
| 18344 | ARMCC::CondCodes Cond; |
| 18345 | if (SDValue C = IsCMPZCSINC(Cmp: N->getOperand(Num: 3).getNode(), CC&: Cond)) { |
| 18346 | if (N->getConstantOperandVal(Num: 2) == ARMCC::NE) |
| 18347 | Cond = ARMCC::getOppositeCondition(CC: Cond); |
| 18348 | return DAG.getNode(Opcode: N->getOpcode(), DL: SDLoc(N), VT: MVT::i32, N1: N->getOperand(Num: 0), |
| 18349 | N2: N->getOperand(Num: 1), |
| 18350 | N3: DAG.getConstant(Val: Cond, DL: SDLoc(N), VT: MVT::i32), N4: C); |
| 18351 | } |
| 18352 | } |
| 18353 | |
| 18354 | // Materialize a boolean comparison for integers so we can avoid branching. |
| 18355 | if (isNullConstant(V: FalseVal)) { |
| 18356 | if (CC == ARMCC::EQ && isOneConstant(V: TrueVal)) { |
| 18357 | if (!Subtarget->isThumb1Only() && Subtarget->hasV5TOps()) { |
        // If x == y then x - y == 0 and ARM's CLZ will return 32. Shifting
        // that right by 5 bits turns the 32 into 1; otherwise the result is 0.
| 18360 | // CMOV 0, 1, ==, (CMPZ x, y) -> SRL (CTLZ (SUB x, y)), 5 |
| 18361 | SDValue Sub = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: LHS, N2: RHS); |
| 18362 | Res = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT, N1: DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT, Operand: Sub), |
| 18363 | N2: DAG.getConstant(Val: 5, DL: dl, VT: MVT::i32)); |
| 18364 | } else { |
| 18365 | // CMOV 0, 1, ==, (CMPZ x, y) -> |
| 18366 | // (UADDO_CARRY (SUB x, y), t:0, t:1) |
| 18367 | // where t = (USUBO_CARRY 0, (SUB x, y), 0) |
| 18368 | // |
| 18369 | // The USUBO_CARRY computes 0 - (x - y) and this will give a borrow when |
| 18370 | // x != y. In other words, a carry C == 1 when x == y, C == 0 |
| 18371 | // otherwise. |
| 18372 | // The final UADDO_CARRY computes |
| 18373 | // x - y + (0 - (x - y)) + C == C |
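        // For example, with x == 5 and y == 3: Sub == 2, the USUBO borrows,
        // so Carry == 0 and the UADDO_CARRY yields 2 + (-2) + 0 == 0. With
        // x == y there is no borrow, Carry == 1, and the result is 1.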
| 18374 | SDValue Sub = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: LHS, N2: RHS); |
| 18375 | SDVTList VTs = DAG.getVTList(VT1: VT, VT2: MVT::i32); |
| 18376 | SDValue Neg = DAG.getNode(Opcode: ISD::USUBO, DL: dl, VTList: VTs, N1: FalseVal, N2: Sub); |
| 18377 | // ISD::USUBO_CARRY returns a borrow but we want the carry here |
| 18378 | // actually. |
| 18379 | SDValue Carry = |
| 18380 | DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
| 18381 | N1: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32), N2: Neg.getValue(R: 1)); |
| 18382 | Res = DAG.getNode(Opcode: ISD::UADDO_CARRY, DL: dl, VTList: VTs, N1: Sub, N2: Neg, N3: Carry); |
| 18383 | } |
| 18384 | } else if (CC == ARMCC::NE && !isNullConstant(V: RHS) && |
| 18385 | (!Subtarget->isThumb1Only() || isPowerOf2Constant(V: TrueVal))) { |
| 18386 | // This seems pointless but will allow us to combine it further below. |
| 18387 | // CMOV 0, z, !=, (CMPZ x, y) -> CMOV (SUBC x, y), z, !=, (SUBC x, y):1 |
| 18388 | SDValue Sub = |
| 18389 | DAG.getNode(Opcode: ARMISD::SUBC, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: MVT::i32), N1: LHS, N2: RHS); |
| 18390 | Res = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: Sub, N2: TrueVal, N3: ARMcc, |
| 18391 | N4: Sub.getValue(R: 1)); |
| 18392 | FalseVal = Sub; |
| 18393 | } |
| 18394 | } else if (isNullConstant(V: TrueVal)) { |
| 18395 | if (CC == ARMCC::EQ && !isNullConstant(V: RHS) && |
| 18396 | (!Subtarget->isThumb1Only() || isPowerOf2Constant(V: FalseVal))) { |
| 18397 | // This seems pointless but will allow us to combine it further below |
| 18398 | // Note that we change == for != as this is the dual for the case above. |
| 18399 | // CMOV z, 0, ==, (CMPZ x, y) -> CMOV (SUBC x, y), z, !=, (SUBC x, y):1 |
| 18400 | SDValue Sub = |
| 18401 | DAG.getNode(Opcode: ARMISD::SUBC, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: MVT::i32), N1: LHS, N2: RHS); |
| 18402 | Res = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: Sub, N2: FalseVal, |
| 18403 | N3: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32), |
| 18404 | N4: Sub.getValue(R: 1)); |
| 18405 | FalseVal = Sub; |
| 18406 | } |
| 18407 | } |
| 18408 | |
| 18409 | // On Thumb1, the DAG above may be further combined if z is a power of 2 |
| 18410 | // (z == 2 ^ K). |
| 18411 | // CMOV (SUBC x, y), z, !=, (SUBC x, y):1 -> |
| 18412 | // t1 = (USUBO (SUB x, y), 1) |
| 18413 | // t2 = (USUBO_CARRY (SUB x, y), t1:0, t1:1) |
| 18414 | // Result = if K != 0 then (SHL t2:0, K) else t2:0 |
| 18415 | // |
| 18416 | // This also handles the special case of comparing against zero; it's |
  // essentially the same pattern, except there's no SUBC:
| 18418 | // CMOV x, z, !=, (CMPZ x, 0) -> |
| 18419 | // t1 = (USUBO x, 1) |
| 18420 | // t2 = (USUBO_CARRY x, t1:0, t1:1) |
| 18421 | // Result = if K != 0 then (SHL t2:0, K) else t2:0 |
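  //
  // For example, with z == 4 (K == 2) the expansion computes
  // ((x - y) != 0) ? 1 : 0 and the final SHL scales that to 0 or 4.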
| 18422 | const APInt *TrueConst; |
| 18423 | if (Subtarget->isThumb1Only() && CC == ARMCC::NE && |
| 18424 | ((FalseVal.getOpcode() == ARMISD::SUBC && FalseVal.getOperand(i: 0) == LHS && |
| 18425 | FalseVal.getOperand(i: 1) == RHS) || |
| 18426 | (FalseVal == LHS && isNullConstant(V: RHS))) && |
| 18427 | (TrueConst = isPowerOf2Constant(V: TrueVal))) { |
| 18428 | SDVTList VTs = DAG.getVTList(VT1: VT, VT2: MVT::i32); |
| 18429 | unsigned ShiftAmount = TrueConst->logBase2(); |
| 18430 | if (ShiftAmount) |
| 18431 | TrueVal = DAG.getConstant(Val: 1, DL: dl, VT); |
| 18432 | SDValue Subc = DAG.getNode(Opcode: ISD::USUBO, DL: dl, VTList: VTs, N1: FalseVal, N2: TrueVal); |
| 18433 | Res = DAG.getNode(Opcode: ISD::USUBO_CARRY, DL: dl, VTList: VTs, N1: FalseVal, N2: Subc, |
| 18434 | N3: Subc.getValue(R: 1)); |
| 18435 | |
| 18436 | if (ShiftAmount) |
| 18437 | Res = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT, N1: Res, |
| 18438 | N2: DAG.getConstant(Val: ShiftAmount, DL: dl, VT: MVT::i32)); |
| 18439 | } |
| 18440 | |
| 18441 | if (Res.getNode()) { |
| 18442 | KnownBits Known = DAG.computeKnownBits(Op: SDValue(N,0)); |
| 18443 | // Capture demanded bits information that would be otherwise lost. |
| 18444 | if (Known.Zero == 0xfffffffe) |
| 18445 | Res = DAG.getNode(Opcode: ISD::AssertZext, DL: dl, VT: MVT::i32, N1: Res, |
| 18446 | N2: DAG.getValueType(MVT::i1)); |
| 18447 | else if (Known.Zero == 0xffffff00) |
| 18448 | Res = DAG.getNode(Opcode: ISD::AssertZext, DL: dl, VT: MVT::i32, N1: Res, |
| 18449 | N2: DAG.getValueType(MVT::i8)); |
| 18450 | else if (Known.Zero == 0xffff0000) |
| 18451 | Res = DAG.getNode(Opcode: ISD::AssertZext, DL: dl, VT: MVT::i32, N1: Res, |
| 18452 | N2: DAG.getValueType(MVT::i16)); |
| 18453 | } |
| 18454 | |
| 18455 | return Res; |
| 18456 | } |
| 18457 | |
| 18458 | static SDValue PerformBITCASTCombine(SDNode *N, |
| 18459 | TargetLowering::DAGCombinerInfo &DCI, |
| 18460 | const ARMSubtarget *ST) { |
| 18461 | SelectionDAG &DAG = DCI.DAG; |
| 18462 | SDValue Src = N->getOperand(Num: 0); |
| 18463 | EVT DstVT = N->getValueType(ResNo: 0); |
| 18464 | |
| 18465 | // Convert v4f32 bitcast (v4i32 vdup (i32)) -> v4f32 vdup (i32) under MVE. |
| 18466 | if (ST->hasMVEIntegerOps() && Src.getOpcode() == ARMISD::VDUP) { |
| 18467 | EVT SrcVT = Src.getValueType(); |
| 18468 | if (SrcVT.getScalarSizeInBits() == DstVT.getScalarSizeInBits()) |
| 18469 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: SDLoc(N), VT: DstVT, Operand: Src.getOperand(i: 0)); |
| 18470 | } |
| 18471 | |
| 18472 | // We may have a bitcast of something that has already had this bitcast |
| 18473 | // combine performed on it, so skip past any VECTOR_REG_CASTs. |
| 18474 | if (Src.getOpcode() == ARMISD::VECTOR_REG_CAST && |
| 18475 | Src.getOperand(i: 0).getValueType().getScalarSizeInBits() <= |
| 18476 | Src.getValueType().getScalarSizeInBits()) |
| 18477 | Src = Src.getOperand(i: 0); |
| 18478 | |
| 18479 | // Bitcast from element-wise VMOV or VMVN doesn't need VREV if the VREV that |
| 18480 | // would be generated is at least the width of the element type. |
| 18481 | EVT SrcVT = Src.getValueType(); |
| 18482 | if ((Src.getOpcode() == ARMISD::VMOVIMM || |
| 18483 | Src.getOpcode() == ARMISD::VMVNIMM || |
| 18484 | Src.getOpcode() == ARMISD::VMOVFPIMM) && |
| 18485 | SrcVT.getScalarSizeInBits() <= DstVT.getScalarSizeInBits() && |
| 18486 | DAG.getDataLayout().isBigEndian()) |
| 18487 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: SDLoc(N), VT: DstVT, Operand: Src); |
| 18488 | |
| 18489 | // bitcast(extract(x, n)); bitcast(extract(x, n+1)) -> VMOVRRD x |
| 18490 | if (SDValue R = PerformExtractEltToVMOVRRD(N, DCI)) |
| 18491 | return R; |
| 18492 | |
| 18493 | return SDValue(); |
| 18494 | } |
| 18495 | |
// Some combines for the MVETrunc truncation legalizer helper. Also lowers the
| 18497 | // node into stack operations after legalizeOps. |
| 18498 | SDValue ARMTargetLowering::PerformMVETruncCombine( |
| 18499 | SDNode *N, TargetLowering::DAGCombinerInfo &DCI) const { |
| 18500 | SelectionDAG &DAG = DCI.DAG; |
| 18501 | EVT VT = N->getValueType(ResNo: 0); |
| 18502 | SDLoc DL(N); |
| 18503 | |
| 18504 | // MVETrunc(Undef, Undef) -> Undef |
| 18505 | if (all_of(Range: N->ops(), P: [](SDValue Op) { return Op.isUndef(); })) |
| 18506 | return DAG.getUNDEF(VT); |
| 18507 | |
  // MVETrunc(MVETrunc(a, b), MVETrunc(c, d)) -> MVETrunc(a, b, c, d)
| 18509 | if (N->getNumOperands() == 2 && |
| 18510 | N->getOperand(Num: 0).getOpcode() == ARMISD::MVETRUNC && |
| 18511 | N->getOperand(Num: 1).getOpcode() == ARMISD::MVETRUNC) |
| 18512 | return DAG.getNode(Opcode: ARMISD::MVETRUNC, DL, VT, N1: N->getOperand(Num: 0).getOperand(i: 0), |
| 18513 | N2: N->getOperand(Num: 0).getOperand(i: 1), |
| 18514 | N3: N->getOperand(Num: 1).getOperand(i: 0), |
| 18515 | N4: N->getOperand(Num: 1).getOperand(i: 1)); |
| 18516 | |
| 18517 | // MVETrunc(shuffle, shuffle) -> VMOVN |
| 18518 | if (N->getNumOperands() == 2 && |
| 18519 | N->getOperand(Num: 0).getOpcode() == ISD::VECTOR_SHUFFLE && |
| 18520 | N->getOperand(Num: 1).getOpcode() == ISD::VECTOR_SHUFFLE) { |
| 18521 | auto *S0 = cast<ShuffleVectorSDNode>(Val: N->getOperand(Num: 0).getNode()); |
| 18522 | auto *S1 = cast<ShuffleVectorSDNode>(Val: N->getOperand(Num: 1).getNode()); |
| 18523 | |
| 18524 | if (S0->getOperand(Num: 0) == S1->getOperand(Num: 0) && |
| 18525 | S0->getOperand(Num: 1) == S1->getOperand(Num: 1)) { |
| 18526 | // Construct complete shuffle mask |
| 18527 | SmallVector<int, 8> Mask(S0->getMask()); |
| 18528 | Mask.append(in_start: S1->getMask().begin(), in_end: S1->getMask().end()); |
| 18529 | |
| 18530 | if (isVMOVNTruncMask(M: Mask, ToVT: VT, rev: false)) |
| 18531 | return DAG.getNode( |
| 18532 | Opcode: ARMISD::VMOVN, DL, VT, |
| 18533 | N1: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: S0->getOperand(Num: 0)), |
| 18534 | N2: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: S0->getOperand(Num: 1)), |
| 18535 | N3: DAG.getConstant(Val: 1, DL, VT: MVT::i32)); |
| 18536 | if (isVMOVNTruncMask(M: Mask, ToVT: VT, rev: true)) |
| 18537 | return DAG.getNode( |
| 18538 | Opcode: ARMISD::VMOVN, DL, VT, |
| 18539 | N1: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: S0->getOperand(Num: 1)), |
| 18540 | N2: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: S0->getOperand(Num: 0)), |
| 18541 | N3: DAG.getConstant(Val: 1, DL, VT: MVT::i32)); |
| 18542 | } |
| 18543 | } |
| 18544 | |
| 18545 | // For MVETrunc of a buildvector or shuffle, it can be beneficial to lower the |
| 18546 | // truncate to a buildvector to allow the generic optimisations to kick in. |
| 18547 | if (all_of(Range: N->ops(), P: [](SDValue Op) { |
| 18548 | return Op.getOpcode() == ISD::BUILD_VECTOR || |
| 18549 | Op.getOpcode() == ISD::VECTOR_SHUFFLE || |
| 18550 | (Op.getOpcode() == ISD::BITCAST && |
| 18551 | Op.getOperand(i: 0).getOpcode() == ISD::BUILD_VECTOR); |
| 18552 | })) { |
    SmallVector<SDValue, 8> Extracts;
| 18554 | for (unsigned Op = 0; Op < N->getNumOperands(); Op++) { |
| 18555 | SDValue O = N->getOperand(Num: Op); |
| 18556 | for (unsigned i = 0; i < O.getValueType().getVectorNumElements(); i++) { |
| 18557 | SDValue Ext = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: MVT::i32, N1: O, |
| 18558 | N2: DAG.getConstant(Val: i, DL, VT: MVT::i32)); |
| 18559 | Extracts.push_back(Elt: Ext); |
| 18560 | } |
| 18561 | } |
| 18562 | return DAG.getBuildVector(VT, DL, Ops: Extracts); |
| 18563 | } |
| 18564 | |
| 18565 | // If we are late in the legalization process and nothing has optimised |
| 18566 | // the trunc to anything better, lower it to a stack store and reload, |
| 18567 | // performing the truncation whilst keeping the lanes in the correct order: |
| 18568 | // VSTRH.32 a, stack; VSTRH.32 b, stack+8; VLDRW.32 stack; |
| 18569 | if (!DCI.isAfterLegalizeDAG()) |
| 18570 | return SDValue(); |
| 18571 | |
| 18572 | SDValue StackPtr = DAG.CreateStackTemporary(Bytes: TypeSize::getFixed(ExactSize: 16), Alignment: Align(4)); |
| 18573 | int SPFI = cast<FrameIndexSDNode>(Val: StackPtr.getNode())->getIndex(); |
| 18574 | int NumIns = N->getNumOperands(); |
  assert((NumIns == 2 || NumIns == 4) &&
         "Expected 2 or 4 inputs to an MVETrunc");
| 18577 | EVT StoreVT = VT.getHalfNumVectorElementsVT(Context&: *DAG.getContext()); |
| 18578 | if (N->getNumOperands() == 4) |
| 18579 | StoreVT = StoreVT.getHalfNumVectorElementsVT(Context&: *DAG.getContext()); |
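  // A sketch of the layout (assuming a 4-input MVETRUNC from four v4i32 down
  // to v16i8): StoreVT becomes v4i8, so the loop below emits four
  // VSTRB.32-style truncating stores at stack+0/+4/+8/+12 and the final
  // VLDRW.32 reload sees all 16 lanes in source order.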
| 18580 | |
| 18581 | SmallVector<SDValue> Chains; |
| 18582 | for (int I = 0; I < NumIns; I++) { |
| 18583 | SDValue Ptr = DAG.getNode( |
| 18584 | Opcode: ISD::ADD, DL, VT: StackPtr.getValueType(), N1: StackPtr, |
| 18585 | N2: DAG.getConstant(Val: I * 16 / NumIns, DL, VT: StackPtr.getValueType())); |
| 18586 | MachinePointerInfo MPI = MachinePointerInfo::getFixedStack( |
| 18587 | MF&: DAG.getMachineFunction(), FI: SPFI, Offset: I * 16 / NumIns); |
| 18588 | SDValue Ch = DAG.getTruncStore(Chain: DAG.getEntryNode(), dl: DL, Val: N->getOperand(Num: I), |
| 18589 | Ptr, PtrInfo: MPI, SVT: StoreVT, Alignment: Align(4)); |
| 18590 | Chains.push_back(Elt: Ch); |
| 18591 | } |
| 18592 | |
| 18593 | SDValue Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Chains); |
| 18594 | MachinePointerInfo MPI = |
| 18595 | MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI: SPFI, Offset: 0); |
| 18596 | return DAG.getLoad(VT, dl: DL, Chain, Ptr: StackPtr, PtrInfo: MPI, Alignment: Align(4)); |
| 18597 | } |
| 18598 | |
// Take an MVEEXT(load x) and split it into (extload x, extload x+8)
| 18600 | static SDValue PerformSplittingMVEEXTToWideningLoad(SDNode *N, |
| 18601 | SelectionDAG &DAG) { |
| 18602 | SDValue N0 = N->getOperand(Num: 0); |
| 18603 | LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: N0.getNode()); |
| 18604 | if (!LD || !LD->isSimple() || !N0.hasOneUse() || LD->isIndexed()) |
| 18605 | return SDValue(); |
| 18606 | |
| 18607 | EVT FromVT = LD->getMemoryVT(); |
| 18608 | EVT ToVT = N->getValueType(ResNo: 0); |
| 18609 | if (!ToVT.isVector()) |
| 18610 | return SDValue(); |
| 18611 | assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements() * 2); |
| 18612 | EVT ToEltVT = ToVT.getVectorElementType(); |
| 18613 | EVT FromEltVT = FromVT.getVectorElementType(); |
| 18614 | |
| 18615 | unsigned NumElements = 0; |
| 18616 | if (ToEltVT == MVT::i32 && (FromEltVT == MVT::i16 || FromEltVT == MVT::i8)) |
| 18617 | NumElements = 4; |
| 18618 | if (ToEltVT == MVT::i16 && FromEltVT == MVT::i8) |
| 18619 | NumElements = 8; |
| 18620 | assert(NumElements != 0); |
| 18621 | |
| 18622 | ISD::LoadExtType NewExtType = |
| 18623 | N->getOpcode() == ARMISD::MVESEXT ? ISD::SEXTLOAD : ISD::ZEXTLOAD; |
| 18624 | if (LD->getExtensionType() != ISD::NON_EXTLOAD && |
| 18625 | LD->getExtensionType() != ISD::EXTLOAD && |
| 18626 | LD->getExtensionType() != NewExtType) |
| 18627 | return SDValue(); |
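  // A sketch of the split (assuming MVESEXT of a v16i8 load producing two
  // v8i16 results): NewFromVT/NewToVT become v8i8/v8i16 and the loop below
  // emits two sign-extending loads (VLDRB.S16-style) at offsets 0 and 8,
  // keeping the lane order without any cross-lane shuffle.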
| 18628 | |
| 18629 | LLVMContext &C = *DAG.getContext(); |
| 18630 | SDLoc DL(LD); |
| 18631 | // Details about the old load |
| 18632 | SDValue Ch = LD->getChain(); |
| 18633 | SDValue BasePtr = LD->getBasePtr(); |
| 18634 | Align Alignment = LD->getBaseAlign(); |
| 18635 | MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags(); |
| 18636 | AAMDNodes AAInfo = LD->getAAInfo(); |
| 18637 | |
| 18638 | SDValue Offset = DAG.getUNDEF(VT: BasePtr.getValueType()); |
| 18639 | EVT NewFromVT = EVT::getVectorVT( |
| 18640 | Context&: C, VT: EVT::getIntegerVT(Context&: C, BitWidth: FromEltVT.getScalarSizeInBits()), NumElements); |
| 18641 | EVT NewToVT = EVT::getVectorVT( |
| 18642 | Context&: C, VT: EVT::getIntegerVT(Context&: C, BitWidth: ToEltVT.getScalarSizeInBits()), NumElements); |
| 18643 | |
| 18644 | SmallVector<SDValue, 4> Loads; |
| 18645 | SmallVector<SDValue, 4> Chains; |
| 18646 | for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) { |
| 18647 | unsigned NewOffset = (i * NewFromVT.getSizeInBits()) / 8; |
| 18648 | SDValue NewPtr = |
| 18649 | DAG.getObjectPtrOffset(SL: DL, Ptr: BasePtr, Offset: TypeSize::getFixed(ExactSize: NewOffset)); |
| 18650 | |
| 18651 | SDValue NewLoad = |
| 18652 | DAG.getLoad(AM: ISD::UNINDEXED, ExtType: NewExtType, VT: NewToVT, dl: DL, Chain: Ch, Ptr: NewPtr, Offset, |
| 18653 | PtrInfo: LD->getPointerInfo().getWithOffset(O: NewOffset), MemVT: NewFromVT, |
| 18654 | Alignment, MMOFlags, AAInfo); |
| 18655 | Loads.push_back(Elt: NewLoad); |
| 18656 | Chains.push_back(Elt: SDValue(NewLoad.getNode(), 1)); |
| 18657 | } |
| 18658 | |
| 18659 | SDValue NewChain = DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Chains); |
| 18660 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 1), To: NewChain); |
| 18661 | return DAG.getMergeValues(Ops: Loads, dl: DL); |
| 18662 | } |
| 18663 | |
// Perform combines for MVEEXT. If it has not been optimized to anything better
// before lowering, it gets converted to a stack store and extloads performing
// the extend whilst still keeping the same lane ordering.
| 18667 | SDValue ARMTargetLowering::PerformMVEExtCombine( |
| 18668 | SDNode *N, TargetLowering::DAGCombinerInfo &DCI) const { |
| 18669 | SelectionDAG &DAG = DCI.DAG; |
| 18670 | EVT VT = N->getValueType(ResNo: 0); |
| 18671 | SDLoc DL(N); |
  assert(N->getNumValues() == 2 && "Expected MVEEXT with 2 elements");
  assert((VT == MVT::v4i32 || VT == MVT::v8i16) && "Unexpected MVEEXT type");
| 18674 | |
| 18675 | EVT ExtVT = N->getOperand(Num: 0).getValueType().getHalfNumVectorElementsVT( |
| 18676 | Context&: *DAG.getContext()); |
| 18677 | auto Extend = [&](SDValue V) { |
| 18678 | SDValue VVT = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: V); |
| 18679 | return N->getOpcode() == ARMISD::MVESEXT |
| 18680 | ? DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL, VT, N1: VVT, |
| 18681 | N2: DAG.getValueType(ExtVT)) |
| 18682 | : DAG.getZeroExtendInReg(Op: VVT, DL, VT: ExtVT); |
| 18683 | }; |
| 18684 | |
| 18685 | // MVEEXT(VDUP) -> SIGN_EXTEND_INREG(VDUP) |
| 18686 | if (N->getOperand(Num: 0).getOpcode() == ARMISD::VDUP) { |
| 18687 | SDValue Ext = Extend(N->getOperand(Num: 0)); |
| 18688 | return DAG.getMergeValues(Ops: {Ext, Ext}, dl: DL); |
| 18689 | } |
| 18690 | |
| 18691 | // MVEEXT(shuffle) -> SIGN_EXTEND_INREG/ZERO_EXTEND_INREG |
| 18692 | if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Val: N->getOperand(Num: 0))) { |
| 18693 | ArrayRef<int> Mask = SVN->getMask(); |
| 18694 | assert(Mask.size() == 2 * VT.getVectorNumElements()); |
| 18695 | assert(Mask.size() == SVN->getValueType(0).getVectorNumElements()); |
| 18696 | unsigned Rev = VT == MVT::v4i32 ? ARMISD::VREV32 : ARMISD::VREV16; |
| 18697 | SDValue Op0 = SVN->getOperand(Num: 0); |
| 18698 | SDValue Op1 = SVN->getOperand(Num: 1); |
| 18699 | |
| 18700 | auto CheckInregMask = [&](int Start, int Offset) { |
| 18701 | for (int Idx = 0, E = VT.getVectorNumElements(); Idx < E; ++Idx) |
| 18702 | if (Mask[Start + Idx] >= 0 && Mask[Start + Idx] != Idx * 2 + Offset) |
| 18703 | return false; |
| 18704 | return true; |
| 18705 | }; |
| 18706 | SDValue V0 = SDValue(N, 0); |
| 18707 | SDValue V1 = SDValue(N, 1); |
| 18708 | if (CheckInregMask(0, 0)) |
| 18709 | V0 = Extend(Op0); |
| 18710 | else if (CheckInregMask(0, 1)) |
| 18711 | V0 = Extend(DAG.getNode(Opcode: Rev, DL, VT: SVN->getValueType(ResNo: 0), Operand: Op0)); |
| 18712 | else if (CheckInregMask(0, Mask.size())) |
| 18713 | V0 = Extend(Op1); |
| 18714 | else if (CheckInregMask(0, Mask.size() + 1)) |
| 18715 | V0 = Extend(DAG.getNode(Opcode: Rev, DL, VT: SVN->getValueType(ResNo: 0), Operand: Op1)); |
| 18716 | |
| 18717 | if (CheckInregMask(VT.getVectorNumElements(), Mask.size())) |
| 18718 | V1 = Extend(Op1); |
| 18719 | else if (CheckInregMask(VT.getVectorNumElements(), Mask.size() + 1)) |
| 18720 | V1 = Extend(DAG.getNode(Opcode: Rev, DL, VT: SVN->getValueType(ResNo: 0), Operand: Op1)); |
| 18721 | else if (CheckInregMask(VT.getVectorNumElements(), 0)) |
| 18722 | V1 = Extend(Op0); |
| 18723 | else if (CheckInregMask(VT.getVectorNumElements(), 1)) |
| 18724 | V1 = Extend(DAG.getNode(Opcode: Rev, DL, VT: SVN->getValueType(ResNo: 0), Operand: Op0)); |
| 18725 | |
| 18726 | if (V0.getNode() != N || V1.getNode() != N) |
| 18727 | return DAG.getMergeValues(Ops: {V0, V1}, dl: DL); |
| 18728 | } |
| 18729 | |
| 18730 | // MVEEXT(load) -> extload, extload |
| 18731 | if (N->getOperand(Num: 0)->getOpcode() == ISD::LOAD) |
| 18732 | if (SDValue L = PerformSplittingMVEEXTToWideningLoad(N, DAG)) |
| 18733 | return L; |
| 18734 | |
| 18735 | if (!DCI.isAfterLegalizeDAG()) |
| 18736 | return SDValue(); |
| 18737 | |
| 18738 | // Lower to a stack store and reload: |
| 18739 | // VSTRW.32 a, stack; VLDRH.32 stack; VLDRH.32 stack+8; |
| 18740 | SDValue StackPtr = DAG.CreateStackTemporary(Bytes: TypeSize::getFixed(ExactSize: 16), Alignment: Align(4)); |
| 18741 | int SPFI = cast<FrameIndexSDNode>(Val: StackPtr.getNode())->getIndex(); |
| 18742 | int NumOuts = N->getNumValues(); |
  assert((NumOuts == 2 || NumOuts == 4) &&
         "Expected 2 or 4 outputs to an MVEEXT");
| 18745 | EVT LoadVT = N->getOperand(Num: 0).getValueType().getHalfNumVectorElementsVT( |
| 18746 | Context&: *DAG.getContext()); |
| 18747 | if (N->getNumOperands() == 4) |
| 18748 | LoadVT = LoadVT.getHalfNumVectorElementsVT(Context&: *DAG.getContext()); |
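  // A sketch of the reload (assuming a 2-output MVESEXT from v16i8 to two
  // v8i16): LoadVT becomes v8i8, so the loop below issues two sign-extending
  // VLDRB.S16-style loads at stack+0 and stack+8.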
| 18749 | |
| 18750 | MachinePointerInfo MPI = |
| 18751 | MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI: SPFI, Offset: 0); |
| 18752 | SDValue Chain = DAG.getStore(Chain: DAG.getEntryNode(), dl: DL, Val: N->getOperand(Num: 0), |
| 18753 | Ptr: StackPtr, PtrInfo: MPI, Alignment: Align(4)); |
| 18754 | |
| 18755 | SmallVector<SDValue> Loads; |
| 18756 | for (int I = 0; I < NumOuts; I++) { |
| 18757 | SDValue Ptr = DAG.getNode( |
| 18758 | Opcode: ISD::ADD, DL, VT: StackPtr.getValueType(), N1: StackPtr, |
| 18759 | N2: DAG.getConstant(Val: I * 16 / NumOuts, DL, VT: StackPtr.getValueType())); |
| 18760 | MachinePointerInfo MPI = MachinePointerInfo::getFixedStack( |
| 18761 | MF&: DAG.getMachineFunction(), FI: SPFI, Offset: I * 16 / NumOuts); |
| 18762 | SDValue Load = DAG.getExtLoad( |
| 18763 | ExtType: N->getOpcode() == ARMISD::MVESEXT ? ISD::SEXTLOAD : ISD::ZEXTLOAD, dl: DL, |
| 18764 | VT, Chain, Ptr, PtrInfo: MPI, MemVT: LoadVT, Alignment: Align(4)); |
| 18765 | Loads.push_back(Elt: Load); |
| 18766 | } |
| 18767 | |
| 18768 | return DAG.getMergeValues(Ops: Loads, dl: DL); |
| 18769 | } |
| 18770 | |
| 18771 | SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, |
| 18772 | DAGCombinerInfo &DCI) const { |
| 18773 | switch (N->getOpcode()) { |
| 18774 | default: break; |
| 18775 | case ISD::SELECT_CC: |
| 18776 | case ISD::SELECT: return PerformSELECTCombine(N, DCI, Subtarget); |
| 18777 | case ISD::VSELECT: return PerformVSELECTCombine(N, DCI, Subtarget); |
| 18778 | case ISD::SETCC: return PerformVSetCCToVCTPCombine(N, DCI, Subtarget); |
| 18779 | case ARMISD::ADDE: return PerformADDECombine(N, DCI, Subtarget); |
| 18780 | case ARMISD::UMLAL: return PerformUMLALCombine(N, DAG&: DCI.DAG, Subtarget); |
| 18781 | case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget); |
| 18782 | case ISD::SUB: return PerformSUBCombine(N, DCI, Subtarget); |
| 18783 | case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); |
| 18784 | case ISD::OR: return PerformORCombine(N, DCI, Subtarget); |
| 18785 | case ISD::XOR: return PerformXORCombine(N, DCI, Subtarget); |
| 18786 | case ISD::AND: return PerformANDCombine(N, DCI, Subtarget); |
| 18787 | case ISD::BRCOND: |
| 18788 | case ISD::BR_CC: return PerformHWLoopCombine(N, DCI, ST: Subtarget); |
| 18789 | case ARMISD::ADDC: |
| 18790 | case ARMISD::SUBC: return PerformAddcSubcCombine(N, DCI, Subtarget); |
| 18791 | case ARMISD::SUBE: return PerformAddeSubeCombine(N, DCI, Subtarget); |
| 18792 | case ARMISD::BFI: return PerformBFICombine(N, DAG&: DCI.DAG); |
| 18793 | case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget); |
| 18794 | case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DAG&: DCI.DAG); |
| 18795 | case ARMISD::VMOVhr: return PerformVMOVhrCombine(N, DCI); |
| 18796 | case ARMISD::VMOVrh: return PerformVMOVrhCombine(N, DAG&: DCI.DAG); |
| 18797 | case ISD::STORE: return PerformSTORECombine(N, DCI, Subtarget); |
| 18798 | case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget); |
| 18799 | case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); |
| 18800 | case ISD::EXTRACT_VECTOR_ELT: |
| 18801 | return PerformExtractEltCombine(N, DCI, ST: Subtarget); |
| 18802 | case ISD::SIGN_EXTEND_INREG: return PerformSignExtendInregCombine(N, DAG&: DCI.DAG); |
| 18803 | case ISD::INSERT_SUBVECTOR: return PerformInsertSubvectorCombine(N, DCI); |
| 18804 | case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DAG&: DCI.DAG); |
| 18805 | case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI, Subtarget); |
| 18806 | case ARMISD::VDUP: return PerformVDUPCombine(N, DAG&: DCI.DAG, Subtarget); |
| 18807 | case ISD::FP_TO_SINT: |
| 18808 | case ISD::FP_TO_UINT: |
| 18809 | return PerformVCVTCombine(N, DAG&: DCI.DAG, Subtarget); |
| 18810 | case ISD::FADD: |
| 18811 | return PerformFADDCombine(N, DAG&: DCI.DAG, Subtarget); |
| 18812 | case ISD::FMUL: |
| 18813 | return PerformVMulVCTPCombine(N, DAG&: DCI.DAG, Subtarget); |
| 18814 | case ISD::INTRINSIC_WO_CHAIN: |
| 18815 | return PerformIntrinsicCombine(N, DCI); |
| 18816 | case ISD::SHL: |
| 18817 | case ISD::SRA: |
| 18818 | case ISD::SRL: |
| 18819 | return PerformShiftCombine(N, DCI, ST: Subtarget); |
| 18820 | case ISD::SIGN_EXTEND: |
| 18821 | case ISD::ZERO_EXTEND: |
| 18822 | case ISD::ANY_EXTEND: |
| 18823 | return PerformExtendCombine(N, DAG&: DCI.DAG, ST: Subtarget); |
| 18824 | case ISD::FP_EXTEND: |
| 18825 | return PerformFPExtendCombine(N, DAG&: DCI.DAG, ST: Subtarget); |
| 18826 | case ISD::SMIN: |
| 18827 | case ISD::UMIN: |
| 18828 | case ISD::SMAX: |
| 18829 | case ISD::UMAX: |
| 18830 | return PerformMinMaxCombine(N, DAG&: DCI.DAG, ST: Subtarget); |
| 18831 | case ARMISD::CMOV: |
| 18832 | return PerformCMOVCombine(N, DAG&: DCI.DAG); |
| 18833 | case ARMISD::BRCOND: |
| 18834 | return PerformBRCONDCombine(N, DAG&: DCI.DAG); |
| 18835 | case ARMISD::CMPZ: |
| 18836 | return PerformCMPZCombine(N, DAG&: DCI.DAG); |
| 18837 | case ARMISD::CSINC: |
| 18838 | case ARMISD::CSINV: |
| 18839 | case ARMISD::CSNEG: |
| 18840 | return PerformCSETCombine(N, DAG&: DCI.DAG); |
| 18841 | case ISD::LOAD: |
| 18842 | return PerformLOADCombine(N, DCI, Subtarget); |
| 18843 | case ARMISD::VLD1DUP: |
| 18844 | case ARMISD::VLD2DUP: |
| 18845 | case ARMISD::VLD3DUP: |
| 18846 | case ARMISD::VLD4DUP: |
| 18847 | return PerformVLDCombine(N, DCI); |
| 18848 | case ARMISD::BUILD_VECTOR: |
| 18849 | return PerformARMBUILD_VECTORCombine(N, DCI); |
| 18850 | case ISD::BITCAST: |
| 18851 | return PerformBITCASTCombine(N, DCI, ST: Subtarget); |
| 18852 | case ARMISD::PREDICATE_CAST: |
| 18853 | return PerformPREDICATE_CASTCombine(N, DCI); |
| 18854 | case ARMISD::VECTOR_REG_CAST: |
| 18855 | return PerformVECTOR_REG_CASTCombine(N, DAG&: DCI.DAG, ST: Subtarget); |
| 18856 | case ARMISD::MVETRUNC: |
| 18857 | return PerformMVETruncCombine(N, DCI); |
| 18858 | case ARMISD::MVESEXT: |
| 18859 | case ARMISD::MVEZEXT: |
| 18860 | return PerformMVEExtCombine(N, DCI); |
| 18861 | case ARMISD::VCMP: |
| 18862 | return PerformVCMPCombine(N, DAG&: DCI.DAG, Subtarget); |
| 18863 | case ISD::VECREDUCE_ADD: |
| 18864 | return PerformVECREDUCE_ADDCombine(N, DAG&: DCI.DAG, ST: Subtarget); |
| 18865 | case ARMISD::VADDVs: |
| 18866 | case ARMISD::VADDVu: |
| 18867 | case ARMISD::VADDLVs: |
| 18868 | case ARMISD::VADDLVu: |
| 18869 | case ARMISD::VADDLVAs: |
| 18870 | case ARMISD::VADDLVAu: |
| 18871 | case ARMISD::VMLAVs: |
| 18872 | case ARMISD::VMLAVu: |
| 18873 | case ARMISD::VMLALVs: |
| 18874 | case ARMISD::VMLALVu: |
| 18875 | case ARMISD::VMLALVAs: |
| 18876 | case ARMISD::VMLALVAu: |
| 18877 | return PerformReduceShuffleCombine(N, DAG&: DCI.DAG); |
| 18878 | case ARMISD::VMOVN: |
| 18879 | return PerformVMOVNCombine(N, DCI); |
| 18880 | case ARMISD::VQMOVNs: |
| 18881 | case ARMISD::VQMOVNu: |
| 18882 | return PerformVQMOVNCombine(N, DCI); |
| 18883 | case ARMISD::VQDMULH: |
| 18884 | return PerformVQDMULHCombine(N, DCI); |
| 18885 | case ARMISD::ASRL: |
| 18886 | case ARMISD::LSRL: |
| 18887 | case ARMISD::LSLL: |
| 18888 | return PerformLongShiftCombine(N, DAG&: DCI.DAG); |
| 18889 | case ARMISD::SMULWB: { |
| 18890 | unsigned BitWidth = N->getValueType(ResNo: 0).getSizeInBits(); |
| 18891 | APInt DemandedMask = APInt::getLowBitsSet(numBits: BitWidth, loBitsSet: 16); |
| 18892 | if (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI)) |
| 18893 | return SDValue(); |
| 18894 | break; |
| 18895 | } |
| 18896 | case ARMISD::SMULWT: { |
| 18897 | unsigned BitWidth = N->getValueType(ResNo: 0).getSizeInBits(); |
| 18898 | APInt DemandedMask = APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: 16); |
| 18899 | if (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI)) |
| 18900 | return SDValue(); |
| 18901 | break; |
| 18902 | } |
| 18903 | case ARMISD::SMLALBB: |
| 18904 | case ARMISD::QADD16b: |
| 18905 | case ARMISD::QSUB16b: |
| 18906 | case ARMISD::UQADD16b: |
| 18907 | case ARMISD::UQSUB16b: { |
| 18908 | unsigned BitWidth = N->getValueType(ResNo: 0).getSizeInBits(); |
| 18909 | APInt DemandedMask = APInt::getLowBitsSet(numBits: BitWidth, loBitsSet: 16); |
| 18910 | if ((SimplifyDemandedBits(Op: N->getOperand(Num: 0), DemandedBits: DemandedMask, DCI)) || |
| 18911 | (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI))) |
| 18912 | return SDValue(); |
| 18913 | break; |
| 18914 | } |
| 18915 | case ARMISD::SMLALBT: { |
| 18916 | unsigned LowWidth = N->getOperand(Num: 0).getValueType().getSizeInBits(); |
| 18917 | APInt LowMask = APInt::getLowBitsSet(numBits: LowWidth, loBitsSet: 16); |
| 18918 | unsigned HighWidth = N->getOperand(Num: 1).getValueType().getSizeInBits(); |
| 18919 | APInt HighMask = APInt::getHighBitsSet(numBits: HighWidth, hiBitsSet: 16); |
| 18920 | if ((SimplifyDemandedBits(Op: N->getOperand(Num: 0), DemandedBits: LowMask, DCI)) || |
| 18921 | (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: HighMask, DCI))) |
| 18922 | return SDValue(); |
| 18923 | break; |
| 18924 | } |
| 18925 | case ARMISD::SMLALTB: { |
| 18926 | unsigned HighWidth = N->getOperand(Num: 0).getValueType().getSizeInBits(); |
| 18927 | APInt HighMask = APInt::getHighBitsSet(numBits: HighWidth, hiBitsSet: 16); |
| 18928 | unsigned LowWidth = N->getOperand(Num: 1).getValueType().getSizeInBits(); |
| 18929 | APInt LowMask = APInt::getLowBitsSet(numBits: LowWidth, loBitsSet: 16); |
| 18930 | if ((SimplifyDemandedBits(Op: N->getOperand(Num: 0), DemandedBits: HighMask, DCI)) || |
| 18931 | (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: LowMask, DCI))) |
| 18932 | return SDValue(); |
| 18933 | break; |
| 18934 | } |
| 18935 | case ARMISD::SMLALTT: { |
| 18936 | unsigned BitWidth = N->getValueType(ResNo: 0).getSizeInBits(); |
| 18937 | APInt DemandedMask = APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: 16); |
| 18938 | if ((SimplifyDemandedBits(Op: N->getOperand(Num: 0), DemandedBits: DemandedMask, DCI)) || |
| 18939 | (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI))) |
| 18940 | return SDValue(); |
| 18941 | break; |
| 18942 | } |
| 18943 | case ARMISD::QADD8b: |
| 18944 | case ARMISD::QSUB8b: |
| 18945 | case ARMISD::UQADD8b: |
| 18946 | case ARMISD::UQSUB8b: { |
| 18947 | unsigned BitWidth = N->getValueType(ResNo: 0).getSizeInBits(); |
| 18948 | APInt DemandedMask = APInt::getLowBitsSet(numBits: BitWidth, loBitsSet: 8); |
| 18949 | if ((SimplifyDemandedBits(Op: N->getOperand(Num: 0), DemandedBits: DemandedMask, DCI)) || |
| 18950 | (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI))) |
| 18951 | return SDValue(); |
| 18952 | break; |
| 18953 | } |
| 18954 | case ARMISD::VBSP: |
| 18955 | if (N->getOperand(Num: 1) == N->getOperand(Num: 2)) |
| 18956 | return N->getOperand(Num: 1); |
| 18957 | return SDValue(); |
| 18958 | case ISD::INTRINSIC_VOID: |
| 18959 | case ISD::INTRINSIC_W_CHAIN: |
| 18960 | switch (N->getConstantOperandVal(Num: 1)) { |
| 18961 | case Intrinsic::arm_neon_vld1: |
| 18962 | case Intrinsic::arm_neon_vld1x2: |
| 18963 | case Intrinsic::arm_neon_vld1x3: |
| 18964 | case Intrinsic::arm_neon_vld1x4: |
| 18965 | case Intrinsic::arm_neon_vld2: |
| 18966 | case Intrinsic::arm_neon_vld3: |
| 18967 | case Intrinsic::arm_neon_vld4: |
| 18968 | case Intrinsic::arm_neon_vld2lane: |
| 18969 | case Intrinsic::arm_neon_vld3lane: |
| 18970 | case Intrinsic::arm_neon_vld4lane: |
| 18971 | case Intrinsic::arm_neon_vld2dup: |
| 18972 | case Intrinsic::arm_neon_vld3dup: |
| 18973 | case Intrinsic::arm_neon_vld4dup: |
| 18974 | case Intrinsic::arm_neon_vst1: |
| 18975 | case Intrinsic::arm_neon_vst1x2: |
| 18976 | case Intrinsic::arm_neon_vst1x3: |
| 18977 | case Intrinsic::arm_neon_vst1x4: |
| 18978 | case Intrinsic::arm_neon_vst2: |
| 18979 | case Intrinsic::arm_neon_vst3: |
| 18980 | case Intrinsic::arm_neon_vst4: |
| 18981 | case Intrinsic::arm_neon_vst2lane: |
| 18982 | case Intrinsic::arm_neon_vst3lane: |
| 18983 | case Intrinsic::arm_neon_vst4lane: |
| 18984 | return PerformVLDCombine(N, DCI); |
| 18985 | case Intrinsic::arm_mve_vld2q: |
| 18986 | case Intrinsic::arm_mve_vld4q: |
| 18987 | case Intrinsic::arm_mve_vst2q: |
| 18988 | case Intrinsic::arm_mve_vst4q: |
| 18989 | return PerformMVEVLDCombine(N, DCI); |
| 18990 | default: break; |
| 18991 | } |
| 18992 | break; |
| 18993 | } |
| 18994 | return SDValue(); |
| 18995 | } |
| 18996 | |
| 18997 | bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc, |
| 18998 | EVT VT) const { |
| 18999 | return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE); |
| 19000 | } |
| 19001 | |
| 19002 | bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned, |
| 19003 | Align Alignment, |
| 19004 | MachineMemOperand::Flags, |
| 19005 | unsigned *Fast) const { |
| 19006 | // Depends what it gets converted into if the type is weird. |
| 19007 | if (!VT.isSimple()) |
| 19008 | return false; |
| 19009 | |
  // The AllowsUnaligned flag models the SCTLR.A setting in ARM CPUs.
| 19011 | bool AllowsUnaligned = Subtarget->allowsUnalignedMem(); |
| 19012 | auto Ty = VT.getSimpleVT().SimpleTy; |
| 19013 | |
| 19014 | if (Ty == MVT::i8 || Ty == MVT::i16 || Ty == MVT::i32) { |
    // Unaligned access can use (for example) LDRB, LDRH, LDR
| 19016 | if (AllowsUnaligned) { |
| 19017 | if (Fast) |
| 19018 | *Fast = Subtarget->hasV7Ops(); |
| 19019 | return true; |
| 19020 | } |
| 19021 | } |
| 19022 | |
| 19023 | if (Ty == MVT::f64 || Ty == MVT::v2f64) { |
| 19024 | // For any little-endian targets with neon, we can support unaligned ld/st |
| 19025 | // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8. |
| 19026 | // A big-endian target may also explicitly support unaligned accesses |
| 19027 | if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) { |
| 19028 | if (Fast) |
| 19029 | *Fast = 1; |
| 19030 | return true; |
| 19031 | } |
| 19032 | } |
| 19033 | |
| 19034 | if (!Subtarget->hasMVEIntegerOps()) |
| 19035 | return false; |
| 19036 | |
| 19037 | // These are for predicates |
| 19038 | if ((Ty == MVT::v16i1 || Ty == MVT::v8i1 || Ty == MVT::v4i1 || |
| 19039 | Ty == MVT::v2i1)) { |
| 19040 | if (Fast) |
| 19041 | *Fast = 1; |
| 19042 | return true; |
| 19043 | } |
| 19044 | |
| 19045 | // These are for truncated stores/narrowing loads. They are fine so long as |
| 19046 | // the alignment is at least the size of the item being loaded |
| 19047 | if ((Ty == MVT::v4i8 || Ty == MVT::v8i8 || Ty == MVT::v4i16) && |
| 19048 | Alignment >= VT.getScalarSizeInBits() / 8) { |
| 19049 | if (Fast) |
| 19050 | *Fast = true; |
| 19051 | return true; |
| 19052 | } |
| 19053 | |
| 19054 | // In little-endian MVE, the store instructions VSTRB.U8, VSTRH.U16 and |
| 19055 | // VSTRW.U32 all store the vector register in exactly the same format, and |
| 19056 | // differ only in the range of their immediate offset field and the required |
| 19057 | // alignment. So there is always a store that can be used, regardless of |
| 19058 | // actual type. |
| 19059 | // |
  // For big endian, that is not the case, but we can still emit a (VSTRB.U8;
  // VREV64.8) pair and get the same effect. This will likely be better than
| 19062 | // aligning the vector through the stack. |
| 19063 | if (Ty == MVT::v16i8 || Ty == MVT::v8i16 || Ty == MVT::v8f16 || |
| 19064 | Ty == MVT::v4i32 || Ty == MVT::v4f32 || Ty == MVT::v2i64 || |
| 19065 | Ty == MVT::v2f64) { |
| 19066 | if (Fast) |
| 19067 | *Fast = 1; |
| 19068 | return true; |
| 19069 | } |
| 19070 | |
| 19071 | return false; |
| 19072 | } |
| 19073 | |
| 19074 | EVT ARMTargetLowering::getOptimalMemOpType( |
| 19075 | LLVMContext &Context, const MemOp &Op, |
| 19076 | const AttributeList &FuncAttributes) const { |
| 19077 | // See if we can use NEON instructions for this... |
| 19078 | if ((Op.isMemcpy() || Op.isZeroMemset()) && Subtarget->hasNEON() && |
| 19079 | !FuncAttributes.hasFnAttr(Kind: Attribute::NoImplicitFloat)) { |
| 19080 | unsigned Fast; |
| 19081 | if (Op.size() >= 16 && |
| 19082 | (Op.isAligned(AlignCheck: Align(16)) || |
| 19083 | (allowsMisalignedMemoryAccesses(VT: MVT::v2f64, 0, Alignment: Align(1), |
| 19084 | MachineMemOperand::MONone, Fast: &Fast) && |
| 19085 | Fast))) { |
| 19086 | return MVT::v2f64; |
| 19087 | } else if (Op.size() >= 8 && |
| 19088 | (Op.isAligned(AlignCheck: Align(8)) || |
| 19089 | (allowsMisalignedMemoryAccesses( |
| 19090 | VT: MVT::f64, 0, Alignment: Align(1), MachineMemOperand::MONone, Fast: &Fast) && |
| 19091 | Fast))) { |
| 19092 | return MVT::f64; |
| 19093 | } |
| 19094 | } |
| 19095 | |
| 19096 | // Let the target-independent logic figure it out. |
| 19097 | return MVT::Other; |
| 19098 | } |
| 19099 | |
| 19100 | // 64-bit integers are split into their high and low parts and held in two |
| 19101 | // different registers, so the trunc is free since the low register can just |
| 19102 | // be used. |
| 19103 | bool ARMTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const { |
| 19104 | if (!SrcTy->isIntegerTy() || !DstTy->isIntegerTy()) |
| 19105 | return false; |
| 19106 | unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); |
| 19107 | unsigned DestBits = DstTy->getPrimitiveSizeInBits(); |
| 19108 | return (SrcBits == 64 && DestBits == 32); |
| 19109 | } |
| 19110 | |
| 19111 | bool ARMTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const { |
| 19112 | if (SrcVT.isVector() || DstVT.isVector() || !SrcVT.isInteger() || |
| 19113 | !DstVT.isInteger()) |
| 19114 | return false; |
| 19115 | unsigned SrcBits = SrcVT.getSizeInBits(); |
| 19116 | unsigned DestBits = DstVT.getSizeInBits(); |
| 19117 | return (SrcBits == 64 && DestBits == 32); |
| 19118 | } |
| 19119 | |
| 19120 | bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { |
| 19121 | if (Val.getOpcode() != ISD::LOAD) |
| 19122 | return false; |
| 19123 | |
| 19124 | EVT VT1 = Val.getValueType(); |
| 19125 | if (!VT1.isSimple() || !VT1.isInteger() || |
| 19126 | !VT2.isSimple() || !VT2.isInteger()) |
| 19127 | return false; |
| 19128 | |
| 19129 | switch (VT1.getSimpleVT().SimpleTy) { |
| 19130 | default: break; |
| 19131 | case MVT::i1: |
| 19132 | case MVT::i8: |
| 19133 | case MVT::i16: |
| 19134 | // 8-bit and 16-bit loads implicitly zero-extend to 32-bits. |
| 19135 | return true; |
| 19136 | } |
| 19137 | |
| 19138 | return false; |
| 19139 | } |
| 19140 | |
| 19141 | bool ARMTargetLowering::isFNegFree(EVT VT) const { |
| 19142 | if (!VT.isSimple()) |
| 19143 | return false; |
| 19144 | |
| 19145 | // There are quite a few FP16 instructions (e.g. VNMLA, VNMLS, etc.) that |
| 19146 | // negate values directly (fneg is free). So, we don't want to let the DAG |
| 19147 | // combiner rewrite fneg into xors and some other instructions. For f16 and |
  // FullFP16 argument passing, some bitcast nodes may be introduced,
  // triggering this DAG combine rewrite, which is what this hook avoids.
| 19150 | switch (VT.getSimpleVT().SimpleTy) { |
| 19151 | default: break; |
| 19152 | case MVT::f16: |
| 19153 | return Subtarget->hasFullFP16(); |
| 19154 | } |
| 19155 | |
| 19156 | return false; |
| 19157 | } |
| 19158 | |
| 19159 | Type *ARMTargetLowering::shouldConvertSplatType(ShuffleVectorInst *SVI) const { |
| 19160 | if (!Subtarget->hasMVEIntegerOps()) |
| 19161 | return nullptr; |
| 19162 | Type *SVIType = SVI->getType(); |
| 19163 | Type *ScalarType = SVIType->getScalarType(); |
| 19164 | |
| 19165 | if (ScalarType->isFloatTy()) |
| 19166 | return Type::getInt32Ty(C&: SVIType->getContext()); |
| 19167 | if (ScalarType->isHalfTy()) |
| 19168 | return Type::getInt16Ty(C&: SVIType->getContext()); |
| 19169 | return nullptr; |
| 19170 | } |
| 19171 | |
| 19172 | bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const { |
| 19173 | EVT VT = ExtVal.getValueType(); |
| 19174 | |
| 19175 | if (!isTypeLegal(VT)) |
| 19176 | return false; |
| 19177 | |
| 19178 | if (auto *Ld = dyn_cast<MaskedLoadSDNode>(Val: ExtVal.getOperand(i: 0))) { |
| 19179 | if (Ld->isExpandingLoad()) |
| 19180 | return false; |
| 19181 | } |
| 19182 | |
| 19183 | if (Subtarget->hasMVEIntegerOps()) |
| 19184 | return true; |
| 19185 | |
| 19186 | // Don't create a loadext if we can fold the extension into a wide/long |
| 19187 | // instruction. |
| 19188 | // If there's more than one user instruction, the loadext is desirable no |
| 19189 | // matter what. There can be two uses by the same instruction. |
| 19190 | if (ExtVal->use_empty() || |
| 19191 | !ExtVal->user_begin()->isOnlyUserOf(N: ExtVal.getNode())) |
| 19192 | return true; |
| 19193 | |
| 19194 | SDNode *U = *ExtVal->user_begin(); |
| 19195 | if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB || |
| 19196 | U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHLIMM)) |
| 19197 | return false; |
| 19198 | |
| 19199 | return true; |
| 19200 | } |
| 19201 | |
| 19202 | bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { |
| 19203 | if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) |
| 19204 | return false; |
| 19205 | |
| 19206 | if (!isTypeLegal(VT: EVT::getEVT(Ty: Ty1))) |
| 19207 | return false; |
| 19208 | |
  assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
| 19210 | |
| 19211 | // Assuming the caller doesn't have a zeroext or signext return parameter, |
| 19212 | // truncation all the way down to i1 is valid. |
| 19213 | return true; |
| 19214 | } |
| 19215 | |
| 19216 | /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster |
| 19217 | /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be |
| 19218 | /// expanded to FMAs when this method returns true, otherwise fmuladd is |
| 19219 | /// expanded to fmul + fadd. |
| 19220 | /// |
| 19221 | /// ARM supports both fused and unfused multiply-add operations; we already |
| 19222 | /// lower a pair of fmul and fadd to the latter so it's not clear that there |
| 19223 | /// would be a gain or that the gain would be worthwhile enough to risk |
| 19224 | /// correctness bugs. |
| 19225 | /// |
| 19226 | /// For MVE, we set this to true as it helps simplify the need for some |
| 19227 | /// patterns (and we don't have the non-fused floating point instruction). |
| 19228 | bool ARMTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, |
| 19229 | EVT VT) const { |
| 19230 | if (Subtarget->useSoftFloat()) |
| 19231 | return false; |
| 19232 | |
| 19233 | if (!VT.isSimple()) |
| 19234 | return false; |
| 19235 | |
| 19236 | switch (VT.getSimpleVT().SimpleTy) { |
| 19237 | case MVT::v4f32: |
| 19238 | case MVT::v8f16: |
| 19239 | return Subtarget->hasMVEFloatOps(); |
| 19240 | case MVT::f16: |
| 19241 | return Subtarget->useFPVFMx16(); |
| 19242 | case MVT::f32: |
| 19243 | return Subtarget->useFPVFMx(); |
| 19244 | case MVT::f64: |
| 19245 | return Subtarget->useFPVFMx64(); |
| 19246 | default: |
| 19247 | break; |
| 19248 | } |
| 19249 | |
| 19250 | return false; |
| 19251 | } |
| 19252 | |
| 19253 | static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { |
| 19254 | if (V < 0) |
| 19255 | return false; |
| 19256 | |
| 19257 | unsigned Scale = 1; |
| 19258 | switch (VT.getSimpleVT().SimpleTy) { |
| 19259 | case MVT::i1: |
| 19260 | case MVT::i8: |
| 19261 | // Scale == 1; |
| 19262 | break; |
| 19263 | case MVT::i16: |
| 19264 | // Scale == 2; |
| 19265 | Scale = 2; |
| 19266 | break; |
| 19267 | default: |
    // On Thumb1 we load most things (i32, i64, floats, etc.) with an LDR
| 19269 | // Scale == 4; |
| 19270 | Scale = 4; |
| 19271 | break; |
| 19272 | } |
| 19273 | |
| 19274 | if ((V & (Scale - 1)) != 0) |
| 19275 | return false; |
| 19276 | return isUInt<5>(x: V / Scale); |
| 19277 | } |
| 19278 | |
| 19279 | static bool isLegalT2AddressImmediate(int64_t V, EVT VT, |
| 19280 | const ARMSubtarget *Subtarget) { |
| 19281 | if (!VT.isInteger() && !VT.isFloatingPoint()) |
| 19282 | return false; |
| 19283 | if (VT.isVector() && Subtarget->hasNEON()) |
| 19284 | return false; |
| 19285 | if (VT.isVector() && VT.isFloatingPoint() && Subtarget->hasMVEIntegerOps() && |
| 19286 | !Subtarget->hasMVEFloatOps()) |
| 19287 | return false; |
| 19288 | |
| 19289 | bool IsNeg = false; |
| 19290 | if (V < 0) { |
| 19291 | IsNeg = true; |
| 19292 | V = -V; |
| 19293 | } |
| 19294 | |
| 19295 | unsigned NumBytes = std::max(a: (unsigned)VT.getSizeInBits() / 8, b: 1U); |
| 19296 | |
| 19297 | // MVE: size * imm7 |
| 19298 | if (VT.isVector() && Subtarget->hasMVEIntegerOps()) { |
| 19299 | switch (VT.getSimpleVT().getVectorElementType().SimpleTy) { |
| 19300 | case MVT::i32: |
| 19301 | case MVT::f32: |
| 19302 | return isShiftedUInt<7,2>(x: V); |
| 19303 | case MVT::i16: |
| 19304 | case MVT::f16: |
| 19305 | return isShiftedUInt<7,1>(x: V); |
| 19306 | case MVT::i8: |
| 19307 | return isUInt<7>(x: V); |
| 19308 | default: |
| 19309 | return false; |
| 19310 | } |
| 19311 | } |
| 19312 | |
| 19313 | // half VLDR: 2 * imm8 |
| 19314 | if (VT.isFloatingPoint() && NumBytes == 2 && Subtarget->hasFPRegs16()) |
| 19315 | return isShiftedUInt<8, 1>(x: V); |
| 19316 | // VLDR and LDRD: 4 * imm8 |
| 19317 | if ((VT.isFloatingPoint() && Subtarget->hasVFP2Base()) || NumBytes == 8) |
| 19318 | return isShiftedUInt<8, 2>(x: V); |
| 19319 | |
| 19320 | if (NumBytes == 1 || NumBytes == 2 || NumBytes == 4) { |
| 19321 | // + imm12 or - imm8 |
| 19322 | if (IsNeg) |
| 19323 | return isUInt<8>(x: V); |
| 19324 | return isUInt<12>(x: V); |
| 19325 | } |
| 19326 | |
| 19327 | return false; |
| 19328 | } |
| 19329 | |
| 19330 | /// isLegalAddressImmediate - Return true if the integer value can be used |
| 19331 | /// as the offset of the target addressing mode for load / store of the |
| 19332 | /// given type. |
| 19333 | static bool isLegalAddressImmediate(int64_t V, EVT VT, |
| 19334 | const ARMSubtarget *Subtarget) { |
| 19335 | if (V == 0) |
| 19336 | return true; |
| 19337 | |
| 19338 | if (!VT.isSimple()) |
| 19339 | return false; |
| 19340 | |
| 19341 | if (Subtarget->isThumb1Only()) |
| 19342 | return isLegalT1AddressImmediate(V, VT); |
| 19343 | else if (Subtarget->isThumb2()) |
| 19344 | return isLegalT2AddressImmediate(V, VT, Subtarget); |
| 19345 | |
| 19346 | // ARM mode. |
| 19347 | if (V < 0) |
    V = -V;
| 19349 | switch (VT.getSimpleVT().SimpleTy) { |
| 19350 | default: return false; |
| 19351 | case MVT::i1: |
| 19352 | case MVT::i8: |
| 19353 | case MVT::i32: |
| 19354 | // +- imm12 |
| 19355 | return isUInt<12>(x: V); |
| 19356 | case MVT::i16: |
| 19357 | // +- imm8 |
| 19358 | return isUInt<8>(x: V); |
| 19359 | case MVT::f32: |
| 19360 | case MVT::f64: |
| 19361 | if (!Subtarget->hasVFP2Base()) // FIXME: NEON? |
| 19362 | return false; |
| 19363 | return isShiftedUInt<8, 2>(x: V); |
| 19364 | } |
| 19365 | } |
| 19366 | |
| 19367 | bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, |
| 19368 | EVT VT) const { |
| 19369 | int Scale = AM.Scale; |
| 19370 | if (Scale < 0) |
| 19371 | return false; |
| 19372 | |
| 19373 | switch (VT.getSimpleVT().SimpleTy) { |
| 19374 | default: return false; |
| 19375 | case MVT::i1: |
| 19376 | case MVT::i8: |
| 19377 | case MVT::i16: |
| 19378 | case MVT::i32: |
| 19379 | if (Scale == 1) |
| 19380 | return true; |
| 19381 | // r + r << imm |
| 19382 | Scale = Scale & ~1; |
| 19383 | return Scale == 2 || Scale == 4 || Scale == 8; |
| 19384 | case MVT::i64: |
| 19385 | // FIXME: What are we trying to model here? ldrd doesn't have an r + r |
| 19386 | // version in Thumb mode. |
| 19387 | // r + r |
| 19388 | if (Scale == 1) |
| 19389 | return true; |
| 19390 | // r * 2 (this can be lowered to r + r). |
| 19391 | if (!AM.HasBaseReg && Scale == 2) |
| 19392 | return true; |
| 19393 | return false; |
| 19394 | case MVT::isVoid: |
| 19395 | // Note, we allow "void" uses (basically, uses that aren't loads or |
| 19396 | // stores), because arm allows folding a scale into many arithmetic |
| 19397 | // operations. This should be made more precise and revisited later. |
| 19398 | |
| 19399 | // Allow r << imm, but the imm has to be a multiple of two. |
| 19400 | if (Scale & 1) return false; |
| 19401 | return isPowerOf2_32(Value: Scale); |
| 19402 | } |
| 19403 | } |
| 19404 | |
| 19405 | bool ARMTargetLowering::isLegalT1ScaledAddressingMode(const AddrMode &AM, |
| 19406 | EVT VT) const { |
| 19407 | const int Scale = AM.Scale; |
| 19408 | |
| 19409 | // Negative scales are not supported in Thumb1. |
| 19410 | if (Scale < 0) |
| 19411 | return false; |
| 19412 | |
| 19413 | // Thumb1 addressing modes do not support register scaling excepting the |
| 19414 | // following cases: |
| 19415 | // 1. Scale == 1 means no scaling. |
| 19416 | // 2. Scale == 2 this can be lowered to r + r if there is no base register. |
| 19417 | return (Scale == 1) || (!AM.HasBaseReg && Scale == 2); |
| 19418 | } |
| 19419 | |
| 19420 | /// isLegalAddressingMode - Return true if the addressing mode represented |
| 19421 | /// by AM is legal for this target, for a load/store of the specified type. |
| 19422 | bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL, |
| 19423 | const AddrMode &AM, Type *Ty, |
| 19424 | unsigned AS, Instruction *I) const { |
| 19425 | EVT VT = getValueType(DL, Ty, AllowUnknown: true); |
| 19426 | if (!isLegalAddressImmediate(V: AM.BaseOffs, VT, Subtarget)) |
| 19427 | return false; |
| 19428 | |
| 19429 | // Can never fold addr of global into load/store. |
| 19430 | if (AM.BaseGV) |
| 19431 | return false; |
| 19432 | |
| 19433 | switch (AM.Scale) { |
| 19434 | case 0: // no scale reg, must be "r+i" or "r", or "i". |
| 19435 | break; |
| 19436 | default: |
| 19437 | // ARM doesn't support any R+R*scale+imm addr modes. |
| 19438 | if (AM.BaseOffs) |
| 19439 | return false; |
| 19440 | |
| 19441 | if (!VT.isSimple()) |
| 19442 | return false; |
| 19443 | |
| 19444 | if (Subtarget->isThumb1Only()) |
| 19445 | return isLegalT1ScaledAddressingMode(AM, VT); |
| 19446 | |
| 19447 | if (Subtarget->isThumb2()) |
| 19448 | return isLegalT2ScaledAddressingMode(AM, VT); |
| 19449 | |
| 19450 | int Scale = AM.Scale; |
| 19451 | switch (VT.getSimpleVT().SimpleTy) { |
| 19452 | default: return false; |
| 19453 | case MVT::i1: |
| 19454 | case MVT::i8: |
| 19455 | case MVT::i32: |
| 19456 | if (Scale < 0) Scale = -Scale; |
| 19457 | if (Scale == 1) |
| 19458 | return true; |
| 19459 | // r + r << imm |
| 19460 | return isPowerOf2_32(Value: Scale & ~1); |
| 19461 | case MVT::i16: |
| 19462 | case MVT::i64: |
| 19463 | // r +/- r |
| 19464 | if (Scale == 1 || (AM.HasBaseReg && Scale == -1)) |
| 19465 | return true; |
| 19466 | // r * 2 (this can be lowered to r + r). |
| 19467 | if (!AM.HasBaseReg && Scale == 2) |
| 19468 | return true; |
| 19469 | return false; |
| 19470 | |
| 19471 | case MVT::isVoid: |
| 19472 | // Note, we allow "void" uses (basically, uses that aren't loads or |
| 19473 | // stores), because arm allows folding a scale into many arithmetic |
| 19474 | // operations. This should be made more precise and revisited later. |
| 19475 | |
| 19476 | // Allow r << imm, but the imm has to be a multiple of two. |
| 19477 | if (Scale & 1) return false; |
| 19478 | return isPowerOf2_32(Value: Scale); |
| 19479 | } |
| 19480 | } |
| 19481 | return true; |
| 19482 | } |
| 19483 | |
| 19484 | /// isLegalICmpImmediate - Return true if the specified immediate is legal |
| 19485 | /// icmp immediate, that is the target has icmp instructions which can compare |
| 19486 | /// a register against the immediate without having to materialize the |
| 19487 | /// immediate into a register. |
| 19488 | bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { |
| 19489 | // Thumb2 and ARM modes can use cmn for negative immediates. |
| 19490 | if (!Subtarget->isThumb()) |
| 19491 | return ARM_AM::getSOImmVal(Arg: (uint32_t)Imm) != -1 || |
| 19492 | ARM_AM::getSOImmVal(Arg: -(uint32_t)Imm) != -1; |
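  // E.g. "cmp r0, #-10" has no ARM modified-immediate encoding, but the
  // equivalent "cmn r0, #10" does, which is why the negated immediate is
  // tried as well.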
| 19493 | if (Subtarget->isThumb2()) |
| 19494 | return ARM_AM::getT2SOImmVal(Arg: (uint32_t)Imm) != -1 || |
| 19495 | ARM_AM::getT2SOImmVal(Arg: -(uint32_t)Imm) != -1; |
  // Thumb1 doesn't have cmn, and only has 8-bit immediates.
| 19497 | return Imm >= 0 && Imm <= 255; |
| 19498 | } |
| 19499 | |
| 19500 | /// isLegalAddImmediate - Return true if the specified immediate is a legal add |
| 19501 | /// *or sub* immediate, that is the target has add or sub instructions which can |
| 19502 | /// add a register with the immediate without having to materialize the |
| 19503 | /// immediate into a register. |
| 19504 | bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const { |
| 19505 | // Same encoding for add/sub, just flip the sign. |
| 19506 | uint64_t AbsImm = AbsoluteValue(X: Imm); |
| 19507 | if (!Subtarget->isThumb()) |
| 19508 | return ARM_AM::getSOImmVal(Arg: AbsImm) != -1; |
| 19509 | if (Subtarget->isThumb2()) |
| 19510 | return ARM_AM::getT2SOImmVal(Arg: AbsImm) != -1; |
  // Thumb1 only has an 8-bit unsigned immediate.
| 19512 | return AbsImm <= 255; |
| 19513 | } |
| 19514 | |
| 19515 | // Return false to prevent folding |
| 19516 | // (mul (add r, c0), c1) -> (add (mul r, c1), c0*c1) in DAGCombine, |
| 19517 | // if the folding leads to worse code. |
| 19518 | bool ARMTargetLowering::isMulAddWithConstProfitable(SDValue AddNode, |
| 19519 | SDValue ConstNode) const { |
| 19520 | // Let the DAGCombiner decide for vector types and large types. |
| 19521 | const EVT VT = AddNode.getValueType(); |
| 19522 | if (VT.isVector() || VT.getScalarSizeInBits() > 32) |
| 19523 | return true; |
| 19524 | |
  // It is worse if c0 is a legal add immediate, while c1*c0 is not
  // and has to be composed of at least two instructions.
| 19527 | const ConstantSDNode *C0Node = cast<ConstantSDNode>(Val: AddNode.getOperand(i: 1)); |
| 19528 | const ConstantSDNode *C1Node = cast<ConstantSDNode>(Val&: ConstNode); |
| 19529 | const int64_t C0 = C0Node->getSExtValue(); |
| 19530 | APInt CA = C0Node->getAPIntValue() * C1Node->getAPIntValue(); |
| 19531 | if (!isLegalAddImmediate(Imm: C0) || isLegalAddImmediate(Imm: CA.getSExtValue())) |
| 19532 | return true; |
| 19533 | if (ConstantMaterializationCost(Val: (unsigned)CA.getZExtValue(), Subtarget) > 1) |
| 19534 | return false; |
| 19535 | |
| 19536 | // Default to true and let the DAGCombiner decide. |
| 19537 | return true; |
| 19538 | } |
| 19539 | |
| 19540 | static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, |
| 19541 | bool isSEXTLoad, SDValue &Base, |
| 19542 | SDValue &Offset, bool &isInc, |
| 19543 | SelectionDAG &DAG) { |
| 19544 | if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) |
| 19545 | return false; |
| 19546 | |
| 19547 | if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { |
| 19548 | // AddressingMode 3 |
| 19549 | Base = Ptr->getOperand(Num: 0); |
| 19550 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Val: Ptr->getOperand(Num: 1))) { |
| 19551 | int RHSC = (int)RHS->getZExtValue(); |
| 19552 | if (RHSC < 0 && RHSC > -256) { |
| 19553 | assert(Ptr->getOpcode() == ISD::ADD); |
| 19554 | isInc = false; |
| 19555 | Offset = DAG.getConstant(Val: -RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
| 19556 | return true; |
| 19557 | } |
| 19558 | } |
| 19559 | isInc = (Ptr->getOpcode() == ISD::ADD); |
| 19560 | Offset = Ptr->getOperand(Num: 1); |
| 19561 | return true; |
| 19562 | } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { |
| 19563 | // AddressingMode 2 |
| 19564 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Val: Ptr->getOperand(Num: 1))) { |
| 19565 | int RHSC = (int)RHS->getZExtValue(); |
| 19566 | if (RHSC < 0 && RHSC > -0x1000) { |
| 19567 | assert(Ptr->getOpcode() == ISD::ADD); |
| 19568 | isInc = false; |
| 19569 | Offset = DAG.getConstant(Val: -RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
| 19570 | Base = Ptr->getOperand(Num: 0); |
| 19571 | return true; |
| 19572 | } |
| 19573 | } |
| 19574 | |
| 19575 | if (Ptr->getOpcode() == ISD::ADD) { |
| 19576 | isInc = true; |
      ARM_AM::ShiftOpc ShOpcVal =
| 19578 | ARM_AM::getShiftOpcForNode(Opcode: Ptr->getOperand(Num: 0).getOpcode()); |
| 19579 | if (ShOpcVal != ARM_AM::no_shift) { |
| 19580 | Base = Ptr->getOperand(Num: 1); |
| 19581 | Offset = Ptr->getOperand(Num: 0); |
| 19582 | } else { |
| 19583 | Base = Ptr->getOperand(Num: 0); |
| 19584 | Offset = Ptr->getOperand(Num: 1); |
| 19585 | } |
| 19586 | return true; |
| 19587 | } |
| 19588 | |
| 19589 | isInc = (Ptr->getOpcode() == ISD::ADD); |
| 19590 | Base = Ptr->getOperand(Num: 0); |
| 19591 | Offset = Ptr->getOperand(Num: 1); |
| 19592 | return true; |
| 19593 | } |
| 19594 | |
| 19595 | // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. |
| 19596 | return false; |
| 19597 | } |
| 19598 | |
| 19599 | static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, |
| 19600 | bool isSEXTLoad, SDValue &Base, |
| 19601 | SDValue &Offset, bool &isInc, |
| 19602 | SelectionDAG &DAG) { |
| 19603 | if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) |
| 19604 | return false; |
| 19605 | |
| 19606 | Base = Ptr->getOperand(Num: 0); |
| 19607 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Val: Ptr->getOperand(Num: 1))) { |
| 19608 | int RHSC = (int)RHS->getZExtValue(); |
| 19609 | if (RHSC < 0 && RHSC > -0x100) { // 8 bits. |
| 19610 | assert(Ptr->getOpcode() == ISD::ADD); |
| 19611 | isInc = false; |
| 19612 | Offset = DAG.getConstant(Val: -RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
| 19613 | return true; |
| 19614 | } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. |
| 19615 | isInc = Ptr->getOpcode() == ISD::ADD; |
| 19616 | Offset = DAG.getConstant(Val: RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
| 19617 | return true; |
| 19618 | } |
| 19619 | } |
| 19620 | |
| 19621 | return false; |
| 19622 | } |
| 19623 | |
| 19624 | static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, Align Alignment, |
| 19625 | bool isSEXTLoad, bool IsMasked, bool isLE, |
| 19626 | SDValue &Base, SDValue &Offset, |
| 19627 | bool &isInc, SelectionDAG &DAG) { |
| 19628 | if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) |
| 19629 | return false; |
| 19630 | if (!isa<ConstantSDNode>(Val: Ptr->getOperand(Num: 1))) |
| 19631 | return false; |
| 19632 | |
| 19633 | // We allow LE non-masked loads to change the type (for example use a vldrb.8 |
| 19634 | // as opposed to a vldrw.32). This can allow extra addressing modes or |
| 19635 | // alignments for what is otherwise an equivalent instruction. |
| 19636 | bool CanChangeType = isLE && !IsMasked; |
| 19637 | |
| 19638 | ConstantSDNode *RHS = cast<ConstantSDNode>(Val: Ptr->getOperand(Num: 1)); |
| 19639 | int RHSC = (int)RHS->getZExtValue(); |
| 19640 | |
| 19641 | auto IsInRange = [&](int RHSC, int Limit, int Scale) { |
| 19642 | if (RHSC < 0 && RHSC > -Limit * Scale && RHSC % Scale == 0) { |
| 19643 | assert(Ptr->getOpcode() == ISD::ADD); |
| 19644 | isInc = false; |
| 19645 | Offset = DAG.getConstant(Val: -RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
| 19646 | return true; |
| 19647 | } else if (RHSC > 0 && RHSC < Limit * Scale && RHSC % Scale == 0) { |
| 19648 | isInc = Ptr->getOpcode() == ISD::ADD; |
| 19649 | Offset = DAG.getConstant(Val: RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
| 19650 | return true; |
| 19651 | } |
| 19652 | return false; |
| 19653 | }; |
| 19654 | |
| 19655 | // Try to find a matching instruction based on s/zext, Alignment, Offset and |
| 19656 | // (in BE/masked) type. |
| 19657 | Base = Ptr->getOperand(Num: 0); |
| 19658 | if (VT == MVT::v4i16) { |
| 19659 | if (Alignment >= 2 && IsInRange(RHSC, 0x80, 2)) |
| 19660 | return true; |
| 19661 | } else if (VT == MVT::v4i8 || VT == MVT::v8i8) { |
| 19662 | if (IsInRange(RHSC, 0x80, 1)) |
| 19663 | return true; |
| 19664 | } else if (Alignment >= 4 && |
| 19665 | (CanChangeType || VT == MVT::v4i32 || VT == MVT::v4f32) && |
| 19666 | IsInRange(RHSC, 0x80, 4)) |
| 19667 | return true; |
| 19668 | else if (Alignment >= 2 && |
| 19669 | (CanChangeType || VT == MVT::v8i16 || VT == MVT::v8f16) && |
| 19670 | IsInRange(RHSC, 0x80, 2)) |
| 19671 | return true; |
| 19672 | else if ((CanChangeType || VT == MVT::v16i8) && IsInRange(RHSC, 0x80, 1)) |
| 19673 | return true; |
| 19674 | return false; |
| 19675 | } |
| 19676 | |
| 19677 | /// getPreIndexedAddressParts - returns true by value, base pointer and |
| 19678 | /// offset pointer and addressing mode by reference if the node's address |
| 19679 | /// can be legally represented as pre-indexed load / store address. |
| 19680 | bool |
| 19681 | ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, |
| 19682 | SDValue &Offset, |
| 19683 | ISD::MemIndexedMode &AM, |
| 19684 | SelectionDAG &DAG) const { |
| 19685 | if (Subtarget->isThumb1Only()) |
| 19686 | return false; |
| 19687 | |
| 19688 | EVT VT; |
| 19689 | SDValue Ptr; |
| 19690 | Align Alignment; |
| 19691 | unsigned AS = 0; |
| 19692 | bool isSEXTLoad = false; |
| 19693 | bool IsMasked = false; |
| 19694 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: N)) { |
| 19695 | Ptr = LD->getBasePtr(); |
| 19696 | VT = LD->getMemoryVT(); |
| 19697 | Alignment = LD->getAlign(); |
| 19698 | AS = LD->getAddressSpace(); |
| 19699 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
| 19700 | } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Val: N)) { |
| 19701 | Ptr = ST->getBasePtr(); |
| 19702 | VT = ST->getMemoryVT(); |
| 19703 | Alignment = ST->getAlign(); |
| 19704 | AS = ST->getAddressSpace(); |
| 19705 | } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(Val: N)) { |
| 19706 | Ptr = LD->getBasePtr(); |
| 19707 | VT = LD->getMemoryVT(); |
| 19708 | Alignment = LD->getAlign(); |
| 19709 | AS = LD->getAddressSpace(); |
| 19710 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
| 19711 | IsMasked = true; |
| 19712 | } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(Val: N)) { |
| 19713 | Ptr = ST->getBasePtr(); |
| 19714 | VT = ST->getMemoryVT(); |
| 19715 | Alignment = ST->getAlign(); |
| 19716 | AS = ST->getAddressSpace(); |
| 19717 | IsMasked = true; |
| 19718 | } else |
| 19719 | return false; |
| 19720 | |
| 19721 | unsigned Fast = 0; |
| 19722 | if (!allowsMisalignedMemoryAccesses(VT, AS, Alignment, |
| 19723 | MachineMemOperand::MONone, Fast: &Fast)) { |
| 19724 | // Only generate post-increment or pre-increment forms when a real |
| 19725 | // hardware instruction exists for them. Do not emit postinc/preinc |
| 19726 | // if the operation will end up as a libcall. |
| 19727 | return false; |
| 19728 | } |
| 19729 | |
| 19730 | bool isInc; |
| 19731 | bool isLegal = false; |
| 19732 | if (VT.isVector()) |
| 19733 | isLegal = Subtarget->hasMVEIntegerOps() && |
| 19734 | getMVEIndexedAddressParts( |
| 19735 | Ptr: Ptr.getNode(), VT, Alignment, isSEXTLoad, IsMasked, |
| 19736 | isLE: Subtarget->isLittle(), Base, Offset, isInc, DAG); |
| 19737 | else { |
| 19738 | if (Subtarget->isThumb2()) |
| 19739 | isLegal = getT2IndexedAddressParts(Ptr: Ptr.getNode(), VT, isSEXTLoad, Base, |
| 19740 | Offset, isInc, DAG); |
| 19741 | else |
| 19742 | isLegal = getARMIndexedAddressParts(Ptr: Ptr.getNode(), VT, isSEXTLoad, Base, |
| 19743 | Offset, isInc, DAG); |
| 19744 | } |
| 19745 | if (!isLegal) |
| 19746 | return false; |
| 19747 | |
| 19748 | AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; |
| 19749 | return true; |
| 19750 | } |
| 19751 | |
| 19752 | /// getPostIndexedAddressParts - returns true by value, base pointer and |
| 19753 | /// offset pointer and addressing mode by reference if this node can be |
| 19754 | /// combined with a load / store to form a post-indexed load / store. |
| 19755 | bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, |
| 19756 | SDValue &Base, |
| 19757 | SDValue &Offset, |
| 19758 | ISD::MemIndexedMode &AM, |
| 19759 | SelectionDAG &DAG) const { |
| 19760 | EVT VT; |
| 19761 | SDValue Ptr; |
| 19762 | Align Alignment; |
| 19763 | bool isSEXTLoad = false, isNonExt; |
| 19764 | bool IsMasked = false; |
| 19765 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: N)) { |
| 19766 | VT = LD->getMemoryVT(); |
| 19767 | Ptr = LD->getBasePtr(); |
| 19768 | Alignment = LD->getAlign(); |
| 19769 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
| 19770 | isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD; |
| 19771 | } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Val: N)) { |
| 19772 | VT = ST->getMemoryVT(); |
| 19773 | Ptr = ST->getBasePtr(); |
| 19774 | Alignment = ST->getAlign(); |
| 19775 | isNonExt = !ST->isTruncatingStore(); |
| 19776 | } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(Val: N)) { |
| 19777 | VT = LD->getMemoryVT(); |
| 19778 | Ptr = LD->getBasePtr(); |
| 19779 | Alignment = LD->getAlign(); |
| 19780 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
| 19781 | isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD; |
| 19782 | IsMasked = true; |
| 19783 | } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(Val: N)) { |
| 19784 | VT = ST->getMemoryVT(); |
| 19785 | Ptr = ST->getBasePtr(); |
| 19786 | Alignment = ST->getAlign(); |
| 19787 | isNonExt = !ST->isTruncatingStore(); |
| 19788 | IsMasked = true; |
| 19789 | } else |
| 19790 | return false; |
| 19791 | |
| 19792 | if (Subtarget->isThumb1Only()) { |
| 19793 | // Thumb-1 can do a limited post-inc load or store as an updating LDM. It |
| 19794 | // must be non-extending/truncating, i32, with an offset of 4. |
    assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!");
| 19796 | if (Op->getOpcode() != ISD::ADD || !isNonExt) |
| 19797 | return false; |
| 19798 | auto *RHS = dyn_cast<ConstantSDNode>(Val: Op->getOperand(Num: 1)); |
| 19799 | if (!RHS || RHS->getZExtValue() != 4) |
| 19800 | return false; |
| 19801 | if (Alignment < Align(4)) |
| 19802 | return false; |
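    // What survives here (roughly): an i32 load/store post-incrementing its
    // base by exactly 4, selectable as an updating LDM/STM such as
    // "ldm r0!, {r1}".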
| 19803 | |
| 19804 | Offset = Op->getOperand(Num: 1); |
| 19805 | Base = Op->getOperand(Num: 0); |
| 19806 | AM = ISD::POST_INC; |
| 19807 | return true; |
| 19808 | } |
| 19809 | |
| 19810 | bool isInc; |
| 19811 | bool isLegal = false; |
| 19812 | if (VT.isVector()) |
| 19813 | isLegal = Subtarget->hasMVEIntegerOps() && |
| 19814 | getMVEIndexedAddressParts(Ptr: Op, VT, Alignment, isSEXTLoad, IsMasked, |
| 19815 | isLE: Subtarget->isLittle(), Base, Offset, |
| 19816 | isInc, DAG); |
| 19817 | else { |
| 19818 | if (Subtarget->isThumb2()) |
| 19819 | isLegal = getT2IndexedAddressParts(Ptr: Op, VT, isSEXTLoad, Base, Offset, |
| 19820 | isInc, DAG); |
| 19821 | else |
| 19822 | isLegal = getARMIndexedAddressParts(Ptr: Op, VT, isSEXTLoad, Base, Offset, |
| 19823 | isInc, DAG); |
| 19824 | } |
| 19825 | if (!isLegal) |
| 19826 | return false; |
| 19827 | |
| 19828 | if (Ptr != Base) { |
// Swap the base pointer and offset to catch more post-indexed loads /
// stores when it's legal. In Thumb2 mode, the offset must be an immediate.
| 19831 | if (Ptr == Offset && Op->getOpcode() == ISD::ADD && |
| 19832 | !Subtarget->isThumb2()) |
| 19833 | std::swap(a&: Base, b&: Offset); |
| 19834 | |
| 19835 | // Post-indexed load / store update the base pointer. |
| 19836 | if (Ptr != Base) |
| 19837 | return false; |
| 19838 | } |
| 19839 | |
| 19840 | AM = isInc ? ISD::POST_INC : ISD::POST_DEC; |
| 19841 | return true; |
| 19842 | } |
| 19843 | |
| 19844 | void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, |
| 19845 | KnownBits &Known, |
| 19846 | const APInt &DemandedElts, |
| 19847 | const SelectionDAG &DAG, |
| 19848 | unsigned Depth) const { |
| 19849 | unsigned BitWidth = Known.getBitWidth(); |
| 19850 | Known.resetAll(); |
| 19851 | switch (Op.getOpcode()) { |
| 19852 | default: break; |
| 19853 | case ARMISD::ADDC: |
| 19854 | case ARMISD::ADDE: |
| 19855 | case ARMISD::SUBC: |
| 19856 | case ARMISD::SUBE: |
| 19857 | // Special cases when we convert a carry to a boolean. |
| 19858 | if (Op.getResNo() == 0) { |
| 19859 | SDValue LHS = Op.getOperand(i: 0); |
| 19860 | SDValue RHS = Op.getOperand(i: 1); |
| 19861 | // (ADDE 0, 0, C) will give us a single bit. |
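// The carry C is 0 or 1, so only bit 0 of the result can be set; the
// upper BitWidth - 1 bits are known zero, as recorded below.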
| 19862 | if (Op->getOpcode() == ARMISD::ADDE && isNullConstant(V: LHS) && |
| 19863 | isNullConstant(V: RHS)) { |
| 19864 | Known.Zero |= APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - 1); |
| 19865 | return; |
| 19866 | } |
| 19867 | } |
| 19868 | break; |
| 19869 | case ARMISD::CMOV: { |
| 19870 | // Bits are known zero/one if known on the LHS and RHS. |
| 19871 | Known = DAG.computeKnownBits(Op: Op.getOperand(i: 0), Depth: Depth+1); |
| 19872 | if (Known.isUnknown()) |
| 19873 | return; |
| 19874 | |
| 19875 | KnownBits KnownRHS = DAG.computeKnownBits(Op: Op.getOperand(i: 1), Depth: Depth+1); |
| 19876 | Known = Known.intersectWith(RHS: KnownRHS); |
| 19877 | return; |
| 19878 | } |
| 19879 | case ISD::INTRINSIC_W_CHAIN: { |
| 19880 | Intrinsic::ID IntID = |
| 19881 | static_cast<Intrinsic::ID>(Op->getConstantOperandVal(Num: 1)); |
| 19882 | switch (IntID) { |
| 19883 | default: return; |
| 19884 | case Intrinsic::arm_ldaex: |
| 19885 | case Intrinsic::arm_ldrex: { |
| 19886 | EVT VT = cast<MemIntrinsicSDNode>(Val: Op)->getMemoryVT(); |
| 19887 | unsigned MemBits = VT.getScalarSizeInBits(); |
| 19888 | Known.Zero |= APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - MemBits); |
| 19889 | return; |
| 19890 | } |
| 19891 | } |
| 19892 | } |
| 19893 | case ARMISD::BFI: { |
| 19894 | // Conservatively, we can recurse down the first operand |
| 19895 | // and just mask out all affected bits. |
| 19896 | Known = DAG.computeKnownBits(Op: Op.getOperand(i: 0), Depth: Depth + 1); |
| 19897 | |
// The mask operand of BFI has zeros exactly in the bit positions that BFI
// writes, so it is already suitable for clearing knowledge of those bits.
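// For example (illustrative): with a mask of 0xFFFF00FF the BFI inserts
// bits [8,15], so whatever we knew about those bits of operand 0 is
// discarded by the ANDs below.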
| 19900 | const APInt &Mask = Op.getConstantOperandAPInt(i: 2); |
| 19901 | Known.Zero &= Mask; |
| 19902 | Known.One &= Mask; |
| 19903 | return; |
| 19904 | } |
| 19905 | case ARMISD::VGETLANEs: |
| 19906 | case ARMISD::VGETLANEu: { |
| 19907 | const SDValue &SrcSV = Op.getOperand(i: 0); |
| 19908 | EVT VecVT = SrcSV.getValueType(); |
| 19909 | assert(VecVT.isVector() && "VGETLANE expected a vector type" ); |
| 19910 | const unsigned NumSrcElts = VecVT.getVectorNumElements(); |
| 19911 | ConstantSDNode *Pos = cast<ConstantSDNode>(Val: Op.getOperand(i: 1).getNode()); |
| 19912 | assert(Pos->getAPIntValue().ult(NumSrcElts) && |
| 19913 | "VGETLANE index out of bounds" ); |
| 19914 | unsigned Idx = Pos->getZExtValue(); |
| 19915 | APInt DemandedElt = APInt::getOneBitSet(numBits: NumSrcElts, BitNo: Idx); |
| 19916 | Known = DAG.computeKnownBits(Op: SrcSV, DemandedElts: DemandedElt, Depth: Depth + 1); |
| 19917 | |
| 19918 | EVT VT = Op.getValueType(); |
| 19919 | const unsigned DstSz = VT.getScalarSizeInBits(); |
| 19920 | const unsigned SrcSz = VecVT.getVectorElementType().getSizeInBits(); |
| 19921 | (void)SrcSz; |
| 19922 | assert(SrcSz == Known.getBitWidth()); |
| 19923 | assert(DstSz > SrcSz); |
| 19924 | if (Op.getOpcode() == ARMISD::VGETLANEs) |
| 19925 | Known = Known.sext(BitWidth: DstSz); |
| 19926 | else { |
| 19927 | Known = Known.zext(BitWidth: DstSz); |
| 19928 | } |
| 19929 | assert(DstSz == Known.getBitWidth()); |
| 19930 | break; |
| 19931 | } |
| 19932 | case ARMISD::VMOVrh: { |
| 19933 | KnownBits KnownOp = DAG.computeKnownBits(Op: Op->getOperand(Num: 0), Depth: Depth + 1); |
| 19934 | assert(KnownOp.getBitWidth() == 16); |
| 19935 | Known = KnownOp.zext(BitWidth: 32); |
| 19936 | break; |
| 19937 | } |
| 19938 | case ARMISD::CSINC: |
| 19939 | case ARMISD::CSINV: |
| 19940 | case ARMISD::CSNEG: { |
| 19941 | KnownBits KnownOp0 = DAG.computeKnownBits(Op: Op->getOperand(Num: 0), Depth: Depth + 1); |
| 19942 | KnownBits KnownOp1 = DAG.computeKnownBits(Op: Op->getOperand(Num: 1), Depth: Depth + 1); |
| 19943 | |
| 19944 | // The result is either: |
| 19945 | // CSINC: KnownOp0 or KnownOp1 + 1 |
| 19946 | // CSINV: KnownOp0 or ~KnownOp1 |
| 19947 | // CSNEG: KnownOp0 or KnownOp1 * -1 |
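// For example (illustrative): for a CSINC whose second operand is the
// constant 7, the second candidate becomes the constant 8 before the
// intersection with KnownOp0.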
| 19948 | if (Op.getOpcode() == ARMISD::CSINC) |
| 19949 | KnownOp1 = |
| 19950 | KnownBits::add(LHS: KnownOp1, RHS: KnownBits::makeConstant(C: APInt(32, 1))); |
| 19951 | else if (Op.getOpcode() == ARMISD::CSINV) |
| 19952 | std::swap(a&: KnownOp1.Zero, b&: KnownOp1.One); |
| 19953 | else if (Op.getOpcode() == ARMISD::CSNEG) |
| 19954 | KnownOp1 = KnownBits::mul(LHS: KnownOp1, |
| 19955 | RHS: KnownBits::makeConstant(C: APInt::getAllOnes(numBits: 32))); |
| 19956 | |
| 19957 | Known = KnownOp0.intersectWith(RHS: KnownOp1); |
| 19958 | break; |
| 19959 | } |
| 19960 | case ARMISD::VORRIMM: |
| 19961 | case ARMISD::VBICIMM: { |
| 19962 | unsigned Encoded = Op.getConstantOperandVal(i: 1); |
| 19963 | unsigned DecEltBits = 0; |
| 19964 | uint64_t DecodedVal = ARM_AM::decodeVMOVModImm(ModImm: Encoded, EltBits&: DecEltBits); |
| 19965 | |
| 19966 | unsigned EltBits = Op.getScalarValueSizeInBits(); |
| 19967 | if (EltBits != DecEltBits) { |
| 19968 | // Be conservative: only update Known when EltBits == DecEltBits. |
| 19969 | // This is believed to always be true for VORRIMM/VBICIMM today, but if |
| 19970 | // that changes in the future, doing nothing here is safer than risking |
| 19971 | // subtle bugs. |
| 19972 | break; |
| 19973 | } |
| 19974 | |
| 19975 | KnownBits KnownLHS = DAG.computeKnownBits(Op: Op.getOperand(i: 0), Depth: Depth + 1); |
| 19976 | bool IsVORR = Op.getOpcode() == ARMISD::VORRIMM; |
| 19977 | APInt Imm(DecEltBits, DecodedVal); |
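// For example (illustrative): a VORRIMM whose immediate decodes to 0xFF00
// (16-bit elements) forces bits [8,15] to one regardless of the LHS.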
| 19978 | |
| 19979 | Known.One = IsVORR ? (KnownLHS.One | Imm) : (KnownLHS.One & ~Imm); |
| 19980 | Known.Zero = IsVORR ? (KnownLHS.Zero & ~Imm) : (KnownLHS.Zero | Imm); |
| 19981 | break; |
| 19982 | } |
| 19983 | } |
| 19984 | } |
| 19985 | |
| 19986 | bool ARMTargetLowering::targetShrinkDemandedConstant( |
| 19987 | SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, |
| 19988 | TargetLoweringOpt &TLO) const { |
// Delay this optimization, so we don't have to deal with illegal types or
// block other optimizations.
| 19991 | if (!TLO.LegalOps) |
| 19992 | return false; |
| 19993 | |
| 19994 | // Only optimize AND for now. |
| 19995 | if (Op.getOpcode() != ISD::AND) |
| 19996 | return false; |
| 19997 | |
| 19998 | EVT VT = Op.getValueType(); |
| 19999 | |
| 20000 | // Ignore vectors. |
| 20001 | if (VT.isVector()) |
| 20002 | return false; |
| 20003 | |
| 20004 | assert(VT == MVT::i32 && "Unexpected integer type" ); |
| 20005 | |
| 20006 | // Make sure the RHS really is a constant. |
| 20007 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val: Op.getOperand(i: 1)); |
| 20008 | if (!C) |
| 20009 | return false; |
| 20010 | |
| 20011 | unsigned Mask = C->getZExtValue(); |
| 20012 | |
| 20013 | unsigned Demanded = DemandedBits.getZExtValue(); |
| 20014 | unsigned ShrunkMask = Mask & Demanded; |
| 20015 | unsigned ExpandedMask = Mask | ~Demanded; |
| 20016 | |
| 20017 | // If the mask is all zeros, let the target-independent code replace the |
| 20018 | // result with zero. |
| 20019 | if (ShrunkMask == 0) |
| 20020 | return false; |
| 20021 | |
| 20022 | // If the mask is all ones, erase the AND. (Currently, the target-independent |
| 20023 | // code won't do this, so we have to do it explicitly to avoid an infinite |
| 20024 | // loop in obscure cases.) |
| 20025 | if (ExpandedMask == ~0U) |
| 20026 | return TLO.CombineTo(O: Op, N: Op.getOperand(i: 0)); |
| 20027 | |
| 20028 | auto IsLegalMask = [ShrunkMask, ExpandedMask](unsigned Mask) -> bool { |
| 20029 | return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0; |
| 20030 | }; |
| 20031 | auto UseMask = [Mask, Op, VT, &TLO](unsigned NewMask) -> bool { |
| 20032 | if (NewMask == Mask) |
| 20033 | return true; |
| 20034 | SDLoc DL(Op); |
| 20035 | SDValue NewC = TLO.DAG.getConstant(Val: NewMask, DL, VT); |
| 20036 | SDValue NewOp = TLO.DAG.getNode(Opcode: ISD::AND, DL, VT, N1: Op.getOperand(i: 0), N2: NewC); |
| 20037 | return TLO.CombineTo(O: Op, N: NewOp); |
| 20038 | }; |
| 20039 | |
| 20040 | // Prefer uxtb mask. |
| 20041 | if (IsLegalMask(0xFF)) |
| 20042 | return UseMask(0xFF); |
| 20043 | |
| 20044 | // Prefer uxth mask. |
| 20045 | if (IsLegalMask(0xFFFF)) |
| 20046 | return UseMask(0xFFFF); |
| 20047 | |
| 20048 | // [1, 255] is Thumb1 movs+ands, legal immediate for ARM/Thumb2. |
| 20049 | // FIXME: Prefer a contiguous sequence of bits for other optimizations. |
| 20050 | if (ShrunkMask < 256) |
| 20051 | return UseMask(ShrunkMask); |
| 20052 | |
| 20053 | // [-256, -2] is Thumb1 movs+bics, legal immediate for ARM/Thumb2. |
| 20054 | // FIXME: Prefer a contiguous sequence of bits for other optimizations. |
| 20055 | if ((int)ExpandedMask <= -2 && (int)ExpandedMask >= -256) |
| 20056 | return UseMask(ExpandedMask); |
| 20057 | |
| 20058 | // Potential improvements: |
| 20059 | // |
| 20060 | // We could try to recognize lsls+lsrs or lsrs+lsls pairs here. |
| 20061 | // We could try to prefer Thumb1 immediates which can be lowered to a |
| 20062 | // two-instruction sequence. |
| 20063 | // We could try to recognize more legal ARM/Thumb2 immediates here. |
| 20064 | |
| 20065 | return false; |
| 20066 | } |
| 20067 | |
| 20068 | bool ARMTargetLowering::SimplifyDemandedBitsForTargetNode( |
| 20069 | SDValue Op, const APInt &OriginalDemandedBits, |
| 20070 | const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, |
| 20071 | unsigned Depth) const { |
| 20072 | unsigned Opc = Op.getOpcode(); |
| 20073 | |
| 20074 | switch (Opc) { |
| 20075 | case ARMISD::ASRL: |
| 20076 | case ARMISD::LSRL: { |
// If this is result 0 and the other result is unused, see if the demanded
// bits allow us to shrink this long shift into a standard small shift in
// the opposite direction.
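// For example (illustrative): in (asrl Lo, Hi, 8), the top 8 bits of the
// low result come only from Hi, so if just those bits are demanded the
// node can be rewritten as (shl Hi, 24).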
| 20080 | if (Op.getResNo() == 0 && !Op->hasAnyUseOfValue(Value: 1) && |
| 20081 | isa<ConstantSDNode>(Val: Op->getOperand(Num: 2))) { |
| 20082 | unsigned ShAmt = Op->getConstantOperandVal(Num: 2); |
| 20083 | if (ShAmt < 32 && OriginalDemandedBits.isSubsetOf(RHS: APInt::getAllOnes(numBits: 32) |
| 20084 | << (32 - ShAmt))) |
| 20085 | return TLO.CombineTo( |
| 20086 | O: Op, N: TLO.DAG.getNode( |
| 20087 | Opcode: ISD::SHL, DL: SDLoc(Op), VT: MVT::i32, N1: Op.getOperand(i: 1), |
| 20088 | N2: TLO.DAG.getConstant(Val: 32 - ShAmt, DL: SDLoc(Op), VT: MVT::i32))); |
| 20089 | } |
| 20090 | break; |
| 20091 | } |
| 20092 | case ARMISD::VBICIMM: { |
| 20093 | SDValue Op0 = Op.getOperand(i: 0); |
| 20094 | unsigned ModImm = Op.getConstantOperandVal(i: 1); |
| 20095 | unsigned EltBits = 0; |
| 20096 | uint64_t Mask = ARM_AM::decodeVMOVModImm(ModImm, EltBits); |
| 20097 | if ((OriginalDemandedBits & Mask) == 0) |
| 20098 | return TLO.CombineTo(O: Op, N: Op0); |
| 20099 | } |
| 20100 | } |
| 20101 | |
| 20102 | return TargetLowering::SimplifyDemandedBitsForTargetNode( |
| 20103 | Op, DemandedBits: OriginalDemandedBits, DemandedElts: OriginalDemandedElts, Known, TLO, Depth); |
| 20104 | } |
| 20105 | |
| 20106 | //===----------------------------------------------------------------------===// |
| 20107 | // ARM Inline Assembly Support |
| 20108 | //===----------------------------------------------------------------------===// |
| 20109 | |
| 20110 | const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const { |
| 20111 | // At this point, we have to lower this constraint to something else, so we |
// lower it to an "r" or "w". However, by doing this we will force the result
// to be in a register, while the X constraint is much more permissive.
| 20114 | // |
| 20115 | // Although we are correct (we are free to emit anything, without |
| 20116 | // constraints), we might break use cases that would expect us to be more |
| 20117 | // efficient and emit something else. |
| 20118 | if (!Subtarget->hasVFP2Base()) |
| 20119 | return "r" ; |
| 20120 | if (ConstraintVT.isFloatingPoint()) |
| 20121 | return "w" ; |
| 20122 | if (ConstraintVT.isVector() && Subtarget->hasNEON() && |
| 20123 | (ConstraintVT.getSizeInBits() == 64 || |
| 20124 | ConstraintVT.getSizeInBits() == 128)) |
| 20125 | return "w" ; |
| 20126 | |
| 20127 | return "r" ; |
| 20128 | } |
| 20129 | |
| 20130 | /// getConstraintType - Given a constraint letter, return the type of |
| 20131 | /// constraint it is for this target. |
| 20132 | ARMTargetLowering::ConstraintType |
| 20133 | ARMTargetLowering::getConstraintType(StringRef Constraint) const { |
| 20134 | unsigned S = Constraint.size(); |
| 20135 | if (S == 1) { |
| 20136 | switch (Constraint[0]) { |
| 20137 | default: break; |
| 20138 | case 'l': return C_RegisterClass; |
| 20139 | case 'w': return C_RegisterClass; |
| 20140 | case 'h': return C_RegisterClass; |
| 20141 | case 'x': return C_RegisterClass; |
| 20142 | case 't': return C_RegisterClass; |
| 20143 | case 'j': return C_Immediate; // Constant for movw. |
// An address with a single base register. Due to the way we
// currently handle addresses, it is the same as an 'r' memory constraint.
| 20146 | case 'Q': return C_Memory; |
| 20147 | } |
| 20148 | } else if (S == 2) { |
| 20149 | switch (Constraint[0]) { |
| 20150 | default: break; |
| 20151 | case 'T': return C_RegisterClass; |
| 20152 | // All 'U+' constraints are addresses. |
| 20153 | case 'U': return C_Memory; |
| 20154 | } |
| 20155 | } |
| 20156 | return TargetLowering::getConstraintType(Constraint); |
| 20157 | } |
| 20158 | |
| 20159 | /// Examine constraint type and operand type and determine a weight value. |
| 20160 | /// This object must already have been set up with the operand type |
| 20161 | /// and the current alternative constraint selected. |
| 20162 | TargetLowering::ConstraintWeight |
| 20163 | ARMTargetLowering::getSingleConstraintMatchWeight( |
| 20164 | AsmOperandInfo &info, const char *constraint) const { |
| 20165 | ConstraintWeight weight = CW_Invalid; |
| 20166 | Value *CallOperandVal = info.CallOperandVal; |
| 20167 | // If we don't have a value, we can't do a match, |
| 20168 | // but allow it at the lowest weight. |
| 20169 | if (!CallOperandVal) |
| 20170 | return CW_Default; |
| 20171 | Type *type = CallOperandVal->getType(); |
| 20172 | // Look at the constraint type. |
| 20173 | switch (*constraint) { |
| 20174 | default: |
| 20175 | weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); |
| 20176 | break; |
| 20177 | case 'l': |
| 20178 | if (type->isIntegerTy()) { |
| 20179 | if (Subtarget->isThumb()) |
| 20180 | weight = CW_SpecificReg; |
| 20181 | else |
| 20182 | weight = CW_Register; |
| 20183 | } |
| 20184 | break; |
| 20185 | case 'w': |
| 20186 | if (type->isFloatingPointTy()) |
| 20187 | weight = CW_Register; |
| 20188 | break; |
| 20189 | } |
| 20190 | return weight; |
| 20191 | } |
| 20192 | |
| 20193 | static bool isIncompatibleReg(const MCPhysReg &PR, MVT VT) { |
| 20194 | if (PR == 0 || VT == MVT::Other) |
| 20195 | return false; |
| 20196 | if (ARM::SPRRegClass.contains(Reg: PR)) |
| 20197 | return VT != MVT::f32 && VT != MVT::f16 && VT != MVT::i32; |
| 20198 | if (ARM::DPRRegClass.contains(Reg: PR)) |
| 20199 | return VT != MVT::f64 && !VT.is64BitVector(); |
| 20200 | return false; |
| 20201 | } |
| 20202 | |
| 20203 | using RCPair = std::pair<unsigned, const TargetRegisterClass *>; |
| 20204 | |
| 20205 | RCPair ARMTargetLowering::getRegForInlineAsmConstraint( |
| 20206 | const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const { |
| 20207 | switch (Constraint.size()) { |
| 20208 | case 1: |
| 20209 | // GCC ARM Constraint Letters |
| 20210 | switch (Constraint[0]) { |
| 20211 | case 'l': // Low regs or general regs. |
| 20212 | if (Subtarget->isThumb()) |
| 20213 | return RCPair(0U, &ARM::tGPRRegClass); |
| 20214 | return RCPair(0U, &ARM::GPRRegClass); |
| 20215 | case 'h': // High regs or no regs. |
| 20216 | if (Subtarget->isThumb()) |
| 20217 | return RCPair(0U, &ARM::hGPRRegClass); |
| 20218 | break; |
| 20219 | case 'r': |
| 20220 | if (Subtarget->isThumb1Only()) |
| 20221 | return RCPair(0U, &ARM::tGPRRegClass); |
| 20222 | return RCPair(0U, &ARM::GPRRegClass); |
| 20223 | case 'w': |
| 20224 | if (VT == MVT::Other) |
| 20225 | break; |
| 20226 | if (VT == MVT::f32 || VT == MVT::f16 || VT == MVT::bf16) |
| 20227 | return RCPair(0U, &ARM::SPRRegClass); |
| 20228 | if (VT.getSizeInBits() == 64) |
| 20229 | return RCPair(0U, &ARM::DPRRegClass); |
| 20230 | if (VT.getSizeInBits() == 128) |
| 20231 | return RCPair(0U, &ARM::QPRRegClass); |
| 20232 | break; |
| 20233 | case 'x': |
| 20234 | if (VT == MVT::Other) |
| 20235 | break; |
| 20236 | if (VT == MVT::f32 || VT == MVT::f16 || VT == MVT::bf16) |
| 20237 | return RCPair(0U, &ARM::SPR_8RegClass); |
| 20238 | if (VT.getSizeInBits() == 64) |
| 20239 | return RCPair(0U, &ARM::DPR_8RegClass); |
| 20240 | if (VT.getSizeInBits() == 128) |
| 20241 | return RCPair(0U, &ARM::QPR_8RegClass); |
| 20242 | break; |
| 20243 | case 't': |
| 20244 | if (VT == MVT::Other) |
| 20245 | break; |
| 20246 | if (VT == MVT::f32 || VT == MVT::i32 || VT == MVT::f16 || VT == MVT::bf16) |
| 20247 | return RCPair(0U, &ARM::SPRRegClass); |
| 20248 | if (VT.getSizeInBits() == 64) |
| 20249 | return RCPair(0U, &ARM::DPR_VFP2RegClass); |
| 20250 | if (VT.getSizeInBits() == 128) |
| 20251 | return RCPair(0U, &ARM::QPR_VFP2RegClass); |
| 20252 | break; |
| 20253 | } |
| 20254 | break; |
| 20255 | |
| 20256 | case 2: |
| 20257 | if (Constraint[0] == 'T') { |
| 20258 | switch (Constraint[1]) { |
| 20259 | default: |
| 20260 | break; |
| 20261 | case 'e': |
| 20262 | return RCPair(0U, &ARM::tGPREvenRegClass); |
| 20263 | case 'o': |
| 20264 | return RCPair(0U, &ARM::tGPROddRegClass); |
| 20265 | } |
| 20266 | } |
| 20267 | break; |
| 20268 | |
| 20269 | default: |
| 20270 | break; |
| 20271 | } |
| 20272 | |
| 20273 | if (StringRef("{cc}" ).equals_insensitive(RHS: Constraint)) |
| 20274 | return std::make_pair(x: unsigned(ARM::CPSR), y: &ARM::CCRRegClass); |
| 20275 | |
| 20276 | // r14 is an alias of lr. |
| 20277 | if (StringRef("{r14}" ).equals_insensitive(RHS: Constraint)) |
| 20278 | return std::make_pair(x: unsigned(ARM::LR), y: getRegClassFor(VT: MVT::i32)); |
| 20279 | |
| 20280 | auto RCP = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); |
| 20281 | if (isIncompatibleReg(PR: RCP.first, VT)) |
| 20282 | return {0, nullptr}; |
| 20283 | return RCP; |
| 20284 | } |
| 20285 | |
| 20286 | /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops |
| 20287 | /// vector. If it is invalid, don't add anything to Ops. |
| 20288 | void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, |
| 20289 | StringRef Constraint, |
| 20290 | std::vector<SDValue> &Ops, |
| 20291 | SelectionDAG &DAG) const { |
| 20292 | SDValue Result; |
| 20293 | |
| 20294 | // Currently only support length 1 constraints. |
| 20295 | if (Constraint.size() != 1) |
| 20296 | return; |
| 20297 | |
| 20298 | char ConstraintLetter = Constraint[0]; |
| 20299 | switch (ConstraintLetter) { |
| 20300 | default: break; |
| 20301 | case 'j': |
| 20302 | case 'I': case 'J': case 'K': case 'L': |
| 20303 | case 'M': case 'N': case 'O': |
| 20304 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: Op); |
| 20305 | if (!C) |
| 20306 | return; |
| 20307 | |
| 20308 | int64_t CVal64 = C->getSExtValue(); |
| 20309 | int CVal = (int) CVal64; |
| 20310 | // None of these constraints allow values larger than 32 bits. Check |
| 20311 | // that the value fits in an int. |
| 20312 | if (CVal != CVal64) |
| 20313 | return; |
| 20314 | |
| 20315 | switch (ConstraintLetter) { |
| 20316 | case 'j': |
| 20317 | // Constant suitable for movw, must be between 0 and |
| 20318 | // 65535. |
if (Subtarget->hasV6T2Ops() || Subtarget->hasV8MBaselineOps())
| 20320 | if (CVal >= 0 && CVal <= 65535) |
| 20321 | break; |
| 20322 | return; |
| 20323 | case 'I': |
| 20324 | if (Subtarget->isThumb1Only()) { |
| 20325 | // This must be a constant between 0 and 255, for ADD |
| 20326 | // immediates. |
| 20327 | if (CVal >= 0 && CVal <= 255) |
| 20328 | break; |
| 20329 | } else if (Subtarget->isThumb2()) { |
| 20330 | // A constant that can be used as an immediate value in a |
| 20331 | // data-processing instruction. |
| 20332 | if (ARM_AM::getT2SOImmVal(Arg: CVal) != -1) |
| 20333 | break; |
| 20334 | } else { |
| 20335 | // A constant that can be used as an immediate value in a |
| 20336 | // data-processing instruction. |
| 20337 | if (ARM_AM::getSOImmVal(Arg: CVal) != -1) |
| 20338 | break; |
| 20339 | } |
| 20340 | return; |
| 20341 | |
| 20342 | case 'J': |
| 20343 | if (Subtarget->isThumb1Only()) { |
| 20344 | // This must be a constant between -255 and -1, for negated ADD |
| 20345 | // immediates. This can be used in GCC with an "n" modifier that |
| 20346 | // prints the negated value, for use with SUB instructions. It is |
| 20347 | // not useful otherwise but is implemented for compatibility. |
| 20348 | if (CVal >= -255 && CVal <= -1) |
| 20349 | break; |
| 20350 | } else { |
| 20351 | // This must be a constant between -4095 and 4095. This is suitable |
| 20352 | // for use as the immediate offset field in LDR and STR instructions |
| 20353 | // such as LDR r0,[r1,#offset]. |
| 20354 | if (CVal >= -4095 && CVal <= 4095) |
| 20355 | break; |
| 20356 | } |
| 20357 | return; |
| 20358 | |
| 20359 | case 'K': |
| 20360 | if (Subtarget->isThumb1Only()) { |
| 20361 | // A 32-bit value where only one byte has a nonzero value. Exclude |
| 20362 | // zero to match GCC. This constraint is used by GCC internally for |
| 20363 | // constants that can be loaded with a move/shift combination. |
| 20364 | // It is not useful otherwise but is implemented for compatibility. |
| 20365 | if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(V: CVal)) |
| 20366 | break; |
| 20367 | } else if (Subtarget->isThumb2()) { |
| 20368 | // A constant whose bitwise inverse can be used as an immediate |
| 20369 | // value in a data-processing instruction. This can be used in GCC |
| 20370 | // with a "B" modifier that prints the inverted value, for use with |
| 20371 | // BIC and MVN instructions. It is not useful otherwise but is |
| 20372 | // implemented for compatibility. |
| 20373 | if (ARM_AM::getT2SOImmVal(Arg: ~CVal) != -1) |
| 20374 | break; |
| 20375 | } else { |
| 20376 | // A constant whose bitwise inverse can be used as an immediate |
| 20377 | // value in a data-processing instruction. This can be used in GCC |
| 20378 | // with a "B" modifier that prints the inverted value, for use with |
| 20379 | // BIC and MVN instructions. It is not useful otherwise but is |
| 20380 | // implemented for compatibility. |
| 20381 | if (ARM_AM::getSOImmVal(Arg: ~CVal) != -1) |
| 20382 | break; |
| 20383 | } |
| 20384 | return; |
| 20385 | |
| 20386 | case 'L': |
| 20387 | if (Subtarget->isThumb1Only()) { |
| 20388 | // This must be a constant between -7 and 7, |
| 20389 | // for 3-operand ADD/SUB immediate instructions. |
| 20390 | if (CVal >= -7 && CVal < 7) |
| 20391 | break; |
| 20392 | } else if (Subtarget->isThumb2()) { |
| 20393 | // A constant whose negation can be used as an immediate value in a |
| 20394 | // data-processing instruction. This can be used in GCC with an "n" |
| 20395 | // modifier that prints the negated value, for use with SUB |
| 20396 | // instructions. It is not useful otherwise but is implemented for |
| 20397 | // compatibility. |
| 20398 | if (ARM_AM::getT2SOImmVal(Arg: -CVal) != -1) |
| 20399 | break; |
| 20400 | } else { |
| 20401 | // A constant whose negation can be used as an immediate value in a |
| 20402 | // data-processing instruction. This can be used in GCC with an "n" |
| 20403 | // modifier that prints the negated value, for use with SUB |
| 20404 | // instructions. It is not useful otherwise but is implemented for |
| 20405 | // compatibility. |
| 20406 | if (ARM_AM::getSOImmVal(Arg: -CVal) != -1) |
| 20407 | break; |
| 20408 | } |
| 20409 | return; |
| 20410 | |
| 20411 | case 'M': |
| 20412 | if (Subtarget->isThumb1Only()) { |
| 20413 | // This must be a multiple of 4 between 0 and 1020, for |
| 20414 | // ADD sp + immediate. |
| 20415 | if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) |
| 20416 | break; |
| 20417 | } else { |
| 20418 | // A power of two or a constant between 0 and 32. This is used in |
| 20419 | // GCC for the shift amount on shifted register operands, but it is |
| 20420 | // useful in general for any shift amounts. |
| 20421 | if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) |
| 20422 | break; |
| 20423 | } |
| 20424 | return; |
| 20425 | |
| 20426 | case 'N': |
| 20427 | if (Subtarget->isThumb1Only()) { |
| 20428 | // This must be a constant between 0 and 31, for shift amounts. |
| 20429 | if (CVal >= 0 && CVal <= 31) |
| 20430 | break; |
| 20431 | } |
| 20432 | return; |
| 20433 | |
| 20434 | case 'O': |
| 20435 | if (Subtarget->isThumb1Only()) { |
| 20436 | // This must be a multiple of 4 between -508 and 508, for |
| 20437 | // ADD/SUB sp = sp + immediate. |
| 20438 | if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) |
| 20439 | break; |
| 20440 | } |
| 20441 | return; |
| 20442 | } |
| 20443 | Result = DAG.getSignedTargetConstant(Val: CVal, DL: SDLoc(Op), VT: Op.getValueType()); |
| 20444 | break; |
| 20445 | } |
| 20446 | |
| 20447 | if (Result.getNode()) { |
| 20448 | Ops.push_back(x: Result); |
| 20449 | return; |
| 20450 | } |
| 20451 | return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); |
| 20452 | } |
| 20453 | |
| 20454 | static RTLIB::Libcall getDivRemLibcall( |
| 20455 | const SDNode *N, MVT::SimpleValueType SVT) { |
| 20456 | assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM || |
| 20457 | N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) && |
| 20458 | "Unhandled Opcode in getDivRemLibcall" ); |
| 20459 | bool isSigned = N->getOpcode() == ISD::SDIVREM || |
| 20460 | N->getOpcode() == ISD::SREM; |
| 20461 | RTLIB::Libcall LC; |
| 20462 | switch (SVT) { |
| 20463 | default: llvm_unreachable("Unexpected request for libcall!" ); |
| 20464 | case MVT::i8: LC = isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; |
| 20465 | case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; |
| 20466 | case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; |
| 20467 | case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; |
| 20468 | } |
| 20469 | return LC; |
| 20470 | } |
| 20471 | |
| 20472 | static TargetLowering::ArgListTy getDivRemArgList( |
| 20473 | const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) { |
| 20474 | assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM || |
| 20475 | N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) && |
| 20476 | "Unhandled Opcode in getDivRemArgList" ); |
| 20477 | bool isSigned = N->getOpcode() == ISD::SDIVREM || |
| 20478 | N->getOpcode() == ISD::SREM; |
| 20479 | TargetLowering::ArgListTy Args; |
| 20480 | for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { |
| 20481 | EVT ArgVT = N->getOperand(Num: i).getValueType(); |
| 20482 | Type *ArgTy = ArgVT.getTypeForEVT(Context&: *Context); |
| 20483 | TargetLowering::ArgListEntry Entry(N->getOperand(Num: i), ArgTy); |
| 20484 | Entry.IsSExt = isSigned; |
| 20485 | Entry.IsZExt = !isSigned; |
| 20486 | Args.push_back(x: Entry); |
| 20487 | } |
| 20488 | if (Subtarget->getTargetTriple().isOSWindows() && Args.size() >= 2) |
| 20489 | std::swap(a&: Args[0], b&: Args[1]); |
| 20490 | return Args; |
| 20491 | } |
| 20492 | |
| 20493 | SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const { |
| 20494 | assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() || |
| 20495 | Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() || |
| 20496 | Subtarget->isTargetFuchsia() || Subtarget->isTargetWindows()) && |
| 20497 | "Register-based DivRem lowering only" ); |
| 20498 | unsigned Opcode = Op->getOpcode(); |
| 20499 | assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) && |
| 20500 | "Invalid opcode for Div/Rem lowering" ); |
| 20501 | bool isSigned = (Opcode == ISD::SDIVREM); |
| 20502 | EVT VT = Op->getValueType(ResNo: 0); |
| 20503 | SDLoc dl(Op); |
| 20504 | |
| 20505 | if (VT == MVT::i64 && isa<ConstantSDNode>(Val: Op.getOperand(i: 1))) { |
| 20506 | SmallVector<SDValue> Result; |
| 20507 | if (expandDIVREMByConstant(N: Op.getNode(), Result, HiLoVT: MVT::i32, DAG)) { |
| 20508 | SDValue Res0 = |
| 20509 | DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT, N1: Result[0], N2: Result[1]); |
| 20510 | SDValue Res1 = |
| 20511 | DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT, N1: Result[2], N2: Result[3]); |
| 20512 | return DAG.getNode(Opcode: ISD::MERGE_VALUES, DL: dl, VTList: Op->getVTList(), |
| 20513 | Ops: {Res0, Res1}); |
| 20514 | } |
| 20515 | } |
| 20516 | |
| 20517 | Type *Ty = VT.getTypeForEVT(Context&: *DAG.getContext()); |
| 20518 | |
| 20519 | // If the target has hardware divide, use divide + multiply + subtract: |
| 20520 | // div = a / b |
| 20521 | // rem = a - b * div |
| 20522 | // return {div, rem} |
| 20523 | // This should be lowered into UDIV/SDIV + MLS later on. |
| 20524 | bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode() |
| 20525 | : Subtarget->hasDivideInARMMode(); |
| 20526 | if (hasDivide && Op->getValueType(ResNo: 0).isSimple() && |
| 20527 | Op->getSimpleValueType(ResNo: 0) == MVT::i32) { |
| 20528 | unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV; |
| 20529 | const SDValue Dividend = Op->getOperand(Num: 0); |
| 20530 | const SDValue Divisor = Op->getOperand(Num: 1); |
| 20531 | SDValue Div = DAG.getNode(Opcode: DivOpcode, DL: dl, VT, N1: Dividend, N2: Divisor); |
| 20532 | SDValue Mul = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT, N1: Div, N2: Divisor); |
| 20533 | SDValue Rem = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: Dividend, N2: Mul); |
| 20534 | |
| 20535 | SDValue Values[2] = {Div, Rem}; |
| 20536 | return DAG.getNode(Opcode: ISD::MERGE_VALUES, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT), Ops: Values); |
| 20537 | } |
| 20538 | |
| 20539 | RTLIB::Libcall LC = getDivRemLibcall(N: Op.getNode(), |
| 20540 | SVT: VT.getSimpleVT().SimpleTy); |
| 20541 | RTLIB::LibcallImpl LCImpl = DAG.getLibcalls().getLibcallImpl(Call: LC); |
| 20542 | |
| 20543 | SDValue InChain = DAG.getEntryNode(); |
| 20544 | |
| 20545 | TargetLowering::ArgListTy Args = getDivRemArgList(N: Op.getNode(), |
| 20546 | Context: DAG.getContext(), |
| 20547 | Subtarget); |
| 20548 | |
| 20549 | SDValue Callee = |
| 20550 | DAG.getExternalSymbol(LCImpl, VT: getPointerTy(DL: DAG.getDataLayout())); |
| 20551 | |
| 20552 | Type *RetTy = StructType::get(elt1: Ty, elts: Ty); |
| 20553 | |
| 20554 | if (getTM().getTargetTriple().isOSWindows()) |
| 20555 | InChain = WinDBZCheckDenominator(DAG, N: Op.getNode(), InChain); |
| 20556 | |
| 20557 | TargetLowering::CallLoweringInfo CLI(DAG); |
| 20558 | CLI.setDebugLoc(dl) |
| 20559 | .setChain(InChain) |
| 20560 | .setCallee(CC: DAG.getLibcalls().getLibcallImplCallingConv(Call: LCImpl), ResultType: RetTy, |
| 20561 | Target: Callee, ArgsList: std::move(Args)) |
| 20562 | .setInRegister() |
| 20563 | .setSExtResult(isSigned) |
| 20564 | .setZExtResult(!isSigned); |
| 20565 | |
| 20566 | std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); |
| 20567 | return CallInfo.first; |
| 20568 | } |
| 20569 | |
// Lowers REM nodes using the divmod helpers; see RTABI sections 4.2/4.3.
| 20572 | SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const { |
| 20573 | EVT VT = N->getValueType(ResNo: 0); |
| 20574 | |
| 20575 | if (VT == MVT::i64 && isa<ConstantSDNode>(Val: N->getOperand(Num: 1))) { |
| 20576 | SmallVector<SDValue> Result; |
| 20577 | if (expandDIVREMByConstant(N, Result, HiLoVT: MVT::i32, DAG)) |
| 20578 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: SDLoc(N), VT: N->getValueType(ResNo: 0), |
| 20579 | N1: Result[0], N2: Result[1]); |
| 20580 | } |
| 20581 | |
| 20582 | // Build return types (div and rem) |
| 20583 | std::vector<Type*> RetTyParams; |
| 20584 | Type *RetTyElement; |
| 20585 | |
| 20586 | switch (VT.getSimpleVT().SimpleTy) { |
| 20587 | default: llvm_unreachable("Unexpected request for libcall!" ); |
| 20588 | case MVT::i8: RetTyElement = Type::getInt8Ty(C&: *DAG.getContext()); break; |
| 20589 | case MVT::i16: RetTyElement = Type::getInt16Ty(C&: *DAG.getContext()); break; |
| 20590 | case MVT::i32: RetTyElement = Type::getInt32Ty(C&: *DAG.getContext()); break; |
| 20591 | case MVT::i64: RetTyElement = Type::getInt64Ty(C&: *DAG.getContext()); break; |
| 20592 | } |
| 20593 | |
| 20594 | RetTyParams.push_back(x: RetTyElement); |
| 20595 | RetTyParams.push_back(x: RetTyElement); |
| 20596 | ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams); |
| 20597 | Type *RetTy = StructType::get(Context&: *DAG.getContext(), Elements: ret); |
| 20598 | |
| 20599 | RTLIB::Libcall LC = getDivRemLibcall(N, SVT: N->getValueType(ResNo: 0).getSimpleVT(). |
| 20600 | SimpleTy); |
| 20601 | RTLIB::LibcallImpl LCImpl = DAG.getLibcalls().getLibcallImpl(Call: LC); |
| 20602 | SDValue InChain = DAG.getEntryNode(); |
| 20603 | TargetLowering::ArgListTy Args = getDivRemArgList(N, Context: DAG.getContext(), |
| 20604 | Subtarget); |
| 20605 | bool isSigned = N->getOpcode() == ISD::SREM; |
| 20606 | |
| 20607 | SDValue Callee = |
| 20608 | DAG.getExternalSymbol(LCImpl, VT: getPointerTy(DL: DAG.getDataLayout())); |
| 20609 | |
| 20610 | if (getTM().getTargetTriple().isOSWindows()) |
| 20611 | InChain = WinDBZCheckDenominator(DAG, N, InChain); |
| 20612 | |
| 20613 | // Lower call |
| 20614 | CallLoweringInfo CLI(DAG); |
| 20615 | CLI.setChain(InChain) |
| 20616 | .setCallee(CC: DAG.getLibcalls().getLibcallImplCallingConv(Call: LCImpl), ResultType: RetTy, |
| 20617 | Target: Callee, ArgsList: std::move(Args)) |
| 20618 | .setSExtResult(isSigned) |
| 20619 | .setZExtResult(!isSigned) |
| 20620 | .setDebugLoc(SDLoc(N)); |
| 20621 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); |
| 20622 | |
| 20623 | // Return second (rem) result operand (first contains div) |
| 20624 | SDNode *ResNode = CallResult.first.getNode(); |
| 20625 | assert(ResNode->getNumOperands() == 2 && "divmod should return two operands" ); |
| 20626 | return ResNode->getOperand(Num: 1); |
| 20627 | } |
| 20628 | |
| 20629 | SDValue |
| 20630 | ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { |
| 20631 | assert(getTM().getTargetTriple().isOSWindows() && |
| 20632 | "unsupported target platform" ); |
| 20633 | SDLoc DL(Op); |
| 20634 | |
| 20635 | // Get the inputs. |
| 20636 | SDValue Chain = Op.getOperand(i: 0); |
| 20637 | SDValue Size = Op.getOperand(i: 1); |
| 20638 | |
| 20639 | if (DAG.getMachineFunction().getFunction().hasFnAttribute( |
| 20640 | Kind: "no-stack-arg-probe" )) { |
| 20641 | MaybeAlign Align = |
| 20642 | cast<ConstantSDNode>(Val: Op.getOperand(i: 2))->getMaybeAlignValue(); |
| 20643 | SDValue SP = DAG.getCopyFromReg(Chain, dl: DL, Reg: ARM::SP, VT: MVT::i32); |
| 20644 | Chain = SP.getValue(R: 1); |
| 20645 | SP = DAG.getNode(Opcode: ISD::SUB, DL, VT: MVT::i32, N1: SP, N2: Size); |
| 20646 | if (Align) |
| 20647 | SP = DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: SP.getValue(R: 0), |
| 20648 | N2: DAG.getSignedConstant(Val: -Align->value(), DL, VT: MVT::i32)); |
| 20649 | Chain = DAG.getCopyToReg(Chain, dl: DL, Reg: ARM::SP, N: SP); |
| 20650 | SDValue Ops[2] = { SP, Chain }; |
| 20651 | return DAG.getMergeValues(Ops, dl: DL); |
| 20652 | } |
| 20653 | |
| 20654 | SDValue Words = DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i32, N1: Size, |
| 20655 | N2: DAG.getConstant(Val: 2, DL, VT: MVT::i32)); |
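// Windows' __chkstk protocol: the allocation size in 4-byte words is
// passed in r4, and the stack adjustment happens as part of the
// WIN__CHKSTK expansion, so the updated SP is read back afterwards.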
| 20656 | |
| 20657 | SDValue Glue; |
| 20658 | Chain = DAG.getCopyToReg(Chain, dl: DL, Reg: ARM::R4, N: Words, Glue); |
| 20659 | Glue = Chain.getValue(R: 1); |
| 20660 | |
| 20661 | SDVTList NodeTys = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue); |
| 20662 | Chain = DAG.getNode(Opcode: ARMISD::WIN__CHKSTK, DL, VTList: NodeTys, N1: Chain, N2: Glue); |
| 20663 | |
| 20664 | SDValue NewSP = DAG.getCopyFromReg(Chain, dl: DL, Reg: ARM::SP, VT: MVT::i32); |
| 20665 | Chain = NewSP.getValue(R: 1); |
| 20666 | |
| 20667 | SDValue Ops[2] = { NewSP, Chain }; |
| 20668 | return DAG.getMergeValues(Ops, dl: DL); |
| 20669 | } |
| 20670 | |
| 20671 | SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { |
| 20672 | bool IsStrict = Op->isStrictFPOpcode(); |
| 20673 | SDValue SrcVal = Op.getOperand(i: IsStrict ? 1 : 0); |
| 20674 | const unsigned DstSz = Op.getValueType().getSizeInBits(); |
| 20675 | const unsigned SrcSz = SrcVal.getValueType().getSizeInBits(); |
| 20676 | assert(DstSz > SrcSz && DstSz <= 64 && SrcSz >= 16 && |
| 20677 | "Unexpected type for custom-lowering FP_EXTEND" ); |
| 20678 | |
| 20679 | assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) && |
| 20680 | "With both FP DP and 16, any FP conversion is legal!" ); |
| 20681 | |
| 20682 | assert(!(DstSz == 32 && Subtarget->hasFP16()) && |
| 20683 | "With FP16, 16 to 32 conversion is legal!" ); |
| 20684 | |
| 20685 | // Converting from 32 -> 64 is valid if we have FP64. |
| 20686 | if (SrcSz == 32 && DstSz == 64 && Subtarget->hasFP64()) { |
| 20687 | // FIXME: Remove this when we have strict fp instruction selection patterns |
| 20688 | if (IsStrict) { |
| 20689 | SDLoc Loc(Op); |
| 20690 | SDValue Result = DAG.getNode(Opcode: ISD::FP_EXTEND, |
| 20691 | DL: Loc, VT: Op.getValueType(), Operand: SrcVal); |
| 20692 | return DAG.getMergeValues(Ops: {Result, Op.getOperand(i: 0)}, dl: Loc); |
| 20693 | } |
| 20694 | return Op; |
| 20695 | } |
| 20696 | |
// Otherwise, either we are converting from 16 -> 64 without FP16 and/or
// double-precision FP (or without Armv8-FP), which must be done in two
// steps, or we are converting from 32 -> 64 without double-precision FP, or
// from 16 -> 32 without FP16, which requires a libcall.
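// For example (illustrative): extending f16 -> f64 with FP16 but no FP64
// uses a hardware extend for the 16 -> 32 step, then a libcall for
// 32 -> 64.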
| 20702 | SDLoc Loc(Op); |
| 20703 | RTLIB::Libcall LC; |
| 20704 | MakeLibCallOptions CallOptions; |
| 20705 | SDValue Chain = IsStrict ? Op.getOperand(i: 0) : SDValue(); |
| 20706 | for (unsigned Sz = SrcSz; Sz <= 32 && Sz < DstSz; Sz *= 2) { |
| 20707 | bool Supported = (Sz == 16 ? Subtarget->hasFP16() : Subtarget->hasFP64()); |
| 20708 | MVT SrcVT = (Sz == 16 ? MVT::f16 : MVT::f32); |
| 20709 | MVT DstVT = (Sz == 16 ? MVT::f32 : MVT::f64); |
| 20710 | if (Supported) { |
| 20711 | if (IsStrict) { |
| 20712 | SrcVal = DAG.getNode(Opcode: ISD::STRICT_FP_EXTEND, DL: Loc, |
| 20713 | ResultTys: {DstVT, MVT::Other}, Ops: {Chain, SrcVal}); |
| 20714 | Chain = SrcVal.getValue(R: 1); |
| 20715 | } else { |
| 20716 | SrcVal = DAG.getNode(Opcode: ISD::FP_EXTEND, DL: Loc, VT: DstVT, Operand: SrcVal); |
| 20717 | } |
| 20718 | } else { |
| 20719 | LC = RTLIB::getFPEXT(OpVT: SrcVT, RetVT: DstVT); |
| 20720 | assert(LC != RTLIB::UNKNOWN_LIBCALL && |
| 20721 | "Unexpected type for custom-lowering FP_EXTEND" ); |
| 20722 | std::tie(args&: SrcVal, args&: Chain) = makeLibCall(DAG, LC, RetVT: DstVT, Ops: SrcVal, CallOptions, |
| 20723 | dl: Loc, Chain); |
| 20724 | } |
| 20725 | } |
| 20726 | |
| 20727 | return IsStrict ? DAG.getMergeValues(Ops: {SrcVal, Chain}, dl: Loc) : SrcVal; |
| 20728 | } |
| 20729 | |
| 20730 | SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { |
| 20731 | bool IsStrict = Op->isStrictFPOpcode(); |
| 20732 | |
| 20733 | SDValue SrcVal = Op.getOperand(i: IsStrict ? 1 : 0); |
| 20734 | EVT SrcVT = SrcVal.getValueType(); |
| 20735 | EVT DstVT = Op.getValueType(); |
| 20736 | const unsigned DstSz = Op.getValueType().getSizeInBits(); |
| 20737 | const unsigned SrcSz = SrcVT.getSizeInBits(); |
| 20738 | (void)DstSz; |
| 20739 | assert(DstSz < SrcSz && SrcSz <= 64 && DstSz >= 16 && |
| 20740 | "Unexpected type for custom-lowering FP_ROUND" ); |
| 20741 | |
| 20742 | assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) && |
| 20743 | "With both FP DP and 16, any FP conversion is legal!" ); |
| 20744 | |
| 20745 | SDLoc Loc(Op); |
| 20746 | |
// A single instruction handles 32 -> 16 if we have FP16.
| 20748 | if (SrcSz == 32 && Subtarget->hasFP16()) |
| 20749 | return Op; |
| 20750 | |
// Otherwise use a libcall: 32 -> 16, or 64 -> {32, 16}.
| 20752 | RTLIB::Libcall LC = RTLIB::getFPROUND(OpVT: SrcVT, RetVT: DstVT); |
| 20753 | assert(LC != RTLIB::UNKNOWN_LIBCALL && |
| 20754 | "Unexpected type for custom-lowering FP_ROUND" ); |
| 20755 | MakeLibCallOptions CallOptions; |
| 20756 | SDValue Chain = IsStrict ? Op.getOperand(i: 0) : SDValue(); |
| 20757 | SDValue Result; |
| 20758 | std::tie(args&: Result, args&: Chain) = makeLibCall(DAG, LC, RetVT: DstVT, Ops: SrcVal, CallOptions, |
| 20759 | dl: Loc, Chain); |
| 20760 | return IsStrict ? DAG.getMergeValues(Ops: {Result, Chain}, dl: Loc) : Result; |
| 20761 | } |
| 20762 | |
| 20763 | bool |
| 20764 | ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { |
| 20765 | // The ARM target isn't yet aware of offsets. |
| 20766 | return false; |
| 20767 | } |
| 20768 | |
| 20769 | bool ARM::isBitFieldInvertedMask(unsigned v) { |
| 20770 | if (v == 0xffffffff) |
| 20771 | return false; |
| 20772 | |
// There can be 1s on either or both "outsides"; all the "inside" bits must
// be 0s.
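// For example (illustrative): 0xF000000F qualifies (~v == 0x0FFFFFF0, a
// contiguous shifted mask), while 0xF00000F0 does not.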
| 20775 | return isShiftedMask_32(Value: ~v); |
| 20776 | } |
| 20777 | |
| 20778 | /// isFPImmLegal - Returns true if the target can instruction select the |
| 20779 | /// specified FP immediate natively. If false, the legalizer will |
| 20780 | /// materialize the FP immediate as a load from a constant pool. |
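/// For example (illustrative): 1.0 and 31.0 fit the 8-bit VFP immediate
/// encoding, while 0.1 does not and would be loaded from the constant pool.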
| 20781 | bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, |
| 20782 | bool ForCodeSize) const { |
| 20783 | if (!Subtarget->hasVFP3Base()) |
| 20784 | return false; |
| 20785 | if (VT == MVT::f16 && Subtarget->hasFullFP16()) |
| 20786 | return ARM_AM::getFP16Imm(FPImm: Imm) != -1; |
| 20787 | if (VT == MVT::f32 && Subtarget->hasFullFP16() && |
| 20788 | ARM_AM::getFP32FP16Imm(FPImm: Imm) != -1) |
| 20789 | return true; |
| 20790 | if (VT == MVT::f32) |
| 20791 | return ARM_AM::getFP32Imm(FPImm: Imm) != -1; |
| 20792 | if (VT == MVT::f64 && Subtarget->hasFP64()) |
| 20793 | return ARM_AM::getFP64Imm(FPImm: Imm) != -1; |
| 20794 | return false; |
| 20795 | } |
| 20796 | |
| 20797 | /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as |
| 20798 | /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment |
| 20799 | /// specified in the intrinsic calls. |
| 20800 | void ARMTargetLowering::getTgtMemIntrinsic( |
| 20801 | SmallVectorImpl<IntrinsicInfo> &Infos, const CallBase &I, |
| 20802 | MachineFunction &MF, unsigned Intrinsic) const { |
| 20803 | IntrinsicInfo Info; |
| 20804 | switch (Intrinsic) { |
| 20805 | case Intrinsic::arm_neon_vld1: |
| 20806 | case Intrinsic::arm_neon_vld2: |
| 20807 | case Intrinsic::arm_neon_vld3: |
| 20808 | case Intrinsic::arm_neon_vld4: |
| 20809 | case Intrinsic::arm_neon_vld2lane: |
| 20810 | case Intrinsic::arm_neon_vld3lane: |
| 20811 | case Intrinsic::arm_neon_vld4lane: |
| 20812 | case Intrinsic::arm_neon_vld2dup: |
| 20813 | case Intrinsic::arm_neon_vld3dup: |
| 20814 | case Intrinsic::arm_neon_vld4dup: { |
| 20815 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 20816 | // Conservatively set memVT to the entire set of vectors loaded. |
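// For example (illustrative): a vld4 returning four 128-bit vectors gives
// NumElts == 8, i.e. memVT == v8i64 covering all 64 bytes loaded.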
| 20817 | auto &DL = I.getDataLayout(); |
| 20818 | uint64_t NumElts = DL.getTypeSizeInBits(Ty: I.getType()) / 64; |
| 20819 | Info.memVT = EVT::getVectorVT(Context&: I.getType()->getContext(), VT: MVT::i64, NumElements: NumElts); |
| 20820 | Info.ptrVal = I.getArgOperand(i: 0); |
| 20821 | Info.offset = 0; |
| 20822 | Value *AlignArg = I.getArgOperand(i: I.arg_size() - 1); |
| 20823 | Info.align = cast<ConstantInt>(Val: AlignArg)->getMaybeAlignValue(); |
| 20824 | // volatile loads with NEON intrinsics not supported |
| 20825 | Info.flags = MachineMemOperand::MOLoad; |
| 20826 | Infos.push_back(Elt: Info); |
| 20827 | return; |
| 20828 | } |
| 20829 | case Intrinsic::arm_neon_vld1x2: |
| 20830 | case Intrinsic::arm_neon_vld1x3: |
| 20831 | case Intrinsic::arm_neon_vld1x4: { |
| 20832 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 20833 | // Conservatively set memVT to the entire set of vectors loaded. |
| 20834 | auto &DL = I.getDataLayout(); |
| 20835 | uint64_t NumElts = DL.getTypeSizeInBits(Ty: I.getType()) / 64; |
| 20836 | Info.memVT = EVT::getVectorVT(Context&: I.getType()->getContext(), VT: MVT::i64, NumElements: NumElts); |
| 20837 | Info.ptrVal = I.getArgOperand(i: I.arg_size() - 1); |
| 20838 | Info.offset = 0; |
| 20839 | Info.align = I.getParamAlign(ArgNo: I.arg_size() - 1).valueOrOne(); |
| 20840 | // volatile loads with NEON intrinsics not supported |
| 20841 | Info.flags = MachineMemOperand::MOLoad; |
| 20842 | Infos.push_back(Elt: Info); |
| 20843 | return; |
| 20844 | } |
| 20845 | case Intrinsic::arm_neon_vst1: |
| 20846 | case Intrinsic::arm_neon_vst2: |
| 20847 | case Intrinsic::arm_neon_vst3: |
| 20848 | case Intrinsic::arm_neon_vst4: |
| 20849 | case Intrinsic::arm_neon_vst2lane: |
| 20850 | case Intrinsic::arm_neon_vst3lane: |
| 20851 | case Intrinsic::arm_neon_vst4lane: { |
| 20852 | Info.opc = ISD::INTRINSIC_VOID; |
| 20853 | // Conservatively set memVT to the entire set of vectors stored. |
| 20854 | auto &DL = I.getDataLayout(); |
| 20855 | unsigned NumElts = 0; |
| 20856 | for (unsigned ArgI = 1, ArgE = I.arg_size(); ArgI < ArgE; ++ArgI) { |
| 20857 | Type *ArgTy = I.getArgOperand(i: ArgI)->getType(); |
| 20858 | if (!ArgTy->isVectorTy()) |
| 20859 | break; |
| 20860 | NumElts += DL.getTypeSizeInBits(Ty: ArgTy) / 64; |
| 20861 | } |
| 20862 | Info.memVT = EVT::getVectorVT(Context&: I.getType()->getContext(), VT: MVT::i64, NumElements: NumElts); |
| 20863 | Info.ptrVal = I.getArgOperand(i: 0); |
| 20864 | Info.offset = 0; |
| 20865 | Value *AlignArg = I.getArgOperand(i: I.arg_size() - 1); |
| 20866 | Info.align = cast<ConstantInt>(Val: AlignArg)->getMaybeAlignValue(); |
| 20867 | // volatile stores with NEON intrinsics not supported |
| 20868 | Info.flags = MachineMemOperand::MOStore; |
| 20869 | Infos.push_back(Elt: Info); |
| 20870 | return; |
| 20871 | } |
| 20872 | case Intrinsic::arm_neon_vst1x2: |
| 20873 | case Intrinsic::arm_neon_vst1x3: |
| 20874 | case Intrinsic::arm_neon_vst1x4: { |
| 20875 | Info.opc = ISD::INTRINSIC_VOID; |
| 20876 | // Conservatively set memVT to the entire set of vectors stored. |
| 20877 | auto &DL = I.getDataLayout(); |
| 20878 | unsigned NumElts = 0; |
| 20879 | for (unsigned ArgI = 1, ArgE = I.arg_size(); ArgI < ArgE; ++ArgI) { |
| 20880 | Type *ArgTy = I.getArgOperand(i: ArgI)->getType(); |
| 20881 | if (!ArgTy->isVectorTy()) |
| 20882 | break; |
| 20883 | NumElts += DL.getTypeSizeInBits(Ty: ArgTy) / 64; |
| 20884 | } |
| 20885 | Info.memVT = EVT::getVectorVT(Context&: I.getType()->getContext(), VT: MVT::i64, NumElements: NumElts); |
| 20886 | Info.ptrVal = I.getArgOperand(i: 0); |
| 20887 | Info.offset = 0; |
| 20888 | Info.align = I.getParamAlign(ArgNo: 0).valueOrOne(); |
| 20889 | // volatile stores with NEON intrinsics not supported |
| 20890 | Info.flags = MachineMemOperand::MOStore; |
| 20891 | Infos.push_back(Elt: Info); |
| 20892 | return; |
| 20893 | } |
| 20894 | case Intrinsic::arm_mve_vld2q: |
| 20895 | case Intrinsic::arm_mve_vld4q: { |
| 20896 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 20897 | // Conservatively set memVT to the entire set of vectors loaded. |
| 20898 | Type *VecTy = cast<StructType>(Val: I.getType())->getElementType(N: 1); |
| 20899 | unsigned Factor = Intrinsic == Intrinsic::arm_mve_vld2q ? 2 : 4; |
| 20900 | Info.memVT = EVT::getVectorVT(Context&: VecTy->getContext(), VT: MVT::i64, NumElements: Factor * 2); |
| 20901 | Info.ptrVal = I.getArgOperand(i: 0); |
| 20902 | Info.offset = 0; |
| 20903 | Info.align = Align(VecTy->getScalarSizeInBits() / 8); |
| 20904 | // volatile loads with MVE intrinsics not supported |
| 20905 | Info.flags = MachineMemOperand::MOLoad; |
| 20906 | Infos.push_back(Elt: Info); |
| 20907 | return; |
| 20908 | } |
| 20909 | case Intrinsic::arm_mve_vst2q: |
| 20910 | case Intrinsic::arm_mve_vst4q: { |
| 20911 | Info.opc = ISD::INTRINSIC_VOID; |
| 20912 | // Conservatively set memVT to the entire set of vectors stored. |
| 20913 | Type *VecTy = I.getArgOperand(i: 1)->getType(); |
| 20914 | unsigned Factor = Intrinsic == Intrinsic::arm_mve_vst2q ? 2 : 4; |
| 20915 | Info.memVT = EVT::getVectorVT(Context&: VecTy->getContext(), VT: MVT::i64, NumElements: Factor * 2); |
| 20916 | Info.ptrVal = I.getArgOperand(i: 0); |
| 20917 | Info.offset = 0; |
| 20918 | Info.align = Align(VecTy->getScalarSizeInBits() / 8); |
| 20919 | // volatile stores with MVE intrinsics not supported |
| 20920 | Info.flags = MachineMemOperand::MOStore; |
| 20921 | Infos.push_back(Elt: Info); |
| 20922 | return; |
| 20923 | } |
| 20924 | case Intrinsic::arm_mve_vldr_gather_base: |
| 20925 | case Intrinsic::arm_mve_vldr_gather_base_predicated: { |
| 20926 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 20927 | Info.ptrVal = nullptr; |
| 20928 | Info.memVT = MVT::getVT(Ty: I.getType()); |
| 20929 | Info.align = Align(1); |
| 20930 | Info.flags |= MachineMemOperand::MOLoad; |
| 20931 | Infos.push_back(Elt: Info); |
| 20932 | return; |
| 20933 | } |
| 20934 | case Intrinsic::arm_mve_vldr_gather_base_wb: |
| 20935 | case Intrinsic::arm_mve_vldr_gather_base_wb_predicated: { |
| 20936 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 20937 | Info.ptrVal = nullptr; |
| 20938 | Info.memVT = MVT::getVT(Ty: I.getType()->getContainedType(i: 0)); |
| 20939 | Info.align = Align(1); |
| 20940 | Info.flags |= MachineMemOperand::MOLoad; |
| 20941 | Infos.push_back(Elt: Info); |
| 20942 | return; |
| 20943 | } |
| 20944 | case Intrinsic::arm_mve_vldr_gather_offset: |
| 20945 | case Intrinsic::arm_mve_vldr_gather_offset_predicated: { |
| 20946 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 20947 | Info.ptrVal = nullptr; |
| 20948 | MVT DataVT = MVT::getVT(Ty: I.getType()); |
| 20949 | unsigned MemSize = cast<ConstantInt>(Val: I.getArgOperand(i: 2))->getZExtValue(); |
| 20950 | Info.memVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: MemSize), |
| 20951 | NumElements: DataVT.getVectorNumElements()); |
| 20952 | Info.align = Align(1); |
| 20953 | Info.flags |= MachineMemOperand::MOLoad; |
| 20954 | Infos.push_back(Elt: Info); |
| 20955 | return; |
| 20956 | } |
| 20957 | case Intrinsic::arm_mve_vstr_scatter_base: |
| 20958 | case Intrinsic::arm_mve_vstr_scatter_base_predicated: { |
| 20959 | Info.opc = ISD::INTRINSIC_VOID; |
| 20960 | Info.ptrVal = nullptr; |
| 20961 | Info.memVT = MVT::getVT(Ty: I.getArgOperand(i: 2)->getType()); |
| 20962 | Info.align = Align(1); |
| 20963 | Info.flags |= MachineMemOperand::MOStore; |
| 20964 | Infos.push_back(Elt: Info); |
| 20965 | return; |
| 20966 | } |
| 20967 | case Intrinsic::arm_mve_vstr_scatter_base_wb: |
| 20968 | case Intrinsic::arm_mve_vstr_scatter_base_wb_predicated: { |
| 20969 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 20970 | Info.ptrVal = nullptr; |
| 20971 | Info.memVT = MVT::getVT(Ty: I.getArgOperand(i: 2)->getType()); |
| 20972 | Info.align = Align(1); |
| 20973 | Info.flags |= MachineMemOperand::MOStore; |
| 20974 | Infos.push_back(Elt: Info); |
| 20975 | return; |
| 20976 | } |
| 20977 | case Intrinsic::arm_mve_vstr_scatter_offset: |
| 20978 | case Intrinsic::arm_mve_vstr_scatter_offset_predicated: { |
| 20979 | Info.opc = ISD::INTRINSIC_VOID; |
| 20980 | Info.ptrVal = nullptr; |
| 20981 | MVT DataVT = MVT::getVT(Ty: I.getArgOperand(i: 2)->getType()); |
| 20982 | unsigned MemSize = cast<ConstantInt>(Val: I.getArgOperand(i: 3))->getZExtValue(); |
| 20983 | Info.memVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: MemSize), |
| 20984 | NumElements: DataVT.getVectorNumElements()); |
| 20985 | Info.align = Align(1); |
| 20986 | Info.flags |= MachineMemOperand::MOStore; |
| 20987 | Infos.push_back(Elt: Info); |
| 20988 | return; |
| 20989 | } |
| 20990 | case Intrinsic::arm_ldaex: |
| 20991 | case Intrinsic::arm_ldrex: { |
| 20992 | auto &DL = I.getDataLayout(); |
| 20993 | Type *ValTy = I.getParamElementType(ArgNo: 0); |
| 20994 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 20995 | Info.memVT = MVT::getVT(Ty: ValTy); |
| 20996 | Info.ptrVal = I.getArgOperand(i: 0); |
| 20997 | Info.offset = 0; |
| 20998 | Info.align = DL.getABITypeAlign(Ty: ValTy); |
| 20999 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; |
| 21000 | Infos.push_back(Elt: Info); |
| 21001 | return; |
| 21002 | } |
| 21003 | case Intrinsic::arm_stlex: |
| 21004 | case Intrinsic::arm_strex: { |
| 21005 | auto &DL = I.getDataLayout(); |
| 21006 | Type *ValTy = I.getParamElementType(ArgNo: 1); |
| 21007 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 21008 | Info.memVT = MVT::getVT(Ty: ValTy); |
| 21009 | Info.ptrVal = I.getArgOperand(i: 1); |
| 21010 | Info.offset = 0; |
| 21011 | Info.align = DL.getABITypeAlign(Ty: ValTy); |
| 21012 | Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; |
| 21013 | Infos.push_back(Elt: Info); |
| 21014 | return; |
| 21015 | } |
| 21016 | case Intrinsic::arm_stlexd: |
| 21017 | case Intrinsic::arm_strexd: |
| 21018 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 21019 | Info.memVT = MVT::i64; |
| 21020 | Info.ptrVal = I.getArgOperand(i: 2); |
| 21021 | Info.offset = 0; |
| 21022 | Info.align = Align(8); |
| 21023 | Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; |
| 21024 | Infos.push_back(Elt: Info); |
| 21025 | return; |
| 21026 | |
| 21027 | case Intrinsic::arm_ldaexd: |
| 21028 | case Intrinsic::arm_ldrexd: |
| 21029 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 21030 | Info.memVT = MVT::i64; |
| 21031 | Info.ptrVal = I.getArgOperand(i: 0); |
| 21032 | Info.offset = 0; |
| 21033 | Info.align = Align(8); |
| 21034 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; |
| 21035 | Infos.push_back(Elt: Info); |
| 21036 | return; |
| 21037 | |
| 21038 | default: |
| 21039 | break; |
| 21040 | } |
| 21041 | } |
| 21042 | |
| 21043 | /// Returns true if it is beneficial to convert a load of a constant |
| 21044 | /// to just the constant itself. |
| 21045 | bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, |
| 21046 | Type *Ty) const { |
| 21047 | assert(Ty->isIntegerTy()); |
| 21048 | |
| 21049 | unsigned Bits = Ty->getPrimitiveSizeInBits(); |
| 21050 | if (Bits == 0 || Bits > 32) |
| 21051 | return false; |
| 21052 | return true; |
| 21053 | } |
| 21054 | |
bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                                unsigned Index) const {
| 21057 | if (!isOperationLegalOrCustom(Op: ISD::EXTRACT_SUBVECTOR, VT: ResVT)) |
| 21058 | return false; |
| 21059 | |
| 21060 | return (Index == 0 || Index == ResVT.getVectorNumElements()); |
| 21061 | } |
| 21062 | |
| 21063 | Instruction *ARMTargetLowering::makeDMB(IRBuilderBase &Builder, |
| 21064 | ARM_MB::MemBOpt Domain) const { |
| 21065 | // First, if the target has no DMB, see what fallback we can use. |
| 21066 | if (!Subtarget->hasDataBarrier()) { |
    // Some ARMv6 CPUs can support data barriers with an mcr instruction.
| 21068 | // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get |
| 21069 | // here. |
| 21070 | if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) { |
      // These operands encode "mcr p15, 0, <Rd>, c7, c10, 5", the ARMv6 CP15
      // form of a data memory barrier.
      Value *args[6] = {Builder.getInt32(15), Builder.getInt32(0),
                        Builder.getInt32(0),  Builder.getInt32(7),
                        Builder.getInt32(10), Builder.getInt32(5)};
      return Builder.CreateIntrinsic(Intrinsic::arm_mcr, args);
| 21075 | } else { |
| 21076 | // Instead of using barriers, atomic accesses on these subtargets use |
| 21077 | // libcalls. |
      llvm_unreachable("makeDMB on a target so old that it has no barriers");
| 21079 | } |
| 21080 | } else { |
| 21081 | // Only a full system barrier exists in the M-class architectures. |
| 21082 | Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain; |
    Constant *CDomain = Builder.getInt32(Domain);
    return Builder.CreateIntrinsic(Intrinsic::arm_dmb, CDomain);
| 21085 | } |
| 21086 | } |
| 21087 | |
| 21088 | // Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html |
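// For example, a seq_cst store pairs this leading fence with the trailing one
// from emitTrailingFence below, giving roughly: dmb ish; str; dmb ish (the
// leading barrier may be weakened to dmb ishst on subtargets that prefer
// store-only barriers).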
| 21089 | Instruction *ARMTargetLowering::emitLeadingFence(IRBuilderBase &Builder, |
| 21090 | Instruction *Inst, |
| 21091 | AtomicOrdering Ord) const { |
| 21092 | switch (Ord) { |
| 21093 | case AtomicOrdering::NotAtomic: |
| 21094 | case AtomicOrdering::Unordered: |
    llvm_unreachable("Invalid fence: unordered/non-atomic");
| 21096 | case AtomicOrdering::Monotonic: |
| 21097 | case AtomicOrdering::Acquire: |
| 21098 | return nullptr; // Nothing to do |
| 21099 | case AtomicOrdering::SequentiallyConsistent: |
| 21100 | if (!Inst->hasAtomicStore()) |
| 21101 | return nullptr; // Nothing to do |
| 21102 | [[fallthrough]]; |
| 21103 | case AtomicOrdering::Release: |
| 21104 | case AtomicOrdering::AcquireRelease: |
    if (Subtarget->preferISHSTBarriers())
      return makeDMB(Builder, ARM_MB::ISHST);
    // FIXME: add a comment with a link to documentation justifying this.
    return makeDMB(Builder, ARM_MB::ISH);
| 21110 | } |
  llvm_unreachable("Unknown fence ordering in emitLeadingFence");
| 21112 | } |
| 21113 | |
| 21114 | Instruction *ARMTargetLowering::emitTrailingFence(IRBuilderBase &Builder, |
| 21115 | Instruction *Inst, |
| 21116 | AtomicOrdering Ord) const { |
| 21117 | switch (Ord) { |
| 21118 | case AtomicOrdering::NotAtomic: |
| 21119 | case AtomicOrdering::Unordered: |
    llvm_unreachable("Invalid fence: unordered/not-atomic");
| 21121 | case AtomicOrdering::Monotonic: |
| 21122 | case AtomicOrdering::Release: |
| 21123 | return nullptr; // Nothing to do |
| 21124 | case AtomicOrdering::Acquire: |
| 21125 | case AtomicOrdering::AcquireRelease: |
| 21126 | case AtomicOrdering::SequentiallyConsistent: |
    return makeDMB(Builder, ARM_MB::ISH);
| 21128 | } |
  llvm_unreachable("Unknown fence ordering in emitTrailingFence");
| 21130 | } |
| 21131 | |
// Loads and stores narrower than 64 bits are already atomic; wider ones
| 21133 | // are doomed anyway, so defer to the default libcall and blame the OS when |
| 21134 | // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit |
| 21135 | // anything for those. |
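// For example, on an ARMv7-A target a 64-bit `store atomic ... seq_cst` is
// expanded here (AtomicExpansionKind::Expand), eventually becoming a
// ldrexd/strexd retry loop, whereas on Cortex-M the same store is left to
// become a libcall.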
| 21136 | TargetLoweringBase::AtomicExpansionKind |
| 21137 | ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { |
| 21138 | bool has64BitAtomicStore; |
| 21139 | if (Subtarget->isMClass()) |
| 21140 | has64BitAtomicStore = false; |
| 21141 | else if (Subtarget->isThumb()) |
| 21142 | has64BitAtomicStore = Subtarget->hasV7Ops(); |
| 21143 | else |
| 21144 | has64BitAtomicStore = Subtarget->hasV6Ops(); |
| 21145 | |
| 21146 | unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits(); |
| 21147 | return Size == 64 && has64BitAtomicStore ? AtomicExpansionKind::Expand |
| 21148 | : AtomicExpansionKind::None; |
| 21149 | } |
| 21150 | |
// Loads and stores narrower than 64 bits are already atomic; wider ones
| 21152 | // are doomed anyway, so defer to the default libcall and blame the OS when |
| 21153 | // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit |
| 21154 | // anything for those. |
| 21155 | // FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that |
| 21156 | // guarantee, see DDI0406C ARM architecture reference manual, |
| 21157 | // sections A8.8.72-74 LDRD) |
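// LLOnly: a 64-bit `load atomic` becomes a single ldrexd whose exclusive
// monitor is then cleared, rather than being paired with a store-exclusive.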
| 21158 | TargetLowering::AtomicExpansionKind |
| 21159 | ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { |
| 21160 | bool has64BitAtomicLoad; |
| 21161 | if (Subtarget->isMClass()) |
| 21162 | has64BitAtomicLoad = false; |
| 21163 | else if (Subtarget->isThumb()) |
| 21164 | has64BitAtomicLoad = Subtarget->hasV7Ops(); |
| 21165 | else |
| 21166 | has64BitAtomicLoad = Subtarget->hasV6Ops(); |
| 21167 | |
| 21168 | unsigned Size = LI->getType()->getPrimitiveSizeInBits(); |
| 21169 | return (Size == 64 && has64BitAtomicLoad) ? AtomicExpansionKind::LLOnly |
| 21170 | : AtomicExpansionKind::None; |
| 21171 | } |
| 21172 | |
| 21173 | // For the real atomic operations, we have ldrex/strex up to 32 bits, |
| 21174 | // and up to 64 bits on the non-M profiles |
| 21175 | TargetLowering::AtomicExpansionKind |
| 21176 | ARMTargetLowering::shouldExpandAtomicRMWInIR(const AtomicRMWInst *AI) const { |
| 21177 | if (AI->isFloatingPointOperation()) |
| 21178 | return AtomicExpansionKind::CmpXChg; |
| 21179 | |
| 21180 | unsigned Size = AI->getType()->getPrimitiveSizeInBits(); |
| 21181 | bool hasAtomicRMW; |
| 21182 | if (Subtarget->isMClass()) |
| 21183 | hasAtomicRMW = Subtarget->hasV8MBaselineOps(); |
| 21184 | else if (Subtarget->isThumb()) |
| 21185 | hasAtomicRMW = Subtarget->hasV7Ops(); |
| 21186 | else |
| 21187 | hasAtomicRMW = Subtarget->hasV6Ops(); |
| 21188 | if (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW) { |
| 21189 | // At -O0, fast-regalloc cannot cope with the live vregs necessary to |
| 21190 | // implement atomicrmw without spilling. If the target address is also on |
| 21191 | // the stack and close enough to the spill slot, this can lead to a |
| 21192 | // situation where the monitor always gets cleared and the atomic operation |
| 21193 | // can never succeed. So at -O0 lower this operation to a CAS loop. |
| 21194 | if (getTargetMachine().getOptLevel() == CodeGenOptLevel::None) |
| 21195 | return AtomicExpansionKind::CmpXChg; |
| 21196 | return AtomicExpansionKind::LLSC; |
| 21197 | } |
| 21198 | return AtomicExpansionKind::None; |
| 21199 | } |
| 21200 | |
| 21201 | // Similar to shouldExpandAtomicRMWInIR, ldrex/strex can be used up to 32 |
| 21202 | // bits, and up to 64 bits on the non-M profiles. |
| 21203 | TargetLowering::AtomicExpansionKind |
| 21204 | ARMTargetLowering::shouldExpandAtomicCmpXchgInIR( |
| 21205 | const AtomicCmpXchgInst *AI) const { |
| 21206 | // At -O0, fast-regalloc cannot cope with the live vregs necessary to |
| 21207 | // implement cmpxchg without spilling. If the address being exchanged is also |
| 21208 | // on the stack and close enough to the spill slot, this can lead to a |
| 21209 | // situation where the monitor always gets cleared and the atomic operation |
| 21210 | // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead. |
  unsigned Size = AI->getOperand(1)->getType()->getPrimitiveSizeInBits();
| 21212 | bool HasAtomicCmpXchg; |
| 21213 | if (Subtarget->isMClass()) |
| 21214 | HasAtomicCmpXchg = Subtarget->hasV8MBaselineOps(); |
| 21215 | else if (Subtarget->isThumb()) |
| 21216 | HasAtomicCmpXchg = Subtarget->hasV7Ops(); |
| 21217 | else |
| 21218 | HasAtomicCmpXchg = Subtarget->hasV6Ops(); |
| 21219 | if (getTargetMachine().getOptLevel() != CodeGenOptLevel::None && |
| 21220 | HasAtomicCmpXchg && Size <= (Subtarget->isMClass() ? 32U : 64U)) |
| 21221 | return AtomicExpansionKind::LLSC; |
| 21222 | return AtomicExpansionKind::None; |
| 21223 | } |
| 21224 | |
| 21225 | bool ARMTargetLowering::shouldInsertFencesForAtomic( |
| 21226 | const Instruction *I) const { |
| 21227 | return InsertFencesForAtomic; |
| 21228 | } |
| 21229 | |
| 21230 | bool ARMTargetLowering::useLoadStackGuardNode(const Module &M) const { |
| 21231 | // ROPI/RWPI are not supported currently. |
| 21232 | return !Subtarget->isROPI() && !Subtarget->isRWPI(); |
| 21233 | } |
| 21234 | |
| 21235 | void ARMTargetLowering::insertSSPDeclarations( |
| 21236 | Module &M, const LibcallLoweringInfo &Libcalls) const { |
  // The MSVC CRT provides functionality for stack protection.
| 21238 | RTLIB::LibcallImpl SecurityCheckCookieLibcall = |
      Libcalls.getLibcallImpl(RTLIB::SECURITY_CHECK_COOKIE);

  RTLIB::LibcallImpl SecurityCookieVar =
      Libcalls.getLibcallImpl(RTLIB::STACK_CHECK_GUARD);
| 21243 | if (SecurityCheckCookieLibcall != RTLIB::Unsupported && |
| 21244 | SecurityCookieVar != RTLIB::Unsupported) { |
    // The MSVC CRT has a global variable holding the security cookie.
    M.getOrInsertGlobal(getLibcallImplName(SecurityCookieVar),
                        PointerType::getUnqual(M.getContext()));
| 21248 | |
    // The MSVC CRT has a function to validate the security cookie.
    FunctionCallee SecurityCheckCookie =
        M.getOrInsertFunction(getLibcallImplName(SecurityCheckCookieLibcall),
                              Type::getVoidTy(M.getContext()),
                              PointerType::getUnqual(M.getContext()));
    if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee()))
      F->addParamAttr(0, Attribute::AttrKind::InReg);
| 21256 | } |
| 21257 | |
| 21258 | TargetLowering::insertSSPDeclarations(M, Libcalls); |
| 21259 | } |
| 21260 | |
| 21261 | bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx, |
| 21262 | unsigned &Cost) const { |
| 21263 | // If we do not have NEON, vector types are not natively supported. |
| 21264 | if (!Subtarget->hasNEON()) |
| 21265 | return false; |
| 21266 | |
| 21267 | // Floating point values and vector values map to the same register file. |
  // Therefore, although we could do a store extract of a vector type, it is
  // better to leave it as a float, since that gives us more freedom in the
  // addressing modes available.
| 21271 | if (VectorTy->isFPOrFPVectorTy()) |
| 21272 | return false; |
| 21273 | |
| 21274 | // If the index is unknown at compile time, this is very expensive to lower |
| 21275 | // and it is not possible to combine the store with the extract. |
  if (!isa<ConstantInt>(Idx))
| 21277 | return false; |
| 21278 | |
  assert(VectorTy->isVectorTy() && "VectorTy is not a vector type");
| 21280 | unsigned BitWidth = VectorTy->getPrimitiveSizeInBits().getFixedValue(); |
| 21281 | // We can do a store + vector extract on any vector that fits perfectly in a D |
| 21282 | // or Q register. |
| 21283 | if (BitWidth == 64 || BitWidth == 128) { |
| 21284 | Cost = 0; |
| 21285 | return true; |
| 21286 | } |
| 21287 | return false; |
| 21288 | } |
| 21289 | |
| 21290 | bool ARMTargetLowering::canCreateUndefOrPoisonForTargetNode( |
| 21291 | SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, |
| 21292 | bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const { |
| 21293 | unsigned Opcode = Op.getOpcode(); |
| 21294 | switch (Opcode) { |
| 21295 | case ARMISD::VORRIMM: |
| 21296 | case ARMISD::VBICIMM: |
| 21297 | return false; |
| 21298 | } |
| 21299 | return TargetLowering::canCreateUndefOrPoisonForTargetNode( |
| 21300 | Op, DemandedElts, DAG, PoisonOnly, ConsiderFlags, Depth); |
| 21301 | } |
| 21302 | |
| 21303 | bool ARMTargetLowering::isCheapToSpeculateCttz(Type *Ty) const { |
| 21304 | return Subtarget->hasV5TOps() && !Subtarget->isThumb1Only(); |
| 21305 | } |
| 21306 | |
| 21307 | bool ARMTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const { |
| 21308 | return Subtarget->hasV5TOps() && !Subtarget->isThumb1Only(); |
| 21309 | } |
| 21310 | |
| 21311 | bool ARMTargetLowering::isMaskAndCmp0FoldingBeneficial( |
| 21312 | const Instruction &AndI) const { |
| 21313 | if (!Subtarget->hasV7Ops()) |
| 21314 | return false; |
| 21315 | |
| 21316 | // Sink the `and` instruction only if the mask would fit into a modified |
| 21317 | // immediate operand. |
  ConstantInt *Mask = dyn_cast<ConstantInt>(AndI.getOperand(1));
| 21319 | if (!Mask || Mask->getValue().getBitWidth() > 32u) |
| 21320 | return false; |
| 21321 | auto MaskVal = unsigned(Mask->getValue().getZExtValue()); |
  return (Subtarget->isThumb2() ? ARM_AM::getT2SOImmVal(MaskVal)
                                : ARM_AM::getSOImmVal(MaskVal)) != -1;
| 21324 | } |
| 21325 | |
| 21326 | TargetLowering::ShiftLegalizationStrategy |
| 21327 | ARMTargetLowering::preferredShiftLegalizationStrategy( |
| 21328 | SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const { |
| 21329 | if (Subtarget->hasMinSize() && !getTM().getTargetTriple().isOSWindows()) |
| 21330 | return ShiftLegalizationStrategy::LowerToLibcall; |
| 21331 | return TargetLowering::preferredShiftLegalizationStrategy(DAG, N, |
| 21332 | ExpansionFactor); |
| 21333 | } |
| 21334 | |
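/// For a 64-bit load this emits, as a sketch (little-endian):
///   %lohi = call { i32, i32 } @llvm.arm.ldrexd(ptr %addr)
///   %lo = extractvalue { i32, i32 } %lohi, 0
///   %hi = extractvalue { i32, i32 } %lohi, 1
///   %val64 = or i64 (zext %lo), (shl (zext %hi), 32)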
| 21335 | Value *ARMTargetLowering::emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, |
| 21336 | Value *Addr, |
| 21337 | AtomicOrdering Ord) const { |
| 21338 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); |
  bool IsAcquire = isAcquireOrStronger(Ord);
| 21340 | |
| 21341 | // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd |
| 21342 | // intrinsic must return {i32, i32} and we have to recombine them into a |
| 21343 | // single i64 here. |
| 21344 | if (ValueTy->getPrimitiveSizeInBits() == 64) { |
| 21345 | Intrinsic::ID Int = |
| 21346 | IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd; |
| 21347 | |
    Value *LoHi =
        Builder.CreateIntrinsic(Int, Addr, /*FMFSource=*/nullptr, "lohi");

    Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
    Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
    if (!Subtarget->isLittle())
      std::swap(Lo, Hi);
    Lo = Builder.CreateZExt(Lo, ValueTy, "lo64");
    Hi = Builder.CreateZExt(Hi, ValueTy, "hi64");
    return Builder.CreateOr(
        Lo, Builder.CreateShl(Hi, ConstantInt::get(ValueTy, 32)), "val64");
| 21359 | } |
| 21360 | |
  Type *Tys[] = {Addr->getType()};
  Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
  CallInst *CI = Builder.CreateIntrinsic(Int, Tys, Addr);

  CI->addParamAttr(
      0, Attribute::get(M->getContext(), Attribute::ElementType, ValueTy));
  return Builder.CreateTruncOrBitCast(CI, ValueTy);
| 21368 | } |
| 21369 | |
| 21370 | void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance( |
| 21371 | IRBuilderBase &Builder) const { |
| 21372 | if (!Subtarget->hasV7Ops()) |
| 21373 | return; |
  Builder.CreateIntrinsic(Intrinsic::arm_clrex, {});
| 21375 | } |
| 21376 | |
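/// For a 64-bit store this emits, as a sketch (little-endian):
///   %lo = trunc i64 %val to i32
///   %hi = trunc i64 (lshr i64 %val, 32) to i32
///   %status = call i32 @llvm.arm.strexd(i32 %lo, i32 %hi, ptr %addr)
/// A non-zero %status means the store-exclusive failed and the enclosing
/// LL/SC loop must retry.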
| 21377 | Value *ARMTargetLowering::emitStoreConditional(IRBuilderBase &Builder, |
| 21378 | Value *Val, Value *Addr, |
| 21379 | AtomicOrdering Ord) const { |
| 21380 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); |
  bool IsRelease = isReleaseOrStronger(Ord);
| 21382 | |
| 21383 | // Since the intrinsics must have legal type, the i64 intrinsics take two |
| 21384 | // parameters: "i32, i32". We must marshal Val into the appropriate form |
| 21385 | // before the call. |
| 21386 | if (Val->getType()->getPrimitiveSizeInBits() == 64) { |
| 21387 | Intrinsic::ID Int = |
| 21388 | IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd; |
    Type *Int32Ty = Type::getInt32Ty(M->getContext());

    Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
    Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
    if (!Subtarget->isLittle())
      std::swap(Lo, Hi);
    return Builder.CreateIntrinsic(Int, {Lo, Hi, Addr});
| 21396 | } |
| 21397 | |
| 21398 | Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex; |
  Type *Tys[] = {Addr->getType()};
  Function *Strex = Intrinsic::getOrInsertDeclaration(M, Int, Tys);

  CallInst *CI = Builder.CreateCall(
      Strex, {Builder.CreateZExtOrBitCast(
                  Val, Strex->getFunctionType()->getParamType(0)),
              Addr});
  CI->addParamAttr(1, Attribute::get(M->getContext(), Attribute::ElementType,
                                     Val->getType()));
| 21408 | return CI; |
| 21409 | } |
| 21410 | |
bool ARMTargetLowering::alignLoopsWithOptSize() const {
| 21413 | return Subtarget->isMClass(); |
| 21414 | } |
| 21415 | |
| 21416 | /// A helper function for determining the number of interleaved accesses we |
| 21417 | /// will generate when lowering accesses of the given type. |
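/// For example, a 512-bit <16 x i32> is lowered as (512 + 127) / 128 = 4
/// accesses, while a 64-bit <8 x i8> needs just one.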
| 21418 | unsigned |
| 21419 | ARMTargetLowering::getNumInterleavedAccesses(VectorType *VecTy, |
| 21420 | const DataLayout &DL) const { |
  return (DL.getTypeSizeInBits(VecTy) + 127) / 128;
| 21422 | } |
| 21423 | |
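// For example, <8 x i16> with Factor 2 is legal under both NEON and MVE
// (each sub-vector fills a 128-bit register), while <4 x half> under NEON
// and any Factor-3 access under MVE are rejected below.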
| 21424 | bool ARMTargetLowering::isLegalInterleavedAccessType( |
| 21425 | unsigned Factor, FixedVectorType *VecTy, Align Alignment, |
| 21426 | const DataLayout &DL) const { |
| 21427 | |
  unsigned VecSize = DL.getTypeSizeInBits(VecTy);
  unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType());
| 21430 | |
| 21431 | if (!Subtarget->hasNEON() && !Subtarget->hasMVEIntegerOps()) |
| 21432 | return false; |
| 21433 | |
| 21434 | // Ensure the vector doesn't have f16 elements. Even though we could do an |
| 21435 | // i16 vldN, we can't hold the f16 vectors and will end up converting via |
| 21436 | // f32. |
| 21437 | if (Subtarget->hasNEON() && VecTy->getElementType()->isHalfTy()) |
| 21438 | return false; |
| 21439 | if (Subtarget->hasMVEIntegerOps() && Factor == 3) |
| 21440 | return false; |
| 21441 | |
| 21442 | // Ensure the number of vector elements is greater than 1. |
| 21443 | if (VecTy->getNumElements() < 2) |
| 21444 | return false; |
| 21445 | |
| 21446 | // Ensure the element type is legal. |
| 21447 | if (ElSize != 8 && ElSize != 16 && ElSize != 32) |
| 21448 | return false; |
  // And ensure the alignment is high enough under MVE.
| 21450 | if (Subtarget->hasMVEIntegerOps() && Alignment < ElSize / 8) |
| 21451 | return false; |
| 21452 | |
| 21453 | // Ensure the total vector size is 64 or a multiple of 128. Types larger than |
| 21454 | // 128 will be split into multiple interleaved accesses. |
| 21455 | if (Subtarget->hasNEON() && VecSize == 64) |
| 21456 | return true; |
| 21457 | return VecSize % 128 == 0; |
| 21458 | } |
| 21459 | |
| 21460 | unsigned ARMTargetLowering::getMaxSupportedInterleaveFactor() const { |
| 21461 | if (Subtarget->hasNEON()) |
| 21462 | return 4; |
| 21463 | if (Subtarget->hasMVEIntegerOps()) |
| 21464 | return MVEMaxSupportedInterleaveFactor; |
| 21465 | return TargetLoweringBase::getMaxSupportedInterleaveFactor(); |
| 21466 | } |
| 21467 | |
| 21468 | /// Lower an interleaved load into a vldN intrinsic. |
| 21469 | /// |
| 21470 | /// E.g. Lower an interleaved load (Factor = 2): |
| 21471 | /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4 |
| 21472 | /// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6> ; Extract even elements |
| 21473 | /// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7> ; Extract odd elements |
| 21474 | /// |
| 21475 | /// Into: |
| 21476 | /// %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4) |
| 21477 | /// %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0 |
| 21478 | /// %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1 |
| 21479 | bool ARMTargetLowering::lowerInterleavedLoad( |
| 21480 | Instruction *Load, Value *Mask, ArrayRef<ShuffleVectorInst *> Shuffles, |
| 21481 | ArrayRef<unsigned> Indices, unsigned Factor, const APInt &GapMask) const { |
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");
  assert(!Shuffles.empty() && "Empty shufflevector input");
  assert(Shuffles.size() == Indices.size() &&
         "Unmatched number of shufflevectors and indices");
| 21487 | |
  auto *LI = dyn_cast<LoadInst>(Load);
| 21489 | if (!LI) |
| 21490 | return false; |
  assert(!Mask && GapMask.popcount() == Factor && "Unexpected mask on a load");
| 21492 | |
  auto *VecTy = cast<FixedVectorType>(Shuffles[0]->getType());
| 21494 | Type *EltTy = VecTy->getElementType(); |
| 21495 | |
| 21496 | const DataLayout &DL = LI->getDataLayout(); |
| 21497 | Align Alignment = LI->getAlign(); |
| 21498 | |
| 21499 | // Skip if we do not have NEON and skip illegal vector types. We can |
| 21500 | // "legalize" wide vector types into multiple interleaved accesses as long as |
| 21501 | // the vector types are divisible by 128. |
| 21502 | if (!isLegalInterleavedAccessType(Factor, VecTy, Alignment, DL)) |
| 21503 | return false; |
| 21504 | |
| 21505 | unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL); |
| 21506 | |
  // A pointer vector cannot be the return type of the ldN intrinsics. Need to
| 21508 | // load integer vectors first and then convert to pointer vectors. |
| 21509 | if (EltTy->isPointerTy()) |
    VecTy = FixedVectorType::get(DL.getIntPtrType(EltTy), VecTy);
| 21511 | |
| 21512 | IRBuilder<> Builder(LI); |
| 21513 | |
| 21514 | // The base address of the load. |
| 21515 | Value *BaseAddr = LI->getPointerOperand(); |
| 21516 | |
| 21517 | if (NumLoads > 1) { |
| 21518 | // If we're going to generate more than one load, reset the sub-vector type |
| 21519 | // to something legal. |
    VecTy = FixedVectorType::get(VecTy->getElementType(),
                                 VecTy->getNumElements() / NumLoads);
| 21522 | } |
| 21523 | |
  assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!");
| 21525 | |
| 21526 | auto createLoadIntrinsic = [&](Value *BaseAddr) { |
| 21527 | if (Subtarget->hasNEON()) { |
      Type *PtrTy = Builder.getPtrTy(LI->getPointerAddressSpace());
| 21529 | Type *Tys[] = {VecTy, PtrTy}; |
| 21530 | static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2, |
| 21531 | Intrinsic::arm_neon_vld3, |
| 21532 | Intrinsic::arm_neon_vld4}; |
| 21533 | |
| 21534 | SmallVector<Value *, 2> Ops; |
      Ops.push_back(BaseAddr);
      Ops.push_back(Builder.getInt32(LI->getAlign().value()));

      return Builder.CreateIntrinsic(LoadInts[Factor - 2], Tys, Ops,
                                     /*FMFSource=*/nullptr, "vldN");
| 21540 | } else { |
      assert((Factor == 2 || Factor == 4) &&
             "expected interleave factor of 2 or 4 for MVE");
| 21543 | Intrinsic::ID LoadInts = |
| 21544 | Factor == 2 ? Intrinsic::arm_mve_vld2q : Intrinsic::arm_mve_vld4q; |
      Type *PtrTy = Builder.getPtrTy(LI->getPointerAddressSpace());
| 21546 | Type *Tys[] = {VecTy, PtrTy}; |
| 21547 | |
| 21548 | SmallVector<Value *, 2> Ops; |
      Ops.push_back(BaseAddr);
      return Builder.CreateIntrinsic(LoadInts, Tys, Ops, /*FMFSource=*/nullptr,
                                     "vldN");
| 21552 | } |
| 21553 | }; |
| 21554 | |
| 21555 | // Holds sub-vectors extracted from the load intrinsic return values. The |
| 21556 | // sub-vectors are associated with the shufflevector instructions they will |
| 21557 | // replace. |
| 21558 | DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs; |
| 21559 | |
| 21560 | for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) { |
| 21561 | // If we're generating more than one load, compute the base address of |
| 21562 | // subsequent loads as an offset from the previous. |
| 21563 | if (LoadCount > 0) |
      BaseAddr = Builder.CreateConstGEP1_32(VecTy->getElementType(), BaseAddr,
                                            VecTy->getNumElements() * Factor);
| 21566 | |
| 21567 | CallInst *VldN = createLoadIntrinsic(BaseAddr); |
| 21568 | |
| 21569 | // Replace uses of each shufflevector with the corresponding vector loaded |
| 21570 | // by ldN. |
| 21571 | for (unsigned i = 0; i < Shuffles.size(); i++) { |
| 21572 | ShuffleVectorInst *SV = Shuffles[i]; |
| 21573 | unsigned Index = Indices[i]; |
| 21574 | |
      Value *SubVec = Builder.CreateExtractValue(VldN, Index);
| 21576 | |
| 21577 | // Convert the integer vector to pointer vector if the element is pointer. |
| 21578 | if (EltTy->isPointerTy()) |
        SubVec = Builder.CreateIntToPtr(
            SubVec,
            FixedVectorType::get(SV->getType()->getElementType(), VecTy));
| 21582 | |
      SubVecs[SV].push_back(SubVec);
| 21584 | } |
| 21585 | } |
| 21586 | |
| 21587 | // Replace uses of the shufflevector instructions with the sub-vectors |
| 21588 | // returned by the load intrinsic. If a shufflevector instruction is |
| 21589 | // associated with more than one sub-vector, those sub-vectors will be |
| 21590 | // concatenated into a single wide vector. |
| 21591 | for (ShuffleVectorInst *SVI : Shuffles) { |
| 21592 | auto &SubVec = SubVecs[SVI]; |
    auto *WideVec =
        SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0];
    SVI->replaceAllUsesWith(WideVec);
| 21596 | } |
| 21597 | |
| 21598 | return true; |
| 21599 | } |
| 21600 | |
| 21601 | /// Lower an interleaved store into a vstN intrinsic. |
| 21602 | /// |
| 21603 | /// E.g. Lower an interleaved store (Factor = 3): |
| 21604 | /// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1, |
| 21605 | /// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> |
| 21606 | /// store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4 |
| 21607 | /// |
| 21608 | /// Into: |
| 21609 | /// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3> |
| 21610 | /// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7> |
| 21611 | /// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11> |
| 21612 | /// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4) |
| 21613 | /// |
| 21614 | /// Note that the new shufflevectors will be removed and we'll only generate one |
| 21615 | /// vst3 instruction in CodeGen. |
| 21616 | /// |
| 21617 | /// Example for a more general valid mask (Factor 3). Lower: |
| 21618 | /// %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1, |
| 21619 | /// <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19> |
| 21620 | /// store <12 x i32> %i.vec, <12 x i32>* %ptr |
| 21621 | /// |
| 21622 | /// Into: |
| 21623 | /// %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7> |
| 21624 | /// %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35> |
| 21625 | /// %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19> |
| 21626 | /// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4) |
| 21627 | bool ARMTargetLowering::lowerInterleavedStore(Instruction *Store, |
| 21628 | Value *LaneMask, |
| 21629 | ShuffleVectorInst *SVI, |
| 21630 | unsigned Factor, |
| 21631 | const APInt &GapMask) const { |
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");
  auto *SI = dyn_cast<StoreInst>(Store);
  if (!SI)
    return false;
  assert(!LaneMask && GapMask.popcount() == Factor &&
         "Unexpected mask on store");

  auto *VecTy = cast<FixedVectorType>(SVI->getType());
  assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store");
| 21642 | |
| 21643 | unsigned LaneLen = VecTy->getNumElements() / Factor; |
| 21644 | Type *EltTy = VecTy->getElementType(); |
  auto *SubVecTy = FixedVectorType::get(EltTy, LaneLen);
| 21646 | |
| 21647 | const DataLayout &DL = SI->getDataLayout(); |
| 21648 | Align Alignment = SI->getAlign(); |
| 21649 | |
| 21650 | // Skip if we do not have NEON and skip illegal vector types. We can |
| 21651 | // "legalize" wide vector types into multiple interleaved accesses as long as |
| 21652 | // the vector types are divisible by 128. |
  if (!isLegalInterleavedAccessType(Factor, SubVecTy, Alignment, DL))
| 21654 | return false; |
| 21655 | |
  unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL);
| 21657 | |
  Value *Op0 = SVI->getOperand(0);
  Value *Op1 = SVI->getOperand(1);
| 21660 | IRBuilder<> Builder(SI); |
| 21661 | |
| 21662 | // StN intrinsics don't support pointer vectors as arguments. Convert pointer |
| 21663 | // vectors to integer vectors. |
| 21664 | if (EltTy->isPointerTy()) { |
| 21665 | Type *IntTy = DL.getIntPtrType(EltTy); |
| 21666 | |
| 21667 | // Convert to the corresponding integer vector. |
    auto *IntVecTy =
        FixedVectorType::get(IntTy, cast<FixedVectorType>(Op0->getType()));
    Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
    Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);

    SubVecTy = FixedVectorType::get(IntTy, LaneLen);
| 21674 | } |
| 21675 | |
| 21676 | // The base address of the store. |
| 21677 | Value *BaseAddr = SI->getPointerOperand(); |
| 21678 | |
| 21679 | if (NumStores > 1) { |
| 21680 | // If we're going to generate more than one store, reset the lane length |
| 21681 | // and sub-vector type to something legal. |
| 21682 | LaneLen /= NumStores; |
    SubVecTy = FixedVectorType::get(SubVecTy->getElementType(), LaneLen);
| 21684 | } |
| 21685 | |
  assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!");
| 21687 | |
| 21688 | auto Mask = SVI->getShuffleMask(); |
| 21689 | |
| 21690 | auto createStoreIntrinsic = [&](Value *BaseAddr, |
| 21691 | SmallVectorImpl<Value *> &Shuffles) { |
| 21692 | if (Subtarget->hasNEON()) { |
| 21693 | static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2, |
| 21694 | Intrinsic::arm_neon_vst3, |
| 21695 | Intrinsic::arm_neon_vst4}; |
      Type *PtrTy = Builder.getPtrTy(SI->getPointerAddressSpace());
| 21697 | Type *Tys[] = {PtrTy, SubVecTy}; |
| 21698 | |
| 21699 | SmallVector<Value *, 6> Ops; |
      Ops.push_back(BaseAddr);
      append_range(Ops, Shuffles);
      Ops.push_back(Builder.getInt32(SI->getAlign().value()));
      Builder.CreateIntrinsic(StoreInts[Factor - 2], Tys, Ops);
| 21704 | } else { |
      assert((Factor == 2 || Factor == 4) &&
             "expected interleave factor of 2 or 4 for MVE");
| 21707 | Intrinsic::ID StoreInts = |
| 21708 | Factor == 2 ? Intrinsic::arm_mve_vst2q : Intrinsic::arm_mve_vst4q; |
      Type *PtrTy = Builder.getPtrTy(SI->getPointerAddressSpace());
| 21710 | Type *Tys[] = {PtrTy, SubVecTy}; |
| 21711 | |
| 21712 | SmallVector<Value *, 6> Ops; |
      Ops.push_back(BaseAddr);
      append_range(Ops, Shuffles);
      for (unsigned F = 0; F < Factor; F++) {
        Ops.push_back(Builder.getInt32(F));
        Builder.CreateIntrinsic(StoreInts, Tys, Ops);
| 21718 | Ops.pop_back(); |
| 21719 | } |
| 21720 | } |
| 21721 | }; |
| 21722 | |
| 21723 | for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) { |
    // If we're generating more than one store, compute the base address of
| 21725 | // subsequent stores as an offset from the previous. |
| 21726 | if (StoreCount > 0) |
      BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(),
                                            BaseAddr, LaneLen * Factor);
| 21729 | |
| 21730 | SmallVector<Value *, 4> Shuffles; |
| 21731 | |
| 21732 | // Split the shufflevector operands into sub vectors for the new vstN call. |
| 21733 | for (unsigned i = 0; i < Factor; i++) { |
| 21734 | unsigned IdxI = StoreCount * LaneLen * Factor + i; |
| 21735 | if (Mask[IdxI] >= 0) { |
        Shuffles.push_back(Builder.CreateShuffleVector(
            Op0, Op1, createSequentialMask(Mask[IdxI], LaneLen, 0)));
| 21738 | } else { |
| 21739 | unsigned StartMask = 0; |
| 21740 | for (unsigned j = 1; j < LaneLen; j++) { |
| 21741 | unsigned IdxJ = StoreCount * LaneLen * Factor + j; |
| 21742 | if (Mask[IdxJ * Factor + IdxI] >= 0) { |
| 21743 | StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ; |
| 21744 | break; |
| 21745 | } |
| 21746 | } |
        // Note: If all elements in a chunk are undef, StartMask stays 0 and
        // we default to using elements from lane 0.
        // Note: Filling undef gaps with arbitrary elements is ok, since
        // those elements were being written anyway (with undefs).
        // Note: StartMask cannot be negative; it is checked in
        // isReInterleaveMask.
        Shuffles.push_back(Builder.CreateShuffleVector(
            Op0, Op1, createSequentialMask(StartMask, LaneLen, 0)));
| 21755 | } |
| 21756 | } |
| 21757 | |
| 21758 | createStoreIntrinsic(BaseAddr, Shuffles); |
| 21759 | } |
| 21760 | return true; |
| 21761 | } |
| 21762 | |
| 21763 | enum HABaseType { |
| 21764 | HA_UNKNOWN = 0, |
| 21765 | HA_FLOAT, |
| 21766 | HA_DOUBLE, |
| 21767 | HA_VECT64, |
| 21768 | HA_VECT128 |
| 21769 | }; |
| 21770 | |
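// For example, `struct S { float X; float Y[2]; }` is a homogeneous aggregate
// with Base == HA_FLOAT and Members == 3, so under AAPCS-VFP it is passed in
// s0-s2; mixing in a double (or exceeding four members) disqualifies it.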
| 21771 | static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base, |
| 21772 | uint64_t &Members) { |
  if (auto *ST = dyn_cast<StructType>(Ty)) {
| 21774 | for (unsigned i = 0; i < ST->getNumElements(); ++i) { |
| 21775 | uint64_t SubMembers = 0; |
      if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers))
| 21777 | return false; |
| 21778 | Members += SubMembers; |
| 21779 | } |
  } else if (auto *AT = dyn_cast<ArrayType>(Ty)) {
| 21781 | uint64_t SubMembers = 0; |
    if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers))
| 21783 | return false; |
| 21784 | Members += SubMembers * AT->getNumElements(); |
| 21785 | } else if (Ty->isFloatTy()) { |
| 21786 | if (Base != HA_UNKNOWN && Base != HA_FLOAT) |
| 21787 | return false; |
| 21788 | Members = 1; |
| 21789 | Base = HA_FLOAT; |
| 21790 | } else if (Ty->isDoubleTy()) { |
| 21791 | if (Base != HA_UNKNOWN && Base != HA_DOUBLE) |
| 21792 | return false; |
| 21793 | Members = 1; |
| 21794 | Base = HA_DOUBLE; |
  } else if (auto *VT = dyn_cast<VectorType>(Ty)) {
| 21796 | Members = 1; |
| 21797 | switch (Base) { |
| 21798 | case HA_FLOAT: |
| 21799 | case HA_DOUBLE: |
| 21800 | return false; |
| 21801 | case HA_VECT64: |
| 21802 | return VT->getPrimitiveSizeInBits().getFixedValue() == 64; |
| 21803 | case HA_VECT128: |
| 21804 | return VT->getPrimitiveSizeInBits().getFixedValue() == 128; |
| 21805 | case HA_UNKNOWN: |
| 21806 | switch (VT->getPrimitiveSizeInBits().getFixedValue()) { |
| 21807 | case 64: |
| 21808 | Base = HA_VECT64; |
| 21809 | return true; |
| 21810 | case 128: |
| 21811 | Base = HA_VECT128; |
| 21812 | return true; |
| 21813 | default: |
| 21814 | return false; |
| 21815 | } |
| 21816 | } |
| 21817 | } |
| 21818 | |
| 21819 | return (Members > 0 && Members <= 4); |
| 21820 | } |
| 21821 | |
| 21822 | /// Return the correct alignment for the current calling convention. |
| 21823 | Align ARMTargetLowering::getABIAlignmentForCallingConv( |
| 21824 | Type *ArgTy, const DataLayout &DL) const { |
  const Align ABITypeAlign = DL.getABITypeAlign(ArgTy);
| 21826 | if (!ArgTy->isVectorTy()) |
| 21827 | return ABITypeAlign; |
| 21828 | |
| 21829 | // Avoid over-aligning vector parameters. It would require realigning the |
| 21830 | // stack and waste space for no real benefit. |
| 21831 | MaybeAlign StackAlign = DL.getStackAlignment(); |
  assert(StackAlign && "data layout string is missing stack alignment");
  return std::min(ABITypeAlign, *StackAlign);
| 21834 | } |
| 21835 | |
| 21836 | /// Return true if a type is an AAPCS-VFP homogeneous aggregate or one of |
| 21837 | /// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when |
| 21838 | /// passing according to AAPCS rules. |
| 21839 | bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters( |
| 21840 | Type *Ty, CallingConv::ID CallConv, bool isVarArg, |
| 21841 | const DataLayout &DL) const { |
  if (getEffectiveCallingConv(CallConv, isVarArg) !=
| 21843 | CallingConv::ARM_AAPCS_VFP) |
| 21844 | return false; |
| 21845 | |
| 21846 | HABaseType Base = HA_UNKNOWN; |
| 21847 | uint64_t Members = 0; |
| 21848 | bool IsHA = isHomogeneousAggregate(Ty, Base, Members); |
  LLVM_DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump());
| 21850 | |
| 21851 | bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy(); |
| 21852 | return IsHA || IsIntArray; |
| 21853 | } |
| 21854 | |
| 21855 | Register ARMTargetLowering::getExceptionPointerRegister( |
| 21856 | const Constant *PersonalityFn) const { |
| 21857 | // Platforms which do not use SjLj EH may return values in these registers |
| 21858 | // via the personality function. |
| 21859 | ExceptionHandling EM = getTargetMachine().getExceptionModel(); |
| 21860 | return EM == ExceptionHandling::SjLj ? Register() : ARM::R0; |
| 21861 | } |
| 21862 | |
| 21863 | Register ARMTargetLowering::getExceptionSelectorRegister( |
| 21864 | const Constant *PersonalityFn) const { |
| 21865 | // Platforms which do not use SjLj EH may return values in these registers |
| 21866 | // via the personality function. |
| 21867 | ExceptionHandling EM = getTargetMachine().getExceptionModel(); |
| 21868 | return EM == ExceptionHandling::SjLj ? Register() : ARM::R1; |
| 21869 | } |
| 21870 | |
| 21871 | void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { |
| 21872 | // Update IsSplitCSR in ARMFunctionInfo. |
| 21873 | ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>(); |
| 21874 | AFI->setIsSplitCSR(true); |
| 21875 | } |
| 21876 | |
| 21877 | void ARMTargetLowering::insertCopiesSplitCSR( |
| 21878 | MachineBasicBlock *Entry, |
| 21879 | const SmallVectorImpl<MachineBasicBlock *> &Exits) const { |
| 21880 | const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
| 21882 | if (!IStart) |
| 21883 | return; |
| 21884 | |
| 21885 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 21886 | MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); |
| 21887 | MachineBasicBlock::iterator MBBI = Entry->begin(); |
| 21888 | for (const MCPhysReg *I = IStart; *I; ++I) { |
| 21889 | const TargetRegisterClass *RC = nullptr; |
    if (ARM::GPRRegClass.contains(*I))
      RC = &ARM::GPRRegClass;
    else if (ARM::DPRRegClass.contains(*I))
      RC = &ARM::DPRRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");
| 21896 | |
    Register NewVR = MRI->createVirtualRegister(RC);
| 21898 | // Create copy from CSR to a virtual register. |
| 21899 | // FIXME: this currently does not emit CFI pseudo-instructions, it works |
| 21900 | // fine for CXX_FAST_TLS since the C++-style TLS access functions should be |
| 21901 | // nounwind. If we want to generalize this later, we may need to emit |
| 21902 | // CFI pseudo-instructions. |
    assert(Entry->getParent()->getFunction().hasFnAttribute(
               Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);
| 21909 | |
| 21910 | // Insert the copy-back instructions right before the terminator. |
| 21911 | for (auto *Exit : Exits) |
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
| 21915 | } |
| 21916 | } |
| 21917 | |
| 21918 | void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const { |
| 21919 | MF.getFrameInfo().computeMaxCallFrameSize(MF); |
| 21920 | TargetLoweringBase::finalizeLowering(MF); |
| 21921 | } |
| 21922 | |
| 21923 | bool ARMTargetLowering::isComplexDeinterleavingSupported() const { |
| 21924 | return Subtarget->hasMVEIntegerOps(); |
| 21925 | } |
| 21926 | |
| 21927 | bool ARMTargetLowering::isComplexDeinterleavingOperationSupported( |
| 21928 | ComplexDeinterleavingOperation Operation, Type *Ty) const { |
  auto *VTy = dyn_cast<FixedVectorType>(Ty);
| 21930 | if (!VTy) |
| 21931 | return false; |
| 21932 | |
| 21933 | auto *ScalarTy = VTy->getScalarType(); |
| 21934 | unsigned NumElements = VTy->getNumElements(); |
| 21935 | |
| 21936 | unsigned VTyWidth = VTy->getScalarSizeInBits() * NumElements; |
  if (VTyWidth < 128 || !llvm::isPowerOf2_32(VTyWidth))
| 21938 | return false; |
| 21939 | |
| 21940 | // Both VCADD and VCMUL/VCMLA support the same types, F16 and F32 |
| 21941 | if (ScalarTy->isHalfTy() || ScalarTy->isFloatTy()) |
| 21942 | return Subtarget->hasMVEFloatOps(); |
| 21943 | |
| 21944 | if (Operation != ComplexDeinterleavingOperation::CAdd) |
| 21945 | return false; |
| 21946 | |
  return Subtarget->hasMVEIntegerOps() &&
         (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
          ScalarTy->isIntegerTy(32));
| 21950 | } |
| 21951 | |
| 21952 | ArrayRef<MCPhysReg> ARMTargetLowering::getRoundingControlRegisters() const { |
| 21953 | static const MCPhysReg RCRegs[] = {ARM::FPSCR_RM}; |
| 21954 | return RCRegs; |
| 21955 | } |
| 21956 | |
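// As a sketch, a partial complex multiply of two <8 x half> values with a
// 90-degree rotation becomes a single MVE intrinsic call, roughly:
//   call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 1, <8 x half> %b,
//                                              <8 x half> %a)
// while wider inputs are split in half below and the results recombined by
// shuffles.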
| 21957 | Value *ARMTargetLowering::createComplexDeinterleavingIR( |
| 21958 | IRBuilderBase &B, ComplexDeinterleavingOperation OperationType, |
| 21959 | ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB, |
| 21960 | Value *Accumulator) const { |
| 21961 | |
  FixedVectorType *Ty = cast<FixedVectorType>(InputA->getType());
| 21963 | |
| 21964 | unsigned TyWidth = Ty->getScalarSizeInBits() * Ty->getNumElements(); |
| 21965 | |
  assert(TyWidth >= 128 && "Width of vector type must be at least 128 bits");
| 21967 | |
| 21968 | if (TyWidth > 128) { |
| 21969 | int Stride = Ty->getNumElements() / 2; |
    auto SplitSeq = llvm::seq<int>(0, Ty->getNumElements());
    auto SplitSeqVec = llvm::to_vector(SplitSeq);
| 21972 | ArrayRef<int> LowerSplitMask(&SplitSeqVec[0], Stride); |
| 21973 | ArrayRef<int> UpperSplitMask(&SplitSeqVec[Stride], Stride); |
| 21974 | |
    auto *LowerSplitA = B.CreateShuffleVector(InputA, LowerSplitMask);
    auto *LowerSplitB = B.CreateShuffleVector(InputB, LowerSplitMask);
    auto *UpperSplitA = B.CreateShuffleVector(InputA, UpperSplitMask);
    auto *UpperSplitB = B.CreateShuffleVector(InputB, UpperSplitMask);
| 21979 | Value *LowerSplitAcc = nullptr; |
| 21980 | Value *UpperSplitAcc = nullptr; |
| 21981 | |
| 21982 | if (Accumulator) { |
      LowerSplitAcc = B.CreateShuffleVector(Accumulator, LowerSplitMask);
      UpperSplitAcc = B.CreateShuffleVector(Accumulator, UpperSplitMask);
| 21985 | } |
| 21986 | |
    auto *LowerSplitInt = createComplexDeinterleavingIR(
        B, OperationType, Rotation, LowerSplitA, LowerSplitB, LowerSplitAcc);
    auto *UpperSplitInt = createComplexDeinterleavingIR(
        B, OperationType, Rotation, UpperSplitA, UpperSplitB, UpperSplitAcc);
| 21991 | |
| 21992 | ArrayRef<int> JoinMask(&SplitSeqVec[0], Ty->getNumElements()); |
    return B.CreateShuffleVector(LowerSplitInt, UpperSplitInt, JoinMask);
| 21994 | } |
| 21995 | |
  auto *IntTy = Type::getInt32Ty(B.getContext());
| 21997 | |
| 21998 | ConstantInt *ConstRotation = nullptr; |
| 21999 | if (OperationType == ComplexDeinterleavingOperation::CMulPartial) { |
    ConstRotation = ConstantInt::get(IntTy, (int)Rotation);
| 22001 | |
    if (Accumulator)
      return B.CreateIntrinsic(Intrinsic::arm_mve_vcmlaq, Ty,
                               {ConstRotation, Accumulator, InputB, InputA});
    return B.CreateIntrinsic(Intrinsic::arm_mve_vcmulq, Ty,
                             {ConstRotation, InputB, InputA});
| 22007 | } |
| 22008 | |
| 22009 | if (OperationType == ComplexDeinterleavingOperation::CAdd) { |
| 22010 | // 1 means the value is not halved. |
    auto *ConstHalving = ConstantInt::get(IntTy, 1);
| 22012 | |
| 22013 | if (Rotation == ComplexDeinterleavingRotation::Rotation_90) |
      ConstRotation = ConstantInt::get(IntTy, 0);
    else if (Rotation == ComplexDeinterleavingRotation::Rotation_270)
      ConstRotation = ConstantInt::get(IntTy, 1);
| 22017 | |
| 22018 | if (!ConstRotation) |
| 22019 | return nullptr; // Invalid rotation for arm_mve_vcaddq |
| 22020 | |
    return B.CreateIntrinsic(Intrinsic::arm_mve_vcaddq, Ty,
                             {ConstHalving, ConstRotation, InputA, InputB});
| 22023 | } |
| 22024 | |
| 22025 | return nullptr; |
| 22026 | } |
| 22027 | |