| 1 | //===- ARMISelLowering.cpp - ARM DAG Lowering Implementation --------------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // This file defines the interfaces that ARM uses to lower LLVM code into a |
| 10 | // selection DAG. |
| 11 | // |
| 12 | //===----------------------------------------------------------------------===// |
| 13 | |
| 14 | #include "ARMISelLowering.h" |
| 15 | #include "ARMBaseInstrInfo.h" |
| 16 | #include "ARMBaseRegisterInfo.h" |
| 17 | #include "ARMCallingConv.h" |
| 18 | #include "ARMConstantPoolValue.h" |
| 19 | #include "ARMMachineFunctionInfo.h" |
| 20 | #include "ARMPerfectShuffle.h" |
| 21 | #include "ARMRegisterInfo.h" |
| 22 | #include "ARMSelectionDAGInfo.h" |
| 23 | #include "ARMSubtarget.h" |
| 24 | #include "ARMTargetTransformInfo.h" |
| 25 | #include "MCTargetDesc/ARMAddressingModes.h" |
| 26 | #include "MCTargetDesc/ARMBaseInfo.h" |
| 27 | #include "Utils/ARMBaseInfo.h" |
| 28 | #include "llvm/ADT/APFloat.h" |
| 29 | #include "llvm/ADT/APInt.h" |
| 30 | #include "llvm/ADT/ArrayRef.h" |
| 31 | #include "llvm/ADT/BitVector.h" |
| 32 | #include "llvm/ADT/DenseMap.h" |
| 33 | #include "llvm/ADT/STLExtras.h" |
| 34 | #include "llvm/ADT/SmallPtrSet.h" |
| 35 | #include "llvm/ADT/SmallVector.h" |
| 36 | #include "llvm/ADT/Statistic.h" |
| 37 | #include "llvm/ADT/StringExtras.h" |
| 38 | #include "llvm/ADT/StringRef.h" |
| 39 | #include "llvm/ADT/StringSwitch.h" |
| 40 | #include "llvm/ADT/Twine.h" |
| 41 | #include "llvm/Analysis/VectorUtils.h" |
| 42 | #include "llvm/CodeGen/CallingConvLower.h" |
| 43 | #include "llvm/CodeGen/ComplexDeinterleavingPass.h" |
| 44 | #include "llvm/CodeGen/ISDOpcodes.h" |
| 45 | #include "llvm/CodeGen/MachineBasicBlock.h" |
| 46 | #include "llvm/CodeGen/MachineConstantPool.h" |
| 47 | #include "llvm/CodeGen/MachineFrameInfo.h" |
| 48 | #include "llvm/CodeGen/MachineFunction.h" |
| 49 | #include "llvm/CodeGen/MachineInstr.h" |
| 50 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
| 51 | #include "llvm/CodeGen/MachineJumpTableInfo.h" |
| 52 | #include "llvm/CodeGen/MachineMemOperand.h" |
| 53 | #include "llvm/CodeGen/MachineOperand.h" |
| 54 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
| 55 | #include "llvm/CodeGen/RuntimeLibcallUtil.h" |
| 56 | #include "llvm/CodeGen/SelectionDAG.h" |
| 57 | #include "llvm/CodeGen/SelectionDAGAddressAnalysis.h" |
| 58 | #include "llvm/CodeGen/SelectionDAGNodes.h" |
| 59 | #include "llvm/CodeGen/TargetInstrInfo.h" |
| 60 | #include "llvm/CodeGen/TargetLowering.h" |
| 61 | #include "llvm/CodeGen/TargetOpcodes.h" |
| 62 | #include "llvm/CodeGen/TargetRegisterInfo.h" |
| 63 | #include "llvm/CodeGen/TargetSubtargetInfo.h" |
| 64 | #include "llvm/CodeGen/ValueTypes.h" |
| 65 | #include "llvm/CodeGenTypes/MachineValueType.h" |
| 66 | #include "llvm/IR/Attributes.h" |
| 67 | #include "llvm/IR/CallingConv.h" |
| 68 | #include "llvm/IR/Constant.h" |
| 69 | #include "llvm/IR/Constants.h" |
| 70 | #include "llvm/IR/DataLayout.h" |
| 71 | #include "llvm/IR/DebugLoc.h" |
| 72 | #include "llvm/IR/DerivedTypes.h" |
| 73 | #include "llvm/IR/Function.h" |
| 74 | #include "llvm/IR/GlobalAlias.h" |
| 75 | #include "llvm/IR/GlobalValue.h" |
| 76 | #include "llvm/IR/GlobalVariable.h" |
| 77 | #include "llvm/IR/IRBuilder.h" |
| 78 | #include "llvm/IR/InlineAsm.h" |
| 79 | #include "llvm/IR/Instruction.h" |
| 80 | #include "llvm/IR/Instructions.h" |
| 81 | #include "llvm/IR/IntrinsicInst.h" |
| 82 | #include "llvm/IR/Intrinsics.h" |
| 83 | #include "llvm/IR/IntrinsicsARM.h" |
| 84 | #include "llvm/IR/Module.h" |
| 85 | #include "llvm/IR/Type.h" |
| 86 | #include "llvm/IR/User.h" |
| 87 | #include "llvm/IR/Value.h" |
| 88 | #include "llvm/MC/MCInstrDesc.h" |
| 89 | #include "llvm/MC/MCInstrItineraries.h" |
| 90 | #include "llvm/MC/MCSchedule.h" |
| 91 | #include "llvm/Support/AtomicOrdering.h" |
| 92 | #include "llvm/Support/BranchProbability.h" |
| 93 | #include "llvm/Support/Casting.h" |
| 94 | #include "llvm/Support/CodeGen.h" |
| 95 | #include "llvm/Support/CommandLine.h" |
| 96 | #include "llvm/Support/Compiler.h" |
| 97 | #include "llvm/Support/Debug.h" |
| 98 | #include "llvm/Support/ErrorHandling.h" |
| 99 | #include "llvm/Support/KnownBits.h" |
| 100 | #include "llvm/Support/MathExtras.h" |
| 101 | #include "llvm/Support/raw_ostream.h" |
| 102 | #include "llvm/Target/TargetMachine.h" |
| 103 | #include "llvm/Target/TargetOptions.h" |
| 104 | #include "llvm/TargetParser/Triple.h" |
| 105 | #include <algorithm> |
| 106 | #include <cassert> |
| 107 | #include <cstdint> |
| 108 | #include <cstdlib> |
| 109 | #include <iterator> |
| 110 | #include <limits> |
| 111 | #include <optional> |
| 112 | #include <tuple> |
| 113 | #include <utility> |
| 114 | #include <vector> |
| 115 | |
| 116 | using namespace llvm; |
| 117 | |
| 118 | #define DEBUG_TYPE "arm-isel" |
| 119 | |
| 120 | STATISTIC(NumTailCalls, "Number of tail calls" ); |
| 121 | STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt" ); |
| 122 | STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments" ); |
| 123 | STATISTIC(NumConstpoolPromoted, |
| 124 | "Number of constants with their storage promoted into constant pools" ); |
| 125 | |
| 126 | static cl::opt<bool> |
| 127 | ARMInterworking("arm-interworking" , cl::Hidden, |
| 128 | cl::desc("Enable / disable ARM interworking (for debugging only)" ), |
| 129 | cl::init(Val: true)); |
| 130 | |
| 131 | static cl::opt<bool> EnableConstpoolPromotion( |
| 132 | "arm-promote-constant" , cl::Hidden, |
| 133 | cl::desc("Enable / disable promotion of unnamed_addr constants into " |
| 134 | "constant pools" ), |
| 135 | cl::init(Val: false)); // FIXME: set to true by default once PR32780 is fixed |
| 136 | static cl::opt<unsigned> ConstpoolPromotionMaxSize( |
| 137 | "arm-promote-constant-max-size" , cl::Hidden, |
| 138 | cl::desc("Maximum size of constant to promote into a constant pool" ), |
| 139 | cl::init(Val: 64)); |
| 140 | static cl::opt<unsigned> ConstpoolPromotionMaxTotal( |
| 141 | "arm-promote-constant-max-total" , cl::Hidden, |
| 142 | cl::desc("Maximum size of ALL constants to promote into a constant pool" ), |
| 143 | cl::init(Val: 128)); |
| 144 | |
| 145 | cl::opt<unsigned> |
| 146 | MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor" , cl::Hidden, |
| 147 | cl::desc("Maximum interleave factor for MVE VLDn to generate." ), |
| 148 | cl::init(Val: 2)); |
| 149 | |
| 150 | cl::opt<unsigned> ArmMaxBaseUpdatesToCheck( |
| 151 | "arm-max-base-updates-to-check" , cl::Hidden, |
| 152 | cl::desc("Maximum number of base-updates to check generating postindex." ), |
| 153 | cl::init(Val: 64)); |
| 154 | |
| 155 | /// Value type used for "flags" operands / results (either CPSR or FPSCR_NZCV). |
| 156 | constexpr MVT FlagsVT = MVT::i32; |
| 157 | |
| 158 | // The APCS parameter registers. |
| 159 | static const MCPhysReg GPRArgRegs[] = { |
| 160 | ARM::R0, ARM::R1, ARM::R2, ARM::R3 |
| 161 | }; |
| 162 | |
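|   | /// Recreate the upper bits of a narrow integer value received across a CMSE |
|   | /// boundary: truncate to the declared argument type, then sign- or |
|   | /// zero-extend back to i32 according to the argument's extension flags. |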
| 163 | static SDValue handleCMSEValue(const SDValue &Value, const ISD::InputArg &Arg, |
| 164 | SelectionDAG &DAG, const SDLoc &DL) { |
| 165 | assert(Arg.ArgVT.isScalarInteger()); |
| 166 | assert(Arg.ArgVT.bitsLT(MVT::i32)); |
| 167 | SDValue Trunc = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: Arg.ArgVT, Operand: Value); |
| 168 | SDValue Ext = |
| 169 | DAG.getNode(Opcode: Arg.Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL, |
| 170 | VT: MVT::i32, Operand: Trunc); |
| 171 | return Ext; |
| 172 | } |
| 173 | |
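|   | /// Register the operation actions common to every NEON vector type VT, |
|   | /// promoting its loads and stores to PromotedLdStVT when the two differ. |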
| 174 | void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT) { |
| 175 | if (VT != PromotedLdStVT) { |
| 176 | setOperationAction(Op: ISD::LOAD, VT, Action: Promote); |
| 177 | AddPromotedToType (Opc: ISD::LOAD, OrigVT: VT, DestVT: PromotedLdStVT); |
| 178 | |
| 179 | setOperationAction(Op: ISD::STORE, VT, Action: Promote); |
| 180 | AddPromotedToType (Opc: ISD::STORE, OrigVT: VT, DestVT: PromotedLdStVT); |
| 181 | } |
| 182 | |
| 183 | MVT ElemTy = VT.getVectorElementType(); |
| 184 | if (ElemTy != MVT::f64) |
| 185 | setOperationAction(Op: ISD::SETCC, VT, Action: Custom); |
| 186 | setOperationAction(Op: ISD::INSERT_VECTOR_ELT, VT, Action: Custom); |
| 187 | setOperationAction(Op: ISD::EXTRACT_VECTOR_ELT, VT, Action: Custom); |
| 188 | if (ElemTy == MVT::i32) { |
| 189 | setOperationAction(Op: ISD::SINT_TO_FP, VT, Action: Custom); |
| 190 | setOperationAction(Op: ISD::UINT_TO_FP, VT, Action: Custom); |
| 191 | setOperationAction(Op: ISD::FP_TO_SINT, VT, Action: Custom); |
| 192 | setOperationAction(Op: ISD::FP_TO_UINT, VT, Action: Custom); |
| 193 | } else { |
| 194 | setOperationAction(Op: ISD::SINT_TO_FP, VT, Action: Expand); |
| 195 | setOperationAction(Op: ISD::UINT_TO_FP, VT, Action: Expand); |
| 196 | setOperationAction(Op: ISD::FP_TO_SINT, VT, Action: Expand); |
| 197 | setOperationAction(Op: ISD::FP_TO_UINT, VT, Action: Expand); |
| 198 | } |
| 199 | setOperationAction(Op: ISD::BUILD_VECTOR, VT, Action: Custom); |
| 200 | setOperationAction(Op: ISD::VECTOR_SHUFFLE, VT, Action: Custom); |
| 201 | setOperationAction(Op: ISD::CONCAT_VECTORS, VT, Action: Legal); |
| 202 | setOperationAction(Op: ISD::EXTRACT_SUBVECTOR, VT, Action: Legal); |
| 203 | setOperationAction(Op: ISD::SELECT, VT, Action: Expand); |
| 204 | setOperationAction(Op: ISD::SELECT_CC, VT, Action: Expand); |
| 205 | setOperationAction(Op: ISD::VSELECT, VT, Action: Expand); |
| 206 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT, Action: Expand); |
| 207 | if (VT.isInteger()) { |
| 208 | setOperationAction(Op: ISD::SHL, VT, Action: Custom); |
| 209 | setOperationAction(Op: ISD::SRA, VT, Action: Custom); |
| 210 | setOperationAction(Op: ISD::SRL, VT, Action: Custom); |
| 211 | } |
| 212 | |
| 213 | // Neon does not support vector divide/remainder operations. |
| 214 | setOperationAction(Op: ISD::SDIV, VT, Action: Expand); |
| 215 | setOperationAction(Op: ISD::UDIV, VT, Action: Expand); |
| 216 | setOperationAction(Op: ISD::FDIV, VT, Action: Expand); |
| 217 | setOperationAction(Op: ISD::SREM, VT, Action: Expand); |
| 218 | setOperationAction(Op: ISD::UREM, VT, Action: Expand); |
| 219 | setOperationAction(Op: ISD::FREM, VT, Action: Expand); |
| 220 | setOperationAction(Op: ISD::SDIVREM, VT, Action: Expand); |
| 221 | setOperationAction(Op: ISD::UDIVREM, VT, Action: Expand); |
| 222 | |
| 223 | if (!VT.isFloatingPoint() && VT != MVT::v2i64 && VT != MVT::v1i64) |
| 224 | for (auto Opcode : {ISD::ABS, ISD::ABDS, ISD::ABDU, ISD::SMIN, ISD::SMAX, |
| 225 | ISD::UMIN, ISD::UMAX}) |
| 226 | setOperationAction(Op: Opcode, VT, Action: Legal); |
| 227 | if (!VT.isFloatingPoint()) |
| 228 | for (auto Opcode : {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}) |
| 229 | setOperationAction(Op: Opcode, VT, Action: Legal); |
| 230 | } |
| 231 | |
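|   | /// Register a 64-bit NEON vector type in the D-register class; its loads |
|   | /// and stores are promoted to f64 (addQRTypeForNEON below does the same for |
|   | /// 128-bit types, using D-register pairs and v2f64). |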
| 232 | void ARMTargetLowering::addDRTypeForNEON(MVT VT) { |
| 233 | addRegisterClass(VT, RC: &ARM::DPRRegClass); |
| 234 | addTypeForNEON(VT, PromotedLdStVT: MVT::f64); |
| 235 | } |
| 236 | |
| 237 | void ARMTargetLowering::addQRTypeForNEON(MVT VT) { |
| 238 | addRegisterClass(VT, RC: &ARM::DPairRegClass); |
| 239 | addTypeForNEON(VT, PromotedLdStVT: MVT::v2f64); |
| 240 | } |
| 241 | |
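|   | /// Mark every operation on VT as Expand, then restore the few actions |
|   | /// (bitcast, load, store, undef) that must remain Legal so values of the |
|   | /// type can still be moved around. |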
| 242 | void ARMTargetLowering::setAllExpand(MVT VT) { |
| 243 | for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc) |
| 244 | setOperationAction(Op: Opc, VT, Action: Expand); |
| 245 | |
| 246 | // We support these really simple operations even on types where all |
| 247 | // the actual arithmetic has to be broken down into simpler |
| 248 | // operations or turned into library calls. |
| 249 | setOperationAction(Op: ISD::BITCAST, VT, Action: Legal); |
| 250 | setOperationAction(Op: ISD::LOAD, VT, Action: Legal); |
| 251 | setOperationAction(Op: ISD::STORE, VT, Action: Legal); |
| 252 | setOperationAction(Op: ISD::UNDEF, VT, Action: Legal); |
| 253 | } |
| 254 | |
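|   | /// Apply the same action to all three extending-load flavours (any-, zero- |
|   | /// and sign-extend); note that From is the result value type and To is the |
|   | /// memory type being loaded from. |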
| 255 | void ARMTargetLowering::addAllExtLoads(const MVT From, const MVT To, |
| 256 | LegalizeAction Action) { |
| 257 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: From, MemVT: To, Action); |
| 258 | setLoadExtAction(ExtType: ISD::ZEXTLOAD, ValVT: From, MemVT: To, Action); |
| 259 | setLoadExtAction(ExtType: ISD::SEXTLOAD, ValVT: From, MemVT: To, Action); |
| 260 | } |
| 261 | |
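|   | /// Register the MVE vector types and their operation actions; HasMVEFP |
|   | /// selects between the integer-only and MVE floating-point configurations. |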
| 262 | void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) { |
| 263 | const MVT IntTypes[] = { MVT::v16i8, MVT::v8i16, MVT::v4i32 }; |
| 264 | |
| 265 | for (auto VT : IntTypes) { |
| 266 | addRegisterClass(VT, RC: &ARM::MQPRRegClass); |
| 267 | setOperationAction(Op: ISD::VECTOR_SHUFFLE, VT, Action: Custom); |
| 268 | setOperationAction(Op: ISD::INSERT_VECTOR_ELT, VT, Action: Custom); |
| 269 | setOperationAction(Op: ISD::EXTRACT_VECTOR_ELT, VT, Action: Custom); |
| 270 | setOperationAction(Op: ISD::BUILD_VECTOR, VT, Action: Custom); |
| 271 | setOperationAction(Op: ISD::SHL, VT, Action: Custom); |
| 272 | setOperationAction(Op: ISD::SRA, VT, Action: Custom); |
| 273 | setOperationAction(Op: ISD::SRL, VT, Action: Custom); |
| 274 | setOperationAction(Op: ISD::SMIN, VT, Action: Legal); |
| 275 | setOperationAction(Op: ISD::SMAX, VT, Action: Legal); |
| 276 | setOperationAction(Op: ISD::UMIN, VT, Action: Legal); |
| 277 | setOperationAction(Op: ISD::UMAX, VT, Action: Legal); |
| 278 | setOperationAction(Op: ISD::ABS, VT, Action: Legal); |
| 279 | setOperationAction(Op: ISD::SETCC, VT, Action: Custom); |
| 280 | setOperationAction(Op: ISD::MLOAD, VT, Action: Custom); |
| 281 | setOperationAction(Op: ISD::MSTORE, VT, Action: Legal); |
| 282 | setOperationAction(Op: ISD::CTLZ, VT, Action: Legal); |
| 283 | setOperationAction(Op: ISD::CTTZ, VT, Action: Custom); |
| 284 | setOperationAction(Op: ISD::BITREVERSE, VT, Action: Legal); |
| 285 | setOperationAction(Op: ISD::BSWAP, VT, Action: Legal); |
| 286 | setOperationAction(Op: ISD::SADDSAT, VT, Action: Legal); |
| 287 | setOperationAction(Op: ISD::UADDSAT, VT, Action: Legal); |
| 288 | setOperationAction(Op: ISD::SSUBSAT, VT, Action: Legal); |
| 289 | setOperationAction(Op: ISD::USUBSAT, VT, Action: Legal); |
| 290 | setOperationAction(Op: ISD::ABDS, VT, Action: Legal); |
| 291 | setOperationAction(Op: ISD::ABDU, VT, Action: Legal); |
| 292 | setOperationAction(Op: ISD::AVGFLOORS, VT, Action: Legal); |
| 293 | setOperationAction(Op: ISD::AVGFLOORU, VT, Action: Legal); |
| 294 | setOperationAction(Op: ISD::AVGCEILS, VT, Action: Legal); |
| 295 | setOperationAction(Op: ISD::AVGCEILU, VT, Action: Legal); |
| 296 | |
| 297 | // No native support for these. |
| 298 | setOperationAction(Op: ISD::UDIV, VT, Action: Expand); |
| 299 | setOperationAction(Op: ISD::SDIV, VT, Action: Expand); |
| 300 | setOperationAction(Op: ISD::UREM, VT, Action: Expand); |
| 301 | setOperationAction(Op: ISD::SREM, VT, Action: Expand); |
| 302 | setOperationAction(Op: ISD::UDIVREM, VT, Action: Expand); |
| 303 | setOperationAction(Op: ISD::SDIVREM, VT, Action: Expand); |
| 304 | setOperationAction(Op: ISD::CTPOP, VT, Action: Expand); |
| 305 | setOperationAction(Op: ISD::SELECT, VT, Action: Expand); |
| 306 | setOperationAction(Op: ISD::SELECT_CC, VT, Action: Expand); |
| 307 | |
| 308 | // Vector reductions |
| 309 | setOperationAction(Op: ISD::VECREDUCE_ADD, VT, Action: Legal); |
| 310 | setOperationAction(Op: ISD::VECREDUCE_SMAX, VT, Action: Legal); |
| 311 | setOperationAction(Op: ISD::VECREDUCE_UMAX, VT, Action: Legal); |
| 312 | setOperationAction(Op: ISD::VECREDUCE_SMIN, VT, Action: Legal); |
| 313 | setOperationAction(Op: ISD::VECREDUCE_UMIN, VT, Action: Legal); |
| 314 | setOperationAction(Op: ISD::VECREDUCE_MUL, VT, Action: Custom); |
| 315 | setOperationAction(Op: ISD::VECREDUCE_AND, VT, Action: Custom); |
| 316 | setOperationAction(Op: ISD::VECREDUCE_OR, VT, Action: Custom); |
| 317 | setOperationAction(Op: ISD::VECREDUCE_XOR, VT, Action: Custom); |
| 318 | |
| 319 | if (!HasMVEFP) { |
| 320 | setOperationAction(Op: ISD::SINT_TO_FP, VT, Action: Expand); |
| 321 | setOperationAction(Op: ISD::UINT_TO_FP, VT, Action: Expand); |
| 322 | setOperationAction(Op: ISD::FP_TO_SINT, VT, Action: Expand); |
| 323 | setOperationAction(Op: ISD::FP_TO_UINT, VT, Action: Expand); |
| 324 | } else { |
| 325 | setOperationAction(Op: ISD::FP_TO_SINT_SAT, VT, Action: Custom); |
| 326 | setOperationAction(Op: ISD::FP_TO_UINT_SAT, VT, Action: Custom); |
| 327 | } |
| 328 | |
| 329 | // Pre- and post-increment addressing is supported on loads and stores |
| 330 | for (unsigned im = (unsigned)ISD::PRE_INC; |
| 331 | im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) { |
| 332 | setIndexedLoadAction(IdxModes: im, VT, Action: Legal); |
| 333 | setIndexedStoreAction(IdxModes: im, VT, Action: Legal); |
| 334 | setIndexedMaskedLoadAction(IdxMode: im, VT, Action: Legal); |
| 335 | setIndexedMaskedStoreAction(IdxMode: im, VT, Action: Legal); |
| 336 | } |
| 337 | } |
| 338 | |
| 339 | const MVT FloatTypes[] = { MVT::v8f16, MVT::v4f32 }; |
| 340 | for (auto VT : FloatTypes) { |
| 341 | addRegisterClass(VT, RC: &ARM::MQPRRegClass); |
| 342 | if (!HasMVEFP) |
| 343 | setAllExpand(VT); |
| 344 | |
| 345 | // These are legal or custom whether we have MVE.fp or not |
| 346 | setOperationAction(Op: ISD::VECTOR_SHUFFLE, VT, Action: Custom); |
| 347 | setOperationAction(Op: ISD::INSERT_VECTOR_ELT, VT, Action: Custom); |
| 348 | setOperationAction(Op: ISD::INSERT_VECTOR_ELT, VT: VT.getVectorElementType(), Action: Custom); |
| 349 | setOperationAction(Op: ISD::EXTRACT_VECTOR_ELT, VT, Action: Custom); |
| 350 | setOperationAction(Op: ISD::BUILD_VECTOR, VT, Action: Custom); |
| 351 | setOperationAction(Op: ISD::BUILD_VECTOR, VT: VT.getVectorElementType(), Action: Custom); |
| 352 | setOperationAction(Op: ISD::SCALAR_TO_VECTOR, VT, Action: Legal); |
| 353 | setOperationAction(Op: ISD::SETCC, VT, Action: Custom); |
| 354 | setOperationAction(Op: ISD::MLOAD, VT, Action: Custom); |
| 355 | setOperationAction(Op: ISD::MSTORE, VT, Action: Legal); |
| 356 | setOperationAction(Op: ISD::SELECT, VT, Action: Expand); |
| 357 | setOperationAction(Op: ISD::SELECT_CC, VT, Action: Expand); |
| 358 | |
| 359 | // Pre- and post-increment addressing is supported on loads and stores |
| 360 | for (unsigned im = (unsigned)ISD::PRE_INC; |
| 361 | im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) { |
| 362 | setIndexedLoadAction(IdxModes: im, VT, Action: Legal); |
| 363 | setIndexedStoreAction(IdxModes: im, VT, Action: Legal); |
| 364 | setIndexedMaskedLoadAction(IdxMode: im, VT, Action: Legal); |
| 365 | setIndexedMaskedStoreAction(IdxMode: im, VT, Action: Legal); |
| 366 | } |
| 367 | |
| 368 | if (HasMVEFP) { |
| 369 | setOperationAction(Op: ISD::FMINNUM, VT, Action: Legal); |
| 370 | setOperationAction(Op: ISD::FMAXNUM, VT, Action: Legal); |
| 371 | setOperationAction(Op: ISD::FROUND, VT, Action: Legal); |
| 372 | setOperationAction(Op: ISD::FROUNDEVEN, VT, Action: Legal); |
| 373 | setOperationAction(Op: ISD::FRINT, VT, Action: Legal); |
| 374 | setOperationAction(Op: ISD::FTRUNC, VT, Action: Legal); |
| 375 | setOperationAction(Op: ISD::FFLOOR, VT, Action: Legal); |
| 376 | setOperationAction(Op: ISD::FCEIL, VT, Action: Legal); |
| 377 | setOperationAction(Op: ISD::VECREDUCE_FADD, VT, Action: Custom); |
| 378 | setOperationAction(Op: ISD::VECREDUCE_FMUL, VT, Action: Custom); |
| 379 | setOperationAction(Op: ISD::VECREDUCE_FMIN, VT, Action: Custom); |
| 380 | setOperationAction(Op: ISD::VECREDUCE_FMAX, VT, Action: Custom); |
| 381 | |
| 382 | // No native support for these. |
| 383 | setOperationAction(Op: ISD::FDIV, VT, Action: Expand); |
| 384 | setOperationAction(Op: ISD::FREM, VT, Action: Expand); |
| 385 | setOperationAction(Op: ISD::FSQRT, VT, Action: Expand); |
| 386 | setOperationAction(Op: ISD::FSIN, VT, Action: Expand); |
| 387 | setOperationAction(Op: ISD::FCOS, VT, Action: Expand); |
| 388 | setOperationAction(Op: ISD::FTAN, VT, Action: Expand); |
| 389 | setOperationAction(Op: ISD::FPOW, VT, Action: Expand); |
| 390 | setOperationAction(Op: ISD::FLOG, VT, Action: Expand); |
| 391 | setOperationAction(Op: ISD::FLOG2, VT, Action: Expand); |
| 392 | setOperationAction(Op: ISD::FLOG10, VT, Action: Expand); |
| 393 | setOperationAction(Op: ISD::FEXP, VT, Action: Expand); |
| 394 | setOperationAction(Op: ISD::FEXP2, VT, Action: Expand); |
| 395 | setOperationAction(Op: ISD::FEXP10, VT, Action: Expand); |
| 396 | setOperationAction(Op: ISD::FNEARBYINT, VT, Action: Expand); |
| 397 | } |
| 398 | } |
| 399 | |
| 400 | // Custom-expand vector reductions that are narrower than a legal vector, to |
| 401 | // prevent spurious zero elements from being included in the reduction. |
| 402 | setOperationAction(Op: ISD::VECREDUCE_FADD, VT: MVT::v4f16, Action: Custom); |
| 403 | setOperationAction(Op: ISD::VECREDUCE_FMUL, VT: MVT::v4f16, Action: Custom); |
| 404 | setOperationAction(Op: ISD::VECREDUCE_FMIN, VT: MVT::v4f16, Action: Custom); |
| 405 | setOperationAction(Op: ISD::VECREDUCE_FMAX, VT: MVT::v4f16, Action: Custom); |
| 406 | setOperationAction(Op: ISD::VECREDUCE_FADD, VT: MVT::v2f16, Action: Custom); |
| 407 | setOperationAction(Op: ISD::VECREDUCE_FMUL, VT: MVT::v2f16, Action: Custom); |
| 408 | setOperationAction(Op: ISD::VECREDUCE_FMIN, VT: MVT::v2f16, Action: Custom); |
| 409 | setOperationAction(Op: ISD::VECREDUCE_FMAX, VT: MVT::v2f16, Action: Custom); |
| 410 | |
| 411 | // We 'support' these types up to bitcast/load/store level, regardless of |
| 412 | // whether MVE is integer-only or has floating point. What integer-only MVE |
| 413 | // does inhibit is FP data processing on the FP vector types. |
| 414 | const MVT LongTypes[] = { MVT::v2i64, MVT::v2f64 }; |
| 415 | for (auto VT : LongTypes) { |
| 416 | addRegisterClass(VT, RC: &ARM::MQPRRegClass); |
| 417 | setAllExpand(VT); |
| 418 | setOperationAction(Op: ISD::INSERT_VECTOR_ELT, VT, Action: Custom); |
| 419 | setOperationAction(Op: ISD::EXTRACT_VECTOR_ELT, VT, Action: Custom); |
| 420 | setOperationAction(Op: ISD::BUILD_VECTOR, VT, Action: Custom); |
| 421 | setOperationAction(Op: ISD::VSELECT, VT, Action: Legal); |
| 422 | setOperationAction(Op: ISD::VECTOR_SHUFFLE, VT, Action: Custom); |
| 423 | } |
| 424 | setOperationAction(Op: ISD::SCALAR_TO_VECTOR, VT: MVT::v2f64, Action: Legal); |
| 425 | |
| 426 | // We can do bitwise operations on v2i64 vectors |
| 427 | setOperationAction(Op: ISD::AND, VT: MVT::v2i64, Action: Legal); |
| 428 | setOperationAction(Op: ISD::OR, VT: MVT::v2i64, Action: Legal); |
| 429 | setOperationAction(Op: ISD::XOR, VT: MVT::v2i64, Action: Legal); |
| 430 | |
| 431 | // It is legal to extload from v8i8 to v8i16, and from v4i8 or v4i16 to v4i32. |
| 432 | addAllExtLoads(From: MVT::v8i16, To: MVT::v8i8, Action: Legal); |
| 433 | addAllExtLoads(From: MVT::v4i32, To: MVT::v4i16, Action: Legal); |
| 434 | addAllExtLoads(From: MVT::v4i32, To: MVT::v4i8, Action: Legal); |
| 435 | |
| 436 | // It is legal to sign extend from v4i8/v4i16 to v4i32 or v8i8 to v8i16. |
| 437 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::v4i8, Action: Legal); |
| 438 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::v4i16, Action: Legal); |
| 439 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::v4i32, Action: Legal); |
| 440 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::v8i8, Action: Legal); |
| 441 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::v8i16, Action: Legal); |
| 442 | |
| 443 | // Some truncating stores are legal too. |
| 444 | setTruncStoreAction(ValVT: MVT::v4i32, MemVT: MVT::v4i16, Action: Legal); |
| 445 | setTruncStoreAction(ValVT: MVT::v4i32, MemVT: MVT::v4i8, Action: Legal); |
| 446 | setTruncStoreAction(ValVT: MVT::v8i16, MemVT: MVT::v8i8, Action: Legal); |
| 447 | |
| 448 | // Pre- and post-increment on these are legal, given the correct extends |
| 449 | for (unsigned im = (unsigned)ISD::PRE_INC; |
| 450 | im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) { |
| 451 | for (auto VT : {MVT::v8i8, MVT::v4i8, MVT::v4i16}) { |
| 452 | setIndexedLoadAction(IdxModes: im, VT, Action: Legal); |
| 453 | setIndexedStoreAction(IdxModes: im, VT, Action: Legal); |
| 454 | setIndexedMaskedLoadAction(IdxMode: im, VT, Action: Legal); |
| 455 | setIndexedMaskedStoreAction(IdxMode: im, VT, Action: Legal); |
| 456 | } |
| 457 | } |
| 458 | |
| 459 | // Predicate types |
| 460 | const MVT pTypes[] = {MVT::v16i1, MVT::v8i1, MVT::v4i1, MVT::v2i1}; |
| 461 | for (auto VT : pTypes) { |
| 462 | addRegisterClass(VT, RC: &ARM::VCCRRegClass); |
| 463 | setOperationAction(Op: ISD::BUILD_VECTOR, VT, Action: Custom); |
| 464 | setOperationAction(Op: ISD::VECTOR_SHUFFLE, VT, Action: Custom); |
| 465 | setOperationAction(Op: ISD::EXTRACT_SUBVECTOR, VT, Action: Custom); |
| 466 | setOperationAction(Op: ISD::CONCAT_VECTORS, VT, Action: Custom); |
| 467 | setOperationAction(Op: ISD::INSERT_VECTOR_ELT, VT, Action: Custom); |
| 468 | setOperationAction(Op: ISD::EXTRACT_VECTOR_ELT, VT, Action: Custom); |
| 469 | setOperationAction(Op: ISD::SETCC, VT, Action: Custom); |
| 470 | setOperationAction(Op: ISD::SCALAR_TO_VECTOR, VT, Action: Expand); |
| 471 | setOperationAction(Op: ISD::LOAD, VT, Action: Custom); |
| 472 | setOperationAction(Op: ISD::STORE, VT, Action: Custom); |
| 473 | setOperationAction(Op: ISD::TRUNCATE, VT, Action: Custom); |
| 474 | setOperationAction(Op: ISD::VSELECT, VT, Action: Expand); |
| 475 | setOperationAction(Op: ISD::SELECT, VT, Action: Expand); |
| 476 | setOperationAction(Op: ISD::SELECT_CC, VT, Action: Expand); |
| 477 | |
| 478 | if (!HasMVEFP) { |
| 479 | setOperationAction(Op: ISD::SINT_TO_FP, VT, Action: Expand); |
| 480 | setOperationAction(Op: ISD::UINT_TO_FP, VT, Action: Expand); |
| 481 | setOperationAction(Op: ISD::FP_TO_SINT, VT, Action: Expand); |
| 482 | setOperationAction(Op: ISD::FP_TO_UINT, VT, Action: Expand); |
| 483 | } |
| 484 | } |
| 485 | setOperationAction(Op: ISD::SETCC, VT: MVT::v2i1, Action: Expand); |
| 486 | setOperationAction(Op: ISD::TRUNCATE, VT: MVT::v2i1, Action: Expand); |
| 487 | setOperationAction(Op: ISD::AND, VT: MVT::v2i1, Action: Expand); |
| 488 | setOperationAction(Op: ISD::OR, VT: MVT::v2i1, Action: Expand); |
| 489 | setOperationAction(Op: ISD::XOR, VT: MVT::v2i1, Action: Expand); |
| 490 | setOperationAction(Op: ISD::SINT_TO_FP, VT: MVT::v2i1, Action: Expand); |
| 491 | setOperationAction(Op: ISD::UINT_TO_FP, VT: MVT::v2i1, Action: Expand); |
| 492 | setOperationAction(Op: ISD::FP_TO_SINT, VT: MVT::v2i1, Action: Expand); |
| 493 | setOperationAction(Op: ISD::FP_TO_UINT, VT: MVT::v2i1, Action: Expand); |
| 494 | |
| 495 | setOperationAction(Op: ISD::SIGN_EXTEND, VT: MVT::v8i32, Action: Custom); |
| 496 | setOperationAction(Op: ISD::SIGN_EXTEND, VT: MVT::v16i16, Action: Custom); |
| 497 | setOperationAction(Op: ISD::SIGN_EXTEND, VT: MVT::v16i32, Action: Custom); |
| 498 | setOperationAction(Op: ISD::ZERO_EXTEND, VT: MVT::v8i32, Action: Custom); |
| 499 | setOperationAction(Op: ISD::ZERO_EXTEND, VT: MVT::v16i16, Action: Custom); |
| 500 | setOperationAction(Op: ISD::ZERO_EXTEND, VT: MVT::v16i32, Action: Custom); |
| 501 | setOperationAction(Op: ISD::TRUNCATE, VT: MVT::v8i32, Action: Custom); |
| 502 | setOperationAction(Op: ISD::TRUNCATE, VT: MVT::v16i16, Action: Custom); |
| 503 | } |
| 504 | |
| 505 | const ARMBaseTargetMachine &ARMTargetLowering::getTM() const { |
| 506 | return static_cast<const ARMBaseTargetMachine &>(getTargetMachine()); |
| 507 | } |
| 508 | |
| 509 | ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM_, |
| 510 | const ARMSubtarget &STI) |
| 511 | : TargetLowering(TM_, STI), Subtarget(&STI), |
| 512 | RegInfo(Subtarget->getRegisterInfo()), |
| 513 | Itins(Subtarget->getInstrItineraryData()) { |
| 514 | const auto &TM = static_cast<const ARMBaseTargetMachine &>(TM_); |
| 515 | |
| 516 | setBooleanContents(ZeroOrOneBooleanContent); |
| 517 | setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); |
| 518 | |
| 519 | const Triple &TT = TM.getTargetTriple(); |
| 520 | |
| 521 | if (Subtarget->isThumb1Only()) |
| 522 | addRegisterClass(VT: MVT::i32, RC: &ARM::tGPRRegClass); |
| 523 | else |
| 524 | addRegisterClass(VT: MVT::i32, RC: &ARM::GPRRegClass); |
| 525 | |
| 526 | if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only() && |
| 527 | Subtarget->hasFPRegs()) { |
| 528 | addRegisterClass(VT: MVT::f32, RC: &ARM::SPRRegClass); |
| 529 | addRegisterClass(VT: MVT::f64, RC: &ARM::DPRRegClass); |
| 530 | |
| 531 | setOperationAction(Op: ISD::FP_TO_SINT_SAT, VT: MVT::i32, Action: Custom); |
| 532 | setOperationAction(Op: ISD::FP_TO_UINT_SAT, VT: MVT::i32, Action: Custom); |
| 533 | setOperationAction(Op: ISD::FP_TO_SINT_SAT, VT: MVT::i64, Action: Custom); |
| 534 | setOperationAction(Op: ISD::FP_TO_UINT_SAT, VT: MVT::i64, Action: Custom); |
| 535 | |
| 536 | if (!Subtarget->hasVFP2Base()) { |
| 537 | setAllExpand(MVT::f32); |
| 538 | } else { |
| 539 | for (auto Op : {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL, |
| 540 | ISD::STRICT_FDIV, ISD::STRICT_FMA, ISD::STRICT_FSQRT}) |
| 541 | setOperationAction(Op, VT: MVT::f32, Action: Legal); |
| 542 | } |
| 543 | if (!Subtarget->hasFP64()) { |
| 544 | setAllExpand(MVT::f64); |
| 545 | } else { |
| 546 | for (auto Op : {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL, |
| 547 | ISD::STRICT_FDIV, ISD::STRICT_FMA, ISD::STRICT_FSQRT}) |
| 548 | setOperationAction(Op, VT: MVT::f64, Action: Legal); |
| 549 | |
| 550 | setOperationAction(Op: ISD::STRICT_FP_ROUND, VT: MVT::f32, Action: Legal); |
| 551 | } |
| 552 | } |
| 553 | |
| 554 | if (Subtarget->hasFullFP16()) { |
| 555 | for (auto Op : {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL, |
| 556 | ISD::STRICT_FDIV, ISD::STRICT_FMA, ISD::STRICT_FSQRT}) |
| 557 | setOperationAction(Op, VT: MVT::f16, Action: Legal); |
| 558 | |
| 559 | addRegisterClass(VT: MVT::f16, RC: &ARM::HPRRegClass); |
| 560 | setOperationAction(Op: ISD::BITCAST, VT: MVT::i16, Action: Custom); |
| 561 | setOperationAction(Op: ISD::BITCAST, VT: MVT::f16, Action: Custom); |
| 562 | |
| 563 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::f16, Action: Legal); |
| 564 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::f16, Action: Legal); |
| 565 | setOperationAction(Op: ISD::STRICT_FMINNUM, VT: MVT::f16, Action: Legal); |
| 566 | setOperationAction(Op: ISD::STRICT_FMAXNUM, VT: MVT::f16, Action: Legal); |
| 567 | } |
| 568 | |
| 569 | if (Subtarget->hasBF16()) { |
| 570 | addRegisterClass(VT: MVT::bf16, RC: &ARM::HPRRegClass); |
| 571 | setAllExpand(MVT::bf16); |
| 572 | if (!Subtarget->hasFullFP16()) |
| 573 | setOperationAction(Op: ISD::BITCAST, VT: MVT::bf16, Action: Custom); |
| 574 | } else { |
| 575 | setOperationAction(Op: ISD::BF16_TO_FP, VT: MVT::f32, Action: Expand); |
| 576 | setOperationAction(Op: ISD::BF16_TO_FP, VT: MVT::f64, Action: Expand); |
| 577 | setOperationAction(Op: ISD::FP_TO_BF16, VT: MVT::f32, Action: Custom); |
| 578 | setOperationAction(Op: ISD::FP_TO_BF16, VT: MVT::f64, Action: Custom); |
| 579 | } |
| 580 | |
| 581 | for (MVT VT : MVT::fixedlen_vector_valuetypes()) { |
| 582 | for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) { |
| 583 | setTruncStoreAction(ValVT: VT, MemVT: InnerVT, Action: Expand); |
| 584 | addAllExtLoads(From: VT, To: InnerVT, Action: Expand); |
| 585 | } |
| 586 | |
| 587 | setOperationAction(Op: ISD::SMUL_LOHI, VT, Action: Expand); |
| 588 | setOperationAction(Op: ISD::UMUL_LOHI, VT, Action: Expand); |
| 589 | |
| 590 | setOperationAction(Op: ISD::BSWAP, VT, Action: Expand); |
| 591 | } |
| 592 | |
| 593 | if (!Subtarget->isThumb1Only() && !Subtarget->hasV8_1MMainlineOps()) |
| 594 | setOperationAction(Op: ISD::SCMP, VT: MVT::i32, Action: Custom); |
| 595 | |
| 596 | if (!Subtarget->hasV8_1MMainlineOps()) |
| 597 | setOperationAction(Op: ISD::UCMP, VT: MVT::i32, Action: Custom); |
| 598 | |
| 599 | if (!Subtarget->isThumb1Only()) |
| 600 | setOperationAction(Op: ISD::ABS, VT: MVT::i32, Action: Custom); |
| 601 | |
| 602 | setOperationAction(Op: ISD::ConstantFP, VT: MVT::f32, Action: Custom); |
| 603 | setOperationAction(Op: ISD::ConstantFP, VT: MVT::f64, Action: Custom); |
| 604 | |
| 605 | setOperationAction(Op: ISD::READ_REGISTER, VT: MVT::i64, Action: Custom); |
| 606 | setOperationAction(Op: ISD::WRITE_REGISTER, VT: MVT::i64, Action: Custom); |
| 607 | |
| 608 | if (Subtarget->hasMVEIntegerOps()) |
| 609 | addMVEVectorTypes(HasMVEFP: Subtarget->hasMVEFloatOps()); |
| 610 | |
| 611 | // Combine low-overhead loop intrinsics so that we can lower i1 types. |
| 612 | if (Subtarget->hasLOB()) { |
| 613 | setTargetDAGCombine({ISD::BRCOND, ISD::BR_CC}); |
| 614 | } |
| 615 | |
| 616 | if (Subtarget->hasNEON()) { |
| 617 | addDRTypeForNEON(VT: MVT::v2f32); |
| 618 | addDRTypeForNEON(VT: MVT::v8i8); |
| 619 | addDRTypeForNEON(VT: MVT::v4i16); |
| 620 | addDRTypeForNEON(VT: MVT::v2i32); |
| 621 | addDRTypeForNEON(VT: MVT::v1i64); |
| 622 | |
| 623 | addQRTypeForNEON(VT: MVT::v4f32); |
| 624 | addQRTypeForNEON(VT: MVT::v2f64); |
| 625 | addQRTypeForNEON(VT: MVT::v16i8); |
| 626 | addQRTypeForNEON(VT: MVT::v8i16); |
| 627 | addQRTypeForNEON(VT: MVT::v4i32); |
| 628 | addQRTypeForNEON(VT: MVT::v2i64); |
| 629 | |
| 630 | if (Subtarget->hasFullFP16()) { |
| 631 | addQRTypeForNEON(VT: MVT::v8f16); |
| 632 | addDRTypeForNEON(VT: MVT::v4f16); |
| 633 | } |
| 634 | |
| 635 | if (Subtarget->hasBF16()) { |
| 636 | addQRTypeForNEON(VT: MVT::v8bf16); |
| 637 | addDRTypeForNEON(VT: MVT::v4bf16); |
| 638 | } |
| 639 | } |
| 640 | |
| 641 | if (Subtarget->hasMVEIntegerOps() || Subtarget->hasNEON()) { |
| 642 | // v2f64 is legal so that QR subregs can be extracted as f64 elements, but |
| 643 | // none of Neon, MVE or VFP supports any arithmetic operations on it. |
| 644 | setOperationAction(Op: ISD::FADD, VT: MVT::v2f64, Action: Expand); |
| 645 | setOperationAction(Op: ISD::FSUB, VT: MVT::v2f64, Action: Expand); |
| 646 | setOperationAction(Op: ISD::FMUL, VT: MVT::v2f64, Action: Expand); |
| 647 | // FIXME: Code duplication: FDIV and FREM are expanded always, see |
| 648 | // ARMTargetLowering::addTypeForNEON method for details. |
| 649 | setOperationAction(Op: ISD::FDIV, VT: MVT::v2f64, Action: Expand); |
| 650 | setOperationAction(Op: ISD::FREM, VT: MVT::v2f64, Action: Expand); |
| 651 | // FIXME: Create unittest. |
| 652 | // In other words, find a case where "copysign" appears in the DAG with |
| 653 | // vector operands. |
| 654 | setOperationAction(Op: ISD::FCOPYSIGN, VT: MVT::v2f64, Action: Expand); |
| 655 | // FIXME: Code duplication: SETCC has custom operation action, see |
| 656 | // ARMTargetLowering::addTypeForNEON method for details. |
| 657 | setOperationAction(Op: ISD::SETCC, VT: MVT::v2f64, Action: Expand); |
| 658 | // FIXME: Create unittest for FNEG and for FABS. |
| 659 | setOperationAction(Op: ISD::FNEG, VT: MVT::v2f64, Action: Expand); |
| 660 | setOperationAction(Op: ISD::FABS, VT: MVT::v2f64, Action: Expand); |
| 661 | setOperationAction(Op: ISD::FSQRT, VT: MVT::v2f64, Action: Expand); |
| 662 | setOperationAction(Op: ISD::FSIN, VT: MVT::v2f64, Action: Expand); |
| 663 | setOperationAction(Op: ISD::FCOS, VT: MVT::v2f64, Action: Expand); |
| 664 | setOperationAction(Op: ISD::FTAN, VT: MVT::v2f64, Action: Expand); |
| 665 | setOperationAction(Op: ISD::FPOW, VT: MVT::v2f64, Action: Expand); |
| 666 | setOperationAction(Op: ISD::FLOG, VT: MVT::v2f64, Action: Expand); |
| 667 | setOperationAction(Op: ISD::FLOG2, VT: MVT::v2f64, Action: Expand); |
| 668 | setOperationAction(Op: ISD::FLOG10, VT: MVT::v2f64, Action: Expand); |
| 669 | setOperationAction(Op: ISD::FEXP, VT: MVT::v2f64, Action: Expand); |
| 670 | setOperationAction(Op: ISD::FEXP2, VT: MVT::v2f64, Action: Expand); |
| 671 | setOperationAction(Op: ISD::FEXP10, VT: MVT::v2f64, Action: Expand); |
| 672 | setOperationAction(Op: ISD::FCEIL, VT: MVT::v2f64, Action: Expand); |
| 673 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::v2f64, Action: Expand); |
| 674 | setOperationAction(Op: ISD::FRINT, VT: MVT::v2f64, Action: Expand); |
| 675 | setOperationAction(Op: ISD::FROUNDEVEN, VT: MVT::v2f64, Action: Expand); |
| 676 | setOperationAction(Op: ISD::FNEARBYINT, VT: MVT::v2f64, Action: Expand); |
| 677 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::v2f64, Action: Expand); |
| 678 | setOperationAction(Op: ISD::FMA, VT: MVT::v2f64, Action: Expand); |
| 679 | } |
| 680 | |
| 681 | if (Subtarget->hasNEON()) { |
| 682 | // The same applies to v4f32, but keep in mind that vadd, vsub and vmul are |
| 683 | // natively supported for v4f32. |
| 684 | setOperationAction(Op: ISD::FSQRT, VT: MVT::v4f32, Action: Expand); |
| 685 | setOperationAction(Op: ISD::FSIN, VT: MVT::v4f32, Action: Expand); |
| 686 | setOperationAction(Op: ISD::FCOS, VT: MVT::v4f32, Action: Expand); |
| 687 | setOperationAction(Op: ISD::FTAN, VT: MVT::v4f32, Action: Expand); |
| 688 | setOperationAction(Op: ISD::FPOW, VT: MVT::v4f32, Action: Expand); |
| 689 | setOperationAction(Op: ISD::FLOG, VT: MVT::v4f32, Action: Expand); |
| 690 | setOperationAction(Op: ISD::FLOG2, VT: MVT::v4f32, Action: Expand); |
| 691 | setOperationAction(Op: ISD::FLOG10, VT: MVT::v4f32, Action: Expand); |
| 692 | setOperationAction(Op: ISD::FEXP, VT: MVT::v4f32, Action: Expand); |
| 693 | setOperationAction(Op: ISD::FEXP2, VT: MVT::v4f32, Action: Expand); |
| 694 | setOperationAction(Op: ISD::FEXP10, VT: MVT::v4f32, Action: Expand); |
| 695 | setOperationAction(Op: ISD::FCEIL, VT: MVT::v4f32, Action: Expand); |
| 696 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::v4f32, Action: Expand); |
| 697 | setOperationAction(Op: ISD::FRINT, VT: MVT::v4f32, Action: Expand); |
| 698 | setOperationAction(Op: ISD::FROUNDEVEN, VT: MVT::v4f32, Action: Expand); |
| 699 | setOperationAction(Op: ISD::FNEARBYINT, VT: MVT::v4f32, Action: Expand); |
| 700 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::v4f32, Action: Expand); |
| 701 | |
| 702 | // Likewise, expand these operations for v2f32. |
| 703 | setOperationAction(Op: ISD::FSQRT, VT: MVT::v2f32, Action: Expand); |
| 704 | setOperationAction(Op: ISD::FSIN, VT: MVT::v2f32, Action: Expand); |
| 705 | setOperationAction(Op: ISD::FCOS, VT: MVT::v2f32, Action: Expand); |
| 706 | setOperationAction(Op: ISD::FTAN, VT: MVT::v2f32, Action: Expand); |
| 707 | setOperationAction(Op: ISD::FPOW, VT: MVT::v2f32, Action: Expand); |
| 708 | setOperationAction(Op: ISD::FLOG, VT: MVT::v2f32, Action: Expand); |
| 709 | setOperationAction(Op: ISD::FLOG2, VT: MVT::v2f32, Action: Expand); |
| 710 | setOperationAction(Op: ISD::FLOG10, VT: MVT::v2f32, Action: Expand); |
| 711 | setOperationAction(Op: ISD::FEXP, VT: MVT::v2f32, Action: Expand); |
| 712 | setOperationAction(Op: ISD::FEXP2, VT: MVT::v2f32, Action: Expand); |
| 713 | setOperationAction(Op: ISD::FEXP10, VT: MVT::v2f32, Action: Expand); |
| 714 | setOperationAction(Op: ISD::FCEIL, VT: MVT::v2f32, Action: Expand); |
| 715 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::v2f32, Action: Expand); |
| 716 | setOperationAction(Op: ISD::FRINT, VT: MVT::v2f32, Action: Expand); |
| 717 | setOperationAction(Op: ISD::FROUNDEVEN, VT: MVT::v2f32, Action: Expand); |
| 718 | setOperationAction(Op: ISD::FNEARBYINT, VT: MVT::v2f32, Action: Expand); |
| 719 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::v2f32, Action: Expand); |
| 720 | |
| 721 | for (ISD::NodeType Op : {ISD::FFLOOR, ISD::FNEARBYINT, ISD::FCEIL, |
| 722 | ISD::FRINT, ISD::FTRUNC, ISD::FROUNDEVEN}) { |
| 723 | setOperationAction(Op, VT: MVT::v4f16, Action: Expand); |
| 724 | setOperationAction(Op, VT: MVT::v8f16, Action: Expand); |
| 725 | } |
| 726 | |
| 727 | // Neon does not support some operations on v1i64 and v2i64 types. |
| 728 | setOperationAction(Op: ISD::MUL, VT: MVT::v1i64, Action: Expand); |
| 729 | // Custom handling for some quad-vector types to detect VMULL. |
| 730 | setOperationAction(Op: ISD::MUL, VT: MVT::v8i16, Action: Custom); |
| 731 | setOperationAction(Op: ISD::MUL, VT: MVT::v4i32, Action: Custom); |
| 732 | setOperationAction(Op: ISD::MUL, VT: MVT::v2i64, Action: Custom); |
| 733 | // Custom handling for some vector types to avoid expensive expansions |
| 734 | setOperationAction(Op: ISD::SDIV, VT: MVT::v4i16, Action: Custom); |
| 735 | setOperationAction(Op: ISD::SDIV, VT: MVT::v8i8, Action: Custom); |
| 736 | setOperationAction(Op: ISD::UDIV, VT: MVT::v4i16, Action: Custom); |
| 737 | setOperationAction(Op: ISD::UDIV, VT: MVT::v8i8, Action: Custom); |
| 738 | // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with |
| 739 | // a destination type that is wider than the source, nor does |
| 740 | // it have a FP_TO_[SU]INT instruction with a narrower destination than |
| 741 | // source. |
| 742 | setOperationAction(Op: ISD::SINT_TO_FP, VT: MVT::v4i16, Action: Custom); |
| 743 | setOperationAction(Op: ISD::SINT_TO_FP, VT: MVT::v8i16, Action: Custom); |
| 744 | setOperationAction(Op: ISD::UINT_TO_FP, VT: MVT::v4i16, Action: Custom); |
| 745 | setOperationAction(Op: ISD::UINT_TO_FP, VT: MVT::v8i16, Action: Custom); |
| 746 | setOperationAction(Op: ISD::FP_TO_UINT, VT: MVT::v4i16, Action: Custom); |
| 747 | setOperationAction(Op: ISD::FP_TO_UINT, VT: MVT::v8i16, Action: Custom); |
| 748 | setOperationAction(Op: ISD::FP_TO_SINT, VT: MVT::v4i16, Action: Custom); |
| 749 | setOperationAction(Op: ISD::FP_TO_SINT, VT: MVT::v8i16, Action: Custom); |
| 750 | |
| 751 | setOperationAction(Op: ISD::FP_ROUND, VT: MVT::v2f32, Action: Expand); |
| 752 | setOperationAction(Op: ISD::FP_EXTEND, VT: MVT::v2f64, Action: Expand); |
| 753 | |
| 754 | // NEON does not have a single-instruction CTPOP for vectors with element |
| 755 | // types wider than 8 bits. However, custom lowering can leverage the |
| 756 | // v8i8/v16i8 vcnt instruction. |
| 757 | setOperationAction(Op: ISD::CTPOP, VT: MVT::v2i32, Action: Custom); |
| 758 | setOperationAction(Op: ISD::CTPOP, VT: MVT::v4i32, Action: Custom); |
| 759 | setOperationAction(Op: ISD::CTPOP, VT: MVT::v4i16, Action: Custom); |
| 760 | setOperationAction(Op: ISD::CTPOP, VT: MVT::v8i16, Action: Custom); |
| 761 | setOperationAction(Op: ISD::CTPOP, VT: MVT::v1i64, Action: Custom); |
| 762 | setOperationAction(Op: ISD::CTPOP, VT: MVT::v2i64, Action: Custom); |
| 763 | |
| 764 | setOperationAction(Op: ISD::CTLZ, VT: MVT::v1i64, Action: Expand); |
| 765 | setOperationAction(Op: ISD::CTLZ, VT: MVT::v2i64, Action: Expand); |
| 766 | |
| 767 | // NEON does not have single instruction CTTZ for vectors. |
| 768 | setOperationAction(Op: ISD::CTTZ, VT: MVT::v8i8, Action: Custom); |
| 769 | setOperationAction(Op: ISD::CTTZ, VT: MVT::v4i16, Action: Custom); |
| 770 | setOperationAction(Op: ISD::CTTZ, VT: MVT::v2i32, Action: Custom); |
| 771 | setOperationAction(Op: ISD::CTTZ, VT: MVT::v1i64, Action: Custom); |
| 772 | |
| 773 | setOperationAction(Op: ISD::CTTZ, VT: MVT::v16i8, Action: Custom); |
| 774 | setOperationAction(Op: ISD::CTTZ, VT: MVT::v8i16, Action: Custom); |
| 775 | setOperationAction(Op: ISD::CTTZ, VT: MVT::v4i32, Action: Custom); |
| 776 | setOperationAction(Op: ISD::CTTZ, VT: MVT::v2i64, Action: Custom); |
| 777 | |
| 778 | setOperationAction(Op: ISD::CTTZ_ZERO_UNDEF, VT: MVT::v8i8, Action: Custom); |
| 779 | setOperationAction(Op: ISD::CTTZ_ZERO_UNDEF, VT: MVT::v4i16, Action: Custom); |
| 780 | setOperationAction(Op: ISD::CTTZ_ZERO_UNDEF, VT: MVT::v2i32, Action: Custom); |
| 781 | setOperationAction(Op: ISD::CTTZ_ZERO_UNDEF, VT: MVT::v1i64, Action: Custom); |
| 782 | |
| 783 | setOperationAction(Op: ISD::CTTZ_ZERO_UNDEF, VT: MVT::v16i8, Action: Custom); |
| 784 | setOperationAction(Op: ISD::CTTZ_ZERO_UNDEF, VT: MVT::v8i16, Action: Custom); |
| 785 | setOperationAction(Op: ISD::CTTZ_ZERO_UNDEF, VT: MVT::v4i32, Action: Custom); |
| 786 | setOperationAction(Op: ISD::CTTZ_ZERO_UNDEF, VT: MVT::v2i64, Action: Custom); |
| 787 | |
| 788 | for (MVT VT : MVT::fixedlen_vector_valuetypes()) { |
| 789 | setOperationAction(Op: ISD::MULHS, VT, Action: Expand); |
| 790 | setOperationAction(Op: ISD::MULHU, VT, Action: Expand); |
| 791 | } |
| 792 | |
| 793 | // NEON only has FMA instructions as of VFP4. |
| 794 | if (!Subtarget->hasVFP4Base()) { |
| 795 | setOperationAction(Op: ISD::FMA, VT: MVT::v2f32, Action: Expand); |
| 796 | setOperationAction(Op: ISD::FMA, VT: MVT::v4f32, Action: Expand); |
| 797 | } |
| 798 | |
| 799 | setTargetDAGCombine({ISD::SHL, ISD::SRL, ISD::SRA, ISD::FP_TO_SINT, |
| 800 | ISD::FP_TO_UINT, ISD::FMUL, ISD::LOAD}); |
| 801 | |
| 802 | // Extending loads from these narrow integer vector memory types are legal. |
| 803 | for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16, |
| 804 | MVT::v2i32}) { |
| 805 | for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) { |
| 806 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: VT, MemVT: Ty, Action: Legal); |
| 807 | setLoadExtAction(ExtType: ISD::ZEXTLOAD, ValVT: VT, MemVT: Ty, Action: Legal); |
| 808 | setLoadExtAction(ExtType: ISD::SEXTLOAD, ValVT: VT, MemVT: Ty, Action: Legal); |
| 809 | } |
| 810 | } |
| 811 | |
| 812 | for (auto VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v16i8, MVT::v8i16, |
| 813 | MVT::v4i32}) { |
| 814 | setOperationAction(Op: ISD::VECREDUCE_SMAX, VT, Action: Custom); |
| 815 | setOperationAction(Op: ISD::VECREDUCE_UMAX, VT, Action: Custom); |
| 816 | setOperationAction(Op: ISD::VECREDUCE_SMIN, VT, Action: Custom); |
| 817 | setOperationAction(Op: ISD::VECREDUCE_UMIN, VT, Action: Custom); |
| 818 | } |
| 819 | } |
| 820 | |
| 821 | if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) { |
| 822 | setTargetDAGCombine( |
| 823 | {ISD::BUILD_VECTOR, ISD::VECTOR_SHUFFLE, ISD::INSERT_SUBVECTOR, |
| 824 | ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT, |
| 825 | ISD::SIGN_EXTEND_INREG, ISD::STORE, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, |
| 826 | ISD::ANY_EXTEND, ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN, |
| 827 | ISD::INTRINSIC_VOID, ISD::VECREDUCE_ADD, ISD::ADD, ISD::BITCAST}); |
| 828 | } |
| 829 | if (Subtarget->hasMVEIntegerOps()) { |
| 830 | setTargetDAGCombine({ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX, |
| 831 | ISD::FP_EXTEND, ISD::SELECT, ISD::SELECT_CC, |
| 832 | ISD::SETCC}); |
| 833 | } |
| 834 | if (Subtarget->hasMVEFloatOps()) { |
| 835 | setTargetDAGCombine(ISD::FADD); |
| 836 | } |
| 837 | |
| 838 | if (!Subtarget->hasFP64()) { |
| 839 | // When targeting a floating-point unit with only single-precision |
| 840 | // operations, f64 is legal for the few double-precision instructions which |
| 841 | // are present. However, no double-precision operations other than moves, |
| 842 | // loads and stores are provided by the hardware. |
| 843 | setOperationAction(Op: ISD::FADD, VT: MVT::f64, Action: Expand); |
| 844 | setOperationAction(Op: ISD::FSUB, VT: MVT::f64, Action: Expand); |
| 845 | setOperationAction(Op: ISD::FMUL, VT: MVT::f64, Action: Expand); |
| 846 | setOperationAction(Op: ISD::FMA, VT: MVT::f64, Action: Expand); |
| 847 | setOperationAction(Op: ISD::FDIV, VT: MVT::f64, Action: Expand); |
| 848 | setOperationAction(Op: ISD::FREM, VT: MVT::f64, Action: LibCall); |
| 849 | setOperationAction(Op: ISD::FCOPYSIGN, VT: MVT::f64, Action: Expand); |
| 850 | setOperationAction(Op: ISD::FGETSIGN, VT: MVT::f64, Action: Expand); |
| 851 | setOperationAction(Op: ISD::FNEG, VT: MVT::f64, Action: Expand); |
| 852 | setOperationAction(Op: ISD::FABS, VT: MVT::f64, Action: Expand); |
| 853 | setOperationAction(Op: ISD::FSQRT, VT: MVT::f64, Action: Expand); |
| 854 | setOperationAction(Op: ISD::FSIN, VT: MVT::f64, Action: Expand); |
| 855 | setOperationAction(Op: ISD::FCOS, VT: MVT::f64, Action: Expand); |
| 856 | setOperationAction(Op: ISD::FPOW, VT: MVT::f64, Action: Expand); |
| 857 | setOperationAction(Op: ISD::FLOG, VT: MVT::f64, Action: Expand); |
| 858 | setOperationAction(Op: ISD::FLOG2, VT: MVT::f64, Action: Expand); |
| 859 | setOperationAction(Op: ISD::FLOG10, VT: MVT::f64, Action: Expand); |
| 860 | setOperationAction(Op: ISD::FEXP, VT: MVT::f64, Action: Expand); |
| 861 | setOperationAction(Op: ISD::FEXP2, VT: MVT::f64, Action: Expand); |
| 862 | setOperationAction(Op: ISD::FEXP10, VT: MVT::f64, Action: Expand); |
| 863 | setOperationAction(Op: ISD::FCEIL, VT: MVT::f64, Action: Expand); |
| 864 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::f64, Action: Expand); |
| 865 | setOperationAction(Op: ISD::FRINT, VT: MVT::f64, Action: Expand); |
| 866 | setOperationAction(Op: ISD::FROUNDEVEN, VT: MVT::f64, Action: Expand); |
| 867 | setOperationAction(Op: ISD::FNEARBYINT, VT: MVT::f64, Action: Expand); |
| 868 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::f64, Action: Expand); |
| 869 | setOperationAction(Op: ISD::SINT_TO_FP, VT: MVT::i32, Action: Custom); |
| 870 | setOperationAction(Op: ISD::UINT_TO_FP, VT: MVT::i32, Action: Custom); |
| 871 | setOperationAction(Op: ISD::FP_TO_SINT, VT: MVT::i32, Action: Custom); |
| 872 | setOperationAction(Op: ISD::FP_TO_UINT, VT: MVT::i32, Action: Custom); |
| 873 | setOperationAction(Op: ISD::FP_TO_SINT, VT: MVT::f64, Action: Custom); |
| 874 | setOperationAction(Op: ISD::FP_TO_UINT, VT: MVT::f64, Action: Custom); |
| 875 | setOperationAction(Op: ISD::FP_ROUND, VT: MVT::f32, Action: Custom); |
| 876 | setOperationAction(Op: ISD::STRICT_FP_TO_SINT, VT: MVT::f64, Action: Custom); |
| 877 | setOperationAction(Op: ISD::STRICT_FP_TO_UINT, VT: MVT::f64, Action: Custom); |
| 878 | setOperationAction(Op: ISD::STRICT_FP_ROUND, VT: MVT::f32, Action: Custom); |
| 879 | } |
| 880 | |
| 881 | setOperationAction(Op: ISD::STRICT_FP_TO_SINT, VT: MVT::i32, Action: Custom); |
| 882 | setOperationAction(Op: ISD::STRICT_FP_TO_UINT, VT: MVT::i32, Action: Custom); |
| 883 | |
| 884 | if (!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) { |
| 885 | setOperationAction(Op: ISD::FP_EXTEND, VT: MVT::f64, Action: Custom); |
| 886 | setOperationAction(Op: ISD::STRICT_FP_EXTEND, VT: MVT::f64, Action: Custom); |
| 887 | if (Subtarget->hasFullFP16()) { |
| 888 | setOperationAction(Op: ISD::FP_ROUND, VT: MVT::f16, Action: Custom); |
| 889 | setOperationAction(Op: ISD::STRICT_FP_ROUND, VT: MVT::f16, Action: Custom); |
| 890 | } |
| 891 | } else { |
| 892 | setOperationAction(Op: ISD::STRICT_FP_EXTEND, VT: MVT::f64, Action: Legal); |
| 893 | } |
| 894 | |
| 895 | if (!Subtarget->hasFP16()) { |
| 896 | setOperationAction(Op: ISD::FP_EXTEND, VT: MVT::f32, Action: Custom); |
| 897 | setOperationAction(Op: ISD::STRICT_FP_EXTEND, VT: MVT::f32, Action: Custom); |
| 898 | } else { |
| 899 | setOperationAction(Op: ISD::STRICT_FP_EXTEND, VT: MVT::f32, Action: Legal); |
| 900 | setOperationAction(Op: ISD::STRICT_FP_ROUND, VT: MVT::f16, Action: Legal); |
| 901 | } |
| 902 | |
| 903 | computeRegisterProperties(TRI: Subtarget->getRegisterInfo()); |
| 904 | |
| 905 | // ARM does not have floating-point extending loads. |
| 906 | for (MVT VT : MVT::fp_valuetypes()) { |
| 907 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: VT, MemVT: MVT::f32, Action: Expand); |
| 908 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: VT, MemVT: MVT::f16, Action: Expand); |
| 909 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: VT, MemVT: MVT::bf16, Action: Expand); |
| 910 | } |
| 911 | |
| 912 | // ... or truncating stores |
| 913 | setTruncStoreAction(ValVT: MVT::f64, MemVT: MVT::f32, Action: Expand); |
| 914 | setTruncStoreAction(ValVT: MVT::f32, MemVT: MVT::f16, Action: Expand); |
| 915 | setTruncStoreAction(ValVT: MVT::f64, MemVT: MVT::f16, Action: Expand); |
| 916 | setTruncStoreAction(ValVT: MVT::f32, MemVT: MVT::bf16, Action: Expand); |
| 917 | setTruncStoreAction(ValVT: MVT::f64, MemVT: MVT::bf16, Action: Expand); |
| 918 | |
| 919 | // ARM does not have an i1 sign-extending load. |
| 920 | for (MVT VT : MVT::integer_valuetypes()) |
| 921 | setLoadExtAction(ExtType: ISD::SEXTLOAD, ValVT: VT, MemVT: MVT::i1, Action: Promote); |
| 922 | |
| 923 | // ARM supports all 4 flavors of integer indexed load / store. |
| 924 | if (!Subtarget->isThumb1Only()) { |
| 925 | for (unsigned im = (unsigned)ISD::PRE_INC; |
| 926 | im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) { |
| 927 | setIndexedLoadAction(IdxModes: im, VT: MVT::i1, Action: Legal); |
| 928 | setIndexedLoadAction(IdxModes: im, VT: MVT::i8, Action: Legal); |
| 929 | setIndexedLoadAction(IdxModes: im, VT: MVT::i16, Action: Legal); |
| 930 | setIndexedLoadAction(IdxModes: im, VT: MVT::i32, Action: Legal); |
| 931 | setIndexedStoreAction(IdxModes: im, VT: MVT::i1, Action: Legal); |
| 932 | setIndexedStoreAction(IdxModes: im, VT: MVT::i8, Action: Legal); |
| 933 | setIndexedStoreAction(IdxModes: im, VT: MVT::i16, Action: Legal); |
| 934 | setIndexedStoreAction(IdxModes: im, VT: MVT::i32, Action: Legal); |
| 935 | } |
| 936 | } else { |
| 937 | // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}. |
| 938 | setIndexedLoadAction(IdxModes: ISD::POST_INC, VT: MVT::i32, Action: Legal); |
| 939 | setIndexedStoreAction(IdxModes: ISD::POST_INC, VT: MVT::i32, Action: Legal); |
| 940 | } |
| 941 | |
| 942 | // Custom loads/stores to possibly use __aeabi_uread/write* |
| 943 | if (Subtarget->isTargetAEABI() && !Subtarget->allowsUnalignedMem()) { |
| 944 | setOperationAction(Op: ISD::STORE, VT: MVT::i32, Action: Custom); |
| 945 | setOperationAction(Op: ISD::STORE, VT: MVT::i64, Action: Custom); |
| 946 | setOperationAction(Op: ISD::LOAD, VT: MVT::i32, Action: Custom); |
| 947 | setOperationAction(Op: ISD::LOAD, VT: MVT::i64, Action: Custom); |
| 948 | } |
| 949 | |
| 950 | setOperationAction(Op: ISD::SADDO, VT: MVT::i32, Action: Custom); |
| 951 | setOperationAction(Op: ISD::UADDO, VT: MVT::i32, Action: Custom); |
| 952 | setOperationAction(Op: ISD::SSUBO, VT: MVT::i32, Action: Custom); |
| 953 | setOperationAction(Op: ISD::USUBO, VT: MVT::i32, Action: Custom); |
| 954 | |
| 955 | setOperationAction(Op: ISD::UADDO_CARRY, VT: MVT::i32, Action: Custom); |
| 956 | setOperationAction(Op: ISD::USUBO_CARRY, VT: MVT::i32, Action: Custom); |
| 957 | if (Subtarget->hasDSP()) { |
| 958 | setOperationAction(Op: ISD::SADDSAT, VT: MVT::i8, Action: Custom); |
| 959 | setOperationAction(Op: ISD::SSUBSAT, VT: MVT::i8, Action: Custom); |
| 960 | setOperationAction(Op: ISD::SADDSAT, VT: MVT::i16, Action: Custom); |
| 961 | setOperationAction(Op: ISD::SSUBSAT, VT: MVT::i16, Action: Custom); |
| 962 | setOperationAction(Op: ISD::UADDSAT, VT: MVT::i8, Action: Custom); |
| 963 | setOperationAction(Op: ISD::USUBSAT, VT: MVT::i8, Action: Custom); |
| 964 | setOperationAction(Op: ISD::UADDSAT, VT: MVT::i16, Action: Custom); |
| 965 | setOperationAction(Op: ISD::USUBSAT, VT: MVT::i16, Action: Custom); |
| 966 | } |
| 967 | if (Subtarget->hasBaseDSP()) { |
| 968 | setOperationAction(Op: ISD::SADDSAT, VT: MVT::i32, Action: Legal); |
| 969 | setOperationAction(Op: ISD::SSUBSAT, VT: MVT::i32, Action: Legal); |
| 970 | } |
| 971 | |
| 972 | // i64 operation support. |
| 973 | setOperationAction(Op: ISD::MUL, VT: MVT::i64, Action: Expand); |
| 974 | setOperationAction(Op: ISD::MULHU, VT: MVT::i32, Action: Expand); |
| 975 | if (Subtarget->isThumb1Only()) { |
| 976 | setOperationAction(Op: ISD::UMUL_LOHI, VT: MVT::i32, Action: Expand); |
| 977 | setOperationAction(Op: ISD::SMUL_LOHI, VT: MVT::i32, Action: Expand); |
| 978 | } |
| 979 | if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops() |
| 980 | || (Subtarget->isThumb2() && !Subtarget->hasDSP())) |
| 981 | setOperationAction(Op: ISD::MULHS, VT: MVT::i32, Action: Expand); |
| 982 | |
| 983 | setOperationAction(Op: ISD::SHL_PARTS, VT: MVT::i32, Action: Custom); |
| 984 | setOperationAction(Op: ISD::SRA_PARTS, VT: MVT::i32, Action: Custom); |
| 985 | setOperationAction(Op: ISD::SRL_PARTS, VT: MVT::i32, Action: Custom); |
| 986 | setOperationAction(Op: ISD::SRL, VT: MVT::i64, Action: Custom); |
| 987 | setOperationAction(Op: ISD::SRA, VT: MVT::i64, Action: Custom); |
| 988 | setOperationAction(Op: ISD::INTRINSIC_VOID, VT: MVT::Other, Action: Custom); |
| 989 | setOperationAction(Op: ISD::INTRINSIC_WO_CHAIN, VT: MVT::i64, Action: Custom); |
| 990 | setOperationAction(Op: ISD::LOAD, VT: MVT::i64, Action: Custom); |
| 991 | setOperationAction(Op: ISD::STORE, VT: MVT::i64, Action: Custom); |
| 992 | |
| 993 | // MVE lowers 64-bit shifts to lsll and lsrl, |
| 994 | // assuming that ISD::SRL and SRA of i64 are already marked Custom above. |
| 995 | if (Subtarget->hasMVEIntegerOps()) |
| 996 | setOperationAction(Op: ISD::SHL, VT: MVT::i64, Action: Custom); |
| 997 | |
| 998 | // Expand to __aeabi_l{lsl,lsr,asr} calls for Thumb1. |
| 999 | if (Subtarget->isThumb1Only()) { |
| 1000 | setOperationAction(Op: ISD::SHL_PARTS, VT: MVT::i32, Action: Expand); |
| 1001 | setOperationAction(Op: ISD::SRA_PARTS, VT: MVT::i32, Action: Expand); |
| 1002 | setOperationAction(Op: ISD::SRL_PARTS, VT: MVT::i32, Action: Expand); |
| 1003 | } |
| 1004 | |
| 1005 | if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) |
| 1006 | setOperationAction(Op: ISD::BITREVERSE, VT: MVT::i32, Action: Legal); |
| 1007 | |
| 1008 | // ARM does not have ROTL. |
| 1009 | setOperationAction(Op: ISD::ROTL, VT: MVT::i32, Action: Expand); |
| 1010 | for (MVT VT : MVT::fixedlen_vector_valuetypes()) { |
| 1011 | setOperationAction(Op: ISD::ROTL, VT, Action: Expand); |
| 1012 | setOperationAction(Op: ISD::ROTR, VT, Action: Expand); |
| 1013 | } |
| 1014 | setOperationAction(Op: ISD::CTTZ, VT: MVT::i32, Action: Custom); |
| 1015 | // TODO: These two should be set to LibCall, but this currently breaks |
| 1016 | // the Linux kernel build. See #101786. |
| 1017 | setOperationAction(Op: ISD::CTPOP, VT: MVT::i32, Action: Expand); |
| 1018 | setOperationAction(Op: ISD::CTPOP, VT: MVT::i64, Action: Expand); |
| 1019 | if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) { |
| 1020 | setOperationAction(Op: ISD::CTLZ, VT: MVT::i32, Action: Expand); |
| 1021 | setOperationAction(Op: ISD::CTLZ_ZERO_UNDEF, VT: MVT::i32, Action: LibCall); |
| 1022 | } |
| 1023 | |
| 1024 | // @llvm.readcyclecounter requires the Performance Monitors extension. |
| 1025 | // Default to the 0 expansion on unsupported platforms. |
| 1026 | // FIXME: Technically there are older ARM CPUs that have |
| 1027 | // implementation-specific ways of obtaining this information. |
| 1028 | if (Subtarget->hasPerfMon()) |
| 1029 | setOperationAction(Op: ISD::READCYCLECOUNTER, VT: MVT::i64, Action: Custom); |
| 1030 | |
| 1031 | // Only ARMv6 and later have BSWAP (REV).
| 1032 | if (!Subtarget->hasV6Ops()) |
| 1033 | setOperationAction(Op: ISD::BSWAP, VT: MVT::i32, Action: Expand); |
| 1034 | |
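|      | // Hardware integer divide (SDIV/UDIV) availability is recorded separately for
|      | // ARM and Thumb encodings; query the one matching the current instruction set.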
| 1035 | bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode() |
| 1036 | : Subtarget->hasDivideInARMMode(); |
| 1037 | if (!hasDivide) { |
| 1038 | // These are expanded into libcalls if the CPU doesn't have a hardware divider.
| 1039 | setOperationAction(Op: ISD::SDIV, VT: MVT::i32, Action: LibCall); |
| 1040 | setOperationAction(Op: ISD::UDIV, VT: MVT::i32, Action: LibCall); |
| 1041 | } |
| 1042 | |
| 1043 | if (TT.isOSWindows() && !Subtarget->hasDivideInThumbMode()) { |
| 1044 | setOperationAction(Op: ISD::SDIV, VT: MVT::i32, Action: Custom); |
| 1045 | setOperationAction(Op: ISD::UDIV, VT: MVT::i32, Action: Custom); |
| 1046 | |
| 1047 | setOperationAction(Op: ISD::SDIV, VT: MVT::i64, Action: Custom); |
| 1048 | setOperationAction(Op: ISD::UDIV, VT: MVT::i64, Action: Custom); |
| 1049 | } |
| 1050 | |
| 1051 | setOperationAction(Op: ISD::SREM, VT: MVT::i32, Action: Expand); |
| 1052 | setOperationAction(Op: ISD::UREM, VT: MVT::i32, Action: Expand); |
| 1053 | |
| 1054 | // Register-based DivRem for AEABI (RTABI 4.2).
| 1055 | if (TT.isTargetAEABI() || TT.isAndroid() || TT.isTargetGNUAEABI() || |
| 1056 | TT.isTargetMuslAEABI() || TT.isOSFuchsia() || TT.isOSWindows()) { |
| 1057 | setOperationAction(Op: ISD::SREM, VT: MVT::i64, Action: Custom); |
| 1058 | setOperationAction(Op: ISD::UREM, VT: MVT::i64, Action: Custom); |
| 1059 | HasStandaloneRem = false; |
| 1060 | |
| 1061 | setOperationAction(Op: ISD::SDIVREM, VT: MVT::i32, Action: Custom); |
| 1062 | setOperationAction(Op: ISD::UDIVREM, VT: MVT::i32, Action: Custom); |
| 1063 | setOperationAction(Op: ISD::SDIVREM, VT: MVT::i64, Action: Custom); |
| 1064 | setOperationAction(Op: ISD::UDIVREM, VT: MVT::i64, Action: Custom); |
| 1065 | } else { |
| 1066 | setOperationAction(Op: ISD::SDIVREM, VT: MVT::i32, Action: Expand); |
| 1067 | setOperationAction(Op: ISD::UDIVREM, VT: MVT::i32, Action: Expand); |
| 1068 | } |
| 1069 | |
| 1070 | setOperationAction(Op: ISD::GlobalAddress, VT: MVT::i32, Action: Custom); |
| 1071 | setOperationAction(Op: ISD::ConstantPool, VT: MVT::i32, Action: Custom); |
| 1072 | setOperationAction(Op: ISD::GlobalTLSAddress, VT: MVT::i32, Action: Custom); |
| 1073 | setOperationAction(Op: ISD::BlockAddress, VT: MVT::i32, Action: Custom); |
| 1074 | |
| 1075 | setOperationAction(Op: ISD::TRAP, VT: MVT::Other, Action: Legal); |
| 1076 | setOperationAction(Op: ISD::DEBUGTRAP, VT: MVT::Other, Action: Legal); |
| 1077 | |
| 1078 | // Use the default implementation. |
| 1079 | setOperationAction(Op: ISD::VASTART, VT: MVT::Other, Action: Custom); |
| 1080 | setOperationAction(Op: ISD::VAARG, VT: MVT::Other, Action: Expand); |
| 1081 | setOperationAction(Op: ISD::VACOPY, VT: MVT::Other, Action: Expand); |
| 1082 | setOperationAction(Op: ISD::VAEND, VT: MVT::Other, Action: Expand); |
| 1083 | setOperationAction(Op: ISD::STACKSAVE, VT: MVT::Other, Action: Expand); |
| 1084 | setOperationAction(Op: ISD::STACKRESTORE, VT: MVT::Other, Action: Expand); |
| 1085 | |
| 1086 | if (TT.isOSWindows()) |
| 1087 | setOperationAction(Op: ISD::DYNAMIC_STACKALLOC, VT: MVT::i32, Action: Custom); |
| 1088 | else |
| 1089 | setOperationAction(Op: ISD::DYNAMIC_STACKALLOC, VT: MVT::i32, Action: Expand); |
| 1090 | |
| 1091 | // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use |
| 1092 | // the default expansion. |
| 1093 | InsertFencesForAtomic = false; |
| 1094 | if (Subtarget->hasAnyDataBarrier() && |
| 1095 | (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) { |
| 1096 | // ATOMIC_FENCE needs custom lowering; the others should have been expanded |
| 1097 | // to ldrex/strex loops already. |
| 1098 | setOperationAction(Op: ISD::ATOMIC_FENCE, VT: MVT::Other, Action: Custom); |
| 1099 | if (!Subtarget->isThumb() || !Subtarget->isMClass()) |
| 1100 | setOperationAction(Op: ISD::ATOMIC_CMP_SWAP, VT: MVT::i64, Action: Custom); |
| 1101 | |
| 1102 | // On v8, we have particularly efficient implementations of atomic fences |
| 1103 | // if they can be combined with nearby atomic loads and stores. |
| 1104 | if (!Subtarget->hasAcquireRelease() || |
| 1105 | getTargetMachine().getOptLevel() == CodeGenOptLevel::None) { |
| 1106 | // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc. |
| 1107 | InsertFencesForAtomic = true; |
| 1108 | } |
| 1109 | } else { |
| 1110 | // If there's anything we can use as a barrier, go through custom lowering |
| 1111 | // for ATOMIC_FENCE. |
| 1112 | // If the target has DMB in Thumb mode, fences can be inserted.
| 1113 | if (Subtarget->hasDataBarrier()) |
| 1114 | InsertFencesForAtomic = true; |
| 1115 | |
| 1116 | setOperationAction(Op: ISD::ATOMIC_FENCE, VT: MVT::Other, |
| 1117 | Action: Subtarget->hasAnyDataBarrier() ? Custom : Expand); |
| 1118 | |
| 1119 | // Set them all to LibCall, which will force libcall lowering.
| 1120 | setOperationAction(Op: ISD::ATOMIC_CMP_SWAP, VT: MVT::i32, Action: LibCall); |
| 1121 | setOperationAction(Op: ISD::ATOMIC_SWAP, VT: MVT::i32, Action: LibCall); |
| 1122 | setOperationAction(Op: ISD::ATOMIC_LOAD_ADD, VT: MVT::i32, Action: LibCall); |
| 1123 | setOperationAction(Op: ISD::ATOMIC_LOAD_SUB, VT: MVT::i32, Action: LibCall); |
| 1124 | setOperationAction(Op: ISD::ATOMIC_LOAD_AND, VT: MVT::i32, Action: LibCall); |
| 1125 | setOperationAction(Op: ISD::ATOMIC_LOAD_OR, VT: MVT::i32, Action: LibCall); |
| 1126 | setOperationAction(Op: ISD::ATOMIC_LOAD_XOR, VT: MVT::i32, Action: LibCall); |
| 1127 | setOperationAction(Op: ISD::ATOMIC_LOAD_NAND, VT: MVT::i32, Action: LibCall); |
| 1128 | setOperationAction(Op: ISD::ATOMIC_LOAD_MIN, VT: MVT::i32, Action: LibCall); |
| 1129 | setOperationAction(Op: ISD::ATOMIC_LOAD_MAX, VT: MVT::i32, Action: LibCall); |
| 1130 | setOperationAction(Op: ISD::ATOMIC_LOAD_UMIN, VT: MVT::i32, Action: LibCall); |
| 1131 | setOperationAction(Op: ISD::ATOMIC_LOAD_UMAX, VT: MVT::i32, Action: LibCall); |
| 1132 | // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the |
| 1133 | // Unordered/Monotonic case. |
| 1134 | if (!InsertFencesForAtomic) { |
| 1135 | setOperationAction(Op: ISD::ATOMIC_LOAD, VT: MVT::i32, Action: Custom); |
| 1136 | setOperationAction(Op: ISD::ATOMIC_STORE, VT: MVT::i32, Action: Custom); |
| 1137 | } |
| 1138 | } |
| 1139 | |
| 1140 | // Compute supported atomic widths. |
| 1141 | if (TT.isOSLinux() || (!Subtarget->isMClass() && Subtarget->hasV6Ops())) { |
| 1142 | // For targets where __sync_* routines are reliably available, we use them |
| 1143 | // if necessary. |
| 1144 | // |
| 1145 | // ARM Linux always supports 64-bit atomics through kernel-assisted atomic |
| 1146 | // routines (kernel 3.1 or later). FIXME: Not with compiler-rt? |
| 1147 | // |
| 1148 | // ARMv6 targets have native instructions in ARM mode. For Thumb mode, |
| 1149 | // such targets should provide __sync_* routines, which use the ARM mode |
| 1150 | // instructions. (ARMv6 doesn't have dmb, but it has an equivalent |
| 1151 | // encoding; see ARMISD::MEMBARRIER_MCR.) |
| 1152 | setMaxAtomicSizeInBitsSupported(64); |
| 1153 | } else if ((Subtarget->isMClass() && Subtarget->hasV8MBaselineOps()) || |
| 1154 | Subtarget->hasForced32BitAtomics()) { |
| 1155 | // Cortex-M cores (other than Cortex-M0) have 32-bit atomics.
| 1156 | setMaxAtomicSizeInBitsSupported(32); |
| 1157 | } else { |
| 1158 | // We can't assume anything about other targets; just use libatomic |
| 1159 | // routines. |
| 1160 | setMaxAtomicSizeInBitsSupported(0); |
| 1161 | } |
| 1162 | |
| 1163 | setMaxDivRemBitWidthSupported(64); |
| 1164 | |
| 1165 | setOperationAction(Op: ISD::PREFETCH, VT: MVT::Other, Action: Custom); |
| 1166 | |
| 1167 | // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes. |
| 1168 | if (!Subtarget->hasV6Ops()) { |
| 1169 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::i16, Action: Expand); |
| 1170 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::i8, Action: Expand); |
| 1171 | } |
| 1172 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::i1, Action: Expand); |
| 1173 | |
| 1174 | if (!Subtarget->useSoftFloat() && Subtarget->hasFPRegs() && |
| 1175 | !Subtarget->isThumb1Only()) { |
| 1176 | // Turn f64->i64 into VMOVRRD and i64->f64 into VMOVDRR,
| 1177 | // iff the target supports VFP2.
| 1178 | setOperationAction(Op: ISD::BITCAST, VT: MVT::i64, Action: Custom); |
| 1179 | setOperationAction(Op: ISD::GET_ROUNDING, VT: MVT::i32, Action: Custom); |
| 1180 | setOperationAction(Op: ISD::SET_ROUNDING, VT: MVT::Other, Action: Custom); |
| 1181 | setOperationAction(Op: ISD::GET_FPENV, VT: MVT::i32, Action: Legal); |
| 1182 | setOperationAction(Op: ISD::SET_FPENV, VT: MVT::i32, Action: Legal); |
| 1183 | setOperationAction(Op: ISD::RESET_FPENV, VT: MVT::Other, Action: Legal); |
| 1184 | setOperationAction(Op: ISD::GET_FPMODE, VT: MVT::i32, Action: Legal); |
| 1185 | setOperationAction(Op: ISD::SET_FPMODE, VT: MVT::i32, Action: Custom); |
| 1186 | setOperationAction(Op: ISD::RESET_FPMODE, VT: MVT::Other, Action: Custom); |
| 1187 | } |
| 1188 | |
| 1189 | // We want to custom lower some of our intrinsics. |
| 1190 | setOperationAction(Op: ISD::INTRINSIC_WO_CHAIN, VT: MVT::Other, Action: Custom); |
| 1191 | setOperationAction(Op: ISD::EH_SJLJ_SETJMP, VT: MVT::i32, Action: Custom); |
| 1192 | setOperationAction(Op: ISD::EH_SJLJ_LONGJMP, VT: MVT::Other, Action: Custom); |
| 1193 | setOperationAction(Op: ISD::EH_SJLJ_SETUP_DISPATCH, VT: MVT::Other, Action: Custom); |
| 1194 | |
| 1195 | setOperationAction(Op: ISD::SETCC, VT: MVT::i32, Action: Expand); |
| 1196 | setOperationAction(Op: ISD::SETCC, VT: MVT::f32, Action: Expand); |
| 1197 | setOperationAction(Op: ISD::SETCC, VT: MVT::f64, Action: Expand); |
| 1198 | setOperationAction(Op: ISD::SELECT, VT: MVT::i32, Action: Custom); |
| 1199 | setOperationAction(Op: ISD::SELECT, VT: MVT::f32, Action: Custom); |
| 1200 | setOperationAction(Op: ISD::SELECT, VT: MVT::f64, Action: Custom); |
| 1201 | setOperationAction(Op: ISD::SELECT_CC, VT: MVT::i32, Action: Custom); |
| 1202 | setOperationAction(Op: ISD::SELECT_CC, VT: MVT::f32, Action: Custom); |
| 1203 | setOperationAction(Op: ISD::SELECT_CC, VT: MVT::f64, Action: Custom); |
| 1204 | if (Subtarget->hasFullFP16()) { |
| 1205 | setOperationAction(Op: ISD::SETCC, VT: MVT::f16, Action: Expand); |
| 1206 | setOperationAction(Op: ISD::SELECT, VT: MVT::f16, Action: Custom); |
| 1207 | setOperationAction(Op: ISD::SELECT_CC, VT: MVT::f16, Action: Custom); |
| 1208 | } |
| 1209 | |
| 1210 | setOperationAction(Op: ISD::SETCCCARRY, VT: MVT::i32, Action: Custom); |
| 1211 | |
| 1212 | setOperationAction(Op: ISD::BRCOND, VT: MVT::Other, Action: Custom); |
| 1213 | setOperationAction(Op: ISD::BR_CC, VT: MVT::i32, Action: Custom); |
| 1214 | if (Subtarget->hasFullFP16()) |
| 1215 | setOperationAction(Op: ISD::BR_CC, VT: MVT::f16, Action: Custom); |
| 1216 | setOperationAction(Op: ISD::BR_CC, VT: MVT::f32, Action: Custom); |
| 1217 | setOperationAction(Op: ISD::BR_CC, VT: MVT::f64, Action: Custom); |
| 1218 | setOperationAction(Op: ISD::BR_JT, VT: MVT::Other, Action: Custom); |
| 1219 | |
| 1220 | // We don't support sin/cos/fmod/copysign/pow |
| 1221 | setOperationAction(Op: ISD::FSIN, VT: MVT::f64, Action: Expand); |
| 1222 | setOperationAction(Op: ISD::FSIN, VT: MVT::f32, Action: Expand); |
| 1223 | setOperationAction(Op: ISD::FCOS, VT: MVT::f32, Action: Expand); |
| 1224 | setOperationAction(Op: ISD::FCOS, VT: MVT::f64, Action: Expand); |
| 1225 | setOperationAction(Op: ISD::FSINCOS, VT: MVT::f64, Action: Expand); |
| 1226 | setOperationAction(Op: ISD::FSINCOS, VT: MVT::f32, Action: Expand); |
| 1227 | setOperationAction(Op: ISD::FREM, VT: MVT::f64, Action: LibCall); |
| 1228 | setOperationAction(Op: ISD::FREM, VT: MVT::f32, Action: LibCall); |
| 1229 | if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() && |
| 1230 | !Subtarget->isThumb1Only()) { |
| 1231 | setOperationAction(Op: ISD::FCOPYSIGN, VT: MVT::f64, Action: Custom); |
| 1232 | setOperationAction(Op: ISD::FCOPYSIGN, VT: MVT::f32, Action: Custom); |
| 1233 | } |
| 1234 | setOperationAction(Op: ISD::FPOW, VT: MVT::f64, Action: Expand); |
| 1235 | setOperationAction(Op: ISD::FPOW, VT: MVT::f32, Action: Expand); |
| 1236 | |
| 1237 | if (!Subtarget->hasVFP4Base()) { |
| 1238 | setOperationAction(Op: ISD::FMA, VT: MVT::f64, Action: Expand); |
| 1239 | setOperationAction(Op: ISD::FMA, VT: MVT::f32, Action: Expand); |
| 1240 | } |
| 1241 | |
| 1242 | // Various VFP goodness |
| 1243 | if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) { |
| 1244 | // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded. |
| 1245 | if (!Subtarget->hasFPARMv8Base() || !Subtarget->hasFP64()) { |
| 1246 | setOperationAction(Op: ISD::FP16_TO_FP, VT: MVT::f64, Action: Expand); |
| 1247 | setOperationAction(Op: ISD::FP_TO_FP16, VT: MVT::f64, Action: Expand); |
| 1248 | setOperationAction(Op: ISD::STRICT_FP16_TO_FP, VT: MVT::f64, Action: Expand); |
| 1249 | setOperationAction(Op: ISD::STRICT_FP_TO_FP16, VT: MVT::f64, Action: Expand); |
| 1250 | } |
| 1251 | |
| 1252 | // fp16 is a special v7 extension that adds f16 <-> f32 conversions. |
| 1253 | if (!Subtarget->hasFP16()) { |
| 1254 | setOperationAction(Op: ISD::FP16_TO_FP, VT: MVT::f32, Action: Expand); |
| 1255 | setOperationAction(Op: ISD::FP_TO_FP16, VT: MVT::f32, Action: Expand); |
| 1256 | setOperationAction(Op: ISD::STRICT_FP16_TO_FP, VT: MVT::f32, Action: Expand); |
| 1257 | setOperationAction(Op: ISD::STRICT_FP_TO_FP16, VT: MVT::f32, Action: Expand); |
| 1258 | } |
| 1259 | |
| 1260 | // Strict floating-point comparisons need custom lowering. |
| 1261 | setOperationAction(Op: ISD::STRICT_FSETCC, VT: MVT::f16, Action: Custom); |
| 1262 | setOperationAction(Op: ISD::STRICT_FSETCCS, VT: MVT::f16, Action: Custom); |
| 1263 | setOperationAction(Op: ISD::STRICT_FSETCC, VT: MVT::f32, Action: Custom); |
| 1264 | setOperationAction(Op: ISD::STRICT_FSETCCS, VT: MVT::f32, Action: Custom); |
| 1265 | setOperationAction(Op: ISD::STRICT_FSETCC, VT: MVT::f64, Action: Custom); |
| 1266 | setOperationAction(Op: ISD::STRICT_FSETCCS, VT: MVT::f64, Action: Custom); |
| 1267 | } |
| 1268 | |
| 1269 | setOperationAction(Op: ISD::FSINCOS, VT: MVT::f64, Action: Expand); |
| 1270 | setOperationAction(Op: ISD::FSINCOS, VT: MVT::f32, Action: Expand); |
| 1271 | |
| 1272 | // FP-ARMv8 implements a lot of rounding-like FP operations. |
| 1273 | if (Subtarget->hasFPARMv8Base()) { |
| 1274 | for (auto Op : |
| 1275 | {ISD::FFLOOR, ISD::FCEIL, ISD::FROUND, |
| 1276 | ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT, |
| 1277 | ISD::FROUNDEVEN, ISD::FMINNUM, ISD::FMAXNUM, |
| 1278 | ISD::STRICT_FFLOOR, ISD::STRICT_FCEIL, ISD::STRICT_FROUND, |
| 1279 | ISD::STRICT_FTRUNC, ISD::STRICT_FNEARBYINT, ISD::STRICT_FRINT, |
| 1280 | ISD::STRICT_FROUNDEVEN, ISD::STRICT_FMINNUM, ISD::STRICT_FMAXNUM}) { |
| 1281 | setOperationAction(Op, VT: MVT::f32, Action: Legal); |
| 1282 | |
| 1283 | if (Subtarget->hasFP64()) |
| 1284 | setOperationAction(Op, VT: MVT::f64, Action: Legal); |
| 1285 | } |
| 1286 | |
| 1287 | if (Subtarget->hasNEON()) { |
| 1288 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::v2f32, Action: Legal); |
| 1289 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::v2f32, Action: Legal); |
| 1290 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::v4f32, Action: Legal); |
| 1291 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::v4f32, Action: Legal); |
| 1292 | } |
| 1293 | } |
| 1294 | |
| 1295 | // FP16 operations often need to be promoted to call library functions.
| 1296 | // clang-format off |
| 1297 | if (Subtarget->hasFullFP16()) { |
| 1298 | setOperationAction(Op: ISD::LRINT, VT: MVT::f16, Action: Expand); |
| 1299 | setOperationAction(Op: ISD::LROUND, VT: MVT::f16, Action: Expand); |
| 1300 | setOperationAction(Op: ISD::FCOPYSIGN, VT: MVT::f16, Action: Expand); |
| 1301 | |
| 1302 | for (auto Op : {ISD::FREM, ISD::FPOW, ISD::FPOWI, |
| 1303 | ISD::FCOS, ISD::FSIN, ISD::FSINCOS, |
| 1304 | ISD::FSINCOSPI, ISD::FMODF, ISD::FACOS, |
| 1305 | ISD::FASIN, ISD::FATAN, ISD::FATAN2, |
| 1306 | ISD::FCOSH, ISD::FSINH, ISD::FTANH, |
| 1307 | ISD::FTAN, ISD::FEXP, ISD::FEXP2, |
| 1308 | ISD::FEXP10, ISD::FLOG, ISD::FLOG2, |
| 1309 | ISD::FLOG10, ISD::STRICT_FREM, ISD::STRICT_FPOW, |
| 1310 | ISD::STRICT_FPOWI, ISD::STRICT_FCOS, ISD::STRICT_FSIN, |
| 1311 | ISD::STRICT_FACOS, ISD::STRICT_FASIN, ISD::STRICT_FATAN, |
| 1312 | ISD::STRICT_FATAN2, ISD::STRICT_FCOSH, ISD::STRICT_FSINH, |
| 1313 | ISD::STRICT_FTANH, ISD::STRICT_FEXP, ISD::STRICT_FEXP2, |
| 1314 | ISD::STRICT_FLOG, ISD::STRICT_FLOG2, ISD::STRICT_FLOG10, |
| 1315 | ISD::STRICT_FTAN}) { |
| 1316 | setOperationAction(Op, VT: MVT::f16, Action: Promote); |
| 1317 | } |
| 1318 | |
| 1319 | // Round-to-integer operations need custom lowering for fp16, as Promote doesn't
| 1320 | // work because the result type is integer.
| 1321 | for (auto Op : {ISD::STRICT_LROUND, ISD::STRICT_LLROUND, ISD::STRICT_LRINT, ISD::STRICT_LLRINT}) |
| 1322 | setOperationAction(Op, VT: MVT::f16, Action: Custom); |
| 1323 | |
| 1324 | for (auto Op : {ISD::FROUND, ISD::FROUNDEVEN, ISD::FTRUNC, |
| 1325 | ISD::FNEARBYINT, ISD::FRINT, ISD::FFLOOR, |
| 1326 | ISD::FCEIL, ISD::STRICT_FROUND, ISD::STRICT_FROUNDEVEN, |
| 1327 | ISD::STRICT_FTRUNC, ISD::STRICT_FNEARBYINT, ISD::STRICT_FRINT, |
| 1328 | ISD::STRICT_FFLOOR, ISD::STRICT_FCEIL}) { |
| 1329 | setOperationAction(Op, VT: MVT::f16, Action: Legal); |
| 1330 | } |
| 1331 | // clang-format on |
| 1332 | } |
| 1333 | |
| 1334 | if (Subtarget->hasNEON()) { |
| 1335 | // vmin and vmax aren't available in a scalar form, so we can use |
| 1336 | // a NEON instruction with an undef lane instead. |
| 1337 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::f32, Action: Legal); |
| 1338 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::f32, Action: Legal); |
| 1339 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::f16, Action: Legal); |
| 1340 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::f16, Action: Legal); |
| 1341 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::v2f32, Action: Legal); |
| 1342 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::v2f32, Action: Legal); |
| 1343 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::v4f32, Action: Legal); |
| 1344 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::v4f32, Action: Legal); |
| 1345 | |
| 1346 | if (Subtarget->hasV8Ops()) { |
| 1347 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::v2f32, Action: Legal); |
| 1348 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::v4f32, Action: Legal); |
| 1349 | setOperationAction(Op: ISD::FROUND, VT: MVT::v2f32, Action: Legal); |
| 1350 | setOperationAction(Op: ISD::FROUND, VT: MVT::v4f32, Action: Legal); |
| 1351 | setOperationAction(Op: ISD::FROUNDEVEN, VT: MVT::v2f32, Action: Legal); |
| 1352 | setOperationAction(Op: ISD::FROUNDEVEN, VT: MVT::v4f32, Action: Legal); |
| 1353 | setOperationAction(Op: ISD::FCEIL, VT: MVT::v2f32, Action: Legal); |
| 1354 | setOperationAction(Op: ISD::FCEIL, VT: MVT::v4f32, Action: Legal); |
| 1355 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::v2f32, Action: Legal); |
| 1356 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::v4f32, Action: Legal); |
| 1357 | setOperationAction(Op: ISD::FRINT, VT: MVT::v2f32, Action: Legal); |
| 1358 | setOperationAction(Op: ISD::FRINT, VT: MVT::v4f32, Action: Legal); |
| 1359 | } |
| 1360 | |
| 1361 | if (Subtarget->hasFullFP16()) { |
| 1362 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::v4f16, Action: Legal); |
| 1363 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::v4f16, Action: Legal); |
| 1364 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::v8f16, Action: Legal); |
| 1365 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::v8f16, Action: Legal); |
| 1366 | |
| 1367 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::v4f16, Action: Legal); |
| 1368 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::v4f16, Action: Legal); |
| 1369 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::v8f16, Action: Legal); |
| 1370 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::v8f16, Action: Legal); |
| 1371 | |
| 1372 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::v4f16, Action: Legal); |
| 1373 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::v8f16, Action: Legal); |
| 1374 | setOperationAction(Op: ISD::FROUND, VT: MVT::v4f16, Action: Legal); |
| 1375 | setOperationAction(Op: ISD::FROUND, VT: MVT::v8f16, Action: Legal); |
| 1376 | setOperationAction(Op: ISD::FROUNDEVEN, VT: MVT::v4f16, Action: Legal); |
| 1377 | setOperationAction(Op: ISD::FROUNDEVEN, VT: MVT::v8f16, Action: Legal); |
| 1378 | setOperationAction(Op: ISD::FCEIL, VT: MVT::v4f16, Action: Legal); |
| 1379 | setOperationAction(Op: ISD::FCEIL, VT: MVT::v8f16, Action: Legal); |
| 1380 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::v4f16, Action: Legal); |
| 1381 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::v8f16, Action: Legal); |
| 1382 | setOperationAction(Op: ISD::FRINT, VT: MVT::v4f16, Action: Legal); |
| 1383 | setOperationAction(Op: ISD::FRINT, VT: MVT::v8f16, Action: Legal); |
| 1384 | } |
| 1385 | } |
| 1386 | |
| 1387 | // On MSVC, both 32-bit and 64-bit, ldexpf(f32) is not defined. MinGW has |
| 1388 | // it, but it's just a wrapper around ldexp. |
| 1389 | if (TT.isOSWindows()) { |
| 1390 | for (ISD::NodeType Op : {ISD::FLDEXP, ISD::STRICT_FLDEXP, ISD::FFREXP}) |
| 1391 | if (isOperationExpand(Op, VT: MVT::f32)) |
| 1392 | setOperationAction(Op, VT: MVT::f32, Action: Promote); |
| 1393 | } |
| 1394 | |
| 1395 | // LegalizeDAG currently can't expand fp16 LDEXP/FREXP on targets where i16 |
| 1396 | // isn't legal. |
| 1397 | for (ISD::NodeType Op : {ISD::FLDEXP, ISD::STRICT_FLDEXP, ISD::FFREXP}) |
| 1398 | if (isOperationExpand(Op, VT: MVT::f16)) |
| 1399 | setOperationAction(Op, VT: MVT::f16, Action: Promote); |
| 1400 | |
| 1401 | // We have target-specific dag combine patterns for the following nodes: |
| 1402 | // ARMISD::VMOVRRD - No need to call setTargetDAGCombine |
| 1403 | setTargetDAGCombine( |
| 1404 | {ISD::ADD, ISD::SUB, ISD::MUL, ISD::AND, ISD::OR, ISD::XOR}); |
| 1405 | |
| 1406 | if (Subtarget->hasMVEIntegerOps()) |
| 1407 | setTargetDAGCombine(ISD::VSELECT); |
| 1408 | |
| 1409 | if (Subtarget->hasV6Ops()) |
| 1410 | setTargetDAGCombine(ISD::SRL); |
| 1411 | if (Subtarget->isThumb1Only()) |
| 1412 | setTargetDAGCombine(ISD::SHL); |
| 1413 | // Attempt to lower smin/smax to ssat/usat |
| 1414 | if ((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || |
| 1415 | Subtarget->isThumb2()) { |
| 1416 | setTargetDAGCombine({ISD::SMIN, ISD::SMAX}); |
| 1417 | } |
| 1418 | |
| 1419 | setStackPointerRegisterToSaveRestore(ARM::SP); |
| 1420 | |
| 1421 | if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() || |
| 1422 | !Subtarget->hasVFP2Base() || Subtarget->hasMinSize()) |
| 1423 | setSchedulingPreference(Sched::RegPressure); |
| 1424 | else |
| 1425 | setSchedulingPreference(Sched::Hybrid); |
| 1426 | |
| 1427 | // FIXME: Temporary - rewrite this interface to use types.
| 1428 | MaxStoresPerMemset = 8; |
| 1429 | MaxStoresPerMemsetOptSize = 4; |
| 1430 | MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores |
| 1431 | MaxStoresPerMemcpyOptSize = 2; |
| 1432 | MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores |
| 1433 | MaxStoresPerMemmoveOptSize = 2; |
| 1434 | |
| 1435 | // On ARM, arguments smaller than 4 bytes are extended to 4 bytes, so all
| 1436 | // arguments are at least 4-byte aligned.
| 1437 | setMinStackArgumentAlignment(Align(4)); |
| 1438 | |
| 1439 | // Prefer likely predicted branches to selects on out-of-order cores. |
| 1440 | PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder(); |
| 1441 | |
| 1442 | setPrefLoopAlignment(Align(1ULL << Subtarget->getPreferBranchLogAlignment())); |
| 1443 | setPrefFunctionAlignment( |
| 1444 | Align(1ULL << Subtarget->getPreferBranchLogAlignment())); |
| 1445 | |
| 1446 | setMinFunctionAlignment(Subtarget->isThumb() ? Align(2) : Align(4)); |
| 1447 | |
| 1448 | IsStrictFPEnabled = true; |
| 1449 | } |
| 1450 | |
| 1451 | bool ARMTargetLowering::useSoftFloat() const { |
| 1452 | return Subtarget->useSoftFloat(); |
| 1453 | } |
| 1454 | |
| 1455 | bool ARMTargetLowering::preferSelectsOverBooleanArithmetic(EVT VT) const { |
| 1456 | return !Subtarget->isThumb1Only() && VT.getSizeInBits() <= 32; |
| 1457 | } |
| 1458 | |
| 1459 | // FIXME: It might make sense to define the representative register class as the
| 1460 | // nearest super-register class that has a non-null superset. For example, DPR_VFP2
| 1461 | // is a super-register class of SPR, and DPR is a superset of DPR_VFP2. Consequently,
| 1462 | // SPR's representative would be DPR_VFP2. This should work well if register
| 1463 | // pressure tracking were modified such that a register use would increment the
| 1464 | // pressure of the register class's representative and all of its super
| 1465 | // classes' representatives transitively. We have not implemented this because
| 1466 | // of the difficulty prior to coalescing of modeling operand register classes
| 1467 | // due to the common occurrence of cross-class copies and subregister insertions
| 1468 | // and extractions.
| 1469 | std::pair<const TargetRegisterClass *, uint8_t> |
| 1470 | ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI, |
| 1471 | MVT VT) const { |
| 1472 | const TargetRegisterClass *RRC = nullptr; |
| 1473 | uint8_t Cost = 1; |
| 1474 | switch (VT.SimpleTy) { |
| 1475 | default: |
| 1476 | return TargetLowering::findRepresentativeClass(TRI, VT); |
| 1477 | // Use DPR as the representative register class for all floating-point
| 1478 | // and vector types. Since there are 32 SPR registers and 32 DPR registers,
| 1479 | // the cost is 1 for both f32 and f64.
| 1480 | case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16: |
| 1481 | case MVT::v2i32: case MVT::v1i64: case MVT::v2f32: |
| 1482 | RRC = &ARM::DPRRegClass; |
| 1483 | // When NEON is used for SP, only half of the register file is available |
| 1484 | // because operations that define both SP and DP results will be constrained |
| 1485 | // to the VFP2 class (D0-D15). We currently model this constraint prior to |
| 1486 | // coalescing by double-counting the SP regs. See the FIXME above. |
| 1487 | if (Subtarget->useNEONForSinglePrecisionFP()) |
| 1488 | Cost = 2; |
| 1489 | break; |
| 1490 | case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: |
| 1491 | case MVT::v4f32: case MVT::v2f64: |
| 1492 | RRC = &ARM::DPRRegClass; |
| 1493 | Cost = 2; |
| 1494 | break; |
| 1495 | case MVT::v4i64: |
| 1496 | RRC = &ARM::DPRRegClass; |
| 1497 | Cost = 4; |
| 1498 | break; |
| 1499 | case MVT::v8i64: |
| 1500 | RRC = &ARM::DPRRegClass; |
| 1501 | Cost = 8; |
| 1502 | break; |
| 1503 | } |
| 1504 | return std::make_pair(x&: RRC, y&: Cost); |
| 1505 | } |
| 1506 | |
| 1507 | EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &, |
| 1508 | EVT VT) const { |
| 1509 | if (!VT.isVector()) |
| 1510 | return getPointerTy(DL); |
| 1511 | |
| 1512 | // MVE has a predicate register. |
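|      | // For example, an MVE v4i32 comparison yields a v4i1 predicate.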
| 1513 | if ((Subtarget->hasMVEIntegerOps() && |
| 1514 | (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 || |
| 1515 | VT == MVT::v16i8)) || |
| 1516 | (Subtarget->hasMVEFloatOps() && |
| 1517 | (VT == MVT::v2f64 || VT == MVT::v4f32 || VT == MVT::v8f16))) |
| 1518 | return MVT::getVectorVT(VT: MVT::i1, EC: VT.getVectorElementCount()); |
| 1519 | return VT.changeVectorElementTypeToInteger(); |
| 1520 | } |
| 1521 | |
| 1522 | /// getRegClassFor - Return the register class that should be used for the |
| 1523 | /// specified value type. |
| 1524 | const TargetRegisterClass * |
| 1525 | ARMTargetLowering::getRegClassFor(MVT VT, bool isDivergent) const { |
| 1526 | (void)isDivergent; |
| 1527 | // Map v4i64 to QQ registers but do not make the type legal. Similarly map |
| 1528 | // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to |
| 1529 | // load / store 4 to 8 consecutive NEON D registers, or 2 to 4 consecutive |
| 1530 | // MVE Q registers. |
| 1531 | if (Subtarget->hasNEON()) { |
| 1532 | if (VT == MVT::v4i64) |
| 1533 | return &ARM::QQPRRegClass; |
| 1534 | if (VT == MVT::v8i64) |
| 1535 | return &ARM::QQQQPRRegClass; |
| 1536 | } |
| 1537 | if (Subtarget->hasMVEIntegerOps()) { |
| 1538 | if (VT == MVT::v4i64) |
| 1539 | return &ARM::MQQPRRegClass; |
| 1540 | if (VT == MVT::v8i64) |
| 1541 | return &ARM::MQQQQPRRegClass; |
| 1542 | } |
| 1543 | return TargetLowering::getRegClassFor(VT); |
| 1544 | } |
| 1545 | |
| 1546 | // memcpy and other memory intrinsics typically try to use LDM/STM if the
| 1547 | // source/dest is aligned and the copy size is large enough. We therefore want |
| 1548 | // to align such objects passed to memory intrinsics. |
| 1549 | bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, |
| 1550 | Align &PrefAlign) const { |
| 1551 | if (!isa<MemIntrinsic>(Val: CI)) |
| 1552 | return false; |
| 1553 | MinSize = 8; |
| 1554 | // On ARM11 onwards (excluding M-class), an 8-byte-aligned LDM is typically one
| 1555 | // cycle faster than a 4-byte-aligned LDM.
| 1556 | PrefAlign = |
| 1557 | (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? Align(8) : Align(4)); |
| 1558 | return true; |
| 1559 | } |
| 1560 | |
| 1561 | // Create a fast isel object. |
| 1562 | FastISel *ARMTargetLowering::createFastISel( |
| 1563 | FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo, |
| 1564 | const LibcallLoweringInfo *libcallLowering) const { |
| 1565 | return ARM::createFastISel(funcInfo, libInfo, libcallLowering); |
| 1566 | } |
| 1567 | |
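|      | // Schedule for ILP when the node produces floating-point or vector values, or
|      | // when it is a machine instruction with a long-latency definition according to
|      | // the itinerary; otherwise schedule to minimise register pressure.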
| 1568 | Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const { |
| 1569 | unsigned NumVals = N->getNumValues(); |
| 1570 | if (!NumVals) |
| 1571 | return Sched::RegPressure; |
| 1572 | |
| 1573 | for (unsigned i = 0; i != NumVals; ++i) { |
| 1574 | EVT VT = N->getValueType(ResNo: i); |
| 1575 | if (VT == MVT::Glue || VT == MVT::Other) |
| 1576 | continue; |
| 1577 | if (VT.isFloatingPoint() || VT.isVector()) |
| 1578 | return Sched::ILP; |
| 1579 | } |
| 1580 | |
| 1581 | if (!N->isMachineOpcode()) |
| 1582 | return Sched::RegPressure; |
| 1583 | |
| 1584 | // Loads are scheduled for latency even if the instruction itinerary
| 1585 | // is not available.
| 1586 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 1587 | const MCInstrDesc &MCID = TII->get(Opcode: N->getMachineOpcode()); |
| 1588 | |
| 1589 | if (MCID.getNumDefs() == 0) |
| 1590 | return Sched::RegPressure; |
| 1591 | if (!Itins->isEmpty() && |
| 1592 | Itins->getOperandCycle(ItinClassIndx: MCID.getSchedClass(), OperandIdx: 0) > 2U) |
| 1593 | return Sched::ILP; |
| 1594 | |
| 1595 | return Sched::RegPressure; |
| 1596 | } |
| 1597 | |
| 1598 | //===----------------------------------------------------------------------===// |
| 1599 | // Lowering Code |
| 1600 | //===----------------------------------------------------------------------===// |
| 1601 | |
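|      | // Helpers that match a shift by exactly 16 bits, used to recognise the top or
|      | // bottom half of a 32-bit value.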
| 1602 | static bool isSRL16(const SDValue &Op) { |
| 1603 | if (Op.getOpcode() != ISD::SRL) |
| 1604 | return false; |
| 1605 | if (auto Const = dyn_cast<ConstantSDNode>(Val: Op.getOperand(i: 1))) |
| 1606 | return Const->getZExtValue() == 16; |
| 1607 | return false; |
| 1608 | } |
| 1609 | |
| 1610 | static bool isSRA16(const SDValue &Op) { |
| 1611 | if (Op.getOpcode() != ISD::SRA) |
| 1612 | return false; |
| 1613 | if (auto Const = dyn_cast<ConstantSDNode>(Val: Op.getOperand(i: 1))) |
| 1614 | return Const->getZExtValue() == 16; |
| 1615 | return false; |
| 1616 | } |
| 1617 | |
| 1618 | static bool isSHL16(const SDValue &Op) { |
| 1619 | if (Op.getOpcode() != ISD::SHL) |
| 1620 | return false; |
| 1621 | if (auto Const = dyn_cast<ConstantSDNode>(Val: Op.getOperand(i: 1))) |
| 1622 | return Const->getZExtValue() == 16; |
| 1623 | return false; |
| 1624 | } |
| 1625 | |
| 1626 | // Check for a signed 16-bit value. We special-case SRA because it makes things
| 1627 | // simpler when also looking for SRAs that aren't sign-extending a smaller
| 1628 | // value. Without the check, we'd need to take extra care with the checking
| 1629 | // order for some operations.
| 1630 | static bool isS16(const SDValue &Op, SelectionDAG &DAG) { |
| 1631 | if (isSRA16(Op)) |
| 1632 | return isSHL16(Op: Op.getOperand(i: 0)); |
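|      | // 17 known sign bits in a 32-bit value means the value is sign-extended from
|      | // at most 16 bits, i.e. it is a signed 16-bit quantity.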
| 1633 | return DAG.ComputeNumSignBits(Op) == 17; |
| 1634 | } |
| 1635 | |
| 1636 | /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC |
| 1637 | static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) { |
| 1638 | switch (CC) { |
| 1639 | default: llvm_unreachable("Unknown condition code!" ); |
| 1640 | case ISD::SETNE: return ARMCC::NE; |
| 1641 | case ISD::SETEQ: return ARMCC::EQ; |
| 1642 | case ISD::SETGT: return ARMCC::GT; |
| 1643 | case ISD::SETGE: return ARMCC::GE; |
| 1644 | case ISD::SETLT: return ARMCC::LT; |
| 1645 | case ISD::SETLE: return ARMCC::LE; |
| 1646 | case ISD::SETUGT: return ARMCC::HI; |
| 1647 | case ISD::SETUGE: return ARMCC::HS; |
| 1648 | case ISD::SETULT: return ARMCC::LO; |
| 1649 | case ISD::SETULE: return ARMCC::LS; |
| 1650 | } |
| 1651 | } |
| 1652 | |
| 1653 | /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC. |
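|      | /// Some conditions (e.g. SETONE, SETUEQ) cannot be tested with a single ARM
|      | /// condition code; for these a second condition is returned in CondCode2
|      | /// (ARMCC::AL means the second condition is unused).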
| 1654 | static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, |
| 1655 | ARMCC::CondCodes &CondCode2) { |
| 1656 | CondCode2 = ARMCC::AL; |
| 1657 | switch (CC) { |
| 1658 | default: llvm_unreachable("Unknown FP condition!" ); |
| 1659 | case ISD::SETEQ: |
| 1660 | case ISD::SETOEQ: CondCode = ARMCC::EQ; break; |
| 1661 | case ISD::SETGT: |
| 1662 | case ISD::SETOGT: CondCode = ARMCC::GT; break; |
| 1663 | case ISD::SETGE: |
| 1664 | case ISD::SETOGE: CondCode = ARMCC::GE; break; |
| 1665 | case ISD::SETOLT: CondCode = ARMCC::MI; break; |
| 1666 | case ISD::SETOLE: CondCode = ARMCC::LS; break; |
| 1667 | case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break; |
| 1668 | case ISD::SETO: CondCode = ARMCC::VC; break; |
| 1669 | case ISD::SETUO: CondCode = ARMCC::VS; break; |
| 1670 | case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break; |
| 1671 | case ISD::SETUGT: CondCode = ARMCC::HI; break; |
| 1672 | case ISD::SETUGE: CondCode = ARMCC::PL; break; |
| 1673 | case ISD::SETLT: |
| 1674 | case ISD::SETULT: CondCode = ARMCC::LT; break; |
| 1675 | case ISD::SETLE: |
| 1676 | case ISD::SETULE: CondCode = ARMCC::LE; break; |
| 1677 | case ISD::SETNE: |
| 1678 | case ISD::SETUNE: CondCode = ARMCC::NE; break; |
| 1679 | } |
| 1680 | } |
| 1681 | |
| 1682 | //===----------------------------------------------------------------------===// |
| 1683 | // Calling Convention Implementation |
| 1684 | //===----------------------------------------------------------------------===// |
| 1685 | |
| 1686 | /// getEffectiveCallingConv - Get the effective calling convention, taking into
| 1687 | /// account the presence of floating-point hardware and calling-convention
| 1688 | /// limitations, such as support for variadic functions.
| 1689 | CallingConv::ID |
| 1690 | ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC, |
| 1691 | bool isVarArg) const { |
| 1692 | switch (CC) { |
| 1693 | default: |
| 1694 | report_fatal_error(reason: "Unsupported calling convention" ); |
| 1695 | case CallingConv::ARM_AAPCS: |
| 1696 | case CallingConv::ARM_APCS: |
| 1697 | case CallingConv::GHC: |
| 1698 | case CallingConv::CFGuard_Check: |
| 1699 | return CC; |
| 1700 | case CallingConv::PreserveMost: |
| 1701 | return CallingConv::PreserveMost; |
| 1702 | case CallingConv::PreserveAll: |
| 1703 | return CallingConv::PreserveAll; |
| 1704 | case CallingConv::ARM_AAPCS_VFP: |
| 1705 | case CallingConv::Swift: |
| 1706 | case CallingConv::SwiftTail: |
| 1707 | return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP; |
| 1708 | case CallingConv::C: |
| 1709 | case CallingConv::Tail: |
| 1710 | if (!getTM().isAAPCS_ABI()) |
| 1711 | return CallingConv::ARM_APCS; |
| 1712 | else if (Subtarget->hasFPRegs() && !Subtarget->isThumb1Only() && |
| 1713 | getTargetMachine().Options.FloatABIType == FloatABI::Hard && |
| 1714 | !isVarArg) |
| 1715 | return CallingConv::ARM_AAPCS_VFP; |
| 1716 | else |
| 1717 | return CallingConv::ARM_AAPCS; |
| 1718 | case CallingConv::Fast: |
| 1719 | case CallingConv::CXX_FAST_TLS: |
| 1720 | if (!getTM().isAAPCS_ABI()) { |
| 1721 | if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && !isVarArg) |
| 1722 | return CallingConv::Fast; |
| 1723 | return CallingConv::ARM_APCS; |
| 1724 | } else if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && |
| 1725 | !isVarArg) |
| 1726 | return CallingConv::ARM_AAPCS_VFP; |
| 1727 | else |
| 1728 | return CallingConv::ARM_AAPCS; |
| 1729 | } |
| 1730 | } |
| 1731 | |
| 1732 | CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC, |
| 1733 | bool isVarArg) const { |
| 1734 | return CCAssignFnForNode(CC, Return: false, isVarArg); |
| 1735 | } |
| 1736 | |
| 1737 | CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC, |
| 1738 | bool isVarArg) const { |
| 1739 | return CCAssignFnForNode(CC, Return: true, isVarArg); |
| 1740 | } |
| 1741 | |
| 1742 | /// CCAssignFnForNode - Selects the correct CCAssignFn for the given |
| 1743 | /// calling convention.
| 1744 | CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC, |
| 1745 | bool Return, |
| 1746 | bool isVarArg) const { |
| 1747 | switch (getEffectiveCallingConv(CC, isVarArg)) { |
| 1748 | default: |
| 1749 | report_fatal_error(reason: "Unsupported calling convention" ); |
| 1750 | case CallingConv::ARM_APCS: |
| 1751 | return (Return ? RetCC_ARM_APCS : CC_ARM_APCS); |
| 1752 | case CallingConv::ARM_AAPCS: |
| 1753 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); |
| 1754 | case CallingConv::ARM_AAPCS_VFP: |
| 1755 | return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); |
| 1756 | case CallingConv::Fast: |
| 1757 | return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); |
| 1758 | case CallingConv::GHC: |
| 1759 | return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC); |
| 1760 | case CallingConv::PreserveMost: |
| 1761 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); |
| 1762 | case CallingConv::PreserveAll: |
| 1763 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); |
| 1764 | case CallingConv::CFGuard_Check: |
| 1765 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_Win32_CFGuard_Check); |
| 1766 | } |
| 1767 | } |
| 1768 | |
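|      | // Move a value that arrived in the low bits of a wider location type (LocVT)
|      | // into a half-precision value of type ValVT: with full FP16 support this is a
|      | // single VMOVhr, otherwise a truncate followed by a bitcast.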
| 1769 | SDValue ARMTargetLowering::MoveToHPR(const SDLoc &dl, SelectionDAG &DAG, |
| 1770 | MVT LocVT, MVT ValVT, SDValue Val) const { |
| 1771 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocVT.getSizeInBits()), |
| 1772 | Operand: Val); |
| 1773 | if (Subtarget->hasFullFP16()) { |
| 1774 | Val = DAG.getNode(Opcode: ARMISD::VMOVhr, DL: dl, VT: ValVT, Operand: Val); |
| 1775 | } else { |
| 1776 | Val = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, |
| 1777 | VT: MVT::getIntegerVT(BitWidth: ValVT.getSizeInBits()), Operand: Val); |
| 1778 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: ValVT, Operand: Val); |
| 1779 | } |
| 1780 | return Val; |
| 1781 | } |
| 1782 | |
| 1783 | SDValue ARMTargetLowering::MoveFromHPR(const SDLoc &dl, SelectionDAG &DAG, |
| 1784 | MVT LocVT, MVT ValVT, |
| 1785 | SDValue Val) const { |
| 1786 | if (Subtarget->hasFullFP16()) { |
| 1787 | Val = DAG.getNode(Opcode: ARMISD::VMOVrh, DL: dl, |
| 1788 | VT: MVT::getIntegerVT(BitWidth: LocVT.getSizeInBits()), Operand: Val); |
| 1789 | } else { |
| 1790 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, |
| 1791 | VT: MVT::getIntegerVT(BitWidth: ValVT.getSizeInBits()), Operand: Val); |
| 1792 | Val = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, |
| 1793 | VT: MVT::getIntegerVT(BitWidth: LocVT.getSizeInBits()), Operand: Val); |
| 1794 | } |
| 1795 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: LocVT, Operand: Val); |
| 1796 | } |
| 1797 | |
| 1798 | /// LowerCallResult - Lower the result values of a call into the |
| 1799 | /// appropriate copies out of appropriate physical registers. |
| 1800 | SDValue ARMTargetLowering::LowerCallResult( |
| 1801 | SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg, |
| 1802 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
| 1803 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn, |
| 1804 | SDValue ThisVal, bool isCmseNSCall) const { |
| 1805 | // Assign locations to each value returned by this call. |
| 1806 | SmallVector<CCValAssign, 16> RVLocs; |
| 1807 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, |
| 1808 | *DAG.getContext()); |
| 1809 | CCInfo.AnalyzeCallResult(Ins, Fn: CCAssignFnForReturn(CC: CallConv, isVarArg)); |
| 1810 | |
| 1811 | // Copy all of the result registers out of their specified physreg. |
| 1812 | for (unsigned i = 0; i != RVLocs.size(); ++i) { |
| 1813 | CCValAssign VA = RVLocs[i]; |
| 1814 | |
| 1815 | // Pass the 'this' value directly from the argument to the return value, to
| 1816 | // avoid register unit interference.
| 1817 | if (i == 0 && isThisReturn) { |
| 1818 | assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 && |
| 1819 | "unexpected return calling convention register assignment" ); |
| 1820 | InVals.push_back(Elt: ThisVal); |
| 1821 | continue; |
| 1822 | } |
| 1823 | |
| 1824 | SDValue Val; |
| 1825 | if (VA.needsCustom() && |
| 1826 | (VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2f64)) { |
| 1827 | // Handle f64 or half of a v2f64. |
| 1828 | SDValue Lo = DAG.getCopyFromReg(Chain, dl, Reg: VA.getLocReg(), VT: MVT::i32, |
| 1829 | Glue: InGlue); |
| 1830 | Chain = Lo.getValue(R: 1); |
| 1831 | InGlue = Lo.getValue(R: 2); |
| 1832 | VA = RVLocs[++i]; // skip ahead to next loc |
| 1833 | SDValue Hi = DAG.getCopyFromReg(Chain, dl, Reg: VA.getLocReg(), VT: MVT::i32, |
| 1834 | Glue: InGlue); |
| 1835 | Chain = Hi.getValue(R: 1); |
| 1836 | InGlue = Hi.getValue(R: 2); |
| 1837 | if (!Subtarget->isLittle()) |
| 1838 | std::swap (a&: Lo, b&: Hi); |
| 1839 | Val = DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: Lo, N2: Hi); |
| 1840 | |
| 1841 | if (VA.getLocVT() == MVT::v2f64) { |
| 1842 | SDValue Vec = DAG.getNode(Opcode: ISD::UNDEF, DL: dl, VT: MVT::v2f64); |
| 1843 | Vec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: Vec, N2: Val, |
| 1844 | N3: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 1845 | |
| 1846 | VA = RVLocs[++i]; // skip ahead to next loc |
| 1847 | Lo = DAG.getCopyFromReg(Chain, dl, Reg: VA.getLocReg(), VT: MVT::i32, Glue: InGlue); |
| 1848 | Chain = Lo.getValue(R: 1); |
| 1849 | InGlue = Lo.getValue(R: 2); |
| 1850 | VA = RVLocs[++i]; // skip ahead to next loc |
| 1851 | Hi = DAG.getCopyFromReg(Chain, dl, Reg: VA.getLocReg(), VT: MVT::i32, Glue: InGlue); |
| 1852 | Chain = Hi.getValue(R: 1); |
| 1853 | InGlue = Hi.getValue(R: 2); |
| 1854 | if (!Subtarget->isLittle()) |
| 1855 | std::swap (a&: Lo, b&: Hi); |
| 1856 | Val = DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: Lo, N2: Hi); |
| 1857 | Val = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: Vec, N2: Val, |
| 1858 | N3: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
| 1859 | } |
| 1860 | } else { |
| 1861 | Val = DAG.getCopyFromReg(Chain, dl, Reg: VA.getLocReg(), VT: VA.getLocVT(), |
| 1862 | Glue: InGlue); |
| 1863 | Chain = Val.getValue(R: 1); |
| 1864 | InGlue = Val.getValue(R: 2); |
| 1865 | } |
| 1866 | |
| 1867 | switch (VA.getLocInfo()) { |
| 1868 | default: llvm_unreachable("Unknown loc info!" ); |
| 1869 | case CCValAssign::Full: break; |
| 1870 | case CCValAssign::BCvt: |
| 1871 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getValVT(), Operand: Val); |
| 1872 | break; |
| 1873 | } |
| 1874 | |
| 1875 | // f16 arguments have their size extended to 4 bytes and are passed as if they
| 1876 | // had been copied to the LSBs of a 32-bit register.
| 1877 | // For that, the value is passed extended to i32 (soft ABI) or to f32 (hard ABI).
| 1878 | if (VA.needsCustom() && |
| 1879 | (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) |
| 1880 | Val = MoveToHPR(dl, DAG, LocVT: VA.getLocVT(), ValVT: VA.getValVT(), Val); |
| 1881 | |
| 1882 | // On CMSE Non-secure Calls, call results (returned values) whose bitwidth |
| 1883 | // is less than 32 bits must be sign- or zero-extended after the call for |
| 1884 | // security reasons. Although the ABI mandates an extension done by the |
| 1885 | // callee, the latter cannot be trusted to follow the rules of the ABI. |
| 1886 | const ISD::InputArg &Arg = Ins[VA.getValNo()]; |
| 1887 | if (isCmseNSCall && Arg.ArgVT.isScalarInteger() && |
| 1888 | VA.getLocVT().isScalarInteger() && Arg.ArgVT.bitsLT(VT: MVT::i32)) |
| 1889 | Val = handleCMSEValue(Value: Val, Arg, DAG, DL: dl); |
| 1890 | |
| 1891 | InVals.push_back(Elt: Val); |
| 1892 | } |
| 1893 | |
| 1894 | return Chain; |
| 1895 | } |
| 1896 | |
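|      | // Compute the address and pointer info of the stack slot used for an outgoing
|      | // call argument. For tail calls this is a fixed object in the caller's frame
|      | // (offset by SPDiff); otherwise it is an SP-relative address.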
| 1897 | std::pair<SDValue, MachinePointerInfo> ARMTargetLowering::computeAddrForCallArg( |
| 1898 | const SDLoc &dl, SelectionDAG &DAG, const CCValAssign &VA, SDValue StackPtr, |
| 1899 | bool IsTailCall, int SPDiff) const { |
| 1900 | SDValue DstAddr; |
| 1901 | MachinePointerInfo DstInfo; |
| 1902 | int32_t Offset = VA.getLocMemOffset(); |
| 1903 | MachineFunction &MF = DAG.getMachineFunction(); |
| 1904 | |
| 1905 | if (IsTailCall) { |
| 1906 | Offset += SPDiff; |
| 1907 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 1908 | int Size = VA.getLocVT().getFixedSizeInBits() / 8; |
| 1909 | int FI = MF.getFrameInfo().CreateFixedObject(Size, SPOffset: Offset, IsImmutable: true); |
| 1910 | DstAddr = DAG.getFrameIndex(FI, VT: PtrVT); |
| 1911 | DstInfo = |
| 1912 | MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI); |
| 1913 | } else { |
| 1914 | SDValue PtrOff = DAG.getIntPtrConstant(Val: Offset, DL: dl); |
| 1915 | DstAddr = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: getPointerTy(DL: DAG.getDataLayout()), |
| 1916 | N1: StackPtr, N2: PtrOff); |
| 1917 | DstInfo = |
| 1918 | MachinePointerInfo::getStack(MF&: DAG.getMachineFunction(), Offset); |
| 1919 | } |
| 1920 | |
| 1921 | return std::make_pair(x&: DstAddr, y&: DstInfo); |
| 1922 | } |
| 1923 | |
| 1924 | // Returns the type of copying which is required to set up a byval argument to |
| 1925 | // a tail-called function. This isn't needed for non-tail calls, because they |
| 1926 | // always need the equivalent of CopyOnce, but tail calls sometimes need two
| 1927 | // copies to avoid clobbering another argument (CopyViaTemp), and can sometimes
| 1928 | // be optimised to zero copies when forwarding an argument from the caller's
| 1929 | // caller (NoCopy). |
| 1930 | ARMTargetLowering::ByValCopyKind ARMTargetLowering::ByValNeedsCopyForTailCall( |
| 1931 | SelectionDAG &DAG, SDValue Src, SDValue Dst, ISD::ArgFlagsTy Flags) const { |
| 1932 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); |
| 1933 | ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); |
| 1934 | |
| 1935 | // Globals are always safe to copy from. |
| 1936 | if (isa<GlobalAddressSDNode>(Val: Src) || isa<ExternalSymbolSDNode>(Val: Src)) |
| 1937 | return CopyOnce; |
| 1938 | |
| 1939 | // We can only analyse frame index nodes; conservatively assume we need a
| 1940 | // temporary.
| 1941 | auto *SrcFrameIdxNode = dyn_cast<FrameIndexSDNode>(Val&: Src); |
| 1942 | auto *DstFrameIdxNode = dyn_cast<FrameIndexSDNode>(Val&: Dst); |
| 1943 | if (!SrcFrameIdxNode || !DstFrameIdxNode) |
| 1944 | return CopyViaTemp; |
| 1945 | |
| 1946 | int SrcFI = SrcFrameIdxNode->getIndex(); |
| 1947 | int DstFI = DstFrameIdxNode->getIndex(); |
| 1948 | assert(MFI.isFixedObjectIndex(DstFI) && |
| 1949 | "byval passed in non-fixed stack slot" ); |
| 1950 | |
| 1951 | int64_t SrcOffset = MFI.getObjectOffset(ObjectIdx: SrcFI); |
| 1952 | int64_t DstOffset = MFI.getObjectOffset(ObjectIdx: DstFI); |
| 1953 | |
| 1954 | // If the source is in the local frame, then the copy to the argument memory |
| 1955 | // is always valid. |
| 1956 | bool FixedSrc = MFI.isFixedObjectIndex(ObjectIdx: SrcFI); |
| 1957 | if (!FixedSrc || SrcOffset < -(int64_t)AFI->getArgRegsSaveSize())
| 1959 | return CopyOnce; |
| 1960 | |
| 1961 | // In the case of byval arguments split between registers and the stack, |
| 1962 | // computeAddrForCallArg returns a FrameIndex which corresponds only to the |
| 1963 | // stack portion, but the Src SDValue will refer to the full value, including |
| 1964 | // the local stack memory that the register portion gets stored into. We only |
| 1965 | // need to compare them for equality, so normalise on the full value version. |
| 1966 | uint64_t RegSize = Flags.getByValSize() - MFI.getObjectSize(ObjectIdx: DstFI); |
| 1967 | DstOffset -= RegSize; |
| 1968 | |
| 1969 | // If the value is already in the correct location, then no copying is |
| 1970 | // needed. If not, then we need to copy via a temporary. |
| 1971 | if (SrcOffset == DstOffset) |
| 1972 | return NoCopy; |
| 1973 | else |
| 1974 | return CopyViaTemp; |
| 1975 | } |
| 1976 | |
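|      | // Split an f64 (or half of a v2f64) argument into two i32 halves with VMOVRRD
|      | // and pass them in the locations assigned by VA and NextVA; the second half may
|      | // go in a register or on the stack. The half order depends on endianness.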
| 1977 | void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, |
| 1978 | SDValue Chain, SDValue &Arg, |
| 1979 | RegsToPassVector &RegsToPass, |
| 1980 | CCValAssign &VA, CCValAssign &NextVA, |
| 1981 | SDValue &StackPtr, |
| 1982 | SmallVectorImpl<SDValue> &MemOpChains, |
| 1983 | bool IsTailCall, |
| 1984 | int SPDiff) const { |
| 1985 | SDValue fmrrd = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
| 1986 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: Arg); |
| 1987 | unsigned id = Subtarget->isLittle() ? 0 : 1; |
| 1988 | RegsToPass.push_back(Elt: std::make_pair(x: VA.getLocReg(), y: fmrrd.getValue(R: id))); |
| 1989 | |
| 1990 | if (NextVA.isRegLoc()) |
| 1991 | RegsToPass.push_back(Elt: std::make_pair(x: NextVA.getLocReg(), y: fmrrd.getValue(R: 1-id))); |
| 1992 | else { |
| 1993 | assert(NextVA.isMemLoc()); |
| 1994 | if (!StackPtr.getNode()) |
| 1995 | StackPtr = DAG.getCopyFromReg(Chain, dl, Reg: ARM::SP, |
| 1996 | VT: getPointerTy(DL: DAG.getDataLayout())); |
| 1997 | |
| 1998 | SDValue DstAddr; |
| 1999 | MachinePointerInfo DstInfo; |
| 2000 | std::tie(args&: DstAddr, args&: DstInfo) = |
| 2001 | computeAddrForCallArg(dl, DAG, VA: NextVA, StackPtr, IsTailCall, SPDiff); |
| 2002 | MemOpChains.push_back( |
| 2003 | Elt: DAG.getStore(Chain, dl, Val: fmrrd.getValue(R: 1 - id), Ptr: DstAddr, PtrInfo: DstInfo)); |
| 2004 | } |
| 2005 | } |
| 2006 | |
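|      | // Returns true if calls with this calling convention are guaranteed to be
|      | // tail-call optimised: fastcc only when GuaranteedTailCallOpt is enabled,
|      | // tailcc and swifttailcc always.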
| 2007 | static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) { |
| 2008 | return (CC == CallingConv::Fast && GuaranteeTailCalls) || |
| 2009 | CC == CallingConv::Tail || CC == CallingConv::SwiftTail; |
| 2010 | } |
| 2011 | |
| 2012 | /// LowerCall - Lower a call into a callseq_start <-
| 2013 | /// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
| 2014 | /// nodes. |
| 2015 | SDValue |
| 2016 | ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, |
| 2017 | SmallVectorImpl<SDValue> &InVals) const { |
| 2018 | SelectionDAG &DAG = CLI.DAG; |
| 2019 | SDLoc &dl = CLI.DL; |
| 2020 | SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; |
| 2021 | SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; |
| 2022 | SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; |
| 2023 | SDValue Chain = CLI.Chain; |
| 2024 | SDValue Callee = CLI.Callee; |
| 2025 | bool &isTailCall = CLI.IsTailCall; |
| 2026 | CallingConv::ID CallConv = CLI.CallConv; |
| 2027 | bool doesNotRet = CLI.DoesNotReturn; |
| 2028 | bool isVarArg = CLI.IsVarArg; |
| 2029 | const CallBase *CB = CLI.CB; |
| 2030 | |
| 2031 | MachineFunction &MF = DAG.getMachineFunction(); |
| 2032 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 2033 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); |
| 2034 | MachineFunction::CallSiteInfo CSInfo; |
| 2035 | bool isStructRet = !Outs.empty() && Outs[0].Flags.isSRet();
| 2036 | bool isThisReturn = false; |
| 2037 | bool isCmseNSCall = false; |
| 2038 | bool isSibCall = false; |
| 2039 | bool PreferIndirect = false; |
| 2040 | bool GuardWithBTI = false; |
| 2041 | |
| 2042 | // Analyze operands of the call, assigning locations to each operand. |
| 2043 | SmallVector<CCValAssign, 16> ArgLocs; |
| 2044 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, |
| 2045 | *DAG.getContext()); |
| 2046 | CCInfo.AnalyzeCallOperands(Outs, Fn: CCAssignFnForCall(CC: CallConv, isVarArg)); |
| 2047 | |
| 2048 | // Lower 'returns_twice' calls to a pseudo-instruction. |
| 2049 | if (CLI.CB && CLI.CB->getAttributes().hasFnAttr(Kind: Attribute::ReturnsTwice) && |
| 2050 | !Subtarget->noBTIAtReturnTwice()) |
| 2051 | GuardWithBTI = AFI->branchTargetEnforcement(); |
| 2052 | |
| 2053 | // Set type id for call site info. |
| 2054 | if (MF.getTarget().Options.EmitCallGraphSection && CB && CB->isIndirectCall()) |
| 2055 | CSInfo = MachineFunction::CallSiteInfo(*CB); |
| 2056 | |
| 2057 | // Determine whether this is a non-secure function call. |
| 2058 | if (CLI.CB && CLI.CB->getAttributes().hasFnAttr(Kind: "cmse_nonsecure_call" )) |
| 2059 | isCmseNSCall = true; |
| 2060 | |
| 2061 | // Disable tail calls if they're not supported. |
| 2062 | if (!Subtarget->supportsTailCall()) |
| 2063 | isTailCall = false; |
| 2064 | |
| 2065 | // For both the non-secure calls and the returns from a CMSE entry function, |
| 2066 | // the function needs to do some extra work after the call, or before the |
| 2067 | // return, respectively, so it cannot end with a tail call.
| 2068 | if (isCmseNSCall || AFI->isCmseNSEntryFunction()) |
| 2069 | isTailCall = false; |
| 2070 | |
| 2071 | if (isa<GlobalAddressSDNode>(Val: Callee)) { |
| 2072 | // If we're optimizing for minimum size and the function is called three or |
| 2073 | // more times in this block, we can improve code size by calling indirectly,
| 2074 | // as BLXr has a 16-bit encoding. |
| 2075 | auto *GV = cast<GlobalAddressSDNode>(Val&: Callee)->getGlobal(); |
| 2076 | if (CLI.CB) { |
| 2077 | auto *BB = CLI.CB->getParent(); |
| 2078 | PreferIndirect = Subtarget->isThumb() && Subtarget->hasMinSize() && |
| 2079 | count_if(Range: GV->users(), P: [&BB](const User *U) { |
| 2080 | return isa<Instruction>(Val: U) && |
| 2081 | cast<Instruction>(Val: U)->getParent() == BB; |
| 2082 | }) > 2; |
| 2083 | } |
| 2084 | } |
| 2085 | if (isTailCall) { |
| 2086 | // Check if it's really possible to do a tail call. |
| 2087 | isTailCall = |
| 2088 | IsEligibleForTailCallOptimization(CLI, CCInfo, ArgLocs, isIndirect: PreferIndirect); |
| 2089 | |
| 2090 | if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt && |
| 2091 | CallConv != CallingConv::Tail && CallConv != CallingConv::SwiftTail) |
| 2092 | isSibCall = true; |
| 2093 | |
| 2094 | // We don't support GuaranteedTailCallOpt for ARM, only automatically |
| 2095 | // detected sibcalls. |
| 2096 | if (isTailCall) |
| 2097 | ++NumTailCalls; |
| 2098 | } |
| 2099 | |
| 2100 | if (!isTailCall && CLI.CB && CLI.CB->isMustTailCall()) |
| 2101 | report_fatal_error(reason: "failed to perform tail call elimination on a call " |
| 2102 | "site marked musttail" ); |
| 2103 | |
| 2104 | // Get a count of how many bytes are to be pushed on the stack. |
| 2105 | unsigned NumBytes = CCInfo.getStackSize(); |
| 2106 | |
| 2107 | // SPDiff is the byte offset of the call's argument area from the callee's. |
| 2108 | // Stores to callee stack arguments will be placed in FixedStackSlots offset |
| 2109 | // by this amount for a tail call. In a sibling call it must be 0 because the |
| 2110 | // caller will deallocate the entire stack and the callee still expects its |
| 2111 | // arguments to begin at SP+0. Completely unused for non-tail calls. |
| 2112 | int SPDiff = 0; |
| 2113 | |
| 2114 | if (isTailCall && !isSibCall) { |
| 2115 | auto FuncInfo = MF.getInfo<ARMFunctionInfo>(); |
| 2116 | unsigned NumReusableBytes = FuncInfo->getArgumentStackSize(); |
| 2117 | |
| 2118 | // Since the callee will pop the argument stack as a tail call, we must keep
| 2119 | // the popped size aligned to the stack alignment.
| 2120 | MaybeAlign StackAlign = DAG.getDataLayout().getStackAlignment(); |
| 2121 | assert(StackAlign && "data layout string is missing stack alignment" ); |
| 2122 | NumBytes = alignTo(Size: NumBytes, A: *StackAlign); |
| 2123 | |
| 2124 | // SPDiff will be negative if this tail call requires more space than we |
| 2125 | // would automatically have in our incoming argument space. Positive if we |
| 2126 | // can actually shrink the stack. |
| 2127 | SPDiff = NumReusableBytes - NumBytes; |
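|      | // For example, a caller with 8 bytes of reusable incoming argument space
|      | // making a tail call that needs 24 bytes of outgoing stack arguments gets
|      | // SPDiff == -16, so 16 extra bytes must be reserved below.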
| 2128 | |
| 2129 | // If this call requires more stack than we have available from |
| 2130 | // LowerFormalArguments, tell FrameLowering to reserve space for it. |
| 2131 | if (SPDiff < 0 && AFI->getArgRegsSaveSize() < (unsigned)-SPDiff) |
| 2132 | AFI->setArgRegsSaveSize(-SPDiff); |
| 2133 | } |
| 2134 | |
| 2135 | if (isSibCall) { |
| 2136 | // For sibling tail calls, memory operands are available in our caller's stack. |
| 2137 | NumBytes = 0; |
| 2138 | } else { |
| 2139 | // Adjust the stack pointer for the new arguments... |
| 2140 | // These operations are automatically eliminated by the prolog/epilog pass |
| 2141 | Chain = DAG.getCALLSEQ_START(Chain, InSize: isTailCall ? 0 : NumBytes, OutSize: 0, DL: dl); |
| 2142 | } |
| 2143 | |
| 2144 | SDValue StackPtr = |
| 2145 | DAG.getCopyFromReg(Chain, dl, Reg: ARM::SP, VT: getPointerTy(DL: DAG.getDataLayout())); |
| 2146 | |
| 2147 | RegsToPassVector RegsToPass; |
| 2148 | SmallVector<SDValue, 8> MemOpChains; |
| 2149 | |
| 2150 | // If we are doing a tail-call, any byval arguments will be written to stack |
| 2151 | // space which was used for incoming arguments. If any of the values being used
| 2152 | // are incoming byval arguments to this function, then they might be |
| 2153 | // overwritten by the stores of the outgoing arguments. To avoid this, we |
| 2154 | // need to make a temporary copy of them in local stack space, then copy back |
| 2155 | // to the argument area. |
| 2156 | DenseMap<unsigned, SDValue> ByValTemporaries; |
| 2157 | SDValue ByValTempChain; |
| 2158 | if (isTailCall) { |
| 2159 | SmallVector<SDValue, 8> ByValCopyChains; |
| 2160 | for (const CCValAssign &VA : ArgLocs) { |
| 2161 | unsigned ArgIdx = VA.getValNo(); |
| 2162 | SDValue Src = OutVals[ArgIdx]; |
| 2163 | ISD::ArgFlagsTy Flags = Outs[ArgIdx].Flags; |
| 2164 | |
| 2165 | if (!Flags.isByVal()) |
| 2166 | continue; |
| 2167 | |
| 2168 | SDValue Dst; |
| 2169 | MachinePointerInfo DstInfo; |
| 2170 | std::tie(args&: Dst, args&: DstInfo) = |
| 2171 | computeAddrForCallArg(dl, DAG, VA, StackPtr: SDValue(), IsTailCall: true, SPDiff); |
| 2172 | ByValCopyKind Copy = ByValNeedsCopyForTailCall(DAG, Src, Dst, Flags); |
| 2173 | |
| 2174 | if (Copy == NoCopy) { |
| 2175 | // If the argument is already at the correct offset on the stack |
| 2176 | // (because we are forwarding a byval argument from our caller), we |
| 2177 | // don't need any copying. |
| 2178 | continue; |
| 2179 | } else if (Copy == CopyOnce) { |
| 2180 | // If the argument is in our local stack frame, no other argument |
| 2181 | // preparation can clobber it, so we can copy it to the final location |
| 2182 | // later. |
| 2183 | ByValTemporaries[ArgIdx] = Src; |
| 2184 | } else { |
| 2185 | assert(Copy == CopyViaTemp && "unexpected enum value" ); |
| 2186 | // If we might be copying this argument from the outgoing argument |
| 2187 | // stack area, we need to copy via a temporary in the local stack |
| 2188 | // frame. |
| 2189 | int TempFrameIdx = MFI.CreateStackObject( |
| 2190 | Size: Flags.getByValSize(), Alignment: Flags.getNonZeroByValAlign(), isSpillSlot: false); |
| 2191 | SDValue Temp = |
| 2192 | DAG.getFrameIndex(FI: TempFrameIdx, VT: getPointerTy(DL: DAG.getDataLayout())); |
| 2193 | |
| 2194 | SDValue SizeNode = DAG.getConstant(Val: Flags.getByValSize(), DL: dl, VT: MVT::i32); |
| 2195 | SDValue AlignNode = |
| 2196 | DAG.getConstant(Val: Flags.getNonZeroByValAlign().value(), DL: dl, VT: MVT::i32); |
| 2197 | |
| 2198 | SDVTList VTs = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue); |
| 2199 | SDValue Ops[] = {Chain, Temp, Src, SizeNode, AlignNode}; |
| 2200 | ByValCopyChains.push_back( |
| 2201 | Elt: DAG.getNode(Opcode: ARMISD::COPY_STRUCT_BYVAL, DL: dl, VTList: VTs, Ops)); |
| 2202 | ByValTemporaries[ArgIdx] = Temp; |
| 2203 | } |
| 2204 | } |
| 2205 | if (!ByValCopyChains.empty()) |
| 2206 | ByValTempChain = |
| 2207 | DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, Ops: ByValCopyChains); |
| 2208 | } |
| 2209 | |
| 2210 | // During a tail call, stores to the argument area must happen after all of |
| 2211 | // the function's incoming arguments have been loaded because they may alias. |
| 2212 | // This is done by folding in a TokenFactor from LowerFormalArguments, but |
| 2213 | // there's no point in doing so repeatedly so this tracks whether that's |
| 2214 | // happened yet. |
| 2215 | bool AfterFormalArgLoads = false; |
| 2216 | |
| 2217 | // Walk the register/memloc assignments, inserting copies/loads. In the case |
| 2218 | // of tail call optimization, arguments are handled later. |
| 2219 | for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); |
| 2220 | i != e; |
| 2221 | ++i, ++realArgIdx) { |
| 2222 | CCValAssign &VA = ArgLocs[i]; |
| 2223 | SDValue Arg = OutVals[realArgIdx]; |
| 2224 | ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; |
| 2225 | bool isByVal = Flags.isByVal(); |
| 2226 | |
| 2227 | // Promote the value if needed. |
| 2228 | switch (VA.getLocInfo()) { |
| 2229 | default: llvm_unreachable("Unknown loc info!" ); |
| 2230 | case CCValAssign::Full: break; |
| 2231 | case CCValAssign::SExt: |
| 2232 | Arg = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
| 2233 | break; |
| 2234 | case CCValAssign::ZExt: |
| 2235 | Arg = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
| 2236 | break; |
| 2237 | case CCValAssign::AExt: |
| 2238 | Arg = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
| 2239 | break; |
| 2240 | case CCValAssign::BCvt: |
| 2241 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
| 2242 | break; |
| 2243 | } |
| 2244 | |
| 2245 | if (isTailCall && VA.isMemLoc() && !AfterFormalArgLoads) { |
| 2246 | Chain = DAG.getStackArgumentTokenFactor(Chain); |
| 2247 | if (ByValTempChain) { |
| 2248 | // In the case of large byval copies, re-using the stack frame for tail calls
| 2249 | // can lead to overwriting incoming arguments on the stack. Force |
| 2250 | // loading these stack arguments before the copy to avoid that. |
| 2251 | SmallVector<SDValue, 8> IncomingLoad; |
| 2252 | for (unsigned I = 0; I < OutVals.size(); ++I) { |
| 2253 | if (Outs[I].Flags.isByVal()) |
| 2254 | continue; |
| 2255 | |
| 2256 | SDValue OutVal = OutVals[I]; |
| 2257 | LoadSDNode *OutLN = dyn_cast_or_null<LoadSDNode>(Val&: OutVal); |
| 2258 | if (!OutLN) |
| 2259 | continue; |
| 2260 | |
| 2261 | FrameIndexSDNode *FIN = |
| 2262 | dyn_cast_or_null<FrameIndexSDNode>(Val: OutLN->getBasePtr()); |
| 2263 | if (!FIN) |
| 2264 | continue; |
| 2265 | |
| 2266 | if (!MFI.isFixedObjectIndex(ObjectIdx: FIN->getIndex())) |
| 2267 | continue; |
| 2268 | |
| 2269 | for (const CCValAssign &VA : ArgLocs) { |
| 2270 | if (VA.isMemLoc()) |
| 2271 | IncomingLoad.push_back(Elt: OutVal.getValue(R: 1)); |
| 2272 | } |
| 2273 | } |
| 2274 | |
| 2275 | // Update the chain to force loads for potentially clobbered argument |
| 2276 | // loads to happen before the byval copy. |
| 2277 | if (!IncomingLoad.empty()) { |
| 2278 | IncomingLoad.push_back(Elt: Chain); |
| 2279 | Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, Ops: IncomingLoad); |
| 2280 | } |
| 2281 | |
| 2282 | Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, N1: Chain, |
| 2283 | N2: ByValTempChain); |
| 2284 | } |
| 2285 | AfterFormalArgLoads = true; |
| 2286 | } |
| 2287 | |
| 2288 | // f16 arguments have their size extended to 4 bytes and are passed as if they
| 2289 | // had been copied to the LSBs of a 32-bit register.
| 2290 | // To do that, the value is passed extended to i32 (soft ABI) or to f32 (hard ABI).
| 2291 | if (VA.needsCustom() && |
| 2292 | (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) { |
| 2293 | Arg = MoveFromHPR(dl, DAG, LocVT: VA.getLocVT(), ValVT: VA.getValVT(), Val: Arg); |
| 2294 | } else { |
| 2295 | // f16 arguments could have been extended prior to argument lowering. |
| 2296 | // Mask these arguments if this is a CMSE nonsecure call.
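|      | // Clearing the unused upper bits ensures no stale register contents leak
|      | // to the non-secure callee.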
| 2297 | auto ArgVT = Outs[realArgIdx].ArgVT; |
| 2298 | if (isCmseNSCall && (ArgVT == MVT::f16)) { |
| 2299 | auto LocBits = VA.getLocVT().getSizeInBits(); |
| 2300 | auto MaskValue = APInt::getLowBitsSet(numBits: LocBits, loBitsSet: ArgVT.getSizeInBits()); |
| 2301 | SDValue Mask = |
| 2302 | DAG.getConstant(Val: MaskValue, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits)); |
| 2303 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits), Operand: Arg); |
| 2304 | Arg = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits), N1: Arg, N2: Mask); |
| 2305 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
| 2306 | } |
| 2307 | } |
| 2308 | |
| 2309 | // f64 and v2f64 might be passed in i32 pairs and must be split into pieces |
| 2310 | if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) { |
| 2311 | SDValue Op0 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, N1: Arg, |
| 2312 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 2313 | SDValue Op1 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, N1: Arg, |
| 2314 | N2: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
| 2315 | |
| 2316 | PassF64ArgInRegs(dl, DAG, Chain, Arg&: Op0, RegsToPass, VA, NextVA&: ArgLocs[++i], |
| 2317 | StackPtr, MemOpChains, IsTailCall: isTailCall, SPDiff); |
| 2318 | |
| 2319 | VA = ArgLocs[++i]; // skip ahead to next loc |
| 2320 | if (VA.isRegLoc()) { |
| 2321 | PassF64ArgInRegs(dl, DAG, Chain, Arg&: Op1, RegsToPass, VA, NextVA&: ArgLocs[++i], |
| 2322 | StackPtr, MemOpChains, IsTailCall: isTailCall, SPDiff); |
| 2323 | } else { |
| 2324 | assert(VA.isMemLoc()); |
| 2325 | SDValue DstAddr; |
| 2326 | MachinePointerInfo DstInfo; |
| 2327 | std::tie(args&: DstAddr, args&: DstInfo) = |
| 2328 | computeAddrForCallArg(dl, DAG, VA, StackPtr, IsTailCall: isTailCall, SPDiff); |
| 2329 | MemOpChains.push_back(Elt: DAG.getStore(Chain, dl, Val: Op1, Ptr: DstAddr, PtrInfo: DstInfo)); |
| 2330 | } |
| 2331 | } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) { |
| 2332 | PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, NextVA&: ArgLocs[++i], |
| 2333 | StackPtr, MemOpChains, IsTailCall: isTailCall, SPDiff); |
| 2334 | } else if (VA.isRegLoc()) { |
| 2335 | if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() && |
| 2336 | Outs[0].VT == MVT::i32) { |
| 2337 | assert(VA.getLocVT() == MVT::i32 && |
| 2338 | "unexpected calling convention register assignment" ); |
| 2339 | assert(!Ins.empty() && Ins[0].VT == MVT::i32 && |
| 2340 | "unexpected use of 'returned'" ); |
| 2341 | isThisReturn = true; |
| 2342 | } |
| 2343 | const TargetOptions &Options = DAG.getTarget().Options; |
| 2344 | if (Options.EmitCallSiteInfo) |
| 2345 | CSInfo.ArgRegPairs.emplace_back(Args: VA.getLocReg(), Args&: i); |
| 2346 | RegsToPass.push_back(Elt: std::make_pair(x: VA.getLocReg(), y&: Arg)); |
| 2347 | } else if (isByVal) { |
| 2348 | assert(VA.isMemLoc()); |
| 2349 | unsigned offset = 0; |
| 2350 | |
| 2351 | // This byval aggregate may be split between registers and memory. Query how
| 2352 | // many byval parameters use registers and how many have been processed so far.
| 2353 | unsigned ByValArgsCount = CCInfo.getInRegsParamsCount(); |
| 2354 | unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed(); |
| 2355 | |
| 2356 | SDValue ByValSrc; |
| 2357 | bool NeedsStackCopy; |
| 2358 | if (auto It = ByValTemporaries.find(Val: realArgIdx); |
| 2359 | It != ByValTemporaries.end()) { |
| 2360 | ByValSrc = It->second; |
| 2361 | NeedsStackCopy = true; |
| 2362 | } else { |
| 2363 | ByValSrc = Arg; |
| 2364 | NeedsStackCopy = !isTailCall; |
| 2365 | } |
| 2366 | |
| 2367 | // If part of the argument is in registers, load them. |
| 2368 | if (CurByValIdx < ByValArgsCount) { |
| 2369 | unsigned RegBegin, RegEnd; |
| 2370 | CCInfo.getInRegsParamInfo(InRegsParamRecordIndex: CurByValIdx, BeginReg&: RegBegin, EndReg&: RegEnd); |
| 2371 | |
| 2372 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 2373 | unsigned int i, j; |
| 2374 | for (i = 0, j = RegBegin; j < RegEnd; i++, j++) { |
| 2375 | SDValue Const = DAG.getConstant(Val: 4*i, DL: dl, VT: MVT::i32); |
| 2376 | SDValue AddArg = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: ByValSrc, N2: Const); |
| 2377 | SDValue Load = |
| 2378 | DAG.getLoad(VT: PtrVT, dl, Chain, Ptr: AddArg, PtrInfo: MachinePointerInfo(), |
| 2379 | Alignment: DAG.InferPtrAlign(Ptr: AddArg)); |
| 2380 | MemOpChains.push_back(Elt: Load.getValue(R: 1)); |
| 2381 | RegsToPass.push_back(Elt: std::make_pair(x&: j, y&: Load)); |
| 2382 | } |
| 2383 | |
| 2384 | // If the parameter size extends beyond the register area, the "offset" value
| 2385 | // lets us compute the stack slot for the remaining part correctly.
| 2386 | offset = RegEnd - RegBegin; |
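|      | // For example, a 24-byte byval assigned r2-r3 gives offset == 2, so the
|      | // remaining 16 bytes are copied to the stack below.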
| 2387 | |
| 2388 | CCInfo.nextInRegsParam(); |
| 2389 | } |
| 2390 | |
| 2391 | // If the memory part of the argument isn't already in the correct place |
| 2392 | // (which can happen with tail calls), copy it into the argument area. |
| 2393 | if (NeedsStackCopy && Flags.getByValSize() > 4 * offset) { |
| 2394 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 2395 | SDValue Dst; |
| 2396 | MachinePointerInfo DstInfo; |
| 2397 | std::tie(args&: Dst, args&: DstInfo) = |
| 2398 | computeAddrForCallArg(dl, DAG, VA, StackPtr, IsTailCall: isTailCall, SPDiff); |
| 2399 | SDValue SrcOffset = DAG.getIntPtrConstant(Val: 4*offset, DL: dl); |
| 2400 | SDValue Src = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: ByValSrc, N2: SrcOffset); |
| 2401 | SDValue SizeNode = DAG.getConstant(Val: Flags.getByValSize() - 4*offset, DL: dl, |
| 2402 | VT: MVT::i32); |
| 2403 | SDValue AlignNode = |
| 2404 | DAG.getConstant(Val: Flags.getNonZeroByValAlign().value(), DL: dl, VT: MVT::i32); |
| 2405 | |
| 2406 | SDVTList VTs = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue); |
| 2407 | SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode}; |
| 2408 | MemOpChains.push_back(Elt: DAG.getNode(Opcode: ARMISD::COPY_STRUCT_BYVAL, DL: dl, VTList: VTs, |
| 2409 | Ops)); |
| 2410 | } |
| 2411 | } else { |
| 2412 | assert(VA.isMemLoc()); |
| 2413 | SDValue DstAddr; |
| 2414 | MachinePointerInfo DstInfo; |
| 2415 | std::tie(args&: DstAddr, args&: DstInfo) = |
| 2416 | computeAddrForCallArg(dl, DAG, VA, StackPtr, IsTailCall: isTailCall, SPDiff); |
| 2417 | |
| 2418 | SDValue Store = DAG.getStore(Chain, dl, Val: Arg, Ptr: DstAddr, PtrInfo: DstInfo); |
| 2419 | MemOpChains.push_back(Elt: Store); |
| 2420 | } |
| 2421 | } |
| 2422 | |
| 2423 | if (!MemOpChains.empty()) |
| 2424 | Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, Ops: MemOpChains); |
| 2425 | |
| 2426 | // Build a sequence of copy-to-reg nodes chained together with token chain |
| 2427 | // and flag operands which copy the outgoing args into the appropriate regs. |
| 2428 | SDValue InGlue; |
| 2429 | for (const auto &[Reg, N] : RegsToPass) { |
| 2430 | Chain = DAG.getCopyToReg(Chain, dl, Reg, N, Glue: InGlue); |
| 2431 | InGlue = Chain.getValue(R: 1); |
| 2432 | } |
| 2433 | |
| 2434 | // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every |
| 2435 | // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol |
| 2436 | // node so that legalize doesn't hack it. |
| 2437 | bool isDirect = false; |
| 2438 | |
| 2439 | const TargetMachine &TM = getTargetMachine(); |
| 2440 | const GlobalValue *GVal = nullptr; |
| 2441 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Val&: Callee)) |
| 2442 | GVal = G->getGlobal(); |
| 2443 | bool isStub = !TM.shouldAssumeDSOLocal(GV: GVal) && Subtarget->isTargetMachO(); |
| 2444 | |
| 2445 | bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass()); |
| 2446 | bool isLocalARMFunc = false; |
| 2447 | auto PtrVt = getPointerTy(DL: DAG.getDataLayout()); |
| 2448 | |
| 2449 | if (Subtarget->genLongCalls()) { |
| 2450 | assert((!isPositionIndependent() || Subtarget->isTargetWindows()) && |
| 2451 | "long-calls codegen is not position independent!" ); |
| 2452 | // Handle a global address or an external symbol. If it's not one of |
| 2453 | // those, the target's already in a register, so we don't need to do |
| 2454 | // anything extra. |
| 2455 | if (isa<GlobalAddressSDNode>(Val: Callee)) { |
| 2456 | if (Subtarget->genExecuteOnly()) { |
| 2457 | if (Subtarget->useMovt()) |
| 2458 | ++NumMovwMovt; |
| 2459 | Callee = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: PtrVt, |
| 2460 | Operand: DAG.getTargetGlobalAddress(GV: GVal, DL: dl, VT: PtrVt)); |
| 2461 | } else { |
| 2462 | // Create a constant pool entry for the callee address |
| 2463 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 2464 | ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create( |
| 2465 | C: GVal, ID: ARMPCLabelIndex, Kind: ARMCP::CPValue, PCAdj: 0); |
| 2466 | |
| 2467 | // Get the address of the callee into a register |
| 2468 | SDValue Addr = DAG.getTargetConstantPool(C: CPV, VT: PtrVt, Align: Align(4)); |
| 2469 | Addr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Addr); |
| 2470 | Callee = DAG.getLoad( |
| 2471 | VT: PtrVt, dl, Chain: DAG.getEntryNode(), Ptr: Addr, |
| 2472 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 2473 | } |
| 2474 | } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Val&: Callee)) { |
| 2475 | const char *Sym = S->getSymbol(); |
| 2476 | |
| 2477 | if (Subtarget->genExecuteOnly()) { |
| 2478 | if (Subtarget->useMovt()) |
| 2479 | ++NumMovwMovt; |
| 2480 | Callee = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: PtrVt, |
| 2481 | Operand: DAG.getTargetGlobalAddress(GV: GVal, DL: dl, VT: PtrVt)); |
| 2482 | } else { |
| 2483 | // Create a constant pool entry for the callee address |
| 2484 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 2485 | ARMConstantPoolValue *CPV = ARMConstantPoolSymbol::Create( |
| 2486 | C&: *DAG.getContext(), s: Sym, ID: ARMPCLabelIndex, PCAdj: 0); |
| 2487 | |
| 2488 | // Get the address of the callee into a register |
| 2489 | SDValue Addr = DAG.getTargetConstantPool(C: CPV, VT: PtrVt, Align: Align(4)); |
| 2490 | Addr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Addr); |
| 2491 | Callee = DAG.getLoad( |
| 2492 | VT: PtrVt, dl, Chain: DAG.getEntryNode(), Ptr: Addr, |
| 2493 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 2494 | } |
| 2495 | } |
| 2496 | } else if (isa<GlobalAddressSDNode>(Val: Callee)) { |
| 2497 | if (!PreferIndirect) { |
| 2498 | isDirect = true; |
| 2499 | bool isDef = GVal->isStrongDefinitionForLinker(); |
| 2500 | |
| 2501 | // ARM call to a local ARM function is predicable. |
| 2502 | isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking); |
| 2503 | // tBX takes a register source operand. |
| 2504 | if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { |
| 2505 | assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?" ); |
| 2506 | Callee = DAG.getNode( |
| 2507 | Opcode: ARMISD::WrapperPIC, DL: dl, VT: PtrVt, |
| 2508 | Operand: DAG.getTargetGlobalAddress(GV: GVal, DL: dl, VT: PtrVt, offset: 0, TargetFlags: ARMII::MO_NONLAZY)); |
| 2509 | Callee = DAG.getLoad( |
| 2510 | VT: PtrVt, dl, Chain: DAG.getEntryNode(), Ptr: Callee, |
| 2511 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction()), Alignment: MaybeAlign(), |
| 2512 | MMOFlags: MachineMemOperand::MODereferenceable | |
| 2513 | MachineMemOperand::MOInvariant); |
| 2514 | } else if (Subtarget->isTargetCOFF()) { |
| 2515 | assert(Subtarget->isTargetWindows() && |
| 2516 | "Windows is the only supported COFF target" ); |
| 2517 | unsigned TargetFlags = ARMII::MO_NO_FLAG; |
| 2518 | if (GVal->hasDLLImportStorageClass()) |
| 2519 | TargetFlags = ARMII::MO_DLLIMPORT; |
| 2520 | else if (!TM.shouldAssumeDSOLocal(GV: GVal)) |
| 2521 | TargetFlags = ARMII::MO_COFFSTUB; |
| 2522 | Callee = DAG.getTargetGlobalAddress(GV: GVal, DL: dl, VT: PtrVt, /*offset=*/0, |
| 2523 | TargetFlags); |
| 2524 | if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB)) |
| 2525 | Callee = |
| 2526 | DAG.getLoad(VT: PtrVt, dl, Chain: DAG.getEntryNode(), |
| 2527 | Ptr: DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: PtrVt, Operand: Callee), |
| 2528 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction())); |
| 2529 | } else { |
| 2530 | Callee = DAG.getTargetGlobalAddress(GV: GVal, DL: dl, VT: PtrVt, offset: 0, TargetFlags: 0); |
| 2531 | } |
| 2532 | } |
| 2533 | } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Val&: Callee)) { |
| 2534 | isDirect = true; |
| 2535 | // tBX takes a register source operand. |
| 2536 | const char *Sym = S->getSymbol(); |
| 2537 | if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { |
| 2538 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 2539 | ARMConstantPoolValue *CPV = |
| 2540 | ARMConstantPoolSymbol::Create(C&: *DAG.getContext(), s: Sym, |
| 2541 | ID: ARMPCLabelIndex, PCAdj: 4); |
| 2542 | SDValue CPAddr = DAG.getTargetConstantPool(C: CPV, VT: PtrVt, Align: Align(4)); |
| 2543 | CPAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: CPAddr); |
| 2544 | Callee = DAG.getLoad( |
| 2545 | VT: PtrVt, dl, Chain: DAG.getEntryNode(), Ptr: CPAddr, |
| 2546 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 2547 | SDValue PICLabel = DAG.getConstant(Val: ARMPCLabelIndex, DL: dl, VT: MVT::i32); |
| 2548 | Callee = DAG.getNode(Opcode: ARMISD::PIC_ADD, DL: dl, VT: PtrVt, N1: Callee, N2: PICLabel); |
| 2549 | } else { |
| 2550 | Callee = DAG.getTargetExternalSymbol(Sym, VT: PtrVt, TargetFlags: 0); |
| 2551 | } |
| 2552 | } |
| 2553 | |
| 2554 | if (isCmseNSCall) { |
| 2555 | assert(!isARMFunc && !isDirect && |
| 2556 | "Cannot handle call to ARM function or direct call" ); |
| 2557 | if (NumBytes > 0) { |
| 2558 | DAG.getContext()->diagnose( |
| 2559 | DI: DiagnosticInfoUnsupported(DAG.getMachineFunction().getFunction(), |
| 2560 | "call to non-secure function would require " |
| 2561 | "passing arguments on stack" , |
| 2562 | dl.getDebugLoc())); |
| 2563 | } |
| 2564 | if (isStructRet) { |
| 2565 | DAG.getContext()->diagnose(DI: DiagnosticInfoUnsupported( |
| 2566 | DAG.getMachineFunction().getFunction(), |
| 2567 | "call to non-secure function would return value through pointer" , |
| 2568 | dl.getDebugLoc())); |
| 2569 | } |
| 2570 | } |
| 2571 | |
| 2572 | // FIXME: handle tail calls differently. |
| 2573 | unsigned CallOpc; |
| 2574 | if (Subtarget->isThumb()) { |
| 2575 | if (GuardWithBTI) |
| 2576 | CallOpc = ARMISD::t2CALL_BTI; |
| 2577 | else if (isCmseNSCall) |
| 2578 | CallOpc = ARMISD::tSECALL; |
| 2579 | else if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) |
| 2580 | CallOpc = ARMISD::CALL_NOLINK; |
| 2581 | else |
| 2582 | CallOpc = ARMISD::CALL; |
| 2583 | } else { |
| 2584 | if (!isDirect && !Subtarget->hasV5TOps()) |
| 2585 | CallOpc = ARMISD::CALL_NOLINK; |
| 2586 | else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() && |
| 2587 | // Emit regular call when code size is the priority |
| 2588 | !Subtarget->hasMinSize()) |
| 2589 | // "mov lr, pc; b _foo" to avoid confusing the RSP |
| 2590 | CallOpc = ARMISD::CALL_NOLINK; |
| 2591 | else |
| 2592 | CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL; |
| 2593 | } |
| 2594 | |
| 2595 | // We don't usually want to end the call-sequence here because we would tidy |
| 2596 | // the frame up *after* the call, however in the ABI-changing tail-call case |
| 2597 | // we've carefully laid out the parameters so that when sp is reset they'll be |
| 2598 | // in the correct location. |
| 2599 | if (isTailCall && !isSibCall) { |
| 2600 | Chain = DAG.getCALLSEQ_END(Chain, Size1: 0, Size2: 0, Glue: InGlue, DL: dl); |
| 2601 | InGlue = Chain.getValue(R: 1); |
| 2602 | } |
| 2603 | |
| 2604 | std::vector<SDValue> Ops; |
| 2605 | Ops.push_back(x: Chain); |
| 2606 | Ops.push_back(x: Callee); |
| 2607 | |
| 2608 | if (isTailCall) { |
| 2609 | Ops.push_back(x: DAG.getSignedTargetConstant(Val: SPDiff, DL: dl, VT: MVT::i32)); |
| 2610 | } |
| 2611 | |
| 2612 | // Add argument registers to the end of the list so that they are known live |
| 2613 | // into the call. |
| 2614 | for (const auto &[Reg, N] : RegsToPass) |
| 2615 | Ops.push_back(x: DAG.getRegister(Reg, VT: N.getValueType())); |
| 2616 | |
| 2617 | // Add a register mask operand representing the call-preserved registers. |
| 2618 | const uint32_t *Mask; |
| 2619 | const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo(); |
| 2620 | if (isThisReturn) { |
| 2621 | // For 'this' returns, use the R0-preserving mask if applicable |
| 2622 | Mask = ARI->getThisReturnPreservedMask(MF, CallConv); |
| 2623 | if (!Mask) { |
| 2624 | // Set isThisReturn to false if the calling convention is not one that |
| 2625 | // allows 'returned' to be modeled in this way, so LowerCallResult does |
| 2626 | // not try to pass 'this' straight through |
| 2627 | isThisReturn = false; |
| 2628 | Mask = ARI->getCallPreservedMask(MF, CallConv); |
| 2629 | } |
| 2630 | } else |
| 2631 | Mask = ARI->getCallPreservedMask(MF, CallConv); |
| 2632 | |
| 2633 | assert(Mask && "Missing call preserved mask for calling convention" ); |
| 2634 | Ops.push_back(x: DAG.getRegisterMask(RegMask: Mask)); |
| 2635 | |
| 2636 | if (InGlue.getNode()) |
| 2637 | Ops.push_back(x: InGlue); |
| 2638 | |
| 2639 | if (isTailCall) { |
| 2640 | MF.getFrameInfo().setHasTailCall(); |
| 2641 | SDValue Ret = DAG.getNode(Opcode: ARMISD::TC_RETURN, DL: dl, VT: MVT::Other, Ops); |
| 2642 | if (CLI.CFIType) |
| 2643 | Ret.getNode()->setCFIType(CLI.CFIType->getZExtValue()); |
| 2644 | DAG.addNoMergeSiteInfo(Node: Ret.getNode(), NoMerge: CLI.NoMerge); |
| 2645 | DAG.addCallSiteInfo(Node: Ret.getNode(), CallInfo: std::move(CSInfo)); |
| 2646 | return Ret; |
| 2647 | } |
| 2648 | |
| 2649 | // Returns a chain and a flag for retval copy to use. |
| 2650 | Chain = DAG.getNode(Opcode: CallOpc, DL: dl, ResultTys: {MVT::Other, MVT::Glue}, Ops); |
| 2651 | if (CLI.CFIType) |
| 2652 | Chain.getNode()->setCFIType(CLI.CFIType->getZExtValue()); |
| 2653 | DAG.addNoMergeSiteInfo(Node: Chain.getNode(), NoMerge: CLI.NoMerge); |
| 2654 | InGlue = Chain.getValue(R: 1); |
| 2655 | DAG.addCallSiteInfo(Node: Chain.getNode(), CallInfo: std::move(CSInfo)); |
| 2656 | |
| 2657 | // If we're guaranteeing tail-calls will be honoured, the callee must |
| 2658 | // pop its own argument stack on return. But this call is *not* a tail call so |
| 2659 | // we need to undo that after it returns to restore the status quo.
| 2660 | bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt; |
| 2661 | uint64_t CalleePopBytes = |
| 2662 | canGuaranteeTCO(CC: CallConv, GuaranteeTailCalls: TailCallOpt) ? alignTo(Value: NumBytes, Align: 16) : -1U; |
| 2663 | |
| 2664 | Chain = DAG.getCALLSEQ_END(Chain, Size1: NumBytes, Size2: CalleePopBytes, Glue: InGlue, DL: dl); |
| 2665 | if (!Ins.empty()) |
| 2666 | InGlue = Chain.getValue(R: 1); |
| 2667 | |
| 2668 | // Handle result values, copying them out of physregs into vregs that we |
| 2669 | // return. |
| 2670 | return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, dl, DAG, |
| 2671 | InVals, isThisReturn, |
| 2672 | ThisVal: isThisReturn ? OutVals[0] : SDValue(), isCmseNSCall); |
| 2673 | } |
| 2674 | |
| 2675 | /// HandleByVal - Every parameter *after* a byval parameter is passed |
| 2676 | /// on the stack. Remember the next parameter register to allocate, |
| 2677 | /// and then confiscate the rest of the parameter registers to ensure
| 2678 | /// this. |
| 2679 | void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size, |
| 2680 | Align Alignment) const { |
| 2681 | // Byval (as with any stack) slots are always at least 4 byte aligned. |
| 2682 | Alignment = std::max(a: Alignment, b: Align(4)); |
| 2683 | |
| 2684 | MCRegister Reg = State->AllocateReg(Regs: GPRArgRegs); |
| 2685 | if (!Reg) |
| 2686 | return; |
| 2687 | |
| 2688 | unsigned AlignInRegs = Alignment.value() / 4; |
| 2689 | unsigned Waste = (ARM::R4 - Reg) % AlignInRegs; |
| 2690 | for (unsigned i = 0; i < Waste; ++i) |
| 2691 | Reg = State->AllocateReg(Regs: GPRArgRegs); |
| 2692 | |
| 2693 | if (!Reg) |
| 2694 | return; |
| 2695 | |
| 2696 | unsigned Excess = 4 * (ARM::R4 - Reg); |
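|      | // For example, an 8-byte-aligned byval when r1 is the next free register
|      | // wastes r1 for alignment, so Reg becomes r2 and Excess == 8 (the bytes
|      | // that fit in r2-r3).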
| 2697 | |
| 2698 | // Special case: NSAA != SP and the parameter size is greater than the size of
| 2699 | // all remaining GPR registers. In that case we can't split the parameter; we
| 2700 | // must send it entirely to the stack. We must also set the NCRN to R4, so
| 2701 | // waste all remaining registers.
| 2702 | const unsigned NSAAOffset = State->getStackSize(); |
| 2703 | if (NSAAOffset != 0 && Size > Excess) { |
| 2704 | while (State->AllocateReg(Regs: GPRArgRegs)) |
| 2705 | ; |
| 2706 | return; |
| 2707 | } |
| 2708 | |
| 2709 | // The first register for the byval parameter is the first register that
| 2710 | // wasn't allocated before this method was called, i.e. "reg".
| 2711 | // If the parameter is small enough to be saved in the range [reg, r4), then
| 2712 | // the end (one past the last) register is reg + param-size-in-regs;
| 2713 | // otherwise the parameter is split between registers and the stack, and the
| 2714 | // end register is r4 in that case.
| 2715 | unsigned ByValRegBegin = Reg; |
| 2716 | unsigned ByValRegEnd = std::min<unsigned>(a: Reg + Size / 4, b: ARM::R4); |
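|      | // For example, a 20-byte byval starting at r2 gives ByValRegEnd == r4:
|      | // r2-r3 hold the first 8 bytes and the remaining 12 bytes go on the stack.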
| 2717 | State->addInRegsParamInfo(RegBegin: ByValRegBegin, RegEnd: ByValRegEnd); |
| 2718 | // Note: the first register was already allocated at the beginning of this
| 2719 | // function, so allocate only the remaining registers we need.
| 2720 | for (unsigned i = Reg + 1; i != ByValRegEnd; ++i) |
| 2721 | State->AllocateReg(Regs: GPRArgRegs); |
| 2722 | // A byval parameter that is split between registers and memory needs its |
| 2723 | // size truncated here. |
| 2724 | // In the case where the entire structure fits in registers, we set the |
| 2725 | // size in memory to zero. |
| 2726 | Size = std::max<int>(a: Size - Excess, b: 0); |
| 2727 | } |
| 2728 | |
| 2729 | /// IsEligibleForTailCallOptimization - Check whether the call is eligible |
| 2730 | /// for tail call optimization. Targets which want to do tail call |
| 2731 | /// optimization should implement this function. Note that this function also |
| 2732 | /// processes musttail calls, so when this function returns false on a valid |
| 2733 | /// musttail call, a fatal backend error occurs. |
| 2734 | bool ARMTargetLowering::IsEligibleForTailCallOptimization( |
| 2735 | TargetLowering::CallLoweringInfo &CLI, CCState &CCInfo, |
| 2736 | SmallVectorImpl<CCValAssign> &ArgLocs, const bool isIndirect) const { |
| 2737 | CallingConv::ID CalleeCC = CLI.CallConv; |
| 2738 | SDValue Callee = CLI.Callee; |
| 2739 | bool isVarArg = CLI.IsVarArg; |
| 2740 | const SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; |
| 2741 | const SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; |
| 2742 | const SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; |
| 2743 | const SelectionDAG &DAG = CLI.DAG; |
| 2744 | MachineFunction &MF = DAG.getMachineFunction(); |
| 2745 | const Function &CallerF = MF.getFunction(); |
| 2746 | CallingConv::ID CallerCC = CallerF.getCallingConv(); |
| 2747 | |
| 2748 | assert(Subtarget->supportsTailCall()); |
| 2749 | |
| 2750 | // Indirect tail-calls require a register to hold the target address. That |
| 2751 | // register must be: |
| 2752 | // * Allocatable (i.e. r0-r7 if the target is Thumb1). |
| 2753 | // * Not callee-saved, so must be one of r0-r3 or r12. |
| 2754 | // * Not used to hold an argument to the tail-called function, which might be |
| 2755 | // in r0-r3. |
| 2756 | // * Not used to hold the return address authentication code, which is in r12 |
| 2757 | // if enabled. |
| 2758 | // Sometimes, no register matches all of these conditions, so we can't do a |
| 2759 | // tail-call. |
| 2760 | if (!isa<GlobalAddressSDNode>(Val: Callee.getNode()) || isIndirect) { |
| 2761 | SmallSet<MCPhysReg, 5> AddressRegisters = {ARM::R0, ARM::R1, ARM::R2, |
| 2762 | ARM::R3}; |
| 2763 | if (!(Subtarget->isThumb1Only() || |
| 2764 | MF.getInfo<ARMFunctionInfo>()->shouldSignReturnAddress(SpillsLR: true))) |
| 2765 | AddressRegisters.insert(V: ARM::R12); |
| 2766 | for (const CCValAssign &AL : ArgLocs) |
| 2767 | if (AL.isRegLoc()) |
| 2768 | AddressRegisters.erase(V: AL.getLocReg()); |
| 2769 | if (AddressRegisters.empty()) { |
| 2770 | LLVM_DEBUG(dbgs() << "false (no reg to hold function pointer)\n" ); |
| 2771 | return false; |
| 2772 | } |
| 2773 | } |
| 2774 | |
| 2775 | // Look for obvious safe cases to perform tail call optimization that do not |
| 2776 | // require ABI changes. This is what gcc calls sibcall. |
| 2777 | |
| 2778 | // Exception-handling functions need a special set of instructions to indicate |
| 2779 | // a return to the hardware. Tail-calling another function would probably |
| 2780 | // break this. |
| 2781 | if (CallerF.hasFnAttribute(Kind: "interrupt" )) { |
| 2782 | LLVM_DEBUG(dbgs() << "false (interrupt attribute)\n" ); |
| 2783 | return false; |
| 2784 | } |
| 2785 | |
| 2786 | if (canGuaranteeTCO(CC: CalleeCC, |
| 2787 | GuaranteeTailCalls: getTargetMachine().Options.GuaranteedTailCallOpt)) { |
| 2788 | LLVM_DEBUG(dbgs() << (CalleeCC == CallerCC ? "true" : "false" ) |
| 2789 | << " (guaranteed tail-call CC)\n" ); |
| 2790 | return CalleeCC == CallerCC; |
| 2791 | } |
| 2792 | |
| 2793 | // Also avoid sibcall optimization if either caller or callee uses struct |
| 2794 | // return semantics. |
| 2795 | bool isCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet(); |
| 2796 | bool isCallerStructRet = MF.getFunction().hasStructRetAttr(); |
| 2797 | if (isCalleeStructRet != isCallerStructRet) { |
| 2798 | LLVM_DEBUG(dbgs() << "false (struct-ret)\n" ); |
| 2799 | return false; |
| 2800 | } |
| 2801 | |
| 2802 | // Externally-defined functions with weak linkage should not be |
| 2803 | // tail-called on ARM when the OS does not support dynamic |
| 2804 | // pre-emption of symbols, as the AAELF spec requires normal calls |
| 2805 | // to undefined weak functions to be replaced with a NOP or jump to the |
| 2806 | // next instruction. The behaviour of branch instructions in this |
| 2807 | // situation (as used for tail calls) is implementation-defined, so we |
| 2808 | // cannot rely on the linker replacing the tail call with a return. |
| 2809 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Val&: Callee)) { |
| 2810 | const GlobalValue *GV = G->getGlobal(); |
| 2811 | const Triple &TT = getTargetMachine().getTargetTriple(); |
| 2812 | if (GV->hasExternalWeakLinkage() && |
| 2813 | (!TT.isOSWindows() || TT.isOSBinFormatELF() || |
| 2814 | TT.isOSBinFormatMachO())) { |
| 2815 | LLVM_DEBUG(dbgs() << "false (external weak linkage)\n" ); |
| 2816 | return false; |
| 2817 | } |
| 2818 | } |
| 2819 | |
| 2820 | // Check that the call results are passed in the same way. |
| 2821 | LLVMContext &C = *DAG.getContext(); |
| 2822 | if (!CCState::resultsCompatible( |
| 2823 | CalleeCC: getEffectiveCallingConv(CC: CalleeCC, isVarArg), |
| 2824 | CallerCC: getEffectiveCallingConv(CC: CallerCC, isVarArg: CallerF.isVarArg()), MF, C, Ins, |
| 2825 | CalleeFn: CCAssignFnForReturn(CC: CalleeCC, isVarArg), |
| 2826 | CallerFn: CCAssignFnForReturn(CC: CallerCC, isVarArg: CallerF.isVarArg()))) { |
| 2827 | LLVM_DEBUG(dbgs() << "false (incompatible results)\n" ); |
| 2828 | return false; |
| 2829 | } |
| 2830 | // The callee has to preserve all registers the caller needs to preserve. |
| 2831 | const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 2832 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); |
| 2833 | if (CalleeCC != CallerCC) { |
| 2834 | const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); |
| 2835 | if (!TRI->regmaskSubsetEqual(mask0: CallerPreserved, mask1: CalleePreserved)) { |
| 2836 | LLVM_DEBUG(dbgs() << "false (not all registers preserved)\n" ); |
| 2837 | return false; |
| 2838 | } |
| 2839 | } |
| 2840 | |
| 2841 | // If Caller's vararg argument has been split between registers and stack, do |
| 2842 | // not perform tail call, since part of the argument is in caller's local |
| 2843 | // frame. |
| 2844 | const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>(); |
| 2845 | if (CLI.IsVarArg && AFI_Caller->getArgRegsSaveSize()) { |
| 2846 | LLVM_DEBUG(dbgs() << "false (arg reg save area)\n" ); |
| 2847 | return false; |
| 2848 | } |
| 2849 | |
| 2850 | // If the callee takes no arguments then go on to check the results of the |
| 2851 | // call. |
| 2852 | const MachineRegisterInfo &MRI = MF.getRegInfo(); |
| 2853 | if (!parametersInCSRMatch(MRI, CallerPreservedMask: CallerPreserved, ArgLocs, OutVals)) { |
| 2854 | LLVM_DEBUG(dbgs() << "false (parameters in CSRs do not match)\n" ); |
| 2855 | return false; |
| 2856 | } |
| 2857 | |
| 2858 | // If the stack arguments for this call do not fit into our own save area then |
| 2859 | // the call cannot be made tail. |
| 2860 | if (CCInfo.getStackSize() > AFI_Caller->getArgumentStackSize()) |
| 2861 | return false; |
| 2862 | |
| 2863 | LLVM_DEBUG(dbgs() << "true\n" ); |
| 2864 | return true; |
| 2865 | } |
| 2866 | |
| 2867 | bool |
| 2868 | ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv, |
| 2869 | MachineFunction &MF, bool isVarArg, |
| 2870 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
| 2871 | LLVMContext &Context, const Type *RetTy) const { |
| 2872 | SmallVector<CCValAssign, 16> RVLocs; |
| 2873 | CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); |
| 2874 | return CCInfo.CheckReturn(Outs, Fn: CCAssignFnForReturn(CC: CallConv, isVarArg)); |
| 2875 | } |
| 2876 | |
| 2877 | static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps, |
| 2878 | const SDLoc &DL, SelectionDAG &DAG) { |
| 2879 | const MachineFunction &MF = DAG.getMachineFunction(); |
| 2880 | const Function &F = MF.getFunction(); |
| 2881 | |
| 2882 | StringRef IntKind = F.getFnAttribute(Kind: "interrupt" ).getValueAsString(); |
| 2883 | |
| 2884 | // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset |
| 2885 | // version of the "preferred return address". These offsets affect the return |
| 2886 | // instruction if this is a return from PL1 without hypervisor extensions. |
| 2887 | // IRQ/FIQ: +4 "subs pc, lr, #4" |
| 2888 | // SWI: 0 "subs pc, lr, #0" |
| 2889 | // ABORT: +4 "subs pc, lr, #4" |
| 2890 | // UNDEF: +4/+2 "subs pc, lr, #0" |
| 2891 | // UNDEF varies depending on where the exception came from ARM or Thumb |
| 2892 | // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0. |
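|      | // The IntKind strings checked below come from the IR "interrupt" function
|      | // attribute (typically produced by the C-level interrupt attribute, e.g.
|      | // __attribute__((interrupt("IRQ")))).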
| 2893 | |
| 2894 | int64_t LROffset; |
| 2895 | if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" || |
| 2896 | IntKind == "ABORT" ) |
| 2897 | LROffset = 4; |
| 2898 | else if (IntKind == "SWI" || IntKind == "UNDEF" ) |
| 2899 | LROffset = 0; |
| 2900 | else |
| 2901 | report_fatal_error(reason: "Unsupported interrupt attribute. If present, value " |
| 2902 | "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF" ); |
| 2903 | |
| 2904 | RetOps.insert(I: RetOps.begin() + 1, |
| 2905 | Elt: DAG.getConstant(Val: LROffset, DL, VT: MVT::i32, isTarget: false)); |
| 2906 | |
| 2907 | return DAG.getNode(Opcode: ARMISD::INTRET_GLUE, DL, VT: MVT::Other, Ops: RetOps); |
| 2908 | } |
| 2909 | |
| 2910 | SDValue |
| 2911 | ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, |
| 2912 | bool isVarArg, |
| 2913 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
| 2914 | const SmallVectorImpl<SDValue> &OutVals, |
| 2915 | const SDLoc &dl, SelectionDAG &DAG) const { |
| 2916 | // CCValAssign - represent the assignment of the return value to a location. |
| 2917 | SmallVector<CCValAssign, 16> RVLocs; |
| 2918 | |
| 2919 | // CCState - Info about the registers and stack slots. |
| 2920 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, |
| 2921 | *DAG.getContext()); |
| 2922 | |
| 2923 | // Analyze outgoing return values. |
| 2924 | CCInfo.AnalyzeReturn(Outs, Fn: CCAssignFnForReturn(CC: CallConv, isVarArg)); |
| 2925 | |
| 2926 | SDValue Glue; |
| 2927 | SmallVector<SDValue, 4> RetOps; |
| 2928 | RetOps.push_back(Elt: Chain); // Operand #0 = Chain (updated below) |
| 2929 | bool isLittleEndian = Subtarget->isLittle(); |
| 2930 | |
| 2931 | MachineFunction &MF = DAG.getMachineFunction(); |
| 2932 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 2933 | AFI->setReturnRegsCount(RVLocs.size()); |
| 2934 | |
| 2935 | // Report an error if a CMSE entry function returns a structure through the first pointer argument.
| 2936 | if (AFI->isCmseNSEntryFunction() && MF.getFunction().hasStructRetAttr()) { |
| 2937 | // Note: using an empty SDLoc(), as the first line of the function is a |
| 2938 | // better place to report than the last line. |
| 2939 | DAG.getContext()->diagnose(DI: DiagnosticInfoUnsupported( |
| 2940 | DAG.getMachineFunction().getFunction(), |
| 2941 | "secure entry function would return value through pointer" , |
| 2942 | SDLoc().getDebugLoc())); |
| 2943 | } |
| 2944 | |
| 2945 | // Copy the result values into the output registers. |
| 2946 | for (unsigned i = 0, realRVLocIdx = 0; |
| 2947 | i != RVLocs.size(); |
| 2948 | ++i, ++realRVLocIdx) { |
| 2949 | CCValAssign &VA = RVLocs[i]; |
| 2950 | assert(VA.isRegLoc() && "Can only return in registers!" ); |
| 2951 | |
| 2952 | SDValue Arg = OutVals[realRVLocIdx]; |
| 2953 | bool ReturnF16 = false; |
| 2954 | |
| 2955 | if (Subtarget->hasFullFP16() && getTM().isTargetHardFloat()) { |
| 2956 | // Half-precision return values can be returned like this: |
| 2957 | // |
| 2958 | // t11: f16 = fadd ...
| 2959 | // t12: i16 = bitcast t11 |
| 2960 | // t13: i32 = zero_extend t12 |
| 2961 | // t14: f32 = bitcast t13 <~~~~~~~ Arg |
| 2962 | // |
| 2963 | // to avoid code generation for bitcasts, we simply set Arg to the node |
| 2964 | // that produces the f16 value, t11 in this case. |
| 2965 | // |
| 2966 | if (Arg.getValueType() == MVT::f32 && Arg.getOpcode() == ISD::BITCAST) { |
| 2967 | SDValue ZE = Arg.getOperand(i: 0); |
| 2968 | if (ZE.getOpcode() == ISD::ZERO_EXTEND && ZE.getValueType() == MVT::i32) { |
| 2969 | SDValue BC = ZE.getOperand(i: 0); |
| 2970 | if (BC.getOpcode() == ISD::BITCAST && BC.getValueType() == MVT::i16) { |
| 2971 | Arg = BC.getOperand(i: 0); |
| 2972 | ReturnF16 = true; |
| 2973 | } |
| 2974 | } |
| 2975 | } |
| 2976 | } |
| 2977 | |
| 2978 | switch (VA.getLocInfo()) { |
| 2979 | default: llvm_unreachable("Unknown loc info!" ); |
| 2980 | case CCValAssign::Full: break; |
| 2981 | case CCValAssign::BCvt: |
| 2982 | if (!ReturnF16) |
| 2983 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
| 2984 | break; |
| 2985 | } |
| 2986 | |
| 2987 | // Mask f16 arguments if this is a CMSE nonsecure entry. |
| 2988 | auto RetVT = Outs[realRVLocIdx].ArgVT; |
| 2989 | if (AFI->isCmseNSEntryFunction() && (RetVT == MVT::f16)) { |
| 2990 | if (VA.needsCustom() && VA.getValVT() == MVT::f16) { |
| 2991 | Arg = MoveFromHPR(dl, DAG, LocVT: VA.getLocVT(), ValVT: VA.getValVT(), Val: Arg); |
| 2992 | } else { |
| 2993 | auto LocBits = VA.getLocVT().getSizeInBits(); |
| 2994 | auto MaskValue = APInt::getLowBitsSet(numBits: LocBits, loBitsSet: RetVT.getSizeInBits()); |
| 2995 | SDValue Mask = |
| 2996 | DAG.getConstant(Val: MaskValue, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits)); |
| 2997 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits), Operand: Arg); |
| 2998 | Arg = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits), N1: Arg, N2: Mask); |
| 2999 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
| 3000 | } |
| 3001 | } |
| 3002 | |
| 3003 | if (VA.needsCustom() && |
| 3004 | (VA.getLocVT() == MVT::v2f64 || VA.getLocVT() == MVT::f64)) { |
| 3005 | if (VA.getLocVT() == MVT::v2f64) { |
| 3006 | // Extract the first half and return it in two registers. |
| 3007 | SDValue Half = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, N1: Arg, |
| 3008 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 3009 | SDValue HalfGPRs = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
| 3010 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: Half); |
| 3011 | |
| 3012 | Chain = |
| 3013 | DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), |
| 3014 | N: HalfGPRs.getValue(R: isLittleEndian ? 0 : 1), Glue); |
| 3015 | Glue = Chain.getValue(R: 1); |
| 3016 | RetOps.push_back(Elt: DAG.getRegister(Reg: VA.getLocReg(), VT: VA.getLocVT())); |
| 3017 | VA = RVLocs[++i]; // skip ahead to next loc |
| 3018 | Chain = |
| 3019 | DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), |
| 3020 | N: HalfGPRs.getValue(R: isLittleEndian ? 1 : 0), Glue); |
| 3021 | Glue = Chain.getValue(R: 1); |
| 3022 | RetOps.push_back(Elt: DAG.getRegister(Reg: VA.getLocReg(), VT: VA.getLocVT())); |
| 3023 | VA = RVLocs[++i]; // skip ahead to next loc |
| 3024 | |
| 3025 | // Extract the 2nd half and fall through to handle it as an f64 value. |
| 3026 | Arg = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, N1: Arg, |
| 3027 | N2: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
| 3028 | } |
| 3029 | // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is |
| 3030 | // available. |
| 3031 | SDValue fmrrd = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
| 3032 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: Arg); |
| 3033 | Chain = DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), |
| 3034 | N: fmrrd.getValue(R: isLittleEndian ? 0 : 1), Glue); |
| 3035 | Glue = Chain.getValue(R: 1); |
| 3036 | RetOps.push_back(Elt: DAG.getRegister(Reg: VA.getLocReg(), VT: VA.getLocVT())); |
| 3037 | VA = RVLocs[++i]; // skip ahead to next loc |
| 3038 | Chain = DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), |
| 3039 | N: fmrrd.getValue(R: isLittleEndian ? 1 : 0), Glue); |
| 3040 | } else |
| 3041 | Chain = DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), N: Arg, Glue); |
| 3042 | |
| 3043 | // Guarantee that all emitted copies are glued together, so they cannot be
| 3044 | // scheduled apart from each other.
| 3045 | Glue = Chain.getValue(R: 1); |
| 3046 | RetOps.push_back(Elt: DAG.getRegister( |
| 3047 | Reg: VA.getLocReg(), VT: ReturnF16 ? Arg.getValueType() : VA.getLocVT())); |
| 3048 | } |
| 3049 | const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 3050 | const MCPhysReg *I = |
| 3051 | TRI->getCalleeSavedRegsViaCopy(MF: &DAG.getMachineFunction()); |
| 3052 | if (I) { |
| 3053 | for (; *I; ++I) { |
| 3054 | if (ARM::GPRRegClass.contains(Reg: *I)) |
| 3055 | RetOps.push_back(Elt: DAG.getRegister(Reg: *I, VT: MVT::i32)); |
| 3056 | else if (ARM::DPRRegClass.contains(Reg: *I)) |
| 3057 | RetOps.push_back(Elt: DAG.getRegister(Reg: *I, VT: MVT::getFloatingPointVT(BitWidth: 64))); |
| 3058 | else |
| 3059 | llvm_unreachable("Unexpected register class in CSRsViaCopy!" ); |
| 3060 | } |
| 3061 | } |
| 3062 | |
| 3063 | // Update chain and glue. |
| 3064 | RetOps[0] = Chain; |
| 3065 | if (Glue.getNode()) |
| 3066 | RetOps.push_back(Elt: Glue); |
| 3067 | |
| 3068 | // CPUs which aren't M-class use a special sequence to return from |
| 3069 | // exceptions (roughly, any instruction setting pc and cpsr simultaneously, |
| 3070 | // though we use "subs pc, lr, #N"). |
| 3071 | // |
| 3072 | // M-class CPUs actually use a normal return sequence with a special |
| 3073 | // (hardware-provided) value in LR, so the normal code path works. |
| 3074 | if (DAG.getMachineFunction().getFunction().hasFnAttribute(Kind: "interrupt" ) && |
| 3075 | !Subtarget->isMClass()) { |
| 3076 | if (Subtarget->isThumb1Only()) |
| 3077 | report_fatal_error(reason: "interrupt attribute is not supported in Thumb1" ); |
| 3078 | return LowerInterruptReturn(RetOps, DL: dl, DAG); |
| 3079 | } |
| 3080 | |
| 3081 | unsigned RetNode = |
| 3082 | AFI->isCmseNSEntryFunction() ? ARMISD::SERET_GLUE : ARMISD::RET_GLUE; |
| 3083 | return DAG.getNode(Opcode: RetNode, DL: dl, VT: MVT::Other, Ops: RetOps); |
| 3084 | } |
| 3085 | |
| 3086 | bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { |
| 3087 | if (N->getNumValues() != 1) |
| 3088 | return false; |
| 3089 | if (!N->hasNUsesOfValue(NUses: 1, Value: 0)) |
| 3090 | return false; |
| 3091 | |
| 3092 | SDValue TCChain = Chain; |
| 3093 | SDNode *Copy = *N->user_begin(); |
| 3094 | if (Copy->getOpcode() == ISD::CopyToReg) { |
| 3095 | // If the copy has a glue operand, we conservatively assume it isn't safe to |
| 3096 | // perform a tail call. |
| 3097 | if (Copy->getOperand(Num: Copy->getNumOperands()-1).getValueType() == MVT::Glue) |
| 3098 | return false; |
| 3099 | TCChain = Copy->getOperand(Num: 0); |
| 3100 | } else if (Copy->getOpcode() == ARMISD::VMOVRRD) { |
| 3101 | SDNode *VMov = Copy; |
| 3102 | // f64 returned in a pair of GPRs. |
| 3103 | SmallPtrSet<SDNode*, 2> Copies; |
| 3104 | for (SDNode *U : VMov->users()) { |
| 3105 | if (U->getOpcode() != ISD::CopyToReg) |
| 3106 | return false; |
| 3107 | Copies.insert(Ptr: U); |
| 3108 | } |
| 3109 | if (Copies.size() > 2) |
| 3110 | return false; |
| 3111 | |
| 3112 | for (SDNode *U : VMov->users()) { |
| 3113 | SDValue UseChain = U->getOperand(Num: 0); |
| 3114 | if (Copies.count(Ptr: UseChain.getNode())) |
| 3115 | // Second CopyToReg |
| 3116 | Copy = U; |
| 3117 | else { |
| 3118 | // We are at the top of this chain. |
| 3119 | // If the copy has a glue operand, we conservatively assume it |
| 3120 | // isn't safe to perform a tail call. |
| 3121 | if (U->getOperand(Num: U->getNumOperands() - 1).getValueType() == MVT::Glue) |
| 3122 | return false; |
| 3123 | // First CopyToReg |
| 3124 | TCChain = UseChain; |
| 3125 | } |
| 3126 | } |
| 3127 | } else if (Copy->getOpcode() == ISD::BITCAST) { |
| 3128 | // f32 returned in a single GPR. |
| 3129 | if (!Copy->hasOneUse()) |
| 3130 | return false; |
| 3131 | Copy = *Copy->user_begin(); |
| 3132 | if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(NUses: 1, Value: 0)) |
| 3133 | return false; |
| 3134 | // If the copy has a glue operand, we conservatively assume it isn't safe to |
| 3135 | // perform a tail call. |
| 3136 | if (Copy->getOperand(Num: Copy->getNumOperands()-1).getValueType() == MVT::Glue) |
| 3137 | return false; |
| 3138 | TCChain = Copy->getOperand(Num: 0); |
| 3139 | } else { |
| 3140 | return false; |
| 3141 | } |
| 3142 | |
| 3143 | bool HasRet = false; |
| 3144 | for (const SDNode *U : Copy->users()) { |
| 3145 | if (U->getOpcode() != ARMISD::RET_GLUE && |
| 3146 | U->getOpcode() != ARMISD::INTRET_GLUE) |
| 3147 | return false; |
| 3148 | HasRet = true; |
| 3149 | } |
| 3150 | |
| 3151 | if (!HasRet) |
| 3152 | return false; |
| 3153 | |
| 3154 | Chain = TCChain; |
| 3155 | return true; |
| 3156 | } |
| 3157 | |
| 3158 | bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { |
| 3159 | if (!Subtarget->supportsTailCall()) |
| 3160 | return false; |
| 3161 | |
| 3162 | if (!CI->isTailCall()) |
| 3163 | return false; |
| 3164 | |
| 3165 | return true; |
| 3166 | } |
| 3167 | |
| 3168 | // We are writing a 64-bit value, so we need to split it into two 32-bit values
| 3169 | // first and pass the low and high parts through.
| 3170 | static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) { |
| 3171 | SDLoc DL(Op); |
| 3172 | SDValue WriteValue = Op->getOperand(Num: 2); |
| 3173 | |
| 3174 | // This function is only supposed to be called for i64 type argument. |
| 3175 | assert(WriteValue.getValueType() == MVT::i64 |
| 3176 | && "LowerWRITE_REGISTER called for non-i64 type argument." ); |
| 3177 | |
| 3178 | SDValue Lo, Hi; |
| 3179 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: WriteValue, DL, LoVT: MVT::i32, HiVT: MVT::i32); |
| 3180 | SDValue Ops[] = { Op->getOperand(Num: 0), Op->getOperand(Num: 1), Lo, Hi }; |
| 3181 | return DAG.getNode(Opcode: ISD::WRITE_REGISTER, DL, VT: MVT::Other, Ops); |
| 3182 | } |
| 3183 | |
| 3184 | // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as |
| 3185 | // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is |
| 3186 | // one of the above mentioned nodes. It has to be wrapped because otherwise |
| 3187 | // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only |
| 3188 | // be used to form addressing mode. These wrapped nodes will be selected |
| 3189 | // into MOVi. |
| 3190 | SDValue ARMTargetLowering::LowerConstantPool(SDValue Op, |
| 3191 | SelectionDAG &DAG) const { |
| 3192 | EVT PtrVT = Op.getValueType(); |
| 3193 | // FIXME there is no actual debug info here |
| 3194 | SDLoc dl(Op); |
| 3195 | ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Val&: Op); |
| 3196 | SDValue Res; |
| 3197 | |
| 3198 | // When generating execute-only code Constant Pools must be promoted to the |
| 3199 | // global data section. It's a bit ugly that we can't share them across basic |
| 3200 | // blocks, but this way we guarantee that execute-only behaves correctly with
| 3201 | // position-independent addressing modes. |
| 3202 | if (Subtarget->genExecuteOnly()) { |
| 3203 | auto AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); |
| 3204 | auto *T = CP->getType(); |
| 3205 | auto C = const_cast<Constant*>(CP->getConstVal()); |
| 3206 | auto M = DAG.getMachineFunction().getFunction().getParent(); |
| 3207 | auto GV = new GlobalVariable( |
| 3208 | *M, T, /*isConstant=*/true, GlobalVariable::InternalLinkage, C, |
| 3209 | Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" + |
| 3210 | Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" + |
| 3211 | Twine(AFI->createPICLabelUId()) |
| 3212 | ); |
| 3213 | SDValue GA = DAG.getTargetGlobalAddress(GV: dyn_cast<GlobalValue>(Val: GV), |
| 3214 | DL: dl, VT: PtrVT); |
| 3215 | return LowerGlobalAddress(Op: GA, DAG); |
| 3216 | } |
| 3217 | |
| 3218 | // The 16-bit ADR instruction can only encode offsets that are multiples of 4, |
| 3219 | // so we need to align to at least 4 bytes when we don't have 32-bit ADR. |
| 3220 | Align CPAlign = CP->getAlign(); |
| 3221 | if (Subtarget->isThumb1Only()) |
| 3222 | CPAlign = std::max(a: CPAlign, b: Align(4)); |
| 3223 | if (CP->isMachineConstantPoolEntry()) |
| 3224 | Res = |
| 3225 | DAG.getTargetConstantPool(C: CP->getMachineCPVal(), VT: PtrVT, Align: CPAlign); |
| 3226 | else |
| 3227 | Res = DAG.getTargetConstantPool(C: CP->getConstVal(), VT: PtrVT, Align: CPAlign); |
| 3228 | return DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Res); |
| 3229 | } |
| 3230 | |
| 3231 | unsigned ARMTargetLowering::getJumpTableEncoding() const { |
| 3232 | // If we don't have a 32-bit pc-relative branch instruction then the jump |
| 3233 | // table consists of block addresses. Usually this is inline, but for |
| 3234 | // execute-only it must be placed out-of-line. |
| 3235 | if (Subtarget->genExecuteOnly() && !Subtarget->hasV8MBaselineOps()) |
| 3236 | return MachineJumpTableInfo::EK_BlockAddress; |
| 3237 | return MachineJumpTableInfo::EK_Inline; |
| 3238 | } |
| 3239 | |
| 3240 | SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, |
| 3241 | SelectionDAG &DAG) const { |
| 3242 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3243 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3244 | unsigned ARMPCLabelIndex = 0; |
| 3245 | SDLoc DL(Op); |
| 3246 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3247 | const BlockAddress *BA = cast<BlockAddressSDNode>(Val&: Op)->getBlockAddress(); |
| 3248 | SDValue CPAddr; |
| 3249 | bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI(); |
| 3250 | if (!IsPositionIndependent) { |
| 3251 | CPAddr = DAG.getTargetConstantPool(C: BA, VT: PtrVT, Align: Align(4)); |
| 3252 | } else { |
| 3253 | unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; |
| 3254 | ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 3255 | ARMConstantPoolValue *CPV = |
| 3256 | ARMConstantPoolConstant::Create(C: BA, ID: ARMPCLabelIndex, |
| 3257 | Kind: ARMCP::CPBlockAddress, PCAdj); |
| 3258 | CPAddr = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
| 3259 | } |
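| | // In the position-independent case the pool entry holds |
| | // "BA - (LPC + PCAdj)", so after the load below the PIC_ADD adds the pc |
| | // back in, giving roughly (ARM mode): |
| | //   ldr r0, .LCPI0_0 |
| | // .LPC0_0: |
| | //   add r0, pc, r0 |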
| 3260 | CPAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL, VT: PtrVT, Operand: CPAddr); |
| 3261 | SDValue Result = DAG.getLoad( |
| 3262 | VT: PtrVT, dl: DL, Chain: DAG.getEntryNode(), Ptr: CPAddr, |
| 3263 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3264 | if (!IsPositionIndependent) |
| 3265 | return Result; |
| 3266 | SDValue PICLabel = DAG.getConstant(Val: ARMPCLabelIndex, DL, VT: MVT::i32); |
| 3267 | return DAG.getNode(Opcode: ARMISD::PIC_ADD, DL, VT: PtrVT, N1: Result, N2: PICLabel); |
| 3268 | } |
| 3269 | |
| 3270 | /// Convert a TLS address reference into the correct sequence of loads |
| 3271 | /// and calls to compute the variable's address for Darwin, and return an |
| 3272 | /// SDValue containing the final node. |
| 3273 | |
| 3274 | /// Darwin only has one TLS scheme which must be capable of dealing with the |
| 3275 | /// fully general situation, in the worst case. This means: |
| 3276 | /// + "extern __thread" declaration. |
| 3277 | /// + Defined in a possibly unknown dynamic library. |
| 3278 | /// |
| 3279 | /// The general system is that each __thread variable has a [3 x i32] descriptor |
| 3280 | /// which contains information used by the runtime to calculate the address. The |
| 3281 | /// only part of this the compiler needs to know about is the first word, which |
| 3282 | /// contains a function pointer that must be called with the address of the |
| 3283 | /// entire descriptor in "r0". |
| 3284 | /// |
| 3285 | /// Since this descriptor may be in a different unit, in general access must |
| 3286 | /// proceed along the usual ARM rules. A common sequence to produce is: |
| 3287 | /// |
| 3288 | /// movw rT1, :lower16:_var$non_lazy_ptr |
| 3289 | /// movt rT1, :upper16:_var$non_lazy_ptr |
| 3290 | /// ldr r0, [rT1] |
| 3291 | /// ldr rT2, [r0] |
| 3292 | /// blx rT2 |
| 3293 | /// [...address now in r0...] |
| 3294 | SDValue |
| 3295 | ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op, |
| 3296 | SelectionDAG &DAG) const { |
| 3297 | assert(Subtarget->isTargetDarwin() && |
| 3298 | "This function expects a Darwin target" ); |
| 3299 | SDLoc DL(Op); |
| 3300 | |
| 3301 | // First step is to get the address of the actual global symbol. This is where |
| 3302 | // the TLS descriptor lives. |
| 3303 | SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG); |
| 3304 | |
| 3305 | // The first entry in the descriptor is a function pointer that we must call |
| 3306 | // to obtain the address of the variable. |
| 3307 | SDValue Chain = DAG.getEntryNode(); |
| 3308 | SDValue FuncTLVGet = DAG.getLoad( |
| 3309 | VT: MVT::i32, dl: DL, Chain, Ptr: DescAddr, |
| 3310 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction()), Alignment: Align(4), |
| 3311 | MMOFlags: MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable | |
| 3312 | MachineMemOperand::MOInvariant); |
| 3313 | Chain = FuncTLVGet.getValue(R: 1); |
| 3314 | |
| 3315 | MachineFunction &F = DAG.getMachineFunction(); |
| 3316 | MachineFrameInfo &MFI = F.getFrameInfo(); |
| 3317 | MFI.setAdjustsStack(true); |
| 3318 | |
| 3319 | // TLS calls preserve all registers except those that absolutely must be |
| 3320 | // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be |
| 3321 | // silly). |
| 3322 | auto TRI = |
| 3323 | getTargetMachine().getSubtargetImpl(F.getFunction())->getRegisterInfo(); |
| 3324 | auto ARI = static_cast<const ARMRegisterInfo *>(TRI); |
| 3325 | const uint32_t *Mask = ARI->getTLSCallPreservedMask(MF: DAG.getMachineFunction()); |
| 3326 | |
| 3327 | // Finally, we can make the call. This is just a degenerate version of a |
| 3328 | // normal ARM call node: r0 takes the address of the descriptor, and |
| 3329 | // returns the address of the variable in this thread. |
| 3330 | Chain = DAG.getCopyToReg(Chain, dl: DL, Reg: ARM::R0, N: DescAddr, Glue: SDValue()); |
| 3331 | Chain = |
| 3332 | DAG.getNode(Opcode: ARMISD::CALL, DL, VTList: DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue), |
| 3333 | N1: Chain, N2: FuncTLVGet, N3: DAG.getRegister(Reg: ARM::R0, VT: MVT::i32), |
| 3334 | N4: DAG.getRegisterMask(RegMask: Mask), N5: Chain.getValue(R: 1)); |
| 3335 | return DAG.getCopyFromReg(Chain, dl: DL, Reg: ARM::R0, VT: MVT::i32, Glue: Chain.getValue(R: 1)); |
| 3336 | } |
| 3337 | |
| 3338 | SDValue |
| 3339 | ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op, |
| 3340 | SelectionDAG &DAG) const { |
| 3341 | assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering"); |
| 3342 | |
| 3343 | SDValue Chain = DAG.getEntryNode(); |
| 3344 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3345 | SDLoc DL(Op); |
| 3346 | |
| 3347 | // Load the current TEB (thread environment block) |
| 3348 | SDValue Ops[] = {Chain, |
| 3349 | DAG.getTargetConstant(Val: Intrinsic::arm_mrc, DL, VT: MVT::i32), |
| 3350 | DAG.getTargetConstant(Val: 15, DL, VT: MVT::i32), |
| 3351 | DAG.getTargetConstant(Val: 0, DL, VT: MVT::i32), |
| 3352 | DAG.getTargetConstant(Val: 13, DL, VT: MVT::i32), |
| 3353 | DAG.getTargetConstant(Val: 0, DL, VT: MVT::i32), |
| 3354 | DAG.getTargetConstant(Val: 2, DL, VT: MVT::i32)}; |
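| | // i.e. roughly "mrc p15, #0, rX, c13, c0, #2", reading the CP15 thread ID |
| | // register that Windows uses to hold the TEB pointer. |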
| 3355 | SDValue CurrentTEB = DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL, |
| 3356 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::Other), Ops); |
| 3357 | |
| 3358 | SDValue TEB = CurrentTEB.getValue(R: 0); |
| 3359 | Chain = CurrentTEB.getValue(R: 1); |
| 3360 | |
| 3361 | // Load the ThreadLocalStoragePointer from the TEB |
| 3362 | // A pointer to the TLS array is located at offset 0x2c from the TEB. |
| 3363 | SDValue TLSArray = |
| 3364 | DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: TEB, N2: DAG.getIntPtrConstant(Val: 0x2c, DL)); |
| 3365 | TLSArray = DAG.getLoad(VT: PtrVT, dl: DL, Chain, Ptr: TLSArray, PtrInfo: MachinePointerInfo()); |
| 3366 | |
| 3367 | // The pointer to the thread's TLS data area is at offset (TLS index * 4) |
| 3368 | // into the TLSArray. |
| 3369 | |
| 3370 | // Load the TLS index from the C runtime |
| 3371 | SDValue TLSIndex = |
| 3372 | DAG.getTargetExternalSymbol(Sym: "_tls_index" , VT: PtrVT, TargetFlags: ARMII::MO_NO_FLAG); |
| 3373 | TLSIndex = DAG.getNode(Opcode: ARMISD::Wrapper, DL, VT: PtrVT, Operand: TLSIndex); |
| 3374 | TLSIndex = DAG.getLoad(VT: PtrVT, dl: DL, Chain, Ptr: TLSIndex, PtrInfo: MachinePointerInfo()); |
| 3375 | |
| 3376 | SDValue Slot = DAG.getNode(Opcode: ISD::SHL, DL, VT: PtrVT, N1: TLSIndex, |
| 3377 | N2: DAG.getConstant(Val: 2, DL, VT: MVT::i32)); |
| 3378 | SDValue TLS = DAG.getLoad(VT: PtrVT, dl: DL, Chain, |
| 3379 | Ptr: DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: TLSArray, N2: Slot), |
| 3380 | PtrInfo: MachinePointerInfo()); |
| 3381 | |
| 3382 | // Get the offset of the start of the .tls section (section base) |
| 3383 | const auto *GA = cast<GlobalAddressSDNode>(Val&: Op); |
| 3384 | auto *CPV = ARMConstantPoolConstant::Create(GV: GA->getGlobal(), Modifier: ARMCP::SECREL); |
| 3385 | SDValue Offset = DAG.getLoad( |
| 3386 | VT: PtrVT, dl: DL, Chain, |
| 3387 | Ptr: DAG.getNode(Opcode: ARMISD::Wrapper, DL, VT: MVT::i32, |
| 3388 | Operand: DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4))), |
| 3389 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3390 | |
| 3391 | return DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: TLS, N2: Offset); |
| 3392 | } |
| 3393 | |
| 3394 | // Lower ISD::GlobalTLSAddress using the "general dynamic" model |
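| | // A sketch of the common ARM-mode sequence (exact code depends on the |
| | // subtarget): |
| | //   ldr r0, .LCPI0_0        @ sym(TLSGD) - (.LPC0_0 + PCAdj) |
| | // .LPC0_0: |
| | //   add r0, pc, r0          @ address of the GOT entry for the variable |
| | //   bl  __tls_get_addr      @ returns the variable's address in r0 |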
| 3395 | SDValue |
| 3396 | ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, |
| 3397 | SelectionDAG &DAG) const { |
| 3398 | SDLoc dl(GA); |
| 3399 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3400 | unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; |
| 3401 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3402 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3403 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 3404 | ARMConstantPoolValue *CPV = |
| 3405 | ARMConstantPoolConstant::Create(C: GA->getGlobal(), ID: ARMPCLabelIndex, |
| 3406 | Kind: ARMCP::CPValue, PCAdj, Modifier: ARMCP::TLSGD, AddCurrentAddress: true); |
| 3407 | SDValue Argument = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
| 3408 | Argument = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Argument); |
| 3409 | Argument = DAG.getLoad( |
| 3410 | VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: Argument, |
| 3411 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3412 | SDValue Chain = Argument.getValue(R: 1); |
| 3413 | |
| 3414 | SDValue PICLabel = DAG.getConstant(Val: ARMPCLabelIndex, DL: dl, VT: MVT::i32); |
| 3415 | Argument = DAG.getNode(Opcode: ARMISD::PIC_ADD, DL: dl, VT: PtrVT, N1: Argument, N2: PICLabel); |
| 3416 | |
| 3417 | // call __tls_get_addr. |
| 3418 | ArgListTy Args; |
| 3419 | Args.emplace_back(args&: Argument, args: Type::getInt32Ty(C&: *DAG.getContext())); |
| 3420 | |
| 3421 | // FIXME: is there useful debug info available here? |
| 3422 | TargetLowering::CallLoweringInfo CLI(DAG); |
| 3423 | CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( |
| 3424 | CC: CallingConv::C, ResultType: Type::getInt32Ty(C&: *DAG.getContext()), |
| 3425 | Target: DAG.getExternalSymbol(Sym: "__tls_get_addr" , VT: PtrVT), ArgsList: std::move(Args)); |
| 3426 | |
| 3427 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); |
| 3428 | return CallResult.first; |
| 3429 | } |
| 3430 | |
| 3431 | // Lower ISD::GlobalTLSAddress using the "initial exec" or |
| 3432 | // "local exec" model. |
| 3433 | SDValue |
| 3434 | ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, |
| 3435 | SelectionDAG &DAG, |
| 3436 | TLSModel::Model model) const { |
| 3437 | const GlobalValue *GV = GA->getGlobal(); |
| 3438 | SDLoc dl(GA); |
| 3439 | SDValue Offset; |
| 3440 | SDValue Chain = DAG.getEntryNode(); |
| 3441 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3442 | // Get the Thread Pointer |
| 3443 | SDValue ThreadPointer = DAG.getNode(Opcode: ARMISD::THREAD_POINTER, DL: dl, VT: PtrVT); |
| 3444 | |
| 3445 | if (model == TLSModel::InitialExec) { |
| 3446 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3447 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3448 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 3449 | // Initial exec model. |
| 3450 | unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; |
| 3451 | ARMConstantPoolValue *CPV = |
| 3452 | ARMConstantPoolConstant::Create(C: GA->getGlobal(), ID: ARMPCLabelIndex, |
| 3453 | Kind: ARMCP::CPValue, PCAdj, Modifier: ARMCP::GOTTPOFF, |
| 3454 | AddCurrentAddress: true); |
| 3455 | Offset = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
| 3456 | Offset = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Offset); |
| 3457 | Offset = DAG.getLoad( |
| 3458 | VT: PtrVT, dl, Chain, Ptr: Offset, |
| 3459 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3460 | Chain = Offset.getValue(R: 1); |
| 3461 | |
| 3462 | SDValue PICLabel = DAG.getConstant(Val: ARMPCLabelIndex, DL: dl, VT: MVT::i32); |
| 3463 | Offset = DAG.getNode(Opcode: ARMISD::PIC_ADD, DL: dl, VT: PtrVT, N1: Offset, N2: PICLabel); |
| 3464 | |
| 3465 | Offset = DAG.getLoad( |
| 3466 | VT: PtrVT, dl, Chain, Ptr: Offset, |
| 3467 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3468 | } else { |
| 3469 | // local exec model |
| 3470 | assert(model == TLSModel::LocalExec); |
| 3471 | ARMConstantPoolValue *CPV = |
| 3472 | ARMConstantPoolConstant::Create(GV, Modifier: ARMCP::TPOFF); |
| 3473 | Offset = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
| 3474 | Offset = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Offset); |
| 3475 | Offset = DAG.getLoad( |
| 3476 | VT: PtrVT, dl, Chain, Ptr: Offset, |
| 3477 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3478 | } |
| 3479 | |
| 3480 | // The address of the thread-local variable is the sum of the thread |
| 3481 | // pointer and the offset of the variable. |
| 3482 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: ThreadPointer, N2: Offset); |
| 3483 | } |
| 3484 | |
| 3485 | SDValue |
| 3486 | ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { |
| 3487 | GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Val&: Op); |
| 3488 | if (DAG.getTarget().useEmulatedTLS()) |
| 3489 | return LowerToTLSEmulatedModel(GA, DAG); |
| 3490 | |
| 3491 | if (Subtarget->isTargetDarwin()) |
| 3492 | return LowerGlobalTLSAddressDarwin(Op, DAG); |
| 3493 | |
| 3494 | if (Subtarget->isTargetWindows()) |
| 3495 | return LowerGlobalTLSAddressWindows(Op, DAG); |
| 3496 | |
| 3497 | // TODO: implement the "local dynamic" model |
| 3498 | assert(Subtarget->isTargetELF() && "Only ELF implemented here"); |
| 3499 | TLSModel::Model model = getTargetMachine().getTLSModel(GV: GA->getGlobal()); |
| 3500 | |
| 3501 | switch (model) { |
| 3502 | case TLSModel::GeneralDynamic: |
| 3503 | case TLSModel::LocalDynamic: |
| 3504 | return LowerToTLSGeneralDynamicModel(GA, DAG); |
| 3505 | case TLSModel::InitialExec: |
| 3506 | case TLSModel::LocalExec: |
| 3507 | return LowerToTLSExecModels(GA, DAG, model); |
| 3508 | } |
| 3509 | llvm_unreachable("bogus TLS model" ); |
| 3510 | } |
| 3511 | |
| 3512 | /// Return true if all users of V are within function F, looking through |
| 3513 | /// ConstantExprs. |
| 3514 | static bool allUsersAreInFunction(const Value *V, const Function *F) { |
| 3515 | SmallVector<const User*,4> Worklist(V->users()); |
| 3516 | while (!Worklist.empty()) { |
| 3517 | auto *U = Worklist.pop_back_val(); |
| 3518 | if (isa<ConstantExpr>(Val: U)) { |
| 3519 | append_range(C&: Worklist, R: U->users()); |
| 3520 | continue; |
| 3521 | } |
| 3522 | |
| 3523 | auto *I = dyn_cast<Instruction>(Val: U); |
| 3524 | if (!I || I->getParent()->getParent() != F) |
| 3525 | return false; |
| 3526 | } |
| 3527 | return true; |
| 3528 | } |
| 3529 | |
| 3530 | static SDValue promoteToConstantPool(const ARMTargetLowering *TLI, |
| 3531 | const GlobalValue *GV, SelectionDAG &DAG, |
| 3532 | EVT PtrVT, const SDLoc &dl) { |
| 3533 | // If we're creating a pool entry for a constant global with unnamed address, |
| 3534 | // and the global is small enough, we can emit it inline into the constant pool |
| 3535 | // to save ourselves an indirection. |
| 3536 | // |
| 3537 | // This is a win if the constant is only used in one function (so it doesn't |
| 3538 | // need to be duplicated) or duplicating the constant wouldn't increase code |
| 3539 | // size (implying the constant is no larger than 4 bytes). |
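| | // For example, a small file-local string such as |
| | //   @.str = private unnamed_addr constant [4 x i8] c"abc\00" |
| | // can be emitted straight into the constant pool next to its user instead of |
| | // being referenced through a separate data-section symbol. |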
| 3540 | const Function &F = DAG.getMachineFunction().getFunction(); |
| 3541 | |
| 3542 | // We rely on this decision to inline being idempotent and unrelated to the |
| 3543 | // use-site. We know that if we inline a variable at one use site, we'll |
| 3544 | // inline it elsewhere too (and reuse the constant pool entry). Fast-isel |
| 3545 | // doesn't know about this optimization, so bail out if it's enabled; |
| 3546 | // otherwise we could decide to inline here (and thus never emit the GV) |
| 3547 | // while fast-isel generated code still requires the GV. |
| 3548 | if (!EnableConstpoolPromotion || |
| 3549 | DAG.getMachineFunction().getTarget().Options.EnableFastISel) |
| 3550 | return SDValue(); |
| 3551 | |
| 3552 | auto *GVar = dyn_cast<GlobalVariable>(Val: GV); |
| 3553 | if (!GVar || !GVar->hasInitializer() || |
| 3554 | !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() || |
| 3555 | !GVar->hasLocalLinkage()) |
| 3556 | return SDValue(); |
| 3557 | |
| 3558 | // If we inline a value that contains relocations, we move the relocations |
| 3559 | // from .data to .text. This is not allowed in position-independent code. |
| 3560 | auto *Init = GVar->getInitializer(); |
| 3561 | if ((TLI->isPositionIndependent() || TLI->getSubtarget()->isROPI()) && |
| 3562 | Init->needsDynamicRelocation()) |
| 3563 | return SDValue(); |
| 3564 | |
| 3565 | // The constant islands pass can only really deal with alignment requests |
| 3566 | // <= 4 bytes and cannot pad constants itself. Therefore we cannot promote |
| 3567 | // any type wanting greater alignment requirements than 4 bytes. We also |
| 3568 | // can only promote constants that are multiples of 4 bytes in size or |
| 3569 | // are paddable to a multiple of 4. Currently we only try and pad constants |
| 3570 | // that are strings for simplicity. |
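| | // E.g. a 6-byte string gets RequiredPadding == 2 and is zero-padded to 8 |
| | // bytes further down, while a size that is already a multiple of 4 yields |
| | // RequiredPadding == 4, which is treated as "no padding needed". |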
| 3571 | auto *CDAInit = dyn_cast<ConstantDataArray>(Val: Init); |
| 3572 | unsigned Size = DAG.getDataLayout().getTypeAllocSize(Ty: Init->getType()); |
| 3573 | Align PrefAlign = DAG.getDataLayout().getPreferredAlign(GV: GVar); |
| 3574 | unsigned RequiredPadding = 4 - (Size % 4); |
| 3575 | bool PaddingPossible = |
| 3576 | RequiredPadding == 4 || (CDAInit && CDAInit->isString()); |
| 3577 | if (!PaddingPossible || PrefAlign > 4 || Size > ConstpoolPromotionMaxSize || |
| 3578 | Size == 0) |
| 3579 | return SDValue(); |
| 3580 | |
| 3581 | unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding); |
| 3582 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3583 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3584 | |
| 3585 | // We can't bloat the constant pool too much, else the ConstantIslands pass |
| 3586 | // may fail to converge. If we haven't promoted this global yet (it may have |
| 3587 | // multiple uses), and promoting it would increase the constant pool size |
| 3588 | // (Size > 4), ensure we have space to do so up to ConstpoolPromotionMaxTotal. |
| 3589 | if (!AFI->getGlobalsPromotedToConstantPool().count(Ptr: GVar) && Size > 4) |
| 3590 | if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >= |
| 3591 | ConstpoolPromotionMaxTotal) |
| 3592 | return SDValue(); |
| 3593 | |
| 3594 | // This is only valid if all users are in a single function; we can't clone |
| 3595 | // the constant in general. The LLVM IR unnamed_addr allows merging |
| 3596 | // constants, but not cloning them. |
| 3597 | // |
| 3598 | // We could potentially allow cloning if we could prove all uses of the |
| 3599 | // constant in the current function don't care about the address, like |
| 3600 | // printf format strings. But that isn't implemented for now. |
| 3601 | if (!allUsersAreInFunction(V: GVar, F: &F)) |
| 3602 | return SDValue(); |
| 3603 | |
| 3604 | // We're going to inline this global. Pad it out if needed. |
| 3605 | if (RequiredPadding != 4) { |
| 3606 | StringRef S = CDAInit->getAsString(); |
| 3607 | |
| 3608 | SmallVector<uint8_t,16> V(S.size()); |
| 3609 | std::copy(first: S.bytes_begin(), last: S.bytes_end(), result: V.begin()); |
| 3610 | while (RequiredPadding--) |
| 3611 | V.push_back(Elt: 0); |
| 3612 | Init = ConstantDataArray::get(Context&: *DAG.getContext(), Elts&: V); |
| 3613 | } |
| 3614 | |
| 3615 | auto CPVal = ARMConstantPoolConstant::Create(GV: GVar, Initializer: Init); |
| 3616 | SDValue CPAddr = DAG.getTargetConstantPool(C: CPVal, VT: PtrVT, Align: Align(4)); |
| 3617 | if (!AFI->getGlobalsPromotedToConstantPool().count(Ptr: GVar)) { |
| 3618 | AFI->markGlobalAsPromotedToConstantPool(GV: GVar); |
| 3619 | AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() + |
| 3620 | PaddedSize - 4); |
| 3621 | } |
| 3622 | ++NumConstpoolPromoted; |
| 3623 | return DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: CPAddr); |
| 3624 | } |
| 3625 | |
| 3626 | bool ARMTargetLowering::isReadOnly(const GlobalValue *GV) const { |
| 3627 | if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(Val: GV)) |
| 3628 | if (!(GV = GA->getAliaseeObject())) |
| 3629 | return false; |
| 3630 | if (const auto *V = dyn_cast<GlobalVariable>(Val: GV)) |
| 3631 | return V->isConstant(); |
| 3632 | return isa<Function>(Val: GV); |
| 3633 | } |
| 3634 | |
| 3635 | SDValue ARMTargetLowering::LowerGlobalAddress(SDValue Op, |
| 3636 | SelectionDAG &DAG) const { |
| 3637 | switch (Subtarget->getTargetTriple().getObjectFormat()) { |
| 3638 | default: llvm_unreachable("unknown object format" ); |
| 3639 | case Triple::COFF: |
| 3640 | return LowerGlobalAddressWindows(Op, DAG); |
| 3641 | case Triple::ELF: |
| 3642 | return LowerGlobalAddressELF(Op, DAG); |
| 3643 | case Triple::MachO: |
| 3644 | return LowerGlobalAddressDarwin(Op, DAG); |
| 3645 | } |
| 3646 | } |
| 3647 | |
| 3648 | SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, |
| 3649 | SelectionDAG &DAG) const { |
| 3650 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3651 | SDLoc dl(Op); |
| 3652 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Val&: Op)->getGlobal(); |
| 3653 | bool IsRO = isReadOnly(GV); |
| 3654 | |
| 3655 | // promoteToConstantPool only if not generating XO text section |
| 3656 | if (GV->isDSOLocal() && !Subtarget->genExecuteOnly()) |
| 3657 | if (SDValue V = promoteToConstantPool(TLI: this, GV, DAG, PtrVT, dl)) |
| 3658 | return V; |
| 3659 | |
| 3660 | if (isPositionIndependent()) { |
| 3661 | SDValue G = DAG.getTargetGlobalAddress( |
| 3662 | GV, DL: dl, VT: PtrVT, offset: 0, TargetFlags: GV->isDSOLocal() ? 0 : ARMII::MO_GOT); |
| 3663 | SDValue Result = DAG.getNode(Opcode: ARMISD::WrapperPIC, DL: dl, VT: PtrVT, Operand: G); |
| 3664 | if (!GV->isDSOLocal()) |
| 3665 | Result = |
| 3666 | DAG.getLoad(VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: Result, |
| 3667 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction())); |
| 3668 | return Result; |
| 3669 | } else if (Subtarget->isROPI() && IsRO) { |
| 3670 | // PC-relative. |
| 3671 | SDValue G = DAG.getTargetGlobalAddress(GV, DL: dl, VT: PtrVT); |
| 3672 | SDValue Result = DAG.getNode(Opcode: ARMISD::WrapperPIC, DL: dl, VT: PtrVT, Operand: G); |
| 3673 | return Result; |
| 3674 | } else if (Subtarget->isRWPI() && !IsRO) { |
| 3675 | // SB-relative. |
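| | // The SB-relative offset of the global is materialized below (with movw/movt |
| | // or a literal-pool load) and then added to the static base register, R9. |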
| 3676 | SDValue RelAddr; |
| 3677 | if (Subtarget->useMovt()) { |
| 3678 | ++NumMovwMovt; |
| 3679 | SDValue G = DAG.getTargetGlobalAddress(GV, DL: dl, VT: PtrVT, offset: 0, TargetFlags: ARMII::MO_SBREL); |
| 3680 | RelAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: PtrVT, Operand: G); |
| 3681 | } else { // use literal pool for address constant |
| 3682 | ARMConstantPoolValue *CPV = |
| 3683 | ARMConstantPoolConstant::Create(GV, Modifier: ARMCP::SBREL); |
| 3684 | SDValue CPAddr = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
| 3685 | CPAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: CPAddr); |
| 3686 | RelAddr = DAG.getLoad( |
| 3687 | VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: CPAddr, |
| 3688 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3689 | } |
| 3690 | SDValue SB = DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg: ARM::R9, VT: PtrVT); |
| 3691 | SDValue Result = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: SB, N2: RelAddr); |
| 3692 | return Result; |
| 3693 | } |
| 3694 | |
| 3695 | // If we have T2 ops, we can materialize the address directly via a movw/movt |
| 3696 | // pair. This is always cheaper. If we need to generate execute-only code and |
| 3697 | // only have Thumb1 available, we can't use a constant pool and are forced to |
| 3698 | // use immediate relocations. |
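| | // With movw/movt this is roughly: |
| | //   movw r0, :lower16:sym |
| | //   movt r0, :upper16:sym |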
| 3699 | if (Subtarget->useMovt() || Subtarget->genExecuteOnly()) { |
| 3700 | if (Subtarget->useMovt()) |
| 3701 | ++NumMovwMovt; |
| 3702 | // FIXME: Once remat is capable of dealing with instructions with register |
| 3703 | // operands, expand this into two nodes. |
| 3704 | return DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: PtrVT, |
| 3705 | Operand: DAG.getTargetGlobalAddress(GV, DL: dl, VT: PtrVT)); |
| 3706 | } else { |
| 3707 | SDValue CPAddr = DAG.getTargetConstantPool(C: GV, VT: PtrVT, Align: Align(4)); |
| 3708 | CPAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: CPAddr); |
| 3709 | return DAG.getLoad( |
| 3710 | VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: CPAddr, |
| 3711 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3712 | } |
| 3713 | } |
| 3714 | |
| 3715 | SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, |
| 3716 | SelectionDAG &DAG) const { |
| 3717 | assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && |
| 3718 | "ROPI/RWPI not currently supported for Darwin" ); |
| 3719 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3720 | SDLoc dl(Op); |
| 3721 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Val&: Op)->getGlobal(); |
| 3722 | |
| 3723 | if (Subtarget->useMovt()) |
| 3724 | ++NumMovwMovt; |
| 3725 | |
| 3726 | // FIXME: Once remat is capable of dealing with instructions with register |
| 3727 | // operands, expand this into multiple nodes |
| 3728 | unsigned Wrapper = |
| 3729 | isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper; |
| 3730 | |
| 3731 | SDValue G = DAG.getTargetGlobalAddress(GV, DL: dl, VT: PtrVT, offset: 0, TargetFlags: ARMII::MO_NONLAZY); |
| 3732 | SDValue Result = DAG.getNode(Opcode: Wrapper, DL: dl, VT: PtrVT, Operand: G); |
| 3733 | |
| 3734 | if (Subtarget->isGVIndirectSymbol(GV)) |
| 3735 | Result = DAG.getLoad(VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: Result, |
| 3736 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction())); |
| 3737 | return Result; |
| 3738 | } |
| 3739 | |
| 3740 | SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op, |
| 3741 | SelectionDAG &DAG) const { |
| 3742 | assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported"); |
| 3743 | assert(Subtarget->useMovt() && |
| 3744 | "Windows on ARM expects to use movw/movt"); |
| 3745 | assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && |
| 3746 | "ROPI/RWPI not currently supported for Windows"); |
| 3747 | |
| 3748 | const TargetMachine &TM = getTargetMachine(); |
| 3749 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Val&: Op)->getGlobal(); |
| 3750 | ARMII::TOF TargetFlags = ARMII::MO_NO_FLAG; |
| 3751 | if (GV->hasDLLImportStorageClass()) |
| 3752 | TargetFlags = ARMII::MO_DLLIMPORT; |
| 3753 | else if (!TM.shouldAssumeDSOLocal(GV)) |
| 3754 | TargetFlags = ARMII::MO_COFFSTUB; |
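| | // MO_DLLIMPORT / MO_COFFSTUB make the address below refer to the import or |
| | // stub pointer for the symbol (e.g. "__imp_<sym>"); the extra GOT-flagged |
| | // load afterwards then fetches the real address. |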
| 3755 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3756 | SDValue Result; |
| 3757 | SDLoc DL(Op); |
| 3758 | |
| 3759 | ++NumMovwMovt; |
| 3760 | |
| 3761 | // FIXME: Once remat is capable of dealing with instructions with register |
| 3762 | // operands, expand this into two nodes. |
| 3763 | Result = DAG.getNode(Opcode: ARMISD::Wrapper, DL, VT: PtrVT, |
| 3764 | Operand: DAG.getTargetGlobalAddress(GV, DL, VT: PtrVT, /*offset=*/0, |
| 3765 | TargetFlags)); |
| 3766 | if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB)) |
| 3767 | Result = DAG.getLoad(VT: PtrVT, dl: DL, Chain: DAG.getEntryNode(), Ptr: Result, |
| 3768 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction())); |
| 3769 | return Result; |
| 3770 | } |
| 3771 | |
| 3772 | SDValue |
| 3773 | ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { |
| 3774 | SDLoc dl(Op); |
| 3775 | SDValue Val = DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32); |
| 3776 | return DAG.getNode(Opcode: ARMISD::EH_SJLJ_SETJMP, DL: dl, |
| 3777 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::Other), N1: Op.getOperand(i: 0), |
| 3778 | N2: Op.getOperand(i: 1), N3: Val); |
| 3779 | } |
| 3780 | |
| 3781 | SDValue |
| 3782 | ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { |
| 3783 | SDLoc dl(Op); |
| 3784 | return DAG.getNode(Opcode: ARMISD::EH_SJLJ_LONGJMP, DL: dl, VT: MVT::Other, N1: Op.getOperand(i: 0), |
| 3785 | N2: Op.getOperand(i: 1), N3: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 3786 | } |
| 3787 | |
| 3788 | SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, |
| 3789 | SelectionDAG &DAG) const { |
| 3790 | SDLoc dl(Op); |
| 3791 | return DAG.getNode(Opcode: ARMISD::EH_SJLJ_SETUP_DISPATCH, DL: dl, VT: MVT::Other, |
| 3792 | Operand: Op.getOperand(i: 0)); |
| 3793 | } |
| 3794 | |
| 3795 | SDValue ARMTargetLowering::LowerINTRINSIC_VOID( |
| 3796 | SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget) const { |
| 3797 | unsigned IntNo = |
| 3798 | Op.getConstantOperandVal(i: Op.getOperand(i: 0).getValueType() == MVT::Other); |
| 3799 | switch (IntNo) { |
| 3800 | default: |
| 3801 | return SDValue(); // Don't custom lower most intrinsics. |
| 3802 | case Intrinsic::arm_gnu_eabi_mcount: { |
| 3803 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3804 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3805 | SDLoc dl(Op); |
| 3806 | SDValue Chain = Op.getOperand(i: 0); |
| 3807 | // call "\01__gnu_mcount_nc" |
| 3808 | const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo(); |
| 3809 | const uint32_t *Mask = |
| 3810 | ARI->getCallPreservedMask(MF: DAG.getMachineFunction(), CallingConv::C); |
| 3811 | assert(Mask && "Missing call preserved mask for calling convention"); |
| 3812 | // Mark LR an implicit live-in. |
| 3813 | Register Reg = MF.addLiveIn(PReg: ARM::LR, RC: getRegClassFor(VT: MVT::i32)); |
| 3814 | SDValue ReturnAddress = |
| 3815 | DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg, VT: PtrVT); |
| 3816 | constexpr EVT ResultTys[] = {MVT::Other, MVT::Glue}; |
| 3817 | SDValue Callee = |
| 3818 | DAG.getTargetExternalSymbol(Sym: "\01__gnu_mcount_nc" , VT: PtrVT, TargetFlags: 0); |
| 3819 | SDValue RegisterMask = DAG.getRegisterMask(RegMask: Mask); |
| 3820 | if (Subtarget->isThumb()) |
| 3821 | return SDValue( |
| 3822 | DAG.getMachineNode( |
| 3823 | Opcode: ARM::tBL_PUSHLR, dl, ResultTys, |
| 3824 | Ops: {ReturnAddress, DAG.getTargetConstant(Val: ARMCC::AL, DL: dl, VT: PtrVT), |
| 3825 | DAG.getRegister(Reg: 0, VT: PtrVT), Callee, RegisterMask, Chain}), |
| 3826 | 0); |
| 3827 | return SDValue( |
| 3828 | DAG.getMachineNode(Opcode: ARM::BL_PUSHLR, dl, ResultTys, |
| 3829 | Ops: {ReturnAddress, Callee, RegisterMask, Chain}), |
| 3830 | 0); |
| 3831 | } |
| 3832 | } |
| 3833 | } |
| 3834 | |
| 3835 | SDValue |
| 3836 | ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, |
| 3837 | const ARMSubtarget *Subtarget) const { |
| 3838 | unsigned IntNo = Op.getConstantOperandVal(i: 0); |
| 3839 | SDLoc dl(Op); |
| 3840 | switch (IntNo) { |
| 3841 | default: return SDValue(); // Don't custom lower most intrinsics. |
| 3842 | case Intrinsic::thread_pointer: { |
| 3843 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3844 | return DAG.getNode(Opcode: ARMISD::THREAD_POINTER, DL: dl, VT: PtrVT); |
| 3845 | } |
| 3846 | case Intrinsic::arm_cls: { |
| 3847 | const SDValue &Operand = Op.getOperand(i: 1); |
| 3848 | const EVT VTy = Op.getValueType(); |
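| | // cls(x) is computed as ctlz(((x ^ (x >> 31)) << 1) | 1): xoring with the |
| | // sign splat turns the leading sign-matching bits into zeros, and the |
| | // "<< 1 | 1" drops the sign bit itself while keeping the ctlz input nonzero. |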
| 3849 | SDValue SRA = |
| 3850 | DAG.getNode(Opcode: ISD::SRA, DL: dl, VT: VTy, N1: Operand, N2: DAG.getConstant(Val: 31, DL: dl, VT: VTy)); |
| 3851 | SDValue XOR = DAG.getNode(Opcode: ISD::XOR, DL: dl, VT: VTy, N1: SRA, N2: Operand); |
| 3852 | SDValue SHL = |
| 3853 | DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: VTy, N1: XOR, N2: DAG.getConstant(Val: 1, DL: dl, VT: VTy)); |
| 3854 | SDValue OR = |
| 3855 | DAG.getNode(Opcode: ISD::OR, DL: dl, VT: VTy, N1: SHL, N2: DAG.getConstant(Val: 1, DL: dl, VT: VTy)); |
| 3856 | SDValue Result = DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT: VTy, Operand: OR); |
| 3857 | return Result; |
| 3858 | } |
| 3859 | case Intrinsic::arm_cls64: { |
| 3860 | // cls(x) = if cls(hi(x)) != 31 then cls(hi(x)) |
| 3861 | // else 31 + clz(if hi(x) == 0 then lo(x) else not(lo(x))) |
| 3862 | const SDValue &Operand = Op.getOperand(i: 1); |
| 3863 | const EVT VTy = Op.getValueType(); |
| 3864 | SDValue Lo, Hi; |
| 3865 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: Operand, DL: dl, LoVT: VTy, HiVT: VTy); |
| 3866 | SDValue Constant0 = DAG.getConstant(Val: 0, DL: dl, VT: VTy); |
| 3867 | SDValue Constant1 = DAG.getConstant(Val: 1, DL: dl, VT: VTy); |
| 3868 | SDValue Constant31 = DAG.getConstant(Val: 31, DL: dl, VT: VTy); |
| 3869 | SDValue SRAHi = DAG.getNode(Opcode: ISD::SRA, DL: dl, VT: VTy, N1: Hi, N2: Constant31); |
| 3870 | SDValue XORHi = DAG.getNode(Opcode: ISD::XOR, DL: dl, VT: VTy, N1: SRAHi, N2: Hi); |
| 3871 | SDValue SHLHi = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: VTy, N1: XORHi, N2: Constant1); |
| 3872 | SDValue ORHi = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: VTy, N1: SHLHi, N2: Constant1); |
| 3873 | SDValue CLSHi = DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT: VTy, Operand: ORHi); |
| 3874 | SDValue CheckLo = |
| 3875 | DAG.getSetCC(DL: dl, VT: MVT::i1, LHS: CLSHi, RHS: Constant31, Cond: ISD::CondCode::SETEQ); |
| 3876 | SDValue HiIsZero = |
| 3877 | DAG.getSetCC(DL: dl, VT: MVT::i1, LHS: Hi, RHS: Constant0, Cond: ISD::CondCode::SETEQ); |
| 3878 | SDValue AdjustedLo = |
| 3879 | DAG.getSelect(DL: dl, VT: VTy, Cond: HiIsZero, LHS: Lo, RHS: DAG.getNOT(DL: dl, Val: Lo, VT: VTy)); |
| 3880 | SDValue CLZAdjustedLo = DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT: VTy, Operand: AdjustedLo); |
| 3881 | SDValue Result = |
| 3882 | DAG.getSelect(DL: dl, VT: VTy, Cond: CheckLo, |
| 3883 | LHS: DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: VTy, N1: CLZAdjustedLo, N2: Constant31), RHS: CLSHi); |
| 3884 | return Result; |
| 3885 | } |
| 3886 | case Intrinsic::eh_sjlj_lsda: { |
| 3887 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3888 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3889 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 3890 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3891 | SDValue CPAddr; |
| 3892 | bool IsPositionIndependent = isPositionIndependent(); |
| 3893 | unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0; |
| 3894 | ARMConstantPoolValue *CPV = |
| 3895 | ARMConstantPoolConstant::Create(C: &MF.getFunction(), ID: ARMPCLabelIndex, |
| 3896 | Kind: ARMCP::CPLSDA, PCAdj); |
| 3897 | CPAddr = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
| 3898 | CPAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: CPAddr); |
| 3899 | SDValue Result = DAG.getLoad( |
| 3900 | VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: CPAddr, |
| 3901 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
| 3902 | |
| 3903 | if (IsPositionIndependent) { |
| 3904 | SDValue PICLabel = DAG.getConstant(Val: ARMPCLabelIndex, DL: dl, VT: MVT::i32); |
| 3905 | Result = DAG.getNode(Opcode: ARMISD::PIC_ADD, DL: dl, VT: PtrVT, N1: Result, N2: PICLabel); |
| 3906 | } |
| 3907 | return Result; |
| 3908 | } |
| 3909 | case Intrinsic::arm_neon_vabs: |
| 3910 | return DAG.getNode(Opcode: ISD::ABS, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3911 | Operand: Op.getOperand(i: 1)); |
| 3912 | case Intrinsic::arm_neon_vabds: |
| 3913 | if (Op.getValueType().isInteger()) |
| 3914 | return DAG.getNode(Opcode: ISD::ABDS, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3915 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 3916 | return SDValue(); |
| 3917 | case Intrinsic::arm_neon_vabdu: |
| 3918 | return DAG.getNode(Opcode: ISD::ABDU, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3919 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 3920 | case Intrinsic::arm_neon_vmulls: |
| 3921 | case Intrinsic::arm_neon_vmullu: { |
| 3922 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) |
| 3923 | ? ARMISD::VMULLs : ARMISD::VMULLu; |
| 3924 | return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3925 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 3926 | } |
| 3927 | case Intrinsic::arm_neon_vminnm: |
| 3928 | case Intrinsic::arm_neon_vmaxnm: { |
| 3929 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm) |
| 3930 | ? ISD::FMINNUM : ISD::FMAXNUM; |
| 3931 | return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3932 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 3933 | } |
| 3934 | case Intrinsic::arm_neon_vminu: |
| 3935 | case Intrinsic::arm_neon_vmaxu: { |
| 3936 | if (Op.getValueType().isFloatingPoint()) |
| 3937 | return SDValue(); |
| 3938 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu) |
| 3939 | ? ISD::UMIN : ISD::UMAX; |
| 3940 | return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3941 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 3942 | } |
| 3943 | case Intrinsic::arm_neon_vmins: |
| 3944 | case Intrinsic::arm_neon_vmaxs: { |
| 3945 | // v{min,max}s is overloaded between signed integers and floats. |
| 3946 | if (!Op.getValueType().isFloatingPoint()) { |
| 3947 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) |
| 3948 | ? ISD::SMIN : ISD::SMAX; |
| 3949 | return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3950 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 3951 | } |
| 3952 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) |
| 3953 | ? ISD::FMINIMUM : ISD::FMAXIMUM; |
| 3954 | return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3955 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 3956 | } |
| 3957 | case Intrinsic::arm_neon_vtbl1: |
| 3958 | return DAG.getNode(Opcode: ARMISD::VTBL1, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3959 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
| 3960 | case Intrinsic::arm_neon_vtbl2: |
| 3961 | return DAG.getNode(Opcode: ARMISD::VTBL2, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3962 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2), N3: Op.getOperand(i: 3)); |
| 3963 | case Intrinsic::arm_mve_pred_i2v: |
| 3964 | case Intrinsic::arm_mve_pred_v2i: |
| 3965 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3966 | Operand: Op.getOperand(i: 1)); |
| 3967 | case Intrinsic::arm_mve_vreinterpretq: |
| 3968 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: SDLoc(Op), VT: Op.getValueType(), |
| 3969 | Operand: Op.getOperand(i: 1)); |
| 3970 | case Intrinsic::arm_mve_lsll: |
| 3971 | return DAG.getNode(Opcode: ARMISD::LSLL, DL: SDLoc(Op), VTList: Op->getVTList(), |
| 3972 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2), N3: Op.getOperand(i: 3)); |
| 3973 | case Intrinsic::arm_mve_asrl: |
| 3974 | return DAG.getNode(Opcode: ARMISD::ASRL, DL: SDLoc(Op), VTList: Op->getVTList(), |
| 3975 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2), N3: Op.getOperand(i: 3)); |
| 3976 | } |
| 3977 | } |
| 3978 | |
| 3979 | static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, |
| 3980 | const ARMSubtarget *Subtarget) { |
| 3981 | SDLoc dl(Op); |
| 3982 | auto SSID = static_cast<SyncScope::ID>(Op.getConstantOperandVal(i: 2)); |
| 3983 | if (SSID == SyncScope::SingleThread) |
| 3984 | return Op; |
| 3985 | |
| 3986 | if (!Subtarget->hasDataBarrier()) { |
| 3987 | // Some ARMv6 cpus can support data barriers with an mcr instruction. |
| 3988 | // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get |
| 3989 | // here. |
| 3990 | assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && |
| 3991 | "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!" ); |
| 3992 | return DAG.getNode(Opcode: ARMISD::MEMBARRIER_MCR, DL: dl, VT: MVT::Other, N1: Op.getOperand(i: 0), |
| 3993 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 3994 | } |
| 3995 | |
| 3996 | AtomicOrdering Ord = |
| 3997 | static_cast<AtomicOrdering>(Op.getConstantOperandVal(i: 1)); |
| 3998 | ARM_MB::MemBOpt Domain = ARM_MB::ISH; |
| 3999 | if (Subtarget->isMClass()) { |
| 4000 | // Only a full system barrier exists in the M-class architectures. |
| 4001 | Domain = ARM_MB::SY; |
| 4002 | } else if (Subtarget->preferISHSTBarriers() && |
| 4003 | Ord == AtomicOrdering::Release) { |
| 4004 | // Swift happens to implement ISHST barriers in a way that's compatible with |
| 4005 | // Release semantics but weaker than ISH so we'd be fools not to use |
| 4006 | // it. Beware: other processors probably don't! |
| 4007 | Domain = ARM_MB::ISHST; |
| 4008 | } |
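| | // The fence is then emitted as a data memory barrier via arm_dmb: typically |
| | // "dmb ish", "dmb ishst" for the Swift release-only case above, or "dmb sy" |
| | // on M-class cores. |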
| 4009 | |
| 4010 | return DAG.getNode(Opcode: ISD::INTRINSIC_VOID, DL: dl, VT: MVT::Other, N1: Op.getOperand(i: 0), |
| 4011 | N2: DAG.getConstant(Val: Intrinsic::arm_dmb, DL: dl, VT: MVT::i32), |
| 4012 | N3: DAG.getConstant(Val: Domain, DL: dl, VT: MVT::i32)); |
| 4013 | } |
| 4014 | |
| 4015 | static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, |
| 4016 | const ARMSubtarget *Subtarget) { |
| 4017 | // ARM pre-v5TE and Thumb1 do not have preload instructions. |
| 4018 | if (!(Subtarget->isThumb2() || |
| 4019 | (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps()))) |
| 4020 | // Just preserve the chain. |
| 4021 | return Op.getOperand(i: 0); |
| 4022 | |
| 4023 | SDLoc dl(Op); |
| 4024 | unsigned isRead = ~Op.getConstantOperandVal(i: 2) & 1; |
| 4025 | if (!isRead && |
| 4026 | (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) |
| 4027 | // ARMv7 with MP extension has PLDW. |
| 4028 | return Op.getOperand(i: 0); |
| 4029 | |
| 4030 | unsigned isData = Op.getConstantOperandVal(i: 4); |
| 4031 | if (Subtarget->isThumb()) { |
| 4032 | // Invert the bits. |
| 4033 | isRead = ~isRead & 1; |
| 4034 | isData = ~isData & 1; |
| 4035 | } |
| 4036 | |
| 4037 | return DAG.getNode(Opcode: ARMISD::PRELOAD, DL: dl, VT: MVT::Other, N1: Op.getOperand(i: 0), |
| 4038 | N2: Op.getOperand(i: 1), N3: DAG.getConstant(Val: isRead, DL: dl, VT: MVT::i32), |
| 4039 | N4: DAG.getConstant(Val: isData, DL: dl, VT: MVT::i32)); |
| 4040 | } |
| 4041 | |
| 4042 | static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { |
| 4043 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4044 | ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); |
| 4045 | |
| 4046 | // vastart just stores the address of the VarArgsFrameIndex slot into the |
| 4047 | // memory location argument. |
| 4048 | SDLoc dl(Op); |
| 4049 | EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DL: DAG.getDataLayout()); |
| 4050 | SDValue FR = DAG.getFrameIndex(FI: FuncInfo->getVarArgsFrameIndex(), VT: PtrVT); |
| 4051 | const Value *SV = cast<SrcValueSDNode>(Val: Op.getOperand(i: 2))->getValue(); |
| 4052 | return DAG.getStore(Chain: Op.getOperand(i: 0), dl, Val: FR, Ptr: Op.getOperand(i: 1), |
| 4053 | PtrInfo: MachinePointerInfo(SV)); |
| 4054 | } |
| 4055 | |
| 4056 | SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, |
| 4057 | CCValAssign &NextVA, |
| 4058 | SDValue &Root, |
| 4059 | SelectionDAG &DAG, |
| 4060 | const SDLoc &dl) const { |
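| | // When an f64 argument is passed in core registers (soft-float ABI), it |
| | // arrives as two 32-bit halves, in GPRs and/or on the stack; the halves are |
| | // fetched below and recombined into an f64 with VMOVDRR. |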
| 4061 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4062 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 4063 | |
| 4064 | const TargetRegisterClass *RC; |
| 4065 | if (AFI->isThumb1OnlyFunction()) |
| 4066 | RC = &ARM::tGPRRegClass; |
| 4067 | else |
| 4068 | RC = &ARM::GPRRegClass; |
| 4069 | |
| 4070 | // Transform the arguments stored in physical registers into virtual ones. |
| 4071 | Register Reg = MF.addLiveIn(PReg: VA.getLocReg(), RC); |
| 4072 | SDValue ArgValue = DAG.getCopyFromReg(Chain: Root, dl, Reg, VT: MVT::i32); |
| 4073 | |
| 4074 | SDValue ArgValue2; |
| 4075 | if (NextVA.isMemLoc()) { |
| 4076 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 4077 | int FI = MFI.CreateFixedObject(Size: 4, SPOffset: NextVA.getLocMemOffset(), IsImmutable: true); |
| 4078 | |
| 4079 | // Create load node to retrieve arguments from the stack. |
| 4080 | SDValue FIN = DAG.getFrameIndex(FI, VT: getPointerTy(DL: DAG.getDataLayout())); |
| 4081 | ArgValue2 = DAG.getLoad( |
| 4082 | VT: MVT::i32, dl, Chain: Root, Ptr: FIN, |
| 4083 | PtrInfo: MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI)); |
| 4084 | } else { |
| 4085 | Reg = MF.addLiveIn(PReg: NextVA.getLocReg(), RC); |
| 4086 | ArgValue2 = DAG.getCopyFromReg(Chain: Root, dl, Reg, VT: MVT::i32); |
| 4087 | } |
| 4088 | if (!Subtarget->isLittle()) |
| 4089 | std::swap (a&: ArgValue, b&: ArgValue2); |
| 4090 | return DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: ArgValue, N2: ArgValue2); |
| 4091 | } |
| 4092 | |
| 4093 | // The remaining GPRs hold either the beginning of variable-argument |
| 4094 | // data, or the beginning of an aggregate passed by value (usually |
| 4095 | // byval). Either way, we allocate stack slots adjacent to the data |
| 4096 | // provided by our caller, and store the unallocated registers there. |
| 4097 | // If this is a variadic function, the va_list pointer will begin with |
| 4098 | // these values; otherwise, this reassembles a (byval) structure that |
| 4099 | // was split between registers and memory. |
| 4100 | // Return: the frame index that the registers were stored into. |
| 4101 | int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, |
| 4102 | const SDLoc &dl, SDValue &Chain, |
| 4103 | const Value *OrigArg, |
| 4104 | unsigned InRegsParamRecordIdx, |
| 4105 | int ArgOffset, unsigned ArgSize) const { |
| 4106 | // Currently, two use-cases are possible: |
| 4107 | // Case #1. Non-var-args function, and we meet the first byval parameter. |
| 4108 | // Set up the first unallocated register as the first byval register; |
| 4109 | // eat all remaining registers |
| 4110 | // (these two actions are performed by the HandleByVal method). |
| 4111 | // Then, here, we initialize the stack frame with |
| 4112 | // "store-reg" instructions. |
| 4113 | // Case #2. Var-args function that doesn't contain byval parameters. |
| 4114 | // The same: eat all remaining unallocated registers and |
| 4115 | // initialize the stack frame. |
| 4116 | |
| 4117 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4118 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 4119 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 4120 | unsigned RBegin, REnd; |
| 4121 | if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) { |
| 4122 | CCInfo.getInRegsParamInfo(InRegsParamRecordIndex: InRegsParamRecordIdx, BeginReg&: RBegin, EndReg&: REnd); |
| 4123 | } else { |
| 4124 | unsigned RBeginIdx = CCInfo.getFirstUnallocated(Regs: GPRArgRegs); |
| 4125 | RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx]; |
| 4126 | REnd = ARM::R4; |
| 4127 | } |
| 4128 | |
| 4129 | if (REnd != RBegin) |
| 4130 | ArgOffset = -4 * (ARM::R4 - RBegin); |
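| | // E.g. with RBegin == r2 the fixed object below is created at offset -8, so |
| | // that r2 and r3 are stored in the two words immediately below the incoming |
| | // stack arguments. |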
| 4131 | |
| 4132 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 4133 | int FrameIndex = MFI.CreateFixedObject(Size: ArgSize, SPOffset: ArgOffset, IsImmutable: false); |
| 4134 | SDValue FIN = DAG.getFrameIndex(FI: FrameIndex, VT: PtrVT); |
| 4135 | |
| 4136 | SmallVector<SDValue, 4> MemOps; |
| 4137 | const TargetRegisterClass *RC = |
| 4138 | AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass; |
| 4139 | |
| 4140 | for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) { |
| 4141 | Register VReg = MF.addLiveIn(PReg: Reg, RC); |
| 4142 | SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg: VReg, VT: MVT::i32); |
| 4143 | SDValue Store = DAG.getStore(Chain: Val.getValue(R: 1), dl, Val, Ptr: FIN, |
| 4144 | PtrInfo: MachinePointerInfo(OrigArg, 4 * i)); |
| 4145 | MemOps.push_back(Elt: Store); |
| 4146 | FIN = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: FIN, N2: DAG.getConstant(Val: 4, DL: dl, VT: PtrVT)); |
| 4147 | } |
| 4148 | |
| 4149 | if (!MemOps.empty()) |
| 4150 | Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, Ops: MemOps); |
| 4151 | return FrameIndex; |
| 4152 | } |
| 4153 | |
| 4154 | // Set up the stack frame that the va_list pointer will start from. |
| 4155 | void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, |
| 4156 | const SDLoc &dl, SDValue &Chain, |
| 4157 | unsigned ArgOffset, |
| 4158 | unsigned TotalArgRegsSaveSize, |
| 4159 | bool ForceMutable) const { |
| 4160 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4161 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 4162 | |
| 4163 | // Try to store any remaining integer argument regs |
| 4164 | // to their spots on the stack so that they may be loaded by dereferencing |
| 4165 | // the result of va_next. |
| 4166 | // If there are no regs to be stored, just point the address after the last |
| 4167 | // argument passed via the stack. |
| 4168 | int FrameIndex = StoreByValRegs( |
| 4169 | CCInfo, DAG, dl, Chain, OrigArg: nullptr, InRegsParamRecordIdx: CCInfo.getInRegsParamsCount(), |
| 4170 | ArgOffset: CCInfo.getStackSize(), ArgSize: std::max(a: 4U, b: TotalArgRegsSaveSize)); |
| 4171 | AFI->setVarArgsFrameIndex(FrameIndex); |
| 4172 | } |
| 4173 | |
| 4174 | bool ARMTargetLowering::splitValueIntoRegisterParts( |
| 4175 | SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, |
| 4176 | unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const { |
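| | // An f16/bf16 value passed in an f32 register occupies the low 16 bits of |
| | // that register; the bitcast / any_extend / bitcast chain below models this |
| | // instead of the generic promotion to f32. |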
| 4177 | EVT ValueVT = Val.getValueType(); |
| 4178 | if ((ValueVT == MVT::f16 || ValueVT == MVT::bf16) && PartVT == MVT::f32) { |
| 4179 | unsigned ValueBits = ValueVT.getSizeInBits(); |
| 4180 | unsigned PartBits = PartVT.getSizeInBits(); |
| 4181 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::getIntegerVT(BitWidth: ValueBits), Operand: Val); |
| 4182 | Val = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::getIntegerVT(BitWidth: PartBits), Operand: Val); |
| 4183 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: PartVT, Operand: Val); |
| 4184 | Parts[0] = Val; |
| 4185 | return true; |
| 4186 | } |
| 4187 | return false; |
| 4188 | } |
| 4189 | |
| 4190 | SDValue ARMTargetLowering::joinRegisterPartsIntoValue( |
| 4191 | SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, |
| 4192 | MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const { |
| 4193 | if ((ValueVT == MVT::f16 || ValueVT == MVT::bf16) && PartVT == MVT::f32) { |
| 4194 | unsigned ValueBits = ValueVT.getSizeInBits(); |
| 4195 | unsigned PartBits = PartVT.getSizeInBits(); |
| 4196 | SDValue Val = Parts[0]; |
| 4197 | |
| 4198 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::getIntegerVT(BitWidth: PartBits), Operand: Val); |
| 4199 | Val = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::getIntegerVT(BitWidth: ValueBits), Operand: Val); |
| 4200 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: ValueVT, Operand: Val); |
| 4201 | return Val; |
| 4202 | } |
| 4203 | return SDValue(); |
| 4204 | } |
| 4205 | |
| 4206 | SDValue ARMTargetLowering::LowerFormalArguments( |
| 4207 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
| 4208 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
| 4209 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { |
| 4210 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4211 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 4212 | |
| 4213 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 4214 | |
| 4215 | // Assign locations to all of the incoming arguments. |
| 4216 | SmallVector<CCValAssign, 16> ArgLocs; |
| 4217 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, |
| 4218 | *DAG.getContext()); |
| 4219 | CCInfo.AnalyzeFormalArguments(Ins, Fn: CCAssignFnForCall(CC: CallConv, isVarArg)); |
| 4220 | |
| 4221 | Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin(); |
| 4222 | unsigned CurArgIdx = 0; |
| 4223 | |
| 4224 | // Initially ArgRegsSaveSize is zero. |
| 4225 | // Then we increase this value each time we meet a byval parameter. |
| 4226 | // We also increase this value in the case of a varargs function. |
| 4227 | AFI->setArgRegsSaveSize(0); |
| 4228 | |
| 4229 | // Calculate the amount of stack space that we need to allocate to store |
| 4230 | // byval and variadic arguments that are passed in registers. |
| 4231 | // We need to know this before we allocate the first byval or variadic |
| 4232 | // argument, as they will be allocated a stack slot below the CFA (Canonical |
| 4233 | // Frame Address, the stack pointer at entry to the function). |
| 4234 | unsigned ArgRegBegin = ARM::R4; |
| 4235 | for (const CCValAssign &VA : ArgLocs) { |
| 4236 | if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount()) |
| 4237 | break; |
| 4238 | |
| 4239 | unsigned Index = VA.getValNo(); |
| 4240 | ISD::ArgFlagsTy Flags = Ins[Index].Flags; |
| 4241 | if (!Flags.isByVal()) |
| 4242 | continue; |
| 4243 | |
| 4244 | assert(VA.isMemLoc() && "unexpected byval pointer in reg"); |
| 4245 | unsigned RBegin, REnd; |
| 4246 | CCInfo.getInRegsParamInfo(InRegsParamRecordIndex: CCInfo.getInRegsParamsProcessed(), BeginReg&: RBegin, EndReg&: REnd); |
| 4247 | ArgRegBegin = std::min(a: ArgRegBegin, b: RBegin); |
| 4248 | |
| 4249 | CCInfo.nextInRegsParam(); |
| 4250 | } |
| 4251 | CCInfo.rewindByValRegsInfo(); |
| 4252 | |
| 4253 | int lastInsIndex = -1; |
| 4254 | if (isVarArg && MFI.hasVAStart()) { |
| 4255 | unsigned RegIdx = CCInfo.getFirstUnallocated(Regs: GPRArgRegs); |
| 4256 | if (RegIdx != std::size(GPRArgRegs)) |
| 4257 | ArgRegBegin = std::min(a: ArgRegBegin, b: (unsigned)GPRArgRegs[RegIdx]); |
| 4258 | } |
| 4259 | |
| 4260 | unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin); |
| 4261 | AFI->setArgRegsSaveSize(TotalArgRegsSaveSize); |
| 4262 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 4263 | |
| 4264 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { |
| 4265 | CCValAssign &VA = ArgLocs[i]; |
| 4266 | if (Ins[VA.getValNo()].isOrigArg()) { |
| 4267 | std::advance(i&: CurOrigArg, |
| 4268 | n: Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx); |
| 4269 | CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex(); |
| 4270 | } |
| 4271 | // Arguments stored in registers. |
| 4272 | if (VA.isRegLoc()) { |
| 4273 | EVT RegVT = VA.getLocVT(); |
| 4274 | SDValue ArgValue; |
| 4275 | |
| 4276 | if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) { |
| 4277 | // f64 and vector types are split up into multiple registers or |
| 4278 | // combinations of registers and stack slots. |
| 4279 | SDValue ArgValue1 = |
| 4280 | GetF64FormalArgument(VA, NextVA&: ArgLocs[++i], Root&: Chain, DAG, dl); |
| 4281 | VA = ArgLocs[++i]; // skip ahead to next loc |
| 4282 | SDValue ArgValue2; |
| 4283 | if (VA.isMemLoc()) { |
| 4284 | int FI = MFI.CreateFixedObject(Size: 8, SPOffset: VA.getLocMemOffset(), IsImmutable: true); |
| 4285 | SDValue FIN = DAG.getFrameIndex(FI, VT: PtrVT); |
| 4286 | ArgValue2 = DAG.getLoad( |
| 4287 | VT: MVT::f64, dl, Chain, Ptr: FIN, |
| 4288 | PtrInfo: MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI)); |
| 4289 | } else { |
| 4290 | ArgValue2 = GetF64FormalArgument(VA, NextVA&: ArgLocs[++i], Root&: Chain, DAG, dl); |
| 4291 | } |
| 4292 | ArgValue = DAG.getNode(Opcode: ISD::UNDEF, DL: dl, VT: MVT::v2f64); |
| 4293 | ArgValue = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: ArgValue, |
| 4294 | N2: ArgValue1, N3: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
| 4295 | ArgValue = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: ArgValue, |
| 4296 | N2: ArgValue2, N3: DAG.getIntPtrConstant(Val: 1, DL: dl)); |
| 4297 | } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) { |
| 4298 | ArgValue = GetF64FormalArgument(VA, NextVA&: ArgLocs[++i], Root&: Chain, DAG, dl); |
| 4299 | } else { |
| 4300 | const TargetRegisterClass *RC; |
| 4301 | |
| 4302 | if (RegVT == MVT::f16 || RegVT == MVT::bf16) |
| 4303 | RC = &ARM::HPRRegClass; |
| 4304 | else if (RegVT == MVT::f32) |
| 4305 | RC = &ARM::SPRRegClass; |
| 4306 | else if (RegVT == MVT::f64 || RegVT == MVT::v4f16 || |
| 4307 | RegVT == MVT::v4bf16) |
| 4308 | RC = &ARM::DPRRegClass; |
| 4309 | else if (RegVT == MVT::v2f64 || RegVT == MVT::v8f16 || |
| 4310 | RegVT == MVT::v8bf16) |
| 4311 | RC = &ARM::QPRRegClass; |
| 4312 | else if (RegVT == MVT::i32) |
| 4313 | RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass |
| 4314 | : &ARM::GPRRegClass; |
| 4315 | else |
| 4316 | llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering" ); |
| 4317 | |
| 4318 | // Transform the arguments in physical registers into virtual ones. |
| 4319 | Register Reg = MF.addLiveIn(PReg: VA.getLocReg(), RC); |
| 4320 | ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, VT: RegVT); |
| 4321 | |
| 4322 | // If this value is passed in r0 and has the returned attribute (e.g. |
| 4323 | // C++ 'structors), record this fact for later use. |
| 4324 | if (VA.getLocReg() == ARM::R0 && Ins[VA.getValNo()].Flags.isReturned()) { |
| 4325 | AFI->setPreservesR0(); |
| 4326 | } |
| 4327 | } |
| 4328 | |
| 4329 | // If this is an 8 or 16-bit value, it is really passed promoted |
| 4330 | // to 32 bits. Insert an assert[sz]ext to capture this, then |
| 4331 | // truncate to the right size. |
| 4332 | switch (VA.getLocInfo()) { |
| 4333 | default: llvm_unreachable("Unknown loc info!" ); |
| 4334 | case CCValAssign::Full: break; |
| 4335 | case CCValAssign::BCvt: |
| 4336 | ArgValue = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getValVT(), Operand: ArgValue); |
| 4337 | break; |
| 4338 | } |
| 4339 | |
| 4340 | // f16 arguments have their size extended to 4 bytes and passed as if they |
| 4341 | // had been copied to the LSBs of a 32-bit register. |
// For that, they are passed extended to i32 (soft ABI) or to f32 (hard ABI).
| 4343 | if (VA.needsCustom() && |
| 4344 | (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) |
| 4345 | ArgValue = MoveToHPR(dl, DAG, LocVT: VA.getLocVT(), ValVT: VA.getValVT(), Val: ArgValue); |
| 4346 | |
| 4347 | // On CMSE Entry Functions, formal integer arguments whose bitwidth is |
| 4348 | // less than 32 bits must be sign- or zero-extended in the callee for |
| 4349 | // security reasons. Although the ABI mandates an extension done by the |
| 4350 | // caller, the latter cannot be trusted to follow the rules of the ABI. |
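// For example, an i8 argument arriving in r0 is explicitly re-extended here
// in the callee (e.g. with sxtb or uxtb) rather than trusting the upper bits
// the caller left in the register.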
| 4351 | const ISD::InputArg &Arg = Ins[VA.getValNo()]; |
| 4352 | if (AFI->isCmseNSEntryFunction() && Arg.ArgVT.isScalarInteger() && |
| 4353 | RegVT.isScalarInteger() && Arg.ArgVT.bitsLT(VT: MVT::i32)) |
| 4354 | ArgValue = handleCMSEValue(Value: ArgValue, Arg, DAG, DL: dl); |
| 4355 | |
| 4356 | InVals.push_back(Elt: ArgValue); |
| 4357 | } else { // VA.isRegLoc() |
| 4358 | // Only arguments passed on the stack should make it here. |
| 4359 | assert(VA.isMemLoc()); |
| 4360 | assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered" ); |
| 4361 | |
| 4362 | int index = VA.getValNo(); |
| 4363 | |
| 4364 | // Some Ins[] entries become multiple ArgLoc[] entries. |
| 4365 | // Process them only once. |
if (index != lastInsIndex) {
| 4368 | ISD::ArgFlagsTy Flags = Ins[index].Flags; |
| 4369 | // FIXME: For now, all byval parameter objects are marked mutable. |
| 4370 | // This can be changed with more analysis. |
// In case of tail call optimization, mark all arguments mutable, since they
// could be overwritten when the arguments of a tail call are lowered.
| 4374 | if (Flags.isByVal()) { |
| 4375 | assert(Ins[index].isOrigArg() && |
| 4376 | "Byval arguments cannot be implicit" ); |
| 4377 | unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed(); |
| 4378 | |
| 4379 | int FrameIndex = StoreByValRegs( |
| 4380 | CCInfo, DAG, dl, Chain, OrigArg: &*CurOrigArg, InRegsParamRecordIdx: CurByValIndex, |
| 4381 | ArgOffset: VA.getLocMemOffset(), ArgSize: Flags.getByValSize()); |
| 4382 | InVals.push_back(Elt: DAG.getFrameIndex(FI: FrameIndex, VT: PtrVT)); |
| 4383 | CCInfo.nextInRegsParam(); |
| 4384 | } else if (VA.needsCustom() && (VA.getValVT() == MVT::f16 || |
| 4385 | VA.getValVT() == MVT::bf16)) { |
| 4386 | // f16 and bf16 values are passed in the least-significant half of |
| 4387 | // a 4 byte stack slot. This is done as-if the extension was done |
| 4388 | // in a 32-bit register, so the actual bytes used for the value |
| 4389 | // differ between little and big endian. |
| 4390 | assert(VA.getLocVT().getSizeInBits() == 32); |
| 4391 | unsigned FIOffset = VA.getLocMemOffset(); |
| 4392 | int FI = MFI.CreateFixedObject(Size: VA.getLocVT().getSizeInBits() / 8, |
| 4393 | SPOffset: FIOffset, IsImmutable: true); |
| 4394 | |
| 4395 | SDValue Addr = DAG.getFrameIndex(FI, VT: PtrVT); |
| 4396 | if (DAG.getDataLayout().isBigEndian()) |
| 4397 | Addr = DAG.getObjectPtrOffset(SL: dl, Ptr: Addr, Offset: TypeSize::getFixed(ExactSize: 2)); |
| 4398 | |
| 4399 | InVals.push_back(Elt: DAG.getLoad(VT: VA.getValVT(), dl, Chain, Ptr: Addr, |
| 4400 | PtrInfo: MachinePointerInfo::getFixedStack( |
| 4401 | MF&: DAG.getMachineFunction(), FI))); |
| 4402 | |
| 4403 | } else { |
| 4404 | unsigned FIOffset = VA.getLocMemOffset(); |
| 4405 | int FI = MFI.CreateFixedObject(Size: VA.getLocVT().getSizeInBits()/8, |
| 4406 | SPOffset: FIOffset, IsImmutable: true); |
| 4407 | |
| 4408 | // Create load nodes to retrieve arguments from the stack. |
| 4409 | SDValue FIN = DAG.getFrameIndex(FI, VT: PtrVT); |
| 4410 | InVals.push_back(Elt: DAG.getLoad(VT: VA.getValVT(), dl, Chain, Ptr: FIN, |
| 4411 | PtrInfo: MachinePointerInfo::getFixedStack( |
| 4412 | MF&: DAG.getMachineFunction(), FI))); |
| 4413 | } |
| 4414 | lastInsIndex = index; |
| 4415 | } |
| 4416 | } |
| 4417 | } |
| 4418 | |
| 4419 | // varargs |
| 4420 | if (isVarArg && MFI.hasVAStart()) { |
| 4421 | VarArgStyleRegisters(CCInfo, DAG, dl, Chain, ArgOffset: CCInfo.getStackSize(), |
| 4422 | TotalArgRegsSaveSize); |
| 4423 | if (AFI->isCmseNSEntryFunction()) { |
| 4424 | DAG.getContext()->diagnose(DI: DiagnosticInfoUnsupported( |
| 4425 | DAG.getMachineFunction().getFunction(), |
| 4426 | "secure entry function must not be variadic" , dl.getDebugLoc())); |
| 4427 | } |
| 4428 | } |
| 4429 | |
| 4430 | unsigned StackArgSize = CCInfo.getStackSize(); |
| 4431 | bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; |
| 4432 | if (canGuaranteeTCO(CC: CallConv, GuaranteeTailCalls: TailCallOpt)) { |
| 4433 | // The only way to guarantee a tail call is if the callee restores its |
| 4434 | // argument area, but it must also keep the stack aligned when doing so. |
| 4435 | MaybeAlign StackAlign = DAG.getDataLayout().getStackAlignment(); |
| 4436 | assert(StackAlign && "data layout string is missing stack alignment" ); |
| 4437 | StackArgSize = alignTo(Size: StackArgSize, A: *StackAlign); |
| 4438 | |
| 4439 | AFI->setArgumentStackToRestore(StackArgSize); |
| 4440 | } |
| 4441 | AFI->setArgumentStackSize(StackArgSize); |
| 4442 | |
| 4443 | if (CCInfo.getStackSize() > 0 && AFI->isCmseNSEntryFunction()) { |
| 4444 | DAG.getContext()->diagnose(DI: DiagnosticInfoUnsupported( |
| 4445 | DAG.getMachineFunction().getFunction(), |
| 4446 | "secure entry function requires arguments on stack" , dl.getDebugLoc())); |
| 4447 | } |
| 4448 | |
| 4449 | return Chain; |
| 4450 | } |
| 4451 | |
| 4452 | /// isFloatingPointZero - Return true if this is +0.0. |
| 4453 | static bool isFloatingPointZero(SDValue Op) { |
| 4454 | if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Val&: Op)) |
| 4455 | return CFP->getValueAPF().isPosZero(); |
| 4456 | else if (ISD::isEXTLoad(N: Op.getNode()) || ISD::isNON_EXTLoad(N: Op.getNode())) { |
| 4457 | // Maybe this has already been legalized into the constant pool? |
| 4458 | if (Op.getOperand(i: 1).getOpcode() == ARMISD::Wrapper) { |
| 4459 | SDValue WrapperOp = Op.getOperand(i: 1).getOperand(i: 0); |
| 4460 | if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Val&: WrapperOp)) |
| 4461 | if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Val: CP->getConstVal())) |
| 4462 | return CFP->getValueAPF().isPosZero(); |
| 4463 | } |
| 4464 | } else if (Op->getOpcode() == ISD::BITCAST && |
| 4465 | Op->getValueType(ResNo: 0) == MVT::f64) { |
| 4466 | // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64) |
| 4467 | // created by LowerConstantFP(). |
| 4468 | SDValue BitcastOp = Op->getOperand(Num: 0); |
| 4469 | if (BitcastOp->getOpcode() == ARMISD::VMOVIMM && |
| 4470 | isNullConstant(V: BitcastOp->getOperand(Num: 0))) |
| 4471 | return true; |
| 4472 | } |
| 4473 | return false; |
| 4474 | } |
| 4475 | |
/// Returns the appropriate ARM CMP (cmp) and corresponding condition code for
| 4477 | /// the given operands. |
| 4478 | SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, |
| 4479 | SDValue &ARMcc, SelectionDAG &DAG, |
| 4480 | const SDLoc &dl) const { |
| 4481 | if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Val: RHS.getNode())) { |
| 4482 | unsigned C = RHSC->getZExtValue(); |
| 4483 | if (!isLegalICmpImmediate(Imm: (int32_t)C)) { |
| 4484 | // Constant does not fit, try adjusting it by one. |
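// For example, on ARM/Thumb-2 the immediate 0x101 is not a valid modified
// immediate, so a check for "x < 0x101" (SETLT) is instead lowered as
// "x <= 0x100" (SETLE), whose constant 0x100 is encodable.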
| 4485 | switch (CC) { |
| 4486 | default: break; |
| 4487 | case ISD::SETLT: |
| 4488 | case ISD::SETGE: |
| 4489 | if (C != 0x80000000 && isLegalICmpImmediate(Imm: C-1)) { |
| 4490 | CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; |
| 4491 | RHS = DAG.getConstant(Val: C - 1, DL: dl, VT: MVT::i32); |
| 4492 | } |
| 4493 | break; |
| 4494 | case ISD::SETULT: |
| 4495 | case ISD::SETUGE: |
| 4496 | if (C != 0 && isLegalICmpImmediate(Imm: C-1)) { |
| 4497 | CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; |
| 4498 | RHS = DAG.getConstant(Val: C - 1, DL: dl, VT: MVT::i32); |
| 4499 | } |
| 4500 | break; |
| 4501 | case ISD::SETLE: |
| 4502 | case ISD::SETGT: |
| 4503 | if (C != 0x7fffffff && isLegalICmpImmediate(Imm: C+1)) { |
| 4504 | CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; |
| 4505 | RHS = DAG.getConstant(Val: C + 1, DL: dl, VT: MVT::i32); |
| 4506 | } |
| 4507 | break; |
| 4508 | case ISD::SETULE: |
| 4509 | case ISD::SETUGT: |
| 4510 | if (C != 0xffffffff && isLegalICmpImmediate(Imm: C+1)) { |
| 4511 | CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; |
| 4512 | RHS = DAG.getConstant(Val: C + 1, DL: dl, VT: MVT::i32); |
| 4513 | } |
| 4514 | break; |
| 4515 | } |
| 4516 | } |
| 4517 | } else if ((ARM_AM::getShiftOpcForNode(Opcode: LHS.getOpcode()) != ARM_AM::no_shift) && |
| 4518 | (ARM_AM::getShiftOpcForNode(Opcode: RHS.getOpcode()) == ARM_AM::no_shift)) { |
| 4519 | // In ARM and Thumb-2, the compare instructions can shift their second |
| 4520 | // operand. |
| 4521 | CC = ISD::getSetCCSwappedOperands(Operation: CC); |
| 4522 | std::swap(a&: LHS, b&: RHS); |
| 4523 | } |
| 4524 | |
| 4525 | // Thumb1 has very limited immediate modes, so turning an "and" into a |
| 4526 | // shift can save multiple instructions. |
| 4527 | // |
| 4528 | // If we have (x & C1), and C1 is an appropriate mask, we can transform it |
| 4529 | // into "((x << n) >> n)". But that isn't necessarily profitable on its |
| 4530 | // own. If it's the operand to an unsigned comparison with an immediate, |
| 4531 | // we can eliminate one of the shifts: we transform |
| 4532 | // "((x << n) >> n) == C2" to "(x << n) == (C2 << n)". |
| 4533 | // |
| 4534 | // We avoid transforming cases which aren't profitable due to encoding |
| 4535 | // details: |
| 4536 | // |
| 4537 | // 1. C2 fits into the immediate field of a cmp, and the transformed version |
| 4538 | // would not; in that case, we're essentially trading one immediate load for |
| 4539 | // another. |
| 4540 | // 2. C1 is 255 or 65535, so we can use uxtb or uxth. |
| 4541 | // 3. C2 is zero; we have other code for this special case. |
| 4542 | // |
| 4543 | // FIXME: Figure out profitability for Thumb2; we usually can't save an |
| 4544 | // instruction, since the AND is always one instruction anyway, but we could |
| 4545 | // use narrow instructions in some cases. |
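// For example, "(x & 0x00ffffff) == 0x400000" can be lowered as
// "(x << 8) == 0x40000000": the AND, whose mask would otherwise have to be
// materialised in a register on Thumb1, is replaced by a single shift.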
| 4546 | if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::AND && |
| 4547 | LHS->hasOneUse() && isa<ConstantSDNode>(Val: LHS.getOperand(i: 1)) && |
| 4548 | LHS.getValueType() == MVT::i32 && isa<ConstantSDNode>(Val: RHS) && |
| 4549 | !isSignedIntSetCC(Code: CC)) { |
| 4550 | unsigned Mask = LHS.getConstantOperandVal(i: 1); |
| 4551 | auto *RHSC = cast<ConstantSDNode>(Val: RHS.getNode()); |
| 4552 | uint64_t RHSV = RHSC->getZExtValue(); |
| 4553 | if (isMask_32(Value: Mask) && (RHSV & ~Mask) == 0 && Mask != 255 && Mask != 65535) { |
| 4554 | unsigned ShiftBits = llvm::countl_zero(Val: Mask); |
| 4555 | if (RHSV && (RHSV > 255 || (RHSV << ShiftBits) <= 255)) { |
| 4556 | SDValue ShiftAmt = DAG.getConstant(Val: ShiftBits, DL: dl, VT: MVT::i32); |
| 4557 | LHS = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: MVT::i32, N1: LHS.getOperand(i: 0), N2: ShiftAmt); |
| 4558 | RHS = DAG.getConstant(Val: RHSV << ShiftBits, DL: dl, VT: MVT::i32); |
| 4559 | } |
| 4560 | } |
| 4561 | } |
| 4562 | |
| 4563 | // The specific comparison "(x<<c) > 0x80000000U" can be optimized to a |
| 4564 | // single "lsls x, c+1". The shift sets the "C" and "Z" flags the same |
| 4565 | // way a cmp would. |
| 4566 | // FIXME: Add support for ARM/Thumb2; this would need isel patterns, and |
| 4567 | // some tweaks to the heuristics for the previous and->shift transform. |
| 4568 | // FIXME: Optimize cases where the LHS isn't a shift. |
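// For example, with c == 2 the sequence "lsls r, x, #3" leaves C equal to
// bit 29 of x and Z set iff bits 28..0 of x are all zero, so the HI condition
// holds exactly when (x << 2) > 0x80000000U.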
| 4569 | if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::SHL && |
| 4570 | isa<ConstantSDNode>(Val: RHS) && RHS->getAsZExtVal() == 0x80000000U && |
| 4571 | CC == ISD::SETUGT && isa<ConstantSDNode>(Val: LHS.getOperand(i: 1)) && |
| 4572 | LHS.getConstantOperandVal(i: 1) < 31) { |
| 4573 | unsigned ShiftAmt = LHS.getConstantOperandVal(i: 1) + 1; |
| 4574 | SDValue Shift = |
| 4575 | DAG.getNode(Opcode: ARMISD::LSLS, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: FlagsVT), |
| 4576 | N1: LHS.getOperand(i: 0), N2: DAG.getConstant(Val: ShiftAmt, DL: dl, VT: MVT::i32)); |
| 4577 | ARMcc = DAG.getConstant(Val: ARMCC::HI, DL: dl, VT: MVT::i32); |
| 4578 | return Shift.getValue(R: 1); |
| 4579 | } |
| 4580 | |
| 4581 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); |
| 4582 | |
| 4583 | // If the RHS is a constant zero then the V (overflow) flag will never be |
| 4584 | // set. This can allow us to simplify GE to PL or LT to MI, which can be |
| 4585 | // simpler for other passes (like the peephole optimiser) to deal with. |
| 4586 | if (isNullConstant(V: RHS)) { |
| 4587 | switch (CondCode) { |
| 4588 | default: break; |
| 4589 | case ARMCC::GE: |
| 4590 | CondCode = ARMCC::PL; |
| 4591 | break; |
| 4592 | case ARMCC::LT: |
| 4593 | CondCode = ARMCC::MI; |
| 4594 | break; |
| 4595 | } |
| 4596 | } |
| 4597 | |
| 4598 | unsigned CompareType; |
| 4599 | switch (CondCode) { |
| 4600 | default: |
| 4601 | CompareType = ARMISD::CMP; |
| 4602 | break; |
| 4603 | case ARMCC::EQ: |
| 4604 | case ARMCC::NE: |
| 4605 | // Uses only Z Flag |
| 4606 | CompareType = ARMISD::CMPZ; |
| 4607 | break; |
| 4608 | } |
| 4609 | ARMcc = DAG.getConstant(Val: CondCode, DL: dl, VT: MVT::i32); |
| 4610 | return DAG.getNode(Opcode: CompareType, DL: dl, VT: FlagsVT, N1: LHS, N2: RHS); |
| 4611 | } |
| 4612 | |
/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
| 4614 | SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, |
| 4615 | SelectionDAG &DAG, const SDLoc &dl, |
| 4616 | bool Signaling) const { |
| 4617 | assert(Subtarget->hasFP64() || RHS.getValueType() != MVT::f64); |
| 4618 | SDValue Flags; |
| 4619 | if (!isFloatingPointZero(Op: RHS)) |
| 4620 | Flags = DAG.getNode(Opcode: Signaling ? ARMISD::CMPFPE : ARMISD::CMPFP, DL: dl, VT: FlagsVT, |
| 4621 | N1: LHS, N2: RHS); |
| 4622 | else |
| 4623 | Flags = DAG.getNode(Opcode: Signaling ? ARMISD::CMPFPEw0 : ARMISD::CMPFPw0, DL: dl, |
| 4624 | VT: FlagsVT, Operand: LHS); |
| 4625 | return DAG.getNode(Opcode: ARMISD::FMSTAT, DL: dl, VT: FlagsVT, Operand: Flags); |
| 4626 | } |
| 4627 | |
| 4628 | // This function returns three things: the arithmetic computation itself |
| 4629 | // (Value), a comparison (OverflowCmp), and a condition code (ARMcc). The |
| 4630 | // comparison and the condition code define the case in which the arithmetic |
| 4631 | // computation *does not* overflow. |
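// For example, for ISD::SADDO we emit Value = ADD(LHS, RHS) and
// OverflowCmp = CMP(Value, LHS) with condition VC: subtracting LHS back out
// of the sum recomputes RHS, and that subtraction sets the V flag exactly
// when the original signed addition overflowed, so VC selects the
// non-overflowing case.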
| 4632 | std::pair<SDValue, SDValue> |
| 4633 | ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG, |
| 4634 | SDValue &ARMcc) const { |
| 4635 | assert(Op.getValueType() == MVT::i32 && "Unsupported value type" ); |
| 4636 | |
| 4637 | SDValue Value, OverflowCmp; |
| 4638 | SDValue LHS = Op.getOperand(i: 0); |
| 4639 | SDValue RHS = Op.getOperand(i: 1); |
| 4640 | SDLoc dl(Op); |
| 4641 | |
| 4642 | // FIXME: We are currently always generating CMPs because we don't support |
| 4643 | // generating CMN through the backend. This is not as good as the natural |
| 4644 | // CMP case because it causes a register dependency and cannot be folded |
| 4645 | // later. |
| 4646 | |
| 4647 | switch (Op.getOpcode()) { |
| 4648 | default: |
| 4649 | llvm_unreachable("Unknown overflow instruction!" ); |
| 4650 | case ISD::SADDO: |
| 4651 | ARMcc = DAG.getConstant(Val: ARMCC::VC, DL: dl, VT: MVT::i32); |
| 4652 | Value = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: Op.getValueType(), N1: LHS, N2: RHS); |
| 4653 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: FlagsVT, N1: Value, N2: LHS); |
| 4654 | break; |
| 4655 | case ISD::UADDO: |
| 4656 | ARMcc = DAG.getConstant(Val: ARMCC::HS, DL: dl, VT: MVT::i32); |
| 4657 | // We use ADDC here to correspond to its use in LowerUnsignedALUO. |
| 4658 | // We do not use it in the USUBO case as Value may not be used. |
| 4659 | Value = DAG.getNode(Opcode: ARMISD::ADDC, DL: dl, |
| 4660 | VTList: DAG.getVTList(VT1: Op.getValueType(), VT2: MVT::i32), N1: LHS, N2: RHS) |
| 4661 | .getValue(R: 0); |
| 4662 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: FlagsVT, N1: Value, N2: LHS); |
| 4663 | break; |
| 4664 | case ISD::SSUBO: |
| 4665 | ARMcc = DAG.getConstant(Val: ARMCC::VC, DL: dl, VT: MVT::i32); |
| 4666 | Value = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: Op.getValueType(), N1: LHS, N2: RHS); |
| 4667 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: FlagsVT, N1: LHS, N2: RHS); |
| 4668 | break; |
| 4669 | case ISD::USUBO: |
| 4670 | ARMcc = DAG.getConstant(Val: ARMCC::HS, DL: dl, VT: MVT::i32); |
| 4671 | Value = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: Op.getValueType(), N1: LHS, N2: RHS); |
| 4672 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: FlagsVT, N1: LHS, N2: RHS); |
| 4673 | break; |
| 4674 | case ISD::UMULO: |
| 4675 | // We generate a UMUL_LOHI and then check if the high word is 0. |
| 4676 | ARMcc = DAG.getConstant(Val: ARMCC::EQ, DL: dl, VT: MVT::i32); |
| 4677 | Value = DAG.getNode(Opcode: ISD::UMUL_LOHI, DL: dl, |
| 4678 | VTList: DAG.getVTList(VT1: Op.getValueType(), VT2: Op.getValueType()), |
| 4679 | N1: LHS, N2: RHS); |
| 4680 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: FlagsVT, N1: Value.getValue(R: 1), |
| 4681 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 4682 | Value = Value.getValue(R: 0); // We only want the low 32 bits for the result. |
| 4683 | break; |
| 4684 | case ISD::SMULO: |
| 4685 | // We generate a SMUL_LOHI and then check if all the bits of the high word |
| 4686 | // are the same as the sign bit of the low word. |
| 4687 | ARMcc = DAG.getConstant(Val: ARMCC::EQ, DL: dl, VT: MVT::i32); |
| 4688 | Value = DAG.getNode(Opcode: ISD::SMUL_LOHI, DL: dl, |
| 4689 | VTList: DAG.getVTList(VT1: Op.getValueType(), VT2: Op.getValueType()), |
| 4690 | N1: LHS, N2: RHS); |
| 4691 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: FlagsVT, N1: Value.getValue(R: 1), |
| 4692 | N2: DAG.getNode(Opcode: ISD::SRA, DL: dl, VT: Op.getValueType(), |
| 4693 | N1: Value.getValue(R: 0), |
| 4694 | N2: DAG.getConstant(Val: 31, DL: dl, VT: MVT::i32))); |
| 4695 | Value = Value.getValue(R: 0); // We only want the low 32 bits for the result. |
| 4696 | break; |
| 4697 | } // switch (...) |
| 4698 | |
| 4699 | return std::make_pair(x&: Value, y&: OverflowCmp); |
| 4700 | } |
| 4701 | |
| 4702 | SDValue |
| 4703 | ARMTargetLowering::LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const { |
| 4704 | // Let legalize expand this if it isn't a legal type yet. |
| 4705 | if (!isTypeLegal(VT: Op.getValueType())) |
| 4706 | return SDValue(); |
| 4707 | |
| 4708 | SDValue Value, OverflowCmp; |
| 4709 | SDValue ARMcc; |
| 4710 | std::tie(args&: Value, args&: OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc); |
| 4711 | SDLoc dl(Op); |
| 4712 | // We use 0 and 1 as false and true values. |
| 4713 | SDValue TVal = DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32); |
| 4714 | SDValue FVal = DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32); |
| 4715 | EVT VT = Op.getValueType(); |
| 4716 | |
| 4717 | SDValue Overflow = |
| 4718 | DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: TVal, N2: FVal, N3: ARMcc, N4: OverflowCmp); |
| 4719 | |
| 4720 | SDVTList VTs = DAG.getVTList(VT1: Op.getValueType(), VT2: MVT::i32); |
| 4721 | return DAG.getNode(Opcode: ISD::MERGE_VALUES, DL: dl, VTList: VTs, N1: Value, N2: Overflow); |
| 4722 | } |
| 4723 | |
| 4724 | static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry, |
| 4725 | SelectionDAG &DAG) { |
| 4726 | SDLoc DL(BoolCarry); |
| 4727 | EVT CarryVT = BoolCarry.getValueType(); |
| 4728 | |
| 4729 | // This converts the boolean value carry into the carry flag by doing |
| 4730 | // ARMISD::SUBC Carry, 1 |
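// If BoolCarry is 1, computing 1 - 1 produces no borrow and so sets the C
// flag; if it is 0, computing 0 - 1 borrows and clears C (ARM subtraction
// uses C = NOT borrow), so the 0/1 value maps directly onto the flag.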
| 4731 | SDValue Carry = DAG.getNode(Opcode: ARMISD::SUBC, DL, |
| 4732 | VTList: DAG.getVTList(VT1: CarryVT, VT2: MVT::i32), |
| 4733 | N1: BoolCarry, N2: DAG.getConstant(Val: 1, DL, VT: CarryVT)); |
| 4734 | return Carry.getValue(R: 1); |
| 4735 | } |
| 4736 | |
| 4737 | static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT, |
| 4738 | SelectionDAG &DAG) { |
| 4739 | SDLoc DL(Flags); |
| 4740 | |
// Now convert the carry flag into a boolean carry. We do this using
// ARMISD::ADDE 0, 0, Carry, which computes 0 + 0 + C and so materialises
// the flag as a 0/1 value.
| 4743 | return DAG.getNode(Opcode: ARMISD::ADDE, DL, VTList: DAG.getVTList(VT1: VT, VT2: MVT::i32), |
| 4744 | N1: DAG.getConstant(Val: 0, DL, VT: MVT::i32), |
| 4745 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32), N3: Flags); |
| 4746 | } |
| 4747 | |
| 4748 | SDValue ARMTargetLowering::LowerUnsignedALUO(SDValue Op, |
| 4749 | SelectionDAG &DAG) const { |
| 4750 | // Let legalize expand this if it isn't a legal type yet. |
| 4751 | if (!isTypeLegal(VT: Op.getValueType())) |
| 4752 | return SDValue(); |
| 4753 | |
| 4754 | SDValue LHS = Op.getOperand(i: 0); |
| 4755 | SDValue RHS = Op.getOperand(i: 1); |
| 4756 | SDLoc dl(Op); |
| 4757 | |
| 4758 | EVT VT = Op.getValueType(); |
| 4759 | SDVTList VTs = DAG.getVTList(VT1: VT, VT2: MVT::i32); |
| 4760 | SDValue Value; |
| 4761 | SDValue Overflow; |
| 4762 | switch (Op.getOpcode()) { |
| 4763 | default: |
| 4764 | llvm_unreachable("Unknown overflow instruction!" ); |
| 4765 | case ISD::UADDO: |
| 4766 | Value = DAG.getNode(Opcode: ARMISD::ADDC, DL: dl, VTList: VTs, N1: LHS, N2: RHS); |
| 4767 | // Convert the carry flag into a boolean value. |
| 4768 | Overflow = ConvertCarryFlagToBooleanCarry(Flags: Value.getValue(R: 1), VT, DAG); |
| 4769 | break; |
| 4770 | case ISD::USUBO: { |
| 4771 | Value = DAG.getNode(Opcode: ARMISD::SUBC, DL: dl, VTList: VTs, N1: LHS, N2: RHS); |
| 4772 | // Convert the carry flag into a boolean value. |
| 4773 | Overflow = ConvertCarryFlagToBooleanCarry(Flags: Value.getValue(R: 1), VT, DAG); |
| 4774 | // ARMISD::SUBC returns 0 when we have to borrow, so make it an overflow |
| 4775 | // value. So compute 1 - C. |
| 4776 | Overflow = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
| 4777 | N1: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32), N2: Overflow); |
| 4778 | break; |
| 4779 | } |
| 4780 | } |
| 4781 | |
| 4782 | return DAG.getNode(Opcode: ISD::MERGE_VALUES, DL: dl, VTList: VTs, N1: Value, N2: Overflow); |
| 4783 | } |
| 4784 | |
| 4785 | static SDValue LowerADDSUBSAT(SDValue Op, SelectionDAG &DAG, |
| 4786 | const ARMSubtarget *Subtarget) { |
| 4787 | EVT VT = Op.getValueType(); |
| 4788 | if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP() || Subtarget->isThumb1Only()) |
| 4789 | return SDValue(); |
| 4790 | if (!VT.isSimple()) |
| 4791 | return SDValue(); |
| 4792 | |
| 4793 | unsigned NewOpcode; |
| 4794 | switch (VT.getSimpleVT().SimpleTy) { |
| 4795 | default: |
| 4796 | return SDValue(); |
| 4797 | case MVT::i8: |
| 4798 | switch (Op->getOpcode()) { |
| 4799 | case ISD::UADDSAT: |
| 4800 | NewOpcode = ARMISD::UQADD8b; |
| 4801 | break; |
| 4802 | case ISD::SADDSAT: |
| 4803 | NewOpcode = ARMISD::QADD8b; |
| 4804 | break; |
| 4805 | case ISD::USUBSAT: |
| 4806 | NewOpcode = ARMISD::UQSUB8b; |
| 4807 | break; |
| 4808 | case ISD::SSUBSAT: |
| 4809 | NewOpcode = ARMISD::QSUB8b; |
| 4810 | break; |
| 4811 | } |
| 4812 | break; |
| 4813 | case MVT::i16: |
| 4814 | switch (Op->getOpcode()) { |
| 4815 | case ISD::UADDSAT: |
| 4816 | NewOpcode = ARMISD::UQADD16b; |
| 4817 | break; |
| 4818 | case ISD::SADDSAT: |
| 4819 | NewOpcode = ARMISD::QADD16b; |
| 4820 | break; |
| 4821 | case ISD::USUBSAT: |
| 4822 | NewOpcode = ARMISD::UQSUB16b; |
| 4823 | break; |
| 4824 | case ISD::SSUBSAT: |
| 4825 | NewOpcode = ARMISD::QSUB16b; |
| 4826 | break; |
| 4827 | } |
| 4828 | break; |
| 4829 | } |
| 4830 | |
| 4831 | SDLoc dl(Op); |
| 4832 | SDValue Add = |
| 4833 | DAG.getNode(Opcode: NewOpcode, DL: dl, VT: MVT::i32, |
| 4834 | N1: DAG.getSExtOrTrunc(Op: Op->getOperand(Num: 0), DL: dl, VT: MVT::i32), |
| 4835 | N2: DAG.getSExtOrTrunc(Op: Op->getOperand(Num: 1), DL: dl, VT: MVT::i32)); |
| 4836 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT, Operand: Add); |
| 4837 | } |
| 4838 | |
| 4839 | SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { |
| 4840 | SDValue Cond = Op.getOperand(i: 0); |
| 4841 | SDValue SelectTrue = Op.getOperand(i: 1); |
| 4842 | SDValue SelectFalse = Op.getOperand(i: 2); |
| 4843 | SDLoc dl(Op); |
| 4844 | unsigned Opc = Cond.getOpcode(); |
| 4845 | |
| 4846 | if (Cond.getResNo() == 1 && |
| 4847 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || |
| 4848 | Opc == ISD::USUBO)) { |
| 4849 | if (!isTypeLegal(VT: Cond->getValueType(ResNo: 0))) |
| 4850 | return SDValue(); |
| 4851 | |
| 4852 | SDValue Value, OverflowCmp; |
| 4853 | SDValue ARMcc; |
| 4854 | std::tie(args&: Value, args&: OverflowCmp) = getARMXALUOOp(Op: Cond, DAG, ARMcc); |
| 4855 | EVT VT = Op.getValueType(); |
| 4856 | |
| 4857 | return getCMOV(dl, VT, FalseVal: SelectTrue, TrueVal: SelectFalse, ARMcc, Flags: OverflowCmp, DAG); |
| 4858 | } |
| 4859 | |
| 4860 | // Convert: |
| 4861 | // |
| 4862 | // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) |
| 4863 | // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) |
| 4864 | // |
| 4865 | if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { |
| 4866 | const ConstantSDNode *CMOVTrue = |
| 4867 | dyn_cast<ConstantSDNode>(Val: Cond.getOperand(i: 0)); |
| 4868 | const ConstantSDNode *CMOVFalse = |
| 4869 | dyn_cast<ConstantSDNode>(Val: Cond.getOperand(i: 1)); |
| 4870 | |
| 4871 | if (CMOVTrue && CMOVFalse) { |
| 4872 | unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); |
| 4873 | unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); |
| 4874 | |
| 4875 | SDValue True; |
| 4876 | SDValue False; |
| 4877 | if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { |
| 4878 | True = SelectTrue; |
| 4879 | False = SelectFalse; |
| 4880 | } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { |
| 4881 | True = SelectFalse; |
| 4882 | False = SelectTrue; |
| 4883 | } |
| 4884 | |
| 4885 | if (True.getNode() && False.getNode()) |
| 4886 | return getCMOV(dl, VT: Op.getValueType(), FalseVal: True, TrueVal: False, ARMcc: Cond.getOperand(i: 2), |
| 4887 | Flags: Cond.getOperand(i: 3), DAG); |
| 4888 | } |
| 4889 | } |
| 4890 | |
| 4891 | // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the |
| 4892 | // undefined bits before doing a full-word comparison with zero. |
| 4893 | Cond = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: Cond.getValueType(), N1: Cond, |
| 4894 | N2: DAG.getConstant(Val: 1, DL: dl, VT: Cond.getValueType())); |
| 4895 | |
| 4896 | return DAG.getSelectCC(DL: dl, LHS: Cond, |
| 4897 | RHS: DAG.getConstant(Val: 0, DL: dl, VT: Cond.getValueType()), |
| 4898 | True: SelectTrue, False: SelectFalse, Cond: ISD::SETNE); |
| 4899 | } |
| 4900 | |
| 4901 | static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode, |
| 4902 | bool &swpCmpOps, bool &swpVselOps) { |
| 4903 | // Start by selecting the GE condition code for opcodes that return true for |
| 4904 | // 'equality' |
| 4905 | if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE || |
| 4906 | CC == ISD::SETULE || CC == ISD::SETGE || CC == ISD::SETLE) |
| 4907 | CondCode = ARMCC::GE; |
| 4908 | |
| 4909 | // and GT for opcodes that return false for 'equality'. |
| 4910 | else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT || |
| 4911 | CC == ISD::SETULT || CC == ISD::SETGT || CC == ISD::SETLT) |
| 4912 | CondCode = ARMCC::GT; |
| 4913 | |
| 4914 | // Since we are constrained to GE/GT, if the opcode contains 'less', we need |
| 4915 | // to swap the compare operands. |
| 4916 | if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT || |
| 4917 | CC == ISD::SETULT || CC == ISD::SETLE || CC == ISD::SETLT) |
| 4918 | swpCmpOps = true; |
| 4919 | |
| 4920 | // Both GT and GE are ordered comparisons, and return false for 'unordered'. |
| 4921 | // If we have an unordered opcode, we need to swap the operands to the VSEL |
| 4922 | // instruction (effectively negating the condition). |
| 4923 | // |
| 4924 | // This also has the effect of swapping which one of 'less' or 'greater' |
| 4925 | // returns true, so we also swap the compare operands. It also switches |
| 4926 | // whether we return true for 'equality', so we compensate by picking the |
| 4927 | // opposite condition code to our original choice. |
| 4928 | if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE || |
| 4929 | CC == ISD::SETUGT) { |
| 4930 | swpCmpOps = !swpCmpOps; |
| 4931 | swpVselOps = !swpVselOps; |
| 4932 | CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT; |
| 4933 | } |
| 4934 | |
| 4935 | // 'ordered' is 'anything but unordered', so use the VS condition code and |
| 4936 | // swap the VSEL operands. |
| 4937 | if (CC == ISD::SETO) { |
| 4938 | CondCode = ARMCC::VS; |
| 4939 | swpVselOps = true; |
| 4940 | } |
| 4941 | |
| 4942 | // 'unordered or not equal' is 'anything but equal', so use the EQ condition |
| 4943 | // code and swap the VSEL operands. Also do this if we don't care about the |
| 4944 | // unordered case. |
| 4945 | if (CC == ISD::SETUNE || CC == ISD::SETNE) { |
| 4946 | CondCode = ARMCC::EQ; |
| 4947 | swpVselOps = true; |
| 4948 | } |
| 4949 | } |
| 4950 | |
| 4951 | SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, |
| 4952 | SDValue TrueVal, SDValue ARMcc, |
| 4953 | SDValue Flags, SelectionDAG &DAG) const { |
| 4954 | if (!Subtarget->hasFP64() && VT == MVT::f64) { |
| 4955 | FalseVal = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
| 4956 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: FalseVal); |
| 4957 | TrueVal = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
| 4958 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: TrueVal); |
| 4959 | |
| 4960 | SDValue TrueLow = TrueVal.getValue(R: 0); |
| 4961 | SDValue TrueHigh = TrueVal.getValue(R: 1); |
| 4962 | SDValue FalseLow = FalseVal.getValue(R: 0); |
| 4963 | SDValue FalseHigh = FalseVal.getValue(R: 1); |
| 4964 | |
| 4965 | SDValue Low = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT: MVT::i32, N1: FalseLow, N2: TrueLow, |
| 4966 | N3: ARMcc, N4: Flags); |
| 4967 | SDValue High = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT: MVT::i32, N1: FalseHigh, N2: TrueHigh, |
| 4968 | N3: ARMcc, N4: Flags); |
| 4969 | |
| 4970 | return DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: Low, N2: High); |
| 4971 | } |
| 4972 | return DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: FalseVal, N2: TrueVal, N3: ARMcc, N4: Flags); |
| 4973 | } |
| 4974 | |
| 4975 | static bool isGTorGE(ISD::CondCode CC) { |
| 4976 | return CC == ISD::SETGT || CC == ISD::SETGE; |
| 4977 | } |
| 4978 | |
| 4979 | static bool isLTorLE(ISD::CondCode CC) { |
| 4980 | return CC == ISD::SETLT || CC == ISD::SETLE; |
| 4981 | } |
| 4982 | |
| 4983 | // See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating. |
| 4984 | // All of these conditions (and their <= and >= counterparts) will do: |
| 4985 | // x < k ? k : x |
| 4986 | // x > k ? x : k |
| 4987 | // k < x ? x : k |
| 4988 | // k > x ? k : x |
| 4989 | static bool isLowerSaturate(const SDValue LHS, const SDValue RHS, |
| 4990 | const SDValue TrueVal, const SDValue FalseVal, |
| 4991 | const ISD::CondCode CC, const SDValue K) { |
| 4992 | return (isGTorGE(CC) && |
| 4993 | ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) || |
| 4994 | (isLTorLE(CC) && |
| 4995 | ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))); |
| 4996 | } |
| 4997 | |
| 4998 | // Check if two chained conditionals could be converted into SSAT or USAT. |
| 4999 | // |
| 5000 | // SSAT can replace a set of two conditional selectors that bound a number to an |
| 5001 | // interval of type [k, ~k] when k + 1 is a power of 2. Here are some examples: |
| 5002 | // |
| 5003 | // x < -k ? -k : (x > k ? k : x) |
| 5004 | // x < -k ? -k : (x < k ? x : k) |
| 5005 | // x > -k ? (x > k ? k : x) : -k |
| 5006 | // x < k ? (x < -k ? -k : x) : k |
| 5007 | // etc. |
| 5008 | // |
| 5009 | // LLVM canonicalizes these to either a min(max()) or a max(min()) |
| 5010 | // pattern. This function tries to match one of these and will return a SSAT |
| 5011 | // node if successful. |
| 5012 | // |
// USAT works similarly to SSAT, but bounds the value to the interval [0, k],
// where k + 1 is a power of 2.
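// For example, "x < -128 ? -128 : (x > 127 ? 127 : x)" clamps x to
// [-128, 127] and can be matched to an 8-bit ssat ("ssat rd, #8, rn"), while
// "x > 255 ? 255 : (x < 0 ? 0 : x)" clamps to [0, 255] and maps to
// "usat rd, #8, rn".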
| 5015 | static SDValue LowerSaturatingConditional(SDValue Op, SelectionDAG &DAG) { |
| 5016 | EVT VT = Op.getValueType(); |
| 5017 | SDValue V1 = Op.getOperand(i: 0); |
| 5018 | SDValue K1 = Op.getOperand(i: 1); |
| 5019 | SDValue TrueVal1 = Op.getOperand(i: 2); |
| 5020 | SDValue FalseVal1 = Op.getOperand(i: 3); |
| 5021 | ISD::CondCode CC1 = cast<CondCodeSDNode>(Val: Op.getOperand(i: 4))->get(); |
| 5022 | |
| 5023 | const SDValue Op2 = isa<ConstantSDNode>(Val: TrueVal1) ? FalseVal1 : TrueVal1; |
| 5024 | if (Op2.getOpcode() != ISD::SELECT_CC) |
| 5025 | return SDValue(); |
| 5026 | |
| 5027 | SDValue V2 = Op2.getOperand(i: 0); |
| 5028 | SDValue K2 = Op2.getOperand(i: 1); |
| 5029 | SDValue TrueVal2 = Op2.getOperand(i: 2); |
| 5030 | SDValue FalseVal2 = Op2.getOperand(i: 3); |
| 5031 | ISD::CondCode CC2 = cast<CondCodeSDNode>(Val: Op2.getOperand(i: 4))->get(); |
| 5032 | |
| 5033 | SDValue V1Tmp = V1; |
| 5034 | SDValue V2Tmp = V2; |
| 5035 | |
| 5036 | // Check that the registers and the constants match a max(min()) or min(max()) |
| 5037 | // pattern |
| 5038 | if (V1Tmp != TrueVal1 || V2Tmp != TrueVal2 || K1 != FalseVal1 || |
| 5039 | K2 != FalseVal2 || |
| 5040 | !((isGTorGE(CC: CC1) && isLTorLE(CC: CC2)) || (isLTorLE(CC: CC1) && isGTorGE(CC: CC2)))) |
| 5041 | return SDValue(); |
| 5042 | |
| 5043 | // Check that the constant in the lower-bound check is |
| 5044 | // the opposite of the constant in the upper-bound check |
| 5045 | // in 1's complement. |
| 5046 | if (!isa<ConstantSDNode>(Val: K1) || !isa<ConstantSDNode>(Val: K2)) |
| 5047 | return SDValue(); |
| 5048 | |
| 5049 | int64_t Val1 = cast<ConstantSDNode>(Val&: K1)->getSExtValue(); |
| 5050 | int64_t Val2 = cast<ConstantSDNode>(Val&: K2)->getSExtValue(); |
| 5051 | int64_t PosVal = std::max(a: Val1, b: Val2); |
| 5052 | int64_t NegVal = std::min(a: Val1, b: Val2); |
| 5053 | |
| 5054 | if (!((Val1 > Val2 && isLTorLE(CC: CC1)) || (Val1 < Val2 && isLTorLE(CC: CC2))) || |
| 5055 | !isPowerOf2_64(Value: PosVal + 1)) |
| 5056 | return SDValue(); |
| 5057 | |
| 5058 | // Handle the difference between USAT (unsigned) and SSAT (signed) |
| 5059 | // saturation |
| 5060 | // At this point, PosVal is guaranteed to be positive |
| 5061 | uint64_t K = PosVal; |
| 5062 | SDLoc dl(Op); |
| 5063 | if (Val1 == ~Val2) |
| 5064 | return DAG.getNode(Opcode: ARMISD::SSAT, DL: dl, VT, N1: V2Tmp, |
| 5065 | N2: DAG.getConstant(Val: llvm::countr_one(Value: K), DL: dl, VT)); |
| 5066 | if (NegVal == 0) |
| 5067 | return DAG.getNode(Opcode: ARMISD::USAT, DL: dl, VT, N1: V2Tmp, |
| 5068 | N2: DAG.getConstant(Val: llvm::countr_one(Value: K), DL: dl, VT)); |
| 5069 | |
| 5070 | return SDValue(); |
| 5071 | } |
| 5072 | |
| 5073 | // Check if a condition of the type x < k ? k : x can be converted into a |
| 5074 | // bit operation instead of conditional moves. |
| 5075 | // Currently this is allowed given: |
| 5076 | // - The conditions and values match up |
| 5077 | // - k is 0 or -1 (all ones) |
// This function will not check the last condition; that's up to the caller.
// It returns true if the transformation can be made, and in such a case
| 5080 | // returns x in V, and k in SatK. |
| 5081 | static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V, |
SDValue &SatK) {
| 5084 | SDValue LHS = Op.getOperand(i: 0); |
| 5085 | SDValue RHS = Op.getOperand(i: 1); |
| 5086 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 4))->get(); |
| 5087 | SDValue TrueVal = Op.getOperand(i: 2); |
| 5088 | SDValue FalseVal = Op.getOperand(i: 3); |
| 5089 | |
| 5090 | SDValue *K = isa<ConstantSDNode>(Val: LHS) ? &LHS : isa<ConstantSDNode>(Val: RHS) |
| 5091 | ? &RHS |
| 5092 | : nullptr; |
| 5093 | |
// No constant operand in the comparison, early out
| 5095 | if (!K) |
| 5096 | return false; |
| 5097 | |
| 5098 | SDValue KTmp = isa<ConstantSDNode>(Val: TrueVal) ? TrueVal : FalseVal; |
| 5099 | V = (KTmp == TrueVal) ? FalseVal : TrueVal; |
| 5100 | SDValue VTmp = (K && *K == LHS) ? RHS : LHS; |
| 5101 | |
// If the constant in the comparison does not match the constant in the
// select, or the variables do not match, early out
| 5104 | if (*K != KTmp || V != VTmp) |
| 5105 | return false; |
| 5106 | |
| 5107 | if (isLowerSaturate(LHS, RHS, TrueVal, FalseVal, CC, K: *K)) { |
| 5108 | SatK = *K; |
| 5109 | return true; |
| 5110 | } |
| 5111 | |
| 5112 | return false; |
| 5113 | } |
| 5114 | |
| 5115 | bool ARMTargetLowering::isUnsupportedFloatingType(EVT VT) const { |
| 5116 | if (VT == MVT::f32) |
| 5117 | return !Subtarget->hasVFP2Base(); |
| 5118 | if (VT == MVT::f64) |
| 5119 | return !Subtarget->hasFP64(); |
| 5120 | if (VT == MVT::f16) |
| 5121 | return !Subtarget->hasFullFP16(); |
| 5122 | return false; |
| 5123 | } |
| 5124 | |
| 5125 | SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { |
| 5126 | EVT VT = Op.getValueType(); |
| 5127 | SDLoc dl(Op); |
| 5128 | |
| 5129 | // Try to convert two saturating conditional selects into a single SSAT |
| 5130 | if ((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || Subtarget->isThumb2()) |
| 5131 | if (SDValue SatValue = LowerSaturatingConditional(Op, DAG)) |
| 5132 | return SatValue; |
| 5133 | |
| 5134 | // Try to convert expressions of the form x < k ? k : x (and similar forms) |
// into more efficient bit operations, which is possible when k is 0 or -1.
// On ARM and Thumb-2, which have a flexible second operand, this results in
// a single instruction. On Thumb the shift and the bit operation take two
// instructions.
| 5139 | // Only allow this transformation on full-width (32-bit) operations |
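// For example, "x < 0 ? 0 : x" becomes "x & ~(x >> 31)" and
// "x < -1 ? -1 : x" becomes "x | (x >> 31)" (arithmetic shifts), avoiding a
// compare-and-select sequence.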
| 5140 | SDValue LowerSatConstant; |
| 5141 | SDValue SatValue; |
| 5142 | if (VT == MVT::i32 && |
| 5143 | isLowerSaturatingConditional(Op, V&: SatValue, SatK&: LowerSatConstant)) { |
| 5144 | SDValue ShiftV = DAG.getNode(Opcode: ISD::SRA, DL: dl, VT, N1: SatValue, |
| 5145 | N2: DAG.getConstant(Val: 31, DL: dl, VT)); |
| 5146 | if (isNullConstant(V: LowerSatConstant)) { |
| 5147 | SDValue NotShiftV = DAG.getNode(Opcode: ISD::XOR, DL: dl, VT, N1: ShiftV, |
| 5148 | N2: DAG.getAllOnesConstant(DL: dl, VT)); |
| 5149 | return DAG.getNode(Opcode: ISD::AND, DL: dl, VT, N1: SatValue, N2: NotShiftV); |
| 5150 | } else if (isAllOnesConstant(V: LowerSatConstant)) |
| 5151 | return DAG.getNode(Opcode: ISD::OR, DL: dl, VT, N1: SatValue, N2: ShiftV); |
| 5152 | } |
| 5153 | |
| 5154 | SDValue LHS = Op.getOperand(i: 0); |
| 5155 | SDValue RHS = Op.getOperand(i: 1); |
| 5156 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 4))->get(); |
| 5157 | SDValue TrueVal = Op.getOperand(i: 2); |
| 5158 | SDValue FalseVal = Op.getOperand(i: 3); |
| 5159 | ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(Val&: FalseVal); |
| 5160 | ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(Val&: TrueVal); |
| 5161 | ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Val&: RHS); |
| 5162 | if (Op.getValueType().isInteger()) { |
| 5163 | |
| 5164 | // Check for SMAX(lhs, 0) and SMIN(lhs, 0) patterns. |
| 5165 | // (SELECT_CC setgt, lhs, 0, lhs, 0) -> (BIC lhs, (SRA lhs, typesize-1)) |
| 5166 | // (SELECT_CC setlt, lhs, 0, lhs, 0) -> (AND lhs, (SRA lhs, typesize-1)) |
| 5167 | // Both require less instructions than compare and conditional select. |
| 5168 | if ((CC == ISD::SETGT || CC == ISD::SETLT) && LHS == TrueVal && RHSC && |
| 5169 | RHSC->isZero() && CFVal && CFVal->isZero() && |
| 5170 | LHS.getValueType() == RHS.getValueType()) { |
| 5171 | EVT VT = LHS.getValueType(); |
| 5172 | SDValue Shift = |
| 5173 | DAG.getNode(Opcode: ISD::SRA, DL: dl, VT, N1: LHS, |
| 5174 | N2: DAG.getConstant(Val: VT.getSizeInBits() - 1, DL: dl, VT)); |
| 5175 | |
| 5176 | if (CC == ISD::SETGT) |
| 5177 | Shift = DAG.getNOT(DL: dl, Val: Shift, VT); |
| 5178 | |
| 5179 | return DAG.getNode(Opcode: ISD::AND, DL: dl, VT, N1: LHS, N2: Shift); |
| 5180 | } |
| 5181 | } |
| 5182 | |
| 5183 | if (Subtarget->hasV8_1MMainlineOps() && CFVal && CTVal && |
| 5184 | LHS.getValueType() == MVT::i32 && RHS.getValueType() == MVT::i32) { |
| 5185 | unsigned TVal = CTVal->getZExtValue(); |
| 5186 | unsigned FVal = CFVal->getZExtValue(); |
| 5187 | unsigned Opcode = 0; |
| 5188 | |
| 5189 | if (TVal == ~FVal) { |
| 5190 | Opcode = ARMISD::CSINV; |
| 5191 | } else if (TVal == ~FVal + 1) { |
| 5192 | Opcode = ARMISD::CSNEG; |
| 5193 | } else if (TVal + 1 == FVal) { |
| 5194 | Opcode = ARMISD::CSINC; |
| 5195 | } else if (TVal == FVal + 1) { |
| 5196 | Opcode = ARMISD::CSINC; |
| 5197 | std::swap(a&: TrueVal, b&: FalseVal); |
| 5198 | std::swap(a&: TVal, b&: FVal); |
| 5199 | CC = ISD::getSetCCInverse(Operation: CC, Type: LHS.getValueType()); |
| 5200 | } |
| 5201 | |
| 5202 | if (Opcode) { |
| 5203 | // If one of the constants is cheaper than another, materialise the |
| 5204 | // cheaper one and let the csel generate the other. |
| 5205 | if (Opcode != ARMISD::CSINC && |
| 5206 | HasLowerConstantMaterializationCost(Val1: FVal, Val2: TVal, Subtarget)) { |
| 5207 | std::swap(a&: TrueVal, b&: FalseVal); |
| 5208 | std::swap(a&: TVal, b&: FVal); |
| 5209 | CC = ISD::getSetCCInverse(Operation: CC, Type: LHS.getValueType()); |
| 5210 | } |
| 5211 | |
| 5212 | // Attempt to use ZR checking TVal is 0, possibly inverting the condition |
// to get there. CSINC is not invertible like the other two (~(~a) == a and
// -(-a) == a, but (a+1)+1 != a).
| 5215 | if (FVal == 0 && Opcode != ARMISD::CSINC) { |
| 5216 | std::swap(a&: TrueVal, b&: FalseVal); |
| 5217 | std::swap(a&: TVal, b&: FVal); |
| 5218 | CC = ISD::getSetCCInverse(Operation: CC, Type: LHS.getValueType()); |
| 5219 | } |
| 5220 | |
| 5221 | // Drops F's value because we can get it by inverting/negating TVal. |
| 5222 | FalseVal = TrueVal; |
| 5223 | |
| 5224 | SDValue ARMcc; |
| 5225 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
| 5226 | EVT VT = TrueVal.getValueType(); |
| 5227 | return DAG.getNode(Opcode, DL: dl, VT, N1: TrueVal, N2: FalseVal, N3: ARMcc, N4: Cmp); |
| 5228 | } |
| 5229 | } |
| 5230 | |
| 5231 | if (isUnsupportedFloatingType(VT: LHS.getValueType())) { |
| 5232 | softenSetCCOperands(DAG, VT: LHS.getValueType(), NewLHS&: LHS, NewRHS&: RHS, CCCode&: CC, DL: dl, OldLHS: LHS, OldRHS: RHS); |
| 5233 | |
| 5234 | // If softenSetCCOperands only returned one value, we should compare it to |
| 5235 | // zero. |
| 5236 | if (!RHS.getNode()) { |
| 5237 | RHS = DAG.getConstant(Val: 0, DL: dl, VT: LHS.getValueType()); |
| 5238 | CC = ISD::SETNE; |
| 5239 | } |
| 5240 | } |
| 5241 | |
| 5242 | if (LHS.getValueType() == MVT::i32) { |
| 5243 | // Try to generate VSEL on ARMv8. |
| 5244 | // The VSEL instruction can't use all the usual ARM condition |
| 5245 | // codes: it only has two bits to select the condition code, so it's |
| 5246 | // constrained to use only GE, GT, VS and EQ. |
| 5247 | // |
| 5248 | // To implement all the various ISD::SETXXX opcodes, we sometimes need to |
| 5249 | // swap the operands of the previous compare instruction (effectively |
| 5250 | // inverting the compare condition, swapping 'less' and 'greater') and |
| 5251 | // sometimes need to swap the operands to the VSEL (which inverts the |
| 5252 | // condition in the sense of firing whenever the previous condition didn't) |
| 5253 | if (Subtarget->hasFPARMv8Base() && (TrueVal.getValueType() == MVT::f16 || |
| 5254 | TrueVal.getValueType() == MVT::f32 || |
| 5255 | TrueVal.getValueType() == MVT::f64)) { |
| 5256 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); |
| 5257 | if (CondCode == ARMCC::LT || CondCode == ARMCC::LE || |
| 5258 | CondCode == ARMCC::VC || CondCode == ARMCC::NE) { |
| 5259 | CC = ISD::getSetCCInverse(Operation: CC, Type: LHS.getValueType()); |
| 5260 | std::swap(a&: TrueVal, b&: FalseVal); |
| 5261 | } |
| 5262 | } |
| 5263 | |
| 5264 | SDValue ARMcc; |
| 5265 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
// Choose GE over PL, which vsel does not support
| 5267 | if (ARMcc->getAsZExtVal() == ARMCC::PL) |
| 5268 | ARMcc = DAG.getConstant(Val: ARMCC::GE, DL: dl, VT: MVT::i32); |
| 5269 | return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, Flags: Cmp, DAG); |
| 5270 | } |
| 5271 | |
| 5272 | ARMCC::CondCodes CondCode, CondCode2; |
| 5273 | FPCCToARMCC(CC, CondCode, CondCode2); |
| 5274 | |
| 5275 | // Normalize the fp compare. If RHS is zero we prefer to keep it there so we |
| 5276 | // match CMPFPw0 instead of CMPFP, though we don't do this for f16 because we |
| 5277 | // must use VSEL (limited condition codes), due to not having conditional f16 |
| 5278 | // moves. |
| 5279 | if (Subtarget->hasFPARMv8Base() && |
| 5280 | !(isFloatingPointZero(Op: RHS) && TrueVal.getValueType() != MVT::f16) && |
| 5281 | (TrueVal.getValueType() == MVT::f16 || |
| 5282 | TrueVal.getValueType() == MVT::f32 || |
| 5283 | TrueVal.getValueType() == MVT::f64)) { |
| 5284 | bool swpCmpOps = false; |
| 5285 | bool swpVselOps = false; |
| 5286 | checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps); |
| 5287 | |
| 5288 | if (CondCode == ARMCC::GT || CondCode == ARMCC::GE || |
| 5289 | CondCode == ARMCC::VS || CondCode == ARMCC::EQ) { |
| 5290 | if (swpCmpOps) |
| 5291 | std::swap(a&: LHS, b&: RHS); |
| 5292 | if (swpVselOps) |
| 5293 | std::swap(a&: TrueVal, b&: FalseVal); |
| 5294 | } |
| 5295 | } |
| 5296 | |
| 5297 | SDValue ARMcc = DAG.getConstant(Val: CondCode, DL: dl, VT: MVT::i32); |
| 5298 | SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); |
| 5299 | SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, Flags: Cmp, DAG); |
| 5300 | if (CondCode2 != ARMCC::AL) { |
| 5301 | SDValue ARMcc2 = DAG.getConstant(Val: CondCode2, DL: dl, VT: MVT::i32); |
| 5302 | Result = getCMOV(dl, VT, FalseVal: Result, TrueVal, ARMcc: ARMcc2, Flags: Cmp, DAG); |
| 5303 | } |
| 5304 | return Result; |
| 5305 | } |
| 5306 | |
| 5307 | /// canChangeToInt - Given the fp compare operand, return true if it is suitable |
| 5308 | /// to morph to an integer compare sequence. |
| 5309 | static bool canChangeToInt(SDValue Op, bool &SeenZero, |
| 5310 | const ARMSubtarget *Subtarget) { |
| 5311 | SDNode *N = Op.getNode(); |
| 5312 | if (!N->hasOneUse()) |
| 5313 | // Otherwise it requires moving the value from fp to integer registers. |
| 5314 | return false; |
| 5315 | if (!N->getNumValues()) |
| 5316 | return false; |
| 5317 | EVT VT = Op.getValueType(); |
| 5318 | if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) |
| 5319 | // f32 case is generally profitable. f64 case only makes sense when vcmpe + |
| 5320 | // vmrs are very slow, e.g. cortex-a8. |
| 5321 | return false; |
| 5322 | |
| 5323 | if (isFloatingPointZero(Op)) { |
| 5324 | SeenZero = true; |
| 5325 | return true; |
| 5326 | } |
| 5327 | return ISD::isNormalLoad(N); |
| 5328 | } |
| 5329 | |
| 5330 | static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { |
| 5331 | if (isFloatingPointZero(Op)) |
| 5332 | return DAG.getConstant(Val: 0, DL: SDLoc(Op), VT: MVT::i32); |
| 5333 | |
| 5334 | if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Val&: Op)) |
| 5335 | return DAG.getLoad(VT: MVT::i32, dl: SDLoc(Op), Chain: Ld->getChain(), Ptr: Ld->getBasePtr(), |
| 5336 | PtrInfo: Ld->getPointerInfo(), Alignment: Ld->getAlign(), |
| 5337 | MMOFlags: Ld->getMemOperand()->getFlags()); |
| 5338 | |
| 5339 | llvm_unreachable("Unknown VFP cmp argument!" ); |
| 5340 | } |
| 5341 | |
| 5342 | static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, |
| 5343 | SDValue &RetVal1, SDValue &RetVal2) { |
| 5344 | SDLoc dl(Op); |
| 5345 | |
| 5346 | if (isFloatingPointZero(Op)) { |
| 5347 | RetVal1 = DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32); |
| 5348 | RetVal2 = DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32); |
| 5349 | return; |
| 5350 | } |
| 5351 | |
| 5352 | if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Val&: Op)) { |
| 5353 | SDValue Ptr = Ld->getBasePtr(); |
| 5354 | RetVal1 = |
| 5355 | DAG.getLoad(VT: MVT::i32, dl, Chain: Ld->getChain(), Ptr, PtrInfo: Ld->getPointerInfo(), |
| 5356 | Alignment: Ld->getAlign(), MMOFlags: Ld->getMemOperand()->getFlags()); |
| 5357 | |
| 5358 | EVT PtrType = Ptr.getValueType(); |
| 5359 | SDValue NewPtr = DAG.getNode(Opcode: ISD::ADD, DL: dl, |
| 5360 | VT: PtrType, N1: Ptr, N2: DAG.getConstant(Val: 4, DL: dl, VT: PtrType)); |
| 5361 | RetVal2 = DAG.getLoad(VT: MVT::i32, dl, Chain: Ld->getChain(), Ptr: NewPtr, |
| 5362 | PtrInfo: Ld->getPointerInfo().getWithOffset(O: 4), |
| 5363 | Alignment: commonAlignment(A: Ld->getAlign(), Offset: 4), |
| 5364 | MMOFlags: Ld->getMemOperand()->getFlags()); |
| 5365 | return; |
| 5366 | } |
| 5367 | |
| 5368 | llvm_unreachable("Unknown VFP cmp argument!" ); |
| 5369 | } |
| 5370 | |
| 5371 | /// OptimizeVFPBrcond - With nnan and without daz, it's legal to optimize some |
| 5372 | /// f32 and even f64 comparisons to integer ones. |
| 5373 | SDValue |
| 5374 | ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { |
| 5375 | SDValue Chain = Op.getOperand(i: 0); |
| 5376 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 1))->get(); |
| 5377 | SDValue LHS = Op.getOperand(i: 2); |
| 5378 | SDValue RHS = Op.getOperand(i: 3); |
| 5379 | SDValue Dest = Op.getOperand(i: 4); |
| 5380 | SDLoc dl(Op); |
| 5381 | |
| 5382 | bool LHSSeenZero = false; |
| 5383 | bool LHSOk = canChangeToInt(Op: LHS, SeenZero&: LHSSeenZero, Subtarget); |
| 5384 | bool RHSSeenZero = false; |
| 5385 | bool RHSOk = canChangeToInt(Op: RHS, SeenZero&: RHSSeenZero, Subtarget); |
| 5386 | if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) { |
// With no NaNs and no other uses of the CMP operands, an EQ or NE compare
// can be done as an integer comparison on the values' bit patterns.
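// One of the operands is known to be +/-0.0 at this point; masking off the
// sign bit below keeps the bit-pattern comparison consistent with IEEE
// semantics, under which +0.0 and -0.0 compare equal.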
| 5390 | if (CC == ISD::SETOEQ) |
| 5391 | CC = ISD::SETEQ; |
| 5392 | else if (CC == ISD::SETUNE) |
| 5393 | CC = ISD::SETNE; |
| 5394 | |
| 5395 | SDValue Mask = DAG.getConstant(Val: 0x7fffffff, DL: dl, VT: MVT::i32); |
| 5396 | SDValue ARMcc; |
| 5397 | if (LHS.getValueType() == MVT::f32) { |
| 5398 | LHS = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, |
| 5399 | N1: bitcastf32Toi32(Op: LHS, DAG), N2: Mask); |
| 5400 | RHS = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, |
| 5401 | N1: bitcastf32Toi32(Op: RHS, DAG), N2: Mask); |
| 5402 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
| 5403 | return DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, N1: Chain, N2: Dest, N3: ARMcc, |
| 5404 | N4: Cmp); |
| 5405 | } |
| 5406 | |
| 5407 | SDValue LHS1, LHS2; |
| 5408 | SDValue RHS1, RHS2; |
| 5409 | expandf64Toi32(Op: LHS, DAG, RetVal1&: LHS1, RetVal2&: LHS2); |
| 5410 | expandf64Toi32(Op: RHS, DAG, RetVal1&: RHS1, RetVal2&: RHS2); |
| 5411 | LHS2 = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, N1: LHS2, N2: Mask); |
| 5412 | RHS2 = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, N1: RHS2, N2: Mask); |
| 5413 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); |
| 5414 | ARMcc = DAG.getConstant(Val: CondCode, DL: dl, VT: MVT::i32); |
| 5415 | SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; |
| 5416 | return DAG.getNode(Opcode: ARMISD::BCC_i64, DL: dl, VT: MVT::Other, Ops); |
| 5417 | } |
| 5418 | |
| 5419 | return SDValue(); |
| 5420 | } |
| 5421 | |
| 5422 | // Generate CMP + CMOV for integer abs. |
| 5423 | SDValue ARMTargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const { |
| 5424 | SDLoc DL(Op); |
| 5425 | |
| 5426 | SDValue Neg = DAG.getNegative(Val: Op.getOperand(i: 0), DL, VT: MVT::i32); |
| 5427 | |
| 5428 | // Generate CMP & CMOV. |
| 5429 | SDValue Cmp = DAG.getNode(Opcode: ARMISD::CMP, DL, VT: FlagsVT, N1: Op.getOperand(i: 0), |
| 5430 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
| 5431 | return DAG.getNode(Opcode: ARMISD::CMOV, DL, VT: MVT::i32, N1: Op.getOperand(i: 0), N2: Neg, |
| 5432 | N3: DAG.getConstant(Val: ARMCC::MI, DL, VT: MVT::i32), N4: Cmp); |
| 5433 | } |
| 5434 | |
| 5435 | SDValue ARMTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { |
| 5436 | SDValue Chain = Op.getOperand(i: 0); |
| 5437 | SDValue Cond = Op.getOperand(i: 1); |
| 5438 | SDValue Dest = Op.getOperand(i: 2); |
| 5439 | SDLoc dl(Op); |
| 5440 | |
| 5441 | // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch |
| 5442 | // instruction. |
| 5443 | unsigned Opc = Cond.getOpcode(); |
| 5444 | bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) && |
| 5445 | !Subtarget->isThumb1Only(); |
| 5446 | if (Cond.getResNo() == 1 && |
| 5447 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || |
| 5448 | Opc == ISD::USUBO || OptimizeMul)) { |
| 5449 | // Only lower legal XALUO ops. |
| 5450 | if (!isTypeLegal(VT: Cond->getValueType(ResNo: 0))) |
| 5451 | return SDValue(); |
| 5452 | |
| 5453 | // The actual operation with overflow check. |
| 5454 | SDValue Value, OverflowCmp; |
| 5455 | SDValue ARMcc; |
| 5456 | std::tie(args&: Value, args&: OverflowCmp) = getARMXALUOOp(Op: Cond, DAG, ARMcc); |
| 5457 | |
| 5458 | // Reverse the condition code. |
| 5459 | ARMCC::CondCodes CondCode = |
| 5460 | (ARMCC::CondCodes)cast<const ConstantSDNode>(Val&: ARMcc)->getZExtValue(); |
| 5461 | CondCode = ARMCC::getOppositeCondition(CC: CondCode); |
| 5462 | ARMcc = DAG.getConstant(Val: CondCode, DL: SDLoc(ARMcc), VT: MVT::i32); |
| 5463 | |
| 5464 | return DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, N1: Chain, N2: Dest, N3: ARMcc, |
| 5465 | N4: OverflowCmp); |
| 5466 | } |
| 5467 | |
| 5468 | return SDValue(); |
| 5469 | } |
| 5470 | |
| 5471 | SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { |
| 5472 | SDValue Chain = Op.getOperand(i: 0); |
| 5473 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 1))->get(); |
| 5474 | SDValue LHS = Op.getOperand(i: 2); |
| 5475 | SDValue RHS = Op.getOperand(i: 3); |
| 5476 | SDValue Dest = Op.getOperand(i: 4); |
| 5477 | SDLoc dl(Op); |
| 5478 | |
| 5479 | if (isUnsupportedFloatingType(VT: LHS.getValueType())) { |
| 5480 | softenSetCCOperands(DAG, VT: LHS.getValueType(), NewLHS&: LHS, NewRHS&: RHS, CCCode&: CC, DL: dl, OldLHS: LHS, OldRHS: RHS); |
| 5481 | |
| 5482 | // If softenSetCCOperands only returned one value, we should compare it to |
| 5483 | // zero. |
| 5484 | if (!RHS.getNode()) { |
| 5485 | RHS = DAG.getConstant(Val: 0, DL: dl, VT: LHS.getValueType()); |
| 5486 | CC = ISD::SETNE; |
| 5487 | } |
| 5488 | } |
| 5489 | |
| 5490 | // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch |
| 5491 | // instruction. |
| 5492 | unsigned Opc = LHS.getOpcode(); |
| 5493 | bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) && |
| 5494 | !Subtarget->isThumb1Only(); |
| 5495 | if (LHS.getResNo() == 1 && (isOneConstant(V: RHS) || isNullConstant(V: RHS)) && |
| 5496 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || |
| 5497 | Opc == ISD::USUBO || OptimizeMul) && |
| 5498 | (CC == ISD::SETEQ || CC == ISD::SETNE)) { |
| 5499 | // Only lower legal XALUO ops. |
| 5500 | if (!isTypeLegal(VT: LHS->getValueType(ResNo: 0))) |
| 5501 | return SDValue(); |
| 5502 | |
| 5503 | // The actual operation with overflow check. |
| 5504 | SDValue Value, OverflowCmp; |
| 5505 | SDValue ARMcc; |
| 5506 | std::tie(args&: Value, args&: OverflowCmp) = getARMXALUOOp(Op: LHS.getValue(R: 0), DAG, ARMcc); |
| 5507 | |
| 5508 | if ((CC == ISD::SETNE) != isOneConstant(V: RHS)) { |
| 5509 | // Reverse the condition code. |
| 5510 | ARMCC::CondCodes CondCode = |
| 5511 | (ARMCC::CondCodes)cast<const ConstantSDNode>(Val&: ARMcc)->getZExtValue(); |
| 5512 | CondCode = ARMCC::getOppositeCondition(CC: CondCode); |
| 5513 | ARMcc = DAG.getConstant(Val: CondCode, DL: SDLoc(ARMcc), VT: MVT::i32); |
| 5514 | } |
| 5515 | |
| 5516 | return DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, N1: Chain, N2: Dest, N3: ARMcc, |
| 5517 | N4: OverflowCmp); |
| 5518 | } |
| 5519 | |
| 5520 | if (LHS.getValueType() == MVT::i32) { |
| 5521 | SDValue ARMcc; |
| 5522 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
| 5523 | return DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, N1: Chain, N2: Dest, N3: ARMcc, N4: Cmp); |
| 5524 | } |
| 5525 | |
| 5526 | SDNodeFlags Flags = Op->getFlags(); |
| 5527 | if (Flags.hasNoNaNs() && |
| 5528 | DAG.getDenormalMode(VT: MVT::f32) == DenormalMode::getIEEE() && |
| 5529 | DAG.getDenormalMode(VT: MVT::f64) == DenormalMode::getIEEE() && |
| 5530 | (CC == ISD::SETEQ || CC == ISD::SETOEQ || CC == ISD::SETNE || |
| 5531 | CC == ISD::SETUNE)) { |
| 5532 | if (SDValue Result = OptimizeVFPBrcond(Op, DAG)) |
| 5533 | return Result; |
| 5534 | } |
| 5535 | |
| 5536 | ARMCC::CondCodes CondCode, CondCode2; |
| 5537 | FPCCToARMCC(CC, CondCode, CondCode2); |
| 5538 | |
| 5539 | SDValue ARMcc = DAG.getConstant(Val: CondCode, DL: dl, VT: MVT::i32); |
| 5540 | SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); |
| 5541 | SDValue Ops[] = {Chain, Dest, ARMcc, Cmp}; |
| 5542 | SDValue Res = DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, Ops); |
| 5543 | if (CondCode2 != ARMCC::AL) { |
| 5544 | ARMcc = DAG.getConstant(Val: CondCode2, DL: dl, VT: MVT::i32); |
| 5545 | SDValue Ops[] = {Res, Dest, ARMcc, Cmp}; |
| 5546 | Res = DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, Ops); |
| 5547 | } |
| 5548 | return Res; |
| 5549 | } |
| 5550 | |
| 5551 | SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { |
| 5552 | SDValue Chain = Op.getOperand(i: 0); |
| 5553 | SDValue Table = Op.getOperand(i: 1); |
| 5554 | SDValue Index = Op.getOperand(i: 2); |
| 5555 | SDLoc dl(Op); |
| 5556 | |
| 5557 | EVT PTy = getPointerTy(DL: DAG.getDataLayout()); |
| 5558 | JumpTableSDNode *JT = cast<JumpTableSDNode>(Val&: Table); |
| 5559 | SDValue JTI = DAG.getTargetJumpTable(JTI: JT->getIndex(), VT: PTy); |
| 5560 | Table = DAG.getNode(Opcode: ARMISD::WrapperJT, DL: dl, VT: MVT::i32, Operand: JTI); |
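| | // Each jump-table entry is 4 bytes wide, so the address of the selected |
| | // entry is the table base plus Index * 4. |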
| 5561 | Index = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: PTy, N1: Index, N2: DAG.getConstant(Val: 4, DL: dl, VT: PTy)); |
| 5562 | SDValue Addr = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PTy, N1: Table, N2: Index); |
| 5563 | if (Subtarget->isThumb2() || (Subtarget->hasV8MBaselineOps() && Subtarget->isThumb())) { |
| 5564 | // Thumb2 and ARMv8-M use a two-level jump: the branch jumps into the jump |
| 5565 | // table, which then jumps again to the destination. This also makes it easier |
| 5566 | // to translate it to TBB / TBH later (Thumb2 only). |
| 5567 | // FIXME: This might not work if the function is extremely large. |
| 5568 | return DAG.getNode(Opcode: ARMISD::BR2_JT, DL: dl, VT: MVT::Other, N1: Chain, |
| 5569 | N2: Addr, N3: Op.getOperand(i: 2), N4: JTI); |
| 5570 | } |
| 5571 | if (isPositionIndependent() || Subtarget->isROPI()) { |
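| | // Under PIC/ROPI the jump-table entries hold table-relative offsets, so |
| | // load the entry and add it back onto the table address to form the |
| | // absolute destination. |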
| 5572 | Addr = |
| 5573 | DAG.getLoad(VT: (EVT)MVT::i32, dl, Chain, Ptr: Addr, |
| 5574 | PtrInfo: MachinePointerInfo::getJumpTable(MF&: DAG.getMachineFunction())); |
| 5575 | Chain = Addr.getValue(R: 1); |
| 5576 | Addr = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PTy, N1: Table, N2: Addr); |
| 5577 | return DAG.getNode(Opcode: ARMISD::BR_JT, DL: dl, VT: MVT::Other, N1: Chain, N2: Addr, N3: JTI); |
| 5578 | } else { |
| 5579 | Addr = |
| 5580 | DAG.getLoad(VT: PTy, dl, Chain, Ptr: Addr, |
| 5581 | PtrInfo: MachinePointerInfo::getJumpTable(MF&: DAG.getMachineFunction())); |
| 5582 | Chain = Addr.getValue(R: 1); |
| 5583 | return DAG.getNode(Opcode: ARMISD::BR_JT, DL: dl, VT: MVT::Other, N1: Chain, N2: Addr, N3: JTI); |
| 5584 | } |
| 5585 | } |
| 5586 | |
| 5587 | static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) { |
| 5588 | EVT VT = Op.getValueType(); |
| 5589 | SDLoc dl(Op); |
| 5590 | |
| 5591 | if (Op.getValueType().getVectorElementType() == MVT::i32) { |
| 5592 | if (Op.getOperand(i: 0).getValueType().getVectorElementType() == MVT::f32) |
| 5593 | return Op; |
| 5594 | return DAG.UnrollVectorOp(N: Op.getNode()); |
| 5595 | } |
| 5596 | |
| 5597 | const bool HasFullFP16 = DAG.getSubtarget<ARMSubtarget>().hasFullFP16(); |
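| | // The remaining cases produce i16 results: do the conversion in the integer |
| | // type matching the source element width, then truncate to the i16 result. |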
| 5598 | |
| 5599 | EVT NewTy; |
| 5600 | const EVT OpTy = Op.getOperand(i: 0).getValueType(); |
| 5601 | if (OpTy == MVT::v4f32) |
| 5602 | NewTy = MVT::v4i32; |
| 5603 | else if (OpTy == MVT::v4f16 && HasFullFP16) |
| 5604 | NewTy = MVT::v4i16; |
| 5605 | else if (OpTy == MVT::v8f16 && HasFullFP16) |
| 5606 | NewTy = MVT::v8i16; |
| 5607 | else |
| 5608 | llvm_unreachable("Invalid type for custom lowering!"); |
| 5609 | |
| 5610 | if (VT != MVT::v4i16 && VT != MVT::v8i16) |
| 5611 | return DAG.UnrollVectorOp(N: Op.getNode()); |
| 5612 | |
| 5613 | Op = DAG.getNode(Opcode: Op.getOpcode(), DL: dl, VT: NewTy, Operand: Op.getOperand(i: 0)); |
| 5614 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT, Operand: Op); |
| 5615 | } |
| 5616 | |
| 5617 | SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const { |
| 5618 | EVT VT = Op.getValueType(); |
| 5619 | if (VT.isVector()) |
| 5620 | return LowerVectorFP_TO_INT(Op, DAG); |
| 5621 | |
| 5622 | bool IsStrict = Op->isStrictFPOpcode(); |
| 5623 | SDValue SrcVal = Op.getOperand(i: IsStrict ? 1 : 0); |
| 5624 | |
| 5625 | if (isUnsupportedFloatingType(VT: SrcVal.getValueType())) { |
| 5626 | RTLIB::Libcall LC; |
| 5627 | if (Op.getOpcode() == ISD::FP_TO_SINT || |
| 5628 | Op.getOpcode() == ISD::STRICT_FP_TO_SINT) |
| 5629 | LC = RTLIB::getFPTOSINT(OpVT: SrcVal.getValueType(), |
| 5630 | RetVT: Op.getValueType()); |
| 5631 | else |
| 5632 | LC = RTLIB::getFPTOUINT(OpVT: SrcVal.getValueType(), |
| 5633 | RetVT: Op.getValueType()); |
| 5634 | SDLoc Loc(Op); |
| 5635 | MakeLibCallOptions CallOptions; |
| 5636 | SDValue Chain = IsStrict ? Op.getOperand(i: 0) : SDValue(); |
| 5637 | SDValue Result; |
| 5638 | std::tie(args&: Result, args&: Chain) = makeLibCall(DAG, LC, RetVT: Op.getValueType(), Ops: SrcVal, |
| 5639 | CallOptions, dl: Loc, Chain); |
| 5640 | return IsStrict ? DAG.getMergeValues(Ops: {Result, Chain}, dl: Loc) : Result; |
| 5641 | } |
| 5642 | |
| 5643 | // FIXME: Remove this when we have strict fp instruction selection patterns |
| 5644 | if (IsStrict) { |
| 5645 | SDLoc Loc(Op); |
| 5646 | SDValue Result = |
| 5647 | DAG.getNode(Opcode: Op.getOpcode() == ISD::STRICT_FP_TO_SINT ? ISD::FP_TO_SINT |
| 5648 | : ISD::FP_TO_UINT, |
| 5649 | DL: Loc, VT: Op.getValueType(), Operand: SrcVal); |
| 5650 | return DAG.getMergeValues(Ops: {Result, Op.getOperand(i: 0)}, dl: Loc); |
| 5651 | } |
| 5652 | |
| 5653 | return Op; |
| 5654 | } |
| 5655 | |
| 5656 | static SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG, |
| 5657 | const ARMSubtarget *Subtarget) { |
| 5658 | EVT VT = Op.getValueType(); |
| 5659 | EVT ToVT = cast<VTSDNode>(Val: Op.getOperand(i: 1))->getVT(); |
| 5660 | EVT FromVT = Op.getOperand(i: 0).getValueType(); |
| 5661 | |
| 5662 | if (VT == MVT::i32 && ToVT == MVT::i32 && FromVT == MVT::f32) |
| 5663 | return Op; |
| 5664 | if (VT == MVT::i32 && ToVT == MVT::i32 && FromVT == MVT::f64 && |
| 5665 | Subtarget->hasFP64()) |
| 5666 | return Op; |
| 5667 | if (VT == MVT::i32 && ToVT == MVT::i32 && FromVT == MVT::f16 && |
| 5668 | Subtarget->hasFullFP16()) |
| 5669 | return Op; |
| 5670 | if (VT == MVT::v4i32 && ToVT == MVT::i32 && FromVT == MVT::v4f32 && |
| 5671 | Subtarget->hasMVEFloatOps()) |
| 5672 | return Op; |
| 5673 | if (VT == MVT::v8i16 && ToVT == MVT::i16 && FromVT == MVT::v8f16 && |
| 5674 | Subtarget->hasMVEFloatOps()) |
| 5675 | return Op; |
| 5676 | |
| 5677 | if (FromVT != MVT::v4f32 && FromVT != MVT::v8f16) |
| 5678 | return SDValue(); |
| 5679 | |
| 5680 | SDLoc DL(Op); |
| 5681 | bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT; |
| 5682 | unsigned BW = ToVT.getScalarSizeInBits() - IsSigned; |
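| | // Convert with the full lane width and clamp to the target range by hand. |
| | // For example, a signed saturate to i16 gives BW = 15 and clamps to |
| | // [-(1 << 15), (1 << 15) - 1] = [-32768, 32767]; an unsigned saturate to |
| | // i16 gives BW = 16 and only needs the UMIN bound of 65535. |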
| 5683 | SDValue CVT = DAG.getNode(Opcode: Op.getOpcode(), DL, VT, N1: Op.getOperand(i: 0), |
| 5684 | N2: DAG.getValueType(VT.getScalarType())); |
| 5685 | SDValue Max = DAG.getNode(Opcode: IsSigned ? ISD::SMIN : ISD::UMIN, DL, VT, N1: CVT, |
| 5686 | N2: DAG.getConstant(Val: (1 << BW) - 1, DL, VT)); |
| 5687 | if (IsSigned) |
| 5688 | Max = DAG.getNode(Opcode: ISD::SMAX, DL, VT, N1: Max, |
| 5689 | N2: DAG.getSignedConstant(Val: -(1 << BW), DL, VT)); |
| 5690 | return Max; |
| 5691 | } |
| 5692 | |
| 5693 | static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { |
| 5694 | EVT VT = Op.getValueType(); |
| 5695 | SDLoc dl(Op); |
| 5696 | |
| 5697 | if (Op.getOperand(i: 0).getValueType().getVectorElementType() == MVT::i32) { |
| 5698 | if (VT.getVectorElementType() == MVT::f32) |
| 5699 | return Op; |
| 5700 | return DAG.UnrollVectorOp(N: Op.getNode()); |
| 5701 | } |
| 5702 | |
| 5703 | assert((Op.getOperand(0).getValueType() == MVT::v4i16 || |
| 5704 | Op.getOperand(0).getValueType() == MVT::v8i16) && |
| 5705 | "Invalid type for custom lowering!"); |
| 5706 | |
| 5707 | const bool HasFullFP16 = DAG.getSubtarget<ARMSubtarget>().hasFullFP16(); |
| 5708 | |
| 5709 | EVT DestVecType; |
| 5710 | if (VT == MVT::v4f32) |
| 5711 | DestVecType = MVT::v4i32; |
| 5712 | else if (VT == MVT::v4f16 && HasFullFP16) |
| 5713 | DestVecType = MVT::v4i16; |
| 5714 | else if (VT == MVT::v8f16 && HasFullFP16) |
| 5715 | DestVecType = MVT::v8i16; |
| 5716 | else |
| 5717 | return DAG.UnrollVectorOp(N: Op.getNode()); |
| 5718 | |
| 5719 | unsigned CastOpc; |
| 5720 | unsigned Opc; |
| 5721 | switch (Op.getOpcode()) { |
| 5722 | default: llvm_unreachable("Invalid opcode!"); |
| 5723 | case ISD::SINT_TO_FP: |
| 5724 | CastOpc = ISD::SIGN_EXTEND; |
| 5725 | Opc = ISD::SINT_TO_FP; |
| 5726 | break; |
| 5727 | case ISD::UINT_TO_FP: |
| 5728 | CastOpc = ISD::ZERO_EXTEND; |
| 5729 | Opc = ISD::UINT_TO_FP; |
| 5730 | break; |
| 5731 | } |
| 5732 | |
| 5733 | Op = DAG.getNode(Opcode: CastOpc, DL: dl, VT: DestVecType, Operand: Op.getOperand(i: 0)); |
| 5734 | return DAG.getNode(Opcode: Opc, DL: dl, VT, Operand: Op); |
| 5735 | } |
| 5736 | |
| 5737 | SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const { |
| 5738 | EVT VT = Op.getValueType(); |
| 5739 | if (VT.isVector()) |
| 5740 | return LowerVectorINT_TO_FP(Op, DAG); |
| 5741 | if (isUnsupportedFloatingType(VT)) { |
| 5742 | RTLIB::Libcall LC; |
| 5743 | if (Op.getOpcode() == ISD::SINT_TO_FP) |
| 5744 | LC = RTLIB::getSINTTOFP(OpVT: Op.getOperand(i: 0).getValueType(), |
| 5745 | RetVT: Op.getValueType()); |
| 5746 | else |
| 5747 | LC = RTLIB::getUINTTOFP(OpVT: Op.getOperand(i: 0).getValueType(), |
| 5748 | RetVT: Op.getValueType()); |
| 5749 | MakeLibCallOptions CallOptions; |
| 5750 | return makeLibCall(DAG, LC, RetVT: Op.getValueType(), Ops: Op.getOperand(i: 0), |
| 5751 | CallOptions, dl: SDLoc(Op)).first; |
| 5752 | } |
| 5753 | |
| 5754 | return Op; |
| 5755 | } |
| 5756 | |
| 5757 | SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { |
| 5758 | // Implement fcopysign with a fabs and a conditional fneg. |
| 5759 | SDValue Tmp0 = Op.getOperand(i: 0); |
| 5760 | SDValue Tmp1 = Op.getOperand(i: 1); |
| 5761 | SDLoc dl(Op); |
| 5762 | EVT VT = Op.getValueType(); |
| 5763 | EVT SrcVT = Tmp1.getValueType(); |
| 5764 | bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || |
| 5765 | Tmp0.getOpcode() == ARMISD::VMOVDRR; |
| 5766 | bool UseNEON = !InGPR && Subtarget->hasNEON(); |
| 5767 | |
| 5768 | if (UseNEON) { |
| 5769 | // Use VBSL to copy the sign bit. |
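| | // The mask built below encodes 0x80000000 in each 32-bit lane (shifted up |
| | // into the high word for f64), so the bitwise select computes |
| | // (Tmp1 & Mask) | (Tmp0 & ~Mask): the sign bit comes from Tmp1 and all |
| | // remaining bits come from Tmp0. |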
| 5770 | unsigned EncodedVal = ARM_AM::createVMOVModImm(OpCmode: 0x6, Val: 0x80); |
| 5771 | SDValue Mask = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: MVT::v2i32, |
| 5772 | Operand: DAG.getTargetConstant(Val: EncodedVal, DL: dl, VT: MVT::i32)); |
| 5773 | EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64; |
| 5774 | if (VT == MVT::f64) |
| 5775 | Mask = DAG.getNode(Opcode: ARMISD::VSHLIMM, DL: dl, VT: OpVT, |
| 5776 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: OpVT, Operand: Mask), |
| 5777 | N2: DAG.getConstant(Val: 32, DL: dl, VT: MVT::i32)); |
| 5778 | else /*if (VT == MVT::f32)*/ |
| 5779 | Tmp0 = DAG.getNode(Opcode: ISD::SCALAR_TO_VECTOR, DL: dl, VT: MVT::v2f32, Operand: Tmp0); |
| 5780 | if (SrcVT == MVT::f32) { |
| 5781 | Tmp1 = DAG.getNode(Opcode: ISD::SCALAR_TO_VECTOR, DL: dl, VT: MVT::v2f32, Operand: Tmp1); |
| 5782 | if (VT == MVT::f64) |
| 5783 | Tmp1 = DAG.getNode(Opcode: ARMISD::VSHLIMM, DL: dl, VT: OpVT, |
| 5784 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: OpVT, Operand: Tmp1), |
| 5785 | N2: DAG.getConstant(Val: 32, DL: dl, VT: MVT::i32)); |
| 5786 | } else if (VT == MVT::f32) |
| 5787 | Tmp1 = DAG.getNode(Opcode: ARMISD::VSHRuIMM, DL: dl, VT: MVT::v1i64, |
| 5788 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v1i64, Operand: Tmp1), |
| 5789 | N2: DAG.getConstant(Val: 32, DL: dl, VT: MVT::i32)); |
| 5790 | Tmp0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: OpVT, Operand: Tmp0); |
| 5791 | Tmp1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: OpVT, Operand: Tmp1); |
| 5792 | |
| 5793 | SDValue AllOnes = DAG.getTargetConstant(Val: ARM_AM::createVMOVModImm(OpCmode: 0xe, Val: 0xff), |
| 5794 | DL: dl, VT: MVT::i32); |
| 5795 | AllOnes = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: MVT::v8i8, Operand: AllOnes); |
| 5796 | SDValue MaskNot = DAG.getNode(Opcode: ISD::XOR, DL: dl, VT: OpVT, N1: Mask, |
| 5797 | N2: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: OpVT, Operand: AllOnes)); |
| 5798 | |
| 5799 | SDValue Res = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: OpVT, |
| 5800 | N1: DAG.getNode(Opcode: ISD::AND, DL: dl, VT: OpVT, N1: Tmp1, N2: Mask), |
| 5801 | N2: DAG.getNode(Opcode: ISD::AND, DL: dl, VT: OpVT, N1: Tmp0, N2: MaskNot)); |
| 5802 | if (VT == MVT::f32) { |
| 5803 | Res = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v2f32, Operand: Res); |
| 5804 | Res = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f32, N1: Res, |
| 5805 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 5806 | } else { |
| 5807 | Res = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f64, Operand: Res); |
| 5808 | } |
| 5809 | |
| 5810 | return Res; |
| 5811 | } |
| 5812 | |
| 5813 | // Bitcast operand 1 to i32. |
| 5814 | if (SrcVT == MVT::f64) |
| 5815 | Tmp1 = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
| 5816 | N: Tmp1).getValue(R: 1); |
| 5817 | Tmp1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::i32, Operand: Tmp1); |
| 5818 | |
| 5819 | // Or in the signbit with integer operations. |
| 5820 | SDValue Mask1 = DAG.getConstant(Val: 0x80000000, DL: dl, VT: MVT::i32); |
| 5821 | SDValue Mask2 = DAG.getConstant(Val: 0x7fffffff, DL: dl, VT: MVT::i32); |
| 5822 | Tmp1 = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, N1: Tmp1, N2: Mask1); |
| 5823 | if (VT == MVT::f32) { |
| 5824 | Tmp0 = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, |
| 5825 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::i32, Operand: Tmp0), N2: Mask2); |
| 5826 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f32, |
| 5827 | Operand: DAG.getNode(Opcode: ISD::OR, DL: dl, VT: MVT::i32, N1: Tmp0, N2: Tmp1)); |
| 5828 | } |
| 5829 | |
| 5830 | // f64: Or the high part with signbit and then combine two parts. |
| 5831 | Tmp0 = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
| 5832 | N: Tmp0); |
| 5833 | SDValue Lo = Tmp0.getValue(R: 0); |
| 5834 | SDValue Hi = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, N1: Tmp0.getValue(R: 1), N2: Mask2); |
| 5835 | Hi = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: MVT::i32, N1: Hi, N2: Tmp1); |
| 5836 | return DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: Lo, N2: Hi); |
| 5837 | } |
| 5838 | |
| 5839 | SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ |
| 5840 | MachineFunction &MF = DAG.getMachineFunction(); |
| 5841 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 5842 | MFI.setReturnAddressIsTaken(true); |
| 5843 | |
| 5844 | EVT VT = Op.getValueType(); |
| 5845 | SDLoc dl(Op); |
| 5846 | unsigned Depth = Op.getConstantOperandVal(i: 0); |
| 5847 | if (Depth) { |
| 5848 | SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); |
| 5849 | SDValue Offset = DAG.getConstant(Val: 4, DL: dl, VT: MVT::i32); |
| 5850 | return DAG.getLoad(VT, dl, Chain: DAG.getEntryNode(), |
| 5851 | Ptr: DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: FrameAddr, N2: Offset), |
| 5852 | PtrInfo: MachinePointerInfo()); |
| 5853 | } |
| 5854 | |
| 5855 | // Return LR, which contains the return address. Mark it an implicit live-in. |
| 5856 | Register Reg = MF.addLiveIn(PReg: ARM::LR, RC: getRegClassFor(VT: MVT::i32)); |
| 5857 | return DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg, VT); |
| 5858 | } |
| 5859 | |
| 5860 | SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { |
| 5861 | const ARMBaseRegisterInfo &ARI = |
| 5862 | *static_cast<const ARMBaseRegisterInfo*>(RegInfo); |
| 5863 | MachineFunction &MF = DAG.getMachineFunction(); |
| 5864 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 5865 | MFI.setFrameAddressIsTaken(true); |
| 5866 | |
| 5867 | EVT VT = Op.getValueType(); |
| 5868 | SDLoc dl(Op); // FIXME probably not meaningful |
| 5869 | unsigned Depth = Op.getConstantOperandVal(i: 0); |
| 5870 | Register FrameReg = ARI.getFrameRegister(MF); |
| 5871 | SDValue FrameAddr = DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg: FrameReg, VT); |
| 5872 | while (Depth--) |
| 5873 | FrameAddr = DAG.getLoad(VT, dl, Chain: DAG.getEntryNode(), Ptr: FrameAddr, |
| 5874 | PtrInfo: MachinePointerInfo()); |
| 5875 | return FrameAddr; |
| 5876 | } |
| 5877 | |
| 5878 | // FIXME? Maybe this could be a TableGen attribute on some registers and |
| 5879 | // this table could be generated automatically from RegInfo. |
| 5880 | Register ARMTargetLowering::getRegisterByName(const char* RegName, LLT VT, |
| 5881 | const MachineFunction &MF) const { |
| 5882 | return StringSwitch<Register>(RegName) |
| 5883 | .Case(S: "sp", Value: ARM::SP) |
| 5884 | .Default(Value: Register()); |
| 5885 | } |
| 5886 | |
| 5887 | // The result is a 64-bit value, so split it into two 32-bit values and |
| 5888 | // return them as a pair of values. |
| 5889 | static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results, |
| 5890 | SelectionDAG &DAG) { |
| 5891 | SDLoc DL(N); |
| 5892 | |
| 5893 | // This function is only supposed to be called for i64 type destination. |
| 5894 | assert(N->getValueType(0) == MVT::i64 |
| 5895 | && "ExpandREAD_REGISTER called for non-i64 type result."); |
| 5896 | |
| 5897 | SDValue Read = DAG.getNode(Opcode: ISD::READ_REGISTER, DL, |
| 5898 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32, VT3: MVT::Other), |
| 5899 | N1: N->getOperand(Num: 0), |
| 5900 | N2: N->getOperand(Num: 1)); |
| 5901 | |
| 5902 | Results.push_back(Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL, VT: MVT::i64, N1: Read.getValue(R: 0), |
| 5903 | N2: Read.getValue(R: 1))); |
| 5904 | Results.push_back(Elt: Read.getValue(R: 2)); // Chain |
| 5905 | } |
| 5906 | |
| 5907 | /// \p BC is a bitcast that is about to be turned into a VMOVDRR. |
| 5908 | /// When \p DstVT, the destination type of \p BC, is on the vector |
| 5909 | /// register bank and the source of bitcast, \p Op, operates on the same bank, |
| 5910 | /// it might be possible to combine them, such that everything stays on the |
| 5911 | /// vector register bank. |
| 5912 | /// \returns The node that would replace \p BC, if the combine |
| 5913 | /// is possible. |
| 5914 | static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC, |
| 5915 | SelectionDAG &DAG) { |
| 5916 | SDValue Op = BC->getOperand(Num: 0); |
| 5917 | EVT DstVT = BC->getValueType(ResNo: 0); |
| 5918 | |
| 5919 | // The only vector instruction that can produce a scalar (remember, |
| 5920 | // since the bitcast was about to be turned into VMOVDRR, the source |
| 5921 | // type is i64) from a vector is EXTRACT_VECTOR_ELT. |
| 5922 | // Moreover, we can do this combine only if there is one use. |
| 5923 | // Finally, if the destination type is not a vector, there is not |
| 5924 | // much point on forcing everything on the vector bank. |
| 5925 | if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
| 5926 | !Op.hasOneUse()) |
| 5927 | return SDValue(); |
| 5928 | |
| 5929 | // If the index is not constant, we will introduce an additional |
| 5930 | // multiply that will stick. |
| 5931 | // Give up in that case. |
| 5932 | ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Val: Op.getOperand(i: 1)); |
| 5933 | if (!Index) |
| 5934 | return SDValue(); |
| 5935 | unsigned DstNumElt = DstVT.getVectorNumElements(); |
| 5936 | |
| 5937 | // Compute the new index. |
| 5938 | const APInt &APIntIndex = Index->getAPIntValue(); |
| 5939 | APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt); |
| 5940 | NewIndex *= APIntIndex; |
| 5941 | // Check if the new constant index fits into i32. |
| 5942 | if (NewIndex.getBitWidth() > 32) |
| 5943 | return SDValue(); |
| 5944 | |
| 5945 | // vMTy bitcast(i64 extractelt vNi64 src, i32 index) -> |
| 5946 | // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M) |
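| | // For example, with DstVT = v2f32 (M = 2) and an extract of element 1 from |
| | // a v2i64 source, the source is bitcast to v4f32 and the result is the |
| | // v2f32 subvector starting at element 1 * 2 = 2. |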
| 5947 | SDLoc dl(Op); |
| 5948 | SDValue ExtractSrc = Op.getOperand(i: 0); |
| 5949 | EVT VecVT = EVT::getVectorVT( |
| 5950 | Context&: *DAG.getContext(), VT: DstVT.getScalarType(), |
| 5951 | NumElements: ExtractSrc.getValueType().getVectorNumElements() * DstNumElt); |
| 5952 | SDValue BitCast = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecVT, Operand: ExtractSrc); |
| 5953 | return DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: DstVT, N1: BitCast, |
| 5954 | N2: DAG.getConstant(Val: NewIndex.getZExtValue(), DL: dl, VT: MVT::i32)); |
| 5955 | } |
| 5956 | |
| 5957 | /// ExpandBITCAST - If the target supports VFP, this function is called to |
| 5958 | /// expand a bit convert where either the source or destination type is i64 to |
| 5959 | /// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 |
| 5960 | /// operand type is illegal (e.g., v2f32 for a target that doesn't support |
| 5961 | /// vectors), since the legalizer won't know what to do with that. |
| 5962 | SDValue ARMTargetLowering::ExpandBITCAST(SDNode *N, SelectionDAG &DAG, |
| 5963 | const ARMSubtarget *Subtarget) const { |
| 5964 | SDLoc dl(N); |
| 5965 | SDValue Op = N->getOperand(Num: 0); |
| 5966 | |
| 5967 | // This function is only supposed to be called for i16 and i64 types, either |
| 5968 | // as the source or destination of the bit convert. |
| 5969 | EVT SrcVT = Op.getValueType(); |
| 5970 | EVT DstVT = N->getValueType(ResNo: 0); |
| 5971 | |
| 5972 | if ((SrcVT == MVT::i16 || SrcVT == MVT::i32) && |
| 5973 | (DstVT == MVT::f16 || DstVT == MVT::bf16)) |
| 5974 | return MoveToHPR(dl: SDLoc(N), DAG, LocVT: MVT::i32, ValVT: DstVT.getSimpleVT(), |
| 5975 | Val: DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: SDLoc(N), VT: MVT::i32, Operand: Op)); |
| 5976 | |
| 5977 | if ((DstVT == MVT::i16 || DstVT == MVT::i32) && |
| 5978 | (SrcVT == MVT::f16 || SrcVT == MVT::bf16)) { |
| 5979 | if (Subtarget->hasFullFP16() && !Subtarget->hasBF16()) |
| 5980 | Op = DAG.getBitcast(VT: MVT::f16, V: Op); |
| 5981 | return DAG.getNode( |
| 5982 | Opcode: ISD::TRUNCATE, DL: SDLoc(N), VT: DstVT, |
| 5983 | Operand: MoveFromHPR(dl: SDLoc(N), DAG, LocVT: MVT::i32, ValVT: SrcVT.getSimpleVT(), Val: Op)); |
| 5984 | } |
| 5985 | |
| 5986 | if (!(SrcVT == MVT::i64 || DstVT == MVT::i64)) |
| 5987 | return SDValue(); |
| 5988 | |
| 5989 | // Turn i64->f64 into VMOVDRR. |
| 5990 | if (SrcVT == MVT::i64 && isTypeLegal(VT: DstVT)) { |
| 5991 | // Do not force values to GPRs (this is what VMOVDRR does for the inputs) |
| 5992 | // if we can combine the bitcast with its source. |
| 5993 | if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(BC: N, DAG)) |
| 5994 | return Val; |
| 5995 | SDValue Lo, Hi; |
| 5996 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: Op, DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
| 5997 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: DstVT, |
| 5998 | Operand: DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: Lo, N2: Hi)); |
| 5999 | } |
| 6000 | |
| 6001 | // Turn f64->i64 into VMOVRRD. |
| 6002 | if (DstVT == MVT::i64 && isTypeLegal(VT: SrcVT)) { |
| 6003 | SDValue Cvt; |
| 6004 | if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() && |
| 6005 | SrcVT.getVectorNumElements() > 1) |
| 6006 | Cvt = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
| 6007 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
| 6008 | N: DAG.getNode(Opcode: ARMISD::VREV64, DL: dl, VT: SrcVT, Operand: Op)); |
| 6009 | else |
| 6010 | Cvt = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
| 6011 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: Op); |
| 6012 | // Merge the pieces into a single i64 value. |
| 6013 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Cvt, N2: Cvt.getValue(R: 1)); |
| 6014 | } |
| 6015 | |
| 6016 | return SDValue(); |
| 6017 | } |
| 6018 | |
| 6019 | /// getZeroVector - Returns a vector of specified type with all zero elements. |
| 6020 | /// Zero vectors are used to represent vector negation and in those cases |
| 6021 | /// will be implemented with the NEON VNEG instruction. However, VNEG does |
| 6022 | /// not support i64 elements, so sometimes the zero vectors will need to be |
| 6023 | /// explicitly constructed. Regardless, use a canonical VMOV to create the |
| 6024 | /// zero vector. |
| 6025 | static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) { |
| 6026 | assert(VT.isVector() && "Expected a vector type"); |
| 6027 | // The canonical modified immediate encoding of a zero vector is....0! |
| 6028 | SDValue EncodedVal = DAG.getTargetConstant(Val: 0, DL: dl, VT: MVT::i32); |
| 6029 | EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; |
| 6030 | SDValue Vmov = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: VmovVT, Operand: EncodedVal); |
| 6031 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Vmov); |
| 6032 | } |
| 6033 | |
| 6034 | /// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two |
| 6035 | /// i32 values and take a 2 x i32 value to shift plus a shift amount. |
| 6036 | SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, |
| 6037 | SelectionDAG &DAG) const { |
| 6038 | assert(Op.getNumOperands() == 3 && "Not a double-shift!"); |
| 6039 | EVT VT = Op.getValueType(); |
| 6040 | unsigned VTBits = VT.getSizeInBits(); |
| 6041 | SDLoc dl(Op); |
| 6042 | SDValue ShOpLo = Op.getOperand(i: 0); |
| 6043 | SDValue ShOpHi = Op.getOperand(i: 1); |
| 6044 | SDValue ShAmt = Op.getOperand(i: 2); |
| 6045 | SDValue ARMcc; |
| 6046 | unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; |
| 6047 | |
| 6048 | assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); |
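| | // For shift amounts below VTBits the low result combines both halves: |
| | // Lo = (ShOpLo >>u ShAmt) | (ShOpHi << (VTBits - ShAmt)); for larger amounts |
| | // only the high half contributes: Lo = ShOpHi shifted by (ShAmt - VTBits). |
| | // The CMOVs below select between the two based on ShAmt - VTBits >= 0. |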
| 6049 | |
| 6050 | SDValue RevShAmt = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
| 6051 | N1: DAG.getConstant(Val: VTBits, DL: dl, VT: MVT::i32), N2: ShAmt); |
| 6052 | SDValue Tmp1 = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT, N1: ShOpLo, N2: ShAmt); |
| 6053 | SDValue ExtraShAmt = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, N1: ShAmt, |
| 6054 | N2: DAG.getConstant(Val: VTBits, DL: dl, VT: MVT::i32)); |
| 6055 | SDValue Tmp2 = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT, N1: ShOpHi, N2: RevShAmt); |
| 6056 | SDValue LoSmallShift = DAG.getNode(Opcode: ISD::OR, DL: dl, VT, N1: Tmp1, N2: Tmp2); |
| 6057 | SDValue LoBigShift = DAG.getNode(Opcode: Opc, DL: dl, VT, N1: ShOpHi, N2: ExtraShAmt); |
| 6058 | SDValue CmpLo = getARMCmp(LHS: ExtraShAmt, RHS: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), |
| 6059 | CC: ISD::SETGE, ARMcc, DAG, dl); |
| 6060 | SDValue Lo = |
| 6061 | DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: LoSmallShift, N2: LoBigShift, N3: ARMcc, N4: CmpLo); |
| 6062 | |
| 6063 | SDValue HiSmallShift = DAG.getNode(Opcode: Opc, DL: dl, VT, N1: ShOpHi, N2: ShAmt); |
| 6064 | SDValue HiBigShift = Opc == ISD::SRA |
| 6065 | ? DAG.getNode(Opcode: Opc, DL: dl, VT, N1: ShOpHi, |
| 6066 | N2: DAG.getConstant(Val: VTBits - 1, DL: dl, VT)) |
| 6067 | : DAG.getConstant(Val: 0, DL: dl, VT); |
| 6068 | SDValue CmpHi = getARMCmp(LHS: ExtraShAmt, RHS: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), |
| 6069 | CC: ISD::SETGE, ARMcc, DAG, dl); |
| 6070 | SDValue Hi = |
| 6071 | DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: HiSmallShift, N2: HiBigShift, N3: ARMcc, N4: CmpHi); |
| 6072 | |
| 6073 | SDValue Ops[2] = { Lo, Hi }; |
| 6074 | return DAG.getMergeValues(Ops, dl); |
| 6075 | } |
| 6076 | |
| 6077 | /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two |
| 6078 | /// i32 values and takes a 2 x i32 value to shift plus a shift amount. |
| 6079 | SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, |
| 6080 | SelectionDAG &DAG) const { |
| 6081 | assert(Op.getNumOperands() == 3 && "Not a double-shift!"); |
| 6082 | EVT VT = Op.getValueType(); |
| 6083 | unsigned VTBits = VT.getSizeInBits(); |
| 6084 | SDLoc dl(Op); |
| 6085 | SDValue ShOpLo = Op.getOperand(i: 0); |
| 6086 | SDValue ShOpHi = Op.getOperand(i: 1); |
| 6087 | SDValue ShAmt = Op.getOperand(i: 2); |
| 6088 | SDValue ARMcc; |
| 6089 | |
| 6090 | assert(Op.getOpcode() == ISD::SHL_PARTS); |
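| | // For shift amounts below VTBits: |
| | //   Hi = (ShOpHi << ShAmt) | (ShOpLo >>u (VTBits - ShAmt)), Lo = ShOpLo << ShAmt; |
| | // for larger amounts: Hi = ShOpLo << (ShAmt - VTBits), Lo = 0. |
| | // The CMOVs below select between the two based on ShAmt - VTBits >= 0. |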
| 6091 | SDValue RevShAmt = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
| 6092 | N1: DAG.getConstant(Val: VTBits, DL: dl, VT: MVT::i32), N2: ShAmt); |
| 6093 | SDValue Tmp1 = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT, N1: ShOpLo, N2: RevShAmt); |
| 6094 | SDValue Tmp2 = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT, N1: ShOpHi, N2: ShAmt); |
| 6095 | SDValue HiSmallShift = DAG.getNode(Opcode: ISD::OR, DL: dl, VT, N1: Tmp1, N2: Tmp2); |
| 6096 | |
| 6097 | SDValue ExtraShAmt = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, N1: ShAmt, |
| 6098 | N2: DAG.getConstant(Val: VTBits, DL: dl, VT: MVT::i32)); |
| 6099 | SDValue HiBigShift = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT, N1: ShOpLo, N2: ExtraShAmt); |
| 6100 | SDValue CmpHi = getARMCmp(LHS: ExtraShAmt, RHS: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), |
| 6101 | CC: ISD::SETGE, ARMcc, DAG, dl); |
| 6102 | SDValue Hi = |
| 6103 | DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: HiSmallShift, N2: HiBigShift, N3: ARMcc, N4: CmpHi); |
| 6104 | |
| 6105 | SDValue CmpLo = getARMCmp(LHS: ExtraShAmt, RHS: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), |
| 6106 | CC: ISD::SETGE, ARMcc, DAG, dl); |
| 6107 | SDValue LoSmallShift = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT, N1: ShOpLo, N2: ShAmt); |
| 6108 | SDValue Lo = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: LoSmallShift, |
| 6109 | N2: DAG.getConstant(Val: 0, DL: dl, VT), N3: ARMcc, N4: CmpLo); |
| 6110 | |
| 6111 | SDValue Ops[2] = { Lo, Hi }; |
| 6112 | return DAG.getMergeValues(Ops, dl); |
| 6113 | } |
| 6114 | |
| 6115 | SDValue ARMTargetLowering::LowerGET_ROUNDING(SDValue Op, |
| 6116 | SelectionDAG &DAG) const { |
| 6117 | // The rounding mode is in bits 23:22 of the FPSCR. |
| 6118 | // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0 |
| 6119 | // The formula we use to implement this is ((FPSCR + (1 << 22)) >> 22) & 3, |
| 6120 | // so that the shift + and get folded into a bitfield extract. |
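| | // For example, FPSCR[23:22] = 0b11 (round towards zero) becomes |
| | // (3 + 1) & 3 = 0, which is the FLT_ROUNDS value for round towards zero. |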
| 6121 | SDLoc dl(Op); |
| 6122 | SDValue Chain = Op.getOperand(i: 0); |
| 6123 | SDValue Ops[] = {Chain, |
| 6124 | DAG.getConstant(Val: Intrinsic::arm_get_fpscr, DL: dl, VT: MVT::i32)}; |
| 6125 | |
| 6126 | SDValue FPSCR = |
| 6127 | DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL: dl, ResultTys: {MVT::i32, MVT::Other}, Ops); |
| 6128 | Chain = FPSCR.getValue(R: 1); |
| 6129 | SDValue FltRounds = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::i32, N1: FPSCR, |
| 6130 | N2: DAG.getConstant(Val: 1U << 22, DL: dl, VT: MVT::i32)); |
| 6131 | SDValue RMODE = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i32, N1: FltRounds, |
| 6132 | N2: DAG.getConstant(Val: 22, DL: dl, VT: MVT::i32)); |
| 6133 | SDValue And = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, N1: RMODE, |
| 6134 | N2: DAG.getConstant(Val: 3, DL: dl, VT: MVT::i32)); |
| 6135 | return DAG.getMergeValues(Ops: {And, Chain}, dl); |
| 6136 | } |
| 6137 | |
| 6138 | SDValue ARMTargetLowering::LowerSET_ROUNDING(SDValue Op, |
| 6139 | SelectionDAG &DAG) const { |
| 6140 | SDLoc DL(Op); |
| 6141 | SDValue Chain = Op->getOperand(Num: 0); |
| 6142 | SDValue RMValue = Op->getOperand(Num: 1); |
| 6143 | |
| 6144 | // The rounding mode is in bits 23:22 of the FPSCR. |
| 6145 | // The mapping from the llvm.set.rounding argument value to the ARM rounding |
| 6146 | // mode value is 0->3, 1->0, 2->1, 3->2. The formula we use to implement this |
| 6147 | // is (((arg - 1) & 3) << 22). |
| 6148 | // |
| 6149 | // It is expected that the argument of llvm.set.rounding is within the |
| 6150 | // range [0, 3], so NearestTiesToAway (4) is not handled here. It is the |
| 6151 | // responsibility of the code that generates llvm.set.rounding to ensure |
| 6152 | // this condition. |
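| | // For example, llvm.set.rounding(0) (round towards zero) computes |
| | // (0 - 1) & 3 = 3, i.e. FPSCR RMode 0b11, and llvm.set.rounding(1) |
| | // (round to nearest) computes 0, i.e. RMode 0b00. |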
| 6153 | |
| 6154 | // Calculate new value of FPSCR[23:22]. |
| 6155 | RMValue = DAG.getNode(Opcode: ISD::SUB, DL, VT: MVT::i32, N1: RMValue, |
| 6156 | N2: DAG.getConstant(Val: 1, DL, VT: MVT::i32)); |
| 6157 | RMValue = DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: RMValue, |
| 6158 | N2: DAG.getConstant(Val: 0x3, DL, VT: MVT::i32)); |
| 6159 | RMValue = DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: RMValue, |
| 6160 | N2: DAG.getConstant(Val: ARM::RoundingBitsPos, DL, VT: MVT::i32)); |
| 6161 | |
| 6162 | // Get current value of FPSCR. |
| 6163 | SDValue Ops[] = {Chain, |
| 6164 | DAG.getConstant(Val: Intrinsic::arm_get_fpscr, DL, VT: MVT::i32)}; |
| 6165 | SDValue FPSCR = |
| 6166 | DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL, ResultTys: {MVT::i32, MVT::Other}, Ops); |
| 6167 | Chain = FPSCR.getValue(R: 1); |
| 6168 | FPSCR = FPSCR.getValue(R: 0); |
| 6169 | |
| 6170 | // Put new rounding mode into FPSCR[23:22]. |
| 6171 | const unsigned RMMask = ~(ARM::Rounding::rmMask << ARM::RoundingBitsPos); |
| 6172 | FPSCR = DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: FPSCR, |
| 6173 | N2: DAG.getConstant(Val: RMMask, DL, VT: MVT::i32)); |
| 6174 | FPSCR = DAG.getNode(Opcode: ISD::OR, DL, VT: MVT::i32, N1: FPSCR, N2: RMValue); |
| 6175 | SDValue Ops2[] = { |
| 6176 | Chain, DAG.getConstant(Val: Intrinsic::arm_set_fpscr, DL, VT: MVT::i32), FPSCR}; |
| 6177 | return DAG.getNode(Opcode: ISD::INTRINSIC_VOID, DL, VT: MVT::Other, Ops: Ops2); |
| 6178 | } |
| 6179 | |
| 6180 | SDValue ARMTargetLowering::LowerSET_FPMODE(SDValue Op, |
| 6181 | SelectionDAG &DAG) const { |
| 6182 | SDLoc DL(Op); |
| 6183 | SDValue Chain = Op->getOperand(Num: 0); |
| 6184 | SDValue Mode = Op->getOperand(Num: 1); |
| 6185 | |
| 6186 | // Generate nodes to build: |
| 6187 | // FPSCR = (FPSCR & FPStatusBits) | (Mode & ~FPStatusBits) |
| 6188 | SDValue Ops[] = {Chain, |
| 6189 | DAG.getConstant(Val: Intrinsic::arm_get_fpscr, DL, VT: MVT::i32)}; |
| 6190 | SDValue FPSCR = |
| 6191 | DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL, ResultTys: {MVT::i32, MVT::Other}, Ops); |
| 6192 | Chain = FPSCR.getValue(R: 1); |
| 6193 | FPSCR = FPSCR.getValue(R: 0); |
| 6194 | |
| 6195 | SDValue FPSCRMasked = |
| 6196 | DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: FPSCR, |
| 6197 | N2: DAG.getConstant(Val: ARM::FPStatusBits, DL, VT: MVT::i32)); |
| 6198 | SDValue InputMasked = |
| 6199 | DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: Mode, |
| 6200 | N2: DAG.getConstant(Val: ~ARM::FPStatusBits, DL, VT: MVT::i32)); |
| 6201 | FPSCR = DAG.getNode(Opcode: ISD::OR, DL, VT: MVT::i32, N1: FPSCRMasked, N2: InputMasked); |
| 6202 | |
| 6203 | SDValue Ops2[] = { |
| 6204 | Chain, DAG.getConstant(Val: Intrinsic::arm_set_fpscr, DL, VT: MVT::i32), FPSCR}; |
| 6205 | return DAG.getNode(Opcode: ISD::INTRINSIC_VOID, DL, VT: MVT::Other, Ops: Ops2); |
| 6206 | } |
| 6207 | |
| 6208 | SDValue ARMTargetLowering::LowerRESET_FPMODE(SDValue Op, |
| 6209 | SelectionDAG &DAG) const { |
| 6210 | SDLoc DL(Op); |
| 6211 | SDValue Chain = Op->getOperand(Num: 0); |
| 6212 | |
| 6213 | // To get the default FP mode all control bits are cleared: |
| 6214 | // FPSCR = FPSCR & (FPStatusBits | FPReservedBits) |
| 6215 | SDValue Ops[] = {Chain, |
| 6216 | DAG.getConstant(Val: Intrinsic::arm_get_fpscr, DL, VT: MVT::i32)}; |
| 6217 | SDValue FPSCR = |
| 6218 | DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL, ResultTys: {MVT::i32, MVT::Other}, Ops); |
| 6219 | Chain = FPSCR.getValue(R: 1); |
| 6220 | FPSCR = FPSCR.getValue(R: 0); |
| 6221 | |
| 6222 | SDValue FPSCRMasked = DAG.getNode( |
| 6223 | Opcode: ISD::AND, DL, VT: MVT::i32, N1: FPSCR, |
| 6224 | N2: DAG.getConstant(Val: ARM::FPStatusBits | ARM::FPReservedBits, DL, VT: MVT::i32)); |
| 6225 | SDValue Ops2[] = {Chain, |
| 6226 | DAG.getConstant(Val: Intrinsic::arm_set_fpscr, DL, VT: MVT::i32), |
| 6227 | FPSCRMasked}; |
| 6228 | return DAG.getNode(Opcode: ISD::INTRINSIC_VOID, DL, VT: MVT::Other, Ops: Ops2); |
| 6229 | } |
| 6230 | |
| 6231 | static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, |
| 6232 | const ARMSubtarget *ST) { |
| 6233 | SDLoc dl(N); |
| 6234 | EVT VT = N->getValueType(ResNo: 0); |
| 6235 | if (VT.isVector() && ST->hasNEON()) { |
| 6236 | |
| 6237 | // Compute the least significant set bit: LSB = X & -X |
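| | // For example, X = 0b0110'1000 gives -X = ...1001'1000, so |
| | // X & -X = 0b0000'1000, the lowest set bit of X. |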
| 6238 | SDValue X = N->getOperand(Num: 0); |
| 6239 | SDValue NX = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: getZeroVector(VT, DAG, dl), N2: X); |
| 6240 | SDValue LSB = DAG.getNode(Opcode: ISD::AND, DL: dl, VT, N1: X, N2: NX); |
| 6241 | |
| 6242 | EVT ElemTy = VT.getVectorElementType(); |
| 6243 | |
| 6244 | if (ElemTy == MVT::i8) { |
| 6245 | // Compute with: cttz(x) = ctpop(lsb - 1) |
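| | // For example, x = 0b0010'1000 has lsb = 0b0000'1000, so lsb - 1 = |
| | // 0b0000'0111 and ctpop(lsb - 1) = 3 = cttz(x). |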
| 6246 | SDValue One = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT, |
| 6247 | Operand: DAG.getTargetConstant(Val: 1, DL: dl, VT: ElemTy)); |
| 6248 | SDValue Bits = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: LSB, N2: One); |
| 6249 | return DAG.getNode(Opcode: ISD::CTPOP, DL: dl, VT, Operand: Bits); |
| 6250 | } |
| 6251 | |
| 6252 | if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) && |
| 6253 | (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) { |
| 6254 | // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0 |
| 6255 | unsigned NumBits = ElemTy.getSizeInBits(); |
| 6256 | SDValue WidthMinus1 = |
| 6257 | DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT, |
| 6258 | Operand: DAG.getTargetConstant(Val: NumBits - 1, DL: dl, VT: ElemTy)); |
| 6259 | SDValue CTLZ = DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT, Operand: LSB); |
| 6260 | return DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: WidthMinus1, N2: CTLZ); |
| 6261 | } |
| 6262 | |
| 6263 | // Compute with: cttz(x) = ctpop(lsb - 1) |
| 6264 | |
| 6265 | // Compute LSB - 1. |
| 6266 | SDValue Bits; |
| 6267 | if (ElemTy == MVT::i64) { |
| 6268 | // Load constant 0xffff'ffff'ffff'ffff to register. |
| 6269 | SDValue FF = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT, |
| 6270 | Operand: DAG.getTargetConstant(Val: 0x1eff, DL: dl, VT: MVT::i32)); |
| 6271 | Bits = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: LSB, N2: FF); |
| 6272 | } else { |
| 6273 | SDValue One = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT, |
| 6274 | Operand: DAG.getTargetConstant(Val: 1, DL: dl, VT: ElemTy)); |
| 6275 | Bits = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: LSB, N2: One); |
| 6276 | } |
| 6277 | return DAG.getNode(Opcode: ISD::CTPOP, DL: dl, VT, Operand: Bits); |
| 6278 | } |
| 6279 | |
| 6280 | if (!ST->hasV6T2Ops()) |
| 6281 | return SDValue(); |
| 6282 | |
| 6283 | SDValue rbit = DAG.getNode(Opcode: ISD::BITREVERSE, DL: dl, VT, Operand: N->getOperand(Num: 0)); |
| 6284 | return DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT, Operand: rbit); |
| 6285 | } |
| 6286 | |
| 6287 | static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG, |
| 6288 | const ARMSubtarget *ST) { |
| 6289 | EVT VT = N->getValueType(ResNo: 0); |
| 6290 | SDLoc DL(N); |
| 6291 | |
| 6292 | assert(ST->hasNEON() && "Custom ctpop lowering requires NEON."); |
| 6293 | assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 || |
| 6294 | VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) && |
| 6295 | "Unexpected type for custom ctpop lowering"); |
| 6296 | |
| 6297 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 6298 | EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8; |
| 6299 | SDValue Res = DAG.getBitcast(VT: VT8Bit, V: N->getOperand(Num: 0)); |
| 6300 | Res = DAG.getNode(Opcode: ISD::CTPOP, DL, VT: VT8Bit, Operand: Res); |
| 6301 | |
| 6302 | // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds. |
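| | // For example, for VT = v4i32 the per-byte counts in v16i8 are pairwise |
| | // added into v8i16 and then into v4i32, so each i32 lane ends up holding |
| | // the popcount of its original 32 input bits. |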
| 6303 | unsigned EltSize = 8; |
| 6304 | unsigned NumElts = VT.is64BitVector() ? 8 : 16; |
| 6305 | while (EltSize != VT.getScalarSizeInBits()) { |
| 6306 | SmallVector<SDValue, 8> Ops; |
| 6307 | Ops.push_back(Elt: DAG.getConstant(Val: Intrinsic::arm_neon_vpaddlu, DL, |
| 6308 | VT: TLI.getPointerTy(DL: DAG.getDataLayout()))); |
| 6309 | Ops.push_back(Elt: Res); |
| 6310 | |
| 6311 | EltSize *= 2; |
| 6312 | NumElts /= 2; |
| 6313 | MVT WidenVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: EltSize), NumElements: NumElts); |
| 6314 | Res = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT: WidenVT, Ops); |
| 6315 | } |
| 6316 | |
| 6317 | return Res; |
| 6318 | } |
| 6319 | |
| 6320 | /// getVShiftImm - Check if this is a valid build_vector for the immediate |
| 6321 | /// operand of a vector shift operation, where all the elements of the |
| 6322 | /// build_vector must have the same constant integer value. |
| 6323 | static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { |
| 6324 | // Ignore bit_converts. |
| 6325 | while (Op.getOpcode() == ISD::BITCAST) |
| 6326 | Op = Op.getOperand(i: 0); |
| 6327 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Val: Op.getNode()); |
| 6328 | APInt SplatBits, SplatUndef; |
| 6329 | unsigned SplatBitSize; |
| 6330 | bool HasAnyUndefs; |
| 6331 | if (!BVN || |
| 6332 | !BVN->isConstantSplat(SplatValue&: SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, |
| 6333 | MinSplatBits: ElementBits) || |
| 6334 | SplatBitSize > ElementBits) |
| 6335 | return false; |
| 6336 | Cnt = SplatBits.getSExtValue(); |
| 6337 | return true; |
| 6338 | } |
| 6339 | |
| 6340 | /// isVShiftLImm - Check if this is a valid build_vector for the immediate |
| 6341 | /// operand of a vector shift left operation. That value must be in the range: |
| 6342 | /// 0 <= Value < ElementBits for a left shift; or |
| 6343 | /// 0 <= Value <= ElementBits for a long left shift. |
| 6344 | static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { |
| 6345 | assert(VT.isVector() && "vector shift count is not a vector type"); |
| 6346 | int64_t ElementBits = VT.getScalarSizeInBits(); |
| 6347 | if (!getVShiftImm(Op, ElementBits, Cnt)) |
| 6348 | return false; |
| 6349 | return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits); |
| 6350 | } |
| 6351 | |
| 6352 | /// isVShiftRImm - Check if this is a valid build_vector for the immediate |
| 6353 | /// operand of a vector shift right operation. For a shift opcode, the value |
| 6354 | /// is positive, but for an intrinsic the value must be negative. The |
| 6355 | /// absolute value must be in the range: |
| 6356 | /// 1 <= |Value| <= ElementBits for a right shift; or |
| 6357 | /// 1 <= |Value| <= ElementBits/2 for a narrow right shift. |
| 6358 | static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, |
| 6359 | int64_t &Cnt) { |
| 6360 | assert(VT.isVector() && "vector shift count is not a vector type"); |
| 6361 | int64_t ElementBits = VT.getScalarSizeInBits(); |
| 6362 | if (!getVShiftImm(Op, ElementBits, Cnt)) |
| 6363 | return false; |
| 6364 | if (!isIntrinsic) |
| 6365 | return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits)); |
| 6366 | if (Cnt >= -(isNarrow ? ElementBits / 2 : ElementBits) && Cnt <= -1) { |
| 6367 | Cnt = -Cnt; |
| 6368 | return true; |
| 6369 | } |
| 6370 | return false; |
| 6371 | } |
| 6372 | |
| 6373 | static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, |
| 6374 | const ARMSubtarget *ST) { |
| 6375 | EVT VT = N->getValueType(ResNo: 0); |
| 6376 | SDLoc dl(N); |
| 6377 | int64_t Cnt; |
| 6378 | |
| 6379 | if (!VT.isVector()) |
| 6380 | return SDValue(); |
| 6381 | |
| 6382 | // We essentially have two forms here. Shift by an immediate and shift by a |
| 6383 | // vector register (there is also a shift by a GPR, but that is just handled |
| 6384 | // with a tablegen pattern). We cannot easily match shift by an immediate in |
| 6385 | // tablegen so we do that here and generate a VSHLIMM/VSHRsIMM/VSHRuIMM. |
| 6386 | // For shifting by a vector, we don't have VSHR, only VSHL (which can be |
| 6387 | // signed or unsigned, and a negative shift indicates a shift right). |
| 6388 | if (N->getOpcode() == ISD::SHL) { |
| 6389 | if (isVShiftLImm(Op: N->getOperand(Num: 1), VT, isLong: false, Cnt)) |
| 6390 | return DAG.getNode(Opcode: ARMISD::VSHLIMM, DL: dl, VT, N1: N->getOperand(Num: 0), |
| 6391 | N2: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
| 6392 | return DAG.getNode(Opcode: ARMISD::VSHLu, DL: dl, VT, N1: N->getOperand(Num: 0), |
| 6393 | N2: N->getOperand(Num: 1)); |
| 6394 | } |
| 6395 | |
| 6396 | assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) && |
| 6397 | "unexpected vector shift opcode"); |
| 6398 | |
| 6399 | if (isVShiftRImm(Op: N->getOperand(Num: 1), VT, isNarrow: false, isIntrinsic: false, Cnt)) { |
| 6400 | unsigned VShiftOpc = |
| 6401 | (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM); |
| 6402 | return DAG.getNode(Opcode: VShiftOpc, DL: dl, VT, N1: N->getOperand(Num: 0), |
| 6403 | N2: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
| 6404 | } |
| 6405 | |
| 6406 | // There are no direct operations for the remaining right shifts, so use a |
| 6407 | // shift left by a negated shift amount instead. |
| 6408 | EVT ShiftVT = N->getOperand(Num: 1).getValueType(); |
| 6409 | SDValue NegatedCount = DAG.getNode( |
| 6410 | Opcode: ISD::SUB, DL: dl, VT: ShiftVT, N1: getZeroVector(VT: ShiftVT, DAG, dl), N2: N->getOperand(Num: 1)); |
| 6411 | unsigned VShiftOpc = |
| 6412 | (N->getOpcode() == ISD::SRA ? ARMISD::VSHLs : ARMISD::VSHLu); |
| 6413 | return DAG.getNode(Opcode: VShiftOpc, DL: dl, VT, N1: N->getOperand(Num: 0), N2: NegatedCount); |
| 6414 | } |
| 6415 | |
| 6416 | static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, |
| 6417 | const ARMSubtarget *ST) { |
| 6418 | EVT VT = N->getValueType(ResNo: 0); |
| 6419 | SDLoc dl(N); |
| 6420 | |
| 6421 | // We can get here for a node like i32 = ISD::SHL i32, i64 |
| 6422 | if (VT != MVT::i64) |
| 6423 | return SDValue(); |
| 6424 | |
| 6425 | assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA || |
| 6426 | N->getOpcode() == ISD::SHL) && |
| 6427 | "Unknown shift to lower!"); |
| 6428 | |
| 6429 | unsigned ShOpc = N->getOpcode(); |
| 6430 | if (ST->hasMVEIntegerOps()) { |
| 6431 | SDValue ShAmt = N->getOperand(Num: 1); |
| 6432 | unsigned ShPartsOpc = ARMISD::LSLL; |
| 6433 | ConstantSDNode *Con = dyn_cast<ConstantSDNode>(Val&: ShAmt); |
| 6434 | |
| 6435 | // If the shift amount is a constant that is zero or at least 32, or is a |
| 6436 | // non-constant wider than 64 bits, then fall back to the default lowering. |
| 6437 | if ((!Con && ShAmt->getValueType(ResNo: 0).getSizeInBits() > 64) || |
| 6438 | (Con && (Con->getAPIntValue() == 0 || Con->getAPIntValue().uge(RHS: 32)))) |
| 6439 | return SDValue(); |
| 6440 | |
| 6441 | // Extract the lower 32 bits of the shift amount if it's not an i32 |
| 6442 | if (ShAmt->getValueType(ResNo: 0) != MVT::i32) |
| 6443 | ShAmt = DAG.getZExtOrTrunc(Op: ShAmt, DL: dl, VT: MVT::i32); |
| 6444 | |
| 6445 | if (ShOpc == ISD::SRL) { |
| 6446 | if (!Con) |
| 6447 | // There is no t2LSRLr instruction so negate and perform an lsll if the |
| 6448 | // shift amount is in a register, emulating a right shift. |
| 6449 | ShAmt = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
| 6450 | N1: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), N2: ShAmt); |
| 6451 | else |
| 6452 | // Else generate an lsrl on the immediate shift amount |
| 6453 | ShPartsOpc = ARMISD::LSRL; |
| 6454 | } else if (ShOpc == ISD::SRA) |
| 6455 | ShPartsOpc = ARMISD::ASRL; |
| 6456 | |
| 6457 | // Split Lower/Upper 32 bits of the destination/source |
| 6458 | SDValue Lo, Hi; |
| 6459 | std::tie(args&: Lo, args&: Hi) = |
| 6460 | DAG.SplitScalar(N: N->getOperand(Num: 0), DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
| 6461 | // Generate the shift operation as computed above |
| 6462 | Lo = DAG.getNode(Opcode: ShPartsOpc, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N1: Lo, N2: Hi, |
| 6463 | N3: ShAmt); |
| 6464 | // The upper 32 bits come from the second return value of lsll |
| 6465 | Hi = SDValue(Lo.getNode(), 1); |
| 6466 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Lo, N2: Hi); |
| 6467 | } |
| 6468 | |
| 6469 | // We only lower SRA and SRL by 1 here; all others use the generic lowering. |
| 6470 | if (!isOneConstant(V: N->getOperand(Num: 1)) || N->getOpcode() == ISD::SHL) |
| 6471 | return SDValue(); |
| 6472 | |
| 6473 | // If we are in thumb mode, we don't have RRX. |
| 6474 | if (ST->isThumb1Only()) |
| 6475 | return SDValue(); |
| 6476 | |
| 6477 | // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. |
| 6478 | SDValue Lo, Hi; |
| 6479 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: N->getOperand(Num: 0), DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
| 6480 | |
| 6481 | // First, build a LSRS1/ASRS1 op, which shifts the top part by one and |
| 6482 | // captures the shifted out bit into a carry flag. |
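| | // For example, a 64-bit logical shift right by one yields |
| | // { Hi >> 1, (Lo >> 1) | ((Hi & 1) << 31) }: LSRS1 produces Hi >> 1 and |
| | // moves bit 0 of Hi into the carry, and RRX then rotates that carry into |
| | // bit 31 of Lo. |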
| 6483 | unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::LSRS1 : ARMISD::ASRS1; |
| 6484 | Hi = DAG.getNode(Opcode: Opc, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: FlagsVT), N: Hi); |
| 6485 | |
| 6486 | // The low part is an ARMISD::RRX operand, which shifts the carry in. |
| 6487 | Lo = DAG.getNode(Opcode: ARMISD::RRX, DL: dl, VT: MVT::i32, N1: Lo, N2: Hi.getValue(R: 1)); |
| 6488 | |
| 6489 | // Merge the pieces into a single i64 value. |
| 6490 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Lo, N2: Hi); |
| 6491 | } |
| 6492 | |
| 6493 | static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG, |
| 6494 | const ARMSubtarget *ST) { |
| 6495 | bool Invert = false; |
| 6496 | bool Swap = false; |
| 6497 | unsigned Opc = ARMCC::AL; |
| 6498 | |
| 6499 | SDValue Op0 = Op.getOperand(i: 0); |
| 6500 | SDValue Op1 = Op.getOperand(i: 1); |
| 6501 | SDValue CC = Op.getOperand(i: 2); |
| 6502 | EVT VT = Op.getValueType(); |
| 6503 | ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(Val&: CC)->get(); |
| 6504 | SDLoc dl(Op); |
| 6505 | |
| 6506 | EVT CmpVT; |
| 6507 | if (ST->hasNEON()) |
| 6508 | CmpVT = Op0.getValueType().changeVectorElementTypeToInteger(); |
| 6509 | else { |
| 6510 | assert(ST->hasMVEIntegerOps() && |
| 6511 | "No hardware support for integer vector comparison!"); |
| 6512 | |
| 6513 | if (Op.getValueType().getVectorElementType() != MVT::i1) |
| 6514 | return SDValue(); |
| 6515 | |
| 6516 | // Make sure we expand floating point setcc to scalar if we do not have |
| 6517 | // mve.fp, so that we can handle them from there. |
| 6518 | if (Op0.getValueType().isFloatingPoint() && !ST->hasMVEFloatOps()) |
| 6519 | return SDValue(); |
| 6520 | |
| 6521 | CmpVT = VT; |
| 6522 | } |
| 6523 | |
| 6524 | if (Op0.getValueType().getVectorElementType() == MVT::i64 && |
| 6525 | (SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE)) { |
| 6526 | // Special-case integer 64-bit equality comparisons. They aren't legal, |
| 6527 | // but they can be lowered with a few vector instructions. |
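| | // The operands are compared as i32 lanes; VREV64 swaps the two i32 lanes |
| | // within each i64 lane, so ANDing the compare result with its VREV64 leaves |
| | // an i64 lane all-ones only if both 32-bit halves compared equal. |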
| 6528 | unsigned CmpElements = CmpVT.getVectorNumElements() * 2; |
| 6529 | EVT SplitVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::i32, NumElements: CmpElements); |
| 6530 | SDValue CastOp0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: SplitVT, Operand: Op0); |
| 6531 | SDValue CastOp1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: SplitVT, Operand: Op1); |
| 6532 | SDValue Cmp = DAG.getNode(Opcode: ISD::SETCC, DL: dl, VT: SplitVT, N1: CastOp0, N2: CastOp1, |
| 6533 | N3: DAG.getCondCode(Cond: ISD::SETEQ)); |
| 6534 | SDValue Reversed = DAG.getNode(Opcode: ARMISD::VREV64, DL: dl, VT: SplitVT, Operand: Cmp); |
| 6535 | SDValue Merged = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: SplitVT, N1: Cmp, N2: Reversed); |
| 6536 | Merged = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: CmpVT, Operand: Merged); |
| 6537 | if (SetCCOpcode == ISD::SETNE) |
| 6538 | Merged = DAG.getNOT(DL: dl, Val: Merged, VT: CmpVT); |
| 6539 | Merged = DAG.getSExtOrTrunc(Op: Merged, DL: dl, VT); |
| 6540 | return Merged; |
| 6541 | } |
| 6542 | |
| 6543 | if (CmpVT.getVectorElementType() == MVT::i64) |
| 6544 | // 64-bit comparisons are not legal in general. |
| 6545 | return SDValue(); |
| 6546 | |
| 6547 | if (Op1.getValueType().isFloatingPoint()) { |
| 6548 | switch (SetCCOpcode) { |
| 6549 | default: llvm_unreachable("Illegal FP comparison"); |
| 6550 | case ISD::SETUNE: |
| 6551 | case ISD::SETNE: |
| 6552 | if (ST->hasMVEFloatOps()) { |
| 6553 | Opc = ARMCC::NE; break; |
| 6554 | } else { |
| 6555 | Invert = true; [[fallthrough]]; |
| 6556 | } |
| 6557 | case ISD::SETOEQ: |
| 6558 | case ISD::SETEQ: Opc = ARMCC::EQ; break; |
| 6559 | case ISD::SETOLT: |
| 6560 | case ISD::SETLT: Swap = true; [[fallthrough]]; |
| 6561 | case ISD::SETOGT: |
| 6562 | case ISD::SETGT: Opc = ARMCC::GT; break; |
| 6563 | case ISD::SETOLE: |
| 6564 | case ISD::SETLE: Swap = true; [[fallthrough]]; |
| 6565 | case ISD::SETOGE: |
| 6566 | case ISD::SETGE: Opc = ARMCC::GE; break; |
| 6567 | case ISD::SETUGE: Swap = true; [[fallthrough]]; |
| 6568 | case ISD::SETULE: Invert = true; Opc = ARMCC::GT; break; |
| 6569 | case ISD::SETUGT: Swap = true; [[fallthrough]]; |
| 6570 | case ISD::SETULT: Invert = true; Opc = ARMCC::GE; break; |
| 6571 | case ISD::SETUEQ: Invert = true; [[fallthrough]]; |
| 6572 | case ISD::SETONE: { |
| 6573 | // Expand this to (OLT | OGT). |
| 6574 | SDValue TmpOp0 = DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT: CmpVT, N1: Op1, N2: Op0, |
| 6575 | N3: DAG.getConstant(Val: ARMCC::GT, DL: dl, VT: MVT::i32)); |
| 6576 | SDValue TmpOp1 = DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT: CmpVT, N1: Op0, N2: Op1, |
| 6577 | N3: DAG.getConstant(Val: ARMCC::GT, DL: dl, VT: MVT::i32)); |
| 6578 | SDValue Result = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: CmpVT, N1: TmpOp0, N2: TmpOp1); |
| 6579 | if (Invert) |
| 6580 | Result = DAG.getNOT(DL: dl, Val: Result, VT); |
| 6581 | return Result; |
| 6582 | } |
| 6583 | case ISD::SETUO: Invert = true; [[fallthrough]]; |
| 6584 | case ISD::SETO: { |
| 6585 | // Expand this to (OLT | OGE). |
| 6586 | SDValue TmpOp0 = DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT: CmpVT, N1: Op1, N2: Op0, |
| 6587 | N3: DAG.getConstant(Val: ARMCC::GT, DL: dl, VT: MVT::i32)); |
| 6588 | SDValue TmpOp1 = DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT: CmpVT, N1: Op0, N2: Op1, |
| 6589 | N3: DAG.getConstant(Val: ARMCC::GE, DL: dl, VT: MVT::i32)); |
| 6590 | SDValue Result = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: CmpVT, N1: TmpOp0, N2: TmpOp1); |
| 6591 | if (Invert) |
| 6592 | Result = DAG.getNOT(DL: dl, Val: Result, VT); |
| 6593 | return Result; |
| 6594 | } |
| 6595 | } |
| 6596 | } else { |
| 6597 | // Integer comparisons. |
| 6598 | switch (SetCCOpcode) { |
| 6599 | default: llvm_unreachable("Illegal integer comparison"); |
| 6600 | case ISD::SETNE: |
| 6601 | if (ST->hasMVEIntegerOps()) { |
| 6602 | Opc = ARMCC::NE; break; |
| 6603 | } else { |
| 6604 | Invert = true; [[fallthrough]]; |
| 6605 | } |
| 6606 | case ISD::SETEQ: Opc = ARMCC::EQ; break; |
| 6607 | case ISD::SETLT: Swap = true; [[fallthrough]]; |
| 6608 | case ISD::SETGT: Opc = ARMCC::GT; break; |
| 6609 | case ISD::SETLE: Swap = true; [[fallthrough]]; |
| 6610 | case ISD::SETGE: Opc = ARMCC::GE; break; |
| 6611 | case ISD::SETULT: Swap = true; [[fallthrough]]; |
| 6612 | case ISD::SETUGT: Opc = ARMCC::HI; break; |
| 6613 | case ISD::SETULE: Swap = true; [[fallthrough]]; |
| 6614 | case ISD::SETUGE: Opc = ARMCC::HS; break; |
| 6615 | } |
| 6616 | |
| 6617 | // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). |
| 6618 | if (ST->hasNEON() && Opc == ARMCC::EQ) { |
| 6619 | SDValue AndOp; |
| 6620 | if (ISD::isBuildVectorAllZeros(N: Op1.getNode())) |
| 6621 | AndOp = Op0; |
| 6622 | else if (ISD::isBuildVectorAllZeros(N: Op0.getNode())) |
| 6623 | AndOp = Op1; |
| 6624 | |
| 6625 | // Ignore bitconvert. |
| 6626 | if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) |
| 6627 | AndOp = AndOp.getOperand(i: 0); |
| 6628 | |
| 6629 | if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { |
| 6630 | Op0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: CmpVT, Operand: AndOp.getOperand(i: 0)); |
| 6631 | Op1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: CmpVT, Operand: AndOp.getOperand(i: 1)); |
| 6632 | SDValue Result = DAG.getNode(Opcode: ARMISD::VTST, DL: dl, VT: CmpVT, N1: Op0, N2: Op1); |
| 6633 | if (!Invert) |
| 6634 | Result = DAG.getNOT(DL: dl, Val: Result, VT); |
| 6635 | return Result; |
| 6636 | } |
| 6637 | } |
| 6638 | } |
| 6639 | |
| 6640 | if (Swap) |
| 6641 | std::swap(a&: Op0, b&: Op1); |
| 6642 | |
| 6643 | // If one of the operands is a constant vector zero, attempt to fold the |
| 6644 | // comparison to a specialized compare-against-zero form. |
| 6645 | if (ISD::isBuildVectorAllZeros(N: Op0.getNode()) && |
| 6646 | (Opc == ARMCC::GE || Opc == ARMCC::GT || Opc == ARMCC::EQ || |
| 6647 | Opc == ARMCC::NE)) { |
| 6648 | if (Opc == ARMCC::GE) |
| 6649 | Opc = ARMCC::LE; |
| 6650 | else if (Opc == ARMCC::GT) |
| 6651 | Opc = ARMCC::LT; |
| 6652 | std::swap(a&: Op0, b&: Op1); |
| 6653 | } |
| 6654 | |
| 6655 | SDValue Result; |
| 6656 | if (ISD::isBuildVectorAllZeros(N: Op1.getNode()) && |
| 6657 | (Opc == ARMCC::GE || Opc == ARMCC::GT || Opc == ARMCC::LE || |
| 6658 | Opc == ARMCC::LT || Opc == ARMCC::NE || Opc == ARMCC::EQ)) |
| 6659 | Result = DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT: CmpVT, N1: Op0, |
| 6660 | N2: DAG.getConstant(Val: Opc, DL: dl, VT: MVT::i32)); |
| 6661 | else |
| 6662 | Result = DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT: CmpVT, N1: Op0, N2: Op1, |
| 6663 | N3: DAG.getConstant(Val: Opc, DL: dl, VT: MVT::i32)); |
| 6664 | |
| 6665 | Result = DAG.getSExtOrTrunc(Op: Result, DL: dl, VT); |
| 6666 | |
| 6667 | if (Invert) |
| 6668 | Result = DAG.getNOT(DL: dl, Val: Result, VT); |
| 6669 | |
| 6670 | return Result; |
| 6671 | } |
| 6672 | |
| 6673 | static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) { |
| 6674 | SDValue LHS = Op.getOperand(i: 0); |
| 6675 | SDValue RHS = Op.getOperand(i: 1); |
| 6676 | SDValue Carry = Op.getOperand(i: 2); |
| 6677 | SDValue Cond = Op.getOperand(i: 3); |
| 6678 | SDLoc DL(Op); |
| 6679 | |
| 6680 | assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only."); |
| 6681 | |
| 6682 | // ARMISD::SUBE expects a carry not a borrow like ISD::USUBO_CARRY so we |
| 6683 | // have to invert the carry first. |
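| | // The incoming carry is a boolean 0/1 value, so 1 - Carry flips it; in the |
| | // ARM flags convention a set carry during subtraction means "no borrow". |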
| 6684 | Carry = DAG.getNode(Opcode: ISD::SUB, DL, VT: MVT::i32, |
| 6685 | N1: DAG.getConstant(Val: 1, DL, VT: MVT::i32), N2: Carry); |
| 6686 | // This converts the boolean value carry into the carry flag. |
| 6687 | Carry = ConvertBooleanCarryToCarryFlag(BoolCarry: Carry, DAG); |
| 6688 | |
| 6689 | SDVTList VTs = DAG.getVTList(VT1: LHS.getValueType(), VT2: MVT::i32); |
| 6690 | SDValue Cmp = DAG.getNode(Opcode: ARMISD::SUBE, DL, VTList: VTs, N1: LHS, N2: RHS, N3: Carry); |
| 6691 | |
| 6692 | SDValue FVal = DAG.getConstant(Val: 0, DL, VT: MVT::i32); |
| 6693 | SDValue TVal = DAG.getConstant(Val: 1, DL, VT: MVT::i32); |
| 6694 | SDValue ARMcc = DAG.getConstant( |
| 6695 | Val: IntCCToARMCC(CC: cast<CondCodeSDNode>(Val&: Cond)->get()), DL, VT: MVT::i32); |
| 6696 | return DAG.getNode(Opcode: ARMISD::CMOV, DL, VT: Op.getValueType(), N1: FVal, N2: TVal, N3: ARMcc, |
| 6697 | N4: Cmp.getValue(R: 1)); |
| 6698 | } |
| 6699 | |
| 6700 | /// isVMOVModifiedImm - Check if the specified splat value corresponds to a |
| 6701 | /// valid vector constant for a NEON or MVE instruction with a "modified |
| 6702 | /// immediate" operand (e.g., VMOV). If so, return the encoded value. |
| 6703 | static SDValue isVMOVModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, |
| 6704 | unsigned SplatBitSize, SelectionDAG &DAG, |
| 6705 | const SDLoc &dl, EVT &VT, EVT VectorVT, |
| 6706 | VMOVModImmType type) { |
| 6707 | unsigned OpCmode, Imm; |
| 6708 | bool is128Bits = VectorVT.is128BitVector(); |
| 6709 | |
| 6710 | // SplatBitSize is set to the smallest size that splats the vector, so a |
| 6711 | // zero vector will always have SplatBitSize == 8. However, NEON modified |
| 6712 | // immediate instructions other than VMOV do not support the 8-bit encoding |
| 6713 | // of a zero vector, and the default encoding of zero is supposed to be the |
| 6714 | // 32-bit version. |
| 6715 | if (SplatBits == 0) |
| 6716 | SplatBitSize = 32; |
| 6717 | |
| 6718 | switch (SplatBitSize) { |
| 6719 | case 8: |
| 6720 | if (type != VMOVModImm) |
| 6721 | return SDValue(); |
| 6722 | // Any 1-byte value is OK. Op=0, Cmode=1110. |
| 6723 | assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); |
| 6724 | OpCmode = 0xe; |
| 6725 | Imm = SplatBits; |
| 6726 | VT = is128Bits ? MVT::v16i8 : MVT::v8i8; |
| 6727 | break; |
| 6728 | |
| 6729 | case 16: |
| 6730 | // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. |
| 6731 | VT = is128Bits ? MVT::v8i16 : MVT::v4i16; |
| 6732 | if ((SplatBits & ~0xff) == 0) { |
| 6733 | // Value = 0x00nn: Op=x, Cmode=100x. |
| 6734 | OpCmode = 0x8; |
| 6735 | Imm = SplatBits; |
| 6736 | break; |
| 6737 | } |
| 6738 | if ((SplatBits & ~0xff00) == 0) { |
| 6739 | // Value = 0xnn00: Op=x, Cmode=101x. |
| 6740 | OpCmode = 0xa; |
| 6741 | Imm = SplatBits >> 8; |
| 6742 | break; |
| 6743 | } |
| 6744 | return SDValue(); |
| 6745 | |
| 6746 | case 32: |
| 6747 | // NEON's 32-bit VMOV supports splat values where: |
| 6748 | // * only one byte is nonzero, or |
| 6749 | // * the least significant byte is 0xff and the second byte is nonzero, or |
| 6750 | // * the least significant 2 bytes are 0xff and the third is nonzero. |
| 6751 | VT = is128Bits ? MVT::v4i32 : MVT::v2i32; |
| 6752 | if ((SplatBits & ~0xff) == 0) { |
| 6753 | // Value = 0x000000nn: Op=x, Cmode=000x. |
| 6754 | OpCmode = 0; |
| 6755 | Imm = SplatBits; |
| 6756 | break; |
| 6757 | } |
| 6758 | if ((SplatBits & ~0xff00) == 0) { |
| 6759 | // Value = 0x0000nn00: Op=x, Cmode=001x. |
| 6760 | OpCmode = 0x2; |
| 6761 | Imm = SplatBits >> 8; |
| 6762 | break; |
| 6763 | } |
| 6764 | if ((SplatBits & ~0xff0000) == 0) { |
| 6765 | // Value = 0x00nn0000: Op=x, Cmode=010x. |
| 6766 | OpCmode = 0x4; |
| 6767 | Imm = SplatBits >> 16; |
| 6768 | break; |
| 6769 | } |
| 6770 | if ((SplatBits & ~0xff000000) == 0) { |
| 6771 | // Value = 0xnn000000: Op=x, Cmode=011x. |
| 6772 | OpCmode = 0x6; |
| 6773 | Imm = SplatBits >> 24; |
| 6774 | break; |
| 6775 | } |
| 6776 | |
| 6777 | // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC |
| 6778 | if (type == OtherModImm) return SDValue(); |
| 6779 | |
| 6780 | if ((SplatBits & ~0xffff) == 0 && |
| 6781 | ((SplatBits | SplatUndef) & 0xff) == 0xff) { |
| 6782 | // Value = 0x0000nnff: Op=x, Cmode=1100. |
| 6783 | OpCmode = 0xc; |
| 6784 | Imm = SplatBits >> 8; |
| 6785 | break; |
| 6786 | } |
| 6787 | |
| 6788 | // cmode == 0b1101 is not supported for MVE VMVN |
| 6789 | if (type == MVEVMVNModImm) |
| 6790 | return SDValue(); |
| 6791 | |
| 6792 | if ((SplatBits & ~0xffffff) == 0 && |
| 6793 | ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { |
| 6794 | // Value = 0x00nnffff: Op=x, Cmode=1101. |
| 6795 | OpCmode = 0xd; |
| 6796 | Imm = SplatBits >> 16; |
| 6797 | break; |
| 6798 | } |
| 6799 | |
| 6800 | // Note: there are a few 32-bit splat values (specifically: 00ffff00, |
| 6801 | // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not |
| 6802 | // VMOV.I32. A (very) minor optimization would be to replicate the value |
| 6803 | // and fall through here to test for a valid 64-bit splat. But, then the |
| 6804 | // caller would also need to check and handle the change in size. |
| 6805 | return SDValue(); |
| 6806 | |
| 6807 | case 64: { |
| 6808 | if (type != VMOVModImm) |
| 6809 | return SDValue(); |
| 6810 | // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. |
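| | // The 8-bit immediate has one bit per byte (bit i set when byte i is |
| | // all-ones), e.g. 0x00ff00ff00ff00ff is encoded as Imm = 0b01010101. |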
| 6811 | uint64_t BitMask = 0xff; |
| 6812 | unsigned ImmMask = 1; |
| 6813 | Imm = 0; |
| 6814 | for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { |
| 6815 | if (((SplatBits | SplatUndef) & BitMask) == BitMask) { |
| 6816 | Imm |= ImmMask; |
| 6817 | } else if ((SplatBits & BitMask) != 0) { |
| 6818 | return SDValue(); |
| 6819 | } |
| 6820 | BitMask <<= 8; |
| 6821 | ImmMask <<= 1; |
| 6822 | } |
| 6823 | |
| 6824 | // Op=1, Cmode=1110. |
| 6825 | OpCmode = 0x1e; |
| 6826 | VT = is128Bits ? MVT::v2i64 : MVT::v1i64; |
| 6827 | break; |
| 6828 | } |
| 6829 | |
| 6830 | default: |
| 6831 | llvm_unreachable("unexpected size for isVMOVModifiedImm"); |
| 6832 | } |
| 6833 | |
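| | // createVMOVModImm packs the OpCmode bits above the 8-bit immediate, i.e. |
| | // (OpCmode << 8) | Imm, matching the decode helpers in ARMAddressingModes.h. |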
| 6834 | unsigned EncodedVal = ARM_AM::createVMOVModImm(OpCmode, Val: Imm); |
| 6835 | return DAG.getTargetConstant(Val: EncodedVal, DL: dl, VT: MVT::i32); |
| 6836 | } |
| 6837 | |
| 6838 | SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, |
| 6839 | const ARMSubtarget *ST) const { |
| 6840 | EVT VT = Op.getValueType(); |
| 6841 | bool IsDouble = (VT == MVT::f64); |
| 6842 | ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Val&: Op); |
| 6843 | const APFloat &FPVal = CFP->getValueAPF(); |
| 6844 | |
| 6845 | // Prevent floating-point constants from using literal loads |
| 6846 | // when execute-only is enabled. |
| 6847 | if (ST->genExecuteOnly()) { |
| 6848 | // We shouldn't trigger this for v6m execute-only |
| 6849 | assert((!ST->isThumb1Only() || ST->hasV8MBaselineOps()) && |
| 6850 | "Unexpected architecture" ); |
| 6851 | |
| 6852 | // If we can represent the constant as an immediate, don't lower it |
| 6853 | if (isFPImmLegal(Imm: FPVal, VT)) |
| 6854 | return Op; |
| 6855 | // Otherwise, construct as integer, and move to float register |
| 6856 | APInt INTVal = FPVal.bitcastToAPInt(); |
| 6857 | SDLoc DL(CFP); |
| 6858 | switch (VT.getSimpleVT().SimpleTy) { |
| 6859 | default: |
| 6860 | llvm_unreachable("Unknown floating point type!"); |
| 6861 | break; |
| 6862 | case MVT::f64: { |
| 6863 | SDValue Lo = DAG.getConstant(Val: INTVal.trunc(width: 32), DL, VT: MVT::i32); |
| 6864 | SDValue Hi = DAG.getConstant(Val: INTVal.lshr(shiftAmt: 32).trunc(width: 32), DL, VT: MVT::i32); |
| 6865 | return DAG.getNode(Opcode: ARMISD::VMOVDRR, DL, VT: MVT::f64, N1: Lo, N2: Hi); |
| 6866 | } |
| 6867 | case MVT::f32: |
| 6868 | return DAG.getNode(Opcode: ARMISD::VMOVSR, DL, VT, |
| 6869 | Operand: DAG.getConstant(Val: INTVal, DL, VT: MVT::i32)); |
| 6870 | } |
| 6871 | } |
| 6872 | |
| 6873 | if (!ST->hasVFP3Base()) |
| 6874 | return SDValue(); |
| 6875 | |
| 6876 | // Use the default (constant pool) lowering for double constants when we have |
| 6877 | // an SP-only FPU |
| 6878 | if (IsDouble && !Subtarget->hasFP64()) |
| 6879 | return SDValue(); |
| 6880 | |
| 6881 | // Try splatting with a VMOV.f32... |
| 6882 | int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPImm: FPVal) : ARM_AM::getFP32Imm(FPImm: FPVal); |
| 6883 | |
| 6884 | if (ImmVal != -1) { |
| 6885 | if (IsDouble || !ST->useNEONForSinglePrecisionFP()) { |
| 6886 | // We have code in place to select a valid ConstantFP already, no need to |
| 6887 | // do any mangling. |
| 6888 | return Op; |
| 6889 | } |
| 6890 | |
| 6891 | // It's a float and we are trying to use NEON operations where |
| 6892 | // possible. Lower it to a splat followed by an extract. |
| 6893 | SDLoc DL(Op); |
| 6894 | SDValue NewVal = DAG.getTargetConstant(Val: ImmVal, DL, VT: MVT::i32); |
| 6895 | SDValue VecConstant = DAG.getNode(Opcode: ARMISD::VMOVFPIMM, DL, VT: MVT::v2f32, |
| 6896 | Operand: NewVal); |
| 6897 | return DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: MVT::f32, N1: VecConstant, |
| 6898 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
| 6899 | } |
| 6900 | |
| 6901 | // The rest of our options are NEON-only; make sure that's allowed before |
| 6902 | // proceeding. |
| 6903 | if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP())) |
| 6904 | return SDValue(); |
| 6905 | |
| 6906 | EVT VMovVT; |
| 6907 | uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue(); |
| 6908 | |
| 6909 | // It wouldn't really be worth bothering for doubles except for one very |
| 6910 | // important value, which does happen to match: 0.0. So make sure we don't do |
| 6911 | // anything stupid. |
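| | // A VMOV.i32 replicates the same 32-bit pattern into both halves of the D |
| | // register, so the high and low words of the double must be identical. |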
| 6912 | if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32)) |
| 6913 | return SDValue(); |
| 6914 | |
| 6915 | // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too). |
| 6916 | SDValue NewVal = isVMOVModifiedImm(SplatBits: iVal & 0xffffffffU, SplatUndef: 0, SplatBitSize: 32, DAG, dl: SDLoc(Op), |
| 6917 | VT&: VMovVT, VectorVT: VT, type: VMOVModImm); |
| 6918 | if (NewVal != SDValue()) { |
| 6919 | SDLoc DL(Op); |
| 6920 | SDValue VecConstant = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL, VT: VMovVT, |
| 6921 | Operand: NewVal); |
| 6922 | if (IsDouble) |
| 6923 | return DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::f64, Operand: VecConstant); |
| 6924 | |
| 6925 | // It's a float: cast and extract a vector element. |
| 6926 | SDValue VecFConstant = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::v2f32, |
| 6927 | Operand: VecConstant); |
| 6928 | return DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: MVT::f32, N1: VecFConstant, |
| 6929 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
| 6930 | } |
| 6931 | |
| 6932 | // Finally, try a VMVN.i32 |
| 6933 | NewVal = isVMOVModifiedImm(SplatBits: ~iVal & 0xffffffffU, SplatUndef: 0, SplatBitSize: 32, DAG, dl: SDLoc(Op), VT&: VMovVT, |
| 6934 | VectorVT: VT, type: VMVNModImm); |
| 6935 | if (NewVal != SDValue()) { |
| 6936 | SDLoc DL(Op); |
| 6937 | SDValue VecConstant = DAG.getNode(Opcode: ARMISD::VMVNIMM, DL, VT: VMovVT, Operand: NewVal); |
| 6938 | |
| 6939 | if (IsDouble) |
| 6940 | return DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::f64, Operand: VecConstant); |
| 6941 | |
| 6942 | // It's a float: cast and extract a vector element. |
| 6943 | SDValue VecFConstant = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::v2f32, |
| 6944 | Operand: VecConstant); |
| 6945 | return DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: MVT::f32, N1: VecFConstant, |
| 6946 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
| 6947 | } |
| 6948 | |
| 6949 | return SDValue(); |
| 6950 | } |
| 6951 | |
| 6952 | // Check whether a VEXT instruction can handle the shuffle mask when both |
| 6953 | // vector sources of the shuffle are the same. |
| 6954 | static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) { |
| 6955 | unsigned NumElts = VT.getVectorNumElements(); |
| 6956 | |
| 6957 | // Assume that the first shuffle index is not UNDEF. Fail if it is. |
| 6958 | if (M[0] < 0) |
| 6959 | return false; |
| 6960 | |
| 6961 | Imm = M[0]; |
| 6962 | |
| 6963 | // If this is a VEXT shuffle, the immediate value is the index of the first |
| 6964 | // element. The other shuffle indices must be the successive elements after |
| 6965 | // the first one. |
| 6966 | unsigned ExpectedElt = Imm; |
| 6967 | for (unsigned i = 1; i < NumElts; ++i) { |
| 6968 | // Increment the expected index. If it wraps around, just follow it |
| 6969 | // back to index zero and keep going. |
| 6970 | ++ExpectedElt; |
| 6971 | if (ExpectedElt == NumElts) |
| 6972 | ExpectedElt = 0; |
| 6973 | |
| 6974 | if (M[i] < 0) continue; // ignore UNDEF indices |
| 6975 | if (ExpectedElt != static_cast<unsigned>(M[i])) |
| 6976 | return false; |
| 6977 | } |
| 6978 | |
| 6979 | return true; |
| 6980 | } |
| 6981 | |
| 6982 | static bool isVEXTMask(ArrayRef<int> M, EVT VT, |
| 6983 | bool &ReverseVEXT, unsigned &Imm) { |
| 6984 | unsigned NumElts = VT.getVectorNumElements(); |
| 6985 | ReverseVEXT = false; |
| 6986 | |
| 6987 | // Assume that the first shuffle index is not UNDEF. Fail if it is. |
| 6988 | if (M[0] < 0) |
| 6989 | return false; |
| 6990 | |
| 6991 | Imm = M[0]; |
| 6992 | |
| 6993 | // If this is a VEXT shuffle, the immediate value is the index of the first |
| 6994 | // element. The other shuffle indices must be the successive elements after |
| 6995 | // the first one. |
| 6996 | unsigned ExpectedElt = Imm; |
| 6997 | for (unsigned i = 1; i < NumElts; ++i) { |
| 6998 | // Increment the expected index. If it wraps around, it may still be |
| 6999 | // a VEXT but the source vectors must be swapped. |
| 7000 | ExpectedElt += 1; |
| 7001 | if (ExpectedElt == NumElts * 2) { |
| 7002 | ExpectedElt = 0; |
| 7003 | ReverseVEXT = true; |
| 7004 | } |
| 7005 | |
| 7006 | if (M[i] < 0) continue; // ignore UNDEF indices |
| 7007 | if (ExpectedElt != static_cast<unsigned>(M[i])) |
| 7008 | return false; |
| 7009 | } |
| 7010 | |
| 7011 | // Adjust the index value if the source operands will be swapped. |
| 7012 | if (ReverseVEXT) |
| 7013 | Imm -= NumElts; |
| 7014 | |
| 7015 | return true; |
| 7016 | } |
| 7017 | |
| 7018 | static bool isVTBLMask(ArrayRef<int> M, EVT VT) { |
| 7019 | // We can handle <8 x i8> vector shuffles. If the index in the mask is out of |
| 7020 | // range, then 0 is placed into the resulting vector. So pretty much any mask |
| 7021 | // of 8 elements can work here. |
| 7022 | return VT == MVT::v8i8 && M.size() == 8; |
| 7023 | } |
| 7024 | |
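| | // Decide which of the two interleaved results the element group starting at |
| | // Index belongs to. If the mask has twice as many elements as each source |
| | // (both results in one mask), the half containing Index selects the result; |
| | // otherwise the mask element at Index does (0 means the first result). |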
| 7025 | static unsigned SelectPairHalf(unsigned Elements, ArrayRef<int> Mask, |
| 7026 | unsigned Index) { |
| 7027 | if (Mask.size() == Elements * 2) |
| 7028 | return Index / Elements; |
| 7029 | return Mask[Index] == 0 ? 0 : 1; |
| 7030 | } |
| 7031 | |
| 7032 | // Checks whether the shuffle mask represents a vector transpose (VTRN) by |
| 7033 | // checking that pairs of elements in the shuffle mask represent the same index |
| 7034 | // in each vector, incrementing the expected index by 2 at each step. |
| 7035 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6] |
| 7036 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g} |
| 7037 | // v2={e,f,g,h} |
| 7038 | // WhichResult gives the offset for each element in the mask based on which |
| 7039 | // of the two results it belongs to. |
| 7040 | // |
| 7041 | // The transpose can be represented either as: |
| 7042 | // result1 = shufflevector v1, v2, result1_shuffle_mask |
| 7043 | // result2 = shufflevector v1, v2, result2_shuffle_mask |
| 7044 | // where v1/v2 and the shuffle masks have the same number of elements |
| 7045 | // (here WhichResult (see below) indicates which result is being checked) |
| 7046 | // |
| 7047 | // or as: |
| 7048 | // results = shufflevector v1, v2, shuffle_mask |
| 7049 | // where both results are returned in one vector and the shuffle mask has twice |
| 7050 | // as many elements as v1/v2 (here WhichResult will always be 0 if true). In |
| 7051 | // this case we check the low half and the high half of the shuffle mask as if |
| 7052 | // each were a mask of the first form. |
| 7053 | static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { |
| 7054 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7055 | if (EltSz == 64) |
| 7056 | return false; |
| 7057 | |
| 7058 | unsigned NumElts = VT.getVectorNumElements(); |
| 7059 | if ((M.size() != NumElts && M.size() != NumElts * 2) || NumElts % 2 != 0) |
| 7060 | return false; |
| 7061 | |
| 7062 | // If the mask is twice as long as the input vector then we need to check the |
| 7063 | // upper and lower parts of the mask with a matching value for WhichResult |
| 7064 | // FIXME: A mask with only even values will be rejected in case the first |
| 7065 | // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only |
| 7066 | // M[0] is used to determine WhichResult |
| 7067 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7068 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
| 7069 | for (unsigned j = 0; j < NumElts; j += 2) { |
| 7070 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || |
| 7071 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult)) |
| 7072 | return false; |
| 7073 | } |
| 7074 | } |
| 7075 | |
| 7076 | if (M.size() == NumElts*2) |
| 7077 | WhichResult = 0; |
| 7078 | |
| 7079 | return true; |
| 7080 | } |
| 7081 | |
| 7082 | /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of |
| 7083 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". |
| 7084 | /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. |
| 7085 | static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ |
| 7086 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7087 | if (EltSz == 64) |
| 7088 | return false; |
| 7089 | |
| 7090 | unsigned NumElts = VT.getVectorNumElements(); |
| 7091 | if ((M.size() != NumElts && M.size() != NumElts * 2) || NumElts % 2 != 0) |
| 7092 | return false; |
| 7093 | |
| 7094 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7095 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
| 7096 | for (unsigned j = 0; j < NumElts; j += 2) { |
| 7097 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || |
| 7098 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult)) |
| 7099 | return false; |
| 7100 | } |
| 7101 | } |
| 7102 | |
| 7103 | if (M.size() == NumElts*2) |
| 7104 | WhichResult = 0; |
| 7105 | |
| 7106 | return true; |
| 7107 | } |
| 7108 | |
| 7109 | // Checks whether the shuffle mask represents a vector unzip (VUZP) by checking |
| 7110 | // that the mask elements are either all even and in steps of size 2 or all odd |
| 7111 | // and in steps of size 2. |
| 7112 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6] |
| 7113 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g} |
| 7114 | // v2={e,f,g,h} |
| 7115 | // Requires checks similar to those in isVTRNMask with respect to how the |
| 7116 | // results are returned. |
| 7117 | static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { |
| 7118 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7119 | if (EltSz == 64) |
| 7120 | return false; |
| 7121 | |
| 7122 | unsigned NumElts = VT.getVectorNumElements(); |
| 7123 | if (M.size() != NumElts && M.size() != NumElts*2) |
| 7124 | return false; |
| 7125 | |
| 7126 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7127 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
| 7128 | for (unsigned j = 0; j < NumElts; ++j) { |
| 7129 | if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult) |
| 7130 | return false; |
| 7131 | } |
| 7132 | } |
| 7133 | |
| 7134 | if (M.size() == NumElts*2) |
| 7135 | WhichResult = 0; |
| 7136 | |
| 7137 | // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
| 7138 | if (VT.is64BitVector() && EltSz == 32) |
| 7139 | return false; |
| 7140 | |
| 7141 | return true; |
| 7142 | } |
| 7143 | |
| 7144 | /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of |
| 7145 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". |
| 7146 | /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>. |
| 7147 | static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ |
| 7148 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7149 | if (EltSz == 64) |
| 7150 | return false; |
| 7151 | |
| 7152 | unsigned NumElts = VT.getVectorNumElements(); |
| 7153 | if (M.size() != NumElts && M.size() != NumElts*2) |
| 7154 | return false; |
| 7155 | |
| 7156 | unsigned Half = NumElts / 2; |
| 7157 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7158 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
| 7159 | for (unsigned j = 0; j < NumElts; j += Half) { |
| 7160 | unsigned Idx = WhichResult; |
| 7161 | for (unsigned k = 0; k < Half; ++k) { |
| 7162 | int MIdx = M[i + j + k]; |
| 7163 | if (MIdx >= 0 && (unsigned) MIdx != Idx) |
| 7164 | return false; |
| 7165 | Idx += 2; |
| 7166 | } |
| 7167 | } |
| 7168 | } |
| 7169 | |
| 7170 | if (M.size() == NumElts*2) |
| 7171 | WhichResult = 0; |
| 7172 | |
| 7173 | // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
| 7174 | if (VT.is64BitVector() && EltSz == 32) |
| 7175 | return false; |
| 7176 | |
| 7177 | return true; |
| 7178 | } |
| 7179 | |
| 7180 | // Checks whether the shuffle mask represents a vector zip (VZIP) by checking |
| 7181 | // that pairs of elements of the shufflemask represent the same index in each |
| 7182 | // vector incrementing sequentially through the vectors. |
| 7183 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5] |
| 7184 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f} |
| 7185 | // v2={e,f,g,h} |
| 7186 | // Requires checks similar to those in isVTRNMask with respect to how the |
| 7187 | // results are returned. |
| 7188 | static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { |
| 7189 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7190 | if (EltSz == 64) |
| 7191 | return false; |
| 7192 | |
| 7193 | unsigned NumElts = VT.getVectorNumElements(); |
| 7194 | if ((M.size() != NumElts && M.size() != NumElts * 2) || NumElts % 2 != 0) |
| 7195 | return false; |
| 7196 | |
| 7197 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7198 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
| 7199 | unsigned Idx = WhichResult * NumElts / 2; |
| 7200 | for (unsigned j = 0; j < NumElts; j += 2) { |
| 7201 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || |
| 7202 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts)) |
| 7203 | return false; |
| 7204 | Idx += 1; |
| 7205 | } |
| 7206 | } |
| 7207 | |
| 7208 | if (M.size() == NumElts*2) |
| 7209 | WhichResult = 0; |
| 7210 | |
| 7211 | // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
| 7212 | if (VT.is64BitVector() && EltSz == 32) |
| 7213 | return false; |
| 7214 | |
| 7215 | return true; |
| 7216 | } |
| 7217 | |
| 7218 | /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of |
| 7219 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". |
| 7220 | /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. |
| 7221 | static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ |
| 7222 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7223 | if (EltSz == 64) |
| 7224 | return false; |
| 7225 | |
| 7226 | unsigned NumElts = VT.getVectorNumElements(); |
| 7227 | if ((M.size() != NumElts && M.size() != NumElts * 2) || NumElts % 2 != 0) |
| 7228 | return false; |
| 7229 | |
| 7230 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7231 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
| 7232 | unsigned Idx = WhichResult * NumElts / 2; |
| 7233 | for (unsigned j = 0; j < NumElts; j += 2) { |
| 7234 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || |
| 7235 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx)) |
| 7236 | return false; |
| 7237 | Idx += 1; |
| 7238 | } |
| 7239 | } |
| 7240 | |
| 7241 | if (M.size() == NumElts*2) |
| 7242 | WhichResult = 0; |
| 7243 | |
| 7244 | // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
| 7245 | if (VT.is64BitVector() && EltSz == 32) |
| 7246 | return false; |
| 7247 | |
| 7248 | return true; |
| 7249 | } |
| 7250 | |
| 7251 | /// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN), |
| 7252 | /// and return the corresponding ARMISD opcode if it is, or 0 if it isn't. |
| 7253 | static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT, |
| 7254 | unsigned &WhichResult, |
| 7255 | bool &isV_UNDEF) { |
| 7256 | isV_UNDEF = false; |
| 7257 | if (isVTRNMask(M: ShuffleMask, VT, WhichResult)) |
| 7258 | return ARMISD::VTRN; |
| 7259 | if (isVUZPMask(M: ShuffleMask, VT, WhichResult)) |
| 7260 | return ARMISD::VUZP; |
| 7261 | if (isVZIPMask(M: ShuffleMask, VT, WhichResult)) |
| 7262 | return ARMISD::VZIP; |
| 7263 | |
| 7264 | isV_UNDEF = true; |
| 7265 | if (isVTRN_v_undef_Mask(M: ShuffleMask, VT, WhichResult)) |
| 7266 | return ARMISD::VTRN; |
| 7267 | if (isVUZP_v_undef_Mask(M: ShuffleMask, VT, WhichResult)) |
| 7268 | return ARMISD::VUZP; |
| 7269 | if (isVZIP_v_undef_Mask(M: ShuffleMask, VT, WhichResult)) |
| 7270 | return ARMISD::VZIP; |
| 7271 | |
| 7272 | return 0; |
| 7273 | } |
| 7274 | |
| 7275 | /// \return true if this is a reverse operation on a vector. |
| 7276 | static bool isReverseMask(ArrayRef<int> M, EVT VT) { |
| 7277 | unsigned NumElts = VT.getVectorNumElements(); |
| 7278 | // Make sure the mask has the right size. |
| 7279 | if (NumElts != M.size()) |
| 7280 | return false; |
| 7281 | |
| 7282 | // Look for <15, ..., 3, -1, 1, 0>. |
| 7283 | for (unsigned i = 0; i != NumElts; ++i) |
| 7284 | if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i)) |
| 7285 | return false; |
| 7286 | |
| 7287 | return true; |
| 7288 | } |
| 7289 | |
| 7290 | static bool isTruncMask(ArrayRef<int> M, EVT VT, bool Top, bool SingleSource) { |
| 7291 | unsigned NumElts = VT.getVectorNumElements(); |
| 7292 | // Make sure the mask has the right size. |
| 7293 | if (NumElts != M.size() || (VT != MVT::v8i16 && VT != MVT::v16i8)) |
| 7294 | return false; |
| 7295 | |
| 7296 | // Half-width truncation patterns (e.g. v4i32 -> v8i16): |
| 7297 | // !Top && SingleSource: <0, 2, 4, 6, 0, 2, 4, 6> |
| 7298 | // !Top && !SingleSource: <0, 2, 4, 6, 8, 10, 12, 14> |
| 7299 | // Top && SingleSource: <1, 3, 5, 7, 1, 3, 5, 7> |
| 7300 | // Top && !SingleSource: <1, 3, 5, 7, 9, 11, 13, 15> |
| 7301 | int Ofs = Top ? 1 : 0; |
| 7302 | int Upper = SingleSource ? 0 : NumElts; |
| 7303 | for (int i = 0, e = NumElts / 2; i != e; ++i) { |
| 7304 | if (M[i] >= 0 && M[i] != (i * 2) + Ofs) |
| 7305 | return false; |
| 7306 | if (M[i + e] >= 0 && M[i + e] != (i * 2) + Ofs + Upper) |
| 7307 | return false; |
| 7308 | } |
| 7309 | return true; |
| 7310 | } |
| 7311 | |
| 7312 | static bool isVMOVNMask(ArrayRef<int> M, EVT VT, bool Top, bool SingleSource) { |
| 7313 | unsigned NumElts = VT.getVectorNumElements(); |
| 7314 | // Make sure the mask has the right size. |
| 7315 | if (NumElts != M.size() || (VT != MVT::v8i16 && VT != MVT::v16i8)) |
| 7316 | return false; |
| 7317 | |
| 7318 | // If Top |
| 7319 | // Look for <0, N, 2, N+2, 4, N+4, ..>. |
| 7320 | // This inserts Input2 into Input1 |
| 7321 | // else if not Top |
| 7322 | // Look for <0, N+1, 2, N+3, 4, N+5, ..> |
| 7323 | // This inserts Input1 into Input2 |
| 7324 | unsigned Offset = Top ? 0 : 1; |
| 7325 | unsigned N = SingleSource ? 0 : NumElts; |
| 7326 | for (unsigned i = 0; i < NumElts; i += 2) { |
| 7327 | if (M[i] >= 0 && M[i] != (int)i) |
| 7328 | return false; |
| 7329 | if (M[i + 1] >= 0 && M[i + 1] != (int)(N + i + Offset)) |
| 7330 | return false; |
| 7331 | } |
| 7332 | |
| 7333 | return true; |
| 7334 | } |
| 7335 | |
| 7336 | static bool isVMOVNTruncMask(ArrayRef<int> M, EVT ToVT, bool rev) { |
| 7337 | unsigned NumElts = ToVT.getVectorNumElements(); |
| 7338 | if (NumElts != M.size()) |
| 7339 | return false; |
| 7340 | |
| 7341 | // Test whether the Trunc can be converted to a VMOVN with this shuffle. We are |
| 7342 | // looking for patterns of: |
| 7343 | // !rev: 0 N/2 1 N/2+1 2 N/2+2 ... |
| 7344 | // rev: N/2 0 N/2+1 1 N/2+2 2 ... |
| 7345 | |
| 7346 | unsigned Off0 = rev ? NumElts / 2 : 0; |
| 7347 | unsigned Off1 = rev ? 0 : NumElts / 2; |
| 7348 | for (unsigned i = 0; i < NumElts; i += 2) { |
| 7349 | if (M[i] >= 0 && M[i] != (int)(Off0 + i / 2)) |
| 7350 | return false; |
| 7351 | if (M[i + 1] >= 0 && M[i + 1] != (int)(Off1 + i / 2)) |
| 7352 | return false; |
| 7353 | } |
| 7354 | |
| 7355 | return true; |
| 7356 | } |
| 7357 | |
| 7358 | // Reconstruct an MVE VCVT from a BuildVector of scalar fptrunc, all extracted |
| 7359 | // from a pair of inputs. For example: |
| 7360 | // BUILDVECTOR(FP_ROUND(EXTRACT_ELT(X, 0), |
| 7361 | // FP_ROUND(EXTRACT_ELT(Y, 0), |
| 7362 | // FP_ROUND(EXTRACT_ELT(X, 1), |
| 7363 | // FP_ROUND(EXTRACT_ELT(Y, 1), ...) |
| 7364 | static SDValue LowerBuildVectorOfFPTrunc(SDValue BV, SelectionDAG &DAG, |
| 7365 | const ARMSubtarget *ST) { |
| 7366 | assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!"); |
| 7367 | if (!ST->hasMVEFloatOps()) |
| 7368 | return SDValue(); |
| 7369 | |
| 7370 | SDLoc dl(BV); |
| 7371 | EVT VT = BV.getValueType(); |
| 7372 | if (VT != MVT::v8f16) |
| 7373 | return SDValue(); |
| 7374 | |
| 7375 | // We are looking for a buildvector of fptrunc elements, where all the |
| 7376 | // elements are interleavingly extracted from two sources. Check the first two |
| 7377 | // items are valid enough and extract some info from them (they are checked |
| 7378 | // properly in the loop below). |
| 7379 | if (BV.getOperand(i: 0).getOpcode() != ISD::FP_ROUND || |
| 7380 | BV.getOperand(i: 0).getOperand(i: 0).getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
| 7381 | BV.getOperand(i: 0).getOperand(i: 0).getConstantOperandVal(i: 1) != 0) |
| 7382 | return SDValue(); |
| 7383 | if (BV.getOperand(i: 1).getOpcode() != ISD::FP_ROUND || |
| 7384 | BV.getOperand(i: 1).getOperand(i: 0).getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
| 7385 | BV.getOperand(i: 1).getOperand(i: 0).getConstantOperandVal(i: 1) != 0) |
| 7386 | return SDValue(); |
| 7387 | SDValue Op0 = BV.getOperand(i: 0).getOperand(i: 0).getOperand(i: 0); |
| 7388 | SDValue Op1 = BV.getOperand(i: 1).getOperand(i: 0).getOperand(i: 0); |
| 7389 | if (Op0.getValueType() != MVT::v4f32 || Op1.getValueType() != MVT::v4f32) |
| 7390 | return SDValue(); |
| 7391 | |
| 7392 | // Check all the values in the BuildVector line up with our expectations. |
| 7393 | for (unsigned i = 1; i < 4; i++) { |
| 7394 | auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) { |
| 7395 | return Trunc.getOpcode() == ISD::FP_ROUND && |
| 7396 | Trunc.getOperand(i: 0).getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 7397 | Trunc.getOperand(i: 0).getOperand(i: 0) == Op && |
| 7398 | Trunc.getOperand(i: 0).getConstantOperandVal(i: 1) == Idx; |
| 7399 | }; |
| 7400 | if (!Check(BV.getOperand(i: i * 2 + 0), Op0, i)) |
| 7401 | return SDValue(); |
| 7402 | if (!Check(BV.getOperand(i: i * 2 + 1), Op1, i)) |
| 7403 | return SDValue(); |
| 7404 | } |
| 7405 | |
| 7406 | SDValue N1 = DAG.getNode(Opcode: ARMISD::VCVTN, DL: dl, VT, N1: DAG.getUNDEF(VT), N2: Op0, |
| 7407 | N3: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 7408 | return DAG.getNode(Opcode: ARMISD::VCVTN, DL: dl, VT, N1, N2: Op1, |
| 7409 | N3: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
| 7410 | } |
| 7411 | |
| 7412 | // Reconstruct an MVE VCVT from a BuildVector of scalar fpext, all extracted |
| 7413 | // from a single input on alternating lanes. For example: |
| 7414 | // BUILDVECTOR(FP_ROUND(EXTRACT_ELT(X, 0), |
| 7415 | // FP_ROUND(EXTRACT_ELT(X, 2), |
| 7416 | // FP_ROUND(EXTRACT_ELT(X, 4), ...) |
| 7417 | static SDValue LowerBuildVectorOfFPExt(SDValue BV, SelectionDAG &DAG, |
| 7418 | const ARMSubtarget *ST) { |
| 7419 | assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!"); |
| 7420 | if (!ST->hasMVEFloatOps()) |
| 7421 | return SDValue(); |
| 7422 | |
| 7423 | SDLoc dl(BV); |
| 7424 | EVT VT = BV.getValueType(); |
| 7425 | if (VT != MVT::v4f32) |
| 7426 | return SDValue(); |
| 7427 | |
| 7428 | // We are looking for a buildvector of fpext elements, where all the |
| 7429 | // elements are alternating lanes from a single source. For example <0,2,4,6> |
| 7430 | // or <1,3,5,7>. Check the first two items are valid enough and extract some |
| 7431 | // info from them (they are checked properly in the loop below). |
| 7432 | if (BV.getOperand(i: 0).getOpcode() != ISD::FP_EXTEND || |
| 7433 | BV.getOperand(i: 0).getOperand(i: 0).getOpcode() != ISD::EXTRACT_VECTOR_ELT) |
| 7434 | return SDValue(); |
| 7435 | SDValue Op0 = BV.getOperand(i: 0).getOperand(i: 0).getOperand(i: 0); |
| 7436 | int Offset = BV.getOperand(i: 0).getOperand(i: 0).getConstantOperandVal(i: 1); |
| 7437 | if (Op0.getValueType() != MVT::v8f16 || (Offset != 0 && Offset != 1)) |
| 7438 | return SDValue(); |
| 7439 | |
| 7440 | // Check all the values in the BuildVector line up with our expectations. |
| 7441 | for (unsigned i = 1; i < 4; i++) { |
| 7442 | auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) { |
| 7443 | return Trunc.getOpcode() == ISD::FP_EXTEND && |
| 7444 | Trunc.getOperand(i: 0).getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 7445 | Trunc.getOperand(i: 0).getOperand(i: 0) == Op && |
| 7446 | Trunc.getOperand(i: 0).getConstantOperandVal(i: 1) == Idx; |
| 7447 | }; |
| 7448 | if (!Check(BV.getOperand(i), Op0, 2 * i + Offset)) |
| 7449 | return SDValue(); |
| 7450 | } |
| 7451 | |
| 7452 | return DAG.getNode(Opcode: ARMISD::VCVTL, DL: dl, VT, N1: Op0, |
| 7453 | N2: DAG.getConstant(Val: Offset, DL: dl, VT: MVT::i32)); |
| 7454 | } |
| 7455 | |
| 7456 | // If N is an integer constant that can be moved into a register in one |
| 7457 | // instruction, return an SDValue of such a constant (will become a MOV |
| 7458 | // instruction). Otherwise return null. |
| 7459 | static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, |
| 7460 | const ARMSubtarget *ST, const SDLoc &dl) { |
| 7461 | uint64_t Val; |
| 7462 | if (!isa<ConstantSDNode>(Val: N)) |
| 7463 | return SDValue(); |
| 7464 | Val = N->getAsZExtVal(); |
| 7465 | |
| 7466 | if (ST->isThumb1Only()) { |
| 7467 | if (Val <= 255 || ~Val <= 255) |
| 7468 | return DAG.getConstant(Val, DL: dl, VT: MVT::i32); |
| 7469 | } else { |
| 7470 | if (ARM_AM::getSOImmVal(Arg: Val) != -1 || ARM_AM::getSOImmVal(Arg: ~Val) != -1) |
| 7471 | return DAG.getConstant(Val, DL: dl, VT: MVT::i32); |
| 7472 | } |
| 7473 | return SDValue(); |
| 7474 | } |
| 7475 | |
| 7476 | static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG, |
| 7477 | const ARMSubtarget *ST) { |
| 7478 | SDLoc dl(Op); |
| 7479 | EVT VT = Op.getValueType(); |
| 7480 | |
| 7481 | assert(ST->hasMVEIntegerOps() && "LowerBUILD_VECTOR_i1 called without MVE!"); |
| 7482 | |
| 7483 | unsigned NumElts = VT.getVectorNumElements(); |
| 7484 | unsigned BoolMask; |
| 7485 | unsigned BitsPerBool; |
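| | // MVE predicates are 16-bit masks, so each of the NumElts lanes owns |
| | // 16 / NumElts predicate bits and a set lane writes BoolMask at its slot. |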
| 7486 | if (NumElts == 2) { |
| 7487 | BitsPerBool = 8; |
| 7488 | BoolMask = 0xff; |
| 7489 | } else if (NumElts == 4) { |
| 7490 | BitsPerBool = 4; |
| 7491 | BoolMask = 0xf; |
| 7492 | } else if (NumElts == 8) { |
| 7493 | BitsPerBool = 2; |
| 7494 | BoolMask = 0x3; |
| 7495 | } else if (NumElts == 16) { |
| 7496 | BitsPerBool = 1; |
| 7497 | BoolMask = 0x1; |
| 7498 | } else |
| 7499 | return SDValue(); |
| 7500 | |
| 7501 | // If this is a single value copied into all lanes (a splat), we can just sign |
| 7502 | // extend that single value |
| 7503 | SDValue FirstOp = Op.getOperand(i: 0); |
| 7504 | if (!isa<ConstantSDNode>(Val: FirstOp) && |
| 7505 | llvm::all_of(Range: llvm::drop_begin(RangeOrContainer: Op->ops()), P: [&FirstOp](const SDUse &U) { |
| 7506 | return U.get().isUndef() || U.get() == FirstOp; |
| 7507 | })) { |
| 7508 | SDValue Ext = DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL: dl, VT: MVT::i32, N1: FirstOp, |
| 7509 | N2: DAG.getValueType(MVT::i1)); |
| 7510 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: Op.getValueType(), Operand: Ext); |
| 7511 | } |
| 7512 | |
| 7513 | // First create base with bits set where known |
| 7514 | unsigned Bits32 = 0; |
| 7515 | for (unsigned i = 0; i < NumElts; ++i) { |
| 7516 | SDValue V = Op.getOperand(i); |
| 7517 | if (!isa<ConstantSDNode>(Val: V) && !V.isUndef()) |
| 7518 | continue; |
| 7519 | bool BitSet = V.isUndef() ? false : V->getAsZExtVal(); |
| 7520 | if (BitSet) |
| 7521 | Bits32 |= BoolMask << (i * BitsPerBool); |
| 7522 | } |
| 7523 | |
| 7524 | // Add in unknown nodes |
| 7525 | SDValue Base = DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT, |
| 7526 | Operand: DAG.getConstant(Val: Bits32, DL: dl, VT: MVT::i32)); |
| 7527 | for (unsigned i = 0; i < NumElts; ++i) { |
| 7528 | SDValue V = Op.getOperand(i); |
| 7529 | if (isa<ConstantSDNode>(Val: V) || V.isUndef()) |
| 7530 | continue; |
| 7531 | Base = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT, N1: Base, N2: V, |
| 7532 | N3: DAG.getConstant(Val: i, DL: dl, VT: MVT::i32)); |
| 7533 | } |
| 7534 | |
| 7535 | return Base; |
| 7536 | } |
| 7537 | |
| 7538 | static SDValue LowerBUILD_VECTORToVIDUP(SDValue Op, SelectionDAG &DAG, |
| 7539 | const ARMSubtarget *ST) { |
| 7540 | if (!ST->hasMVEIntegerOps()) |
| 7541 | return SDValue(); |
| 7542 | |
| 7543 | // We are looking for a buildvector where each element is Op[0] + i*N |
| 7544 | EVT VT = Op.getValueType(); |
| 7545 | SDValue Op0 = Op.getOperand(i: 0); |
| 7546 | unsigned NumElts = VT.getVectorNumElements(); |
| 7547 | |
| 7548 | // Get the increment value from operand 1 |
| 7549 | SDValue Op1 = Op.getOperand(i: 1); |
| 7550 | if (Op1.getOpcode() != ISD::ADD || Op1.getOperand(i: 0) != Op0 || |
| 7551 | !isa<ConstantSDNode>(Val: Op1.getOperand(i: 1))) |
| 7552 | return SDValue(); |
| 7553 | unsigned N = Op1.getConstantOperandVal(i: 1); |
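| | // VIDUP can only step by an immediate of 1, 2, 4 or 8. |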
| 7554 | if (N != 1 && N != 2 && N != 4 && N != 8) |
| 7555 | return SDValue(); |
| 7556 | |
| 7557 | // Check that each other operand matches |
| 7558 | for (unsigned I = 2; I < NumElts; I++) { |
| 7559 | SDValue OpI = Op.getOperand(i: I); |
| 7560 | if (OpI.getOpcode() != ISD::ADD || OpI.getOperand(i: 0) != Op0 || |
| 7561 | !isa<ConstantSDNode>(Val: OpI.getOperand(i: 1)) || |
| 7562 | OpI.getConstantOperandVal(i: 1) != I * N) |
| 7563 | return SDValue(); |
| 7564 | } |
| 7565 | |
| 7566 | SDLoc DL(Op); |
| 7567 | return DAG.getNode(Opcode: ARMISD::VIDUP, DL, VTList: DAG.getVTList(VT1: VT, VT2: MVT::i32), N1: Op0, |
| 7568 | N2: DAG.getConstant(Val: N, DL, VT: MVT::i32)); |
| 7569 | } |
| 7570 | |
| 7571 | // Returns true if the operation N can be treated as a qr instruction variant at |
| 7572 | // operand Op. |
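| | // The qr forms take their second source as a scalar in a GPR, so commutative |
| | // operations accept the splat in either position, while subtraction-like |
| | // operations require it to be the subtrahend (operand 1, or operand 2 of the |
| | // listed intrinsics, whose operand 0 is the intrinsic ID). |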
| 7573 | static bool IsQRMVEInstruction(const SDNode *N, const SDNode *Op) { |
| 7574 | switch (N->getOpcode()) { |
| 7575 | case ISD::ADD: |
| 7576 | case ISD::MUL: |
| 7577 | case ISD::SADDSAT: |
| 7578 | case ISD::UADDSAT: |
| 7579 | case ISD::AVGFLOORS: |
| 7580 | case ISD::AVGFLOORU: |
| 7581 | return true; |
| 7582 | case ISD::SUB: |
| 7583 | case ISD::SSUBSAT: |
| 7584 | case ISD::USUBSAT: |
| 7585 | return N->getOperand(Num: 1).getNode() == Op; |
| 7586 | case ISD::INTRINSIC_WO_CHAIN: |
| 7587 | switch (N->getConstantOperandVal(Num: 0)) { |
| 7588 | case Intrinsic::arm_mve_add_predicated: |
| 7589 | case Intrinsic::arm_mve_mul_predicated: |
| 7590 | case Intrinsic::arm_mve_qadd_predicated: |
| 7591 | case Intrinsic::arm_mve_vhadd: |
| 7592 | case Intrinsic::arm_mve_hadd_predicated: |
| 7593 | case Intrinsic::arm_mve_vqdmulh: |
| 7594 | case Intrinsic::arm_mve_qdmulh_predicated: |
| 7595 | case Intrinsic::arm_mve_vqrdmulh: |
| 7596 | case Intrinsic::arm_mve_qrdmulh_predicated: |
| 7597 | case Intrinsic::arm_mve_vqdmull: |
| 7598 | case Intrinsic::arm_mve_vqdmull_predicated: |
| 7599 | return true; |
| 7600 | case Intrinsic::arm_mve_sub_predicated: |
| 7601 | case Intrinsic::arm_mve_qsub_predicated: |
| 7602 | case Intrinsic::arm_mve_vhsub: |
| 7603 | case Intrinsic::arm_mve_hsub_predicated: |
| 7604 | return N->getOperand(Num: 2).getNode() == Op; |
| 7605 | default: |
| 7606 | return false; |
| 7607 | } |
| 7608 | default: |
| 7609 | return false; |
| 7610 | } |
| 7611 | } |
| 7612 | |
| 7613 | // If this is a case we can't handle, return null and let the default |
| 7614 | // expansion code take care of it. |
| 7615 | SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, |
| 7616 | const ARMSubtarget *ST) const { |
| 7617 | BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Val: Op.getNode()); |
| 7618 | SDLoc dl(Op); |
| 7619 | EVT VT = Op.getValueType(); |
| 7620 | |
| 7621 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) |
| 7622 | return LowerBUILD_VECTOR_i1(Op, DAG, ST); |
| 7623 | |
| 7624 | if (SDValue R = LowerBUILD_VECTORToVIDUP(Op, DAG, ST)) |
| 7625 | return R; |
| 7626 | |
| 7627 | APInt SplatBits, SplatUndef; |
| 7628 | unsigned SplatBitSize; |
| 7629 | bool HasAnyUndefs; |
| 7630 | if (BVN->isConstantSplat(SplatValue&: SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { |
| 7631 | if (SplatUndef.isAllOnes()) |
| 7632 | return DAG.getUNDEF(VT); |
| 7633 | |
| 7634 | // If all the users of this constant splat are qr instruction variants, |
| 7635 | // generate a vdup of the constant. |
| 7636 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == SplatBitSize && |
| 7637 | (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32) && |
| 7638 | all_of(Range: BVN->users(), |
| 7639 | P: [BVN](const SDNode *U) { return IsQRMVEInstruction(N: U, Op: BVN); })) { |
| 7640 | EVT DupVT = SplatBitSize == 32 ? MVT::v4i32 |
| 7641 | : SplatBitSize == 16 ? MVT::v8i16 |
| 7642 | : MVT::v16i8; |
| 7643 | SDValue Const = DAG.getConstant(Val: SplatBits.getZExtValue(), DL: dl, VT: MVT::i32); |
| 7644 | SDValue VDup = DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT: DupVT, Operand: Const); |
| 7645 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: VDup); |
| 7646 | } |
| 7647 | |
| 7648 | if ((ST->hasNEON() && SplatBitSize <= 64) || |
| 7649 | (ST->hasMVEIntegerOps() && SplatBitSize <= 64)) { |
| 7650 | // Check if an immediate VMOV works. |
| 7651 | EVT VmovVT; |
| 7652 | SDValue Val = |
| 7653 | isVMOVModifiedImm(SplatBits: SplatBits.getZExtValue(), SplatUndef: SplatUndef.getZExtValue(), |
| 7654 | SplatBitSize, DAG, dl, VT&: VmovVT, VectorVT: VT, type: VMOVModImm); |
| 7655 | |
| 7656 | if (Val.getNode()) { |
| 7657 | SDValue Vmov = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: VmovVT, Operand: Val); |
| 7658 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Vmov); |
| 7659 | } |
| 7660 | |
| 7661 | // Try an immediate VMVN. |
| 7662 | uint64_t NegatedImm = (~SplatBits).getZExtValue(); |
| 7663 | Val = isVMOVModifiedImm( |
| 7664 | SplatBits: NegatedImm, SplatUndef: SplatUndef.getZExtValue(), SplatBitSize, DAG, dl, VT&: VmovVT, |
| 7665 | VectorVT: VT, type: ST->hasMVEIntegerOps() ? MVEVMVNModImm : VMVNModImm); |
| 7666 | if (Val.getNode()) { |
| 7667 | SDValue Vmov = DAG.getNode(Opcode: ARMISD::VMVNIMM, DL: dl, VT: VmovVT, Operand: Val); |
| 7668 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Vmov); |
| 7669 | } |
| 7670 | |
| 7671 | // Use vmov.f32 to materialize other v2f32 and v4f32 splats. |
| 7672 | if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) { |
| 7673 | int ImmVal = ARM_AM::getFP32Imm(Imm: SplatBits); |
| 7674 | if (ImmVal != -1) { |
| 7675 | SDValue Val = DAG.getTargetConstant(Val: ImmVal, DL: dl, VT: MVT::i32); |
| 7676 | return DAG.getNode(Opcode: ARMISD::VMOVFPIMM, DL: dl, VT, Operand: Val); |
| 7677 | } |
| 7678 | } |
| 7679 | |
| 7680 | // If we are under MVE, generate a VDUP(constant), bitcast to the original |
| 7681 | // type. |
| 7682 | if (ST->hasMVEIntegerOps() && |
| 7683 | (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32)) { |
| 7684 | EVT DupVT = SplatBitSize == 32 ? MVT::v4i32 |
| 7685 | : SplatBitSize == 16 ? MVT::v8i16 |
| 7686 | : MVT::v16i8; |
| 7687 | SDValue Const = DAG.getConstant(Val: SplatBits.getZExtValue(), DL: dl, VT: MVT::i32); |
| 7688 | SDValue VDup = DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT: DupVT, Operand: Const); |
| 7689 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: VDup); |
| 7690 | } |
| 7691 | } |
| 7692 | } |
| 7693 | |
| 7694 | // Scan through the operands to see if only one value is used. |
| 7695 | // |
| 7696 | // As an optimisation, even if more than one value is used it may be more |
| 7697 | // profitable to splat with one value then change some lanes. |
| 7698 | // |
| 7699 | // Heuristically we decide to do this if the vector has a "dominant" value, |
| 7700 | // defined as splatted to more than half of the lanes. |
| 7701 | unsigned NumElts = VT.getVectorNumElements(); |
| 7702 | bool isOnlyLowElement = true; |
| 7703 | bool usesOnlyOneValue = true; |
| 7704 | bool hasDominantValue = false; |
| 7705 | bool isConstant = true; |
| 7706 | |
| 7707 | // Map of the number of times a particular SDValue appears in the |
| 7708 | // element list. |
| 7709 | DenseMap<SDValue, unsigned> ValueCounts; |
| 7710 | SDValue Value; |
| 7711 | for (unsigned i = 0; i < NumElts; ++i) { |
| 7712 | SDValue V = Op.getOperand(i); |
| 7713 | if (V.isUndef()) |
| 7714 | continue; |
| 7715 | if (i > 0) |
| 7716 | isOnlyLowElement = false; |
| 7717 | if (!isa<ConstantFPSDNode>(Val: V) && !isa<ConstantSDNode>(Val: V)) |
| 7718 | isConstant = false; |
| 7719 | |
| 7720 | unsigned &Count = ValueCounts[V]; |
| 7721 | |
| 7722 | // Is this value dominant? (takes up more than half of the lanes) |
| 7723 | if (++Count > (NumElts / 2)) { |
| 7724 | hasDominantValue = true; |
| 7725 | Value = V; |
| 7726 | } |
| 7727 | } |
| 7728 | if (ValueCounts.size() != 1) |
| 7729 | usesOnlyOneValue = false; |
| 7730 | if (!Value.getNode() && !ValueCounts.empty()) |
| 7731 | Value = ValueCounts.begin()->first; |
| 7732 | |
| 7733 | if (ValueCounts.empty()) |
| 7734 | return DAG.getUNDEF(VT); |
| 7735 | |
| 7736 | // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR. |
| 7737 | // Keep going if we are hitting this case. |
| 7738 | if (isOnlyLowElement && !ISD::isNormalLoad(N: Value.getNode())) |
| 7739 | return DAG.getNode(Opcode: ISD::SCALAR_TO_VECTOR, DL: dl, VT, Operand: Value); |
| 7740 | |
| 7741 | unsigned EltSize = VT.getScalarSizeInBits(); |
| 7742 | |
| 7743 | // Use VDUP for non-constant splats. For f32 constant splats, reduce to |
| 7744 | // i32 and try again. |
| 7745 | if (hasDominantValue && EltSize <= 32) { |
| 7746 | if (!isConstant) { |
| 7747 | SDValue N; |
| 7748 | |
| 7749 | // If we are VDUPing a value that comes directly from a vector, that will |
| 7750 | // cause an unnecessary move to and from a GPR, where instead we could |
| 7751 | // just use VDUPLANE. We can only do this if the lane being extracted |
| 7752 | // is at a constant index, as the VDUP from lane instructions only have |
| 7753 | // constant-index forms. |
| 7754 | ConstantSDNode *constIndex; |
| 7755 | if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 7756 | (constIndex = dyn_cast<ConstantSDNode>(Val: Value->getOperand(Num: 1)))) { |
| 7757 | // We need to create a new undef vector to use for the VDUPLANE if the |
| 7758 | // size of the vector from which we get the value is different from the |
| 7759 | // size of the vector that we need to create. We will insert the element |
| 7760 | // such that the register coalescer will remove unnecessary copies. |
| 7761 | if (VT != Value->getOperand(Num: 0).getValueType()) { |
| 7762 | unsigned index = constIndex->getAPIntValue().getLimitedValue() % |
| 7763 | VT.getVectorNumElements(); |
| 7764 | N = DAG.getNode(Opcode: ARMISD::VDUPLANE, DL: dl, VT, |
| 7765 | N1: DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT, N1: DAG.getUNDEF(VT), |
| 7766 | N2: Value, N3: DAG.getConstant(Val: index, DL: dl, VT: MVT::i32)), |
| 7767 | N2: DAG.getConstant(Val: index, DL: dl, VT: MVT::i32)); |
| 7768 | } else |
| 7769 | N = DAG.getNode(Opcode: ARMISD::VDUPLANE, DL: dl, VT, |
| 7770 | N1: Value->getOperand(Num: 0), N2: Value->getOperand(Num: 1)); |
| 7771 | } else |
| 7772 | N = DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT, Operand: Value); |
| 7773 | |
| 7774 | if (!usesOnlyOneValue) { |
| 7775 | // The dominant value was splatted as 'N', but we now have to insert |
| 7776 | // all differing elements. |
| 7777 | for (unsigned I = 0; I < NumElts; ++I) { |
| 7778 | if (Op.getOperand(i: I) == Value) |
| 7779 | continue; |
| 7780 | SmallVector<SDValue, 3> Ops; |
| 7781 | Ops.push_back(Elt: N); |
| 7782 | Ops.push_back(Elt: Op.getOperand(i: I)); |
| 7783 | Ops.push_back(Elt: DAG.getConstant(Val: I, DL: dl, VT: MVT::i32)); |
| 7784 | N = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT, Ops); |
| 7785 | } |
| 7786 | } |
| 7787 | return N; |
| 7788 | } |
| 7789 | if (VT.getVectorElementType().isFloatingPoint()) { |
| 7790 | SmallVector<SDValue, 8> Ops; |
| 7791 | MVT FVT = VT.getVectorElementType().getSimpleVT(); |
| 7792 | assert(FVT == MVT::f32 || FVT == MVT::f16); |
| 7793 | MVT IVT = (FVT == MVT::f32) ? MVT::i32 : MVT::i16; |
| 7794 | for (unsigned i = 0; i < NumElts; ++i) |
| 7795 | Ops.push_back(Elt: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: IVT, |
| 7796 | Operand: Op.getOperand(i))); |
| 7797 | EVT VecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: IVT, NumElements: NumElts); |
| 7798 | SDValue Val = DAG.getBuildVector(VT: VecVT, DL: dl, Ops); |
| 7799 | Val = LowerBUILD_VECTOR(Op: Val, DAG, ST); |
| 7800 | if (Val.getNode()) |
| 7801 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Val); |
| 7802 | } |
| 7803 | if (usesOnlyOneValue) { |
| 7804 | SDValue Val = IsSingleInstrConstant(N: Value, DAG, ST, dl); |
| 7805 | if (isConstant && Val.getNode()) |
| 7806 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT, Operand: Val); |
| 7807 | } |
| 7808 | } |
| 7809 | |
| 7810 | // If all elements are constants and the case above didn't get hit, fall back |
| 7811 | // to the default expansion, which will generate a load from the constant |
| 7812 | // pool. |
| 7813 | if (isConstant) |
| 7814 | return SDValue(); |
| 7815 | |
| 7816 | // Reconstruct the BUILDVECTOR to one of the legal shuffles (such as vext and |
| 7817 | // vmovn). Empirical tests suggest this is rarely worth it for vectors of |
| 7818 | // length <= 2. |
| 7819 | if (NumElts >= 4) |
| 7820 | if (SDValue shuffle = ReconstructShuffle(Op, DAG)) |
| 7821 | return shuffle; |
| 7822 | |
| 7823 | // Attempt to turn a buildvector of scalar fptrunc's or fpext's back into |
| 7824 | // VCVT's |
| 7825 | if (SDValue VCVT = LowerBuildVectorOfFPTrunc(BV: Op, DAG, ST: Subtarget)) |
| 7826 | return VCVT; |
| 7827 | if (SDValue VCVT = LowerBuildVectorOfFPExt(BV: Op, DAG, ST: Subtarget)) |
| 7828 | return VCVT; |
| 7829 | |
| 7830 | if (ST->hasNEON() && VT.is128BitVector() && VT != MVT::v2f64 && VT != MVT::v4f32) { |
| 7831 | // If we haven't found an efficient lowering, try splitting a 128-bit vector |
| 7832 | // into two 64-bit vectors; we might discover a better way to lower it. |
| 7833 | SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElts); |
| 7834 | EVT ExtVT = VT.getVectorElementType(); |
| 7835 | EVT HVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: ExtVT, NumElements: NumElts / 2); |
| 7836 | SDValue Lower = DAG.getBuildVector(VT: HVT, DL: dl, Ops: ArrayRef(&Ops[0], NumElts / 2)); |
| 7837 | if (Lower.getOpcode() == ISD::BUILD_VECTOR) |
| 7838 | Lower = LowerBUILD_VECTOR(Op: Lower, DAG, ST); |
| 7839 | SDValue Upper = |
| 7840 | DAG.getBuildVector(VT: HVT, DL: dl, Ops: ArrayRef(&Ops[NumElts / 2], NumElts / 2)); |
| 7841 | if (Upper.getOpcode() == ISD::BUILD_VECTOR) |
| 7842 | Upper = LowerBUILD_VECTOR(Op: Upper, DAG, ST); |
| 7843 | if (Lower && Upper) |
| 7844 | return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT, N1: Lower, N2: Upper); |
| 7845 | } |
| 7846 | |
| 7847 | // Vectors with 32- or 64-bit elements can be built by directly assigning |
| 7848 | // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands |
| 7849 | // will be legalized. |
| 7850 | if (EltSize >= 32) { |
| 7851 | // Do the expansion with floating-point types, since that is what the VFP |
| 7852 | // registers are defined to use, and since i64 is not legal. |
| 7853 | EVT EltVT = EVT::getFloatingPointVT(BitWidth: EltSize); |
| 7854 | EVT VecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: EltVT, NumElements: NumElts); |
| 7855 | SmallVector<SDValue, 8> Ops; |
| 7856 | for (unsigned i = 0; i < NumElts; ++i) |
| 7857 | Ops.push_back(Elt: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: EltVT, Operand: Op.getOperand(i))); |
| 7858 | SDValue Val = DAG.getNode(Opcode: ARMISD::BUILD_VECTOR, DL: dl, VT: VecVT, Ops); |
| 7859 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Val); |
| 7860 | } |
| 7861 | |
| 7862 | // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we |
| 7863 | // know the default expansion would otherwise fall back on something even |
| 7864 | // worse. For a vector with one or two non-undef values, that's |
| 7865 | // scalar_to_vector for the elements followed by a shuffle (provided the |
| 7866 | // shuffle is valid for the target) and materialization element by element |
| 7867 | // on the stack followed by a load for everything else. |
| 7868 | if (!isConstant && !usesOnlyOneValue) { |
| 7869 | SDValue Vec = DAG.getUNDEF(VT); |
| 7870 | for (unsigned i = 0 ; i < NumElts; ++i) { |
| 7871 | SDValue V = Op.getOperand(i); |
| 7872 | if (V.isUndef()) |
| 7873 | continue; |
| 7874 | SDValue LaneIdx = DAG.getConstant(Val: i, DL: dl, VT: MVT::i32); |
| 7875 | Vec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT, N1: Vec, N2: V, N3: LaneIdx); |
| 7876 | } |
| 7877 | return Vec; |
| 7878 | } |
| 7879 | |
| 7880 | return SDValue(); |
| 7881 | } |
| 7882 | |
| 7883 | // Gather data to see if the operation can be modelled as a |
| 7884 | // shuffle in combination with VEXTs. |
| 7885 | SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, |
| 7886 | SelectionDAG &DAG) const { |
| 7887 | assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!" ); |
| 7888 | SDLoc dl(Op); |
| 7889 | EVT VT = Op.getValueType(); |
| 7890 | unsigned NumElts = VT.getVectorNumElements(); |
| 7891 | |
| 7892 | struct ShuffleSourceInfo { |
| 7893 | SDValue Vec; |
| 7894 | unsigned MinElt = std::numeric_limits<unsigned>::max(); |
| 7895 | unsigned MaxElt = 0; |
| 7896 | |
| 7897 | // We may insert some combination of BITCASTs and VEXT nodes to force Vec to |
| 7898 | // be compatible with the shuffle we intend to construct. As a result |
| 7899 | // ShuffleVec will be some sliding window into the original Vec. |
| 7900 | SDValue ShuffleVec; |
| 7901 | |
| 7902 | // Code should guarantee that element i in Vec starts at element "WindowBase |
| 7903 | // + i * WindowScale" in ShuffleVec. |
| 7904 | int WindowBase = 0; |
| 7905 | int WindowScale = 1; |
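| | // For example, if Vec was padded with UNDEF via CONCAT_VECTORS (WindowBase |
| | // stays 0) and later recast from i32 to i16 shuffle lanes (WindowScale |
| | // becomes 2), element 3 of Vec starts at lane 0 + 3 * 2 == 6 of ShuffleVec. |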
| 7906 | |
| 7907 | ShuffleSourceInfo(SDValue Vec) : Vec(Vec), ShuffleVec(Vec) {} |
| 7908 | |
| 7909 | bool operator ==(SDValue OtherVec) { return Vec == OtherVec; } |
| 7910 | }; |
| 7911 | |
| 7912 | // First gather all vectors used as an immediate source for this BUILD_VECTOR |
| 7913 | // node. |
| 7914 | SmallVector<ShuffleSourceInfo, 2> Sources; |
| 7915 | for (unsigned i = 0; i < NumElts; ++i) { |
| 7916 | SDValue V = Op.getOperand(i); |
| 7917 | if (V.isUndef()) |
| 7918 | continue; |
| 7919 | else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { |
| 7920 | // A shuffle can only come from building a vector from various |
| 7921 | // elements of other vectors. |
| 7922 | return SDValue(); |
| 7923 | } else if (!isa<ConstantSDNode>(Val: V.getOperand(i: 1))) { |
| 7924 | // Furthermore, shuffles require a constant mask, whereas extractelts |
| 7925 | // accept variable indices. |
| 7926 | return SDValue(); |
| 7927 | } |
| 7928 | |
| 7929 | // Add this element source to the list if it's not already there. |
| 7930 | SDValue SourceVec = V.getOperand(i: 0); |
| 7931 | auto Source = llvm::find(Range&: Sources, Val: SourceVec); |
| 7932 | if (Source == Sources.end()) |
| 7933 | Source = Sources.insert(I: Sources.end(), Elt: ShuffleSourceInfo(SourceVec)); |
| 7934 | |
| 7935 | // Update the minimum and maximum lane number seen. |
| 7936 | unsigned EltNo = V.getConstantOperandVal(i: 1); |
| 7937 | Source->MinElt = std::min(a: Source->MinElt, b: EltNo); |
| 7938 | Source->MaxElt = std::max(a: Source->MaxElt, b: EltNo); |
| 7939 | } |
| 7940 | |
| 7941 | // Currently only do something sane when at most two source vectors |
| 7942 | // are involved. |
| 7943 | if (Sources.size() > 2) |
| 7944 | return SDValue(); |
| 7945 | |
| 7946 | // Find out the smallest element size among result and two sources, and use |
| 7947 | // it as element size to build the shuffle_vector. |
| 7948 | EVT SmallestEltTy = VT.getVectorElementType(); |
| 7949 | for (auto &Source : Sources) { |
| 7950 | EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType(); |
| 7951 | if (SrcEltTy.bitsLT(VT: SmallestEltTy)) |
| 7952 | SmallestEltTy = SrcEltTy; |
| 7953 | } |
| 7954 | unsigned ResMultiplier = |
| 7955 | VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits(); |
| 7956 | NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits(); |
| 7957 | EVT ShuffleVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: SmallestEltTy, NumElements: NumElts); |
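| | // For example, a v4i32 built from elements extracted out of v8i16 sources |
| | // gives SmallestEltTy == i16, ResMultiplier == 2 and ShuffleVT == v8i16, so |
| | // each result lane is assembled from two adjacent shuffle lanes. |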
| 7958 | |
| 7959 | // If the source vector is too wide or too narrow, we may nevertheless be able |
| 7960 | // to construct a compatible shuffle either by concatenating it with UNDEF or |
| 7961 | // extracting a suitable range of elements. |
| 7962 | for (auto &Src : Sources) { |
| 7963 | EVT SrcVT = Src.ShuffleVec.getValueType(); |
| 7964 | |
| 7965 | uint64_t SrcVTSize = SrcVT.getFixedSizeInBits(); |
| 7966 | uint64_t VTSize = VT.getFixedSizeInBits(); |
| 7967 | if (SrcVTSize == VTSize) |
| 7968 | continue; |
| 7969 | |
| 7970 | // This stage of the search produces a source with the same element type as |
| 7971 | // the original, but with a total width matching the BUILD_VECTOR output. |
| 7972 | EVT EltVT = SrcVT.getVectorElementType(); |
| 7973 | unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits(); |
| 7974 | EVT DestVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: EltVT, NumElements: NumSrcElts); |
| 7975 | |
| 7976 | if (SrcVTSize < VTSize) { |
| 7977 | if (2 * SrcVTSize != VTSize) |
| 7978 | return SDValue(); |
| 7979 | // We can pad out the smaller vector for free, so if it's part of a |
| 7980 | // shuffle... |
| 7981 | Src.ShuffleVec = |
| 7982 | DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT: DestVT, N1: Src.ShuffleVec, |
| 7983 | N2: DAG.getUNDEF(VT: Src.ShuffleVec.getValueType())); |
| 7984 | continue; |
| 7985 | } |
| 7986 | |
| 7987 | if (SrcVTSize != 2 * VTSize) |
| 7988 | return SDValue(); |
| 7989 | |
| 7990 | if (Src.MaxElt - Src.MinElt >= NumSrcElts) { |
| 7991 | // Span too large for a VEXT to cope |
| 7992 | return SDValue(); |
| 7993 | } |
| 7994 | |
| 7995 | if (Src.MinElt >= NumSrcElts) { |
| 7996 | // The extraction can just take the second half |
| 7997 | Src.ShuffleVec = |
| 7998 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: DestVT, N1: Src.ShuffleVec, |
| 7999 | N2: DAG.getConstant(Val: NumSrcElts, DL: dl, VT: MVT::i32)); |
| 8000 | Src.WindowBase = -NumSrcElts; |
| 8001 | } else if (Src.MaxElt < NumSrcElts) { |
| 8002 | // The extraction can just take the first half |
| 8003 | Src.ShuffleVec = |
| 8004 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: DestVT, N1: Src.ShuffleVec, |
| 8005 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 8006 | } else { |
| 8007 | // An actual VEXT is needed |
| 8008 | SDValue VEXTSrc1 = |
| 8009 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: DestVT, N1: Src.ShuffleVec, |
| 8010 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 8011 | SDValue VEXTSrc2 = |
| 8012 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: DestVT, N1: Src.ShuffleVec, |
| 8013 | N2: DAG.getConstant(Val: NumSrcElts, DL: dl, VT: MVT::i32)); |
| 8014 | |
| 8015 | Src.ShuffleVec = DAG.getNode(Opcode: ARMISD::VEXT, DL: dl, VT: DestVT, N1: VEXTSrc1, |
| 8016 | N2: VEXTSrc2, |
| 8017 | N3: DAG.getConstant(Val: Src.MinElt, DL: dl, VT: MVT::i32)); |
| 8018 | Src.WindowBase = -Src.MinElt; |
| 8019 | } |
| 8020 | } |
| 8021 | |
| 8022 | // Another possible incompatibility occurs from the vector element types. We |
| 8023 | // can fix this by bitcasting the source vectors to the same type we intend |
| 8024 | // for the shuffle. |
| 8025 | for (auto &Src : Sources) { |
| 8026 | EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType(); |
| 8027 | if (SrcEltTy == SmallestEltTy) |
| 8028 | continue; |
| 8029 | assert(ShuffleVT.getVectorElementType() == SmallestEltTy); |
| 8030 | Src.ShuffleVec = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: ShuffleVT, Operand: Src.ShuffleVec); |
| 8031 | Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits(); |
| 8032 | Src.WindowBase *= Src.WindowScale; |
| 8033 | } |
| 8034 | |
| 8035 | // Final check before we try to actually produce a shuffle. |
| 8036 | LLVM_DEBUG({ |
| 8037 | for (auto Src : Sources) |
| 8038 | assert(Src.ShuffleVec.getValueType() == ShuffleVT); |
| 8039 | }); |
| 8040 | |
| 8041 | // The stars all align, our next step is to produce the mask for the shuffle. |
| 8042 | SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1); |
| 8043 | int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits(); |
| 8044 | for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { |
| 8045 | SDValue Entry = Op.getOperand(i); |
| 8046 | if (Entry.isUndef()) |
| 8047 | continue; |
| 8048 | |
| 8049 | auto Src = llvm::find(Range&: Sources, Val: Entry.getOperand(i: 0)); |
| 8050 | int EltNo = cast<ConstantSDNode>(Val: Entry.getOperand(i: 1))->getSExtValue(); |
| 8051 | |
| 8052 | // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit |
| 8053 | // trunc. So only std::min(SrcBits, DestBits) actually get defined in this |
| 8054 | // segment. |
| 8055 | EVT OrigEltTy = Entry.getOperand(i: 0).getValueType().getVectorElementType(); |
| 8056 | int BitsDefined = std::min(a: OrigEltTy.getScalarSizeInBits(), |
| 8057 | b: VT.getScalarSizeInBits()); |
| 8058 | int LanesDefined = BitsDefined / BitsPerShuffleLane; |
| 8059 | |
| 8060 | // This source is expected to fill ResMultiplier lanes of the final shuffle, |
| 8061 | // starting at the appropriate offset. |
| 8062 | int *LaneMask = &Mask[i * ResMultiplier]; |
| 8063 | |
| 8064 | int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase; |
| 8065 | ExtractBase += NumElts * (Src - Sources.begin()); |
| 8066 | for (int j = 0; j < LanesDefined; ++j) |
| 8067 | LaneMask[j] = ExtractBase + j; |
| 8068 | } |
| 8069 | |
| 8070 | |
| 8071 | // We can't handle more than two sources. This should have already |
| 8072 | // been checked before this point. |
| 8073 | assert(Sources.size() <= 2 && "Too many sources!" ); |
| 8074 | |
| 8075 | SDValue ShuffleOps[] = { DAG.getUNDEF(VT: ShuffleVT), DAG.getUNDEF(VT: ShuffleVT) }; |
| 8076 | for (unsigned i = 0; i < Sources.size(); ++i) |
| 8077 | ShuffleOps[i] = Sources[i].ShuffleVec; |
| 8078 | |
| 8079 | SDValue Shuffle = buildLegalVectorShuffle(VT: ShuffleVT, DL: dl, N0: ShuffleOps[0], |
| 8080 | N1: ShuffleOps[1], Mask, DAG); |
| 8081 | if (!Shuffle) |
| 8082 | return SDValue(); |
| 8083 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Shuffle); |
| 8084 | } |
| 8085 | |
| 8086 | enum ShuffleOpCodes { |
| 8087 | OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> |
| 8088 | OP_VREV, |
| 8089 | OP_VDUP0, |
| 8090 | OP_VDUP1, |
| 8091 | OP_VDUP2, |
| 8092 | OP_VDUP3, |
| 8093 | OP_VEXT1, |
| 8094 | OP_VEXT2, |
| 8095 | OP_VEXT3, |
| 8096 | OP_VUZPL, // VUZP, left result |
| 8097 | OP_VUZPR, // VUZP, right result |
| 8098 | OP_VZIPL, // VZIP, left result |
| 8099 | OP_VZIPR, // VZIP, right result |
| 8100 | OP_VTRNL, // VTRN, left result |
| 8101 | OP_VTRNR // VTRN, right result |
| 8102 | }; |
| 8103 | |
| 8104 | static bool isLegalMVEShuffleOp(unsigned PFEntry) { |
| 8105 | unsigned OpNum = (PFEntry >> 26) & 0x0F; |
| 8106 | switch (OpNum) { |
| 8107 | case OP_COPY: |
| 8108 | case OP_VREV: |
| 8109 | case OP_VDUP0: |
| 8110 | case OP_VDUP1: |
| 8111 | case OP_VDUP2: |
| 8112 | case OP_VDUP3: |
| 8113 | return true; |
| 8114 | } |
| 8115 | return false; |
| 8116 | } |
| 8117 | |
| 8118 | /// isShuffleMaskLegal - Targets can use this to indicate that they only |
| 8119 | /// support *some* VECTOR_SHUFFLE operations, those with specific masks. |
| 8120 | /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values |
| 8121 | /// are assumed to be legal. |
| 8122 | bool ARMTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const { |
| 8123 | if (VT.getVectorNumElements() == 4 && |
| 8124 | (VT.is128BitVector() || VT.is64BitVector())) { |
| 8125 | unsigned PFIndexes[4]; |
| 8126 | for (unsigned i = 0; i != 4; ++i) { |
| 8127 | if (M[i] < 0) |
| 8128 | PFIndexes[i] = 8; |
| 8129 | else |
| 8130 | PFIndexes[i] = M[i]; |
| 8131 | } |
| 8132 | |
| 8133 | // Compute the index in the perfect shuffle table. |
| 8134 | unsigned PFTableIndex = |
| 8135 | PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; |
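| | // The four indices form a base-9 number (with 8 encoding an undef lane), so |
| | // for example the mask <0,1,2,3> maps to ((0*9+1)*9+2)*9+3 == 102. |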
| 8136 | unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; |
| 8137 | unsigned Cost = (PFEntry >> 30); |
| 8138 | |
| 8139 | if (Cost <= 4 && (Subtarget->hasNEON() || isLegalMVEShuffleOp(PFEntry))) |
| 8140 | return true; |
| 8141 | } |
| 8142 | |
| 8143 | bool ReverseVEXT, isV_UNDEF; |
| 8144 | unsigned Imm, WhichResult; |
| 8145 | |
| 8146 | unsigned EltSize = VT.getScalarSizeInBits(); |
| 8147 | if (EltSize >= 32 || |
| 8148 | ShuffleVectorSDNode::isSplatMask(Mask: M) || |
| 8149 | ShuffleVectorInst::isIdentityMask(Mask: M, NumSrcElts: M.size()) || |
| 8150 | isVREVMask(M, VT, BlockSize: 64) || |
| 8151 | isVREVMask(M, VT, BlockSize: 32) || |
| 8152 | isVREVMask(M, VT, BlockSize: 16)) |
| 8153 | return true; |
| 8154 | else if (Subtarget->hasNEON() && |
| 8155 | (isVEXTMask(M, VT, ReverseVEXT, Imm) || |
| 8156 | isVTBLMask(M, VT) || |
| 8157 | isNEONTwoResultShuffleMask(ShuffleMask: M, VT, WhichResult, isV_UNDEF))) |
| 8158 | return true; |
| 8159 | else if ((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) && |
| 8160 | isReverseMask(M, VT)) |
| 8161 | return true; |
| 8162 | else if (Subtarget->hasMVEIntegerOps() && |
| 8163 | (isVMOVNMask(M, VT, Top: true, SingleSource: false) || |
| 8164 | isVMOVNMask(M, VT, Top: false, SingleSource: false) || isVMOVNMask(M, VT, Top: true, SingleSource: true))) |
| 8165 | return true; |
| 8166 | else if (Subtarget->hasMVEIntegerOps() && |
| 8167 | (isTruncMask(M, VT, Top: false, SingleSource: false) || |
| 8168 | isTruncMask(M, VT, Top: false, SingleSource: true) || |
| 8169 | isTruncMask(M, VT, Top: true, SingleSource: false) || isTruncMask(M, VT, Top: true, SingleSource: true))) |
| 8170 | return true; |
| 8171 | else |
| 8172 | return false; |
| 8173 | } |
| 8174 | |
| 8175 | /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit |
| 8176 | /// the specified operations to build the shuffle. |
| 8177 | static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, |
| 8178 | SDValue RHS, SelectionDAG &DAG, |
| 8179 | const SDLoc &dl) { |
| 8180 | unsigned OpNum = (PFEntry >> 26) & 0x0F; |
| 8181 | unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); |
| 8182 | unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); |
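| | // Each 13-bit ID is itself a base-9 encoded mask used to recurse into the |
| | // table: (1*9+2)*9+3 below encodes <0,1,2,3> (an identity copy of LHS) and |
| | // ((4*9+5)*9+6)*9+7 encodes <4,5,6,7> (a copy of RHS). |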
| 8183 | |
| 8184 | if (OpNum == OP_COPY) { |
| 8185 | if (LHSID == (1*9+2)*9+3) return LHS; |
| 8186 | assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!" ); |
| 8187 | return RHS; |
| 8188 | } |
| 8189 | |
| 8190 | SDValue OpLHS, OpRHS; |
| 8191 | OpLHS = GeneratePerfectShuffle(PFEntry: PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); |
| 8192 | OpRHS = GeneratePerfectShuffle(PFEntry: PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); |
| 8193 | EVT VT = OpLHS.getValueType(); |
| 8194 | |
| 8195 | switch (OpNum) { |
| 8196 | default: llvm_unreachable("Unknown shuffle opcode!" ); |
| 8197 | case OP_VREV: |
| 8198 | // VREV divides the vector in half and swaps within the half. |
| 8199 | if (VT.getScalarSizeInBits() == 32) |
| 8200 | return DAG.getNode(Opcode: ARMISD::VREV64, DL: dl, VT, Operand: OpLHS); |
| 8201 | // vrev <4 x i16> -> VREV32 |
| 8202 | if (VT.getScalarSizeInBits() == 16) |
| 8203 | return DAG.getNode(Opcode: ARMISD::VREV32, DL: dl, VT, Operand: OpLHS); |
| 8204 | // vrev <4 x i8> -> VREV16 |
| 8205 | assert(VT.getScalarSizeInBits() == 8); |
| 8206 | return DAG.getNode(Opcode: ARMISD::VREV16, DL: dl, VT, Operand: OpLHS); |
| 8207 | case OP_VDUP0: |
| 8208 | case OP_VDUP1: |
| 8209 | case OP_VDUP2: |
| 8210 | case OP_VDUP3: |
| 8211 | return DAG.getNode(Opcode: ARMISD::VDUPLANE, DL: dl, VT, |
| 8212 | N1: OpLHS, N2: DAG.getConstant(Val: OpNum-OP_VDUP0, DL: dl, VT: MVT::i32)); |
| 8213 | case OP_VEXT1: |
| 8214 | case OP_VEXT2: |
| 8215 | case OP_VEXT3: |
| 8216 | return DAG.getNode(Opcode: ARMISD::VEXT, DL: dl, VT, |
| 8217 | N1: OpLHS, N2: OpRHS, |
| 8218 | N3: DAG.getConstant(Val: OpNum - OP_VEXT1 + 1, DL: dl, VT: MVT::i32)); |
| 8219 | case OP_VUZPL: |
| 8220 | case OP_VUZPR: |
| 8221 | return DAG.getNode(Opcode: ARMISD::VUZP, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT), |
| 8222 | N1: OpLHS, N2: OpRHS).getValue(R: OpNum-OP_VUZPL); |
| 8223 | case OP_VZIPL: |
| 8224 | case OP_VZIPR: |
| 8225 | return DAG.getNode(Opcode: ARMISD::VZIP, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT), |
| 8226 | N1: OpLHS, N2: OpRHS).getValue(R: OpNum-OP_VZIPL); |
| 8227 | case OP_VTRNL: |
| 8228 | case OP_VTRNR: |
| 8229 | return DAG.getNode(Opcode: ARMISD::VTRN, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT), |
| 8230 | N1: OpLHS, N2: OpRHS).getValue(R: OpNum-OP_VTRNL); |
| 8231 | } |
| 8232 | } |
| 8233 | |
| 8234 | static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, |
| 8235 | ArrayRef<int> ShuffleMask, |
| 8236 | SelectionDAG &DAG) { |
| 8237 | // Check to see if we can use the VTBL instruction. |
| 8238 | SDValue V1 = Op.getOperand(i: 0); |
| 8239 | SDValue V2 = Op.getOperand(i: 1); |
| 8240 | SDLoc DL(Op); |
| 8241 | |
| 8242 | SmallVector<SDValue, 8> VTBLMask; |
| 8243 | for (int I : ShuffleMask) |
| 8244 | VTBLMask.push_back(Elt: DAG.getSignedConstant(Val: I, DL, VT: MVT::i32)); |
| 8245 | |
| 8246 | if (V2.getNode()->isUndef()) |
| 8247 | return DAG.getNode(Opcode: ARMISD::VTBL1, DL, VT: MVT::v8i8, N1: V1, |
| 8248 | N2: DAG.getBuildVector(VT: MVT::v8i8, DL, Ops: VTBLMask)); |
| 8249 | |
| 8250 | return DAG.getNode(Opcode: ARMISD::VTBL2, DL, VT: MVT::v8i8, N1: V1, N2: V2, |
| 8251 | N3: DAG.getBuildVector(VT: MVT::v8i8, DL, Ops: VTBLMask)); |
| 8252 | } |
| 8253 | |
| 8254 | static SDValue LowerReverse_VECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { |
| 8255 | SDLoc DL(Op); |
| 8256 | EVT VT = Op.getValueType(); |
| 8257 | |
| 8258 | assert((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) && |
| 8259 | "Expect an v8i16/v16i8 type" ); |
| 8260 | SDValue OpLHS = DAG.getNode(Opcode: ARMISD::VREV64, DL, VT, Operand: Op.getOperand(i: 0)); |
| 8261 | // For a v16i8 type: After the VREV, we have got <7, ..., 0, 15, ..., 8>. Now, |
| 8262 | // extract the first 8 bytes into the top double word and the last 8 bytes |
| 8263 | // into the bottom double word, through a new vector shuffle that will be |
| 8264 | // turned into a VEXT on Neon, or a couple of VMOVDs on MVE. |
| 8265 | std::vector<int> NewMask; |
| 8266 | for (unsigned i = 0; i < VT.getVectorNumElements() / 2; i++) |
| 8267 | NewMask.push_back(x: VT.getVectorNumElements() / 2 + i); |
| 8268 | for (unsigned i = 0; i < VT.getVectorNumElements() / 2; i++) |
| 8269 | NewMask.push_back(x: i); |
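| | // For a v16i8 input this mask is <8..15,0..7>, so combined with the VREV64 |
| | // above the whole vector ends up fully reversed: <15,14,...,1,0>. |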
| 8270 | return DAG.getVectorShuffle(VT, dl: DL, N1: OpLHS, N2: OpLHS, Mask: NewMask); |
| 8271 | } |
| 8272 | |
| 8273 | static EVT getVectorTyFromPredicateVector(EVT VT) { |
| 8274 | switch (VT.getSimpleVT().SimpleTy) { |
| 8275 | case MVT::v2i1: |
| 8276 | return MVT::v2f64; |
| 8277 | case MVT::v4i1: |
| 8278 | return MVT::v4i32; |
| 8279 | case MVT::v8i1: |
| 8280 | return MVT::v8i16; |
| 8281 | case MVT::v16i1: |
| 8282 | return MVT::v16i8; |
| 8283 | default: |
| 8284 | llvm_unreachable("Unexpected vector predicate type" ); |
| 8285 | } |
| 8286 | } |
| 8287 | |
| 8288 | static SDValue PromoteMVEPredVector(SDLoc dl, SDValue Pred, EVT VT, |
| 8289 | SelectionDAG &DAG) { |
| 8290 | // Converting from boolean predicates to integers involves creating a vector |
| 8291 | // of all ones or all zeroes and selecting the lanes based upon the real |
| 8292 | // predicate. |
| 8293 | SDValue AllOnes = |
| 8294 | DAG.getTargetConstant(Val: ARM_AM::createVMOVModImm(OpCmode: 0xe, Val: 0xff), DL: dl, VT: MVT::i32); |
| 8295 | AllOnes = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: MVT::v16i8, Operand: AllOnes); |
| 8296 | |
| 8297 | SDValue AllZeroes = |
| 8298 | DAG.getTargetConstant(Val: ARM_AM::createVMOVModImm(OpCmode: 0xe, Val: 0x0), DL: dl, VT: MVT::i32); |
| 8299 | AllZeroes = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: MVT::v16i8, Operand: AllZeroes); |
| 8300 | |
| 8301 | // Get full vector type from predicate type |
| 8302 | EVT NewVT = getVectorTyFromPredicateVector(VT); |
| 8303 | |
| 8304 | SDValue RecastV1; |
| 8305 | // If the real predicate is a v8i1 or v4i1 (not v16i1) then we need to recast |
| 8306 | // this to a v16i1. This cannot be done with an ordinary bitcast because the |
| 8307 | // vector types have different sizes. We have to use an MVE-specific |
| 8308 | // PREDICATE_CAST node, since in hardware the underlying register is the same. |
| 8309 | if (VT != MVT::v16i1) |
| 8310 | RecastV1 = DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::v16i1, Operand: Pred); |
| 8311 | else |
| 8312 | RecastV1 = Pred; |
| 8313 | |
| 8314 | // Select either all ones or zeroes depending upon the real predicate bits. |
| 8315 | SDValue PredAsVector = |
| 8316 | DAG.getNode(Opcode: ISD::VSELECT, DL: dl, VT: MVT::v16i8, N1: RecastV1, N2: AllOnes, N3: AllZeroes); |
| 8317 | |
| 8318 | // Recast our new predicate-as-integer v16i8 vector into something |
| 8319 | // appropriate for the shuffle, i.e. v4i32 for a real v4i1 predicate. |
| 8320 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: NewVT, Operand: PredAsVector); |
| 8321 | } |
| 8322 | |
| 8323 | static SDValue LowerVECTOR_SHUFFLE_i1(SDValue Op, SelectionDAG &DAG, |
| 8324 | const ARMSubtarget *ST) { |
| 8325 | EVT VT = Op.getValueType(); |
| 8326 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Val: Op.getNode()); |
| 8327 | ArrayRef<int> ShuffleMask = SVN->getMask(); |
| 8328 | |
| 8329 | assert(ST->hasMVEIntegerOps() && |
| 8330 | "No support for vector shuffle of boolean predicates" ); |
| 8331 | |
| 8332 | SDValue V1 = Op.getOperand(i: 0); |
| 8333 | SDValue V2 = Op.getOperand(i: 1); |
| 8334 | SDLoc dl(Op); |
| 8335 | if (isReverseMask(M: ShuffleMask, VT)) { |
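| | // The predicate bits live in the low 16 bits of the i32: BITREVERSE moves |
| | // them, reversed, into the top half and the SRL brings them back down, |
| | // reversing the order of the predicate lanes. |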
| 8336 | SDValue cast = DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::i32, Operand: V1); |
| 8337 | SDValue rbit = DAG.getNode(Opcode: ISD::BITREVERSE, DL: dl, VT: MVT::i32, Operand: cast); |
| 8338 | SDValue srl = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i32, N1: rbit, |
| 8339 | N2: DAG.getConstant(Val: 16, DL: dl, VT: MVT::i32)); |
| 8340 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT, Operand: srl); |
| 8341 | } |
| 8342 | |
| 8343 | // Until we can come up with optimised cases for every single vector |
| 8344 | // shuffle in existence, we have chosen the least painful strategy. This is |
| 8345 | // to essentially promote the boolean predicate to an 8-bit integer, where |
| 8346 | // each predicate represents a byte. Then we fall back on a normal integer |
| 8347 | // vector shuffle and convert the result back into a predicate vector. In |
| 8348 | // many cases the generated code might be even better than scalar code |
| 8349 | // operating on bits. Just imagine trying to shuffle 8 arbitrary 2-bit |
| 8350 | // fields in a register into 8 other arbitrary 2-bit fields! |
| 8351 | SDValue PredAsVector1 = PromoteMVEPredVector(dl, Pred: V1, VT, DAG); |
| 8352 | EVT NewVT = PredAsVector1.getValueType(); |
| 8353 | SDValue PredAsVector2 = V2.isUndef() ? DAG.getUNDEF(VT: NewVT) |
| 8354 | : PromoteMVEPredVector(dl, Pred: V2, VT, DAG); |
| 8355 | assert(PredAsVector2.getValueType() == NewVT && |
| 8356 | "Expected identical vector type in expanded i1 shuffle!" ); |
| 8357 | |
| 8358 | // Do the shuffle! |
| 8359 | SDValue Shuffled = DAG.getVectorShuffle(VT: NewVT, dl, N1: PredAsVector1, |
| 8360 | N2: PredAsVector2, Mask: ShuffleMask); |
| 8361 | |
| 8362 | // Now return the result of comparing the shuffled vector with zero, |
| 8363 | // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1. For a v2i1 |
| 8364 | // we convert to a v4i1 compare to fill in the two halves of the i64 as i32s. |
| 8365 | if (VT == MVT::v2i1) { |
| 8366 | SDValue BC = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: Shuffled); |
| 8367 | SDValue Cmp = DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT: MVT::v4i1, N1: BC, |
| 8368 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
| 8369 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::v2i1, Operand: Cmp); |
| 8370 | } |
| 8371 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: Shuffled, |
| 8372 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
| 8373 | } |
| 8374 | |
| 8375 | static SDValue LowerVECTOR_SHUFFLEUsingMovs(SDValue Op, |
| 8376 | ArrayRef<int> ShuffleMask, |
| 8377 | SelectionDAG &DAG) { |
| 8378 | // Attempt to lower the vector shuffle using as many whole register movs as |
| 8379 | // possible. This is useful for types smaller than 32 bits, which would |
| 8380 | // often otherwise become a series of GPR movs. |
| 8381 | SDLoc dl(Op); |
| 8382 | EVT VT = Op.getValueType(); |
| 8383 | if (VT.getScalarSizeInBits() >= 32) |
| 8384 | return SDValue(); |
| 8385 | |
| 8386 | assert((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) && |
| 8387 | "Unexpected vector type" ); |
| 8388 | int NumElts = VT.getVectorNumElements(); |
| 8389 | int QuarterSize = NumElts / 4; |
| 8390 | // The four final parts of the vector, as i32's |
| 8391 | SDValue Parts[4]; |
| 8392 | |
| 8393 | // Look for full lane vmovs like <0,1,2,3> or <u,5,6,7> etc, (but not |
| 8394 | // <u,u,u,u>), returning the vmov lane index |
| 8395 | auto getMovIdx = [](ArrayRef<int> ShuffleMask, int Start, int Length) { |
| 8396 | // Detect which mov lane this would be from the first non-undef element. |
| 8397 | int MovIdx = -1; |
| 8398 | for (int i = 0; i < Length; i++) { |
| 8399 | if (ShuffleMask[Start + i] >= 0) { |
| 8400 | if (ShuffleMask[Start + i] % Length != i) |
| 8401 | return -1; |
| 8402 | MovIdx = ShuffleMask[Start + i] / Length; |
| 8403 | break; |
| 8404 | } |
| 8405 | } |
| 8406 | // If all items are undef, leave this for other combines |
| 8407 | if (MovIdx == -1) |
| 8408 | return -1; |
| 8409 | // Check the remaining values are the correct part of the same mov |
| 8410 | for (int i = 1; i < Length; i++) { |
| 8411 | if (ShuffleMask[Start + i] >= 0 && |
| 8412 | (ShuffleMask[Start + i] / Length != MovIdx || |
| 8413 | ShuffleMask[Start + i] % Length != i)) |
| 8414 | return -1; |
| 8415 | } |
| 8416 | return MovIdx; |
| 8417 | }; |
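| | // For example, with a v16i8 shuffle (QuarterSize == 4) the quarter mask |
| | // <u,21,22,23> gives MovIdx 5, i.e. i32 lane 1 of the second input once the |
| | // Elt >= 4 rebasing below is applied. |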
| 8418 | |
| 8419 | for (int Part = 0; Part < 4; ++Part) { |
| 8420 | // Does this part look like a mov |
| 8421 | int Elt = getMovIdx(ShuffleMask, Part * QuarterSize, QuarterSize); |
| 8422 | if (Elt != -1) { |
| 8423 | SDValue Input = Op->getOperand(Num: 0); |
| 8424 | if (Elt >= 4) { |
| 8425 | Input = Op->getOperand(Num: 1); |
| 8426 | Elt -= 4; |
| 8427 | } |
| 8428 | SDValue BitCast = DAG.getBitcast(VT: MVT::v4f32, V: Input); |
| 8429 | Parts[Part] = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f32, N1: BitCast, |
| 8430 | N2: DAG.getConstant(Val: Elt, DL: dl, VT: MVT::i32)); |
| 8431 | } |
| 8432 | } |
| 8433 | |
| 8434 | // Nothing interesting found, just return |
| 8435 | if (!Parts[0] && !Parts[1] && !Parts[2] && !Parts[3]) |
| 8436 | return SDValue(); |
| 8437 | |
| 8438 | // The other parts need to be built with the old shuffle vector, cast to a |
| 8439 | // v4i32 and extract_vector_elts |
| 8440 | if (!Parts[0] || !Parts[1] || !Parts[2] || !Parts[3]) { |
| 8441 | SmallVector<int, 16> NewShuffleMask; |
| 8442 | for (int Part = 0; Part < 4; ++Part) |
| 8443 | for (int i = 0; i < QuarterSize; i++) |
| 8444 | NewShuffleMask.push_back( |
| 8445 | Elt: Parts[Part] ? -1 : ShuffleMask[Part * QuarterSize + i]); |
| 8446 | SDValue NewShuffle = DAG.getVectorShuffle( |
| 8447 | VT, dl, N1: Op->getOperand(Num: 0), N2: Op->getOperand(Num: 1), Mask: NewShuffleMask); |
| 8448 | SDValue BitCast = DAG.getBitcast(VT: MVT::v4f32, V: NewShuffle); |
| 8449 | |
| 8450 | for (int Part = 0; Part < 4; ++Part) |
| 8451 | if (!Parts[Part]) |
| 8452 | Parts[Part] = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f32, |
| 8453 | N1: BitCast, N2: DAG.getConstant(Val: Part, DL: dl, VT: MVT::i32)); |
| 8454 | } |
| 8455 | // Build a vector out of the various parts and bitcast it back to the original |
| 8456 | // type. |
| 8457 | SDValue NewVec = DAG.getNode(Opcode: ARMISD::BUILD_VECTOR, DL: dl, VT: MVT::v4f32, Ops: Parts); |
| 8458 | return DAG.getBitcast(VT, V: NewVec); |
| 8459 | } |
| 8460 | |
| 8461 | static SDValue LowerVECTOR_SHUFFLEUsingOneOff(SDValue Op, |
| 8462 | ArrayRef<int> ShuffleMask, |
| 8463 | SelectionDAG &DAG) { |
| 8464 | SDValue V1 = Op.getOperand(i: 0); |
| 8465 | SDValue V2 = Op.getOperand(i: 1); |
| 8466 | EVT VT = Op.getValueType(); |
| 8467 | unsigned NumElts = VT.getVectorNumElements(); |
| 8468 | |
| 8469 | // A One-Off Identity mask is one that is mostly an identity mask from a |
| 8470 | // single source but contains a single element out of place, either from a |
| 8471 | // different vector or from another position in the same vector. Rather than |
| 8472 | // lowering this via an ARMISD::BUILD_VECTOR we can generate an extract/insert |
| 8473 | // pair directly. |
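| | // For example, the v4i32 mask <0,1,6,3> is an identity of V1 except for |
| | // element 2, which comes from lane 2 of V2, so it lowers to an extract from |
| | // V2 followed by an insert into V1. |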
| 8474 | auto isOneOffIdentityMask = [](ArrayRef<int> Mask, EVT VT, int BaseOffset, |
| 8475 | int &OffElement) { |
| 8476 | OffElement = -1; |
| 8477 | int NonUndef = 0; |
| 8478 | for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) { |
| 8479 | if (Mask[i] == -1) |
| 8480 | continue; |
| 8481 | NonUndef++; |
| 8482 | if (Mask[i] != i + BaseOffset) { |
| 8483 | if (OffElement == -1) |
| 8484 | OffElement = i; |
| 8485 | else |
| 8486 | return false; |
| 8487 | } |
| 8488 | } |
| 8489 | return NonUndef > 2 && OffElement != -1; |
| 8490 | }; |
| 8491 | int OffElement; |
| 8492 | SDValue VInput; |
| 8493 | if (isOneOffIdentityMask(ShuffleMask, VT, 0, OffElement)) |
| 8494 | VInput = V1; |
| 8495 | else if (isOneOffIdentityMask(ShuffleMask, VT, NumElts, OffElement)) |
| 8496 | VInput = V2; |
| 8497 | else |
| 8498 | return SDValue(); |
| 8499 | |
| 8500 | SDLoc dl(Op); |
| 8501 | EVT SVT = VT.getScalarType() == MVT::i8 || VT.getScalarType() == MVT::i16 |
| 8502 | ? MVT::i32 |
| 8503 | : VT.getScalarType(); |
| 8504 | SDValue Elt = DAG.getNode( |
| 8505 | Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: SVT, |
| 8506 | N1: ShuffleMask[OffElement] < (int)NumElts ? V1 : V2, |
| 8507 | N2: DAG.getVectorIdxConstant(Val: ShuffleMask[OffElement] % NumElts, DL: dl)); |
| 8508 | return DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT, N1: VInput, N2: Elt, |
| 8509 | N3: DAG.getVectorIdxConstant(Val: OffElement % NumElts, DL: dl)); |
| 8510 | } |
| 8511 | |
| 8512 | static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG, |
| 8513 | const ARMSubtarget *ST) { |
| 8514 | SDValue V1 = Op.getOperand(i: 0); |
| 8515 | SDValue V2 = Op.getOperand(i: 1); |
| 8516 | SDLoc dl(Op); |
| 8517 | EVT VT = Op.getValueType(); |
| 8518 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Val: Op.getNode()); |
| 8519 | unsigned EltSize = VT.getScalarSizeInBits(); |
| 8520 | |
| 8521 | if (ST->hasMVEIntegerOps() && EltSize == 1) |
| 8522 | return LowerVECTOR_SHUFFLE_i1(Op, DAG, ST); |
| 8523 | |
| 8524 | // Convert shuffles that are directly supported on NEON to target-specific |
| 8525 | // DAG nodes, instead of keeping them as shuffles and matching them again |
| 8526 | // during code selection. This is more efficient and avoids the possibility |
| 8527 | // of inconsistencies between legalization and selection. |
| 8528 | // FIXME: floating-point vectors should be canonicalized to integer vectors |
| 8529 | // of the same size so that they get CSEd properly. |
| 8530 | ArrayRef<int> ShuffleMask = SVN->getMask(); |
| 8531 | |
| 8532 | if (EltSize <= 32) { |
| 8533 | if (SVN->isSplat()) { |
| 8534 | int Lane = SVN->getSplatIndex(); |
| 8535 | // If this is undef splat, generate it via "just" vdup, if possible. |
| 8536 | if (Lane == -1) Lane = 0; |
| 8537 | |
| 8538 | // Test if V1 is a SCALAR_TO_VECTOR. |
| 8539 | if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { |
| 8540 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT, Operand: V1.getOperand(i: 0)); |
| 8541 | } |
| 8542 | // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR |
| 8543 | // (and probably will turn into a SCALAR_TO_VECTOR once legalization |
| 8544 | // reaches it). |
| 8545 | if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR && |
| 8546 | !isa<ConstantSDNode>(Val: V1.getOperand(i: 0))) { |
| 8547 | bool IsScalarToVector = true; |
| 8548 | for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i) |
| 8549 | if (!V1.getOperand(i).isUndef()) { |
| 8550 | IsScalarToVector = false; |
| 8551 | break; |
| 8552 | } |
| 8553 | if (IsScalarToVector) |
| 8554 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT, Operand: V1.getOperand(i: 0)); |
| 8555 | } |
| 8556 | return DAG.getNode(Opcode: ARMISD::VDUPLANE, DL: dl, VT, N1: V1, |
| 8557 | N2: DAG.getConstant(Val: Lane, DL: dl, VT: MVT::i32)); |
| 8558 | } |
| 8559 | |
| 8560 | bool ReverseVEXT = false; |
| 8561 | unsigned Imm = 0; |
| 8562 | if (ST->hasNEON() && isVEXTMask(M: ShuffleMask, VT, ReverseVEXT, Imm)) { |
| 8563 | if (ReverseVEXT) |
| 8564 | std::swap(a&: V1, b&: V2); |
| 8565 | return DAG.getNode(Opcode: ARMISD::VEXT, DL: dl, VT, N1: V1, N2: V2, |
| 8566 | N3: DAG.getConstant(Val: Imm, DL: dl, VT: MVT::i32)); |
| 8567 | } |
| 8568 | |
| 8569 | if (isVREVMask(M: ShuffleMask, VT, BlockSize: 64)) |
| 8570 | return DAG.getNode(Opcode: ARMISD::VREV64, DL: dl, VT, Operand: V1); |
| 8571 | if (isVREVMask(M: ShuffleMask, VT, BlockSize: 32)) |
| 8572 | return DAG.getNode(Opcode: ARMISD::VREV32, DL: dl, VT, Operand: V1); |
| 8573 | if (isVREVMask(M: ShuffleMask, VT, BlockSize: 16)) |
| 8574 | return DAG.getNode(Opcode: ARMISD::VREV16, DL: dl, VT, Operand: V1); |
| 8575 | |
| 8576 | if (ST->hasNEON() && V2->isUndef() && isSingletonVEXTMask(M: ShuffleMask, VT, Imm)) { |
| 8577 | return DAG.getNode(Opcode: ARMISD::VEXT, DL: dl, VT, N1: V1, N2: V1, |
| 8578 | N3: DAG.getConstant(Val: Imm, DL: dl, VT: MVT::i32)); |
| 8579 | } |
| 8580 | |
| 8581 | // Check for Neon shuffles that modify both input vectors in place. |
| 8582 | // If both results are used, i.e., if there are two shuffles with the same |
| 8583 | // source operands and with masks corresponding to both results of one of |
| 8584 | // these operations, DAG memoization will ensure that a single node is |
| 8585 | // used for both shuffles. |
| 8586 | unsigned WhichResult = 0; |
| 8587 | bool isV_UNDEF = false; |
| 8588 | if (ST->hasNEON()) { |
| 8589 | if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( |
| 8590 | ShuffleMask, VT, WhichResult, isV_UNDEF)) { |
| 8591 | if (isV_UNDEF) |
| 8592 | V2 = V1; |
| 8593 | return DAG.getNode(Opcode: ShuffleOpc, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT), N1: V1, N2: V2) |
| 8594 | .getValue(R: WhichResult); |
| 8595 | } |
| 8596 | } |
| 8597 | if (ST->hasMVEIntegerOps()) { |
| 8598 | if (isVMOVNMask(M: ShuffleMask, VT, Top: false, SingleSource: false)) |
| 8599 | return DAG.getNode(Opcode: ARMISD::VMOVN, DL: dl, VT, N1: V2, N2: V1, |
| 8600 | N3: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 8601 | if (isVMOVNMask(M: ShuffleMask, VT, Top: true, SingleSource: false)) |
| 8602 | return DAG.getNode(Opcode: ARMISD::VMOVN, DL: dl, VT, N1: V1, N2: V2, |
| 8603 | N3: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
| 8604 | if (isVMOVNMask(M: ShuffleMask, VT, Top: true, SingleSource: true)) |
| 8605 | return DAG.getNode(Opcode: ARMISD::VMOVN, DL: dl, VT, N1: V1, N2: V1, |
| 8606 | N3: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
| 8607 | } |
| 8608 | |
| 8609 | // Also check for these shuffles through CONCAT_VECTORS: we canonicalize |
| 8610 | // shuffles that produce a result larger than their operands with: |
| 8611 | // shuffle(concat(v1, undef), concat(v2, undef)) |
| 8612 | // -> |
| 8613 | // shuffle(concat(v1, v2), undef) |
| 8614 | // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine). |
| 8615 | // |
| 8616 | // This is useful in the general case, but there are special cases where |
| 8617 | // native shuffles produce larger results: the two-result ops. |
| 8618 | // |
| 8619 | // Look through the concat when lowering them: |
| 8620 | // shuffle(concat(v1, v2), undef) |
| 8621 | // -> |
| 8622 | // concat(VZIP(v1, v2):0, :1) |
| 8623 | // |
| 8624 | if (ST->hasNEON() && V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) { |
| 8625 | SDValue SubV1 = V1->getOperand(Num: 0); |
| 8626 | SDValue SubV2 = V1->getOperand(Num: 1); |
| 8627 | EVT SubVT = SubV1.getValueType(); |
| 8628 | |
| 8629 | // We expect these to have been canonicalized to -1. |
| 8630 | assert(llvm::all_of(ShuffleMask, [&](int i) { |
| 8631 | return i < (int)VT.getVectorNumElements(); |
| 8632 | }) && "Unexpected shuffle index into UNDEF operand!" ); |
| 8633 | |
| 8634 | if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( |
| 8635 | ShuffleMask, VT: SubVT, WhichResult, isV_UNDEF)) { |
| 8636 | if (isV_UNDEF) |
| 8637 | SubV2 = SubV1; |
| 8638 | assert((WhichResult == 0) && |
| 8639 | "In-place shuffle of concat can only have one result!" ); |
| 8640 | SDValue Res = DAG.getNode(Opcode: ShuffleOpc, DL: dl, VTList: DAG.getVTList(VT1: SubVT, VT2: SubVT), |
| 8641 | N1: SubV1, N2: SubV2); |
| 8642 | return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT, N1: Res.getValue(R: 0), |
| 8643 | N2: Res.getValue(R: 1)); |
| 8644 | } |
| 8645 | } |
| 8646 | } |
| 8647 | |
| 8648 | if (ST->hasMVEIntegerOps() && EltSize <= 32) { |
| 8649 | if (SDValue V = LowerVECTOR_SHUFFLEUsingOneOff(Op, ShuffleMask, DAG)) |
| 8650 | return V; |
| 8651 | |
| 8652 | for (bool Top : {false, true}) { |
| 8653 | for (bool SingleSource : {false, true}) { |
| 8654 | if (isTruncMask(M: ShuffleMask, VT, Top, SingleSource)) { |
| 8655 | MVT FromSVT = MVT::getIntegerVT(BitWidth: EltSize * 2); |
| 8656 | MVT FromVT = MVT::getVectorVT(VT: FromSVT, NumElements: ShuffleMask.size() / 2); |
| 8657 | SDValue Lo = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: FromVT, Operand: V1); |
| 8658 | SDValue Hi = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: FromVT, |
| 8659 | Operand: SingleSource ? V1 : V2); |
| 8660 | if (Top) { |
| 8661 | SDValue Amt = DAG.getConstant(Val: EltSize, DL: dl, VT: FromVT); |
| 8662 | Lo = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: FromVT, N1: Lo, N2: Amt); |
| 8663 | Hi = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: FromVT, N1: Hi, N2: Amt); |
| 8664 | } |
| 8665 | return DAG.getNode(Opcode: ARMISD::MVETRUNC, DL: dl, VT, N1: Lo, N2: Hi); |
| 8666 | } |
| 8667 | } |
| 8668 | } |
| 8669 | } |
| 8670 | |
| 8671 | // If the shuffle is not directly supported and it has 4 elements, use |
| 8672 | // the PerfectShuffle-generated table to synthesize it from other shuffles. |
| 8673 | unsigned NumElts = VT.getVectorNumElements(); |
| 8674 | if (NumElts == 4) { |
| 8675 | unsigned PFIndexes[4]; |
| 8676 | for (unsigned i = 0; i != 4; ++i) { |
| 8677 | if (ShuffleMask[i] < 0) |
| 8678 | PFIndexes[i] = 8; |
| 8679 | else |
| 8680 | PFIndexes[i] = ShuffleMask[i]; |
| 8681 | } |
| 8682 | |
| 8683 | // Compute the index in the perfect shuffle table. |
| 8684 | unsigned PFTableIndex = |
| 8685 | PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; |
| 8686 | unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; |
| 8687 | unsigned Cost = (PFEntry >> 30); |
| 8688 | |
| 8689 | if (Cost <= 4) { |
| 8690 | if (ST->hasNEON()) |
| 8691 | return GeneratePerfectShuffle(PFEntry, LHS: V1, RHS: V2, DAG, dl); |
| 8692 | else if (isLegalMVEShuffleOp(PFEntry)) { |
| 8693 | unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); |
| 8694 | unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); |
| 8695 | unsigned PFEntryLHS = PerfectShuffleTable[LHSID]; |
| 8696 | unsigned PFEntryRHS = PerfectShuffleTable[RHSID]; |
| 8697 | if (isLegalMVEShuffleOp(PFEntry: PFEntryLHS) && isLegalMVEShuffleOp(PFEntry: PFEntryRHS)) |
| 8698 | return GeneratePerfectShuffle(PFEntry, LHS: V1, RHS: V2, DAG, dl); |
| 8699 | } |
| 8700 | } |
| 8701 | } |
| 8702 | |
| 8703 | // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. |
| 8704 | if (EltSize >= 32) { |
| 8705 | // Do the expansion with floating-point types, since that is what the VFP |
| 8706 | // registers are defined to use, and since i64 is not legal. |
| 8707 | EVT EltVT = EVT::getFloatingPointVT(BitWidth: EltSize); |
| 8708 | EVT VecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: EltVT, NumElements: NumElts); |
| 8709 | V1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecVT, Operand: V1); |
| 8710 | V2 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecVT, Operand: V2); |
| 8711 | SmallVector<SDValue, 8> Ops; |
| 8712 | for (unsigned i = 0; i < NumElts; ++i) { |
| 8713 | if (ShuffleMask[i] < 0) |
| 8714 | Ops.push_back(Elt: DAG.getUNDEF(VT: EltVT)); |
| 8715 | else |
| 8716 | Ops.push_back(Elt: DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, |
| 8717 | N1: ShuffleMask[i] < (int)NumElts ? V1 : V2, |
| 8718 | N2: DAG.getConstant(Val: ShuffleMask[i] & (NumElts-1), |
| 8719 | DL: dl, VT: MVT::i32))); |
| 8720 | } |
| 8721 | SDValue Val = DAG.getNode(Opcode: ARMISD::BUILD_VECTOR, DL: dl, VT: VecVT, Ops); |
| 8722 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Val); |
| 8723 | } |
| 8724 | |
| 8725 | if ((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) && |
| 8726 | isReverseMask(M: ShuffleMask, VT)) |
| 8727 | return LowerReverse_VECTOR_SHUFFLE(Op, DAG); |
| 8728 | |
| 8729 | if (ST->hasNEON() && VT == MVT::v8i8) |
| 8730 | if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG)) |
| 8731 | return NewOp; |
| 8732 | |
| 8733 | if (ST->hasMVEIntegerOps()) |
| 8734 | if (SDValue NewOp = LowerVECTOR_SHUFFLEUsingMovs(Op, ShuffleMask, DAG)) |
| 8735 | return NewOp; |
| 8736 | |
| 8737 | return SDValue(); |
| 8738 | } |
| 8739 | |
| 8740 | static SDValue LowerINSERT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG, |
| 8741 | const ARMSubtarget *ST) { |
| 8742 | EVT VecVT = Op.getOperand(i: 0).getValueType(); |
| 8743 | SDLoc dl(Op); |
| 8744 | |
| 8745 | assert(ST->hasMVEIntegerOps() && |
| 8746 | "LowerINSERT_VECTOR_ELT_i1 called without MVE!" ); |
| 8747 | |
| 8748 | SDValue Conv = |
| 8749 | DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::i32, Operand: Op->getOperand(Num: 0)); |
| 8750 | unsigned Lane = Op.getConstantOperandVal(i: 2); |
| 8751 | unsigned LaneWidth = |
| 8752 | getVectorTyFromPredicateVector(VT: VecVT).getScalarSizeInBits() / 8; |
| 8753 | unsigned Mask = ((1 << LaneWidth) - 1) << Lane * LaneWidth; |
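| | // For example, inserting into lane 3 of a v8i1 gives LaneWidth == 2, so the |
| | // BFI below replaces bits 7:6 of the 16-bit predicate with the sign-extended |
| | // element value. |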
| 8754 | SDValue Ext = DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL: dl, VT: MVT::i32, |
| 8755 | N1: Op.getOperand(i: 1), N2: DAG.getValueType(MVT::i1)); |
| 8756 | SDValue BFI = DAG.getNode(Opcode: ARMISD::BFI, DL: dl, VT: MVT::i32, N1: Conv, N2: Ext, |
| 8757 | N3: DAG.getConstant(Val: ~Mask, DL: dl, VT: MVT::i32)); |
| 8758 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: Op.getValueType(), Operand: BFI); |
| 8759 | } |
| 8760 | |
| 8761 | SDValue ARMTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, |
| 8762 | SelectionDAG &DAG) const { |
| 8763 | // INSERT_VECTOR_ELT is legal only for immediate indexes. |
| 8764 | SDValue Lane = Op.getOperand(i: 2); |
| 8765 | if (!isa<ConstantSDNode>(Val: Lane)) |
| 8766 | return SDValue(); |
| 8767 | |
| 8768 | SDValue Elt = Op.getOperand(i: 1); |
| 8769 | EVT EltVT = Elt.getValueType(); |
| 8770 | |
| 8771 | if (Subtarget->hasMVEIntegerOps() && |
| 8772 | Op.getValueType().getScalarSizeInBits() == 1) |
| 8773 | return LowerINSERT_VECTOR_ELT_i1(Op, DAG, ST: Subtarget); |
| 8774 | |
| 8775 | if (getTypeAction(Context&: *DAG.getContext(), VT: EltVT) == |
| 8776 | TargetLowering::TypeSoftPromoteHalf) { |
| 8777 | // INSERT_VECTOR_ELT doesn't want f16 operands promoting to f32, |
| 8778 | // but the type system will try to do that if we don't intervene. |
| 8779 | // Reinterpret any such vector-element insertion as one with the |
| 8780 | // corresponding integer types. |
| 8781 | |
| 8782 | SDLoc dl(Op); |
| 8783 | |
| 8784 | EVT IEltVT = MVT::getIntegerVT(BitWidth: EltVT.getScalarSizeInBits()); |
| 8785 | assert(getTypeAction(*DAG.getContext(), IEltVT) != |
| 8786 | TargetLowering::TypeSoftPromoteHalf); |
| 8787 | |
| 8788 | SDValue VecIn = Op.getOperand(i: 0); |
| 8789 | EVT VecVT = VecIn.getValueType(); |
| 8790 | EVT IVecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: IEltVT, |
| 8791 | NumElements: VecVT.getVectorNumElements()); |
| 8792 | |
| 8793 | SDValue IElt = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: IEltVT, Operand: Elt); |
| 8794 | SDValue IVecIn = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: IVecVT, Operand: VecIn); |
| 8795 | SDValue IVecOut = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: IVecVT, |
| 8796 | N1: IVecIn, N2: IElt, N3: Lane); |
| 8797 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecVT, Operand: IVecOut); |
| 8798 | } |
| 8799 | |
| 8800 | return Op; |
| 8801 | } |
| 8802 | |
| 8803 | static SDValue LowerEXTRACT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG, |
| 8804 | const ARMSubtarget *ST) { |
| 8805 | EVT VecVT = Op.getOperand(i: 0).getValueType(); |
| 8806 | SDLoc dl(Op); |
| 8807 | |
| 8808 | assert(ST->hasMVEIntegerOps() && |
| 8809 | "LowerINSERT_VECTOR_ELT_i1 called without MVE!" ); |
| 8810 | |
| 8811 | SDValue Conv = |
| 8812 | DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::i32, Operand: Op->getOperand(Num: 0)); |
| 8813 | unsigned Lane = Op.getConstantOperandVal(i: 1); |
| 8814 | unsigned LaneWidth = |
| 8815 | getVectorTyFromPredicateVector(VT: VecVT).getScalarSizeInBits() / 8; |
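| | // For example, extracting lane 2 of a v4i1 (LaneWidth == 4) shifts the |
| | // 16-bit predicate right by 8, leaving the requested lane's bits at bit 0. |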
| 8816 | SDValue Shift = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i32, N1: Conv, |
| 8817 | N2: DAG.getConstant(Val: Lane * LaneWidth, DL: dl, VT: MVT::i32)); |
| 8818 | return Shift; |
| 8819 | } |
| 8820 | |
| 8821 | static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG, |
| 8822 | const ARMSubtarget *ST) { |
| 8823 | // EXTRACT_VECTOR_ELT is legal only for immediate indexes. |
| 8824 | SDValue Lane = Op.getOperand(i: 1); |
| 8825 | if (!isa<ConstantSDNode>(Val: Lane)) |
| 8826 | return SDValue(); |
| 8827 | |
| 8828 | SDValue Vec = Op.getOperand(i: 0); |
| 8829 | EVT VT = Vec.getValueType(); |
| 8830 | |
| 8831 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) |
| 8832 | return LowerEXTRACT_VECTOR_ELT_i1(Op, DAG, ST); |
| 8833 | |
| 8834 | if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) { |
| 8835 | SDLoc dl(Op); |
| 8836 | return DAG.getNode(Opcode: ARMISD::VGETLANEu, DL: dl, VT: MVT::i32, N1: Vec, N2: Lane); |
| 8837 | } |
| 8838 | |
| 8839 | return Op; |
| 8840 | } |
| 8841 | |
| 8842 | static SDValue LowerCONCAT_VECTORS_i1(SDValue Op, SelectionDAG &DAG, |
| 8843 | const ARMSubtarget *ST) { |
| 8844 | SDLoc dl(Op); |
| 8845 | assert(Op.getValueType().getScalarSizeInBits() == 1 && |
| 8846 | "Unexpected custom CONCAT_VECTORS lowering" ); |
| 8847 | assert(isPowerOf2_32(Op.getNumOperands()) && |
| 8848 | "Unexpected custom CONCAT_VECTORS lowering" ); |
| 8849 | assert(ST->hasMVEIntegerOps() && |
| 8850 | "CONCAT_VECTORS lowering only supported for MVE" ); |
| 8851 | |
| 8852 | auto ConcatPair = [&](SDValue V1, SDValue V2) { |
| 8853 | EVT Op1VT = V1.getValueType(); |
| 8854 | EVT Op2VT = V2.getValueType(); |
| 8855 | assert(Op1VT == Op2VT && "Operand types don't match!" ); |
| 8856 | assert((Op1VT == MVT::v2i1 || Op1VT == MVT::v4i1 || Op1VT == MVT::v8i1) && |
| 8857 | "Unexpected i1 concat operations!" ); |
| 8858 | EVT VT = Op1VT.getDoubleNumVectorElementsVT(Context&: *DAG.getContext()); |
| 8859 | |
| 8860 | SDValue NewV1 = PromoteMVEPredVector(dl, Pred: V1, VT: Op1VT, DAG); |
| 8861 | SDValue NewV2 = PromoteMVEPredVector(dl, Pred: V2, VT: Op2VT, DAG); |
| 8862 | |
| 8863 | // We now have Op1 + Op2 promoted to vectors of integers, where v8i1 gets |
| 8864 | // promoted to v8i16, etc. |
| 8865 | MVT ElType = |
| 8866 | getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT(); |
| 8867 | unsigned NumElts = 2 * Op1VT.getVectorNumElements(); |
| 8868 | |
| 8869 | EVT ConcatVT = MVT::getVectorVT(VT: ElType, NumElements: NumElts); |
| 8870 | if (Op1VT == MVT::v4i1 || Op1VT == MVT::v8i1) { |
| 8871 | // Use MVETRUNC to truncate the combined NewV1::NewV2 into the smaller |
| 8872 | // ConcatVT. |
| 8873 | SDValue ConVec = |
| 8874 | DAG.getNode(Opcode: ARMISD::MVETRUNC, DL: dl, VT: ConcatVT, N1: NewV1, N2: NewV2); |
| 8875 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: ConVec, |
| 8876 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
| 8877 | } |
| 8878 | |
| 8879 | // Extract the vector elements from Op1 and Op2 one by one and truncate them |
| 8880 | // to be the right size for the destination. For example, if Op1 is v4i1 |
| 8881 | // then the promoted vector is v4i32. The result of concatenation gives a |
| 8882 | // v8i1, which when promoted is v8i16. That means each i32 element from Op1 |
| 8883 | // needs truncating to i16 and inserting in the result. |
| 8884 | auto ExtractInto = [&DAG, &dl](SDValue NewV, SDValue ConVec, unsigned &j) { |
| 8885 | EVT NewVT = NewV.getValueType(); |
| 8886 | EVT ConcatVT = ConVec.getValueType(); |
| 8887 | unsigned ExtScale = 1; |
| 8888 | if (NewVT == MVT::v2f64) { |
| 8889 | NewV = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: NewV); |
| 8890 | ExtScale = 2; |
| 8891 | } |
| 8892 | for (unsigned i = 0, e = NewVT.getVectorNumElements(); i < e; i++, j++) { |
| 8893 | SDValue Elt = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::i32, N1: NewV, |
| 8894 | N2: DAG.getIntPtrConstant(Val: i * ExtScale, DL: dl)); |
| 8895 | ConVec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: ConcatVT, N1: ConVec, N2: Elt, |
| 8896 | N3: DAG.getConstant(Val: j, DL: dl, VT: MVT::i32)); |
| 8897 | } |
| 8898 | return ConVec; |
| 8899 | }; |
| 8900 | unsigned j = 0; |
| 8901 | SDValue ConVec = DAG.getNode(Opcode: ISD::UNDEF, DL: dl, VT: ConcatVT); |
| 8902 | ConVec = ExtractInto(NewV1, ConVec, j); |
| 8903 | ConVec = ExtractInto(NewV2, ConVec, j); |
| 8904 | |
| 8905 | // Now return the result of comparing the subvector with zero, which will |
| 8906 | // generate a real predicate, i.e. v4i1, v8i1 or v16i1. |
| 8907 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: ConVec, |
| 8908 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
| 8909 | }; |
| 8910 | |
| 8911 | // Concat each pair of subvectors and pack into the lower half of the array. |
| 8912 | SmallVector<SDValue> ConcatOps(Op->ops()); |
| 8913 | while (ConcatOps.size() > 1) { |
| 8914 | for (unsigned I = 0, E = ConcatOps.size(); I != E; I += 2) { |
| 8915 | SDValue V1 = ConcatOps[I]; |
| 8916 | SDValue V2 = ConcatOps[I + 1]; |
| 8917 | ConcatOps[I / 2] = ConcatPair(V1, V2); |
| 8918 | } |
| 8919 | ConcatOps.resize(N: ConcatOps.size() / 2); |
| 8920 | } |
| 8921 | return ConcatOps[0]; |
| 8922 | } |
| 8923 | |
| 8924 | static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG, |
| 8925 | const ARMSubtarget *ST) { |
| 8926 | EVT VT = Op->getValueType(ResNo: 0); |
| 8927 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) |
| 8928 | return LowerCONCAT_VECTORS_i1(Op, DAG, ST); |
| 8929 | |
| 8930 | // The only time a CONCAT_VECTORS operation can have legal types is when |
| 8931 | // two 64-bit vectors are concatenated to a 128-bit vector. |
| 8932 | assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && |
| 8933 | "unexpected CONCAT_VECTORS" ); |
| 8934 | SDLoc dl(Op); |
| 8935 | SDValue Val = DAG.getUNDEF(VT: MVT::v2f64); |
| 8936 | SDValue Op0 = Op.getOperand(i: 0); |
| 8937 | SDValue Op1 = Op.getOperand(i: 1); |
| 8938 | if (!Op0.isUndef()) |
| 8939 | Val = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: Val, |
| 8940 | N2: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f64, Operand: Op0), |
| 8941 | N3: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
| 8942 | if (!Op1.isUndef()) |
| 8943 | Val = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: Val, |
| 8944 | N2: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f64, Operand: Op1), |
| 8945 | N3: DAG.getIntPtrConstant(Val: 1, DL: dl)); |
| 8946 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: Op.getValueType(), Operand: Val); |
| 8947 | } |
| 8948 | |
| 8949 | static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG, |
| 8950 | const ARMSubtarget *ST) { |
| 8951 | SDValue V1 = Op.getOperand(i: 0); |
| 8952 | SDValue V2 = Op.getOperand(i: 1); |
| 8953 | SDLoc dl(Op); |
| 8954 | EVT VT = Op.getValueType(); |
| 8955 | EVT Op1VT = V1.getValueType(); |
| 8956 | unsigned NumElts = VT.getVectorNumElements(); |
| 8957 | unsigned Index = V2->getAsZExtVal(); |
| 8958 | |
| 8959 | assert(VT.getScalarSizeInBits() == 1 && |
| 8960 | "Unexpected custom EXTRACT_SUBVECTOR lowering" ); |
| 8961 | assert(ST->hasMVEIntegerOps() && |
| 8962 | "EXTRACT_SUBVECTOR lowering only supported for MVE" ); |
| 8963 | |
| 8964 | SDValue NewV1 = PromoteMVEPredVector(dl, Pred: V1, VT: Op1VT, DAG); |
| 8965 | |
| 8966 | // We now have Op1 promoted to a vector of integers, where v8i1 gets |
| 8967 | // promoted to v8i16, etc. |
| 8968 | |
| 8969 | MVT ElType = getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT(); |
| 8970 | |
| 8971 | if (NumElts == 2) { |
| 8972 | EVT SubVT = MVT::v4i32; |
| 8973 | SDValue SubVec = DAG.getNode(Opcode: ISD::UNDEF, DL: dl, VT: SubVT); |
| 8974 | for (unsigned i = Index, j = 0; i < (Index + NumElts); i++, j += 2) { |
| 8975 | SDValue Elt = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::i32, N1: NewV1, |
| 8976 | N2: DAG.getIntPtrConstant(Val: i, DL: dl)); |
| 8977 | SubVec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: SubVT, N1: SubVec, N2: Elt, |
| 8978 | N3: DAG.getConstant(Val: j, DL: dl, VT: MVT::i32)); |
| 8979 | SubVec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: SubVT, N1: SubVec, N2: Elt, |
| 8980 | N3: DAG.getConstant(Val: j + 1, DL: dl, VT: MVT::i32)); |
| 8981 | } |
| 8982 | SDValue Cmp = DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT: MVT::v4i1, N1: SubVec, |
| 8983 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
| 8984 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::v2i1, Operand: Cmp); |
| 8985 | } |
| 8986 | |
| 8987 | EVT SubVT = MVT::getVectorVT(VT: ElType, NumElements: NumElts); |
| 8988 | SDValue SubVec = DAG.getNode(Opcode: ISD::UNDEF, DL: dl, VT: SubVT); |
| 8989 | for (unsigned i = Index, j = 0; i < (Index + NumElts); i++, j++) { |
| 8990 | SDValue Elt = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::i32, N1: NewV1, |
| 8991 | N2: DAG.getIntPtrConstant(Val: i, DL: dl)); |
| 8992 | SubVec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: SubVT, N1: SubVec, N2: Elt, |
| 8993 | N3: DAG.getConstant(Val: j, DL: dl, VT: MVT::i32)); |
| 8994 | } |
| 8995 | |
| 8996 | // Now return the result of comparing the subvector with zero, |
| 8997 | // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1. |
| 8998 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: SubVec, |
| 8999 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
| 9000 | } |
| 9001 | |
| 9002 | // Turn a truncate into a predicate (an i1 vector) into icmp(and(x, 1), 0). |
| 9003 | static SDValue LowerTruncatei1(SDNode *N, SelectionDAG &DAG, |
| 9004 | const ARMSubtarget *ST) { |
| 9005 | assert(ST->hasMVEIntegerOps() && "Expected MVE!" ); |
| 9006 | EVT VT = N->getValueType(ResNo: 0); |
| 9007 | assert((VT == MVT::v16i1 || VT == MVT::v8i1 || VT == MVT::v4i1) && |
| 9008 | "Expected a vector i1 type!" ); |
| 9009 | SDValue Op = N->getOperand(Num: 0); |
| 9010 | EVT FromVT = Op.getValueType(); |
| 9011 | SDLoc DL(N); |
| 9012 | |
| 9013 | SDValue And = |
| 9014 | DAG.getNode(Opcode: ISD::AND, DL, VT: FromVT, N1: Op, N2: DAG.getConstant(Val: 1, DL, VT: FromVT)); |
| 9015 | return DAG.getNode(Opcode: ISD::SETCC, DL, VT, N1: And, N2: DAG.getConstant(Val: 0, DL, VT: FromVT), |
| 9016 | N3: DAG.getCondCode(Cond: ISD::SETNE)); |
| 9017 | } |
| 9018 | |
| 9019 | static SDValue LowerTruncate(SDNode *N, SelectionDAG &DAG, |
| 9020 | const ARMSubtarget *Subtarget) { |
| 9021 | if (!Subtarget->hasMVEIntegerOps()) |
| 9022 | return SDValue(); |
| 9023 | |
| 9024 | EVT ToVT = N->getValueType(ResNo: 0); |
| 9025 | if (ToVT.getScalarType() == MVT::i1) |
| 9026 | return LowerTruncatei1(N, DAG, ST: Subtarget); |
| 9027 | |
| 9028 | // MVE does not have a single instruction to perform the truncation of a v4i32 |
| 9029 | // into the lower half of a v8i16, in the same way that a NEON vmovn would. |
| 9030 | // Most of the instructions in MVE follow the 'Beats' system, where moving |
| 9031 | // values from different lanes is usually something that the instructions |
| 9032 | // avoid. |
| 9033 | // |
| 9034 | // Instead it has top/bottom instructions such as VMOVLT/B and VMOVNT/B, |
| 9035 | // which take the top/bottom half of a larger lane and extend it (or do the |
| 9036 | // opposite, truncating into the top/bottom lane from a larger lane). Note |
| 9037 | // that because of the way we widen lanes, a v4i16 is really a v4i32 using the |
| 9038 | // bottom 16 bits from each vector lane. This works really well with T/B |
| 9039 | // instructions, but that doesn't extend to v8i32->v8i16 where the lanes need |
| 9040 | // to move order. |
| 9041 | // |
| 9042 | // But truncates and sext/zext are always going to be fairly common from llvm. |
| 9043 | // We have several options for how to deal with them: |
| 9044 | // - Wherever possible combine them into an instruction that makes them |
| 9045 | // "free". This includes loads/stores, which can perform the trunc as part |
| 9046 | // of the memory operation. Or certain shuffles that can be turned into |
| 9047 | // VMOVN/VMOVL. |
| 9048 | // - Lane Interleaving to transform blocks surrounded by ext/trunc. So |
| 9049 | // trunc(mul(sext(a), sext(b))) may become |
| 9050 | // VMOVNT(VMUL(VMOVLB(a), VMOVLB(b)), VMUL(VMOVLT(a), VMOVLT(b))). (Which in |
| 9051 | // this case can use VMULL). This is performed in the |
| 9052 | // MVELaneInterleavingPass. |
// - Otherwise we have an option. By default we would expand the
// zext/sext/trunc into a series of lane extract/inserts going via GPR
// registers, one for each lane of the vector. This can obviously be
// very expensive.
// - The other option is to use the fact that loads/stores can extend/truncate
// to turn a trunc into two truncating stack stores and a stack reload. This
// becomes 3 back-to-back memory operations, but at least that is less than
// all the insert/extracts.
| 9061 | // |
// In order to do the last, we convert certain truncs into MVETRUNC, which
// are either optimized where they can be, or eventually lowered into stack
// stores/loads. This prevents us from splitting a v8i16 trunc into two stores
// too early, where other instructions would be better, and stops us from
// having to reconstruct multiple buildvector shuffles into loads/stores.
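// As a sketch of the overall transform: a trunc from v8i32 to v8i16 is split
// into its two v4i32 halves and becomes MVETRUNC(lo, hi). If nothing better is
// found later, that MVETRUNC is expanded into two truncating stack stores
// followed by a single v8i16 reload of the same slot.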
| 9067 | if (ToVT != MVT::v8i16 && ToVT != MVT::v16i8) |
| 9068 | return SDValue(); |
| 9069 | EVT FromVT = N->getOperand(Num: 0).getValueType(); |
| 9070 | if (FromVT != MVT::v8i32 && FromVT != MVT::v16i16) |
| 9071 | return SDValue(); |
| 9072 | |
| 9073 | SDValue Lo, Hi; |
| 9074 | std::tie(args&: Lo, args&: Hi) = DAG.SplitVectorOperand(N, OpNo: 0); |
| 9075 | SDLoc DL(N); |
| 9076 | return DAG.getNode(Opcode: ARMISD::MVETRUNC, DL, VT: ToVT, N1: Lo, N2: Hi); |
| 9077 | } |
| 9078 | |
| 9079 | static SDValue LowerVectorExtend(SDNode *N, SelectionDAG &DAG, |
| 9080 | const ARMSubtarget *Subtarget) { |
| 9081 | if (!Subtarget->hasMVEIntegerOps()) |
| 9082 | return SDValue(); |
| 9083 | |
| 9084 | // See LowerTruncate above for an explanation of MVEEXT/MVETRUNC. |
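// As a sketch of the i8->i32 case handled below: a sext/zext from v16i8 to
// v16i32 becomes an MVESEXT/MVEZEXT producing two v8i16 halves, each of which
// is extended again to v8i32 before being concatenated back into the v16i32
// result.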
| 9085 | |
| 9086 | EVT ToVT = N->getValueType(ResNo: 0); |
| 9087 | if (ToVT != MVT::v16i32 && ToVT != MVT::v8i32 && ToVT != MVT::v16i16) |
| 9088 | return SDValue(); |
| 9089 | SDValue Op = N->getOperand(Num: 0); |
| 9090 | EVT FromVT = Op.getValueType(); |
| 9091 | if (FromVT != MVT::v8i16 && FromVT != MVT::v16i8) |
| 9092 | return SDValue(); |
| 9093 | |
| 9094 | SDLoc DL(N); |
| 9095 | EVT ExtVT = ToVT.getHalfNumVectorElementsVT(Context&: *DAG.getContext()); |
| 9096 | if (ToVT.getScalarType() == MVT::i32 && FromVT.getScalarType() == MVT::i8) |
| 9097 | ExtVT = MVT::v8i16; |
| 9098 | |
| 9099 | unsigned Opcode = |
| 9100 | N->getOpcode() == ISD::SIGN_EXTEND ? ARMISD::MVESEXT : ARMISD::MVEZEXT; |
| 9101 | SDValue Ext = DAG.getNode(Opcode, DL, VTList: DAG.getVTList(VT1: ExtVT, VT2: ExtVT), N: Op); |
| 9102 | SDValue Ext1 = Ext.getValue(R: 1); |
| 9103 | |
| 9104 | if (ToVT.getScalarType() == MVT::i32 && FromVT.getScalarType() == MVT::i8) { |
| 9105 | Ext = DAG.getNode(Opcode: N->getOpcode(), DL, VT: MVT::v8i32, Operand: Ext); |
| 9106 | Ext1 = DAG.getNode(Opcode: N->getOpcode(), DL, VT: MVT::v8i32, Operand: Ext1); |
| 9107 | } |
| 9108 | |
| 9109 | return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: ToVT, N1: Ext, N2: Ext1); |
| 9110 | } |
| 9111 | |
| 9112 | /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each |
| 9113 | /// element has been zero/sign-extended, depending on the isSigned parameter, |
| 9114 | /// from an integer type half its size. |
| 9115 | static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, |
| 9116 | bool isSigned) { |
| 9117 | // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. |
| 9118 | EVT VT = N->getValueType(ResNo: 0); |
| 9119 | if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { |
| 9120 | SDNode *BVN = N->getOperand(Num: 0).getNode(); |
| 9121 | if (BVN->getValueType(ResNo: 0) != MVT::v4i32 || |
| 9122 | BVN->getOpcode() != ISD::BUILD_VECTOR) |
| 9123 | return false; |
| 9124 | unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; |
| 9125 | unsigned HiElt = 1 - LoElt; |
| 9126 | ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(Val: BVN->getOperand(Num: LoElt)); |
| 9127 | ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(Val: BVN->getOperand(Num: HiElt)); |
| 9128 | ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(Val: BVN->getOperand(Num: LoElt+2)); |
| 9129 | ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(Val: BVN->getOperand(Num: HiElt+2)); |
| 9130 | if (!Lo0 || !Hi0 || !Lo1 || !Hi1) |
| 9131 | return false; |
| 9132 | if (isSigned) { |
| 9133 | if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && |
| 9134 | Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) |
| 9135 | return true; |
| 9136 | } else { |
| 9137 | if (Hi0->isZero() && Hi1->isZero()) |
| 9138 | return true; |
| 9139 | } |
| 9140 | return false; |
| 9141 | } |
| 9142 | |
| 9143 | if (N->getOpcode() != ISD::BUILD_VECTOR) |
| 9144 | return false; |
| 9145 | |
| 9146 | for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { |
| 9147 | SDNode *Elt = N->getOperand(Num: i).getNode(); |
| 9148 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val: Elt)) { |
| 9149 | unsigned EltSize = VT.getScalarSizeInBits(); |
| 9150 | unsigned HalfSize = EltSize / 2; |
| 9151 | if (isSigned) { |
| 9152 | if (!isIntN(N: HalfSize, x: C->getSExtValue())) |
| 9153 | return false; |
| 9154 | } else { |
| 9155 | if (!isUIntN(N: HalfSize, x: C->getZExtValue())) |
| 9156 | return false; |
| 9157 | } |
| 9158 | continue; |
| 9159 | } |
| 9160 | return false; |
| 9161 | } |
| 9162 | |
| 9163 | return true; |
| 9164 | } |
| 9165 | |
| 9166 | /// isSignExtended - Check if a node is a vector value that is sign-extended |
| 9167 | /// or a constant BUILD_VECTOR with sign-extended elements. |
| 9168 | static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { |
| 9169 | if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) |
| 9170 | return true; |
| 9171 | if (isExtendedBUILD_VECTOR(N, DAG, isSigned: true)) |
| 9172 | return true; |
| 9173 | return false; |
| 9174 | } |
| 9175 | |
| 9176 | /// isZeroExtended - Check if a node is a vector value that is zero-extended (or |
| 9177 | /// any-extended) or a constant BUILD_VECTOR with zero-extended elements. |
| 9178 | static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { |
| 9179 | if (N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND || |
| 9180 | ISD::isZEXTLoad(N)) |
| 9181 | return true; |
| 9182 | if (isExtendedBUILD_VECTOR(N, DAG, isSigned: false)) |
| 9183 | return true; |
| 9184 | return false; |
| 9185 | } |
| 9186 | |
| 9187 | static EVT getExtensionTo64Bits(const EVT &OrigVT) { |
| 9188 | if (OrigVT.getSizeInBits() >= 64) |
| 9189 | return OrigVT; |
| 9190 | |
| 9191 | assert(OrigVT.isSimple() && "Expecting a simple value type" ); |
| 9192 | |
| 9193 | MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy; |
| 9194 | switch (OrigSimpleTy) { |
| 9195 | default: llvm_unreachable("Unexpected Vector Type" ); |
| 9196 | case MVT::v2i8: |
| 9197 | case MVT::v2i16: |
| 9198 | return MVT::v2i32; |
| 9199 | case MVT::v4i8: |
| 9200 | return MVT::v4i16; |
| 9201 | } |
| 9202 | } |
| 9203 | |
| 9204 | /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total |
| 9205 | /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL. |
| 9206 | /// We insert the required extension here to get the vector to fill a D register. |
| 9207 | static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG, |
| 9208 | const EVT &OrigTy, |
| 9209 | const EVT &ExtTy, |
| 9210 | unsigned ExtOpcode) { |
| 9211 | // The vector originally had a size of OrigTy. It was then extended to ExtTy. |
| 9212 | // We expect the ExtTy to be 128-bits total. If the OrigTy is less than |
| 9213 | // 64-bits we need to insert a new extension so that it will be 64-bits. |
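// For example (a sketch): if a v4i8 was extended to a v4i32, the unextended
// v4i8 only covers 32 bits, so we extend it to v4i16 here so it fills a 64-bit
// D register before being fed to VMULL.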
| 9214 | assert(ExtTy.is128BitVector() && "Unexpected extension size" ); |
| 9215 | if (OrigTy.getSizeInBits() >= 64) |
| 9216 | return N; |
| 9217 | |
| 9218 | // Must extend size to at least 64 bits to be used as an operand for VMULL. |
| 9219 | EVT NewVT = getExtensionTo64Bits(OrigVT: OrigTy); |
| 9220 | |
| 9221 | return DAG.getNode(Opcode: ExtOpcode, DL: SDLoc(N), VT: NewVT, Operand: N); |
| 9222 | } |
| 9223 | |
| 9224 | /// SkipLoadExtensionForVMULL - return a load of the original vector size that |
| 9225 | /// does not do any sign/zero extension. If the original vector is less |
| 9226 | /// than 64 bits, an appropriate extension will be added after the load to |
| 9227 | /// reach a total size of 64 bits. We have to add the extension separately |
| 9228 | /// because ARM does not have a sign/zero extending load for vectors. |
| 9229 | static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) { |
| 9230 | EVT ExtendedTy = getExtensionTo64Bits(OrigVT: LD->getMemoryVT()); |
| 9231 | |
| 9232 | // The load already has the right type. |
| 9233 | if (ExtendedTy == LD->getMemoryVT()) |
| 9234 | return DAG.getLoad(VT: LD->getMemoryVT(), dl: SDLoc(LD), Chain: LD->getChain(), |
| 9235 | Ptr: LD->getBasePtr(), PtrInfo: LD->getPointerInfo(), Alignment: LD->getAlign(), |
| 9236 | MMOFlags: LD->getMemOperand()->getFlags()); |
| 9237 | |
// We need to create a zextload/sextload. We cannot just create a load
// followed by a sext/zext node because LowerMUL is also run during normal
// operation legalization where we can't create illegal types.
| 9241 | return DAG.getExtLoad(ExtType: LD->getExtensionType(), dl: SDLoc(LD), VT: ExtendedTy, |
| 9242 | Chain: LD->getChain(), Ptr: LD->getBasePtr(), PtrInfo: LD->getPointerInfo(), |
| 9243 | MemVT: LD->getMemoryVT(), Alignment: LD->getAlign(), |
| 9244 | MMOFlags: LD->getMemOperand()->getFlags()); |
| 9245 | } |
| 9246 | |
| 9247 | /// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND, |
| 9248 | /// ANY_EXTEND, extending load, or BUILD_VECTOR with extended elements, return |
| 9249 | /// the unextended value. The unextended vector should be 64 bits so that it can |
| 9250 | /// be used as an operand to a VMULL instruction. If the original vector size |
/// before extension is less than 64 bits we add an extension to resize
/// the vector to 64 bits.
| 9253 | static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) { |
| 9254 | if (N->getOpcode() == ISD::SIGN_EXTEND || |
| 9255 | N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND) |
| 9256 | return AddRequiredExtensionForVMULL(N: N->getOperand(Num: 0), DAG, |
| 9257 | OrigTy: N->getOperand(Num: 0)->getValueType(ResNo: 0), |
| 9258 | ExtTy: N->getValueType(ResNo: 0), |
| 9259 | ExtOpcode: N->getOpcode()); |
| 9260 | |
| 9261 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: N)) { |
| 9262 | assert((ISD::isSEXTLoad(LD) || ISD::isZEXTLoad(LD)) && |
| 9263 | "Expected extending load" ); |
| 9264 | |
| 9265 | SDValue newLoad = SkipLoadExtensionForVMULL(LD, DAG); |
| 9266 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 1), To: newLoad.getValue(R: 1)); |
| 9267 | unsigned Opcode = ISD::isSEXTLoad(N: LD) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; |
| 9268 | SDValue extLoad = |
| 9269 | DAG.getNode(Opcode, DL: SDLoc(newLoad), VT: LD->getValueType(ResNo: 0), Operand: newLoad); |
| 9270 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 0), To: extLoad); |
| 9271 | |
| 9272 | return newLoad; |
| 9273 | } |
| 9274 | |
| 9275 | // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will |
| 9276 | // have been legalized as a BITCAST from v4i32. |
| 9277 | if (N->getOpcode() == ISD::BITCAST) { |
| 9278 | SDNode *BVN = N->getOperand(Num: 0).getNode(); |
| 9279 | assert(BVN->getOpcode() == ISD::BUILD_VECTOR && |
| 9280 | BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR" ); |
| 9281 | unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; |
| 9282 | return DAG.getBuildVector( |
| 9283 | VT: MVT::v2i32, DL: SDLoc(N), |
| 9284 | Ops: {BVN->getOperand(Num: LowElt), BVN->getOperand(Num: LowElt + 2)}); |
| 9285 | } |
| 9286 | // Construct a new BUILD_VECTOR with elements truncated to half the size. |
| 9287 | assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR" ); |
| 9288 | EVT VT = N->getValueType(ResNo: 0); |
| 9289 | unsigned EltSize = VT.getScalarSizeInBits() / 2; |
| 9290 | unsigned NumElts = VT.getVectorNumElements(); |
| 9291 | MVT TruncVT = MVT::getIntegerVT(BitWidth: EltSize); |
| 9292 | SmallVector<SDValue, 8> Ops; |
| 9293 | SDLoc dl(N); |
| 9294 | for (unsigned i = 0; i != NumElts; ++i) { |
| 9295 | const APInt &CInt = N->getConstantOperandAPInt(Num: i); |
| 9296 | // Element types smaller than 32 bits are not legal, so use i32 elements. |
| 9297 | // The values are implicitly truncated so sext vs. zext doesn't matter. |
| 9298 | Ops.push_back(Elt: DAG.getConstant(Val: CInt.zextOrTrunc(width: 32), DL: dl, VT: MVT::i32)); |
| 9299 | } |
| 9300 | return DAG.getBuildVector(VT: MVT::getVectorVT(VT: TruncVT, NumElements: NumElts), DL: dl, Ops); |
| 9301 | } |
| 9302 | |
| 9303 | static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { |
| 9304 | unsigned Opcode = N->getOpcode(); |
| 9305 | if (Opcode == ISD::ADD || Opcode == ISD::SUB) { |
| 9306 | SDNode *N0 = N->getOperand(Num: 0).getNode(); |
| 9307 | SDNode *N1 = N->getOperand(Num: 1).getNode(); |
| 9308 | return N0->hasOneUse() && N1->hasOneUse() && |
| 9309 | isSignExtended(N: N0, DAG) && isSignExtended(N: N1, DAG); |
| 9310 | } |
| 9311 | return false; |
| 9312 | } |
| 9313 | |
| 9314 | static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { |
| 9315 | unsigned Opcode = N->getOpcode(); |
| 9316 | if (Opcode == ISD::ADD || Opcode == ISD::SUB) { |
| 9317 | SDNode *N0 = N->getOperand(Num: 0).getNode(); |
| 9318 | SDNode *N1 = N->getOperand(Num: 1).getNode(); |
| 9319 | return N0->hasOneUse() && N1->hasOneUse() && |
| 9320 | isZeroExtended(N: N0, DAG) && isZeroExtended(N: N1, DAG); |
| 9321 | } |
| 9322 | return false; |
| 9323 | } |
| 9324 | |
| 9325 | static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { |
| 9326 | // Multiplications are only custom-lowered for 128-bit vectors so that |
| 9327 | // VMULL can be detected. Otherwise v2i64 multiplications are not legal. |
| 9328 | EVT VT = Op.getValueType(); |
| 9329 | assert(VT.is128BitVector() && VT.isInteger() && |
| 9330 | "unexpected type for custom-lowering ISD::MUL" ); |
| 9331 | SDNode *N0 = Op.getOperand(i: 0).getNode(); |
| 9332 | SDNode *N1 = Op.getOperand(i: 1).getNode(); |
| 9333 | unsigned NewOpc = 0; |
| 9334 | bool isMLA = false; |
| 9335 | bool isN0SExt = isSignExtended(N: N0, DAG); |
| 9336 | bool isN1SExt = isSignExtended(N: N1, DAG); |
| 9337 | if (isN0SExt && isN1SExt) |
| 9338 | NewOpc = ARMISD::VMULLs; |
| 9339 | else { |
| 9340 | bool isN0ZExt = isZeroExtended(N: N0, DAG); |
| 9341 | bool isN1ZExt = isZeroExtended(N: N1, DAG); |
| 9342 | if (isN0ZExt && isN1ZExt) |
| 9343 | NewOpc = ARMISD::VMULLu; |
| 9344 | else if (isN1SExt || isN1ZExt) { |
| 9345 | // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these |
| 9346 | // into (s/zext A * s/zext C) + (s/zext B * s/zext C) |
| 9347 | if (isN1SExt && isAddSubSExt(N: N0, DAG)) { |
| 9348 | NewOpc = ARMISD::VMULLs; |
| 9349 | isMLA = true; |
| 9350 | } else if (isN1ZExt && isAddSubZExt(N: N0, DAG)) { |
| 9351 | NewOpc = ARMISD::VMULLu; |
| 9352 | isMLA = true; |
| 9353 | } else if (isN0ZExt && isAddSubZExt(N: N1, DAG)) { |
| 9354 | std::swap(a&: N0, b&: N1); |
| 9355 | NewOpc = ARMISD::VMULLu; |
| 9356 | isMLA = true; |
| 9357 | } |
| 9358 | } |
| 9359 | |
| 9360 | if (!NewOpc) { |
| 9361 | if (VT == MVT::v2i64) |
| 9362 | // Fall through to expand this. It is not legal. |
| 9363 | return SDValue(); |
| 9364 | else |
| 9365 | // Other vector multiplications are legal. |
| 9366 | return Op; |
| 9367 | } |
| 9368 | } |
| 9369 | |
| 9370 | // Legalize to a VMULL instruction. |
| 9371 | SDLoc DL(Op); |
| 9372 | SDValue Op0; |
| 9373 | SDValue Op1 = SkipExtensionForVMULL(N: N1, DAG); |
| 9374 | if (!isMLA) { |
| 9375 | Op0 = SkipExtensionForVMULL(N: N0, DAG); |
| 9376 | assert(Op0.getValueType().is64BitVector() && |
| 9377 | Op1.getValueType().is64BitVector() && |
| 9378 | "unexpected types for extended operands to VMULL" ); |
| 9379 | return DAG.getNode(Opcode: NewOpc, DL, VT, N1: Op0, N2: Op1); |
| 9380 | } |
| 9381 | |
| 9382 | // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during |
| 9383 | // isel lowering to take advantage of no-stall back to back vmul + vmla. |
| 9384 | // vmull q0, d4, d6 |
| 9385 | // vmlal q0, d5, d6 |
| 9386 | // is faster than |
| 9387 | // vaddl q0, d4, d5 |
| 9388 | // vmovl q1, d6 |
| 9389 | // vmul q0, q0, q1 |
| 9390 | SDValue N00 = SkipExtensionForVMULL(N: N0->getOperand(Num: 0).getNode(), DAG); |
| 9391 | SDValue N01 = SkipExtensionForVMULL(N: N0->getOperand(Num: 1).getNode(), DAG); |
| 9392 | EVT Op1VT = Op1.getValueType(); |
| 9393 | return DAG.getNode(Opcode: N0->getOpcode(), DL, VT, |
| 9394 | N1: DAG.getNode(Opcode: NewOpc, DL, VT, |
| 9395 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL, VT: Op1VT, Operand: N00), N2: Op1), |
| 9396 | N2: DAG.getNode(Opcode: NewOpc, DL, VT, |
| 9397 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL, VT: Op1VT, Operand: N01), N2: Op1)); |
| 9398 | } |
| 9399 | |
| 9400 | static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl, |
| 9401 | SelectionDAG &DAG) { |
| 9402 | // TODO: Should this propagate fast-math-flags? |
| 9403 | |
| 9404 | // Convert to float |
| 9405 | // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); |
| 9406 | // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); |
| 9407 | X = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v4i32, Operand: X); |
| 9408 | Y = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v4i32, Operand: Y); |
| 9409 | X = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: X); |
| 9410 | Y = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: Y); |
| 9411 | // Get reciprocal estimate. |
| 9412 | // float4 recip = vrecpeq_f32(yf); |
| 9413 | Y = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
| 9414 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecpe, DL: dl, VT: MVT::i32), |
| 9415 | N2: Y); |
| 9416 | // Because char has a smaller range than uchar, we can actually get away |
// without any Newton steps. This requires that we use a weird bias
| 9418 | // of 0xb000, however (again, this has been exhaustively tested). |
| 9419 | // float4 result = as_float4(as_int4(xf*recip) + 0xb000); |
| 9420 | X = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1: X, N2: Y); |
| 9421 | X = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4i32, Operand: X); |
| 9422 | Y = DAG.getConstant(Val: 0xb000, DL: dl, VT: MVT::v4i32); |
| 9423 | X = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::v4i32, N1: X, N2: Y); |
| 9424 | X = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4f32, Operand: X); |
| 9425 | // Convert back to short. |
| 9426 | X = DAG.getNode(Opcode: ISD::FP_TO_SINT, DL: dl, VT: MVT::v4i32, Operand: X); |
| 9427 | X = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::v4i16, Operand: X); |
| 9428 | return X; |
| 9429 | } |
| 9430 | |
| 9431 | static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl, |
| 9432 | SelectionDAG &DAG) { |
| 9433 | // TODO: Should this propagate fast-math-flags? |
| 9434 | |
| 9435 | SDValue N2; |
| 9436 | // Convert to float. |
| 9437 | // float4 yf = vcvt_f32_s32(vmovl_s16(y)); |
| 9438 | // float4 xf = vcvt_f32_s32(vmovl_s16(x)); |
| 9439 | N0 = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v4i32, Operand: N0); |
| 9440 | N1 = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v4i32, Operand: N1); |
| 9441 | N0 = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: N0); |
| 9442 | N1 = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: N1); |
| 9443 | |
| 9444 | // Use reciprocal estimate and one refinement step. |
| 9445 | // float4 recip = vrecpeq_f32(yf); |
| 9446 | // recip *= vrecpsq_f32(yf, recip); |
| 9447 | N2 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
| 9448 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecpe, DL: dl, VT: MVT::i32), |
| 9449 | N2: N1); |
| 9450 | N1 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
| 9451 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecps, DL: dl, VT: MVT::i32), |
| 9452 | N2: N1, N3: N2); |
| 9453 | N2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1, N2); |
| 9454 | // Because short has a smaller range than ushort, we can actually get away |
// with only a single Newton step. This requires that we use a weird bias
// of 0x89, however (again, this has been exhaustively tested).
| 9457 | // float4 result = as_float4(as_int4(xf*recip) + 0x89); |
| 9458 | N0 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1: N0, N2); |
| 9459 | N0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4i32, Operand: N0); |
| 9460 | N1 = DAG.getConstant(Val: 0x89, DL: dl, VT: MVT::v4i32); |
| 9461 | N0 = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::v4i32, N1: N0, N2: N1); |
| 9462 | N0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4f32, Operand: N0); |
| 9463 | // Convert back to integer and return. |
| 9464 | // return vmovn_s32(vcvt_s32_f32(result)); |
| 9465 | N0 = DAG.getNode(Opcode: ISD::FP_TO_SINT, DL: dl, VT: MVT::v4i32, Operand: N0); |
| 9466 | N0 = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::v4i16, Operand: N0); |
| 9467 | return N0; |
| 9468 | } |
| 9469 | |
| 9470 | static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG, |
| 9471 | const ARMSubtarget *ST) { |
| 9472 | EVT VT = Op.getValueType(); |
| 9473 | assert((VT == MVT::v4i16 || VT == MVT::v8i8) && |
| 9474 | "unexpected type for custom-lowering ISD::SDIV" ); |
| 9475 | |
| 9476 | SDLoc dl(Op); |
| 9477 | SDValue N0 = Op.getOperand(i: 0); |
| 9478 | SDValue N1 = Op.getOperand(i: 1); |
| 9479 | SDValue N2, N3; |
| 9480 | |
| 9481 | if (VT == MVT::v8i8) { |
| 9482 | N0 = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v8i16, Operand: N0); |
| 9483 | N1 = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v8i16, Operand: N1); |
| 9484 | |
| 9485 | N2 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1: N0, |
| 9486 | N2: DAG.getIntPtrConstant(Val: 4, DL: dl)); |
| 9487 | N3 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1, |
| 9488 | N2: DAG.getIntPtrConstant(Val: 4, DL: dl)); |
| 9489 | N0 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1: N0, |
| 9490 | N2: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
| 9491 | N1 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1, |
| 9492 | N2: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
| 9493 | |
| 9494 | N0 = LowerSDIV_v4i8(X: N0, Y: N1, dl, DAG); // v4i16 |
| 9495 | N2 = LowerSDIV_v4i8(X: N2, Y: N3, dl, DAG); // v4i16 |
| 9496 | |
| 9497 | N0 = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT: MVT::v8i16, N1: N0, N2); |
| 9498 | N0 = LowerCONCAT_VECTORS(Op: N0, DAG, ST); |
| 9499 | |
| 9500 | N0 = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::v8i8, Operand: N0); |
| 9501 | return N0; |
| 9502 | } |
| 9503 | return LowerSDIV_v4i16(N0, N1, dl, DAG); |
| 9504 | } |
| 9505 | |
| 9506 | static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG, |
| 9507 | const ARMSubtarget *ST) { |
| 9508 | // TODO: Should this propagate fast-math-flags? |
| 9509 | EVT VT = Op.getValueType(); |
| 9510 | assert((VT == MVT::v4i16 || VT == MVT::v8i8) && |
| 9511 | "unexpected type for custom-lowering ISD::UDIV" ); |
| 9512 | |
| 9513 | SDLoc dl(Op); |
| 9514 | SDValue N0 = Op.getOperand(i: 0); |
| 9515 | SDValue N1 = Op.getOperand(i: 1); |
| 9516 | SDValue N2, N3; |
| 9517 | |
| 9518 | if (VT == MVT::v8i8) { |
| 9519 | N0 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, VT: MVT::v8i16, Operand: N0); |
| 9520 | N1 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, VT: MVT::v8i16, Operand: N1); |
| 9521 | |
| 9522 | N2 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1: N0, |
| 9523 | N2: DAG.getIntPtrConstant(Val: 4, DL: dl)); |
| 9524 | N3 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1, |
| 9525 | N2: DAG.getIntPtrConstant(Val: 4, DL: dl)); |
| 9526 | N0 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1: N0, |
| 9527 | N2: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
| 9528 | N1 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1, |
| 9529 | N2: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
| 9530 | |
| 9531 | N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 |
| 9532 | N2 = LowerSDIV_v4i16(N0: N2, N1: N3, dl, DAG); // v4i16 |
| 9533 | |
| 9534 | N0 = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT: MVT::v8i16, N1: N0, N2); |
| 9535 | N0 = LowerCONCAT_VECTORS(Op: N0, DAG, ST); |
| 9536 | |
| 9537 | N0 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v8i8, |
| 9538 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vqmovnsu, DL: dl, |
| 9539 | VT: MVT::i32), |
| 9540 | N2: N0); |
| 9541 | return N0; |
| 9542 | } |
| 9543 | |
// v4i16 udiv ... Convert to float.
| 9545 | // float4 yf = vcvt_f32_s32(vmovl_u16(y)); |
| 9546 | // float4 xf = vcvt_f32_s32(vmovl_u16(x)); |
| 9547 | N0 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, VT: MVT::v4i32, Operand: N0); |
| 9548 | N1 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, VT: MVT::v4i32, Operand: N1); |
| 9549 | N0 = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: N0); |
| 9550 | SDValue BN1 = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: N1); |
| 9551 | |
| 9552 | // Use reciprocal estimate and two refinement steps. |
| 9553 | // float4 recip = vrecpeq_f32(yf); |
| 9554 | // recip *= vrecpsq_f32(yf, recip); |
| 9555 | // recip *= vrecpsq_f32(yf, recip); |
| 9556 | N2 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
| 9557 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecpe, DL: dl, VT: MVT::i32), |
| 9558 | N2: BN1); |
| 9559 | N1 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
| 9560 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecps, DL: dl, VT: MVT::i32), |
| 9561 | N2: BN1, N3: N2); |
| 9562 | N2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1, N2); |
| 9563 | N1 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
| 9564 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecps, DL: dl, VT: MVT::i32), |
| 9565 | N2: BN1, N3: N2); |
| 9566 | N2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1, N2); |
| 9567 | // Simply multiplying by the reciprocal estimate can leave us a few ulps |
| 9568 | // too low, so we add 2 ulps (exhaustive testing shows that this is enough, |
| 9569 | // and that it will never cause us to return an answer too large). |
| 9570 | // float4 result = as_float4(as_int4(xf*recip) + 2); |
| 9571 | N0 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1: N0, N2); |
| 9572 | N0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4i32, Operand: N0); |
| 9573 | N1 = DAG.getConstant(Val: 2, DL: dl, VT: MVT::v4i32); |
| 9574 | N0 = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::v4i32, N1: N0, N2: N1); |
| 9575 | N0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4f32, Operand: N0); |
| 9576 | // Convert back to integer and return. |
| 9577 | // return vmovn_u32(vcvt_s32_f32(result)); |
| 9578 | N0 = DAG.getNode(Opcode: ISD::FP_TO_SINT, DL: dl, VT: MVT::v4i32, Operand: N0); |
| 9579 | N0 = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::v4i16, Operand: N0); |
| 9580 | return N0; |
| 9581 | } |
| 9582 | |
| 9583 | static SDValue LowerUADDSUBO_CARRY(SDValue Op, SelectionDAG &DAG) { |
| 9584 | SDNode *N = Op.getNode(); |
| 9585 | EVT VT = N->getValueType(ResNo: 0); |
| 9586 | SDVTList VTs = DAG.getVTList(VT1: VT, VT2: MVT::i32); |
| 9587 | |
| 9588 | SDValue Carry = Op.getOperand(i: 2); |
| 9589 | |
| 9590 | SDLoc DL(Op); |
| 9591 | |
| 9592 | SDValue Result; |
| 9593 | if (Op.getOpcode() == ISD::UADDO_CARRY) { |
| 9594 | // This converts the boolean value carry into the carry flag. |
| 9595 | Carry = ConvertBooleanCarryToCarryFlag(BoolCarry: Carry, DAG); |
| 9596 | |
| 9597 | // Do the addition proper using the carry flag we wanted. |
| 9598 | Result = DAG.getNode(Opcode: ARMISD::ADDE, DL, VTList: VTs, N1: Op.getOperand(i: 0), |
| 9599 | N2: Op.getOperand(i: 1), N3: Carry); |
| 9600 | |
| 9601 | // Now convert the carry flag into a boolean value. |
| 9602 | Carry = ConvertCarryFlagToBooleanCarry(Flags: Result.getValue(R: 1), VT, DAG); |
| 9603 | } else { |
| 9604 | // ARMISD::SUBE expects a carry not a borrow like ISD::USUBO_CARRY so we |
| 9605 | // have to invert the carry first. |
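// In other words (a sketch): for usubo_carry(a, b, borrow) we feed
// carry = 1 - borrow into SUBE, and below we turn the resulting carry back
// into a borrow by computing 1 - C again.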
| 9606 | Carry = DAG.getNode(Opcode: ISD::SUB, DL, VT: MVT::i32, |
| 9607 | N1: DAG.getConstant(Val: 1, DL, VT: MVT::i32), N2: Carry); |
| 9608 | // This converts the boolean value carry into the carry flag. |
| 9609 | Carry = ConvertBooleanCarryToCarryFlag(BoolCarry: Carry, DAG); |
| 9610 | |
| 9611 | // Do the subtraction proper using the carry flag we wanted. |
| 9612 | Result = DAG.getNode(Opcode: ARMISD::SUBE, DL, VTList: VTs, N1: Op.getOperand(i: 0), |
| 9613 | N2: Op.getOperand(i: 1), N3: Carry); |
| 9614 | |
| 9615 | // Now convert the carry flag into a boolean value. |
| 9616 | Carry = ConvertCarryFlagToBooleanCarry(Flags: Result.getValue(R: 1), VT, DAG); |
| 9617 | // But the carry returned by ARMISD::SUBE is not a borrow as expected |
| 9618 | // by ISD::USUBO_CARRY, so compute 1 - C. |
| 9619 | Carry = DAG.getNode(Opcode: ISD::SUB, DL, VT: MVT::i32, |
| 9620 | N1: DAG.getConstant(Val: 1, DL, VT: MVT::i32), N2: Carry); |
| 9621 | } |
| 9622 | |
| 9623 | // Return both values. |
| 9624 | return DAG.getNode(Opcode: ISD::MERGE_VALUES, DL, VTList: N->getVTList(), N1: Result, N2: Carry); |
| 9625 | } |
| 9626 | |
| 9627 | SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, |
| 9628 | bool Signed, |
| 9629 | SDValue &Chain) const { |
| 9630 | EVT VT = Op.getValueType(); |
| 9631 | assert((VT == MVT::i32 || VT == MVT::i64) && |
| 9632 | "unexpected type for custom lowering DIV" ); |
| 9633 | SDLoc dl(Op); |
| 9634 | |
| 9635 | const auto &DL = DAG.getDataLayout(); |
| 9636 | RTLIB::Libcall LC; |
| 9637 | if (Signed) |
| 9638 | LC = VT == MVT::i32 ? RTLIB::SDIVREM_I32 : RTLIB::SDIVREM_I64; |
| 9639 | else |
| 9640 | LC = VT == MVT::i32 ? RTLIB::UDIVREM_I32 : RTLIB::UDIVREM_I64; |
| 9641 | |
| 9642 | RTLIB::LibcallImpl LCImpl = DAG.getLibcalls().getLibcallImpl(Call: LC); |
| 9643 | SDValue ES = DAG.getExternalSymbol(LCImpl, VT: getPointerTy(DL)); |
| 9644 | |
| 9645 | ARMTargetLowering::ArgListTy Args; |
| 9646 | |
| 9647 | for (auto AI : {1, 0}) { |
| 9648 | SDValue Operand = Op.getOperand(i: AI); |
| 9649 | Args.emplace_back(args&: Operand, |
| 9650 | args: Operand.getValueType().getTypeForEVT(Context&: *DAG.getContext())); |
| 9651 | } |
| 9652 | |
| 9653 | CallLoweringInfo CLI(DAG); |
| 9654 | CLI.setDebugLoc(dl).setChain(Chain).setCallee( |
| 9655 | CC: DAG.getLibcalls().getLibcallImplCallingConv(Call: LCImpl), |
| 9656 | ResultType: VT.getTypeForEVT(Context&: *DAG.getContext()), Target: ES, ArgsList: std::move(Args)); |
| 9657 | |
| 9658 | return LowerCallTo(CLI).first; |
| 9659 | } |
| 9660 | |
| 9661 | // This is a code size optimisation: return the original SDIV node to |
| 9662 | // DAGCombiner when we don't want to expand SDIV into a sequence of |
| 9663 | // instructions, and an empty node otherwise which will cause the |
| 9664 | // SDIV to be expanded in DAGCombine. |
| 9665 | SDValue |
| 9666 | ARMTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, |
| 9667 | SelectionDAG &DAG, |
| 9668 | SmallVectorImpl<SDNode *> &Created) const { |
| 9669 | // TODO: Support SREM |
| 9670 | if (N->getOpcode() != ISD::SDIV) |
| 9671 | return SDValue(); |
| 9672 | |
| 9673 | const auto &ST = DAG.getSubtarget<ARMSubtarget>(); |
| 9674 | const bool MinSize = ST.hasMinSize(); |
| 9675 | const bool HasDivide = ST.isThumb() ? ST.hasDivideInThumbMode() |
| 9676 | : ST.hasDivideInARMMode(); |
| 9677 | |
| 9678 | // Don't touch vector types; rewriting this may lead to scalarizing |
| 9679 | // the int divs. |
| 9680 | if (N->getOperand(Num: 0).getValueType().isVector()) |
| 9681 | return SDValue(); |
| 9682 | |
// Bail if MinSize is not set; for both ARM and Thumb mode we also need
// hwdiv support for this to be really profitable.
| 9685 | if (!(MinSize && HasDivide)) |
| 9686 | return SDValue(); |
| 9687 | |
| 9688 | // ARM mode is a bit simpler than Thumb: we can handle large power |
| 9689 | // of 2 immediates with 1 mov instruction; no further checks required, |
| 9690 | // just return the sdiv node. |
| 9691 | if (!ST.isThumb()) |
| 9692 | return SDValue(N, 0); |
| 9693 | |
// In Thumb mode, immediates larger than 128 need a wide 4-byte MOV,
// and thus lose the code size benefits of a MOVS that requires only 2 bytes.
// TargetTransformInfo and 'getIntImmCodeSizeCost' could be helpful here,
// but as this code is already doing exactly that check, it's not worth the
// trouble to get TTI.
| 9698 | if (Divisor.sgt(RHS: 128)) |
| 9699 | return SDValue(); |
| 9700 | |
| 9701 | return SDValue(N, 0); |
| 9702 | } |
| 9703 | |
| 9704 | SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, |
| 9705 | bool Signed) const { |
| 9706 | assert(Op.getValueType() == MVT::i32 && |
| 9707 | "unexpected type for custom lowering DIV" ); |
| 9708 | SDLoc dl(Op); |
| 9709 | |
| 9710 | SDValue DBZCHK = DAG.getNode(Opcode: ARMISD::WIN__DBZCHK, DL: dl, VT: MVT::Other, |
| 9711 | N1: DAG.getEntryNode(), N2: Op.getOperand(i: 1)); |
| 9712 | |
| 9713 | return LowerWindowsDIVLibCall(Op, DAG, Signed, Chain&: DBZCHK); |
| 9714 | } |
| 9715 | |
| 9716 | static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N, SDValue InChain) { |
| 9717 | SDLoc DL(N); |
| 9718 | SDValue Op = N->getOperand(Num: 1); |
| 9719 | if (N->getValueType(ResNo: 0) == MVT::i32) |
| 9720 | return DAG.getNode(Opcode: ARMISD::WIN__DBZCHK, DL, VT: MVT::Other, N1: InChain, N2: Op); |
| 9721 | SDValue Lo, Hi; |
| 9722 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: Op, DL, LoVT: MVT::i32, HiVT: MVT::i32); |
| 9723 | return DAG.getNode(Opcode: ARMISD::WIN__DBZCHK, DL, VT: MVT::Other, N1: InChain, |
| 9724 | N2: DAG.getNode(Opcode: ISD::OR, DL, VT: MVT::i32, N1: Lo, N2: Hi)); |
| 9725 | } |
| 9726 | |
| 9727 | void ARMTargetLowering::ExpandDIV_Windows( |
| 9728 | SDValue Op, SelectionDAG &DAG, bool Signed, |
| 9729 | SmallVectorImpl<SDValue> &Results) const { |
| 9730 | const auto &DL = DAG.getDataLayout(); |
| 9731 | |
| 9732 | assert(Op.getValueType() == MVT::i64 && |
| 9733 | "unexpected type for custom lowering DIV" ); |
| 9734 | SDLoc dl(Op); |
| 9735 | |
| 9736 | SDValue DBZCHK = WinDBZCheckDenominator(DAG, N: Op.getNode(), InChain: DAG.getEntryNode()); |
| 9737 | |
| 9738 | SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, Chain&: DBZCHK); |
| 9739 | |
| 9740 | SDValue Lower = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::i32, Operand: Result); |
| 9741 | SDValue Upper = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i64, N1: Result, |
| 9742 | N2: DAG.getConstant(Val: 32, DL: dl, VT: getPointerTy(DL))); |
| 9743 | Upper = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::i32, Operand: Upper); |
| 9744 | |
| 9745 | Results.push_back(Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Lower, N2: Upper)); |
| 9746 | } |
| 9747 | |
| 9748 | std::pair<SDValue, SDValue> |
| 9749 | ARMTargetLowering::LowerAEABIUnalignedLoad(SDValue Op, |
| 9750 | SelectionDAG &DAG) const { |
// If we have an unaligned load from an i32 or i64 that would normally be
// split into separate ldrb's, we can use the __aeabi_uread4/__aeabi_uread8
// functions instead.
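// For example (a sketch): at minsize, on a target without unaligned accesses,
// an unaligned i32 load becomes a call to __aeabi_uread4 rather than four
// byte loads that then have to be merged back together.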
| 9754 | LoadSDNode *LD = cast<LoadSDNode>(Val: Op.getNode()); |
| 9755 | EVT MemVT = LD->getMemoryVT(); |
| 9756 | if (MemVT != MVT::i32 && MemVT != MVT::i64) |
| 9757 | return std::make_pair(x: SDValue(), y: SDValue()); |
| 9758 | |
| 9759 | const auto &MF = DAG.getMachineFunction(); |
| 9760 | unsigned AS = LD->getAddressSpace(); |
| 9761 | Align Alignment = LD->getAlign(); |
| 9762 | const DataLayout &DL = DAG.getDataLayout(); |
| 9763 | bool AllowsUnaligned = Subtarget->allowsUnalignedMem(); |
| 9764 | |
| 9765 | if (MF.getFunction().hasMinSize() && !AllowsUnaligned && |
| 9766 | Alignment <= llvm::Align(2)) { |
| 9767 | |
| 9768 | RTLIB::Libcall LC = |
| 9769 | (MemVT == MVT::i32) ? RTLIB::AEABI_UREAD4 : RTLIB::AEABI_UREAD8; |
| 9770 | |
| 9771 | MakeLibCallOptions Opts; |
| 9772 | SDLoc dl(Op); |
| 9773 | |
| 9774 | auto Pair = makeLibCall(DAG, LC, RetVT: MemVT.getSimpleVT(), Ops: LD->getBasePtr(), |
| 9775 | CallOptions: Opts, dl, Chain: LD->getChain()); |
| 9776 | |
// If necessary, extend the loaded value to the load's result type
| 9778 | if (LD->getExtensionType() != ISD::NON_EXTLOAD) { |
| 9779 | unsigned ExtType = LD->getExtensionType() == ISD::SEXTLOAD |
| 9780 | ? ISD::SIGN_EXTEND |
| 9781 | : ISD::ZERO_EXTEND; |
| 9782 | SDValue EN = DAG.getNode(Opcode: ExtType, DL: dl, VT: LD->getValueType(ResNo: 0), Operand: Pair.first); |
| 9783 | Pair.first = EN; |
| 9784 | } |
| 9785 | return Pair; |
| 9786 | } |
| 9787 | |
| 9788 | // Default expand to individual loads |
| 9789 | if (!allowsMemoryAccess(Context&: *DAG.getContext(), DL, VT: MemVT, AddrSpace: AS, Alignment)) |
| 9790 | return expandUnalignedLoad(LD, DAG); |
| 9791 | return std::make_pair(x: SDValue(), y: SDValue()); |
| 9792 | } |
| 9793 | |
| 9794 | SDValue ARMTargetLowering::LowerAEABIUnalignedStore(SDValue Op, |
| 9795 | SelectionDAG &DAG) const { |
// If we have an unaligned store to an i32 or i64 that would normally be
// split into separate strb's, we can use the __aeabi_uwrite4/__aeabi_uwrite8
// functions instead.
| 9799 | StoreSDNode *ST = cast<StoreSDNode>(Val: Op.getNode()); |
| 9800 | EVT MemVT = ST->getMemoryVT(); |
| 9801 | if (MemVT != MVT::i32 && MemVT != MVT::i64) |
| 9802 | return SDValue(); |
| 9803 | |
| 9804 | const auto &MF = DAG.getMachineFunction(); |
| 9805 | unsigned AS = ST->getAddressSpace(); |
| 9806 | Align Alignment = ST->getAlign(); |
| 9807 | const DataLayout &DL = DAG.getDataLayout(); |
| 9808 | bool AllowsUnaligned = Subtarget->allowsUnalignedMem(); |
| 9809 | |
| 9810 | if (MF.getFunction().hasMinSize() && !AllowsUnaligned && |
| 9811 | Alignment <= llvm::Align(2)) { |
| 9812 | |
| 9813 | SDLoc dl(Op); |
| 9814 | |
// If necessary, truncate the stored value to the memory type
| 9816 | SDValue StoreVal = ST->getOperand(Num: 1); |
| 9817 | if (ST->isTruncatingStore()) |
| 9818 | StoreVal = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MemVT, Operand: ST->getOperand(Num: 1)); |
| 9819 | |
| 9820 | RTLIB::Libcall LC = |
| 9821 | (MemVT == MVT::i32) ? RTLIB::AEABI_UWRITE4 : RTLIB::AEABI_UWRITE8; |
| 9822 | |
| 9823 | MakeLibCallOptions Opts; |
| 9824 | auto CallResult = |
| 9825 | makeLibCall(DAG, LC, RetVT: MVT::isVoid, Ops: {StoreVal, ST->getBasePtr()}, CallOptions: Opts, |
| 9826 | dl, Chain: ST->getChain()); |
| 9827 | |
| 9828 | return CallResult.second; |
| 9829 | } |
| 9830 | |
| 9831 | // Default expand to individual stores |
| 9832 | if (!allowsMemoryAccess(Context&: *DAG.getContext(), DL, VT: MemVT, AddrSpace: AS, Alignment)) |
| 9833 | return expandUnalignedStore(ST, DAG); |
| 9834 | return SDValue(); |
| 9835 | } |
| 9836 | |
| 9837 | static SDValue LowerPredicateLoad(SDValue Op, SelectionDAG &DAG) { |
| 9838 | LoadSDNode *LD = cast<LoadSDNode>(Val: Op.getNode()); |
| 9839 | EVT MemVT = LD->getMemoryVT(); |
| 9840 | assert((MemVT == MVT::v2i1 || MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || |
| 9841 | MemVT == MVT::v16i1) && |
| 9842 | "Expected a predicate type!" ); |
| 9843 | assert(MemVT == Op.getValueType()); |
| 9844 | assert(LD->getExtensionType() == ISD::NON_EXTLOAD && |
| 9845 | "Expected a non-extending load" ); |
assert(LD->isUnindexed() && "Expected an unindexed load");
| 9847 | |
// The basic MVE VLDR on a v2i1/v4i1/v8i1 actually loads the entire 16-bit
// predicate, with the "v4i1" bits spread out over the 16 bits loaded. We
// need to make sure that the 8/4/2 bits are actually loaded into the correct
// place, which means loading the value and then shuffling the values into
// the bottom bits of the predicate.
// Equally, VLDR for a v16i1 will actually load 32 bits (so will be incorrect
// for BE).
// Speaking of BE, the rest of llvm apparently assumes the reverse order to a
// natural VMSR(load), so the loaded value needs to be bit-reversed.
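// As a sketch, for a little-endian v4i1 load this amounts to: load the
// predicate bits as a scalar i32 extload, PREDICATE_CAST that to v16i1, and
// extract the low v4i1 subvector. For big-endian the loaded value is
// bit-reversed and shifted down first.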
| 9857 | |
| 9858 | SDLoc dl(Op); |
| 9859 | SDValue Load = DAG.getExtLoad( |
| 9860 | ExtType: ISD::EXTLOAD, dl, VT: MVT::i32, Chain: LD->getChain(), Ptr: LD->getBasePtr(), |
| 9861 | MemVT: EVT::getIntegerVT(Context&: *DAG.getContext(), BitWidth: MemVT.getSizeInBits()), |
| 9862 | MMO: LD->getMemOperand()); |
| 9863 | SDValue Val = Load; |
| 9864 | if (DAG.getDataLayout().isBigEndian()) |
| 9865 | Val = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i32, |
| 9866 | N1: DAG.getNode(Opcode: ISD::BITREVERSE, DL: dl, VT: MVT::i32, Operand: Load), |
| 9867 | N2: DAG.getConstant(Val: 32 - MemVT.getSizeInBits(), DL: dl, VT: MVT::i32)); |
| 9868 | SDValue Pred = DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::v16i1, Operand: Val); |
| 9869 | if (MemVT != MVT::v16i1) |
| 9870 | Pred = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MemVT, N1: Pred, |
| 9871 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 9872 | return DAG.getMergeValues(Ops: {Pred, Load.getValue(R: 1)}, dl); |
| 9873 | } |
| 9874 | |
| 9875 | void ARMTargetLowering::LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results, |
| 9876 | SelectionDAG &DAG) const { |
| 9877 | LoadSDNode *LD = cast<LoadSDNode>(Val: N); |
| 9878 | EVT MemVT = LD->getMemoryVT(); |
| 9879 | |
| 9880 | if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() && |
| 9881 | !Subtarget->isThumb1Only() && LD->isVolatile() && |
| 9882 | LD->getAlign() >= Subtarget->getDualLoadStoreAlignment()) { |
| 9883 | assert(LD->isUnindexed() && "Loads should be unindexed at this point." ); |
| 9884 | SDLoc dl(N); |
| 9885 | SDValue Result = DAG.getMemIntrinsicNode( |
| 9886 | Opcode: ARMISD::LDRD, dl, VTList: DAG.getVTList(VTs: {MVT::i32, MVT::i32, MVT::Other}), |
| 9887 | Ops: {LD->getChain(), LD->getBasePtr()}, MemVT, MMO: LD->getMemOperand()); |
| 9888 | SDValue Lo = Result.getValue(R: DAG.getDataLayout().isLittleEndian() ? 0 : 1); |
| 9889 | SDValue Hi = Result.getValue(R: DAG.getDataLayout().isLittleEndian() ? 1 : 0); |
| 9890 | SDValue Pair = DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Lo, N2: Hi); |
| 9891 | Results.append(IL: {Pair, Result.getValue(R: 2)}); |
| 9892 | } else if (MemVT == MVT::i32 || MemVT == MVT::i64) { |
| 9893 | auto Pair = LowerAEABIUnalignedLoad(Op: SDValue(N, 0), DAG); |
| 9894 | if (Pair.first) { |
| 9895 | Results.push_back(Elt: Pair.first); |
| 9896 | Results.push_back(Elt: Pair.second); |
| 9897 | } |
| 9898 | } |
| 9899 | } |
| 9900 | |
| 9901 | static SDValue LowerPredicateStore(SDValue Op, SelectionDAG &DAG) { |
| 9902 | StoreSDNode *ST = cast<StoreSDNode>(Val: Op.getNode()); |
| 9903 | EVT MemVT = ST->getMemoryVT(); |
| 9904 | assert((MemVT == MVT::v2i1 || MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || |
| 9905 | MemVT == MVT::v16i1) && |
| 9906 | "Expected a predicate type!" ); |
| 9907 | assert(MemVT == ST->getValue().getValueType()); |
| 9908 | assert(!ST->isTruncatingStore() && "Expected a non-extending store" ); |
assert(ST->isUnindexed() && "Expected an unindexed store");
| 9910 | |
| 9911 | // Only store the v2i1 or v4i1 or v8i1 worth of bits, via a buildvector with |
| 9912 | // top bits unset and a scalar store. |
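// As a sketch, for a v4i1 store this builds a v16i1 whose low 4 lanes come
// from the stored value (reversed for BE) and whose remaining lanes are undef,
// PREDICATE_CASTs that to an i32, and emits a truncating scalar store of just
// the predicate-sized bits.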
| 9913 | SDLoc dl(Op); |
| 9914 | SDValue Build = ST->getValue(); |
| 9915 | if (MemVT != MVT::v16i1) { |
| 9916 | SmallVector<SDValue, 16> Ops; |
| 9917 | for (unsigned I = 0; I < MemVT.getVectorNumElements(); I++) { |
| 9918 | unsigned Elt = DAG.getDataLayout().isBigEndian() |
| 9919 | ? MemVT.getVectorNumElements() - I - 1 |
| 9920 | : I; |
| 9921 | Ops.push_back(Elt: DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::i32, N1: Build, |
| 9922 | N2: DAG.getConstant(Val: Elt, DL: dl, VT: MVT::i32))); |
| 9923 | } |
| 9924 | for (unsigned I = MemVT.getVectorNumElements(); I < 16; I++) |
| 9925 | Ops.push_back(Elt: DAG.getUNDEF(VT: MVT::i32)); |
| 9926 | Build = DAG.getNode(Opcode: ISD::BUILD_VECTOR, DL: dl, VT: MVT::v16i1, Ops); |
| 9927 | } |
| 9928 | SDValue GRP = DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::i32, Operand: Build); |
| 9929 | if (MemVT == MVT::v16i1 && DAG.getDataLayout().isBigEndian()) |
| 9930 | GRP = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i32, |
| 9931 | N1: DAG.getNode(Opcode: ISD::BITREVERSE, DL: dl, VT: MVT::i32, Operand: GRP), |
| 9932 | N2: DAG.getConstant(Val: 16, DL: dl, VT: MVT::i32)); |
| 9933 | return DAG.getTruncStore( |
| 9934 | Chain: ST->getChain(), dl, Val: GRP, Ptr: ST->getBasePtr(), |
| 9935 | SVT: EVT::getIntegerVT(Context&: *DAG.getContext(), BitWidth: MemVT.getSizeInBits()), |
| 9936 | MMO: ST->getMemOperand()); |
| 9937 | } |
| 9938 | |
| 9939 | SDValue ARMTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG, |
| 9940 | const ARMSubtarget *Subtarget) const { |
| 9941 | StoreSDNode *ST = cast<StoreSDNode>(Val: Op.getNode()); |
| 9942 | EVT MemVT = ST->getMemoryVT(); |
| 9943 | |
| 9944 | if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() && |
| 9945 | !Subtarget->isThumb1Only() && ST->isVolatile() && |
| 9946 | ST->getAlign() >= Subtarget->getDualLoadStoreAlignment()) { |
| 9947 | assert(ST->isUnindexed() && "Stores should be unindexed at this point." ); |
| 9948 | SDNode *N = Op.getNode(); |
| 9949 | SDLoc dl(N); |
| 9950 | |
| 9951 | SDValue Lo = DAG.getNode( |
| 9952 | Opcode: ISD::EXTRACT_ELEMENT, DL: dl, VT: MVT::i32, N1: ST->getValue(), |
| 9953 | N2: DAG.getTargetConstant(Val: DAG.getDataLayout().isLittleEndian() ? 0 : 1, DL: dl, |
| 9954 | VT: MVT::i32)); |
| 9955 | SDValue Hi = DAG.getNode( |
| 9956 | Opcode: ISD::EXTRACT_ELEMENT, DL: dl, VT: MVT::i32, N1: ST->getValue(), |
| 9957 | N2: DAG.getTargetConstant(Val: DAG.getDataLayout().isLittleEndian() ? 1 : 0, DL: dl, |
| 9958 | VT: MVT::i32)); |
| 9959 | |
| 9960 | return DAG.getMemIntrinsicNode(Opcode: ARMISD::STRD, dl, VTList: DAG.getVTList(VT: MVT::Other), |
| 9961 | Ops: {ST->getChain(), Lo, Hi, ST->getBasePtr()}, |
| 9962 | MemVT, MMO: ST->getMemOperand()); |
| 9963 | } else if (Subtarget->hasMVEIntegerOps() && |
| 9964 | ((MemVT == MVT::v2i1 || MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || |
| 9965 | MemVT == MVT::v16i1))) { |
| 9966 | return LowerPredicateStore(Op, DAG); |
| 9967 | } else if (MemVT == MVT::i32 || MemVT == MVT::i64) { |
| 9968 | return LowerAEABIUnalignedStore(Op, DAG); |
| 9969 | } |
| 9970 | return SDValue(); |
| 9971 | } |
| 9972 | |
| 9973 | static bool isZeroVector(SDValue N) { |
| 9974 | return (ISD::isBuildVectorAllZeros(N: N.getNode()) || |
| 9975 | (N->getOpcode() == ARMISD::VMOVIMM && |
| 9976 | isNullConstant(V: N->getOperand(Num: 0)))); |
| 9977 | } |
| 9978 | |
| 9979 | static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) { |
| 9980 | MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Val: Op.getNode()); |
| 9981 | MVT VT = Op.getSimpleValueType(); |
| 9982 | SDValue Mask = N->getMask(); |
| 9983 | SDValue PassThru = N->getPassThru(); |
| 9984 | SDLoc dl(Op); |
| 9985 | |
| 9986 | if (isZeroVector(N: PassThru)) |
| 9987 | return Op; |
| 9988 | |
| 9989 | // MVE Masked loads use zero as the passthru value. Here we convert undef to |
| 9990 | // zero too, and other values are lowered to a select. |
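// As a sketch: masked.load(ptr, mask, passthru) becomes a masked load with a
// zero passthru, followed by vselect(mask, load, passthru) unless the original
// passthru is undef or already a (possibly bitcast) zero vector.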
| 9991 | SDValue ZeroVec = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT, |
| 9992 | Operand: DAG.getTargetConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 9993 | SDValue NewLoad = DAG.getMaskedLoad( |
| 9994 | VT, dl, Chain: N->getChain(), Base: N->getBasePtr(), Offset: N->getOffset(), Mask, Src0: ZeroVec, |
| 9995 | MemVT: N->getMemoryVT(), MMO: N->getMemOperand(), AM: N->getAddressingMode(), |
| 9996 | N->getExtensionType(), IsExpanding: N->isExpandingLoad()); |
| 9997 | SDValue Combo = NewLoad; |
| 9998 | bool PassThruIsCastZero = (PassThru.getOpcode() == ISD::BITCAST || |
| 9999 | PassThru.getOpcode() == ARMISD::VECTOR_REG_CAST) && |
| 10000 | isZeroVector(N: PassThru->getOperand(Num: 0)); |
| 10001 | if (!PassThru.isUndef() && !PassThruIsCastZero) |
| 10002 | Combo = DAG.getNode(Opcode: ISD::VSELECT, DL: dl, VT, N1: Mask, N2: NewLoad, N3: PassThru); |
| 10003 | return DAG.getMergeValues(Ops: {Combo, NewLoad.getValue(R: 1)}, dl); |
| 10004 | } |
| 10005 | |
| 10006 | static SDValue LowerVecReduce(SDValue Op, SelectionDAG &DAG, |
| 10007 | const ARMSubtarget *ST) { |
| 10008 | if (!ST->hasMVEIntegerOps()) |
| 10009 | return SDValue(); |
| 10010 | |
| 10011 | SDLoc dl(Op); |
| 10012 | unsigned BaseOpcode = 0; |
| 10013 | switch (Op->getOpcode()) { |
| 10014 | default: llvm_unreachable("Expected VECREDUCE opcode" ); |
| 10015 | case ISD::VECREDUCE_FADD: BaseOpcode = ISD::FADD; break; |
| 10016 | case ISD::VECREDUCE_FMUL: BaseOpcode = ISD::FMUL; break; |
| 10017 | case ISD::VECREDUCE_MUL: BaseOpcode = ISD::MUL; break; |
| 10018 | case ISD::VECREDUCE_AND: BaseOpcode = ISD::AND; break; |
| 10019 | case ISD::VECREDUCE_OR: BaseOpcode = ISD::OR; break; |
| 10020 | case ISD::VECREDUCE_XOR: BaseOpcode = ISD::XOR; break; |
| 10021 | case ISD::VECREDUCE_FMAX: BaseOpcode = ISD::FMAXNUM; break; |
| 10022 | case ISD::VECREDUCE_FMIN: BaseOpcode = ISD::FMINNUM; break; |
| 10023 | } |
| 10024 | |
| 10025 | SDValue Op0 = Op->getOperand(Num: 0); |
| 10026 | EVT VT = Op0.getValueType(); |
| 10027 | EVT EltVT = VT.getVectorElementType(); |
| 10028 | unsigned NumElts = VT.getVectorNumElements(); |
| 10029 | unsigned NumActiveLanes = NumElts; |
| 10030 | |
| 10031 | assert((NumActiveLanes == 16 || NumActiveLanes == 8 || NumActiveLanes == 4 || |
| 10032 | NumActiveLanes == 2) && |
| 10033 | "Only expected a power 2 vector size" ); |
| 10034 | |
// Repeatedly combine X with Rev(X) using the reduction op until 4 lanes
// remain. Going down to 4 vector elements allows us to easily extract the
// vector elements from the lanes.
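// For example (a sketch), a v8i16 vecreduce_mul first becomes roughly
//   x = mul x, vrev32(x)   ; 4 useful lanes remain
// and the remaining 4 lanes are then extracted and combined below.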
| 10037 | while (NumActiveLanes > 4) { |
| 10038 | unsigned RevOpcode = NumActiveLanes == 16 ? ARMISD::VREV16 : ARMISD::VREV32; |
| 10039 | SDValue Rev = DAG.getNode(Opcode: RevOpcode, DL: dl, VT, Operand: Op0); |
| 10040 | Op0 = DAG.getNode(Opcode: BaseOpcode, DL: dl, VT, N1: Op0, N2: Rev); |
| 10041 | NumActiveLanes /= 2; |
| 10042 | } |
| 10043 | |
| 10044 | SDValue Res; |
| 10045 | if (NumActiveLanes == 4) { |
// The remaining 4 elements are combined sequentially using the reduction op
| 10047 | SDValue Ext0 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
| 10048 | N2: DAG.getConstant(Val: 0 * NumElts / 4, DL: dl, VT: MVT::i32)); |
| 10049 | SDValue Ext1 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
| 10050 | N2: DAG.getConstant(Val: 1 * NumElts / 4, DL: dl, VT: MVT::i32)); |
| 10051 | SDValue Ext2 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
| 10052 | N2: DAG.getConstant(Val: 2 * NumElts / 4, DL: dl, VT: MVT::i32)); |
| 10053 | SDValue Ext3 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
| 10054 | N2: DAG.getConstant(Val: 3 * NumElts / 4, DL: dl, VT: MVT::i32)); |
| 10055 | SDValue Res0 = DAG.getNode(Opcode: BaseOpcode, DL: dl, VT: EltVT, N1: Ext0, N2: Ext1, Flags: Op->getFlags()); |
| 10056 | SDValue Res1 = DAG.getNode(Opcode: BaseOpcode, DL: dl, VT: EltVT, N1: Ext2, N2: Ext3, Flags: Op->getFlags()); |
| 10057 | Res = DAG.getNode(Opcode: BaseOpcode, DL: dl, VT: EltVT, N1: Res0, N2: Res1, Flags: Op->getFlags()); |
| 10058 | } else { |
| 10059 | SDValue Ext0 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
| 10060 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 10061 | SDValue Ext1 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
| 10062 | N2: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
| 10063 | Res = DAG.getNode(Opcode: BaseOpcode, DL: dl, VT: EltVT, N1: Ext0, N2: Ext1, Flags: Op->getFlags()); |
| 10064 | } |
| 10065 | |
| 10066 | // Result type may be wider than element type. |
| 10067 | if (EltVT != Op->getValueType(ResNo: 0)) |
| 10068 | Res = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL: dl, VT: Op->getValueType(ResNo: 0), Operand: Res); |
| 10069 | return Res; |
| 10070 | } |
| 10071 | |
| 10072 | static SDValue LowerVecReduceF(SDValue Op, SelectionDAG &DAG, |
| 10073 | const ARMSubtarget *ST) { |
| 10074 | if (!ST->hasMVEFloatOps()) |
| 10075 | return SDValue(); |
| 10076 | return LowerVecReduce(Op, DAG, ST); |
| 10077 | } |
| 10078 | |
| 10079 | static SDValue LowerVecReduceMinMax(SDValue Op, SelectionDAG &DAG, |
| 10080 | const ARMSubtarget *ST) { |
| 10081 | if (!ST->hasNEON()) |
| 10082 | return SDValue(); |
| 10083 | |
| 10084 | SDLoc dl(Op); |
| 10085 | SDValue Op0 = Op->getOperand(Num: 0); |
| 10086 | EVT VT = Op0.getValueType(); |
| 10087 | EVT EltVT = VT.getVectorElementType(); |
| 10088 | |
| 10089 | unsigned PairwiseIntrinsic = 0; |
| 10090 | switch (Op->getOpcode()) { |
| 10091 | default: |
| 10092 | llvm_unreachable("Expected VECREDUCE opcode" ); |
| 10093 | case ISD::VECREDUCE_UMIN: |
| 10094 | PairwiseIntrinsic = Intrinsic::arm_neon_vpminu; |
| 10095 | break; |
| 10096 | case ISD::VECREDUCE_UMAX: |
| 10097 | PairwiseIntrinsic = Intrinsic::arm_neon_vpmaxu; |
| 10098 | break; |
| 10099 | case ISD::VECREDUCE_SMIN: |
| 10100 | PairwiseIntrinsic = Intrinsic::arm_neon_vpmins; |
| 10101 | break; |
| 10102 | case ISD::VECREDUCE_SMAX: |
| 10103 | PairwiseIntrinsic = Intrinsic::arm_neon_vpmaxs; |
| 10104 | break; |
| 10105 | } |
| 10106 | SDValue PairwiseOp = DAG.getConstant(Val: PairwiseIntrinsic, DL: dl, VT: MVT::i32); |
| 10107 | |
| 10108 | unsigned NumElts = VT.getVectorNumElements(); |
| 10109 | unsigned NumActiveLanes = NumElts; |
| 10110 | |
| 10111 | assert((NumActiveLanes == 16 || NumActiveLanes == 8 || NumActiveLanes == 4 || |
| 10112 | NumActiveLanes == 2) && |
| 10113 | "Only expected a power 2 vector size" ); |
| 10114 | |
| 10115 | // Split 128-bit vectors, since vpmin/max takes 2 64-bit vectors. |
| 10116 | if (VT.is128BitVector()) { |
| 10117 | SDValue Lo, Hi; |
| 10118 | std::tie(args&: Lo, args&: Hi) = DAG.SplitVector(N: Op0, DL: dl); |
| 10119 | VT = Lo.getValueType(); |
| 10120 | Op0 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT, Ops: {PairwiseOp, Lo, Hi}); |
| 10121 | NumActiveLanes /= 2; |
| 10122 | } |
| 10123 | |
| 10124 | // Use pairwise reductions until one lane remains |
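// For example (a sketch), a v8i8 umax reduction repeatedly applies
//   vpmax.u8 d0, d0, d0
// halving the number of useful lanes each time, then extracts lane 0 below.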
| 10125 | while (NumActiveLanes > 1) { |
| 10126 | Op0 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT, Ops: {PairwiseOp, Op0, Op0}); |
| 10127 | NumActiveLanes /= 2; |
| 10128 | } |
| 10129 | |
| 10130 | SDValue Res = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
| 10131 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
| 10132 | |
| 10133 | // Result type may be wider than element type. |
| 10134 | if (EltVT != Op.getValueType()) { |
| 10135 | unsigned Extend = 0; |
| 10136 | switch (Op->getOpcode()) { |
| 10137 | default: |
| 10138 | llvm_unreachable("Expected VECREDUCE opcode" ); |
| 10139 | case ISD::VECREDUCE_UMIN: |
| 10140 | case ISD::VECREDUCE_UMAX: |
| 10141 | Extend = ISD::ZERO_EXTEND; |
| 10142 | break; |
| 10143 | case ISD::VECREDUCE_SMIN: |
| 10144 | case ISD::VECREDUCE_SMAX: |
| 10145 | Extend = ISD::SIGN_EXTEND; |
| 10146 | break; |
| 10147 | } |
| 10148 | Res = DAG.getNode(Opcode: Extend, DL: dl, VT: Op.getValueType(), Operand: Res); |
| 10149 | } |
| 10150 | return Res; |
| 10151 | } |
| 10152 | |
| 10153 | static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) { |
| 10154 | if (isStrongerThanMonotonic(AO: cast<AtomicSDNode>(Val&: Op)->getSuccessOrdering())) |
| 10155 | // Acquire/Release load/store is not legal for targets without a dmb or |
| 10156 | // equivalent available. |
| 10157 | return SDValue(); |
| 10158 | |
| 10159 | // Monotonic load/store is legal for all targets. |
| 10160 | return Op; |
| 10161 | } |
| 10162 | |
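| | /// Expand READCYCLECOUNTER into an MRC read of the 32-bit cycle counter and |
| | /// widen it to the expected i64 result with a zero high half. |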
| 10163 | static void ReplaceREADCYCLECOUNTER(SDNode *N, |
| 10164 | SmallVectorImpl<SDValue> &Results, |
| 10165 | SelectionDAG &DAG, |
| 10166 | const ARMSubtarget *Subtarget) { |
| 10167 | SDLoc DL(N); |
| 10168 | // Under Power Management extensions, the cycle-count is: |
| 10169 | // mrc p15, #0, <Rt>, c9, c13, #0 |
| 10170 | SDValue Ops[] = { N->getOperand(Num: 0), // Chain |
| 10171 | DAG.getTargetConstant(Val: Intrinsic::arm_mrc, DL, VT: MVT::i32), |
| 10172 | DAG.getTargetConstant(Val: 15, DL, VT: MVT::i32), |
| 10173 | DAG.getTargetConstant(Val: 0, DL, VT: MVT::i32), |
| 10174 | DAG.getTargetConstant(Val: 9, DL, VT: MVT::i32), |
| 10175 | DAG.getTargetConstant(Val: 13, DL, VT: MVT::i32), |
| 10176 | DAG.getTargetConstant(Val: 0, DL, VT: MVT::i32) |
| 10177 | }; |
| 10178 | |
| 10179 | SDValue Cycles32 = DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL, |
| 10180 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::Other), Ops); |
| 10181 | Results.push_back(Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL, VT: MVT::i64, N1: Cycles32, |
| 10182 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32))); |
| 10183 | Results.push_back(Elt: Cycles32.getValue(R: 1)); |
| 10184 | } |
| 10185 | |
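| | /// Build an untyped GPRPair value from two i32 values with a REG_SEQUENCE, |
| | /// placing V0 in gsub_0 and V1 in gsub_1. |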
| 10186 | static SDValue createGPRPairNode2xi32(SelectionDAG &DAG, SDValue V0, |
| 10187 | SDValue V1) { |
| 10188 | SDLoc dl(V0.getNode()); |
| 10189 | SDValue RegClass = |
| 10190 | DAG.getTargetConstant(Val: ARM::GPRPairRegClassID, DL: dl, VT: MVT::i32); |
| 10191 | SDValue SubReg0 = DAG.getTargetConstant(Val: ARM::gsub_0, DL: dl, VT: MVT::i32); |
| 10192 | SDValue SubReg1 = DAG.getTargetConstant(Val: ARM::gsub_1, DL: dl, VT: MVT::i32); |
| 10193 | const SDValue Ops[] = {RegClass, V0, SubReg0, V1, SubReg1}; |
| 10194 | return SDValue( |
| 10195 | DAG.getMachineNode(Opcode: TargetOpcode::REG_SEQUENCE, dl, VT: MVT::Untyped, Ops), 0); |
| 10196 | } |
| 10197 | |
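| | /// Build a GPRPair from an i64 value by splitting it into two i32 halves, |
| | /// swapping the halves on big-endian targets. |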
| 10198 | static SDValue createGPRPairNodei64(SelectionDAG &DAG, SDValue V) { |
| 10199 | SDLoc dl(V.getNode()); |
| 10200 | auto [VLo, VHi] = DAG.SplitScalar(N: V, DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
| 10201 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); |
| 10202 | if (isBigEndian) |
| 10203 | std::swap(a&: VLo, b&: VHi); |
| 10204 | return createGPRPairNode2xi32(DAG, V0: VLo, V1: VHi); |
| 10205 | } |
| 10206 | |
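| | /// Expand a 64-bit ATOMIC_CMP_SWAP into the CMP_SWAP_64 pseudo, which takes |
| | /// GPRPair operands, and rebuild the i64 result from the two 32-bit halves of |
| | /// the returned pair. |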
| 10207 | static void ReplaceCMP_SWAP_64Results(SDNode *N, |
| 10208 | SmallVectorImpl<SDValue> &Results, |
| 10209 | SelectionDAG &DAG) { |
| 10210 | assert(N->getValueType(0) == MVT::i64 && |
| 10211 | "AtomicCmpSwap on types less than 64 should be legal" ); |
| 10212 | SDValue Ops[] = { |
| 10213 | createGPRPairNode2xi32(DAG, V0: N->getOperand(Num: 1), |
| 10214 | V1: DAG.getUNDEF(VT: MVT::i32)), // pointer, temp |
| 10215 | createGPRPairNodei64(DAG, V: N->getOperand(Num: 2)), // expected |
| 10216 | createGPRPairNodei64(DAG, V: N->getOperand(Num: 3)), // new |
| 10217 | N->getOperand(Num: 0), // chain in |
| 10218 | }; |
| 10219 | SDNode *CmpSwap = DAG.getMachineNode( |
| 10220 | Opcode: ARM::CMP_SWAP_64, dl: SDLoc(N), |
| 10221 | VTs: DAG.getVTList(VT1: MVT::Untyped, VT2: MVT::Untyped, VT3: MVT::Other), Ops); |
| 10222 | |
| 10223 | MachineMemOperand *MemOp = cast<MemSDNode>(Val: N)->getMemOperand(); |
| 10224 | DAG.setNodeMemRefs(N: cast<MachineSDNode>(Val: CmpSwap), NewMemRefs: {MemOp}); |
| 10225 | |
| 10226 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); |
| 10227 | |
| 10228 | SDValue Lo = |
| 10229 | DAG.getTargetExtractSubreg(SRIdx: isBigEndian ? ARM::gsub_1 : ARM::gsub_0, |
| 10230 | DL: SDLoc(N), VT: MVT::i32, Operand: SDValue(CmpSwap, 0)); |
| 10231 | SDValue Hi = |
| 10232 | DAG.getTargetExtractSubreg(SRIdx: isBigEndian ? ARM::gsub_0 : ARM::gsub_1, |
| 10233 | DL: SDLoc(N), VT: MVT::i32, Operand: SDValue(CmpSwap, 0)); |
| 10234 | Results.push_back(Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: SDLoc(N), VT: MVT::i64, N1: Lo, N2: Hi)); |
| 10235 | Results.push_back(Elt: SDValue(CmpSwap, 2)); |
| 10236 | } |
| 10237 | |
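| | /// Lower STRICT_FSETCC/STRICT_FSETCCS. Unsupported float types are softened to |
| | /// a libcall followed by an integer SETCC; otherwise emit a VFP compare and one |
| | /// or two CMOVs (two when the FP condition maps to a pair of ARM conditions). |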
| 10238 | SDValue ARMTargetLowering::LowerFSETCC(SDValue Op, SelectionDAG &DAG) const { |
| 10239 | SDLoc dl(Op); |
| 10240 | EVT VT = Op.getValueType(); |
| 10241 | SDValue Chain = Op.getOperand(i: 0); |
| 10242 | SDValue LHS = Op.getOperand(i: 1); |
| 10243 | SDValue RHS = Op.getOperand(i: 2); |
| 10244 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 3))->get(); |
| 10245 | bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS; |
| 10246 | |
| 10247 | // If we don't have instructions of this float type then soften to a libcall |
| 10248 | // and use SETCC instead. |
| 10249 | if (isUnsupportedFloatingType(VT: LHS.getValueType())) { |
| 10250 | softenSetCCOperands(DAG, VT: LHS.getValueType(), NewLHS&: LHS, NewRHS&: RHS, CCCode&: CC, DL: dl, OldLHS: LHS, OldRHS: RHS, |
| 10251 | Chain, IsSignaling); |
| 10252 | if (!RHS.getNode()) { |
| 10253 | RHS = DAG.getConstant(Val: 0, DL: dl, VT: LHS.getValueType()); |
| 10254 | CC = ISD::SETNE; |
| 10255 | } |
| 10256 | SDValue Result = DAG.getNode(Opcode: ISD::SETCC, DL: dl, VT, N1: LHS, N2: RHS, |
| 10257 | N3: DAG.getCondCode(Cond: CC)); |
| 10258 | return DAG.getMergeValues(Ops: {Result, Chain}, dl); |
| 10259 | } |
| 10260 | |
| 10261 | ARMCC::CondCodes CondCode, CondCode2; |
| 10262 | FPCCToARMCC(CC, CondCode, CondCode2); |
| 10263 | |
| 10264 | SDValue True = DAG.getConstant(Val: 1, DL: dl, VT); |
| 10265 | SDValue False = DAG.getConstant(Val: 0, DL: dl, VT); |
| 10266 | SDValue ARMcc = DAG.getConstant(Val: CondCode, DL: dl, VT: MVT::i32); |
| 10267 | SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, Signaling: IsSignaling); |
| 10268 | SDValue Result = getCMOV(dl, VT, FalseVal: False, TrueVal: True, ARMcc, Flags: Cmp, DAG); |
| 10269 | if (CondCode2 != ARMCC::AL) { |
| 10270 | ARMcc = DAG.getConstant(Val: CondCode2, DL: dl, VT: MVT::i32); |
| 10271 | Result = getCMOV(dl, VT, FalseVal: Result, TrueVal: True, ARMcc, Flags: Cmp, DAG); |
| 10272 | } |
| 10273 | return DAG.getMergeValues(Ops: {Result, Chain}, dl); |
| 10274 | } |
| 10275 | |
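| | /// Lower SPONENTRY by creating a fixed frame object at offset 0 and returning |
| | /// its frame index, which corresponds to the stack pointer on function entry. |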
| 10276 | SDValue ARMTargetLowering::LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const { |
| 10277 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); |
| 10278 | |
| 10279 | EVT VT = getPointerTy(DL: DAG.getDataLayout()); |
| 10280 | int FI = MFI.CreateFixedObject(Size: 4, SPOffset: 0, IsImmutable: false); |
| 10281 | return DAG.getFrameIndex(FI, VT); |
| 10282 | } |
| 10283 | |
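| | /// Lower FP_TO_BF16 through the matching FP-round libcall, returning the |
| | /// result as an i32 bitcast of the f32-typed libcall return value. |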
| 10284 | SDValue ARMTargetLowering::LowerFP_TO_BF16(SDValue Op, |
| 10285 | SelectionDAG &DAG) const { |
| 10286 | SDLoc DL(Op); |
| 10287 | MakeLibCallOptions CallOptions; |
| 10288 | MVT SVT = Op.getOperand(i: 0).getSimpleValueType(); |
| 10289 | RTLIB::Libcall LC = RTLIB::getFPROUND(OpVT: SVT, RetVT: MVT::bf16); |
| 10290 | SDValue Res = |
| 10291 | makeLibCall(DAG, LC, RetVT: MVT::f32, Ops: Op.getOperand(i: 0), CallOptions, dl: DL).first; |
| 10292 | return DAG.getBitcast(VT: MVT::i32, V: Res); |
| 10293 | } |
| 10294 | |
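| | /// Lower UCMP/SCMP three-way comparisons (result is -1, 0 or 1). Thumb1 |
| | /// unsigned compares use a carry-based subtract/sbc sequence; otherwise a |
| | /// flag-setting subtract (or add, when RHS is a negation that can be folded) |
| | /// feeds two conditional moves that select 1 and -1. |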
| 10295 | SDValue ARMTargetLowering::LowerCMP(SDValue Op, SelectionDAG &DAG) const { |
| 10296 | SDLoc dl(Op); |
| 10297 | SDValue LHS = Op.getOperand(i: 0); |
| 10298 | SDValue RHS = Op.getOperand(i: 1); |
| 10299 | |
| 10300 | // Determine if this is signed or unsigned comparison |
| 10301 | bool IsSigned = (Op.getOpcode() == ISD::SCMP); |
| 10302 | |
| 10303 | // Special case for Thumb1 UCMP only |
| 10304 | if (!IsSigned && Subtarget->isThumb1Only()) { |
| 10305 | // For Thumb unsigned comparison, use this sequence: |
| 10306 | // subs r2, r0, r1 ; r2 = LHS - RHS, sets flags |
| 10307 | // sbc r2, r2 ; r2 = r2 - r2 - !carry |
| 10308 | // cmp r1, r0 ; compare RHS with LHS |
| 10309 | // sbc r1, r1 ; r1 = r1 - r1 - !carry |
| 10310 | // subs r0, r2, r1 ; r0 = r2 - r1 (final result) |
| 10311 | |
| 10312 | // First subtraction: LHS - RHS |
| 10313 | SDValue Sub1WithFlags = DAG.getNode( |
| 10314 | Opcode: ARMISD::SUBC, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: FlagsVT), N1: LHS, N2: RHS); |
| 10315 | SDValue Sub1Result = Sub1WithFlags.getValue(R: 0); |
| 10316 | SDValue Flags1 = Sub1WithFlags.getValue(R: 1); |
| 10317 | |
| 10318 | // SUBE: Sub1Result - Sub1Result - !carry |
| 10319 | // This gives 0 if LHS >= RHS (unsigned), -1 if LHS < RHS (unsigned) |
| 10320 | SDValue Sbc1 = |
| 10321 | DAG.getNode(Opcode: ARMISD::SUBE, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: FlagsVT), |
| 10322 | N1: Sub1Result, N2: Sub1Result, N3: Flags1); |
| 10323 | SDValue Sbc1Result = Sbc1.getValue(R: 0); |
| 10324 | |
| 10325 | // Second comparison: RHS vs LHS (reverse comparison) |
| 10326 | SDValue CmpFlags = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: FlagsVT, N1: RHS, N2: LHS); |
| 10327 | |
| 10328 | // SUBE: RHS - RHS - !carry |
| 10329 | // This gives 0 if RHS <= LHS (unsigned), -1 if RHS > LHS (unsigned) |
| 10330 | SDValue Sbc2 = DAG.getNode( |
| 10331 | Opcode: ARMISD::SUBE, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: FlagsVT), N1: RHS, N2: RHS, N3: CmpFlags); |
| 10332 | SDValue Sbc2Result = Sbc2.getValue(R: 0); |
| 10333 | |
| 10334 | // Final subtraction: Sbc1Result - Sbc2Result (no flags needed) |
| 10335 | SDValue Result = |
| 10336 | DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, N1: Sbc1Result, N2: Sbc2Result); |
| 10337 | if (Op.getValueType() != MVT::i32) |
| 10338 | Result = DAG.getSExtOrTrunc(Op: Result, DL: dl, VT: Op.getValueType()); |
| 10339 | |
| 10340 | return Result; |
| 10341 | } |
| 10342 | |
| 10343 | // For the ARM assembly pattern: |
| 10344 | // subs r0, r0, r1 ; subtract RHS from LHS and set flags |
| 10345 | // movgt r0, #1 ; if LHS > RHS, set result to 1 (GT for signed, HI for unsigned) |
| 10346 | // mvnlt r0, #0 ; if LHS < RHS, set result to -1 (LT for signed, LO for unsigned) |
| 10347 | //              ; if LHS == RHS, result remains 0 from the subs |
| 10349 | |
| 10350 | // Optimization: if RHS is a subtraction against 0, use ADDC instead of SUBC |
| 10351 | unsigned Opcode = ARMISD::SUBC; |
| 10352 | |
| 10353 | // Check if RHS is a subtraction against 0: (0 - X) |
| 10354 | if (RHS.getOpcode() == ISD::SUB) { |
| 10355 | SDValue SubLHS = RHS.getOperand(i: 0); |
| 10356 | SDValue SubRHS = RHS.getOperand(i: 1); |
| 10357 | |
| 10358 | // Check if it's 0 - X |
| 10359 | if (isNullConstant(V: SubLHS)) { |
| 10360 | bool CanUseAdd = false; |
| 10361 | if (IsSigned) { |
| 10362 | // For SCMP: only if X is known to never be INT_MIN (to avoid overflow) |
| 10363 | if (RHS->getFlags().hasNoSignedWrap() || !DAG.computeKnownBits(Op: SubRHS) |
| 10364 | .getSignedMinValue() |
| 10365 | .isMinSignedValue()) { |
| 10366 | CanUseAdd = true; |
| 10367 | } |
| 10368 | } else { |
| 10369 | // For UCMP: only if X is known to never be zero |
| 10370 | if (DAG.isKnownNeverZero(Op: SubRHS)) { |
| 10371 | CanUseAdd = true; |
| 10372 | } |
| 10373 | } |
| 10374 | |
| 10375 | if (CanUseAdd) { |
| 10376 | Opcode = ARMISD::ADDC; |
| 10377 | RHS = SubRHS; // Replace RHS with X, so we do LHS + X instead of |
| 10378 | // LHS - (0 - X) |
| 10379 | } |
| 10380 | } |
| 10381 | } |
| 10382 | |
| 10383 | // Generate the operation with flags |
| 10384 | SDValue OpWithFlags = |
| 10385 | DAG.getNode(Opcode, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: FlagsVT), N1: LHS, N2: RHS); |
| 10386 | |
| 10387 | SDValue OpResult = OpWithFlags.getValue(R: 0); |
| 10388 | SDValue Flags = OpWithFlags.getValue(R: 1); |
| 10389 | |
| 10390 | // Constants for conditional moves |
| 10391 | SDValue One = DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32); |
| 10392 | SDValue MinusOne = DAG.getAllOnesConstant(DL: dl, VT: MVT::i32); |
| 10393 | |
| 10394 | // Select condition codes based on signed vs unsigned |
| 10395 | ARMCC::CondCodes GTCond = IsSigned ? ARMCC::GT : ARMCC::HI; |
| 10396 | ARMCC::CondCodes LTCond = IsSigned ? ARMCC::LT : ARMCC::LO; |
| 10397 | |
| 10398 | // First conditional move: if greater than, set to 1 |
| 10399 | SDValue GTCondValue = DAG.getConstant(Val: GTCond, DL: dl, VT: MVT::i32); |
| 10400 | SDValue Result1 = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT: MVT::i32, N1: OpResult, N2: One, |
| 10401 | N3: GTCondValue, N4: Flags); |
| 10402 | |
| 10403 | // Second conditional move: if less than, set to -1 |
| 10404 | SDValue LTCondValue = DAG.getConstant(Val: LTCond, DL: dl, VT: MVT::i32); |
| 10405 | SDValue Result2 = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT: MVT::i32, N1: Result1, N2: MinusOne, |
| 10406 | N3: LTCondValue, N4: Flags); |
| 10407 | |
| 10408 | if (Op.getValueType() != MVT::i32) |
| 10409 | Result2 = DAG.getSExtOrTrunc(Op: Result2, DL: dl, VT: Op.getValueType()); |
| 10410 | |
| 10411 | return Result2; |
| 10412 | } |
| 10413 | |
| 10414 | SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { |
| 10415 | LLVM_DEBUG(dbgs() << "Lowering node: "; Op.dump()); |
| 10416 | switch (Op.getOpcode()) { |
| 10417 | default: llvm_unreachable("Don't know how to custom lower this!"); |
| 10418 | case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG); |
| 10419 | case ISD::ConstantPool: return LowerConstantPool(Op, DAG); |
| 10420 | case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); |
| 10421 | case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); |
| 10422 | case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); |
| 10423 | case ISD::SELECT: return LowerSELECT(Op, DAG); |
| 10424 | case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); |
| 10425 | case ISD::BRCOND: return LowerBRCOND(Op, DAG); |
| 10426 | case ISD::BR_CC: return LowerBR_CC(Op, DAG); |
| 10427 | case ISD::BR_JT: return LowerBR_JT(Op, DAG); |
| 10428 | case ISD::VASTART: return LowerVASTART(Op, DAG); |
| 10429 | case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget); |
| 10430 | case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); |
| 10431 | case ISD::SINT_TO_FP: |
| 10432 | case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); |
| 10433 | case ISD::STRICT_FP_TO_SINT: |
| 10434 | case ISD::STRICT_FP_TO_UINT: |
| 10435 | case ISD::FP_TO_SINT: |
| 10436 | case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); |
| 10437 | case ISD::FP_TO_SINT_SAT: |
| 10438 | case ISD::FP_TO_UINT_SAT: return LowerFP_TO_INT_SAT(Op, DAG, Subtarget); |
| 10439 | case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); |
| 10440 | case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); |
| 10441 | case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); |
| 10442 | case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); |
| 10443 | case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); |
| 10444 | case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG); |
| 10445 | case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG, Subtarget); |
| 10446 | case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, |
| 10447 | Subtarget); |
| 10448 | case ISD::BITCAST: return ExpandBITCAST(N: Op.getNode(), DAG, Subtarget); |
| 10449 | case ISD::SHL: |
| 10450 | case ISD::SRL: |
| 10451 | case ISD::SRA: return LowerShift(N: Op.getNode(), DAG, ST: Subtarget); |
| 10452 | case ISD::SREM: return LowerREM(N: Op.getNode(), DAG); |
| 10453 | case ISD::UREM: return LowerREM(N: Op.getNode(), DAG); |
| 10454 | case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); |
| 10455 | case ISD::SRL_PARTS: |
| 10456 | case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); |
| 10457 | case ISD::CTTZ: |
| 10458 | case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(N: Op.getNode(), DAG, ST: Subtarget); |
| 10459 | case ISD::CTPOP: return LowerCTPOP(N: Op.getNode(), DAG, ST: Subtarget); |
| 10460 | case ISD::SETCC: return LowerVSETCC(Op, DAG, ST: Subtarget); |
| 10461 | case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG); |
| 10462 | case ISD::ConstantFP: return LowerConstantFP(Op, DAG, ST: Subtarget); |
| 10463 | case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, ST: Subtarget); |
| 10464 | case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG, ST: Subtarget); |
| 10465 | case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG, ST: Subtarget); |
| 10466 | case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); |
| 10467 | case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG, ST: Subtarget); |
| 10468 | case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG, ST: Subtarget); |
| 10469 | case ISD::TRUNCATE: return LowerTruncate(N: Op.getNode(), DAG, Subtarget); |
| 10470 | case ISD::SIGN_EXTEND: |
| 10471 | case ISD::ZERO_EXTEND: return LowerVectorExtend(N: Op.getNode(), DAG, Subtarget); |
| 10472 | case ISD::GET_ROUNDING: return LowerGET_ROUNDING(Op, DAG); |
| 10473 | case ISD::SET_ROUNDING: return LowerSET_ROUNDING(Op, DAG); |
| 10474 | case ISD::SET_FPMODE: |
| 10475 | return LowerSET_FPMODE(Op, DAG); |
| 10476 | case ISD::RESET_FPMODE: |
| 10477 | return LowerRESET_FPMODE(Op, DAG); |
| 10478 | case ISD::MUL: return LowerMUL(Op, DAG); |
| 10479 | case ISD::SDIV: |
| 10480 | if (Subtarget->isTargetWindows() && !Op.getValueType().isVector()) |
| 10481 | return LowerDIV_Windows(Op, DAG, /* Signed */ true); |
| 10482 | return LowerSDIV(Op, DAG, ST: Subtarget); |
| 10483 | case ISD::UDIV: |
| 10484 | if (Subtarget->isTargetWindows() && !Op.getValueType().isVector()) |
| 10485 | return LowerDIV_Windows(Op, DAG, /* Signed */ false); |
| 10486 | return LowerUDIV(Op, DAG, ST: Subtarget); |
| 10487 | case ISD::UADDO_CARRY: |
| 10488 | case ISD::USUBO_CARRY: |
| 10489 | return LowerUADDSUBO_CARRY(Op, DAG); |
| 10490 | case ISD::SADDO: |
| 10491 | case ISD::SSUBO: |
| 10492 | return LowerSignedALUO(Op, DAG); |
| 10493 | case ISD::UADDO: |
| 10494 | case ISD::USUBO: |
| 10495 | return LowerUnsignedALUO(Op, DAG); |
| 10496 | case ISD::SADDSAT: |
| 10497 | case ISD::SSUBSAT: |
| 10498 | case ISD::UADDSAT: |
| 10499 | case ISD::USUBSAT: |
| 10500 | return LowerADDSUBSAT(Op, DAG, Subtarget); |
| 10501 | case ISD::LOAD: { |
| 10502 | auto *LD = cast<LoadSDNode>(Val&: Op); |
| 10503 | EVT MemVT = LD->getMemoryVT(); |
| 10504 | if (Subtarget->hasMVEIntegerOps() && |
| 10505 | (MemVT == MVT::v2i1 || MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || |
| 10506 | MemVT == MVT::v16i1)) |
| 10507 | return LowerPredicateLoad(Op, DAG); |
| 10508 | |
| 10509 | auto Pair = LowerAEABIUnalignedLoad(Op, DAG); |
| 10510 | if (Pair.first) |
| 10511 | return DAG.getMergeValues(Ops: {Pair.first, Pair.second}, dl: SDLoc(Pair.first)); |
| 10512 | return SDValue(); |
| 10513 | } |
| 10514 | case ISD::STORE: |
| 10515 | return LowerSTORE(Op, DAG, Subtarget); |
| 10516 | case ISD::MLOAD: |
| 10517 | return LowerMLOAD(Op, DAG); |
| 10518 | case ISD::VECREDUCE_MUL: |
| 10519 | case ISD::VECREDUCE_AND: |
| 10520 | case ISD::VECREDUCE_OR: |
| 10521 | case ISD::VECREDUCE_XOR: |
| 10522 | return LowerVecReduce(Op, DAG, ST: Subtarget); |
| 10523 | case ISD::VECREDUCE_FADD: |
| 10524 | case ISD::VECREDUCE_FMUL: |
| 10525 | case ISD::VECREDUCE_FMIN: |
| 10526 | case ISD::VECREDUCE_FMAX: |
| 10527 | return LowerVecReduceF(Op, DAG, ST: Subtarget); |
| 10528 | case ISD::VECREDUCE_UMIN: |
| 10529 | case ISD::VECREDUCE_UMAX: |
| 10530 | case ISD::VECREDUCE_SMIN: |
| 10531 | case ISD::VECREDUCE_SMAX: |
| 10532 | return LowerVecReduceMinMax(Op, DAG, ST: Subtarget); |
| 10533 | case ISD::ATOMIC_LOAD: |
| 10534 | case ISD::ATOMIC_STORE: |
| 10535 | return LowerAtomicLoadStore(Op, DAG); |
| 10536 | case ISD::SDIVREM: |
| 10537 | case ISD::UDIVREM: return LowerDivRem(Op, DAG); |
| 10538 | case ISD::DYNAMIC_STACKALLOC: |
| 10539 | if (Subtarget->isTargetWindows()) |
| 10540 | return LowerDYNAMIC_STACKALLOC(Op, DAG); |
| 10541 | llvm_unreachable("Don't know how to custom lower this!"); |
| 10542 | case ISD::STRICT_FP_ROUND: |
| 10543 | case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG); |
| 10544 | case ISD::STRICT_FP_EXTEND: |
| 10545 | case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); |
| 10546 | case ISD::STRICT_FSETCC: |
| 10547 | case ISD::STRICT_FSETCCS: return LowerFSETCC(Op, DAG); |
| 10548 | case ISD::SPONENTRY: |
| 10549 | return LowerSPONENTRY(Op, DAG); |
| 10550 | case ISD::FP_TO_BF16: |
| 10551 | return LowerFP_TO_BF16(Op, DAG); |
| 10552 | case ARMISD::WIN__DBZCHK: return SDValue(); |
| 10553 | case ISD::UCMP: |
| 10554 | case ISD::SCMP: |
| 10555 | return LowerCMP(Op, DAG); |
| 10556 | case ISD::ABS: |
| 10557 | return LowerABS(Op, DAG); |
| 10558 | case ISD::STRICT_LROUND: |
| 10559 | case ISD::STRICT_LLROUND: |
| 10560 | case ISD::STRICT_LRINT: |
| 10561 | case ISD::STRICT_LLRINT: { |
| 10562 | assert((Op.getOperand(1).getValueType() == MVT::f16 || |
| 10563 | Op.getOperand(1).getValueType() == MVT::bf16) && |
| 10564 | "Expected custom lowering of rounding operations only for f16" ); |
| 10565 | SDLoc DL(Op); |
| 10566 | SDValue Ext = DAG.getNode(Opcode: ISD::STRICT_FP_EXTEND, DL, ResultTys: {MVT::f32, MVT::Other}, |
| 10567 | Ops: {Op.getOperand(i: 0), Op.getOperand(i: 1)}); |
| 10568 | return DAG.getNode(Opcode: Op.getOpcode(), DL, ResultTys: {Op.getValueType(), MVT::Other}, |
| 10569 | Ops: {Ext.getValue(R: 1), Ext.getValue(R: 0)}); |
| 10570 | } |
| 10571 | } |
| 10572 | } |
| 10573 | |
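| | /// Replace the 64-bit ACLE SMLALD/SMLALDX/SMLSLD/SMLSLDX intrinsics with their |
| | /// ARMISD equivalents, splitting the i64 accumulator into two i32 halves and |
| | /// rebuilding the i64 result from the node's two i32 results. |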
| 10574 | static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results, |
| 10575 | SelectionDAG &DAG) { |
| 10576 | unsigned IntNo = N->getConstantOperandVal(Num: 0); |
| 10577 | unsigned Opc = 0; |
| 10578 | if (IntNo == Intrinsic::arm_smlald) |
| 10579 | Opc = ARMISD::SMLALD; |
| 10580 | else if (IntNo == Intrinsic::arm_smlaldx) |
| 10581 | Opc = ARMISD::SMLALDX; |
| 10582 | else if (IntNo == Intrinsic::arm_smlsld) |
| 10583 | Opc = ARMISD::SMLSLD; |
| 10584 | else if (IntNo == Intrinsic::arm_smlsldx) |
| 10585 | Opc = ARMISD::SMLSLDX; |
| 10586 | else |
| 10587 | return; |
| 10588 | |
| 10589 | SDLoc dl(N); |
| 10590 | SDValue Lo, Hi; |
| 10591 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: N->getOperand(Num: 3), DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
| 10592 | |
| 10593 | SDValue LongMul = DAG.getNode(Opcode: Opc, DL: dl, |
| 10594 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
| 10595 | N1: N->getOperand(Num: 1), N2: N->getOperand(Num: 2), |
| 10596 | N3: Lo, N4: Hi); |
| 10597 | Results.push_back(Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, |
| 10598 | N1: LongMul.getValue(R: 0), N2: LongMul.getValue(R: 1))); |
| 10599 | } |
| 10600 | |
| 10601 | /// ReplaceNodeResults - Replace the results of a node with an illegal result |
| 10602 | /// type with new values built out of custom code. |
| 10603 | void ARMTargetLowering::ReplaceNodeResults(SDNode *N, |
| 10604 | SmallVectorImpl<SDValue> &Results, |
| 10605 | SelectionDAG &DAG) const { |
| 10606 | SDValue Res; |
| 10607 | switch (N->getOpcode()) { |
| 10608 | default: |
| 10609 | llvm_unreachable("Don't know how to custom expand this!"); |
| 10610 | case ISD::READ_REGISTER: |
| 10611 | ExpandREAD_REGISTER(N, Results, DAG); |
| 10612 | break; |
| 10613 | case ISD::BITCAST: |
| 10614 | Res = ExpandBITCAST(N, DAG, Subtarget); |
| 10615 | break; |
| 10616 | case ISD::SRL: |
| 10617 | case ISD::SRA: |
| 10618 | case ISD::SHL: |
| 10619 | Res = Expand64BitShift(N, DAG, ST: Subtarget); |
| 10620 | break; |
| 10621 | case ISD::SREM: |
| 10622 | case ISD::UREM: |
| 10623 | Res = LowerREM(N, DAG); |
| 10624 | break; |
| 10625 | case ISD::SDIVREM: |
| 10626 | case ISD::UDIVREM: |
| 10627 | Res = LowerDivRem(Op: SDValue(N, 0), DAG); |
| 10628 | assert(Res.getNumOperands() == 2 && "DivRem needs two values"); |
| 10629 | Results.push_back(Elt: Res.getValue(R: 0)); |
| 10630 | Results.push_back(Elt: Res.getValue(R: 1)); |
| 10631 | return; |
| 10632 | case ISD::SADDSAT: |
| 10633 | case ISD::SSUBSAT: |
| 10634 | case ISD::UADDSAT: |
| 10635 | case ISD::USUBSAT: |
| 10636 | Res = LowerADDSUBSAT(Op: SDValue(N, 0), DAG, Subtarget); |
| 10637 | break; |
| 10638 | case ISD::READCYCLECOUNTER: |
| 10639 | ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget); |
| 10640 | return; |
| 10641 | case ISD::UDIV: |
| 10642 | case ISD::SDIV: |
| 10643 | assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows"); |
| 10644 | return ExpandDIV_Windows(Op: SDValue(N, 0), DAG, Signed: N->getOpcode() == ISD::SDIV, |
| 10645 | Results); |
| 10646 | case ISD::ATOMIC_CMP_SWAP: |
| 10647 | ReplaceCMP_SWAP_64Results(N, Results, DAG); |
| 10648 | return; |
| 10649 | case ISD::INTRINSIC_WO_CHAIN: |
| 10650 | return ReplaceLongIntrinsic(N, Results, DAG); |
| 10651 | case ISD::LOAD: |
| 10652 | LowerLOAD(N, Results, DAG); |
| 10653 | break; |
| 10654 | case ISD::STORE: |
| 10655 | Res = LowerAEABIUnalignedStore(Op: SDValue(N, 0), DAG); |
| 10656 | break; |
| 10657 | case ISD::TRUNCATE: |
| 10658 | Res = LowerTruncate(N, DAG, Subtarget); |
| 10659 | break; |
| 10660 | case ISD::SIGN_EXTEND: |
| 10661 | case ISD::ZERO_EXTEND: |
| 10662 | Res = LowerVectorExtend(N, DAG, Subtarget); |
| 10663 | break; |
| 10664 | case ISD::FP_TO_SINT_SAT: |
| 10665 | case ISD::FP_TO_UINT_SAT: |
| 10666 | Res = LowerFP_TO_INT_SAT(Op: SDValue(N, 0), DAG, Subtarget); |
| 10667 | break; |
| 10668 | } |
| 10669 | if (Res.getNode()) |
| 10670 | Results.push_back(Elt: Res); |
| 10671 | } |
| 10672 | |
| 10673 | //===----------------------------------------------------------------------===// |
| 10674 | // ARM Scheduler Hooks |
| 10675 | //===----------------------------------------------------------------------===// |
| 10676 | |
| 10677 | /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and |
| 10678 | /// registers the function context. |
| 10679 | void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI, |
| 10680 | MachineBasicBlock *MBB, |
| 10681 | MachineBasicBlock *DispatchBB, |
| 10682 | int FI) const { |
| 10683 | assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && |
| 10684 | "ROPI/RWPI not currently supported with SjLj" ); |
| 10685 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 10686 | DebugLoc dl = MI.getDebugLoc(); |
| 10687 | MachineFunction *MF = MBB->getParent(); |
| 10688 | MachineRegisterInfo *MRI = &MF->getRegInfo(); |
| 10689 | MachineConstantPool *MCP = MF->getConstantPool(); |
| 10690 | ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); |
| 10691 | const Function &F = MF->getFunction(); |
| 10692 | |
| 10693 | bool isThumb = Subtarget->isThumb(); |
| 10694 | bool isThumb2 = Subtarget->isThumb2(); |
| 10695 | |
| 10696 | unsigned PCLabelId = AFI->createPICLabelUId(); |
| 10697 | unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8; |
| 10698 | ARMConstantPoolValue *CPV = |
| 10699 | ARMConstantPoolMBB::Create(C&: F.getContext(), mbb: DispatchBB, ID: PCLabelId, PCAdj); |
| 10700 | unsigned CPI = MCP->getConstantPoolIndex(V: CPV, Alignment: Align(4)); |
| 10701 | |
| 10702 | const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass |
| 10703 | : &ARM::GPRRegClass; |
| 10704 | |
| 10705 | // Grab constant pool and fixed stack memory operands. |
| 10706 | MachineMemOperand *CPMMO = |
| 10707 | MF->getMachineMemOperand(PtrInfo: MachinePointerInfo::getConstantPool(MF&: *MF), |
| 10708 | F: MachineMemOperand::MOLoad, Size: 4, BaseAlignment: Align(4)); |
| 10709 | |
| 10710 | MachineMemOperand *FIMMOSt = |
| 10711 | MF->getMachineMemOperand(PtrInfo: MachinePointerInfo::getFixedStack(MF&: *MF, FI), |
| 10712 | F: MachineMemOperand::MOStore, Size: 4, BaseAlignment: Align(4)); |
| 10713 | |
| 10714 | // Load the address of the dispatch MBB into the jump buffer. |
| 10715 | if (isThumb2) { |
| 10716 | // Incoming value: jbuf |
| 10717 | // ldr.n r5, LCPI1_1 |
| 10718 | // orr r5, r5, #1 |
| 10719 | // add r5, pc |
| 10720 | // str r5, [$jbuf, #+4] ; &jbuf[1] |
| 10721 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 10722 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::t2LDRpci), DestReg: NewVReg1) |
| 10723 | .addConstantPoolIndex(Idx: CPI) |
| 10724 | .addMemOperand(MMO: CPMMO) |
| 10725 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10726 | // Set the low bit because of thumb mode. |
| 10727 | Register NewVReg2 = MRI->createVirtualRegister(RegClass: TRC); |
| 10728 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::t2ORRri), DestReg: NewVReg2) |
| 10729 | .addReg(RegNo: NewVReg1, Flags: RegState::Kill) |
| 10730 | .addImm(Val: 0x01) |
| 10731 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 10732 | .add(MO: condCodeOp()); |
| 10733 | Register NewVReg3 = MRI->createVirtualRegister(RegClass: TRC); |
| 10734 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tPICADD), DestReg: NewVReg3) |
| 10735 | .addReg(RegNo: NewVReg2, Flags: RegState::Kill) |
| 10736 | .addImm(Val: PCLabelId); |
| 10737 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::t2STRi12)) |
| 10738 | .addReg(RegNo: NewVReg3, Flags: RegState::Kill) |
| 10739 | .addFrameIndex(Idx: FI) |
| 10740 | .addImm(Val: 36) // &jbuf[1] :: pc |
| 10741 | .addMemOperand(MMO: FIMMOSt) |
| 10742 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10743 | } else if (isThumb) { |
| 10744 | // Incoming value: jbuf |
| 10745 | // ldr.n r1, LCPI1_4 |
| 10746 | // add r1, pc |
| 10747 | // mov r2, #1 |
| 10748 | // orrs r1, r2 |
| 10749 | // add r2, $jbuf, #+4 ; &jbuf[1] |
| 10750 | // str r1, [r2] |
| 10751 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 10752 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDRpci), DestReg: NewVReg1) |
| 10753 | .addConstantPoolIndex(Idx: CPI) |
| 10754 | .addMemOperand(MMO: CPMMO) |
| 10755 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10756 | Register NewVReg2 = MRI->createVirtualRegister(RegClass: TRC); |
| 10757 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tPICADD), DestReg: NewVReg2) |
| 10758 | .addReg(RegNo: NewVReg1, Flags: RegState::Kill) |
| 10759 | .addImm(Val: PCLabelId); |
| 10760 | // Set the low bit because of thumb mode. |
| 10761 | Register NewVReg3 = MRI->createVirtualRegister(RegClass: TRC); |
| 10762 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tMOVi8), DestReg: NewVReg3) |
| 10763 | .addReg(RegNo: ARM::CPSR, Flags: RegState::Define) |
| 10764 | .addImm(Val: 1) |
| 10765 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10766 | Register NewVReg4 = MRI->createVirtualRegister(RegClass: TRC); |
| 10767 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tORR), DestReg: NewVReg4) |
| 10768 | .addReg(RegNo: ARM::CPSR, Flags: RegState::Define) |
| 10769 | .addReg(RegNo: NewVReg2, Flags: RegState::Kill) |
| 10770 | .addReg(RegNo: NewVReg3, Flags: RegState::Kill) |
| 10771 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10772 | Register NewVReg5 = MRI->createVirtualRegister(RegClass: TRC); |
| 10773 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tADDframe), DestReg: NewVReg5) |
| 10774 | .addFrameIndex(Idx: FI) |
| 10775 | .addImm(Val: 36); // &jbuf[1] :: pc |
| 10776 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tSTRi)) |
| 10777 | .addReg(RegNo: NewVReg4, Flags: RegState::Kill) |
| 10778 | .addReg(RegNo: NewVReg5, Flags: RegState::Kill) |
| 10779 | .addImm(Val: 0) |
| 10780 | .addMemOperand(MMO: FIMMOSt) |
| 10781 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10782 | } else { |
| 10783 | // Incoming value: jbuf |
| 10784 | // ldr r1, LCPI1_1 |
| 10785 | // add r1, pc, r1 |
| 10786 | // str r1, [$jbuf, #+4] ; &jbuf[1] |
| 10787 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 10788 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::LDRi12), DestReg: NewVReg1) |
| 10789 | .addConstantPoolIndex(Idx: CPI) |
| 10790 | .addImm(Val: 0) |
| 10791 | .addMemOperand(MMO: CPMMO) |
| 10792 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10793 | Register NewVReg2 = MRI->createVirtualRegister(RegClass: TRC); |
| 10794 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::PICADD), DestReg: NewVReg2) |
| 10795 | .addReg(RegNo: NewVReg1, Flags: RegState::Kill) |
| 10796 | .addImm(Val: PCLabelId) |
| 10797 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10798 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::STRi12)) |
| 10799 | .addReg(RegNo: NewVReg2, Flags: RegState::Kill) |
| 10800 | .addFrameIndex(Idx: FI) |
| 10801 | .addImm(Val: 36) // &jbuf[1] :: pc |
| 10802 | .addMemOperand(MMO: FIMMOSt) |
| 10803 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10804 | } |
| 10805 | } |
| 10806 | |
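| | /// Emit the SjLj exception-handling dispatch block: load the call-site index |
| | /// from the function context, range-check it against the number of landing |
| | /// pads (branching to a trap block when out of range), and jump through an |
| | /// inline jump table to the selected landing pad. Invoke blocks and former |
| | /// landing pads are rewired so this dispatch block is the only EH pad. |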
| 10807 | void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, |
| 10808 | MachineBasicBlock *MBB) const { |
| 10809 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 10810 | DebugLoc dl = MI.getDebugLoc(); |
| 10811 | MachineFunction *MF = MBB->getParent(); |
| 10812 | MachineRegisterInfo *MRI = &MF->getRegInfo(); |
| 10813 | MachineFrameInfo &MFI = MF->getFrameInfo(); |
| 10814 | int FI = MFI.getFunctionContextIndex(); |
| 10815 | |
| 10816 | const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass |
| 10817 | : &ARM::GPRnopcRegClass; |
| 10818 | |
| 10819 | // Get a mapping of the call site numbers to all of the landing pads they're |
| 10820 | // associated with. |
| 10821 | DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2>> CallSiteNumToLPad; |
| 10822 | unsigned MaxCSNum = 0; |
| 10823 | for (MachineBasicBlock &BB : *MF) { |
| 10824 | if (!BB.isEHPad()) |
| 10825 | continue; |
| 10826 | |
| 10827 | // FIXME: We should assert that the EH_LABEL is the first MI in the landing |
| 10828 | // pad. |
| 10829 | for (MachineInstr &II : BB) { |
| 10830 | if (!II.isEHLabel()) |
| 10831 | continue; |
| 10832 | |
| 10833 | MCSymbol *Sym = II.getOperand(i: 0).getMCSymbol(); |
| 10834 | if (!MF->hasCallSiteLandingPad(Sym)) continue; |
| 10835 | |
| 10836 | SmallVectorImpl<unsigned> &CallSiteIdxs = MF->getCallSiteLandingPad(Sym); |
| 10837 | for (unsigned Idx : CallSiteIdxs) { |
| 10838 | CallSiteNumToLPad[Idx].push_back(Elt: &BB); |
| 10839 | MaxCSNum = std::max(a: MaxCSNum, b: Idx); |
| 10840 | } |
| 10841 | break; |
| 10842 | } |
| 10843 | } |
| 10844 | |
| 10845 | // Get an ordered list of the machine basic blocks for the jump table. |
| 10846 | std::vector<MachineBasicBlock*> LPadList; |
| 10847 | SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs; |
| 10848 | LPadList.reserve(n: CallSiteNumToLPad.size()); |
| 10849 | for (unsigned I = 1; I <= MaxCSNum; ++I) { |
| 10850 | SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I]; |
| 10851 | for (MachineBasicBlock *MBB : MBBList) { |
| 10852 | LPadList.push_back(x: MBB); |
| 10853 | InvokeBBs.insert_range(R: MBB->predecessors()); |
| 10854 | } |
| 10855 | } |
| 10856 | |
| 10857 | assert(!LPadList.empty() && |
| 10858 | "No landing pad destinations for the dispatch jump table!" ); |
| 10859 | |
| 10860 | // Create the jump table and associated information. |
| 10861 | MachineJumpTableInfo *JTI = |
| 10862 | MF->getOrCreateJumpTableInfo(JTEntryKind: MachineJumpTableInfo::EK_Inline); |
| 10863 | unsigned MJTI = JTI->createJumpTableIndex(DestBBs: LPadList); |
| 10864 | |
| 10865 | // Create the MBBs for the dispatch code. |
| 10866 | |
| 10867 | // Shove the dispatch's address into the return slot in the function context. |
| 10868 | MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock(); |
| 10869 | DispatchBB->setIsEHPad(); |
| 10870 | |
| 10871 | MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); |
| 10872 | |
| 10873 | BuildMI(BB: TrapBB, MIMD: dl, MCID: TII->get(Opcode: Subtarget->isThumb() ? ARM::tTRAP : ARM::TRAP)); |
| 10874 | DispatchBB->addSuccessor(Succ: TrapBB); |
| 10875 | |
| 10876 | MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock(); |
| 10877 | DispatchBB->addSuccessor(Succ: DispContBB); |
| 10878 | |
| 10879 | // Insert the new MBBs into the function. |
| 10880 | MF->insert(MBBI: MF->end(), MBB: DispatchBB); |
| 10881 | MF->insert(MBBI: MF->end(), MBB: DispContBB); |
| 10882 | MF->insert(MBBI: MF->end(), MBB: TrapBB); |
| 10883 | |
| 10884 | // Insert code into the entry block that creates and registers the function |
| 10885 | // context. |
| 10886 | SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI); |
| 10887 | |
| 10888 | MachineMemOperand *FIMMOLd = MF->getMachineMemOperand( |
| 10889 | PtrInfo: MachinePointerInfo::getFixedStack(MF&: *MF, FI), |
| 10890 | F: MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, Size: 4, BaseAlignment: Align(4)); |
| 10891 | |
| 10892 | MachineInstrBuilder MIB; |
| 10893 | MIB = BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::Int_eh_sjlj_dispatchsetup)); |
| 10894 | |
| 10895 | const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII); |
| 10896 | const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); |
| 10897 | |
| 10898 | // Add a register mask with no preserved registers. This results in all |
| 10899 | // registers being marked as clobbered. This can't work if the dispatch block |
| 10900 | // is in a Thumb1 function and is linked with ARM code which uses the FP |
| 10901 | // registers, as there is no way to preserve the FP registers in Thumb1 mode. |
| 10902 | MIB.addRegMask(Mask: RI.getSjLjDispatchPreservedMask(MF: *MF)); |
| 10903 | |
| 10904 | bool IsPositionIndependent = isPositionIndependent(); |
| 10905 | unsigned NumLPads = LPadList.size(); |
| 10906 | if (Subtarget->isThumb2()) { |
| 10907 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 10908 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2LDRi12), DestReg: NewVReg1) |
| 10909 | .addFrameIndex(Idx: FI) |
| 10910 | .addImm(Val: 4) |
| 10911 | .addMemOperand(MMO: FIMMOLd) |
| 10912 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10913 | |
| 10914 | if (NumLPads < 256) { |
| 10915 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2CMPri)) |
| 10916 | .addReg(RegNo: NewVReg1) |
| 10917 | .addImm(Val: LPadList.size()) |
| 10918 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10919 | } else { |
| 10920 | Register VReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 10921 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2MOVi16), DestReg: VReg1) |
| 10922 | .addImm(Val: NumLPads & 0xFFFF) |
| 10923 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10924 | |
| 10925 | unsigned VReg2 = VReg1; |
| 10926 | if ((NumLPads & 0xFFFF0000) != 0) { |
| 10927 | VReg2 = MRI->createVirtualRegister(RegClass: TRC); |
| 10928 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2MOVTi16), DestReg: VReg2) |
| 10929 | .addReg(RegNo: VReg1) |
| 10930 | .addImm(Val: NumLPads >> 16) |
| 10931 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10932 | } |
| 10933 | |
| 10934 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2CMPrr)) |
| 10935 | .addReg(RegNo: NewVReg1) |
| 10936 | .addReg(RegNo: VReg2) |
| 10937 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10938 | } |
| 10939 | |
| 10940 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2Bcc)) |
| 10941 | .addMBB(MBB: TrapBB) |
| 10942 | .addImm(Val: ARMCC::HI) |
| 10943 | .addReg(RegNo: ARM::CPSR); |
| 10944 | |
| 10945 | Register NewVReg3 = MRI->createVirtualRegister(RegClass: TRC); |
| 10946 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2LEApcrelJT), DestReg: NewVReg3) |
| 10947 | .addJumpTableIndex(Idx: MJTI) |
| 10948 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10949 | |
| 10950 | Register NewVReg4 = MRI->createVirtualRegister(RegClass: TRC); |
| 10951 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2ADDrs), DestReg: NewVReg4) |
| 10952 | .addReg(RegNo: NewVReg3, Flags: RegState::Kill) |
| 10953 | .addReg(RegNo: NewVReg1) |
| 10954 | .addImm(Val: ARM_AM::getSORegOpc(ShOp: ARM_AM::lsl, Imm: 2)) |
| 10955 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 10956 | .add(MO: condCodeOp()); |
| 10957 | |
| 10958 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2BR_JT)) |
| 10959 | .addReg(RegNo: NewVReg4, Flags: RegState::Kill) |
| 10960 | .addReg(RegNo: NewVReg1) |
| 10961 | .addJumpTableIndex(Idx: MJTI); |
| 10962 | } else if (Subtarget->isThumb()) { |
| 10963 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 10964 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDRspi), DestReg: NewVReg1) |
| 10965 | .addFrameIndex(Idx: FI) |
| 10966 | .addImm(Val: 1) |
| 10967 | .addMemOperand(MMO: FIMMOLd) |
| 10968 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10969 | |
| 10970 | if (NumLPads < 256) { |
| 10971 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tCMPi8)) |
| 10972 | .addReg(RegNo: NewVReg1) |
| 10973 | .addImm(Val: NumLPads) |
| 10974 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10975 | } else { |
| 10976 | MachineConstantPool *ConstantPool = MF->getConstantPool(); |
| 10977 | Type *Int32Ty = Type::getInt32Ty(C&: MF->getFunction().getContext()); |
| 10978 | const Constant *C = ConstantInt::get(Ty: Int32Ty, V: NumLPads); |
| 10979 | |
| 10980 | // MachineConstantPool wants an explicit alignment. |
| 10981 | Align Alignment = MF->getDataLayout().getPrefTypeAlign(Ty: Int32Ty); |
| 10982 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment); |
| 10983 | |
| 10984 | Register VReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 10985 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDRpci)) |
| 10986 | .addReg(RegNo: VReg1, Flags: RegState::Define) |
| 10987 | .addConstantPoolIndex(Idx) |
| 10988 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10989 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tCMPr)) |
| 10990 | .addReg(RegNo: NewVReg1) |
| 10991 | .addReg(RegNo: VReg1) |
| 10992 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 10993 | } |
| 10994 | |
| 10995 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tBcc)) |
| 10996 | .addMBB(MBB: TrapBB) |
| 10997 | .addImm(Val: ARMCC::HI) |
| 10998 | .addReg(RegNo: ARM::CPSR); |
| 10999 | |
| 11000 | Register NewVReg2 = MRI->createVirtualRegister(RegClass: TRC); |
| 11001 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tLSLri), DestReg: NewVReg2) |
| 11002 | .addReg(RegNo: ARM::CPSR, Flags: RegState::Define) |
| 11003 | .addReg(RegNo: NewVReg1) |
| 11004 | .addImm(Val: 2) |
| 11005 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11006 | |
| 11007 | Register NewVReg3 = MRI->createVirtualRegister(RegClass: TRC); |
| 11008 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tLEApcrelJT), DestReg: NewVReg3) |
| 11009 | .addJumpTableIndex(Idx: MJTI) |
| 11010 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11011 | |
| 11012 | Register NewVReg4 = MRI->createVirtualRegister(RegClass: TRC); |
| 11013 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tADDrr), DestReg: NewVReg4) |
| 11014 | .addReg(RegNo: ARM::CPSR, Flags: RegState::Define) |
| 11015 | .addReg(RegNo: NewVReg2, Flags: RegState::Kill) |
| 11016 | .addReg(RegNo: NewVReg3) |
| 11017 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11018 | |
| 11019 | MachineMemOperand *JTMMOLd = |
| 11020 | MF->getMachineMemOperand(PtrInfo: MachinePointerInfo::getJumpTable(MF&: *MF), |
| 11021 | F: MachineMemOperand::MOLoad, Size: 4, BaseAlignment: Align(4)); |
| 11022 | |
| 11023 | Register NewVReg5 = MRI->createVirtualRegister(RegClass: TRC); |
| 11024 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDRi), DestReg: NewVReg5) |
| 11025 | .addReg(RegNo: NewVReg4, Flags: RegState::Kill) |
| 11026 | .addImm(Val: 0) |
| 11027 | .addMemOperand(MMO: JTMMOLd) |
| 11028 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11029 | |
| 11030 | unsigned NewVReg6 = NewVReg5; |
| 11031 | if (IsPositionIndependent) { |
| 11032 | NewVReg6 = MRI->createVirtualRegister(RegClass: TRC); |
| 11033 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tADDrr), DestReg: NewVReg6) |
| 11034 | .addReg(RegNo: ARM::CPSR, Flags: RegState::Define) |
| 11035 | .addReg(RegNo: NewVReg5, Flags: RegState::Kill) |
| 11036 | .addReg(RegNo: NewVReg3) |
| 11037 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11038 | } |
| 11039 | |
| 11040 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tBR_JTr)) |
| 11041 | .addReg(RegNo: NewVReg6, Flags: RegState::Kill) |
| 11042 | .addJumpTableIndex(Idx: MJTI); |
| 11043 | } else { |
| 11044 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 11045 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::LDRi12), DestReg: NewVReg1) |
| 11046 | .addFrameIndex(Idx: FI) |
| 11047 | .addImm(Val: 4) |
| 11048 | .addMemOperand(MMO: FIMMOLd) |
| 11049 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11050 | |
| 11051 | if (NumLPads < 256) { |
| 11052 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::CMPri)) |
| 11053 | .addReg(RegNo: NewVReg1) |
| 11054 | .addImm(Val: NumLPads) |
| 11055 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11056 | } else if (Subtarget->hasV6T2Ops() && isUInt<16>(x: NumLPads)) { |
| 11057 | Register VReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 11058 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::MOVi16), DestReg: VReg1) |
| 11059 | .addImm(Val: NumLPads & 0xFFFF) |
| 11060 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11061 | |
| 11062 | unsigned VReg2 = VReg1; |
| 11063 | if ((NumLPads & 0xFFFF0000) != 0) { |
| 11064 | VReg2 = MRI->createVirtualRegister(RegClass: TRC); |
| 11065 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::MOVTi16), DestReg: VReg2) |
| 11066 | .addReg(RegNo: VReg1) |
| 11067 | .addImm(Val: NumLPads >> 16) |
| 11068 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11069 | } |
| 11070 | |
| 11071 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::CMPrr)) |
| 11072 | .addReg(RegNo: NewVReg1) |
| 11073 | .addReg(RegNo: VReg2) |
| 11074 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11075 | } else { |
| 11076 | MachineConstantPool *ConstantPool = MF->getConstantPool(); |
| 11077 | Type *Int32Ty = Type::getInt32Ty(C&: MF->getFunction().getContext()); |
| 11078 | const Constant *C = ConstantInt::get(Ty: Int32Ty, V: NumLPads); |
| 11079 | |
| 11080 | // MachineConstantPool wants an explicit alignment. |
| 11081 | Align Alignment = MF->getDataLayout().getPrefTypeAlign(Ty: Int32Ty); |
| 11082 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment); |
| 11083 | |
| 11084 | Register VReg1 = MRI->createVirtualRegister(RegClass: TRC); |
| 11085 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::LDRcp)) |
| 11086 | .addReg(RegNo: VReg1, Flags: RegState::Define) |
| 11087 | .addConstantPoolIndex(Idx) |
| 11088 | .addImm(Val: 0) |
| 11089 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11090 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::CMPrr)) |
| 11091 | .addReg(RegNo: NewVReg1) |
| 11092 | .addReg(RegNo: VReg1, Flags: RegState::Kill) |
| 11093 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11094 | } |
| 11095 | |
| 11096 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::Bcc)) |
| 11097 | .addMBB(MBB: TrapBB) |
| 11098 | .addImm(Val: ARMCC::HI) |
| 11099 | .addReg(RegNo: ARM::CPSR); |
| 11100 | |
| 11101 | Register NewVReg3 = MRI->createVirtualRegister(RegClass: TRC); |
| 11102 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::MOVsi), DestReg: NewVReg3) |
| 11103 | .addReg(RegNo: NewVReg1) |
| 11104 | .addImm(Val: ARM_AM::getSORegOpc(ShOp: ARM_AM::lsl, Imm: 2)) |
| 11105 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11106 | .add(MO: condCodeOp()); |
| 11107 | Register NewVReg4 = MRI->createVirtualRegister(RegClass: TRC); |
| 11108 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::LEApcrelJT), DestReg: NewVReg4) |
| 11109 | .addJumpTableIndex(Idx: MJTI) |
| 11110 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11111 | |
| 11112 | MachineMemOperand *JTMMOLd = |
| 11113 | MF->getMachineMemOperand(PtrInfo: MachinePointerInfo::getJumpTable(MF&: *MF), |
| 11114 | F: MachineMemOperand::MOLoad, Size: 4, BaseAlignment: Align(4)); |
| 11115 | Register NewVReg5 = MRI->createVirtualRegister(RegClass: TRC); |
| 11116 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::LDRrs), DestReg: NewVReg5) |
| 11117 | .addReg(RegNo: NewVReg3, Flags: RegState::Kill) |
| 11118 | .addReg(RegNo: NewVReg4) |
| 11119 | .addImm(Val: 0) |
| 11120 | .addMemOperand(MMO: JTMMOLd) |
| 11121 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11122 | |
| 11123 | if (IsPositionIndependent) { |
| 11124 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::BR_JTadd)) |
| 11125 | .addReg(RegNo: NewVReg5, Flags: RegState::Kill) |
| 11126 | .addReg(RegNo: NewVReg4) |
| 11127 | .addJumpTableIndex(Idx: MJTI); |
| 11128 | } else { |
| 11129 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::BR_JTr)) |
| 11130 | .addReg(RegNo: NewVReg5, Flags: RegState::Kill) |
| 11131 | .addJumpTableIndex(Idx: MJTI); |
| 11132 | } |
| 11133 | } |
| 11134 | |
| 11135 | // Add the jump table entries as successors to the MBB. |
| 11136 | SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs; |
| 11137 | for (MachineBasicBlock *CurMBB : LPadList) { |
| 11138 | if (SeenMBBs.insert(Ptr: CurMBB).second) |
| 11139 | DispContBB->addSuccessor(Succ: CurMBB); |
| 11140 | } |
| 11141 | |
| 11142 | // N.B. the order the invoke BBs are processed in doesn't matter here. |
| 11143 | const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF); |
| 11144 | SmallVector<MachineBasicBlock*, 64> MBBLPads; |
| 11145 | for (MachineBasicBlock *BB : InvokeBBs) { |
| 11146 | |
| 11147 | // Remove the landing pad successor from the invoke block and replace it |
| 11148 | // with the new dispatch block. |
| 11149 | SmallVector<MachineBasicBlock*, 4> Successors(BB->successors()); |
| 11150 | while (!Successors.empty()) { |
| 11151 | MachineBasicBlock *SMBB = Successors.pop_back_val(); |
| 11152 | if (SMBB->isEHPad()) { |
| 11153 | BB->removeSuccessor(Succ: SMBB); |
| 11154 | MBBLPads.push_back(Elt: SMBB); |
| 11155 | } |
| 11156 | } |
| 11157 | |
| 11158 | BB->addSuccessor(Succ: DispatchBB, Prob: BranchProbability::getZero()); |
| 11159 | BB->normalizeSuccProbs(); |
| 11160 | |
| 11161 | // Find the invoke call and mark all of the callee-saved registers as |
| 11162 | // 'implicit defined' so that they're spilled. This prevents code from |
| 11163 | // moving instructions to before the EH block, where they will never be |
| 11164 | // executed. |
| 11165 | for (MachineBasicBlock::reverse_iterator |
| 11166 | II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) { |
| 11167 | if (!II->isCall()) continue; |
| 11168 | |
| 11169 | DenseSet<unsigned> DefRegs; |
| 11170 | for (MachineInstr::mop_iterator |
| 11171 | OI = II->operands_begin(), OE = II->operands_end(); |
| 11172 | OI != OE; ++OI) { |
| 11173 | if (!OI->isReg()) continue; |
| 11174 | DefRegs.insert(V: OI->getReg()); |
| 11175 | } |
| 11176 | |
| 11177 | MachineInstrBuilder MIB(*MF, &*II); |
| 11178 | |
| 11179 | for (unsigned i = 0; SavedRegs[i] != 0; ++i) { |
| 11180 | unsigned Reg = SavedRegs[i]; |
| 11181 | if (Subtarget->isThumb2() && |
| 11182 | !ARM::tGPRRegClass.contains(Reg) && |
| 11183 | !ARM::hGPRRegClass.contains(Reg)) |
| 11184 | continue; |
| 11185 | if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg)) |
| 11186 | continue; |
| 11187 | if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg)) |
| 11188 | continue; |
| 11189 | if (!DefRegs.contains(V: Reg)) |
| 11190 | MIB.addReg(RegNo: Reg, Flags: RegState::ImplicitDefine | RegState::Dead); |
| 11191 | } |
| 11192 | |
| 11193 | break; |
| 11194 | } |
| 11195 | } |
| 11196 | |
| 11197 | // Mark all former landing pads as non-landing pads. The dispatch is the only |
| 11198 | // landing pad now. |
| 11199 | for (MachineBasicBlock *MBBLPad : MBBLPads) |
| 11200 | MBBLPad->setIsEHPad(false); |
| 11201 | |
| 11202 | // The instruction is gone now. |
| 11203 | MI.eraseFromParent(); |
| 11204 | } |
| 11205 | |
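| | /// Return the successor of MBB other than Succ; MBB is expected to have |
| | /// exactly two successors. |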
| 11206 | static |
| 11207 | MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { |
| 11208 | for (MachineBasicBlock *S : MBB->successors()) |
| 11209 | if (S != Succ) |
| 11210 | return S; |
| 11211 | llvm_unreachable("Expecting a BB with two successors!" ); |
| 11212 | } |
| 11213 | |
| 11214 | /// Return the load opcode for a given load size. If load size >= 8, |
| 11215 | /// a NEON opcode will be returned. |
| 11216 | static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) { |
| 11217 | if (LdSize >= 8) |
| 11218 | return LdSize == 16 ? ARM::VLD1q32wb_fixed |
| 11219 | : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0; |
| 11220 | if (IsThumb1) |
| 11221 | return LdSize == 4 ? ARM::tLDRi |
| 11222 | : LdSize == 2 ? ARM::tLDRHi |
| 11223 | : LdSize == 1 ? ARM::tLDRBi : 0; |
| 11224 | if (IsThumb2) |
| 11225 | return LdSize == 4 ? ARM::t2LDR_POST |
| 11226 | : LdSize == 2 ? ARM::t2LDRH_POST |
| 11227 | : LdSize == 1 ? ARM::t2LDRB_POST : 0; |
| 11228 | return LdSize == 4 ? ARM::LDR_POST_IMM |
| 11229 | : LdSize == 2 ? ARM::LDRH_POST |
| 11230 | : LdSize == 1 ? ARM::LDRB_POST_IMM : 0; |
| 11231 | } |
| 11232 | |
| 11233 | /// Return the store opcode for a given store size. If store size >= 8, |
| 11234 | /// a NEON opcode will be returned. |
| 11235 | static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) { |
| 11236 | if (StSize >= 8) |
| 11237 | return StSize == 16 ? ARM::VST1q32wb_fixed |
| 11238 | : StSize == 8 ? ARM::VST1d32wb_fixed : 0; |
| 11239 | if (IsThumb1) |
| 11240 | return StSize == 4 ? ARM::tSTRi |
| 11241 | : StSize == 2 ? ARM::tSTRHi |
| 11242 | : StSize == 1 ? ARM::tSTRBi : 0; |
| 11243 | if (IsThumb2) |
| 11244 | return StSize == 4 ? ARM::t2STR_POST |
| 11245 | : StSize == 2 ? ARM::t2STRH_POST |
| 11246 | : StSize == 1 ? ARM::t2STRB_POST : 0; |
| 11247 | return StSize == 4 ? ARM::STR_POST_IMM |
| 11248 | : StSize == 2 ? ARM::STRH_POST |
| 11249 | : StSize == 1 ? ARM::STRB_POST_IMM : 0; |
| 11250 | } |
| 11251 | |
| 11252 | /// Emit a post-increment load operation with given size. The instructions |
| 11253 | /// will be added to BB at Pos. |
| 11254 | static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, |
| 11255 | const TargetInstrInfo *TII, const DebugLoc &dl, |
| 11256 | unsigned LdSize, unsigned Data, unsigned AddrIn, |
| 11257 | unsigned AddrOut, bool IsThumb1, bool IsThumb2) { |
| 11258 | unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2); |
| 11259 | assert(LdOpc != 0 && "Should have a load opcode"); |
| 11260 | if (LdSize >= 8) { |
| 11261 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: LdOpc), DestReg: Data) |
| 11262 | .addReg(RegNo: AddrOut, Flags: RegState::Define) |
| 11263 | .addReg(RegNo: AddrIn) |
| 11264 | .addImm(Val: 0) |
| 11265 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11266 | } else if (IsThumb1) { |
| 11267 | // load + update AddrIn |
| 11268 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: LdOpc), DestReg: Data) |
| 11269 | .addReg(RegNo: AddrIn) |
| 11270 | .addImm(Val: 0) |
| 11271 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11272 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: ARM::tADDi8), DestReg: AddrOut) |
| 11273 | .add(MO: t1CondCodeOp()) |
| 11274 | .addReg(RegNo: AddrIn) |
| 11275 | .addImm(Val: LdSize) |
| 11276 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11277 | } else if (IsThumb2) { |
| 11278 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: LdOpc), DestReg: Data) |
| 11279 | .addReg(RegNo: AddrOut, Flags: RegState::Define) |
| 11280 | .addReg(RegNo: AddrIn) |
| 11281 | .addImm(Val: LdSize) |
| 11282 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11283 | } else { // arm |
| 11284 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: LdOpc), DestReg: Data) |
| 11285 | .addReg(RegNo: AddrOut, Flags: RegState::Define) |
| 11286 | .addReg(RegNo: AddrIn) |
| 11287 | .addReg(RegNo: 0) |
| 11288 | .addImm(Val: LdSize) |
| 11289 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11290 | } |
| 11291 | } |
| 11292 | |
| 11293 | /// Emit a post-increment store operation with given size. The instructions |
| 11294 | /// will be added to BB at Pos. |
| 11295 | static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, |
| 11296 | const TargetInstrInfo *TII, const DebugLoc &dl, |
| 11297 | unsigned StSize, unsigned Data, unsigned AddrIn, |
| 11298 | unsigned AddrOut, bool IsThumb1, bool IsThumb2) { |
| 11299 | unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2); |
| 11300 | assert(StOpc != 0 && "Should have a store opcode"); |
| 11301 | if (StSize >= 8) { |
| 11302 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: StOpc), DestReg: AddrOut) |
| 11303 | .addReg(RegNo: AddrIn) |
| 11304 | .addImm(Val: 0) |
| 11305 | .addReg(RegNo: Data) |
| 11306 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11307 | } else if (IsThumb1) { |
| 11308 | // store + update AddrIn |
| 11309 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: StOpc)) |
| 11310 | .addReg(RegNo: Data) |
| 11311 | .addReg(RegNo: AddrIn) |
| 11312 | .addImm(Val: 0) |
| 11313 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11314 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: ARM::tADDi8), DestReg: AddrOut) |
| 11315 | .add(MO: t1CondCodeOp()) |
| 11316 | .addReg(RegNo: AddrIn) |
| 11317 | .addImm(Val: StSize) |
| 11318 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11319 | } else if (IsThumb2) { |
| 11320 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: StOpc), DestReg: AddrOut) |
| 11321 | .addReg(RegNo: Data) |
| 11322 | .addReg(RegNo: AddrIn) |
| 11323 | .addImm(Val: StSize) |
| 11324 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11325 | } else { // arm |
| 11326 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: StOpc), DestReg: AddrOut) |
| 11327 | .addReg(RegNo: Data) |
| 11328 | .addReg(RegNo: AddrIn) |
| 11329 | .addReg(RegNo: 0) |
| 11330 | .addImm(Val: StSize) |
| 11331 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11332 | } |
| 11333 | } |
| 11334 | |
| 11335 | MachineBasicBlock * |
| 11336 | ARMTargetLowering::EmitStructByval(MachineInstr &MI, |
| 11337 | MachineBasicBlock *BB) const { |
| 11338 | // This pseudo instruction has 4 operands: dst, src, size, alignment. |
| 11339 | // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold(). |
| 11340 | // Otherwise, we will generate unrolled scalar copies. |
| 11341 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 11342 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); |
| 11343 | MachineFunction::iterator It = ++BB->getIterator(); |
| 11344 | |
| 11345 | Register dest = MI.getOperand(i: 0).getReg(); |
| 11346 | Register src = MI.getOperand(i: 1).getReg(); |
| 11347 | unsigned SizeVal = MI.getOperand(i: 2).getImm(); |
| 11348 | unsigned Alignment = MI.getOperand(i: 3).getImm(); |
| 11349 | DebugLoc dl = MI.getDebugLoc(); |
| 11350 | |
| 11351 | MachineFunction *MF = BB->getParent(); |
| 11352 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
| 11353 | unsigned UnitSize = 0; |
| 11354 | const TargetRegisterClass *TRC = nullptr; |
| 11355 | const TargetRegisterClass *VecTRC = nullptr; |
| 11356 | |
| 11357 | bool IsThumb1 = Subtarget->isThumb1Only(); |
| 11358 | bool IsThumb2 = Subtarget->isThumb2(); |
| 11359 | bool IsThumb = Subtarget->isThumb(); |
| 11360 | |
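| | // Pick the widest copy unit the alignment allows: bytes or halfwords for |
| | // under-aligned copies, otherwise words, or NEON D/Q registers when usable. |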
| 11361 | if (Alignment & 1) { |
| 11362 | UnitSize = 1; |
| 11363 | } else if (Alignment & 2) { |
| 11364 | UnitSize = 2; |
| 11365 | } else { |
| 11366 | // Check whether we can use NEON instructions. |
| 11367 | if (!MF->getFunction().hasFnAttribute(Kind: Attribute::NoImplicitFloat) && |
| 11368 | Subtarget->hasNEON()) { |
| 11369 | if ((Alignment % 16 == 0) && SizeVal >= 16) |
| 11370 | UnitSize = 16; |
| 11371 | else if ((Alignment % 8 == 0) && SizeVal >= 8) |
| 11372 | UnitSize = 8; |
| 11373 | } |
| 11374 | // If NEON can't be used, fall back to word-sized copies. |
| 11375 | if (UnitSize == 0) |
| 11376 | UnitSize = 4; |
| 11377 | } |
| 11378 | |
| 11379 | // Select the correct opcode and register class for unit size load/store |
| 11380 | bool IsNeon = UnitSize >= 8; |
| 11381 | TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass; |
| 11382 | if (IsNeon) |
| 11383 | VecTRC = UnitSize == 16 ? &ARM::DPairRegClass |
| 11384 | : UnitSize == 8 ? &ARM::DPRRegClass |
| 11385 | : nullptr; |
| 11386 | |
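| | // Split the copy into UnitSize-sized chunks plus a byte-by-byte tail. |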
| 11387 | unsigned BytesLeft = SizeVal % UnitSize; |
| 11388 | unsigned LoopSize = SizeVal - BytesLeft; |
| 11389 | |
| 11390 | if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) { |
| 11391 | // Use LDR and STR to copy. |
| 11392 | // [scratch, srcOut] = LDR_POST(srcIn, UnitSize) |
| 11393 | // [destOut] = STR_POST(scratch, destIn, UnitSize) |
| 11394 | unsigned srcIn = src; |
| 11395 | unsigned destIn = dest; |
| 11396 | for (unsigned i = 0; i < LoopSize; i+=UnitSize) { |
| 11397 | Register srcOut = MRI.createVirtualRegister(RegClass: TRC); |
| 11398 | Register destOut = MRI.createVirtualRegister(RegClass: TRC); |
| 11399 | Register scratch = MRI.createVirtualRegister(RegClass: IsNeon ? VecTRC : TRC); |
| 11400 | emitPostLd(BB, Pos: MI, TII, dl, LdSize: UnitSize, Data: scratch, AddrIn: srcIn, AddrOut: srcOut, |
| 11401 | IsThumb1, IsThumb2); |
| 11402 | emitPostSt(BB, Pos: MI, TII, dl, StSize: UnitSize, Data: scratch, AddrIn: destIn, AddrOut: destOut, |
| 11403 | IsThumb1, IsThumb2); |
| 11404 | srcIn = srcOut; |
| 11405 | destIn = destOut; |
| 11406 | } |
| 11407 | |
| 11408 | // Handle the leftover bytes with LDRB and STRB. |
| 11409 | // [scratch, srcOut] = LDRB_POST(srcIn, 1) |
| 11410 | // [destOut] = STRB_POST(scratch, destIn, 1) |
| 11411 | for (unsigned i = 0; i < BytesLeft; i++) { |
| 11412 | Register srcOut = MRI.createVirtualRegister(RegClass: TRC); |
| 11413 | Register destOut = MRI.createVirtualRegister(RegClass: TRC); |
| 11414 | Register scratch = MRI.createVirtualRegister(RegClass: TRC); |
| 11415 | emitPostLd(BB, Pos: MI, TII, dl, LdSize: 1, Data: scratch, AddrIn: srcIn, AddrOut: srcOut, |
| 11416 | IsThumb1, IsThumb2); |
| 11417 | emitPostSt(BB, Pos: MI, TII, dl, StSize: 1, Data: scratch, AddrIn: destIn, AddrOut: destOut, |
| 11418 | IsThumb1, IsThumb2); |
| 11419 | srcIn = srcOut; |
| 11420 | destIn = destOut; |
| 11421 | } |
| 11422 | MI.eraseFromParent(); // The instruction is gone now. |
| 11423 | return BB; |
| 11424 | } |
| 11425 | |
| 11426 | // Expand the pseudo op to a loop. |
| 11427 | // thisMBB: |
| 11428 | // ... |
| 11429 | // movw varEnd, # --> with thumb2 |
| 11430 | // movt varEnd, # |
| 11431 | // ldrcp varEnd, idx --> without thumb2 |
| 11432 | // fallthrough --> loopMBB |
| 11433 | // loopMBB: |
| 11434 | // PHI varPhi, varEnd, varLoop |
| 11435 | // PHI srcPhi, src, srcLoop |
| 11436 | // PHI destPhi, dst, destLoop |
| 11437 | // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) |
| 11438 | // [destLoop] = STR_POST(scratch, destPhi, UnitSize) |
| 11439 | // subs varLoop, varPhi, #UnitSize |
| 11440 | // bne loopMBB |
| 11441 | // fallthrough --> exitMBB |
| 11442 | // exitMBB: |
| 11443 | // epilogue to handle left-over bytes |
| 11444 | // [scratch, srcOut] = LDRB_POST(srcLoop, 1) |
| 11445 | // [destOut] = STRB_POST(scratch, destLoop, 1) |
| 11446 | MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(BB: LLVM_BB); |
| 11447 | MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(BB: LLVM_BB); |
| 11448 | MF->insert(MBBI: It, MBB: loopMBB); |
| 11449 | MF->insert(MBBI: It, MBB: exitMBB); |
| 11450 | |
| 11451 | // Set the call frame size on entry to the new basic blocks. |
| 11452 | unsigned CallFrameSize = TII->getCallFrameSizeAt(MI); |
| 11453 | loopMBB->setCallFrameSize(CallFrameSize); |
| 11454 | exitMBB->setCallFrameSize(CallFrameSize); |
| 11455 | |
| 11456 | // Transfer the remainder of BB and its successor edges to exitMBB. |
| 11457 | exitMBB->splice(Where: exitMBB->begin(), Other: BB, |
| 11458 | From: std::next(x: MachineBasicBlock::iterator(MI)), To: BB->end()); |
| 11459 | exitMBB->transferSuccessorsAndUpdatePHIs(FromMBB: BB); |
| 11460 | |
| 11461 | // Load an immediate to varEnd. |
| 11462 | Register varEnd = MRI.createVirtualRegister(RegClass: TRC); |
| 11463 | if (Subtarget->useMovt()) { |
| 11464 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: IsThumb ? ARM::t2MOVi32imm : ARM::MOVi32imm), |
| 11465 | DestReg: varEnd) |
| 11466 | .addImm(Val: LoopSize); |
| 11467 | } else if (Subtarget->genExecuteOnly()) { |
| 11468 | assert(IsThumb && "Non-thumb expected to have used movt" ); |
| 11469 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::tMOVi32imm), DestReg: varEnd).addImm(Val: LoopSize); |
| 11470 | } else { |
| 11471 | MachineConstantPool *ConstantPool = MF->getConstantPool(); |
| 11472 | Type *Int32Ty = Type::getInt32Ty(C&: MF->getFunction().getContext()); |
| 11473 | const Constant *C = ConstantInt::get(Ty: Int32Ty, V: LoopSize); |
| 11474 | |
| 11475 | // MachineConstantPool wants an explicit alignment. |
| 11476 | Align Alignment = MF->getDataLayout().getPrefTypeAlign(Ty: Int32Ty); |
| 11477 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment); |
| 11478 | MachineMemOperand *CPMMO = |
| 11479 | MF->getMachineMemOperand(PtrInfo: MachinePointerInfo::getConstantPool(MF&: *MF), |
| 11480 | F: MachineMemOperand::MOLoad, Size: 4, BaseAlignment: Align(4)); |
| 11481 | |
| 11482 | if (IsThumb) |
| 11483 | BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDRpci)) |
| 11484 | .addReg(RegNo: varEnd, Flags: RegState::Define) |
| 11485 | .addConstantPoolIndex(Idx) |
| 11486 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11487 | .addMemOperand(MMO: CPMMO); |
| 11488 | else |
| 11489 | BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::LDRcp)) |
| 11490 | .addReg(RegNo: varEnd, Flags: RegState::Define) |
| 11491 | .addConstantPoolIndex(Idx) |
| 11492 | .addImm(Val: 0) |
| 11493 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11494 | .addMemOperand(MMO: CPMMO); |
| 11495 | } |
| 11496 | BB->addSuccessor(Succ: loopMBB); |
| 11497 | |
| 11498 | // Generate the loop body: |
| 11499 | // varPhi = PHI(varLoop, varEnd) |
| 11500 | // srcPhi = PHI(srcLoop, src) |
| 11501 | // destPhi = PHI(destLoop, dst) |
| 11502 | MachineBasicBlock *entryBB = BB; |
| 11503 | BB = loopMBB; |
| 11504 | Register varLoop = MRI.createVirtualRegister(RegClass: TRC); |
| 11505 | Register varPhi = MRI.createVirtualRegister(RegClass: TRC); |
| 11506 | Register srcLoop = MRI.createVirtualRegister(RegClass: TRC); |
| 11507 | Register srcPhi = MRI.createVirtualRegister(RegClass: TRC); |
| 11508 | Register destLoop = MRI.createVirtualRegister(RegClass: TRC); |
| 11509 | Register destPhi = MRI.createVirtualRegister(RegClass: TRC); |
| 11510 | |
| 11511 | BuildMI(BB&: *BB, I: BB->begin(), MIMD: dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: varPhi) |
| 11512 | .addReg(RegNo: varLoop).addMBB(MBB: loopMBB) |
| 11513 | .addReg(RegNo: varEnd).addMBB(MBB: entryBB); |
| 11514 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: srcPhi) |
| 11515 | .addReg(RegNo: srcLoop).addMBB(MBB: loopMBB) |
| 11516 | .addReg(RegNo: src).addMBB(MBB: entryBB); |
| 11517 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: destPhi) |
| 11518 | .addReg(RegNo: destLoop).addMBB(MBB: loopMBB) |
| 11519 | .addReg(RegNo: dest).addMBB(MBB: entryBB); |
| 11520 | |
| 11521 | // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) |
| 11522 | // [destLoop] = STR_POST(scratch, destPhi, UnitSize) |
| 11523 | Register scratch = MRI.createVirtualRegister(RegClass: IsNeon ? VecTRC : TRC); |
| 11524 | emitPostLd(BB, Pos: BB->end(), TII, dl, LdSize: UnitSize, Data: scratch, AddrIn: srcPhi, AddrOut: srcLoop, |
| 11525 | IsThumb1, IsThumb2); |
| 11526 | emitPostSt(BB, Pos: BB->end(), TII, dl, StSize: UnitSize, Data: scratch, AddrIn: destPhi, AddrOut: destLoop, |
| 11527 | IsThumb1, IsThumb2); |
| 11528 | |
| 11529 | // Decrement loop variable by UnitSize. |
| 11530 | if (IsThumb1) { |
| 11531 | BuildMI(BB&: *BB, I: BB->end(), MIMD: dl, MCID: TII->get(Opcode: ARM::tSUBi8), DestReg: varLoop) |
| 11532 | .add(MO: t1CondCodeOp()) |
| 11533 | .addReg(RegNo: varPhi) |
| 11534 | .addImm(Val: UnitSize) |
| 11535 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11536 | } else { |
| 11537 | MachineInstrBuilder MIB = |
| 11538 | BuildMI(BB&: *BB, I: BB->end(), MIMD: dl, |
| 11539 | MCID: TII->get(Opcode: IsThumb2 ? ARM::t2SUBri : ARM::SUBri), DestReg: varLoop); |
| 11540 | MIB.addReg(RegNo: varPhi) |
| 11541 | .addImm(Val: UnitSize) |
| 11542 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11543 | .add(MO: condCodeOp()); |
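| | // Operand 5 is the optional cc_out operand added by condCodeOp(); make it |
| | // define CPSR so the subtract sets the flags tested by the branch below. |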
| 11544 | MIB->getOperand(i: 5).setReg(ARM::CPSR); |
| 11545 | MIB->getOperand(i: 5).setIsDef(true); |
| 11546 | } |
| 11547 | BuildMI(BB&: *BB, I: BB->end(), MIMD: dl, |
| 11548 | MCID: TII->get(Opcode: IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc)) |
| 11549 | .addMBB(MBB: loopMBB).addImm(Val: ARMCC::NE).addReg(RegNo: ARM::CPSR); |
| 11550 | |
| 11551 | // loopMBB can loop back to loopMBB or fall through to exitMBB. |
| 11552 | BB->addSuccessor(Succ: loopMBB); |
| 11553 | BB->addSuccessor(Succ: exitMBB); |
| 11554 | |
| 11555 | // Add epilogue to handle BytesLeft. |
| 11556 | BB = exitMBB; |
| 11557 | auto StartOfExit = exitMBB->begin(); |
| 11558 | |
| 11559 | // [scratch, srcOut] = LDRB_POST(srcLoop, 1) |
| 11560 | // [destOut] = STRB_POST(scratch, destLoop, 1) |
| 11561 | unsigned srcIn = srcLoop; |
| 11562 | unsigned destIn = destLoop; |
| 11563 | for (unsigned i = 0; i < BytesLeft; i++) { |
| 11564 | Register srcOut = MRI.createVirtualRegister(RegClass: TRC); |
| 11565 | Register destOut = MRI.createVirtualRegister(RegClass: TRC); |
| 11566 | Register scratch = MRI.createVirtualRegister(RegClass: TRC); |
| 11567 | emitPostLd(BB, Pos: StartOfExit, TII, dl, LdSize: 1, Data: scratch, AddrIn: srcIn, AddrOut: srcOut, |
| 11568 | IsThumb1, IsThumb2); |
| 11569 | emitPostSt(BB, Pos: StartOfExit, TII, dl, StSize: 1, Data: scratch, AddrIn: destIn, AddrOut: destOut, |
| 11570 | IsThumb1, IsThumb2); |
| 11571 | srcIn = srcOut; |
| 11572 | destIn = destOut; |
| 11573 | } |
| 11574 | |
| 11575 | MI.eraseFromParent(); // The instruction is gone now. |
| 11576 | return BB; |
| 11577 | } |
| 11578 | |
| 11579 | MachineBasicBlock * |
| 11580 | ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI, |
| 11581 | MachineBasicBlock *MBB) const { |
| 11582 | const TargetMachine &TM = getTargetMachine(); |
| 11583 | const TargetInstrInfo &TII = *Subtarget->getInstrInfo(); |
| 11584 | DebugLoc DL = MI.getDebugLoc(); |
| 11585 | |
| 11586 | assert(Subtarget->isTargetWindows() && |
| 11587 | "__chkstk is only supported on Windows" ); |
| 11588 | assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode" ); |
| 11589 | |
| 11590 | // __chkstk takes the number of words to allocate on the stack in R4, and |
| 11591 | // returns the stack adjustment in number of bytes in R4. This will not |
| 11592 | // clobber any other registers (other than the obvious lr). |
| 11593 | // |
| 11594 | // Although, technically, IP should be considered a register which may be |
| 11595 | // clobbered, the call itself will not touch it. Windows on ARM is a pure |
| 11596 | // thumb-2 environment, so there is no interworking required. As a result, we |
| 11597 | // do not expect a veneer to be emitted by the linker, clobbering IP. |
| 11598 | // |
| 11599 | // Each module receives its own copy of __chkstk, so no import thunk is |
| 11600 | // required, again, ensuring that IP is not clobbered. |
| 11601 | // |
| 11602 | // Finally, although some linkers may theoretically provide a trampoline for |
| 11603 | // out of range calls (which is quite common due to a 32M range limitation of |
| 11604 | // branches for Thumb), we can generate the long-call version via |
| 11605 | // -mcmodel=large, alleviating the need for the trampoline which may clobber |
| 11606 | // IP. |
| 11607 | |
| 11608 | RTLIB::LibcallImpl ChkStkLibcall = getLibcallImpl(Call: RTLIB::STACK_PROBE); |
| 11609 | if (ChkStkLibcall == RTLIB::Unsupported) |
| 11610 | reportFatalUsageError(reason: "no available implementation of __chkstk" ); |
| 11611 | |
| 11612 | const char *ChkStk = getLibcallImplName(Call: ChkStkLibcall).data(); |
| 11613 | switch (TM.getCodeModel()) { |
| 11614 | case CodeModel::Tiny: |
| 11615 | llvm_unreachable("Tiny code model not available on ARM." ); |
| 11616 | case CodeModel::Small: |
| 11617 | case CodeModel::Medium: |
| 11618 | case CodeModel::Kernel: |
| 11619 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: ARM::tBL)) |
| 11620 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11621 | .addExternalSymbol(FnName: ChkStk) |
| 11622 | .addReg(RegNo: ARM::R4, Flags: RegState::Implicit | RegState::Kill) |
| 11623 | .addReg(RegNo: ARM::R4, Flags: RegState::Implicit | RegState::Define) |
| 11624 | .addReg(RegNo: ARM::R12, |
| 11625 | Flags: RegState::Implicit | RegState::Define | RegState::Dead) |
| 11626 | .addReg(RegNo: ARM::CPSR, |
| 11627 | Flags: RegState::Implicit | RegState::Define | RegState::Dead); |
| 11628 | break; |
| 11629 | case CodeModel::Large: { |
| 11630 | MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); |
| 11631 | Register Reg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11632 | |
| 11633 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: ARM::t2MOVi32imm), DestReg: Reg) |
| 11634 | .addExternalSymbol(FnName: ChkStk); |
| 11635 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: gettBLXrOpcode(MF: *MBB->getParent()))) |
| 11636 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11637 | .addReg(RegNo: Reg, Flags: RegState::Kill) |
| 11638 | .addReg(RegNo: ARM::R4, Flags: RegState::Implicit | RegState::Kill) |
| 11639 | .addReg(RegNo: ARM::R4, Flags: RegState::Implicit | RegState::Define) |
| 11640 | .addReg(RegNo: ARM::R12, |
| 11641 | Flags: RegState::Implicit | RegState::Define | RegState::Dead) |
| 11642 | .addReg(RegNo: ARM::CPSR, |
| 11643 | Flags: RegState::Implicit | RegState::Define | RegState::Dead); |
| 11644 | break; |
| 11645 | } |
| 11646 | } |
| 11647 | |
| 11648 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: ARM::t2SUBrr), DestReg: ARM::SP) |
| 11649 | .addReg(RegNo: ARM::SP, Flags: RegState::Kill) |
| 11650 | .addReg(RegNo: ARM::R4, Flags: RegState::Kill) |
| 11651 | .setMIFlags(MachineInstr::FrameSetup) |
| 11652 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11653 | .add(MO: condCodeOp()); |
| 11654 | |
| 11655 | MI.eraseFromParent(); |
| 11656 | return MBB; |
| 11657 | } |
| 11658 | |
| 11659 | MachineBasicBlock * |
| 11660 | ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI, |
| 11661 | MachineBasicBlock *MBB) const { |
| 11662 | DebugLoc DL = MI.getDebugLoc(); |
| 11663 | MachineFunction *MF = MBB->getParent(); |
| 11664 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 11665 | |
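| | // Split off a continuation block for the code after the pseudo, and create a |
| | // trap block holding the Windows divide-by-zero breakpoint; the compare below |
| | // branches to the trap when the divisor is zero. |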
| 11666 | MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock(); |
| 11667 | MF->insert(MBBI: ++MBB->getIterator(), MBB: ContBB); |
| 11668 | ContBB->splice(Where: ContBB->begin(), Other: MBB, |
| 11669 | From: std::next(x: MachineBasicBlock::iterator(MI)), To: MBB->end()); |
| 11670 | ContBB->transferSuccessorsAndUpdatePHIs(FromMBB: MBB); |
| 11671 | MBB->addSuccessor(Succ: ContBB); |
| 11672 | |
| 11673 | MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); |
| 11674 | BuildMI(BB: TrapBB, MIMD: DL, MCID: TII->get(Opcode: ARM::t__brkdiv0)); |
| 11675 | MF->push_back(MBB: TrapBB); |
| 11676 | MBB->addSuccessor(Succ: TrapBB); |
| 11677 | |
| 11678 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII->get(Opcode: ARM::tCMPi8)) |
| 11679 | .addReg(RegNo: MI.getOperand(i: 0).getReg()) |
| 11680 | .addImm(Val: 0) |
| 11681 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11682 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII->get(Opcode: ARM::t2Bcc)) |
| 11683 | .addMBB(MBB: TrapBB) |
| 11684 | .addImm(Val: ARMCC::EQ) |
| 11685 | .addReg(RegNo: ARM::CPSR); |
| 11686 | |
| 11687 | MI.eraseFromParent(); |
| 11688 | return ContBB; |
| 11689 | } |
| 11690 | |
| 11691 | // The CPSR operand of SelectItr might be missing a kill marker |
| 11692 | // because there were multiple uses of CPSR, and ISel didn't know |
| 11693 | // which to mark. Figure out whether SelectItr should have had a |
| 11694 | // kill marker, and set it if it should. Returns the correct kill |
| 11695 | // marker value. |
| 11696 | static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr, |
| 11697 | MachineBasicBlock* BB, |
| 11698 | const TargetRegisterInfo* TRI) { |
| 11699 | // Scan forward through BB for a use/def of CPSR. |
| 11700 | MachineBasicBlock::iterator miI(std::next(x: SelectItr)); |
| 11701 | for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) { |
| 11702 | const MachineInstr& mi = *miI; |
| 11703 | if (mi.readsRegister(Reg: ARM::CPSR, /*TRI=*/nullptr)) |
| 11704 | return false; |
| 11705 | if (mi.definesRegister(Reg: ARM::CPSR, /*TRI=*/nullptr)) |
| 11706 | break; // Should have kill-flag - update below. |
| 11707 | } |
| 11708 | |
| 11709 | // If we hit the end of the block, check whether CPSR is live into a |
| 11710 | // successor. |
| 11711 | if (miI == BB->end()) { |
| 11712 | for (MachineBasicBlock *Succ : BB->successors()) |
| 11713 | if (Succ->isLiveIn(Reg: ARM::CPSR)) |
| 11714 | return false; |
| 11715 | } |
| 11716 | |
| 11717 | // We found a def, or hit the end of the basic block and CPSR wasn't live |
| 11718 | // out. SelectMI should have a kill flag on CPSR. |
| 11719 | SelectItr->addRegisterKilled(IncomingReg: ARM::CPSR, RegInfo: TRI); |
| 11720 | return true; |
| 11721 | } |
| 11722 | |
| 11723 | /// Adds logic in loop entry MBB to calculate loop iteration count and adds |
| 11724 | /// t2WhileLoopSetup and t2WhileLoopStart to generate a WLS loop. |
| 11725 | static Register genTPEntry(MachineBasicBlock *TpEntry, |
| 11726 | MachineBasicBlock *TpLoopBody, |
| 11727 | MachineBasicBlock *TpExit, Register OpSizeReg, |
| 11728 | const TargetInstrInfo *TII, DebugLoc Dl, |
| 11729 | MachineRegisterInfo &MRI) { |
| 11730 | // Calculates loop iteration count = ceil(n/16) = (n + 15) >> 4. |
| 11731 | Register AddDestReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11732 | BuildMI(BB: TpEntry, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2ADDri), DestReg: AddDestReg) |
| 11733 | .addUse(RegNo: OpSizeReg) |
| 11734 | .addImm(Val: 15) |
| 11735 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11736 | .addReg(RegNo: 0); |
| 11737 | |
| 11738 | Register LsrDestReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11739 | BuildMI(BB: TpEntry, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2LSRri), DestReg: LsrDestReg) |
| 11740 | .addUse(RegNo: AddDestReg, Flags: RegState::Kill) |
| 11741 | .addImm(Val: 4) |
| 11742 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11743 | .addReg(RegNo: 0); |
| 11744 | |
| 11745 | Register TotalIterationsReg = MRI.createVirtualRegister(RegClass: &ARM::GPRlrRegClass); |
| 11746 | BuildMI(BB: TpEntry, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2WhileLoopSetup), DestReg: TotalIterationsReg) |
| 11747 | .addUse(RegNo: LsrDestReg, Flags: RegState::Kill); |
| 11748 | |
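| | // t2WhileLoopStart branches to TpExit when the iteration count is zero; |
| | // otherwise control falls through to the branch into the loop body. |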
| 11749 | BuildMI(BB: TpEntry, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2WhileLoopStart)) |
| 11750 | .addUse(RegNo: TotalIterationsReg) |
| 11751 | .addMBB(MBB: TpExit); |
| 11752 | |
| 11753 | BuildMI(BB: TpEntry, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2B)) |
| 11754 | .addMBB(MBB: TpLoopBody) |
| 11755 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11756 | |
| 11757 | return TotalIterationsReg; |
| 11758 | } |
| 11759 | |
| 11760 | /// Adds logic in the loopBody MBB to generate MVE_VCTP, t2LoopDec and |
| 11761 | /// t2LoopEnd. These are used by later passes to generate tail predicated |
| 11762 | /// loops. |
| 11763 | static void genTPLoopBody(MachineBasicBlock *TpLoopBody, |
| 11764 | MachineBasicBlock *TpEntry, MachineBasicBlock *TpExit, |
| 11765 | const TargetInstrInfo *TII, DebugLoc Dl, |
| 11766 | MachineRegisterInfo &MRI, Register OpSrcReg, |
| 11767 | Register OpDestReg, Register ElementCountReg, |
| 11768 | Register TotalIterationsReg, bool IsMemcpy) { |
| 11769 | // First insert PHI nodes for: the current Src pointer (memcpy only), the |
| 11770 | // current Dest pointer, the loop iteration counter and the predication counter. |
| 11771 | |
| 11772 | Register SrcPhiReg, CurrSrcReg; |
| 11773 | if (IsMemcpy) { |
| 11774 | // Current position in the src array |
| 11775 | SrcPhiReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11776 | CurrSrcReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11777 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: SrcPhiReg) |
| 11778 | .addUse(RegNo: OpSrcReg) |
| 11779 | .addMBB(MBB: TpEntry) |
| 11780 | .addUse(RegNo: CurrSrcReg) |
| 11781 | .addMBB(MBB: TpLoopBody); |
| 11782 | } |
| 11783 | |
| 11784 | // Current position in the dest array |
| 11785 | Register DestPhiReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11786 | Register CurrDestReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11787 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: DestPhiReg) |
| 11788 | .addUse(RegNo: OpDestReg) |
| 11789 | .addMBB(MBB: TpEntry) |
| 11790 | .addUse(RegNo: CurrDestReg) |
| 11791 | .addMBB(MBB: TpLoopBody); |
| 11792 | |
| 11793 | // Current loop counter |
| 11794 | Register LoopCounterPhiReg = MRI.createVirtualRegister(RegClass: &ARM::GPRlrRegClass); |
| 11795 | Register RemainingLoopIterationsReg = |
| 11796 | MRI.createVirtualRegister(RegClass: &ARM::GPRlrRegClass); |
| 11797 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: LoopCounterPhiReg) |
| 11798 | .addUse(RegNo: TotalIterationsReg) |
| 11799 | .addMBB(MBB: TpEntry) |
| 11800 | .addUse(RegNo: RemainingLoopIterationsReg) |
| 11801 | .addMBB(MBB: TpLoopBody); |
| 11802 | |
| 11803 | // Predication counter |
| 11804 | Register PredCounterPhiReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11805 | Register RemainingElementsReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
| 11806 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: PredCounterPhiReg) |
| 11807 | .addUse(RegNo: ElementCountReg) |
| 11808 | .addMBB(MBB: TpEntry) |
| 11809 | .addUse(RegNo: RemainingElementsReg) |
| 11810 | .addMBB(MBB: TpLoopBody); |
| 11811 | |
| 11812 | // Pass predication counter to VCTP |
| 11813 | Register VccrReg = MRI.createVirtualRegister(RegClass: &ARM::VCCRRegClass); |
| 11814 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::MVE_VCTP8), DestReg: VccrReg) |
| 11815 | .addUse(RegNo: PredCounterPhiReg) |
| 11816 | .addImm(Val: ARMVCC::None) |
| 11817 | .addReg(RegNo: 0) |
| 11818 | .addReg(RegNo: 0); |
| 11819 | |
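| | // Each iteration processes up to 16 byte lanes, so decrement the predication |
| | // counter by 16. |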
| 11820 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2SUBri), DestReg: RemainingElementsReg) |
| 11821 | .addUse(RegNo: PredCounterPhiReg) |
| 11822 | .addImm(Val: 16) |
| 11823 | .add(MOs: predOps(Pred: ARMCC::AL)) |
| 11824 | .addReg(RegNo: 0); |
| 11825 | |
| 11826 | // VLDRB (only if memcpy) and VSTRB instructions, predicated using VPR |
| 11827 | Register SrcValueReg; |
| 11828 | if (IsMemcpy) { |
| 11829 | SrcValueReg = MRI.createVirtualRegister(RegClass: &ARM::MQPRRegClass); |
| 11830 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::MVE_VLDRBU8_post)) |
| 11831 | .addDef(RegNo: CurrSrcReg) |
| 11832 | .addDef(RegNo: SrcValueReg) |
| 11833 | .addReg(RegNo: SrcPhiReg) |
| 11834 | .addImm(Val: 16) |
| 11835 | .addImm(Val: ARMVCC::Then) |
| 11836 | .addUse(RegNo: VccrReg) |
| 11837 | .addReg(RegNo: 0); |
| 11838 | } else |
| 11839 | SrcValueReg = OpSrcReg; |
| 11840 | |
| 11841 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::MVE_VSTRBU8_post)) |
| 11842 | .addDef(RegNo: CurrDestReg) |
| 11843 | .addUse(RegNo: SrcValueReg) |
| 11844 | .addReg(RegNo: DestPhiReg) |
| 11845 | .addImm(Val: 16) |
| 11846 | .addImm(Val: ARMVCC::Then) |
| 11847 | .addUse(RegNo: VccrReg) |
| 11848 | .addReg(RegNo: 0); |
| 11849 | |
| 11850 | // Add the pseudo instructions for decrementing the loop counter and marking |
| 11851 | // the end: t2LoopDec and t2LoopEnd. |
| 11852 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2LoopDec), DestReg: RemainingLoopIterationsReg) |
| 11853 | .addUse(RegNo: LoopCounterPhiReg) |
| 11854 | .addImm(Val: 1); |
| 11855 | |
| 11856 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2LoopEnd)) |
| 11857 | .addUse(RegNo: RemainingLoopIterationsReg) |
| 11858 | .addMBB(MBB: TpLoopBody); |
| 11859 | |
| 11860 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2B)) |
| 11861 | .addMBB(MBB: TpExit) |
| 11862 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 11863 | } |
| 11864 | |
| 11865 | bool ARMTargetLowering::supportKCFIBundles() const { |
| 11866 | // KCFI is supported in all ARM/Thumb modes |
| 11867 | return true; |
| 11868 | } |
| 11869 | |
| 11870 | MachineInstr * |
| 11871 | ARMTargetLowering::EmitKCFICheck(MachineBasicBlock &MBB, |
| 11872 | MachineBasicBlock::instr_iterator &MBBI, |
| 11873 | const TargetInstrInfo *TII) const { |
| 11874 | assert(MBBI->isCall() && MBBI->getCFIType() && |
| 11875 | "Invalid call instruction for a KCFI check" ); |
| 11876 | |
| 11877 | MachineOperand *TargetOp = nullptr; |
| 11878 | switch (MBBI->getOpcode()) { |
| 11879 | // ARM mode opcodes |
| 11880 | case ARM::BLX: |
| 11881 | case ARM::BLX_pred: |
| 11882 | case ARM::BLX_noip: |
| 11883 | case ARM::BLX_pred_noip: |
| 11884 | case ARM::BX_CALL: |
| 11885 | TargetOp = &MBBI->getOperand(i: 0); |
| 11886 | break; |
| 11887 | case ARM::TCRETURNri: |
| 11888 | case ARM::TCRETURNrinotr12: |
| 11889 | case ARM::TAILJMPr: |
| 11890 | case ARM::TAILJMPr4: |
| 11891 | TargetOp = &MBBI->getOperand(i: 0); |
| 11892 | break; |
| 11893 | // Thumb mode opcodes (Thumb1 and Thumb2) |
| 11894 | // Note: Most Thumb call instructions have predicate operands before the |
| 11895 | // target register. Format: tBLXr pred, predreg, target_register, ... |
| 11896 | case ARM::tBLXr: // Thumb1/Thumb2: BLX register (requires V5T) |
| 11897 | case ARM::tBLXr_noip: // Thumb1/Thumb2: BLX register, no IP clobber |
| 11898 | case ARM::tBX_CALL: // Thumb1 only: BX call (push LR, BX) |
| 11899 | TargetOp = &MBBI->getOperand(i: 2); |
| 11900 | break; |
| 11901 | // Tail call instructions don't have predicates, target is operand 0 |
| 11902 | case ARM::tTAILJMPr: // Thumb1/Thumb2: Tail call via register |
| 11903 | TargetOp = &MBBI->getOperand(i: 0); |
| 11904 | break; |
| 11905 | default: |
| 11906 | llvm_unreachable("Unexpected CFI call opcode" ); |
| 11907 | } |
| 11908 | |
| 11909 | assert(TargetOp && TargetOp->isReg() && "Invalid target operand" ); |
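| | // The emitted check must observe the exact register the call will use, so |
| | // prevent later passes from renaming the call target. |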
| 11910 | TargetOp->setIsRenamable(false); |
| 11911 | |
| 11912 | // Select the appropriate KCFI_CHECK variant based on the instruction set |
| 11913 | unsigned KCFICheckOpcode; |
| 11914 | if (Subtarget->isThumb()) { |
| 11915 | if (Subtarget->isThumb2()) { |
| 11916 | KCFICheckOpcode = ARM::KCFI_CHECK_Thumb2; |
| 11917 | } else { |
| 11918 | KCFICheckOpcode = ARM::KCFI_CHECK_Thumb1; |
| 11919 | } |
| 11920 | } else { |
| 11921 | KCFICheckOpcode = ARM::KCFI_CHECK_ARM; |
| 11922 | } |
| 11923 | |
| 11924 | return BuildMI(BB&: MBB, I: MBBI, MIMD: MBBI->getDebugLoc(), MCID: TII->get(Opcode: KCFICheckOpcode)) |
| 11925 | .addReg(RegNo: TargetOp->getReg()) |
| 11926 | .addImm(Val: MBBI->getCFIType()) |
| 11927 | .getInstr(); |
| 11928 | } |
| 11929 | |
| 11930 | MachineBasicBlock * |
| 11931 | ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, |
| 11932 | MachineBasicBlock *BB) const { |
| 11933 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 11934 | DebugLoc dl = MI.getDebugLoc(); |
| 11935 | bool isThumb2 = Subtarget->isThumb2(); |
| 11936 | switch (MI.getOpcode()) { |
| 11937 | default: { |
| 11938 | MI.print(OS&: errs()); |
| 11939 | llvm_unreachable("Unexpected instr type to insert" ); |
| 11940 | } |
| 11941 | |
| 11942 | // Thumb1 post-indexed loads are really just single-register LDMs. |
| 11943 | case ARM::tLDR_postidx: { |
| 11944 | MachineOperand Def(MI.getOperand(i: 1)); |
| 11945 | BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDMIA_UPD)) |
| 11946 | .add(MO: Def) // Rn_wb |
| 11947 | .add(MO: MI.getOperand(i: 2)) // Rn |
| 11948 | .add(MO: MI.getOperand(i: 3)) // PredImm |
| 11949 | .add(MO: MI.getOperand(i: 4)) // PredReg |
| 11950 | .add(MO: MI.getOperand(i: 0)) // Rt |
| 11951 | .cloneMemRefs(OtherMI: MI); |
| 11952 | MI.eraseFromParent(); |
| 11953 | return BB; |
| 11954 | } |
| 11955 | |
| 11956 | case ARM::MVE_MEMCPYLOOPINST: |
| 11957 | case ARM::MVE_MEMSETLOOPINST: { |
| 11958 | |
| 11959 | // The transformation below expands the MVE_MEMCPYLOOPINST/MVE_MEMSETLOOPINST |
| 11960 | // pseudo into a Tail Predicated (TP) loop. It adds the instructions that |
| 11961 | // calculate the iteration count = ceil(size_in_bytes / 16) in the TP entry |
| 11962 | // block and the relevant instructions in the TP loop body for generation of a |
| 11963 | // WLSTP loop. |
| 11964 | |
| 11965 | // Below is the relevant portion of the CFG after the transformation. |
| 11966 | // The Machine Basic Blocks are shown along with branch conditions (in |
| 11967 | // brackets). Note that TP entry/exit MBBs depict the entry/exit of this |
| 11968 | // portion of the CFG and may not necessarily be the entry/exit of the |
| 11969 | // function. |
| 11970 | |
| 11971 | // (Relevant) CFG after transformation: |
| 11972 | // TP entry MBB |
| 11973 | // | |
| 11974 | // |-----------------| |
| 11975 | // (n <= 0) (n > 0) |
| 11976 | // | | |
| 11977 | // | TP loop Body MBB<--| |
| 11978 | // | | | |
| 11979 | // \ |___________| |
| 11980 | // \ / |
| 11981 | // TP exit MBB |
| 11982 | |
| 11983 | MachineFunction *MF = BB->getParent(); |
| 11984 | MachineFunctionProperties &Properties = MF->getProperties(); |
| 11985 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
| 11986 | |
| 11987 | Register OpDestReg = MI.getOperand(i: 0).getReg(); |
| 11988 | Register OpSrcReg = MI.getOperand(i: 1).getReg(); |
| 11989 | Register OpSizeReg = MI.getOperand(i: 2).getReg(); |
| 11990 | |
| 11991 | // Allocate the required MBBs and add to parent function. |
| 11992 | MachineBasicBlock *TpEntry = BB; |
| 11993 | MachineBasicBlock *TpLoopBody = MF->CreateMachineBasicBlock(); |
| 11994 | MachineBasicBlock *TpExit; |
| 11995 | |
| 11996 | MF->push_back(MBB: TpLoopBody); |
| 11997 | |
| 11998 | // If any instructions are present in the current block after |
| 11999 | // MVE_MEMCPYLOOPINST or MVE_MEMSETLOOPINST, split the current block and |
| 12000 | // move the instructions into the newly created exit block. If there are no |
| 12001 | // instructions, add an explicit branch to the fall-through block and then |
| 12002 | // split. |
| 12003 | // |
| 12004 | // The split is required for two reasons: |
| 12005 | // 1) A terminator (t2WhileLoopStart) will be placed at that site. |
| 12006 | // 2) Since a TPLoopBody will be added later, any phis in successive blocks |
| 12007 | // need to be updated. splitAt() already handles this. |
| 12008 | TpExit = BB->splitAt(SplitInst&: MI, UpdateLiveIns: false); |
| 12009 | if (TpExit == BB) { |
| 12010 | assert(BB->canFallThrough() && "Exit Block must be Fallthrough of the " |
| 12011 | "block containing memcpy/memset Pseudo" ); |
| 12012 | TpExit = BB->getFallThrough(); |
| 12013 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2B)) |
| 12014 | .addMBB(MBB: TpExit) |
| 12015 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 12016 | TpExit = BB->splitAt(SplitInst&: MI, UpdateLiveIns: false); |
| 12017 | } |
| 12018 | |
| 12019 | // Add logic for iteration count |
| 12020 | Register TotalIterationsReg = |
| 12021 | genTPEntry(TpEntry, TpLoopBody, TpExit, OpSizeReg, TII, Dl: dl, MRI); |
| 12022 | |
| 12023 | // Add the vectorized (and predicated) loads/store instructions |
| 12024 | bool IsMemcpy = MI.getOpcode() == ARM::MVE_MEMCPYLOOPINST; |
| 12025 | genTPLoopBody(TpLoopBody, TpEntry, TpExit, TII, Dl: dl, MRI, OpSrcReg, |
| 12026 | OpDestReg, ElementCountReg: OpSizeReg, TotalIterationsReg, IsMemcpy); |
| 12027 | |
| 12028 | // Required to avoid conflict with the MachineVerifier during testing. |
| 12029 | Properties.resetNoPHIs(); |
| 12030 | |
| 12031 | // Connect the blocks |
| 12032 | TpEntry->addSuccessor(Succ: TpLoopBody); |
| 12033 | TpLoopBody->addSuccessor(Succ: TpLoopBody); |
| 12034 | TpLoopBody->addSuccessor(Succ: TpExit); |
| 12035 | |
| 12036 | // Reorder for a more natural layout |
| 12037 | TpLoopBody->moveAfter(NewBefore: TpEntry); |
| 12038 | TpExit->moveAfter(NewBefore: TpLoopBody); |
| 12039 | |
| 12040 | // Finally, remove the memcpy Pseudo Instruction |
| 12041 | MI.eraseFromParent(); |
| 12042 | |
| 12043 | // Return the exit block as it may contain other instructions requiring a |
| 12044 | // custom inserter |
| 12045 | return TpExit; |
| 12046 | } |
| 12047 | |
| 12048 | // The Thumb2 pre-indexed stores have the same MI operands; they just |
| 12049 | // define them differently in the .td files from the isel patterns, so |
| 12050 | // they need pseudos. |
| 12051 | case ARM::t2STR_preidx: |
| 12052 | MI.setDesc(TII->get(Opcode: ARM::t2STR_PRE)); |
| 12053 | return BB; |
| 12054 | case ARM::t2STRB_preidx: |
| 12055 | MI.setDesc(TII->get(Opcode: ARM::t2STRB_PRE)); |
| 12056 | return BB; |
| 12057 | case ARM::t2STRH_preidx: |
| 12058 | MI.setDesc(TII->get(Opcode: ARM::t2STRH_PRE)); |
| 12059 | return BB; |
| 12060 | |
| 12061 | case ARM::STRi_preidx: |
| 12062 | case ARM::STRBi_preidx: { |
| 12063 | unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM |
| 12064 | : ARM::STRB_PRE_IMM; |
| 12065 | // Decode the offset. |
| 12066 | unsigned Offset = MI.getOperand(i: 4).getImm(); |
| 12067 | bool isSub = ARM_AM::getAM2Op(AM2Opc: Offset) == ARM_AM::sub; |
| 12068 | Offset = ARM_AM::getAM2Offset(AM2Opc: Offset); |
| 12069 | if (isSub) |
| 12070 | Offset = -Offset; |
| 12071 | |
| 12072 | MachineMemOperand *MMO = *MI.memoperands_begin(); |
| 12073 | BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: NewOpc)) |
| 12074 | .add(MO: MI.getOperand(i: 0)) // Rn_wb |
| 12075 | .add(MO: MI.getOperand(i: 1)) // Rt |
| 12076 | .add(MO: MI.getOperand(i: 2)) // Rn |
| 12077 | .addImm(Val: Offset) // offset (skip GPR==zero_reg) |
| 12078 | .add(MO: MI.getOperand(i: 5)) // pred |
| 12079 | .add(MO: MI.getOperand(i: 6)) |
| 12080 | .addMemOperand(MMO); |
| 12081 | MI.eraseFromParent(); |
| 12082 | return BB; |
| 12083 | } |
| 12084 | case ARM::STRr_preidx: |
| 12085 | case ARM::STRBr_preidx: |
| 12086 | case ARM::STRH_preidx: { |
| 12087 | unsigned NewOpc; |
| 12088 | switch (MI.getOpcode()) { |
| 12089 | default: llvm_unreachable("unexpected opcode!" ); |
| 12090 | case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break; |
| 12091 | case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break; |
| 12092 | case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break; |
| 12093 | } |
| 12094 | MachineInstrBuilder MIB = BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: NewOpc)); |
| 12095 | for (const MachineOperand &MO : MI.operands()) |
| 12096 | MIB.add(MO); |
| 12097 | MI.eraseFromParent(); |
| 12098 | return BB; |
| 12099 | } |
| 12100 | |
| 12101 | case ARM::tMOVCCr_pseudo: { |
| 12102 | // To "insert" a SELECT_CC instruction, we actually have to insert the |
| 12103 | // diamond control-flow pattern. The incoming instruction knows the |
| 12104 | // destination vreg to set, the condition code register to branch on, the |
| 12105 | // true/false values to select between, and a branch opcode to use. |
| 12106 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); |
| 12107 | MachineFunction::iterator It = ++BB->getIterator(); |
| 12108 | |
| 12109 | // thisMBB: |
| 12110 | // ... |
| 12111 | // TrueVal = ... |
| 12112 | // cmpTY ccX, r1, r2 |
| 12113 | // bCC copy1MBB |
| 12114 | // fallthrough --> copy0MBB |
| 12115 | MachineBasicBlock *thisMBB = BB; |
| 12116 | MachineFunction *F = BB->getParent(); |
| 12117 | MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(BB: LLVM_BB); |
| 12118 | MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(BB: LLVM_BB); |
| 12119 | F->insert(MBBI: It, MBB: copy0MBB); |
| 12120 | F->insert(MBBI: It, MBB: sinkMBB); |
| 12121 | |
| 12122 | // Set the call frame size on entry to the new basic blocks. |
| 12123 | unsigned CallFrameSize = TII->getCallFrameSizeAt(MI); |
| 12124 | copy0MBB->setCallFrameSize(CallFrameSize); |
| 12125 | sinkMBB->setCallFrameSize(CallFrameSize); |
| 12126 | |
| 12127 | // Check whether CPSR is live past the tMOVCCr_pseudo. |
| 12128 | const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 12129 | if (!MI.killsRegister(Reg: ARM::CPSR, /*TRI=*/nullptr) && |
| 12130 | !checkAndUpdateCPSRKill(SelectItr: MI, BB: thisMBB, TRI)) { |
| 12131 | copy0MBB->addLiveIn(PhysReg: ARM::CPSR); |
| 12132 | sinkMBB->addLiveIn(PhysReg: ARM::CPSR); |
| 12133 | } |
| 12134 | |
| 12135 | // Transfer the remainder of BB and its successor edges to sinkMBB. |
| 12136 | sinkMBB->splice(Where: sinkMBB->begin(), Other: BB, |
| 12137 | From: std::next(x: MachineBasicBlock::iterator(MI)), To: BB->end()); |
| 12138 | sinkMBB->transferSuccessorsAndUpdatePHIs(FromMBB: BB); |
| 12139 | |
| 12140 | BB->addSuccessor(Succ: copy0MBB); |
| 12141 | BB->addSuccessor(Succ: sinkMBB); |
| 12142 | |
| 12143 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::tBcc)) |
| 12144 | .addMBB(MBB: sinkMBB) |
| 12145 | .addImm(Val: MI.getOperand(i: 3).getImm()) |
| 12146 | .addReg(RegNo: MI.getOperand(i: 4).getReg()); |
| 12147 | |
| 12148 | // copy0MBB: |
| 12149 | // %FalseValue = ... |
| 12150 | // # fallthrough to sinkMBB |
| 12151 | BB = copy0MBB; |
| 12152 | |
| 12153 | // Update machine-CFG edges |
| 12154 | BB->addSuccessor(Succ: sinkMBB); |
| 12155 | |
| 12156 | // sinkMBB: |
| 12157 | // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] |
| 12158 | // ... |
| 12159 | BB = sinkMBB; |
| 12160 | BuildMI(BB&: *BB, I: BB->begin(), MIMD: dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: MI.getOperand(i: 0).getReg()) |
| 12161 | .addReg(RegNo: MI.getOperand(i: 1).getReg()) |
| 12162 | .addMBB(MBB: copy0MBB) |
| 12163 | .addReg(RegNo: MI.getOperand(i: 2).getReg()) |
| 12164 | .addMBB(MBB: thisMBB); |
| 12165 | |
| 12166 | MI.eraseFromParent(); // The pseudo instruction is gone now. |
| 12167 | return BB; |
| 12168 | } |
| 12169 | |
| 12170 | case ARM::BCCi64: |
| 12171 | case ARM::BCCZi64: { |
| 12172 | // If there is an unconditional branch to the other successor, remove it. |
| 12173 | BB->erase(I: std::next(x: MachineBasicBlock::iterator(MI)), E: BB->end()); |
| 12174 | |
| 12175 | // Compare both parts that make up the double comparison separately for |
| 12176 | // equality. |
| 12177 | bool RHSisZero = MI.getOpcode() == ARM::BCCZi64; |
| 12178 | |
| 12179 | Register LHS1 = MI.getOperand(i: 1).getReg(); |
| 12180 | Register LHS2 = MI.getOperand(i: 2).getReg(); |
| 12181 | if (RHSisZero) { |
| 12182 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: isThumb2 ? ARM::t2CMPri : ARM::CMPri)) |
| 12183 | .addReg(RegNo: LHS1) |
| 12184 | .addImm(Val: 0) |
| 12185 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 12186 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: isThumb2 ? ARM::t2CMPri : ARM::CMPri)) |
| 12187 | .addReg(RegNo: LHS2).addImm(Val: 0) |
| 12188 | .addImm(Val: ARMCC::EQ).addReg(RegNo: ARM::CPSR); |
| 12189 | } else { |
| 12190 | Register RHS1 = MI.getOperand(i: 3).getReg(); |
| 12191 | Register RHS2 = MI.getOperand(i: 4).getReg(); |
| 12192 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) |
| 12193 | .addReg(RegNo: LHS1) |
| 12194 | .addReg(RegNo: RHS1) |
| 12195 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 12196 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) |
| 12197 | .addReg(RegNo: LHS2).addReg(RegNo: RHS2) |
| 12198 | .addImm(Val: ARMCC::EQ).addReg(RegNo: ARM::CPSR); |
| 12199 | } |
| 12200 | |
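| | // The branch destination follows the register operands: operand 3 for |
| | // BCCZi64 (implicit zero RHS), operand 5 for BCCi64. |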
| 12201 | MachineBasicBlock *destMBB = MI.getOperand(i: RHSisZero ? 3 : 5).getMBB(); |
| 12202 | MachineBasicBlock *exitMBB = OtherSucc(MBB: BB, Succ: destMBB); |
| 12203 | if (MI.getOperand(i: 0).getImm() == ARMCC::NE) |
| 12204 | std::swap(a&: destMBB, b&: exitMBB); |
| 12205 | |
| 12206 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: isThumb2 ? ARM::t2Bcc : ARM::Bcc)) |
| 12207 | .addMBB(MBB: destMBB).addImm(Val: ARMCC::EQ).addReg(RegNo: ARM::CPSR); |
| 12208 | if (isThumb2) |
| 12209 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2B)) |
| 12210 | .addMBB(MBB: exitMBB) |
| 12211 | .add(MOs: predOps(Pred: ARMCC::AL)); |
| 12212 | else |
| 12213 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::B)).addMBB(MBB: exitMBB); |
| 12214 | |
| 12215 | MI.eraseFromParent(); // The pseudo instruction is gone now. |
| 12216 | return BB; |
| 12217 | } |
| 12218 | |
| 12219 | case ARM::Int_eh_sjlj_setjmp: |
| 12220 | case ARM::Int_eh_sjlj_setjmp_nofp: |
| 12221 | case ARM::tInt_eh_sjlj_setjmp: |
| 12222 | case ARM::t2Int_eh_sjlj_setjmp: |
| 12223 | case ARM::t2Int_eh_sjlj_setjmp_nofp: |
| 12224 | return BB; |
| 12225 | |
| 12226 | case ARM::Int_eh_sjlj_setup_dispatch: |
| 12227 | EmitSjLjDispatchBlock(MI, MBB: BB); |
| 12228 | return BB; |
| 12229 | case ARM::COPY_STRUCT_BYVAL_I32: |
| 12230 | ++NumLoopByVals; |
| 12231 | return EmitStructByval(MI, BB); |
| 12232 | case ARM::WIN__CHKSTK: |
| 12233 | return EmitLowered__chkstk(MI, MBB: BB); |
| 12234 | case ARM::WIN__DBZCHK: |
| 12235 | return EmitLowered__dbzchk(MI, MBB: BB); |
| 12236 | } |
| 12237 | } |
| 12238 | |
| 12239 | /// Attaches vregs to MEMCPY that it will use as scratch registers |
| 12240 | /// when it is expanded into LDM/STM. This is done as a post-isel lowering |
| 12241 | /// instead of as a custom inserter because we need the use list from the SDNode. |
| 12242 | static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget, |
| 12243 | MachineInstr &MI, const SDNode *Node) { |
| 12244 | bool isThumb1 = Subtarget->isThumb1Only(); |
| 12245 | |
| 12246 | MachineFunction *MF = MI.getParent()->getParent(); |
| 12247 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
| 12248 | MachineInstrBuilder MIB(*MF, MI); |
| 12249 | |
| 12250 | // If the new dst/src is unused, mark it as dead. |
| 12251 | if (!Node->hasAnyUseOfValue(Value: 0)) { |
| 12252 | MI.getOperand(i: 0).setIsDead(true); |
| 12253 | } |
| 12254 | if (!Node->hasAnyUseOfValue(Value: 1)) { |
| 12255 | MI.getOperand(i: 1).setIsDead(true); |
| 12256 | } |
| 12257 | |
| 12258 | // The MEMCPY both defines and kills the scratch registers. |
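| | // Operand 4 holds the number of scratch registers the expansion requires. |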
| 12259 | for (unsigned I = 0; I != MI.getOperand(i: 4).getImm(); ++I) { |
| 12260 | Register TmpReg = MRI.createVirtualRegister(RegClass: isThumb1 ? &ARM::tGPRRegClass |
| 12261 | : &ARM::GPRRegClass); |
| 12262 | MIB.addReg(RegNo: TmpReg, Flags: RegState::Define|RegState::Dead); |
| 12263 | } |
| 12264 | } |
| 12265 | |
| 12266 | void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, |
| 12267 | SDNode *Node) const { |
| 12268 | if (MI.getOpcode() == ARM::MEMCPY) { |
| 12269 | attachMEMCPYScratchRegs(Subtarget, MI, Node); |
| 12270 | return; |
| 12271 | } |
| 12272 | |
| 12273 | const MCInstrDesc *MCID = &MI.getDesc(); |
| 12274 | // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB, |
| 12275 | // RSC. Coming out of isel, they have an implicit CPSR def, but the optional |
| 12276 | // operand is still set to noreg. If needed, set the optional operand's |
| 12277 | // register to CPSR, and remove the redundant implicit def. |
| 12278 | // |
| 12279 | // e.g. ADCS (..., implicit-def CPSR) -> ADC (... opt:def CPSR). |
| 12280 | |
| 12281 | // Rename pseudo opcodes. |
| 12282 | unsigned NewOpc = convertAddSubFlagsOpcode(OldOpc: MI.getOpcode()); |
| 12283 | unsigned ccOutIdx; |
| 12284 | if (NewOpc) { |
| 12285 | const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo(); |
| 12286 | MCID = &TII->get(Opcode: NewOpc); |
| 12287 | |
| 12288 | assert(MCID->getNumOperands() == |
| 12289 | MI.getDesc().getNumOperands() + 5 - MI.getDesc().getSize() |
| 12290 | && "converted opcode should be the same except for cc_out" |
| 12291 | " (and, on Thumb1, pred)" ); |
| 12292 | |
| 12293 | MI.setDesc(*MCID); |
| 12294 | |
| 12295 | // Add the optional cc_out operand |
| 12296 | MI.addOperand(Op: MachineOperand::CreateReg(Reg: 0, /*isDef=*/true)); |
| 12297 | |
| 12298 | // On Thumb1, move all input operands to the end, then add the predicate |
| 12299 | if (Subtarget->isThumb1Only()) { |
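| | // Rotate the source operands past the newly added cc_out so the final |
| | // operand layout is: def, cc_out, inputs, predicate. |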
| 12300 | for (unsigned c = MCID->getNumOperands() - 4; c--;) { |
| 12301 | MI.addOperand(Op: MI.getOperand(i: 1)); |
| 12302 | MI.removeOperand(OpNo: 1); |
| 12303 | } |
| 12304 | |
| 12305 | // Restore the ties |
| 12306 | for (unsigned i = MI.getNumOperands(); i--;) { |
| 12307 | const MachineOperand& op = MI.getOperand(i); |
| 12308 | if (op.isReg() && op.isUse()) { |
| 12309 | int DefIdx = MCID->getOperandConstraint(OpNum: i, Constraint: MCOI::TIED_TO); |
| 12310 | if (DefIdx != -1) |
| 12311 | MI.tieOperands(DefIdx, UseIdx: i); |
| 12312 | } |
| 12313 | } |
| 12314 | |
| 12315 | MI.addOperand(Op: MachineOperand::CreateImm(Val: ARMCC::AL)); |
| 12316 | MI.addOperand(Op: MachineOperand::CreateReg(Reg: 0, /*isDef=*/false)); |
| 12317 | ccOutIdx = 1; |
| 12318 | } else |
| 12319 | ccOutIdx = MCID->getNumOperands() - 1; |
| 12320 | } else |
| 12321 | ccOutIdx = MCID->getNumOperands() - 1; |
| 12322 | |
| 12323 | // Any ARM instruction that sets the 's' bit should specify an optional |
| 12324 | // "cc_out" operand in the last operand position. |
| 12325 | if (!MI.hasOptionalDef() || !MCID->operands()[ccOutIdx].isOptionalDef()) { |
| 12326 | assert(!NewOpc && "Optional cc_out operand required" ); |
| 12327 | return; |
| 12328 | } |
| 12329 | // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it |
| 12330 | // since we already have an optional CPSR def. |
| 12331 | bool definesCPSR = false; |
| 12332 | bool deadCPSR = false; |
| 12333 | for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e; |
| 12334 | ++i) { |
| 12335 | const MachineOperand &MO = MI.getOperand(i); |
| 12336 | if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) { |
| 12337 | definesCPSR = true; |
| 12338 | if (MO.isDead()) |
| 12339 | deadCPSR = true; |
| 12340 | MI.removeOperand(OpNo: i); |
| 12341 | break; |
| 12342 | } |
| 12343 | } |
| 12344 | if (!definesCPSR) { |
| 12345 | assert(!NewOpc && "Optional cc_out operand required" ); |
| 12346 | return; |
| 12347 | } |
| 12348 | assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag" ); |
| 12349 | if (deadCPSR) { |
| 12350 | assert(!MI.getOperand(ccOutIdx).getReg() && |
| 12351 | "expect uninitialized optional cc_out operand" ); |
| 12352 | // Thumb1 instructions must have the S bit even if the CPSR is dead. |
| 12353 | if (!Subtarget->isThumb1Only()) |
| 12354 | return; |
| 12355 | } |
| 12356 | |
| 12357 | // If this instruction was defined with an optional CPSR def and its dag node |
| 12358 | // had a live implicit CPSR def, then activate the optional CPSR def. |
| 12359 | MachineOperand &MO = MI.getOperand(i: ccOutIdx); |
| 12360 | MO.setReg(ARM::CPSR); |
| 12361 | MO.setIsDef(true); |
| 12362 | } |
| 12363 | |
| 12364 | //===----------------------------------------------------------------------===// |
| 12365 | // ARM Optimization Hooks |
| 12366 | //===----------------------------------------------------------------------===// |
| 12367 | |
| 12368 | // Helper function that checks if N is a null or all ones constant. |
| 12369 | static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) { |
| 12370 | return AllOnes ? isAllOnesConstant(V: N) : isNullConstant(V: N); |
| 12371 | } |
| 12372 | |
| 12373 | // Return true if N is conditionally 0 or all ones. |
| 12374 | // Detects these expressions where cc is an i1 value: |
| 12375 | // |
| 12376 | // (select cc 0, y) [AllOnes=0] |
| 12377 | // (select cc y, 0) [AllOnes=0] |
| 12378 | // (zext cc) [AllOnes=0] |
| 12379 | // (sext cc) [AllOnes=0/1] |
| 12380 | // (select cc -1, y) [AllOnes=1] |
| 12381 | // (select cc y, -1) [AllOnes=1] |
| 12382 | // |
| 12383 | // Invert is set when N is the null/all-ones constant for the CC == false case. |
| 12384 | // OtherOp is set to the alternative value of N. |
| 12385 | static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, |
| 12386 | SDValue &CC, bool &Invert, |
| 12387 | SDValue &OtherOp, |
| 12388 | SelectionDAG &DAG) { |
| 12389 | switch (N->getOpcode()) { |
| 12390 | default: return false; |
| 12391 | case ISD::SELECT: { |
| 12392 | CC = N->getOperand(Num: 0); |
| 12393 | SDValue N1 = N->getOperand(Num: 1); |
| 12394 | SDValue N2 = N->getOperand(Num: 2); |
| 12395 | if (isZeroOrAllOnes(N: N1, AllOnes)) { |
| 12396 | Invert = false; |
| 12397 | OtherOp = N2; |
| 12398 | return true; |
| 12399 | } |
| 12400 | if (isZeroOrAllOnes(N: N2, AllOnes)) { |
| 12401 | Invert = true; |
| 12402 | OtherOp = N1; |
| 12403 | return true; |
| 12404 | } |
| 12405 | return false; |
| 12406 | } |
| 12407 | case ISD::ZERO_EXTEND: |
| 12408 | // (zext cc) can never be the all ones value. |
| 12409 | if (AllOnes) |
| 12410 | return false; |
| 12411 | [[fallthrough]]; |
| 12412 | case ISD::SIGN_EXTEND: { |
| 12413 | SDLoc dl(N); |
| 12414 | EVT VT = N->getValueType(ResNo: 0); |
| 12415 | CC = N->getOperand(Num: 0); |
| 12416 | if (CC.getValueType() != MVT::i1 || CC.getOpcode() != ISD::SETCC) |
| 12417 | return false; |
| 12418 | Invert = !AllOnes; |
| 12419 | if (AllOnes) |
| 12420 | // When looking for an AllOnes constant, N is an sext, and the 'other' |
| 12421 | // value is 0. |
| 12422 | OtherOp = DAG.getConstant(Val: 0, DL: dl, VT); |
| 12423 | else if (N->getOpcode() == ISD::ZERO_EXTEND) |
| 12424 | // When looking for a 0 constant, N can be zext or sext. |
| 12425 | OtherOp = DAG.getConstant(Val: 1, DL: dl, VT); |
| 12426 | else |
| 12427 | OtherOp = DAG.getAllOnesConstant(DL: dl, VT); |
| 12428 | return true; |
| 12429 | } |
| 12430 | } |
| 12431 | } |
| 12432 | |
| 12433 | // Combine a constant select operand into its use: |
| 12434 | // |
| 12435 | // (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) |
| 12436 | // (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) |
| 12437 | // (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) [AllOnes=1] |
| 12438 | // (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) |
| 12439 | // (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) |
| 12440 | // |
| 12441 | // The transform is rejected if the select doesn't have a constant operand that |
| 12442 | // is null, or all ones when AllOnes is set. |
| 12443 | // |
| 12444 | // Also recognize sext/zext from i1: |
| 12445 | // |
| 12446 | // (add (zext cc), x) -> (select cc (add x, 1), x) |
| 12447 | // (add (sext cc), x) -> (select cc (add x, -1), x) |
| 12448 | // |
| 12449 | // These transformations eventually create predicated instructions. |
| 12450 | // |
| 12451 | // @param N The node to transform. |
| 12452 | // @param Slct The N operand that is a select. |
| 12453 | // @param OtherOp The other N operand (x above). |
| 12454 | // @param DCI Context. |
| 12455 | // @param AllOnes Require the select constant to be all ones instead of null. |
| 12456 | // @returns The new node, or SDValue() on failure. |
| 12457 | static |
| 12458 | SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, |
| 12459 | TargetLowering::DAGCombinerInfo &DCI, |
| 12460 | bool AllOnes = false) { |
| 12461 | SelectionDAG &DAG = DCI.DAG; |
| 12462 | EVT VT = N->getValueType(ResNo: 0); |
| 12463 | SDValue NonConstantVal; |
| 12464 | SDValue CCOp; |
| 12465 | bool SwapSelectOps; |
| 12466 | if (!isConditionalZeroOrAllOnes(N: Slct.getNode(), AllOnes, CC&: CCOp, Invert&: SwapSelectOps, |
| 12467 | OtherOp&: NonConstantVal, DAG)) |
| 12468 | return SDValue(); |
| 12469 | |
| 12470 | // Slct is now known to be the desired identity constant when CC is true. |
| 12471 | SDValue TrueVal = OtherOp; |
| 12472 | SDValue FalseVal = DAG.getNode(Opcode: N->getOpcode(), DL: SDLoc(N), VT, |
| 12473 | N1: OtherOp, N2: NonConstantVal); |
| 12474 | // Unless SwapSelectOps says CC should be false. |
| 12475 | if (SwapSelectOps) |
| 12476 | std::swap(a&: TrueVal, b&: FalseVal); |
| 12477 | |
| 12478 | return DAG.getNode(Opcode: ISD::SELECT, DL: SDLoc(N), VT, |
| 12479 | N1: CCOp, N2: TrueVal, N3: FalseVal); |
| 12480 | } |
| 12481 | |
| 12482 | // Attempt combineSelectAndUse on each operand of a commutative operator N. |
| 12483 | static |
| 12484 | SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes, |
| 12485 | TargetLowering::DAGCombinerInfo &DCI) { |
| 12486 | SDValue N0 = N->getOperand(Num: 0); |
| 12487 | SDValue N1 = N->getOperand(Num: 1); |
| 12488 | if (N0.getNode()->hasOneUse()) |
| 12489 | if (SDValue Result = combineSelectAndUse(N, Slct: N0, OtherOp: N1, DCI, AllOnes)) |
| 12490 | return Result; |
| 12491 | if (N1.getNode()->hasOneUse()) |
| 12492 | if (SDValue Result = combineSelectAndUse(N, Slct: N1, OtherOp: N0, DCI, AllOnes)) |
| 12493 | return Result; |
| 12494 | return SDValue(); |
| 12495 | } |
| 12496 | |
| 12497 | static bool IsVUZPShuffleNode(SDNode *N) { |
| 12498 | // VUZP shuffle node. |
| 12499 | if (N->getOpcode() == ARMISD::VUZP) |
| 12500 | return true; |
| 12501 | |
| 12502 | // "VUZP" on i32 is an alias for VTRN. |
| 12503 | if (N->getOpcode() == ARMISD::VTRN && N->getValueType(ResNo: 0) == MVT::v2i32) |
| 12504 | return true; |
| 12505 | |
| 12506 | return false; |
| 12507 | } |
| 12508 | |
| 12509 | static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1, |
| 12510 | TargetLowering::DAGCombinerInfo &DCI, |
| 12511 | const ARMSubtarget *Subtarget) { |
| 12512 | // Look for ADD(VUZP.0, VUZP.1). |
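| | // VUZP.0 holds the even lanes of its two inputs and VUZP.1 the odd lanes, |
| | // so adding them lane-by-lane is a pairwise add of the original operands, |
| | // which is exactly what VPADD computes. |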
| 12513 | if (!IsVUZPShuffleNode(N: N0.getNode()) || N0.getNode() != N1.getNode() || |
| 12514 | N0 == N1) |
| 12515 | return SDValue(); |
| 12516 | |
| 12517 | // Make sure the ADD is a 64-bit add; there is no 128-bit VPADD. |
| 12518 | if (!N->getValueType(ResNo: 0).is64BitVector()) |
| 12519 | return SDValue(); |
| 12520 | |
| 12521 | // Generate vpadd. |
| 12522 | SelectionDAG &DAG = DCI.DAG; |
| 12523 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 12524 | SDLoc dl(N); |
| 12525 | SDNode *Unzip = N0.getNode(); |
| 12526 | EVT VT = N->getValueType(ResNo: 0); |
| 12527 | |
| 12528 | SmallVector<SDValue, 8> Ops; |
| 12529 | Ops.push_back(Elt: DAG.getConstant(Val: Intrinsic::arm_neon_vpadd, DL: dl, |
| 12530 | VT: TLI.getPointerTy(DL: DAG.getDataLayout()))); |
| 12531 | Ops.push_back(Elt: Unzip->getOperand(Num: 0)); |
| 12532 | Ops.push_back(Elt: Unzip->getOperand(Num: 1)); |
| 12533 | |
| 12534 | return DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT, Ops); |
| 12535 | } |
| 12536 | |
| 12537 | static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1, |
| 12538 | TargetLowering::DAGCombinerInfo &DCI, |
| 12539 | const ARMSubtarget *Subtarget) { |
| 12540 | // Check for two extended operands. |
| 12541 | if (!(N0.getOpcode() == ISD::SIGN_EXTEND && |
| 12542 | N1.getOpcode() == ISD::SIGN_EXTEND) && |
| 12543 | !(N0.getOpcode() == ISD::ZERO_EXTEND && |
| 12544 | N1.getOpcode() == ISD::ZERO_EXTEND)) |
| 12545 | return SDValue(); |
| 12546 | |
| 12547 | SDValue N00 = N0.getOperand(i: 0); |
| 12548 | SDValue N10 = N1.getOperand(i: 0); |
| 12549 | |
| 12550 | // Look for ADD(SEXT(VUZP.0), SEXT(VUZP.1)) |
| 12551 | if (!IsVUZPShuffleNode(N: N00.getNode()) || N00.getNode() != N10.getNode() || |
| 12552 | N00 == N10) |
| 12553 | return SDValue(); |
| 12554 | |
| 12555 | // We only recognize Q register paddl here; this can't be reached until |
| 12556 | // after type legalization. |
| 12557 | if (!N00.getValueType().is64BitVector() || |
| 12558 | !N0.getValueType().is128BitVector()) |
| 12559 | return SDValue(); |
| 12560 | |
| 12561 | // Generate vpaddl. |
| 12562 | SelectionDAG &DAG = DCI.DAG; |
| 12563 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 12564 | SDLoc dl(N); |
| 12565 | EVT VT = N->getValueType(ResNo: 0); |
| 12566 | |
| 12567 | SmallVector<SDValue, 8> Ops; |
| 12568 | // Form vpaddl.sN or vpaddl.uN depending on the kind of extension. |
| 12569 | unsigned Opcode; |
| 12570 | if (N0.getOpcode() == ISD::SIGN_EXTEND) |
| 12571 | Opcode = Intrinsic::arm_neon_vpaddls; |
| 12572 | else |
| 12573 | Opcode = Intrinsic::arm_neon_vpaddlu; |
| 12574 | Ops.push_back(Elt: DAG.getConstant(Val: Opcode, DL: dl, |
| 12575 | VT: TLI.getPointerTy(DL: DAG.getDataLayout()))); |
| 12576 | EVT ElemTy = N00.getValueType().getVectorElementType(); |
| 12577 | unsigned NumElts = VT.getVectorNumElements(); |
| 12578 | EVT ConcatVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: ElemTy, NumElements: NumElts * 2); |
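| | // Concatenating the two VUZP inputs rebuilds the original wide vector; |
| | // vpaddl then widens and adds its adjacent lanes, matching the extended |
| | // ADD of the unzipped halves. |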
| 12579 | SDValue Concat = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: SDLoc(N), VT: ConcatVT, |
| 12580 | N1: N00.getOperand(i: 0), N2: N00.getOperand(i: 1)); |
| 12581 | Ops.push_back(Elt: Concat); |
| 12582 | |
| 12583 | return DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT, Ops); |
| 12584 | } |
| 12585 | |
| 12586 | // FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in |
| 12587 | // an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is |
| 12588 | // much easier to match. |
| 12589 | static SDValue |
| 12590 | AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1, |
| 12591 | TargetLowering::DAGCombinerInfo &DCI, |
| 12592 | const ARMSubtarget *Subtarget) { |
| 12593 | // Only perform this optimization after legalization, and only if NEON is |
| 12594 | // available. We also expect both operands to be BUILD_VECTORs. |
| 12595 | if (DCI.isBeforeLegalize() || !Subtarget->hasNEON() |
| 12596 | || N0.getOpcode() != ISD::BUILD_VECTOR |
| 12597 | || N1.getOpcode() != ISD::BUILD_VECTOR) |
| 12598 | return SDValue(); |
| 12599 | |
| 12600 | // Check output type since VPADDL operand elements can only be 8, 16, or 32. |
| 12601 | EVT VT = N->getValueType(ResNo: 0); |
| 12602 | if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64) |
| 12603 | return SDValue(); |
| 12604 | |
| 12605 | // Check that the vector operands are of the right form. |
| 12606 | // N0 and N1 are BUILD_VECTOR nodes with N EXTRACT_VECTOR_ELT operands, |
| 12607 | // where N is the number of elements in the formed vector. |
| 12608 | // Each EXTRACT_VECTOR_ELT should reference the same input vector and use an |
| 12609 | // all-even or all-odd set of indices so that we have a pairwise add pattern. |
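| | // For example, with Vec = <v0, v1, v2, v3, v4, v5, v6, v7> we expect |
| | //   N0 = BUILD_VECTOR(v0, v2, v4, v6) and N1 = BUILD_VECTOR(v1, v3, v5, v7), |
| | // so ADD(N0, N1) is a pairwise add over Vec. |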
| 12610 | |
| 12611 | // Grab the vector that all EXTRACT_VECTOR nodes should be referencing. |
| 12612 | if (N0->getOperand(Num: 0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT) |
| 12613 | return SDValue(); |
| 12614 | SDValue Vec = N0->getOperand(Num: 0)->getOperand(Num: 0); |
| 12615 | SDNode *V = Vec.getNode(); |
| 12616 | unsigned nextIndex = 0; |
| 12617 | |
| 12618 | // For each operand of the ADD (both BUILD_VECTORs), |
| 12619 | // check to see if each of its operands is an EXTRACT_VECTOR_ELT with |
| 12620 | // the same vector and the appropriate index. |
| 12621 | for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) { |
| 12622 | if (N0->getOperand(Num: i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT |
| 12623 | && N1->getOperand(Num: i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { |
| 12624 | |
| 12625 | SDValue ExtVec0 = N0->getOperand(Num: i); |
| 12626 | SDValue ExtVec1 = N1->getOperand(Num: i); |
| 12627 | |
| 12628 | // The first operand is the vector; verify it's the same one. |
| 12629 | if (V != ExtVec0->getOperand(Num: 0).getNode() || |
| 12630 | V != ExtVec1->getOperand(Num: 0).getNode()) |
| 12631 | return SDValue(); |
| 12632 | |
| 12633 | // The second operand is the index constant; verify it's correct. |
| 12634 | ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(Val: ExtVec0->getOperand(Num: 1)); |
| 12635 | ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Val: ExtVec1->getOperand(Num: 1)); |
| 12636 | |
| 12637 | // The indices must step through the vector in order: even indices in N0, odd in N1. |
| 12638 | if (!C0 || !C1 || C0->getZExtValue() != nextIndex |
| 12639 | || C1->getZExtValue() != nextIndex+1) |
| 12640 | return SDValue(); |
| 12641 | |
| 12642 | // Increment index. |
| 12643 | nextIndex+=2; |
| 12644 | } else |
| 12645 | return SDValue(); |
| 12646 | } |
| 12647 | |
| 12648 | // Don't generate vpaddl+vmovn; we'll match it to vpadd later. Also make sure |
| 12649 | // we're using the entire input vector, otherwise there's a size/legality |
| 12650 | // mismatch somewhere. |
| 12651 | if (nextIndex != Vec.getValueType().getVectorNumElements() || |
| 12652 | Vec.getValueType().getVectorElementType() == VT.getVectorElementType()) |
| 12653 | return SDValue(); |
| 12654 | |
| 12655 | // Create VPADDL node. |
| 12656 | SelectionDAG &DAG = DCI.DAG; |
| 12657 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 12658 | |
| 12659 | SDLoc dl(N); |
| 12660 | |
| 12661 | // Build operand list. |
| 12662 | SmallVector<SDValue, 8> Ops; |
| 12663 | Ops.push_back(Elt: DAG.getConstant(Val: Intrinsic::arm_neon_vpaddls, DL: dl, |
| 12664 | VT: TLI.getPointerTy(DL: DAG.getDataLayout()))); |
| 12665 | |
| 12666 | // Input is the vector. |
| 12667 | Ops.push_back(Elt: Vec); |
| 12668 | |
| 12669 | // Get widened type and narrowed type. |
| 12670 | MVT widenType; |
| 12671 | unsigned numElem = VT.getVectorNumElements(); |
| 12672 | |
| 12673 | EVT inputLaneType = Vec.getValueType().getVectorElementType(); |
| 12674 | switch (inputLaneType.getSimpleVT().SimpleTy) { |
| 12675 | case MVT::i8: widenType = MVT::getVectorVT(VT: MVT::i16, NumElements: numElem); break; |
| 12676 | case MVT::i16: widenType = MVT::getVectorVT(VT: MVT::i32, NumElements: numElem); break; |
| 12677 | case MVT::i32: widenType = MVT::getVectorVT(VT: MVT::i64, NumElements: numElem); break; |
| 12678 | default: |
| 12679 | llvm_unreachable("Invalid vector element type for padd optimization."); |
| 12680 | } |
| 12681 | |
| 12682 | SDValue tmp = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: widenType, Ops); |
| 12683 | unsigned ExtOp = VT.bitsGT(VT: tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE; |
| 12684 | return DAG.getNode(Opcode: ExtOp, DL: dl, VT, Operand: tmp); |
| 12685 | } |
| 12686 | |
| 12687 | static SDValue findMUL_LOHI(SDValue V) { |
| 12688 | if (V->getOpcode() == ISD::UMUL_LOHI || |
| 12689 | V->getOpcode() == ISD::SMUL_LOHI) |
| 12690 | return V; |
| 12691 | return SDValue(); |
| 12692 | } |
| 12693 | |
| 12694 | static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode, |
| 12695 | TargetLowering::DAGCombinerInfo &DCI, |
| 12696 | const ARMSubtarget *Subtarget) { |
| 12697 | if (!Subtarget->hasBaseDSP()) |
| 12698 | return SDValue(); |
| 12699 | |
| 12700 | // SMLALBB, SMLALBT, SMLALTB, SMLALTT multiply two 16-bit values and |
| 12701 | // accumulate the product into a 64-bit value. The 16-bit values will |
| 12702 | // be sign-extended somehow or SRA'd into 32-bit values |
| 12703 | // (addc (adde (mul 16bit, 16bit), lo), hi) |
| 12704 | SDValue Mul = AddcNode->getOperand(Num: 0); |
| 12705 | SDValue Lo = AddcNode->getOperand(Num: 1); |
| 12706 | if (Mul.getOpcode() != ISD::MUL) { |
| 12707 | Lo = AddcNode->getOperand(Num: 0); |
| 12708 | Mul = AddcNode->getOperand(Num: 1); |
| 12709 | if (Mul.getOpcode() != ISD::MUL) |
| 12710 | return SDValue(); |
| 12711 | } |
| 12712 | |
| 12713 | SDValue SRA = AddeNode->getOperand(Num: 0); |
| 12714 | SDValue Hi = AddeNode->getOperand(Num: 1); |
| 12715 | if (SRA.getOpcode() != ISD::SRA) { |
| 12716 | SRA = AddeNode->getOperand(Num: 1); |
| 12717 | Hi = AddeNode->getOperand(Num: 0); |
| 12718 | if (SRA.getOpcode() != ISD::SRA) |
| 12719 | return SDValue(); |
| 12720 | } |
| 12721 | if (auto Const = dyn_cast<ConstantSDNode>(Val: SRA.getOperand(i: 1))) { |
| 12722 | if (Const->getZExtValue() != 31) |
| 12723 | return SDValue(); |
| 12724 | } else |
| 12725 | return SDValue(); |
| 12726 | |
| 12727 | if (SRA.getOperand(i: 0) != Mul) |
| 12728 | return SDValue(); |
| 12729 | |
| 12730 | SelectionDAG &DAG = DCI.DAG; |
| 12731 | SDLoc dl(AddcNode); |
| 12732 | unsigned Opcode = 0; |
| 12733 | SDValue Op0; |
| 12734 | SDValue Op1; |
| 12735 | |
| 12736 | if (isS16(Op: Mul.getOperand(i: 0), DAG) && isS16(Op: Mul.getOperand(i: 1), DAG)) { |
| 12737 | Opcode = ARMISD::SMLALBB; |
| 12738 | Op0 = Mul.getOperand(i: 0); |
| 12739 | Op1 = Mul.getOperand(i: 1); |
| 12740 | } else if (isS16(Op: Mul.getOperand(i: 0), DAG) && isSRA16(Op: Mul.getOperand(i: 1))) { |
| 12741 | Opcode = ARMISD::SMLALBT; |
| 12742 | Op0 = Mul.getOperand(i: 0); |
| 12743 | Op1 = Mul.getOperand(i: 1).getOperand(i: 0); |
| 12744 | } else if (isSRA16(Op: Mul.getOperand(i: 0)) && isS16(Op: Mul.getOperand(i: 1), DAG)) { |
| 12745 | Opcode = ARMISD::SMLALTB; |
| 12746 | Op0 = Mul.getOperand(i: 0).getOperand(i: 0); |
| 12747 | Op1 = Mul.getOperand(i: 1); |
| 12748 | } else if (isSRA16(Op: Mul.getOperand(i: 0)) && isSRA16(Op: Mul.getOperand(i: 1))) { |
| 12749 | Opcode = ARMISD::SMLALTT; |
| 12750 | Op0 = Mul->getOperand(Num: 0).getOperand(i: 0); |
| 12751 | Op1 = Mul->getOperand(Num: 1).getOperand(i: 0); |
| 12752 | } |
| 12753 | |
| 12754 | if (!Op0 || !Op1) |
| 12755 | return SDValue(); |
| 12756 | |
| 12757 | SDValue SMLAL = DAG.getNode(Opcode, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
| 12758 | N1: Op0, N2: Op1, N3: Lo, N4: Hi); |
| 12759 | // Replace the ADD nodes' uses with the MLAL node's values. |
| 12760 | SDValue HiMLALResult(SMLAL.getNode(), 1); |
| 12761 | SDValue LoMLALResult(SMLAL.getNode(), 0); |
| 12762 | |
| 12763 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddcNode, 0), To: LoMLALResult); |
| 12764 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddeNode, 0), To: HiMLALResult); |
| 12765 | |
| 12766 | // Return original node to notify the driver to stop replacing. |
| 12767 | SDValue resNode(AddcNode, 0); |
| 12768 | return resNode; |
| 12769 | } |
| 12770 | |
| 12771 | static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode, |
| 12772 | TargetLowering::DAGCombinerInfo &DCI, |
| 12773 | const ARMSubtarget *Subtarget) { |
| 12774 | // Look for multiply add opportunities. |
| 12775 | // The pattern is an ISD::UMUL_LOHI followed by two add nodes, where |
| 12776 | // each add node consumes a value from ISD::UMUL_LOHI and there is |
| 12777 | // a glue link from the first add to the second add. |
| 12778 | // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by |
| 12779 | // a S/UMLAL instruction. |
| 12780 | //                  UMUL_LOHI |
| 12781 | //                 / :lo    \ :hi |
| 12782 | //                V          \          [no multiline comment] |
| 12783 | //    loAdd ->  ADDC         | |
| 12784 | //                 \ :carry / |
| 12785 | //                   V      V |
| 12786 | //                    ADDE <- hiAdd |
| 12787 | // |
| 12788 | // In the special case where only the higher part of a signed result is used |
| 12789 | // and the add to the low part of the result of ISD::UMUL_LOHI adds or subtracts |
| 12790 | // a constant with the exact value of 0x80000000, we recognize we are dealing |
| 12791 | // with a "rounded multiply and add" (or subtract) and transform it into |
| 12792 | // either an ARMISD::SMMLAR or an ARMISD::SMMLSR, respectively. |
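| | // (Adding 0x80000000 to the low half before taking the high half rounds |
| | // the 64-bit product to the nearest multiple of 2^32, which is what the |
| | // SMMLAR / SMMLSR instructions compute.) |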
| 12793 | |
| 12794 | assert((AddeSubeNode->getOpcode() == ARMISD::ADDE || |
| 12795 | AddeSubeNode->getOpcode() == ARMISD::SUBE) && |
| 12796 | "Expect an ADDE or SUBE" ); |
| 12797 | |
| 12798 | assert(AddeSubeNode->getNumOperands() == 3 && |
| 12799 | AddeSubeNode->getOperand(2).getValueType() == MVT::i32 && |
| 12800 | "ADDE node has the wrong inputs" ); |
| 12801 | |
| 12802 | // Check that we are chained to the right ADDC or SUBC node. |
| 12803 | SDNode *AddcSubcNode = AddeSubeNode->getOperand(Num: 2).getNode(); |
| 12804 | if ((AddeSubeNode->getOpcode() == ARMISD::ADDE && |
| 12805 | AddcSubcNode->getOpcode() != ARMISD::ADDC) || |
| 12806 | (AddeSubeNode->getOpcode() == ARMISD::SUBE && |
| 12807 | AddcSubcNode->getOpcode() != ARMISD::SUBC)) |
| 12808 | return SDValue(); |
| 12809 | |
| 12810 | SDValue AddcSubcOp0 = AddcSubcNode->getOperand(Num: 0); |
| 12811 | SDValue AddcSubcOp1 = AddcSubcNode->getOperand(Num: 1); |
| 12812 | |
| 12813 | // Check if the two operands are from the same mul_lohi node. |
| 12814 | if (AddcSubcOp0.getNode() == AddcSubcOp1.getNode()) |
| 12815 | return SDValue(); |
| 12816 | |
| 12817 | assert(AddcSubcNode->getNumValues() == 2 && |
| 12818 | AddcSubcNode->getValueType(0) == MVT::i32 && |
| 12819 | "Expect ADDC with two result values. First: i32" ); |
| 12820 | |
| 12821 | // Check that the ADDC adds the low result of the S/UMUL_LOHI. If not, it |
| 12822 | // may be an SMLAL which multiplies two 16-bit values. |
| 12823 | if (AddeSubeNode->getOpcode() == ARMISD::ADDE && |
| 12824 | AddcSubcOp0->getOpcode() != ISD::UMUL_LOHI && |
| 12825 | AddcSubcOp0->getOpcode() != ISD::SMUL_LOHI && |
| 12826 | AddcSubcOp1->getOpcode() != ISD::UMUL_LOHI && |
| 12827 | AddcSubcOp1->getOpcode() != ISD::SMUL_LOHI) |
| 12828 | return AddCombineTo64BitSMLAL16(AddcNode: AddcSubcNode, AddeNode: AddeSubeNode, DCI, Subtarget); |
| 12829 | |
| 12830 | // Check for the triangle shape. |
| 12831 | SDValue AddeSubeOp0 = AddeSubeNode->getOperand(Num: 0); |
| 12832 | SDValue AddeSubeOp1 = AddeSubeNode->getOperand(Num: 1); |
| 12833 | |
| 12834 | // Make sure that the ADDE/SUBE operands are not coming from the same node. |
| 12835 | if (AddeSubeOp0.getNode() == AddeSubeOp1.getNode()) |
| 12836 | return SDValue(); |
| 12837 | |
| 12838 | // Find the MUL_LOHI node walking up ADDE/SUBE's operands. |
| 12839 | bool IsLeftOperandMUL = false; |
| 12840 | SDValue MULOp = findMUL_LOHI(V: AddeSubeOp0); |
| 12841 | if (MULOp == SDValue()) |
| 12842 | MULOp = findMUL_LOHI(V: AddeSubeOp1); |
| 12843 | else |
| 12844 | IsLeftOperandMUL = true; |
| 12845 | if (MULOp == SDValue()) |
| 12846 | return SDValue(); |
| 12847 | |
| 12848 | // Figure out the right opcode. |
| 12849 | unsigned Opc = MULOp->getOpcode(); |
| 12850 | unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL; |
| 12851 | |
| 12852 | // Figure out the high and low input values to the MLAL node. |
| 12853 | SDValue *HiAddSub = nullptr; |
| 12854 | SDValue *LoMul = nullptr; |
| 12855 | SDValue *LowAddSub = nullptr; |
| 12856 | |
| 12857 | // Ensure that ADDE/SUBE is from high result of ISD::xMUL_LOHI. |
| 12858 | if ((AddeSubeOp0 != MULOp.getValue(R: 1)) && (AddeSubeOp1 != MULOp.getValue(R: 1))) |
| 12859 | return SDValue(); |
| 12860 | |
| 12861 | if (IsLeftOperandMUL) |
| 12862 | HiAddSub = &AddeSubeOp1; |
| 12863 | else |
| 12864 | HiAddSub = &AddeSubeOp0; |
| 12865 | |
| 12866 | // Ensure that LoMul and LowAddSub are taken from the correct ISD::SMUL_LOHI node |
| 12867 | // whose low result is fed to the ADDC/SUBC we are checking. |
| 12868 | |
| 12869 | if (AddcSubcOp0 == MULOp.getValue(R: 0)) { |
| 12870 | LoMul = &AddcSubcOp0; |
| 12871 | LowAddSub = &AddcSubcOp1; |
| 12872 | } |
| 12873 | if (AddcSubcOp1 == MULOp.getValue(R: 0)) { |
| 12874 | LoMul = &AddcSubcOp1; |
| 12875 | LowAddSub = &AddcSubcOp0; |
| 12876 | } |
| 12877 | |
| 12878 | if (!LoMul) |
| 12879 | return SDValue(); |
| 12880 | |
| 12881 | // If HiAddSub is the same node as ADDC/SUBC or is a predecessor of ADDC/SUBC, |
| 12882 | // the replacement below will create a cycle. |
| 12883 | if (AddcSubcNode == HiAddSub->getNode() || |
| 12884 | AddcSubcNode->isPredecessorOf(N: HiAddSub->getNode())) |
| 12885 | return SDValue(); |
| 12886 | |
| 12887 | // Create the merged node. |
| 12888 | SelectionDAG &DAG = DCI.DAG; |
| 12889 | |
| 12890 | // Start building operand list. |
| 12891 | SmallVector<SDValue, 8> Ops; |
| 12892 | Ops.push_back(Elt: LoMul->getOperand(i: 0)); |
| 12893 | Ops.push_back(Elt: LoMul->getOperand(i: 1)); |
| 12894 | |
| 12895 | // Check whether we can use SMMLAR, SMMLSR or SMMULR instead. For this to be |
| 12896 | // the case, we must be doing signed multiplication and only use the higher |
| 12897 | // part of the result of the MLAL; furthermore, the LowAddSub must be a constant |
| 12898 | // addition or subtraction with the value of 0x80000000. |
| 12899 | if (Subtarget->hasV6Ops() && Subtarget->hasDSP() && Subtarget->useMulOps() && |
| 12900 | FinalOpc == ARMISD::SMLAL && !AddeSubeNode->hasAnyUseOfValue(Value: 1) && |
| 12901 | LowAddSub->getNode()->getOpcode() == ISD::Constant && |
| 12902 | static_cast<ConstantSDNode *>(LowAddSub->getNode())->getZExtValue() == |
| 12903 | 0x80000000) { |
| 12904 | Ops.push_back(Elt: *HiAddSub); |
| 12905 | if (AddcSubcNode->getOpcode() == ARMISD::SUBC) { |
| 12906 | FinalOpc = ARMISD::SMMLSR; |
| 12907 | } else { |
| 12908 | FinalOpc = ARMISD::SMMLAR; |
| 12909 | } |
| 12910 | SDValue NewNode = DAG.getNode(Opcode: FinalOpc, DL: SDLoc(AddcSubcNode), VT: MVT::i32, Ops); |
| 12911 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddeSubeNode, 0), To: NewNode); |
| 12912 | |
| 12913 | return SDValue(AddeSubeNode, 0); |
| 12914 | } else if (AddcSubcNode->getOpcode() == ARMISD::SUBC) |
| 12915 | // SMMLS is generated during instruction selection and the rest of this |
| 12916 | // function cannot handle the case where AddcSubcNode is a SUBC. |
| 12917 | return SDValue(); |
| 12918 | |
| 12919 | // Finish building the operand list for {U/S}MLAL |
| 12920 | Ops.push_back(Elt: *LowAddSub); |
| 12921 | Ops.push_back(Elt: *HiAddSub); |
| 12922 | |
| 12923 | SDValue MLALNode = DAG.getNode(Opcode: FinalOpc, DL: SDLoc(AddcSubcNode), |
| 12924 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), Ops); |
| 12925 | |
| 12926 | // Replace the ADD nodes' uses with the MLAL node's values. |
| 12927 | SDValue HiMLALResult(MLALNode.getNode(), 1); |
| 12928 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddeSubeNode, 0), To: HiMLALResult); |
| 12929 | |
| 12930 | SDValue LoMLALResult(MLALNode.getNode(), 0); |
| 12931 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddcSubcNode, 0), To: LoMLALResult); |
| 12932 | |
| 12933 | // Return original node to notify the driver to stop replacing. |
| 12934 | return SDValue(AddeSubeNode, 0); |
| 12935 | } |
| 12936 | |
| 12937 | static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode, |
| 12938 | TargetLowering::DAGCombinerInfo &DCI, |
| 12939 | const ARMSubtarget *Subtarget) { |
| 12940 | // UMAAL is similar to UMLAL except that it adds two separate unsigned 32-bit |
| 12941 | // values to the product rather than a single 64-bit accumulator. While trying |
| 12942 | // to combine for the other MLAL nodes, first search for the chance to use |
| 12943 | // UMAAL. Check if Addc uses a node which has already been combined into a |
| 12944 | // UMLAL. The other pattern is UMLAL using Addc/Adde as the addend, and it's |
| 12945 | // handled in PerformUMLALCombine. |
| 12945 | |
| 12946 | if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP()) |
| 12947 | return AddCombineTo64bitMLAL(AddeSubeNode: AddeNode, DCI, Subtarget); |
| 12948 | |
| 12949 | // Check that we have a glued ADDC node. |
| 12950 | SDNode* AddcNode = AddeNode->getOperand(Num: 2).getNode(); |
| 12951 | if (AddcNode->getOpcode() != ARMISD::ADDC) |
| 12952 | return SDValue(); |
| 12953 | |
| 12954 | // Find the converted UMAAL or quit if it doesn't exist. |
| 12955 | SDNode *UmlalNode = nullptr; |
| 12956 | SDValue AddHi; |
| 12957 | if (AddcNode->getOperand(Num: 0).getOpcode() == ARMISD::UMLAL) { |
| 12958 | UmlalNode = AddcNode->getOperand(Num: 0).getNode(); |
| 12959 | AddHi = AddcNode->getOperand(Num: 1); |
| 12960 | } else if (AddcNode->getOperand(Num: 1).getOpcode() == ARMISD::UMLAL) { |
| 12961 | UmlalNode = AddcNode->getOperand(Num: 1).getNode(); |
| 12962 | AddHi = AddcNode->getOperand(Num: 0); |
| 12963 | } else { |
| 12964 | return AddCombineTo64bitMLAL(AddeSubeNode: AddeNode, DCI, Subtarget); |
| 12965 | } |
| 12966 | |
| 12967 | // The ADDC should be glued to an ADDE node, which uses the same UMLAL as |
| 12968 | // the ADDC as well as Zero. |
| 12969 | if (!isNullConstant(V: UmlalNode->getOperand(Num: 3))) |
| 12970 | return SDValue(); |
| 12971 | |
| 12972 | if ((isNullConstant(V: AddeNode->getOperand(Num: 0)) && |
| 12973 | AddeNode->getOperand(Num: 1).getNode() == UmlalNode) || |
| 12974 | (AddeNode->getOperand(Num: 0).getNode() == UmlalNode && |
| 12975 | isNullConstant(V: AddeNode->getOperand(Num: 1)))) { |
| 12976 | SelectionDAG &DAG = DCI.DAG; |
| 12977 | SDValue Ops[] = { UmlalNode->getOperand(Num: 0), UmlalNode->getOperand(Num: 1), |
| 12978 | UmlalNode->getOperand(Num: 2), AddHi }; |
| 12979 | SDValue UMAAL = DAG.getNode(Opcode: ARMISD::UMAAL, DL: SDLoc(AddcNode), |
| 12980 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), Ops); |
| 12981 | |
| 12982 | // Replace the ADD nodes' uses with the UMAAL node's values. |
| 12983 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddeNode, 0), To: SDValue(UMAAL.getNode(), 1)); |
| 12984 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddcNode, 0), To: SDValue(UMAAL.getNode(), 0)); |
| 12985 | |
| 12986 | // Return original node to notify the driver to stop replacing. |
| 12987 | return SDValue(AddeNode, 0); |
| 12988 | } |
| 12989 | return SDValue(); |
| 12990 | } |
| 12991 | |
| 12992 | static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG, |
| 12993 | const ARMSubtarget *Subtarget) { |
| 12994 | if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP()) |
| 12995 | return SDValue(); |
| 12996 | |
| 12997 | // Check that we have a pair of ADDC and ADDE as operands. |
| 12998 | // Both addends of the ADDE must be zero. |
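| | // That is, the pattern UMLAL(a, b, ADDC(x, y).0, ADDE(0, 0, carry).0) |
| | // computes a * b + x + y as a 64-bit value, which is exactly UMAAL(a, b, x, y). |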
| 12999 | SDNode* AddcNode = N->getOperand(Num: 2).getNode(); |
| 13000 | SDNode* AddeNode = N->getOperand(Num: 3).getNode(); |
| 13001 | if ((AddcNode->getOpcode() == ARMISD::ADDC) && |
| 13002 | (AddeNode->getOpcode() == ARMISD::ADDE) && |
| 13003 | isNullConstant(V: AddeNode->getOperand(Num: 0)) && |
| 13004 | isNullConstant(V: AddeNode->getOperand(Num: 1)) && |
| 13005 | (AddeNode->getOperand(Num: 2).getNode() == AddcNode)) |
| 13006 | return DAG.getNode(Opcode: ARMISD::UMAAL, DL: SDLoc(N), |
| 13007 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
| 13008 | Ops: {N->getOperand(Num: 0), N->getOperand(Num: 1), |
| 13009 | AddcNode->getOperand(Num: 0), AddcNode->getOperand(Num: 1)}); |
| 13010 | else |
| 13011 | return SDValue(); |
| 13012 | } |
| 13013 | |
| 13014 | static SDValue PerformAddcSubcCombine(SDNode *N, |
| 13015 | TargetLowering::DAGCombinerInfo &DCI, |
| 13016 | const ARMSubtarget *Subtarget) { |
| 13017 | SelectionDAG &DAG(DCI.DAG); |
| 13018 | |
| 13019 | if (N->getOpcode() == ARMISD::SUBC && N->hasAnyUseOfValue(Value: 1)) { |
| 13020 | // (SUBC (ADDE 0, 0, C), 1) -> C |
| 13021 | SDValue LHS = N->getOperand(Num: 0); |
| 13022 | SDValue RHS = N->getOperand(Num: 1); |
| 13023 | if (LHS->getOpcode() == ARMISD::ADDE && |
| 13024 | isNullConstant(V: LHS->getOperand(Num: 0)) && |
| 13025 | isNullConstant(V: LHS->getOperand(Num: 1)) && isOneConstant(V: RHS)) { |
| 13026 | return DCI.CombineTo(N, Res0: SDValue(N, 0), Res1: LHS->getOperand(Num: 2)); |
| 13027 | } |
| 13028 | } |
| 13029 | |
| 13030 | if (Subtarget->isThumb1Only()) { |
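| | // Thumb1 add/sub immediates must be non-negative, so fold a negative |
| | // constant by switching to the opposite operation with its negation. |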
| 13031 | SDValue RHS = N->getOperand(Num: 1); |
| 13032 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: RHS)) { |
| 13033 | int32_t imm = C->getSExtValue(); |
| 13034 | if (imm < 0 && imm > std::numeric_limits<int>::min()) { |
| 13035 | SDLoc DL(N); |
| 13036 | RHS = DAG.getConstant(Val: -imm, DL, VT: MVT::i32); |
| 13037 | unsigned Opcode = (N->getOpcode() == ARMISD::ADDC) ? ARMISD::SUBC |
| 13038 | : ARMISD::ADDC; |
| 13039 | return DAG.getNode(Opcode, DL, VTList: N->getVTList(), N1: N->getOperand(Num: 0), N2: RHS); |
| 13040 | } |
| 13041 | } |
| 13042 | } |
| 13043 | |
| 13044 | return SDValue(); |
| 13045 | } |
| 13046 | |
| 13047 | static SDValue PerformAddeSubeCombine(SDNode *N, |
| 13048 | TargetLowering::DAGCombinerInfo &DCI, |
| 13049 | const ARMSubtarget *Subtarget) { |
| 13050 | if (Subtarget->isThumb1Only()) { |
| 13051 | SelectionDAG &DAG = DCI.DAG; |
| 13052 | SDValue RHS = N->getOperand(Num: 1); |
| 13053 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: RHS)) { |
| 13054 | int64_t imm = C->getSExtValue(); |
| 13055 | if (imm < 0) { |
| 13056 | SDLoc DL(N); |
| 13057 | |
| 13058 | // The with-carry-in form matches bitwise not instead of the negation. |
| 13059 | // Effectively, the inverse interpretation of the carry flag already |
| 13060 | // accounts for part of the negation. |
| 13061 | RHS = DAG.getConstant(Val: ~imm, DL, VT: MVT::i32); |
| 13062 | |
| 13063 | unsigned Opcode = (N->getOpcode() == ARMISD::ADDE) ? ARMISD::SUBE |
| 13064 | : ARMISD::ADDE; |
| 13065 | return DAG.getNode(Opcode, DL, VTList: N->getVTList(), |
| 13066 | N1: N->getOperand(Num: 0), N2: RHS, N3: N->getOperand(Num: 2)); |
| 13067 | } |
| 13068 | } |
| 13069 | } else if (N->getOperand(Num: 1)->getOpcode() == ISD::SMUL_LOHI) { |
| 13070 | return AddCombineTo64bitMLAL(AddeSubeNode: N, DCI, Subtarget); |
| 13071 | } |
| 13072 | return SDValue(); |
| 13073 | } |
| 13074 | |
| 13075 | static SDValue PerformSELECTCombine(SDNode *N, |
| 13076 | TargetLowering::DAGCombinerInfo &DCI, |
| 13077 | const ARMSubtarget *Subtarget) { |
| 13078 | if (!Subtarget->hasMVEIntegerOps()) |
| 13079 | return SDValue(); |
| 13080 | |
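| | // Look for a scalar min/max select between a value and a vector reduction |
| | // of the same kind, e.g. |
| | //   select(setcc(x, vecreduce_umin(v), ult), x, vecreduce_umin(v)) |
| | // which MVE can compute with a single VMINV/VMAXV, reducing the vector |
| | // against the scalar starting value. |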
| 13081 | SDLoc dl(N); |
| 13082 | SDValue SetCC; |
| 13083 | SDValue LHS; |
| 13084 | SDValue RHS; |
| 13085 | ISD::CondCode CC; |
| 13086 | SDValue TrueVal; |
| 13087 | SDValue FalseVal; |
| 13088 | |
| 13089 | if (N->getOpcode() == ISD::SELECT && |
| 13090 | N->getOperand(Num: 0)->getOpcode() == ISD::SETCC) { |
| 13091 | SetCC = N->getOperand(Num: 0); |
| 13092 | LHS = SetCC->getOperand(Num: 0); |
| 13093 | RHS = SetCC->getOperand(Num: 1); |
| 13094 | CC = cast<CondCodeSDNode>(Val: SetCC->getOperand(Num: 2))->get(); |
| 13095 | TrueVal = N->getOperand(Num: 1); |
| 13096 | FalseVal = N->getOperand(Num: 2); |
| 13097 | } else if (N->getOpcode() == ISD::SELECT_CC) { |
| 13098 | LHS = N->getOperand(Num: 0); |
| 13099 | RHS = N->getOperand(Num: 1); |
| 13100 | CC = cast<CondCodeSDNode>(Val: N->getOperand(Num: 4))->get(); |
| 13101 | TrueVal = N->getOperand(Num: 2); |
| 13102 | FalseVal = N->getOperand(Num: 3); |
| 13103 | } else { |
| 13104 | return SDValue(); |
| 13105 | } |
| 13106 | |
| 13107 | unsigned int Opcode = 0; |
| 13108 | if ((TrueVal->getOpcode() == ISD::VECREDUCE_UMIN || |
| 13109 | FalseVal->getOpcode() == ISD::VECREDUCE_UMIN) && |
| 13110 | (CC == ISD::SETULT || CC == ISD::SETUGT)) { |
| 13111 | Opcode = ARMISD::VMINVu; |
| 13112 | if (CC == ISD::SETUGT) |
| 13113 | std::swap(a&: TrueVal, b&: FalseVal); |
| 13114 | } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_SMIN || |
| 13115 | FalseVal->getOpcode() == ISD::VECREDUCE_SMIN) && |
| 13116 | (CC == ISD::SETLT || CC == ISD::SETGT)) { |
| 13117 | Opcode = ARMISD::VMINVs; |
| 13118 | if (CC == ISD::SETGT) |
| 13119 | std::swap(a&: TrueVal, b&: FalseVal); |
| 13120 | } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_UMAX || |
| 13121 | FalseVal->getOpcode() == ISD::VECREDUCE_UMAX) && |
| 13122 | (CC == ISD::SETUGT || CC == ISD::SETULT)) { |
| 13123 | Opcode = ARMISD::VMAXVu; |
| 13124 | if (CC == ISD::SETULT) |
| 13125 | std::swap(a&: TrueVal, b&: FalseVal); |
| 13126 | } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_SMAX || |
| 13127 | FalseVal->getOpcode() == ISD::VECREDUCE_SMAX) && |
| 13128 | (CC == ISD::SETGT || CC == ISD::SETLT)) { |
| 13129 | Opcode = ARMISD::VMAXVs; |
| 13130 | if (CC == ISD::SETLT) |
| 13131 | std::swap(a&: TrueVal, b&: FalseVal); |
| 13132 | } else |
| 13133 | return SDValue(); |
| 13134 | |
| 13135 | // Normalise to the right hand side being the vector reduction |
| 13136 | switch (TrueVal->getOpcode()) { |
| 13137 | case ISD::VECREDUCE_UMIN: |
| 13138 | case ISD::VECREDUCE_SMIN: |
| 13139 | case ISD::VECREDUCE_UMAX: |
| 13140 | case ISD::VECREDUCE_SMAX: |
| 13141 | std::swap(a&: LHS, b&: RHS); |
| 13142 | std::swap(a&: TrueVal, b&: FalseVal); |
| 13143 | break; |
| 13144 | } |
| 13145 | |
| 13146 | EVT VectorType = FalseVal->getOperand(Num: 0).getValueType(); |
| 13147 | |
| 13148 | if (VectorType != MVT::v16i8 && VectorType != MVT::v8i16 && |
| 13149 | VectorType != MVT::v4i32) |
| 13150 | return SDValue(); |
| 13151 | |
| 13152 | EVT VectorScalarType = VectorType.getVectorElementType(); |
| 13153 | |
| 13154 | // The values being selected must also be the ones being compared |
| 13155 | if (TrueVal != LHS || FalseVal != RHS) |
| 13156 | return SDValue(); |
| 13157 | |
| 13158 | EVT LeftType = LHS->getValueType(ResNo: 0); |
| 13159 | EVT RightType = RHS->getValueType(ResNo: 0); |
| 13160 | |
| 13161 | // The types must match the reduced type too |
| 13162 | if (LeftType != VectorScalarType || RightType != VectorScalarType) |
| 13163 | return SDValue(); |
| 13164 | |
| 13165 | // Legalise the scalar to an i32 |
| 13166 | if (VectorScalarType != MVT::i32) |
| 13167 | LHS = DCI.DAG.getNode(Opcode: ISD::ANY_EXTEND, DL: dl, VT: MVT::i32, Operand: LHS); |
| 13168 | |
| 13169 | // Generate the reduction as an i32 for legalisation purposes |
| 13170 | auto Reduction = |
| 13171 | DCI.DAG.getNode(Opcode, DL: dl, VT: MVT::i32, N1: LHS, N2: RHS->getOperand(Num: 0)); |
| 13172 | |
| 13173 | // The result isn't actually an i32 so truncate it back to its original type |
| 13174 | if (VectorScalarType != MVT::i32) |
| 13175 | Reduction = DCI.DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: VectorScalarType, Operand: Reduction); |
| 13176 | |
| 13177 | return Reduction; |
| 13178 | } |
| 13179 | |
| 13180 | // A special combine for the vqdmulh family of instructions. This is one of the |
| 13181 | // potential set of patterns that could match this instruction. The base pattern |
| 13182 | // you would expect is min(max(ashr(mul(mul(sext(x), 2), sext(y)), 16))). |
| 13183 | // This matches the variant min(max(ashr(mul(mul(sext(x), sext(y)), 2), 16))), |
| 13184 | // which llvm will have optimized to min(ashr(mul(sext(x), sext(y)), 15)) as |
| 13185 | // the max is unnecessary. |
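| | // vqdmulh.sN computes sat((2 * x * y) >> N), i.e. sat((x * y) >> (N - 1)); |
| | // the smin against the type's maximum value provides the only saturation |
| | // that can occur (x == y == minimum), so the pattern above is equivalent. |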
| 13186 | static SDValue PerformVQDMULHCombine(SDNode *N, SelectionDAG &DAG) { |
| 13187 | EVT VT = N->getValueType(ResNo: 0); |
| 13188 | SDValue Shft; |
| 13189 | ConstantSDNode *Clamp; |
| 13190 | |
| 13191 | if (!VT.isVector() || VT.getScalarSizeInBits() > 64) |
| 13192 | return SDValue(); |
| 13193 | |
| 13194 | if (N->getOpcode() == ISD::SMIN) { |
| 13195 | Shft = N->getOperand(Num: 0); |
| 13196 | Clamp = isConstOrConstSplat(N: N->getOperand(Num: 1)); |
| 13197 | } else if (N->getOpcode() == ISD::VSELECT) { |
| 13198 | // Detect an SMIN, which for an i64 node will be a vselect/setcc, not an smin. |
| 13199 | SDValue Cmp = N->getOperand(Num: 0); |
| 13200 | if (Cmp.getOpcode() != ISD::SETCC || |
| 13201 | cast<CondCodeSDNode>(Val: Cmp.getOperand(i: 2))->get() != ISD::SETLT || |
| 13202 | Cmp.getOperand(i: 0) != N->getOperand(Num: 1) || |
| 13203 | Cmp.getOperand(i: 1) != N->getOperand(Num: 2)) |
| 13204 | return SDValue(); |
| 13205 | Shft = N->getOperand(Num: 1); |
| 13206 | Clamp = isConstOrConstSplat(N: N->getOperand(Num: 2)); |
| 13207 | } else |
| 13208 | return SDValue(); |
| 13209 | |
| 13210 | if (!Clamp) |
| 13211 | return SDValue(); |
| 13212 | |
| 13213 | MVT ScalarType; |
| 13214 | int ShftAmt = 0; |
| 13215 | switch (Clamp->getSExtValue()) { |
| 13216 | case (1 << 7) - 1: |
| 13217 | ScalarType = MVT::i8; |
| 13218 | ShftAmt = 7; |
| 13219 | break; |
| 13220 | case (1 << 15) - 1: |
| 13221 | ScalarType = MVT::i16; |
| 13222 | ShftAmt = 15; |
| 13223 | break; |
| 13224 | case (1ULL << 31) - 1: |
| 13225 | ScalarType = MVT::i32; |
| 13226 | ShftAmt = 31; |
| 13227 | break; |
| 13228 | default: |
| 13229 | return SDValue(); |
| 13230 | } |
| 13231 | |
| 13232 | if (Shft.getOpcode() != ISD::SRA) |
| 13233 | return SDValue(); |
| 13234 | ConstantSDNode *N1 = isConstOrConstSplat(N: Shft.getOperand(i: 1)); |
| 13235 | if (!N1 || N1->getSExtValue() != ShftAmt) |
| 13236 | return SDValue(); |
| 13237 | |
| 13238 | SDValue Mul = Shft.getOperand(i: 0); |
| 13239 | if (Mul.getOpcode() != ISD::MUL) |
| 13240 | return SDValue(); |
| 13241 | |
| 13242 | SDValue Ext0 = Mul.getOperand(i: 0); |
| 13243 | SDValue Ext1 = Mul.getOperand(i: 1); |
| 13244 | if (Ext0.getOpcode() != ISD::SIGN_EXTEND || |
| 13245 | Ext1.getOpcode() != ISD::SIGN_EXTEND) |
| 13246 | return SDValue(); |
| 13247 | EVT VecVT = Ext0.getOperand(i: 0).getValueType(); |
| 13248 | if (!VecVT.isPow2VectorType() || VecVT.getVectorNumElements() == 1) |
| 13249 | return SDValue(); |
| 13250 | if (Ext1.getOperand(i: 0).getValueType() != VecVT || |
| 13251 | VecVT.getScalarType() != ScalarType || |
| 13252 | VT.getScalarSizeInBits() < ScalarType.getScalarSizeInBits() * 2) |
| 13253 | return SDValue(); |
| 13254 | |
| 13255 | SDLoc DL(Mul); |
| 13256 | unsigned LegalLanes = 128 / (ShftAmt + 1); |
| 13257 | EVT LegalVecVT = MVT::getVectorVT(VT: ScalarType, NumElements: LegalLanes); |
| 13258 | // For types smaller than legal vectors, extend to be legal and only use the |
| 13259 | // needed lanes. |
| 13260 | if (VecVT.getSizeInBits() < 128) { |
| 13261 | EVT ExtVecVT = |
| 13262 | MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: 128 / VecVT.getVectorNumElements()), |
| 13263 | NumElements: VecVT.getVectorNumElements()); |
| 13264 | SDValue Inp0 = |
| 13265 | DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: ExtVecVT, Operand: Ext0.getOperand(i: 0)); |
| 13266 | SDValue Inp1 = |
| 13267 | DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: ExtVecVT, Operand: Ext1.getOperand(i: 0)); |
| 13268 | Inp0 = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT: LegalVecVT, Operand: Inp0); |
| 13269 | Inp1 = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT: LegalVecVT, Operand: Inp1); |
| 13270 | SDValue VQDMULH = DAG.getNode(Opcode: ARMISD::VQDMULH, DL, VT: LegalVecVT, N1: Inp0, N2: Inp1); |
| 13271 | SDValue Trunc = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT: ExtVecVT, Operand: VQDMULH); |
| 13272 | Trunc = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: VecVT, Operand: Trunc); |
| 13273 | return DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL, VT, Operand: Trunc); |
| 13274 | } |
| 13275 | |
| 13276 | // For larger types, split into legal sized chunks. |
| 13277 | assert(VecVT.getSizeInBits() % 128 == 0 && "Expected a power2 type"); |
| 13278 | unsigned NumParts = VecVT.getSizeInBits() / 128; |
| 13279 | SmallVector<SDValue> Parts; |
| 13280 | for (unsigned I = 0; I < NumParts; ++I) { |
| 13281 | SDValue Inp0 = |
| 13282 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT: LegalVecVT, N1: Ext0.getOperand(i: 0), |
| 13283 | N2: DAG.getVectorIdxConstant(Val: I * LegalLanes, DL)); |
| 13284 | SDValue Inp1 = |
| 13285 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT: LegalVecVT, N1: Ext1.getOperand(i: 0), |
| 13286 | N2: DAG.getVectorIdxConstant(Val: I * LegalLanes, DL)); |
| 13287 | SDValue VQDMULH = DAG.getNode(Opcode: ARMISD::VQDMULH, DL, VT: LegalVecVT, N1: Inp0, N2: Inp1); |
| 13288 | Parts.push_back(Elt: VQDMULH); |
| 13289 | } |
| 13290 | return DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL, VT, |
| 13291 | Operand: DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: VecVT, Ops: Parts)); |
| 13292 | } |
| 13293 | |
| 13294 | static SDValue PerformVSELECTCombine(SDNode *N, |
| 13295 | TargetLowering::DAGCombinerInfo &DCI, |
| 13296 | const ARMSubtarget *Subtarget) { |
| 13297 | if (!Subtarget->hasMVEIntegerOps()) |
| 13298 | return SDValue(); |
| 13299 | |
| 13300 | if (SDValue V = PerformVQDMULHCombine(N, DAG&: DCI.DAG)) |
| 13301 | return V; |
| 13302 | |
| 13303 | // Transforms vselect(not(cond), lhs, rhs) into vselect(cond, rhs, lhs). |
| 13304 | // |
| 13305 | // We need to re-implement this optimization here as the implementation in the |
| 13306 | // Target-Independent DAGCombiner does not handle the kind of constant we make |
| 13307 | // (it calls isConstOrConstSplat with AllowTruncation set to false - and for |
| 13308 | // good reason, allowing truncation there would break other targets). |
| 13309 | // |
| 13310 | // Currently, this is only done for MVE, as it's the only target that benefits |
| 13311 | // from this transformation (e.g. VPNOT+VPSEL becomes a single VPSEL). |
| 13312 | if (N->getOperand(Num: 0).getOpcode() != ISD::XOR) |
| 13313 | return SDValue(); |
| 13314 | SDValue XOR = N->getOperand(Num: 0); |
| 13315 | |
| 13316 | // Check if the XOR's RHS is either a 1, or a BUILD_VECTOR of 1s. |
| 13317 | // It is important to check with truncation allowed as the BUILD_VECTORs we |
| 13318 | // generate in those situations will truncate their operands. |
| 13319 | ConstantSDNode *Const = |
| 13320 | isConstOrConstSplat(N: XOR->getOperand(Num: 1), /*AllowUndefs*/ false, |
| 13321 | /*AllowTruncation*/ true); |
| 13322 | if (!Const || !Const->isOne()) |
| 13323 | return SDValue(); |
| 13324 | |
| 13325 | // Rewrite into vselect(cond, rhs, lhs). |
| 13326 | SDValue Cond = XOR->getOperand(Num: 0); |
| 13327 | SDValue LHS = N->getOperand(Num: 1); |
| 13328 | SDValue RHS = N->getOperand(Num: 2); |
| 13329 | EVT Type = N->getValueType(ResNo: 0); |
| 13330 | return DCI.DAG.getNode(Opcode: ISD::VSELECT, DL: SDLoc(N), VT: Type, N1: Cond, N2: RHS, N3: LHS); |
| 13331 | } |
| 13332 | |
| 13333 | // Convert vsetcc([0,1,2,..], splat(n), ult) -> vctp n |
| 13334 | static SDValue PerformVSetCCToVCTPCombine(SDNode *N, |
| 13335 | TargetLowering::DAGCombinerInfo &DCI, |
| 13336 | const ARMSubtarget *Subtarget) { |
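| | // The MVE VCTP instruction sets the first n predicate lanes and clears the |
| | // rest, which is exactly the result of comparing <0,1,2,...> ult splat(n). |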
| 13337 | SDValue Op0 = N->getOperand(Num: 0); |
| 13338 | SDValue Op1 = N->getOperand(Num: 1); |
| 13339 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: N->getOperand(Num: 2))->get(); |
| 13340 | EVT VT = N->getValueType(ResNo: 0); |
| 13341 | |
| 13342 | if (!Subtarget->hasMVEIntegerOps() || |
| 13343 | !DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
| 13344 | return SDValue(); |
| 13345 | |
| 13346 | if (CC == ISD::SETUGE) { |
| 13347 | std::swap(a&: Op0, b&: Op1); |
| 13348 | CC = ISD::SETULT; |
| 13349 | } |
| 13350 | |
| 13351 | if (CC != ISD::SETULT || VT.getScalarSizeInBits() != 1 || |
| 13352 | Op0.getOpcode() != ISD::BUILD_VECTOR) |
| 13353 | return SDValue(); |
| 13354 | |
| 13355 | // Check that the first operand is a BUILD_VECTOR of 0,1,2,... |
| 13356 | for (unsigned I = 0; I < VT.getVectorNumElements(); I++) { |
| 13357 | if (!Op0.getOperand(i: I).isUndef() && |
| 13358 | !(isa<ConstantSDNode>(Val: Op0.getOperand(i: I)) && |
| 13359 | Op0.getConstantOperandVal(i: I) == I)) |
| 13360 | return SDValue(); |
| 13361 | } |
| 13362 | |
| 13363 | // The second operand must be a splat; Op1S is the splatted value. |
| 13364 | SDValue Op1S = DCI.DAG.getSplatValue(V: Op1); |
| 13365 | if (!Op1S) |
| 13366 | return SDValue(); |
| 13367 | |
| 13368 | unsigned Opc; |
| 13369 | switch (VT.getVectorNumElements()) { |
| 13370 | case 2: |
| 13371 | Opc = Intrinsic::arm_mve_vctp64; |
| 13372 | break; |
| 13373 | case 4: |
| 13374 | Opc = Intrinsic::arm_mve_vctp32; |
| 13375 | break; |
| 13376 | case 8: |
| 13377 | Opc = Intrinsic::arm_mve_vctp16; |
| 13378 | break; |
| 13379 | case 16: |
| 13380 | Opc = Intrinsic::arm_mve_vctp8; |
| 13381 | break; |
| 13382 | default: |
| 13383 | return SDValue(); |
| 13384 | } |
| 13385 | |
| 13386 | SDLoc DL(N); |
| 13387 | return DCI.DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT, |
| 13388 | N1: DCI.DAG.getConstant(Val: Opc, DL, VT: MVT::i32), |
| 13389 | N2: DCI.DAG.getZExtOrTrunc(Op: Op1S, DL, VT: MVT::i32)); |
| 13390 | } |
| 13391 | |
| 13392 | /// PerformADDECombine - Target-specific dag combine transform from |
| 13393 | /// ARMISD::ADDC, ARMISD::ADDE, and ISD::MUL_LOHI to MLAL or |
| 13394 | /// ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL |
| 13395 | static SDValue PerformADDECombine(SDNode *N, |
| 13396 | TargetLowering::DAGCombinerInfo &DCI, |
| 13397 | const ARMSubtarget *Subtarget) { |
| 13398 | // Only ARM and Thumb2 support UMLAL/SMLAL. |
| 13399 | if (Subtarget->isThumb1Only()) |
| 13400 | return PerformAddeSubeCombine(N, DCI, Subtarget); |
| 13401 | |
| 13402 | // Only perform the checks after legalize when the pattern is available. |
| 13403 | if (DCI.isBeforeLegalize()) return SDValue(); |
| 13404 | |
| 13405 | return AddCombineTo64bitUMAAL(AddeNode: N, DCI, Subtarget); |
| 13406 | } |
| 13407 | |
| 13408 | /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with |
| 13409 | /// operands N0 and N1. This is a helper for PerformADDCombine that is |
| 13410 | /// called with the default operands, and if that fails, with commuted |
| 13411 | /// operands. |
| 13412 | static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, |
| 13413 | TargetLowering::DAGCombinerInfo &DCI, |
| 13414 | const ARMSubtarget *Subtarget){ |
| 13415 | // Attempt to create vpadd for this add. |
| 13416 | if (SDValue Result = AddCombineToVPADD(N, N0, N1, DCI, Subtarget)) |
| 13417 | return Result; |
| 13418 | |
| 13419 | // Attempt to create vpaddl for this add. |
| 13420 | if (SDValue Result = AddCombineVUZPToVPADDL(N, N0, N1, DCI, Subtarget)) |
| 13421 | return Result; |
| 13422 | if (SDValue Result = AddCombineBUILD_VECTORToVPADDL(N, N0, N1, DCI, |
| 13423 | Subtarget)) |
| 13424 | return Result; |
| 13425 | |
| 13426 | // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) |
| 13427 | if (N0.getNode()->hasOneUse()) |
| 13428 | if (SDValue Result = combineSelectAndUse(N, Slct: N0, OtherOp: N1, DCI)) |
| 13429 | return Result; |
| 13430 | return SDValue(); |
| 13431 | } |
| 13432 | |
| 13433 | static SDValue TryDistrubutionADDVecReduce(SDNode *N, SelectionDAG &DAG) { |
| 13434 | EVT VT = N->getValueType(ResNo: 0); |
| 13435 | SDValue N0 = N->getOperand(Num: 0); |
| 13436 | SDValue N1 = N->getOperand(Num: 1); |
| 13437 | SDLoc dl(N); |
| 13438 | |
| 13439 | auto IsVecReduce = [](SDValue Op) { |
| 13440 | switch (Op.getOpcode()) { |
| 13441 | case ISD::VECREDUCE_ADD: |
| 13442 | case ARMISD::VADDVs: |
| 13443 | case ARMISD::VADDVu: |
| 13444 | case ARMISD::VMLAVs: |
| 13445 | case ARMISD::VMLAVu: |
| 13446 | return true; |
| 13447 | } |
| 13448 | return false; |
| 13449 | }; |
| 13450 | |
| 13451 | auto DistrubuteAddAddVecReduce = [&](SDValue N0, SDValue N1) { |
| 13452 | // Distribute add(X, add(vecreduce(Y), vecreduce(Z))) -> |
| 13453 | // add(add(X, vecreduce(Y)), vecreduce(Z)) |
| 13454 | // to make better use of vaddva style instructions. |
| 13455 | if (VT == MVT::i32 && N1.getOpcode() == ISD::ADD && !IsVecReduce(N0) && |
| 13456 | IsVecReduce(N1.getOperand(i: 0)) && IsVecReduce(N1.getOperand(i: 1)) && |
| 13457 | !isa<ConstantSDNode>(Val: N0) && N1->hasOneUse()) { |
| 13458 | SDValue Add0 = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: N0, N2: N1.getOperand(i: 0)); |
| 13459 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: Add0, N2: N1.getOperand(i: 1)); |
| 13460 | } |
| 13461 | // And turn add(add(A, reduce(B)), add(C, reduce(D))) -> |
| 13462 | // add(add(add(A, C), reduce(B)), reduce(D)) |
| 13463 | if (VT == MVT::i32 && N0.getOpcode() == ISD::ADD && |
| 13464 | N1.getOpcode() == ISD::ADD && N0->hasOneUse() && N1->hasOneUse()) { |
| 13465 | unsigned N0RedOp = 0; |
| 13466 | if (!IsVecReduce(N0.getOperand(i: N0RedOp))) { |
| 13467 | N0RedOp = 1; |
| 13468 | if (!IsVecReduce(N0.getOperand(i: N0RedOp))) |
| 13469 | return SDValue(); |
| 13470 | } |
| 13471 | |
| 13472 | unsigned N1RedOp = 0; |
| 13473 | if (!IsVecReduce(N1.getOperand(i: N1RedOp))) |
| 13474 | N1RedOp = 1; |
| 13475 | if (!IsVecReduce(N1.getOperand(i: N1RedOp))) |
| 13476 | return SDValue(); |
| 13477 | |
| 13478 | SDValue Add0 = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: N0.getOperand(i: 1 - N0RedOp), |
| 13479 | N2: N1.getOperand(i: 1 - N1RedOp)); |
| 13480 | SDValue Add1 = |
| 13481 | DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: Add0, N2: N0.getOperand(i: N0RedOp)); |
| 13482 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: Add1, N2: N1.getOperand(i: N1RedOp)); |
| 13483 | } |
| 13484 | return SDValue(); |
| 13485 | }; |
| 13486 | if (SDValue R = DistrubuteAddAddVecReduce(N0, N1)) |
| 13487 | return R; |
| 13488 | if (SDValue R = DistrubuteAddAddVecReduce(N1, N0)) |
| 13489 | return R; |
| 13490 | |
| 13491 | // Distribute add(vecreduce(load(Y)), vecreduce(load(Z))) |
| 13492 | // Or add(add(X, vecreduce(load(Y))), vecreduce(load(Z))) |
| 13493 | // by ascending load offsets. This can help cores prefetch if the order of |
| 13494 | // loads is more predictable. |
| 13495 | auto DistrubuteVecReduceLoad = [&](SDValue N0, SDValue N1, bool IsForward) { |
| 13496 | // Check if two reductions are known to load data where one is before/after |
| 13497 | // another. Return negative if N0 loads data before N1, positive if N1 is |
| 13498 | // before N0, and 0 if nothing is known. |
| 13499 | auto IsKnownOrderedLoad = [&](SDValue N0, SDValue N1) { |
| 13500 | // Look through to the first operand of a MUL, for the VMLA case. |
| 13501 | // Currently only looks at the first operand, in the hope they are equal. |
| 13502 | if (N0.getOpcode() == ISD::MUL) |
| 13503 | N0 = N0.getOperand(i: 0); |
| 13504 | if (N1.getOpcode() == ISD::MUL) |
| 13505 | N1 = N1.getOperand(i: 0); |
| 13506 | |
| 13507 | // Return true if the two operands are loads to the same object and the |
| 13508 | // offset of the first is known to be less than the offset of the second. |
| 13509 | LoadSDNode *Load0 = dyn_cast<LoadSDNode>(Val&: N0); |
| 13510 | LoadSDNode *Load1 = dyn_cast<LoadSDNode>(Val&: N1); |
| 13511 | if (!Load0 || !Load1 || Load0->getChain() != Load1->getChain() || |
| 13512 | !Load0->isSimple() || !Load1->isSimple() || Load0->isIndexed() || |
| 13513 | Load1->isIndexed()) |
| 13514 | return 0; |
| 13515 | |
| 13516 | auto BaseLocDecomp0 = BaseIndexOffset::match(N: Load0, DAG); |
| 13517 | auto BaseLocDecomp1 = BaseIndexOffset::match(N: Load1, DAG); |
| 13518 | |
| 13519 | if (!BaseLocDecomp0.getBase() || |
| 13520 | BaseLocDecomp0.getBase() != BaseLocDecomp1.getBase() || |
| 13521 | !BaseLocDecomp0.hasValidOffset() || !BaseLocDecomp1.hasValidOffset()) |
| 13522 | return 0; |
| 13523 | if (BaseLocDecomp0.getOffset() < BaseLocDecomp1.getOffset()) |
| 13524 | return -1; |
| 13525 | if (BaseLocDecomp0.getOffset() > BaseLocDecomp1.getOffset()) |
| 13526 | return 1; |
| 13527 | return 0; |
| 13528 | }; |
| 13529 | |
| 13530 | SDValue X; |
| 13531 | if (N0.getOpcode() == ISD::ADD && N0->hasOneUse()) { |
| 13532 | if (IsVecReduce(N0.getOperand(i: 0)) && IsVecReduce(N0.getOperand(i: 1))) { |
| 13533 | int IsBefore = IsKnownOrderedLoad(N0.getOperand(i: 0).getOperand(i: 0), |
| 13534 | N0.getOperand(i: 1).getOperand(i: 0)); |
| 13535 | if (IsBefore < 0) { |
| 13536 | X = N0.getOperand(i: 0); |
| 13537 | N0 = N0.getOperand(i: 1); |
| 13538 | } else if (IsBefore > 0) { |
| 13539 | X = N0.getOperand(i: 1); |
| 13540 | N0 = N0.getOperand(i: 0); |
| 13541 | } else |
| 13542 | return SDValue(); |
| 13543 | } else if (IsVecReduce(N0.getOperand(i: 0))) { |
| 13544 | X = N0.getOperand(i: 1); |
| 13545 | N0 = N0.getOperand(i: 0); |
| 13546 | } else if (IsVecReduce(N0.getOperand(i: 1))) { |
| 13547 | X = N0.getOperand(i: 0); |
| 13548 | N0 = N0.getOperand(i: 1); |
| 13549 | } else |
| 13550 | return SDValue(); |
| 13551 | } else if (IsForward && IsVecReduce(N0) && IsVecReduce(N1) && |
| 13552 | IsKnownOrderedLoad(N0.getOperand(i: 0), N1.getOperand(i: 0)) < 0) { |
| 13553 | // Note this is backward to how you would expect. We create |
| 13554 | // add(reduce(load + 16), reduce(load + 0)) so that the |
| 13555 | // add(reduce(load + 16), X) is combined into VADDVA(X, load + 16), leaving |
| 13556 | // the X as VADDV(load + 0). |
| 13557 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1, N2: N0); |
| 13558 | } else |
| 13559 | return SDValue(); |
| 13560 | |
| 13561 | if (!IsVecReduce(N0) || !IsVecReduce(N1)) |
| 13562 | return SDValue(); |
| 13563 | |
| 13564 | if (IsKnownOrderedLoad(N1.getOperand(i: 0), N0.getOperand(i: 0)) >= 0) |
| 13565 | return SDValue(); |
| 13566 | |
| 13567 | // Switch from add(add(X, N0), N1) to add(add(X, N1), N0) |
| 13568 | SDValue Add0 = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: X, N2: N1); |
| 13569 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: Add0, N2: N0); |
| 13570 | }; |
| 13571 | if (SDValue R = DistrubuteVecReduceLoad(N0, N1, true)) |
| 13572 | return R; |
| 13573 | if (SDValue R = DistrubuteVecReduceLoad(N1, N0, false)) |
| 13574 | return R; |
| 13575 | return SDValue(); |
| 13576 | } |
| 13577 | |
| 13578 | static SDValue PerformADDVecReduce(SDNode *N, SelectionDAG &DAG, |
| 13579 | const ARMSubtarget *Subtarget) { |
| 13580 | if (!Subtarget->hasMVEIntegerOps()) |
| 13581 | return SDValue(); |
| 13582 | |
| 13583 | if (SDValue R = TryDistrubutionADDVecReduce(N, DAG)) |
| 13584 | return R; |
| 13585 | |
| 13586 | EVT VT = N->getValueType(ResNo: 0); |
| 13587 | SDValue N0 = N->getOperand(Num: 0); |
| 13588 | SDValue N1 = N->getOperand(Num: 1); |
| 13589 | SDLoc dl(N); |
| 13590 | |
| 13591 | if (VT != MVT::i64) |
| 13592 | return SDValue(); |
| 13593 | |
| 13594 | // We are looking for an i64 add of a VADDLVx. Because these are i64s, this |
| 13595 | // will look like: |
| 13596 | // t1: i32,i32 = ARMISD::VADDLVs x |
| 13597 | // t2: i64 = build_pair t1, t1:1 |
| 13598 | // t3: i64 = add t2, y |
| 13599 | // Otherwise we try to push the add up above VADDLVAx, to potentially allow |
| 13600 | // the add to be simplified separately. |
| 13601 | // We also need to check for sext / zext and commutative adds. |
| 13602 | auto MakeVecReduce = [&](unsigned Opcode, unsigned OpcodeA, SDValue NA, |
| 13603 | SDValue NB) { |
| 13604 | if (NB->getOpcode() != ISD::BUILD_PAIR) |
| 13605 | return SDValue(); |
| 13606 | SDValue VecRed = NB->getOperand(Num: 0); |
| 13607 | if ((VecRed->getOpcode() != Opcode && VecRed->getOpcode() != OpcodeA) || |
| 13608 | VecRed.getResNo() != 0 || |
| 13609 | NB->getOperand(Num: 1) != SDValue(VecRed.getNode(), 1)) |
| 13610 | return SDValue(); |
| 13611 | |
| 13612 | if (VecRed->getOpcode() == OpcodeA) { |
| 13613 | // add(NA, VADDLVA(Inp), Y) -> VADDLVA(add(NA, Inp), Y) |
| 13614 | SDValue Inp = DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, |
| 13615 | N1: VecRed.getOperand(i: 0), N2: VecRed.getOperand(i: 1)); |
| 13616 | NA = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::i64, N1: Inp, N2: NA); |
| 13617 | } |
| 13618 | |
| 13619 | SmallVector<SDValue, 4> Ops(2); |
| 13620 | std::tie(args&: Ops[0], args&: Ops[1]) = DAG.SplitScalar(N: NA, DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
| 13621 | |
| 13622 | unsigned S = VecRed->getOpcode() == OpcodeA ? 2 : 0; |
| 13623 | for (unsigned I = S, E = VecRed.getNumOperands(); I < E; I++) |
| 13624 | Ops.push_back(Elt: VecRed->getOperand(Num: I)); |
| 13625 | SDValue Red = |
| 13626 | DAG.getNode(Opcode: OpcodeA, DL: dl, VTList: DAG.getVTList(VTs: {MVT::i32, MVT::i32}), Ops); |
| 13627 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Red, |
| 13628 | N2: SDValue(Red.getNode(), 1)); |
| 13629 | }; |
| 13630 | |
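| | // Try each VADDLV / VMLALV flavour (signed, unsigned and predicated), with |
| | // the operands in both orders. |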
| 13631 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N0, N1)) |
| 13632 | return M; |
| 13633 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N0, N1)) |
| 13634 | return M; |
| 13635 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N1, N0)) |
| 13636 | return M; |
| 13637 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N1, N0)) |
| 13638 | return M; |
| 13639 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N0, N1)) |
| 13640 | return M; |
| 13641 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N0, N1)) |
| 13642 | return M; |
| 13643 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N1, N0)) |
| 13644 | return M; |
| 13645 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N1, N0)) |
| 13646 | return M; |
| 13647 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N0, N1)) |
| 13648 | return M; |
| 13649 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N0, N1)) |
| 13650 | return M; |
| 13651 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N1, N0)) |
| 13652 | return M; |
| 13653 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N1, N0)) |
| 13654 | return M; |
| 13655 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVps, ARMISD::VMLALVAps, N0, N1)) |
| 13656 | return M; |
| 13657 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVpu, ARMISD::VMLALVApu, N0, N1)) |
| 13658 | return M; |
| 13659 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVps, ARMISD::VMLALVAps, N1, N0)) |
| 13660 | return M; |
| 13661 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVpu, ARMISD::VMLALVApu, N1, N0)) |
| 13662 | return M; |
| 13663 | return SDValue(); |
| 13664 | } |
| 13665 | |
| 13666 | bool |
| 13667 | ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N, |
| 13668 | CombineLevel Level) const { |
| 13669 | assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA || |
| 13670 | N->getOpcode() == ISD::SRL) && |
| 13671 | "Expected shift op" ); |
| 13672 | |
| 13673 | SDValue ShiftLHS = N->getOperand(Num: 0); |
| 13674 | if (!ShiftLHS->hasOneUse()) |
| 13675 | return false; |
| 13676 | |
| 13677 | if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND && |
| 13678 | !ShiftLHS.getOperand(i: 0)->hasOneUse()) |
| 13679 | return false; |
| 13680 | |
| 13681 | if (Level == BeforeLegalizeTypes) |
| 13682 | return true; |
| 13683 | |
| 13684 | if (N->getOpcode() != ISD::SHL) |
| 13685 | return true; |
| 13686 | |
| 13687 | if (Subtarget->isThumb1Only()) { |
| 13688 | // Avoid making expensive immediates by commuting shifts. (This logic |
| 13689 | // only applies to Thumb1 because ARM and Thumb2 immediates can be shifted |
| 13690 | // for free.) |
| 13691 | if (N->getOpcode() != ISD::SHL) |
| 13692 | return true; |
| 13693 | SDValue N1 = N->getOperand(Num: 0); |
| 13694 | if (N1->getOpcode() != ISD::ADD && N1->getOpcode() != ISD::AND && |
| 13695 | N1->getOpcode() != ISD::OR && N1->getOpcode() != ISD::XOR) |
| 13696 | return true; |
| 13697 | if (auto *Const = dyn_cast<ConstantSDNode>(Val: N1->getOperand(Num: 1))) { |
| 13698 | if (Const->getAPIntValue().ult(RHS: 256)) |
| 13699 | return false; |
| 13700 | if (N1->getOpcode() == ISD::ADD && Const->getAPIntValue().slt(RHS: 0) && |
| 13701 | Const->getAPIntValue().sgt(RHS: -256)) |
| 13702 | return false; |
| 13703 | } |
| 13704 | return true; |
| 13705 | } |
| 13706 | |
| 13707 | // Turn off commute-with-shift transform after legalization, so it doesn't |
| 13708 | // conflict with PerformSHLSimplify. (We could try to detect when |
| 13709 | // PerformSHLSimplify would trigger more precisely, but it isn't |
| 13710 | // really necessary.) |
| 13711 | return false; |
| 13712 | } |
| 13713 | |
| 13714 | bool ARMTargetLowering::isDesirableToCommuteXorWithShift( |
| 13715 | const SDNode *N) const { |
| 13716 | assert(N->getOpcode() == ISD::XOR && |
| 13717 | (N->getOperand(0).getOpcode() == ISD::SHL || |
| 13718 | N->getOperand(0).getOpcode() == ISD::SRL) && |
| 13719 | "Expected XOR(SHIFT) pattern");
| 13720 | |
| 13721 | // Only commute if the entire NOT mask is a hidden shifted mask. |
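| | // For example, with i32 (xor (shl X, 8), 0xFFFFFF00) the NOT mask 0xFFFFFF00
| | // is a shifted mask with MaskIdx == 8 and MaskLen == 24, matching the shift
| | // amount and the remaining bit width, so commuting is allowed.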
| 13722 | auto *XorC = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1)); |
| 13723 | auto *ShiftC = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 0).getOperand(i: 1)); |
| 13724 | if (XorC && ShiftC) { |
| 13725 | unsigned MaskIdx, MaskLen; |
| 13726 | if (XorC->getAPIntValue().isShiftedMask(MaskIdx, MaskLen)) { |
| 13727 | unsigned ShiftAmt = ShiftC->getZExtValue(); |
| 13728 | unsigned BitWidth = N->getValueType(ResNo: 0).getScalarSizeInBits(); |
| 13729 | if (N->getOperand(Num: 0).getOpcode() == ISD::SHL) |
| 13730 | return MaskIdx == ShiftAmt && MaskLen == (BitWidth - ShiftAmt); |
| 13731 | return MaskIdx == 0 && MaskLen == (BitWidth - ShiftAmt); |
| 13732 | } |
| 13733 | } |
| 13734 | |
| 13735 | return false; |
| 13736 | } |
| 13737 | |
| 13738 | bool ARMTargetLowering::shouldFoldConstantShiftPairToMask( |
| 13739 | const SDNode *N) const { |
| 13740 | assert(((N->getOpcode() == ISD::SHL && |
| 13741 | N->getOperand(0).getOpcode() == ISD::SRL) || |
| 13742 | (N->getOpcode() == ISD::SRL && |
| 13743 | N->getOperand(0).getOpcode() == ISD::SHL)) && |
| 13744 | "Expected shift-shift mask");
| 13745 | |
| 13746 | if (!Subtarget->isThumb1Only()) |
| 13747 | return true; |
| 13748 | |
| 13749 | EVT VT = N->getValueType(ResNo: 0); |
| 13750 | if (VT.getScalarSizeInBits() > 32) |
| 13751 | return true; |
| 13752 | |
| 13753 | return false; |
| 13754 | } |
| 13755 | |
| 13756 | bool ARMTargetLowering::shouldFoldSelectWithIdentityConstant( |
| 13757 | unsigned BinOpcode, EVT VT, unsigned SelectOpcode, SDValue X, |
| 13758 | SDValue Y) const { |
| 13759 | return Subtarget->hasMVEIntegerOps() && isTypeLegal(VT) && |
| 13760 | SelectOpcode == ISD::VSELECT; |
| 13761 | } |
| 13762 | |
| 13763 | bool ARMTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const { |
| 13764 | if (!Subtarget->hasNEON()) { |
| 13765 | if (Subtarget->isThumb1Only()) |
| 13766 | return VT.getScalarSizeInBits() <= 32; |
| 13767 | return true; |
| 13768 | } |
| 13769 | return VT.isScalarInteger(); |
| 13770 | } |
| 13771 | |
| 13772 | bool ARMTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT, |
| 13773 | EVT VT) const { |
| 13774 | if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple()) |
| 13775 | return false; |
| 13776 | |
| 13777 | switch (FPVT.getSimpleVT().SimpleTy) { |
| 13778 | case MVT::f16: |
| 13779 | return Subtarget->hasVFP2Base(); |
| 13780 | case MVT::f32: |
| 13781 | return Subtarget->hasVFP2Base(); |
| 13782 | case MVT::f64: |
| 13783 | return Subtarget->hasFP64(); |
| 13784 | case MVT::v4f32: |
| 13785 | case MVT::v8f16: |
| 13786 | return Subtarget->hasMVEFloatOps(); |
| 13787 | default: |
| 13788 | return false; |
| 13789 | } |
| 13790 | } |
| 13791 | |
| 13792 | static SDValue PerformSHLSimplify(SDNode *N, |
| 13793 | TargetLowering::DAGCombinerInfo &DCI, |
| 13794 | const ARMSubtarget *ST) { |
| 13795 | // Allow the generic combiner to identify potential bswaps. |
| 13796 | if (DCI.isBeforeLegalize()) |
| 13797 | return SDValue(); |
| 13798 | |
| 13799 | // DAG combiner will fold: |
| 13800 | // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2) |
| 13801 | // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
| 13802 | // Other code patterns that can also be modified have the following form:
| 13803 | // b + ((a << 1) | 510) |
| 13804 | // b + ((a << 1) & 510) |
| 13805 | // b + ((a << 1) ^ 510) |
| 13806 | // b + ((a << 1) + 510) |
| 13807 | |
| 13808 | // Many instructions can perform the shift for free, but it requires both |
| 13809 | // the operands to be registers. If c1 << c2 is too large, a mov immediate |
| 13810 | // instruction will be needed. So, unfold back to the original pattern if:
| 13811 | // - c1 and c2 are small enough that they don't require mov imms.
| 13812 | // - the user(s) of the node can perform a shl themselves.
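| | // For example, (add (shl x, 1), 510) can be unfolded back into
| | // (shl (add x, 255), 1): both 255 and 1 are cheap immediates, and an add/cmp
| | // user can then fold the outer shl into its shifted-register operand.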
| 13813 | |
| 13814 | // No shifted operands for 16-bit instructions. |
| 13815 | if (ST->isThumb() && ST->isThumb1Only()) |
| 13816 | return SDValue(); |
| 13817 | |
| 13818 | // Check that all the users could perform the shl themselves. |
| 13819 | for (auto *U : N->users()) { |
| 13820 | switch(U->getOpcode()) { |
| 13821 | default: |
| 13822 | return SDValue(); |
| 13823 | case ISD::SUB: |
| 13824 | case ISD::ADD: |
| 13825 | case ISD::AND: |
| 13826 | case ISD::OR: |
| 13827 | case ISD::XOR: |
| 13828 | case ISD::SETCC: |
| 13829 | case ARMISD::CMP: |
| 13830 | // Check that the user isn't already using a constant because there |
| 13831 | // aren't any instructions that support an immediate operand and a |
| 13832 | // shifted operand. |
| 13833 | if (isa<ConstantSDNode>(Val: U->getOperand(Num: 0)) || |
| 13834 | isa<ConstantSDNode>(Val: U->getOperand(Num: 1))) |
| 13835 | return SDValue(); |
| 13836 | |
| 13837 | // Check that it's not already using a shift. |
| 13838 | if (U->getOperand(Num: 0).getOpcode() == ISD::SHL || |
| 13839 | U->getOperand(Num: 1).getOpcode() == ISD::SHL) |
| 13840 | return SDValue(); |
| 13841 | break; |
| 13842 | } |
| 13843 | } |
| 13844 | |
| 13845 | if (N->getOpcode() != ISD::ADD && N->getOpcode() != ISD::OR && |
| 13846 | N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND) |
| 13847 | return SDValue(); |
| 13848 | |
| 13849 | if (N->getOperand(Num: 0).getOpcode() != ISD::SHL) |
| 13850 | return SDValue(); |
| 13851 | |
| 13852 | SDValue SHL = N->getOperand(Num: 0); |
| 13853 | |
| 13854 | auto *C1ShlC2 = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1)); |
| 13855 | auto *C2 = dyn_cast<ConstantSDNode>(Val: SHL.getOperand(i: 1)); |
| 13856 | if (!C1ShlC2 || !C2) |
| 13857 | return SDValue(); |
| 13858 | |
| 13859 | APInt C2Int = C2->getAPIntValue(); |
| 13860 | APInt C1Int = C1ShlC2->getAPIntValue(); |
| 13861 | unsigned C2Width = C2Int.getBitWidth(); |
| 13862 | if (C2Int.uge(RHS: C2Width)) |
| 13863 | return SDValue(); |
| 13864 | uint64_t C2Value = C2Int.getZExtValue(); |
| 13865 | |
| 13866 | // Check that performing a lshr will not lose any information. |
| 13867 | APInt Mask = APInt::getHighBitsSet(numBits: C2Width, hiBitsSet: C2Width - C2Value); |
| 13868 | if ((C1Int & Mask) != C1Int) |
| 13869 | return SDValue(); |
| 13870 | |
| 13871 | // Shift the first constant. |
| 13872 | C1Int.lshrInPlace(ShiftAmt: C2Int); |
| 13873 | |
| 13874 | // The immediates are encoded as an 8-bit value that can be rotated. |
| 13875 | auto LargeImm = [](const APInt &Imm) { |
| 13876 | unsigned Zeros = Imm.countl_zero() + Imm.countr_zero(); |
| 13877 | return Imm.getBitWidth() - Zeros > 8; |
| 13878 | }; |
| 13879 | |
| 13880 | if (LargeImm(C1Int) || LargeImm(C2Int)) |
| 13881 | return SDValue(); |
| 13882 | |
| 13883 | SelectionDAG &DAG = DCI.DAG; |
| 13884 | SDLoc dl(N); |
| 13885 | SDValue X = SHL.getOperand(i: 0); |
| 13886 | SDValue BinOp = DAG.getNode(Opcode: N->getOpcode(), DL: dl, VT: MVT::i32, N1: X, |
| 13887 | N2: DAG.getConstant(Val: C1Int, DL: dl, VT: MVT::i32)); |
| 13888 | // Shift left to compensate for the lshr of C1Int. |
| 13889 | SDValue Res = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: MVT::i32, N1: BinOp, N2: SHL.getOperand(i: 1)); |
| 13890 | |
| 13891 | LLVM_DEBUG(dbgs() << "Simplify shl use:\n"; SHL.getOperand(0).dump();
| 13892 | SHL.dump(); N->dump());
| 13893 | LLVM_DEBUG(dbgs() << "Into:\n"; X.dump(); BinOp.dump(); Res.dump());
| 13894 | return Res; |
| 13895 | } |
| 13896 | |
| 13897 | |
| 13898 | /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. |
| 13899 | /// |
| 13900 | static SDValue PerformADDCombine(SDNode *N, |
| 13901 | TargetLowering::DAGCombinerInfo &DCI, |
| 13902 | const ARMSubtarget *Subtarget) { |
| 13903 | SDValue N0 = N->getOperand(Num: 0); |
| 13904 | SDValue N1 = N->getOperand(Num: 1); |
| 13905 | |
| 13906 | // Only works one way, because it needs an immediate operand. |
| 13907 | if (SDValue Result = PerformSHLSimplify(N, DCI, ST: Subtarget)) |
| 13908 | return Result; |
| 13909 | |
| 13910 | if (SDValue Result = PerformADDVecReduce(N, DAG&: DCI.DAG, Subtarget)) |
| 13911 | return Result; |
| 13912 | |
| 13913 | // First try with the default operand order. |
| 13914 | if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget)) |
| 13915 | return Result; |
| 13916 | |
| 13917 | // If that didn't work, try again with the operands commuted. |
| 13918 | return PerformADDCombineWithOperands(N, N0: N1, N1: N0, DCI, Subtarget); |
| 13919 | } |
| 13920 | |
| 13921 | // Combine (sub 0, (csinc X, Y, CC)) -> (csinv -X, Y, CC) |
| 13922 | // providing -X is as cheap as X (currently, just a constant). |
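| | // For example, (sub 0, (csinc 5, Y, CC)) becomes (csinv -5, Y, CC); the
| | // (sub 0, 5) built below constant-folds, so no extra negate is emitted.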
| 13923 | static SDValue PerformSubCSINCCombine(SDNode *N, SelectionDAG &DAG) { |
| 13924 | if (N->getValueType(ResNo: 0) != MVT::i32 || !isNullConstant(V: N->getOperand(Num: 0))) |
| 13925 | return SDValue(); |
| 13926 | SDValue CSINC = N->getOperand(Num: 1); |
| 13927 | if (CSINC.getOpcode() != ARMISD::CSINC || !CSINC.hasOneUse()) |
| 13928 | return SDValue(); |
| 13929 | |
| 13930 | ConstantSDNode *X = dyn_cast<ConstantSDNode>(Val: CSINC.getOperand(i: 0)); |
| 13931 | if (!X) |
| 13932 | return SDValue(); |
| 13933 | |
| 13934 | return DAG.getNode(Opcode: ARMISD::CSINV, DL: SDLoc(N), VT: MVT::i32, |
| 13935 | N1: DAG.getNode(Opcode: ISD::SUB, DL: SDLoc(N), VT: MVT::i32, N1: N->getOperand(Num: 0), |
| 13936 | N2: CSINC.getOperand(i: 0)), |
| 13937 | N2: CSINC.getOperand(i: 1), N3: CSINC.getOperand(i: 2), |
| 13938 | N4: CSINC.getOperand(i: 3)); |
| 13939 | } |
| 13940 | |
| 13941 | static bool isNegatedInteger(SDValue Op) { |
| 13942 | return Op.getOpcode() == ISD::SUB && isNullConstant(V: Op.getOperand(i: 0)); |
| 13943 | } |
| 13944 | |
| 13945 | // Try to fold |
| 13946 | // |
| 13947 | // (neg (cmov X, Y)) -> (cmov (neg X), (neg Y)) |
| 13948 | // |
| 13949 | // The folding helps cmov to be matched with csneg without generating |
| 13950 | // redundant neg instruction. |
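| | //
| | // For example, (sub 0, (cmov (sub 0, A), B)) becomes (cmov A, (sub 0, B)):
| | // the double negation of A cancels in later combines, and the remaining
| | // negated operand can be matched by csneg.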
| 13951 | static SDValue performNegCMovCombine(SDNode *N, SelectionDAG &DAG) { |
| 13952 | if (!isNegatedInteger(Op: SDValue(N, 0))) |
| 13953 | return SDValue(); |
| 13954 | |
| 13955 | SDValue CMov = N->getOperand(Num: 1); |
| 13956 | if (CMov.getOpcode() != ARMISD::CMOV || !CMov->hasOneUse()) |
| 13957 | return SDValue(); |
| 13958 | |
| 13959 | SDValue N0 = CMov.getOperand(i: 0); |
| 13960 | SDValue N1 = CMov.getOperand(i: 1); |
| 13961 | |
| 13962 | // If neither of them is a negation, the fold isn't worthwhile, as it would
| 13963 | // introduce two additional negations while removing only one.
| 13964 | if (!isNegatedInteger(Op: N0) && !isNegatedInteger(Op: N1)) |
| 13965 | return SDValue(); |
| 13966 | |
| 13967 | SDLoc DL(N); |
| 13968 | EVT VT = CMov.getValueType(); |
| 13969 | |
| 13970 | SDValue N0N = DAG.getNegative(Val: N0, DL, VT); |
| 13971 | SDValue N1N = DAG.getNegative(Val: N1, DL, VT); |
| 13972 | return DAG.getNode(Opcode: ARMISD::CMOV, DL, VT, N1: N0N, N2: N1N, N3: CMov.getOperand(i: 2), |
| 13973 | N4: CMov.getOperand(i: 3)); |
| 13974 | } |
| 13975 | |
| 13976 | /// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. |
| 13977 | /// |
| 13978 | static SDValue PerformSUBCombine(SDNode *N, |
| 13979 | TargetLowering::DAGCombinerInfo &DCI, |
| 13980 | const ARMSubtarget *Subtarget) { |
| 13981 | SDValue N0 = N->getOperand(Num: 0); |
| 13982 | SDValue N1 = N->getOperand(Num: 1); |
| 13983 | |
| 13984 | // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) |
| 13985 | if (N1.getNode()->hasOneUse()) |
| 13986 | if (SDValue Result = combineSelectAndUse(N, Slct: N1, OtherOp: N0, DCI)) |
| 13987 | return Result; |
| 13988 | |
| 13989 | if (SDValue R = PerformSubCSINCCombine(N, DAG&: DCI.DAG)) |
| 13990 | return R; |
| 13991 | |
| 13992 | if (SDValue Val = performNegCMovCombine(N, DAG&: DCI.DAG)) |
| 13993 | return Val; |
| 13994 | |
| 13995 | if (!Subtarget->hasMVEIntegerOps() || !N->getValueType(ResNo: 0).isVector()) |
| 13996 | return SDValue(); |
| 13997 | |
| 13998 | // Fold (sub (ARMvmovImm 0), (ARMvdup x)) -> (ARMvdup (sub 0, x)) |
| 13999 | // so that we can readily pattern match more MVE instructions which can use
| 14000 | // a scalar operand. |
| 14001 | SDValue VDup = N->getOperand(Num: 1); |
| 14002 | if (VDup->getOpcode() != ARMISD::VDUP) |
| 14003 | return SDValue(); |
| 14004 | |
| 14005 | SDValue VMov = N->getOperand(Num: 0); |
| 14006 | if (VMov->getOpcode() == ISD::BITCAST) |
| 14007 | VMov = VMov->getOperand(Num: 0); |
| 14008 | |
| 14009 | if (VMov->getOpcode() != ARMISD::VMOVIMM || !isZeroVector(N: VMov)) |
| 14010 | return SDValue(); |
| 14011 | |
| 14012 | SDLoc dl(N); |
| 14013 | SDValue Negate = DCI.DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
| 14014 | N1: DCI.DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), |
| 14015 | N2: VDup->getOperand(Num: 0)); |
| 14016 | return DCI.DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT: N->getValueType(ResNo: 0), Operand: Negate); |
| 14017 | } |
| 14018 | |
| 14019 | /// PerformVMULCombine |
| 14020 | /// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the |
| 14021 | /// special multiplier accumulator forwarding. |
| 14022 | /// vmul d3, d0, d2 |
| 14023 | /// vmla d3, d1, d2 |
| 14024 | /// is faster than |
| 14025 | /// vadd d3, d0, d1 |
| 14026 | /// vmul d3, d3, d2 |
| 14027 | // However, for (A + B) * (A + B), |
| 14028 | // vadd d2, d0, d1 |
| 14029 | // vmul d3, d0, d2 |
| 14030 | // vmla d3, d1, d2 |
| 14031 | // is slower than |
| 14032 | // vadd d2, d0, d1 |
| 14033 | // vmul d3, d2, d2 |
| 14034 | static SDValue PerformVMULCombine(SDNode *N, |
| 14035 | TargetLowering::DAGCombinerInfo &DCI, |
| 14036 | const ARMSubtarget *Subtarget) { |
| 14037 | if (!Subtarget->hasVMLxForwarding()) |
| 14038 | return SDValue(); |
| 14039 | |
| 14040 | SelectionDAG &DAG = DCI.DAG; |
| 14041 | SDValue N0 = N->getOperand(Num: 0); |
| 14042 | SDValue N1 = N->getOperand(Num: 1); |
| 14043 | unsigned Opcode = N0.getOpcode(); |
| 14044 | if (Opcode != ISD::ADD && Opcode != ISD::SUB && |
| 14045 | Opcode != ISD::FADD && Opcode != ISD::FSUB) { |
| 14046 | Opcode = N1.getOpcode(); |
| 14047 | if (Opcode != ISD::ADD && Opcode != ISD::SUB && |
| 14048 | Opcode != ISD::FADD && Opcode != ISD::FSUB) |
| 14049 | return SDValue(); |
| 14050 | std::swap(a&: N0, b&: N1); |
| 14051 | } |
| 14052 | |
| 14053 | if (N0 == N1) |
| 14054 | return SDValue(); |
| 14055 | |
| 14056 | EVT VT = N->getValueType(ResNo: 0); |
| 14057 | SDLoc DL(N); |
| 14058 | SDValue N00 = N0->getOperand(Num: 0); |
| 14059 | SDValue N01 = N0->getOperand(Num: 1); |
| 14060 | return DAG.getNode(Opcode, DL, VT, |
| 14061 | N1: DAG.getNode(Opcode: ISD::MUL, DL, VT, N1: N00, N2: N1), |
| 14062 | N2: DAG.getNode(Opcode: ISD::MUL, DL, VT, N1: N01, N2: N1)); |
| 14063 | } |
| 14064 | |
| 14065 | static SDValue PerformMVEVMULLCombine(SDNode *N, SelectionDAG &DAG, |
| 14066 | const ARMSubtarget *Subtarget) { |
| 14067 | EVT VT = N->getValueType(ResNo: 0); |
| 14068 | if (VT != MVT::v2i64) |
| 14069 | return SDValue(); |
| 14070 | |
| 14071 | SDValue N0 = N->getOperand(Num: 0); |
| 14072 | SDValue N1 = N->getOperand(Num: 1); |
| 14073 | |
| 14074 | auto IsSignExt = [&](SDValue Op) { |
| 14075 | if (Op->getOpcode() != ISD::SIGN_EXTEND_INREG) |
| 14076 | return SDValue(); |
| 14077 | EVT VT = cast<VTSDNode>(Val: Op->getOperand(Num: 1))->getVT(); |
| 14078 | if (VT.getScalarSizeInBits() == 32) |
| 14079 | return Op->getOperand(Num: 0); |
| 14080 | return SDValue(); |
| 14081 | }; |
| 14082 | auto IsZeroExt = [&](SDValue Op) { |
| 14083 | // Zero extends are a little more awkward. At the point we are matching |
| 14084 | // this, we are looking for an AND with a (-1, 0, -1, 0) buildvector mask. |
| 14085 | // That might be before or after a bitcast depending on how the and is
| 14086 | // placed. Because this has to look through bitcasts, it is currently only |
| 14087 | // supported on LE. |
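| | // For example, (and X, bitcast (build_vector -1, 0, -1, 0 : v4i32)) keeps
| | // only the low 32 bits of each v2i64 lane on a little-endian target, which
| | // is exactly a zero-extend of the 32-bit sub-elements.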
| 14088 | if (!Subtarget->isLittle()) |
| 14089 | return SDValue(); |
| 14090 | |
| 14091 | SDValue And = Op; |
| 14092 | if (And->getOpcode() == ISD::BITCAST) |
| 14093 | And = And->getOperand(Num: 0); |
| 14094 | if (And->getOpcode() != ISD::AND) |
| 14095 | return SDValue(); |
| 14096 | SDValue Mask = And->getOperand(Num: 1); |
| 14097 | if (Mask->getOpcode() == ISD::BITCAST) |
| 14098 | Mask = Mask->getOperand(Num: 0); |
| 14099 | |
| 14100 | if (Mask->getOpcode() != ISD::BUILD_VECTOR || |
| 14101 | Mask.getValueType() != MVT::v4i32) |
| 14102 | return SDValue(); |
| 14103 | if (isAllOnesConstant(V: Mask->getOperand(Num: 0)) && |
| 14104 | isNullConstant(V: Mask->getOperand(Num: 1)) && |
| 14105 | isAllOnesConstant(V: Mask->getOperand(Num: 2)) && |
| 14106 | isNullConstant(V: Mask->getOperand(Num: 3))) |
| 14107 | return And->getOperand(Num: 0); |
| 14108 | return SDValue(); |
| 14109 | }; |
| 14110 | |
| 14111 | SDLoc dl(N); |
| 14112 | if (SDValue Op0 = IsSignExt(N0)) { |
| 14113 | if (SDValue Op1 = IsSignExt(N1)) { |
| 14114 | SDValue New0a = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: Op0); |
| 14115 | SDValue New1a = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: Op1); |
| 14116 | return DAG.getNode(Opcode: ARMISD::VMULLs, DL: dl, VT, N1: New0a, N2: New1a); |
| 14117 | } |
| 14118 | } |
| 14119 | if (SDValue Op0 = IsZeroExt(N0)) { |
| 14120 | if (SDValue Op1 = IsZeroExt(N1)) { |
| 14121 | SDValue New0a = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: Op0); |
| 14122 | SDValue New1a = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: Op1); |
| 14123 | return DAG.getNode(Opcode: ARMISD::VMULLu, DL: dl, VT, N1: New0a, N2: New1a); |
| 14124 | } |
| 14125 | } |
| 14126 | |
| 14127 | return SDValue(); |
| 14128 | } |
| 14129 | |
| 14130 | static SDValue PerformMULCombine(SDNode *N, |
| 14131 | TargetLowering::DAGCombinerInfo &DCI, |
| 14132 | const ARMSubtarget *Subtarget) { |
| 14133 | SelectionDAG &DAG = DCI.DAG; |
| 14134 | |
| 14135 | EVT VT = N->getValueType(ResNo: 0); |
| 14136 | if (Subtarget->hasMVEIntegerOps() && VT == MVT::v2i64) |
| 14137 | return PerformMVEVMULLCombine(N, DAG, Subtarget); |
| 14138 | |
| 14139 | if (Subtarget->isThumb1Only()) |
| 14140 | return SDValue(); |
| 14141 | |
| 14142 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 14143 | return SDValue(); |
| 14144 | |
| 14145 | if (VT.is64BitVector() || VT.is128BitVector()) |
| 14146 | return PerformVMULCombine(N, DCI, Subtarget); |
| 14147 | if (VT != MVT::i32) |
| 14148 | return SDValue(); |
| 14149 | |
| 14150 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1)); |
| 14151 | if (!C) |
| 14152 | return SDValue(); |
| 14153 | |
| 14154 | int64_t MulAmt = C->getSExtValue(); |
| 14155 | unsigned ShiftAmt = llvm::countr_zero<uint64_t>(Val: MulAmt); |
| 14156 | |
| 14157 | ShiftAmt = ShiftAmt & (32 - 1); |
| 14158 | SDValue V = N->getOperand(Num: 0); |
| 14159 | SDLoc DL(N); |
| 14160 | |
| 14161 | SDValue Res; |
| 14162 | MulAmt >>= ShiftAmt; |
| 14163 | |
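| | // For example, mul x, 40: ShiftAmt is 3, leaving MulAmt = 5 = 2^2 + 1, so we
| | // emit (shl (add x, (shl x, 2)), 3) instead of materializing the constant 40.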
| 14164 | if (MulAmt >= 0) { |
| 14165 | if (llvm::has_single_bit<uint32_t>(Value: MulAmt - 1)) { |
| 14166 | // (mul x, 2^N + 1) => (add (shl x, N), x) |
| 14167 | Res = DAG.getNode(Opcode: ISD::ADD, DL, VT, |
| 14168 | N1: V, |
| 14169 | N2: DAG.getNode(Opcode: ISD::SHL, DL, VT, |
| 14170 | N1: V, |
| 14171 | N2: DAG.getConstant(Val: Log2_32(Value: MulAmt - 1), DL, |
| 14172 | VT: MVT::i32))); |
| 14173 | } else if (llvm::has_single_bit<uint32_t>(Value: MulAmt + 1)) { |
| 14174 | // (mul x, 2^N - 1) => (sub (shl x, N), x) |
| 14175 | Res = DAG.getNode(Opcode: ISD::SUB, DL, VT, |
| 14176 | N1: DAG.getNode(Opcode: ISD::SHL, DL, VT, |
| 14177 | N1: V, |
| 14178 | N2: DAG.getConstant(Val: Log2_32(Value: MulAmt + 1), DL, |
| 14179 | VT: MVT::i32)), |
| 14180 | N2: V); |
| 14181 | } else |
| 14182 | return SDValue(); |
| 14183 | } else { |
| 14184 | uint64_t MulAmtAbs = -MulAmt; |
| 14185 | if (llvm::has_single_bit<uint32_t>(Value: MulAmtAbs + 1)) { |
| 14186 | // (mul x, -(2^N - 1)) => (sub x, (shl x, N)) |
| 14187 | Res = DAG.getNode(Opcode: ISD::SUB, DL, VT, |
| 14188 | N1: V, |
| 14189 | N2: DAG.getNode(Opcode: ISD::SHL, DL, VT, |
| 14190 | N1: V, |
| 14191 | N2: DAG.getConstant(Val: Log2_32(Value: MulAmtAbs + 1), DL, |
| 14192 | VT: MVT::i32))); |
| 14193 | } else if (llvm::has_single_bit<uint32_t>(Value: MulAmtAbs - 1)) { |
| 14194 | // (mul x, -(2^N + 1)) => - (add (shl x, N), x) |
| 14195 | Res = DAG.getNode(Opcode: ISD::ADD, DL, VT, |
| 14196 | N1: V, |
| 14197 | N2: DAG.getNode(Opcode: ISD::SHL, DL, VT, |
| 14198 | N1: V, |
| 14199 | N2: DAG.getConstant(Val: Log2_32(Value: MulAmtAbs - 1), DL, |
| 14200 | VT: MVT::i32))); |
| 14201 | Res = DAG.getNode(Opcode: ISD::SUB, DL, VT, |
| 14202 | N1: DAG.getConstant(Val: 0, DL, VT: MVT::i32), N2: Res); |
| 14203 | } else |
| 14204 | return SDValue(); |
| 14205 | } |
| 14206 | |
| 14207 | if (ShiftAmt != 0) |
| 14208 | Res = DAG.getNode(Opcode: ISD::SHL, DL, VT, |
| 14209 | N1: Res, N2: DAG.getConstant(Val: ShiftAmt, DL, VT: MVT::i32)); |
| 14210 | |
| 14211 | // Do not add new nodes to DAG combiner worklist. |
| 14212 | DCI.CombineTo(N, Res, AddTo: false); |
| 14213 | return SDValue(); |
| 14214 | } |
| 14215 | |
| 14216 | static SDValue CombineANDShift(SDNode *N, |
| 14217 | TargetLowering::DAGCombinerInfo &DCI, |
| 14218 | const ARMSubtarget *Subtarget) { |
| 14219 | // Allow DAGCombine to pattern-match before we touch the canonical form. |
| 14220 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 14221 | return SDValue(); |
| 14222 | |
| 14223 | if (N->getValueType(ResNo: 0) != MVT::i32) |
| 14224 | return SDValue(); |
| 14225 | |
| 14226 | ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1)); |
| 14227 | if (!N1C) |
| 14228 | return SDValue(); |
| 14229 | |
| 14230 | uint32_t C1 = (uint32_t)N1C->getZExtValue(); |
| 14231 | // Don't transform uxtb/uxth. |
| 14232 | if (C1 == 255 || C1 == 65535) |
| 14233 | return SDValue(); |
| 14234 | |
| 14235 | SDNode *N0 = N->getOperand(Num: 0).getNode(); |
| 14236 | if (!N0->hasOneUse()) |
| 14237 | return SDValue(); |
| 14238 | |
| 14239 | if (N0->getOpcode() != ISD::SHL && N0->getOpcode() != ISD::SRL) |
| 14240 | return SDValue(); |
| 14241 | |
| 14242 | bool LeftShift = N0->getOpcode() == ISD::SHL; |
| 14243 | |
| 14244 | ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(Val: N0->getOperand(Num: 1)); |
| 14245 | if (!N01C) |
| 14246 | return SDValue(); |
| 14247 | |
| 14248 | uint32_t C2 = (uint32_t)N01C->getZExtValue(); |
| 14249 | if (!C2 || C2 >= 32) |
| 14250 | return SDValue(); |
| 14251 | |
| 14252 | // Clear irrelevant bits in the mask. |
| 14253 | if (LeftShift) |
| 14254 | C1 &= (-1U << C2); |
| 14255 | else |
| 14256 | C1 &= (-1U >> C2); |
| 14257 | |
| 14258 | SelectionDAG &DAG = DCI.DAG; |
| 14259 | SDLoc DL(N); |
| 14260 | |
| 14261 | // We have a pattern of the form "(and (shl x, c2) c1)" or |
| 14262 | // "(and (srl x, c2) c1)", where c1 is a shifted mask. Try to |
| 14263 | // transform to a pair of shifts, to save materializing c1. |
| 14264 | |
| 14265 | // First pattern: right shift, then mask off leading bits. |
| 14266 | // FIXME: Use demanded bits? |
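| | // For example, (and (srl x, 3), 0x1fff): C3 = 19 leading zeros, so this
| | // becomes (srl (shl x, 16), 19), avoiding a materialization of 0x1fff.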
| 14267 | if (!LeftShift && isMask_32(Value: C1)) { |
| 14268 | uint32_t C3 = llvm::countl_zero(Val: C1); |
| 14269 | if (C2 < C3) { |
| 14270 | SDValue SHL = DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
| 14271 | N2: DAG.getConstant(Val: C3 - C2, DL, VT: MVT::i32)); |
| 14272 | return DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i32, N1: SHL, |
| 14273 | N2: DAG.getConstant(Val: C3, DL, VT: MVT::i32)); |
| 14274 | } |
| 14275 | } |
| 14276 | |
| 14277 | // First pattern, reversed: left shift, then mask off trailing bits. |
| 14278 | if (LeftShift && isMask_32(Value: ~C1)) { |
| 14279 | uint32_t C3 = llvm::countr_zero(Val: C1); |
| 14280 | if (C2 < C3) { |
| 14281 | SDValue SHL = DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
| 14282 | N2: DAG.getConstant(Val: C3 - C2, DL, VT: MVT::i32)); |
| 14283 | return DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: SHL, |
| 14284 | N2: DAG.getConstant(Val: C3, DL, VT: MVT::i32)); |
| 14285 | } |
| 14286 | } |
| 14287 | |
| 14288 | // Second pattern: left shift, then mask off leading bits. |
| 14289 | // FIXME: Use demanded bits? |
| 14290 | if (LeftShift && isShiftedMask_32(Value: C1)) { |
| 14291 | uint32_t Trailing = llvm::countr_zero(Val: C1); |
| 14292 | uint32_t C3 = llvm::countl_zero(Val: C1); |
| 14293 | if (Trailing == C2 && C2 + C3 < 32) { |
| 14294 | SDValue SHL = DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
| 14295 | N2: DAG.getConstant(Val: C2 + C3, DL, VT: MVT::i32)); |
| 14296 | return DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i32, N1: SHL, |
| 14297 | N2: DAG.getConstant(Val: C3, DL, VT: MVT::i32)); |
| 14298 | } |
| 14299 | } |
| 14300 | |
| 14301 | // Second pattern, reversed: right shift, then mask off trailing bits. |
| 14302 | // FIXME: Handle other patterns of known/demanded bits. |
| 14303 | if (!LeftShift && isShiftedMask_32(Value: C1)) { |
| 14304 | uint32_t Leading = llvm::countl_zero(Val: C1); |
| 14305 | uint32_t C3 = llvm::countr_zero(Val: C1); |
| 14306 | if (Leading == C2 && C2 + C3 < 32) { |
| 14307 | SDValue SHL = DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
| 14308 | N2: DAG.getConstant(Val: C2 + C3, DL, VT: MVT::i32)); |
| 14309 | return DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: SHL, |
| 14310 | N2: DAG.getConstant(Val: C3, DL, VT: MVT::i32)); |
| 14311 | } |
| 14312 | } |
| 14313 | |
| 14314 | // Transform "(and (shl x, c2) c1)" into "(shl (and x, c1>>c2), c2)" |
| 14315 | // if "c1 >> c2" is a cheaper immediate than "c1" |
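| | // For example, (and (shl x, 4), 0xaa0) becomes (shl (and x, 0xaa), 4);
| | // 0xaa fits a Thumb1 8-bit immediate while 0xaa0 does not.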
| 14316 | if (LeftShift && |
| 14317 | HasLowerConstantMaterializationCost(Val1: C1 >> C2, Val2: C1, Subtarget)) { |
| 14318 | |
| 14319 | SDValue And = DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
| 14320 | N2: DAG.getConstant(Val: C1 >> C2, DL, VT: MVT::i32)); |
| 14321 | return DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: And, |
| 14322 | N2: DAG.getConstant(Val: C2, DL, VT: MVT::i32)); |
| 14323 | } |
| 14324 | |
| 14325 | return SDValue(); |
| 14326 | } |
| 14327 | |
| 14328 | static SDValue PerformANDCombine(SDNode *N, |
| 14329 | TargetLowering::DAGCombinerInfo &DCI, |
| 14330 | const ARMSubtarget *Subtarget) { |
| 14331 | // Attempt to use immediate-form VBIC |
| 14332 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Val: N->getOperand(Num: 1)); |
| 14333 | SDLoc dl(N); |
| 14334 | EVT VT = N->getValueType(ResNo: 0); |
| 14335 | SelectionDAG &DAG = DCI.DAG; |
| 14336 | |
| 14337 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT) || VT == MVT::v2i1 || |
| 14338 | VT == MVT::v4i1 || VT == MVT::v8i1 || VT == MVT::v16i1) |
| 14339 | return SDValue(); |
| 14340 | |
| 14341 | APInt SplatBits, SplatUndef; |
| 14342 | unsigned SplatBitSize; |
| 14343 | bool HasAnyUndefs; |
| 14344 | if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) && |
| 14345 | BVN->isConstantSplat(SplatValue&: SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { |
| 14346 | if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 || |
| 14347 | SplatBitSize == 64) { |
| 14348 | EVT VbicVT; |
| 14349 | SDValue Val = isVMOVModifiedImm(SplatBits: (~SplatBits).getZExtValue(), |
| 14350 | SplatUndef: SplatUndef.getZExtValue(), SplatBitSize, |
| 14351 | DAG, dl, VT&: VbicVT, VectorVT: VT, type: OtherModImm); |
| 14352 | if (Val.getNode()) { |
| 14353 | SDValue Input = |
| 14354 | DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: VbicVT, Operand: N->getOperand(Num: 0)); |
| 14355 | SDValue Vbic = DAG.getNode(Opcode: ARMISD::VBICIMM, DL: dl, VT: VbicVT, N1: Input, N2: Val); |
| 14356 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Vbic); |
| 14357 | } |
| 14358 | } |
| 14359 | } |
| 14360 | |
| 14361 | if (!Subtarget->isThumb1Only()) { |
| 14362 | // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) |
| 14363 | if (SDValue Result = combineSelectAndUseCommutative(N, AllOnes: true, DCI)) |
| 14364 | return Result; |
| 14365 | |
| 14366 | if (SDValue Result = PerformSHLSimplify(N, DCI, ST: Subtarget)) |
| 14367 | return Result; |
| 14368 | } |
| 14369 | |
| 14370 | if (Subtarget->isThumb1Only()) |
| 14371 | if (SDValue Result = CombineANDShift(N, DCI, Subtarget)) |
| 14372 | return Result; |
| 14373 | |
| 14374 | return SDValue(); |
| 14375 | } |
| 14376 | |
| 14377 | // Try combining OR nodes to SMULWB, SMULWT. |
| 14378 | static SDValue PerformORCombineToSMULWBT(SDNode *OR, |
| 14379 | TargetLowering::DAGCombinerInfo &DCI, |
| 14380 | const ARMSubtarget *Subtarget) { |
| 14381 | if (!Subtarget->hasV6Ops() || |
| 14382 | (Subtarget->isThumb() && |
| 14383 | (!Subtarget->hasThumb2() || !Subtarget->hasDSP()))) |
| 14384 | return SDValue(); |
| 14385 | |
| 14386 | SDValue SRL = OR->getOperand(Num: 0); |
| 14387 | SDValue SHL = OR->getOperand(Num: 1); |
| 14388 | |
| 14389 | if (SRL.getOpcode() != ISD::SRL || SHL.getOpcode() != ISD::SHL) { |
| 14390 | SRL = OR->getOperand(Num: 1); |
| 14391 | SHL = OR->getOperand(Num: 0); |
| 14392 | } |
| 14393 | if (!isSRL16(Op: SRL) || !isSHL16(Op: SHL)) |
| 14394 | return SDValue(); |
| 14395 | |
| 14396 | // The first operands to the shifts need to be the two results from the |
| 14397 | // same smul_lohi node. |
| 14398 | if ((SRL.getOperand(i: 0).getNode() != SHL.getOperand(i: 0).getNode()) || |
| 14399 | SRL.getOperand(i: 0).getOpcode() != ISD::SMUL_LOHI) |
| 14400 | return SDValue(); |
| 14401 | |
| 14402 | SDNode *SMULLOHI = SRL.getOperand(i: 0).getNode(); |
| 14403 | if (SRL.getOperand(i: 0) != SDValue(SMULLOHI, 0) || |
| 14404 | SHL.getOperand(i: 0) != SDValue(SMULLOHI, 1)) |
| 14405 | return SDValue(); |
| 14406 | |
| 14407 | // Now we have: |
| 14408 | // (or (srl (smul_lohi ?, ?), 16), (shl (smul_lohi ?, ?), 16))) |
| 14409 | // For SMULW[B|T], smul_lohi will take a 32-bit and a 16-bit argument.
| 14410 | // For SMULWB the 16-bit value will be sign-extended somehow.
| 14411 | // For SMULWT only the SRA is required. |
| 14412 | // Check both sides of SMUL_LOHI |
| 14413 | SDValue OpS16 = SMULLOHI->getOperand(Num: 0); |
| 14414 | SDValue OpS32 = SMULLOHI->getOperand(Num: 1); |
| 14415 | |
| 14416 | SelectionDAG &DAG = DCI.DAG; |
| 14417 | if (!isS16(Op: OpS16, DAG) && !isSRA16(Op: OpS16)) { |
| 14418 | OpS16 = OpS32; |
| 14419 | OpS32 = SMULLOHI->getOperand(Num: 0); |
| 14420 | } |
| 14421 | |
| 14422 | SDLoc dl(OR); |
| 14423 | unsigned Opcode = 0; |
| 14424 | if (isS16(Op: OpS16, DAG)) |
| 14425 | Opcode = ARMISD::SMULWB; |
| 14426 | else if (isSRA16(Op: OpS16)) { |
| 14427 | Opcode = ARMISD::SMULWT; |
| 14428 | OpS16 = OpS16->getOperand(Num: 0); |
| 14429 | } |
| 14430 | else |
| 14431 | return SDValue(); |
| 14432 | |
| 14433 | SDValue Res = DAG.getNode(Opcode, DL: dl, VT: MVT::i32, N1: OpS32, N2: OpS16); |
| 14434 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(OR, 0), To: Res); |
| 14435 | return SDValue(OR, 0); |
| 14436 | } |
| 14437 | |
| 14438 | static SDValue PerformORCombineToBFI(SDNode *N, |
| 14439 | TargetLowering::DAGCombinerInfo &DCI, |
| 14440 | const ARMSubtarget *Subtarget) { |
| 14441 | // BFI is only available on V6T2+ |
| 14442 | if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) |
| 14443 | return SDValue(); |
| 14444 | |
| 14445 | EVT VT = N->getValueType(ResNo: 0); |
| 14446 | SDValue N0 = N->getOperand(Num: 0); |
| 14447 | SDValue N1 = N->getOperand(Num: 1); |
| 14448 | SelectionDAG &DAG = DCI.DAG; |
| 14449 | SDLoc DL(N); |
| 14450 | // 1) or (and A, mask), val => ARMbfi A, val, mask |
| 14451 | // iff (val & mask) == val |
| 14452 | // |
| 14453 | // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask |
| 14454 | // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) |
| 14455 | // && mask == ~mask2 |
| 14456 | // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) |
| 14457 | // && ~mask == mask2 |
| 14458 | // (i.e., copy a bitfield value into another bitfield of the same width) |
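| | //
| | // For example, case (1) with mask 0xffff00ff and val 0x2500 produces
| | // ARMbfi A, 0x25, 0xffff00ff, inserting the 8-bit value 0x25 at bit 8 of A.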
| 14459 | |
| 14460 | if (VT != MVT::i32) |
| 14461 | return SDValue(); |
| 14462 | |
| 14463 | SDValue N00 = N0.getOperand(i: 0); |
| 14464 | |
| 14465 | // The value and the mask need to be constants so we can verify this is |
| 14466 | // actually a bitfield set. If the mask is 0xffff, we can do better |
| 14467 | // via a movt instruction, so don't use BFI in that case. |
| 14468 | SDValue MaskOp = N0.getOperand(i: 1); |
| 14469 | ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Val&: MaskOp); |
| 14470 | if (!MaskC) |
| 14471 | return SDValue(); |
| 14472 | unsigned Mask = MaskC->getZExtValue(); |
| 14473 | if (Mask == 0xffff) |
| 14474 | return SDValue(); |
| 14475 | SDValue Res; |
| 14476 | // Case (1): or (and A, mask), val => ARMbfi A, val, mask |
| 14477 | ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Val&: N1); |
| 14478 | if (N1C) { |
| 14479 | unsigned Val = N1C->getZExtValue(); |
| 14480 | if ((Val & ~Mask) != Val) |
| 14481 | return SDValue(); |
| 14482 | |
| 14483 | if (ARM::isBitFieldInvertedMask(v: Mask)) { |
| 14484 | Val >>= llvm::countr_zero(Val: ~Mask); |
| 14485 | |
| 14486 | Res = DAG.getNode(Opcode: ARMISD::BFI, DL, VT, N1: N00, |
| 14487 | N2: DAG.getConstant(Val, DL, VT: MVT::i32), |
| 14488 | N3: DAG.getConstant(Val: Mask, DL, VT: MVT::i32)); |
| 14489 | |
| 14490 | DCI.CombineTo(N, Res, AddTo: false); |
| 14491 | // Return value from the original node to inform the combiner that N is
| 14492 | // now dead. |
| 14493 | return SDValue(N, 0); |
| 14494 | } |
| 14495 | } else if (N1.getOpcode() == ISD::AND) { |
| 14496 | // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask |
| 14497 | ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(Val: N1.getOperand(i: 1)); |
| 14498 | if (!N11C) |
| 14499 | return SDValue(); |
| 14500 | unsigned Mask2 = N11C->getZExtValue(); |
| 14501 | |
| 14502 | // Mask and ~Mask2 (or the reverse) must be equivalent for the BFI pattern
| 14503 | // to match as-is.
| 14504 | if (ARM::isBitFieldInvertedMask(v: Mask) && |
| 14505 | (Mask == ~Mask2)) { |
| 14506 | // The pack halfword instruction works better for masks that fit it, |
| 14507 | // so use that when it's available. |
| 14508 | if (Subtarget->hasDSP() && |
| 14509 | (Mask == 0xffff || Mask == 0xffff0000)) |
| 14510 | return SDValue(); |
| 14511 | // 2a |
| 14512 | unsigned amt = llvm::countr_zero(Val: Mask2); |
| 14513 | Res = DAG.getNode(Opcode: ISD::SRL, DL, VT, N1: N1.getOperand(i: 0), |
| 14514 | N2: DAG.getConstant(Val: amt, DL, VT: MVT::i32)); |
| 14515 | Res = DAG.getNode(Opcode: ARMISD::BFI, DL, VT, N1: N00, N2: Res, |
| 14516 | N3: DAG.getConstant(Val: Mask, DL, VT: MVT::i32)); |
| 14517 | DCI.CombineTo(N, Res, AddTo: false); |
| 14518 | // Return value from the original node to inform the combiner that N is
| 14519 | // now dead. |
| 14520 | return SDValue(N, 0); |
| 14521 | } else if (ARM::isBitFieldInvertedMask(v: ~Mask) && |
| 14522 | (~Mask == Mask2)) { |
| 14523 | // The pack halfword instruction works better for masks that fit it, |
| 14524 | // so use that when it's available. |
| 14525 | if (Subtarget->hasDSP() && |
| 14526 | (Mask2 == 0xffff || Mask2 == 0xffff0000)) |
| 14527 | return SDValue(); |
| 14528 | // 2b |
| 14529 | unsigned lsb = llvm::countr_zero(Val: Mask); |
| 14530 | Res = DAG.getNode(Opcode: ISD::SRL, DL, VT, N1: N00, |
| 14531 | N2: DAG.getConstant(Val: lsb, DL, VT: MVT::i32)); |
| 14532 | Res = DAG.getNode(Opcode: ARMISD::BFI, DL, VT, N1: N1.getOperand(i: 0), N2: Res, |
| 14533 | N3: DAG.getConstant(Val: Mask2, DL, VT: MVT::i32)); |
| 14534 | DCI.CombineTo(N, Res, AddTo: false); |
| 14535 | // Return value from the original node to inform the combiner that N is
| 14536 | // now dead. |
| 14537 | return SDValue(N, 0); |
| 14538 | } |
| 14539 | } |
| 14540 | |
| 14541 | if (DAG.MaskedValueIsZero(Op: N1, Mask: MaskC->getAPIntValue()) && |
| 14542 | N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(Val: N00.getOperand(i: 1)) && |
| 14543 | ARM::isBitFieldInvertedMask(v: ~Mask)) { |
| 14544 | // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask |
| 14545 | // where lsb(mask) == #shamt and masked bits of B are known zero. |
| 14546 | SDValue ShAmt = N00.getOperand(i: 1); |
| 14547 | unsigned ShAmtC = ShAmt->getAsZExtVal(); |
| 14548 | unsigned LSB = llvm::countr_zero(Val: Mask); |
| 14549 | if (ShAmtC != LSB) |
| 14550 | return SDValue(); |
| 14551 | |
| 14552 | Res = DAG.getNode(Opcode: ARMISD::BFI, DL, VT, N1, N2: N00.getOperand(i: 0), |
| 14553 | N3: DAG.getConstant(Val: ~Mask, DL, VT: MVT::i32)); |
| 14554 | |
| 14555 | DCI.CombineTo(N, Res, AddTo: false); |
| 14556 | // Return value from the original node to inform the combiner that N is
| 14557 | // now dead. |
| 14558 | return SDValue(N, 0); |
| 14559 | } |
| 14560 | |
| 14561 | return SDValue(); |
| 14562 | } |
| 14563 | |
| 14564 | static bool isValidMVECond(unsigned CC, bool IsFloat) { |
| 14565 | switch (CC) { |
| 14566 | case ARMCC::EQ: |
| 14567 | case ARMCC::NE: |
| 14568 | case ARMCC::LE: |
| 14569 | case ARMCC::GT: |
| 14570 | case ARMCC::GE: |
| 14571 | case ARMCC::LT: |
| 14572 | return true; |
| 14573 | case ARMCC::HS: |
| 14574 | case ARMCC::HI: |
| 14575 | return !IsFloat; |
| 14576 | default: |
| 14577 | return false; |
| 14578 | }; |
| 14579 | } |
| 14580 | |
| 14581 | static ARMCC::CondCodes getVCMPCondCode(SDValue N) { |
| 14582 | if (N->getOpcode() == ARMISD::VCMP) |
| 14583 | return (ARMCC::CondCodes)N->getConstantOperandVal(Num: 2); |
| 14584 | else if (N->getOpcode() == ARMISD::VCMPZ) |
| 14585 | return (ARMCC::CondCodes)N->getConstantOperandVal(Num: 1); |
| 14586 | else |
| 14587 | llvm_unreachable("Not a VCMP/VCMPZ!");
| 14588 | } |
| 14589 | |
| 14590 | static bool CanInvertMVEVCMP(SDValue N) { |
| 14591 | ARMCC::CondCodes CC = ARMCC::getOppositeCondition(CC: getVCMPCondCode(N)); |
| 14592 | return isValidMVECond(CC, IsFloat: N->getOperand(Num: 0).getValueType().isFloatingPoint()); |
| 14593 | } |
| 14594 | |
| 14595 | static SDValue PerformORCombine_i1(SDNode *N, SelectionDAG &DAG, |
| 14596 | const ARMSubtarget *Subtarget) { |
| 14597 | // Try to invert "or A, B" -> "and ~A, ~B", as the "and" is easier to chain |
| 14598 | // together with predicates |
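| | // For example, (or (vcmp eq ..), (vcmp gt ..)) becomes
| | // (not (and (not (vcmp eq ..)), (not (vcmp gt ..)))); the inner nots then
| | // fold into (vcmp ne ..) and (vcmp le ..), leaving an and of predicates.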
| 14599 | EVT VT = N->getValueType(ResNo: 0); |
| 14600 | SDLoc DL(N); |
| 14601 | SDValue N0 = N->getOperand(Num: 0); |
| 14602 | SDValue N1 = N->getOperand(Num: 1); |
| 14603 | |
| 14604 | auto IsFreelyInvertable = [&](SDValue V) { |
| 14605 | if (V->getOpcode() == ARMISD::VCMP || V->getOpcode() == ARMISD::VCMPZ) |
| 14606 | return CanInvertMVEVCMP(N: V); |
| 14607 | return false; |
| 14608 | }; |
| 14609 | |
| 14610 | // At least one operand must be freely invertable. |
| 14611 | if (!(IsFreelyInvertable(N0) || IsFreelyInvertable(N1))) |
| 14612 | return SDValue(); |
| 14613 | |
| 14614 | SDValue NewN0 = DAG.getLogicalNOT(DL, Val: N0, VT); |
| 14615 | SDValue NewN1 = DAG.getLogicalNOT(DL, Val: N1, VT); |
| 14616 | SDValue And = DAG.getNode(Opcode: ISD::AND, DL, VT, N1: NewN0, N2: NewN1); |
| 14617 | return DAG.getLogicalNOT(DL, Val: And, VT); |
| 14618 | } |
| 14619 | |
| 14620 | /// PerformORCombine - Target-specific dag combine xforms for ISD::OR |
| 14621 | static SDValue PerformORCombine(SDNode *N, |
| 14622 | TargetLowering::DAGCombinerInfo &DCI, |
| 14623 | const ARMSubtarget *Subtarget) { |
| 14624 | // Attempt to use immediate-form VORR |
| 14625 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Val: N->getOperand(Num: 1)); |
| 14626 | SDLoc dl(N); |
| 14627 | EVT VT = N->getValueType(ResNo: 0); |
| 14628 | SelectionDAG &DAG = DCI.DAG; |
| 14629 | |
| 14630 | if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
| 14631 | return SDValue(); |
| 14632 | |
| 14633 | if (Subtarget->hasMVEIntegerOps() && (VT == MVT::v2i1 || VT == MVT::v4i1 || |
| 14634 | VT == MVT::v8i1 || VT == MVT::v16i1)) |
| 14635 | return PerformORCombine_i1(N, DAG, Subtarget); |
| 14636 | |
| 14637 | APInt SplatBits, SplatUndef; |
| 14638 | unsigned SplatBitSize; |
| 14639 | bool HasAnyUndefs; |
| 14640 | if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) && |
| 14641 | BVN->isConstantSplat(SplatValue&: SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { |
| 14642 | if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 || |
| 14643 | SplatBitSize == 64) { |
| 14644 | EVT VorrVT; |
| 14645 | SDValue Val = |
| 14646 | isVMOVModifiedImm(SplatBits: SplatBits.getZExtValue(), SplatUndef: SplatUndef.getZExtValue(), |
| 14647 | SplatBitSize, DAG, dl, VT&: VorrVT, VectorVT: VT, type: OtherModImm); |
| 14648 | if (Val.getNode()) { |
| 14649 | SDValue Input = |
| 14650 | DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: VorrVT, Operand: N->getOperand(Num: 0)); |
| 14651 | SDValue Vorr = DAG.getNode(Opcode: ARMISD::VORRIMM, DL: dl, VT: VorrVT, N1: Input, N2: Val); |
| 14652 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Vorr); |
| 14653 | } |
| 14654 | } |
| 14655 | } |
| 14656 | |
| 14657 | if (!Subtarget->isThumb1Only()) { |
| 14658 | // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) |
| 14659 | if (SDValue Result = combineSelectAndUseCommutative(N, AllOnes: false, DCI)) |
| 14660 | return Result; |
| 14661 | if (SDValue Result = PerformORCombineToSMULWBT(OR: N, DCI, Subtarget)) |
| 14662 | return Result; |
| 14663 | } |
| 14664 | |
| 14665 | SDValue N0 = N->getOperand(Num: 0); |
| 14666 | SDValue N1 = N->getOperand(Num: 1); |
| 14667 | |
| 14668 | // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. |
| 14669 | if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && |
| 14670 | DAG.getTargetLoweringInfo().isTypeLegal(VT)) { |
| 14671 | |
| 14672 | // The code below optimizes (or (and X, Y), Z). |
| 14673 | // The AND operand needs to have a single user to make these optimizations |
| 14674 | // profitable. |
| 14675 | if (N0.getOpcode() != ISD::AND || !N0.hasOneUse()) |
| 14676 | return SDValue(); |
| 14677 | |
| 14678 | APInt SplatUndef; |
| 14679 | unsigned SplatBitSize; |
| 14680 | bool HasAnyUndefs; |
| 14681 | |
| 14682 | APInt SplatBits0, SplatBits1; |
| 14683 | BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(Val: N0->getOperand(Num: 1)); |
| 14684 | BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(Val: N1->getOperand(Num: 1)); |
| 14685 | // Ensure that the second operand of both ands are constants |
| 14686 | if (BVN0 && BVN0->isConstantSplat(SplatValue&: SplatBits0, SplatUndef, SplatBitSize, |
| 14687 | HasAnyUndefs) && !HasAnyUndefs) { |
| 14688 | if (BVN1 && BVN1->isConstantSplat(SplatValue&: SplatBits1, SplatUndef, SplatBitSize, |
| 14689 | HasAnyUndefs) && !HasAnyUndefs) { |
| 14690 | // Ensure that the bit width of the constants are the same and that |
| 14691 | // the splat arguments are logical inverses as per the pattern we |
| 14692 | // are trying to simplify. |
| 14693 | if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() && |
| 14694 | SplatBits0 == ~SplatBits1) { |
| 14695 | // Canonicalize the vector type to make instruction selection |
| 14696 | // simpler. |
| 14697 | EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; |
| 14698 | SDValue Result = DAG.getNode(Opcode: ARMISD::VBSP, DL: dl, VT: CanonicalVT, |
| 14699 | N1: N0->getOperand(Num: 1), |
| 14700 | N2: N0->getOperand(Num: 0), |
| 14701 | N3: N1->getOperand(Num: 0)); |
| 14702 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Result); |
| 14703 | } |
| 14704 | } |
| 14705 | } |
| 14706 | } |
| 14707 | |
| 14708 | // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when |
| 14709 | // reasonable. |
| 14710 | if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) { |
| 14711 | if (SDValue Res = PerformORCombineToBFI(N, DCI, Subtarget)) |
| 14712 | return Res; |
| 14713 | } |
| 14714 | |
| 14715 | if (SDValue Result = PerformSHLSimplify(N, DCI, ST: Subtarget)) |
| 14716 | return Result; |
| 14717 | |
| 14718 | return SDValue(); |
| 14719 | } |
| 14720 | |
| 14721 | static SDValue PerformXORCombine(SDNode *N, |
| 14722 | TargetLowering::DAGCombinerInfo &DCI, |
| 14723 | const ARMSubtarget *Subtarget) { |
| 14724 | EVT VT = N->getValueType(ResNo: 0); |
| 14725 | SelectionDAG &DAG = DCI.DAG; |
| 14726 | |
| 14727 | if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
| 14728 | return SDValue(); |
| 14729 | |
| 14730 | if (!Subtarget->isThumb1Only()) { |
| 14731 | // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) |
| 14732 | if (SDValue Result = combineSelectAndUseCommutative(N, AllOnes: false, DCI)) |
| 14733 | return Result; |
| 14734 | |
| 14735 | if (SDValue Result = PerformSHLSimplify(N, DCI, ST: Subtarget)) |
| 14736 | return Result; |
| 14737 | } |
| 14738 | |
| 14739 | if (Subtarget->hasMVEIntegerOps()) { |
| 14740 | // fold (xor(vcmp/z, 1)) into a vcmp with the opposite condition. |
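| | // For example, (xor (vcmp eq A, B), all-ones) becomes (vcmp ne A, B).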
| 14741 | SDValue N0 = N->getOperand(Num: 0); |
| 14742 | SDValue N1 = N->getOperand(Num: 1); |
| 14743 | const TargetLowering *TLI = Subtarget->getTargetLowering(); |
| 14744 | if (TLI->isConstTrueVal(N: N1) && |
| 14745 | (N0->getOpcode() == ARMISD::VCMP || N0->getOpcode() == ARMISD::VCMPZ)) { |
| 14746 | if (CanInvertMVEVCMP(N: N0)) { |
| 14747 | SDLoc DL(N0); |
| 14748 | ARMCC::CondCodes CC = ARMCC::getOppositeCondition(CC: getVCMPCondCode(N: N0)); |
| 14749 | |
| 14750 | SmallVector<SDValue, 4> Ops; |
| 14751 | Ops.push_back(Elt: N0->getOperand(Num: 0)); |
| 14752 | if (N0->getOpcode() == ARMISD::VCMP) |
| 14753 | Ops.push_back(Elt: N0->getOperand(Num: 1)); |
| 14754 | Ops.push_back(Elt: DAG.getConstant(Val: CC, DL, VT: MVT::i32)); |
| 14755 | return DAG.getNode(Opcode: N0->getOpcode(), DL, VT: N0->getValueType(ResNo: 0), Ops); |
| 14756 | } |
| 14757 | } |
| 14758 | } |
| 14759 | |
| 14760 | return SDValue(); |
| 14761 | } |
| 14762 | |
| 14763 | // ParseBFI - given a BFI instruction in N, extract the "from" value (Rn)
| 14764 | // and return it, and fill in FromMask and ToMask with (consecutive) bits in
| 14765 | // "from" to be extracted and their position in "to" (Rd).
| 14766 | static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) { |
| 14767 | assert(N->getOpcode() == ARMISD::BFI); |
| 14768 | |
| 14769 | SDValue From = N->getOperand(Num: 1); |
| 14770 | ToMask = ~N->getConstantOperandAPInt(Num: 2); |
| 14771 | FromMask = APInt::getLowBitsSet(numBits: ToMask.getBitWidth(), loBitsSet: ToMask.popcount()); |
| 14772 | |
| 14773 | // If the Base came from a SHR #C, we can deduce that it is really testing bit |
| 14774 | // #C in the base of the SHR. |
| 14775 | if (From->getOpcode() == ISD::SRL && |
| 14776 | isa<ConstantSDNode>(Val: From->getOperand(Num: 1))) { |
| 14777 | APInt Shift = From->getConstantOperandAPInt(Num: 1); |
| 14778 | assert(Shift.getLimitedValue() < 32 && "Shift too large!");
| 14779 | FromMask <<= Shift.getLimitedValue(Limit: 31); |
| 14780 | From = From->getOperand(Num: 0); |
| 14781 | } |
| 14782 | |
| 14783 | return From; |
| 14784 | } |
| 14785 | |
| 14786 | // If A and B contain one contiguous set of bits, does A | B == A . B? |
| 14787 | // |
| 14788 | // Neither A nor B may be zero.
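| | //
| | // For example, A = 0b1100 and B = 0b0011 concatenate properly (A's lowest set
| | // bit sits directly above B's highest set bit), while A = 0b1100, B = 0b0001
| | // leave a gap and do not.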
| 14789 | static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) { |
| 14790 | unsigned LastActiveBitInA = A.countr_zero(); |
| 14791 | unsigned FirstActiveBitInB = B.getBitWidth() - B.countl_zero() - 1; |
| 14792 | return LastActiveBitInA - 1 == FirstActiveBitInB; |
| 14793 | } |
| 14794 | |
| 14795 | static SDValue FindBFIToCombineWith(SDNode *N) { |
| 14796 | // We have a BFI in N. Find a BFI it can combine with, if one exists. |
| 14797 | APInt ToMask, FromMask; |
| 14798 | SDValue From = ParseBFI(N, ToMask, FromMask); |
| 14799 | SDValue To = N->getOperand(Num: 0); |
| 14800 | |
| 14801 | SDValue V = To; |
| 14802 | if (V.getOpcode() != ARMISD::BFI) |
| 14803 | return SDValue(); |
| 14804 | |
| 14805 | APInt NewToMask, NewFromMask; |
| 14806 | SDValue NewFrom = ParseBFI(N: V.getNode(), ToMask&: NewToMask, FromMask&: NewFromMask); |
| 14807 | if (NewFrom != From) |
| 14808 | return SDValue(); |
| 14809 | |
| 14810 | // Do the written bits conflict with any we've seen so far? |
| 14811 | if ((NewToMask & ToMask).getBoolValue()) |
| 14812 | // Conflicting bits. |
| 14813 | return SDValue(); |
| 14814 | |
| 14815 | // Are the new bits contiguous when combined with the old bits? |
| 14816 | if (BitsProperlyConcatenate(A: ToMask, B: NewToMask) && |
| 14817 | BitsProperlyConcatenate(A: FromMask, B: NewFromMask)) |
| 14818 | return V; |
| 14819 | if (BitsProperlyConcatenate(A: NewToMask, B: ToMask) && |
| 14820 | BitsProperlyConcatenate(A: NewFromMask, B: FromMask)) |
| 14821 | return V; |
| 14822 | |
| 14823 | return SDValue(); |
| 14824 | } |
| 14825 | |
| 14826 | static SDValue PerformBFICombine(SDNode *N, SelectionDAG &DAG) { |
| 14827 | SDValue N0 = N->getOperand(Num: 0); |
| 14828 | SDValue N1 = N->getOperand(Num: 1); |
| 14829 | |
| 14830 | if (N1.getOpcode() == ISD::AND) { |
| 14831 | // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff |
| 14832 | // the bits being cleared by the AND are not demanded by the BFI. |
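| | // For example, (bfi A, (and B, 0xff), 0xffffff00) only inserts the low 8
| | // bits of B, which the AND already leaves untouched, so the AND is dropped.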
| 14833 | ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(Val: N1.getOperand(i: 1)); |
| 14834 | if (!N11C) |
| 14835 | return SDValue(); |
| 14836 | unsigned InvMask = N->getConstantOperandVal(Num: 2); |
| 14837 | unsigned LSB = llvm::countr_zero(Val: ~InvMask); |
| 14838 | unsigned Width = llvm::bit_width<unsigned>(Value: ~InvMask) - LSB; |
| 14839 | assert(Width < |
| 14840 | static_cast<unsigned>(std::numeric_limits<unsigned>::digits) && |
| 14841 | "undefined behavior");
| 14842 | unsigned Mask = (1u << Width) - 1; |
| 14843 | unsigned Mask2 = N11C->getZExtValue(); |
| 14844 | if ((Mask & (~Mask2)) == 0) |
| 14845 | return DAG.getNode(Opcode: ARMISD::BFI, DL: SDLoc(N), VT: N->getValueType(ResNo: 0), |
| 14846 | N1: N->getOperand(Num: 0), N2: N1.getOperand(i: 0), N3: N->getOperand(Num: 2)); |
| 14847 | return SDValue(); |
| 14848 | } |
| 14849 | |
| 14850 | // Look for another BFI to combine with. |
| 14851 | if (SDValue CombineBFI = FindBFIToCombineWith(N)) { |
| 14852 | // We've found a BFI. |
| 14853 | APInt ToMask1, FromMask1; |
| 14854 | SDValue From1 = ParseBFI(N, ToMask&: ToMask1, FromMask&: FromMask1); |
| 14855 | |
| 14856 | APInt ToMask2, FromMask2; |
| 14857 | SDValue From2 = ParseBFI(N: CombineBFI.getNode(), ToMask&: ToMask2, FromMask&: FromMask2); |
| 14858 | assert(From1 == From2); |
| 14859 | (void)From2; |
| 14860 | |
| 14861 | // Create a new BFI, combining the two together. |
| 14862 | APInt NewFromMask = FromMask1 | FromMask2; |
| 14863 | APInt NewToMask = ToMask1 | ToMask2; |
| 14864 | |
| 14865 | EVT VT = N->getValueType(ResNo: 0); |
| 14866 | SDLoc dl(N); |
| 14867 | |
| 14868 | if (NewFromMask[0] == 0) |
| 14869 | From1 = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT, N1: From1, |
| 14870 | N2: DAG.getConstant(Val: NewFromMask.countr_zero(), DL: dl, VT)); |
| 14871 | return DAG.getNode(Opcode: ARMISD::BFI, DL: dl, VT, N1: CombineBFI.getOperand(i: 0), N2: From1, |
| 14872 | N3: DAG.getConstant(Val: ~NewToMask, DL: dl, VT)); |
| 14873 | } |
| 14874 | |
| 14875 | // Reassociate BFI(BFI (A, B, M1), C, M2) to BFI(BFI (A, C, M2), B, M1) so |
| 14876 | // that lower bit insertions are performed first, providing that M1 and M2 |
| 14877 | // do not overlap. This can allow multiple BFI instructions to be combined
| 14878 | // together by the other folds above. |
| 14879 | if (N->getOperand(Num: 0).getOpcode() == ARMISD::BFI) { |
| 14880 | APInt ToMask1 = ~N->getConstantOperandAPInt(Num: 2); |
| 14881 | APInt ToMask2 = ~N0.getConstantOperandAPInt(i: 2); |
| 14882 | |
| 14883 | if (!N0.hasOneUse() || (ToMask1 & ToMask2) != 0 || |
| 14884 | ToMask1.countl_zero() < ToMask2.countl_zero()) |
| 14885 | return SDValue(); |
| 14886 | |
| 14887 | EVT VT = N->getValueType(ResNo: 0); |
| 14888 | SDLoc dl(N); |
| 14889 | SDValue BFI1 = DAG.getNode(Opcode: ARMISD::BFI, DL: dl, VT, N1: N0.getOperand(i: 0), |
| 14890 | N2: N->getOperand(Num: 1), N3: N->getOperand(Num: 2)); |
| 14891 | return DAG.getNode(Opcode: ARMISD::BFI, DL: dl, VT, N1: BFI1, N2: N0.getOperand(i: 1), |
| 14892 | N3: N0.getOperand(i: 2)); |
| 14893 | } |
| 14894 | |
| 14895 | return SDValue(); |
| 14896 | } |
| 14897 | |
| 14898 | // Check that N is CMPZ(CSINC(0, 0, CC, X))
| 14899 | // or CMPZ(CMOV(1, 0, CC, X));
| 14900 | // return X if valid. |
| 14901 | static SDValue IsCMPZCSINC(SDNode *Cmp, ARMCC::CondCodes &CC) { |
| 14902 | if (Cmp->getOpcode() != ARMISD::CMPZ || !isNullConstant(V: Cmp->getOperand(Num: 1))) |
| 14903 | return SDValue(); |
| 14904 | SDValue CSInc = Cmp->getOperand(Num: 0); |
| 14905 | |
| 14906 | // Ignore any `And 1` nodes that may not yet have been removed. We are |
| 14907 | // looking for a value that produces 1/0, so these have no effect on the |
| 14908 | // code. |
| 14909 | while (CSInc.getOpcode() == ISD::AND && |
| 14910 | isa<ConstantSDNode>(Val: CSInc.getOperand(i: 1)) && |
| 14911 | CSInc.getConstantOperandVal(i: 1) == 1 && CSInc->hasOneUse()) |
| 14912 | CSInc = CSInc.getOperand(i: 0); |
| 14913 | |
| 14914 | if (CSInc.getOpcode() == ARMISD::CSINC && |
| 14915 | isNullConstant(V: CSInc.getOperand(i: 0)) && |
| 14916 | isNullConstant(V: CSInc.getOperand(i: 1)) && CSInc->hasOneUse()) { |
| 14917 | CC = (ARMCC::CondCodes)CSInc.getConstantOperandVal(i: 2); |
| 14918 | return CSInc.getOperand(i: 3); |
| 14919 | } |
| 14920 | if (CSInc.getOpcode() == ARMISD::CMOV && isOneConstant(V: CSInc.getOperand(i: 0)) && |
| 14921 | isNullConstant(V: CSInc.getOperand(i: 1)) && CSInc->hasOneUse()) { |
| 14922 | CC = (ARMCC::CondCodes)CSInc.getConstantOperandVal(i: 2); |
| 14923 | return CSInc.getOperand(i: 3); |
| 14924 | } |
| 14925 | if (CSInc.getOpcode() == ARMISD::CMOV && isOneConstant(V: CSInc.getOperand(i: 1)) && |
| 14926 | isNullConstant(V: CSInc.getOperand(i: 0)) && CSInc->hasOneUse()) { |
| 14927 | CC = ARMCC::getOppositeCondition( |
| 14928 | CC: (ARMCC::CondCodes)CSInc.getConstantOperandVal(i: 2)); |
| 14929 | return CSInc.getOperand(i: 3); |
| 14930 | } |
| 14931 | return SDValue(); |
| 14932 | } |
| 14933 | |
| 14934 | static SDValue PerformCMPZCombine(SDNode *N, SelectionDAG &DAG) { |
| 14935 | // Given CMPZ(CSINC(C, 0, 0, EQ), 0), we can just use C directly. As in |
| 14936 | // t92: flags = ARMISD::CMPZ t74, 0 |
| 14937 | // t93: i32 = ARMISD::CSINC 0, 0, 1, t92 |
| 14938 | // t96: flags = ARMISD::CMPZ t93, 0 |
| 14939 | // t114: i32 = ARMISD::CSINV 0, 0, 0, t96 |
| 14940 | ARMCC::CondCodes Cond; |
| 14941 | if (SDValue C = IsCMPZCSINC(Cmp: N, CC&: Cond)) |
| 14942 | if (Cond == ARMCC::EQ) |
| 14943 | return C; |
| 14944 | return SDValue(); |
| 14945 | } |
| 14946 | |
| 14947 | static SDValue PerformCSETCombine(SDNode *N, SelectionDAG &DAG) { |
| 14948 | // Fold away an unnecessary CMPZ/CSINC
| 14949 | // CSXYZ A, B, C1 (CMPZ (CSINC 0, 0, C2, D), 0) -> |
| 14950 | // if C1==EQ -> CSXYZ A, B, C2, D |
| 14951 | // if C1==NE -> CSXYZ A, B, NOT(C2), D |
| 14952 | ARMCC::CondCodes Cond; |
| 14953 | if (SDValue C = IsCMPZCSINC(Cmp: N->getOperand(Num: 3).getNode(), CC&: Cond)) { |
| 14954 | if (N->getConstantOperandVal(Num: 2) == ARMCC::EQ) |
| 14955 | return DAG.getNode(Opcode: N->getOpcode(), DL: SDLoc(N), VT: MVT::i32, N1: N->getOperand(Num: 0), |
| 14956 | N2: N->getOperand(Num: 1), |
| 14957 | N3: DAG.getConstant(Val: Cond, DL: SDLoc(N), VT: MVT::i32), N4: C); |
| 14958 | if (N->getConstantOperandVal(Num: 2) == ARMCC::NE) |
| 14959 | return DAG.getNode( |
| 14960 | Opcode: N->getOpcode(), DL: SDLoc(N), VT: MVT::i32, N1: N->getOperand(Num: 0), |
| 14961 | N2: N->getOperand(Num: 1), |
| 14962 | N3: DAG.getConstant(Val: ARMCC::getOppositeCondition(CC: Cond), DL: SDLoc(N), VT: MVT::i32), N4: C); |
| 14963 | } |
| 14964 | return SDValue(); |
| 14965 | } |
| 14966 | |
| 14967 | /// PerformVMOVRRDCombine - Target-specific dag combine xforms for |
| 14968 | /// ARMISD::VMOVRRD. |
| 14969 | static SDValue PerformVMOVRRDCombine(SDNode *N, |
| 14970 | TargetLowering::DAGCombinerInfo &DCI, |
| 14971 | const ARMSubtarget *Subtarget) { |
| 14972 | // vmovrrd(vmovdrr x, y) -> x,y |
| 14973 | SDValue InDouble = N->getOperand(Num: 0); |
| 14974 | if (InDouble.getOpcode() == ARMISD::VMOVDRR && Subtarget->hasFP64()) |
| 14975 | return DCI.CombineTo(N, Res0: InDouble.getOperand(i: 0), Res1: InDouble.getOperand(i: 1)); |
| 14976 | |
| 14977 | // vmovrrd(load f64) -> (load i32), (load i32) |
| 14978 | SDNode *InNode = InDouble.getNode(); |
| 14979 | if (ISD::isNormalLoad(N: InNode) && InNode->hasOneUse() && |
| 14980 | InNode->getValueType(ResNo: 0) == MVT::f64 && |
| 14981 | InNode->getOperand(Num: 1).getOpcode() == ISD::FrameIndex && |
| 14982 | !cast<LoadSDNode>(Val: InNode)->isVolatile()) { |
| 14983 | // TODO: Should this be done for non-FrameIndex operands? |
| 14984 | LoadSDNode *LD = cast<LoadSDNode>(Val: InNode); |
| 14985 | |
| 14986 | SelectionDAG &DAG = DCI.DAG; |
| 14987 | SDLoc DL(LD); |
| 14988 | SDValue BasePtr = LD->getBasePtr(); |
| 14989 | SDValue NewLD1 = |
| 14990 | DAG.getLoad(VT: MVT::i32, dl: DL, Chain: LD->getChain(), Ptr: BasePtr, PtrInfo: LD->getPointerInfo(), |
| 14991 | Alignment: LD->getAlign(), MMOFlags: LD->getMemOperand()->getFlags()); |
| 14992 | |
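| | // The second half of the f64 lives 4 bytes past the base pointer, so build an |
| | // offset pointer for the second i32 load. |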
| 14993 | SDValue OffsetPtr = DAG.getNode(Opcode: ISD::ADD, DL, VT: MVT::i32, N1: BasePtr, |
| 14994 | N2: DAG.getConstant(Val: 4, DL, VT: MVT::i32)); |
| 14995 | |
| 14996 | SDValue NewLD2 = DAG.getLoad(VT: MVT::i32, dl: DL, Chain: LD->getChain(), Ptr: OffsetPtr, |
| 14997 | PtrInfo: LD->getPointerInfo().getWithOffset(O: 4), |
| 14998 | Alignment: commonAlignment(A: LD->getAlign(), Offset: 4), |
| 14999 | MMOFlags: LD->getMemOperand()->getFlags()); |
| 15000 | |
| 15001 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 1), To: NewLD2.getValue(R: 1)); |
| 15002 | if (DCI.DAG.getDataLayout().isBigEndian()) |
| 15003 | std::swap (a&: NewLD1, b&: NewLD2); |
| 15004 | SDValue Result = DCI.CombineTo(N, Res0: NewLD1, Res1: NewLD2); |
| 15005 | return Result; |
| 15006 | } |
| 15007 | |
| 15008 | // VMOVRRD(extract(..(build_vector(a, b, c, d)))) -> a,b or c,d |
| 15009 | // VMOVRRD(extract(insert_vector(insert_vector(.., a, l1), b, l2))) -> a,b |
| 15010 | if (InDouble.getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 15011 | isa<ConstantSDNode>(Val: InDouble.getOperand(i: 1))) { |
| 15012 | SDValue BV = InDouble.getOperand(i: 0); |
| 15013 | // Look up through any nop bitcasts and vector_reg_casts. Bitcasts may |
| 15014 | // change lane order under big endian. |
| 15015 | bool BVSwap = BV.getOpcode() == ISD::BITCAST; |
| 15016 | while ( |
| 15017 | (BV.getOpcode() == ISD::BITCAST || |
| 15018 | BV.getOpcode() == ARMISD::VECTOR_REG_CAST) && |
| 15019 | (BV.getValueType() == MVT::v2f64 || BV.getValueType() == MVT::v2i64)) { |
| 15020 | BVSwap = BV.getOpcode() == ISD::BITCAST; |
| 15021 | BV = BV.getOperand(i: 0); |
| 15022 | } |
| 15023 | if (BV.getValueType() != MVT::v4i32) |
| 15024 | return SDValue(); |
| 15025 | |
| 15026 | // Handle buildvectors, pulling out the correct lane depending on |
| 15027 | // endianness. |
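| | // Extracting f64 lane 1 corresponds to i32 lanes 2 and 3 of the v4i32 source. |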
| 15028 | unsigned Offset = InDouble.getConstantOperandVal(i: 1) == 1 ? 2 : 0; |
| 15029 | if (BV.getOpcode() == ISD::BUILD_VECTOR) { |
| 15030 | SDValue Op0 = BV.getOperand(i: Offset); |
| 15031 | SDValue Op1 = BV.getOperand(i: Offset + 1); |
| 15032 | if (!Subtarget->isLittle() && BVSwap) |
| 15033 | std::swap(a&: Op0, b&: Op1); |
| 15034 | |
| 15035 | return DCI.DAG.getMergeValues(Ops: {Op0, Op1}, dl: SDLoc(N)); |
| 15036 | } |
| 15037 | |
| 15038 | // A chain of insert_vectors, grabbing the correct value of the chain of |
| 15039 | // inserts. |
| 15040 | SDValue Op0, Op1; |
| 15041 | while (BV.getOpcode() == ISD::INSERT_VECTOR_ELT) { |
| 15042 | if (isa<ConstantSDNode>(Val: BV.getOperand(i: 2))) { |
| 15043 | if (BV.getConstantOperandVal(i: 2) == Offset && !Op0) |
| 15044 | Op0 = BV.getOperand(i: 1); |
| 15045 | if (BV.getConstantOperandVal(i: 2) == Offset + 1 && !Op1) |
| 15046 | Op1 = BV.getOperand(i: 1); |
| 15047 | } |
| 15048 | BV = BV.getOperand(i: 0); |
| 15049 | } |
| 15050 | if (!Subtarget->isLittle() && BVSwap) |
| 15051 | std::swap(a&: Op0, b&: Op1); |
| 15052 | if (Op0 && Op1) |
| 15053 | return DCI.DAG.getMergeValues(Ops: {Op0, Op1}, dl: SDLoc(N)); |
| 15054 | } |
| 15055 | |
| 15056 | return SDValue(); |
| 15057 | } |
| 15058 | |
| 15059 | /// PerformVMOVDRRCombine - Target-specific dag combine xforms for |
| 15060 | /// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. |
| 15061 | static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { |
| 15062 | // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) |
| 15063 | SDValue Op0 = N->getOperand(Num: 0); |
| 15064 | SDValue Op1 = N->getOperand(Num: 1); |
| 15065 | if (Op0.getOpcode() == ISD::BITCAST) |
| 15066 | Op0 = Op0.getOperand(i: 0); |
| 15067 | if (Op1.getOpcode() == ISD::BITCAST) |
| 15068 | Op1 = Op1.getOperand(i: 0); |
| 15069 | if (Op0.getOpcode() == ARMISD::VMOVRRD && |
| 15070 | Op0.getNode() == Op1.getNode() && |
| 15071 | Op0.getResNo() == 0 && Op1.getResNo() == 1) |
| 15072 | return DAG.getNode(Opcode: ISD::BITCAST, DL: SDLoc(N), |
| 15073 | VT: N->getValueType(ResNo: 0), Operand: Op0.getOperand(i: 0)); |
| 15074 | return SDValue(); |
| 15075 | } |
| 15076 | |
| 15077 | static SDValue PerformVMOVhrCombine(SDNode *N, |
| 15078 | TargetLowering::DAGCombinerInfo &DCI) { |
| 15079 | SDValue Op0 = N->getOperand(Num: 0); |
| 15080 | |
| 15081 | // VMOVhr (VMOVrh (X)) -> X |
| 15082 | if (Op0->getOpcode() == ARMISD::VMOVrh) |
| 15083 | return Op0->getOperand(Num: 0); |
| 15084 | |
| 15085 | // FullFP16: half values are passed in S-registers, and we don't |
| 15086 | // need any of the bitcasts and moves: |
| 15087 | // |
| 15088 | // t2: f32,ch1,gl1? = CopyFromReg ch, Register:f32 %0, gl? |
| 15089 | // t5: i32 = bitcast t2 |
| 15090 | // t18: f16 = ARMISD::VMOVhr t5 |
| 15091 | // => |
| 15092 | // tN: f16,ch2,gl2? = CopyFromReg ch, Register:f32 %0, gl? |
| 15093 | if (Op0->getOpcode() == ISD::BITCAST) { |
| 15094 | SDValue Copy = Op0->getOperand(Num: 0); |
| 15095 | if (Copy.getValueType() == MVT::f32 && |
| 15096 | Copy->getOpcode() == ISD::CopyFromReg) { |
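| | // CopyFromReg takes a chain, a register and an optional glue operand; if the |
| | // glue input is present it must be carried over to the rebuilt copy. |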
| 15097 | bool HasGlue = Copy->getNumOperands() == 3; |
| 15098 | SDValue Ops[] = {Copy->getOperand(Num: 0), Copy->getOperand(Num: 1), |
| 15099 | HasGlue ? Copy->getOperand(Num: 2) : SDValue()}; |
| 15100 | EVT OutTys[] = {N->getValueType(ResNo: 0), MVT::Other, MVT::Glue}; |
| 15101 | SDValue NewCopy = |
| 15102 | DCI.DAG.getNode(Opcode: ISD::CopyFromReg, DL: SDLoc(N), |
| 15103 | VTList: DCI.DAG.getVTList(VTs: ArrayRef(OutTys, HasGlue ? 3 : 2)), |
| 15104 | Ops: ArrayRef(Ops, HasGlue ? 3 : 2)); |
| 15105 | |
| 15106 | // Update Users, Chains, and Potential Glue. |
| 15107 | DCI.DAG.ReplaceAllUsesOfValueWith(From: SDValue(N, 0), To: NewCopy.getValue(R: 0)); |
| 15108 | DCI.DAG.ReplaceAllUsesOfValueWith(From: Copy.getValue(R: 1), To: NewCopy.getValue(R: 1)); |
| 15109 | if (HasGlue) |
| 15110 | DCI.DAG.ReplaceAllUsesOfValueWith(From: Copy.getValue(R: 2), |
| 15111 | To: NewCopy.getValue(R: 2)); |
| 15112 | |
| 15113 | return NewCopy; |
| 15114 | } |
| 15115 | } |
| 15116 | |
| 15117 | // fold (VMOVhr (load x)) -> (load (f16*)x) |
| 15118 | if (LoadSDNode *LN0 = dyn_cast<LoadSDNode>(Val&: Op0)) { |
| 15119 | if (LN0->hasOneUse() && LN0->isUnindexed() && |
| 15120 | LN0->getMemoryVT() == MVT::i16) { |
| 15121 | SDValue Load = |
| 15122 | DCI.DAG.getLoad(VT: N->getValueType(ResNo: 0), dl: SDLoc(N), Chain: LN0->getChain(), |
| 15123 | Ptr: LN0->getBasePtr(), MMO: LN0->getMemOperand()); |
| 15124 | DCI.DAG.ReplaceAllUsesOfValueWith(From: SDValue(N, 0), To: Load.getValue(R: 0)); |
| 15125 | DCI.DAG.ReplaceAllUsesOfValueWith(From: Op0.getValue(R: 1), To: Load.getValue(R: 1)); |
| 15126 | return Load; |
| 15127 | } |
| 15128 | } |
| 15129 | |
| 15130 | // Only the bottom 16 bits of the source register are used. |
| 15131 | APInt DemandedMask = APInt::getLowBitsSet(numBits: 32, loBitsSet: 16); |
| 15132 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); |
| 15133 | if (TLI.SimplifyDemandedBits(Op: Op0, DemandedBits: DemandedMask, DCI)) |
| 15134 | return SDValue(N, 0); |
| 15135 | |
| 15136 | return SDValue(); |
| 15137 | } |
| 15138 | |
| 15139 | static SDValue PerformVMOVrhCombine(SDNode *N, SelectionDAG &DAG) { |
| 15140 | SDValue N0 = N->getOperand(Num: 0); |
| 15141 | EVT VT = N->getValueType(ResNo: 0); |
| 15142 | |
| 15143 | // fold (VMOVrh (fpconst x)) -> const x |
| 15144 | if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Val&: N0)) { |
| 15145 | APFloat V = C->getValueAPF(); |
| 15146 | return DAG.getConstant(Val: V.bitcastToAPInt().getZExtValue(), DL: SDLoc(N), VT); |
| 15147 | } |
| 15148 | |
| 15149 | // fold (VMOVrh (load x)) -> (zextload (i16*)x) |
| 15150 | if (ISD::isNormalLoad(N: N0.getNode()) && N0.hasOneUse()) { |
| 15151 | LoadSDNode *LN0 = cast<LoadSDNode>(Val&: N0); |
| 15152 | |
| 15153 | SDValue Load = |
| 15154 | DAG.getExtLoad(ExtType: ISD::ZEXTLOAD, dl: SDLoc(N), VT, Chain: LN0->getChain(), |
| 15155 | Ptr: LN0->getBasePtr(), MemVT: MVT::i16, MMO: LN0->getMemOperand()); |
| 15156 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(N, 0), To: Load.getValue(R: 0)); |
| 15157 | DAG.ReplaceAllUsesOfValueWith(From: N0.getValue(R: 1), To: Load.getValue(R: 1)); |
| 15158 | return Load; |
| 15159 | } |
| 15160 | |
| 15161 | // Fold VMOVrh(extract(x, n)) -> vgetlaneu(x, n) |
| 15162 | if (N0->getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 15163 | isa<ConstantSDNode>(Val: N0->getOperand(Num: 1))) |
| 15164 | return DAG.getNode(Opcode: ARMISD::VGETLANEu, DL: SDLoc(N), VT, N1: N0->getOperand(Num: 0), |
| 15165 | N2: N0->getOperand(Num: 1)); |
| 15166 | |
| 15167 | return SDValue(); |
| 15168 | } |
| 15169 | |
| 15170 | /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node |
| 15171 | /// are normal, non-volatile loads. If so, it is profitable to bitcast an |
| 15172 | /// i64 vector to have f64 elements, since the value can then be loaded |
| 15173 | /// directly into a VFP register. |
| 15174 | static bool hasNormalLoadOperand(SDNode *N) { |
| 15175 | unsigned NumElts = N->getValueType(ResNo: 0).getVectorNumElements(); |
| 15176 | for (unsigned i = 0; i < NumElts; ++i) { |
| 15177 | SDNode *Elt = N->getOperand(Num: i).getNode(); |
| 15178 | if (ISD::isNormalLoad(N: Elt) && !cast<LoadSDNode>(Val: Elt)->isVolatile()) |
| 15179 | return true; |
| 15180 | } |
| 15181 | return false; |
| 15182 | } |
| 15183 | |
| 15184 | /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for |
| 15185 | /// ISD::BUILD_VECTOR. |
| 15186 | static SDValue PerformBUILD_VECTORCombine(SDNode *N, |
| 15187 | TargetLowering::DAGCombinerInfo &DCI, |
| 15188 | const ARMSubtarget *Subtarget) { |
| 15189 | // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): |
| 15190 | // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value |
| 15191 | // into a pair of GPRs, which is fine when the value is used as a scalar, |
| 15192 | // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. |
| 15193 | SelectionDAG &DAG = DCI.DAG; |
| 15194 | if (N->getNumOperands() == 2) |
| 15195 | if (SDValue RV = PerformVMOVDRRCombine(N, DAG)) |
| 15196 | return RV; |
| 15197 | |
| 15198 | // Load i64 elements as f64 values so that type legalization does not split |
| 15199 | // them up into i32 values. |
| 15200 | EVT VT = N->getValueType(ResNo: 0); |
| 15201 | if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) |
| 15202 | return SDValue(); |
| 15203 | SDLoc dl(N); |
| 15204 | SmallVector<SDValue, 8> Ops; |
| 15205 | unsigned NumElts = VT.getVectorNumElements(); |
| 15206 | for (unsigned i = 0; i < NumElts; ++i) { |
| 15207 | SDValue V = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f64, Operand: N->getOperand(Num: i)); |
| 15208 | Ops.push_back(Elt: V); |
| 15209 | // Make the DAGCombiner fold the bitcast. |
| 15210 | DCI.AddToWorklist(N: V.getNode()); |
| 15211 | } |
| 15212 | EVT FloatVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::f64, NumElements: NumElts); |
| 15213 | SDValue BV = DAG.getBuildVector(VT: FloatVT, DL: dl, Ops); |
| 15214 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: BV); |
| 15215 | } |
| 15216 | |
| 15217 | /// Target-specific dag combine xforms for ARMISD::BUILD_VECTOR. |
| 15218 | static SDValue |
| 15219 | PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
| 15220 | // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR. |
| 15221 | // At that time, we may have inserted bitcasts from integer to float. |
| 15222 | // If these bitcasts have survived DAGCombine, change the lowering of this |
| 15223 | // BUILD_VECTOR into something more vector friendly, i.e., one that does not |
| 15224 | // force the use of floating point types. |
| 15225 | |
| 15226 | // Make sure we can change the type of the vector. |
| 15227 | // This is possible iff: |
| 15228 | // 1. The vector is only used in a bitcast to an integer type. I.e., |
| 15229 | // 1.1. Vector is used only once. |
| 15230 | // 1.2. Use is a bit convert to an integer type. |
| 15231 | // 2. The size of its operands are 32-bits (64-bits are not legal). |
| 15232 | EVT VT = N->getValueType(ResNo: 0); |
| 15233 | EVT EltVT = VT.getVectorElementType(); |
| 15234 | |
| 15235 | // Check 1.1. and 2. |
| 15236 | if (EltVT.getSizeInBits() != 32 || !N->hasOneUse()) |
| 15237 | return SDValue(); |
| 15238 | |
| 15239 | // By construction, the input type must be float. |
| 15240 | assert(EltVT == MVT::f32 && "Unexpected type!" ); |
| 15241 | |
| 15242 | // Check 1.2. |
| 15243 | SDNode *Use = *N->user_begin(); |
| 15244 | if (Use->getOpcode() != ISD::BITCAST || |
| 15245 | Use->getValueType(ResNo: 0).isFloatingPoint()) |
| 15246 | return SDValue(); |
| 15247 | |
| 15248 | // Check profitability. |
| 15249 | // The model is: if more than half of the relevant operands are bitcast from |
| 15250 | // i32, turn the build_vector into a sequence of insert_vector_elt. |
| 15251 | // Relevant operands are everything that is not statically |
| 15252 | // (i.e., at compile time) bitcasted. |
| 15253 | unsigned NumOfBitCastedElts = 0; |
| 15254 | unsigned NumElts = VT.getVectorNumElements(); |
| 15255 | unsigned NumOfRelevantElts = NumElts; |
| 15256 | for (unsigned Idx = 0; Idx < NumElts; ++Idx) { |
| 15257 | SDValue Elt = N->getOperand(Num: Idx); |
| 15258 | if (Elt->getOpcode() == ISD::BITCAST) { |
| 15259 | // Assume only bit cast to i32 will go away. |
| 15260 | if (Elt->getOperand(Num: 0).getValueType() == MVT::i32) |
| 15261 | ++NumOfBitCastedElts; |
| 15262 | } else if (Elt.isUndef() || isa<ConstantSDNode>(Val: Elt)) |
| 15263 | // Constants are statically casted, thus do not count them as |
| 15264 | // relevant operands. |
| 15265 | --NumOfRelevantElts; |
| 15266 | } |
| 15267 | |
| 15268 | // Check if more than half of the elements require a non-free bitcast. |
| 15269 | if (NumOfBitCastedElts <= NumOfRelevantElts / 2) |
| 15270 | return SDValue(); |
| 15271 | |
| 15272 | SelectionDAG &DAG = DCI.DAG; |
| 15273 | // Create the new vector type. |
| 15274 | EVT VecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::i32, NumElements: NumElts); |
| 15275 | // Check if the type is legal. |
| 15276 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 15277 | if (!TLI.isTypeLegal(VT: VecVT)) |
| 15278 | return SDValue(); |
| 15279 | |
| 15280 | // Combine: |
| 15281 | // ARMISD::BUILD_VECTOR E1, E2, ..., EN. |
| 15282 | // => BITCAST INSERT_VECTOR_ELT |
| 15283 | // (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1), |
| 15284 | // (BITCAST EN), N. |
| 15285 | SDValue Vec = DAG.getUNDEF(VT: VecVT); |
| 15286 | SDLoc dl(N); |
| 15287 | for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) { |
| 15288 | SDValue V = N->getOperand(Num: Idx); |
| 15289 | if (V.isUndef()) |
| 15290 | continue; |
| 15291 | if (V.getOpcode() == ISD::BITCAST && |
| 15292 | V->getOperand(Num: 0).getValueType() == MVT::i32) |
| 15293 | // Fold obvious case. |
| 15294 | V = V.getOperand(i: 0); |
| 15295 | else { |
| 15296 | V = DAG.getNode(Opcode: ISD::BITCAST, DL: SDLoc(V), VT: MVT::i32, Operand: V); |
| 15297 | // Make the DAGCombiner fold the bitcasts. |
| 15298 | DCI.AddToWorklist(N: V.getNode()); |
| 15299 | } |
| 15300 | SDValue LaneIdx = DAG.getConstant(Val: Idx, DL: dl, VT: MVT::i32); |
| 15301 | Vec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: VecVT, N1: Vec, N2: V, N3: LaneIdx); |
| 15302 | } |
| 15303 | Vec = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Vec); |
| 15304 | // Make the DAGCombiner fold the bitcasts. |
| 15305 | DCI.AddToWorklist(N: Vec.getNode()); |
| 15306 | return Vec; |
| 15307 | } |
| 15308 | |
| 15309 | static SDValue |
| 15310 | PerformPREDICATE_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
| 15311 | EVT VT = N->getValueType(ResNo: 0); |
| 15312 | SDValue Op = N->getOperand(Num: 0); |
| 15313 | SDLoc dl(N); |
| 15314 | |
| 15315 | // PREDICATE_CAST(PREDICATE_CAST(x)) == PREDICATE_CAST(x) |
| 15316 | if (Op->getOpcode() == ARMISD::PREDICATE_CAST) { |
| 15317 | // If the valuetypes are the same, we can remove the cast entirely. |
| 15318 | if (Op->getOperand(Num: 0).getValueType() == VT) |
| 15319 | return Op->getOperand(Num: 0); |
| 15320 | return DCI.DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT, Operand: Op->getOperand(Num: 0)); |
| 15321 | } |
| 15322 | |
| 15323 | // Turn pred_cast(xor x, -1) into xor(pred_cast x, -1), in order to produce |
| 15324 | // more VPNOT which might get folded as else predicates. |
| 15325 | if (Op.getValueType() == MVT::i32 && isBitwiseNot(V: Op)) { |
| 15326 | SDValue X = |
| 15327 | DCI.DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT, Operand: Op->getOperand(Num: 0)); |
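| | // The all-ones constant is 65535 (0xFFFF) because only the bottom 16 bits of |
| | // the i32 carry the predicate. |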
| 15328 | SDValue C = DCI.DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT, |
| 15329 | Operand: DCI.DAG.getConstant(Val: 65535, DL: dl, VT: MVT::i32)); |
| 15330 | return DCI.DAG.getNode(Opcode: ISD::XOR, DL: dl, VT, N1: X, N2: C); |
| 15331 | } |
| 15332 | |
| 15333 | // Only the bottom 16 bits of the source register are used. |
| 15334 | if (Op.getValueType() == MVT::i32) { |
| 15335 | APInt DemandedMask = APInt::getLowBitsSet(numBits: 32, loBitsSet: 16); |
| 15336 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); |
| 15337 | if (TLI.SimplifyDemandedBits(Op, DemandedBits: DemandedMask, DCI)) |
| 15338 | return SDValue(N, 0); |
| 15339 | } |
| 15340 | return SDValue(); |
| 15341 | } |
| 15342 | |
| 15343 | static SDValue PerformVECTOR_REG_CASTCombine(SDNode *N, SelectionDAG &DAG, |
| 15344 | const ARMSubtarget *ST) { |
| 15345 | EVT VT = N->getValueType(ResNo: 0); |
| 15346 | SDValue Op = N->getOperand(Num: 0); |
| 15347 | SDLoc dl(N); |
| 15348 | |
| 15349 | // Under Little endian, a VECTOR_REG_CAST is equivalent to a BITCAST |
| 15350 | if (ST->isLittle()) |
| 15351 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Op); |
| 15352 | |
| 15353 | // VT VECTOR_REG_CAST (VT Op) -> Op |
| 15354 | if (Op.getValueType() == VT) |
| 15355 | return Op; |
| 15356 | // VECTOR_REG_CAST undef -> undef |
| 15357 | if (Op.isUndef()) |
| 15358 | return DAG.getUNDEF(VT); |
| 15359 | |
| 15360 | // VECTOR_REG_CAST(VECTOR_REG_CAST(x)) == VECTOR_REG_CAST(x) |
| 15361 | if (Op->getOpcode() == ARMISD::VECTOR_REG_CAST) { |
| 15362 | // If the valuetypes are the same, we can remove the cast entirely. |
| 15363 | if (Op->getOperand(Num: 0).getValueType() == VT) |
| 15364 | return Op->getOperand(Num: 0); |
| 15365 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Op->getOperand(Num: 0)); |
| 15366 | } |
| 15367 | |
| 15368 | return SDValue(); |
| 15369 | } |
| 15370 | |
| 15371 | static SDValue PerformVCMPCombine(SDNode *N, SelectionDAG &DAG, |
| 15372 | const ARMSubtarget *Subtarget) { |
| 15373 | if (!Subtarget->hasMVEIntegerOps()) |
| 15374 | return SDValue(); |
| 15375 | |
| 15376 | EVT VT = N->getValueType(ResNo: 0); |
| 15377 | SDValue Op0 = N->getOperand(Num: 0); |
| 15378 | SDValue Op1 = N->getOperand(Num: 1); |
| 15379 | ARMCC::CondCodes Cond = (ARMCC::CondCodes)N->getConstantOperandVal(Num: 2); |
| 15380 | SDLoc dl(N); |
| 15381 | |
| 15382 | // vcmp X, 0, cc -> vcmpz X, cc |
| 15383 | if (isZeroVector(N: Op1)) |
| 15384 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: Op0, N2: N->getOperand(Num: 2)); |
| 15385 | |
| 15386 | unsigned SwappedCond = getSwappedCondition(CC: Cond); |
| 15387 | if (isValidMVECond(CC: SwappedCond, IsFloat: VT.isFloatingPoint())) { |
| 15388 | // vcmp 0, X, cc -> vcmpz X, reversed(cc) |
| 15389 | if (isZeroVector(N: Op0)) |
| 15390 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: Op1, |
| 15391 | N2: DAG.getConstant(Val: SwappedCond, DL: dl, VT: MVT::i32)); |
| 15392 | // vcmp vdup(Y), X, cc -> vcmp X, vdup(Y), reversed(cc) |
| 15393 | if (Op0->getOpcode() == ARMISD::VDUP && Op1->getOpcode() != ARMISD::VDUP) |
| 15394 | return DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT, N1: Op1, N2: Op0, |
| 15395 | N3: DAG.getConstant(Val: SwappedCond, DL: dl, VT: MVT::i32)); |
| 15396 | } |
| 15397 | |
| 15398 | return SDValue(); |
| 15399 | } |
| 15400 | |
| 15401 | /// PerformInsertEltCombine - Target-specific dag combine xforms for |
| 15402 | /// ISD::INSERT_VECTOR_ELT. |
| 15403 | static SDValue PerformInsertEltCombine(SDNode *N, |
| 15404 | TargetLowering::DAGCombinerInfo &DCI) { |
| 15405 | // Bitcast an i64 load inserted into a vector to f64. |
| 15406 | // Otherwise, the i64 value will be legalized to a pair of i32 values. |
| 15407 | EVT VT = N->getValueType(ResNo: 0); |
| 15408 | SDNode *Elt = N->getOperand(Num: 1).getNode(); |
| 15409 | if (VT.getVectorElementType() != MVT::i64 || |
| 15410 | !ISD::isNormalLoad(N: Elt) || cast<LoadSDNode>(Val: Elt)->isVolatile()) |
| 15411 | return SDValue(); |
| 15412 | |
| 15413 | SelectionDAG &DAG = DCI.DAG; |
| 15414 | SDLoc dl(N); |
| 15415 | EVT FloatVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::f64, |
| 15416 | NumElements: VT.getVectorNumElements()); |
| 15417 | SDValue Vec = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: FloatVT, Operand: N->getOperand(Num: 0)); |
| 15418 | SDValue V = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f64, Operand: N->getOperand(Num: 1)); |
| 15419 | // Make the DAGCombiner fold the bitcasts. |
| 15420 | DCI.AddToWorklist(N: Vec.getNode()); |
| 15421 | DCI.AddToWorklist(N: V.getNode()); |
| 15422 | SDValue InsElt = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: FloatVT, |
| 15423 | N1: Vec, N2: V, N3: N->getOperand(Num: 2)); |
| 15424 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: InsElt); |
| 15425 | } |
| 15426 | |
| 15427 | // Convert a pair of extracts from the same base vector to a VMOVRRD. Either |
| 15428 | // directly or bitcast to an integer if the original is a float vector. |
| 15429 | // extract(x, n); extract(x, n+1) -> VMOVRRD(extract v2f64 x, n/2) |
| 15430 | // bitcast(extract(x, n)); bitcast(extract(x, n+1)) -> VMOVRRD(extract x, n/2) |
| 15431 | static SDValue |
| 15432 | PerformExtractEltToVMOVRRD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
| 15433 | EVT VT = N->getValueType(ResNo: 0); |
| 15434 | SDLoc dl(N); |
| 15435 | |
| 15436 | if (!DCI.isAfterLegalizeDAG() || VT != MVT::i32 || |
| 15437 | !DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT: MVT::f64)) |
| 15438 | return SDValue(); |
| 15439 | |
| 15440 | SDValue Ext = SDValue(N, 0); |
| 15441 | if (Ext.getOpcode() == ISD::BITCAST && |
| 15442 | Ext.getOperand(i: 0).getValueType() == MVT::f32) |
| 15443 | Ext = Ext.getOperand(i: 0); |
| 15444 | if (Ext.getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
| 15445 | !isa<ConstantSDNode>(Val: Ext.getOperand(i: 1)) || |
| 15446 | Ext.getConstantOperandVal(i: 1) % 2 != 0) |
| 15447 | return SDValue(); |
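| | // Leave extracts whose only use is an int-to-fp conversion alone; moving |
| | // those through a GPR pair would not help. |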
| 15448 | if (Ext->hasOneUse() && (Ext->user_begin()->getOpcode() == ISD::SINT_TO_FP || |
| 15449 | Ext->user_begin()->getOpcode() == ISD::UINT_TO_FP)) |
| 15450 | return SDValue(); |
| 15451 | |
| 15452 | SDValue Op0 = Ext.getOperand(i: 0); |
| 15453 | EVT VecVT = Op0.getValueType(); |
| 15454 | unsigned ResNo = Op0.getResNo(); |
| 15455 | unsigned Lane = Ext.getConstantOperandVal(i: 1); |
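| | // Only handle 4 x 32-bit source vectors, so that a pair of adjacent lanes |
| | // maps onto one f64 lane of the v2f64 cast below. |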
| 15456 | if (VecVT.getVectorNumElements() != 4) |
| 15457 | return SDValue(); |
| 15458 | |
| 15459 | // Find another extract, of Lane + 1 |
| 15460 | auto OtherIt = find_if(Range: Op0->users(), P: [&](SDNode *V) { |
| 15461 | return V->getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 15462 | isa<ConstantSDNode>(Val: V->getOperand(Num: 1)) && |
| 15463 | V->getConstantOperandVal(Num: 1) == Lane + 1 && |
| 15464 | V->getOperand(Num: 0).getResNo() == ResNo; |
| 15465 | }); |
| 15466 | if (OtherIt == Op0->users().end()) |
| 15467 | return SDValue(); |
| 15468 | |
| 15469 | // For float extracts, we need to be converting to an i32 for both vector |
| 15470 | // lanes. |
| 15471 | SDValue OtherExt(*OtherIt, 0); |
| 15472 | if (OtherExt.getValueType() != MVT::i32) { |
| 15473 | if (!OtherExt->hasOneUse() || |
| 15474 | OtherExt->user_begin()->getOpcode() != ISD::BITCAST || |
| 15475 | OtherExt->user_begin()->getValueType(ResNo: 0) != MVT::i32) |
| 15476 | return SDValue(); |
| 15477 | OtherExt = SDValue(*OtherExt->user_begin(), 0); |
| 15478 | } |
| 15479 | |
| 15480 | // Convert the type to a f64 and extract with a VMOVRRD. |
| 15481 | SDValue F64 = DCI.DAG.getNode( |
| 15482 | Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, |
| 15483 | N1: DCI.DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v2f64, Operand: Op0), |
| 15484 | N2: DCI.DAG.getConstant(Val: Ext.getConstantOperandVal(i: 1) / 2, DL: dl, VT: MVT::i32)); |
| 15485 | SDValue VMOVRRD = |
| 15486 | DCI.DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, ResultTys: {MVT::i32, MVT::i32}, Ops: F64); |
| 15487 | |
| 15488 | DCI.CombineTo(N: OtherExt.getNode(), Res: SDValue(VMOVRRD.getNode(), 1)); |
| 15489 | return VMOVRRD; |
| 15490 | } |
| 15491 | |
| 15492 | static SDValue PerformExtractEltCombine(SDNode *N, |
| 15493 | TargetLowering::DAGCombinerInfo &DCI, |
| 15494 | const ARMSubtarget *ST) { |
| 15495 | SDValue Op0 = N->getOperand(Num: 0); |
| 15496 | EVT VT = N->getValueType(ResNo: 0); |
| 15497 | SDLoc dl(N); |
| 15498 | |
| 15499 | // extract (vdup x) -> x |
| 15500 | if (Op0->getOpcode() == ARMISD::VDUP) { |
| 15501 | SDValue X = Op0->getOperand(Num: 0); |
| 15502 | if (VT == MVT::f16 && X.getValueType() == MVT::i32) |
| 15503 | return DCI.DAG.getNode(Opcode: ARMISD::VMOVhr, DL: dl, VT, Operand: X); |
| 15504 | if (VT == MVT::i32 && X.getValueType() == MVT::f16) |
| 15505 | return DCI.DAG.getNode(Opcode: ARMISD::VMOVrh, DL: dl, VT, Operand: X); |
| 15506 | if (VT == MVT::f32 && X.getValueType() == MVT::i32) |
| 15507 | return DCI.DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: X); |
| 15508 | |
| 15509 | while (X.getValueType() != VT && X->getOpcode() == ISD::BITCAST) |
| 15510 | X = X->getOperand(Num: 0); |
| 15511 | if (X.getValueType() == VT) |
| 15512 | return X; |
| 15513 | } |
| 15514 | |
| 15515 | // extract ARM_BUILD_VECTOR -> x |
| 15516 | if (Op0->getOpcode() == ARMISD::BUILD_VECTOR && |
| 15517 | isa<ConstantSDNode>(Val: N->getOperand(Num: 1)) && |
| 15518 | N->getConstantOperandVal(Num: 1) < Op0.getNumOperands()) { |
| 15519 | return Op0.getOperand(i: N->getConstantOperandVal(Num: 1)); |
| 15520 | } |
| 15521 | |
| 15522 | // extract(bitcast(BUILD_VECTOR(VMOVDRR(a, b), ..))) -> a or b |
| 15523 | if (Op0.getValueType() == MVT::v4i32 && |
| 15524 | isa<ConstantSDNode>(Val: N->getOperand(Num: 1)) && |
| 15525 | Op0.getOpcode() == ISD::BITCAST && |
| 15526 | Op0.getOperand(i: 0).getOpcode() == ISD::BUILD_VECTOR && |
| 15527 | Op0.getOperand(i: 0).getValueType() == MVT::v2f64) { |
| 15528 | SDValue BV = Op0.getOperand(i: 0); |
| 15529 | unsigned Offset = N->getConstantOperandVal(Num: 1); |
| 15530 | SDValue MOV = BV.getOperand(i: Offset < 2 ? 0 : 1); |
| 15531 | if (MOV.getOpcode() == ARMISD::VMOVDRR) |
| 15532 | return MOV.getOperand(i: ST->isLittle() ? Offset % 2 : 1 - Offset % 2); |
| 15533 | } |
| 15534 | |
| 15535 | // extract x, n; extract x, n+1 -> VMOVRRD x |
| 15536 | if (SDValue R = PerformExtractEltToVMOVRRD(N, DCI)) |
| 15537 | return R; |
| 15538 | |
| 15539 | // extract (MVETrunc(x)) -> extract x |
| 15540 | if (Op0->getOpcode() == ARMISD::MVETRUNC) { |
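| | // MVETRUNC concatenates the truncations of its two operands, so map the |
| | // extract index onto the corresponding source operand and its lane. |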
| 15541 | unsigned Idx = N->getConstantOperandVal(Num: 1); |
| 15542 | unsigned Vec = |
| 15543 | Idx / Op0->getOperand(Num: 0).getValueType().getVectorNumElements(); |
| 15544 | unsigned SubIdx = |
| 15545 | Idx % Op0->getOperand(Num: 0).getValueType().getVectorNumElements(); |
| 15546 | return DCI.DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT, N1: Op0.getOperand(i: Vec), |
| 15547 | N2: DCI.DAG.getConstant(Val: SubIdx, DL: dl, VT: MVT::i32)); |
| 15548 | } |
| 15549 | |
| 15550 | return SDValue(); |
| 15551 | } |
| 15552 | |
| 15553 | static SDValue PerformSignExtendInregCombine(SDNode *N, SelectionDAG &DAG) { |
| 15554 | SDValue Op = N->getOperand(Num: 0); |
| 15555 | EVT VT = N->getValueType(ResNo: 0); |
| 15556 | |
| 15557 | // sext_inreg(VGETLANEu) -> VGETLANEs |
| 15558 | if (Op.getOpcode() == ARMISD::VGETLANEu && |
| 15559 | cast<VTSDNode>(Val: N->getOperand(Num: 1))->getVT() == |
| 15560 | Op.getOperand(i: 0).getValueType().getScalarType()) |
| 15561 | return DAG.getNode(Opcode: ARMISD::VGETLANEs, DL: SDLoc(N), VT, N1: Op.getOperand(i: 0), |
| 15562 | N2: Op.getOperand(i: 1)); |
| 15563 | |
| 15564 | return SDValue(); |
| 15565 | } |
| 15566 | |
| 15567 | static SDValue |
| 15568 | PerformInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
| 15569 | SDValue Vec = N->getOperand(Num: 0); |
| 15570 | SDValue SubVec = N->getOperand(Num: 1); |
| 15571 | uint64_t IdxVal = N->getConstantOperandVal(Num: 2); |
| 15572 | EVT VecVT = Vec.getValueType(); |
| 15573 | EVT SubVT = SubVec.getValueType(); |
| 15574 | |
| 15575 | // Only do this for legal fixed vector types. |
| 15576 | if (!VecVT.isFixedLengthVector() || |
| 15577 | !DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT: VecVT) || |
| 15578 | !DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT: SubVT)) |
| 15579 | return SDValue(); |
| 15580 | |
| 15581 | // Ignore widening patterns. |
| 15582 | if (IdxVal == 0 && Vec.isUndef()) |
| 15583 | return SDValue(); |
| 15584 | |
| 15585 | // Subvector must be half the width and an "aligned" insertion. |
| 15586 | unsigned NumSubElts = SubVT.getVectorNumElements(); |
| 15587 | if ((SubVT.getSizeInBits() * 2) != VecVT.getSizeInBits() || |
| 15588 | (IdxVal != 0 && IdxVal != NumSubElts)) |
| 15589 | return SDValue(); |
| 15590 | |
| 15591 | // Fold insert_subvector -> concat_vectors |
| 15592 | // insert_subvector(Vec,Sub,lo) -> concat_vectors(Sub,extract(Vec,hi)) |
| 15593 | // insert_subvector(Vec,Sub,hi) -> concat_vectors(extract(Vec,lo),Sub) |
| 15594 | SDLoc DL(N); |
| 15595 | SDValue Lo, Hi; |
| 15596 | if (IdxVal == 0) { |
| 15597 | Lo = SubVec; |
| 15598 | Hi = DCI.DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT: SubVT, N1: Vec, |
| 15599 | N2: DCI.DAG.getVectorIdxConstant(Val: NumSubElts, DL)); |
| 15600 | } else { |
| 15601 | Lo = DCI.DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT: SubVT, N1: Vec, |
| 15602 | N2: DCI.DAG.getVectorIdxConstant(Val: 0, DL)); |
| 15603 | Hi = SubVec; |
| 15604 | } |
| 15605 | return DCI.DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: VecVT, N1: Lo, N2: Hi); |
| 15606 | } |
| 15607 | |
| 15608 | // shuffle(MVETrunc(x, y)) -> VMOVN(x, y) |
| 15609 | static SDValue PerformShuffleVMOVNCombine(ShuffleVectorSDNode *N, |
| 15610 | SelectionDAG &DAG) { |
| 15611 | SDValue Trunc = N->getOperand(Num: 0); |
| 15612 | EVT VT = Trunc.getValueType(); |
| 15613 | if (Trunc.getOpcode() != ARMISD::MVETRUNC || !N->getOperand(Num: 1).isUndef()) |
| 15614 | return SDValue(); |
| 15615 | |
| 15616 | SDLoc DL(Trunc); |
| 15617 | if (isVMOVNTruncMask(M: N->getMask(), ToVT: VT, rev: false)) |
| 15618 | return DAG.getNode( |
| 15619 | Opcode: ARMISD::VMOVN, DL, VT, |
| 15620 | N1: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: Trunc.getOperand(i: 0)), |
| 15621 | N2: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: Trunc.getOperand(i: 1)), |
| 15622 | N3: DAG.getConstant(Val: 1, DL, VT: MVT::i32)); |
| 15623 | else if (isVMOVNTruncMask(M: N->getMask(), ToVT: VT, rev: true)) |
| 15624 | return DAG.getNode( |
| 15625 | Opcode: ARMISD::VMOVN, DL, VT, |
| 15626 | N1: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: Trunc.getOperand(i: 1)), |
| 15627 | N2: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: Trunc.getOperand(i: 0)), |
| 15628 | N3: DAG.getConstant(Val: 1, DL, VT: MVT::i32)); |
| 15629 | return SDValue(); |
| 15630 | } |
| 15631 | |
| 15632 | /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for |
| 15633 | /// ISD::VECTOR_SHUFFLE. |
| 15634 | static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { |
| 15635 | if (SDValue R = PerformShuffleVMOVNCombine(N: cast<ShuffleVectorSDNode>(Val: N), DAG)) |
| 15636 | return R; |
| 15637 | |
| 15638 | // The LLVM shufflevector instruction does not require the shuffle mask |
| 15639 | // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does |
| 15640 | // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the |
| 15641 | // operands do not match the mask length, they are extended by concatenating |
| 15642 | // them with undef vectors. That is probably the right thing for other |
| 15643 | // targets, but for NEON it is better to concatenate two double-register |
| 15644 | // size vector operands into a single quad-register size vector. Do that |
| 15645 | // transformation here: |
| 15646 | // shuffle(concat(v1, undef), concat(v2, undef)) -> |
| 15647 | // shuffle(concat(v1, v2), undef) |
| 15648 | SDValue Op0 = N->getOperand(Num: 0); |
| 15649 | SDValue Op1 = N->getOperand(Num: 1); |
| 15650 | if (Op0.getOpcode() != ISD::CONCAT_VECTORS || |
| 15651 | Op1.getOpcode() != ISD::CONCAT_VECTORS || |
| 15652 | Op0.getNumOperands() != 2 || |
| 15653 | Op1.getNumOperands() != 2) |
| 15654 | return SDValue(); |
| 15655 | SDValue Concat0Op1 = Op0.getOperand(i: 1); |
| 15656 | SDValue Concat1Op1 = Op1.getOperand(i: 1); |
| 15657 | if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef()) |
| 15658 | return SDValue(); |
| 15659 | // Skip the transformation if any of the types are illegal. |
| 15660 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 15661 | EVT VT = N->getValueType(ResNo: 0); |
| 15662 | if (!TLI.isTypeLegal(VT) || |
| 15663 | !TLI.isTypeLegal(VT: Concat0Op1.getValueType()) || |
| 15664 | !TLI.isTypeLegal(VT: Concat1Op1.getValueType())) |
| 15665 | return SDValue(); |
| 15666 | |
| 15667 | SDValue NewConcat = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: SDLoc(N), VT, |
| 15668 | N1: Op0.getOperand(i: 0), N2: Op1.getOperand(i: 0)); |
| 15669 | // Translate the shuffle mask. |
| 15670 | SmallVector<int, 16> NewMask; |
| 15671 | unsigned NumElts = VT.getVectorNumElements(); |
| 15672 | unsigned HalfElts = NumElts/2; |
| 15673 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Val: N); |
| 15674 | for (unsigned n = 0; n < NumElts; ++n) { |
| 15675 | int MaskElt = SVN->getMaskElt(Idx: n); |
| 15676 | int NewElt = -1; |
| 15677 | if (MaskElt < (int)HalfElts) |
| 15678 | NewElt = MaskElt; |
| 15679 | else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) |
| 15680 | NewElt = HalfElts + MaskElt - NumElts; |
| 15681 | NewMask.push_back(Elt: NewElt); |
| 15682 | } |
| 15683 | return DAG.getVectorShuffle(VT, dl: SDLoc(N), N1: NewConcat, |
| 15684 | N2: DAG.getUNDEF(VT), Mask: NewMask); |
| 15685 | } |
| 15686 | |
| 15687 | /// Load/store instruction that can be merged with a base address |
| 15688 | /// update |
| 15689 | struct BaseUpdateTarget { |
| 15690 | SDNode *N; |
| 15691 | bool isIntrinsic; |
| 15692 | bool isStore; |
| 15693 | unsigned AddrOpIdx; |
| 15694 | }; |
| 15695 | |
| 15696 | struct BaseUpdateUser { |
| 15697 | /// Instruction that updates a pointer |
| 15698 | SDNode *N; |
| 15699 | /// Pointer increment operand |
| 15700 | SDValue Inc; |
| 15701 | /// Pointer increment value if it is a constant, or 0 otherwise |
| 15702 | unsigned ConstInc; |
| 15703 | }; |
| 15704 | |
| 15705 | static bool isValidBaseUpdate(SDNode *N, SDNode *User) { |
| 15706 | // Check that the add is independent of the load/store. |
| 15707 | // Otherwise, folding it would create a cycle. Search through Addr |
| 15708 | // as well, since the User may not be a direct user of Addr and |
| 15709 | // only share a base pointer. |
| 15710 | SmallPtrSet<const SDNode *, 32> Visited; |
| 15711 | SmallVector<const SDNode *, 16> Worklist; |
| 15712 | Worklist.push_back(Elt: N); |
| 15713 | Worklist.push_back(Elt: User); |
| 15714 | const unsigned MaxSteps = 1024; |
| 15715 | if (SDNode::hasPredecessorHelper(N, Visited, Worklist, MaxSteps) || |
| 15716 | SDNode::hasPredecessorHelper(N: User, Visited, Worklist, MaxSteps)) |
| 15717 | return false; |
| 15718 | return true; |
| 15719 | } |
| 15720 | |
| 15721 | static bool TryCombineBaseUpdate(struct BaseUpdateTarget &Target, |
| 15722 | struct BaseUpdateUser &User, |
| 15723 | bool SimpleConstIncOnly, |
| 15724 | TargetLowering::DAGCombinerInfo &DCI) { |
| 15725 | SelectionDAG &DAG = DCI.DAG; |
| 15726 | SDNode *N = Target.N; |
| 15727 | MemSDNode *MemN = cast<MemSDNode>(Val: N); |
| 15728 | SDLoc dl(N); |
| 15729 | |
| 15730 | // Find the new opcode for the updating load/store. |
| 15731 | bool isLoadOp = true; |
| 15732 | bool isLaneOp = false; |
| 15733 | // Workaround for vst1x and vld1x intrinsics which do not have alignment |
| 15734 | // as an operand. |
| 15735 | bool hasAlignment = true; |
| 15736 | unsigned NewOpc = 0; |
| 15737 | unsigned NumVecs = 0; |
| 15738 | if (Target.isIntrinsic) { |
| 15739 | unsigned IntNo = N->getConstantOperandVal(Num: 1); |
| 15740 | switch (IntNo) { |
| 15741 | default: |
| 15742 | llvm_unreachable("unexpected intrinsic for Neon base update" ); |
| 15743 | case Intrinsic::arm_neon_vld1: |
| 15744 | NewOpc = ARMISD::VLD1_UPD; |
| 15745 | NumVecs = 1; |
| 15746 | break; |
| 15747 | case Intrinsic::arm_neon_vld2: |
| 15748 | NewOpc = ARMISD::VLD2_UPD; |
| 15749 | NumVecs = 2; |
| 15750 | break; |
| 15751 | case Intrinsic::arm_neon_vld3: |
| 15752 | NewOpc = ARMISD::VLD3_UPD; |
| 15753 | NumVecs = 3; |
| 15754 | break; |
| 15755 | case Intrinsic::arm_neon_vld4: |
| 15756 | NewOpc = ARMISD::VLD4_UPD; |
| 15757 | NumVecs = 4; |
| 15758 | break; |
| 15759 | case Intrinsic::arm_neon_vld1x2: |
| 15760 | NewOpc = ARMISD::VLD1x2_UPD; |
| 15761 | NumVecs = 2; |
| 15762 | hasAlignment = false; |
| 15763 | break; |
| 15764 | case Intrinsic::arm_neon_vld1x3: |
| 15765 | NewOpc = ARMISD::VLD1x3_UPD; |
| 15766 | NumVecs = 3; |
| 15767 | hasAlignment = false; |
| 15768 | break; |
| 15769 | case Intrinsic::arm_neon_vld1x4: |
| 15770 | NewOpc = ARMISD::VLD1x4_UPD; |
| 15771 | NumVecs = 4; |
| 15772 | hasAlignment = false; |
| 15773 | break; |
| 15774 | case Intrinsic::arm_neon_vld2dup: |
| 15775 | NewOpc = ARMISD::VLD2DUP_UPD; |
| 15776 | NumVecs = 2; |
| 15777 | break; |
| 15778 | case Intrinsic::arm_neon_vld3dup: |
| 15779 | NewOpc = ARMISD::VLD3DUP_UPD; |
| 15780 | NumVecs = 3; |
| 15781 | break; |
| 15782 | case Intrinsic::arm_neon_vld4dup: |
| 15783 | NewOpc = ARMISD::VLD4DUP_UPD; |
| 15784 | NumVecs = 4; |
| 15785 | break; |
| 15786 | case Intrinsic::arm_neon_vld2lane: |
| 15787 | NewOpc = ARMISD::VLD2LN_UPD; |
| 15788 | NumVecs = 2; |
| 15789 | isLaneOp = true; |
| 15790 | break; |
| 15791 | case Intrinsic::arm_neon_vld3lane: |
| 15792 | NewOpc = ARMISD::VLD3LN_UPD; |
| 15793 | NumVecs = 3; |
| 15794 | isLaneOp = true; |
| 15795 | break; |
| 15796 | case Intrinsic::arm_neon_vld4lane: |
| 15797 | NewOpc = ARMISD::VLD4LN_UPD; |
| 15798 | NumVecs = 4; |
| 15799 | isLaneOp = true; |
| 15800 | break; |
| 15801 | case Intrinsic::arm_neon_vst1: |
| 15802 | NewOpc = ARMISD::VST1_UPD; |
| 15803 | NumVecs = 1; |
| 15804 | isLoadOp = false; |
| 15805 | break; |
| 15806 | case Intrinsic::arm_neon_vst2: |
| 15807 | NewOpc = ARMISD::VST2_UPD; |
| 15808 | NumVecs = 2; |
| 15809 | isLoadOp = false; |
| 15810 | break; |
| 15811 | case Intrinsic::arm_neon_vst3: |
| 15812 | NewOpc = ARMISD::VST3_UPD; |
| 15813 | NumVecs = 3; |
| 15814 | isLoadOp = false; |
| 15815 | break; |
| 15816 | case Intrinsic::arm_neon_vst4: |
| 15817 | NewOpc = ARMISD::VST4_UPD; |
| 15818 | NumVecs = 4; |
| 15819 | isLoadOp = false; |
| 15820 | break; |
| 15821 | case Intrinsic::arm_neon_vst2lane: |
| 15822 | NewOpc = ARMISD::VST2LN_UPD; |
| 15823 | NumVecs = 2; |
| 15824 | isLoadOp = false; |
| 15825 | isLaneOp = true; |
| 15826 | break; |
| 15827 | case Intrinsic::arm_neon_vst3lane: |
| 15828 | NewOpc = ARMISD::VST3LN_UPD; |
| 15829 | NumVecs = 3; |
| 15830 | isLoadOp = false; |
| 15831 | isLaneOp = true; |
| 15832 | break; |
| 15833 | case Intrinsic::arm_neon_vst4lane: |
| 15834 | NewOpc = ARMISD::VST4LN_UPD; |
| 15835 | NumVecs = 4; |
| 15836 | isLoadOp = false; |
| 15837 | isLaneOp = true; |
| 15838 | break; |
| 15839 | case Intrinsic::arm_neon_vst1x2: |
| 15840 | NewOpc = ARMISD::VST1x2_UPD; |
| 15841 | NumVecs = 2; |
| 15842 | isLoadOp = false; |
| 15843 | hasAlignment = false; |
| 15844 | break; |
| 15845 | case Intrinsic::arm_neon_vst1x3: |
| 15846 | NewOpc = ARMISD::VST1x3_UPD; |
| 15847 | NumVecs = 3; |
| 15848 | isLoadOp = false; |
| 15849 | hasAlignment = false; |
| 15850 | break; |
| 15851 | case Intrinsic::arm_neon_vst1x4: |
| 15852 | NewOpc = ARMISD::VST1x4_UPD; |
| 15853 | NumVecs = 4; |
| 15854 | isLoadOp = false; |
| 15855 | hasAlignment = false; |
| 15856 | break; |
| 15857 | } |
| 15858 | } else { |
| 15859 | isLaneOp = true; |
| 15860 | switch (N->getOpcode()) { |
| 15861 | default: |
| 15862 | llvm_unreachable("unexpected opcode for Neon base update" ); |
| 15863 | case ARMISD::VLD1DUP: |
| 15864 | NewOpc = ARMISD::VLD1DUP_UPD; |
| 15865 | NumVecs = 1; |
| 15866 | break; |
| 15867 | case ARMISD::VLD2DUP: |
| 15868 | NewOpc = ARMISD::VLD2DUP_UPD; |
| 15869 | NumVecs = 2; |
| 15870 | break; |
| 15871 | case ARMISD::VLD3DUP: |
| 15872 | NewOpc = ARMISD::VLD3DUP_UPD; |
| 15873 | NumVecs = 3; |
| 15874 | break; |
| 15875 | case ARMISD::VLD4DUP: |
| 15876 | NewOpc = ARMISD::VLD4DUP_UPD; |
| 15877 | NumVecs = 4; |
| 15878 | break; |
| 15879 | case ISD::LOAD: |
| 15880 | NewOpc = ARMISD::VLD1_UPD; |
| 15881 | NumVecs = 1; |
| 15882 | isLaneOp = false; |
| 15883 | break; |
| 15884 | case ISD::STORE: |
| 15885 | NewOpc = ARMISD::VST1_UPD; |
| 15886 | NumVecs = 1; |
| 15887 | isLaneOp = false; |
| 15888 | isLoadOp = false; |
| 15889 | break; |
| 15890 | } |
| 15891 | } |
| 15892 | |
| 15893 | // Find the size of memory referenced by the load/store. |
| 15894 | EVT VecTy; |
| 15895 | if (isLoadOp) { |
| 15896 | VecTy = N->getValueType(ResNo: 0); |
| 15897 | } else if (Target.isIntrinsic) { |
| 15898 | VecTy = N->getOperand(Num: Target.AddrOpIdx + 1).getValueType(); |
| 15899 | } else { |
| 15900 | assert(Target.isStore && |
| 15901 | "Node has to be a load, a store, or an intrinsic!" ); |
| 15902 | VecTy = N->getOperand(Num: 1).getValueType(); |
| 15903 | } |
| 15904 | |
| 15905 | bool isVLDDUPOp = |
| 15906 | NewOpc == ARMISD::VLD1DUP_UPD || NewOpc == ARMISD::VLD2DUP_UPD || |
| 15907 | NewOpc == ARMISD::VLD3DUP_UPD || NewOpc == ARMISD::VLD4DUP_UPD; |
| 15908 | |
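| | // Lane and dup operations access only a single element per vector, so scale |
| | // the memory size down to one lane. |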
| 15909 | unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; |
| 15910 | if (isLaneOp || isVLDDUPOp) |
| 15911 | NumBytes /= VecTy.getVectorNumElements(); |
| 15912 | |
| 15913 | if (NumBytes >= 3 * 16 && User.ConstInc != NumBytes) { |
| 15914 | // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two |
| 15915 | // separate instructions that make it harder to use a non-constant update. |
| 15916 | return false; |
| 15917 | } |
| 15918 | |
| 15919 | if (SimpleConstIncOnly && User.ConstInc != NumBytes) |
| 15920 | return false; |
| 15921 | |
| 15922 | if (!isValidBaseUpdate(N, User: User.N)) |
| 15923 | return false; |
| 15924 | |
| 15925 | // OK, we found an ADD we can fold into the base update. |
| 15926 | // Now, create a _UPD node, taking care of not breaking alignment. |
| 15927 | |
| 15928 | EVT AlignedVecTy = VecTy; |
| 15929 | Align Alignment = MemN->getAlign(); |
| 15930 | |
| 15931 | // If this is a less-than-standard-aligned load/store, change the type to |
| 15932 | // match the standard alignment. |
| 15933 | // The alignment is overlooked when selecting _UPD variants; and it's |
| 15934 | // easier to introduce bitcasts here than fix that. |
| 15935 | // There are 3 ways to get to this base-update combine: |
| 15936 | // - intrinsics: they are assumed to be properly aligned (to the standard |
| 15937 | // alignment of the memory type), so we don't need to do anything. |
| 15938 | // - ARMISD::VLDx nodes: they are only generated from the aforementioned |
| 15939 | // intrinsics, so, likewise, there's nothing to do. |
| 15940 | // - generic load/store instructions: the alignment is specified as an |
| 15941 | // explicit operand, rather than implicitly as the standard alignment |
| 15942 | // of the memory type (like the intrinsics). We need to change the |
| 15943 | // memory type to match the explicit alignment. That way, we don't |
| 15944 | // generate non-standard-aligned ARMISD::VLDx nodes. |
| 15945 | if (isa<LSBaseSDNode>(Val: N)) { |
| 15946 | if (Alignment.value() < VecTy.getScalarSizeInBits() / 8) { |
| 15947 | MVT EltTy = MVT::getIntegerVT(BitWidth: Alignment.value() * 8); |
| 15948 | assert(NumVecs == 1 && "Unexpected multi-element generic load/store." ); |
| 15949 | assert(!isLaneOp && "Unexpected generic load/store lane." ); |
| 15950 | unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8); |
| 15951 | AlignedVecTy = MVT::getVectorVT(VT: EltTy, NumElements: NumElts); |
| 15952 | } |
| 15953 | // Don't set an explicit alignment on regular load/stores that we want |
| 15954 | // to transform to VLD/VST 1_UPD nodes. |
| 15955 | // This matches the behavior of regular load/stores, which only get an |
| 15956 | // explicit alignment if the MMO alignment is larger than the standard |
| 15957 | // alignment of the memory type. |
| 15958 | // Intrinsics, however, always get an explicit alignment, set to the |
| 15959 | // alignment of the MMO. |
| 15960 | Alignment = Align(1); |
| 15961 | } |
| 15962 | |
| 15963 | // Create the new updating load/store node. |
| 15964 | // First, create an SDVTList for the new updating node's results. |
| 15965 | EVT Tys[6]; |
| 15966 | unsigned NumResultVecs = (isLoadOp ? NumVecs : 0); |
| 15967 | unsigned n; |
| 15968 | for (n = 0; n < NumResultVecs; ++n) |
| 15969 | Tys[n] = AlignedVecTy; |
| 15970 | Tys[n++] = MVT::i32; |
| 15971 | Tys[n] = MVT::Other; |
| 15972 | SDVTList SDTys = DAG.getVTList(VTs: ArrayRef(Tys, NumResultVecs + 2)); |
| 15973 | |
| 15974 | // Then, gather the new node's operands. |
| 15975 | SmallVector<SDValue, 8> Ops; |
| 15976 | Ops.push_back(Elt: N->getOperand(Num: 0)); // incoming chain |
| 15977 | Ops.push_back(Elt: N->getOperand(Num: Target.AddrOpIdx)); |
| 15978 | Ops.push_back(Elt: User.Inc); |
| 15979 | |
| 15980 | if (StoreSDNode *StN = dyn_cast<StoreSDNode>(Val: N)) { |
| 15981 | // Try to match the intrinsic's signature |
| 15982 | Ops.push_back(Elt: StN->getValue()); |
| 15983 | } else { |
| 15984 | // Loads (and of course intrinsics) match the intrinsics' signature, |
| 15985 | // so just add all but the alignment operand. |
| 15986 | unsigned LastOperand = |
| 15987 | hasAlignment ? N->getNumOperands() - 1 : N->getNumOperands(); |
| 15988 | for (unsigned i = Target.AddrOpIdx + 1; i < LastOperand; ++i) |
| 15989 | Ops.push_back(Elt: N->getOperand(Num: i)); |
| 15990 | } |
| 15991 | |
| 15992 | // For all node types, the alignment operand is always the last one. |
| 15993 | Ops.push_back(Elt: DAG.getConstant(Val: Alignment.value(), DL: dl, VT: MVT::i32)); |
| 15994 | |
| 15995 | // If this is a non-standard-aligned STORE, the penultimate operand is the |
| 15996 | // stored value. Bitcast it to the aligned type. |
| 15997 | if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) { |
| 15998 | SDValue &StVal = Ops[Ops.size() - 2]; |
| 15999 | StVal = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: AlignedVecTy, Operand: StVal); |
| 16000 | } |
| 16001 | |
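| | // For lane operations the memory type is a single element; otherwise use the |
| | // (possibly realigned) vector type. |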
| 16002 | EVT LoadVT = isLaneOp ? VecTy.getVectorElementType() : AlignedVecTy; |
| 16003 | SDValue UpdN = DAG.getMemIntrinsicNode(Opcode: NewOpc, dl, VTList: SDTys, Ops, MemVT: LoadVT, |
| 16004 | MMO: MemN->getMemOperand()); |
| 16005 | |
| 16006 | // Update the uses. |
| 16007 | SmallVector<SDValue, 5> NewResults; |
| 16008 | for (unsigned i = 0; i < NumResultVecs; ++i) |
| 16009 | NewResults.push_back(Elt: SDValue(UpdN.getNode(), i)); |
| 16010 | |
| 16011 | // If this is a non-standard-aligned LOAD, the first result is the loaded |
| 16012 | // value. Bitcast it to the expected result type. |
| 16013 | if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) { |
| 16014 | SDValue &LdVal = NewResults[0]; |
| 16015 | LdVal = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecTy, Operand: LdVal); |
| 16016 | } |
| 16017 | |
| 16018 | NewResults.push_back(Elt: SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain |
| 16019 | DCI.CombineTo(N, To: NewResults); |
| 16020 | DCI.CombineTo(N: User.N, Res: SDValue(UpdN.getNode(), NumResultVecs)); |
| 16021 | |
| 16022 | return true; |
| 16023 | } |
| 16024 | |
| 16025 | // If (opcode ptr inc) is an ADD-like instruction, return the |
| 16026 | // increment value. Otherwise return 0. |
| 16027 | static unsigned getPointerConstIncrement(unsigned Opcode, SDValue Ptr, |
| 16028 | SDValue Inc, const SelectionDAG &DAG) { |
| 16029 | ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Val: Inc.getNode()); |
| 16030 | if (!CInc) |
| 16031 | return 0; |
| 16032 | |
| 16033 | switch (Opcode) { |
| 16034 | case ARMISD::VLD1_UPD: |
| 16035 | case ISD::ADD: |
| 16036 | return CInc->getZExtValue(); |
| 16037 | case ISD::OR: { |
| 16038 | if (DAG.haveNoCommonBitsSet(A: Ptr, B: Inc)) { |
| 16039 | // (OR ptr inc) is the same as (ADD ptr inc) |
| 16040 | return CInc->getZExtValue(); |
| 16041 | } |
| 16042 | return 0; |
| 16043 | } |
| 16044 | default: |
| 16045 | return 0; |
| 16046 | } |
| 16047 | } |
| 16048 | |
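| | // If N is a pointer increment with a constant operand (an ADD/OR, or the |
| | // write-back of a VLD1_UPD), return true and set *Ptr and *CInc to the base |
| | // pointer and the constant increment. |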
| 16049 | static bool findPointerConstIncrement(SDNode *N, SDValue *Ptr, SDValue *CInc) { |
| 16050 | switch (N->getOpcode()) { |
| 16051 | case ISD::ADD: |
| 16052 | case ISD::OR: { |
| 16053 | if (isa<ConstantSDNode>(Val: N->getOperand(Num: 1))) { |
| 16054 | *Ptr = N->getOperand(Num: 0); |
| 16055 | *CInc = N->getOperand(Num: 1); |
| 16056 | return true; |
| 16057 | } |
| 16058 | return false; |
| 16059 | } |
| 16060 | case ARMISD::VLD1_UPD: { |
| 16061 | if (isa<ConstantSDNode>(Val: N->getOperand(Num: 2))) { |
| 16062 | *Ptr = N->getOperand(Num: 1); |
| 16063 | *CInc = N->getOperand(Num: 2); |
| 16064 | return true; |
| 16065 | } |
| 16066 | return false; |
| 16067 | } |
| 16068 | default: |
| 16069 | return false; |
| 16070 | } |
| 16071 | } |
| 16072 | |
| 16073 | /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP, |
| 16074 | /// NEON load/store intrinsics, and generic vector load/stores, to merge |
| 16075 | /// base address updates. |
| 16076 | /// For generic load/stores, the memory type is assumed to be a vector. |
| 16077 | /// The caller is assumed to have checked legality. |
| 16078 | static SDValue CombineBaseUpdate(SDNode *N, |
| 16079 | TargetLowering::DAGCombinerInfo &DCI) { |
| 16080 | const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || |
| 16081 | N->getOpcode() == ISD::INTRINSIC_W_CHAIN); |
| 16082 | const bool isStore = N->getOpcode() == ISD::STORE; |
| 16083 | const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1); |
| 16084 | BaseUpdateTarget Target = {.N: N, .isIntrinsic: isIntrinsic, .isStore: isStore, .AddrOpIdx: AddrOpIdx}; |
| 16085 | |
| 16086 | // Limit the number of possible base-updates we look at to prevent degenerate |
| 16087 | // cases. |
| 16088 | unsigned MaxBaseUpdates = ArmMaxBaseUpdatesToCheck; |
| 16089 | |
| 16090 | SDValue Addr = N->getOperand(Num: AddrOpIdx); |
| 16091 | |
| 16092 | SmallVector<BaseUpdateUser, 8> BaseUpdates; |
| 16093 | |
| 16094 | // Search for a use of the address operand that is an increment. |
| 16095 | for (SDUse &Use : Addr->uses()) { |
| 16096 | SDNode *User = Use.getUser(); |
| 16097 | if (Use.getResNo() != Addr.getResNo() || User->getNumOperands() != 2) |
| 16098 | continue; |
| 16099 | |
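| | // The increment is whichever operand of the user is not the address. |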
| 16100 | SDValue Inc = User->getOperand(Num: Use.getOperandNo() == 1 ? 0 : 1); |
| 16101 | unsigned ConstInc = |
| 16102 | getPointerConstIncrement(Opcode: User->getOpcode(), Ptr: Addr, Inc, DAG: DCI.DAG); |
| 16103 | |
| 16104 | if (ConstInc || User->getOpcode() == ISD::ADD) { |
| 16105 | BaseUpdates.push_back(Elt: {.N: User, .Inc: Inc, .ConstInc: ConstInc}); |
| 16106 | if (BaseUpdates.size() >= MaxBaseUpdates) |
| 16107 | break; |
| 16108 | } |
| 16109 | } |
| 16110 | |
| 16111 | // If the address is a constant pointer increment itself, find |
| 16112 | // another constant increment that has the same base operand |
| 16113 | SDValue Base; |
| 16114 | SDValue CInc; |
| 16115 | if (findPointerConstIncrement(N: Addr.getNode(), Ptr: &Base, CInc: &CInc)) { |
| 16116 | unsigned Offset = |
| 16117 | getPointerConstIncrement(Opcode: Addr->getOpcode(), Ptr: Base, Inc: CInc, DAG: DCI.DAG); |
| 16118 | for (SDUse &Use : Base->uses()) { |
| 16119 | |
| 16120 | SDNode *User = Use.getUser(); |
| 16121 | if (Use.getResNo() != Base.getResNo() || User == Addr.getNode() || |
| 16122 | User->getNumOperands() != 2) |
| 16123 | continue; |
| 16124 | |
| 16125 | SDValue UserInc = User->getOperand(Num: Use.getOperandNo() == 0 ? 1 : 0); |
| 16126 | unsigned UserOffset = |
| 16127 | getPointerConstIncrement(Opcode: User->getOpcode(), Ptr: Base, Inc: UserInc, DAG: DCI.DAG); |
| 16128 | |
| 16129 | if (!UserOffset || UserOffset <= Offset) |
| 16130 | continue; |
| 16131 | |
| 16132 | unsigned NewConstInc = UserOffset - Offset; |
| 16133 | SDValue NewInc = DCI.DAG.getConstant(Val: NewConstInc, DL: SDLoc(N), VT: MVT::i32); |
| 16134 | BaseUpdates.push_back(Elt: {.N: User, .Inc: NewInc, .ConstInc: NewConstInc}); |
| 16135 | if (BaseUpdates.size() >= MaxBaseUpdates) |
| 16136 | break; |
| 16137 | } |
| 16138 | } |
| 16139 | |
| 16140 | // Try to fold the load/store with an update that matches memory |
| 16141 | // access size. This should work well for sequential loads. |
| 16142 | unsigned NumValidUpd = BaseUpdates.size(); |
| 16143 | for (unsigned I = 0; I < NumValidUpd; I++) { |
| 16144 | BaseUpdateUser &User = BaseUpdates[I]; |
| 16145 | if (TryCombineBaseUpdate(Target, User, /*SimpleConstIncOnly=*/true, DCI)) |
| 16146 | return SDValue(); |
| 16147 | } |
| 16148 | |
| 16149 | // Try to fold with other users. Non-constant updates are considered |
| 16150 | // first, and constant updates are sorted to not break a sequence of |
| 16151 | // strided accesses (if any). |
| 16152 | llvm::stable_sort(Range&: BaseUpdates, |
| 16153 | C: [](const BaseUpdateUser &LHS, const BaseUpdateUser &RHS) { |
| 16154 | return LHS.ConstInc < RHS.ConstInc; |
| 16155 | }); |
| 16156 | for (BaseUpdateUser &User : BaseUpdates) { |
| 16157 | if (TryCombineBaseUpdate(Target, User, /*SimpleConstIncOnly=*/false, DCI)) |
| 16158 | return SDValue(); |
| 16159 | } |
| 16160 | return SDValue(); |
| 16161 | } |
| 16162 | |
| 16163 | static SDValue PerformVLDCombine(SDNode *N, |
| 16164 | TargetLowering::DAGCombinerInfo &DCI) { |
| 16165 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 16166 | return SDValue(); |
| 16167 | |
| 16168 | return CombineBaseUpdate(N, DCI); |
| 16169 | } |
| 16170 | |
| 16171 | static SDValue PerformMVEVLDCombine(SDNode *N, |
| 16172 | TargetLowering::DAGCombinerInfo &DCI) { |
| 16173 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 16174 | return SDValue(); |
| 16175 | |
| 16176 | SelectionDAG &DAG = DCI.DAG; |
| 16177 | SDValue Addr = N->getOperand(Num: 2); |
| 16178 | MemSDNode *MemN = cast<MemSDNode>(Val: N); |
| 16179 | SDLoc dl(N); |
| 16180 | |
| 16181 | // For the stores, where there are multiple intrinsics, we only actually want |
| 16182 | // to post-inc the last of them. |
| 16183 | unsigned IntNo = N->getConstantOperandVal(Num: 1); |
| 16184 | if (IntNo == Intrinsic::arm_mve_vst2q && N->getConstantOperandVal(Num: 5) != 1) |
| 16185 | return SDValue(); |
| 16186 | if (IntNo == Intrinsic::arm_mve_vst4q && N->getConstantOperandVal(Num: 7) != 3) |
| 16187 | return SDValue(); |
| 16188 | |
| 16189 | // Search for a use of the address operand that is an increment. |
| 16190 | for (SDUse &Use : Addr->uses()) { |
| 16191 | SDNode *User = Use.getUser(); |
| 16192 | if (User->getOpcode() != ISD::ADD || Use.getResNo() != Addr.getResNo()) |
| 16193 | continue; |
| 16194 | |
| 16195 | // Check that the add is independent of the load/store. Otherwise, folding |
| 16196 | // it would create a cycle. We can avoid searching through Addr as it's a |
| 16197 | // predecessor to both. |
| 16198 | SmallPtrSet<const SDNode *, 32> Visited; |
| 16199 | SmallVector<const SDNode *, 16> Worklist; |
| 16200 | Visited.insert(Ptr: Addr.getNode()); |
| 16201 | Worklist.push_back(Elt: N); |
| 16202 | Worklist.push_back(Elt: User); |
| 16203 | const unsigned MaxSteps = 1024; |
| 16204 | if (SDNode::hasPredecessorHelper(N, Visited, Worklist, MaxSteps) || |
| 16205 | SDNode::hasPredecessorHelper(N: User, Visited, Worklist, MaxSteps)) |
| 16206 | continue; |
| 16207 | |
| 16208 | // Find the new opcode for the updating load/store. |
| 16209 | bool isLoadOp = true; |
| 16210 | unsigned NewOpc = 0; |
| 16211 | unsigned NumVecs = 0; |
| 16212 | switch (IntNo) { |
| 16213 | default: |
| 16214 | llvm_unreachable("unexpected intrinsic for MVE VLDn combine" ); |
| 16215 | case Intrinsic::arm_mve_vld2q: |
| 16216 | NewOpc = ARMISD::VLD2_UPD; |
| 16217 | NumVecs = 2; |
| 16218 | break; |
| 16219 | case Intrinsic::arm_mve_vld4q: |
| 16220 | NewOpc = ARMISD::VLD4_UPD; |
| 16221 | NumVecs = 4; |
| 16222 | break; |
| 16223 | case Intrinsic::arm_mve_vst2q: |
| 16224 | NewOpc = ARMISD::VST2_UPD; |
| 16225 | NumVecs = 2; |
| 16226 | isLoadOp = false; |
| 16227 | break; |
| 16228 | case Intrinsic::arm_mve_vst4q: |
| 16229 | NewOpc = ARMISD::VST4_UPD; |
| 16230 | NumVecs = 4; |
| 16231 | isLoadOp = false; |
| 16232 | break; |
| 16233 | } |
| 16234 | |
| 16235 | // Find the size of memory referenced by the load/store. |
| 16236 | EVT VecTy; |
| 16237 | if (isLoadOp) { |
| 16238 | VecTy = N->getValueType(ResNo: 0); |
| 16239 | } else { |
| 16240 | VecTy = N->getOperand(Num: 3).getValueType(); |
| 16241 | } |
| 16242 | |
| 16243 | unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; |
| 16244 | |
| 16245 | // If the increment is a constant, it must match the memory ref size. |
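// e.g. an arm_mve_vld2q of two v4i32 vectors references 2 * 16 = 32 bytes, so
// only an add of 32 can be folded into the post-increment.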
| 16246 | SDValue Inc = User->getOperand(Num: User->getOperand(Num: 0) == Addr ? 1 : 0); |
| 16247 | ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Val: Inc.getNode()); |
| 16248 | if (!CInc || CInc->getZExtValue() != NumBytes) |
| 16249 | continue; |
| 16250 | |
| 16251 | // Create the new updating load/store node. |
| 16252 | // First, create an SDVTList for the new updating node's results. |
| 16253 | EVT Tys[6]; |
| 16254 | unsigned NumResultVecs = (isLoadOp ? NumVecs : 0); |
| 16255 | unsigned n; |
| 16256 | for (n = 0; n < NumResultVecs; ++n) |
| 16257 | Tys[n] = VecTy; |
| 16258 | Tys[n++] = MVT::i32; |
| 16259 | Tys[n] = MVT::Other; |
| 16260 | SDVTList SDTys = DAG.getVTList(VTs: ArrayRef(Tys, NumResultVecs + 2)); |
| 16261 | |
| 16262 | // Then, gather the new node's operands. |
| 16263 | SmallVector<SDValue, 8> Ops; |
| 16264 | Ops.push_back(Elt: N->getOperand(Num: 0)); // incoming chain |
| 16265 | Ops.push_back(Elt: N->getOperand(Num: 2)); // ptr |
| 16266 | Ops.push_back(Elt: Inc); |
| 16267 | |
| 16268 | for (unsigned i = 3; i < N->getNumOperands(); ++i) |
| 16269 | Ops.push_back(Elt: N->getOperand(Num: i)); |
| 16270 | |
| 16271 | SDValue UpdN = DAG.getMemIntrinsicNode(Opcode: NewOpc, dl, VTList: SDTys, Ops, MemVT: VecTy, |
| 16272 | MMO: MemN->getMemOperand()); |
| 16273 | |
| 16274 | // Update the uses. |
| 16275 | SmallVector<SDValue, 5> NewResults; |
| 16276 | for (unsigned i = 0; i < NumResultVecs; ++i) |
| 16277 | NewResults.push_back(Elt: SDValue(UpdN.getNode(), i)); |
| 16278 | |
| 16279 | NewResults.push_back(Elt: SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain |
| 16280 | DCI.CombineTo(N, To: NewResults); |
| 16281 | DCI.CombineTo(N: User, Res: SDValue(UpdN.getNode(), NumResultVecs)); |
| 16282 | |
| 16283 | break; |
| 16284 | } |
| 16285 | |
| 16286 | return SDValue(); |
| 16287 | } |
| 16288 | |
| 16289 | /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a |
| 16290 | /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic |
| 16291 | /// are also VDUPLANEs. If so, combine them to a vldN-dup operation and |
| 16292 | /// return true. |
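/// For example, if every value result of a vld2lane is only used by VDUPLANEs
/// of the same lane, the group can be replaced by a single VLD2DUP.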
| 16293 | static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
| 16294 | SelectionDAG &DAG = DCI.DAG; |
| 16295 | EVT VT = N->getValueType(ResNo: 0); |
| 16296 | // vldN-dup instructions only support 64-bit vectors for N > 1. |
| 16297 | if (!VT.is64BitVector()) |
| 16298 | return false; |
| 16299 | |
| 16300 | // Check if the VDUPLANE operand is a vldN-dup intrinsic. |
| 16301 | SDNode *VLD = N->getOperand(Num: 0).getNode(); |
| 16302 | if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) |
| 16303 | return false; |
| 16304 | unsigned NumVecs = 0; |
| 16305 | unsigned NewOpc = 0; |
| 16306 | unsigned IntNo = VLD->getConstantOperandVal(Num: 1); |
| 16307 | if (IntNo == Intrinsic::arm_neon_vld2lane) { |
| 16308 | NumVecs = 2; |
| 16309 | NewOpc = ARMISD::VLD2DUP; |
| 16310 | } else if (IntNo == Intrinsic::arm_neon_vld3lane) { |
| 16311 | NumVecs = 3; |
| 16312 | NewOpc = ARMISD::VLD3DUP; |
| 16313 | } else if (IntNo == Intrinsic::arm_neon_vld4lane) { |
| 16314 | NumVecs = 4; |
| 16315 | NewOpc = ARMISD::VLD4DUP; |
| 16316 | } else { |
| 16317 | return false; |
| 16318 | } |
| 16319 | |
| 16320 | // First check that all the vldN-lane uses are VDUPLANEs and that the lane |
| 16321 | // numbers match the load. |
| 16322 | unsigned VLDLaneNo = VLD->getConstantOperandVal(Num: NumVecs + 3); |
| 16323 | for (SDUse &Use : VLD->uses()) { |
| 16324 | // Ignore uses of the chain result. |
| 16325 | if (Use.getResNo() == NumVecs) |
| 16326 | continue; |
| 16327 | SDNode *User = Use.getUser(); |
| 16328 | if (User->getOpcode() != ARMISD::VDUPLANE || |
| 16329 | VLDLaneNo != User->getConstantOperandVal(Num: 1)) |
| 16330 | return false; |
| 16331 | } |
| 16332 | |
| 16333 | // Create the vldN-dup node. |
| 16334 | EVT Tys[5]; |
| 16335 | unsigned n; |
| 16336 | for (n = 0; n < NumVecs; ++n) |
| 16337 | Tys[n] = VT; |
| 16338 | Tys[n] = MVT::Other; |
| 16339 | SDVTList SDTys = DAG.getVTList(VTs: ArrayRef(Tys, NumVecs + 1)); |
| 16340 | SDValue Ops[] = { VLD->getOperand(Num: 0), VLD->getOperand(Num: 2) }; |
| 16341 | MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(Val: VLD); |
| 16342 | SDValue VLDDup = DAG.getMemIntrinsicNode(Opcode: NewOpc, dl: SDLoc(VLD), VTList: SDTys, |
| 16343 | Ops, MemVT: VLDMemInt->getMemoryVT(), |
| 16344 | MMO: VLDMemInt->getMemOperand()); |
| 16345 | |
| 16346 | // Update the uses. |
| 16347 | for (SDUse &Use : VLD->uses()) { |
| 16348 | unsigned ResNo = Use.getResNo(); |
| 16349 | // Ignore uses of the chain result. |
| 16350 | if (ResNo == NumVecs) |
| 16351 | continue; |
| 16352 | DCI.CombineTo(N: Use.getUser(), Res: SDValue(VLDDup.getNode(), ResNo)); |
| 16353 | } |
| 16354 | |
| 16355 | // Now the vldN-lane intrinsic is dead except for its chain result. |
| 16356 | // Update uses of the chain. |
| 16357 | std::vector<SDValue> VLDDupResults; |
| 16358 | for (unsigned n = 0; n < NumVecs; ++n) |
| 16359 | VLDDupResults.push_back(x: SDValue(VLDDup.getNode(), n)); |
| 16360 | VLDDupResults.push_back(x: SDValue(VLDDup.getNode(), NumVecs)); |
| 16361 | DCI.CombineTo(N: VLD, To: VLDDupResults); |
| 16362 | |
| 16363 | return true; |
| 16364 | } |
| 16365 | |
| 16366 | /// PerformVDUPLANECombine - Target-specific dag combine xforms for |
| 16367 | /// ARMISD::VDUPLANE. |
| 16368 | static SDValue PerformVDUPLANECombine(SDNode *N, |
| 16369 | TargetLowering::DAGCombinerInfo &DCI, |
| 16370 | const ARMSubtarget *Subtarget) { |
| 16371 | SDValue Op = N->getOperand(Num: 0); |
| 16372 | EVT VT = N->getValueType(ResNo: 0); |
| 16373 | |
| 16374 | // On MVE, we just convert the VDUPLANE to a VDUP with an extract. |
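// e.g. (VDUPLANE x, lane) -> (VDUP (EXTRACT_VECTOR_ELT x, lane)), extracting
// as i32 if the element type is not legal.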
| 16375 | if (Subtarget->hasMVEIntegerOps()) { |
EVT ExtractVT = VT.getVectorElementType();
| 16377 | // We need to ensure we are creating a legal type. |
| 16378 | if (!DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT: ExtractVT)) |
| 16379 | ExtractVT = MVT::i32; |
SDValue Extract = DCI.DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: SDLoc(N), VT: ExtractVT,
| 16381 | N1: N->getOperand(Num: 0), N2: N->getOperand(Num: 1)); |
| 16382 | return DCI.DAG.getNode(Opcode: ARMISD::VDUP, DL: SDLoc(N), VT, Operand: Extract); |
| 16383 | } |
| 16384 | |
| 16385 | // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses |
| 16386 | // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. |
| 16387 | if (CombineVLDDUP(N, DCI)) |
| 16388 | return SDValue(N, 0); |
| 16389 | |
| 16390 | // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is |
| 16391 | // redundant. Ignore bit_converts for now; element sizes are checked below. |
| 16392 | while (Op.getOpcode() == ISD::BITCAST) |
| 16393 | Op = Op.getOperand(i: 0); |
| 16394 | if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) |
| 16395 | return SDValue(); |
| 16396 | |
| 16397 | // Make sure the VMOV element size is not bigger than the VDUPLANE elements. |
| 16398 | unsigned EltSize = Op.getScalarValueSizeInBits(); |
| 16399 | // The canonical VMOV for a zero vector uses a 32-bit element size. |
| 16400 | unsigned Imm = Op.getConstantOperandVal(i: 0); |
| 16401 | unsigned EltBits; |
| 16402 | if (ARM_AM::decodeVMOVModImm(ModImm: Imm, EltBits) == 0) |
| 16403 | EltSize = 8; |
| 16404 | if (EltSize > VT.getScalarSizeInBits()) |
| 16405 | return SDValue(); |
| 16406 | |
| 16407 | return DCI.DAG.getNode(Opcode: ISD::BITCAST, DL: SDLoc(N), VT, Operand: Op); |
| 16408 | } |
| 16409 | |
| 16410 | /// PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP. |
| 16411 | static SDValue PerformVDUPCombine(SDNode *N, SelectionDAG &DAG, |
| 16412 | const ARMSubtarget *Subtarget) { |
| 16413 | SDValue Op = N->getOperand(Num: 0); |
| 16414 | SDLoc dl(N); |
| 16415 | |
| 16416 | if (Subtarget->hasMVEIntegerOps()) { |
| 16417 | // Convert VDUP f32 -> VDUP BITCAST i32 under MVE, as we know the value will |
| 16418 | // need to come from a GPR. |
| 16419 | if (Op.getValueType() == MVT::f32) |
| 16420 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT: N->getValueType(ResNo: 0), |
| 16421 | Operand: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::i32, Operand: Op)); |
| 16422 | else if (Op.getValueType() == MVT::f16) |
| 16423 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT: N->getValueType(ResNo: 0), |
| 16424 | Operand: DAG.getNode(Opcode: ARMISD::VMOVrh, DL: dl, VT: MVT::i32, Operand: Op)); |
| 16425 | } |
| 16426 | |
| 16427 | if (!Subtarget->hasNEON()) |
| 16428 | return SDValue(); |
| 16429 | |
| 16430 | // Match VDUP(LOAD) -> VLD1DUP. |
| 16431 | // We match this pattern here rather than waiting for isel because the |
| 16432 | // transform is only legal for unindexed loads. |
| 16433 | LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: Op.getNode()); |
| 16434 | if (LD && Op.hasOneUse() && LD->isUnindexed() && |
| 16435 | LD->getMemoryVT() == N->getValueType(ResNo: 0).getVectorElementType()) { |
| 16436 | SDValue Ops[] = {LD->getOperand(Num: 0), LD->getOperand(Num: 1), |
| 16437 | DAG.getConstant(Val: LD->getAlign().value(), DL: SDLoc(N), VT: MVT::i32)}; |
| 16438 | SDVTList SDTys = DAG.getVTList(VT1: N->getValueType(ResNo: 0), VT2: MVT::Other); |
| 16439 | SDValue VLDDup = |
| 16440 | DAG.getMemIntrinsicNode(Opcode: ARMISD::VLD1DUP, dl: SDLoc(N), VTList: SDTys, Ops, |
| 16441 | MemVT: LD->getMemoryVT(), MMO: LD->getMemOperand()); |
| 16442 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 1), To: VLDDup.getValue(R: 1)); |
| 16443 | return VLDDup; |
| 16444 | } |
| 16445 | |
| 16446 | return SDValue(); |
| 16447 | } |
| 16448 | |
| 16449 | static SDValue PerformLOADCombine(SDNode *N, |
| 16450 | TargetLowering::DAGCombinerInfo &DCI, |
| 16451 | const ARMSubtarget *Subtarget) { |
| 16452 | EVT VT = N->getValueType(ResNo: 0); |
| 16453 | |
| 16454 | // If this is a legal vector load, try to combine it into a VLD1_UPD. |
| 16455 | if (Subtarget->hasNEON() && ISD::isNormalLoad(N) && VT.isVector() && |
| 16456 | DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
| 16457 | return CombineBaseUpdate(N, DCI); |
| 16458 | |
| 16459 | return SDValue(); |
| 16460 | } |
| 16461 | |
| 16462 | // Optimize trunc store (of multiple scalars) to shuffle and store. First, |
| 16463 | // pack all of the elements in one place. Next, store to memory in fewer |
| 16464 | // chunks. |
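// For example (a minimal sketch of the little-endian case), a truncating
// store of v4i32 to v4i8 is shuffled so the four narrowed bytes land in the
// low lanes of the wide vector, which is then stored as one i32-sized chunk.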
| 16465 | static SDValue PerformTruncatingStoreCombine(StoreSDNode *St, |
| 16466 | SelectionDAG &DAG) { |
| 16467 | SDValue StVal = St->getValue(); |
| 16468 | EVT VT = StVal.getValueType(); |
| 16469 | if (!St->isTruncatingStore() || !VT.isVector()) |
| 16470 | return SDValue(); |
| 16471 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 16472 | EVT StVT = St->getMemoryVT(); |
| 16473 | unsigned NumElems = VT.getVectorNumElements(); |
assert(StVT != VT && "Cannot truncate to the same type");
| 16475 | unsigned FromEltSz = VT.getScalarSizeInBits(); |
| 16476 | unsigned ToEltSz = StVT.getScalarSizeInBits(); |
| 16477 | |
// The From and To element sizes, and the element count, must be powers of two
| 16479 | if (!isPowerOf2_32(Value: NumElems * FromEltSz * ToEltSz)) |
| 16480 | return SDValue(); |
| 16481 | |
| 16482 | // We are going to use the original vector elt for storing. |
| 16483 | // Accumulated smaller vector elements must be a multiple of the store size. |
| 16484 | if (0 != (NumElems * FromEltSz) % ToEltSz) |
| 16485 | return SDValue(); |
| 16486 | |
| 16487 | unsigned SizeRatio = FromEltSz / ToEltSz; |
| 16488 | assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits()); |
| 16489 | |
| 16490 | // Create a type on which we perform the shuffle. |
| 16491 | EVT WideVecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: StVT.getScalarType(), |
| 16492 | NumElements: NumElems * SizeRatio); |
| 16493 | assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); |
| 16494 | |
| 16495 | SDLoc DL(St); |
| 16496 | SDValue WideVec = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: WideVecVT, Operand: StVal); |
| 16497 | SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); |
| 16498 | for (unsigned i = 0; i < NumElems; ++i) |
| 16499 | ShuffleVec[i] = DAG.getDataLayout().isBigEndian() ? (i + 1) * SizeRatio - 1 |
| 16500 | : i * SizeRatio; |
| 16501 | |
| 16502 | // Can't shuffle using an illegal type. |
| 16503 | if (!TLI.isTypeLegal(VT: WideVecVT)) |
| 16504 | return SDValue(); |
| 16505 | |
| 16506 | SDValue Shuff = DAG.getVectorShuffle( |
| 16507 | VT: WideVecVT, dl: DL, N1: WideVec, N2: DAG.getUNDEF(VT: WideVec.getValueType()), Mask: ShuffleVec); |
| 16508 | // At this point all of the data is stored at the bottom of the |
| 16509 | // register. We now need to save it to mem. |
| 16510 | |
| 16511 | // Find the largest store unit |
| 16512 | MVT StoreType = MVT::i8; |
| 16513 | for (MVT Tp : MVT::integer_valuetypes()) { |
| 16514 | if (TLI.isTypeLegal(VT: Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz) |
| 16515 | StoreType = Tp; |
| 16516 | } |
| 16517 | // Didn't find a legal store type. |
| 16518 | if (!TLI.isTypeLegal(VT: StoreType)) |
| 16519 | return SDValue(); |
| 16520 | |
| 16521 | // Bitcast the original vector into a vector of store-size units |
| 16522 | EVT StoreVecVT = |
| 16523 | EVT::getVectorVT(Context&: *DAG.getContext(), VT: StoreType, |
| 16524 | NumElements: VT.getSizeInBits() / EVT(StoreType).getSizeInBits()); |
| 16525 | assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); |
| 16526 | SDValue ShuffWide = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: StoreVecVT, Operand: Shuff); |
| 16527 | SmallVector<SDValue, 8> Chains; |
| 16528 | SDValue Increment = DAG.getConstant(Val: StoreType.getSizeInBits() / 8, DL, |
| 16529 | VT: TLI.getPointerTy(DL: DAG.getDataLayout())); |
| 16530 | SDValue BasePtr = St->getBasePtr(); |
| 16531 | |
| 16532 | // Perform one or more big stores into memory. |
| 16533 | unsigned E = (ToEltSz * NumElems) / StoreType.getSizeInBits(); |
| 16534 | for (unsigned I = 0; I < E; I++) { |
| 16535 | SDValue SubVec = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: StoreType, |
| 16536 | N1: ShuffWide, N2: DAG.getIntPtrConstant(Val: I, DL)); |
| 16537 | SDValue Ch = |
| 16538 | DAG.getStore(Chain: St->getChain(), dl: DL, Val: SubVec, Ptr: BasePtr, PtrInfo: St->getPointerInfo(), |
| 16539 | Alignment: St->getAlign(), MMOFlags: St->getMemOperand()->getFlags()); |
| 16540 | BasePtr = |
| 16541 | DAG.getNode(Opcode: ISD::ADD, DL, VT: BasePtr.getValueType(), N1: BasePtr, N2: Increment); |
| 16542 | Chains.push_back(Elt: Ch); |
| 16543 | } |
| 16544 | return DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Chains); |
| 16545 | } |
| 16546 | |
| 16547 | // Try taking a single vector store from an fpround (which would otherwise turn |
| 16548 | // into an expensive buildvector) and splitting it into a series of narrowing |
| 16549 | // stores. |
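// For example, a store of (fpround v8f32 to v8f16) becomes two VCVTN
// conversions, each stored as a truncating v4i16 slice at byte offsets 0 and 8.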
| 16550 | static SDValue PerformSplittingToNarrowingStores(StoreSDNode *St, |
| 16551 | SelectionDAG &DAG) { |
| 16552 | if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed()) |
| 16553 | return SDValue(); |
| 16554 | SDValue Trunc = St->getValue(); |
| 16555 | if (Trunc->getOpcode() != ISD::FP_ROUND) |
| 16556 | return SDValue(); |
| 16557 | EVT FromVT = Trunc->getOperand(Num: 0).getValueType(); |
| 16558 | EVT ToVT = Trunc.getValueType(); |
| 16559 | if (!ToVT.isVector()) |
| 16560 | return SDValue(); |
| 16561 | assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements()); |
| 16562 | EVT ToEltVT = ToVT.getVectorElementType(); |
| 16563 | EVT FromEltVT = FromVT.getVectorElementType(); |
| 16564 | |
| 16565 | if (FromEltVT != MVT::f32 || ToEltVT != MVT::f16) |
| 16566 | return SDValue(); |
| 16567 | |
| 16568 | unsigned NumElements = 4; |
| 16569 | if (FromVT.getVectorNumElements() % NumElements != 0) |
| 16570 | return SDValue(); |
| 16571 | |
// Test if the Trunc will be convertible to a VMOVN with a shuffle, and if so
| 16573 | // use the VMOVN over splitting the store. We are looking for patterns of: |
| 16574 | // !rev: 0 N 1 N+1 2 N+2 ... |
| 16575 | // rev: N 0 N+1 1 N+2 2 ... |
| 16576 | // The shuffle may either be a single source (in which case N = NumElts/2) or |
| 16577 | // two inputs extended with concat to the same size (in which case N = |
| 16578 | // NumElts). |
| 16579 | auto isVMOVNShuffle = [&](ShuffleVectorSDNode *SVN, bool Rev) { |
| 16580 | ArrayRef<int> M = SVN->getMask(); |
| 16581 | unsigned NumElts = ToVT.getVectorNumElements(); |
| 16582 | if (SVN->getOperand(Num: 1).isUndef()) |
| 16583 | NumElts /= 2; |
| 16584 | |
| 16585 | unsigned Off0 = Rev ? NumElts : 0; |
| 16586 | unsigned Off1 = Rev ? 0 : NumElts; |
| 16587 | |
| 16588 | for (unsigned I = 0; I < NumElts; I += 2) { |
| 16589 | if (M[I] >= 0 && M[I] != (int)(Off0 + I / 2)) |
| 16590 | return false; |
| 16591 | if (M[I + 1] >= 0 && M[I + 1] != (int)(Off1 + I / 2)) |
| 16592 | return false; |
| 16593 | } |
| 16594 | |
| 16595 | return true; |
| 16596 | }; |
| 16597 | |
| 16598 | if (auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Val: Trunc.getOperand(i: 0))) |
| 16599 | if (isVMOVNShuffle(Shuffle, false) || isVMOVNShuffle(Shuffle, true)) |
| 16600 | return SDValue(); |
| 16601 | |
| 16602 | LLVMContext &C = *DAG.getContext(); |
| 16603 | SDLoc DL(St); |
| 16604 | // Details about the old store |
| 16605 | SDValue Ch = St->getChain(); |
| 16606 | SDValue BasePtr = St->getBasePtr(); |
| 16607 | Align Alignment = St->getBaseAlign(); |
| 16608 | MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags(); |
| 16609 | AAMDNodes AAInfo = St->getAAInfo(); |
| 16610 | |
// We split the store into slices of NumElements elements. The f32 values are
// converted to f16 with VCVTN and then emitted as truncating integer stores.
| 16613 | EVT NewFromVT = EVT::getVectorVT(Context&: C, VT: FromEltVT, NumElements); |
| 16614 | EVT NewToVT = EVT::getVectorVT( |
| 16615 | Context&: C, VT: EVT::getIntegerVT(Context&: C, BitWidth: ToEltVT.getSizeInBits()), NumElements); |
| 16616 | |
| 16617 | SmallVector<SDValue, 4> Stores; |
| 16618 | for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) { |
| 16619 | unsigned NewOffset = i * NumElements * ToEltVT.getSizeInBits() / 8; |
| 16620 | SDValue NewPtr = |
| 16621 | DAG.getObjectPtrOffset(SL: DL, Ptr: BasePtr, Offset: TypeSize::getFixed(ExactSize: NewOffset)); |
| 16622 | |
SDValue Extract =
| 16624 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT: NewFromVT, N1: Trunc.getOperand(i: 0), |
| 16625 | N2: DAG.getConstant(Val: i * NumElements, DL, VT: MVT::i32)); |
| 16626 | |
| 16627 | SDValue FPTrunc = |
| 16628 | DAG.getNode(Opcode: ARMISD::VCVTN, DL, VT: MVT::v8f16, N1: DAG.getUNDEF(VT: MVT::v8f16), |
| 16629 | N2: Extract, N3: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
| 16630 | Extract = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT: MVT::v4i32, Operand: FPTrunc); |
| 16631 | |
| 16632 | SDValue Store = DAG.getTruncStore( |
| 16633 | Chain: Ch, dl: DL, Val: Extract, Ptr: NewPtr, PtrInfo: St->getPointerInfo().getWithOffset(O: NewOffset), |
| 16634 | SVT: NewToVT, Alignment, MMOFlags, AAInfo); |
| 16635 | Stores.push_back(Elt: Store); |
| 16636 | } |
| 16637 | return DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Stores); |
| 16638 | } |
| 16639 | |
| 16640 | // Try taking a single vector store from an MVETRUNC (which would otherwise turn |
| 16641 | // into an expensive buildvector) and splitting it into a series of narrowing |
| 16642 | // stores. |
| 16643 | static SDValue PerformSplittingMVETruncToNarrowingStores(StoreSDNode *St, |
| 16644 | SelectionDAG &DAG) { |
| 16645 | if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed()) |
| 16646 | return SDValue(); |
| 16647 | SDValue Trunc = St->getValue(); |
| 16648 | if (Trunc->getOpcode() != ARMISD::MVETRUNC) |
| 16649 | return SDValue(); |
| 16650 | EVT FromVT = Trunc->getOperand(Num: 0).getValueType(); |
| 16651 | EVT ToVT = Trunc.getValueType(); |
| 16652 | |
| 16653 | LLVMContext &C = *DAG.getContext(); |
| 16654 | SDLoc DL(St); |
| 16655 | // Details about the old store |
| 16656 | SDValue Ch = St->getChain(); |
| 16657 | SDValue BasePtr = St->getBasePtr(); |
| 16658 | Align Alignment = St->getBaseAlign(); |
| 16659 | MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags(); |
| 16660 | AAMDNodes AAInfo = St->getAAInfo(); |
| 16661 | |
| 16662 | EVT NewToVT = EVT::getVectorVT(Context&: C, VT: ToVT.getVectorElementType(), |
| 16663 | NumElements: FromVT.getVectorNumElements()); |
| 16664 | |
| 16665 | SmallVector<SDValue, 4> Stores; |
| 16666 | for (unsigned i = 0; i < Trunc.getNumOperands(); i++) { |
| 16667 | unsigned NewOffset = |
| 16668 | i * FromVT.getVectorNumElements() * ToVT.getScalarSizeInBits() / 8; |
| 16669 | SDValue NewPtr = |
| 16670 | DAG.getObjectPtrOffset(SL: DL, Ptr: BasePtr, Offset: TypeSize::getFixed(ExactSize: NewOffset)); |
| 16671 | |
SDValue Extract = Trunc.getOperand(i);
| 16673 | SDValue Store = DAG.getTruncStore( |
| 16674 | Chain: Ch, dl: DL, Val: Extract, Ptr: NewPtr, PtrInfo: St->getPointerInfo().getWithOffset(O: NewOffset), |
| 16675 | SVT: NewToVT, Alignment, MMOFlags, AAInfo); |
| 16676 | Stores.push_back(Elt: Store); |
| 16677 | } |
| 16678 | return DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Stores); |
| 16679 | } |
| 16680 | |
| 16681 | // Given a floating point store from an extracted vector, with an integer |
| 16682 | // VGETLANE that already exists, store the existing VGETLANEu directly. This can |
| 16683 | // help reduce fp register pressure, doesn't require the fp extract and allows |
| 16684 | // use of more integer post-inc stores not available with vstr. |
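// For example, (store f16 (extract_vector_elt v, n)) where an integer
// (VGETLANEu v, n) node already exists is rewritten as a truncating i16 store
// of that VGETLANEu value.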
static SDValue PerformExtractFpToIntStores(StoreSDNode *St, SelectionDAG &DAG) {
| 16686 | if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed()) |
| 16687 | return SDValue(); |
SDValue Extract = St->getValue();
| 16689 | EVT VT = Extract.getValueType(); |
// For now this only handles f16. It may be useful for f32 too, but that would
// appear as bitcast(extract), not the VGETLANEu we currently check for here.
| 16692 | if (VT != MVT::f16 || Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT) |
| 16693 | return SDValue(); |
| 16694 | |
| 16695 | SDNode *GetLane = |
| 16696 | DAG.getNodeIfExists(Opcode: ARMISD::VGETLANEu, VTList: DAG.getVTList(VT: MVT::i32), |
| 16697 | Ops: {Extract.getOperand(i: 0), Extract.getOperand(i: 1)}); |
| 16698 | if (!GetLane) |
| 16699 | return SDValue(); |
| 16700 | |
| 16701 | LLVMContext &C = *DAG.getContext(); |
| 16702 | SDLoc DL(St); |
| 16703 | // Create a new integer store to replace the existing floating point version. |
| 16704 | SDValue Ch = St->getChain(); |
| 16705 | SDValue BasePtr = St->getBasePtr(); |
| 16706 | Align Alignment = St->getBaseAlign(); |
| 16707 | MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags(); |
| 16708 | AAMDNodes AAInfo = St->getAAInfo(); |
| 16709 | EVT NewToVT = EVT::getIntegerVT(Context&: C, BitWidth: VT.getSizeInBits()); |
| 16710 | SDValue Store = DAG.getTruncStore(Chain: Ch, dl: DL, Val: SDValue(GetLane, 0), Ptr: BasePtr, |
| 16711 | PtrInfo: St->getPointerInfo(), SVT: NewToVT, Alignment, |
| 16712 | MMOFlags, AAInfo); |
| 16713 | |
| 16714 | return Store; |
| 16715 | } |
| 16716 | |
| 16717 | /// PerformSTORECombine - Target-specific dag combine xforms for |
| 16718 | /// ISD::STORE. |
| 16719 | static SDValue PerformSTORECombine(SDNode *N, |
| 16720 | TargetLowering::DAGCombinerInfo &DCI, |
| 16721 | const ARMSubtarget *Subtarget) { |
| 16722 | StoreSDNode *St = cast<StoreSDNode>(Val: N); |
| 16723 | if (St->isVolatile()) |
| 16724 | return SDValue(); |
| 16725 | SDValue StVal = St->getValue(); |
| 16726 | EVT VT = StVal.getValueType(); |
| 16727 | |
| 16728 | if (Subtarget->hasNEON()) |
| 16729 | if (SDValue Store = PerformTruncatingStoreCombine(St, DAG&: DCI.DAG)) |
| 16730 | return Store; |
| 16731 | |
| 16732 | if (Subtarget->hasMVEFloatOps()) |
| 16733 | if (SDValue NewToken = PerformSplittingToNarrowingStores(St, DAG&: DCI.DAG)) |
| 16734 | return NewToken; |
| 16735 | |
| 16736 | if (Subtarget->hasMVEIntegerOps()) { |
| 16737 | if (SDValue NewChain = PerformExtractFpToIntStores(St, DAG&: DCI.DAG)) |
| 16738 | return NewChain; |
| 16739 | if (SDValue NewToken = |
| 16740 | PerformSplittingMVETruncToNarrowingStores(St, DAG&: DCI.DAG)) |
| 16741 | return NewToken; |
| 16742 | } |
| 16743 | |
| 16744 | if (!ISD::isNormalStore(N: St)) |
| 16745 | return SDValue(); |
| 16746 | |
| 16747 | // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and |
| 16748 | // ARM stores of arguments in the same cache line. |
| 16749 | if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && |
| 16750 | StVal.getNode()->hasOneUse()) { |
| 16751 | SelectionDAG &DAG = DCI.DAG; |
| 16752 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); |
| 16753 | SDLoc DL(St); |
| 16754 | SDValue BasePtr = St->getBasePtr(); |
| 16755 | SDValue NewST1 = DAG.getStore( |
| 16756 | Chain: St->getChain(), dl: DL, Val: StVal.getNode()->getOperand(Num: isBigEndian ? 1 : 0), |
| 16757 | Ptr: BasePtr, PtrInfo: St->getPointerInfo(), Alignment: St->getBaseAlign(), |
| 16758 | MMOFlags: St->getMemOperand()->getFlags()); |
| 16759 | |
| 16760 | SDValue OffsetPtr = DAG.getNode(Opcode: ISD::ADD, DL, VT: MVT::i32, N1: BasePtr, |
| 16761 | N2: DAG.getConstant(Val: 4, DL, VT: MVT::i32)); |
| 16762 | return DAG.getStore(Chain: NewST1.getValue(R: 0), dl: DL, |
| 16763 | Val: StVal.getNode()->getOperand(Num: isBigEndian ? 0 : 1), |
| 16764 | Ptr: OffsetPtr, PtrInfo: St->getPointerInfo().getWithOffset(O: 4), |
| 16765 | Alignment: St->getBaseAlign(), MMOFlags: St->getMemOperand()->getFlags()); |
| 16766 | } |
| 16767 | |
| 16768 | if (StVal.getValueType() == MVT::i64 && |
| 16769 | StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { |
| 16770 | |
| 16771 | // Bitcast an i64 store extracted from a vector to f64. |
| 16772 | // Otherwise, the i64 value will be legalized to a pair of i32 values. |
| 16773 | SelectionDAG &DAG = DCI.DAG; |
| 16774 | SDLoc dl(StVal); |
| 16775 | SDValue IntVec = StVal.getOperand(i: 0); |
| 16776 | EVT FloatVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::f64, |
| 16777 | NumElements: IntVec.getValueType().getVectorNumElements()); |
| 16778 | SDValue Vec = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: FloatVT, Operand: IntVec); |
| 16779 | SDValue ExtElt = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, |
| 16780 | N1: Vec, N2: StVal.getOperand(i: 1)); |
| 16781 | dl = SDLoc(N); |
| 16782 | SDValue V = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::i64, Operand: ExtElt); |
| 16783 | // Make the DAGCombiner fold the bitcasts. |
| 16784 | DCI.AddToWorklist(N: Vec.getNode()); |
| 16785 | DCI.AddToWorklist(N: ExtElt.getNode()); |
| 16786 | DCI.AddToWorklist(N: V.getNode()); |
| 16787 | return DAG.getStore(Chain: St->getChain(), dl, Val: V, Ptr: St->getBasePtr(), |
| 16788 | PtrInfo: St->getPointerInfo(), Alignment: St->getAlign(), |
| 16789 | MMOFlags: St->getMemOperand()->getFlags(), AAInfo: St->getAAInfo()); |
| 16790 | } |
| 16791 | |
| 16792 | // If this is a legal vector store, try to combine it into a VST1_UPD. |
| 16793 | if (Subtarget->hasNEON() && ISD::isNormalStore(N) && VT.isVector() && |
| 16794 | DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
| 16795 | return CombineBaseUpdate(N, DCI); |
| 16796 | |
| 16797 | return SDValue(); |
| 16798 | } |
| 16799 | |
| 16800 | /// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) |
| 16801 | /// can replace combinations of VMUL and VCVT (floating-point to integer) |
| 16802 | /// when the VMUL has a constant operand that is a power of 2. |
| 16803 | /// |
| 16804 | /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): |
| 16805 | /// vmul.f32 d16, d17, d16 |
| 16806 | /// vcvt.s32.f32 d16, d16 |
| 16807 | /// becomes: |
| 16808 | /// vcvt.s32.f32 d16, d16, #3 |
| 16809 | static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG, |
| 16810 | const ARMSubtarget *Subtarget) { |
| 16811 | if (!Subtarget->hasNEON()) |
| 16812 | return SDValue(); |
| 16813 | |
| 16814 | SDValue Op = N->getOperand(Num: 0); |
| 16815 | if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() || |
| 16816 | Op.getOpcode() != ISD::FMUL) |
| 16817 | return SDValue(); |
| 16818 | |
| 16819 | SDValue ConstVec = Op->getOperand(Num: 1); |
| 16820 | if (!isa<BuildVectorSDNode>(Val: ConstVec)) |
| 16821 | return SDValue(); |
| 16822 | |
| 16823 | MVT FloatTy = Op.getSimpleValueType().getVectorElementType(); |
| 16824 | uint32_t FloatBits = FloatTy.getSizeInBits(); |
| 16825 | MVT IntTy = N->getSimpleValueType(ResNo: 0).getVectorElementType(); |
| 16826 | uint32_t IntBits = IntTy.getSizeInBits(); |
| 16827 | unsigned NumLanes = Op.getValueType().getVectorNumElements(); |
| 16828 | if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) { |
| 16829 | // These instructions only exist converting from f32 to i32. We can handle |
| 16830 | // smaller integers by generating an extra truncate, but larger ones would |
| 16831 | // be lossy. We also can't handle anything other than 2 or 4 lanes, since |
// these instructions only support v2i32/v4i32 types.
| 16833 | return SDValue(); |
| 16834 | } |
| 16835 | |
| 16836 | BitVector UndefElements; |
| 16837 | BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Val&: ConstVec); |
| 16838 | int32_t C = BV->getConstantFPSplatPow2ToLog2Int(UndefElements: &UndefElements, BitWidth: 33); |
| 16839 | if (C == -1 || C == 0 || C > 32) |
| 16840 | return SDValue(); |
| 16841 | |
| 16842 | SDLoc dl(N); |
| 16843 | bool isSigned = N->getOpcode() == ISD::FP_TO_SINT; |
| 16844 | unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs : |
| 16845 | Intrinsic::arm_neon_vcvtfp2fxu; |
| 16846 | SDValue FixConv = DAG.getNode( |
| 16847 | Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: NumLanes == 2 ? MVT::v2i32 : MVT::v4i32, |
| 16848 | N1: DAG.getConstant(Val: IntrinsicOpcode, DL: dl, VT: MVT::i32), N2: Op->getOperand(Num: 0), |
| 16849 | N3: DAG.getConstant(Val: C, DL: dl, VT: MVT::i32)); |
| 16850 | |
| 16851 | if (IntBits < FloatBits) |
| 16852 | FixConv = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: N->getValueType(ResNo: 0), Operand: FixConv); |
| 16853 | |
| 16854 | return FixConv; |
| 16855 | } |
| 16856 | |
| 16857 | static SDValue PerformFAddVSelectCombine(SDNode *N, SelectionDAG &DAG, |
| 16858 | const ARMSubtarget *Subtarget) { |
| 16859 | if (!Subtarget->hasMVEFloatOps()) |
| 16860 | return SDValue(); |
| 16861 | |
| 16862 | // Turn (fadd x, (vselect c, y, -0.0)) into (vselect c, (fadd x, y), x) |
| 16863 | // The second form can be more easily turned into a predicated vadd, and |
| 16864 | // possibly combined into a fma to become a predicated vfma. |
| 16865 | SDValue Op0 = N->getOperand(Num: 0); |
| 16866 | SDValue Op1 = N->getOperand(Num: 1); |
| 16867 | EVT VT = N->getValueType(ResNo: 0); |
| 16868 | SDLoc DL(N); |
| 16869 | |
| 16870 | // The identity element for a fadd is -0.0 or +0.0 when the nsz flag is set, |
// which these VMOVs represent.
| 16872 | auto isIdentitySplat = [&](SDValue Op, bool NSZ) { |
| 16873 | if (Op.getOpcode() != ISD::BITCAST || |
| 16874 | Op.getOperand(i: 0).getOpcode() != ARMISD::VMOVIMM) |
| 16875 | return false; |
| 16876 | uint64_t ImmVal = Op.getOperand(i: 0).getConstantOperandVal(i: 0); |
| 16877 | if (VT == MVT::v4f32 && (ImmVal == 1664 || (ImmVal == 0 && NSZ))) |
| 16878 | return true; |
| 16879 | if (VT == MVT::v8f16 && (ImmVal == 2688 || (ImmVal == 0 && NSZ))) |
| 16880 | return true; |
| 16881 | return false; |
| 16882 | }; |
| 16883 | |
| 16884 | if (Op0.getOpcode() == ISD::VSELECT && Op1.getOpcode() != ISD::VSELECT) |
| 16885 | std::swap(a&: Op0, b&: Op1); |
| 16886 | |
| 16887 | if (Op1.getOpcode() != ISD::VSELECT) |
| 16888 | return SDValue(); |
| 16889 | |
| 16890 | SDNodeFlags FaddFlags = N->getFlags(); |
| 16891 | bool NSZ = FaddFlags.hasNoSignedZeros(); |
| 16892 | if (!isIdentitySplat(Op1.getOperand(i: 2), NSZ)) |
| 16893 | return SDValue(); |
| 16894 | |
| 16895 | SDValue FAdd = |
| 16896 | DAG.getNode(Opcode: ISD::FADD, DL, VT, N1: Op0, N2: Op1.getOperand(i: 1), Flags: FaddFlags); |
| 16897 | return DAG.getNode(Opcode: ISD::VSELECT, DL, VT, N1: Op1.getOperand(i: 0), N2: FAdd, N3: Op0, Flags: FaddFlags); |
| 16898 | } |
| 16899 | |
| 16900 | static SDValue PerformFADDVCMLACombine(SDNode *N, SelectionDAG &DAG) { |
| 16901 | SDValue LHS = N->getOperand(Num: 0); |
| 16902 | SDValue RHS = N->getOperand(Num: 1); |
| 16903 | EVT VT = N->getValueType(ResNo: 0); |
| 16904 | SDLoc DL(N); |
| 16905 | |
| 16906 | if (!N->getFlags().hasAllowReassociation()) |
| 16907 | return SDValue(); |
| 16908 | |
// Combine fadd(a, vcmla(b, c, d)) -> vcmla(fadd(a, b), c, d)
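// (Operand 2 of the vcmla intrinsic node is taken here to be the accumulator,
// which is where the fadd operand is folded in.)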
| 16910 | auto ReassocComplex = [&](SDValue A, SDValue B) { |
| 16911 | if (A.getOpcode() != ISD::INTRINSIC_WO_CHAIN) |
| 16912 | return SDValue(); |
| 16913 | unsigned Opc = A.getConstantOperandVal(i: 0); |
| 16914 | if (Opc != Intrinsic::arm_mve_vcmlaq) |
| 16915 | return SDValue(); |
| 16916 | SDValue VCMLA = DAG.getNode( |
| 16917 | Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT, N1: A.getOperand(i: 0), N2: A.getOperand(i: 1), |
| 16918 | N3: DAG.getNode(Opcode: ISD::FADD, DL, VT, N1: A.getOperand(i: 2), N2: B, Flags: N->getFlags()), |
| 16919 | N4: A.getOperand(i: 3), N5: A.getOperand(i: 4)); |
| 16920 | VCMLA->setFlags(A->getFlags()); |
| 16921 | return VCMLA; |
| 16922 | }; |
| 16923 | if (SDValue R = ReassocComplex(LHS, RHS)) |
| 16924 | return R; |
| 16925 | if (SDValue R = ReassocComplex(RHS, LHS)) |
| 16926 | return R; |
| 16927 | |
| 16928 | return SDValue(); |
| 16929 | } |
| 16930 | |
| 16931 | static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, |
| 16932 | const ARMSubtarget *Subtarget) { |
| 16933 | if (SDValue S = PerformFAddVSelectCombine(N, DAG, Subtarget)) |
| 16934 | return S; |
| 16935 | if (SDValue S = PerformFADDVCMLACombine(N, DAG)) |
| 16936 | return S; |
| 16937 | return SDValue(); |
| 16938 | } |
| 16939 | |
| 16940 | /// PerformVMulVCTPCombine - VCVT (fixed-point to floating-point, Advanced SIMD) |
| 16941 | /// can replace combinations of VCVT (integer to floating-point) and VMUL |
| 16942 | /// when the VMUL has a constant operand that is a power of 2. |
| 16943 | /// |
| 16944 | /// Example (assume d17 = <float 0.125, float 0.125>): |
| 16945 | /// vcvt.f32.s32 d16, d16 |
| 16946 | /// vmul.f32 d16, d16, d17 |
| 16947 | /// becomes: |
| 16948 | /// vcvt.f32.s32 d16, d16, #3 |
| 16949 | static SDValue PerformVMulVCTPCombine(SDNode *N, SelectionDAG &DAG, |
| 16950 | const ARMSubtarget *Subtarget) { |
| 16951 | if (!Subtarget->hasNEON()) |
| 16952 | return SDValue(); |
| 16953 | |
| 16954 | SDValue Op = N->getOperand(Num: 0); |
| 16955 | unsigned OpOpcode = Op.getNode()->getOpcode(); |
| 16956 | if (!N->getValueType(ResNo: 0).isVector() || !N->getValueType(ResNo: 0).isSimple() || |
| 16957 | (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP)) |
| 16958 | return SDValue(); |
| 16959 | |
| 16960 | SDValue ConstVec = N->getOperand(Num: 1); |
| 16961 | if (!isa<BuildVectorSDNode>(Val: ConstVec)) |
| 16962 | return SDValue(); |
| 16963 | |
| 16964 | MVT FloatTy = N->getSimpleValueType(ResNo: 0).getVectorElementType(); |
| 16965 | uint32_t FloatBits = FloatTy.getSizeInBits(); |
| 16966 | MVT IntTy = Op.getOperand(i: 0).getSimpleValueType().getVectorElementType(); |
| 16967 | uint32_t IntBits = IntTy.getSizeInBits(); |
| 16968 | unsigned NumLanes = Op.getValueType().getVectorNumElements(); |
| 16969 | if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) { |
| 16970 | // These instructions only exist converting from i32 to f32. We can handle |
| 16971 | // smaller integers by generating an extra extend, but larger ones would |
| 16972 | // be lossy. We also can't handle anything other than 2 or 4 lanes, since |
// these instructions only support v2i32/v4i32 types.
| 16974 | return SDValue(); |
| 16975 | } |
| 16976 | |
| 16977 | ConstantFPSDNode *CN = isConstOrConstSplatFP(N: ConstVec, AllowUndefs: true); |
| 16978 | APFloat Recip(0.0f); |
| 16979 | if (!CN || !CN->getValueAPF().getExactInverse(Inv: &Recip)) |
| 16980 | return SDValue(); |
| 16981 | |
| 16982 | bool IsExact; |
| 16983 | APSInt IntVal(33); |
| 16984 | if (Recip.convertToInteger(Result&: IntVal, RM: APFloat::rmTowardZero, IsExact: &IsExact) != |
| 16985 | APFloat::opOK || |
| 16986 | !IsExact) |
| 16987 | return SDValue(); |
| 16988 | |
| 16989 | int32_t C = IntVal.exactLogBase2(); |
| 16990 | if (C == -1 || C == 0 || C > 32) |
| 16991 | return SDValue(); |
| 16992 | |
| 16993 | SDLoc DL(N); |
| 16994 | bool isSigned = OpOpcode == ISD::SINT_TO_FP; |
| 16995 | SDValue ConvInput = Op.getOperand(i: 0); |
| 16996 | if (IntBits < FloatBits) |
| 16997 | ConvInput = DAG.getNode(Opcode: isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL, |
| 16998 | VT: NumLanes == 2 ? MVT::v2i32 : MVT::v4i32, Operand: ConvInput); |
| 16999 | |
| 17000 | unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp |
| 17001 | : Intrinsic::arm_neon_vcvtfxu2fp; |
| 17002 | return DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT: Op.getValueType(), |
| 17003 | N1: DAG.getConstant(Val: IntrinsicOpcode, DL, VT: MVT::i32), N2: ConvInput, |
| 17004 | N3: DAG.getConstant(Val: C, DL, VT: MVT::i32)); |
| 17005 | } |
| 17006 | |
| 17007 | static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG, |
| 17008 | const ARMSubtarget *ST) { |
| 17009 | if (!ST->hasMVEIntegerOps()) |
| 17010 | return SDValue(); |
| 17011 | |
| 17012 | assert(N->getOpcode() == ISD::VECREDUCE_ADD); |
| 17013 | EVT ResVT = N->getValueType(ResNo: 0); |
| 17014 | SDValue N0 = N->getOperand(Num: 0); |
| 17015 | SDLoc dl(N); |
| 17016 | |
| 17017 | // Try to turn vecreduce_add(add(x, y)) into vecreduce(x) + vecreduce(y) |
| 17018 | if (ResVT == MVT::i32 && N0.getOpcode() == ISD::ADD && |
| 17019 | (N0.getValueType() == MVT::v4i32 || N0.getValueType() == MVT::v8i16 || |
| 17020 | N0.getValueType() == MVT::v16i8)) { |
| 17021 | SDValue Red0 = DAG.getNode(Opcode: ISD::VECREDUCE_ADD, DL: dl, VT: ResVT, Operand: N0.getOperand(i: 0)); |
| 17022 | SDValue Red1 = DAG.getNode(Opcode: ISD::VECREDUCE_ADD, DL: dl, VT: ResVT, Operand: N0.getOperand(i: 1)); |
| 17023 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: ResVT, N1: Red0, N2: Red1); |
| 17024 | } |
| 17025 | |
| 17026 | // We are looking for something that will have illegal types if left alone, |
| 17027 | // but that we can convert to a single instruction under MVE. For example |
| 17028 | // vecreduce_add(sext(A, v8i32)) => VADDV.s16 A |
| 17029 | // or |
| 17030 | // vecreduce_add(mul(zext(A, v16i32), zext(B, v16i32))) => VMLADAV.u8 A, B |
| 17031 | |
| 17032 | // The legal cases are: |
| 17033 | // VADDV u/s 8/16/32 |
| 17034 | // VMLAV u/s 8/16/32 |
| 17035 | // VADDLV u/s 32 |
| 17036 | // VMLALV u/s 16/32 |
| 17037 | |
| 17038 | // If the input vector is smaller than legal (v4i8/v4i16 for example) we can |
| 17039 | // extend it and use v4i32 instead. |
| 17040 | auto ExtTypeMatches = [](SDValue A, ArrayRef<MVT> ExtTypes) { |
| 17041 | EVT AVT = A.getValueType(); |
| 17042 | return any_of(Range&: ExtTypes, P: [&](MVT Ty) { |
| 17043 | return AVT.getVectorNumElements() == Ty.getVectorNumElements() && |
| 17044 | AVT.bitsLE(VT: Ty); |
| 17045 | }); |
| 17046 | }; |
| 17047 | auto ExtendIfNeeded = [&](SDValue A, unsigned ExtendCode) { |
| 17048 | EVT AVT = A.getValueType(); |
| 17049 | if (!AVT.is128BitVector()) |
| 17050 | A = DAG.getNode( |
| 17051 | Opcode: ExtendCode, DL: dl, |
| 17052 | VT: AVT.changeVectorElementType( |
| 17053 | Context&: *DAG.getContext(), |
| 17054 | EltVT: MVT::getIntegerVT(BitWidth: 128 / AVT.getVectorMinNumElements())), |
| 17055 | Operand: A); |
| 17056 | return A; |
| 17057 | }; |
| 17058 | auto IsVADDV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes) { |
| 17059 | if (ResVT != RetTy || N0->getOpcode() != ExtendCode) |
| 17060 | return SDValue(); |
| 17061 | SDValue A = N0->getOperand(Num: 0); |
| 17062 | if (ExtTypeMatches(A, ExtTypes)) |
| 17063 | return ExtendIfNeeded(A, ExtendCode); |
| 17064 | return SDValue(); |
| 17065 | }; |
| 17066 | auto IsPredVADDV = [&](MVT RetTy, unsigned ExtendCode, |
| 17067 | ArrayRef<MVT> ExtTypes, SDValue &Mask) { |
| 17068 | if (ResVT != RetTy || N0->getOpcode() != ISD::VSELECT || |
| 17069 | !ISD::isBuildVectorAllZeros(N: N0->getOperand(Num: 2).getNode())) |
| 17070 | return SDValue(); |
| 17071 | Mask = N0->getOperand(Num: 0); |
| 17072 | SDValue Ext = N0->getOperand(Num: 1); |
| 17073 | if (Ext->getOpcode() != ExtendCode) |
| 17074 | return SDValue(); |
| 17075 | SDValue A = Ext->getOperand(Num: 0); |
| 17076 | if (ExtTypeMatches(A, ExtTypes)) |
| 17077 | return ExtendIfNeeded(A, ExtendCode); |
| 17078 | return SDValue(); |
| 17079 | }; |
| 17080 | auto IsVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes, |
| 17081 | SDValue &A, SDValue &B) { |
| 17082 | // For a vmla we are trying to match a larger pattern: |
| 17083 | // ExtA = sext/zext A |
| 17084 | // ExtB = sext/zext B |
| 17085 | // Mul = mul ExtA, ExtB |
| 17086 | // vecreduce.add Mul |
// There might also be an extra extend between the mul and the addreduce, so
| 17088 | // long as the bitwidth is high enough to make them equivalent (for example |
| 17089 | // original v8i16 might be mul at v8i32 and the reduce happens at v8i64). |
| 17090 | if (ResVT != RetTy) |
| 17091 | return false; |
| 17092 | SDValue Mul = N0; |
| 17093 | if (Mul->getOpcode() == ExtendCode && |
| 17094 | Mul->getOperand(Num: 0).getScalarValueSizeInBits() * 2 >= |
| 17095 | ResVT.getScalarSizeInBits()) |
| 17096 | Mul = Mul->getOperand(Num: 0); |
| 17097 | if (Mul->getOpcode() != ISD::MUL) |
| 17098 | return false; |
| 17099 | SDValue ExtA = Mul->getOperand(Num: 0); |
| 17100 | SDValue ExtB = Mul->getOperand(Num: 1); |
| 17101 | if (ExtA->getOpcode() != ExtendCode || ExtB->getOpcode() != ExtendCode) |
| 17102 | return false; |
| 17103 | A = ExtA->getOperand(Num: 0); |
| 17104 | B = ExtB->getOperand(Num: 0); |
| 17105 | if (ExtTypeMatches(A, ExtTypes) && ExtTypeMatches(B, ExtTypes)) { |
| 17106 | A = ExtendIfNeeded(A, ExtendCode); |
| 17107 | B = ExtendIfNeeded(B, ExtendCode); |
| 17108 | return true; |
| 17109 | } |
| 17110 | return false; |
| 17111 | }; |
| 17112 | auto IsPredVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes, |
| 17113 | SDValue &A, SDValue &B, SDValue &Mask) { |
| 17114 | // Same as the pattern above with a select for the zero predicated lanes |
| 17115 | // ExtA = sext/zext A |
| 17116 | // ExtB = sext/zext B |
| 17117 | // Mul = mul ExtA, ExtB |
| 17118 | // N0 = select Mask, Mul, 0 |
| 17119 | // vecreduce.add N0 |
| 17120 | if (ResVT != RetTy || N0->getOpcode() != ISD::VSELECT || |
| 17121 | !ISD::isBuildVectorAllZeros(N: N0->getOperand(Num: 2).getNode())) |
| 17122 | return false; |
| 17123 | Mask = N0->getOperand(Num: 0); |
| 17124 | SDValue Mul = N0->getOperand(Num: 1); |
| 17125 | if (Mul->getOpcode() == ExtendCode && |
| 17126 | Mul->getOperand(Num: 0).getScalarValueSizeInBits() * 2 >= |
| 17127 | ResVT.getScalarSizeInBits()) |
| 17128 | Mul = Mul->getOperand(Num: 0); |
| 17129 | if (Mul->getOpcode() != ISD::MUL) |
| 17130 | return false; |
| 17131 | SDValue ExtA = Mul->getOperand(Num: 0); |
| 17132 | SDValue ExtB = Mul->getOperand(Num: 1); |
| 17133 | if (ExtA->getOpcode() != ExtendCode || ExtB->getOpcode() != ExtendCode) |
| 17134 | return false; |
| 17135 | A = ExtA->getOperand(Num: 0); |
| 17136 | B = ExtB->getOperand(Num: 0); |
| 17137 | if (ExtTypeMatches(A, ExtTypes) && ExtTypeMatches(B, ExtTypes)) { |
| 17138 | A = ExtendIfNeeded(A, ExtendCode); |
| 17139 | B = ExtendIfNeeded(B, ExtendCode); |
| 17140 | return true; |
| 17141 | } |
| 17142 | return false; |
| 17143 | }; |
| 17144 | auto Create64bitNode = [&](unsigned Opcode, ArrayRef<SDValue> Ops) { |
| 17145 | // Split illegal MVT::v16i8->i64 vector reductions into two legal v8i16->i64 |
| 17146 | // reductions. The operands are extended with MVEEXT, but as they are |
| 17147 | // reductions the lane orders do not matter. MVEEXT may be combined with |
| 17148 | // loads to produce two extending loads, or else they will be expanded to |
| 17149 | // VREV/VMOVL. |
| 17150 | EVT VT = Ops[0].getValueType(); |
| 17151 | if (VT == MVT::v16i8) { |
| 17152 | assert((Opcode == ARMISD::VMLALVs || Opcode == ARMISD::VMLALVu) && |
| 17153 | "Unexpected illegal long reduction opcode" ); |
| 17154 | bool IsUnsigned = Opcode == ARMISD::VMLALVu; |
| 17155 | |
| 17156 | SDValue Ext0 = |
| 17157 | DAG.getNode(Opcode: IsUnsigned ? ARMISD::MVEZEXT : ARMISD::MVESEXT, DL: dl, |
| 17158 | VTList: DAG.getVTList(VT1: MVT::v8i16, VT2: MVT::v8i16), N: Ops[0]); |
| 17159 | SDValue Ext1 = |
| 17160 | DAG.getNode(Opcode: IsUnsigned ? ARMISD::MVEZEXT : ARMISD::MVESEXT, DL: dl, |
| 17161 | VTList: DAG.getVTList(VT1: MVT::v8i16, VT2: MVT::v8i16), N: Ops[1]); |
| 17162 | |
| 17163 | SDValue MLA0 = DAG.getNode(Opcode, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
| 17164 | N1: Ext0, N2: Ext1); |
| 17165 | SDValue MLA1 = |
| 17166 | DAG.getNode(Opcode: IsUnsigned ? ARMISD::VMLALVAu : ARMISD::VMLALVAs, DL: dl, |
| 17167 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N1: MLA0, N2: MLA0.getValue(R: 1), |
| 17168 | N3: Ext0.getValue(R: 1), N4: Ext1.getValue(R: 1)); |
| 17169 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: MLA1, N2: MLA1.getValue(R: 1)); |
| 17170 | } |
| 17171 | SDValue Node = DAG.getNode(Opcode, DL: dl, ResultTys: {MVT::i32, MVT::i32}, Ops); |
| 17172 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Node, |
| 17173 | N2: SDValue(Node.getNode(), 1)); |
| 17174 | }; |
| 17175 | |
| 17176 | SDValue A, B; |
| 17177 | SDValue Mask; |
| 17178 | if (IsVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B)) |
| 17179 | return DAG.getNode(Opcode: ARMISD::VMLAVs, DL: dl, VT: ResVT, N1: A, N2: B); |
| 17180 | if (IsVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B)) |
| 17181 | return DAG.getNode(Opcode: ARMISD::VMLAVu, DL: dl, VT: ResVT, N1: A, N2: B); |
| 17182 | if (IsVMLAV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v16i8, MVT::v8i16, MVT::v4i32}, |
| 17183 | A, B)) |
| 17184 | return Create64bitNode(ARMISD::VMLALVs, {A, B}); |
| 17185 | if (IsVMLAV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v16i8, MVT::v8i16, MVT::v4i32}, |
| 17186 | A, B)) |
| 17187 | return Create64bitNode(ARMISD::VMLALVu, {A, B}); |
| 17188 | if (IsVMLAV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, A, B)) |
| 17189 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17190 | Operand: DAG.getNode(Opcode: ARMISD::VMLAVs, DL: dl, VT: MVT::i32, N1: A, N2: B)); |
| 17191 | if (IsVMLAV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, A, B)) |
| 17192 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17193 | Operand: DAG.getNode(Opcode: ARMISD::VMLAVu, DL: dl, VT: MVT::i32, N1: A, N2: B)); |
| 17194 | |
| 17195 | if (IsPredVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B, |
| 17196 | Mask)) |
| 17197 | return DAG.getNode(Opcode: ARMISD::VMLAVps, DL: dl, VT: ResVT, N1: A, N2: B, N3: Mask); |
| 17198 | if (IsPredVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B, |
| 17199 | Mask)) |
| 17200 | return DAG.getNode(Opcode: ARMISD::VMLAVpu, DL: dl, VT: ResVT, N1: A, N2: B, N3: Mask); |
| 17201 | if (IsPredVMLAV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B, |
| 17202 | Mask)) |
| 17203 | return Create64bitNode(ARMISD::VMLALVps, {A, B, Mask}); |
| 17204 | if (IsPredVMLAV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B, |
| 17205 | Mask)) |
| 17206 | return Create64bitNode(ARMISD::VMLALVpu, {A, B, Mask}); |
| 17207 | if (IsPredVMLAV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, A, B, Mask)) |
| 17208 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17209 | Operand: DAG.getNode(Opcode: ARMISD::VMLAVps, DL: dl, VT: MVT::i32, N1: A, N2: B, N3: Mask)); |
| 17210 | if (IsPredVMLAV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, A, B, Mask)) |
| 17211 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17212 | Operand: DAG.getNode(Opcode: ARMISD::VMLAVpu, DL: dl, VT: MVT::i32, N1: A, N2: B, N3: Mask)); |
| 17213 | |
| 17214 | if (SDValue A = IsVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8})) |
| 17215 | return DAG.getNode(Opcode: ARMISD::VADDVs, DL: dl, VT: ResVT, Operand: A); |
| 17216 | if (SDValue A = IsVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8})) |
| 17217 | return DAG.getNode(Opcode: ARMISD::VADDVu, DL: dl, VT: ResVT, Operand: A); |
| 17218 | if (SDValue A = IsVADDV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v4i32})) |
| 17219 | return Create64bitNode(ARMISD::VADDLVs, {A}); |
| 17220 | if (SDValue A = IsVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32})) |
| 17221 | return Create64bitNode(ARMISD::VADDLVu, {A}); |
| 17222 | if (SDValue A = IsVADDV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8})) |
| 17223 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17224 | Operand: DAG.getNode(Opcode: ARMISD::VADDVs, DL: dl, VT: MVT::i32, Operand: A)); |
| 17225 | if (SDValue A = IsVADDV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8})) |
| 17226 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17227 | Operand: DAG.getNode(Opcode: ARMISD::VADDVu, DL: dl, VT: MVT::i32, Operand: A)); |
| 17228 | |
| 17229 | if (SDValue A = IsPredVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, Mask)) |
| 17230 | return DAG.getNode(Opcode: ARMISD::VADDVps, DL: dl, VT: ResVT, N1: A, N2: Mask); |
| 17231 | if (SDValue A = IsPredVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, Mask)) |
| 17232 | return DAG.getNode(Opcode: ARMISD::VADDVpu, DL: dl, VT: ResVT, N1: A, N2: Mask); |
| 17233 | if (SDValue A = IsPredVADDV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v4i32}, Mask)) |
| 17234 | return Create64bitNode(ARMISD::VADDLVps, {A, Mask}); |
| 17235 | if (SDValue A = IsPredVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32}, Mask)) |
| 17236 | return Create64bitNode(ARMISD::VADDLVpu, {A, Mask}); |
| 17237 | if (SDValue A = IsPredVADDV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, Mask)) |
| 17238 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17239 | Operand: DAG.getNode(Opcode: ARMISD::VADDVps, DL: dl, VT: MVT::i32, N1: A, N2: Mask)); |
| 17240 | if (SDValue A = IsPredVADDV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, Mask)) |
| 17241 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
| 17242 | Operand: DAG.getNode(Opcode: ARMISD::VADDVpu, DL: dl, VT: MVT::i32, N1: A, N2: Mask)); |
| 17243 | |
| 17244 | // Some complications. We can get a case where the two inputs of the mul are |
| 17245 | // the same, then the output sext will have been helpfully converted to a |
| 17246 | // zext. Turn it back. |
| 17247 | SDValue Op = N0; |
| 17248 | if (Op->getOpcode() == ISD::VSELECT) |
| 17249 | Op = Op->getOperand(Num: 1); |
| 17250 | if (Op->getOpcode() == ISD::ZERO_EXTEND && |
| 17251 | Op->getOperand(Num: 0)->getOpcode() == ISD::MUL) { |
| 17252 | SDValue Mul = Op->getOperand(Num: 0); |
| 17253 | if (Mul->getOperand(Num: 0) == Mul->getOperand(Num: 1) && |
| 17254 | Mul->getOperand(Num: 0)->getOpcode() == ISD::SIGN_EXTEND) { |
| 17255 | SDValue Ext = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: N0->getValueType(ResNo: 0), Operand: Mul); |
| 17256 | if (Op != N0) |
| 17257 | Ext = DAG.getNode(Opcode: ISD::VSELECT, DL: dl, VT: N0->getValueType(ResNo: 0), |
| 17258 | N1: N0->getOperand(Num: 0), N2: Ext, N3: N0->getOperand(Num: 2)); |
| 17259 | return DAG.getNode(Opcode: ISD::VECREDUCE_ADD, DL: dl, VT: ResVT, Operand: Ext); |
| 17260 | } |
| 17261 | } |
| 17262 | |
| 17263 | return SDValue(); |
| 17264 | } |
| 17265 | |
| 17266 | // Looks for vaddv(shuffle) or vmlav(shuffle, shuffle), with a shuffle where all |
| 17267 | // the lanes are used. Due to the reduction being commutative the shuffle can be |
| 17268 | // removed. |
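// e.g. vaddv(shuffle x, <3,2,1,0>) -> vaddv(x), provided the mask uses every
// lane exactly once.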
| 17269 | static SDValue PerformReduceShuffleCombine(SDNode *N, SelectionDAG &DAG) { |
| 17270 | unsigned VecOp = N->getOperand(Num: 0).getValueType().isVector() ? 0 : 2; |
| 17271 | auto *Shuf = dyn_cast<ShuffleVectorSDNode>(Val: N->getOperand(Num: VecOp)); |
| 17272 | if (!Shuf || !Shuf->getOperand(Num: 1).isUndef()) |
| 17273 | return SDValue(); |
| 17274 | |
| 17275 | // Check all elements are used once in the mask. |
| 17276 | ArrayRef<int> Mask = Shuf->getMask(); |
| 17277 | APInt SetElts(Mask.size(), 0); |
| 17278 | for (int E : Mask) { |
| 17279 | if (E < 0 || E >= (int)Mask.size()) |
| 17280 | return SDValue(); |
| 17281 | SetElts.setBit(E); |
| 17282 | } |
| 17283 | if (!SetElts.isAllOnes()) |
| 17284 | return SDValue(); |
| 17285 | |
| 17286 | if (N->getNumOperands() != VecOp + 1) { |
| 17287 | auto *Shuf2 = dyn_cast<ShuffleVectorSDNode>(Val: N->getOperand(Num: VecOp + 1)); |
| 17288 | if (!Shuf2 || !Shuf2->getOperand(Num: 1).isUndef() || Shuf2->getMask() != Mask) |
| 17289 | return SDValue(); |
| 17290 | } |
| 17291 | |
| 17292 | SmallVector<SDValue> Ops; |
| 17293 | for (SDValue Op : N->ops()) { |
| 17294 | if (Op.getValueType().isVector()) |
| 17295 | Ops.push_back(Elt: Op.getOperand(i: 0)); |
| 17296 | else |
| 17297 | Ops.push_back(Elt: Op); |
| 17298 | } |
| 17299 | return DAG.getNode(Opcode: N->getOpcode(), DL: SDLoc(N), VTList: N->getVTList(), Ops); |
| 17300 | } |
| 17301 | |
| 17302 | static SDValue PerformVMOVNCombine(SDNode *N, |
| 17303 | TargetLowering::DAGCombinerInfo &DCI) { |
| 17304 | SDValue Op0 = N->getOperand(Num: 0); |
| 17305 | SDValue Op1 = N->getOperand(Num: 1); |
| 17306 | unsigned IsTop = N->getConstantOperandVal(Num: 2); |
| 17307 | |
| 17308 | // VMOVNT a undef -> a |
| 17309 | // VMOVNB a undef -> a |
| 17310 | // VMOVNB undef a -> a |
| 17311 | if (Op1->isUndef()) |
| 17312 | return Op0; |
| 17313 | if (Op0->isUndef() && !IsTop) |
| 17314 | return Op1; |
| 17315 | |
| 17316 | // VMOVNt(c, VQMOVNb(a, b)) => VQMOVNt(c, b) |
| 17317 | // VMOVNb(c, VQMOVNb(a, b)) => VQMOVNb(c, b) |
| 17318 | if ((Op1->getOpcode() == ARMISD::VQMOVNs || |
| 17319 | Op1->getOpcode() == ARMISD::VQMOVNu) && |
| 17320 | Op1->getConstantOperandVal(Num: 2) == 0) |
| 17321 | return DCI.DAG.getNode(Opcode: Op1->getOpcode(), DL: SDLoc(Op1), VT: N->getValueType(ResNo: 0), |
| 17322 | N1: Op0, N2: Op1->getOperand(Num: 1), N3: N->getOperand(Num: 2)); |
| 17323 | |
| 17324 | // Only the bottom lanes from Qm (Op1) and either the top or bottom lanes from |
| 17325 | // Qd (Op0) are demanded from a VMOVN, depending on whether we are inserting |
| 17326 | // into the top or bottom lanes. |
| 17327 | unsigned NumElts = N->getValueType(ResNo: 0).getVectorNumElements(); |
| 17328 | APInt Op1DemandedElts = APInt::getSplat(NewLen: NumElts, V: APInt::getLowBitsSet(numBits: 2, loBitsSet: 1)); |
| 17329 | APInt Op0DemandedElts = |
| 17330 | IsTop ? Op1DemandedElts |
| 17331 | : APInt::getSplat(NewLen: NumElts, V: APInt::getHighBitsSet(numBits: 2, hiBitsSet: 1)); |
| 17332 | |
| 17333 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); |
| 17334 | if (TLI.SimplifyDemandedVectorElts(Op: Op0, DemandedElts: Op0DemandedElts, DCI)) |
| 17335 | return SDValue(N, 0); |
| 17336 | if (TLI.SimplifyDemandedVectorElts(Op: Op1, DemandedElts: Op1DemandedElts, DCI)) |
| 17337 | return SDValue(N, 0); |
| 17338 | |
| 17339 | return SDValue(); |
| 17340 | } |
| 17341 | |
| 17342 | static SDValue PerformVQMOVNCombine(SDNode *N, |
| 17343 | TargetLowering::DAGCombinerInfo &DCI) { |
| 17344 | SDValue Op0 = N->getOperand(Num: 0); |
| 17345 | unsigned IsTop = N->getConstantOperandVal(Num: 2); |
| 17346 | |
| 17347 | unsigned NumElts = N->getValueType(ResNo: 0).getVectorNumElements(); |
| 17348 | APInt Op0DemandedElts = |
| 17349 | APInt::getSplat(NewLen: NumElts, V: IsTop ? APInt::getLowBitsSet(numBits: 2, loBitsSet: 1) |
| 17350 | : APInt::getHighBitsSet(numBits: 2, hiBitsSet: 1)); |
| 17351 | |
| 17352 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); |
| 17353 | if (TLI.SimplifyDemandedVectorElts(Op: Op0, DemandedElts: Op0DemandedElts, DCI)) |
| 17354 | return SDValue(N, 0); |
| 17355 | return SDValue(); |
| 17356 | } |
| 17357 | |
| 17358 | static SDValue PerformVQDMULHCombine(SDNode *N, |
| 17359 | TargetLowering::DAGCombinerInfo &DCI) { |
| 17360 | EVT VT = N->getValueType(ResNo: 0); |
| 17361 | SDValue LHS = N->getOperand(Num: 0); |
| 17362 | SDValue RHS = N->getOperand(Num: 1); |
| 17363 | |
| 17364 | auto *Shuf0 = dyn_cast<ShuffleVectorSDNode>(Val&: LHS); |
| 17365 | auto *Shuf1 = dyn_cast<ShuffleVectorSDNode>(Val&: RHS); |
| 17366 | // Turn VQDMULH(shuffle, shuffle) -> shuffle(VQDMULH) |
| 17367 | if (Shuf0 && Shuf1 && Shuf0->getMask().equals(RHS: Shuf1->getMask()) && |
| 17368 | LHS.getOperand(i: 1).isUndef() && RHS.getOperand(i: 1).isUndef() && |
| 17369 | (LHS.hasOneUse() || RHS.hasOneUse() || LHS == RHS)) { |
| 17370 | SDLoc DL(N); |
| 17371 | SDValue NewBinOp = DCI.DAG.getNode(Opcode: N->getOpcode(), DL, VT, |
| 17372 | N1: LHS.getOperand(i: 0), N2: RHS.getOperand(i: 0)); |
| 17373 | SDValue UndefV = LHS.getOperand(i: 1); |
| 17374 | return DCI.DAG.getVectorShuffle(VT, dl: DL, N1: NewBinOp, N2: UndefV, Mask: Shuf0->getMask()); |
| 17375 | } |
| 17376 | return SDValue(); |
| 17377 | } |
| 17378 | |
| 17379 | static SDValue PerformLongShiftCombine(SDNode *N, SelectionDAG &DAG) { |
| 17380 | SDLoc DL(N); |
| 17381 | SDValue Op0 = N->getOperand(Num: 0); |
| 17382 | SDValue Op1 = N->getOperand(Num: 1); |
| 17383 | |
// Turn X << -C -> X >> C and vice versa. The negative shifts can come up from
| 17385 | // uses of the intrinsics. |
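// For example (illustrative): LSLL(lo, hi, -3) is rewritten as LSRL(lo, hi, 3),
// and a shift amount of 0 simply forwards the two inputs unchanged.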
| 17386 | if (auto C = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 2))) { |
| 17387 | int ShiftAmt = C->getSExtValue(); |
| 17388 | if (ShiftAmt == 0) { |
| 17389 | SDValue Merge = DAG.getMergeValues(Ops: {Op0, Op1}, dl: DL); |
| 17390 | DAG.ReplaceAllUsesWith(From: N, To: Merge.getNode()); |
| 17391 | return SDValue(); |
| 17392 | } |
| 17393 | |
| 17394 | if (ShiftAmt >= -32 && ShiftAmt < 0) { |
| 17395 | unsigned NewOpcode = |
| 17396 | N->getOpcode() == ARMISD::LSLL ? ARMISD::LSRL : ARMISD::LSLL; |
| 17397 | SDValue NewShift = DAG.getNode(Opcode: NewOpcode, DL, VTList: N->getVTList(), N1: Op0, N2: Op1, |
| 17398 | N3: DAG.getConstant(Val: -ShiftAmt, DL, VT: MVT::i32)); |
| 17399 | DAG.ReplaceAllUsesWith(From: N, To: NewShift.getNode()); |
| 17400 | return NewShift; |
| 17401 | } |
| 17402 | } |
| 17403 | |
| 17404 | return SDValue(); |
| 17405 | } |
| 17406 | |
| 17407 | /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. |
| 17408 | SDValue ARMTargetLowering::PerformIntrinsicCombine(SDNode *N, |
| 17409 | DAGCombinerInfo &DCI) const { |
| 17410 | SelectionDAG &DAG = DCI.DAG; |
| 17411 | unsigned IntNo = N->getConstantOperandVal(Num: 0); |
| 17412 | switch (IntNo) { |
| 17413 | default: |
| 17414 | // Don't do anything for most intrinsics. |
| 17415 | break; |
| 17416 | |
| 17417 | // Vector shifts: check for immediate versions and lower them. |
| 17418 | // Note: This is done during DAG combining instead of DAG legalizing because |
| 17419 | // the build_vectors for 64-bit vector element shift counts are generally |
| 17420 | // not legal, and it is hard to see their values after they get legalized to |
| 17421 | // loads from a constant pool. |
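// For example (illustrative): a vshifts/vshiftu whose shift-count operand is a
// constant splat of +3 becomes a single VSHLIMM node with immediate 3, whereas
// a splat of -3 becomes a VSHRsIMM/VSHRuIMM with immediate 3.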
| 17422 | case Intrinsic::arm_neon_vshifts: |
| 17423 | case Intrinsic::arm_neon_vshiftu: |
| 17424 | case Intrinsic::arm_neon_vrshifts: |
| 17425 | case Intrinsic::arm_neon_vrshiftu: |
| 17426 | case Intrinsic::arm_neon_vrshiftn: |
| 17427 | case Intrinsic::arm_neon_vqshifts: |
| 17428 | case Intrinsic::arm_neon_vqshiftu: |
| 17429 | case Intrinsic::arm_neon_vqshiftsu: |
| 17430 | case Intrinsic::arm_neon_vqshiftns: |
| 17431 | case Intrinsic::arm_neon_vqshiftnu: |
| 17432 | case Intrinsic::arm_neon_vqshiftnsu: |
| 17433 | case Intrinsic::arm_neon_vqrshiftns: |
| 17434 | case Intrinsic::arm_neon_vqrshiftnu: |
| 17435 | case Intrinsic::arm_neon_vqrshiftnsu: { |
| 17436 | EVT VT = N->getOperand(Num: 1).getValueType(); |
| 17437 | int64_t Cnt; |
| 17438 | unsigned VShiftOpc = 0; |
| 17439 | |
| 17440 | switch (IntNo) { |
| 17441 | case Intrinsic::arm_neon_vshifts: |
| 17442 | case Intrinsic::arm_neon_vshiftu: |
| 17443 | if (isVShiftLImm(Op: N->getOperand(Num: 2), VT, isLong: false, Cnt)) { |
| 17444 | VShiftOpc = ARMISD::VSHLIMM; |
| 17445 | break; |
| 17446 | } |
| 17447 | if (isVShiftRImm(Op: N->getOperand(Num: 2), VT, isNarrow: false, isIntrinsic: true, Cnt)) { |
| 17448 | VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? ARMISD::VSHRsIMM |
| 17449 | : ARMISD::VSHRuIMM); |
| 17450 | break; |
| 17451 | } |
| 17452 | return SDValue(); |
| 17453 | |
| 17454 | case Intrinsic::arm_neon_vrshifts: |
| 17455 | case Intrinsic::arm_neon_vrshiftu: |
| 17456 | if (isVShiftRImm(Op: N->getOperand(Num: 2), VT, isNarrow: false, isIntrinsic: true, Cnt)) |
| 17457 | break; |
| 17458 | return SDValue(); |
| 17459 | |
| 17460 | case Intrinsic::arm_neon_vqshifts: |
| 17461 | case Intrinsic::arm_neon_vqshiftu: |
| 17462 | if (isVShiftLImm(Op: N->getOperand(Num: 2), VT, isLong: false, Cnt)) |
| 17463 | break; |
| 17464 | return SDValue(); |
| 17465 | |
| 17466 | case Intrinsic::arm_neon_vqshiftsu: |
| 17467 | if (isVShiftLImm(Op: N->getOperand(Num: 2), VT, isLong: false, Cnt)) |
| 17468 | break; |
llvm_unreachable("invalid shift count for vqshlu intrinsic");
| 17470 | |
| 17471 | case Intrinsic::arm_neon_vrshiftn: |
| 17472 | case Intrinsic::arm_neon_vqshiftns: |
| 17473 | case Intrinsic::arm_neon_vqshiftnu: |
| 17474 | case Intrinsic::arm_neon_vqshiftnsu: |
| 17475 | case Intrinsic::arm_neon_vqrshiftns: |
| 17476 | case Intrinsic::arm_neon_vqrshiftnu: |
| 17477 | case Intrinsic::arm_neon_vqrshiftnsu: |
| 17478 | // Narrowing shifts require an immediate right shift. |
| 17479 | if (isVShiftRImm(Op: N->getOperand(Num: 2), VT, isNarrow: true, isIntrinsic: true, Cnt)) |
| 17480 | break; |
llvm_unreachable("invalid shift count for narrowing vector shift "
"intrinsic");
| 17483 | |
| 17484 | default: |
| 17485 | llvm_unreachable("unhandled vector shift" ); |
| 17486 | } |
| 17487 | |
| 17488 | switch (IntNo) { |
| 17489 | case Intrinsic::arm_neon_vshifts: |
| 17490 | case Intrinsic::arm_neon_vshiftu: |
| 17491 | // Opcode already set above. |
| 17492 | break; |
| 17493 | case Intrinsic::arm_neon_vrshifts: |
| 17494 | VShiftOpc = ARMISD::VRSHRsIMM; |
| 17495 | break; |
| 17496 | case Intrinsic::arm_neon_vrshiftu: |
| 17497 | VShiftOpc = ARMISD::VRSHRuIMM; |
| 17498 | break; |
| 17499 | case Intrinsic::arm_neon_vrshiftn: |
| 17500 | VShiftOpc = ARMISD::VRSHRNIMM; |
| 17501 | break; |
| 17502 | case Intrinsic::arm_neon_vqshifts: |
| 17503 | VShiftOpc = ARMISD::VQSHLsIMM; |
| 17504 | break; |
| 17505 | case Intrinsic::arm_neon_vqshiftu: |
| 17506 | VShiftOpc = ARMISD::VQSHLuIMM; |
| 17507 | break; |
| 17508 | case Intrinsic::arm_neon_vqshiftsu: |
| 17509 | VShiftOpc = ARMISD::VQSHLsuIMM; |
| 17510 | break; |
| 17511 | case Intrinsic::arm_neon_vqshiftns: |
| 17512 | VShiftOpc = ARMISD::VQSHRNsIMM; |
| 17513 | break; |
| 17514 | case Intrinsic::arm_neon_vqshiftnu: |
| 17515 | VShiftOpc = ARMISD::VQSHRNuIMM; |
| 17516 | break; |
| 17517 | case Intrinsic::arm_neon_vqshiftnsu: |
| 17518 | VShiftOpc = ARMISD::VQSHRNsuIMM; |
| 17519 | break; |
| 17520 | case Intrinsic::arm_neon_vqrshiftns: |
| 17521 | VShiftOpc = ARMISD::VQRSHRNsIMM; |
| 17522 | break; |
| 17523 | case Intrinsic::arm_neon_vqrshiftnu: |
| 17524 | VShiftOpc = ARMISD::VQRSHRNuIMM; |
| 17525 | break; |
| 17526 | case Intrinsic::arm_neon_vqrshiftnsu: |
| 17527 | VShiftOpc = ARMISD::VQRSHRNsuIMM; |
| 17528 | break; |
| 17529 | } |
| 17530 | |
| 17531 | SDLoc dl(N); |
| 17532 | return DAG.getNode(Opcode: VShiftOpc, DL: dl, VT: N->getValueType(ResNo: 0), |
| 17533 | N1: N->getOperand(Num: 1), N2: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
| 17534 | } |
| 17535 | |
| 17536 | case Intrinsic::arm_neon_vshiftins: { |
| 17537 | EVT VT = N->getOperand(Num: 1).getValueType(); |
| 17538 | int64_t Cnt; |
| 17539 | unsigned VShiftOpc = 0; |
| 17540 | |
| 17541 | if (isVShiftLImm(Op: N->getOperand(Num: 3), VT, isLong: false, Cnt)) |
| 17542 | VShiftOpc = ARMISD::VSLIIMM; |
| 17543 | else if (isVShiftRImm(Op: N->getOperand(Num: 3), VT, isNarrow: false, isIntrinsic: true, Cnt)) |
| 17544 | VShiftOpc = ARMISD::VSRIIMM; |
| 17545 | else { |
llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
| 17547 | } |
| 17548 | |
| 17549 | SDLoc dl(N); |
| 17550 | return DAG.getNode(Opcode: VShiftOpc, DL: dl, VT: N->getValueType(ResNo: 0), |
| 17551 | N1: N->getOperand(Num: 1), N2: N->getOperand(Num: 2), |
| 17552 | N3: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
| 17553 | } |
| 17554 | |
| 17555 | case Intrinsic::arm_neon_vqrshifts: |
| 17556 | case Intrinsic::arm_neon_vqrshiftu: |
| 17557 | // No immediate versions of these to check for. |
| 17558 | break; |
| 17559 | |
| 17560 | case Intrinsic::arm_neon_vbsl: { |
| 17561 | SDLoc dl(N); |
| 17562 | return DAG.getNode(Opcode: ARMISD::VBSP, DL: dl, VT: N->getValueType(ResNo: 0), N1: N->getOperand(Num: 1), |
| 17563 | N2: N->getOperand(Num: 2), N3: N->getOperand(Num: 3)); |
| 17564 | } |
| 17565 | case Intrinsic::arm_mve_vqdmlah: |
| 17566 | case Intrinsic::arm_mve_vqdmlash: |
| 17567 | case Intrinsic::arm_mve_vqrdmlah: |
| 17568 | case Intrinsic::arm_mve_vqrdmlash: |
| 17569 | case Intrinsic::arm_mve_vmla_n_predicated: |
| 17570 | case Intrinsic::arm_mve_vmlas_n_predicated: |
| 17571 | case Intrinsic::arm_mve_vqdmlah_predicated: |
| 17572 | case Intrinsic::arm_mve_vqdmlash_predicated: |
| 17573 | case Intrinsic::arm_mve_vqrdmlah_predicated: |
| 17574 | case Intrinsic::arm_mve_vqrdmlash_predicated: { |
| 17575 | // These intrinsics all take an i32 scalar operand which is narrowed to the |
| 17576 | // size of a single lane of the vector type they return. So we don't need |
| 17577 | // any bits of that operand above that point, which allows us to eliminate |
| 17578 | // uxth/sxth. |
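// For example (illustrative): for a v8i16 vqdmlah only the low 16 bits of the
// i32 scalar are demanded, so a preceding sxth/uxth of that scalar can be
// dropped.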
| 17579 | unsigned BitWidth = N->getValueType(ResNo: 0).getScalarSizeInBits(); |
| 17580 | APInt DemandedMask = APInt::getLowBitsSet(numBits: 32, loBitsSet: BitWidth); |
| 17581 | if (SimplifyDemandedBits(Op: N->getOperand(Num: 3), DemandedBits: DemandedMask, DCI)) |
| 17582 | return SDValue(); |
| 17583 | break; |
| 17584 | } |
| 17585 | |
| 17586 | case Intrinsic::arm_mve_minv: |
| 17587 | case Intrinsic::arm_mve_maxv: |
| 17588 | case Intrinsic::arm_mve_minav: |
| 17589 | case Intrinsic::arm_mve_maxav: |
| 17590 | case Intrinsic::arm_mve_minv_predicated: |
| 17591 | case Intrinsic::arm_mve_maxv_predicated: |
| 17592 | case Intrinsic::arm_mve_minav_predicated: |
| 17593 | case Intrinsic::arm_mve_maxav_predicated: { |
| 17594 | // These intrinsics all take an i32 scalar operand which is narrowed to the |
| 17595 | // size of a single lane of the vector type they take as the other input. |
| 17596 | unsigned BitWidth = N->getOperand(Num: 2)->getValueType(ResNo: 0).getScalarSizeInBits(); |
| 17597 | APInt DemandedMask = APInt::getLowBitsSet(numBits: 32, loBitsSet: BitWidth); |
| 17598 | if (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI)) |
| 17599 | return SDValue(); |
| 17600 | break; |
| 17601 | } |
| 17602 | |
| 17603 | case Intrinsic::arm_mve_addv: { |
| 17604 | // Turn this intrinsic straight into the appropriate ARMISD::VADDV node, |
// which allows PerformADDVecReduce to turn it into VADDLV when possible.
| 17606 | bool Unsigned = N->getConstantOperandVal(Num: 2); |
| 17607 | unsigned Opc = Unsigned ? ARMISD::VADDVu : ARMISD::VADDVs; |
| 17608 | return DAG.getNode(Opcode: Opc, DL: SDLoc(N), VTList: N->getVTList(), N: N->getOperand(Num: 1)); |
| 17609 | } |
| 17610 | |
| 17611 | case Intrinsic::arm_mve_addlv: |
| 17612 | case Intrinsic::arm_mve_addlv_predicated: { |
| 17613 | // Same for these, but ARMISD::VADDLV has to be followed by a BUILD_PAIR |
// which recombines the two outputs into an i64.
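// For example (illustrative): i64 (arm.mve.addlv v4i32 %x, /*unsigned*/ 0)
// becomes BUILD_PAIR(VADDLVs(%x):0, VADDLVs(%x):1), gluing the two i32 halves
// of the 64-bit reduction back together.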
| 17615 | bool Unsigned = N->getConstantOperandVal(Num: 2); |
| 17616 | unsigned Opc = IntNo == Intrinsic::arm_mve_addlv ? |
| 17617 | (Unsigned ? ARMISD::VADDLVu : ARMISD::VADDLVs) : |
| 17618 | (Unsigned ? ARMISD::VADDLVpu : ARMISD::VADDLVps); |
| 17619 | |
| 17620 | SmallVector<SDValue, 4> Ops; |
| 17621 | for (unsigned i = 1, e = N->getNumOperands(); i < e; i++) |
| 17622 | if (i != 2) // skip the unsigned flag |
| 17623 | Ops.push_back(Elt: N->getOperand(Num: i)); |
| 17624 | |
| 17625 | SDLoc dl(N); |
| 17626 | SDValue val = DAG.getNode(Opcode: Opc, DL: dl, ResultTys: {MVT::i32, MVT::i32}, Ops); |
| 17627 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: val.getValue(R: 0), |
| 17628 | N2: val.getValue(R: 1)); |
| 17629 | } |
| 17630 | } |
| 17631 | |
| 17632 | return SDValue(); |
| 17633 | } |
| 17634 | |
| 17635 | /// PerformShiftCombine - Checks for immediate versions of vector shifts and |
| 17636 | /// lowers them. As with the vector shift intrinsics, this is done during DAG |
| 17637 | /// combining instead of DAG legalizing because the build_vectors for 64-bit |
| 17638 | /// vector element shift counts are generally not legal, and it is hard to see |
| 17639 | /// their values after they get legalized to loads from a constant pool. |
| 17640 | static SDValue PerformShiftCombine(SDNode *N, |
| 17641 | TargetLowering::DAGCombinerInfo &DCI, |
| 17642 | const ARMSubtarget *ST) { |
| 17643 | SelectionDAG &DAG = DCI.DAG; |
| 17644 | EVT VT = N->getValueType(ResNo: 0); |
| 17645 | |
| 17646 | if (ST->isThumb1Only() && N->getOpcode() == ISD::SHL && VT == MVT::i32 && |
| 17647 | N->getOperand(Num: 0)->getOpcode() == ISD::AND && |
| 17648 | N->getOperand(Num: 0)->hasOneUse()) { |
| 17649 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 17650 | return SDValue(); |
| 17651 | // Look for the pattern (shl (and x, AndMask), ShiftAmt). This doesn't |
| 17652 | // usually show up because instcombine prefers to canonicalize it to |
| 17653 | // (and (shl x, ShiftAmt) (shl AndMask, ShiftAmt)), but the shift can come |
| 17654 | // out of GEP lowering in some cases. |
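// For example (illustrative, Thumb1): (shl (and x, 0x7ff), 2) can instead be
// emitted as (srl (shl x, 21), 19), which avoids materialising the 0x7ff mask
// constant in a register.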
| 17655 | SDValue N0 = N->getOperand(Num: 0); |
| 17656 | ConstantSDNode *ShiftAmtNode = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1)); |
| 17657 | if (!ShiftAmtNode) |
| 17658 | return SDValue(); |
| 17659 | uint32_t ShiftAmt = static_cast<uint32_t>(ShiftAmtNode->getZExtValue()); |
| 17660 | ConstantSDNode *AndMaskNode = dyn_cast<ConstantSDNode>(Val: N0->getOperand(Num: 1)); |
| 17661 | if (!AndMaskNode) |
| 17662 | return SDValue(); |
| 17663 | uint32_t AndMask = static_cast<uint32_t>(AndMaskNode->getZExtValue()); |
| 17664 | // Don't transform uxtb/uxth. |
| 17665 | if (AndMask == 255 || AndMask == 65535) |
| 17666 | return SDValue(); |
| 17667 | if (isMask_32(Value: AndMask)) { |
| 17668 | uint32_t MaskedBits = llvm::countl_zero(Val: AndMask); |
| 17669 | if (MaskedBits > ShiftAmt) { |
| 17670 | SDLoc DL(N); |
| 17671 | SDValue SHL = DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
| 17672 | N2: DAG.getConstant(Val: MaskedBits, DL, VT: MVT::i32)); |
| 17673 | return DAG.getNode( |
| 17674 | Opcode: ISD::SRL, DL, VT: MVT::i32, N1: SHL, |
| 17675 | N2: DAG.getConstant(Val: MaskedBits - ShiftAmt, DL, VT: MVT::i32)); |
| 17676 | } |
| 17677 | } |
| 17678 | } |
| 17679 | |
| 17680 | // Nothing to be done for scalar shifts. |
| 17681 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 17682 | if (!VT.isVector() || !TLI.isTypeLegal(VT)) |
| 17683 | return SDValue(); |
| 17684 | if (ST->hasMVEIntegerOps()) |
| 17685 | return SDValue(); |
| 17686 | |
| 17687 | int64_t Cnt; |
| 17688 | |
| 17689 | switch (N->getOpcode()) { |
default: llvm_unreachable("unexpected shift opcode");
| 17691 | |
| 17692 | case ISD::SHL: |
| 17693 | if (isVShiftLImm(Op: N->getOperand(Num: 1), VT, isLong: false, Cnt)) { |
| 17694 | SDLoc dl(N); |
| 17695 | return DAG.getNode(Opcode: ARMISD::VSHLIMM, DL: dl, VT, N1: N->getOperand(Num: 0), |
| 17696 | N2: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
| 17697 | } |
| 17698 | break; |
| 17699 | |
| 17700 | case ISD::SRA: |
| 17701 | case ISD::SRL: |
| 17702 | if (isVShiftRImm(Op: N->getOperand(Num: 1), VT, isNarrow: false, isIntrinsic: false, Cnt)) { |
| 17703 | unsigned VShiftOpc = |
| 17704 | (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM); |
| 17705 | SDLoc dl(N); |
| 17706 | return DAG.getNode(Opcode: VShiftOpc, DL: dl, VT, N1: N->getOperand(Num: 0), |
| 17707 | N2: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
| 17708 | } |
| 17709 | } |
| 17710 | return SDValue(); |
| 17711 | } |
| 17712 | |
// Look for a sign/zero/fp extend of a larger-than-legal load. This can be
| 17714 | // split into multiple extending loads, which are simpler to deal with than an |
| 17715 | // arbitrary extend. For fp extends we use an integer extending load and a VCVTL |
| 17716 | // to convert the type to an f32. |
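// For example (illustrative): (v8i32 (sext (v8i8 load))) can be split into two
// v4i8 -> v4i32 sextloads whose results are concatenated, while an fp_extend
// from v8f16 uses integer extending loads followed by VCVTL nodes.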
| 17717 | static SDValue PerformSplittingToWideningLoad(SDNode *N, SelectionDAG &DAG) { |
| 17718 | SDValue N0 = N->getOperand(Num: 0); |
| 17719 | if (N0.getOpcode() != ISD::LOAD) |
| 17720 | return SDValue(); |
| 17721 | LoadSDNode *LD = cast<LoadSDNode>(Val: N0.getNode()); |
| 17722 | if (!LD->isSimple() || !N0.hasOneUse() || LD->isIndexed() || |
| 17723 | LD->getExtensionType() != ISD::NON_EXTLOAD) |
| 17724 | return SDValue(); |
| 17725 | EVT FromVT = LD->getValueType(ResNo: 0); |
| 17726 | EVT ToVT = N->getValueType(ResNo: 0); |
| 17727 | if (!ToVT.isVector()) |
| 17728 | return SDValue(); |
| 17729 | assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements()); |
| 17730 | EVT ToEltVT = ToVT.getVectorElementType(); |
| 17731 | EVT FromEltVT = FromVT.getVectorElementType(); |
| 17732 | |
| 17733 | unsigned NumElements = 0; |
| 17734 | if (ToEltVT == MVT::i32 && FromEltVT == MVT::i8) |
| 17735 | NumElements = 4; |
| 17736 | if (ToEltVT == MVT::f32 && FromEltVT == MVT::f16) |
| 17737 | NumElements = 4; |
| 17738 | if (NumElements == 0 || |
| 17739 | (FromEltVT != MVT::f16 && FromVT.getVectorNumElements() == NumElements) || |
| 17740 | FromVT.getVectorNumElements() % NumElements != 0 || |
| 17741 | !isPowerOf2_32(Value: NumElements)) |
| 17742 | return SDValue(); |
| 17743 | |
| 17744 | LLVMContext &C = *DAG.getContext(); |
| 17745 | SDLoc DL(LD); |
| 17746 | // Details about the old load |
| 17747 | SDValue Ch = LD->getChain(); |
| 17748 | SDValue BasePtr = LD->getBasePtr(); |
| 17749 | Align Alignment = LD->getBaseAlign(); |
| 17750 | MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags(); |
| 17751 | AAMDNodes AAInfo = LD->getAAInfo(); |
| 17752 | |
| 17753 | ISD::LoadExtType NewExtType = |
| 17754 | N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SEXTLOAD : ISD::ZEXTLOAD; |
| 17755 | SDValue Offset = DAG.getUNDEF(VT: BasePtr.getValueType()); |
| 17756 | EVT NewFromVT = EVT::getVectorVT( |
| 17757 | Context&: C, VT: EVT::getIntegerVT(Context&: C, BitWidth: FromEltVT.getScalarSizeInBits()), NumElements); |
| 17758 | EVT NewToVT = EVT::getVectorVT( |
| 17759 | Context&: C, VT: EVT::getIntegerVT(Context&: C, BitWidth: ToEltVT.getScalarSizeInBits()), NumElements); |
| 17760 | |
| 17761 | SmallVector<SDValue, 4> Loads; |
| 17762 | SmallVector<SDValue, 4> Chains; |
| 17763 | for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) { |
| 17764 | unsigned NewOffset = (i * NewFromVT.getSizeInBits()) / 8; |
| 17765 | SDValue NewPtr = |
| 17766 | DAG.getObjectPtrOffset(SL: DL, Ptr: BasePtr, Offset: TypeSize::getFixed(ExactSize: NewOffset)); |
| 17767 | |
| 17768 | SDValue NewLoad = |
| 17769 | DAG.getLoad(AM: ISD::UNINDEXED, ExtType: NewExtType, VT: NewToVT, dl: DL, Chain: Ch, Ptr: NewPtr, Offset, |
| 17770 | PtrInfo: LD->getPointerInfo().getWithOffset(O: NewOffset), MemVT: NewFromVT, |
| 17771 | Alignment, MMOFlags, AAInfo); |
| 17772 | Loads.push_back(Elt: NewLoad); |
| 17773 | Chains.push_back(Elt: SDValue(NewLoad.getNode(), 1)); |
| 17774 | } |
| 17775 | |
// Float truncs need to be extended with VCVTB's into their floating point types.
| 17777 | if (FromEltVT == MVT::f16) { |
| 17778 | SmallVector<SDValue, 4> Extends; |
| 17779 | |
| 17780 | for (unsigned i = 0; i < Loads.size(); i++) { |
| 17781 | SDValue LoadBC = |
| 17782 | DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT: MVT::v8f16, Operand: Loads[i]); |
| 17783 | SDValue FPExt = DAG.getNode(Opcode: ARMISD::VCVTL, DL, VT: MVT::v4f32, N1: LoadBC, |
| 17784 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
| 17785 | Extends.push_back(Elt: FPExt); |
| 17786 | } |
| 17787 | |
| 17788 | Loads = Extends; |
| 17789 | } |
| 17790 | |
| 17791 | SDValue NewChain = DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Chains); |
| 17792 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 1), To: NewChain); |
| 17793 | return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: ToVT, Ops: Loads); |
| 17794 | } |
| 17795 | |
| 17796 | /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, |
| 17797 | /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. |
| 17798 | static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, |
| 17799 | const ARMSubtarget *ST) { |
| 17800 | SDValue N0 = N->getOperand(Num: 0); |
| 17801 | |
| 17802 | // Check for sign- and zero-extensions of vector extract operations of 8- and |
| 17803 | // 16-bit vector elements. NEON and MVE support these directly. They are |
| 17804 | // handled during DAG combining because type legalization will promote them |
| 17805 | // to 32-bit types and it is messy to recognize the operations after that. |
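// For example (illustrative): (i32 (sext (extract_vector_elt (v8i16 %v), 3)))
// can be matched to a single VGETLANEs node rather than an extract followed by
// a separate sign extension.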
| 17806 | if ((ST->hasNEON() || ST->hasMVEIntegerOps()) && |
| 17807 | N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { |
| 17808 | SDValue Vec = N0.getOperand(i: 0); |
| 17809 | SDValue Lane = N0.getOperand(i: 1); |
| 17810 | EVT VT = N->getValueType(ResNo: 0); |
| 17811 | EVT EltVT = N0.getValueType(); |
| 17812 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 17813 | |
| 17814 | if (VT == MVT::i32 && |
| 17815 | (EltVT == MVT::i8 || EltVT == MVT::i16) && |
| 17816 | TLI.isTypeLegal(VT: Vec.getValueType()) && |
| 17817 | isa<ConstantSDNode>(Val: Lane)) { |
| 17818 | |
| 17819 | unsigned Opc = 0; |
| 17820 | switch (N->getOpcode()) { |
default: llvm_unreachable("unexpected opcode");
| 17822 | case ISD::SIGN_EXTEND: |
| 17823 | Opc = ARMISD::VGETLANEs; |
| 17824 | break; |
| 17825 | case ISD::ZERO_EXTEND: |
| 17826 | case ISD::ANY_EXTEND: |
| 17827 | Opc = ARMISD::VGETLANEu; |
| 17828 | break; |
| 17829 | } |
| 17830 | return DAG.getNode(Opcode: Opc, DL: SDLoc(N), VT, N1: Vec, N2: Lane); |
| 17831 | } |
| 17832 | } |
| 17833 | |
| 17834 | if (ST->hasMVEIntegerOps()) |
| 17835 | if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG)) |
| 17836 | return NewLoad; |
| 17837 | |
| 17838 | return SDValue(); |
| 17839 | } |
| 17840 | |
| 17841 | static SDValue PerformFPExtendCombine(SDNode *N, SelectionDAG &DAG, |
| 17842 | const ARMSubtarget *ST) { |
| 17843 | if (ST->hasMVEFloatOps()) |
| 17844 | if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG)) |
| 17845 | return NewLoad; |
| 17846 | |
| 17847 | return SDValue(); |
| 17848 | } |
| 17849 | |
| 17850 | // Lower smin(smax(x, C1), C2) to ssat or usat, if they have saturating |
| 17851 | // constant bounds. |
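// For example (illustrative): smin(smax(x, -128), 127) saturates x to a signed
// 8-bit range (ssat), and smin(smax(x, 0), 255) to an unsigned 8-bit range
// (usat).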
| 17852 | static SDValue PerformMinMaxToSatCombine(SDValue Op, SelectionDAG &DAG, |
| 17853 | const ARMSubtarget *Subtarget) { |
| 17854 | if ((Subtarget->isThumb() || !Subtarget->hasV6Ops()) && |
| 17855 | !Subtarget->isThumb2()) |
| 17856 | return SDValue(); |
| 17857 | |
| 17858 | EVT VT = Op.getValueType(); |
| 17859 | SDValue Op0 = Op.getOperand(i: 0); |
| 17860 | |
| 17861 | if (VT != MVT::i32 || |
| 17862 | (Op0.getOpcode() != ISD::SMIN && Op0.getOpcode() != ISD::SMAX) || |
| 17863 | !isa<ConstantSDNode>(Val: Op.getOperand(i: 1)) || |
| 17864 | !isa<ConstantSDNode>(Val: Op0.getOperand(i: 1))) |
| 17865 | return SDValue(); |
| 17866 | |
| 17867 | SDValue Min = Op; |
| 17868 | SDValue Max = Op0; |
| 17869 | SDValue Input = Op0.getOperand(i: 0); |
| 17870 | if (Min.getOpcode() == ISD::SMAX) |
| 17871 | std::swap(a&: Min, b&: Max); |
| 17872 | |
| 17873 | APInt MinC = Min.getConstantOperandAPInt(i: 1); |
| 17874 | APInt MaxC = Max.getConstantOperandAPInt(i: 1); |
| 17875 | |
| 17876 | if (Min.getOpcode() != ISD::SMIN || Max.getOpcode() != ISD::SMAX || |
| 17877 | !(MinC + 1).isPowerOf2()) |
| 17878 | return SDValue(); |
| 17879 | |
| 17880 | SDLoc DL(Op); |
| 17881 | if (MinC == ~MaxC) |
| 17882 | return DAG.getNode(Opcode: ARMISD::SSAT, DL, VT, N1: Input, |
| 17883 | N2: DAG.getConstant(Val: MinC.countr_one(), DL, VT)); |
| 17884 | if (MaxC == 0) |
| 17885 | return DAG.getNode(Opcode: ARMISD::USAT, DL, VT, N1: Input, |
| 17886 | N2: DAG.getConstant(Val: MinC.countr_one(), DL, VT)); |
| 17887 | |
| 17888 | return SDValue(); |
| 17889 | } |
| 17890 | |
| 17891 | /// PerformMinMaxCombine - Target-specific DAG combining for creating truncating |
| 17892 | /// saturates. |
| 17893 | static SDValue PerformMinMaxCombine(SDNode *N, SelectionDAG &DAG, |
| 17894 | const ARMSubtarget *ST) { |
| 17895 | EVT VT = N->getValueType(ResNo: 0); |
| 17896 | SDValue N0 = N->getOperand(Num: 0); |
| 17897 | |
| 17898 | if (VT == MVT::i32) |
| 17899 | return PerformMinMaxToSatCombine(Op: SDValue(N, 0), DAG, Subtarget: ST); |
| 17900 | |
| 17901 | if (!ST->hasMVEIntegerOps()) |
| 17902 | return SDValue(); |
| 17903 | |
| 17904 | if (SDValue V = PerformVQDMULHCombine(N, DAG)) |
| 17905 | return V; |
| 17906 | |
| 17907 | if (VT != MVT::v4i32 && VT != MVT::v8i16) |
| 17908 | return SDValue(); |
| 17909 | |
| 17910 | auto IsSignedSaturate = [&](SDNode *Min, SDNode *Max) { |
| 17911 | // Check one is a smin and the other is a smax |
| 17912 | if (Min->getOpcode() != ISD::SMIN) |
| 17913 | std::swap(a&: Min, b&: Max); |
| 17914 | if (Min->getOpcode() != ISD::SMIN || Max->getOpcode() != ISD::SMAX) |
| 17915 | return false; |
| 17916 | |
| 17917 | APInt SaturateC; |
| 17918 | if (VT == MVT::v4i32) |
| 17919 | SaturateC = APInt(32, (1 << 15) - 1, true); |
| 17920 | else //if (VT == MVT::v8i16) |
| 17921 | SaturateC = APInt(16, (1 << 7) - 1, true); |
| 17922 | |
| 17923 | APInt MinC, MaxC; |
| 17924 | if (!ISD::isConstantSplatVector(N: Min->getOperand(Num: 1).getNode(), SplatValue&: MinC) || |
| 17925 | MinC != SaturateC) |
| 17926 | return false; |
| 17927 | if (!ISD::isConstantSplatVector(N: Max->getOperand(Num: 1).getNode(), SplatValue&: MaxC) || |
| 17928 | MaxC != ~SaturateC) |
| 17929 | return false; |
| 17930 | return true; |
| 17931 | }; |
| 17932 | |
| 17933 | if (IsSignedSaturate(N, N0.getNode())) { |
| 17934 | SDLoc DL(N); |
| 17935 | MVT ExtVT, HalfVT; |
| 17936 | if (VT == MVT::v4i32) { |
| 17937 | HalfVT = MVT::v8i16; |
| 17938 | ExtVT = MVT::v4i16; |
| 17939 | } else { // if (VT == MVT::v8i16) |
| 17940 | HalfVT = MVT::v16i8; |
| 17941 | ExtVT = MVT::v8i8; |
| 17942 | } |
| 17943 | |
// Create a VQMOVNB with undef top lanes, then sign-extend into the top
// half. That extend will hopefully be removed if only the bottom bits are
// demanded (through a truncating store, for example).
| 17947 | SDValue VQMOVN = |
| 17948 | DAG.getNode(Opcode: ARMISD::VQMOVNs, DL, VT: HalfVT, N1: DAG.getUNDEF(VT: HalfVT), |
| 17949 | N2: N0->getOperand(Num: 0), N3: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
| 17950 | SDValue Bitcast = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: VQMOVN); |
| 17951 | return DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL, VT, N1: Bitcast, |
| 17952 | N2: DAG.getValueType(ExtVT)); |
| 17953 | } |
| 17954 | |
| 17955 | auto IsUnsignedSaturate = [&](SDNode *Min) { |
| 17956 | // For unsigned, we just need to check for <= 0xffff |
| 17957 | if (Min->getOpcode() != ISD::UMIN) |
| 17958 | return false; |
| 17959 | |
| 17960 | APInt SaturateC; |
| 17961 | if (VT == MVT::v4i32) |
| 17962 | SaturateC = APInt(32, (1 << 16) - 1, true); |
| 17963 | else //if (VT == MVT::v8i16) |
| 17964 | SaturateC = APInt(16, (1 << 8) - 1, true); |
| 17965 | |
| 17966 | APInt MinC; |
| 17967 | if (!ISD::isConstantSplatVector(N: Min->getOperand(Num: 1).getNode(), SplatValue&: MinC) || |
| 17968 | MinC != SaturateC) |
| 17969 | return false; |
| 17970 | return true; |
| 17971 | }; |
| 17972 | |
| 17973 | if (IsUnsignedSaturate(N)) { |
| 17974 | SDLoc DL(N); |
| 17975 | MVT HalfVT; |
| 17976 | unsigned ExtConst; |
| 17977 | if (VT == MVT::v4i32) { |
| 17978 | HalfVT = MVT::v8i16; |
| 17979 | ExtConst = 0x0000FFFF; |
| 17980 | } else { //if (VT == MVT::v8i16) |
| 17981 | HalfVT = MVT::v16i8; |
| 17982 | ExtConst = 0x00FF; |
| 17983 | } |
| 17984 | |
| 17985 | // Create a VQMOVNB with undef top lanes, then ZExt into the top half with |
| 17986 | // an AND. That extend will hopefully be removed if only the bottom bits are |
// demanded (through a truncating store, for example).
| 17988 | SDValue VQMOVN = |
| 17989 | DAG.getNode(Opcode: ARMISD::VQMOVNu, DL, VT: HalfVT, N1: DAG.getUNDEF(VT: HalfVT), N2: N0, |
| 17990 | N3: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
| 17991 | SDValue Bitcast = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: VQMOVN); |
| 17992 | return DAG.getNode(Opcode: ISD::AND, DL, VT, N1: Bitcast, |
| 17993 | N2: DAG.getConstant(Val: ExtConst, DL, VT)); |
| 17994 | } |
| 17995 | |
| 17996 | return SDValue(); |
| 17997 | } |
| 17998 | |
| 17999 | static const APInt *isPowerOf2Constant(SDValue V) { |
| 18000 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: V); |
| 18001 | if (!C) |
| 18002 | return nullptr; |
| 18003 | const APInt *CV = &C->getAPIntValue(); |
| 18004 | return CV->isPowerOf2() ? CV : nullptr; |
| 18005 | } |
| 18006 | |
| 18007 | SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const { |
| 18008 | // If we have a CMOV, OR and AND combination such as: |
| 18009 | // if (x & CN) |
| 18010 | // y |= CM; |
| 18011 | // |
| 18012 | // And: |
| 18013 | // * CN is a single bit; |
| 18014 | // * All bits covered by CM are known zero in y |
| 18015 | // |
| 18016 | // Then we can convert this into a sequence of BFI instructions. This will |
| 18017 | // always be a win if CM is a single bit, will always be no worse than the |
| 18018 | // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is |
| 18019 | // three bits (due to the extra IT instruction). |
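// For example (illustrative): "if (x & 4) y |= 16;", with bit 4 of y known to
// be zero, can become a shift of x right by 2 followed by a single BFI that
// inserts the tested bit into bit 4 of y, avoiding a TST/ORR/IT sequence.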
| 18020 | |
| 18021 | SDValue Op0 = CMOV->getOperand(Num: 0); |
| 18022 | SDValue Op1 = CMOV->getOperand(Num: 1); |
| 18023 | auto CC = CMOV->getConstantOperandAPInt(Num: 2).getLimitedValue(); |
| 18024 | SDValue CmpZ = CMOV->getOperand(Num: 3); |
| 18025 | |
| 18026 | // The compare must be against zero. |
| 18027 | if (!isNullConstant(V: CmpZ->getOperand(Num: 1))) |
| 18028 | return SDValue(); |
| 18029 | |
| 18030 | assert(CmpZ->getOpcode() == ARMISD::CMPZ); |
| 18031 | SDValue And = CmpZ->getOperand(Num: 0); |
| 18032 | if (And->getOpcode() != ISD::AND) |
| 18033 | return SDValue(); |
| 18034 | const APInt *AndC = isPowerOf2Constant(V: And->getOperand(Num: 1)); |
| 18035 | if (!AndC) |
| 18036 | return SDValue(); |
| 18037 | SDValue X = And->getOperand(Num: 0); |
| 18038 | |
| 18039 | if (CC == ARMCC::EQ) { |
| 18040 | // We're performing an "equal to zero" compare. Swap the operands so we |
| 18041 | // canonicalize on a "not equal to zero" compare. |
| 18042 | std::swap(a&: Op0, b&: Op1); |
| 18043 | } else { |
assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?");
| 18045 | } |
| 18046 | |
| 18047 | if (Op1->getOpcode() != ISD::OR) |
| 18048 | return SDValue(); |
| 18049 | |
| 18050 | ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Val: Op1->getOperand(Num: 1)); |
| 18051 | if (!OrC) |
| 18052 | return SDValue(); |
| 18053 | SDValue Y = Op1->getOperand(Num: 0); |
| 18054 | |
| 18055 | if (Op0 != Y) |
| 18056 | return SDValue(); |
| 18057 | |
| 18058 | // Now, is it profitable to continue? |
| 18059 | APInt OrCI = OrC->getAPIntValue(); |
| 18060 | unsigned Heuristic = Subtarget->isThumb() ? 3 : 2; |
| 18061 | if (OrCI.popcount() > Heuristic) |
| 18062 | return SDValue(); |
| 18063 | |
| 18064 | // Lastly, can we determine that the bits defined by OrCI |
| 18065 | // are zero in Y? |
| 18066 | KnownBits Known = DAG.computeKnownBits(Op: Y); |
| 18067 | if ((OrCI & Known.Zero) != OrCI) |
| 18068 | return SDValue(); |
| 18069 | |
| 18070 | // OK, we can do the combine. |
| 18071 | SDValue V = Y; |
| 18072 | SDLoc dl(X); |
| 18073 | EVT VT = X.getValueType(); |
| 18074 | unsigned BitInX = AndC->logBase2(); |
| 18075 | |
| 18076 | if (BitInX != 0) { |
| 18077 | // We must shift X first. |
| 18078 | X = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT, N1: X, |
| 18079 | N2: DAG.getConstant(Val: BitInX, DL: dl, VT)); |
| 18080 | } |
| 18081 | |
| 18082 | for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits(); |
| 18083 | BitInY < NumActiveBits; ++BitInY) { |
| 18084 | if (OrCI[BitInY] == 0) |
| 18085 | continue; |
| 18086 | APInt Mask(VT.getSizeInBits(), 0); |
| 18087 | Mask.setBit(BitInY); |
| 18088 | V = DAG.getNode(Opcode: ARMISD::BFI, DL: dl, VT, N1: V, N2: X, |
| 18089 | // Confusingly, the operand is an *inverted* mask. |
| 18090 | N3: DAG.getConstant(Val: ~Mask, DL: dl, VT)); |
| 18091 | } |
| 18092 | |
| 18093 | return V; |
| 18094 | } |
| 18095 | |
| 18096 | // Given N, the value controlling the conditional branch, search for the loop |
| 18097 | // intrinsic, returning it, along with how the value is used. We need to handle |
| 18098 | // patterns such as the following: |
| 18099 | // (brcond (xor (setcc (loop.decrement), 0, ne), 1), exit) |
| 18100 | // (brcond (setcc (loop.decrement), 0, eq), exit) |
| 18101 | // (brcond (setcc (loop.decrement), 0, ne), header) |
| 18102 | static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm, |
| 18103 | bool &Negate) { |
| 18104 | switch (N->getOpcode()) { |
| 18105 | default: |
| 18106 | break; |
| 18107 | case ISD::XOR: { |
| 18108 | if (!isa<ConstantSDNode>(Val: N.getOperand(i: 1))) |
| 18109 | return SDValue(); |
| 18110 | if (!cast<ConstantSDNode>(Val: N.getOperand(i: 1))->isOne()) |
| 18111 | return SDValue(); |
| 18112 | Negate = !Negate; |
| 18113 | return SearchLoopIntrinsic(N: N.getOperand(i: 0), CC, Imm, Negate); |
| 18114 | } |
| 18115 | case ISD::SETCC: { |
| 18116 | auto *Const = dyn_cast<ConstantSDNode>(Val: N.getOperand(i: 1)); |
| 18117 | if (!Const) |
| 18118 | return SDValue(); |
| 18119 | if (Const->isZero()) |
| 18120 | Imm = 0; |
| 18121 | else if (Const->isOne()) |
| 18122 | Imm = 1; |
| 18123 | else |
| 18124 | return SDValue(); |
| 18125 | CC = cast<CondCodeSDNode>(Val: N.getOperand(i: 2))->get(); |
| 18126 | return SearchLoopIntrinsic(N: N->getOperand(Num: 0), CC, Imm, Negate); |
| 18127 | } |
| 18128 | case ISD::INTRINSIC_W_CHAIN: { |
| 18129 | unsigned IntOp = N.getConstantOperandVal(i: 1); |
| 18130 | if (IntOp != Intrinsic::test_start_loop_iterations && |
| 18131 | IntOp != Intrinsic::loop_decrement_reg) |
| 18132 | return SDValue(); |
| 18133 | return N; |
| 18134 | } |
| 18135 | } |
| 18136 | return SDValue(); |
| 18137 | } |
| 18138 | |
| 18139 | static SDValue PerformHWLoopCombine(SDNode *N, |
| 18140 | TargetLowering::DAGCombinerInfo &DCI, |
| 18141 | const ARMSubtarget *ST) { |
| 18142 | |
// The hwloop intrinsics that we're interested in are used for control flow,
| 18144 | // either for entering or exiting the loop: |
| 18145 | // - test.start.loop.iterations will test whether its operand is zero. If it |
| 18146 | // is zero, the proceeding branch should not enter the loop. |
| 18147 | // - loop.decrement.reg also tests whether its operand is zero. If it is |
| 18148 | // zero, the proceeding branch should not branch back to the beginning of |
| 18149 | // the loop. |
// So here, we need to check how the brcond is using the result of each
| 18151 | // of the intrinsics to ensure that we're branching to the right place at the |
| 18152 | // right time. |
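// For example (illustrative):
//   (brcond (setcc (int_test_start_loop_iterations %n), 0, eq), %exit)
// becomes a WLSSETUP/WLS pair that branches to %exit when the trip count is
// zero; the following unconditional branch into the loop body is unchanged.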
| 18153 | |
| 18154 | ISD::CondCode CC; |
| 18155 | SDValue Cond; |
| 18156 | int Imm = 1; |
| 18157 | bool Negate = false; |
| 18158 | SDValue Chain = N->getOperand(Num: 0); |
| 18159 | SDValue Dest; |
| 18160 | |
| 18161 | if (N->getOpcode() == ISD::BRCOND) { |
| 18162 | CC = ISD::SETEQ; |
| 18163 | Cond = N->getOperand(Num: 1); |
| 18164 | Dest = N->getOperand(Num: 2); |
| 18165 | } else { |
assert(N->getOpcode() == ISD::BR_CC && "Expected BRCOND or BR_CC!");
| 18167 | CC = cast<CondCodeSDNode>(Val: N->getOperand(Num: 1))->get(); |
| 18168 | Cond = N->getOperand(Num: 2); |
| 18169 | Dest = N->getOperand(Num: 4); |
| 18170 | if (auto *Const = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 3))) { |
| 18171 | if (!Const->isOne() && !Const->isZero()) |
| 18172 | return SDValue(); |
| 18173 | Imm = Const->getZExtValue(); |
| 18174 | } else |
| 18175 | return SDValue(); |
| 18176 | } |
| 18177 | |
| 18178 | SDValue Int = SearchLoopIntrinsic(N: Cond, CC, Imm, Negate); |
| 18179 | if (!Int) |
| 18180 | return SDValue(); |
| 18181 | |
| 18182 | if (Negate) |
| 18183 | CC = ISD::getSetCCInverse(Operation: CC, /* Integer inverse */ Type: MVT::i32); |
| 18184 | |
| 18185 | auto IsTrueIfZero = [](ISD::CondCode CC, int Imm) { |
| 18186 | return (CC == ISD::SETEQ && Imm == 0) || |
| 18187 | (CC == ISD::SETNE && Imm == 1) || |
| 18188 | (CC == ISD::SETLT && Imm == 1) || |
| 18189 | (CC == ISD::SETULT && Imm == 1); |
| 18190 | }; |
| 18191 | |
| 18192 | auto IsFalseIfZero = [](ISD::CondCode CC, int Imm) { |
| 18193 | return (CC == ISD::SETEQ && Imm == 1) || |
| 18194 | (CC == ISD::SETNE && Imm == 0) || |
| 18195 | (CC == ISD::SETGT && Imm == 0) || |
| 18196 | (CC == ISD::SETUGT && Imm == 0) || |
| 18197 | (CC == ISD::SETGE && Imm == 1) || |
| 18198 | (CC == ISD::SETUGE && Imm == 1); |
| 18199 | }; |
| 18200 | |
assert((IsTrueIfZero(CC, Imm) || IsFalseIfZero(CC, Imm)) &&
"unsupported condition");
| 18203 | |
| 18204 | SDLoc dl(Int); |
| 18205 | SelectionDAG &DAG = DCI.DAG; |
| 18206 | SDValue Elements = Int.getOperand(i: 2); |
| 18207 | unsigned IntOp = Int->getConstantOperandVal(Num: 1); |
assert((N->hasOneUse() && N->user_begin()->getOpcode() == ISD::BR) &&
"expected single br user");
| 18210 | SDNode *Br = *N->user_begin(); |
| 18211 | SDValue OtherTarget = Br->getOperand(Num: 1); |
| 18212 | |
| 18213 | // Update the unconditional branch to branch to the given Dest. |
| 18214 | auto UpdateUncondBr = [](SDNode *Br, SDValue Dest, SelectionDAG &DAG) { |
| 18215 | SDValue NewBrOps[] = { Br->getOperand(Num: 0), Dest }; |
| 18216 | SDValue NewBr = DAG.getNode(Opcode: ISD::BR, DL: SDLoc(Br), VT: MVT::Other, Ops: NewBrOps); |
| 18217 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(Br, 0), To: NewBr); |
| 18218 | }; |
| 18219 | |
| 18220 | if (IntOp == Intrinsic::test_start_loop_iterations) { |
| 18221 | SDValue Res; |
| 18222 | SDValue Setup = DAG.getNode(Opcode: ARMISD::WLSSETUP, DL: dl, VT: MVT::i32, Operand: Elements); |
| 18223 | // We expect this 'instruction' to branch when the counter is zero. |
| 18224 | if (IsTrueIfZero(CC, Imm)) { |
| 18225 | SDValue Ops[] = {Chain, Setup, Dest}; |
| 18226 | Res = DAG.getNode(Opcode: ARMISD::WLS, DL: dl, VT: MVT::Other, Ops); |
| 18227 | } else { |
| 18228 | // The logic is the reverse of what we need for WLS, so find the other |
| 18229 | // basic block target: the target of the proceeding br. |
| 18230 | UpdateUncondBr(Br, Dest, DAG); |
| 18231 | |
| 18232 | SDValue Ops[] = {Chain, Setup, OtherTarget}; |
| 18233 | Res = DAG.getNode(Opcode: ARMISD::WLS, DL: dl, VT: MVT::Other, Ops); |
| 18234 | } |
| 18235 | // Update LR count to the new value |
| 18236 | DAG.ReplaceAllUsesOfValueWith(From: Int.getValue(R: 0), To: Setup); |
| 18237 | // Update chain |
| 18238 | DAG.ReplaceAllUsesOfValueWith(From: Int.getValue(R: 2), To: Int.getOperand(i: 0)); |
| 18239 | return Res; |
| 18240 | } else { |
| 18241 | SDValue Size = |
| 18242 | DAG.getTargetConstant(Val: Int.getConstantOperandVal(i: 3), DL: dl, VT: MVT::i32); |
| 18243 | SDValue Args[] = { Int.getOperand(i: 0), Elements, Size, }; |
| 18244 | SDValue LoopDec = DAG.getNode(Opcode: ARMISD::LOOP_DEC, DL: dl, |
| 18245 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::Other), Ops: Args); |
| 18246 | DAG.ReplaceAllUsesWith(From: Int.getNode(), To: LoopDec.getNode()); |
| 18247 | |
| 18248 | // We expect this instruction to branch when the count is not zero. |
| 18249 | SDValue Target = IsFalseIfZero(CC, Imm) ? Dest : OtherTarget; |
| 18250 | |
| 18251 | // Update the unconditional branch to target the loop preheader if we've |
| 18252 | // found the condition has been reversed. |
| 18253 | if (Target == OtherTarget) |
| 18254 | UpdateUncondBr(Br, Dest, DAG); |
| 18255 | |
| 18256 | Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, |
| 18257 | N1: SDValue(LoopDec.getNode(), 1), N2: Chain); |
| 18258 | |
| 18259 | SDValue EndArgs[] = { Chain, SDValue(LoopDec.getNode(), 0), Target }; |
| 18260 | return DAG.getNode(Opcode: ARMISD::LE, DL: dl, VT: MVT::Other, Ops: EndArgs); |
| 18261 | } |
| 18262 | return SDValue(); |
| 18263 | } |
| 18264 | |
| 18265 | /// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND. |
| 18266 | SDValue |
| 18267 | ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const { |
| 18268 | SDValue Cmp = N->getOperand(Num: 3); |
| 18269 | if (Cmp.getOpcode() != ARMISD::CMPZ) |
| 18270 | // Only looking at NE cases. |
| 18271 | return SDValue(); |
| 18272 | |
| 18273 | SDLoc dl(N); |
| 18274 | SDValue LHS = Cmp.getOperand(i: 0); |
| 18275 | SDValue RHS = Cmp.getOperand(i: 1); |
| 18276 | SDValue Chain = N->getOperand(Num: 0); |
| 18277 | SDValue BB = N->getOperand(Num: 1); |
| 18278 | SDValue ARMcc = N->getOperand(Num: 2); |
| 18279 | ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsZExtVal(); |
| 18280 | |
| 18281 | // (brcond Chain BB ne (cmpz (and (cmov 0 1 CC Flags) 1) 0)) |
| 18282 | // -> (brcond Chain BB CC Flags) |
| 18283 | if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() && |
| 18284 | LHS->getOperand(Num: 0)->getOpcode() == ARMISD::CMOV && |
| 18285 | LHS->getOperand(Num: 0)->hasOneUse() && |
| 18286 | isNullConstant(V: LHS->getOperand(Num: 0)->getOperand(Num: 0)) && |
| 18287 | isOneConstant(V: LHS->getOperand(Num: 0)->getOperand(Num: 1)) && |
| 18288 | isOneConstant(V: LHS->getOperand(Num: 1)) && isNullConstant(V: RHS)) { |
| 18289 | return DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, N1: Chain, N2: BB, |
| 18290 | N3: LHS->getOperand(Num: 0)->getOperand(Num: 2), |
| 18291 | N4: LHS->getOperand(Num: 0)->getOperand(Num: 3)); |
| 18292 | } |
| 18293 | |
| 18294 | return SDValue(); |
| 18295 | } |
| 18296 | |
| 18297 | /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV. |
| 18298 | SDValue |
| 18299 | ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { |
| 18300 | SDValue Cmp = N->getOperand(Num: 3); |
| 18301 | if (Cmp.getOpcode() != ARMISD::CMPZ) |
| 18302 | // Only looking at EQ and NE cases. |
| 18303 | return SDValue(); |
| 18304 | |
| 18305 | EVT VT = N->getValueType(ResNo: 0); |
| 18306 | SDLoc dl(N); |
| 18307 | SDValue LHS = Cmp.getOperand(i: 0); |
| 18308 | SDValue RHS = Cmp.getOperand(i: 1); |
| 18309 | SDValue FalseVal = N->getOperand(Num: 0); |
| 18310 | SDValue TrueVal = N->getOperand(Num: 1); |
| 18311 | SDValue ARMcc = N->getOperand(Num: 2); |
| 18312 | ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsZExtVal(); |
| 18313 | |
| 18314 | // BFI is only available on V6T2+. |
| 18315 | if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) { |
| 18316 | SDValue R = PerformCMOVToBFICombine(CMOV: N, DAG); |
| 18317 | if (R) |
| 18318 | return R; |
| 18319 | } |
| 18320 | |
| 18321 | // Simplify |
| 18322 | // mov r1, r0 |
| 18323 | // cmp r1, x |
| 18324 | // mov r0, y |
| 18325 | // moveq r0, x |
| 18326 | // to |
| 18327 | // cmp r0, x |
| 18328 | // movne r0, y |
| 18329 | // |
| 18330 | // mov r1, r0 |
| 18331 | // cmp r1, x |
| 18332 | // mov r0, x |
| 18333 | // movne r0, y |
| 18334 | // to |
| 18335 | // cmp r0, x |
| 18336 | // movne r0, y |
| 18337 | /// FIXME: Turn this into a target neutral optimization? |
| 18338 | SDValue Res; |
| 18339 | if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) { |
| 18340 | Res = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: LHS, N2: TrueVal, N3: ARMcc, N4: Cmp); |
| 18341 | } else if (CC == ARMCC::EQ && TrueVal == RHS) { |
| 18342 | SDValue ARMcc; |
| 18343 | SDValue NewCmp = getARMCmp(LHS, RHS, CC: ISD::SETNE, ARMcc, DAG, dl); |
| 18344 | Res = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: LHS, N2: FalseVal, N3: ARMcc, N4: NewCmp); |
| 18345 | } |
| 18346 | |
| 18347 | // (cmov F T ne (cmpz (cmov 0 1 CC Flags) 0)) |
| 18348 | // -> (cmov F T CC Flags) |
| 18349 | if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse() && |
| 18350 | isNullConstant(V: LHS->getOperand(Num: 0)) && isOneConstant(V: LHS->getOperand(Num: 1)) && |
| 18351 | isNullConstant(V: RHS)) { |
| 18352 | return DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: FalseVal, N2: TrueVal, |
| 18353 | N3: LHS->getOperand(Num: 2), N4: LHS->getOperand(Num: 3)); |
| 18354 | } |
| 18355 | |
| 18356 | if (!VT.isInteger()) |
| 18357 | return SDValue(); |
| 18358 | |
// Fold away an unnecessary CMPZ/CMOV
| 18360 | // CMOV A, B, C1, (CMPZ (CMOV 1, 0, C2, D), 0) -> |
| 18361 | // if C1==EQ -> CMOV A, B, C2, D |
| 18362 | // if C1==NE -> CMOV A, B, NOT(C2), D |
| 18363 | if (N->getConstantOperandVal(Num: 2) == ARMCC::EQ || |
| 18364 | N->getConstantOperandVal(Num: 2) == ARMCC::NE) { |
| 18365 | ARMCC::CondCodes Cond; |
| 18366 | if (SDValue C = IsCMPZCSINC(Cmp: N->getOperand(Num: 3).getNode(), CC&: Cond)) { |
| 18367 | if (N->getConstantOperandVal(Num: 2) == ARMCC::NE) |
| 18368 | Cond = ARMCC::getOppositeCondition(CC: Cond); |
| 18369 | return DAG.getNode(Opcode: N->getOpcode(), DL: SDLoc(N), VT: MVT::i32, N1: N->getOperand(Num: 0), |
| 18370 | N2: N->getOperand(Num: 1), |
| 18371 | N3: DAG.getConstant(Val: Cond, DL: SDLoc(N), VT: MVT::i32), N4: C); |
| 18372 | } |
| 18373 | } |
| 18374 | |
| 18375 | // Materialize a boolean comparison for integers so we can avoid branching. |
| 18376 | if (isNullConstant(V: FalseVal)) { |
| 18377 | if (CC == ARMCC::EQ && isOneConstant(V: TrueVal)) { |
| 18378 | if (!Subtarget->isThumb1Only() && Subtarget->hasV5TOps()) { |
| 18379 | // If x == y then x - y == 0 and ARM's CLZ will return 32, shifting it |
| 18380 | // right 5 bits will make that 32 be 1, otherwise it will be 0. |
| 18381 | // CMOV 0, 1, ==, (CMPZ x, y) -> SRL (CTLZ (SUB x, y)), 5 |
| 18382 | SDValue Sub = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: LHS, N2: RHS); |
| 18383 | Res = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT, N1: DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT, Operand: Sub), |
| 18384 | N2: DAG.getConstant(Val: 5, DL: dl, VT: MVT::i32)); |
| 18385 | } else { |
| 18386 | // CMOV 0, 1, ==, (CMPZ x, y) -> |
| 18387 | // (UADDO_CARRY (SUB x, y), t:0, t:1) |
| 18388 | // where t = (USUBO_CARRY 0, (SUB x, y), 0) |
| 18389 | // |
| 18390 | // The USUBO_CARRY computes 0 - (x - y) and this will give a borrow when |
| 18391 | // x != y. In other words, a carry C == 1 when x == y, C == 0 |
| 18392 | // otherwise. |
| 18393 | // The final UADDO_CARRY computes |
| 18394 | // x - y + (0 - (x - y)) + C == C |
| 18395 | SDValue Sub = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: LHS, N2: RHS); |
| 18396 | SDVTList VTs = DAG.getVTList(VT1: VT, VT2: MVT::i32); |
| 18397 | SDValue Neg = DAG.getNode(Opcode: ISD::USUBO, DL: dl, VTList: VTs, N1: FalseVal, N2: Sub); |
| 18398 | // ISD::USUBO_CARRY returns a borrow but we want the carry here |
| 18399 | // actually. |
| 18400 | SDValue Carry = |
| 18401 | DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
| 18402 | N1: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32), N2: Neg.getValue(R: 1)); |
| 18403 | Res = DAG.getNode(Opcode: ISD::UADDO_CARRY, DL: dl, VTList: VTs, N1: Sub, N2: Neg, N3: Carry); |
| 18404 | } |
| 18405 | } else if (CC == ARMCC::NE && !isNullConstant(V: RHS) && |
| 18406 | (!Subtarget->isThumb1Only() || isPowerOf2Constant(V: TrueVal))) { |
| 18407 | // This seems pointless but will allow us to combine it further below. |
| 18408 | // CMOV 0, z, !=, (CMPZ x, y) -> CMOV (SUBC x, y), z, !=, (SUBC x, y):1 |
| 18409 | SDValue Sub = |
| 18410 | DAG.getNode(Opcode: ARMISD::SUBC, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: MVT::i32), N1: LHS, N2: RHS); |
| 18411 | Res = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: Sub, N2: TrueVal, N3: ARMcc, |
| 18412 | N4: Sub.getValue(R: 1)); |
| 18413 | FalseVal = Sub; |
| 18414 | } |
| 18415 | } else if (isNullConstant(V: TrueVal)) { |
| 18416 | if (CC == ARMCC::EQ && !isNullConstant(V: RHS) && |
| 18417 | (!Subtarget->isThumb1Only() || isPowerOf2Constant(V: FalseVal))) { |
| 18418 | // This seems pointless but will allow us to combine it further below |
| 18419 | // Note that we change == for != as this is the dual for the case above. |
| 18420 | // CMOV z, 0, ==, (CMPZ x, y) -> CMOV (SUBC x, y), z, !=, (SUBC x, y):1 |
| 18421 | SDValue Sub = |
| 18422 | DAG.getNode(Opcode: ARMISD::SUBC, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: MVT::i32), N1: LHS, N2: RHS); |
| 18423 | Res = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: Sub, N2: FalseVal, |
| 18424 | N3: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32), |
| 18425 | N4: Sub.getValue(R: 1)); |
| 18426 | FalseVal = Sub; |
| 18427 | } |
| 18428 | } |
| 18429 | |
| 18430 | // On Thumb1, the DAG above may be further combined if z is a power of 2 |
| 18431 | // (z == 2 ^ K). |
| 18432 | // CMOV (SUBC x, y), z, !=, (SUBC x, y):1 -> |
| 18433 | // t1 = (USUBO (SUB x, y), 1) |
| 18434 | // t2 = (USUBO_CARRY (SUB x, y), t1:0, t1:1) |
| 18435 | // Result = if K != 0 then (SHL t2:0, K) else t2:0 |
| 18436 | // |
| 18437 | // This also handles the special case of comparing against zero; it's |
// essentially the same pattern, except there's no SUBC:
| 18439 | // CMOV x, z, !=, (CMPZ x, 0) -> |
| 18440 | // t1 = (USUBO x, 1) |
| 18441 | // t2 = (USUBO_CARRY x, t1:0, t1:1) |
| 18442 | // Result = if K != 0 then (SHL t2:0, K) else t2:0 |
| 18443 | const APInt *TrueConst; |
| 18444 | if (Subtarget->isThumb1Only() && CC == ARMCC::NE && |
| 18445 | ((FalseVal.getOpcode() == ARMISD::SUBC && FalseVal.getOperand(i: 0) == LHS && |
| 18446 | FalseVal.getOperand(i: 1) == RHS) || |
| 18447 | (FalseVal == LHS && isNullConstant(V: RHS))) && |
| 18448 | (TrueConst = isPowerOf2Constant(V: TrueVal))) { |
| 18449 | SDVTList VTs = DAG.getVTList(VT1: VT, VT2: MVT::i32); |
| 18450 | unsigned ShiftAmount = TrueConst->logBase2(); |
| 18451 | if (ShiftAmount) |
| 18452 | TrueVal = DAG.getConstant(Val: 1, DL: dl, VT); |
| 18453 | SDValue Subc = DAG.getNode(Opcode: ISD::USUBO, DL: dl, VTList: VTs, N1: FalseVal, N2: TrueVal); |
| 18454 | Res = DAG.getNode(Opcode: ISD::USUBO_CARRY, DL: dl, VTList: VTs, N1: FalseVal, N2: Subc, |
| 18455 | N3: Subc.getValue(R: 1)); |
| 18456 | |
| 18457 | if (ShiftAmount) |
| 18458 | Res = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT, N1: Res, |
| 18459 | N2: DAG.getConstant(Val: ShiftAmount, DL: dl, VT: MVT::i32)); |
| 18460 | } |
| 18461 | |
| 18462 | if (Res.getNode()) { |
| 18463 | KnownBits Known = DAG.computeKnownBits(Op: SDValue(N,0)); |
| 18464 | // Capture demanded bits information that would be otherwise lost. |
| 18465 | if (Known.Zero == 0xfffffffe) |
| 18466 | Res = DAG.getNode(Opcode: ISD::AssertZext, DL: dl, VT: MVT::i32, N1: Res, |
| 18467 | N2: DAG.getValueType(MVT::i1)); |
| 18468 | else if (Known.Zero == 0xffffff00) |
| 18469 | Res = DAG.getNode(Opcode: ISD::AssertZext, DL: dl, VT: MVT::i32, N1: Res, |
| 18470 | N2: DAG.getValueType(MVT::i8)); |
| 18471 | else if (Known.Zero == 0xffff0000) |
| 18472 | Res = DAG.getNode(Opcode: ISD::AssertZext, DL: dl, VT: MVT::i32, N1: Res, |
| 18473 | N2: DAG.getValueType(MVT::i16)); |
| 18474 | } |
| 18475 | |
| 18476 | return Res; |
| 18477 | } |
| 18478 | |
| 18479 | static SDValue PerformBITCASTCombine(SDNode *N, |
| 18480 | TargetLowering::DAGCombinerInfo &DCI, |
| 18481 | const ARMSubtarget *ST) { |
| 18482 | SelectionDAG &DAG = DCI.DAG; |
| 18483 | SDValue Src = N->getOperand(Num: 0); |
| 18484 | EVT DstVT = N->getValueType(ResNo: 0); |
| 18485 | |
| 18486 | // Convert v4f32 bitcast (v4i32 vdup (i32)) -> v4f32 vdup (i32) under MVE. |
| 18487 | if (ST->hasMVEIntegerOps() && Src.getOpcode() == ARMISD::VDUP) { |
| 18488 | EVT SrcVT = Src.getValueType(); |
| 18489 | if (SrcVT.getScalarSizeInBits() == DstVT.getScalarSizeInBits()) |
| 18490 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: SDLoc(N), VT: DstVT, Operand: Src.getOperand(i: 0)); |
| 18491 | } |
| 18492 | |
| 18493 | // We may have a bitcast of something that has already had this bitcast |
| 18494 | // combine performed on it, so skip past any VECTOR_REG_CASTs. |
| 18495 | if (Src.getOpcode() == ARMISD::VECTOR_REG_CAST && |
| 18496 | Src.getOperand(i: 0).getValueType().getScalarSizeInBits() <= |
| 18497 | Src.getValueType().getScalarSizeInBits()) |
| 18498 | Src = Src.getOperand(i: 0); |
| 18499 | |
| 18500 | // Bitcast from element-wise VMOV or VMVN doesn't need VREV if the VREV that |
| 18501 | // would be generated is at least the width of the element type. |
| 18502 | EVT SrcVT = Src.getValueType(); |
| 18503 | if ((Src.getOpcode() == ARMISD::VMOVIMM || |
| 18504 | Src.getOpcode() == ARMISD::VMVNIMM || |
| 18505 | Src.getOpcode() == ARMISD::VMOVFPIMM) && |
| 18506 | SrcVT.getScalarSizeInBits() <= DstVT.getScalarSizeInBits() && |
| 18507 | DAG.getDataLayout().isBigEndian()) |
| 18508 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: SDLoc(N), VT: DstVT, Operand: Src); |
| 18509 | |
| 18510 | // bitcast(extract(x, n)); bitcast(extract(x, n+1)) -> VMOVRRD x |
| 18511 | if (SDValue R = PerformExtractEltToVMOVRRD(N, DCI)) |
| 18512 | return R; |
| 18513 | |
| 18514 | return SDValue(); |
| 18515 | } |
| 18516 | |
| 18517 | // Some combines for the MVETrunc truncations legalizer helper. Also lowers the |
| 18518 | // node into stack operations after legalizeOps. |
| 18519 | SDValue ARMTargetLowering::PerformMVETruncCombine( |
| 18520 | SDNode *N, TargetLowering::DAGCombinerInfo &DCI) const { |
| 18521 | SelectionDAG &DAG = DCI.DAG; |
| 18522 | EVT VT = N->getValueType(ResNo: 0); |
| 18523 | SDLoc DL(N); |
| 18524 | |
| 18525 | // MVETrunc(Undef, Undef) -> Undef |
| 18526 | if (all_of(Range: N->ops(), P: [](SDValue Op) { return Op.isUndef(); })) |
| 18527 | return DAG.getUNDEF(VT); |
| 18528 | |
| 18529 | // MVETrunc(MVETrunc a b, MVETrunc c, d) -> MVETrunc |
| 18530 | if (N->getNumOperands() == 2 && |
| 18531 | N->getOperand(Num: 0).getOpcode() == ARMISD::MVETRUNC && |
| 18532 | N->getOperand(Num: 1).getOpcode() == ARMISD::MVETRUNC) |
| 18533 | return DAG.getNode(Opcode: ARMISD::MVETRUNC, DL, VT, N1: N->getOperand(Num: 0).getOperand(i: 0), |
| 18534 | N2: N->getOperand(Num: 0).getOperand(i: 1), |
| 18535 | N3: N->getOperand(Num: 1).getOperand(i: 0), |
| 18536 | N4: N->getOperand(Num: 1).getOperand(i: 1)); |
| 18537 | |
| 18538 | // MVETrunc(shuffle, shuffle) -> VMOVN |
| 18539 | if (N->getNumOperands() == 2 && |
| 18540 | N->getOperand(Num: 0).getOpcode() == ISD::VECTOR_SHUFFLE && |
| 18541 | N->getOperand(Num: 1).getOpcode() == ISD::VECTOR_SHUFFLE) { |
| 18542 | auto *S0 = cast<ShuffleVectorSDNode>(Val: N->getOperand(Num: 0).getNode()); |
| 18543 | auto *S1 = cast<ShuffleVectorSDNode>(Val: N->getOperand(Num: 1).getNode()); |
| 18544 | |
| 18545 | if (S0->getOperand(Num: 0) == S1->getOperand(Num: 0) && |
| 18546 | S0->getOperand(Num: 1) == S1->getOperand(Num: 1)) { |
| 18547 | // Construct complete shuffle mask |
| 18548 | SmallVector<int, 8> Mask(S0->getMask()); |
| 18549 | Mask.append(in_start: S1->getMask().begin(), in_end: S1->getMask().end()); |
| 18550 | |
| 18551 | if (isVMOVNTruncMask(M: Mask, ToVT: VT, rev: false)) |
| 18552 | return DAG.getNode( |
| 18553 | Opcode: ARMISD::VMOVN, DL, VT, |
| 18554 | N1: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: S0->getOperand(Num: 0)), |
| 18555 | N2: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: S0->getOperand(Num: 1)), |
| 18556 | N3: DAG.getConstant(Val: 1, DL, VT: MVT::i32)); |
| 18557 | if (isVMOVNTruncMask(M: Mask, ToVT: VT, rev: true)) |
| 18558 | return DAG.getNode( |
| 18559 | Opcode: ARMISD::VMOVN, DL, VT, |
| 18560 | N1: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: S0->getOperand(Num: 1)), |
| 18561 | N2: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: S0->getOperand(Num: 0)), |
| 18562 | N3: DAG.getConstant(Val: 1, DL, VT: MVT::i32)); |
| 18563 | } |
| 18564 | } |
| 18565 | |
| 18566 | // For MVETrunc of a buildvector or shuffle, it can be beneficial to lower the |
| 18567 | // truncate to a buildvector to allow the generic optimisations to kick in. |
| 18568 | if (all_of(Range: N->ops(), P: [](SDValue Op) { |
| 18569 | return Op.getOpcode() == ISD::BUILD_VECTOR || |
| 18570 | Op.getOpcode() == ISD::VECTOR_SHUFFLE || |
| 18571 | (Op.getOpcode() == ISD::BITCAST && |
| 18572 | Op.getOperand(i: 0).getOpcode() == ISD::BUILD_VECTOR); |
| 18573 | })) { |
| 18574 | SmallVector<SDValue, 8> Extracts; |
| 18575 | for (unsigned Op = 0; Op < N->getNumOperands(); Op++) { |
| 18576 | SDValue O = N->getOperand(Num: Op); |
| 18577 | for (unsigned i = 0; i < O.getValueType().getVectorNumElements(); i++) { |
| 18578 | SDValue Ext = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: MVT::i32, N1: O, |
| 18579 | N2: DAG.getConstant(Val: i, DL, VT: MVT::i32)); |
| 18580 | Extracts.push_back(Elt: Ext); |
| 18581 | } |
| 18582 | } |
| 18583 | return DAG.getBuildVector(VT, DL, Ops: Extracts); |
| 18584 | } |
| 18585 | |
| 18586 | // If we are late in the legalization process and nothing has optimised |
| 18587 | // the trunc to anything better, lower it to a stack store and reload, |
| 18588 | // performing the truncation whilst keeping the lanes in the correct order: |
| 18589 | // VSTRH.32 a, stack; VSTRH.32 b, stack+8; VLDRW.32 stack; |
| 18590 | if (!DCI.isAfterLegalizeDAG()) |
| 18591 | return SDValue(); |
| 18592 | |
| 18593 | SDValue StackPtr = DAG.CreateStackTemporary(Bytes: TypeSize::getFixed(ExactSize: 16), Alignment: Align(4)); |
| 18594 | int SPFI = cast<FrameIndexSDNode>(Val: StackPtr.getNode())->getIndex(); |
| 18595 | int NumIns = N->getNumOperands(); |
| 18596 | assert((NumIns == 2 || NumIns == 4) && |
| 18597 | "Expected 2 or 4 inputs to an MVETrunc" ); |
| 18598 | EVT StoreVT = VT.getHalfNumVectorElementsVT(Context&: *DAG.getContext()); |
| 18599 | if (N->getNumOperands() == 4) |
| 18600 | StoreVT = StoreVT.getHalfNumVectorElementsVT(Context&: *DAG.getContext()); |
| 18601 | |
| 18602 | SmallVector<SDValue> Chains; |
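| | // Truncating-store each input into its slice of the 16-byte stack slot so the |
| | // narrowed lanes of all the inputs end up contiguous and in the right order. |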
| 18603 | for (int I = 0; I < NumIns; I++) { |
| 18604 | SDValue Ptr = DAG.getNode( |
| 18605 | Opcode: ISD::ADD, DL, VT: StackPtr.getValueType(), N1: StackPtr, |
| 18606 | N2: DAG.getConstant(Val: I * 16 / NumIns, DL, VT: StackPtr.getValueType())); |
| 18607 | MachinePointerInfo MPI = MachinePointerInfo::getFixedStack( |
| 18608 | MF&: DAG.getMachineFunction(), FI: SPFI, Offset: I * 16 / NumIns); |
| 18609 | SDValue Ch = DAG.getTruncStore(Chain: DAG.getEntryNode(), dl: DL, Val: N->getOperand(Num: I), |
| 18610 | Ptr, PtrInfo: MPI, SVT: StoreVT, Alignment: Align(4)); |
| 18611 | Chains.push_back(Elt: Ch); |
| 18612 | } |
| 18613 | |
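| | // Join the store chains and reload the whole 16-byte slot as a single vector |
| | // of the result type. |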
| 18614 | SDValue Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Chains); |
| 18615 | MachinePointerInfo MPI = |
| 18616 | MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI: SPFI, Offset: 0); |
| 18617 | return DAG.getLoad(VT, dl: DL, Chain, Ptr: StackPtr, PtrInfo: MPI, Alignment: Align(4)); |
| 18618 | } |
| 18619 | |
| 18620 | // Take a MVEEXT(load x) and split that into (extload x, extload x+8) |
| 18621 | static SDValue PerformSplittingMVEEXTToWideningLoad(SDNode *N, |
| 18622 | SelectionDAG &DAG) { |
| 18623 | SDValue N0 = N->getOperand(Num: 0); |
| 18624 | LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: N0.getNode()); |
| 18625 | if (!LD || !LD->isSimple() || !N0.hasOneUse() || LD->isIndexed()) |
| 18626 | return SDValue(); |
| 18627 | |
| 18628 | EVT FromVT = LD->getMemoryVT(); |
| 18629 | EVT ToVT = N->getValueType(ResNo: 0); |
| 18630 | if (!ToVT.isVector()) |
| 18631 | return SDValue(); |
| 18632 | assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements() * 2); |
| 18633 | EVT ToEltVT = ToVT.getVectorElementType(); |
| 18634 | EVT FromEltVT = FromVT.getVectorElementType(); |
| 18635 | |
| 18636 | unsigned NumElements = 0; |
| 18637 | if (ToEltVT == MVT::i32 && (FromEltVT == MVT::i16 || FromEltVT == MVT::i8)) |
| 18638 | NumElements = 4; |
| 18639 | if (ToEltVT == MVT::i16 && FromEltVT == MVT::i8) |
| 18640 | NumElements = 8; |
| 18641 | assert(NumElements != 0); |
| 18642 | |
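| | // The new narrow loads extend in the same way as the MVEEXT node; bail out if |
| | // the existing load already extends in an incompatible way. |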
| 18643 | ISD::LoadExtType NewExtType = |
| 18644 | N->getOpcode() == ARMISD::MVESEXT ? ISD::SEXTLOAD : ISD::ZEXTLOAD; |
| 18645 | if (LD->getExtensionType() != ISD::NON_EXTLOAD && |
| 18646 | LD->getExtensionType() != ISD::EXTLOAD && |
| 18647 | LD->getExtensionType() != NewExtType) |
| 18648 | return SDValue(); |
| 18649 | |
| 18650 | LLVMContext &C = *DAG.getContext(); |
| 18651 | SDLoc DL(LD); |
| 18652 | // Details about the old load |
| 18653 | SDValue Ch = LD->getChain(); |
| 18654 | SDValue BasePtr = LD->getBasePtr(); |
| 18655 | Align Alignment = LD->getBaseAlign(); |
| 18656 | MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags(); |
| 18657 | AAMDNodes AAInfo = LD->getAAInfo(); |
| 18658 | |
| 18659 | SDValue Offset = DAG.getUNDEF(VT: BasePtr.getValueType()); |
| 18660 | EVT NewFromVT = EVT::getVectorVT( |
| 18661 | Context&: C, VT: EVT::getIntegerVT(Context&: C, BitWidth: FromEltVT.getScalarSizeInBits()), NumElements); |
| 18662 | EVT NewToVT = EVT::getVectorVT( |
| 18663 | Context&: C, VT: EVT::getIntegerVT(Context&: C, BitWidth: ToEltVT.getScalarSizeInBits()), NumElements); |
| 18664 | |
| 18665 | SmallVector<SDValue, 4> Loads; |
| 18666 | SmallVector<SDValue, 4> Chains; |
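| | // Split the wide load into extending loads of NumElements lanes each, at |
| | // consecutive offsets from the original base pointer. |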
| 18667 | for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) { |
| 18668 | unsigned NewOffset = (i * NewFromVT.getSizeInBits()) / 8; |
| 18669 | SDValue NewPtr = |
| 18670 | DAG.getObjectPtrOffset(SL: DL, Ptr: BasePtr, Offset: TypeSize::getFixed(ExactSize: NewOffset)); |
| 18671 | |
| 18672 | SDValue NewLoad = |
| 18673 | DAG.getLoad(AM: ISD::UNINDEXED, ExtType: NewExtType, VT: NewToVT, dl: DL, Chain: Ch, Ptr: NewPtr, Offset, |
| 18674 | PtrInfo: LD->getPointerInfo().getWithOffset(O: NewOffset), MemVT: NewFromVT, |
| 18675 | Alignment, MMOFlags, AAInfo); |
| 18676 | Loads.push_back(Elt: NewLoad); |
| 18677 | Chains.push_back(Elt: SDValue(NewLoad.getNode(), 1)); |
| 18678 | } |
| 18679 | |
| 18680 | SDValue NewChain = DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Chains); |
| 18681 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 1), To: NewChain); |
| 18682 | return DAG.getMergeValues(Ops: Loads, dl: DL); |
| 18683 | } |
| 18684 | |
| 18685 | // Perform combines for MVEEXT. If it has not been optimized to anything better |
| 18686 | // before lowering, it gets converted to a stack store and extending loads that |
| 18687 | // perform the extend whilst still keeping the same lane ordering. |
| 18688 | SDValue ARMTargetLowering::PerformMVEExtCombine( |
| 18689 | SDNode *N, TargetLowering::DAGCombinerInfo &DCI) const { |
| 18690 | SelectionDAG &DAG = DCI.DAG; |
| 18691 | EVT VT = N->getValueType(ResNo: 0); |
| 18692 | SDLoc DL(N); |
| 18693 | assert(N->getNumValues() == 2 && "Expected MVEEXT with 2 elements" ); |
| 18694 | assert((VT == MVT::v4i32 || VT == MVT::v8i16) && "Unexpected MVEEXT type" ); |
| 18695 | |
| 18696 | EVT ExtVT = N->getOperand(Num: 0).getValueType().getHalfNumVectorElementsVT( |
| 18697 | Context&: *DAG.getContext()); |
| 18698 | auto Extend = [&](SDValue V) { |
| 18699 | SDValue VVT = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: V); |
| 18700 | return N->getOpcode() == ARMISD::MVESEXT |
| 18701 | ? DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL, VT, N1: VVT, |
| 18702 | N2: DAG.getValueType(ExtVT)) |
| 18703 | : DAG.getZeroExtendInReg(Op: VVT, DL, VT: ExtVT); |
| 18704 | }; |
| 18705 | |
| 18706 | // MVEEXT(VDUP) -> SIGN_EXTEND_INREG(VDUP) |
| 18707 | if (N->getOperand(Num: 0).getOpcode() == ARMISD::VDUP) { |
| 18708 | SDValue Ext = Extend(N->getOperand(Num: 0)); |
| 18709 | return DAG.getMergeValues(Ops: {Ext, Ext}, dl: DL); |
| 18710 | } |
| 18711 | |
| 18712 | // MVEEXT(shuffle) -> SIGN_EXTEND_INREG/ZERO_EXTEND_INREG |
| 18713 | if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Val: N->getOperand(Num: 0))) { |
| 18714 | ArrayRef<int> Mask = SVN->getMask(); |
| 18715 | assert(Mask.size() == 2 * VT.getVectorNumElements()); |
| 18716 | assert(Mask.size() == SVN->getValueType(0).getVectorNumElements()); |
| 18717 | unsigned Rev = VT == MVT::v4i32 ? ARMISD::VREV32 : ARMISD::VREV16; |
| 18718 | SDValue Op0 = SVN->getOperand(Num: 0); |
| 18719 | SDValue Op1 = SVN->getOperand(Num: 1); |
| 18720 | |
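| | // Check whether the mask elements starting at Start select every other lane |
| | // (2*Idx + Offset) of the concatenated shuffle operands, ignoring undef |
| | // entries. Offsets 0/1 pick the even/odd lanes of Op0; Mask.size() and |
| | // Mask.size()+1 pick the even/odd lanes of Op1. |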
| 18721 | auto CheckInregMask = [&](int Start, int Offset) { |
| 18722 | for (int Idx = 0, E = VT.getVectorNumElements(); Idx < E; ++Idx) |
| 18723 | if (Mask[Start + Idx] >= 0 && Mask[Start + Idx] != Idx * 2 + Offset) |
| 18724 | return false; |
| 18725 | return true; |
| 18726 | }; |
| 18727 | SDValue V0 = SDValue(N, 0); |
| 18728 | SDValue V1 = SDValue(N, 1); |
| 18729 | if (CheckInregMask(0, 0)) |
| 18730 | V0 = Extend(Op0); |
| 18731 | else if (CheckInregMask(0, 1)) |
| 18732 | V0 = Extend(DAG.getNode(Opcode: Rev, DL, VT: SVN->getValueType(ResNo: 0), Operand: Op0)); |
| 18733 | else if (CheckInregMask(0, Mask.size())) |
| 18734 | V0 = Extend(Op1); |
| 18735 | else if (CheckInregMask(0, Mask.size() + 1)) |
| 18736 | V0 = Extend(DAG.getNode(Opcode: Rev, DL, VT: SVN->getValueType(ResNo: 0), Operand: Op1)); |
| 18737 | |
| 18738 | if (CheckInregMask(VT.getVectorNumElements(), Mask.size())) |
| 18739 | V1 = Extend(Op1); |
| 18740 | else if (CheckInregMask(VT.getVectorNumElements(), Mask.size() + 1)) |
| 18741 | V1 = Extend(DAG.getNode(Opcode: Rev, DL, VT: SVN->getValueType(ResNo: 0), Operand: Op1)); |
| 18742 | else if (CheckInregMask(VT.getVectorNumElements(), 0)) |
| 18743 | V1 = Extend(Op0); |
| 18744 | else if (CheckInregMask(VT.getVectorNumElements(), 1)) |
| 18745 | V1 = Extend(DAG.getNode(Opcode: Rev, DL, VT: SVN->getValueType(ResNo: 0), Operand: Op0)); |
| 18746 | |
| 18747 | if (V0.getNode() != N || V1.getNode() != N) |
| 18748 | return DAG.getMergeValues(Ops: {V0, V1}, dl: DL); |
| 18749 | } |
| 18750 | |
| 18751 | // MVEEXT(load) -> extload, extload |
| 18752 | if (N->getOperand(Num: 0)->getOpcode() == ISD::LOAD) |
| 18753 | if (SDValue L = PerformSplittingMVEEXTToWideningLoad(N, DAG)) |
| 18754 | return L; |
| 18755 | |
| 18756 | if (!DCI.isAfterLegalizeDAG()) |
| 18757 | return SDValue(); |
| 18758 | |
| 18759 | // Lower to a stack store and reload: |
| 18760 | // VSTRW.32 a, stack; VLDRH.32 stack; VLDRH.32 stack+8; |
| 18761 | SDValue StackPtr = DAG.CreateStackTemporary(Bytes: TypeSize::getFixed(ExactSize: 16), Alignment: Align(4)); |
| 18762 | int SPFI = cast<FrameIndexSDNode>(Val: StackPtr.getNode())->getIndex(); |
| 18763 | int NumOuts = N->getNumValues(); |
| 18764 | assert((NumOuts == 2 || NumOuts == 4) && |
| 18765 | "Expected 2 or 4 outputs to an MVEEXT" ); |
| 18766 | EVT LoadVT = N->getOperand(Num: 0).getValueType().getHalfNumVectorElementsVT( |
| 18767 | Context&: *DAG.getContext()); |
| 18768 | if (N->getNumOperands() == 4) |
| 18769 | LoadVT = LoadVT.getHalfNumVectorElementsVT(Context&: *DAG.getContext()); |
| 18770 | |
| 18771 | MachinePointerInfo MPI = |
| 18772 | MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI: SPFI, Offset: 0); |
| 18773 | SDValue Chain = DAG.getStore(Chain: DAG.getEntryNode(), dl: DL, Val: N->getOperand(Num: 0), |
| 18774 | Ptr: StackPtr, PtrInfo: MPI, Alignment: Align(4)); |
| 18775 | |
| 18776 | SmallVector<SDValue> Loads; |
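| | // Reload each output from its part of the stack slot with a sign- or |
| | // zero-extending load, matching the MVEEXT opcode. |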
| 18777 | for (int I = 0; I < NumOuts; I++) { |
| 18778 | SDValue Ptr = DAG.getNode( |
| 18779 | Opcode: ISD::ADD, DL, VT: StackPtr.getValueType(), N1: StackPtr, |
| 18780 | N2: DAG.getConstant(Val: I * 16 / NumOuts, DL, VT: StackPtr.getValueType())); |
| 18781 | MachinePointerInfo MPI = MachinePointerInfo::getFixedStack( |
| 18782 | MF&: DAG.getMachineFunction(), FI: SPFI, Offset: I * 16 / NumOuts); |
| 18783 | SDValue Load = DAG.getExtLoad( |
| 18784 | ExtType: N->getOpcode() == ARMISD::MVESEXT ? ISD::SEXTLOAD : ISD::ZEXTLOAD, dl: DL, |
| 18785 | VT, Chain, Ptr, PtrInfo: MPI, MemVT: LoadVT, Alignment: Align(4)); |
| 18786 | Loads.push_back(Elt: Load); |
| 18787 | } |
| 18788 | |
| 18789 | return DAG.getMergeValues(Ops: Loads, dl: DL); |
| 18790 | } |
| 18791 | |
| 18792 | SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, |
| 18793 | DAGCombinerInfo &DCI) const { |
| 18794 | switch (N->getOpcode()) { |
| 18795 | default: break; |
| 18796 | case ISD::SELECT_CC: |
| 18797 | case ISD::SELECT: return PerformSELECTCombine(N, DCI, Subtarget); |
| 18798 | case ISD::VSELECT: return PerformVSELECTCombine(N, DCI, Subtarget); |
| 18799 | case ISD::SETCC: return PerformVSetCCToVCTPCombine(N, DCI, Subtarget); |
| 18800 | case ARMISD::ADDE: return PerformADDECombine(N, DCI, Subtarget); |
| 18801 | case ARMISD::UMLAL: return PerformUMLALCombine(N, DAG&: DCI.DAG, Subtarget); |
| 18802 | case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget); |
| 18803 | case ISD::SUB: return PerformSUBCombine(N, DCI, Subtarget); |
| 18804 | case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); |
| 18805 | case ISD::OR: return PerformORCombine(N, DCI, Subtarget); |
| 18806 | case ISD::XOR: return PerformXORCombine(N, DCI, Subtarget); |
| 18807 | case ISD::AND: return PerformANDCombine(N, DCI, Subtarget); |
| 18808 | case ISD::BRCOND: |
| 18809 | case ISD::BR_CC: return PerformHWLoopCombine(N, DCI, ST: Subtarget); |
| 18810 | case ARMISD::ADDC: |
| 18811 | case ARMISD::SUBC: return PerformAddcSubcCombine(N, DCI, Subtarget); |
| 18812 | case ARMISD::SUBE: return PerformAddeSubeCombine(N, DCI, Subtarget); |
| 18813 | case ARMISD::BFI: return PerformBFICombine(N, DAG&: DCI.DAG); |
| 18814 | case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget); |
| 18815 | case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DAG&: DCI.DAG); |
| 18816 | case ARMISD::VMOVhr: return PerformVMOVhrCombine(N, DCI); |
| 18817 | case ARMISD::VMOVrh: return PerformVMOVrhCombine(N, DAG&: DCI.DAG); |
| 18818 | case ISD::STORE: return PerformSTORECombine(N, DCI, Subtarget); |
| 18819 | case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget); |
| 18820 | case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); |
| 18821 | case ISD::EXTRACT_VECTOR_ELT: |
| 18822 | return PerformExtractEltCombine(N, DCI, ST: Subtarget); |
| 18823 | case ISD::SIGN_EXTEND_INREG: return PerformSignExtendInregCombine(N, DAG&: DCI.DAG); |
| 18824 | case ISD::INSERT_SUBVECTOR: return PerformInsertSubvectorCombine(N, DCI); |
| 18825 | case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DAG&: DCI.DAG); |
| 18826 | case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI, Subtarget); |
| 18827 | case ARMISD::VDUP: return PerformVDUPCombine(N, DAG&: DCI.DAG, Subtarget); |
| 18828 | case ISD::FP_TO_SINT: |
| 18829 | case ISD::FP_TO_UINT: |
| 18830 | return PerformVCVTCombine(N, DAG&: DCI.DAG, Subtarget); |
| 18831 | case ISD::FADD: |
| 18832 | return PerformFADDCombine(N, DAG&: DCI.DAG, Subtarget); |
| 18833 | case ISD::FMUL: |
| 18834 | return PerformVMulVCTPCombine(N, DAG&: DCI.DAG, Subtarget); |
| 18835 | case ISD::INTRINSIC_WO_CHAIN: |
| 18836 | return PerformIntrinsicCombine(N, DCI); |
| 18837 | case ISD::SHL: |
| 18838 | case ISD::SRA: |
| 18839 | case ISD::SRL: |
| 18840 | return PerformShiftCombine(N, DCI, ST: Subtarget); |
| 18841 | case ISD::SIGN_EXTEND: |
| 18842 | case ISD::ZERO_EXTEND: |
| 18843 | case ISD::ANY_EXTEND: |
| 18844 | return PerformExtendCombine(N, DAG&: DCI.DAG, ST: Subtarget); |
| 18845 | case ISD::FP_EXTEND: |
| 18846 | return PerformFPExtendCombine(N, DAG&: DCI.DAG, ST: Subtarget); |
| 18847 | case ISD::SMIN: |
| 18848 | case ISD::UMIN: |
| 18849 | case ISD::SMAX: |
| 18850 | case ISD::UMAX: |
| 18851 | return PerformMinMaxCombine(N, DAG&: DCI.DAG, ST: Subtarget); |
| 18852 | case ARMISD::CMOV: |
| 18853 | return PerformCMOVCombine(N, DAG&: DCI.DAG); |
| 18854 | case ARMISD::BRCOND: |
| 18855 | return PerformBRCONDCombine(N, DAG&: DCI.DAG); |
| 18856 | case ARMISD::CMPZ: |
| 18857 | return PerformCMPZCombine(N, DAG&: DCI.DAG); |
| 18858 | case ARMISD::CSINC: |
| 18859 | case ARMISD::CSINV: |
| 18860 | case ARMISD::CSNEG: |
| 18861 | return PerformCSETCombine(N, DAG&: DCI.DAG); |
| 18862 | case ISD::LOAD: |
| 18863 | return PerformLOADCombine(N, DCI, Subtarget); |
| 18864 | case ARMISD::VLD1DUP: |
| 18865 | case ARMISD::VLD2DUP: |
| 18866 | case ARMISD::VLD3DUP: |
| 18867 | case ARMISD::VLD4DUP: |
| 18868 | return PerformVLDCombine(N, DCI); |
| 18869 | case ARMISD::BUILD_VECTOR: |
| 18870 | return PerformARMBUILD_VECTORCombine(N, DCI); |
| 18871 | case ISD::BITCAST: |
| 18872 | return PerformBITCASTCombine(N, DCI, ST: Subtarget); |
| 18873 | case ARMISD::PREDICATE_CAST: |
| 18874 | return PerformPREDICATE_CASTCombine(N, DCI); |
| 18875 | case ARMISD::VECTOR_REG_CAST: |
| 18876 | return PerformVECTOR_REG_CASTCombine(N, DAG&: DCI.DAG, ST: Subtarget); |
| 18877 | case ARMISD::MVETRUNC: |
| 18878 | return PerformMVETruncCombine(N, DCI); |
| 18879 | case ARMISD::MVESEXT: |
| 18880 | case ARMISD::MVEZEXT: |
| 18881 | return PerformMVEExtCombine(N, DCI); |
| 18882 | case ARMISD::VCMP: |
| 18883 | return PerformVCMPCombine(N, DAG&: DCI.DAG, Subtarget); |
| 18884 | case ISD::VECREDUCE_ADD: |
| 18885 | return PerformVECREDUCE_ADDCombine(N, DAG&: DCI.DAG, ST: Subtarget); |
| 18886 | case ARMISD::VADDVs: |
| 18887 | case ARMISD::VADDVu: |
| 18888 | case ARMISD::VADDLVs: |
| 18889 | case ARMISD::VADDLVu: |
| 18890 | case ARMISD::VADDLVAs: |
| 18891 | case ARMISD::VADDLVAu: |
| 18892 | case ARMISD::VMLAVs: |
| 18893 | case ARMISD::VMLAVu: |
| 18894 | case ARMISD::VMLALVs: |
| 18895 | case ARMISD::VMLALVu: |
| 18896 | case ARMISD::VMLALVAs: |
| 18897 | case ARMISD::VMLALVAu: |
| 18898 | return PerformReduceShuffleCombine(N, DAG&: DCI.DAG); |
| 18899 | case ARMISD::VMOVN: |
| 18900 | return PerformVMOVNCombine(N, DCI); |
| 18901 | case ARMISD::VQMOVNs: |
| 18902 | case ARMISD::VQMOVNu: |
| 18903 | return PerformVQMOVNCombine(N, DCI); |
| 18904 | case ARMISD::VQDMULH: |
| 18905 | return PerformVQDMULHCombine(N, DCI); |
| 18906 | case ARMISD::ASRL: |
| 18907 | case ARMISD::LSRL: |
| 18908 | case ARMISD::LSLL: |
| 18909 | return PerformLongShiftCombine(N, DAG&: DCI.DAG); |
| 18910 | case ARMISD::SMULWB: { |
| 18911 | unsigned BitWidth = N->getValueType(ResNo: 0).getSizeInBits(); |
| 18912 | APInt DemandedMask = APInt::getLowBitsSet(numBits: BitWidth, loBitsSet: 16); |
| 18913 | if (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI)) |
| 18914 | return SDValue(); |
| 18915 | break; |
| 18916 | } |
| 18917 | case ARMISD::SMULWT: { |
| 18918 | unsigned BitWidth = N->getValueType(ResNo: 0).getSizeInBits(); |
| 18919 | APInt DemandedMask = APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: 16); |
| 18920 | if (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI)) |
| 18921 | return SDValue(); |
| 18922 | break; |
| 18923 | } |
| 18924 | case ARMISD::SMLALBB: |
| 18925 | case ARMISD::QADD16b: |
| 18926 | case ARMISD::QSUB16b: |
| 18927 | case ARMISD::UQADD16b: |
| 18928 | case ARMISD::UQSUB16b: { |
| 18929 | unsigned BitWidth = N->getValueType(ResNo: 0).getSizeInBits(); |
| 18930 | APInt DemandedMask = APInt::getLowBitsSet(numBits: BitWidth, loBitsSet: 16); |
| 18931 | if ((SimplifyDemandedBits(Op: N->getOperand(Num: 0), DemandedBits: DemandedMask, DCI)) || |
| 18932 | (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI))) |
| 18933 | return SDValue(); |
| 18934 | break; |
| 18935 | } |
| 18936 | case ARMISD::SMLALBT: { |
| 18937 | unsigned LowWidth = N->getOperand(Num: 0).getValueType().getSizeInBits(); |
| 18938 | APInt LowMask = APInt::getLowBitsSet(numBits: LowWidth, loBitsSet: 16); |
| 18939 | unsigned HighWidth = N->getOperand(Num: 1).getValueType().getSizeInBits(); |
| 18940 | APInt HighMask = APInt::getHighBitsSet(numBits: HighWidth, hiBitsSet: 16); |
| 18941 | if ((SimplifyDemandedBits(Op: N->getOperand(Num: 0), DemandedBits: LowMask, DCI)) || |
| 18942 | (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: HighMask, DCI))) |
| 18943 | return SDValue(); |
| 18944 | break; |
| 18945 | } |
| 18946 | case ARMISD::SMLALTB: { |
| 18947 | unsigned HighWidth = N->getOperand(Num: 0).getValueType().getSizeInBits(); |
| 18948 | APInt HighMask = APInt::getHighBitsSet(numBits: HighWidth, hiBitsSet: 16); |
| 18949 | unsigned LowWidth = N->getOperand(Num: 1).getValueType().getSizeInBits(); |
| 18950 | APInt LowMask = APInt::getLowBitsSet(numBits: LowWidth, loBitsSet: 16); |
| 18951 | if ((SimplifyDemandedBits(Op: N->getOperand(Num: 0), DemandedBits: HighMask, DCI)) || |
| 18952 | (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: LowMask, DCI))) |
| 18953 | return SDValue(); |
| 18954 | break; |
| 18955 | } |
| 18956 | case ARMISD::SMLALTT: { |
| 18957 | unsigned BitWidth = N->getValueType(ResNo: 0).getSizeInBits(); |
| 18958 | APInt DemandedMask = APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: 16); |
| 18959 | if ((SimplifyDemandedBits(Op: N->getOperand(Num: 0), DemandedBits: DemandedMask, DCI)) || |
| 18960 | (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI))) |
| 18961 | return SDValue(); |
| 18962 | break; |
| 18963 | } |
| 18964 | case ARMISD::QADD8b: |
| 18965 | case ARMISD::QSUB8b: |
| 18966 | case ARMISD::UQADD8b: |
| 18967 | case ARMISD::UQSUB8b: { |
| 18968 | unsigned BitWidth = N->getValueType(ResNo: 0).getSizeInBits(); |
| 18969 | APInt DemandedMask = APInt::getLowBitsSet(numBits: BitWidth, loBitsSet: 8); |
| 18970 | if ((SimplifyDemandedBits(Op: N->getOperand(Num: 0), DemandedBits: DemandedMask, DCI)) || |
| 18971 | (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI))) |
| 18972 | return SDValue(); |
| 18973 | break; |
| 18974 | } |
| 18975 | case ARMISD::VBSP: |
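| | // A bitwise select with identical true/false inputs yields that input |
| | // regardless of the mask. |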
| 18976 | if (N->getOperand(Num: 1) == N->getOperand(Num: 2)) |
| 18977 | return N->getOperand(Num: 1); |
| 18978 | return SDValue(); |
| 18979 | case ISD::INTRINSIC_VOID: |
| 18980 | case ISD::INTRINSIC_W_CHAIN: |
| 18981 | switch (N->getConstantOperandVal(Num: 1)) { |
| 18982 | case Intrinsic::arm_neon_vld1: |
| 18983 | case Intrinsic::arm_neon_vld1x2: |
| 18984 | case Intrinsic::arm_neon_vld1x3: |
| 18985 | case Intrinsic::arm_neon_vld1x4: |
| 18986 | case Intrinsic::arm_neon_vld2: |
| 18987 | case Intrinsic::arm_neon_vld3: |
| 18988 | case Intrinsic::arm_neon_vld4: |
| 18989 | case Intrinsic::arm_neon_vld2lane: |
| 18990 | case Intrinsic::arm_neon_vld3lane: |
| 18991 | case Intrinsic::arm_neon_vld4lane: |
| 18992 | case Intrinsic::arm_neon_vld2dup: |
| 18993 | case Intrinsic::arm_neon_vld3dup: |
| 18994 | case Intrinsic::arm_neon_vld4dup: |
| 18995 | case Intrinsic::arm_neon_vst1: |
| 18996 | case Intrinsic::arm_neon_vst1x2: |
| 18997 | case Intrinsic::arm_neon_vst1x3: |
| 18998 | case Intrinsic::arm_neon_vst1x4: |
| 18999 | case Intrinsic::arm_neon_vst2: |
| 19000 | case Intrinsic::arm_neon_vst3: |
| 19001 | case Intrinsic::arm_neon_vst4: |
| 19002 | case Intrinsic::arm_neon_vst2lane: |
| 19003 | case Intrinsic::arm_neon_vst3lane: |
| 19004 | case Intrinsic::arm_neon_vst4lane: |
| 19005 | return PerformVLDCombine(N, DCI); |
| 19006 | case Intrinsic::arm_mve_vld2q: |
| 19007 | case Intrinsic::arm_mve_vld4q: |
| 19008 | case Intrinsic::arm_mve_vst2q: |
| 19009 | case Intrinsic::arm_mve_vst4q: |
| 19010 | return PerformMVEVLDCombine(N, DCI); |
| 19011 | default: break; |
| 19012 | } |
| 19013 | break; |
| 19014 | } |
| 19015 | return SDValue(); |
| 19016 | } |
| 19017 | |
| 19018 | bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc, |
| 19019 | EVT VT) const { |
| 19020 | return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE); |
| 19021 | } |
| 19022 | |
| 19023 | bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned, |
| 19024 | Align Alignment, |
| 19025 | MachineMemOperand::Flags, |
| 19026 | unsigned *Fast) const { |
| 19027 | // Depends what it gets converted into if the type is weird. |
| 19028 | if (!VT.isSimple()) |
| 19029 | return false; |
| 19030 | |
| 19031 | // The AllowsUnaligned flag models the SCTLR.A setting in ARM cpus |
| 19032 | bool AllowsUnaligned = Subtarget->allowsUnalignedMem(); |
| 19033 | auto Ty = VT.getSimpleVT().SimpleTy; |
| 19034 | |
| 19035 | if (Ty == MVT::i8 || Ty == MVT::i16 || Ty == MVT::i32) { |
| 19036 | // Unaligned access can use (for example) LDRB, LDRH, LDR |
| 19037 | if (AllowsUnaligned) { |
| 19038 | if (Fast) |
| 19039 | *Fast = Subtarget->hasV7Ops(); |
| 19040 | return true; |
| 19041 | } |
| 19042 | } |
| 19043 | |
| 19044 | if (Ty == MVT::f64 || Ty == MVT::v2f64) { |
| 19045 | // For any little-endian targets with neon, we can support unaligned ld/st |
| 19046 | // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8. |
| 19047 | // A big-endian target may also explicitly support unaligned accesses |
| 19048 | if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) { |
| 19049 | if (Fast) |
| 19050 | *Fast = 1; |
| 19051 | return true; |
| 19052 | } |
| 19053 | } |
| 19054 | |
| 19055 | if (!Subtarget->hasMVEIntegerOps()) |
| 19056 | return false; |
| 19057 | |
| 19058 | // These are for predicates |
| 19059 | if ((Ty == MVT::v16i1 || Ty == MVT::v8i1 || Ty == MVT::v4i1 || |
| 19060 | Ty == MVT::v2i1)) { |
| 19061 | if (Fast) |
| 19062 | *Fast = 1; |
| 19063 | return true; |
| 19064 | } |
| 19065 | |
| 19066 | // These are for truncated stores/narrowing loads. They are fine so long as |
| 19067 | // the alignment is at least the size of the item being loaded |
| 19068 | if ((Ty == MVT::v4i8 || Ty == MVT::v8i8 || Ty == MVT::v4i16) && |
| 19069 | Alignment >= VT.getScalarSizeInBits() / 8) { |
| 19070 | if (Fast) |
| 19071 | *Fast = true; |
| 19072 | return true; |
| 19073 | } |
| 19074 | |
| 19075 | // In little-endian MVE, the store instructions VSTRB.U8, VSTRH.U16 and |
| 19076 | // VSTRW.U32 all store the vector register in exactly the same format, and |
| 19077 | // differ only in the range of their immediate offset field and the required |
| 19078 | // alignment. So there is always a store that can be used, regardless of |
| 19079 | // actual type. |
| 19080 | // |
| 19081 | // For big endian, that is not the case. But we can still emit a (VSTRB.U8; |
| 19082 | // VREV64.8) pair and get the same effect. This will likely be better than |
| 19083 | // aligning the vector through the stack. |
| 19084 | if (Ty == MVT::v16i8 || Ty == MVT::v8i16 || Ty == MVT::v8f16 || |
| 19085 | Ty == MVT::v4i32 || Ty == MVT::v4f32 || Ty == MVT::v2i64 || |
| 19086 | Ty == MVT::v2f64) { |
| 19087 | if (Fast) |
| 19088 | *Fast = 1; |
| 19089 | return true; |
| 19090 | } |
| 19091 | |
| 19092 | return false; |
| 19093 | } |
| 19094 | |
| 19095 | EVT ARMTargetLowering::getOptimalMemOpType( |
| 19096 | LLVMContext &Context, const MemOp &Op, |
| 19097 | const AttributeList &FuncAttributes) const { |
| 19098 | // See if we can use NEON instructions for this... |
| 19099 | if ((Op.isMemcpy() || Op.isZeroMemset()) && Subtarget->hasNEON() && |
| 19100 | !FuncAttributes.hasFnAttr(Kind: Attribute::NoImplicitFloat)) { |
| 19101 | unsigned Fast; |
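| | // Prefer 128-bit v2f64 copies for 16 bytes or more when the access is aligned |
| | // or misaligned accesses are fast; otherwise try 64-bit f64 chunks. |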
| 19102 | if (Op.size() >= 16 && |
| 19103 | (Op.isAligned(AlignCheck: Align(16)) || |
| 19104 | (allowsMisalignedMemoryAccesses(VT: MVT::v2f64, 0, Alignment: Align(1), |
| 19105 | MachineMemOperand::MONone, Fast: &Fast) && |
| 19106 | Fast))) { |
| 19107 | return MVT::v2f64; |
| 19108 | } else if (Op.size() >= 8 && |
| 19109 | (Op.isAligned(AlignCheck: Align(8)) || |
| 19110 | (allowsMisalignedMemoryAccesses( |
| 19111 | VT: MVT::f64, 0, Alignment: Align(1), MachineMemOperand::MONone, Fast: &Fast) && |
| 19112 | Fast))) { |
| 19113 | return MVT::f64; |
| 19114 | } |
| 19115 | } |
| 19116 | |
| 19117 | // Let the target-independent logic figure it out. |
| 19118 | return MVT::Other; |
| 19119 | } |
| 19120 | |
| 19121 | // 64-bit integers are split into their high and low parts and held in two |
| 19122 | // different registers, so the trunc is free since the low register can just |
| 19123 | // be used. |
| 19124 | bool ARMTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const { |
| 19125 | if (!SrcTy->isIntegerTy() || !DstTy->isIntegerTy()) |
| 19126 | return false; |
| 19127 | unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); |
| 19128 | unsigned DestBits = DstTy->getPrimitiveSizeInBits(); |
| 19129 | return (SrcBits == 64 && DestBits == 32); |
| 19130 | } |
| 19131 | |
| 19132 | bool ARMTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const { |
| 19133 | if (SrcVT.isVector() || DstVT.isVector() || !SrcVT.isInteger() || |
| 19134 | !DstVT.isInteger()) |
| 19135 | return false; |
| 19136 | unsigned SrcBits = SrcVT.getSizeInBits(); |
| 19137 | unsigned DestBits = DstVT.getSizeInBits(); |
| 19138 | return (SrcBits == 64 && DestBits == 32); |
| 19139 | } |
| 19140 | |
| 19141 | bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { |
| 19142 | if (Val.getOpcode() != ISD::LOAD) |
| 19143 | return false; |
| 19144 | |
| 19145 | EVT VT1 = Val.getValueType(); |
| 19146 | if (!VT1.isSimple() || !VT1.isInteger() || |
| 19147 | !VT2.isSimple() || !VT2.isInteger()) |
| 19148 | return false; |
| 19149 | |
| 19150 | switch (VT1.getSimpleVT().SimpleTy) { |
| 19151 | default: break; |
| 19152 | case MVT::i1: |
| 19153 | case MVT::i8: |
| 19154 | case MVT::i16: |
| 19155 | // 8-bit and 16-bit loads implicitly zero-extend to 32-bits. |
| 19156 | return true; |
| 19157 | } |
| 19158 | |
| 19159 | return false; |
| 19160 | } |
| 19161 | |
| 19162 | bool ARMTargetLowering::isFNegFree(EVT VT) const { |
| 19163 | if (!VT.isSimple()) |
| 19164 | return false; |
| 19165 | |
| 19166 | // There are quite a few FP16 instructions (e.g. VNMLA, VNMLS, etc.) that |
| 19167 | // negate values directly (fneg is free). So, we don't want to let the DAG |
| 19168 | // combiner rewrite fneg into xors and some other instructions. For f16 and |
| 19169 | // FullFP16 argument passing, some bitcast nodes may be introduced that would |
| 19170 | // trigger this DAG combine rewrite, so returning true here avoids that. |
| 19171 | switch (VT.getSimpleVT().SimpleTy) { |
| 19172 | default: break; |
| 19173 | case MVT::f16: |
| 19174 | return Subtarget->hasFullFP16(); |
| 19175 | } |
| 19176 | |
| 19177 | return false; |
| 19178 | } |
| 19179 | |
| 19180 | Type *ARMTargetLowering::shouldConvertSplatType(ShuffleVectorInst *SVI) const { |
| 19181 | if (!Subtarget->hasMVEIntegerOps()) |
| 19182 | return nullptr; |
| 19183 | Type *SVIType = SVI->getType(); |
| 19184 | Type *ScalarType = SVIType->getScalarType(); |
| 19185 | |
| 19186 | if (ScalarType->isFloatTy()) |
| 19187 | return Type::getInt32Ty(C&: SVIType->getContext()); |
| 19188 | if (ScalarType->isHalfTy()) |
| 19189 | return Type::getInt16Ty(C&: SVIType->getContext()); |
| 19190 | return nullptr; |
| 19191 | } |
| 19192 | |
| 19193 | bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const { |
| 19194 | EVT VT = ExtVal.getValueType(); |
| 19195 | |
| 19196 | if (!isTypeLegal(VT)) |
| 19197 | return false; |
| 19198 | |
| 19199 | if (auto *Ld = dyn_cast<MaskedLoadSDNode>(Val: ExtVal.getOperand(i: 0))) { |
| 19200 | if (Ld->isExpandingLoad()) |
| 19201 | return false; |
| 19202 | } |
| 19203 | |
| 19204 | if (Subtarget->hasMVEIntegerOps()) |
| 19205 | return true; |
| 19206 | |
| 19207 | // Don't create a loadext if we can fold the extension into a wide/long |
| 19208 | // instruction. |
| 19209 | // If there's more than one user instruction, the loadext is desirable no |
| 19210 | // matter what. There can be two uses by the same instruction. |
| 19211 | if (ExtVal->use_empty() || |
| 19212 | !ExtVal->user_begin()->isOnlyUserOf(N: ExtVal.getNode())) |
| 19213 | return true; |
| 19214 | |
| 19215 | SDNode *U = *ExtVal->user_begin(); |
| 19216 | if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB || |
| 19217 | U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHLIMM)) |
| 19218 | return false; |
| 19219 | |
| 19220 | return true; |
| 19221 | } |
| 19222 | |
| 19223 | bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { |
| 19224 | if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) |
| 19225 | return false; |
| 19226 | |
| 19227 | if (!isTypeLegal(VT: EVT::getEVT(Ty: Ty1))) |
| 19228 | return false; |
| 19229 | |
| 19230 | assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop" ); |
| 19231 | |
| 19232 | // Assuming the caller doesn't have a zeroext or signext return parameter, |
| 19233 | // truncation all the way down to i1 is valid. |
| 19234 | return true; |
| 19235 | } |
| 19236 | |
| 19237 | /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster |
| 19238 | /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be |
| 19239 | /// expanded to FMAs when this method returns true, otherwise fmuladd is |
| 19240 | /// expanded to fmul + fadd. |
| 19241 | /// |
| 19242 | /// ARM supports both fused and unfused multiply-add operations; we already |
| 19243 | /// lower a pair of fmul and fadd to the latter so it's not clear that there |
| 19244 | /// would be a gain or that the gain would be worthwhile enough to risk |
| 19245 | /// correctness bugs. |
| 19246 | /// |
| 19247 | /// For MVE, we set this to true as it helps simplify the need for some |
| 19248 | /// patterns (and we don't have the non-fused floating point instruction). |
| 19249 | bool ARMTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, |
| 19250 | EVT VT) const { |
| 19251 | if (Subtarget->useSoftFloat()) |
| 19252 | return false; |
| 19253 | |
| 19254 | if (!VT.isSimple()) |
| 19255 | return false; |
| 19256 | |
| 19257 | switch (VT.getSimpleVT().SimpleTy) { |
| 19258 | case MVT::v4f32: |
| 19259 | case MVT::v8f16: |
| 19260 | return Subtarget->hasMVEFloatOps(); |
| 19261 | case MVT::f16: |
| 19262 | return Subtarget->useFPVFMx16(); |
| 19263 | case MVT::f32: |
| 19264 | return Subtarget->useFPVFMx(); |
| 19265 | case MVT::f64: |
| 19266 | return Subtarget->useFPVFMx64(); |
| 19267 | default: |
| 19268 | break; |
| 19269 | } |
| 19270 | |
| 19271 | return false; |
| 19272 | } |
| 19273 | |
| 19274 | static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { |
| 19275 | if (V < 0) |
| 19276 | return false; |
| 19277 | |
| 19278 | unsigned Scale = 1; |
| 19279 | switch (VT.getSimpleVT().SimpleTy) { |
| 19280 | case MVT::i1: |
| 19281 | case MVT::i8: |
| 19282 | // Scale == 1; |
| 19283 | break; |
| 19284 | case MVT::i16: |
| 19285 | // Scale == 2; |
| 19286 | Scale = 2; |
| 19287 | break; |
| 19288 | default: |
| 19289 | // On thumb1 we load most things (i32, i64, floats, etc) with a LDR |
| 19290 | // Scale == 4; |
| 19291 | Scale = 4; |
| 19292 | break; |
| 19293 | } |
| 19294 | |
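| | // Thumb-1 offsets must be a multiple of the access size and, once scaled, fit |
| | // in a 5-bit unsigned immediate. |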
| 19295 | if ((V & (Scale - 1)) != 0) |
| 19296 | return false; |
| 19297 | return isUInt<5>(x: V / Scale); |
| 19298 | } |
| 19299 | |
| 19300 | static bool isLegalT2AddressImmediate(int64_t V, EVT VT, |
| 19301 | const ARMSubtarget *Subtarget) { |
| 19302 | if (!VT.isInteger() && !VT.isFloatingPoint()) |
| 19303 | return false; |
| 19304 | if (VT.isVector() && Subtarget->hasNEON()) |
| 19305 | return false; |
| 19306 | if (VT.isVector() && VT.isFloatingPoint() && Subtarget->hasMVEIntegerOps() && |
| 19307 | !Subtarget->hasMVEFloatOps()) |
| 19308 | return false; |
| 19309 | |
| 19310 | bool IsNeg = false; |
| 19311 | if (V < 0) { |
| 19312 | IsNeg = true; |
| 19313 | V = -V; |
| 19314 | } |
| 19315 | |
| 19316 | unsigned NumBytes = std::max(a: (unsigned)VT.getSizeInBits() / 8, b: 1U); |
| 19317 | |
| 19318 | // MVE: size * imm7 |
| 19319 | if (VT.isVector() && Subtarget->hasMVEIntegerOps()) { |
| 19320 | switch (VT.getSimpleVT().getVectorElementType().SimpleTy) { |
| 19321 | case MVT::i32: |
| 19322 | case MVT::f32: |
| 19323 | return isShiftedUInt<7,2>(x: V); |
| 19324 | case MVT::i16: |
| 19325 | case MVT::f16: |
| 19326 | return isShiftedUInt<7,1>(x: V); |
| 19327 | case MVT::i8: |
| 19328 | return isUInt<7>(x: V); |
| 19329 | default: |
| 19330 | return false; |
| 19331 | } |
| 19332 | } |
| 19333 | |
| 19334 | // half VLDR: 2 * imm8 |
| 19335 | if (VT.isFloatingPoint() && NumBytes == 2 && Subtarget->hasFPRegs16()) |
| 19336 | return isShiftedUInt<8, 1>(x: V); |
| 19337 | // VLDR and LDRD: 4 * imm8 |
| 19338 | if ((VT.isFloatingPoint() && Subtarget->hasVFP2Base()) || NumBytes == 8) |
| 19339 | return isShiftedUInt<8, 2>(x: V); |
| 19340 | |
| 19341 | if (NumBytes == 1 || NumBytes == 2 || NumBytes == 4) { |
| 19342 | // + imm12 or - imm8 |
| 19343 | if (IsNeg) |
| 19344 | return isUInt<8>(x: V); |
| 19345 | return isUInt<12>(x: V); |
| 19346 | } |
| 19347 | |
| 19348 | return false; |
| 19349 | } |
| 19350 | |
| 19351 | /// isLegalAddressImmediate - Return true if the integer value can be used |
| 19352 | /// as the offset of the target addressing mode for load / store of the |
| 19353 | /// given type. |
| 19354 | static bool isLegalAddressImmediate(int64_t V, EVT VT, |
| 19355 | const ARMSubtarget *Subtarget) { |
| 19356 | if (V == 0) |
| 19357 | return true; |
| 19358 | |
| 19359 | if (!VT.isSimple()) |
| 19360 | return false; |
| 19361 | |
| 19362 | if (Subtarget->isThumb1Only()) |
| 19363 | return isLegalT1AddressImmediate(V, VT); |
| 19364 | else if (Subtarget->isThumb2()) |
| 19365 | return isLegalT2AddressImmediate(V, VT, Subtarget); |
| 19366 | |
| 19367 | // ARM mode. |
| 19368 | if (V < 0) |
| 19369 | V = - V; |
| 19370 | switch (VT.getSimpleVT().SimpleTy) { |
| 19371 | default: return false; |
| 19372 | case MVT::i1: |
| 19373 | case MVT::i8: |
| 19374 | case MVT::i32: |
| 19375 | // +- imm12 |
| 19376 | return isUInt<12>(x: V); |
| 19377 | case MVT::i16: |
| 19378 | // +- imm8 |
| 19379 | return isUInt<8>(x: V); |
| 19380 | case MVT::f32: |
| 19381 | case MVT::f64: |
| 19382 | if (!Subtarget->hasVFP2Base()) // FIXME: NEON? |
| 19383 | return false; |
| 19384 | return isShiftedUInt<8, 2>(x: V); |
| 19385 | } |
| 19386 | } |
| 19387 | |
| 19388 | bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, |
| 19389 | EVT VT) const { |
| 19390 | int Scale = AM.Scale; |
| 19391 | if (Scale < 0) |
| 19392 | return false; |
| 19393 | |
| 19394 | switch (VT.getSimpleVT().SimpleTy) { |
| 19395 | default: return false; |
| 19396 | case MVT::i1: |
| 19397 | case MVT::i8: |
| 19398 | case MVT::i16: |
| 19399 | case MVT::i32: |
| 19400 | if (Scale == 1) |
| 19401 | return true; |
| 19402 | // r + r << imm |
| 19403 | Scale = Scale & ~1; |
| 19404 | return Scale == 2 || Scale == 4 || Scale == 8; |
| 19405 | case MVT::i64: |
| 19406 | // FIXME: What are we trying to model here? ldrd doesn't have an r + r |
| 19407 | // version in Thumb mode. |
| 19408 | // r + r |
| 19409 | if (Scale == 1) |
| 19410 | return true; |
| 19411 | // r * 2 (this can be lowered to r + r). |
| 19412 | if (!AM.HasBaseReg && Scale == 2) |
| 19413 | return true; |
| 19414 | return false; |
| 19415 | case MVT::isVoid: |
| 19416 | // Note, we allow "void" uses (basically, uses that aren't loads or |
| 19417 | // stores), because arm allows folding a scale into many arithmetic |
| 19418 | // operations. This should be made more precise and revisited later. |
| 19419 | |
| 19420 | // Allow r << imm, but the imm has to be a multiple of two. |
| 19421 | if (Scale & 1) return false; |
| 19422 | return isPowerOf2_32(Value: Scale); |
| 19423 | } |
| 19424 | } |
| 19425 | |
| 19426 | bool ARMTargetLowering::isLegalT1ScaledAddressingMode(const AddrMode &AM, |
| 19427 | EVT VT) const { |
| 19428 | const int Scale = AM.Scale; |
| 19429 | |
| 19430 | // Negative scales are not supported in Thumb1. |
| 19431 | if (Scale < 0) |
| 19432 | return false; |
| 19433 | |
| 19434 | // Thumb1 addressing modes do not support register scaling excepting the |
| 19435 | // following cases: |
| 19436 | // 1. Scale == 1 means no scaling. |
| 19437 | // 2. Scale == 2 this can be lowered to r + r if there is no base register. |
| 19438 | return (Scale == 1) || (!AM.HasBaseReg && Scale == 2); |
| 19439 | } |
| 19440 | |
| 19441 | /// isLegalAddressingMode - Return true if the addressing mode represented |
| 19442 | /// by AM is legal for this target, for a load/store of the specified type. |
| 19443 | bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL, |
| 19444 | const AddrMode &AM, Type *Ty, |
| 19445 | unsigned AS, Instruction *I) const { |
| 19446 | EVT VT = getValueType(DL, Ty, AllowUnknown: true); |
| 19447 | if (!isLegalAddressImmediate(V: AM.BaseOffs, VT, Subtarget)) |
| 19448 | return false; |
| 19449 | |
| 19450 | // Can never fold addr of global into load/store. |
| 19451 | if (AM.BaseGV) |
| 19452 | return false; |
| 19453 | |
| 19454 | switch (AM.Scale) { |
| 19455 | case 0: // no scale reg, must be "r+i" or "r", or "i". |
| 19456 | break; |
| 19457 | default: |
| 19458 | // ARM doesn't support any R+R*scale+imm addr modes. |
| 19459 | if (AM.BaseOffs) |
| 19460 | return false; |
| 19461 | |
| 19462 | if (!VT.isSimple()) |
| 19463 | return false; |
| 19464 | |
| 19465 | if (Subtarget->isThumb1Only()) |
| 19466 | return isLegalT1ScaledAddressingMode(AM, VT); |
| 19467 | |
| 19468 | if (Subtarget->isThumb2()) |
| 19469 | return isLegalT2ScaledAddressingMode(AM, VT); |
| 19470 | |
| 19471 | int Scale = AM.Scale; |
| 19472 | switch (VT.getSimpleVT().SimpleTy) { |
| 19473 | default: return false; |
| 19474 | case MVT::i1: |
| 19475 | case MVT::i8: |
| 19476 | case MVT::i32: |
| 19477 | if (Scale < 0) Scale = -Scale; |
| 19478 | if (Scale == 1) |
| 19479 | return true; |
| 19480 | // r + r << imm |
| 19481 | return isPowerOf2_32(Value: Scale & ~1); |
| 19482 | case MVT::i16: |
| 19483 | case MVT::i64: |
| 19484 | // r +/- r |
| 19485 | if (Scale == 1 || (AM.HasBaseReg && Scale == -1)) |
| 19486 | return true; |
| 19487 | // r * 2 (this can be lowered to r + r). |
| 19488 | if (!AM.HasBaseReg && Scale == 2) |
| 19489 | return true; |
| 19490 | return false; |
| 19491 | |
| 19492 | case MVT::isVoid: |
| 19493 | // Note, we allow "void" uses (basically, uses that aren't loads or |
| 19494 | // stores), because arm allows folding a scale into many arithmetic |
| 19495 | // operations. This should be made more precise and revisited later. |
| 19496 | |
| 19497 | // Allow r << imm, but the imm has to be a multiple of two. |
| 19498 | if (Scale & 1) return false; |
| 19499 | return isPowerOf2_32(Value: Scale); |
| 19500 | } |
| 19501 | } |
| 19502 | return true; |
| 19503 | } |
| 19504 | |
| 19505 | /// isLegalICmpImmediate - Return true if the specified immediate is legal |
| 19506 | /// icmp immediate, that is the target has icmp instructions which can compare |
| 19507 | /// a register against the immediate without having to materialize the |
| 19508 | /// immediate into a register. |
| 19509 | bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { |
| 19510 | // Thumb2 and ARM modes can use cmn for negative immediates. |
| 19511 | if (!Subtarget->isThumb()) |
| 19512 | return ARM_AM::getSOImmVal(Arg: (uint32_t)Imm) != -1 || |
| 19513 | ARM_AM::getSOImmVal(Arg: -(uint32_t)Imm) != -1; |
| 19514 | if (Subtarget->isThumb2()) |
| 19515 | return ARM_AM::getT2SOImmVal(Arg: (uint32_t)Imm) != -1 || |
| 19516 | ARM_AM::getT2SOImmVal(Arg: -(uint32_t)Imm) != -1; |
| 19517 | // Thumb1 doesn't have cmn, and only 8-bit immediates. |
| 19518 | return Imm >= 0 && Imm <= 255; |
| 19519 | } |
| 19520 | |
| 19521 | /// isLegalAddImmediate - Return true if the specified immediate is a legal add |
| 19522 | /// *or sub* immediate, that is the target has add or sub instructions which can |
| 19523 | /// add a register with the immediate without having to materialize the |
| 19524 | /// immediate into a register. |
| 19525 | bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const { |
| 19526 | // Same encoding for add/sub, just flip the sign. |
| 19527 | uint64_t AbsImm = AbsoluteValue(X: Imm); |
| 19528 | if (!Subtarget->isThumb()) |
| 19529 | return ARM_AM::getSOImmVal(Arg: AbsImm) != -1; |
| 19530 | if (Subtarget->isThumb2()) |
| 19531 | return ARM_AM::getT2SOImmVal(Arg: AbsImm) != -1; |
| 19532 | // Thumb1 only has 8-bit unsigned immediate. |
| 19533 | return AbsImm <= 255; |
| 19534 | } |
| 19535 | |
| 19536 | // Return false to prevent folding |
| 19537 | // (mul (add r, c0), c1) -> (add (mul r, c1), c0*c1) in DAGCombine, |
| 19538 | // if the folding leads to worse code. |
| 19539 | bool ARMTargetLowering::isMulAddWithConstProfitable(SDValue AddNode, |
| 19540 | SDValue ConstNode) const { |
| 19541 | // Let the DAGCombiner decide for vector types and large types. |
| 19542 | const EVT VT = AddNode.getValueType(); |
| 19543 | if (VT.isVector() || VT.getScalarSizeInBits() > 32) |
| 19544 | return true; |
| 19545 | |
| 19546 | // It is worse if c0 is legal add immediate, while c1*c0 is not |
| 19547 | // and has to be composed by at least two instructions. |
| 19548 | const ConstantSDNode *C0Node = cast<ConstantSDNode>(Val: AddNode.getOperand(i: 1)); |
| 19549 | const ConstantSDNode *C1Node = cast<ConstantSDNode>(Val&: ConstNode); |
| 19550 | const int64_t C0 = C0Node->getSExtValue(); |
| 19551 | APInt CA = C0Node->getAPIntValue() * C1Node->getAPIntValue(); |
| 19552 | if (!isLegalAddImmediate(Imm: C0) || isLegalAddImmediate(Imm: CA.getSExtValue())) |
| 19553 | return true; |
| 19554 | if (ConstantMaterializationCost(Val: (unsigned)CA.getZExtValue(), Subtarget) > 1) |
| 19555 | return false; |
| 19556 | |
| 19557 | // Default to true and let the DAGCombiner decide. |
| 19558 | return true; |
| 19559 | } |
| 19560 | |
| 19561 | static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, |
| 19562 | bool isSEXTLoad, SDValue &Base, |
| 19563 | SDValue &Offset, bool &isInc, |
| 19564 | SelectionDAG &DAG) { |
| 19565 | if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) |
| 19566 | return false; |
| 19567 | |
| 19568 | if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { |
| 19569 | // AddressingMode 3 |
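| | // i16 accesses and sign-extending i8/i1 loads use LDRH/LDRSH/LDRSB, which |
| | // take a register offset or an 8-bit immediate. |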
| 19570 | Base = Ptr->getOperand(Num: 0); |
| 19571 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Val: Ptr->getOperand(Num: 1))) { |
| 19572 | int RHSC = (int)RHS->getZExtValue(); |
| 19573 | if (RHSC < 0 && RHSC > -256) { |
| 19574 | assert(Ptr->getOpcode() == ISD::ADD); |
| 19575 | isInc = false; |
| 19576 | Offset = DAG.getConstant(Val: -RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
| 19577 | return true; |
| 19578 | } |
| 19579 | } |
| 19580 | isInc = (Ptr->getOpcode() == ISD::ADD); |
| 19581 | Offset = Ptr->getOperand(Num: 1); |
| 19582 | return true; |
| 19583 | } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { |
| 19584 | // AddressingMode 2 |
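| | // i32 and zero-extending i8 accesses use LDR/LDRB/STR/STRB, which take a |
| | // 12-bit immediate or a (possibly shifted) register offset. |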
| 19585 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Val: Ptr->getOperand(Num: 1))) { |
| 19586 | int RHSC = (int)RHS->getZExtValue(); |
| 19587 | if (RHSC < 0 && RHSC > -0x1000) { |
| 19588 | assert(Ptr->getOpcode() == ISD::ADD); |
| 19589 | isInc = false; |
| 19590 | Offset = DAG.getConstant(Val: -RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
| 19591 | Base = Ptr->getOperand(Num: 0); |
| 19592 | return true; |
| 19593 | } |
| 19594 | } |
| 19595 | |
| 19596 | if (Ptr->getOpcode() == ISD::ADD) { |
| 19597 | isInc = true; |
| 19598 | ARM_AM::ShiftOpc ShOpcVal = |
| 19599 | ARM_AM::getShiftOpcForNode(Opcode: Ptr->getOperand(Num: 0).getOpcode()); |
| 19600 | if (ShOpcVal != ARM_AM::no_shift) { |
| 19601 | Base = Ptr->getOperand(Num: 1); |
| 19602 | Offset = Ptr->getOperand(Num: 0); |
| 19603 | } else { |
| 19604 | Base = Ptr->getOperand(Num: 0); |
| 19605 | Offset = Ptr->getOperand(Num: 1); |
| 19606 | } |
| 19607 | return true; |
| 19608 | } |
| 19609 | |
| 19610 | isInc = (Ptr->getOpcode() == ISD::ADD); |
| 19611 | Base = Ptr->getOperand(Num: 0); |
| 19612 | Offset = Ptr->getOperand(Num: 1); |
| 19613 | return true; |
| 19614 | } |
| 19615 | |
| 19616 | // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. |
| 19617 | return false; |
| 19618 | } |
| 19619 | |
| 19620 | static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, |
| 19621 | bool isSEXTLoad, SDValue &Base, |
| 19622 | SDValue &Offset, bool &isInc, |
| 19623 | SelectionDAG &DAG) { |
| 19624 | if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) |
| 19625 | return false; |
| 19626 | |
| 19627 | Base = Ptr->getOperand(Num: 0); |
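| | // Thumb-2 pre/post-indexed accesses only accept a non-zero 8-bit immediate |
| | // offset, positive or negative. |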
| 19628 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Val: Ptr->getOperand(Num: 1))) { |
| 19629 | int RHSC = (int)RHS->getZExtValue(); |
| 19630 | if (RHSC < 0 && RHSC > -0x100) { // 8 bits. |
| 19631 | assert(Ptr->getOpcode() == ISD::ADD); |
| 19632 | isInc = false; |
| 19633 | Offset = DAG.getConstant(Val: -RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
| 19634 | return true; |
| 19635 | } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. |
| 19636 | isInc = Ptr->getOpcode() == ISD::ADD; |
| 19637 | Offset = DAG.getConstant(Val: RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
| 19638 | return true; |
| 19639 | } |
| 19640 | } |
| 19641 | |
| 19642 | return false; |
| 19643 | } |
| 19644 | |
| 19645 | static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, Align Alignment, |
| 19646 | bool isSEXTLoad, bool IsMasked, bool isLE, |
| 19647 | SDValue &Base, SDValue &Offset, |
| 19648 | bool &isInc, SelectionDAG &DAG) { |
| 19649 | if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) |
| 19650 | return false; |
| 19651 | if (!isa<ConstantSDNode>(Val: Ptr->getOperand(Num: 1))) |
| 19652 | return false; |
| 19653 | |
| 19654 | // We allow LE non-masked loads to change the type (for example use a vldrb.8 |
| 19655 | // as opposed to a vldrw.32). This can allow extra addressing modes or |
| 19656 | // alignments for what is otherwise an equivalent instruction. |
| 19657 | bool CanChangeType = isLE && !IsMasked; |
| 19658 | |
| 19659 | ConstantSDNode *RHS = cast<ConstantSDNode>(Val: Ptr->getOperand(Num: 1)); |
| 19660 | int RHSC = (int)RHS->getZExtValue(); |
| 19661 | |
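| | // MVE pre/post-indexed offsets are 7-bit immediates scaled by the access |
| | // size; accept constants that are a multiple of Scale and within |
| | // +/- Limit * Scale. |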
| 19662 | auto IsInRange = [&](int RHSC, int Limit, int Scale) { |
| 19663 | if (RHSC < 0 && RHSC > -Limit * Scale && RHSC % Scale == 0) { |
| 19664 | assert(Ptr->getOpcode() == ISD::ADD); |
| 19665 | isInc = false; |
| 19666 | Offset = DAG.getConstant(Val: -RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
| 19667 | return true; |
| 19668 | } else if (RHSC > 0 && RHSC < Limit * Scale && RHSC % Scale == 0) { |
| 19669 | isInc = Ptr->getOpcode() == ISD::ADD; |
| 19670 | Offset = DAG.getConstant(Val: RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
| 19671 | return true; |
| 19672 | } |
| 19673 | return false; |
| 19674 | }; |
| 19675 | |
| 19676 | // Try to find a matching instruction based on s/zext, Alignment, Offset and |
| 19677 | // (in BE/masked) type. |
| 19678 | Base = Ptr->getOperand(Num: 0); |
| 19679 | if (VT == MVT::v4i16) { |
| 19680 | if (Alignment >= 2 && IsInRange(RHSC, 0x80, 2)) |
| 19681 | return true; |
| 19682 | } else if (VT == MVT::v4i8 || VT == MVT::v8i8) { |
| 19683 | if (IsInRange(RHSC, 0x80, 1)) |
| 19684 | return true; |
| 19685 | } else if (Alignment >= 4 && |
| 19686 | (CanChangeType || VT == MVT::v4i32 || VT == MVT::v4f32) && |
| 19687 | IsInRange(RHSC, 0x80, 4)) |
| 19688 | return true; |
| 19689 | else if (Alignment >= 2 && |
| 19690 | (CanChangeType || VT == MVT::v8i16 || VT == MVT::v8f16) && |
| 19691 | IsInRange(RHSC, 0x80, 2)) |
| 19692 | return true; |
| 19693 | else if ((CanChangeType || VT == MVT::v16i8) && IsInRange(RHSC, 0x80, 1)) |
| 19694 | return true; |
| 19695 | return false; |
| 19696 | } |
| 19697 | |
| 19698 | /// getPreIndexedAddressParts - returns true by value, base pointer and |
| 19699 | /// offset pointer and addressing mode by reference if the node's address |
| 19700 | /// can be legally represented as pre-indexed load / store address. |
| 19701 | bool |
| 19702 | ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, |
| 19703 | SDValue &Offset, |
| 19704 | ISD::MemIndexedMode &AM, |
| 19705 | SelectionDAG &DAG) const { |
| 19706 | if (Subtarget->isThumb1Only()) |
| 19707 | return false; |
| 19708 | |
| 19709 | EVT VT; |
| 19710 | SDValue Ptr; |
| 19711 | Align Alignment; |
| 19712 | unsigned AS = 0; |
| 19713 | bool isSEXTLoad = false; |
| 19714 | bool IsMasked = false; |
| 19715 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: N)) { |
| 19716 | Ptr = LD->getBasePtr(); |
| 19717 | VT = LD->getMemoryVT(); |
| 19718 | Alignment = LD->getAlign(); |
| 19719 | AS = LD->getAddressSpace(); |
| 19720 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
| 19721 | } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Val: N)) { |
| 19722 | Ptr = ST->getBasePtr(); |
| 19723 | VT = ST->getMemoryVT(); |
| 19724 | Alignment = ST->getAlign(); |
| 19725 | AS = ST->getAddressSpace(); |
| 19726 | } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(Val: N)) { |
| 19727 | Ptr = LD->getBasePtr(); |
| 19728 | VT = LD->getMemoryVT(); |
| 19729 | Alignment = LD->getAlign(); |
| 19730 | AS = LD->getAddressSpace(); |
| 19731 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
| 19732 | IsMasked = true; |
| 19733 | } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(Val: N)) { |
| 19734 | Ptr = ST->getBasePtr(); |
| 19735 | VT = ST->getMemoryVT(); |
| 19736 | Alignment = ST->getAlign(); |
| 19737 | AS = ST->getAddressSpace(); |
| 19738 | IsMasked = true; |
| 19739 | } else |
| 19740 | return false; |
| 19741 | |
| 19742 | unsigned Fast = 0; |
| 19743 | if (!allowsMisalignedMemoryAccesses(VT, AS, Alignment, |
| 19744 | MachineMemOperand::MONone, Fast: &Fast)) { |
| 19745 | // Only generate post-increment or pre-increment forms when a real |
| 19746 | // hardware instruction exists for them. Do not emit postinc/preinc |
| 19747 | // if the operation will end up as a libcall. |
| 19748 | return false; |
| 19749 | } |
| 19750 | |
| 19751 | bool isInc; |
| 19752 | bool isLegal = false; |
| 19753 | if (VT.isVector()) |
| 19754 | isLegal = Subtarget->hasMVEIntegerOps() && |
| 19755 | getMVEIndexedAddressParts( |
| 19756 | Ptr: Ptr.getNode(), VT, Alignment, isSEXTLoad, IsMasked, |
| 19757 | isLE: Subtarget->isLittle(), Base, Offset, isInc, DAG); |
| 19758 | else { |
| 19759 | if (Subtarget->isThumb2()) |
| 19760 | isLegal = getT2IndexedAddressParts(Ptr: Ptr.getNode(), VT, isSEXTLoad, Base, |
| 19761 | Offset, isInc, DAG); |
| 19762 | else |
| 19763 | isLegal = getARMIndexedAddressParts(Ptr: Ptr.getNode(), VT, isSEXTLoad, Base, |
| 19764 | Offset, isInc, DAG); |
| 19765 | } |
| 19766 | if (!isLegal) |
| 19767 | return false; |
| 19768 | |
| 19769 | AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; |
| 19770 | return true; |
| 19771 | } |
| 19772 | |
| 19773 | /// getPostIndexedAddressParts - returns true by value, base pointer and |
| 19774 | /// offset pointer and addressing mode by reference if this node can be |
| 19775 | /// combined with a load / store to form a post-indexed load / store. |
| 19776 | bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, |
| 19777 | SDValue &Base, |
| 19778 | SDValue &Offset, |
| 19779 | ISD::MemIndexedMode &AM, |
| 19780 | SelectionDAG &DAG) const { |
| 19781 | EVT VT; |
| 19782 | SDValue Ptr; |
| 19783 | Align Alignment; |
| 19784 | bool isSEXTLoad = false, isNonExt; |
| 19785 | bool IsMasked = false; |
| 19786 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: N)) { |
| 19787 | VT = LD->getMemoryVT(); |
| 19788 | Ptr = LD->getBasePtr(); |
| 19789 | Alignment = LD->getAlign(); |
| 19790 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
| 19791 | isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD; |
| 19792 | } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Val: N)) { |
| 19793 | VT = ST->getMemoryVT(); |
| 19794 | Ptr = ST->getBasePtr(); |
| 19795 | Alignment = ST->getAlign(); |
| 19796 | isNonExt = !ST->isTruncatingStore(); |
| 19797 | } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(Val: N)) { |
| 19798 | VT = LD->getMemoryVT(); |
| 19799 | Ptr = LD->getBasePtr(); |
| 19800 | Alignment = LD->getAlign(); |
| 19801 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
| 19802 | isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD; |
| 19803 | IsMasked = true; |
| 19804 | } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(Val: N)) { |
| 19805 | VT = ST->getMemoryVT(); |
| 19806 | Ptr = ST->getBasePtr(); |
| 19807 | Alignment = ST->getAlign(); |
| 19808 | isNonExt = !ST->isTruncatingStore(); |
| 19809 | IsMasked = true; |
| 19810 | } else |
| 19811 | return false; |
| 19812 | |
| 19813 | if (Subtarget->isThumb1Only()) { |
| 19814 | // Thumb-1 can do a limited post-inc load or store as an updating LDM. It |
| 19815 | // must be non-extending/truncating, i32, with an offset of 4. |
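// For example, a plain i32 post-increment load can be selected as an
// updating LDM: "ldm r0!, {r1}" loads from r0 and advances r0 by 4.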
| 19816 | assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!" ); |
| 19817 | if (Op->getOpcode() != ISD::ADD || !isNonExt) |
| 19818 | return false; |
| 19819 | auto *RHS = dyn_cast<ConstantSDNode>(Val: Op->getOperand(Num: 1)); |
| 19820 | if (!RHS || RHS->getZExtValue() != 4) |
| 19821 | return false; |
| 19822 | if (Alignment < Align(4)) |
| 19823 | return false; |
| 19824 | |
| 19825 | Offset = Op->getOperand(Num: 1); |
| 19826 | Base = Op->getOperand(Num: 0); |
| 19827 | AM = ISD::POST_INC; |
| 19828 | return true; |
| 19829 | } |
| 19830 | |
| 19831 | bool isInc; |
| 19832 | bool isLegal = false; |
| 19833 | if (VT.isVector()) |
| 19834 | isLegal = Subtarget->hasMVEIntegerOps() && |
| 19835 | getMVEIndexedAddressParts(Ptr: Op, VT, Alignment, isSEXTLoad, IsMasked, |
| 19836 | isLE: Subtarget->isLittle(), Base, Offset, |
| 19837 | isInc, DAG); |
| 19838 | else { |
| 19839 | if (Subtarget->isThumb2()) |
| 19840 | isLegal = getT2IndexedAddressParts(Ptr: Op, VT, isSEXTLoad, Base, Offset, |
| 19841 | isInc, DAG); |
| 19842 | else |
| 19843 | isLegal = getARMIndexedAddressParts(Ptr: Op, VT, isSEXTLoad, Base, Offset, |
| 19844 | isInc, DAG); |
| 19845 | } |
| 19846 | if (!isLegal) |
| 19847 | return false; |
| 19848 | |
| 19849 | if (Ptr != Base) { |
| 19850 | // Swap base ptr and offset to catch more post-index load / store when |
| 19851 | // it's legal. In Thumb2 mode, offset must be an immediate. |
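// For example, with a load from r1 and Op == (add r0, r1), the helpers
// above pick Base = r0 and Offset = r1; since ADD is commutative we can
// swap them so that r1 becomes the post-incremented base and r0 the offset.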
| 19852 | if (Ptr == Offset && Op->getOpcode() == ISD::ADD && |
| 19853 | !Subtarget->isThumb2()) |
| 19854 | std::swap(a&: Base, b&: Offset); |
| 19855 | |
| 19856 | // Post-indexed load / store update the base pointer. |
| 19857 | if (Ptr != Base) |
| 19858 | return false; |
| 19859 | } |
| 19860 | |
| 19861 | AM = isInc ? ISD::POST_INC : ISD::POST_DEC; |
| 19862 | return true; |
| 19863 | } |
| 19864 | |
| 19865 | void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, |
| 19866 | KnownBits &Known, |
| 19867 | const APInt &DemandedElts, |
| 19868 | const SelectionDAG &DAG, |
| 19869 | unsigned Depth) const { |
| 19870 | unsigned BitWidth = Known.getBitWidth(); |
| 19871 | Known.resetAll(); |
| 19872 | switch (Op.getOpcode()) { |
| 19873 | default: break; |
| 19874 | case ARMISD::ADDC: |
| 19875 | case ARMISD::ADDE: |
| 19876 | case ARMISD::SUBC: |
| 19877 | case ARMISD::SUBE: |
| 19878 | // Special cases when we convert a carry to a boolean. |
| 19879 | if (Op.getResNo() == 0) { |
| 19880 | SDValue LHS = Op.getOperand(i: 0); |
| 19881 | SDValue RHS = Op.getOperand(i: 1); |
| 19882 | // (ADDE 0, 0, C) will give us a single bit. |
| 19883 | if (Op->getOpcode() == ARMISD::ADDE && isNullConstant(V: LHS) && |
| 19884 | isNullConstant(V: RHS)) { |
| 19885 | Known.Zero |= APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - 1); |
| 19886 | return; |
| 19887 | } |
| 19888 | } |
| 19889 | break; |
| 19890 | case ARMISD::CMOV: { |
| 19891 | // Bits are known zero/one if known on the LHS and RHS. |
| 19892 | Known = DAG.computeKnownBits(Op: Op.getOperand(i: 0), Depth: Depth+1); |
| 19893 | if (Known.isUnknown()) |
| 19894 | return; |
| 19895 | |
| 19896 | KnownBits KnownRHS = DAG.computeKnownBits(Op: Op.getOperand(i: 1), Depth: Depth+1); |
| 19897 | Known = Known.intersectWith(RHS: KnownRHS); |
| 19898 | return; |
| 19899 | } |
| 19900 | case ISD::INTRINSIC_W_CHAIN: { |
| 19901 | Intrinsic::ID IntID = |
| 19902 | static_cast<Intrinsic::ID>(Op->getConstantOperandVal(Num: 1)); |
| 19903 | switch (IntID) { |
| 19904 | default: return; |
| 19905 | case Intrinsic::arm_ldaex: |
| 19906 | case Intrinsic::arm_ldrex: { |
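// LDREXB/LDREXH (and their acquire forms) zero-extend the loaded value
// into a 32-bit register, so every bit above the memory width is known
// to be zero.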
| 19907 | EVT VT = cast<MemIntrinsicSDNode>(Val: Op)->getMemoryVT(); |
| 19908 | unsigned MemBits = VT.getScalarSizeInBits(); |
| 19909 | Known.Zero |= APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - MemBits); |
| 19910 | return; |
| 19911 | } |
| 19912 | } |
| 19913 | } |
| 19914 | case ARMISD::BFI: { |
| 19915 | // Conservatively, we can recurse down the first operand |
| 19916 | // and just mask out all affected bits. |
| 19917 | Known = DAG.computeKnownBits(Op: Op.getOperand(i: 0), Depth: Depth + 1); |
| 19918 | |
| 19919 | // The operand to BFI is already a mask suitable for removing the bits it |
| 19920 | // sets. |
| 19921 | const APInt &Mask = Op.getConstantOperandAPInt(i: 2); |
| 19922 | Known.Zero &= Mask; |
| 19923 | Known.One &= Mask; |
| 19924 | return; |
| 19925 | } |
| 19926 | case ARMISD::VGETLANEs: |
| 19927 | case ARMISD::VGETLANEu: { |
| 19928 | const SDValue &SrcSV = Op.getOperand(i: 0); |
| 19929 | EVT VecVT = SrcSV.getValueType(); |
| 19930 | assert(VecVT.isVector() && "VGETLANE expected a vector type" ); |
| 19931 | const unsigned NumSrcElts = VecVT.getVectorNumElements(); |
| 19932 | ConstantSDNode *Pos = cast<ConstantSDNode>(Val: Op.getOperand(i: 1).getNode()); |
| 19933 | assert(Pos->getAPIntValue().ult(NumSrcElts) && |
| 19934 | "VGETLANE index out of bounds" ); |
| 19935 | unsigned Idx = Pos->getZExtValue(); |
| 19936 | APInt DemandedElt = APInt::getOneBitSet(numBits: NumSrcElts, BitNo: Idx); |
| 19937 | Known = DAG.computeKnownBits(Op: SrcSV, DemandedElts: DemandedElt, Depth: Depth + 1); |
| 19938 | |
| 19939 | EVT VT = Op.getValueType(); |
| 19940 | const unsigned DstSz = VT.getScalarSizeInBits(); |
| 19941 | const unsigned SrcSz = VecVT.getVectorElementType().getSizeInBits(); |
| 19942 | (void)SrcSz; |
| 19943 | assert(SrcSz == Known.getBitWidth()); |
| 19944 | assert(DstSz > SrcSz); |
if (Op.getOpcode() == ARMISD::VGETLANEs)
  Known = Known.sext(BitWidth: DstSz);
else
  Known = Known.zext(BitWidth: DstSz);
| 19950 | assert(DstSz == Known.getBitWidth()); |
| 19951 | break; |
| 19952 | } |
| 19953 | case ARMISD::VMOVrh: { |
| 19954 | KnownBits KnownOp = DAG.computeKnownBits(Op: Op->getOperand(Num: 0), Depth: Depth + 1); |
| 19955 | assert(KnownOp.getBitWidth() == 16); |
| 19956 | Known = KnownOp.zext(BitWidth: 32); |
| 19957 | break; |
| 19958 | } |
| 19959 | case ARMISD::CSINC: |
| 19960 | case ARMISD::CSINV: |
| 19961 | case ARMISD::CSNEG: { |
| 19962 | KnownBits KnownOp0 = DAG.computeKnownBits(Op: Op->getOperand(Num: 0), Depth: Depth + 1); |
| 19963 | KnownBits KnownOp1 = DAG.computeKnownBits(Op: Op->getOperand(Num: 1), Depth: Depth + 1); |
| 19964 | |
| 19965 | // The result is either: |
| 19966 | // CSINC: KnownOp0 or KnownOp1 + 1 |
| 19967 | // CSINV: KnownOp0 or ~KnownOp1 |
| 19968 | // CSNEG: KnownOp0 or KnownOp1 * -1 |
| 19969 | if (Op.getOpcode() == ARMISD::CSINC) |
| 19970 | KnownOp1 = |
| 19971 | KnownBits::add(LHS: KnownOp1, RHS: KnownBits::makeConstant(C: APInt(32, 1))); |
| 19972 | else if (Op.getOpcode() == ARMISD::CSINV) |
| 19973 | std::swap(a&: KnownOp1.Zero, b&: KnownOp1.One); |
| 19974 | else if (Op.getOpcode() == ARMISD::CSNEG) |
| 19975 | KnownOp1 = KnownBits::mul(LHS: KnownOp1, |
| 19976 | RHS: KnownBits::makeConstant(C: APInt::getAllOnes(numBits: 32))); |
| 19977 | |
| 19978 | Known = KnownOp0.intersectWith(RHS: KnownOp1); |
| 19979 | break; |
| 19980 | } |
| 19981 | case ARMISD::VORRIMM: |
| 19982 | case ARMISD::VBICIMM: { |
| 19983 | unsigned Encoded = Op.getConstantOperandVal(i: 1); |
| 19984 | unsigned DecEltBits = 0; |
| 19985 | uint64_t DecodedVal = ARM_AM::decodeVMOVModImm(ModImm: Encoded, EltBits&: DecEltBits); |
| 19986 | |
| 19987 | unsigned EltBits = Op.getScalarValueSizeInBits(); |
| 19988 | if (EltBits != DecEltBits) { |
| 19989 | // Be conservative: only update Known when EltBits == DecEltBits. |
| 19990 | // This is believed to always be true for VORRIMM/VBICIMM today, but if |
| 19991 | // that changes in the future, doing nothing here is safer than risking |
| 19992 | // subtle bugs. |
| 19993 | break; |
| 19994 | } |
| 19995 | |
| 19996 | KnownBits KnownLHS = DAG.computeKnownBits(Op: Op.getOperand(i: 0), Depth: Depth + 1); |
| 19997 | bool IsVORR = Op.getOpcode() == ARMISD::VORRIMM; |
| 19998 | APInt Imm(DecEltBits, DecodedVal); |
| 19999 | |
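// VORR with an immediate forces the decoded immediate bits to one, VBIC
// forces them to zero; all other bits keep whatever is known about the
// first operand.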
| 20000 | Known.One = IsVORR ? (KnownLHS.One | Imm) : (KnownLHS.One & ~Imm); |
| 20001 | Known.Zero = IsVORR ? (KnownLHS.Zero & ~Imm) : (KnownLHS.Zero | Imm); |
| 20002 | break; |
| 20003 | } |
| 20004 | } |
| 20005 | } |
| 20006 | |
| 20007 | bool ARMTargetLowering::targetShrinkDemandedConstant( |
| 20008 | SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, |
| 20009 | TargetLoweringOpt &TLO) const { |
| 20010 | // Delay optimization, so we don't have to deal with illegal types, or block |
| 20011 | // optimizations. |
| 20012 | if (!TLO.LegalOps) |
| 20013 | return false; |
| 20014 | |
| 20015 | // Only optimize AND for now. |
| 20016 | if (Op.getOpcode() != ISD::AND) |
| 20017 | return false; |
| 20018 | |
| 20019 | EVT VT = Op.getValueType(); |
| 20020 | |
| 20021 | // Ignore vectors. |
| 20022 | if (VT.isVector()) |
| 20023 | return false; |
| 20024 | |
| 20025 | assert(VT == MVT::i32 && "Unexpected integer type" ); |
| 20026 | |
| 20027 | // Make sure the RHS really is a constant. |
| 20028 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val: Op.getOperand(i: 1)); |
| 20029 | if (!C) |
| 20030 | return false; |
| 20031 | |
| 20032 | unsigned Mask = C->getZExtValue(); |
| 20033 | |
| 20034 | unsigned Demanded = DemandedBits.getZExtValue(); |
| 20035 | unsigned ShrunkMask = Mask & Demanded; |
| 20036 | unsigned ExpandedMask = Mask | ~Demanded; |
| 20037 | |
| 20038 | // If the mask is all zeros, let the target-independent code replace the |
| 20039 | // result with zero. |
| 20040 | if (ShrunkMask == 0) |
| 20041 | return false; |
| 20042 | |
| 20043 | // If the mask is all ones, erase the AND. (Currently, the target-independent |
| 20044 | // code won't do this, so we have to do it explicitly to avoid an infinite |
| 20045 | // loop in obscure cases.) |
| 20046 | if (ExpandedMask == ~0U) |
| 20047 | return TLO.CombineTo(O: Op, N: Op.getOperand(i: 0)); |
| 20048 | |
| 20049 | auto IsLegalMask = [ShrunkMask, ExpandedMask](unsigned Mask) -> bool { |
| 20050 | return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0; |
| 20051 | }; |
| 20052 | auto UseMask = [Mask, Op, VT, &TLO](unsigned NewMask) -> bool { |
| 20053 | if (NewMask == Mask) |
| 20054 | return true; |
| 20055 | SDLoc DL(Op); |
| 20056 | SDValue NewC = TLO.DAG.getConstant(Val: NewMask, DL, VT); |
| 20057 | SDValue NewOp = TLO.DAG.getNode(Opcode: ISD::AND, DL, VT, N1: Op.getOperand(i: 0), N2: NewC); |
| 20058 | return TLO.CombineTo(O: Op, N: NewOp); |
| 20059 | }; |
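// For example, (and X, 0x00f000ff) where only the low 16 bits are demanded
// can use the mask 0xff instead: on the demanded bits both masks behave the
// same, and 0xff can be matched as a uxtb.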
| 20060 | |
| 20061 | // Prefer uxtb mask. |
| 20062 | if (IsLegalMask(0xFF)) |
| 20063 | return UseMask(0xFF); |
| 20064 | |
| 20065 | // Prefer uxth mask. |
| 20066 | if (IsLegalMask(0xFFFF)) |
| 20067 | return UseMask(0xFFFF); |
| 20068 | |
| 20069 | // [1, 255] is Thumb1 movs+ands, legal immediate for ARM/Thumb2. |
| 20070 | // FIXME: Prefer a contiguous sequence of bits for other optimizations. |
| 20071 | if (ShrunkMask < 256) |
| 20072 | return UseMask(ShrunkMask); |
| 20073 | |
| 20074 | // [-256, -2] is Thumb1 movs+bics, legal immediate for ARM/Thumb2. |
| 20075 | // FIXME: Prefer a contiguous sequence of bits for other optimizations. |
| 20076 | if ((int)ExpandedMask <= -2 && (int)ExpandedMask >= -256) |
| 20077 | return UseMask(ExpandedMask); |
| 20078 | |
| 20079 | // Potential improvements: |
| 20080 | // |
| 20081 | // We could try to recognize lsls+lsrs or lsrs+lsls pairs here. |
| 20082 | // We could try to prefer Thumb1 immediates which can be lowered to a |
| 20083 | // two-instruction sequence. |
| 20084 | // We could try to recognize more legal ARM/Thumb2 immediates here. |
| 20085 | |
| 20086 | return false; |
| 20087 | } |
| 20088 | |
| 20089 | bool ARMTargetLowering::SimplifyDemandedBitsForTargetNode( |
| 20090 | SDValue Op, const APInt &OriginalDemandedBits, |
| 20091 | const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, |
| 20092 | unsigned Depth) const { |
| 20093 | unsigned Opc = Op.getOpcode(); |
| 20094 | |
| 20095 | switch (Opc) { |
| 20096 | case ARMISD::ASRL: |
| 20097 | case ARMISD::LSRL: { |
// If this is result 0 and the other result is unused, see if the demanded
// bits allow us to shrink this long shift into a standard small shift in
// the opposite direction.
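// For example, writing the inputs as Lo (operand 0) and Hi (operand 1),
// the low result of a 64-bit right shift by ShAmt is
// (Lo >> ShAmt) | (Hi << (32 - ShAmt)); if only its top ShAmt bits are
// demanded, that is just (shl Hi, 32 - ShAmt).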
| 20101 | if (Op.getResNo() == 0 && !Op->hasAnyUseOfValue(Value: 1) && |
| 20102 | isa<ConstantSDNode>(Val: Op->getOperand(Num: 2))) { |
| 20103 | unsigned ShAmt = Op->getConstantOperandVal(Num: 2); |
| 20104 | if (ShAmt < 32 && OriginalDemandedBits.isSubsetOf(RHS: APInt::getAllOnes(numBits: 32) |
| 20105 | << (32 - ShAmt))) |
| 20106 | return TLO.CombineTo( |
| 20107 | O: Op, N: TLO.DAG.getNode( |
| 20108 | Opcode: ISD::SHL, DL: SDLoc(Op), VT: MVT::i32, N1: Op.getOperand(i: 1), |
| 20109 | N2: TLO.DAG.getConstant(Val: 32 - ShAmt, DL: SDLoc(Op), VT: MVT::i32))); |
| 20110 | } |
| 20111 | break; |
| 20112 | } |
| 20113 | case ARMISD::VBICIMM: { |
| 20114 | SDValue Op0 = Op.getOperand(i: 0); |
| 20115 | unsigned ModImm = Op.getConstantOperandVal(i: 1); |
| 20116 | unsigned EltBits = 0; |
| 20117 | uint64_t Mask = ARM_AM::decodeVMOVModImm(ModImm, EltBits); |
| 20118 | if ((OriginalDemandedBits & Mask) == 0) |
| 20119 | return TLO.CombineTo(O: Op, N: Op0); |
| 20120 | } |
| 20121 | } |
| 20122 | |
| 20123 | return TargetLowering::SimplifyDemandedBitsForTargetNode( |
| 20124 | Op, DemandedBits: OriginalDemandedBits, DemandedElts: OriginalDemandedElts, Known, TLO, Depth); |
| 20125 | } |
| 20126 | |
| 20127 | //===----------------------------------------------------------------------===// |
| 20128 | // ARM Inline Assembly Support |
| 20129 | //===----------------------------------------------------------------------===// |
| 20130 | |
| 20131 | const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const { |
| 20132 | // At this point, we have to lower this constraint to something else, so we |
| 20133 | // lower it to an "r" or "w". However, by doing this we will force the result |
// to be in a register, while the X constraint is much more permissive.
| 20135 | // |
| 20136 | // Although we are correct (we are free to emit anything, without |
| 20137 | // constraints), we might break use cases that would expect us to be more |
| 20138 | // efficient and emit something else. |
| 20139 | if (!Subtarget->hasVFP2Base()) |
| 20140 | return "r" ; |
| 20141 | if (ConstraintVT.isFloatingPoint()) |
| 20142 | return "w" ; |
| 20143 | if (ConstraintVT.isVector() && Subtarget->hasNEON() && |
| 20144 | (ConstraintVT.getSizeInBits() == 64 || |
| 20145 | ConstraintVT.getSizeInBits() == 128)) |
| 20146 | return "w" ; |
| 20147 | |
| 20148 | return "r" ; |
| 20149 | } |
| 20150 | |
| 20151 | /// getConstraintType - Given a constraint letter, return the type of |
| 20152 | /// constraint it is for this target. |
| 20153 | ARMTargetLowering::ConstraintType |
| 20154 | ARMTargetLowering::getConstraintType(StringRef Constraint) const { |
| 20155 | unsigned S = Constraint.size(); |
| 20156 | if (S == 1) { |
| 20157 | switch (Constraint[0]) { |
| 20158 | default: break; |
| 20159 | case 'l': return C_RegisterClass; |
| 20160 | case 'w': return C_RegisterClass; |
| 20161 | case 'h': return C_RegisterClass; |
| 20162 | case 'x': return C_RegisterClass; |
| 20163 | case 't': return C_RegisterClass; |
| 20164 | case 'j': return C_Immediate; // Constant for movw. |
| 20165 | // An address with a single base register. Due to the way we |
| 20166 | // currently handle addresses it is the same as an 'r' memory constraint. |
| 20167 | case 'Q': return C_Memory; |
| 20168 | } |
| 20169 | } else if (S == 2) { |
| 20170 | switch (Constraint[0]) { |
| 20171 | default: break; |
| 20172 | case 'T': return C_RegisterClass; |
| 20173 | // All 'U+' constraints are addresses. |
| 20174 | case 'U': return C_Memory; |
| 20175 | } |
| 20176 | } |
| 20177 | return TargetLowering::getConstraintType(Constraint); |
| 20178 | } |
| 20179 | |
| 20180 | /// Examine constraint type and operand type and determine a weight value. |
| 20181 | /// This object must already have been set up with the operand type |
| 20182 | /// and the current alternative constraint selected. |
| 20183 | TargetLowering::ConstraintWeight |
| 20184 | ARMTargetLowering::getSingleConstraintMatchWeight( |
| 20185 | AsmOperandInfo &info, const char *constraint) const { |
| 20186 | ConstraintWeight weight = CW_Invalid; |
| 20187 | Value *CallOperandVal = info.CallOperandVal; |
| 20188 | // If we don't have a value, we can't do a match, |
| 20189 | // but allow it at the lowest weight. |
| 20190 | if (!CallOperandVal) |
| 20191 | return CW_Default; |
| 20192 | Type *type = CallOperandVal->getType(); |
| 20193 | // Look at the constraint type. |
| 20194 | switch (*constraint) { |
| 20195 | default: |
| 20196 | weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); |
| 20197 | break; |
| 20198 | case 'l': |
| 20199 | if (type->isIntegerTy()) { |
| 20200 | if (Subtarget->isThumb()) |
| 20201 | weight = CW_SpecificReg; |
| 20202 | else |
| 20203 | weight = CW_Register; |
| 20204 | } |
| 20205 | break; |
| 20206 | case 'w': |
| 20207 | if (type->isFloatingPointTy()) |
| 20208 | weight = CW_Register; |
| 20209 | break; |
| 20210 | } |
| 20211 | return weight; |
| 20212 | } |
| 20213 | |
| 20214 | static bool isIncompatibleReg(const MCPhysReg &PR, MVT VT) { |
| 20215 | if (PR == 0 || VT == MVT::Other) |
| 20216 | return false; |
| 20217 | if (ARM::SPRRegClass.contains(Reg: PR)) |
| 20218 | return VT != MVT::f32 && VT != MVT::f16 && VT != MVT::i32; |
| 20219 | if (ARM::DPRRegClass.contains(Reg: PR)) |
| 20220 | return VT != MVT::f64 && !VT.is64BitVector(); |
| 20221 | return false; |
| 20222 | } |
| 20223 | |
| 20224 | using RCPair = std::pair<unsigned, const TargetRegisterClass *>; |
| 20225 | |
| 20226 | RCPair ARMTargetLowering::getRegForInlineAsmConstraint( |
| 20227 | const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const { |
| 20228 | switch (Constraint.size()) { |
| 20229 | case 1: |
| 20230 | // GCC ARM Constraint Letters |
| 20231 | switch (Constraint[0]) { |
| 20232 | case 'l': // Low regs or general regs. |
| 20233 | if (Subtarget->isThumb()) |
| 20234 | return RCPair(0U, &ARM::tGPRRegClass); |
| 20235 | return RCPair(0U, &ARM::GPRRegClass); |
| 20236 | case 'h': // High regs or no regs. |
| 20237 | if (Subtarget->isThumb()) |
| 20238 | return RCPair(0U, &ARM::hGPRRegClass); |
| 20239 | break; |
| 20240 | case 'r': |
| 20241 | if (Subtarget->isThumb1Only()) |
| 20242 | return RCPair(0U, &ARM::tGPRRegClass); |
| 20243 | return RCPair(0U, &ARM::GPRRegClass); |
| 20244 | case 'w': |
| 20245 | if (VT == MVT::Other) |
| 20246 | break; |
| 20247 | if (VT == MVT::f32 || VT == MVT::f16 || VT == MVT::bf16) |
| 20248 | return RCPair(0U, &ARM::SPRRegClass); |
| 20249 | if (VT.getSizeInBits() == 64) |
| 20250 | return RCPair(0U, &ARM::DPRRegClass); |
| 20251 | if (VT.getSizeInBits() == 128) |
| 20252 | return RCPair(0U, &ARM::QPRRegClass); |
| 20253 | break; |
| 20254 | case 'x': |
| 20255 | if (VT == MVT::Other) |
| 20256 | break; |
| 20257 | if (VT == MVT::f32 || VT == MVT::f16 || VT == MVT::bf16) |
| 20258 | return RCPair(0U, &ARM::SPR_8RegClass); |
| 20259 | if (VT.getSizeInBits() == 64) |
| 20260 | return RCPair(0U, &ARM::DPR_8RegClass); |
| 20261 | if (VT.getSizeInBits() == 128) |
| 20262 | return RCPair(0U, &ARM::QPR_8RegClass); |
| 20263 | break; |
| 20264 | case 't': |
| 20265 | if (VT == MVT::Other) |
| 20266 | break; |
| 20267 | if (VT == MVT::f32 || VT == MVT::i32 || VT == MVT::f16 || VT == MVT::bf16) |
| 20268 | return RCPair(0U, &ARM::SPRRegClass); |
| 20269 | if (VT.getSizeInBits() == 64) |
| 20270 | return RCPair(0U, &ARM::DPR_VFP2RegClass); |
| 20271 | if (VT.getSizeInBits() == 128) |
| 20272 | return RCPair(0U, &ARM::QPR_VFP2RegClass); |
| 20273 | break; |
| 20274 | } |
| 20275 | break; |
| 20276 | |
| 20277 | case 2: |
| 20278 | if (Constraint[0] == 'T') { |
| 20279 | switch (Constraint[1]) { |
| 20280 | default: |
| 20281 | break; |
| 20282 | case 'e': |
| 20283 | return RCPair(0U, &ARM::tGPREvenRegClass); |
| 20284 | case 'o': |
| 20285 | return RCPair(0U, &ARM::tGPROddRegClass); |
| 20286 | } |
| 20287 | } |
| 20288 | break; |
| 20289 | |
| 20290 | default: |
| 20291 | break; |
| 20292 | } |
| 20293 | |
| 20294 | if (StringRef("{cc}" ).equals_insensitive(RHS: Constraint)) |
| 20295 | return std::make_pair(x: unsigned(ARM::CPSR), y: &ARM::CCRRegClass); |
| 20296 | |
| 20297 | auto RCP = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); |
| 20298 | if (isIncompatibleReg(PR: RCP.first, VT)) |
| 20299 | return {0, nullptr}; |
| 20300 | return RCP; |
| 20301 | } |
| 20302 | |
| 20303 | /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops |
| 20304 | /// vector. If it is invalid, don't add anything to Ops. |
| 20305 | void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, |
| 20306 | StringRef Constraint, |
| 20307 | std::vector<SDValue> &Ops, |
| 20308 | SelectionDAG &DAG) const { |
| 20309 | SDValue Result; |
| 20310 | |
| 20311 | // Currently only support length 1 constraints. |
| 20312 | if (Constraint.size() != 1) |
| 20313 | return; |
| 20314 | |
| 20315 | char ConstraintLetter = Constraint[0]; |
| 20316 | switch (ConstraintLetter) { |
| 20317 | default: break; |
| 20318 | case 'j': |
| 20319 | case 'I': case 'J': case 'K': case 'L': |
| 20320 | case 'M': case 'N': case 'O': |
| 20321 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: Op); |
| 20322 | if (!C) |
| 20323 | return; |
| 20324 | |
| 20325 | int64_t CVal64 = C->getSExtValue(); |
| 20326 | int CVal = (int) CVal64; |
| 20327 | // None of these constraints allow values larger than 32 bits. Check |
| 20328 | // that the value fits in an int. |
| 20329 | if (CVal != CVal64) |
| 20330 | return; |
| 20331 | |
| 20332 | switch (ConstraintLetter) { |
| 20333 | case 'j': |
| 20334 | // Constant suitable for movw, must be between 0 and |
| 20335 | // 65535. |
if (Subtarget->hasV6T2Ops() || Subtarget->hasV8MBaselineOps())
| 20337 | if (CVal >= 0 && CVal <= 65535) |
| 20338 | break; |
| 20339 | return; |
| 20340 | case 'I': |
| 20341 | if (Subtarget->isThumb1Only()) { |
| 20342 | // This must be a constant between 0 and 255, for ADD |
| 20343 | // immediates. |
| 20344 | if (CVal >= 0 && CVal <= 255) |
| 20345 | break; |
| 20346 | } else if (Subtarget->isThumb2()) { |
| 20347 | // A constant that can be used as an immediate value in a |
| 20348 | // data-processing instruction. |
| 20349 | if (ARM_AM::getT2SOImmVal(Arg: CVal) != -1) |
| 20350 | break; |
| 20351 | } else { |
| 20352 | // A constant that can be used as an immediate value in a |
| 20353 | // data-processing instruction. |
| 20354 | if (ARM_AM::getSOImmVal(Arg: CVal) != -1) |
| 20355 | break; |
| 20356 | } |
| 20357 | return; |
| 20358 | |
| 20359 | case 'J': |
| 20360 | if (Subtarget->isThumb1Only()) { |
| 20361 | // This must be a constant between -255 and -1, for negated ADD |
| 20362 | // immediates. This can be used in GCC with an "n" modifier that |
| 20363 | // prints the negated value, for use with SUB instructions. It is |
| 20364 | // not useful otherwise but is implemented for compatibility. |
| 20365 | if (CVal >= -255 && CVal <= -1) |
| 20366 | break; |
| 20367 | } else { |
| 20368 | // This must be a constant between -4095 and 4095. This is suitable |
| 20369 | // for use as the immediate offset field in LDR and STR instructions |
| 20370 | // such as LDR r0,[r1,#offset]. |
| 20371 | if (CVal >= -4095 && CVal <= 4095) |
| 20372 | break; |
| 20373 | } |
| 20374 | return; |
| 20375 | |
| 20376 | case 'K': |
| 20377 | if (Subtarget->isThumb1Only()) { |
| 20378 | // A 32-bit value where only one byte has a nonzero value. Exclude |
| 20379 | // zero to match GCC. This constraint is used by GCC internally for |
| 20380 | // constants that can be loaded with a move/shift combination. |
| 20381 | // It is not useful otherwise but is implemented for compatibility. |
| 20382 | if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(V: CVal)) |
| 20383 | break; |
| 20384 | } else if (Subtarget->isThumb2()) { |
| 20385 | // A constant whose bitwise inverse can be used as an immediate |
| 20386 | // value in a data-processing instruction. This can be used in GCC |
| 20387 | // with a "B" modifier that prints the inverted value, for use with |
| 20388 | // BIC and MVN instructions. It is not useful otherwise but is |
| 20389 | // implemented for compatibility. |
| 20390 | if (ARM_AM::getT2SOImmVal(Arg: ~CVal) != -1) |
| 20391 | break; |
| 20392 | } else { |
| 20393 | // A constant whose bitwise inverse can be used as an immediate |
| 20394 | // value in a data-processing instruction. This can be used in GCC |
| 20395 | // with a "B" modifier that prints the inverted value, for use with |
| 20396 | // BIC and MVN instructions. It is not useful otherwise but is |
| 20397 | // implemented for compatibility. |
| 20398 | if (ARM_AM::getSOImmVal(Arg: ~CVal) != -1) |
| 20399 | break; |
| 20400 | } |
| 20401 | return; |
| 20402 | |
| 20403 | case 'L': |
| 20404 | if (Subtarget->isThumb1Only()) { |
| 20405 | // This must be a constant between -7 and 7, |
| 20406 | // for 3-operand ADD/SUB immediate instructions. |
| 20407 | if (CVal >= -7 && CVal < 7) |
| 20408 | break; |
| 20409 | } else if (Subtarget->isThumb2()) { |
| 20410 | // A constant whose negation can be used as an immediate value in a |
| 20411 | // data-processing instruction. This can be used in GCC with an "n" |
| 20412 | // modifier that prints the negated value, for use with SUB |
| 20413 | // instructions. It is not useful otherwise but is implemented for |
| 20414 | // compatibility. |
| 20415 | if (ARM_AM::getT2SOImmVal(Arg: -CVal) != -1) |
| 20416 | break; |
| 20417 | } else { |
| 20418 | // A constant whose negation can be used as an immediate value in a |
| 20419 | // data-processing instruction. This can be used in GCC with an "n" |
| 20420 | // modifier that prints the negated value, for use with SUB |
| 20421 | // instructions. It is not useful otherwise but is implemented for |
| 20422 | // compatibility. |
| 20423 | if (ARM_AM::getSOImmVal(Arg: -CVal) != -1) |
| 20424 | break; |
| 20425 | } |
| 20426 | return; |
| 20427 | |
| 20428 | case 'M': |
| 20429 | if (Subtarget->isThumb1Only()) { |
| 20430 | // This must be a multiple of 4 between 0 and 1020, for |
| 20431 | // ADD sp + immediate. |
| 20432 | if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) |
| 20433 | break; |
| 20434 | } else { |
| 20435 | // A power of two or a constant between 0 and 32. This is used in |
| 20436 | // GCC for the shift amount on shifted register operands, but it is |
| 20437 | // useful in general for any shift amounts. |
| 20438 | if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) |
| 20439 | break; |
| 20440 | } |
| 20441 | return; |
| 20442 | |
| 20443 | case 'N': |
| 20444 | if (Subtarget->isThumb1Only()) { |
| 20445 | // This must be a constant between 0 and 31, for shift amounts. |
| 20446 | if (CVal >= 0 && CVal <= 31) |
| 20447 | break; |
| 20448 | } |
| 20449 | return; |
| 20450 | |
| 20451 | case 'O': |
| 20452 | if (Subtarget->isThumb1Only()) { |
| 20453 | // This must be a multiple of 4 between -508 and 508, for |
| 20454 | // ADD/SUB sp = sp + immediate. |
| 20455 | if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) |
| 20456 | break; |
| 20457 | } |
| 20458 | return; |
| 20459 | } |
| 20460 | Result = DAG.getSignedTargetConstant(Val: CVal, DL: SDLoc(Op), VT: Op.getValueType()); |
| 20461 | break; |
| 20462 | } |
| 20463 | |
| 20464 | if (Result.getNode()) { |
| 20465 | Ops.push_back(x: Result); |
| 20466 | return; |
| 20467 | } |
| 20468 | return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); |
| 20469 | } |
| 20470 | |
| 20471 | static RTLIB::Libcall getDivRemLibcall( |
| 20472 | const SDNode *N, MVT::SimpleValueType SVT) { |
| 20473 | assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM || |
| 20474 | N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) && |
| 20475 | "Unhandled Opcode in getDivRemLibcall" ); |
| 20476 | bool isSigned = N->getOpcode() == ISD::SDIVREM || |
| 20477 | N->getOpcode() == ISD::SREM; |
| 20478 | RTLIB::Libcall LC; |
| 20479 | switch (SVT) { |
| 20480 | default: llvm_unreachable("Unexpected request for libcall!" ); |
| 20481 | case MVT::i8: LC = isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; |
| 20482 | case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; |
| 20483 | case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; |
| 20484 | case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; |
| 20485 | } |
| 20486 | return LC; |
| 20487 | } |
| 20488 | |
| 20489 | static TargetLowering::ArgListTy getDivRemArgList( |
| 20490 | const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) { |
| 20491 | assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM || |
| 20492 | N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) && |
| 20493 | "Unhandled Opcode in getDivRemArgList" ); |
| 20494 | bool isSigned = N->getOpcode() == ISD::SDIVREM || |
| 20495 | N->getOpcode() == ISD::SREM; |
| 20496 | TargetLowering::ArgListTy Args; |
| 20497 | for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { |
| 20498 | EVT ArgVT = N->getOperand(Num: i).getValueType(); |
| 20499 | Type *ArgTy = ArgVT.getTypeForEVT(Context&: *Context); |
| 20500 | TargetLowering::ArgListEntry Entry(N->getOperand(Num: i), ArgTy); |
| 20501 | Entry.IsSExt = isSigned; |
| 20502 | Entry.IsZExt = !isSigned; |
| 20503 | Args.push_back(x: Entry); |
| 20504 | } |
| 20505 | if (Subtarget->isTargetWindows() && Args.size() >= 2) |
| 20506 | std::swap(a&: Args[0], b&: Args[1]); |
| 20507 | return Args; |
| 20508 | } |
| 20509 | |
| 20510 | SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const { |
| 20511 | assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() || |
| 20512 | Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() || |
| 20513 | Subtarget->isTargetFuchsia() || Subtarget->isTargetWindows()) && |
| 20514 | "Register-based DivRem lowering only" ); |
| 20515 | unsigned Opcode = Op->getOpcode(); |
| 20516 | assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) && |
| 20517 | "Invalid opcode for Div/Rem lowering" ); |
| 20518 | bool isSigned = (Opcode == ISD::SDIVREM); |
| 20519 | EVT VT = Op->getValueType(ResNo: 0); |
| 20520 | SDLoc dl(Op); |
| 20521 | |
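// If the divisor is a constant, first try to expand the i64 divrem into
// operations on the 32-bit halves so that no divmod libcall is needed.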
| 20522 | if (VT == MVT::i64 && isa<ConstantSDNode>(Val: Op.getOperand(i: 1))) { |
| 20523 | SmallVector<SDValue> Result; |
| 20524 | if (expandDIVREMByConstant(N: Op.getNode(), Result, HiLoVT: MVT::i32, DAG)) { |
| 20525 | SDValue Res0 = |
| 20526 | DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT, N1: Result[0], N2: Result[1]); |
| 20527 | SDValue Res1 = |
| 20528 | DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT, N1: Result[2], N2: Result[3]); |
| 20529 | return DAG.getNode(Opcode: ISD::MERGE_VALUES, DL: dl, VTList: Op->getVTList(), |
| 20530 | Ops: {Res0, Res1}); |
| 20531 | } |
| 20532 | } |
| 20533 | |
| 20534 | Type *Ty = VT.getTypeForEVT(Context&: *DAG.getContext()); |
| 20535 | |
| 20536 | // If the target has hardware divide, use divide + multiply + subtract: |
| 20537 | // div = a / b |
| 20538 | // rem = a - b * div |
| 20539 | // return {div, rem} |
| 20540 | // This should be lowered into UDIV/SDIV + MLS later on. |
| 20541 | bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode() |
| 20542 | : Subtarget->hasDivideInARMMode(); |
| 20543 | if (hasDivide && Op->getValueType(ResNo: 0).isSimple() && |
| 20544 | Op->getSimpleValueType(ResNo: 0) == MVT::i32) { |
| 20545 | unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV; |
| 20546 | const SDValue Dividend = Op->getOperand(Num: 0); |
| 20547 | const SDValue Divisor = Op->getOperand(Num: 1); |
| 20548 | SDValue Div = DAG.getNode(Opcode: DivOpcode, DL: dl, VT, N1: Dividend, N2: Divisor); |
| 20549 | SDValue Mul = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT, N1: Div, N2: Divisor); |
| 20550 | SDValue Rem = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: Dividend, N2: Mul); |
| 20551 | |
| 20552 | SDValue Values[2] = {Div, Rem}; |
| 20553 | return DAG.getNode(Opcode: ISD::MERGE_VALUES, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT), Ops: Values); |
| 20554 | } |
| 20555 | |
| 20556 | RTLIB::Libcall LC = getDivRemLibcall(N: Op.getNode(), |
| 20557 | SVT: VT.getSimpleVT().SimpleTy); |
| 20558 | RTLIB::LibcallImpl LCImpl = DAG.getLibcalls().getLibcallImpl(Call: LC); |
| 20559 | |
| 20560 | SDValue InChain = DAG.getEntryNode(); |
| 20561 | |
| 20562 | TargetLowering::ArgListTy Args = getDivRemArgList(N: Op.getNode(), |
| 20563 | Context: DAG.getContext(), |
| 20564 | Subtarget); |
| 20565 | |
| 20566 | SDValue Callee = |
| 20567 | DAG.getExternalSymbol(LCImpl, VT: getPointerTy(DL: DAG.getDataLayout())); |
| 20568 | |
| 20569 | Type *RetTy = StructType::get(elt1: Ty, elts: Ty); |
| 20570 | |
| 20571 | if (Subtarget->isTargetWindows()) |
| 20572 | InChain = WinDBZCheckDenominator(DAG, N: Op.getNode(), InChain); |
| 20573 | |
| 20574 | TargetLowering::CallLoweringInfo CLI(DAG); |
| 20575 | CLI.setDebugLoc(dl) |
| 20576 | .setChain(InChain) |
| 20577 | .setCallee(CC: DAG.getLibcalls().getLibcallImplCallingConv(Call: LCImpl), ResultType: RetTy, |
| 20578 | Target: Callee, ArgsList: std::move(Args)) |
| 20579 | .setInRegister() |
| 20580 | .setSExtResult(isSigned) |
| 20581 | .setZExtResult(!isSigned); |
| 20582 | |
| 20583 | std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); |
| 20584 | return CallInfo.first; |
| 20585 | } |
| 20586 | |
// Lowers REM using divmod helpers; see RTABI section 4.2/4.3.
| 20589 | SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const { |
| 20590 | EVT VT = N->getValueType(ResNo: 0); |
| 20591 | |
| 20592 | if (VT == MVT::i64 && isa<ConstantSDNode>(Val: N->getOperand(Num: 1))) { |
| 20593 | SmallVector<SDValue> Result; |
| 20594 | if (expandDIVREMByConstant(N, Result, HiLoVT: MVT::i32, DAG)) |
| 20595 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: SDLoc(N), VT: N->getValueType(ResNo: 0), |
| 20596 | N1: Result[0], N2: Result[1]); |
| 20597 | } |
| 20598 | |
| 20599 | // Build return types (div and rem) |
| 20600 | std::vector<Type*> RetTyParams; |
| 20601 | Type *RetTyElement; |
| 20602 | |
| 20603 | switch (VT.getSimpleVT().SimpleTy) { |
| 20604 | default: llvm_unreachable("Unexpected request for libcall!" ); |
| 20605 | case MVT::i8: RetTyElement = Type::getInt8Ty(C&: *DAG.getContext()); break; |
| 20606 | case MVT::i16: RetTyElement = Type::getInt16Ty(C&: *DAG.getContext()); break; |
| 20607 | case MVT::i32: RetTyElement = Type::getInt32Ty(C&: *DAG.getContext()); break; |
| 20608 | case MVT::i64: RetTyElement = Type::getInt64Ty(C&: *DAG.getContext()); break; |
| 20609 | } |
| 20610 | |
| 20611 | RetTyParams.push_back(x: RetTyElement); |
| 20612 | RetTyParams.push_back(x: RetTyElement); |
| 20613 | ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams); |
| 20614 | Type *RetTy = StructType::get(Context&: *DAG.getContext(), Elements: ret); |
| 20615 | |
| 20616 | RTLIB::Libcall LC = getDivRemLibcall(N, SVT: N->getValueType(ResNo: 0).getSimpleVT(). |
| 20617 | SimpleTy); |
| 20618 | RTLIB::LibcallImpl LCImpl = DAG.getLibcalls().getLibcallImpl(Call: LC); |
| 20619 | SDValue InChain = DAG.getEntryNode(); |
| 20620 | TargetLowering::ArgListTy Args = getDivRemArgList(N, Context: DAG.getContext(), |
| 20621 | Subtarget); |
| 20622 | bool isSigned = N->getOpcode() == ISD::SREM; |
| 20623 | |
| 20624 | SDValue Callee = |
| 20625 | DAG.getExternalSymbol(LCImpl, VT: getPointerTy(DL: DAG.getDataLayout())); |
| 20626 | |
| 20627 | if (Subtarget->isTargetWindows()) |
| 20628 | InChain = WinDBZCheckDenominator(DAG, N, InChain); |
| 20629 | |
| 20630 | // Lower call |
| 20631 | CallLoweringInfo CLI(DAG); |
| 20632 | CLI.setChain(InChain) |
| 20633 | .setCallee(CC: DAG.getLibcalls().getLibcallImplCallingConv(Call: LCImpl), ResultType: RetTy, |
| 20634 | Target: Callee, ArgsList: std::move(Args)) |
| 20635 | .setSExtResult(isSigned) |
| 20636 | .setZExtResult(!isSigned) |
| 20637 | .setDebugLoc(SDLoc(N)); |
| 20638 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); |
| 20639 | |
| 20640 | // Return second (rem) result operand (first contains div) |
| 20641 | SDNode *ResNode = CallResult.first.getNode(); |
| 20642 | assert(ResNode->getNumOperands() == 2 && "divmod should return two operands" ); |
| 20643 | return ResNode->getOperand(Num: 1); |
| 20644 | } |
| 20645 | |
| 20646 | SDValue |
| 20647 | ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { |
| 20648 | assert(Subtarget->isTargetWindows() && "unsupported target platform" ); |
| 20649 | SDLoc DL(Op); |
| 20650 | |
| 20651 | // Get the inputs. |
| 20652 | SDValue Chain = Op.getOperand(i: 0); |
| 20653 | SDValue Size = Op.getOperand(i: 1); |
| 20654 | |
| 20655 | if (DAG.getMachineFunction().getFunction().hasFnAttribute( |
| 20656 | Kind: "no-stack-arg-probe" )) { |
| 20657 | MaybeAlign Align = |
| 20658 | cast<ConstantSDNode>(Val: Op.getOperand(i: 2))->getMaybeAlignValue(); |
| 20659 | SDValue SP = DAG.getCopyFromReg(Chain, dl: DL, Reg: ARM::SP, VT: MVT::i32); |
| 20660 | Chain = SP.getValue(R: 1); |
| 20661 | SP = DAG.getNode(Opcode: ISD::SUB, DL, VT: MVT::i32, N1: SP, N2: Size); |
| 20662 | if (Align) |
| 20663 | SP = DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: SP.getValue(R: 0), |
| 20664 | N2: DAG.getSignedConstant(Val: -Align->value(), DL, VT: MVT::i32)); |
| 20665 | Chain = DAG.getCopyToReg(Chain, dl: DL, Reg: ARM::SP, N: SP); |
| 20666 | SDValue Ops[2] = { SP, Chain }; |
| 20667 | return DAG.getMergeValues(Ops, dl: DL); |
| 20668 | } |
| 20669 | |
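// The Windows __chkstk helper expects the allocation size in 32-bit words
// in R4 (hence the shift by 2 below); the WIN__CHKSTK pseudo performs the
// call and the actual SP adjustment, so SP is re-read afterwards.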
| 20670 | SDValue Words = DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i32, N1: Size, |
| 20671 | N2: DAG.getConstant(Val: 2, DL, VT: MVT::i32)); |
| 20672 | |
| 20673 | SDValue Glue; |
| 20674 | Chain = DAG.getCopyToReg(Chain, dl: DL, Reg: ARM::R4, N: Words, Glue); |
| 20675 | Glue = Chain.getValue(R: 1); |
| 20676 | |
| 20677 | SDVTList NodeTys = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue); |
| 20678 | Chain = DAG.getNode(Opcode: ARMISD::WIN__CHKSTK, DL, VTList: NodeTys, N1: Chain, N2: Glue); |
| 20679 | |
| 20680 | SDValue NewSP = DAG.getCopyFromReg(Chain, dl: DL, Reg: ARM::SP, VT: MVT::i32); |
| 20681 | Chain = NewSP.getValue(R: 1); |
| 20682 | |
| 20683 | SDValue Ops[2] = { NewSP, Chain }; |
| 20684 | return DAG.getMergeValues(Ops, dl: DL); |
| 20685 | } |
| 20686 | |
| 20687 | SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { |
| 20688 | bool IsStrict = Op->isStrictFPOpcode(); |
| 20689 | SDValue SrcVal = Op.getOperand(i: IsStrict ? 1 : 0); |
| 20690 | const unsigned DstSz = Op.getValueType().getSizeInBits(); |
| 20691 | const unsigned SrcSz = SrcVal.getValueType().getSizeInBits(); |
| 20692 | assert(DstSz > SrcSz && DstSz <= 64 && SrcSz >= 16 && |
| 20693 | "Unexpected type for custom-lowering FP_EXTEND" ); |
| 20694 | |
| 20695 | assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) && |
| 20696 | "With both FP DP and 16, any FP conversion is legal!" ); |
| 20697 | |
| 20698 | assert(!(DstSz == 32 && Subtarget->hasFP16()) && |
| 20699 | "With FP16, 16 to 32 conversion is legal!" ); |
| 20700 | |
| 20701 | // Converting from 32 -> 64 is valid if we have FP64. |
| 20702 | if (SrcSz == 32 && DstSz == 64 && Subtarget->hasFP64()) { |
| 20703 | // FIXME: Remove this when we have strict fp instruction selection patterns |
| 20704 | if (IsStrict) { |
| 20705 | SDLoc Loc(Op); |
| 20706 | SDValue Result = DAG.getNode(Opcode: ISD::FP_EXTEND, |
| 20707 | DL: Loc, VT: Op.getValueType(), Operand: SrcVal); |
| 20708 | return DAG.getMergeValues(Ops: {Result, Op.getOperand(i: 0)}, dl: Loc); |
| 20709 | } |
| 20710 | return Op; |
| 20711 | } |
| 20712 | |
// Otherwise, either we are converting from 16 -> 64 (without FP16, without
// double-precision FP, or without Armv8-FP) and must do it in two steps, or
// we are converting from 32 -> 64 without double-precision FP or from
// 16 -> 32 without FP16 and must use a libcall.
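// For example, extending f16 to f64 on a target with FP64 but no FP16 is
// done as an f16 -> f32 libcall followed by a native f32 -> f64 FP_EXTEND;
// with neither feature available, both steps become libcalls.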
| 20718 | SDLoc Loc(Op); |
| 20719 | RTLIB::Libcall LC; |
| 20720 | MakeLibCallOptions CallOptions; |
| 20721 | SDValue Chain = IsStrict ? Op.getOperand(i: 0) : SDValue(); |
| 20722 | for (unsigned Sz = SrcSz; Sz <= 32 && Sz < DstSz; Sz *= 2) { |
| 20723 | bool Supported = (Sz == 16 ? Subtarget->hasFP16() : Subtarget->hasFP64()); |
| 20724 | MVT SrcVT = (Sz == 16 ? MVT::f16 : MVT::f32); |
| 20725 | MVT DstVT = (Sz == 16 ? MVT::f32 : MVT::f64); |
| 20726 | if (Supported) { |
| 20727 | if (IsStrict) { |
| 20728 | SrcVal = DAG.getNode(Opcode: ISD::STRICT_FP_EXTEND, DL: Loc, |
| 20729 | ResultTys: {DstVT, MVT::Other}, Ops: {Chain, SrcVal}); |
| 20730 | Chain = SrcVal.getValue(R: 1); |
| 20731 | } else { |
| 20732 | SrcVal = DAG.getNode(Opcode: ISD::FP_EXTEND, DL: Loc, VT: DstVT, Operand: SrcVal); |
| 20733 | } |
| 20734 | } else { |
| 20735 | LC = RTLIB::getFPEXT(OpVT: SrcVT, RetVT: DstVT); |
| 20736 | assert(LC != RTLIB::UNKNOWN_LIBCALL && |
| 20737 | "Unexpected type for custom-lowering FP_EXTEND" ); |
| 20738 | std::tie(args&: SrcVal, args&: Chain) = makeLibCall(DAG, LC, RetVT: DstVT, Ops: SrcVal, CallOptions, |
| 20739 | dl: Loc, Chain); |
| 20740 | } |
| 20741 | } |
| 20742 | |
| 20743 | return IsStrict ? DAG.getMergeValues(Ops: {SrcVal, Chain}, dl: Loc) : SrcVal; |
| 20744 | } |
| 20745 | |
| 20746 | SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { |
| 20747 | bool IsStrict = Op->isStrictFPOpcode(); |
| 20748 | |
| 20749 | SDValue SrcVal = Op.getOperand(i: IsStrict ? 1 : 0); |
| 20750 | EVT SrcVT = SrcVal.getValueType(); |
| 20751 | EVT DstVT = Op.getValueType(); |
| 20752 | const unsigned DstSz = Op.getValueType().getSizeInBits(); |
| 20753 | const unsigned SrcSz = SrcVT.getSizeInBits(); |
| 20754 | (void)DstSz; |
| 20755 | assert(DstSz < SrcSz && SrcSz <= 64 && DstSz >= 16 && |
| 20756 | "Unexpected type for custom-lowering FP_ROUND" ); |
| 20757 | |
| 20758 | assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) && |
| 20759 | "With both FP DP and 16, any FP conversion is legal!" ); |
| 20760 | |
| 20761 | SDLoc Loc(Op); |
| 20762 | |
// A single instruction handles 32 -> 16 when FP16 is available.
| 20764 | if (SrcSz == 32 && Subtarget->hasFP16()) |
| 20765 | return Op; |
| 20766 | |
| 20767 | // Lib call from 32 -> 16 / 64 -> [32, 16] |
| 20768 | RTLIB::Libcall LC = RTLIB::getFPROUND(OpVT: SrcVT, RetVT: DstVT); |
| 20769 | assert(LC != RTLIB::UNKNOWN_LIBCALL && |
| 20770 | "Unexpected type for custom-lowering FP_ROUND" ); |
| 20771 | MakeLibCallOptions CallOptions; |
| 20772 | SDValue Chain = IsStrict ? Op.getOperand(i: 0) : SDValue(); |
| 20773 | SDValue Result; |
| 20774 | std::tie(args&: Result, args&: Chain) = makeLibCall(DAG, LC, RetVT: DstVT, Ops: SrcVal, CallOptions, |
| 20775 | dl: Loc, Chain); |
| 20776 | return IsStrict ? DAG.getMergeValues(Ops: {Result, Chain}, dl: Loc) : Result; |
| 20777 | } |
| 20778 | |
| 20779 | bool |
| 20780 | ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { |
| 20781 | // The ARM target isn't yet aware of offsets. |
| 20782 | return false; |
| 20783 | } |
| 20784 | |
| 20785 | bool ARM::isBitFieldInvertedMask(unsigned v) { |
| 20786 | if (v == 0xffffffff) |
| 20787 | return false; |
| 20788 | |
| 20789 | // there can be 1's on either or both "outsides", all the "inside" |
| 20790 | // bits must be 0's |
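// For example, 0xf000000f is such a mask (~v is the contiguous run
// 0x0ffffff0), while 0x0ff000f0 is not.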
| 20791 | return isShiftedMask_32(Value: ~v); |
| 20792 | } |
| 20793 | |
| 20794 | /// isFPImmLegal - Returns true if the target can instruction select the |
| 20795 | /// specified FP immediate natively. If false, the legalizer will |
| 20796 | /// materialize the FP immediate as a load from a constant pool. |
| 20797 | bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, |
| 20798 | bool ForCodeSize) const { |
| 20799 | if (!Subtarget->hasVFP3Base()) |
| 20800 | return false; |
| 20801 | if (VT == MVT::f16 && Subtarget->hasFullFP16()) |
| 20802 | return ARM_AM::getFP16Imm(FPImm: Imm) != -1; |
| 20803 | if (VT == MVT::f32 && Subtarget->hasFullFP16() && |
| 20804 | ARM_AM::getFP32FP16Imm(FPImm: Imm) != -1) |
| 20805 | return true; |
| 20806 | if (VT == MVT::f32) |
| 20807 | return ARM_AM::getFP32Imm(FPImm: Imm) != -1; |
| 20808 | if (VT == MVT::f64 && Subtarget->hasFP64()) |
| 20809 | return ARM_AM::getFP64Imm(FPImm: Imm) != -1; |
| 20810 | return false; |
| 20811 | } |
| 20812 | |
| 20813 | /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as |
| 20814 | /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment |
| 20815 | /// specified in the intrinsic calls. |
| 20816 | void ARMTargetLowering::getTgtMemIntrinsic( |
| 20817 | SmallVectorImpl<IntrinsicInfo> &Infos, const CallBase &I, |
| 20818 | MachineFunction &MF, unsigned Intrinsic) const { |
| 20819 | IntrinsicInfo Info; |
| 20820 | switch (Intrinsic) { |
| 20821 | case Intrinsic::arm_neon_vld1: |
| 20822 | case Intrinsic::arm_neon_vld2: |
| 20823 | case Intrinsic::arm_neon_vld3: |
| 20824 | case Intrinsic::arm_neon_vld4: |
| 20825 | case Intrinsic::arm_neon_vld2lane: |
| 20826 | case Intrinsic::arm_neon_vld3lane: |
| 20827 | case Intrinsic::arm_neon_vld4lane: |
| 20828 | case Intrinsic::arm_neon_vld2dup: |
| 20829 | case Intrinsic::arm_neon_vld3dup: |
| 20830 | case Intrinsic::arm_neon_vld4dup: { |
| 20831 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 20832 | // Conservatively set memVT to the entire set of vectors loaded. |
| 20833 | auto &DL = I.getDataLayout(); |
| 20834 | uint64_t NumElts = DL.getTypeSizeInBits(Ty: I.getType()) / 64; |
| 20835 | Info.memVT = EVT::getVectorVT(Context&: I.getType()->getContext(), VT: MVT::i64, NumElements: NumElts); |
| 20836 | Info.ptrVal = I.getArgOperand(i: 0); |
| 20837 | Info.offset = 0; |
| 20838 | Value *AlignArg = I.getArgOperand(i: I.arg_size() - 1); |
| 20839 | Info.align = cast<ConstantInt>(Val: AlignArg)->getMaybeAlignValue(); |
| 20840 | // volatile loads with NEON intrinsics not supported |
| 20841 | Info.flags = MachineMemOperand::MOLoad; |
| 20842 | Infos.push_back(Elt: Info); |
| 20843 | return; |
| 20844 | } |
| 20845 | case Intrinsic::arm_neon_vld1x2: |
| 20846 | case Intrinsic::arm_neon_vld1x3: |
| 20847 | case Intrinsic::arm_neon_vld1x4: { |
| 20848 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 20849 | // Conservatively set memVT to the entire set of vectors loaded. |
| 20850 | auto &DL = I.getDataLayout(); |
| 20851 | uint64_t NumElts = DL.getTypeSizeInBits(Ty: I.getType()) / 64; |
| 20852 | Info.memVT = EVT::getVectorVT(Context&: I.getType()->getContext(), VT: MVT::i64, NumElements: NumElts); |
| 20853 | Info.ptrVal = I.getArgOperand(i: I.arg_size() - 1); |
| 20854 | Info.offset = 0; |
| 20855 | Info.align = I.getParamAlign(ArgNo: I.arg_size() - 1).valueOrOne(); |
| 20856 | // volatile loads with NEON intrinsics not supported |
| 20857 | Info.flags = MachineMemOperand::MOLoad; |
| 20858 | Infos.push_back(Elt: Info); |
| 20859 | return; |
| 20860 | } |
| 20861 | case Intrinsic::arm_neon_vst1: |
| 20862 | case Intrinsic::arm_neon_vst2: |
| 20863 | case Intrinsic::arm_neon_vst3: |
| 20864 | case Intrinsic::arm_neon_vst4: |
| 20865 | case Intrinsic::arm_neon_vst2lane: |
| 20866 | case Intrinsic::arm_neon_vst3lane: |
| 20867 | case Intrinsic::arm_neon_vst4lane: { |
| 20868 | Info.opc = ISD::INTRINSIC_VOID; |
| 20869 | // Conservatively set memVT to the entire set of vectors stored. |
| 20870 | auto &DL = I.getDataLayout(); |
| 20871 | unsigned NumElts = 0; |
| 20872 | for (unsigned ArgI = 1, ArgE = I.arg_size(); ArgI < ArgE; ++ArgI) { |
| 20873 | Type *ArgTy = I.getArgOperand(i: ArgI)->getType(); |
| 20874 | if (!ArgTy->isVectorTy()) |
| 20875 | break; |
| 20876 | NumElts += DL.getTypeSizeInBits(Ty: ArgTy) / 64; |
| 20877 | } |
| 20878 | Info.memVT = EVT::getVectorVT(Context&: I.getType()->getContext(), VT: MVT::i64, NumElements: NumElts); |
| 20879 | Info.ptrVal = I.getArgOperand(i: 0); |
| 20880 | Info.offset = 0; |
| 20881 | Value *AlignArg = I.getArgOperand(i: I.arg_size() - 1); |
| 20882 | Info.align = cast<ConstantInt>(Val: AlignArg)->getMaybeAlignValue(); |
| 20883 | // volatile stores with NEON intrinsics not supported |
| 20884 | Info.flags = MachineMemOperand::MOStore; |
| 20885 | Infos.push_back(Elt: Info); |
| 20886 | return; |
| 20887 | } |
| 20888 | case Intrinsic::arm_neon_vst1x2: |
| 20889 | case Intrinsic::arm_neon_vst1x3: |
| 20890 | case Intrinsic::arm_neon_vst1x4: { |
| 20891 | Info.opc = ISD::INTRINSIC_VOID; |
| 20892 | // Conservatively set memVT to the entire set of vectors stored. |
| 20893 | auto &DL = I.getDataLayout(); |
| 20894 | unsigned NumElts = 0; |
| 20895 | for (unsigned ArgI = 1, ArgE = I.arg_size(); ArgI < ArgE; ++ArgI) { |
| 20896 | Type *ArgTy = I.getArgOperand(i: ArgI)->getType(); |
| 20897 | if (!ArgTy->isVectorTy()) |
| 20898 | break; |
| 20899 | NumElts += DL.getTypeSizeInBits(Ty: ArgTy) / 64; |
| 20900 | } |
| 20901 | Info.memVT = EVT::getVectorVT(Context&: I.getType()->getContext(), VT: MVT::i64, NumElements: NumElts); |
| 20902 | Info.ptrVal = I.getArgOperand(i: 0); |
| 20903 | Info.offset = 0; |
| 20904 | Info.align = I.getParamAlign(ArgNo: 0).valueOrOne(); |
| 20905 | // volatile stores with NEON intrinsics not supported |
| 20906 | Info.flags = MachineMemOperand::MOStore; |
| 20907 | Infos.push_back(Elt: Info); |
| 20908 | return; |
| 20909 | } |
| 20910 | case Intrinsic::arm_mve_vld2q: |
| 20911 | case Intrinsic::arm_mve_vld4q: { |
| 20912 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 20913 | // Conservatively set memVT to the entire set of vectors loaded. |
| 20914 | Type *VecTy = cast<StructType>(Val: I.getType())->getElementType(N: 1); |
| 20915 | unsigned Factor = Intrinsic == Intrinsic::arm_mve_vld2q ? 2 : 4; |
| 20916 | Info.memVT = EVT::getVectorVT(Context&: VecTy->getContext(), VT: MVT::i64, NumElements: Factor * 2); |
| 20917 | Info.ptrVal = I.getArgOperand(i: 0); |
| 20918 | Info.offset = 0; |
| 20919 | Info.align = Align(VecTy->getScalarSizeInBits() / 8); |
| 20920 | // volatile loads with MVE intrinsics not supported |
| 20921 | Info.flags = MachineMemOperand::MOLoad; |
| 20922 | Infos.push_back(Elt: Info); |
| 20923 | return; |
| 20924 | } |
| 20925 | case Intrinsic::arm_mve_vst2q: |
| 20926 | case Intrinsic::arm_mve_vst4q: { |
| 20927 | Info.opc = ISD::INTRINSIC_VOID; |
| 20928 | // Conservatively set memVT to the entire set of vectors stored. |
| 20929 | Type *VecTy = I.getArgOperand(i: 1)->getType(); |
| 20930 | unsigned Factor = Intrinsic == Intrinsic::arm_mve_vst2q ? 2 : 4; |
| 20931 | Info.memVT = EVT::getVectorVT(Context&: VecTy->getContext(), VT: MVT::i64, NumElements: Factor * 2); |
| 20932 | Info.ptrVal = I.getArgOperand(i: 0); |
| 20933 | Info.offset = 0; |
| 20934 | Info.align = Align(VecTy->getScalarSizeInBits() / 8); |
| 20935 | // volatile stores with MVE intrinsics not supported |
| 20936 | Info.flags = MachineMemOperand::MOStore; |
| 20937 | Infos.push_back(Elt: Info); |
| 20938 | return; |
| 20939 | } |
| 20940 | case Intrinsic::arm_mve_vldr_gather_base: |
| 20941 | case Intrinsic::arm_mve_vldr_gather_base_predicated: { |
| 20942 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 20943 | Info.ptrVal = nullptr; |
| 20944 | Info.memVT = MVT::getVT(Ty: I.getType()); |
| 20945 | Info.align = Align(1); |
| 20946 | Info.flags |= MachineMemOperand::MOLoad; |
| 20947 | Infos.push_back(Elt: Info); |
| 20948 | return; |
| 20949 | } |
| 20950 | case Intrinsic::arm_mve_vldr_gather_base_wb: |
| 20951 | case Intrinsic::arm_mve_vldr_gather_base_wb_predicated: { |
| 20952 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 20953 | Info.ptrVal = nullptr; |
| 20954 | Info.memVT = MVT::getVT(Ty: I.getType()->getContainedType(i: 0)); |
| 20955 | Info.align = Align(1); |
| 20956 | Info.flags |= MachineMemOperand::MOLoad; |
| 20957 | Infos.push_back(Elt: Info); |
| 20958 | return; |
| 20959 | } |
| 20960 | case Intrinsic::arm_mve_vldr_gather_offset: |
| 20961 | case Intrinsic::arm_mve_vldr_gather_offset_predicated: { |
| 20962 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 20963 | Info.ptrVal = nullptr; |
| 20964 | MVT DataVT = MVT::getVT(Ty: I.getType()); |
| 20965 | unsigned MemSize = cast<ConstantInt>(Val: I.getArgOperand(i: 2))->getZExtValue(); |
| 20966 | Info.memVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: MemSize), |
| 20967 | NumElements: DataVT.getVectorNumElements()); |
| 20968 | Info.align = Align(1); |
| 20969 | Info.flags |= MachineMemOperand::MOLoad; |
| 20970 | Infos.push_back(Elt: Info); |
| 20971 | return; |
| 20972 | } |
| 20973 | case Intrinsic::arm_mve_vstr_scatter_base: |
| 20974 | case Intrinsic::arm_mve_vstr_scatter_base_predicated: { |
| 20975 | Info.opc = ISD::INTRINSIC_VOID; |
| 20976 | Info.ptrVal = nullptr; |
| 20977 | Info.memVT = MVT::getVT(Ty: I.getArgOperand(i: 2)->getType()); |
| 20978 | Info.align = Align(1); |
| 20979 | Info.flags |= MachineMemOperand::MOStore; |
| 20980 | Infos.push_back(Elt: Info); |
| 20981 | return; |
| 20982 | } |
| 20983 | case Intrinsic::arm_mve_vstr_scatter_base_wb: |
| 20984 | case Intrinsic::arm_mve_vstr_scatter_base_wb_predicated: { |
| 20985 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 20986 | Info.ptrVal = nullptr; |
| 20987 | Info.memVT = MVT::getVT(Ty: I.getArgOperand(i: 2)->getType()); |
| 20988 | Info.align = Align(1); |
| 20989 | Info.flags |= MachineMemOperand::MOStore; |
| 20990 | Infos.push_back(Elt: Info); |
| 20991 | return; |
| 20992 | } |
| 20993 | case Intrinsic::arm_mve_vstr_scatter_offset: |
| 20994 | case Intrinsic::arm_mve_vstr_scatter_offset_predicated: { |
| 20995 | Info.opc = ISD::INTRINSIC_VOID; |
| 20996 | Info.ptrVal = nullptr; |
| 20997 | MVT DataVT = MVT::getVT(Ty: I.getArgOperand(i: 2)->getType()); |
| 20998 | unsigned MemSize = cast<ConstantInt>(Val: I.getArgOperand(i: 3))->getZExtValue(); |
| 20999 | Info.memVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: MemSize), |
| 21000 | NumElements: DataVT.getVectorNumElements()); |
| 21001 | Info.align = Align(1); |
| 21002 | Info.flags |= MachineMemOperand::MOStore; |
| 21003 | Infos.push_back(Elt: Info); |
| 21004 | return; |
| 21005 | } |
| 21006 | case Intrinsic::arm_ldaex: |
| 21007 | case Intrinsic::arm_ldrex: { |
| 21008 | auto &DL = I.getDataLayout(); |
| 21009 | Type *ValTy = I.getParamElementType(ArgNo: 0); |
| 21010 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 21011 | Info.memVT = MVT::getVT(Ty: ValTy); |
| 21012 | Info.ptrVal = I.getArgOperand(i: 0); |
| 21013 | Info.offset = 0; |
| 21014 | Info.align = DL.getABITypeAlign(Ty: ValTy); |
| 21015 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; |
| 21016 | Infos.push_back(Elt: Info); |
| 21017 | return; |
| 21018 | } |
| 21019 | case Intrinsic::arm_stlex: |
| 21020 | case Intrinsic::arm_strex: { |
| 21021 | auto &DL = I.getDataLayout(); |
| 21022 | Type *ValTy = I.getParamElementType(ArgNo: 1); |
| 21023 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 21024 | Info.memVT = MVT::getVT(Ty: ValTy); |
| 21025 | Info.ptrVal = I.getArgOperand(i: 1); |
| 21026 | Info.offset = 0; |
| 21027 | Info.align = DL.getABITypeAlign(Ty: ValTy); |
| 21028 | Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; |
| 21029 | Infos.push_back(Elt: Info); |
| 21030 | return; |
| 21031 | } |
| 21032 | case Intrinsic::arm_stlexd: |
| 21033 | case Intrinsic::arm_strexd: |
| 21034 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 21035 | Info.memVT = MVT::i64; |
| 21036 | Info.ptrVal = I.getArgOperand(i: 2); |
| 21037 | Info.offset = 0; |
| 21038 | Info.align = Align(8); |
| 21039 | Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; |
| 21040 | Infos.push_back(Elt: Info); |
| 21041 | return; |
| 21042 | |
| 21043 | case Intrinsic::arm_ldaexd: |
| 21044 | case Intrinsic::arm_ldrexd: |
| 21045 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 21046 | Info.memVT = MVT::i64; |
| 21047 | Info.ptrVal = I.getArgOperand(i: 0); |
| 21048 | Info.offset = 0; |
| 21049 | Info.align = Align(8); |
| 21050 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; |
| 21051 | Infos.push_back(Elt: Info); |
| 21052 | return; |
| 21053 | |
| 21054 | default: |
| 21055 | break; |
| 21056 | } |
| 21057 | } |
| 21058 | |
| 21059 | /// Returns true if it is beneficial to convert a load of a constant |
| 21060 | /// to just the constant itself. |
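/// ARM can materialize a 32-bit (or narrower) integer constant cheaply, e.g.
/// with a movw/movt pair on v6t2 and later or a small constant-pool entry on
/// older cores, so folding the load away is generally profitable.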
| 21061 | bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, |
| 21062 | Type *Ty) const { |
| 21063 | assert(Ty->isIntegerTy()); |
| 21064 | |
| 21065 | unsigned Bits = Ty->getPrimitiveSizeInBits(); |
| 21066 | if (Bits == 0 || Bits > 32) |
| 21067 | return false; |
| 21068 | return true; |
| 21069 | } |
| 21070 | |
bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
| 21072 | unsigned Index) const { |
| 21073 | if (!isOperationLegalOrCustom(Op: ISD::EXTRACT_SUBVECTOR, VT: ResVT)) |
| 21074 | return false; |
| 21075 | |
| 21076 | return (Index == 0 || Index == ResVT.getVectorNumElements()); |
| 21077 | } |
| 21078 | |
| 21079 | Instruction *ARMTargetLowering::makeDMB(IRBuilderBase &Builder, |
| 21080 | ARM_MB::MemBOpt Domain) const { |
| 21081 | // First, if the target has no DMB, see what fallback we can use. |
| 21082 | if (!Subtarget->hasDataBarrier()) { |
| 21083 | // Some ARMv6 cpus can support data barriers with an mcr instruction. |
| 21084 | // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get |
| 21085 | // here. |
| 21086 | if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) { |
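      // These operands select the ARMv6 CP15 data memory barrier operation,
      // roughly "mcr p15, 0, <Rt>, c7, c10, 5" with <Rt> = 0.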
| 21087 | Value* args[6] = {Builder.getInt32(C: 15), Builder.getInt32(C: 0), |
| 21088 | Builder.getInt32(C: 0), Builder.getInt32(C: 7), |
| 21089 | Builder.getInt32(C: 10), Builder.getInt32(C: 5)}; |
| 21090 | return Builder.CreateIntrinsic(ID: Intrinsic::arm_mcr, Args: args); |
| 21091 | } else { |
| 21092 | // Instead of using barriers, atomic accesses on these subtargets use |
| 21093 | // libcalls. |
| 21094 | llvm_unreachable("makeDMB on a target so old that it has no barriers" ); |
| 21095 | } |
| 21096 | } else { |
| 21097 | // Only a full system barrier exists in the M-class architectures. |
| 21098 | Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain; |
| 21099 | Constant *CDomain = Builder.getInt32(C: Domain); |
| 21100 | return Builder.CreateIntrinsic(ID: Intrinsic::arm_dmb, Args: CDomain); |
| 21101 | } |
| 21102 | } |
| 21103 | |
| 21104 | // Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html |
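// With these hooks, on targets lacking acquire/release instructions a seq_cst
// store is bracketed as "dmb ish; str; dmb ish", while a seq_cst load becomes
// "ldr; dmb ish" (no leading fence is emitted for plain loads below).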
| 21105 | Instruction *ARMTargetLowering::emitLeadingFence(IRBuilderBase &Builder, |
| 21106 | Instruction *Inst, |
| 21107 | AtomicOrdering Ord) const { |
| 21108 | switch (Ord) { |
| 21109 | case AtomicOrdering::NotAtomic: |
| 21110 | case AtomicOrdering::Unordered: |
| 21111 | llvm_unreachable("Invalid fence: unordered/non-atomic" ); |
| 21112 | case AtomicOrdering::Monotonic: |
| 21113 | case AtomicOrdering::Acquire: |
| 21114 | return nullptr; // Nothing to do |
| 21115 | case AtomicOrdering::SequentiallyConsistent: |
| 21116 | if (!Inst->hasAtomicStore()) |
| 21117 | return nullptr; // Nothing to do |
| 21118 | [[fallthrough]]; |
| 21119 | case AtomicOrdering::Release: |
| 21120 | case AtomicOrdering::AcquireRelease: |
| 21121 | if (Subtarget->preferISHSTBarriers()) |
| 21122 | return makeDMB(Builder, Domain: ARM_MB::ISHST); |
| 21123 | // FIXME: add a comment with a link to documentation justifying this. |
| 21124 | else |
| 21125 | return makeDMB(Builder, Domain: ARM_MB::ISH); |
| 21126 | } |
| 21127 | llvm_unreachable("Unknown fence ordering in emitLeadingFence" ); |
| 21128 | } |
| 21129 | |
| 21130 | Instruction *ARMTargetLowering::emitTrailingFence(IRBuilderBase &Builder, |
| 21131 | Instruction *Inst, |
| 21132 | AtomicOrdering Ord) const { |
| 21133 | switch (Ord) { |
| 21134 | case AtomicOrdering::NotAtomic: |
| 21135 | case AtomicOrdering::Unordered: |
| 21136 | llvm_unreachable("Invalid fence: unordered/not-atomic" ); |
| 21137 | case AtomicOrdering::Monotonic: |
| 21138 | case AtomicOrdering::Release: |
| 21139 | return nullptr; // Nothing to do |
| 21140 | case AtomicOrdering::Acquire: |
| 21141 | case AtomicOrdering::AcquireRelease: |
| 21142 | case AtomicOrdering::SequentiallyConsistent: |
| 21143 | return makeDMB(Builder, Domain: ARM_MB::ISH); |
| 21144 | } |
| 21145 | llvm_unreachable("Unknown fence ordering in emitTrailingFence" ); |
| 21146 | } |
| 21147 | |
// Loads and stores less than 64 bits are already atomic; ones above that
| 21149 | // are doomed anyway, so defer to the default libcall and blame the OS when |
| 21150 | // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit |
| 21151 | // anything for those. |
| 21152 | TargetLoweringBase::AtomicExpansionKind |
| 21153 | ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { |
| 21154 | bool has64BitAtomicStore; |
| 21155 | if (Subtarget->isMClass()) |
| 21156 | has64BitAtomicStore = false; |
| 21157 | else if (Subtarget->isThumb()) |
| 21158 | has64BitAtomicStore = Subtarget->hasV7Ops(); |
| 21159 | else |
| 21160 | has64BitAtomicStore = Subtarget->hasV6Ops(); |
| 21161 | |
| 21162 | unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits(); |
| 21163 | return Size == 64 && has64BitAtomicStore ? AtomicExpansionKind::Expand |
| 21164 | : AtomicExpansionKind::None; |
| 21165 | } |
| 21166 | |
// Loads and stores less than 64 bits are already atomic; ones above that
| 21168 | // are doomed anyway, so defer to the default libcall and blame the OS when |
| 21169 | // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit |
| 21170 | // anything for those. |
| 21171 | // FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that |
| 21172 | // guarantee, see DDI0406C ARM architecture reference manual, |
| 21173 | // sections A8.8.72-74 LDRD) |
| 21174 | TargetLowering::AtomicExpansionKind |
| 21175 | ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { |
| 21176 | bool has64BitAtomicLoad; |
| 21177 | if (Subtarget->isMClass()) |
| 21178 | has64BitAtomicLoad = false; |
| 21179 | else if (Subtarget->isThumb()) |
| 21180 | has64BitAtomicLoad = Subtarget->hasV7Ops(); |
| 21181 | else |
| 21182 | has64BitAtomicLoad = Subtarget->hasV6Ops(); |
| 21183 | |
| 21184 | unsigned Size = LI->getType()->getPrimitiveSizeInBits(); |
| 21185 | return (Size == 64 && has64BitAtomicLoad) ? AtomicExpansionKind::LLOnly |
| 21186 | : AtomicExpansionKind::None; |
| 21187 | } |
| 21188 | |
| 21189 | // For the real atomic operations, we have ldrex/strex up to 32 bits, |
| 21190 | // and up to 64 bits on the non-M profiles |
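// E.g. a legal 32-bit "atomicrmw add" is normally expanded to the usual LL/SC
// retry loop, roughly:
//   1: ldrex r1, [r0]; add r1, r1, r2; strex r3, r1, [r0]; cmp r3, #0; bne 1b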
| 21191 | TargetLowering::AtomicExpansionKind |
| 21192 | ARMTargetLowering::shouldExpandAtomicRMWInIR(const AtomicRMWInst *AI) const { |
| 21193 | if (AI->isFloatingPointOperation()) |
| 21194 | return AtomicExpansionKind::CmpXChg; |
| 21195 | |
| 21196 | unsigned Size = AI->getType()->getPrimitiveSizeInBits(); |
| 21197 | bool hasAtomicRMW; |
| 21198 | if (Subtarget->isMClass()) |
| 21199 | hasAtomicRMW = Subtarget->hasV8MBaselineOps(); |
| 21200 | else if (Subtarget->isThumb()) |
| 21201 | hasAtomicRMW = Subtarget->hasV7Ops(); |
| 21202 | else |
| 21203 | hasAtomicRMW = Subtarget->hasV6Ops(); |
| 21204 | if (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW) { |
| 21205 | // At -O0, fast-regalloc cannot cope with the live vregs necessary to |
| 21206 | // implement atomicrmw without spilling. If the target address is also on |
| 21207 | // the stack and close enough to the spill slot, this can lead to a |
| 21208 | // situation where the monitor always gets cleared and the atomic operation |
| 21209 | // can never succeed. So at -O0 lower this operation to a CAS loop. |
| 21210 | if (getTargetMachine().getOptLevel() == CodeGenOptLevel::None) |
| 21211 | return AtomicExpansionKind::CmpXChg; |
| 21212 | return AtomicExpansionKind::LLSC; |
| 21213 | } |
| 21214 | return AtomicExpansionKind::None; |
| 21215 | } |
| 21216 | |
| 21217 | // Similar to shouldExpandAtomicRMWInIR, ldrex/strex can be used up to 32 |
| 21218 | // bits, and up to 64 bits on the non-M profiles. |
| 21219 | TargetLowering::AtomicExpansionKind |
| 21220 | ARMTargetLowering::shouldExpandAtomicCmpXchgInIR( |
| 21221 | const AtomicCmpXchgInst *AI) const { |
| 21222 | // At -O0, fast-regalloc cannot cope with the live vregs necessary to |
| 21223 | // implement cmpxchg without spilling. If the address being exchanged is also |
| 21224 | // on the stack and close enough to the spill slot, this can lead to a |
| 21225 | // situation where the monitor always gets cleared and the atomic operation |
| 21226 | // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead. |
| 21227 | unsigned Size = AI->getOperand(i_nocapture: 1)->getType()->getPrimitiveSizeInBits(); |
| 21228 | bool HasAtomicCmpXchg; |
| 21229 | if (Subtarget->isMClass()) |
| 21230 | HasAtomicCmpXchg = Subtarget->hasV8MBaselineOps(); |
| 21231 | else if (Subtarget->isThumb()) |
| 21232 | HasAtomicCmpXchg = Subtarget->hasV7Ops(); |
| 21233 | else |
| 21234 | HasAtomicCmpXchg = Subtarget->hasV6Ops(); |
| 21235 | if (getTargetMachine().getOptLevel() != CodeGenOptLevel::None && |
| 21236 | HasAtomicCmpXchg && Size <= (Subtarget->isMClass() ? 32U : 64U)) |
| 21237 | return AtomicExpansionKind::LLSC; |
| 21238 | return AtomicExpansionKind::None; |
| 21239 | } |
| 21240 | |
| 21241 | bool ARMTargetLowering::shouldInsertFencesForAtomic( |
| 21242 | const Instruction *I) const { |
| 21243 | return InsertFencesForAtomic; |
| 21244 | } |
| 21245 | |
| 21246 | bool ARMTargetLowering::useLoadStackGuardNode(const Module &M) const { |
| 21247 | // ROPI/RWPI are not supported currently. |
| 21248 | return !Subtarget->isROPI() && !Subtarget->isRWPI(); |
| 21249 | } |
| 21250 | |
| 21251 | void ARMTargetLowering::insertSSPDeclarations( |
| 21252 | Module &M, const LibcallLoweringInfo &Libcalls) const { |
| 21253 | // MSVC CRT provides functionalities for stack protection. |
| 21254 | RTLIB::LibcallImpl SecurityCheckCookieLibcall = |
| 21255 | Libcalls.getLibcallImpl(Call: RTLIB::SECURITY_CHECK_COOKIE); |
| 21256 | |
| 21257 | RTLIB::LibcallImpl SecurityCookieVar = |
| 21258 | Libcalls.getLibcallImpl(Call: RTLIB::STACK_CHECK_GUARD); |
| 21259 | if (SecurityCheckCookieLibcall != RTLIB::Unsupported && |
| 21260 | SecurityCookieVar != RTLIB::Unsupported) { |
| 21261 | // MSVC CRT has a global variable holding security cookie. |
| 21262 | M.getOrInsertGlobal(Name: getLibcallImplName(Call: SecurityCookieVar), |
| 21263 | Ty: PointerType::getUnqual(C&: M.getContext())); |
| 21264 | |
| 21265 | // MSVC CRT has a function to validate security cookie. |
| 21266 | FunctionCallee SecurityCheckCookie = |
| 21267 | M.getOrInsertFunction(Name: getLibcallImplName(Call: SecurityCheckCookieLibcall), |
| 21268 | RetTy: Type::getVoidTy(C&: M.getContext()), |
| 21269 | Args: PointerType::getUnqual(C&: M.getContext())); |
| 21270 | if (Function *F = dyn_cast<Function>(Val: SecurityCheckCookie.getCallee())) |
| 21271 | F->addParamAttr(ArgNo: 0, Kind: Attribute::AttrKind::InReg); |
| 21272 | } |
| 21273 | |
| 21274 | TargetLowering::insertSSPDeclarations(M, Libcalls); |
| 21275 | } |
| 21276 | |
| 21277 | bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx, |
| 21278 | unsigned &Cost) const { |
| 21279 | // If we do not have NEON, vector types are not natively supported. |
| 21280 | if (!Subtarget->hasNEON()) |
| 21281 | return false; |
| 21282 | |
| 21283 | // Floating point values and vector values map to the same register file. |
  // Therefore, although we could do a store+extract on a vector type, it is
  // better to leave such values as floats, since we have more freedom in the
  // addressing modes for those.
| 21287 | if (VectorTy->isFPOrFPVectorTy()) |
| 21288 | return false; |
| 21289 | |
| 21290 | // If the index is unknown at compile time, this is very expensive to lower |
| 21291 | // and it is not possible to combine the store with the extract. |
| 21292 | if (!isa<ConstantInt>(Val: Idx)) |
| 21293 | return false; |
| 21294 | |
| 21295 | assert(VectorTy->isVectorTy() && "VectorTy is not a vector type" ); |
| 21296 | unsigned BitWidth = VectorTy->getPrimitiveSizeInBits().getFixedValue(); |
| 21297 | // We can do a store + vector extract on any vector that fits perfectly in a D |
| 21298 | // or Q register. |
| 21299 | if (BitWidth == 64 || BitWidth == 128) { |
| 21300 | Cost = 0; |
| 21301 | return true; |
| 21302 | } |
| 21303 | return false; |
| 21304 | } |
| 21305 | |
| 21306 | bool ARMTargetLowering::canCreateUndefOrPoisonForTargetNode( |
| 21307 | SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, |
| 21308 | bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const { |
| 21309 | unsigned Opcode = Op.getOpcode(); |
| 21310 | switch (Opcode) { |
| 21311 | case ARMISD::VORRIMM: |
| 21312 | case ARMISD::VBICIMM: |
| 21313 | return false; |
| 21314 | } |
| 21315 | return TargetLowering::canCreateUndefOrPoisonForTargetNode( |
| 21316 | Op, DemandedElts, DAG, PoisonOnly, ConsiderFlags, Depth); |
| 21317 | } |
| 21318 | |
| 21319 | bool ARMTargetLowering::isCheapToSpeculateCttz(Type *Ty) const { |
| 21320 | return Subtarget->hasV5TOps() && !Subtarget->isThumb1Only(); |
| 21321 | } |
| 21322 | |
| 21323 | bool ARMTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const { |
| 21324 | return Subtarget->hasV5TOps() && !Subtarget->isThumb1Only(); |
| 21325 | } |
| 21326 | |
| 21327 | bool ARMTargetLowering::isMaskAndCmp0FoldingBeneficial( |
| 21328 | const Instruction &AndI) const { |
| 21329 | if (!Subtarget->hasV7Ops()) |
| 21330 | return false; |
| 21331 | |
| 21332 | // Sink the `and` instruction only if the mask would fit into a modified |
| 21333 | // immediate operand. |
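  // E.g. a mask of 0xff00 (an 8-bit value shifted left) fits and lets the
  // `and` fold into a flag-setting instruction, whereas a mask such as
  // 0x12345 would need a separate materialization and is not worth sinking.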
| 21334 | ConstantInt *Mask = dyn_cast<ConstantInt>(Val: AndI.getOperand(i: 1)); |
| 21335 | if (!Mask || Mask->getValue().getBitWidth() > 32u) |
| 21336 | return false; |
| 21337 | auto MaskVal = unsigned(Mask->getValue().getZExtValue()); |
| 21338 | return (Subtarget->isThumb2() ? ARM_AM::getT2SOImmVal(Arg: MaskVal) |
| 21339 | : ARM_AM::getSOImmVal(Arg: MaskVal)) != -1; |
| 21340 | } |
| 21341 | |
| 21342 | TargetLowering::ShiftLegalizationStrategy |
| 21343 | ARMTargetLowering::preferredShiftLegalizationStrategy( |
| 21344 | SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const { |
| 21345 | if (Subtarget->hasMinSize() && !Subtarget->isTargetWindows()) |
| 21346 | return ShiftLegalizationStrategy::LowerToLibcall; |
| 21347 | return TargetLowering::preferredShiftLegalizationStrategy(DAG, N, |
| 21348 | ExpansionFactor); |
| 21349 | } |
| 21350 | |
| 21351 | Value *ARMTargetLowering::emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, |
| 21352 | Value *Addr, |
| 21353 | AtomicOrdering Ord) const { |
| 21354 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); |
| 21355 | bool IsAcquire = isAcquireOrStronger(AO: Ord); |
| 21356 | |
| 21357 | // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd |
| 21358 | // intrinsic must return {i32, i32} and we have to recombine them into a |
| 21359 | // single i64 here. |
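  // E.g. an i64 acquire load is emitted roughly as
  //   %lohi = call { i32, i32 } @llvm.arm.ldaexd(ptr %addr)
  // followed by extractvalue/zext/shl/or to rebuild the i64 value.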
| 21360 | if (ValueTy->getPrimitiveSizeInBits() == 64) { |
| 21361 | Intrinsic::ID Int = |
| 21362 | IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd; |
| 21363 | |
| 21364 | Value *LoHi = |
| 21365 | Builder.CreateIntrinsic(ID: Int, Args: Addr, /*FMFSource=*/nullptr, Name: "lohi" ); |
| 21366 | |
| 21367 | Value *Lo = Builder.CreateExtractValue(Agg: LoHi, Idxs: 0, Name: "lo" ); |
| 21368 | Value *Hi = Builder.CreateExtractValue(Agg: LoHi, Idxs: 1, Name: "hi" ); |
| 21369 | if (!Subtarget->isLittle()) |
      std::swap(a&: Lo, b&: Hi);
| 21371 | Lo = Builder.CreateZExt(V: Lo, DestTy: ValueTy, Name: "lo64" ); |
| 21372 | Hi = Builder.CreateZExt(V: Hi, DestTy: ValueTy, Name: "hi64" ); |
| 21373 | return Builder.CreateOr( |
| 21374 | LHS: Lo, RHS: Builder.CreateShl(LHS: Hi, RHS: ConstantInt::get(Ty: ValueTy, V: 32)), Name: "val64" ); |
| 21375 | } |
| 21376 | |
| 21377 | Type *Tys[] = { Addr->getType() }; |
| 21378 | Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex; |
| 21379 | CallInst *CI = Builder.CreateIntrinsic(ID: Int, Types: Tys, Args: Addr); |
| 21380 | |
| 21381 | CI->addParamAttr( |
| 21382 | ArgNo: 0, Attr: Attribute::get(Context&: M->getContext(), Kind: Attribute::ElementType, Ty: ValueTy)); |
| 21383 | return Builder.CreateTruncOrBitCast(V: CI, DestTy: ValueTy); |
| 21384 | } |
| 21385 | |
| 21386 | void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance( |
| 21387 | IRBuilderBase &Builder) const { |
| 21388 | if (!Subtarget->hasV7Ops()) |
| 21389 | return; |
| 21390 | Builder.CreateIntrinsic(ID: Intrinsic::arm_clrex, Args: {}); |
| 21391 | } |
| 21392 | |
| 21393 | Value *ARMTargetLowering::emitStoreConditional(IRBuilderBase &Builder, |
| 21394 | Value *Val, Value *Addr, |
| 21395 | AtomicOrdering Ord) const { |
| 21396 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); |
| 21397 | bool IsRelease = isReleaseOrStronger(AO: Ord); |
| 21398 | |
| 21399 | // Since the intrinsics must have legal type, the i64 intrinsics take two |
| 21400 | // parameters: "i32, i32". We must marshal Val into the appropriate form |
| 21401 | // before the call. |
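  // E.g. an i64 release store is emitted roughly as
  //   %lo  = trunc i64 %val to i32
  //   %shr = lshr i64 %val, 32
  //   %hi  = trunc i64 %shr to i32
  //   %ok  = call i32 @llvm.arm.stlexd(i32 %lo, i32 %hi, ptr %addr)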
| 21402 | if (Val->getType()->getPrimitiveSizeInBits() == 64) { |
| 21403 | Intrinsic::ID Int = |
| 21404 | IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd; |
| 21405 | Type *Int32Ty = Type::getInt32Ty(C&: M->getContext()); |
| 21406 | |
| 21407 | Value *Lo = Builder.CreateTrunc(V: Val, DestTy: Int32Ty, Name: "lo" ); |
| 21408 | Value *Hi = Builder.CreateTrunc(V: Builder.CreateLShr(LHS: Val, RHS: 32), DestTy: Int32Ty, Name: "hi" ); |
| 21409 | if (!Subtarget->isLittle()) |
| 21410 | std::swap(a&: Lo, b&: Hi); |
| 21411 | return Builder.CreateIntrinsic(ID: Int, Args: {Lo, Hi, Addr}); |
| 21412 | } |
| 21413 | |
| 21414 | Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex; |
| 21415 | Type *Tys[] = { Addr->getType() }; |
| 21416 | Function *Strex = Intrinsic::getOrInsertDeclaration(M, id: Int, Tys); |
| 21417 | |
| 21418 | CallInst *CI = Builder.CreateCall( |
| 21419 | Callee: Strex, Args: {Builder.CreateZExtOrBitCast( |
| 21420 | V: Val, DestTy: Strex->getFunctionType()->getParamType(i: 0)), |
| 21421 | Addr}); |
| 21422 | CI->addParamAttr(ArgNo: 1, Attr: Attribute::get(Context&: M->getContext(), Kind: Attribute::ElementType, |
| 21423 | Ty: Val->getType())); |
| 21424 | return CI; |
| 21425 | } |
| 21426 | |
| 21427 | |
| 21428 | bool ARMTargetLowering::alignLoopsWithOptSize() const { |
| 21429 | return Subtarget->isMClass(); |
| 21430 | } |
| 21431 | |
| 21432 | /// A helper function for determining the number of interleaved accesses we |
| 21433 | /// will generate when lowering accesses of the given type. |
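/// E.g. a 512-bit access such as <16 x i32> is counted as four 128-bit
/// accesses, while anything of 128 bits or fewer counts as a single access.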
| 21434 | unsigned |
| 21435 | ARMTargetLowering::getNumInterleavedAccesses(VectorType *VecTy, |
| 21436 | const DataLayout &DL) const { |
| 21437 | return (DL.getTypeSizeInBits(Ty: VecTy) + 127) / 128; |
| 21438 | } |
| 21439 | |
| 21440 | bool ARMTargetLowering::isLegalInterleavedAccessType( |
| 21441 | unsigned Factor, FixedVectorType *VecTy, Align Alignment, |
| 21442 | const DataLayout &DL) const { |
| 21443 | |
| 21444 | unsigned VecSize = DL.getTypeSizeInBits(Ty: VecTy); |
| 21445 | unsigned ElSize = DL.getTypeSizeInBits(Ty: VecTy->getElementType()); |
| 21446 | |
| 21447 | if (!Subtarget->hasNEON() && !Subtarget->hasMVEIntegerOps()) |
| 21448 | return false; |
| 21449 | |
| 21450 | // Ensure the vector doesn't have f16 elements. Even though we could do an |
| 21451 | // i16 vldN, we can't hold the f16 vectors and will end up converting via |
| 21452 | // f32. |
| 21453 | if (Subtarget->hasNEON() && VecTy->getElementType()->isHalfTy()) |
| 21454 | return false; |
| 21455 | if (Subtarget->hasMVEIntegerOps() && Factor == 3) |
| 21456 | return false; |
| 21457 | |
| 21458 | // Ensure the number of vector elements is greater than 1. |
| 21459 | if (VecTy->getNumElements() < 2) |
| 21460 | return false; |
| 21461 | |
| 21462 | // Ensure the element type is legal. |
| 21463 | if (ElSize != 8 && ElSize != 16 && ElSize != 32) |
| 21464 | return false; |
  // And ensure the alignment is high enough under MVE.
| 21466 | if (Subtarget->hasMVEIntegerOps() && Alignment < ElSize / 8) |
| 21467 | return false; |
| 21468 | |
| 21469 | // Ensure the total vector size is 64 or a multiple of 128. Types larger than |
| 21470 | // 128 will be split into multiple interleaved accesses. |
| 21471 | if (Subtarget->hasNEON() && VecSize == 64) |
| 21472 | return true; |
| 21473 | return VecSize % 128 == 0; |
| 21474 | } |
| 21475 | |
| 21476 | unsigned ARMTargetLowering::getMaxSupportedInterleaveFactor() const { |
| 21477 | if (Subtarget->hasNEON()) |
| 21478 | return 4; |
| 21479 | if (Subtarget->hasMVEIntegerOps()) |
| 21480 | return MVEMaxSupportedInterleaveFactor; |
| 21481 | return TargetLoweringBase::getMaxSupportedInterleaveFactor(); |
| 21482 | } |
| 21483 | |
| 21484 | /// Lower an interleaved load into a vldN intrinsic. |
| 21485 | /// |
| 21486 | /// E.g. Lower an interleaved load (Factor = 2): |
| 21487 | /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4 |
| 21488 | /// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6> ; Extract even elements |
| 21489 | /// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7> ; Extract odd elements |
| 21490 | /// |
| 21491 | /// Into: |
| 21492 | /// %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4) |
| 21493 | /// %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0 |
| 21494 | /// %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1 |
| 21495 | bool ARMTargetLowering::lowerInterleavedLoad( |
| 21496 | Instruction *Load, Value *Mask, ArrayRef<ShuffleVectorInst *> Shuffles, |
| 21497 | ArrayRef<unsigned> Indices, unsigned Factor, const APInt &GapMask) const { |
| 21498 | assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && |
| 21499 | "Invalid interleave factor" ); |
| 21500 | assert(!Shuffles.empty() && "Empty shufflevector input" ); |
| 21501 | assert(Shuffles.size() == Indices.size() && |
| 21502 | "Unmatched number of shufflevectors and indices" ); |
| 21503 | |
| 21504 | auto *LI = dyn_cast<LoadInst>(Val: Load); |
| 21505 | if (!LI) |
| 21506 | return false; |
| 21507 | assert(!Mask && GapMask.popcount() == Factor && "Unexpected mask on a load" ); |
| 21508 | |
| 21509 | auto *VecTy = cast<FixedVectorType>(Val: Shuffles[0]->getType()); |
| 21510 | Type *EltTy = VecTy->getElementType(); |
| 21511 | |
| 21512 | const DataLayout &DL = LI->getDataLayout(); |
| 21513 | Align Alignment = LI->getAlign(); |
| 21514 | |
| 21515 | // Skip if we do not have NEON and skip illegal vector types. We can |
| 21516 | // "legalize" wide vector types into multiple interleaved accesses as long as |
| 21517 | // the vector types are divisible by 128. |
| 21518 | if (!isLegalInterleavedAccessType(Factor, VecTy, Alignment, DL)) |
| 21519 | return false; |
| 21520 | |
| 21521 | unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL); |
| 21522 | |
  // A pointer vector cannot be the return type of the ldN intrinsics. Need to
| 21524 | // load integer vectors first and then convert to pointer vectors. |
| 21525 | if (EltTy->isPointerTy()) |
| 21526 | VecTy = FixedVectorType::get(ElementType: DL.getIntPtrType(EltTy), FVTy: VecTy); |
| 21527 | |
| 21528 | IRBuilder<> Builder(LI); |
| 21529 | |
| 21530 | // The base address of the load. |
| 21531 | Value *BaseAddr = LI->getPointerOperand(); |
| 21532 | |
| 21533 | if (NumLoads > 1) { |
| 21534 | // If we're going to generate more than one load, reset the sub-vector type |
| 21535 | // to something legal. |
| 21536 | VecTy = FixedVectorType::get(ElementType: VecTy->getElementType(), |
| 21537 | NumElts: VecTy->getNumElements() / NumLoads); |
| 21538 | } |
| 21539 | |
| 21540 | assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!" ); |
| 21541 | |
| 21542 | auto createLoadIntrinsic = [&](Value *BaseAddr) { |
| 21543 | if (Subtarget->hasNEON()) { |
| 21544 | Type *PtrTy = Builder.getPtrTy(AddrSpace: LI->getPointerAddressSpace()); |
| 21545 | Type *Tys[] = {VecTy, PtrTy}; |
| 21546 | static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2, |
| 21547 | Intrinsic::arm_neon_vld3, |
| 21548 | Intrinsic::arm_neon_vld4}; |
| 21549 | |
| 21550 | SmallVector<Value *, 2> Ops; |
| 21551 | Ops.push_back(Elt: BaseAddr); |
| 21552 | Ops.push_back(Elt: Builder.getInt32(C: LI->getAlign().value())); |
| 21553 | |
| 21554 | return Builder.CreateIntrinsic(ID: LoadInts[Factor - 2], Types: Tys, Args: Ops, |
| 21555 | /*FMFSource=*/nullptr, Name: "vldN" ); |
| 21556 | } else { |
| 21557 | assert((Factor == 2 || Factor == 4) && |
| 21558 | "expected interleave factor of 2 or 4 for MVE" ); |
| 21559 | Intrinsic::ID LoadInts = |
| 21560 | Factor == 2 ? Intrinsic::arm_mve_vld2q : Intrinsic::arm_mve_vld4q; |
| 21561 | Type *PtrTy = Builder.getPtrTy(AddrSpace: LI->getPointerAddressSpace()); |
| 21562 | Type *Tys[] = {VecTy, PtrTy}; |
| 21563 | |
| 21564 | SmallVector<Value *, 2> Ops; |
| 21565 | Ops.push_back(Elt: BaseAddr); |
| 21566 | return Builder.CreateIntrinsic(ID: LoadInts, Types: Tys, Args: Ops, /*FMFSource=*/nullptr, |
| 21567 | Name: "vldN" ); |
| 21568 | } |
| 21569 | }; |
| 21570 | |
| 21571 | // Holds sub-vectors extracted from the load intrinsic return values. The |
| 21572 | // sub-vectors are associated with the shufflevector instructions they will |
| 21573 | // replace. |
| 21574 | DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs; |
| 21575 | |
| 21576 | for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) { |
| 21577 | // If we're generating more than one load, compute the base address of |
| 21578 | // subsequent loads as an offset from the previous. |
| 21579 | if (LoadCount > 0) |
| 21580 | BaseAddr = Builder.CreateConstGEP1_32(Ty: VecTy->getElementType(), Ptr: BaseAddr, |
| 21581 | Idx0: VecTy->getNumElements() * Factor); |
| 21582 | |
| 21583 | CallInst *VldN = createLoadIntrinsic(BaseAddr); |
| 21584 | |
| 21585 | // Replace uses of each shufflevector with the corresponding vector loaded |
| 21586 | // by ldN. |
| 21587 | for (unsigned i = 0; i < Shuffles.size(); i++) { |
| 21588 | ShuffleVectorInst *SV = Shuffles[i]; |
| 21589 | unsigned Index = Indices[i]; |
| 21590 | |
| 21591 | Value *SubVec = Builder.CreateExtractValue(Agg: VldN, Idxs: Index); |
| 21592 | |
| 21593 | // Convert the integer vector to pointer vector if the element is pointer. |
| 21594 | if (EltTy->isPointerTy()) |
| 21595 | SubVec = Builder.CreateIntToPtr( |
| 21596 | V: SubVec, |
| 21597 | DestTy: FixedVectorType::get(ElementType: SV->getType()->getElementType(), FVTy: VecTy)); |
| 21598 | |
| 21599 | SubVecs[SV].push_back(Elt: SubVec); |
| 21600 | } |
| 21601 | } |
| 21602 | |
| 21603 | // Replace uses of the shufflevector instructions with the sub-vectors |
| 21604 | // returned by the load intrinsic. If a shufflevector instruction is |
| 21605 | // associated with more than one sub-vector, those sub-vectors will be |
| 21606 | // concatenated into a single wide vector. |
| 21607 | for (ShuffleVectorInst *SVI : Shuffles) { |
| 21608 | auto &SubVec = SubVecs[SVI]; |
| 21609 | auto *WideVec = |
| 21610 | SubVec.size() > 1 ? concatenateVectors(Builder, Vecs: SubVec) : SubVec[0]; |
| 21611 | SVI->replaceAllUsesWith(V: WideVec); |
| 21612 | } |
| 21613 | |
| 21614 | return true; |
| 21615 | } |
| 21616 | |
| 21617 | /// Lower an interleaved store into a vstN intrinsic. |
| 21618 | /// |
| 21619 | /// E.g. Lower an interleaved store (Factor = 3): |
| 21620 | /// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1, |
| 21621 | /// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> |
| 21622 | /// store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4 |
| 21623 | /// |
| 21624 | /// Into: |
| 21625 | /// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3> |
| 21626 | /// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7> |
| 21627 | /// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11> |
| 21628 | /// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4) |
| 21629 | /// |
| 21630 | /// Note that the new shufflevectors will be removed and we'll only generate one |
| 21631 | /// vst3 instruction in CodeGen. |
| 21632 | /// |
| 21633 | /// Example for a more general valid mask (Factor 3). Lower: |
| 21634 | /// %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1, |
| 21635 | /// <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19> |
| 21636 | /// store <12 x i32> %i.vec, <12 x i32>* %ptr |
| 21637 | /// |
| 21638 | /// Into: |
| 21639 | /// %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7> |
| 21640 | /// %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35> |
| 21641 | /// %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19> |
| 21642 | /// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4) |
| 21643 | bool ARMTargetLowering::lowerInterleavedStore(Instruction *Store, |
| 21644 | Value *LaneMask, |
| 21645 | ShuffleVectorInst *SVI, |
| 21646 | unsigned Factor, |
| 21647 | const APInt &GapMask) const { |
| 21648 | assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && |
| 21649 | "Invalid interleave factor" ); |
| 21650 | auto *SI = dyn_cast<StoreInst>(Val: Store); |
| 21651 | if (!SI) |
| 21652 | return false; |
| 21653 | assert(!LaneMask && GapMask.popcount() == Factor && |
| 21654 | "Unexpected mask on store" ); |
| 21655 | |
| 21656 | auto *VecTy = cast<FixedVectorType>(Val: SVI->getType()); |
| 21657 | assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store" ); |
| 21658 | |
| 21659 | unsigned LaneLen = VecTy->getNumElements() / Factor; |
| 21660 | Type *EltTy = VecTy->getElementType(); |
| 21661 | auto *SubVecTy = FixedVectorType::get(ElementType: EltTy, NumElts: LaneLen); |
| 21662 | |
| 21663 | const DataLayout &DL = SI->getDataLayout(); |
| 21664 | Align Alignment = SI->getAlign(); |
| 21665 | |
| 21666 | // Skip if we do not have NEON and skip illegal vector types. We can |
| 21667 | // "legalize" wide vector types into multiple interleaved accesses as long as |
| 21668 | // the vector types are divisible by 128. |
| 21669 | if (!isLegalInterleavedAccessType(Factor, VecTy: SubVecTy, Alignment, DL)) |
| 21670 | return false; |
| 21671 | |
| 21672 | unsigned NumStores = getNumInterleavedAccesses(VecTy: SubVecTy, DL); |
| 21673 | |
| 21674 | Value *Op0 = SVI->getOperand(i_nocapture: 0); |
| 21675 | Value *Op1 = SVI->getOperand(i_nocapture: 1); |
| 21676 | IRBuilder<> Builder(SI); |
| 21677 | |
| 21678 | // StN intrinsics don't support pointer vectors as arguments. Convert pointer |
| 21679 | // vectors to integer vectors. |
| 21680 | if (EltTy->isPointerTy()) { |
| 21681 | Type *IntTy = DL.getIntPtrType(EltTy); |
| 21682 | |
| 21683 | // Convert to the corresponding integer vector. |
| 21684 | auto *IntVecTy = |
| 21685 | FixedVectorType::get(ElementType: IntTy, FVTy: cast<FixedVectorType>(Val: Op0->getType())); |
| 21686 | Op0 = Builder.CreatePtrToInt(V: Op0, DestTy: IntVecTy); |
| 21687 | Op1 = Builder.CreatePtrToInt(V: Op1, DestTy: IntVecTy); |
| 21688 | |
| 21689 | SubVecTy = FixedVectorType::get(ElementType: IntTy, NumElts: LaneLen); |
| 21690 | } |
| 21691 | |
| 21692 | // The base address of the store. |
| 21693 | Value *BaseAddr = SI->getPointerOperand(); |
| 21694 | |
| 21695 | if (NumStores > 1) { |
| 21696 | // If we're going to generate more than one store, reset the lane length |
| 21697 | // and sub-vector type to something legal. |
| 21698 | LaneLen /= NumStores; |
| 21699 | SubVecTy = FixedVectorType::get(ElementType: SubVecTy->getElementType(), NumElts: LaneLen); |
| 21700 | } |
| 21701 | |
| 21702 | assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!" ); |
| 21703 | |
| 21704 | auto Mask = SVI->getShuffleMask(); |
| 21705 | |
| 21706 | auto createStoreIntrinsic = [&](Value *BaseAddr, |
| 21707 | SmallVectorImpl<Value *> &Shuffles) { |
| 21708 | if (Subtarget->hasNEON()) { |
| 21709 | static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2, |
| 21710 | Intrinsic::arm_neon_vst3, |
| 21711 | Intrinsic::arm_neon_vst4}; |
| 21712 | Type *PtrTy = Builder.getPtrTy(AddrSpace: SI->getPointerAddressSpace()); |
| 21713 | Type *Tys[] = {PtrTy, SubVecTy}; |
| 21714 | |
| 21715 | SmallVector<Value *, 6> Ops; |
| 21716 | Ops.push_back(Elt: BaseAddr); |
| 21717 | append_range(C&: Ops, R&: Shuffles); |
| 21718 | Ops.push_back(Elt: Builder.getInt32(C: SI->getAlign().value())); |
| 21719 | Builder.CreateIntrinsic(ID: StoreInts[Factor - 2], Types: Tys, Args: Ops); |
| 21720 | } else { |
| 21721 | assert((Factor == 2 || Factor == 4) && |
| 21722 | "expected interleave factor of 2 or 4 for MVE" ); |
| 21723 | Intrinsic::ID StoreInts = |
| 21724 | Factor == 2 ? Intrinsic::arm_mve_vst2q : Intrinsic::arm_mve_vst4q; |
| 21725 | Type *PtrTy = Builder.getPtrTy(AddrSpace: SI->getPointerAddressSpace()); |
| 21726 | Type *Tys[] = {PtrTy, SubVecTy}; |
| 21727 | |
| 21728 | SmallVector<Value *, 6> Ops; |
| 21729 | Ops.push_back(Elt: BaseAddr); |
| 21730 | append_range(C&: Ops, R&: Shuffles); |
| 21731 | for (unsigned F = 0; F < Factor; F++) { |
| 21732 | Ops.push_back(Elt: Builder.getInt32(C: F)); |
| 21733 | Builder.CreateIntrinsic(ID: StoreInts, Types: Tys, Args: Ops); |
| 21734 | Ops.pop_back(); |
| 21735 | } |
| 21736 | } |
| 21737 | }; |
| 21738 | |
| 21739 | for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) { |
    // If we're generating more than one store, compute the base address of
| 21741 | // subsequent stores as an offset from the previous. |
| 21742 | if (StoreCount > 0) |
| 21743 | BaseAddr = Builder.CreateConstGEP1_32(Ty: SubVecTy->getElementType(), |
| 21744 | Ptr: BaseAddr, Idx0: LaneLen * Factor); |
| 21745 | |
| 21746 | SmallVector<Value *, 4> Shuffles; |
| 21747 | |
| 21748 | // Split the shufflevector operands into sub vectors for the new vstN call. |
| 21749 | for (unsigned i = 0; i < Factor; i++) { |
| 21750 | unsigned IdxI = StoreCount * LaneLen * Factor + i; |
| 21751 | if (Mask[IdxI] >= 0) { |
| 21752 | Shuffles.push_back(Elt: Builder.CreateShuffleVector( |
| 21753 | V1: Op0, V2: Op1, Mask: createSequentialMask(Start: Mask[IdxI], NumInts: LaneLen, NumUndefs: 0))); |
| 21754 | } else { |
| 21755 | unsigned StartMask = 0; |
| 21756 | for (unsigned j = 1; j < LaneLen; j++) { |
| 21757 | unsigned IdxJ = StoreCount * LaneLen * Factor + j; |
| 21758 | if (Mask[IdxJ * Factor + IdxI] >= 0) { |
| 21759 | StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ; |
| 21760 | break; |
| 21761 | } |
| 21762 | } |
        // Note: If all elements in a chunk are undefs, StartMask=0!
        // Note: Filling undef gaps with arbitrary elements is ok, since
        // those elements were being written anyway (with undefs).
        // In the case of all undefs we default to using elements from 0.
        // Note: StartMask cannot be negative; it is checked in
        // isReInterleaveMask.
| 21769 | Shuffles.push_back(Elt: Builder.CreateShuffleVector( |
| 21770 | V1: Op0, V2: Op1, Mask: createSequentialMask(Start: StartMask, NumInts: LaneLen, NumUndefs: 0))); |
| 21771 | } |
| 21772 | } |
| 21773 | |
| 21774 | createStoreIntrinsic(BaseAddr, Shuffles); |
| 21775 | } |
| 21776 | return true; |
| 21777 | } |
| 21778 | |
| 21779 | enum HABaseType { |
| 21780 | HA_UNKNOWN = 0, |
| 21781 | HA_FLOAT, |
| 21782 | HA_DOUBLE, |
| 21783 | HA_VECT64, |
| 21784 | HA_VECT128 |
| 21785 | }; |
| 21786 | |
| 21787 | static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base, |
| 21788 | uint64_t &Members) { |
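  // E.g. "struct { float x, y, z; }" is a homogeneous aggregate with
  // Base == HA_FLOAT and Members == 3; mixing float and double base types
  // makes the type non-homogeneous.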
| 21789 | if (auto *ST = dyn_cast<StructType>(Val: Ty)) { |
| 21790 | for (unsigned i = 0; i < ST->getNumElements(); ++i) { |
| 21791 | uint64_t SubMembers = 0; |
| 21792 | if (!isHomogeneousAggregate(Ty: ST->getElementType(N: i), Base, Members&: SubMembers)) |
| 21793 | return false; |
| 21794 | Members += SubMembers; |
| 21795 | } |
| 21796 | } else if (auto *AT = dyn_cast<ArrayType>(Val: Ty)) { |
| 21797 | uint64_t SubMembers = 0; |
| 21798 | if (!isHomogeneousAggregate(Ty: AT->getElementType(), Base, Members&: SubMembers)) |
| 21799 | return false; |
| 21800 | Members += SubMembers * AT->getNumElements(); |
| 21801 | } else if (Ty->isFloatTy()) { |
| 21802 | if (Base != HA_UNKNOWN && Base != HA_FLOAT) |
| 21803 | return false; |
| 21804 | Members = 1; |
| 21805 | Base = HA_FLOAT; |
| 21806 | } else if (Ty->isDoubleTy()) { |
| 21807 | if (Base != HA_UNKNOWN && Base != HA_DOUBLE) |
| 21808 | return false; |
| 21809 | Members = 1; |
| 21810 | Base = HA_DOUBLE; |
| 21811 | } else if (auto *VT = dyn_cast<VectorType>(Val: Ty)) { |
| 21812 | Members = 1; |
| 21813 | switch (Base) { |
| 21814 | case HA_FLOAT: |
| 21815 | case HA_DOUBLE: |
| 21816 | return false; |
| 21817 | case HA_VECT64: |
| 21818 | return VT->getPrimitiveSizeInBits().getFixedValue() == 64; |
| 21819 | case HA_VECT128: |
| 21820 | return VT->getPrimitiveSizeInBits().getFixedValue() == 128; |
| 21821 | case HA_UNKNOWN: |
| 21822 | switch (VT->getPrimitiveSizeInBits().getFixedValue()) { |
| 21823 | case 64: |
| 21824 | Base = HA_VECT64; |
| 21825 | return true; |
| 21826 | case 128: |
| 21827 | Base = HA_VECT128; |
| 21828 | return true; |
| 21829 | default: |
| 21830 | return false; |
| 21831 | } |
| 21832 | } |
| 21833 | } |
| 21834 | |
| 21835 | return (Members > 0 && Members <= 4); |
| 21836 | } |
| 21837 | |
| 21838 | /// Return the correct alignment for the current calling convention. |
| 21839 | Align ARMTargetLowering::getABIAlignmentForCallingConv( |
| 21840 | Type *ArgTy, const DataLayout &DL) const { |
| 21841 | const Align ABITypeAlign = DL.getABITypeAlign(Ty: ArgTy); |
| 21842 | if (!ArgTy->isVectorTy()) |
| 21843 | return ABITypeAlign; |
| 21844 | |
| 21845 | // Avoid over-aligning vector parameters. It would require realigning the |
| 21846 | // stack and waste space for no real benefit. |
| 21847 | MaybeAlign StackAlign = DL.getStackAlignment(); |
| 21848 | assert(StackAlign && "data layout string is missing stack alignment" ); |
| 21849 | return std::min(a: ABITypeAlign, b: *StackAlign); |
| 21850 | } |
| 21851 | |
| 21852 | /// Return true if a type is an AAPCS-VFP homogeneous aggregate or one of |
| 21853 | /// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when |
| 21854 | /// passing according to AAPCS rules. |
| 21855 | bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters( |
| 21856 | Type *Ty, CallingConv::ID CallConv, bool isVarArg, |
| 21857 | const DataLayout &DL) const { |
| 21858 | if (getEffectiveCallingConv(CC: CallConv, isVarArg) != |
| 21859 | CallingConv::ARM_AAPCS_VFP) |
| 21860 | return false; |
| 21861 | |
| 21862 | HABaseType Base = HA_UNKNOWN; |
| 21863 | uint64_t Members = 0; |
| 21864 | bool IsHA = isHomogeneousAggregate(Ty, Base, Members); |
| 21865 | LLVM_DEBUG(dbgs() << "isHA: " << IsHA << " " ; Ty->dump()); |
| 21866 | |
| 21867 | bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy(); |
| 21868 | return IsHA || IsIntArray; |
| 21869 | } |
| 21870 | |
| 21871 | Register ARMTargetLowering::getExceptionPointerRegister( |
| 21872 | const Constant *PersonalityFn) const { |
| 21873 | // Platforms which do not use SjLj EH may return values in these registers |
| 21874 | // via the personality function. |
| 21875 | ExceptionHandling EM = getTargetMachine().getExceptionModel(); |
| 21876 | return EM == ExceptionHandling::SjLj ? Register() : ARM::R0; |
| 21877 | } |
| 21878 | |
| 21879 | Register ARMTargetLowering::getExceptionSelectorRegister( |
| 21880 | const Constant *PersonalityFn) const { |
| 21881 | // Platforms which do not use SjLj EH may return values in these registers |
| 21882 | // via the personality function. |
| 21883 | ExceptionHandling EM = getTargetMachine().getExceptionModel(); |
| 21884 | return EM == ExceptionHandling::SjLj ? Register() : ARM::R1; |
| 21885 | } |
| 21886 | |
| 21887 | void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { |
| 21888 | // Update IsSplitCSR in ARMFunctionInfo. |
| 21889 | ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>(); |
| 21890 | AFI->setIsSplitCSR(true); |
| 21891 | } |
| 21892 | |
| 21893 | void ARMTargetLowering::insertCopiesSplitCSR( |
| 21894 | MachineBasicBlock *Entry, |
| 21895 | const SmallVectorImpl<MachineBasicBlock *> &Exits) const { |
| 21896 | const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 21897 | const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(MF: Entry->getParent()); |
| 21898 | if (!IStart) |
| 21899 | return; |
| 21900 | |
| 21901 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 21902 | MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); |
| 21903 | MachineBasicBlock::iterator MBBI = Entry->begin(); |
| 21904 | for (const MCPhysReg *I = IStart; *I; ++I) { |
| 21905 | const TargetRegisterClass *RC = nullptr; |
| 21906 | if (ARM::GPRRegClass.contains(Reg: *I)) |
| 21907 | RC = &ARM::GPRRegClass; |
| 21908 | else if (ARM::DPRRegClass.contains(Reg: *I)) |
| 21909 | RC = &ARM::DPRRegClass; |
| 21910 | else |
| 21911 | llvm_unreachable("Unexpected register class in CSRsViaCopy!" ); |
| 21912 | |
| 21913 | Register NewVR = MRI->createVirtualRegister(RegClass: RC); |
| 21914 | // Create copy from CSR to a virtual register. |
| 21915 | // FIXME: this currently does not emit CFI pseudo-instructions, it works |
| 21916 | // fine for CXX_FAST_TLS since the C++-style TLS access functions should be |
| 21917 | // nounwind. If we want to generalize this later, we may need to emit |
| 21918 | // CFI pseudo-instructions. |
| 21919 | assert(Entry->getParent()->getFunction().hasFnAttribute( |
| 21920 | Attribute::NoUnwind) && |
| 21921 | "Function should be nounwind in insertCopiesSplitCSR!" ); |
| 21922 | Entry->addLiveIn(PhysReg: *I); |
| 21923 | BuildMI(BB&: *Entry, I: MBBI, MIMD: DebugLoc(), MCID: TII->get(Opcode: TargetOpcode::COPY), DestReg: NewVR) |
| 21924 | .addReg(RegNo: *I); |
| 21925 | |
| 21926 | // Insert the copy-back instructions right before the terminator. |
| 21927 | for (auto *Exit : Exits) |
| 21928 | BuildMI(BB&: *Exit, I: Exit->getFirstTerminator(), MIMD: DebugLoc(), |
| 21929 | MCID: TII->get(Opcode: TargetOpcode::COPY), DestReg: *I) |
| 21930 | .addReg(RegNo: NewVR); |
| 21931 | } |
| 21932 | } |
| 21933 | |
| 21934 | void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const { |
| 21935 | MF.getFrameInfo().computeMaxCallFrameSize(MF); |
| 21936 | TargetLoweringBase::finalizeLowering(MF); |
| 21937 | } |
| 21938 | |
| 21939 | bool ARMTargetLowering::isComplexDeinterleavingSupported() const { |
| 21940 | return Subtarget->hasMVEIntegerOps(); |
| 21941 | } |
| 21942 | |
| 21943 | bool ARMTargetLowering::isComplexDeinterleavingOperationSupported( |
| 21944 | ComplexDeinterleavingOperation Operation, Type *Ty) const { |
| 21945 | auto *VTy = dyn_cast<FixedVectorType>(Val: Ty); |
| 21946 | if (!VTy) |
| 21947 | return false; |
| 21948 | |
| 21949 | auto *ScalarTy = VTy->getScalarType(); |
| 21950 | unsigned NumElements = VTy->getNumElements(); |
| 21951 | |
| 21952 | unsigned VTyWidth = VTy->getScalarSizeInBits() * NumElements; |
| 21953 | if (VTyWidth < 128 || !llvm::isPowerOf2_32(Value: VTyWidth)) |
| 21954 | return false; |
| 21955 | |
| 21956 | // Both VCADD and VCMUL/VCMLA support the same types, F16 and F32 |
| 21957 | if (ScalarTy->isHalfTy() || ScalarTy->isFloatTy()) |
| 21958 | return Subtarget->hasMVEFloatOps(); |
| 21959 | |
| 21960 | if (Operation != ComplexDeinterleavingOperation::CAdd) |
| 21961 | return false; |
| 21962 | |
| 21963 | return Subtarget->hasMVEIntegerOps() && |
| 21964 | (ScalarTy->isIntegerTy(Bitwidth: 8) || ScalarTy->isIntegerTy(Bitwidth: 16) || |
| 21965 | ScalarTy->isIntegerTy(Bitwidth: 32)); |
| 21966 | } |
| 21967 | |
| 21968 | ArrayRef<MCPhysReg> ARMTargetLowering::getRoundingControlRegisters() const { |
| 21969 | static const MCPhysReg RCRegs[] = {ARM::FPSCR_RM}; |
| 21970 | return RCRegs; |
| 21971 | } |
| 21972 | |
| 21973 | Value *ARMTargetLowering::createComplexDeinterleavingIR( |
| 21974 | IRBuilderBase &B, ComplexDeinterleavingOperation OperationType, |
| 21975 | ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB, |
| 21976 | Value *Accumulator) const { |
| 21977 | |
| 21978 | FixedVectorType *Ty = cast<FixedVectorType>(Val: InputA->getType()); |
| 21979 | |
| 21980 | unsigned TyWidth = Ty->getScalarSizeInBits() * Ty->getNumElements(); |
| 21981 | |
| 21982 | assert(TyWidth >= 128 && "Width of vector type must be at least 128 bits" ); |
| 21983 | |
| 21984 | if (TyWidth > 128) { |
| 21985 | int Stride = Ty->getNumElements() / 2; |
| 21986 | auto SplitSeq = llvm::seq<int>(Begin: 0, End: Ty->getNumElements()); |
| 21987 | auto SplitSeqVec = llvm::to_vector(Range&: SplitSeq); |
| 21988 | ArrayRef<int> LowerSplitMask(&SplitSeqVec[0], Stride); |
| 21989 | ArrayRef<int> UpperSplitMask(&SplitSeqVec[Stride], Stride); |
| 21990 | |
| 21991 | auto *LowerSplitA = B.CreateShuffleVector(V: InputA, Mask: LowerSplitMask); |
| 21992 | auto *LowerSplitB = B.CreateShuffleVector(V: InputB, Mask: LowerSplitMask); |
| 21993 | auto *UpperSplitA = B.CreateShuffleVector(V: InputA, Mask: UpperSplitMask); |
| 21994 | auto *UpperSplitB = B.CreateShuffleVector(V: InputB, Mask: UpperSplitMask); |
| 21995 | Value *LowerSplitAcc = nullptr; |
| 21996 | Value *UpperSplitAcc = nullptr; |
| 21997 | |
| 21998 | if (Accumulator) { |
| 21999 | LowerSplitAcc = B.CreateShuffleVector(V: Accumulator, Mask: LowerSplitMask); |
| 22000 | UpperSplitAcc = B.CreateShuffleVector(V: Accumulator, Mask: UpperSplitMask); |
| 22001 | } |
| 22002 | |
| 22003 | auto *LowerSplitInt = createComplexDeinterleavingIR( |
| 22004 | B, OperationType, Rotation, InputA: LowerSplitA, InputB: LowerSplitB, Accumulator: LowerSplitAcc); |
| 22005 | auto *UpperSplitInt = createComplexDeinterleavingIR( |
| 22006 | B, OperationType, Rotation, InputA: UpperSplitA, InputB: UpperSplitB, Accumulator: UpperSplitAcc); |
| 22007 | |
| 22008 | ArrayRef<int> JoinMask(&SplitSeqVec[0], Ty->getNumElements()); |
| 22009 | return B.CreateShuffleVector(V1: LowerSplitInt, V2: UpperSplitInt, Mask: JoinMask); |
| 22010 | } |
| 22011 | |
| 22012 | auto *IntTy = Type::getInt32Ty(C&: B.getContext()); |
| 22013 | |
| 22014 | ConstantInt *ConstRotation = nullptr; |
| 22015 | if (OperationType == ComplexDeinterleavingOperation::CMulPartial) { |
| 22016 | ConstRotation = ConstantInt::get(Ty: IntTy, V: (int)Rotation); |
| 22017 | |
| 22018 | if (Accumulator) |
| 22019 | return B.CreateIntrinsic(ID: Intrinsic::arm_mve_vcmlaq, Types: Ty, |
| 22020 | Args: {ConstRotation, Accumulator, InputB, InputA}); |
| 22021 | return B.CreateIntrinsic(ID: Intrinsic::arm_mve_vcmulq, Types: Ty, |
| 22022 | Args: {ConstRotation, InputB, InputA}); |
| 22023 | } |
| 22024 | |
| 22025 | if (OperationType == ComplexDeinterleavingOperation::CAdd) { |
| 22026 | // 1 means the value is not halved. |
| 22027 | auto *ConstHalving = ConstantInt::get(Ty: IntTy, V: 1); |
| 22028 | |
| 22029 | if (Rotation == ComplexDeinterleavingRotation::Rotation_90) |
| 22030 | ConstRotation = ConstantInt::get(Ty: IntTy, V: 0); |
| 22031 | else if (Rotation == ComplexDeinterleavingRotation::Rotation_270) |
| 22032 | ConstRotation = ConstantInt::get(Ty: IntTy, V: 1); |
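    // For arm_mve_vcaddq the rotation operand encodes #90 as 0 and #270 as 1;
    // any other rotation has no MVE encoding, hence the bail-out below.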
| 22033 | |
| 22034 | if (!ConstRotation) |
| 22035 | return nullptr; // Invalid rotation for arm_mve_vcaddq |
| 22036 | |
| 22037 | return B.CreateIntrinsic(ID: Intrinsic::arm_mve_vcaddq, Types: Ty, |
| 22038 | Args: {ConstHalving, ConstRotation, InputA, InputB}); |
| 22039 | } |
| 22040 | |
| 22041 | return nullptr; |
| 22042 | } |
| 22043 | |