//===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Hexagon uses to lower LLVM code
// into a selection DAG.
//
//===----------------------------------------------------------------------===//

#include "HexagonISelLowering.h"
#include "Hexagon.h"
#include "HexagonMachineFunctionInfo.h"
#include "HexagonRegisterInfo.h"
#include "HexagonSubtarget.h"
#include "HexagonTargetMachine.h"
#include "HexagonTargetObjectFile.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "hexagon-lowering"

static cl::opt<bool> EmitJumpTables("hexagon-emit-jump-tables",
  cl::init(true), cl::Hidden,
  cl::desc("Control jump table emission on Hexagon target"));

static cl::opt<bool>
    EnableHexSDNodeSched("enable-hexagon-sdnode-sched", cl::Hidden,
                         cl::desc("Enable Hexagon SDNode scheduling"));

static cl::opt<int> MinimumJumpTables("minimum-jump-tables", cl::Hidden,
                                      cl::init(5),
                                      cl::desc("Set minimum jump tables"));

static cl::opt<int>
    MaxStoresPerMemcpyCL("max-store-memcpy", cl::Hidden, cl::init(6),
                         cl::desc("Max #stores to inline memcpy"));

static cl::opt<int>
    MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os", cl::Hidden, cl::init(4),
                                cl::desc("Max #stores to inline memcpy"));

static cl::opt<int>
    MaxStoresPerMemmoveCL("max-store-memmove", cl::Hidden, cl::init(6),
                          cl::desc("Max #stores to inline memmove"));

static cl::opt<int>
    MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os", cl::Hidden,
                                 cl::init(4),
                                 cl::desc("Max #stores to inline memmove"));

static cl::opt<int>
    MaxStoresPerMemsetCL("max-store-memset", cl::Hidden, cl::init(8),
                         cl::desc("Max #stores to inline memset"));

static cl::opt<int>
    MaxStoresPerMemsetOptSizeCL("max-store-memset-Os", cl::Hidden, cl::init(4),
                                cl::desc("Max #stores to inline memset"));

static cl::opt<bool>
    ConstantLoadsToImm("constant-loads-to-imm", cl::Hidden, cl::init(true),
                       cl::desc("Convert constant loads to immediate values."));

static cl::opt<bool> AlignLoads("hexagon-align-loads",
  cl::Hidden, cl::init(false),
  cl::desc("Rewrite unaligned loads as a pair of aligned loads"));

static cl::opt<bool>
    DisableArgsMinAlignment("hexagon-disable-args-min-alignment", cl::Hidden,
                            cl::init(false),
                            cl::desc("Disable minimum alignment of 1 for "
                                     "arguments passed by value on stack"));
namespace {

class HexagonCCState : public CCState {
  unsigned NumNamedVarArgParams = 0;

public:
  HexagonCCState(CallingConv::ID CC, bool IsVarArg, MachineFunction &MF,
                 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
                 unsigned NumNamedArgs)
      : CCState(CC, IsVarArg, MF, locs, C),
        NumNamedVarArgParams(NumNamedArgs) {}
  unsigned getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
};

} // end anonymous namespace


// Implement calling convention for Hexagon.

static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                       CCValAssign::LocInfo &LocInfo,
                       ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    Hexagon::R0, Hexagon::R1, Hexagon::R2,
    Hexagon::R3, Hexagon::R4, Hexagon::R5
  };
  const unsigned NumArgRegs = std::size(ArgRegs);
  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // RegNum is an index into ArgRegs: skip a register if RegNum is odd.
  if (RegNum != NumArgRegs && RegNum % 2 == 1)
    State.AllocateReg(ArgRegs[RegNum]);

  // Always return false here, as this function only makes sure that the first
  // unallocated register has an even register number and does not actually
  // allocate a register for the current argument.
  return false;
}
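// An illustrative example of CC_SkipOdd's effect (a sketch, not taken from
// the generated CC tables): for f(i32 %a, i64 %b), %a is assigned R0, so the
// first unallocated register is R1. Since index 1 is odd, R1 is burned here
// and %b lands in the even-aligned register pair R3:2.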

#include "HexagonGenCallingConv.inc"


SDValue
HexagonTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG)
      const {
  return SDValue();
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size". Alignment information is
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter. Sometimes what we are copying is the end of a
/// larger object, the part that does not fit in registers.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
  return DAG.getMemcpy(
      Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
      /*isVolatile=*/false, /*AlwaysInline=*/false,
      /*CI=*/nullptr, std::nullopt, MachinePointerInfo(), MachinePointerInfo());
}

bool
HexagonTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext &Context, const Type *RetTy) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);

  if (MF.getSubtarget<HexagonSubtarget>().useHVXOps())
    return CCInfo.CheckReturn(Outs, RetCC_Hexagon_HVX);
  return CCInfo.CheckReturn(Outs, RetCC_Hexagon);
}
// LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
// passed by value, the function prototype is modified to return void and
// the value is stored in memory pointed to by a pointer passed by the caller.
SDValue
HexagonTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                   bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   const SDLoc &dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values of ISD::RET
  if (Subtarget.useHVXOps())
    CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon_HVX);
  else
    CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    SDValue Val = OutVals[i];

    switch (VA.getLocInfo()) {
      default:
        // Loc info must be one of Full, BCvt, SExt, ZExt, or AExt.
        llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full:
        break;
      case CCValAssign::BCvt:
        Val = DAG.getBitcast(VA.getLocVT(), Val);
        break;
      case CCValAssign::SExt:
        Val = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Val);
        break;
      case CCValAssign::ZExt:
        Val = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Val);
        break;
      case CCValAssign::AExt:
        Val = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Val);
        break;
    }

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Val, Glue);

    // Guarantee that all emitted copies are stuck together with flags.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(HexagonISD::RET_GLUE, dl, MVT::Other, RetOps);
}

bool HexagonTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Honor the IR-level tail-call marker: only calls marked "tail" may be
  // emitted as tail calls.
  return CI->isTailCall();
}

Register HexagonTargetLowering::getRegisterByName(
      const char* RegName, LLT VT, const MachineFunction &) const {
  // Handle all of the named registers that can be referenced by name from
  // code; historically only r19 was needed (the Linux kernel uses it).
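  // A hypothetical use (not from this file): the kernel reserves r19 with
  //   register unsigned long __current asm("r19");
  // and a read of such a variable can reach this function via the
  // llvm.read_register intrinsic.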
  Register Reg = StringSwitch<Register>(RegName)
                     .Case("r0", Hexagon::R0)
                     .Case("r1", Hexagon::R1)
                     .Case("r2", Hexagon::R2)
                     .Case("r3", Hexagon::R3)
                     .Case("r4", Hexagon::R4)
                     .Case("r5", Hexagon::R5)
                     .Case("r6", Hexagon::R6)
                     .Case("r7", Hexagon::R7)
                     .Case("r8", Hexagon::R8)
                     .Case("r9", Hexagon::R9)
                     .Case("r10", Hexagon::R10)
                     .Case("r11", Hexagon::R11)
                     .Case("r12", Hexagon::R12)
                     .Case("r13", Hexagon::R13)
                     .Case("r14", Hexagon::R14)
                     .Case("r15", Hexagon::R15)
                     .Case("r16", Hexagon::R16)
                     .Case("r17", Hexagon::R17)
                     .Case("r18", Hexagon::R18)
                     .Case("r19", Hexagon::R19)
                     .Case("r20", Hexagon::R20)
                     .Case("r21", Hexagon::R21)
                     .Case("r22", Hexagon::R22)
                     .Case("r23", Hexagon::R23)
                     .Case("r24", Hexagon::R24)
                     .Case("r25", Hexagon::R25)
                     .Case("r26", Hexagon::R26)
                     .Case("r27", Hexagon::R27)
                     .Case("r28", Hexagon::R28)
                     .Case("r29", Hexagon::R29)
                     .Case("r30", Hexagon::R30)
                     .Case("r31", Hexagon::R31)
                     .Case("r1:0", Hexagon::D0)
                     .Case("r3:2", Hexagon::D1)
                     .Case("r5:4", Hexagon::D2)
                     .Case("r7:6", Hexagon::D3)
                     .Case("r9:8", Hexagon::D4)
                     .Case("r11:10", Hexagon::D5)
                     .Case("r13:12", Hexagon::D6)
                     .Case("r15:14", Hexagon::D7)
                     .Case("r17:16", Hexagon::D8)
                     .Case("r19:18", Hexagon::D9)
                     .Case("r21:20", Hexagon::D10)
                     .Case("r23:22", Hexagon::D11)
                     .Case("r25:24", Hexagon::D12)
                     .Case("r27:26", Hexagon::D13)
                     .Case("r29:28", Hexagon::D14)
                     .Case("r31:30", Hexagon::D15)
                     .Case("sp", Hexagon::R29)
                     .Case("fp", Hexagon::R30)
                     .Case("lr", Hexagon::R31)
                     .Case("p0", Hexagon::P0)
                     .Case("p1", Hexagon::P1)
                     .Case("p2", Hexagon::P2)
                     .Case("p3", Hexagon::P3)
                     .Case("sa0", Hexagon::SA0)
                     .Case("lc0", Hexagon::LC0)
                     .Case("sa1", Hexagon::SA1)
                     .Case("lc1", Hexagon::LC1)
                     .Case("m0", Hexagon::M0)
                     .Case("m1", Hexagon::M1)
                     .Case("usr", Hexagon::USR)
                     .Case("ugp", Hexagon::UGP)
                     .Case("cs0", Hexagon::CS0)
                     .Case("cs1", Hexagon::CS1)
                     .Default(Register());
  return Reg;
}

/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes that
/// Chain/Glue are the input chain/glue to use, and that TheCall is the call
/// being lowered. Returns an SDNode with the same number of values as the
/// ISD::CALL.
SDValue HexagonTargetLowering::LowerCallResult(
    SDValue Chain, SDValue Glue, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    const SmallVectorImpl<SDValue> &OutVals, SDValue Callee) const {
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;

  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  if (Subtarget.useHVXOps())
    CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon_HVX);
  else
    CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    SDValue RetVal;
    if (RVLocs[i].getValVT() == MVT::i1) {
      // Return values of type MVT::i1 require special handling. The reason
      // is that MVT::i1 is associated with the PredRegs register class, but
      // values of that type are still returned in R0. Generate an explicit
      // copy into a predicate register from R0, and treat the value of the
      // predicate register as the call result.
      auto &MRI = DAG.getMachineFunction().getRegInfo();
      SDValue FR0 = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                                       MVT::i32, Glue);
      // FR0 = (Value, Chain, Glue)
      Register PredR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
      SDValue TPR = DAG.getCopyToReg(FR0.getValue(1), dl, PredR,
                                     FR0.getValue(0), FR0.getValue(2));
      // TPR = (Chain, Glue)
      // Don't glue this CopyFromReg, because it copies from a virtual
      // register. If it is glued to the call, InstrEmitter will add it
      // as an implicit def to the call (EmitMachineNode).
      RetVal = DAG.getCopyFromReg(TPR.getValue(0), dl, PredR, MVT::i1);
      Glue = TPR.getValue(1);
      Chain = TPR.getValue(0);
    } else {
      RetVal = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                                  RVLocs[i].getValVT(), Glue);
      Glue = RetVal.getValue(2);
      Chain = RetVal.getValue(1);
    }
    InVals.push_back(RetVal.getValue(0));
  }

  return Chain;
}

/// LowerCall - Function arguments are copied from virtual registers to
/// (physical regs)/(stack frame); CALLSEQ_START and CALLSEQ_END are emitted.
SDValue
HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  bool DoesNotReturn = CLI.DoesNotReturn;

  bool IsStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  unsigned NumParams = CLI.CB ? CLI.CB->getFunctionType()->getNumParams() : 0;
  if (GlobalAddressSDNode *GAN = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(GAN->getGlobal(), dl, MVT::i32);

  // Linux ABI treats var-arg calls the same way as regular ones.
  bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs, *DAG.getContext(),
                        NumParams);

  if (Subtarget.useHVXOps())
    CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_HVX);
  else if (DisableArgsMinAlignment)
    CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_Legacy);
  else
    CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);

  if (CLI.IsTailCall) {
    bool StructAttrFlag = MF.getFunction().hasStructRetAttr();
    CLI.IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                        IsVarArg, IsStructRet, StructAttrFlag, Outs,
                        OutVals, Ins, DAG);
    for (const CCValAssign &VA : ArgLocs) {
      if (VA.isMemLoc()) {
        CLI.IsTailCall = false;
        break;
      }
    }
    LLVM_DEBUG(dbgs() << (CLI.IsTailCall ? "Eligible for Tail Call\n"
                                         : "Argument must be passed on stack. "
                                           "Not eligible for Tail Call\n"));
  }
  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getStackSize();
  SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
  SDValue StackPtr =
      DAG.getCopyFromReg(Chain, dl, HRI.getStackRegister(), PtrVT);

  bool NeedsArgAlign = false;
  Align LargestAlignSeen;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    // Record if we need > 8 byte alignment on an argument.
    bool ArgAlign = Subtarget.isHVXVectorType(VA.getValVT());
    NeedsArgAlign |= ArgAlign;

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
      default:
        // Loc info must be one of Full, BCvt, SExt, ZExt, or AExt.
        llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full:
        break;
      case CCValAssign::BCvt:
        Arg = DAG.getBitcast(VA.getLocVT(), Arg);
        break;
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::AExt:
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
        break;
    }

    if (VA.isMemLoc()) {
      unsigned LocMemOffset = VA.getLocMemOffset();
      SDValue MemAddr = DAG.getConstant(LocMemOffset, dl,
                                        StackPtr.getValueType());
      MemAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, MemAddr);
      if (ArgAlign)
        LargestAlignSeen = std::max(
            LargestAlignSeen, Align(VA.getLocVT().getStoreSizeInBits() / 8));
      if (Flags.isByVal()) {
        // The argument is a struct passed by value. According to LLVM, "Arg"
        // is a pointer.
        MemOpChains.push_back(CreateCopyOfByValArgument(Arg, MemAddr, Chain,
                                                        Flags, DAG, dl));
      } else {
        MachinePointerInfo LocPI = MachinePointerInfo::getStack(
            DAG.getMachineFunction(), LocMemOffset);
        SDValue S = DAG.getStore(Chain, dl, Arg, MemAddr, LocPI);
        MemOpChains.push_back(S);
      }
      continue;
    }

    // Arguments that are passed in registers are collected in the RegsToPass
    // vector.
    if (VA.isRegLoc())
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
  }

  if (NeedsArgAlign && Subtarget.hasV60Ops()) {
    LLVM_DEBUG(dbgs() << "Function needs byte stack align due to call args\n");
    Align VecAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
    LargestAlignSeen = std::max(LargestAlignSeen, VecAlign);
    MFI.ensureMaxAlignment(LargestAlignSeen);
  }
  // Transform all store nodes into one single node because all store
  // nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  SDValue Glue;
  if (!CLI.IsTailCall) {
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
    Glue = Chain.getValue(1);
  }

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The Glue is necessary since all emitted instructions must be
  // stuck together.
  if (!CLI.IsTailCall) {
    for (const auto &R : RegsToPass) {
      Chain = DAG.getCopyToReg(Chain, dl, R.first, R.second, Glue);
      Glue = Chain.getValue(1);
    }
  } else {
    // For tail calls lower the arguments to the 'real' stack slot.
    //
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.
    //
    // Do not flag preceding copytoreg stuff together with the following stuff.
    Glue = SDValue();
    for (const auto &R : RegsToPass) {
      Chain = DAG.getCopyToReg(Chain, dl, R.first, R.second, Glue);
      Glue = Chain.getValue(1);
    }
    Glue = SDValue();
  }

  bool LongCalls = MF.getSubtarget<HexagonSubtarget>().useLongCalls();
  unsigned Flags = LongCalls ? HexagonII::HMOTF_ConstExtended : 0;

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, PtrVT, 0, Flags);
  } else if (ExternalSymbolSDNode *S =
             dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, Flags);
  }

  // Returns a chain & a flag for retval copy to use.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (const auto &R : RegsToPass)
    Ops.push_back(DAG.getRegister(R.first, R.second.getValueType()));

  const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (Glue.getNode())
    Ops.push_back(Glue);

  if (CLI.IsTailCall) {
    MFI.setHasTailCall();
    return DAG.getNode(HexagonISD::TC_RETURN, dl, MVT::Other, Ops);
  }

  // Set this here because we need to know this for "hasFP" in frame lowering.
  // The target-independent code calls getFrameRegister before setting it, and
  // getFrameRegister uses hasFP to determine whether the function has FP.
  MFI.setHasCalls(true);

  unsigned OpCode = DoesNotReturn ? HexagonISD::CALLnr : HexagonISD::CALL;
  Chain = DAG.getNode(OpCode, dl, {MVT::Other, MVT::Glue}, Ops);
  Glue = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, Glue, dl);
  Glue = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, Glue, CallConv, IsVarArg, Ins, dl, DAG,
                         InVals, OutVals, Callee);
}

/// Returns true if this node can be combined with a load/store to form a
/// post-indexed load/store; on success, the base pointer, the offset, and the
/// addressing mode are returned by reference.
bool HexagonTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
      SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM,
      SelectionDAG &DAG) const {
  LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(N);
  if (!LSN)
    return false;
  EVT VT = LSN->getMemoryVT();
  if (!VT.isSimple())
    return false;
  bool IsLegalType = VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
                     VT == MVT::i64 || VT == MVT::f32 || VT == MVT::f64 ||
                     VT == MVT::v2i16 || VT == MVT::v2i32 || VT == MVT::v4i8 ||
                     VT == MVT::v4i16 || VT == MVT::v8i8 ||
                     Subtarget.isHVXVectorType(VT.getSimpleVT());
  if (!IsLegalType)
    return false;

  if (Op->getOpcode() != ISD::ADD)
    return false;
  Base = Op->getOperand(0);
  Offset = Op->getOperand(1);
  if (!isa<ConstantSDNode>(Offset.getNode()))
    return false;
  AM = ISD::POST_INC;

  int32_t V = cast<ConstantSDNode>(Offset.getNode())->getSExtValue();
  return Subtarget.getInstrInfo()->isValidAutoIncImm(VT, V);
}
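// For instance (an illustrative sketch): an i32 load from %p followed by
// "%p.next = add %p, 4" passes the checks above and can be selected as a
// single post-increment load such as "r1 = memw(r0++#4)".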

SDValue HexagonTargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
  if (DAG.getMachineFunction().getFunction().hasOptSize())
    return SDValue();
  else
    return Op;
}

SDValue
HexagonTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
  unsigned LR = HRI.getRARegister();

  if ((Op.getOpcode() != ISD::INLINEASM &&
       Op.getOpcode() != ISD::INLINEASM_BR) || HMFI.hasClobberLR())
    return Op;

  unsigned NumOps = Op.getNumOperands();
  if (Op.getOperand(NumOps-1).getValueType() == MVT::Glue)
    --NumOps;  // Ignore the flag operand.

  for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
    const InlineAsm::Flag Flags(Op.getConstantOperandVal(i));
    unsigned NumVals = Flags.getNumOperandRegisters();
    ++i;  // Skip the ID value.

    switch (Flags.getKind()) {
      default:
        llvm_unreachable("Bad flags!");
      case InlineAsm::Kind::RegUse:
      case InlineAsm::Kind::Imm:
      case InlineAsm::Kind::Mem:
        i += NumVals;
        break;
      case InlineAsm::Kind::Clobber:
      case InlineAsm::Kind::RegDef:
      case InlineAsm::Kind::RegDefEarlyClobber: {
        for (; NumVals; --NumVals, ++i) {
          Register Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg();
          if (Reg != LR)
            continue;
          HMFI.setHasClobberLR(true);
          return Op;
        }
        break;
      }
    }
  }

  return Op;
}

// Need to transform ISD::PREFETCH into something that doesn't inherit
// all of the properties of ISD::PREFETCH, specifically SDNPMayLoad and
// SDNPMayStore.
SDValue HexagonTargetLowering::LowerPREFETCH(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Addr = Op.getOperand(1);
  // Lower it to DCFETCH($reg, #0). A "pat" will try to merge the offset in,
  // if the "reg" is fed by an "add".
  SDLoc DL(Op);
  SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
  return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
}
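// As a sketch of the intended selection: (prefetch (add r0, #32)) first
// becomes (DCFETCH (add r0, #32), #0) here, and the pattern can then fold the
// add, yielding a single "dcfetch(r0+#32)" instruction.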

// Custom-handle ISD::READCYCLECOUNTER because the target-independent SDNode
// is marked as having side-effects, while the register read on Hexagon does
// not have any. TableGen refuses to accept the direct pattern from that node
// to the A4_tfrcpp.
SDValue HexagonTargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDLoc dl(Op);
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
  return DAG.getNode(HexagonISD::READCYCLE, dl, VTs, Chain);
}

// Custom-handle ISD::READSTEADYCOUNTER because the target-independent SDNode
// is marked as having side-effects, while the register read on Hexagon does
// not have any. TableGen refuses to accept the direct pattern from that node
// to the A4_tfrcpp.
SDValue HexagonTargetLowering::LowerREADSTEADYCOUNTER(SDValue Op,
                                                      SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDLoc dl(Op);
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
  return DAG.getNode(HexagonISD::READTIMER, dl, VTs, Chain);
}

SDValue HexagonTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  unsigned IntNo = Op.getConstantOperandVal(1);
  // Lower the hexagon_prefetch builtin to DCFETCH, as above.
  if (IntNo == Intrinsic::hexagon_prefetch) {
    SDValue Addr = Op.getOperand(2);
    SDLoc DL(Op);
    SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
    return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
  }
  return SDValue();
}

SDValue
HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDValue Align = Op.getOperand(2);
  SDLoc dl(Op);

  ConstantSDNode *AlignConst = dyn_cast<ConstantSDNode>(Align);
  assert(AlignConst && "Non-constant Align in LowerDYNAMIC_STACKALLOC");

  unsigned A = AlignConst->getSExtValue();
  auto &HFI = *Subtarget.getFrameLowering();
  // "Zero" means natural stack alignment.
  if (A == 0)
    A = HFI.getStackAlign().value();

  LLVM_DEBUG({
    dbgs() << __func__ << " Align: " << A << " Size: ";
    Size.getNode()->dump(&DAG);
    dbgs() << "\n";
  });

  SDValue AC = DAG.getConstant(A, dl, MVT::i32);
  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
  SDValue AA = DAG.getNode(HexagonISD::ALLOCA, dl, VTs, Chain, Size, AC);

  DAG.ReplaceAllUsesOfValueWith(Op, AA);
  return AA;
}
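// For example (illustrative): "%p = alloca i8, i32 %n, align 32" reaches this
// function with Align = 32 and is rewritten into
// (HexagonISD::ALLOCA chain, %n, #32); an Align of 0 would have been replaced
// by the natural stack alignment above.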

SDValue HexagonTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Linux ABI treats var-arg calls the same way as regular ones.
  bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs,
                        *DAG.getContext(),
                        MF.getFunction().getFunctionType()->getNumParams());

  if (Subtarget.useHVXOps())
    CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_HVX);
  else if (DisableArgsMinAlignment)
    CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_Legacy);
  else
    CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);

  // For LLVM, when a struct is returned by value (>8 bytes), the first
  // argument is a pointer to the location in the caller's stack where the
  // return value will be stored. For Hexagon, such an address is passed to
  // the callee only when the struct is larger than 8 bytes; if the struct is
  // 8 bytes or smaller, no address is passed and the callee returns the
  // result directly through R0/R1.
  auto NextSingleReg = [] (const TargetRegisterClass &RC, unsigned Reg) {
    switch (RC.getID()) {
      case Hexagon::IntRegsRegClassID:
        return Reg - Hexagon::R0 + 1;
      case Hexagon::DoubleRegsRegClassID:
        return (Reg - Hexagon::D0 + 1) * 2;
      case Hexagon::HvxVRRegClassID:
        return Reg - Hexagon::V0 + 1;
      case Hexagon::HvxWRRegClassID:
        return (Reg - Hexagon::W0 + 1) * 2;
    }
    llvm_unreachable("Unexpected register class");
  };
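  // For example (illustrative): an argument in D1 (the pair R3:2) gives
  // NextSingleReg = (1 - 0 + 1) * 2 = 4, i.e. the next single register to be
  // saved for varargs is R4.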

  auto &HFL = const_cast<HexagonFrameLowering&>(*Subtarget.getFrameLowering());
  auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
  HFL.FirstVarArgSavedReg = 0;
  HMFI.setFirstNamedArgFrameIndex(-int(MFI.getNumFixedObjects()));

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Ins[i].Flags;
    bool ByVal = Flags.isByVal();

    // Arguments passed in registers:
    // 1. 32- and 64-bit values and HVX vectors are passed directly,
    // 2. Large structs are passed via an address, and the address is
    //    passed in a register.
    if (VA.isRegLoc() && ByVal && Flags.getByValSize() <= 8)
      llvm_unreachable("ByValSize must be bigger than 8 bytes");

    bool InReg = VA.isRegLoc() &&
                 (!ByVal || (ByVal && Flags.getByValSize() > 8));

    if (InReg) {
      MVT RegVT = VA.getLocVT();
      if (VA.getLocInfo() == CCValAssign::BCvt)
        RegVT = VA.getValVT();

      const TargetRegisterClass *RC = getRegClassFor(RegVT);
      Register VReg = MRI.createVirtualRegister(RC);
      SDValue Copy = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);

      // Treat values of type MVT::i1 specially: they are passed in
      // registers of type i32, but they need to remain as values of
      // type i1 for consistency of the argument lowering.
      if (VA.getValVT() == MVT::i1) {
        assert(RegVT.getSizeInBits() <= 32);
        SDValue T = DAG.getNode(ISD::AND, dl, RegVT,
                                Copy, DAG.getConstant(1, dl, RegVT));
        Copy = DAG.getSetCC(dl, MVT::i1, T, DAG.getConstant(0, dl, RegVT),
                            ISD::SETNE);
      } else {
#ifndef NDEBUG
        unsigned RegSize = RegVT.getSizeInBits();
        assert(RegSize == 32 || RegSize == 64 ||
               Subtarget.isHVXVectorType(RegVT));
#endif
      }
      InVals.push_back(Copy);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      HFL.FirstVarArgSavedReg = NextSingleReg(*RC, VA.getLocReg());
    } else {
      assert(VA.isMemLoc() && "Argument should be passed in memory");

      // If it's a byval parameter, then we need to compute the
      // "real" size, not the size of the pointer.
      unsigned ObjSize = Flags.isByVal()
                            ? Flags.getByValSize()
                            : VA.getLocVT().getStoreSizeInBits() / 8;

      // Create the frame index object for this incoming parameter.
      int Offset = HEXAGON_LRFP_SIZE + VA.getLocMemOffset();
      int FI = MFI.CreateFixedObject(ObjSize, Offset, true);
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);

      if (Flags.isByVal()) {
        // If it's a pass-by-value aggregate, then do not dereference the stack
        // location. Instead, we should generate a reference to the stack
        // location.
        InVals.push_back(FIN);
      } else {
        SDValue L = DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
                                MachinePointerInfo::getFixedStack(MF, FI, 0));
        InVals.push_back(L);
      }
    }
  }

  if (IsVarArg && Subtarget.isEnvironmentMusl()) {
    for (int i = HFL.FirstVarArgSavedReg; i < 6; i++)
      MRI.addLiveIn(Hexagon::R0+i);
  }

  if (IsVarArg && Subtarget.isEnvironmentMusl()) {
    HMFI.setFirstNamedArgFrameIndex(HMFI.getFirstNamedArgFrameIndex() - 1);
    HMFI.setLastNamedArgFrameIndex(-int(MFI.getNumFixedObjects()));

    // Create Frame index for the start of register saved area.
    int NumVarArgRegs = 6 - HFL.FirstVarArgSavedReg;
    bool RequiresPadding = (NumVarArgRegs & 1);
    int RegSaveAreaSizePlusPadding = RequiresPadding
                                        ? (NumVarArgRegs + 1) * 4
                                        : NumVarArgRegs * 4;

    if (RegSaveAreaSizePlusPadding > 0) {
      // The offset to the saved register area should be 8-byte aligned; round
      // it up to the next multiple of 8 if it is not.
      int RegAreaStart = HEXAGON_LRFP_SIZE + CCInfo.getStackSize();
      if (RegAreaStart % 8)
        RegAreaStart = (RegAreaStart + 7) & -8;

      int RegSaveAreaFrameIndex =
          MFI.CreateFixedObject(RegSaveAreaSizePlusPadding, RegAreaStart, true);
      HMFI.setRegSavedAreaStartFrameIndex(RegSaveAreaFrameIndex);

      // This will point to the next argument passed via stack.
      int Offset = RegAreaStart + RegSaveAreaSizePlusPadding;
      int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
      HMFI.setVarArgsFrameIndex(FI);
    } else {
      // This will point to the next argument passed via stack, when
      // there is no saved register area.
      int Offset = HEXAGON_LRFP_SIZE + CCInfo.getStackSize();
      int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
      HMFI.setRegSavedAreaStartFrameIndex(FI);
      HMFI.setVarArgsFrameIndex(FI);
    }
  }


  if (IsVarArg && !Subtarget.isEnvironmentMusl()) {
    // This will point to the next argument passed via stack.
    int Offset = HEXAGON_LRFP_SIZE + CCInfo.getStackSize();
    int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
    HMFI.setVarArgsFrameIndex(FI);
  }

  return Chain;
}

SDValue
HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  // VASTART stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
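  // Under Musl, va_list is a three-pointer structure; a sketch of the layout
  // assumed by the three stores below (field names are illustrative only):
  //   struct __va_list {
  //     void *CurrentSavedRegAreaPointer; // next slot in the reg-save area
  //     void *SavedRegAreaEndPointer;     // end of the reg-save area
  //     void *OverflowAreaPointer;        // next argument passed on the stack
  //   };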
  MachineFunction &MF = DAG.getMachineFunction();
  HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  if (!Subtarget.isEnvironmentMusl()) {
    return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr, Op.getOperand(1),
                        MachinePointerInfo(SV));
  }
  auto &FuncInfo = *MF.getInfo<HexagonMachineFunctionInfo>();
  auto &HFL = *Subtarget.getFrameLowering();
  SDLoc DL(Op);
  SmallVector<SDValue, 8> MemOps;

  // Get frame index of va_list.
  SDValue FIN = Op.getOperand(1);

  // If the first vararg register is odd, add 4 bytes to the start of the
  // saved register area to point to the first register location. This is
  // because the saved register area has to be 8-byte aligned, so an odd start
  // register leaves 4 bytes of padding at the beginning of the area. If all
  // registers are used up, the following condition will handle it correctly.
  SDValue SavedRegAreaStartFrameIndex =
      DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(), MVT::i32);

  auto PtrVT = getPointerTy(DAG.getDataLayout());

  if (HFL.FirstVarArgSavedReg & 1)
    SavedRegAreaStartFrameIndex =
        DAG.getNode(ISD::ADD, DL, PtrVT,
                    DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(),
                                      MVT::i32),
                    DAG.getIntPtrConstant(4, DL));

  // Store the saved register area start pointer.
  SDValue Store =
      DAG.getStore(Op.getOperand(0), DL,
                   SavedRegAreaStartFrameIndex,
                   FIN, MachinePointerInfo(SV));
  MemOps.push_back(Store);

  // Store saved register area end pointer.
  FIN = DAG.getNode(ISD::ADD, DL, PtrVT,
                    FIN, DAG.getIntPtrConstant(4, DL));
  Store = DAG.getStore(Op.getOperand(0), DL,
                       DAG.getFrameIndex(FuncInfo.getVarArgsFrameIndex(),
                                         PtrVT),
                       FIN, MachinePointerInfo(SV, 4));
  MemOps.push_back(Store);

  // Store overflow area pointer.
  FIN = DAG.getNode(ISD::ADD, DL, PtrVT,
                    FIN, DAG.getIntPtrConstant(4, DL));
  Store = DAG.getStore(Op.getOperand(0), DL,
                       DAG.getFrameIndex(FuncInfo.getVarArgsFrameIndex(),
                                         PtrVT),
                       FIN, MachinePointerInfo(SV, 8));
  MemOps.push_back(Store);

  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}

SDValue
HexagonTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
  // Assert that the linux ABI is enabled for the current compilation.
  assert(Subtarget.isEnvironmentMusl() && "Linux ABI should be enabled");
  SDValue Chain = Op.getOperand(0);
  SDValue DestPtr = Op.getOperand(1);
  SDValue SrcPtr = Op.getOperand(2);
  const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  SDLoc DL(Op);
  // Size of the va_list is 12 bytes as it has 3 pointers. Therefore,
  // we need to memcopy 12 bytes from va_list to another similar list.
  return DAG.getMemcpy(
      Chain, DL, DestPtr, SrcPtr, DAG.getIntPtrConstant(12, DL), Align(4),
      /*isVolatile*/ false, false, /*CI=*/nullptr, std::nullopt,
      MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
}

SDValue HexagonTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  const SDLoc &dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  MVT ResTy = ty(Op);
  MVT OpTy = ty(LHS);

  if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
    MVT ElemTy = OpTy.getVectorElementType();
    assert(ElemTy.isScalarInteger());
    MVT WideTy = MVT::getVectorVT(MVT::getIntegerVT(2*ElemTy.getSizeInBits()),
                                  OpTy.getVectorNumElements());
    return DAG.getSetCC(dl, ResTy,
                        DAG.getSExtOrTrunc(LHS, SDLoc(LHS), WideTy),
                        DAG.getSExtOrTrunc(RHS, SDLoc(RHS), WideTy), CC);
  }

  // Treat all other vector types as legal.
  if (ResTy.isVector())
    return Op;

  // Comparisons of short integers should use sign-extend, not zero-extend,
  // since we can represent small negative values in the compare instructions.
  // The LLVM default is to use zero-extend arbitrarily in these cases.
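  // For example (an illustrative case): in "setcc (i16 %x), -10, slt" the
  // negative constant makes IsNegative true below, so both operands are
  // sign-extended to i32 and compared with an ordinary 32-bit compare.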
  auto isSExtFree = [this](SDValue N) {
    switch (N.getOpcode()) {
      case ISD::TRUNCATE: {
        // A sign-extend of a truncate of a sign-extend is free.
        SDValue Op = N.getOperand(0);
        if (Op.getOpcode() != ISD::AssertSext)
          return false;
        EVT OrigTy = cast<VTSDNode>(Op.getOperand(1))->getVT();
        unsigned ThisBW = ty(N).getSizeInBits();
        unsigned OrigBW = OrigTy.getSizeInBits();
        // The type that was sign-extended to get the AssertSext must be
        // narrower than the type of N (so that N has still the same value
        // as the original).
        return ThisBW >= OrigBW;
      }
      case ISD::LOAD:
        // We have sign-extended loads.
        return true;
    }
    return false;
  };

  if (OpTy == MVT::i8 || OpTy == MVT::i16) {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS);
    bool IsNegative = C && C->getAPIntValue().isNegative();
    if (IsNegative || isSExtFree(LHS) || isSExtFree(RHS))
      return DAG.getSetCC(dl, ResTy,
                          DAG.getSExtOrTrunc(LHS, SDLoc(LHS), MVT::i32),
                          DAG.getSExtOrTrunc(RHS, SDLoc(RHS), MVT::i32), CC);
  }

  return SDValue();
}
| 1099 | |
| 1100 | SDValue |
| 1101 | HexagonTargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const { |
| 1102 | SDValue PredOp = Op.getOperand(i: 0); |
| 1103 | SDValue Op1 = Op.getOperand(i: 1), Op2 = Op.getOperand(i: 2); |
| 1104 | MVT OpTy = ty(Op: Op1); |
| 1105 | const SDLoc &dl(Op); |
| 1106 | |
| 1107 | if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) { |
| 1108 | MVT ElemTy = OpTy.getVectorElementType(); |
| 1109 | assert(ElemTy.isScalarInteger()); |
| 1110 | MVT WideTy = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: 2*ElemTy.getSizeInBits()), |
| 1111 | NumElements: OpTy.getVectorNumElements()); |
| 1112 | // Generate (trunc (select (_, sext, sext))). |
| 1113 | return DAG.getSExtOrTrunc( |
| 1114 | Op: DAG.getSelect(DL: dl, VT: WideTy, Cond: PredOp, |
| 1115 | LHS: DAG.getSExtOrTrunc(Op: Op1, DL: dl, VT: WideTy), |
| 1116 | RHS: DAG.getSExtOrTrunc(Op: Op2, DL: dl, VT: WideTy)), |
| 1117 | DL: dl, VT: OpTy); |
| 1118 | } |
| 1119 | |
| 1120 | return SDValue(); |
| 1121 | } |
| 1122 | |
| 1123 | SDValue |
| 1124 | HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const { |
| 1125 | EVT ValTy = Op.getValueType(); |
| 1126 | ConstantPoolSDNode *CPN = cast<ConstantPoolSDNode>(Val&: Op); |
| 1127 | Constant *CVal = nullptr; |
| 1128 | bool isVTi1Type = false; |
| 1129 | if (auto *CV = dyn_cast<ConstantVector>(Val: CPN->getConstVal())) { |
| 1130 | if (cast<VectorType>(Val: CV->getType())->getElementType()->isIntegerTy(Bitwidth: 1)) { |
| 1131 | IRBuilder<> IRB(CV->getContext()); |
| 1132 | SmallVector<Constant*, 128> NewConst; |
| 1133 | unsigned VecLen = CV->getNumOperands(); |
| 1134 | assert(isPowerOf2_32(VecLen) && |
| 1135 | "conversion only supported for pow2 VectorSize" ); |
| 1136 | for (unsigned i = 0; i < VecLen; ++i) |
| 1137 | NewConst.push_back(Elt: IRB.getInt8(C: CV->getOperand(i_nocapture: i)->isZeroValue())); |
| 1138 | |
| 1139 | CVal = ConstantVector::get(V: NewConst); |
| 1140 | isVTi1Type = true; |
| 1141 | } |
| 1142 | } |
| 1143 | Align Alignment = CPN->getAlign(); |
| 1144 | bool IsPositionIndependent = isPositionIndependent(); |
| 1145 | unsigned char TF = IsPositionIndependent ? HexagonII::MO_PCREL : 0; |
| 1146 | |
| 1147 | unsigned Offset = 0; |
| 1148 | SDValue T; |
| 1149 | if (CPN->isMachineConstantPoolEntry()) |
| 1150 | T = DAG.getTargetConstantPool(C: CPN->getMachineCPVal(), VT: ValTy, Align: Alignment, |
| 1151 | Offset, TargetFlags: TF); |
| 1152 | else if (isVTi1Type) |
| 1153 | T = DAG.getTargetConstantPool(C: CVal, VT: ValTy, Align: Alignment, Offset, TargetFlags: TF); |
| 1154 | else |
| 1155 | T = DAG.getTargetConstantPool(C: CPN->getConstVal(), VT: ValTy, Align: Alignment, Offset, |
| 1156 | TargetFlags: TF); |
| 1157 | |
| 1158 | assert(cast<ConstantPoolSDNode>(T)->getTargetFlags() == TF && |
| 1159 | "Inconsistent target flag encountered" ); |
| 1160 | |
| 1161 | if (IsPositionIndependent) |
| 1162 | return DAG.getNode(Opcode: HexagonISD::AT_PCREL, DL: SDLoc(Op), VT: ValTy, Operand: T); |
| 1163 | return DAG.getNode(Opcode: HexagonISD::CP, DL: SDLoc(Op), VT: ValTy, Operand: T); |
| 1164 | } |
| 1165 | |
| 1166 | SDValue |
| 1167 | HexagonTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { |
| 1168 | EVT VT = Op.getValueType(); |
| 1169 | int Idx = cast<JumpTableSDNode>(Val&: Op)->getIndex(); |
| 1170 | if (isPositionIndependent()) { |
| 1171 | SDValue T = DAG.getTargetJumpTable(JTI: Idx, VT, TargetFlags: HexagonII::MO_PCREL); |
| 1172 | return DAG.getNode(Opcode: HexagonISD::AT_PCREL, DL: SDLoc(Op), VT, Operand: T); |
| 1173 | } |
| 1174 | |
| 1175 | SDValue T = DAG.getTargetJumpTable(JTI: Idx, VT); |
| 1176 | return DAG.getNode(Opcode: HexagonISD::JT, DL: SDLoc(Op), VT, Operand: T); |
| 1177 | } |
| 1178 | |
| 1179 | SDValue |
| 1180 | HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const { |
| 1181 | const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo(); |
| 1182 | MachineFunction &MF = DAG.getMachineFunction(); |
| 1183 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 1184 | MFI.setReturnAddressIsTaken(true); |
| 1185 | |
| 1186 | if (verifyReturnAddressArgumentIsConstant(Op, DAG)) |
| 1187 | return SDValue(); |
| 1188 | |
| 1189 | EVT VT = Op.getValueType(); |
| 1190 | SDLoc dl(Op); |
| 1191 | unsigned Depth = Op.getConstantOperandVal(i: 0); |
| 1192 | if (Depth) { |
| 1193 | SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); |
| 1194 | SDValue Offset = DAG.getConstant(Val: 4, DL: dl, VT: MVT::i32); |
| 1195 | return DAG.getLoad(VT, dl, Chain: DAG.getEntryNode(), |
| 1196 | Ptr: DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: FrameAddr, N2: Offset), |
| 1197 | PtrInfo: MachinePointerInfo()); |
| 1198 | } |
| 1199 | |
| 1200 | // Return LR, which contains the return address. Mark it an implicit live-in. |
| 1201 | Register Reg = MF.addLiveIn(PReg: HRI.getRARegister(), RC: getRegClassFor(VT: MVT::i32)); |
| 1202 | return DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg, VT); |
| 1203 | } |
| 1204 | |
| 1205 | SDValue |
| 1206 | HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { |
| 1207 | const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo(); |
| 1208 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); |
| 1209 | MFI.setFrameAddressIsTaken(true); |
| 1210 | |
| 1211 | EVT VT = Op.getValueType(); |
| 1212 | SDLoc dl(Op); |
| 1213 | unsigned Depth = Op.getConstantOperandVal(i: 0); |
| 1214 | SDValue FrameAddr = DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, |
| 1215 | Reg: HRI.getFrameRegister(), VT); |
| 1216 | while (Depth--) |
| 1217 | FrameAddr = DAG.getLoad(VT, dl, Chain: DAG.getEntryNode(), Ptr: FrameAddr, |
| 1218 | PtrInfo: MachinePointerInfo()); |
| 1219 | return FrameAddr; |
| 1220 | } |
| 1221 | |
| 1222 | SDValue |
| 1223 | HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const { |
| 1224 | SDLoc dl(Op); |
| 1225 | return DAG.getNode(Opcode: HexagonISD::BARRIER, DL: dl, VT: MVT::Other, Operand: Op.getOperand(i: 0)); |
| 1226 | } |
| 1227 | |
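|  | // Illustrative outcomes (the final instruction choice is up to selection): |
|  | //   static + small data:  CONST32_GP (GP-relative access) |
|  | //   static, otherwise:    CONST32    (absolute ##sym immediate) |
|  | //   PIC, DSO-local:       AT_PCREL   (PC-relative address) |
|  | //   PIC, preemptible:     AT_GOT     (load of the GOT entry, plus offset) |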
| 1228 | SDValue |
| 1229 | HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const { |
| 1230 | SDLoc dl(Op); |
| 1231 | auto *GAN = cast<GlobalAddressSDNode>(Val&: Op); |
| 1232 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 1233 | auto *GV = GAN->getGlobal(); |
| 1234 | int64_t Offset = GAN->getOffset(); |
| 1235 | |
| 1236 | auto &HLOF = *HTM.getObjFileLowering(); |
| 1237 | Reloc::Model RM = HTM.getRelocationModel(); |
| 1238 | |
| 1239 | if (RM == Reloc::Static) { |
| 1240 | SDValue GA = DAG.getTargetGlobalAddress(GV, DL: dl, VT: PtrVT, offset: Offset); |
| 1241 | const GlobalObject *GO = GV->getAliaseeObject(); |
| 1242 | if (GO && Subtarget.useSmallData() && HLOF.isGlobalInSmallSection(GO, TM: HTM)) |
| 1243 | return DAG.getNode(Opcode: HexagonISD::CONST32_GP, DL: dl, VT: PtrVT, Operand: GA); |
| 1244 | return DAG.getNode(Opcode: HexagonISD::CONST32, DL: dl, VT: PtrVT, Operand: GA); |
| 1245 | } |
| 1246 | |
| 1247 | bool UsePCRel = getTargetMachine().shouldAssumeDSOLocal(GV); |
| 1248 | if (UsePCRel) { |
| 1249 | SDValue GA = DAG.getTargetGlobalAddress(GV, DL: dl, VT: PtrVT, offset: Offset, |
| 1250 | TargetFlags: HexagonII::MO_PCREL); |
| 1251 | return DAG.getNode(Opcode: HexagonISD::AT_PCREL, DL: dl, VT: PtrVT, Operand: GA); |
| 1252 | } |
| 1253 | |
| 1254 | // Use GOT index. |
| 1255 | SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(VT: PtrVT); |
| 1256 | SDValue GA = DAG.getTargetGlobalAddress(GV, DL: dl, VT: PtrVT, offset: 0, TargetFlags: HexagonII::MO_GOT); |
| 1257 | SDValue Off = DAG.getConstant(Val: Offset, DL: dl, VT: MVT::i32); |
| 1258 | return DAG.getNode(Opcode: HexagonISD::AT_GOT, DL: dl, VT: PtrVT, N1: GOT, N2: GA, N3: Off); |
| 1259 | } |
| 1260 | |
| 1261 | // Lower a BlockAddress node to CONST32_GP (static) or AT_PCREL (PIC). |
| 1262 | SDValue |
| 1263 | HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { |
| 1264 | const BlockAddress *BA = cast<BlockAddressSDNode>(Val&: Op)->getBlockAddress(); |
| 1265 | SDLoc dl(Op); |
| 1266 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 1267 | |
| 1268 | Reloc::Model RM = HTM.getRelocationModel(); |
| 1269 | if (RM == Reloc::Static) { |
| 1270 | SDValue A = DAG.getTargetBlockAddress(BA, VT: PtrVT); |
| 1271 | return DAG.getNode(Opcode: HexagonISD::CONST32_GP, DL: dl, VT: PtrVT, Operand: A); |
| 1272 | } |
| 1273 | |
| 1274 | SDValue A = DAG.getTargetBlockAddress(BA, VT: PtrVT, Offset: 0, TargetFlags: HexagonII::MO_PCREL); |
| 1275 | return DAG.getNode(Opcode: HexagonISD::AT_PCREL, DL: dl, VT: PtrVT, Operand: A); |
| 1276 | } |
| 1277 | |
| 1278 | SDValue |
| 1279 | HexagonTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) |
| 1280 | const { |
| 1281 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 1282 | SDValue GOTSym = DAG.getTargetExternalSymbol(HEXAGON_GOT_SYM_NAME, VT: PtrVT, |
| 1283 | TargetFlags: HexagonII::MO_PCREL); |
| 1284 | return DAG.getNode(Opcode: HexagonISD::AT_PCREL, DL: SDLoc(Op), VT: PtrVT, Operand: GOTSym); |
| 1285 | } |
| 1286 | |
| 1287 | SDValue |
| 1288 | HexagonTargetLowering::GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain, |
| 1289 | GlobalAddressSDNode *GA, SDValue Glue, EVT PtrVT, unsigned ReturnReg, |
| 1290 | unsigned char OperandFlags) const { |
| 1291 | MachineFunction &MF = DAG.getMachineFunction(); |
| 1292 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 1293 | SDVTList NodeTys = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue); |
| 1294 | SDLoc dl(GA); |
| 1295 | SDValue TGA = DAG.getTargetGlobalAddress(GV: GA->getGlobal(), DL: dl, |
| 1296 | VT: GA->getValueType(ResNo: 0), |
| 1297 | offset: GA->getOffset(), |
| 1298 | TargetFlags: OperandFlags); |
| 1299 | // Create the operands for the call. The operands should be the following: |
| 1300 | // 1. Chain SDValue |
| 1301 | // 2. Callee, which in this case is the global address value. |
| 1302 | // 3. Registers live into the call. In this case it is R0, as we |
| 1303 | // have just one argument to be passed. |
| 1304 | // 4. Glue. |
| 1305 | // Note: The order is important. |
| 1306 | |
| 1307 | const auto &HRI = *Subtarget.getRegisterInfo(); |
| 1308 | const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallingConv::C); |
| 1309 | assert(Mask && "Missing call preserved mask for calling convention" ); |
| 1310 | SDValue Ops[] = { Chain, TGA, DAG.getRegister(Reg: Hexagon::R0, VT: PtrVT), |
| 1311 | DAG.getRegisterMask(RegMask: Mask), Glue }; |
| 1312 | Chain = DAG.getNode(Opcode: HexagonISD::CALL, DL: dl, VTList: NodeTys, Ops); |
| 1313 | |
| 1314 | // Inform MFI that function has calls. |
| 1315 | MFI.setAdjustsStack(true); |
| 1316 | |
| 1317 | Glue = Chain.getValue(R: 1); |
| 1318 | return DAG.getCopyFromReg(Chain, dl, Reg: ReturnReg, VT: PtrVT, Glue); |
| 1319 | } |
| 1320 | |
| 1321 | // |
| 1322 | // Lower using the initial executable model for TLS addresses |
| 1323 | // |
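|  | // Illustrative non-PIC sequence (assembly syntax is approximate): |
|  | //   r0 = ##sym@IE        // CONST32: address of the IE GOT slot |
|  | //   r0 = memw(r0)        // load the TP-relative offset |
|  | //   r0 = add(ugp, r0)    // add the thread pointer (UGP) |
|  | // With PIC, the slot address is formed as GOT + ##sym@IEGOT instead. |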
| 1324 | SDValue |
| 1325 | HexagonTargetLowering::LowerToTLSInitialExecModel(GlobalAddressSDNode *GA, |
| 1326 | SelectionDAG &DAG) const { |
| 1327 | SDLoc dl(GA); |
| 1328 | int64_t Offset = GA->getOffset(); |
| 1329 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 1330 | |
| 1331 | // Get the thread pointer. |
| 1332 | SDValue TP = DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg: Hexagon::UGP, VT: PtrVT); |
| 1333 | |
| 1334 | bool IsPositionIndependent = isPositionIndependent(); |
| 1335 | unsigned char TF = |
| 1336 | IsPositionIndependent ? HexagonII::MO_IEGOT : HexagonII::MO_IE; |
| 1337 | |
| 1338 | // First generate the TLS symbol address |
| 1339 | SDValue TGA = DAG.getTargetGlobalAddress(GV: GA->getGlobal(), DL: dl, VT: PtrVT, |
| 1340 | offset: Offset, TargetFlags: TF); |
| 1341 | |
| 1342 | SDValue Sym = DAG.getNode(Opcode: HexagonISD::CONST32, DL: dl, VT: PtrVT, Operand: TGA); |
| 1343 | |
| 1344 | if (IsPositionIndependent) { |
| 1345 | // Generate the GOT pointer in the case of position-independent code. |
| 1346 | SDValue GOT = LowerGLOBAL_OFFSET_TABLE(Op: Sym, DAG); |
| 1347 | |
| 1348 | // Add the TLS symbol address to the GOT pointer. This gives |
| 1349 | // a GOT-relative relocation for the symbol. |
| 1350 | Sym = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: GOT, N2: Sym); |
| 1351 | } |
| 1352 | |
| 1353 | // Load the offset value for the TLS symbol. This offset is relative to |
| 1354 | // the thread pointer. |
| 1355 | SDValue LoadOffset = |
| 1356 | DAG.getLoad(VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: Sym, PtrInfo: MachinePointerInfo()); |
| 1357 | |
| 1358 | // The address of the thread-local variable is the sum of the thread |
| 1359 | // pointer and the variable's offset. |
| 1360 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: TP, N2: LoadOffset); |
| 1361 | } |
| 1362 | |
| 1363 | // |
| 1364 | // Lower using the local executable model for TLS addresses |
| 1365 | // |
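|  | // Illustrative sequence (assembly syntax is approximate): |
|  | //   r0 = ##sym@TPREL     // CONST32: offset from the thread pointer |
|  | //   r0 = add(ugp, r0) |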
| 1366 | SDValue |
| 1367 | HexagonTargetLowering::LowerToTLSLocalExecModel(GlobalAddressSDNode *GA, |
| 1368 | SelectionDAG &DAG) const { |
| 1369 | SDLoc dl(GA); |
| 1370 | int64_t Offset = GA->getOffset(); |
| 1371 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 1372 | |
| 1373 | // Get the thread pointer. |
| 1374 | SDValue TP = DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg: Hexagon::UGP, VT: PtrVT); |
| 1375 | // Generate the TLS symbol address |
| 1376 | SDValue TGA = DAG.getTargetGlobalAddress(GV: GA->getGlobal(), DL: dl, VT: PtrVT, offset: Offset, |
| 1377 | TargetFlags: HexagonII::MO_TPREL); |
| 1378 | SDValue Sym = DAG.getNode(Opcode: HexagonISD::CONST32, DL: dl, VT: PtrVT, Operand: TGA); |
| 1379 | |
| 1380 | // The address of the thread-local variable is the sum of the thread |
| 1381 | // pointer and the variable's offset. |
| 1382 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: TP, N2: Sym); |
| 1383 | } |
| 1384 | |
| 1385 | // |
| 1386 | // Lower using the general dynamic model for TLS addresses |
| 1387 | // |
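|  | // Illustrative sequence (assembly syntax is approximate): the address |
|  | // GOT + ##sym@GDGOT is passed in R0 to a GDPLT call (typically to |
|  | // __tls_get_addr) built by GetDynamicTLSAddr, and the variable's address |
|  | // comes back in R0. |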
| 1388 | SDValue |
| 1389 | HexagonTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, |
| 1390 | SelectionDAG &DAG) const { |
| 1391 | SDLoc dl(GA); |
| 1392 | int64_t Offset = GA->getOffset(); |
| 1393 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 1394 | |
| 1395 | // First generate the TLS symbol address |
| 1396 | SDValue TGA = DAG.getTargetGlobalAddress(GV: GA->getGlobal(), DL: dl, VT: PtrVT, offset: Offset, |
| 1397 | TargetFlags: HexagonII::MO_GDGOT); |
| 1398 | |
| 1399 | // Then, generate the GOT pointer |
| 1400 | SDValue GOT = LowerGLOBAL_OFFSET_TABLE(Op: TGA, DAG); |
| 1401 | |
| 1402 | // Add the TLS symbol and the GOT pointer |
| 1403 | SDValue Sym = DAG.getNode(Opcode: HexagonISD::CONST32, DL: dl, VT: PtrVT, Operand: TGA); |
| 1404 | SDValue Chain = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: GOT, N2: Sym); |
| 1405 | |
| 1406 | // Copy over the argument to R0 |
| 1407 | SDValue InGlue; |
| 1408 | Chain = DAG.getCopyToReg(Chain: DAG.getEntryNode(), dl, Reg: Hexagon::R0, N: Chain, Glue: InGlue); |
| 1409 | InGlue = Chain.getValue(R: 1); |
| 1410 | |
| 1411 | unsigned Flags = DAG.getSubtarget<HexagonSubtarget>().useLongCalls() |
| 1412 | ? HexagonII::MO_GDPLT | HexagonII::HMOTF_ConstExtended |
| 1413 | : HexagonII::MO_GDPLT; |
| 1414 | |
| 1415 | return GetDynamicTLSAddr(DAG, Chain, GA, Glue: InGlue, PtrVT, |
| 1416 | ReturnReg: Hexagon::R0, OperandFlags: Flags); |
| 1417 | } |
| 1418 | |
| 1419 | // |
| 1420 | // Lower TLS addresses. |
| 1421 | // |
| 1422 | // For now, for dynamic models we only support the general-dynamic model. |
| 1423 | // |
| 1424 | SDValue |
| 1425 | HexagonTargetLowering::LowerGlobalTLSAddress(SDValue Op, |
| 1426 | SelectionDAG &DAG) const { |
| 1427 | GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Val&: Op); |
| 1428 | |
| 1429 | switch (HTM.getTLSModel(GV: GA->getGlobal())) { |
| 1430 | case TLSModel::GeneralDynamic: |
| 1431 | case TLSModel::LocalDynamic: |
| 1432 | return LowerToTLSGeneralDynamicModel(GA, DAG); |
| 1433 | case TLSModel::InitialExec: |
| 1434 | return LowerToTLSInitialExecModel(GA, DAG); |
| 1435 | case TLSModel::LocalExec: |
| 1436 | return LowerToTLSLocalExecModel(GA, DAG); |
| 1437 | } |
| 1438 | llvm_unreachable("Bogus TLS model" ); |
| 1439 | } |
| 1440 | |
| 1441 | //===----------------------------------------------------------------------===// |
| 1442 | // TargetLowering Implementation |
| 1443 | //===----------------------------------------------------------------------===// |
| 1444 | |
| 1445 | HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM, |
| 1446 | const HexagonSubtarget &ST) |
| 1447 | : TargetLowering(TM), HTM(static_cast<const HexagonTargetMachine&>(TM)), |
| 1448 | Subtarget(ST) { |
| 1449 | auto &HRI = *Subtarget.getRegisterInfo(); |
| 1450 | |
| 1451 | setPrefLoopAlignment(Align(16)); |
| 1452 | setMinFunctionAlignment(Align(4)); |
| 1453 | setPrefFunctionAlignment(Align(16)); |
| 1454 | setStackPointerRegisterToSaveRestore(HRI.getStackRegister()); |
| 1455 | setBooleanContents(TargetLoweringBase::UndefinedBooleanContent); |
| 1456 | setBooleanVectorContents(TargetLoweringBase::UndefinedBooleanContent); |
| 1457 | |
| 1458 | setMaxAtomicSizeInBitsSupported(64); |
| 1459 | setMinCmpXchgSizeInBits(32); |
| 1460 | |
| 1461 | if (EnableHexSDNodeSched) |
| 1462 | setSchedulingPreference(Sched::VLIW); |
| 1463 | else |
| 1464 | setSchedulingPreference(Sched::Source); |
| 1465 | |
| 1466 | // Limits for inline expansion of memcpy/memmove |
| 1467 | MaxStoresPerMemcpy = MaxStoresPerMemcpyCL; |
| 1468 | MaxStoresPerMemcpyOptSize = MaxStoresPerMemcpyOptSizeCL; |
| 1469 | MaxStoresPerMemmove = MaxStoresPerMemmoveCL; |
| 1470 | MaxStoresPerMemmoveOptSize = MaxStoresPerMemmoveOptSizeCL; |
| 1471 | MaxStoresPerMemset = MaxStoresPerMemsetCL; |
| 1472 | MaxStoresPerMemsetOptSize = MaxStoresPerMemsetOptSizeCL; |
| 1473 | |
| 1474 | // |
| 1475 | // Set up register classes. |
| 1476 | // |
| 1477 | |
| 1478 | addRegisterClass(VT: MVT::i1, RC: &Hexagon::PredRegsRegClass); |
| 1479 | addRegisterClass(VT: MVT::v2i1, RC: &Hexagon::PredRegsRegClass); // bbbbaaaa |
| 1480 | addRegisterClass(VT: MVT::v4i1, RC: &Hexagon::PredRegsRegClass); // ddccbbaa |
| 1481 | addRegisterClass(VT: MVT::v8i1, RC: &Hexagon::PredRegsRegClass); // hgfedcba |
| 1482 | addRegisterClass(VT: MVT::i32, RC: &Hexagon::IntRegsRegClass); |
| 1483 | addRegisterClass(VT: MVT::v2i16, RC: &Hexagon::IntRegsRegClass); |
| 1484 | addRegisterClass(VT: MVT::v4i8, RC: &Hexagon::IntRegsRegClass); |
| 1485 | addRegisterClass(VT: MVT::i64, RC: &Hexagon::DoubleRegsRegClass); |
| 1486 | addRegisterClass(VT: MVT::v8i8, RC: &Hexagon::DoubleRegsRegClass); |
| 1487 | addRegisterClass(VT: MVT::v4i16, RC: &Hexagon::DoubleRegsRegClass); |
| 1488 | addRegisterClass(VT: MVT::v2i32, RC: &Hexagon::DoubleRegsRegClass); |
| 1489 | |
| 1490 | addRegisterClass(VT: MVT::f32, RC: &Hexagon::IntRegsRegClass); |
| 1491 | addRegisterClass(VT: MVT::f64, RC: &Hexagon::DoubleRegsRegClass); |
| 1492 | |
| 1493 | // |
| 1494 | // Handling of scalar operations. |
| 1495 | // |
| 1496 | // All operations default to "legal", except: |
| 1497 | // - indexed loads and stores (pre-/post-incremented), |
| 1498 | // - ANY_EXTEND_VECTOR_INREG, ATOMIC_CMP_SWAP_WITH_SUCCESS, CONCAT_VECTORS, |
| 1499 | // ConstantFP, FCEIL, FCOPYSIGN, FEXP, FEXP2, FFLOOR, FGETSIGN, |
| 1500 | // FLOG, FLOG2, FLOG10, FMAXIMUMNUM, FMINIMUMNUM, FNEARBYINT, FRINT, FROUND, |
| 1501 | // TRAP, FTRUNC, PREFETCH, SIGN_EXTEND_VECTOR_INREG, |
| 1502 | // ZERO_EXTEND_VECTOR_INREG, |
| 1503 | // which default to "expand" for at least one type. |
| 1504 | |
| 1505 | // Misc operations. |
| 1506 | setOperationAction(Op: ISD::ConstantFP, VT: MVT::f32, Action: Legal); |
| 1507 | setOperationAction(Op: ISD::ConstantFP, VT: MVT::f64, Action: Legal); |
| 1508 | setOperationAction(Op: ISD::TRAP, VT: MVT::Other, Action: Legal); |
| 1509 | setOperationAction(Op: ISD::DEBUGTRAP, VT: MVT::Other, Action: Legal); |
| 1510 | setOperationAction(Op: ISD::ConstantPool, VT: MVT::i32, Action: Custom); |
| 1511 | setOperationAction(Op: ISD::JumpTable, VT: MVT::i32, Action: Custom); |
| 1512 | setOperationAction(Op: ISD::BUILD_PAIR, VT: MVT::i64, Action: Expand); |
| 1513 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::i1, Action: Expand); |
| 1514 | setOperationAction(Op: ISD::INLINEASM, VT: MVT::Other, Action: Custom); |
| 1515 | setOperationAction(Op: ISD::INLINEASM_BR, VT: MVT::Other, Action: Custom); |
| 1516 | setOperationAction(Op: ISD::PREFETCH, VT: MVT::Other, Action: Custom); |
| 1517 | setOperationAction(Op: ISD::READCYCLECOUNTER, VT: MVT::i64, Action: Custom); |
| 1518 | setOperationAction(Op: ISD::READSTEADYCOUNTER, VT: MVT::i64, Action: Custom); |
| 1519 | setOperationAction(Op: ISD::INTRINSIC_VOID, VT: MVT::Other, Action: Custom); |
| 1520 | setOperationAction(Op: ISD::EH_RETURN, VT: MVT::Other, Action: Custom); |
| 1521 | setOperationAction(Op: ISD::GLOBAL_OFFSET_TABLE, VT: MVT::i32, Action: Custom); |
| 1522 | setOperationAction(Op: ISD::GlobalTLSAddress, VT: MVT::i32, Action: Custom); |
| 1523 | setOperationAction(Op: ISD::ATOMIC_FENCE, VT: MVT::Other, Action: Custom); |
| 1524 | |
| 1525 | // Custom legalize GlobalAddress nodes into CONST32. |
| 1526 | setOperationAction(Op: ISD::GlobalAddress, VT: MVT::i32, Action: Custom); |
| 1527 | setOperationAction(Op: ISD::GlobalAddress, VT: MVT::i8, Action: Custom); |
| 1528 | setOperationAction(Op: ISD::BlockAddress, VT: MVT::i32, Action: Custom); |
| 1529 | |
| 1530 | // Hexagon needs to optimize cases with negative constants. |
| 1531 | setOperationAction(Op: ISD::SETCC, VT: MVT::i8, Action: Custom); |
| 1532 | setOperationAction(Op: ISD::SETCC, VT: MVT::i16, Action: Custom); |
| 1533 | setOperationAction(Op: ISD::SETCC, VT: MVT::v4i8, Action: Custom); |
| 1534 | setOperationAction(Op: ISD::SETCC, VT: MVT::v2i16, Action: Custom); |
| 1535 | |
| 1536 | // VASTART needs to be custom lowered to use the VarArgsFrameIndex. |
| 1537 | setOperationAction(Op: ISD::VASTART, VT: MVT::Other, Action: Custom); |
| 1538 | setOperationAction(Op: ISD::VAEND, VT: MVT::Other, Action: Expand); |
| 1539 | setOperationAction(Op: ISD::VAARG, VT: MVT::Other, Action: Expand); |
| 1540 | if (Subtarget.isEnvironmentMusl()) |
| 1541 | setOperationAction(Op: ISD::VACOPY, VT: MVT::Other, Action: Custom); |
| 1542 | else |
| 1543 | setOperationAction(Op: ISD::VACOPY, VT: MVT::Other, Action: Expand); |
| 1544 | |
| 1545 | setOperationAction(Op: ISD::STACKSAVE, VT: MVT::Other, Action: Expand); |
| 1546 | setOperationAction(Op: ISD::STACKRESTORE, VT: MVT::Other, Action: Expand); |
| 1547 | setOperationAction(Op: ISD::DYNAMIC_STACKALLOC, VT: MVT::i32, Action: Custom); |
| 1548 | |
| 1549 | if (EmitJumpTables) |
| 1550 | setMinimumJumpTableEntries(MinimumJumpTables); |
| 1551 | else |
| 1552 | setMinimumJumpTableEntries(std::numeric_limits<unsigned>::max()); |
| 1553 | setOperationAction(Op: ISD::BR_JT, VT: MVT::Other, Action: Expand); |
| 1554 | |
| 1555 | for (unsigned LegalIntOp : |
| 1556 | {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}) { |
| 1557 | setOperationAction(Op: LegalIntOp, VT: MVT::i32, Action: Legal); |
| 1558 | setOperationAction(Op: LegalIntOp, VT: MVT::i64, Action: Legal); |
| 1559 | } |
| 1560 | |
| 1561 | // Hexagon has A4_addp_c and A4_subp_c that take and generate a carry bit, |
| 1562 | // but they only operate on i64. |
| 1563 | for (MVT VT : MVT::integer_valuetypes()) { |
| 1564 | setOperationAction(Op: ISD::UADDO, VT, Action: Custom); |
| 1565 | setOperationAction(Op: ISD::USUBO, VT, Action: Custom); |
| 1566 | setOperationAction(Op: ISD::SADDO, VT, Action: Expand); |
| 1567 | setOperationAction(Op: ISD::SSUBO, VT, Action: Expand); |
| 1568 | setOperationAction(Op: ISD::UADDO_CARRY, VT, Action: Expand); |
| 1569 | setOperationAction(Op: ISD::USUBO_CARRY, VT, Action: Expand); |
| 1570 | } |
| 1571 | setOperationAction(Op: ISD::UADDO_CARRY, VT: MVT::i64, Action: Custom); |
| 1572 | setOperationAction(Op: ISD::USUBO_CARRY, VT: MVT::i64, Action: Custom); |
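|  | // For example, an i64 UADDO_CARRY is custom-lowered to HexagonISD::ADDC, |
|  | // which can then select to A4_addp_c (a register-pair add that both |
|  | // consumes and produces a carry predicate). |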
| 1573 | |
| 1574 | setOperationAction(Op: ISD::CTLZ, VT: MVT::i8, Action: Promote); |
| 1575 | setOperationAction(Op: ISD::CTLZ, VT: MVT::i16, Action: Promote); |
| 1576 | setOperationAction(Op: ISD::CTTZ, VT: MVT::i8, Action: Promote); |
| 1577 | setOperationAction(Op: ISD::CTTZ, VT: MVT::i16, Action: Promote); |
| 1578 | |
| 1579 | // Popcount can count the number of 1s in an i64, but it returns an i32. |
| 1580 | setOperationAction(Op: ISD::CTPOP, VT: MVT::i8, Action: Promote); |
| 1581 | setOperationAction(Op: ISD::CTPOP, VT: MVT::i16, Action: Promote); |
| 1582 | setOperationAction(Op: ISD::CTPOP, VT: MVT::i32, Action: Promote); |
| 1583 | setOperationAction(Op: ISD::CTPOP, VT: MVT::i64, Action: Legal); |
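|  | // For example, (ctpop i32 x) is promoted: x is extended to i64 and the |
|  | // 64-bit popcount is used directly, since its result is already an i32. |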
| 1584 | |
| 1585 | setOperationAction(Op: ISD::BITREVERSE, VT: MVT::i32, Action: Legal); |
| 1586 | setOperationAction(Op: ISD::BITREVERSE, VT: MVT::i64, Action: Legal); |
| 1587 | setOperationAction(Op: ISD::BSWAP, VT: MVT::i32, Action: Legal); |
| 1588 | setOperationAction(Op: ISD::BSWAP, VT: MVT::i64, Action: Legal); |
| 1589 | |
| 1590 | setOperationAction(Op: ISD::FSHL, VT: MVT::i32, Action: Legal); |
| 1591 | setOperationAction(Op: ISD::FSHL, VT: MVT::i64, Action: Legal); |
| 1592 | setOperationAction(Op: ISD::FSHR, VT: MVT::i32, Action: Legal); |
| 1593 | setOperationAction(Op: ISD::FSHR, VT: MVT::i64, Action: Legal); |
| 1594 | |
| 1595 | for (unsigned IntExpOp : |
| 1596 | {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, |
| 1597 | ISD::SDIVREM, ISD::UDIVREM, ISD::ROTL, ISD::ROTR, |
| 1598 | ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS, |
| 1599 | ISD::SMUL_LOHI, ISD::UMUL_LOHI}) { |
| 1600 | for (MVT VT : MVT::integer_valuetypes()) |
| 1601 | setOperationAction(Op: IntExpOp, VT, Action: Expand); |
| 1602 | } |
| 1603 | |
| 1604 | for (unsigned FPExpOp : |
| 1605 | {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FSINCOS, |
| 1606 | ISD::FPOW, ISD::FCOPYSIGN}) { |
| 1607 | for (MVT VT : MVT::fp_valuetypes()) |
| 1608 | setOperationAction(Op: FPExpOp, VT, Action: Expand); |
| 1609 | } |
| 1610 | |
| 1611 | // No extending loads from i32. |
| 1612 | for (MVT VT : MVT::integer_valuetypes()) { |
| 1613 | setLoadExtAction(ExtType: ISD::ZEXTLOAD, ValVT: VT, MemVT: MVT::i32, Action: Expand); |
| 1614 | setLoadExtAction(ExtType: ISD::SEXTLOAD, ValVT: VT, MemVT: MVT::i32, Action: Expand); |
| 1615 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: VT, MemVT: MVT::i32, Action: Expand); |
| 1616 | } |
| 1617 | // Turn FP truncstore into trunc + store. |
| 1618 | setTruncStoreAction(ValVT: MVT::f64, MemVT: MVT::f32, Action: Expand); |
| 1619 | // Turn FP extload into load/fpextend. |
| 1620 | for (MVT VT : MVT::fp_valuetypes()) |
| 1621 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: VT, MemVT: MVT::f32, Action: Expand); |
| 1622 | |
| 1623 | // Expand BR_CC and SELECT_CC for all integer and fp types. |
| 1624 | for (MVT VT : MVT::integer_valuetypes()) { |
| 1625 | setOperationAction(Op: ISD::BR_CC, VT, Action: Expand); |
| 1626 | setOperationAction(Op: ISD::SELECT_CC, VT, Action: Expand); |
| 1627 | } |
| 1628 | for (MVT VT : MVT::fp_valuetypes()) { |
| 1629 | setOperationAction(Op: ISD::BR_CC, VT, Action: Expand); |
| 1630 | setOperationAction(Op: ISD::SELECT_CC, VT, Action: Expand); |
| 1631 | } |
| 1632 | setOperationAction(Op: ISD::BR_CC, VT: MVT::Other, Action: Expand); |
| 1633 | |
| 1634 | // |
| 1635 | // Handling of vector operations. |
| 1636 | // |
| 1637 | |
| 1638 | // Set the action for vector operations to "expand", then override it with |
| 1639 | // either "custom" or "legal" for specific cases. |
| 1640 | // clang-format off |
| 1641 | static const unsigned VectExpOps[] = { |
| 1642 | // Integer arithmetic: |
| 1643 | ISD::ADD, ISD::SUB, ISD::MUL, ISD::SDIV, ISD::UDIV, |
| 1644 | ISD::SREM, ISD::UREM, ISD::SDIVREM, ISD::UDIVREM, ISD::SADDO, |
| 1645 | ISD::UADDO, ISD::SSUBO, ISD::USUBO, ISD::SMUL_LOHI, ISD::UMUL_LOHI, |
| 1646 | // Logical/bit: |
| 1647 | ISD::AND, ISD::OR, ISD::XOR, ISD::ROTL, ISD::ROTR, |
| 1648 | ISD::CTPOP, ISD::CTLZ, ISD::CTTZ, ISD::BSWAP, ISD::BITREVERSE, |
| 1649 | // Floating point arithmetic/math functions: |
| 1650 | ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FMA, ISD::FDIV, |
| 1651 | ISD::FREM, ISD::FNEG, ISD::FABS, ISD::FSQRT, ISD::FSIN, |
| 1652 | ISD::FCOS, ISD::FPOW, ISD::FLOG, ISD::FLOG2, |
| 1653 | ISD::FLOG10, ISD::FEXP, ISD::FEXP2, ISD::FCEIL, ISD::FTRUNC, |
| 1654 | ISD::FRINT, ISD::FNEARBYINT, ISD::FROUND, ISD::FFLOOR, |
| 1655 | ISD::FMINIMUMNUM, ISD::FMAXIMUMNUM, |
| 1656 | ISD::FSINCOS, ISD::FLDEXP, |
| 1657 | // Misc: |
| 1658 | ISD::BR_CC, ISD::SELECT_CC, ISD::ConstantPool, |
| 1659 | // Vector: |
| 1660 | ISD::BUILD_VECTOR, ISD::SCALAR_TO_VECTOR, |
| 1661 | ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT, |
| 1662 | ISD::EXTRACT_SUBVECTOR, ISD::INSERT_SUBVECTOR, |
| 1663 | ISD::CONCAT_VECTORS, ISD::VECTOR_SHUFFLE, |
| 1664 | ISD::SPLAT_VECTOR, |
| 1665 | }; |
| 1666 | // clang-format on |
| 1667 | |
| 1668 | for (MVT VT : MVT::fixedlen_vector_valuetypes()) { |
| 1669 | for (unsigned VectExpOp : VectExpOps) |
| 1670 | setOperationAction(Op: VectExpOp, VT, Action: Expand); |
| 1671 | |
| 1672 | // Expand all extending loads and truncating stores: |
| 1673 | for (MVT TargetVT : MVT::fixedlen_vector_valuetypes()) { |
| 1674 | if (TargetVT == VT) |
| 1675 | continue; |
| 1676 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: TargetVT, MemVT: VT, Action: Expand); |
| 1677 | setLoadExtAction(ExtType: ISD::ZEXTLOAD, ValVT: TargetVT, MemVT: VT, Action: Expand); |
| 1678 | setLoadExtAction(ExtType: ISD::SEXTLOAD, ValVT: TargetVT, MemVT: VT, Action: Expand); |
| 1679 | setTruncStoreAction(ValVT: VT, MemVT: TargetVT, Action: Expand); |
| 1680 | } |
| 1681 | |
| 1682 | // Normalize all inputs to SELECT to be vectors of i32. |
| 1683 | if (VT.getVectorElementType() != MVT::i32) { |
| 1684 | MVT VT32 = MVT::getVectorVT(VT: MVT::i32, NumElements: VT.getSizeInBits()/32); |
| 1685 | setOperationAction(Op: ISD::SELECT, VT, Action: Promote); |
| 1686 | AddPromotedToType(Opc: ISD::SELECT, OrigVT: VT, DestVT: VT32); |
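|  | // (E.g., a SELECT on v4i8 or v2i16 is promoted to operate on v1i32, |
|  | // and one on v8i8 or v4i16 to v2i32.) |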
| 1687 | } |
| 1688 | setOperationAction(Op: ISD::SRA, VT, Action: Custom); |
| 1689 | setOperationAction(Op: ISD::SHL, VT, Action: Custom); |
| 1690 | setOperationAction(Op: ISD::SRL, VT, Action: Custom); |
| 1691 | } |
| 1692 | |
| 1693 | // Extending loads from (native) vectors of i8 into (native) vectors of i16 |
| 1694 | // are legal. |
| 1695 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: MVT::v2i16, MemVT: MVT::v2i8, Action: Legal); |
| 1696 | setLoadExtAction(ExtType: ISD::ZEXTLOAD, ValVT: MVT::v2i16, MemVT: MVT::v2i8, Action: Legal); |
| 1697 | setLoadExtAction(ExtType: ISD::SEXTLOAD, ValVT: MVT::v2i16, MemVT: MVT::v2i8, Action: Legal); |
| 1698 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: MVT::v4i16, MemVT: MVT::v4i8, Action: Legal); |
| 1699 | setLoadExtAction(ExtType: ISD::ZEXTLOAD, ValVT: MVT::v4i16, MemVT: MVT::v4i8, Action: Legal); |
| 1700 | setLoadExtAction(ExtType: ISD::SEXTLOAD, ValVT: MVT::v4i16, MemVT: MVT::v4i8, Action: Legal); |
| 1701 | |
| 1702 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::v2i8, Action: Legal); |
| 1703 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::v2i16, Action: Legal); |
| 1704 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::v2i32, Action: Legal); |
| 1705 | |
| 1706 | // Types natively supported: |
| 1707 | for (MVT NativeVT : {MVT::v8i1, MVT::v4i1, MVT::v2i1, MVT::v4i8, |
| 1708 | MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v2i32}) { |
| 1709 | setOperationAction(Op: ISD::BUILD_VECTOR, VT: NativeVT, Action: Custom); |
| 1710 | setOperationAction(Op: ISD::EXTRACT_VECTOR_ELT, VT: NativeVT, Action: Custom); |
| 1711 | setOperationAction(Op: ISD::INSERT_VECTOR_ELT, VT: NativeVT, Action: Custom); |
| 1712 | setOperationAction(Op: ISD::EXTRACT_SUBVECTOR, VT: NativeVT, Action: Custom); |
| 1713 | setOperationAction(Op: ISD::INSERT_SUBVECTOR, VT: NativeVT, Action: Custom); |
| 1714 | setOperationAction(Op: ISD::CONCAT_VECTORS, VT: NativeVT, Action: Custom); |
| 1715 | |
| 1716 | setOperationAction(Op: ISD::ADD, VT: NativeVT, Action: Legal); |
| 1717 | setOperationAction(Op: ISD::SUB, VT: NativeVT, Action: Legal); |
| 1718 | setOperationAction(Op: ISD::MUL, VT: NativeVT, Action: Legal); |
| 1719 | setOperationAction(Op: ISD::AND, VT: NativeVT, Action: Legal); |
| 1720 | setOperationAction(Op: ISD::OR, VT: NativeVT, Action: Legal); |
| 1721 | setOperationAction(Op: ISD::XOR, VT: NativeVT, Action: Legal); |
| 1722 | |
| 1723 | if (NativeVT.getVectorElementType() != MVT::i1) { |
| 1724 | setOperationAction(Op: ISD::SPLAT_VECTOR, VT: NativeVT, Action: Legal); |
| 1725 | setOperationAction(Op: ISD::BSWAP, VT: NativeVT, Action: Legal); |
| 1726 | setOperationAction(Op: ISD::BITREVERSE, VT: NativeVT, Action: Legal); |
| 1727 | } |
| 1728 | } |
| 1729 | |
| 1730 | for (MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32}) { |
| 1731 | setOperationAction(Op: ISD::SMIN, VT, Action: Legal); |
| 1732 | setOperationAction(Op: ISD::SMAX, VT, Action: Legal); |
| 1733 | setOperationAction(Op: ISD::UMIN, VT, Action: Legal); |
| 1734 | setOperationAction(Op: ISD::UMAX, VT, Action: Legal); |
| 1735 | } |
| 1736 | |
| 1737 | // Custom lower unaligned loads. |
| 1738 | // Also, for both loads and stores, verify the alignment of the address |
| 1739 | // in case it is a compile-time constant. This is a usability feature to |
| 1740 | // provide a meaningful error message to users. |
| 1741 | for (MVT VT : {MVT::i16, MVT::i32, MVT::v4i8, MVT::i64, MVT::v8i8, |
| 1742 | MVT::v2i16, MVT::v4i16, MVT::v2i32}) { |
| 1743 | setOperationAction(Op: ISD::LOAD, VT, Action: Custom); |
| 1744 | setOperationAction(Op: ISD::STORE, VT, Action: Custom); |
| 1745 | } |
| 1746 | |
| 1747 | // Custom-lower load/stores of boolean vectors. |
| 1748 | for (MVT VT : {MVT::v2i1, MVT::v4i1, MVT::v8i1}) { |
| 1749 | setOperationAction(Op: ISD::LOAD, VT, Action: Custom); |
| 1750 | setOperationAction(Op: ISD::STORE, VT, Action: Custom); |
| 1751 | } |
| 1752 | |
| 1753 | // Normalize integer compares to EQ/GT/UGT |
| 1754 | for (MVT VT : {MVT::v2i16, MVT::v4i8, MVT::v8i8, MVT::v2i32, MVT::v4i16}) { |
| 1756 | setCondCodeAction(CCs: ISD::SETNE, VT, Action: Expand); |
| 1757 | setCondCodeAction(CCs: ISD::SETLE, VT, Action: Expand); |
| 1758 | setCondCodeAction(CCs: ISD::SETGE, VT, Action: Expand); |
| 1759 | setCondCodeAction(CCs: ISD::SETLT, VT, Action: Expand); |
| 1760 | setCondCodeAction(CCs: ISD::SETULE, VT, Action: Expand); |
| 1761 | setCondCodeAction(CCs: ISD::SETUGE, VT, Action: Expand); |
| 1762 | setCondCodeAction(CCs: ISD::SETULT, VT, Action: Expand); |
| 1763 | } |
| 1764 | |
| 1765 | // Normalize boolean compares to [U]LE/[U]LT |
| 1766 | for (MVT VT : {MVT::i1, MVT::v2i1, MVT::v4i1, MVT::v8i1}) { |
| 1767 | setCondCodeAction(CCs: ISD::SETGE, VT, Action: Expand); |
| 1768 | setCondCodeAction(CCs: ISD::SETGT, VT, Action: Expand); |
| 1769 | setCondCodeAction(CCs: ISD::SETUGE, VT, Action: Expand); |
| 1770 | setCondCodeAction(CCs: ISD::SETUGT, VT, Action: Expand); |
| 1771 | } |
| 1772 | |
| 1773 | // Custom-lower bitcasts from i8 to v8i1. |
| 1774 | setOperationAction(Op: ISD::BITCAST, VT: MVT::i8, Action: Custom); |
| 1775 | setOperationAction(Op: ISD::SETCC, VT: MVT::v2i16, Action: Custom); |
| 1776 | setOperationAction(Op: ISD::VSELECT, VT: MVT::v4i8, Action: Custom); |
| 1777 | setOperationAction(Op: ISD::VSELECT, VT: MVT::v2i16, Action: Custom); |
| 1778 | setOperationAction(Op: ISD::VECTOR_SHUFFLE, VT: MVT::v4i8, Action: Custom); |
| 1779 | setOperationAction(Op: ISD::VECTOR_SHUFFLE, VT: MVT::v4i16, Action: Custom); |
| 1780 | setOperationAction(Op: ISD::VECTOR_SHUFFLE, VT: MVT::v8i8, Action: Custom); |
| 1781 | |
| 1782 | // V5+. |
| 1783 | setOperationAction(Op: ISD::FMA, VT: MVT::f64, Action: Expand); |
| 1784 | setOperationAction(Op: ISD::FADD, VT: MVT::f64, Action: Expand); |
| 1785 | setOperationAction(Op: ISD::FSUB, VT: MVT::f64, Action: Expand); |
| 1786 | setOperationAction(Op: ISD::FMUL, VT: MVT::f64, Action: Expand); |
| 1787 | setOperationAction(Op: ISD::FDIV, VT: MVT::f32, Action: Custom); |
| 1788 | |
| 1789 | setOperationAction(Op: ISD::FMINIMUMNUM, VT: MVT::f32, Action: Legal); |
| 1790 | setOperationAction(Op: ISD::FMAXIMUMNUM, VT: MVT::f32, Action: Legal); |
| 1791 | |
| 1792 | setOperationAction(Op: ISD::FP_TO_UINT, VT: MVT::i1, Action: Promote); |
| 1793 | setOperationAction(Op: ISD::FP_TO_UINT, VT: MVT::i8, Action: Promote); |
| 1794 | setOperationAction(Op: ISD::FP_TO_UINT, VT: MVT::i16, Action: Promote); |
| 1795 | setOperationAction(Op: ISD::FP_TO_SINT, VT: MVT::i1, Action: Promote); |
| 1796 | setOperationAction(Op: ISD::FP_TO_SINT, VT: MVT::i8, Action: Promote); |
| 1797 | setOperationAction(Op: ISD::FP_TO_SINT, VT: MVT::i16, Action: Promote); |
| 1798 | setOperationAction(Op: ISD::UINT_TO_FP, VT: MVT::i1, Action: Promote); |
| 1799 | setOperationAction(Op: ISD::UINT_TO_FP, VT: MVT::i8, Action: Promote); |
| 1800 | setOperationAction(Op: ISD::UINT_TO_FP, VT: MVT::i16, Action: Promote); |
| 1801 | setOperationAction(Op: ISD::SINT_TO_FP, VT: MVT::i1, Action: Promote); |
| 1802 | setOperationAction(Op: ISD::SINT_TO_FP, VT: MVT::i8, Action: Promote); |
| 1803 | setOperationAction(Op: ISD::SINT_TO_FP, VT: MVT::i16, Action: Promote); |
| 1804 | |
| 1805 | // Special handling for half-precision floating point conversions. |
| 1806 | // Lower half float conversions into library calls. |
| 1807 | setOperationAction(Op: ISD::FP16_TO_FP, VT: MVT::f32, Action: Expand); |
| 1808 | setOperationAction(Op: ISD::FP16_TO_FP, VT: MVT::f64, Action: Expand); |
| 1809 | setOperationAction(Op: ISD::FP_TO_FP16, VT: MVT::f32, Action: Expand); |
| 1810 | setOperationAction(Op: ISD::FP_TO_FP16, VT: MVT::f64, Action: Expand); |
| 1811 | |
| 1812 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: MVT::f32, MemVT: MVT::f16, Action: Expand); |
| 1813 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: MVT::f64, MemVT: MVT::f16, Action: Expand); |
| 1814 | setTruncStoreAction(ValVT: MVT::f32, MemVT: MVT::f16, Action: Expand); |
| 1815 | setTruncStoreAction(ValVT: MVT::f64, MemVT: MVT::f16, Action: Expand); |
| 1816 | |
| 1817 | // Handling of indexed loads/stores: default is "expand". |
| 1818 | // |
| 1819 | for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64, MVT::f32, MVT::f64, |
| 1820 | MVT::v2i16, MVT::v2i32, MVT::v4i8, MVT::v4i16, MVT::v8i8}) { |
| 1821 | setIndexedLoadAction(IdxModes: ISD::POST_INC, VT, Action: Legal); |
| 1822 | setIndexedStoreAction(IdxModes: ISD::POST_INC, VT, Action: Legal); |
| 1823 | } |
| 1824 | |
| 1825 | // Subtarget-specific operation actions. |
| 1826 | // |
| 1827 | if (Subtarget.hasV60Ops()) { |
| 1828 | setOperationAction(Op: ISD::ROTL, VT: MVT::i32, Action: Legal); |
| 1829 | setOperationAction(Op: ISD::ROTL, VT: MVT::i64, Action: Legal); |
| 1830 | setOperationAction(Op: ISD::ROTR, VT: MVT::i32, Action: Legal); |
| 1831 | setOperationAction(Op: ISD::ROTR, VT: MVT::i64, Action: Legal); |
| 1832 | } |
| 1833 | if (Subtarget.hasV66Ops()) { |
| 1834 | setOperationAction(Op: ISD::FADD, VT: MVT::f64, Action: Legal); |
| 1835 | setOperationAction(Op: ISD::FSUB, VT: MVT::f64, Action: Legal); |
| 1836 | } |
| 1837 | if (Subtarget.hasV67Ops()) { |
| 1838 | setOperationAction(Op: ISD::FMINIMUMNUM, VT: MVT::f64, Action: Legal); |
| 1839 | setOperationAction(Op: ISD::FMAXIMUMNUM, VT: MVT::f64, Action: Legal); |
| 1840 | setOperationAction(Op: ISD::FMUL, VT: MVT::f64, Action: Legal); |
| 1841 | } |
| 1842 | |
| 1843 | setTargetDAGCombine(ISD::OR); |
| 1844 | setTargetDAGCombine(ISD::TRUNCATE); |
| 1845 | setTargetDAGCombine(ISD::VSELECT); |
| 1846 | |
| 1847 | if (Subtarget.useHVXOps()) |
| 1848 | initializeHVXLowering(); |
| 1849 | |
| 1850 | computeRegisterProperties(TRI: &HRI); |
| 1851 | } |
| 1852 | |
| 1853 | const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const { |
| 1854 | switch ((HexagonISD::NodeType)Opcode) { |
| 1855 | case HexagonISD::ADDC: return "HexagonISD::ADDC" ; |
| 1856 | case HexagonISD::SUBC: return "HexagonISD::SUBC" ; |
| 1857 | case HexagonISD::ALLOCA: return "HexagonISD::ALLOCA" ; |
| 1858 | case HexagonISD::AT_GOT: return "HexagonISD::AT_GOT" ; |
| 1859 | case HexagonISD::AT_PCREL: return "HexagonISD::AT_PCREL" ; |
| 1860 | case HexagonISD::BARRIER: return "HexagonISD::BARRIER" ; |
| 1861 | case HexagonISD::CALL: return "HexagonISD::CALL" ; |
| 1862 | case HexagonISD::CALLnr: return "HexagonISD::CALLnr" ; |
| 1863 | case HexagonISD::CALLR: return "HexagonISD::CALLR" ; |
| 1864 | case HexagonISD::COMBINE: return "HexagonISD::COMBINE" ; |
| 1865 | case HexagonISD::CONST32_GP: return "HexagonISD::CONST32_GP" ; |
| 1866 | case HexagonISD::CONST32: return "HexagonISD::CONST32" ; |
| 1867 | case HexagonISD::CP: return "HexagonISD::CP" ; |
| 1868 | case HexagonISD::DCFETCH: return "HexagonISD::DCFETCH" ; |
| 1869 | case HexagonISD::EH_RETURN: return "HexagonISD::EH_RETURN" ; |
| 1870 | case HexagonISD::TSTBIT: return "HexagonISD::TSTBIT" ; |
| 1871 | case HexagonISD::EXTRACTU: return "HexagonISD::EXTRACTU" ; |
| 1872 | case HexagonISD::INSERT: return "HexagonISD::INSERT" ; |
| 1873 | case HexagonISD::JT: return "HexagonISD::JT" ; |
| 1874 | case HexagonISD::RET_GLUE: return "HexagonISD::RET_GLUE" ; |
| 1875 | case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN" ; |
| 1876 | case HexagonISD::VASL: return "HexagonISD::VASL" ; |
| 1877 | case HexagonISD::VASR: return "HexagonISD::VASR" ; |
| 1878 | case HexagonISD::VLSR: return "HexagonISD::VLSR" ; |
| 1879 | case HexagonISD::MFSHL: return "HexagonISD::MFSHL" ; |
| 1880 | case HexagonISD::MFSHR: return "HexagonISD::MFSHR" ; |
| 1881 | case HexagonISD::SSAT: return "HexagonISD::SSAT" ; |
| 1882 | case HexagonISD::USAT: return "HexagonISD::USAT" ; |
| 1883 | case HexagonISD::SMUL_LOHI: return "HexagonISD::SMUL_LOHI" ; |
| 1884 | case HexagonISD::UMUL_LOHI: return "HexagonISD::UMUL_LOHI" ; |
| 1885 | case HexagonISD::USMUL_LOHI: return "HexagonISD::USMUL_LOHI" ; |
| 1886 | case HexagonISD::VEXTRACTW: return "HexagonISD::VEXTRACTW" ; |
| 1887 | case HexagonISD::VINSERTW0: return "HexagonISD::VINSERTW0" ; |
| 1888 | case HexagonISD::VROR: return "HexagonISD::VROR" ; |
| 1889 | case HexagonISD::READCYCLE: return "HexagonISD::READCYCLE" ; |
| 1890 | case HexagonISD::READTIMER: return "HexagonISD::READTIMER" ; |
| 1891 | case HexagonISD::PTRUE: return "HexagonISD::PTRUE" ; |
| 1892 | case HexagonISD::PFALSE: return "HexagonISD::PFALSE" ; |
| 1893 | case HexagonISD::D2P: return "HexagonISD::D2P" ; |
| 1894 | case HexagonISD::P2D: return "HexagonISD::P2D" ; |
| 1895 | case HexagonISD::V2Q: return "HexagonISD::V2Q" ; |
| 1896 | case HexagonISD::Q2V: return "HexagonISD::Q2V" ; |
| 1897 | case HexagonISD::QCAT: return "HexagonISD::QCAT" ; |
| 1898 | case HexagonISD::QTRUE: return "HexagonISD::QTRUE" ; |
| 1899 | case HexagonISD::QFALSE: return "HexagonISD::QFALSE" ; |
| 1900 | case HexagonISD::TL_EXTEND: return "HexagonISD::TL_EXTEND" ; |
| 1901 | case HexagonISD::TL_TRUNCATE: return "HexagonISD::TL_TRUNCATE" ; |
| 1902 | case HexagonISD::TYPECAST: return "HexagonISD::TYPECAST" ; |
| 1903 | case HexagonISD::VALIGN: return "HexagonISD::VALIGN" ; |
| 1904 | case HexagonISD::VALIGNADDR: return "HexagonISD::VALIGNADDR" ; |
| 1905 | case HexagonISD::ISEL: return "HexagonISD::ISEL" ; |
| 1906 | case HexagonISD::OP_END: break; |
| 1907 | } |
| 1908 | return nullptr; |
| 1909 | } |
| 1910 | |
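|  | // For example, "store i32 %v, ptr inttoptr (i32 6 to ptr)" requires align |
|  | // 4, but address 6 only guarantees align 2; the remark below is emitted |
|  | // and the caller replaces the access with a trap (see replaceMemWithUndef). |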
| 1911 | bool |
| 1912 | HexagonTargetLowering::validateConstPtrAlignment(SDValue Ptr, Align NeedAlign, |
| 1913 | const SDLoc &dl, SelectionDAG &DAG) const { |
| 1914 | auto *CA = dyn_cast<ConstantSDNode>(Val&: Ptr); |
| 1915 | if (!CA) |
| 1916 | return true; |
| 1917 | unsigned Addr = CA->getZExtValue(); |
| 1918 | Align HaveAlign = |
| 1919 | Addr != 0 ? Align(1ull << llvm::countr_zero(Val: Addr)) : NeedAlign; |
| 1920 | if (HaveAlign >= NeedAlign) |
| 1921 | return true; |
| 1922 | |
| 1923 | static int DK_MisalignedTrap = llvm::getNextAvailablePluginDiagnosticKind(); |
| 1924 | |
| 1925 | struct DiagnosticInfoMisalignedTrap : public DiagnosticInfo { |
| 1926 | DiagnosticInfoMisalignedTrap(StringRef M) |
| 1927 | : DiagnosticInfo(DK_MisalignedTrap, DS_Remark), Msg(M) {} |
| 1928 | void print(DiagnosticPrinter &DP) const override { |
| 1929 | DP << Msg; |
| 1930 | } |
| 1931 | static bool classof(const DiagnosticInfo *DI) { |
| 1932 | return DI->getKind() == DK_MisalignedTrap; |
| 1933 | } |
| 1934 | StringRef Msg; |
| 1935 | }; |
| 1936 | |
| 1937 | std::string ErrMsg; |
| 1938 | raw_string_ostream O(ErrMsg); |
| 1939 | O << "Misaligned constant address: " << format_hex(N: Addr, Width: 10) |
| 1940 | << " has alignment " << HaveAlign.value() |
| 1941 | << ", but the memory access requires " << NeedAlign.value(); |
| 1942 | if (DebugLoc DL = dl.getDebugLoc()) |
| 1943 | DL.print(OS&: O << ", at " ); |
| 1944 | O << ". The instruction has been replaced with a trap." ; |
| 1945 | |
| 1946 | DAG.getContext()->diagnose(DI: DiagnosticInfoMisalignedTrap(O.str())); |
| 1947 | return false; |
| 1948 | } |
| 1949 | |
| 1950 | SDValue |
| 1951 | HexagonTargetLowering::replaceMemWithUndef(SDValue Op, SelectionDAG &DAG) |
| 1952 | const { |
| 1953 | const SDLoc &dl(Op); |
| 1954 | auto *LS = cast<LSBaseSDNode>(Val: Op.getNode()); |
| 1955 | assert(!LS->isIndexed() && "Not expecting indexed ops on constant address" ); |
| 1956 | |
| 1957 | SDValue Chain = LS->getChain(); |
| 1958 | SDValue Trap = DAG.getNode(Opcode: ISD::TRAP, DL: dl, VT: MVT::Other, Operand: Chain); |
| 1959 | if (LS->getOpcode() == ISD::LOAD) |
| 1960 | return DAG.getMergeValues(Ops: {DAG.getUNDEF(VT: ty(Op)), Trap}, dl); |
| 1961 | return Trap; |
| 1962 | } |
| 1963 | |
| 1964 | // Bit-reverse load intrinsic: Check whether the instruction is a bit-reverse |
| 1965 | // load intrinsic. |
| 1966 | static bool isBrevLdIntrinsic(const Value *Inst) { |
| 1967 | unsigned ID = cast<IntrinsicInst>(Val: Inst)->getIntrinsicID(); |
| 1968 | return (ID == Intrinsic::hexagon_L2_loadrd_pbr || |
| 1969 | ID == Intrinsic::hexagon_L2_loadri_pbr || |
| 1970 | ID == Intrinsic::hexagon_L2_loadrh_pbr || |
| 1971 | ID == Intrinsic::hexagon_L2_loadruh_pbr || |
| 1972 | ID == Intrinsic::hexagon_L2_loadrb_pbr || |
| 1973 | ID == Intrinsic::hexagon_L2_loadrub_pbr); |
| 1974 | } |
| 1975 | |
| 1976 | // Bit-reverse load intrinsic: Crawl up and figure out the object from the |
| 1977 | // previous instruction. So far we only handle bitcast, extractvalue, and |
| 1978 | // bit-reverse load intrinsic instructions. Should we handle CGEP? |
| 1979 | static Value *getBrevLdObject(Value *V) { |
| 1980 | if (Operator::getOpcode(V) == Instruction::ExtractValue || |
| 1981 | Operator::getOpcode(V) == Instruction::BitCast) |
| 1982 | V = cast<Operator>(Val: V)->getOperand(i: 0); |
| 1983 | else if (isa<IntrinsicInst>(Val: V) && isBrevLdIntrinsic(Inst: V)) |
| 1984 | V = cast<Instruction>(Val: V)->getOperand(i: 0); |
| 1985 | return V; |
| 1986 | } |
| 1987 | |
| 1988 | // Bit-reverse Load Intrinsic: For a PHI Node return either an incoming edge or |
| 1989 | // a back edge. If the back edge comes from the intrinsic itself, the incoming |
| 1990 | // edge is returned. |
| 1991 | static Value *returnEdge(const PHINode *PN, Value *IntrBaseVal) { |
| 1992 | const BasicBlock *Parent = PN->getParent(); |
| 1993 | int Idx = -1; |
| 1994 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i) { |
| 1995 | BasicBlock *Blk = PN->getIncomingBlock(i); |
| 1996 | // Determine whether the back edge originates from the intrinsic. |
| 1997 | if (Blk == Parent) { |
| 1998 | Value *BackEdgeVal = PN->getIncomingValue(i); |
| 1999 | Value *BaseVal; |
| 2000 | // Loop until we return the same value or we hit IntrBaseVal. |
| 2001 | do { |
| 2002 | BaseVal = BackEdgeVal; |
| 2003 | BackEdgeVal = getBrevLdObject(V: BackEdgeVal); |
| 2004 | } while ((BaseVal != BackEdgeVal) && (IntrBaseVal != BackEdgeVal)); |
| 2005 | // If getBrevLdObject returns IntrBaseVal, we should return the |
| 2006 | // incoming edge. |
| 2007 | if (IntrBaseVal == BackEdgeVal) |
| 2008 | continue; |
| 2009 | Idx = i; |
| 2010 | break; |
| 2011 | } else // Record the incoming edge. |
| 2012 | Idx = i; |
| 2013 | } |
| 2014 | assert(Idx >= 0 && "Unexpected index to incoming argument in PHI" ); |
| 2015 | return PN->getIncomingValue(i: Idx); |
| 2016 | } |
| 2017 | |
| 2018 | // Bit-reverse load intrinsic: Figure out the underlying object that the base |
| 2019 | // pointer points to for the bit-reverse load intrinsic. Setting this on the |
| 2020 | // memoperand may help alias analysis figure out the dependencies. |
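|  | // A typical (hypothetical) pattern: a loop PHI merges the initial base |
|  | // pointer with the updated pointer extracted (via extractvalue) from the |
|  | // previous @llvm.hexagon.L2.loadri.pbr result; crawling through those |
|  | // recovers the initial base pointer as the underlying object. |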
| 2021 | static Value *getUnderLyingObjectForBrevLdIntr(Value *V) { |
| 2022 | Value *IntrBaseVal = V; |
| 2023 | Value *BaseVal; |
| 2024 | // Loop until getBrevLdObject returns the same value, which implies we have |
| 2025 | // either found the object or hit a PHI. |
| 2026 | do { |
| 2027 | BaseVal = V; |
| 2028 | V = getBrevLdObject(V); |
| 2029 | } while (BaseVal != V); |
| 2030 | |
| 2031 | // Identify the object from PHINode. |
| 2032 | if (const PHINode *PN = dyn_cast<PHINode>(Val: V)) |
| 2033 | return returnEdge(PN, IntrBaseVal); |
| 2034 | // For non-PHI nodes, the object is the last value returned by |
| 2035 | // getBrevLdObject. |
| 2036 | return V; |
| 2037 | } |
| 2038 | |
| 2039 | /// Given an intrinsic, checks if on the target the intrinsic will need to map |
| 2040 | /// to a MemIntrinsicNode (touches memory). If this is the case, it returns |
| 2041 | /// true and store the intrinsic information into the IntrinsicInfo that was |
| 2042 | /// passed to the function. |
| 2043 | bool HexagonTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, |
| 2044 | const CallInst &I, |
| 2045 | MachineFunction &MF, |
| 2046 | unsigned Intrinsic) const { |
| 2047 | switch (Intrinsic) { |
| 2048 | case Intrinsic::hexagon_L2_loadrd_pbr: |
| 2049 | case Intrinsic::hexagon_L2_loadri_pbr: |
| 2050 | case Intrinsic::hexagon_L2_loadrh_pbr: |
| 2051 | case Intrinsic::hexagon_L2_loadruh_pbr: |
| 2052 | case Intrinsic::hexagon_L2_loadrb_pbr: |
| 2053 | case Intrinsic::hexagon_L2_loadrub_pbr: { |
| 2054 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 2055 | auto &DL = I.getDataLayout(); |
| 2056 | auto &Cont = I.getCalledFunction()->getParent()->getContext(); |
| 2057 | // The intrinsic function call is of the form { ElTy, i8* } |
| 2058 | // @llvm.hexagon.L2.loadXX.pbr(i8*, i32). The pointer and memory access type |
| 2059 | // should be derived from ElTy. |
| 2060 | Type *ElTy = I.getCalledFunction()->getReturnType()->getStructElementType(N: 0); |
| 2061 | Info.memVT = MVT::getVT(Ty: ElTy); |
| 2062 | llvm::Value *BasePtrVal = I.getOperand(i_nocapture: 0); |
| 2063 | Info.ptrVal = getUnderLyingObjectForBrevLdIntr(V: BasePtrVal); |
| 2064 | // The offset value comes through the modifier register. For now, assume |
| 2065 | // the offset is 0. |
| 2066 | Info.offset = 0; |
| 2067 | Info.align = DL.getABITypeAlign(Ty: Info.memVT.getTypeForEVT(Context&: Cont)); |
| 2068 | Info.flags = MachineMemOperand::MOLoad; |
| 2069 | return true; |
| 2070 | } |
| 2071 | case Intrinsic::hexagon_V6_vgathermw: |
| 2072 | case Intrinsic::hexagon_V6_vgathermw_128B: |
| 2073 | case Intrinsic::hexagon_V6_vgathermh: |
| 2074 | case Intrinsic::hexagon_V6_vgathermh_128B: |
| 2075 | case Intrinsic::hexagon_V6_vgathermhw: |
| 2076 | case Intrinsic::hexagon_V6_vgathermhw_128B: |
| 2077 | case Intrinsic::hexagon_V6_vgathermwq: |
| 2078 | case Intrinsic::hexagon_V6_vgathermwq_128B: |
| 2079 | case Intrinsic::hexagon_V6_vgathermhq: |
| 2080 | case Intrinsic::hexagon_V6_vgathermhq_128B: |
| 2081 | case Intrinsic::hexagon_V6_vgathermhwq: |
| 2082 | case Intrinsic::hexagon_V6_vgathermhwq_128B: { |
| 2083 | const Module &M = *I.getParent()->getParent()->getParent(); |
| 2084 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 2085 | Type *VecTy = I.getArgOperand(i: 1)->getType(); |
| 2086 | Info.memVT = MVT::getVT(Ty: VecTy); |
| 2087 | Info.ptrVal = I.getArgOperand(i: 0); |
| 2088 | Info.offset = 0; |
| 2089 | Info.align = |
| 2090 | MaybeAlign(M.getDataLayout().getTypeAllocSizeInBits(Ty: VecTy) / 8); |
| 2091 | Info.flags = MachineMemOperand::MOLoad | |
| 2092 | MachineMemOperand::MOStore | |
| 2093 | MachineMemOperand::MOVolatile; |
| 2094 | return true; |
| 2095 | } |
| 2096 | default: |
| 2097 | break; |
| 2098 | } |
| 2099 | return false; |
| 2100 | } |
| 2101 | |
| 2102 | bool HexagonTargetLowering::hasBitTest(SDValue X, SDValue Y) const { |
| 2103 | return X.getValueType().isScalarInteger(); // 'tstbit' |
| 2104 | } |
| 2105 | |
| 2106 | bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { |
| 2107 | return isTruncateFree(VT1: EVT::getEVT(Ty: Ty1), VT2: EVT::getEVT(Ty: Ty2)); |
| 2108 | } |
| 2109 | |
| 2110 | bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { |
| 2111 | if (!VT1.isSimple() || !VT2.isSimple()) |
| 2112 | return false; |
| 2113 | return VT1.getSimpleVT() == MVT::i64 && VT2.getSimpleVT() == MVT::i32; |
| 2114 | } |
| 2115 | |
| 2116 | bool HexagonTargetLowering::isFMAFasterThanFMulAndFAdd( |
| 2117 | const MachineFunction &MF, EVT VT) const { |
| 2118 | return isOperationLegalOrCustom(Op: ISD::FMA, VT); |
| 2119 | } |
| 2120 | |
| 2121 | // Should a BUILD_VECTOR be expanded using shuffles? Not on Hexagon. |
| 2122 | bool HexagonTargetLowering::shouldExpandBuildVectorWithShuffles(EVT VT, |
| 2123 | unsigned DefinedValues) const { |
| 2124 | return false; |
| 2125 | } |
| 2126 | |
| 2127 | bool HexagonTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, |
| 2128 | unsigned Index) const { |
| 2129 | assert(ResVT.getVectorElementType() == SrcVT.getVectorElementType()); |
| 2130 | if (!ResVT.isSimple() || !SrcVT.isSimple()) |
| 2131 | return false; |
| 2132 | |
| 2133 | MVT ResTy = ResVT.getSimpleVT(), SrcTy = SrcVT.getSimpleVT(); |
| 2134 | if (ResTy.getVectorElementType() != MVT::i1) |
| 2135 | return true; |
| 2136 | |
| 2137 | // Non-HVX bool vectors are relatively cheap. |
| 2138 | return SrcTy.getVectorNumElements() <= 8; |
| 2139 | } |
| 2140 | |
| 2141 | bool HexagonTargetLowering::isTargetCanonicalConstantNode(SDValue Op) const { |
| 2142 | return Op.getOpcode() == ISD::CONCAT_VECTORS || |
| 2143 | TargetLowering::isTargetCanonicalConstantNode(Op); |
| 2144 | } |
| 2145 | |
| 2146 | bool HexagonTargetLowering::isShuffleMaskLegal(ArrayRef<int> Mask, |
| 2147 | EVT VT) const { |
| 2148 | return true; |
| 2149 | } |
| 2150 | |
| 2151 | TargetLoweringBase::LegalizeTypeAction |
| 2152 | HexagonTargetLowering::getPreferredVectorAction(MVT VT) const { |
| 2153 | unsigned VecLen = VT.getVectorMinNumElements(); |
| 2154 | MVT ElemTy = VT.getVectorElementType(); |
| 2155 | |
| 2156 | if (VecLen == 1 || VT.isScalableVector()) |
| 2157 | return TargetLoweringBase::TypeScalarizeVector; |
| 2158 | |
| 2159 | if (Subtarget.useHVXOps()) { |
| 2160 | unsigned Action = getPreferredHvxVectorAction(VecTy: VT); |
| 2161 | if (Action != ~0u) |
| 2162 | return static_cast<TargetLoweringBase::LegalizeTypeAction>(Action); |
| 2163 | } |
| 2164 | |
| 2165 | // Always widen (remaining) vectors of i1. |
| 2166 | if (ElemTy == MVT::i1) |
| 2167 | return TargetLoweringBase::TypeWidenVector; |
| 2168 | // Widen non-power-of-2 vectors. Such types cannot be split right now, |
| 2169 | // and computeRegisterProperties will override "split" with "widen", |
| 2170 | // which can cause other issues. |
| 2171 | if (!isPowerOf2_32(Value: VecLen)) |
| 2172 | return TargetLoweringBase::TypeWidenVector; |
| 2173 | |
| 2174 | return TargetLoweringBase::TypeSplitVector; |
| 2175 | } |
| 2176 | |
| 2177 | TargetLoweringBase::LegalizeAction |
| 2178 | HexagonTargetLowering::getCustomOperationAction(SDNode &Op) const { |
| 2179 | if (Subtarget.useHVXOps()) { |
| 2180 | unsigned Action = getCustomHvxOperationAction(Op); |
| 2181 | if (Action != ~0u) |
| 2182 | return static_cast<TargetLoweringBase::LegalizeAction>(Action); |
| 2183 | } |
| 2184 | return TargetLoweringBase::Legal; |
| 2185 | } |
| 2186 | |
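|  | // For example, (add %base, Constant 8) yields {%base, 8}; anything else, |
|  | // including an ADD with a non-constant second operand, yields {Addr, 0}. |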
| 2187 | std::pair<SDValue, int> |
| 2188 | HexagonTargetLowering::getBaseAndOffset(SDValue Addr) const { |
| 2189 | if (Addr.getOpcode() == ISD::ADD) { |
| 2190 | SDValue Op1 = Addr.getOperand(i: 1); |
| 2191 | if (auto *CN = dyn_cast<const ConstantSDNode>(Val: Op1.getNode())) |
| 2192 | return { Addr.getOperand(i: 0), CN->getSExtValue() }; |
| 2193 | } |
| 2194 | return { Addr, 0 }; |
| 2195 | } |
| 2196 | |
| 2197 | // Lower a vector shuffle (V1, V2, V3). V1 and V2 are the two vectors |
| 2198 | // to select data from; V3 is the permutation. |
| 2199 | SDValue |
| 2200 | HexagonTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) |
| 2201 | const { |
| 2202 | const auto *SVN = cast<ShuffleVectorSDNode>(Val&: Op); |
| 2203 | ArrayRef<int> AM = SVN->getMask(); |
| 2204 | assert(AM.size() <= 8 && "Unexpected shuffle mask" ); |
| 2205 | unsigned VecLen = AM.size(); |
| 2206 | |
| 2207 | MVT VecTy = ty(Op); |
| 2208 | assert(!Subtarget.isHVXVectorType(VecTy, true) && |
| 2209 | "HVX shuffles should be legal" ); |
| 2210 | assert(VecTy.getSizeInBits() <= 64 && "Unexpected vector length" ); |
| 2211 | |
| 2212 | SDValue Op0 = Op.getOperand(i: 0); |
| 2213 | SDValue Op1 = Op.getOperand(i: 1); |
| 2214 | const SDLoc &dl(Op); |
| 2215 | |
| 2216 | // If the inputs are not the same as the output, bail. This is not an |
| 2217 | // error situation, but complicates the handling and the default expansion |
| 2218 | // (into BUILD_VECTOR) should be adequate. |
| 2219 | if (ty(Op: Op0) != VecTy || ty(Op: Op1) != VecTy) |
| 2220 | return SDValue(); |
| 2221 | |
| 2222 | // Normalize the mask so that the first non-negative index comes from |
| 2223 | // the first operand. |
| 2224 | SmallVector<int, 8> Mask(AM); |
| 2225 | unsigned F = llvm::find_if(Range&: AM, P: [](int M) { return M >= 0; }) - AM.data(); |
| 2226 | if (F == AM.size()) |
| 2227 | return DAG.getUNDEF(VT: VecTy); |
| 2228 | if (AM[F] >= int(VecLen)) { |
| 2229 | ShuffleVectorSDNode::commuteMask(Mask); |
| 2230 | std::swap(a&: Op0, b&: Op1); |
| 2231 | } |
| 2232 | |
| 2233 | // Express the shuffle mask in terms of bytes. |
| 2234 | SmallVector<int,8> ByteMask; |
| 2235 | unsigned ElemBytes = VecTy.getVectorElementType().getSizeInBits() / 8; |
| 2236 | for (int M : Mask) { |
| 2237 | if (M < 0) { |
| 2238 | for (unsigned j = 0; j != ElemBytes; ++j) |
| 2239 | ByteMask.push_back(Elt: -1); |
| 2240 | } else { |
| 2241 | for (unsigned j = 0; j != ElemBytes; ++j) |
| 2242 | ByteMask.push_back(Elt: M*ElemBytes + j); |
| 2243 | } |
| 2244 | } |
| 2245 | assert(ByteMask.size() <= 8); |
| 2246 | |
| 2247 | // All non-undef (non-negative) indexes are well within [0..127], so they |
| 2248 | // fit in a single byte. Build two 64-bit words: |
| 2249 | // - MaskIdx where each byte is the corresponding index (for non-negative |
| 2250 | // indexes), and 0xFF for negative indexes, and |
| 2251 | // - MaskUnd that has 0xFF for each negative index. |
| 2252 | uint64_t MaskIdx = 0; |
| 2253 | uint64_t MaskUnd = 0; |
| 2254 | for (unsigned i = 0, e = ByteMask.size(); i != e; ++i) { |
| 2255 | unsigned S = 8*i; |
| 2256 | uint64_t M = ByteMask[i] & 0xFF; |
| 2257 | if (M == 0xFF) |
| 2258 | MaskUnd |= M << S; |
| 2259 | MaskIdx |= M << S; |
| 2260 | } |
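|  | // Worked example: a v4i8 shuffle with mask {3,2,1,0} gives ByteMask |
|  | // {3,2,1,0}, MaskIdx = 0x00010203, MaskUnd = 0, matching the byte-swap |
|  | // pattern below. Undef lanes still match: mask {3,2,1,-1} gives |
|  | // MaskIdx = 0xFF010203 and MaskUnd = 0xFF000000, so the comparison |
|  | // against (0x00010203 | MaskUnd) succeeds as well. |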
| 2261 | |
| 2262 | if (ByteMask.size() == 4) { |
| 2263 | // Identity. |
| 2264 | if (MaskIdx == (0x03020100 | MaskUnd)) |
| 2265 | return Op0; |
| 2266 | // Byte swap. |
| 2267 | if (MaskIdx == (0x00010203 | MaskUnd)) { |
| 2268 | SDValue T0 = DAG.getBitcast(VT: MVT::i32, V: Op0); |
| 2269 | SDValue T1 = DAG.getNode(Opcode: ISD::BSWAP, DL: dl, VT: MVT::i32, Operand: T0); |
| 2270 | return DAG.getBitcast(VT: VecTy, V: T1); |
| 2271 | } |
| 2272 | |
| 2273 | // Byte packs. |
| 2274 | SDValue Concat10 = |
| 2275 | getCombine(Hi: Op1, Lo: Op0, dl, ResTy: typeJoin(Tys: {ty(Op: Op1), ty(Op: Op0)}), DAG); |
| 2276 | if (MaskIdx == (0x06040200 | MaskUnd)) |
| 2277 | return getInstr(MachineOpc: Hexagon::S2_vtrunehb, dl, Ty: VecTy, Ops: {Concat10}, DAG); |
| 2278 | if (MaskIdx == (0x07050301 | MaskUnd)) |
| 2279 | return getInstr(MachineOpc: Hexagon::S2_vtrunohb, dl, Ty: VecTy, Ops: {Concat10}, DAG); |
| 2280 | |
| 2281 | SDValue Concat01 = |
| 2282 | getCombine(Hi: Op0, Lo: Op1, dl, ResTy: typeJoin(Tys: {ty(Op: Op0), ty(Op: Op1)}), DAG); |
| 2283 | if (MaskIdx == (0x02000604 | MaskUnd)) |
| 2284 | return getInstr(MachineOpc: Hexagon::S2_vtrunehb, dl, Ty: VecTy, Ops: {Concat01}, DAG); |
| 2285 | if (MaskIdx == (0x03010705 | MaskUnd)) |
| 2286 | return getInstr(MachineOpc: Hexagon::S2_vtrunohb, dl, Ty: VecTy, Ops: {Concat01}, DAG); |
| 2287 | } |
| 2288 | |
| 2289 | if (ByteMask.size() == 8) { |
| 2290 | // Identity. |
| 2291 | if (MaskIdx == (0x0706050403020100ull | MaskUnd)) |
| 2292 | return Op0; |
| 2293 | // Byte swap. |
| 2294 | if (MaskIdx == (0x0001020304050607ull | MaskUnd)) { |
| 2295 | SDValue T0 = DAG.getBitcast(VT: MVT::i64, V: Op0); |
| 2296 | SDValue T1 = DAG.getNode(Opcode: ISD::BSWAP, DL: dl, VT: MVT::i64, Operand: T0); |
| 2297 | return DAG.getBitcast(VT: VecTy, V: T1); |
| 2298 | } |
| 2299 | |
| 2300 | // Halfword picks. |
| 2301 | if (MaskIdx == (0x0d0c050409080100ull | MaskUnd)) |
| 2302 | return getInstr(MachineOpc: Hexagon::S2_shuffeh, dl, Ty: VecTy, Ops: {Op1, Op0}, DAG); |
| 2303 | if (MaskIdx == (0x0f0e07060b0a0302ull | MaskUnd)) |
| 2304 | return getInstr(MachineOpc: Hexagon::S2_shuffoh, dl, Ty: VecTy, Ops: {Op1, Op0}, DAG); |
| 2305 | if (MaskIdx == (0x0d0c090805040100ull | MaskUnd)) |
| 2306 | return getInstr(MachineOpc: Hexagon::S2_vtrunewh, dl, Ty: VecTy, Ops: {Op1, Op0}, DAG); |
| 2307 | if (MaskIdx == (0x0f0e0b0a07060302ull | MaskUnd)) |
| 2308 | return getInstr(MachineOpc: Hexagon::S2_vtrunowh, dl, Ty: VecTy, Ops: {Op1, Op0}, DAG); |
| 2309 | if (MaskIdx == (0x0706030205040100ull | MaskUnd)) { |
| 2310 | VectorPair P = opSplit(Vec: Op0, dl, DAG); |
| 2311 | return getInstr(MachineOpc: Hexagon::S2_packhl, dl, Ty: VecTy, Ops: {P.second, P.first}, DAG); |
| 2312 | } |
| 2313 | |
| 2314 | // Byte packs. |
| 2315 | if (MaskIdx == (0x0e060c040a020800ull | MaskUnd)) |
| 2316 | return getInstr(MachineOpc: Hexagon::S2_shuffeb, dl, Ty: VecTy, Ops: {Op1, Op0}, DAG); |
| 2317 | if (MaskIdx == (0x0f070d050b030901ull | MaskUnd)) |
| 2318 | return getInstr(MachineOpc: Hexagon::S2_shuffob, dl, Ty: VecTy, Ops: {Op1, Op0}, DAG); |
| 2319 | } |
| 2320 | |
| 2321 | return SDValue(); |
| 2322 | } |
| 2323 | |
| 2324 | SDValue |
| 2325 | HexagonTargetLowering::getSplatValue(SDValue Op, SelectionDAG &DAG) const { |
| 2326 | switch (Op.getOpcode()) { |
| 2327 | case ISD::BUILD_VECTOR: |
| 2328 | if (SDValue S = cast<BuildVectorSDNode>(Val&: Op)->getSplatValue()) |
| 2329 | return S; |
| 2330 | break; |
| 2331 | case ISD::SPLAT_VECTOR: |
| 2332 | return Op.getOperand(i: 0); |
| 2333 | } |
| 2334 | return SDValue(); |
| 2335 | } |
| 2336 | |
| 2337 | // Create a Hexagon-specific node for shifting a vector by an integer. |
| 2338 | SDValue |
| 2339 | HexagonTargetLowering::getVectorShiftByInt(SDValue Op, SelectionDAG &DAG) |
| 2340 | const { |
| 2341 | unsigned NewOpc; |
| 2342 | switch (Op.getOpcode()) { |
| 2343 | case ISD::SHL: |
| 2344 | NewOpc = HexagonISD::VASL; |
| 2345 | break; |
| 2346 | case ISD::SRA: |
| 2347 | NewOpc = HexagonISD::VASR; |
| 2348 | break; |
| 2349 | case ISD::SRL: |
| 2350 | NewOpc = HexagonISD::VLSR; |
| 2351 | break; |
| 2352 | default: |
| 2353 | llvm_unreachable("Unexpected shift opcode" ); |
| 2354 | } |
| 2355 | |
| 2356 | if (SDValue Sp = getSplatValue(Op: Op.getOperand(i: 1), DAG)) |
| 2357 | return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: ty(Op), N1: Op.getOperand(i: 0), N2: Sp); |
| 2358 | return SDValue(); |
| 2359 | } |
| 2360 | |
| 2361 | SDValue |
| 2362 | HexagonTargetLowering::LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const { |
| 2363 | const SDLoc &dl(Op); |
| 2364 | |
| 2365 | // First try to convert the shift (by vector) to a shift by a scalar. |
| 2366 | // If we first split the shift, the shift amount will become 'extract |
| 2367 | // subvector', and will no longer be recognized as scalar. |
| 2368 | SDValue Res = Op; |
| 2369 | if (SDValue S = getVectorShiftByInt(Op, DAG)) |
| 2370 | Res = S; |
| 2371 | |
| 2372 | unsigned Opc = Res.getOpcode(); |
| 2373 | switch (Opc) { |
| 2374 | case HexagonISD::VASR: |
| 2375 | case HexagonISD::VLSR: |
| 2376 | case HexagonISD::VASL: |
| 2377 | break; |
| 2378 | default: |
| 2379 | // No instructions for shifts by non-scalars. |
| 2380 | return SDValue(); |
| 2381 | } |
| 2382 | |
| 2383 | MVT ResTy = ty(Op: Res); |
| 2384 | if (ResTy.getVectorElementType() != MVT::i8) |
| 2385 | return Res; |
| 2386 | |
| 2387 | // For shifts of i8, extend the inputs to i16, then truncate back to i8. |
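  // The lambda below shifts one 32-bit part at a time: widen v4i8 to v4i16
  // (sign-extending for arithmetic shifts), shift, then narrow back.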
| 2388 | assert(ResTy.getVectorElementType() == MVT::i8); |
| 2389 | SDValue Val = Res.getOperand(i: 0), Amt = Res.getOperand(i: 1); |
| 2390 | |
| 2391 | auto ShiftPartI8 = [&dl, &DAG, this](unsigned Opc, SDValue V, SDValue A) { |
| 2392 | MVT Ty = ty(Op: V); |
| 2393 | MVT ExtTy = MVT::getVectorVT(VT: MVT::i16, NumElements: Ty.getVectorNumElements()); |
| 2394 | SDValue ExtV = Opc == HexagonISD::VASR ? DAG.getSExtOrTrunc(Op: V, DL: dl, VT: ExtTy) |
| 2395 | : DAG.getZExtOrTrunc(Op: V, DL: dl, VT: ExtTy); |
| 2396 | SDValue ExtS = DAG.getNode(Opcode: Opc, DL: dl, VT: ExtTy, Ops: {ExtV, A}); |
| 2397 | return DAG.getZExtOrTrunc(Op: ExtS, DL: dl, VT: Ty); |
| 2398 | }; |
| 2399 | |
| 2400 | if (ResTy.getSizeInBits() == 32) |
| 2401 | return ShiftPartI8(Opc, Val, Amt); |
| 2402 | |
| 2403 | auto [LoV, HiV] = opSplit(Vec: Val, dl, DAG); |
| 2404 | return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT: ResTy, |
| 2405 | Ops: {ShiftPartI8(Opc, LoV, Amt), ShiftPartI8(Opc, HiV, Amt)}); |
| 2406 | } |
| 2407 | |
| 2408 | SDValue |
| 2409 | HexagonTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const { |
| 2410 | if (isa<ConstantSDNode>(Val: Op.getOperand(i: 1).getNode())) |
| 2411 | return Op; |
| 2412 | return SDValue(); |
| 2413 | } |
| 2414 | |
| 2415 | SDValue |
| 2416 | HexagonTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const { |
| 2417 | MVT ResTy = ty(Op); |
| 2418 | SDValue InpV = Op.getOperand(i: 0); |
| 2419 | MVT InpTy = ty(Op: InpV); |
| 2420 | assert(ResTy.getSizeInBits() == InpTy.getSizeInBits()); |
| 2421 | const SDLoc &dl(Op); |
| 2422 | |
| 2423 | // Handle conversion from i8 to v8i1. |
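  // C2_tfrrp moves a 32-bit register into a predicate register, so widen
  // the byte to i32 first; its low 8 bits become the v8i1 lanes.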
| 2424 | if (InpTy == MVT::i8) { |
| 2425 | if (ResTy == MVT::v8i1) { |
| 2426 | SDValue Sc = DAG.getBitcast(VT: tyScalar(Ty: InpTy), V: InpV); |
| 2427 | SDValue Ext = DAG.getZExtOrTrunc(Op: Sc, DL: dl, VT: MVT::i32); |
| 2428 | return getInstr(MachineOpc: Hexagon::C2_tfrrp, dl, Ty: ResTy, Ops: Ext, DAG); |
| 2429 | } |
| 2430 | return SDValue(); |
| 2431 | } |
| 2432 | |
| 2433 | return Op; |
| 2434 | } |
| 2435 | |
| 2436 | bool |
| 2437 | HexagonTargetLowering::getBuildVectorConstInts(ArrayRef<SDValue> Values, |
| 2438 | MVT VecTy, SelectionDAG &DAG, |
| 2439 | MutableArrayRef<ConstantInt*> Consts) const { |
| 2440 | MVT ElemTy = VecTy.getVectorElementType(); |
| 2441 | unsigned ElemWidth = ElemTy.getSizeInBits(); |
| 2442 | IntegerType *IntTy = IntegerType::get(C&: *DAG.getContext(), NumBits: ElemWidth); |
| 2443 | bool AllConst = true; |
| 2444 | |
| 2445 | for (unsigned i = 0, e = Values.size(); i != e; ++i) { |
| 2446 | SDValue V = Values[i]; |
| 2447 | if (V.isUndef()) { |
| 2448 | Consts[i] = ConstantInt::get(Ty: IntTy, V: 0); |
| 2449 | continue; |
| 2450 | } |
| 2451 | // Make sure to always cast to IntTy. |
| 2452 | if (auto *CN = dyn_cast<ConstantSDNode>(Val: V.getNode())) { |
| 2453 | const ConstantInt *CI = CN->getConstantIntValue(); |
| 2454 | Consts[i] = ConstantInt::get(Ty: IntTy, V: CI->getValue().getSExtValue()); |
| 2455 | } else if (auto *CN = dyn_cast<ConstantFPSDNode>(Val: V.getNode())) { |
| 2456 | const ConstantFP *CF = CN->getConstantFPValue(); |
| 2457 | APInt A = CF->getValueAPF().bitcastToAPInt(); |
| 2458 | Consts[i] = ConstantInt::get(Ty: IntTy, V: A.getZExtValue()); |
| 2459 | } else { |
| 2460 | AllConst = false; |
| 2461 | } |
| 2462 | } |
| 2463 | return AllConst; |
| 2464 | } |
| 2465 | |
| 2466 | SDValue |
| 2467 | HexagonTargetLowering::buildVector32(ArrayRef<SDValue> Elem, const SDLoc &dl, |
| 2468 | MVT VecTy, SelectionDAG &DAG) const { |
| 2469 | MVT ElemTy = VecTy.getVectorElementType(); |
| 2470 | assert(VecTy.getVectorNumElements() == Elem.size()); |
| 2471 | |
| 2472 | SmallVector<ConstantInt*,4> Consts(Elem.size()); |
| 2473 | bool AllConst = getBuildVectorConstInts(Values: Elem, VecTy, DAG, Consts); |
| 2474 | |
| 2475 | unsigned First, Num = Elem.size(); |
| 2476 | for (First = 0; First != Num; ++First) { |
| 2477 | if (!isUndef(Op: Elem[First])) |
| 2478 | break; |
| 2479 | } |
| 2480 | if (First == Num) |
| 2481 | return DAG.getUNDEF(VT: VecTy); |
| 2482 | |
| 2483 | if (AllConst && |
| 2484 | llvm::all_of(Range&: Consts, P: [](ConstantInt *CI) { return CI->isZero(); })) |
| 2485 | return getZero(dl, Ty: VecTy, DAG); |
| 2486 | |
| 2487 | if (ElemTy == MVT::i16 || ElemTy == MVT::f16) { |
| 2488 | assert(Elem.size() == 2); |
| 2489 | if (AllConst) { |
| 2490 | // The 'Consts' array will have all values as integers regardless |
| 2491 | // of the vector element type. |
| 2492 | uint32_t V = (Consts[0]->getZExtValue() & 0xFFFF) | |
| 2493 | Consts[1]->getZExtValue() << 16; |
| 2494 | return DAG.getBitcast(VT: VecTy, V: DAG.getConstant(Val: V, DL: dl, VT: MVT::i32)); |
| 2495 | } |
| 2496 | SDValue E0, E1; |
| 2497 | if (ElemTy == MVT::f16) { |
| 2498 | E0 = DAG.getZExtOrTrunc(Op: DAG.getBitcast(VT: MVT::i16, V: Elem[0]), DL: dl, VT: MVT::i32); |
| 2499 | E1 = DAG.getZExtOrTrunc(Op: DAG.getBitcast(VT: MVT::i16, V: Elem[1]), DL: dl, VT: MVT::i32); |
| 2500 | } else { |
| 2501 | E0 = Elem[0]; |
| 2502 | E1 = Elem[1]; |
| 2503 | } |
| 2504 | SDValue N = getInstr(MachineOpc: Hexagon::A2_combine_ll, dl, Ty: MVT::i32, Ops: {E1, E0}, DAG); |
| 2505 | return DAG.getBitcast(VT: VecTy, V: N); |
| 2506 | } |
| 2507 | |
| 2508 | if (ElemTy == MVT::i8) { |
| 2509 | // First try generating a constant. |
| 2510 | if (AllConst) { |
| 2511 | uint32_t V = (Consts[0]->getZExtValue() & 0xFF) | |
| 2512 | (Consts[1]->getZExtValue() & 0xFF) << 8 | |
| 2513 | (Consts[2]->getZExtValue() & 0xFF) << 16 | |
| 2514 | Consts[3]->getZExtValue() << 24; |
| 2515 | return DAG.getBitcast(VT: MVT::v4i8, V: DAG.getConstant(Val: V, DL: dl, VT: MVT::i32)); |
| 2516 | } |
| 2517 | |
| 2518 | // Then try splat. |
| 2519 | bool IsSplat = true; |
| 2520 | for (unsigned i = First+1; i != Num; ++i) { |
| 2521 | if (Elem[i] == Elem[First] || isUndef(Op: Elem[i])) |
| 2522 | continue; |
| 2523 | IsSplat = false; |
| 2524 | break; |
| 2525 | } |
| 2526 | if (IsSplat) { |
| 2527 | // Legalize the operand of SPLAT_VECTOR. |
| 2528 | SDValue Ext = DAG.getZExtOrTrunc(Op: Elem[First], DL: dl, VT: MVT::i32); |
| 2529 | return DAG.getNode(Opcode: ISD::SPLAT_VECTOR, DL: dl, VT: VecTy, Operand: Ext); |
| 2530 | } |
| 2531 | |
| 2532 | // Generate |
| 2533 | // (zxtb(Elem[0]) | (zxtb(Elem[1]) << 8)) | |
| 2534 | // (zxtb(Elem[2]) | (zxtb(Elem[3]) << 8)) << 16 |
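    // e.g. <1, 2, 3, 4> becomes the 32-bit value 0x04030201.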
| 2535 | assert(Elem.size() == 4); |
| 2536 | SDValue Vs[4]; |
| 2537 | for (unsigned i = 0; i != 4; ++i) { |
| 2538 | Vs[i] = DAG.getZExtOrTrunc(Op: Elem[i], DL: dl, VT: MVT::i32); |
| 2539 | Vs[i] = DAG.getZeroExtendInReg(Op: Vs[i], DL: dl, VT: MVT::i8); |
| 2540 | } |
| 2541 | SDValue S8 = DAG.getConstant(Val: 8, DL: dl, VT: MVT::i32); |
| 2542 | SDValue T0 = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: MVT::i32, Ops: {Vs[1], S8}); |
| 2543 | SDValue T1 = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: MVT::i32, Ops: {Vs[3], S8}); |
| 2544 | SDValue B0 = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: MVT::i32, Ops: {Vs[0], T0}); |
| 2545 | SDValue B1 = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: MVT::i32, Ops: {Vs[2], T1}); |
| 2546 | |
| 2547 | SDValue R = getInstr(MachineOpc: Hexagon::A2_combine_ll, dl, Ty: MVT::i32, Ops: {B1, B0}, DAG); |
| 2548 | return DAG.getBitcast(VT: MVT::v4i8, V: R); |
| 2549 | } |
| 2550 | |
| 2551 | #ifndef NDEBUG |
| 2552 | dbgs() << "VecTy: " << VecTy << '\n'; |
| 2553 | #endif |
| 2554 | llvm_unreachable("Unexpected vector element type" ); |
| 2555 | } |
| 2556 | |
| 2557 | SDValue |
| 2558 | HexagonTargetLowering::buildVector64(ArrayRef<SDValue> Elem, const SDLoc &dl, |
| 2559 | MVT VecTy, SelectionDAG &DAG) const { |
| 2560 | MVT ElemTy = VecTy.getVectorElementType(); |
| 2561 | assert(VecTy.getVectorNumElements() == Elem.size()); |
| 2562 | |
| 2563 | SmallVector<ConstantInt*,8> Consts(Elem.size()); |
| 2564 | bool AllConst = getBuildVectorConstInts(Values: Elem, VecTy, DAG, Consts); |
| 2565 | |
| 2566 | unsigned First, Num = Elem.size(); |
| 2567 | for (First = 0; First != Num; ++First) { |
| 2568 | if (!isUndef(Op: Elem[First])) |
| 2569 | break; |
| 2570 | } |
| 2571 | if (First == Num) |
| 2572 | return DAG.getUNDEF(VT: VecTy); |
| 2573 | |
| 2574 | if (AllConst && |
| 2575 | llvm::all_of(Range&: Consts, P: [](ConstantInt *CI) { return CI->isZero(); })) |
| 2576 | return getZero(dl, Ty: VecTy, DAG); |
| 2577 | |
| 2578 | // First try splat if possible. |
| 2579 | if (ElemTy == MVT::i16 || ElemTy == MVT::f16) { |
| 2580 | bool IsSplat = true; |
| 2581 | for (unsigned i = First+1; i != Num; ++i) { |
| 2582 | if (Elem[i] == Elem[First] || isUndef(Op: Elem[i])) |
| 2583 | continue; |
| 2584 | IsSplat = false; |
| 2585 | break; |
| 2586 | } |
| 2587 | if (IsSplat) { |
| 2588 | // Legalize the operand of SPLAT_VECTOR |
| 2589 | SDValue S = ElemTy == MVT::f16 ? DAG.getBitcast(VT: MVT::i16, V: Elem[First]) |
| 2590 | : Elem[First]; |
| 2591 | SDValue Ext = DAG.getZExtOrTrunc(Op: S, DL: dl, VT: MVT::i32); |
| 2592 | return DAG.getNode(Opcode: ISD::SPLAT_VECTOR, DL: dl, VT: VecTy, Operand: Ext); |
| 2593 | } |
| 2594 | } |
| 2595 | |
| 2596 | // Then try constant. |
| 2597 | if (AllConst) { |
| 2598 | uint64_t Val = 0; |
| 2599 | unsigned W = ElemTy.getSizeInBits(); |
| 2600 | uint64_t Mask = (1ull << W) - 1; |
| 2601 | for (unsigned i = 0; i != Num; ++i) |
| 2602 | Val = (Val << W) | (Consts[Num-1-i]->getZExtValue() & Mask); |
| 2603 | SDValue V0 = DAG.getConstant(Val, DL: dl, VT: MVT::i64); |
| 2604 | return DAG.getBitcast(VT: VecTy, V: V0); |
| 2605 | } |
| 2606 | |
| 2607 | // Build two 32-bit vectors and concatenate. |
| 2608 | MVT HalfTy = MVT::getVectorVT(VT: ElemTy, NumElements: Num/2); |
| 2609 | SDValue L = (ElemTy == MVT::i32) |
| 2610 | ? Elem[0] |
| 2611 | : buildVector32(Elem: Elem.take_front(N: Num/2), dl, VecTy: HalfTy, DAG); |
| 2612 | SDValue H = (ElemTy == MVT::i32) |
| 2613 | ? Elem[1] |
| 2614 | : buildVector32(Elem: Elem.drop_front(N: Num/2), dl, VecTy: HalfTy, DAG); |
| 2615 | return getCombine(Hi: H, Lo: L, dl, ResTy: VecTy, DAG); |
| 2616 | } |
| 2617 | |
| 2618 | SDValue |
HexagonTargetLowering::extractVector(SDValue VecV, SDValue IdxV,
                                     const SDLoc &dl, MVT ValTy, MVT ResTy,
                                     SelectionDAG &DAG) const {
| 2622 | MVT VecTy = ty(Op: VecV); |
| 2623 | assert(!ValTy.isVector() || |
| 2624 | VecTy.getVectorElementType() == ValTy.getVectorElementType()); |
| 2625 | if (VecTy.getVectorElementType() == MVT::i1) |
| 2626 | return extractVectorPred(VecV, IdxV, dl, ValTy, ResTy, DAG); |
| 2627 | |
| 2628 | unsigned VecWidth = VecTy.getSizeInBits(); |
| 2629 | unsigned ValWidth = ValTy.getSizeInBits(); |
| 2630 | unsigned ElemWidth = VecTy.getVectorElementType().getSizeInBits(); |
| 2631 | assert((VecWidth % ElemWidth) == 0); |
| 2632 | assert(VecWidth == 32 || VecWidth == 64); |
| 2633 | |
| 2634 | // Cast everything to scalar integer types. |
| 2635 | MVT ScalarTy = tyScalar(Ty: VecTy); |
| 2636 | VecV = DAG.getBitcast(VT: ScalarTy, V: VecV); |
| 2637 | |
| 2638 | SDValue WidthV = DAG.getConstant(Val: ValWidth, DL: dl, VT: MVT::i32); |
| 2639 | SDValue ExtV; |
| 2640 | |
| 2641 | if (auto *IdxN = dyn_cast<ConstantSDNode>(Val&: IdxV)) { |
| 2642 | unsigned Off = IdxN->getZExtValue() * ElemWidth; |
| 2643 | if (VecWidth == 64 && ValWidth == 32) { |
| 2644 | assert(Off == 0 || Off == 32); |
| 2645 | ExtV = Off == 0 ? LoHalf(V: VecV, DAG) : HiHalf(V: VecV, DAG); |
| 2646 | } else if (Off == 0 && (ValWidth % 8) == 0) { |
| 2647 | ExtV = DAG.getZeroExtendInReg(Op: VecV, DL: dl, VT: tyScalar(Ty: ValTy)); |
| 2648 | } else { |
| 2649 | SDValue OffV = DAG.getConstant(Val: Off, DL: dl, VT: MVT::i32); |
| 2650 | // The return type of EXTRACTU must be the same as the type of the |
| 2651 | // input vector. |
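      // e.g. extracting element 1 of v4i8: WidthV = 8, OffV = 8, so
      // EXTRACTU returns bits [15:8] of the 32-bit vector.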
| 2652 | ExtV = DAG.getNode(Opcode: HexagonISD::EXTRACTU, DL: dl, VT: ScalarTy, |
| 2653 | Ops: {VecV, WidthV, OffV}); |
| 2654 | } |
| 2655 | } else { |
| 2656 | if (ty(Op: IdxV) != MVT::i32) |
| 2657 | IdxV = DAG.getZExtOrTrunc(Op: IdxV, DL: dl, VT: MVT::i32); |
| 2658 | SDValue OffV = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: MVT::i32, N1: IdxV, |
| 2659 | N2: DAG.getConstant(Val: ElemWidth, DL: dl, VT: MVT::i32)); |
| 2660 | ExtV = DAG.getNode(Opcode: HexagonISD::EXTRACTU, DL: dl, VT: ScalarTy, |
| 2661 | Ops: {VecV, WidthV, OffV}); |
| 2662 | } |
| 2663 | |
| 2664 | // Cast ExtV to the requested result type. |
| 2665 | ExtV = DAG.getZExtOrTrunc(Op: ExtV, DL: dl, VT: tyScalar(Ty: ResTy)); |
| 2666 | ExtV = DAG.getBitcast(VT: ResTy, V: ExtV); |
| 2667 | return ExtV; |
| 2668 | } |
| 2669 | |
| 2670 | SDValue |
HexagonTargetLowering::extractVectorPred(SDValue VecV, SDValue IdxV,
                                         const SDLoc &dl, MVT ValTy, MVT ResTy,
                                         SelectionDAG &DAG) const {
| 2674 | // Special case for v{8,4,2}i1 (the only boolean vectors legal in Hexagon |
| 2675 | // without any coprocessors). |
| 2676 | MVT VecTy = ty(Op: VecV); |
| 2677 | unsigned VecWidth = VecTy.getSizeInBits(); |
| 2678 | unsigned ValWidth = ValTy.getSizeInBits(); |
| 2679 | assert(VecWidth == VecTy.getVectorNumElements() && |
| 2680 | "Vector elements should equal vector width size" ); |
| 2681 | assert(VecWidth == 8 || VecWidth == 4 || VecWidth == 2); |
| 2682 | |
| 2683 | // Check if this is an extract of the lowest bit. |
| 2684 | if (isNullConstant(V: IdxV) && ValTy.getSizeInBits() == 1) { |
| 2685 | // Extracting the lowest bit is a no-op, but it changes the type, |
| 2686 | // so it must be kept as an operation to avoid errors related to |
| 2687 | // type mismatches. |
| 2688 | return DAG.getNode(Opcode: HexagonISD::TYPECAST, DL: dl, VT: MVT::i1, Operand: VecV); |
| 2689 | } |
| 2690 | |
| 2691 | // If the value extracted is a single bit, use tstbit. |
| 2692 | if (ValWidth == 1) { |
| 2693 | SDValue A0 = getInstr(MachineOpc: Hexagon::C2_tfrpr, dl, Ty: MVT::i32, Ops: {VecV}, DAG); |
| 2694 | SDValue M0 = DAG.getConstant(Val: 8 / VecWidth, DL: dl, VT: MVT::i32); |
| 2695 | SDValue I0 = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: MVT::i32, N1: IdxV, N2: M0); |
| 2696 | return DAG.getNode(Opcode: HexagonISD::TSTBIT, DL: dl, VT: MVT::i1, N1: A0, N2: I0); |
| 2697 | } |
| 2698 | |
| 2699 | // Each bool vector (v2i1, v4i1, v8i1) always occupies 8 bits in |
| 2700 | // a predicate register. The elements of the vector are repeated |
| 2701 | // in the register (if necessary) so that the total number is 8. |
| 2702 | // The extracted subvector will need to be expanded in such a way. |
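  // e.g. extracting a v2i1 from v8i1 gives Scale = 4: the two selected
  // byte lanes of the p2d result are doubled twice (2 -> 4 -> 8) so the
  // result again fills all 8 bits of the predicate register.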
| 2703 | unsigned Scale = VecWidth / ValWidth; |
| 2704 | |
| 2705 | // Generate (p2d VecV) >> 8*Idx to move the interesting bytes to |
| 2706 | // position 0. |
| 2707 | assert(ty(IdxV) == MVT::i32); |
| 2708 | unsigned VecRep = 8 / VecWidth; |
| 2709 | SDValue S0 = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: MVT::i32, N1: IdxV, |
| 2710 | N2: DAG.getConstant(Val: 8*VecRep, DL: dl, VT: MVT::i32)); |
| 2711 | SDValue T0 = DAG.getNode(Opcode: HexagonISD::P2D, DL: dl, VT: MVT::i64, Operand: VecV); |
| 2712 | SDValue T1 = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i64, N1: T0, N2: S0); |
| 2713 | while (Scale > 1) { |
| 2714 | // The longest possible subvector is at most 32 bits, so it is always |
| 2715 | // contained in the low subregister. |
| 2716 | T1 = LoHalf(V: T1, DAG); |
| 2717 | T1 = expandPredicate(Vec32: T1, dl, DAG); |
| 2718 | Scale /= 2; |
| 2719 | } |
| 2720 | |
| 2721 | return DAG.getNode(Opcode: HexagonISD::D2P, DL: dl, VT: ResTy, Operand: T1); |
| 2722 | } |
| 2723 | |
| 2724 | SDValue |
| 2725 | HexagonTargetLowering::insertVector(SDValue VecV, SDValue ValV, SDValue IdxV, |
| 2726 | const SDLoc &dl, MVT ValTy, |
| 2727 | SelectionDAG &DAG) const { |
| 2728 | MVT VecTy = ty(Op: VecV); |
| 2729 | if (VecTy.getVectorElementType() == MVT::i1) |
| 2730 | return insertVectorPred(VecV, ValV, IdxV, dl, ValTy, DAG); |
| 2731 | |
| 2732 | unsigned VecWidth = VecTy.getSizeInBits(); |
| 2733 | unsigned ValWidth = ValTy.getSizeInBits(); |
| 2734 | assert(VecWidth == 32 || VecWidth == 64); |
| 2735 | assert((VecWidth % ValWidth) == 0); |
| 2736 | |
| 2737 | // Cast everything to scalar integer types. |
| 2738 | MVT ScalarTy = MVT::getIntegerVT(BitWidth: VecWidth); |
  // The actual type of ValV may differ from ValTy (which is related
  // to the vector type).
| 2741 | unsigned VW = ty(Op: ValV).getSizeInBits(); |
| 2742 | ValV = DAG.getBitcast(VT: MVT::getIntegerVT(BitWidth: VW), V: ValV); |
| 2743 | VecV = DAG.getBitcast(VT: ScalarTy, V: VecV); |
| 2744 | if (VW != VecWidth) |
| 2745 | ValV = DAG.getAnyExtOrTrunc(Op: ValV, DL: dl, VT: ScalarTy); |
| 2746 | |
| 2747 | SDValue WidthV = DAG.getConstant(Val: ValWidth, DL: dl, VT: MVT::i32); |
| 2748 | SDValue InsV; |
| 2749 | |
| 2750 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: IdxV)) { |
| 2751 | unsigned W = C->getZExtValue() * ValWidth; |
| 2752 | SDValue OffV = DAG.getConstant(Val: W, DL: dl, VT: MVT::i32); |
| 2753 | InsV = DAG.getNode(Opcode: HexagonISD::INSERT, DL: dl, VT: ScalarTy, |
| 2754 | Ops: {VecV, ValV, WidthV, OffV}); |
| 2755 | } else { |
| 2756 | if (ty(Op: IdxV) != MVT::i32) |
| 2757 | IdxV = DAG.getZExtOrTrunc(Op: IdxV, DL: dl, VT: MVT::i32); |
| 2758 | SDValue OffV = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: MVT::i32, N1: IdxV, N2: WidthV); |
| 2759 | InsV = DAG.getNode(Opcode: HexagonISD::INSERT, DL: dl, VT: ScalarTy, |
| 2760 | Ops: {VecV, ValV, WidthV, OffV}); |
| 2761 | } |
| 2762 | |
| 2763 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecTy, Operand: InsV); |
| 2764 | } |
| 2765 | |
| 2766 | SDValue |
| 2767 | HexagonTargetLowering::insertVectorPred(SDValue VecV, SDValue ValV, |
| 2768 | SDValue IdxV, const SDLoc &dl, |
| 2769 | MVT ValTy, SelectionDAG &DAG) const { |
| 2770 | MVT VecTy = ty(Op: VecV); |
| 2771 | unsigned VecLen = VecTy.getVectorNumElements(); |
| 2772 | |
| 2773 | if (ValTy == MVT::i1) { |
| 2774 | SDValue ToReg = getInstr(MachineOpc: Hexagon::C2_tfrpr, dl, Ty: MVT::i32, Ops: {VecV}, DAG); |
| 2775 | SDValue Ext = DAG.getSExtOrTrunc(Op: ValV, DL: dl, VT: MVT::i32); |
| 2776 | SDValue Width = DAG.getConstant(Val: 8 / VecLen, DL: dl, VT: MVT::i32); |
| 2777 | SDValue Idx = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: MVT::i32, N1: IdxV, N2: Width); |
| 2778 | SDValue Ins = |
| 2779 | DAG.getNode(Opcode: HexagonISD::INSERT, DL: dl, VT: MVT::i32, Ops: {ToReg, Ext, Width, Idx}); |
| 2780 | return getInstr(MachineOpc: Hexagon::C2_tfrrp, dl, Ty: VecTy, Ops: {Ins}, DAG); |
| 2781 | } |
| 2782 | |
| 2783 | assert(ValTy.getVectorElementType() == MVT::i1); |
| 2784 | SDValue ValR = ValTy.isVector() |
| 2785 | ? DAG.getNode(Opcode: HexagonISD::P2D, DL: dl, VT: MVT::i64, Operand: ValV) |
| 2786 | : DAG.getSExtOrTrunc(Op: ValV, DL: dl, VT: MVT::i64); |
| 2787 | |
| 2788 | unsigned Scale = VecLen / ValTy.getVectorNumElements(); |
| 2789 | assert(Scale > 1); |
| 2790 | |
| 2791 | for (unsigned R = Scale; R > 1; R /= 2) { |
| 2792 | ValR = contractPredicate(Vec64: ValR, dl, DAG); |
| 2793 | ValR = getCombine(Hi: DAG.getUNDEF(VT: MVT::i32), Lo: ValR, dl, ResTy: MVT::i64, DAG); |
| 2794 | } |
| 2795 | |
| 2796 | SDValue Width = DAG.getConstant(Val: 64 / Scale, DL: dl, VT: MVT::i32); |
| 2797 | SDValue Idx = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: MVT::i32, N1: IdxV, N2: Width); |
| 2798 | SDValue VecR = DAG.getNode(Opcode: HexagonISD::P2D, DL: dl, VT: MVT::i64, Operand: VecV); |
| 2799 | SDValue Ins = |
| 2800 | DAG.getNode(Opcode: HexagonISD::INSERT, DL: dl, VT: MVT::i64, Ops: {VecR, ValR, Width, Idx}); |
| 2801 | return DAG.getNode(Opcode: HexagonISD::D2P, DL: dl, VT: VecTy, Operand: Ins); |
| 2802 | } |
| 2803 | |
| 2804 | SDValue |
| 2805 | HexagonTargetLowering::expandPredicate(SDValue Vec32, const SDLoc &dl, |
| 2806 | SelectionDAG &DAG) const { |
| 2807 | assert(ty(Vec32).getSizeInBits() == 32); |
| 2808 | if (isUndef(Op: Vec32)) |
| 2809 | return DAG.getUNDEF(VT: MVT::i64); |
| 2810 | SDValue P = DAG.getBitcast(VT: MVT::v4i8, V: Vec32); |
| 2811 | SDValue X = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v4i16, Operand: P); |
| 2812 | return DAG.getBitcast(VT: MVT::i64, V: X); |
| 2813 | } |
| 2814 | |
| 2815 | SDValue |
| 2816 | HexagonTargetLowering::contractPredicate(SDValue Vec64, const SDLoc &dl, |
| 2817 | SelectionDAG &DAG) const { |
| 2818 | assert(ty(Vec64).getSizeInBits() == 64); |
| 2819 | if (isUndef(Op: Vec64)) |
| 2820 | return DAG.getUNDEF(VT: MVT::i32); |
| 2821 | // Collect even bytes: |
| 2822 | SDValue A = DAG.getBitcast(VT: MVT::v8i8, V: Vec64); |
| 2823 | SDValue S = DAG.getVectorShuffle(VT: MVT::v8i8, dl, N1: A, N2: DAG.getUNDEF(VT: MVT::v8i8), |
| 2824 | Mask: {0, 2, 4, 6, 1, 3, 5, 7}); |
| 2825 | return extractVector(VecV: S, IdxV: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), dl, ValTy: MVT::v4i8, |
| 2826 | ResTy: MVT::i32, DAG); |
| 2827 | } |
| 2828 | |
| 2829 | SDValue |
| 2830 | HexagonTargetLowering::getZero(const SDLoc &dl, MVT Ty, SelectionDAG &DAG) |
| 2831 | const { |
| 2832 | if (Ty.isVector()) { |
| 2833 | unsigned W = Ty.getSizeInBits(); |
| 2834 | if (W <= 64) |
| 2835 | return DAG.getBitcast(VT: Ty, V: DAG.getConstant(Val: 0, DL: dl, VT: MVT::getIntegerVT(BitWidth: W))); |
| 2836 | return DAG.getNode(Opcode: ISD::SPLAT_VECTOR, DL: dl, VT: Ty, Operand: getZero(dl, Ty: MVT::i32, DAG)); |
| 2837 | } |
| 2838 | |
| 2839 | if (Ty.isInteger()) |
| 2840 | return DAG.getConstant(Val: 0, DL: dl, VT: Ty); |
| 2841 | if (Ty.isFloatingPoint()) |
| 2842 | return DAG.getConstantFP(Val: 0.0, DL: dl, VT: Ty); |
| 2843 | llvm_unreachable("Invalid type for zero" ); |
| 2844 | } |
| 2845 | |
| 2846 | SDValue |
| 2847 | HexagonTargetLowering::appendUndef(SDValue Val, MVT ResTy, SelectionDAG &DAG) |
| 2848 | const { |
| 2849 | MVT ValTy = ty(Op: Val); |
| 2850 | assert(ValTy.getVectorElementType() == ResTy.getVectorElementType()); |
| 2851 | |
| 2852 | unsigned ValLen = ValTy.getVectorNumElements(); |
| 2853 | unsigned ResLen = ResTy.getVectorNumElements(); |
| 2854 | if (ValLen == ResLen) |
| 2855 | return Val; |
| 2856 | |
| 2857 | const SDLoc &dl(Val); |
| 2858 | assert(ValLen < ResLen); |
| 2859 | assert(ResLen % ValLen == 0); |
| 2860 | |
| 2861 | SmallVector<SDValue, 4> Concats = {Val}; |
| 2862 | for (unsigned i = 1, e = ResLen / ValLen; i < e; ++i) |
| 2863 | Concats.push_back(Elt: DAG.getUNDEF(VT: ValTy)); |
| 2864 | |
| 2865 | return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT: ResTy, Ops: Concats); |
| 2866 | } |
| 2867 | |
| 2868 | SDValue |
| 2869 | HexagonTargetLowering::getCombine(SDValue Hi, SDValue Lo, const SDLoc &dl, |
| 2870 | MVT ResTy, SelectionDAG &DAG) const { |
| 2871 | MVT ElemTy = ty(Op: Hi); |
| 2872 | assert(ElemTy == ty(Lo)); |
| 2873 | |
| 2874 | if (!ElemTy.isVector()) { |
| 2875 | assert(ElemTy.isScalarInteger()); |
| 2876 | MVT PairTy = MVT::getIntegerVT(BitWidth: 2 * ElemTy.getSizeInBits()); |
| 2877 | SDValue Pair = DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: PairTy, N1: Lo, N2: Hi); |
| 2878 | return DAG.getBitcast(VT: ResTy, V: Pair); |
| 2879 | } |
| 2880 | |
| 2881 | unsigned Width = ElemTy.getSizeInBits(); |
| 2882 | MVT IntTy = MVT::getIntegerVT(BitWidth: Width); |
| 2883 | MVT PairTy = MVT::getIntegerVT(BitWidth: 2 * Width); |
| 2884 | SDValue Pair = |
| 2885 | DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: PairTy, |
| 2886 | Ops: {DAG.getBitcast(VT: IntTy, V: Lo), DAG.getBitcast(VT: IntTy, V: Hi)}); |
| 2887 | return DAG.getBitcast(VT: ResTy, V: Pair); |
| 2888 | } |
| 2889 | |
| 2890 | SDValue |
| 2891 | HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { |
| 2892 | MVT VecTy = ty(Op); |
| 2893 | unsigned BW = VecTy.getSizeInBits(); |
| 2894 | const SDLoc &dl(Op); |
| 2895 | SmallVector<SDValue,8> Ops; |
| 2896 | for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) |
| 2897 | Ops.push_back(Elt: Op.getOperand(i)); |
| 2898 | |
| 2899 | if (BW == 32) |
| 2900 | return buildVector32(Elem: Ops, dl, VecTy, DAG); |
| 2901 | if (BW == 64) |
| 2902 | return buildVector64(Elem: Ops, dl, VecTy, DAG); |
| 2903 | |
| 2904 | if (VecTy == MVT::v8i1 || VecTy == MVT::v4i1 || VecTy == MVT::v2i1) { |
    // Check if this is a special case of an all-0 or all-1 vector.
| 2906 | bool All0 = true, All1 = true; |
| 2907 | for (SDValue P : Ops) { |
| 2908 | auto *CN = dyn_cast<ConstantSDNode>(Val: P.getNode()); |
| 2909 | if (CN == nullptr) { |
| 2910 | All0 = All1 = false; |
| 2911 | break; |
| 2912 | } |
| 2913 | uint32_t C = CN->getZExtValue(); |
| 2914 | All0 &= (C == 0); |
| 2915 | All1 &= (C == 1); |
| 2916 | } |
| 2917 | if (All0) |
| 2918 | return DAG.getNode(Opcode: HexagonISD::PFALSE, DL: dl, VT: VecTy); |
| 2919 | if (All1) |
| 2920 | return DAG.getNode(Opcode: HexagonISD::PTRUE, DL: dl, VT: VecTy); |
| 2921 | |
| 2922 | // For each i1 element in the resulting predicate register, put 1 |
| 2923 | // shifted by the index of the element into a general-purpose register, |
| 2924 | // then or them together and transfer it back into a predicate register. |
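    // e.g. for v4i1, Rep = 2, so element i drives bits 2i and 2i+1 of the
    // 8-bit predicate value.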
| 2925 | SDValue Rs[8]; |
| 2926 | SDValue Z = getZero(dl, Ty: MVT::i32, DAG); |
| 2927 | // Always produce 8 bits, repeat inputs if necessary. |
| 2928 | unsigned Rep = 8 / VecTy.getVectorNumElements(); |
| 2929 | for (unsigned i = 0; i != 8; ++i) { |
| 2930 | SDValue S = DAG.getConstant(Val: 1ull << i, DL: dl, VT: MVT::i32); |
| 2931 | Rs[i] = DAG.getSelect(DL: dl, VT: MVT::i32, Cond: Ops[i/Rep], LHS: S, RHS: Z); |
| 2932 | } |
| 2933 | for (ArrayRef<SDValue> A(Rs); A.size() != 1; A = A.drop_back(N: A.size()/2)) { |
| 2934 | for (unsigned i = 0, e = A.size()/2; i != e; ++i) |
| 2935 | Rs[i] = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: MVT::i32, N1: Rs[2*i], N2: Rs[2*i+1]); |
| 2936 | } |
| 2937 | // Move the value directly to a predicate register. |
| 2938 | return getInstr(MachineOpc: Hexagon::C2_tfrrp, dl, Ty: VecTy, Ops: {Rs[0]}, DAG); |
| 2939 | } |
| 2940 | |
| 2941 | return SDValue(); |
| 2942 | } |
| 2943 | |
| 2944 | SDValue |
| 2945 | HexagonTargetLowering::LowerCONCAT_VECTORS(SDValue Op, |
| 2946 | SelectionDAG &DAG) const { |
| 2947 | MVT VecTy = ty(Op); |
| 2948 | const SDLoc &dl(Op); |
| 2949 | if (VecTy.getSizeInBits() == 64) { |
| 2950 | assert(Op.getNumOperands() == 2); |
| 2951 | return getCombine(Hi: Op.getOperand(i: 1), Lo: Op.getOperand(i: 0), dl, ResTy: VecTy, DAG); |
| 2952 | } |
| 2953 | |
| 2954 | MVT ElemTy = VecTy.getVectorElementType(); |
| 2955 | if (ElemTy == MVT::i1) { |
| 2956 | assert(VecTy == MVT::v2i1 || VecTy == MVT::v4i1 || VecTy == MVT::v8i1); |
| 2957 | MVT OpTy = ty(Op: Op.getOperand(i: 0)); |
| 2958 | // Scale is how many times the operands need to be contracted to match |
| 2959 | // the representation in the target register. |
| 2960 | unsigned Scale = VecTy.getVectorNumElements() / OpTy.getVectorNumElements(); |
| 2961 | assert(Scale == Op.getNumOperands() && Scale > 1); |
| 2962 | |
| 2963 | // First, convert all bool vectors to integers, then generate pairwise |
| 2964 | // inserts to form values of doubled length. Up until there are only |
| 2965 | // two values left to concatenate, all of these values will fit in a |
| 2966 | // 32-bit integer, so keep them as i32 to use 32-bit inserts. |
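    // e.g. for four v2i1 operands, Scale = 4: each operand contracts to a
    // word with 16 significant bits, one INSERT round pairs them into two
    // words of 32 significant bits, and the final combine forms the i64
    // that D2P converts back to a predicate.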
| 2967 | SmallVector<SDValue,4> Words[2]; |
| 2968 | unsigned IdxW = 0; |
| 2969 | |
| 2970 | for (SDValue P : Op.getNode()->op_values()) { |
| 2971 | SDValue W = DAG.getNode(Opcode: HexagonISD::P2D, DL: dl, VT: MVT::i64, Operand: P); |
| 2972 | for (unsigned R = Scale; R > 1; R /= 2) { |
| 2973 | W = contractPredicate(Vec64: W, dl, DAG); |
| 2974 | W = getCombine(Hi: DAG.getUNDEF(VT: MVT::i32), Lo: W, dl, ResTy: MVT::i64, DAG); |
| 2975 | } |
| 2976 | W = LoHalf(V: W, DAG); |
| 2977 | Words[IdxW].push_back(Elt: W); |
| 2978 | } |
| 2979 | |
| 2980 | while (Scale > 2) { |
| 2981 | SDValue WidthV = DAG.getConstant(Val: 64 / Scale, DL: dl, VT: MVT::i32); |
| 2982 | Words[IdxW ^ 1].clear(); |
| 2983 | |
| 2984 | for (unsigned i = 0, e = Words[IdxW].size(); i != e; i += 2) { |
| 2985 | SDValue W0 = Words[IdxW][i], W1 = Words[IdxW][i+1]; |
| 2986 | // Insert W1 into W0 right next to the significant bits of W0. |
| 2987 | SDValue T = DAG.getNode(Opcode: HexagonISD::INSERT, DL: dl, VT: MVT::i32, |
| 2988 | Ops: {W0, W1, WidthV, WidthV}); |
| 2989 | Words[IdxW ^ 1].push_back(Elt: T); |
| 2990 | } |
| 2991 | IdxW ^= 1; |
| 2992 | Scale /= 2; |
| 2993 | } |
| 2994 | |
| 2995 | // At this point there should only be two words left, and Scale should be 2. |
| 2996 | assert(Scale == 2 && Words[IdxW].size() == 2); |
| 2997 | |
| 2998 | SDValue WW = getCombine(Hi: Words[IdxW][1], Lo: Words[IdxW][0], dl, ResTy: MVT::i64, DAG); |
| 2999 | return DAG.getNode(Opcode: HexagonISD::D2P, DL: dl, VT: VecTy, Operand: WW); |
| 3000 | } |
| 3001 | |
| 3002 | return SDValue(); |
| 3003 | } |
| 3004 | |
| 3005 | SDValue |
HexagonTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                               SelectionDAG &DAG) const {
| 3008 | SDValue Vec = Op.getOperand(i: 0); |
| 3009 | MVT ElemTy = ty(Op: Vec).getVectorElementType(); |
| 3010 | return extractVector(VecV: Vec, IdxV: Op.getOperand(i: 1), dl: SDLoc(Op), ValTy: ElemTy, ResTy: ty(Op), DAG); |
| 3011 | } |
| 3012 | |
| 3013 | SDValue |
HexagonTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                              SelectionDAG &DAG) const {
| 3016 | return extractVector(VecV: Op.getOperand(i: 0), IdxV: Op.getOperand(i: 1), dl: SDLoc(Op), |
| 3017 | ValTy: ty(Op), ResTy: ty(Op), DAG); |
| 3018 | } |
| 3019 | |
| 3020 | SDValue |
| 3021 | HexagonTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, |
| 3022 | SelectionDAG &DAG) const { |
| 3023 | return insertVector(VecV: Op.getOperand(i: 0), ValV: Op.getOperand(i: 1), IdxV: Op.getOperand(i: 2), |
| 3024 | dl: SDLoc(Op), ValTy: ty(Op).getVectorElementType(), DAG); |
| 3025 | } |
| 3026 | |
| 3027 | SDValue |
| 3028 | HexagonTargetLowering::LowerINSERT_SUBVECTOR(SDValue Op, |
| 3029 | SelectionDAG &DAG) const { |
| 3030 | SDValue ValV = Op.getOperand(i: 1); |
| 3031 | return insertVector(VecV: Op.getOperand(i: 0), ValV, IdxV: Op.getOperand(i: 2), |
| 3032 | dl: SDLoc(Op), ValTy: ty(Op: ValV), DAG); |
| 3033 | } |
| 3034 | |
| 3035 | bool |
| 3036 | HexagonTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { |
| 3037 | // Assuming the caller does not have either a signext or zeroext modifier, and |
| 3038 | // only one value is accepted, any reasonable truncation is allowed. |
| 3039 | if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) |
| 3040 | return false; |
| 3041 | |
| 3042 | // FIXME: in principle up to 64-bit could be made safe, but it would be very |
| 3043 | // fragile at the moment: any support for multiple value returns would be |
| 3044 | // liable to disallow tail calls involving i64 -> iN truncation in many cases. |
| 3045 | return Ty1->getPrimitiveSizeInBits() <= 32; |
| 3046 | } |
| 3047 | |
| 3048 | SDValue |
| 3049 | HexagonTargetLowering::LowerLoad(SDValue Op, SelectionDAG &DAG) const { |
| 3050 | MVT Ty = ty(Op); |
| 3051 | const SDLoc &dl(Op); |
| 3052 | LoadSDNode *LN = cast<LoadSDNode>(Val: Op.getNode()); |
| 3053 | MVT MemTy = LN->getMemoryVT().getSimpleVT(); |
| 3054 | ISD::LoadExtType ET = LN->getExtensionType(); |
| 3055 | |
| 3056 | bool LoadPred = MemTy == MVT::v2i1 || MemTy == MVT::v4i1 || MemTy == MVT::v8i1; |
| 3057 | if (LoadPred) { |
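    // A boolean vector is stored in memory as a single byte; reload it as
    // a zero-extended i32 so it can be transferred into a predicate
    // register after the load itself has been lowered.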
| 3058 | SDValue NL = DAG.getLoad( |
| 3059 | AM: LN->getAddressingMode(), ExtType: ISD::ZEXTLOAD, VT: MVT::i32, dl, Chain: LN->getChain(), |
| 3060 | Ptr: LN->getBasePtr(), Offset: LN->getOffset(), PtrInfo: LN->getPointerInfo(), |
| 3061 | /*MemoryVT*/ MemVT: MVT::i8, Alignment: LN->getAlign(), MMOFlags: LN->getMemOperand()->getFlags(), |
| 3062 | AAInfo: LN->getAAInfo(), Ranges: LN->getRanges()); |
| 3063 | LN = cast<LoadSDNode>(Val: NL.getNode()); |
| 3064 | } |
| 3065 | |
| 3066 | Align ClaimAlign = LN->getAlign(); |
| 3067 | if (!validateConstPtrAlignment(Ptr: LN->getBasePtr(), NeedAlign: ClaimAlign, dl, DAG)) |
| 3068 | return replaceMemWithUndef(Op, DAG); |
| 3069 | |
  // Call LowerUnalignedLoad for all loads; it recognizes loads that
  // don't need extra aligning.
| 3072 | SDValue LU = LowerUnalignedLoad(Op: SDValue(LN, 0), DAG); |
| 3073 | if (LoadPred) { |
| 3074 | SDValue TP = getInstr(MachineOpc: Hexagon::C2_tfrrp, dl, Ty: MemTy, Ops: {LU}, DAG); |
| 3075 | if (ET == ISD::SEXTLOAD) { |
| 3076 | TP = DAG.getSExtOrTrunc(Op: TP, DL: dl, VT: Ty); |
| 3077 | } else if (ET != ISD::NON_EXTLOAD) { |
| 3078 | TP = DAG.getZExtOrTrunc(Op: TP, DL: dl, VT: Ty); |
| 3079 | } |
| 3080 | SDValue Ch = cast<LoadSDNode>(Val: LU.getNode())->getChain(); |
| 3081 | return DAG.getMergeValues(Ops: {TP, Ch}, dl); |
| 3082 | } |
| 3083 | return LU; |
| 3084 | } |
| 3085 | |
| 3086 | SDValue |
| 3087 | HexagonTargetLowering::LowerStore(SDValue Op, SelectionDAG &DAG) const { |
| 3088 | const SDLoc &dl(Op); |
| 3089 | StoreSDNode *SN = cast<StoreSDNode>(Val: Op.getNode()); |
| 3090 | SDValue Val = SN->getValue(); |
| 3091 | MVT Ty = ty(Op: Val); |
| 3092 | |
| 3093 | if (Ty == MVT::v2i1 || Ty == MVT::v4i1 || Ty == MVT::v8i1) { |
| 3094 | // Store the exact predicate (all bits). |
| 3095 | SDValue TR = getInstr(MachineOpc: Hexagon::C2_tfrpr, dl, Ty: MVT::i32, Ops: {Val}, DAG); |
| 3096 | SDValue NS = DAG.getTruncStore(Chain: SN->getChain(), dl, Val: TR, Ptr: SN->getBasePtr(), |
| 3097 | SVT: MVT::i8, MMO: SN->getMemOperand()); |
| 3098 | if (SN->isIndexed()) { |
| 3099 | NS = DAG.getIndexedStore(OrigStore: NS, dl, Base: SN->getBasePtr(), Offset: SN->getOffset(), |
| 3100 | AM: SN->getAddressingMode()); |
| 3101 | } |
| 3102 | SN = cast<StoreSDNode>(Val: NS.getNode()); |
| 3103 | } |
| 3104 | |
| 3105 | Align ClaimAlign = SN->getAlign(); |
| 3106 | if (!validateConstPtrAlignment(Ptr: SN->getBasePtr(), NeedAlign: ClaimAlign, dl, DAG)) |
| 3107 | return replaceMemWithUndef(Op, DAG); |
| 3108 | |
| 3109 | MVT StoreTy = SN->getMemoryVT().getSimpleVT(); |
| 3110 | Align NeedAlign = Subtarget.getTypeAlignment(Ty: StoreTy); |
| 3111 | if (ClaimAlign < NeedAlign) |
| 3112 | return expandUnalignedStore(ST: SN, DAG); |
| 3113 | return SDValue(SN, 0); |
| 3114 | } |
| 3115 | |
| 3116 | SDValue |
| 3117 | HexagonTargetLowering::LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG) |
| 3118 | const { |
| 3119 | LoadSDNode *LN = cast<LoadSDNode>(Val: Op.getNode()); |
| 3120 | MVT LoadTy = ty(Op); |
| 3121 | unsigned NeedAlign = Subtarget.getTypeAlignment(Ty: LoadTy).value(); |
| 3122 | unsigned HaveAlign = LN->getAlign().value(); |
| 3123 | if (HaveAlign >= NeedAlign) |
| 3124 | return Op; |
| 3125 | |
| 3126 | const SDLoc &dl(Op); |
| 3127 | const DataLayout &DL = DAG.getDataLayout(); |
| 3128 | LLVMContext &Ctx = *DAG.getContext(); |
| 3129 | |
| 3130 | // If the load aligning is disabled or the load can be broken up into two |
| 3131 | // smaller legal loads, do the default (target-independent) expansion. |
| 3132 | bool DoDefault = false; |
| 3133 | // Handle it in the default way if this is an indexed load. |
| 3134 | if (!LN->isUnindexed()) |
| 3135 | DoDefault = true; |
| 3136 | |
| 3137 | if (!AlignLoads) { |
| 3138 | if (allowsMemoryAccessForAlignment(Context&: Ctx, DL, VT: LN->getMemoryVT(), |
| 3139 | MMO: *LN->getMemOperand())) |
| 3140 | return Op; |
| 3141 | DoDefault = true; |
| 3142 | } |
| 3143 | if (!DoDefault && (2 * HaveAlign) == NeedAlign) { |
| 3144 | // The PartTy is the equivalent of "getLoadableTypeOfSize(HaveAlign)". |
| 3145 | MVT PartTy = HaveAlign <= 8 ? MVT::getIntegerVT(BitWidth: 8 * HaveAlign) |
| 3146 | : MVT::getVectorVT(VT: MVT::i8, NumElements: HaveAlign); |
| 3147 | DoDefault = |
| 3148 | allowsMemoryAccessForAlignment(Context&: Ctx, DL, VT: PartTy, MMO: *LN->getMemOperand()); |
| 3149 | } |
| 3150 | if (DoDefault) { |
| 3151 | std::pair<SDValue, SDValue> P = expandUnalignedLoad(LD: LN, DAG); |
| 3152 | return DAG.getMergeValues(Ops: {P.first, P.second}, dl); |
| 3153 | } |
| 3154 | |
| 3155 | // The code below generates two loads, both aligned as NeedAlign, and |
| 3156 | // with the distance of NeedAlign between them. For that to cover the |
| 3157 | // bits that need to be loaded (and without overlapping), the size of |
| 3158 | // the loads should be equal to NeedAlign. This is true for all loadable |
| 3159 | // types, but add an assertion in case something changes in the future. |
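  // e.g. for an 8-byte load with HaveAlign = 4: load the two surrounding
  // aligned 8-byte blocks and let VALIGN pick the 8 bytes that start at
  // the original (unaligned) address.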
| 3160 | assert(LoadTy.getSizeInBits() == 8*NeedAlign); |
| 3161 | |
| 3162 | unsigned LoadLen = NeedAlign; |
| 3163 | SDValue Base = LN->getBasePtr(); |
| 3164 | SDValue Chain = LN->getChain(); |
| 3165 | auto BO = getBaseAndOffset(Addr: Base); |
| 3166 | unsigned BaseOpc = BO.first.getOpcode(); |
| 3167 | if (BaseOpc == HexagonISD::VALIGNADDR && BO.second % LoadLen == 0) |
| 3168 | return Op; |
| 3169 | |
| 3170 | if (BO.second % LoadLen != 0) { |
| 3171 | BO.first = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::i32, N1: BO.first, |
| 3172 | N2: DAG.getConstant(Val: BO.second % LoadLen, DL: dl, VT: MVT::i32)); |
| 3173 | BO.second -= BO.second % LoadLen; |
| 3174 | } |
| 3175 | SDValue BaseNoOff = (BaseOpc != HexagonISD::VALIGNADDR) |
| 3176 | ? DAG.getNode(Opcode: HexagonISD::VALIGNADDR, DL: dl, VT: MVT::i32, N1: BO.first, |
| 3177 | N2: DAG.getConstant(Val: NeedAlign, DL: dl, VT: MVT::i32)) |
| 3178 | : BO.first; |
| 3179 | SDValue Base0 = |
| 3180 | DAG.getMemBasePlusOffset(Base: BaseNoOff, Offset: TypeSize::getFixed(ExactSize: BO.second), DL: dl); |
| 3181 | SDValue Base1 = DAG.getMemBasePlusOffset( |
| 3182 | Base: BaseNoOff, Offset: TypeSize::getFixed(ExactSize: BO.second + LoadLen), DL: dl); |
| 3183 | |
| 3184 | MachineMemOperand *WideMMO = nullptr; |
| 3185 | if (MachineMemOperand *MMO = LN->getMemOperand()) { |
| 3186 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3187 | WideMMO = MF.getMachineMemOperand( |
| 3188 | PtrInfo: MMO->getPointerInfo(), F: MMO->getFlags(), Size: 2 * LoadLen, BaseAlignment: Align(LoadLen), |
| 3189 | AAInfo: MMO->getAAInfo(), Ranges: MMO->getRanges(), SSID: MMO->getSyncScopeID(), |
| 3190 | Ordering: MMO->getSuccessOrdering(), FailureOrdering: MMO->getFailureOrdering()); |
| 3191 | } |
| 3192 | |
| 3193 | SDValue Load0 = DAG.getLoad(VT: LoadTy, dl, Chain, Ptr: Base0, MMO: WideMMO); |
| 3194 | SDValue Load1 = DAG.getLoad(VT: LoadTy, dl, Chain, Ptr: Base1, MMO: WideMMO); |
| 3195 | |
| 3196 | SDValue Aligned = DAG.getNode(Opcode: HexagonISD::VALIGN, DL: dl, VT: LoadTy, |
| 3197 | Ops: {Load1, Load0, BaseNoOff.getOperand(i: 0)}); |
| 3198 | SDValue NewChain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, |
| 3199 | N1: Load0.getValue(R: 1), N2: Load1.getValue(R: 1)); |
| 3200 | SDValue M = DAG.getMergeValues(Ops: {Aligned, NewChain}, dl); |
| 3201 | return M; |
| 3202 | } |
| 3203 | |
| 3204 | SDValue |
| 3205 | HexagonTargetLowering::LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const { |
| 3206 | SDValue X = Op.getOperand(i: 0), Y = Op.getOperand(i: 1); |
| 3207 | auto *CY = dyn_cast<ConstantSDNode>(Val&: Y); |
| 3208 | if (!CY) |
| 3209 | return SDValue(); |
| 3210 | |
| 3211 | const SDLoc &dl(Op); |
| 3212 | SDVTList VTs = Op.getNode()->getVTList(); |
| 3213 | assert(VTs.NumVTs == 2); |
| 3214 | assert(VTs.VTs[1] == MVT::i1); |
| 3215 | unsigned Opc = Op.getOpcode(); |
| 3216 | |
  // CY is known to be non-null from the early return above.
  uint64_t VY = CY->getZExtValue();
  assert(VY != 0 && "This should have been folded");
  // Only an increment/decrement by 1 is handled below.
  if (VY != 1)
    return SDValue();

  if (Opc == ISD::UADDO) {
    // X+1 overflows iff X was all-ones, i.e. iff the result wrapped to 0.
    SDValue Op = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: VTs.VTs[0], Ops: {X, Y});
    SDValue Ov = DAG.getSetCC(DL: dl, VT: MVT::i1, LHS: Op, RHS: getZero(dl, Ty: ty(Op), DAG),
                              Cond: ISD::SETEQ);
    return DAG.getMergeValues(Ops: {Op, Ov}, dl);
  }
  if (Opc == ISD::USUBO) {
    // X-1 underflows iff X was 0, i.e. iff the result wrapped to all-ones.
    SDValue Op = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: VTs.VTs[0], Ops: {X, Y});
    SDValue Ov = DAG.getSetCC(DL: dl, VT: MVT::i1, LHS: Op,
                              RHS: DAG.getAllOnesConstant(DL: dl, VT: ty(Op)), Cond: ISD::SETEQ);
    return DAG.getMergeValues(Ops: {Op, Ov}, dl);
  }
| 3237 | |
| 3238 | return SDValue(); |
| 3239 | } |
| 3240 | |
| 3241 | SDValue HexagonTargetLowering::LowerUAddSubOCarry(SDValue Op, |
| 3242 | SelectionDAG &DAG) const { |
| 3243 | const SDLoc &dl(Op); |
| 3244 | unsigned Opc = Op.getOpcode(); |
| 3245 | SDValue X = Op.getOperand(i: 0), Y = Op.getOperand(i: 1), C = Op.getOperand(i: 2); |
| 3246 | |
| 3247 | if (Opc == ISD::UADDO_CARRY) |
| 3248 | return DAG.getNode(Opcode: HexagonISD::ADDC, DL: dl, VTList: Op.getNode()->getVTList(), |
| 3249 | Ops: { X, Y, C }); |
| 3250 | |
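  // HexagonISD::SUBC consumes and produces a carry rather than a borrow,
  // so both the incoming and outgoing flags of USUBO_CARRY are negated.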
| 3251 | EVT CarryTy = C.getValueType(); |
| 3252 | SDValue SubC = DAG.getNode(Opcode: HexagonISD::SUBC, DL: dl, VTList: Op.getNode()->getVTList(), |
| 3253 | Ops: { X, Y, DAG.getLogicalNOT(DL: dl, Val: C, VT: CarryTy) }); |
| 3254 | SDValue Out[] = { SubC.getValue(R: 0), |
| 3255 | DAG.getLogicalNOT(DL: dl, Val: SubC.getValue(R: 1), VT: CarryTy) }; |
| 3256 | return DAG.getMergeValues(Ops: Out, dl); |
| 3257 | } |
| 3258 | |
| 3259 | SDValue |
| 3260 | HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { |
| 3261 | SDValue Chain = Op.getOperand(i: 0); |
| 3262 | SDValue Offset = Op.getOperand(i: 1); |
| 3263 | SDValue Handler = Op.getOperand(i: 2); |
| 3264 | SDLoc dl(Op); |
| 3265 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
| 3266 | |
| 3267 | // Mark function as containing a call to EH_RETURN. |
| 3268 | HexagonMachineFunctionInfo *FuncInfo = |
| 3269 | DAG.getMachineFunction().getInfo<HexagonMachineFunctionInfo>(); |
| 3270 | FuncInfo->setHasEHReturn(); |
| 3271 | |
| 3272 | unsigned OffsetReg = Hexagon::R28; |
| 3273 | |
| 3274 | SDValue StoreAddr = |
| 3275 | DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: DAG.getRegister(Reg: Hexagon::R30, VT: PtrVT), |
| 3276 | N2: DAG.getIntPtrConstant(Val: 4, DL: dl)); |
| 3277 | Chain = DAG.getStore(Chain, dl, Val: Handler, Ptr: StoreAddr, PtrInfo: MachinePointerInfo()); |
| 3278 | Chain = DAG.getCopyToReg(Chain, dl, Reg: OffsetReg, N: Offset); |
| 3279 | |
  // Not needed; we already use it as an explicit input to EH_RETURN.
  // MF.getRegInfo().addLiveOut(OffsetReg);
| 3282 | |
| 3283 | return DAG.getNode(Opcode: HexagonISD::EH_RETURN, DL: dl, VT: MVT::Other, Operand: Chain); |
| 3284 | } |
| 3285 | |
| 3286 | SDValue |
| 3287 | HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { |
| 3288 | unsigned Opc = Op.getOpcode(); |
| 3289 | |
| 3290 | // Handle INLINEASM first. |
| 3291 | if (Opc == ISD::INLINEASM || Opc == ISD::INLINEASM_BR) |
| 3292 | return LowerINLINEASM(Op, DAG); |
| 3293 | |
| 3294 | if (isHvxOperation(N: Op.getNode(), DAG)) { |
| 3295 | // If HVX lowering returns nothing, try the default lowering. |
| 3296 | if (SDValue V = LowerHvxOperation(Op, DAG)) |
| 3297 | return V; |
| 3298 | } |
| 3299 | |
| 3300 | switch (Opc) { |
| 3301 | default: |
| 3302 | #ifndef NDEBUG |
| 3303 | Op.getNode()->dumpr(&DAG); |
| 3304 | if (Opc > HexagonISD::OP_BEGIN && Opc < HexagonISD::OP_END) |
| 3305 | errs() << "Error: check for a non-legal type in this operation\n" ; |
| 3306 | #endif |
| 3307 | llvm_unreachable("Should not custom lower this!" ); |
| 3308 | |
| 3309 | case ISD::FDIV: |
| 3310 | return LowerFDIV(Op, DAG); |
| 3311 | case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); |
| 3312 | case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, DAG); |
| 3313 | case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); |
| 3314 | case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG); |
| 3315 | case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); |
| 3316 | case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); |
| 3317 | case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); |
| 3318 | case ISD::BITCAST: return LowerBITCAST(Op, DAG); |
| 3319 | case ISD::LOAD: return LowerLoad(Op, DAG); |
| 3320 | case ISD::STORE: return LowerStore(Op, DAG); |
| 3321 | case ISD::UADDO: |
| 3322 | case ISD::USUBO: return LowerUAddSubO(Op, DAG); |
| 3323 | case ISD::UADDO_CARRY: |
| 3324 | case ISD::USUBO_CARRY: return LowerUAddSubOCarry(Op, DAG); |
| 3325 | case ISD::SRA: |
| 3326 | case ISD::SHL: |
| 3327 | case ISD::SRL: return LowerVECTOR_SHIFT(Op, DAG); |
| 3328 | case ISD::ROTL: return LowerROTL(Op, DAG); |
| 3329 | case ISD::ConstantPool: return LowerConstantPool(Op, DAG); |
| 3330 | case ISD::JumpTable: return LowerJumpTable(Op, DAG); |
| 3331 | case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); |
| 3332 | case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); |
| 3333 | case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); |
| 3334 | case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); |
| 3335 | case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG); |
| 3336 | case ISD::GlobalAddress: return LowerGLOBALADDRESS(Op, DAG); |
| 3337 | case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); |
| 3338 | case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); |
| 3339 | case ISD::VACOPY: return LowerVACOPY(Op, DAG); |
| 3340 | case ISD::VASTART: return LowerVASTART(Op, DAG); |
| 3341 | case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); |
| 3342 | case ISD::SETCC: return LowerSETCC(Op, DAG); |
| 3343 | case ISD::VSELECT: return LowerVSELECT(Op, DAG); |
| 3344 | case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); |
| 3345 | case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG); |
| 3346 | case ISD::PREFETCH: return LowerPREFETCH(Op, DAG); |
| 3347 | case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG); |
  case ISD::READSTEADYCOUNTER: return LowerREADSTEADYCOUNTER(Op, DAG);
| 3350 | } |
| 3351 | |
| 3352 | return SDValue(); |
| 3353 | } |
| 3354 | |
| 3355 | void |
| 3356 | HexagonTargetLowering::LowerOperationWrapper(SDNode *N, |
| 3357 | SmallVectorImpl<SDValue> &Results, |
| 3358 | SelectionDAG &DAG) const { |
| 3359 | if (isHvxOperation(N, DAG)) { |
| 3360 | LowerHvxOperationWrapper(N, Results, DAG); |
| 3361 | if (!Results.empty()) |
| 3362 | return; |
| 3363 | } |
| 3364 | |
| 3365 | SDValue Op(N, 0); |
| 3366 | unsigned Opc = N->getOpcode(); |
| 3367 | |
| 3368 | switch (Opc) { |
| 3369 | case HexagonISD::SSAT: |
| 3370 | case HexagonISD::USAT: |
| 3371 | Results.push_back(Elt: opJoin(Ops: SplitVectorOp(Op, DAG), dl: SDLoc(Op), DAG)); |
| 3372 | break; |
| 3373 | case ISD::STORE: |
| 3374 | // We are only custom-lowering stores to verify the alignment of the |
| 3375 | // address if it is a compile-time constant. Since a store can be |
| 3376 | // modified during type-legalization (the value being stored may need |
| 3377 | // legalization), return empty Results here to indicate that we don't |
| 3378 | // really make any changes in the custom lowering. |
| 3379 | return; |
| 3380 | default: |
| 3381 | TargetLowering::LowerOperationWrapper(N, Results, DAG); |
| 3382 | break; |
| 3383 | } |
| 3384 | } |
| 3385 | |
| 3386 | void |
| 3387 | HexagonTargetLowering::ReplaceNodeResults(SDNode *N, |
| 3388 | SmallVectorImpl<SDValue> &Results, |
| 3389 | SelectionDAG &DAG) const { |
| 3390 | if (isHvxOperation(N, DAG)) { |
| 3391 | ReplaceHvxNodeResults(N, Results, DAG); |
| 3392 | if (!Results.empty()) |
| 3393 | return; |
| 3394 | } |
| 3395 | |
| 3396 | const SDLoc &dl(N); |
| 3397 | switch (N->getOpcode()) { |
| 3398 | case ISD::SRL: |
| 3399 | case ISD::SRA: |
| 3400 | case ISD::SHL: |
| 3401 | return; |
| 3402 | case ISD::BITCAST: |
| 3403 | // Handle a bitcast from v8i1 to i8. |
| 3404 | if (N->getValueType(ResNo: 0) == MVT::i8) { |
| 3405 | if (N->getOperand(Num: 0).getValueType() == MVT::v8i1) { |
| 3406 | SDValue P = getInstr(MachineOpc: Hexagon::C2_tfrpr, dl, Ty: MVT::i32, |
| 3407 | Ops: N->getOperand(Num: 0), DAG); |
| 3408 | SDValue T = DAG.getAnyExtOrTrunc(Op: P, DL: dl, VT: MVT::i8); |
| 3409 | Results.push_back(Elt: T); |
| 3410 | } |
| 3411 | } |
| 3412 | break; |
| 3413 | } |
| 3414 | } |
| 3415 | |
| 3416 | SDValue |
| 3417 | HexagonTargetLowering::PerformDAGCombine(SDNode *N, |
| 3418 | DAGCombinerInfo &DCI) const { |
| 3419 | if (isHvxOperation(N, DAG&: DCI.DAG)) { |
| 3420 | if (SDValue V = PerformHvxDAGCombine(N, DCI)) |
| 3421 | return V; |
| 3422 | return SDValue(); |
| 3423 | } |
| 3424 | |
| 3425 | SDValue Op(N, 0); |
| 3426 | const SDLoc &dl(Op); |
| 3427 | unsigned Opc = Op.getOpcode(); |
| 3428 | |
| 3429 | if (Opc == ISD::TRUNCATE) { |
| 3430 | SDValue Op0 = Op.getOperand(i: 0); |
| 3431 | // fold (truncate (build pair x, y)) -> (truncate x) or x |
| 3432 | if (Op0.getOpcode() == ISD::BUILD_PAIR) { |
| 3433 | EVT TruncTy = Op.getValueType(); |
| 3434 | SDValue Elem0 = Op0.getOperand(i: 0); |
| 3435 | // if we match the low element of the pair, just return it. |
| 3436 | if (Elem0.getValueType() == TruncTy) |
| 3437 | return Elem0; |
| 3438 | // otherwise, if the low part is still too large, apply the truncate. |
| 3439 | if (Elem0.getValueType().bitsGT(VT: TruncTy)) |
| 3440 | return DCI.DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: TruncTy, Operand: Elem0); |
| 3441 | } |
| 3442 | } |
| 3443 | |
| 3444 | if (DCI.isBeforeLegalizeOps()) |
| 3445 | return SDValue(); |
| 3446 | |
| 3447 | if (Opc == HexagonISD::P2D) { |
| 3448 | SDValue P = Op.getOperand(i: 0); |
| 3449 | switch (P.getOpcode()) { |
| 3450 | case HexagonISD::PTRUE: |
| 3451 | return DCI.DAG.getAllOnesConstant(DL: dl, VT: ty(Op)); |
| 3452 | case HexagonISD::PFALSE: |
| 3453 | return getZero(dl, Ty: ty(Op), DAG&: DCI.DAG); |
| 3454 | default: |
| 3455 | break; |
| 3456 | } |
| 3457 | } else if (Opc == ISD::VSELECT) { |
| 3458 | // This is pretty much duplicated in HexagonISelLoweringHVX... |
| 3459 | // |
| 3460 | // (vselect (xor x, ptrue), v0, v1) -> (vselect x, v1, v0) |
| 3461 | SDValue Cond = Op.getOperand(i: 0); |
| 3462 | if (Cond->getOpcode() == ISD::XOR) { |
| 3463 | SDValue C0 = Cond.getOperand(i: 0), C1 = Cond.getOperand(i: 1); |
| 3464 | if (C1->getOpcode() == HexagonISD::PTRUE) { |
| 3465 | SDValue VSel = DCI.DAG.getNode(Opcode: ISD::VSELECT, DL: dl, VT: ty(Op), N1: C0, |
| 3466 | N2: Op.getOperand(i: 2), N3: Op.getOperand(i: 1)); |
| 3467 | return VSel; |
| 3468 | } |
| 3469 | } |
| 3470 | } else if (Opc == ISD::TRUNCATE) { |
| 3471 | SDValue Op0 = Op.getOperand(i: 0); |
| 3472 | // fold (truncate (build pair x, y)) -> (truncate x) or x |
| 3473 | if (Op0.getOpcode() == ISD::BUILD_PAIR) { |
| 3474 | MVT TruncTy = ty(Op); |
| 3475 | SDValue Elem0 = Op0.getOperand(i: 0); |
| 3476 | // if we match the low element of the pair, just return it. |
| 3477 | if (ty(Op: Elem0) == TruncTy) |
| 3478 | return Elem0; |
| 3479 | // otherwise, if the low part is still too large, apply the truncate. |
| 3480 | if (ty(Op: Elem0).bitsGT(VT: TruncTy)) |
| 3481 | return DCI.DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: TruncTy, Operand: Elem0); |
| 3482 | } |
| 3483 | } else if (Opc == ISD::OR) { |
| 3484 | // fold (or (shl xx, s), (zext y)) -> (COMBINE (shl xx, s-32), y) |
| 3485 | // if s >= 32 |
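    // With s >= 32 the shifted value occupies only the upper word and the
    // zero-extended value only the lower word, so the OR reduces to a
    // register-pair COMBINE of the two words.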
| 3486 | auto fold0 = [&, this](SDValue Op) { |
| 3487 | if (ty(Op) != MVT::i64) |
| 3488 | return SDValue(); |
| 3489 | SDValue Shl = Op.getOperand(i: 0); |
| 3490 | SDValue Zxt = Op.getOperand(i: 1); |
| 3491 | if (Shl.getOpcode() != ISD::SHL) |
| 3492 | std::swap(a&: Shl, b&: Zxt); |
| 3493 | |
| 3494 | if (Shl.getOpcode() != ISD::SHL || Zxt.getOpcode() != ISD::ZERO_EXTEND) |
| 3495 | return SDValue(); |
| 3496 | |
| 3497 | SDValue Z = Zxt.getOperand(i: 0); |
| 3498 | auto *Amt = dyn_cast<ConstantSDNode>(Val: Shl.getOperand(i: 1)); |
| 3499 | if (Amt && Amt->getZExtValue() >= 32 && ty(Op: Z).getSizeInBits() <= 32) { |
| 3500 | unsigned A = Amt->getZExtValue(); |
| 3501 | SDValue S = Shl.getOperand(i: 0); |
| 3502 | SDValue T0 = DCI.DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: ty(Op: S), N1: S, |
| 3503 | N2: DCI.DAG.getConstant(Val: A - 32, DL: dl, VT: MVT::i32)); |
| 3504 | SDValue T1 = DCI.DAG.getZExtOrTrunc(Op: T0, DL: dl, VT: MVT::i32); |
| 3505 | SDValue T2 = DCI.DAG.getZExtOrTrunc(Op: Z, DL: dl, VT: MVT::i32); |
| 3506 | return DCI.DAG.getNode(Opcode: HexagonISD::COMBINE, DL: dl, VT: MVT::i64, Ops: {T1, T2}); |
| 3507 | } |
| 3508 | return SDValue(); |
| 3509 | }; |
| 3510 | |
| 3511 | if (SDValue R = fold0(Op)) |
| 3512 | return R; |
| 3513 | } |
| 3514 | |
| 3515 | return SDValue(); |
| 3516 | } |
| 3517 | |
| 3518 | /// Returns relocation base for the given PIC jumptable. |
| 3519 | SDValue |
| 3520 | HexagonTargetLowering::getPICJumpTableRelocBase(SDValue Table, |
| 3521 | SelectionDAG &DAG) const { |
| 3522 | int Idx = cast<JumpTableSDNode>(Val&: Table)->getIndex(); |
| 3523 | EVT VT = Table.getValueType(); |
| 3524 | SDValue T = DAG.getTargetJumpTable(JTI: Idx, VT, TargetFlags: HexagonII::MO_PCREL); |
| 3525 | return DAG.getNode(Opcode: HexagonISD::AT_PCREL, DL: SDLoc(Table), VT, Operand: T); |
| 3526 | } |
| 3527 | |
| 3528 | //===----------------------------------------------------------------------===// |
| 3529 | // Inline Assembly Support |
| 3530 | //===----------------------------------------------------------------------===// |
| 3531 | |
| 3532 | TargetLowering::ConstraintType |
| 3533 | HexagonTargetLowering::getConstraintType(StringRef Constraint) const { |
| 3534 | if (Constraint.size() == 1) { |
| 3535 | switch (Constraint[0]) { |
| 3536 | case 'q': |
| 3537 | case 'v': |
| 3538 | if (Subtarget.useHVXOps()) |
| 3539 | return C_RegisterClass; |
| 3540 | break; |
| 3541 | case 'a': |
| 3542 | return C_RegisterClass; |
| 3543 | default: |
| 3544 | break; |
| 3545 | } |
| 3546 | } |
| 3547 | return TargetLowering::getConstraintType(Constraint); |
| 3548 | } |

std::pair<unsigned, const TargetRegisterClass*>
HexagonTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':   // R0-R31
      switch (VT.SimpleTy) {
      default:
        return {0u, nullptr};
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
      case MVT::f32:
        return {0u, &Hexagon::IntRegsRegClass};
      case MVT::i64:
      case MVT::f64:
        return {0u, &Hexagon::DoubleRegsRegClass};
      }
      break;
    case 'a':   // M0-M1
      if (VT != MVT::i32)
        return {0u, nullptr};
      return {0u, &Hexagon::ModRegsRegClass};
    case 'q':   // q0-q3
      switch (VT.getSizeInBits()) {
      default:
        return {0u, nullptr};
      case 64:
      case 128:
        return {0u, &Hexagon::HvxQRRegClass};
      }
      break;
    case 'v':   // V0-V31
      switch (VT.getSizeInBits()) {
      default:
        return {0u, nullptr};
      case 512:
        return {0u, &Hexagon::HvxVRRegClass};
      case 1024:
        if (Subtarget.hasV60Ops() && Subtarget.useHVX128BOps())
          return {0u, &Hexagon::HvxVRRegClass};
        return {0u, &Hexagon::HvxWRRegClass};
      case 2048:
        return {0u, &Hexagon::HvxWRRegClass};
      }
      break;
    default:
      return {0u, nullptr};
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
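/// Hexagon returns true unconditionally: the assumption (consistent with the
/// unconditional return below) is that any FP immediate can be materialized
/// with transfer-immediate sequences, so no constant-pool load is needed.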
bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                         bool ForCodeSize) const {
  return true;
}

/// Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
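/// For example (illustrative only), a load of the i32 constant 0x12345678 can
/// be replaced by a transfer-immediate of ##0x12345678; any integer type of
/// at most 64 bits qualifies under the check below.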
bool HexagonTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                              Type *Ty) const {
  if (!ConstantLoadsToImm)
    return false;

  assert(Ty->isIntegerTy());
  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  return (BitSize > 0 && BitSize <= 64);
}

/// isLegalAddressingMode - Return true if the addressing mode represented by
/// AM is legal for this target, for a load/store of the specified type.
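/// A worked example of the offset rules below: for an i32 access (ABI
/// alignment 4), AM.BaseOffs must be a multiple of 4, and BaseOffs >> 2 must
/// fit in a signed 11-bit field, so BaseOffs must lie in [-4096, 4092].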
bool HexagonTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                  const AddrMode &AM, Type *Ty,
                                                  unsigned AS,
                                                  Instruction *I) const {
  if (Ty->isSized()) {
    // When LSR detects uses of the same base address to access different
    // types (e.g. unions), it will assume a conservative type for these
    // uses:
    //   LSR Use: Kind=Address of void in addrspace(4294967295), ...
    // The type Ty passed here would then be "void". Skip the alignment
    // checks for such unsized types, but do not return false outright,
    // since that would confuse LSR to the point of crashing.
    Align A = DL.getABITypeAlign(Ty);
    // The base offset must be a multiple of the alignment.
    if (!isAligned(A, AM.BaseOffs))
      return false;
    // The shifted offset must fit in 11 bits.
    if (!isInt<11>(AM.BaseOffs >> Log2(A)))
      return false;
  }

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  int Scale = AM.Scale;
  if (Scale < 0)
    Scale = -Scale;
  switch (Scale) {
  case 0:  // No scale reg, "r+i", "r", or just "i".
    break;
  default: // No scaled addressing mode.
    return false;
  }
  return true;
}

/// Return true if folding a constant offset with the given GlobalAddress is
/// legal. It is frequently not legal in PIC relocation models.
bool HexagonTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA)
      const {
  return HTM.getRelocationModel() == Reloc::Static;
}

/// isLegalICmpImmediate - Return true if the specified immediate is a legal
/// icmp immediate, i.e. the target has icmp instructions that can compare a
/// register against the immediate without materializing the immediate into a
/// register.
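/// Hexagon's compare-with-immediate forms (e.g. p0 = cmp.eq(r0, #s10)) carry
/// a signed 10-bit immediate, which gives the [-512, 511] range tested below.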
bool HexagonTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return Imm >= -512 && Imm <= 511;
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
    SDValue Callee,
    CallingConv::ID CalleeCC,
    bool IsVarArg,
    bool IsCalleeStructRet,
    bool IsCallerStructRet,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins,
    SelectionDAG &DAG) const {
  const Function &CallerF = DAG.getMachineFunction().getFunction();
  CallingConv::ID CallerCC = CallerF.getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;

  // ***************************************************************************
  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes.
  // ***************************************************************************

  // If this is a tail call via a function pointer, then don't do it!
  if (!isa<GlobalAddressSDNode>(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee)) {
    return false;
  }
  // If the calling conventions do not match, only optimize when both the
  // caller and the callee use the C or Fast convention.
  if (!CCMatch) {
    bool R = (CallerCC == CallingConv::C || CallerCC == CallingConv::Fast);
    bool E = (CalleeCC == CallingConv::C || CalleeCC == CallingConv::Fast);
    // Tail-calling is fine only if both R and E hold.
    if (!R || !E)
      return false;
  }

  // Do not tail call optimize vararg calls.
  if (IsVarArg)
    return false;

  // Also avoid tail call optimization if either caller or callee uses struct
  // return semantics.
  if (IsCalleeStructRet || IsCallerStructRet)
    return false;

  // In addition to the cases above, we would also have to disable tail call
  // optimization if the calling convention requires at least one outgoing
  // argument to be passed on the stack. We cannot check that here because
  // that information is not yet available at this point.
  return true;
}

/// Returns the target specific optimal type for load and store operations as
/// a result of memset, memcpy, and memmove lowering.
///
/// If DstAlign is zero, the destination alignment can satisfy any constraint.
/// Similarly, if SrcAlign is zero, there is no need to check it against an
/// alignment requirement, probably because the source does not need to be
/// loaded. If 'IsMemset' is true, this is expanding a memset. If 'ZeroMemset'
/// is true, it is a memset of zero. 'MemcpyStrSrc' indicates whether the
/// memcpy source is constant so it does not need to be loaded. It returns
/// EVT::Other if the type should be determined using generic
/// target-independent logic.
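/// As a worked example of the rules below: an 8-byte-aligned, 16-byte memcpy
/// is expanded with i64 operations, i.e. two load/store pairs.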
EVT HexagonTargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const {
  if (Op.size() >= 8 && Op.isAligned(Align(8)))
    return MVT::i64;
  if (Op.size() >= 4 && Op.isAligned(Align(4)))
    return MVT::i32;
  if (Op.size() >= 2 && Op.isAligned(Align(2)))
    return MVT::i16;
  return MVT::Other;
}

bool HexagonTargetLowering::allowsMemoryAccess(
    LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
    Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const {
  if (!VT.isSimple())
    return false;
  MVT SVT = VT.getSimpleVT();
  if (Subtarget.isHVXVectorType(SVT, true))
    return allowsHvxMemoryAccess(SVT, Flags, Fast);
  return TargetLoweringBase::allowsMemoryAccess(
      Context, DL, VT, AddrSpace, Alignment, Flags, Fast);
}

bool HexagonTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment,
    MachineMemOperand::Flags Flags, unsigned *Fast) const {
  if (!VT.isSimple())
    return false;
  MVT SVT = VT.getSimpleVT();
  if (Subtarget.isHVXVectorType(SVT, true))
    return allowsHvxMisalignedMemoryAccesses(SVT, Flags, Fast);
  if (Fast)
    *Fast = 0;
  return false;
}

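// Pick representative register classes for HVX types: boolean vectors map to
// the predicate class (HvxQR), single-width vectors to HvxVR, and vector
// pairs (twice the native vector length) to HvxWR.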
std::pair<const TargetRegisterClass*, uint8_t>
HexagonTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                               MVT VT) const {
  if (Subtarget.isHVXVectorType(VT, true)) {
    unsigned BitWidth = VT.getSizeInBits();
    unsigned VecWidth = Subtarget.getVectorLength() * 8;

    if (VT.getVectorElementType() == MVT::i1)
      return std::make_pair(&Hexagon::HvxQRRegClass, 1);
    if (BitWidth == VecWidth)
      return std::make_pair(&Hexagon::HvxVRRegClass, 1);
    assert(BitWidth == 2 * VecWidth);
    return std::make_pair(&Hexagon::HvxWRRegClass, 1);
  }

  return TargetLowering::findRepresentativeClass(TRI, VT);
}

bool HexagonTargetLowering::shouldReduceLoadWidth(
    SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT,
    std::optional<unsigned> ByteOffset) const {
  // TODO: This may be worth removing. Check regression tests for diffs.
  if (!TargetLoweringBase::shouldReduceLoadWidth(Load, ExtTy, NewVT,
                                                 ByteOffset))
    return false;

  auto *L = cast<LoadSDNode>(Load);
  std::pair<SDValue, int> BO = getBaseAndOffset(L->getBasePtr());
  // Small-data object, do not shrink.
  if (BO.first.getOpcode() == HexagonISD::CONST32_GP)
    return false;
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(BO.first)) {
    auto &HTM = static_cast<const HexagonTargetMachine &>(getTargetMachine());
    const auto *GO = dyn_cast_or_null<const GlobalObject>(GA->getGlobal());
    return !GO || !HTM.getObjFileLowering()->isGlobalInSmallSection(GO, HTM);
  }
  return true;
}

void HexagonTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                          SDNode *Node) const {
  AdjustHvxInstrPostInstrSelection(MI, Node);
}

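// Emit the load-locked half of an LL/SC atomic sequence. A 32-bit atomic maps
// to the L2_loadw_locked intrinsic and a 64-bit one to L4_loadd_locked (the
// memw_locked/memd_locked instructions).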
Value *HexagonTargetLowering::emitLoadLinked(IRBuilderBase &Builder,
                                             Type *ValueTy, Value *Addr,
                                             AtomicOrdering Ord) const {
  unsigned SZ = ValueTy->getPrimitiveSizeInBits();
  assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
  Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
                                   : Intrinsic::hexagon_L4_loadd_locked;

  Value *Call =
      Builder.CreateIntrinsic(IntID, Addr, /*FMFSource=*/nullptr, "larx");

  return Builder.CreateBitCast(Call, ValueTy);
}

/// Perform a store-conditional operation to Addr. Return the status of the
/// store. This should be 0 if the store succeeded, non-zero otherwise.
Value *HexagonTargetLowering::emitStoreConditional(IRBuilderBase &Builder,
                                                   Value *Val, Value *Addr,
                                                   AtomicOrdering Ord) const {
  BasicBlock *BB = Builder.GetInsertBlock();
  Module *M = BB->getParent()->getParent();
  Type *Ty = Val->getType();
  unsigned SZ = Ty->getPrimitiveSizeInBits();

  Type *CastTy = Builder.getIntNTy(SZ);
  assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
  Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
                                   : Intrinsic::hexagon_S4_stored_locked;

  Val = Builder.CreateBitCast(Val, CastTy);

  Value *Call = Builder.CreateIntrinsic(IntID, {Addr, Val},
                                        /*FMFSource=*/nullptr, "stcx");
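  // The store-locked intrinsic is expected to return a non-zero value on
  // success, while this hook's contract is 0-on-success, hence the inverted
  // comparison below.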
  Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0), "");
  Value *Ext = Builder.CreateZExt(Cmp, Type::getInt32Ty(M->getContext()));
  return Ext;
}

TargetLowering::AtomicExpansionKind
HexagonTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
  // Do not expand loads and stores that don't exceed 64 bits.
  return LI->getType()->getPrimitiveSizeInBits() > 64
             ? AtomicExpansionKind::LLOnly
             : AtomicExpansionKind::None;
}

TargetLowering::AtomicExpansionKind
HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
  // Do not expand loads and stores that don't exceed 64 bits.
  return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64
             ? AtomicExpansionKind::Expand
             : AtomicExpansionKind::None;
}

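// Returning LLSC makes AtomicExpandPass rewrite cmpxchg as a retry loop built
// from the emitLoadLinked/emitStoreConditional hooks above.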
TargetLowering::AtomicExpansionKind
HexagonTargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *AI) const {
  return AtomicExpansionKind::LLSC;
}
