//===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Hexagon uses to lower LLVM code
// into a selection DAG.
//
//===----------------------------------------------------------------------===//

#include "HexagonISelLowering.h"
#include "Hexagon.h"
#include "HexagonMachineFunctionInfo.h"
#include "HexagonRegisterInfo.h"
#include "HexagonSubtarget.h"
#include "HexagonTargetMachine.h"
#include "HexagonTargetObjectFile.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "hexagon-lowering"

static cl::opt<bool> EmitJumpTables("hexagon-emit-jump-tables",
  cl::init(true), cl::Hidden,
  cl::desc("Control jump table emission on Hexagon target"));

static cl::opt<bool>
    EnableHexSDNodeSched("enable-hexagon-sdnode-sched", cl::Hidden,
                         cl::desc("Enable Hexagon SDNode scheduling"));

static cl::opt<bool> EnableFastMath("ffast-math", cl::Hidden,
                                    cl::desc("Enable Fast Math processing"));

static cl::opt<int> MinimumJumpTables("minimum-jump-tables", cl::Hidden,
                                      cl::init(5),
                                      cl::desc("Set minimum jump tables"));

static cl::opt<int>
    MaxStoresPerMemcpyCL("max-store-memcpy", cl::Hidden, cl::init(6),
                         cl::desc("Max #stores to inline memcpy"));

static cl::opt<int>
    MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os", cl::Hidden, cl::init(4),
                                cl::desc("Max #stores to inline memcpy"));

static cl::opt<int>
    MaxStoresPerMemmoveCL("max-store-memmove", cl::Hidden, cl::init(6),
                          cl::desc("Max #stores to inline memmove"));

static cl::opt<int>
    MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os", cl::Hidden,
                                 cl::init(4),
                                 cl::desc("Max #stores to inline memmove"));

static cl::opt<int>
    MaxStoresPerMemsetCL("max-store-memset", cl::Hidden, cl::init(8),
                         cl::desc("Max #stores to inline memset"));

static cl::opt<int>
    MaxStoresPerMemsetOptSizeCL("max-store-memset-Os", cl::Hidden, cl::init(4),
                                cl::desc("Max #stores to inline memset"));

static cl::opt<bool> AlignLoads("hexagon-align-loads",
  cl::Hidden, cl::init(false),
  cl::desc("Rewrite unaligned loads as a pair of aligned loads"));

static cl::opt<bool>
    DisableArgsMinAlignment("hexagon-disable-args-min-alignment", cl::Hidden,
                            cl::init(false),
                            cl::desc("Disable minimum alignment of 1 for "
                                     "arguments passed by value on stack"));
namespace {

class HexagonCCState : public CCState {
  unsigned NumNamedVarArgParams = 0;

public:
  HexagonCCState(CallingConv::ID CC, bool IsVarArg, MachineFunction &MF,
                 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
                 unsigned NumNamedArgs)
      : CCState(CC, IsVarArg, MF, locs, C),
        NumNamedVarArgParams(NumNamedArgs) {}
  unsigned getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
};

} // end anonymous namespace


// Implement calling convention for Hexagon.
static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                       CCValAssign::LocInfo &LocInfo,
                       ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    Hexagon::R0, Hexagon::R1, Hexagon::R2,
    Hexagon::R3, Hexagon::R4, Hexagon::R5
  };
  const unsigned NumArgRegs = std::size(ArgRegs);
  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // RegNum is an index into ArgRegs: skip a register if RegNum is odd.
  if (RegNum != NumArgRegs && RegNum % 2 == 1)
    State.AllocateReg(ArgRegs[RegNum]);

  // Always return false here, as this function only makes sure that the first
  // unallocated register has an even register number and does not actually
  // allocate a register for the current argument.
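  //
  // For example (illustrative): if r0 already holds a 32-bit argument, the
  // first unallocated register is r1 (odd index 1). Allocating r1 here means
  // that a following 64-bit argument is assigned the aligned r3:2 pair
  // instead of straddling an unaligned one.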
  return false;
}

#include "HexagonGenCallingConv.inc"


SDValue
HexagonTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG)
      const {
  return SDValue();
}
/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
/// specified by "Src" to the address "Dst" of size "Size". Alignment
/// information is specified by the specific parameter attribute. The copy will
/// be passed as a byval function parameter. Sometimes what we are copying is
/// the end of a larger object, the part that does not fit in registers.
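///
/// As a minimal illustration (sizes are hypothetical): for a 12-byte byval
/// argument with 4-byte alignment, this emits the equivalent of
/// memcpy(Dst, Src, 12) with Align(4), which is later expanded into
/// loads/stores or a libcall.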
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
  return DAG.getMemcpy(
      Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
      /*isVolatile=*/false, /*AlwaysInline=*/false,
      /*CI=*/nullptr, std::nullopt, MachinePointerInfo(), MachinePointerInfo());
}

bool
HexagonTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);

  if (MF.getSubtarget<HexagonSubtarget>().useHVXOps())
    return CCInfo.CheckReturn(Outs, RetCC_Hexagon_HVX);
  return CCInfo.CheckReturn(Outs, RetCC_Hexagon);
}

// LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
// passed by value, the function prototype is modified to return void and
// the value is stored in memory pointed to by a pointer passed by the caller.
SDValue
HexagonTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                   bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   const SDLoc &dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values of ISD::RET.
  if (Subtarget.useHVXOps())
    CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon_HVX);
  else
    CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    SDValue Val = OutVals[i];

    switch (VA.getLocInfo()) {
    default:
      // Loc info must be one of Full, BCvt, SExt, ZExt, or AExt.
      llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::BCvt:
      Val = DAG.getBitcast(VA.getLocVT(), Val);
      break;
    case CCValAssign::SExt:
      Val = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Val);
      break;
    case CCValAssign::ZExt:
      Val = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Val);
      break;
    case CCValAssign::AExt:
      Val = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Val);
      break;
    }

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Val, Glue);

    // Guarantee that all emitted copies are stuck together with flags.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(HexagonISD::RET_GLUE, dl, MVT::Other, RetOps);
}

bool HexagonTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Emit as a tail call only if the call instruction itself is marked as a
  // tail call; full eligibility is checked later, in LowerCall.
  return CI->isTailCall();
}

Register HexagonTargetLowering::getRegisterByName(
      const char* RegName, LLT VT, const MachineFunction &) const {
  // The Linux kernel uses r19; the other common register names below are
  // accepted as well.
  Register Reg = StringSwitch<Register>(RegName)
                     .Case("r0", Hexagon::R0)
                     .Case("r1", Hexagon::R1)
                     .Case("r2", Hexagon::R2)
                     .Case("r3", Hexagon::R3)
                     .Case("r4", Hexagon::R4)
                     .Case("r5", Hexagon::R5)
                     .Case("r6", Hexagon::R6)
                     .Case("r7", Hexagon::R7)
                     .Case("r8", Hexagon::R8)
                     .Case("r9", Hexagon::R9)
                     .Case("r10", Hexagon::R10)
                     .Case("r11", Hexagon::R11)
                     .Case("r12", Hexagon::R12)
                     .Case("r13", Hexagon::R13)
                     .Case("r14", Hexagon::R14)
                     .Case("r15", Hexagon::R15)
                     .Case("r16", Hexagon::R16)
                     .Case("r17", Hexagon::R17)
                     .Case("r18", Hexagon::R18)
                     .Case("r19", Hexagon::R19)
                     .Case("r20", Hexagon::R20)
                     .Case("r21", Hexagon::R21)
                     .Case("r22", Hexagon::R22)
                     .Case("r23", Hexagon::R23)
                     .Case("r24", Hexagon::R24)
                     .Case("r25", Hexagon::R25)
                     .Case("r26", Hexagon::R26)
                     .Case("r27", Hexagon::R27)
                     .Case("r28", Hexagon::R28)
                     .Case("r29", Hexagon::R29)
                     .Case("r30", Hexagon::R30)
                     .Case("r31", Hexagon::R31)
                     .Case("r1:0", Hexagon::D0)
                     .Case("r3:2", Hexagon::D1)
                     .Case("r5:4", Hexagon::D2)
                     .Case("r7:6", Hexagon::D3)
                     .Case("r9:8", Hexagon::D4)
                     .Case("r11:10", Hexagon::D5)
                     .Case("r13:12", Hexagon::D6)
                     .Case("r15:14", Hexagon::D7)
                     .Case("r17:16", Hexagon::D8)
                     .Case("r19:18", Hexagon::D9)
                     .Case("r21:20", Hexagon::D10)
                     .Case("r23:22", Hexagon::D11)
                     .Case("r25:24", Hexagon::D12)
                     .Case("r27:26", Hexagon::D13)
                     .Case("r29:28", Hexagon::D14)
                     .Case("r31:30", Hexagon::D15)
                     .Case("sp", Hexagon::R29)
                     .Case("fp", Hexagon::R30)
                     .Case("lr", Hexagon::R31)
                     .Case("p0", Hexagon::P0)
                     .Case("p1", Hexagon::P1)
                     .Case("p2", Hexagon::P2)
                     .Case("p3", Hexagon::P3)
                     .Case("sa0", Hexagon::SA0)
                     .Case("lc0", Hexagon::LC0)
                     .Case("sa1", Hexagon::SA1)
                     .Case("lc1", Hexagon::LC1)
                     .Case("m0", Hexagon::M0)
                     .Case("m1", Hexagon::M1)
                     .Case("usr", Hexagon::USR)
                     .Case("ugp", Hexagon::UGP)
                     .Case("cs0", Hexagon::CS0)
                     .Case("cs1", Hexagon::CS1)
                     .Default(Register());
  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name global variable");
}

/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes that
/// Chain/Glue are the input chain/glue to use, and that TheCall is the call
/// being lowered. Returns an SDNode with the same number of values as the
/// ISD::CALL.
SDValue HexagonTargetLowering::LowerCallResult(
    SDValue Chain, SDValue Glue, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    const SmallVectorImpl<SDValue> &OutVals, SDValue Callee) const {
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;

  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  if (Subtarget.useHVXOps())
    CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon_HVX);
  else
    CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    SDValue RetVal;
    if (RVLocs[i].getValVT() == MVT::i1) {
      // Return values of type MVT::i1 require special handling. The reason
      // is that MVT::i1 is associated with the PredRegs register class, but
      // values of that type are still returned in R0. Generate an explicit
      // copy into a predicate register from R0, and treat the value of the
      // predicate register as the call result.
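      //
      // Roughly, the nodes built below form this chain:
      //   FR0    = CopyFromReg r0, i32       ; physical return register
      //   TPR    = CopyToReg   FR0 -> PredR  ; PredR is a PredRegs vreg
      //   RetVal = CopyFromReg PredR, i1     ; the value handed to the caller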
      auto &MRI = DAG.getMachineFunction().getRegInfo();
      SDValue FR0 = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                                       MVT::i32, Glue);
      // FR0 = (Value, Chain, Glue)
      Register PredR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
      SDValue TPR = DAG.getCopyToReg(FR0.getValue(1), dl, PredR,
                                     FR0.getValue(0), FR0.getValue(2));
      // TPR = (Chain, Glue)
      // Don't glue this CopyFromReg, because it copies from a virtual
      // register. If it is glued to the call, InstrEmitter will add it
      // as an implicit def to the call (EmitMachineNode).
      RetVal = DAG.getCopyFromReg(TPR.getValue(0), dl, PredR, MVT::i1);
      Glue = TPR.getValue(1);
      Chain = TPR.getValue(0);
    } else {
      RetVal = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                                  RVLocs[i].getValVT(), Glue);
      Glue = RetVal.getValue(2);
      Chain = RetVal.getValue(1);
    }
    InVals.push_back(RetVal.getValue(0));
  }

  return Chain;
}

/// LowerCall - Function arguments are copied from virtual registers to
/// (physical registers)/(stack frame); CALLSEQ_START and CALLSEQ_END are
/// emitted.
SDValue
HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  bool DoesNotReturn = CLI.DoesNotReturn;

  bool IsStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  unsigned NumParams = CLI.CB ? CLI.CB->getFunctionType()->getNumParams() : 0;
  if (GlobalAddressSDNode *GAN = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(GAN->getGlobal(), dl, MVT::i32);

  // Linux ABI treats var-arg calls the same way as regular ones.
  bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs, *DAG.getContext(),
                        NumParams);

  if (Subtarget.useHVXOps())
    CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_HVX);
  else if (DisableArgsMinAlignment)
    CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_Legacy);
  else
    CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);

  if (CLI.IsTailCall) {
    bool StructAttrFlag = MF.getFunction().hasStructRetAttr();
    CLI.IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                        IsVarArg, IsStructRet, StructAttrFlag, Outs,
                        OutVals, Ins, DAG);
    for (const CCValAssign &VA : ArgLocs) {
      if (VA.isMemLoc()) {
        CLI.IsTailCall = false;
        break;
      }
    }
    LLVM_DEBUG(dbgs() << (CLI.IsTailCall ? "Eligible for Tail Call\n"
                                         : "Argument must be passed on stack. "
                                           "Not eligible for Tail Call\n"));
  }
  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getStackSize();
  SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
  SDValue StackPtr =
      DAG.getCopyFromReg(Chain, dl, HRI.getStackRegister(), PtrVT);

  bool NeedsArgAlign = false;
  Align LargestAlignSeen;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    // Record if we need > 8 byte alignment on an argument.
    bool ArgAlign = Subtarget.isHVXVectorType(VA.getValVT());
    NeedsArgAlign |= ArgAlign;

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      // Loc info must be one of Full, BCvt, SExt, ZExt, or AExt.
      llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getBitcast(VA.getLocVT(), Arg);
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isMemLoc()) {
      unsigned LocMemOffset = VA.getLocMemOffset();
      SDValue MemAddr = DAG.getConstant(LocMemOffset, dl,
                                        StackPtr.getValueType());
      MemAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, MemAddr);
      if (ArgAlign)
        LargestAlignSeen = std::max(
            LargestAlignSeen, Align(VA.getLocVT().getStoreSizeInBits() / 8));
      if (Flags.isByVal()) {
        // The argument is a struct passed by value. According to LLVM, "Arg"
        // is a pointer.
        MemOpChains.push_back(CreateCopyOfByValArgument(Arg, MemAddr, Chain,
                                                        Flags, DAG, dl));
      } else {
        MachinePointerInfo LocPI = MachinePointerInfo::getStack(
            DAG.getMachineFunction(), LocMemOffset);
        SDValue S = DAG.getStore(Chain, dl, Arg, MemAddr, LocPI);
        MemOpChains.push_back(S);
      }
      continue;
    }

    // Arguments that are passed in a register must be recorded in the
    // RegsToPass vector.
    if (VA.isRegLoc())
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
  }

  if (NeedsArgAlign && Subtarget.hasV60Ops()) {
    LLVM_DEBUG(dbgs() << "Function needs byte stack align due to call args\n");
    Align VecAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
    LargestAlignSeen = std::max(LargestAlignSeen, VecAlign);
    MFI.ensureMaxAlignment(LargestAlignSeen);
  }
  // Chain all of the store nodes into one single TokenFactor node, since the
  // stores are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  SDValue Glue;
  if (!CLI.IsTailCall) {
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
    Glue = Chain.getValue(1);
  }

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The Glue is necessary since all emitted instructions must be
  // stuck together.
  if (!CLI.IsTailCall) {
    for (const auto &R : RegsToPass) {
      Chain = DAG.getCopyToReg(Chain, dl, R.first, R.second, Glue);
      Glue = Chain.getValue(1);
    }
  } else {
    // For tail calls lower the arguments to the 'real' stack slot.
    //
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.
    //
    // Do not flag preceding copytoreg stuff together with the following stuff.
    Glue = SDValue();
    for (const auto &R : RegsToPass) {
      Chain = DAG.getCopyToReg(Chain, dl, R.first, R.second, Glue);
      Glue = Chain.getValue(1);
    }
    Glue = SDValue();
  }

  bool LongCalls = MF.getSubtarget<HexagonSubtarget>().useLongCalls();
  unsigned Flags = LongCalls ? HexagonII::HMOTF_ConstExtended : 0;

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, PtrVT, 0, Flags);
  } else if (ExternalSymbolSDNode *S =
             dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, Flags);
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (const auto &R : RegsToPass)
    Ops.push_back(DAG.getRegister(R.first, R.second.getValueType()));

  const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (Glue.getNode())
    Ops.push_back(Glue);

  if (CLI.IsTailCall) {
    MFI.setHasTailCall();
    return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, Ops);
  }

  // Set this here because we need to know this for "hasFP" in frame lowering.
  // The target-independent code calls getFrameRegister before setting it, and
  // getFrameRegister uses hasFP to determine whether the function has FP.
  MFI.setHasCalls(true);

  unsigned OpCode = DoesNotReturn ? HexagonISD::CALLnr : HexagonISD::CALL;
  Chain = DAG.getNode(OpCode, dl, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, Glue, dl);
  Glue = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, Glue, CallConv, IsVarArg, Ins, dl, DAG,
                         InVals, OutVals, Callee);
}

/// Returns true, and sets the base pointer, offset pointer, and addressing
/// mode by reference, if this node can be combined with a load / store to
/// form a post-indexed load / store.
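///
/// For example, an i32 load whose address is later incremented by an
/// (add base, #4) can become a post-increment load, roughly
/// "r1 = memw(r0++#4)", provided the constant is a valid auto-increment
/// immediate for the memory type (checked at the end of this function).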
bool HexagonTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
      SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM,
      SelectionDAG &DAG) const {
  LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(N);
  if (!LSN)
    return false;
  EVT VT = LSN->getMemoryVT();
  if (!VT.isSimple())
    return false;
  bool IsLegalType = VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
                     VT == MVT::i64 || VT == MVT::f32 || VT == MVT::f64 ||
                     VT == MVT::v2i16 || VT == MVT::v2i32 || VT == MVT::v4i8 ||
                     VT == MVT::v4i16 || VT == MVT::v8i8 ||
                     Subtarget.isHVXVectorType(VT.getSimpleVT());
  if (!IsLegalType)
    return false;

  if (Op->getOpcode() != ISD::ADD)
    return false;
  Base = Op->getOperand(0);
  Offset = Op->getOperand(1);
  if (!isa<ConstantSDNode>(Offset.getNode()))
    return false;
  AM = ISD::POST_INC;

  int32_t V = cast<ConstantSDNode>(Offset.getNode())->getSExtValue();
  return Subtarget.getInstrInfo()->isValidAutoIncImm(VT, V);
}

SDValue HexagonTargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
  if (DAG.getMachineFunction().getFunction().hasOptSize())
    return SDValue();
  else
    return Op;
}

SDValue
HexagonTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
  unsigned LR = HRI.getRARegister();

  if ((Op.getOpcode() != ISD::INLINEASM &&
       Op.getOpcode() != ISD::INLINEASM_BR) || HMFI.hasClobberLR())
    return Op;

  unsigned NumOps = Op.getNumOperands();
  if (Op.getOperand(NumOps-1).getValueType() == MVT::Glue)
    --NumOps;  // Ignore the flag operand.

  for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
    const InlineAsm::Flag Flags(Op.getConstantOperandVal(i));
    unsigned NumVals = Flags.getNumOperandRegisters();
    ++i;  // Skip the ID value.

    switch (Flags.getKind()) {
    default:
      llvm_unreachable("Bad flags!");
    case InlineAsm::Kind::RegUse:
    case InlineAsm::Kind::Imm:
    case InlineAsm::Kind::Mem:
      i += NumVals;
      break;
    case InlineAsm::Kind::Clobber:
    case InlineAsm::Kind::RegDef:
    case InlineAsm::Kind::RegDefEarlyClobber: {
      for (; NumVals; --NumVals, ++i) {
        Register Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg();
        if (Reg != LR)
          continue;
        HMFI.setHasClobberLR(true);
        return Op;
      }
      break;
    }
    }
  }

  return Op;
}

// Need to transform ISD::PREFETCH into something that doesn't inherit
// all of the properties of ISD::PREFETCH, specifically SDNPMayLoad and
// SDNPMayStore.
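//
// For example, "prefetch(p)" is lowered to, roughly, "dcfetch(Rs+#0)"; the
// zero offset may later be merged with an "add" feeding the address, as the
// comment inside the function notes.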
SDValue HexagonTargetLowering::LowerPREFETCH(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Addr = Op.getOperand(1);
  // Lower it to DCFETCH($reg, #0). A "pat" will try to merge the offset in,
  // if the "reg" is fed by an "add".
  SDLoc DL(Op);
  SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
  return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
}

// Custom-handle ISD::READCYCLECOUNTER because the target-independent SDNode
// is marked as having side-effects, while the register read on Hexagon does
// not have any. TableGen refuses to accept the direct pattern from that node
// to the A4_tfrcpp.
SDValue HexagonTargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDLoc dl(Op);
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
  return DAG.getNode(HexagonISD::READCYCLE, dl, VTs, Chain);
}

// Custom-handle ISD::READSTEADYCOUNTER because the target-independent SDNode
// is marked as having side-effects, while the register read on Hexagon does
// not have any. TableGen refuses to accept the direct pattern from that node
// to the A4_tfrcpp.
SDValue HexagonTargetLowering::LowerREADSTEADYCOUNTER(SDValue Op,
                                                      SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDLoc dl(Op);
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
  return DAG.getNode(HexagonISD::READTIMER, dl, VTs, Chain);
}

SDValue HexagonTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  unsigned IntNo = Op.getConstantOperandVal(1);
  // Lower the hexagon_prefetch builtin to DCFETCH, as above.
  if (IntNo == Intrinsic::hexagon_prefetch) {
    SDValue Addr = Op.getOperand(2);
    SDLoc DL(Op);
    SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
    return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
  }
  return SDValue();
}
760 | |
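// Lower ISD::DYNAMIC_STACKALLOC, e.g. produced by "alloca i8, i32 %n,
// align 32", to the target node HexagonISD::ALLOCA, carrying the requested
// alignment as a constant operand. An alignment of zero is replaced below
// with the natural stack alignment.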
SDValue
HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDValue Align = Op.getOperand(2);
  SDLoc dl(Op);

  ConstantSDNode *AlignConst = dyn_cast<ConstantSDNode>(Align);
  assert(AlignConst && "Non-constant Align in LowerDYNAMIC_STACKALLOC");

  unsigned A = AlignConst->getSExtValue();
  auto &HFI = *Subtarget.getFrameLowering();
  // "Zero" means natural stack alignment.
  if (A == 0)
    A = HFI.getStackAlign().value();

  LLVM_DEBUG({
    dbgs() << __func__ << " Align: " << A << " Size: ";
    Size.getNode()->dump(&DAG);
    dbgs() << "\n";
  });

  SDValue AC = DAG.getConstant(A, dl, MVT::i32);
  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
  SDValue AA = DAG.getNode(HexagonISD::ALLOCA, dl, VTs, Chain, Size, AC);

  DAG.ReplaceAllUsesOfValueWith(Op, AA);
  return AA;
}

SDValue HexagonTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Linux ABI treats var-arg calls the same way as regular ones.
  bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs,
                        *DAG.getContext(),
                        MF.getFunction().getFunctionType()->getNumParams());

  if (Subtarget.useHVXOps())
    CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_HVX);
  else if (DisableArgsMinAlignment)
    CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_Legacy);
  else
    CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);

  // For LLVM, in the case of returning a struct by value (>8 bytes), the
  // first argument is a pointer to the location on the caller's stack where
  // the return value will be stored. For Hexagon, that address is passed
  // only when the struct is larger than 8 bytes; otherwise no address is
  // passed into the callee and the callee returns the result directly
  // through R0/R1.
  auto NextSingleReg = [] (const TargetRegisterClass &RC, unsigned Reg) {
    switch (RC.getID()) {
      case Hexagon::IntRegsRegClassID:
        return Reg - Hexagon::R0 + 1;
      case Hexagon::DoubleRegsRegClassID:
        return (Reg - Hexagon::D0 + 1) * 2;
      case Hexagon::HvxVRRegClassID:
        return Reg - Hexagon::V0 + 1;
      case Hexagon::HvxWRRegClassID:
        return (Reg - Hexagon::W0 + 1) * 2;
    }
    llvm_unreachable("Unexpected register class");
  };
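  // For example, if the last named argument was passed in the register pair
  // D1 (r3:2), NextSingleReg returns 4, so r4 is the first register that may
  // hold vararg values.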

  auto &HFL = const_cast<HexagonFrameLowering&>(*Subtarget.getFrameLowering());
  auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
  HFL.FirstVarArgSavedReg = 0;
  HMFI.setFirstNamedArgFrameIndex(-int(MFI.getNumFixedObjects()));

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Ins[i].Flags;
    bool ByVal = Flags.isByVal();

    // Arguments passed in registers:
    //   1. 32- and 64-bit values and HVX vectors are passed directly,
    //   2. Large structs are passed via an address, and the address is
    //      passed in a register.
    if (VA.isRegLoc() && ByVal && Flags.getByValSize() <= 8)
      llvm_unreachable("ByValSize must be bigger than 8 bytes");

    bool InReg = VA.isRegLoc() &&
                 (!ByVal || (ByVal && Flags.getByValSize() > 8));

    if (InReg) {
      MVT RegVT = VA.getLocVT();
      if (VA.getLocInfo() == CCValAssign::BCvt)
        RegVT = VA.getValVT();

      const TargetRegisterClass *RC = getRegClassFor(RegVT);
      Register VReg = MRI.createVirtualRegister(RC);
      SDValue Copy = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);

      // Treat values of type MVT::i1 specially: they are passed in
      // registers of type i32, but they need to remain as values of
      // type i1 for consistency of the argument lowering.
      if (VA.getValVT() == MVT::i1) {
        assert(RegVT.getSizeInBits() <= 32);
        SDValue T = DAG.getNode(ISD::AND, dl, RegVT,
                                Copy, DAG.getConstant(1, dl, RegVT));
        Copy = DAG.getSetCC(dl, MVT::i1, T, DAG.getConstant(0, dl, RegVT),
                            ISD::SETNE);
      } else {
#ifndef NDEBUG
        unsigned RegSize = RegVT.getSizeInBits();
        assert(RegSize == 32 || RegSize == 64 ||
               Subtarget.isHVXVectorType(RegVT));
#endif
      }
      InVals.push_back(Copy);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      HFL.FirstVarArgSavedReg = NextSingleReg(*RC, VA.getLocReg());
    } else {
      assert(VA.isMemLoc() && "Argument should be passed in memory");

      // If it's a byval parameter, then we need to compute the
      // "real" size, not the size of the pointer.
      unsigned ObjSize = Flags.isByVal()
                            ? Flags.getByValSize()
                            : VA.getLocVT().getStoreSizeInBits() / 8;

      // Create the frame index object for this incoming parameter.
      int Offset = HEXAGON_LRFP_SIZE + VA.getLocMemOffset();
      int FI = MFI.CreateFixedObject(ObjSize, Offset, true);
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);

      if (Flags.isByVal()) {
        // If it's a pass-by-value aggregate, then do not dereference the stack
        // location. Instead, we should generate a reference to the stack
        // location.
        InVals.push_back(FIN);
      } else {
        SDValue L = DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
                                MachinePointerInfo::getFixedStack(MF, FI, 0));
        InVals.push_back(L);
      }
    }
  }

  if (IsVarArg && Subtarget.isEnvironmentMusl()) {
    for (int i = HFL.FirstVarArgSavedReg; i < 6; i++)
      MRI.addLiveIn(Hexagon::R0+i);
  }

  if (IsVarArg && Subtarget.isEnvironmentMusl()) {
    HMFI.setFirstNamedArgFrameIndex(HMFI.getFirstNamedArgFrameIndex() - 1);
    HMFI.setLastNamedArgFrameIndex(-int(MFI.getNumFixedObjects()));

    // Create a frame index for the start of the register saved area.
    int NumVarArgRegs = 6 - HFL.FirstVarArgSavedReg;
    bool RequiresPadding = (NumVarArgRegs & 1);
    int RegSaveAreaSizePlusPadding = RequiresPadding
                                        ? (NumVarArgRegs + 1) * 4
                                        : NumVarArgRegs * 4;
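    // For example (illustrative): with two named arguments in r0 and r1,
    // FirstVarArgSavedReg is 2, so r2-r5 are saved (4 registers, 16 bytes,
    // no padding). An odd register count would get 4 extra bytes of padding
    // to keep the area 8-byte aligned.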

    if (RegSaveAreaSizePlusPadding > 0) {
      // The offset to the saved register area should be 8-byte aligned.
      int RegAreaStart = HEXAGON_LRFP_SIZE + CCInfo.getStackSize();
      if (RegAreaStart % 8)
        RegAreaStart = (RegAreaStart + 7) & -8;

      int RegSaveAreaFrameIndex =
          MFI.CreateFixedObject(RegSaveAreaSizePlusPadding, RegAreaStart, true);
      HMFI.setRegSavedAreaStartFrameIndex(RegSaveAreaFrameIndex);

      // This will point to the next argument passed via stack.
      int Offset = RegAreaStart + RegSaveAreaSizePlusPadding;
      int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
      HMFI.setVarArgsFrameIndex(FI);
    } else {
      // This will point to the next argument passed via stack, when
      // there is no saved register area.
      int Offset = HEXAGON_LRFP_SIZE + CCInfo.getStackSize();
      int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
      HMFI.setRegSavedAreaStartFrameIndex(FI);
      HMFI.setVarArgsFrameIndex(FI);
    }
  }


  if (IsVarArg && !Subtarget.isEnvironmentMusl()) {
    // This will point to the next argument passed via stack.
    int Offset = HEXAGON_LRFP_SIZE + CCInfo.getStackSize();
    int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
    HMFI.setVarArgsFrameIndex(FI);
  }

  return Chain;
}

SDValue
HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  // VASTART stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  MachineFunction &MF = DAG.getMachineFunction();
  HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  if (!Subtarget.isEnvironmentMusl()) {
    return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr, Op.getOperand(1),
                        MachinePointerInfo(SV));
  }
  auto &FuncInfo = *MF.getInfo<HexagonMachineFunctionInfo>();
  auto &HFL = *Subtarget.getFrameLowering();
  SDLoc DL(Op);
  SmallVector<SDValue, 8> MemOps;

  // Get the frame index of the va_list.
  SDValue FIN = Op.getOperand(1);

  // If the first vararg register is odd, add 4 bytes to the start of the
  // saved register area to point to the first register location. This is
  // because the saved register area has to be 8-byte aligned. In case of an
  // odd start register, there will be 4 bytes of padding at the beginning of
  // the saved register area. If all registers are used up, the following
  // condition will handle it correctly.
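  //
  // The musl va_list on Hexagon holds three 4-byte pointers, stored below at
  // offsets 0, 4, and 8: the current position in the register save area, the
  // end of that area, and the overflow (stack) argument area.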
  SDValue SavedRegAreaStartFrameIndex =
      DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(), MVT::i32);

  auto PtrVT = getPointerTy(DAG.getDataLayout());

  if (HFL.FirstVarArgSavedReg & 1)
    SavedRegAreaStartFrameIndex =
        DAG.getNode(ISD::ADD, DL, PtrVT,
                    DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(),
                                      MVT::i32),
                    DAG.getIntPtrConstant(4, DL));

  // Store the saved register area start pointer.
  SDValue Store =
      DAG.getStore(Op.getOperand(0), DL,
                   SavedRegAreaStartFrameIndex,
                   FIN, MachinePointerInfo(SV));
  MemOps.push_back(Store);

  // Store the saved register area end pointer.
  FIN = DAG.getNode(ISD::ADD, DL, PtrVT,
                    FIN, DAG.getIntPtrConstant(4, DL));
  Store = DAG.getStore(Op.getOperand(0), DL,
                       DAG.getFrameIndex(FuncInfo.getVarArgsFrameIndex(),
                                         PtrVT),
                       FIN, MachinePointerInfo(SV, 4));
  MemOps.push_back(Store);

  // Store the overflow area pointer.
  FIN = DAG.getNode(ISD::ADD, DL, PtrVT,
                    FIN, DAG.getIntPtrConstant(4, DL));
  Store = DAG.getStore(Op.getOperand(0), DL,
                       DAG.getFrameIndex(FuncInfo.getVarArgsFrameIndex(),
                                         PtrVT),
                       FIN, MachinePointerInfo(SV, 8));
  MemOps.push_back(Store);

  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}

SDValue
HexagonTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
  // Assert that the Linux ABI is enabled for the current compilation.
  assert(Subtarget.isEnvironmentMusl() && "Linux ABI should be enabled");
  SDValue Chain = Op.getOperand(0);
  SDValue DestPtr = Op.getOperand(1);
  SDValue SrcPtr = Op.getOperand(2);
  const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  SDLoc DL(Op);
  // The size of the va_list is 12 bytes, as it holds 3 pointers. Therefore,
  // we need to memcpy 12 bytes from one va_list to the other.
  return DAG.getMemcpy(
      Chain, DL, DestPtr, SrcPtr, DAG.getIntPtrConstant(12, DL), Align(4),
      /*isVolatile*/ false, false, /*CI=*/nullptr, std::nullopt,
      MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
}

SDValue HexagonTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  const SDLoc &dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  MVT ResTy = ty(Op);
  MVT OpTy = ty(LHS);

  if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
    MVT ElemTy = OpTy.getVectorElementType();
    assert(ElemTy.isScalarInteger());
    MVT WideTy = MVT::getVectorVT(MVT::getIntegerVT(2*ElemTy.getSizeInBits()),
                                  OpTy.getVectorNumElements());
    return DAG.getSetCC(dl, ResTy,
                        DAG.getSExtOrTrunc(LHS, SDLoc(LHS), WideTy),
                        DAG.getSExtOrTrunc(RHS, SDLoc(RHS), WideTy), CC);
  }

  // Treat all other vector types as legal.
  if (ResTy.isVector())
    return Op;

  // Comparisons of short integers should use sign-extend, not zero-extend,
  // since we can represent small negative values in the compare instructions.
  // The LLVM default is to use zero-extend arbitrarily in these cases.
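  //
  // For example, comparing an i8 value against -1: with sign-extension this
  // can be a direct "cmp.eq(r0, #-1)", while zero-extension would have to
  // compare against the constant 255 instead.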
  auto isSExtFree = [this](SDValue N) {
    switch (N.getOpcode()) {
      case ISD::TRUNCATE: {
        // A sign-extend of a truncate of a sign-extend is free.
        SDValue Op = N.getOperand(0);
        if (Op.getOpcode() != ISD::AssertSext)
          return false;
        EVT OrigTy = cast<VTSDNode>(Op.getOperand(1))->getVT();
        unsigned ThisBW = ty(N).getSizeInBits();
        unsigned OrigBW = OrigTy.getSizeInBits();
        // The type that was sign-extended to get the AssertSext must be
        // narrower than the type of N (so that N still has the same value
        // as the original).
        return ThisBW >= OrigBW;
      }
      case ISD::LOAD:
        // We have sign-extended loads.
        return true;
    }
    return false;
  };

  if (OpTy == MVT::i8 || OpTy == MVT::i16) {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS);
    bool IsNegative = C && C->getAPIntValue().isNegative();
    if (IsNegative || isSExtFree(LHS) || isSExtFree(RHS))
      return DAG.getSetCC(dl, ResTy,
                          DAG.getSExtOrTrunc(LHS, SDLoc(LHS), MVT::i32),
                          DAG.getSExtOrTrunc(RHS, SDLoc(RHS), MVT::i32), CC);
  }

  return SDValue();
}

SDValue
HexagonTargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue PredOp = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1), Op2 = Op.getOperand(2);
  MVT OpTy = ty(Op1);
  const SDLoc &dl(Op);

  if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
    MVT ElemTy = OpTy.getVectorElementType();
    assert(ElemTy.isScalarInteger());
    MVT WideTy = MVT::getVectorVT(MVT::getIntegerVT(2*ElemTy.getSizeInBits()),
                                  OpTy.getVectorNumElements());
    // Generate (trunc (select (_, sext, sext))).
    return DAG.getSExtOrTrunc(
              DAG.getSelect(dl, WideTy, PredOp,
                            DAG.getSExtOrTrunc(Op1, dl, WideTy),
                            DAG.getSExtOrTrunc(Op2, dl, WideTy)),
              dl, OpTy);
  }

  return SDValue();
}

SDValue
HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
  EVT ValTy = Op.getValueType();
  ConstantPoolSDNode *CPN = cast<ConstantPoolSDNode>(Op);
  Constant *CVal = nullptr;
  bool isVTi1Type = false;
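  // Vectors of i1 cannot be laid out in the constant pool directly; convert
  // them to vectors of i8 first. Note the encoding produced below: each byte
  // is isZeroValue() of the original element, so e.g. (illustrative)
  // <4 x i1> <1,0,1,1> becomes <4 x i8> <0,1,0,0>.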
  if (auto *CV = dyn_cast<ConstantVector>(CPN->getConstVal())) {
    if (cast<VectorType>(CV->getType())->getElementType()->isIntegerTy(1)) {
      IRBuilder<> IRB(CV->getContext());
      SmallVector<Constant*, 128> NewConst;
      unsigned VecLen = CV->getNumOperands();
      assert(isPowerOf2_32(VecLen) &&
             "conversion only supported for pow2 VectorSize");
      for (unsigned i = 0; i < VecLen; ++i)
        NewConst.push_back(IRB.getInt8(CV->getOperand(i)->isZeroValue()));

      CVal = ConstantVector::get(NewConst);
      isVTi1Type = true;
    }
  }
  Align Alignment = CPN->getAlign();
  bool IsPositionIndependent = isPositionIndependent();
  unsigned char TF = IsPositionIndependent ? HexagonII::MO_PCREL : 0;

  unsigned Offset = 0;
  SDValue T;
  if (CPN->isMachineConstantPoolEntry())
    T = DAG.getTargetConstantPool(CPN->getMachineCPVal(), ValTy, Alignment,
                                  Offset, TF);
  else if (isVTi1Type)
    T = DAG.getTargetConstantPool(CVal, ValTy, Alignment, Offset, TF);
  else
    T = DAG.getTargetConstantPool(CPN->getConstVal(), ValTy, Alignment, Offset,
                                  TF);

  assert(cast<ConstantPoolSDNode>(T)->getTargetFlags() == TF &&
         "Inconsistent target flag encountered");

  if (IsPositionIndependent)
    return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), ValTy, T);
  return DAG.getNode(HexagonISD::CP, SDLoc(Op), ValTy, T);
}

SDValue
HexagonTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  int Idx = cast<JumpTableSDNode>(Op)->getIndex();
  if (isPositionIndependent()) {
    SDValue T = DAG.getTargetJumpTable(Idx, VT, HexagonII::MO_PCREL);
    return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), VT, T);
  }

  SDValue T = DAG.getTargetJumpTable(Idx, VT);
  return DAG.getNode(HexagonISD::JT, SDLoc(Op), VT, T);
}

SDValue
HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = Op.getConstantOperandVal(0);
  if (Depth) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
    return DAG.getLoad(VT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Return LR, which contains the return address. Mark it an implicit live-in.
  Register Reg = MF.addLiveIn(HRI.getRARegister(), getRegClassFor(MVT::i32));
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
}

SDValue
HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = Op.getConstantOperandVal(0);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                                         HRI.getFrameRegister(), VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo());
  return FrameAddr;
}

SDValue
HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const {
  SDLoc dl(Op);
  return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
}

SDValue
HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  auto *GAN = cast<GlobalAddressSDNode>(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  auto *GV = GAN->getGlobal();
  int64_t Offset = GAN->getOffset();

  auto &HLOF = *HTM.getObjFileLowering();
  Reloc::Model RM = HTM.getRelocationModel();

  if (RM == Reloc::Static) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset);
    const GlobalObject *GO = GV->getAliaseeObject();
    if (GO && Subtarget.useSmallData() && HLOF.isGlobalInSmallSection(GO, HTM))
      return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, GA);
    return DAG.getNode(HexagonISD::CONST32, dl, PtrVT, GA);
  }

  bool UsePCRel = getTargetMachine().shouldAssumeDSOLocal(GV);
  if (UsePCRel) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset,
                                            HexagonII::MO_PCREL);
    return DAG.getNode(HexagonISD::AT_PCREL, dl, PtrVT, GA);
  }

  // Use the GOT index.
  SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
  SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, HexagonII::MO_GOT);
  SDValue Off = DAG.getConstant(Offset, dl, MVT::i32);
  return DAG.getNode(HexagonISD::AT_GOT, dl, PtrVT, GOT, GA, Off);
}
1266 | |
SDValue
HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDLoc dl(Op);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  Reloc::Model RM = HTM.getRelocationModel();
  if (RM == Reloc::Static) {
    SDValue A = DAG.getTargetBlockAddress(BA, PtrVT);
    return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, A);
  }

  SDValue A = DAG.getTargetBlockAddress(BA, PtrVT, 0, HexagonII::MO_PCREL);
  return DAG.getNode(HexagonISD::AT_PCREL, dl, PtrVT, A);
}
1283 | |
1284 | SDValue |
1285 | HexagonTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) |
1286 | const { |
1287 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
1288 | SDValue GOTSym = DAG.getTargetExternalSymbol(HEXAGON_GOT_SYM_NAME, VT: PtrVT, |
1289 | TargetFlags: HexagonII::MO_PCREL); |
1290 | return DAG.getNode(Opcode: HexagonISD::AT_PCREL, DL: SDLoc(Op), VT: PtrVT, Operand: GOTSym); |
1291 | } |
1292 | |
1293 | SDValue |
1294 | HexagonTargetLowering::GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain, |
1295 | GlobalAddressSDNode *GA, SDValue Glue, EVT PtrVT, unsigned ReturnReg, |
1296 | unsigned char OperandFlags) const { |
1297 | MachineFunction &MF = DAG.getMachineFunction(); |
1298 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
1299 | SDVTList NodeTys = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue); |
1300 | SDLoc dl(GA); |
1301 | SDValue TGA = DAG.getTargetGlobalAddress(GV: GA->getGlobal(), DL: dl, |
1302 | VT: GA->getValueType(ResNo: 0), |
1303 | offset: GA->getOffset(), |
1304 | TargetFlags: OperandFlags); |
  // Create the operands for the call. The operands should be, in order:
  // 1. Chain SDValue.
  // 2. Callee, which in this case is the global address value.
  // 3. Registers live into the call. In this case it is just R0, as we
  //    have only one argument to be passed.
  // 4. Glue.
  // Note: The order is important.
1312 | |
1313 | const auto &HRI = *Subtarget.getRegisterInfo(); |
1314 | const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallingConv::C); |
1315 | assert(Mask && "Missing call preserved mask for calling convention" ); |
1316 | SDValue Ops[] = { Chain, TGA, DAG.getRegister(Reg: Hexagon::R0, VT: PtrVT), |
1317 | DAG.getRegisterMask(RegMask: Mask), Glue }; |
1318 | Chain = DAG.getNode(Opcode: HexagonISD::CALL, DL: dl, VTList: NodeTys, Ops); |
1319 | |
  // Inform MFI that the function has calls.
1321 | MFI.setAdjustsStack(true); |
1322 | |
1323 | Glue = Chain.getValue(R: 1); |
1324 | return DAG.getCopyFromReg(Chain, dl, Reg: ReturnReg, VT: PtrVT, Glue); |
1325 | } |
1326 | |
1327 | // |
1328 | // Lower using the intial executable model for TLS addresses |
1329 | // |
1330 | SDValue |
1331 | HexagonTargetLowering::LowerToTLSInitialExecModel(GlobalAddressSDNode *GA, |
1332 | SelectionDAG &DAG) const { |
1333 | SDLoc dl(GA); |
1334 | int64_t Offset = GA->getOffset(); |
1335 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
1336 | |
1337 | // Get the thread pointer. |
1338 | SDValue TP = DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg: Hexagon::UGP, VT: PtrVT); |
1339 | |
1340 | bool IsPositionIndependent = isPositionIndependent(); |
1341 | unsigned char TF = |
1342 | IsPositionIndependent ? HexagonII::MO_IEGOT : HexagonII::MO_IE; |
1343 | |
1344 | // First generate the TLS symbol address |
1345 | SDValue TGA = DAG.getTargetGlobalAddress(GV: GA->getGlobal(), DL: dl, VT: PtrVT, |
1346 | offset: Offset, TargetFlags: TF); |
1347 | |
1348 | SDValue Sym = DAG.getNode(Opcode: HexagonISD::CONST32, DL: dl, VT: PtrVT, Operand: TGA); |
1349 | |
1350 | if (IsPositionIndependent) { |
1351 | // Generate the GOT pointer in case of position independent code |
1352 | SDValue GOT = LowerGLOBAL_OFFSET_TABLE(Op: Sym, DAG); |
1353 | |
    // Add the TLS symbol address to the GOT pointer. This gives a
    // GOT-relative relocation for the symbol.
1356 | Sym = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: GOT, N2: Sym); |
1357 | } |
1358 | |
  // Load the offset value for the TLS symbol. This offset is relative to
  // the thread pointer.
1361 | SDValue LoadOffset = |
1362 | DAG.getLoad(VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: Sym, PtrInfo: MachinePointerInfo()); |
1363 | |
  // The address of the thread-local variable is the sum of the thread
  // pointer and the variable's offset from it.
1366 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: TP, N2: LoadOffset); |
1367 | } |
1368 | |
1369 | // |
1370 | // Lower using the local executable model for TLS addresses |
1371 | // |
1372 | SDValue |
1373 | HexagonTargetLowering::LowerToTLSLocalExecModel(GlobalAddressSDNode *GA, |
1374 | SelectionDAG &DAG) const { |
1375 | SDLoc dl(GA); |
1376 | int64_t Offset = GA->getOffset(); |
1377 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
1378 | |
1379 | // Get the thread pointer. |
1380 | SDValue TP = DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg: Hexagon::UGP, VT: PtrVT); |
1381 | // Generate the TLS symbol address |
1382 | SDValue TGA = DAG.getTargetGlobalAddress(GV: GA->getGlobal(), DL: dl, VT: PtrVT, offset: Offset, |
1383 | TargetFlags: HexagonII::MO_TPREL); |
1384 | SDValue Sym = DAG.getNode(Opcode: HexagonISD::CONST32, DL: dl, VT: PtrVT, Operand: TGA); |
1385 | |
  // The address of the thread-local variable is the sum of the thread
  // pointer and the variable's offset from it.
1388 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: TP, N2: Sym); |
1389 | } |
1390 | |
1391 | // |
1392 | // Lower using the general dynamic model for TLS addresses |
1393 | // |
1394 | SDValue |
1395 | HexagonTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, |
1396 | SelectionDAG &DAG) const { |
1397 | SDLoc dl(GA); |
1398 | int64_t Offset = GA->getOffset(); |
1399 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
1400 | |
1401 | // First generate the TLS symbol address |
1402 | SDValue TGA = DAG.getTargetGlobalAddress(GV: GA->getGlobal(), DL: dl, VT: PtrVT, offset: Offset, |
1403 | TargetFlags: HexagonII::MO_GDGOT); |
1404 | |
1405 | // Then, generate the GOT pointer |
1406 | SDValue GOT = LowerGLOBAL_OFFSET_TABLE(Op: TGA, DAG); |
1407 | |
1408 | // Add the TLS symbol and the GOT pointer |
1409 | SDValue Sym = DAG.getNode(Opcode: HexagonISD::CONST32, DL: dl, VT: PtrVT, Operand: TGA); |
1410 | SDValue Chain = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: GOT, N2: Sym); |
1411 | |
1412 | // Copy over the argument to R0 |
1413 | SDValue InGlue; |
1414 | Chain = DAG.getCopyToReg(Chain: DAG.getEntryNode(), dl, Reg: Hexagon::R0, N: Chain, Glue: InGlue); |
1415 | InGlue = Chain.getValue(R: 1); |
1416 | |
1417 | unsigned Flags = DAG.getSubtarget<HexagonSubtarget>().useLongCalls() |
1418 | ? HexagonII::MO_GDPLT | HexagonII::HMOTF_ConstExtended |
1419 | : HexagonII::MO_GDPLT; |
1420 | |
1421 | return GetDynamicTLSAddr(DAG, Chain, GA, Glue: InGlue, PtrVT, |
1422 | ReturnReg: Hexagon::R0, OperandFlags: Flags); |
1423 | } |
1424 | |
1425 | // |
1426 | // Lower TLS addresses. |
1427 | // |
1428 | // For now for dynamic models, we only support the general dynamic model. |
1429 | // |
1430 | SDValue |
1431 | HexagonTargetLowering::LowerGlobalTLSAddress(SDValue Op, |
1432 | SelectionDAG &DAG) const { |
1433 | GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Val&: Op); |
1434 | |
1435 | switch (HTM.getTLSModel(GV: GA->getGlobal())) { |
1436 | case TLSModel::GeneralDynamic: |
1437 | case TLSModel::LocalDynamic: |
1438 | return LowerToTLSGeneralDynamicModel(GA, DAG); |
1439 | case TLSModel::InitialExec: |
1440 | return LowerToTLSInitialExecModel(GA, DAG); |
1441 | case TLSModel::LocalExec: |
1442 | return LowerToTLSLocalExecModel(GA, DAG); |
1443 | } |
1444 | llvm_unreachable("Bogus TLS model" ); |
1445 | } |
1446 | |
1447 | //===----------------------------------------------------------------------===// |
1448 | // TargetLowering Implementation |
1449 | //===----------------------------------------------------------------------===// |
1450 | |
1451 | HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM, |
1452 | const HexagonSubtarget &ST) |
1453 | : TargetLowering(TM), HTM(static_cast<const HexagonTargetMachine&>(TM)), |
1454 | Subtarget(ST) { |
1455 | auto &HRI = *Subtarget.getRegisterInfo(); |
1456 | |
1457 | setPrefLoopAlignment(Align(16)); |
1458 | setMinFunctionAlignment(Align(4)); |
1459 | setPrefFunctionAlignment(Align(16)); |
1460 | setStackPointerRegisterToSaveRestore(HRI.getStackRegister()); |
1461 | setBooleanContents(TargetLoweringBase::UndefinedBooleanContent); |
1462 | setBooleanVectorContents(TargetLoweringBase::UndefinedBooleanContent); |
1463 | |
1464 | setMaxAtomicSizeInBitsSupported(64); |
1465 | setMinCmpXchgSizeInBits(32); |
1466 | |
1467 | if (EnableHexSDNodeSched) |
1468 | setSchedulingPreference(Sched::VLIW); |
1469 | else |
1470 | setSchedulingPreference(Sched::Source); |
1471 | |
  // Limits for inline expansion of memcpy/memmove/memset.
1473 | MaxStoresPerMemcpy = MaxStoresPerMemcpyCL; |
1474 | MaxStoresPerMemcpyOptSize = MaxStoresPerMemcpyOptSizeCL; |
1475 | MaxStoresPerMemmove = MaxStoresPerMemmoveCL; |
1476 | MaxStoresPerMemmoveOptSize = MaxStoresPerMemmoveOptSizeCL; |
1477 | MaxStoresPerMemset = MaxStoresPerMemsetCL; |
1478 | MaxStoresPerMemsetOptSize = MaxStoresPerMemsetOptSizeCL; |
1479 | |
1480 | // |
1481 | // Set up register classes. |
1482 | // |
1483 | |
1484 | addRegisterClass(VT: MVT::i1, RC: &Hexagon::PredRegsRegClass); |
1485 | addRegisterClass(VT: MVT::v2i1, RC: &Hexagon::PredRegsRegClass); // bbbbaaaa |
1486 | addRegisterClass(VT: MVT::v4i1, RC: &Hexagon::PredRegsRegClass); // ddccbbaa |
1487 | addRegisterClass(VT: MVT::v8i1, RC: &Hexagon::PredRegsRegClass); // hgfedcba |
1488 | addRegisterClass(VT: MVT::i32, RC: &Hexagon::IntRegsRegClass); |
1489 | addRegisterClass(VT: MVT::v2i16, RC: &Hexagon::IntRegsRegClass); |
1490 | addRegisterClass(VT: MVT::v4i8, RC: &Hexagon::IntRegsRegClass); |
1491 | addRegisterClass(VT: MVT::i64, RC: &Hexagon::DoubleRegsRegClass); |
1492 | addRegisterClass(VT: MVT::v8i8, RC: &Hexagon::DoubleRegsRegClass); |
1493 | addRegisterClass(VT: MVT::v4i16, RC: &Hexagon::DoubleRegsRegClass); |
1494 | addRegisterClass(VT: MVT::v2i32, RC: &Hexagon::DoubleRegsRegClass); |
1495 | |
1496 | addRegisterClass(VT: MVT::f32, RC: &Hexagon::IntRegsRegClass); |
1497 | addRegisterClass(VT: MVT::f64, RC: &Hexagon::DoubleRegsRegClass); |
1498 | |
1499 | // |
1500 | // Handling of scalar operations. |
1501 | // |
1502 | // All operations default to "legal", except: |
1503 | // - indexed loads and stores (pre-/post-incremented), |
1504 | // - ANY_EXTEND_VECTOR_INREG, ATOMIC_CMP_SWAP_WITH_SUCCESS, CONCAT_VECTORS, |
1505 | // ConstantFP, DEBUGTRAP, FCEIL, FCOPYSIGN, FEXP, FEXP2, FFLOOR, FGETSIGN, |
1506 | // FLOG, FLOG2, FLOG10, FMAXNUM, FMINNUM, FNEARBYINT, FRINT, FROUND, TRAP, |
1507 | // FTRUNC, PREFETCH, SIGN_EXTEND_VECTOR_INREG, ZERO_EXTEND_VECTOR_INREG, |
1508 | // which default to "expand" for at least one type. |
1509 | |
1510 | // Misc operations. |
1511 | setOperationAction(Op: ISD::ConstantFP, VT: MVT::f32, Action: Legal); |
1512 | setOperationAction(Op: ISD::ConstantFP, VT: MVT::f64, Action: Legal); |
1513 | setOperationAction(Op: ISD::TRAP, VT: MVT::Other, Action: Legal); |
1514 | setOperationAction(Op: ISD::ConstantPool, VT: MVT::i32, Action: Custom); |
1515 | setOperationAction(Op: ISD::JumpTable, VT: MVT::i32, Action: Custom); |
1516 | setOperationAction(Op: ISD::BUILD_PAIR, VT: MVT::i64, Action: Expand); |
1517 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::i1, Action: Expand); |
1518 | setOperationAction(Op: ISD::INLINEASM, VT: MVT::Other, Action: Custom); |
1519 | setOperationAction(Op: ISD::INLINEASM_BR, VT: MVT::Other, Action: Custom); |
1520 | setOperationAction(Op: ISD::PREFETCH, VT: MVT::Other, Action: Custom); |
1521 | setOperationAction(Op: ISD::READCYCLECOUNTER, VT: MVT::i64, Action: Custom); |
1522 | setOperationAction(Op: ISD::READSTEADYCOUNTER, VT: MVT::i64, Action: Custom); |
1523 | setOperationAction(Op: ISD::INTRINSIC_VOID, VT: MVT::Other, Action: Custom); |
1524 | setOperationAction(Op: ISD::EH_RETURN, VT: MVT::Other, Action: Custom); |
1525 | setOperationAction(Op: ISD::GLOBAL_OFFSET_TABLE, VT: MVT::i32, Action: Custom); |
1526 | setOperationAction(Op: ISD::GlobalTLSAddress, VT: MVT::i32, Action: Custom); |
1527 | setOperationAction(Op: ISD::ATOMIC_FENCE, VT: MVT::Other, Action: Custom); |
1528 | |
1529 | // Custom legalize GlobalAddress nodes into CONST32. |
1530 | setOperationAction(Op: ISD::GlobalAddress, VT: MVT::i32, Action: Custom); |
1531 | setOperationAction(Op: ISD::GlobalAddress, VT: MVT::i8, Action: Custom); |
1532 | setOperationAction(Op: ISD::BlockAddress, VT: MVT::i32, Action: Custom); |
1533 | |
1534 | // Hexagon needs to optimize cases with negative constants. |
1535 | setOperationAction(Op: ISD::SETCC, VT: MVT::i8, Action: Custom); |
1536 | setOperationAction(Op: ISD::SETCC, VT: MVT::i16, Action: Custom); |
1537 | setOperationAction(Op: ISD::SETCC, VT: MVT::v4i8, Action: Custom); |
1538 | setOperationAction(Op: ISD::SETCC, VT: MVT::v2i16, Action: Custom); |
1539 | |
1540 | // VASTART needs to be custom lowered to use the VarArgsFrameIndex. |
1541 | setOperationAction(Op: ISD::VASTART, VT: MVT::Other, Action: Custom); |
1542 | setOperationAction(Op: ISD::VAEND, VT: MVT::Other, Action: Expand); |
1543 | setOperationAction(Op: ISD::VAARG, VT: MVT::Other, Action: Expand); |
1544 | if (Subtarget.isEnvironmentMusl()) |
1545 | setOperationAction(Op: ISD::VACOPY, VT: MVT::Other, Action: Custom); |
1546 | else |
1547 | setOperationAction(Op: ISD::VACOPY, VT: MVT::Other, Action: Expand); |
1548 | |
1549 | setOperationAction(Op: ISD::STACKSAVE, VT: MVT::Other, Action: Expand); |
1550 | setOperationAction(Op: ISD::STACKRESTORE, VT: MVT::Other, Action: Expand); |
1551 | setOperationAction(Op: ISD::DYNAMIC_STACKALLOC, VT: MVT::i32, Action: Custom); |
1552 | |
1553 | if (EmitJumpTables) |
1554 | setMinimumJumpTableEntries(MinimumJumpTables); |
1555 | else |
1556 | setMinimumJumpTableEntries(std::numeric_limits<unsigned>::max()); |
1557 | setOperationAction(Op: ISD::BR_JT, VT: MVT::Other, Action: Expand); |
1558 | |
1559 | for (unsigned LegalIntOp : |
1560 | {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}) { |
1561 | setOperationAction(Op: LegalIntOp, VT: MVT::i32, Action: Legal); |
1562 | setOperationAction(Op: LegalIntOp, VT: MVT::i64, Action: Legal); |
1563 | } |
1564 | |
1565 | // Hexagon has A4_addp_c and A4_subp_c that take and generate a carry bit, |
1566 | // but they only operate on i64. |
1567 | for (MVT VT : MVT::integer_valuetypes()) { |
1568 | setOperationAction(Op: ISD::UADDO, VT, Action: Custom); |
1569 | setOperationAction(Op: ISD::USUBO, VT, Action: Custom); |
1570 | setOperationAction(Op: ISD::SADDO, VT, Action: Expand); |
1571 | setOperationAction(Op: ISD::SSUBO, VT, Action: Expand); |
1572 | setOperationAction(Op: ISD::UADDO_CARRY, VT, Action: Expand); |
1573 | setOperationAction(Op: ISD::USUBO_CARRY, VT, Action: Expand); |
1574 | } |
1575 | setOperationAction(Op: ISD::UADDO_CARRY, VT: MVT::i64, Action: Custom); |
1576 | setOperationAction(Op: ISD::USUBO_CARRY, VT: MVT::i64, Action: Custom); |
1577 | |
1578 | setOperationAction(Op: ISD::CTLZ, VT: MVT::i8, Action: Promote); |
1579 | setOperationAction(Op: ISD::CTLZ, VT: MVT::i16, Action: Promote); |
1580 | setOperationAction(Op: ISD::CTTZ, VT: MVT::i8, Action: Promote); |
1581 | setOperationAction(Op: ISD::CTTZ, VT: MVT::i16, Action: Promote); |
1582 | |
1583 | // Popcount can count # of 1s in i64 but returns i32. |
1584 | setOperationAction(Op: ISD::CTPOP, VT: MVT::i8, Action: Promote); |
1585 | setOperationAction(Op: ISD::CTPOP, VT: MVT::i16, Action: Promote); |
1586 | setOperationAction(Op: ISD::CTPOP, VT: MVT::i32, Action: Promote); |
1587 | setOperationAction(Op: ISD::CTPOP, VT: MVT::i64, Action: Legal); |
1588 | |
1589 | setOperationAction(Op: ISD::BITREVERSE, VT: MVT::i32, Action: Legal); |
1590 | setOperationAction(Op: ISD::BITREVERSE, VT: MVT::i64, Action: Legal); |
1591 | setOperationAction(Op: ISD::BSWAP, VT: MVT::i32, Action: Legal); |
1592 | setOperationAction(Op: ISD::BSWAP, VT: MVT::i64, Action: Legal); |
1593 | |
1594 | setOperationAction(Op: ISD::FSHL, VT: MVT::i32, Action: Legal); |
1595 | setOperationAction(Op: ISD::FSHL, VT: MVT::i64, Action: Legal); |
1596 | setOperationAction(Op: ISD::FSHR, VT: MVT::i32, Action: Legal); |
1597 | setOperationAction(Op: ISD::FSHR, VT: MVT::i64, Action: Legal); |
1598 | |
1599 | for (unsigned IntExpOp : |
1600 | {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, |
1601 | ISD::SDIVREM, ISD::UDIVREM, ISD::ROTL, ISD::ROTR, |
1602 | ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS, |
1603 | ISD::SMUL_LOHI, ISD::UMUL_LOHI}) { |
1604 | for (MVT VT : MVT::integer_valuetypes()) |
1605 | setOperationAction(Op: IntExpOp, VT, Action: Expand); |
1606 | } |
1607 | |
1608 | for (unsigned FPExpOp : |
1609 | {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FSINCOS, |
1610 | ISD::FPOW, ISD::FCOPYSIGN}) { |
1611 | for (MVT VT : MVT::fp_valuetypes()) |
1612 | setOperationAction(Op: FPExpOp, VT, Action: Expand); |
1613 | } |
1614 | |
1615 | // No extending loads from i32. |
1616 | for (MVT VT : MVT::integer_valuetypes()) { |
1617 | setLoadExtAction(ExtType: ISD::ZEXTLOAD, ValVT: VT, MemVT: MVT::i32, Action: Expand); |
1618 | setLoadExtAction(ExtType: ISD::SEXTLOAD, ValVT: VT, MemVT: MVT::i32, Action: Expand); |
1619 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: VT, MemVT: MVT::i32, Action: Expand); |
1620 | } |
1621 | // Turn FP truncstore into trunc + store. |
1622 | setTruncStoreAction(ValVT: MVT::f64, MemVT: MVT::f32, Action: Expand); |
1623 | // Turn FP extload into load/fpextend. |
1624 | for (MVT VT : MVT::fp_valuetypes()) |
1625 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: VT, MemVT: MVT::f32, Action: Expand); |
1626 | |
1627 | // Expand BR_CC and SELECT_CC for all integer and fp types. |
1628 | for (MVT VT : MVT::integer_valuetypes()) { |
1629 | setOperationAction(Op: ISD::BR_CC, VT, Action: Expand); |
1630 | setOperationAction(Op: ISD::SELECT_CC, VT, Action: Expand); |
1631 | } |
1632 | for (MVT VT : MVT::fp_valuetypes()) { |
1633 | setOperationAction(Op: ISD::BR_CC, VT, Action: Expand); |
1634 | setOperationAction(Op: ISD::SELECT_CC, VT, Action: Expand); |
1635 | } |
1636 | setOperationAction(Op: ISD::BR_CC, VT: MVT::Other, Action: Expand); |
1637 | |
1638 | // |
1639 | // Handling of vector operations. |
1640 | // |
1641 | |
1642 | // Set the action for vector operations to "expand", then override it with |
1643 | // either "custom" or "legal" for specific cases. |
1644 | static const unsigned VectExpOps[] = { |
1645 | // Integer arithmetic: |
1646 | ISD::ADD, ISD::SUB, ISD::MUL, ISD::SDIV, ISD::UDIV, |
1647 | ISD::SREM, ISD::UREM, ISD::SDIVREM, ISD::UDIVREM, ISD::SADDO, |
1648 | ISD::UADDO, ISD::SSUBO, ISD::USUBO, ISD::SMUL_LOHI, ISD::UMUL_LOHI, |
1649 | // Logical/bit: |
1650 | ISD::AND, ISD::OR, ISD::XOR, ISD::ROTL, ISD::ROTR, |
1651 | ISD::CTPOP, ISD::CTLZ, ISD::CTTZ, ISD::BSWAP, ISD::BITREVERSE, |
1652 | // Floating point arithmetic/math functions: |
1653 | ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FMA, ISD::FDIV, |
1654 | ISD::FREM, ISD::FNEG, ISD::FABS, ISD::FSQRT, ISD::FSIN, |
1655 | ISD::FCOS, ISD::FPOW, ISD::FLOG, ISD::FLOG2, |
1656 | ISD::FLOG10, ISD::FEXP, ISD::FEXP2, ISD::FCEIL, ISD::FTRUNC, |
1657 | ISD::FRINT, ISD::FNEARBYINT, ISD::FROUND, ISD::FFLOOR, |
1658 | ISD::FMINNUM, ISD::FMAXNUM, ISD::FSINCOS, ISD::FLDEXP, |
1659 | // Misc: |
1660 | ISD::BR_CC, ISD::SELECT_CC, ISD::ConstantPool, |
1661 | // Vector: |
1662 | ISD::BUILD_VECTOR, ISD::SCALAR_TO_VECTOR, |
1663 | ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT, |
1664 | ISD::EXTRACT_SUBVECTOR, ISD::INSERT_SUBVECTOR, |
1665 | ISD::CONCAT_VECTORS, ISD::VECTOR_SHUFFLE, |
1666 | ISD::SPLAT_VECTOR, |
1667 | }; |
1668 | |
1669 | for (MVT VT : MVT::fixedlen_vector_valuetypes()) { |
1670 | for (unsigned VectExpOp : VectExpOps) |
1671 | setOperationAction(Op: VectExpOp, VT, Action: Expand); |
1672 | |
1673 | // Expand all extending loads and truncating stores: |
1674 | for (MVT TargetVT : MVT::fixedlen_vector_valuetypes()) { |
1675 | if (TargetVT == VT) |
1676 | continue; |
1677 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: TargetVT, MemVT: VT, Action: Expand); |
1678 | setLoadExtAction(ExtType: ISD::ZEXTLOAD, ValVT: TargetVT, MemVT: VT, Action: Expand); |
1679 | setLoadExtAction(ExtType: ISD::SEXTLOAD, ValVT: TargetVT, MemVT: VT, Action: Expand); |
1680 | setTruncStoreAction(ValVT: VT, MemVT: TargetVT, Action: Expand); |
1681 | } |
1682 | |
1683 | // Normalize all inputs to SELECT to be vectors of i32. |
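    // (For example, a 64-bit v4i16 SELECT is promoted to operate on v2i32:
    // 64 bits / 32 == 2 lanes of i32.)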
1684 | if (VT.getVectorElementType() != MVT::i32) { |
1685 | MVT VT32 = MVT::getVectorVT(VT: MVT::i32, NumElements: VT.getSizeInBits()/32); |
1686 | setOperationAction(Op: ISD::SELECT, VT, Action: Promote); |
1687 | AddPromotedToType(Opc: ISD::SELECT, OrigVT: VT, DestVT: VT32); |
1688 | } |
1689 | setOperationAction(Op: ISD::SRA, VT, Action: Custom); |
1690 | setOperationAction(Op: ISD::SHL, VT, Action: Custom); |
1691 | setOperationAction(Op: ISD::SRL, VT, Action: Custom); |
1692 | } |
1693 | |
1694 | // Extending loads from (native) vectors of i8 into (native) vectors of i16 |
1695 | // are legal. |
1696 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: MVT::v2i16, MemVT: MVT::v2i8, Action: Legal); |
1697 | setLoadExtAction(ExtType: ISD::ZEXTLOAD, ValVT: MVT::v2i16, MemVT: MVT::v2i8, Action: Legal); |
1698 | setLoadExtAction(ExtType: ISD::SEXTLOAD, ValVT: MVT::v2i16, MemVT: MVT::v2i8, Action: Legal); |
1699 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: MVT::v4i16, MemVT: MVT::v4i8, Action: Legal); |
1700 | setLoadExtAction(ExtType: ISD::ZEXTLOAD, ValVT: MVT::v4i16, MemVT: MVT::v4i8, Action: Legal); |
1701 | setLoadExtAction(ExtType: ISD::SEXTLOAD, ValVT: MVT::v4i16, MemVT: MVT::v4i8, Action: Legal); |
1702 | |
1703 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::v2i8, Action: Legal); |
1704 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::v2i16, Action: Legal); |
1705 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::v2i32, Action: Legal); |
1706 | |
1707 | // Types natively supported: |
1708 | for (MVT NativeVT : {MVT::v8i1, MVT::v4i1, MVT::v2i1, MVT::v4i8, |
1709 | MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v2i32}) { |
1710 | setOperationAction(Op: ISD::BUILD_VECTOR, VT: NativeVT, Action: Custom); |
1711 | setOperationAction(Op: ISD::EXTRACT_VECTOR_ELT, VT: NativeVT, Action: Custom); |
1712 | setOperationAction(Op: ISD::INSERT_VECTOR_ELT, VT: NativeVT, Action: Custom); |
1713 | setOperationAction(Op: ISD::EXTRACT_SUBVECTOR, VT: NativeVT, Action: Custom); |
1714 | setOperationAction(Op: ISD::INSERT_SUBVECTOR, VT: NativeVT, Action: Custom); |
1715 | setOperationAction(Op: ISD::CONCAT_VECTORS, VT: NativeVT, Action: Custom); |
1716 | |
1717 | setOperationAction(Op: ISD::ADD, VT: NativeVT, Action: Legal); |
1718 | setOperationAction(Op: ISD::SUB, VT: NativeVT, Action: Legal); |
1719 | setOperationAction(Op: ISD::MUL, VT: NativeVT, Action: Legal); |
1720 | setOperationAction(Op: ISD::AND, VT: NativeVT, Action: Legal); |
1721 | setOperationAction(Op: ISD::OR, VT: NativeVT, Action: Legal); |
1722 | setOperationAction(Op: ISD::XOR, VT: NativeVT, Action: Legal); |
1723 | |
1724 | if (NativeVT.getVectorElementType() != MVT::i1) { |
1725 | setOperationAction(Op: ISD::SPLAT_VECTOR, VT: NativeVT, Action: Legal); |
1726 | setOperationAction(Op: ISD::BSWAP, VT: NativeVT, Action: Legal); |
1727 | setOperationAction(Op: ISD::BITREVERSE, VT: NativeVT, Action: Legal); |
1728 | } |
1729 | } |
1730 | |
1731 | for (MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32}) { |
1732 | setOperationAction(Op: ISD::SMIN, VT, Action: Legal); |
1733 | setOperationAction(Op: ISD::SMAX, VT, Action: Legal); |
1734 | setOperationAction(Op: ISD::UMIN, VT, Action: Legal); |
1735 | setOperationAction(Op: ISD::UMAX, VT, Action: Legal); |
1736 | } |
1737 | |
1738 | // Custom lower unaligned loads. |
1739 | // Also, for both loads and stores, verify the alignment of the address |
1740 | // in case it is a compile-time constant. This is a usability feature to |
1741 | // provide a meaningful error message to users. |
1742 | for (MVT VT : {MVT::i16, MVT::i32, MVT::v4i8, MVT::i64, MVT::v8i8, |
1743 | MVT::v2i16, MVT::v4i16, MVT::v2i32}) { |
1744 | setOperationAction(Op: ISD::LOAD, VT, Action: Custom); |
1745 | setOperationAction(Op: ISD::STORE, VT, Action: Custom); |
1746 | } |
1747 | |
1748 | // Custom-lower load/stores of boolean vectors. |
1749 | for (MVT VT : {MVT::v2i1, MVT::v4i1, MVT::v8i1}) { |
1750 | setOperationAction(Op: ISD::LOAD, VT, Action: Custom); |
1751 | setOperationAction(Op: ISD::STORE, VT, Action: Custom); |
1752 | } |
1753 | |
1754 | // Normalize integer compares to EQ/GT/UGT |
  for (MVT VT : {MVT::v2i16, MVT::v4i8, MVT::v8i8, MVT::v2i32, MVT::v4i16}) {
1757 | setCondCodeAction(CCs: ISD::SETNE, VT, Action: Expand); |
1758 | setCondCodeAction(CCs: ISD::SETLE, VT, Action: Expand); |
1759 | setCondCodeAction(CCs: ISD::SETGE, VT, Action: Expand); |
1760 | setCondCodeAction(CCs: ISD::SETLT, VT, Action: Expand); |
1761 | setCondCodeAction(CCs: ISD::SETULE, VT, Action: Expand); |
1762 | setCondCodeAction(CCs: ISD::SETUGE, VT, Action: Expand); |
1763 | setCondCodeAction(CCs: ISD::SETULT, VT, Action: Expand); |
1764 | } |
1765 | |
1766 | // Normalize boolean compares to [U]LE/[U]LT |
1767 | for (MVT VT : {MVT::i1, MVT::v2i1, MVT::v4i1, MVT::v8i1}) { |
1768 | setCondCodeAction(CCs: ISD::SETGE, VT, Action: Expand); |
1769 | setCondCodeAction(CCs: ISD::SETGT, VT, Action: Expand); |
1770 | setCondCodeAction(CCs: ISD::SETUGE, VT, Action: Expand); |
1771 | setCondCodeAction(CCs: ISD::SETUGT, VT, Action: Expand); |
1772 | } |
1773 | |
1774 | // Custom-lower bitcasts from i8 to v8i1. |
1775 | setOperationAction(Op: ISD::BITCAST, VT: MVT::i8, Action: Custom); |
1776 | setOperationAction(Op: ISD::SETCC, VT: MVT::v2i16, Action: Custom); |
1777 | setOperationAction(Op: ISD::VSELECT, VT: MVT::v4i8, Action: Custom); |
1778 | setOperationAction(Op: ISD::VSELECT, VT: MVT::v2i16, Action: Custom); |
1779 | setOperationAction(Op: ISD::VECTOR_SHUFFLE, VT: MVT::v4i8, Action: Custom); |
1780 | setOperationAction(Op: ISD::VECTOR_SHUFFLE, VT: MVT::v4i16, Action: Custom); |
1781 | setOperationAction(Op: ISD::VECTOR_SHUFFLE, VT: MVT::v8i8, Action: Custom); |
1782 | |
1783 | // V5+. |
1784 | setOperationAction(Op: ISD::FMA, VT: MVT::f64, Action: Expand); |
1785 | setOperationAction(Op: ISD::FADD, VT: MVT::f64, Action: Expand); |
1786 | setOperationAction(Op: ISD::FSUB, VT: MVT::f64, Action: Expand); |
1787 | setOperationAction(Op: ISD::FMUL, VT: MVT::f64, Action: Expand); |
1788 | setOperationAction(Op: ISD::FDIV, VT: MVT::f32, Action: Custom); |
1789 | |
1790 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::f32, Action: Legal); |
1791 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::f32, Action: Legal); |
1792 | |
1793 | setOperationAction(Op: ISD::FP_TO_UINT, VT: MVT::i1, Action: Promote); |
1794 | setOperationAction(Op: ISD::FP_TO_UINT, VT: MVT::i8, Action: Promote); |
1795 | setOperationAction(Op: ISD::FP_TO_UINT, VT: MVT::i16, Action: Promote); |
1796 | setOperationAction(Op: ISD::FP_TO_SINT, VT: MVT::i1, Action: Promote); |
1797 | setOperationAction(Op: ISD::FP_TO_SINT, VT: MVT::i8, Action: Promote); |
1798 | setOperationAction(Op: ISD::FP_TO_SINT, VT: MVT::i16, Action: Promote); |
1799 | setOperationAction(Op: ISD::UINT_TO_FP, VT: MVT::i1, Action: Promote); |
1800 | setOperationAction(Op: ISD::UINT_TO_FP, VT: MVT::i8, Action: Promote); |
1801 | setOperationAction(Op: ISD::UINT_TO_FP, VT: MVT::i16, Action: Promote); |
1802 | setOperationAction(Op: ISD::SINT_TO_FP, VT: MVT::i1, Action: Promote); |
1803 | setOperationAction(Op: ISD::SINT_TO_FP, VT: MVT::i8, Action: Promote); |
1804 | setOperationAction(Op: ISD::SINT_TO_FP, VT: MVT::i16, Action: Promote); |
1805 | |
1806 | // Special handling for half-precision floating point conversions. |
1807 | // Lower half float conversions into library calls. |
1808 | setOperationAction(Op: ISD::FP16_TO_FP, VT: MVT::f32, Action: Expand); |
1809 | setOperationAction(Op: ISD::FP16_TO_FP, VT: MVT::f64, Action: Expand); |
1810 | setOperationAction(Op: ISD::FP_TO_FP16, VT: MVT::f32, Action: Expand); |
1811 | setOperationAction(Op: ISD::FP_TO_FP16, VT: MVT::f64, Action: Expand); |
1812 | |
1813 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: MVT::f32, MemVT: MVT::f16, Action: Expand); |
1814 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: MVT::f64, MemVT: MVT::f16, Action: Expand); |
1815 | setTruncStoreAction(ValVT: MVT::f32, MemVT: MVT::f16, Action: Expand); |
1816 | setTruncStoreAction(ValVT: MVT::f64, MemVT: MVT::f16, Action: Expand); |
1817 | |
1818 | // Handling of indexed loads/stores: default is "expand". |
1819 | // |
1820 | for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64, MVT::f32, MVT::f64, |
1821 | MVT::v2i16, MVT::v2i32, MVT::v4i8, MVT::v4i16, MVT::v8i8}) { |
1822 | setIndexedLoadAction(IdxModes: ISD::POST_INC, VT, Action: Legal); |
1823 | setIndexedStoreAction(IdxModes: ISD::POST_INC, VT, Action: Legal); |
1824 | } |
1825 | |
1826 | // Subtarget-specific operation actions. |
1827 | // |
1828 | if (Subtarget.hasV60Ops()) { |
1829 | setOperationAction(Op: ISD::ROTL, VT: MVT::i32, Action: Legal); |
1830 | setOperationAction(Op: ISD::ROTL, VT: MVT::i64, Action: Legal); |
1831 | setOperationAction(Op: ISD::ROTR, VT: MVT::i32, Action: Legal); |
1832 | setOperationAction(Op: ISD::ROTR, VT: MVT::i64, Action: Legal); |
1833 | } |
1834 | if (Subtarget.hasV66Ops()) { |
1835 | setOperationAction(Op: ISD::FADD, VT: MVT::f64, Action: Legal); |
1836 | setOperationAction(Op: ISD::FSUB, VT: MVT::f64, Action: Legal); |
1837 | } |
1838 | if (Subtarget.hasV67Ops()) { |
1839 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::f64, Action: Legal); |
1840 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::f64, Action: Legal); |
1841 | setOperationAction(Op: ISD::FMUL, VT: MVT::f64, Action: Legal); |
1842 | } |
1843 | |
1844 | setTargetDAGCombine(ISD::OR); |
1845 | setTargetDAGCombine(ISD::TRUNCATE); |
1846 | setTargetDAGCombine(ISD::VSELECT); |
1847 | |
1848 | if (Subtarget.useHVXOps()) |
1849 | initializeHVXLowering(); |
1850 | |
1851 | computeRegisterProperties(TRI: &HRI); |
1852 | |
1853 | // |
1854 | // Library calls for unsupported operations |
1855 | // |
1856 | bool FastMath = EnableFastMath; |
1857 | |
1858 | setLibcallName(Call: RTLIB::SDIV_I32, Name: "__hexagon_divsi3" ); |
1859 | setLibcallName(Call: RTLIB::SDIV_I64, Name: "__hexagon_divdi3" ); |
1860 | setLibcallName(Call: RTLIB::UDIV_I32, Name: "__hexagon_udivsi3" ); |
1861 | setLibcallName(Call: RTLIB::UDIV_I64, Name: "__hexagon_udivdi3" ); |
1862 | setLibcallName(Call: RTLIB::SREM_I32, Name: "__hexagon_modsi3" ); |
1863 | setLibcallName(Call: RTLIB::SREM_I64, Name: "__hexagon_moddi3" ); |
1864 | setLibcallName(Call: RTLIB::UREM_I32, Name: "__hexagon_umodsi3" ); |
1865 | setLibcallName(Call: RTLIB::UREM_I64, Name: "__hexagon_umoddi3" ); |
1866 | |
1867 | setLibcallName(Call: RTLIB::SINTTOFP_I128_F64, Name: "__hexagon_floattidf" ); |
1868 | setLibcallName(Call: RTLIB::SINTTOFP_I128_F32, Name: "__hexagon_floattisf" ); |
1869 | setLibcallName(Call: RTLIB::FPTOUINT_F32_I128, Name: "__hexagon_fixunssfti" ); |
1870 | setLibcallName(Call: RTLIB::FPTOUINT_F64_I128, Name: "__hexagon_fixunsdfti" ); |
1871 | setLibcallName(Call: RTLIB::FPTOSINT_F32_I128, Name: "__hexagon_fixsfti" ); |
1872 | setLibcallName(Call: RTLIB::FPTOSINT_F64_I128, Name: "__hexagon_fixdfti" ); |
1873 | |
  // This is the only fast library function for double-precision sqrt.
1875 | if (FastMath) |
1876 | setLibcallName(Call: RTLIB::SQRT_F64, Name: "__hexagon_fast2_sqrtdf2" ); |
1877 | |
1878 | // Prefix is: nothing for "slow-math", |
1879 | // "fast2_" for V5+ fast-math double-precision |
1880 | // (actually, keep fast-math and fast-math2 separate for now) |
1881 | if (FastMath) { |
1882 | setLibcallName(Call: RTLIB::ADD_F64, Name: "__hexagon_fast_adddf3" ); |
1883 | setLibcallName(Call: RTLIB::SUB_F64, Name: "__hexagon_fast_subdf3" ); |
1884 | setLibcallName(Call: RTLIB::MUL_F64, Name: "__hexagon_fast_muldf3" ); |
1885 | setLibcallName(Call: RTLIB::DIV_F64, Name: "__hexagon_fast_divdf3" ); |
1886 | setLibcallName(Call: RTLIB::DIV_F32, Name: "__hexagon_fast_divsf3" ); |
1887 | } else { |
1888 | setLibcallName(Call: RTLIB::ADD_F64, Name: "__hexagon_adddf3" ); |
1889 | setLibcallName(Call: RTLIB::SUB_F64, Name: "__hexagon_subdf3" ); |
1890 | setLibcallName(Call: RTLIB::MUL_F64, Name: "__hexagon_muldf3" ); |
1891 | setLibcallName(Call: RTLIB::DIV_F64, Name: "__hexagon_divdf3" ); |
1892 | setLibcallName(Call: RTLIB::DIV_F32, Name: "__hexagon_divsf3" ); |
1893 | } |
1894 | |
1895 | if (FastMath) |
1896 | setLibcallName(Call: RTLIB::SQRT_F32, Name: "__hexagon_fast2_sqrtf" ); |
1897 | else |
1898 | setLibcallName(Call: RTLIB::SQRT_F32, Name: "__hexagon_sqrtf" ); |
1899 | |
1900 | // Routines to handle fp16 storage type. |
1901 | setLibcallName(Call: RTLIB::FPROUND_F32_F16, Name: "__truncsfhf2" ); |
1902 | setLibcallName(Call: RTLIB::FPROUND_F64_F16, Name: "__truncdfhf2" ); |
1903 | setLibcallName(Call: RTLIB::FPEXT_F16_F32, Name: "__extendhfsf2" ); |
1904 | } |
1905 | |
1906 | const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const { |
1907 | switch ((HexagonISD::NodeType)Opcode) { |
1908 | case HexagonISD::ADDC: return "HexagonISD::ADDC" ; |
1909 | case HexagonISD::SUBC: return "HexagonISD::SUBC" ; |
1910 | case HexagonISD::ALLOCA: return "HexagonISD::ALLOCA" ; |
1911 | case HexagonISD::AT_GOT: return "HexagonISD::AT_GOT" ; |
1912 | case HexagonISD::AT_PCREL: return "HexagonISD::AT_PCREL" ; |
1913 | case HexagonISD::BARRIER: return "HexagonISD::BARRIER" ; |
1914 | case HexagonISD::CALL: return "HexagonISD::CALL" ; |
1915 | case HexagonISD::CALLnr: return "HexagonISD::CALLnr" ; |
1916 | case HexagonISD::CALLR: return "HexagonISD::CALLR" ; |
1917 | case HexagonISD::COMBINE: return "HexagonISD::COMBINE" ; |
1918 | case HexagonISD::CONST32_GP: return "HexagonISD::CONST32_GP" ; |
1919 | case HexagonISD::CONST32: return "HexagonISD::CONST32" ; |
1920 | case HexagonISD::CP: return "HexagonISD::CP" ; |
1921 | case HexagonISD::DCFETCH: return "HexagonISD::DCFETCH" ; |
1922 | case HexagonISD::EH_RETURN: return "HexagonISD::EH_RETURN" ; |
1923 | case HexagonISD::TSTBIT: return "HexagonISD::TSTBIT" ; |
1924 | case HexagonISD::EXTRACTU: return "HexagonISD::EXTRACTU" ; |
1925 | case HexagonISD::INSERT: return "HexagonISD::INSERT" ; |
1926 | case HexagonISD::JT: return "HexagonISD::JT" ; |
1927 | case HexagonISD::RET_GLUE: return "HexagonISD::RET_GLUE" ; |
1928 | case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN" ; |
1929 | case HexagonISD::VASL: return "HexagonISD::VASL" ; |
1930 | case HexagonISD::VASR: return "HexagonISD::VASR" ; |
1931 | case HexagonISD::VLSR: return "HexagonISD::VLSR" ; |
1932 | case HexagonISD::MFSHL: return "HexagonISD::MFSHL" ; |
1933 | case HexagonISD::MFSHR: return "HexagonISD::MFSHR" ; |
1934 | case HexagonISD::SSAT: return "HexagonISD::SSAT" ; |
1935 | case HexagonISD::USAT: return "HexagonISD::USAT" ; |
1936 | case HexagonISD::SMUL_LOHI: return "HexagonISD::SMUL_LOHI" ; |
1937 | case HexagonISD::UMUL_LOHI: return "HexagonISD::UMUL_LOHI" ; |
1938 | case HexagonISD::USMUL_LOHI: return "HexagonISD::USMUL_LOHI" ; |
1939 | case HexagonISD::VEXTRACTW: return "HexagonISD::VEXTRACTW" ; |
1940 | case HexagonISD::VINSERTW0: return "HexagonISD::VINSERTW0" ; |
1941 | case HexagonISD::VROR: return "HexagonISD::VROR" ; |
1942 | case HexagonISD::READCYCLE: return "HexagonISD::READCYCLE" ; |
1943 | case HexagonISD::READTIMER: return "HexagonISD::READTIMER" ; |
1944 | case HexagonISD::PTRUE: return "HexagonISD::PTRUE" ; |
1945 | case HexagonISD::PFALSE: return "HexagonISD::PFALSE" ; |
1946 | case HexagonISD::D2P: return "HexagonISD::D2P" ; |
1947 | case HexagonISD::P2D: return "HexagonISD::P2D" ; |
1948 | case HexagonISD::V2Q: return "HexagonISD::V2Q" ; |
1949 | case HexagonISD::Q2V: return "HexagonISD::Q2V" ; |
1950 | case HexagonISD::QCAT: return "HexagonISD::QCAT" ; |
1951 | case HexagonISD::QTRUE: return "HexagonISD::QTRUE" ; |
1952 | case HexagonISD::QFALSE: return "HexagonISD::QFALSE" ; |
1953 | case HexagonISD::TL_EXTEND: return "HexagonISD::TL_EXTEND" ; |
1954 | case HexagonISD::TL_TRUNCATE: return "HexagonISD::TL_TRUNCATE" ; |
1955 | case HexagonISD::TYPECAST: return "HexagonISD::TYPECAST" ; |
1956 | case HexagonISD::VALIGN: return "HexagonISD::VALIGN" ; |
1957 | case HexagonISD::VALIGNADDR: return "HexagonISD::VALIGNADDR" ; |
1958 | case HexagonISD::ISEL: return "HexagonISD::ISEL" ; |
1959 | case HexagonISD::OP_END: break; |
1960 | } |
1961 | return nullptr; |
1962 | } |
1963 | |
1964 | bool |
1965 | HexagonTargetLowering::validateConstPtrAlignment(SDValue Ptr, Align NeedAlign, |
1966 | const SDLoc &dl, SelectionDAG &DAG) const { |
1967 | auto *CA = dyn_cast<ConstantSDNode>(Val&: Ptr); |
1968 | if (!CA) |
1969 | return true; |
1970 | unsigned Addr = CA->getZExtValue(); |
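  // The known alignment is the largest power of two that divides the
  // address, e.g. Addr == 0x1004 has two trailing zero bits, i.e. Align(4).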
1971 | Align HaveAlign = |
1972 | Addr != 0 ? Align(1ull << llvm::countr_zero(Val: Addr)) : NeedAlign; |
1973 | if (HaveAlign >= NeedAlign) |
1974 | return true; |
1975 | |
1976 | static int DK_MisalignedTrap = llvm::getNextAvailablePluginDiagnosticKind(); |
1977 | |
1978 | struct DiagnosticInfoMisalignedTrap : public DiagnosticInfo { |
1979 | DiagnosticInfoMisalignedTrap(StringRef M) |
1980 | : DiagnosticInfo(DK_MisalignedTrap, DS_Remark), Msg(M) {} |
1981 | void print(DiagnosticPrinter &DP) const override { |
1982 | DP << Msg; |
1983 | } |
1984 | static bool classof(const DiagnosticInfo *DI) { |
1985 | return DI->getKind() == DK_MisalignedTrap; |
1986 | } |
1987 | StringRef Msg; |
1988 | }; |
1989 | |
1990 | std::string ErrMsg; |
1991 | raw_string_ostream O(ErrMsg); |
1992 | O << "Misaligned constant address: " << format_hex(N: Addr, Width: 10) |
1993 | << " has alignment " << HaveAlign.value() |
1994 | << ", but the memory access requires " << NeedAlign.value(); |
1995 | if (DebugLoc DL = dl.getDebugLoc()) |
1996 | DL.print(OS&: O << ", at " ); |
1997 | O << ". The instruction has been replaced with a trap." ; |
1998 | |
1999 | DAG.getContext()->diagnose(DI: DiagnosticInfoMisalignedTrap(O.str())); |
2000 | return false; |
2001 | } |
2002 | |
2003 | SDValue |
2004 | HexagonTargetLowering::replaceMemWithUndef(SDValue Op, SelectionDAG &DAG) |
2005 | const { |
2006 | const SDLoc &dl(Op); |
2007 | auto *LS = cast<LSBaseSDNode>(Val: Op.getNode()); |
2008 | assert(!LS->isIndexed() && "Not expecting indexed ops on constant address" ); |
2009 | |
2010 | SDValue Chain = LS->getChain(); |
2011 | SDValue Trap = DAG.getNode(Opcode: ISD::TRAP, DL: dl, VT: MVT::Other, Operand: Chain); |
2012 | if (LS->getOpcode() == ISD::LOAD) |
2013 | return DAG.getMergeValues(Ops: {DAG.getUNDEF(VT: ty(Op)), Trap}, dl); |
2014 | return Trap; |
2015 | } |
2016 | |
// Bit-reverse Load Intrinsic: Check if the instruction is a bit-reverse load
// intrinsic.
2019 | static bool isBrevLdIntrinsic(const Value *Inst) { |
2020 | unsigned ID = cast<IntrinsicInst>(Val: Inst)->getIntrinsicID(); |
2021 | return (ID == Intrinsic::hexagon_L2_loadrd_pbr || |
2022 | ID == Intrinsic::hexagon_L2_loadri_pbr || |
2023 | ID == Intrinsic::hexagon_L2_loadrh_pbr || |
2024 | ID == Intrinsic::hexagon_L2_loadruh_pbr || |
2025 | ID == Intrinsic::hexagon_L2_loadrb_pbr || |
2026 | ID == Intrinsic::hexagon_L2_loadrub_pbr); |
2027 | } |
2028 | |
// Bit-reverse Load Intrinsic: Crawl up and figure out the object from the
// previous instruction. So far we only handle bitcast, extractvalue and
// bit-reverse load intrinsic instructions. Should we handle CGEP?
2032 | static Value *getBrevLdObject(Value *V) { |
2033 | if (Operator::getOpcode(V) == Instruction::ExtractValue || |
2034 | Operator::getOpcode(V) == Instruction::BitCast) |
2035 | V = cast<Operator>(Val: V)->getOperand(i: 0); |
2036 | else if (isa<IntrinsicInst>(Val: V) && isBrevLdIntrinsic(Inst: V)) |
2037 | V = cast<Instruction>(Val: V)->getOperand(i: 0); |
2038 | return V; |
2039 | } |
2040 | |
2041 | // Bit-reverse Load Intrinsic: For a PHI Node return either an incoming edge or |
2042 | // a back edge. If the back edge comes from the intrinsic itself, the incoming |
2043 | // edge is returned. |
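//
// For example (an illustrative IR sketch; the names are hypothetical):
//   loop:
//     %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
//     %r = call { i32, i8* } @llvm.hexagon.L2.loadri.pbr(i8* %p, i32 %m)
//     %p.next = extractvalue { i32, i8* } %r, 1
// The back-edge value %p.next traces back (extractvalue -> intrinsic ->
// operand 0) to %p itself, so the incoming value %base is returned.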
2044 | static Value *returnEdge(const PHINode *PN, Value *IntrBaseVal) { |
2045 | const BasicBlock *Parent = PN->getParent(); |
2046 | int Idx = -1; |
2047 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i) { |
2048 | BasicBlock *Blk = PN->getIncomingBlock(i); |
    // Determine if the back edge originates from the intrinsic itself.
2050 | if (Blk == Parent) { |
2051 | Value *BackEdgeVal = PN->getIncomingValue(i); |
2052 | Value *BaseVal; |
      // Loop until we return the same value or we hit the IntrBaseVal.
2054 | do { |
2055 | BaseVal = BackEdgeVal; |
2056 | BackEdgeVal = getBrevLdObject(V: BackEdgeVal); |
2057 | } while ((BaseVal != BackEdgeVal) && (IntrBaseVal != BackEdgeVal)); |
      // If getBrevLdObject returns IntrBaseVal, we should return the
      // incoming edge.
2060 | if (IntrBaseVal == BackEdgeVal) |
2061 | continue; |
2062 | Idx = i; |
2063 | break; |
    } else // Remember the index of the incoming edge.
2065 | Idx = i; |
2066 | } |
2067 | assert(Idx >= 0 && "Unexpected index to incoming argument in PHI" ); |
2068 | return PN->getIncomingValue(i: Idx); |
2069 | } |
2070 | |
// Bit-reverse Load Intrinsic: Figure out the underlying object the base
// pointer points to, for the bit-reverse load intrinsic. Setting this on the
// memoperand might help alias analysis figure out the dependencies.
2074 | static Value *getUnderLyingObjectForBrevLdIntr(Value *V) { |
2075 | Value *IntrBaseVal = V; |
2076 | Value *BaseVal; |
  // Loop until we return the same value, which means we have either found
  // the object or hit a PHI.
2079 | do { |
2080 | BaseVal = V; |
2081 | V = getBrevLdObject(V); |
2082 | } while (BaseVal != V); |
2083 | |
2084 | // Identify the object from PHINode. |
2085 | if (const PHINode *PN = dyn_cast<PHINode>(Val: V)) |
2086 | return returnEdge(PN, IntrBaseVal); |
  // For non-PHI nodes, the object is the last value returned by
  // getBrevLdObject.
2088 | else |
2089 | return V; |
2090 | } |
2091 | |
/// Given an intrinsic, checks if on the target the intrinsic will need to map
/// to a MemIntrinsicNode (touches memory). If this is the case, it returns
/// true and stores the intrinsic information into the IntrinsicInfo that was
/// passed to the function.
2096 | bool HexagonTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, |
2097 | const CallInst &I, |
2098 | MachineFunction &MF, |
2099 | unsigned Intrinsic) const { |
2100 | switch (Intrinsic) { |
2101 | case Intrinsic::hexagon_L2_loadrd_pbr: |
2102 | case Intrinsic::hexagon_L2_loadri_pbr: |
2103 | case Intrinsic::hexagon_L2_loadrh_pbr: |
2104 | case Intrinsic::hexagon_L2_loadruh_pbr: |
2105 | case Intrinsic::hexagon_L2_loadrb_pbr: |
2106 | case Intrinsic::hexagon_L2_loadrub_pbr: { |
2107 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
2108 | auto &DL = I.getDataLayout(); |
2109 | auto &Cont = I.getCalledFunction()->getParent()->getContext(); |
2110 | // The intrinsic function call is of the form { ElTy, i8* } |
2111 | // @llvm.hexagon.L2.loadXX.pbr(i8*, i32). The pointer and memory access type |
2112 | // should be derived from ElTy. |
2113 | Type *ElTy = I.getCalledFunction()->getReturnType()->getStructElementType(N: 0); |
2114 | Info.memVT = MVT::getVT(Ty: ElTy); |
2115 | llvm::Value *BasePtrVal = I.getOperand(i_nocapture: 0); |
2116 | Info.ptrVal = getUnderLyingObjectForBrevLdIntr(V: BasePtrVal); |
    // The offset value comes through the Modifier register. For now, assume
    // the offset is 0.
2119 | Info.offset = 0; |
2120 | Info.align = DL.getABITypeAlign(Ty: Info.memVT.getTypeForEVT(Context&: Cont)); |
2121 | Info.flags = MachineMemOperand::MOLoad; |
2122 | return true; |
2123 | } |
2124 | case Intrinsic::hexagon_V6_vgathermw: |
2125 | case Intrinsic::hexagon_V6_vgathermw_128B: |
2126 | case Intrinsic::hexagon_V6_vgathermh: |
2127 | case Intrinsic::hexagon_V6_vgathermh_128B: |
2128 | case Intrinsic::hexagon_V6_vgathermhw: |
2129 | case Intrinsic::hexagon_V6_vgathermhw_128B: |
2130 | case Intrinsic::hexagon_V6_vgathermwq: |
2131 | case Intrinsic::hexagon_V6_vgathermwq_128B: |
2132 | case Intrinsic::hexagon_V6_vgathermhq: |
2133 | case Intrinsic::hexagon_V6_vgathermhq_128B: |
2134 | case Intrinsic::hexagon_V6_vgathermhwq: |
2135 | case Intrinsic::hexagon_V6_vgathermhwq_128B: { |
2136 | const Module &M = *I.getParent()->getParent()->getParent(); |
2137 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
2138 | Type *VecTy = I.getArgOperand(i: 1)->getType(); |
2139 | Info.memVT = MVT::getVT(Ty: VecTy); |
2140 | Info.ptrVal = I.getArgOperand(i: 0); |
2141 | Info.offset = 0; |
2142 | Info.align = |
2143 | MaybeAlign(M.getDataLayout().getTypeAllocSizeInBits(Ty: VecTy) / 8); |
2144 | Info.flags = MachineMemOperand::MOLoad | |
2145 | MachineMemOperand::MOStore | |
2146 | MachineMemOperand::MOVolatile; |
2147 | return true; |
2148 | } |
2149 | default: |
2150 | break; |
2151 | } |
2152 | return false; |
2153 | } |
2154 | |
2155 | bool HexagonTargetLowering::hasBitTest(SDValue X, SDValue Y) const { |
2156 | return X.getValueType().isScalarInteger(); // 'tstbit' |
2157 | } |
2158 | |
2159 | bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { |
2160 | return isTruncateFree(VT1: EVT::getEVT(Ty: Ty1), VT2: EVT::getEVT(Ty: Ty2)); |
2161 | } |
2162 | |
2163 | bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { |
2164 | if (!VT1.isSimple() || !VT2.isSimple()) |
2165 | return false; |
2166 | return VT1.getSimpleVT() == MVT::i64 && VT2.getSimpleVT() == MVT::i32; |
2167 | } |
2168 | |
2169 | bool HexagonTargetLowering::isFMAFasterThanFMulAndFAdd( |
2170 | const MachineFunction &MF, EVT VT) const { |
2171 | return isOperationLegalOrCustom(Op: ISD::FMA, VT); |
2172 | } |
2173 | |
2174 | // Should we expand the build vector with shuffles? |
2175 | bool HexagonTargetLowering::shouldExpandBuildVectorWithShuffles(EVT VT, |
2176 | unsigned DefinedValues) const { |
2177 | return false; |
2178 | } |
2179 | |
bool HexagonTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
      unsigned Index) const {
2182 | assert(ResVT.getVectorElementType() == SrcVT.getVectorElementType()); |
2183 | if (!ResVT.isSimple() || !SrcVT.isSimple()) |
2184 | return false; |
2185 | |
2186 | MVT ResTy = ResVT.getSimpleVT(), SrcTy = SrcVT.getSimpleVT(); |
2187 | if (ResTy.getVectorElementType() != MVT::i1) |
2188 | return true; |
2189 | |
2190 | // Non-HVX bool vectors are relatively cheap. |
2191 | return SrcTy.getVectorNumElements() <= 8; |
2192 | } |
2193 | |
2194 | bool HexagonTargetLowering::isTargetCanonicalConstantNode(SDValue Op) const { |
2195 | return Op.getOpcode() == ISD::CONCAT_VECTORS || |
2196 | TargetLowering::isTargetCanonicalConstantNode(Op); |
2197 | } |
2198 | |
2199 | bool HexagonTargetLowering::isShuffleMaskLegal(ArrayRef<int> Mask, |
2200 | EVT VT) const { |
2201 | return true; |
2202 | } |
2203 | |
2204 | TargetLoweringBase::LegalizeTypeAction |
2205 | HexagonTargetLowering::getPreferredVectorAction(MVT VT) const { |
2206 | unsigned VecLen = VT.getVectorMinNumElements(); |
2207 | MVT ElemTy = VT.getVectorElementType(); |
2208 | |
2209 | if (VecLen == 1 || VT.isScalableVector()) |
2210 | return TargetLoweringBase::TypeScalarizeVector; |
2211 | |
2212 | if (Subtarget.useHVXOps()) { |
2213 | unsigned Action = getPreferredHvxVectorAction(VecTy: VT); |
2214 | if (Action != ~0u) |
2215 | return static_cast<TargetLoweringBase::LegalizeTypeAction>(Action); |
2216 | } |
2217 | |
2218 | // Always widen (remaining) vectors of i1. |
2219 | if (ElemTy == MVT::i1) |
2220 | return TargetLoweringBase::TypeWidenVector; |
2221 | // Widen non-power-of-2 vectors. Such types cannot be split right now, |
2222 | // and computeRegisterProperties will override "split" with "widen", |
2223 | // which can cause other issues. |
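  // (For example, v3i32 is widened to v4i32 rather than split.)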
2224 | if (!isPowerOf2_32(Value: VecLen)) |
2225 | return TargetLoweringBase::TypeWidenVector; |
2226 | |
2227 | return TargetLoweringBase::TypeSplitVector; |
2228 | } |
2229 | |
2230 | TargetLoweringBase::LegalizeAction |
2231 | HexagonTargetLowering::getCustomOperationAction(SDNode &Op) const { |
2232 | if (Subtarget.useHVXOps()) { |
2233 | unsigned Action = getCustomHvxOperationAction(Op); |
2234 | if (Action != ~0u) |
2235 | return static_cast<TargetLoweringBase::LegalizeAction>(Action); |
2236 | } |
2237 | return TargetLoweringBase::Legal; |
2238 | } |
2239 | |
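// Split an address into a base and a constant offset: (add X, c) yields
// {X, c}, and anything else yields {Addr, 0}. For example, (add P, 40)
// produces the pair {P, 40}.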
2240 | std::pair<SDValue, int> |
2241 | HexagonTargetLowering::getBaseAndOffset(SDValue Addr) const { |
2242 | if (Addr.getOpcode() == ISD::ADD) { |
2243 | SDValue Op1 = Addr.getOperand(i: 1); |
2244 | if (auto *CN = dyn_cast<const ConstantSDNode>(Val: Op1.getNode())) |
2245 | return { Addr.getOperand(i: 0), CN->getSExtValue() }; |
2246 | } |
2247 | return { Addr, 0 }; |
2248 | } |
2249 | |
// Lower a vector shuffle (V1, V2, V3). V1 and V2 are the two vectors
// to select data from, and V3 is the permutation.
2252 | SDValue |
2253 | HexagonTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) |
2254 | const { |
2255 | const auto *SVN = cast<ShuffleVectorSDNode>(Val&: Op); |
2256 | ArrayRef<int> AM = SVN->getMask(); |
2257 | assert(AM.size() <= 8 && "Unexpected shuffle mask" ); |
2258 | unsigned VecLen = AM.size(); |
2259 | |
2260 | MVT VecTy = ty(Op); |
2261 | assert(!Subtarget.isHVXVectorType(VecTy, true) && |
2262 | "HVX shuffles should be legal" ); |
2263 | assert(VecTy.getSizeInBits() <= 64 && "Unexpected vector length" ); |
2264 | |
2265 | SDValue Op0 = Op.getOperand(i: 0); |
2266 | SDValue Op1 = Op.getOperand(i: 1); |
2267 | const SDLoc &dl(Op); |
2268 | |
2269 | // If the inputs are not the same as the output, bail. This is not an |
2270 | // error situation, but complicates the handling and the default expansion |
2271 | // (into BUILD_VECTOR) should be adequate. |
2272 | if (ty(Op: Op0) != VecTy || ty(Op: Op1) != VecTy) |
2273 | return SDValue(); |
2274 | |
2275 | // Normalize the mask so that the first non-negative index comes from |
2276 | // the first operand. |
2277 | SmallVector<int,8> Mask(AM.begin(), AM.end()); |
2278 | unsigned F = llvm::find_if(Range&: AM, P: [](int M) { return M >= 0; }) - AM.data(); |
2279 | if (F == AM.size()) |
2280 | return DAG.getUNDEF(VT: VecTy); |
2281 | if (AM[F] >= int(VecLen)) { |
2282 | ShuffleVectorSDNode::commuteMask(Mask); |
2283 | std::swap(a&: Op0, b&: Op1); |
2284 | } |
2285 | |
2286 | // Express the shuffle mask in terms of bytes. |
2287 | SmallVector<int,8> ByteMask; |
2288 | unsigned ElemBytes = VecTy.getVectorElementType().getSizeInBits() / 8; |
2289 | for (int M : Mask) { |
2290 | if (M < 0) { |
2291 | for (unsigned j = 0; j != ElemBytes; ++j) |
2292 | ByteMask.push_back(Elt: -1); |
2293 | } else { |
2294 | for (unsigned j = 0; j != ElemBytes; ++j) |
2295 | ByteMask.push_back(Elt: M*ElemBytes + j); |
2296 | } |
2297 | } |
2298 | assert(ByteMask.size() <= 8); |
2299 | |
2300 | // All non-undef (non-negative) indexes are well within [0..127], so they |
2301 | // fit in a single byte. Build two 64-bit words: |
2302 | // - MaskIdx where each byte is the corresponding index (for non-negative |
2303 | // indexes), and 0xFF for negative indexes, and |
2304 | // - MaskUnd that has 0xFF for each negative index. |
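  // For example, a halfword swap of a v2i16 (mask {1,0}, ElemBytes == 2)
  // gives ByteMask {2,3,0,1}, hence MaskIdx == 0x01000302 and MaskUnd == 0.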
2305 | uint64_t MaskIdx = 0; |
2306 | uint64_t MaskUnd = 0; |
2307 | for (unsigned i = 0, e = ByteMask.size(); i != e; ++i) { |
2308 | unsigned S = 8*i; |
2309 | uint64_t M = ByteMask[i] & 0xFF; |
2310 | if (M == 0xFF) |
2311 | MaskUnd |= M << S; |
2312 | MaskIdx |= M << S; |
2313 | } |
2314 | |
2315 | if (ByteMask.size() == 4) { |
2316 | // Identity. |
2317 | if (MaskIdx == (0x03020100 | MaskUnd)) |
2318 | return Op0; |
2319 | // Byte swap. |
2320 | if (MaskIdx == (0x00010203 | MaskUnd)) { |
2321 | SDValue T0 = DAG.getBitcast(VT: MVT::i32, V: Op0); |
2322 | SDValue T1 = DAG.getNode(Opcode: ISD::BSWAP, DL: dl, VT: MVT::i32, Operand: T0); |
2323 | return DAG.getBitcast(VT: VecTy, V: T1); |
2324 | } |
2325 | |
2326 | // Byte packs. |
2327 | SDValue Concat10 = |
2328 | getCombine(Hi: Op1, Lo: Op0, dl, ResTy: typeJoin(Tys: {ty(Op: Op1), ty(Op: Op0)}), DAG); |
2329 | if (MaskIdx == (0x06040200 | MaskUnd)) |
2330 | return getInstr(MachineOpc: Hexagon::S2_vtrunehb, dl, Ty: VecTy, Ops: {Concat10}, DAG); |
2331 | if (MaskIdx == (0x07050301 | MaskUnd)) |
2332 | return getInstr(MachineOpc: Hexagon::S2_vtrunohb, dl, Ty: VecTy, Ops: {Concat10}, DAG); |
2333 | |
2334 | SDValue Concat01 = |
2335 | getCombine(Hi: Op0, Lo: Op1, dl, ResTy: typeJoin(Tys: {ty(Op: Op0), ty(Op: Op1)}), DAG); |
2336 | if (MaskIdx == (0x02000604 | MaskUnd)) |
2337 | return getInstr(MachineOpc: Hexagon::S2_vtrunehb, dl, Ty: VecTy, Ops: {Concat01}, DAG); |
2338 | if (MaskIdx == (0x03010705 | MaskUnd)) |
2339 | return getInstr(MachineOpc: Hexagon::S2_vtrunohb, dl, Ty: VecTy, Ops: {Concat01}, DAG); |
2340 | } |
2341 | |
2342 | if (ByteMask.size() == 8) { |
2343 | // Identity. |
2344 | if (MaskIdx == (0x0706050403020100ull | MaskUnd)) |
2345 | return Op0; |
2346 | // Byte swap. |
2347 | if (MaskIdx == (0x0001020304050607ull | MaskUnd)) { |
2348 | SDValue T0 = DAG.getBitcast(VT: MVT::i64, V: Op0); |
2349 | SDValue T1 = DAG.getNode(Opcode: ISD::BSWAP, DL: dl, VT: MVT::i64, Operand: T0); |
2350 | return DAG.getBitcast(VT: VecTy, V: T1); |
2351 | } |
2352 | |
2353 | // Halfword picks. |
2354 | if (MaskIdx == (0x0d0c050409080100ull | MaskUnd)) |
2355 | return getInstr(MachineOpc: Hexagon::S2_shuffeh, dl, Ty: VecTy, Ops: {Op1, Op0}, DAG); |
2356 | if (MaskIdx == (0x0f0e07060b0a0302ull | MaskUnd)) |
2357 | return getInstr(MachineOpc: Hexagon::S2_shuffoh, dl, Ty: VecTy, Ops: {Op1, Op0}, DAG); |
2358 | if (MaskIdx == (0x0d0c090805040100ull | MaskUnd)) |
2359 | return getInstr(MachineOpc: Hexagon::S2_vtrunewh, dl, Ty: VecTy, Ops: {Op1, Op0}, DAG); |
2360 | if (MaskIdx == (0x0f0e0b0a07060302ull | MaskUnd)) |
2361 | return getInstr(MachineOpc: Hexagon::S2_vtrunowh, dl, Ty: VecTy, Ops: {Op1, Op0}, DAG); |
2362 | if (MaskIdx == (0x0706030205040100ull | MaskUnd)) { |
2363 | VectorPair P = opSplit(Vec: Op0, dl, DAG); |
2364 | return getInstr(MachineOpc: Hexagon::S2_packhl, dl, Ty: VecTy, Ops: {P.second, P.first}, DAG); |
2365 | } |
2366 | |
2367 | // Byte packs. |
2368 | if (MaskIdx == (0x0e060c040a020800ull | MaskUnd)) |
2369 | return getInstr(MachineOpc: Hexagon::S2_shuffeb, dl, Ty: VecTy, Ops: {Op1, Op0}, DAG); |
2370 | if (MaskIdx == (0x0f070d050b030901ull | MaskUnd)) |
2371 | return getInstr(MachineOpc: Hexagon::S2_shuffob, dl, Ty: VecTy, Ops: {Op1, Op0}, DAG); |
2372 | } |
2373 | |
2374 | return SDValue(); |
2375 | } |
2376 | |
2377 | SDValue |
2378 | HexagonTargetLowering::getSplatValue(SDValue Op, SelectionDAG &DAG) const { |
2379 | switch (Op.getOpcode()) { |
2380 | case ISD::BUILD_VECTOR: |
2381 | if (SDValue S = cast<BuildVectorSDNode>(Val&: Op)->getSplatValue()) |
2382 | return S; |
2383 | break; |
2384 | case ISD::SPLAT_VECTOR: |
2385 | return Op.getOperand(i: 0); |
2386 | } |
2387 | return SDValue(); |
2388 | } |
2389 | |
2390 | // Create a Hexagon-specific node for shifting a vector by an integer. |
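// For example, an ISD::SHL whose shift amount is a splat of S becomes
// (VASL value, S); if the amount is not a recognizable splat, no node is
// created.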
2391 | SDValue |
2392 | HexagonTargetLowering::getVectorShiftByInt(SDValue Op, SelectionDAG &DAG) |
2393 | const { |
2394 | unsigned NewOpc; |
2395 | switch (Op.getOpcode()) { |
2396 | case ISD::SHL: |
2397 | NewOpc = HexagonISD::VASL; |
2398 | break; |
2399 | case ISD::SRA: |
2400 | NewOpc = HexagonISD::VASR; |
2401 | break; |
2402 | case ISD::SRL: |
2403 | NewOpc = HexagonISD::VLSR; |
2404 | break; |
2405 | default: |
2406 | llvm_unreachable("Unexpected shift opcode" ); |
2407 | } |
2408 | |
2409 | if (SDValue Sp = getSplatValue(Op: Op.getOperand(i: 1), DAG)) |
2410 | return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: ty(Op), N1: Op.getOperand(i: 0), N2: Sp); |
2411 | return SDValue(); |
2412 | } |
2413 | |
2414 | SDValue |
2415 | HexagonTargetLowering::LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const { |
2416 | const SDLoc &dl(Op); |
2417 | |
2418 | // First try to convert the shift (by vector) to a shift by a scalar. |
  // If we first split the shift, the shift amount will become 'extract
  // subvector', and will no longer be recognized as a scalar.
2421 | SDValue Res = Op; |
2422 | if (SDValue S = getVectorShiftByInt(Op, DAG)) |
2423 | Res = S; |
2424 | |
2425 | unsigned Opc = Res.getOpcode(); |
2426 | switch (Opc) { |
2427 | case HexagonISD::VASR: |
2428 | case HexagonISD::VLSR: |
2429 | case HexagonISD::VASL: |
2430 | break; |
2431 | default: |
2432 | // No instructions for shifts by non-scalars. |
2433 | return SDValue(); |
2434 | } |
2435 | |
2436 | MVT ResTy = ty(Op: Res); |
2437 | if (ResTy.getVectorElementType() != MVT::i8) |
2438 | return Res; |
2439 | |
2440 | // For shifts of i8, extend the inputs to i16, then truncate back to i8. |
2441 | assert(ResTy.getVectorElementType() == MVT::i8); |
2442 | SDValue Val = Res.getOperand(i: 0), Amt = Res.getOperand(i: 1); |
2443 | |
2444 | auto ShiftPartI8 = [&dl, &DAG, this](unsigned Opc, SDValue V, SDValue A) { |
2445 | MVT Ty = ty(Op: V); |
2446 | MVT ExtTy = MVT::getVectorVT(VT: MVT::i16, NumElements: Ty.getVectorNumElements()); |
2447 | SDValue ExtV = Opc == HexagonISD::VASR ? DAG.getSExtOrTrunc(Op: V, DL: dl, VT: ExtTy) |
2448 | : DAG.getZExtOrTrunc(Op: V, DL: dl, VT: ExtTy); |
2449 | SDValue ExtS = DAG.getNode(Opcode: Opc, DL: dl, VT: ExtTy, Ops: {ExtV, A}); |
2450 | return DAG.getZExtOrTrunc(Op: ExtS, DL: dl, VT: Ty); |
2451 | }; |
2452 | |
2453 | if (ResTy.getSizeInBits() == 32) |
2454 | return ShiftPartI8(Opc, Val, Amt); |
2455 | |
2456 | auto [LoV, HiV] = opSplit(Vec: Val, dl, DAG); |
2457 | return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT: ResTy, |
2458 | Ops: {ShiftPartI8(Opc, LoV, Amt), ShiftPartI8(Opc, HiV, Amt)}); |
2459 | } |
2460 | |
2461 | SDValue |
2462 | HexagonTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const { |
2463 | if (isa<ConstantSDNode>(Val: Op.getOperand(i: 1).getNode())) |
2464 | return Op; |
2465 | return SDValue(); |
2466 | } |
2467 | |
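// Illustrative example (assuming the usual C2_tfrrp bit-to-lane mapping):
// bitcasting i8 0b00100101 to v8i1 yields lanes {1,0,1,0,0,1,0,0}, with
// lane i taken from bit i of the byte.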
2468 | SDValue |
2469 | HexagonTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const { |
2470 | MVT ResTy = ty(Op); |
2471 | SDValue InpV = Op.getOperand(i: 0); |
2472 | MVT InpTy = ty(Op: InpV); |
2473 | assert(ResTy.getSizeInBits() == InpTy.getSizeInBits()); |
2474 | const SDLoc &dl(Op); |
2475 | |
2476 | // Handle conversion from i8 to v8i1. |
2477 | if (InpTy == MVT::i8) { |
2478 | if (ResTy == MVT::v8i1) { |
2479 | SDValue Sc = DAG.getBitcast(VT: tyScalar(Ty: InpTy), V: InpV); |
2480 | SDValue Ext = DAG.getZExtOrTrunc(Op: Sc, DL: dl, VT: MVT::i32); |
2481 | return getInstr(MachineOpc: Hexagon::C2_tfrrp, dl, Ty: ResTy, Ops: Ext, DAG); |
2482 | } |
2483 | return SDValue(); |
2484 | } |
2485 | |
2486 | return Op; |
2487 | } |
2488 | |
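// For example (illustrative): a v4i16 BUILD_VECTOR (1, undef, 3, -1) yields
// Consts = {1, 0, 3, -1} (undef reads as 0) and a return value of true.
// FP elements are recorded via their bit patterns, e.g. f16 2.0 as 0x4000.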
2489 | bool |
2490 | HexagonTargetLowering::getBuildVectorConstInts(ArrayRef<SDValue> Values, |
2491 | MVT VecTy, SelectionDAG &DAG, |
2492 | MutableArrayRef<ConstantInt*> Consts) const { |
2493 | MVT ElemTy = VecTy.getVectorElementType(); |
2494 | unsigned ElemWidth = ElemTy.getSizeInBits(); |
2495 | IntegerType *IntTy = IntegerType::get(C&: *DAG.getContext(), NumBits: ElemWidth); |
2496 | bool AllConst = true; |
2497 | |
2498 | for (unsigned i = 0, e = Values.size(); i != e; ++i) { |
2499 | SDValue V = Values[i]; |
2500 | if (V.isUndef()) { |
2501 | Consts[i] = ConstantInt::get(Ty: IntTy, V: 0); |
2502 | continue; |
2503 | } |
2504 | // Make sure to always cast to IntTy. |
2505 | if (auto *CN = dyn_cast<ConstantSDNode>(Val: V.getNode())) { |
2506 | const ConstantInt *CI = CN->getConstantIntValue(); |
2507 | Consts[i] = ConstantInt::get(Ty: IntTy, V: CI->getValue().getSExtValue()); |
2508 | } else if (auto *CN = dyn_cast<ConstantFPSDNode>(Val: V.getNode())) { |
2509 | const ConstantFP *CF = CN->getConstantFPValue(); |
2510 | APInt A = CF->getValueAPF().bitcastToAPInt(); |
2511 | Consts[i] = ConstantInt::get(Ty: IntTy, V: A.getZExtValue()); |
2512 | } else { |
2513 | AllConst = false; |
2514 | } |
2515 | } |
2516 | return AllConst; |
2517 | } |
2518 | |
2519 | SDValue |
2520 | HexagonTargetLowering::buildVector32(ArrayRef<SDValue> Elem, const SDLoc &dl, |
2521 | MVT VecTy, SelectionDAG &DAG) const { |
2522 | MVT ElemTy = VecTy.getVectorElementType(); |
2523 | assert(VecTy.getVectorNumElements() == Elem.size()); |
2524 | |
2525 | SmallVector<ConstantInt*,4> Consts(Elem.size()); |
2526 | bool AllConst = getBuildVectorConstInts(Values: Elem, VecTy, DAG, Consts); |
2527 | |
2528 | unsigned First, Num = Elem.size(); |
2529 | for (First = 0; First != Num; ++First) { |
2530 | if (!isUndef(Op: Elem[First])) |
2531 | break; |
2532 | } |
2533 | if (First == Num) |
2534 | return DAG.getUNDEF(VT: VecTy); |
2535 | |
2536 | if (AllConst && |
2537 | llvm::all_of(Range&: Consts, P: [](ConstantInt *CI) { return CI->isZero(); })) |
2538 | return getZero(dl, Ty: VecTy, DAG); |
2539 | |
2540 | if (ElemTy == MVT::i16 || ElemTy == MVT::f16) { |
2541 | assert(Elem.size() == 2); |
2542 | if (AllConst) { |
2543 | // The 'Consts' array will have all values as integers regardless |
2544 | // of the vector element type. |
2545 | uint32_t V = (Consts[0]->getZExtValue() & 0xFFFF) | |
2546 | Consts[1]->getZExtValue() << 16; |
2547 | return DAG.getBitcast(VT: VecTy, V: DAG.getConstant(Val: V, DL: dl, VT: MVT::i32)); |
2548 | } |
2549 | SDValue E0, E1; |
2550 | if (ElemTy == MVT::f16) { |
2551 | E0 = DAG.getZExtOrTrunc(Op: DAG.getBitcast(VT: MVT::i16, V: Elem[0]), DL: dl, VT: MVT::i32); |
2552 | E1 = DAG.getZExtOrTrunc(Op: DAG.getBitcast(VT: MVT::i16, V: Elem[1]), DL: dl, VT: MVT::i32); |
2553 | } else { |
2554 | E0 = Elem[0]; |
2555 | E1 = Elem[1]; |
2556 | } |
2557 | SDValue N = getInstr(MachineOpc: Hexagon::A2_combine_ll, dl, Ty: MVT::i32, Ops: {E1, E0}, DAG); |
2558 | return DAG.getBitcast(VT: VecTy, V: N); |
2559 | } |
2560 | |
2561 | if (ElemTy == MVT::i8) { |
2562 | // First try generating a constant. |
2563 | if (AllConst) { |
2564 | int32_t V = (Consts[0]->getZExtValue() & 0xFF) | |
2565 | (Consts[1]->getZExtValue() & 0xFF) << 8 | |
2566 | (Consts[2]->getZExtValue() & 0xFF) << 16 | |
2567 | Consts[3]->getZExtValue() << 24; |
2568 | return DAG.getBitcast(VT: MVT::v4i8, V: DAG.getConstant(Val: V, DL: dl, VT: MVT::i32)); |
2569 | } |
2570 | |
2571 | // Then try splat. |
2572 | bool IsSplat = true; |
2573 | for (unsigned i = First+1; i != Num; ++i) { |
2574 | if (Elem[i] == Elem[First] || isUndef(Op: Elem[i])) |
2575 | continue; |
2576 | IsSplat = false; |
2577 | break; |
2578 | } |
2579 | if (IsSplat) { |
2580 | // Legalize the operand of SPLAT_VECTOR. |
2581 | SDValue Ext = DAG.getZExtOrTrunc(Op: Elem[First], DL: dl, VT: MVT::i32); |
2582 | return DAG.getNode(Opcode: ISD::SPLAT_VECTOR, DL: dl, VT: VecTy, Operand: Ext); |
2583 | } |
2584 | |
2585 | // Generate |
2586 | // (zxtb(Elem[0]) | (zxtb(Elem[1]) << 8)) | |
2587 | // (zxtb(Elem[2]) | (zxtb(Elem[3]) << 8)) << 16 |
2588 | assert(Elem.size() == 4); |
2589 | SDValue Vs[4]; |
2590 | for (unsigned i = 0; i != 4; ++i) { |
2591 | Vs[i] = DAG.getZExtOrTrunc(Op: Elem[i], DL: dl, VT: MVT::i32); |
2592 | Vs[i] = DAG.getZeroExtendInReg(Op: Vs[i], DL: dl, VT: MVT::i8); |
2593 | } |
2594 | SDValue S8 = DAG.getConstant(Val: 8, DL: dl, VT: MVT::i32); |
2595 | SDValue T0 = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: MVT::i32, Ops: {Vs[1], S8}); |
2596 | SDValue T1 = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: MVT::i32, Ops: {Vs[3], S8}); |
2597 | SDValue B0 = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: MVT::i32, Ops: {Vs[0], T0}); |
2598 | SDValue B1 = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: MVT::i32, Ops: {Vs[2], T1}); |
2599 | |
2600 | SDValue R = getInstr(MachineOpc: Hexagon::A2_combine_ll, dl, Ty: MVT::i32, Ops: {B1, B0}, DAG); |
2601 | return DAG.getBitcast(VT: MVT::v4i8, V: R); |
2602 | } |
2603 | |
2604 | #ifndef NDEBUG |
2605 | dbgs() << "VecTy: " << VecTy << '\n'; |
2606 | #endif |
2607 | llvm_unreachable("Unexpected vector element type" ); |
2608 | } |
2609 | |
2610 | SDValue |
2611 | HexagonTargetLowering::buildVector64(ArrayRef<SDValue> Elem, const SDLoc &dl, |
2612 | MVT VecTy, SelectionDAG &DAG) const { |
2613 | MVT ElemTy = VecTy.getVectorElementType(); |
2614 | assert(VecTy.getVectorNumElements() == Elem.size()); |
2615 | |
2616 | SmallVector<ConstantInt*,8> Consts(Elem.size()); |
2617 | bool AllConst = getBuildVectorConstInts(Values: Elem, VecTy, DAG, Consts); |
2618 | |
2619 | unsigned First, Num = Elem.size(); |
2620 | for (First = 0; First != Num; ++First) { |
2621 | if (!isUndef(Op: Elem[First])) |
2622 | break; |
2623 | } |
2624 | if (First == Num) |
2625 | return DAG.getUNDEF(VT: VecTy); |
2626 | |
2627 | if (AllConst && |
2628 | llvm::all_of(Range&: Consts, P: [](ConstantInt *CI) { return CI->isZero(); })) |
2629 | return getZero(dl, Ty: VecTy, DAG); |
2630 | |
2631 | // First try splat if possible. |
2632 | if (ElemTy == MVT::i16 || ElemTy == MVT::f16) { |
2633 | bool IsSplat = true; |
2634 | for (unsigned i = First+1; i != Num; ++i) { |
2635 | if (Elem[i] == Elem[First] || isUndef(Op: Elem[i])) |
2636 | continue; |
2637 | IsSplat = false; |
2638 | break; |
2639 | } |
2640 | if (IsSplat) { |
2641 | // Legalize the operand of SPLAT_VECTOR |
2642 | SDValue S = ElemTy == MVT::f16 ? DAG.getBitcast(VT: MVT::i16, V: Elem[First]) |
2643 | : Elem[First]; |
2644 | SDValue Ext = DAG.getZExtOrTrunc(Op: S, DL: dl, VT: MVT::i32); |
2645 | return DAG.getNode(Opcode: ISD::SPLAT_VECTOR, DL: dl, VT: VecTy, Operand: Ext); |
2646 | } |
2647 | } |
2648 | |
2649 | // Then try constant. |
2650 | if (AllConst) { |
2651 | uint64_t Val = 0; |
2652 | unsigned W = ElemTy.getSizeInBits(); |
2653 | uint64_t Mask = (1ull << W) - 1; |
2654 | for (unsigned i = 0; i != Num; ++i) |
2655 | Val = (Val << W) | (Consts[Num-1-i]->getZExtValue() & Mask); |
2656 | SDValue V0 = DAG.getConstant(Val, DL: dl, VT: MVT::i64); |
2657 | return DAG.getBitcast(VT: VecTy, V: V0); |
2658 | } |
2659 | |
2660 | // Build two 32-bit vectors and concatenate. |
2661 | MVT HalfTy = MVT::getVectorVT(VT: ElemTy, NumElements: Num/2); |
2662 | SDValue L = (ElemTy == MVT::i32) |
2663 | ? Elem[0] |
2664 | : buildVector32(Elem: Elem.take_front(N: Num/2), dl, VecTy: HalfTy, DAG); |
2665 | SDValue H = (ElemTy == MVT::i32) |
2666 | ? Elem[1] |
2667 | : buildVector32(Elem: Elem.drop_front(N: Num/2), dl, VecTy: HalfTy, DAG); |
2668 | return getCombine(Hi: H, Lo: L, dl, ResTy: VecTy, DAG); |
2669 | } |
2670 | |
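// Extracts an element or subvector from a short (32/64-bit) vector.
// E.g. (illustrative): extracting element 1 of a v2i16 becomes
// (EXTRACTU VecV, width=16, off=16) on the i32 bitcast of the vector.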
2671 | SDValue |
HexagonTargetLowering::extractVector(SDValue VecV, SDValue IdxV,
2673 | const SDLoc &dl, MVT ValTy, MVT ResTy, |
2674 | SelectionDAG &DAG) const { |
2675 | MVT VecTy = ty(Op: VecV); |
2676 | assert(!ValTy.isVector() || |
2677 | VecTy.getVectorElementType() == ValTy.getVectorElementType()); |
2678 | if (VecTy.getVectorElementType() == MVT::i1) |
2679 | return extractVectorPred(VecV, IdxV, dl, ValTy, ResTy, DAG); |
2680 | |
2681 | unsigned VecWidth = VecTy.getSizeInBits(); |
2682 | unsigned ValWidth = ValTy.getSizeInBits(); |
2683 | unsigned ElemWidth = VecTy.getVectorElementType().getSizeInBits(); |
2684 | assert((VecWidth % ElemWidth) == 0); |
2685 | assert(VecWidth == 32 || VecWidth == 64); |
2686 | |
2687 | // Cast everything to scalar integer types. |
2688 | MVT ScalarTy = tyScalar(Ty: VecTy); |
2689 | VecV = DAG.getBitcast(VT: ScalarTy, V: VecV); |
2690 | |
2691 | SDValue WidthV = DAG.getConstant(Val: ValWidth, DL: dl, VT: MVT::i32); |
2692 | SDValue ExtV; |
2693 | |
2694 | if (auto *IdxN = dyn_cast<ConstantSDNode>(Val&: IdxV)) { |
2695 | unsigned Off = IdxN->getZExtValue() * ElemWidth; |
2696 | if (VecWidth == 64 && ValWidth == 32) { |
2697 | assert(Off == 0 || Off == 32); |
2698 | ExtV = Off == 0 ? LoHalf(V: VecV, DAG) : HiHalf(V: VecV, DAG); |
2699 | } else if (Off == 0 && (ValWidth % 8) == 0) { |
2700 | ExtV = DAG.getZeroExtendInReg(Op: VecV, DL: dl, VT: tyScalar(Ty: ValTy)); |
2701 | } else { |
2702 | SDValue OffV = DAG.getConstant(Val: Off, DL: dl, VT: MVT::i32); |
2703 | // The return type of EXTRACTU must be the same as the type of the |
2704 | // input vector. |
2705 | ExtV = DAG.getNode(Opcode: HexagonISD::EXTRACTU, DL: dl, VT: ScalarTy, |
2706 | Ops: {VecV, WidthV, OffV}); |
2707 | } |
2708 | } else { |
2709 | if (ty(Op: IdxV) != MVT::i32) |
2710 | IdxV = DAG.getZExtOrTrunc(Op: IdxV, DL: dl, VT: MVT::i32); |
2711 | SDValue OffV = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: MVT::i32, N1: IdxV, |
2712 | N2: DAG.getConstant(Val: ElemWidth, DL: dl, VT: MVT::i32)); |
2713 | ExtV = DAG.getNode(Opcode: HexagonISD::EXTRACTU, DL: dl, VT: ScalarTy, |
2714 | Ops: {VecV, WidthV, OffV}); |
2715 | } |
2716 | |
2717 | // Cast ExtV to the requested result type. |
2718 | ExtV = DAG.getZExtOrTrunc(Op: ExtV, DL: dl, VT: tyScalar(Ty: ResTy)); |
2719 | ExtV = DAG.getBitcast(VT: ResTy, V: ExtV); |
2720 | return ExtV; |
2721 | } |
2722 | |
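// Illustrative note: a boolean vector occupies all 8 predicate bits with
// each element replicated 8/VecWidth times, so e.g. extracting bit element
// 1 of a v2i1 tests bit 4 (tstbit after scaling the index).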
2723 | SDValue |
HexagonTargetLowering::extractVectorPred(SDValue VecV, SDValue IdxV,
2725 | const SDLoc &dl, MVT ValTy, MVT ResTy, |
2726 | SelectionDAG &DAG) const { |
2727 | // Special case for v{8,4,2}i1 (the only boolean vectors legal in Hexagon |
2728 | // without any coprocessors). |
2729 | MVT VecTy = ty(Op: VecV); |
2730 | unsigned VecWidth = VecTy.getSizeInBits(); |
2731 | unsigned ValWidth = ValTy.getSizeInBits(); |
2732 | assert(VecWidth == VecTy.getVectorNumElements() && |
2733 | "Vector elements should equal vector width size" ); |
2734 | assert(VecWidth == 8 || VecWidth == 4 || VecWidth == 2); |
2735 | |
2736 | // Check if this is an extract of the lowest bit. |
2737 | if (isNullConstant(V: IdxV) && ValTy.getSizeInBits() == 1) { |
2738 | // Extracting the lowest bit is a no-op, but it changes the type, |
2739 | // so it must be kept as an operation to avoid errors related to |
2740 | // type mismatches. |
2741 | return DAG.getNode(Opcode: HexagonISD::TYPECAST, DL: dl, VT: MVT::i1, Operand: VecV); |
2742 | } |
2743 | |
2744 | // If the value extracted is a single bit, use tstbit. |
2745 | if (ValWidth == 1) { |
2746 | SDValue A0 = getInstr(MachineOpc: Hexagon::C2_tfrpr, dl, Ty: MVT::i32, Ops: {VecV}, DAG); |
2747 | SDValue M0 = DAG.getConstant(Val: 8 / VecWidth, DL: dl, VT: MVT::i32); |
2748 | SDValue I0 = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: MVT::i32, N1: IdxV, N2: M0); |
2749 | return DAG.getNode(Opcode: HexagonISD::TSTBIT, DL: dl, VT: MVT::i1, N1: A0, N2: I0); |
2750 | } |
2751 | |
2752 | // Each bool vector (v2i1, v4i1, v8i1) always occupies 8 bits in |
2753 | // a predicate register. The elements of the vector are repeated |
2754 | // in the register (if necessary) so that the total number is 8. |
2755 | // The extracted subvector will need to be expanded in such a way. |
2756 | unsigned Scale = VecWidth / ValWidth; |
2757 | |
2758 | // Generate (p2d VecV) >> 8*Idx to move the interesting bytes to |
2759 | // position 0. |
2760 | assert(ty(IdxV) == MVT::i32); |
2761 | unsigned VecRep = 8 / VecWidth; |
2762 | SDValue S0 = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: MVT::i32, N1: IdxV, |
2763 | N2: DAG.getConstant(Val: 8*VecRep, DL: dl, VT: MVT::i32)); |
2764 | SDValue T0 = DAG.getNode(Opcode: HexagonISD::P2D, DL: dl, VT: MVT::i64, Operand: VecV); |
2765 | SDValue T1 = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i64, N1: T0, N2: S0); |
2766 | while (Scale > 1) { |
2767 | // The longest possible subvector is at most 32 bits, so it is always |
2768 | // contained in the low subregister. |
2769 | T1 = LoHalf(V: T1, DAG); |
2770 | T1 = expandPredicate(Vec32: T1, dl, DAG); |
2771 | Scale /= 2; |
2772 | } |
2773 | |
2774 | return DAG.getNode(Opcode: HexagonISD::D2P, DL: dl, VT: ResTy, Operand: T1); |
2775 | } |
2776 | |
2777 | SDValue |
2778 | HexagonTargetLowering::insertVector(SDValue VecV, SDValue ValV, SDValue IdxV, |
2779 | const SDLoc &dl, MVT ValTy, |
2780 | SelectionDAG &DAG) const { |
2781 | MVT VecTy = ty(Op: VecV); |
2782 | if (VecTy.getVectorElementType() == MVT::i1) |
2783 | return insertVectorPred(VecV, ValV, IdxV, dl, ValTy, DAG); |
2784 | |
2785 | unsigned VecWidth = VecTy.getSizeInBits(); |
2786 | unsigned ValWidth = ValTy.getSizeInBits(); |
2787 | assert(VecWidth == 32 || VecWidth == 64); |
2788 | assert((VecWidth % ValWidth) == 0); |
2789 | |
2790 | // Cast everything to scalar integer types. |
2791 | MVT ScalarTy = MVT::getIntegerVT(BitWidth: VecWidth); |
  // The actual type of ValV may be different from ValTy (which is related
  // to the vector type).
2794 | unsigned VW = ty(Op: ValV).getSizeInBits(); |
2795 | ValV = DAG.getBitcast(VT: MVT::getIntegerVT(BitWidth: VW), V: ValV); |
2796 | VecV = DAG.getBitcast(VT: ScalarTy, V: VecV); |
2797 | if (VW != VecWidth) |
2798 | ValV = DAG.getAnyExtOrTrunc(Op: ValV, DL: dl, VT: ScalarTy); |
2799 | |
2800 | SDValue WidthV = DAG.getConstant(Val: ValWidth, DL: dl, VT: MVT::i32); |
2801 | SDValue InsV; |
2802 | |
2803 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: IdxV)) { |
2804 | unsigned W = C->getZExtValue() * ValWidth; |
2805 | SDValue OffV = DAG.getConstant(Val: W, DL: dl, VT: MVT::i32); |
2806 | InsV = DAG.getNode(Opcode: HexagonISD::INSERT, DL: dl, VT: ScalarTy, |
2807 | Ops: {VecV, ValV, WidthV, OffV}); |
2808 | } else { |
2809 | if (ty(Op: IdxV) != MVT::i32) |
2810 | IdxV = DAG.getZExtOrTrunc(Op: IdxV, DL: dl, VT: MVT::i32); |
2811 | SDValue OffV = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: MVT::i32, N1: IdxV, N2: WidthV); |
2812 | InsV = DAG.getNode(Opcode: HexagonISD::INSERT, DL: dl, VT: ScalarTy, |
2813 | Ops: {VecV, ValV, WidthV, OffV}); |
2814 | } |
2815 | |
2816 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecTy, Operand: InsV); |
2817 | } |
2818 | |
2819 | SDValue |
2820 | HexagonTargetLowering::insertVectorPred(SDValue VecV, SDValue ValV, |
2821 | SDValue IdxV, const SDLoc &dl, |
2822 | MVT ValTy, SelectionDAG &DAG) const { |
2823 | MVT VecTy = ty(Op: VecV); |
2824 | unsigned VecLen = VecTy.getVectorNumElements(); |
2825 | |
2826 | if (ValTy == MVT::i1) { |
2827 | SDValue ToReg = getInstr(MachineOpc: Hexagon::C2_tfrpr, dl, Ty: MVT::i32, Ops: {VecV}, DAG); |
2828 | SDValue Ext = DAG.getSExtOrTrunc(Op: ValV, DL: dl, VT: MVT::i32); |
2829 | SDValue Width = DAG.getConstant(Val: 8 / VecLen, DL: dl, VT: MVT::i32); |
2830 | SDValue Idx = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: MVT::i32, N1: IdxV, N2: Width); |
2831 | SDValue Ins = |
2832 | DAG.getNode(Opcode: HexagonISD::INSERT, DL: dl, VT: MVT::i32, Ops: {ToReg, Ext, Width, Idx}); |
2833 | return getInstr(MachineOpc: Hexagon::C2_tfrrp, dl, Ty: VecTy, Ops: {Ins}, DAG); |
2834 | } |
2835 | |
2836 | assert(ValTy.getVectorElementType() == MVT::i1); |
2837 | SDValue ValR = ValTy.isVector() |
2838 | ? DAG.getNode(Opcode: HexagonISD::P2D, DL: dl, VT: MVT::i64, Operand: ValV) |
2839 | : DAG.getSExtOrTrunc(Op: ValV, DL: dl, VT: MVT::i64); |
2840 | |
2841 | unsigned Scale = VecLen / ValTy.getVectorNumElements(); |
2842 | assert(Scale > 1); |
2843 | |
2844 | for (unsigned R = Scale; R > 1; R /= 2) { |
2845 | ValR = contractPredicate(Vec64: ValR, dl, DAG); |
2846 | ValR = getCombine(Hi: DAG.getUNDEF(VT: MVT::i32), Lo: ValR, dl, ResTy: MVT::i64, DAG); |
2847 | } |
2848 | |
2849 | SDValue Width = DAG.getConstant(Val: 64 / Scale, DL: dl, VT: MVT::i32); |
2850 | SDValue Idx = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: MVT::i32, N1: IdxV, N2: Width); |
2851 | SDValue VecR = DAG.getNode(Opcode: HexagonISD::P2D, DL: dl, VT: MVT::i64, Operand: VecV); |
2852 | SDValue Ins = |
2853 | DAG.getNode(Opcode: HexagonISD::INSERT, DL: dl, VT: MVT::i64, Ops: {VecR, ValR, Width, Idx}); |
2854 | return DAG.getNode(Opcode: HexagonISD::D2P, DL: dl, VT: VecTy, Operand: Ins); |
2855 | } |
2856 | |
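// Sketch of the effect (illustrative): each byte of the 32-bit input widens
// to a sign-extended halfword, e.g. 0x00FF00FF -> 0x0000FFFF0000FFFF,
// doubling the replication count of the predicate bytes.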
2857 | SDValue |
2858 | HexagonTargetLowering::expandPredicate(SDValue Vec32, const SDLoc &dl, |
2859 | SelectionDAG &DAG) const { |
2860 | assert(ty(Vec32).getSizeInBits() == 32); |
2861 | if (isUndef(Op: Vec32)) |
2862 | return DAG.getUNDEF(VT: MVT::i64); |
2863 | SDValue P = DAG.getBitcast(VT: MVT::v4i8, V: Vec32); |
2864 | SDValue X = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v4i16, Operand: P); |
2865 | return DAG.getBitcast(VT: MVT::i64, V: X); |
2866 | } |
2867 | |
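// Sketch of the effect (illustrative): keeps the even bytes of the 64-bit
// input, e.g. 0x00FF00FF00FF00FF -> 0xFFFFFFFF, halving the replication
// count of the predicate bytes.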
2868 | SDValue |
2869 | HexagonTargetLowering::contractPredicate(SDValue Vec64, const SDLoc &dl, |
2870 | SelectionDAG &DAG) const { |
2871 | assert(ty(Vec64).getSizeInBits() == 64); |
2872 | if (isUndef(Op: Vec64)) |
2873 | return DAG.getUNDEF(VT: MVT::i32); |
2874 | // Collect even bytes: |
2875 | SDValue A = DAG.getBitcast(VT: MVT::v8i8, V: Vec64); |
2876 | SDValue S = DAG.getVectorShuffle(VT: MVT::v8i8, dl, N1: A, N2: DAG.getUNDEF(VT: MVT::v8i8), |
2877 | Mask: {0, 2, 4, 6, 1, 3, 5, 7}); |
2878 | return extractVector(VecV: S, IdxV: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), dl, ValTy: MVT::v4i8, |
2879 | ResTy: MVT::i32, DAG); |
2880 | } |
2881 | |
2882 | SDValue |
2883 | HexagonTargetLowering::getZero(const SDLoc &dl, MVT Ty, SelectionDAG &DAG) |
2884 | const { |
2885 | if (Ty.isVector()) { |
2886 | unsigned W = Ty.getSizeInBits(); |
2887 | if (W <= 64) |
2888 | return DAG.getBitcast(VT: Ty, V: DAG.getConstant(Val: 0, DL: dl, VT: MVT::getIntegerVT(BitWidth: W))); |
2889 | return DAG.getNode(Opcode: ISD::SPLAT_VECTOR, DL: dl, VT: Ty, Operand: getZero(dl, Ty: MVT::i32, DAG)); |
2890 | } |
2891 | |
2892 | if (Ty.isInteger()) |
2893 | return DAG.getConstant(Val: 0, DL: dl, VT: Ty); |
2894 | if (Ty.isFloatingPoint()) |
2895 | return DAG.getConstantFP(Val: 0.0, DL: dl, VT: Ty); |
2896 | llvm_unreachable("Invalid type for zero" ); |
2897 | } |
2898 | |
2899 | SDValue |
2900 | HexagonTargetLowering::appendUndef(SDValue Val, MVT ResTy, SelectionDAG &DAG) |
2901 | const { |
2902 | MVT ValTy = ty(Op: Val); |
2903 | assert(ValTy.getVectorElementType() == ResTy.getVectorElementType()); |
2904 | |
2905 | unsigned ValLen = ValTy.getVectorNumElements(); |
2906 | unsigned ResLen = ResTy.getVectorNumElements(); |
2907 | if (ValLen == ResLen) |
2908 | return Val; |
2909 | |
2910 | const SDLoc &dl(Val); |
2911 | assert(ValLen < ResLen); |
2912 | assert(ResLen % ValLen == 0); |
2913 | |
2914 | SmallVector<SDValue, 4> Concats = {Val}; |
2915 | for (unsigned i = 1, e = ResLen / ValLen; i < e; ++i) |
2916 | Concats.push_back(Elt: DAG.getUNDEF(VT: ValTy)); |
2917 | |
2918 | return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT: ResTy, Ops: Concats); |
2919 | } |
2920 | |
2921 | SDValue |
2922 | HexagonTargetLowering::getCombine(SDValue Hi, SDValue Lo, const SDLoc &dl, |
2923 | MVT ResTy, SelectionDAG &DAG) const { |
2924 | MVT ElemTy = ty(Op: Hi); |
2925 | assert(ElemTy == ty(Lo)); |
2926 | |
2927 | if (!ElemTy.isVector()) { |
2928 | assert(ElemTy.isScalarInteger()); |
2929 | MVT PairTy = MVT::getIntegerVT(BitWidth: 2 * ElemTy.getSizeInBits()); |
2930 | SDValue Pair = DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: PairTy, N1: Lo, N2: Hi); |
2931 | return DAG.getBitcast(VT: ResTy, V: Pair); |
2932 | } |
2933 | |
2934 | unsigned Width = ElemTy.getSizeInBits(); |
2935 | MVT IntTy = MVT::getIntegerVT(BitWidth: Width); |
2936 | MVT PairTy = MVT::getIntegerVT(BitWidth: 2 * Width); |
2937 | SDValue Pair = |
2938 | DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: PairTy, |
2939 | Ops: {DAG.getBitcast(VT: IntTy, V: Lo), DAG.getBitcast(VT: IntTy, V: Hi)}); |
2940 | return DAG.getBitcast(VT: ResTy, V: Pair); |
2941 | } |
2942 | |
2943 | SDValue |
2944 | HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { |
2945 | MVT VecTy = ty(Op); |
2946 | unsigned BW = VecTy.getSizeInBits(); |
2947 | const SDLoc &dl(Op); |
2948 | SmallVector<SDValue,8> Ops; |
2949 | for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) |
2950 | Ops.push_back(Elt: Op.getOperand(i)); |
2951 | |
2952 | if (BW == 32) |
2953 | return buildVector32(Elem: Ops, dl, VecTy, DAG); |
2954 | if (BW == 64) |
2955 | return buildVector64(Elem: Ops, dl, VecTy, DAG); |
2956 | |
2957 | if (VecTy == MVT::v8i1 || VecTy == MVT::v4i1 || VecTy == MVT::v2i1) { |
    // Check if this is a special case of all-0 or all-1.
2959 | bool All0 = true, All1 = true; |
2960 | for (SDValue P : Ops) { |
2961 | auto *CN = dyn_cast<ConstantSDNode>(Val: P.getNode()); |
2962 | if (CN == nullptr) { |
2963 | All0 = All1 = false; |
2964 | break; |
2965 | } |
2966 | uint32_t C = CN->getZExtValue(); |
2967 | All0 &= (C == 0); |
2968 | All1 &= (C == 1); |
2969 | } |
2970 | if (All0) |
2971 | return DAG.getNode(Opcode: HexagonISD::PFALSE, DL: dl, VT: VecTy); |
2972 | if (All1) |
2973 | return DAG.getNode(Opcode: HexagonISD::PTRUE, DL: dl, VT: VecTy); |
2974 | |
2975 | // For each i1 element in the resulting predicate register, put 1 |
2976 | // shifted by the index of the element into a general-purpose register, |
2977 | // then or them together and transfer it back into a predicate register. |
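    // E.g. (illustrative): for v4i1 {1,0,1,0}, Rep = 2, so bits 0,1 and 4,5
    // are selected and OR'd together, giving 0b00110011 before C2_tfrrp.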
2978 | SDValue Rs[8]; |
2979 | SDValue Z = getZero(dl, Ty: MVT::i32, DAG); |
2980 | // Always produce 8 bits, repeat inputs if necessary. |
2981 | unsigned Rep = 8 / VecTy.getVectorNumElements(); |
2982 | for (unsigned i = 0; i != 8; ++i) { |
2983 | SDValue S = DAG.getConstant(Val: 1ull << i, DL: dl, VT: MVT::i32); |
2984 | Rs[i] = DAG.getSelect(DL: dl, VT: MVT::i32, Cond: Ops[i/Rep], LHS: S, RHS: Z); |
2985 | } |
2986 | for (ArrayRef<SDValue> A(Rs); A.size() != 1; A = A.drop_back(N: A.size()/2)) { |
2987 | for (unsigned i = 0, e = A.size()/2; i != e; ++i) |
2988 | Rs[i] = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: MVT::i32, N1: Rs[2*i], N2: Rs[2*i+1]); |
2989 | } |
2990 | // Move the value directly to a predicate register. |
2991 | return getInstr(MachineOpc: Hexagon::C2_tfrrp, dl, Ty: VecTy, Ops: {Rs[0]}, DAG); |
2992 | } |
2993 | |
2994 | return SDValue(); |
2995 | } |
2996 | |
2997 | SDValue |
2998 | HexagonTargetLowering::LowerCONCAT_VECTORS(SDValue Op, |
2999 | SelectionDAG &DAG) const { |
3000 | MVT VecTy = ty(Op); |
3001 | const SDLoc &dl(Op); |
3002 | if (VecTy.getSizeInBits() == 64) { |
3003 | assert(Op.getNumOperands() == 2); |
3004 | return getCombine(Hi: Op.getOperand(i: 1), Lo: Op.getOperand(i: 0), dl, ResTy: VecTy, DAG); |
3005 | } |
3006 | |
3007 | MVT ElemTy = VecTy.getVectorElementType(); |
3008 | if (ElemTy == MVT::i1) { |
3009 | assert(VecTy == MVT::v2i1 || VecTy == MVT::v4i1 || VecTy == MVT::v8i1); |
3010 | MVT OpTy = ty(Op: Op.getOperand(i: 0)); |
3011 | // Scale is how many times the operands need to be contracted to match |
3012 | // the representation in the target register. |
3013 | unsigned Scale = VecTy.getVectorNumElements() / OpTy.getVectorNumElements(); |
3014 | assert(Scale == Op.getNumOperands() && Scale > 1); |
3015 | |
3016 | // First, convert all bool vectors to integers, then generate pairwise |
3017 | // inserts to form values of doubled length. Up until there are only |
3018 | // two values left to concatenate, all of these values will fit in a |
3019 | // 32-bit integer, so keep them as i32 to use 32-bit inserts. |
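    // E.g. (illustrative): with four v2i1 operands (Scale = 4), each
    // contracted word keeps its two elements in the low 16 bits; pairs are
    // merged with 16-bit INSERTs, and the final two words feed D2P.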
3020 | SmallVector<SDValue,4> Words[2]; |
3021 | unsigned IdxW = 0; |
3022 | |
3023 | for (SDValue P : Op.getNode()->op_values()) { |
3024 | SDValue W = DAG.getNode(Opcode: HexagonISD::P2D, DL: dl, VT: MVT::i64, Operand: P); |
3025 | for (unsigned R = Scale; R > 1; R /= 2) { |
3026 | W = contractPredicate(Vec64: W, dl, DAG); |
3027 | W = getCombine(Hi: DAG.getUNDEF(VT: MVT::i32), Lo: W, dl, ResTy: MVT::i64, DAG); |
3028 | } |
3029 | W = LoHalf(V: W, DAG); |
3030 | Words[IdxW].push_back(Elt: W); |
3031 | } |
3032 | |
3033 | while (Scale > 2) { |
3034 | SDValue WidthV = DAG.getConstant(Val: 64 / Scale, DL: dl, VT: MVT::i32); |
3035 | Words[IdxW ^ 1].clear(); |
3036 | |
3037 | for (unsigned i = 0, e = Words[IdxW].size(); i != e; i += 2) { |
3038 | SDValue W0 = Words[IdxW][i], W1 = Words[IdxW][i+1]; |
3039 | // Insert W1 into W0 right next to the significant bits of W0. |
3040 | SDValue T = DAG.getNode(Opcode: HexagonISD::INSERT, DL: dl, VT: MVT::i32, |
3041 | Ops: {W0, W1, WidthV, WidthV}); |
3042 | Words[IdxW ^ 1].push_back(Elt: T); |
3043 | } |
3044 | IdxW ^= 1; |
3045 | Scale /= 2; |
3046 | } |
3047 | |
3048 | // At this point there should only be two words left, and Scale should be 2. |
3049 | assert(Scale == 2 && Words[IdxW].size() == 2); |
3050 | |
3051 | SDValue WW = getCombine(Hi: Words[IdxW][1], Lo: Words[IdxW][0], dl, ResTy: MVT::i64, DAG); |
3052 | return DAG.getNode(Opcode: HexagonISD::D2P, DL: dl, VT: VecTy, Operand: WW); |
3053 | } |
3054 | |
3055 | return SDValue(); |
3056 | } |
3057 | |
3058 | SDValue |
HexagonTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
3060 | SelectionDAG &DAG) const { |
3061 | SDValue Vec = Op.getOperand(i: 0); |
3062 | MVT ElemTy = ty(Op: Vec).getVectorElementType(); |
3063 | return extractVector(VecV: Vec, IdxV: Op.getOperand(i: 1), dl: SDLoc(Op), ValTy: ElemTy, ResTy: ty(Op), DAG); |
3064 | } |
3065 | |
3066 | SDValue |
HexagonTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
3068 | SelectionDAG &DAG) const { |
3069 | return extractVector(VecV: Op.getOperand(i: 0), IdxV: Op.getOperand(i: 1), dl: SDLoc(Op), |
3070 | ValTy: ty(Op), ResTy: ty(Op), DAG); |
3071 | } |
3072 | |
3073 | SDValue |
3074 | HexagonTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, |
3075 | SelectionDAG &DAG) const { |
3076 | return insertVector(VecV: Op.getOperand(i: 0), ValV: Op.getOperand(i: 1), IdxV: Op.getOperand(i: 2), |
3077 | dl: SDLoc(Op), ValTy: ty(Op).getVectorElementType(), DAG); |
3078 | } |
3079 | |
3080 | SDValue |
3081 | HexagonTargetLowering::LowerINSERT_SUBVECTOR(SDValue Op, |
3082 | SelectionDAG &DAG) const { |
3083 | SDValue ValV = Op.getOperand(i: 1); |
3084 | return insertVector(VecV: Op.getOperand(i: 0), ValV, IdxV: Op.getOperand(i: 2), |
3085 | dl: SDLoc(Op), ValTy: ty(Op: ValV), DAG); |
3086 | } |
3087 | |
3088 | bool |
3089 | HexagonTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { |
3090 | // Assuming the caller does not have either a signext or zeroext modifier, and |
3091 | // only one value is accepted, any reasonable truncation is allowed. |
3092 | if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) |
3093 | return false; |
3094 | |
3095 | // FIXME: in principle up to 64-bit could be made safe, but it would be very |
3096 | // fragile at the moment: any support for multiple value returns would be |
3097 | // liable to disallow tail calls involving i64 -> iN truncation in many cases. |
3098 | return Ty1->getPrimitiveSizeInBits() <= 32; |
3099 | } |
3100 | |
3101 | SDValue |
3102 | HexagonTargetLowering::LowerLoad(SDValue Op, SelectionDAG &DAG) const { |
3103 | MVT Ty = ty(Op); |
3104 | const SDLoc &dl(Op); |
3105 | LoadSDNode *LN = cast<LoadSDNode>(Val: Op.getNode()); |
3106 | MVT MemTy = LN->getMemoryVT().getSimpleVT(); |
3107 | ISD::LoadExtType ET = LN->getExtensionType(); |
3108 | |
3109 | bool LoadPred = MemTy == MVT::v2i1 || MemTy == MVT::v4i1 || MemTy == MVT::v8i1; |
3110 | if (LoadPred) { |
3111 | SDValue NL = DAG.getLoad( |
3112 | AM: LN->getAddressingMode(), ExtType: ISD::ZEXTLOAD, VT: MVT::i32, dl, Chain: LN->getChain(), |
3113 | Ptr: LN->getBasePtr(), Offset: LN->getOffset(), PtrInfo: LN->getPointerInfo(), |
3114 | /*MemoryVT*/ MemVT: MVT::i8, Alignment: LN->getAlign(), MMOFlags: LN->getMemOperand()->getFlags(), |
3115 | AAInfo: LN->getAAInfo(), Ranges: LN->getRanges()); |
3116 | LN = cast<LoadSDNode>(Val: NL.getNode()); |
3117 | } |
3118 | |
3119 | Align ClaimAlign = LN->getAlign(); |
3120 | if (!validateConstPtrAlignment(Ptr: LN->getBasePtr(), NeedAlign: ClaimAlign, dl, DAG)) |
3121 | return replaceMemWithUndef(Op, DAG); |
3122 | |
  // Call LowerUnalignedLoad for all loads; it recognizes loads that
  // don't need extra aligning.
3125 | SDValue LU = LowerUnalignedLoad(Op: SDValue(LN, 0), DAG); |
3126 | if (LoadPred) { |
3127 | SDValue TP = getInstr(MachineOpc: Hexagon::C2_tfrrp, dl, Ty: MemTy, Ops: {LU}, DAG); |
3128 | if (ET == ISD::SEXTLOAD) { |
3129 | TP = DAG.getSExtOrTrunc(Op: TP, DL: dl, VT: Ty); |
3130 | } else if (ET != ISD::NON_EXTLOAD) { |
3131 | TP = DAG.getZExtOrTrunc(Op: TP, DL: dl, VT: Ty); |
3132 | } |
3133 | SDValue Ch = cast<LoadSDNode>(Val: LU.getNode())->getChain(); |
3134 | return DAG.getMergeValues(Ops: {TP, Ch}, dl); |
3135 | } |
3136 | return LU; |
3137 | } |
3138 | |
3139 | SDValue |
3140 | HexagonTargetLowering::LowerStore(SDValue Op, SelectionDAG &DAG) const { |
3141 | const SDLoc &dl(Op); |
3142 | StoreSDNode *SN = cast<StoreSDNode>(Val: Op.getNode()); |
3143 | SDValue Val = SN->getValue(); |
3144 | MVT Ty = ty(Op: Val); |
3145 | |
3146 | if (Ty == MVT::v2i1 || Ty == MVT::v4i1 || Ty == MVT::v8i1) { |
3147 | // Store the exact predicate (all bits). |
3148 | SDValue TR = getInstr(MachineOpc: Hexagon::C2_tfrpr, dl, Ty: MVT::i32, Ops: {Val}, DAG); |
3149 | SDValue NS = DAG.getTruncStore(Chain: SN->getChain(), dl, Val: TR, Ptr: SN->getBasePtr(), |
3150 | SVT: MVT::i8, MMO: SN->getMemOperand()); |
3151 | if (SN->isIndexed()) { |
3152 | NS = DAG.getIndexedStore(OrigStore: NS, dl, Base: SN->getBasePtr(), Offset: SN->getOffset(), |
3153 | AM: SN->getAddressingMode()); |
3154 | } |
3155 | SN = cast<StoreSDNode>(Val: NS.getNode()); |
3156 | } |
3157 | |
3158 | Align ClaimAlign = SN->getAlign(); |
3159 | if (!validateConstPtrAlignment(Ptr: SN->getBasePtr(), NeedAlign: ClaimAlign, dl, DAG)) |
3160 | return replaceMemWithUndef(Op, DAG); |
3161 | |
3162 | MVT StoreTy = SN->getMemoryVT().getSimpleVT(); |
3163 | Align NeedAlign = Subtarget.getTypeAlignment(Ty: StoreTy); |
3164 | if (ClaimAlign < NeedAlign) |
3165 | return expandUnalignedStore(ST: SN, DAG); |
3166 | return SDValue(SN, 0); |
3167 | } |
3168 | |
3169 | SDValue |
3170 | HexagonTargetLowering::LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG) |
3171 | const { |
3172 | LoadSDNode *LN = cast<LoadSDNode>(Val: Op.getNode()); |
3173 | MVT LoadTy = ty(Op); |
3174 | unsigned NeedAlign = Subtarget.getTypeAlignment(Ty: LoadTy).value(); |
3175 | unsigned HaveAlign = LN->getAlign().value(); |
3176 | if (HaveAlign >= NeedAlign) |
3177 | return Op; |
3178 | |
3179 | const SDLoc &dl(Op); |
3180 | const DataLayout &DL = DAG.getDataLayout(); |
3181 | LLVMContext &Ctx = *DAG.getContext(); |
3182 | |
3183 | // If the load aligning is disabled or the load can be broken up into two |
3184 | // smaller legal loads, do the default (target-independent) expansion. |
3185 | bool DoDefault = false; |
3186 | // Handle it in the default way if this is an indexed load. |
3187 | if (!LN->isUnindexed()) |
3188 | DoDefault = true; |
3189 | |
3190 | if (!AlignLoads) { |
3191 | if (allowsMemoryAccessForAlignment(Context&: Ctx, DL, VT: LN->getMemoryVT(), |
3192 | MMO: *LN->getMemOperand())) |
3193 | return Op; |
3194 | DoDefault = true; |
3195 | } |
3196 | if (!DoDefault && (2 * HaveAlign) == NeedAlign) { |
3197 | // The PartTy is the equivalent of "getLoadableTypeOfSize(HaveAlign)". |
3198 | MVT PartTy = HaveAlign <= 8 ? MVT::getIntegerVT(BitWidth: 8 * HaveAlign) |
3199 | : MVT::getVectorVT(VT: MVT::i8, NumElements: HaveAlign); |
3200 | DoDefault = |
3201 | allowsMemoryAccessForAlignment(Context&: Ctx, DL, VT: PartTy, MMO: *LN->getMemOperand()); |
3202 | } |
3203 | if (DoDefault) { |
3204 | std::pair<SDValue, SDValue> P = expandUnalignedLoad(LD: LN, DAG); |
3205 | return DAG.getMergeValues(Ops: {P.first, P.second}, dl); |
3206 | } |
3207 | |
3208 | // The code below generates two loads, both aligned as NeedAlign, and |
3209 | // with the distance of NeedAlign between them. For that to cover the |
3210 | // bits that need to be loaded (and without overlapping), the size of |
3211 | // the loads should be equal to NeedAlign. This is true for all loadable |
3212 | // types, but add an assertion in case something changes in the future. |
3213 | assert(LoadTy.getSizeInBits() == 8*NeedAlign); |
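  // E.g. (illustrative): for a 64-bit load at address A with A % 8 == 4, the
  // two loads cover [A-4, A+4) and [A+4, A+12), and VALIGN then selects the
  // eight bytes starting at A.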
3214 | |
3215 | unsigned LoadLen = NeedAlign; |
3216 | SDValue Base = LN->getBasePtr(); |
3217 | SDValue Chain = LN->getChain(); |
3218 | auto BO = getBaseAndOffset(Addr: Base); |
3219 | unsigned BaseOpc = BO.first.getOpcode(); |
3220 | if (BaseOpc == HexagonISD::VALIGNADDR && BO.second % LoadLen == 0) |
3221 | return Op; |
3222 | |
3223 | if (BO.second % LoadLen != 0) { |
3224 | BO.first = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::i32, N1: BO.first, |
3225 | N2: DAG.getConstant(Val: BO.second % LoadLen, DL: dl, VT: MVT::i32)); |
3226 | BO.second -= BO.second % LoadLen; |
3227 | } |
3228 | SDValue BaseNoOff = (BaseOpc != HexagonISD::VALIGNADDR) |
3229 | ? DAG.getNode(Opcode: HexagonISD::VALIGNADDR, DL: dl, VT: MVT::i32, N1: BO.first, |
3230 | N2: DAG.getConstant(Val: NeedAlign, DL: dl, VT: MVT::i32)) |
3231 | : BO.first; |
3232 | SDValue Base0 = |
3233 | DAG.getMemBasePlusOffset(Base: BaseNoOff, Offset: TypeSize::getFixed(ExactSize: BO.second), DL: dl); |
3234 | SDValue Base1 = DAG.getMemBasePlusOffset( |
3235 | Base: BaseNoOff, Offset: TypeSize::getFixed(ExactSize: BO.second + LoadLen), DL: dl); |
3236 | |
3237 | MachineMemOperand *WideMMO = nullptr; |
3238 | if (MachineMemOperand *MMO = LN->getMemOperand()) { |
3239 | MachineFunction &MF = DAG.getMachineFunction(); |
3240 | WideMMO = MF.getMachineMemOperand( |
3241 | PtrInfo: MMO->getPointerInfo(), F: MMO->getFlags(), Size: 2 * LoadLen, BaseAlignment: Align(LoadLen), |
3242 | AAInfo: MMO->getAAInfo(), Ranges: MMO->getRanges(), SSID: MMO->getSyncScopeID(), |
3243 | Ordering: MMO->getSuccessOrdering(), FailureOrdering: MMO->getFailureOrdering()); |
3244 | } |
3245 | |
3246 | SDValue Load0 = DAG.getLoad(VT: LoadTy, dl, Chain, Ptr: Base0, MMO: WideMMO); |
3247 | SDValue Load1 = DAG.getLoad(VT: LoadTy, dl, Chain, Ptr: Base1, MMO: WideMMO); |
3248 | |
3249 | SDValue Aligned = DAG.getNode(Opcode: HexagonISD::VALIGN, DL: dl, VT: LoadTy, |
3250 | Ops: {Load1, Load0, BaseNoOff.getOperand(i: 0)}); |
3251 | SDValue NewChain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, |
3252 | N1: Load0.getValue(R: 1), N2: Load1.getValue(R: 1)); |
3253 | SDValue M = DAG.getMergeValues(Ops: {Aligned, NewChain}, dl); |
3254 | return M; |
3255 | } |
3256 | |
3257 | SDValue |
3258 | HexagonTargetLowering::LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const { |
3259 | SDValue X = Op.getOperand(i: 0), Y = Op.getOperand(i: 1); |
3260 | auto *CY = dyn_cast<ConstantSDNode>(Val&: Y); |
3261 | if (!CY) |
3262 | return SDValue(); |
3263 | |
3264 | const SDLoc &dl(Op); |
3265 | SDVTList VTs = Op.getNode()->getVTList(); |
3266 | assert(VTs.NumVTs == 2); |
3267 | assert(VTs.VTs[1] == MVT::i1); |
3268 | unsigned Opc = Op.getOpcode(); |
3269 | |
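  // Note: for Y == 1 the overflow bit follows from the result alone: X + 1
  // overflows iff the sum wraps to 0, and X - 1 underflows iff the
  // difference wraps to -1 (all ones).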
  uint64_t VY = CY->getZExtValue();
  assert(VY != 0 && "This should have been folded");
  // Only X +/- 1 is handled.
  if (VY != 1)
    return SDValue();

  if (Opc == ISD::UADDO) {
    SDValue Op = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: VTs.VTs[0], Ops: {X, Y});
    SDValue Ov = DAG.getSetCC(DL: dl, VT: MVT::i1, LHS: Op, RHS: getZero(dl, Ty: ty(Op), DAG),
                              Cond: ISD::SETEQ);
    return DAG.getMergeValues(Ops: {Op, Ov}, dl);
  }
  if (Opc == ISD::USUBO) {
    SDValue Op = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: VTs.VTs[0], Ops: {X, Y});
    SDValue Ov = DAG.getSetCC(DL: dl, VT: MVT::i1, LHS: Op,
                              RHS: DAG.getConstant(Val: -1, DL: dl, VT: ty(Op)), Cond: ISD::SETEQ);
    return DAG.getMergeValues(Ops: {Op, Ov}, dl);
  }
3290 | |
3291 | return SDValue(); |
3292 | } |
3293 | |
3294 | SDValue HexagonTargetLowering::LowerUAddSubOCarry(SDValue Op, |
3295 | SelectionDAG &DAG) const { |
3296 | const SDLoc &dl(Op); |
3297 | unsigned Opc = Op.getOpcode(); |
3298 | SDValue X = Op.getOperand(i: 0), Y = Op.getOperand(i: 1), C = Op.getOperand(i: 2); |
3299 | |
3300 | if (Opc == ISD::UADDO_CARRY) |
3301 | return DAG.getNode(Opcode: HexagonISD::ADDC, DL: dl, VTList: Op.getNode()->getVTList(), |
3302 | Ops: { X, Y, C }); |
3303 | |
3304 | EVT CarryTy = C.getValueType(); |
3305 | SDValue SubC = DAG.getNode(Opcode: HexagonISD::SUBC, DL: dl, VTList: Op.getNode()->getVTList(), |
3306 | Ops: { X, Y, DAG.getLogicalNOT(DL: dl, Val: C, VT: CarryTy) }); |
3307 | SDValue Out[] = { SubC.getValue(R: 0), |
3308 | DAG.getLogicalNOT(DL: dl, Val: SubC.getValue(R: 1), VT: CarryTy) }; |
3309 | return DAG.getMergeValues(Ops: Out, dl); |
3310 | } |
3311 | |
3312 | SDValue |
3313 | HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { |
3314 | SDValue Chain = Op.getOperand(i: 0); |
3315 | SDValue Offset = Op.getOperand(i: 1); |
3316 | SDValue Handler = Op.getOperand(i: 2); |
3317 | SDLoc dl(Op); |
3318 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
3319 | |
3320 | // Mark function as containing a call to EH_RETURN. |
3321 | HexagonMachineFunctionInfo *FuncInfo = |
3322 | DAG.getMachineFunction().getInfo<HexagonMachineFunctionInfo>(); |
3323 | FuncInfo->setHasEHReturn(); |
3324 | |
3325 | unsigned OffsetReg = Hexagon::R28; |
3326 | |
3327 | SDValue StoreAddr = |
3328 | DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: DAG.getRegister(Reg: Hexagon::R30, VT: PtrVT), |
3329 | N2: DAG.getIntPtrConstant(Val: 4, DL: dl)); |
3330 | Chain = DAG.getStore(Chain, dl, Val: Handler, Ptr: StoreAddr, PtrInfo: MachinePointerInfo()); |
3331 | Chain = DAG.getCopyToReg(Chain, dl, Reg: OffsetReg, N: Offset); |
3332 | |
  // Not needed, we already use it as an explicit input to EH_RETURN.
3334 | // MF.getRegInfo().addLiveOut(OffsetReg); |
3335 | |
3336 | return DAG.getNode(Opcode: HexagonISD::EH_RETURN, DL: dl, VT: MVT::Other, Operand: Chain); |
3337 | } |
3338 | |
3339 | SDValue |
3340 | HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { |
3341 | unsigned Opc = Op.getOpcode(); |
3342 | |
3343 | // Handle INLINEASM first. |
3344 | if (Opc == ISD::INLINEASM || Opc == ISD::INLINEASM_BR) |
3345 | return LowerINLINEASM(Op, DAG); |
3346 | |
3347 | if (isHvxOperation(N: Op.getNode(), DAG)) { |
3348 | // If HVX lowering returns nothing, try the default lowering. |
3349 | if (SDValue V = LowerHvxOperation(Op, DAG)) |
3350 | return V; |
3351 | } |
3352 | |
3353 | switch (Opc) { |
3354 | default: |
3355 | #ifndef NDEBUG |
3356 | Op.getNode()->dumpr(&DAG); |
3357 | if (Opc > HexagonISD::OP_BEGIN && Opc < HexagonISD::OP_END) |
3358 | errs() << "Error: check for a non-legal type in this operation\n" ; |
3359 | #endif |
3360 | llvm_unreachable("Should not custom lower this!" ); |
3361 | |
3362 | case ISD::FDIV: |
3363 | return LowerFDIV(Op, DAG); |
3364 | case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); |
3365 | case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, DAG); |
3366 | case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); |
3367 | case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG); |
3368 | case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); |
3369 | case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); |
3370 | case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); |
3371 | case ISD::BITCAST: return LowerBITCAST(Op, DAG); |
3372 | case ISD::LOAD: return LowerLoad(Op, DAG); |
3373 | case ISD::STORE: return LowerStore(Op, DAG); |
3374 | case ISD::UADDO: |
3375 | case ISD::USUBO: return LowerUAddSubO(Op, DAG); |
3376 | case ISD::UADDO_CARRY: |
3377 | case ISD::USUBO_CARRY: return LowerUAddSubOCarry(Op, DAG); |
3378 | case ISD::SRA: |
3379 | case ISD::SHL: |
3380 | case ISD::SRL: return LowerVECTOR_SHIFT(Op, DAG); |
3381 | case ISD::ROTL: return LowerROTL(Op, DAG); |
3382 | case ISD::ConstantPool: return LowerConstantPool(Op, DAG); |
3383 | case ISD::JumpTable: return LowerJumpTable(Op, DAG); |
3384 | case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); |
3385 | case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); |
3386 | case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); |
3387 | case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); |
3388 | case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG); |
3389 | case ISD::GlobalAddress: return LowerGLOBALADDRESS(Op, DAG); |
3390 | case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); |
3391 | case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); |
3392 | case ISD::VACOPY: return LowerVACOPY(Op, DAG); |
3393 | case ISD::VASTART: return LowerVASTART(Op, DAG); |
3394 | case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); |
3395 | case ISD::SETCC: return LowerSETCC(Op, DAG); |
3396 | case ISD::VSELECT: return LowerVSELECT(Op, DAG); |
3397 | case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); |
3398 | case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG); |
3399 | case ISD::PREFETCH: return LowerPREFETCH(Op, DAG); |
3400 | case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG); |
3401 | case ISD::READSTEADYCOUNTER: return LowerREADSTEADYCOUNTER(Op, DAG); |
3403 | } |
3404 | |
3405 | return SDValue(); |
3406 | } |
3407 | |
3408 | void |
3409 | HexagonTargetLowering::LowerOperationWrapper(SDNode *N, |
3410 | SmallVectorImpl<SDValue> &Results, |
3411 | SelectionDAG &DAG) const { |
3412 | if (isHvxOperation(N, DAG)) { |
3413 | LowerHvxOperationWrapper(N, Results, DAG); |
3414 | if (!Results.empty()) |
3415 | return; |
3416 | } |
3417 | |
3418 | SDValue Op(N, 0); |
3419 | unsigned Opc = N->getOpcode(); |
3420 | |
3421 | switch (Opc) { |
3422 | case HexagonISD::SSAT: |
3423 | case HexagonISD::USAT: |
3424 | Results.push_back(Elt: opJoin(Ops: SplitVectorOp(Op, DAG), dl: SDLoc(Op), DAG)); |
3425 | break; |
3426 | case ISD::STORE: |
3427 | // We are only custom-lowering stores to verify the alignment of the |
3428 | // address if it is a compile-time constant. Since a store can be |
3429 | // modified during type-legalization (the value being stored may need |
3430 | // legalization), return empty Results here to indicate that we don't |
3431 | // really make any changes in the custom lowering. |
3432 | return; |
3433 | default: |
3434 | TargetLowering::LowerOperationWrapper(N, Results, DAG); |
3435 | break; |
3436 | } |
3437 | } |
3438 | |
3439 | void |
3440 | HexagonTargetLowering::ReplaceNodeResults(SDNode *N, |
3441 | SmallVectorImpl<SDValue> &Results, |
3442 | SelectionDAG &DAG) const { |
3443 | if (isHvxOperation(N, DAG)) { |
3444 | ReplaceHvxNodeResults(N, Results, DAG); |
3445 | if (!Results.empty()) |
3446 | return; |
3447 | } |
3448 | |
3449 | const SDLoc &dl(N); |
3450 | switch (N->getOpcode()) { |
3451 | case ISD::SRL: |
3452 | case ISD::SRA: |
3453 | case ISD::SHL: |
3454 | return; |
3455 | case ISD::BITCAST: |
3456 | // Handle a bitcast from v8i1 to i8. |
3457 | if (N->getValueType(ResNo: 0) == MVT::i8) { |
3458 | if (N->getOperand(Num: 0).getValueType() == MVT::v8i1) { |
3459 | SDValue P = getInstr(MachineOpc: Hexagon::C2_tfrpr, dl, Ty: MVT::i32, |
3460 | Ops: N->getOperand(Num: 0), DAG); |
3461 | SDValue T = DAG.getAnyExtOrTrunc(Op: P, DL: dl, VT: MVT::i8); |
3462 | Results.push_back(Elt: T); |
3463 | } |
3464 | } |
3465 | break; |
3466 | } |
3467 | } |
3468 | |
3469 | SDValue |
3470 | HexagonTargetLowering::PerformDAGCombine(SDNode *N, |
3471 | DAGCombinerInfo &DCI) const { |
3472 | if (isHvxOperation(N, DAG&: DCI.DAG)) { |
3473 | if (SDValue V = PerformHvxDAGCombine(N, DCI)) |
3474 | return V; |
3475 | return SDValue(); |
3476 | } |
3477 | |
3478 | SDValue Op(N, 0); |
3479 | const SDLoc &dl(Op); |
3480 | unsigned Opc = Op.getOpcode(); |
3481 | |
3482 | if (Opc == ISD::TRUNCATE) { |
3483 | SDValue Op0 = Op.getOperand(i: 0); |
3484 | // fold (truncate (build pair x, y)) -> (truncate x) or x |
3485 | if (Op0.getOpcode() == ISD::BUILD_PAIR) { |
3486 | EVT TruncTy = Op.getValueType(); |
3487 | SDValue Elem0 = Op0.getOperand(i: 0); |
3488 | // if we match the low element of the pair, just return it. |
3489 | if (Elem0.getValueType() == TruncTy) |
3490 | return Elem0; |
3491 | // otherwise, if the low part is still too large, apply the truncate. |
3492 | if (Elem0.getValueType().bitsGT(VT: TruncTy)) |
3493 | return DCI.DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: TruncTy, Operand: Elem0); |
3494 | } |
3495 | } |
3496 | |
3497 | if (DCI.isBeforeLegalizeOps()) |
3498 | return SDValue(); |
3499 | |
3500 | if (Opc == HexagonISD::P2D) { |
3501 | SDValue P = Op.getOperand(i: 0); |
3502 | switch (P.getOpcode()) { |
3503 | case HexagonISD::PTRUE: |
3504 | return DCI.DAG.getConstant(Val: -1, DL: dl, VT: ty(Op)); |
3505 | case HexagonISD::PFALSE: |
3506 | return getZero(dl, Ty: ty(Op), DAG&: DCI.DAG); |
3507 | default: |
3508 | break; |
3509 | } |
3510 | } else if (Opc == ISD::VSELECT) { |
3511 | // This is pretty much duplicated in HexagonISelLoweringHVX... |
3512 | // |
3513 | // (vselect (xor x, ptrue), v0, v1) -> (vselect x, v1, v0) |
3514 | SDValue Cond = Op.getOperand(i: 0); |
3515 | if (Cond->getOpcode() == ISD::XOR) { |
3516 | SDValue C0 = Cond.getOperand(i: 0), C1 = Cond.getOperand(i: 1); |
3517 | if (C1->getOpcode() == HexagonISD::PTRUE) { |
3518 | SDValue VSel = DCI.DAG.getNode(Opcode: ISD::VSELECT, DL: dl, VT: ty(Op), N1: C0, |
3519 | N2: Op.getOperand(i: 2), N3: Op.getOperand(i: 1)); |
3520 | return VSel; |
3521 | } |
3522 | } |
  } else if (Opc == ISD::OR) {
3537 | // fold (or (shl xx, s), (zext y)) -> (COMBINE (shl xx, s-32), y) |
3538 | // if s >= 32 |
3539 | auto fold0 = [&, this](SDValue Op) { |
3540 | if (ty(Op) != MVT::i64) |
3541 | return SDValue(); |
3542 | SDValue Shl = Op.getOperand(i: 0); |
3543 | SDValue Zxt = Op.getOperand(i: 1); |
3544 | if (Shl.getOpcode() != ISD::SHL) |
3545 | std::swap(a&: Shl, b&: Zxt); |
3546 | |
3547 | if (Shl.getOpcode() != ISD::SHL || Zxt.getOpcode() != ISD::ZERO_EXTEND) |
3548 | return SDValue(); |
3549 | |
3550 | SDValue Z = Zxt.getOperand(i: 0); |
3551 | auto *Amt = dyn_cast<ConstantSDNode>(Val: Shl.getOperand(i: 1)); |
3552 | if (Amt && Amt->getZExtValue() >= 32 && ty(Op: Z).getSizeInBits() <= 32) { |
3553 | unsigned A = Amt->getZExtValue(); |
3554 | SDValue S = Shl.getOperand(i: 0); |
3555 | SDValue T0 = DCI.DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: ty(Op: S), N1: S, |
3556 | N2: DCI.DAG.getConstant(Val: A - 32, DL: dl, VT: MVT::i32)); |
3557 | SDValue T1 = DCI.DAG.getZExtOrTrunc(Op: T0, DL: dl, VT: MVT::i32); |
3558 | SDValue T2 = DCI.DAG.getZExtOrTrunc(Op: Z, DL: dl, VT: MVT::i32); |
3559 | return DCI.DAG.getNode(Opcode: HexagonISD::COMBINE, DL: dl, VT: MVT::i64, Ops: {T1, T2}); |
3560 | } |
3561 | return SDValue(); |
3562 | }; |
3563 | |
3564 | if (SDValue R = fold0(Op)) |
3565 | return R; |
3566 | } |
3567 | |
3568 | return SDValue(); |
3569 | } |
3570 | |
3571 | /// Returns relocation base for the given PIC jumptable. |
3572 | SDValue |
3573 | HexagonTargetLowering::getPICJumpTableRelocBase(SDValue Table, |
3574 | SelectionDAG &DAG) const { |
3575 | int Idx = cast<JumpTableSDNode>(Val&: Table)->getIndex(); |
3576 | EVT VT = Table.getValueType(); |
3577 | SDValue T = DAG.getTargetJumpTable(JTI: Idx, VT, TargetFlags: HexagonII::MO_PCREL); |
3578 | return DAG.getNode(Opcode: HexagonISD::AT_PCREL, DL: SDLoc(Table), VT, Operand: T); |
3579 | } |
3580 | |
3581 | //===----------------------------------------------------------------------===// |
3582 | // Inline Assembly Support |
3583 | //===----------------------------------------------------------------------===// |
3584 | |
3585 | TargetLowering::ConstraintType |
3586 | HexagonTargetLowering::getConstraintType(StringRef Constraint) const { |
3587 | if (Constraint.size() == 1) { |
3588 | switch (Constraint[0]) { |
3589 | case 'q': |
3590 | case 'v': |
3591 | if (Subtarget.useHVXOps()) |
3592 | return C_RegisterClass; |
3593 | break; |
3594 | case 'a': |
3595 | return C_RegisterClass; |
3596 | default: |
3597 | break; |
3598 | } |
3599 | } |
3600 | return TargetLowering::getConstraintType(Constraint); |
3601 | } |
3602 | |
3603 | std::pair<unsigned, const TargetRegisterClass*> |
3604 | HexagonTargetLowering::getRegForInlineAsmConstraint( |
3605 | const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const { |
3606 | |
3607 | if (Constraint.size() == 1) { |
3608 | switch (Constraint[0]) { |
3609 | case 'r': // R0-R31 |
3610 | switch (VT.SimpleTy) { |
3611 | default: |
3612 | return {0u, nullptr}; |
3613 | case MVT::i1: |
3614 | case MVT::i8: |
3615 | case MVT::i16: |
3616 | case MVT::i32: |
3617 | case MVT::f32: |
3618 | return {0u, &Hexagon::IntRegsRegClass}; |
3619 | case MVT::i64: |
3620 | case MVT::f64: |
3621 | return {0u, &Hexagon::DoubleRegsRegClass}; |
3622 | } |
3623 | break; |
3624 | case 'a': // M0-M1 |
3625 | if (VT != MVT::i32) |
3626 | return {0u, nullptr}; |
3627 | return {0u, &Hexagon::ModRegsRegClass}; |
3628 | case 'q': // q0-q3 |
3629 | switch (VT.getSizeInBits()) { |
3630 | default: |
3631 | return {0u, nullptr}; |
3632 | case 64: |
3633 | case 128: |
3634 | return {0u, &Hexagon::HvxQRRegClass}; |
3635 | } |
3636 | break; |
3637 | case 'v': // V0-V31 |
3638 | switch (VT.getSizeInBits()) { |
3639 | default: |
3640 | return {0u, nullptr}; |
3641 | case 512: |
3642 | return {0u, &Hexagon::HvxVRRegClass}; |
3643 | case 1024: |
3644 | if (Subtarget.hasV60Ops() && Subtarget.useHVX128BOps()) |
3645 | return {0u, &Hexagon::HvxVRRegClass}; |
3646 | return {0u, &Hexagon::HvxWRRegClass}; |
3647 | case 2048: |
3648 | return {0u, &Hexagon::HvxWRRegClass}; |
3649 | } |
3650 | break; |
3651 | default: |
3652 | return {0u, nullptr}; |
3653 | } |
3654 | } |
3655 | |
3656 | return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); |
3657 | } |
3658 | |
3659 | /// isFPImmLegal - Returns true if the target can instruction select the |
3660 | /// specified FP immediate natively. If false, the legalizer will |
3661 | /// materialize the FP immediate as a load from a constant pool. |
3662 | bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, |
3663 | bool ForCodeSize) const { |
3664 | return true; |
3665 | } |
3666 | |
3667 | /// isLegalAddressingMode - Return true if the addressing mode represented by |
3668 | /// AM is legal for this target, for a load/store of the specified type. |
3669 | bool HexagonTargetLowering::isLegalAddressingMode(const DataLayout &DL, |
3670 | const AddrMode &AM, Type *Ty, |
3671 | unsigned AS, Instruction *I) const { |
3672 | if (Ty->isSized()) { |
3673 | // When LSR detects uses of the same base address to access different |
3674 | // types (e.g. unions), it will assume a conservative type for these |
3675 | // uses: |
3676 | // LSR Use: Kind=Address of void in addrspace(4294967295), ... |
3677 | // The type Ty passed here would then be "void". Skip the alignment |
3678 | // checks, but do not return false right away, since that confuses |
3679 | // LSR into crashing. |
3680 | Align A = DL.getABITypeAlign(Ty); |
3681 | // The base offset must be a multiple of the alignment. |
3682 | if (!isAligned(Lhs: A, SizeInBytes: AM.BaseOffs)) |
3683 | return false; |
3684 | // The shifted offset must fit in 11 bits. |
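    // E.g. (illustrative): with 4-byte alignment the offset must be a
    // multiple of 4 in [-4096, 4092].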
3685 | if (!isInt<11>(x: AM.BaseOffs >> Log2(A))) |
3686 | return false; |
3687 | } |
3688 | |
3689 | // No global is ever allowed as a base. |
3690 | if (AM.BaseGV) |
3691 | return false; |
3692 | |
3693 | int Scale = AM.Scale; |
3694 | if (Scale < 0) |
3695 | Scale = -Scale; |
3696 | switch (Scale) { |
3697 | case 0: // No scale reg, "r+i", "r", or just "i". |
3698 | break; |
3699 | default: // No scaled addressing mode. |
3700 | return false; |
3701 | } |
3702 | return true; |
3703 | } |
3704 | |
3705 | /// Return true if folding a constant offset with the given GlobalAddress is |
3706 | /// legal. It is frequently not legal in PIC relocation models. |
3707 | bool HexagonTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) |
3708 | const { |
3709 | return HTM.getRelocationModel() == Reloc::Static; |
3710 | } |
3711 | |
/// isLegalICmpImmediate - Return true if the specified immediate is a legal
/// icmp immediate, that is, the target has icmp instructions which can compare
3714 | /// a register against the immediate without having to materialize the |
3715 | /// immediate into a register. |
3716 | bool HexagonTargetLowering::isLegalICmpImmediate(int64_t Imm) const { |
3717 | return Imm >= -512 && Imm <= 511; |
3718 | } |

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
    SDValue Callee,
    CallingConv::ID CalleeCC,
    bool IsVarArg,
    bool IsCalleeStructRet,
    bool IsCallerStructRet,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins,
    SelectionDAG &DAG) const {
  const Function &CallerF = DAG.getMachineFunction().getFunction();
  CallingConv::ID CallerCC = CallerF.getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;

  // ***************************************************************************
  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes.
  // ***************************************************************************

  // If this is a tail call via a function pointer, then don't do it!
  if (!isa<GlobalAddressSDNode>(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee)) {
    return false;
  }

  // Do not optimize if the calling conventions do not match, unless both
  // caller and callee use the C or Fast calling convention.
  if (!CCMatch) {
    bool R = (CallerCC == CallingConv::C || CallerCC == CallingConv::Fast);
    bool E = (CalleeCC == CallingConv::C || CalleeCC == CallingConv::Fast);
    // If both caller and callee are C or Fast, the mismatch is acceptable.
    if (!R || !E)
      return false;
  }

  // Do not tail call optimize vararg calls.
  if (IsVarArg)
    return false;

  // Also avoid tail call optimization if either caller or callee uses struct
  // return semantics.
  if (IsCalleeStructRet || IsCallerStructRet)
    return false;

  // In addition to the cases above, tail call optimization is also disabled
  // when the calling convention requires at least one outgoing argument to be
  // passed on the stack. We cannot check that here because at this point the
  // information is not yet available.
  return true;
}
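
// Putting the checks above together (hypothetical cases for illustration):
//   direct call, matching conventions, no varargs, no sret  -> eligible
//   fastcc caller calling a ccc callee (or vice versa)      -> eligible
//   any call through a function pointer                     -> not eligible
//   caller or callee returning via sret                     -> not eligible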

/// Returns the target specific optimal type for load and store operations as
/// a result of memset, memcpy, and memmove lowering.
///
/// If DstAlign is zero, the destination is assumed to be able to satisfy any
/// alignment constraint. Similarly, if SrcAlign is zero, there is no need to
/// check it against an alignment requirement, probably because the source
/// does not need to be loaded. If 'IsMemset' is true, this is expanding a
/// memset. If 'ZeroMemset' is true, it is a memset of zero. 'MemcpyStrSrc'
/// indicates whether the memcpy source is constant so it does not need to be
/// loaded. It returns EVT::Other if the type should be determined using
/// generic target-independent logic.
EVT HexagonTargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const {
  if (Op.size() >= 8 && Op.isAligned(Align(8)))
    return MVT::i64;
  if (Op.size() >= 4 && Op.isAligned(Align(4)))
    return MVT::i32;
  if (Op.size() >= 2 && Op.isAligned(Align(2)))
    return MVT::i16;
  return MVT::Other;
}
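
// For example, a 16-byte memcpy whose operands are known to be 8-byte aligned
// is emitted as two i64 load/store pairs, while a 6-byte copy with only
// 2-byte alignment falls back to i16 operations. The sizes and alignments
// here are illustrative, not taken from a specific test.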

bool HexagonTargetLowering::allowsMemoryAccess(
    LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
    Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const {
  MVT SVT = VT.getSimpleVT();
  if (Subtarget.isHVXVectorType(SVT, /*IncludeBool=*/true))
    return allowsHvxMemoryAccess(SVT, Flags, Fast);
  return TargetLoweringBase::allowsMemoryAccess(
      Context, DL, VT, AddrSpace, Alignment, Flags, Fast);
}

bool HexagonTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment,
    MachineMemOperand::Flags Flags, unsigned *Fast) const {
  MVT SVT = VT.getSimpleVT();
  if (Subtarget.isHVXVectorType(SVT, /*IncludeBool=*/true))
    return allowsHvxMisalignedMemoryAccesses(SVT, Flags, Fast);
  if (Fast)
    *Fast = 0;
  return false;
}

std::pair<const TargetRegisterClass*, uint8_t>
HexagonTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                               MVT VT) const {
  if (Subtarget.isHVXVectorType(VT, /*IncludeBool=*/true)) {
    unsigned BitWidth = VT.getSizeInBits();
    unsigned VecWidth = Subtarget.getVectorLength() * 8;

    if (VT.getVectorElementType() == MVT::i1)
      return std::make_pair(&Hexagon::HvxQRRegClass, 1);
    if (BitWidth == VecWidth)
      return std::make_pair(&Hexagon::HvxVRRegClass, 1);
    assert(BitWidth == 2 * VecWidth);
    return std::make_pair(&Hexagon::HvxWRRegClass, 1);
  }

  return TargetLowering::findRepresentativeClass(TRI, VT);
}
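
// As an illustration, with 128-byte HVX vectors (VecWidth = 1024 bits):
//   MVT::v32i32 (1024 bits) -> HvxVR (single vector register)
//   MVT::v64i32 (2048 bits) -> HvxWR (vector register pair)
//   boolean vectors such as MVT::v128i1 -> HvxQR (predicate register)
// The 64-byte configuration halves these widths accordingly.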

bool HexagonTargetLowering::shouldReduceLoadWidth(SDNode *Load,
                                                  ISD::LoadExtType ExtTy,
                                                  EVT NewVT) const {
  // TODO: This may be worth removing. Check regression tests for diffs.
  if (!TargetLoweringBase::shouldReduceLoadWidth(Load, ExtTy, NewVT))
    return false;

  auto *L = cast<LoadSDNode>(Load);
  std::pair<SDValue,int> BO = getBaseAndOffset(L->getBasePtr());
  // Small-data object, do not shrink.
  if (BO.first.getOpcode() == HexagonISD::CONST32_GP)
    return false;
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(BO.first)) {
    auto &HTM = static_cast<const HexagonTargetMachine&>(getTargetMachine());
    const auto *GO = dyn_cast_or_null<const GlobalObject>(GA->getGlobal());
    return !GO || !HTM.getObjFileLowering()->isGlobalInSmallSection(GO, HTM);
  }
  return true;
}
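
// Example of the effect: for a pattern like (i32 (trunc (i64 load %p))) the
// generic combiner may want to narrow the load to i32. This hook vetoes the
// narrowing when %p resolves to a global placed in the small-data section,
// keeping the original wider GP-relative access. (The pattern shown is a
// sketch, not a concrete test case.)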

void HexagonTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                          SDNode *Node) const {
  AdjustHvxInstrPostInstrSelection(MI, Node);
}

Value *HexagonTargetLowering::emitLoadLinked(IRBuilderBase &Builder,
                                             Type *ValueTy, Value *Addr,
                                             AtomicOrdering Ord) const {
  BasicBlock *BB = Builder.GetInsertBlock();
  Module *M = BB->getParent()->getParent();
  unsigned SZ = ValueTy->getPrimitiveSizeInBits();
  assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
  Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
                                   : Intrinsic::hexagon_L4_loadd_locked;
  Function *Fn = Intrinsic::getDeclaration(M, IntID);

  Value *Call = Builder.CreateCall(Fn, Addr, "larx");

  return Builder.CreateBitCast(Call, ValueTy);
}
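
// The IR produced for a 32-bit load-linked looks roughly like:
//   %larx = call i32 @llvm.hexagon.L2.loadw.locked(ptr %addr)
// with the 64-bit variant using @llvm.hexagon.L4.loadd.locked. The final
// bitcast is a no-op unless ValueTy is a non-integer type of the same width.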

/// Perform a store-conditional operation to Addr. Return the status of the
/// store. This should be 0 if the store succeeded, non-zero otherwise.
Value *HexagonTargetLowering::emitStoreConditional(IRBuilderBase &Builder,
                                                   Value *Val, Value *Addr,
                                                   AtomicOrdering Ord) const {
  BasicBlock *BB = Builder.GetInsertBlock();
  Module *M = BB->getParent()->getParent();
  Type *Ty = Val->getType();
  unsigned SZ = Ty->getPrimitiveSizeInBits();

  Type *CastTy = Builder.getIntNTy(SZ);
  assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
  Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
                                   : Intrinsic::hexagon_S4_stored_locked;
  Function *Fn = Intrinsic::getDeclaration(M, IntID);

  Val = Builder.CreateBitCast(Val, CastTy);

  Value *Call = Builder.CreateCall(Fn, {Addr, Val}, "stcx");
  Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0));
  Value *Ext = Builder.CreateZExt(Cmp, Type::getInt32Ty(M->getContext()));
  return Ext;
}
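
// A 32-bit store-conditional therefore expands to IR along these lines:
//   %stcx = call i32 @llvm.hexagon.S2.storew.locked(ptr %addr, i32 %val)
//   %cmp  = icmp eq i32 %stcx, 0
//   %ext  = zext i1 %cmp to i32
// The icmp/zext pair maps the intrinsic's predicate-style result (assumed
// non-zero on a successful store, matching the hardware predicate) onto the
// "0 on success, non-zero on failure" convention documented above.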

TargetLowering::AtomicExpansionKind
HexagonTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
  // Do not expand loads and stores that don't exceed 64 bits.
  return LI->getType()->getPrimitiveSizeInBits() > 64
             ? AtomicExpansionKind::LLOnly
             : AtomicExpansionKind::None;
}

TargetLowering::AtomicExpansionKind
HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
  // Do not expand loads and stores that don't exceed 64 bits.
  return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64
             ? AtomicExpansionKind::Expand
             : AtomicExpansionKind::None;
}

TargetLowering::AtomicExpansionKind
HexagonTargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *AI) const {
  return AtomicExpansionKind::LLSC;
}
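
// Returning LLSC makes AtomicExpandPass rewrite every cmpxchg into a loop
// built from the emitLoadLinked/emitStoreConditional hooks above, roughly:
//   retry:
//     %old = <load-linked> %addr
//     br (%old != %expected), done, store
//   store:
//     %status = <store-conditional> %addr, %new
//     br (%status != 0), retry, done
//   done:
// (Pseudo-IR sketch of the generic expansion, not Hexagon-specific output.)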