1//===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation -----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the interfaces that Hexagon uses to lower LLVM code
10// into a selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "HexagonISelLowering.h"
15#include "Hexagon.h"
16#include "HexagonMachineFunctionInfo.h"
17#include "HexagonRegisterInfo.h"
18#include "HexagonSubtarget.h"
19#include "HexagonTargetMachine.h"
20#include "HexagonTargetObjectFile.h"
21#include "llvm/ADT/APInt.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/StringSwitch.h"
25#include "llvm/CodeGen/CallingConvLower.h"
26#include "llvm/CodeGen/MachineFrameInfo.h"
27#include "llvm/CodeGen/MachineFunction.h"
28#include "llvm/CodeGen/MachineMemOperand.h"
29#include "llvm/CodeGen/MachineRegisterInfo.h"
30#include "llvm/CodeGen/SelectionDAG.h"
31#include "llvm/CodeGen/TargetCallingConv.h"
32#include "llvm/CodeGen/ValueTypes.h"
33#include "llvm/IR/BasicBlock.h"
34#include "llvm/IR/CallingConv.h"
35#include "llvm/IR/DataLayout.h"
36#include "llvm/IR/DerivedTypes.h"
37#include "llvm/IR/DiagnosticInfo.h"
38#include "llvm/IR/DiagnosticPrinter.h"
39#include "llvm/IR/Function.h"
40#include "llvm/IR/GlobalValue.h"
41#include "llvm/IR/IRBuilder.h"
42#include "llvm/IR/InlineAsm.h"
43#include "llvm/IR/Instructions.h"
44#include "llvm/IR/IntrinsicInst.h"
45#include "llvm/IR/Intrinsics.h"
46#include "llvm/IR/IntrinsicsHexagon.h"
47#include "llvm/IR/Module.h"
48#include "llvm/IR/Type.h"
49#include "llvm/IR/Value.h"
50#include "llvm/Support/Casting.h"
51#include "llvm/Support/CodeGen.h"
52#include "llvm/Support/CommandLine.h"
53#include "llvm/Support/Debug.h"
54#include "llvm/Support/ErrorHandling.h"
55#include "llvm/Support/MathExtras.h"
56#include "llvm/Support/raw_ostream.h"
57#include "llvm/Target/TargetMachine.h"
58#include <algorithm>
59#include <cassert>
60#include <cstdint>
61#include <limits>
62#include <utility>
63
64using namespace llvm;
65
66#define DEBUG_TYPE "hexagon-lowering"
67
68static cl::opt<bool> EmitJumpTables("hexagon-emit-jump-tables",
69 cl::init(Val: true), cl::Hidden,
70 cl::desc("Control jump table emission on Hexagon target"));
71
72static cl::opt<bool>
73 EnableHexSDNodeSched("enable-hexagon-sdnode-sched", cl::Hidden,
74 cl::desc("Enable Hexagon SDNode scheduling"));
75
76static cl::opt<int> MinimumJumpTables("minimum-jump-tables", cl::Hidden,
77 cl::init(Val: 5),
78 cl::desc("Set minimum jump tables"));
79
80static cl::opt<bool>
81 ConstantLoadsToImm("constant-loads-to-imm", cl::Hidden, cl::init(Val: true),
82 cl::desc("Convert constant loads to immediate values."));
83
84static cl::opt<bool> AlignLoads("hexagon-align-loads",
85 cl::Hidden, cl::init(Val: false),
86 cl::desc("Rewrite unaligned loads as a pair of aligned loads"));
87
88static cl::opt<bool>
89 DisableArgsMinAlignment("hexagon-disable-args-min-alignment", cl::Hidden,
90 cl::init(Val: false),
91 cl::desc("Disable minimum alignment of 1 for "
92 "arguments passed by value on stack"));
93
94// Implement calling convention for Hexagon.
95
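// CC_SkipOdd ensures that the next free argument register has an even
// register number, because 64-bit values are passed in aligned register
// pairs (R1:0, R3:2, R5:4). For example, if R0 has already been taken by an
// i32 argument, a following i64 argument must go in R3:2, so R1 is allocated
// here as padding and left unused.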
96static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
97 CCValAssign::LocInfo &LocInfo,
98 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
99 static const MCPhysReg ArgRegs[] = {
100 Hexagon::R0, Hexagon::R1, Hexagon::R2,
101 Hexagon::R3, Hexagon::R4, Hexagon::R5
102 };
103 const unsigned NumArgRegs = std::size(ArgRegs);
104 unsigned RegNum = State.getFirstUnallocated(Regs: ArgRegs);
105
106 // RegNum is an index into ArgRegs: skip a register if RegNum is odd.
107 if (RegNum != NumArgRegs && RegNum % 2 == 1)
108 State.AllocateReg(Reg: ArgRegs[RegNum]);
109
110 // Always return false here, as this function only makes sure that the first
111 // unallocated register has an even register number and does not actually
112 // allocate a register for the current argument.
113 return false;
114}
115
116#include "HexagonGenCallingConv.inc"
117
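// Boolean vectors are passed in vector-of-byte registers rather than as i1
// elements: without HVX the value is broken into v8i1 pieces carried in v8i8
// registers; with HVX the pieces are v64i1 or v128i1 depending on the vector
// length mode. For example, without HVX a v32i1 argument is split into
// 4 x v8i1 intermediates passed as 4 x v8i8.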
118unsigned HexagonTargetLowering::getVectorTypeBreakdownForCallingConv(
119 LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
120 unsigned &NumIntermediates, MVT &RegisterVT) const {
121
122 bool isBoolVector = VT.getVectorElementType() == MVT::i1;
123 bool isPowerOf2 = VT.isPow2VectorType();
124 unsigned NumElts = VT.getVectorNumElements();
125
126 // Split vectors of type vXi1 into (X/8) vectors of type v8i1,
127 // where X is divisible by 8.
128 if (isBoolVector && !Subtarget.useHVXOps() && isPowerOf2 && NumElts >= 8) {
129 RegisterVT = MVT::v8i8;
130 IntermediateVT = MVT::v8i1;
131 NumIntermediates = NumElts / 8;
132 return NumIntermediates;
133 }
134
135 // In HVX 64-byte mode, vectors of type vXi1 are split into (X / 64) vectors
136 // of type v64i1, provided that X is divisible by 64.
137 if (isBoolVector && Subtarget.useHVX64BOps() && isPowerOf2 && NumElts >= 64) {
138 RegisterVT = MVT::v64i8;
139 IntermediateVT = MVT::v64i1;
140 NumIntermediates = NumElts / 64;
141 return NumIntermediates;
142 }
143
144 // In HVX 128-byte mode, vectors of type vXi1 are split into (X / 128) vectors
145 // of type v128i1, provided that X is divisible by 128.
146 if (isBoolVector && Subtarget.useHVX128BOps() && isPowerOf2 &&
147 NumElts >= 128) {
148 RegisterVT = MVT::v128i8;
149 IntermediateVT = MVT::v128i1;
150 NumIntermediates = NumElts / 128;
151 return NumIntermediates;
152 }
153
154 return TargetLowering::getVectorTypeBreakdownForCallingConv(
155 Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
156}
157
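// Returns the {register type, number of registers} pair used to pass a vXi1
// mask value on the current subtarget, mirroring the breakdown above. For
// example, with HVX in 128-byte mode a v128i1 value travels in a single
// v128i8 register, while without HVX a v32i1 value needs four v8i8 registers.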
158std::pair<MVT, unsigned>
159HexagonTargetLowering::handleMaskRegisterForCallingConv(
160 const HexagonSubtarget &Subtarget, EVT VT) const {
161 assert(VT.getVectorElementType() == MVT::i1);
162
163 const unsigned NumElems = VT.getVectorNumElements();
164
165 if (!VT.isPow2VectorType())
166 return {MVT::INVALID_SIMPLE_VALUE_TYPE, 0};
167
168 if (!Subtarget.useHVXOps() && NumElems >= 8)
169 return {MVT::v8i8, NumElems / 8};
170
171 if (Subtarget.useHVX64BOps() && NumElems >= 64)
172 return {MVT::v64i8, NumElems / 64};
173
174 if (Subtarget.useHVX128BOps() && NumElems >= 128)
175 return {MVT::v128i8, NumElems / 128};
176
177 return {MVT::INVALID_SIMPLE_VALUE_TYPE, 0};
178}
179
180MVT HexagonTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
181 CallingConv::ID CC,
182 EVT VT) const {
183
184 if (VT.isVector() && VT.getVectorElementType() == MVT::i1) {
185 auto [RegisterVT, NumRegisters] =
186 handleMaskRegisterForCallingConv(Subtarget, VT);
187 if (RegisterVT != MVT::INVALID_SIMPLE_VALUE_TYPE)
188 return RegisterVT;
189 }
190
191 return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
192}
193
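// The only intrinsic custom-lowered here is @llvm.thread.pointer, which is
// turned into a HexagonISD::THREAD_POINTER node; the thread pointer itself
// lives in the UGP register (see the TLS lowering further below).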
194SDValue
195HexagonTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG)
196 const {
197 unsigned IntNo = Op.getConstantOperandVal(i: 0);
198 SDLoc dl(Op);
199 switch (IntNo) {
200 default:
201 return SDValue(); // Don't custom lower most intrinsics.
202 case Intrinsic::thread_pointer: {
203 EVT PtrVT = getPointerTy(DL: DAG.getDataLayout());
204 return DAG.getNode(Opcode: HexagonISD::THREAD_POINTER, DL: dl, VT: PtrVT);
205 }
206 }
207}
208
209/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
210/// by "Src" to address "Dst" of size "Size". Alignment information is
211/// specified by the specific parameter attribute. The copy will be passed as
212/// a byval function parameter. Sometimes what we are copying is the end of a
213/// larger object, the part that does not fit in registers.
214static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
215 SDValue Chain, ISD::ArgFlagsTy Flags,
216 SelectionDAG &DAG, const SDLoc &dl) {
217 SDValue SizeNode = DAG.getConstant(Val: Flags.getByValSize(), DL: dl, VT: MVT::i32);
218 return DAG.getMemcpy(
219 Chain, dl, Dst, Src, Size: SizeNode, Alignment: Flags.getNonZeroByValAlign(),
220 /*isVolatile=*/isVol: false, /*AlwaysInline=*/false,
221 /*CI=*/nullptr, OverrideTailCall: std::nullopt, DstPtrInfo: MachinePointerInfo(), SrcPtrInfo: MachinePointerInfo());
222}
223
224bool
225HexagonTargetLowering::CanLowerReturn(
226 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
227 const SmallVectorImpl<ISD::OutputArg> &Outs,
228 LLVMContext &Context, const Type *RetTy) const {
229 SmallVector<CCValAssign, 16> RVLocs;
230 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
231
232 if (MF.getSubtarget<HexagonSubtarget>().useHVXOps())
233 return CCInfo.CheckReturn(Outs, Fn: RetCC_Hexagon_HVX);
234 return CCInfo.CheckReturn(Outs, Fn: RetCC_Hexagon);
235}
236
// LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
// passed by value, the function prototype is modified to return void and
// the value is stored in memory pointed to by a pointer passed by the caller.
240SDValue
241HexagonTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
242 bool IsVarArg,
243 const SmallVectorImpl<ISD::OutputArg> &Outs,
244 const SmallVectorImpl<SDValue> &OutVals,
245 const SDLoc &dl, SelectionDAG &DAG) const {
246 // CCValAssign - represent the assignment of the return value to locations.
247 SmallVector<CCValAssign, 16> RVLocs;
248
249 // CCState - Info about the registers and stack slot.
250 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
251 *DAG.getContext());
252
253 // Analyze return values of ISD::RET
254 if (Subtarget.useHVXOps())
255 CCInfo.AnalyzeReturn(Outs, Fn: RetCC_Hexagon_HVX);
256 else
257 CCInfo.AnalyzeReturn(Outs, Fn: RetCC_Hexagon);
258
259 SDValue Glue;
260 SmallVector<SDValue, 4> RetOps(1, Chain);
261
262 // Copy the result values into the output registers.
263 for (unsigned i = 0; i != RVLocs.size(); ++i) {
264 CCValAssign &VA = RVLocs[i];
265 SDValue Val = OutVals[i];
266
267 switch (VA.getLocInfo()) {
268 default:
269 // Loc info must be one of Full, BCvt, SExt, ZExt, or AExt.
270 llvm_unreachable("Unknown loc info!");
271 case CCValAssign::Full:
272 break;
273 case CCValAssign::BCvt:
274 Val = DAG.getBitcast(VT: VA.getLocVT(), V: Val);
275 break;
276 case CCValAssign::SExt:
277 Val = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: VA.getLocVT(), Operand: Val);
278 break;
279 case CCValAssign::ZExt:
280 Val = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, VT: VA.getLocVT(), Operand: Val);
281 break;
282 case CCValAssign::AExt:
283 Val = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL: dl, VT: VA.getLocVT(), Operand: Val);
284 break;
285 }
286
287 Chain = DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), N: Val, Glue);
288
289 // Guarantee that all emitted copies are stuck together with flags.
290 Glue = Chain.getValue(R: 1);
291 RetOps.push_back(Elt: DAG.getRegister(Reg: VA.getLocReg(), VT: VA.getLocVT()));
292 }
293
294 RetOps[0] = Chain; // Update chain.
295
296 // Add the glue if we have it.
297 if (Glue.getNode())
298 RetOps.push_back(Elt: Glue);
299
300 return DAG.getNode(Opcode: HexagonISD::RET_GLUE, DL: dl, VT: MVT::Other, Ops: RetOps);
301}
302
303bool HexagonTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Only calls already marked as tail calls are candidates for tail-call
  // emission.
305 return CI->isTailCall();
306}
307
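// Map a register name, as used by @llvm.read_register / @llvm.write_register
// (e.g. "r19" or "sp"), to the corresponding physical register. Unknown
// names yield an invalid Register.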
308Register HexagonTargetLowering::getRegisterByName(
309 const char* RegName, LLT VT, const MachineFunction &) const {
  // r19 in particular must be supported, since the Linux kernel uses it.
311 Register Reg = StringSwitch<Register>(RegName)
312 .Case(S: "r0", Value: Hexagon::R0)
313 .Case(S: "r1", Value: Hexagon::R1)
314 .Case(S: "r2", Value: Hexagon::R2)
315 .Case(S: "r3", Value: Hexagon::R3)
316 .Case(S: "r4", Value: Hexagon::R4)
317 .Case(S: "r5", Value: Hexagon::R5)
318 .Case(S: "r6", Value: Hexagon::R6)
319 .Case(S: "r7", Value: Hexagon::R7)
320 .Case(S: "r8", Value: Hexagon::R8)
321 .Case(S: "r9", Value: Hexagon::R9)
322 .Case(S: "r10", Value: Hexagon::R10)
323 .Case(S: "r11", Value: Hexagon::R11)
324 .Case(S: "r12", Value: Hexagon::R12)
325 .Case(S: "r13", Value: Hexagon::R13)
326 .Case(S: "r14", Value: Hexagon::R14)
327 .Case(S: "r15", Value: Hexagon::R15)
328 .Case(S: "r16", Value: Hexagon::R16)
329 .Case(S: "r17", Value: Hexagon::R17)
330 .Case(S: "r18", Value: Hexagon::R18)
331 .Case(S: "r19", Value: Hexagon::R19)
332 .Case(S: "r20", Value: Hexagon::R20)
333 .Case(S: "r21", Value: Hexagon::R21)
334 .Case(S: "r22", Value: Hexagon::R22)
335 .Case(S: "r23", Value: Hexagon::R23)
336 .Case(S: "r24", Value: Hexagon::R24)
337 .Case(S: "r25", Value: Hexagon::R25)
338 .Case(S: "r26", Value: Hexagon::R26)
339 .Case(S: "r27", Value: Hexagon::R27)
340 .Case(S: "r28", Value: Hexagon::R28)
341 .Case(S: "r29", Value: Hexagon::R29)
342 .Case(S: "r30", Value: Hexagon::R30)
343 .Case(S: "r31", Value: Hexagon::R31)
344 .Case(S: "r1:0", Value: Hexagon::D0)
345 .Case(S: "r3:2", Value: Hexagon::D1)
346 .Case(S: "r5:4", Value: Hexagon::D2)
347 .Case(S: "r7:6", Value: Hexagon::D3)
348 .Case(S: "r9:8", Value: Hexagon::D4)
349 .Case(S: "r11:10", Value: Hexagon::D5)
350 .Case(S: "r13:12", Value: Hexagon::D6)
351 .Case(S: "r15:14", Value: Hexagon::D7)
352 .Case(S: "r17:16", Value: Hexagon::D8)
353 .Case(S: "r19:18", Value: Hexagon::D9)
354 .Case(S: "r21:20", Value: Hexagon::D10)
355 .Case(S: "r23:22", Value: Hexagon::D11)
356 .Case(S: "r25:24", Value: Hexagon::D12)
357 .Case(S: "r27:26", Value: Hexagon::D13)
358 .Case(S: "r29:28", Value: Hexagon::D14)
359 .Case(S: "r31:30", Value: Hexagon::D15)
360 .Case(S: "sp", Value: Hexagon::R29)
361 .Case(S: "fp", Value: Hexagon::R30)
362 .Case(S: "lr", Value: Hexagon::R31)
363 .Case(S: "p0", Value: Hexagon::P0)
364 .Case(S: "p1", Value: Hexagon::P1)
365 .Case(S: "p2", Value: Hexagon::P2)
366 .Case(S: "p3", Value: Hexagon::P3)
367 .Case(S: "sa0", Value: Hexagon::SA0)
368 .Case(S: "lc0", Value: Hexagon::LC0)
369 .Case(S: "sa1", Value: Hexagon::SA1)
370 .Case(S: "lc1", Value: Hexagon::LC1)
371 .Case(S: "m0", Value: Hexagon::M0)
372 .Case(S: "m1", Value: Hexagon::M1)
373 .Case(S: "usr", Value: Hexagon::USR)
374 .Case(S: "ugp", Value: Hexagon::UGP)
375 .Case(S: "cs0", Value: Hexagon::CS0)
376 .Case(S: "cs1", Value: Hexagon::CS1)
377 .Default(Value: Register());
378 return Reg;
379}
380
381/// LowerCallResult - Lower the result values of an ISD::CALL into the
382/// appropriate copies out of appropriate physical registers. This assumes that
383/// Chain/Glue are the input chain/glue to use, and that TheCall is the call
384/// being lowered. Returns a SDNode with the same number of values as the
385/// ISD::CALL.
386SDValue HexagonTargetLowering::LowerCallResult(
387 SDValue Chain, SDValue Glue, CallingConv::ID CallConv, bool IsVarArg,
388 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
389 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
390 const SmallVectorImpl<SDValue> &OutVals, SDValue Callee) const {
391 // Assign locations to each value returned by this call.
392 SmallVector<CCValAssign, 16> RVLocs;
393
394 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
395 *DAG.getContext());
396
397 if (Subtarget.useHVXOps())
398 CCInfo.AnalyzeCallResult(Ins, Fn: RetCC_Hexagon_HVX);
399 else
400 CCInfo.AnalyzeCallResult(Ins, Fn: RetCC_Hexagon);
401
402 // Copy all of the result registers out of their specified physreg.
403 for (unsigned i = 0; i != RVLocs.size(); ++i) {
404 SDValue RetVal;
405 if (RVLocs[i].getValVT() == MVT::i1) {
406 // Return values of type MVT::i1 require special handling. The reason
407 // is that MVT::i1 is associated with the PredRegs register class, but
408 // values of that type are still returned in R0. Generate an explicit
409 // copy into a predicate register from R0, and treat the value of the
410 // predicate register as the call result.
411 auto &MRI = DAG.getMachineFunction().getRegInfo();
412 SDValue FR0 = DAG.getCopyFromReg(Chain, dl, Reg: RVLocs[i].getLocReg(),
413 VT: MVT::i32, Glue);
414 // FR0 = (Value, Chain, Glue)
415 Register PredR = MRI.createVirtualRegister(RegClass: &Hexagon::PredRegsRegClass);
416 SDValue TPR = DAG.getCopyToReg(Chain: FR0.getValue(R: 1), dl, Reg: PredR,
417 N: FR0.getValue(R: 0), Glue: FR0.getValue(R: 2));
418 // TPR = (Chain, Glue)
419 // Don't glue this CopyFromReg, because it copies from a virtual
420 // register. If it is glued to the call, InstrEmitter will add it
421 // as an implicit def to the call (EmitMachineNode).
422 RetVal = DAG.getCopyFromReg(Chain: TPR.getValue(R: 0), dl, Reg: PredR, VT: MVT::i1);
423 Glue = TPR.getValue(R: 1);
424 Chain = TPR.getValue(R: 0);
425 } else {
426 RetVal = DAG.getCopyFromReg(Chain, dl, Reg: RVLocs[i].getLocReg(),
427 VT: RVLocs[i].getValVT(), Glue);
428 Glue = RetVal.getValue(R: 2);
429 Chain = RetVal.getValue(R: 1);
430 }
431 InVals.push_back(Elt: RetVal.getValue(R: 0));
432 }
433
434 return Chain;
435}
436
/// LowerCall - Function arguments are copied from virtual registers to
/// physical registers or the stack frame; CALLSEQ_START and CALLSEQ_END are
/// emitted.
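/// The overall sequence is: analyze the operands with the Hexagon calling
/// convention, emit CALLSEQ_START, copy register arguments and store stack
/// arguments, emit CALL/CALLnr (or TC_RETURN for tail calls), emit
/// CALLSEQ_END, and finally copy the results back out via LowerCallResult.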
439SDValue
440HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
441 SmallVectorImpl<SDValue> &InVals) const {
442 SelectionDAG &DAG = CLI.DAG;
443 SDLoc &dl = CLI.DL;
444 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
445 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
446 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
447 SDValue Chain = CLI.Chain;
448 SDValue Callee = CLI.Callee;
449 CallingConv::ID CallConv = CLI.CallConv;
450 bool IsVarArg = CLI.IsVarArg;
451 bool DoesNotReturn = CLI.DoesNotReturn;
452
453 bool IsStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
454 MachineFunction &MF = DAG.getMachineFunction();
455 MachineFrameInfo &MFI = MF.getFrameInfo();
456 auto PtrVT = getPointerTy(DL: MF.getDataLayout());
457
458 if (GlobalAddressSDNode *GAN = dyn_cast<GlobalAddressSDNode>(Val&: Callee))
459 Callee = DAG.getTargetGlobalAddress(GV: GAN->getGlobal(), DL: dl, VT: MVT::i32);
460
461 // Linux ABI treats var-arg calls the same way as regular ones.
462 bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg;
463
464 // Analyze operands of the call, assigning locations to each operand.
465 SmallVector<CCValAssign, 16> ArgLocs;
466 CCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs, *DAG.getContext());
467
468 if (Subtarget.useHVXOps())
469 CCInfo.AnalyzeCallOperands(Outs, Fn: CC_Hexagon_HVX);
470 else if (DisableArgsMinAlignment)
471 CCInfo.AnalyzeCallOperands(Outs, Fn: CC_Hexagon_Legacy);
472 else
473 CCInfo.AnalyzeCallOperands(Outs, Fn: CC_Hexagon);
474
475 if (CLI.IsTailCall) {
476 bool StructAttrFlag = MF.getFunction().hasStructRetAttr();
477 CLI.IsTailCall = IsEligibleForTailCallOptimization(Callee, CalleeCC: CallConv,
478 isVarArg: IsVarArg, isCalleeStructRet: IsStructRet, isCallerStructRet: StructAttrFlag, Outs,
479 OutVals, Ins, DAG);
480 for (const CCValAssign &VA : ArgLocs) {
481 if (VA.isMemLoc()) {
482 CLI.IsTailCall = false;
483 break;
484 }
485 }
486 LLVM_DEBUG(dbgs() << (CLI.IsTailCall ? "Eligible for Tail Call\n"
487 : "Argument must be passed on stack. "
488 "Not eligible for Tail Call\n"));
489 }
490 // Get a count of how many bytes are to be pushed on the stack.
491 unsigned NumBytes = CCInfo.getStackSize();
492 SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
493 SmallVector<SDValue, 8> MemOpChains;
494
495 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
496 SDValue StackPtr =
497 DAG.getCopyFromReg(Chain, dl, Reg: HRI.getStackRegister(), VT: PtrVT);
498
499 bool NeedsArgAlign = false;
500 Align LargestAlignSeen;
501 // Walk the register/memloc assignments, inserting copies/loads.
502 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
503 CCValAssign &VA = ArgLocs[i];
504 SDValue Arg = OutVals[i];
505 ISD::ArgFlagsTy Flags = Outs[i].Flags;
506 // Record if we need > 8 byte alignment on an argument.
507 bool ArgAlign = Subtarget.isHVXVectorType(VecTy: VA.getValVT());
508 NeedsArgAlign |= ArgAlign;
509
510 // Promote the value if needed.
511 switch (VA.getLocInfo()) {
512 default:
513 // Loc info must be one of Full, BCvt, SExt, ZExt, or AExt.
514 llvm_unreachable("Unknown loc info!");
515 case CCValAssign::Full:
516 break;
517 case CCValAssign::BCvt:
518 Arg = DAG.getBitcast(VT: VA.getLocVT(), V: Arg);
519 break;
520 case CCValAssign::SExt:
521 Arg = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: VA.getLocVT(), Operand: Arg);
522 break;
523 case CCValAssign::ZExt:
524 Arg = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, VT: VA.getLocVT(), Operand: Arg);
525 break;
526 case CCValAssign::AExt:
527 Arg = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL: dl, VT: VA.getLocVT(), Operand: Arg);
528 break;
529 }
530
531 if (VA.isMemLoc()) {
532 unsigned LocMemOffset = VA.getLocMemOffset();
533 SDValue MemAddr = DAG.getConstant(Val: LocMemOffset, DL: dl,
534 VT: StackPtr.getValueType());
535 MemAddr = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::i32, N1: StackPtr, N2: MemAddr);
536 if (ArgAlign)
537 LargestAlignSeen = std::max(
538 a: LargestAlignSeen, b: Align(VA.getLocVT().getStoreSizeInBits() / 8));
539 if (Flags.isByVal()) {
540 // The argument is a struct passed by value. According to LLVM, "Arg"
541 // is a pointer.
542 MemOpChains.push_back(Elt: CreateCopyOfByValArgument(Src: Arg, Dst: MemAddr, Chain,
543 Flags, DAG, dl));
544 } else {
545 MachinePointerInfo LocPI = MachinePointerInfo::getStack(
546 MF&: DAG.getMachineFunction(), Offset: LocMemOffset);
547 SDValue S = DAG.getStore(Chain, dl, Val: Arg, Ptr: MemAddr, PtrInfo: LocPI);
548 MemOpChains.push_back(Elt: S);
549 }
550 continue;
551 }
552
    // Arguments that are passed in registers must be recorded in the
    // RegsToPass vector.
555 if (VA.isRegLoc())
556 RegsToPass.push_back(Elt: std::make_pair(x: VA.getLocReg(), y&: Arg));
557 }
558
559 if (NeedsArgAlign && Subtarget.hasV60Ops()) {
560 LLVM_DEBUG(dbgs() << "Function needs byte stack align due to call args\n");
561 Align VecAlign = HRI.getSpillAlign(RC: Hexagon::HvxVRRegClass);
562 LargestAlignSeen = std::max(a: LargestAlignSeen, b: VecAlign);
563 MFI.ensureMaxAlignment(Alignment: LargestAlignSeen);
564 }
  // Combine all of the store nodes into a single TokenFactor, since the
  // stores are independent of each other.
567 if (!MemOpChains.empty())
568 Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, Ops: MemOpChains);
569
570 SDValue Glue;
571 if (!CLI.IsTailCall) {
572 Chain = DAG.getCALLSEQ_START(Chain, InSize: NumBytes, OutSize: 0, DL: dl);
573 Glue = Chain.getValue(R: 1);
574 }
575
576 // Build a sequence of copy-to-reg nodes chained together with token
577 // chain and flag operands which copy the outgoing args into registers.
578 // The Glue is necessary since all emitted instructions must be
579 // stuck together.
580 if (!CLI.IsTailCall) {
581 for (const auto &R : RegsToPass) {
582 Chain = DAG.getCopyToReg(Chain, dl, Reg: R.first, N: R.second, Glue);
583 Glue = Chain.getValue(R: 1);
584 }
585 } else {
586 // For tail calls lower the arguments to the 'real' stack slot.
587 //
588 // Force all the incoming stack arguments to be loaded from the stack
589 // before any new outgoing arguments are stored to the stack, because the
590 // outgoing stack slots may alias the incoming argument stack slots, and
591 // the alias isn't otherwise explicit. This is slightly more conservative
592 // than necessary, because it means that each store effectively depends
593 // on every argument instead of just those arguments it would clobber.
594 //
595 // Do not flag preceding copytoreg stuff together with the following stuff.
596 Glue = SDValue();
597 for (const auto &R : RegsToPass) {
598 Chain = DAG.getCopyToReg(Chain, dl, Reg: R.first, N: R.second, Glue);
599 Glue = Chain.getValue(R: 1);
600 }
601 Glue = SDValue();
602 }
603
604 bool LongCalls = MF.getSubtarget<HexagonSubtarget>().useLongCalls();
605 unsigned Flags = LongCalls ? HexagonII::HMOTF_ConstExtended : 0;
606
607 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
608 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
609 // node so that legalize doesn't hack it.
610 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Val&: Callee)) {
611 Callee = DAG.getTargetGlobalAddress(GV: G->getGlobal(), DL: dl, VT: PtrVT, offset: 0, TargetFlags: Flags);
612 } else if (ExternalSymbolSDNode *S =
613 dyn_cast<ExternalSymbolSDNode>(Val&: Callee)) {
614 Callee = DAG.getTargetExternalSymbol(Sym: S->getSymbol(), VT: PtrVT, TargetFlags: Flags);
615 }
616
617 // Returns a chain & a flag for retval copy to use.
618 SmallVector<SDValue, 8> Ops;
619 Ops.push_back(Elt: Chain);
620 Ops.push_back(Elt: Callee);
621
622 // Add argument registers to the end of the list so that they are
623 // known live into the call.
624 for (const auto &R : RegsToPass)
625 Ops.push_back(Elt: DAG.getRegister(Reg: R.first, VT: R.second.getValueType()));
626
627 const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallConv);
628 assert(Mask && "Missing call preserved mask for calling convention");
629 Ops.push_back(Elt: DAG.getRegisterMask(RegMask: Mask));
630
631 if (Glue.getNode())
632 Ops.push_back(Elt: Glue);
633
634 if (CLI.IsTailCall) {
635 MFI.setHasTailCall();
636 return DAG.getNode(Opcode: HexagonISD::TC_RETURN, DL: dl, VT: MVT::Other, Ops);
637 }
638
639 // Set this here because we need to know this for "hasFP" in frame lowering.
640 // The target-independent code calls getFrameRegister before setting it, and
641 // getFrameRegister uses hasFP to determine whether the function has FP.
642 MFI.setHasCalls(true);
643
644 unsigned OpCode = DoesNotReturn ? HexagonISD::CALLnr : HexagonISD::CALL;
645 Chain = DAG.getNode(Opcode: OpCode, DL: dl, ResultTys: {MVT::Other, MVT::Glue}, Ops);
646 Glue = Chain.getValue(R: 1);
647
648 // Create the CALLSEQ_END node.
649 Chain = DAG.getCALLSEQ_END(Chain, Size1: NumBytes, Size2: 0, Glue, DL: dl);
650 Glue = Chain.getValue(R: 1);
651
652 // Handle result values, copying them out of physregs into vregs that we
653 // return.
654 return LowerCallResult(Chain, Glue, CallConv, IsVarArg, Ins, dl, DAG,
655 InVals, OutVals, Callee);
656}
657
658/// Returns true by value, base pointer and offset pointer and addressing
659/// mode by reference if this node can be combined with a load / store to
660/// form a post-indexed load / store.
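/// For example, a load whose address is subsequently incremented by a
/// suitable constant can be selected as a post-increment access such as
///   r1 = memw(r0++#4)
/// provided isValidAutoIncImm accepts the offset for the access size.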
661bool HexagonTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
662 SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM,
663 SelectionDAG &DAG) const {
664 LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(Val: N);
665 if (!LSN)
666 return false;
667 EVT VT = LSN->getMemoryVT();
668 if (!VT.isSimple())
669 return false;
670 bool IsLegalType = VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
671 VT == MVT::i64 || VT == MVT::f32 || VT == MVT::f64 ||
672 VT == MVT::v2i16 || VT == MVT::v2i32 || VT == MVT::v4i8 ||
673 VT == MVT::v4i16 || VT == MVT::v8i8 ||
674 Subtarget.isHVXVectorType(VecTy: VT.getSimpleVT());
675 if (!IsLegalType)
676 return false;
677
678 if (Op->getOpcode() != ISD::ADD)
679 return false;
680 Base = Op->getOperand(Num: 0);
681 Offset = Op->getOperand(Num: 1);
682 if (!isa<ConstantSDNode>(Val: Offset.getNode()))
683 return false;
684 AM = ISD::POST_INC;
685
686 int32_t V = cast<ConstantSDNode>(Val: Offset.getNode())->getSExtValue();
687 return Subtarget.getInstrInfo()->isValidAutoIncImm(VT, Offset: V);
688}
689
690SDValue HexagonTargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
691 if (DAG.getMachineFunction().getFunction().hasOptSize())
692 return SDValue();
693 else
694 return Op;
695}
696
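// Scan the INLINEASM operand list for a register definition or clobber of LR
// (R31). If one is found, record it in HexagonMachineFunctionInfo so that
// frame lowering treats LR as clobbered.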
697SDValue
698HexagonTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
699 MachineFunction &MF = DAG.getMachineFunction();
700 auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
701 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
702 unsigned LR = HRI.getRARegister();
703
704 if ((Op.getOpcode() != ISD::INLINEASM &&
705 Op.getOpcode() != ISD::INLINEASM_BR) || HMFI.hasClobberLR())
706 return Op;
707
708 unsigned NumOps = Op.getNumOperands();
709 if (Op.getOperand(i: NumOps-1).getValueType() == MVT::Glue)
710 --NumOps; // Ignore the flag operand.
711
712 for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
713 const InlineAsm::Flag Flags(Op.getConstantOperandVal(i));
714 unsigned NumVals = Flags.getNumOperandRegisters();
715 ++i; // Skip the ID value.
716
717 switch (Flags.getKind()) {
718 default:
719 llvm_unreachable("Bad flags!");
720 case InlineAsm::Kind::RegUse:
721 case InlineAsm::Kind::Imm:
722 case InlineAsm::Kind::Mem:
723 i += NumVals;
724 break;
725 case InlineAsm::Kind::Clobber:
726 case InlineAsm::Kind::RegDef:
727 case InlineAsm::Kind::RegDefEarlyClobber: {
728 for (; NumVals; --NumVals, ++i) {
729 Register Reg = cast<RegisterSDNode>(Val: Op.getOperand(i))->getReg();
730 if (Reg != LR)
731 continue;
732 HMFI.setHasClobberLR(true);
733 return Op;
734 }
735 break;
736 }
737 }
738 }
739
740 return Op;
741}
742
743// Need to transform ISD::PREFETCH into something that doesn't inherit
744// all of the properties of ISD::PREFETCH, specifically SDNPMayLoad and
745// SDNPMayStore.
746SDValue HexagonTargetLowering::LowerPREFETCH(SDValue Op,
747 SelectionDAG &DAG) const {
748 SDValue Chain = Op.getOperand(i: 0);
749 SDValue Addr = Op.getOperand(i: 1);
750 // Lower it to DCFETCH($reg, #0). A "pat" will try to merge the offset in,
751 // if the "reg" is fed by an "add".
752 SDLoc DL(Op);
753 SDValue Zero = DAG.getConstant(Val: 0, DL, VT: MVT::i32);
754 return DAG.getNode(Opcode: HexagonISD::DCFETCH, DL, VT: MVT::Other, N1: Chain, N2: Addr, N3: Zero);
755}
756
757// Custom-handle ISD::READCYCLECOUNTER because the target-independent SDNode
758// is marked as having side-effects, while the register read on Hexagon does
759// not have any. TableGen refuses to accept the direct pattern from that node
760// to the A4_tfrcpp.
761SDValue HexagonTargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
762 SelectionDAG &DAG) const {
763 SDValue Chain = Op.getOperand(i: 0);
764 SDLoc dl(Op);
765 SDVTList VTs = DAG.getVTList(VT1: MVT::i64, VT2: MVT::Other);
766 return DAG.getNode(Opcode: HexagonISD::READCYCLE, DL: dl, VTList: VTs, N: Chain);
767}
768
769// Custom-handle ISD::READSTEADYCOUNTER because the target-independent SDNode
770// is marked as having side-effects, while the register read on Hexagon does
771// not have any. TableGen refuses to accept the direct pattern from that node
772// to the A4_tfrcpp.
773SDValue HexagonTargetLowering::LowerREADSTEADYCOUNTER(SDValue Op,
774 SelectionDAG &DAG) const {
775 SDValue Chain = Op.getOperand(i: 0);
776 SDLoc dl(Op);
777 SDVTList VTs = DAG.getVTList(VT1: MVT::i64, VT2: MVT::Other);
778 return DAG.getNode(Opcode: HexagonISD::READTIMER, DL: dl, VTList: VTs, N: Chain);
779}
780
781SDValue HexagonTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
782 SelectionDAG &DAG) const {
783 SDValue Chain = Op.getOperand(i: 0);
784 unsigned IntNo = Op.getConstantOperandVal(i: 1);
785 // Lower the hexagon_prefetch builtin to DCFETCH, as above.
786 if (IntNo == Intrinsic::hexagon_prefetch) {
787 SDValue Addr = Op.getOperand(i: 2);
788 SDLoc DL(Op);
789 SDValue Zero = DAG.getConstant(Val: 0, DL, VT: MVT::i32);
790 return DAG.getNode(Opcode: HexagonISD::DCFETCH, DL, VT: MVT::Other, N1: Chain, N2: Addr, N3: Zero);
791 }
792 return SDValue();
793}
794
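// A dynamic stack allocation becomes a HexagonISD::ALLOCA node whose
// operands are the chain, the size, and the alignment as an i32 constant.
// An alignment of zero is replaced by the natural stack alignment below.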
795SDValue
796HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
797 SelectionDAG &DAG) const {
798 SDValue Chain = Op.getOperand(i: 0);
799 SDValue Size = Op.getOperand(i: 1);
800 SDValue Align = Op.getOperand(i: 2);
801 SDLoc dl(Op);
802
803 ConstantSDNode *AlignConst = dyn_cast<ConstantSDNode>(Val&: Align);
804 assert(AlignConst && "Non-constant Align in LowerDYNAMIC_STACKALLOC");
805
806 unsigned A = AlignConst->getSExtValue();
807 auto &HFI = *Subtarget.getFrameLowering();
808 // "Zero" means natural stack alignment.
809 if (A == 0)
810 A = HFI.getStackAlign().value();
811
812 LLVM_DEBUG({
813 dbgs () << __func__ << " Align: " << A << " Size: ";
814 Size.getNode()->dump(&DAG);
815 dbgs() << "\n";
816 });
817
818 SDValue AC = DAG.getConstant(Val: A, DL: dl, VT: MVT::i32);
819 SDVTList VTs = DAG.getVTList(VT1: MVT::i32, VT2: MVT::Other);
820 SDValue AA = DAG.getNode(Opcode: HexagonISD::ALLOCA, DL: dl, VTList: VTs, N1: Chain, N2: Size, N3: AC);
821
822 DAG.ReplaceAllUsesOfValueWith(From: Op, To: AA);
823 return AA;
824}
825
826SDValue HexagonTargetLowering::LowerFormalArguments(
827 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
828 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
829 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
830 MachineFunction &MF = DAG.getMachineFunction();
831 MachineFrameInfo &MFI = MF.getFrameInfo();
832 MachineRegisterInfo &MRI = MF.getRegInfo();
833
834 // Linux ABI treats var-arg calls the same way as regular ones.
835 bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg;
836
837 // Assign locations to all of the incoming arguments.
838 SmallVector<CCValAssign, 16> ArgLocs;
839 CCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs, *DAG.getContext());
840
841 if (Subtarget.useHVXOps())
842 CCInfo.AnalyzeFormalArguments(Ins, Fn: CC_Hexagon_HVX);
843 else if (DisableArgsMinAlignment)
844 CCInfo.AnalyzeFormalArguments(Ins, Fn: CC_Hexagon_Legacy);
845 else
846 CCInfo.AnalyzeFormalArguments(Ins, Fn: CC_Hexagon);
847
  // For LLVM, in the case when returning a struct by value (>8byte),
  // the first argument is a pointer that points to the location on the
  // caller's stack where the return value will be stored. For Hexagon, that
  // location is passed only when the struct size is larger than 8 bytes;
  // otherwise no address is passed into the callee and the callee returns
  // the result directly through R0/R1.
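  // NextSingleReg maps the register just allocated to the index of the next
  // free single (32-bit) register, counted in units of Rn: for example,
  // allocating R2 yields 3, and allocating the pair D1 (r3:2) yields 4. The
  // result is used below to track which of R0-R5 still have to be saved for
  // varargs (FirstVarArgSavedReg).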
854 auto NextSingleReg = [] (const TargetRegisterClass &RC, unsigned Reg) {
855 switch (RC.getID()) {
856 case Hexagon::IntRegsRegClassID:
857 return Reg - Hexagon::R0 + 1;
858 case Hexagon::DoubleRegsRegClassID:
859 return (Reg - Hexagon::D0 + 1) * 2;
860 case Hexagon::HvxVRRegClassID:
861 return Reg - Hexagon::V0 + 1;
862 case Hexagon::HvxWRRegClassID:
863 return (Reg - Hexagon::W0 + 1) * 2;
864 }
865 llvm_unreachable("Unexpected register class");
866 };
867
868 auto &HFL = const_cast<HexagonFrameLowering&>(*Subtarget.getFrameLowering());
869 auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
870 HFL.FirstVarArgSavedReg = 0;
871 HMFI.setFirstNamedArgFrameIndex(-int(MFI.getNumFixedObjects()));
872
873 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
874 CCValAssign &VA = ArgLocs[i];
875 ISD::ArgFlagsTy Flags = Ins[i].Flags;
876 bool ByVal = Flags.isByVal();
877
878 // Arguments passed in registers:
879 // 1. 32- and 64-bit values and HVX vectors are passed directly,
880 // 2. Large structs are passed via an address, and the address is
881 // passed in a register.
882 if (VA.isRegLoc() && ByVal && Flags.getByValSize() <= 8)
883 llvm_unreachable("ByValSize must be bigger than 8 bytes");
884
885 bool InReg = VA.isRegLoc() &&
886 (!ByVal || (ByVal && Flags.getByValSize() > 8));
887
888 if (InReg) {
889 MVT RegVT = VA.getLocVT();
890 if (VA.getLocInfo() == CCValAssign::BCvt)
891 RegVT = VA.getValVT();
892
893 const TargetRegisterClass *RC = getRegClassFor(VT: RegVT);
894 Register VReg = MRI.createVirtualRegister(RegClass: RC);
895 SDValue Copy = DAG.getCopyFromReg(Chain, dl, Reg: VReg, VT: RegVT);
896
897 // Treat values of type MVT::i1 specially: they are passed in
898 // registers of type i32, but they need to remain as values of
899 // type i1 for consistency of the argument lowering.
900 if (VA.getValVT() == MVT::i1) {
901 assert(RegVT.getSizeInBits() <= 32);
902 SDValue T = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: RegVT,
903 N1: Copy, N2: DAG.getConstant(Val: 1, DL: dl, VT: RegVT));
904 Copy = DAG.getSetCC(DL: dl, VT: MVT::i1, LHS: T, RHS: DAG.getConstant(Val: 0, DL: dl, VT: RegVT),
905 Cond: ISD::SETNE);
906 } else {
907#ifndef NDEBUG
908 unsigned RegSize = RegVT.getSizeInBits();
909 assert(RegSize == 32 || RegSize == 64 ||
910 Subtarget.isHVXVectorType(RegVT));
911#endif
912 }
913 InVals.push_back(Elt: Copy);
914 MRI.addLiveIn(Reg: VA.getLocReg(), vreg: VReg);
915 HFL.FirstVarArgSavedReg = NextSingleReg(*RC, VA.getLocReg());
916 } else {
917 assert(VA.isMemLoc() && "Argument should be passed in memory");
918
919 // If it's a byval parameter, then we need to compute the
920 // "real" size, not the size of the pointer.
921 unsigned ObjSize = Flags.isByVal()
922 ? Flags.getByValSize()
923 : VA.getLocVT().getStoreSizeInBits() / 8;
924
925 // Create the frame index object for this incoming parameter.
926 int Offset = HEXAGON_LRFP_SIZE + VA.getLocMemOffset();
927 int FI = MFI.CreateFixedObject(Size: ObjSize, SPOffset: Offset, IsImmutable: true);
928 SDValue FIN = DAG.getFrameIndex(FI, VT: MVT::i32);
929
930 if (Flags.isByVal()) {
931 // If it's a pass-by-value aggregate, then do not dereference the stack
932 // location. Instead, we should generate a reference to the stack
933 // location.
934 InVals.push_back(Elt: FIN);
935 } else {
936 SDValue L = DAG.getLoad(VT: VA.getValVT(), dl, Chain, Ptr: FIN,
937 PtrInfo: MachinePointerInfo::getFixedStack(MF, FI, Offset: 0));
938 InVals.push_back(Elt: L);
939 }
940 }
941 }
942
943 if (IsVarArg && Subtarget.isEnvironmentMusl()) {
944 for (int i = HFL.FirstVarArgSavedReg; i < 6; i++)
945 MRI.addLiveIn(Reg: Hexagon::R0+i);
946 }
947
948 if (IsVarArg && Subtarget.isEnvironmentMusl()) {
949 HMFI.setFirstNamedArgFrameIndex(HMFI.getFirstNamedArgFrameIndex() - 1);
950 HMFI.setLastNamedArgFrameIndex(-int(MFI.getNumFixedObjects()));
951
952 // Create Frame index for the start of register saved area.
953 int NumVarArgRegs = 6 - HFL.FirstVarArgSavedReg;
954 bool RequiresPadding = (NumVarArgRegs & 1);
955 int RegSaveAreaSizePlusPadding = RequiresPadding
956 ? (NumVarArgRegs + 1) * 4
957 : NumVarArgRegs * 4;
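    // For example, if two argument registers were consumed by named
    // arguments, FirstVarArgSavedReg is 2 and NumVarArgRegs is 4, so R2-R5
    // are saved in a 16-byte area with no padding; with three named register
    // arguments, three registers plus 4 bytes of padding (16 bytes total)
    // are reserved to keep the area 8-byte aligned.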
958
959 if (RegSaveAreaSizePlusPadding > 0) {
960 // The offset to saved register area should be 8 byte aligned.
961 int RegAreaStart = HEXAGON_LRFP_SIZE + CCInfo.getStackSize();
      if (RegAreaStart % 8)
        RegAreaStart = (RegAreaStart + 7) & -8;
964
965 int RegSaveAreaFrameIndex =
966 MFI.CreateFixedObject(Size: RegSaveAreaSizePlusPadding, SPOffset: RegAreaStart, IsImmutable: true);
967 HMFI.setRegSavedAreaStartFrameIndex(RegSaveAreaFrameIndex);
968
969 // This will point to the next argument passed via stack.
970 int Offset = RegAreaStart + RegSaveAreaSizePlusPadding;
971 int FI = MFI.CreateFixedObject(Hexagon_PointerSize, SPOffset: Offset, IsImmutable: true);
972 HMFI.setVarArgsFrameIndex(FI);
973 } else {
974 // This will point to the next argument passed via stack, when
975 // there is no saved register area.
976 int Offset = HEXAGON_LRFP_SIZE + CCInfo.getStackSize();
977 int FI = MFI.CreateFixedObject(Hexagon_PointerSize, SPOffset: Offset, IsImmutable: true);
978 HMFI.setRegSavedAreaStartFrameIndex(FI);
979 HMFI.setVarArgsFrameIndex(FI);
980 }
981 }
982
  if (IsVarArg && !Subtarget.isEnvironmentMusl()) {
985 // This will point to the next argument passed via stack.
986 int Offset = HEXAGON_LRFP_SIZE + CCInfo.getStackSize();
987 int FI = MFI.CreateFixedObject(Hexagon_PointerSize, SPOffset: Offset, IsImmutable: true);
988 HMFI.setVarArgsFrameIndex(FI);
989 }
990
991 return Chain;
992}
993
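// For the musl environment, va_list is a structure of three pointers: the
// current position within the register-save area, the end of that area, and
// the overflow (stack-argument) area. The stores below initialize all three
// fields; for other environments only a single pointer to the first variadic
// stack argument is stored.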
994SDValue
995HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
996 // VASTART stores the address of the VarArgsFrameIndex slot into the
997 // memory location argument.
998 MachineFunction &MF = DAG.getMachineFunction();
999 HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>();
1000 SDValue Addr = DAG.getFrameIndex(FI: QFI->getVarArgsFrameIndex(), VT: MVT::i32);
1001 const Value *SV = cast<SrcValueSDNode>(Val: Op.getOperand(i: 2))->getValue();
1002
1003 if (!Subtarget.isEnvironmentMusl()) {
1004 return DAG.getStore(Chain: Op.getOperand(i: 0), dl: SDLoc(Op), Val: Addr, Ptr: Op.getOperand(i: 1),
1005 PtrInfo: MachinePointerInfo(SV));
1006 }
1007 auto &FuncInfo = *MF.getInfo<HexagonMachineFunctionInfo>();
1008 auto &HFL = *Subtarget.getFrameLowering();
1009 SDLoc DL(Op);
1010 SmallVector<SDValue, 8> MemOps;
1011
1012 // Get frame index of va_list.
1013 SDValue FIN = Op.getOperand(i: 1);
1014
  // If the first vararg register is odd, add 4 bytes to the start of the
  // saved register area so that it points to the first register location.
  // This is because the saved register area has to be 8 byte aligned.
  // In case of an odd start register, there will be 4 bytes of padding at
  // the beginning of the saved register area. If all registers are used up,
  // the following condition handles that case correctly.
1021 SDValue SavedRegAreaStartFrameIndex =
1022 DAG.getFrameIndex(FI: FuncInfo.getRegSavedAreaStartFrameIndex(), VT: MVT::i32);
1023
1024 auto PtrVT = getPointerTy(DL: DAG.getDataLayout());
1025
1026 if (HFL.FirstVarArgSavedReg & 1)
1027 SavedRegAreaStartFrameIndex =
1028 DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT,
1029 N1: DAG.getFrameIndex(FI: FuncInfo.getRegSavedAreaStartFrameIndex(),
1030 VT: MVT::i32),
1031 N2: DAG.getIntPtrConstant(Val: 4, DL));
1032
1033 // Store the saved register area start pointer.
1034 SDValue Store =
1035 DAG.getStore(Chain: Op.getOperand(i: 0), dl: DL,
1036 Val: SavedRegAreaStartFrameIndex,
1037 Ptr: FIN, PtrInfo: MachinePointerInfo(SV));
1038 MemOps.push_back(Elt: Store);
1039
1040 // Store saved register area end pointer.
1041 FIN = DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT,
1042 N1: FIN, N2: DAG.getIntPtrConstant(Val: 4, DL));
1043 Store = DAG.getStore(Chain: Op.getOperand(i: 0), dl: DL,
1044 Val: DAG.getFrameIndex(FI: FuncInfo.getVarArgsFrameIndex(),
1045 VT: PtrVT),
1046 Ptr: FIN, PtrInfo: MachinePointerInfo(SV, 4));
1047 MemOps.push_back(Elt: Store);
1048
1049 // Store overflow area pointer.
1050 FIN = DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT,
1051 N1: FIN, N2: DAG.getIntPtrConstant(Val: 4, DL));
1052 Store = DAG.getStore(Chain: Op.getOperand(i: 0), dl: DL,
1053 Val: DAG.getFrameIndex(FI: FuncInfo.getVarArgsFrameIndex(),
1054 VT: PtrVT),
1055 Ptr: FIN, PtrInfo: MachinePointerInfo(SV, 8));
1056 MemOps.push_back(Elt: Store);
1057
1058 return DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: MemOps);
1059}
1060
1061SDValue
1062HexagonTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
  // Assert that the Linux (musl) ABI is enabled for the current compilation.
1064 assert(Subtarget.isEnvironmentMusl() && "Linux ABI should be enabled");
1065 SDValue Chain = Op.getOperand(i: 0);
1066 SDValue DestPtr = Op.getOperand(i: 1);
1067 SDValue SrcPtr = Op.getOperand(i: 2);
1068 const Value *DestSV = cast<SrcValueSDNode>(Val: Op.getOperand(i: 3))->getValue();
1069 const Value *SrcSV = cast<SrcValueSDNode>(Val: Op.getOperand(i: 4))->getValue();
1070 SDLoc DL(Op);
  // The va_list is 12 bytes, as it holds 3 pointers. Therefore, we
  // need to copy 12 bytes from one va_list to the other.
1073 return DAG.getMemcpy(
1074 Chain, dl: DL, Dst: DestPtr, Src: SrcPtr, Size: DAG.getIntPtrConstant(Val: 12, DL), Alignment: Align(4),
1075 /*isVolatile*/ isVol: false, AlwaysInline: false, /*CI=*/nullptr, OverrideTailCall: std::nullopt,
1076 DstPtrInfo: MachinePointerInfo(DestSV), SrcPtrInfo: MachinePointerInfo(SrcSV));
1077}
1078
1079SDValue HexagonTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
1080 const SDLoc &dl(Op);
1081 SDValue LHS = Op.getOperand(i: 0);
1082 SDValue RHS = Op.getOperand(i: 1);
1083 ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 2))->get();
1084 MVT ResTy = ty(Op);
1085 MVT OpTy = ty(Op: LHS);
1086
1087 if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
1088 MVT ElemTy = OpTy.getVectorElementType();
1089 assert(ElemTy.isScalarInteger());
1090 MVT WideTy = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: 2*ElemTy.getSizeInBits()),
1091 NumElements: OpTy.getVectorNumElements());
1092 return DAG.getSetCC(DL: dl, VT: ResTy,
1093 LHS: DAG.getSExtOrTrunc(Op: LHS, DL: SDLoc(LHS), VT: WideTy),
1094 RHS: DAG.getSExtOrTrunc(Op: RHS, DL: SDLoc(RHS), VT: WideTy), Cond: CC);
1095 }
1096
1097 // Treat all other vector types as legal.
1098 if (ResTy.isVector())
1099 return Op;
1100
1101 // Comparisons of short integers should use sign-extend, not zero-extend,
1102 // since we can represent small negative values in the compare instructions.
1103 // The LLVM default is to use zero-extend arbitrarily in these cases.
1104 auto isSExtFree = [this](SDValue N) {
1105 switch (N.getOpcode()) {
1106 case ISD::TRUNCATE: {
1107 // A sign-extend of a truncate of a sign-extend is free.
1108 SDValue Op = N.getOperand(i: 0);
1109 if (Op.getOpcode() != ISD::AssertSext)
1110 return false;
1111 EVT OrigTy = cast<VTSDNode>(Val: Op.getOperand(i: 1))->getVT();
1112 unsigned ThisBW = ty(Op: N).getSizeInBits();
1113 unsigned OrigBW = OrigTy.getSizeInBits();
1114 // The type that was sign-extended to get the AssertSext must be
1115 // narrower than the type of N (so that N has still the same value
1116 // as the original).
1117 return ThisBW >= OrigBW;
1118 }
1119 case ISD::LOAD:
1120 // We have sign-extended loads.
1121 return true;
1122 }
1123 return false;
1124 };
1125
1126 if (OpTy == MVT::i8 || OpTy == MVT::i16) {
1127 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: RHS);
1128 bool IsNegative = C && C->getAPIntValue().isNegative();
1129 if (IsNegative || isSExtFree(LHS) || isSExtFree(RHS))
1130 return DAG.getSetCC(DL: dl, VT: ResTy,
1131 LHS: DAG.getSExtOrTrunc(Op: LHS, DL: SDLoc(LHS), VT: MVT::i32),
1132 RHS: DAG.getSExtOrTrunc(Op: RHS, DL: SDLoc(RHS), VT: MVT::i32), Cond: CC);
1133 }
1134
1135 return SDValue();
1136}
1137
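// VSELECT on the short vector types v2i16 and v4i8 is handled by widening:
// both value operands are sign-extended to v2i32/v4i16, the select is done
// in the wider type, and the result is truncated back. For example,
// vselect(p, v4i8 a, v4i8 b) becomes trunc(select(p, sext(a), sext(b))).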
1138SDValue
1139HexagonTargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
1140 SDValue PredOp = Op.getOperand(i: 0);
1141 SDValue Op1 = Op.getOperand(i: 1), Op2 = Op.getOperand(i: 2);
1142 MVT OpTy = ty(Op: Op1);
1143 const SDLoc &dl(Op);
1144
1145 if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
1146 MVT ElemTy = OpTy.getVectorElementType();
1147 assert(ElemTy.isScalarInteger());
1148 MVT WideTy = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: 2*ElemTy.getSizeInBits()),
1149 NumElements: OpTy.getVectorNumElements());
1150 // Generate (trunc (select (_, sext, sext))).
1151 return DAG.getSExtOrTrunc(
1152 Op: DAG.getSelect(DL: dl, VT: WideTy, Cond: PredOp,
1153 LHS: DAG.getSExtOrTrunc(Op: Op1, DL: dl, VT: WideTy),
1154 RHS: DAG.getSExtOrTrunc(Op: Op2, DL: dl, VT: WideTy)),
1155 DL: dl, VT: OpTy);
1156 }
1157
1158 return SDValue();
1159}
1160
1161SDValue
1162HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
1163 EVT ValTy = Op.getValueType();
1164 ConstantPoolSDNode *CPN = cast<ConstantPoolSDNode>(Val&: Op);
1165 Constant *CVal = nullptr;
1166 bool isVTi1Type = false;
1167 if (auto *CV = dyn_cast<ConstantVector>(Val: CPN->getConstVal())) {
1168 if (cast<VectorType>(Val: CV->getType())->getElementType()->isIntegerTy(Bitwidth: 1)) {
1169 IRBuilder<> IRB(CV->getContext());
1170 SmallVector<Constant*, 128> NewConst;
1171 unsigned VecLen = CV->getNumOperands();
1172 assert(isPowerOf2_32(VecLen) &&
1173 "conversion only supported for pow2 VectorSize");
1174 for (unsigned i = 0; i < VecLen; ++i)
1175 NewConst.push_back(Elt: IRB.getInt8(C: CV->getOperand(i_nocapture: i)->isZeroValue()));
1176
1177 CVal = ConstantVector::get(V: NewConst);
1178 isVTi1Type = true;
1179 }
1180 }
1181 Align Alignment = CPN->getAlign();
1182 bool IsPositionIndependent = isPositionIndependent();
1183 unsigned char TF = IsPositionIndependent ? HexagonII::MO_PCREL : 0;
1184
1185 unsigned Offset = 0;
1186 SDValue T;
1187 if (CPN->isMachineConstantPoolEntry())
1188 T = DAG.getTargetConstantPool(C: CPN->getMachineCPVal(), VT: ValTy, Align: Alignment,
1189 Offset, TargetFlags: TF);
1190 else if (isVTi1Type)
1191 T = DAG.getTargetConstantPool(C: CVal, VT: ValTy, Align: Alignment, Offset, TargetFlags: TF);
1192 else
1193 T = DAG.getTargetConstantPool(C: CPN->getConstVal(), VT: ValTy, Align: Alignment, Offset,
1194 TargetFlags: TF);
1195
1196 assert(cast<ConstantPoolSDNode>(T)->getTargetFlags() == TF &&
1197 "Inconsistent target flag encountered");
1198
1199 if (IsPositionIndependent)
1200 return DAG.getNode(Opcode: HexagonISD::AT_PCREL, DL: SDLoc(Op), VT: ValTy, Operand: T);
1201 return DAG.getNode(Opcode: HexagonISD::CP, DL: SDLoc(Op), VT: ValTy, Operand: T);
1202}
1203
1204SDValue
1205HexagonTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
1206 EVT VT = Op.getValueType();
1207 int Idx = cast<JumpTableSDNode>(Val&: Op)->getIndex();
1208 if (isPositionIndependent()) {
1209 SDValue T = DAG.getTargetJumpTable(JTI: Idx, VT, TargetFlags: HexagonII::MO_PCREL);
1210 return DAG.getNode(Opcode: HexagonISD::AT_PCREL, DL: SDLoc(Op), VT, Operand: T);
1211 }
1212
1213 SDValue T = DAG.getTargetJumpTable(JTI: Idx, VT);
1214 return DAG.getNode(Opcode: HexagonISD::JT, DL: SDLoc(Op), VT, Operand: T);
1215}
1216
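// For depth 0 the return address is simply the value of LR, which is marked
// as a live-in; for deeper frames it is loaded from FrameAddr + 4, matching
// the allocframe layout in which the saved LR sits 4 bytes above the saved
// FP.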
1217SDValue
1218HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
1219 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
1220 MachineFunction &MF = DAG.getMachineFunction();
1221 MachineFrameInfo &MFI = MF.getFrameInfo();
1222 MFI.setReturnAddressIsTaken(true);
1223
1224 EVT VT = Op.getValueType();
1225 SDLoc dl(Op);
1226 unsigned Depth = Op.getConstantOperandVal(i: 0);
1227 if (Depth) {
1228 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
1229 SDValue Offset = DAG.getConstant(Val: 4, DL: dl, VT: MVT::i32);
1230 return DAG.getLoad(VT, dl, Chain: DAG.getEntryNode(),
1231 Ptr: DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: FrameAddr, N2: Offset),
1232 PtrInfo: MachinePointerInfo());
1233 }
1234
1235 // Return LR, which contains the return address. Mark it an implicit live-in.
1236 Register Reg = MF.addLiveIn(PReg: HRI.getRARegister(), RC: getRegClassFor(VT: MVT::i32));
1237 return DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg, VT);
1238}
1239
1240SDValue
1241HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
1242 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
1243 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
1244 MFI.setFrameAddressIsTaken(true);
1245
1246 EVT VT = Op.getValueType();
1247 SDLoc dl(Op);
1248 unsigned Depth = Op.getConstantOperandVal(i: 0);
1249 SDValue FrameAddr = DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl,
1250 Reg: HRI.getFrameRegister(), VT);
1251 while (Depth--)
1252 FrameAddr = DAG.getLoad(VT, dl, Chain: DAG.getEntryNode(), Ptr: FrameAddr,
1253 PtrInfo: MachinePointerInfo());
1254 return FrameAddr;
1255}
1256
1257SDValue
1258HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const {
1259 SDLoc dl(Op);
1260 return DAG.getNode(Opcode: HexagonISD::BARRIER, DL: dl, VT: MVT::Other, Operand: Op.getOperand(i: 0));
1261}
1262
1263SDValue
1264HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const {
1265 SDLoc dl(Op);
1266 auto *GAN = cast<GlobalAddressSDNode>(Val&: Op);
1267 auto PtrVT = getPointerTy(DL: DAG.getDataLayout());
1268 auto *GV = GAN->getGlobal();
1269 int64_t Offset = GAN->getOffset();
1270
1271 auto &HLOF = *HTM.getObjFileLowering();
1272 Reloc::Model RM = HTM.getRelocationModel();
1273
1274 if (RM == Reloc::Static) {
1275 SDValue GA = DAG.getTargetGlobalAddress(GV, DL: dl, VT: PtrVT, offset: Offset);
1276 const GlobalObject *GO = GV->getAliaseeObject();
1277 if (GO && Subtarget.useSmallData() && HLOF.isGlobalInSmallSection(GO, TM: HTM))
1278 return DAG.getNode(Opcode: HexagonISD::CONST32_GP, DL: dl, VT: PtrVT, Operand: GA);
1279 return DAG.getNode(Opcode: HexagonISD::CONST32, DL: dl, VT: PtrVT, Operand: GA);
1280 }
1281
1282 bool UsePCRel = getTargetMachine().shouldAssumeDSOLocal(GV);
1283 if (UsePCRel) {
1284 SDValue GA = DAG.getTargetGlobalAddress(GV, DL: dl, VT: PtrVT, offset: Offset,
1285 TargetFlags: HexagonII::MO_PCREL);
1286 return DAG.getNode(Opcode: HexagonISD::AT_PCREL, DL: dl, VT: PtrVT, Operand: GA);
1287 }
1288
1289 // Use GOT index.
1290 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(VT: PtrVT);
1291 SDValue GA = DAG.getTargetGlobalAddress(GV, DL: dl, VT: PtrVT, offset: 0, TargetFlags: HexagonII::MO_GOT);
1292 SDValue Off = DAG.getConstant(Val: Offset, DL: dl, VT: MVT::i32);
1293 return DAG.getNode(Opcode: HexagonISD::AT_GOT, DL: dl, VT: PtrVT, N1: GOT, N2: GA, N3: Off);
1294}
1295
// Lower a block address to an absolute CONST32_GP reference for static
// relocation, or to a PC-relative reference otherwise.
1297SDValue
1298HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
1299 const BlockAddress *BA = cast<BlockAddressSDNode>(Val&: Op)->getBlockAddress();
1300 SDLoc dl(Op);
1301 EVT PtrVT = getPointerTy(DL: DAG.getDataLayout());
1302
1303 Reloc::Model RM = HTM.getRelocationModel();
1304 if (RM == Reloc::Static) {
1305 SDValue A = DAG.getTargetBlockAddress(BA, VT: PtrVT);
1306 return DAG.getNode(Opcode: HexagonISD::CONST32_GP, DL: dl, VT: PtrVT, Operand: A);
1307 }
1308
1309 SDValue A = DAG.getTargetBlockAddress(BA, VT: PtrVT, Offset: 0, TargetFlags: HexagonII::MO_PCREL);
1310 return DAG.getNode(Opcode: HexagonISD::AT_PCREL, DL: dl, VT: PtrVT, Operand: A);
1311}
1312
1313SDValue
1314HexagonTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG)
1315 const {
1316 EVT PtrVT = getPointerTy(DL: DAG.getDataLayout());
1317 SDValue GOTSym = DAG.getTargetExternalSymbol(HEXAGON_GOT_SYM_NAME, VT: PtrVT,
1318 TargetFlags: HexagonII::MO_PCREL);
1319 return DAG.getNode(Opcode: HexagonISD::AT_PCREL, DL: SDLoc(Op), VT: PtrVT, Operand: GOTSym);
1320}
1321
1322SDValue
1323HexagonTargetLowering::GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain,
1324 GlobalAddressSDNode *GA, SDValue Glue, EVT PtrVT, unsigned ReturnReg,
1325 unsigned char OperandFlags) const {
1326 MachineFunction &MF = DAG.getMachineFunction();
1327 MachineFrameInfo &MFI = MF.getFrameInfo();
1328 SDVTList NodeTys = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue);
1329 SDLoc dl(GA);
1330 SDValue TGA = DAG.getTargetGlobalAddress(GV: GA->getGlobal(), DL: dl,
1331 VT: GA->getValueType(ResNo: 0),
1332 offset: GA->getOffset(),
1333 TargetFlags: OperandFlags);
  // Create the operands for the call. They should be, in order:
  // 1. Chain SDValue
  // 2. Callee, which in this case is the global address value.
  // 3. Registers live into the call; in this case it is just R0, as we
  //    have only one argument to be passed.
  // 4. Glue.
  // Note: The order is important.
1341
1342 const auto &HRI = *Subtarget.getRegisterInfo();
1343 const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallingConv::C);
1344 assert(Mask && "Missing call preserved mask for calling convention");
1345 SDValue Ops[] = { Chain, TGA, DAG.getRegister(Reg: Hexagon::R0, VT: PtrVT),
1346 DAG.getRegisterMask(RegMask: Mask), Glue };
1347 Chain = DAG.getNode(Opcode: HexagonISD::CALL, DL: dl, VTList: NodeTys, Ops);
1348
1349 // Inform MFI that function has calls.
1350 MFI.setAdjustsStack(true);
1351
1352 Glue = Chain.getValue(R: 1);
1353 return DAG.getCopyFromReg(Chain, dl, Reg: ReturnReg, VT: PtrVT, Glue);
1354}
1355
1356//
1357// Lower using the initial executable model for TLS addresses
1358//
1359SDValue
1360HexagonTargetLowering::LowerToTLSInitialExecModel(GlobalAddressSDNode *GA,
1361 SelectionDAG &DAG) const {
1362 SDLoc dl(GA);
1363 int64_t Offset = GA->getOffset();
1364 auto PtrVT = getPointerTy(DL: DAG.getDataLayout());
1365
1366 // Get the thread pointer.
1367 SDValue TP = DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg: Hexagon::UGP, VT: PtrVT);
1368
1369 bool IsPositionIndependent = isPositionIndependent();
1370 unsigned char TF =
1371 IsPositionIndependent ? HexagonII::MO_IEGOT : HexagonII::MO_IE;
1372
1373 // First generate the TLS symbol address
1374 SDValue TGA = DAG.getTargetGlobalAddress(GV: GA->getGlobal(), DL: dl, VT: PtrVT,
1375 offset: Offset, TargetFlags: TF);
1376
1377 SDValue Sym = DAG.getNode(Opcode: HexagonISD::CONST32, DL: dl, VT: PtrVT, Operand: TGA);
1378
1379 if (IsPositionIndependent) {
1380 // Generate the GOT pointer in case of position independent code
1381 SDValue GOT = LowerGLOBAL_OFFSET_TABLE(Op: Sym, DAG);
1382
    // Add the TLS symbol address to the GOT pointer. This gives a
    // GOT-relative relocation for the symbol.
1385 Sym = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: GOT, N2: Sym);
1386 }
1387
  // Load the offset value for the TLS symbol. This offset is relative to
  // the thread pointer.
1390 SDValue LoadOffset =
1391 DAG.getLoad(VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: Sym, PtrInfo: MachinePointerInfo());
1392
1393 // Address of the thread local variable is the add of thread
1394 // pointer and the offset of the variable.
1395 return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: TP, N2: LoadOffset);
1396}
1397
1398//
1399// Lower using the local-exec model for TLS addresses.
1400//
1401SDValue
1402HexagonTargetLowering::LowerToTLSLocalExecModel(GlobalAddressSDNode *GA,
1403 SelectionDAG &DAG) const {
1404 SDLoc dl(GA);
1405 int64_t Offset = GA->getOffset();
1406 auto PtrVT = getPointerTy(DL: DAG.getDataLayout());
1407
1408 // Get the thread pointer.
1409 SDValue TP = DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg: Hexagon::UGP, VT: PtrVT);
1410 // Generate the TLS symbol address
1411 SDValue TGA = DAG.getTargetGlobalAddress(GV: GA->getGlobal(), DL: dl, VT: PtrVT, offset: Offset,
1412 TargetFlags: HexagonII::MO_TPREL);
1413 SDValue Sym = DAG.getNode(Opcode: HexagonISD::CONST32, DL: dl, VT: PtrVT, Operand: TGA);
1414
1415 // The address of the thread-local variable is the sum of the thread
1416 // pointer and the variable's offset.
1417 return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: TP, N2: Sym);
1418}
1419
1420//
1421// Lower using the general-dynamic model for TLS addresses.
1422//
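// The address of the symbol's GOT entry (the GOT pointer plus the MO_GDGOT
// offset) is passed to the resolver in R0; the resolver is reached through
// the GD PLT stub (MO_GDPLT), and the resolved address of the variable comes
// back in R0.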
1423SDValue
1424HexagonTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
1425 SelectionDAG &DAG) const {
1426 SDLoc dl(GA);
1427 int64_t Offset = GA->getOffset();
1428 auto PtrVT = getPointerTy(DL: DAG.getDataLayout());
1429
1430 // First generate the TLS symbol address
1431 SDValue TGA = DAG.getTargetGlobalAddress(GV: GA->getGlobal(), DL: dl, VT: PtrVT, offset: Offset,
1432 TargetFlags: HexagonII::MO_GDGOT);
1433
1434 // Then, generate the GOT pointer
1435 SDValue GOT = LowerGLOBAL_OFFSET_TABLE(Op: TGA, DAG);
1436
1437 // Add the TLS symbol and the GOT pointer
1438 SDValue Sym = DAG.getNode(Opcode: HexagonISD::CONST32, DL: dl, VT: PtrVT, Operand: TGA);
1439 SDValue Chain = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: GOT, N2: Sym);
1440
1441 // Copy over the argument to R0
1442 SDValue InGlue;
1443 Chain = DAG.getCopyToReg(Chain: DAG.getEntryNode(), dl, Reg: Hexagon::R0, N: Chain, Glue: InGlue);
1444 InGlue = Chain.getValue(R: 1);
1445
1446 unsigned Flags = DAG.getSubtarget<HexagonSubtarget>().useLongCalls()
1447 ? HexagonII::MO_GDPLT | HexagonII::HMOTF_ConstExtended
1448 : HexagonII::MO_GDPLT;
1449
1450 return GetDynamicTLSAddr(DAG, Chain, GA, Glue: InGlue, PtrVT,
1451 ReturnReg: Hexagon::R0, OperandFlags: Flags);
1452}
1453
1454//
1455// Lower TLS addresses.
1456//
1457// For now, among the dynamic models we only support the general-dynamic model.
1458//
1459SDValue
1460HexagonTargetLowering::LowerGlobalTLSAddress(SDValue Op,
1461 SelectionDAG &DAG) const {
1462 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Val&: Op);
1463
1464 switch (HTM.getTLSModel(GV: GA->getGlobal())) {
1465 case TLSModel::GeneralDynamic:
1466 case TLSModel::LocalDynamic:
1467 return LowerToTLSGeneralDynamicModel(GA, DAG);
1468 case TLSModel::InitialExec:
1469 return LowerToTLSInitialExecModel(GA, DAG);
1470 case TLSModel::LocalExec:
1471 return LowerToTLSLocalExecModel(GA, DAG);
1472 }
1473 llvm_unreachable("Bogus TLS model");
1474}
1475
1476//===----------------------------------------------------------------------===//
1477// TargetLowering Implementation
1478//===----------------------------------------------------------------------===//
1479
1480HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
1481 const HexagonSubtarget &ST)
1482 : TargetLowering(TM, ST),
1483 HTM(static_cast<const HexagonTargetMachine &>(TM)), Subtarget(ST) {
1484 auto &HRI = *Subtarget.getRegisterInfo();
1485
1486 setPrefLoopAlignment(Align(16));
1487 setMinFunctionAlignment(Align(4));
1488 setPrefFunctionAlignment(Align(16));
1489 setStackPointerRegisterToSaveRestore(HRI.getStackRegister());
1490 setBooleanContents(TargetLoweringBase::UndefinedBooleanContent);
1491 setBooleanVectorContents(TargetLoweringBase::UndefinedBooleanContent);
1492
1493 setMaxAtomicSizeInBitsSupported(64);
1494 setMinCmpXchgSizeInBits(32);
1495
1496 if (EnableHexSDNodeSched)
1497 setSchedulingPreference(Sched::VLIW);
1498 else
1499 setSchedulingPreference(Sched::Source);
1500
1501 // Limits for inline expansion of memcpy/memmove
1502 MaxStoresPerMemcpy = 6;
1503 MaxStoresPerMemcpyOptSize = 4;
1504 MaxStoresPerMemmove = 6;
1505 MaxStoresPerMemmoveOptSize = 4;
1506 MaxStoresPerMemset = 8;
1507 MaxStoresPerMemsetOptSize = 4;
1508
1509 //
1510 // Set up register classes.
1511 //
1512
1513 addRegisterClass(VT: MVT::i1, RC: &Hexagon::PredRegsRegClass);
1514 addRegisterClass(VT: MVT::v2i1, RC: &Hexagon::PredRegsRegClass); // bbbbaaaa
1515 addRegisterClass(VT: MVT::v4i1, RC: &Hexagon::PredRegsRegClass); // ddccbbaa
1516 addRegisterClass(VT: MVT::v8i1, RC: &Hexagon::PredRegsRegClass); // hgfedcba
1517 addRegisterClass(VT: MVT::i32, RC: &Hexagon::IntRegsRegClass);
1518 addRegisterClass(VT: MVT::v2i16, RC: &Hexagon::IntRegsRegClass);
1519 addRegisterClass(VT: MVT::v4i8, RC: &Hexagon::IntRegsRegClass);
1520 addRegisterClass(VT: MVT::i64, RC: &Hexagon::DoubleRegsRegClass);
1521 addRegisterClass(VT: MVT::v8i8, RC: &Hexagon::DoubleRegsRegClass);
1522 addRegisterClass(VT: MVT::v4i16, RC: &Hexagon::DoubleRegsRegClass);
1523 addRegisterClass(VT: MVT::v2i32, RC: &Hexagon::DoubleRegsRegClass);
1524
1525 addRegisterClass(VT: MVT::f32, RC: &Hexagon::IntRegsRegClass);
1526 addRegisterClass(VT: MVT::f64, RC: &Hexagon::DoubleRegsRegClass);
1527
1528 //
1529 // Handling of scalar operations.
1530 //
1531 // All operations default to "legal", except:
1532 // - indexed loads and stores (pre-/post-incremented),
1533 // - ANY_EXTEND_VECTOR_INREG, ATOMIC_CMP_SWAP_WITH_SUCCESS, CONCAT_VECTORS,
1534 // ConstantFP, FCEIL, FCOPYSIGN, FEXP, FEXP2, FFLOOR, FGETSIGN,
1535 // FLOG, FLOG2, FLOG10, FMAXIMUMNUM, FMINIMUMNUM, FNEARBYINT, FRINT, FROUND,
1536 // TRAP, FTRUNC, PREFETCH, SIGN_EXTEND_VECTOR_INREG,
1537 // ZERO_EXTEND_VECTOR_INREG,
1538 // which default to "expand" for at least one type.
1539
1540 // Misc operations.
1541 setOperationAction(Op: ISD::ConstantFP, VT: MVT::f32, Action: Legal);
1542 setOperationAction(Op: ISD::ConstantFP, VT: MVT::f64, Action: Legal);
1543 setOperationAction(Op: ISD::TRAP, VT: MVT::Other, Action: Legal);
1544 setOperationAction(Op: ISD::DEBUGTRAP, VT: MVT::Other, Action: Legal);
1545 setOperationAction(Op: ISD::ConstantPool, VT: MVT::i32, Action: Custom);
1546 setOperationAction(Op: ISD::JumpTable, VT: MVT::i32, Action: Custom);
1547 setOperationAction(Op: ISD::BUILD_PAIR, VT: MVT::i64, Action: Expand);
1548 setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::i1, Action: Expand);
1549 setOperationAction(Op: ISD::INLINEASM, VT: MVT::Other, Action: Custom);
1550 setOperationAction(Op: ISD::INLINEASM_BR, VT: MVT::Other, Action: Custom);
1551 setOperationAction(Op: ISD::PREFETCH, VT: MVT::Other, Action: Custom);
1552 setOperationAction(Op: ISD::READCYCLECOUNTER, VT: MVT::i64, Action: Custom);
1553 setOperationAction(Op: ISD::READSTEADYCOUNTER, VT: MVT::i64, Action: Custom);
1554 setOperationAction(Op: ISD::INTRINSIC_WO_CHAIN, VT: MVT::Other, Action: Custom);
1555 setOperationAction(Op: ISD::INTRINSIC_VOID, VT: MVT::Other, Action: Custom);
1556 setOperationAction(Op: ISD::EH_RETURN, VT: MVT::Other, Action: Custom);
1557 setOperationAction(Op: ISD::GLOBAL_OFFSET_TABLE, VT: MVT::i32, Action: Custom);
1558 setOperationAction(Op: ISD::GlobalTLSAddress, VT: MVT::i32, Action: Custom);
1559 setOperationAction(Op: ISD::ATOMIC_FENCE, VT: MVT::Other, Action: Custom);
1560
1561 // Custom legalize GlobalAddress nodes into CONST32.
1562 setOperationAction(Op: ISD::GlobalAddress, VT: MVT::i32, Action: Custom);
1563 setOperationAction(Op: ISD::GlobalAddress, VT: MVT::i8, Action: Custom);
1564 setOperationAction(Op: ISD::BlockAddress, VT: MVT::i32, Action: Custom);
1565
1566 // Hexagon needs to optimize cases with negative constants.
1567 setOperationAction(Op: ISD::SETCC, VT: MVT::i8, Action: Custom);
1568 setOperationAction(Op: ISD::SETCC, VT: MVT::i16, Action: Custom);
1569 setOperationAction(Op: ISD::SETCC, VT: MVT::v4i8, Action: Custom);
1570 setOperationAction(Op: ISD::SETCC, VT: MVT::v2i16, Action: Custom);
1571
1572 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1573 setOperationAction(Op: ISD::VASTART, VT: MVT::Other, Action: Custom);
1574 setOperationAction(Op: ISD::VAEND, VT: MVT::Other, Action: Expand);
1575 setOperationAction(Op: ISD::VAARG, VT: MVT::Other, Action: Expand);
1576 if (Subtarget.isEnvironmentMusl())
1577 setOperationAction(Op: ISD::VACOPY, VT: MVT::Other, Action: Custom);
1578 else
1579 setOperationAction(Op: ISD::VACOPY, VT: MVT::Other, Action: Expand);
1580
1581 setOperationAction(Op: ISD::STACKSAVE, VT: MVT::Other, Action: Expand);
1582 setOperationAction(Op: ISD::STACKRESTORE, VT: MVT::Other, Action: Expand);
1583 setOperationAction(Op: ISD::DYNAMIC_STACKALLOC, VT: MVT::i32, Action: Custom);
1584
1585 if (EmitJumpTables)
1586 setMinimumJumpTableEntries(MinimumJumpTables);
1587 else
1588 setMinimumJumpTableEntries(std::numeric_limits<unsigned>::max());
1589 setOperationAction(Op: ISD::BR_JT, VT: MVT::Other, Action: Expand);
1590
1591 for (unsigned LegalIntOp :
1592 {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}) {
1593 setOperationAction(Op: LegalIntOp, VT: MVT::i32, Action: Legal);
1594 setOperationAction(Op: LegalIntOp, VT: MVT::i64, Action: Legal);
1595 }
1596
1597 // Hexagon has A4_addp_c and A4_subp_c that take and generate a carry bit,
1598 // but they only operate on i64.
1599 for (MVT VT : MVT::integer_valuetypes()) {
1600 setOperationAction(Op: ISD::UADDO, VT, Action: Custom);
1601 setOperationAction(Op: ISD::USUBO, VT, Action: Custom);
1602 setOperationAction(Op: ISD::SADDO, VT, Action: Expand);
1603 setOperationAction(Op: ISD::SSUBO, VT, Action: Expand);
1604 setOperationAction(Op: ISD::UADDO_CARRY, VT, Action: Expand);
1605 setOperationAction(Op: ISD::USUBO_CARRY, VT, Action: Expand);
1606 }
1607 setOperationAction(Op: ISD::UADDO_CARRY, VT: MVT::i64, Action: Custom);
1608 setOperationAction(Op: ISD::USUBO_CARRY, VT: MVT::i64, Action: Custom);
1609
1610 setOperationAction(Op: ISD::CTLZ, VT: MVT::i8, Action: Promote);
1611 setOperationAction(Op: ISD::CTLZ, VT: MVT::i16, Action: Promote);
1612 setOperationAction(Op: ISD::CTTZ, VT: MVT::i8, Action: Promote);
1613 setOperationAction(Op: ISD::CTTZ, VT: MVT::i16, Action: Promote);
1614
1615 // Popcount can count # of 1s in i64 but returns i32.
1616 setOperationAction(Op: ISD::CTPOP, VT: MVT::i8, Action: Promote);
1617 setOperationAction(Op: ISD::CTPOP, VT: MVT::i16, Action: Promote);
1618 setOperationAction(Op: ISD::CTPOP, VT: MVT::i32, Action: Promote);
1619 setOperationAction(Op: ISD::CTPOP, VT: MVT::i64, Action: Legal);
1620
1621 setOperationAction(Op: ISD::BITREVERSE, VT: MVT::i32, Action: Legal);
1622 setOperationAction(Op: ISD::BITREVERSE, VT: MVT::i64, Action: Legal);
1623 setOperationAction(Op: ISD::BSWAP, VT: MVT::i32, Action: Legal);
1624 setOperationAction(Op: ISD::BSWAP, VT: MVT::i64, Action: Legal);
1625
1626 setOperationAction(Op: ISD::FSHL, VT: MVT::i32, Action: Legal);
1627 setOperationAction(Op: ISD::FSHL, VT: MVT::i64, Action: Legal);
1628 setOperationAction(Op: ISD::FSHR, VT: MVT::i32, Action: Legal);
1629 setOperationAction(Op: ISD::FSHR, VT: MVT::i64, Action: Legal);
1630
1631 for (unsigned IntExpOp :
1632 {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM,
1633 ISD::SDIVREM, ISD::UDIVREM, ISD::ROTL, ISD::ROTR,
1634 ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS,
1635 ISD::SMUL_LOHI, ISD::UMUL_LOHI}) {
1636 for (MVT VT : MVT::integer_valuetypes())
1637 setOperationAction(Op: IntExpOp, VT, Action: Expand);
1638 }
1639 for (MVT VT : MVT::fp_valuetypes()) {
1640 for (unsigned FPExpOp : {ISD::FDIV, ISD::FSQRT, ISD::FSIN, ISD::FCOS,
1641 ISD::FSINCOS, ISD::FPOW, ISD::FCOPYSIGN})
1642 setOperationAction(Op: FPExpOp, VT, Action: Expand);
1643
1644 setOperationAction(Op: ISD::FREM, VT, Action: LibCall);
1645 }
1646
1647 // No extending loads from i32.
1648 for (MVT VT : MVT::integer_valuetypes()) {
1649 setLoadExtAction(ExtType: ISD::ZEXTLOAD, ValVT: VT, MemVT: MVT::i32, Action: Expand);
1650 setLoadExtAction(ExtType: ISD::SEXTLOAD, ValVT: VT, MemVT: MVT::i32, Action: Expand);
1651 setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: VT, MemVT: MVT::i32, Action: Expand);
1652 }
1653 // Turn FP truncstore into trunc + store.
1654 setTruncStoreAction(ValVT: MVT::f64, MemVT: MVT::f32, Action: Expand);
1655 setTruncStoreAction(ValVT: MVT::f32, MemVT: MVT::bf16, Action: Expand);
1656 setTruncStoreAction(ValVT: MVT::f64, MemVT: MVT::bf16, Action: Expand);
1657 // Turn FP extload into load/fpextend.
1658 for (MVT VT : MVT::fp_valuetypes())
1659 setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: VT, MemVT: MVT::f32, Action: Expand);
1660
1661 // Expand BR_CC and SELECT_CC for all integer and fp types.
1662 for (MVT VT : MVT::integer_valuetypes()) {
1663 setOperationAction(Op: ISD::BR_CC, VT, Action: Expand);
1664 setOperationAction(Op: ISD::SELECT_CC, VT, Action: Expand);
1665 }
1666 for (MVT VT : MVT::fp_valuetypes()) {
1667 setOperationAction(Op: ISD::BR_CC, VT, Action: Expand);
1668 setOperationAction(Op: ISD::SELECT_CC, VT, Action: Expand);
1669 }
1670 setOperationAction(Op: ISD::BR_CC, VT: MVT::Other, Action: Expand);
1671
1672 //
1673 // Handling of vector operations.
1674 //
1675
1676 // Set the action for vector operations to "expand", then override it with
1677 // either "custom" or "legal" for specific cases.
1678 // clang-format off
1679 static const unsigned VectExpOps[] = {
1680 // Integer arithmetic:
1681 ISD::ADD, ISD::SUB, ISD::MUL, ISD::SDIV, ISD::UDIV,
1682 ISD::SREM, ISD::UREM, ISD::SDIVREM, ISD::UDIVREM, ISD::SADDO,
1683 ISD::UADDO, ISD::SSUBO, ISD::USUBO, ISD::SMUL_LOHI, ISD::UMUL_LOHI,
1684 // Logical/bit:
1685 ISD::AND, ISD::OR, ISD::XOR, ISD::ROTL, ISD::ROTR,
1686 ISD::CTPOP, ISD::CTLZ, ISD::CTTZ, ISD::BSWAP, ISD::BITREVERSE,
1687 // Floating point arithmetic/math functions:
1688 ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FMA, ISD::FDIV,
1689 ISD::FREM, ISD::FNEG, ISD::FABS, ISD::FSQRT, ISD::FSIN,
1690 ISD::FCOS, ISD::FPOW, ISD::FLOG, ISD::FLOG2,
1691 ISD::FLOG10, ISD::FEXP, ISD::FEXP2, ISD::FCEIL, ISD::FTRUNC,
1692 ISD::FRINT, ISD::FNEARBYINT, ISD::FROUND, ISD::FFLOOR,
1693 ISD::FMINIMUMNUM, ISD::FMAXIMUMNUM,
1694 ISD::FSINCOS, ISD::FLDEXP,
1695 // Misc:
1696 ISD::BR_CC, ISD::SELECT_CC, ISD::ConstantPool,
1697 // Vector:
1698 ISD::BUILD_VECTOR, ISD::SCALAR_TO_VECTOR,
1699 ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT,
1700 ISD::EXTRACT_SUBVECTOR, ISD::INSERT_SUBVECTOR,
1701 ISD::CONCAT_VECTORS, ISD::VECTOR_SHUFFLE,
1702 ISD::SPLAT_VECTOR,
1703 };
1704 // clang-format on
1705
1706 for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
1707 for (unsigned VectExpOp : VectExpOps)
1708 setOperationAction(Op: VectExpOp, VT, Action: Expand);
1709
1710 // Expand all extending loads and truncating stores:
1711 for (MVT TargetVT : MVT::fixedlen_vector_valuetypes()) {
1712 if (TargetVT == VT)
1713 continue;
1714 setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: TargetVT, MemVT: VT, Action: Expand);
1715 setLoadExtAction(ExtType: ISD::ZEXTLOAD, ValVT: TargetVT, MemVT: VT, Action: Expand);
1716 setLoadExtAction(ExtType: ISD::SEXTLOAD, ValVT: TargetVT, MemVT: VT, Action: Expand);
1717 setTruncStoreAction(ValVT: VT, MemVT: TargetVT, Action: Expand);
1718 }
1719
1720 // Normalize all inputs to SELECT to be vectors of i32.
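    // For example, a SELECT on v4i8 (32 bits) is promoted to v1i32, and one
    // on v8i8 (64 bits) to v2i32.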
1721 if (VT.getVectorElementType() != MVT::i32) {
1722 MVT VT32 = MVT::getVectorVT(VT: MVT::i32, NumElements: VT.getSizeInBits()/32);
1723 setOperationAction(Op: ISD::SELECT, VT, Action: Promote);
1724 AddPromotedToType(Opc: ISD::SELECT, OrigVT: VT, DestVT: VT32);
1725 }
1726 setOperationAction(Op: ISD::SRA, VT, Action: Custom);
1727 setOperationAction(Op: ISD::SHL, VT, Action: Custom);
1728 setOperationAction(Op: ISD::SRL, VT, Action: Custom);
1729 }
1730
1731 setOperationAction(Op: ISD::SADDSAT, VT: MVT::i32, Action: Legal);
1732 setOperationAction(Op: ISD::SADDSAT, VT: MVT::i64, Action: Legal);
1733
1734 // Extending loads from (native) vectors of i8 into (native) vectors of i16
1735 // are legal.
1736 setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: MVT::v2i16, MemVT: MVT::v2i8, Action: Legal);
1737 setLoadExtAction(ExtType: ISD::ZEXTLOAD, ValVT: MVT::v2i16, MemVT: MVT::v2i8, Action: Legal);
1738 setLoadExtAction(ExtType: ISD::SEXTLOAD, ValVT: MVT::v2i16, MemVT: MVT::v2i8, Action: Legal);
1739 setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: MVT::v4i16, MemVT: MVT::v4i8, Action: Legal);
1740 setLoadExtAction(ExtType: ISD::ZEXTLOAD, ValVT: MVT::v4i16, MemVT: MVT::v4i8, Action: Legal);
1741 setLoadExtAction(ExtType: ISD::SEXTLOAD, ValVT: MVT::v4i16, MemVT: MVT::v4i8, Action: Legal);
1742
1743 setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::v2i8, Action: Legal);
1744 setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::v2i16, Action: Legal);
1745 setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::v2i32, Action: Legal);
1746
1747 // Types natively supported:
1748 for (MVT NativeVT : {MVT::v8i1, MVT::v4i1, MVT::v2i1, MVT::v4i8,
1749 MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
1750 setOperationAction(Op: ISD::BUILD_VECTOR, VT: NativeVT, Action: Custom);
1751 setOperationAction(Op: ISD::EXTRACT_VECTOR_ELT, VT: NativeVT, Action: Custom);
1752 setOperationAction(Op: ISD::INSERT_VECTOR_ELT, VT: NativeVT, Action: Custom);
1753 setOperationAction(Op: ISD::EXTRACT_SUBVECTOR, VT: NativeVT, Action: Custom);
1754 setOperationAction(Op: ISD::INSERT_SUBVECTOR, VT: NativeVT, Action: Custom);
1755 setOperationAction(Op: ISD::CONCAT_VECTORS, VT: NativeVT, Action: Custom);
1756
1757 setOperationAction(Op: ISD::ADD, VT: NativeVT, Action: Legal);
1758 setOperationAction(Op: ISD::SUB, VT: NativeVT, Action: Legal);
1759 setOperationAction(Op: ISD::MUL, VT: NativeVT, Action: Legal);
1760 setOperationAction(Op: ISD::AND, VT: NativeVT, Action: Legal);
1761 setOperationAction(Op: ISD::OR, VT: NativeVT, Action: Legal);
1762 setOperationAction(Op: ISD::XOR, VT: NativeVT, Action: Legal);
1763
1764 if (NativeVT.getVectorElementType() != MVT::i1) {
1765 setOperationAction(Op: ISD::SPLAT_VECTOR, VT: NativeVT, Action: Legal);
1766 setOperationAction(Op: ISD::BSWAP, VT: NativeVT, Action: Legal);
1767 setOperationAction(Op: ISD::BITREVERSE, VT: NativeVT, Action: Legal);
1768 }
1769 }
1770
1771 for (MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32}) {
1772 setOperationAction(Op: ISD::SMIN, VT, Action: Legal);
1773 setOperationAction(Op: ISD::SMAX, VT, Action: Legal);
1774 setOperationAction(Op: ISD::UMIN, VT, Action: Legal);
1775 setOperationAction(Op: ISD::UMAX, VT, Action: Legal);
1776 }
1777
1778 // Custom lower unaligned loads.
1779 // Also, for both loads and stores, verify the alignment of the address
1780 // in case it is a compile-time constant. This is a usability feature to
1781 // provide a meaningful error message to users.
1782 for (MVT VT : {MVT::i16, MVT::i32, MVT::v4i8, MVT::i64, MVT::v8i8,
1783 MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
1784 setOperationAction(Op: ISD::LOAD, VT, Action: Custom);
1785 setOperationAction(Op: ISD::STORE, VT, Action: Custom);
1786 }
1787
1788 // Custom-lower load/stores of boolean vectors.
1789 for (MVT VT : {MVT::v2i1, MVT::v4i1, MVT::v8i1}) {
1790 setOperationAction(Op: ISD::LOAD, VT, Action: Custom);
1791 setOperationAction(Op: ISD::STORE, VT, Action: Custom);
1792 }
1793
1794 // Normalize integer compares to EQ/GT/UGT
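  // Condition codes marked Expand are rewritten by the DAG legalizer in terms
  // of the supported ones, typically by swapping the operands (e.g. SETLT a,b
  // becomes SETGT b,a) or by inverting the opposite comparison.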
1795 for (MVT VT : {MVT::v2i16, MVT::v4i8, MVT::v8i8, MVT::v2i32,
1796 MVT::v4i16}) {
1797 setCondCodeAction(CCs: ISD::SETNE, VT, Action: Expand);
1798 setCondCodeAction(CCs: ISD::SETLE, VT, Action: Expand);
1799 setCondCodeAction(CCs: ISD::SETGE, VT, Action: Expand);
1800 setCondCodeAction(CCs: ISD::SETLT, VT, Action: Expand);
1801 setCondCodeAction(CCs: ISD::SETULE, VT, Action: Expand);
1802 setCondCodeAction(CCs: ISD::SETUGE, VT, Action: Expand);
1803 setCondCodeAction(CCs: ISD::SETULT, VT, Action: Expand);
1804 }
1805
1806 // Normalize boolean compares to [U]LE/[U]LT
1807 for (MVT VT : {MVT::i1, MVT::v2i1, MVT::v4i1, MVT::v8i1}) {
1808 setCondCodeAction(CCs: ISD::SETGE, VT, Action: Expand);
1809 setCondCodeAction(CCs: ISD::SETGT, VT, Action: Expand);
1810 setCondCodeAction(CCs: ISD::SETUGE, VT, Action: Expand);
1811 setCondCodeAction(CCs: ISD::SETUGT, VT, Action: Expand);
1812 }
1813
1814 // Custom-lower bitcasts from i8 to v8i1.
1815 setOperationAction(Op: ISD::BITCAST, VT: MVT::i8, Action: Custom);
1816 setOperationAction(Op: ISD::SETCC, VT: MVT::v2i16, Action: Custom);
1817 setOperationAction(Op: ISD::VSELECT, VT: MVT::v4i8, Action: Custom);
1818 setOperationAction(Op: ISD::VSELECT, VT: MVT::v2i16, Action: Custom);
1819 setOperationAction(Op: ISD::VECTOR_SHUFFLE, VT: MVT::v4i8, Action: Custom);
1820 setOperationAction(Op: ISD::VECTOR_SHUFFLE, VT: MVT::v4i16, Action: Custom);
1821 setOperationAction(Op: ISD::VECTOR_SHUFFLE, VT: MVT::v8i8, Action: Custom);
1822
1823 // V5+.
1824 setOperationAction(Op: ISD::FMA, VT: MVT::f64, Action: Expand);
1825 setOperationAction(Op: ISD::FADD, VT: MVT::f64, Action: Expand);
1826 setOperationAction(Op: ISD::FSUB, VT: MVT::f64, Action: Expand);
1827 setOperationAction(Op: ISD::FMUL, VT: MVT::f64, Action: Expand);
1828 setOperationAction(Op: ISD::FDIV, VT: MVT::f32, Action: Custom);
1829
1830 setOperationAction(Op: ISD::FMINIMUMNUM, VT: MVT::f32, Action: Legal);
1831 setOperationAction(Op: ISD::FMAXIMUMNUM, VT: MVT::f32, Action: Legal);
1832
1833 setOperationAction(Op: ISD::FP_TO_UINT, VT: MVT::i1, Action: Promote);
1834 setOperationAction(Op: ISD::FP_TO_UINT, VT: MVT::i8, Action: Promote);
1835 setOperationAction(Op: ISD::FP_TO_UINT, VT: MVT::i16, Action: Promote);
1836 setOperationAction(Op: ISD::FP_TO_SINT, VT: MVT::i1, Action: Promote);
1837 setOperationAction(Op: ISD::FP_TO_SINT, VT: MVT::i8, Action: Promote);
1838 setOperationAction(Op: ISD::FP_TO_SINT, VT: MVT::i16, Action: Promote);
1839 setOperationAction(Op: ISD::UINT_TO_FP, VT: MVT::i1, Action: Promote);
1840 setOperationAction(Op: ISD::UINT_TO_FP, VT: MVT::i8, Action: Promote);
1841 setOperationAction(Op: ISD::UINT_TO_FP, VT: MVT::i16, Action: Promote);
1842 setOperationAction(Op: ISD::SINT_TO_FP, VT: MVT::i1, Action: Promote);
1843 setOperationAction(Op: ISD::SINT_TO_FP, VT: MVT::i8, Action: Promote);
1844 setOperationAction(Op: ISD::SINT_TO_FP, VT: MVT::i16, Action: Promote);
1845
1846 // Special handling for half-precision floating point conversions.
1847 // Lower half float conversions into library calls.
1848 setOperationAction(Op: ISD::FP16_TO_FP, VT: MVT::f32, Action: Expand);
1849 setOperationAction(Op: ISD::FP16_TO_FP, VT: MVT::f64, Action: Expand);
1850 setOperationAction(Op: ISD::FP_TO_FP16, VT: MVT::f32, Action: Expand);
1851 setOperationAction(Op: ISD::FP_TO_FP16, VT: MVT::f64, Action: Expand);
1852 setOperationAction(Op: ISD::BF16_TO_FP, VT: MVT::f32, Action: Expand);
1853 setOperationAction(Op: ISD::BF16_TO_FP, VT: MVT::f64, Action: Expand);
1854 setOperationAction(Op: ISD::FP_TO_BF16, VT: MVT::f64, Action: Expand);
1855
1856 setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: MVT::f32, MemVT: MVT::f16, Action: Expand);
1857 setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: MVT::f64, MemVT: MVT::f16, Action: Expand);
1858 setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: MVT::f32, MemVT: MVT::bf16, Action: Expand);
1859 setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: MVT::f64, MemVT: MVT::bf16, Action: Expand);
1860
1861 setTruncStoreAction(ValVT: MVT::f32, MemVT: MVT::f16, Action: Expand);
1862 setTruncStoreAction(ValVT: MVT::f64, MemVT: MVT::f16, Action: Expand);
1863
1864 // Handling of indexed loads/stores: default is "expand".
1865 //
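  // Post-incremented accesses combine the memory operation with the address
  // update, e.g. "r1 = memw(r0++#4)" loads a word from r0 and then advances
  // r0 by 4.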
1866 for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64, MVT::f32, MVT::f64,
1867 MVT::v2i16, MVT::v2i32, MVT::v4i8, MVT::v4i16, MVT::v8i8}) {
1868 setIndexedLoadAction(IdxModes: ISD::POST_INC, VT, Action: Legal);
1869 setIndexedStoreAction(IdxModes: ISD::POST_INC, VT, Action: Legal);
1870 }
1871
1872 // Subtarget-specific operation actions.
1873 //
1874 if (Subtarget.hasV60Ops()) {
1875 setOperationAction(Op: ISD::ROTL, VT: MVT::i32, Action: Legal);
1876 setOperationAction(Op: ISD::ROTL, VT: MVT::i64, Action: Legal);
1877 setOperationAction(Op: ISD::ROTR, VT: MVT::i32, Action: Legal);
1878 setOperationAction(Op: ISD::ROTR, VT: MVT::i64, Action: Legal);
1879 }
1880 if (Subtarget.hasV66Ops()) {
1881 setOperationAction(Op: ISD::FADD, VT: MVT::f64, Action: Legal);
1882 setOperationAction(Op: ISD::FSUB, VT: MVT::f64, Action: Legal);
1883 }
1884 if (Subtarget.hasV67Ops()) {
1885 setOperationAction(Op: ISD::FMINIMUMNUM, VT: MVT::f64, Action: Legal);
1886 setOperationAction(Op: ISD::FMAXIMUMNUM, VT: MVT::f64, Action: Legal);
1887 setOperationAction(Op: ISD::FMUL, VT: MVT::f64, Action: Legal);
1888 }
1889
1890 setTargetDAGCombine(ISD::OR);
1891 setTargetDAGCombine(ISD::TRUNCATE);
1892 setTargetDAGCombine(ISD::VSELECT);
1893
1894 if (Subtarget.useHVXOps())
1895 initializeHVXLowering();
1896
1897 computeRegisterProperties(TRI: &HRI);
1898}
1899
1900bool
1901HexagonTargetLowering::validateConstPtrAlignment(SDValue Ptr, Align NeedAlign,
1902 const SDLoc &dl, SelectionDAG &DAG) const {
1903 auto *CA = dyn_cast<ConstantSDNode>(Val&: Ptr);
1904 if (!CA)
1905 return true;
1906 unsigned Addr = CA->getZExtValue();
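  // The provable alignment of a constant address comes from its trailing zero
  // bits, e.g. 0x1004 has two trailing zeros and is therefore only 4-byte
  // aligned. Address 0 is treated as sufficiently aligned.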
1907 Align HaveAlign =
1908 Addr != 0 ? Align(1ull << llvm::countr_zero(Val: Addr)) : NeedAlign;
1909 if (HaveAlign >= NeedAlign)
1910 return true;
1911
1912 static int DK_MisalignedTrap = llvm::getNextAvailablePluginDiagnosticKind();
1913
1914 struct DiagnosticInfoMisalignedTrap : public DiagnosticInfo {
1915 DiagnosticInfoMisalignedTrap(StringRef M)
1916 : DiagnosticInfo(DK_MisalignedTrap, DS_Remark), Msg(M) {}
1917 void print(DiagnosticPrinter &DP) const override {
1918 DP << Msg;
1919 }
1920 static bool classof(const DiagnosticInfo *DI) {
1921 return DI->getKind() == DK_MisalignedTrap;
1922 }
1923 StringRef Msg;
1924 };
1925
1926 std::string ErrMsg;
1927 raw_string_ostream O(ErrMsg);
1928 O << "Misaligned constant address: " << format_hex(N: Addr, Width: 10)
1929 << " has alignment " << HaveAlign.value()
1930 << ", but the memory access requires " << NeedAlign.value();
1931 if (DebugLoc DL = dl.getDebugLoc())
1932 DL.print(OS&: O << ", at ");
1933 O << ". The instruction has been replaced with a trap.";
1934
1935 DAG.getContext()->diagnose(DI: DiagnosticInfoMisalignedTrap(O.str()));
1936 return false;
1937}
1938
1939SDValue
1940HexagonTargetLowering::replaceMemWithUndef(SDValue Op, SelectionDAG &DAG)
1941 const {
1942 const SDLoc &dl(Op);
1943 auto *LS = cast<LSBaseSDNode>(Val: Op.getNode());
1944 assert(!LS->isIndexed() && "Not expecting indexed ops on constant address");
1945
1946 SDValue Chain = LS->getChain();
1947 SDValue Trap = DAG.getNode(Opcode: ISD::TRAP, DL: dl, VT: MVT::Other, Operand: Chain);
1948 if (LS->getOpcode() == ISD::LOAD)
1949 return DAG.getMergeValues(Ops: {DAG.getUNDEF(VT: ty(Op)), Trap}, dl);
1950 return Trap;
1951}
1952
1953// Bit-reverse Load Intrinsic: Check if the instruction is a bit-reverse load
1954// intrinsic.
1955static bool isBrevLdIntrinsic(const Value *Inst) {
1956 unsigned ID = cast<IntrinsicInst>(Val: Inst)->getIntrinsicID();
1957 return (ID == Intrinsic::hexagon_L2_loadrd_pbr ||
1958 ID == Intrinsic::hexagon_L2_loadri_pbr ||
1959 ID == Intrinsic::hexagon_L2_loadrh_pbr ||
1960 ID == Intrinsic::hexagon_L2_loadruh_pbr ||
1961 ID == Intrinsic::hexagon_L2_loadrb_pbr ||
1962 ID == Intrinsic::hexagon_L2_loadrub_pbr);
1963}
1964
1965// Bit-reverse Load Intrinsic: Crawl up and figure out the object from the
1966// previous instruction. So far we only handle bitcast, extractvalue and
1967// bit-reverse load intrinsic instructions. Should we handle CGEP?
1968static Value *getBrevLdObject(Value *V) {
1969 if (Operator::getOpcode(V) == Instruction::ExtractValue ||
1970 Operator::getOpcode(V) == Instruction::BitCast)
1971 V = cast<Operator>(Val: V)->getOperand(i: 0);
1972 else if (isa<IntrinsicInst>(Val: V) && isBrevLdIntrinsic(Inst: V))
1973 V = cast<Instruction>(Val: V)->getOperand(i: 0);
1974 return V;
1975}
1976
1977// Bit-reverse Load Intrinsic: For a PHI Node return either an incoming edge or
1978// a back edge. If the back edge comes from the intrinsic itself, the incoming
1979// edge is returned.
1980static Value *returnEdge(const PHINode *PN, Value *IntrBaseVal) {
1981 const BasicBlock *Parent = PN->getParent();
1982 int Idx = -1;
1983 for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i) {
1984 BasicBlock *Blk = PN->getIncomingBlock(i);
1985 // Determine whether the back edge originates from the intrinsic.
1986 if (Blk == Parent) {
1987 Value *BackEdgeVal = PN->getIncomingValue(i);
1988 Value *BaseVal;
1989 // Loop until getBrevLdObject returns the same value or we hit IntrBaseVal.
1990 do {
1991 BaseVal = BackEdgeVal;
1992 BackEdgeVal = getBrevLdObject(V: BackEdgeVal);
1993 } while ((BaseVal != BackEdgeVal) && (IntrBaseVal != BackEdgeVal));
1994 // If the getBrevLdObject returns IntrBaseVal, we should return the
1995 // incoming edge.
1996 if (IntrBaseVal == BackEdgeVal)
1997 continue;
1998 Idx = i;
1999 break;
2000 } else // Set the node to incoming edge.
2001 Idx = i;
2002 }
2003 assert(Idx >= 0 && "Unexpected index to incoming argument in PHI");
2004 return PN->getIncomingValue(i: Idx);
2005}
2006
2007// Bit-reverse Load Intrinsic: Figure out the underlying object the base
2008// pointer points to, for the bit-reverse load intrinsic. Setting this to
2009// memoperand might help alias analysis to figure out the dependencies.
2010static Value *getUnderLyingObjectForBrevLdIntr(Value *V) {
2011 Value *IntrBaseVal = V;
2012 Value *BaseVal;
2013 // Loop until getBrevLdObject returns the same value, which means we have
2014 // either found the object or hit a PHI.
2015 do {
2016 BaseVal = V;
2017 V = getBrevLdObject(V);
2018 } while (BaseVal != V);
2019
2020 // Identify the object from PHINode.
2021 if (const PHINode *PN = dyn_cast<PHINode>(Val: V))
2022 return returnEdge(PN, IntrBaseVal);
2023 // For non-PHI nodes, the object is the last value returned by getBrevLdObject.
2024 else
2025 return V;
2026}
2027
2028/// Given an intrinsic, checks if on the target the intrinsic will need to map
2029/// to a MemIntrinsicNode (touches memory). If this is the case, it stores
2030/// the intrinsic information into the Infos vector.
2031void HexagonTargetLowering::getTgtMemIntrinsic(
2032 SmallVectorImpl<IntrinsicInfo> &Infos, const CallBase &I,
2033 MachineFunction &MF, unsigned Intrinsic) const {
2034 IntrinsicInfo Info;
2035 switch (Intrinsic) {
2036 case Intrinsic::hexagon_L2_loadrd_pbr:
2037 case Intrinsic::hexagon_L2_loadri_pbr:
2038 case Intrinsic::hexagon_L2_loadrh_pbr:
2039 case Intrinsic::hexagon_L2_loadruh_pbr:
2040 case Intrinsic::hexagon_L2_loadrb_pbr:
2041 case Intrinsic::hexagon_L2_loadrub_pbr: {
2042 Info.opc = ISD::INTRINSIC_W_CHAIN;
2043 auto &DL = I.getDataLayout();
2044 auto &Cont = I.getCalledFunction()->getParent()->getContext();
2045 // The intrinsic function call is of the form { ElTy, i8* }
2046 // @llvm.hexagon.L2.loadXX.pbr(i8*, i32). The pointer and memory access type
2047 // should be derived from ElTy.
2048 Type *ElTy = I.getCalledFunction()->getReturnType()->getStructElementType(N: 0);
2049 Info.memVT = MVT::getVT(Ty: ElTy);
2050 llvm::Value *BasePtrVal = I.getOperand(i_nocapture: 0);
2051 Info.ptrVal = getUnderLyingObjectForBrevLdIntr(V: BasePtrVal);
2052 // The offset value comes through Modifier register. For now, assume the
2053 // offset is 0.
2054 Info.offset = 0;
2055 Info.align = DL.getABITypeAlign(Ty: Info.memVT.getTypeForEVT(Context&: Cont));
2056 Info.flags = MachineMemOperand::MOLoad;
2057 Infos.push_back(Elt: Info);
2058 return;
2059 }
2060 case Intrinsic::hexagon_V6_vgathermw:
2061 case Intrinsic::hexagon_V6_vgathermw_128B:
2062 case Intrinsic::hexagon_V6_vgathermh:
2063 case Intrinsic::hexagon_V6_vgathermh_128B:
2064 case Intrinsic::hexagon_V6_vgathermhw:
2065 case Intrinsic::hexagon_V6_vgathermhw_128B:
2066 case Intrinsic::hexagon_V6_vgathermwq:
2067 case Intrinsic::hexagon_V6_vgathermwq_128B:
2068 case Intrinsic::hexagon_V6_vgathermhq:
2069 case Intrinsic::hexagon_V6_vgathermhq_128B:
2070 case Intrinsic::hexagon_V6_vgathermhwq:
2071 case Intrinsic::hexagon_V6_vgathermhwq_128B:
2072 case Intrinsic::hexagon_V6_vgather_vscattermh:
2073 case Intrinsic::hexagon_V6_vgather_vscattermh_128B: {
2074 const Module &M = *I.getParent()->getParent()->getParent();
2075 Info.opc = ISD::INTRINSIC_W_CHAIN;
2076 Type *VecTy = I.getArgOperand(i: 1)->getType();
2077 Info.memVT = MVT::getVT(Ty: VecTy);
2078 Info.ptrVal = I.getArgOperand(i: 0);
2079 Info.offset = 0;
2080 Info.align =
2081 MaybeAlign(M.getDataLayout().getTypeAllocSizeInBits(Ty: VecTy) / 8);
2082 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
2083 MachineMemOperand::MOVolatile;
2084 Infos.push_back(Elt: Info);
2085 return;
2086 }
2087 default:
2088 break;
2089 }
2090}
2091
2092bool HexagonTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
2093 return X.getValueType().isScalarInteger(); // 'tstbit'
2094}
2095
2096bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
2097 return isTruncateFree(VT1: EVT::getEVT(Ty: Ty1), VT2: EVT::getEVT(Ty: Ty2));
2098}
2099
2100bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
2101 if (!VT1.isSimple() || !VT2.isSimple())
2102 return false;
2103 return VT1.getSimpleVT() == MVT::i64 && VT2.getSimpleVT() == MVT::i32;
2104}
2105
2106bool HexagonTargetLowering::isFMAFasterThanFMulAndFAdd(
2107 const MachineFunction &MF, EVT VT) const {
2108 return isOperationLegalOrCustom(Op: ISD::FMA, VT);
2109}
2110
2111// Should we expand the build vector with shuffles?
2112bool HexagonTargetLowering::shouldExpandBuildVectorWithShuffles(EVT VT,
2113 unsigned DefinedValues) const {
2114 return false;
2115}
2116
2117bool HexagonTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
2118 unsigned Index) const {
2119 assert(ResVT.getVectorElementType() == SrcVT.getVectorElementType());
2120 if (!ResVT.isSimple() || !SrcVT.isSimple())
2121 return false;
2122
2123 MVT ResTy = ResVT.getSimpleVT(), SrcTy = SrcVT.getSimpleVT();
2124 if (ResTy.getVectorElementType() != MVT::i1)
2125 return true;
2126
2127 // Non-HVX bool vectors are relatively cheap.
2128 return SrcTy.getVectorNumElements() <= 8;
2129}
2130
2131bool HexagonTargetLowering::isTargetCanonicalConstantNode(SDValue Op) const {
2132 return Op.getOpcode() == ISD::CONCAT_VECTORS ||
2133 TargetLowering::isTargetCanonicalConstantNode(Op);
2134}
2135
2136bool HexagonTargetLowering::isShuffleMaskLegal(ArrayRef<int> Mask,
2137 EVT VT) const {
2138 return true;
2139}
2140
2141TargetLoweringBase::LegalizeTypeAction
2142HexagonTargetLowering::getPreferredVectorAction(MVT VT) const {
2143 unsigned VecLen = VT.getVectorMinNumElements();
2144 MVT ElemTy = VT.getVectorElementType();
2145
2146 if (VecLen == 1 || VT.isScalableVector())
2147 return TargetLoweringBase::TypeScalarizeVector;
2148
2149 if (Subtarget.useHVXOps()) {
2150 unsigned Action = getPreferredHvxVectorAction(VecTy: VT);
2151 if (Action != ~0u)
2152 return static_cast<TargetLoweringBase::LegalizeTypeAction>(Action);
2153 }
2154
2155 // Always widen (remaining) vectors of i1.
2156 if (ElemTy == MVT::i1)
2157 return TargetLoweringBase::TypeWidenVector;
2158 // Widen non-power-of-2 vectors. Such types cannot be split right now,
2159 // and computeRegisterProperties will override "split" with "widen",
2160 // which can cause other issues.
2161 if (!isPowerOf2_32(Value: VecLen))
2162 return TargetLoweringBase::TypeWidenVector;
2163
2164 return TargetLoweringBase::TypeSplitVector;
2165}
2166
2167TargetLoweringBase::LegalizeAction
2168HexagonTargetLowering::getCustomOperationAction(SDNode &Op) const {
2169 if (Subtarget.useHVXOps()) {
2170 unsigned Action = getCustomHvxOperationAction(Op);
2171 if (Action != ~0u)
2172 return static_cast<TargetLoweringBase::LegalizeAction>(Action);
2173 }
2174 return TargetLoweringBase::Legal;
2175}
2176
2177std::pair<SDValue, int>
2178HexagonTargetLowering::getBaseAndOffset(SDValue Addr) const {
2179 if (Addr.getOpcode() == ISD::ADD) {
2180 SDValue Op1 = Addr.getOperand(i: 1);
2181 if (auto *CN = dyn_cast<const ConstantSDNode>(Val: Op1.getNode()))
2182 return { Addr.getOperand(i: 0), CN->getSExtValue() };
2183 }
2184 return { Addr, 0 };
2185}
2186
2187// Lower a vector shuffle (V1, V2, V3). V1 and V2 are the two vectors
2188// to select data from, V3 is the permutation.
2189SDValue
2190HexagonTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG)
2191 const {
2192 const auto *SVN = cast<ShuffleVectorSDNode>(Val&: Op);
2193 ArrayRef<int> AM = SVN->getMask();
2194 assert(AM.size() <= 8 && "Unexpected shuffle mask");
2195 unsigned VecLen = AM.size();
2196
2197 MVT VecTy = ty(Op);
2198 assert(!Subtarget.isHVXVectorType(VecTy, true) &&
2199 "HVX shuffles should be legal");
2200 assert(VecTy.getSizeInBits() <= 64 && "Unexpected vector length");
2201
2202 SDValue Op0 = Op.getOperand(i: 0);
2203 SDValue Op1 = Op.getOperand(i: 1);
2204 const SDLoc &dl(Op);
2205
2206 // If the inputs are not the same type as the output, bail. This is not an
2207 // error situation, but it complicates the handling, and the default
2208 // expansion (into BUILD_VECTOR) should be adequate.
2209 if (ty(Op: Op0) != VecTy || ty(Op: Op1) != VecTy)
2210 return SDValue();
2211
2212 // Normalize the mask so that the first non-negative index comes from
2213 // the first operand.
2214 SmallVector<int, 8> Mask(AM);
2215 unsigned F = llvm::find_if(Range&: AM, P: [](int M) { return M >= 0; }) - AM.data();
2216 if (F == AM.size())
2217 return DAG.getUNDEF(VT: VecTy);
2218 if (AM[F] >= int(VecLen)) {
2219 ShuffleVectorSDNode::commuteMask(Mask);
2220 std::swap(a&: Op0, b&: Op1);
2221 }
2222
2223 // Express the shuffle mask in terms of bytes.
2224 SmallVector<int,8> ByteMask;
2225 unsigned ElemBytes = VecTy.getVectorElementType().getSizeInBits() / 8;
2226 for (int M : Mask) {
2227 if (M < 0) {
2228 for (unsigned j = 0; j != ElemBytes; ++j)
2229 ByteMask.push_back(Elt: -1);
2230 } else {
2231 for (unsigned j = 0; j != ElemBytes; ++j)
2232 ByteMask.push_back(Elt: M*ElemBytes + j);
2233 }
2234 }
2235 assert(ByteMask.size() <= 8);
2236
2237 // All non-undef (non-negative) indexes are well within [0..127], so they
2238 // fit in a single byte. Build two 64-bit words:
2239 // - MaskIdx where each byte is the corresponding index (for non-negative
2240 // indexes), and 0xFF for negative indexes, and
2241 // - MaskUnd that has 0xFF for each negative index.
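  // For example, the byte mask {0, 1, 2, 3} gives MaskIdx = 0x03020100 and
  // MaskUnd = 0, while {0, -1, 2, -1} gives MaskIdx = 0xFF02FF00 and
  // MaskUnd = 0xFF00FF00.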
2242 uint64_t MaskIdx = 0;
2243 uint64_t MaskUnd = 0;
2244 for (unsigned i = 0, e = ByteMask.size(); i != e; ++i) {
2245 unsigned S = 8*i;
2246 uint64_t M = ByteMask[i] & 0xFF;
2247 if (M == 0xFF)
2248 MaskUnd |= M << S;
2249 MaskIdx |= M << S;
2250 }
2251
2252 if (ByteMask.size() == 4) {
2253 // Identity.
2254 if (MaskIdx == (0x03020100 | MaskUnd))
2255 return Op0;
2256 // Byte swap.
2257 if (MaskIdx == (0x00010203 | MaskUnd)) {
2258 SDValue T0 = DAG.getBitcast(VT: MVT::i32, V: Op0);
2259 SDValue T1 = DAG.getNode(Opcode: ISD::BSWAP, DL: dl, VT: MVT::i32, Operand: T0);
2260 return DAG.getBitcast(VT: VecTy, V: T1);
2261 }
2262
2263 // Byte packs.
2264 SDValue Concat10 =
2265 getCombine(Hi: Op1, Lo: Op0, dl, ResTy: typeJoin(Tys: {ty(Op: Op1), ty(Op: Op0)}), DAG);
2266 if (MaskIdx == (0x06040200 | MaskUnd))
2267 return getInstr(MachineOpc: Hexagon::S2_vtrunehb, dl, Ty: VecTy, Ops: {Concat10}, DAG);
2268 if (MaskIdx == (0x07050301 | MaskUnd))
2269 return getInstr(MachineOpc: Hexagon::S2_vtrunohb, dl, Ty: VecTy, Ops: {Concat10}, DAG);
2270
2271 SDValue Concat01 =
2272 getCombine(Hi: Op0, Lo: Op1, dl, ResTy: typeJoin(Tys: {ty(Op: Op0), ty(Op: Op1)}), DAG);
2273 if (MaskIdx == (0x02000604 | MaskUnd))
2274 return getInstr(MachineOpc: Hexagon::S2_vtrunehb, dl, Ty: VecTy, Ops: {Concat01}, DAG);
2275 if (MaskIdx == (0x03010705 | MaskUnd))
2276 return getInstr(MachineOpc: Hexagon::S2_vtrunohb, dl, Ty: VecTy, Ops: {Concat01}, DAG);
2277 }
2278
2279 if (ByteMask.size() == 8) {
2280 // Identity.
2281 if (MaskIdx == (0x0706050403020100ull | MaskUnd))
2282 return Op0;
2283 // Byte swap.
2284 if (MaskIdx == (0x0001020304050607ull | MaskUnd)) {
2285 SDValue T0 = DAG.getBitcast(VT: MVT::i64, V: Op0);
2286 SDValue T1 = DAG.getNode(Opcode: ISD::BSWAP, DL: dl, VT: MVT::i64, Operand: T0);
2287 return DAG.getBitcast(VT: VecTy, V: T1);
2288 }
2289
2290 // Halfword picks.
2291 if (MaskIdx == (0x0d0c050409080100ull | MaskUnd))
2292 return getInstr(MachineOpc: Hexagon::S2_shuffeh, dl, Ty: VecTy, Ops: {Op1, Op0}, DAG);
2293 if (MaskIdx == (0x0f0e07060b0a0302ull | MaskUnd))
2294 return getInstr(MachineOpc: Hexagon::S2_shuffoh, dl, Ty: VecTy, Ops: {Op1, Op0}, DAG);
2295 if (MaskIdx == (0x0d0c090805040100ull | MaskUnd))
2296 return getInstr(MachineOpc: Hexagon::S2_vtrunewh, dl, Ty: VecTy, Ops: {Op1, Op0}, DAG);
2297 if (MaskIdx == (0x0f0e0b0a07060302ull | MaskUnd))
2298 return getInstr(MachineOpc: Hexagon::S2_vtrunowh, dl, Ty: VecTy, Ops: {Op1, Op0}, DAG);
2299 if (MaskIdx == (0x0706030205040100ull | MaskUnd)) {
2300 VectorPair P = opSplit(Vec: Op0, dl, DAG);
2301 return getInstr(MachineOpc: Hexagon::S2_packhl, dl, Ty: VecTy, Ops: {P.second, P.first}, DAG);
2302 }
2303
2304 // Byte packs.
2305 if (MaskIdx == (0x0e060c040a020800ull | MaskUnd))
2306 return getInstr(MachineOpc: Hexagon::S2_shuffeb, dl, Ty: VecTy, Ops: {Op1, Op0}, DAG);
2307 if (MaskIdx == (0x0f070d050b030901ull | MaskUnd))
2308 return getInstr(MachineOpc: Hexagon::S2_shuffob, dl, Ty: VecTy, Ops: {Op1, Op0}, DAG);
2309 }
2310
2311 return SDValue();
2312}
2313
2314SDValue
2315HexagonTargetLowering::getSplatValue(SDValue Op, SelectionDAG &DAG) const {
2316 switch (Op.getOpcode()) {
2317 case ISD::BUILD_VECTOR:
2318 if (SDValue S = cast<BuildVectorSDNode>(Val&: Op)->getSplatValue())
2319 return S;
2320 break;
2321 case ISD::SPLAT_VECTOR:
2322 return Op.getOperand(i: 0);
2323 }
2324 return SDValue();
2325}
2326
2327// Create a Hexagon-specific node for shifting a vector by an integer.
2328SDValue
2329HexagonTargetLowering::getVectorShiftByInt(SDValue Op, SelectionDAG &DAG)
2330 const {
2331 unsigned NewOpc;
2332 switch (Op.getOpcode()) {
2333 case ISD::SHL:
2334 NewOpc = HexagonISD::VASL;
2335 break;
2336 case ISD::SRA:
2337 NewOpc = HexagonISD::VASR;
2338 break;
2339 case ISD::SRL:
2340 NewOpc = HexagonISD::VLSR;
2341 break;
2342 default:
2343 llvm_unreachable("Unexpected shift opcode");
2344 }
2345
2346 if (SDValue Sp = getSplatValue(Op: Op.getOperand(i: 1), DAG))
2347 return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: ty(Op), N1: Op.getOperand(i: 0), N2: Sp);
2348 return SDValue();
2349}
2350
2351SDValue
2352HexagonTargetLowering::LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const {
2353 const SDLoc &dl(Op);
2354
2355 // First try to convert the shift (by vector) to a shift by a scalar.
2356 // If we first split the shift, the shift amount will become 'extract
2357 // subvector', and will no longer be recognized as scalar.
2358 SDValue Res = Op;
2359 if (SDValue S = getVectorShiftByInt(Op, DAG))
2360 Res = S;
2361
2362 unsigned Opc = Res.getOpcode();
2363 switch (Opc) {
2364 case HexagonISD::VASR:
2365 case HexagonISD::VLSR:
2366 case HexagonISD::VASL:
2367 break;
2368 default:
2369 // No instructions for shifts by non-scalars.
2370 return SDValue();
2371 }
2372
2373 MVT ResTy = ty(Op: Res);
2374 if (ResTy.getVectorElementType() != MVT::i8)
2375 return Res;
2376
2377 // For shifts of i8, extend the inputs to i16, then truncate back to i8.
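  // For example, a v4i8 shift is performed as a v4i16 shift whose result is
  // truncated back to v4i8; a v8i8 shift is first split into two v4i8 halves.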
2378 assert(ResTy.getVectorElementType() == MVT::i8);
2379 SDValue Val = Res.getOperand(i: 0), Amt = Res.getOperand(i: 1);
2380
2381 auto ShiftPartI8 = [&dl, &DAG, this](unsigned Opc, SDValue V, SDValue A) {
2382 MVT Ty = ty(Op: V);
2383 MVT ExtTy = MVT::getVectorVT(VT: MVT::i16, NumElements: Ty.getVectorNumElements());
2384 SDValue ExtV = Opc == HexagonISD::VASR ? DAG.getSExtOrTrunc(Op: V, DL: dl, VT: ExtTy)
2385 : DAG.getZExtOrTrunc(Op: V, DL: dl, VT: ExtTy);
2386 SDValue ExtS = DAG.getNode(Opcode: Opc, DL: dl, VT: ExtTy, Ops: {ExtV, A});
2387 return DAG.getZExtOrTrunc(Op: ExtS, DL: dl, VT: Ty);
2388 };
2389
2390 if (ResTy.getSizeInBits() == 32)
2391 return ShiftPartI8(Opc, Val, Amt);
2392
2393 auto [LoV, HiV] = opSplit(Vec: Val, dl, DAG);
2394 return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT: ResTy,
2395 Ops: {ShiftPartI8(Opc, LoV, Amt), ShiftPartI8(Opc, HiV, Amt)});
2396}
2397
2398SDValue
2399HexagonTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
2400 if (isa<ConstantSDNode>(Val: Op.getOperand(i: 1).getNode()))
2401 return Op;
2402 return SDValue();
2403}
2404
2405SDValue
2406HexagonTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
2407 MVT ResTy = ty(Op);
2408 SDValue InpV = Op.getOperand(i: 0);
2409 MVT InpTy = ty(Op: InpV);
2410 assert(ResTy.getSizeInBits() == InpTy.getSizeInBits());
2411 const SDLoc &dl(Op);
2412
2413 // Handle conversion from i8 to v8i1.
2414 if (InpTy == MVT::i8) {
2415 if (ResTy == MVT::v8i1) {
2416 SDValue Sc = DAG.getBitcast(VT: tyScalar(Ty: InpTy), V: InpV);
2417 SDValue Ext = DAG.getZExtOrTrunc(Op: Sc, DL: dl, VT: MVT::i32);
2418 return getInstr(MachineOpc: Hexagon::C2_tfrrp, dl, Ty: ResTy, Ops: Ext, DAG);
2419 }
2420 return SDValue();
2421 }
2422
2423 return Op;
2424}
2425
2426bool
2427HexagonTargetLowering::getBuildVectorConstInts(ArrayRef<SDValue> Values,
2428 MVT VecTy, SelectionDAG &DAG,
2429 MutableArrayRef<ConstantInt*> Consts) const {
2430 MVT ElemTy = VecTy.getVectorElementType();
2431 unsigned ElemWidth = ElemTy.getSizeInBits();
2432 IntegerType *IntTy = IntegerType::get(C&: *DAG.getContext(), NumBits: ElemWidth);
2433 bool AllConst = true;
2434
2435 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
2436 SDValue V = Values[i];
2437 if (V.isUndef()) {
2438 Consts[i] = ConstantInt::get(Ty: IntTy, V: 0);
2439 continue;
2440 }
2441 // Make sure to always cast to IntTy.
2442 if (auto *CN = dyn_cast<ConstantSDNode>(Val: V.getNode())) {
2443 const ConstantInt *CI = CN->getConstantIntValue();
2444 Consts[i] = ConstantInt::getSigned(Ty: IntTy, V: CI->getValue().getSExtValue());
2445 } else if (auto *CN = dyn_cast<ConstantFPSDNode>(Val: V.getNode())) {
2446 const ConstantFP *CF = CN->getConstantFPValue();
2447 APInt A = CF->getValueAPF().bitcastToAPInt();
2448 Consts[i] = ConstantInt::get(Ty: IntTy, V: A.getZExtValue());
2449 } else {
2450 AllConst = false;
2451 }
2452 }
2453 return AllConst;
2454}
2455
2456SDValue
2457HexagonTargetLowering::buildVector32(ArrayRef<SDValue> Elem, const SDLoc &dl,
2458 MVT VecTy, SelectionDAG &DAG) const {
2459 MVT ElemTy = VecTy.getVectorElementType();
2460 assert(VecTy.getVectorNumElements() == Elem.size());
2461
2462 SmallVector<ConstantInt*,4> Consts(Elem.size());
2463 bool AllConst = getBuildVectorConstInts(Values: Elem, VecTy, DAG, Consts);
2464
2465 unsigned First, Num = Elem.size();
2466 for (First = 0; First != Num; ++First) {
2467 if (!isUndef(Op: Elem[First]))
2468 break;
2469 }
2470 if (First == Num)
2471 return DAG.getUNDEF(VT: VecTy);
2472
2473 if (AllConst &&
2474 llvm::all_of(Range&: Consts, P: [](ConstantInt *CI) { return CI->isZero(); }))
2475 return getZero(dl, Ty: VecTy, DAG);
2476
2477 if (ElemTy == MVT::i16 || ElemTy == MVT::f16) {
2478 assert(Elem.size() == 2);
2479 if (AllConst) {
2480 // The 'Consts' array will have all values as integers regardless
2481 // of the vector element type.
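      // For example, elements {0x1234, 0x5678} are packed into the single
      // 32-bit constant 0x56781234.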
2482 uint32_t V = (Consts[0]->getZExtValue() & 0xFFFF) |
2483 Consts[1]->getZExtValue() << 16;
2484 return DAG.getBitcast(VT: VecTy, V: DAG.getConstant(Val: V, DL: dl, VT: MVT::i32));
2485 }
2486 SDValue E0, E1;
2487 if (ElemTy == MVT::f16) {
2488 E0 = DAG.getZExtOrTrunc(Op: DAG.getBitcast(VT: MVT::i16, V: Elem[0]), DL: dl, VT: MVT::i32);
2489 E1 = DAG.getZExtOrTrunc(Op: DAG.getBitcast(VT: MVT::i16, V: Elem[1]), DL: dl, VT: MVT::i32);
2490 } else {
2491 E0 = Elem[0];
2492 E1 = Elem[1];
2493 }
2494 SDValue N = getInstr(MachineOpc: Hexagon::A2_combine_ll, dl, Ty: MVT::i32, Ops: {E1, E0}, DAG);
2495 return DAG.getBitcast(VT: VecTy, V: N);
2496 }
2497
2498 if (ElemTy == MVT::i8) {
2499 // First try generating a constant.
2500 if (AllConst) {
2501 uint32_t V = (Consts[0]->getZExtValue() & 0xFF) |
2502 (Consts[1]->getZExtValue() & 0xFF) << 8 |
2503 (Consts[2]->getZExtValue() & 0xFF) << 16 |
2504 Consts[3]->getZExtValue() << 24;
2505 return DAG.getBitcast(VT: MVT::v4i8, V: DAG.getConstant(Val: V, DL: dl, VT: MVT::i32));
2506 }
2507
2508 // Then try splat.
2509 bool IsSplat = true;
2510 for (unsigned i = First+1; i != Num; ++i) {
2511 if (Elem[i] == Elem[First] || isUndef(Op: Elem[i]))
2512 continue;
2513 IsSplat = false;
2514 break;
2515 }
2516 if (IsSplat) {
2517 // Legalize the operand of SPLAT_VECTOR.
2518 SDValue Ext = DAG.getZExtOrTrunc(Op: Elem[First], DL: dl, VT: MVT::i32);
2519 return DAG.getNode(Opcode: ISD::SPLAT_VECTOR, DL: dl, VT: VecTy, Operand: Ext);
2520 }
2521
2522 // Generate
2523 // (zxtb(Elem[0]) | (zxtb(Elem[1]) << 8)) |
2524 // (zxtb(Elem[2]) | (zxtb(Elem[3]) << 8)) << 16
2525 assert(Elem.size() == 4);
2526 SDValue Vs[4];
2527 for (unsigned i = 0; i != 4; ++i) {
2528 Vs[i] = DAG.getZExtOrTrunc(Op: Elem[i], DL: dl, VT: MVT::i32);
2529 Vs[i] = DAG.getZeroExtendInReg(Op: Vs[i], DL: dl, VT: MVT::i8);
2530 }
2531 SDValue S8 = DAG.getConstant(Val: 8, DL: dl, VT: MVT::i32);
2532 SDValue T0 = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: MVT::i32, Ops: {Vs[1], S8});
2533 SDValue T1 = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: MVT::i32, Ops: {Vs[3], S8});
2534 SDValue B0 = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: MVT::i32, Ops: {Vs[0], T0});
2535 SDValue B1 = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: MVT::i32, Ops: {Vs[2], T1});
2536
2537 SDValue R = getInstr(MachineOpc: Hexagon::A2_combine_ll, dl, Ty: MVT::i32, Ops: {B1, B0}, DAG);
2538 return DAG.getBitcast(VT: MVT::v4i8, V: R);
2539 }
2540
2541#ifndef NDEBUG
2542 dbgs() << "VecTy: " << VecTy << '\n';
2543#endif
2544 llvm_unreachable("Unexpected vector element type");
2545}
2546
2547SDValue
2548HexagonTargetLowering::buildVector64(ArrayRef<SDValue> Elem, const SDLoc &dl,
2549 MVT VecTy, SelectionDAG &DAG) const {
2550 MVT ElemTy = VecTy.getVectorElementType();
2551 assert(VecTy.getVectorNumElements() == Elem.size());
2552
2553 SmallVector<ConstantInt*,8> Consts(Elem.size());
2554 bool AllConst = getBuildVectorConstInts(Values: Elem, VecTy, DAG, Consts);
2555
2556 unsigned First, Num = Elem.size();
2557 for (First = 0; First != Num; ++First) {
2558 if (!isUndef(Op: Elem[First]))
2559 break;
2560 }
2561 if (First == Num)
2562 return DAG.getUNDEF(VT: VecTy);
2563
2564 if (AllConst &&
2565 llvm::all_of(Range&: Consts, P: [](ConstantInt *CI) { return CI->isZero(); }))
2566 return getZero(dl, Ty: VecTy, DAG);
2567
2568 // First try splat if possible.
2569 if (ElemTy == MVT::i16 || ElemTy == MVT::f16) {
2570 bool IsSplat = true;
2571 for (unsigned i = First+1; i != Num; ++i) {
2572 if (Elem[i] == Elem[First] || isUndef(Op: Elem[i]))
2573 continue;
2574 IsSplat = false;
2575 break;
2576 }
2577 if (IsSplat) {
2578 // Legalize the operand of SPLAT_VECTOR
2579 SDValue S = ElemTy == MVT::f16 ? DAG.getBitcast(VT: MVT::i16, V: Elem[First])
2580 : Elem[First];
2581 SDValue Ext = DAG.getZExtOrTrunc(Op: S, DL: dl, VT: MVT::i32);
2582 return DAG.getNode(Opcode: ISD::SPLAT_VECTOR, DL: dl, VT: VecTy, Operand: Ext);
2583 }
2584 }
2585
2586 // Then try constant.
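  // Elements are packed LSB-first, e.g. a v4i16 {a, b, c, d} becomes the
  // 64-bit constant with 'a' in bits [15:0] and 'd' in bits [63:48].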
2587 if (AllConst) {
2588 uint64_t Val = 0;
2589 unsigned W = ElemTy.getSizeInBits();
2590 uint64_t Mask = (1ull << W) - 1;
2591 for (unsigned i = 0; i != Num; ++i)
2592 Val = (Val << W) | (Consts[Num-1-i]->getZExtValue() & Mask);
2593 SDValue V0 = DAG.getConstant(Val, DL: dl, VT: MVT::i64);
2594 return DAG.getBitcast(VT: VecTy, V: V0);
2595 }
2596
2597 // Build two 32-bit vectors and concatenate.
2598 MVT HalfTy = MVT::getVectorVT(VT: ElemTy, NumElements: Num/2);
2599 SDValue L = (ElemTy == MVT::i32)
2600 ? Elem[0]
2601 : buildVector32(Elem: Elem.take_front(N: Num/2), dl, VecTy: HalfTy, DAG);
2602 SDValue H = (ElemTy == MVT::i32)
2603 ? Elem[1]
2604 : buildVector32(Elem: Elem.drop_front(N: Num/2), dl, VecTy: HalfTy, DAG);
2605 return getCombine(Hi: H, Lo: L, dl, ResTy: VecTy, DAG);
2606}
2607
2608SDValue
2609HexagonTargetLowering::extractVector(SDValue VecV, SDValue IdxV,
2610 const SDLoc &dl, MVT ValTy, MVT ResTy,
2611 SelectionDAG &DAG) const {
2612 MVT VecTy = ty(Op: VecV);
2613 assert(!ValTy.isVector() ||
2614 VecTy.getVectorElementType() == ValTy.getVectorElementType());
2615 if (VecTy.getVectorElementType() == MVT::i1)
2616 return extractVectorPred(VecV, IdxV, dl, ValTy, ResTy, DAG);
2617
2618 unsigned VecWidth = VecTy.getSizeInBits();
2619 unsigned ValWidth = ValTy.getSizeInBits();
2620 unsigned ElemWidth = VecTy.getVectorElementType().getSizeInBits();
2621 assert((VecWidth % ElemWidth) == 0);
2622 assert(VecWidth == 32 || VecWidth == 64);
2623
2624 // Cast everything to scalar integer types.
2625 MVT ScalarTy = tyScalar(Ty: VecTy);
2626 VecV = DAG.getBitcast(VT: ScalarTy, V: VecV);
2627
2628 SDValue WidthV = DAG.getConstant(Val: ValWidth, DL: dl, VT: MVT::i32);
2629 SDValue ExtV;
2630
2631 if (auto *IdxN = dyn_cast<ConstantSDNode>(Val&: IdxV)) {
2632 unsigned Off = IdxN->getZExtValue() * ElemWidth;
2633 if (VecWidth == 64 && ValWidth == 32) {
2634 assert(Off == 0 || Off == 32);
2635 ExtV = Off == 0 ? LoHalf(V: VecV, DAG) : HiHalf(V: VecV, DAG);
2636 } else if (Off == 0 && (ValWidth % 8) == 0) {
2637 ExtV = DAG.getZeroExtendInReg(Op: VecV, DL: dl, VT: tyScalar(Ty: ValTy));
2638 } else {
2639 SDValue OffV = DAG.getConstant(Val: Off, DL: dl, VT: MVT::i32);
2640 // The return type of EXTRACTU must be the same as the type of the
2641 // input vector.
2642 ExtV = DAG.getNode(Opcode: HexagonISD::EXTRACTU, DL: dl, VT: ScalarTy,
2643 Ops: {VecV, WidthV, OffV});
2644 }
2645 } else {
2646 if (ty(Op: IdxV) != MVT::i32)
2647 IdxV = DAG.getZExtOrTrunc(Op: IdxV, DL: dl, VT: MVT::i32);
2648 SDValue OffV = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: MVT::i32, N1: IdxV,
2649 N2: DAG.getConstant(Val: ElemWidth, DL: dl, VT: MVT::i32));
2650 ExtV = DAG.getNode(Opcode: HexagonISD::EXTRACTU, DL: dl, VT: ScalarTy,
2651 Ops: {VecV, WidthV, OffV});
2652 }
2653
2654 // Cast ExtV to the requested result type.
2655 ExtV = DAG.getZExtOrTrunc(Op: ExtV, DL: dl, VT: tyScalar(Ty: ResTy));
2656 ExtV = DAG.getBitcast(VT: ResTy, V: ExtV);
2657 return ExtV;
2658}
2659
2660SDValue
2661HexagonTargetLowering::extractVectorPred(SDValue VecV, SDValue IdxV,
2662 const SDLoc &dl, MVT ValTy, MVT ResTy,
2663 SelectionDAG &DAG) const {
2664 // Special case for v{8,4,2}i1 (the only boolean vectors legal in Hexagon
2665 // without any coprocessors).
2666 MVT VecTy = ty(Op: VecV);
2667 unsigned VecWidth = VecTy.getSizeInBits();
2668 unsigned ValWidth = ValTy.getSizeInBits();
2669 assert(VecWidth == VecTy.getVectorNumElements() &&
2670 "Vector elements should equal vector width size");
2671 assert(VecWidth == 8 || VecWidth == 4 || VecWidth == 2);
2672
2673 // Check if this is an extract of the lowest bit.
2674 if (isNullConstant(V: IdxV) && ValTy.getSizeInBits() == 1) {
2675 // Extracting the lowest bit is a no-op, but it changes the type,
2676 // so it must be kept as an operation to avoid errors related to
2677 // type mismatches.
2678 return DAG.getNode(Opcode: HexagonISD::TYPECAST, DL: dl, VT: MVT::i1, Operand: VecV);
2679 }
2680
2681 // If the value extracted is a single bit, use tstbit.
2682 if (ValWidth == 1) {
2683 SDValue A0 = getInstr(MachineOpc: Hexagon::C2_tfrpr, dl, Ty: MVT::i32, Ops: {VecV}, DAG);
2684 SDValue M0 = DAG.getConstant(Val: 8 / VecWidth, DL: dl, VT: MVT::i32);
2685 SDValue I0 = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: MVT::i32, N1: IdxV, N2: M0);
2686 return DAG.getNode(Opcode: HexagonISD::TSTBIT, DL: dl, VT: MVT::i1, N1: A0, N2: I0);
2687 }
2688
2689 // Each bool vector (v2i1, v4i1, v8i1) always occupies 8 bits in
2690 // a predicate register. The elements of the vector are repeated
2691 // in the register (if necessary) so that the total number is 8.
2692 // The extracted subvector will need to be expanded in such a way.
2693 unsigned Scale = VecWidth / ValWidth;
2694
2695 // Generate (p2d VecV) >> 8*VecRep*Idx to move the interesting bytes to
2696 // position 0.
2697 assert(ty(IdxV) == MVT::i32);
2698 unsigned VecRep = 8 / VecWidth;
2699 SDValue S0 = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: MVT::i32, N1: IdxV,
2700 N2: DAG.getConstant(Val: 8*VecRep, DL: dl, VT: MVT::i32));
2701 SDValue T0 = DAG.getNode(Opcode: HexagonISD::P2D, DL: dl, VT: MVT::i64, Operand: VecV);
2702 SDValue T1 = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i64, N1: T0, N2: S0);
2703 while (Scale > 1) {
2704 // The longest possible subvector is at most 32 bits, so it is always
2705 // contained in the low subregister.
2706 T1 = LoHalf(V: T1, DAG);
2707 T1 = expandPredicate(Vec32: T1, dl, DAG);
2708 Scale /= 2;
2709 }
2710
2711 return DAG.getNode(Opcode: HexagonISD::D2P, DL: dl, VT: ResTy, Operand: T1);
2712}
2713
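// Insert the scalar or subvector ValV into VecV at index IdxV. Predicate
// vectors are handled in insertVectorPred; everything else is done on the
// bitcast scalar integer with HexagonISD::INSERT.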
2714SDValue
2715HexagonTargetLowering::insertVector(SDValue VecV, SDValue ValV, SDValue IdxV,
2716 const SDLoc &dl, MVT ValTy,
2717 SelectionDAG &DAG) const {
2718 MVT VecTy = ty(Op: VecV);
2719 if (VecTy.getVectorElementType() == MVT::i1)
2720 return insertVectorPred(VecV, ValV, IdxV, dl, ValTy, DAG);
2721
2722 unsigned VecWidth = VecTy.getSizeInBits();
2723 unsigned ValWidth = ValTy.getSizeInBits();
2724 assert(VecWidth == 32 || VecWidth == 64);
2725 assert((VecWidth % ValWidth) == 0);
2726
2727 // Cast everything to scalar integer types.
2728 MVT ScalarTy = MVT::getIntegerVT(BitWidth: VecWidth);
2729 // The actual type of ValV may be different from ValTy (which is related
2730 // to the vector type).
2731 unsigned VW = ty(Op: ValV).getSizeInBits();
2732 ValV = DAG.getBitcast(VT: MVT::getIntegerVT(BitWidth: VW), V: ValV);
2733 VecV = DAG.getBitcast(VT: ScalarTy, V: VecV);
2734 if (VW != VecWidth)
2735 ValV = DAG.getAnyExtOrTrunc(Op: ValV, DL: dl, VT: ScalarTy);
2736
2737 SDValue WidthV = DAG.getConstant(Val: ValWidth, DL: dl, VT: MVT::i32);
2738 SDValue InsV;
2739
2740 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: IdxV)) {
2741 unsigned W = C->getZExtValue() * ValWidth;
2742 SDValue OffV = DAG.getConstant(Val: W, DL: dl, VT: MVT::i32);
2743 InsV = DAG.getNode(Opcode: HexagonISD::INSERT, DL: dl, VT: ScalarTy,
2744 Ops: {VecV, ValV, WidthV, OffV});
2745 } else {
2746 if (ty(Op: IdxV) != MVT::i32)
2747 IdxV = DAG.getZExtOrTrunc(Op: IdxV, DL: dl, VT: MVT::i32);
2748 SDValue OffV = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: MVT::i32, N1: IdxV, N2: WidthV);
2749 InsV = DAG.getNode(Opcode: HexagonISD::INSERT, DL: dl, VT: ScalarTy,
2750 Ops: {VecV, ValV, WidthV, OffV});
2751 }
2752
2753 return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecTy, Operand: InsV);
2754}
2755
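// Insert a value into a boolean vector. A single i1 element is inserted via
// an i32 INSERT between predicate-to-register transfers; a subvector insert
// is done on the 64-bit p2d expansion of the predicate.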
2756SDValue
2757HexagonTargetLowering::insertVectorPred(SDValue VecV, SDValue ValV,
2758 SDValue IdxV, const SDLoc &dl,
2759 MVT ValTy, SelectionDAG &DAG) const {
2760 MVT VecTy = ty(Op: VecV);
2761 unsigned VecLen = VecTy.getVectorNumElements();
2762
2763 if (ValTy == MVT::i1) {
2764 SDValue ToReg = getInstr(MachineOpc: Hexagon::C2_tfrpr, dl, Ty: MVT::i32, Ops: {VecV}, DAG);
2765 SDValue Ext = DAG.getSExtOrTrunc(Op: ValV, DL: dl, VT: MVT::i32);
2766 SDValue Width = DAG.getConstant(Val: 8 / VecLen, DL: dl, VT: MVT::i32);
2767 SDValue Idx = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: MVT::i32, N1: IdxV, N2: Width);
2768 SDValue Ins =
2769 DAG.getNode(Opcode: HexagonISD::INSERT, DL: dl, VT: MVT::i32, Ops: {ToReg, Ext, Width, Idx});
2770 return getInstr(MachineOpc: Hexagon::C2_tfrrp, dl, Ty: VecTy, Ops: {Ins}, DAG);
2771 }
2772
2773 assert(ValTy.getVectorElementType() == MVT::i1);
2774 SDValue ValR = ValTy.isVector()
2775 ? DAG.getNode(Opcode: HexagonISD::P2D, DL: dl, VT: MVT::i64, Operand: ValV)
2776 : DAG.getSExtOrTrunc(Op: ValV, DL: dl, VT: MVT::i64);
2777
2778 unsigned Scale = VecLen / ValTy.getVectorNumElements();
2779 assert(Scale > 1);
2780
2781 for (unsigned R = Scale; R > 1; R /= 2) {
2782 ValR = contractPredicate(Vec64: ValR, dl, DAG);
2783 ValR = getCombine(Hi: DAG.getUNDEF(VT: MVT::i32), Lo: ValR, dl, ResTy: MVT::i64, DAG);
2784 }
2785
2786 SDValue Width = DAG.getConstant(Val: 64 / Scale, DL: dl, VT: MVT::i32);
2787 SDValue Idx = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: MVT::i32, N1: IdxV, N2: Width);
2788 SDValue VecR = DAG.getNode(Opcode: HexagonISD::P2D, DL: dl, VT: MVT::i64, Operand: VecV);
2789 SDValue Ins =
2790 DAG.getNode(Opcode: HexagonISD::INSERT, DL: dl, VT: MVT::i64, Ops: {VecR, ValR, Width, Idx});
2791 return DAG.getNode(Opcode: HexagonISD::D2P, DL: dl, VT: VecTy, Operand: Ins);
2792}
2793
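// Widen a 32-bit predicate expansion to 64 bits by sign-extending each byte
// to a halfword (v4i8 -> v4i16).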
2794SDValue
2795HexagonTargetLowering::expandPredicate(SDValue Vec32, const SDLoc &dl,
2796 SelectionDAG &DAG) const {
2797 assert(ty(Vec32).getSizeInBits() == 32);
2798 if (isUndef(Op: Vec32))
2799 return DAG.getUNDEF(VT: MVT::i64);
2800 SDValue P = DAG.getBitcast(VT: MVT::v4i8, V: Vec32);
2801 SDValue X = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v4i16, Operand: P);
2802 return DAG.getBitcast(VT: MVT::i64, V: X);
2803}
2804
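// Narrow a 64-bit predicate expansion to 32 bits by collecting its even
// bytes (the inverse of expandPredicate).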
2805SDValue
2806HexagonTargetLowering::contractPredicate(SDValue Vec64, const SDLoc &dl,
2807 SelectionDAG &DAG) const {
2808 assert(ty(Vec64).getSizeInBits() == 64);
2809 if (isUndef(Op: Vec64))
2810 return DAG.getUNDEF(VT: MVT::i32);
2811 // Collect even bytes:
2812 SDValue A = DAG.getBitcast(VT: MVT::v8i8, V: Vec64);
2813 SDValue S = DAG.getVectorShuffle(VT: MVT::v8i8, dl, N1: A, N2: DAG.getUNDEF(VT: MVT::v8i8),
2814 Mask: {0, 2, 4, 6, 1, 3, 5, 7});
2815 return extractVector(VecV: S, IdxV: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), dl, ValTy: MVT::v4i8,
2816 ResTy: MVT::i32, DAG);
2817}
2818
2819SDValue
2820HexagonTargetLowering::getZero(const SDLoc &dl, MVT Ty, SelectionDAG &DAG)
2821 const {
2822 if (Ty.isVector()) {
2823 unsigned W = Ty.getSizeInBits();
2824 if (W <= 64)
2825 return DAG.getBitcast(VT: Ty, V: DAG.getConstant(Val: 0, DL: dl, VT: MVT::getIntegerVT(BitWidth: W)));
2826 return DAG.getNode(Opcode: ISD::SPLAT_VECTOR, DL: dl, VT: Ty, Operand: getZero(dl, Ty: MVT::i32, DAG));
2827 }
2828
2829 if (Ty.isInteger())
2830 return DAG.getConstant(Val: 0, DL: dl, VT: Ty);
2831 if (Ty.isFloatingPoint())
2832 return DAG.getConstantFP(Val: 0.0, DL: dl, VT: Ty);
2833 llvm_unreachable("Invalid type for zero");
2834}
2835
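// Pad Val with undef subvectors (via CONCAT_VECTORS) until it has as many
// elements as ResTy.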
2836SDValue
2837HexagonTargetLowering::appendUndef(SDValue Val, MVT ResTy, SelectionDAG &DAG)
2838 const {
2839 MVT ValTy = ty(Op: Val);
2840 assert(ValTy.getVectorElementType() == ResTy.getVectorElementType());
2841
2842 unsigned ValLen = ValTy.getVectorNumElements();
2843 unsigned ResLen = ResTy.getVectorNumElements();
2844 if (ValLen == ResLen)
2845 return Val;
2846
2847 const SDLoc &dl(Val);
2848 assert(ValLen < ResLen);
2849 assert(ResLen % ValLen == 0);
2850
2851 SmallVector<SDValue, 4> Concats = {Val};
2852 for (unsigned i = 1, e = ResLen / ValLen; i < e; ++i)
2853 Concats.push_back(Elt: DAG.getUNDEF(VT: ValTy));
2854
2855 return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT: ResTy, Ops: Concats);
2856}
2857
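// Combine Hi and Lo into a value of twice their width, with Lo occupying the
// low half of the result.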
2858SDValue
2859HexagonTargetLowering::getCombine(SDValue Hi, SDValue Lo, const SDLoc &dl,
2860 MVT ResTy, SelectionDAG &DAG) const {
2861 MVT ElemTy = ty(Op: Hi);
2862 assert(ElemTy == ty(Lo));
2863
2864 if (!ElemTy.isVector()) {
2865 assert(ElemTy.isScalarInteger());
2866 MVT PairTy = MVT::getIntegerVT(BitWidth: 2 * ElemTy.getSizeInBits());
2867 SDValue Pair = DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: PairTy, N1: Lo, N2: Hi);
2868 return DAG.getBitcast(VT: ResTy, V: Pair);
2869 }
2870
2871 unsigned Width = ElemTy.getSizeInBits();
2872 MVT IntTy = MVT::getIntegerVT(BitWidth: Width);
2873 MVT PairTy = MVT::getIntegerVT(BitWidth: 2 * Width);
2874 SDValue Pair =
2875 DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: PairTy,
2876 Ops: {DAG.getBitcast(VT: IntTy, V: Lo), DAG.getBitcast(VT: IntTy, V: Hi)});
2877 return DAG.getBitcast(VT: ResTy, V: Pair);
2878}
2879
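// Custom-lower BUILD_VECTOR: 32- and 64-bit vectors are built directly;
// boolean vectors (v2i1/v4i1/v8i1) are recognized as all-0/all-1, or are
// assembled bit by bit in a GPR and transferred to a predicate register.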
2880SDValue
2881HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
2882 MVT VecTy = ty(Op);
2883 unsigned BW = VecTy.getSizeInBits();
2884 const SDLoc &dl(Op);
2885 SmallVector<SDValue,8> Ops;
2886 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i)
2887 Ops.push_back(Elt: Op.getOperand(i));
2888
2889 if (BW == 32)
2890 return buildVector32(Elem: Ops, dl, VecTy, DAG);
2891 if (BW == 64)
2892 return buildVector64(Elem: Ops, dl, VecTy, DAG);
2893
2894 if (VecTy == MVT::v8i1 || VecTy == MVT::v4i1 || VecTy == MVT::v2i1) {
2895 // Check if this is a special case of all-0 or all-1.
2896 bool All0 = true, All1 = true;
2897 for (SDValue P : Ops) {
2898 auto *CN = dyn_cast<ConstantSDNode>(Val: P.getNode());
2899 if (CN == nullptr) {
2900 All0 = All1 = false;
2901 break;
2902 }
2903 uint32_t C = CN->getZExtValue();
2904 All0 &= (C == 0);
2905 All1 &= (C == 1);
2906 }
2907 if (All0)
2908 return DAG.getNode(Opcode: HexagonISD::PFALSE, DL: dl, VT: VecTy);
2909 if (All1)
2910 return DAG.getNode(Opcode: HexagonISD::PTRUE, DL: dl, VT: VecTy);
2911
2912 // For each i1 element in the resulting predicate register, put 1
2913 // shifted by the index of the element into a general-purpose register,
2914 // then or them together and transfer it back into a predicate register.
2915 SDValue Rs[8];
2916 SDValue Z = getZero(dl, Ty: MVT::i32, DAG);
2917 // Always produce 8 bits, repeat inputs if necessary.
2918 unsigned Rep = 8 / VecTy.getVectorNumElements();
2919 for (unsigned i = 0; i != 8; ++i) {
2920 SDValue S = DAG.getConstant(Val: 1ull << i, DL: dl, VT: MVT::i32);
2921 Rs[i] = DAG.getSelect(DL: dl, VT: MVT::i32, Cond: Ops[i/Rep], LHS: S, RHS: Z);
2922 }
2923 for (ArrayRef<SDValue> A(Rs); A.size() != 1; A = A.drop_back(N: A.size()/2)) {
2924 for (unsigned i = 0, e = A.size()/2; i != e; ++i)
2925 Rs[i] = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: MVT::i32, N1: Rs[2*i], N2: Rs[2*i+1]);
2926 }
2927 // Move the value directly to a predicate register.
2928 return getInstr(MachineOpc: Hexagon::C2_tfrrp, dl, Ty: VecTy, Ops: {Rs[0]}, DAG);
2929 }
2930
2931 return SDValue();
2932}
2933
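// Custom-lower CONCAT_VECTORS: 64-bit results use a register-pair combine;
// boolean vector results are formed by contracting the operands and merging
// them with 32-bit inserts before transferring to a predicate register.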
2934SDValue
2935HexagonTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
2936 SelectionDAG &DAG) const {
2937 MVT VecTy = ty(Op);
2938 const SDLoc &dl(Op);
2939 if (VecTy.getSizeInBits() == 64) {
2940 assert(Op.getNumOperands() == 2);
2941 return getCombine(Hi: Op.getOperand(i: 1), Lo: Op.getOperand(i: 0), dl, ResTy: VecTy, DAG);
2942 }
2943
2944 MVT ElemTy = VecTy.getVectorElementType();
2945 if (ElemTy == MVT::i1) {
2946 assert(VecTy == MVT::v2i1 || VecTy == MVT::v4i1 || VecTy == MVT::v8i1);
2947 MVT OpTy = ty(Op: Op.getOperand(i: 0));
2948 // Scale is how many times the operands need to be contracted to match
2949 // the representation in the target register.
2950 unsigned Scale = VecTy.getVectorNumElements() / OpTy.getVectorNumElements();
2951 assert(Scale == Op.getNumOperands() && Scale > 1);
2952
2953 // First, convert all bool vectors to integers, then generate pairwise
2954 // inserts to form values of doubled length. Up until there are only
2955 // two values left to concatenate, all of these values will fit in a
2956 // 32-bit integer, so keep them as i32 to use 32-bit inserts.
2957 SmallVector<SDValue,4> Words[2];
2958 unsigned IdxW = 0;
2959
2960 for (SDValue P : Op.getNode()->op_values()) {
2961 SDValue W = DAG.getNode(Opcode: HexagonISD::P2D, DL: dl, VT: MVT::i64, Operand: P);
2962 for (unsigned R = Scale; R > 1; R /= 2) {
2963 W = contractPredicate(Vec64: W, dl, DAG);
2964 W = getCombine(Hi: DAG.getUNDEF(VT: MVT::i32), Lo: W, dl, ResTy: MVT::i64, DAG);
2965 }
2966 W = LoHalf(V: W, DAG);
2967 Words[IdxW].push_back(Elt: W);
2968 }
2969
2970 while (Scale > 2) {
2971 SDValue WidthV = DAG.getConstant(Val: 64 / Scale, DL: dl, VT: MVT::i32);
2972 Words[IdxW ^ 1].clear();
2973
2974 for (unsigned i = 0, e = Words[IdxW].size(); i != e; i += 2) {
2975 SDValue W0 = Words[IdxW][i], W1 = Words[IdxW][i+1];
2976 // Insert W1 into W0 right next to the significant bits of W0.
2977 SDValue T = DAG.getNode(Opcode: HexagonISD::INSERT, DL: dl, VT: MVT::i32,
2978 Ops: {W0, W1, WidthV, WidthV});
2979 Words[IdxW ^ 1].push_back(Elt: T);
2980 }
2981 IdxW ^= 1;
2982 Scale /= 2;
2983 }
2984
2985 // At this point there should only be two words left, and Scale should be 2.
2986 assert(Scale == 2 && Words[IdxW].size() == 2);
2987
2988 SDValue WW = getCombine(Hi: Words[IdxW][1], Lo: Words[IdxW][0], dl, ResTy: MVT::i64, DAG);
2989 return DAG.getNode(Opcode: HexagonISD::D2P, DL: dl, VT: VecTy, Operand: WW);
2990 }
2991
2992 return SDValue();
2993}
2994
2995SDValue
2996HexagonTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
2997 SelectionDAG &DAG) const {
2998 SDValue Vec = Op.getOperand(i: 0);
2999 MVT ElemTy = ty(Op: Vec).getVectorElementType();
3000 return extractVector(VecV: Vec, IdxV: Op.getOperand(i: 1), dl: SDLoc(Op), ValTy: ElemTy, ResTy: ty(Op), DAG);
3001}
3002
3003SDValue
3004HexagonTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
3005 SelectionDAG &DAG) const {
3006 return extractVector(VecV: Op.getOperand(i: 0), IdxV: Op.getOperand(i: 1), dl: SDLoc(Op),
3007 ValTy: ty(Op), ResTy: ty(Op), DAG);
3008}
3009
3010SDValue
3011HexagonTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
3012 SelectionDAG &DAG) const {
3013 return insertVector(VecV: Op.getOperand(i: 0), ValV: Op.getOperand(i: 1), IdxV: Op.getOperand(i: 2),
3014 dl: SDLoc(Op), ValTy: ty(Op).getVectorElementType(), DAG);
3015}
3016
3017SDValue
3018HexagonTargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
3019 SelectionDAG &DAG) const {
3020 SDValue ValV = Op.getOperand(i: 1);
3021 return insertVector(VecV: Op.getOperand(i: 0), ValV, IdxV: Op.getOperand(i: 2),
3022 dl: SDLoc(Op), ValTy: ty(Op: ValV), DAG);
3023}
3024
3025bool
3026HexagonTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
3027 // Assuming the caller does not have either a signext or zeroext modifier, and
3028 // only one value is accepted, any reasonable truncation is allowed.
3029 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
3030 return false;
3031
3032 // FIXME: in principle up to 64-bit could be made safe, but it would be very
3033 // fragile at the moment: any support for multiple value returns would be
3034 // liable to disallow tail calls involving i64 -> iN truncation in many cases.
3035 return Ty1->getPrimitiveSizeInBits() <= 32;
3036}
3037
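// Custom-lower LOAD: boolean vector loads are performed as an i8
// zero-extending load and transferred to a predicate register; loads from
// provably misaligned constant addresses are replaced with undef, and all
// remaining loads are routed through LowerUnalignedLoad.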
3038SDValue
3039HexagonTargetLowering::LowerLoad(SDValue Op, SelectionDAG &DAG) const {
3040 MVT Ty = ty(Op);
3041 const SDLoc &dl(Op);
3042 LoadSDNode *LN = cast<LoadSDNode>(Val: Op.getNode());
3043 MVT MemTy = LN->getMemoryVT().getSimpleVT();
3044 ISD::LoadExtType ET = LN->getExtensionType();
3045
3046 bool LoadPred = MemTy == MVT::v2i1 || MemTy == MVT::v4i1 || MemTy == MVT::v8i1;
3047 if (LoadPred) {
3048 SDValue NL = DAG.getLoad(
3049 AM: LN->getAddressingMode(), ExtType: ISD::ZEXTLOAD, VT: MVT::i32, dl, Chain: LN->getChain(),
3050 Ptr: LN->getBasePtr(), Offset: LN->getOffset(), PtrInfo: LN->getPointerInfo(),
3051 /*MemoryVT*/ MemVT: MVT::i8, Alignment: LN->getAlign(), MMOFlags: LN->getMemOperand()->getFlags(),
3052 AAInfo: LN->getAAInfo(), Ranges: LN->getRanges());
3053 LN = cast<LoadSDNode>(Val: NL.getNode());
3054 }
3055
3056 Align ClaimAlign = LN->getAlign();
3057 if (!validateConstPtrAlignment(Ptr: LN->getBasePtr(), NeedAlign: ClaimAlign, dl, DAG))
3058 return replaceMemWithUndef(Op, DAG);
3059
3060 // Call LowerUnalignedLoad for all loads; it recognizes loads that
3061 // don't need extra aligning.
3062 SDValue LU = LowerUnalignedLoad(Op: SDValue(LN, 0), DAG);
3063 if (LoadPred) {
3064 SDValue TP = getInstr(MachineOpc: Hexagon::C2_tfrrp, dl, Ty: MemTy, Ops: {LU}, DAG);
3065 if (ET == ISD::SEXTLOAD) {
3066 TP = DAG.getSExtOrTrunc(Op: TP, DL: dl, VT: Ty);
3067 } else if (ET != ISD::NON_EXTLOAD) {
3068 TP = DAG.getZExtOrTrunc(Op: TP, DL: dl, VT: Ty);
3069 }
3070 SDValue Ch = cast<LoadSDNode>(Val: LU.getNode())->getChain();
3071 return DAG.getMergeValues(Ops: {TP, Ch}, dl);
3072 }
3073 return LU;
3074}
3075
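// Custom-lower STORE: boolean vector values are moved to a GPR and stored as
// a truncated i8; stores to provably misaligned constant addresses are
// replaced with undef, and other underaligned stores are expanded.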
3076SDValue
3077HexagonTargetLowering::LowerStore(SDValue Op, SelectionDAG &DAG) const {
3078 const SDLoc &dl(Op);
3079 StoreSDNode *SN = cast<StoreSDNode>(Val: Op.getNode());
3080 SDValue Val = SN->getValue();
3081 MVT Ty = ty(Op: Val);
3082
3083 if (Ty == MVT::v2i1 || Ty == MVT::v4i1 || Ty == MVT::v8i1) {
3084 // Store the exact predicate (all bits).
3085 SDValue TR = getInstr(MachineOpc: Hexagon::C2_tfrpr, dl, Ty: MVT::i32, Ops: {Val}, DAG);
3086 SDValue NS = DAG.getTruncStore(Chain: SN->getChain(), dl, Val: TR, Ptr: SN->getBasePtr(),
3087 SVT: MVT::i8, MMO: SN->getMemOperand());
3088 if (SN->isIndexed()) {
3089 NS = DAG.getIndexedStore(OrigStore: NS, dl, Base: SN->getBasePtr(), Offset: SN->getOffset(),
3090 AM: SN->getAddressingMode());
3091 }
3092 SN = cast<StoreSDNode>(Val: NS.getNode());
3093 }
3094
3095 Align ClaimAlign = SN->getAlign();
3096 if (!validateConstPtrAlignment(Ptr: SN->getBasePtr(), NeedAlign: ClaimAlign, dl, DAG))
3097 return replaceMemWithUndef(Op, DAG);
3098
3099 MVT StoreTy = SN->getMemoryVT().getSimpleVT();
3100 Align NeedAlign = Subtarget.getTypeAlignment(Ty: StoreTy);
3101 if (ClaimAlign < NeedAlign)
3102 return expandUnalignedStore(ST: SN, DAG);
3103 return SDValue(SN, 0);
3104}
3105
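// Lower a load whose alignment is below the natural alignment of its type.
// Depending on the options and the access, the load is either left as is,
// rewritten as two naturally aligned loads combined with VALIGN, or given
// the default target-independent expansion.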
3106SDValue
3107HexagonTargetLowering::LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG)
3108 const {
3109 LoadSDNode *LN = cast<LoadSDNode>(Val: Op.getNode());
3110 MVT LoadTy = ty(Op);
3111 unsigned NeedAlign = Subtarget.getTypeAlignment(Ty: LoadTy).value();
3112 unsigned HaveAlign = LN->getAlign().value();
3113 if (HaveAlign >= NeedAlign)
3114 return Op;
3115
3116 const SDLoc &dl(Op);
3117 const DataLayout &DL = DAG.getDataLayout();
3118 LLVMContext &Ctx = *DAG.getContext();
3119
3120 // If the load aligning is disabled or the load can be broken up into two
3121 // smaller legal loads, do the default (target-independent) expansion.
3122 bool DoDefault = false;
3123 // Handle it in the default way if this is an indexed load.
3124 if (!LN->isUnindexed())
3125 DoDefault = true;
3126
3127 if (!AlignLoads) {
3128 if (allowsMemoryAccessForAlignment(Context&: Ctx, DL, VT: LN->getMemoryVT(),
3129 MMO: *LN->getMemOperand()))
3130 return Op;
3131 DoDefault = true;
3132 }
3133 if (!DoDefault && (2 * HaveAlign) == NeedAlign) {
3134 // The PartTy is the equivalent of "getLoadableTypeOfSize(HaveAlign)".
3135 MVT PartTy = HaveAlign <= 8 ? MVT::getIntegerVT(BitWidth: 8 * HaveAlign)
3136 : MVT::getVectorVT(VT: MVT::i8, NumElements: HaveAlign);
3137 DoDefault =
3138 allowsMemoryAccessForAlignment(Context&: Ctx, DL, VT: PartTy, MMO: *LN->getMemOperand());
3139 }
3140 if (DoDefault) {
3141 std::pair<SDValue, SDValue> P = expandUnalignedLoad(LD: LN, DAG);
3142 return DAG.getMergeValues(Ops: {P.first, P.second}, dl);
3143 }
3144
3145 // The code below generates two loads, both aligned to NeedAlign, and
3146 // with the distance of NeedAlign between them. For that to cover the
3147 // bits that need to be loaded (and without overlapping), the size of
3148 // the loads should be equal to NeedAlign. This is true for all loadable
3149 // types, but add an assertion in case something changes in the future.
3150 assert(LoadTy.getSizeInBits() == 8*NeedAlign);
3151
3152 unsigned LoadLen = NeedAlign;
3153 SDValue Base = LN->getBasePtr();
3154 SDValue Chain = LN->getChain();
3155 auto BO = getBaseAndOffset(Addr: Base);
3156 unsigned BaseOpc = BO.first.getOpcode();
3157 if (BaseOpc == HexagonISD::VALIGNADDR && BO.second % LoadLen == 0)
3158 return Op;
3159
3160 if (BO.second % LoadLen != 0) {
3161 BO.first = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::i32, N1: BO.first,
3162 N2: DAG.getConstant(Val: BO.second % LoadLen, DL: dl, VT: MVT::i32));
3163 BO.second -= BO.second % LoadLen;
3164 }
3165 SDValue BaseNoOff = (BaseOpc != HexagonISD::VALIGNADDR)
3166 ? DAG.getNode(Opcode: HexagonISD::VALIGNADDR, DL: dl, VT: MVT::i32, N1: BO.first,
3167 N2: DAG.getConstant(Val: NeedAlign, DL: dl, VT: MVT::i32))
3168 : BO.first;
3169 SDValue Base0 =
3170 DAG.getMemBasePlusOffset(Base: BaseNoOff, Offset: TypeSize::getFixed(ExactSize: BO.second), DL: dl);
3171 SDValue Base1 = DAG.getMemBasePlusOffset(
3172 Base: BaseNoOff, Offset: TypeSize::getFixed(ExactSize: BO.second + LoadLen), DL: dl);
3173
3174 MachineMemOperand *WideMMO = nullptr;
3175 if (MachineMemOperand *MMO = LN->getMemOperand()) {
3176 MachineFunction &MF = DAG.getMachineFunction();
3177 WideMMO = MF.getMachineMemOperand(
3178 PtrInfo: MMO->getPointerInfo(), F: MMO->getFlags(), Size: 2 * LoadLen, BaseAlignment: Align(LoadLen),
3179 AAInfo: MMO->getAAInfo(), Ranges: MMO->getRanges(), SSID: MMO->getSyncScopeID(),
3180 Ordering: MMO->getSuccessOrdering(), FailureOrdering: MMO->getFailureOrdering());
3181 }
3182
3183 SDValue Load0 = DAG.getLoad(VT: LoadTy, dl, Chain, Ptr: Base0, MMO: WideMMO);
3184 SDValue Load1 = DAG.getLoad(VT: LoadTy, dl, Chain, Ptr: Base1, MMO: WideMMO);
3185
3186 SDValue Aligned = DAG.getNode(Opcode: HexagonISD::VALIGN, DL: dl, VT: LoadTy,
3187 Ops: {Load1, Load0, BaseNoOff.getOperand(i: 0)});
3188 SDValue NewChain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other,
3189 N1: Load0.getValue(R: 1), N2: Load1.getValue(R: 1));
3190 SDValue M = DAG.getMergeValues(Ops: {Aligned, NewChain}, dl);
3191 return M;
3192}
3193
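// Custom-lower UADDO/USUBO only for the case of adding or subtracting the
// constant 1, where the overflow bit can be derived from comparing the
// result against 0 (for UADDO) or -1 (for USUBO).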
3194SDValue
3195HexagonTargetLowering::LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const {
3196 SDValue X = Op.getOperand(i: 0), Y = Op.getOperand(i: 1);
3197 auto *CY = dyn_cast<ConstantSDNode>(Val&: Y);
3198 if (!CY)
3199 return SDValue();
3200
3201 const SDLoc &dl(Op);
3202 SDVTList VTs = Op.getNode()->getVTList();
3203 assert(VTs.NumVTs == 2);
3204 assert(VTs.VTs[1] == MVT::i1);
3205 unsigned Opc = Op.getOpcode();
3206
3207 if (CY) {
3208 uint64_t VY = CY->getZExtValue();
3209 assert(VY != 0 && "This should have been folded");
3210 // X +/- 1
3211 if (VY != 1)
3212 return SDValue();
3213
3214 if (Opc == ISD::UADDO) {
3215 SDValue Op = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: VTs.VTs[0], Ops: {X, Y});
3216 SDValue Ov = DAG.getSetCC(DL: dl, VT: MVT::i1, LHS: Op, RHS: getZero(dl, Ty: ty(Op), DAG),
3217 Cond: ISD::SETEQ);
3218 return DAG.getMergeValues(Ops: {Op, Ov}, dl);
3219 }
3220 if (Opc == ISD::USUBO) {
3221 SDValue Op = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: VTs.VTs[0], Ops: {X, Y});
3222 SDValue Ov = DAG.getSetCC(DL: dl, VT: MVT::i1, LHS: Op,
3223 RHS: DAG.getAllOnesConstant(DL: dl, VT: ty(Op)), Cond: ISD::SETEQ);
3224 return DAG.getMergeValues(Ops: {Op, Ov}, dl);
3225 }
3226 }
3227
3228 return SDValue();
3229}
3230
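// Lower UADDO_CARRY directly to HexagonISD::ADDC. USUBO_CARRY becomes SUBC
// with the incoming and outgoing carry logically negated (borrow vs. carry
// convention).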
3231SDValue HexagonTargetLowering::LowerUAddSubOCarry(SDValue Op,
3232 SelectionDAG &DAG) const {
3233 const SDLoc &dl(Op);
3234 unsigned Opc = Op.getOpcode();
3235 SDValue X = Op.getOperand(i: 0), Y = Op.getOperand(i: 1), C = Op.getOperand(i: 2);
3236
3237 if (Opc == ISD::UADDO_CARRY)
3238 return DAG.getNode(Opcode: HexagonISD::ADDC, DL: dl, VTList: Op.getNode()->getVTList(),
3239 Ops: { X, Y, C });
3240
3241 EVT CarryTy = C.getValueType();
3242 SDValue SubC = DAG.getNode(Opcode: HexagonISD::SUBC, DL: dl, VTList: Op.getNode()->getVTList(),
3243 Ops: { X, Y, DAG.getLogicalNOT(DL: dl, Val: C, VT: CarryTy) });
3244 SDValue Out[] = { SubC.getValue(R: 0),
3245 DAG.getLogicalNOT(DL: dl, Val: SubC.getValue(R: 1), VT: CarryTy) };
3246 return DAG.getMergeValues(Ops: Out, dl);
3247}
3248
3249SDValue
3250HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
3251 SDValue Chain = Op.getOperand(i: 0);
3252 SDValue Offset = Op.getOperand(i: 1);
3253 SDValue Handler = Op.getOperand(i: 2);
3254 SDLoc dl(Op);
3255 auto PtrVT = getPointerTy(DL: DAG.getDataLayout());
3256
3257 // Mark function as containing a call to EH_RETURN.
3258 HexagonMachineFunctionInfo *FuncInfo =
3259 DAG.getMachineFunction().getInfo<HexagonMachineFunctionInfo>();
3260 FuncInfo->setHasEHReturn();
3261
3262 unsigned OffsetReg = Hexagon::R28;
3263
3264 SDValue StoreAddr =
3265 DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: DAG.getRegister(Reg: Hexagon::R30, VT: PtrVT),
3266 N2: DAG.getIntPtrConstant(Val: 4, DL: dl));
3267 Chain = DAG.getStore(Chain, dl, Val: Handler, Ptr: StoreAddr, PtrInfo: MachinePointerInfo());
3268 Chain = DAG.getCopyToReg(Chain, dl, Reg: OffsetReg, N: Offset);
3269
3270 // Not needed; we already use it as an explicit input to EH_RETURN.
3271 // MF.getRegInfo().addLiveOut(OffsetReg);
3272
3273 return DAG.getNode(Opcode: HexagonISD::EH_RETURN, DL: dl, VT: MVT::Other, Operand: Chain);
3274}
3275
3276SDValue
3277HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3278 unsigned Opc = Op.getOpcode();
3279 // Handle INLINEASM first.
3280 if (Opc == ISD::INLINEASM || Opc == ISD::INLINEASM_BR)
3281 return LowerINLINEASM(Op, DAG);
3282
3283 if (isHvxOperation(N: Op.getNode(), DAG)) {
3284 // If HVX lowering returns nothing, try the default lowering.
3285 if (SDValue V = LowerHvxOperation(Op, DAG))
3286 return V;
3287 }
3288
3289 switch (Opc) {
3290 default:
3291#ifndef NDEBUG
3292 Op.getNode()->dumpr(&DAG);
3293#endif
3294 llvm_unreachable("Should not custom lower this!");
3295
3296 case ISD::FDIV:
3297 return LowerFDIV(Op, DAG);
3298 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
3299 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, DAG);
3300 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
3301 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
3302 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
3303 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
3304 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
3305 case ISD::BITCAST: return LowerBITCAST(Op, DAG);
3306 case ISD::LOAD: return LowerLoad(Op, DAG);
3307 case ISD::STORE: return LowerStore(Op, DAG);
3308 case ISD::UADDO:
3309 case ISD::USUBO: return LowerUAddSubO(Op, DAG);
3310 case ISD::UADDO_CARRY:
3311 case ISD::USUBO_CARRY: return LowerUAddSubOCarry(Op, DAG);
3312 case ISD::SRA:
3313 case ISD::SHL:
3314 case ISD::SRL: return LowerVECTOR_SHIFT(Op, DAG);
3315 case ISD::ROTL: return LowerROTL(Op, DAG);
3316 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3317 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
3318 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
3319 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
3320 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
3321 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
3322 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
3323 case ISD::GlobalAddress: return LowerGLOBALADDRESS(Op, DAG);
3324 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3325 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
3326 case ISD::VACOPY: return LowerVACOPY(Op, DAG);
3327 case ISD::VASTART: return LowerVASTART(Op, DAG);
3328 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
3329 case ISD::SETCC: return LowerSETCC(Op, DAG);
3330 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
3331 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3332 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
3333 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG);
3334 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG);
3335 case ISD::READSTEADYCOUNTER: return LowerREADSTEADYCOUNTER(Op, DAG);
3336 break;
3337 }
3338
3339 return SDValue();
3340}
3341
3342void
3343HexagonTargetLowering::LowerOperationWrapper(SDNode *N,
3344 SmallVectorImpl<SDValue> &Results,
3345 SelectionDAG &DAG) const {
3346 if (isHvxOperation(N, DAG)) {
3347 LowerHvxOperationWrapper(N, Results, DAG);
3348 if (!Results.empty())
3349 return;
3350 }
3351
3352 SDValue Op(N, 0);
3353 unsigned Opc = N->getOpcode();
3354
3355 switch (Opc) {
3356 case HexagonISD::SSAT:
3357 case HexagonISD::USAT:
3358 Results.push_back(Elt: opJoin(Ops: SplitVectorOp(Op, DAG), dl: SDLoc(Op), DAG));
3359 break;
3360 case ISD::STORE:
3361 // We are only custom-lowering stores to verify the alignment of the
3362 // address if it is a compile-time constant. Since a store can be
3363 // modified during type-legalization (the value being stored may need
3364 // legalization), return empty Results here to indicate that we don't
3365 // really make any changes in the custom lowering.
3366 return;
3367 default:
3368 TargetLowering::LowerOperationWrapper(N, Results, DAG);
3369 break;
3370 }
3371}
3372
3373void
3374HexagonTargetLowering::ReplaceNodeResults(SDNode *N,
3375 SmallVectorImpl<SDValue> &Results,
3376 SelectionDAG &DAG) const {
3377 if (isHvxOperation(N, DAG)) {
3378 ReplaceHvxNodeResults(N, Results, DAG);
3379 if (!Results.empty())
3380 return;
3381 }
3382
3383 const SDLoc &dl(N);
3384 switch (N->getOpcode()) {
3385 case ISD::SRL:
3386 case ISD::SRA:
3387 case ISD::SHL:
3388 return;
3389 case ISD::BITCAST:
3390 // Handle a bitcast from v8i1 to i8.
3391 if (N->getValueType(ResNo: 0) == MVT::i8) {
3392 if (N->getOperand(Num: 0).getValueType() == MVT::v8i1) {
3393 SDValue P = getInstr(MachineOpc: Hexagon::C2_tfrpr, dl, Ty: MVT::i32,
3394 Ops: N->getOperand(Num: 0), DAG);
3395 SDValue T = DAG.getAnyExtOrTrunc(Op: P, DL: dl, VT: MVT::i8);
3396 Results.push_back(Elt: T);
3397 }
3398 }
3399 break;
3400 }
3401}
3402
3403SDValue
3404HexagonTargetLowering::PerformDAGCombine(SDNode *N,
3405 DAGCombinerInfo &DCI) const {
3406 if (isHvxOperation(N, DAG&: DCI.DAG)) {
3407 if (SDValue V = PerformHvxDAGCombine(N, DCI))
3408 return V;
3409 return SDValue();
3410 }
3411
3412 SDValue Op(N, 0);
3413 const SDLoc &dl(Op);
3414 unsigned Opc = Op.getOpcode();
3415
3416 if (Opc == ISD::TRUNCATE) {
3417 SDValue Op0 = Op.getOperand(i: 0);
3418 // fold (truncate (build pair x, y)) -> (truncate x) or x
3419 if (Op0.getOpcode() == ISD::BUILD_PAIR) {
3420 EVT TruncTy = Op.getValueType();
3421 SDValue Elem0 = Op0.getOperand(i: 0);
3422 // if we match the low element of the pair, just return it.
3423 if (Elem0.getValueType() == TruncTy)
3424 return Elem0;
3425 // otherwise, if the low part is still too large, apply the truncate.
3426 if (Elem0.getValueType().bitsGT(VT: TruncTy))
3427 return DCI.DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: TruncTy, Operand: Elem0);
3428 }
3429 }
3430
3431 if (DCI.isBeforeLegalizeOps())
3432 return SDValue();
3433
3434 if (Opc == HexagonISD::P2D) {
3435 SDValue P = Op.getOperand(i: 0);
3436 switch (P.getOpcode()) {
3437 case HexagonISD::PTRUE:
3438 return DCI.DAG.getAllOnesConstant(DL: dl, VT: ty(Op));
3439 case HexagonISD::PFALSE:
3440 return getZero(dl, Ty: ty(Op), DAG&: DCI.DAG);
3441 default:
3442 break;
3443 }
3444 } else if (Opc == ISD::VSELECT) {
3445 // This is pretty much duplicated in HexagonISelLoweringHVX...
3446 //
3447 // (vselect (xor x, ptrue), v0, v1) -> (vselect x, v1, v0)
3448 SDValue Cond = Op.getOperand(i: 0);
3449 if (Cond->getOpcode() == ISD::XOR) {
3450 SDValue C0 = Cond.getOperand(i: 0), C1 = Cond.getOperand(i: 1);
3451 if (C1->getOpcode() == HexagonISD::PTRUE) {
3452 SDValue VSel = DCI.DAG.getNode(Opcode: ISD::VSELECT, DL: dl, VT: ty(Op), N1: C0,
3453 N2: Op.getOperand(i: 2), N3: Op.getOperand(i: 1));
3454 return VSel;
3455 }
3456 }
3457 } else if (Opc == ISD::TRUNCATE) {
3458 SDValue Op0 = Op.getOperand(i: 0);
3459 // fold (truncate (build pair x, y)) -> (truncate x) or x
3460 if (Op0.getOpcode() == ISD::BUILD_PAIR) {
3461 MVT TruncTy = ty(Op);
3462 SDValue Elem0 = Op0.getOperand(i: 0);
3463 // if we match the low element of the pair, just return it.
3464 if (ty(Op: Elem0) == TruncTy)
3465 return Elem0;
3466 // otherwise, if the low part is still too large, apply the truncate.
3467 if (ty(Op: Elem0).bitsGT(VT: TruncTy))
3468 return DCI.DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: TruncTy, Operand: Elem0);
3469 }
3470 } else if (Opc == ISD::OR) {
3471 // fold (or (shl xx, s), (zext y)) -> (COMBINE (shl xx, s-32), y)
3472 // if s >= 32
3473 auto fold0 = [&, this](SDValue Op) {
3474 if (ty(Op) != MVT::i64)
3475 return SDValue();
3476 SDValue Shl = Op.getOperand(i: 0);
3477 SDValue Zxt = Op.getOperand(i: 1);
3478 if (Shl.getOpcode() != ISD::SHL)
3479 std::swap(a&: Shl, b&: Zxt);
3480
3481 if (Shl.getOpcode() != ISD::SHL || Zxt.getOpcode() != ISD::ZERO_EXTEND)
3482 return SDValue();
3483
3484 SDValue Z = Zxt.getOperand(i: 0);
3485 auto *Amt = dyn_cast<ConstantSDNode>(Val: Shl.getOperand(i: 1));
3486 if (Amt && Amt->getZExtValue() >= 32 && ty(Op: Z).getSizeInBits() <= 32) {
3487 unsigned A = Amt->getZExtValue();
3488 SDValue S = Shl.getOperand(i: 0);
3489 SDValue T0 = DCI.DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: ty(Op: S), N1: S,
3490 N2: DCI.DAG.getConstant(Val: A - 32, DL: dl, VT: MVT::i32));
3491 SDValue T1 = DCI.DAG.getZExtOrTrunc(Op: T0, DL: dl, VT: MVT::i32);
3492 SDValue T2 = DCI.DAG.getZExtOrTrunc(Op: Z, DL: dl, VT: MVT::i32);
3493 return DCI.DAG.getNode(Opcode: HexagonISD::COMBINE, DL: dl, VT: MVT::i64, Ops: {T1, T2});
3494 }
3495 return SDValue();
3496 };
3497
3498 if (SDValue R = fold0(Op))
3499 return R;
3500 }
3501
3502 return SDValue();
3503}
3504
3505/// Returns relocation base for the given PIC jumptable.
3506SDValue
3507HexagonTargetLowering::getPICJumpTableRelocBase(SDValue Table,
3508 SelectionDAG &DAG) const {
3509 int Idx = cast<JumpTableSDNode>(Val&: Table)->getIndex();
3510 EVT VT = Table.getValueType();
3511 SDValue T = DAG.getTargetJumpTable(JTI: Idx, VT, TargetFlags: HexagonII::MO_PCREL);
3512 return DAG.getNode(Opcode: HexagonISD::AT_PCREL, DL: SDLoc(Table), VT, Operand: T);
3513}
3514
3515//===----------------------------------------------------------------------===//
3516// Inline Assembly Support
3517//===----------------------------------------------------------------------===//
3518
3519TargetLowering::ConstraintType
3520HexagonTargetLowering::getConstraintType(StringRef Constraint) const {
3521 if (Constraint.size() == 1) {
3522 switch (Constraint[0]) {
3523 case 'q':
3524 case 'v':
3525 if (Subtarget.useHVXOps())
3526 return C_RegisterClass;
3527 break;
3528 case 'a':
3529 return C_RegisterClass;
3530 default:
3531 break;
3532 }
3533 }
3534 return TargetLowering::getConstraintType(Constraint);
3535}
3536
3537std::pair<unsigned, const TargetRegisterClass*>
3538HexagonTargetLowering::getRegForInlineAsmConstraint(
3539 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
3540
3541 if (Constraint.size() == 1) {
3542 switch (Constraint[0]) {
3543 case 'r': // R0-R31
3544 switch (VT.SimpleTy) {
3545 default:
3546 return {0u, nullptr};
3547 case MVT::i1:
3548 case MVT::i8:
3549 case MVT::i16:
3550 case MVT::i32:
3551 case MVT::f32:
3552 return {0u, &Hexagon::IntRegsRegClass};
3553 case MVT::i64:
3554 case MVT::f64:
3555 return {0u, &Hexagon::DoubleRegsRegClass};
3556 }
3557 break;
3558 case 'a': // M0-M1
3559 if (VT != MVT::i32)
3560 return {0u, nullptr};
3561 return {0u, &Hexagon::ModRegsRegClass};
3562 case 'q': // q0-q3
3563 switch (VT.getSizeInBits()) {
3564 default:
3565 return {0u, nullptr};
3566 case 64:
3567 case 128:
3568 return {0u, &Hexagon::HvxQRRegClass};
3569 }
3570 break;
3571 case 'v': // V0-V31
3572 switch (VT.getSizeInBits()) {
3573 default:
3574 return {0u, nullptr};
3575 case 512:
3576 return {0u, &Hexagon::HvxVRRegClass};
3577 case 1024:
3578 if (Subtarget.hasV60Ops() && Subtarget.useHVX128BOps())
3579 return {0u, &Hexagon::HvxVRRegClass};
3580 return {0u, &Hexagon::HvxWRRegClass};
3581 case 2048:
3582 return {0u, &Hexagon::HvxWRRegClass};
3583 }
3584 break;
3585 default:
3586 return {0u, nullptr};
3587 }
3588 }
3589
3590 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3591}
3592
3593/// isFPImmLegal - Returns true if the target can instruction select the
3594/// specified FP immediate natively. If false, the legalizer will
3595/// materialize the FP immediate as a load from a constant pool.
3596bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
3597 bool ForCodeSize) const {
3598 return true;
3599}
3600
3601/// Returns true if it is beneficial to convert a load of a constant
3602/// to just the constant itself.
3603bool HexagonTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
3604 Type *Ty) const {
3605 if (!ConstantLoadsToImm)
3606 return false;
3607
3608 assert(Ty->isIntegerTy());
3609 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3610 return (BitSize > 0 && BitSize <= 64);
3611}
3612
3613/// isLegalAddressingMode - Return true if the addressing mode represented by
3614/// AM is legal for this target, for a load/store of the specified type.
3615bool HexagonTargetLowering::isLegalAddressingMode(const DataLayout &DL,
3616 const AddrMode &AM, Type *Ty,
3617 unsigned AS, Instruction *I) const {
3618 if (Ty->isSized()) {
3619 // When LSR detects uses of the same base address to access different
3620 // types (e.g. unions), it will assume a conservative type for these
3621 // uses:
3622 // LSR Use: Kind=Address of void in addrspace(4294967295), ...
3623 // The type Ty passed here would then be "void". Skip the alignment
3624 // checks, but do not return false right away, since that confuses
3625 // LSR into crashing.
3626 Align A = DL.getABITypeAlign(Ty);
3627 // The base offset must be a multiple of the alignment.
3628 if (!isAligned(Lhs: A, SizeInBytes: AM.BaseOffs))
3629 return false;
3630 // The shifted offset must fit in 11 bits.
3631 if (!isInt<11>(x: AM.BaseOffs >> Log2(A)))
3632 return false;
3633 }
3634
3635 // No global is ever allowed as a base.
3636 if (AM.BaseGV)
3637 return false;
3638
3639 int Scale = AM.Scale;
3640 if (Scale < 0)
3641 Scale = -Scale;
3642 switch (Scale) {
3643 case 0: // No scale reg, "r+i", "r", or just "i".
3644 break;
3645 default: // No scaled addressing mode.
3646 return false;
3647 }
3648 return true;
3649}
3650
3651/// Return true if folding a constant offset with the given GlobalAddress is
3652/// legal. It is frequently not legal in PIC relocation models.
3653bool HexagonTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA)
3654 const {
3655 return HTM.getRelocationModel() == Reloc::Static;
3656}
3657
3658 /// isLegalICmpImmediate - Return true if the specified immediate is a legal
3659 /// icmp immediate, that is, the target has icmp instructions which can compare
3660/// a register against the immediate without having to materialize the
3661/// immediate into a register.
3662bool HexagonTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
3663 return Imm >= -512 && Imm <= 511;
3664}
3665
3666/// IsEligibleForTailCallOptimization - Check whether the call is eligible
3667/// for tail call optimization. Targets which want to do tail call
3668/// optimization should implement this function.
3669bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
3670 SDValue Callee,
3671 CallingConv::ID CalleeCC,
3672 bool IsVarArg,
3673 bool IsCalleeStructRet,
3674 bool IsCallerStructRet,
3675 const SmallVectorImpl<ISD::OutputArg> &Outs,
3676 const SmallVectorImpl<SDValue> &OutVals,
3677 const SmallVectorImpl<ISD::InputArg> &Ins,
3678 SelectionDAG& DAG) const {
3679 const Function &CallerF = DAG.getMachineFunction().getFunction();
3680 CallingConv::ID CallerCC = CallerF.getCallingConv();
3681 bool CCMatch = CallerCC == CalleeCC;
3682
3683 // ***************************************************************************
3684 // Look for obvious safe cases to perform tail call optimization that do not
3685 // require ABI changes.
3686 // ***************************************************************************
3687
3688 // If this is a tail call via a function pointer, then don't do it!
3689 if (!isa<GlobalAddressSDNode>(Val: Callee) &&
3690 !isa<ExternalSymbolSDNode>(Val: Callee)) {
3691 return false;
3692 }
3693
3694 // Do not optimize if the calling conventions do not match and the conventions
3695 // used are not C or Fast.
3696 if (!CCMatch) {
3697 bool R = (CallerCC == CallingConv::C || CallerCC == CallingConv::Fast);
3698 bool E = (CalleeCC == CallingConv::C || CalleeCC == CallingConv::Fast);
3699 // If R & E, then ok.
3700 if (!R || !E)
3701 return false;
3702 }
3703
3704 // Do not tail call optimize vararg calls.
3705 if (IsVarArg)
3706 return false;
3707
3708 // Also avoid tail call optimization if either caller or callee uses struct
3709 // return semantics.
3710 if (IsCalleeStructRet || IsCallerStructRet)
3711 return false;
3712
3713 // In addition to the cases above, we also disable Tail Call Optimization if
3714 // the calling convention requires that at least one outgoing argument go
3715 // on the stack. We cannot check that here because at this point that
3716 // information is not available.
3717 return true;
3718}
3719
3720/// Returns the target specific optimal type for load and store operations as
3721/// a result of memset, memcpy, and memmove lowering.
3722///
3723 /// If DstAlign is zero, it means the destination can satisfy any alignment
3724 /// constraint. Similarly, if SrcAlign is zero, it means there is no need to
3725 /// check it against an alignment requirement, probably because the
3726/// source does not need to be loaded. If 'IsMemset' is true, that means it's
3727/// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
3728/// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
3729/// does not need to be loaded. It returns EVT::Other if the type should be
3730/// determined using generic target-independent logic.
3731EVT HexagonTargetLowering::getOptimalMemOpType(
3732 LLVMContext &Context, const MemOp &Op,
3733 const AttributeList &FuncAttributes) const {
3734 if (Op.size() >= 8 && Op.isAligned(AlignCheck: Align(8)))
3735 return MVT::i64;
3736 if (Op.size() >= 4 && Op.isAligned(AlignCheck: Align(4)))
3737 return MVT::i32;
3738 if (Op.size() >= 2 && Op.isAligned(AlignCheck: Align(2)))
3739 return MVT::i16;
3740 return MVT::Other;
3741}
3742
3743bool HexagonTargetLowering::allowsMemoryAccess(
3744 LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
3745 Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const {
3746 if (!VT.isSimple())
3747 return false;
3748 MVT SVT = VT.getSimpleVT();
3749 if (Subtarget.isHVXVectorType(VecTy: SVT, IncludeBool: true))
3750 return allowsHvxMemoryAccess(VecTy: SVT, Flags, Fast);
3751 return TargetLoweringBase::allowsMemoryAccess(
3752 Context, DL, VT, AddrSpace, Alignment, Flags, Fast);
3753}
3754
3755bool HexagonTargetLowering::allowsMisalignedMemoryAccesses(
3756 EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
3757 unsigned *Fast) const {
3758 if (!VT.isSimple())
3759 return false;
3760 MVT SVT = VT.getSimpleVT();
3761 if (Subtarget.isHVXVectorType(VecTy: SVT, IncludeBool: true))
3762 return allowsHvxMisalignedMemoryAccesses(VecTy: SVT, Flags, Fast);
3763 if (Fast)
3764 *Fast = 0;
3765 return false;
3766}
3767
3768std::pair<const TargetRegisterClass*, uint8_t>
3769HexagonTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
3770 MVT VT) const {
3771 if (Subtarget.isHVXVectorType(VecTy: VT, IncludeBool: true)) {
3772 unsigned BitWidth = VT.getSizeInBits();
3773 unsigned VecWidth = Subtarget.getVectorLength() * 8;
3774
3775 if (VT.getVectorElementType() == MVT::i1)
3776 return std::make_pair(x: &Hexagon::HvxQRRegClass, y: 1);
3777 if (BitWidth == VecWidth)
3778 return std::make_pair(x: &Hexagon::HvxVRRegClass, y: 1);
3779 assert(BitWidth == 2 * VecWidth);
3780 return std::make_pair(x: &Hexagon::HvxWRRegClass, y: 1);
3781 }
3782
3783 return TargetLowering::findRepresentativeClass(TRI, VT);
3784}
3785
3786bool HexagonTargetLowering::shouldReduceLoadWidth(
3787 SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT,
3788 std::optional<unsigned> ByteOffset) const {
3789 // TODO: This may be worth removing. Check regression tests for diffs.
3790 if (!TargetLoweringBase::shouldReduceLoadWidth(Load, ExtTy, NewVT,
3791 ByteOffset))
3792 return false;
3793
3794 auto *L = cast<LoadSDNode>(Val: Load);
3795 std::pair<SDValue, int> BO = getBaseAndOffset(Addr: L->getBasePtr());
3796 // Small-data object, do not shrink.
3797 if (BO.first.getOpcode() == HexagonISD::CONST32_GP)
3798 return false;
3799 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Val&: BO.first)) {
3800 auto &HTM = static_cast<const HexagonTargetMachine &>(getTargetMachine());
3801 const auto *GO = dyn_cast_or_null<const GlobalObject>(Val: GA->getGlobal());
3802 return !GO || !HTM.getObjFileLowering()->isGlobalInSmallSection(GO, TM: HTM);
3803 }
3804 return true;
3805}
3806
3807void HexagonTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
3808 SDNode *Node) const {
3809 AdjustHvxInstrPostInstrSelection(MI, Node);
3810}
3811
3812Value *HexagonTargetLowering::emitLoadLinked(IRBuilderBase &Builder,
3813 Type *ValueTy, Value *Addr,
3814 AtomicOrdering Ord) const {
3815 unsigned SZ = ValueTy->getPrimitiveSizeInBits();
3816 assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
3817 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
3818 : Intrinsic::hexagon_L4_loadd_locked;
3819
3820 Value *Call =
3821 Builder.CreateIntrinsic(ID: IntID, Args: Addr, /*FMFSource=*/nullptr, Name: "larx");
3822
3823 return Builder.CreateBitCast(V: Call, DestTy: ValueTy);
3824}
3825
3826/// Perform a store-conditional operation to Addr. Return the status of the
3827/// store. This should be 0 if the store succeeded, non-zero otherwise.
3828Value *HexagonTargetLowering::emitStoreConditional(IRBuilderBase &Builder,
3829 Value *Val, Value *Addr,
3830 AtomicOrdering Ord) const {
3831 BasicBlock *BB = Builder.GetInsertBlock();
3832 Module *M = BB->getParent()->getParent();
3833 Type *Ty = Val->getType();
3834 unsigned SZ = Ty->getPrimitiveSizeInBits();
3835
3836 Type *CastTy = Builder.getIntNTy(N: SZ);
3837 assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
3838 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
3839 : Intrinsic::hexagon_S4_stored_locked;
3840
3841 Val = Builder.CreateBitCast(V: Val, DestTy: CastTy);
3842
3843 Value *Call = Builder.CreateIntrinsic(ID: IntID, Args: {Addr, Val},
3844 /*FMFSource=*/nullptr, Name: "stcx");
3845 Value *Cmp = Builder.CreateICmpEQ(LHS: Call, RHS: Builder.getInt32(C: 0), Name: "");
3846 Value *Ext = Builder.CreateZExt(V: Cmp, DestTy: Type::getInt32Ty(C&: M->getContext()));
3847 return Ext;
3848}
3849
3850TargetLowering::AtomicExpansionKind
3851HexagonTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
3852 // Do not expand loads and stores that don't exceed 64 bits.
3853 return LI->getType()->getPrimitiveSizeInBits() > 64
3854 ? AtomicExpansionKind::LLOnly
3855 : AtomicExpansionKind::None;
3856}
3857
3858TargetLowering::AtomicExpansionKind
3859HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
3860 // Do not expand loads and stores that don't exceed 64 bits.
3861 return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64
3862 ? AtomicExpansionKind::Expand
3863 : AtomicExpansionKind::None;
3864}
3865
3866TargetLowering::AtomicExpansionKind
3867HexagonTargetLowering::shouldExpandAtomicCmpXchgInIR(
3868 const AtomicCmpXchgInst *AI) const {
3869 return AtomicExpansionKind::LLSC;
3870}
3871
3872bool HexagonTargetLowering::isMaskAndCmp0FoldingBeneficial(
3873 const Instruction &AndI) const {
3874 // Only sink 'and' mask to cmp use block if it is masking a single bit since
3875 // this will fold the and/cmp/br into a single tstbit instruction.
3876 ConstantInt *Mask = dyn_cast<ConstantInt>(Val: AndI.getOperand(i: 1));
3877 if (!Mask)
3878 return false;
3879 return Mask->getValue().isPowerOf2();
3880}
3881
3882// Check if the result of the node is only used as a return value, as
3883// otherwise we can't perform a tail-call.
3884bool HexagonTargetLowering::isUsedByReturnOnly(SDNode *N,
3885 SDValue &Chain) const {
3886 if (N->getNumValues() != 1)
3887 return false;
3888 if (!N->hasNUsesOfValue(NUses: 1, Value: 0))
3889 return false;
3890
3891 SDNode *Copy = *N->user_begin();
3892
3893 if (Copy->getOpcode() == ISD::BITCAST) {
3894 return isUsedByReturnOnly(N: Copy, Chain);
3895 }
3896
3897 if (Copy->getOpcode() != ISD::CopyToReg) {
3898 return false;
3899 }
3900
3901 // If the ISD::CopyToReg has a glue operand, we conservatively assume it
3902 // isn't safe to perform a tail call.
3903 if (Copy->getOperand(Num: Copy->getNumOperands() - 1).getValueType() == MVT::Glue)
3904 return false;
3905
3906 // The copy must be used by a HexagonISD::RET_GLUE, and nothing else.
3907 bool HasRet = false;
3908 for (SDNode *Node : Copy->users()) {
3909 if (Node->getOpcode() != HexagonISD::RET_GLUE)
3910 return false;
3911 HasRet = true;
3912 }
3913 if (!HasRet)
3914 return false;
3915
3916 Chain = Copy->getOperand(Num: 0);
3917 return true;
3918}
3919