//===-- HexagonISelLowering.h - Hexagon DAG Lowering Interface --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that Hexagon uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H
#define LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H

#include "Hexagon.h"
#include "MCTargetDesc/HexagonMCTargetDesc.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/InlineAsm.h"
#include <cstdint>
#include <utility>

namespace llvm {

class HexagonSubtarget;

class HexagonTargetLowering : public TargetLowering {
  int VarArgsFrameOffset;   // Frame offset to start of varargs area.
  const HexagonTargetMachine &HTM;
  const HexagonSubtarget &Subtarget;

public:
  explicit HexagonTargetLowering(const TargetMachine &TM,
                                 const HexagonSubtarget &ST);

  /// IsEligibleForTailCallOptimization - Check whether the call is eligible
  /// for tail call optimization. Targets which want to do tail call
  /// optimization should implement this function.
  bool IsEligibleForTailCallOptimization(SDValue Callee,
      CallingConv::ID CalleeCC, bool isVarArg, bool isCalleeStructRet,
      bool isCallerStructRet, const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG& DAG) const;

  void getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &Infos,
                          const CallBase &I, MachineFunction &MF,
                          unsigned Intrinsic) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isCheapToSpeculateCttz(Type *) const override { return true; }
  bool isCheapToSpeculateCtlz(Type *) const override { return true; }
  bool isCtlzFast() const override { return true; }

  bool hasBitTest(SDValue X, SDValue Y) const override;

  bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;

  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;

  /// Return true if an FMA operation is faster than a pair of mul and add
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this
  /// method returns true (and FMAs are legal), otherwise fmuladd is
  /// expanded to mul + add.
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &,
                                  EVT) const override;

  // Should we expand the build vector with shuffles?
  bool shouldExpandBuildVectorWithShuffles(EVT VT,
      unsigned DefinedValues) const override;
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  bool isTargetCanonicalConstantNode(SDValue Op) const override;

  bool isShuffleMaskLegal(ArrayRef<int> Mask, EVT VT) const override;
  LegalizeTypeAction getPreferredVectorAction(MVT VT) const override;
  LegalizeAction getCustomOperationAction(SDNode &Op) const override;

  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  void LowerOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results,
                             SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  std::pair<MVT, unsigned>
  handleMaskRegisterForCallingConv(const HexagonSubtarget &Subtarget,
                                   EVT VT) const;

  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerANY_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerStore(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUAddSubOCarry(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFDIV(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerREADSTEADYCOUNTER(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_LABEL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       const SDLoc &dl, SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;
  SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG) const;
  SDValue LowerToTLSInitialExecModel(GlobalAddressSDNode *GA,
                                     SelectionDAG &DAG) const;
  SDValue LowerToTLSLocalExecModel(GlobalAddressSDNode *GA,
                                   SelectionDAG &DAG) const;
  SDValue GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain,
                            GlobalAddressSDNode *GA, SDValue InGlue, EVT PtrVT,
                            unsigned ReturnReg,
                            unsigned char OperandGlues) const;
  SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;
  SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &dl, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals,
                          const SmallVectorImpl<SDValue> &OutVals,
                          SDValue Callee) const;

  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;

  bool CanLowerReturn(CallingConv::ID CallConv,
                      MachineFunction &MF, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context, const Type *RetTy) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals,
                      const SDLoc &dl, SelectionDAG &DAG) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;

  Register getRegisterByName(const char* RegName, LLT VT,
                             const MachineFunction &MF) const override;

  unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context,
                                                CallingConv::ID CC, EVT VT,
                                                EVT &IntermediateVT,
                                                unsigned &NumIntermediates,
                                                MVT &RegisterVT) const override;

  MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
                                    EVT VT) const override;
  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    return Hexagon::R0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    return Hexagon::R1;
  }

  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;

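  /// Return the type to use for SETCC results: i1 for scalar compares, and a
  /// vector of i1 with one element per lane for vector compares.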
  EVT getSetCCResultType(const DataLayout &, LLVMContext &C,
                         EVT VT) const override {
    if (!VT.isVector())
      return MVT::i1;
    else
      return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());
  }

  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                  SDValue &Base, SDValue &Offset,
                                  ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  // Intrinsics
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
  /// isLegalAddressingMode - Return true if the addressing mode represented
  /// by AM is legal for this target, for a load/store of the specified type.
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type.
  /// TODO: Handle pre/postinc as well.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                             Type *Ty, unsigned AS,
                             Instruction *I = nullptr) const override;
  /// Return true if folding a constant offset with the given GlobalAddress
  /// is legal. It is frequently not legal in PIC relocation models.
  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  /// isLegalICmpImmediate - Return true if the specified immediate is a legal
  /// icmp immediate, that is, the target has icmp instructions which can
  /// compare a register against the immediate without having to materialize
  /// the immediate into a register.
  bool isLegalICmpImmediate(int64_t Imm) const override;

  EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;

  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
                          unsigned AddrSpace, Align Alignment,
                          MachineMemOperand::Flags Flags,
                          unsigned *Fast) const override;

  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
                                      Align Alignment,
                                      MachineMemOperand::Flags Flags,
                                      unsigned *Fast) const override;

  /// Returns relocation base for the given PIC jumptable.
  SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG)
      const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT,
                             std::optional<unsigned> ByteOffset) const override;

  void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                     SDNode *Node) const override;

  // Handling of atomic RMW instructions.
  Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr,
                              AtomicOrdering Ord) const override;
  AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(const AtomicCmpXchgInst *AI) const override;

  AtomicExpansionKind
  shouldExpandAtomicRMWInIR(const AtomicRMWInst *AI) const override {
    return AtomicExpansionKind::LLSC;
  }

private:
  void initializeHVXLowering();
  unsigned getPreferredHvxVectorAction(MVT VecTy) const;
  unsigned getCustomHvxOperationAction(SDNode &Op) const;

  bool validateConstPtrAlignment(SDValue Ptr, Align NeedAlign, const SDLoc &dl,
                                 SelectionDAG &DAG) const;
  SDValue replaceMemWithUndef(SDValue Op, SelectionDAG &DAG) const;

  std::pair<SDValue,int> getBaseAndOffset(SDValue Addr) const;

  bool getBuildVectorConstInts(ArrayRef<SDValue> Values, MVT VecTy,
                               SelectionDAG &DAG,
                               MutableArrayRef<ConstantInt*> Consts) const;
  SDValue buildVector32(ArrayRef<SDValue> Elem, const SDLoc &dl, MVT VecTy,
                        SelectionDAG &DAG) const;
  SDValue buildVector64(ArrayRef<SDValue> Elem, const SDLoc &dl, MVT VecTy,
                        SelectionDAG &DAG) const;
  SDValue extractVector(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                        MVT ValTy, MVT ResTy, SelectionDAG &DAG) const;
  SDValue extractVectorPred(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                            MVT ValTy, MVT ResTy, SelectionDAG &DAG) const;
  SDValue insertVector(SDValue VecV, SDValue ValV, SDValue IdxV,
                       const SDLoc &dl, MVT ValTy, SelectionDAG &DAG) const;
  SDValue insertVectorPred(SDValue VecV, SDValue ValV, SDValue IdxV,
                           const SDLoc &dl, MVT ValTy, SelectionDAG &DAG) const;
  SDValue expandPredicate(SDValue Vec32, const SDLoc &dl,
                          SelectionDAG &DAG) const;
  SDValue contractPredicate(SDValue Vec64, const SDLoc &dl,
                            SelectionDAG &DAG) const;
  SDValue getSplatValue(SDValue Op, SelectionDAG &DAG) const;
  SDValue getVectorShiftByInt(SDValue Op, SelectionDAG &DAG) const;
  SDValue appendUndef(SDValue Val, MVT ResTy, SelectionDAG &DAG) const;
  SDValue getCombine(SDValue Hi, SDValue Lo, const SDLoc &dl, MVT ResTy,
                     SelectionDAG &DAG) const;

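  // Returns true if Op is an undefined value, either an IMPLICIT_DEF machine
  // node or an ISD::UNDEF node.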
  bool isUndef(SDValue Op) const {
    if (Op.isMachineOpcode())
      return Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF;
    return Op.getOpcode() == ISD::UNDEF;
  }
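  // Builds a machine node with the given opcode, result type, and operands,
  // and returns its first result as an SDValue.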
  SDValue getInstr(unsigned MachineOpc, const SDLoc &dl, MVT Ty,
                   ArrayRef<SDValue> Ops, SelectionDAG &DAG) const {
    SDNode *N = DAG.getMachineNode(MachineOpc, dl, Ty, Ops);
    return SDValue(N, 0);
  }
  SDValue getZero(const SDLoc &dl, MVT Ty, SelectionDAG &DAG) const;

  using VectorPair = std::pair<SDValue, SDValue>;
  using TypePair = std::pair<MVT, MVT>;

  SDValue getInt(unsigned IntId, MVT ResTy, ArrayRef<SDValue> Ops,
                 const SDLoc &dl, SelectionDAG &DAG) const;

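  // Convenience accessors for the simple value type of a value, or of both
  // values in a vector pair.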
  MVT ty(SDValue Op) const {
    return Op.getValueType().getSimpleVT();
  }
  TypePair ty(const VectorPair &Ops) const {
    return { Ops.first.getValueType().getSimpleVT(),
             Ops.second.getValueType().getSimpleVT() };
  }
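  // Returns an integer type with the same total bit width as Ty, or Ty itself
  // if it is not a vector type.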
  MVT tyScalar(MVT Ty) const {
    if (!Ty.isVector())
      return Ty;
    return MVT::getIntegerVT(Ty.getSizeInBits());
  }
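  // Returns a vector type with the same total bit width as Ty, but with
  // elements of type ElemTy.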
  MVT tyVector(MVT Ty, MVT ElemTy) const {
    if (Ty.isVector() && Ty.getVectorElementType() == ElemTy)
      return Ty;
    unsigned TyWidth = Ty.getSizeInBits();
    unsigned ElemWidth = ElemTy.getSizeInBits();
    assert((TyWidth % ElemWidth) == 0);
    return MVT::getVectorVT(ElemTy, TyWidth/ElemWidth);
  }

  MVT typeJoin(const TypePair &Tys) const;
  TypePair typeSplit(MVT Ty) const;
  MVT typeExtElem(MVT VecTy, unsigned Factor) const;
  MVT typeTruncElem(MVT VecTy, unsigned Factor) const;
  TypePair typeExtendToWider(MVT Ty0, MVT Ty1) const;
  TypePair typeWidenToWider(MVT Ty0, MVT Ty1) const;
  MVT typeLegalize(MVT Ty, SelectionDAG &DAG) const;
  MVT typeWidenToHvx(MVT Ty) const;

  SDValue opJoin(const VectorPair &Ops, const SDLoc &dl,
                 SelectionDAG &DAG) const;
  VectorPair opSplit(SDValue Vec, const SDLoc &dl, SelectionDAG &DAG) const;
  SDValue opCastElem(SDValue Vec, MVT ElemTy, SelectionDAG &DAG) const;

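  // Returns the low half of V: the low 32-bit subregister of a 64-bit scalar,
  // or the low half subvector of a vector.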
  SDValue LoHalf(SDValue V, SelectionDAG &DAG) const {
    MVT Ty = ty(V);
    const SDLoc &dl(V);
    if (!Ty.isVector()) {
      assert(Ty.getSizeInBits() == 64);
      return DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, V);
    }
    MVT HalfTy = typeSplit(Ty).first;
    SDValue Idx = getZero(dl, MVT::i32, DAG);
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, HalfTy, V, Idx);
  }
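  // Returns the high half of V: the high 32-bit subregister of a 64-bit
  // scalar, or the high half subvector of a vector.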
  SDValue HiHalf(SDValue V, SelectionDAG &DAG) const {
    MVT Ty = ty(V);
    const SDLoc &dl(V);
    if (!Ty.isVector()) {
      assert(Ty.getSizeInBits() == 64);
      return DAG.getTargetExtractSubreg(Hexagon::isub_hi, dl, MVT::i32, V);
    }
    MVT HalfTy = typeSplit(Ty).first;
    SDValue Idx = DAG.getConstant(HalfTy.getVectorNumElements(), dl, MVT::i32);
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, HalfTy, V, Idx);
  }

  bool allowsHvxMemoryAccess(MVT VecTy, MachineMemOperand::Flags Flags,
                             unsigned *Fast) const;
  bool allowsHvxMisalignedMemoryAccesses(MVT VecTy,
                                         MachineMemOperand::Flags Flags,
                                         unsigned *Fast) const;
  void AdjustHvxInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const;

  bool isHvxSingleTy(MVT Ty) const;
  bool isHvxPairTy(MVT Ty) const;
  bool isHvxBoolTy(MVT Ty) const;
  SDValue convertToByteIndex(SDValue ElemIdx, MVT ElemTy,
                             SelectionDAG &DAG) const;
  SDValue getIndexInWord32(SDValue Idx, MVT ElemTy, SelectionDAG &DAG) const;
  SDValue getByteShuffle(const SDLoc &dl, SDValue Op0, SDValue Op1,
                         ArrayRef<int> Mask, SelectionDAG &DAG) const;

  SDValue buildHvxVectorReg(ArrayRef<SDValue> Values, const SDLoc &dl,
                            MVT VecTy, SelectionDAG &DAG) const;
  SDValue buildHvxVectorPred(ArrayRef<SDValue> Values, const SDLoc &dl,
                             MVT VecTy, SelectionDAG &DAG) const;
  SDValue createHvxPrefixPred(SDValue PredV, const SDLoc &dl,
                              unsigned BitBytes, bool ZeroFill,
                              SelectionDAG &DAG) const;
  SDValue extractHvxElementReg(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                               MVT ResTy, SelectionDAG &DAG) const;
  SDValue extractHvxElementPred(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                                MVT ResTy, SelectionDAG &DAG) const;
  SDValue insertHvxElementReg(SDValue VecV, SDValue IdxV, SDValue ValV,
                              const SDLoc &dl, SelectionDAG &DAG) const;
  SDValue insertHvxElementPred(SDValue VecV, SDValue IdxV, SDValue ValV,
                               const SDLoc &dl, SelectionDAG &DAG) const;
  SDValue extractHvxSubvectorReg(SDValue OrigOp, SDValue VecV, SDValue IdxV,
                                 const SDLoc &dl, MVT ResTy,
                                 SelectionDAG &DAG) const;
  SDValue extractHvxSubvectorPred(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                                  MVT ResTy, SelectionDAG &DAG) const;
  SDValue insertHvxSubvectorReg(SDValue VecV, SDValue SubV, SDValue IdxV,
                                const SDLoc &dl, SelectionDAG &DAG) const;
  SDValue insertHvxSubvectorPred(SDValue VecV, SDValue SubV, SDValue IdxV,
                                 const SDLoc &dl, SelectionDAG &DAG) const;
  SDValue extendHvxVectorPred(SDValue VecV, const SDLoc &dl, MVT ResTy,
                              bool ZeroExt, SelectionDAG &DAG) const;
  SDValue compressHvxPred(SDValue VecQ, const SDLoc &dl, MVT ResTy,
                          SelectionDAG &DAG) const;
  SDValue resizeToWidth(SDValue VecV, MVT ResTy, bool Signed, const SDLoc &dl,
                        SelectionDAG &DAG) const;
  SDValue extractSubvector(SDValue Vec, MVT SubTy, unsigned SubIdx,
                           SelectionDAG &DAG) const;
  VectorPair emitHvxAddWithOverflow(SDValue A, SDValue B, const SDLoc &dl,
                                    bool Signed, SelectionDAG &DAG) const;
  VectorPair emitHvxShiftRightRnd(SDValue Val, unsigned Amt, bool Signed,
                                  SelectionDAG &DAG) const;
  SDValue emitHvxMulHsV60(SDValue A, SDValue B, const SDLoc &dl,
                          SelectionDAG &DAG) const;
  SDValue emitHvxMulLoHiV60(SDValue A, bool SignedA, SDValue B, bool SignedB,
                            const SDLoc &dl, SelectionDAG &DAG) const;
  SDValue emitHvxMulLoHiV62(SDValue A, bool SignedA, SDValue B, bool SignedB,
                            const SDLoc &dl, SelectionDAG &DAG) const;

  SDValue LowerHvxBuildVector(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxSplatVector(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxConcatVectors(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxExtractElement(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxInsertElement(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxExtractSubvector(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxInsertSubvector(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxBitcast(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxAnyExt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxSignExt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxZeroExt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxCttz(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxMulh(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxMulLoHi(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxExtend(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxSelect(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxShift(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxFunnelShift(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxIntrinsic(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxMaskedOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxFpExtend(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxFpToInt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxIntToFp(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxPred32ToFp(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxPred64ToFp(SDValue Op, SelectionDAG &DAG) const;
  SDValue ExpandHvxFpToInt(SDValue Op, SelectionDAG &DAG) const;
  SDValue ExpandHvxIntToFp(SDValue Op, SelectionDAG &DAG) const;

  VectorPair SplitVectorOp(SDValue Op, SelectionDAG &DAG) const;

  SDValue SplitHvxMemOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue WidenHvxLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue WidenHvxStore(SDValue Op, SelectionDAG &DAG) const;
  SDValue WidenHvxSetCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LegalizeHvxResize(SDValue Op, SelectionDAG &DAG) const;
  SDValue ExpandHvxResizeIntoSteps(SDValue Op, SelectionDAG &DAG) const;
  SDValue EqualizeFpIntConversion(SDValue Op, SelectionDAG &DAG) const;

  SDValue CreateTLWrapper(SDValue Op, SelectionDAG &DAG) const;
  SDValue RemoveTLWrapper(SDValue Op, SelectionDAG &DAG) const;

  std::pair<const TargetRegisterClass*, uint8_t>
  findRepresentativeClass(const TargetRegisterInfo *TRI,
                          MVT VT) const override;

  bool shouldSplitToHvx(MVT Ty, SelectionDAG &DAG) const;
  bool shouldWidenToHvx(MVT Ty, SelectionDAG &DAG) const;
  bool isHvxOperation(SDNode *N, SelectionDAG &DAG) const;
  SDValue LowerHvxOperation(SDValue Op, SelectionDAG &DAG) const;
  void LowerHvxOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                SelectionDAG &DAG) const;
  void ReplaceHvxNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                             SelectionDAG &DAG) const;

  SDValue combineTruncateBeforeLegal(SDValue Op, DAGCombinerInfo &DCI) const;
  SDValue combineConcatVectorsBeforeLegal(SDValue Op,
                                          DAGCombinerInfo &DCI) const;
  SDValue combineVectorShuffleBeforeLegal(SDValue Op,
                                          DAGCombinerInfo &DCI) const;

  SDValue PerformHvxDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H