//===- MipsISelLowering.cpp - Mips DAG Lowering Implementation ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that Mips uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "MipsISelLowering.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "MCTargetDesc/MipsInstPrinter.h"
#include "MCTargetDesc/MipsMCTargetDesc.h"
#include "MipsCCState.h"
#include "MipsInstrInfo.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterInfo.h"
#include "MipsSubtarget.h"
#include "MipsTargetMachine.h"
#include "MipsTargetObjectFile.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cctype>
#include <cstdint>
#include <deque>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "mips-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool>
    NoZeroDivCheck("mno-check-zero-division", cl::Hidden,
                   cl::desc("MIPS: Don't trap on integer division by zero."),
                   cl::init(false));

extern cl::opt<bool> EmitJalrReloc;

static const MCPhysReg Mips64DPRegs[8] = {
  Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
  Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64
};

// The MIPS MSA ABI passes vector arguments in the integer register set.
// The number of integer registers used is dependent on the ABI used.
MVT MipsTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                      CallingConv::ID CC,
                                                      EVT VT) const {
  if (!VT.isVector())
    return getRegisterType(Context, VT);

  if (VT.isPow2VectorType() && VT.getVectorElementType().isRound())
    return Subtarget.isABI_O32() || VT.getSizeInBits() == 32 ? MVT::i32
                                                             : MVT::i64;
  return getRegisterType(Context, VT.getVectorElementType());
}
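// For example (a sketch, not a full ABI description): a v4i32 argument is a
// power-of-two vector of round elements, so it is passed in MVT::i32 GPRs
// under O32 and in MVT::i64 GPRs under N32/N64.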

unsigned MipsTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                           CallingConv::ID CC,
                                                           EVT VT) const {
  if (VT.isVector()) {
    if (VT.isPow2VectorType() && VT.getVectorElementType().isRound())
      return divideCeil(VT.getSizeInBits(), Subtarget.isABI_O32() ? 32 : 64);
    return VT.getVectorNumElements() *
           getNumRegisters(Context, VT.getVectorElementType());
  }
  return MipsTargetLowering::getNumRegisters(Context, VT);
}
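// Worked example (assuming a 128-bit MSA vector): for v4i32 this returns
// divideCeil(128, 32) == 4 registers under O32 and divideCeil(128, 64) == 2
// under N32/N64, matching the register type chosen above.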

unsigned MipsTargetLowering::getVectorTypeBreakdownForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  if (VT.isPow2VectorType()) {
    IntermediateVT = getRegisterTypeForCallingConv(Context, CC, VT);
    RegisterVT = IntermediateVT.getSimpleVT();
    NumIntermediates = getNumRegistersForCallingConv(Context, CC, VT);
    return NumIntermediates;
  }
  IntermediateVT = VT.getVectorElementType();
  NumIntermediates = VT.getVectorNumElements();
  RegisterVT = getRegisterType(Context, IntermediateVT);
  return NumIntermediates * getNumRegisters(Context, IntermediateVT);
}
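// For a non-power-of-two vector such as v3i32 (an illustrative example), the
// breakdown falls through to the element type: three i32 intermediates, each
// occupying its own i32 register.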

SDValue MipsTargetLowering::getGlobalReg(SelectionDAG &DAG, EVT Ty) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MipsFunctionInfo *FI = MF.getInfo<MipsFunctionInfo>();
  return DAG.getRegister(FI->getGlobalBaseReg(MF), Ty);
}

SDValue MipsTargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty, 0, Flag);
}

SDValue MipsTargetLowering::getTargetNode(ExternalSymbolSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetExternalSymbol(N->getSymbol(), Ty, Flag);
}

SDValue MipsTargetLowering::getTargetNode(BlockAddressSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
}

SDValue MipsTargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
}

SDValue MipsTargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
                                   N->getOffset(), Flag);
}

const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((MipsISD::NodeType)Opcode) {
  case MipsISD::FIRST_NUMBER: break;
  case MipsISD::JmpLink: return "MipsISD::JmpLink";
  case MipsISD::TailCall: return "MipsISD::TailCall";
  case MipsISD::Highest: return "MipsISD::Highest";
  case MipsISD::Higher: return "MipsISD::Higher";
  case MipsISD::Hi: return "MipsISD::Hi";
  case MipsISD::Lo: return "MipsISD::Lo";
  case MipsISD::GotHi: return "MipsISD::GotHi";
  case MipsISD::TlsHi: return "MipsISD::TlsHi";
  case MipsISD::GPRel: return "MipsISD::GPRel";
  case MipsISD::ThreadPointer: return "MipsISD::ThreadPointer";
  case MipsISD::Ret: return "MipsISD::Ret";
  case MipsISD::ERet: return "MipsISD::ERet";
  case MipsISD::EH_RETURN: return "MipsISD::EH_RETURN";
  case MipsISD::FAbs: return "MipsISD::FAbs";
  case MipsISD::FMS: return "MipsISD::FMS";
  case MipsISD::FPBrcond: return "MipsISD::FPBrcond";
  case MipsISD::FPCmp: return "MipsISD::FPCmp";
  case MipsISD::FSELECT: return "MipsISD::FSELECT";
  case MipsISD::MTC1_D64: return "MipsISD::MTC1_D64";
  case MipsISD::CMovFP_T: return "MipsISD::CMovFP_T";
  case MipsISD::CMovFP_F: return "MipsISD::CMovFP_F";
  case MipsISD::TruncIntFP: return "MipsISD::TruncIntFP";
  case MipsISD::MFHI: return "MipsISD::MFHI";
  case MipsISD::MFLO: return "MipsISD::MFLO";
  case MipsISD::MTLOHI: return "MipsISD::MTLOHI";
  case MipsISD::Mult: return "MipsISD::Mult";
  case MipsISD::Multu: return "MipsISD::Multu";
  case MipsISD::MAdd: return "MipsISD::MAdd";
  case MipsISD::MAddu: return "MipsISD::MAddu";
  case MipsISD::MSub: return "MipsISD::MSub";
  case MipsISD::MSubu: return "MipsISD::MSubu";
  case MipsISD::DivRem: return "MipsISD::DivRem";
  case MipsISD::DivRemU: return "MipsISD::DivRemU";
  case MipsISD::DivRem16: return "MipsISD::DivRem16";
  case MipsISD::DivRemU16: return "MipsISD::DivRemU16";
  case MipsISD::BuildPairF64: return "MipsISD::BuildPairF64";
  case MipsISD::ExtractElementF64: return "MipsISD::ExtractElementF64";
  case MipsISD::Wrapper: return "MipsISD::Wrapper";
  case MipsISD::DynAlloc: return "MipsISD::DynAlloc";
  case MipsISD::Sync: return "MipsISD::Sync";
  case MipsISD::Ext: return "MipsISD::Ext";
  case MipsISD::Ins: return "MipsISD::Ins";
  case MipsISD::CIns: return "MipsISD::CIns";
  case MipsISD::LWL: return "MipsISD::LWL";
  case MipsISD::LWR: return "MipsISD::LWR";
  case MipsISD::SWL: return "MipsISD::SWL";
  case MipsISD::SWR: return "MipsISD::SWR";
  case MipsISD::LDL: return "MipsISD::LDL";
  case MipsISD::LDR: return "MipsISD::LDR";
  case MipsISD::SDL: return "MipsISD::SDL";
  case MipsISD::SDR: return "MipsISD::SDR";
  case MipsISD::EXTP: return "MipsISD::EXTP";
  case MipsISD::EXTPDP: return "MipsISD::EXTPDP";
  case MipsISD::EXTR_S_H: return "MipsISD::EXTR_S_H";
  case MipsISD::EXTR_W: return "MipsISD::EXTR_W";
  case MipsISD::EXTR_R_W: return "MipsISD::EXTR_R_W";
  case MipsISD::EXTR_RS_W: return "MipsISD::EXTR_RS_W";
  case MipsISD::SHILO: return "MipsISD::SHILO";
  case MipsISD::MTHLIP: return "MipsISD::MTHLIP";
  case MipsISD::MULSAQ_S_W_PH: return "MipsISD::MULSAQ_S_W_PH";
  case MipsISD::MAQ_S_W_PHL: return "MipsISD::MAQ_S_W_PHL";
  case MipsISD::MAQ_S_W_PHR: return "MipsISD::MAQ_S_W_PHR";
  case MipsISD::MAQ_SA_W_PHL: return "MipsISD::MAQ_SA_W_PHL";
  case MipsISD::MAQ_SA_W_PHR: return "MipsISD::MAQ_SA_W_PHR";
  case MipsISD::DOUBLE_SELECT_I: return "MipsISD::DOUBLE_SELECT_I";
  case MipsISD::DOUBLE_SELECT_I64: return "MipsISD::DOUBLE_SELECT_I64";
  case MipsISD::DPAU_H_QBL: return "MipsISD::DPAU_H_QBL";
  case MipsISD::DPAU_H_QBR: return "MipsISD::DPAU_H_QBR";
  case MipsISD::DPSU_H_QBL: return "MipsISD::DPSU_H_QBL";
  case MipsISD::DPSU_H_QBR: return "MipsISD::DPSU_H_QBR";
  case MipsISD::DPAQ_S_W_PH: return "MipsISD::DPAQ_S_W_PH";
  case MipsISD::DPSQ_S_W_PH: return "MipsISD::DPSQ_S_W_PH";
  case MipsISD::DPAQ_SA_L_W: return "MipsISD::DPAQ_SA_L_W";
  case MipsISD::DPSQ_SA_L_W: return "MipsISD::DPSQ_SA_L_W";
  case MipsISD::DPA_W_PH: return "MipsISD::DPA_W_PH";
  case MipsISD::DPS_W_PH: return "MipsISD::DPS_W_PH";
  case MipsISD::DPAQX_S_W_PH: return "MipsISD::DPAQX_S_W_PH";
  case MipsISD::DPAQX_SA_W_PH: return "MipsISD::DPAQX_SA_W_PH";
  case MipsISD::DPAX_W_PH: return "MipsISD::DPAX_W_PH";
  case MipsISD::DPSX_W_PH: return "MipsISD::DPSX_W_PH";
  case MipsISD::DPSQX_S_W_PH: return "MipsISD::DPSQX_S_W_PH";
  case MipsISD::DPSQX_SA_W_PH: return "MipsISD::DPSQX_SA_W_PH";
  case MipsISD::MULSA_W_PH: return "MipsISD::MULSA_W_PH";
  case MipsISD::MULT: return "MipsISD::MULT";
  case MipsISD::MULTU: return "MipsISD::MULTU";
  case MipsISD::MADD_DSP: return "MipsISD::MADD_DSP";
  case MipsISD::MADDU_DSP: return "MipsISD::MADDU_DSP";
  case MipsISD::MSUB_DSP: return "MipsISD::MSUB_DSP";
  case MipsISD::MSUBU_DSP: return "MipsISD::MSUBU_DSP";
  case MipsISD::SHLL_DSP: return "MipsISD::SHLL_DSP";
  case MipsISD::SHRA_DSP: return "MipsISD::SHRA_DSP";
  case MipsISD::SHRL_DSP: return "MipsISD::SHRL_DSP";
  case MipsISD::SETCC_DSP: return "MipsISD::SETCC_DSP";
  case MipsISD::SELECT_CC_DSP: return "MipsISD::SELECT_CC_DSP";
  case MipsISD::VALL_ZERO: return "MipsISD::VALL_ZERO";
  case MipsISD::VANY_ZERO: return "MipsISD::VANY_ZERO";
  case MipsISD::VALL_NONZERO: return "MipsISD::VALL_NONZERO";
  case MipsISD::VANY_NONZERO: return "MipsISD::VANY_NONZERO";
  case MipsISD::VCEQ: return "MipsISD::VCEQ";
  case MipsISD::VCLE_S: return "MipsISD::VCLE_S";
  case MipsISD::VCLE_U: return "MipsISD::VCLE_U";
  case MipsISD::VCLT_S: return "MipsISD::VCLT_S";
  case MipsISD::VCLT_U: return "MipsISD::VCLT_U";
  case MipsISD::VEXTRACT_SEXT_ELT: return "MipsISD::VEXTRACT_SEXT_ELT";
  case MipsISD::VEXTRACT_ZEXT_ELT: return "MipsISD::VEXTRACT_ZEXT_ELT";
  case MipsISD::VNOR: return "MipsISD::VNOR";
  case MipsISD::VSHF: return "MipsISD::VSHF";
  case MipsISD::SHF: return "MipsISD::SHF";
  case MipsISD::ILVEV: return "MipsISD::ILVEV";
  case MipsISD::ILVOD: return "MipsISD::ILVOD";
  case MipsISD::ILVL: return "MipsISD::ILVL";
  case MipsISD::ILVR: return "MipsISD::ILVR";
  case MipsISD::PCKEV: return "MipsISD::PCKEV";
  case MipsISD::PCKOD: return "MipsISD::PCKOD";
  case MipsISD::INSVE: return "MipsISD::INSVE";
  }
  return nullptr;
}

MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
                                       const MipsSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI), ABI(TM.getABI()) {
  // Mips does not have an i1 type, so use i32 for the results of setcc
  // operations (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  // The cmp.cond.fmt instruction in MIPS32r6/MIPS64r6 uses 0 and -1 like MSA
  // does. Integer booleans still use 0 and 1.
  if (Subtarget.hasMips32r6())
    setBooleanContents(ZeroOrOneBooleanContent,
                       ZeroOrNegativeOneBooleanContent);

  // Load extended operations for i1 types must be promoted.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
  }

  // MIPS doesn't have extending float->double load/store. Set the
  // LoadExtAction for f32 and f16.
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
  }

  // Set LoadExtAction for f16 vectors to Expand.
  for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
    MVT F16VT = MVT::getVectorVT(MVT::f16, VT.getVectorNumElements());
    if (F16VT.isValid())
      setLoadExtAction(ISD::EXTLOAD, VT, F16VT, Expand);
  }

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // Used by legalize types to correctly generate the setcc result.
  // Without this, every float setcc comes with an AND/OR on the result; we
  // don't want that, since the fpcmp result goes to a flag register, which is
  // used implicitly by brcond and select operations.
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  // Mips Custom Operations
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::FABS, MVT::f32, Custom);
  setOperationAction(ISD::FABS, MVT::f64, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // Lower fmin and fmax operations for MIPS R6; without this mapping the
  // corresponding instructions are defined but never selected.
  if (Subtarget.hasMips32r6()) {
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f32, Expand);
    setOperationAction(ISD::FMAXNUM, MVT::f32, Expand);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f64, Expand);
    setOperationAction(ISD::FMAXNUM, MVT::f64, Expand);
  }

  if (Subtarget.isGP64bit()) {
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    if (Subtarget.hasMips64r6()) {
      setOperationAction(ISD::LOAD, MVT::i64, Legal);
      setOperationAction(ISD::STORE, MVT::i64, Legal);
    } else {
      setOperationAction(ISD::LOAD, MVT::i64, Custom);
      setOperationAction(ISD::STORE, MVT::i64, Custom);
    }
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }

  if (!Subtarget.isGP64bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  if (Subtarget.isGP64bit())
    setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  setOperationAction(ISD::SDIV, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIV, MVT::i64, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Operations not directly supported by Mips.
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (Subtarget.hasCnMips()) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);

  if (!Subtarget.hasMips32r2())
    setOperationAction(ISD::ROTR, MVT::i32, Expand);

  if (!Subtarget.hasMips64r2())
    setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FLOG, MVT::f32, Expand);
  setOperationAction(ISD::FLOG2, MVT::f32, Expand);
  setOperationAction(ISD::FLOG10, MVT::f32, Expand);
  setOperationAction(ISD::FEXP, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);

  // Lower f16 conversion operations into library calls.
  setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);

  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Use the default for now.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (!Subtarget.isGP64bit()) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  if (!Subtarget.hasMips32r2()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  }

  // MIPS16 lacks MIPS32's clz and clo instructions.
  if (!Subtarget.hasMips32() || Subtarget.inMips16Mode())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);
  if (!Subtarget.hasMips64())
    setOperationAction(ISD::CTLZ, MVT::i64, Expand);

  if (!Subtarget.hasMips32r2())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  if (!Subtarget.hasMips64r2())
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);

  if (Subtarget.isGP64bit() && Subtarget.hasMips64r6()) {
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i32, Legal);
    setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i32, Legal);
    setTruncStoreAction(MVT::i64, MVT::i32, Legal);
  } else if (Subtarget.isGP64bit()) {
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i32, Custom);
    setTruncStoreAction(MVT::i64, MVT::i32, Custom);
  }

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  setTargetDAGCombine({ISD::SDIVREM, ISD::UDIVREM, ISD::SELECT, ISD::AND,
                       ISD::OR, ISD::ADD, ISD::SUB, ISD::AssertZext, ISD::SHL});

  if (Subtarget.isGP64bit())
    setMaxAtomicSizeInBitsSupported(64);
  else
    setMaxAtomicSizeInBitsSupported(32);

  setMinFunctionAlignment(Subtarget.isGP64bit() ? Align(8) : Align(4));

  // The arguments on the stack are defined in terms of 4-byte slots on O32
  // and 8-byte slots on N32/N64.
  setMinStackArgumentAlignment((ABI.IsN32() || ABI.IsN64()) ? Align(8)
                                                            : Align(4));

  setStackPointerRegisterToSaveRestore(ABI.IsN64() ? Mips::SP_64 : Mips::SP);

  MaxStoresPerMemcpy = 16;

  isMicroMips = Subtarget.inMicroMipsMode();
}

const MipsTargetLowering *
MipsTargetLowering::create(const MipsTargetMachine &TM,
                           const MipsSubtarget &STI) {
  if (STI.inMips16Mode())
    return createMips16TargetLowering(TM, STI);

  return createMipsSETargetLowering(TM, STI);
}

// Create a fast isel object.
FastISel *
MipsTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                   const TargetLibraryInfo *libInfo) const {
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(funcInfo.MF->getTarget());

  // We support only the standard-encoding [MIPS32, MIPS32R5] ISAs.
  bool UseFastISel = TM.Options.EnableFastISel && Subtarget.hasMips32() &&
                     !Subtarget.hasMips32r6() && !Subtarget.inMips16Mode() &&
                     !Subtarget.inMicroMipsMode();

  // Disable if any of the following is true: we do not generate PIC, the ABI
  // is not O32, or XGOT is being used.
  if (!TM.isPositionIndependent() || !TM.getABI().IsO32() ||
      Subtarget.useXGOT())
    UseFastISel = false;

  return UseFastISel ? Mips::createFastISel(funcInfo, libInfo) : nullptr;
}

EVT MipsTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
                                           EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

static SDValue performDivRemCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  EVT Ty = N->getValueType(0);
  unsigned LO = (Ty == MVT::i32) ? Mips::LO0 : Mips::LO0_64;
  unsigned HI = (Ty == MVT::i32) ? Mips::HI0 : Mips::HI0_64;
  unsigned Opc = N->getOpcode() == ISD::SDIVREM ? MipsISD::DivRem16
                                                : MipsISD::DivRemU16;
  SDLoc DL(N);

  SDValue DivRem = DAG.getNode(Opc, DL, MVT::Glue,
                               N->getOperand(0), N->getOperand(1));
  SDValue InChain = DAG.getEntryNode();
  SDValue InGlue = DivRem;

  // Insert MFLO.
  if (N->hasAnyUseOfValue(0)) {
    SDValue CopyFromLo = DAG.getCopyFromReg(InChain, DL, LO, Ty, InGlue);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), CopyFromLo);
    InChain = CopyFromLo.getValue(1);
    InGlue = CopyFromLo.getValue(2);
  }

  // Insert MFHI.
  if (N->hasAnyUseOfValue(1)) {
    SDValue CopyFromHi = DAG.getCopyFromReg(InChain, DL, HI, Ty, InGlue);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), CopyFromHi);
  }

  return SDValue();
}
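// Resulting DAG shape (a sketch): an i32 (sdivrem x, y) with both results used
// becomes a glue-producing MipsISD::DivRem16 node feeding two CopyFromReg
// reads, one from LO0 (the quotient) and one from HI0 (the remainder).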

static Mips::CondCode condCodeToFCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown fp condition code!");
  case ISD::SETEQ:
  case ISD::SETOEQ: return Mips::FCOND_OEQ;
  case ISD::SETUNE: return Mips::FCOND_UNE;
  case ISD::SETLT:
  case ISD::SETOLT: return Mips::FCOND_OLT;
  case ISD::SETGT:
  case ISD::SETOGT: return Mips::FCOND_OGT;
  case ISD::SETLE:
  case ISD::SETOLE: return Mips::FCOND_OLE;
  case ISD::SETGE:
  case ISD::SETOGE: return Mips::FCOND_OGE;
  case ISD::SETULT: return Mips::FCOND_ULT;
  case ISD::SETULE: return Mips::FCOND_ULE;
  case ISD::SETUGT: return Mips::FCOND_UGT;
  case ISD::SETUGE: return Mips::FCOND_UGE;
  case ISD::SETUO: return Mips::FCOND_UN;
  case ISD::SETO: return Mips::FCOND_OR;
  case ISD::SETNE:
  case ISD::SETONE: return Mips::FCOND_ONE;
  case ISD::SETUEQ: return Mips::FCOND_UEQ;
  }
}

/// This function returns true if the floating point conditional branches and
/// conditional moves which use condition code CC should be inverted.
static bool invertFPCondCodeUser(Mips::CondCode CC) {
  if (CC >= Mips::FCOND_F && CC <= Mips::FCOND_NGT)
    return false;

  assert((CC >= Mips::FCOND_T && CC <= Mips::FCOND_GT) &&
         "Illegal Condition Code");

  return true;
}
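// For example (a sketch of the convention): Mips::FCOND_ONE lies in the
// FCOND_T..FCOND_GT range, which has no direct c.cond.fmt encoding; the
// compare is emitted as its complement (c.ueq.fmt) and the consumer tests the
// false sense of the flag (bc1f/movf rather than bc1t/movt).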

// Creates and returns an FPCmp node from a setcc node.
// Returns Op if setcc is not a floating point comparison.
static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op) {
  // Must be a SETCC node.
  if (Op.getOpcode() != ISD::SETCC)
    return Op;

  SDValue LHS = Op.getOperand(0);

  if (!LHS.getValueType().isFloatingPoint())
    return Op;

  SDValue RHS = Op.getOperand(1);
  SDLoc DL(Op);

  // Assume the 3rd operand is a CondCodeSDNode. Add code to check the type of
  // node if necessary.
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  return DAG.getNode(MipsISD::FPCmp, DL, MVT::Glue, LHS, RHS,
                     DAG.getConstant(condCodeToFCC(CC), DL, MVT::i32));
}

// Creates and returns a CMovFPT/F node.
static SDValue createCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True,
                            SDValue False, const SDLoc &DL) {
  ConstantSDNode *CC = cast<ConstantSDNode>(Cond.getOperand(2));
  bool invert = invertFPCondCodeUser((Mips::CondCode)CC->getSExtValue());
  SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);

  return DAG.getNode((invert ? MipsISD::CMovFP_F : MipsISD::CMovFP_T), DL,
                     True.getValueType(), True, FCC0, False, Cond);
}

static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue SetCC = N->getOperand(0);

  if ((SetCC.getOpcode() != ISD::SETCC) ||
      !SetCC.getOperand(0).getValueType().isInteger())
    return SDValue();

  SDValue False = N->getOperand(2);
  EVT FalseTy = False.getValueType();

  if (!FalseTy.isInteger())
    return SDValue();

  ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(False);

  // If the RHS (False) is 0, we swap the order of the operands
  // of ISD::SELECT (obviously also inverting the condition) so that we can
  // take advantage of conditional moves using the $0 register.
  // Example:
  //   return (a != 0) ? x : 0;
  //     load $reg, x
  //     movz $reg, $0, a
  if (!FalseC)
    return SDValue();

  const SDLoc DL(N);

  if (!FalseC->getZExtValue()) {
    ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
    SDValue True = N->getOperand(1);

    SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
                         SetCC.getOperand(1),
                         ISD::getSetCCInverse(CC, SetCC.getValueType()));

    return DAG.getNode(ISD::SELECT, DL, FalseTy, SetCC, False, True);
  }

  // If both operands are integer constants there's a possibility that we
  // can do some interesting optimizations.
  SDValue True = N->getOperand(1);
  ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(True);

  if (!TrueC || !True.getValueType().isInteger())
    return SDValue();

  // We'll also ignore MVT::i64 operands as this optimization proves
  // to be ineffective because of the required sign extensions as the result
  // of a SETCC operator is always MVT::i32 for non-vector types.
  if (True.getValueType() == MVT::i64)
    return SDValue();

  int64_t Diff = TrueC->getSExtValue() - FalseC->getSExtValue();

  // 1) (a < x) ? y : y-1
  //      slti $reg1, a, x
  //      addiu $reg2, $reg1, y-1
  if (Diff == 1)
    return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, False);

  // 2) (a < x) ? y-1 : y
  //      slti $reg1, a, x
  //      xor $reg1, $reg1, 1
  //      addiu $reg2, $reg1, y-1
  if (Diff == -1) {
    ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
    SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
                         SetCC.getOperand(1),
                         ISD::getSetCCInverse(CC, SetCC.getValueType()));
    return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, True);
  }

  // Could not optimize.
  return SDValue();
}
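// Worked example for case 1 (a sketch): "return (a < 7) ? 5 : 4;" has
// Diff == 1, so the select folds to (add (setlt a, 7), 4): the setcc result
// (0 or 1) plus the false value, i.e. slti followed by addiu, with no branch
// or conditional move.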

static SDValue performCMovFPCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue ValueIfTrue = N->getOperand(0), ValueIfFalse = N->getOperand(2);

  ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(ValueIfFalse);
  if (!FalseC || FalseC->getZExtValue())
    return SDValue();

  // Since RHS (False) is 0, we swap the order of the True/False operands
  // (obviously also inverting the condition) so that we can
  // take advantage of conditional moves using the $0 register.
  // Example:
  //   return (a != 0) ? x : 0;
  //     load $reg, x
  //     movz $reg, $0, a
  unsigned Opc = (N->getOpcode() == MipsISD::CMovFP_T) ? MipsISD::CMovFP_F
                                                       : MipsISD::CMovFP_T;

  SDValue FCC = N->getOperand(1), Glue = N->getOperand(3);
  return DAG.getNode(Opc, SDLoc(N), ValueIfFalse.getValueType(),
                     ValueIfFalse, FCC, ValueIfTrue, Glue);
}

static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
    return SDValue();

  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  SDValue Mask = N->getOperand(1);
  EVT ValTy = N->getValueType(0);
  SDLoc DL(N);

  uint64_t Pos = 0;
  unsigned SMPos, SMSize;
  ConstantSDNode *CN;
  SDValue NewOperand;
  unsigned Opc;

  // Op's second operand must be a shifted mask.
  if (!(CN = dyn_cast<ConstantSDNode>(Mask)) ||
      !isShiftedMask_64(CN->getZExtValue(), SMPos, SMSize))
    return SDValue();

  if (FirstOperandOpc == ISD::SRA || FirstOperandOpc == ISD::SRL) {
    // Pattern match EXT.
    //   $dst = and ((sra or srl) $src, pos), (2**size - 1)
    //   => ext $dst, $src, pos, size

    // The second operand of the shift must be an immediate.
    if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
      return SDValue();

    Pos = CN->getZExtValue();

    // Return if the shifted mask does not start at bit 0 or the sum of its
    // size and Pos exceeds the word's size.
    if (SMPos != 0 || Pos + SMSize > ValTy.getSizeInBits())
      return SDValue();

    Opc = MipsISD::Ext;
    NewOperand = FirstOperand.getOperand(0);
  } else if (FirstOperandOpc == ISD::SHL && Subtarget.hasCnMips()) {
    // Pattern match CINS.
    //   $dst = and (shl $src, pos), mask
    //   => cins $dst, $src, pos, size
    // mask is a shifted mask with consecutive 1's, pos = shift amount,
    // size = population count.

    // The second operand of the shift must be an immediate.
    if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
      return SDValue();

    Pos = CN->getZExtValue();

    if (SMPos != Pos || Pos >= ValTy.getSizeInBits() || SMSize >= 32 ||
        Pos + SMSize > ValTy.getSizeInBits())
      return SDValue();

    NewOperand = FirstOperand.getOperand(0);
    // SMSize is 'location' (position) in this case, not size.
    SMSize--;
    Opc = MipsISD::CIns;
  } else {
    // Pattern match EXT.
    //   $dst = and $src, (2**size - 1) , if size > 16
    //   => ext $dst, $src, pos, size , pos = 0

    // If the mask is <= 0xffff, andi can be used instead.
    if (CN->getZExtValue() <= 0xffff)
      return SDValue();

    // Return if the mask doesn't start at position 0.
    if (SMPos)
      return SDValue();

    Opc = MipsISD::Ext;
    NewOperand = FirstOperand;
  }
  return DAG.getNode(Opc, DL, ValTy, NewOperand,
                     DAG.getConstant(Pos, DL, MVT::i32),
                     DAG.getConstant(SMSize, DL, MVT::i32));
}
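// Worked example for the EXT pattern (a sketch): ((x >> 3) & 0x1F) gives
// Pos == 3, SMPos == 0 and SMSize == 5, so the node becomes
// (MipsISD::Ext x, 3, 5), selected as "ext $dst, $src, 3, 5".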

static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const MipsSubtarget &Subtarget) {
  // Pattern match INS.
  //   $dst = or (and $src1, mask0), (and (shl $src, pos), mask1),
  //   where mask1 = (2**size - 1) << pos, mask0 = ~mask1
  //   => ins $dst, $src, size, pos, $src1
  if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
    return SDValue();

  SDValue And0 = N->getOperand(0), And1 = N->getOperand(1);
  unsigned SMPos0, SMSize0, SMPos1, SMSize1;
  ConstantSDNode *CN, *CN1;

  // See if Op's first operand matches (and $src1, mask0).
  if (And0.getOpcode() != ISD::AND)
    return SDValue();

  if (!(CN = dyn_cast<ConstantSDNode>(And0.getOperand(1))) ||
      !isShiftedMask_64(~CN->getSExtValue(), SMPos0, SMSize0))
    return SDValue();

  // See if Op's second operand matches (and (shl $src, pos), mask1).
  if (And1.getOpcode() == ISD::AND &&
      And1.getOperand(0).getOpcode() == ISD::SHL) {
    if (!(CN = dyn_cast<ConstantSDNode>(And1.getOperand(1))) ||
        !isShiftedMask_64(CN->getZExtValue(), SMPos1, SMSize1))
      return SDValue();

    // The shift masks must have the same position and size.
    if (SMPos0 != SMPos1 || SMSize0 != SMSize1)
      return SDValue();

    SDValue Shl = And1.getOperand(0);

    if (!(CN = dyn_cast<ConstantSDNode>(Shl.getOperand(1))))
      return SDValue();

    unsigned Shamt = CN->getZExtValue();

    // Return if the shift amount and the first bit position of the mask are
    // not the same.
    EVT ValTy = N->getValueType(0);
    if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
      return SDValue();

    SDLoc DL(N);
    return DAG.getNode(MipsISD::Ins, DL, ValTy, Shl.getOperand(0),
                       DAG.getConstant(SMPos0, DL, MVT::i32),
                       DAG.getConstant(SMSize0, DL, MVT::i32),
                       And0.getOperand(0));
  } else {
    // Pattern match DINS.
    //   $dst = or (and $src, mask0), mask1
    //   where mask0 = ((1 << SMSize0) - 1) << SMPos0
    //   => dins $dst, $src, pos, size
    if (~CN->getSExtValue() == ((((int64_t)1 << SMSize0) - 1) << SMPos0) &&
        ((SMSize0 + SMPos0 <= 64 && Subtarget.hasMips64r2()) ||
         (SMSize0 + SMPos0 <= 32))) {
      // Check whether the second operand is an AND with a constant, or a bare
      // constant.
      bool isConstCase = And1.getOpcode() != ISD::AND;
      if (And1.getOpcode() == ISD::AND) {
        if (!(CN1 = dyn_cast<ConstantSDNode>(And1->getOperand(1))))
          return SDValue();
      } else {
        if (!(CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1))))
          return SDValue();
      }
      // Don't generate INS if the constant OR operand doesn't fit into the
      // bits cleared by the constant AND operand.
      if (CN->getSExtValue() & CN1->getSExtValue())
        return SDValue();

      SDLoc DL(N);
      EVT ValTy = N->getOperand(0)->getValueType(0);
      SDValue Const1;
      SDValue SrlX;
      if (!isConstCase) {
        Const1 = DAG.getConstant(SMPos0, DL, MVT::i32);
        SrlX = DAG.getNode(ISD::SRL, DL, And1->getValueType(0), And1, Const1);
      }
      return DAG.getNode(
          MipsISD::Ins, DL, N->getValueType(0),
          isConstCase
              ? DAG.getConstant(CN1->getSExtValue() >> SMPos0, DL, ValTy)
              : SrlX,
          DAG.getConstant(SMPos0, DL, MVT::i32),
          DAG.getConstant(ValTy.getSizeInBits() / 8 < 8 ? SMSize0 & 31
                                                        : SMSize0,
                          DL, MVT::i32),
          And0->getOperand(0));
    }
    return SDValue();
  }
}
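// Worked example for the INS pattern (a sketch): for
//   (or (and $src1, 0xFFFFFF0F), (and (shl $src, 4), 0xF0))
// mask1 == 0xF0 gives SMPos == 4 and SMSize == 4, matching the shift amount,
// so the node becomes (MipsISD::Ins $src, 4, 4, $src1), i.e.
// "ins $dst, $src, 4, 4".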

static SDValue performMADD_MSUBCombine(SDNode *ROOTNode, SelectionDAG &CurDAG,
                                       const MipsSubtarget &Subtarget) {
  // ROOTNode must have a multiplication as an operand for the match to be
  // successful.
  if (ROOTNode->getOperand(0).getOpcode() != ISD::MUL &&
      ROOTNode->getOperand(1).getOpcode() != ISD::MUL)
    return SDValue();

  // In the case where we have a multiplication as the left operand of a
  // subtraction, we can't combine into a MipsISD::MSub node as the
  // instruction definition of msub(u) places the multiplication on the right.
  if (ROOTNode->getOpcode() == ISD::SUB &&
      ROOTNode->getOperand(0).getOpcode() == ISD::MUL)
    return SDValue();

  // We don't handle vector types here.
  if (ROOTNode->getValueType(0).isVector())
    return SDValue();

  // For MIPS64, madd / msub instructions are inefficient to use with 64-bit
  // arithmetic. E.g.
  //   (add (mul a b) c) =>
  //     let res = (madd (mthi (drotr c 32)) (mtlo c) a b) in
  //     MIPS64:   (or (dsll (mfhi res) 32) (dsrl (dsll (mflo res) 32) 32)
  //   or
  //     MIPS64R2: (dins (mflo res) (mfhi res) 32 32)
  //
  // The overhead of setting up the Hi/Lo registers and reassembling the
  // result makes this a dubious optimization for MIPS64. The core of the
  // problem is that Hi/Lo contain the upper and lower 32 bits of the
  // operand and result.
  //
  // It requires a chain of 4 add/mul for MIPS64R2 to get better code
  // density than doing it naively, 5 for MIPS64. Additionally, using
  // madd/msub on MIPS64 requires the operands to actually be 32-bit
  // sign-extended operands, not true 64-bit values.
  //
  // FIXME: For the moment, disable this completely for MIPS64.
  if (Subtarget.hasMips64())
    return SDValue();

  SDValue Mult = ROOTNode->getOperand(0).getOpcode() == ISD::MUL
                     ? ROOTNode->getOperand(0)
                     : ROOTNode->getOperand(1);

  SDValue AddOperand = ROOTNode->getOperand(0).getOpcode() == ISD::MUL
                           ? ROOTNode->getOperand(1)
                           : ROOTNode->getOperand(0);

  // Transform this to a MADD only if the user of this node is the add.
  // If there are other users of the mul, this function returns here.
  if (!Mult.hasOneUse())
    return SDValue();

  // maddu and madd are unusual instructions in that on MIPS64 bits 63..31
  // must be in canonical form, i.e. sign extended. For MIPS32, the operands
  // of the multiply must have 32 or more sign bits, otherwise we cannot
  // perform this optimization. We have to check this here as we're performing
  // this optimization pre-legalization.
  SDValue MultLHS = Mult->getOperand(0);
  SDValue MultRHS = Mult->getOperand(1);

  bool IsSigned = MultLHS->getOpcode() == ISD::SIGN_EXTEND &&
                  MultRHS->getOpcode() == ISD::SIGN_EXTEND;
  bool IsUnsigned = MultLHS->getOpcode() == ISD::ZERO_EXTEND &&
                    MultRHS->getOpcode() == ISD::ZERO_EXTEND;

  if (!IsSigned && !IsUnsigned)
    return SDValue();

  // Initialize accumulator.
  SDLoc DL(ROOTNode);
  SDValue BottomHalf, TopHalf;
  std::tie(BottomHalf, TopHalf) =
      CurDAG.SplitScalar(AddOperand, DL, MVT::i32, MVT::i32);
  SDValue ACCIn =
      CurDAG.getNode(MipsISD::MTLOHI, DL, MVT::Untyped, BottomHalf, TopHalf);

  // Create MipsMAdd(u) / MipsMSub(u) node.
  bool IsAdd = ROOTNode->getOpcode() == ISD::ADD;
  unsigned Opcode = IsAdd ? (IsUnsigned ? MipsISD::MAddu : MipsISD::MAdd)
                          : (IsUnsigned ? MipsISD::MSubu : MipsISD::MSub);
  SDValue MAddOps[3] = {
      CurDAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mult->getOperand(0)),
      CurDAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mult->getOperand(1)), ACCIn};
  EVT VTs[2] = {MVT::i32, MVT::i32};
  SDValue MAdd = CurDAG.getNode(Opcode, DL, VTs, MAddOps);

  SDValue ResLo = CurDAG.getNode(MipsISD::MFLO, DL, MVT::i32, MAdd);
  SDValue ResHi = CurDAG.getNode(MipsISD::MFHI, DL, MVT::i32, MAdd);
  SDValue Combined =
      CurDAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, ResLo, ResHi);
  return Combined;
}
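// Resulting DAG shape on MIPS32 (a sketch): for
//   (add (mul (sext a), (sext b)), c) : i64
// the accumulator c is split into halves and loaded with MTLOHI, and the
// whole expression becomes (build_pair (MFLO madd) (MFHI madd)), where
// madd = (MAdd (trunc (sext a)) (trunc (sext b)) acc).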

static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  // (sub v0 (mul v1, v2)) => (msub v1, v2, v0)
  if (DCI.isBeforeLegalizeOps()) {
    if (Subtarget.hasMips32() && !Subtarget.hasMips32r6() &&
        !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
      return performMADD_MSUBCombine(N, DAG, Subtarget);

    return SDValue();
  }

  return SDValue();
}

static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  // (add v0 (mul v1, v2)) => (madd v1, v2, v0)
  if (DCI.isBeforeLegalizeOps()) {
    if (Subtarget.hasMips32() && !Subtarget.hasMips32r6() &&
        !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
      return performMADD_MSUBCombine(N, DAG, Subtarget);

    return SDValue();
  }

  // (add v0, (add v1, abs_lo(tjt))) => (add (add v0, v1), abs_lo(tjt))
  SDValue Add = N->getOperand(1);

  if (Add.getOpcode() != ISD::ADD)
    return SDValue();

  SDValue Lo = Add.getOperand(1);

  if ((Lo.getOpcode() != MipsISD::Lo) ||
      (Lo.getOperand(0).getOpcode() != ISD::TargetJumpTable))
    return SDValue();

  EVT ValTy = N->getValueType(0);
  SDLoc DL(N);

  SDValue Add1 = DAG.getNode(ISD::ADD, DL, ValTy, N->getOperand(0),
                             Add.getOperand(0));
  return DAG.getNode(ISD::ADD, DL, ValTy, Add1, Lo);
}

static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  // Pattern match CINS.
  //   $dst = shl (and $src, imm), pos
  //   => cins $dst, $src, pos, size

  if (DCI.isBeforeLegalizeOps() || !Subtarget.hasCnMips())
    return SDValue();

  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  SDValue SecondOperand = N->getOperand(1);
  EVT ValTy = N->getValueType(0);
  SDLoc DL(N);

  uint64_t Pos = 0;
  unsigned SMPos, SMSize;
  ConstantSDNode *CN;
  SDValue NewOperand;

  // The second operand of the shift must be an immediate.
  if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)))
    return SDValue();

  Pos = CN->getZExtValue();

  if (Pos >= ValTy.getSizeInBits())
    return SDValue();

  if (FirstOperandOpc != ISD::AND)
    return SDValue();

  // AND's second operand must be a shifted mask.
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
      !isShiftedMask_64(CN->getZExtValue(), SMPos, SMSize))
    return SDValue();

  // Return if the shifted mask does not start at bit 0 or the sum of its size
  // and Pos exceeds the word's size.
  if (SMPos != 0 || SMSize > 32 || Pos + SMSize > ValTy.getSizeInBits())
    return SDValue();

  NewOperand = FirstOperand.getOperand(0);
  // SMSize is 'location' (position) in this case, not size.
  SMSize--;

  return DAG.getNode(MipsISD::CIns, DL, ValTy, NewOperand,
                     DAG.getConstant(Pos, DL, MVT::i32),
                     DAG.getConstant(SMSize, DL, MVT::i32));
}
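// Worked example (a sketch): ((x & 0x3F) << 8) gives Pos == 8, SMPos == 0 and
// SMSize == 6, so the node becomes (MipsISD::CIns x, 8, 5); the last operand
// is SMSize - 1, matching cins's length-minus-one encoding.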

SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
  const {
  SelectionDAG &DAG = DCI.DAG;
  unsigned Opc = N->getOpcode();

  switch (Opc) {
  default: break;
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    return performDivRemCombine(N, DAG, DCI, Subtarget);
  case ISD::SELECT:
    return performSELECTCombine(N, DAG, DCI, Subtarget);
  case MipsISD::CMovFP_F:
  case MipsISD::CMovFP_T:
    return performCMovFPCombine(N, DAG, DCI, Subtarget);
  case ISD::AND:
    return performANDCombine(N, DAG, DCI, Subtarget);
  case ISD::OR:
    return performORCombine(N, DAG, DCI, Subtarget);
  case ISD::ADD:
    return performADDCombine(N, DAG, DCI, Subtarget);
  case ISD::SHL:
    return performSHLCombine(N, DAG, DCI, Subtarget);
  case ISD::SUB:
    return performSUBCombine(N, DAG, DCI, Subtarget);
  }

  return SDValue();
}

bool MipsTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
  return Subtarget.hasMips32();
}

bool MipsTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
  return Subtarget.hasMips32();
}

bool MipsTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
  // We can use ANDI+SLTIU as a bit test. Y contains the bit position.
  // For MIPSR2 or later, we may be able to use the `ext` instruction or its
  // double-word variants.
  if (auto *C = dyn_cast<ConstantSDNode>(Y))
    return C->getAPIntValue().ule(15);

  return false;
}
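// For example (a sketch): testing bit 4 of x as "(x >> 4) & 1" can be lowered
// as "andi $t, $x, 16" followed by a compare against zero (e.g. sltiu); the
// ule(15) bound keeps the mask 1 << Y within andi's 16-bit immediate.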

bool MipsTargetLowering::shouldFoldConstantShiftPairToMask(
    const SDNode *N, CombineLevel Level) const {
  assert(((N->getOpcode() == ISD::SHL &&
           N->getOperand(0).getOpcode() == ISD::SRL) ||
          (N->getOpcode() == ISD::SRL &&
           N->getOperand(0).getOpcode() == ISD::SHL)) &&
         "Expected shift-shift mask");

  if (N->getOperand(0).getValueType().isVector())
    return false;
  return true;
}

void
MipsTargetLowering::ReplaceNodeResults(SDNode *N,
                                       SmallVectorImpl<SDValue> &Results,
                                       SelectionDAG &DAG) const {
  return LowerOperationWrapper(N, Results, DAG);
}

SDValue MipsTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const
{
  switch (Op.getOpcode())
  {
  case ISD::BRCOND: return lowerBRCOND(Op, DAG);
  case ISD::ConstantPool: return lowerConstantPool(Op, DAG);
  case ISD::GlobalAddress: return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress: return lowerBlockAddress(Op, DAG);
  case ISD::GlobalTLSAddress: return lowerGlobalTLSAddress(Op, DAG);
  case ISD::JumpTable: return lowerJumpTable(Op, DAG);
  case ISD::SELECT: return lowerSELECT(Op, DAG);
  case ISD::SETCC: return lowerSETCC(Op, DAG);
  case ISD::VASTART: return lowerVASTART(Op, DAG);
  case ISD::VAARG: return lowerVAARG(Op, DAG);
  case ISD::FCOPYSIGN: return lowerFCOPYSIGN(Op, DAG);
  case ISD::FABS: return lowerFABS(Op, DAG);
  case ISD::FRAMEADDR: return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR: return lowerRETURNADDR(Op, DAG);
  case ISD::EH_RETURN: return lowerEH_RETURN(Op, DAG);
  case ISD::ATOMIC_FENCE: return lowerATOMIC_FENCE(Op, DAG);
  case ISD::SHL_PARTS: return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS: return lowerShiftRightParts(Op, DAG, /*IsSRA=*/true);
  case ISD::SRL_PARTS: return lowerShiftRightParts(Op, DAG, /*IsSRA=*/false);
  case ISD::LOAD: return lowerLOAD(Op, DAG);
  case ISD::STORE: return lowerSTORE(Op, DAG);
  case ISD::EH_DWARF_CFA: return lowerEH_DWARF_CFA(Op, DAG);
  case ISD::FP_TO_SINT: return lowerFP_TO_SINT(Op, DAG);
  }
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Lower helper functions
//===----------------------------------------------------------------------===//

// addLiveIn - This helper function adds the specified physical register to the
// MachineFunction as a live-in value. It also creates a corresponding
// virtual register for it.
static unsigned
addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
{
  Register VReg = MF.getRegInfo().createVirtualRegister(RC);
  MF.getRegInfo().addLiveIn(PReg, VReg);
  return VReg;
}

static MachineBasicBlock *insertDivByZeroTrap(MachineInstr &MI,
                                              MachineBasicBlock &MBB,
                                              const TargetInstrInfo &TII,
                                              bool Is64Bit, bool IsMicroMips) {
  if (NoZeroDivCheck)
    return &MBB;

  // Insert the instruction "teq $divisor_reg, $zero, 7".
  MachineBasicBlock::iterator I(MI);
  MachineInstrBuilder MIB;
  MachineOperand &Divisor = MI.getOperand(2);
  MIB = BuildMI(MBB, std::next(I), MI.getDebugLoc(),
                TII.get(IsMicroMips ? Mips::TEQ_MM : Mips::TEQ))
            .addReg(Divisor.getReg(), getKillRegState(Divisor.isKill()))
            .addReg(Mips::ZERO)
            .addImm(7);

  // Use the 32-bit sub-register if this is a 64-bit division.
  if (Is64Bit)
    MIB->getOperand(0).setSubReg(Mips::sub_32);

  // Clear Divisor's kill flag.
  Divisor.setIsKill(false);

  // We would normally delete the original instruction here but in this case
  // we only needed to inject an additional instruction rather than replace it.

  return &MBB;
}
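// Resulting sequence (a sketch, 32-bit case): the trap is placed immediately
// after the divide, so a zero divisor traps before HI/LO are read:
//   div $zero, $rs, $rt
//   teq $rt, $zero, 7   # 7 is the conventional divide-by-zero trap code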

MachineBasicBlock *
MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case Mips::ATOMIC_LOAD_ADD_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_ADD_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_ADD_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_ADD_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_AND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_AND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_AND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_AND_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_OR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_OR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_OR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_OR_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_XOR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_XOR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_XOR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_XOR_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_NAND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_NAND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_NAND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_NAND_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_SUB_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_SUB_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_SUB_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_SUB_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_SWAP_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_SWAP_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_SWAP_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_SWAP_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_CMP_SWAP_I8:
    return emitAtomicCmpSwapPartword(MI, BB, 1);
  case Mips::ATOMIC_CMP_SWAP_I16:
    return emitAtomicCmpSwapPartword(MI, BB, 2);
  case Mips::ATOMIC_CMP_SWAP_I32:
    return emitAtomicCmpSwap(MI, BB);
  case Mips::ATOMIC_CMP_SWAP_I64:
    return emitAtomicCmpSwap(MI, BB);

  case Mips::ATOMIC_LOAD_MIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MIN_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_MAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MAX_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_UMIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMIN_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_UMAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMAX_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::PseudoSDIV:
  case Mips::PseudoUDIV:
  case Mips::DIV:
  case Mips::DIVU:
  case Mips::MOD:
  case Mips::MODU:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(),
                               /*Is64Bit=*/false, /*IsMicroMips=*/false);
  case Mips::SDIV_MM_Pseudo:
  case Mips::UDIV_MM_Pseudo:
  case Mips::SDIV_MM:
  case Mips::UDIV_MM:
  case Mips::DIV_MMR6:
  case Mips::DIVU_MMR6:
  case Mips::MOD_MMR6:
  case Mips::MODU_MMR6:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(),
                               /*Is64Bit=*/false, /*IsMicroMips=*/true);
  case Mips::PseudoDSDIV:
  case Mips::PseudoDUDIV:
  case Mips::DDIV:
  case Mips::DDIVU:
  case Mips::DMOD:
  case Mips::DMODU:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(),
                               /*Is64Bit=*/true, /*IsMicroMips=*/false);

  case Mips::PseudoSELECT_I:
  case Mips::PseudoSELECT_I64:
  case Mips::PseudoSELECT_S:
1456 case Mips::PseudoSELECT_D32:
1457 case Mips::PseudoSELECT_D64:
1458 return emitPseudoSELECT(MI, BB, isFPCmp: false, Opc: Mips::BNE);
1459 case Mips::PseudoSELECTFP_F_I:
1460 case Mips::PseudoSELECTFP_F_I64:
1461 case Mips::PseudoSELECTFP_F_S:
1462 case Mips::PseudoSELECTFP_F_D32:
1463 case Mips::PseudoSELECTFP_F_D64:
1464 return emitPseudoSELECT(MI, BB, isFPCmp: true, Opc: Mips::BC1F);
1465 case Mips::PseudoSELECTFP_T_I:
1466 case Mips::PseudoSELECTFP_T_I64:
1467 case Mips::PseudoSELECTFP_T_S:
1468 case Mips::PseudoSELECTFP_T_D32:
1469 case Mips::PseudoSELECTFP_T_D64:
1470 return emitPseudoSELECT(MI, BB, isFPCmp: true, Opc: Mips::BC1T);
1471 case Mips::PseudoD_SELECT_I:
1472 case Mips::PseudoD_SELECT_I64:
1473 return emitPseudoD_SELECT(MI, BB);
1474 case Mips::LDR_W:
1475 return emitLDR_W(MI, BB);
1476 case Mips::LDR_D:
1477 return emitLDR_D(MI, BB);
1478 case Mips::STR_W:
1479 return emitSTR_W(MI, BB);
1480 case Mips::STR_D:
1481 return emitSTR_D(MI, BB);
1482 }
1483}
1484
// This function handles the 32- and 64-bit word-sized atomic pseudos
// (including Mips::ATOMIC_SWAP_* and Mips::ATOMIC_LOAD_NAND_*) by selecting
// the corresponding post-RA pseudo; the subword variants are handled in
// emitAtomicBinaryPartword.
MachineBasicBlock *
MipsTargetLowering::emitAtomicBinary(MachineInstr &MI,
                                     MachineBasicBlock *BB) const {

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  unsigned AtomicOp;
  bool NeedsAdditionalReg = false;
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_ADD_I32:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I32:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_NAND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I32_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I32:
    AtomicOp = Mips::ATOMIC_SWAP_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_ADD_I64:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I64:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I64:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I64:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I64:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_NAND_I64:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I64_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I64:
    AtomicOp = Mips::ATOMIC_SWAP_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_MIN_I32:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I32:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I32:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I32:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MIN_I64:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I64:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I64:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I64:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  default:
    llvm_unreachable("Unknown pseudo atomic for replacement!");
  }

  Register OldVal = MI.getOperand(0).getReg();
  Register Ptr = MI.getOperand(1).getReg();
  Register Incr = MI.getOperand(2).getReg();
  Register Scratch = RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));

  MachineBasicBlock::iterator II(MI);

  // The scratch register here, marked with the EarlyClobber | Define |
  // Implicit flags, is used to persuade the register allocator and the
  // machine verifier to accept its usage. It has to be a real register that
  // holds an UNDEF value, is dead after the instruction, and is unique among
  // the registers chosen for the instruction.

  // The EarlyClobber flag has the semantic property that the operand it is
  // attached to is clobbered before the rest of the inputs are read. Hence it
  // must be unique among the operands to the instruction.
  // The Define flag is needed to convince the machine verifier that an Undef
  // value isn't a problem.
  // The Dead flag is needed as the value in scratch isn't used by any other
  // instruction. Kill isn't used as Dead is more precise.
  // The Implicit flag is here due to the interaction between the other flags
  // and the machine verifier.

  // For correctness, a new pseudo is introduced here. We need this new
  // pseudo so that FastRegisterAllocator does not see an ll/sc sequence
  // spread over more than one basic block. A register allocator (or any
  // codegen, in fact) that introduces a store can violate the expectations
  // of the hardware.
  //
  // An atomic read-modify-write sequence starts with a linked load
  // instruction and ends with a store conditional instruction. The atomic
  // read-modify-write sequence fails if any of the following conditions
  // occur between the execution of ll and sc:
  //   * A coherent store is completed by another process or coherent I/O
  //     module into the block of synchronizable physical memory containing
  //     the word. The size and alignment of the block is
  //     implementation-dependent.
  //   * A coherent store is executed between an LL and SC sequence on the
  //     same processor to the block of synchronizable physical memory
  //     containing the word.
  //
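  // As an illustrative sketch (assuming the usual post-RA expansion, not an
  // exact listing), ATOMIC_LOAD_ADD_I32_POSTRA is rewritten into a single
  // retry loop along the lines of:
  //
  //   $loop: ll    $oldval, 0($ptr)
  //          addu  $scratch, $oldval, $incr
  //          sc    $scratch, 0($ptr)
  //          beqz  $scratch, $loop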

  Register PtrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Ptr));
  Register IncrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Incr));

  BuildMI(*BB, II, DL, TII->get(Mips::COPY), IncrCopy).addReg(Incr);
  BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr);

  MachineInstrBuilder MIB =
      BuildMI(*BB, II, DL, TII->get(AtomicOp))
          .addReg(OldVal, RegState::Define | RegState::EarlyClobber)
          .addReg(PtrCopy)
          .addReg(IncrCopy)
          .addReg(Scratch, RegState::Define | RegState::EarlyClobber |
                               RegState::Implicit | RegState::Dead);
  if (NeedsAdditionalReg) {
    Register Scratch2 =
        RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));
    MIB.addReg(Scratch2, RegState::Define | RegState::EarlyClobber |
                             RegState::Implicit | RegState::Dead);
  }

  MI.eraseFromParent();

  return BB;
}

MachineBasicBlock *MipsTargetLowering::emitSignExtendToI32InReg(
    MachineInstr &MI, MachineBasicBlock *BB, unsigned Size, unsigned DstReg,
    unsigned SrcReg) const {
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const DebugLoc &DL = MI.getDebugLoc();

  if (Subtarget.hasMips32r2() && Size == 1) {
    BuildMI(BB, DL, TII->get(Mips::SEB), DstReg).addReg(SrcReg);
    return BB;
  }

  if (Subtarget.hasMips32r2() && Size == 2) {
    BuildMI(BB, DL, TII->get(Mips::SEH), DstReg).addReg(SrcReg);
    return BB;
  }

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
  Register ScrReg = RegInfo.createVirtualRegister(RC);

  assert(Size < 32);
  int64_t ShiftImm = 32 - (Size * 8);

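  // For example, Size == 1 gives ShiftImm == 24, so the pair below becomes
  // "sll $scr, $src, 24; sra $dst, $scr, 24", replicating bit 7 of the source
  // across bits 31..8.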
  BuildMI(BB, DL, TII->get(Mips::SLL), ScrReg).addReg(SrcReg).addImm(ShiftImm);
  BuildMI(BB, DL, TII->get(Mips::SRA), DstReg).addReg(ScrReg).addImm(ShiftImm);

  return BB;
}

MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
    MachineInstr &MI, MachineBasicBlock *BB, unsigned Size) const {
  assert((Size == 1 || Size == 2) &&
         "Unsupported size for EmitAtomicBinaryPartial.");

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
  const bool ArePtrs64bit = ABI.ArePtrs64bit();
  const TargetRegisterClass *RCp =
      getRegClassFor(ArePtrs64bit ? MVT::i64 : MVT::i32);
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register Dest = MI.getOperand(0).getReg();
  Register Ptr = MI.getOperand(1).getReg();
  Register Incr = MI.getOperand(2).getReg();

  Register AlignedAddr = RegInfo.createVirtualRegister(RCp);
  Register ShiftAmt = RegInfo.createVirtualRegister(RC);
  Register Mask = RegInfo.createVirtualRegister(RC);
  Register Mask2 = RegInfo.createVirtualRegister(RC);
  Register Incr2 = RegInfo.createVirtualRegister(RC);
  Register MaskLSB2 = RegInfo.createVirtualRegister(RCp);
  Register PtrLSB2 = RegInfo.createVirtualRegister(RC);
  Register MaskUpper = RegInfo.createVirtualRegister(RC);
  Register Scratch = RegInfo.createVirtualRegister(RC);
  Register Scratch2 = RegInfo.createVirtualRegister(RC);
  Register Scratch3 = RegInfo.createVirtualRegister(RC);

  unsigned AtomicOp = 0;
  bool NeedsAdditionalReg = false;
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_NAND_I8:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_NAND_I16:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I16_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I8:
    AtomicOp = Mips::ATOMIC_SWAP_I8_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I16:
    AtomicOp = Mips::ATOMIC_SWAP_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_ADD_I8:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_ADD_I16:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I8:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I16:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I8:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I16:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I8:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I16:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I8:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I16:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_MIN_I8:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MIN_I16:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I8:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I16:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I8:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I16:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I8:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I16:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  default:
    llvm_unreachable("Unknown subword atomic pseudo for expansion!");
  }

  // Insert new blocks after the current block.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = ++BB->getIterator();
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(exitMBB, BranchProbability::getOne());

  // thisMBB:
  //   addiu   masklsb2,$0,-4                # 0xfffffffc
  //   and     alignedaddr,ptr,masklsb2
  //   andi    ptrlsb2,ptr,3
  //   xori    ptrlsb2,ptrlsb2,3             # Only for BE
  //   sll     shiftamt,ptrlsb2,3
  //   ori     maskupper,$0,255              # 0xff
  //   sll     mask,maskupper,shiftamt
  //   nor     mask2,$0,mask
  //   sll     incr2,incr,shiftamt

  int64_t MaskImm = (Size == 1) ? 255 : 65535;
  BuildMI(BB, DL, TII->get(ABI.GetPtrAddiuOp()), MaskLSB2)
    .addReg(ABI.GetNullPtr()).addImm(-4);
  BuildMI(BB, DL, TII->get(ABI.GetPtrAndOp()), AlignedAddr)
    .addReg(Ptr).addReg(MaskLSB2);
  BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)
    .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
  if (Subtarget.isLittle()) {
    BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
  } else {
    Register Off = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, DL, TII->get(Mips::XORi), Off)
      .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
    BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
  }
  BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
    .addReg(Mips::ZERO).addImm(MaskImm);
  BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
    .addReg(MaskUpper).addReg(ShiftAmt);
  BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
  BuildMI(BB, DL, TII->get(Mips::SLLV), Incr2).addReg(Incr).addReg(ShiftAmt);

  // The purposes of the flags on the scratch registers are explained in
  // emitAtomicBinary. In summary, we need a scratch register that is
  // guaranteed to be undef and that is unique among the registers chosen for
  // the instruction.

  MachineInstrBuilder MIB =
      BuildMI(BB, DL, TII->get(AtomicOp))
          .addReg(Dest, RegState::Define | RegState::EarlyClobber)
          .addReg(AlignedAddr)
          .addReg(Incr2)
          .addReg(Mask)
          .addReg(Mask2)
          .addReg(ShiftAmt)
          .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
                               RegState::Dead | RegState::Implicit)
          .addReg(Scratch2, RegState::EarlyClobber | RegState::Define |
                                RegState::Dead | RegState::Implicit)
          .addReg(Scratch3, RegState::EarlyClobber | RegState::Define |
                                RegState::Dead | RegState::Implicit);
  if (NeedsAdditionalReg) {
    Register Scratch4 = RegInfo.createVirtualRegister(RC);
    MIB.addReg(Scratch4, RegState::EarlyClobber | RegState::Define |
                             RegState::Dead | RegState::Implicit);
  }

  MI.eraseFromParent(); // The instruction is gone now.

  return exitMBB;
}

// Lower atomic compare and swap to a pseudo instruction, taking care to
// define a scratch register for the pseudo instruction's expansion. The
// instruction is expanded after register allocation so as to prevent the
// insertion of stores between the linked load and the store conditional.
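//
// As an illustrative sketch (assuming the usual ll/sc expansion of the
// post-RA pseudo, not an exact listing), the compare-and-swap loop looks
// roughly like:
//
//   $loop: ll    $dest, 0($ptr)
//          bne   $dest, $oldval, $done
//          move  $scratch, $newval
//          sc    $scratch, 0($ptr)
//          beqz  $scratch, $loop
//   $done: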

MachineBasicBlock *
MipsTargetLowering::emitAtomicCmpSwap(MachineInstr &MI,
                                      MachineBasicBlock *BB) const {

  assert((MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ||
          MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I64) &&
         "Unsupported atomic pseudo for EmitAtomicCmpSwap.");

  const unsigned Size = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ? 4 : 8;

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32
                          ? Mips::ATOMIC_CMP_SWAP_I32_POSTRA
                          : Mips::ATOMIC_CMP_SWAP_I64_POSTRA;
  Register Dest = MI.getOperand(0).getReg();
  Register Ptr = MI.getOperand(1).getReg();
  Register OldVal = MI.getOperand(2).getReg();
  Register NewVal = MI.getOperand(3).getReg();

  Register Scratch = MRI.createVirtualRegister(RC);
  MachineBasicBlock::iterator II(MI);

  // We need to create copies of the various registers and kill them at the
  // atomic pseudo. If the copies are not made, when the atomic is expanded
  // after fast register allocation, the spills will end up outside of the
  // blocks that their values are defined in, causing livein errors.

  Register PtrCopy = MRI.createVirtualRegister(MRI.getRegClass(Ptr));
  Register OldValCopy = MRI.createVirtualRegister(MRI.getRegClass(OldVal));
  Register NewValCopy = MRI.createVirtualRegister(MRI.getRegClass(NewVal));

  BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr);
  BuildMI(*BB, II, DL, TII->get(Mips::COPY), OldValCopy).addReg(OldVal);
  BuildMI(*BB, II, DL, TII->get(Mips::COPY), NewValCopy).addReg(NewVal);

  // The purposes of the flags on the scratch register are explained in
  // emitAtomicBinary. In summary, we need a scratch register that is
  // guaranteed to be undef and that is unique among the registers chosen for
  // the instruction.

  BuildMI(*BB, II, DL, TII->get(AtomicOp))
      .addReg(Dest, RegState::Define | RegState::EarlyClobber)
      .addReg(PtrCopy, RegState::Kill)
      .addReg(OldValCopy, RegState::Kill)
      .addReg(NewValCopy, RegState::Kill)
      .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
                           RegState::Dead | RegState::Implicit);

  MI.eraseFromParent(); // The instruction is gone now.

  return BB;
}

MachineBasicBlock *MipsTargetLowering::emitAtomicCmpSwapPartword(
    MachineInstr &MI, MachineBasicBlock *BB, unsigned Size) const {
  assert((Size == 1 || Size == 2) &&
         "Unsupported size for EmitAtomicCmpSwapPartial.");

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
  const bool ArePtrs64bit = ABI.ArePtrs64bit();
  const TargetRegisterClass *RCp =
      getRegClassFor(ArePtrs64bit ? MVT::i64 : MVT::i32);
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register Dest = MI.getOperand(0).getReg();
  Register Ptr = MI.getOperand(1).getReg();
  Register CmpVal = MI.getOperand(2).getReg();
  Register NewVal = MI.getOperand(3).getReg();

  Register AlignedAddr = RegInfo.createVirtualRegister(RCp);
  Register ShiftAmt = RegInfo.createVirtualRegister(RC);
  Register Mask = RegInfo.createVirtualRegister(RC);
  Register Mask2 = RegInfo.createVirtualRegister(RC);
  Register ShiftedCmpVal = RegInfo.createVirtualRegister(RC);
  Register ShiftedNewVal = RegInfo.createVirtualRegister(RC);
  Register MaskLSB2 = RegInfo.createVirtualRegister(RCp);
  Register PtrLSB2 = RegInfo.createVirtualRegister(RC);
  Register MaskUpper = RegInfo.createVirtualRegister(RC);
  Register MaskedCmpVal = RegInfo.createVirtualRegister(RC);
  Register MaskedNewVal = RegInfo.createVirtualRegister(RC);
  unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I8
                          ? Mips::ATOMIC_CMP_SWAP_I8_POSTRA
                          : Mips::ATOMIC_CMP_SWAP_I16_POSTRA;

  // The scratch registers here, marked with the EarlyClobber | Define | Dead |
  // Implicit flags, are used to persuade the register allocator and the
  // machine verifier to accept the usage of these registers.
  // The EarlyClobber flag has the semantic property that the operand it is
  // attached to is clobbered before the rest of the inputs are read. Hence it
  // must be unique among the operands to the instruction.
  // The Define flag is needed to convince the machine verifier that an Undef
  // value isn't a problem.
  // The Dead flag is needed as the value in scratch isn't used by any other
  // instruction. Kill isn't used as Dead is more precise.
  Register Scratch = RegInfo.createVirtualRegister(RC);
  Register Scratch2 = RegInfo.createVirtualRegister(RC);

  // Insert new blocks after the current block.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = ++BB->getIterator();
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(exitMBB, BranchProbability::getOne());

  // thisMBB:
  //   addiu   masklsb2,$0,-4                # 0xfffffffc
  //   and     alignedaddr,ptr,masklsb2
  //   andi    ptrlsb2,ptr,3
  //   xori    ptrlsb2,ptrlsb2,3             # Only for BE
  //   sll     shiftamt,ptrlsb2,3
  //   ori     maskupper,$0,255              # 0xff
  //   sll     mask,maskupper,shiftamt
  //   nor     mask2,$0,mask
  //   andi    maskedcmpval,cmpval,255
  //   sll     shiftedcmpval,maskedcmpval,shiftamt
  //   andi    maskednewval,newval,255
  //   sll     shiftednewval,maskednewval,shiftamt
  int64_t MaskImm = (Size == 1) ? 255 : 65535;
  BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::DADDiu : Mips::ADDiu), MaskLSB2)
    .addReg(ABI.GetNullPtr()).addImm(-4);
  BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::AND64 : Mips::AND), AlignedAddr)
    .addReg(Ptr).addReg(MaskLSB2);
  BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)
    .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
  if (Subtarget.isLittle()) {
    BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
  } else {
    Register Off = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, DL, TII->get(Mips::XORi), Off)
      .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
    BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
  }
  BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
    .addReg(Mips::ZERO).addImm(MaskImm);
  BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
    .addReg(MaskUpper).addReg(ShiftAmt);
  BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
  BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedCmpVal)
    .addReg(CmpVal).addImm(MaskImm);
  BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedCmpVal)
    .addReg(MaskedCmpVal).addReg(ShiftAmt);
  BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedNewVal)
    .addReg(NewVal).addImm(MaskImm);
  BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedNewVal)
    .addReg(MaskedNewVal).addReg(ShiftAmt);

  // The purposes of the flags on the scratch registers are explained in
  // emitAtomicBinary. In summary, we need a scratch register that is
  // guaranteed to be undef and that is unique among the registers chosen for
  // the instruction.

  BuildMI(BB, DL, TII->get(AtomicOp))
      .addReg(Dest, RegState::Define | RegState::EarlyClobber)
      .addReg(AlignedAddr)
      .addReg(Mask)
      .addReg(ShiftedCmpVal)
      .addReg(Mask2)
      .addReg(ShiftedNewVal)
      .addReg(ShiftAmt)
      .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
                           RegState::Dead | RegState::Implicit)
      .addReg(Scratch2, RegState::EarlyClobber | RegState::Define |
                            RegState::Dead | RegState::Implicit);

  MI.eraseFromParent(); // The instruction is gone now.

  return exitMBB;
}

SDValue MipsTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  // The first operand is the chain, the second is the condition, the third is
  // the block to branch to if the condition is true.
  SDValue Chain = Op.getOperand(0);
  SDValue Dest = Op.getOperand(2);
  SDLoc DL(Op);

  assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
  SDValue CondRes = createFPCmp(DAG, Op.getOperand(1));

  // Return if flag is not set by a floating point comparison.
  if (CondRes.getOpcode() != MipsISD::FPCmp)
    return Op;

  SDValue CCNode = CondRes.getOperand(2);
  Mips::CondCode CC = (Mips::CondCode)CCNode->getAsZExtVal();
  unsigned Opc = invertFPCondCodeUser(CC) ? Mips::BRANCH_F : Mips::BRANCH_T;
  SDValue BrCode = DAG.getConstant(Opc, DL, MVT::i32);
  SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
  return DAG.getNode(MipsISD::FPBrcond, DL, Op.getValueType(), Chain, BrCode,
                     FCC0, Dest, CondRes);
}

SDValue MipsTargetLowering::
lowerSELECT(SDValue Op, SelectionDAG &DAG) const
{
  assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
  SDValue Cond = createFPCmp(DAG, Op.getOperand(0));

  // Return if flag is not set by a floating point comparison.
  if (Cond.getOpcode() != MipsISD::FPCmp)
    return Op;

  return createCMovFP(DAG, Cond, Op.getOperand(1), Op.getOperand(2),
                      SDLoc(Op));
}

SDValue MipsTargetLowering::lowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
  SDValue Cond = createFPCmp(DAG, Op);

  assert(Cond.getOpcode() == MipsISD::FPCmp &&
         "Floating point operand expected.");

  SDLoc DL(Op);
  SDValue True = DAG.getConstant(1, DL, MVT::i32);
  SDValue False = DAG.getConstant(0, DL, MVT::i32);

  return createCMovFP(DAG, Cond, True, False, DL);
}

SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = N->getGlobal();

  if (!isPositionIndependent()) {
    const MipsTargetObjectFile *TLOF =
        static_cast<const MipsTargetObjectFile *>(
            getTargetMachine().getObjFileLowering());
    const GlobalObject *GO = GV->getAliaseeObject();
    if (GO && TLOF->IsGlobalInSmallSection(GO, getTargetMachine()))
      // %gp_rel relocation
      return getAddrGPRel(N, SDLoc(N), Ty, DAG, ABI.IsN64());

    // %hi/%lo relocation
    return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
                                // %highest/%higher/%hi/%lo relocation
                                : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
  }

  // Every other architecture would use shouldAssumeDSOLocal in here, but
  // mips is special.
  // * In PIC code mips requires got loads even for local statics!
  // * To save on got entries, for local statics the got entry contains the
  //   page and an additional add instruction takes care of the low bits
  //   (see the sketch below).
  // * It is legal to access a hidden symbol with a non-hidden undefined
  //   reference, so one cannot guarantee that all accesses to a hidden symbol
  //   will know it is hidden.
  // * Mips linkers don't support creating a page and a full got entry for
  //   the same symbol.
  // * Given all that, we have to use a full got entry for hidden symbols :-(
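  //
  // As a rough sketch of the local-static case on O32 (an illustration, not
  // the exact relocations used on every ABI), a local static "x" is reached
  // via a GOT load of its page address plus a low-part add:
  //   lw    $t, %got(x)($gp)
  //   addiu $t, $t, %lo(x)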
  if (GV->hasLocalLinkage())
    return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());

  if (Subtarget.useXGOT())
    return getAddrGlobalLargeGOT(
        N, SDLoc(N), Ty, DAG, MipsII::MO_GOT_HI16, MipsII::MO_GOT_LO16,
        DAG.getEntryNode(),
        MachinePointerInfo::getGOT(DAG.getMachineFunction()));

  return getAddrGlobal(
      N, SDLoc(N), Ty, DAG,
      (ABI.IsN32() || ABI.IsN64()) ? MipsII::MO_GOT_DISP : MipsII::MO_GOT,
      DAG.getEntryNode(), MachinePointerInfo::getGOT(DAG.getMachineFunction()));
}

SDValue MipsTargetLowering::lowerBlockAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
  EVT Ty = Op.getValueType();

  if (!isPositionIndependent())
    return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
                                : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);

  return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
}

SDValue MipsTargetLowering::
lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
{
  // If the relocation model is PIC, use the General Dynamic TLS Model or
  // Local Dynamic TLS Model, otherwise use the Initial Exec or
  // Local Exec TLS Model.
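  //
  // As a rough sketch (an illustration of the usual sequence, not an exact
  // listing), the General Dynamic case materializes the argument as
  //   addiu $a0, $gp, %tlsgd(sym)
  // and then calls __tls_get_addr, whose return value is the address of the
  // symbol.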

  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  SDLoc DL(GA);
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  TLSModel::Model model = getTargetMachine().getTLSModel(GV);

  if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
    // General Dynamic and Local Dynamic TLS Model.
    unsigned Flag = (model == TLSModel::LocalDynamic) ? MipsII::MO_TLSLDM
                                                      : MipsII::MO_TLSGD;

    SDValue TGA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, Flag);
    SDValue Argument = DAG.getNode(MipsISD::Wrapper, DL, PtrVT,
                                   getGlobalReg(DAG, PtrVT), TGA);
    unsigned PtrSize = PtrVT.getSizeInBits();
    IntegerType *PtrTy = Type::getIntNTy(*DAG.getContext(), PtrSize);

    SDValue TlsGetAddr = DAG.getExternalSymbol("__tls_get_addr", PtrVT);

    ArgListTy Args;
    ArgListEntry Entry;
    Entry.Node = Argument;
    Entry.Ty = PtrTy;
    Args.push_back(Entry);

    TargetLowering::CallLoweringInfo CLI(DAG);
    CLI.setDebugLoc(DL)
        .setChain(DAG.getEntryNode())
        .setLibCallee(CallingConv::C, PtrTy, TlsGetAddr, std::move(Args));
    std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

    SDValue Ret = CallResult.first;

    if (model != TLSModel::LocalDynamic)
      return Ret;

    SDValue TGAHi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                               MipsII::MO_DTPREL_HI);
    SDValue Hi = DAG.getNode(MipsISD::TlsHi, DL, PtrVT, TGAHi);
    SDValue TGALo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                               MipsII::MO_DTPREL_LO);
    SDValue Lo = DAG.getNode(MipsISD::Lo, DL, PtrVT, TGALo);
    SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Ret);
    return DAG.getNode(ISD::ADD, DL, PtrVT, Add, Lo);
  }

  SDValue Offset;
  if (model == TLSModel::InitialExec) {
    // Initial Exec TLS Model
    SDValue TGA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                             MipsII::MO_GOTTPREL);
    TGA = DAG.getNode(MipsISD::Wrapper, DL, PtrVT, getGlobalReg(DAG, PtrVT),
                      TGA);
    Offset =
        DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), TGA, MachinePointerInfo());
  } else {
    // Local Exec TLS Model
    assert(model == TLSModel::LocalExec);
    SDValue TGAHi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                               MipsII::MO_TPREL_HI);
    SDValue TGALo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                               MipsII::MO_TPREL_LO);
    SDValue Hi = DAG.getNode(MipsISD::TlsHi, DL, PtrVT, TGAHi);
    SDValue Lo = DAG.getNode(MipsISD::Lo, DL, PtrVT, TGALo);
    Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
  }

  SDValue ThreadPointer = DAG.getNode(MipsISD::ThreadPointer, DL, PtrVT);
  return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadPointer, Offset);
}

SDValue MipsTargetLowering::
lowerJumpTable(SDValue Op, SelectionDAG &DAG) const
{
  JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
  EVT Ty = Op.getValueType();

  if (!isPositionIndependent())
    return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
                                : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);

  return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
}

SDValue MipsTargetLowering::
lowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
  EVT Ty = Op.getValueType();

  if (!isPositionIndependent()) {
    const MipsTargetObjectFile *TLOF =
        static_cast<const MipsTargetObjectFile *>(
            getTargetMachine().getObjFileLowering());

    if (TLOF->IsConstantInSmallSection(DAG.getDataLayout(), N->getConstVal(),
                                       getTargetMachine()))
      // %gp_rel relocation
      return getAddrGPRel(N, SDLoc(N), Ty, DAG, ABI.IsN64());

    return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
                                : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
  }

  return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
}

SDValue MipsTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();

  SDLoc DL(Op);
  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 getPointerTy(MF.getDataLayout()));

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  SDValue Chain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  const Align Align =
      llvm::MaybeAlign(Node->getConstantOperandVal(3)).valueOrOne();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc DL(Node);
  unsigned ArgSlotSizeInBytes = (ABI.IsN32() || ABI.IsN64()) ? 8 : 4;

  SDValue VAListLoad = DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL, Chain,
                                   VAListPtr, MachinePointerInfo(SV));
  SDValue VAList = VAListLoad;

  // Re-align the pointer if necessary.
  // It should only ever be necessary for 64-bit types on O32 since the minimum
  // argument alignment is the same as the maximum type alignment for N32/N64.
  //
  // FIXME: We currently align too often. The code generator doesn't notice
  // when the pointer is still aligned from the last va_arg (or pair of
  // va_args for the i64 on O32 case).
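  //
  // For example (O32, an f64 argument): the type needs 8-byte alignment while
  // the minimum slot alignment is 4, so the va_list pointer p is first rounded
  // up to (p + 7) & ~7 by the code below.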
  if (Align > getMinStackArgumentAlignment()) {
    VAList = DAG.getNode(
        ISD::ADD, DL, VAList.getValueType(), VAList,
        DAG.getConstant(Align.value() - 1, DL, VAList.getValueType()));

    VAList = DAG.getNode(
        ISD::AND, DL, VAList.getValueType(), VAList,
        DAG.getConstant(-(int64_t)Align.value(), DL, VAList.getValueType()));
  }

  // Increment the pointer, VAList, to the next vaarg.
  auto &TD = DAG.getDataLayout();
  unsigned ArgSizeInBytes =
      TD.getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext()));
  SDValue Tmp3 =
      DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
                  DAG.getConstant(alignTo(ArgSizeInBytes, ArgSlotSizeInBytes),
                                  DL, VAList.getValueType()));
  // Store the incremented VAList to the legalized pointer
  Chain = DAG.getStore(VAListLoad.getValue(1), DL, Tmp3, VAListPtr,
                       MachinePointerInfo(SV));

  // In big-endian mode we must adjust the pointer when the load size is smaller
  // than the argument slot size. We must also reduce the known alignment to
  // match. For example in the N64 ABI, we must add 4 bytes to the offset to get
  // the correct half of the slot, and reduce the alignment from 8 (slot
  // alignment) down to 4 (type alignment).
  if (!Subtarget.isLittle() && ArgSizeInBytes < ArgSlotSizeInBytes) {
    unsigned Adjustment = ArgSlotSizeInBytes - ArgSizeInBytes;
    VAList = DAG.getNode(ISD::ADD, DL, VAListPtr.getValueType(), VAList,
                         DAG.getIntPtrConstant(Adjustment, DL));
  }
  // Load the actual argument out of the pointer VAList
  return DAG.getLoad(VT, DL, Chain, VAList, MachinePointerInfo());
}

static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG,
                                bool HasExtractInsert) {
  EVT TyX = Op.getOperand(0).getValueType();
  EVT TyY = Op.getOperand(1).getValueType();
  SDLoc DL(Op);
  SDValue Const1 = DAG.getConstant(1, DL, MVT::i32);
  SDValue Const31 = DAG.getConstant(31, DL, MVT::i32);
  SDValue Res;

  // If the operand is of type f64, extract the upper 32 bits. Otherwise,
  // bitcast it to i32.
  SDValue X = (TyX == MVT::f32) ?
    DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0)) :
    DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
                Const1);
  SDValue Y = (TyY == MVT::f32) ?
    DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(1)) :
    DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(1),
                Const1);

  if (HasExtractInsert) {
    // ext  E, Y, 31, 1  ; extract bit31 of Y
    // ins  X, E, 31, 1  ; insert extracted bit at bit31 of X
    SDValue E = DAG.getNode(MipsISD::Ext, DL, MVT::i32, Y, Const31, Const1);
    Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32, E, Const31, Const1, X);
  } else {
    // sll SllX, X, 1
    // srl SrlX, SllX, 1
    // srl SrlY, Y, 31
    // sll SllY, SrlY, 31
    // or  Or,   SrlX, SllY
    SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
    SDValue SrlX = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
    SDValue SrlY = DAG.getNode(ISD::SRL, DL, MVT::i32, Y, Const31);
    SDValue SllY = DAG.getNode(ISD::SHL, DL, MVT::i32, SrlY, Const31);
    Res = DAG.getNode(ISD::OR, DL, MVT::i32, SrlX, SllY);
  }

  if (TyX == MVT::f32)
    return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Res);

  SDValue LowX = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
                             Op.getOperand(0),
                             DAG.getConstant(0, DL, MVT::i32));
  return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
}

static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG,
                                bool HasExtractInsert) {
  unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
  unsigned WidthY = Op.getOperand(1).getValueSizeInBits();
  EVT TyX = MVT::getIntegerVT(WidthX), TyY = MVT::getIntegerVT(WidthY);
  SDLoc DL(Op);
  SDValue Const1 = DAG.getConstant(1, DL, MVT::i32);

  // Bitcast to integer nodes.
  SDValue X = DAG.getNode(ISD::BITCAST, DL, TyX, Op.getOperand(0));
  SDValue Y = DAG.getNode(ISD::BITCAST, DL, TyY, Op.getOperand(1));

  if (HasExtractInsert) {
    // ext  E, Y, width(Y) - 1, 1  ; extract bit width(Y)-1 of Y
    // ins  X, E, width(X) - 1, 1  ; insert extracted bit at bit width(X)-1 of X
    SDValue E = DAG.getNode(MipsISD::Ext, DL, TyY, Y,
                            DAG.getConstant(WidthY - 1, DL, MVT::i32), Const1);

    if (WidthX > WidthY)
      E = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, E);
    else if (WidthY > WidthX)
      E = DAG.getNode(ISD::TRUNCATE, DL, TyX, E);

    SDValue I = DAG.getNode(MipsISD::Ins, DL, TyX, E,
                            DAG.getConstant(WidthX - 1, DL, MVT::i32), Const1,
                            X);
    return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), I);
  }

  // (d)sll SllX, X, 1
  // (d)srl SrlX, SllX, 1
  // (d)srl SrlY, Y, width(Y)-1
  // (d)sll SllY, SrlY, width(X)-1
  // or     Or,   SrlX, SllY
  SDValue SllX = DAG.getNode(ISD::SHL, DL, TyX, X, Const1);
  SDValue SrlX = DAG.getNode(ISD::SRL, DL, TyX, SllX, Const1);
  SDValue SrlY = DAG.getNode(ISD::SRL, DL, TyY, Y,
                             DAG.getConstant(WidthY - 1, DL, MVT::i32));

  if (WidthX > WidthY)
    SrlY = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, SrlY);
  else if (WidthY > WidthX)
    SrlY = DAG.getNode(ISD::TRUNCATE, DL, TyX, SrlY);

  SDValue SllY = DAG.getNode(ISD::SHL, DL, TyX, SrlY,
                             DAG.getConstant(WidthX - 1, DL, MVT::i32));
  SDValue Or = DAG.getNode(ISD::OR, DL, TyX, SrlX, SllY);
  return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Or);
}

SDValue
MipsTargetLowering::lowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
  if (Subtarget.isGP64bit())
    return lowerFCOPYSIGN64(Op, DAG, Subtarget.hasExtractInsert());

  return lowerFCOPYSIGN32(Op, DAG, Subtarget.hasExtractInsert());
}

SDValue MipsTargetLowering::lowerFABS32(SDValue Op, SelectionDAG &DAG,
                                        bool HasExtractInsert) const {
  SDLoc DL(Op);
  SDValue Res, Const1 = DAG.getConstant(1, DL, MVT::i32);

  if (DAG.getTarget().Options.NoNaNsFPMath || Subtarget.inAbs2008Mode())
    return DAG.getNode(MipsISD::FAbs, DL, Op.getValueType(), Op.getOperand(0));

  // If the operand is of type f64, extract the upper 32 bits. Otherwise,
  // bitcast it to i32.
  SDValue X = (Op.getValueType() == MVT::f32)
                  ? DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0))
                  : DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
                                Op.getOperand(0), Const1);

  // Clear MSB.
  if (HasExtractInsert)
    Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32,
                      DAG.getRegister(Mips::ZERO, MVT::i32),
                      DAG.getConstant(31, DL, MVT::i32), Const1, X);
  else {
    // TODO: Provide DAG patterns which transform (and x, cst)
    // back to a (shl (srl x (clz cst)) (clz cst)) sequence.
    SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
    Res = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
  }

  if (Op.getValueType() == MVT::f32)
    return DAG.getNode(ISD::BITCAST, DL, MVT::f32, Res);

  // FIXME: For mips32r2, the sequence of (BuildPairF64 (ins (ExtractElementF64
  // Op 1), $zero, 31 1) (ExtractElementF64 Op 0)) and the Op has one use, we
  // should be able to drop the usage of mfc1/mtc1 and rewrite the register in
  // place.
  SDValue LowX =
      DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
                  DAG.getConstant(0, DL, MVT::i32));
  return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
}

SDValue MipsTargetLowering::lowerFABS64(SDValue Op, SelectionDAG &DAG,
                                        bool HasExtractInsert) const {
  SDLoc DL(Op);
  SDValue Res, Const1 = DAG.getConstant(1, DL, MVT::i32);

  if (DAG.getTarget().Options.NoNaNsFPMath || Subtarget.inAbs2008Mode())
    return DAG.getNode(MipsISD::FAbs, DL, Op.getValueType(), Op.getOperand(0));

  // Bitcast to integer node.
  SDValue X = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Op.getOperand(0));

  // Clear MSB.
  if (HasExtractInsert)
    Res = DAG.getNode(MipsISD::Ins, DL, MVT::i64,
                      DAG.getRegister(Mips::ZERO_64, MVT::i64),
                      DAG.getConstant(63, DL, MVT::i32), Const1, X);
  else {
    SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i64, X, Const1);
    Res = DAG.getNode(ISD::SRL, DL, MVT::i64, SllX, Const1);
  }

  return DAG.getNode(ISD::BITCAST, DL, MVT::f64, Res);
}

SDValue MipsTargetLowering::lowerFABS(SDValue Op, SelectionDAG &DAG) const {
  if ((ABI.IsN32() || ABI.IsN64()) && (Op.getValueType() == MVT::f64))
    return lowerFABS64(Op, DAG, Subtarget.hasExtractInsert());

  return lowerFABS32(Op, DAG, Subtarget.hasExtractInsert());
}

SDValue MipsTargetLowering::
lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  // Check the depth.
  if (Op.getConstantOperandVal(0) != 0) {
    DAG.getContext()->emitError(
        "frame address can be determined only for current frame");
    return SDValue();
  }

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setFrameAddressIsTaken(true);
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue FrameAddr = DAG.getCopyFromReg(
      DAG.getEntryNode(), DL, ABI.IsN64() ? Mips::FP_64 : Mips::FP, VT);
  return FrameAddr;
}

SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  // Check the depth.
  if (Op.getConstantOperandVal(0) != 0) {
    DAG.getContext()->emitError(
        "return address can be determined only for current frame");
    return SDValue();
  }

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MVT VT = Op.getSimpleValueType();
  unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
  MFI.setReturnAddressIsTaken(true);

  // Return RA, which contains the return address. Mark it an implicit live-in.
  Register Reg = MF.addLiveIn(RA, getRegClassFor(VT));
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), Reg, VT);
}

// An EH_RETURN is the result of lowering llvm.eh.return, which in turn is
// generated from __builtin_eh_return (offset, handler).
// The effect of this is to adjust the stack pointer by "offset"
// and then branch to "handler".
SDValue MipsTargetLowering::lowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
                                                                     const {
  MachineFunction &MF = DAG.getMachineFunction();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  MipsFI->setCallsEhReturn();
  SDValue Chain = Op.getOperand(0);
  SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc DL(Op);
  EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;

  // Store stack offset in V1, store jump target in V0. Glue CopyToReg and
  // EH_RETURN nodes, so that instructions are emitted back-to-back.
  unsigned OffsetReg = ABI.IsN64() ? Mips::V1_64 : Mips::V1;
  unsigned AddrReg = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
  Chain = DAG.getCopyToReg(Chain, DL, OffsetReg, Offset, SDValue());
  Chain = DAG.getCopyToReg(Chain, DL, AddrReg, Handler, Chain.getValue(1));
  return DAG.getNode(MipsISD::EH_RETURN, DL, MVT::Other, Chain,
                     DAG.getRegister(OffsetReg, Ty),
                     DAG.getRegister(AddrReg, getPointerTy(MF.getDataLayout())),
                     Chain.getValue(1));
}

SDValue MipsTargetLowering::lowerATOMIC_FENCE(SDValue Op,
                                              SelectionDAG &DAG) const {
  // FIXME: Need pseudo-fence for 'singlethread' fences
  // FIXME: Set SType for weaker fences where supported/appropriate.
  unsigned SType = 0;
  SDLoc DL(Op);
  return DAG.getNode(MipsISD::Sync, DL, MVT::Other, Op.getOperand(0),
                     DAG.getConstant(SType, DL, MVT::i32));
}

SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;

  SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  // if shamt < (VT.bits):
  //  lo = (shl lo, shamt)
  //  hi = (or (shl hi, shamt) (srl (srl lo, 1), (xor shamt, (VT.bits-1))))
  // else:
  //  lo = 0
  //  hi = (shl lo, shamt[4:0])
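  //
  // E.g. with 32-bit parts and shamt == 40, the "else" arm applies:
  // lo = 0 and hi = lo << (40 & 31) = lo << 8.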
2615 SDValue Not =
2616 DAG.getNode(Opcode: ISD::XOR, DL, VT: MVT::i32, N1: Shamt,
2617 N2: DAG.getConstant(Val: VT.getSizeInBits() - 1, DL, VT: MVT::i32));
2618 SDValue ShiftRight1Lo = DAG.getNode(Opcode: ISD::SRL, DL, VT, N1: Lo,
2619 N2: DAG.getConstant(Val: 1, DL, VT));
2620 SDValue ShiftRightLo = DAG.getNode(Opcode: ISD::SRL, DL, VT, N1: ShiftRight1Lo, N2: Not);
2621 SDValue ShiftLeftHi = DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: Hi, N2: Shamt);
2622 SDValue Or = DAG.getNode(Opcode: ISD::OR, DL, VT, N1: ShiftLeftHi, N2: ShiftRightLo);
2623 SDValue ShiftLeftLo = DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: Lo, N2: Shamt);
2624 SDValue Cond = DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: Shamt,
2625 N2: DAG.getConstant(Val: VT.getSizeInBits(), DL, VT: MVT::i32));
2626 Lo = DAG.getNode(Opcode: ISD::SELECT, DL, VT, N1: Cond,
2627 N2: DAG.getConstant(Val: 0, DL, VT), N3: ShiftLeftLo);
2628 Hi = DAG.getNode(Opcode: ISD::SELECT, DL, VT, N1: Cond, N2: ShiftLeftLo, N3: Or);
2629
2630 SDValue Ops[2] = {Lo, Hi};
2631 return DAG.getMergeValues(Ops, dl: DL);
2632}
2633
2634SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
2635 bool IsSRA) const {
2636 SDLoc DL(Op);
2637 SDValue Lo = Op.getOperand(i: 0), Hi = Op.getOperand(i: 1);
2638 SDValue Shamt = Op.getOperand(i: 2);
2639 MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
2640
2641 // if shamt < (VT.bits):
2642 // lo = (or (shl (shl hi, 1), (xor shamt, (VT.bits-1))) (srl lo, shamt))
2643 // if isSRA:
2644 // hi = (sra hi, shamt)
2645 // else:
2646 // hi = (srl hi, shamt)
2647 // else:
2648 // if isSRA:
2649 // lo = (sra hi, shamt[4:0])
2650 // hi = (sra hi, 31)
2651 // else:
2652 // lo = (srl hi, shamt[4:0])
2653 // hi = 0
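  // Illustrative trace for an arithmetic shift with 32-bit halves and
  // shamt = 40: bit 5 of shamt is set, so lo becomes (sra hi, 40 & 31),
  // i.e. hi >> 8 arithmetically, and hi becomes (sra hi, 31).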
2654 SDValue Not =
2655 DAG.getNode(Opcode: ISD::XOR, DL, VT: MVT::i32, N1: Shamt,
2656 N2: DAG.getConstant(Val: VT.getSizeInBits() - 1, DL, VT: MVT::i32));
2657 SDValue ShiftLeft1Hi = DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: Hi,
2658 N2: DAG.getConstant(Val: 1, DL, VT));
2659 SDValue ShiftLeftHi = DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: ShiftLeft1Hi, N2: Not);
2660 SDValue ShiftRightLo = DAG.getNode(Opcode: ISD::SRL, DL, VT, N1: Lo, N2: Shamt);
2661 SDValue Or = DAG.getNode(Opcode: ISD::OR, DL, VT, N1: ShiftLeftHi, N2: ShiftRightLo);
2662 SDValue ShiftRightHi = DAG.getNode(Opcode: IsSRA ? ISD::SRA : ISD::SRL,
2663 DL, VT, N1: Hi, N2: Shamt);
2664 SDValue Cond = DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: Shamt,
2665 N2: DAG.getConstant(Val: VT.getSizeInBits(), DL, VT: MVT::i32));
2666 SDValue Ext = DAG.getNode(Opcode: ISD::SRA, DL, VT, N1: Hi,
2667 N2: DAG.getConstant(Val: VT.getSizeInBits() - 1, DL, VT));
2668
2669 if (!(Subtarget.hasMips4() || Subtarget.hasMips32())) {
2670 SDVTList VTList = DAG.getVTList(VT1: VT, VT2: VT);
2671 return DAG.getNode(Opcode: Subtarget.isGP64bit() ? MipsISD::DOUBLE_SELECT_I64
2672 : MipsISD::DOUBLE_SELECT_I,
2673 DL, VTList, N1: Cond, N2: ShiftRightHi,
2674 N3: IsSRA ? Ext : DAG.getConstant(Val: 0, DL, VT), N4: Or,
2675 N5: ShiftRightHi);
2676 }
2677
2678 Lo = DAG.getNode(Opcode: ISD::SELECT, DL, VT, N1: Cond, N2: ShiftRightHi, N3: Or);
2679 Hi = DAG.getNode(Opcode: ISD::SELECT, DL, VT, N1: Cond,
2680 N2: IsSRA ? Ext : DAG.getConstant(Val: 0, DL, VT), N3: ShiftRightHi);
2681
2682 SDValue Ops[2] = {Lo, Hi};
2683 return DAG.getMergeValues(Ops, dl: DL);
2684}
2685
2686static SDValue createLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD,
2687 SDValue Chain, SDValue Src, unsigned Offset) {
2688 SDValue Ptr = LD->getBasePtr();
2689 EVT VT = LD->getValueType(ResNo: 0), MemVT = LD->getMemoryVT();
2690 EVT BasePtrVT = Ptr.getValueType();
2691 SDLoc DL(LD);
2692 SDVTList VTList = DAG.getVTList(VT1: VT, VT2: MVT::Other);
2693
2694 if (Offset)
2695 Ptr = DAG.getNode(Opcode: ISD::ADD, DL, VT: BasePtrVT, N1: Ptr,
2696 N2: DAG.getConstant(Val: Offset, DL, VT: BasePtrVT));
2697
2698 SDValue Ops[] = { Chain, Ptr, Src };
2699 return DAG.getMemIntrinsicNode(Opcode: Opc, dl: DL, VTList, Ops, MemVT,
2700 MMO: LD->getMemOperand());
2701}
2702
// Expand an unaligned 32- or 64-bit integer load node.
2704SDValue MipsTargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
2705 LoadSDNode *LD = cast<LoadSDNode>(Val&: Op);
2706 EVT MemVT = LD->getMemoryVT();
2707
2708 if (Subtarget.systemSupportsUnalignedAccess())
2709 return Op;
2710
2711 // Return if load is aligned or if MemVT is neither i32 nor i64.
2712 if ((LD->getAlign().value() >= (MemVT.getSizeInBits() / 8)) ||
2713 ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
2714 return SDValue();
2715
2716 bool IsLittle = Subtarget.isLittle();
2717 EVT VT = Op.getValueType();
2718 ISD::LoadExtType ExtType = LD->getExtensionType();
2719 SDValue Chain = LD->getChain(), Undef = DAG.getUNDEF(VT);
2720
2721 assert((VT == MVT::i32) || (VT == MVT::i64));
2722
2723 // Expand
2724 // (set dst, (i64 (load baseptr)))
2725 // to
2726 // (set tmp, (ldl (add baseptr, 7), undef))
2727 // (set dst, (ldr baseptr, tmp))
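  // On a little-endian target, the LDL at (baseptr + 7) fills the
  // most-significant bytes of the destination and the LDR at baseptr fills
  // the least-significant ones, so the pair covers all eight unaligned bytes.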
2728 if ((VT == MVT::i64) && (ExtType == ISD::NON_EXTLOAD)) {
2729 SDValue LDL = createLoadLR(Opc: MipsISD::LDL, DAG, LD, Chain, Src: Undef,
2730 Offset: IsLittle ? 7 : 0);
2731 return createLoadLR(Opc: MipsISD::LDR, DAG, LD, Chain: LDL.getValue(R: 1), Src: LDL,
2732 Offset: IsLittle ? 0 : 7);
2733 }
2734
2735 SDValue LWL = createLoadLR(Opc: MipsISD::LWL, DAG, LD, Chain, Src: Undef,
2736 Offset: IsLittle ? 3 : 0);
2737 SDValue LWR = createLoadLR(Opc: MipsISD::LWR, DAG, LD, Chain: LWL.getValue(R: 1), Src: LWL,
2738 Offset: IsLittle ? 0 : 3);
2739
2740 // Expand
2741 // (set dst, (i32 (load baseptr))) or
2742 // (set dst, (i64 (sextload baseptr))) or
2743 // (set dst, (i64 (extload baseptr)))
2744 // to
2745 // (set tmp, (lwl (add baseptr, 3), undef))
2746 // (set dst, (lwr baseptr, tmp))
2747 if ((VT == MVT::i32) || (ExtType == ISD::SEXTLOAD) ||
2748 (ExtType == ISD::EXTLOAD))
2749 return LWR;
2750
2751 assert((VT == MVT::i64) && (ExtType == ISD::ZEXTLOAD));
2752
2753 // Expand
2754 // (set dst, (i64 (zextload baseptr)))
2755 // to
2756 // (set tmp0, (lwl (add baseptr, 3), undef))
2757 // (set tmp1, (lwr baseptr, tmp0))
2758 // (set tmp2, (shl tmp1, 32))
2759 // (set dst, (srl tmp2, 32))
2760 SDLoc DL(LD);
2761 SDValue Const32 = DAG.getConstant(Val: 32, DL, VT: MVT::i32);
2762 SDValue SLL = DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i64, N1: LWR, N2: Const32);
2763 SDValue SRL = DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i64, N1: SLL, N2: Const32);
2764 SDValue Ops[] = { SRL, LWR.getValue(R: 1) };
2765 return DAG.getMergeValues(Ops, dl: DL);
2766}
2767
2768static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD,
2769 SDValue Chain, unsigned Offset) {
2770 SDValue Ptr = SD->getBasePtr(), Value = SD->getValue();
2771 EVT MemVT = SD->getMemoryVT(), BasePtrVT = Ptr.getValueType();
2772 SDLoc DL(SD);
2773 SDVTList VTList = DAG.getVTList(VT: MVT::Other);
2774
2775 if (Offset)
2776 Ptr = DAG.getNode(Opcode: ISD::ADD, DL, VT: BasePtrVT, N1: Ptr,
2777 N2: DAG.getConstant(Val: Offset, DL, VT: BasePtrVT));
2778
2779 SDValue Ops[] = { Chain, Value, Ptr };
2780 return DAG.getMemIntrinsicNode(Opcode: Opc, dl: DL, VTList, Ops, MemVT,
2781 MMO: SD->getMemOperand());
2782}
2783
// Expand an unaligned 32- or 64-bit integer store node.
2785static SDValue lowerUnalignedIntStore(StoreSDNode *SD, SelectionDAG &DAG,
2786 bool IsLittle) {
2787 SDValue Value = SD->getValue(), Chain = SD->getChain();
2788 EVT VT = Value.getValueType();
2789
2790 // Expand
2791 // (store val, baseptr) or
2792 // (truncstore val, baseptr)
2793 // to
2794 // (swl val, (add baseptr, 3))
2795 // (swr val, baseptr)
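  // On a little-endian target, the SWL at (baseptr + 3) stores the
  // most-significant bytes of the word and the SWR at baseptr stores the
  // least-significant ones, so the pair covers all four unaligned bytes.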
2796 if ((VT == MVT::i32) || SD->isTruncatingStore()) {
2797 SDValue SWL = createStoreLR(Opc: MipsISD::SWL, DAG, SD, Chain,
2798 Offset: IsLittle ? 3 : 0);
2799 return createStoreLR(Opc: MipsISD::SWR, DAG, SD, Chain: SWL, Offset: IsLittle ? 0 : 3);
2800 }
2801
2802 assert(VT == MVT::i64);
2803
2804 // Expand
2805 // (store val, baseptr)
2806 // to
2807 // (sdl val, (add baseptr, 7))
2808 // (sdr val, baseptr)
2809 SDValue SDL = createStoreLR(Opc: MipsISD::SDL, DAG, SD, Chain, Offset: IsLittle ? 7 : 0);
2810 return createStoreLR(Opc: MipsISD::SDR, DAG, SD, Chain: SDL, Offset: IsLittle ? 0 : 7);
2811}
2812
// Lower (store (fp_to_sint $fp), $ptr) to (store (TruncIntFP $fp), $ptr).
2814static SDValue lowerFP_TO_SINT_STORE(StoreSDNode *SD, SelectionDAG &DAG,
2815 bool SingleFloat) {
2816 SDValue Val = SD->getValue();
2817
2818 if (Val.getOpcode() != ISD::FP_TO_SINT ||
2819 (Val.getValueSizeInBits() > 32 && SingleFloat))
2820 return SDValue();
2821
2822 EVT FPTy = EVT::getFloatingPointVT(BitWidth: Val.getValueSizeInBits());
2823 SDValue Tr = DAG.getNode(Opcode: MipsISD::TruncIntFP, DL: SDLoc(Val), VT: FPTy,
2824 Operand: Val.getOperand(i: 0));
2825 return DAG.getStore(Chain: SD->getChain(), dl: SDLoc(SD), Val: Tr, Ptr: SD->getBasePtr(),
2826 PtrInfo: SD->getPointerInfo(), Alignment: SD->getAlign(),
2827 MMOFlags: SD->getMemOperand()->getFlags());
2828}
2829
2830SDValue MipsTargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
2831 StoreSDNode *SD = cast<StoreSDNode>(Val&: Op);
2832 EVT MemVT = SD->getMemoryVT();
2833
2834 // Lower unaligned integer stores.
2835 if (!Subtarget.systemSupportsUnalignedAccess() &&
2836 (SD->getAlign().value() < (MemVT.getSizeInBits() / 8)) &&
2837 ((MemVT == MVT::i32) || (MemVT == MVT::i64)))
2838 return lowerUnalignedIntStore(SD, DAG, IsLittle: Subtarget.isLittle());
2839
2840 return lowerFP_TO_SINT_STORE(SD, DAG, SingleFloat: Subtarget.isSingleFloat());
2841}
2842
2843SDValue MipsTargetLowering::lowerEH_DWARF_CFA(SDValue Op,
2844 SelectionDAG &DAG) const {
2845
2846 // Return a fixed StackObject with offset 0 which points to the old stack
2847 // pointer.
2848 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2849 EVT ValTy = Op->getValueType(ResNo: 0);
2850 int FI = MFI.CreateFixedObject(Size: Op.getValueSizeInBits() / 8, SPOffset: 0, IsImmutable: false);
2851 return DAG.getFrameIndex(FI, VT: ValTy);
2852}
2853
2854SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op,
2855 SelectionDAG &DAG) const {
2856 if (Op.getValueSizeInBits() > 32 && Subtarget.isSingleFloat())
2857 return SDValue();
2858
2859 EVT FPTy = EVT::getFloatingPointVT(BitWidth: Op.getValueSizeInBits());
2860 SDValue Trunc = DAG.getNode(Opcode: MipsISD::TruncIntFP, DL: SDLoc(Op), VT: FPTy,
2861 Operand: Op.getOperand(i: 0));
2862 return DAG.getNode(Opcode: ISD::BITCAST, DL: SDLoc(Op), VT: Op.getValueType(), Operand: Trunc);
2863}
2864
2865//===----------------------------------------------------------------------===//
2866// Calling Convention Implementation
2867//===----------------------------------------------------------------------===//
2868
2869//===----------------------------------------------------------------------===//
2870// TODO: Implement a generic logic using tblgen that can support this.
2871// Mips O32 ABI rules:
2872// ---
2873// i32 - Passed in A0, A1, A2, A3 and stack
2874// f32 - Only passed in f32 registers if no int reg has been used yet to hold
2875// an argument. Otherwise, passed in A1, A2, A3 and stack.
2876// f64 - Only passed in two aliased f32 registers if no int reg has been used
2877// yet to hold an argument. Otherwise, use A2, A3 and stack. If A1 is
2878// not used, it must be shadowed. If only A3 is available, shadow it and
2879// go to stack.
2880// vXiX - Received as scalarized i32s, passed in A0 - A3 and the stack.
2881// vXf32 - Passed in either a pair of registers {A0, A1}, {A2, A3} or {A0 - A3}
2882// with the remainder spilled to the stack.
2883// vXf64 - Passed in either {A0, A1, A2, A3} or {A2, A3} and in both cases
2884// spilling the remainder to the stack.
2885//
2886// For vararg functions, all arguments are passed in A0, A1, A2, A3 and stack.
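//
// A few illustrative assignments implied by the rules above (hard-float O32,
// non-vararg calls; the signatures are hypothetical):
//   f(float, float)        --> $f12, $f14
//   f(int, float)          --> A0, A1 (an int reg has already been used)
//   f(int, double)         --> A0, {A2, A3} (A1 is shadowed)
//   f(float, float, float) --> $f12, $f14, A2 (3rd and later args use GPRs)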
2887//===----------------------------------------------------------------------===//
2888
2889static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
2890 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
2891 CCState &State, ArrayRef<MCPhysReg> F64Regs) {
2892 const MipsSubtarget &Subtarget = static_cast<const MipsSubtarget &>(
2893 State.getMachineFunction().getSubtarget());
2894
2895 static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
2896
  const MipsCCState *MipsState = static_cast<MipsCCState *>(&State);
2898
2899 static const MCPhysReg F32Regs[] = { Mips::F12, Mips::F14 };
2900
2901 static const MCPhysReg FloatVectorIntRegs[] = { Mips::A0, Mips::A2 };
2902
2903 // Do not process byval args here.
2904 if (ArgFlags.isByVal())
2905 return true;
2906
  // Promote i8, i16 and i32 to the upper bits of the register for big-endian
  // arguments passed in registers (inreg).
2908 if (ArgFlags.isInReg() && !Subtarget.isLittle()) {
2909 if (LocVT == MVT::i8 || LocVT == MVT::i16 || LocVT == MVT::i32) {
2910 LocVT = MVT::i32;
2911 if (ArgFlags.isSExt())
2912 LocInfo = CCValAssign::SExtUpper;
2913 else if (ArgFlags.isZExt())
2914 LocInfo = CCValAssign::ZExtUpper;
2915 else
2916 LocInfo = CCValAssign::AExtUpper;
2917 }
2918 }
2919
2920 // Promote i8 and i16
2921 if (LocVT == MVT::i8 || LocVT == MVT::i16) {
2922 LocVT = MVT::i32;
2923 if (ArgFlags.isSExt())
2924 LocInfo = CCValAssign::SExt;
2925 else if (ArgFlags.isZExt())
2926 LocInfo = CCValAssign::ZExt;
2927 else
2928 LocInfo = CCValAssign::AExt;
2929 }
2930
2931 unsigned Reg;
2932
  // f32 and f64 are allocated in A0, A1, A2, A3 when any of the following
  // is true: the function is vararg, the argument is 3rd or higher, or there
  // is a previous argument which is not f32 or f64.
2936 bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1 ||
2937 State.getFirstUnallocated(Regs: F32Regs) != ValNo;
2938 Align OrigAlign = ArgFlags.getNonZeroOrigAlign();
2939 bool isI64 = (ValVT == MVT::i32 && OrigAlign == Align(8));
2940 bool isVectorFloat = MipsState->WasOriginalArgVectorFloat(ValNo);
2941
  // The MIPS vector ABI for floats passes them in a pair of registers.
2943 if (ValVT == MVT::i32 && isVectorFloat) {
    // This is the start of a vector that was scalarized into an unknown
    // number of components. It doesn't matter how many there are. Allocate
    // one of the notional 8-byte-aligned registers which map onto the
    // argument stack, and shadow the register lost to alignment requirements.
2948 if (ArgFlags.isSplit()) {
2949 Reg = State.AllocateReg(Regs: FloatVectorIntRegs);
2950 if (Reg == Mips::A2)
2951 State.AllocateReg(Reg: Mips::A1);
2952 else if (Reg == 0)
2953 State.AllocateReg(Reg: Mips::A3);
2954 } else {
2955 // If we're an intermediate component of the split, we can just attempt to
2956 // allocate a register directly.
2957 Reg = State.AllocateReg(Regs: IntRegs);
2958 }
2959 } else if (ValVT == MVT::i32 ||
2960 (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
2961 Reg = State.AllocateReg(Regs: IntRegs);
2962 // If this is the first part of an i64 arg,
2963 // the allocated register must be either A0 or A2.
2964 if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
2965 Reg = State.AllocateReg(Regs: IntRegs);
2966 LocVT = MVT::i32;
2967 } else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
    // Allocate an int register and shadow the next int register. If the first
    // available register is Mips::A1 or Mips::A3, shadow it too.
2970 Reg = State.AllocateReg(Regs: IntRegs);
2971 if (Reg == Mips::A1 || Reg == Mips::A3)
2972 Reg = State.AllocateReg(Regs: IntRegs);
2973
2974 if (Reg) {
2975 LocVT = MVT::i32;
2976
2977 State.addLoc(
2978 V: CCValAssign::getCustomReg(ValNo, ValVT, RegNo: Reg, LocVT, HTP: LocInfo));
2979 MCRegister HiReg = State.AllocateReg(Regs: IntRegs);
2980 assert(HiReg);
2981 State.addLoc(
2982 V: CCValAssign::getCustomReg(ValNo, ValVT, RegNo: HiReg, LocVT, HTP: LocInfo));
2983 return false;
2984 }
2985 } else if (ValVT.isFloatingPoint() && !AllocateFloatsInIntReg) {
    // We are guaranteed to find an available float register.
2987 if (ValVT == MVT::f32) {
2988 Reg = State.AllocateReg(Regs: F32Regs);
2989 // Shadow int register
2990 State.AllocateReg(Regs: IntRegs);
2991 } else {
2992 Reg = State.AllocateReg(Regs: F64Regs);
2993 // Shadow int registers
2994 unsigned Reg2 = State.AllocateReg(Regs: IntRegs);
2995 if (Reg2 == Mips::A1 || Reg2 == Mips::A3)
2996 State.AllocateReg(Regs: IntRegs);
2997 State.AllocateReg(Regs: IntRegs);
2998 }
2999 } else
3000 llvm_unreachable("Cannot handle this ValVT.");
3001
3002 if (!Reg) {
3003 unsigned Offset = State.AllocateStack(Size: ValVT.getStoreSize(), Alignment: OrigAlign);
3004 State.addLoc(V: CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, HTP: LocInfo));
3005 } else
3006 State.addLoc(V: CCValAssign::getReg(ValNo, ValVT, RegNo: Reg, LocVT, HTP: LocInfo));
3007
3008 return false;
3009}
3010
3011static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT,
3012 MVT LocVT, CCValAssign::LocInfo LocInfo,
3013 ISD::ArgFlagsTy ArgFlags, CCState &State) {
3014 static const MCPhysReg F64Regs[] = { Mips::D6, Mips::D7 };
3015
3016 return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
3017}
3018
3019static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT,
3020 MVT LocVT, CCValAssign::LocInfo LocInfo,
3021 ISD::ArgFlagsTy ArgFlags, CCState &State) {
3022 static const MCPhysReg F64Regs[] = { Mips::D12_64, Mips::D14_64 };
3023
3024 return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
3025}
3026
3027static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
3028 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
3029 CCState &State) LLVM_ATTRIBUTE_UNUSED;
3030
3031#include "MipsGenCallingConv.inc"
3032
CCAssignFn *MipsTargetLowering::CCAssignFnForCall() const {
  return CC_Mips_FixedArg;
}

CCAssignFn *MipsTargetLowering::CCAssignFnForReturn() const {
  return RetCC_Mips;
}
3040//===----------------------------------------------------------------------===//
3041// Call Calling Convention Implementation
3042//===----------------------------------------------------------------------===//
3043
3044SDValue MipsTargetLowering::passArgOnStack(SDValue StackPtr, unsigned Offset,
3045 SDValue Chain, SDValue Arg,
3046 const SDLoc &DL, bool IsTailCall,
3047 SelectionDAG &DAG) const {
3048 if (!IsTailCall) {
3049 SDValue PtrOff =
3050 DAG.getNode(Opcode: ISD::ADD, DL, VT: getPointerTy(DL: DAG.getDataLayout()), N1: StackPtr,
3051 N2: DAG.getIntPtrConstant(Val: Offset, DL));
3052 return DAG.getStore(Chain, dl: DL, Val: Arg, Ptr: PtrOff, PtrInfo: MachinePointerInfo());
3053 }
3054
3055 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
3056 int FI = MFI.CreateFixedObject(Size: Arg.getValueSizeInBits() / 8, SPOffset: Offset, IsImmutable: false);
3057 SDValue FIN = DAG.getFrameIndex(FI, VT: getPointerTy(DL: DAG.getDataLayout()));
3058 return DAG.getStore(Chain, dl: DL, Val: Arg, Ptr: FIN, PtrInfo: MachinePointerInfo(), Alignment: MaybeAlign(),
3059 MMOFlags: MachineMemOperand::MOVolatile);
3060}
3061
3062void MipsTargetLowering::
3063getOpndList(SmallVectorImpl<SDValue> &Ops,
3064 std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
3065 bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
3066 bool IsCallReloc, CallLoweringInfo &CLI, SDValue Callee,
3067 SDValue Chain) const {
3068 // Insert node "GP copy globalreg" before call to function.
3069 //
3070 // R_MIPS_CALL* operators (emitted when non-internal functions are called
3071 // in PIC mode) allow symbols to be resolved via lazy binding.
3072 // The lazy binding stub requires GP to point to the GOT.
  // Note that we don't need GP to point to the GOT for indirect calls
  // (when R_MIPS_CALL* is not used for the call) because the Mips linker
  // generates a lazy binding stub for a function only when R_MIPS_CALL* are
  // the only relocs used for that function (that is, the Mips linker doesn't
  // generate a lazy binding stub for a function whose address is taken in the
  // program).
3078 if (IsPICCall && !InternalLinkage && IsCallReloc) {
3079 unsigned GPReg = ABI.IsN64() ? Mips::GP_64 : Mips::GP;
3080 EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;
3081 RegsToPass.push_back(x: std::make_pair(x&: GPReg, y: getGlobalReg(DAG&: CLI.DAG, Ty)));
3082 }
3083
3084 // Build a sequence of copy-to-reg nodes chained together with token
3085 // chain and flag operands which copy the outgoing args into registers.
  // The InGlue is necessary since all emitted instructions must be
  // stuck together.
3088 SDValue InGlue;
3089
3090 for (auto &R : RegsToPass) {
3091 Chain = CLI.DAG.getCopyToReg(Chain, dl: CLI.DL, Reg: R.first, N: R.second, Glue: InGlue);
3092 InGlue = Chain.getValue(R: 1);
3093 }
3094
3095 // Add argument registers to the end of the list so that they are
3096 // known live into the call.
3097 for (auto &R : RegsToPass)
3098 Ops.push_back(Elt: CLI.DAG.getRegister(Reg: R.first, VT: R.second.getValueType()));
3099
3100 // Add a register mask operand representing the call-preserved registers.
3101 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
3102 const uint32_t *Mask =
3103 TRI->getCallPreservedMask(MF: CLI.DAG.getMachineFunction(), CLI.CallConv);
3104 assert(Mask && "Missing call preserved mask for calling convention");
3105 if (Subtarget.inMips16HardFloat()) {
3106 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Val&: CLI.Callee)) {
3107 StringRef Sym = G->getGlobal()->getName();
3108 Function *F = G->getGlobal()->getParent()->getFunction(Name: Sym);
3109 if (F && F->hasFnAttribute(Kind: "__Mips16RetHelper")) {
3110 Mask = MipsRegisterInfo::getMips16RetHelperMask();
3111 }
3112 }
3113 }
3114 Ops.push_back(Elt: CLI.DAG.getRegisterMask(RegMask: Mask));
3115
3116 if (InGlue.getNode())
3117 Ops.push_back(Elt: InGlue);
3118}
3119
3120void MipsTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
3121 SDNode *Node) const {
3122 switch (MI.getOpcode()) {
3123 default:
3124 return;
3125 case Mips::JALR:
3126 case Mips::JALRPseudo:
3127 case Mips::JALR64:
3128 case Mips::JALR64Pseudo:
3129 case Mips::JALR16_MM:
3130 case Mips::JALRC16_MMR6:
3131 case Mips::TAILCALLREG:
3132 case Mips::TAILCALLREG64:
3133 case Mips::TAILCALLR6REG:
3134 case Mips::TAILCALL64R6REG:
3135 case Mips::TAILCALLREG_MM:
3136 case Mips::TAILCALLREG_MMR6: {
3137 if (!EmitJalrReloc ||
3138 Subtarget.inMips16Mode() ||
3139 !isPositionIndependent() ||
3140 Node->getNumOperands() < 1 ||
3141 Node->getOperand(Num: 0).getNumOperands() < 2) {
3142 return;
3143 }
    // We are after the callee address, which was set by LowerCall().
    // If it is added to the MI, the asm printer will emit .reloc R_MIPS_JALR
    // for the symbol.
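    // The emitted sequence then looks roughly like this (illustrative):
    //   .reloc .Ltmp0, R_MIPS_JALR, foo
    // .Ltmp0:
    //   jalr $25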
3147 const SDValue TargetAddr = Node->getOperand(Num: 0).getOperand(i: 1);
3148 StringRef Sym;
3149 if (const GlobalAddressSDNode *G =
3150 dyn_cast_or_null<const GlobalAddressSDNode>(Val: TargetAddr)) {
3151 // We must not emit the R_MIPS_JALR relocation against data symbols
3152 // since this will cause run-time crashes if the linker replaces the
3153 // call instruction with a relative branch to the data symbol.
3154 if (!isa<Function>(Val: G->getGlobal())) {
3155 LLVM_DEBUG(dbgs() << "Not adding R_MIPS_JALR against data symbol "
3156 << G->getGlobal()->getName() << "\n");
3157 return;
3158 }
3159 Sym = G->getGlobal()->getName();
    } else if (const ExternalSymbolSDNode *ES =
3162 dyn_cast_or_null<const ExternalSymbolSDNode>(Val: TargetAddr)) {
3163 Sym = ES->getSymbol();
3164 }
3165
3166 if (Sym.empty())
3167 return;
3168
3169 MachineFunction *MF = MI.getParent()->getParent();
3170 MCSymbol *S = MF->getContext().getOrCreateSymbol(Name: Sym);
3171 LLVM_DEBUG(dbgs() << "Adding R_MIPS_JALR against " << Sym << "\n");
3172 MI.addOperand(Op: MachineOperand::CreateMCSymbol(Sym: S, TargetFlags: MipsII::MO_JALR));
3173 }
3174 }
3175}
3176
/// LowerCall - Function arguments are copied from virtual registers to
/// physical registers or the stack frame; CALLSEQ_START and CALLSEQ_END are
/// emitted.
3179SDValue
3180MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
3181 SmallVectorImpl<SDValue> &InVals) const {
3182 SelectionDAG &DAG = CLI.DAG;
3183 SDLoc DL = CLI.DL;
3184 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
3185 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
3186 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
3187 SDValue Chain = CLI.Chain;
3188 SDValue Callee = CLI.Callee;
3189 bool &IsTailCall = CLI.IsTailCall;
3190 CallingConv::ID CallConv = CLI.CallConv;
3191 bool IsVarArg = CLI.IsVarArg;
3192
3193 MachineFunction &MF = DAG.getMachineFunction();
3194 MachineFrameInfo &MFI = MF.getFrameInfo();
3195 const TargetFrameLowering *TFL = Subtarget.getFrameLowering();
3196 MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
3197 bool IsPIC = isPositionIndependent();
3198
3199 // Analyze operands of the call, assigning locations to each operand.
3200 SmallVector<CCValAssign, 16> ArgLocs;
3201 MipsCCState CCInfo(
3202 CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs, *DAG.getContext(),
3203 MipsCCState::getSpecialCallingConvForCallee(Callee: Callee.getNode(), Subtarget));
3204
3205 const ExternalSymbolSDNode *ES =
3206 dyn_cast_or_null<const ExternalSymbolSDNode>(Val: Callee.getNode());
3207
3208 // There is one case where CALLSEQ_START..CALLSEQ_END can be nested, which
3209 // is during the lowering of a call with a byval argument which produces
3210 // a call to memcpy. For the O32 case, this causes the caller to allocate
3211 // stack space for the reserved argument area for the callee, then recursively
3212 // again for the memcpy call. In the NEWABI case, this doesn't occur as those
3213 // ABIs mandate that the callee allocates the reserved argument area. We do
3214 // still produce nested CALLSEQ_START..CALLSEQ_END with zero space though.
3215 //
3216 // If the callee has a byval argument and memcpy is used, we are mandated
3217 // to already have produced a reserved argument area for the callee for O32.
3218 // Therefore, the reserved argument area can be reused for both calls.
3219 //
3220 // Other cases of calling memcpy cannot have a chain with a CALLSEQ_START
3221 // present, as we have yet to hook that node onto the chain.
3222 //
3223 // Hence, the CALLSEQ_START and CALLSEQ_END nodes can be eliminated in this
3224 // case. GCC does a similar trick, in that wherever possible, it calculates
  // the maximum outgoing argument area (including the reserved area), and
3226 // preallocates the stack space on entrance to the caller.
3227 //
3228 // FIXME: We should do the same for efficiency and space.
3229
3230 // Note: The check on the calling convention below must match
3231 // MipsABIInfo::GetCalleeAllocdArgSizeInBytes().
3232 bool MemcpyInByVal = ES && StringRef(ES->getSymbol()) == "memcpy" &&
3233 CallConv != CallingConv::Fast &&
3234 Chain.getOpcode() == ISD::CALLSEQ_START;
3235
3236 // Allocate the reserved argument area. It seems strange to do this from the
3237 // caller side but removing it breaks the frame size calculation.
3238 unsigned ReservedArgArea =
3239 MemcpyInByVal ? 0 : ABI.GetCalleeAllocdArgSizeInBytes(CC: CallConv);
3240 CCInfo.AllocateStack(Size: ReservedArgArea, Alignment: Align(1));
3241
3242 CCInfo.AnalyzeCallOperands(Outs, Fn: CC_Mips, FuncArgs&: CLI.getArgs(),
3243 Func: ES ? ES->getSymbol() : nullptr);
3244
3245 // Get a count of how many bytes are to be pushed on the stack.
3246 unsigned StackSize = CCInfo.getStackSize();
3247
3248 // Call site info for function parameters tracking.
3249 MachineFunction::CallSiteInfo CSInfo;
3250
3251 // Check if it's really possible to do a tail call. Restrict it to functions
3252 // that are part of this compilation unit.
3253 bool InternalLinkage = false;
3254 if (IsTailCall) {
3255 IsTailCall = isEligibleForTailCallOptimization(
3256 CCInfo, NextStackOffset: StackSize, FI: *MF.getInfo<MipsFunctionInfo>());
3257 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Val&: Callee)) {
3258 InternalLinkage = G->getGlobal()->hasInternalLinkage();
3259 IsTailCall &= (InternalLinkage || G->getGlobal()->hasLocalLinkage() ||
3260 G->getGlobal()->hasPrivateLinkage() ||
3261 G->getGlobal()->hasHiddenVisibility() ||
3262 G->getGlobal()->hasProtectedVisibility());
3263 }
3264 }
3265 if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall())
3266 report_fatal_error(reason: "failed to perform tail call elimination on a call "
3267 "site marked musttail");
3268
3269 if (IsTailCall)
3270 ++NumTailCalls;
3271
  // Chain is the output chain of the last Load/Store or CopyToReg node.
3275 unsigned StackAlignment = TFL->getStackAlignment();
3276 StackSize = alignTo(Value: StackSize, Align: StackAlignment);
3277
3278 if (!(IsTailCall || MemcpyInByVal))
3279 Chain = DAG.getCALLSEQ_START(Chain, InSize: StackSize, OutSize: 0, DL);
3280
3281 SDValue StackPtr =
3282 DAG.getCopyFromReg(Chain, dl: DL, Reg: ABI.IsN64() ? Mips::SP_64 : Mips::SP,
3283 VT: getPointerTy(DL: DAG.getDataLayout()));
3284
3285 std::deque<std::pair<unsigned, SDValue>> RegsToPass;
3286 SmallVector<SDValue, 8> MemOpChains;
3287
3288 CCInfo.rewindByValRegsInfo();
3289
3290 // Walk the register/memloc assignments, inserting copies/loads.
3291 for (unsigned i = 0, e = ArgLocs.size(), OutIdx = 0; i != e; ++i, ++OutIdx) {
3292 SDValue Arg = OutVals[OutIdx];
3293 CCValAssign &VA = ArgLocs[i];
3294 MVT ValVT = VA.getValVT(), LocVT = VA.getLocVT();
3295 ISD::ArgFlagsTy Flags = Outs[OutIdx].Flags;
3296 bool UseUpperBits = false;
3297
3298 // ByVal Arg.
3299 if (Flags.isByVal()) {
3300 unsigned FirstByValReg, LastByValReg;
3301 unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
3302 CCInfo.getInRegsParamInfo(InRegsParamRecordIndex: ByValIdx, BeginReg&: FirstByValReg, EndReg&: LastByValReg);
3303
3304 assert(Flags.getByValSize() &&
3305 "ByVal args of size 0 should have been ignored by front-end.");
3306 assert(ByValIdx < CCInfo.getInRegsParamsCount());
3307 assert(!IsTailCall &&
3308 "Do not tail-call optimize if there is a byval argument.");
3309 passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg,
3310 FirstReg: FirstByValReg, LastReg: LastByValReg, Flags, isLittle: Subtarget.isLittle(),
3311 VA);
3312 CCInfo.nextInRegsParam();
3313 continue;
3314 }
3315
3316 // Promote the value if needed.
3317 switch (VA.getLocInfo()) {
3318 default:
3319 llvm_unreachable("Unknown loc info!");
3320 case CCValAssign::Full:
3321 if (VA.isRegLoc()) {
3322 if ((ValVT == MVT::f32 && LocVT == MVT::i32) ||
3323 (ValVT == MVT::f64 && LocVT == MVT::i64) ||
3324 (ValVT == MVT::i64 && LocVT == MVT::f64))
3325 Arg = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: LocVT, Operand: Arg);
3326 else if (ValVT == MVT::f64 && LocVT == MVT::i32) {
3327 SDValue Lo = DAG.getNode(Opcode: MipsISD::ExtractElementF64, DL, VT: MVT::i32,
3328 N1: Arg, N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32));
3329 SDValue Hi = DAG.getNode(Opcode: MipsISD::ExtractElementF64, DL, VT: MVT::i32,
3330 N1: Arg, N2: DAG.getConstant(Val: 1, DL, VT: MVT::i32));
3331 if (!Subtarget.isLittle())
3332 std::swap(a&: Lo, b&: Hi);
3333
3334 assert(VA.needsCustom());
3335
3336 Register LocRegLo = VA.getLocReg();
3337 Register LocRegHigh = ArgLocs[++i].getLocReg();
3338 RegsToPass.push_back(x: std::make_pair(x&: LocRegLo, y&: Lo));
3339 RegsToPass.push_back(x: std::make_pair(x&: LocRegHigh, y&: Hi));
3340 continue;
3341 }
3342 }
3343 break;
3344 case CCValAssign::BCvt:
3345 Arg = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: LocVT, Operand: Arg);
3346 break;
3347 case CCValAssign::SExtUpper:
3348 UseUpperBits = true;
3349 [[fallthrough]];
3350 case CCValAssign::SExt:
3351 Arg = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL, VT: LocVT, Operand: Arg);
3352 break;
3353 case CCValAssign::ZExtUpper:
3354 UseUpperBits = true;
3355 [[fallthrough]];
3356 case CCValAssign::ZExt:
3357 Arg = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: LocVT, Operand: Arg);
3358 break;
3359 case CCValAssign::AExtUpper:
3360 UseUpperBits = true;
3361 [[fallthrough]];
3362 case CCValAssign::AExt:
3363 Arg = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: LocVT, Operand: Arg);
3364 break;
3365 }
3366
3367 if (UseUpperBits) {
3368 unsigned ValSizeInBits = Outs[OutIdx].ArgVT.getSizeInBits();
3369 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3370 Arg = DAG.getNode(
3371 Opcode: ISD::SHL, DL, VT: VA.getLocVT(), N1: Arg,
3372 N2: DAG.getConstant(Val: LocSizeInBits - ValSizeInBits, DL, VT: VA.getLocVT()));
3373 }
3374
    // Arguments that are passed in registers must be kept in the
    // RegsToPass vector.
3377 if (VA.isRegLoc()) {
3378 RegsToPass.push_back(x: std::make_pair(x: VA.getLocReg(), y&: Arg));
3379
3380 // If the parameter is passed through reg $D, which splits into
3381 // two physical registers, avoid creating call site info.
3382 if (Mips::AFGR64RegClass.contains(Reg: VA.getLocReg()))
3383 continue;
3384
3385 // Collect CSInfo about which register passes which parameter.
3386 const TargetOptions &Options = DAG.getTarget().Options;
3387 if (Options.EmitCallSiteInfo)
3388 CSInfo.ArgRegPairs.emplace_back(Args: VA.getLocReg(), Args&: i);
3389
3390 continue;
3391 }
3392
    // A register argument can't get to this point...
3394 assert(VA.isMemLoc());
3395
    // Emit an ISD::STORE which stores the
    // parameter value to a stack location.
3398 MemOpChains.push_back(Elt: passArgOnStack(StackPtr, Offset: VA.getLocMemOffset(),
3399 Chain, Arg, DL, IsTailCall, DAG));
3400 }
3401
3402 // Transform all store nodes into one single node because all store
3403 // nodes are independent of each other.
3404 if (!MemOpChains.empty())
3405 Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: MemOpChains);
3406
  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is), turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
3410
3411 EVT Ty = Callee.getValueType();
3412 bool GlobalOrExternal = false, IsCallReloc = false;
3413
3414 // The long-calls feature is ignored in case of PIC.
3415 // While we do not support -mshared / -mno-shared properly,
3416 // ignore long-calls in case of -mabicalls too.
3417 if (!Subtarget.isABICalls() && !IsPIC) {
    // If the function should be called using "long call",
    // get its address into a register to prevent the use
    // of the `jal` instruction for the direct call.
3421 if (auto *N = dyn_cast<ExternalSymbolSDNode>(Val&: Callee)) {
3422 if (Subtarget.useLongCalls())
3423 Callee = Subtarget.hasSym32()
3424 ? getAddrNonPIC(N, DL: SDLoc(N), Ty, DAG)
3425 : getAddrNonPICSym64(N, DL: SDLoc(N), Ty, DAG);
3426 } else if (auto *N = dyn_cast<GlobalAddressSDNode>(Val&: Callee)) {
3427 bool UseLongCalls = Subtarget.useLongCalls();
      // If the function has the long-call/far/near attribute,
      // it overrides the command-line switch passed to the backend.
3430 if (auto *F = dyn_cast<Function>(Val: N->getGlobal())) {
3431 if (F->hasFnAttribute(Kind: "long-call"))
3432 UseLongCalls = true;
3433 else if (F->hasFnAttribute(Kind: "short-call"))
3434 UseLongCalls = false;
3435 }
3436 if (UseLongCalls)
3437 Callee = Subtarget.hasSym32()
3438 ? getAddrNonPIC(N, DL: SDLoc(N), Ty, DAG)
3439 : getAddrNonPICSym64(N, DL: SDLoc(N), Ty, DAG);
3440 }
3441 }
3442
3443 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Val&: Callee)) {
3444 if (IsPIC) {
3445 const GlobalValue *Val = G->getGlobal();
3446 InternalLinkage = Val->hasInternalLinkage();
3447
3448 if (InternalLinkage)
3449 Callee = getAddrLocal(N: G, DL, Ty, DAG, IsN32OrN64: ABI.IsN32() || ABI.IsN64());
3450 else if (Subtarget.useXGOT()) {
3451 Callee = getAddrGlobalLargeGOT(N: G, DL, Ty, DAG, HiFlag: MipsII::MO_CALL_HI16,
3452 LoFlag: MipsII::MO_CALL_LO16, Chain,
3453 PtrInfo: FuncInfo->callPtrInfo(MF, GV: Val));
3454 IsCallReloc = true;
3455 } else {
3456 Callee = getAddrGlobal(N: G, DL, Ty, DAG, Flag: MipsII::MO_GOT_CALL, Chain,
3457 PtrInfo: FuncInfo->callPtrInfo(MF, GV: Val));
3458 IsCallReloc = true;
3459 }
3460 } else
3461 Callee = DAG.getTargetGlobalAddress(GV: G->getGlobal(), DL,
3462 VT: getPointerTy(DL: DAG.getDataLayout()), offset: 0,
3463 TargetFlags: MipsII::MO_NO_FLAG);
3464 GlobalOrExternal = true;
  } else if (ExternalSymbolSDNode *S =
                 dyn_cast<ExternalSymbolSDNode>(Callee)) {
3467 const char *Sym = S->getSymbol();
3468
3469 if (!IsPIC) // static
3470 Callee = DAG.getTargetExternalSymbol(
3471 Sym, VT: getPointerTy(DL: DAG.getDataLayout()), TargetFlags: MipsII::MO_NO_FLAG);
3472 else if (Subtarget.useXGOT()) {
3473 Callee = getAddrGlobalLargeGOT(N: S, DL, Ty, DAG, HiFlag: MipsII::MO_CALL_HI16,
3474 LoFlag: MipsII::MO_CALL_LO16, Chain,
3475 PtrInfo: FuncInfo->callPtrInfo(MF, ES: Sym));
3476 IsCallReloc = true;
3477 } else { // PIC
3478 Callee = getAddrGlobal(N: S, DL, Ty, DAG, Flag: MipsII::MO_GOT_CALL, Chain,
3479 PtrInfo: FuncInfo->callPtrInfo(MF, ES: Sym));
3480 IsCallReloc = true;
3481 }
3482
3483 GlobalOrExternal = true;
3484 }
3485
3486 SmallVector<SDValue, 8> Ops(1, Chain);
3487 SDVTList NodeTys = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue);
3488
3489 getOpndList(Ops, RegsToPass, IsPICCall: IsPIC, GlobalOrExternal, InternalLinkage,
3490 IsCallReloc, CLI, Callee, Chain);
3491
3492 if (IsTailCall) {
3493 MF.getFrameInfo().setHasTailCall();
3494 SDValue Ret = DAG.getNode(Opcode: MipsISD::TailCall, DL, VT: MVT::Other, Ops);
3495 DAG.addCallSiteInfo(Node: Ret.getNode(), CallInfo: std::move(CSInfo));
3496 return Ret;
3497 }
3498
3499 Chain = DAG.getNode(Opcode: MipsISD::JmpLink, DL, VTList: NodeTys, Ops);
3500 SDValue InGlue = Chain.getValue(R: 1);
3501
3502 DAG.addCallSiteInfo(Node: Chain.getNode(), CallInfo: std::move(CSInfo));
3503
  // Create the CALLSEQ_END node in the case where it is not a call to
  // memcpy.
  if (!MemcpyInByVal) {
3507 Chain = DAG.getCALLSEQ_END(Chain, Size1: StackSize, Size2: 0, Glue: InGlue, DL);
3508 InGlue = Chain.getValue(R: 1);
3509 }
3510
3511 // Handle result values, copying them out of physregs into vregs that we
3512 // return.
3513 return LowerCallResult(Chain, InGlue, CallConv, isVarArg: IsVarArg, Ins, dl: DL, DAG,
3514 InVals, CLI);
3515}
3516
3517/// LowerCallResult - Lower the result values of a call into the
3518/// appropriate copies out of appropriate physical registers.
3519SDValue MipsTargetLowering::LowerCallResult(
3520 SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool IsVarArg,
3521 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
3522 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
3523 TargetLowering::CallLoweringInfo &CLI) const {
3524 // Assign locations to each value returned by this call.
3525 SmallVector<CCValAssign, 16> RVLocs;
3526 MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
3527 *DAG.getContext());
3528
3529 const ExternalSymbolSDNode *ES =
3530 dyn_cast_or_null<const ExternalSymbolSDNode>(Val: CLI.Callee.getNode());
3531 CCInfo.AnalyzeCallResult(Ins, Fn: RetCC_Mips, RetTy: CLI.RetTy,
3532 Func: ES ? ES->getSymbol() : nullptr);
3533
3534 // Copy all of the result registers out of their specified physreg.
3535 for (unsigned i = 0; i != RVLocs.size(); ++i) {
3536 CCValAssign &VA = RVLocs[i];
3537 assert(VA.isRegLoc() && "Can only return in registers!");
3538
3539 SDValue Val = DAG.getCopyFromReg(Chain, dl: DL, Reg: RVLocs[i].getLocReg(),
3540 VT: RVLocs[i].getLocVT(), Glue: InGlue);
3541 Chain = Val.getValue(R: 1);
3542 InGlue = Val.getValue(R: 2);
3543
3544 if (VA.isUpperBitsInLoc()) {
3545 unsigned ValSizeInBits = Ins[i].ArgVT.getSizeInBits();
3546 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3547 unsigned Shift =
3548 VA.getLocInfo() == CCValAssign::ZExtUpper ? ISD::SRL : ISD::SRA;
3549 Val = DAG.getNode(
3550 Opcode: Shift, DL, VT: VA.getLocVT(), N1: Val,
3551 N2: DAG.getConstant(Val: LocSizeInBits - ValSizeInBits, DL, VT: VA.getLocVT()));
3552 }
3553
3554 switch (VA.getLocInfo()) {
3555 default:
3556 llvm_unreachable("Unknown loc info!");
3557 case CCValAssign::Full:
3558 break;
3559 case CCValAssign::BCvt:
3560 Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: VA.getValVT(), Operand: Val);
3561 break;
3562 case CCValAssign::AExt:
3563 case CCValAssign::AExtUpper:
3564 Val = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: VA.getValVT(), Operand: Val);
3565 break;
3566 case CCValAssign::ZExt:
3567 case CCValAssign::ZExtUpper:
3568 Val = DAG.getNode(Opcode: ISD::AssertZext, DL, VT: VA.getLocVT(), N1: Val,
3569 N2: DAG.getValueType(VA.getValVT()));
3570 Val = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: VA.getValVT(), Operand: Val);
3571 break;
3572 case CCValAssign::SExt:
3573 case CCValAssign::SExtUpper:
3574 Val = DAG.getNode(Opcode: ISD::AssertSext, DL, VT: VA.getLocVT(), N1: Val,
3575 N2: DAG.getValueType(VA.getValVT()));
3576 Val = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: VA.getValVT(), Operand: Val);
3577 break;
3578 }
3579
3580 InVals.push_back(Elt: Val);
3581 }
3582
3583 return Chain;
3584}
3585
3586static SDValue UnpackFromArgumentSlot(SDValue Val, const CCValAssign &VA,
3587 EVT ArgVT, const SDLoc &DL,
3588 SelectionDAG &DAG) {
3589 MVT LocVT = VA.getLocVT();
3590 EVT ValVT = VA.getValVT();
3591
  // Shift the value out of the upper bits if necessary.
3593 switch (VA.getLocInfo()) {
3594 default:
3595 break;
3596 case CCValAssign::AExtUpper:
3597 case CCValAssign::SExtUpper:
3598 case CCValAssign::ZExtUpper: {
3599 unsigned ValSizeInBits = ArgVT.getSizeInBits();
3600 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3601 unsigned Opcode =
3602 VA.getLocInfo() == CCValAssign::ZExtUpper ? ISD::SRL : ISD::SRA;
3603 Val = DAG.getNode(
3604 Opcode, DL, VT: VA.getLocVT(), N1: Val,
3605 N2: DAG.getConstant(Val: LocSizeInBits - ValSizeInBits, DL, VT: VA.getLocVT()));
3606 break;
3607 }
3608 }
3609
  // If this is a value smaller than the argument slot size (32-bit for O32,
3611 // 64-bit for N32/N64), it has been promoted in some way to the argument slot
3612 // size. Extract the value and insert any appropriate assertions regarding
3613 // sign/zero extension.
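  // For example (illustrative): an i32 argument on N64 arrives in an i64
  // argument slot and, if the caller sign-extended it, is recovered below as
  // (truncate (AssertSext val, i32)).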
3614 switch (VA.getLocInfo()) {
3615 default:
3616 llvm_unreachable("Unknown loc info!");
3617 case CCValAssign::Full:
3618 break;
3619 case CCValAssign::AExtUpper:
3620 case CCValAssign::AExt:
3621 Val = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: ValVT, Operand: Val);
3622 break;
3623 case CCValAssign::SExtUpper:
3624 case CCValAssign::SExt:
3625 Val = DAG.getNode(Opcode: ISD::AssertSext, DL, VT: LocVT, N1: Val, N2: DAG.getValueType(ValVT));
3626 Val = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: ValVT, Operand: Val);
3627 break;
3628 case CCValAssign::ZExtUpper:
3629 case CCValAssign::ZExt:
3630 Val = DAG.getNode(Opcode: ISD::AssertZext, DL, VT: LocVT, N1: Val, N2: DAG.getValueType(ValVT));
3631 Val = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: ValVT, Operand: Val);
3632 break;
3633 case CCValAssign::BCvt:
3634 Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: ValVT, Operand: Val);
3635 break;
3636 }
3637
3638 return Val;
3639}
3640
3641//===----------------------------------------------------------------------===//
3642// Formal Arguments Calling Convention Implementation
3643//===----------------------------------------------------------------------===//
/// LowerFormalArguments - transform physical registers into virtual registers
/// and generate load operations for arguments placed on the stack.
3646SDValue MipsTargetLowering::LowerFormalArguments(
3647 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
3648 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
3649 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3650 MachineFunction &MF = DAG.getMachineFunction();
3651 MachineFrameInfo &MFI = MF.getFrameInfo();
3652 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3653
3654 MipsFI->setVarArgsFrameIndex(0);
3655
  // Used with varargs to accumulate store chains.
3657 std::vector<SDValue> OutChains;
3658
3659 // Assign locations to all of the incoming arguments.
3660 SmallVector<CCValAssign, 16> ArgLocs;
3661 MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
3662 *DAG.getContext());
3663 CCInfo.AllocateStack(Size: ABI.GetCalleeAllocdArgSizeInBytes(CC: CallConv), Alignment: Align(1));
3664 const Function &Func = DAG.getMachineFunction().getFunction();
3665 Function::const_arg_iterator FuncArg = Func.arg_begin();
3666
3667 if (Func.hasFnAttribute(Kind: "interrupt") && !Func.arg_empty())
3668 report_fatal_error(
3669 reason: "Functions with the interrupt attribute cannot have arguments!");
3670
3671 CCInfo.AnalyzeFormalArguments(Ins, Fn: CC_Mips_FixedArg);
3672 MipsFI->setFormalArgInfo(Size: CCInfo.getStackSize(),
3673 HasByval: CCInfo.getInRegsParamsCount() > 0);
3674
3675 unsigned CurArgIdx = 0;
3676 CCInfo.rewindByValRegsInfo();
3677
3678 for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
3679 CCValAssign &VA = ArgLocs[i];
3680 if (Ins[InsIdx].isOrigArg()) {
3681 std::advance(i&: FuncArg, n: Ins[InsIdx].getOrigArgIndex() - CurArgIdx);
3682 CurArgIdx = Ins[InsIdx].getOrigArgIndex();
3683 }
3684 EVT ValVT = VA.getValVT();
3685 ISD::ArgFlagsTy Flags = Ins[InsIdx].Flags;
3686 bool IsRegLoc = VA.isRegLoc();
3687
3688 if (Flags.isByVal()) {
3689 assert(Ins[InsIdx].isOrigArg() && "Byval arguments cannot be implicit");
3690 unsigned FirstByValReg, LastByValReg;
3691 unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
3692 CCInfo.getInRegsParamInfo(InRegsParamRecordIndex: ByValIdx, BeginReg&: FirstByValReg, EndReg&: LastByValReg);
3693
3694 assert(Flags.getByValSize() &&
3695 "ByVal args of size 0 should have been ignored by front-end.");
3696 assert(ByValIdx < CCInfo.getInRegsParamsCount());
3697 copyByValRegs(Chain, DL, OutChains, DAG, Flags, InVals, FuncArg: &*FuncArg,
3698 FirstReg: FirstByValReg, LastReg: LastByValReg, VA, State&: CCInfo);
3699 CCInfo.nextInRegsParam();
3700 continue;
3701 }
3702
    // Arguments stored in registers
3704 if (IsRegLoc) {
3705 MVT RegVT = VA.getLocVT();
3706 Register ArgReg = VA.getLocReg();
3707 const TargetRegisterClass *RC = getRegClassFor(VT: RegVT);
3708
      // Transform the arguments stored in
      // physical registers into virtual ones.
3711 unsigned Reg = addLiveIn(MF&: DAG.getMachineFunction(), PReg: ArgReg, RC);
3712 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl: DL, Reg, VT: RegVT);
3713
3714 ArgValue =
3715 UnpackFromArgumentSlot(Val: ArgValue, VA, ArgVT: Ins[InsIdx].ArgVT, DL, DAG);
3716
3717 // Handle floating point arguments passed in integer registers and
3718 // long double arguments passed in floating point registers.
3719 if ((RegVT == MVT::i32 && ValVT == MVT::f32) ||
3720 (RegVT == MVT::i64 && ValVT == MVT::f64) ||
3721 (RegVT == MVT::f64 && ValVT == MVT::i64))
3722 ArgValue = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: ValVT, Operand: ArgValue);
3723 else if (ABI.IsO32() && RegVT == MVT::i32 &&
3724 ValVT == MVT::f64) {
3725 assert(VA.needsCustom() && "Expected custom argument for f64 split");
3726 CCValAssign &NextVA = ArgLocs[++i];
3727 unsigned Reg2 =
3728 addLiveIn(MF&: DAG.getMachineFunction(), PReg: NextVA.getLocReg(), RC);
3729 SDValue ArgValue2 = DAG.getCopyFromReg(Chain, dl: DL, Reg: Reg2, VT: RegVT);
3730 if (!Subtarget.isLittle())
3731 std::swap(a&: ArgValue, b&: ArgValue2);
3732 ArgValue = DAG.getNode(Opcode: MipsISD::BuildPairF64, DL, VT: MVT::f64,
3733 N1: ArgValue, N2: ArgValue2);
3734 }
3735
3736 InVals.push_back(Elt: ArgValue);
    } else { // VA.isMemLoc()
3738 MVT LocVT = VA.getLocVT();
3739
3740 assert(!VA.needsCustom() && "unexpected custom memory argument");
3741
      // Only arguments passed on the stack should make it here.
3743 assert(VA.isMemLoc());
3744
3745 // The stack pointer offset is relative to the caller stack frame.
3746 int FI = MFI.CreateFixedObject(Size: LocVT.getSizeInBits() / 8,
3747 SPOffset: VA.getLocMemOffset(), IsImmutable: true);
3748
3749 // Create load nodes to retrieve arguments from the stack
3750 SDValue FIN = DAG.getFrameIndex(FI, VT: getPointerTy(DL: DAG.getDataLayout()));
3751 SDValue ArgValue = DAG.getLoad(
3752 VT: LocVT, dl: DL, Chain, Ptr: FIN,
3753 PtrInfo: MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI));
3754 OutChains.push_back(x: ArgValue.getValue(R: 1));
3755
3756 ArgValue =
3757 UnpackFromArgumentSlot(Val: ArgValue, VA, ArgVT: Ins[InsIdx].ArgVT, DL, DAG);
3758
3759 InVals.push_back(Elt: ArgValue);
3760 }
3761 }
3762
3763 for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
3764
3765 if (ArgLocs[i].needsCustom()) {
3766 ++i;
3767 continue;
3768 }
3769
    // The Mips ABIs for returning structs by value require that we copy
    // the sret argument into $v0 for the return. Save the argument into
    // a virtual register so that we can access it from the return points.
3773 if (Ins[InsIdx].Flags.isSRet()) {
3774 unsigned Reg = MipsFI->getSRetReturnReg();
3775 if (!Reg) {
3776 Reg = MF.getRegInfo().createVirtualRegister(
3777 RegClass: getRegClassFor(VT: ABI.IsN64() ? MVT::i64 : MVT::i32));
3778 MipsFI->setSRetReturnReg(Reg);
3779 }
3780 SDValue Copy = DAG.getCopyToReg(Chain: DAG.getEntryNode(), dl: DL, Reg, N: InVals[i]);
3781 Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, N1: Copy, N2: Chain);
3782 break;
3783 }
3784 }
3785
3786 if (IsVarArg)
3787 writeVarArgRegs(OutChains, Chain, DL, DAG, State&: CCInfo);
3788
  // All stores are grouped in one node to allow the matching between
  // the size of Ins and InVals. This only happens on vararg functions.
3791 if (!OutChains.empty()) {
3792 OutChains.push_back(x: Chain);
3793 Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: OutChains);
3794 }
3795
3796 return Chain;
3797}
3798
3799//===----------------------------------------------------------------------===//
3800// Return Value Calling Convention Implementation
3801//===----------------------------------------------------------------------===//
3802
3803bool
3804MipsTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
3805 MachineFunction &MF, bool IsVarArg,
3806 const SmallVectorImpl<ISD::OutputArg> &Outs,
3807 LLVMContext &Context) const {
3808 SmallVector<CCValAssign, 16> RVLocs;
3809 MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
3810 return CCInfo.CheckReturn(ArgsFlags: Outs, Fn: RetCC_Mips);
3811}
3812
3813bool MipsTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
3814 bool IsSigned) const {
3815 if ((ABI.IsN32() || ABI.IsN64()) && Type == MVT::i32)
3816 return true;
3817
3818 return IsSigned;
3819}
3820
3821SDValue
3822MipsTargetLowering::LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
3823 const SDLoc &DL,
3824 SelectionDAG &DAG) const {
3825 MachineFunction &MF = DAG.getMachineFunction();
3826 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3827
3828 MipsFI->setISR();
3829
3830 return DAG.getNode(Opcode: MipsISD::ERet, DL, VT: MVT::Other, Ops: RetOps);
3831}
3832
3833SDValue
3834MipsTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
3835 bool IsVarArg,
3836 const SmallVectorImpl<ISD::OutputArg> &Outs,
3837 const SmallVectorImpl<SDValue> &OutVals,
3838 const SDLoc &DL, SelectionDAG &DAG) const {
  // CCValAssign - represents the assignment of
  // the return value to a location.
3841 SmallVector<CCValAssign, 16> RVLocs;
3842 MachineFunction &MF = DAG.getMachineFunction();
3843
3844 // CCState - Info about the registers and stack slot.
3845 MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
3846
3847 // Analyze return values.
3848 CCInfo.AnalyzeReturn(Outs, Fn: RetCC_Mips);
3849
3850 SDValue Glue;
3851 SmallVector<SDValue, 4> RetOps(1, Chain);
3852
3853 // Copy the result values into the output registers.
3854 for (unsigned i = 0; i != RVLocs.size(); ++i) {
3855 SDValue Val = OutVals[i];
3856 CCValAssign &VA = RVLocs[i];
3857 assert(VA.isRegLoc() && "Can only return in registers!");
3858 bool UseUpperBits = false;
3859
3860 switch (VA.getLocInfo()) {
3861 default:
3862 llvm_unreachable("Unknown loc info!");
3863 case CCValAssign::Full:
3864 break;
3865 case CCValAssign::BCvt:
3866 Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: VA.getLocVT(), Operand: Val);
3867 break;
3868 case CCValAssign::AExtUpper:
3869 UseUpperBits = true;
3870 [[fallthrough]];
3871 case CCValAssign::AExt:
3872 Val = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: VA.getLocVT(), Operand: Val);
3873 break;
3874 case CCValAssign::ZExtUpper:
3875 UseUpperBits = true;
3876 [[fallthrough]];
3877 case CCValAssign::ZExt:
3878 Val = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: VA.getLocVT(), Operand: Val);
3879 break;
3880 case CCValAssign::SExtUpper:
3881 UseUpperBits = true;
3882 [[fallthrough]];
3883 case CCValAssign::SExt:
3884 Val = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL, VT: VA.getLocVT(), Operand: Val);
3885 break;
3886 }
3887
3888 if (UseUpperBits) {
3889 unsigned ValSizeInBits = Outs[i].ArgVT.getSizeInBits();
3890 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3891 Val = DAG.getNode(
3892 Opcode: ISD::SHL, DL, VT: VA.getLocVT(), N1: Val,
3893 N2: DAG.getConstant(Val: LocSizeInBits - ValSizeInBits, DL, VT: VA.getLocVT()));
3894 }
3895
3896 Chain = DAG.getCopyToReg(Chain, dl: DL, Reg: VA.getLocReg(), N: Val, Glue);
3897
3898 // Guarantee that all emitted copies are stuck together with flags.
3899 Glue = Chain.getValue(R: 1);
3900 RetOps.push_back(Elt: DAG.getRegister(Reg: VA.getLocReg(), VT: VA.getLocVT()));
3901 }
3902
  // The Mips ABIs for returning structs by value require that we copy
3904 // the sret argument into $v0 for the return. We saved the argument into
3905 // a virtual register in the entry block, so now we copy the value out
3906 // and into $v0.
3907 if (MF.getFunction().hasStructRetAttr()) {
3908 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3909 unsigned Reg = MipsFI->getSRetReturnReg();
3910
3911 if (!Reg)
3912 llvm_unreachable("sret virtual register not created in the entry block");
3913 SDValue Val =
3914 DAG.getCopyFromReg(Chain, dl: DL, Reg, VT: getPointerTy(DL: DAG.getDataLayout()));
3915 unsigned V0 = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
3916
3917 Chain = DAG.getCopyToReg(Chain, dl: DL, Reg: V0, N: Val, Glue);
3918 Glue = Chain.getValue(R: 1);
3919 RetOps.push_back(Elt: DAG.getRegister(Reg: V0, VT: getPointerTy(DL: DAG.getDataLayout())));
3920 }
3921
3922 RetOps[0] = Chain; // Update chain.
3923
3924 // Add the glue if we have it.
3925 if (Glue.getNode())
3926 RetOps.push_back(Elt: Glue);
3927
3928 // ISRs must use "eret".
3929 if (DAG.getMachineFunction().getFunction().hasFnAttribute(Kind: "interrupt"))
3930 return LowerInterruptReturn(RetOps, DL, DAG);
3931
3932 // Standard return on Mips is a "jr $ra"
3933 return DAG.getNode(Opcode: MipsISD::Ret, DL, VT: MVT::Other, Ops: RetOps);
3934}
3935
3936//===----------------------------------------------------------------------===//
3937// Mips Inline Assembly Support
3938//===----------------------------------------------------------------------===//
3939
3940/// getConstraintType - Given a constraint letter, return the type of
3941/// constraint it is for this target.
3942MipsTargetLowering::ConstraintType
3943MipsTargetLowering::getConstraintType(StringRef Constraint) const {
  // Mips specific constraints
  // GCC config/mips/constraints.md
  //
  // 'd' : An address register. Equivalent to r
  //       unless generating MIPS16 code.
  // 'y' : Equivalent to r; retained for
  //       backwards compatibility.
  // 'f' : A floating-point register (or an MSA register, when MSA is
  //       available).
  // 'c' : A register suitable for use in an indirect
  //       jump. This will always be $25 for -mabicalls.
  // 'l' : The lo register. 1 word storage.
  // 'x' : The hilo register pair. Double word storage.
  // 'R' : An address valid for a non-macro load or store.
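  //
  // An illustrative use of the 'l' constraint from GNU C (the variables are
  // hypothetical):
  //   int lo;
  //   asm("mult %1, %2" : "=l"(lo) : "d"(a), "d"(b));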
3955 if (Constraint.size() == 1) {
3956 switch (Constraint[0]) {
3957 default : break;
3958 case 'd':
3959 case 'y':
3960 case 'f':
3961 case 'c':
3962 case 'l':
3963 case 'x':
3964 return C_RegisterClass;
3965 case 'R':
3966 return C_Memory;
3967 }
3968 }
3969
3970 if (Constraint == "ZC")
3971 return C_Memory;
3972
3973 return TargetLowering::getConstraintType(Constraint);
3974}
3975
3976/// Examine constraint type and operand type and determine a weight value.
3977/// This object must already have been set up with the operand type
3978/// and the current alternative constraint selected.
3979TargetLowering::ConstraintWeight
3980MipsTargetLowering::getSingleConstraintMatchWeight(
3981 AsmOperandInfo &info, const char *constraint) const {
3982 ConstraintWeight weight = CW_Invalid;
3983 Value *CallOperandVal = info.CallOperandVal;
3984 // If we don't have a value, we can't do a match,
3985 // but allow it at the lowest weight.
3986 if (!CallOperandVal)
3987 return CW_Default;
3988 Type *type = CallOperandVal->getType();
3989 // Look at the constraint type.
3990 switch (*constraint) {
3991 default:
3992 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3993 break;
3994 case 'd':
3995 case 'y':
3996 if (type->isIntegerTy())
3997 weight = CW_Register;
3998 break;
3999 case 'f': // FPU or MSA register
4000 if (Subtarget.hasMSA() && type->isVectorTy() &&
4001 type->getPrimitiveSizeInBits().getFixedValue() == 128)
4002 weight = CW_Register;
4003 else if (type->isFloatTy())
4004 weight = CW_Register;
4005 break;
4006 case 'c': // $25 for indirect jumps
4007 case 'l': // lo register
4008 case 'x': // hilo register pair
4009 if (type->isIntegerTy())
4010 weight = CW_SpecificReg;
4011 break;
4012 case 'I': // signed 16 bit immediate
4013 case 'J': // integer zero
4014 case 'K': // unsigned 16 bit immediate
4015 case 'L': // signed 32 bit immediate where lower 16 bits are 0
4016 case 'N': // immediate in the range of -65535 to -1 (inclusive)
4017 case 'O': // signed 15 bit immediate (+- 16383)
  case 'P': // immediate in the range of 1 to 65535 (inclusive)
4019 if (isa<ConstantInt>(Val: CallOperandVal))
4020 weight = CW_Constant;
4021 break;
4022 case 'R':
4023 weight = CW_Memory;
4024 break;
4025 }
4026 return weight;
4027}
4028
4029/// This is a helper function to parse a physical register string and split it
4030/// into non-numeric and numeric parts (Prefix and Reg). The first boolean flag
4031/// that is returned indicates whether parsing was successful. The second flag
4032/// is true if the numeric part exists.
4033static std::pair<bool, bool> parsePhysicalReg(StringRef C, StringRef &Prefix,
4034 unsigned long long &Reg) {
4035 if (C.front() != '{' || C.back() != '}')
4036 return std::make_pair(x: false, y: false);
4037
4038 // Search for the first numeric character.
4039 StringRef::const_iterator I, B = C.begin() + 1, E = C.end() - 1;
4040 I = std::find_if(first: B, last: E, pred: isdigit);
4041
4042 Prefix = StringRef(B, I - B);
4043
4044 // The second flag is set to false if no numeric characters were found.
4045 if (I == E)
4046 return std::make_pair(x: true, y: false);
4047
4048 // Parse the numeric characters.
4049 return std::make_pair(x: !getAsUnsignedInteger(Str: StringRef(I, E - I), Radix: 10, Result&: Reg),
4050 y: true);
4051}
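
// For example (illustrative inputs):
//   "{$f20}" -> Prefix == "$f", Reg == 20, returns {true, true}
//   "{lo}"   -> Prefix == "lo", Reg unchanged, returns {true, false}
//   "$f20"   -> no surrounding braces, returns {false, false}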
4052
4053EVT MipsTargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
4054 ISD::NodeType) const {
4055 bool Cond = !Subtarget.isABI_O32() && VT.getSizeInBits() == 32;
4056 EVT MinVT = getRegisterType(VT: Cond ? MVT::i64 : MVT::i32);
4057 return VT.bitsLT(VT: MinVT) ? MinVT : VT;
4058}
4059
4060std::pair<unsigned, const TargetRegisterClass *> MipsTargetLowering::
4061parseRegForInlineAsmConstraint(StringRef C, MVT VT) const {
4062 const TargetRegisterInfo *TRI =
4063 Subtarget.getRegisterInfo();
4064 const TargetRegisterClass *RC;
4065 StringRef Prefix;
4066 unsigned long long Reg;
4067
4068 std::pair<bool, bool> R = parsePhysicalReg(C, Prefix, Reg);
4069
4070 if (!R.first)
4071 return std::make_pair(x: 0U, y: nullptr);
4072
4073 if ((Prefix == "hi" || Prefix == "lo")) { // Parse hi/lo.
4074 // No numeric characters follow "hi" or "lo".
4075 if (R.second)
4076 return std::make_pair(x: 0U, y: nullptr);
4077
4078 RC = TRI->getRegClass(i: Prefix == "hi" ?
4079 Mips::HI32RegClassID : Mips::LO32RegClassID);
4080 return std::make_pair(x: *(RC->begin()), y&: RC);
4081 } else if (Prefix.starts_with(Prefix: "$msa")) {
4082 // Parse $msa(ir|csr|access|save|modify|request|map|unmap)
4083
4084 // No numeric characters follow the name.
4085 if (R.second)
4086 return std::make_pair(x: 0U, y: nullptr);
4087
4088 Reg = StringSwitch<unsigned long long>(Prefix)
4089 .Case(S: "$msair", Value: Mips::MSAIR)
4090 .Case(S: "$msacsr", Value: Mips::MSACSR)
4091 .Case(S: "$msaaccess", Value: Mips::MSAAccess)
4092 .Case(S: "$msasave", Value: Mips::MSASave)
4093 .Case(S: "$msamodify", Value: Mips::MSAModify)
4094 .Case(S: "$msarequest", Value: Mips::MSARequest)
4095 .Case(S: "$msamap", Value: Mips::MSAMap)
4096 .Case(S: "$msaunmap", Value: Mips::MSAUnmap)
4097 .Default(Value: 0);
4098
4099 if (!Reg)
4100 return std::make_pair(x: 0U, y: nullptr);
4101
4102 RC = TRI->getRegClass(i: Mips::MSACtrlRegClassID);
4103 return std::make_pair(x&: Reg, y&: RC);
4104 }
4105
4106 if (!R.second)
4107 return std::make_pair(x: 0U, y: nullptr);
4108
4109 if (Prefix == "$f") { // Parse $f0-$f31.
4110 // If the size of FP registers is 64-bit or Reg is an even number, select
4111 // the 64-bit register class. Otherwise, select the 32-bit register class.
4112 if (VT == MVT::Other)
4113 VT = (Subtarget.isFP64bit() || !(Reg % 2)) ? MVT::f64 : MVT::f32;
4114
4115 RC = getRegClassFor(VT);
4116
4117 if (RC == &Mips::AFGR64RegClass) {
4118 assert(Reg % 2 == 0);
4119 Reg >>= 1;
4120 }
4121 } else if (Prefix == "$fcc") // Parse $fcc0-$fcc7.
4122 RC = TRI->getRegClass(i: Mips::FCCRegClassID);
4123 else if (Prefix == "$w") { // Parse $w0-$w31.
4124 RC = getRegClassFor(VT: (VT == MVT::Other) ? MVT::v16i8 : VT);
4125 } else { // Parse $0-$31.
4126 assert(Prefix == "$");
4127 RC = getRegClassFor(VT: (VT == MVT::Other) ? MVT::i32 : VT);
4128 }
4129
4130 assert(Reg < RC->getNumRegs());
4131 return std::make_pair(x: *(RC->begin() + Reg), y&: RC);
4132}
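
// A sketch of the mapping above (register choices hypothetical): "{$f26}"
// requesting f64 on a 32-bit FPU selects AFGR64, so register 26 is halved to
// the paired register D13; "{$w7}" with VT == MVT::Other defaults to the
// v16i8 MSA register class; and "{hi}"/"{lo}" map directly to the HI32/LO32
// register classes.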
4133
/// Given a register class constraint, like 'r', if this corresponds directly
/// to an LLVM register class, return a register number of 0 and the register
/// class pointer.
4137std::pair<unsigned, const TargetRegisterClass *>
4138MipsTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
4139 StringRef Constraint,
4140 MVT VT) const {
4141 if (Constraint.size() == 1) {
4142 switch (Constraint[0]) {
4143 case 'd': // Address register. Same as 'r' unless generating MIPS16 code.
4144 case 'y': // Same as 'r'. Exists for compatibility.
4145 case 'r':
4146 if ((VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8 ||
4147 VT == MVT::i1) ||
4148 (VT == MVT::f32 && Subtarget.useSoftFloat())) {
4149 if (Subtarget.inMips16Mode())
4150 return std::make_pair(x: 0U, y: &Mips::CPU16RegsRegClass);
4151 return std::make_pair(x: 0U, y: &Mips::GPR32RegClass);
4152 }
4153 if ((VT == MVT::i64 || (VT == MVT::f64 && Subtarget.useSoftFloat())) &&
4154 !Subtarget.isGP64bit())
4155 return std::make_pair(x: 0U, y: &Mips::GPR32RegClass);
4156 if ((VT == MVT::i64 || (VT == MVT::f64 && Subtarget.useSoftFloat())) &&
4157 Subtarget.isGP64bit())
4158 return std::make_pair(x: 0U, y: &Mips::GPR64RegClass);
4159 // This will generate an error message
4160 return std::make_pair(x: 0U, y: nullptr);
4161 case 'f': // FPU or MSA register
4162 if (VT == MVT::v16i8)
4163 return std::make_pair(x: 0U, y: &Mips::MSA128BRegClass);
4164 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
4165 return std::make_pair(x: 0U, y: &Mips::MSA128HRegClass);
4166 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
4167 return std::make_pair(x: 0U, y: &Mips::MSA128WRegClass);
4168 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
4169 return std::make_pair(x: 0U, y: &Mips::MSA128DRegClass);
4170 else if (VT == MVT::f32)
4171 return std::make_pair(x: 0U, y: &Mips::FGR32RegClass);
4172 else if ((VT == MVT::f64) && (!Subtarget.isSingleFloat())) {
4173 if (Subtarget.isFP64bit())
4174 return std::make_pair(x: 0U, y: &Mips::FGR64RegClass);
4175 return std::make_pair(x: 0U, y: &Mips::AFGR64RegClass);
4176 }
4177 break;
4178 case 'c': // register suitable for indirect jump
4179 if (VT == MVT::i32)
4180 return std::make_pair(x: (unsigned)Mips::T9, y: &Mips::GPR32RegClass);
4181 if (VT == MVT::i64)
4182 return std::make_pair(x: (unsigned)Mips::T9_64, y: &Mips::GPR64RegClass);
4183 // This will generate an error message
4184 return std::make_pair(x: 0U, y: nullptr);
4185 case 'l': // use the `lo` register to store values
4186 // that are no bigger than a word
4187 if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8)
4188 return std::make_pair(x: (unsigned)Mips::LO0, y: &Mips::LO32RegClass);
4189 return std::make_pair(x: (unsigned)Mips::LO0_64, y: &Mips::LO64RegClass);
4190 case 'x': // use the concatenated `hi` and `lo` registers
4191 // to store doubleword values
      // FIXME: Not yet triggering the use of both the hi and lo registers
4193 // This will generate an error message
4194 return std::make_pair(x: 0U, y: nullptr);
4195 }
4196 }
4197
4198 if (!Constraint.empty()) {
4199 std::pair<unsigned, const TargetRegisterClass *> R;
4200 R = parseRegForInlineAsmConstraint(C: Constraint, VT);
4201
4202 if (R.second)
4203 return R;
4204 }
4205
4206 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
4207}
4208
4209/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
4210/// vector. If it is invalid, don't add anything to Ops.
4211void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
4212 StringRef Constraint,
4213 std::vector<SDValue> &Ops,
4214 SelectionDAG &DAG) const {
4215 SDLoc DL(Op);
4216 SDValue Result;
4217
4218 // Only support length 1 constraints for now.
4219 if (Constraint.size() > 1)
4220 return;
4221
4222 char ConstraintLetter = Constraint[0];
4223 switch (ConstraintLetter) {
4224 default: break; // This will fall through to the generic implementation
4225 case 'I': // Signed 16 bit constant
4226 // If this fails, the parent routine will give an error
4227 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: Op)) {
4228 EVT Type = Op.getValueType();
4229 int64_t Val = C->getSExtValue();
4230 if (isInt<16>(x: Val)) {
4231 Result = DAG.getTargetConstant(Val, DL, VT: Type);
4232 break;
4233 }
4234 }
4235 return;
4236 case 'J': // integer zero
4237 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: Op)) {
4238 EVT Type = Op.getValueType();
4239 int64_t Val = C->getZExtValue();
4240 if (Val == 0) {
4241 Result = DAG.getTargetConstant(Val: 0, DL, VT: Type);
4242 break;
4243 }
4244 }
4245 return;
4246 case 'K': // unsigned 16 bit immediate
4247 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: Op)) {
4248 EVT Type = Op.getValueType();
4249 uint64_t Val = (uint64_t)C->getZExtValue();
4250 if (isUInt<16>(x: Val)) {
4251 Result = DAG.getTargetConstant(Val, DL, VT: Type);
4252 break;
4253 }
4254 }
4255 return;
4256 case 'L': // signed 32 bit immediate where lower 16 bits are 0
4257 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: Op)) {
4258 EVT Type = Op.getValueType();
4259 int64_t Val = C->getSExtValue();
      if (isInt<32>(x: Val) && (Val & 0xffff) == 0) {
4261 Result = DAG.getTargetConstant(Val, DL, VT: Type);
4262 break;
4263 }
4264 }
4265 return;
4266 case 'N': // immediate in the range of -65535 to -1 (inclusive)
4267 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: Op)) {
4268 EVT Type = Op.getValueType();
4269 int64_t Val = C->getSExtValue();
4270 if ((Val >= -65535) && (Val <= -1)) {
4271 Result = DAG.getTargetConstant(Val, DL, VT: Type);
4272 break;
4273 }
4274 }
4275 return;
4276 case 'O': // signed 15 bit immediate
4277 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: Op)) {
4278 EVT Type = Op.getValueType();
4279 int64_t Val = C->getSExtValue();
      if (isInt<15>(x: Val)) {
4281 Result = DAG.getTargetConstant(Val, DL, VT: Type);
4282 break;
4283 }
4284 }
4285 return;
4286 case 'P': // immediate in the range of 1 to 65535 (inclusive)
4287 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: Op)) {
4288 EVT Type = Op.getValueType();
4289 int64_t Val = C->getSExtValue();
4290 if ((Val <= 65535) && (Val >= 1)) {
4291 Result = DAG.getTargetConstant(Val, DL, VT: Type);
4292 break;
4293 }
4294 }
4295 return;
4296 }
4297
4298 if (Result.getNode()) {
4299 Ops.push_back(x: Result);
4300 return;
4301 }
4302
4303 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
4304}
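
// Illustrative only: C-level uses of the immediate constraints handled above
// (identifiers hypothetical):
//
//   asm("addiu %0, %1, %2" : "=r"(Res) : "r"(X), "I"(-8));  // signed 16-bit
//   asm("ori   %0, %1, %2" : "=r"(Res) : "r"(X), "K"(255)); // unsigned 16-bit
//
// A constant that fails its range check is not added to Ops, and the caller
// reports the error.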
4305
4306bool MipsTargetLowering::isLegalAddressingMode(const DataLayout &DL,
4307 const AddrMode &AM, Type *Ty,
4308 unsigned AS,
4309 Instruction *I) const {
4310 // No global is ever allowed as a base.
4311 if (AM.BaseGV)
4312 return false;
4313
4314 switch (AM.Scale) {
4315 case 0: // "r+i" or just "i", depending on HasBaseReg.
4316 break;
4317 case 1:
4318 if (!AM.HasBaseReg) // allow "r+i".
4319 break;
4320 return false; // disallow "r+r" or "r+r+i".
4321 default:
4322 return false;
4323 }
4324
4325 return true;
4326}
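
// In effect, only "r+i" (base register plus immediate) and bare "i" forms
// are accepted; e.g. a load such as "lw $2, 16($sp)" is representable, while
// a reg+reg form is rejected above and must be lowered with a separate add.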
4327
4328bool
4329MipsTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
4330 // The Mips target isn't yet aware of offsets.
4331 return false;
4332}
4333
4334EVT MipsTargetLowering::getOptimalMemOpType(
4335 const MemOp &Op, const AttributeList &FuncAttributes) const {
4336 if (Subtarget.hasMips64())
4337 return MVT::i64;
4338
4339 return MVT::i32;
4340}
4341
4342bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
4343 bool ForCodeSize) const {
4344 if (VT != MVT::f32 && VT != MVT::f64)
4345 return false;
4346 if (Imm.isNegZero())
4347 return false;
4348 return Imm.isZero();
4349}
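
// Consequently +0.0 is the only legal FP immediate: it can be materialized
// by moving $zero into an FP register (e.g. mtc1 $zero, $f0); every other
// constant is loaded from the constant pool instead.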
4350
4351unsigned MipsTargetLowering::getJumpTableEncoding() const {
4352
4353 // FIXME: For space reasons this should be: EK_GPRel32BlockAddress.
4354 if (ABI.IsN64() && isPositionIndependent())
4355 return MachineJumpTableInfo::EK_GPRel64BlockAddress;
4356
4357 return TargetLowering::getJumpTableEncoding();
4358}
4359
4360bool MipsTargetLowering::useSoftFloat() const {
4361 return Subtarget.useSoftFloat();
4362}
4363
4364void MipsTargetLowering::copyByValRegs(
4365 SDValue Chain, const SDLoc &DL, std::vector<SDValue> &OutChains,
4366 SelectionDAG &DAG, const ISD::ArgFlagsTy &Flags,
4367 SmallVectorImpl<SDValue> &InVals, const Argument *FuncArg,
4368 unsigned FirstReg, unsigned LastReg, const CCValAssign &VA,
4369 MipsCCState &State) const {
4370 MachineFunction &MF = DAG.getMachineFunction();
4371 MachineFrameInfo &MFI = MF.getFrameInfo();
4372 unsigned GPRSizeInBytes = Subtarget.getGPRSizeInBytes();
4373 unsigned NumRegs = LastReg - FirstReg;
4374 unsigned RegAreaSize = NumRegs * GPRSizeInBytes;
4375 unsigned FrameObjSize = std::max(a: Flags.getByValSize(), b: RegAreaSize);
4376 int FrameObjOffset;
4377 ArrayRef<MCPhysReg> ByValArgRegs = ABI.GetByValArgRegs();
4378
4379 if (RegAreaSize)
4380 FrameObjOffset =
4381 (int)ABI.GetCalleeAllocdArgSizeInBytes(CC: State.getCallingConv()) -
4382 (int)((ByValArgRegs.size() - FirstReg) * GPRSizeInBytes);
4383 else
4384 FrameObjOffset = VA.getLocMemOffset();
4385
4386 // Create frame object.
4387 EVT PtrTy = getPointerTy(DL: DAG.getDataLayout());
  // Make the fixed object mutable so that the load instructions referencing
  // it have their memory dependencies added.
  // Set the frame object as isAliased, which clears the underlying-objects
  // vector in ScheduleDAGInstrs::buildSchedGraph(), resulting in the addition
  // of all stores as dependencies for loads referencing this fixed object.
4393 int FI = MFI.CreateFixedObject(Size: FrameObjSize, SPOffset: FrameObjOffset, IsImmutable: false, isAliased: true);
4394 SDValue FIN = DAG.getFrameIndex(FI, VT: PtrTy);
4395 InVals.push_back(Elt: FIN);
4396
4397 if (!NumRegs)
4398 return;
4399
4400 // Copy arg registers.
4401 MVT RegTy = MVT::getIntegerVT(BitWidth: GPRSizeInBytes * 8);
4402 const TargetRegisterClass *RC = getRegClassFor(VT: RegTy);
4403
4404 for (unsigned I = 0; I < NumRegs; ++I) {
4405 unsigned ArgReg = ByValArgRegs[FirstReg + I];
4406 unsigned VReg = addLiveIn(MF, PReg: ArgReg, RC);
4407 unsigned Offset = I * GPRSizeInBytes;
4408 SDValue StorePtr = DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrTy, N1: FIN,
4409 N2: DAG.getConstant(Val: Offset, DL, VT: PtrTy));
4410 SDValue Store = DAG.getStore(Chain, dl: DL, Val: DAG.getRegister(Reg: VReg, VT: RegTy),
4411 Ptr: StorePtr, PtrInfo: MachinePointerInfo(FuncArg, Offset));
4412 OutChains.push_back(x: Store);
4413 }
4414}
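
// A sketch for O32 (values hypothetical): a 16-byte byval argument assigned
// $a0-$a3 gets FrameObjOffset 16 - 4 * 4 = 0, i.e. the start of the argument
// save area, and the loop above spills the four argument registers to the
// frame object at offsets 0, 4, 8 and 12.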
4415
4416// Copy byVal arg to registers and stack.
4417void MipsTargetLowering::passByValArg(
4418 SDValue Chain, const SDLoc &DL,
4419 std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
4420 SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
4421 MachineFrameInfo &MFI, SelectionDAG &DAG, SDValue Arg, unsigned FirstReg,
4422 unsigned LastReg, const ISD::ArgFlagsTy &Flags, bool isLittle,
4423 const CCValAssign &VA) const {
4424 unsigned ByValSizeInBytes = Flags.getByValSize();
4425 unsigned OffsetInBytes = 0; // From beginning of struct
4426 unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
4427 Align Alignment =
4428 std::min(a: Flags.getNonZeroByValAlign(), b: Align(RegSizeInBytes));
4429 EVT PtrTy = getPointerTy(DL: DAG.getDataLayout()),
4430 RegTy = MVT::getIntegerVT(BitWidth: RegSizeInBytes * 8);
4431 unsigned NumRegs = LastReg - FirstReg;
4432
4433 if (NumRegs) {
4434 ArrayRef<MCPhysReg> ArgRegs = ABI.GetByValArgRegs();
4435 bool LeftoverBytes = (NumRegs * RegSizeInBytes > ByValSizeInBytes);
4436 unsigned I = 0;
4437
4438 // Copy words to registers.
4439 for (; I < NumRegs - LeftoverBytes; ++I, OffsetInBytes += RegSizeInBytes) {
4440 SDValue LoadPtr = DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrTy, N1: Arg,
4441 N2: DAG.getConstant(Val: OffsetInBytes, DL, VT: PtrTy));
4442 SDValue LoadVal = DAG.getLoad(VT: RegTy, dl: DL, Chain, Ptr: LoadPtr,
4443 PtrInfo: MachinePointerInfo(), Alignment);
4444 MemOpChains.push_back(Elt: LoadVal.getValue(R: 1));
4445 unsigned ArgReg = ArgRegs[FirstReg + I];
4446 RegsToPass.push_back(x: std::make_pair(x&: ArgReg, y&: LoadVal));
4447 }
4448
4449 // Return if the struct has been fully copied.
4450 if (ByValSizeInBytes == OffsetInBytes)
4451 return;
4452
4453 // Copy the remainder of the byval argument with sub-word loads and shifts.
4454 if (LeftoverBytes) {
4455 SDValue Val;
4456
4457 for (unsigned LoadSizeInBytes = RegSizeInBytes / 2, TotalBytesLoaded = 0;
4458 OffsetInBytes < ByValSizeInBytes; LoadSizeInBytes /= 2) {
4459 unsigned RemainingSizeInBytes = ByValSizeInBytes - OffsetInBytes;
4460
4461 if (RemainingSizeInBytes < LoadSizeInBytes)
4462 continue;
4463
4464 // Load subword.
4465 SDValue LoadPtr = DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrTy, N1: Arg,
4466 N2: DAG.getConstant(Val: OffsetInBytes, DL,
4467 VT: PtrTy));
4468 SDValue LoadVal = DAG.getExtLoad(
4469 ExtType: ISD::ZEXTLOAD, dl: DL, VT: RegTy, Chain, Ptr: LoadPtr, PtrInfo: MachinePointerInfo(),
4470 MemVT: MVT::getIntegerVT(BitWidth: LoadSizeInBytes * 8), Alignment);
4471 MemOpChains.push_back(Elt: LoadVal.getValue(R: 1));
4472
4473 // Shift the loaded value.
4474 unsigned Shamt;
4475
4476 if (isLittle)
4477 Shamt = TotalBytesLoaded * 8;
4478 else
4479 Shamt = (RegSizeInBytes - (TotalBytesLoaded + LoadSizeInBytes)) * 8;
4480
4481 SDValue Shift = DAG.getNode(Opcode: ISD::SHL, DL, VT: RegTy, N1: LoadVal,
4482 N2: DAG.getConstant(Val: Shamt, DL, VT: MVT::i32));
4483
4484 if (Val.getNode())
4485 Val = DAG.getNode(Opcode: ISD::OR, DL, VT: RegTy, N1: Val, N2: Shift);
4486 else
4487 Val = Shift;
4488
4489 OffsetInBytes += LoadSizeInBytes;
4490 TotalBytesLoaded += LoadSizeInBytes;
4491 Alignment = std::min(a: Alignment, b: Align(LoadSizeInBytes));
4492 }
4493
4494 unsigned ArgReg = ArgRegs[FirstReg + I];
4495 RegsToPass.push_back(x: std::make_pair(x&: ArgReg, y&: Val));
4496 return;
4497 }
4498 }
4499
  // Copy the remainder of the byval argument to the stack with memcpy.
4501 unsigned MemCpySize = ByValSizeInBytes - OffsetInBytes;
4502 SDValue Src = DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrTy, N1: Arg,
4503 N2: DAG.getConstant(Val: OffsetInBytes, DL, VT: PtrTy));
4504 SDValue Dst = DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrTy, N1: StackPtr,
4505 N2: DAG.getIntPtrConstant(Val: VA.getLocMemOffset(), DL));
4506 Chain = DAG.getMemcpy(
4507 Chain, dl: DL, Dst, Src, Size: DAG.getConstant(Val: MemCpySize, DL, VT: PtrTy),
4508 Alignment: Align(Alignment), /*isVolatile=*/isVol: false, /*AlwaysInline=*/false,
4509 /*CI=*/nullptr, OverrideTailCall: std::nullopt, DstPtrInfo: MachinePointerInfo(), SrcPtrInfo: MachinePointerInfo());
4510 MemOpChains.push_back(Elt: Chain);
4511}
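
// A sketch of the leftover-bytes path above for a 7-byte byval argument on
// little-endian O32 (values hypothetical): bytes 0-3 are passed through one
// word load; the remaining bytes are assembled from a 2-byte zext load at
// offset 4 (shift amount 0) OR'd with a 1-byte zext load at offset 6 shifted
// left by 16, and the combined word is passed in the next argument register.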
4512
4513void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
4514 SDValue Chain, const SDLoc &DL,
4515 SelectionDAG &DAG,
4516 CCState &State) const {
4517 ArrayRef<MCPhysReg> ArgRegs = ABI.GetVarArgRegs();
4518 unsigned Idx = State.getFirstUnallocated(Regs: ArgRegs);
4519 unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
4520 MVT RegTy = MVT::getIntegerVT(BitWidth: RegSizeInBytes * 8);
4521 const TargetRegisterClass *RC = getRegClassFor(VT: RegTy);
4522 MachineFunction &MF = DAG.getMachineFunction();
4523 MachineFrameInfo &MFI = MF.getFrameInfo();
4524 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
4525
4526 // Offset of the first variable argument from stack pointer.
4527 int VaArgOffset;
4528
4529 if (ArgRegs.size() == Idx)
4530 VaArgOffset = alignTo(Value: State.getStackSize(), Align: RegSizeInBytes);
4531 else {
4532 VaArgOffset =
4533 (int)ABI.GetCalleeAllocdArgSizeInBytes(CC: State.getCallingConv()) -
4534 (int)(RegSizeInBytes * (ArgRegs.size() - Idx));
4535 }
4536
  // Record the frame index of the first variable argument,
  // which is a value needed by VASTART.
4539 int FI = MFI.CreateFixedObject(Size: RegSizeInBytes, SPOffset: VaArgOffset, IsImmutable: true);
4540 MipsFI->setVarArgsFrameIndex(FI);
4541
4542 // Copy the integer registers that have not been used for argument passing
4543 // to the argument register save area. For O32, the save area is allocated
4544 // in the caller's stack frame, while for N32/64, it is allocated in the
4545 // callee's stack frame.
4546 for (unsigned I = Idx; I < ArgRegs.size();
4547 ++I, VaArgOffset += RegSizeInBytes) {
4548 unsigned Reg = addLiveIn(MF, PReg: ArgRegs[I], RC);
4549 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl: DL, Reg, VT: RegTy);
4550 FI = MFI.CreateFixedObject(Size: RegSizeInBytes, SPOffset: VaArgOffset, IsImmutable: true);
4551 SDValue PtrOff = DAG.getFrameIndex(FI, VT: getPointerTy(DL: DAG.getDataLayout()));
4552 SDValue Store =
4553 DAG.getStore(Chain, dl: DL, Val: ArgValue, Ptr: PtrOff, PtrInfo: MachinePointerInfo());
4554 cast<StoreSDNode>(Val: Store.getNode())->getMemOperand()->setValue(
4555 (Value *)nullptr);
4556 OutChains.push_back(x: Store);
4557 }
4558}
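
// For a hypothetical O32 call such as f(1) with f variadic, $a0 holds the
// fixed argument and Idx is 1, so VaArgOffset is 16 - 4 * 3 = 4; the loop
// above then spills the unused $a1-$a3 to fixed objects at offsets 4, 8 and
// 12, and the lowering of va_start reads the recorded VarArgsFrameIndex.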
4559
4560void MipsTargetLowering::HandleByVal(CCState *State, unsigned &Size,
4561 Align Alignment) const {
4562 const TargetFrameLowering *TFL = Subtarget.getFrameLowering();
4563
4564 assert(Size && "Byval argument's size shouldn't be 0.");
4565
4566 Alignment = std::min(a: Alignment, b: TFL->getStackAlign());
4567
4568 unsigned FirstReg = 0;
4569 unsigned NumRegs = 0;
4570
4571 if (State->getCallingConv() != CallingConv::Fast) {
4572 unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
4573 ArrayRef<MCPhysReg> IntArgRegs = ABI.GetByValArgRegs();
4574 // FIXME: The O32 case actually describes no shadow registers.
4575 const MCPhysReg *ShadowRegs =
4576 ABI.IsO32() ? IntArgRegs.data() : Mips64DPRegs;
4577
4578 // We used to check the size as well but we can't do that anymore since
4579 // CCState::HandleByVal() rounds up the size after calling this function.
4580 assert(
4581 Alignment >= Align(RegSizeInBytes) &&
4582 "Byval argument's alignment should be a multiple of RegSizeInBytes.");
4583
4584 FirstReg = State->getFirstUnallocated(Regs: IntArgRegs);
4585
4586 // If Alignment > RegSizeInBytes, the first arg register must be even.
4587 // FIXME: This condition happens to do the right thing but it's not the
4588 // right way to test it. We want to check that the stack frame offset
4589 // of the register is aligned.
4590 if ((Alignment > RegSizeInBytes) && (FirstReg % 2)) {
4591 State->AllocateReg(Reg: IntArgRegs[FirstReg], ShadowReg: ShadowRegs[FirstReg]);
4592 ++FirstReg;
4593 }
4594
4595 // Mark the registers allocated.
4596 Size = alignTo(Value: Size, Align: RegSizeInBytes);
4597 for (unsigned I = FirstReg; Size > 0 && (I < IntArgRegs.size());
4598 Size -= RegSizeInBytes, ++I, ++NumRegs)
4599 State->AllocateReg(Reg: IntArgRegs[I], ShadowReg: ShadowRegs[I]);
4600 }
4601
4602 State->addInRegsParamInfo(RegBegin: FirstReg, RegEnd: FirstReg + NumRegs);
4603}
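
// Worked example (hypothetical): on O32, an 8-byte-aligned byval argument
// arriving when FirstReg == 1 first burns $a1 so that copying starts at the
// even register $a2; an 8-byte argument then allocates $a2-$a3, giving
// NumRegs == 2.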
4604
4605MachineBasicBlock *MipsTargetLowering::emitPseudoSELECT(MachineInstr &MI,
4606 MachineBasicBlock *BB,
4607 bool isFPCmp,
4608 unsigned Opc) const {
4609 assert(!(Subtarget.hasMips4() || Subtarget.hasMips32()) &&
         "Subtarget already supports SELECT nodes with the use of "
         "conditional-move instructions.");
4612
4613 const TargetInstrInfo *TII =
4614 Subtarget.getInstrInfo();
4615 DebugLoc DL = MI.getDebugLoc();
4616
4617 // To "insert" a SELECT instruction, we actually have to insert the
4618 // diamond control-flow pattern. The incoming instruction knows the
4619 // destination vreg to set, the condition code register to branch on, the
4620 // true/false values to select between, and a branch opcode to use.
4621 const BasicBlock *LLVM_BB = BB->getBasicBlock();
4622 MachineFunction::iterator It = ++BB->getIterator();
4623
4624 // thisMBB:
4625 // ...
4626 // TrueVal = ...
4627 // setcc r1, r2, r3
4628 // bNE r1, r0, copy1MBB
4629 // fallthrough --> copy0MBB
4630 MachineBasicBlock *thisMBB = BB;
4631 MachineFunction *F = BB->getParent();
4632 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(BB: LLVM_BB);
4633 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(BB: LLVM_BB);
4634 F->insert(MBBI: It, MBB: copy0MBB);
4635 F->insert(MBBI: It, MBB: sinkMBB);
4636
4637 // Transfer the remainder of BB and its successor edges to sinkMBB.
4638 sinkMBB->splice(Where: sinkMBB->begin(), Other: BB,
4639 From: std::next(x: MachineBasicBlock::iterator(MI)), To: BB->end());
4640 sinkMBB->transferSuccessorsAndUpdatePHIs(FromMBB: BB);
4641
4642 // Next, add the true and fallthrough blocks as its successors.
4643 BB->addSuccessor(Succ: copy0MBB);
4644 BB->addSuccessor(Succ: sinkMBB);
4645
4646 if (isFPCmp) {
4647 // bc1[tf] cc, sinkMBB
4648 BuildMI(BB, MIMD: DL, MCID: TII->get(Opcode: Opc))
4649 .addReg(RegNo: MI.getOperand(i: 1).getReg())
4650 .addMBB(MBB: sinkMBB);
4651 } else {
4652 // bne rs, $0, sinkMBB
4653 BuildMI(BB, MIMD: DL, MCID: TII->get(Opcode: Opc))
4654 .addReg(RegNo: MI.getOperand(i: 1).getReg())
4655 .addReg(RegNo: Mips::ZERO)
4656 .addMBB(MBB: sinkMBB);
4657 }
4658
4659 // copy0MBB:
4660 // %FalseValue = ...
4661 // # fallthrough to sinkMBB
4662 BB = copy0MBB;
4663
4664 // Update machine-CFG edges
4665 BB->addSuccessor(Succ: sinkMBB);
4666
4667 // sinkMBB:
4668 // %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ]
4669 // ...
4670 BB = sinkMBB;
4671
4672 BuildMI(BB&: *BB, I: BB->begin(), MIMD: DL, MCID: TII->get(Opcode: Mips::PHI), DestReg: MI.getOperand(i: 0).getReg())
4673 .addReg(RegNo: MI.getOperand(i: 2).getReg())
4674 .addMBB(MBB: thisMBB)
4675 .addReg(RegNo: MI.getOperand(i: 3).getReg())
4676 .addMBB(MBB: copy0MBB);
4677
4678 MI.eraseFromParent(); // The pseudo instruction is gone now.
4679
4680 return BB;
4681}
4682
4683MachineBasicBlock *
4684MipsTargetLowering::emitPseudoD_SELECT(MachineInstr &MI,
4685 MachineBasicBlock *BB) const {
4686 assert(!(Subtarget.hasMips4() || Subtarget.hasMips32()) &&
         "Subtarget already supports SELECT nodes with the use of "
         "conditional-move instructions.");
4689
4690 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
4691 DebugLoc DL = MI.getDebugLoc();
4692
  // D_SELECT substitutes two back-to-back SELECT nodes that share the same
  // condition operand. On machines without a conditional-move instruction,
  // this avoids the redundant branches that would result from lowering the
  // two SELECT pseudo instructions as two separate diamond patterns.
4698 const BasicBlock *LLVM_BB = BB->getBasicBlock();
4699 MachineFunction::iterator It = ++BB->getIterator();
4700
4701 // thisMBB:
4702 // ...
4703 // TrueVal = ...
4704 // setcc r1, r2, r3
4705 // bNE r1, r0, copy1MBB
4706 // fallthrough --> copy0MBB
4707 MachineBasicBlock *thisMBB = BB;
4708 MachineFunction *F = BB->getParent();
4709 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(BB: LLVM_BB);
4710 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(BB: LLVM_BB);
4711 F->insert(MBBI: It, MBB: copy0MBB);
4712 F->insert(MBBI: It, MBB: sinkMBB);
4713
4714 // Transfer the remainder of BB and its successor edges to sinkMBB.
4715 sinkMBB->splice(Where: sinkMBB->begin(), Other: BB,
4716 From: std::next(x: MachineBasicBlock::iterator(MI)), To: BB->end());
4717 sinkMBB->transferSuccessorsAndUpdatePHIs(FromMBB: BB);
4718
4719 // Next, add the true and fallthrough blocks as its successors.
4720 BB->addSuccessor(Succ: copy0MBB);
4721 BB->addSuccessor(Succ: sinkMBB);
4722
4723 // bne rs, $0, sinkMBB
4724 BuildMI(BB, MIMD: DL, MCID: TII->get(Opcode: Mips::BNE))
4725 .addReg(RegNo: MI.getOperand(i: 2).getReg())
4726 .addReg(RegNo: Mips::ZERO)
4727 .addMBB(MBB: sinkMBB);
4728
4729 // copy0MBB:
4730 // %FalseValue = ...
4731 // # fallthrough to sinkMBB
4732 BB = copy0MBB;
4733
4734 // Update machine-CFG edges
4735 BB->addSuccessor(Succ: sinkMBB);
4736
4737 // sinkMBB:
4738 // %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ]
4739 // ...
4740 BB = sinkMBB;
4741
  // Use two PHI nodes to select the two results
4743 BuildMI(BB&: *BB, I: BB->begin(), MIMD: DL, MCID: TII->get(Opcode: Mips::PHI), DestReg: MI.getOperand(i: 0).getReg())
4744 .addReg(RegNo: MI.getOperand(i: 3).getReg())
4745 .addMBB(MBB: thisMBB)
4746 .addReg(RegNo: MI.getOperand(i: 5).getReg())
4747 .addMBB(MBB: copy0MBB);
4748 BuildMI(BB&: *BB, I: BB->begin(), MIMD: DL, MCID: TII->get(Opcode: Mips::PHI), DestReg: MI.getOperand(i: 1).getReg())
4749 .addReg(RegNo: MI.getOperand(i: 4).getReg())
4750 .addMBB(MBB: thisMBB)
4751 .addReg(RegNo: MI.getOperand(i: 6).getReg())
4752 .addMBB(MBB: copy0MBB);
4753
4754 MI.eraseFromParent(); // The pseudo instruction is gone now.
4755
4756 return BB;
4757}
4758
4759// FIXME? Maybe this could be a TableGen attribute on some registers and
4760// this table could be generated automatically from RegInfo.
4761Register
4762MipsTargetLowering::getRegisterByName(const char *RegName, LLT VT,
4763 const MachineFunction &MF) const {
4764 // The Linux kernel uses $28 and sp.
4765 if (Subtarget.isGP64bit()) {
4766 Register Reg = StringSwitch<Register>(RegName)
4767 .Case(S: "$28", Value: Mips::GP_64)
4768 .Case(S: "sp", Value: Mips::SP_64)
4769 .Default(Value: Register());
4770 if (Reg)
4771 return Reg;
4772 } else {
4773 Register Reg = StringSwitch<Register>(RegName)
4774 .Case(S: "$28", Value: Mips::GP)
4775 .Case(S: "sp", Value: Mips::SP)
4776 .Default(Value: Register());
4777 if (Reg)
4778 return Reg;
4779 }
4780 report_fatal_error(reason: "Invalid register name global variable");
4781}
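
// Illustrative IR that reaches this hook (a sketch; only "$28" and "sp" are
// accepted):
//
//   %sp = call i32 @llvm.read_register.i32(metadata !0)
//   !0 = !{!"sp"}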
4782
4783MachineBasicBlock *MipsTargetLowering::emitLDR_W(MachineInstr &MI,
4784 MachineBasicBlock *BB) const {
4785 MachineFunction *MF = BB->getParent();
4786 MachineRegisterInfo &MRI = MF->getRegInfo();
4787 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
4788 const bool IsLittle = Subtarget.isLittle();
4789 DebugLoc DL = MI.getDebugLoc();
4790
4791 Register Dest = MI.getOperand(i: 0).getReg();
4792 Register Address = MI.getOperand(i: 1).getReg();
4793 unsigned Imm = MI.getOperand(i: 2).getImm();
4794
4795 MachineBasicBlock::iterator I(MI);
4796
4797 if (Subtarget.hasMips32r6() || Subtarget.hasMips64r6()) {
    // MIPS release 6 can load from an address that is not naturally aligned.
4799 Register Temp = MRI.createVirtualRegister(RegClass: &Mips::GPR32RegClass);
4800 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::LW))
4801 .addDef(RegNo: Temp)
4802 .addUse(RegNo: Address)
4803 .addImm(Val: Imm);
4804 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::FILL_W)).addDef(RegNo: Dest).addUse(RegNo: Temp);
4805 } else {
4806 // Mips release 5 needs to use instructions that can load from an unaligned
4807 // memory address.
4808 Register LoadHalf = MRI.createVirtualRegister(RegClass: &Mips::GPR32RegClass);
4809 Register LoadFull = MRI.createVirtualRegister(RegClass: &Mips::GPR32RegClass);
4810 Register Undef = MRI.createVirtualRegister(RegClass: &Mips::GPR32RegClass);
4811 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::IMPLICIT_DEF)).addDef(RegNo: Undef);
4812 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::LWR))
4813 .addDef(RegNo: LoadHalf)
4814 .addUse(RegNo: Address)
4815 .addImm(Val: Imm + (IsLittle ? 0 : 3))
4816 .addUse(RegNo: Undef);
4817 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::LWL))
4818 .addDef(RegNo: LoadFull)
4819 .addUse(RegNo: Address)
4820 .addImm(Val: Imm + (IsLittle ? 3 : 0))
4821 .addUse(RegNo: LoadHalf);
4822 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::FILL_W)).addDef(RegNo: Dest).addUse(RegNo: LoadFull);
4823 }
4824
4825 MI.eraseFromParent();
4826 return BB;
4827}
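
// Byte-coverage sketch for the pre-R6 path above (little-endian, word at
// unaligned address A == Address + Imm): LWR at A fills the low-order bytes
// of the register and LWL at A + 3 fills the high-order bytes; together they
// assemble the full word, which FILL_W then splats into the MSA destination.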
4828
4829MachineBasicBlock *MipsTargetLowering::emitLDR_D(MachineInstr &MI,
4830 MachineBasicBlock *BB) const {
4831 MachineFunction *MF = BB->getParent();
4832 MachineRegisterInfo &MRI = MF->getRegInfo();
4833 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
4834 const bool IsLittle = Subtarget.isLittle();
4835 DebugLoc DL = MI.getDebugLoc();
4836
4837 Register Dest = MI.getOperand(i: 0).getReg();
4838 Register Address = MI.getOperand(i: 1).getReg();
4839 unsigned Imm = MI.getOperand(i: 2).getImm();
4840
4841 MachineBasicBlock::iterator I(MI);
4842
4843 if (Subtarget.hasMips32r6() || Subtarget.hasMips64r6()) {
    // MIPS release 6 can load from an address that is not naturally aligned.
4845 if (Subtarget.isGP64bit()) {
4846 Register Temp = MRI.createVirtualRegister(RegClass: &Mips::GPR64RegClass);
4847 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::LD))
4848 .addDef(RegNo: Temp)
4849 .addUse(RegNo: Address)
4850 .addImm(Val: Imm);
4851 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::FILL_D)).addDef(RegNo: Dest).addUse(RegNo: Temp);
4852 } else {
4853 Register Wtemp = MRI.createVirtualRegister(RegClass: &Mips::MSA128WRegClass);
4854 Register Lo = MRI.createVirtualRegister(RegClass: &Mips::GPR32RegClass);
4855 Register Hi = MRI.createVirtualRegister(RegClass: &Mips::GPR32RegClass);
4856 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::LW))
4857 .addDef(RegNo: Lo)
4858 .addUse(RegNo: Address)
4859 .addImm(Val: Imm + (IsLittle ? 0 : 4));
4860 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::LW))
4861 .addDef(RegNo: Hi)
4862 .addUse(RegNo: Address)
4863 .addImm(Val: Imm + (IsLittle ? 4 : 0));
4864 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::FILL_W)).addDef(RegNo: Wtemp).addUse(RegNo: Lo);
4865 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::INSERT_W), DestReg: Dest)
4866 .addUse(RegNo: Wtemp)
4867 .addUse(RegNo: Hi)
4868 .addImm(Val: 1);
4869 }
4870 } else {
4871 // Mips release 5 needs to use instructions that can load from an unaligned
4872 // memory address.
4873 Register LoHalf = MRI.createVirtualRegister(RegClass: &Mips::GPR32RegClass);
4874 Register LoFull = MRI.createVirtualRegister(RegClass: &Mips::GPR32RegClass);
4875 Register LoUndef = MRI.createVirtualRegister(RegClass: &Mips::GPR32RegClass);
4876 Register HiHalf = MRI.createVirtualRegister(RegClass: &Mips::GPR32RegClass);
4877 Register HiFull = MRI.createVirtualRegister(RegClass: &Mips::GPR32RegClass);
4878 Register HiUndef = MRI.createVirtualRegister(RegClass: &Mips::GPR32RegClass);
4879 Register Wtemp = MRI.createVirtualRegister(RegClass: &Mips::MSA128WRegClass);
4880 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::IMPLICIT_DEF)).addDef(RegNo: LoUndef);
4881 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::LWR))
4882 .addDef(RegNo: LoHalf)
4883 .addUse(RegNo: Address)
4884 .addImm(Val: Imm + (IsLittle ? 0 : 7))
4885 .addUse(RegNo: LoUndef);
4886 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::LWL))
4887 .addDef(RegNo: LoFull)
4888 .addUse(RegNo: Address)
4889 .addImm(Val: Imm + (IsLittle ? 3 : 4))
4890 .addUse(RegNo: LoHalf);
4891 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::IMPLICIT_DEF)).addDef(RegNo: HiUndef);
4892 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::LWR))
4893 .addDef(RegNo: HiHalf)
4894 .addUse(RegNo: Address)
4895 .addImm(Val: Imm + (IsLittle ? 4 : 3))
4896 .addUse(RegNo: HiUndef);
4897 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::LWL))
4898 .addDef(RegNo: HiFull)
4899 .addUse(RegNo: Address)
4900 .addImm(Val: Imm + (IsLittle ? 7 : 0))
4901 .addUse(RegNo: HiHalf);
4902 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::FILL_W)).addDef(RegNo: Wtemp).addUse(RegNo: LoFull);
4903 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::INSERT_W), DestReg: Dest)
4904 .addUse(RegNo: Wtemp)
4905 .addUse(RegNo: HiFull)
4906 .addImm(Val: 1);
4907 }
4908
4909 MI.eraseFromParent();
4910 return BB;
4911}
4912
4913MachineBasicBlock *MipsTargetLowering::emitSTR_W(MachineInstr &MI,
4914 MachineBasicBlock *BB) const {
4915 MachineFunction *MF = BB->getParent();
4916 MachineRegisterInfo &MRI = MF->getRegInfo();
4917 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
4918 const bool IsLittle = Subtarget.isLittle();
4919 DebugLoc DL = MI.getDebugLoc();
4920
4921 Register StoreVal = MI.getOperand(i: 0).getReg();
4922 Register Address = MI.getOperand(i: 1).getReg();
4923 unsigned Imm = MI.getOperand(i: 2).getImm();
4924
4925 MachineBasicBlock::iterator I(MI);
4926
4927 if (Subtarget.hasMips32r6() || Subtarget.hasMips64r6()) {
    // MIPS release 6 can store to an address that is not naturally aligned.
4929 Register BitcastW = MRI.createVirtualRegister(RegClass: &Mips::MSA128WRegClass);
4930 Register Tmp = MRI.createVirtualRegister(RegClass: &Mips::GPR32RegClass);
4931 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::COPY)).addDef(RegNo: BitcastW).addUse(RegNo: StoreVal);
4932 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::COPY_S_W))
4933 .addDef(RegNo: Tmp)
4934 .addUse(RegNo: BitcastW)
4935 .addImm(Val: 0);
4936 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::SW))
4937 .addUse(RegNo: Tmp)
4938 .addUse(RegNo: Address)
4939 .addImm(Val: Imm);
4940 } else {
4941 // Mips release 5 needs to use instructions that can store to an unaligned
4942 // memory address.
4943 Register Tmp = MRI.createVirtualRegister(RegClass: &Mips::GPR32RegClass);
4944 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::COPY_S_W))
4945 .addDef(RegNo: Tmp)
4946 .addUse(RegNo: StoreVal)
4947 .addImm(Val: 0);
4948 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::SWR))
4949 .addUse(RegNo: Tmp)
4950 .addUse(RegNo: Address)
4951 .addImm(Val: Imm + (IsLittle ? 0 : 3));
4952 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::SWL))
4953 .addUse(RegNo: Tmp)
4954 .addUse(RegNo: Address)
4955 .addImm(Val: Imm + (IsLittle ? 3 : 0));
4956 }
4957
4958 MI.eraseFromParent();
4959
4960 return BB;
4961}
4962
4963MachineBasicBlock *MipsTargetLowering::emitSTR_D(MachineInstr &MI,
4964 MachineBasicBlock *BB) const {
4965 MachineFunction *MF = BB->getParent();
4966 MachineRegisterInfo &MRI = MF->getRegInfo();
4967 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
4968 const bool IsLittle = Subtarget.isLittle();
4969 DebugLoc DL = MI.getDebugLoc();
4970
4971 Register StoreVal = MI.getOperand(i: 0).getReg();
4972 Register Address = MI.getOperand(i: 1).getReg();
4973 unsigned Imm = MI.getOperand(i: 2).getImm();
4974
4975 MachineBasicBlock::iterator I(MI);
4976
4977 if (Subtarget.hasMips32r6() || Subtarget.hasMips64r6()) {
    // MIPS release 6 can store to an address that is not naturally aligned.
4979 if (Subtarget.isGP64bit()) {
4980 Register BitcastD = MRI.createVirtualRegister(RegClass: &Mips::MSA128DRegClass);
4981 Register Lo = MRI.createVirtualRegister(RegClass: &Mips::GPR64RegClass);
4982 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::COPY))
4983 .addDef(RegNo: BitcastD)
4984 .addUse(RegNo: StoreVal);
4985 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::COPY_S_D))
4986 .addDef(RegNo: Lo)
4987 .addUse(RegNo: BitcastD)
4988 .addImm(Val: 0);
4989 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::SD))
4990 .addUse(RegNo: Lo)
4991 .addUse(RegNo: Address)
4992 .addImm(Val: Imm);
4993 } else {
4994 Register BitcastW = MRI.createVirtualRegister(RegClass: &Mips::MSA128WRegClass);
4995 Register Lo = MRI.createVirtualRegister(RegClass: &Mips::GPR32RegClass);
4996 Register Hi = MRI.createVirtualRegister(RegClass: &Mips::GPR32RegClass);
4997 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::COPY))
4998 .addDef(RegNo: BitcastW)
4999 .addUse(RegNo: StoreVal);
5000 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::COPY_S_W))
5001 .addDef(RegNo: Lo)
5002 .addUse(RegNo: BitcastW)
5003 .addImm(Val: 0);
5004 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::COPY_S_W))
5005 .addDef(RegNo: Hi)
5006 .addUse(RegNo: BitcastW)
5007 .addImm(Val: 1);
5008 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::SW))
5009 .addUse(RegNo: Lo)
5010 .addUse(RegNo: Address)
5011 .addImm(Val: Imm + (IsLittle ? 0 : 4));
5012 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::SW))
5013 .addUse(RegNo: Hi)
5014 .addUse(RegNo: Address)
5015 .addImm(Val: Imm + (IsLittle ? 4 : 0));
5016 }
5017 } else {
5018 // Mips release 5 needs to use instructions that can store to an unaligned
5019 // memory address.
5020 Register Bitcast = MRI.createVirtualRegister(RegClass: &Mips::MSA128WRegClass);
5021 Register Lo = MRI.createVirtualRegister(RegClass: &Mips::GPR32RegClass);
5022 Register Hi = MRI.createVirtualRegister(RegClass: &Mips::GPR32RegClass);
5023 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::COPY)).addDef(RegNo: Bitcast).addUse(RegNo: StoreVal);
5024 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::COPY_S_W))
5025 .addDef(RegNo: Lo)
5026 .addUse(RegNo: Bitcast)
5027 .addImm(Val: 0);
5028 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::COPY_S_W))
5029 .addDef(RegNo: Hi)
5030 .addUse(RegNo: Bitcast)
5031 .addImm(Val: 1);
5032 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::SWR))
5033 .addUse(RegNo: Lo)
5034 .addUse(RegNo: Address)
5035 .addImm(Val: Imm + (IsLittle ? 0 : 3));
5036 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::SWL))
5037 .addUse(RegNo: Lo)
5038 .addUse(RegNo: Address)
5039 .addImm(Val: Imm + (IsLittle ? 3 : 0));
5040 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::SWR))
5041 .addUse(RegNo: Hi)
5042 .addUse(RegNo: Address)
5043 .addImm(Val: Imm + (IsLittle ? 4 : 7));
5044 BuildMI(BB&: *BB, I, MIMD: DL, MCID: TII->get(Opcode: Mips::SWL))
5045 .addUse(RegNo: Hi)
5046 .addUse(RegNo: Address)
5047 .addImm(Val: Imm + (IsLittle ? 7 : 4));
5048 }
5049
5050 MI.eraseFromParent();
5051 return BB;
5052}
5053