//===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// Mips.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/MipsInstPrinter.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterBankInfo.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/IntrinsicsMips.h"

#define DEBUG_TYPE "mips-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class MipsInstructionSelector : public InstructionSelector {
public:
  MipsInstructionSelector(const MipsTargetMachine &TM, const MipsSubtarget &STI,
                          const MipsRegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
  bool isRegInGprb(Register Reg, MachineRegisterInfo &MRI) const;
  bool isRegInFprb(Register Reg, MachineRegisterInfo &MRI) const;
  bool materialize32BitImm(Register DestReg, APInt Imm,
                           MachineIRBuilder &B) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  const TargetRegisterClass *
  getRegClassForTypeOnBank(Register Reg, MachineRegisterInfo &MRI) const;
  unsigned selectLoadStoreOpCode(MachineInstr &I,
                                 MachineRegisterInfo &MRI) const;
  bool buildUnalignedStore(MachineInstr &I, unsigned Opc,
                           MachineOperand &BaseAddr, unsigned Offset,
                           MachineMemOperand *MMO) const;
  bool buildUnalignedLoad(MachineInstr &I, unsigned Opc, Register Dest,
                          MachineOperand &BaseAddr, unsigned Offset,
                          Register TiedDest, MachineMemOperand *MMO) const;

  const MipsTargetMachine &TM;
  const MipsSubtarget &STI;
  const MipsInstrInfo &TII;
  const MipsRegisterInfo &TRI;
  const MipsRegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

MipsInstructionSelector::MipsInstructionSelector(
    const MipsTargetMachine &TM, const MipsSubtarget &STI,
    const MipsRegisterBankInfo &RBI)
    : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
      RBI(RBI),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

bool MipsInstructionSelector::isRegInGprb(Register Reg,
                                          MachineRegisterInfo &MRI) const {
  return RBI.getRegBank(Reg, MRI, TRI)->getID() == Mips::GPRBRegBankID;
}

bool MipsInstructionSelector::isRegInFprb(Register Reg,
                                          MachineRegisterInfo &MRI) const {
  return RBI.getRegBank(Reg, MRI, TRI)->getID() == Mips::FPRBRegBankID;
}

bool MipsInstructionSelector::selectCopy(MachineInstr &I,
                                         MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  if (DstReg.isPhysical())
    return true;

  const TargetRegisterClass *RC = getRegClassForTypeOnBank(DstReg, MRI);
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  return true;
}

const TargetRegisterClass *MipsInstructionSelector::getRegClassForTypeOnBank(
    Register Reg, MachineRegisterInfo &MRI) const {
  const LLT Ty = MRI.getType(Reg);
  const unsigned TySize = Ty.getSizeInBits();

  if (isRegInGprb(Reg, MRI)) {
    assert((Ty.isScalar() || Ty.isPointer()) && TySize == 32 &&
           "Register class not available for LLT, register bank combination");
    return &Mips::GPR32RegClass;
  }

  if (isRegInFprb(Reg, MRI)) {
    if (Ty.isScalar()) {
      assert((TySize == 32 || TySize == 64) &&
             "Register class not available for LLT, register bank combination");
      if (TySize == 32)
        return &Mips::FGR32RegClass;
      return STI.isFP64bit() ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    }
  }

  llvm_unreachable("Unsupported register bank.");
}

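/// Materialize a 32-bit immediate into DestReg. A single ORi, LUi or ADDiu is
/// used when the value allows it; otherwise an LUi/ORi pair is emitted.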
bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm,
                                                  MachineIRBuilder &B) const {
  assert(Imm.getBitWidth() == 32 && "Unsupported immediate size.");
  // ORi zero-extends its immediate; use it for values with zeros in the high
  // 16 bits.
  if (Imm.getHiBits(16).isZero()) {
    MachineInstr *Inst =
        B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)})
            .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // LUi places the immediate in the high 16 bits and sets the low 16 bits to
  // zero.
  if (Imm.getLoBits(16).isZero()) {
    MachineInstr *Inst = B.buildInstr(Mips::LUi, {DestReg}, {})
                             .addImm(Imm.getHiBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // ADDiu sign-extends its immediate; use it for values with ones in the high
  // 17 bits.
  if (Imm.isSignedIntN(16)) {
    MachineInstr *Inst =
        B.buildInstr(Mips::ADDiu, {DestReg}, {Register(Mips::ZERO)})
            .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // Values that cannot be materialized with a single immediate instruction:
  // build the high half with LUi, then or-in the low half with ORi.
  Register LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
  MachineInstr *LUi = B.buildInstr(Mips::LUi, {LUiReg}, {})
                          .addImm(Imm.getHiBits(16).getLimitedValue());
  MachineInstr *ORi = B.buildInstr(Mips::ORi, {DestReg}, {LUiReg})
                          .addImm(Imm.getLoBits(16).getLimitedValue());
  if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
    return false;
  if (!constrainSelectedInstRegOperands(*ORi, TII, TRI, RBI))
    return false;
  return true;
}

/// Returning I.getOpcode() means that no suitable MIPS load/store opcode could
/// be selected for this instruction.
unsigned
MipsInstructionSelector::selectLoadStoreOpCode(MachineInstr &I,
                                               MachineRegisterInfo &MRI) const {
  const Register ValueReg = I.getOperand(0).getReg();
  const LLT Ty = MRI.getType(ValueReg);
  const unsigned TySize = Ty.getSizeInBits();
  const unsigned MemSizeInBytes =
      (*I.memoperands_begin())->getSize().getValue();
  unsigned Opc = I.getOpcode();
  const bool isStore = Opc == TargetOpcode::G_STORE;

  if (isRegInGprb(ValueReg, MRI)) {
    assert(((Ty.isScalar() && TySize == 32) ||
            (Ty.isPointer() && TySize == 32 && MemSizeInBytes == 4)) &&
           "Unsupported register bank, LLT, MemSizeInBytes combination");
    (void)TySize;
    if (isStore)
      switch (MemSizeInBytes) {
      case 4:
        return Mips::SW;
      case 2:
        return Mips::SH;
      case 1:
        return Mips::SB;
      default:
        return Opc;
      }
    else
      // An extending load with unspecified extension is selected as a
      // zero-extending load.
      switch (MemSizeInBytes) {
      case 4:
        return Mips::LW;
      case 2:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LH : Mips::LHu;
      case 1:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LB : Mips::LBu;
      default:
        return Opc;
      }
  }

  if (isRegInFprb(ValueReg, MRI)) {
    if (Ty.isScalar()) {
      assert(((TySize == 32 && MemSizeInBytes == 4) ||
              (TySize == 64 && MemSizeInBytes == 8)) &&
             "Unsupported register bank, LLT, MemSizeInBytes combination");

      if (MemSizeInBytes == 4)
        return isStore ? Mips::SWC1 : Mips::LWC1;

      if (STI.isFP64bit())
        return isStore ? Mips::SDC164 : Mips::LDC164;
      return isStore ? Mips::SDC1 : Mips::LDC1;
    }

    if (Ty.isVector()) {
      assert(STI.hasMSA() && "Vector instructions require target with MSA.");
      assert((TySize == 128 && MemSizeInBytes == 16) &&
             "Unsupported register bank, LLT, MemSizeInBytes combination");
      switch (Ty.getElementType().getSizeInBits()) {
      case 8:
        return isStore ? Mips::ST_B : Mips::LD_B;
      case 16:
        return isStore ? Mips::ST_H : Mips::LD_H;
      case 32:
        return isStore ? Mips::ST_W : Mips::LD_W;
      case 64:
        return isStore ? Mips::ST_D : Mips::LD_D;
      default:
        return Opc;
      }
    }
  }

  return Opc;
}

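// buildUnalignedStore and buildUnalignedLoad emit one half of an unaligned
// word access (SWL/SWR or LWL/LWR) at BaseAddr + Offset; callers emit both
// halves to cover the full word.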
bool MipsInstructionSelector::buildUnalignedStore(
    MachineInstr &I, unsigned Opc, MachineOperand &BaseAddr, unsigned Offset,
    MachineMemOperand *MMO) const {
  MachineInstr *NewInst =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc))
          .add(I.getOperand(0))
          .add(BaseAddr)
          .addImm(Offset)
          .addMemOperand(MMO);
  if (!constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI))
    return false;
  return true;
}

bool MipsInstructionSelector::buildUnalignedLoad(
    MachineInstr &I, unsigned Opc, Register Dest, MachineOperand &BaseAddr,
    unsigned Offset, Register TiedDest, MachineMemOperand *MMO) const {
  MachineInstr *NewInst =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc))
          .addDef(Dest)
          .add(BaseAddr)
          .addImm(Offset)
          .addUse(TiedDest)
          .addMemOperand(*I.memoperands_begin());
  if (!constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI))
    return false;
  return true;
}

bool MipsInstructionSelector::select(MachineInstr &I) {

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

  if (I.getOpcode() == Mips::G_MUL &&
      isRegInGprb(I.getOperand(0).getReg(), MRI)) {
    MachineInstr *Mul = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MUL))
                            .add(I.getOperand(0))
                            .add(I.getOperand(1))
                            .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*Mul, TII, TRI, RBI))
      return false;
    Mul->getOperand(3).setIsDead(true);
    Mul->getOperand(4).setIsDead(true);

    I.eraseFromParent();
    return true;
  }

  if (selectImpl(I, *CoverageInfo))
    return true;

  MachineInstr *MI = nullptr;
  using namespace TargetOpcode;

  switch (I.getOpcode()) {
  case G_UMULH: {
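    // Multiply into the HI/LO accumulator with PseudoMULTu, then copy the high
    // 32 bits of the result out with PseudoMFHI.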
    Register PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    MachineInstr *PseudoMULTu, *PseudoMove;

    PseudoMULTu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMULTu))
                      .addDef(PseudoMULTuReg)
                      .add(I.getOperand(1))
                      .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoMULTu, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(PseudoMULTuReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_PTR_ADD: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .add(I.getOperand(2));
    break;
  }
  case G_INTTOPTR:
  case G_PTRTOINT: {
    I.setDesc(TII.get(COPY));
    return selectCopy(I, MRI);
  }
  case G_FRAME_INDEX: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .addImm(0);
    break;
  }
  case G_BRJT: {
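    // Expansion: scale the index by the jump-table entry size (SLL), add it to
    // the table base address (ADDu), load the destination (LW, plus a
    // GP-relative ADDu for PIC) and branch to it indirectly.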
    unsigned EntrySize =
        MF.getJumpTableInfo()->getEntrySize(MF.getDataLayout());
    assert(isPowerOf2_32(EntrySize) &&
           "Non-power-of-two jump-table entry size not supported.");

    Register JTIndex = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *SLL = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SLL))
                            .addDef(JTIndex)
                            .addUse(I.getOperand(2).getReg())
                            .addImm(Log2_32(EntrySize));
    if (!constrainSelectedInstRegOperands(*SLL, TII, TRI, RBI))
      return false;

    Register DestAddress = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                             .addDef(DestAddress)
                             .addUse(I.getOperand(0).getReg())
                             .addUse(JTIndex);
    if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
      return false;

    Register Dest = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LW =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
            .addDef(Dest)
            .addUse(DestAddress)
            .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_LO)
            .addMemOperand(MF.getMachineMemOperand(
                MachinePointerInfo(), MachineMemOperand::MOLoad, 4, Align(4)));
    if (!constrainSelectedInstRegOperands(*LW, TII, TRI, RBI))
      return false;

    if (MF.getTarget().isPositionIndependent()) {
      Register DestTmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      LW->getOperand(0).setReg(DestTmp);
      MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                               .addDef(Dest)
                               .addUse(DestTmp)
                               .addUse(MF.getInfo<MipsFunctionInfo>()
                                           ->getGlobalBaseRegForGlobalISel(MF));
      if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
        return false;
    }

    MachineInstr *Branch =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
            .addUse(Dest);
    if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_BRINDIRECT: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
             .add(I.getOperand(0));
    break;
  }
  case G_PHI: {
    const Register DestReg = I.getOperand(0).getReg();

    const TargetRegisterClass *DefRC = nullptr;
    if (DestReg.isPhysical())
      DefRC = TRI.getRegClass(DestReg);
    else
      DefRC = getRegClassForTypeOnBank(DestReg, MRI);

    I.setDesc(TII.get(TargetOpcode::PHI));
    return RBI.constrainGenericRegister(DestReg, *DefRC, MRI);
  }
  case G_STORE:
  case G_LOAD:
  case G_ZEXTLOAD:
  case G_SEXTLOAD: {
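    // Selection proceeds in three steps: fold a constant offset from a
    // preceding G_PTR_ADD into the addressing mode, expand unaligned word
    // accesses into LWL/LWR or SWL/SWR pairs, and otherwise pick the plain
    // load/store opcode for the register bank and memory size.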
    auto MMO = *I.memoperands_begin();
    MachineOperand BaseAddr = I.getOperand(1);
    int64_t SignedOffset = 0;
    // Try to fold load/store + G_PTR_ADD + G_CONSTANT
    // %SignedOffset:(s32) = G_CONSTANT i32 16_bit_signed_immediate
    // %Addr:(p0) = G_PTR_ADD %BaseAddr, %SignedOffset
    // %LoadResult/%StoreSrc = load/store %Addr(p0)
    // into:
    // %LoadResult/%StoreSrc = NewOpc %BaseAddr(p0), 16_bit_signed_immediate

    MachineInstr *Addr = MRI.getVRegDef(I.getOperand(1).getReg());
    if (Addr->getOpcode() == G_PTR_ADD) {
      MachineInstr *Offset = MRI.getVRegDef(Addr->getOperand(2).getReg());
      if (Offset->getOpcode() == G_CONSTANT) {
        APInt OffsetValue = Offset->getOperand(1).getCImm()->getValue();
        if (OffsetValue.isSignedIntN(16)) {
          BaseAddr = Addr->getOperand(1);
          SignedOffset = OffsetValue.getSExtValue();
        }
      }
    }

    // Unaligned memory access
    if ((!MMO->getSize().hasValue() ||
         MMO->getAlign() < MMO->getSize().getValue()) &&
        !STI.systemSupportsUnalignedAccess()) {
      if (MMO->getSize() != 4 || !isRegInGprb(I.getOperand(0).getReg(), MRI))
        return false;

      if (I.getOpcode() == G_STORE) {
        if (!buildUnalignedStore(I, Mips::SWL, BaseAddr, SignedOffset + 3, MMO))
          return false;
        if (!buildUnalignedStore(I, Mips::SWR, BaseAddr, SignedOffset, MMO))
          return false;
        I.eraseFromParent();
        return true;
      }

      if (I.getOpcode() == G_LOAD) {
        Register ImplDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
            .addDef(ImplDef);
        Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        if (!buildUnalignedLoad(I, Mips::LWL, Tmp, BaseAddr, SignedOffset + 3,
                                ImplDef, MMO))
          return false;
        if (!buildUnalignedLoad(I, Mips::LWR, I.getOperand(0).getReg(),
                                BaseAddr, SignedOffset, Tmp, MMO))
          return false;
        I.eraseFromParent();
        return true;
      }

      return false;
    }

    const unsigned NewOpc = selectLoadStoreOpCode(I, MRI);
    if (NewOpc == I.getOpcode())
      return false;

    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
             .add(I.getOperand(0))
             .add(BaseAddr)
             .addImm(SignedOffset)
             .addMemOperand(MMO);
    break;
  }
  case G_UDIV:
  case G_UREM:
  case G_SDIV:
  case G_SREM: {
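    // Divide into the HI/LO accumulator with PseudoSDIV/PseudoUDIV, then read
    // the quotient from LO or the remainder from HI.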
    Register HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
    bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;

    MachineInstr *PseudoDIV, *PseudoMove;
    PseudoDIV = BuildMI(MBB, I, I.getDebugLoc(),
                        TII.get(IsSigned ? Mips::PseudoSDIV : Mips::PseudoUDIV))
                    .addDef(HILOReg)
                    .add(I.getOperand(1))
                    .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoDIV, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(),
                         TII.get(IsDiv ? Mips::PseudoMFLO : Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(HILOReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_SELECT: {
    // Handle operands with pointer type.
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MOVN_I_I))
             .add(I.getOperand(0))
             .add(I.getOperand(2))
             .add(I.getOperand(1))
             .add(I.getOperand(3));
    break;
  }
  case G_UNMERGE_VALUES: {
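    // Split a 64-bit value held in an FPR into its low and high 32-bit halves
    // in GPRs using ExtractElementF64.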
    if (I.getNumOperands() != 3)
      return false;
    Register Src = I.getOperand(2).getReg();
    Register Lo = I.getOperand(0).getReg();
    Register Hi = I.getOperand(1).getReg();
    if (!isRegInFprb(Src, MRI) ||
        !(isRegInGprb(Lo, MRI) && isRegInGprb(Hi, MRI)))
      return false;

    unsigned Opcode =
        STI.isFP64bit() ? Mips::ExtractElementF64_64 : Mips::ExtractElementF64;

    MachineInstr *ExtractLo = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                                  .addDef(Lo)
                                  .addUse(Src)
                                  .addImm(0);
    if (!constrainSelectedInstRegOperands(*ExtractLo, TII, TRI, RBI))
      return false;

    MachineInstr *ExtractHi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                                  .addDef(Hi)
                                  .addUse(Src)
                                  .addImm(1);
    if (!constrainSelectedInstRegOperands(*ExtractHi, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_IMPLICIT_DEF: {
    Register Dst = I.getOperand(0).getReg();
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
             .addDef(Dst);

    // Set the register class based on the register bank; there can be both FPR
    // and GPR implicit defs.
    MRI.setRegClass(Dst, getRegClassForTypeOnBank(Dst, MRI));
    break;
  }
  case G_CONSTANT: {
    MachineIRBuilder B(I);
    if (!materialize32BitImm(I.getOperand(0).getReg(),
                             I.getOperand(1).getCImm()->getValue(), B))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FCONSTANT: {
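    // Materialize the bit pattern of the FP constant in GPR(s), then move it
    // into the FPR with MTC1 (f32) or BuildPairF64 (f64).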
    const APFloat &FPimm = I.getOperand(1).getFPImm()->getValueAPF();
    APInt APImm = FPimm.bitcastToAPInt();
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

    if (Size == 32) {
      Register GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRReg, APImm, B))
        return false;

      MachineInstrBuilder MTC1 =
          B.buildInstr(Mips::MTC1, {I.getOperand(0).getReg()}, {GPRReg});
      if (!MTC1.constrainAllUses(TII, TRI, RBI))
        return false;
    }
    if (Size == 64) {
      Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      Register GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRRegHigh, APImm.getHiBits(32).trunc(32), B))
        return false;
      if (!materialize32BitImm(GPRRegLow, APImm.getLoBits(32).trunc(32), B))
        return false;

      MachineInstrBuilder PairF64 = B.buildInstr(
          STI.isFP64bit() ? Mips::BuildPairF64_64 : Mips::BuildPairF64,
          {I.getOperand(0).getReg()}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FABS: {
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    unsigned FABSOpcode =
        Size == 32 ? Mips::FABS_S
        : STI.isFP64bit() ? Mips::FABS_D64 : Mips::FABS_D32;
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FABSOpcode))
             .add(I.getOperand(0))
             .add(I.getOperand(1));
    break;
  }
  case G_FPTOSI: {
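    // Convert in the FPR with TRUNC_W_*, then move the 32-bit integer result
    // to a GPR with MFC1.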
    unsigned FromSize = MRI.getType(I.getOperand(1).getReg()).getSizeInBits();
    unsigned ToSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    (void)ToSize;
    assert((ToSize == 32) && "Unsupported integer size for G_FPTOSI");
    assert((FromSize == 32 || FromSize == 64) &&
           "Unsupported floating point size for G_FPTOSI");

    unsigned Opcode;
    if (FromSize == 32)
      Opcode = Mips::TRUNC_W_S;
    else
      Opcode = STI.isFP64bit() ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32;
    Register ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass);
    MachineInstr *Trunc = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                              .addDef(ResultInFPR)
                              .addUse(I.getOperand(1).getReg());
    if (!constrainSelectedInstRegOperands(*Trunc, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MFC1))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(ResultInFPR);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_GLOBAL_VALUE: {
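    // PIC: load the address from the GOT (and, for local symbols, add the low
    // offset with ADDiu). Non-PIC: build the address with an LUi/ADDiu
    // %hi/%lo pair.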
    const llvm::GlobalValue *GVal = I.getOperand(1).getGlobal();
    if (MF.getTarget().isPositionIndependent()) {
      MachineInstr *LWGOT = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
                                .addDef(I.getOperand(0).getReg())
                                .addReg(MF.getInfo<MipsFunctionInfo>()
                                            ->getGlobalBaseRegForGlobalISel(MF))
                                .addGlobalAddress(GVal);
      // Global values that don't have local linkage are handled differently
      // when they are part of a call sequence. MipsCallLowering::lowerCall
      // creates the G_GLOBAL_VALUE instruction as part of the call sequence
      // and adds the MO_GOT_CALL flag when the callee doesn't have local
      // linkage.
      if (I.getOperand(1).getTargetFlags() == MipsII::MO_GOT_CALL)
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT_CALL);
      else
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT);
      LWGOT->addMemOperand(
          MF, MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
                                      MachineMemOperand::MOLoad, 4, Align(4)));
      if (!constrainSelectedInstRegOperands(*LWGOT, TII, TRI, RBI))
        return false;

      if (GVal->hasLocalLinkage()) {
        Register LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        LWGOT->getOperand(0).setReg(LWGOTDef);

        MachineInstr *ADDiu =
            BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
                .addDef(I.getOperand(0).getReg())
                .addReg(LWGOTDef)
                .addGlobalAddress(GVal);
        ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
        if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
          return false;
      }
    } else {
      Register LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);

      MachineInstr *LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
                              .addDef(LUiReg)
                              .addGlobalAddress(GVal);
      LUi->getOperand(1).setTargetFlags(MipsII::MO_ABS_HI);
      if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
        return false;

      MachineInstr *ADDiu =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
              .addDef(I.getOperand(0).getReg())
              .addUse(LUiReg)
              .addGlobalAddress(GVal);
      ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
      if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
        return false;
    }
    I.eraseFromParent();
    return true;
  }
  case G_JUMP_TABLE: {
    if (MF.getTarget().isPositionIndependent()) {
      MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
               .addDef(I.getOperand(0).getReg())
               .addReg(MF.getInfo<MipsFunctionInfo>()
                           ->getGlobalBaseRegForGlobalISel(MF))
               .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_GOT)
               .addMemOperand(MF.getMachineMemOperand(
                   MachinePointerInfo::getGOT(MF), MachineMemOperand::MOLoad, 4,
                   Align(4)));
    } else {
      MI =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
              .addDef(I.getOperand(0).getReg())
              .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_HI);
    }
    break;
  }
  case G_ICMP: {
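    // Integer compares are expanded into one or two instructions: a
    // set-on-less-than (SLT/SLTu/SLTiu), possibly combined with XOR/XORi,
    // producing 0 or 1 in a GPR.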
    struct Instr {
      unsigned Opcode;
      Register Def, LHS, RHS;
      Instr(unsigned Opcode, Register Def, Register LHS, Register RHS)
          : Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS) {}

      bool hasImm() const {
        if (Opcode == Mips::SLTiu || Opcode == Mips::XORi)
          return true;
        return false;
      }
    };

    SmallVector<struct Instr, 2> Instructions;
    Register ICMPReg = I.getOperand(0).getReg();
    Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LHS = I.getOperand(2).getReg();
    Register RHS = I.getOperand(3).getReg();
    CmpInst::Predicate Cond =
        static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());

    switch (Cond) {
    case CmpInst::ICMP_EQ: // LHS == RHS -> (LHS ^ RHS) < 1
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTiu, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_NE: // LHS != RHS -> 0 < (LHS ^ RHS)
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTu, ICMPReg, Mips::ZERO, Temp);
      break;
    case CmpInst::ICMP_UGT: // LHS > RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_UGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLTu, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_ULT: // LHS < RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_ULE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLTu, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SGT: // LHS > RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_SGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLT, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SLT: // LHS < RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_SLE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLT, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    default:
      return false;
    }

    MachineIRBuilder B(I);
    for (const struct Instr &Instruction : Instructions) {
      MachineInstrBuilder MIB = B.buildInstr(
          Instruction.Opcode, {Instruction.Def}, {Instruction.LHS});

      if (Instruction.hasImm())
        MIB.addImm(Instruction.RHS);
      else
        MIB.addUse(Instruction.RHS);

      if (!MIB.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FCMP: {
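    // Map the IR predicate onto a MIPS FP condition code (possibly its logical
    // negation), run the FP compare to set FCC0, then select between 1 and
    // $zero with a conditional move.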
    unsigned MipsFCMPCondCode;
    bool isLogicallyNegated;
    switch (CmpInst::Predicate Cond = static_cast<CmpInst::Predicate>(
                I.getOperand(1).getPredicate())) {
    case CmpInst::FCMP_UNO: // Unordered
    case CmpInst::FCMP_ORD: // Ordered (OR)
      MipsFCMPCondCode = Mips::FCOND_UN;
      isLogicallyNegated = Cond != CmpInst::FCMP_UNO;
      break;
    case CmpInst::FCMP_OEQ: // Equal
    case CmpInst::FCMP_UNE: // Not Equal (NEQ)
      MipsFCMPCondCode = Mips::FCOND_OEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_OEQ;
      break;
    case CmpInst::FCMP_UEQ: // Unordered or Equal
    case CmpInst::FCMP_ONE: // Ordered or Greater Than or Less Than (OGL)
      MipsFCMPCondCode = Mips::FCOND_UEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_UEQ;
      break;
    case CmpInst::FCMP_OLT: // Ordered or Less Than
    case CmpInst::FCMP_UGE: // Unordered or Greater Than or Equal (UGE)
      MipsFCMPCondCode = Mips::FCOND_OLT;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLT;
      break;
    case CmpInst::FCMP_ULT: // Unordered or Less Than
    case CmpInst::FCMP_OGE: // Ordered or Greater Than or Equal (OGE)
      MipsFCMPCondCode = Mips::FCOND_ULT;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULT;
      break;
    case CmpInst::FCMP_OLE: // Ordered or Less Than or Equal
    case CmpInst::FCMP_UGT: // Unordered or Greater Than (UGT)
      MipsFCMPCondCode = Mips::FCOND_OLE;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLE;
      break;
    case CmpInst::FCMP_ULE: // Unordered or Less Than or Equal
    case CmpInst::FCMP_OGT: // Ordered or Greater Than (OGT)
      MipsFCMPCondCode = Mips::FCOND_ULE;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULE;
      break;
    default:
      return false;
    }

    // By default the compare result in the GPR is `true`. We move `false`
    // (Mips::ZERO) into the result when the fcmp yields false, using MOVF_I.
    // When the original predicate (Cond) is the logical negation of
    // MipsFCMPCondCode, the result is inverted, i.e. MOVT_I is used.
    unsigned MoveOpcode = isLogicallyNegated ? Mips::MOVT_I : Mips::MOVF_I;

    Register TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
        .addDef(TrueInReg)
        .addUse(Mips::ZERO)
        .addImm(1);

    unsigned Size = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    unsigned FCMPOpcode =
        Size == 32 ? Mips::FCMP_S32
        : STI.isFP64bit() ? Mips::FCMP_D64 : Mips::FCMP_D32;
    MachineInstr *FCMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FCMPOpcode))
                             .addUse(I.getOperand(2).getReg())
                             .addUse(I.getOperand(3).getReg())
                             .addImm(MipsFCMPCondCode);
    if (!constrainSelectedInstRegOperands(*FCMP, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(MoveOpcode))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(Mips::ZERO)
                             .addUse(Mips::FCC0)
                             .addUse(TrueInReg);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FENCE: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SYNC)).addImm(0);
    break;
  }
  case G_VASTART: {
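    // Compute the address of the first variable argument with LEA_ADDiu and
    // store it through the va_list pointer operand.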
    MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
    int FI = FuncInfo->getVarArgsFrameIndex();

    Register LeaReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LEA_ADDiu =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LEA_ADDiu))
            .addDef(LeaReg)
            .addFrameIndex(FI)
            .addImm(0);
    if (!constrainSelectedInstRegOperands(*LEA_ADDiu, TII, TRI, RBI))
      return false;

    MachineInstr *Store = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SW))
                              .addUse(LeaReg)
                              .addUse(I.getOperand(0).getReg())
                              .addImm(0);
    if (!constrainSelectedInstRegOperands(*Store, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  default:
    return false;
  }

  I.eraseFromParent();
  return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
}

namespace llvm {
InstructionSelector *
createMipsInstructionSelector(const MipsTargetMachine &TM,
                              const MipsSubtarget &Subtarget,
                              const MipsRegisterBankInfo &RBI) {
  return new MipsInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm