//===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// Mips.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/MipsInstPrinter.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterBankInfo.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/IntrinsicsMips.h"

#define DEBUG_TYPE "mips-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class MipsInstructionSelector : public InstructionSelector {
public:
  MipsInstructionSelector(const MipsTargetMachine &TM, const MipsSubtarget &STI,
                          const MipsRegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
  bool isRegInGprb(Register Reg, MachineRegisterInfo &MRI) const;
  bool isRegInFprb(Register Reg, MachineRegisterInfo &MRI) const;
  bool materialize32BitImm(Register DestReg, APInt Imm,
                           MachineIRBuilder &B) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  const TargetRegisterClass *
  getRegClassForTypeOnBank(Register Reg, MachineRegisterInfo &MRI) const;
  unsigned selectLoadStoreOpCode(MachineInstr &I,
                                 MachineRegisterInfo &MRI) const;
  bool buildUnalignedStore(MachineInstr &I, unsigned Opc,
                           MachineOperand &BaseAddr, unsigned Offset,
                           MachineMemOperand *MMO) const;
  bool buildUnalignedLoad(MachineInstr &I, unsigned Opc, Register Dest,
                          MachineOperand &BaseAddr, unsigned Offset,
                          Register TiedDest, MachineMemOperand *MMO) const;

  const MipsTargetMachine &TM;
  const MipsSubtarget &STI;
  const MipsInstrInfo &TII;
  const MipsRegisterInfo &TRI;
  const MipsRegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

MipsInstructionSelector::MipsInstructionSelector(
    const MipsTargetMachine &TM, const MipsSubtarget &STI,
    const MipsRegisterBankInfo &RBI)
    : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
      RBI(RBI),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

bool MipsInstructionSelector::isRegInGprb(Register Reg,
                                          MachineRegisterInfo &MRI) const {
  return RBI.getRegBank(Reg, MRI, TRI)->getID() == Mips::GPRBRegBankID;
}

bool MipsInstructionSelector::isRegInFprb(Register Reg,
                                          MachineRegisterInfo &MRI) const {
  return RBI.getRegBank(Reg, MRI, TRI)->getID() == Mips::FPRBRegBankID;
}

bool MipsInstructionSelector::selectCopy(MachineInstr &I,
                                         MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  if (DstReg.isPhysical())
    return true;

  const TargetRegisterClass *RC = getRegClassForTypeOnBank(DstReg, MRI);
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  return true;
}

const TargetRegisterClass *MipsInstructionSelector::getRegClassForTypeOnBank(
    Register Reg, MachineRegisterInfo &MRI) const {
  const LLT Ty = MRI.getType(Reg);
  const unsigned TySize = Ty.getSizeInBits();

  if (isRegInGprb(Reg, MRI)) {
    assert((Ty.isScalar() || Ty.isPointer()) &&
           (TySize == 32 || TySize == 64) &&
           "Register class not available for LLT, register bank combination");
    if (TySize == 32)
      return &Mips::GPR32RegClass;
    if (TySize == 64)
      return &Mips::GPR64RegClass;
  }

  if (isRegInFprb(Reg, MRI)) {
    if (Ty.isScalar()) {
      assert((TySize == 32 || TySize == 64) &&
             "Register class not available for LLT, register bank combination");
      if (TySize == 32)
        return &Mips::FGR32RegClass;
      return STI.isFP64bit() ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    }
  }

  llvm_unreachable("Unsupported register bank.");
}

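// Materialize a 32-bit immediate into DestReg. Immediates that fit a single
// ORi, LUi or ADDiu are emitted as one instruction; everything else is built
// with a LUi + ORi pair.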
bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm,
                                                  MachineIRBuilder &B) const {
  assert(Imm.getBitWidth() == 32 && "Unsupported immediate size.");
  // ORi zero-extends the immediate; used for values with zeros in the high 16
  // bits.
  if (Imm.getHiBits(16).isZero()) {
    MachineInstr *Inst =
        B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)})
            .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // LUi places the immediate in the high 16 bits and sets the low 16 bits to
  // zero.
  if (Imm.getLoBits(16).isZero()) {
    MachineInstr *Inst = B.buildInstr(Mips::LUi, {DestReg}, {})
                             .addImm(Imm.getHiBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // ADDiu sign-extends the immediate; used for values with ones in the high 17
  // bits.
  if (Imm.isSignedIntN(16)) {
    MachineInstr *Inst =
        B.buildInstr(Mips::ADDiu, {DestReg}, {Register(Mips::ZERO)})
            .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // Values that cannot be materialized with a single immediate instruction.
  Register LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
  MachineInstr *LUi = B.buildInstr(Mips::LUi, {LUiReg}, {})
                          .addImm(Imm.getHiBits(16).getLimitedValue());
  MachineInstr *ORi = B.buildInstr(Mips::ORi, {DestReg}, {LUiReg})
                          .addImm(Imm.getLoBits(16).getLimitedValue());
  if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
    return false;
  if (!constrainSelectedInstRegOperands(*ORi, TII, TRI, RBI))
    return false;
  return true;
}

/// Returns the opcode to select, or I.getOpcode() if no MIPS load/store
/// instruction matches this combination of register bank, LLT and memory size.
unsigned
MipsInstructionSelector::selectLoadStoreOpCode(MachineInstr &I,
                                               MachineRegisterInfo &MRI) const {
  const Register ValueReg = I.getOperand(0).getReg();
  const LLT Ty = MRI.getType(ValueReg);
  const unsigned TySize = Ty.getSizeInBits();
  const unsigned MemSizeInBytes =
      (*I.memoperands_begin())->getSize().getValue();
  unsigned Opc = I.getOpcode();
  const bool isStore = Opc == TargetOpcode::G_STORE;

  if (isRegInGprb(ValueReg, MRI)) {
    assert(((Ty.isScalar() && TySize == 32) ||
            (Ty.isPointer() && TySize == 32 && MemSizeInBytes == 4)) &&
           "Unsupported register bank, LLT, MemSizeInBytes combination");
    (void)TySize;
    if (isStore)
      switch (MemSizeInBytes) {
      case 4:
        return Mips::SW;
      case 2:
        return Mips::SH;
      case 1:
        return Mips::SB;
      default:
        return Opc;
      }
    else
      // An unspecified extending load is selected into a zero-extending load.
      switch (MemSizeInBytes) {
      case 4:
        return Mips::LW;
      case 2:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LH : Mips::LHu;
      case 1:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LB : Mips::LBu;
      default:
        return Opc;
      }
  }

  if (isRegInFprb(ValueReg, MRI)) {
    if (Ty.isScalar()) {
      assert(((TySize == 32 && MemSizeInBytes == 4) ||
              (TySize == 64 && MemSizeInBytes == 8)) &&
             "Unsupported register bank, LLT, MemSizeInBytes combination");

      if (MemSizeInBytes == 4)
        return isStore ? Mips::SWC1 : Mips::LWC1;

      if (STI.isFP64bit())
        return isStore ? Mips::SDC164 : Mips::LDC164;
      return isStore ? Mips::SDC1 : Mips::LDC1;
    }

    if (Ty.isVector()) {
      assert(STI.hasMSA() && "Vector instructions require target with MSA.");
      assert((TySize == 128 && MemSizeInBytes == 16) &&
             "Unsupported register bank, LLT, MemSizeInBytes combination");
      switch (Ty.getElementType().getSizeInBits()) {
      case 8:
        return isStore ? Mips::ST_B : Mips::LD_B;
      case 16:
        return isStore ? Mips::ST_H : Mips::LD_H;
      case 32:
        return isStore ? Mips::ST_W : Mips::LD_W;
      case 64:
        return isStore ? Mips::ST_D : Mips::LD_D;
      default:
        return Opc;
      }
    }
  }

  return Opc;
}

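// Unaligned 32-bit accesses are expanded into a pair of partial-word
// instructions (SWL/SWR for stores, LWL/LWR for loads) that together cover
// all four bytes of the misaligned word.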
bool MipsInstructionSelector::buildUnalignedStore(
    MachineInstr &I, unsigned Opc, MachineOperand &BaseAddr, unsigned Offset,
    MachineMemOperand *MMO) const {
  MachineInstr *NewInst =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc))
          .add(I.getOperand(0))
          .add(BaseAddr)
          .addImm(Offset)
          .addMemOperand(MMO);
  if (!constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI))
    return false;
  return true;
}

bool MipsInstructionSelector::buildUnalignedLoad(
    MachineInstr &I, unsigned Opc, Register Dest, MachineOperand &BaseAddr,
    unsigned Offset, Register TiedDest, MachineMemOperand *MMO) const {
  MachineInstr *NewInst =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc))
          .addDef(Dest)
          .add(BaseAddr)
          .addImm(Offset)
          .addUse(TiedDest)
          .addMemOperand(*I.memoperands_begin());
  if (!constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI))
    return false;
  return true;
}

bool MipsInstructionSelector::select(MachineInstr &I) {

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

  if (I.getOpcode() == Mips::G_MUL &&
      isRegInGprb(I.getOperand(0).getReg(), MRI)) {
    MachineInstr *Mul = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MUL))
                            .add(I.getOperand(0))
                            .add(I.getOperand(1))
                            .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*Mul, TII, TRI, RBI))
      return false;
    Mul->getOperand(3).setIsDead(true);
    Mul->getOperand(4).setIsDead(true);

    I.eraseFromParent();
    return true;
  }

  if (selectImpl(I, *CoverageInfo))
    return true;

  MachineInstr *MI = nullptr;
  using namespace TargetOpcode;

  switch (I.getOpcode()) {
  case G_UMULH: {
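    // MULTu writes the full 64-bit product into the HI/LO (ACC64) register
    // pair; reading HI with PseudoMFHI yields the high 32 bits for G_UMULH.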
    Register PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    MachineInstr *PseudoMULTu, *PseudoMove;

    PseudoMULTu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMULTu))
                      .addDef(PseudoMULTuReg)
                      .add(I.getOperand(1))
                      .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoMULTu, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(PseudoMULTuReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_PTR_ADD: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .add(I.getOperand(2));
    break;
  }
  case G_INTTOPTR:
  case G_PTRTOINT: {
    I.setDesc(TII.get(COPY));
    return selectCopy(I, MRI);
  }
  case G_FRAME_INDEX: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .addImm(0);
    break;
  }
  case G_BRJT: {
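    // Lower the jump-table branch by scaling the index by the entry size,
    // adding it to the table address, loading the target address from the
    // table (plus the GOT base register in PIC mode) and branching to it.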
    unsigned EntrySize =
        MF.getJumpTableInfo()->getEntrySize(MF.getDataLayout());
    assert(isPowerOf2_32(EntrySize) &&
           "Non-power-of-two jump-table entry size not supported.");

    Register JTIndex = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *SLL = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SLL))
                            .addDef(JTIndex)
                            .addUse(I.getOperand(2).getReg())
                            .addImm(Log2_32(EntrySize));
    if (!constrainSelectedInstRegOperands(*SLL, TII, TRI, RBI))
      return false;

    Register DestAddress = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                             .addDef(DestAddress)
                             .addUse(I.getOperand(0).getReg())
                             .addUse(JTIndex);
    if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
      return false;

    Register Dest = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LW =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
            .addDef(Dest)
            .addUse(DestAddress)
            .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_LO)
            .addMemOperand(MF.getMachineMemOperand(
                MachinePointerInfo(), MachineMemOperand::MOLoad, 4, Align(4)));
    if (!constrainSelectedInstRegOperands(*LW, TII, TRI, RBI))
      return false;

    if (MF.getTarget().isPositionIndependent()) {
      Register DestTmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      LW->getOperand(0).setReg(DestTmp);
      MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                               .addDef(Dest)
                               .addUse(DestTmp)
                               .addUse(MF.getInfo<MipsFunctionInfo>()
                                           ->getGlobalBaseRegForGlobalISel(MF));
      if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
        return false;
    }

    MachineInstr *Branch =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
            .addUse(Dest);
    if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_BRINDIRECT: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
             .add(I.getOperand(0));
    break;
  }
  case G_PHI: {
    const Register DestReg = I.getOperand(0).getReg();

    const TargetRegisterClass *DefRC = nullptr;
    if (DestReg.isPhysical())
      DefRC = TRI.getRegClass(DestReg);
    else
      DefRC = getRegClassForTypeOnBank(DestReg, MRI);

    I.setDesc(TII.get(TargetOpcode::PHI));
    return RBI.constrainGenericRegister(DestReg, *DefRC, MRI);
  }
  case G_STORE:
  case G_LOAD:
  case G_ZEXTLOAD:
  case G_SEXTLOAD: {
    auto MMO = *I.memoperands_begin();
    MachineOperand BaseAddr = I.getOperand(1);
    int64_t SignedOffset = 0;
    // Try to fold load/store + G_PTR_ADD + G_CONSTANT
    // %SignedOffset:(s32) = G_CONSTANT i32 16_bit_signed_immediate
    // %Addr:(p0) = G_PTR_ADD %BaseAddr, %SignedOffset
    // %LoadResult/%StoreSrc = load/store %Addr(p0)
    // into:
    // %LoadResult/%StoreSrc = NewOpc %BaseAddr(p0), 16_bit_signed_immediate

    MachineInstr *Addr = MRI.getVRegDef(I.getOperand(1).getReg());
    if (Addr->getOpcode() == G_PTR_ADD) {
      MachineInstr *Offset = MRI.getVRegDef(Addr->getOperand(2).getReg());
      if (Offset->getOpcode() == G_CONSTANT) {
        APInt OffsetValue = Offset->getOperand(1).getCImm()->getValue();
        if (OffsetValue.isSignedIntN(16)) {
          BaseAddr = Addr->getOperand(1);
          SignedOffset = OffsetValue.getSExtValue();
        }
      }
    }

    // Unaligned memory access
    if ((!MMO->getSize().hasValue() ||
         MMO->getAlign() < MMO->getSize().getValue()) &&
        !STI.systemSupportsUnalignedAccess()) {
      if (MMO->getSize() != 4 || !isRegInGprb(I.getOperand(0).getReg(), MRI))
        return false;

      if (I.getOpcode() == G_STORE) {
        if (!buildUnalignedStore(I, Mips::SWL, BaseAddr, SignedOffset + 3, MMO))
          return false;
        if (!buildUnalignedStore(I, Mips::SWR, BaseAddr, SignedOffset, MMO))
          return false;
        I.eraseFromParent();
        return true;
      }

      if (I.getOpcode() == G_LOAD) {
        Register ImplDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
            .addDef(ImplDef);
        Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        if (!buildUnalignedLoad(I, Mips::LWL, Tmp, BaseAddr, SignedOffset + 3,
                                ImplDef, MMO))
          return false;
        if (!buildUnalignedLoad(I, Mips::LWR, I.getOperand(0).getReg(),
                                BaseAddr, SignedOffset, Tmp, MMO))
          return false;
        I.eraseFromParent();
        return true;
      }

      return false;
    }

    const unsigned NewOpc = selectLoadStoreOpCode(I, MRI);
    if (NewOpc == I.getOpcode())
      return false;

    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
             .add(I.getOperand(0))
             .add(BaseAddr)
             .addImm(SignedOffset)
             .addMemOperand(MMO);
    break;
  }
  case G_UDIV:
  case G_UREM:
  case G_SDIV:
  case G_SREM: {
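    // DIV/DIVU write the quotient to LO and the remainder to HI, so the
    // result is read back with PseudoMFLO for divisions and PseudoMFHI for
    // remainders.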
    Register HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
    bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;

    MachineInstr *PseudoDIV, *PseudoMove;
    PseudoDIV = BuildMI(MBB, I, I.getDebugLoc(),
                        TII.get(IsSigned ? Mips::PseudoSDIV : Mips::PseudoUDIV))
                    .addDef(HILOReg)
                    .add(I.getOperand(1))
                    .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoDIV, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(),
                         TII.get(IsDiv ? Mips::PseudoMFLO : Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(HILOReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_SELECT: {
    // Handle operands with pointer type.
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MOVN_I_I))
             .add(I.getOperand(0))
             .add(I.getOperand(2))
             .add(I.getOperand(1))
             .add(I.getOperand(3));
    break;
  }
  case G_UNMERGE_VALUES: {
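    // Split a 64-bit FPR into its two 32-bit GPR halves using
    // ExtractElementF64 (immediate 0 selects the low half, 1 the high half).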
    if (I.getNumOperands() != 3)
      return false;
    Register Src = I.getOperand(2).getReg();
    Register Lo = I.getOperand(0).getReg();
    Register Hi = I.getOperand(1).getReg();
    if (!isRegInFprb(Src, MRI) ||
        !(isRegInGprb(Lo, MRI) && isRegInGprb(Hi, MRI)))
      return false;

    unsigned Opcode =
        STI.isFP64bit() ? Mips::ExtractElementF64_64 : Mips::ExtractElementF64;

    MachineInstr *ExtractLo = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                                  .addDef(Lo)
                                  .addUse(Src)
                                  .addImm(0);
    if (!constrainSelectedInstRegOperands(*ExtractLo, TII, TRI, RBI))
      return false;

    MachineInstr *ExtractHi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                                  .addDef(Hi)
                                  .addUse(Src)
                                  .addImm(1);
    if (!constrainSelectedInstRegOperands(*ExtractHi, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_IMPLICIT_DEF: {
    Register Dst = I.getOperand(0).getReg();
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
             .addDef(Dst);

    // Set the register class based on the register bank; there can be FPR and
    // GPR implicit defs.
    MRI.setRegClass(Dst, getRegClassForTypeOnBank(Dst, MRI));
    break;
  }
  case G_CONSTANT: {
    MachineIRBuilder B(I);
    if (!materialize32BitImm(I.getOperand(0).getReg(),
                             I.getOperand(1).getCImm()->getValue(), B))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FCONSTANT: {
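    // Materialize the bit pattern of the FP constant in GPRs, then move it
    // into the FPR: MTC1 for 32-bit values, BuildPairF64 from two 32-bit
    // halves for 64-bit values.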
    const APFloat &FPimm = I.getOperand(1).getFPImm()->getValueAPF();
    APInt APImm = FPimm.bitcastToAPInt();
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

    if (Size == 32) {
      Register GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRReg, APImm, B))
        return false;

      MachineInstrBuilder MTC1 =
          B.buildInstr(Mips::MTC1, {I.getOperand(0).getReg()}, {GPRReg});
      if (!MTC1.constrainAllUses(TII, TRI, RBI))
        return false;
    }
    if (Size == 64) {
      Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      Register GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRRegHigh, APImm.getHiBits(32).trunc(32), B))
        return false;
      if (!materialize32BitImm(GPRRegLow, APImm.getLoBits(32).trunc(32), B))
        return false;

      MachineInstrBuilder PairF64 = B.buildInstr(
          STI.isFP64bit() ? Mips::BuildPairF64_64 : Mips::BuildPairF64,
          {I.getOperand(0).getReg()}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FABS: {
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    unsigned FABSOpcode =
        Size == 32 ? Mips::FABS_S
                   : STI.isFP64bit() ? Mips::FABS_D64 : Mips::FABS_D32;
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FABSOpcode))
             .add(I.getOperand(0))
             .add(I.getOperand(1));
    break;
  }
  case G_FPTOSI: {
    unsigned FromSize = MRI.getType(I.getOperand(1).getReg()).getSizeInBits();
    unsigned ToSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    (void)ToSize;
    assert((ToSize == 32) && "Unsupported integer size for G_FPTOSI");
    assert((FromSize == 32 || FromSize == 64) &&
           "Unsupported floating point size for G_FPTOSI");

    unsigned Opcode;
    if (FromSize == 32)
      Opcode = Mips::TRUNC_W_S;
    else
      Opcode = STI.isFP64bit() ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32;
    Register ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass);
    MachineInstr *Trunc = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                              .addDef(ResultInFPR)
                              .addUse(I.getOperand(1).getReg());
    if (!constrainSelectedInstRegOperands(*Trunc, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MFC1))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(ResultInFPR);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_GLOBAL_VALUE: {
    const llvm::GlobalValue *GVal = I.getOperand(1).getGlobal();
    if (MF.getTarget().isPositionIndependent()) {
      MachineInstr *LWGOT = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
                                .addDef(I.getOperand(0).getReg())
                                .addReg(MF.getInfo<MipsFunctionInfo>()
                                            ->getGlobalBaseRegForGlobalISel(MF))
                                .addGlobalAddress(GVal);
      // Global values that don't have local linkage are handled differently
      // when they are part of a call sequence. MipsCallLowering::lowerCall
      // creates the G_GLOBAL_VALUE instruction as part of the call sequence
      // and adds the MO_GOT_CALL flag when the callee doesn't have local
      // linkage.
      if (I.getOperand(1).getTargetFlags() == MipsII::MO_GOT_CALL)
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT_CALL);
      else
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT);
      LWGOT->addMemOperand(
          MF, MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
                                      MachineMemOperand::MOLoad, 4, Align(4)));
      if (!constrainSelectedInstRegOperands(*LWGOT, TII, TRI, RBI))
        return false;

      if (GVal->hasLocalLinkage()) {
        Register LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        LWGOT->getOperand(0).setReg(LWGOTDef);

        MachineInstr *ADDiu =
            BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
                .addDef(I.getOperand(0).getReg())
                .addReg(LWGOTDef)
                .addGlobalAddress(GVal);
        ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
        if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
          return false;
      }
    } else {
      Register LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);

      MachineInstr *LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
                              .addDef(LUiReg)
                              .addGlobalAddress(GVal);
      LUi->getOperand(1).setTargetFlags(MipsII::MO_ABS_HI);
      if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
        return false;

      MachineInstr *ADDiu =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
              .addDef(I.getOperand(0).getReg())
              .addUse(LUiReg)
              .addGlobalAddress(GVal);
      ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
      if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
        return false;
    }
    I.eraseFromParent();
    return true;
  }
  case G_JUMP_TABLE: {
    if (MF.getTarget().isPositionIndependent()) {
      MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
               .addDef(I.getOperand(0).getReg())
               .addReg(MF.getInfo<MipsFunctionInfo>()
                           ->getGlobalBaseRegForGlobalISel(MF))
               .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_GOT)
               .addMemOperand(MF.getMachineMemOperand(
                   MachinePointerInfo::getGOT(MF), MachineMemOperand::MOLoad, 4,
                   Align(4)));
    } else {
      MI =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
              .addDef(I.getOperand(0).getReg())
              .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_HI);
    }
    break;
  }
  case G_ICMP: {
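    // Integer compares are expanded into short SLT/SLTu/SLTiu sequences that
    // leave 0 or 1 in the result register; some predicates compute the
    // opposite relation and invert it with XORi.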
    struct Instr {
      unsigned Opcode;
      Register Def, LHS, RHS;
      Instr(unsigned Opcode, Register Def, Register LHS, Register RHS)
          : Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS){};

      bool hasImm() const {
        if (Opcode == Mips::SLTiu || Opcode == Mips::XORi)
          return true;
        return false;
      }
    };

    SmallVector<struct Instr, 2> Instructions;
    Register ICMPReg = I.getOperand(0).getReg();
    Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LHS = I.getOperand(2).getReg();
    Register RHS = I.getOperand(3).getReg();
    CmpInst::Predicate Cond =
        static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());

    switch (Cond) {
    case CmpInst::ICMP_EQ: // LHS == RHS -> (LHS ^ RHS) < 1
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTiu, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_NE: // LHS != RHS -> 0 < (LHS ^ RHS)
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTu, ICMPReg, Mips::ZERO, Temp);
      break;
    case CmpInst::ICMP_UGT: // LHS > RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_UGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLTu, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_ULT: // LHS < RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_ULE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLTu, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SGT: // LHS > RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_SGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLT, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SLT: // LHS < RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_SLE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLT, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    default:
      return false;
    }

    MachineIRBuilder B(I);
    for (const struct Instr &Instruction : Instructions) {
      MachineInstrBuilder MIB = B.buildInstr(
          Instruction.Opcode, {Instruction.Def}, {Instruction.LHS});

      if (Instruction.hasImm())
        MIB.addImm(Instruction.RHS);
      else
        MIB.addUse(Instruction.RHS);

      if (!MIB.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FCMP: {
    unsigned MipsFCMPCondCode;
    bool isLogicallyNegated;
    switch (CmpInst::Predicate Cond = static_cast<CmpInst::Predicate>(
                I.getOperand(1).getPredicate())) {
    case CmpInst::FCMP_UNO: // Unordered
    case CmpInst::FCMP_ORD: // Ordered (OR)
      MipsFCMPCondCode = Mips::FCOND_UN;
      isLogicallyNegated = Cond != CmpInst::FCMP_UNO;
      break;
    case CmpInst::FCMP_OEQ: // Equal
    case CmpInst::FCMP_UNE: // Not Equal (NEQ)
      MipsFCMPCondCode = Mips::FCOND_OEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_OEQ;
      break;
    case CmpInst::FCMP_UEQ: // Unordered or Equal
    case CmpInst::FCMP_ONE: // Ordered or Greater Than or Less Than (OGL)
      MipsFCMPCondCode = Mips::FCOND_UEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_UEQ;
      break;
    case CmpInst::FCMP_OLT: // Ordered or Less Than
    case CmpInst::FCMP_UGE: // Unordered or Greater Than or Equal (UGE)
      MipsFCMPCondCode = Mips::FCOND_OLT;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLT;
      break;
    case CmpInst::FCMP_ULT: // Unordered or Less Than
    case CmpInst::FCMP_OGE: // Ordered or Greater Than or Equal (OGE)
      MipsFCMPCondCode = Mips::FCOND_ULT;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULT;
      break;
    case CmpInst::FCMP_OLE: // Ordered or Less Than or Equal
    case CmpInst::FCMP_UGT: // Unordered or Greater Than (UGT)
      MipsFCMPCondCode = Mips::FCOND_OLE;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLE;
      break;
    case CmpInst::FCMP_ULE: // Unordered or Less Than or Equal
    case CmpInst::FCMP_OGT: // Ordered or Greater Than (OGT)
      MipsFCMPCondCode = Mips::FCOND_ULE;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULE;
      break;
    default:
      return false;
    }

    // The default compare result in the GPR will be `true`. We move `false`
    // (Mips::ZERO) into the GPR result when the fcmp gives false, using
    // MOVF_I. When the original predicate (Cond) is the logically negated
    // MipsFCMPCondCode, the result is inverted, i.e. MOVT_I is used.
    unsigned MoveOpcode = isLogicallyNegated ? Mips::MOVT_I : Mips::MOVF_I;

    Register TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
        .addDef(TrueInReg)
        .addUse(Mips::ZERO)
        .addImm(1);

    unsigned Size = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    unsigned FCMPOpcode =
        Size == 32 ? Mips::FCMP_S32
                   : STI.isFP64bit() ? Mips::FCMP_D64 : Mips::FCMP_D32;
    MachineInstr *FCMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FCMPOpcode))
                             .addUse(I.getOperand(2).getReg())
                             .addUse(I.getOperand(3).getReg())
                             .addImm(MipsFCMPCondCode);
    if (!constrainSelectedInstRegOperands(*FCMP, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(MoveOpcode))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(Mips::ZERO)
                             .addUse(Mips::FCC0)
                             .addUse(TrueInReg);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FENCE: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SYNC)).addImm(0);
    break;
  }
  case G_VASTART: {
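    // va_start: compute the address of the first variadic argument save slot
    // and store it into the va_list object pointed to by operand 0.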
    MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
    int FI = FuncInfo->getVarArgsFrameIndex();

    Register LeaReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LEA_ADDiu =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LEA_ADDiu))
            .addDef(LeaReg)
            .addFrameIndex(FI)
            .addImm(0);
    if (!constrainSelectedInstRegOperands(*LEA_ADDiu, TII, TRI, RBI))
      return false;

    MachineInstr *Store = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SW))
                              .addUse(LeaReg)
                              .addUse(I.getOperand(0).getReg())
                              .addImm(0);
    if (!constrainSelectedInstRegOperands(*Store, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  default:
    return false;
  }

  I.eraseFromParent();
  return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
}

namespace llvm {
InstructionSelector *
createMipsInstructionSelector(const MipsTargetMachine &TM,
                              const MipsSubtarget &Subtarget,
                              const MipsRegisterBankInfo &RBI) {
  return new MipsInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm