//===-- SparcInstrInfo.cpp - Sparc Instruction Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Sparc implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "SparcInstrInfo.h"
#include "Sparc.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcSubtarget.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "SparcGenInstrInfo.inc"

static cl::opt<unsigned> BPccDisplacementBits(
    "sparc-bpcc-offset-bits", cl::Hidden, cl::init(19),
    cl::desc("Restrict range of BPcc/FBPfcc instructions (DEBUG)"));

static cl::opt<unsigned>
    BPrDisplacementBits("sparc-bpr-offset-bits", cl::Hidden, cl::init(16),
                        cl::desc("Restrict range of BPr instructions (DEBUG)"));

// Pin the vtable to this file.
void SparcInstrInfo::anchor() {}

SparcInstrInfo::SparcInstrInfo(SparcSubtarget &ST)
    : SparcGenInstrInfo(SP::ADJCALLSTACKDOWN, SP::ADJCALLSTACKUP), RI(),
      Subtarget(ST) {}

/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
Register SparcInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                             int &FrameIndex) const {
  if (MI.getOpcode() == SP::LDri || MI.getOpcode() == SP::LDXri ||
      MI.getOpcode() == SP::LDFri || MI.getOpcode() == SP::LDDFri ||
      MI.getOpcode() == SP::LDQFri) {
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
  }
  return 0;
}

/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source reg along with the FrameIndex of the stored-to stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
Register SparcInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex) const {
  if (MI.getOpcode() == SP::STri || MI.getOpcode() == SP::STXri ||
      MI.getOpcode() == SP::STFri || MI.getOpcode() == SP::STDFri ||
      MI.getOpcode() == SP::STQFri) {
    if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
        MI.getOperand(1).getImm() == 0) {
      FrameIndex = MI.getOperand(0).getIndex();
      return MI.getOperand(2).getReg();
    }
  }
  return 0;
}

static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
{
  switch(CC) {
  case SPCC::ICC_A:    return SPCC::ICC_N;
  case SPCC::ICC_N:    return SPCC::ICC_A;
  case SPCC::ICC_NE:   return SPCC::ICC_E;
  case SPCC::ICC_E:    return SPCC::ICC_NE;
  case SPCC::ICC_G:    return SPCC::ICC_LE;
  case SPCC::ICC_LE:   return SPCC::ICC_G;
  case SPCC::ICC_GE:   return SPCC::ICC_L;
  case SPCC::ICC_L:    return SPCC::ICC_GE;
  case SPCC::ICC_GU:   return SPCC::ICC_LEU;
  case SPCC::ICC_LEU:  return SPCC::ICC_GU;
  case SPCC::ICC_CC:   return SPCC::ICC_CS;
  case SPCC::ICC_CS:   return SPCC::ICC_CC;
  case SPCC::ICC_POS:  return SPCC::ICC_NEG;
  case SPCC::ICC_NEG:  return SPCC::ICC_POS;
  case SPCC::ICC_VC:   return SPCC::ICC_VS;
  case SPCC::ICC_VS:   return SPCC::ICC_VC;

  case SPCC::FCC_A:    return SPCC::FCC_N;
  case SPCC::FCC_N:    return SPCC::FCC_A;
  case SPCC::FCC_U:    return SPCC::FCC_O;
  case SPCC::FCC_O:    return SPCC::FCC_U;
  case SPCC::FCC_G:    return SPCC::FCC_ULE;
  case SPCC::FCC_LE:   return SPCC::FCC_UG;
  case SPCC::FCC_UG:   return SPCC::FCC_LE;
  case SPCC::FCC_ULE:  return SPCC::FCC_G;
  case SPCC::FCC_L:    return SPCC::FCC_UGE;
  case SPCC::FCC_GE:   return SPCC::FCC_UL;
  case SPCC::FCC_UL:   return SPCC::FCC_GE;
  case SPCC::FCC_UGE:  return SPCC::FCC_L;
  case SPCC::FCC_LG:   return SPCC::FCC_UE;
  case SPCC::FCC_UE:   return SPCC::FCC_LG;
  case SPCC::FCC_NE:   return SPCC::FCC_E;
  case SPCC::FCC_E:    return SPCC::FCC_NE;

  case SPCC::CPCC_A:   return SPCC::CPCC_N;
  case SPCC::CPCC_N:   return SPCC::CPCC_A;
  case SPCC::CPCC_3:   [[fallthrough]];
  case SPCC::CPCC_2:   [[fallthrough]];
  case SPCC::CPCC_23:  [[fallthrough]];
  case SPCC::CPCC_1:   [[fallthrough]];
  case SPCC::CPCC_13:  [[fallthrough]];
  case SPCC::CPCC_12:  [[fallthrough]];
  case SPCC::CPCC_123: [[fallthrough]];
  case SPCC::CPCC_0:   [[fallthrough]];
  case SPCC::CPCC_03:  [[fallthrough]];
  case SPCC::CPCC_02:  [[fallthrough]];
  case SPCC::CPCC_023: [[fallthrough]];
  case SPCC::CPCC_01:  [[fallthrough]];
  case SPCC::CPCC_013: [[fallthrough]];
  case SPCC::CPCC_012:
    // "Opposite" code is not meaningful, as we don't know
    // what the CoProc condition means here. The cond-code will
    // only be used in inline assembler, so this code should
    // not be reached in a normal compilation pass.
    llvm_unreachable("Meaningless inversion of co-processor cond code");

  case SPCC::REG_BEGIN:
    llvm_unreachable("Use of reserved cond code");
  case SPCC::REG_Z:
    return SPCC::REG_NZ;
  case SPCC::REG_LEZ:
    return SPCC::REG_GZ;
  case SPCC::REG_LZ:
    return SPCC::REG_GEZ;
  case SPCC::REG_NZ:
    return SPCC::REG_Z;
  case SPCC::REG_GZ:
    return SPCC::REG_LEZ;
  case SPCC::REG_GEZ:
    return SPCC::REG_LZ;
  }
  llvm_unreachable("Invalid cond code");
}

static bool isUncondBranchOpcode(int Opc) { return Opc == SP::BA; }

static bool isI32CondBranchOpcode(int Opc) {
  return Opc == SP::BCOND || Opc == SP::BPICC || Opc == SP::BPICCA ||
         Opc == SP::BPICCNT || Opc == SP::BPICCANT;
}

static bool isI64CondBranchOpcode(int Opc) {
  return Opc == SP::BPXCC || Opc == SP::BPXCCA || Opc == SP::BPXCCNT ||
         Opc == SP::BPXCCANT;
}

static bool isRegCondBranchOpcode(int Opc) {
  return Opc == SP::BPR || Opc == SP::BPRA || Opc == SP::BPRNT ||
         Opc == SP::BPRANT;
}

static bool isFCondBranchOpcode(int Opc) {
  return Opc == SP::FBCOND || Opc == SP::FBCONDA || Opc == SP::FBCOND_V9 ||
         Opc == SP::FBCONDA_V9;
}

static bool isCondBranchOpcode(int Opc) {
  return isI32CondBranchOpcode(Opc) || isI64CondBranchOpcode(Opc) ||
         isRegCondBranchOpcode(Opc) || isFCondBranchOpcode(Opc);
}

static bool isIndirectBranchOpcode(int Opc) {
  return Opc == SP::BINDrr || Opc == SP::BINDri;
}

static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  unsigned Opc = LastInst->getOpcode();
  int64_t CC = LastInst->getOperand(1).getImm();

  // Push the branch opcode into Cond too so later in insertBranch
  // it can use the information to emit the correct SPARC branch opcode.
  Cond.push_back(MachineOperand::CreateImm(Opc));
  Cond.push_back(MachineOperand::CreateImm(CC));

  // A branch on register contents needs another operand to indicate
  // the register it branches on.
  if (isRegCondBranchOpcode(Opc)) {
    Register Reg = LastInst->getOperand(2).getReg();
    Cond.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
  }

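  // Cond is now [opcode, cc] or [opcode, cc, reg]; insertBranch consumes its
  // components in this order.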
  Target = LastInst->getOperand(0).getMBB();
}

MachineBasicBlock *
SparcInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unexpected opcode!");
  case SP::BA:
  case SP::BCOND:
  case SP::BCONDA:
  case SP::FBCOND:
  case SP::FBCONDA:
  case SP::BPICC:
  case SP::BPICCA:
  case SP::BPICCNT:
  case SP::BPICCANT:
  case SP::BPXCC:
  case SP::BPXCCA:
  case SP::BPXCCNT:
  case SP::BPXCCANT:
  case SP::BPFCC:
  case SP::BPFCCA:
  case SP::BPFCCNT:
  case SP::BPFCCANT:
  case SP::FBCOND_V9:
  case SP::FBCONDA_V9:
  case SP::BPR:
  case SP::BPRA:
  case SP::BPRNT:
  case SP::BPRANT:
    return MI.getOperand(0).getMBB();
  }
}

bool SparcInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  if (!isUnpredicatedTerminator(*I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;
  unsigned LastOpc = LastInst->getOpcode();

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = &*I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
        // Return now; the only remaining terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = &*I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an unconditional
  // branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

unsigned SparcInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                      MachineBasicBlock *TBB,
                                      MachineBasicBlock *FBB,
                                      ArrayRef<MachineOperand> Cond,
                                      const DebugLoc &DL,
                                      int *BytesAdded) const {
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() <= 3) &&
         "Sparc branch conditions should have at most three components!");

  if (Cond.empty()) {
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(SP::BA)).addMBB(TBB);
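    // A branch and its delay slot together occupy 8 bytes.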
    if (BytesAdded)
      *BytesAdded = 8;
    return 1;
  }

  // Conditional branch
  unsigned Opc = Cond[0].getImm();
  unsigned CC = Cond[1].getImm();
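  // Branch-on-register conditions carry the tested register as a third
  // component of Cond; see parseCondBranch.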
  if (isRegCondBranchOpcode(Opc)) {
    Register Reg = Cond[2].getReg();
    BuildMI(&MBB, DL, get(Opc)).addMBB(TBB).addImm(CC).addReg(Reg);
  } else {
    BuildMI(&MBB, DL, get(Opc)).addMBB(TBB).addImm(CC);
  }

  if (!FBB) {
    if (BytesAdded)
      *BytesAdded = 8;
    return 1;
  }

  BuildMI(&MBB, DL, get(SP::BA)).addMBB(FBB);
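  // Two branches, each followed by a delay slot, total 16 bytes.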
  if (BytesAdded)
    *BytesAdded = 16;
  return 2;
}

unsigned SparcInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;
  int Removed = 0;
  while (I != MBB.begin()) {
    --I;

    if (I->isDebugInstr())
      continue;

    if (!isCondBranchOpcode(I->getOpcode()) &&
        !isUncondBranchOpcode(I->getOpcode()))
      break; // Not a branch

    Removed += getInstSizeInBytes(*I);
    I->eraseFromParent();
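    // Erasing invalidates the iterator, so restart the scan from the end of
    // the block.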
    I = MBB.end();
    ++Count;
  }

  if (BytesRemoved)
    *BytesRemoved = Removed;
  return Count;
}

bool SparcInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() <= 3);
  SPCC::CondCodes CC = static_cast<SPCC::CondCodes>(Cond[1].getImm());
  Cond[1].setImm(GetOppositeBranchCondition(CC));
  return false;
}

bool SparcInstrInfo::isBranchOffsetInRange(unsigned BranchOpc,
                                           int64_t Offset) const {
  assert((Offset & 0b11) == 0 && "Malformed branch offset");
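  // Displacements are encoded as signed word (4-byte) offsets, hence the
  // shift by 2; a 19-bit displacement, for example, reaches roughly +/-1 MiB.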
  switch (BranchOpc) {
  case SP::BA:
  case SP::BCOND:
  case SP::BCONDA:
  case SP::FBCOND:
  case SP::FBCONDA:
    return isIntN(22, Offset >> 2);

  case SP::BPICC:
  case SP::BPICCA:
  case SP::BPICCNT:
  case SP::BPICCANT:
  case SP::BPXCC:
  case SP::BPXCCA:
  case SP::BPXCCNT:
  case SP::BPXCCANT:
  case SP::BPFCC:
  case SP::BPFCCA:
  case SP::BPFCCNT:
  case SP::BPFCCANT:
  case SP::FBCOND_V9:
  case SP::FBCONDA_V9:
    return isIntN(BPccDisplacementBits, Offset >> 2);

  case SP::BPR:
  case SP::BPRA:
  case SP::BPRNT:
  case SP::BPRANT:
    return isIntN(BPrDisplacementBits, Offset >> 2);
  }

  llvm_unreachable("Unknown branch instruction!");
}

void SparcInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I,
                                 const DebugLoc &DL, MCRegister DestReg,
                                 MCRegister SrcReg, bool KillSrc) const {
  unsigned numSubRegs = 0;
  unsigned movOpc = 0;
  const unsigned *subRegIdx = nullptr;
  bool ExtraG0 = false;

  const unsigned DW_SubRegsIdx[] = { SP::sub_even, SP::sub_odd };
  const unsigned DFP_FP_SubRegsIdx[] = { SP::sub_even, SP::sub_odd };
  const unsigned QFP_DFP_SubRegsIdx[] = { SP::sub_even64, SP::sub_odd64 };
  const unsigned QFP_FP_SubRegsIdx[] = { SP::sub_even, SP::sub_odd,
                                         SP::sub_odd64_then_sub_even,
                                         SP::sub_odd64_then_sub_odd };

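  // A plain integer move is an OR with %g0, the hardwired zero register.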
  if (SP::IntRegsRegClass.contains(DestReg, SrcReg))
    BuildMI(MBB, I, DL, get(SP::ORrr), DestReg).addReg(SP::G0)
      .addReg(SrcReg, getKillRegState(KillSrc));
  else if (SP::IntPairRegClass.contains(DestReg, SrcReg)) {
    subRegIdx = DW_SubRegsIdx;
    numSubRegs = 2;
    movOpc = SP::ORrr;
    ExtraG0 = true;
  } else if (SP::FPRegsRegClass.contains(DestReg, SrcReg))
    BuildMI(MBB, I, DL, get(SP::FMOVS), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
  else if (SP::DFPRegsRegClass.contains(DestReg, SrcReg)) {
    if (Subtarget.isV9()) {
      BuildMI(MBB, I, DL, get(SP::FMOVD), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      // Use two FMOVS instructions.
      subRegIdx = DFP_FP_SubRegsIdx;
      numSubRegs = 2;
      movOpc = SP::FMOVS;
    }
  } else if (SP::QFPRegsRegClass.contains(DestReg, SrcReg)) {
    if (Subtarget.isV9()) {
      if (Subtarget.hasHardQuad()) {
        BuildMI(MBB, I, DL, get(SP::FMOVQ), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // Use two FMOVD instructions.
        subRegIdx = QFP_DFP_SubRegsIdx;
        numSubRegs = 2;
        movOpc = SP::FMOVD;
      }
    } else {
      // Use four FMOVS instructions.
      subRegIdx = QFP_FP_SubRegsIdx;
      numSubRegs = 4;
      movOpc = SP::FMOVS;
    }
  } else if (SP::ASRRegsRegClass.contains(DestReg) &&
             SP::IntRegsRegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(SP::WRASRrr), DestReg)
        .addReg(SP::G0)
        .addReg(SrcReg, getKillRegState(KillSrc));
  } else if (SP::IntRegsRegClass.contains(DestReg) &&
             SP::ASRRegsRegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(SP::RDASR), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
  } else
    llvm_unreachable("Impossible reg-to-reg copy");

  if (numSubRegs == 0 || subRegIdx == nullptr || movOpc == 0)
    return;

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineInstr *MovMI = nullptr;

  for (unsigned i = 0; i != numSubRegs; ++i) {
    Register Dst = TRI->getSubReg(DestReg, subRegIdx[i]);
    Register Src = TRI->getSubReg(SrcReg, subRegIdx[i]);
    assert(Dst && Src && "Bad sub-register");

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(movOpc), Dst);
    if (ExtraG0)
      MIB.addReg(SP::G0);
    MIB.addReg(Src);
    MovMI = MIB.getInstr();
  }
  // Add implicit super-register defs and kills to the last MovMI.
  MovMI->addRegisterDefined(DestReg, TRI);
  if (KillSrc)
    MovMI->addRegisterKilled(SrcReg, TRI);
}

void SparcInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         Register SrcReg, bool isKill, int FI,
                                         const TargetRegisterClass *RC,
                                         const TargetRegisterInfo *TRI,
                                         Register VReg) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  // On the order of operands here: think "[FrameIdx + 0] = SrcReg".
  if (RC == &SP::I64RegsRegClass)
    BuildMI(MBB, I, DL, get(SP::STXri)).addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
  else if (RC == &SP::IntRegsRegClass)
    BuildMI(MBB, I, DL, get(SP::STri)).addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
  else if (RC == &SP::IntPairRegClass)
    BuildMI(MBB, I, DL, get(SP::STDri)).addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
  else if (RC == &SP::FPRegsRegClass)
    BuildMI(MBB, I, DL, get(SP::STFri)).addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
  else if (SP::DFPRegsRegClass.hasSubClassEq(RC))
    BuildMI(MBB, I, DL, get(SP::STDFri)).addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
  else if (SP::QFPRegsRegClass.hasSubClassEq(RC))
    // Use STQFri irrespective of its legality. If STQ is not legal, it will be
    // lowered into two STDs in eliminateFrameIndex.
    BuildMI(MBB, I, DL, get(SP::STQFri)).addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
  else
    llvm_unreachable("Can't store this register to stack slot");
}

void SparcInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator I,
                                          Register DestReg, int FI,
                                          const TargetRegisterClass *RC,
                                          const TargetRegisterInfo *TRI,
                                          Register VReg) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  if (RC == &SP::I64RegsRegClass)
    BuildMI(MBB, I, DL, get(SP::LDXri), DestReg).addFrameIndex(FI).addImm(0)
      .addMemOperand(MMO);
  else if (RC == &SP::IntRegsRegClass)
    BuildMI(MBB, I, DL, get(SP::LDri), DestReg).addFrameIndex(FI).addImm(0)
      .addMemOperand(MMO);
  else if (RC == &SP::IntPairRegClass)
    BuildMI(MBB, I, DL, get(SP::LDDri), DestReg).addFrameIndex(FI).addImm(0)
      .addMemOperand(MMO);
  else if (RC == &SP::FPRegsRegClass)
    BuildMI(MBB, I, DL, get(SP::LDFri), DestReg).addFrameIndex(FI).addImm(0)
      .addMemOperand(MMO);
  else if (SP::DFPRegsRegClass.hasSubClassEq(RC))
    BuildMI(MBB, I, DL, get(SP::LDDFri), DestReg).addFrameIndex(FI).addImm(0)
      .addMemOperand(MMO);
  else if (SP::QFPRegsRegClass.hasSubClassEq(RC))
    // Use LDQFri irrespective of its legality. If LDQ is not legal, it will be
    // lowered into two LDDs in eliminateFrameIndex.
    BuildMI(MBB, I, DL, get(SP::LDQFri), DestReg).addFrameIndex(FI).addImm(0)
      .addMemOperand(MMO);
  else
    llvm_unreachable("Can't load this register from stack slot");
}

Register SparcInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
  SparcMachineFunctionInfo *SparcFI = MF->getInfo<SparcMachineFunctionInfo>();
  Register GlobalBaseReg = SparcFI->getGlobalBaseReg();
  if (GlobalBaseReg)
    return GlobalBaseReg;

  // Insert the set of GlobalBaseReg into the first MBB of the function.
  MachineBasicBlock &FirstMBB = MF->front();
  MachineBasicBlock::iterator MBBI = FirstMBB.begin();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();

  const TargetRegisterClass *PtrRC =
      Subtarget.is64Bit() ? &SP::I64RegsRegClass : &SP::IntRegsRegClass;
  GlobalBaseReg = RegInfo.createVirtualRegister(PtrRC);

  DebugLoc dl;

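  // GETPCX is a pseudo-instruction that is expanded later in the backend into
  // the call-based sequence that materializes the current PC.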
  BuildMI(FirstMBB, MBBI, dl, get(SP::GETPCX), GlobalBaseReg);
  SparcFI->setGlobalBaseReg(GlobalBaseReg);
  return GlobalBaseReg;
}

unsigned SparcInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  unsigned Opcode = MI.getOpcode();

  if (MI.isInlineAsm()) {
    const MachineFunction *MF = MI.getParent()->getParent();
    const char *AsmStr = MI.getOperand(0).getSymbolName();
    return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
  }

  // If the instruction has a delay slot, be conservative and also include
  // it for sizing purposes. This is done so that the BranchRelaxation pass
  // will not mistakenly mark out-of-range branches as in-range.
  if (MI.hasDelaySlot())
    return get(Opcode).getSize() * 2;
  return get(Opcode).getSize();
}

bool SparcInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::LOAD_STACK_GUARD: {
    assert(Subtarget.isTargetLinux() &&
           "Only Linux target is expected to contain LOAD_STACK_GUARD");
    // offsetof(tcbhead_t, stack_guard) from sysdeps/sparc/nptl/tls.h in glibc.
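    // %g7 holds the thread pointer under the SPARC Linux ABI, so the guard is
    // loaded at a fixed offset from it.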
    const int64_t Offset = Subtarget.is64Bit() ? 0x28 : 0x14;
    MI.setDesc(get(Subtarget.is64Bit() ? SP::LDXri : SP::LDri));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addReg(SP::G7)
        .addImm(Offset);
    return true;
  }
  }
  return false;
}