//===-- VEInstrInfo.cpp - VE Instruction Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the VE implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "VEInstrInfo.h"
#include "VE.h"
#include "VEMachineFunctionInfo.h"
#include "VESubtarget.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"

#define DEBUG_TYPE "ve-instr-info"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "VEGenInstrInfo.inc"

// Pin the vtable to this file.
void VEInstrInfo::anchor() {}

VEInstrInfo::VEInstrInfo(VESubtarget &ST)
    : VEGenInstrInfo(VE::ADJCALLSTACKDOWN, VE::ADJCALLSTACKUP), RI() {}

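// Integer condition codes are enumerated before VECC::CC_AF, so any value
// below it denotes an integer comparison.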
static bool IsIntegerCC(unsigned CC) { return (CC < VECC::CC_AF); }

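// Return the condition code testing the inverted relation, e.g. "greater"
// becomes "less than or equal".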
static VECC::CondCode GetOppositeBranchCondition(VECC::CondCode CC) {
  switch (CC) {
  case VECC::CC_IG:
    return VECC::CC_ILE;
  case VECC::CC_IL:
    return VECC::CC_IGE;
  case VECC::CC_INE:
    return VECC::CC_IEQ;
  case VECC::CC_IEQ:
    return VECC::CC_INE;
  case VECC::CC_IGE:
    return VECC::CC_IL;
  case VECC::CC_ILE:
    return VECC::CC_IG;
  case VECC::CC_AF:
    return VECC::CC_AT;
  case VECC::CC_G:
    return VECC::CC_LENAN;
  case VECC::CC_L:
    return VECC::CC_GENAN;
  case VECC::CC_NE:
    return VECC::CC_EQNAN;
  case VECC::CC_EQ:
    return VECC::CC_NENAN;
  case VECC::CC_GE:
    return VECC::CC_LNAN;
  case VECC::CC_LE:
    return VECC::CC_GNAN;
  case VECC::CC_NUM:
    return VECC::CC_NAN;
  case VECC::CC_NAN:
    return VECC::CC_NUM;
  case VECC::CC_GNAN:
    return VECC::CC_LE;
  case VECC::CC_LNAN:
    return VECC::CC_GE;
  case VECC::CC_NENAN:
    return VECC::CC_EQ;
  case VECC::CC_EQNAN:
    return VECC::CC_NE;
  case VECC::CC_GENAN:
    return VECC::CC_L;
  case VECC::CC_LENAN:
    return VECC::CC_G;
  case VECC::CC_AT:
    return VECC::CC_AF;
  case VECC::UNKNOWN:
    return VECC::UNKNOWN;
  }
  llvm_unreachable("Invalid cond code");
}

// Treat a branch-relative-long-always instruction (e.g. br.l.t and br.l) as
// an unconditional branch.
static bool isUncondBranchOpcode(int Opc) {
  using namespace llvm::VE;

#define BRKIND(NAME) (Opc == NAME##a || Opc == NAME##a_nt || Opc == NAME##a_t)
  // VE has other branch-relative-always instructions for word/double/float,
  // but we use only long branches in our lowering, so check that here.
  assert(!BRKIND(BRCFW) && !BRKIND(BRCFD) && !BRKIND(BRCFS) &&
         "Branch relative word/double/float always instructions should not be "
         "used!");
  return BRKIND(BRCFL);
#undef BRKIND
}

// Treat branch-relative-conditional instructions (e.g. brgt.l.t and
// brle.s.nt) as conditional branches.
static bool isCondBranchOpcode(int Opc) {
  using namespace llvm::VE;

#define BRKIND(NAME)                                                           \
  (Opc == NAME##rr || Opc == NAME##rr_nt || Opc == NAME##rr_t ||               \
   Opc == NAME##ir || Opc == NAME##ir_nt || Opc == NAME##ir_t)
  return BRKIND(BRCFL) || BRKIND(BRCFW) || BRKIND(BRCFD) || BRKIND(BRCFS);
#undef BRKIND
}

// Treat branch-long-always instructions (e.g. b.l.t and b.l) as indirect
// branches.
static bool isIndirectBranchOpcode(int Opc) {
  using namespace llvm::VE;

#define BRKIND(NAME)                                                           \
  (Opc == NAME##ari || Opc == NAME##ari_nt || Opc == NAME##ari_t)
  // VE has other branch-always instructions for word/double/float, but
  // we use only long branches in our lowering, so check that here.
  assert(!BRKIND(BCFW) && !BRKIND(BCFD) && !BRKIND(BCFS) &&
         "Branch word/double/float always instructions should not be used!");
  return BRKIND(BCFL);
#undef BRKIND
}

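// Decompose a conditional branch: operand 0 is the condition code, operands 1
// and 2 are the compared values, and operand 3 is the target block.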
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  Cond.push_back(MachineOperand::CreateImm(LastInst->getOperand(0).getImm()));
  Cond.push_back(LastInst->getOperand(1));
  Cond.push_back(LastInst->getOperand(2));
  Target = LastInst->getOperand(3).getMBB();
}

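// Analyze the branching code at the end of MBB.  Return false and fill in
// TBB/FBB/Cond when the terminator sequence is understood; return true when
// it cannot be analyzed (e.g. indirect branches).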
bool VEInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  if (!isUnpredicatedTerminator(*I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;
  unsigned LastOpc = LastInst->getOpcode();

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = &*I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
        // Return now; the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      }
      SecondLastInst = &*I;
      SecondLastOpc = SecondLastInst->getOpcode();
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it.  The second
  // one is not executed.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an unconditional
  // branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

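// Insert branch code at the end of MBB: an unconditional branch to TBB when
// Cond is empty, otherwise a conditional branch (plus an unconditional branch
// to FBB if one is given).  Return the number of instructions inserted.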
unsigned VEInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *TBB,
                                   MachineBasicBlock *FBB,
                                   ArrayRef<MachineOperand> Cond,
                                   const DebugLoc &DL, int *BytesAdded) const {
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "VE branch conditions should have three components!");
  assert(!BytesAdded && "code size not handled");
  if (Cond.empty()) {
    // Unconditional branch
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(VE::BRCFLa_t))
        .addMBB(TBB);
    return 1;
  }

  // Conditional branch
  //   (BRCFir CC sy sz addr)
  assert(Cond[0].isImm() && Cond[2].isReg() && "not implemented");

  unsigned opc[2];
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineFunction *MF = MBB.getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  Register Reg = Cond[2].getReg();
  if (IsIntegerCC(Cond[0].getImm())) {
    if (TRI->getRegSizeInBits(Reg, MRI) == 32) {
      opc[0] = VE::BRCFWir;
      opc[1] = VE::BRCFWrr;
    } else {
      opc[0] = VE::BRCFLir;
      opc[1] = VE::BRCFLrr;
    }
  } else {
    if (TRI->getRegSizeInBits(Reg, MRI) == 32) {
      opc[0] = VE::BRCFSir;
      opc[1] = VE::BRCFSrr;
    } else {
      opc[0] = VE::BRCFDir;
      opc[1] = VE::BRCFDrr;
    }
  }
  if (Cond[1].isImm()) {
    BuildMI(&MBB, DL, get(opc[0]))
        .add(Cond[0]) // condition code
        .add(Cond[1]) // lhs
        .add(Cond[2]) // rhs
        .addMBB(TBB);
  } else {
    BuildMI(&MBB, DL, get(opc[1]))
        .add(Cond[0])
        .add(Cond[1])
        .add(Cond[2])
        .addMBB(TBB);
  }

  if (!FBB)
    return 1;

  BuildMI(&MBB, DL, get(VE::BRCFLa_t))
      .addMBB(FBB);
  return 2;
}

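// Erase the branch instructions at the end of MBB and return how many were
// removed.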
unsigned VEInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                   int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;
  while (I != MBB.begin()) {
    --I;

    if (I->isDebugValue())
      continue;

    if (!isUncondBranchOpcode(I->getOpcode()) &&
        !isCondBranchOpcode(I->getOpcode()))
      break; // Not a branch

    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }
  return Count;
}

bool VEInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  VECC::CondCode CC = static_cast<VECC::CondCode>(Cond[0].getImm());
  Cond[0].setImm(GetOppositeBranchCondition(CC));
  return false;
}

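// Return true if Reg aliases a scalar %s register, i.e. it belongs to the
// I32, I64, or F32 register class.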
static bool IsAliasOfSX(Register Reg) {
  return VE::I32RegClass.contains(Reg) || VE::I64RegClass.contains(Reg) ||
         VE::F32RegClass.contains(Reg);
}

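// Copy a register pair one sub-register at a time using MCID (ORri for scalar
// pairs, ANDMmm for vector-mask pairs), then mark the last generated copy with
// the implicit super-register def and kill operands.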
static void copyPhysSubRegs(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator I, const DebugLoc &DL,
                            MCRegister DestReg, MCRegister SrcReg, bool KillSrc,
                            const MCInstrDesc &MCID, unsigned int NumSubRegs,
                            const unsigned *SubRegIdx,
                            const TargetRegisterInfo *TRI) {
  MachineInstr *MovMI = nullptr;

  for (unsigned Idx = 0; Idx != NumSubRegs; ++Idx) {
    Register SubDest = TRI->getSubReg(DestReg, SubRegIdx[Idx]);
    Register SubSrc = TRI->getSubReg(SrcReg, SubRegIdx[Idx]);
    assert(SubDest && SubSrc && "Bad sub-register");

    if (MCID.getOpcode() == VE::ORri) {
      // Generate an "ORri dest, src, 0" instruction.
      MachineInstrBuilder MIB =
          BuildMI(MBB, I, DL, MCID, SubDest).addReg(SubSrc).addImm(0);
      MovMI = MIB.getInstr();
    } else if (MCID.getOpcode() == VE::ANDMmm) {
      // Generate an "ANDM dest, vm0, src" instruction.
      MachineInstrBuilder MIB =
          BuildMI(MBB, I, DL, MCID, SubDest).addReg(VE::VM0).addReg(SubSrc);
      MovMI = MIB.getInstr();
    } else {
      llvm_unreachable("Unexpected reg-to-reg copy instruction");
    }
  }
  // Add implicit super-register defs and kills to the last MovMI.
  MovMI->addRegisterDefined(DestReg, TRI);
  if (KillSrc)
    MovMI->addRegisterKilled(SrcReg, TRI, true);
}

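// Emit a register-to-register copy, choosing the instruction sequence from the
// register classes of DestReg and SrcReg.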
void VEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I, const DebugLoc &DL,
                              MCRegister DestReg, MCRegister SrcReg,
                              bool KillSrc) const {

  if (IsAliasOfSX(SrcReg) && IsAliasOfSX(DestReg)) {
    BuildMI(MBB, I, DL, get(VE::ORri), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addImm(0);
  } else if (VE::V64RegClass.contains(DestReg, SrcReg)) {
    // Generate the following instructions:
    //   %sw16 = LEA32zii 256
    //   VORmvl %dest, (0)1, %src, %sw16
    // TODO: reuse a register if vl is already assigned to a register
    // FIXME: it would be better to scavenge a register here instead of
    //        reserving SX16 all of the time.
    const TargetRegisterInfo *TRI = &getRegisterInfo();
    Register TmpReg = VE::SX16;
    Register SubTmp = TRI->getSubReg(TmpReg, VE::sub_i32);
    BuildMI(MBB, I, DL, get(VE::LEAzii), TmpReg)
        .addImm(0)
        .addImm(0)
        .addImm(256);
    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(VE::VORmvl), DestReg)
                                  .addImm(M1(0)) // Represent (0)1.
                                  .addReg(SrcReg, getKillRegState(KillSrc))
                                  .addReg(SubTmp, getKillRegState(true));
    MIB.getInstr()->addRegisterKilled(TmpReg, TRI, true);
  } else if (VE::VMRegClass.contains(DestReg, SrcReg)) {
    BuildMI(MBB, I, DL, get(VE::ANDMmm), DestReg)
        .addReg(VE::VM0)
        .addReg(SrcReg, getKillRegState(KillSrc));
  } else if (VE::VM512RegClass.contains(DestReg, SrcReg)) {
    // Use two instructions.
    const unsigned SubRegIdx[] = {VE::sub_vm_even, VE::sub_vm_odd};
    unsigned int NumSubRegs = 2;
    copyPhysSubRegs(MBB, I, DL, DestReg, SrcReg, KillSrc, get(VE::ANDMmm),
                    NumSubRegs, SubRegIdx, &getRegisterInfo());
  } else if (VE::F128RegClass.contains(DestReg, SrcReg)) {
    // Use two instructions.
    const unsigned SubRegIdx[] = {VE::sub_even, VE::sub_odd};
    unsigned int NumSubRegs = 2;
    copyPhysSubRegs(MBB, I, DL, DestReg, SrcReg, KillSrc, get(VE::ORri),
                    NumSubRegs, SubRegIdx, &getRegisterInfo());
  } else {
    const TargetRegisterInfo *TRI = &getRegisterInfo();
    dbgs() << "Impossible reg-to-reg copy from " << printReg(SrcReg, TRI)
           << " to " << printReg(DestReg, TRI) << "\n";
    llvm_unreachable("Impossible reg-to-reg copy");
  }
}

/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
Register VEInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                          int &FrameIndex) const {
  if (MI.getOpcode() == VE::LDrii ||    // I64
      MI.getOpcode() == VE::LDLSXrii || // I32
      MI.getOpcode() == VE::LDUrii ||   // F32
      MI.getOpcode() == VE::LDQrii ||   // F128 (pseudo)
      MI.getOpcode() == VE::LDVMrii ||  // VM (pseudo)
      MI.getOpcode() == VE::LDVM512rii  // VM512 (pseudo)
  ) {
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0 && MI.getOperand(3).isImm() &&
        MI.getOperand(3).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
  }
  return 0;
}

/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source reg along with the FrameIndex of the stored stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
Register VEInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                         int &FrameIndex) const {
  if (MI.getOpcode() == VE::STrii ||    // I64
      MI.getOpcode() == VE::STLrii ||   // I32
      MI.getOpcode() == VE::STUrii ||   // F32
      MI.getOpcode() == VE::STQrii ||   // F128 (pseudo)
      MI.getOpcode() == VE::STVMrii ||  // VM (pseudo)
      MI.getOpcode() == VE::STVM512rii  // VM512 (pseudo)
  ) {
    if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
        MI.getOperand(1).getImm() == 0 && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(0).getIndex();
      return MI.getOperand(3).getReg();
    }
  }
  return 0;
}

void VEInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
                                      Register SrcReg, bool isKill, int FI,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI,
                                      Register VReg) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  // On the order of operands here: think "[FrameIdx + 0] = SrcReg".
  if (RC == &VE::I64RegClass) {
    BuildMI(MBB, I, DL, get(VE::STrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (RC == &VE::I32RegClass) {
    BuildMI(MBB, I, DL, get(VE::STLrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (RC == &VE::F32RegClass) {
    BuildMI(MBB, I, DL, get(VE::STUrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (VE::F128RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::STQrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (RC == &VE::VMRegClass) {
    BuildMI(MBB, I, DL, get(VE::STVMrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (VE::VM512RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::STVM512rii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else
    report_fatal_error("Can't store this register to stack slot");
}

void VEInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator I,
                                       Register DestReg, int FI,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI,
                                       Register VReg) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  if (RC == &VE::I64RegClass) {
    BuildMI(MBB, I, DL, get(VE::LDrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (RC == &VE::I32RegClass) {
    BuildMI(MBB, I, DL, get(VE::LDLSXrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (RC == &VE::F32RegClass) {
    BuildMI(MBB, I, DL, get(VE::LDUrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (VE::F128RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::LDQrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (RC == &VE::VMRegClass) {
    BuildMI(MBB, I, DL, get(VE::LDVMrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (VE::VM512RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::LDVM512rii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else
    report_fatal_error("Can't load this register from stack slot");
}

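// Try to fold the immediate defined by DefMI (an ORim or LEAzii) directly into
// UseMI, rewriting an rr-form instruction into its ri/ir or rm form.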
bool VEInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                                Register Reg, MachineRegisterInfo *MRI) const {
  LLVM_DEBUG(dbgs() << "foldImmediate\n");

  LLVM_DEBUG(dbgs() << "checking DefMI\n");
  int64_t ImmVal;
  switch (DefMI.getOpcode()) {
  default:
    return false;
  case VE::ORim:
    // General move-small-immediate instruction on VE.
    LLVM_DEBUG(dbgs() << "checking ORim\n");
    LLVM_DEBUG(DefMI.dump());
    // FIXME: We may need to support FPImm too.
    assert(DefMI.getOperand(1).isImm());
    assert(DefMI.getOperand(2).isImm());
    ImmVal =
        DefMI.getOperand(1).getImm() + mimm2Val(DefMI.getOperand(2).getImm());
    LLVM_DEBUG(dbgs() << "ImmVal is " << ImmVal << "\n");
    break;
  case VE::LEAzii:
    // General move-immediate instruction on VE.
    LLVM_DEBUG(dbgs() << "checking LEAzii\n");
    LLVM_DEBUG(DefMI.dump());
    // FIXME: We may need to support FPImm too.
    assert(DefMI.getOperand(2).isImm());
    if (!DefMI.getOperand(3).isImm())
      // LEAzii may refer to a label.
      return false;
    ImmVal = DefMI.getOperand(2).getImm() + DefMI.getOperand(3).getImm();
    LLVM_DEBUG(dbgs() << "ImmVal is " << ImmVal << "\n");
    break;
  }

  // Try to fold like below:
  //   %1:i64 = ORim 0, 0(1)
  //   %2:i64 = CMPSLrr %0, %1
  // To
  //   %2:i64 = CMPSLrm %0, 0(1)
  //
  // Another example:
  //   %1:i64 = ORim 6, 0(1)
  //   %2:i64 = CMPSLrr %1, %0
  // To
  //   %2:i64 = CMPSLir 6, %0
  //
  // Support commutable instructions like below:
  //   %1:i64 = ORim 6, 0(1)
  //   %2:i64 = ADDSLrr %1, %0
  // To
  //   %2:i64 = ADDSLri %0, 6
  //
  // FIXME: Need to support i32.  The current implementation requires
  //        EXTRACT_SUBREG, so the input has the following COPY, which
  //        prevents folding:
  //   %1:i64 = ORim 6, 0(1)
  //   %2:i32 = COPY %1.sub_i32
  //   %3:i32 = ADDSWSXrr %0, %2
  // FIXME: Need to support shift, cmov, and more instructions.
  // FIXME: Need to support lvl too, but LVLGen runs after peephole-opt.

  LLVM_DEBUG(dbgs() << "checking UseMI\n");
  LLVM_DEBUG(UseMI.dump());
  unsigned NewUseOpcSImm7;
  unsigned NewUseOpcMImm;
  enum InstType {
    rr2ri_rm, // rr -> ri or rm, commutable
    rr2ir_rm, // rr -> ir or rm
  } InstType;

  using namespace llvm::VE;
#define INSTRKIND(NAME)                                                        \
  case NAME##rr:                                                               \
    NewUseOpcSImm7 = NAME##ri;                                                 \
    NewUseOpcMImm = NAME##rm;                                                  \
    InstType = rr2ri_rm;                                                       \
    break
#define NCINSTRKIND(NAME)                                                      \
  case NAME##rr:                                                               \
    NewUseOpcSImm7 = NAME##ir;                                                 \
    NewUseOpcMImm = NAME##rm;                                                  \
    InstType = rr2ir_rm;                                                       \
    break

  switch (UseMI.getOpcode()) {
  default:
    return false;

    INSTRKIND(ADDUL);
    INSTRKIND(ADDSWSX);
    INSTRKIND(ADDSWZX);
    INSTRKIND(ADDSL);
    NCINSTRKIND(SUBUL);
    NCINSTRKIND(SUBSWSX);
    NCINSTRKIND(SUBSWZX);
    NCINSTRKIND(SUBSL);
    INSTRKIND(MULUL);
    INSTRKIND(MULSWSX);
    INSTRKIND(MULSWZX);
    INSTRKIND(MULSL);
    NCINSTRKIND(DIVUL);
    NCINSTRKIND(DIVSWSX);
    NCINSTRKIND(DIVSWZX);
    NCINSTRKIND(DIVSL);
    NCINSTRKIND(CMPUL);
    NCINSTRKIND(CMPSWSX);
    NCINSTRKIND(CMPSWZX);
    NCINSTRKIND(CMPSL);
    INSTRKIND(MAXSWSX);
    INSTRKIND(MAXSWZX);
    INSTRKIND(MAXSL);
    INSTRKIND(MINSWSX);
    INSTRKIND(MINSWZX);
    INSTRKIND(MINSL);
    INSTRKIND(AND);
    INSTRKIND(OR);
    INSTRKIND(XOR);
    INSTRKIND(EQV);
    NCINSTRKIND(NND);
    NCINSTRKIND(MRG);
  }

#undef INSTRKIND
#undef NCINSTRKIND

  unsigned NewUseOpc;
  unsigned UseIdx;
  bool Commute = false;
  LLVM_DEBUG(dbgs() << "checking UseMI operands\n");
  switch (InstType) {
  case rr2ri_rm:
    UseIdx = 2;
    if (UseMI.getOperand(1).getReg() == Reg) {
      Commute = true;
    } else {
      assert(UseMI.getOperand(2).getReg() == Reg);
    }
    if (isInt<7>(ImmVal)) {
      // ImmVal fits in the simm7 slot, so switch UseOpc to the instruction
      // form that holds a simm7 operand.
      NewUseOpc = NewUseOpcSImm7;
    } else if (isMImmVal(ImmVal)) {
      // Similarly, switch UseOpc to the instruction form that holds an mimm
      // operand.
      NewUseOpc = NewUseOpcMImm;
      ImmVal = val2MImm(ImmVal);
    } else
      return false;
    break;
  case rr2ir_rm:
    if (UseMI.getOperand(1).getReg() == Reg) {
      // Check whether the immediate value matches the UseMI instruction.
      if (!isInt<7>(ImmVal))
        return false;
      NewUseOpc = NewUseOpcSImm7;
      UseIdx = 1;
    } else {
      assert(UseMI.getOperand(2).getReg() == Reg);
      // Check whether the immediate value matches the UseMI instruction.
      if (!isMImmVal(ImmVal))
        return false;
      NewUseOpc = NewUseOpcMImm;
      ImmVal = val2MImm(ImmVal);
      UseIdx = 2;
    }
    break;
  }

  LLVM_DEBUG(dbgs() << "modifying UseMI\n");
  bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
  UseMI.setDesc(get(NewUseOpc));
  if (Commute) {
    UseMI.getOperand(1).setReg(UseMI.getOperand(UseIdx).getReg());
  }
  UseMI.getOperand(UseIdx).ChangeToImmediate(ImmVal);
  if (DeleteDef)
    DefMI.eraseFromParent();

  return true;
}

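// Return the global base register, materializing it with a GETGOT pseudo in
// the entry block on first use.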
Register VEInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
  VEMachineFunctionInfo *VEFI = MF->getInfo<VEMachineFunctionInfo>();
  Register GlobalBaseReg = VEFI->getGlobalBaseReg();
  if (GlobalBaseReg != 0)
    return GlobalBaseReg;

  // We use %s15 (%got) as a global base register.
  GlobalBaseReg = VE::SX15;

  // Insert a pseudo instruction to set the GlobalBaseReg into the first
  // MBB of the function.
  MachineBasicBlock &FirstMBB = MF->front();
  MachineBasicBlock::iterator MBBI = FirstMBB.begin();
  DebugLoc dl;
  BuildMI(FirstMBB, MBBI, dl, get(VE::GETGOT), GlobalBaseReg);
  VEFI->setGlobalBaseReg(GlobalBaseReg);
  return GlobalBaseReg;
}

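// A VM512 register VMPn overlays the even/odd pair VM(2n) and VM(2n+1); these
// helpers return the upper (even) and lower (odd) halves respectively.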
static Register getVM512Upper(Register reg) {
  return (reg - VE::VMP0) * 2 + VE::VM0;
}

static Register getVM512Lower(Register reg) { return getVM512Upper(reg) + 1; }

// Expand pseudo logical vector instructions for VM512 registers.
static void expandPseudoLogM(MachineInstr &MI, const MCInstrDesc &MCID) {
  MachineBasicBlock *MBB = MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  Register VMXu = getVM512Upper(MI.getOperand(0).getReg());
  Register VMXl = getVM512Lower(MI.getOperand(0).getReg());
  Register VMYu = getVM512Upper(MI.getOperand(1).getReg());
  Register VMYl = getVM512Lower(MI.getOperand(1).getReg());

  switch (MI.getOpcode()) {
  default: {
    Register VMZu = getVM512Upper(MI.getOperand(2).getReg());
    Register VMZl = getVM512Lower(MI.getOperand(2).getReg());
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXu).addUse(VMYu).addUse(VMZu);
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXl).addUse(VMYl).addUse(VMZl);
    break;
  }
  case VE::NEGMy:
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXu).addUse(VMYu);
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXl).addUse(VMYl);
    break;
  }
  MI.eraseFromParent();
}

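// Append the operands of a pseudo VFMK instruction to MIB, selecting either
// the upper or the lower half of each VM512 operand.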
static void addOperandsForVFMK(MachineInstrBuilder &MIB, MachineInstr &MI,
                               bool Upper) {
  // VM512
  MIB.addReg(Upper ? getVM512Upper(MI.getOperand(0).getReg())
                   : getVM512Lower(MI.getOperand(0).getReg()));

  switch (MI.getNumExplicitOperands()) {
  default:
    report_fatal_error("unexpected number of operands for pvfmk");
  case 2: // _Ml: VM512, VL
    // VL
    MIB.addReg(MI.getOperand(1).getReg());
    break;
  case 4: // _Mvl: VM512, CC, VR, VL
    // CC
    MIB.addImm(MI.getOperand(1).getImm());
    // VR
    MIB.addReg(MI.getOperand(2).getReg());
    // VL
    MIB.addReg(MI.getOperand(3).getReg());
    break;
  case 5: // _MvMl: VM512, CC, VR, VM512, VL
    // CC
    MIB.addImm(MI.getOperand(1).getImm());
    // VR
    MIB.addReg(MI.getOperand(2).getReg());
    // VM512
    MIB.addReg(Upper ? getVM512Upper(MI.getOperand(3).getReg())
                     : getVM512Lower(MI.getOperand(3).getReg()));
    // VL
    MIB.addReg(MI.getOperand(4).getReg());
    break;
  }
}

static void expandPseudoVFMK(const TargetInstrInfo &TI, MachineInstr &MI) {
  // Replace VFMKW* pseudos with pvfmk.w.up and pvfmk.w.lo, and VFMKS* pseudos
  // with pvfmk.s.up and pvfmk.s.lo.

  static const std::pair<unsigned, std::pair<unsigned, unsigned>> VFMKMap[] = {
      {VE::VFMKyal, {VE::VFMKLal, VE::VFMKLal}},
      {VE::VFMKynal, {VE::VFMKLnal, VE::VFMKLnal}},
      {VE::VFMKWyvl, {VE::PVFMKWUPvl, VE::PVFMKWLOvl}},
      {VE::VFMKWyvyl, {VE::PVFMKWUPvml, VE::PVFMKWLOvml}},
      {VE::VFMKSyvl, {VE::PVFMKSUPvl, VE::PVFMKSLOvl}},
      {VE::VFMKSyvyl, {VE::PVFMKSUPvml, VE::PVFMKSLOvml}},
  };

  unsigned Opcode = MI.getOpcode();

  const auto *Found =
      llvm::find_if(VFMKMap, [&](auto P) { return P.first == Opcode; });
  if (Found == std::end(VFMKMap))
    report_fatal_error("unexpected opcode for pseudo vfmk");

  unsigned OpcodeUpper = (*Found).second.first;
  unsigned OpcodeLower = (*Found).second.second;

  MachineBasicBlock *MBB = MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  MachineInstrBuilder Bu = BuildMI(*MBB, MI, DL, TI.get(OpcodeUpper));
  addOperandsForVFMK(Bu, MI, /* Upper */ true);
  MachineInstrBuilder Bl = BuildMI(*MBB, MI, DL, TI.get(OpcodeLower));
  addOperandsForVFMK(Bl, MI, /* Upper */ false);

  MI.eraseFromParent();
}

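// Expand post-RA pseudo instructions: the stack-extension and stack-top
// pseudos, and the VM512 mask pseudos that are split into operations on the
// underlying VM register pairs.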
bool VEInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case VE::EXTEND_STACK: {
    return expandExtendStackPseudo(MI);
  }
  case VE::EXTEND_STACK_GUARD: {
    MI.eraseFromParent(); // The pseudo instruction is gone now.
    return true;
  }
  case VE::GETSTACKTOP: {
    return expandGetStackTopPseudo(MI);
  }

  case VE::ANDMyy:
    expandPseudoLogM(MI, get(VE::ANDMmm));
    return true;
  case VE::ORMyy:
    expandPseudoLogM(MI, get(VE::ORMmm));
    return true;
  case VE::XORMyy:
    expandPseudoLogM(MI, get(VE::XORMmm));
    return true;
  case VE::EQVMyy:
    expandPseudoLogM(MI, get(VE::EQVMmm));
    return true;
  case VE::NNDMyy:
    expandPseudoLogM(MI, get(VE::NNDMmm));
    return true;
  case VE::NEGMy:
    expandPseudoLogM(MI, get(VE::NEGMm));
    return true;

  case VE::LVMyir:
  case VE::LVMyim:
  case VE::LVMyir_y:
  case VE::LVMyim_y: {
    Register VMXu = getVM512Upper(MI.getOperand(0).getReg());
    Register VMXl = getVM512Lower(MI.getOperand(0).getReg());
    int64_t Imm = MI.getOperand(1).getImm();
    bool IsSrcReg =
        MI.getOpcode() == VE::LVMyir || MI.getOpcode() == VE::LVMyir_y;
    Register Src = IsSrcReg ? MI.getOperand(2).getReg() : VE::NoRegister;
    int64_t MImm = IsSrcReg ? 0 : MI.getOperand(2).getImm();
    bool KillSrc = IsSrcReg ? MI.getOperand(2).isKill() : false;
    Register VMX = VMXl;
    if (Imm >= 4) {
      VMX = VMXu;
      Imm -= 4;
    }
    MachineBasicBlock *MBB = MI.getParent();
    DebugLoc DL = MI.getDebugLoc();
    switch (MI.getOpcode()) {
    case VE::LVMyir:
      BuildMI(*MBB, MI, DL, get(VE::LVMir))
          .addDef(VMX)
          .addImm(Imm)
          .addReg(Src, getKillRegState(KillSrc));
      break;
    case VE::LVMyim:
      BuildMI(*MBB, MI, DL, get(VE::LVMim))
          .addDef(VMX)
          .addImm(Imm)
          .addImm(MImm);
      break;
    case VE::LVMyir_y:
      assert(MI.getOperand(0).getReg() == MI.getOperand(3).getReg() &&
             "LVMyir_y has different register in 3rd operand");
      BuildMI(*MBB, MI, DL, get(VE::LVMir_m))
          .addDef(VMX)
          .addImm(Imm)
          .addReg(Src, getKillRegState(KillSrc))
          .addReg(VMX);
      break;
    case VE::LVMyim_y:
      assert(MI.getOperand(0).getReg() == MI.getOperand(3).getReg() &&
             "LVMyim_y has different register in 3rd operand");
      BuildMI(*MBB, MI, DL, get(VE::LVMim_m))
          .addDef(VMX)
          .addImm(Imm)
          .addImm(MImm)
          .addReg(VMX);
      break;
    }
    MI.eraseFromParent();
    return true;
  }
  case VE::SVMyi: {
    Register Dest = MI.getOperand(0).getReg();
    Register VMZu = getVM512Upper(MI.getOperand(1).getReg());
    Register VMZl = getVM512Lower(MI.getOperand(1).getReg());
    bool KillSrc = MI.getOperand(1).isKill();
    int64_t Imm = MI.getOperand(2).getImm();
    Register VMZ = VMZl;
    if (Imm >= 4) {
      VMZ = VMZu;
      Imm -= 4;
    }
    MachineBasicBlock *MBB = MI.getParent();
    DebugLoc DL = MI.getDebugLoc();
    MachineInstrBuilder MIB =
        BuildMI(*MBB, MI, DL, get(VE::SVMmi), Dest).addReg(VMZ).addImm(Imm);
    MachineInstr *Inst = MIB.getInstr();
    if (KillSrc) {
      const TargetRegisterInfo *TRI = &getRegisterInfo();
      Inst->addRegisterKilled(MI.getOperand(1).getReg(), TRI, true);
    }
    MI.eraseFromParent();
    return true;
  }
  case VE::VFMKyal:
  case VE::VFMKynal:
  case VE::VFMKWyvl:
  case VE::VFMKWyvyl:
  case VE::VFMKSyvl:
  case VE::VFMKSyvyl:
    expandPseudoVFMK(*this, MI);
    return true;
  }
  return false;
}

bool VEInstrInfo::expandExtendStackPseudo(MachineInstr &MI) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const VESubtarget &STI = MF.getSubtarget<VESubtarget>();
  const VEInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc dl = MBB.findDebugLoc(MI);

  // Create the following instructions and multiple basic blocks.
  //
  // thisBB:
  //   brge.l.t %sp, %sl, sinkBB
  // syscallBB:
  //   ld %s61, 0x18(, %tp)   // load param area
  //   or %s62, 0, %s0        // spill the value of %s0
  //   lea %s63, 0x13b        // syscall # of grow
  //   shm.l %s63, 0x0(%s61)  // store syscall # at addr:0
  //   shm.l %sl, 0x8(%s61)   // store old limit at addr:8
  //   shm.l %sp, 0x10(%s61)  // store new limit at addr:16
  //   monc                   // call monitor
  //   or %s0, 0, %s62        // restore the value of %s0
  // sinkBB:

  // Create new MBBs.
  MachineBasicBlock *BB = &MBB;
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineBasicBlock *syscallMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = ++(BB->getIterator());
  MF.insert(It, syscallMBB);
  MF.insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(std::next(MachineBasicBlock::iterator(MI))),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(syscallMBB);
  BB->addSuccessor(sinkMBB);
  BuildMI(BB, dl, TII.get(VE::BRCFLrr_t))
      .addImm(VECC::CC_IGE)
      .addReg(VE::SX11) // %sp
      .addReg(VE::SX8)  // %sl
      .addMBB(sinkMBB);

  BB = syscallMBB;

  // Update machine-CFG edges.
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(VE::LDrii), VE::SX61)
      .addReg(VE::SX14)
      .addImm(0)
      .addImm(0x18);
  BuildMI(BB, dl, TII.get(VE::ORri), VE::SX62)
      .addReg(VE::SX0)
      .addImm(0);
  BuildMI(BB, dl, TII.get(VE::LEAzii), VE::SX63)
      .addImm(0)
      .addImm(0)
      .addImm(0x13b);
  BuildMI(BB, dl, TII.get(VE::SHMLri))
      .addReg(VE::SX61)
      .addImm(0)
      .addReg(VE::SX63);
  BuildMI(BB, dl, TII.get(VE::SHMLri))
      .addReg(VE::SX61)
      .addImm(8)
      .addReg(VE::SX8);
  BuildMI(BB, dl, TII.get(VE::SHMLri))
      .addReg(VE::SX61)
      .addImm(16)
      .addReg(VE::SX11);
  BuildMI(BB, dl, TII.get(VE::MONC));

  BuildMI(BB, dl, TII.get(VE::ORri), VE::SX0)
      .addReg(VE::SX62)
      .addImm(0);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return true;
}

bool VEInstrInfo::expandGetStackTopPseudo(MachineInstr &MI) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction &MF = *MBB->getParent();
  const VESubtarget &STI = MF.getSubtarget<VESubtarget>();
  const VEInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc DL = MBB->findDebugLoc(MI);

  // Create the following instruction:
  //
  //   dst = %sp + target-specific frame + the size of the parameter area

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const VEFrameLowering &TFL = *STI.getFrameLowering();

  // The VE ABI requires a reserved area at the top of the stack as described
  // in VEFrameLowering.cpp, so we adjust for it here.
  unsigned NumBytes = STI.getAdjustedFrameSize(0);

  // Also add the size of the parameter area.
  if (MFI.adjustsStack() && TFL.hasReservedCallFrame(MF))
    NumBytes += MFI.getMaxCallFrameSize();

  BuildMI(*MBB, MI, DL, TII.get(VE::LEArii))
      .addDef(MI.getOperand(0).getReg())
      .addReg(VE::SX11)
      .addImm(0)
      .addImm(NumBytes);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return true;
}