//===-- VEInstrInfo.cpp - VE Instruction Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the VE implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "VEInstrInfo.h"
#include "VE.h"
#include "VEMachineFunctionInfo.h"
#include "VESubtarget.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"

#define DEBUG_TYPE "ve-instr-info"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "VEGenInstrInfo.inc"

// Pin the vtable to this file.
void VEInstrInfo::anchor() {}

VEInstrInfo::VEInstrInfo(const VESubtarget &ST)
    : VEGenInstrInfo(ST, RI, VE::ADJCALLSTACKDOWN, VE::ADJCALLSTACKUP), RI() {}

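// All integer condition codes come before CC_AF in the VECC::CondCode
// enumeration, so a single comparison is enough to identify them.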
static bool IsIntegerCC(unsigned CC) { return (CC < VECC::CC_AF); }

static VECC::CondCode GetOppositeBranchCondition(VECC::CondCode CC) {
  switch (CC) {
  case VECC::CC_IG:
    return VECC::CC_ILE;
  case VECC::CC_IL:
    return VECC::CC_IGE;
  case VECC::CC_INE:
    return VECC::CC_IEQ;
  case VECC::CC_IEQ:
    return VECC::CC_INE;
  case VECC::CC_IGE:
    return VECC::CC_IL;
  case VECC::CC_ILE:
    return VECC::CC_IG;
  case VECC::CC_AF:
    return VECC::CC_AT;
  case VECC::CC_G:
    return VECC::CC_LENAN;
  case VECC::CC_L:
    return VECC::CC_GENAN;
  case VECC::CC_NE:
    return VECC::CC_EQNAN;
  case VECC::CC_EQ:
    return VECC::CC_NENAN;
  case VECC::CC_GE:
    return VECC::CC_LNAN;
  case VECC::CC_LE:
    return VECC::CC_GNAN;
  case VECC::CC_NUM:
    return VECC::CC_NAN;
  case VECC::CC_NAN:
    return VECC::CC_NUM;
  case VECC::CC_GNAN:
    return VECC::CC_LE;
  case VECC::CC_LNAN:
    return VECC::CC_GE;
  case VECC::CC_NENAN:
    return VECC::CC_EQ;
  case VECC::CC_EQNAN:
    return VECC::CC_NE;
  case VECC::CC_GENAN:
    return VECC::CC_L;
  case VECC::CC_LENAN:
    return VECC::CC_G;
  case VECC::CC_AT:
    return VECC::CC_AF;
  case VECC::UNKNOWN:
    return VECC::UNKNOWN;
  }
  llvm_unreachable("Invalid cond code");
}

// Treat a branch-relative-long-always instruction as an unconditional branch.
// For example, br.l.t and br.l.
static bool isUncondBranchOpcode(int Opc) {
  using namespace llvm::VE;

#define BRKIND(NAME) (Opc == NAME##a || Opc == NAME##a_nt || Opc == NAME##a_t)
  // VE has other branch-relative-always instructions for word/double/float,
  // but we use only long branches in our lowering, so check that here.
  assert(!BRKIND(BRCFW) && !BRKIND(BRCFD) && !BRKIND(BRCFS) &&
         "Branch relative word/double/float always instructions should not be "
         "used!");
  return BRKIND(BRCFL);
#undef BRKIND
}

// Treat branch-relative-conditional instructions as conditional branches.
// For example, brgt.l.t and brle.s.nt.
static bool isCondBranchOpcode(int Opc) {
  using namespace llvm::VE;

#define BRKIND(NAME)                                                           \
  (Opc == NAME##rr || Opc == NAME##rr_nt || Opc == NAME##rr_t ||               \
   Opc == NAME##ir || Opc == NAME##ir_nt || Opc == NAME##ir_t)
  return BRKIND(BRCFL) || BRKIND(BRCFW) || BRKIND(BRCFD) || BRKIND(BRCFS);
#undef BRKIND
}

// Treat branch-long-always instructions as indirect branches.
// For example, b.l.t and b.l.
static bool isIndirectBranchOpcode(int Opc) {
  using namespace llvm::VE;

#define BRKIND(NAME)                                                           \
  (Opc == NAME##ari || Opc == NAME##ari_nt || Opc == NAME##ari_t)
  // VE has other branch-always instructions for word/double/float, but
  // we use only long branches in our lowering, so check that here.
  assert(!BRKIND(BCFW) && !BRKIND(BCFD) && !BRKIND(BCFS) &&
         "Branch word/double/float always instructions should not be used!");
  return BRKIND(BCFL);
#undef BRKIND
}

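// Decompose a conditional branch of the form (BRCF<ty> $cc, $lhs, $rhs,
// $target) into a three-operand condition (cc, lhs, rhs) and the target block.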
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  Cond.push_back(MachineOperand::CreateImm(LastInst->getOperand(0).getImm()));
  Cond.push_back(LastInst->getOperand(1));
  Cond.push_back(LastInst->getOperand(2));
  Target = LastInst->getOperand(3).getMBB();
}

bool VEInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  if (!isUnpredicatedTerminator(*I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;
  unsigned LastOpc = LastInst->getOpcode();

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = &*I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
        // Return now; the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      }
      SecondLastInst = &*I;
      SecondLastOpc = SecondLastInst->getOpcode();
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an
  // unconditional branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

unsigned VEInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *TBB,
                                   MachineBasicBlock *FBB,
                                   ArrayRef<MachineOperand> Cond,
                                   const DebugLoc &DL, int *BytesAdded) const {
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "VE branch conditions should have three components!");
  assert(!BytesAdded && "code size not handled");
  if (Cond.empty()) {
    // Unconditional branch
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(VE::BRCFLa_t))
        .addMBB(TBB);
    return 1;
  }

  // Conditional branch
  //   (BRCFir CC sy sz addr)
  assert(Cond[0].isImm() && Cond[2].isReg() && "not implemented");

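  // Select the branch opcode from the comparison kind and the register width;
  // opc[0] is the compare-immediate (ir) form and opc[1] the register-register
  // (rr) form.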
  unsigned opc[2];
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineFunction *MF = MBB.getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  Register Reg = Cond[2].getReg();
  if (IsIntegerCC(Cond[0].getImm())) {
    if (TRI->getRegSizeInBits(Reg, MRI) == 32) {
      opc[0] = VE::BRCFWir;
      opc[1] = VE::BRCFWrr;
    } else {
      opc[0] = VE::BRCFLir;
      opc[1] = VE::BRCFLrr;
    }
  } else {
    if (TRI->getRegSizeInBits(Reg, MRI) == 32) {
      opc[0] = VE::BRCFSir;
      opc[1] = VE::BRCFSrr;
    } else {
      opc[0] = VE::BRCFDir;
      opc[1] = VE::BRCFDrr;
    }
  }
  if (Cond[1].isImm()) {
    BuildMI(&MBB, DL, get(opc[0]))
        .add(Cond[0]) // condition code
        .add(Cond[1]) // lhs
        .add(Cond[2]) // rhs
        .addMBB(TBB);
  } else {
    BuildMI(&MBB, DL, get(opc[1]))
        .add(Cond[0])
        .add(Cond[1])
        .add(Cond[2])
        .addMBB(TBB);
  }

  if (!FBB)
    return 1;

  BuildMI(&MBB, DL, get(VE::BRCFLa_t))
      .addMBB(FBB);
  return 2;
}

unsigned VEInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                   int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;
  while (I != MBB.begin()) {
    --I;

    if (I->isDebugValue())
      continue;

    if (!isUncondBranchOpcode(I->getOpcode()) &&
        !isCondBranchOpcode(I->getOpcode()))
      break; // Not a branch

    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }
  return Count;
}

bool VEInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  VECC::CondCode CC = static_cast<VECC::CondCode>(Cond[0].getImm());
  Cond[0].setImm(GetOppositeBranchCondition(CC));
  return false;
}

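// Return true if Reg is in one of the scalar register classes (I32, I64, or
// F32) that alias the 64-bit SX registers.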
static bool IsAliasOfSX(Register Reg) {
  return VE::I32RegClass.contains(Reg) || VE::I64RegClass.contains(Reg) ||
         VE::F32RegClass.contains(Reg);
}

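// Copy DestReg from SrcReg one sub-register at a time using MCID (ORri for
// scalar parts, ANDMmm for vector-mask parts), then mark the super-register
// defined (and the source killed) on the last emitted copy.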
static void copyPhysSubRegs(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator I, const DebugLoc &DL,
                            MCRegister DestReg, MCRegister SrcReg, bool KillSrc,
                            const MCInstrDesc &MCID, unsigned int NumSubRegs,
                            const unsigned *SubRegIdx,
                            const TargetRegisterInfo *TRI) {
  MachineInstr *MovMI = nullptr;

  for (unsigned Idx = 0; Idx != NumSubRegs; ++Idx) {
    Register SubDest = TRI->getSubReg(DestReg, SubRegIdx[Idx]);
    Register SubSrc = TRI->getSubReg(SrcReg, SubRegIdx[Idx]);
    assert(SubDest && SubSrc && "Bad sub-register");

    if (MCID.getOpcode() == VE::ORri) {
      // Generate an "ORri dest, src, 0" instruction.
      MachineInstrBuilder MIB =
          BuildMI(MBB, I, DL, MCID, SubDest).addReg(SubSrc).addImm(0);
      MovMI = MIB.getInstr();
    } else if (MCID.getOpcode() == VE::ANDMmm) {
      // Generate an "ANDM dest, vm0, src" instruction.
      MachineInstrBuilder MIB =
          BuildMI(MBB, I, DL, MCID, SubDest).addReg(VE::VM0).addReg(SubSrc);
      MovMI = MIB.getInstr();
    } else {
      llvm_unreachable("Unexpected reg-to-reg copy instruction");
    }
  }
  // Add implicit super-register defs and kills to the last MovMI.
  MovMI->addRegisterDefined(DestReg, TRI);
  if (KillSrc)
    MovMI->addRegisterKilled(SrcReg, TRI, true);
}

void VEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I, const DebugLoc &DL,
                              Register DestReg, Register SrcReg, bool KillSrc,
                              bool RenamableDest, bool RenamableSrc) const {

  if (IsAliasOfSX(SrcReg) && IsAliasOfSX(DestReg)) {
    BuildMI(MBB, I, DL, get(VE::ORri), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addImm(0);
  } else if (VE::V64RegClass.contains(DestReg, SrcReg)) {
    // Generate the following instructions:
    //   %sw16 = LEA32zii 256
    //   VORmvl %dest, (0)1, %src, %sw16
    // TODO: reuse a register if vl is already assigned to a register
    // FIXME: it would be better to scavenge a register here instead of
    //        reserving SX16 all of the time.
    const TargetRegisterInfo *TRI = &getRegisterInfo();
    Register TmpReg = VE::SX16;
    Register SubTmp = TRI->getSubReg(TmpReg, VE::sub_i32);
    BuildMI(MBB, I, DL, get(VE::LEAzii), TmpReg)
        .addImm(0)
        .addImm(0)
        .addImm(256);
    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(VE::VORmvl), DestReg)
                                  .addImm(M1(0)) // Represent (0)1.
                                  .addReg(SrcReg, getKillRegState(KillSrc))
                                  .addReg(SubTmp, getKillRegState(true));
    MIB.getInstr()->addRegisterKilled(TmpReg, TRI, true);
  } else if (VE::VMRegClass.contains(DestReg, SrcReg)) {
    BuildMI(MBB, I, DL, get(VE::ANDMmm), DestReg)
        .addReg(VE::VM0)
        .addReg(SrcReg, getKillRegState(KillSrc));
  } else if (VE::VM512RegClass.contains(DestReg, SrcReg)) {
    // Use two instructions.
    const unsigned SubRegIdx[] = {VE::sub_vm_even, VE::sub_vm_odd};
    unsigned int NumSubRegs = 2;
    copyPhysSubRegs(MBB, I, DL, DestReg, SrcReg, KillSrc, get(VE::ANDMmm),
                    NumSubRegs, SubRegIdx, &getRegisterInfo());
  } else if (VE::F128RegClass.contains(DestReg, SrcReg)) {
    // Use two instructions.
    const unsigned SubRegIdx[] = {VE::sub_even, VE::sub_odd};
    unsigned int NumSubRegs = 2;
    copyPhysSubRegs(MBB, I, DL, DestReg, SrcReg, KillSrc, get(VE::ORri),
                    NumSubRegs, SubRegIdx, &getRegisterInfo());
  } else {
    const TargetRegisterInfo *TRI = &getRegisterInfo();
    dbgs() << "Impossible reg-to-reg copy from " << printReg(SrcReg, TRI)
           << " to " << printReg(DestReg, TRI) << "\n";
    llvm_unreachable("Impossible reg-to-reg copy");
  }
}

/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
Register VEInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                          int &FrameIndex) const {
  if (MI.getOpcode() == VE::LDrii ||    // I64
      MI.getOpcode() == VE::LDLSXrii || // I32
      MI.getOpcode() == VE::LDUrii ||   // F32
      MI.getOpcode() == VE::LDQrii ||   // F128 (pseudo)
      MI.getOpcode() == VE::LDVMrii ||  // VM (pseudo)
      MI.getOpcode() == VE::LDVM512rii  // VM512 (pseudo)
  ) {
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0 && MI.getOperand(3).isImm() &&
        MI.getOperand(3).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
  }
  return 0;
}

/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source reg along with the FrameIndex of the stored stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
Register VEInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                         int &FrameIndex) const {
  if (MI.getOpcode() == VE::STrii ||    // I64
      MI.getOpcode() == VE::STLrii ||   // I32
      MI.getOpcode() == VE::STUrii ||   // F32
      MI.getOpcode() == VE::STQrii ||   // F128 (pseudo)
      MI.getOpcode() == VE::STVMrii ||  // VM (pseudo)
      MI.getOpcode() == VE::STVM512rii  // VM512 (pseudo)
  ) {
    if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
        MI.getOperand(1).getImm() == 0 && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(0).getIndex();
      return MI.getOperand(3).getReg();
    }
  }
  return 0;
}

void VEInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
                                      Register SrcReg, bool isKill, int FI,
                                      const TargetRegisterClass *RC,
                                      Register VReg,
                                      MachineInstr::MIFlag Flags) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  // On the order of operands here: think "[FrameIdx + 0] = SrcReg".
  if (RC == &VE::I64RegClass) {
    BuildMI(MBB, I, DL, get(VE::STrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (RC == &VE::I32RegClass) {
    BuildMI(MBB, I, DL, get(VE::STLrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (RC == &VE::F32RegClass) {
    BuildMI(MBB, I, DL, get(VE::STUrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (VE::F128RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::STQrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (RC == &VE::VMRegClass) {
    BuildMI(MBB, I, DL, get(VE::STVMrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (VE::VM512RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::STVM512rii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else
    report_fatal_error("Can't store this register to stack slot");
}

void VEInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator I,
                                       Register DestReg, int FI,
                                       const TargetRegisterClass *RC,
                                       Register VReg, unsigned SubReg,
                                       MachineInstr::MIFlag Flags) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  if (RC == &VE::I64RegClass) {
    BuildMI(MBB, I, DL, get(VE::LDrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (RC == &VE::I32RegClass) {
    BuildMI(MBB, I, DL, get(VE::LDLSXrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (RC == &VE::F32RegClass) {
    BuildMI(MBB, I, DL, get(VE::LDUrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (VE::F128RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::LDQrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (RC == &VE::VMRegClass) {
    BuildMI(MBB, I, DL, get(VE::LDVMrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (VE::VM512RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::LDVM512rii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else
    report_fatal_error("Can't load this register from stack slot");
}

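// Try to fold the immediate materialized by DefMI (an ORim or LEAzii) into
// UseMI by rewriting UseMI to an immediate form of the same operation.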
bool VEInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                                Register Reg, MachineRegisterInfo *MRI) const {
  LLVM_DEBUG(dbgs() << "foldImmediate\n");

  LLVM_DEBUG(dbgs() << "checking DefMI\n");
  int64_t ImmVal;
  switch (DefMI.getOpcode()) {
  default:
    return false;
  case VE::ORim:
    // General "move small immediate" instruction on VE.
    LLVM_DEBUG(dbgs() << "checking ORim\n");
    LLVM_DEBUG(DefMI.dump());
    // FIXME: We may need to support FPImm too.
    assert(DefMI.getOperand(1).isImm());
    assert(DefMI.getOperand(2).isImm());
    ImmVal =
        DefMI.getOperand(1).getImm() + mimm2Val(DefMI.getOperand(2).getImm());
    LLVM_DEBUG(dbgs() << "ImmVal is " << ImmVal << "\n");
    break;
  case VE::LEAzii:
    // General "move immediate" instruction on VE.
    LLVM_DEBUG(dbgs() << "checking LEAzii\n");
    LLVM_DEBUG(DefMI.dump());
    // FIXME: We may need to support FPImm too.
    assert(DefMI.getOperand(2).isImm());
    if (!DefMI.getOperand(3).isImm())
      // LEAzii may refer to a label.
      return false;
    ImmVal = DefMI.getOperand(2).getImm() + DefMI.getOperand(3).getImm();
    LLVM_DEBUG(dbgs() << "ImmVal is " << ImmVal << "\n");
    break;
  }

  // Try to fold like below:
  //   %1:i64 = ORim 0, 0(1)
  //   %2:i64 = CMPSLrr %0, %1
  // To
  //   %2:i64 = CMPSLrm %0, 0(1)
  //
  // Another example:
  //   %1:i64 = ORim 6, 0(1)
  //   %2:i64 = CMPSLrr %1, %0
  // To
  //   %2:i64 = CMPSLir 6, %0
  //
  // Support commutable instructions like below:
  //   %1:i64 = ORim 6, 0(1)
  //   %2:i64 = ADDSLrr %1, %0
  // To
  //   %2:i64 = ADDSLri %0, 6
  //
  // FIXME: Need to support i32. The current implementation requires
  //        EXTRACT_SUBREG, so the input has the following COPY, which
  //        prevents folding:
  //   %1:i64 = ORim 6, 0(1)
  //   %2:i32 = COPY %1.sub_i32
  //   %3:i32 = ADDSWSXrr %0, %2
  // FIXME: Need to support shift, cmov, and more instructions.
  // FIXME: Need to support lvl too, but LVLGen runs after peephole-opt.

  LLVM_DEBUG(dbgs() << "checking UseMI\n");
  LLVM_DEBUG(UseMI.dump());
  unsigned NewUseOpcSImm7;
  unsigned NewUseOpcMImm;
  enum InstType {
    rr2ri_rm, // rr -> ri or rm, commutable
    rr2ir_rm, // rr -> ir or rm
  } InstType;

  using namespace llvm::VE;
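// INSTRKIND is for commutable instructions, where the folded immediate may
// replace either source operand; NCINSTRKIND is for non-commutable ones.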
#define INSTRKIND(NAME)                                                        \
  case NAME##rr:                                                               \
    NewUseOpcSImm7 = NAME##ri;                                                 \
    NewUseOpcMImm = NAME##rm;                                                  \
    InstType = rr2ri_rm;                                                       \
    break
#define NCINSTRKIND(NAME)                                                      \
  case NAME##rr:                                                               \
    NewUseOpcSImm7 = NAME##ir;                                                 \
    NewUseOpcMImm = NAME##rm;                                                  \
    InstType = rr2ir_rm;                                                       \
    break

  switch (UseMI.getOpcode()) {
  default:
    return false;

    INSTRKIND(ADDUL);
    INSTRKIND(ADDSWSX);
    INSTRKIND(ADDSWZX);
    INSTRKIND(ADDSL);
    NCINSTRKIND(SUBUL);
    NCINSTRKIND(SUBSWSX);
    NCINSTRKIND(SUBSWZX);
    NCINSTRKIND(SUBSL);
    INSTRKIND(MULUL);
    INSTRKIND(MULSWSX);
    INSTRKIND(MULSWZX);
    INSTRKIND(MULSL);
    NCINSTRKIND(DIVUL);
    NCINSTRKIND(DIVSWSX);
    NCINSTRKIND(DIVSWZX);
    NCINSTRKIND(DIVSL);
    NCINSTRKIND(CMPUL);
    NCINSTRKIND(CMPSWSX);
    NCINSTRKIND(CMPSWZX);
    NCINSTRKIND(CMPSL);
    INSTRKIND(MAXSWSX);
    INSTRKIND(MAXSWZX);
    INSTRKIND(MAXSL);
    INSTRKIND(MINSWSX);
    INSTRKIND(MINSWZX);
    INSTRKIND(MINSL);
    INSTRKIND(AND);
    INSTRKIND(OR);
    INSTRKIND(XOR);
    INSTRKIND(EQV);
    NCINSTRKIND(NND);
    NCINSTRKIND(MRG);
  }

#undef INSTRKIND
#undef NCINSTRKIND

  unsigned NewUseOpc;
  unsigned UseIdx;
  bool Commute = false;
  LLVM_DEBUG(dbgs() << "checking UseMI operands\n");
  switch (InstType) {
  case rr2ri_rm:
    UseIdx = 2;
    if (UseMI.getOperand(1).getReg() == Reg) {
      Commute = true;
    } else {
      assert(UseMI.getOperand(2).getReg() == Reg);
    }
    if (isInt<7>(ImmVal)) {
      // ImmVal fits in a simm7 slot, so change UseOpc to an instruction that
      // holds a simm7 operand.
      NewUseOpc = NewUseOpcSImm7;
    } else if (isMImmVal(ImmVal)) {
      // Similarly, change UseOpc to an instruction that holds a mimm operand.
      NewUseOpc = NewUseOpcMImm;
      ImmVal = val2MImm(ImmVal);
    } else
      return false;
    break;
  case rr2ir_rm:
    if (UseMI.getOperand(1).getReg() == Reg) {
      // Check whether the immediate value fits the UseMI instruction.
      if (!isInt<7>(ImmVal))
        return false;
      NewUseOpc = NewUseOpcSImm7;
      UseIdx = 1;
    } else {
      assert(UseMI.getOperand(2).getReg() == Reg);
      // Check whether the immediate value fits the UseMI instruction.
      if (!isMImmVal(ImmVal))
        return false;
      NewUseOpc = NewUseOpcMImm;
      ImmVal = val2MImm(ImmVal);
      UseIdx = 2;
    }
    break;
  }

  LLVM_DEBUG(dbgs() << "modifying UseMI\n");
  bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
  UseMI.setDesc(get(NewUseOpc));
  if (Commute) {
    UseMI.getOperand(1).setReg(UseMI.getOperand(UseIdx).getReg());
  }
  UseMI.getOperand(UseIdx).ChangeToImmediate(ImmVal);
  if (DeleteDef)
    DefMI.eraseFromParent();

  return true;
}

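// Return the global base register (%s15, the GOT pointer), materializing it
// with a GETGOT pseudo in the entry block on first use and caching it in
// VEMachineFunctionInfo.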
Register VEInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
  VEMachineFunctionInfo *VEFI = MF->getInfo<VEMachineFunctionInfo>();
  Register GlobalBaseReg = VEFI->getGlobalBaseReg();
  if (GlobalBaseReg != 0)
    return GlobalBaseReg;

  // We use %s15 (%got) as the global base register.
  GlobalBaseReg = VE::SX15;

  // Insert a pseudo instruction to set the GlobalBaseReg into the first
  // MBB of the function.
  MachineBasicBlock &FirstMBB = MF->front();
  MachineBasicBlock::iterator MBBI = FirstMBB.begin();
  DebugLoc dl;
  BuildMI(FirstMBB, MBBI, dl, get(VE::GETGOT), GlobalBaseReg);
  VEFI->setGlobalBaseReg(GlobalBaseReg);
  return GlobalBaseReg;
}

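// A VM512 register VMPn overlays two consecutive VM registers: the
// even-numbered one is the upper half and the odd-numbered one the lower half.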
static Register getVM512Upper(Register reg) {
  return (reg - VE::VMP0) * 2 + VE::VM0;
}

static Register getVM512Lower(Register reg) { return getVM512Upper(reg) + 1; }

// Expand pseudo logical vector instructions for VM512 registers.
static void expandPseudoLogM(MachineInstr &MI, const MCInstrDesc &MCID) {
  MachineBasicBlock *MBB = MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  Register VMXu = getVM512Upper(MI.getOperand(0).getReg());
  Register VMXl = getVM512Lower(MI.getOperand(0).getReg());
  Register VMYu = getVM512Upper(MI.getOperand(1).getReg());
  Register VMYl = getVM512Lower(MI.getOperand(1).getReg());

  switch (MI.getOpcode()) {
  default: {
    Register VMZu = getVM512Upper(MI.getOperand(2).getReg());
    Register VMZl = getVM512Lower(MI.getOperand(2).getReg());
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXu).addUse(VMYu).addUse(VMZu);
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXl).addUse(VMYl).addUse(VMZl);
    break;
  }
  case VE::NEGMy:
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXu).addUse(VMYu);
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXl).addUse(VMYl);
    break;
  }
  MI.eraseFromParent();
}

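// Append the operands of a pseudo VFMK over VM512 to MIB, substituting the
// upper or lower VM half for every VM512 operand.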
static void addOperandsForVFMK(MachineInstrBuilder &MIB, MachineInstr &MI,
                               bool Upper) {
  // VM512
  MIB.addReg(Upper ? getVM512Upper(MI.getOperand(0).getReg())
                   : getVM512Lower(MI.getOperand(0).getReg()));

  switch (MI.getNumExplicitOperands()) {
  default:
    report_fatal_error("unexpected number of operands for pvfmk");
  case 2: // _Ml: VM512, VL
    // VL
    MIB.addReg(MI.getOperand(1).getReg());
    break;
  case 4: // _Mvl: VM512, CC, VR, VL
    // CC
    MIB.addImm(MI.getOperand(1).getImm());
    // VR
    MIB.addReg(MI.getOperand(2).getReg());
    // VL
    MIB.addReg(MI.getOperand(3).getReg());
    break;
  case 5: // _MvMl: VM512, CC, VR, VM512, VL
    // CC
    MIB.addImm(MI.getOperand(1).getImm());
    // VR
    MIB.addReg(MI.getOperand(2).getReg());
    // VM512
    MIB.addReg(Upper ? getVM512Upper(MI.getOperand(3).getReg())
                     : getVM512Lower(MI.getOperand(3).getReg()));
    // VL
    MIB.addReg(MI.getOperand(4).getReg());
    break;
  }
}

static void expandPseudoVFMK(const TargetInstrInfo &TI, MachineInstr &MI) {
  // Replace a pseudo VFMK over VM512 with a pvfmk.w.up/pvfmk.w.lo or
  // pvfmk.s.up/pvfmk.s.lo pair.

  static const std::pair<unsigned, std::pair<unsigned, unsigned>> VFMKMap[] = {
      {VE::VFMKyal, {VE::VFMKLal, VE::VFMKLal}},
      {VE::VFMKynal, {VE::VFMKLnal, VE::VFMKLnal}},
      {VE::VFMKWyvl, {VE::PVFMKWUPvl, VE::PVFMKWLOvl}},
      {VE::VFMKWyvyl, {VE::PVFMKWUPvml, VE::PVFMKWLOvml}},
      {VE::VFMKSyvl, {VE::PVFMKSUPvl, VE::PVFMKSLOvl}},
      {VE::VFMKSyvyl, {VE::PVFMKSUPvml, VE::PVFMKSLOvml}},
  };

  unsigned Opcode = MI.getOpcode();

  const auto *Found =
      llvm::find_if(VFMKMap, [&](auto P) { return P.first == Opcode; });
  if (Found == std::end(VFMKMap))
    report_fatal_error("unexpected opcode for pseudo vfmk");

  unsigned OpcodeUpper = (*Found).second.first;
  unsigned OpcodeLower = (*Found).second.second;

  MachineBasicBlock *MBB = MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  MachineInstrBuilder Bu = BuildMI(*MBB, MI, DL, TI.get(OpcodeUpper));
  addOperandsForVFMK(Bu, MI, /* Upper */ true);
  MachineInstrBuilder Bl = BuildMI(*MBB, MI, DL, TI.get(OpcodeLower));
  addOperandsForVFMK(Bl, MI, /* Upper */ false);

  MI.eraseFromParent();
}

bool VEInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case VE::EXTEND_STACK: {
    return expandExtendStackPseudo(MI);
  }
  case VE::EXTEND_STACK_GUARD: {
    MI.eraseFromParent(); // The pseudo instruction is gone now.
    return true;
  }
  case VE::GETSTACKTOP: {
    return expandGetStackTopPseudo(MI);
  }

  case VE::ANDMyy:
    expandPseudoLogM(MI, get(VE::ANDMmm));
    return true;
  case VE::ORMyy:
    expandPseudoLogM(MI, get(VE::ORMmm));
    return true;
  case VE::XORMyy:
    expandPseudoLogM(MI, get(VE::XORMmm));
    return true;
  case VE::EQVMyy:
    expandPseudoLogM(MI, get(VE::EQVMmm));
    return true;
  case VE::NNDMyy:
    expandPseudoLogM(MI, get(VE::NNDMmm));
    return true;
  case VE::NEGMy:
    expandPseudoLogM(MI, get(VE::NEGMm));
    return true;

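  // LVM writes one 64-bit element of a mask register.  For a VM512 pair,
  // elements 0-3 live in the lower half and elements 4-7 in the upper half.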
  case VE::LVMyir:
  case VE::LVMyim:
  case VE::LVMyir_y:
  case VE::LVMyim_y: {
    Register VMXu = getVM512Upper(MI.getOperand(0).getReg());
    Register VMXl = getVM512Lower(MI.getOperand(0).getReg());
    int64_t Imm = MI.getOperand(1).getImm();
    bool IsSrcReg =
        MI.getOpcode() == VE::LVMyir || MI.getOpcode() == VE::LVMyir_y;
    Register Src = IsSrcReg ? MI.getOperand(2).getReg() : VE::NoRegister;
    int64_t MImm = IsSrcReg ? 0 : MI.getOperand(2).getImm();
    bool KillSrc = IsSrcReg ? MI.getOperand(2).isKill() : false;
    Register VMX = VMXl;
    if (Imm >= 4) {
      VMX = VMXu;
      Imm -= 4;
    }
    MachineBasicBlock *MBB = MI.getParent();
    DebugLoc DL = MI.getDebugLoc();
    switch (MI.getOpcode()) {
    case VE::LVMyir:
      BuildMI(*MBB, MI, DL, get(VE::LVMir))
          .addDef(VMX)
          .addImm(Imm)
          .addReg(Src, getKillRegState(KillSrc));
      break;
    case VE::LVMyim:
      BuildMI(*MBB, MI, DL, get(VE::LVMim))
          .addDef(VMX)
          .addImm(Imm)
          .addImm(MImm);
      break;
    case VE::LVMyir_y:
      assert(MI.getOperand(0).getReg() == MI.getOperand(3).getReg() &&
             "LVMyir_y has different register in 3rd operand");
      BuildMI(*MBB, MI, DL, get(VE::LVMir_m))
          .addDef(VMX)
          .addImm(Imm)
          .addReg(Src, getKillRegState(KillSrc))
          .addReg(VMX);
      break;
    case VE::LVMyim_y:
      assert(MI.getOperand(0).getReg() == MI.getOperand(3).getReg() &&
             "LVMyim_y has different register in 3rd operand");
      BuildMI(*MBB, MI, DL, get(VE::LVMim_m))
          .addDef(VMX)
          .addImm(Imm)
          .addImm(MImm)
          .addReg(VMX);
      break;
    }
    MI.eraseFromParent();
    return true;
  }
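  // SVM reads one 64-bit element of a mask register; select the VM512 half in
  // the same way as for LVM above.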
  case VE::SVMyi: {
    Register Dest = MI.getOperand(0).getReg();
    Register VMZu = getVM512Upper(MI.getOperand(1).getReg());
    Register VMZl = getVM512Lower(MI.getOperand(1).getReg());
    bool KillSrc = MI.getOperand(1).isKill();
    int64_t Imm = MI.getOperand(2).getImm();
    Register VMZ = VMZl;
    if (Imm >= 4) {
      VMZ = VMZu;
      Imm -= 4;
    }
    MachineBasicBlock *MBB = MI.getParent();
    DebugLoc DL = MI.getDebugLoc();
    MachineInstrBuilder MIB =
        BuildMI(*MBB, MI, DL, get(VE::SVMmi), Dest).addReg(VMZ).addImm(Imm);
    MachineInstr *Inst = MIB.getInstr();
    if (KillSrc) {
      const TargetRegisterInfo *TRI = &getRegisterInfo();
      Inst->addRegisterKilled(MI.getOperand(1).getReg(), TRI, true);
    }
    MI.eraseFromParent();
    return true;
  }
  case VE::VFMKyal:
  case VE::VFMKynal:
  case VE::VFMKWyvl:
  case VE::VFMKWyvyl:
  case VE::VFMKSyvl:
  case VE::VFMKSyvyl:
    expandPseudoVFMK(*this, MI);
    return true;
  }
  return false;
}

bool VEInstrInfo::expandExtendStackPseudo(MachineInstr &MI) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const VESubtarget &STI = MF.getSubtarget<VESubtarget>();
  const VEInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc dl = MBB.findDebugLoc(MI);

  // Create the following instructions and multiple basic blocks.
  //
  // thisBB:
  //   brge.l.t %sp, %sl, sinkBB
  // syscallBB:
  //   ld    %s61, 0x18(, %tp)   // load param area
  //   or    %s62, 0, %s0        // spill the value of %s0
  //   lea   %s63, 0x13b         // syscall # of grow
  //   shm.l %s63, 0x0(%s61)     // store syscall # at addr:0
  //   shm.l %sl, 0x8(%s61)      // store old limit at addr:8
  //   shm.l %sp, 0x10(%s61)     // store new limit at addr:16
  //   monc                      // call monitor
  //   or    %s0, 0, %s62        // restore the value of %s0
  // sinkBB:

  // Create new MBB
  MachineBasicBlock *BB = &MBB;
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineBasicBlock *syscallMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = ++(BB->getIterator());
  MF.insert(It, syscallMBB);
  MF.insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(std::next(MachineBasicBlock::iterator(MI))),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(syscallMBB);
  BB->addSuccessor(sinkMBB);
  BuildMI(BB, dl, TII.get(VE::BRCFLrr_t))
      .addImm(VECC::CC_IGE)
      .addReg(VE::SX11) // %sp
      .addReg(VE::SX8)  // %sl
      .addMBB(sinkMBB);

  BB = syscallMBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(VE::LDrii), VE::SX61)
      .addReg(VE::SX14)
      .addImm(0)
      .addImm(0x18);
  BuildMI(BB, dl, TII.get(VE::ORri), VE::SX62)
      .addReg(VE::SX0)
      .addImm(0);
  BuildMI(BB, dl, TII.get(VE::LEAzii), VE::SX63)
      .addImm(0)
      .addImm(0)
      .addImm(0x13b);
  BuildMI(BB, dl, TII.get(VE::SHMLri))
      .addReg(VE::SX61)
      .addImm(0)
      .addReg(VE::SX63);
  BuildMI(BB, dl, TII.get(VE::SHMLri))
      .addReg(VE::SX61)
      .addImm(8)
      .addReg(VE::SX8);
  BuildMI(BB, dl, TII.get(VE::SHMLri))
      .addReg(VE::SX61)
      .addImm(16)
      .addReg(VE::SX11);
  BuildMI(BB, dl, TII.get(VE::MONC));

  BuildMI(BB, dl, TII.get(VE::ORri), VE::SX0)
      .addReg(VE::SX62)
      .addImm(0);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return true;
}

bool VEInstrInfo::expandGetStackTopPseudo(MachineInstr &MI) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction &MF = *MBB->getParent();
  const VESubtarget &STI = MF.getSubtarget<VESubtarget>();
  const VEInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc DL = MBB->findDebugLoc(MI);

  // Create the following instruction:
  //
  //   dst = %sp + target specific frame + the size of parameter area

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const VEFrameLowering &TFL = *STI.getFrameLowering();

  // The VE ABI requires a reserved area at the top of the stack as described
  // in VEFrameLowering.cpp, so we adjust for it here.
  unsigned NumBytes = STI.getAdjustedFrameSize(0);

  // Also add the size of the parameter area.
  if (MFI.adjustsStack() && TFL.hasReservedCallFrame(MF))
    NumBytes += MFI.getMaxCallFrameSize();

  BuildMI(*MBB, MI, DL, TII.get(VE::LEArii))
      .addDef(MI.getOperand(0).getReg())
      .addReg(VE::SX11)
      .addImm(0)
      .addImm(NumBytes);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return true;
}