1//===-------------- BPFMIPeephole.cpp - MI Peephole Cleanups -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass performs peephole optimizations to cleanup ugly code sequences at
10// MachineInstruction layer.
11//
12// Currently, there are two optimizations implemented:
13// - One pre-RA MachineSSA pass to eliminate type promotion sequences, those
14// zero extend 32-bit subregisters to 64-bit registers, if the compiler
15// could prove the subregisters is defined by 32-bit operations in which
16// case the upper half of the underlying 64-bit registers were zeroed
17// implicitly.
18//
19// - One post-RA PreEmit pass to do final cleanup on some redundant
20// instructions generated due to bad RA on subregister.
21//===----------------------------------------------------------------------===//
22
23#include "BPF.h"
24#include "BPFInstrInfo.h"
25#include "BPFTargetMachine.h"
26#include "llvm/ADT/Statistic.h"
27#include "llvm/ADT/StringExtras.h"
28#include "llvm/CodeGen/LivePhysRegs.h"
29#include "llvm/CodeGen/MachineFrameInfo.h"
30#include "llvm/CodeGen/MachineFunctionPass.h"
31#include "llvm/CodeGen/MachineInstrBuilder.h"
32#include "llvm/CodeGen/MachineRegisterInfo.h"
33#include "llvm/Support/Debug.h"
34#include <set>
35
36using namespace llvm;
37
38#define DEBUG_TYPE "bpf-mi-zext-elim"
39
// Conservative cut-off used by in16BitRange() when deciding whether a branch
// offset still fits the 16-bit jump encoding.  Defaults to INT16_MAX >> 1
// because the branch-adjustment transformation itself inserts new insns,
// which can grow distances after the estimate is taken.
static cl::opt<int> GotolAbsLowBound("gotol-abs-low-bound", cl::Hidden,
  cl::init(INT16_MAX >> 1), cl::desc("Specify gotol lower bound"));

// Counts (zext, lshift, rshift) shift pairs removed by BPFMIPeephole.
STATISTIC(ZExtElemNum, "Number of zero extension shifts eliminated");
44
45namespace {
46
// Pre-RA MachineSSA pass: removes zero-extension code (shift pairs or
// MOV_32_64) whose 32-bit source is already produced by a 32-bit definition,
// i.e. whose upper 32 bits are known to be zero.
struct BPFMIPeephole : public MachineFunctionPass {

  static char ID;
  const BPFInstrInfo *TII;  // Target instruction info; set by initialize().
  MachineFunction *MF;      // Function currently being transformed.
  MachineRegisterInfo *MRI; // SSA virtual-register info for def lookups.

  BPFMIPeephole() : MachineFunctionPass(ID) {}

private:
  // Initialize class variables.
  void initialize(MachineFunction &MFParm);

  // Def-chain walkers: return true when the value reaching the given
  // instruction can be traced back to 32-bit (sub-register) definitions.
  bool isCopyFrom32Def(MachineInstr *CopyMI);
  bool isInsnFrom32Def(MachineInstr *DefInsn);
  bool isPhiFrom32Def(MachineInstr *MovMI);
  bool isMovFrom32Def(MachineInstr *MovMI);
  // The two rewrites; each returns true if anything changed.
  bool eliminateZExtSeq();
  bool eliminateZExt();

  // PHIs already visited during the current def-chain walk; guards against
  // infinite recursion on PHI cycles.  Cleared per query by isMovFrom32Def().
  std::set<MachineInstr *> PhiInsns;

public:

  // Main entry point for this pass.
  bool runOnMachineFunction(MachineFunction &MF) override {
    if (skipFunction(F: MF.getFunction()))
      return false;

    initialize(MFParm&: MF);

    // First try to eliminate (zext, lshift, rshift) and then
    // try to eliminate zext.
    bool ZExtSeqExist, ZExtExist;
    ZExtSeqExist = eliminateZExtSeq();
    ZExtExist = eliminateZExt();
    return ZExtSeqExist || ZExtExist;
  }
};
86
87// Initialize class variables.
88void BPFMIPeephole::initialize(MachineFunction &MFParm) {
89 MF = &MFParm;
90 MRI = &MF->getRegInfo();
91 TII = MF->getSubtarget<BPFSubtarget>().getInstrInfo();
92 LLVM_DEBUG(dbgs() << "*** BPF MachineSSA ZEXT Elim peephole pass ***\n\n");
93}
94
95bool BPFMIPeephole::isCopyFrom32Def(MachineInstr *CopyMI)
96{
97 MachineOperand &opnd = CopyMI->getOperand(i: 1);
98
99 if (!opnd.isReg())
100 return false;
101
102 // Return false if getting value from a 32bit physical register.
103 // Most likely, this physical register is aliased to
104 // function call return value or current function parameters.
105 Register Reg = opnd.getReg();
106 if (!Reg.isVirtual())
107 return false;
108
109 if (MRI->getRegClass(Reg) == &BPF::GPRRegClass)
110 return false;
111
112 MachineInstr *DefInsn = MRI->getVRegDef(Reg);
113 if (!isInsnFrom32Def(DefInsn))
114 return false;
115
116 return true;
117}
118
119bool BPFMIPeephole::isPhiFrom32Def(MachineInstr *PhiMI)
120{
121 for (unsigned i = 1, e = PhiMI->getNumOperands(); i < e; i += 2) {
122 MachineOperand &opnd = PhiMI->getOperand(i);
123
124 if (!opnd.isReg())
125 return false;
126
127 MachineInstr *PhiDef = MRI->getVRegDef(Reg: opnd.getReg());
128 if (!PhiDef)
129 return false;
130 if (PhiDef->isPHI()) {
131 if (!PhiInsns.insert(x: PhiDef).second)
132 return false;
133 if (!isPhiFrom32Def(PhiMI: PhiDef))
134 return false;
135 }
136 if (PhiDef->getOpcode() == BPF::COPY && !isCopyFrom32Def(CopyMI: PhiDef))
137 return false;
138 }
139
140 return true;
141}
142
143// The \p DefInsn instruction defines a virtual register.
144bool BPFMIPeephole::isInsnFrom32Def(MachineInstr *DefInsn)
145{
146 if (!DefInsn)
147 return false;
148
149 if (DefInsn->isPHI()) {
150 if (!PhiInsns.insert(x: DefInsn).second)
151 return false;
152 if (!isPhiFrom32Def(PhiMI: DefInsn))
153 return false;
154 } else if (DefInsn->getOpcode() == BPF::COPY) {
155 if (!isCopyFrom32Def(CopyMI: DefInsn))
156 return false;
157 }
158
159 return true;
160}
161
162bool BPFMIPeephole::isMovFrom32Def(MachineInstr *MovMI)
163{
164 MachineInstr *DefInsn = MRI->getVRegDef(Reg: MovMI->getOperand(i: 1).getReg());
165
166 LLVM_DEBUG(dbgs() << " Def of Mov Src:");
167 LLVM_DEBUG(DefInsn->dump());
168
169 PhiInsns.clear();
170 if (!isInsnFrom32Def(DefInsn))
171 return false;
172
173 LLVM_DEBUG(dbgs() << " One ZExt elim sequence identified.\n");
174
175 return true;
176}
177
// Replace the three-insn zero-extension idiom
//   MOV_32_64 rB, wA; SLL_ri rB, rB, 32; SRL_ri rB, rB, 32
// with a single SUBREG_TO_REG when wA is known to come from a 32-bit
// defining instruction (its upper 32 bits are already zero).
// Returns true if at least one sequence was eliminated.
bool BPFMIPeephole::eliminateZExtSeq() {
  MachineInstr* ToErase = nullptr;
  bool Eliminated = false;

  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &MI : MBB) {
      // If the previous instruction was marked for elimination, remove it now.
      // (Erasing MI inside its own iteration would invalidate the iterator.)
      if (ToErase) {
        ToErase->eraseFromParent();
        ToErase = nullptr;
      }

      // Eliminate the 32-bit to 64-bit zero extension sequence when possible.
      //
      //   MOV_32_64 rB, wA
      //   SLL_ri    rB, rB, 32
      //   SRL_ri    rB, rB, 32
      // Pattern matching starts from the trailing SRL and walks SSA defs up.
      if (MI.getOpcode() == BPF::SRL_ri &&
          MI.getOperand(i: 2).getImm() == 32) {
        Register DstReg = MI.getOperand(i: 0).getReg();
        Register ShfReg = MI.getOperand(i: 1).getReg();
        MachineInstr *SllMI = MRI->getVRegDef(Reg: ShfReg);

        LLVM_DEBUG(dbgs() << "Starting SRL found:");
        LLVM_DEBUG(MI.dump());

        // The shifted value must come directly from a matching SLL_ri
        // (a PHI def would make the pattern ambiguous).
        if (!SllMI ||
            SllMI->isPHI() ||
            SllMI->getOpcode() != BPF::SLL_ri ||
            SllMI->getOperand(i: 2).getImm() != 32)
          continue;

        LLVM_DEBUG(dbgs() << "  SLL found:");
        LLVM_DEBUG(SllMI->dump());

        // The shifted value must in turn come from the type-promoting move.
        MachineInstr *MovMI = MRI->getVRegDef(Reg: SllMI->getOperand(i: 1).getReg());
        if (!MovMI ||
            MovMI->isPHI() ||
            MovMI->getOpcode() != BPF::MOV_32_64)
          continue;

        LLVM_DEBUG(dbgs() << "  Type cast Mov found:");
        LLVM_DEBUG(MovMI->dump());

        Register SubReg = MovMI->getOperand(i: 1).getReg();
        // Only safe when the 32-bit source is provably from a 32-bit def.
        if (!isMovFrom32Def(MovMI)) {
          LLVM_DEBUG(dbgs()
                     << "  One ZExt elim sequence failed qualifying elim.\n");
          continue;
        }

        // Rewrite as a plain sub-register insertion; no runtime code needed.
        BuildMI(BB&: MBB, I&: MI, MIMD: MI.getDebugLoc(), MCID: TII->get(Opcode: BPF::SUBREG_TO_REG), DestReg: DstReg)
          .addReg(RegNo: SubReg)
          .addImm(Val: BPF::sub_32);

        SllMI->eraseFromParent();
        MovMI->eraseFromParent();
        // MI is the right shift, we can't erase it in it's own iteration.
        // Mark it to ToErase, and erase in the next iteration.
        ToErase = &MI;
        ZExtElemNum++;
        Eliminated = true;
      }
    }
  }

  return Eliminated;
}
246
// Replace a lone MOV_32_64 with SUBREG_TO_REG when its 32-bit source is
// already produced by a 32-bit definition, so the (implicitly zeroing)
// move is unnecessary.  Returns true if anything was eliminated.
bool BPFMIPeephole::eliminateZExt() {
  MachineInstr* ToErase = nullptr;
  bool Eliminated = false;

  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &MI : MBB) {
      // If the previous instruction was marked for elimination, remove it now.
      // (Erasing MI inside its own iteration would invalidate the iterator.)
      if (ToErase) {
        ToErase->eraseFromParent();
        ToErase = nullptr;
      }

      if (MI.getOpcode() != BPF::MOV_32_64)
        continue;

      // Eliminate MOV_32_64 if possible.
      //   MOV_32_64 rA, wB
      //
      // If wB has been zero extended, replace it with a SUBREG_TO_REG.
      // This is to workaround BPF programs where pkt->{data, data_end}
      // is encoded as u32, but actually the verifier populates them
      // as 64bit pointer. The MOV_32_64 will zero out the top 32 bits.
      LLVM_DEBUG(dbgs() << "Candidate MOV_32_64 instruction:");
      LLVM_DEBUG(MI.dump());

      if (!isMovFrom32Def(MovMI: &MI))
        continue;

      LLVM_DEBUG(dbgs() << "Removing the MOV_32_64 instruction\n");

      Register dst = MI.getOperand(i: 0).getReg();
      Register src = MI.getOperand(i: 1).getReg();

      // Build a SUBREG_TO_REG instruction.
      BuildMI(BB&: MBB, I&: MI, MIMD: MI.getDebugLoc(), MCID: TII->get(Opcode: BPF::SUBREG_TO_REG), DestReg: dst)
        .addReg(RegNo: src)
        .addImm(Val: BPF::sub_32);

      // Defer the erase to the next loop iteration (see comment above).
      ToErase = &MI;
      Eliminated = true;
    }
  }

  return Eliminated;
}
292
} // end anonymous namespace
294
// Register the MachineSSA pass with the pass manager infrastructure.
INITIALIZE_PASS(BPFMIPeephole, DEBUG_TYPE,
                "BPF MachineSSA Peephole Optimization For ZEXT Eliminate",
                false, false)

char BPFMIPeephole::ID = 0;
// Factory used by the BPF target to add this pass to its pipeline.
FunctionPass* llvm::createBPFMIPeepholePass() { return new BPFMIPeephole(); }

// Counts redundant MOV_rr eliminations performed by the pre-emit pass below.
STATISTIC(RedundantMovElemNum, "Number of redundant moves eliminated");
303
304namespace {
305
// Post-RA pre-emit pass: removes redundant self-moves, relaxes
// out-of-16bit-range branches to JMPL (gotol), spills/fills caller-saved
// registers around fast calls, drops no-op may_goto inline asm, and adds a
// trailing exit after a final bpf_unreachable call.
struct BPFMIPreEmitPeephole : public MachineFunctionPass {

  static char ID;
  MachineFunction *MF;           // Function currently being transformed.
  const TargetRegisterInfo *TRI; // Register info; set by initialize().
  const BPFInstrInfo *TII;       // Instruction info; set by initialize().
  bool SupportGotol;             // True when the subtarget has the gotol insn.

  BPFMIPreEmitPeephole() : MachineFunctionPass(ID) {}

private:
  // Initialize class variables.
  void initialize(MachineFunction &MFParm);

  // True when Num fits the (conservatively shrunk) 16-bit branch range.
  bool in16BitRange(int Num);
  // The individual cleanups; each returns true if anything changed.
  bool eliminateRedundantMov();
  bool adjustBranch();
  bool insertMissingCallerSavedSpills();
  bool removeMayGotoZero();
  bool addExitAfterUnreachable();

public:

  // Main entry point for this pass.
  bool runOnMachineFunction(MachineFunction &MF) override {
    if (skipFunction(F: MF.getFunction()))
      return false;

    initialize(MFParm&: MF);

    bool Changed;
    Changed = eliminateRedundantMov();
    // Branch relaxation is only meaningful when gotol (JMPL) exists.
    if (SupportGotol)
      Changed = adjustBranch() || Changed;
    Changed |= insertMissingCallerSavedSpills();
    Changed |= removeMayGotoZero();
    Changed |= addExitAfterUnreachable();
    return Changed;
  }
};
346
347// Initialize class variables.
348void BPFMIPreEmitPeephole::initialize(MachineFunction &MFParm) {
349 MF = &MFParm;
350 TII = MF->getSubtarget<BPFSubtarget>().getInstrInfo();
351 TRI = MF->getSubtarget<BPFSubtarget>().getRegisterInfo();
352 SupportGotol = MF->getSubtarget<BPFSubtarget>().hasGotol();
353 LLVM_DEBUG(dbgs() << "*** BPF PreEmit peephole pass ***\n\n");
354}
355
356bool BPFMIPreEmitPeephole::eliminateRedundantMov() {
357 MachineInstr* ToErase = nullptr;
358 bool Eliminated = false;
359
360 for (MachineBasicBlock &MBB : *MF) {
361 for (MachineInstr &MI : MBB) {
362 // If the previous instruction was marked for elimination, remove it now.
363 if (ToErase) {
364 LLVM_DEBUG(dbgs() << " Redundant Mov Eliminated:");
365 LLVM_DEBUG(ToErase->dump());
366 ToErase->eraseFromParent();
367 ToErase = nullptr;
368 }
369
370 // Eliminate identical move:
371 //
372 // MOV rA, rA
373 //
374 // Note that we cannot remove
375 // MOV_32_64 rA, wA
376 // MOV_rr_32 wA, wA
377 // as these two instructions having side effects, zeroing out
378 // top 32 bits of rA.
379 unsigned Opcode = MI.getOpcode();
380 if (Opcode == BPF::MOV_rr) {
381 Register dst = MI.getOperand(i: 0).getReg();
382 Register src = MI.getOperand(i: 1).getReg();
383
384 if (dst != src)
385 continue;
386
387 ToErase = &MI;
388 RedundantMovElemNum++;
389 Eliminated = true;
390 }
391 }
392 }
393
394 return Eliminated;
395}
396
397bool BPFMIPreEmitPeephole::in16BitRange(int Num) {
398 // Well, the cut-off is not precisely at 16bit range since
399 // new codes are added during the transformation. So let us
400 // a little bit conservative.
401 return Num >= -GotolAbsLowBound && Num <= GotolAbsLowBound;
402}
403
// Before cpu=v4, only 16bit branch target offset (-0x8000 to 0x7fff)
// is supported for both unconditional (JMP) and condition (JEQ, JSGT,
// etc.) branches. In certain cases, e.g., full unrolling, the branch
// target offset might exceed 16bit range. If this happens, the llvm
// will generate incorrect code as the offset is truncated to 16bit.
//
// To fix this rare case, a new insn JMPL is introduced. This new
// insn supports 32bit branch target offset. The compiler
// does not use this insn during insn selection. Rather, BPF backend
// will estimate the branch target offset and do JMP -> JMPL and
// JEQ -> JEQ + JMPL conversion if the estimated branch target offset
// is beyond 16bit.
bool BPFMIPreEmitPeephole::adjustBranch() {
  bool Changed = false;
  int CurrNumInsns = 0;
  // Cumulative insn count at the end of each block, in layout order.
  DenseMap<MachineBasicBlock *, int> SoFarNumInsns;
  // Layout successor of each block (nullptr for the last block).
  DenseMap<MachineBasicBlock *, MachineBasicBlock *> FollowThroughBB;
  std::vector<MachineBasicBlock *> MBBs;

  MachineBasicBlock *PrevBB = nullptr;
  for (MachineBasicBlock &MBB : *MF) {
    // MBB.size() is the number of insns in this basic block, including some
    // debug info, e.g., DEBUG_VALUE, so we may over-count a little bit.
    // Typically we have way more normal insns than DEBUG_VALUE insns.
    // Also, if we indeed need to convert conditional branch like JEQ to
    // JEQ + JMPL, we actually introduced some new insns like below.
    CurrNumInsns += (int)MBB.size();
    SoFarNumInsns[&MBB] = CurrNumInsns;
    if (PrevBB != nullptr)
      FollowThroughBB[PrevBB] = &MBB;
    PrevBB = &MBB;
    // A list of original BBs to make later traversal easier.
    MBBs.push_back(x: &MBB);
  }
  // The last block has no layout successor.
  FollowThroughBB[PrevBB] = nullptr;

  for (unsigned i = 0; i < MBBs.size(); i++) {
    // We have four cases here:
    //  (1). no terminator, simple follow through.
    //  (2). jmp to another bb.
    //  (3). conditional jmp to another bb or follow through.
    //  (4). conditional jmp followed by an unconditional jmp.
    MachineInstr *CondJmp = nullptr, *UncondJmp = nullptr;

    MachineBasicBlock *MBB = MBBs[i];
    for (MachineInstr &Term : MBB->terminators()) {
      if (Term.isConditionalBranch()) {
        assert(CondJmp == nullptr);
        CondJmp = &Term;
      } else if (Term.isUnconditionalBranch()) {
        assert(UncondJmp == nullptr);
        UncondJmp = &Term;
      }
    }

    // (1). no terminator, simple follow through.
    if (!CondJmp && !UncondJmp)
      continue;

    MachineBasicBlock *CondTargetBB, *JmpBB;
    CurrNumInsns = SoFarNumInsns[MBB];

    // (2). jmp to another bb.
    if (!CondJmp && UncondJmp) {
      JmpBB = UncondJmp->getOperand(i: 0).getMBB();
      // Distance estimate: insns between the end of this block and the
      // start of the target block.
      if (in16BitRange(Num: SoFarNumInsns[JmpBB] - JmpBB->size() - CurrNumInsns))
        continue;

      // replace this insn as a JMPL.
      BuildMI(BB: MBB, MIMD: UncondJmp->getDebugLoc(), MCID: TII->get(Opcode: BPF::JMPL)).addMBB(MBB: JmpBB);
      UncondJmp->eraseFromParent();
      Changed = true;
      continue;
    }

    const BasicBlock *TermBB = MBB->getBasicBlock();
    int Dist;

    // (3). conditional jmp to another bb or follow through.
    if (!UncondJmp) {
      CondTargetBB = CondJmp->getOperand(i: 2).getMBB();
      MachineBasicBlock *FollowBB = FollowThroughBB[MBB];
      Dist = SoFarNumInsns[CondTargetBB] - CondTargetBB->size() - CurrNumInsns;
      if (in16BitRange(Num: Dist))
        continue;

      // We have
      //   B2: ...
      //       if (cond) goto B5
      //   B3: ...
      // where B2 -> B5 is beyond 16bit range.
      //
      // We do not have 32bit cond jmp insn. So we try to do
      // the following.
      //   B2: ...
      //       if (cond) goto New_B1
      //   New_B0: goto B3
      //   New_B1: gotol B5
      //   B3: ...
      // Basically two new basic blocks are created.
      MachineBasicBlock *New_B0 = MF->CreateMachineBasicBlock(BB: TermBB);
      MachineBasicBlock *New_B1 = MF->CreateMachineBasicBlock(BB: TermBB);

      // Insert New_B0 and New_B1 into function block list.
      MachineFunction::iterator MBB_I = ++MBB->getIterator();
      MF->insert(MBBI: MBB_I, MBB: New_B0);
      MF->insert(MBBI: MBB_I, MBB: New_B1);

      // replace B2 cond jump; the condition may be register-register or
      // register-immediate, hence the two BuildMI forms.
      if (CondJmp->getOperand(i: 1).isReg())
        BuildMI(BB&: *MBB, I: MachineBasicBlock::iterator(*CondJmp), MIMD: CondJmp->getDebugLoc(), MCID: TII->get(Opcode: CondJmp->getOpcode()))
            .addReg(RegNo: CondJmp->getOperand(i: 0).getReg())
            .addReg(RegNo: CondJmp->getOperand(i: 1).getReg())
            .addMBB(MBB: New_B1);
      else
        BuildMI(BB&: *MBB, I: MachineBasicBlock::iterator(*CondJmp), MIMD: CondJmp->getDebugLoc(), MCID: TII->get(Opcode: CondJmp->getOpcode()))
            .addReg(RegNo: CondJmp->getOperand(i: 0).getReg())
            .addImm(Val: CondJmp->getOperand(i: 1).getImm())
            .addMBB(MBB: New_B1);

      // it is possible that CondTargetBB and FollowBB are the same. But the
      // above Dist checking should already filtered this case.
      MBB->removeSuccessor(Succ: CondTargetBB);
      MBB->removeSuccessor(Succ: FollowBB);
      MBB->addSuccessor(Succ: New_B0);
      MBB->addSuccessor(Succ: New_B1);

      // Populate insns in New_B0 and New_B1.
      BuildMI(BB: New_B0, MIMD: CondJmp->getDebugLoc(), MCID: TII->get(Opcode: BPF::JMP)).addMBB(MBB: FollowBB);
      BuildMI(BB: New_B1, MIMD: CondJmp->getDebugLoc(), MCID: TII->get(Opcode: BPF::JMPL))
          .addMBB(MBB: CondTargetBB);

      New_B0->addSuccessor(Succ: FollowBB);
      New_B1->addSuccessor(Succ: CondTargetBB);
      CondJmp->eraseFromParent();
      Changed = true;
      continue;
    }

    // (4). conditional jmp followed by an unconditional jmp.
    CondTargetBB = CondJmp->getOperand(i: 2).getMBB();
    JmpBB = UncondJmp->getOperand(i: 0).getMBB();

    // We have
    //   B2: ...
    //       if (cond) goto B5
    //       JMP B7
    //   B3: ...
    //
    // If only B2->B5 is out of 16bit range, we can do
    //   B2: ...
    //       if (cond) goto new_B
    //       JMP B7
    //   New_B: gotol B5
    //   B3: ...
    //
    // If only 'JMP B7' is out of 16bit range, we can replace
    // 'JMP B7' with 'JMPL B7'.
    //
    // If both B2->B5 and 'JMP B7' is out of range, just do
    // both the above transformations.
    Dist = SoFarNumInsns[CondTargetBB] - CondTargetBB->size() - CurrNumInsns;
    if (!in16BitRange(Num: Dist)) {
      MachineBasicBlock *New_B = MF->CreateMachineBasicBlock(BB: TermBB);

      // Insert New_B into function block list.
      MF->insert(MBBI: ++MBB->getIterator(), MBB: New_B);

      // replace B2 cond jump (register-register or register-immediate form).
      if (CondJmp->getOperand(i: 1).isReg())
        BuildMI(BB&: *MBB, I: MachineBasicBlock::iterator(*CondJmp), MIMD: CondJmp->getDebugLoc(), MCID: TII->get(Opcode: CondJmp->getOpcode()))
            .addReg(RegNo: CondJmp->getOperand(i: 0).getReg())
            .addReg(RegNo: CondJmp->getOperand(i: 1).getReg())
            .addMBB(MBB: New_B);
      else
        BuildMI(BB&: *MBB, I: MachineBasicBlock::iterator(*CondJmp), MIMD: CondJmp->getDebugLoc(), MCID: TII->get(Opcode: CondJmp->getOpcode()))
            .addReg(RegNo: CondJmp->getOperand(i: 0).getReg())
            .addImm(Val: CondJmp->getOperand(i: 1).getImm())
            .addMBB(MBB: New_B);

      if (CondTargetBB != JmpBB)
        MBB->removeSuccessor(Succ: CondTargetBB);
      MBB->addSuccessor(Succ: New_B);

      // Populate insn in New_B.
      BuildMI(BB: New_B, MIMD: CondJmp->getDebugLoc(), MCID: TII->get(Opcode: BPF::JMPL)).addMBB(MBB: CondTargetBB);

      New_B->addSuccessor(Succ: CondTargetBB);
      CondJmp->eraseFromParent();
      Changed = true;
    }

    if (!in16BitRange(Num: SoFarNumInsns[JmpBB] - CurrNumInsns)) {
      BuildMI(BB: MBB, MIMD: UncondJmp->getDebugLoc(), MCID: TII->get(Opcode: BPF::JMPL)).addMBB(MBB: JmpBB);
      UncondJmp->eraseFromParent();
      Changed = true;
    }
  }

  return Changed;
}
605
// Caller-saved registers considered by insertMissingCallerSavedSpills().
static const unsigned CallerSavedRegs[] = {BPF::R0, BPF::R1, BPF::R2,
                                           BPF::R3, BPF::R4, BPF::R5};

// A call site that needs manual spill/fill of caller-saved registers.
struct BPFFastCall {
  MachineInstr *MI;             // The call instruction.
  unsigned LiveCallerSavedRegs; // Bitmask (1 << Reg) of live caller-saved regs.
};
613
// Scan \p BB bottom-up, maintaining physical-register liveness, and collect
// into \p Calls every call instruction that has caller-saved registers
// (CallerSavedRegs) live across it.  Each entry records the call plus a
// (1 << Reg) bitmask of the registers needing a spill/fill pair.
static void collectBPFFastCalls(const TargetRegisterInfo *TRI,
                                LivePhysRegs &LiveRegs, MachineBasicBlock &BB,
                                SmallVectorImpl<BPFFastCall> &Calls) {
  LiveRegs.init(TRI: *TRI);
  LiveRegs.addLiveOuts(MBB: BB);
  Calls.clear();
  for (MachineInstr &MI : llvm::reverse(C&: BB)) {
    if (MI.isCall()) {
      unsigned LiveCallerSavedRegs = 0;
      for (MCRegister R : CallerSavedRegs) {
        // A spill/fill is needed only when some sub-register of R is live
        // after the call and is not redefined by the call itself.
        bool DoSpillFill = false;
        for (MCPhysReg SR : TRI->subregs(Reg: R))
          DoSpillFill |= !MI.definesRegister(Reg: SR, TRI) && LiveRegs.contains(Reg: SR);
        if (!DoSpillFill)
          continue;
        LiveCallerSavedRegs |= 1 << R;
      }
      if (LiveCallerSavedRegs)
        Calls.push_back(Elt: {.MI: &MI, .LiveCallerSavedRegs: LiveCallerSavedRegs});
    }
    // Update liveness to reflect the program point just before MI.
    LiveRegs.stepBackward(MI);
  }
}
637
// Return the lowest (most negative) offset below every live fixed stack
// object, aligned down to a SlotSize boundary, so new fixed spill slots can
// be carved out beneath all existing ones.
static int64_t computeMinFixedObjOffset(MachineFrameInfo &MFI,
                                        unsigned SlotSize) {
  int64_t MinFixedObjOffset = 0;
  // Same logic as in X86FrameLowering::adjustFrameForMsvcCxxEh()
  for (int I = MFI.getObjectIndexBegin(); I < MFI.getObjectIndexEnd(); ++I) {
    if (MFI.isDeadObjectIndex(ObjectIdx: I))
      continue;
    MinFixedObjOffset = std::min(a: MinFixedObjOffset, b: MFI.getObjectOffset(ObjectIdx: I));
  }
  // Round down to a SlotSize boundary (offsets here are <= 0).
  MinFixedObjOffset -=
      (SlotSize + MinFixedObjOffset % SlotSize) & (SlotSize - 1);
  return MinFixedObjOffset;
}
651
// For every fast call that has caller-saved registers live across it,
// allocate fixed stack slots below all existing fixed objects and bracket
// the call with STD (spill) / LDD (fill), one pair per live register.
// Returns true if any spill/fill code was inserted.
bool BPFMIPreEmitPeephole::insertMissingCallerSavedSpills() {
  MachineFrameInfo &MFI = MF->getFrameInfo();
  SmallVector<BPFFastCall, 8> Calls;
  LivePhysRegs LiveRegs;
  const unsigned SlotSize = 8;
  // Starting point for new slots: just below every existing fixed object.
  int64_t MinFixedObjOffset = computeMinFixedObjOffset(MFI, SlotSize);
  bool Changed = false;
  for (MachineBasicBlock &BB : *MF) {
    collectBPFFastCalls(TRI, LiveRegs, BB, Calls);
    Changed |= !Calls.empty();
    for (BPFFastCall &Call : Calls) {
      // NOTE(review): CurOffset restarts at MinFixedObjOffset for every call,
      // so different calls reuse the same slot range — presumably safe since
      // each spill is filled back before the next call; confirm.
      int64_t CurOffset = MinFixedObjOffset;
      for (MCRegister Reg : CallerSavedRegs) {
        if (((1 << Reg) & Call.LiveCallerSavedRegs) == 0)
          continue;
        // Allocate stack object
        CurOffset -= SlotSize;
        MFI.CreateFixedSpillStackObject(Size: SlotSize, SPOffset: CurOffset);
        // Generate spill (store to [R10 + CurOffset] before the call)
        BuildMI(BB, I: Call.MI->getIterator(), MIMD: Call.MI->getDebugLoc(),
                MCID: TII->get(Opcode: BPF::STD))
            .addReg(RegNo: Reg, Flags: RegState::Kill)
            .addReg(RegNo: BPF::R10)
            .addImm(Val: CurOffset);
        // Generate fill (reload right after the call)
        BuildMI(BB, I: ++Call.MI->getIterator(), MIMD: Call.MI->getDebugLoc(),
                MCID: TII->get(Opcode: BPF::LDD))
            .addReg(RegNo: Reg, Flags: RegState::Define)
            .addReg(RegNo: BPF::R10)
            .addImm(Val: CurOffset);
      }
    }
  }
  return Changed;
}
687
// Drop 'may_goto' inline-asm insns whose branch target is the very next
// (layout-successor) basic block: branching there is the same as falling
// through.  If the insn was the only one in its block, the whole block is
// removed and its predecessors re-wired.  Returns true on any change.
bool BPFMIPreEmitPeephole::removeMayGotoZero() {
  bool Changed = false;
  MachineBasicBlock *Prev_MBB, *Curr_MBB = nullptr;

  // Walk blocks in reverse layout order so Prev_MBB is always the layout
  // successor of Curr_MBB; early-inc iteration tolerates block erasure.
  for (MachineBasicBlock &MBB : make_early_inc_range(Range: reverse(C&: *MF))) {
    Prev_MBB = Curr_MBB;
    Curr_MBB = &MBB;
    if (Prev_MBB == nullptr || Curr_MBB->empty())
      continue;

    // Only the block's final insn can be the candidate inline asm branch.
    MachineInstr &MI = Curr_MBB->back();
    if (MI.getOpcode() != TargetOpcode::INLINEASM_BR)
      continue;

    const char *AsmStr = MI.getOperand(i: 0).getSymbolName();
    SmallVector<StringRef, 4> AsmPieces;
    SplitString(Source: AsmStr, OutFragments&: AsmPieces, Delimiters: ";\n");

    // Do not support multiple insns in one inline asm.
    if (AsmPieces.size() != 1)
      continue;

    // The asm insn must be a may_goto insn.
    SmallVector<StringRef, 4> AsmOpPieces;
    SplitString(Source: AsmPieces[0], OutFragments&: AsmOpPieces, Delimiters: " ");
    if (AsmOpPieces.size() != 2 || AsmOpPieces[0] != "may_goto")
      continue;
    // Enforce the format of 'may_goto <label>'.
    if (AsmOpPieces[1] != "${0:l}" && AsmOpPieces[1] != "$0")
      continue;

    // Get the may_goto branch target; it must be the fallthrough block.
    MachineOperand &MO = MI.getOperand(i: InlineAsm::MIOp_FirstOperand + 1);
    if (!MO.isMBB() || MO.getMBB() != Prev_MBB)
      continue;

    Changed = true;
    if (Curr_MBB->begin() == MI) {
      // Single 'may_goto' insn in the same basic block: drop the whole
      // block and point its predecessors at the fallthrough block.
      Curr_MBB->removeSuccessor(Succ: Prev_MBB);
      for (MachineBasicBlock *Pred : Curr_MBB->predecessors())
        Pred->replaceSuccessor(Old: Curr_MBB, New: Prev_MBB);
      Curr_MBB->eraseFromParent();
      Curr_MBB = Prev_MBB;
    } else {
      // Remove 'may_goto' insn.
      MI.eraseFromParent();
    }
  }

  return Changed;
}
740
741// If the last insn in a funciton is 'JAL &bpf_unreachable', let us add an
742// 'exit' insn after that insn. This will ensure no fallthrough at the last
743// insn, making kernel verification easier.
744bool BPFMIPreEmitPeephole::addExitAfterUnreachable() {
745 MachineBasicBlock &MBB = MF->back();
746 MachineInstr &MI = MBB.back();
747 if (MI.getOpcode() != BPF::JAL || !MI.getOperand(i: 0).isGlobal() ||
748 MI.getOperand(i: 0).getGlobal()->getName() != BPF_TRAP)
749 return false;
750
751 BuildMI(BB: &MBB, MIMD: MI.getDebugLoc(), MCID: TII->get(Opcode: BPF::RET));
752 return true;
753}
754
} // end anonymous namespace
756
// Register the pre-emit pass with the pass manager infrastructure.
INITIALIZE_PASS(BPFMIPreEmitPeephole, "bpf-mi-pemit-peephole",
                "BPF PreEmit Peephole Optimization", false, false)

char BPFMIPreEmitPeephole::ID = 0;
// Factory used by the BPF target to add this pass to its pipeline.
FunctionPass* llvm::createBPFMIPreEmitPeepholePass()
{
  return new BPFMIPreEmitPeephole();
}
765