1//===----- BPFMISimplifyPatchable.cpp - MI Simplify Patchable Insts -------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass targets a subset of instructions like below
10// ld_imm64 r1, @global
11// ldd r2, r1, 0
12// add r3, struct_base_reg, r2
13//
// Here @global should represent an AMA (abstract member access).
15// Such an access is subject to bpf load time patching. After this pass, the
16// code becomes
17// ld_imm64 r1, @global
18// add r3, struct_base_reg, r1
19//
20// Eventually, at BTF output stage, a relocation record will be generated
21// for ld_imm64 which should be replaced later by bpf loader:
22// r1 = <calculated field_info>
23// add r3, struct_base_reg, r1
24//
25// This pass also removes the intermediate load generated in IR pass for
26// __builtin_btf_type_id() intrinsic.
27//
28//===----------------------------------------------------------------------===//
29
30#include "BPF.h"
31#include "BPFCORE.h"
32#include "BPFInstrInfo.h"
33#include "BPFTargetMachine.h"
34#include "llvm/CodeGen/MachineFunctionPass.h"
35#include "llvm/CodeGen/MachineInstrBuilder.h"
36#include "llvm/CodeGen/MachineRegisterInfo.h"
37#include "llvm/IR/GlobalVariable.h"
38#include "llvm/Support/Debug.h"
39#include <set>
40
41using namespace llvm;
42
43#define DEBUG_TYPE "bpf-mi-simplify-patchable"
44
// Command-line escape hatch: when set, the unneeded AMA loads are still
// removed, but the follow-on rewrite of dependent ADD/shift/memory
// instructions into CORE_* pseudo instructions (see processInst) is skipped.
static cl::opt<bool>
    DisableCOREOptimization("disable-bpf-core-optimization", cl::Hidden,
                            cl::desc("Disable CORE relocation optimization"));
48
49namespace {
50
struct BPFMISimplifyPatchable : public MachineFunctionPass {

  static char ID;
  // Target instruction info, cached by initialize().
  const BPFInstrInfo *TII;
  // The machine function currently being transformed.
  MachineFunction *MF;

  BPFMISimplifyPatchable() : MachineFunctionPass(ID) {}

private:
  // Loads that consume a patchable value but must survive removeLD(),
  // as recorded by processInst().
  std::set<MachineInstr *> SkipInsts;

  // Initialize class variables.
  void initialize(MachineFunction &MFParm);

  // True for any BPF scalar load opcode (32-bit, 64-bit or sign-extending).
  bool isLoadInst(unsigned Opcode);
  // Driver: find and erase patchable loads; returns true if code changed.
  bool removeLD();
  // Rewrite the users of the to-be-removed load MI of global GVal.
  void processCandidate(MachineRegisterInfo *MRI, MachineBasicBlock &MBB,
                        MachineInstr &MI, Register &SrcReg, Register &DstReg,
                        const GlobalValue *GVal, bool IsAma);
  // Walk all uses of DstReg, optionally propagating SrcReg into them.
  void processDstReg(MachineRegisterInfo *MRI, Register &DstReg,
                     Register &SrcReg, const GlobalValue *GVal,
                     bool doSrcRegProp, bool IsAma);
  // Dispatch one user instruction to the matching rewrite helper.
  void processInst(MachineRegisterInfo *MRI, MachineInstr *Inst,
                   MachineOperand *RelocOp, const GlobalValue *GVal);
  // Fold loads/stores addressed via an ADD_rr of a patchable value into
  // CORE_LD*/CORE_ST pseudos.
  void checkADDrr(MachineRegisterInfo *MRI, MachineOperand *RelocOp,
                  const GlobalValue *GVal);
  // Fold reg-reg shifts whose amount is a patchable value into CORE_SHIFT.
  void checkShift(MachineRegisterInfo *MRI, MachineBasicBlock &MBB,
                  MachineOperand *RelocOp, const GlobalValue *GVal,
                  unsigned Opcode);

public:
  // Main entry point for this pass. Note the parameter MF shadows the
  // member MF; initialize() stores the parameter into the member.
  bool runOnMachineFunction(MachineFunction &MF) override {
    if (skipFunction(F: MF.getFunction()))
      return false;

    initialize(MFParm&: MF);
    return removeLD();
  }
};
91
92// Initialize class variables.
93void BPFMISimplifyPatchable::initialize(MachineFunction &MFParm) {
94 MF = &MFParm;
95 TII = MF->getSubtarget<BPFSubtarget>().getInstrInfo();
96 LLVM_DEBUG(dbgs() << "*** BPF simplify patchable insts pass ***\n\n");
97}
98
99static bool isStoreImm(unsigned Opcode) {
100 return Opcode == BPF::STB_imm || Opcode == BPF::STH_imm ||
101 Opcode == BPF::STW_imm || Opcode == BPF::STD_imm;
102}
103
104static bool isStore32(unsigned Opcode) {
105 return Opcode == BPF::STB32 || Opcode == BPF::STH32 || Opcode == BPF::STW32 ||
106 Opcode == BPF::STBREL32 || Opcode == BPF::STHREL32 ||
107 Opcode == BPF::STWREL32;
108}
109
110static bool isStore64(unsigned Opcode) {
111 return Opcode == BPF::STB || Opcode == BPF::STH || Opcode == BPF::STW ||
112 Opcode == BPF::STD || Opcode == BPF::STDREL;
113}
114
115static bool isLoad32(unsigned Opcode) {
116 return Opcode == BPF::LDB32 || Opcode == BPF::LDH32 || Opcode == BPF::LDW32 ||
117 Opcode == BPF::LDBACQ32 || Opcode == BPF::LDHACQ32 ||
118 Opcode == BPF::LDWACQ32;
119}
120
121static bool isLoad64(unsigned Opcode) {
122 return Opcode == BPF::LDB || Opcode == BPF::LDH || Opcode == BPF::LDW ||
123 Opcode == BPF::LDD || Opcode == BPF::LDDACQ;
124}
125
126static bool isLoadSext(unsigned Opcode) {
127 return Opcode == BPF::LDBSX || Opcode == BPF::LDHSX || Opcode == BPF::LDWSX;
128}
129
130bool BPFMISimplifyPatchable::isLoadInst(unsigned Opcode) {
131 return isLoad32(Opcode) || isLoad64(Opcode) || isLoadSext(Opcode);
132}
133
// Given `%1 = ADD_rr %base, %reloc` (RelocOp pointing at %reloc, which holds
// the patchable value of GVal), rewrite each load/store user of %1 with a
// zero offset into a CORE_LD*/CORE_ST pseudo that records the original
// memory opcode, the base register and the relocation global. BTF emission
// later expands the pseudo back into a memory access with the patched
// immediate offset.
void BPFMISimplifyPatchable::checkADDrr(MachineRegisterInfo *MRI,
    MachineOperand *RelocOp, const GlobalValue *GVal) {
  const MachineInstr *Inst = RelocOp->getParent();
  const MachineOperand *Op1 = &Inst->getOperand(i: 1);
  const MachineOperand *Op2 = &Inst->getOperand(i: 2);
  // Whichever source operand is not the relocated value is the struct base.
  const MachineOperand *BaseOp = (RelocOp == Op1) ? Op2 : Op1;

  // Go through all uses of %1 as in %1 = ADD_rr %2, %3
  const MachineOperand Op0 = Inst->getOperand(i: 0);
  // early_inc_range: the use list is edited below (users are erased), so
  // advance the iterator before the current element can be invalidated.
  for (MachineOperand &MO :
       llvm::make_early_inc_range(Range: MRI->use_operands(Reg: Op0.getReg()))) {
    // The candidate needs to have a unique definition.
    if (!MRI->getUniqueVRegDef(Reg: MO.getReg()))
      continue;

    MachineInstr *DefInst = MO.getParent();
    unsigned Opcode = DefInst->getOpcode();
    unsigned COREOp;
    // Map the access kind/width to the matching CORE pseudo; anything that
    // is not a plain load or store is left untouched.
    if (isLoad64(Opcode) || isLoadSext(Opcode))
      COREOp = BPF::CORE_LD64;
    else if (isLoad32(Opcode))
      COREOp = BPF::CORE_LD32;
    else if (isStore64(Opcode) || isStore32(Opcode) || isStoreImm(Opcode))
      COREOp = BPF::CORE_ST;
    else
      continue;

    // It must be a form of %2 = *(type *)(%1 + 0) or *(type *)(%1 + 0) = %2.
    const MachineOperand &ImmOp = DefInst->getOperand(i: 2);
    if (!ImmOp.isImm() || ImmOp.getImm() != 0)
      continue;

    // Reject the form:
    //   %1 = ADD_rr %2, %3
    //   *(type *)(%2 + 0) = %1
    // i.e. the ADD result being the stored value rather than the address.
    if (isStore64(Opcode) || isStore32(Opcode)) {
      const MachineOperand &Opnd = DefInst->getOperand(i: 0);
      if (Opnd.isReg() && Opnd.getReg() == MO.getReg())
        continue;
    }

    // Emit the CORE pseudo in place of the original access, then erase it.
    BuildMI(BB&: *DefInst->getParent(), I&: *DefInst, MIMD: DefInst->getDebugLoc(), MCID: TII->get(Opcode: COREOp))
        .add(MO: DefInst->getOperand(i: 0)).addImm(Val: Opcode).add(MO: *BaseOp)
        .addGlobalAddress(GV: GVal);
    DefInst->eraseFromParent();
  }
}
181
182void BPFMISimplifyPatchable::checkShift(MachineRegisterInfo *MRI,
183 MachineBasicBlock &MBB, MachineOperand *RelocOp, const GlobalValue *GVal,
184 unsigned Opcode) {
185 // Relocation operand should be the operand #2.
186 MachineInstr *Inst = RelocOp->getParent();
187 if (RelocOp != &Inst->getOperand(i: 2))
188 return;
189
190 BuildMI(BB&: MBB, I&: *Inst, MIMD: Inst->getDebugLoc(), MCID: TII->get(Opcode: BPF::CORE_SHIFT))
191 .add(MO: Inst->getOperand(i: 0)).addImm(Val: Opcode)
192 .add(MO: Inst->getOperand(i: 1)).addGlobalAddress(GV: GVal);
193 Inst->eraseFromParent();
194}
195
// Rewrite the users of the soon-to-be-removed load MI. SrcReg is the result
// of the LD_imm64 of GVal; DstReg is the load's result. For a 32-bit DstReg
// the load is replaced by a COPY of SrcReg's low 32 bits (after first
// handling SUBREG_TO_REG users); otherwise all uses of DstReg are replaced
// by SrcReg directly.
void BPFMISimplifyPatchable::processCandidate(MachineRegisterInfo *MRI,
    MachineBasicBlock &MBB, MachineInstr &MI, Register &SrcReg,
    Register &DstReg, const GlobalValue *GVal, bool IsAma) {
  if (MRI->getRegClass(Reg: DstReg) == &BPF::GPR32RegClass) {
    if (IsAma) {
      // We can optimize such a pattern:
      // %1:gpr = LD_imm64 @"llvm.s:0:4$0:2"
      // %2:gpr32 = LDW32 %1:gpr, 0
      // %3:gpr = SUBREG_TO_REG %2:gpr32, %subreg.sub_32
      // %4:gpr = ADD_rr %0:gpr, %3:gpr
      // or similar patterns below for non-alu32 case.
      auto Begin = MRI->use_begin(RegNo: DstReg), End = MRI->use_end();
      decltype(End) NextI;
      // Precompute the next iterator: processDstReg() may mutate the use
      // list while we are walking it.
      for (auto I = Begin; I != End; I = NextI) {
        NextI = std::next(x: I);
        if (!MRI->getUniqueVRegDef(Reg: I->getReg()))
          continue;

        unsigned Opcode = I->getParent()->getOpcode();
        if (Opcode == BPF::SUBREG_TO_REG) {
          Register TmpReg = I->getParent()->getOperand(i: 0).getReg();
          // Rewrite the 64-bit users of the SUBREG_TO_REG result; no
          // source-register propagation at this step (doSrcRegProp=false).
          processDstReg(MRI, DstReg&: TmpReg, SrcReg&: DstReg, GVal, doSrcRegProp: false, IsAma);
        }
      }
    }

    // 32-bit destination: replace the removed load with a COPY from the
    // low 32 bits (sub_32) of the LD_imm64 result.
    BuildMI(BB&: MBB, I&: MI, MIMD: MI.getDebugLoc(), MCID: TII->get(Opcode: BPF::COPY), DestReg: DstReg)
        .addReg(RegNo: SrcReg, Flags: {}, SubReg: BPF::sub_32);
    return;
  }

  // All uses of DstReg replaced by SrcReg
  processDstReg(MRI, DstReg, SrcReg, GVal, doSrcRegProp: true, IsAma);
}
230
// Walk every use of DstReg. When doSrcRegProp is set, rewrite the use to
// read SrcReg instead (clearing the kill flag, see below). For AMA globals,
// additionally hand each (uniquely-defined) user to processInst() so it can
// be folded into a CORE_* pseudo.
void BPFMISimplifyPatchable::processDstReg(MachineRegisterInfo *MRI,
    Register &DstReg, Register &SrcReg, const GlobalValue *GVal,
    bool doSrcRegProp, bool IsAma) {
  auto Begin = MRI->use_begin(RegNo: DstReg), End = MRI->use_end();
  decltype(End) NextI;
  // Fetch the next use up front: setReg()/processInst() below modify the
  // use list while we are iterating it.
  for (auto I = Begin; I != End; I = NextI) {
    NextI = std::next(x: I);
    if (doSrcRegProp) {
      // In situations like below it is not known if usage is a kill
      // after setReg():
      //
      // .-> %2:gpr = LD_imm64 @"llvm.t:0:0$0:0"
      // |
      // |`----------------.
      // |                 %3:gpr = LDD %2:gpr, 0
      // |                 %4:gpr = ADD_rr %0:gpr(tied-def 0), killed %3:gpr <--- (1)
      // |                 %5:gpr = LDD killed %4:gpr, 0      ^^^^^^^^^^^^^
      // |                 STD killed %5:gpr, %1:gpr, 0        this is I
      // `----------------.
      //                   %6:gpr = LDD %2:gpr, 0
      //                   %7:gpr = ADD_rr %0:gpr(tied-def 0), killed %6:gpr <--- (2)
      //                   %8:gpr = LDD killed %7:gpr, 0      ^^^^^^^^^^^^^
      //                   STD killed %8:gpr, %1:gpr, 0        this is I
      //
      // Instructions (1) and (2) would be updated by setReg() to:
      //
      //   ADD_rr %0:gpr(tied-def 0), %2:gpr
      //
      // %2:gpr is not killed at (1), so it is necessary to remove kill flag
      // from I.
      I->setReg(SrcReg);
      I->setIsKill(false);
    }

    // The candidate needs to have a unique definition.
    if (IsAma && MRI->getUniqueVRegDef(Reg: I->getReg()))
      processInst(MRI, Inst: I->getParent(), RelocOp: &*I, GVal);
  }
}
270
271// Check to see whether we could do some optimization
272// to attach relocation to downstream dependent instructions.
273// Two kinds of patterns are recognized below:
274// Pattern 1:
275// %1 = LD_imm64 @"llvm.b:0:4$0:1" <== patch_imm = 4
276// %2 = LDD %1, 0 <== this insn will be removed
277// %3 = ADD_rr %0, %2
278// %4 = LDW[32] %3, 0 OR STW[32] %4, %3, 0
279// The `%4 = ...` will be transformed to
280// CORE_[ALU32_]MEM(%4, mem_opcode, %0, @"llvm.b:0:4$0:1")
281// and later on, BTF emit phase will translate to
282// %4 = LDW[32] %0, 4 STW[32] %4, %0, 4
283// and attach a relocation to it.
284// Pattern 2:
285// %15 = LD_imm64 @"llvm.t:5:63$0:2" <== relocation type 5
286// %16 = LDD %15, 0 <== this insn will be removed
287// %17 = SRA_rr %14, %16
288// The `%17 = ...` will be transformed to
289// %17 = CORE_SHIFT(SRA_ri, %14, @"llvm.t:5:63$0:2")
290// and later on, BTF emit phase will translate to
291// %r4 = SRA_ri %r4, 63
void BPFMISimplifyPatchable::processInst(MachineRegisterInfo *MRI,
    MachineInstr *Inst, MachineOperand *RelocOp, const GlobalValue *GVal) {
  unsigned Opcode = Inst->getOpcode();
  if (isLoadInst(Opcode)) {
    // The relocated value is itself consumed by a load. That load is a real
    // memory access and must be preserved: record it so removeLD() skips it.
    SkipInsts.insert(x: Inst);
    return;
  }

  if (DisableCOREOptimization)
    return;

  if (Opcode == BPF::ADD_rr) {
    // If the struct offset is greater than INT16_MAX, skip optimization.
    // AMA global names look like "llvm.b:0:4$0:1" (see patterns above): the
    // patched immediate sits between the second ':' and the '$'.
    StringRef AccessPattern = GVal->getName();
    size_t FirstDollar = AccessPattern.find_first_of(C: '$');
    size_t FirstColon = AccessPattern.find_first_of(C: ':');
    size_t SecondColon = AccessPattern.find_first_of(C: ':', From: FirstColon + 1);
    StringRef PatchImmStr =
        AccessPattern.substr(Start: SecondColon + 1, N: FirstDollar - SecondColon);
    // NOTE(review): this substr length also takes in the '$' character;
    // harmless because std::stoll stops at the first non-digit.
    int PatchImm = std::stoll(str: std::string(PatchImmStr));
    if (PatchImm <= INT16_MAX)
      checkADDrr(MRI, RelocOp, GVal);
    return;
  }

  // Shifts by a patchable amount become CORE_SHIFT pseudos carrying the
  // matching shift-by-immediate opcode (pattern 2 above).
  if (Opcode == BPF::SLL_rr)
    checkShift(MRI, MBB&: *Inst->getParent(), RelocOp, GVal, Opcode: BPF::SLL_ri);
  else if (Opcode == BPF::SRA_rr)
    checkShift(MRI, MBB&: *Inst->getParent(), RelocOp, GVal, Opcode: BPF::SRA_ri);
  else if (Opcode == BPF::SRL_rr)
    checkShift(MRI, MBB&: *Inst->getParent(), RelocOp, GVal, Opcode: BPF::SRL_ri);
}
324
325/// Remove unneeded Load instructions.
326bool BPFMISimplifyPatchable::removeLD() {
327 MachineRegisterInfo *MRI = &MF->getRegInfo();
328 MachineInstr *ToErase = nullptr;
329 bool Changed = false;
330
331 for (MachineBasicBlock &MBB : *MF) {
332 for (MachineInstr &MI : MBB) {
333 if (ToErase) {
334 ToErase->eraseFromParent();
335 ToErase = nullptr;
336 }
337
338 // Ensure the register format is LOAD <reg>, <reg>, 0
339 if (!isLoadInst(Opcode: MI.getOpcode()))
340 continue;
341
342 if (SkipInsts.find(x: &MI) != SkipInsts.end())
343 continue;
344
345 if (!MI.getOperand(i: 0).isReg() || !MI.getOperand(i: 1).isReg())
346 continue;
347
348 if (!MI.getOperand(i: 2).isImm() || MI.getOperand(i: 2).getImm())
349 continue;
350
351 Register DstReg = MI.getOperand(i: 0).getReg();
352 Register SrcReg = MI.getOperand(i: 1).getReg();
353
354 MachineInstr *DefInst = MRI->getUniqueVRegDef(Reg: SrcReg);
355 if (!DefInst)
356 continue;
357
358 if (DefInst->getOpcode() != BPF::LD_imm64)
359 continue;
360
361 const MachineOperand &MO = DefInst->getOperand(i: 1);
362 if (!MO.isGlobal())
363 continue;
364
365 const GlobalValue *GVal = MO.getGlobal();
366 auto *GVar = dyn_cast<GlobalVariable>(Val: GVal);
367 if (!GVar)
368 continue;
369
370 // Global variables representing structure offset or type id.
371 bool IsAma = false;
372 if (GVar->hasAttribute(Kind: BPFCoreSharedInfo::AmaAttr))
373 IsAma = true;
374 else if (!GVar->hasAttribute(Kind: BPFCoreSharedInfo::TypeIdAttr))
375 continue;
376
377 processCandidate(MRI, MBB, MI, SrcReg, DstReg, GVal, IsAma);
378
379 ToErase = &MI;
380 Changed = true;
381 }
382 }
383
384 return Changed;
385}
386
387} // namespace
388
INITIALIZE_PASS(BPFMISimplifyPatchable, DEBUG_TYPE,
                "BPF PreEmit SimplifyPatchable", false, false)

// Pass identification: address of ID is the unique pass identifier.
char BPFMISimplifyPatchable::ID = 0;
// Factory used by the BPF target to add this pass to the codegen pipeline.
FunctionPass *llvm::createBPFMISimplifyPatchablePass() {
  return new BPFMISimplifyPatchable();
}
396