//===----- BPFMISimplifyPatchable.cpp - MI Simplify Patchable Insts -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass targets a subset of instruction sequences like the one below:
//    ld_imm64 r1, @global
//    ldd r2, r1, 0
//    add r3, struct_base_reg, r2
//
// Here @global should represent an AMA (abstract member access).
// Such an access is subject to BPF load-time patching. After this pass, the
// code becomes:
//    ld_imm64 r1, @global
//    add r3, struct_base_reg, r1
//
// Eventually, at the BTF output stage, a relocation record will be generated
// for the ld_imm64, which should be replaced later by the bpf loader:
//    r1 = <calculated field_info>
//    add r3, struct_base_reg, r1
//
// This pass also removes the intermediate load generated in the IR pass for
// the __builtin_btf_type_id() intrinsic.
//
//===----------------------------------------------------------------------===//

#include "BPF.h"
#include "BPFCORE.h"
#include "BPFInstrInfo.h"
#include "BPFTargetMachine.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/Support/Debug.h"
#include <set>

using namespace llvm;

#define DEBUG_TYPE "bpf-mi-simplify-patchable"

namespace {

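// The pass itself: scan each machine function for loads whose address operand
// is an LD_imm64 of an AMA (CO-RE) or type-id global, remove those loads, and
// redirect their users to the LD_imm64 result, converting dependent memory and
// shift instructions into CORE_* pseudo instructions where a relocation must
// later be attached.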
struct BPFMISimplifyPatchable : public MachineFunctionPass {

  static char ID;
  const BPFInstrInfo *TII;
  MachineFunction *MF;

  BPFMISimplifyPatchable() : MachineFunctionPass(ID) {}

private:
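  // Load instructions recorded by processInst(); removeLD() skips these when
  // scanning for candidate loads.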
  std::set<MachineInstr *> SkipInsts;

  // Initialize class variables.
  void initialize(MachineFunction &MFParm);

  bool isLoadInst(unsigned Opcode);
  bool removeLD();
  void processCandidate(MachineRegisterInfo *MRI, MachineBasicBlock &MBB,
                        MachineInstr &MI, Register &SrcReg, Register &DstReg,
                        const GlobalValue *GVal, bool IsAma);
  void processDstReg(MachineRegisterInfo *MRI, Register &DstReg,
                     Register &SrcReg, const GlobalValue *GVal,
                     bool doSrcRegProp, bool IsAma);
  void processInst(MachineRegisterInfo *MRI, MachineInstr *Inst,
                   MachineOperand *RelocOp, const GlobalValue *GVal);
  void checkADDrr(MachineRegisterInfo *MRI, MachineOperand *RelocOp,
                  const GlobalValue *GVal);
  void checkShift(MachineRegisterInfo *MRI, MachineBasicBlock &MBB,
                  MachineOperand *RelocOp, const GlobalValue *GVal,
                  unsigned Opcode);

public:
  // Main entry point for this pass.
  bool runOnMachineFunction(MachineFunction &MF) override {
    if (skipFunction(MF.getFunction()))
      return false;

    initialize(MF);
    return removeLD();
  }
};

// Initialize class variables.
void BPFMISimplifyPatchable::initialize(MachineFunction &MFParm) {
  MF = &MFParm;
  TII = MF->getSubtarget<BPFSubtarget>().getInstrInfo();
  LLVM_DEBUG(dbgs() << "*** BPF simplify patchable insts pass ***\n\n");
}

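// Opcode classification helpers. checkADDrr() and isLoadInst() below use these
// to decide whether a user of the patched value is a load, a store, or
// neither, and which CORE_* pseudo instruction should replace it.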
static bool isStoreImm(unsigned Opcode) {
  return Opcode == BPF::STB_imm || Opcode == BPF::STH_imm ||
         Opcode == BPF::STW_imm || Opcode == BPF::STD_imm;
}

static bool isStore32(unsigned Opcode) {
  return Opcode == BPF::STB32 || Opcode == BPF::STH32 || Opcode == BPF::STW32 ||
         Opcode == BPF::STBREL32 || Opcode == BPF::STHREL32 ||
         Opcode == BPF::STWREL32;
}

static bool isStore64(unsigned Opcode) {
  return Opcode == BPF::STB || Opcode == BPF::STH || Opcode == BPF::STW ||
         Opcode == BPF::STD || Opcode == BPF::STDREL;
}

static bool isLoad32(unsigned Opcode) {
  return Opcode == BPF::LDB32 || Opcode == BPF::LDH32 || Opcode == BPF::LDW32 ||
         Opcode == BPF::LDBACQ32 || Opcode == BPF::LDHACQ32 ||
         Opcode == BPF::LDWACQ32;
}

static bool isLoad64(unsigned Opcode) {
  return Opcode == BPF::LDB || Opcode == BPF::LDH || Opcode == BPF::LDW ||
         Opcode == BPF::LDD || Opcode == BPF::LDDACQ;
}

static bool isLoadSext(unsigned Opcode) {
  return Opcode == BPF::LDBSX || Opcode == BPF::LDHSX || Opcode == BPF::LDWSX;
}

bool BPFMISimplifyPatchable::isLoadInst(unsigned Opcode) {
  return isLoad32(Opcode) || isLoad64(Opcode) || isLoadSext(Opcode);
}

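// Handle the pattern "%x = ADD_rr %base, %patched": for every load or store
// that uses %x with a zero offset, emit a CORE_LD64/CORE_LD32/CORE_ST pseudo
// instruction carrying the original memory opcode, the base register and the
// AMA global, then erase the original memory instruction. BTF emission later
// attaches a CO-RE relocation to the pseudo.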
void BPFMISimplifyPatchable::checkADDrr(MachineRegisterInfo *MRI,
    MachineOperand *RelocOp, const GlobalValue *GVal) {
  const MachineInstr *Inst = RelocOp->getParent();
  const MachineOperand *Op1 = &Inst->getOperand(1);
  const MachineOperand *Op2 = &Inst->getOperand(2);
  const MachineOperand *BaseOp = (RelocOp == Op1) ? Op2 : Op1;

  // Go through all uses of %1 as in %1 = ADD_rr %2, %3
  const MachineOperand Op0 = Inst->getOperand(0);
  for (MachineOperand &MO :
       llvm::make_early_inc_range(MRI->use_operands(Op0.getReg()))) {
    // The candidate needs to have a unique definition.
    if (!MRI->getUniqueVRegDef(MO.getReg()))
      continue;

    MachineInstr *DefInst = MO.getParent();
    unsigned Opcode = DefInst->getOpcode();
    unsigned COREOp;
    if (isLoad64(Opcode) || isLoadSext(Opcode))
      COREOp = BPF::CORE_LD64;
    else if (isLoad32(Opcode))
      COREOp = BPF::CORE_LD32;
    else if (isStore64(Opcode) || isStore32(Opcode) || isStoreImm(Opcode))
      COREOp = BPF::CORE_ST;
    else
      continue;

    // It must be a form of %2 = *(type *)(%1 + 0) or *(type *)(%1 + 0) = %2.
    const MachineOperand &ImmOp = DefInst->getOperand(2);
    if (!ImmOp.isImm() || ImmOp.getImm() != 0)
      continue;

    // Reject the form:
    //   %1 = ADD_rr %2, %3
    //   *(type *)(%2 + 0) = %1
    if (isStore64(Opcode) || isStore32(Opcode)) {
      const MachineOperand &Opnd = DefInst->getOperand(0);
      if (Opnd.isReg() && Opnd.getReg() == MO.getReg())
        continue;
    }

    BuildMI(*DefInst->getParent(), *DefInst, DefInst->getDebugLoc(),
            TII->get(COREOp))
        .add(DefInst->getOperand(0)).addImm(Opcode).add(*BaseOp)
        .addGlobalAddress(GVal);
    DefInst->eraseFromParent();
  }
}

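// Handle the pattern "%x = S{LL,RA,RL}_rr %val, %patched": rewrite the shift
// into a CORE_SHIFT pseudo instruction carrying the corresponding *_ri opcode,
// the value operand and the AMA global, so that the shift amount can be
// patched at BTF emission time.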
void BPFMISimplifyPatchable::checkShift(MachineRegisterInfo *MRI,
    MachineBasicBlock &MBB, MachineOperand *RelocOp, const GlobalValue *GVal,
    unsigned Opcode) {
  // Relocation operand should be the operand #2.
  MachineInstr *Inst = RelocOp->getParent();
  if (RelocOp != &Inst->getOperand(2))
    return;

  BuildMI(MBB, *Inst, Inst->getDebugLoc(), TII->get(BPF::CORE_SHIFT))
      .add(Inst->getOperand(0)).addImm(Opcode)
      .add(Inst->getOperand(1)).addGlobalAddress(GVal);
  Inst->eraseFromParent();
}

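// Handle one candidate load of an AMA or type-id global. For a 32-bit
// destination register the load is replaced by a sub-register COPY from the
// LD_imm64 result (for the AMA case, relocations are also folded into users
// reached through SUBREG_TO_REG); otherwise all uses of the destination are
// redirected to the LD_imm64 result directly.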
void BPFMISimplifyPatchable::processCandidate(MachineRegisterInfo *MRI,
    MachineBasicBlock &MBB, MachineInstr &MI, Register &SrcReg,
    Register &DstReg, const GlobalValue *GVal, bool IsAma) {
  if (MRI->getRegClass(DstReg) == &BPF::GPR32RegClass) {
    if (IsAma) {
      // We can optimize such a pattern:
      //  %1:gpr = LD_imm64 @"llvm.s:0:4$0:2"
      //  %2:gpr32 = LDW32 %1:gpr, 0
      //  %3:gpr = SUBREG_TO_REG 0, %2:gpr32, %subreg.sub_32
      //  %4:gpr = ADD_rr %0:gpr, %3:gpr
      // or similar patterns below for non-alu32 case.
      auto Begin = MRI->use_begin(DstReg), End = MRI->use_end();
      decltype(End) NextI;
      for (auto I = Begin; I != End; I = NextI) {
        NextI = std::next(I);
        if (!MRI->getUniqueVRegDef(I->getReg()))
          continue;

        unsigned Opcode = I->getParent()->getOpcode();
        if (Opcode == BPF::SUBREG_TO_REG) {
          Register TmpReg = I->getParent()->getOperand(0).getReg();
          processDstReg(MRI, TmpReg, DstReg, GVal, false, IsAma);
        }
      }
    }

    BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(BPF::COPY), DstReg)
        .addReg(SrcReg, 0, BPF::sub_32);
    return;
  }

  // All uses of DstReg replaced by SrcReg.
  processDstReg(MRI, DstReg, SrcReg, GVal, true, IsAma);
}

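// For every use of DstReg: optionally rewrite the use to read SrcReg instead
// (clearing any stale kill flag, see the comment below), and for AMA globals
// try to fold the relocation into the using instruction via processInst().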
void BPFMISimplifyPatchable::processDstReg(MachineRegisterInfo *MRI,
    Register &DstReg, Register &SrcReg, const GlobalValue *GVal,
    bool doSrcRegProp, bool IsAma) {
  auto Begin = MRI->use_begin(DstReg), End = MRI->use_end();
  decltype(End) NextI;
  for (auto I = Begin; I != End; I = NextI) {
    NextI = std::next(I);
    if (doSrcRegProp) {
      // In situations like below it is not known if usage is a kill
      // after setReg():
      //
      // .-> %2:gpr = LD_imm64 @"llvm.t:0:0$0:0"
      // |
      // |`----------------.
      // |    %3:gpr = LDD %2:gpr, 0
      // |    %4:gpr = ADD_rr %0:gpr(tied-def 0), killed %3:gpr <--- (1)
      // |    %5:gpr = LDD killed %4:gpr, 0       ^^^^^^^^^^^^^
      // |    STD killed %5:gpr, %1:gpr, 0        this is I
      // `----------------.
      //      %6:gpr = LDD %2:gpr, 0
      //      %7:gpr = ADD_rr %0:gpr(tied-def 0), killed %6:gpr <--- (2)
      //      %8:gpr = LDD killed %7:gpr, 0       ^^^^^^^^^^^^^
      //      STD killed %8:gpr, %1:gpr, 0        this is I
      //
      // Instructions (1) and (2) would be updated by setReg() to:
      //
      //   ADD_rr %0:gpr(tied-def 0), %2:gpr
      //
      // %2:gpr is not killed at (1), so it is necessary to remove kill flag
      // from I.
      I->setReg(SrcReg);
      I->setIsKill(false);
    }

    // The candidate needs to have a unique definition.
    if (IsAma && MRI->getUniqueVRegDef(I->getReg()))
      processInst(MRI, I->getParent(), &*I, GVal);
  }
}

// Check to see whether we could do some optimization to attach the relocation
// to a downstream dependent instruction instead.
// Two kinds of patterns are recognized below:
// Pattern 1:
//   %1 = LD_imm64 @"llvm.b:0:4$0:1"  <== patch_imm = 4
//   %2 = LDD %1, 0  <== this insn will be removed
//   %3 = ADD_rr %0, %2
//   %4 = LDW[32] %3, 0  OR  STW[32] %4, %3, 0
//   The `%4 = ...` will be transformed to
//     CORE_LD64/CORE_LD32/CORE_ST(%4, mem_opcode, %0, @"llvm.b:0:4$0:1")
//   and later on, the BTF emit phase will translate it to
//     %4 = LDW[32] %0, 4  OR  STW[32] %4, %0, 4
//   and attach a relocation to it.
// Pattern 2:
//   %15 = LD_imm64 @"llvm.t:5:63$0:2"  <== relocation type 5
//   %16 = LDD %15, 0  <== this insn will be removed
//   %17 = SRA_rr %14, %16
//   The `%17 = ...` will be transformed to
//     %17 = CORE_SHIFT(SRA_ri, %14, @"llvm.t:5:63$0:2")
//   and later on, the BTF emit phase will translate it to
//     %r4 = SRA_ri %r4, 63
void BPFMISimplifyPatchable::processInst(MachineRegisterInfo *MRI,
    MachineInstr *Inst, MachineOperand *RelocOp, const GlobalValue *GVal) {
  unsigned Opcode = Inst->getOpcode();
  if (isLoadInst(Opcode)) {
    SkipInsts.insert(Inst);
    return;
  }

  if (Opcode == BPF::ADD_rr)
    checkADDrr(MRI, RelocOp, GVal);
  else if (Opcode == BPF::SLL_rr)
    checkShift(MRI, *Inst->getParent(), RelocOp, GVal, BPF::SLL_ri);
  else if (Opcode == BPF::SRA_rr)
    checkShift(MRI, *Inst->getParent(), RelocOp, GVal, BPF::SRA_ri);
  else if (Opcode == BPF::SRL_rr)
    checkShift(MRI, *Inst->getParent(), RelocOp, GVal, BPF::SRL_ri);
}

/// Remove unneeded Load instructions.
bool BPFMISimplifyPatchable::removeLD() {
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  MachineInstr *ToErase = nullptr;
  bool Changed = false;

  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &MI : MBB) {
      if (ToErase) {
        ToErase->eraseFromParent();
        ToErase = nullptr;
      }

      // Ensure the register format is LOAD <reg>, <reg>, 0
      if (!isLoadInst(MI.getOpcode()))
        continue;

      if (SkipInsts.find(&MI) != SkipInsts.end())
        continue;

      if (!MI.getOperand(0).isReg() || !MI.getOperand(1).isReg())
        continue;

      if (!MI.getOperand(2).isImm() || MI.getOperand(2).getImm())
        continue;

      Register DstReg = MI.getOperand(0).getReg();
      Register SrcReg = MI.getOperand(1).getReg();

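      // The address must be uniquely defined by an LD_imm64 of a global
      // variable carrying either the AMA or the type-id attribute.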
      MachineInstr *DefInst = MRI->getUniqueVRegDef(SrcReg);
      if (!DefInst)
        continue;

      if (DefInst->getOpcode() != BPF::LD_imm64)
        continue;

      const MachineOperand &MO = DefInst->getOperand(1);
      if (!MO.isGlobal())
        continue;

      const GlobalValue *GVal = MO.getGlobal();
      auto *GVar = dyn_cast<GlobalVariable>(GVal);
      if (!GVar)
        continue;

      // Global variables representing structure offset or type id.
      bool IsAma = false;
      if (GVar->hasAttribute(BPFCoreSharedInfo::AmaAttr))
        IsAma = true;
      else if (!GVar->hasAttribute(BPFCoreSharedInfo::TypeIdAttr))
        continue;

      processCandidate(MRI, MBB, MI, SrcReg, DstReg, GVal, IsAma);

      ToErase = &MI;
      Changed = true;
    }
  }

  return Changed;
}

} // namespace

INITIALIZE_PASS(BPFMISimplifyPatchable, DEBUG_TYPE,
                "BPF PreEmit SimplifyPatchable", false, false)

char BPFMISimplifyPatchable::ID = 0;
FunctionPass *llvm::createBPFMISimplifyPatchablePass() {
  return new BPFMISimplifyPatchable();
}