1//===-- BPFInstrInfo.cpp - BPF Instruction Information ----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the BPF implementation of the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "BPFInstrInfo.h"
14#include "BPF.h"
15#include "BPFSubtarget.h"
16#include "llvm/ADT/SmallVector.h"
17#include "llvm/CodeGen/MachineBasicBlock.h"
18#include "llvm/CodeGen/MachineInstrBuilder.h"
19#include "llvm/IR/DebugLoc.h"
20#include "llvm/Support/ErrorHandling.h"
21#include <cassert>
22#include <iterator>
23
24#define GET_INSTRINFO_CTOR_DTOR
25#include "BPFGenInstrInfo.inc"
26
27using namespace llvm;
28
// Construct the BPF instruction info, forwarding to the TableGen-generated
// base class and registering the opcodes that delimit call-frame
// setup/teardown (used by generic frame lowering).
BPFInstrInfo::BPFInstrInfo(const BPFSubtarget &STI)
    : BPFGenInstrInfo(STI, RI, BPF::ADJCALLSTACKDOWN, BPF::ADJCALLSTACKUP) {}
31
32void BPFInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
33 MachineBasicBlock::iterator I,
34 const DebugLoc &DL, Register DestReg,
35 Register SrcReg, bool KillSrc,
36 bool RenamableDest, bool RenamableSrc) const {
37 if (BPF::GPRRegClass.contains(Reg1: DestReg, Reg2: SrcReg))
38 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: BPF::MOV_rr), DestReg)
39 .addReg(RegNo: SrcReg, Flags: getKillRegState(B: KillSrc));
40 else if (BPF::GPR32RegClass.contains(Reg1: DestReg, Reg2: SrcReg))
41 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: BPF::MOV_rr_32), DestReg)
42 .addReg(RegNo: SrcReg, Flags: getKillRegState(B: KillSrc));
43 else
44 llvm_unreachable("Impossible reg-to-reg copy");
45}
46
/// Expand the MEMCPY pseudo-instruction at \p MI into an inline sequence of
/// load/store pairs, then erase the pseudo.
///
/// Pseudo operand layout (see the operand reads below):
///   0: destination pointer register
///   1: source pointer register
///   2: total number of bytes to copy (immediate)
///   3: alignment in bytes — must be 1, 2, 4 or 8 (immediate)
///   4: scratch register that carries each chunk from source to destination
void BPFInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MI) const {
  Register DstReg = MI->getOperand(i: 0).getReg();
  Register SrcReg = MI->getOperand(i: 1).getReg();
  uint64_t CopyLen = MI->getOperand(i: 2).getImm();
  uint64_t Alignment = MI->getOperand(i: 3).getImm();
  Register ScratchReg = MI->getOperand(i: 4).getReg();
  MachineBasicBlock *BB = MI->getParent();
  DebugLoc dl = MI->getDebugLoc();
  unsigned LdOpc, StOpc;

  // The alignment determines the widest chunk we may move per load/store.
  switch (Alignment) {
  case 1:
    LdOpc = BPF::LDB;
    StOpc = BPF::STB;
    break;
  case 2:
    LdOpc = BPF::LDH;
    StOpc = BPF::STH;
    break;
  case 4:
    LdOpc = BPF::LDW;
    StOpc = BPF::STW;
    break;
  case 8:
    LdOpc = BPF::LDD;
    StOpc = BPF::STD;
    break;
  default:
    llvm_unreachable("unsupported memcpy alignment");
  }

  // Main body: copy CopyLen / Alignment full-width chunks, one load into the
  // scratch register followed by one store per chunk.
  unsigned IterationNum = CopyLen >> Log2_64(Value: Alignment);
  for(unsigned I = 0; I < IterationNum; ++I) {
    BuildMI(BB&: *BB, I: MI, MIMD: dl, MCID: get(Opcode: LdOpc))
           .addReg(RegNo: ScratchReg, Flags: RegState::Define).addReg(RegNo: SrcReg)
           .addImm(Val: I * Alignment);
    BuildMI(BB&: *BB, I: MI, MIMD: dl, MCID: get(Opcode: StOpc))
           .addReg(RegNo: ScratchReg, Flags: RegState::Kill).addReg(RegNo: DstReg)
           .addImm(Val: I * Alignment);
  }

  // Tail: BytesLeft < Alignment remains, so at most one 4-, one 2- and one
  // 1-byte transfer are needed (e.g. the 4-byte case can only occur when
  // Alignment is 8). Offsets continue where the main loop stopped.
  unsigned BytesLeft = CopyLen & (Alignment - 1);
  unsigned Offset = IterationNum * Alignment;
  bool Hanging4Byte = BytesLeft & 0x4;
  bool Hanging2Byte = BytesLeft & 0x2;
  bool Hanging1Byte = BytesLeft & 0x1;
  if (Hanging4Byte) {
    BuildMI(BB&: *BB, I: MI, MIMD: dl, MCID: get(Opcode: BPF::LDW))
           .addReg(RegNo: ScratchReg, Flags: RegState::Define).addReg(RegNo: SrcReg).addImm(Val: Offset);
    BuildMI(BB&: *BB, I: MI, MIMD: dl, MCID: get(Opcode: BPF::STW))
           .addReg(RegNo: ScratchReg, Flags: RegState::Kill).addReg(RegNo: DstReg).addImm(Val: Offset);
    Offset += 4;
  }
  if (Hanging2Byte) {
    BuildMI(BB&: *BB, I: MI, MIMD: dl, MCID: get(Opcode: BPF::LDH))
           .addReg(RegNo: ScratchReg, Flags: RegState::Define).addReg(RegNo: SrcReg).addImm(Val: Offset);
    BuildMI(BB&: *BB, I: MI, MIMD: dl, MCID: get(Opcode: BPF::STH))
           .addReg(RegNo: ScratchReg, Flags: RegState::Kill).addReg(RegNo: DstReg).addImm(Val: Offset);
    Offset += 2;
  }
  if (Hanging1Byte) {
    BuildMI(BB&: *BB, I: MI, MIMD: dl, MCID: get(Opcode: BPF::LDB))
           .addReg(RegNo: ScratchReg, Flags: RegState::Define).addReg(RegNo: SrcReg).addImm(Val: Offset);
    BuildMI(BB&: *BB, I: MI, MIMD: dl, MCID: get(Opcode: BPF::STB))
           .addReg(RegNo: ScratchReg, Flags: RegState::Kill).addReg(RegNo: DstReg).addImm(Val: Offset);
  }

  // The expansion is complete; remove the MEMCPY pseudo itself.
  BB->erase(I: MI);
}
116
117bool BPFInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
118 if (MI.getOpcode() == BPF::MEMCPY) {
119 expandMEMCPY(MI);
120 return true;
121 }
122
123 return false;
124}
125
126void BPFInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
127 MachineBasicBlock::iterator I,
128 Register SrcReg, bool IsKill, int FI,
129 const TargetRegisterClass *RC,
130 Register VReg,
131 MachineInstr::MIFlag Flags) const {
132 DebugLoc DL;
133 if (I != MBB.end())
134 DL = I->getDebugLoc();
135
136 if (RC == &BPF::GPRRegClass)
137 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: BPF::STD))
138 .addReg(RegNo: SrcReg, Flags: getKillRegState(B: IsKill))
139 .addFrameIndex(Idx: FI)
140 .addImm(Val: 0);
141 else if (RC == &BPF::GPR32RegClass)
142 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: BPF::STW32))
143 .addReg(RegNo: SrcReg, Flags: getKillRegState(B: IsKill))
144 .addFrameIndex(Idx: FI)
145 .addImm(Val: 0);
146 else
147 llvm_unreachable("Can't store this register to stack slot");
148}
149
150void BPFInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
151 MachineBasicBlock::iterator I,
152 Register DestReg, int FI,
153 const TargetRegisterClass *RC,
154 Register VReg, unsigned SubReg,
155 MachineInstr::MIFlag Flags) const {
156 DebugLoc DL;
157 if (I != MBB.end())
158 DL = I->getDebugLoc();
159
160 if (RC == &BPF::GPRRegClass)
161 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: BPF::LDD), DestReg).addFrameIndex(Idx: FI).addImm(Val: 0);
162 else if (RC == &BPF::GPR32RegClass)
163 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: BPF::LDW32), DestReg).addFrameIndex(Idx: FI).addImm(Val: 0);
164 else
165 llvm_unreachable("Can't load this register from stack slot");
166}
167
/// Analyze the terminators of \p MBB for the generic branch-folding
/// machinery. Returns false on success (block is either fall-through or
/// ends in a single unconditional JMP, reported via \p TBB), true if the
/// control flow cannot be understood (indirect, conditional, or non-branch
/// terminators). With \p AllowModify set, dead code after a JMP and JMPs
/// that are equivalent to fall-through are deleted on the fly.
bool BPFInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                 MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 SmallVectorImpl<MachineOperand> &Cond,
                                 bool AllowModify) const {
  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;

    // Working from the bottom, when we see a non-terminator
    // instruction, we're done.
    if (!isUnpredicatedTerminator(MI: *I))
      break;

    // From base method doc: ... returning true if it cannot be understood ...
    // Indirect branch has multiple destinations and no true/false concepts.
    if (I->isIndirectBranch())
      return true;

    // A terminator that isn't a branch can't easily be handled
    // by this analysis.
    if (!I->isBranch())
      return true;

    // Handle unconditional branches.
    if (I->getOpcode() == BPF::JMP) {
      if (!AllowModify) {
        // Read-only analysis: just record the destination and keep scanning
        // upward for earlier terminators.
        TBB = I->getOperand(i: 0).getMBB();
        continue;
      }

      // If the block has any instructions after a J, delete them.
      MBB.erase(I: std::next(x: I), E: MBB.end());
      Cond.clear();
      FBB = nullptr;

      // Delete the J if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(MBB: I->getOperand(i: 0).getMBB())) {
        TBB = nullptr;
        I->eraseFromParent();
        // Restart the bottom-up scan now that the block's tail changed.
        I = MBB.end();
        continue;
      }

      // TBB is used to indicate the unconditional destination.
      TBB = I->getOperand(i: 0).getMBB();
      continue;
    }
    // Cannot handle conditional branches
    return true;
  }

  // All terminators understood (possibly none): success.
  return false;
}
226
227unsigned BPFInstrInfo::insertBranch(MachineBasicBlock &MBB,
228 MachineBasicBlock *TBB,
229 MachineBasicBlock *FBB,
230 ArrayRef<MachineOperand> Cond,
231 const DebugLoc &DL,
232 int *BytesAdded) const {
233 assert(!BytesAdded && "code size not handled");
234
235 // Shouldn't be a fall through.
236 assert(TBB && "insertBranch must not be told to insert a fallthrough");
237
238 if (Cond.empty()) {
239 // Unconditional branch
240 assert(!FBB && "Unconditional branch with multiple successors!");
241 BuildMI(BB: &MBB, MIMD: DL, MCID: get(Opcode: BPF::JMP)).addMBB(MBB: TBB);
242 return 1;
243 }
244
245 llvm_unreachable("Unexpected conditional branch");
246}
247
/// Remove the branch instructions at the end of \p MBB and return how many
/// were deleted. Only trailing unconditional JMPs are removed (the only
/// branches analyzeBranch reports); debug instructions are skipped and the
/// scan stops at the first non-JMP instruction. Byte-size accounting
/// (\p BytesRemoved) is not implemented.
unsigned BPFInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                    int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;

  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;
    if (I->getOpcode() != BPF::JMP)
      break;
    // Remove the branch.
    I->eraseFromParent();
    // Restart from the (new) end of the block so intervening debug
    // instructions are re-skipped before the next candidate.
    I = MBB.end();
    ++Count;
  }

  return Count;
}
269
270int BPFInstrInfo::getJumpTableIndex(const MachineInstr &MI) const {
271 if (MI.getOpcode() != BPF::JX)
272 return -1;
273
274 // The pattern looks like:
275 // %0 = LD_imm64 %jump-table.0 ; load jump-table address
276 // %1 = ADD_rr %0, $another_reg ; address + offset
277 // %2 = LDD %1, 0 ; load the actual label
278 // JX %2
279 const MachineFunction &MF = *MI.getParent()->getParent();
280 const MachineRegisterInfo &MRI = MF.getRegInfo();
281
282 Register Reg = MI.getOperand(i: 0).getReg();
283 if (!Reg.isVirtual())
284 return -1;
285 MachineInstr *Ldd = MRI.getUniqueVRegDef(Reg);
286 if (Ldd == nullptr || Ldd->getOpcode() != BPF::LDD)
287 return -1;
288
289 Reg = Ldd->getOperand(i: 1).getReg();
290 if (!Reg.isVirtual())
291 return -1;
292 MachineInstr *Add = MRI.getUniqueVRegDef(Reg);
293 if (Add == nullptr || Add->getOpcode() != BPF::ADD_rr)
294 return -1;
295
296 Reg = Add->getOperand(i: 1).getReg();
297 if (!Reg.isVirtual())
298 return -1;
299 MachineInstr *LDimm64 = MRI.getUniqueVRegDef(Reg);
300 if (LDimm64 == nullptr || LDimm64->getOpcode() != BPF::LD_imm64)
301 return -1;
302
303 const MachineOperand &MO = LDimm64->getOperand(i: 1);
304 if (!MO.isJTI())
305 return -1;
306
307 return MO.getIndex();
308}
309