//===-- BPFInstrInfo.cpp - BPF Instruction Information ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the BPF implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "BPFInstrInfo.h"
#include "BPF.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <iterator>

#define GET_INSTRINFO_CTOR_DTOR
#include "BPFGenInstrInfo.inc"

using namespace llvm;

BPFInstrInfo::BPFInstrInfo()
    : BPFGenInstrInfo(BPF::ADJCALLSTACKDOWN, BPF::ADJCALLSTACKUP) {}

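// Copy a value between two physical registers, using MOV_rr for 64-bit GPRs
// and MOV_rr_32 for 32-bit registers. Any other register-class pairing is a
// bug in the caller.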
void BPFInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL, MCRegister DestReg,
                               MCRegister SrcReg, bool KillSrc) const {
  if (BPF::GPRRegClass.contains(DestReg, SrcReg))
    BuildMI(MBB, I, DL, get(BPF::MOV_rr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
  else if (BPF::GPR32RegClass.contains(DestReg, SrcReg))
    BuildMI(MBB, I, DL, get(BPF::MOV_rr_32), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
  else
    llvm_unreachable("Impossible reg-to-reg copy");
}

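// Expand the MEMCPY pseudo instruction into an inline sequence of loads and
// stores. The pseudo's operands are, in order: destination pointer, source
// pointer, copy length in bytes, alignment, and a scratch register used to
// carry each chunk.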
void BPFInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MI) const {
  Register DstReg = MI->getOperand(0).getReg();
  Register SrcReg = MI->getOperand(1).getReg();
  uint64_t CopyLen = MI->getOperand(2).getImm();
  uint64_t Alignment = MI->getOperand(3).getImm();
  Register ScratchReg = MI->getOperand(4).getReg();
  MachineBasicBlock *BB = MI->getParent();
  DebugLoc dl = MI->getDebugLoc();
  unsigned LdOpc, StOpc;

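  // Pick the widest load/store pair the guaranteed alignment allows.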
  switch (Alignment) {
  case 1:
    LdOpc = BPF::LDB;
    StOpc = BPF::STB;
    break;
  case 2:
    LdOpc = BPF::LDH;
    StOpc = BPF::STH;
    break;
  case 4:
    LdOpc = BPF::LDW;
    StOpc = BPF::STW;
    break;
  case 8:
    LdOpc = BPF::LDD;
    StOpc = BPF::STD;
    break;
  default:
    llvm_unreachable("unsupported memcpy alignment");
  }

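  // Copy CopyLen / Alignment full chunks through the scratch register, one
  // load/store pair per chunk.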
  unsigned IterationNum = CopyLen >> Log2_64(Alignment);
  for (unsigned I = 0; I < IterationNum; ++I) {
    BuildMI(*BB, MI, dl, get(LdOpc))
        .addReg(ScratchReg, RegState::Define).addReg(SrcReg)
        .addImm(I * Alignment);
    BuildMI(*BB, MI, dl, get(StOpc))
        .addReg(ScratchReg, RegState::Kill).addReg(DstReg)
        .addImm(I * Alignment);
  }

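  // The tail that does not fill a whole chunk (at most Alignment - 1 bytes)
  // is copied with progressively narrower accesses: 4, then 2, then 1 byte.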
  unsigned BytesLeft = CopyLen & (Alignment - 1);
  unsigned Offset = IterationNum * Alignment;
  bool Hanging4Byte = BytesLeft & 0x4;
  bool Hanging2Byte = BytesLeft & 0x2;
  bool Hanging1Byte = BytesLeft & 0x1;
  if (Hanging4Byte) {
    BuildMI(*BB, MI, dl, get(BPF::LDW))
        .addReg(ScratchReg, RegState::Define).addReg(SrcReg).addImm(Offset);
    BuildMI(*BB, MI, dl, get(BPF::STW))
        .addReg(ScratchReg, RegState::Kill).addReg(DstReg).addImm(Offset);
    Offset += 4;
  }
  if (Hanging2Byte) {
    BuildMI(*BB, MI, dl, get(BPF::LDH))
        .addReg(ScratchReg, RegState::Define).addReg(SrcReg).addImm(Offset);
    BuildMI(*BB, MI, dl, get(BPF::STH))
        .addReg(ScratchReg, RegState::Kill).addReg(DstReg).addImm(Offset);
    Offset += 2;
  }
  if (Hanging1Byte) {
    BuildMI(*BB, MI, dl, get(BPF::LDB))
        .addReg(ScratchReg, RegState::Define).addReg(SrcReg).addImm(Offset);
    BuildMI(*BB, MI, dl, get(BPF::STB))
        .addReg(ScratchReg, RegState::Kill).addReg(DstReg).addImm(Offset);
  }

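  // The expansion is complete; delete the original MEMCPY pseudo.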
  BB->erase(MI);
}

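// MEMCPY is the only pseudo instruction this target expands after register
// allocation.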
bool BPFInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  if (MI.getOpcode() == BPF::MEMCPY) {
    expandMEMCPY(MI);
    return true;
  }

  return false;
}

void BPFInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator I,
                                       Register SrcReg, bool IsKill, int FI,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI,
                                       Register VReg) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

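  // Spill 64-bit GPRs with STD and 32-bit GPRs with STW32; the stack slot is
  // addressed through a frame index with a zero offset.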
  if (RC == &BPF::GPRRegClass)
    BuildMI(MBB, I, DL, get(BPF::STD))
        .addReg(SrcReg, getKillRegState(IsKill))
        .addFrameIndex(FI)
        .addImm(0);
  else if (RC == &BPF::GPR32RegClass)
    BuildMI(MBB, I, DL, get(BPF::STW32))
        .addReg(SrcReg, getKillRegState(IsKill))
        .addFrameIndex(FI)
        .addImm(0);
  else
    llvm_unreachable("Can't store this register to stack slot");
}

void BPFInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator I,
                                        Register DestReg, int FI,
                                        const TargetRegisterClass *RC,
                                        const TargetRegisterInfo *TRI,
                                        Register VReg) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

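  // Reload with the matching opcodes: LDD for 64-bit GPRs, LDW32 for 32-bit.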
  if (RC == &BPF::GPRRegClass)
    BuildMI(MBB, I, DL, get(BPF::LDD), DestReg).addFrameIndex(FI).addImm(0);
  else if (RC == &BPF::GPR32RegClass)
    BuildMI(MBB, I, DL, get(BPF::LDW32), DestReg).addFrameIndex(FI).addImm(0);
  else
    llvm_unreachable("Can't load this register from stack slot");
}

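// Analyze the control flow at the end of MBB. Only unconditional JMPs are
// understood; any conditional or otherwise unrecognized terminator makes the
// analysis bail out by returning true.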
bool BPFInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                 MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 SmallVectorImpl<MachineOperand> &Cond,
                                 bool AllowModify) const {
  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;

    // Working from the bottom, when we see a non-terminator
    // instruction, we're done.
    if (!isUnpredicatedTerminator(*I))
      break;

    // A terminator that isn't a branch can't easily be handled
    // by this analysis.
    if (!I->isBranch())
      return true;

    // Handle unconditional branches.
    if (I->getOpcode() == BPF::JMP) {
      if (!AllowModify) {
        TBB = I->getOperand(0).getMBB();
        continue;
      }

      // If the block has any instructions after a J, delete them.
      MBB.erase(std::next(I), MBB.end());
      Cond.clear();
      FBB = nullptr;

      // Delete the J if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
        TBB = nullptr;
        I->eraseFromParent();
        I = MBB.end();
        continue;
      }

      // TBB is used to indicate the unconditional destination.
      TBB = I->getOperand(0).getMBB();
      continue;
    }
    // Cannot handle conditional branches.
    return true;
  }

  return false;
}

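// Insert an unconditional JMP to TBB at the end of MBB. analyzeBranch never
// produces a condition for this target, so a non-empty Cond is a caller error.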
unsigned BPFInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                    MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    ArrayRef<MachineOperand> Cond,
                                    const DebugLoc &DL,
                                    int *BytesAdded) const {
  assert(!BytesAdded && "code size not handled");

  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");

  if (Cond.empty()) {
    // Unconditional branch
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(BPF::JMP)).addMBB(TBB);
    return 1;
  }

  llvm_unreachable("Unexpected conditional branch");
}

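// Strip trailing unconditional JMPs from the end of MBB and return how many
// were removed; debug instructions are skipped, and any other instruction
// stops the scan.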
unsigned BPFInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                    int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;

  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;
    if (I->getOpcode() != BPF::JMP)
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  return Count;
}