1 | //===- X86VZeroUpper.cpp - AVX vzeroupper instruction inserter ------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file defines the pass which inserts x86 AVX vzeroupper instructions |
10 | // before calls to SSE encoded functions. This avoids transition latency |
11 | // penalty when transferring control between AVX encoded instructions and old |
12 | // SSE encoding mode. |
13 | // |
14 | //===----------------------------------------------------------------------===// |
15 | |
#include "X86.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
37 | |
38 | using namespace llvm; |
39 | |
40 | #define DEBUG_TYPE "x86-vzeroupper" |
41 | |
42 | static cl::opt<bool> |
43 | UseVZeroUpper("x86-use-vzeroupper" , cl::Hidden, |
44 | cl::desc("Minimize AVX to SSE transition penalty" ), |
45 | cl::init(Val: true)); |
46 | |
47 | STATISTIC(NumVZU, "Number of vzeroupper instructions inserted" ); |
48 | |
49 | namespace { |
50 | |
// Machine-function pass that inserts VZEROUPPER before calls/returns that may
// transfer control to SSE-encoded code while the upper YMM/ZMM state is dirty.
class VZeroUpperInserter : public MachineFunctionPass {
public:
  VZeroUpperInserter() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  // Runs late in codegen: all virtual registers have been allocated.
  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().setNoVRegs();
  }

  StringRef getPassName() const override { return "X86 vzeroupper inserter"; }

private:
  void processBasicBlock(MachineBasicBlock &MBB);
  void insertVZeroUpper(MachineBasicBlock::iterator I,
                        MachineBasicBlock &MBB);
  void addDirtySuccessor(MachineBasicBlock &MBB);

  using BlockExitState = enum { PASS_THROUGH, EXITS_CLEAN, EXITS_DIRTY };

  static const char* getBlockExitStateName(BlockExitState ST);

  // Core algorithm state:
  // BlockState - Each block is either:
  //   - PASS_THROUGH: There are neither YMM/ZMM dirtying instructions nor
  //                   vzeroupper instructions in this block.
  //   - EXITS_CLEAN: There is (or will be) a vzeroupper instruction in this
  //                  block that will ensure that YMM/ZMM is clean on exit.
  //   - EXITS_DIRTY: An instruction in the block dirties YMM/ZMM and no
  //                  subsequent vzeroupper in the block clears it.
  //
  // AddedToDirtySuccessors - This flag is raised when a block is added to the
  //                          DirtySuccessors list to ensure that it's not
  //                          added multiple times.
  //
  // FirstUnguardedCall - Records the location of the first unguarded call in
  //                      each basic block that may need to be guarded by a
  //                      vzeroupper. We won't know whether it actually needs
  //                      to be guarded until we discover a predecessor that
  //                      is DIRTY_OUT.
  struct BlockState {
    BlockExitState ExitState = PASS_THROUGH;
    bool AddedToDirtySuccessors = false;
    MachineBasicBlock::iterator FirstUnguardedCall;

    BlockState() = default;
  };

  using BlockStateMap = SmallVector<BlockState, 8>;
  using DirtySuccessorsWorkList = SmallVector<MachineBasicBlock *, 8>;

  // Per-block state, indexed by MachineBasicBlock number.
  BlockStateMap BlockStates;
  // Worklist of blocks reachable with dirty YMM/ZMM state on entry.
  DirtySuccessorsWorkList DirtySuccessors;
  // Set once any vzeroupper is inserted; the pass' return value.
  bool EverMadeChange;
  // True when the function uses the x86 interrupt calling convention; returns
  // there don't need guarding (the epilogue restores vector state).
  bool IsX86INTR;
  const TargetInstrInfo *TII;

  static char ID;
};
110 | |
111 | } // end anonymous namespace |
112 | |
char VZeroUpperInserter::ID = 0;

// Factory used by the X86 target to add this pass to the codegen pipeline.
FunctionPass *llvm::createX86IssueVZeroUpperPass() {
  return new VZeroUpperInserter();
}
118 | |
119 | #ifndef NDEBUG |
120 | const char* VZeroUpperInserter::getBlockExitStateName(BlockExitState ST) { |
121 | switch (ST) { |
122 | case PASS_THROUGH: return "Pass-through" ; |
123 | case EXITS_DIRTY: return "Exits-dirty" ; |
124 | case EXITS_CLEAN: return "Exits-clean" ; |
125 | } |
126 | llvm_unreachable("Invalid block exit state." ); |
127 | } |
128 | #endif |
129 | |
130 | /// VZEROUPPER cleans state that is related to Y/ZMM0-15 only. |
131 | /// Thus, there is no need to check for Y/ZMM16 and above. |
132 | static bool isYmmOrZmmReg(MCRegister Reg) { |
133 | return (Reg >= X86::YMM0 && Reg <= X86::YMM15) || |
134 | (Reg >= X86::ZMM0 && Reg <= X86::ZMM15); |
135 | } |
136 | |
137 | static bool checkFnHasLiveInYmmOrZmm(MachineRegisterInfo &MRI) { |
138 | for (std::pair<MCRegister, Register> LI : MRI.liveins()) |
139 | if (isYmmOrZmmReg(Reg: LI.first)) |
140 | return true; |
141 | |
142 | return false; |
143 | } |
144 | |
145 | static bool clobbersAllYmmAndZmmRegs(const MachineOperand &MO) { |
146 | for (unsigned reg = X86::YMM0; reg <= X86::YMM15; ++reg) { |
147 | if (!MO.clobbersPhysReg(PhysReg: reg)) |
148 | return false; |
149 | } |
150 | for (unsigned reg = X86::ZMM0; reg <= X86::ZMM15; ++reg) { |
151 | if (!MO.clobbersPhysReg(PhysReg: reg)) |
152 | return false; |
153 | } |
154 | return true; |
155 | } |
156 | |
157 | static bool hasYmmOrZmmReg(MachineInstr &MI) { |
158 | for (const MachineOperand &MO : MI.operands()) { |
159 | if (MI.isCall() && MO.isRegMask() && !clobbersAllYmmAndZmmRegs(MO)) |
160 | return true; |
161 | if (!MO.isReg()) |
162 | continue; |
163 | if (MO.isDebug()) |
164 | continue; |
165 | if (isYmmOrZmmReg(Reg: MO.getReg().asMCReg())) |
166 | return true; |
167 | } |
168 | return false; |
169 | } |
170 | |
171 | /// Check if given call instruction has a RegMask operand. |
172 | static bool callHasRegMask(MachineInstr &MI) { |
173 | assert(MI.isCall() && "Can only be called on call instructions." ); |
174 | for (const MachineOperand &MO : MI.operands()) { |
175 | if (MO.isRegMask()) |
176 | return true; |
177 | } |
178 | return false; |
179 | } |
180 | |
181 | /// Insert a vzeroupper instruction before I. |
182 | void VZeroUpperInserter::insertVZeroUpper(MachineBasicBlock::iterator I, |
183 | MachineBasicBlock &MBB) { |
184 | BuildMI(BB&: MBB, I, MIMD: I->getDebugLoc(), MCID: TII->get(Opcode: X86::VZEROUPPER)); |
185 | ++NumVZU; |
186 | EverMadeChange = true; |
187 | } |
188 | |
189 | /// Add MBB to the DirtySuccessors list if it hasn't already been added. |
190 | void VZeroUpperInserter::addDirtySuccessor(MachineBasicBlock &MBB) { |
191 | if (!BlockStates[MBB.getNumber()].AddedToDirtySuccessors) { |
192 | DirtySuccessors.push_back(Elt: &MBB); |
193 | BlockStates[MBB.getNumber()].AddedToDirtySuccessors = true; |
194 | } |
195 | } |
196 | |
197 | /// Loop over all of the instructions in the basic block, inserting vzeroupper |
198 | /// instructions before function calls. |
199 | void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) { |
200 | // Start by assuming that the block is PASS_THROUGH which implies no unguarded |
201 | // calls. |
202 | BlockExitState CurState = PASS_THROUGH; |
203 | BlockStates[MBB.getNumber()].FirstUnguardedCall = MBB.end(); |
204 | |
205 | for (MachineInstr &MI : MBB) { |
206 | bool IsCall = MI.isCall(); |
207 | bool IsReturn = MI.isReturn(); |
208 | bool IsControlFlow = IsCall || IsReturn; |
209 | |
210 | // No need for vzeroupper before iret in interrupt handler function, |
211 | // epilogue will restore YMM/ZMM registers if needed. |
212 | if (IsX86INTR && IsReturn) |
213 | continue; |
214 | |
215 | // An existing VZERO* instruction resets the state. |
216 | if (MI.getOpcode() == X86::VZEROALL || MI.getOpcode() == X86::VZEROUPPER) { |
217 | CurState = EXITS_CLEAN; |
218 | continue; |
219 | } |
220 | |
221 | // Shortcut: don't need to check regular instructions in dirty state. |
222 | if (!IsControlFlow && CurState == EXITS_DIRTY) |
223 | continue; |
224 | |
225 | if (hasYmmOrZmmReg(MI)) { |
226 | // We found a ymm/zmm-using instruction; this could be an AVX/AVX512 |
227 | // instruction, or it could be control flow. |
228 | CurState = EXITS_DIRTY; |
229 | continue; |
230 | } |
231 | |
232 | // Check for control-flow out of the current function (which might |
233 | // indirectly execute SSE instructions). |
234 | if (!IsControlFlow) |
235 | continue; |
236 | |
237 | // If the call has no RegMask, skip it as well. It usually happens on |
238 | // helper function calls (such as '_chkstk', '_ftol2') where standard |
239 | // calling convention is not used (RegMask is not used to mark register |
240 | // clobbered and register usage (def/implicit-def/use) is well-defined and |
241 | // explicitly specified. |
242 | if (IsCall && !callHasRegMask(MI)) |
243 | continue; |
244 | |
245 | // The VZEROUPPER instruction resets the upper 128 bits of YMM0-YMM15 |
246 | // registers. In addition, the processor changes back to Clean state, after |
247 | // which execution of SSE instructions or AVX instructions has no transition |
248 | // penalty. Add the VZEROUPPER instruction before any function call/return |
249 | // that might execute SSE code. |
250 | // FIXME: In some cases, we may want to move the VZEROUPPER into a |
251 | // predecessor block. |
252 | if (CurState == EXITS_DIRTY) { |
253 | // After the inserted VZEROUPPER the state becomes clean again, but |
254 | // other YMM/ZMM may appear before other subsequent calls or even before |
255 | // the end of the BB. |
256 | insertVZeroUpper(I: MI, MBB); |
257 | CurState = EXITS_CLEAN; |
258 | } else if (CurState == PASS_THROUGH) { |
259 | // If this block is currently in pass-through state and we encounter a |
260 | // call then whether we need a vzeroupper or not depends on whether this |
261 | // block has successors that exit dirty. Record the location of the call, |
262 | // and set the state to EXITS_CLEAN, but do not insert the vzeroupper yet. |
263 | // It will be inserted later if necessary. |
264 | BlockStates[MBB.getNumber()].FirstUnguardedCall = MI; |
265 | CurState = EXITS_CLEAN; |
266 | } |
267 | } |
268 | |
269 | LLVM_DEBUG(dbgs() << "MBB #" << MBB.getNumber() << " exit state: " |
270 | << getBlockExitStateName(CurState) << '\n'); |
271 | |
272 | if (CurState == EXITS_DIRTY) |
273 | for (MachineBasicBlock *Succ : MBB.successors()) |
274 | addDirtySuccessor(MBB&: *Succ); |
275 | |
276 | BlockStates[MBB.getNumber()].ExitState = CurState; |
277 | } |
278 | |
279 | /// Loop over all of the basic blocks, inserting vzeroupper instructions before |
280 | /// function calls. |
281 | bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) { |
282 | if (!UseVZeroUpper) |
283 | return false; |
284 | |
285 | const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>(); |
286 | if (!ST.hasAVX() || !ST.insertVZEROUPPER()) |
287 | return false; |
288 | TII = ST.getInstrInfo(); |
289 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
290 | EverMadeChange = false; |
291 | IsX86INTR = MF.getFunction().getCallingConv() == CallingConv::X86_INTR; |
292 | |
293 | bool FnHasLiveInYmmOrZmm = checkFnHasLiveInYmmOrZmm(MRI); |
294 | |
295 | // Fast check: if the function doesn't use any ymm/zmm registers, we don't |
296 | // need to insert any VZEROUPPER instructions. This is constant-time, so it |
297 | // is cheap in the common case of no ymm/zmm use. |
298 | bool YmmOrZmmUsed = FnHasLiveInYmmOrZmm; |
299 | for (const auto *RC : {&X86::VR256RegClass, &X86::VR512_0_15RegClass}) { |
300 | if (!YmmOrZmmUsed) { |
301 | for (MCPhysReg R : *RC) { |
302 | if (!MRI.reg_nodbg_empty(RegNo: R)) { |
303 | YmmOrZmmUsed = true; |
304 | break; |
305 | } |
306 | } |
307 | } |
308 | } |
309 | if (!YmmOrZmmUsed) |
310 | return false; |
311 | |
312 | assert(BlockStates.empty() && DirtySuccessors.empty() && |
313 | "X86VZeroUpper state should be clear" ); |
314 | BlockStates.resize(N: MF.getNumBlockIDs()); |
315 | |
316 | // Process all blocks. This will compute block exit states, record the first |
317 | // unguarded call in each block, and add successors of dirty blocks to the |
318 | // DirtySuccessors list. |
319 | for (MachineBasicBlock &MBB : MF) |
320 | processBasicBlock(MBB); |
321 | |
322 | // If any YMM/ZMM regs are live-in to this function, add the entry block to |
323 | // the DirtySuccessors list |
324 | if (FnHasLiveInYmmOrZmm) |
325 | addDirtySuccessor(MBB&: MF.front()); |
326 | |
327 | // Re-visit all blocks that are successors of EXITS_DIRTY blocks. Add |
328 | // vzeroupper instructions to unguarded calls, and propagate EXITS_DIRTY |
329 | // through PASS_THROUGH blocks. |
330 | while (!DirtySuccessors.empty()) { |
331 | MachineBasicBlock &MBB = *DirtySuccessors.back(); |
332 | DirtySuccessors.pop_back(); |
333 | BlockState &BBState = BlockStates[MBB.getNumber()]; |
334 | |
335 | // MBB is a successor of a dirty block, so its first call needs to be |
336 | // guarded. |
337 | if (BBState.FirstUnguardedCall != MBB.end()) |
338 | insertVZeroUpper(I: BBState.FirstUnguardedCall, MBB); |
339 | |
340 | // If this successor was a pass-through block, then it is now dirty. Its |
341 | // successors need to be added to the worklist (if they haven't been |
342 | // already). |
343 | if (BBState.ExitState == PASS_THROUGH) { |
344 | LLVM_DEBUG(dbgs() << "MBB #" << MBB.getNumber() |
345 | << " was Pass-through, is now Dirty-out.\n" ); |
346 | for (MachineBasicBlock *Succ : MBB.successors()) |
347 | addDirtySuccessor(MBB&: *Succ); |
348 | } |
349 | } |
350 | |
351 | BlockStates.clear(); |
352 | return EverMadeChange; |
353 | } |
354 | |