//====- X86FlagsCopyLowering.cpp - Lowers COPY nodes of EFLAGS ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Lowers COPY nodes of EFLAGS by directly extracting and preserving individual
/// flag bits.
///
/// We have to do this by carefully analyzing and rewriting the usage of the
/// copied EFLAGS register because there is no general way to rematerialize the
/// entire EFLAGS register safely and efficiently. Using `popf` both forces
/// dynamic stack adjustment and can create correctness issues due to IF, TF,
/// and other non-status flags being overwritten. Sequences involving SAHF
/// don't work on all x86 processors and are often quite slow compared to
/// directly testing a single status flag preserved in its own GPR.
///
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionAnalysisManager.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePassManager.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Analysis.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <iterator>
#include <utility>

using namespace llvm;

#define PASS_KEY "x86-flags-copy-lowering"
#define DEBUG_TYPE PASS_KEY

STATISTIC(NumCopiesEliminated, "Number of copies of EFLAGS eliminated");
STATISTIC(NumSetCCsInserted, "Number of setCC instructions inserted");
STATISTIC(NumTestsInserted, "Number of test instructions inserted");
STATISTIC(NumAddsInserted, "Number of add instructions inserted");
STATISTIC(NumNFsConvertedTo, "Number of instructions converted to NF variants");

extern cl::opt<bool> X86EnableAPXForRelocation;

namespace {

// Convenient array type for storing registers associated with each condition.
using CondRegArray = std::array<Register, X86::LAST_VALID_COND + 1>;

class X86FlagsCopyLoweringImpl {
public:
  X86FlagsCopyLoweringImpl(MachineDominatorTree *MDT) : MDT(MDT) {}

  bool runOnMachineFunction(MachineFunction &MF);

private:
  MachineRegisterInfo *MRI = nullptr;
  const X86Subtarget *Subtarget = nullptr;
  const X86InstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const TargetRegisterClass *PromoteRC = nullptr;
  MachineDominatorTree *MDT = nullptr;

  CondRegArray collectCondsInRegs(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator CopyDefI);

  Register promoteCondToReg(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator TestPos,
                            const DebugLoc &TestLoc, X86::CondCode Cond);
  std::pair<Register, bool> getCondOrInverseInReg(
      MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
      const DebugLoc &TestLoc, X86::CondCode Cond, CondRegArray &CondRegs);
  void insertTest(MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos,
                  const DebugLoc &Loc, Register Reg);

  void rewriteSetCC(MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos,
                    const DebugLoc &Loc, MachineInstr &MI,
                    CondRegArray &CondRegs);
  void rewriteArithmetic(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator Pos, const DebugLoc &Loc,
                         MachineInstr &MI, CondRegArray &CondRegs);
  void rewriteMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos,
                 const DebugLoc &Loc, MachineInstr &MI, CondRegArray &CondRegs);
};

class X86FlagsCopyLoweringLegacy : public MachineFunctionPass {
public:
  X86FlagsCopyLoweringLegacy() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "X86 EFLAGS copy lowering"; }
  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Pass identification, replacement for typeid.
  static char ID;
};

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(X86FlagsCopyLoweringLegacy, DEBUG_TYPE,
                      "X86 EFLAGS copy lowering", false, false)
INITIALIZE_PASS_END(X86FlagsCopyLoweringLegacy, DEBUG_TYPE,
                    "X86 EFLAGS copy lowering", false, false)

FunctionPass *llvm::createX86FlagsCopyLoweringLegacyPass() {
  return new X86FlagsCopyLoweringLegacy();
}

char X86FlagsCopyLoweringLegacy::ID = 0;

void X86FlagsCopyLoweringLegacy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addUsedIfAvailable<MachineDominatorTreeWrapperPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

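// Return true if this opcode implicitly reads the incoming carry flag (ADC,
// SBB, RCL, RCR, and the SETB_C pseudos). Such instructions cannot simply be
// re-pointed at a TEST of the saved condition byte; they are handled by
// rewriteArithmetic, which recreates CF right before the instruction.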
static bool isArithmeticOp(unsigned Opc) {
  return X86::isADC(Opc) || X86::isSBB(Opc) || X86::isRCL(Opc) ||
         X86::isRCR(Opc) || (Opc == X86::SETB_C32r || Opc == X86::SETB_C64r);
}

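// Split MBB at the conditional branch SplitI so that SplitI and every
// terminator after it move into a fresh block, letting a flag-recreating test
// be inserted in front of each later branch. Roughly (illustrative MIR,
// operands elided):
//
//   bb.0:
//     JCC_1 %bb.1, ...   ; stays in bb.0
//     JCC_1 %bb.2, ...   ; SplitI: moves, with everything after it, to bb.new
//     JMP_1 %bb.3
//
// bb.0 then falls through to bb.new, and successors, branch probabilities,
// and PHI operands are updated to match.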
static MachineBasicBlock &splitBlock(MachineBasicBlock &MBB,
                                     MachineInstr &SplitI,
                                     const X86InstrInfo &TII) {
  MachineFunction &MF = *MBB.getParent();

  assert(SplitI.getParent() == &MBB &&
         "Split instruction must be in the split block!");
  assert(SplitI.isBranch() &&
         "Only designed to split a tail of branch instructions!");
  assert(X86::getCondFromBranch(SplitI) != X86::COND_INVALID &&
         "Must split on an actual jCC instruction!");

  // Dig out the previous instruction to the split point.
  MachineInstr &PrevI = *std::prev(SplitI.getIterator());
  assert(PrevI.isBranch() && "Must split after a branch!");
  assert(X86::getCondFromBranch(PrevI) != X86::COND_INVALID &&
         "Must split after an actual jCC instruction!");
  assert(!std::prev(PrevI.getIterator())->isTerminator() &&
         "Must only have this one terminator prior to the split!");

  // Grab the one successor edge that will stay in `MBB`.
  MachineBasicBlock &UnsplitSucc = *PrevI.getOperand(0).getMBB();

  // Analyze the original block to see if we are actually splitting an edge
  // into two edges. This can happen when we have multiple conditional jumps to
  // the same successor.
  bool IsEdgeSplit =
      std::any_of(SplitI.getIterator(), MBB.instr_end(),
                  [&](MachineInstr &MI) {
                    assert(MI.isTerminator() &&
                           "Should only have spliced terminators!");
                    return llvm::any_of(
                        MI.operands(), [&](MachineOperand &MOp) {
                          return MOp.isMBB() && MOp.getMBB() == &UnsplitSucc;
                        });
                  }) ||
      MBB.getFallThrough() == &UnsplitSucc;

  MachineBasicBlock &NewMBB = *MF.CreateMachineBasicBlock();

  // Insert the new block immediately after the current one. Any existing
  // fallthrough will be sunk into this new block anyway.
  MF.insert(std::next(MachineFunction::iterator(&MBB)), &NewMBB);

  // Splice the tail of instructions into the new block.
  NewMBB.splice(NewMBB.end(), &MBB, SplitI.getIterator(), MBB.end());

  // Copy the necessary successors (and their probability info) into the new
  // block.
  for (auto SI = MBB.succ_begin(), SE = MBB.succ_end(); SI != SE; ++SI)
    if (IsEdgeSplit || *SI != &UnsplitSucc)
      NewMBB.copySuccessor(&MBB, SI);
  // Normalize the probabilities if we didn't end up splitting the edge.
  if (!IsEdgeSplit)
    NewMBB.normalizeSuccProbs();

  // Now replace all of the moved successors in the original block with the new
  // block. This will merge their probabilities.
  for (MachineBasicBlock *Succ : NewMBB.successors())
    if (Succ != &UnsplitSucc)
      MBB.replaceSuccessor(Succ, &NewMBB);

  // We should always end up replacing at least one successor.
  assert(MBB.isSuccessor(&NewMBB) &&
         "Failed to make the new block a successor!");

  // Now update all the PHIs.
  for (MachineBasicBlock *Succ : NewMBB.successors()) {
    for (MachineInstr &MI : *Succ) {
      if (!MI.isPHI())
        break;

      for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;
           OpIdx += 2) {
        MachineOperand &OpV = MI.getOperand(OpIdx);
        MachineOperand &OpMBB = MI.getOperand(OpIdx + 1);
        assert(OpMBB.isMBB() && "Block operand to a PHI is not a block!");
        if (OpMBB.getMBB() != &MBB)
          continue;

        // Replace the operand for unsplit successors.
        if (!IsEdgeSplit || Succ != &UnsplitSucc) {
          OpMBB.setMBB(&NewMBB);

          // We have to continue scanning as there may be multiple entries in
          // the PHI.
          continue;
        }

        // When we have split the edge, append a new successor.
        MI.addOperand(MF, OpV);
        MI.addOperand(MF, MachineOperand::CreateMBB(&NewMBB));
        break;
      }
    }
  }

  return NewMBB;
}

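// Classification of how an instruction interacts with EFLAGS for the NF
// transform below: NoClobber means it does not define EFLAGS at all,
// EvitableClobber means it only defines EFLAGS as a dead def and has an NF
// (no-flags) variant we can switch to, and InevitableClobber covers
// everything else.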
enum EFLAGSClobber { NoClobber, EvitableClobber, InevitableClobber };

static EFLAGSClobber getClobberType(const MachineInstr &MI) {
  const MachineOperand *FlagDef =
      MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
  if (!FlagDef)
    return NoClobber;

  // For ADDrm/ADDmr instructions with a relocation, we skip the optimization
  // of replacing the non-NF form with the NF form. This keeps backward
  // compatibility with old linker versions that lack APX relocation type
  // support on Linux.
  bool IsWithReloc =
      X86EnableAPXForRelocation ? false : isAddMemInstrWithRelocation(MI);

  if (FlagDef->isDead() && X86::getNFVariant(MI.getOpcode()) && !IsWithReloc)
    return EvitableClobber;

  return InevitableClobber;
}

bool X86FlagsCopyLoweringImpl::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "********** " << PASS_KEY << " : " << MF.getName()
                    << " **********\n");

  Subtarget = &MF.getSubtarget<X86Subtarget>();
  MRI = &MF.getRegInfo();
  TII = Subtarget->getInstrInfo();
  TRI = Subtarget->getRegisterInfo();
  PromoteRC = &X86::GR8RegClass;

  if (MF.empty())
    // Nothing to do for a degenerate empty function...
    return false;

  if (none_of(MRI->def_instructions(X86::EFLAGS), [](const MachineInstr &MI) {
        return MI.getOpcode() == TargetOpcode::COPY;
      }))
    return false;

  // We change the code, so we don't preserve the dominator tree anyway. If we
  // got a valid MDT from the pass manager, use that, otherwise construct one
  // now. This is an optimization that avoids unnecessary MDT construction for
  // functions that have no flag copies.
  std::unique_ptr<MachineDominatorTree> OwnedMDT;
  if (!MDT) {
    OwnedMDT = std::make_unique<MachineDominatorTree>(MF);
    MDT = OwnedMDT.get();
  }

  // Collect the copies in RPO so that when there are chains where a copy is in
  // turn copied again we visit the first one first. This ensures we can find
  // viable locations for testing the original EFLAGS that dominate all the
  // uses across complex CFGs.
  SmallSetVector<MachineInstr *, 4> Copies;
  ReversePostOrderTraversal<MachineFunction *> RPOT(&MF);
  for (MachineBasicBlock *MBB : RPOT)
    for (MachineInstr &MI : *MBB)
      if (MI.getOpcode() == TargetOpcode::COPY &&
          MI.getOperand(0).getReg() == X86::EFLAGS)
        Copies.insert(&MI);

  // Try to eliminate the copies by transforming the instructions between the
  // copy and the copy def into their NF (no flags update) variants, e.g.
  //
  // %1:gr64 = COPY $eflags
  // OP1 implicit-def dead $eflags
  // $eflags = COPY %1
  // OP2 cc, implicit $eflags
  //
  // ->
  //
  // OP1_NF
  // OP2 implicit $eflags
  if (Subtarget->hasNF()) {
    SmallSetVector<MachineInstr *, 4> RemovedCopies;
    // CopyIIt may be invalidated by removing copies.
    auto CopyIIt = Copies.begin(), CopyIEnd = Copies.end();
    while (CopyIIt != CopyIEnd) {
      auto NCopyIIt = std::next(CopyIIt);
      SmallSetVector<MachineInstr *, 4> EvitableClobbers;
      MachineInstr *CopyI = *CopyIIt;
      MachineOperand &VOp = CopyI->getOperand(1);
      MachineInstr *CopyDefI = MRI->getVRegDef(VOp.getReg());
      MachineBasicBlock *CopyIMBB = CopyI->getParent();
      MachineBasicBlock *CopyDefIMBB = CopyDefI->getParent();
      // Walk all basic blocks reachable in depth-first iteration on the
      // inverse CFG from CopyIMBB to CopyDefIMBB. These blocks are all the
      // blocks that may be executed between the execution of CopyDefIMBB and
      // CopyIMBB. On all execution paths, every instruction from CopyDefI to
      // CopyI (exclusive) has to be NF-convertible if it clobbers flags.
      for (auto BI = idf_begin(CopyIMBB), BE = idf_end(CopyDefIMBB); BI != BE;
           ++BI) {
        MachineBasicBlock *MBB = *BI;
        for (auto I = (MBB != CopyDefIMBB)
                          ? MBB->begin()
                          : std::next(MachineBasicBlock::iterator(CopyDefI)),
                  E = (MBB != CopyIMBB) ? MBB->end()
                                        : MachineBasicBlock::iterator(CopyI);
             I != E; ++I) {
          MachineInstr &MI = *I;
          EFLAGSClobber ClobberType = getClobberType(MI);
          if (ClobberType == NoClobber)
            continue;

          if (ClobberType == InevitableClobber)
            goto ProcessNextCopyI;

          assert(ClobberType == EvitableClobber && "unexpected workflow");
          EvitableClobbers.insert(&MI);
        }
      }
      // Convert evitable clobbers into NF variants and remove the copies.
      RemovedCopies.insert(CopyI);
      CopyI->eraseFromParent();
      if (MRI->use_nodbg_empty(CopyDefI->getOperand(0).getReg())) {
        RemovedCopies.insert(CopyDefI);
        CopyDefI->eraseFromParent();
      }
      ++NumCopiesEliminated;
      for (auto *Clobber : EvitableClobbers) {
        unsigned NewOpc = X86::getNFVariant(Clobber->getOpcode());
        assert(NewOpc && "evitable clobber must have a NF variant");
        Clobber->setDesc(TII->get(NewOpc));
        Clobber->removeOperand(
            Clobber->findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr)
                ->getOperandNo());
        ++NumNFsConvertedTo;
      }
      // Update live-ins for basic blocks in the path.
      for (auto BI = idf_begin(CopyIMBB), BE = idf_end(CopyDefIMBB); BI != BE;
           ++BI)
        if (*BI != CopyDefIMBB)
          BI->addLiveIn(X86::EFLAGS);
    ProcessNextCopyI:
      CopyIIt = NCopyIIt;
    }
    Copies.set_subtract(RemovedCopies);
  }

  // For the rest of the copies that cannot be eliminated by the NF transform,
  // we use SETcc to preserve the needed condition in a GPR before OP1, and
  // recheck its value before using the flags, e.g.
  //
  // %1:gr64 = COPY $eflags
  // OP1 implicit-def dead $eflags
  // $eflags = COPY %1
  // OP2 cc, implicit $eflags
  //
  // ->
  //
  // %1:gr8 = SETCCr cc, implicit $eflags
  // OP1 implicit-def dead $eflags
  // TEST8rr %1, %1, implicit-def $eflags
  // OP2 ne, implicit $eflags
  for (MachineInstr *CopyI : Copies) {
    MachineBasicBlock &MBB = *CopyI->getParent();

    MachineOperand &VOp = CopyI->getOperand(1);
    assert(VOp.isReg() &&
           "The input to the copy for EFLAGS should always be a register!");
    MachineInstr &CopyDefI = *MRI->getVRegDef(VOp.getReg());
    if (CopyDefI.getOpcode() != TargetOpcode::COPY) {
      // FIXME: The big likely candidates here are PHI nodes. We could in
      // theory handle PHI nodes, but it gets really, really hard. Insanely
      // hard. Hard enough that it is probably better to change every other
      // part of LLVM to avoid creating them. The issue is that once we have
      // PHIs we won't know which original EFLAGS value we need to capture
      // with our setCCs below. The end result will be computing a complete
      // set of setCCs that we *might* want, computing them in every place
      // where we copy *out* of EFLAGS and then doing SSA formation on all of
      // them to insert necessary PHI nodes and consume those here. Then
      // hoping that somehow we DCE the unnecessary ones. This DCE seems very
      // unlikely to be successful and so we will almost certainly end up with
      // a glut of dead setCC instructions. Until we have a motivating test
      // case and fail to avoid it by changing other parts of LLVM's lowering,
      // we refuse to handle this complex case here.
      LLVM_DEBUG(
          dbgs() << "ERROR: Encountered unexpected def of an eflags copy: ";
          CopyDefI.dump());
      report_fatal_error(
          "Cannot lower EFLAGS copy unless it is defined in turn by a copy!");
    }

    llvm::scope_exit Cleanup([&] {
      // All uses of the EFLAGS copy have been rewritten; erase the copy back
      // into EFLAGS and, if it is now dead, the defining copy out of EFLAGS
      // as well.
      CopyI->eraseFromParent();
      if (MRI->use_empty(CopyDefI.getOperand(0).getReg()))
        CopyDefI.eraseFromParent();
      ++NumCopiesEliminated;
    });

    MachineOperand &DOp = CopyI->getOperand(0);
    assert(DOp.isDef() && "Expected register def!");
    assert(DOp.getReg() == X86::EFLAGS && "Unexpected copy def register!");
    if (DOp.isDead())
      continue;

    MachineBasicBlock *TestMBB = CopyDefI.getParent();
    auto TestPos = CopyDefI.getIterator();
    DebugLoc TestLoc = CopyDefI.getDebugLoc();

    LLVM_DEBUG(dbgs() << "Rewriting copy: "; CopyI->dump());

    // Walk up across live-in EFLAGS to find where they were actually def'ed.
    //
    // This copy's def may just be part of a region of blocks covered by
    // a single def of EFLAGS and we want to find the top of that region where
    // possible.
    //
    // This is essentially a search for a *candidate* reaching definition
    // location. We don't need to ever find the actual reaching definition
    // here, but we want to walk up the dominator tree to find the highest
    // point which would be viable for such a definition.
    auto HasEFLAGSClobber = [&](MachineBasicBlock::iterator Begin,
                                MachineBasicBlock::iterator End) {
      // Scan backwards as we expect these to be relatively short and often
      // find a clobber near the end.
      return llvm::any_of(
          llvm::reverse(llvm::make_range(Begin, End)), [&](MachineInstr &MI) {
            // Flag any instruction (other than the copy we are
            // currently rewriting) that defs EFLAGS.
            return &MI != CopyI &&
                   MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
          });
    };
    auto HasEFLAGSClobberPath = [&](MachineBasicBlock *BeginMBB,
                                    MachineBasicBlock *EndMBB) {
      assert(MDT->dominates(BeginMBB, EndMBB) &&
             "Only support paths down the dominator tree!");
      SmallPtrSet<MachineBasicBlock *, 4> Visited;
      SmallVector<MachineBasicBlock *, 4> Worklist;
      // We terminate at the beginning. No need to scan it.
      Visited.insert(BeginMBB);
      Worklist.push_back(EndMBB);
      do {
        auto *MBB = Worklist.pop_back_val();
        for (auto *PredMBB : MBB->predecessors()) {
          if (!Visited.insert(PredMBB).second)
            continue;
          if (HasEFLAGSClobber(PredMBB->begin(), PredMBB->end()))
            return true;
          // Enqueue this block to walk its predecessors.
          Worklist.push_back(PredMBB);
        }
      } while (!Worklist.empty());
      // No clobber was found along any path from the begin block to the end
      // block.
      return false;
    };
    while (TestMBB->isLiveIn(X86::EFLAGS) && !TestMBB->pred_empty() &&
           !HasEFLAGSClobber(TestMBB->begin(), TestPos)) {
      // Find the nearest common dominator of the predecessors, as
      // that will be the best candidate to hoist into.
      MachineBasicBlock *HoistMBB =
          std::accumulate(std::next(TestMBB->pred_begin()),
                          TestMBB->pred_end(), *TestMBB->pred_begin(),
                          [&](MachineBasicBlock *LHS, MachineBasicBlock *RHS) {
                            return MDT->findNearestCommonDominator(LHS, RHS);
                          });

      // Now we need to scan all predecessors that may be reached along paths
      // to the hoist block. A clobber anywhere in any of these blocks blocks
      // the hoist. Note that this even handles loops because we require *no*
      // clobbers.
      if (HasEFLAGSClobberPath(HoistMBB, TestMBB))
        break;

      // We also need the terminators to not sneakily clobber flags.
      if (HasEFLAGSClobber(HoistMBB->getFirstTerminator()->getIterator(),
                           HoistMBB->instr_end()))
        break;

      // We found a viable location, hoist our test position to it.
      TestMBB = HoistMBB;
      TestPos = TestMBB->getFirstTerminator()->getIterator();
      // Clear the debug location as it would just be confusing after hoisting.
      TestLoc = DebugLoc();
    }
    LLVM_DEBUG({
      auto DefIt = llvm::find_if(
          llvm::reverse(llvm::make_range(TestMBB->instr_begin(), TestPos)),
          [&](MachineInstr &MI) {
            return MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
          });
      if (DefIt.base() != TestMBB->instr_begin()) {
        dbgs() << "  Using EFLAGS defined by: ";
        DefIt->dump();
      } else {
        dbgs() << "  Using live-in flags for BB:\n";
        TestMBB->dump();
      }
    });

    // While rewriting uses, we buffer jumps and rewrite them in a second pass
    // because doing so will perturb the CFG that we are walking to find the
    // uses in the first place.
    SmallVector<MachineInstr *, 4> JmpIs;

    // Gather the condition flags that have already been preserved in
    // registers. We do this from scratch each time as we expect there to be
    // very few of them and we expect to not revisit the same copy definition
    // many times. If either of those change sufficiently we could build a map
    // of these up front instead.
    CondRegArray CondRegs = collectCondsInRegs(*TestMBB, TestPos);

    // Collect the basic blocks we need to scan. Typically this will just be
    // a single basic block but we may have to scan multiple blocks if the
    // EFLAGS copy lives into successors.
    SmallVector<MachineBasicBlock *, 2> Blocks;
    SmallPtrSet<MachineBasicBlock *, 2> VisitedBlocks;
    Blocks.push_back(&MBB);

    do {
      MachineBasicBlock &UseMBB = *Blocks.pop_back_val();

      // Track if/when we find a kill of the flags in this block.
      bool FlagsKilled = false;

      // In most cases, we walk from the beginning to the end of the block. But
      // when the block is the same block as the copy is from, we will visit it
      // twice. The first time we start from the copy and go to the end. The
      // second time we start from the beginning and go to the copy. This lets
      // us handle copies inside of cycles.
      // FIXME: This loop is *super* confusing. This is at least in part
      // a symptom of all of this routine needing to be refactored into
      // documentable components. Once done, there may be a better way to write
      // this loop.
      for (auto MII = (&UseMBB == &MBB && !VisitedBlocks.count(&UseMBB))
                          ? std::next(CopyI->getIterator())
                          : UseMBB.instr_begin(),
                MIE = UseMBB.instr_end();
           MII != MIE;) {
        MachineInstr &MI = *MII++;
        // If we are in the original copy block and encounter either the copy
        // def or the copy itself, break so that we don't re-process any part
        // of the block or process the instructions in the range that was
        // copied over.
        if (&MI == CopyI || &MI == &CopyDefI) {
          assert(&UseMBB == &MBB && VisitedBlocks.count(&MBB) &&
                 "Should only encounter these on the second pass over the "
                 "original block.");
          break;
        }

        MachineOperand *FlagUse =
            MI.findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr);
        FlagsKilled = MI.modifiesRegister(X86::EFLAGS, TRI);

        if (!FlagUse && FlagsKilled)
          break;
        else if (!FlagUse)
          continue;

        LLVM_DEBUG(dbgs() << "  Rewriting use: "; MI.dump());

        // Check the kill flag before we rewrite as that may change it.
        if (FlagUse->isKill())
          FlagsKilled = true;

        // Once we encounter a branch, the rest of the instructions must also
        // be branches. We can't rewrite in place here, so we handle them
        // below.
        //
        // Note that we don't have to handle tail calls here, even conditional
        // tail calls, as those are not introduced into the X86 MI until
        // post-RA branch folding or block placement. As a consequence, we get
        // to deal with the simpler formulation of conditional branches
        // followed by tail calls.
        if (X86::getCondFromBranch(MI) != X86::COND_INVALID) {
          auto JmpIt = MI.getIterator();
          do {
            JmpIs.push_back(&*JmpIt);
            ++JmpIt;
          } while (JmpIt != UseMBB.instr_end() &&
                   X86::getCondFromBranch(*JmpIt) != X86::COND_INVALID);
          break;
        }

        // Otherwise we can just rewrite in-place.
        unsigned Opc = MI.getOpcode();
        if (Opc == TargetOpcode::COPY) {
          // Just replace this copy with the original copy def.
          MRI->replaceRegWith(MI.getOperand(0).getReg(),
                              CopyDefI.getOperand(0).getReg());
          MI.eraseFromParent();
        } else if (X86::isSETCC(Opc) || X86::isSETZUCC(Opc)) {
          rewriteSetCC(*TestMBB, TestPos, TestLoc, MI, CondRegs);
        } else if (isArithmeticOp(Opc)) {
          rewriteArithmetic(*TestMBB, TestPos, TestLoc, MI, CondRegs);
        } else {
          rewriteMI(*TestMBB, TestPos, TestLoc, MI, CondRegs);
        }

        // If this was the last use of the flags, we're done.
        if (FlagsKilled)
          break;
      }

      // If the flags were killed, we're done with this block.
      if (FlagsKilled)
        continue;

      // Otherwise we need to scan successors for ones where the flags live-in
      // and queue those up for processing.
      for (MachineBasicBlock *SuccMBB : UseMBB.successors())
        if (SuccMBB->isLiveIn(X86::EFLAGS) &&
            VisitedBlocks.insert(SuccMBB).second) {
          // We currently don't do any PHI insertion and so we require that the
          // test basic block dominates all of the use basic blocks. Further,
          // we can't have a cycle from the test block back to itself as that
          // would create a cycle requiring a PHI to break it.
          //
          // We could in theory do PHI insertion here if it becomes useful by
          // just taking undef values in along every edge that we don't trace
          // this EFLAGS copy along. This isn't as bad as fully general PHI
          // insertion, but still seems like a great deal of complexity.
          //
          // Because it is theoretically possible that some earlier MI pass or
          // other lowering transformation could induce this to happen, we do
          // a hard check even in non-debug builds here.
          if (SuccMBB == TestMBB || !MDT->dominates(TestMBB, SuccMBB)) {
            LLVM_DEBUG({
              dbgs()
                  << "ERROR: Encountered use that is not dominated by our test "
                     "basic block! Rewriting this would require inserting PHI "
                     "nodes to track the flag state across the CFG.\n\nTest "
                     "block:\n";
              TestMBB->dump();
              dbgs() << "Use block:\n";
              SuccMBB->dump();
            });
            report_fatal_error(
                "Cannot lower EFLAGS copy when original copy def "
                "does not dominate all uses.");
          }

          Blocks.push_back(SuccMBB);

          // After this, EFLAGS will be recreated before each use.
          SuccMBB->removeLiveIn(X86::EFLAGS);
        }
    } while (!Blocks.empty());

    // Now rewrite the jumps that use the flags. These we handle specially
    // because if there are multiple jumps in a single basic block we'll have
    // to do surgery on the CFG.
    MachineBasicBlock *LastJmpMBB = nullptr;
    for (MachineInstr *JmpI : JmpIs) {
      // Past the first jump within a basic block we need to split the blocks
      // apart.
      if (JmpI->getParent() == LastJmpMBB)
        splitBlock(*JmpI->getParent(), *JmpI, *TII);
      else
        LastJmpMBB = JmpI->getParent();

      rewriteMI(*TestMBB, TestPos, TestLoc, *JmpI, CondRegs);
    }

    // FIXME: Mark the last use of EFLAGS before the copy's def as a kill if
    // the copy's def operand is itself a kill.
  }

#ifndef NDEBUG
  for (MachineBasicBlock &MBB : MF)
    for (MachineInstr &MI : MBB)
      if (MI.getOpcode() == TargetOpcode::COPY &&
          (MI.getOperand(0).getReg() == X86::EFLAGS ||
           MI.getOperand(1).getReg() == X86::EFLAGS)) {
        LLVM_DEBUG(dbgs() << "ERROR: Found a COPY involving EFLAGS: ";
                   MI.dump());
        llvm_unreachable("Unlowered EFLAGS copy!");
      }
#endif

  return true;
}

/// Collect any conditions that have already been set in registers so that we
/// can re-use them rather than adding duplicates.
CondRegArray X86FlagsCopyLoweringImpl::collectCondsInRegs(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator TestPos) {
  CondRegArray CondRegs = {};

  // Scan backwards across the range of instructions with live EFLAGS.
  for (MachineInstr &MI :
       llvm::reverse(llvm::make_range(MBB.begin(), TestPos))) {
    X86::CondCode Cond = X86::getCondFromSETCC(MI);
    if (Cond != X86::COND_INVALID && !MI.mayStore() &&
        MI.getOperand(0).isReg() && MI.getOperand(0).getReg().isVirtual()) {
      assert(MI.getOperand(0).isDef() &&
             "A non-storing SETcc should always define a register!");
      CondRegs[Cond] = MI.getOperand(0).getReg();
    }

    // Stop scanning when we see the first definition of the EFLAGS as prior to
    // this we would potentially capture the wrong flag state.
    if (MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr))
      break;
  }
  return CondRegs;
}

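/// Materialize the given condition into a new 8-bit virtual register by
/// inserting a SETcc (or its zero-upper variant, when available and not
/// disfavored) at the test position.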
Register X86FlagsCopyLoweringImpl::promoteCondToReg(
    MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
    const DebugLoc &TestLoc, X86::CondCode Cond) {
  Register Reg = MRI->createVirtualRegister(PromoteRC);
  auto SetI =
      BuildMI(TestMBB, TestPos, TestLoc,
              TII->get((!Subtarget->hasZU() || Subtarget->preferLegacySetCC())
                           ? X86::SETCCr
                           : X86::SETZUCCr),
              Reg)
          .addImm(Cond);
  (void)SetI;
  LLVM_DEBUG(dbgs() << "  save cond: "; SetI->dump());
  ++NumSetCCsInserted;
  return Reg;
}

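/// Return a register holding either the given condition or its inverse,
/// materializing the condition if neither is available yet. The boolean in
/// the returned pair is true when the register holds the inverse condition.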
std::pair<Register, bool> X86FlagsCopyLoweringImpl::getCondOrInverseInReg(
    MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
    const DebugLoc &TestLoc, X86::CondCode Cond, CondRegArray &CondRegs) {
  Register &CondReg = CondRegs[Cond];
  Register &InvCondReg = CondRegs[X86::GetOppositeBranchCondition(Cond)];
  if (!CondReg && !InvCondReg)
    CondReg = promoteCondToReg(TestMBB, TestPos, TestLoc, Cond);

  if (CondReg)
    return {CondReg, false};
  else
    return {InvCondReg, true};
}

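/// Insert a TEST of the saved condition byte against itself. Afterwards ZF is
/// set exactly when the byte is zero, i.e. when the original condition was
/// false, so users are rewritten to check NE (or E for the inverse).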
void X86FlagsCopyLoweringImpl::insertTest(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator Pos,
                                          const DebugLoc &Loc, Register Reg) {
  auto TestI =
      BuildMI(MBB, Pos, Loc, TII->get(X86::TEST8rr)).addReg(Reg).addReg(Reg);
  (void)TestI;
  LLVM_DEBUG(dbgs() << "  test cond: "; TestI->dump());
  ++NumTestsInserted;
}

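/// Rewrite a SETcc that consumes the copied EFLAGS to read the saved
/// condition byte instead: a register-defining SETcc is replaced outright by
/// the saved register, while a store form becomes a MOV8mr of that register.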
void X86FlagsCopyLoweringImpl::rewriteSetCC(MachineBasicBlock &MBB,
                                            MachineBasicBlock::iterator Pos,
                                            const DebugLoc &Loc,
                                            MachineInstr &MI,
                                            CondRegArray &CondRegs) {
  X86::CondCode Cond = X86::getCondFromSETCC(MI);
  // Note that we can't usefully rewrite this to the inverse without complex
  // analysis of the users of the setCC. Largely we rely on duplicates which
  // could have been avoided already being avoided here.
  Register &CondReg = CondRegs[Cond];
  if (!CondReg)
    CondReg = promoteCondToReg(MBB, Pos, Loc, Cond);

  // Rewriting a register def is trivial: we just replace the register and
  // remove the setcc.
  if (!MI.mayStore()) {
    assert(MI.getOperand(0).isReg() &&
           "Cannot have a non-register defined operand to SETcc!");
    Register OldReg = MI.getOperand(0).getReg();
    // Drop Kill flags on the old register before replacing. CondReg may have
    // a longer live range.
    MRI->clearKillFlags(OldReg);
    MRI->replaceRegWith(OldReg, CondReg);
    MI.eraseFromParent();
    return;
  }

  // Otherwise, we need to emit a store.
  auto MIB = BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
                     TII->get(X86::MOV8mr));
  // Copy the address operands.
  for (int i = 0; i < X86::AddrNumOperands; ++i)
    MIB.add(MI.getOperand(i));

  MIB.addReg(CondReg);
  MIB.setMemRefs(MI.memoperands());
  MI.eraseFromParent();
}

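/// Rewrite an instruction that consumes the carry flag (ADC, SBB, RCL, RCR,
/// or a SETB_C pseudo) by re-materializing CF from the saved condition byte
/// immediately before the instruction.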
void X86FlagsCopyLoweringImpl::rewriteArithmetic(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos,
    const DebugLoc &Loc, MachineInstr &MI, CondRegArray &CondRegs) {
  // Arithmetic is either reading CF or OF.
  X86::CondCode Cond = X86::COND_B; // CF == 1
  // The addend to use to reset CF when added to the flag value: one chosen so
  // that adding one to it must carry, because there is no higher bit available
  // in the 8-bit register.
  int Addend = 255;
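  // For example: if CondReg holds 1 (CF was set when the condition was
  // saved), then 1 + 255 = 256 carries out of the 8-bit register and sets CF
  // again; if CondReg holds 0, then 0 + 255 = 255 and CF stays clear.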

  // Now get a register that contains the value of the flag input to the
  // arithmetic. We require exactly this flag to simplify the arithmetic
  // required to materialize it back into the flag.
  Register &CondReg = CondRegs[Cond];
  if (!CondReg)
    CondReg = promoteCondToReg(MBB, Pos, Loc, Cond);

  // Insert an instruction that will set the flag back to the desired value.
  Register TmpReg = MRI->createVirtualRegister(PromoteRC);
  auto AddI =
      BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
              TII->get(Subtarget->hasNDD() ? X86::ADD8ri_ND : X86::ADD8ri))
          .addDef(TmpReg, RegState::Dead)
          .addReg(CondReg)
          .addImm(Addend);
  (void)AddI;
  LLVM_DEBUG(dbgs() << "  add cond: "; AddI->dump());
  ++NumAddsInserted;
  MI.findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr)->setIsKill(true);
}

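// The X87 floating-point CMOV pseudo-instructions encode their condition in
// the opcode rather than as a condition-code operand, so recover it from the
// opcode here.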
static X86::CondCode getImplicitCondFromMI(unsigned Opc) {
#define FROM_TO(A, B)                                                          \
  case X86::CMOV##A##_Fp32:                                                    \
  case X86::CMOV##A##_Fp64:                                                    \
  case X86::CMOV##A##_Fp80:                                                    \
    return X86::COND_##B;

  switch (Opc) {
  default:
    return X86::COND_INVALID;
    FROM_TO(B, B)
    FROM_TO(E, E)
    FROM_TO(P, P)
    FROM_TO(BE, BE)
    FROM_TO(NB, AE)
    FROM_TO(NE, NE)
    FROM_TO(NP, NP)
    FROM_TO(NBE, A)
  }
#undef FROM_TO
}

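// Return the FP CMOV pseudo opcode with its condition replaced by E or NE;
// after the rewrite to a TEST of the saved condition byte, these are the only
// two conditions we ever need.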
static unsigned getOpcodeWithCC(unsigned Opc, X86::CondCode CC) {
  assert((CC == X86::COND_E || CC == X86::COND_NE) && "Unexpected CC");
#define CASE(A)                                                                \
  case X86::CMOVB_##A:                                                         \
  case X86::CMOVE_##A:                                                         \
  case X86::CMOVP_##A:                                                         \
  case X86::CMOVBE_##A:                                                        \
  case X86::CMOVNB_##A:                                                        \
  case X86::CMOVNE_##A:                                                        \
  case X86::CMOVNP_##A:                                                        \
  case X86::CMOVNBE_##A:                                                       \
    return (CC == X86::COND_E) ? X86::CMOVE_##A : X86::CMOVNE_##A;
  switch (Opc) {
  default:
    llvm_unreachable("Unexpected opcode");
    CASE(Fp32)
    CASE(Fp64)
    CASE(Fp80)
  }
#undef CASE
}

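/// Rewrite a generic EFLAGS-consuming instruction (one with an explicit
/// condition-code operand, or an FP CMOV pseudo) to TEST the saved condition
/// byte and check ZF instead.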
void X86FlagsCopyLoweringImpl::rewriteMI(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator Pos,
                                         const DebugLoc &Loc, MachineInstr &MI,
                                         CondRegArray &CondRegs) {
  // First get the register containing this specific condition.
  bool IsImplicitCC = false;
  X86::CondCode CC = X86::getCondFromMI(MI);
  if (CC == X86::COND_INVALID) {
    CC = getImplicitCondFromMI(MI.getOpcode());
    IsImplicitCC = true;
  }
  assert(CC != X86::COND_INVALID && "Unknown EFLAG user!");
  Register CondReg;
  bool Inverted;
  std::tie(CondReg, Inverted) =
      getCondOrInverseInReg(MBB, Pos, Loc, CC, CondRegs);

  // Insert a direct test of the saved register.
  insertTest(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(), CondReg);

  // Rewrite the instruction to use the !ZF flag from the test, and then kill
  // its use of the flags afterward.
  X86::CondCode NewCC = Inverted ? X86::COND_E : X86::COND_NE;
  if (IsImplicitCC)
    MI.setDesc(TII->get(getOpcodeWithCC(MI.getOpcode(), NewCC)));
  else
    MI.getOperand(MI.getDesc().getNumOperands() - 1).setImm(NewCC);

  MI.findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr)->setIsKill(true);
  LLVM_DEBUG(dbgs() << "  fixed instruction: "; MI.dump());
}

bool X86FlagsCopyLoweringLegacy::runOnMachineFunction(MachineFunction &MF) {
  auto *MDTWrapper = getAnalysisIfAvailable<MachineDominatorTreeWrapperPass>();
  MachineDominatorTree *MDT = MDTWrapper ? &MDTWrapper->getDomTree() : nullptr;
  return X86FlagsCopyLoweringImpl(MDT).runOnMachineFunction(MF);
}

PreservedAnalyses
X86FlagsCopyLoweringPass::run(MachineFunction &MF,
                              MachineFunctionAnalysisManager &MFAM) {
  MachineDominatorTree *MDT =
      MFAM.getCachedResult<MachineDominatorTreeAnalysis>(MF);
  bool Changed = X86FlagsCopyLoweringImpl(MDT).runOnMachineFunction(MF);
  return Changed ? getMachineFunctionPassPreservedAnalyses()
                 : PreservedAnalyses::all();
}