//===- RISCVVMV0Elimination.cpp - VMV0 Elimination -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===---------------------------------------------------------------------===//
//
// Mask operands in vector pseudos have to be in v0. We select them as a virtual
// register in the singleton vmv0 register class instead of copying them to $v0
// straight away, to make optimizing masks easier.
//
// However, register coalescing may end up coalescing copies into vmv0,
// resulting in instructions with multiple uses of vmv0 that the register
// allocator can't allocate:
//
// %x:vrnov0 = PseudoVADD_VV_M1_MASK %0:vrnov0, %1:vr, %2:vmv0, %3:vmv0, ...
//
// To avoid this, this pass replaces any uses* of vmv0 with copies to $v0 before
// register coalescing and allocation:
//
// %x:vrnov0 = PseudoVADD_VV_M1_MASK %0:vrnov0, %1:vr, %2:vr, %3:vmv0, ...
// ->
// $v0 = COPY %3:vr
// %x:vrnov0 = PseudoVADD_VV_M1_MASK %0:vrnov0, %1:vr, %2:vr, $v0, ...
//
// * The only uses of vmv0 left behind are when used for inline asm with the vm
// constraint.
//
//===---------------------------------------------------------------------===//

#include "RISCV.h"
#include "RISCVSubtarget.h"
#ifndef NDEBUG
#include "llvm/ADT/PostOrderIterator.h"
#endif
#include "llvm/CodeGen/MachineFunctionPass.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-vmv0-elimination"

namespace {

class RISCVVMV0Elimination : public MachineFunctionPass {
public:
  static char ID;
  RISCVVMV0Elimination() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    // TODO: We could move this closer to regalloc, out of SSA, which would
    // allow scheduling past mask operands. We would need to preserve live
    // intervals.
    return MachineFunctionProperties().setIsSSA();
  }
};

} // namespace

char RISCVVMV0Elimination::ID = 0;

INITIALIZE_PASS(RISCVVMV0Elimination, DEBUG_TYPE, "RISC-V VMV0 Elimination",
                false, false)

FunctionPass *llvm::createRISCVVMV0EliminationPass() {
  return new RISCVVMV0Elimination();
}

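// Return true if the operand described by MCOI is constrained to the singleton
// vmv0 register class, i.e. it must be allocated to v0.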
static bool isVMV0(const MCOperandInfo &MCOI) {
  return MCOI.RegClass == RISCV::VMV0RegClassID;
}

bool RISCVVMV0Elimination::runOnMachineFunction(MachineFunction &MF) {
  // Skip if the vector extension is not enabled.
  const RISCVSubtarget *ST = &MF.getSubtarget<RISCVSubtarget>();
  if (!ST->hasVInstructions())
    return false;

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetInstrInfo *TII = ST->getInstrInfo();

#ifndef NDEBUG
  // Assert that we won't clobber any existing reads of v0 where we need to
  // insert copies.
  const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
  ReversePostOrderTraversal<MachineBasicBlock *> RPOT(&*MF.begin());
  for (MachineBasicBlock *MBB : RPOT) {
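    // True when a copy to v0 that this pass will insert would still be live
    // here, i.e. a read of v0 at this point would observe the clobber.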
    bool V0Clobbered = false;
    for (MachineInstr &MI : *MBB) {
      assert(!(MI.readsRegister(RISCV::V0, TRI) && V0Clobbered) &&
             "Inserting a copy to v0 would clobber a read");
      if (MI.modifiesRegister(RISCV::V0, TRI))
        V0Clobbered = false;

      if (any_of(MI.getDesc().operands(), isVMV0))
        V0Clobbered = true;
    }

    assert(!(V0Clobbered &&
             any_of(MBB->successors(),
                    [](auto *Succ) { return Succ->isLiveIn(RISCV::V0); })) &&
           "Clobbered a v0 used in a successor");
  }
#endif

  bool MadeChange = false;
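  // Copies into vmv0 that become dead once their single use is rewritten to
  // read $v0; they are erased after the walk below.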
  SmallVector<MachineInstr *> DeadCopies;

  // For any instruction with a vmv0 operand, replace it with a copy to v0.
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      assert(count_if(MI.getDesc().operands(), isVMV0) < 2 &&
             "Expected only one or zero vmv0 operands");

      for (auto [OpNo, MCOI] : enumerate(MI.getDesc().operands())) {
        if (isVMV0(MCOI)) {
          MachineOperand &MO = MI.getOperand(OpNo);
          Register Src = MO.getReg();
          assert(MO.isUse() && MO.getSubReg() == RISCV::NoSubRegister &&
                 Src.isVirtual() && "vmv0 use in unexpected form");

          // Peek through a single copy to match what isel does.
          if (MachineInstr *SrcMI = MRI.getVRegDef(Src);
              SrcMI->isCopy() && SrcMI->getOperand(1).getReg().isVirtual() &&
              SrcMI->getOperand(1).getSubReg() == RISCV::NoSubRegister) {
            // Delete any dead copies to vmv0 to avoid allocating them.
            if (MRI.hasOneNonDBGUse(Src))
              DeadCopies.push_back(SrcMI);
            Src = SrcMI->getOperand(1).getReg();
          }

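          // Materialize the mask in $v0 immediately before MI.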
          BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(RISCV::COPY), RISCV::V0)
              .addReg(Src);

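          // Rewrite the use to read the mask from the physical register.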
          MO.setReg(RISCV::V0);
          MadeChange = true;
          break;
        }
      }
    }
  }

  for (MachineInstr *MI : DeadCopies)
    MI->eraseFromParent();

  if (!MadeChange)
    return false;

  // Now that any constraints requiring vmv0 are gone, eliminate any uses of
  // vmv0 by recomputing the reg class.
  // The only remaining uses should be around inline asm.
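  // recomputeRegClass widens a vreg to the largest class its remaining uses
  // and defs allow, so anything no longer pinned to vmv0 moves out of it.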
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      for (MachineOperand &MO : MI.uses()) {
        if (MO.isReg() && MO.getReg().isVirtual() &&
            MRI.getRegClass(MO.getReg()) == &RISCV::VMV0RegClass) {
          MRI.recomputeRegClass(MO.getReg());
          assert((MRI.getRegClass(MO.getReg()) != &RISCV::VMV0RegClass ||
                  MI.isInlineAsm() ||
                  MRI.getVRegDef(MO.getReg())->isInlineAsm()) &&
                 "Non-inline-asm use of vmv0 left behind");
        }
      }
    }
  }

  return true;
}