//===-- RISCVExpandAtomicPseudoInsts.cpp - Expand atomic pseudo instrs. ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that expands atomic pseudo instructions into
// target instructions. This pass should be run at the last possible moment,
// avoiding the possibility for other passes to break the requirements for
// forward progress in the LR/SC block.
//
//===----------------------------------------------------------------------===//

#include "RISCV.h"
#include "RISCVInstrInfo.h"
#include "RISCVTargetMachine.h"

#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"

using namespace llvm;

#define RISCV_EXPAND_ATOMIC_PSEUDO_NAME                                        \
  "RISC-V atomic pseudo instruction expansion pass"

namespace {

class RISCVExpandAtomicPseudo : public MachineFunctionPass {
public:
  const RISCVSubtarget *STI;
  const RISCVInstrInfo *TII;
  static char ID;

  RISCVExpandAtomicPseudo() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return RISCV_EXPAND_ATOMIC_PSEUDO_NAME;
  }

private:
  bool expandMBB(MachineBasicBlock &MBB);
  bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                MachineBasicBlock::iterator &NextMBBI);
  bool expandAtomicBinOp(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MBBI, AtomicRMWInst::BinOp,
                         bool IsMasked, int Width,
                         MachineBasicBlock::iterator &NextMBBI);
  bool expandAtomicMinMaxOp(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            AtomicRMWInst::BinOp, bool IsMasked, int Width,
                            MachineBasicBlock::iterator &NextMBBI);
  bool expandAtomicCmpXchg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, bool IsMasked,
                           int Width, MachineBasicBlock::iterator &NextMBBI);
#ifndef NDEBUG
  unsigned getInstSizeInBytes(const MachineFunction &MF) const {
    unsigned Size = 0;
    for (auto &MBB : MF)
      for (auto &MI : MBB)
        Size += TII->getInstSizeInBytes(MI);
    return Size;
  }
#endif
};

char RISCVExpandAtomicPseudo::ID = 0;

bool RISCVExpandAtomicPseudo::runOnMachineFunction(MachineFunction &MF) {
  STI = &MF.getSubtarget<RISCVSubtarget>();
  TII = STI->getInstrInfo();

#ifndef NDEBUG
  const unsigned OldSize = getInstSizeInBytes(MF);
#endif

  bool Modified = false;
  for (auto &MBB : MF)
    Modified |= expandMBB(MBB);

#ifndef NDEBUG
  const unsigned NewSize = getInstSizeInBytes(MF);
  assert(OldSize >= NewSize);
#endif
  return Modified;
}

bool RISCVExpandAtomicPseudo::expandMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
  while (MBBI != E) {
    MachineBasicBlock::iterator NMBBI = std::next(MBBI);
    Modified |= expandMI(MBB, MBBI, NMBBI);
    MBBI = NMBBI;
  }

  return Modified;
}

bool RISCVExpandAtomicPseudo::expandMI(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       MachineBasicBlock::iterator &NextMBBI) {
  // RISCVInstrInfo::getInstSizeInBytes expects that the total size of the
  // expanded instructions for each pseudo is correct in the Size field of the
  // tablegen definition for the pseudo.
  switch (MBBI->getOpcode()) {
  case RISCV::PseudoAtomicSwap32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xchg, false, 32,
                             NextMBBI);
  case RISCV::PseudoAtomicSwap64:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xchg, false, 64,
                             NextMBBI);
  case RISCV::PseudoAtomicLoadAdd32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Add, false, 32,
                             NextMBBI);
  case RISCV::PseudoAtomicLoadAdd64:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Add, false, 64,
                             NextMBBI);
  case RISCV::PseudoAtomicLoadSub32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Sub, false, 32,
                             NextMBBI);
  case RISCV::PseudoAtomicLoadSub64:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Sub, false, 64,
                             NextMBBI);
  case RISCV::PseudoAtomicLoadAnd32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::And, false, 32,
                             NextMBBI);
  case RISCV::PseudoAtomicLoadAnd64:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::And, false, 64,
                             NextMBBI);
  case RISCV::PseudoAtomicLoadOr32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Or, false, 32, NextMBBI);
  case RISCV::PseudoAtomicLoadOr64:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Or, false, 64, NextMBBI);
  case RISCV::PseudoAtomicLoadXor32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xor, false, 32,
                             NextMBBI);
  case RISCV::PseudoAtomicLoadXor64:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xor, false, 64,
                             NextMBBI);
  case RISCV::PseudoAtomicLoadNand32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 32,
                             NextMBBI);
  case RISCV::PseudoAtomicLoadNand64:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 64,
                             NextMBBI);
  case RISCV::PseudoAtomicLoadMin32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Min, false, 32,
                                NextMBBI);
  case RISCV::PseudoAtomicLoadMin64:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Min, false, 64,
                                NextMBBI);
  case RISCV::PseudoAtomicLoadMax32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Max, false, 32,
                                NextMBBI);
  case RISCV::PseudoAtomicLoadMax64:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Max, false, 64,
                                NextMBBI);
  case RISCV::PseudoAtomicLoadUMin32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMin, false, 32,
                                NextMBBI);
  case RISCV::PseudoAtomicLoadUMin64:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMin, false, 64,
                                NextMBBI);
  case RISCV::PseudoAtomicLoadUMax32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMax, false, 32,
                                NextMBBI);
  case RISCV::PseudoAtomicLoadUMax64:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMax, false, 64,
                                NextMBBI);
  case RISCV::PseudoMaskedAtomicSwap32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xchg, true, 32,
                             NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadAdd32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Add, true, 32, NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadSub32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Sub, true, 32, NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadNand32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, true, 32,
                             NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadMax32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Max, true, 32,
                                NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadMin32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Min, true, 32,
                                NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadUMax32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMax, true, 32,
                                NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadUMin32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMin, true, 32,
                                NextMBBI);
  case RISCV::PseudoCmpXchg32:
    return expandAtomicCmpXchg(MBB, MBBI, false, 32, NextMBBI);
  case RISCV::PseudoCmpXchg64:
    return expandAtomicCmpXchg(MBB, MBBI, false, 64, NextMBBI);
  case RISCV::PseudoMaskedCmpXchg32:
    return expandAtomicCmpXchg(MBB, MBBI, true, 32, NextMBBI);
  }

  return false;
}

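// Map an IR AtomicOrdering to the 32-bit LR opcode. When the Ztso extension
// is implemented, ordinary loads already provide acquire semantics, so the
// .aq bit is only kept for the sequentially consistent case.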
static unsigned getLRForRMW32(AtomicOrdering Ordering,
                              const RISCVSubtarget *Subtarget) {
  switch (Ordering) {
  default:
    llvm_unreachable("Unexpected AtomicOrdering");
  case AtomicOrdering::Monotonic:
    return RISCV::LR_W;
  case AtomicOrdering::Acquire:
    if (Subtarget->hasStdExtZtso())
      return RISCV::LR_W;
    return RISCV::LR_W_AQ;
  case AtomicOrdering::Release:
    return RISCV::LR_W;
  case AtomicOrdering::AcquireRelease:
    if (Subtarget->hasStdExtZtso())
      return RISCV::LR_W;
    return RISCV::LR_W_AQ;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::LR_W_AQRL;
  }
}

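// Map an IR AtomicOrdering to the 32-bit SC opcode. Mirroring the LR mapping
// above, the .rl bit is dropped for release/acq_rel orderings when Ztso is
// available, but kept for sequentially consistent stores.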
static unsigned getSCForRMW32(AtomicOrdering Ordering,
                              const RISCVSubtarget *Subtarget) {
  switch (Ordering) {
  default:
    llvm_unreachable("Unexpected AtomicOrdering");
  case AtomicOrdering::Monotonic:
    return RISCV::SC_W;
  case AtomicOrdering::Acquire:
    return RISCV::SC_W;
  case AtomicOrdering::Release:
    if (Subtarget->hasStdExtZtso())
      return RISCV::SC_W;
    return RISCV::SC_W_RL;
  case AtomicOrdering::AcquireRelease:
    if (Subtarget->hasStdExtZtso())
      return RISCV::SC_W;
    return RISCV::SC_W_RL;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::SC_W_RL;
  }
}

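// The 64-bit mappings below follow exactly the same pattern as the 32-bit
// ones, selecting LR.D/SC.D variants instead.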
static unsigned getLRForRMW64(AtomicOrdering Ordering,
                              const RISCVSubtarget *Subtarget) {
  switch (Ordering) {
  default:
    llvm_unreachable("Unexpected AtomicOrdering");
  case AtomicOrdering::Monotonic:
    return RISCV::LR_D;
  case AtomicOrdering::Acquire:
    if (Subtarget->hasStdExtZtso())
      return RISCV::LR_D;
    return RISCV::LR_D_AQ;
  case AtomicOrdering::Release:
    return RISCV::LR_D;
  case AtomicOrdering::AcquireRelease:
    if (Subtarget->hasStdExtZtso())
      return RISCV::LR_D;
    return RISCV::LR_D_AQ;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::LR_D_AQRL;
  }
}

static unsigned getSCForRMW64(AtomicOrdering Ordering,
                              const RISCVSubtarget *Subtarget) {
  switch (Ordering) {
  default:
    llvm_unreachable("Unexpected AtomicOrdering");
  case AtomicOrdering::Monotonic:
    return RISCV::SC_D;
  case AtomicOrdering::Acquire:
    return RISCV::SC_D;
  case AtomicOrdering::Release:
    if (Subtarget->hasStdExtZtso())
      return RISCV::SC_D;
    return RISCV::SC_D_RL;
  case AtomicOrdering::AcquireRelease:
    if (Subtarget->hasStdExtZtso())
      return RISCV::SC_D;
    return RISCV::SC_D_RL;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::SC_D_RL;
  }
}

static unsigned getLRForRMW(AtomicOrdering Ordering, int Width,
                            const RISCVSubtarget *Subtarget) {
  if (Width == 32)
    return getLRForRMW32(Ordering, Subtarget);
  if (Width == 64)
    return getLRForRMW64(Ordering, Subtarget);
  llvm_unreachable("Unexpected LR width\n");
}

static unsigned getSCForRMW(AtomicOrdering Ordering, int Width,
                            const RISCVSubtarget *Subtarget) {
  if (Width == 32)
    return getSCForRMW32(Ordering, Subtarget);
  if (Width == 64)
    return getSCForRMW64(Ordering, Subtarget);
  llvm_unreachable("Unexpected SC width\n");
}

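// Emit the LR/SC retry loop for an unmasked atomic read-modify-write into
// LoopMBB. DestReg receives the loaded (original) value; ScratchReg holds the
// computed new value and is then reused for the SC failure flag.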
static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
                                   DebugLoc DL, MachineBasicBlock *ThisMBB,
                                   MachineBasicBlock *LoopMBB,
                                   MachineBasicBlock *DoneMBB,
                                   AtomicRMWInst::BinOp BinOp, int Width,
                                   const RISCVSubtarget *STI) {
  Register DestReg = MI.getOperand(0).getReg();
  Register ScratchReg = MI.getOperand(1).getReg();
  Register AddrReg = MI.getOperand(2).getReg();
  Register IncrReg = MI.getOperand(3).getReg();
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(4).getImm());

  // .loop:
  //   lr.[w|d] dest, (addr)
  //   binop scratch, dest, val
  //   sc.[w|d] scratch, scratch, (addr)
  //   bnez scratch, loop
  BuildMI(LoopMBB, DL, TII->get(getLRForRMW(Ordering, Width, STI)), DestReg)
      .addReg(AddrReg);
  switch (BinOp) {
  default:
    llvm_unreachable("Unexpected AtomicRMW BinOp");
  case AtomicRMWInst::Xchg:
    BuildMI(LoopMBB, DL, TII->get(RISCV::ADDI), ScratchReg)
        .addReg(IncrReg)
        .addImm(0);
    break;
  case AtomicRMWInst::Add:
    BuildMI(LoopMBB, DL, TII->get(RISCV::ADD), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::Sub:
    BuildMI(LoopMBB, DL, TII->get(RISCV::SUB), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::And:
    BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::Or:
    BuildMI(LoopMBB, DL, TII->get(RISCV::OR), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::Xor:
    BuildMI(LoopMBB, DL, TII->get(RISCV::XOR), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::Nand:
    BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    BuildMI(LoopMBB, DL, TII->get(RISCV::XORI), ScratchReg)
        .addReg(ScratchReg)
        .addImm(-1);
    break;
  case AtomicRMWInst::Max:
    BuildMI(LoopMBB, DL, TII->get(RISCV::MAX), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::Min:
    BuildMI(LoopMBB, DL, TII->get(RISCV::MIN), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::UMax:
    BuildMI(LoopMBB, DL, TII->get(RISCV::MAXU), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::UMin:
    BuildMI(LoopMBB, DL, TII->get(RISCV::MINU), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    break;
  }
  BuildMI(LoopMBB, DL, TII->get(getSCForRMW(Ordering, Width, STI)), ScratchReg)
      .addReg(ScratchReg)
      .addReg(AddrReg);
  BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
      .addReg(ScratchReg)
      .addReg(RISCV::X0)
      .addMBB(LoopMBB);
}

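// Emit code that merges NewValReg into OldValReg under MaskReg and writes the
// result to DestReg: bit positions set in the mask are taken from NewValReg,
// all other bits from OldValReg. ScratchReg is clobbered.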
static void insertMaskedMerge(const RISCVInstrInfo *TII, DebugLoc DL,
                              MachineBasicBlock *MBB, Register DestReg,
                              Register OldValReg, Register NewValReg,
                              Register MaskReg, Register ScratchReg) {
  assert(OldValReg != ScratchReg && "OldValReg and ScratchReg must be unique");
  assert(OldValReg != MaskReg && "OldValReg and MaskReg must be unique");
  assert(ScratchReg != MaskReg && "ScratchReg and MaskReg must be unique");

  // We select bits from newval and oldval using:
  // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge
  // r = oldval ^ ((oldval ^ newval) & masktargetdata);
  BuildMI(MBB, DL, TII->get(RISCV::XOR), ScratchReg)
      .addReg(OldValReg)
      .addReg(NewValReg);
  BuildMI(MBB, DL, TII->get(RISCV::AND), ScratchReg)
      .addReg(ScratchReg)
      .addReg(MaskReg);
  BuildMI(MBB, DL, TII->get(RISCV::XOR), DestReg)
      .addReg(OldValReg)
      .addReg(ScratchReg);
}

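// Emit the LR/SC retry loop for a masked (sub-word) atomic read-modify-write.
// The pseudo operates on the naturally aligned 32-bit word containing the
// narrow location; only the bits selected by MaskReg are updated, the rest of
// the word is written back unchanged.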
static void doMaskedAtomicBinOpExpansion(const RISCVInstrInfo *TII,
                                         MachineInstr &MI, DebugLoc DL,
                                         MachineBasicBlock *ThisMBB,
                                         MachineBasicBlock *LoopMBB,
                                         MachineBasicBlock *DoneMBB,
                                         AtomicRMWInst::BinOp BinOp, int Width,
                                         const RISCVSubtarget *STI) {
  assert(Width == 32 && "Should never need to expand masked 64-bit operations");
  Register DestReg = MI.getOperand(0).getReg();
  Register ScratchReg = MI.getOperand(1).getReg();
  Register AddrReg = MI.getOperand(2).getReg();
  Register IncrReg = MI.getOperand(3).getReg();
  Register MaskReg = MI.getOperand(4).getReg();
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(5).getImm());

  // .loop:
  //   lr.w destreg, (alignedaddr)
  //   binop scratch, destreg, incr
  //   xor scratch, destreg, scratch
  //   and scratch, scratch, masktargetdata
  //   xor scratch, destreg, scratch
  //   sc.w scratch, scratch, (alignedaddr)
  //   bnez scratch, loop
  BuildMI(LoopMBB, DL, TII->get(getLRForRMW32(Ordering, STI)), DestReg)
      .addReg(AddrReg);
  switch (BinOp) {
  default:
    llvm_unreachable("Unexpected AtomicRMW BinOp");
  case AtomicRMWInst::Xchg:
    BuildMI(LoopMBB, DL, TII->get(RISCV::ADDI), ScratchReg)
        .addReg(IncrReg)
        .addImm(0);
    break;
  case AtomicRMWInst::Add:
    BuildMI(LoopMBB, DL, TII->get(RISCV::ADD), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::Sub:
    BuildMI(LoopMBB, DL, TII->get(RISCV::SUB), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::Nand:
    BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    BuildMI(LoopMBB, DL, TII->get(RISCV::XORI), ScratchReg)
        .addReg(ScratchReg)
        .addImm(-1);
    break;
  }

  insertMaskedMerge(TII, DL, LoopMBB, ScratchReg, DestReg, ScratchReg, MaskReg,
                    ScratchReg);

  BuildMI(LoopMBB, DL, TII->get(getSCForRMW32(Ordering, STI)), ScratchReg)
      .addReg(ScratchReg)
      .addReg(AddrReg);
  BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
      .addReg(ScratchReg)
      .addReg(RISCV::X0)
      .addMBB(LoopMBB);
}

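// Expand one (possibly masked) atomic RMW pseudo into a single-block LR/SC
// loop. The instructions following the pseudo are split off into DoneMBB and
// the live-in lists of the new blocks are recomputed.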
bool RISCVExpandAtomicPseudo::expandAtomicBinOp(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
    MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  auto LoopMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  // Insert new MBBs.
  MF->insert(++MBB.getIterator(), LoopMBB);
  MF->insert(++LoopMBB->getIterator(), DoneMBB);

  // Set up successors and transfer remaining instructions to DoneMBB.
  LoopMBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(DoneMBB);
  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
  DoneMBB->transferSuccessors(&MBB);
  MBB.addSuccessor(LoopMBB);

  if (!IsMasked)
    doAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp, Width,
                           STI);
  else
    doMaskedAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp,
                                 Width, STI);

  NextMBBI = MBB.end();
  MI.eraseFromParent();

  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *LoopMBB);
  computeAndAddLiveIns(LiveRegs, *DoneMBB);

  return true;
}

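// Sign-extend the field held in ValReg in place using a shift-left /
// arithmetic-shift-right pair by ShamtReg; the caller provides the shift
// amount appropriate for the field being extended.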
static void insertSext(const RISCVInstrInfo *TII, DebugLoc DL,
                       MachineBasicBlock *MBB, Register ValReg,
                       Register ShamtReg) {
  BuildMI(MBB, DL, TII->get(RISCV::SLL), ValReg)
      .addReg(ValReg)
      .addReg(ShamtReg);
  BuildMI(MBB, DL, TII->get(RISCV::SRA), ValReg)
      .addReg(ValReg)
      .addReg(ShamtReg);
}

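// Emit the LR/SC loop for an unmasked atomic min/max without Zbb: the loaded
// value is copied to ScratchReg and the conditional branch skips the update
// (jumping straight to the SC in LoopTailMBB) when no change is needed.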
static void doAtomicMinMaxOpExpansion(
    const RISCVInstrInfo *TII, MachineInstr &MI, DebugLoc DL,
    MachineBasicBlock *ThisMBB, MachineBasicBlock *LoopHeadMBB,
    MachineBasicBlock *LoopIfBodyMBB, MachineBasicBlock *LoopTailMBB,
    MachineBasicBlock *DoneMBB, AtomicRMWInst::BinOp BinOp, int Width,
    const RISCVSubtarget *STI) {
  Register DestReg = MI.getOperand(0).getReg();
  Register ScratchReg = MI.getOperand(1).getReg();
  Register AddrReg = MI.getOperand(2).getReg();
  Register IncrReg = MI.getOperand(3).getReg();
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(4).getImm());

  // .loophead:
  //   lr.[w|d] dest, (addr)
  //   mv scratch, dest
  //   ifnochangeneeded scratch, incr, .looptail
  BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width, STI)), DestReg)
      .addReg(AddrReg);
  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::ADDI), ScratchReg)
      .addReg(DestReg)
      .addImm(0);
  switch (BinOp) {
  default:
    llvm_unreachable("Unexpected AtomicRMW BinOp");
  case AtomicRMWInst::Max: {
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
        .addReg(ScratchReg)
        .addReg(IncrReg)
        .addMBB(LoopTailMBB);
    break;
  }
  case AtomicRMWInst::Min: {
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
        .addReg(IncrReg)
        .addReg(ScratchReg)
        .addMBB(LoopTailMBB);
    break;
  }
  case AtomicRMWInst::UMax:
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
        .addReg(ScratchReg)
        .addReg(IncrReg)
        .addMBB(LoopTailMBB);
    break;
  case AtomicRMWInst::UMin:
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
        .addReg(IncrReg)
        .addReg(ScratchReg)
        .addMBB(LoopTailMBB);
    break;
  }

  // .loopifbody:
  //   mv scratch, incr
  BuildMI(LoopIfBodyMBB, DL, TII->get(RISCV::ADDI), ScratchReg)
      .addReg(IncrReg)
      .addImm(0);

  // .looptail:
  //   sc.[w|d] scratch, scratch, (addr)
  //   bnez scratch, loop
  BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width, STI)),
          ScratchReg)
      .addReg(ScratchReg)
      .addReg(AddrReg);
  BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
      .addReg(ScratchReg)
      .addReg(RISCV::X0)
      .addMBB(LoopHeadMBB);
}

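// Emit the LR/SC loop for a masked (sub-word) atomic min/max. The field is
// extracted with MaskReg into Scratch2Reg (and sign-extended for the signed
// variants) before the comparison; the merged word is built in Scratch1Reg.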
static void doMaskedAtomicMinMaxOpExpansion(
    const RISCVInstrInfo *TII, MachineInstr &MI, DebugLoc DL,
    MachineBasicBlock *ThisMBB, MachineBasicBlock *LoopHeadMBB,
    MachineBasicBlock *LoopIfBodyMBB, MachineBasicBlock *LoopTailMBB,
    MachineBasicBlock *DoneMBB, AtomicRMWInst::BinOp BinOp, int Width,
    const RISCVSubtarget *STI) {
  assert(Width == 32 && "Should never need to expand masked 64-bit operations");
  Register DestReg = MI.getOperand(0).getReg();
  Register Scratch1Reg = MI.getOperand(1).getReg();
  Register Scratch2Reg = MI.getOperand(2).getReg();
  Register AddrReg = MI.getOperand(3).getReg();
  Register IncrReg = MI.getOperand(4).getReg();
  Register MaskReg = MI.getOperand(5).getReg();
  bool IsSigned = BinOp == AtomicRMWInst::Min || BinOp == AtomicRMWInst::Max;
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(IsSigned ? 7 : 6).getImm());

  //
  // .loophead:
  //   lr.w destreg, (alignedaddr)
  //   and scratch2, destreg, mask
  //   mv scratch1, destreg
  //   [sext scratch2 if signed min/max]
  //   ifnochangeneeded scratch2, incr, .looptail
  BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering, STI)), DestReg)
      .addReg(AddrReg);
  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), Scratch2Reg)
      .addReg(DestReg)
      .addReg(MaskReg);
  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::ADDI), Scratch1Reg)
      .addReg(DestReg)
      .addImm(0);

  switch (BinOp) {
  default:
    llvm_unreachable("Unexpected AtomicRMW BinOp");
  case AtomicRMWInst::Max: {
    insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
        .addReg(Scratch2Reg)
        .addReg(IncrReg)
        .addMBB(LoopTailMBB);
    break;
  }
  case AtomicRMWInst::Min: {
    insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
        .addReg(IncrReg)
        .addReg(Scratch2Reg)
        .addMBB(LoopTailMBB);
    break;
  }
  case AtomicRMWInst::UMax:
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
        .addReg(Scratch2Reg)
        .addReg(IncrReg)
        .addMBB(LoopTailMBB);
    break;
  case AtomicRMWInst::UMin:
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
        .addReg(IncrReg)
        .addReg(Scratch2Reg)
        .addMBB(LoopTailMBB);
    break;
  }

  // .loopifbody:
  //   xor scratch1, destreg, incr
  //   and scratch1, scratch1, mask
  //   xor scratch1, destreg, scratch1
  insertMaskedMerge(TII, DL, LoopIfBodyMBB, Scratch1Reg, DestReg, IncrReg,
                    MaskReg, Scratch1Reg);

  // .looptail:
  //   sc.w scratch1, scratch1, (addr)
  //   bnez scratch1, loop
  BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering, STI)), Scratch1Reg)
      .addReg(Scratch1Reg)
      .addReg(AddrReg);
  BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
      .addReg(Scratch1Reg)
      .addReg(RISCV::X0)
      .addMBB(LoopHeadMBB);
}

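// Expand one (possibly masked) atomic min/max pseudo. Unless the Zbb MIN/MAX
// instructions can be used, the expansion needs a four-block CFG: a loop head
// (LR + compare), a conditional if-body (take the new value), a loop tail
// (SC + retry branch), and the done block.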
bool RISCVExpandAtomicPseudo::expandAtomicMinMaxOp(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
    MachineBasicBlock::iterator &NextMBBI) {
  // Using MIN(U)/MAX(U) is preferable if permitted.
  if (STI->hasPermissiveZalrsc() && STI->hasStdExtZbb() && !IsMasked)
    return expandAtomicBinOp(MBB, MBBI, BinOp, IsMasked, Width, NextMBBI);

  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB.getParent();
  auto LoopHeadMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto LoopIfBodyMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto LoopTailMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  // Insert new MBBs.
  MF->insert(++MBB.getIterator(), LoopHeadMBB);
  MF->insert(++LoopHeadMBB->getIterator(), LoopIfBodyMBB);
  MF->insert(++LoopIfBodyMBB->getIterator(), LoopTailMBB);
  MF->insert(++LoopTailMBB->getIterator(), DoneMBB);

  // Set up successors and transfer remaining instructions to DoneMBB.
  LoopHeadMBB->addSuccessor(LoopIfBodyMBB);
  LoopHeadMBB->addSuccessor(LoopTailMBB);
  LoopIfBodyMBB->addSuccessor(LoopTailMBB);
  LoopTailMBB->addSuccessor(LoopHeadMBB);
  LoopTailMBB->addSuccessor(DoneMBB);
  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
  DoneMBB->transferSuccessors(&MBB);
  MBB.addSuccessor(LoopHeadMBB);

  if (!IsMasked)
    doAtomicMinMaxOpExpansion(TII, MI, DL, &MBB, LoopHeadMBB, LoopIfBodyMBB,
                              LoopTailMBB, DoneMBB, BinOp, Width, STI);
  else
    doMaskedAtomicMinMaxOpExpansion(TII, MI, DL, &MBB, LoopHeadMBB,
                                    LoopIfBodyMBB, LoopTailMBB, DoneMBB, BinOp,
                                    Width, STI);

  NextMBBI = MBB.end();
  MI.eraseFromParent();

  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *LoopHeadMBB);
  computeAndAddLiveIns(LiveRegs, *LoopIfBodyMBB);
  computeAndAddLiveIns(LiveRegs, *LoopTailMBB);
  computeAndAddLiveIns(LiveRegs, *DoneMBB);

  return true;
}

// If a BNE on the cmpxchg comparison result immediately follows the cmpxchg
// operation, it can be folded into the cmpxchg expansion by modifying the
// branch within 'LoopHead' (which performs the same comparison). This is a
// valid transformation because after altering the LoopHead's BNE destination,
// the BNE following the cmpxchg becomes redundant and can be deleted. In the
// case of a masked cmpxchg, an appropriate AND and BNE must be matched.
//
// On success, returns true and deletes the matching BNE or AND+BNE, sets the
// LoopHeadBNETarget argument to the target that should be used within the
// loop head, and removes that block as a successor to MBB.
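//
// For example, for a masked cmpxchg the tail that gets matched and removed
// looks roughly like (hypothetical register names):
//   and  a2, a0, a4      ; mask the loaded value
//   bne  a2, a1, .Lfail  ; compare against cmpval
// and the BNE emitted in the loop head is retargeted to .Lfail instead of the
// fall-through done block.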
bool tryToFoldBNEOnCmpXchgResult(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 Register DestReg, Register CmpValReg,
                                 Register MaskReg,
                                 MachineBasicBlock *&LoopHeadBNETarget) {
  SmallVector<MachineInstr *> ToErase;
  auto E = MBB.end();
  if (MBBI == E)
    return false;
  MBBI = skipDebugInstructionsForward(MBBI, E);

  // If we have a masked cmpxchg, match AND dst, DestReg, MaskReg.
  if (MaskReg.isValid()) {
    if (MBBI == E || MBBI->getOpcode() != RISCV::AND)
      return false;
    Register ANDOp1 = MBBI->getOperand(1).getReg();
    Register ANDOp2 = MBBI->getOperand(2).getReg();
    if (!(ANDOp1 == DestReg && ANDOp2 == MaskReg) &&
        !(ANDOp1 == MaskReg && ANDOp2 == DestReg))
      return false;
    // We now expect the BNE to use the result of the AND as an operand.
    DestReg = MBBI->getOperand(0).getReg();
    ToErase.push_back(&*MBBI);
    MBBI = skipDebugInstructionsForward(std::next(MBBI), E);
  }

  // Match BNE DestReg, CmpValReg.
  if (MBBI == E || MBBI->getOpcode() != RISCV::BNE)
    return false;
  Register BNEOp0 = MBBI->getOperand(0).getReg();
  Register BNEOp1 = MBBI->getOperand(1).getReg();
  if (!(BNEOp0 == DestReg && BNEOp1 == CmpValReg) &&
      !(BNEOp0 == CmpValReg && BNEOp1 == DestReg))
    return false;

  // Make sure the branch is the only user of the AND.
  if (MaskReg.isValid()) {
    if (BNEOp0 == DestReg && !MBBI->getOperand(0).isKill())
      return false;
    if (BNEOp1 == DestReg && !MBBI->getOperand(1).isKill())
      return false;
  }

  ToErase.push_back(&*MBBI);
  LoopHeadBNETarget = MBBI->getOperand(2).getMBB();
  MBBI = skipDebugInstructionsForward(std::next(MBBI), E);
  if (MBBI != E)
    return false;

  MBB.removeSuccessor(LoopHeadBNETarget);
  for (auto *MI : ToErase)
    MI->eraseFromParent();
  return true;
}

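// Expand a (possibly masked) cmpxchg pseudo into an LR/SC loop: the loop head
// loads and compares against the expected value (branching out on mismatch),
// and the loop tail attempts the store-conditional, retrying on failure. A
// following BNE on the result is folded into the loop head when possible.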
bool RISCVExpandAtomicPseudo::expandAtomicCmpXchg(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, bool IsMasked,
    int Width, MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB.getParent();
  auto LoopHeadMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto LoopTailMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  Register DestReg = MI.getOperand(0).getReg();
  Register ScratchReg = MI.getOperand(1).getReg();
  Register AddrReg = MI.getOperand(2).getReg();
  Register CmpValReg = MI.getOperand(3).getReg();
  Register NewValReg = MI.getOperand(4).getReg();
  Register MaskReg = IsMasked ? MI.getOperand(5).getReg() : Register();

  MachineBasicBlock *LoopHeadBNETarget = DoneMBB;
  tryToFoldBNEOnCmpXchgResult(MBB, std::next(MBBI), DestReg, CmpValReg,
                              MaskReg, LoopHeadBNETarget);

  // Insert new MBBs.
  MF->insert(++MBB.getIterator(), LoopHeadMBB);
  MF->insert(++LoopHeadMBB->getIterator(), LoopTailMBB);
  MF->insert(++LoopTailMBB->getIterator(), DoneMBB);

  // Set up successors and transfer remaining instructions to DoneMBB.
  LoopHeadMBB->addSuccessor(LoopTailMBB);
  LoopHeadMBB->addSuccessor(LoopHeadBNETarget);
  LoopTailMBB->addSuccessor(DoneMBB);
  LoopTailMBB->addSuccessor(LoopHeadMBB);
  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
  DoneMBB->transferSuccessors(&MBB);
  MBB.addSuccessor(LoopHeadMBB);

  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(IsMasked ? 6 : 5).getImm());

  if (!IsMasked) {
    // .loophead:
    //   lr.[w|d] dest, (addr)
    //   bne dest, cmpval, done
    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width, STI)),
            DestReg)
        .addReg(AddrReg);
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
        .addReg(DestReg)
        .addReg(CmpValReg)
        .addMBB(LoopHeadBNETarget);
    // .looptail:
    //   sc.[w|d] scratch, newval, (addr)
    //   bnez scratch, loophead
    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width, STI)),
            ScratchReg)
        .addReg(NewValReg)
        .addReg(AddrReg);
    BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
        .addReg(ScratchReg)
        .addReg(RISCV::X0)
        .addMBB(LoopHeadMBB);
  } else {
    // .loophead:
    //   lr.w dest, (addr)
    //   and scratch, dest, mask
    //   bne scratch, cmpval, done
    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width, STI)),
            DestReg)
        .addReg(AddrReg);
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), ScratchReg)
        .addReg(DestReg)
        .addReg(MaskReg);
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
        .addReg(ScratchReg)
        .addReg(CmpValReg)
        .addMBB(LoopHeadBNETarget);

    // .looptail:
    //   xor scratch, dest, newval
    //   and scratch, scratch, mask
    //   xor scratch, dest, scratch
    //   sc.w scratch, scratch, (addr)
    //   bnez scratch, loophead
    insertMaskedMerge(TII, DL, LoopTailMBB, ScratchReg, DestReg, NewValReg,
                      MaskReg, ScratchReg);
    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width, STI)),
            ScratchReg)
        .addReg(ScratchReg)
        .addReg(AddrReg);
    BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
        .addReg(ScratchReg)
        .addReg(RISCV::X0)
        .addMBB(LoopHeadMBB);
  }

  NextMBBI = MBB.end();
  MI.eraseFromParent();

  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *LoopHeadMBB);
  computeAndAddLiveIns(LiveRegs, *LoopTailMBB);
  computeAndAddLiveIns(LiveRegs, *DoneMBB);

  return true;
}

} // end of anonymous namespace

INITIALIZE_PASS(RISCVExpandAtomicPseudo, "riscv-expand-atomic-pseudo",
                RISCV_EXPAND_ATOMIC_PSEUDO_NAME, false, false)

FunctionPass *llvm::createRISCVExpandAtomicPseudoPass() {
  return new RISCVExpandAtomicPseudo();
}