//===-- X86LowerTileCopy.cpp - Expand Tile Copy Instructions---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the pass that lowers AMX tile copy instructions. Since
// there is no hardware tile-to-tile copy instruction, we store the source
// tile register to a stack slot and load it from there into the destination
// tile register. This needs an extra GR64 register to hold the stride and a
// stack slot to hold the tile data. The pass runs after machine copy
// propagation, so that no copy optimization is missed, and before
// prolog/epilog insertion, so that it can still allocate stack slots.
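//
// As a rough illustration (the register and stack slot shown are examples
// only), a tile-to-tile copy such as
//   %1:tile = COPY %0:tile
// becomes a sequence along the lines of
//   $rax = MOV64ri 64                                   ; row stride in bytes
//   TILESTORED %stack.0, 1, $rax, 0, $noreg, %0         ; spill source tile
//   %1 = TILELOADD %stack.0, 1, killed $rax, 0, $noreg  ; reload into dest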
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "x86-lower-tile-copy"

namespace {

class X86LowerTileCopy : public MachineFunctionPass {
public:
  static char ID;

  X86LowerTileCopy() : MachineFunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "X86 Lower Tile Copy"; }
};

} // namespace

char X86LowerTileCopy::ID = 0;

INITIALIZE_PASS_BEGIN(X86LowerTileCopy, "lowertilecopy", "Tile Copy Lowering",
                      false, false)
INITIALIZE_PASS_END(X86LowerTileCopy, "lowertilecopy", "Tile Copy Lowering",
                    false, false)

void X86LowerTileCopy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  MachineFunctionPass::getAnalysisUsage(AU);
}

FunctionPass *llvm::createX86LowerTileCopyPass() {
  return new X86LowerTileCopy();
}

bool X86LowerTileCopy::runOnMachineFunction(MachineFunction &MF) {
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
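  // Tile copies only exist when the AMX registers are managed by the
  // register allocator; otherwise there is nothing to lower.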
  if (FuncInfo->getAMXProgModel() != AMXProgModelEnum::ManagedRA)
    return false;

  const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
  const X86InstrInfo *TII = ST.getInstrInfo();
  const TargetRegisterInfo *TRI = ST.getRegisterInfo();
  BitVector GR64Regs =
      TRI->getAllocatableSet(MF, TRI->getRegClass(X86::GR64RegClassID));
  bool Changed = false;

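  // Walk each block bottom-up, tracking register liveness with LiveRegUnits,
  // so that at every tile copy we know which GR64 registers are free to hold
  // the stride.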
  for (MachineBasicBlock &MBB : MF) {
    LiveRegUnits UsedRegs(*TRI);
    UsedRegs.addLiveOuts(MBB);
    for (MachineInstr &MI : llvm::make_early_inc_range(reverse(MBB))) {
      UsedRegs.stepBackward(MI);
      if (!MI.isCopy())
        continue;
      MachineOperand &DstMO = MI.getOperand(0);
      MachineOperand &SrcMO = MI.getOperand(1);
      Register SrcReg = SrcMO.getReg();
      Register DstReg = DstMO.getReg();
      if (!X86::TILERegClass.contains(DstReg, SrcReg))
        continue;

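      // Lower the copy into a tilestored/tileloadd pair that round-trips the
      // tile data through a fresh spill slot.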
      // Allocate stack slot for tile register
      unsigned Size = TRI->getSpillSize(X86::TILERegClass);
      Align Alignment = TRI->getSpillAlign(X86::TILERegClass);
      int TileSS = MF.getFrameInfo().CreateSpillStackObject(Size, Alignment);

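      // Stack slot for saving RAX, allocated below only if no free GR64 is
      // found to hold the stride.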
      int StrideSS = 0;

      // Pick a killed register to avoid a save/reload.
      Register GR64Cand = X86::NoRegister;
      for (auto RegT : GR64Regs.set_bits()) {
        if (UsedRegs.available(RegT)) {
          GR64Cand = RegT;
          break;
        }
      }

      const DebugLoc &DL = MI.getDebugLoc();
      if (GR64Cand) {
        // mov 64 %reg
        BuildMI(MBB, MI, DL, TII->get(X86::MOV64ri), GR64Cand).addImm(64);
      } else {
        // No available register? Save RAX and reload it after use.

        // Allocate stack slot for stride register
        Size = TRI->getSpillSize(X86::GR64RegClass);
        Alignment = TRI->getSpillAlign(X86::GR64RegClass);
        StrideSS = MF.getFrameInfo().CreateSpillStackObject(Size, Alignment);

        // mov %reg (%sp)
        addFrameReference(BuildMI(MBB, MI, DL, TII->get(X86::MOV64mr)),
                          StrideSS)
            .addReg(X86::RAX);
        // mov 64 %reg
        BuildMI(MBB, MI, DL, TII->get(X86::MOV64ri), X86::RAX).addImm(64);
      }
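      // addFrameReference fills in all five memory operands and leaves the
      // index register as $noreg, so the stride register is patched into the
      // index operand of each new instruction afterwards.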
      // tilestored %tmm, (%sp, %idx)
#define GET_EGPR_IF_ENABLED(OPC) (ST.hasEGPR() ? OPC##_EVEX : OPC)
      unsigned Opc = GET_EGPR_IF_ENABLED(X86::TILESTORED);
      MachineInstr *NewMI =
          addFrameReference(BuildMI(MBB, MI, DL, TII->get(Opc)), TileSS)
              .addReg(SrcReg, getKillRegState(SrcMO.isKill()));
      MachineOperand &StoreIdx = NewMI->getOperand(2);
      StoreIdx.setReg(GR64Cand ? GR64Cand : X86::RAX);
      // tileloadd (%sp, %idx), %tmm
      Opc = GET_EGPR_IF_ENABLED(X86::TILELOADD);
#undef GET_EGPR_IF_ENABLED
      NewMI = addFrameReference(BuildMI(MBB, MI, DL, TII->get(Opc), DstReg),
                                TileSS);
      // With DstReg in operand 0, the index register of the load is operand
      // 3. The reload is the last user of the stride, so kill it here rather
      // than on the store above.
      MachineOperand &LoadIdx = NewMI->getOperand(3);
      LoadIdx.setReg(GR64Cand ? GR64Cand : X86::RAX);
      LoadIdx.setIsKill(true);
      if (!GR64Cand) {
        // restore %rax
        // mov (%sp) %rax
        addFrameReference(
            BuildMI(MBB, MI, DL, TII->get(X86::MOV64rm), X86::RAX), StrideSS);
      }
      MI.eraseFromParent();
      Changed = true;
    }
  }
  return Changed;
}