//===-- X86LowerTileCopy.cpp - Expand Tile Copy Instructions --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the pass which lowers AMX tile copy instructions. Since
// there is no tile-to-tile copy instruction, a copy is expanded into a store
// of the source tile register to a stack slot followed by a load from that
// slot into the destination tile register. An extra GR64 is needed to hold
// the stride, and a stack slot is needed to hold the tile data.
// The pass runs after machine copy propagation, so that no copy
// optimizations are missed, and before prolog/epilog insertion, so that the
// stack slot can still be allocated.
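//
// For example, a tile-to-tile copy
//
//   %t1 = COPY %t0
//
// is expanded into (an illustrative MIR sketch; $rax stands in for whichever
// dead GR64 the pass picks, and the save/restore needed when none is free is
// omitted):
//
//   $rax = MOV64ri 64
//   TILESTORED %stack.0, 1, $rax, 0, $noreg, %t0
//   %t1 = TILELOADD %stack.0, 1, killed $rax, 0, $noreg
//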
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/DebugLoc.h"

using namespace llvm;

#define DEBUG_TYPE "x86-lower-tile-copy"

namespace {

class X86LowerTileCopy : public MachineFunctionPass {
public:
  static char ID;

  X86LowerTileCopy() : MachineFunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "X86 Lower Tile Copy"; }
};

} // namespace

char X86LowerTileCopy::ID = 0;

INITIALIZE_PASS_BEGIN(X86LowerTileCopy, "lowertilecopy", "Tile Copy Lowering",
                      false, false)
INITIALIZE_PASS_END(X86LowerTileCopy, "lowertilecopy", "Tile Copy Lowering",
                    false, false)

void X86LowerTileCopy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  MachineFunctionPass::getAnalysisUsage(AU);
}

FunctionPass *llvm::createX86LowerTileCopyPass() {
  return new X86LowerTileCopy();
}

bool X86LowerTileCopy::runOnMachineFunction(MachineFunction &MF) {
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  if (FuncInfo->getAMXProgModel() != AMXProgModelEnum::ManagedRA)
    return false;

  const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
  assert(ST.hasAMXTILE() && "Only supported on AMXTILE targets");

  const X86InstrInfo *TII = ST.getInstrInfo();
  const TargetRegisterInfo *TRI = ST.getRegisterInfo();
  BitVector GR64Regs =
      TRI->getAllocatableSet(MF, TRI->getRegClass(X86::GR64RegClassID));
  bool Changed = false;

  for (MachineBasicBlock &MBB : MF) {
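    // Walk the block bottom-up, tracking live register units, so that at
    // each tile copy a GR64 that is dead at that point can be chosen as the
    // stride register.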
    LiveRegUnits UsedRegs(*TRI);
    UsedRegs.addLiveOuts(MBB);
    for (MachineInstr &MI : llvm::make_early_inc_range(reverse(MBB))) {
      UsedRegs.stepBackward(MI);
      if (!MI.isCopy())
        continue;
      MachineOperand &DstMO = MI.getOperand(0);
      MachineOperand &SrcMO = MI.getOperand(1);
      Register SrcReg = SrcMO.getReg();
      Register DstReg = DstMO.getReg();
      if (!X86::TILERegClass.contains(DstReg, SrcReg))
        continue;

      // Allocate a stack slot for the tile register.
      unsigned Size = TRI->getSpillSize(X86::TILERegClass);
      Align Alignment = TRI->getSpillAlign(X86::TILERegClass);
      int TileSS = MF.getFrameInfo().CreateSpillStackObject(Size, Alignment);

      int StrideSS = 0;

      // Pick a register that is dead here to avoid a save/reload.
      Register GR64Cand = X86::NoRegister;
      for (auto RegT : GR64Regs.set_bits()) {
        if (UsedRegs.available(RegT)) {
          GR64Cand = RegT;
          break;
        }
      }

      const DebugLoc &DL = MI.getDebugLoc();
      if (GR64Cand) {
        // mov $64, %reg
        BuildMI(MBB, MI, DL, TII->get(X86::MOV64ri), GR64Cand).addImm(64);
      } else {
        // No available register? Save RAX and reload it after use.

        // Allocate a stack slot for the stride register.
        Size = TRI->getSpillSize(X86::GR64RegClass);
        Alignment = TRI->getSpillAlign(X86::GR64RegClass);
        StrideSS = MF.getFrameInfo().CreateSpillStackObject(Size, Alignment);

        // mov %rax, (%sp)
        addFrameReference(BuildMI(MBB, MI, DL, TII->get(X86::MOV64mr)),
                          StrideSS)
            .addReg(X86::RAX);
        // mov $64, %rax
        BuildMI(MBB, MI, DL, TII->get(X86::MOV64ri), X86::RAX).addImm(64);
      }
      // tilestored %tmm, (%sp, %idx)
#define GET_EGPR_IF_ENABLED(OPC) (ST.hasEGPR() ? OPC##_EVEX : OPC)
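      // With extended GPRs (EGPR) available, use the EVEX-encoded forms of
      // the tile load/store instructions.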
      unsigned Opc = GET_EGPR_IF_ENABLED(X86::TILESTORED);
      MachineInstr *NewMI =
          addFrameReference(BuildMI(MBB, MI, DL, TII->get(Opc)), TileSS)
              .addReg(SrcReg, getKillRegState(SrcMO.isKill()));
      MachineOperand *MO = &NewMI->getOperand(X86::AddrIndexReg);
      MO->setReg(GR64Cand ? GR64Cand : X86::RAX);
      // tileloadd (%sp, %idx), %tmm
      Opc = GET_EGPR_IF_ENABLED(X86::TILELOADD);
#undef GET_EGPR_IF_ENABLED
      NewMI = addFrameReference(BuildMI(MBB, MI, DL, TII->get(Opc), DstReg),
                                TileSS);
      MO = &NewMI->getOperand(1 + X86::AddrIndexReg);
      MO->setReg(GR64Cand ? GR64Cand : X86::RAX);
      MO->setIsKill(true);
      if (!GR64Cand) {
        // Restore RAX: mov (%sp), %rax
        addFrameReference(
            BuildMI(MBB, MI, DL, TII->get(X86::MOV64rm), X86::RAX), StrideSS);
      }
      MI.eraseFromParent();
      Changed = true;
    }
  }
  return Changed;
}