//===-- X86LowerAMXIntrinsics.cpp - X86 Scalarize AMX Intrinsics ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file Pass to transform amx intrinsics to scalar operations.
/// This pass is gated behind the -enable-x86-scalar-amx flag and only runs
/// at -O0 or on functions carrying the optnone attribute; otherwise it is
/// skipped. With -O0 or optnone, the defs of the shapes feeding the amx
/// intrinsics sit right next to the intrinsics themselves, so we cannot find
/// a point that post-dominates all the shape defs while dominating all the
/// amx intrinsics. To decouple the dependency on the shapes, we transform
/// the amx intrinsics into scalar operations so that compilation does not
/// fail. In the long term, fast register allocation should be improved to
/// allocate amx registers.
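///
/// As an illustrative sketch (not the literal emitted IR), a tile load such
/// as
///   %t = call x86_amx @llvm.x86.tileloadd64.internal(i16 %m, i16 %n, ...)
/// becomes a two-level row/column loop nest that loads one i32 element per
/// iteration and accumulates it into a <256 x i32> vector with
/// insertelement; the final vector is bitcast back to x86_amx for any
/// remaining users.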
//===----------------------------------------------------------------------===//
//
#include "X86.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Analysis.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "x86-lower-amx-intrinsics"
#ifndef NDEBUG
static bool isV256I32Ty(Type *Ty) {
  if (auto *FVT = dyn_cast<FixedVectorType>(Ty))
    return FVT->getNumElements() == 256 &&
           FVT->getElementType()->isIntegerTy(32);
  return false;
}
#endif

static cl::opt<bool>
    X86ScalarizeAMX("enable-x86-scalar-amx", cl::init(false), cl::Hidden,
                    cl::desc("X86: enable AMX scalarization."));

namespace {
class X86LowerAMXIntrinsics {
  Function &Func;

public:
  X86LowerAMXIntrinsics(Function &F, DomTreeUpdater &DomTU, LoopInfo *LoopI)
      : Func(F), DTU(DomTU), LI(LoopI) {}
  bool visit();

private:
  DomTreeUpdater &DTU;
  LoopInfo *LI;
  BasicBlock *createLoop(BasicBlock *Preheader, BasicBlock *Exit, Value *Bound,
                         Value *Step, StringRef Name, IRBuilderBase &B,
                         Loop *L);
  template <bool IsTileLoad>
  Value *createTileLoadStoreLoops(BasicBlock *Start, BasicBlock *End,
                                  IRBuilderBase &B, Value *Row, Value *Col,
                                  Value *Ptr, Value *Stride, Value *Tile);
  template <Intrinsic::ID IntrID>
  std::enable_if_t<IntrID == Intrinsic::x86_tdpbssd_internal ||
                       IntrID == Intrinsic::x86_tdpbsud_internal ||
                       IntrID == Intrinsic::x86_tdpbusd_internal ||
                       IntrID == Intrinsic::x86_tdpbuud_internal ||
                       IntrID == Intrinsic::x86_tdpbf16ps_internal,
                   Value *>
  createTileDPLoops(BasicBlock *Start, BasicBlock *End, IRBuilderBase &B,
                    Value *Row, Value *Col, Value *K, Value *Acc, Value *LHS,
                    Value *RHS);
  template <bool IsTileLoad>
  bool lowerTileLoadStore(Instruction *TileLoadStore);
  template <Intrinsic::ID IntrID>
  std::enable_if_t<IntrID == Intrinsic::x86_tdpbssd_internal ||
                       IntrID == Intrinsic::x86_tdpbsud_internal ||
                       IntrID == Intrinsic::x86_tdpbusd_internal ||
                       IntrID == Intrinsic::x86_tdpbuud_internal ||
                       IntrID == Intrinsic::x86_tdpbf16ps_internal,
                   bool>
  lowerTileDP(Instruction *TileDP);
  bool lowerTileZero(Instruction *TileZero);
};
} // anonymous namespace

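// Create a single counted loop (header/body/latch) between Preheader and
// Exit: an i16 induction variable starts at 0 and is bumped by Step in the
// latch until it reaches Bound. Returns the body block; the new blocks are
// registered with the DomTreeUpdater and, if present, LoopInfo.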
BasicBlock *X86LowerAMXIntrinsics::createLoop(BasicBlock *Preheader,
                                              BasicBlock *Exit, Value *Bound,
                                              Value *Step, StringRef Name,
                                              IRBuilderBase &B, Loop *L) {
  LLVMContext &Ctx = Preheader->getContext();
  BasicBlock *Header =
      BasicBlock::Create(Ctx, Name + ".header", Preheader->getParent(), Exit);
  BasicBlock *Body =
      BasicBlock::Create(Ctx, Name + ".body", Header->getParent(), Exit);
  BasicBlock *Latch =
      BasicBlock::Create(Ctx, Name + ".latch", Header->getParent(), Exit);

  Type *I16Ty = Type::getInt16Ty(Ctx);
  BranchInst::Create(Body, Header);
  BranchInst::Create(Latch, Body);
  PHINode *IV = PHINode::Create(I16Ty, 2, Name + ".iv",
                                Header->getTerminator()->getIterator());
  IV->addIncoming(ConstantInt::get(I16Ty, 0), Preheader);

  B.SetInsertPoint(Latch);
  Value *Inc = B.CreateAdd(IV, Step, Name + ".step");
  Value *Cond = B.CreateICmpNE(Inc, Bound, Name + ".cond");
  BranchInst::Create(Header, Exit, Cond, Latch);
  IV->addIncoming(Inc, Latch);

  BranchInst *PreheaderBr = cast<BranchInst>(Preheader->getTerminator());
  BasicBlock *Tmp = PreheaderBr->getSuccessor(0);
  PreheaderBr->setSuccessor(0, Header);
  DTU.applyUpdatesPermissive({
      {DominatorTree::Delete, Preheader, Tmp},
      {DominatorTree::Insert, Header, Body},
      {DominatorTree::Insert, Body, Latch},
      {DominatorTree::Insert, Latch, Header},
      {DominatorTree::Insert, Latch, Exit},
      {DominatorTree::Insert, Preheader, Header},
  });
  if (LI) {
    L->addBasicBlockToLoop(Header, *LI);
    L->addBasicBlockToLoop(Body, *LI);
    L->addBasicBlockToLoop(Latch, *LI);
  }
  return Body;
}

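// Emit a row/column loop nest that scalarizes a tile load or store: each
// iteration addresses element (row, col) in memory through Stride and maps
// it to the flat index row * 16 + col of a <256 x i32> vector.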
template <bool IsTileLoad>
Value *X86LowerAMXIntrinsics::createTileLoadStoreLoops(
    BasicBlock *Start, BasicBlock *End, IRBuilderBase &B, Value *Row,
    Value *Col, Value *Ptr, Value *Stride, Value *Tile) {
  std::string IntrinName = IsTileLoad ? "tileload" : "tilestore";
  Loop *RowLoop = nullptr;
  Loop *ColLoop = nullptr;
  if (LI) {
    RowLoop = LI->AllocateLoop();
    ColLoop = LI->AllocateLoop();
    RowLoop->addChildLoop(ColLoop);
    if (Loop *ParentL = LI->getLoopFor(Start))
      ParentL->addChildLoop(RowLoop);
    else
      LI->addTopLevelLoop(RowLoop);
  }

  BasicBlock *RowBody = createLoop(Start, End, Row, B.getInt16(1),
                                   IntrinName + ".scalarize.rows", B, RowLoop);
  BasicBlock *RowLatch = RowBody->getSingleSuccessor();

  BasicBlock *ColBody = createLoop(RowBody, RowLatch, Col, B.getInt16(1),
                                   IntrinName + ".scalarize.cols", B, ColLoop);

  BasicBlock *ColLoopLatch = ColBody->getSingleSuccessor();
  BasicBlock *ColLoopHeader = ColBody->getSinglePredecessor();
  BasicBlock *RowLoopHeader = RowBody->getSinglePredecessor();
  Value *CurrentRow = &*RowLoopHeader->begin();
  Value *CurrentCol = &*ColLoopHeader->begin();
  Type *EltTy = B.getInt32Ty();
  FixedVectorType *V256I32Ty = FixedVectorType::get(EltTy, 256);

  // Common part for tileload and tilestore
  // *.scalarize.cols.body:
  // Calculate %idxmem and %idxvec
  B.SetInsertPoint(ColBody->getTerminator());
  Value *CurrentRowZExt = B.CreateZExt(CurrentRow, Stride->getType());
  Value *CurrentColZExt = B.CreateZExt(CurrentCol, Stride->getType());
  Value *Offset =
      B.CreateAdd(B.CreateMul(CurrentRowZExt, Stride), CurrentColZExt);
  Value *EltPtr = B.CreateGEP(EltTy, Ptr, Offset);
  Value *Idx = B.CreateAdd(B.CreateMul(CurrentRow, B.getInt16(16)), CurrentCol);
  if (IsTileLoad) {
    // tileload.scalarize.rows.header:
    // %vec.phi.row = phi <256 x i32> [ zeroinitializer, %entry ], [ %ResVec,
    // %tileload.scalarize.rows.latch ]
    B.SetInsertPoint(RowLoopHeader->getTerminator());
    Value *VecZero = Constant::getNullValue(V256I32Ty);
    PHINode *VecCPhiRowLoop = B.CreatePHI(V256I32Ty, 2, "vec.phi.row");
    VecCPhiRowLoop->addIncoming(VecZero, Start);

    // tileload.scalarize.cols.header:
    // %vec.phi = phi <256 x i32> [ %vec.phi.row, %tileload.scalarize.rows.body
    // ], [ %ResVec, %tileload.scalarize.cols.latch ]
    B.SetInsertPoint(ColLoopHeader->getTerminator());
    PHINode *VecPhi = B.CreatePHI(V256I32Ty, 2, "vec.phi");
    VecPhi->addIncoming(VecCPhiRowLoop, RowBody);

    // tileload.scalarize.cols.body:
    // Calculate %idxmem and %idxvec
    // %eltptr = getelementptr i32, i32* %base, i64 %idxmem
    // %elt = load i32, i32* %eltptr
    // %ResVec = insertelement <256 x i32> %vec.phi, i32 %elt, i16 %idxvec
    B.SetInsertPoint(ColBody->getTerminator());
    Value *Elt = B.CreateLoad(EltTy, EltPtr);
    Value *ResVec = B.CreateInsertElement(VecPhi, Elt, Idx);
    VecPhi->addIncoming(ResVec, ColLoopLatch);
    VecCPhiRowLoop->addIncoming(ResVec, RowLatch);

    return ResVec;
  } else {
    auto *BitCast = cast<BitCastInst>(Tile);
    Value *Vec = BitCast->getOperand(0);
    assert(isV256I32Ty(Vec->getType()) && "bitcast from non-v256i32 to x86amx");
    // tilestore.scalarize.cols.body:
    // %mul = mul i16 %row.iv, i16 16
    // %idx = add i16 %mul, i16 %col.iv
    // %elt = extractelement <256 x i32> %vec, i16 %idx
    // store i32 %elt, i32* %ptr
    B.SetInsertPoint(ColBody->getTerminator());
    Value *Elt = B.CreateExtractElement(Vec, Idx);

    B.CreateStore(Elt, EltPtr);
    return nullptr;
  }
}

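// Emit a three-level (row, col, inner) loop nest that scalarizes a tile
// dot-product intrinsic: for each accumulator element (row, col), the inner
// loop reduces over the K dword lanes of A's row and B's column.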
template <Intrinsic::ID IntrID>
std::enable_if_t<IntrID == Intrinsic::x86_tdpbssd_internal ||
                     IntrID == Intrinsic::x86_tdpbsud_internal ||
                     IntrID == Intrinsic::x86_tdpbusd_internal ||
                     IntrID == Intrinsic::x86_tdpbuud_internal ||
                     IntrID == Intrinsic::x86_tdpbf16ps_internal,
                 Value *>
X86LowerAMXIntrinsics::createTileDPLoops(BasicBlock *Start, BasicBlock *End,
                                         IRBuilderBase &B, Value *Row,
                                         Value *Col, Value *K, Value *Acc,
                                         Value *LHS, Value *RHS) {
  std::string IntrinName;
  switch (IntrID) {
  case Intrinsic::x86_tdpbssd_internal:
    IntrinName = "tiledpbssd";
    break;
  case Intrinsic::x86_tdpbsud_internal:
    IntrinName = "tiledpbsud";
    break;
  case Intrinsic::x86_tdpbusd_internal:
    IntrinName = "tiledpbusd";
    break;
  case Intrinsic::x86_tdpbuud_internal:
    IntrinName = "tiledpbuud";
    break;
  case Intrinsic::x86_tdpbf16ps_internal:
    IntrinName = "tiledpbf16ps";
    break;
  }
  Loop *RowLoop = nullptr;
  Loop *ColLoop = nullptr;
  Loop *InnerLoop = nullptr;
  if (LI) {
    RowLoop = LI->AllocateLoop();
    ColLoop = LI->AllocateLoop();
    InnerLoop = LI->AllocateLoop();
    ColLoop->addChildLoop(InnerLoop);
    RowLoop->addChildLoop(ColLoop);
    if (Loop *ParentL = LI->getLoopFor(Start))
      ParentL->addChildLoop(RowLoop);
    else
      LI->addTopLevelLoop(RowLoop);
  }

  BasicBlock *RowBody = createLoop(Start, End, Row, B.getInt16(1),
                                   IntrinName + ".scalarize.rows", B, RowLoop);
  BasicBlock *RowLatch = RowBody->getSingleSuccessor();

  BasicBlock *ColBody = createLoop(RowBody, RowLatch, Col, B.getInt16(1),
                                   IntrinName + ".scalarize.cols", B, ColLoop);

  BasicBlock *ColLoopLatch = ColBody->getSingleSuccessor();

  B.SetInsertPoint(ColBody->getTerminator());
  BasicBlock *InnerBody =
      createLoop(ColBody, ColLoopLatch, K, B.getInt16(1),
                 IntrinName + ".scalarize.inner", B, InnerLoop);

  BasicBlock *ColLoopHeader = ColBody->getSinglePredecessor();
  BasicBlock *RowLoopHeader = RowBody->getSinglePredecessor();
  BasicBlock *InnerLoopHeader = InnerBody->getSinglePredecessor();
  BasicBlock *InnerLoopLatch = InnerBody->getSingleSuccessor();
  Value *CurrentRow = &*RowLoopHeader->begin();
  Value *CurrentCol = &*ColLoopHeader->begin();
  Value *CurrentInner = &*InnerLoopHeader->begin();

  FixedVectorType *V256I32Ty = FixedVectorType::get(B.getInt32Ty(), 256);
  auto *BitCastAcc = cast<BitCastInst>(Acc);
  Value *VecC = BitCastAcc->getOperand(0);
  assert(isV256I32Ty(VecC->getType()) && "bitcast from non-v256i32 to x86amx");
  // TODO else create BitCast from x86amx to v256i32.
  // Store x86amx to memory, and reload from memory
  // to vector. However with -O0, it doesn't happen.
  auto *BitCastLHS = cast<BitCastInst>(LHS);
  Value *VecA = BitCastLHS->getOperand(0);
  assert(isV256I32Ty(VecA->getType()) && "bitcast from non-v256i32 to x86amx");
  auto *BitCastRHS = cast<BitCastInst>(RHS);
  Value *VecB = BitCastRHS->getOperand(0);
  assert(isV256I32Ty(VecB->getType()) && "bitcast from non-v256i32 to x86amx");

  // tiledpbssd.scalarize.rows.header:
  // %vec.c.phi.row = phi <256 x i32> [ %VecC, %continue ], [ %NewVecC,
  // %tiledpbssd.scalarize.rows.latch ]

  // %vec.d.phi.row = phi <256 x i32> [ zeroinitializer, %continue ], [
  // %NewVecD, %tiledpbssd.scalarize.rows.latch ]
  B.SetInsertPoint(RowLoopHeader->getTerminator());
  PHINode *VecCPhiRowLoop = B.CreatePHI(V256I32Ty, 2, "vec.c.phi.row");
  VecCPhiRowLoop->addIncoming(VecC, Start);
  Value *VecZero = Constant::getNullValue(V256I32Ty);
  PHINode *VecDPhiRowLoop = B.CreatePHI(V256I32Ty, 2, "vec.d.phi.row");
  VecDPhiRowLoop->addIncoming(VecZero, Start);

  // tiledpbssd.scalarize.cols.header:
  // %vec.c.phi.col = phi <256 x i32> [ %vec.c.phi.row,
  // %tiledpbssd.scalarize.rows.body ], [ %NewVecC,
  // %tiledpbssd.scalarize.cols.latch ]

  // %vec.d.phi.col = phi <256 x i32> [
  // %vec.d.phi.row, %tiledpbssd.scalarize.rows.body ], [ %NewVecD,
  // %tiledpbssd.scalarize.cols.latch ]

  // calculate idxc.
  B.SetInsertPoint(ColLoopHeader->getTerminator());
  PHINode *VecCPhiColLoop = B.CreatePHI(V256I32Ty, 2, "vec.c.phi.col");
  VecCPhiColLoop->addIncoming(VecCPhiRowLoop, RowBody);
  PHINode *VecDPhiColLoop = B.CreatePHI(V256I32Ty, 2, "vec.d.phi.col");
  VecDPhiColLoop->addIncoming(VecDPhiRowLoop, RowBody);
  Value *IdxC =
      B.CreateAdd(B.CreateMul(CurrentRow, B.getInt16(16)), CurrentCol);

  // tiledpbssd.scalarize.inner.header:
  // %vec.c.inner.phi = phi <256 x i32> [ %vec.c.phi.col,
  // %tiledpbssd.scalarize.cols.body ], [ %NewVecC,
  // %tiledpbssd.scalarize.inner.latch ]

  B.SetInsertPoint(InnerLoopHeader->getTerminator());
  PHINode *VecCPhi = B.CreatePHI(V256I32Ty, 2, "vec.c.inner.phi");
  VecCPhi->addIncoming(VecCPhiColLoop, ColBody);

  B.SetInsertPoint(InnerBody->getTerminator());
  Value *IdxA =
      B.CreateAdd(B.CreateMul(CurrentRow, B.getInt16(16)), CurrentInner);
  Value *IdxB =
      B.CreateAdd(B.CreateMul(CurrentInner, B.getInt16(16)), CurrentCol);
  Value *NewVecC = nullptr;

  if (IntrID != Intrinsic::x86_tdpbf16ps_internal) {
    // tiledpbssd.scalarize.inner.body:
    // calculate idxa, idxb
    // %eltc = extractelement <256 x i32> %vec.c.inner.phi, i16 %idxc
    // %elta = extractelement <256 x i32> %veca, i16 %idxa
    // %eltav4i8 = bitcast i32 %elta to <4 x i8>
    // %eltb = extractelement <256 x i32> %vecb, i16 %idxb
    // %eltbv4i8 = bitcast i32 %eltb to <4 x i8>
    // %eltav4i32 = sext <4 x i8> %eltav4i8 to <4 x i32>
    // %eltbv4i32 = sext <4 x i8> %eltbv4i8 to <4 x i32>
    // %mulab = mul <4 x i32> %eltbv4i32, %eltav4i32
    // %acc = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %mulab)
    // %neweltc = add i32 %eltc, %acc
    // %NewVecC = insertelement <256 x i32> %vec.c.inner.phi, i32 %neweltc,
    // i16 %idxc
    FixedVectorType *V4I8Ty = FixedVectorType::get(B.getInt8Ty(), 4);
    FixedVectorType *V4I32Ty = FixedVectorType::get(B.getInt32Ty(), 4);
    Value *EltC = B.CreateExtractElement(VecCPhi, IdxC);
    Value *EltA = B.CreateExtractElement(VecA, IdxA);
    Value *SubVecA = B.CreateBitCast(EltA, V4I8Ty);
    Value *EltB = B.CreateExtractElement(VecB, IdxB);
    Value *SubVecB = B.CreateBitCast(EltB, V4I8Ty);
    Value *SEXTSubVecB = nullptr;
    Value *SEXTSubVecA = nullptr;
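    // The extension of each dword's sub-vector matches the intrinsic's
    // signedness: ssd = signed A * signed B, sud = signed A * unsigned B,
    // usd = unsigned A * signed B, uud = unsigned A * unsigned B.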
    switch (IntrID) {
    case Intrinsic::x86_tdpbssd_internal:
      SEXTSubVecB = B.CreateSExt(SubVecB, V4I32Ty);
      SEXTSubVecA = B.CreateSExt(SubVecA, V4I32Ty);
      break;
    case Intrinsic::x86_tdpbsud_internal:
      SEXTSubVecB = B.CreateZExt(SubVecB, V4I32Ty);
      SEXTSubVecA = B.CreateSExt(SubVecA, V4I32Ty);
      break;
    case Intrinsic::x86_tdpbusd_internal:
      SEXTSubVecB = B.CreateSExt(SubVecB, V4I32Ty);
      SEXTSubVecA = B.CreateZExt(SubVecA, V4I32Ty);
      break;
    case Intrinsic::x86_tdpbuud_internal:
      SEXTSubVecB = B.CreateZExt(SubVecB, V4I32Ty);
      SEXTSubVecA = B.CreateZExt(SubVecA, V4I32Ty);
      break;
    default:
      llvm_unreachable("Invalid intrinsic ID!");
    }
    Value *SubVecR = B.CreateAddReduce(B.CreateMul(SEXTSubVecA, SEXTSubVecB));
    Value *ResElt = B.CreateAdd(EltC, SubVecR);
    NewVecC = B.CreateInsertElement(VecCPhi, ResElt, IdxC);
  } else {
    // tiledpbf16ps.scalarize.inner.body:
    // calculate idxa, idxb, idxc
    // %eltc = extractelement <256 x i32> %vec.c.inner.phi, i16 %idxc
    // %eltcf32 = bitcast i32 %eltc to float
    // %elta = extractelement <256 x i32> %veca, i16 %idxa
    // %eltav2i16 = bitcast i32 %elta to <2 x i16>
    // %eltb = extractelement <256 x i32> %vecb, i16 %idxb
    // %eltbv2i16 = bitcast i32 %eltb to <2 x i16>
    // %shufflea = shufflevector <2 x i16> %eltav2i16, <2 x i16>
    // zeroinitializer, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
    // %eltav2f32 = bitcast <4 x i16> %shufflea to <2 x float>
    // %shuffleb = shufflevector <2 x i16> %eltbv2i16, <2 x i16>
    // zeroinitializer, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
    // %eltbv2f32 = bitcast <4 x i16> %shuffleb to <2 x float>
    // %mulab = fmul <2 x float> %eltav2f32, %eltbv2f32
    // %acc = call float
    // @llvm.vector.reduce.fadd.v2f32(float %eltcf32, <2 x float> %mulab)
    // %neweltc = bitcast float %acc to i32
    // %NewVecC = insertelement <256 x i32> %vec.c.inner.phi, i32 %neweltc,
    // i16 %idxc
    // %NewVecD = insertelement <256 x i32> %vec.d.inner.phi, i32 %neweltc,
    // i16 %idxc
    FixedVectorType *V2I16Ty = FixedVectorType::get(B.getInt16Ty(), 2);
    FixedVectorType *V2F32Ty = FixedVectorType::get(B.getFloatTy(), 2);
    Value *EltC = B.CreateExtractElement(VecCPhi, IdxC);
    Value *EltCF32 = B.CreateBitCast(EltC, B.getFloatTy());
    Value *EltA = B.CreateExtractElement(VecA, IdxA);
    Value *SubVecA = B.CreateBitCast(EltA, V2I16Ty);
    Value *EltB = B.CreateExtractElement(VecB, IdxB);
    Value *SubVecB = B.CreateBitCast(EltB, V2I16Ty);
    Value *ZeroV2I16 = Constant::getNullValue(V2I16Ty);
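    // Shuffling a bf16 pair with zeros using mask <2, 0, 3, 1> places each
    // bf16 value in the upper 16 bits of a 32-bit lane; since bf16 is the
    // upper half of an IEEE f32, the bitcast to <2 x float> below yields the
    // widened f32 values.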
    int ShuffleMask[4] = {2, 0, 3, 1};
    auto ShuffleArray = ArrayRef(ShuffleMask);
    Value *AV2F32 = B.CreateBitCast(
        B.CreateShuffleVector(SubVecA, ZeroV2I16, ShuffleArray), V2F32Ty);
    Value *BV2F32 = B.CreateBitCast(
        B.CreateShuffleVector(SubVecB, ZeroV2I16, ShuffleArray), V2F32Ty);
    Value *SubVecR = B.CreateFAddReduce(EltCF32, B.CreateFMul(AV2F32, BV2F32));
    Value *ResElt = B.CreateBitCast(SubVecR, B.getInt32Ty());
    NewVecC = B.CreateInsertElement(VecCPhi, ResElt, IdxC);
  }

  // tiledpbssd.scalarize.cols.latch:
  // %NewEltC = extractelement <256 x i32> %vec.c.phi.col, i16 %idxc
  // %NewVecD = insertelement <256 x i32> %vec.d.phi.col, i32 %NewEltC,
  // i16 %idxc
  B.SetInsertPoint(ColLoopLatch->getTerminator());
  Value *NewEltC = B.CreateExtractElement(NewVecC, IdxC);
  Value *NewVecD = B.CreateInsertElement(VecDPhiColLoop, NewEltC, IdxC);

  VecCPhi->addIncoming(NewVecC, InnerLoopLatch);
  VecCPhiRowLoop->addIncoming(NewVecC, RowLatch);
  VecCPhiColLoop->addIncoming(NewVecC, ColLoopLatch);
  VecDPhiRowLoop->addIncoming(NewVecD, RowLatch);
  VecDPhiColLoop->addIncoming(NewVecD, ColLoopLatch);

  return NewVecD;
}

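// Lower a tile dot-product intrinsic: split the block at the intrinsic,
// build the scalarized loop nest between the two halves, and rewrite users
// of the x86_amx result, peeking through bitcasts back to <256 x i32>.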
template <Intrinsic::ID IntrID>
std::enable_if_t<IntrID == Intrinsic::x86_tdpbssd_internal ||
                     IntrID == Intrinsic::x86_tdpbsud_internal ||
                     IntrID == Intrinsic::x86_tdpbusd_internal ||
                     IntrID == Intrinsic::x86_tdpbuud_internal ||
                     IntrID == Intrinsic::x86_tdpbf16ps_internal,
                 bool>
X86LowerAMXIntrinsics::lowerTileDP(Instruction *TileDP) {
  Value *M, *N, *K, *C, *A, *B;
  match(TileDP, m_Intrinsic<IntrID>(m_Value(M), m_Value(N), m_Value(K),
                                    m_Value(C), m_Value(A), m_Value(B)));
  Instruction *InsertI = TileDP;
  IRBuilder<> PreBuilder(TileDP);
  PreBuilder.SetInsertPoint(TileDP);
  // We visit the loop with (m, n/4, k/4):
  // %n_dword = lshr i16 %n, 2
  // %k_dword = lshr i16 %k, 2
  Value *NDWord = PreBuilder.CreateLShr(N, PreBuilder.getInt16(2));
  Value *KDWord = PreBuilder.CreateLShr(K, PreBuilder.getInt16(2));
  BasicBlock *Start = InsertI->getParent();
  BasicBlock *End =
      SplitBlock(InsertI->getParent(), InsertI, &DTU, LI, nullptr, "continue");
  IRBuilder<> Builder(TileDP);
  Value *ResVec = createTileDPLoops<IntrID>(Start, End, Builder, M, NDWord,
                                            KDWord, C, A, B);
  // We cannot assume there is always a bitcast after the tile DP intrinsic,
  // so insert one as required.
  Builder.SetInsertPoint(End, End->getFirstNonPHIIt());
  Value *ResAMX =
      Builder.CreateBitCast(ResVec, Type::getX86_AMXTy(Builder.getContext()));
  // Delete the TileDP intrinsic and do some clean-up.
  for (Use &U : llvm::make_early_inc_range(TileDP->uses())) {
    Instruction *I = cast<Instruction>(U.getUser());
    Value *Vec;
    if (match(I, m_BitCast(m_Value(Vec)))) {
      I->replaceAllUsesWith(ResVec);
      I->eraseFromParent();
    }
  }
  TileDP->replaceAllUsesWith(ResAMX);
  TileDP->eraseFromParent();
  return true;
}

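// Lower tileloadd64/tilestored64: split the block at the intrinsic, emit
// the row/column loops between the halves, and for loads forward the
// resulting <256 x i32> vector to the intrinsic's users.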
template <bool IsTileLoad>
bool X86LowerAMXIntrinsics::lowerTileLoadStore(Instruction *TileLoadStore) {
  Value *M, *N, *Ptr, *Stride, *Tile;
  if (IsTileLoad)
    match(TileLoadStore,
          m_Intrinsic<Intrinsic::x86_tileloadd64_internal>(
              m_Value(M), m_Value(N), m_Value(Ptr), m_Value(Stride)));
  else
    match(TileLoadStore, m_Intrinsic<Intrinsic::x86_tilestored64_internal>(
                             m_Value(M), m_Value(N), m_Value(Ptr),
                             m_Value(Stride), m_Value(Tile)));

  Instruction *InsertI = TileLoadStore;
  IRBuilder<> PreBuilder(TileLoadStore);
  PreBuilder.SetInsertPoint(TileLoadStore);
  Value *NDWord = PreBuilder.CreateLShr(N, PreBuilder.getInt16(2));
  Value *StrideDWord = PreBuilder.CreateLShr(Stride, PreBuilder.getInt64(2));
  BasicBlock *Start = InsertI->getParent();
  BasicBlock *End =
      SplitBlock(InsertI->getParent(), InsertI, &DTU, LI, nullptr, "continue");
  IRBuilder<> Builder(TileLoadStore);
  Value *ResVec = createTileLoadStoreLoops<IsTileLoad>(
      Start, End, Builder, M, NDWord, Ptr, StrideDWord,
      IsTileLoad ? nullptr : Tile);
  if (IsTileLoad) {
    // We cannot assume there is always a bitcast after tileload, so insert
    // one as required.
    Builder.SetInsertPoint(End, End->getFirstNonPHIIt());
    Value *ResAMX =
        Builder.CreateBitCast(ResVec, Type::getX86_AMXTy(Builder.getContext()));
    // Delete the tileloadd64 intrinsic and do some clean-up.
    for (Use &U : llvm::make_early_inc_range(TileLoadStore->uses())) {
      Instruction *I = cast<Instruction>(U.getUser());
      Value *Vec;
      if (match(I, m_BitCast(m_Value(Vec)))) {
        I->replaceAllUsesWith(ResVec);
        I->eraseFromParent();
      }
    }
    TileLoadStore->replaceAllUsesWith(ResAMX);
  }
  TileLoadStore->eraseFromParent();
  return true;
}

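// Lower tilezero by forwarding a zeroed <256 x i32> vector to every user
// that bitcasts the x86_amx result back to a vector.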
bool X86LowerAMXIntrinsics::lowerTileZero(Instruction *TileZero) {
  IRBuilder<> Builder(TileZero);
  FixedVectorType *V256I32Ty = FixedVectorType::get(Builder.getInt32Ty(), 256);
  Value *VecZero = Constant::getNullValue(V256I32Ty);
  for (Use &U : llvm::make_early_inc_range(TileZero->uses())) {
    Instruction *I = cast<Instruction>(U.getUser());
    Value *Vec;
    if (match(I, m_BitCast(m_Value(Vec)))) {
      I->replaceAllUsesWith(VecZero);
      I->eraseFromParent();
    }
  }
  TileZero->eraseFromParent();
  return true;
}

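// Walk the function in depth-first block order, collect every AMX intrinsic
// into a worklist, and lower each one; returns true if anything changed.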
bool X86LowerAMXIntrinsics::visit() {
  bool C = false;
  SmallVector<IntrinsicInst *, 8> WorkList;
  for (BasicBlock *BB : depth_first(&Func)) {
    for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
      if (auto *Inst = dyn_cast<IntrinsicInst>(&*II++)) {
        switch (Inst->getIntrinsicID()) {
        case Intrinsic::x86_tdpbssd_internal:
        case Intrinsic::x86_tdpbsud_internal:
        case Intrinsic::x86_tdpbusd_internal:
        case Intrinsic::x86_tdpbuud_internal:
        case Intrinsic::x86_tileloadd64_internal:
        case Intrinsic::x86_tilestored64_internal:
        case Intrinsic::x86_tilezero_internal:
        case Intrinsic::x86_tdpbf16ps_internal:
          WorkList.push_back(Inst);
          break;
        default:
          break;
        }
      }
    }
  }

  for (auto *Inst : WorkList) {
    switch (Inst->getIntrinsicID()) {
    case Intrinsic::x86_tdpbssd_internal:
      C = lowerTileDP<Intrinsic::x86_tdpbssd_internal>(Inst) || C;
      break;
    case Intrinsic::x86_tdpbsud_internal:
      C = lowerTileDP<Intrinsic::x86_tdpbsud_internal>(Inst) || C;
      break;
    case Intrinsic::x86_tdpbusd_internal:
      C = lowerTileDP<Intrinsic::x86_tdpbusd_internal>(Inst) || C;
      break;
    case Intrinsic::x86_tdpbuud_internal:
      C = lowerTileDP<Intrinsic::x86_tdpbuud_internal>(Inst) || C;
      break;
    case Intrinsic::x86_tdpbf16ps_internal:
      C = lowerTileDP<Intrinsic::x86_tdpbf16ps_internal>(Inst) || C;
      break;
    case Intrinsic::x86_tileloadd64_internal:
      C = lowerTileLoadStore<true>(Inst) || C;
      break;
    case Intrinsic::x86_tilestored64_internal:
      C = lowerTileLoadStore<false>(Inst) || C;
      break;
    case Intrinsic::x86_tilezero_internal:
      C = lowerTileZero(Inst) || C;
      break;
    default:
      llvm_unreachable("invalid amx intrinsics!");
    }
  }

  return C;
}

namespace {
bool shouldRunLowerAMXIntrinsics(const Function &F, const TargetMachine *TM) {
  return X86ScalarizeAMX && (F.hasFnAttribute(Attribute::OptimizeNone) ||
                             TM->getOptLevel() == CodeGenOptLevel::None);
}

bool runLowerAMXIntrinsics(Function &F, DominatorTree *DT, LoopInfo *LI) {
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);

  X86LowerAMXIntrinsics LAT(F, DTU, LI);
  return LAT.visit();
}
} // namespace

PreservedAnalyses X86LowerAMXIntrinsicsPass::run(Function &F,
                                                 FunctionAnalysisManager &FAM) {
  if (!shouldRunLowerAMXIntrinsics(F, TM))
    return PreservedAnalyses::all();

  DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  LoopInfo &LI = FAM.getResult<LoopAnalysis>(F);
  bool Changed = runLowerAMXIntrinsics(F, &DT, &LI);
  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA = PreservedAnalyses::none();
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  return PA;
}

namespace {
class X86LowerAMXIntrinsicsLegacyPass : public FunctionPass {
public:
  static char ID;

  X86LowerAMXIntrinsicsLegacyPass() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override {
    TargetMachine *TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
    if (!shouldRunLowerAMXIntrinsics(F, TM))
      return false;

    auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
    auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
    auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
    auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
    return runLowerAMXIntrinsics(F, DT, LI);
  }
  StringRef getPassName() const override { return "Lower AMX intrinsics"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
  }
};
} // namespace

static const char PassName[] = "Lower AMX intrinsics";
char X86LowerAMXIntrinsicsLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(X86LowerAMXIntrinsicsLegacyPass, DEBUG_TYPE, PassName,
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(X86LowerAMXIntrinsicsLegacyPass, DEBUG_TYPE, PassName,
                    false, false)

FunctionPass *llvm::createX86LowerAMXIntrinsicsLegacyPass() {
  return new X86LowerAMXIntrinsicsLegacyPass();
}