1//===-- SPIRVPreLegalizer.cpp - prepare IR for legalization -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The pass prepares IR for legalization: it assigns SPIR-V types to registers
// and removes intrinsics which held these types during IR translation.
11// Also it processes constants and registers them in GR to avoid duplication.
12//
13//===----------------------------------------------------------------------===//
14
#include "SPIRV.h"
#include "SPIRVSubtarget.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include <cstdint>
#include <cstring>
24
25#define DEBUG_TYPE "spirv-prelegalizer"
26
27using namespace llvm;
28
namespace {
// Legacy machine-function pass that prepares GMIR for SPIR-V legalization:
// assigns SPIR-V types to virtual registers, folds and erases the service
// intrinsics inserted during IR translation, and registers constants in the
// global registry to avoid duplication.
class SPIRVPreLegalizer : public MachineFunctionPass {
public:
  static char ID;
  SPIRVPreLegalizer() : MachineFunctionPass(ID) {}
  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
};
} // namespace
38
void SPIRVPreLegalizer::getAnalysisUsage(AnalysisUsage &AU) const {
  // This pass does not invalidate value tracking results, so mark the
  // analysis as preserved to avoid recomputing it afterwards.
  AU.addPreserved<GISelValueTrackingAnalysisLegacy>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
43
// Registers every constant referenced by a llvm.spv.track_constant intrinsic
// in the global registry (GR), then erases the intrinsic, rewiring its users
// to the register that holds the constant. Constants of target extension
// types are remembered in TargetExtConstTypes so their types can be restored
// during type assignment.
static void
addConstantsToTrack(MachineFunction &MF, SPIRVGlobalRegistry *GR,
                    const SPIRVSubtarget &STI,
                    DenseMap<MachineInstr *, Type *> &TargetExtConstTypes) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  // Maps a track_constant intrinsic whose constant is already registered to
  // the register that first defined that constant.
  DenseMap<MachineInstr *, Register> RegsAlreadyAddedToDT;
  SmallVector<MachineInstr *, 10> ToErase, ToEraseComposites;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (!isSpvIntrinsic(MI, Intrinsic::spv_track_constant))
        continue;
      ToErase.push_back(&MI);
      Register SrcReg = MI.getOperand(2).getReg();
      // Operand 3 wraps the tracked llvm::Constant in metadata.
      auto *Const =
          cast<Constant>(cast<ConstantAsMetadata>(
                             MI.getOperand(3).getMetadata()->getOperand(0))
                             ->getValue());
      if (auto *GV = dyn_cast<GlobalValue>(Const)) {
        Register Reg = GR->find(GV, &MF);
        if (!Reg.isValid()) {
          GR->add(GV, MRI.getVRegDef(SrcReg));
          GR->addGlobalObject(GV, &MF, SrcReg);
        } else
          RegsAlreadyAddedToDT[&MI] = Reg;
      } else {
        Register Reg = GR->find(Const, &MF);
        if (!Reg.isValid()) {
          if (auto *ConstVec = dyn_cast<ConstantDataVector>(Const)) {
            auto *BuildVec = MRI.getVRegDef(SrcReg);
            assert(BuildVec &&
                   BuildVec->getOpcode() == TargetOpcode::G_BUILD_VECTOR);
            GR->add(Const, BuildVec);
            for (unsigned i = 0; i < ConstVec->getNumElements(); ++i) {
              // Ensure that OpConstantComposite reuses a constant when it's
              // already created and available in the same machine function.
              Constant *ElemConst = ConstVec->getElementAsConstant(i);
              Register ElemReg = GR->find(ElemConst, &MF);
              if (!ElemReg.isValid())
                GR->add(ElemConst,
                        MRI.getVRegDef(BuildVec->getOperand(1 + i).getReg()));
              else
                BuildVec->getOperand(1 + i).setReg(ElemReg);
            }
          }
          if (Const->getType()->isTargetExtTy()) {
            // Remember the association so that it can be restored when types
            // are assigned later.
            MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
            if (SrcMI)
              GR->add(Const, SrcMI);
            if (SrcMI && (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT ||
                          SrcMI->getOpcode() == TargetOpcode::G_IMPLICIT_DEF))
              TargetExtConstTypes[SrcMI] = Const->getType();
            if (Const->isNullValue()) {
              // Null target-extension constants become OpConstantNull; the
              // required type is materialized at the top of the entry block.
              MachineBasicBlock &DepMBB = MF.front();
              MachineIRBuilder MIB(DepMBB, DepMBB.getFirstNonPHI());
              SPIRVTypeInst ExtType = GR->getOrCreateSPIRVType(
                  Const->getType(), MIB, SPIRV::AccessQualifier::ReadWrite,
                  true);
              assert(SrcMI && "Expected source instruction to be valid");
              SrcMI->setDesc(STI.getInstrInfo()->get(SPIRV::OpConstantNull));
              SrcMI->addOperand(MachineOperand::CreateReg(
                  GR->getSPIRVTypeID(ExtType), false));
            }
          }
        } else {
          RegsAlreadyAddedToDT[&MI] = Reg;
          // This MI is unused and will be removed. If the MI uses
          // const_composite, it will be unused and should be removed too.
          assert(MI.getOperand(2).isReg() && "Reg operand is expected");
          MachineInstr *SrcMI = MRI.getVRegDef(MI.getOperand(2).getReg());
          if (SrcMI && isSpvIntrinsic(*SrcMI, Intrinsic::spv_const_composite))
            ToEraseComposites.push_back(SrcMI);
        }
      }
    }
  }
  // Redirect users of each erased intrinsic's result to the tracked register
  // (or the pre-existing duplicate), propagating the register class if the
  // target register does not have one yet.
  for (MachineInstr *MI : ToErase) {
    Register Reg = MI->getOperand(2).getReg();
    auto It = RegsAlreadyAddedToDT.find(MI);
    if (It != RegsAlreadyAddedToDT.end())
      Reg = It->second;
    auto *RC = MRI.getRegClassOrNull(MI->getOperand(0).getReg());
    if (!MRI.getRegClassOrNull(Reg) && RC)
      MRI.setRegClass(Reg, RC);
    MRI.replaceRegWith(MI->getOperand(0).getReg(), Reg);
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }
  for (MachineInstr *MI : ToEraseComposites) {
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }
}
137
138static void foldConstantsIntoIntrinsics(MachineFunction &MF,
139 SPIRVGlobalRegistry *GR,
140 MachineIRBuilder MIB) {
141 SmallVector<MachineInstr *, 64> ToErase;
142 for (MachineBasicBlock &MBB : MF) {
143 for (MachineInstr &MI : MBB) {
144 if (!isSpvIntrinsic(MI, IntrinsicID: Intrinsic::spv_assign_name))
145 continue;
146 const MDNode *MD = MI.getOperand(i: 2).getMetadata();
147 StringRef ValueName = cast<MDString>(Val: MD->getOperand(I: 0))->getString();
148 if (ValueName.size() > 0) {
149 MIB.setInsertPt(MBB&: *MI.getParent(), II: MI);
150 buildOpName(Target: MI.getOperand(i: 1).getReg(), Name: ValueName, MIRBuilder&: MIB);
151 }
152 ToErase.push_back(Elt: &MI);
153 }
154 for (MachineInstr *MI : ToErase) {
155 GR->invalidateMachineInstr(MI);
156 MI->eraseFromParent();
157 }
158 ToErase.clear();
159 }
160}
161
162static MachineInstr *findAssignTypeInstr(Register Reg,
163 MachineRegisterInfo *MRI) {
164 for (MachineRegisterInfo::use_instr_iterator I = MRI->use_instr_begin(RegNo: Reg),
165 IE = MRI->use_instr_end();
166 I != IE; ++I) {
167 MachineInstr *UseMI = &*I;
168 if ((isSpvIntrinsic(MI: *UseMI, IntrinsicID: Intrinsic::spv_assign_ptr_type) ||
169 isSpvIntrinsic(MI: *UseMI, IntrinsicID: Intrinsic::spv_assign_type)) &&
170 UseMI->getOperand(i: 1).getReg() == Reg)
171 return UseMI;
172 }
173 return nullptr;
174}
175
176static void buildOpBitcast(SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB,
177 Register ResVReg, Register OpReg) {
178 SPIRVTypeInst ResType = GR->getSPIRVTypeForVReg(VReg: ResVReg);
179 SPIRVTypeInst OpType = GR->getSPIRVTypeForVReg(VReg: OpReg);
180 assert(ResType && OpType && "Operand types are expected");
181 if (!GR->isBitcastCompatible(Type1: ResType, Type2: OpType))
182 report_fatal_error(reason: "incompatible result and operand types in a bitcast");
183 MachineRegisterInfo *MRI = MIB.getMRI();
184 if (!MRI->getRegClassOrNull(Reg: ResVReg))
185 MRI->setRegClass(Reg: ResVReg, RC: GR->getRegClass(SpvType: ResType));
186 if (ResType == OpType)
187 MIB.buildInstr(Opcode: TargetOpcode::COPY).addDef(RegNo: ResVReg).addUse(RegNo: OpReg);
188 else
189 MIB.buildInstr(Opcode: SPIRV::OpBitcast)
190 .addDef(RegNo: ResVReg)
191 .addUse(RegNo: GR->getSPIRVTypeID(SpirvType: ResType))
192 .addUse(RegNo: OpReg);
193}
194
195// We lower G_BITCAST to OpBitcast here to avoid a MachineVerifier error.
196// The verifier checks if the source and destination LLTs of a G_BITCAST are
197// different, but this check is too strict for SPIR-V's typed pointers, which
198// may have the same LLT but different SPIRV type (e.g. pointers to different
199// pointee types). By lowering to OpBitcast here, we bypass the verifier's
200// check. See discussion in https://github.com/llvm/llvm-project/pull/110270
201// for more context.
202//
203// We also handle the llvm.spv.bitcast intrinsic here. If the source and
204// destination SPIR-V types are the same, we lower it to a COPY to enable
205// further optimizations like copy propagation.
static void lowerBitcasts(MachineFunction &MF, SPIRVGlobalRegistry *GR,
                          MachineIRBuilder MIB) {
  SmallVector<MachineInstr *, 16> ToErase;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      // llvm.spv.bitcast with identical source/destination SPIR-V types is
      // replaced with a COPY so later passes can propagate it away.
      if (isSpvIntrinsic(MI, Intrinsic::spv_bitcast)) {
        Register DstReg = MI.getOperand(0).getReg();
        Register SrcReg = MI.getOperand(2).getReg();
        SPIRVTypeInst DstType = GR->getSPIRVTypeForVReg(DstReg);
        assert(
            DstType &&
            "Expected destination SPIR-V type to have been assigned already.");
        SPIRVTypeInst SrcType = GR->getSPIRVTypeForVReg(SrcReg);
        assert(SrcType &&
               "Expected source SPIR-V type to have been assigned already.");
        if (DstType == SrcType) {
          MIB.setInsertPt(*MI.getParent(), MI);
          MIB.buildCopy(DstReg, SrcReg);
          ToErase.push_back(&MI);
          continue;
        }
        // Types differ: keep the intrinsic for regular instruction selection.
      }

      if (MI.getOpcode() != TargetOpcode::G_BITCAST)
        continue;

      // G_BITCAST is lowered to OpBitcast/COPY directly (see the comment
      // above this function for why the generic opcode cannot survive).
      MIB.setInsertPt(*MI.getParent(), MI);
      buildOpBitcast(GR, MIB, MI.getOperand(0).getReg(),
                     MI.getOperand(1).getReg());
      ToErase.push_back(&MI);
    }
  }
  for (MachineInstr *MI : ToErase) {
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }
}
243
// Lowers llvm.spv.ptrcast intrinsics: materializes the pointer type encoded
// in the intrinsic's metadata and either emits a bitcast to it or, when the
// source register already has exactly that type, removes the cast entirely
// by rewiring all users to the source register.
static void insertBitcasts(MachineFunction &MF, SPIRVGlobalRegistry *GR,
                           MachineIRBuilder MIB) {
  // Get access to information about available extensions
  const SPIRVSubtarget *ST =
      static_cast<const SPIRVSubtarget *>(&MIB.getMF().getSubtarget());
  SmallVector<MachineInstr *, 10> ToErase;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (!isSpvIntrinsic(MI, Intrinsic::spv_ptrcast))
        continue;
      assert(MI.getOperand(2).isReg());
      MIB.setInsertPt(*MI.getParent(), MI);
      ToErase.push_back(&MI);
      Register Def = MI.getOperand(0).getReg();
      Register Source = MI.getOperand(2).getReg();
      Type *ElemTy = getMDOperandAsType(MI.getOperand(3).getMetadata(), 0);
      // Function pointers live in the dedicated INTEL code-section storage
      // class; everything else maps from the LLVM address space operand.
      auto SC =
          isa<FunctionType>(ElemTy)
              ? SPIRV::StorageClass::CodeSectionINTEL
              : addressSpaceToStorageClass(MI.getOperand(4).getImm(), *ST);
      SPIRVTypeInst AssignedPtrType =
          GR->getOrCreateSPIRVPointerType(ElemTy, MI, SC);

      // If the ptrcast would be redundant, replace all uses with the source
      // register.
      MachineRegisterInfo *MRI = MIB.getMRI();
      if (GR->getSPIRVTypeForVReg(Source) == AssignedPtrType) {
        // Erase Def's assign type instruction if we are going to replace Def.
        if (MachineInstr *AssignMI = findAssignTypeInstr(Def, MRI))
          ToErase.push_back(AssignMI);
        MRI->replaceRegWith(Def, Source);
      } else {
        if (!GR->getSPIRVTypeForVReg(Def, &MF))
          GR->assignSPIRVTypeToVReg(AssignedPtrType, Def, MF);
        MIB.buildBitcast(Def, Source);
      }
    }
  }
  for (MachineInstr *MI : ToErase) {
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }
}
287
288// Translating GV, IRTranslator sometimes generates following IR:
289// %1 = G_GLOBAL_VALUE
290// %2 = COPY %1
291// %3 = G_ADDRSPACE_CAST %2
292//
293// or
294//
295// %1 = G_ZEXT %2
296// G_MEMCPY ... %2 ...
297//
298// New registers have no SPIRV type and no register class info.
299//
300// Set SPIRV type for GV, propagate it from GV to other instructions,
301// also set register classes.
302static SPIRVTypeInst propagateSPIRVType(MachineInstr *MI,
303 SPIRVGlobalRegistry *GR,
304 MachineRegisterInfo &MRI,
305 MachineIRBuilder &MIB) {
306 SPIRVTypeInst SpvType = nullptr;
307 assert(MI && "Machine instr is expected");
308 if (MI->getOperand(i: 0).isReg()) {
309 Register Reg = MI->getOperand(i: 0).getReg();
310 SpvType = GR->getSPIRVTypeForVReg(VReg: Reg);
311 if (!SpvType) {
312 switch (MI->getOpcode()) {
313 case TargetOpcode::G_FCONSTANT:
314 case TargetOpcode::G_CONSTANT: {
315 MIB.setInsertPt(MBB&: *MI->getParent(), II: MI);
316 Type *Ty = MI->getOperand(i: 1).getCImm()->getType();
317 SpvType = GR->getOrCreateSPIRVType(
318 Type: Ty, MIRBuilder&: MIB, AQ: SPIRV::AccessQualifier::ReadWrite, EmitIR: true);
319 break;
320 }
321 case TargetOpcode::G_GLOBAL_VALUE: {
322 MIB.setInsertPt(MBB&: *MI->getParent(), II: MI);
323 const GlobalValue *Global = MI->getOperand(i: 1).getGlobal();
324 Type *ElementTy = toTypedPointer(Ty: GR->getDeducedGlobalValueType(Global));
325 auto *Ty = TypedPointerType::get(ElementType: ElementTy,
326 AddressSpace: Global->getType()->getAddressSpace());
327 SpvType = GR->getOrCreateSPIRVType(
328 Type: Ty, MIRBuilder&: MIB, AQ: SPIRV::AccessQualifier::ReadWrite, EmitIR: true);
329 break;
330 }
331 case TargetOpcode::G_ANYEXT:
332 case TargetOpcode::G_SEXT:
333 case TargetOpcode::G_ZEXT: {
334 if (MI->getOperand(i: 1).isReg()) {
335 if (MachineInstr *DefInstr =
336 MRI.getVRegDef(Reg: MI->getOperand(i: 1).getReg())) {
337 if (SPIRVTypeInst Def =
338 propagateSPIRVType(MI: DefInstr, GR, MRI, MIB)) {
339 unsigned CurrentBW = GR->getScalarOrVectorBitWidth(Type: Def);
340 unsigned ExpectedBW =
341 std::max(a: MRI.getType(Reg).getScalarSizeInBits(), b: CurrentBW);
342 unsigned NumElements = GR->getScalarOrVectorComponentCount(Type: Def);
343 SpvType = GR->getOrCreateSPIRVIntegerType(BitWidth: ExpectedBW, MIRBuilder&: MIB);
344 if (NumElements > 1)
345 SpvType = GR->getOrCreateSPIRVVectorType(BaseType: SpvType, NumElements,
346 MIRBuilder&: MIB, EmitIR: true);
347 }
348 }
349 }
350 break;
351 }
352 case TargetOpcode::G_PTRTOINT:
353 SpvType = GR->getOrCreateSPIRVIntegerType(
354 BitWidth: MRI.getType(Reg).getScalarSizeInBits(), MIRBuilder&: MIB);
355 break;
356 case TargetOpcode::G_TRUNC:
357 case TargetOpcode::G_ADDRSPACE_CAST:
358 case TargetOpcode::G_PTR_ADD:
359 case TargetOpcode::COPY: {
360 MachineOperand &Op = MI->getOperand(i: 1);
361 MachineInstr *Def = Op.isReg() ? MRI.getVRegDef(Reg: Op.getReg()) : nullptr;
362 if (Def)
363 SpvType = propagateSPIRVType(MI: Def, GR, MRI, MIB);
364 break;
365 }
366 default:
367 break;
368 }
369 if (SpvType) {
370 // check if the address space needs correction
371 LLT RegType = MRI.getType(Reg);
372 if (SpvType->getOpcode() == SPIRV::OpTypePointer &&
373 RegType.isPointer() &&
374 storageClassToAddressSpace(SC: GR->getPointerStorageClass(Type: SpvType)) !=
375 RegType.getAddressSpace()) {
376 const SPIRVSubtarget &ST =
377 MI->getParent()->getParent()->getSubtarget<SPIRVSubtarget>();
378 auto TSC = addressSpaceToStorageClass(AddrSpace: RegType.getAddressSpace(), STI: ST);
379 SpvType = GR->changePointerStorageClass(PtrType: SpvType, SC: TSC, I&: *MI);
380 }
381 GR->assignSPIRVTypeToVReg(Type: SpvType, VReg: Reg, MF: MIB.getMF());
382 }
383 if (!MRI.getRegClassOrNull(Reg))
384 MRI.setRegClass(Reg, RC: SpvType ? GR->getRegClass(SpvType)
385 : &SPIRV::iIDRegClass);
386 }
387 }
388 return SpvType;
389}
390
391// To support current approach and limitations wrt. bit width here we widen a
392// scalar register with a bit width greater than 1 to valid sizes and cap it to
393// 128 width.
// Rounds a scalar bit width up to the next power of two, clamped to the
// [8, 128] range; 1-bit (boolean) values are left untouched.
static unsigned widenBitWidthToNextPow2(unsigned BitWidth) {
  if (BitWidth == 1)
    return 1; // No need to widen 1-bit values
  unsigned Widened = 8;
  while (Widened < BitWidth && Widened < 128)
    Widened *= 2;
  return Widened;
}
399
400static void widenScalarType(Register Reg, MachineRegisterInfo &MRI) {
401 LLT RegType = MRI.getType(Reg);
402 if (!RegType.isScalar())
403 return;
404 unsigned CurrentWidth = RegType.getScalarSizeInBits();
405 unsigned NewWidth = widenBitWidthToNextPow2(BitWidth: CurrentWidth);
406 if (NewWidth != CurrentWidth)
407 MRI.setType(VReg: Reg, Ty: LLT::scalar(SizeInBits: NewWidth));
408}
409
410static void widenCImmType(MachineOperand &MOP) {
411 const ConstantInt *CImmVal = MOP.getCImm();
412 unsigned CurrentWidth = CImmVal->getBitWidth();
413 unsigned NewWidth = widenBitWidthToNextPow2(BitWidth: CurrentWidth);
414 if (NewWidth != CurrentWidth) {
415 // Replace the immediate value with the widened version
416 MOP.setCImm(ConstantInt::get(Context&: CImmVal->getType()->getContext(),
417 V: CImmVal->getValue().zextOrTrunc(width: NewWidth)));
418 }
419}
420
421static void setInsertPtAfterDef(MachineIRBuilder &MIB, MachineInstr *Def) {
422 MachineBasicBlock &MBB = *Def->getParent();
423 MachineBasicBlock::iterator DefIt =
424 Def->getNextNode() ? Def->getNextNode()->getIterator() : MBB.end();
425 // Skip all the PHI and debug instructions.
426 while (DefIt != MBB.end() &&
427 (DefIt->isPHI() || DefIt->isDebugOrPseudoInstr()))
428 DefIt = std::next(x: DefIt);
429 MIB.setInsertPt(MBB, II: DefIt);
430}
431
432namespace llvm {
433void updateRegType(Register Reg, Type *Ty, SPIRVTypeInst SpvType,
434 SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB,
435 MachineRegisterInfo &MRI) {
436 assert((Ty || SpvType) && "Either LLVM or SPIRV type is expected.");
437 MachineInstr *Def = MRI.getVRegDef(Reg);
438 setInsertPtAfterDef(MIB, Def);
439 if (!SpvType)
440 SpvType = GR->getOrCreateSPIRVType(Type: Ty, MIRBuilder&: MIB,
441 AQ: SPIRV::AccessQualifier::ReadWrite, EmitIR: true);
442 if (!MRI.getRegClassOrNull(Reg))
443 MRI.setRegClass(Reg, RC: GR->getRegClass(SpvType));
444 if (!MRI.getType(Reg).isValid())
445 MRI.setType(VReg: Reg, Ty: GR->getRegType(SpvType));
446 GR->assignSPIRVTypeToVReg(Type: SpvType, VReg: Reg, MF: MIB.getMF());
447}
448
449void processInstr(MachineInstr &MI, MachineIRBuilder &MIB,
450 MachineRegisterInfo &MRI, SPIRVGlobalRegistry *GR,
451 SPIRVTypeInst KnownResType) {
452 MIB.setInsertPt(MBB&: *MI.getParent(), II: MI.getIterator());
453 for (auto &Op : MI.operands()) {
454 if (!Op.isReg() || Op.isDef())
455 continue;
456 Register OpReg = Op.getReg();
457 SPIRVTypeInst SpvType = GR->getSPIRVTypeForVReg(VReg: OpReg);
458 if (!SpvType && KnownResType) {
459 SpvType = KnownResType;
460 GR->assignSPIRVTypeToVReg(Type: KnownResType, VReg: OpReg, MF: *MI.getMF());
461 }
462 assert(SpvType);
463 if (!MRI.getRegClassOrNull(Reg: OpReg))
464 MRI.setRegClass(Reg: OpReg, RC: GR->getRegClass(SpvType));
465 if (!MRI.getType(Reg: OpReg).isValid())
466 MRI.setType(VReg: OpReg, Ty: GR->getRegType(SpvType));
467 }
468}
469} // namespace llvm
470
// Assigns SPIR-V types to registers, driven by the spv_assign_type /
// spv_assign_ptr_type service intrinsics and by constant definitions, and
// erases those intrinsics afterwards. Also widens invalid scalar bit widths
// when arbitrary-precision integers are not supported by the target.
static void
generateAssignInstrs(MachineFunction &MF, SPIRVGlobalRegistry *GR,
                     MachineIRBuilder MIB,
                     DenseMap<MachineInstr *, Type *> &TargetExtConstTypes) {
  // Get access to information about available extensions
  const SPIRVSubtarget *ST =
      static_cast<const SPIRVSubtarget *>(&MIB.getMF().getSubtarget());

  MachineRegisterInfo &MRI = MF.getRegInfo();
  SmallVector<MachineInstr *, 10> ToErase;
  // Maps a duplicate G_CONSTANT to the register that first defined the same
  // constant value, so its users can be redirected before erasure.
  DenseMap<MachineInstr *, Register> RegsAlreadyAddedToDT;

  // No widening is needed when one of these extensions provides support for
  // arbitrary-width integers.
  bool IsExtendedInts =
      ST->canUseExtension(
          SPIRV::Extension::SPV_ALTERA_arbitrary_precision_integers) ||
      ST->canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions) ||
      ST->canUseExtension(SPIRV::Extension::SPV_INTEL_int4);

  // Walk blocks in post-order and instructions bottom-up, so that users are
  // visited before the instructions that define their operands.
  for (MachineBasicBlock *MBB : post_order(&MF)) {
    if (MBB->empty())
      continue;

    bool ReachedBegin = false;
    for (auto MII = std::prev(MBB->end()), Begin = MBB->begin();
         !ReachedBegin;) {
      MachineInstr &MI = *MII;
      unsigned MIOp = MI.getOpcode();

      if (!IsExtendedInts) {
        // validate bit width of scalar registers and constant immediates
        for (auto &MOP : MI.operands()) {
          if (MOP.isReg())
            widenScalarType(MOP.getReg(), MRI);
          else if (MOP.isCImm())
            widenCImmType(MOP);
        }
      }

      if (isSpvIntrinsic(MI, Intrinsic::spv_assign_ptr_type)) {
        // Operand 1: target register, operand 2: pointee type metadata,
        // operand 3: address space immediate.
        Register Reg = MI.getOperand(1).getReg();
        MIB.setInsertPt(*MI.getParent(), MI.getIterator());
        Type *ElementTy = getMDOperandAsType(MI.getOperand(2).getMetadata(), 0);
        SPIRVTypeInst AssignedPtrType = GR->getOrCreateSPIRVPointerType(
            ElementTy, MI,
            addressSpaceToStorageClass(MI.getOperand(3).getImm(), *ST));
        MachineInstr *Def = MRI.getVRegDef(Reg);
        assert(Def && "Expecting an instruction that defines the register");
        // G_GLOBAL_VALUE already has type info.
        if (Def->getOpcode() != TargetOpcode::G_GLOBAL_VALUE)
          updateRegType(Reg, nullptr, AssignedPtrType, GR, MIB,
                        MF.getRegInfo());
        ToErase.push_back(&MI);
      } else if (isSpvIntrinsic(MI, Intrinsic::spv_assign_type)) {
        Register Reg = MI.getOperand(1).getReg();
        Type *Ty = getMDOperandAsType(MI.getOperand(2).getMetadata(), 0);
        MachineInstr *Def = MRI.getVRegDef(Reg);
        assert(Def && "Expecting an instruction that defines the register");
        // G_GLOBAL_VALUE already has type info.
        if (Def->getOpcode() != TargetOpcode::G_GLOBAL_VALUE)
          updateRegType(Reg, Ty, nullptr, GR, MIB, MF.getRegInfo());
        ToErase.push_back(&MI);
      } else if (MIOp == TargetOpcode::FAKE_USE && MI.getNumOperands() > 0) {
        MachineInstr *MdMI = MI.getPrevNode();
        if (MdMI && isSpvIntrinsic(*MdMI, Intrinsic::spv_value_md)) {
          // It's an internal service info from before IRTranslator passes.
          // Record value name/type attributes only when all FAKE_USE operands
          // resolve to the same defining instruction.
          MachineInstr *Def = getVRegDef(MRI, MI.getOperand(0).getReg());
          for (unsigned I = 1, E = MI.getNumOperands(); I != E && Def; ++I)
            if (getVRegDef(MRI, MI.getOperand(I).getReg()) != Def)
              Def = nullptr;
          if (Def) {
            const MDNode *MD = MdMI->getOperand(1).getMetadata();
            StringRef ValueName =
                cast<MDString>(MD->getOperand(1))->getString();
            const MDNode *TypeMD = cast<MDNode>(MD->getOperand(0));
            Type *ValueTy = getMDOperandAsType(TypeMD, 0);
            GR->addValueAttrs(Def, std::make_pair(ValueTy, ValueName.str()));
          }
          ToErase.push_back(MdMI);
        }
        ToErase.push_back(&MI);
      } else if (MIOp == TargetOpcode::G_CONSTANT ||
                 MIOp == TargetOpcode::G_FCONSTANT ||
                 MIOp == TargetOpcode::G_BUILD_VECTOR) {
        // %rc = G_CONSTANT ty Val
        // Ensure %rc has a valid SPIR-V type assigned in the Global Registry.
        Register Reg = MI.getOperand(0).getReg();
        bool NeedAssignType = !GR->getSPIRVTypeForVReg(Reg);
        Type *Ty = nullptr;
        if (MIOp == TargetOpcode::G_CONSTANT) {
          // Target-extension constants keep the type recorded earlier by
          // addConstantsToTrack() rather than the immediate's integer type.
          auto TargetExtIt = TargetExtConstTypes.find(&MI);
          Ty = TargetExtIt == TargetExtConstTypes.end()
                   ? MI.getOperand(1).getCImm()->getType()
                   : TargetExtIt->second;
          const ConstantInt *OpCI = MI.getOperand(1).getCImm();
          // TODO: we may wish to analyze here if OpCI is zero and LLT RegType =
          // MRI.getType(Reg); RegType.isPointer() is true, so that we observe
          // at this point not i64/i32 constant but null pointer in the
          // corresponding address space of RegType.getAddressSpace(). This may
          // help to successfully validate the case when a OpConstantComposite's
          // constituent has type that does not match Result Type of
          // OpConstantComposite (see, for example,
          // pointers/PtrCast-null-in-OpSpecConstantOp.ll).
          Register PrimaryReg = GR->find(OpCI, &MF);
          if (!PrimaryReg.isValid()) {
            GR->add(OpCI, &MI);
          } else if (PrimaryReg != Reg &&
                     MRI.getType(Reg) == MRI.getType(PrimaryReg)) {
            // A register with the same constant already exists: deduplicate,
            // but only when the register classes are compatible.
            auto *RCReg = MRI.getRegClassOrNull(Reg);
            auto *RCPrimary = MRI.getRegClassOrNull(PrimaryReg);
            if (!RCReg || RCPrimary == RCReg) {
              RegsAlreadyAddedToDT[&MI] = PrimaryReg;
              ToErase.push_back(&MI);
              NeedAssignType = false;
            }
          }
        } else if (MIOp == TargetOpcode::G_FCONSTANT) {
          Ty = MI.getOperand(1).getFPImm()->getType();
        } else {
          assert(MIOp == TargetOpcode::G_BUILD_VECTOR);
          // Deduce the vector type from the first element: a constant's LLVM
          // type, or the LLVM type backing an already-assigned SPIR-V type.
          Type *ElemTy = nullptr;
          MachineInstr *ElemMI = MRI.getVRegDef(MI.getOperand(1).getReg());
          assert(ElemMI);

          if (ElemMI->getOpcode() == TargetOpcode::G_CONSTANT) {
            ElemTy = ElemMI->getOperand(1).getCImm()->getType();
          } else if (ElemMI->getOpcode() == TargetOpcode::G_FCONSTANT) {
            ElemTy = ElemMI->getOperand(1).getFPImm()->getType();
          } else {
            if (SPIRVTypeInst ElemSpvType =
                    GR->getSPIRVTypeForVReg(MI.getOperand(1).getReg(), &MF))
              ElemTy = const_cast<Type *>(GR->getTypeForSPIRVType(ElemSpvType));
          }
          if (ElemTy)
            Ty = VectorType::get(
                ElemTy, MI.getNumExplicitOperands() - MI.getNumExplicitDefs(),
                false);
          else
            NeedAssignType = false;
        }
        if (NeedAssignType)
          updateRegType(Reg, Ty, nullptr, GR, MIB, MRI);
      } else if (MIOp == TargetOpcode::G_GLOBAL_VALUE) {
        propagateSPIRVType(&MI, GR, MRI, MIB);
      }

      if (MII == Begin)
        ReachedBegin = true;
      else
        --MII;
    }
  }
  // Erase the processed intrinsics and deduplicated constants, redirecting
  // users of a duplicate to the primary register.
  for (MachineInstr *MI : ToErase) {
    auto It = RegsAlreadyAddedToDT.find(MI);
    if (It != RegsAlreadyAddedToDT.end())
      MRI.replaceRegWith(MI->getOperand(0).getReg(), It->second);
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }

  // Address the case when IRTranslator introduces instructions with new
  // registers without associated SPIRV type.
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      switch (MI.getOpcode()) {
      case TargetOpcode::G_TRUNC:
      case TargetOpcode::G_ANYEXT:
      case TargetOpcode::G_SEXT:
      case TargetOpcode::G_ZEXT:
      case TargetOpcode::G_PTRTOINT:
      case TargetOpcode::COPY:
      case TargetOpcode::G_ADDRSPACE_CAST:
        propagateSPIRVType(&MI, GR, MRI, MIB);
        break;
      }
    }
  }
}
648
649static void processInstrsWithTypeFolding(MachineFunction &MF,
650 SPIRVGlobalRegistry *GR,
651 MachineIRBuilder MIB) {
652 MachineRegisterInfo &MRI = MF.getRegInfo();
653 for (MachineBasicBlock &MBB : MF)
654 for (MachineInstr &MI : MBB)
655 if (isTypeFoldingSupported(Opcode: MI.getOpcode()))
656 processInstr(MI, MIB, MRI, GR, KnownResType: nullptr);
657}
658
// Walks the operands of an INLINEASM machine instruction, skipping metadata
// operands and the per-group operand-descriptor immediates. Returns the
// defined register, if any. When Ops is provided, the indices of all other
// (input) operands are collected into it; when Ops is null, the function
// returns as soon as the first def is found.
static Register
collectInlineAsmInstrOperands(MachineInstr *MI,
                              SmallVector<unsigned, 4> *Ops = nullptr) {
  Register DefReg;
  unsigned StartOp = InlineAsm::MIOp_FirstOperand,
           AsmDescOp = InlineAsm::MIOp_FirstOperand;
  for (unsigned Idx = StartOp, MISz = MI->getNumOperands(); Idx != MISz;
       ++Idx) {
    const MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isMetadata())
      continue;
    if (Idx == AsmDescOp && MO.isImm()) {
      // compute the index of the next operand descriptor
      const InlineAsm::Flag F(MO.getImm());
      AsmDescOp += 1 + F.getNumOperandRegisters();
      continue;
    }
    if (MO.isReg() && MO.isDef()) {
      if (!Ops)
        return MO.getReg();
      else
        DefReg = MO.getReg();
    } else if (Ops) {
      Ops->push_back(Idx);
    }
  }
  return DefReg;
}
687
// Lowers pairs of (llvm.spv.inline_asm intrinsic, INLINEASM instruction) to
// the SPV_INTEL_inline_assembly instruction set: one OpAsmTargetINTEL per
// function, one OpAsmINTEL per asm snippet, and an OpAsmCallINTEL call site.
// ToProcess is expected to contain the pairs in (intrinsic, INLINEASM) order.
static void
insertInlineAsmProcess(MachineFunction &MF, SPIRVGlobalRegistry *GR,
                       const SPIRVSubtarget &ST, MachineIRBuilder MIRBuilder,
                       const SmallVector<MachineInstr *> &ToProcess) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register AsmTargetReg;
  for (unsigned i = 0, Sz = ToProcess.size(); i + 1 < Sz; i += 2) {
    MachineInstr *I1 = ToProcess[i], *I2 = ToProcess[i + 1];
    assert(isSpvIntrinsic(*I1, Intrinsic::spv_inline_asm) && I2->isInlineAsm());
    MIRBuilder.setInsertPt(*I2->getParent(), *I2);

    if (!AsmTargetReg.isValid()) {
      // define vendor specific assembly target or dialect
      AsmTargetReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
      MRI.setRegClass(AsmTargetReg, &SPIRV::iIDRegClass);
      auto AsmTargetMIB =
          MIRBuilder.buildInstr(SPIRV::OpAsmTargetINTEL).addDef(AsmTargetReg);
      addStringImm(ST.getTargetTripleAsStr(), AsmTargetMIB);
      GR->add(AsmTargetMIB.getInstr(), AsmTargetMIB);
    }

    // create types
    // Operand 1 of the intrinsic carries the asm snippet's function type.
    const MDNode *IAMD = I1->getOperand(1).getMetadata();
    FunctionType *FTy = cast<FunctionType>(getMDOperandAsType(IAMD, 0));
    SmallVector<SPIRVTypeInst, 4> ArgTypes;
    for (const auto &ArgTy : FTy->params())
      ArgTypes.push_back(GR->getOrCreateSPIRVType(
          ArgTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true));
    SPIRVTypeInst RetType =
        GR->getOrCreateSPIRVType(FTy->getReturnType(), MIRBuilder,
                                 SPIRV::AccessQualifier::ReadWrite, true);
    SPIRVTypeInst FuncType = GR->getOrCreateOpTypeFunctionWithArgs(
        FTy, RetType, ArgTypes, MIRBuilder);

    // define vendor specific assembly instructions string
    Register AsmReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
    MRI.setRegClass(AsmReg, &SPIRV::iIDRegClass);
    auto AsmMIB = MIRBuilder.buildInstr(SPIRV::OpAsmINTEL)
                      .addDef(AsmReg)
                      .addUse(GR->getSPIRVTypeID(RetType))
                      .addUse(GR->getSPIRVTypeID(FuncType))
                      .addUse(AsmTargetReg);
    // inline asm string:
    addStringImm(I2->getOperand(InlineAsm::MIOp_AsmString).getSymbolName(),
                 AsmMIB);
    // inline asm constraint string:
    addStringImm(cast<MDString>(I1->getOperand(2).getMetadata()->getOperand(0))
                     ->getString(),
                 AsmMIB);
    GR->add(AsmMIB.getInstr(), AsmMIB);

    // calls the inline assembly instruction
    unsigned ExtraInfo = I2->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
    if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
      MIRBuilder.buildInstr(SPIRV::OpDecorate)
          .addUse(AsmReg)
          .addImm(static_cast<uint32_t>(SPIRV::Decoration::SideEffectsINTEL));

    Register DefReg = collectInlineAsmInstrOperands(I2);
    if (!DefReg.isValid()) {
      // No result register: give the call a void-typed placeholder result.
      DefReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
      MRI.setRegClass(DefReg, &SPIRV::iIDRegClass);
      SPIRVTypeInst VoidType = GR->getOrCreateSPIRVType(
          Type::getVoidTy(MF.getFunction().getContext()), MIRBuilder,
          SPIRV::AccessQualifier::ReadWrite, true);
      GR->assignSPIRVTypeToVReg(VoidType, DefReg, MF);
    }

    auto AsmCall = MIRBuilder.buildInstr(SPIRV::OpAsmCallINTEL)
                       .addDef(DefReg)
                       .addUse(GR->getSPIRVTypeID(RetType))
                       .addUse(AsmReg);
    // Operands 3+ of the intrinsic are the asm call's input registers.
    for (unsigned IntrIdx = 3; IntrIdx < I1->getNumOperands(); ++IntrIdx)
      AsmCall.addUse(I1->getOperand(IntrIdx).getReg());
  }
  for (MachineInstr *MI : ToProcess) {
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }
}
768
769static void insertInlineAsm(MachineFunction &MF, SPIRVGlobalRegistry *GR,
770 const SPIRVSubtarget &ST,
771 MachineIRBuilder MIRBuilder) {
772 SmallVector<MachineInstr *> ToProcess;
773 for (MachineBasicBlock &MBB : MF) {
774 for (MachineInstr &MI : MBB) {
775 if (isSpvIntrinsic(MI, IntrinsicID: Intrinsic::spv_inline_asm) ||
776 MI.getOpcode() == TargetOpcode::INLINEASM)
777 ToProcess.push_back(Elt: &MI);
778 }
779 }
780 if (ToProcess.size() == 0)
781 return;
782
783 if (!ST.canUseExtension(E: SPIRV::Extension::SPV_INTEL_inline_assembly))
784 report_fatal_error(reason: "Inline assembly instructions require the "
785 "following SPIR-V extension: SPV_INTEL_inline_assembly",
786 gen_crash_diag: false);
787
788 insertInlineAsmProcess(MF, GR, ST, MIRBuilder, ToProcess);
789}
790
// Reinterprets the bits of a 32-bit float as a SPIR-V literal word.
//
// Uses memcpy rather than a union: reading a union member other than the one
// last written (type punning) is undefined behavior in C++, while memcpy
// between identically-sized trivially-copyable objects is well-defined and
// compiles down to a plain register move.
static uint32_t convertFloatToSPIRVWord(float F) {
  static_assert(sizeof(float) == sizeof(uint32_t),
                "SPIR-V words are 32 bits wide");
  uint32_t Word;
  std::memcpy(&Word, &F, sizeof(Word));
  return Word;
}
799
// Lowers the decoration service intrinsics (spv_assign_decoration,
// spv_assign_aliasing_decoration, spv_assign_fpmaxerror_decoration) into the
// corresponding OpDecorate instructions and erases the intrinsics.
static void insertSpirvDecorations(MachineFunction &MF, SPIRVGlobalRegistry *GR,
                                   MachineIRBuilder MIB) {
  const SPIRVSubtarget &ST = cast<SPIRVSubtarget>(MIB.getMF().getSubtarget());
  SmallVector<MachineInstr *, 10> ToErase;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (!isSpvIntrinsic(MI, Intrinsic::spv_assign_decoration) &&
          !isSpvIntrinsic(MI, Intrinsic::spv_assign_aliasing_decoration) &&
          !isSpvIntrinsic(MI, Intrinsic::spv_assign_fpmaxerror_decoration))
        continue;
      // Decorations are emitted right after the intrinsic they annotate.
      MIB.setInsertPt(*MI.getParent(), MI.getNextNode());
      if (isSpvIntrinsic(MI, Intrinsic::spv_assign_decoration)) {
        // Generic decoration: the metadata operand fully describes it.
        buildOpSpirvDecorations(MI.getOperand(1).getReg(), MIB,
                                MI.getOperand(2).getMetadata(), ST);
      } else if (isSpvIntrinsic(MI,
                                Intrinsic::spv_assign_fpmaxerror_decoration)) {
        // The max-error float is encoded as a raw 32-bit word literal.
        ConstantFP *OpV = mdconst::dyn_extract<ConstantFP>(
            MI.getOperand(2).getMetadata()->getOperand(0));
        uint32_t OpValue =
            convertFloatToSPIRVWord(OpV->getValueAPF().convertToFloat());

        buildOpDecorate(MI.getOperand(1).getReg(), MIB,
                        SPIRV::Decoration::FPMaxErrorDecorationINTEL,
                        {OpValue});
      } else {
        // Memory-aliasing decoration (alias scope / noalias metadata).
        GR->buildMemAliasingOpDecorate(MI.getOperand(1).getReg(), MIB,
                                       MI.getOperand(2).getImm(),
                                       MI.getOperand(3).getMetadata());
      }

      ToErase.push_back(&MI);
    }
  }
  for (MachineInstr *MI : ToErase) {
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }
}
838
839// LLVM allows the switches to use registers as cases, while SPIR-V required
840// those to be immediate values. This function replaces such operands with the
841// equivalent immediate constant.
842static void processSwitchesConstants(MachineFunction &MF,
843 SPIRVGlobalRegistry *GR,
844 MachineIRBuilder MIB) {
845 MachineRegisterInfo &MRI = MF.getRegInfo();
846 for (MachineBasicBlock &MBB : MF) {
847 for (MachineInstr &MI : MBB) {
848 if (!isSpvIntrinsic(MI, IntrinsicID: Intrinsic::spv_switch))
849 continue;
850
851 SmallVector<MachineOperand, 8> NewOperands;
852 NewOperands.push_back(Elt: MI.getOperand(i: 0)); // Opcode
853 NewOperands.push_back(Elt: MI.getOperand(i: 1)); // Condition
854 NewOperands.push_back(Elt: MI.getOperand(i: 2)); // Default
855 for (unsigned i = 3; i < MI.getNumOperands(); i += 2) {
856 Register Reg = MI.getOperand(i).getReg();
857 MachineInstr *ConstInstr = getDefInstrMaybeConstant(ConstReg&: Reg, MRI: &MRI);
858 NewOperands.push_back(
859 Elt: MachineOperand::CreateCImm(CI: ConstInstr->getOperand(i: 1).getCImm()));
860
861 NewOperands.push_back(Elt: MI.getOperand(i: i + 1));
862 }
863
864 assert(MI.getNumOperands() == NewOperands.size());
865 while (MI.getNumOperands() > 0)
866 MI.removeOperand(OpNo: 0);
867 for (auto &MO : NewOperands)
868 MI.addOperand(Op: MO);
869 }
870 }
871}
872
873// Some instructions are used during CodeGen but should never be emitted.
874// Cleaning up those.
875static void cleanupHelperInstructions(MachineFunction &MF,
876 SPIRVGlobalRegistry *GR) {
877 SmallVector<MachineInstr *, 8> ToEraseMI;
878 for (MachineBasicBlock &MBB : MF) {
879 for (MachineInstr &MI : MBB) {
880 if (isSpvIntrinsic(MI, IntrinsicID: Intrinsic::spv_track_constant) ||
881 MI.getOpcode() == TargetOpcode::G_BRINDIRECT)
882 ToEraseMI.push_back(Elt: &MI);
883 }
884 }
885
886 for (MachineInstr *MI : ToEraseMI) {
887 GR->invalidateMachineInstr(MI);
888 MI->eraseFromParent();
889 }
890}
891
892// Find all usages of G_BLOCK_ADDR in our intrinsics and replace those
893// operands/registers by the actual MBB it references.
894static void processBlockAddr(MachineFunction &MF, SPIRVGlobalRegistry *GR,
895 MachineIRBuilder MIB) {
896 // Gather the reverse-mapping BB -> MBB.
897 DenseMap<const BasicBlock *, MachineBasicBlock *> BB2MBB;
898 for (MachineBasicBlock &MBB : MF)
899 BB2MBB[MBB.getBasicBlock()] = &MBB;
900
901 // Gather instructions requiring patching. For now, only those can use
902 // G_BLOCK_ADDR.
903 SmallVector<MachineInstr *, 8> InstructionsToPatch;
904 for (MachineBasicBlock &MBB : MF) {
905 for (MachineInstr &MI : MBB) {
906 if (isSpvIntrinsic(MI, IntrinsicID: Intrinsic::spv_switch) ||
907 isSpvIntrinsic(MI, IntrinsicID: Intrinsic::spv_loop_merge) ||
908 isSpvIntrinsic(MI, IntrinsicID: Intrinsic::spv_selection_merge))
909 InstructionsToPatch.push_back(Elt: &MI);
910 }
911 }
912
913 // For each instruction to fix, we replace all the G_BLOCK_ADDR operands by
914 // the actual MBB it references. Once those references have been updated, we
915 // can cleanup remaining G_BLOCK_ADDR references.
916 SmallPtrSet<MachineBasicBlock *, 8> ClearAddressTaken;
917 SmallPtrSet<MachineInstr *, 8> ToEraseMI;
918 MachineRegisterInfo &MRI = MF.getRegInfo();
919 for (MachineInstr *MI : InstructionsToPatch) {
920 SmallVector<MachineOperand, 8> NewOps;
921 for (unsigned i = 0; i < MI->getNumOperands(); ++i) {
922 // The operand is not a register, keep as-is.
923 if (!MI->getOperand(i).isReg()) {
924 NewOps.push_back(Elt: MI->getOperand(i));
925 continue;
926 }
927
928 Register Reg = MI->getOperand(i).getReg();
929 MachineInstr *BuildMBB = MRI.getVRegDef(Reg);
930 // The register is not the result of G_BLOCK_ADDR, keep as-is.
931 if (!BuildMBB || BuildMBB->getOpcode() != TargetOpcode::G_BLOCK_ADDR) {
932 NewOps.push_back(Elt: MI->getOperand(i));
933 continue;
934 }
935
936 assert(BuildMBB && BuildMBB->getOpcode() == TargetOpcode::G_BLOCK_ADDR &&
937 BuildMBB->getOperand(1).isBlockAddress() &&
938 BuildMBB->getOperand(1).getBlockAddress());
939 BasicBlock *BB =
940 BuildMBB->getOperand(i: 1).getBlockAddress()->getBasicBlock();
941 auto It = BB2MBB.find(Val: BB);
942 if (It == BB2MBB.end())
943 report_fatal_error(reason: "cannot find a machine basic block by a basic block "
944 "in a switch statement");
945 MachineBasicBlock *ReferencedBlock = It->second;
946 NewOps.push_back(Elt: MachineOperand::CreateMBB(MBB: ReferencedBlock));
947
948 ClearAddressTaken.insert(Ptr: ReferencedBlock);
949 ToEraseMI.insert(Ptr: BuildMBB);
950 }
951
952 // Replace the operands.
953 assert(MI->getNumOperands() == NewOps.size());
954 while (MI->getNumOperands() > 0)
955 MI->removeOperand(OpNo: 0);
956 for (auto &MO : NewOps)
957 MI->addOperand(Op: MO);
958
959 if (MachineInstr *Next = MI->getNextNode()) {
960 if (isSpvIntrinsic(MI: *Next, IntrinsicID: Intrinsic::spv_track_constant)) {
961 ToEraseMI.insert(Ptr: Next);
962 Next = MI->getNextNode();
963 }
964 if (Next && Next->getOpcode() == TargetOpcode::G_BRINDIRECT)
965 ToEraseMI.insert(Ptr: Next);
966 }
967 }
968
969 // BlockAddress operands were used to keep information between passes,
970 // let's undo the "address taken" status to reflect that Succ doesn't
971 // actually correspond to an IR-level basic block.
972 for (MachineBasicBlock *Succ : ClearAddressTaken)
973 Succ->setAddressTakenIRBlock(nullptr);
974
975 // If we just delete G_BLOCK_ADDR instructions with BlockAddress operands,
976 // this leaves their BasicBlock counterparts in a "address taken" status. This
977 // would make AsmPrinter to generate a series of unneeded labels of a "Address
978 // of block that was removed by CodeGen" kind. Let's first ensure that we
979 // don't have a dangling BlockAddress constants by zapping the BlockAddress
980 // nodes, and only after that proceed with erasing G_BLOCK_ADDR instructions.
981 Constant *Replacement =
982 ConstantInt::get(Ty: Type::getInt32Ty(C&: MF.getFunction().getContext()), V: 1);
983 for (MachineInstr *BlockAddrI : ToEraseMI) {
984 if (BlockAddrI->getOpcode() == TargetOpcode::G_BLOCK_ADDR) {
985 BlockAddress *BA = const_cast<BlockAddress *>(
986 BlockAddrI->getOperand(i: 1).getBlockAddress());
987 BA->replaceAllUsesWith(
988 V: ConstantExpr::getIntToPtr(C: Replacement, Ty: BA->getType()));
989 BA->destroyConstant();
990 }
991 GR->invalidateMachineInstr(MI: BlockAddrI);
992 BlockAddrI->eraseFromParent();
993 }
994}
995
996static bool isImplicitFallthrough(MachineBasicBlock &MBB) {
997 if (MBB.empty())
998 return true;
999
1000 // Branching SPIR-V intrinsics are not detected by this generic method.
1001 // Thus, we can only trust negative result.
1002 if (!MBB.canFallThrough())
1003 return false;
1004
1005 // Otherwise, we must manually check if we have a SPIR-V intrinsic which
1006 // prevent an implicit fallthrough.
1007 for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
1008 It != E; ++It) {
1009 if (isSpvIntrinsic(MI: *It, IntrinsicID: Intrinsic::spv_switch))
1010 return false;
1011 }
1012 return true;
1013}
1014
1015static void removeImplicitFallthroughs(MachineFunction &MF,
1016 MachineIRBuilder MIB) {
1017 // It is valid for MachineBasicBlocks to not finish with a branch instruction.
1018 // In such cases, they will simply fallthrough their immediate successor.
1019 for (MachineBasicBlock &MBB : MF) {
1020 if (!isImplicitFallthrough(MBB))
1021 continue;
1022
1023 assert(MBB.succ_size() == 1);
1024 MIB.setInsertPt(MBB, II: MBB.end());
1025 MIB.buildBr(Dest&: **MBB.successors().begin());
1026 }
1027}
1028
// Pass entry point: prepares the function for legalization by assigning
// SPIR-V types, registering constants, normalizing control flow and lowering
// the helper intrinsics produced during IR translation. The phases below are
// order-dependent.
bool SPIRVPreLegalizer::runOnMachineFunction(MachineFunction &MF) {
  // Initialize the type registry.
  const SPIRVSubtarget &ST = MF.getSubtarget<SPIRVSubtarget>();
  SPIRVGlobalRegistry *GR = ST.getSPIRVGlobalRegistry();
  GR->setCurrentFunc(MF);
  MachineIRBuilder MIB(MF);
  // a registry of target extension constants
  DenseMap<MachineInstr *, Type *> TargetExtConstTypes;
  // to keep record of tracked constants
  addConstantsToTrack(MF, GR, STI: ST, TargetExtConstTypes);
  foldConstantsIntoIntrinsics(MF, GR, MIB);
  insertBitcasts(MF, GR, MIB);
  generateAssignInstrs(MF, GR, MIB, TargetExtConstTypes);

  // Switch and block-address patching must run before the helper intrinsics
  // they consume (spv_track_constant, G_BRINDIRECT) are cleaned up.
  processSwitchesConstants(MF, GR, MIB);
  processBlockAddr(MF, GR, MIB);
  cleanupHelperInstructions(MF, GR);

  processInstrsWithTypeFolding(MF, GR, MIB);
  removeImplicitFallthroughs(MF, MIB);
  insertSpirvDecorations(MF, GR, MIB);
  insertInlineAsm(MF, GR, ST, MIRBuilder: MIB);
  lowerBitcasts(MF, GR, MIB);

  // The steps above always modify the function.
  return true;
}
1055
// Register the pass with the legacy pass manager infrastructure.
INITIALIZE_PASS(SPIRVPreLegalizer, DEBUG_TYPE, "SPIRV pre legalizer", false,
                false)

// Unique identity token used by the pass registry.
char SPIRVPreLegalizer::ID = 0;

// Factory used by the SPIR-V target to add this pass to the codegen pipeline.
FunctionPass *llvm::createSPIRVPreLegalizerPass() {
  return new SPIRVPreLegalizer();
}
1064