//===-- SPIRVPreLegalizer.cpp - prepare IR for legalization -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The pass prepares IR for legalization: it assigns SPIR-V types to registers
// and removes the intrinsics that held these types during IR translation.
// It also processes constants and registers them in GR to avoid duplication.
//
//===----------------------------------------------------------------------===//

#include "SPIRV.h"
#include "SPIRVSubtarget.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IntrinsicsSPIRV.h"

#define DEBUG_TYPE "spirv-prelegalizer"

using namespace llvm;

namespace {
class SPIRVPreLegalizer : public MachineFunctionPass {
public:
  static char ID;
  SPIRVPreLegalizer() : MachineFunctionPass(ID) {}
  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
};
} // namespace

void SPIRVPreLegalizer::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addPreserved<GISelValueTrackingAnalysisLegacy>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

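// Find all llvm.spv.track.constant intrinsics, register the constants they
// track in the global registry (GR), and rewire references to already
// registered duplicates so that a single register per constant is reused.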
static void
addConstantsToTrack(MachineFunction &MF, SPIRVGlobalRegistry *GR,
                    const SPIRVSubtarget &STI,
                    DenseMap<MachineInstr *, Type *> &TargetExtConstTypes) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DenseMap<MachineInstr *, Register> RegsAlreadyAddedToDT;
  SmallVector<MachineInstr *, 10> ToErase, ToEraseComposites;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (!isSpvIntrinsic(MI, Intrinsic::spv_track_constant))
        continue;
      ToErase.push_back(&MI);
      Register SrcReg = MI.getOperand(2).getReg();
      auto *Const =
          cast<Constant>(cast<ConstantAsMetadata>(
                             MI.getOperand(3).getMetadata()->getOperand(0))
                             ->getValue());
      if (auto *GV = dyn_cast<GlobalValue>(Const)) {
        Register Reg = GR->find(GV, &MF);
        if (!Reg.isValid()) {
          GR->add(GV, MRI.getVRegDef(SrcReg));
          GR->addGlobalObject(GV, &MF, SrcReg);
        } else
          RegsAlreadyAddedToDT[&MI] = Reg;
      } else {
        Register Reg = GR->find(Const, &MF);
        if (!Reg.isValid()) {
          if (auto *ConstVec = dyn_cast<ConstantDataVector>(Const)) {
            auto *BuildVec = MRI.getVRegDef(SrcReg);
            assert(BuildVec &&
                   BuildVec->getOpcode() == TargetOpcode::G_BUILD_VECTOR);
            GR->add(Const, BuildVec);
            for (unsigned i = 0; i < ConstVec->getNumElements(); ++i) {
              // Ensure that OpConstantComposite reuses a constant when it's
              // already created and available in the same machine function.
              Constant *ElemConst = ConstVec->getElementAsConstant(i);
              Register ElemReg = GR->find(ElemConst, &MF);
              if (!ElemReg.isValid())
                GR->add(ElemConst,
                        MRI.getVRegDef(BuildVec->getOperand(1 + i).getReg()));
              else
                BuildVec->getOperand(1 + i).setReg(ElemReg);
            }
          }
          if (Const->getType()->isTargetExtTy()) {
            // Remember the association so that we can restore it when
            // assigning types.
            MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
            if (SrcMI)
              GR->add(Const, SrcMI);
            if (SrcMI && (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT ||
                          SrcMI->getOpcode() == TargetOpcode::G_IMPLICIT_DEF))
              TargetExtConstTypes[SrcMI] = Const->getType();
            if (Const->isNullValue()) {
              MachineBasicBlock &DepMBB = MF.front();
              MachineIRBuilder MIB(DepMBB, DepMBB.getFirstNonPHI());
              SPIRVType *ExtType = GR->getOrCreateSPIRVType(
                  Const->getType(), MIB, SPIRV::AccessQualifier::ReadWrite,
                  true);
              assert(SrcMI && "Expected source instruction to be valid");
              SrcMI->setDesc(STI.getInstrInfo()->get(SPIRV::OpConstantNull));
              SrcMI->addOperand(MachineOperand::CreateReg(
                  GR->getSPIRVTypeID(ExtType), false));
            }
          }
        } else {
          RegsAlreadyAddedToDT[&MI] = Reg;
          // This MI is unused and will be removed. If the MI uses
          // const_composite, it will be unused and should be removed too.
          assert(MI.getOperand(2).isReg() && "Reg operand is expected");
          MachineInstr *SrcMI = MRI.getVRegDef(MI.getOperand(2).getReg());
          if (SrcMI && isSpvIntrinsic(*SrcMI, Intrinsic::spv_const_composite))
            ToEraseComposites.push_back(SrcMI);
        }
      }
    }
  }
  for (MachineInstr *MI : ToErase) {
    Register Reg = MI->getOperand(2).getReg();
    auto It = RegsAlreadyAddedToDT.find(MI);
    if (It != RegsAlreadyAddedToDT.end())
      Reg = It->second;
    auto *RC = MRI.getRegClassOrNull(MI->getOperand(0).getReg());
    if (!MRI.getRegClassOrNull(Reg) && RC)
      MRI.setRegClass(Reg, RC);
    MRI.replaceRegWith(MI->getOperand(0).getReg(), Reg);
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }
  for (MachineInstr *MI : ToEraseComposites) {
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }
}

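// Lower llvm.spv.assign.name intrinsics: emit an OpName instruction for each
// non-empty value name and erase the intrinsic calls afterwards.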
static void foldConstantsIntoIntrinsics(MachineFunction &MF,
                                        SPIRVGlobalRegistry *GR,
                                        MachineIRBuilder MIB) {
  SmallVector<MachineInstr *, 64> ToErase;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (!isSpvIntrinsic(MI, Intrinsic::spv_assign_name))
        continue;
      const MDNode *MD = MI.getOperand(2).getMetadata();
      StringRef ValueName = cast<MDString>(MD->getOperand(0))->getString();
      if (ValueName.size() > 0) {
        MIB.setInsertPt(*MI.getParent(), MI);
        buildOpName(MI.getOperand(1).getReg(), ValueName, MIB);
      }
      ToErase.push_back(&MI);
    }
    for (MachineInstr *MI : ToErase) {
      GR->invalidateMachineInstr(MI);
      MI->eraseFromParent();
    }
    ToErase.clear();
  }
}

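// Return the llvm.spv.assign.type or llvm.spv.assign.ptr.type intrinsic that
// annotates Reg, or nullptr if there is none.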
static MachineInstr *findAssignTypeInstr(Register Reg,
                                         MachineRegisterInfo *MRI) {
  for (MachineRegisterInfo::use_instr_iterator I = MRI->use_instr_begin(Reg),
                                               IE = MRI->use_instr_end();
       I != IE; ++I) {
    MachineInstr *UseMI = &*I;
    if ((isSpvIntrinsic(*UseMI, Intrinsic::spv_assign_ptr_type) ||
         isSpvIntrinsic(*UseMI, Intrinsic::spv_assign_type)) &&
        UseMI->getOperand(1).getReg() == Reg)
      return UseMI;
  }
  return nullptr;
}

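// Emit an OpBitcast from OpReg to ResVReg, or a plain COPY when the two
// registers already have the same SPIR-V type. Reports a fatal error for
// bitcast-incompatible types.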
static void buildOpBitcast(SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB,
                           Register ResVReg, Register OpReg) {
  SPIRVType *ResType = GR->getSPIRVTypeForVReg(ResVReg);
  SPIRVType *OpType = GR->getSPIRVTypeForVReg(OpReg);
  assert(ResType && OpType && "Operand types are expected");
  if (!GR->isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  MachineRegisterInfo *MRI = MIB.getMRI();
  if (!MRI->getRegClassOrNull(ResVReg))
    MRI->setRegClass(ResVReg, GR->getRegClass(ResType));
  if (ResType == OpType)
    MIB.buildInstr(TargetOpcode::COPY).addDef(ResVReg).addUse(OpReg);
  else
    MIB.buildInstr(SPIRV::OpBitcast)
        .addDef(ResVReg)
        .addUse(GR->getSPIRVTypeID(ResType))
        .addUse(OpReg);
}

// We lower G_BITCAST to OpBitcast here to avoid a MachineVerifier error.
// The verifier checks if the source and destination LLTs of a G_BITCAST are
// different, but this check is too strict for SPIR-V's typed pointers, which
// may have the same LLT but different SPIRVType (e.g. pointers to different
// pointee types). By lowering to OpBitcast here, we bypass the verifier's
// check. See discussion in https://github.com/llvm/llvm-project/pull/110270
// for more context.
//
// We also handle the llvm.spv.bitcast intrinsic here. If the source and
// destination SPIR-V types are the same, we lower it to a COPY to enable
// further optimizations like copy propagation.
static void lowerBitcasts(MachineFunction &MF, SPIRVGlobalRegistry *GR,
                          MachineIRBuilder MIB) {
  SmallVector<MachineInstr *, 16> ToErase;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (isSpvIntrinsic(MI, Intrinsic::spv_bitcast)) {
        Register DstReg = MI.getOperand(0).getReg();
        Register SrcReg = MI.getOperand(2).getReg();
        SPIRVType *DstType = GR->getSPIRVTypeForVReg(DstReg);
        assert(
            DstType &&
            "Expected destination SPIR-V type to have been assigned already.");
        SPIRVType *SrcType = GR->getSPIRVTypeForVReg(SrcReg);
        assert(SrcType &&
               "Expected source SPIR-V type to have been assigned already.");
        if (DstType == SrcType) {
          MIB.setInsertPt(*MI.getParent(), MI);
          MIB.buildCopy(DstReg, SrcReg);
          ToErase.push_back(&MI);
          continue;
        }
      }

      if (MI.getOpcode() != TargetOpcode::G_BITCAST)
        continue;

      MIB.setInsertPt(*MI.getParent(), MI);
      buildOpBitcast(GR, MIB, MI.getOperand(0).getReg(),
                     MI.getOperand(1).getReg());
      ToErase.push_back(&MI);
    }
  }
  for (MachineInstr *MI : ToErase) {
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }
}

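// Lower llvm.spv.ptrcast intrinsics: if the source register already has the
// requested pointer type, the cast is redundant and its uses are rewired to
// the source register; otherwise a bitcast to the assigned pointer type is
// emitted.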
static void insertBitcasts(MachineFunction &MF, SPIRVGlobalRegistry *GR,
                           MachineIRBuilder MIB) {
  // Get access to information about available extensions
  const SPIRVSubtarget *ST =
      static_cast<const SPIRVSubtarget *>(&MIB.getMF().getSubtarget());
  SmallVector<MachineInstr *, 10> ToErase;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (!isSpvIntrinsic(MI, Intrinsic::spv_ptrcast))
        continue;
      assert(MI.getOperand(2).isReg());
      MIB.setInsertPt(*MI.getParent(), MI);
      ToErase.push_back(&MI);
      Register Def = MI.getOperand(0).getReg();
      Register Source = MI.getOperand(2).getReg();
      Type *ElemTy = getMDOperandAsType(MI.getOperand(3).getMetadata(), 0);
      auto SC =
          isa<FunctionType>(ElemTy)
              ? SPIRV::StorageClass::CodeSectionINTEL
              : addressSpaceToStorageClass(MI.getOperand(4).getImm(), *ST);
      SPIRVType *AssignedPtrType =
          GR->getOrCreateSPIRVPointerType(ElemTy, MI, SC);

      // If the ptrcast would be redundant, replace all uses with the source
      // register.
      MachineRegisterInfo *MRI = MIB.getMRI();
      if (GR->getSPIRVTypeForVReg(Source) == AssignedPtrType) {
        // Erase Def's assign type instruction if we are going to replace Def.
        if (MachineInstr *AssignMI = findAssignTypeInstr(Def, MRI))
          ToErase.push_back(AssignMI);
        MRI->replaceRegWith(Def, Source);
      } else {
        if (!GR->getSPIRVTypeForVReg(Def, &MF))
          GR->assignSPIRVTypeToVReg(AssignedPtrType, Def, MF);
        MIB.buildBitcast(Def, Source);
      }
    }
  }
  for (MachineInstr *MI : ToErase) {
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }
}

// When translating a GV, the IRTranslator sometimes generates the following
// IR:
// %1 = G_GLOBAL_VALUE
// %2 = COPY %1
// %3 = G_ADDRSPACE_CAST %2
//
// or
//
// %1 = G_ZEXT %2
// G_MEMCPY ... %2 ...
//
// The new registers have no SPIRVType and no register class info.
//
// Set the SPIRVType for the GV and propagate it from the GV to the other
// instructions; also set the register classes.
static SPIRVType *propagateSPIRVType(MachineInstr *MI, SPIRVGlobalRegistry *GR,
                                     MachineRegisterInfo &MRI,
                                     MachineIRBuilder &MIB) {
  SPIRVType *SpvType = nullptr;
  assert(MI && "Machine instr is expected");
  if (MI->getOperand(0).isReg()) {
    Register Reg = MI->getOperand(0).getReg();
    SpvType = GR->getSPIRVTypeForVReg(Reg);
    if (!SpvType) {
      switch (MI->getOpcode()) {
      case TargetOpcode::G_FCONSTANT:
      case TargetOpcode::G_CONSTANT: {
        MIB.setInsertPt(*MI->getParent(), MI);
        Type *Ty = MI->getOperand(1).getCImm()->getType();
        SpvType = GR->getOrCreateSPIRVType(
            Ty, MIB, SPIRV::AccessQualifier::ReadWrite, true);
        break;
      }
      case TargetOpcode::G_GLOBAL_VALUE: {
        MIB.setInsertPt(*MI->getParent(), MI);
        const GlobalValue *Global = MI->getOperand(1).getGlobal();
        Type *ElementTy = toTypedPointer(GR->getDeducedGlobalValueType(Global));
        auto *Ty = TypedPointerType::get(ElementTy,
                                         Global->getType()->getAddressSpace());
        SpvType = GR->getOrCreateSPIRVType(
            Ty, MIB, SPIRV::AccessQualifier::ReadWrite, true);
        break;
      }
      case TargetOpcode::G_ANYEXT:
      case TargetOpcode::G_SEXT:
      case TargetOpcode::G_ZEXT: {
        if (MI->getOperand(1).isReg()) {
          if (MachineInstr *DefInstr =
                  MRI.getVRegDef(MI->getOperand(1).getReg())) {
            if (SPIRVType *Def = propagateSPIRVType(DefInstr, GR, MRI, MIB)) {
              unsigned CurrentBW = GR->getScalarOrVectorBitWidth(Def);
              unsigned ExpectedBW =
                  std::max(MRI.getType(Reg).getScalarSizeInBits(), CurrentBW);
              unsigned NumElements = GR->getScalarOrVectorComponentCount(Def);
              SpvType = GR->getOrCreateSPIRVIntegerType(ExpectedBW, MIB);
              if (NumElements > 1)
                SpvType = GR->getOrCreateSPIRVVectorType(SpvType, NumElements,
                                                         MIB, true);
            }
          }
        }
        break;
      }
      case TargetOpcode::G_PTRTOINT:
        SpvType = GR->getOrCreateSPIRVIntegerType(
            MRI.getType(Reg).getScalarSizeInBits(), MIB);
        break;
      case TargetOpcode::G_TRUNC:
      case TargetOpcode::G_ADDRSPACE_CAST:
      case TargetOpcode::G_PTR_ADD:
      case TargetOpcode::COPY: {
        MachineOperand &Op = MI->getOperand(1);
        MachineInstr *Def = Op.isReg() ? MRI.getVRegDef(Op.getReg()) : nullptr;
        if (Def)
          SpvType = propagateSPIRVType(Def, GR, MRI, MIB);
        break;
      }
      default:
        break;
      }
      if (SpvType) {
        // Check if the address space needs correction.
        LLT RegType = MRI.getType(Reg);
        if (SpvType->getOpcode() == SPIRV::OpTypePointer &&
            RegType.isPointer() &&
            storageClassToAddressSpace(GR->getPointerStorageClass(SpvType)) !=
                RegType.getAddressSpace()) {
          const SPIRVSubtarget &ST =
              MI->getParent()->getParent()->getSubtarget<SPIRVSubtarget>();
          auto TSC = addressSpaceToStorageClass(RegType.getAddressSpace(), ST);
          SpvType = GR->changePointerStorageClass(SpvType, TSC, *MI);
        }
        GR->assignSPIRVTypeToVReg(SpvType, Reg, MIB.getMF());
      }
      if (!MRI.getRegClassOrNull(Reg))
        MRI.setRegClass(Reg, SpvType ? GR->getRegClass(SpvType)
                                     : &SPIRV::iIDRegClass);
    }
  }
  return SpvType;
}

// To support the current approach and its limitations w.r.t. bit width, we
// widen a scalar register with a bit width greater than 1 to the next valid
// size, capped at a width of 128.
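// For example: 2 -> 8, 12 -> 16, 65 -> 128; 200 is capped to 128.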
static unsigned widenBitWidthToNextPow2(unsigned BitWidth) {
  if (BitWidth == 1)
    return 1; // No need to widen 1-bit values
  return std::min(std::max(1u << Log2_32_Ceil(BitWidth), 8u), 128u);
}

static void widenScalarType(Register Reg, MachineRegisterInfo &MRI) {
  LLT RegType = MRI.getType(Reg);
  if (!RegType.isScalar())
    return;
  unsigned CurrentWidth = RegType.getScalarSizeInBits();
  unsigned NewWidth = widenBitWidthToNextPow2(CurrentWidth);
  if (NewWidth != CurrentWidth)
    MRI.setType(Reg, LLT::scalar(NewWidth));
}

static void widenCImmType(MachineOperand &MOP) {
  const ConstantInt *CImmVal = MOP.getCImm();
  unsigned CurrentWidth = CImmVal->getBitWidth();
  unsigned NewWidth = widenBitWidthToNextPow2(CurrentWidth);
  if (NewWidth != CurrentWidth) {
    // Replace the immediate value with the widened version
    MOP.setCImm(ConstantInt::get(CImmVal->getType()->getContext(),
                                 CImmVal->getValue().zextOrTrunc(NewWidth)));
  }
}

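// Set the insertion point of MIB to right after Def, past any PHI and debug
// instructions.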
static void setInsertPtAfterDef(MachineIRBuilder &MIB, MachineInstr *Def) {
  MachineBasicBlock &MBB = *Def->getParent();
  MachineBasicBlock::iterator DefIt =
      Def->getNextNode() ? Def->getNextNode()->getIterator() : MBB.end();
  // Skip all the PHI and debug instructions.
  while (DefIt != MBB.end() &&
         (DefIt->isPHI() || DefIt->isDebugOrPseudoInstr()))
    DefIt = std::next(DefIt);
  MIB.setInsertPt(MBB, DefIt);
}

namespace llvm {
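// Attach a SPIR-V type to Reg: create it from Ty when SpvType is not
// provided, and fill in Reg's register class and LLT only if they are still
// unset.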
void updateRegType(Register Reg, Type *Ty, SPIRVType *SpvType,
                   SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB,
                   MachineRegisterInfo &MRI) {
  assert((Ty || SpvType) && "Either LLVM or SPIRV type is expected.");
  MachineInstr *Def = MRI.getVRegDef(Reg);
  setInsertPtAfterDef(MIB, Def);
  if (!SpvType)
    SpvType = GR->getOrCreateSPIRVType(Ty, MIB,
                                       SPIRV::AccessQualifier::ReadWrite, true);
  if (!MRI.getRegClassOrNull(Reg))
    MRI.setRegClass(Reg, GR->getRegClass(SpvType));
  if (!MRI.getType(Reg).isValid())
    MRI.setType(Reg, GR->getRegType(SpvType));
  GR->assignSPIRVTypeToVReg(SpvType, Reg, MIB.getMF());
}

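// Propagate SPIR-V types, register classes and LLTs to the use operands of
// MI, falling back to KnownResType for operands whose type is not known yet.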
void processInstr(MachineInstr &MI, MachineIRBuilder &MIB,
                  MachineRegisterInfo &MRI, SPIRVGlobalRegistry *GR,
                  SPIRVType *KnownResType) {
  MIB.setInsertPt(*MI.getParent(), MI.getIterator());
  for (auto &Op : MI.operands()) {
    if (!Op.isReg() || Op.isDef())
      continue;
    Register OpReg = Op.getReg();
    SPIRVType *SpvType = GR->getSPIRVTypeForVReg(OpReg);
    if (!SpvType && KnownResType) {
      SpvType = KnownResType;
      GR->assignSPIRVTypeToVReg(KnownResType, OpReg, *MI.getMF());
    }
    assert(SpvType);
    if (!MRI.getRegClassOrNull(OpReg))
      MRI.setRegClass(OpReg, GR->getRegClass(SpvType));
    if (!MRI.getType(OpReg).isValid())
      MRI.setType(OpReg, GR->getRegType(SpvType));
  }
}
} // namespace llvm

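// Materialize the type information collected during IR translation: walk each
// block bottom-up, widen invalid scalar bit widths (unless extended integer
// extensions are available), lower the llvm.spv.assign.type and
// llvm.spv.assign.ptr.type intrinsics, deduplicate G_CONSTANTs, assign SPIR-V
// types to constants, build_vectors and global values, and finally propagate
// types to instructions that the IRTranslator introduced with untyped
// registers.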
static void
generateAssignInstrs(MachineFunction &MF, SPIRVGlobalRegistry *GR,
                     MachineIRBuilder MIB,
                     DenseMap<MachineInstr *, Type *> &TargetExtConstTypes) {
  // Get access to information about available extensions
  const SPIRVSubtarget *ST =
      static_cast<const SPIRVSubtarget *>(&MIB.getMF().getSubtarget());

  MachineRegisterInfo &MRI = MF.getRegInfo();
  SmallVector<MachineInstr *, 10> ToErase;
  DenseMap<MachineInstr *, Register> RegsAlreadyAddedToDT;

  bool IsExtendedInts =
      ST->canUseExtension(
          SPIRV::Extension::SPV_ALTERA_arbitrary_precision_integers) ||
      ST->canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions) ||
      ST->canUseExtension(SPIRV::Extension::SPV_INTEL_int4);

  for (MachineBasicBlock *MBB : post_order(&MF)) {
    if (MBB->empty())
      continue;

    bool ReachedBegin = false;
    for (auto MII = std::prev(MBB->end()), Begin = MBB->begin();
         !ReachedBegin;) {
      MachineInstr &MI = *MII;
      unsigned MIOp = MI.getOpcode();

      if (!IsExtendedInts) {
        // validate bit width of scalar registers and constant immediates
        for (auto &MOP : MI.operands()) {
          if (MOP.isReg())
            widenScalarType(MOP.getReg(), MRI);
          else if (MOP.isCImm())
            widenCImmType(MOP);
        }
      }

      if (isSpvIntrinsic(MI, Intrinsic::spv_assign_ptr_type)) {
        Register Reg = MI.getOperand(1).getReg();
        MIB.setInsertPt(*MI.getParent(), MI.getIterator());
        Type *ElementTy = getMDOperandAsType(MI.getOperand(2).getMetadata(), 0);
        SPIRVType *AssignedPtrType = GR->getOrCreateSPIRVPointerType(
            ElementTy, MI,
            addressSpaceToStorageClass(MI.getOperand(3).getImm(), *ST));
        MachineInstr *Def = MRI.getVRegDef(Reg);
        assert(Def && "Expecting an instruction that defines the register");
        // G_GLOBAL_VALUE already has type info.
        if (Def->getOpcode() != TargetOpcode::G_GLOBAL_VALUE)
          updateRegType(Reg, nullptr, AssignedPtrType, GR, MIB,
                        MF.getRegInfo());
        ToErase.push_back(&MI);
      } else if (isSpvIntrinsic(MI, Intrinsic::spv_assign_type)) {
        Register Reg = MI.getOperand(1).getReg();
        Type *Ty = getMDOperandAsType(MI.getOperand(2).getMetadata(), 0);
        MachineInstr *Def = MRI.getVRegDef(Reg);
        assert(Def && "Expecting an instruction that defines the register");
        // G_GLOBAL_VALUE already has type info.
        if (Def->getOpcode() != TargetOpcode::G_GLOBAL_VALUE)
          updateRegType(Reg, Ty, nullptr, GR, MIB, MF.getRegInfo());
        ToErase.push_back(&MI);
      } else if (MIOp == TargetOpcode::FAKE_USE && MI.getNumOperands() > 0) {
        MachineInstr *MdMI = MI.getPrevNode();
        if (MdMI && isSpvIntrinsic(*MdMI, Intrinsic::spv_value_md)) {
          // It's internal service info from before the IRTranslator passes.
          MachineInstr *Def = getVRegDef(MRI, MI.getOperand(0).getReg());
          for (unsigned I = 1, E = MI.getNumOperands(); I != E && Def; ++I)
            if (getVRegDef(MRI, MI.getOperand(I).getReg()) != Def)
              Def = nullptr;
          if (Def) {
            const MDNode *MD = MdMI->getOperand(1).getMetadata();
            StringRef ValueName =
                cast<MDString>(MD->getOperand(1))->getString();
            const MDNode *TypeMD = cast<MDNode>(MD->getOperand(0));
            Type *ValueTy = getMDOperandAsType(TypeMD, 0);
            GR->addValueAttrs(Def, std::make_pair(ValueTy, ValueName.str()));
          }
          ToErase.push_back(MdMI);
        }
        ToErase.push_back(&MI);
      } else if (MIOp == TargetOpcode::G_CONSTANT ||
                 MIOp == TargetOpcode::G_FCONSTANT ||
                 MIOp == TargetOpcode::G_BUILD_VECTOR) {
        // %rc = G_CONSTANT ty Val
        // Ensure %rc has a valid SPIR-V type assigned in the Global Registry.
        Register Reg = MI.getOperand(0).getReg();
        bool NeedAssignType = GR->getSPIRVTypeForVReg(Reg) == nullptr;
        Type *Ty = nullptr;
        if (MIOp == TargetOpcode::G_CONSTANT) {
          auto TargetExtIt = TargetExtConstTypes.find(&MI);
          Ty = TargetExtIt == TargetExtConstTypes.end()
                   ? MI.getOperand(1).getCImm()->getType()
                   : TargetExtIt->second;
          const ConstantInt *OpCI = MI.getOperand(1).getCImm();
          // TODO: we may wish to analyze here whether OpCI is zero and whether
          // LLT RegType = MRI.getType(Reg) satisfies RegType.isPointer(), so
          // that at this point we observe not an i64/i32 constant but a null
          // pointer in the corresponding address space of
          // RegType.getAddressSpace(). This may help to successfully validate
          // the case when an OpConstantComposite's constituent has a type that
          // does not match the Result Type of the OpConstantComposite (see,
          // for example, pointers/PtrCast-null-in-OpSpecConstantOp.ll).
          Register PrimaryReg = GR->find(OpCI, &MF);
          if (!PrimaryReg.isValid()) {
            GR->add(OpCI, &MI);
          } else if (PrimaryReg != Reg &&
                     MRI.getType(Reg) == MRI.getType(PrimaryReg)) {
            auto *RCReg = MRI.getRegClassOrNull(Reg);
            auto *RCPrimary = MRI.getRegClassOrNull(PrimaryReg);
            if (!RCReg || RCPrimary == RCReg) {
              RegsAlreadyAddedToDT[&MI] = PrimaryReg;
              ToErase.push_back(&MI);
              NeedAssignType = false;
            }
          }
        } else if (MIOp == TargetOpcode::G_FCONSTANT) {
          Ty = MI.getOperand(1).getFPImm()->getType();
        } else {
          assert(MIOp == TargetOpcode::G_BUILD_VECTOR);
          Type *ElemTy = nullptr;
          MachineInstr *ElemMI = MRI.getVRegDef(MI.getOperand(1).getReg());
          assert(ElemMI);

          if (ElemMI->getOpcode() == TargetOpcode::G_CONSTANT) {
            ElemTy = ElemMI->getOperand(1).getCImm()->getType();
          } else if (ElemMI->getOpcode() == TargetOpcode::G_FCONSTANT) {
            ElemTy = ElemMI->getOperand(1).getFPImm()->getType();
          } else {
            if (const SPIRVType *ElemSpvType =
                    GR->getSPIRVTypeForVReg(MI.getOperand(1).getReg(), &MF))
              ElemTy = const_cast<Type *>(GR->getTypeForSPIRVType(ElemSpvType));
          }
          if (ElemTy)
            Ty = VectorType::get(
                ElemTy, MI.getNumExplicitOperands() - MI.getNumExplicitDefs(),
                false);
          else
            NeedAssignType = false;
        }
        if (NeedAssignType)
          updateRegType(Reg, Ty, nullptr, GR, MIB, MRI);
      } else if (MIOp == TargetOpcode::G_GLOBAL_VALUE) {
        propagateSPIRVType(&MI, GR, MRI, MIB);
      }

      if (MII == Begin)
        ReachedBegin = true;
      else
        --MII;
    }
  }
  for (MachineInstr *MI : ToErase) {
    auto It = RegsAlreadyAddedToDT.find(MI);
    if (It != RegsAlreadyAddedToDT.end())
      MRI.replaceRegWith(MI->getOperand(0).getReg(), It->second);
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }

  // Address the case when IRTranslator introduces instructions with new
  // registers without SPIRVType associated.
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      switch (MI.getOpcode()) {
      case TargetOpcode::G_TRUNC:
      case TargetOpcode::G_ANYEXT:
      case TargetOpcode::G_SEXT:
      case TargetOpcode::G_ZEXT:
      case TargetOpcode::G_PTRTOINT:
      case TargetOpcode::COPY:
      case TargetOpcode::G_ADDRSPACE_CAST:
        propagateSPIRVType(&MI, GR, MRI, MIB);
        break;
      }
    }
  }
}

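// Ensure that the operands of every type-folding instruction have register
// classes and LLTs consistent with their SPIR-V types.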
static void processInstrsWithTypeFolding(MachineFunction &MF,
                                         SPIRVGlobalRegistry *GR,
                                         MachineIRBuilder MIB) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (MachineBasicBlock &MBB : MF)
    for (MachineInstr &MI : MBB)
      if (isTypeFoldingSupported(MI.getOpcode()))
        processInstr(MI, MIB, MRI, GR, nullptr);
}

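// Scan the operands of an INLINEASM instruction: return its def register, if
// any, and, when Ops is provided, collect the indices of its input operands
// (skipping metadata and operand descriptors) into Ops.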
static Register
collectInlineAsmInstrOperands(MachineInstr *MI,
                              SmallVector<unsigned, 4> *Ops = nullptr) {
  Register DefReg;
  unsigned StartOp = InlineAsm::MIOp_FirstOperand,
           AsmDescOp = InlineAsm::MIOp_FirstOperand;
  for (unsigned Idx = StartOp, MISz = MI->getNumOperands(); Idx != MISz;
       ++Idx) {
    const MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isMetadata())
      continue;
    if (Idx == AsmDescOp && MO.isImm()) {
      // compute the index of the next operand descriptor
      const InlineAsm::Flag F(MO.getImm());
      AsmDescOp += 1 + F.getNumOperandRegisters();
      continue;
    }
    if (MO.isReg() && MO.isDef()) {
      if (!Ops)
        return MO.getReg();
      else
        DefReg = MO.getReg();
    } else if (Ops) {
      Ops->push_back(Idx);
    }
  }
  return DefReg;
}

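// Translate each pair of a llvm.spv.inline.asm intrinsic and its INLINEASM
// instruction into SPV_INTEL_inline_assembly instructions: a per-function
// OpAsmTargetINTEL, an OpAsmINTEL holding the asm and constraint strings, and
// an OpAsmCallINTEL invoking it.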
static void
insertInlineAsmProcess(MachineFunction &MF, SPIRVGlobalRegistry *GR,
                       const SPIRVSubtarget &ST, MachineIRBuilder MIRBuilder,
                       const SmallVector<MachineInstr *> &ToProcess) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register AsmTargetReg;
  for (unsigned i = 0, Sz = ToProcess.size(); i + 1 < Sz; i += 2) {
    MachineInstr *I1 = ToProcess[i], *I2 = ToProcess[i + 1];
    assert(isSpvIntrinsic(*I1, Intrinsic::spv_inline_asm) && I2->isInlineAsm());
    MIRBuilder.setInsertPt(*I2->getParent(), *I2);

    if (!AsmTargetReg.isValid()) {
      // define vendor specific assembly target or dialect
      AsmTargetReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
      MRI.setRegClass(AsmTargetReg, &SPIRV::iIDRegClass);
      auto AsmTargetMIB =
          MIRBuilder.buildInstr(SPIRV::OpAsmTargetINTEL).addDef(AsmTargetReg);
      addStringImm(ST.getTargetTripleAsStr(), AsmTargetMIB);
      GR->add(AsmTargetMIB.getInstr(), AsmTargetMIB);
    }

    // create types
    const MDNode *IAMD = I1->getOperand(1).getMetadata();
    FunctionType *FTy = cast<FunctionType>(getMDOperandAsType(IAMD, 0));
    SmallVector<SPIRVType *, 4> ArgTypes;
    for (const auto &ArgTy : FTy->params())
      ArgTypes.push_back(GR->getOrCreateSPIRVType(
          ArgTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true));
    SPIRVType *RetType =
        GR->getOrCreateSPIRVType(FTy->getReturnType(), MIRBuilder,
                                 SPIRV::AccessQualifier::ReadWrite, true);
    SPIRVType *FuncType = GR->getOrCreateOpTypeFunctionWithArgs(
        FTy, RetType, ArgTypes, MIRBuilder);

    // define vendor specific assembly instructions string
    Register AsmReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
    MRI.setRegClass(AsmReg, &SPIRV::iIDRegClass);
    auto AsmMIB = MIRBuilder.buildInstr(SPIRV::OpAsmINTEL)
                      .addDef(AsmReg)
                      .addUse(GR->getSPIRVTypeID(RetType))
                      .addUse(GR->getSPIRVTypeID(FuncType))
                      .addUse(AsmTargetReg);
    // inline asm string:
    addStringImm(I2->getOperand(InlineAsm::MIOp_AsmString).getSymbolName(),
                 AsmMIB);
    // inline asm constraint string:
    addStringImm(cast<MDString>(I1->getOperand(2).getMetadata()->getOperand(0))
                     ->getString(),
                 AsmMIB);
    GR->add(AsmMIB.getInstr(), AsmMIB);

    // calls the inline assembly instruction
    unsigned ExtraInfo = I2->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
    if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
      MIRBuilder.buildInstr(SPIRV::OpDecorate)
          .addUse(AsmReg)
          .addImm(static_cast<uint32_t>(SPIRV::Decoration::SideEffectsINTEL));

    Register DefReg = collectInlineAsmInstrOperands(I2);
    if (!DefReg.isValid()) {
      DefReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
      MRI.setRegClass(DefReg, &SPIRV::iIDRegClass);
      SPIRVType *VoidType = GR->getOrCreateSPIRVType(
          Type::getVoidTy(MF.getFunction().getContext()), MIRBuilder,
          SPIRV::AccessQualifier::ReadWrite, true);
      GR->assignSPIRVTypeToVReg(VoidType, DefReg, MF);
    }

    auto AsmCall = MIRBuilder.buildInstr(SPIRV::OpAsmCallINTEL)
                       .addDef(DefReg)
                       .addUse(GR->getSPIRVTypeID(RetType))
                       .addUse(AsmReg);
    for (unsigned IntrIdx = 3; IntrIdx < I1->getNumOperands(); ++IntrIdx)
      AsmCall.addUse(I1->getOperand(IntrIdx).getReg());
  }
  for (MachineInstr *MI : ToProcess) {
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }
}

static void insertInlineAsm(MachineFunction &MF, SPIRVGlobalRegistry *GR,
                            const SPIRVSubtarget &ST,
                            MachineIRBuilder MIRBuilder) {
  SmallVector<MachineInstr *> ToProcess;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (isSpvIntrinsic(MI, Intrinsic::spv_inline_asm) ||
          MI.getOpcode() == TargetOpcode::INLINEASM)
        ToProcess.push_back(&MI);
    }
  }
  if (ToProcess.size() == 0)
    return;

  if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_inline_assembly))
    report_fatal_error("Inline assembly instructions require the "
                       "following SPIR-V extension: SPV_INTEL_inline_assembly",
                       false);

  insertInlineAsmProcess(MF, GR, ST, MIRBuilder, ToProcess);
}

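// Reinterpret the bits of a 32-bit float as a SPIR-V word, i.e. a 32-bit
// unsigned integer literal.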
static uint32_t convertFloatToSPIRVWord(float F) {
  union {
    float F;
    uint32_t Spir;
  } FPMaxError;
  FPMaxError.F = F;
  return FPMaxError.Spir;
}

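// Lower the decoration intrinsics (llvm.spv.assign.decoration,
// llvm.spv.assign.aliasing.decoration and
// llvm.spv.assign.fpmaxerror.decoration) into the corresponding decoration
// instructions, emitted right after the annotated instruction.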
static void insertSpirvDecorations(MachineFunction &MF, SPIRVGlobalRegistry *GR,
                                   MachineIRBuilder MIB) {
  const SPIRVSubtarget &ST = cast<SPIRVSubtarget>(MIB.getMF().getSubtarget());
  SmallVector<MachineInstr *, 10> ToErase;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (!isSpvIntrinsic(MI, Intrinsic::spv_assign_decoration) &&
          !isSpvIntrinsic(MI, Intrinsic::spv_assign_aliasing_decoration) &&
          !isSpvIntrinsic(MI, Intrinsic::spv_assign_fpmaxerror_decoration))
        continue;
      MIB.setInsertPt(*MI.getParent(), MI.getNextNode());
      if (isSpvIntrinsic(MI, Intrinsic::spv_assign_decoration)) {
        buildOpSpirvDecorations(MI.getOperand(1).getReg(), MIB,
                                MI.getOperand(2).getMetadata(), ST);
      } else if (isSpvIntrinsic(MI,
                                Intrinsic::spv_assign_fpmaxerror_decoration)) {
        ConstantFP *OpV = mdconst::dyn_extract<ConstantFP>(
            MI.getOperand(2).getMetadata()->getOperand(0));
        uint32_t OpValue =
            convertFloatToSPIRVWord(OpV->getValueAPF().convertToFloat());

        buildOpDecorate(MI.getOperand(1).getReg(), MIB,
                        SPIRV::Decoration::FPMaxErrorDecorationINTEL,
                        {OpValue});
      } else {
        GR->buildMemAliasingOpDecorate(MI.getOperand(1).getReg(), MIB,
                                       MI.getOperand(2).getImm(),
                                       MI.getOperand(3).getMetadata());
      }

      ToErase.push_back(&MI);
    }
  }
  for (MachineInstr *MI : ToErase) {
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }
}

// LLVM allows switches to use registers as case values, while SPIR-V requires
// them to be immediate values. This function replaces such operands with the
// equivalent immediate constants.
static void processSwitchesConstants(MachineFunction &MF,
                                     SPIRVGlobalRegistry *GR,
                                     MachineIRBuilder MIB) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (!isSpvIntrinsic(MI, Intrinsic::spv_switch))
        continue;

      SmallVector<MachineOperand, 8> NewOperands;
      NewOperands.push_back(MI.getOperand(0)); // Opcode
      NewOperands.push_back(MI.getOperand(1)); // Condition
      NewOperands.push_back(MI.getOperand(2)); // Default
      for (unsigned i = 3; i < MI.getNumOperands(); i += 2) {
        Register Reg = MI.getOperand(i).getReg();
        MachineInstr *ConstInstr = getDefInstrMaybeConstant(Reg, &MRI);
        NewOperands.push_back(
            MachineOperand::CreateCImm(ConstInstr->getOperand(1).getCImm()));

        NewOperands.push_back(MI.getOperand(i + 1));
      }

      assert(MI.getNumOperands() == NewOperands.size());
      while (MI.getNumOperands() > 0)
        MI.removeOperand(0);
      for (auto &MO : NewOperands)
        MI.addOperand(MO);
    }
  }
}

// Some instructions are used during CodeGen but should never be emitted.
// Clean those up here.
static void cleanupHelperInstructions(MachineFunction &MF,
                                      SPIRVGlobalRegistry *GR) {
  SmallVector<MachineInstr *, 8> ToEraseMI;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (isSpvIntrinsic(MI, Intrinsic::spv_track_constant) ||
          MI.getOpcode() == TargetOpcode::G_BRINDIRECT)
        ToEraseMI.push_back(&MI);
    }
  }

  for (MachineInstr *MI : ToEraseMI) {
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }
}

// Find all usages of G_BLOCK_ADDR in our intrinsics and replace those
// operands/registers by the actual MBB it references.
static void processBlockAddr(MachineFunction &MF, SPIRVGlobalRegistry *GR,
                             MachineIRBuilder MIB) {
  // Gather the reverse-mapping BB -> MBB.
  DenseMap<const BasicBlock *, MachineBasicBlock *> BB2MBB;
  for (MachineBasicBlock &MBB : MF)
    BB2MBB[MBB.getBasicBlock()] = &MBB;

  // Gather instructions requiring patching. For now, only those can use
  // G_BLOCK_ADDR.
  SmallVector<MachineInstr *, 8> InstructionsToPatch;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (isSpvIntrinsic(MI, Intrinsic::spv_switch) ||
          isSpvIntrinsic(MI, Intrinsic::spv_loop_merge) ||
          isSpvIntrinsic(MI, Intrinsic::spv_selection_merge))
        InstructionsToPatch.push_back(&MI);
    }
  }

  // For each instruction to fix, we replace all the G_BLOCK_ADDR operands by
  // the actual MBB it references. Once those references have been updated, we
  // can clean up the remaining G_BLOCK_ADDR references.
  SmallPtrSet<MachineBasicBlock *, 8> ClearAddressTaken;
  SmallPtrSet<MachineInstr *, 8> ToEraseMI;
  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (MachineInstr *MI : InstructionsToPatch) {
    SmallVector<MachineOperand, 8> NewOps;
    for (unsigned i = 0; i < MI->getNumOperands(); ++i) {
      // The operand is not a register, keep as-is.
      if (!MI->getOperand(i).isReg()) {
        NewOps.push_back(MI->getOperand(i));
        continue;
      }

      Register Reg = MI->getOperand(i).getReg();
      MachineInstr *BuildMBB = MRI.getVRegDef(Reg);
      // The register is not the result of G_BLOCK_ADDR, keep as-is.
      if (!BuildMBB || BuildMBB->getOpcode() != TargetOpcode::G_BLOCK_ADDR) {
        NewOps.push_back(MI->getOperand(i));
        continue;
      }

      assert(BuildMBB && BuildMBB->getOpcode() == TargetOpcode::G_BLOCK_ADDR &&
             BuildMBB->getOperand(1).isBlockAddress() &&
             BuildMBB->getOperand(1).getBlockAddress());
      BasicBlock *BB =
          BuildMBB->getOperand(1).getBlockAddress()->getBasicBlock();
      auto It = BB2MBB.find(BB);
      if (It == BB2MBB.end())
        report_fatal_error("cannot find a machine basic block by a basic block "
                           "in a switch statement");
      MachineBasicBlock *ReferencedBlock = It->second;
      NewOps.push_back(MachineOperand::CreateMBB(ReferencedBlock));

      ClearAddressTaken.insert(ReferencedBlock);
      ToEraseMI.insert(BuildMBB);
    }

    // Replace the operands.
    assert(MI->getNumOperands() == NewOps.size());
    while (MI->getNumOperands() > 0)
      MI->removeOperand(0);
    for (auto &MO : NewOps)
      MI->addOperand(MO);

    if (MachineInstr *Next = MI->getNextNode()) {
      if (isSpvIntrinsic(*Next, Intrinsic::spv_track_constant)) {
        ToEraseMI.insert(Next);
        Next = MI->getNextNode();
      }
      if (Next && Next->getOpcode() == TargetOpcode::G_BRINDIRECT)
        ToEraseMI.insert(Next);
    }
  }

  // BlockAddress operands were used to keep information between passes;
  // let's undo the "address taken" status to reflect that Succ doesn't
  // actually correspond to an IR-level basic block.
  for (MachineBasicBlock *Succ : ClearAddressTaken)
    Succ->setAddressTakenIRBlock(nullptr);

  // If we just deleted the G_BLOCK_ADDR instructions with BlockAddress
  // operands, their BasicBlock counterparts would be left in an "address
  // taken" status. This would make the AsmPrinter generate a series of
  // unneeded labels of the "Address of block that was removed by CodeGen"
  // kind. Let's first ensure that we don't have dangling BlockAddress
  // constants by zapping the BlockAddress nodes, and only after that proceed
  // with erasing the G_BLOCK_ADDR instructions.
  Constant *Replacement =
      ConstantInt::get(Type::getInt32Ty(MF.getFunction().getContext()), 1);
  for (MachineInstr *BlockAddrI : ToEraseMI) {
    if (BlockAddrI->getOpcode() == TargetOpcode::G_BLOCK_ADDR) {
      BlockAddress *BA = const_cast<BlockAddress *>(
          BlockAddrI->getOperand(1).getBlockAddress());
      BA->replaceAllUsesWith(
          ConstantExpr::getIntToPtr(Replacement, BA->getType()));
      BA->destroyConstant();
    }
    GR->invalidateMachineInstr(BlockAddrI);
    BlockAddrI->eraseFromParent();
  }
}

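// Return true if MBB ends without an explicit branch and would thus fall
// through to its lexical successor.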
static bool isImplicitFallthrough(MachineBasicBlock &MBB) {
  if (MBB.empty())
    return true;

  // Branching SPIR-V intrinsics are not detected by this generic method.
  // Thus, we can only trust a negative result.
  if (!MBB.canFallThrough())
    return false;

  // Otherwise, we must manually check if we have a SPIR-V intrinsic which
  // prevents an implicit fallthrough.
  for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
       It != E; ++It) {
    if (isSpvIntrinsic(*It, Intrinsic::spv_switch))
      return false;
  }
  return true;
}

static void removeImplicitFallthroughs(MachineFunction &MF,
                                       MachineIRBuilder MIB) {
  // It is valid for MachineBasicBlocks to not finish with a branch
  // instruction. In such cases, they will simply fall through to their
  // immediate successor.
  for (MachineBasicBlock &MBB : MF) {
    if (!isImplicitFallthrough(MBB))
      continue;

    assert(MBB.succ_size() == 1);
    MIB.setInsertPt(MBB, MBB.end());
    MIB.buildBr(**MBB.successors().begin());
  }
}

bool SPIRVPreLegalizer::runOnMachineFunction(MachineFunction &MF) {
  // Initialize the type registry.
  const SPIRVSubtarget &ST = MF.getSubtarget<SPIRVSubtarget>();
  SPIRVGlobalRegistry *GR = ST.getSPIRVGlobalRegistry();
  GR->setCurrentFunc(MF);
  MachineIRBuilder MIB(MF);
  // a registry of target extension constants
  DenseMap<MachineInstr *, Type *> TargetExtConstTypes;
  // to keep record of tracked constants
  addConstantsToTrack(MF, GR, ST, TargetExtConstTypes);
  foldConstantsIntoIntrinsics(MF, GR, MIB);
  insertBitcasts(MF, GR, MIB);
  generateAssignInstrs(MF, GR, MIB, TargetExtConstTypes);

  processSwitchesConstants(MF, GR, MIB);
  processBlockAddr(MF, GR, MIB);
  cleanupHelperInstructions(MF, GR);

  processInstrsWithTypeFolding(MF, GR, MIB);
  removeImplicitFallthroughs(MF, MIB);
  insertSpirvDecorations(MF, GR, MIB);
  insertInlineAsm(MF, GR, ST, MIB);
  lowerBitcasts(MF, GR, MIB);

  return true;
}

INITIALIZE_PASS(SPIRVPreLegalizer, DEBUG_TYPE, "SPIRV pre legalizer", false,
                false)

char SPIRVPreLegalizer::ID = 0;

FunctionPass *llvm::createSPIRVPreLegalizerPass() {
  return new SPIRVPreLegalizer();
}