1//===-- SPIRVGlobalRegistry.cpp - SPIR-V Global Registry --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the implementation of the SPIRVGlobalRegistry class,
10// which is used to maintain rich type information required for SPIR-V even
11// after lowering from LLVM IR to GMIR. It can convert an llvm::Type into
12// an OpTypeXXX instruction, and map it to a virtual register. Also it builds
13// and supports consistency of constants and global variables.
14//
15//===----------------------------------------------------------------------===//
16
17#include "SPIRVGlobalRegistry.h"
18#include "SPIRV.h"
19#include "SPIRVBuiltins.h"
20#include "SPIRVSubtarget.h"
21#include "SPIRVUtils.h"
22#include "llvm/ADT/APInt.h"
23#include "llvm/IR/Constants.h"
24#include "llvm/IR/DiagnosticInfo.h"
25#include "llvm/IR/Function.h"
26#include "llvm/IR/IntrinsicInst.h"
27#include "llvm/IR/Intrinsics.h"
28#include "llvm/IR/IntrinsicsSPIRV.h"
29#include "llvm/IR/Type.h"
30#include "llvm/Support/Casting.h"
31#include "llvm/Support/MathExtras.h"
32#include <cassert>
33#include <functional>
34
35using namespace llvm;
36
37static bool allowEmitFakeUse(const Value *Arg) {
38 if (isSpvIntrinsic(Arg))
39 return false;
40 if (isa<AtomicCmpXchgInst, InsertValueInst, UndefValue>(Val: Arg))
41 return false;
42 if (const auto *LI = dyn_cast<LoadInst>(Val: Arg))
43 if (LI->getType()->isAggregateType())
44 return false;
45 return true;
46}
47
48static unsigned typeToAddressSpace(const Type *Ty) {
49 if (auto PType = dyn_cast<TypedPointerType>(Val: Ty))
50 return PType->getAddressSpace();
51 if (auto PType = dyn_cast<PointerType>(Val: Ty))
52 return PType->getAddressSpace();
53 if (auto *ExtTy = dyn_cast<TargetExtType>(Val: Ty);
54 ExtTy && isTypedPointerWrapper(ExtTy))
55 return ExtTy->getIntParameter(i: 0);
56 reportFatalInternalError(reason: "Unable to convert LLVM type to SPIRVType");
57}
58
59static bool
60storageClassRequiresExplictLayout(SPIRV::StorageClass::StorageClass SC) {
61 switch (SC) {
62 case SPIRV::StorageClass::Uniform:
63 case SPIRV::StorageClass::PushConstant:
64 case SPIRV::StorageClass::StorageBuffer:
65 case SPIRV::StorageClass::PhysicalStorageBufferEXT:
66 return true;
67 case SPIRV::StorageClass::UniformConstant:
68 case SPIRV::StorageClass::Input:
69 case SPIRV::StorageClass::Output:
70 case SPIRV::StorageClass::Workgroup:
71 case SPIRV::StorageClass::CrossWorkgroup:
72 case SPIRV::StorageClass::Private:
73 case SPIRV::StorageClass::Function:
74 case SPIRV::StorageClass::Generic:
75 case SPIRV::StorageClass::AtomicCounter:
76 case SPIRV::StorageClass::Image:
77 case SPIRV::StorageClass::CallableDataNV:
78 case SPIRV::StorageClass::IncomingCallableDataNV:
79 case SPIRV::StorageClass::RayPayloadNV:
80 case SPIRV::StorageClass::HitAttributeNV:
81 case SPIRV::StorageClass::IncomingRayPayloadNV:
82 case SPIRV::StorageClass::ShaderRecordBufferNV:
83 case SPIRV::StorageClass::CodeSectionINTEL:
84 case SPIRV::StorageClass::DeviceOnlyINTEL:
85 case SPIRV::StorageClass::HostOnlyINTEL:
86 return false;
87 }
88 llvm_unreachable("Unknown SPIRV::StorageClass enum");
89}
90
91SPIRVGlobalRegistry::SPIRVGlobalRegistry(unsigned PointerSize)
92 : PointerSize(PointerSize), Bound(0), CurMF(nullptr) {}
93
94SPIRVType *SPIRVGlobalRegistry::assignIntTypeToVReg(unsigned BitWidth,
95 Register VReg,
96 MachineInstr &I,
97 const SPIRVInstrInfo &TII) {
98 SPIRVType *SpirvType = getOrCreateSPIRVIntegerType(BitWidth, I, TII);
99 assignSPIRVTypeToVReg(Type: SpirvType, VReg, MF: *CurMF);
100 return SpirvType;
101}
102
103SPIRVType *
104SPIRVGlobalRegistry::assignFloatTypeToVReg(unsigned BitWidth, Register VReg,
105 MachineInstr &I,
106 const SPIRVInstrInfo &TII) {
107 SPIRVType *SpirvType = getOrCreateSPIRVFloatType(BitWidth, I, TII);
108 assignSPIRVTypeToVReg(Type: SpirvType, VReg, MF: *CurMF);
109 return SpirvType;
110}
111
112SPIRVType *SPIRVGlobalRegistry::assignVectTypeToVReg(
113 SPIRVType *BaseType, unsigned NumElements, Register VReg, MachineInstr &I,
114 const SPIRVInstrInfo &TII) {
115 SPIRVType *SpirvType =
116 getOrCreateSPIRVVectorType(BaseType, NumElements, I, TII);
117 assignSPIRVTypeToVReg(Type: SpirvType, VReg, MF: *CurMF);
118 return SpirvType;
119}
120
121SPIRVType *SPIRVGlobalRegistry::assignTypeToVReg(
122 const Type *Type, Register VReg, MachineIRBuilder &MIRBuilder,
123 SPIRV::AccessQualifier::AccessQualifier AccessQual, bool EmitIR) {
124 SPIRVType *SpirvType =
125 getOrCreateSPIRVType(Type, MIRBuilder, AQ: AccessQual, EmitIR);
126 assignSPIRVTypeToVReg(Type: SpirvType, VReg, MF: MIRBuilder.getMF());
127 return SpirvType;
128}
129
130void SPIRVGlobalRegistry::assignSPIRVTypeToVReg(SPIRVType *SpirvType,
131 Register VReg,
132 const MachineFunction &MF) {
133 VRegToTypeMap[&MF][VReg] = SpirvType;
134}
135
136static Register createTypeVReg(MachineRegisterInfo &MRI) {
137 auto Res = MRI.createGenericVirtualRegister(Ty: LLT::scalar(SizeInBits: 64));
138 MRI.setRegClass(Reg: Res, RC: &SPIRV::TYPERegClass);
139 return Res;
140}
141
142inline Register createTypeVReg(MachineIRBuilder &MIRBuilder) {
143 return createTypeVReg(MRI&: MIRBuilder.getMF().getRegInfo());
144}
145
146SPIRVType *SPIRVGlobalRegistry::getOpTypeBool(MachineIRBuilder &MIRBuilder) {
147 return createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
148 return MIRBuilder.buildInstr(Opcode: SPIRV::OpTypeBool)
149 .addDef(RegNo: createTypeVReg(MIRBuilder));
150 });
151}
152
153unsigned SPIRVGlobalRegistry::adjustOpTypeIntWidth(unsigned Width) const {
154 const SPIRVSubtarget &ST = cast<SPIRVSubtarget>(Val: CurMF->getSubtarget());
155 if (ST.canUseExtension(
156 E: SPIRV::Extension::SPV_ALTERA_arbitrary_precision_integers) ||
157 (Width == 4 && ST.canUseExtension(E: SPIRV::Extension::SPV_INTEL_int4)))
158 return Width;
159 if (Width <= 8)
160 return 8;
161 else if (Width <= 16)
162 return 16;
163 else if (Width <= 32)
164 return 32;
165 else if (Width <= 64)
166 return 64;
167 else if (Width <= 128)
168 return 128;
169 reportFatalUsageError(reason: "Unsupported Integer width!");
170}
171
172SPIRVType *SPIRVGlobalRegistry::getOpTypeInt(unsigned Width,
173 MachineIRBuilder &MIRBuilder,
174 bool IsSigned) {
175 Width = adjustOpTypeIntWidth(Width);
176 const SPIRVSubtarget &ST =
177 cast<SPIRVSubtarget>(Val: MIRBuilder.getMF().getSubtarget());
178 return createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
179 if (Width == 4 && ST.canUseExtension(E: SPIRV::Extension::SPV_INTEL_int4)) {
180 MIRBuilder.buildInstr(Opcode: SPIRV::OpExtension)
181 .addImm(Val: SPIRV::Extension::SPV_INTEL_int4);
182 MIRBuilder.buildInstr(Opcode: SPIRV::OpCapability)
183 .addImm(Val: SPIRV::Capability::Int4TypeINTEL);
184 } else if ((!isPowerOf2_32(Value: Width) || Width < 8) &&
185 ST.canUseExtension(
186 E: SPIRV::Extension::SPV_ALTERA_arbitrary_precision_integers)) {
187 MIRBuilder.buildInstr(Opcode: SPIRV::OpExtension)
188 .addImm(Val: SPIRV::Extension::SPV_ALTERA_arbitrary_precision_integers);
189 MIRBuilder.buildInstr(Opcode: SPIRV::OpCapability)
190 .addImm(Val: SPIRV::Capability::ArbitraryPrecisionIntegersALTERA);
191 }
192 return MIRBuilder.buildInstr(Opcode: SPIRV::OpTypeInt)
193 .addDef(RegNo: createTypeVReg(MIRBuilder))
194 .addImm(Val: Width)
195 .addImm(Val: IsSigned ? 1 : 0);
196 });
197}
198
199SPIRVType *SPIRVGlobalRegistry::getOpTypeFloat(uint32_t Width,
200 MachineIRBuilder &MIRBuilder) {
201 return createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
202 return MIRBuilder.buildInstr(Opcode: SPIRV::OpTypeFloat)
203 .addDef(RegNo: createTypeVReg(MIRBuilder))
204 .addImm(Val: Width);
205 });
206}
207
208SPIRVType *
209SPIRVGlobalRegistry::getOpTypeFloat(uint32_t Width,
210 MachineIRBuilder &MIRBuilder,
211 SPIRV::FPEncoding::FPEncoding FPEncode) {
212 return createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
213 return MIRBuilder.buildInstr(Opcode: SPIRV::OpTypeFloat)
214 .addDef(RegNo: createTypeVReg(MIRBuilder))
215 .addImm(Val: Width)
216 .addImm(Val: FPEncode);
217 });
218}
219
220SPIRVType *SPIRVGlobalRegistry::getOpTypeVoid(MachineIRBuilder &MIRBuilder) {
221 return createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
222 return MIRBuilder.buildInstr(Opcode: SPIRV::OpTypeVoid)
223 .addDef(RegNo: createTypeVReg(MIRBuilder));
224 });
225}
226
227void SPIRVGlobalRegistry::invalidateMachineInstr(MachineInstr *MI) {
228 // Other maps that may hold MachineInstr*:
229 // - VRegToTypeMap: We cannot remove the definitions of `MI` from
230 // VRegToTypeMap because some calls to invalidateMachineInstr are replacing MI
231 // with another instruction defining the same register. We expect that if MI
232 // is a type instruction, and it is still referenced in VRegToTypeMap, then
233 // those registers are dead or the VRegToTypeMap is out-of-date. We do not
234 // expect passes to ask for the SPIR-V type of a dead register. If the
235 // VRegToTypeMap is out-of-date already, then there was an error before. We
236 // cannot add an assert to verify this because the VRegToTypeMap can be
237 // out-of-date.
238 // - FunctionToInstr & FunctionToInstrRev: At this point, we should not be
239 // deleting functions. No need to update.
240 // - AliasInstMDMap: Would require a linear search, and the Intel Alias
241 // instruction are not instructions instruction selection will be able to
242 // remove.
243
244 const SPIRVSubtarget &ST = MI->getMF()->getSubtarget<SPIRVSubtarget>();
245 [[maybe_unused]] const SPIRVInstrInfo *TII = ST.getInstrInfo();
246 assert(!TII->isAliasingInstr(*MI) &&
247 "Cannot invalidate aliasing instructions.");
248 assert(MI->getOpcode() != SPIRV::OpFunction &&
249 "Cannot invalidate OpFunction.");
250
251 if (MI->getOpcode() == SPIRV::OpFunctionCall) {
252 if (const auto *F = dyn_cast<Function>(Val: MI->getOperand(i: 2).getGlobal())) {
253 auto It = ForwardCalls.find(Val: F);
254 if (It != ForwardCalls.end()) {
255 It->second.erase(Ptr: MI);
256 if (It->second.empty())
257 ForwardCalls.erase(I: It);
258 }
259 }
260 }
261
262 const MachineFunction *MF = MI->getMF();
263 auto It = LastInsertedTypeMap.find(Val: MF);
264 if (It != LastInsertedTypeMap.end() && It->second == MI)
265 LastInsertedTypeMap.erase(Val: MF);
266 // remove from the duplicate tracker to avoid incorrect reuse
267 erase(MI);
268}
269
270SPIRVType *SPIRVGlobalRegistry::createOpType(
271 MachineIRBuilder &MIRBuilder,
272 std::function<MachineInstr *(MachineIRBuilder &)> Op) {
273 auto oldInsertPoint = MIRBuilder.getInsertPt();
274 MachineBasicBlock *OldMBB = &MIRBuilder.getMBB();
275 MachineBasicBlock *NewMBB = &*MIRBuilder.getMF().begin();
276
277 auto LastInsertedType = LastInsertedTypeMap.find(Val: CurMF);
278 if (LastInsertedType != LastInsertedTypeMap.end()) {
279 auto It = LastInsertedType->second->getIterator();
280 // It might happen that this instruction was removed from the first MBB,
281 // hence the Parent's check.
282 MachineBasicBlock::iterator InsertAt;
283 if (It->getParent() != NewMBB)
284 InsertAt = oldInsertPoint->getParent() == NewMBB
285 ? oldInsertPoint
286 : getInsertPtValidEnd(MBB: NewMBB);
287 else if (It->getNextNode())
288 InsertAt = It->getNextNode()->getIterator();
289 else
290 InsertAt = getInsertPtValidEnd(MBB: NewMBB);
291 MIRBuilder.setInsertPt(MBB&: *NewMBB, II: InsertAt);
292 } else {
293 MIRBuilder.setInsertPt(MBB&: *NewMBB, II: NewMBB->begin());
294 auto Result = LastInsertedTypeMap.try_emplace(Key: CurMF, Args: nullptr);
295 assert(Result.second);
296 LastInsertedType = Result.first;
297 }
298
299 MachineInstr *Type = Op(MIRBuilder);
300 // We expect all users of this function to insert definitions at the insertion
301 // point set above that is always the first MBB.
302 assert(Type->getParent() == NewMBB);
303 LastInsertedType->second = Type;
304
305 MIRBuilder.setInsertPt(MBB&: *OldMBB, II: oldInsertPoint);
306 return Type;
307}
308
309SPIRVType *SPIRVGlobalRegistry::getOpTypeVector(uint32_t NumElems,
310 SPIRVType *ElemType,
311 MachineIRBuilder &MIRBuilder) {
312 auto EleOpc = ElemType->getOpcode();
313 (void)EleOpc;
314 assert((EleOpc == SPIRV::OpTypeInt || EleOpc == SPIRV::OpTypeFloat ||
315 EleOpc == SPIRV::OpTypeBool) &&
316 "Invalid vector element type");
317
318 return createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
319 return MIRBuilder.buildInstr(Opcode: SPIRV::OpTypeVector)
320 .addDef(RegNo: createTypeVReg(MIRBuilder))
321 .addUse(RegNo: getSPIRVTypeID(SpirvType: ElemType))
322 .addImm(Val: NumElems);
323 });
324}
325
326Register SPIRVGlobalRegistry::getOrCreateConstFP(APFloat Val, MachineInstr &I,
327 SPIRVType *SpvType,
328 const SPIRVInstrInfo &TII,
329 bool ZeroAsNull) {
330 LLVMContext &Ctx = CurMF->getFunction().getContext();
331 auto *const CF = ConstantFP::get(Context&: Ctx, V: Val);
332 const MachineInstr *MI = findMI(Obj: CF, MF: CurMF);
333 if (MI && (MI->getOpcode() == SPIRV::OpConstantNull ||
334 MI->getOpcode() == SPIRV::OpConstantF))
335 return MI->getOperand(i: 0).getReg();
336 return createConstFP(CF, I, SpvType, TII, ZeroAsNull);
337}
338
339Register SPIRVGlobalRegistry::createConstFP(const ConstantFP *CF,
340 MachineInstr &I, SPIRVType *SpvType,
341 const SPIRVInstrInfo &TII,
342 bool ZeroAsNull) {
343 unsigned BitWidth = getScalarOrVectorBitWidth(Type: SpvType);
344 LLT LLTy = LLT::scalar(SizeInBits: BitWidth);
345 Register Res = CurMF->getRegInfo().createGenericVirtualRegister(Ty: LLTy);
346 CurMF->getRegInfo().setRegClass(Reg: Res, RC: &SPIRV::fIDRegClass);
347 assignSPIRVTypeToVReg(SpirvType: SpvType, VReg: Res, MF: *CurMF);
348
349 MachineInstr *DepMI = const_cast<MachineInstr *>(SpvType);
350 MachineIRBuilder MIRBuilder(*DepMI->getParent(), DepMI->getIterator());
351 SPIRVType *NewType =
352 createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
353 MachineInstrBuilder MIB;
354 // In OpenCL OpConstantNull - Scalar floating point: +0.0 (all bits 0)
355 if (CF->getValue().isPosZero() && ZeroAsNull) {
356 MIB = MIRBuilder.buildInstr(Opcode: SPIRV::OpConstantNull)
357 .addDef(RegNo: Res)
358 .addUse(RegNo: getSPIRVTypeID(SpirvType: SpvType));
359 } else {
360 MIB = MIRBuilder.buildInstr(Opcode: SPIRV::OpConstantF)
361 .addDef(RegNo: Res)
362 .addUse(RegNo: getSPIRVTypeID(SpirvType: SpvType));
363 addNumImm(Imm: APInt(BitWidth,
364 CF->getValueAPF().bitcastToAPInt().getZExtValue()),
365 MIB);
366 }
367 const auto &ST = CurMF->getSubtarget();
368 constrainSelectedInstRegOperands(I&: *MIB, TII: *ST.getInstrInfo(),
369 TRI: *ST.getRegisterInfo(),
370 RBI: *ST.getRegBankInfo());
371 return MIB;
372 });
373 add(V: CF, MI: NewType);
374 return Res;
375}
376
377Register SPIRVGlobalRegistry::getOrCreateConstInt(uint64_t Val, MachineInstr &I,
378 SPIRVType *SpvType,
379 const SPIRVInstrInfo &TII,
380 bool ZeroAsNull) {
381 const IntegerType *Ty = cast<IntegerType>(Val: getTypeForSPIRVType(Ty: SpvType));
382 auto *const CI = ConstantInt::get(Ty: const_cast<IntegerType *>(Ty), V: Val);
383 const MachineInstr *MI = findMI(Obj: CI, MF: CurMF);
384 if (MI && (MI->getOpcode() == SPIRV::OpConstantNull ||
385 MI->getOpcode() == SPIRV::OpConstantI))
386 return MI->getOperand(i: 0).getReg();
387 return createConstInt(CI, I, SpvType, TII, ZeroAsNull);
388}
389
390Register SPIRVGlobalRegistry::createConstInt(const ConstantInt *CI,
391 MachineInstr &I,
392 SPIRVType *SpvType,
393 const SPIRVInstrInfo &TII,
394 bool ZeroAsNull) {
395 unsigned BitWidth = getScalarOrVectorBitWidth(Type: SpvType);
396 LLT LLTy = LLT::scalar(SizeInBits: BitWidth);
397 Register Res = CurMF->getRegInfo().createGenericVirtualRegister(Ty: LLTy);
398 CurMF->getRegInfo().setRegClass(Reg: Res, RC: &SPIRV::iIDRegClass);
399 assignIntTypeToVReg(BitWidth, VReg: Res, I, TII);
400
401 MachineInstr *DepMI = const_cast<MachineInstr *>(SpvType);
402 MachineIRBuilder MIRBuilder(*DepMI->getParent(), DepMI->getIterator());
403 SPIRVType *NewType =
404 createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
405 MachineInstrBuilder MIB;
406 if (BitWidth == 1) {
407 MIB = MIRBuilder
408 .buildInstr(Opcode: CI->isZero() ? SPIRV::OpConstantFalse
409 : SPIRV::OpConstantTrue)
410 .addDef(RegNo: Res)
411 .addUse(RegNo: getSPIRVTypeID(SpirvType: SpvType));
412 } else if (!CI->isZero() || !ZeroAsNull) {
413 MIB = MIRBuilder.buildInstr(Opcode: SPIRV::OpConstantI)
414 .addDef(RegNo: Res)
415 .addUse(RegNo: getSPIRVTypeID(SpirvType: SpvType));
416 addNumImm(Imm: CI->getValue(), MIB);
417 } else {
418 MIB = MIRBuilder.buildInstr(Opcode: SPIRV::OpConstantNull)
419 .addDef(RegNo: Res)
420 .addUse(RegNo: getSPIRVTypeID(SpirvType: SpvType));
421 }
422 const auto &ST = CurMF->getSubtarget();
423 constrainSelectedInstRegOperands(I&: *MIB, TII: *ST.getInstrInfo(),
424 TRI: *ST.getRegisterInfo(),
425 RBI: *ST.getRegBankInfo());
426 return MIB;
427 });
428 add(V: CI, MI: NewType);
429 return Res;
430}
431
432Register SPIRVGlobalRegistry::buildConstantInt(uint64_t Val,
433 MachineIRBuilder &MIRBuilder,
434 SPIRVType *SpvType, bool EmitIR,
435 bool ZeroAsNull) {
436 assert(SpvType);
437 auto &MF = MIRBuilder.getMF();
438 const IntegerType *Ty = cast<IntegerType>(Val: getTypeForSPIRVType(Ty: SpvType));
439 // TODO: Avoid implicit trunc?
440 // See https://github.com/llvm/llvm-project/issues/112510.
441 auto *const CI = ConstantInt::get(Ty: const_cast<IntegerType *>(Ty), V: Val,
442 /*IsSigned=*/false, /*ImplicitTrunc=*/true);
443 Register Res = find(V: CI, MF: &MF);
444 if (Res.isValid())
445 return Res;
446
447 unsigned BitWidth = getScalarOrVectorBitWidth(Type: SpvType);
448 LLT LLTy = LLT::scalar(SizeInBits: BitWidth);
449 MachineRegisterInfo &MRI = MF.getRegInfo();
450 Res = MRI.createGenericVirtualRegister(Ty: LLTy);
451 MRI.setRegClass(Reg: Res, RC: &SPIRV::iIDRegClass);
452 assignTypeToVReg(Type: Ty, VReg: Res, MIRBuilder, AccessQual: SPIRV::AccessQualifier::ReadWrite,
453 EmitIR);
454
455 SPIRVType *NewType =
456 createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
457 if (EmitIR)
458 return MIRBuilder.buildConstant(Res, Val: *CI);
459 Register SpvTypeReg = getSPIRVTypeID(SpirvType: SpvType);
460 MachineInstrBuilder MIB;
461 if (Val || !ZeroAsNull) {
462 MIB = MIRBuilder.buildInstr(Opcode: SPIRV::OpConstantI)
463 .addDef(RegNo: Res)
464 .addUse(RegNo: SpvTypeReg);
465 addNumImm(Imm: APInt(BitWidth, Val), MIB);
466 } else {
467 MIB = MIRBuilder.buildInstr(Opcode: SPIRV::OpConstantNull)
468 .addDef(RegNo: Res)
469 .addUse(RegNo: SpvTypeReg);
470 }
471 const auto &Subtarget = CurMF->getSubtarget();
472 constrainSelectedInstRegOperands(I&: *MIB, TII: *Subtarget.getInstrInfo(),
473 TRI: *Subtarget.getRegisterInfo(),
474 RBI: *Subtarget.getRegBankInfo());
475 return MIB;
476 });
477 add(V: CI, MI: NewType);
478 return Res;
479}
480
481Register SPIRVGlobalRegistry::buildConstantFP(APFloat Val,
482 MachineIRBuilder &MIRBuilder,
483 SPIRVType *SpvType) {
484 auto &MF = MIRBuilder.getMF();
485 LLVMContext &Ctx = MF.getFunction().getContext();
486 if (!SpvType)
487 SpvType = getOrCreateSPIRVType(Type: Type::getFloatTy(C&: Ctx), MIRBuilder,
488 AQ: SPIRV::AccessQualifier::ReadWrite, EmitIR: true);
489 auto *const CF = ConstantFP::get(Context&: Ctx, V: Val);
490 Register Res = find(V: CF, MF: &MF);
491 if (Res.isValid())
492 return Res;
493
494 LLT LLTy = LLT::scalar(SizeInBits: getScalarOrVectorBitWidth(Type: SpvType));
495 Res = MF.getRegInfo().createGenericVirtualRegister(Ty: LLTy);
496 MF.getRegInfo().setRegClass(Reg: Res, RC: &SPIRV::fIDRegClass);
497 assignSPIRVTypeToVReg(SpirvType: SpvType, VReg: Res, MF);
498
499 SPIRVType *NewType =
500 createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
501 MachineInstrBuilder MIB;
502 MIB = MIRBuilder.buildInstr(Opcode: SPIRV::OpConstantF)
503 .addDef(RegNo: Res)
504 .addUse(RegNo: getSPIRVTypeID(SpirvType: SpvType));
505 addNumImm(Imm: CF->getValueAPF().bitcastToAPInt(), MIB);
506 return MIB;
507 });
508 add(V: CF, MI: NewType);
509 return Res;
510}
511
512Register SPIRVGlobalRegistry::getOrCreateBaseRegister(
513 Constant *Val, MachineInstr &I, SPIRVType *SpvType,
514 const SPIRVInstrInfo &TII, unsigned BitWidth, bool ZeroAsNull) {
515 SPIRVType *Type = SpvType;
516 if (SpvType->getOpcode() == SPIRV::OpTypeVector ||
517 SpvType->getOpcode() == SPIRV::OpTypeArray) {
518 auto EleTypeReg = SpvType->getOperand(i: 1).getReg();
519 Type = getSPIRVTypeForVReg(VReg: EleTypeReg);
520 }
521 if (Type->getOpcode() == SPIRV::OpTypeFloat) {
522 SPIRVType *SpvBaseType = getOrCreateSPIRVFloatType(BitWidth, I, TII);
523 return getOrCreateConstFP(Val: cast<ConstantFP>(Val)->getValue(), I, SpvType: SpvBaseType,
524 TII, ZeroAsNull);
525 }
526 assert(Type->getOpcode() == SPIRV::OpTypeInt);
527 SPIRVType *SpvBaseType = getOrCreateSPIRVIntegerType(BitWidth, I, TII);
528 return getOrCreateConstInt(Val: Val->getUniqueInteger().getZExtValue(), I,
529 SpvType: SpvBaseType, TII, ZeroAsNull);
530}
531
532Register SPIRVGlobalRegistry::getOrCreateCompositeOrNull(
533 Constant *Val, MachineInstr &I, SPIRVType *SpvType,
534 const SPIRVInstrInfo &TII, Constant *CA, unsigned BitWidth,
535 unsigned ElemCnt, bool ZeroAsNull) {
536 if (Register R = find(V: CA, MF: CurMF); R.isValid())
537 return R;
538
539 bool IsNull = Val->isNullValue() && ZeroAsNull;
540 Register ElemReg;
541 if (!IsNull)
542 ElemReg =
543 getOrCreateBaseRegister(Val, I, SpvType, TII, BitWidth, ZeroAsNull);
544
545 LLT LLTy = LLT::scalar(SizeInBits: 64);
546 Register Res = CurMF->getRegInfo().createGenericVirtualRegister(Ty: LLTy);
547 CurMF->getRegInfo().setRegClass(Reg: Res, RC: getRegClass(SpvType));
548 assignSPIRVTypeToVReg(SpirvType: SpvType, VReg: Res, MF: *CurMF);
549
550 MachineInstr *DepMI = const_cast<MachineInstr *>(SpvType);
551 MachineIRBuilder MIRBuilder(*DepMI->getParent(), DepMI->getIterator());
552 const MachineInstr *NewMI =
553 createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
554 MachineInstrBuilder MIB;
555 if (!IsNull) {
556 MIB = MIRBuilder.buildInstr(Opcode: SPIRV::OpConstantComposite)
557 .addDef(RegNo: Res)
558 .addUse(RegNo: getSPIRVTypeID(SpirvType: SpvType));
559 for (unsigned i = 0; i < ElemCnt; ++i)
560 MIB.addUse(RegNo: ElemReg);
561 } else {
562 MIB = MIRBuilder.buildInstr(Opcode: SPIRV::OpConstantNull)
563 .addDef(RegNo: Res)
564 .addUse(RegNo: getSPIRVTypeID(SpirvType: SpvType));
565 }
566 const auto &Subtarget = CurMF->getSubtarget();
567 constrainSelectedInstRegOperands(I&: *MIB, TII: *Subtarget.getInstrInfo(),
568 TRI: *Subtarget.getRegisterInfo(),
569 RBI: *Subtarget.getRegBankInfo());
570 return MIB;
571 });
572 add(V: CA, MI: NewMI);
573 return Res;
574}
575
576Register SPIRVGlobalRegistry::getOrCreateConstVector(uint64_t Val,
577 MachineInstr &I,
578 SPIRVType *SpvType,
579 const SPIRVInstrInfo &TII,
580 bool ZeroAsNull) {
581 const Type *LLVMTy = getTypeForSPIRVType(Ty: SpvType);
582 assert(LLVMTy->isVectorTy());
583 const FixedVectorType *LLVMVecTy = cast<FixedVectorType>(Val: LLVMTy);
584 Type *LLVMBaseTy = LLVMVecTy->getElementType();
585 assert(LLVMBaseTy->isIntegerTy());
586 auto *ConstVal = ConstantInt::get(Ty: LLVMBaseTy, V: Val);
587 auto *ConstVec =
588 ConstantVector::getSplat(EC: LLVMVecTy->getElementCount(), Elt: ConstVal);
589 unsigned BW = getScalarOrVectorBitWidth(Type: SpvType);
590 return getOrCreateCompositeOrNull(Val: ConstVal, I, SpvType, TII, CA: ConstVec, BitWidth: BW,
591 ElemCnt: SpvType->getOperand(i: 2).getImm(),
592 ZeroAsNull);
593}
594
595Register SPIRVGlobalRegistry::getOrCreateConstVector(APFloat Val,
596 MachineInstr &I,
597 SPIRVType *SpvType,
598 const SPIRVInstrInfo &TII,
599 bool ZeroAsNull) {
600 const Type *LLVMTy = getTypeForSPIRVType(Ty: SpvType);
601 assert(LLVMTy->isVectorTy());
602 const FixedVectorType *LLVMVecTy = cast<FixedVectorType>(Val: LLVMTy);
603 Type *LLVMBaseTy = LLVMVecTy->getElementType();
604 assert(LLVMBaseTy->isFloatingPointTy());
605 auto *ConstVal = ConstantFP::get(Ty: LLVMBaseTy, V: Val);
606 auto *ConstVec =
607 ConstantVector::getSplat(EC: LLVMVecTy->getElementCount(), Elt: ConstVal);
608 unsigned BW = getScalarOrVectorBitWidth(Type: SpvType);
609 return getOrCreateCompositeOrNull(Val: ConstVal, I, SpvType, TII, CA: ConstVec, BitWidth: BW,
610 ElemCnt: SpvType->getOperand(i: 2).getImm(),
611 ZeroAsNull);
612}
613
614Register SPIRVGlobalRegistry::getOrCreateConstIntArray(
615 uint64_t Val, size_t Num, MachineInstr &I, SPIRVType *SpvType,
616 const SPIRVInstrInfo &TII) {
617 const Type *LLVMTy = getTypeForSPIRVType(Ty: SpvType);
618 assert(LLVMTy->isArrayTy());
619 const ArrayType *LLVMArrTy = cast<ArrayType>(Val: LLVMTy);
620 Type *LLVMBaseTy = LLVMArrTy->getElementType();
621 Constant *CI = ConstantInt::get(Ty: LLVMBaseTy, V: Val);
622 SPIRVType *SpvBaseTy = getSPIRVTypeForVReg(VReg: SpvType->getOperand(i: 1).getReg());
623 unsigned BW = getScalarOrVectorBitWidth(Type: SpvBaseTy);
624 // The following is reasonably unique key that is better that [Val]. The naive
625 // alternative would be something along the lines of:
626 // SmallVector<Constant *> NumCI(Num, CI);
627 // Constant *UniqueKey =
628 // ConstantArray::get(const_cast<ArrayType*>(LLVMArrTy), NumCI);
629 // that would be a truly unique but dangerous key, because it could lead to
630 // the creation of constants of arbitrary length (that is, the parameter of
631 // memset) which were missing in the original module.
632 Constant *UniqueKey = ConstantStruct::getAnon(
633 V: {PoisonValue::get(T: const_cast<ArrayType *>(LLVMArrTy)),
634 ConstantInt::get(Ty: LLVMBaseTy, V: Val), ConstantInt::get(Ty: LLVMBaseTy, V: Num)});
635 return getOrCreateCompositeOrNull(Val: CI, I, SpvType, TII, CA: UniqueKey, BitWidth: BW,
636 ElemCnt: LLVMArrTy->getNumElements());
637}
638
639Register SPIRVGlobalRegistry::getOrCreateIntCompositeOrNull(
640 uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType, bool EmitIR,
641 Constant *CA, unsigned BitWidth, unsigned ElemCnt) {
642 if (Register R = find(V: CA, MF: CurMF); R.isValid())
643 return R;
644
645 Register ElemReg;
646 if (Val || EmitIR) {
647 SPIRVType *SpvBaseType = getOrCreateSPIRVIntegerType(BitWidth, MIRBuilder);
648 ElemReg = buildConstantInt(Val, MIRBuilder, SpvType: SpvBaseType, EmitIR);
649 }
650 LLT LLTy = EmitIR ? LLT::fixed_vector(NumElements: ElemCnt, ScalarSizeInBits: BitWidth) : LLT::scalar(SizeInBits: 64);
651 Register Res = CurMF->getRegInfo().createGenericVirtualRegister(Ty: LLTy);
652 CurMF->getRegInfo().setRegClass(Reg: Res, RC: &SPIRV::iIDRegClass);
653 assignSPIRVTypeToVReg(SpirvType: SpvType, VReg: Res, MF: *CurMF);
654
655 const MachineInstr *NewMI =
656 createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
657 if (EmitIR)
658 return MIRBuilder.buildSplatBuildVector(Res, Src: ElemReg);
659
660 if (Val) {
661 auto MIB = MIRBuilder.buildInstr(Opcode: SPIRV::OpConstantComposite)
662 .addDef(RegNo: Res)
663 .addUse(RegNo: getSPIRVTypeID(SpirvType: SpvType));
664 for (unsigned i = 0; i < ElemCnt; ++i)
665 MIB.addUse(RegNo: ElemReg);
666 return MIB;
667 }
668
669 return MIRBuilder.buildInstr(Opcode: SPIRV::OpConstantNull)
670 .addDef(RegNo: Res)
671 .addUse(RegNo: getSPIRVTypeID(SpirvType: SpvType));
672 });
673 add(V: CA, MI: NewMI);
674 return Res;
675}
676
677Register
678SPIRVGlobalRegistry::getOrCreateConsIntVector(uint64_t Val,
679 MachineIRBuilder &MIRBuilder,
680 SPIRVType *SpvType, bool EmitIR) {
681 const Type *LLVMTy = getTypeForSPIRVType(Ty: SpvType);
682 assert(LLVMTy->isVectorTy());
683 const FixedVectorType *LLVMVecTy = cast<FixedVectorType>(Val: LLVMTy);
684 Type *LLVMBaseTy = LLVMVecTy->getElementType();
685 const auto ConstInt = ConstantInt::get(Ty: LLVMBaseTy, V: Val);
686 auto ConstVec =
687 ConstantVector::getSplat(EC: LLVMVecTy->getElementCount(), Elt: ConstInt);
688 unsigned BW = getScalarOrVectorBitWidth(Type: SpvType);
689 return getOrCreateIntCompositeOrNull(Val, MIRBuilder, SpvType, EmitIR,
690 CA: ConstVec, BitWidth: BW,
691 ElemCnt: SpvType->getOperand(i: 2).getImm());
692}
693
694Register
695SPIRVGlobalRegistry::getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder,
696 SPIRVType *SpvType) {
697 const Type *Ty = getTypeForSPIRVType(Ty: SpvType);
698 unsigned AddressSpace = typeToAddressSpace(Ty);
699 Type *ElemTy = ::getPointeeType(Ty);
700 assert(ElemTy);
701 const Constant *CP = ConstantTargetNone::get(
702 T: dyn_cast<TargetExtType>(Val: getTypedPointerWrapper(ElemTy, AS: AddressSpace)));
703 Register Res = find(V: CP, MF: CurMF);
704 if (Res.isValid())
705 return Res;
706
707 LLT LLTy = LLT::pointer(AddressSpace, SizeInBits: PointerSize);
708 Res = CurMF->getRegInfo().createGenericVirtualRegister(Ty: LLTy);
709 CurMF->getRegInfo().setRegClass(Reg: Res, RC: &SPIRV::pIDRegClass);
710 assignSPIRVTypeToVReg(SpirvType: SpvType, VReg: Res, MF: *CurMF);
711
712 const MachineInstr *NewMI =
713 createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
714 return MIRBuilder.buildInstr(Opcode: SPIRV::OpConstantNull)
715 .addDef(RegNo: Res)
716 .addUse(RegNo: getSPIRVTypeID(SpirvType: SpvType));
717 });
718 add(V: CP, MI: NewMI);
719 return Res;
720}
721
722Register
723SPIRVGlobalRegistry::buildConstantSampler(Register ResReg, unsigned AddrMode,
724 unsigned Param, unsigned FilerMode,
725 MachineIRBuilder &MIRBuilder) {
726 auto Sampler =
727 ResReg.isValid()
728 ? ResReg
729 : MIRBuilder.getMRI()->createVirtualRegister(RegClass: &SPIRV::iIDRegClass);
730 SPIRVType *TypeSampler = getOrCreateOpTypeSampler(MIRBuilder);
731 Register TypeSamplerReg = getSPIRVTypeID(SpirvType: TypeSampler);
732 // We cannot use createOpType() logic here, because of the
733 // GlobalISel/IRTranslator.cpp check for a tail call that expects that
734 // MIRBuilder.getInsertPt() has a previous instruction. If this constant is
735 // inserted as a result of "__translate_sampler_initializer()" this would
736 // break this IRTranslator assumption.
737 MIRBuilder.buildInstr(Opcode: SPIRV::OpConstantSampler)
738 .addDef(RegNo: Sampler)
739 .addUse(RegNo: TypeSamplerReg)
740 .addImm(Val: AddrMode)
741 .addImm(Val: Param)
742 .addImm(Val: FilerMode);
743 return Sampler;
744}
745
746Register SPIRVGlobalRegistry::buildGlobalVariable(
747 Register ResVReg, SPIRVType *BaseType, StringRef Name,
748 const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage,
749 const MachineInstr *Init, bool IsConst,
750 const std::optional<SPIRV::LinkageType::LinkageType> &LinkageType,
751 MachineIRBuilder &MIRBuilder, bool IsInstSelector) {
752 const GlobalVariable *GVar = nullptr;
753 if (GV) {
754 GVar = cast<const GlobalVariable>(Val: GV);
755 } else {
756 // If GV is not passed explicitly, use the name to find or construct
757 // the global variable.
758 Module *M = MIRBuilder.getMF().getFunction().getParent();
759 GVar = M->getGlobalVariable(Name);
760 if (GVar == nullptr) {
761 const Type *Ty = getTypeForSPIRVType(Ty: BaseType); // TODO: check type.
762 // Module takes ownership of the global var.
763 GVar = new GlobalVariable(*M, const_cast<Type *>(Ty), false,
764 GlobalValue::ExternalLinkage, nullptr,
765 Twine(Name));
766 }
767 GV = GVar;
768 }
769
770 const MachineFunction *MF = &MIRBuilder.getMF();
771 Register Reg = find(V: GVar, MF);
772 if (Reg.isValid()) {
773 if (Reg != ResVReg)
774 MIRBuilder.buildCopy(Res: ResVReg, Op: Reg);
775 return ResVReg;
776 }
777
778 auto MIB = MIRBuilder.buildInstr(Opcode: SPIRV::OpVariable)
779 .addDef(RegNo: ResVReg)
780 .addUse(RegNo: getSPIRVTypeID(SpirvType: BaseType))
781 .addImm(Val: static_cast<uint32_t>(Storage));
782 if (Init)
783 MIB.addUse(RegNo: Init->getOperand(i: 0).getReg());
784 // ISel may introduce a new register on this step, so we need to add it to
785 // DT and correct its type avoiding fails on the next stage.
786 if (IsInstSelector) {
787 const auto &Subtarget = CurMF->getSubtarget();
788 constrainSelectedInstRegOperands(I&: *MIB, TII: *Subtarget.getInstrInfo(),
789 TRI: *Subtarget.getRegisterInfo(),
790 RBI: *Subtarget.getRegBankInfo());
791 }
792 add(V: GVar, MI: MIB);
793
794 Reg = MIB->getOperand(i: 0).getReg();
795 addGlobalObject(V: GVar, MF, R: Reg);
796
797 // Set to Reg the same type as ResVReg has.
798 auto MRI = MIRBuilder.getMRI();
799 if (Reg != ResVReg) {
800 LLT RegLLTy =
801 LLT::pointer(AddressSpace: MRI->getType(Reg: ResVReg).getAddressSpace(), SizeInBits: getPointerSize());
802 MRI->setType(VReg: Reg, Ty: RegLLTy);
803 assignSPIRVTypeToVReg(SpirvType: BaseType, VReg: Reg, MF: MIRBuilder.getMF());
804 } else {
805 // Our knowledge about the type may be updated.
806 // If that's the case, we need to update a type
807 // associated with the register.
808 SPIRVType *DefType = getSPIRVTypeForVReg(VReg: ResVReg);
809 if (!DefType || DefType != BaseType)
810 assignSPIRVTypeToVReg(SpirvType: BaseType, VReg: Reg, MF: MIRBuilder.getMF());
811 }
812
813 // If it's a global variable with name, output OpName for it.
814 if (GVar && GVar->hasName())
815 buildOpName(Target: Reg, Name: GVar->getName(), MIRBuilder);
816
817 // Output decorations for the GV.
818 // TODO: maybe move to GenerateDecorations pass.
819 const SPIRVSubtarget &ST =
820 cast<SPIRVSubtarget>(Val: MIRBuilder.getMF().getSubtarget());
821 if (IsConst && !ST.isShader())
822 buildOpDecorate(Reg, MIRBuilder, Dec: SPIRV::Decoration::Constant, DecArgs: {});
823
824 if (GVar && GVar->getAlign().valueOrOne().value() != 1 && !ST.isShader()) {
825 unsigned Alignment = (unsigned)GVar->getAlign().valueOrOne().value();
826 buildOpDecorate(Reg, MIRBuilder, Dec: SPIRV::Decoration::Alignment, DecArgs: {Alignment});
827 }
828
829 if (LinkageType)
830 buildOpDecorate(Reg, MIRBuilder, Dec: SPIRV::Decoration::LinkageAttributes,
831 DecArgs: {static_cast<uint32_t>(*LinkageType)}, StrImm: Name);
832
833 SPIRV::BuiltIn::BuiltIn BuiltInId;
834 if (getSpirvBuiltInIdByName(Name, BI&: BuiltInId))
835 buildOpDecorate(Reg, MIRBuilder, Dec: SPIRV::Decoration::BuiltIn,
836 DecArgs: {static_cast<uint32_t>(BuiltInId)});
837
838 // If it's a global variable with "spirv.Decorations" metadata node
839 // recognize it as a SPIR-V friendly LLVM IR and parse "spirv.Decorations"
840 // arguments.
841 MDNode *GVarMD = nullptr;
842 if (GVar && (GVarMD = GVar->getMetadata(Kind: "spirv.Decorations")) != nullptr)
843 buildOpSpirvDecorations(Reg, MIRBuilder, GVarMD, ST);
844
845 return Reg;
846}
847
848// Returns a name based on the Type. Notes that this does not look at
849// decorations, and will return the same string for two types that are the same
850// except for decorations.
851Register SPIRVGlobalRegistry::getOrCreateGlobalVariableWithBinding(
852 const SPIRVType *VarType, uint32_t Set, uint32_t Binding, StringRef Name,
853 MachineIRBuilder &MIRBuilder) {
854 Register VarReg =
855 MIRBuilder.getMRI()->createVirtualRegister(RegClass: &SPIRV::iIDRegClass);
856
857 buildGlobalVariable(ResVReg: VarReg, BaseType: VarType, Name, GV: nullptr,
858 Storage: getPointerStorageClass(Type: VarType), Init: nullptr, IsConst: false,
859 LinkageType: std::nullopt, MIRBuilder, IsInstSelector: false);
860
861 buildOpDecorate(Reg: VarReg, MIRBuilder, Dec: SPIRV::Decoration::DescriptorSet, DecArgs: {Set});
862 buildOpDecorate(Reg: VarReg, MIRBuilder, Dec: SPIRV::Decoration::Binding, DecArgs: {Binding});
863 return VarReg;
864}
865
866// TODO: Double check the calls to getOpTypeArray to make sure that `ElemType`
867// is explicitly laid out when required.
868SPIRVType *SPIRVGlobalRegistry::getOpTypeArray(uint32_t NumElems,
869 SPIRVType *ElemType,
870 MachineIRBuilder &MIRBuilder,
871 bool ExplicitLayoutRequired,
872 bool EmitIR) {
873 assert((ElemType->getOpcode() != SPIRV::OpTypeVoid) &&
874 "Invalid array element type");
875 SPIRVType *SpvTypeInt32 = getOrCreateSPIRVIntegerType(BitWidth: 32, MIRBuilder);
876 SPIRVType *ArrayType = nullptr;
877 const SPIRVSubtarget &ST =
878 cast<SPIRVSubtarget>(Val: MIRBuilder.getMF().getSubtarget());
879 if (NumElems != 0) {
880 Register NumElementsVReg =
881 buildConstantInt(Val: NumElems, MIRBuilder, SpvType: SpvTypeInt32, EmitIR);
882 ArrayType = createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
883 return MIRBuilder.buildInstr(Opcode: SPIRV::OpTypeArray)
884 .addDef(RegNo: createTypeVReg(MIRBuilder))
885 .addUse(RegNo: getSPIRVTypeID(SpirvType: ElemType))
886 .addUse(RegNo: NumElementsVReg);
887 });
888 } else if (ST.getTargetTriple().getVendor() == Triple::VendorType::AMD) {
889 // We set the array size to the token UINT64_MAX value, which is generally
890 // illegal (the maximum legal size is 61-bits) for the foreseeable future.
891 SPIRVType *SpvTypeInt64 = getOrCreateSPIRVIntegerType(BitWidth: 64, MIRBuilder);
892 Register NumElementsVReg =
893 buildConstantInt(UINT64_MAX, MIRBuilder, SpvType: SpvTypeInt64, EmitIR);
894 ArrayType = createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
895 return MIRBuilder.buildInstr(Opcode: SPIRV::OpTypeArray)
896 .addDef(RegNo: createTypeVReg(MIRBuilder))
897 .addUse(RegNo: getSPIRVTypeID(SpirvType: ElemType))
898 .addUse(RegNo: NumElementsVReg);
899 });
900 } else {
901 if (!ST.isShader()) {
902 llvm::reportFatalUsageError(
903 reason: "Runtime arrays are not allowed in non-shader "
904 "SPIR-V modules");
905 return nullptr;
906 }
907 ArrayType = createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
908 return MIRBuilder.buildInstr(Opcode: SPIRV::OpTypeRuntimeArray)
909 .addDef(RegNo: createTypeVReg(MIRBuilder))
910 .addUse(RegNo: getSPIRVTypeID(SpirvType: ElemType));
911 });
912 }
913
914 if (ExplicitLayoutRequired && !isResourceType(Type: ElemType)) {
915 Type *ET = const_cast<Type *>(getTypeForSPIRVType(Ty: ElemType));
916 addArrayStrideDecorations(Reg: ArrayType->defs().begin()->getReg(), ElementType: ET,
917 MIRBuilder);
918 }
919
920 return ArrayType;
921}
922
923SPIRVType *SPIRVGlobalRegistry::getOpTypeOpaque(const StructType *Ty,
924 MachineIRBuilder &MIRBuilder) {
925 assert(Ty->hasName());
926 const StringRef Name = Ty->hasName() ? Ty->getName() : "";
927 Register ResVReg = createTypeVReg(MIRBuilder);
928 return createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
929 auto MIB = MIRBuilder.buildInstr(Opcode: SPIRV::OpTypeOpaque).addDef(RegNo: ResVReg);
930 addStringImm(Str: Name, MIB);
931 buildOpName(Target: ResVReg, Name, MIRBuilder);
932 return MIB;
933 });
934}
935
936SPIRVType *SPIRVGlobalRegistry::getOpTypeStruct(
937 const StructType *Ty, MachineIRBuilder &MIRBuilder,
938 SPIRV::AccessQualifier::AccessQualifier AccQual,
939 StructOffsetDecorator Decorator, bool EmitIR) {
940 Type *OriginalElementType = nullptr;
941 uint64_t TotalSize = 0;
942 if (matchPeeledArrayPattern(Ty, OriginalElementType, TotalSize)) {
943 SPIRVType *ElementSPIRVType = findSPIRVType(
944 Ty: OriginalElementType, MIRBuilder, accessQual: AccQual,
945 /* ExplicitLayoutRequired= */ Decorator != nullptr, EmitIR);
946 return getOpTypeArray(NumElems: TotalSize, ElemType: ElementSPIRVType, MIRBuilder,
947 /*ExplicitLayoutRequired=*/Decorator != nullptr,
948 EmitIR);
949 }
950
951 const SPIRVSubtarget &ST =
952 cast<SPIRVSubtarget>(Val: MIRBuilder.getMF().getSubtarget());
953 SmallVector<Register, 4> FieldTypes;
954 constexpr unsigned MaxWordCount = UINT16_MAX;
955 const size_t NumElements = Ty->getNumElements();
956
957 size_t MaxNumElements = MaxWordCount - 2;
958 size_t SPIRVStructNumElements = NumElements;
959 if (NumElements > MaxNumElements) {
960 // Do adjustments for continued instructions.
961 SPIRVStructNumElements = MaxNumElements;
962 MaxNumElements = MaxWordCount - 1;
963 }
964
965 for (const auto &Elem : Ty->elements()) {
966 SPIRVType *ElemTy = findSPIRVType(
967 Ty: toTypedPointer(Ty: Elem), MIRBuilder, accessQual: AccQual,
968 /* ExplicitLayoutRequired= */ Decorator != nullptr, EmitIR);
969 assert(ElemTy && ElemTy->getOpcode() != SPIRV::OpTypeVoid &&
970 "Invalid struct element type");
971 FieldTypes.push_back(Elt: getSPIRVTypeID(SpirvType: ElemTy));
972 }
973 Register ResVReg = createTypeVReg(MIRBuilder);
974 if (Ty->hasName())
975 buildOpName(Target: ResVReg, Name: Ty->getName(), MIRBuilder);
976 if (Ty->isPacked() && !ST.isShader())
977 buildOpDecorate(Reg: ResVReg, MIRBuilder, Dec: SPIRV::Decoration::CPacked, DecArgs: {});
978
979 SPIRVType *SPVType =
980 createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
981 auto MIBStruct =
982 MIRBuilder.buildInstr(Opcode: SPIRV::OpTypeStruct).addDef(RegNo: ResVReg);
983 for (size_t I = 0; I < SPIRVStructNumElements; ++I)
984 MIBStruct.addUse(RegNo: FieldTypes[I]);
985 for (size_t I = SPIRVStructNumElements; I < NumElements;
986 I += MaxNumElements) {
987 auto MIBCont =
988 MIRBuilder.buildInstr(Opcode: SPIRV::OpTypeStructContinuedINTEL);
989 for (size_t J = I; J < std::min(a: I + MaxNumElements, b: NumElements); ++J)
990 MIBCont.addUse(RegNo: FieldTypes[I]);
991 }
992 return MIBStruct;
993 });
994
995 if (Decorator)
996 Decorator(SPVType->defs().begin()->getReg());
997
998 return SPVType;
999}
1000
1001SPIRVType *SPIRVGlobalRegistry::getOrCreateSpecialType(
1002 const Type *Ty, MachineIRBuilder &MIRBuilder,
1003 SPIRV::AccessQualifier::AccessQualifier AccQual) {
1004 assert(isSpecialOpaqueType(Ty) && "Not a special opaque builtin type");
1005 return SPIRV::lowerBuiltinType(Type: Ty, AccessQual: AccQual, MIRBuilder, GR: this);
1006}
1007
1008SPIRVType *SPIRVGlobalRegistry::getOpTypePointer(
1009 SPIRV::StorageClass::StorageClass SC, SPIRVType *ElemType,
1010 MachineIRBuilder &MIRBuilder, Register Reg) {
1011 if (!Reg.isValid())
1012 Reg = createTypeVReg(MIRBuilder);
1013
1014 return createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
1015 return MIRBuilder.buildInstr(Opcode: SPIRV::OpTypePointer)
1016 .addDef(RegNo: Reg)
1017 .addImm(Val: static_cast<uint32_t>(SC))
1018 .addUse(RegNo: getSPIRVTypeID(SpirvType: ElemType));
1019 });
1020}
1021
1022SPIRVType *SPIRVGlobalRegistry::getOpTypeForwardPointer(
1023 SPIRV::StorageClass::StorageClass SC, MachineIRBuilder &MIRBuilder) {
1024 return createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
1025 return MIRBuilder.buildInstr(Opcode: SPIRV::OpTypeForwardPointer)
1026 .addUse(RegNo: createTypeVReg(MIRBuilder))
1027 .addImm(Val: static_cast<uint32_t>(SC));
1028 });
1029}
1030
1031SPIRVType *SPIRVGlobalRegistry::getOpTypeFunction(
1032 const FunctionType *Ty, SPIRVType *RetType,
1033 const SmallVectorImpl<SPIRVType *> &ArgTypes,
1034 MachineIRBuilder &MIRBuilder) {
1035 const SPIRVSubtarget *ST =
1036 static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
1037 if (Ty->isVarArg() && ST->isShader()) {
1038 Function &Fn = MIRBuilder.getMF().getFunction();
1039 Ty->getContext().diagnose(DI: DiagnosticInfoUnsupported(
1040 Fn, "SPIR-V shaders do not support variadic functions",
1041 MIRBuilder.getDebugLoc()));
1042 }
1043 return createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
1044 auto MIB = MIRBuilder.buildInstr(Opcode: SPIRV::OpTypeFunction)
1045 .addDef(RegNo: createTypeVReg(MIRBuilder))
1046 .addUse(RegNo: getSPIRVTypeID(SpirvType: RetType));
1047 for (const SPIRVType *ArgType : ArgTypes)
1048 MIB.addUse(RegNo: getSPIRVTypeID(SpirvType: ArgType));
1049 return MIB;
1050 });
1051}
1052
1053SPIRVType *SPIRVGlobalRegistry::getOrCreateOpTypeFunctionWithArgs(
1054 const Type *Ty, SPIRVType *RetType,
1055 const SmallVectorImpl<SPIRVType *> &ArgTypes,
1056 MachineIRBuilder &MIRBuilder) {
1057 if (const MachineInstr *MI = findMI(T: Ty, RequiresExplicitLayout: false, MF: &MIRBuilder.getMF()))
1058 return MI;
1059 const MachineInstr *NewMI =
1060 getOpTypeFunction(Ty: cast<FunctionType>(Val: Ty), RetType, ArgTypes, MIRBuilder);
1061 add(T: Ty, RequiresExplicitLayout: false, MI: NewMI);
1062 return finishCreatingSPIRVType(LLVMTy: Ty, SpirvType: NewMI);
1063}
1064
1065SPIRVType *SPIRVGlobalRegistry::findSPIRVType(
1066 const Type *Ty, MachineIRBuilder &MIRBuilder,
1067 SPIRV::AccessQualifier::AccessQualifier AccQual,
1068 bool ExplicitLayoutRequired, bool EmitIR) {
1069 Ty = adjustIntTypeByWidth(Ty);
1070 // TODO: findMI needs to know if a layout is required.
1071 if (const MachineInstr *MI =
1072 findMI(T: Ty, RequiresExplicitLayout: ExplicitLayoutRequired, MF: &MIRBuilder.getMF()))
1073 return MI;
1074 if (auto It = ForwardPointerTypes.find(Val: Ty); It != ForwardPointerTypes.end())
1075 return It->second;
1076 return restOfCreateSPIRVType(Type: Ty, MIRBuilder, AccessQual: AccQual, ExplicitLayoutRequired,
1077 EmitIR);
1078}
1079
1080Register SPIRVGlobalRegistry::getSPIRVTypeID(const SPIRVType *SpirvType) const {
1081 assert(SpirvType && "Attempting to get type id for nullptr type.");
1082 if (SpirvType->getOpcode() == SPIRV::OpTypeForwardPointer ||
1083 SpirvType->getOpcode() == SPIRV::OpTypeStructContinuedINTEL)
1084 return SpirvType->uses().begin()->getReg();
1085 return SpirvType->defs().begin()->getReg();
1086}
1087
1088// We need to use a new LLVM integer type if there is a mismatch between
1089// number of bits in LLVM and SPIRV integer types to let DuplicateTracker
1090// ensure uniqueness of a SPIRV type by the corresponding LLVM type. Without
1091// such an adjustment SPIRVGlobalRegistry::getOpTypeInt() could create the
1092// same "OpTypeInt 8" type for a series of LLVM integer types with number of
1093// bits less than 8. This would lead to duplicate type definitions
1094// eventually due to the method that DuplicateTracker utilizes to reason
1095// about uniqueness of type records.
1096const Type *SPIRVGlobalRegistry::adjustIntTypeByWidth(const Type *Ty) const {
1097 if (auto IType = dyn_cast<IntegerType>(Val: Ty)) {
1098 unsigned SrcBitWidth = IType->getBitWidth();
1099 if (SrcBitWidth > 1) {
1100 unsigned BitWidth = adjustOpTypeIntWidth(Width: SrcBitWidth);
1101 // Maybe change source LLVM type to keep DuplicateTracker consistent.
1102 if (SrcBitWidth != BitWidth)
1103 Ty = IntegerType::get(C&: Ty->getContext(), NumBits: BitWidth);
1104 }
1105 }
1106 return Ty;
1107}
1108
1109SPIRVType *SPIRVGlobalRegistry::createSPIRVType(
1110 const Type *Ty, MachineIRBuilder &MIRBuilder,
1111 SPIRV::AccessQualifier::AccessQualifier AccQual,
1112 bool ExplicitLayoutRequired, bool EmitIR) {
1113 if (isSpecialOpaqueType(Ty))
1114 return getOrCreateSpecialType(Ty, MIRBuilder, AccQual);
1115
1116 if (const MachineInstr *MI =
1117 findMI(T: Ty, RequiresExplicitLayout: ExplicitLayoutRequired, MF: &MIRBuilder.getMF()))
1118 return MI;
1119
1120 if (auto IType = dyn_cast<IntegerType>(Val: Ty)) {
1121 const unsigned Width = IType->getBitWidth();
1122 return Width == 1 ? getOpTypeBool(MIRBuilder)
1123 : getOpTypeInt(Width, MIRBuilder, IsSigned: false);
1124 }
1125 if (Ty->isFloatingPointTy()) {
1126 if (Ty->isBFloatTy()) {
1127 return getOpTypeFloat(Width: Ty->getPrimitiveSizeInBits(), MIRBuilder,
1128 FPEncode: SPIRV::FPEncoding::BFloat16KHR);
1129 } else {
1130 return getOpTypeFloat(Width: Ty->getPrimitiveSizeInBits(), MIRBuilder);
1131 }
1132 }
1133 if (Ty->isVoidTy())
1134 return getOpTypeVoid(MIRBuilder);
1135 if (Ty->isVectorTy()) {
1136 SPIRVType *El =
1137 findSPIRVType(Ty: cast<FixedVectorType>(Val: Ty)->getElementType(), MIRBuilder,
1138 AccQual, ExplicitLayoutRequired, EmitIR);
1139 return getOpTypeVector(NumElems: cast<FixedVectorType>(Val: Ty)->getNumElements(), ElemType: El,
1140 MIRBuilder);
1141 }
1142 if (Ty->isArrayTy()) {
1143 SPIRVType *El = findSPIRVType(Ty: Ty->getArrayElementType(), MIRBuilder,
1144 AccQual, ExplicitLayoutRequired, EmitIR);
1145 return getOpTypeArray(NumElems: Ty->getArrayNumElements(), ElemType: El, MIRBuilder,
1146 ExplicitLayoutRequired, EmitIR);
1147 }
1148 if (auto SType = dyn_cast<StructType>(Val: Ty)) {
1149 if (SType->isOpaque())
1150 return getOpTypeOpaque(Ty: SType, MIRBuilder);
1151
1152 StructOffsetDecorator Decorator = nullptr;
1153 if (ExplicitLayoutRequired) {
1154 Decorator = [&MIRBuilder, SType, this](Register Reg) {
1155 addStructOffsetDecorations(Reg, Ty: const_cast<StructType *>(SType),
1156 MIRBuilder);
1157 };
1158 }
1159 return getOpTypeStruct(Ty: SType, MIRBuilder, AccQual, Decorator: std::move(Decorator),
1160 EmitIR);
1161 }
1162 if (auto FType = dyn_cast<FunctionType>(Val: Ty)) {
1163 SPIRVType *RetTy = findSPIRVType(Ty: FType->getReturnType(), MIRBuilder,
1164 AccQual, ExplicitLayoutRequired, EmitIR);
1165 SmallVector<SPIRVType *, 4> ParamTypes;
1166 for (const auto &ParamTy : FType->params())
1167 ParamTypes.push_back(Elt: findSPIRVType(Ty: ParamTy, MIRBuilder, AccQual,
1168 ExplicitLayoutRequired, EmitIR));
1169 return getOpTypeFunction(Ty: FType, RetType: RetTy, ArgTypes: ParamTypes, MIRBuilder);
1170 }
1171
1172 unsigned AddrSpace = typeToAddressSpace(Ty);
1173 SPIRVType *SpvElementType = nullptr;
1174 if (Type *ElemTy = ::getPointeeType(Ty))
1175 SpvElementType = getOrCreateSPIRVType(Type: ElemTy, MIRBuilder, AQ: AccQual, EmitIR);
1176 else
1177 SpvElementType = getOrCreateSPIRVIntegerType(BitWidth: 8, MIRBuilder);
1178
1179 // Get access to information about available extensions
1180 const SPIRVSubtarget *ST =
1181 static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
1182 auto SC = addressSpaceToStorageClass(AddrSpace, STI: *ST);
1183
1184 Type *ElemTy = ::getPointeeType(Ty);
1185 if (!ElemTy) {
1186 ElemTy = Type::getInt8Ty(C&: MIRBuilder.getContext());
1187 }
1188
1189 // If we have forward pointer associated with this type, use its register
1190 // operand to create OpTypePointer.
1191 if (auto It = ForwardPointerTypes.find(Val: Ty); It != ForwardPointerTypes.end()) {
1192 Register Reg = getSPIRVTypeID(SpirvType: It->second);
1193 // TODO: what does getOpTypePointer do?
1194 return getOpTypePointer(SC, ElemType: SpvElementType, MIRBuilder, Reg);
1195 }
1196
1197 return getOrCreateSPIRVPointerType(BaseType: ElemTy, MIRBuilder, SC);
1198}
1199
1200SPIRVType *SPIRVGlobalRegistry::restOfCreateSPIRVType(
1201 const Type *Ty, MachineIRBuilder &MIRBuilder,
1202 SPIRV::AccessQualifier::AccessQualifier AccessQual,
1203 bool ExplicitLayoutRequired, bool EmitIR) {
1204 // TODO: Could this create a problem if one requires an explicit layout, and
1205 // the next time it does not?
1206 if (TypesInProcessing.count(Ptr: Ty) && !isPointerTyOrWrapper(Ty))
1207 return nullptr;
1208 TypesInProcessing.insert(Ptr: Ty);
1209 SPIRVType *SpirvType = createSPIRVType(Ty, MIRBuilder, AccQual: AccessQual,
1210 ExplicitLayoutRequired, EmitIR);
1211 TypesInProcessing.erase(Ptr: Ty);
1212 VRegToTypeMap[&MIRBuilder.getMF()][getSPIRVTypeID(SpirvType)] = SpirvType;
1213
1214 // TODO: We could end up with two SPIR-V types pointing to the same llvm type.
1215 // Is that a problem?
1216 SPIRVToLLVMType[SpirvType] = unifyPtrType(Ty);
1217
1218 if (SpirvType->getOpcode() == SPIRV::OpTypeForwardPointer ||
1219 findMI(T: Ty, RequiresExplicitLayout: false, MF: &MIRBuilder.getMF()) || isSpecialOpaqueType(Ty))
1220 return SpirvType;
1221
1222 if (auto *ExtTy = dyn_cast<TargetExtType>(Val: Ty);
1223 ExtTy && isTypedPointerWrapper(ExtTy))
1224 add(PointeeTy: ExtTy->getTypeParameter(i: 0), AddressSpace: ExtTy->getIntParameter(i: 0), MI: SpirvType);
1225 else if (!isPointerTy(T: Ty))
1226 add(T: Ty, RequiresExplicitLayout: ExplicitLayoutRequired, MI: SpirvType);
1227 else if (isTypedPointerTy(T: Ty))
1228 add(PointeeTy: cast<TypedPointerType>(Val: Ty)->getElementType(),
1229 AddressSpace: getPointerAddressSpace(T: Ty), MI: SpirvType);
1230 else
1231 add(PointeeTy: Type::getInt8Ty(C&: MIRBuilder.getMF().getFunction().getContext()),
1232 AddressSpace: getPointerAddressSpace(T: Ty), MI: SpirvType);
1233 return SpirvType;
1234}
1235
1236SPIRVType *
1237SPIRVGlobalRegistry::getSPIRVTypeForVReg(Register VReg,
1238 const MachineFunction *MF) const {
1239 auto t = VRegToTypeMap.find(Val: MF ? MF : CurMF);
1240 if (t != VRegToTypeMap.end()) {
1241 auto tt = t->second.find(Val: VReg);
1242 if (tt != t->second.end())
1243 return tt->second;
1244 }
1245 return nullptr;
1246}
1247
1248SPIRVType *SPIRVGlobalRegistry::getResultType(Register VReg,
1249 MachineFunction *MF) {
1250 if (!MF)
1251 MF = CurMF;
1252 MachineInstr *Instr = getVRegDef(MRI&: MF->getRegInfo(), Reg: VReg);
1253 return getSPIRVTypeForVReg(VReg: Instr->getOperand(i: 1).getReg(), MF);
1254}
1255
1256SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVType(
1257 const Type *Ty, MachineIRBuilder &MIRBuilder,
1258 SPIRV::AccessQualifier::AccessQualifier AccessQual,
1259 bool ExplicitLayoutRequired, bool EmitIR) {
1260 const MachineFunction *MF = &MIRBuilder.getMF();
1261 Register Reg;
1262 if (auto *ExtTy = dyn_cast<TargetExtType>(Val: Ty);
1263 ExtTy && isTypedPointerWrapper(ExtTy))
1264 Reg = find(PointeeTy: ExtTy->getTypeParameter(i: 0), AddressSpace: ExtTy->getIntParameter(i: 0), MF);
1265 else if (!isPointerTy(T: Ty))
1266 Reg = find(T: Ty = adjustIntTypeByWidth(Ty), RequiresExplicitLayout: ExplicitLayoutRequired, MF);
1267 else if (isTypedPointerTy(T: Ty))
1268 Reg = find(PointeeTy: cast<TypedPointerType>(Val: Ty)->getElementType(),
1269 AddressSpace: getPointerAddressSpace(T: Ty), MF);
1270 else
1271 Reg = find(PointeeTy: Type::getInt8Ty(C&: MIRBuilder.getMF().getFunction().getContext()),
1272 AddressSpace: getPointerAddressSpace(T: Ty), MF);
1273 if (Reg.isValid() && !isSpecialOpaqueType(Ty))
1274 return getSPIRVTypeForVReg(VReg: Reg);
1275
1276 TypesInProcessing.clear();
1277 SPIRVType *STy = restOfCreateSPIRVType(Ty, MIRBuilder, AccessQual,
1278 ExplicitLayoutRequired, EmitIR);
1279 // Create normal pointer types for the corresponding OpTypeForwardPointers.
1280 for (auto &CU : ForwardPointerTypes) {
1281 // Pointer type themselves do not require an explicit layout. The types
1282 // they pointer to might, but that is taken care of when creating the type.
1283 bool PtrNeedsLayout = false;
1284 const Type *Ty2 = CU.first;
1285 SPIRVType *STy2 = CU.second;
1286 if ((Reg = find(T: Ty2, RequiresExplicitLayout: PtrNeedsLayout, MF)).isValid())
1287 STy2 = getSPIRVTypeForVReg(VReg: Reg);
1288 else
1289 STy2 = restOfCreateSPIRVType(Ty: Ty2, MIRBuilder, AccessQual, ExplicitLayoutRequired: PtrNeedsLayout,
1290 EmitIR);
1291 if (Ty == Ty2)
1292 STy = STy2;
1293 }
1294 ForwardPointerTypes.clear();
1295 return STy;
1296}
1297
1298bool SPIRVGlobalRegistry::isScalarOfType(Register VReg,
1299 unsigned TypeOpcode) const {
1300 SPIRVType *Type = getSPIRVTypeForVReg(VReg);
1301 assert(Type && "isScalarOfType VReg has no type assigned");
1302 return Type->getOpcode() == TypeOpcode;
1303}
1304
1305bool SPIRVGlobalRegistry::isScalarOrVectorOfType(Register VReg,
1306 unsigned TypeOpcode) const {
1307 SPIRVType *Type = getSPIRVTypeForVReg(VReg);
1308 assert(Type && "isScalarOrVectorOfType VReg has no type assigned");
1309 if (Type->getOpcode() == TypeOpcode)
1310 return true;
1311 if (Type->getOpcode() == SPIRV::OpTypeVector) {
1312 Register ScalarTypeVReg = Type->getOperand(i: 1).getReg();
1313 SPIRVType *ScalarType = getSPIRVTypeForVReg(VReg: ScalarTypeVReg);
1314 return ScalarType->getOpcode() == TypeOpcode;
1315 }
1316 return false;
1317}
1318
1319bool SPIRVGlobalRegistry::isResourceType(SPIRVType *Type) const {
1320 switch (Type->getOpcode()) {
1321 case SPIRV::OpTypeImage:
1322 case SPIRV::OpTypeSampler:
1323 case SPIRV::OpTypeSampledImage:
1324 return true;
1325 case SPIRV::OpTypeStruct:
1326 return hasBlockDecoration(Type);
1327 default:
1328 return false;
1329 }
1330 return false;
1331}
1332unsigned
1333SPIRVGlobalRegistry::getScalarOrVectorComponentCount(Register VReg) const {
1334 return getScalarOrVectorComponentCount(Type: getSPIRVTypeForVReg(VReg));
1335}
1336
1337unsigned
1338SPIRVGlobalRegistry::getScalarOrVectorComponentCount(SPIRVType *Type) const {
1339 if (!Type)
1340 return 0;
1341 return Type->getOpcode() == SPIRV::OpTypeVector
1342 ? static_cast<unsigned>(Type->getOperand(i: 2).getImm())
1343 : 1;
1344}
1345
1346SPIRVType *
1347SPIRVGlobalRegistry::getScalarOrVectorComponentType(Register VReg) const {
1348 return getScalarOrVectorComponentType(Type: getSPIRVTypeForVReg(VReg));
1349}
1350
1351SPIRVType *
1352SPIRVGlobalRegistry::getScalarOrVectorComponentType(SPIRVType *Type) const {
1353 if (!Type)
1354 return nullptr;
1355 Register ScalarReg = Type->getOpcode() == SPIRV::OpTypeVector
1356 ? Type->getOperand(i: 1).getReg()
1357 : Type->getOperand(i: 0).getReg();
1358 SPIRVType *ScalarType = getSPIRVTypeForVReg(VReg: ScalarReg);
1359 assert(isScalarOrVectorOfType(Type->getOperand(0).getReg(),
1360 ScalarType->getOpcode()));
1361 return ScalarType;
1362}
1363
1364unsigned
1365SPIRVGlobalRegistry::getScalarOrVectorBitWidth(const SPIRVType *Type) const {
1366 assert(Type && "Invalid Type pointer");
1367 if (Type->getOpcode() == SPIRV::OpTypeVector) {
1368 auto EleTypeReg = Type->getOperand(i: 1).getReg();
1369 Type = getSPIRVTypeForVReg(VReg: EleTypeReg);
1370 }
1371 if (Type->getOpcode() == SPIRV::OpTypeInt ||
1372 Type->getOpcode() == SPIRV::OpTypeFloat)
1373 return Type->getOperand(i: 1).getImm();
1374 if (Type->getOpcode() == SPIRV::OpTypeBool)
1375 return 1;
1376 llvm_unreachable("Attempting to get bit width of non-integer/float type.");
1377}
1378
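// Returns component count times component bit width, e.g. a 4-component
// vector of 32-bit integers reports 128. Types other than OpTypeInt and
// OpTypeFloat (or vectors thereof) report 0.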
1379unsigned SPIRVGlobalRegistry::getNumScalarOrVectorTotalBitWidth(
1380 const SPIRVType *Type) const {
1381 assert(Type && "Invalid Type pointer");
1382 unsigned NumElements = 1;
1383 if (Type->getOpcode() == SPIRV::OpTypeVector) {
1384 NumElements = static_cast<unsigned>(Type->getOperand(i: 2).getImm());
1385 Type = getSPIRVTypeForVReg(VReg: Type->getOperand(i: 1).getReg());
1386 }
1387 return Type->getOpcode() == SPIRV::OpTypeInt ||
1388 Type->getOpcode() == SPIRV::OpTypeFloat
1389 ? NumElements * Type->getOperand(i: 1).getImm()
1390 : 0;
1391}
1392
1393const SPIRVType *SPIRVGlobalRegistry::retrieveScalarOrVectorIntType(
1394 const SPIRVType *Type) const {
1395 if (Type && Type->getOpcode() == SPIRV::OpTypeVector)
1396 Type = getSPIRVTypeForVReg(VReg: Type->getOperand(i: 1).getReg());
1397 return Type && Type->getOpcode() == SPIRV::OpTypeInt ? Type : nullptr;
1398}
1399
1400bool SPIRVGlobalRegistry::isScalarOrVectorSigned(const SPIRVType *Type) const {
1401 const SPIRVType *IntType = retrieveScalarOrVectorIntType(Type);
1402 return IntType && IntType->getOperand(i: 2).getImm() != 0;
1403}
1404
1405SPIRVType *SPIRVGlobalRegistry::getPointeeType(SPIRVType *PtrType) {
1406 return PtrType && PtrType->getOpcode() == SPIRV::OpTypePointer
1407 ? getSPIRVTypeForVReg(VReg: PtrType->getOperand(i: 2).getReg())
1408 : nullptr;
1409}
1410
1411unsigned SPIRVGlobalRegistry::getPointeeTypeOp(Register PtrReg) {
1412 SPIRVType *ElemType = getPointeeType(PtrType: getSPIRVTypeForVReg(VReg: PtrReg));
1413 return ElemType ? ElemType->getOpcode() : 0;
1414}
1415
1416bool SPIRVGlobalRegistry::isBitcastCompatible(const SPIRVType *Type1,
1417 const SPIRVType *Type2) const {
1418 if (!Type1 || !Type2)
1419 return false;
1420 auto Op1 = Type1->getOpcode(), Op2 = Type2->getOpcode();
1421 // Ignore the difference between SPIR-V versions <1.5 and >=1.5: a bitcast
1422 // is valid if either the Result Type or the Operand is a pointer, and the
1423 // other is a pointer, an integer scalar, or an integer vector.
1424 if (Op1 == SPIRV::OpTypePointer &&
1425 (Op2 == SPIRV::OpTypePointer || retrieveScalarOrVectorIntType(Type: Type2)))
1426 return true;
1427 if (Op2 == SPIRV::OpTypePointer &&
1428 (Op1 == SPIRV::OpTypePointer || retrieveScalarOrVectorIntType(Type: Type1)))
1429 return true;
1430 unsigned Bits1 = getNumScalarOrVectorTotalBitWidth(Type: Type1),
1431 Bits2 = getNumScalarOrVectorTotalBitWidth(Type: Type2);
1432 return Bits1 > 0 && Bits1 == Bits2;
1433}
1434
1435SPIRV::StorageClass::StorageClass
1436SPIRVGlobalRegistry::getPointerStorageClass(Register VReg) const {
1437 SPIRVType *Type = getSPIRVTypeForVReg(VReg);
1438 assert(Type && Type->getOpcode() == SPIRV::OpTypePointer &&
1439 Type->getOperand(1).isImm() && "Pointer type is expected");
1440 return getPointerStorageClass(Type);
1441}
1442
1443SPIRV::StorageClass::StorageClass
1444SPIRVGlobalRegistry::getPointerStorageClass(const SPIRVType *Type) const {
1445 return static_cast<SPIRV::StorageClass::StorageClass>(
1446 Type->getOperand(i: 1).getImm());
1447}
1448
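// Builds (or reuses) the pointer type used for a Vulkan buffer binding: the
// element type is wrapped in a one-member struct that is decorated Block (and
// whose member 0 is NonWritable for read-only buffers), and a pointer to that
// struct in storage class SC is returned. Roughly:
//   %block = OpTypeStruct %elem   ; decorated Block
//   %ptr   = OpTypePointer SC %block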
1449SPIRVType *SPIRVGlobalRegistry::getOrCreateVulkanBufferType(
1450 MachineIRBuilder &MIRBuilder, Type *ElemType,
1451 SPIRV::StorageClass::StorageClass SC, bool IsWritable, bool EmitIr) {
1452 auto Key = SPIRV::irhandle_vkbuffer(ElementType: ElemType, SC, IsWriteable: IsWritable);
1453 if (const MachineInstr *MI = findMI(Handle: Key, MF: &MIRBuilder.getMF()))
1454 return MI;
1455
1456 bool ExplicitLayoutRequired = storageClassRequiresExplictLayout(SC);
1457 // We need to get the SPIR-V type for the element here, so we can add the
1458 // decoration to it.
1459 auto *T = StructType::create(Elements: ElemType);
1460 auto *BlockType =
1461 getOrCreateSPIRVType(Ty: T, MIRBuilder, AccessQual: SPIRV::AccessQualifier::None,
1462 ExplicitLayoutRequired, EmitIR: EmitIr);
1463
1464 buildOpDecorate(Reg: BlockType->defs().begin()->getReg(), MIRBuilder,
1465 Dec: SPIRV::Decoration::Block, DecArgs: {});
1466
1467 if (!IsWritable) {
1468 buildOpMemberDecorate(Reg: BlockType->defs().begin()->getReg(), MIRBuilder,
1469 Dec: SPIRV::Decoration::NonWritable, Member: 0, DecArgs: {});
1470 }
1471
1472 SPIRVType *R = getOrCreateSPIRVPointerTypeInternal(BaseType: BlockType, MIRBuilder, SC);
1473 add(Handle: Key, MI: R);
1474 return R;
1475}
1476
1477SPIRVType *
1478SPIRVGlobalRegistry::getOrCreatePaddingType(MachineIRBuilder &MIRBuilder) {
1479 auto Key = SPIRV::irhandle_padding();
1480 if (const MachineInstr *MI = findMI(Handle: Key, MF: &MIRBuilder.getMF()))
1481 return MI;
1482 auto *T = Type::getInt8Ty(C&: MIRBuilder.getContext());
1483 SPIRVType *R = getOrCreateSPIRVIntegerType(BitWidth: 8, MIRBuilder);
1484 finishCreatingSPIRVType(LLVMTy: T, SpirvType: R);
1485 add(Handle: Key, MI: R);
1486 return R;
1487}
1488
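// Builds (or reuses) the Block-decorated, explicitly laid out type used for a
// Vulkan push constant. Unlike the buffer case above, the decorated type
// itself is returned rather than a pointer to it.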
1489SPIRVType *SPIRVGlobalRegistry::getOrCreateVulkanPushConstantType(
1490 MachineIRBuilder &MIRBuilder, Type *T) {
1491 const auto SC = SPIRV::StorageClass::PushConstant;
1492
1493 auto Key = SPIRV::irhandle_vkbuffer(ElementType: T, SC, /* IsWritable= */ IsWriteable: false);
1494 if (const MachineInstr *MI = findMI(Handle: Key, MF: &MIRBuilder.getMF()))
1495 return MI;
1496
1497 // We need to get the SPIR-V type for the element here, so we can add the
1498 // decoration to it.
1499 auto *BlockType = getOrCreateSPIRVType(
1500 Ty: T, MIRBuilder, AccessQual: SPIRV::AccessQualifier::None,
1501 /* ExplicitLayoutRequired= */ true, /* EmitIr= */ EmitIR: false);
1502
1503 buildOpDecorate(Reg: BlockType->defs().begin()->getReg(), MIRBuilder,
1504 Dec: SPIRV::Decoration::Block, DecArgs: {});
1505 SPIRVType *R = BlockType;
1506 add(Handle: Key, MI: R);
1507 return R;
1508}
1509
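// The layout target extension type carries the underlying struct as type
// parameter 0 and the offset of each member as the integer parameters starting
// at index 1; those offsets are re-emitted here as OpMemberDecorate Offset
// decorations.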
1510SPIRVType *SPIRVGlobalRegistry::getOrCreateLayoutType(
1511 MachineIRBuilder &MIRBuilder, const TargetExtType *T, bool EmitIr) {
1512 auto Key = SPIRV::handle(Ty: T);
1513 if (const MachineInstr *MI = findMI(Handle: Key, MF: &MIRBuilder.getMF()))
1514 return MI;
1515
1516 StructType *ST = cast<StructType>(Val: T->getTypeParameter(i: 0));
1517 ArrayRef<uint32_t> Offsets = T->int_params().slice(N: 1);
1518 assert(ST->getNumElements() == Offsets.size());
1519
1520 StructOffsetDecorator Decorator = [&MIRBuilder, &Offsets](Register Reg) {
1521 for (uint32_t I = 0; I < Offsets.size(); ++I) {
1522 buildOpMemberDecorate(Reg, MIRBuilder, Dec: SPIRV::Decoration::Offset, Member: I,
1523 DecArgs: {Offsets[I]});
1524 }
1525 };
1526
1527 // We need a new OpTypeStruct instruction because its decorations may differ
1528 // from those of a struct with an explicit layout created for a different
1529 // entry point.
1530 SPIRVType *SPIRVStructType =
1531 getOpTypeStruct(Ty: ST, MIRBuilder, AccQual: SPIRV::AccessQualifier::None,
1532 Decorator: std::move(Decorator), EmitIR: EmitIr);
1533 add(Handle: Key, MI: SPIRVStructType);
1534 return SPIRVStructType;
1535}
1536
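// Translates an image builtin target extension type into an OpTypeImage. The
// sampled type is type parameter 0; the integer parameters encode Dim, Depth,
// Arrayed, MS, Sampled and Image Format, with an optional seventh parameter
// carrying the access qualifier.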
1537SPIRVType *SPIRVGlobalRegistry::getImageType(
1538 const TargetExtType *ExtensionType,
1539 const SPIRV::AccessQualifier::AccessQualifier Qualifier,
1540 MachineIRBuilder &MIRBuilder) {
1541 assert(ExtensionType->getNumTypeParameters() == 1 &&
1542 "SPIR-V image builtin type must have sampled type parameter!");
1543 const SPIRVType *SampledType =
1544 getOrCreateSPIRVType(Type: ExtensionType->getTypeParameter(i: 0), MIRBuilder,
1545 AQ: SPIRV::AccessQualifier::ReadWrite, EmitIR: true);
1546 assert((ExtensionType->getNumIntParameters() == 7 ||
1547 ExtensionType->getNumIntParameters() == 6) &&
1548 "Invalid number of parameters for SPIR-V image builtin!");
1549
1550 SPIRV::AccessQualifier::AccessQualifier accessQualifier =
1551 SPIRV::AccessQualifier::None;
1552 if (ExtensionType->getNumIntParameters() == 7) {
1553 accessQualifier = Qualifier == SPIRV::AccessQualifier::WriteOnly
1554 ? SPIRV::AccessQualifier::WriteOnly
1555 : SPIRV::AccessQualifier::AccessQualifier(
1556 ExtensionType->getIntParameter(i: 6));
1557 }
1558
1559 // Create or get an existing type from GlobalRegistry.
1560 SPIRVType *R = getOrCreateOpTypeImage(
1561 MIRBuilder, SampledType,
1562 Dim: SPIRV::Dim::Dim(ExtensionType->getIntParameter(i: 0)),
1563 Depth: ExtensionType->getIntParameter(i: 1), Arrayed: ExtensionType->getIntParameter(i: 2),
1564 Multisampled: ExtensionType->getIntParameter(i: 3), Sampled: ExtensionType->getIntParameter(i: 4),
1565 ImageFormat: SPIRV::ImageFormat::ImageFormat(ExtensionType->getIntParameter(i: 5)),
1566 AccQual: accessQualifier);
1567 SPIRVToLLVMType[R] = ExtensionType;
1568 return R;
1569}
1570
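// The emitted instruction has roughly the shape:
//   %img = OpTypeImage %sampledType Dim Depth Arrayed MS Sampled Format
//          [AccessQualifier]
// where the access qualifier operand is only appended when it is not None.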
1571SPIRVType *SPIRVGlobalRegistry::getOrCreateOpTypeImage(
1572 MachineIRBuilder &MIRBuilder, SPIRVType *SampledType, SPIRV::Dim::Dim Dim,
1573 uint32_t Depth, uint32_t Arrayed, uint32_t Multisampled, uint32_t Sampled,
1574 SPIRV::ImageFormat::ImageFormat ImageFormat,
1575 SPIRV::AccessQualifier::AccessQualifier AccessQual) {
1576 auto Key = SPIRV::irhandle_image(SampledTy: SPIRVToLLVMType.lookup(Val: SampledType), Dim,
1577 Depth, Arrayed, MS: Multisampled, Sampled,
1578 ImageFormat, AQ: AccessQual);
1579 if (const MachineInstr *MI = findMI(Handle: Key, MF: &MIRBuilder.getMF()))
1580 return MI;
1581 const MachineInstr *NewMI =
1582 createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
1583 auto MIB =
1584 MIRBuilder.buildInstr(Opcode: SPIRV::OpTypeImage)
1585 .addDef(RegNo: createTypeVReg(MIRBuilder))
1586 .addUse(RegNo: getSPIRVTypeID(SpirvType: SampledType))
1587 .addImm(Val: Dim)
1588 .addImm(Val: Depth) // Depth (whether or not it is a Depth image).
1589 .addImm(Val: Arrayed) // Arrayed.
1590 .addImm(Val: Multisampled) // Multisampled (0 = only single-sample).
1591 .addImm(Val: Sampled) // Sampled (0 = usage known at runtime).
1592 .addImm(Val: ImageFormat);
1593 if (AccessQual != SPIRV::AccessQualifier::None)
1594 MIB.addImm(Val: AccessQual);
1595 return MIB;
1596 });
1597 add(Handle: Key, MI: NewMI);
1598 return NewMI;
1599}
1600
1601SPIRVType *
1602SPIRVGlobalRegistry::getOrCreateOpTypeSampler(MachineIRBuilder &MIRBuilder) {
1603 auto Key = SPIRV::irhandle_sampler();
1604 const MachineFunction *MF = &MIRBuilder.getMF();
1605 if (const MachineInstr *MI = findMI(Handle: Key, MF))
1606 return MI;
1607 const MachineInstr *NewMI =
1608 createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
1609 return MIRBuilder.buildInstr(Opcode: SPIRV::OpTypeSampler)
1610 .addDef(RegNo: createTypeVReg(MIRBuilder));
1611 });
1612 add(Handle: Key, MI: NewMI);
1613 return NewMI;
1614}
1615
1616SPIRVType *SPIRVGlobalRegistry::getOrCreateOpTypePipe(
1617 MachineIRBuilder &MIRBuilder,
1618 SPIRV::AccessQualifier::AccessQualifier AccessQual) {
1619 auto Key = SPIRV::irhandle_pipe(AQ: AccessQual);
1620 if (const MachineInstr *MI = findMI(Handle: Key, MF: &MIRBuilder.getMF()))
1621 return MI;
1622 const MachineInstr *NewMI =
1623 createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
1624 return MIRBuilder.buildInstr(Opcode: SPIRV::OpTypePipe)
1625 .addDef(RegNo: createTypeVReg(MIRBuilder))
1626 .addImm(Val: AccessQual);
1627 });
1628 add(Handle: Key, MI: NewMI);
1629 return NewMI;
1630}
1631
1632SPIRVType *SPIRVGlobalRegistry::getOrCreateOpTypeDeviceEvent(
1633 MachineIRBuilder &MIRBuilder) {
1634 auto Key = SPIRV::irhandle_event();
1635 if (const MachineInstr *MI = findMI(Handle: Key, MF: &MIRBuilder.getMF()))
1636 return MI;
1637 const MachineInstr *NewMI =
1638 createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
1639 return MIRBuilder.buildInstr(Opcode: SPIRV::OpTypeDeviceEvent)
1640 .addDef(RegNo: createTypeVReg(MIRBuilder));
1641 });
1642 add(Handle: Key, MI: NewMI);
1643 return NewMI;
1644}
1645
1646SPIRVType *SPIRVGlobalRegistry::getOrCreateOpTypeSampledImage(
1647 SPIRVType *ImageType, MachineIRBuilder &MIRBuilder) {
1648 auto Key = SPIRV::irhandle_sampled_image(
1649 SampledTy: SPIRVToLLVMType.lookup(Val: MIRBuilder.getMF().getRegInfo().getVRegDef(
1650 Reg: ImageType->getOperand(i: 1).getReg())),
1651 ImageTy: ImageType);
1652 if (const MachineInstr *MI = findMI(Handle: Key, MF: &MIRBuilder.getMF()))
1653 return MI;
1654 const MachineInstr *NewMI =
1655 createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
1656 return MIRBuilder.buildInstr(Opcode: SPIRV::OpTypeSampledImage)
1657 .addDef(RegNo: createTypeVReg(MIRBuilder))
1658 .addUse(RegNo: getSPIRVTypeID(SpirvType: ImageType));
1659 });
1660 add(Handle: Key, MI: NewMI);
1661 return NewMI;
1662}
1663
1664SPIRVType *SPIRVGlobalRegistry::getOrCreateOpTypeCoopMatr(
1665 MachineIRBuilder &MIRBuilder, const TargetExtType *ExtensionType,
1666 const SPIRVType *ElemType, uint32_t Scope, uint32_t Rows, uint32_t Columns,
1667 uint32_t Use, bool EmitIR) {
1668 if (const MachineInstr *MI =
1669 findMI(T: ExtensionType, RequiresExplicitLayout: false, MF: &MIRBuilder.getMF()))
1670 return MI;
1671 const MachineInstr *NewMI =
1672 createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
1673 SPIRVType *SpvTypeInt32 = getOrCreateSPIRVIntegerType(BitWidth: 32, MIRBuilder);
1674 const Type *ET = getTypeForSPIRVType(Ty: ElemType);
1675 if (ET->isIntegerTy() && ET->getIntegerBitWidth() == 4 &&
1676 cast<SPIRVSubtarget>(Val: MIRBuilder.getMF().getSubtarget())
1677 .canUseExtension(E: SPIRV::Extension::SPV_INTEL_int4)) {
1678 MIRBuilder.buildInstr(Opcode: SPIRV::OpCapability)
1679 .addImm(Val: SPIRV::Capability::Int4CooperativeMatrixINTEL);
1680 }
1681 return MIRBuilder.buildInstr(Opcode: SPIRV::OpTypeCooperativeMatrixKHR)
1682 .addDef(RegNo: createTypeVReg(MIRBuilder))
1683 .addUse(RegNo: getSPIRVTypeID(SpirvType: ElemType))
1684 .addUse(RegNo: buildConstantInt(Val: Scope, MIRBuilder, SpvType: SpvTypeInt32, EmitIR))
1685 .addUse(RegNo: buildConstantInt(Val: Rows, MIRBuilder, SpvType: SpvTypeInt32, EmitIR))
1686 .addUse(RegNo: buildConstantInt(Val: Columns, MIRBuilder, SpvType: SpvTypeInt32, EmitIR))
1687 .addUse(RegNo: buildConstantInt(Val: Use, MIRBuilder, SpvType: SpvTypeInt32, EmitIR));
1688 });
1689 add(T: ExtensionType, RequiresExplicitLayout: false, MI: NewMI);
1690 return NewMI;
1691}
1692
1693SPIRVType *SPIRVGlobalRegistry::getOrCreateOpTypeByOpcode(
1694 const Type *Ty, MachineIRBuilder &MIRBuilder, unsigned Opcode) {
1695 if (const MachineInstr *MI = findMI(T: Ty, RequiresExplicitLayout: false, MF: &MIRBuilder.getMF()))
1696 return MI;
1697 const MachineInstr *NewMI =
1698 createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
1699 return MIRBuilder.buildInstr(Opcode).addDef(RegNo: createTypeVReg(MIRBuilder));
1700 });
1701 add(T: Ty, RequiresExplicitLayout: false, MI: NewMI);
1702 return NewMI;
1703}
1704
1705SPIRVType *SPIRVGlobalRegistry::getOrCreateUnknownType(
1706 const Type *Ty, MachineIRBuilder &MIRBuilder, unsigned Opcode,
1707 const ArrayRef<MCOperand> Operands) {
1708 if (const MachineInstr *MI = findMI(T: Ty, RequiresExplicitLayout: false, MF: &MIRBuilder.getMF()))
1709 return MI;
1710 Register ResVReg = createTypeVReg(MIRBuilder);
1711 const MachineInstr *NewMI =
1712 createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
1713 MachineInstrBuilder MIB = MIRBuilder.buildInstr(Opcode: SPIRV::UNKNOWN_type)
1714 .addDef(RegNo: ResVReg)
1715 .addImm(Val: Opcode);
1716 for (MCOperand Operand : Operands) {
1717 if (Operand.isReg()) {
1718 MIB.addUse(RegNo: Operand.getReg());
1719 } else if (Operand.isImm()) {
1720 MIB.addImm(Val: Operand.getImm());
1721 }
1722 }
1723 return MIB;
1724 });
1725 add(T: Ty, RequiresExplicitLayout: false, MI: NewMI);
1726 return NewMI;
1727}
1728
1729// Returns nullptr if it is unable to recognize the SPIR-V type name.
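// Illustrative inputs (the exact spellings are defined by the builtin-type and
// basic-type parsers): a builtin name with a recognized prefix, or OpenCL-style
// names such as "int", "float4" or "float vector[4]", optionally followed by
// "*" to request a pointer in storage class SC.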
1730SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVTypeByName(
1731 StringRef TypeStr, MachineIRBuilder &MIRBuilder, bool EmitIR,
1732 SPIRV::StorageClass::StorageClass SC,
1733 SPIRV::AccessQualifier::AccessQualifier AQ) {
1734 unsigned VecElts = 0;
1735 auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
1736
1737 // Parse strings representing either a SPIR-V or OpenCL builtin type.
1738 if (hasBuiltinTypePrefix(Name: TypeStr))
1739 return getOrCreateSPIRVType(Ty: SPIRV::parseBuiltinTypeNameToTargetExtType(
1740 TypeName: TypeStr.str(), Context&: MIRBuilder.getContext()),
1741 MIRBuilder, AccessQual: AQ, ExplicitLayoutRequired: false, EmitIR: true);
1742
1743 // Parse type name in either "typeN" or "type vector[N]" format, where
1744 // N is the number of elements of the vector.
1745 Type *Ty;
1746
1747 Ty = parseBasicTypeName(TypeName&: TypeStr, Ctx);
1748 if (!Ty)
1749 // Unable to recognize the SPIR-V type name.
1750 return nullptr;
1751
1752 const SPIRVType *SpirvTy =
1753 getOrCreateSPIRVType(Ty, MIRBuilder, AccessQual: AQ, ExplicitLayoutRequired: false, EmitIR: true);
1754
1755 // Handle "type*" or "type* vector[N]".
1756 if (TypeStr.consume_front(Prefix: "*"))
1757 SpirvTy = getOrCreateSPIRVPointerType(BaseType: Ty, MIRBuilder, SC);
1758
1759 // Handle "typeN*" or "type vector[N]*".
1760 bool IsPtrToVec = TypeStr.consume_back(Suffix: "*");
1761
1762 if (TypeStr.consume_front(Prefix: " vector[")) {
1763 TypeStr = TypeStr.substr(Start: 0, N: TypeStr.find(C: ']'));
1764 }
1765 TypeStr.getAsInteger(Radix: 10, Result&: VecElts);
1766 if (VecElts > 0)
1767 SpirvTy = getOrCreateSPIRVVectorType(BaseType: SpirvTy, NumElements: VecElts, MIRBuilder, EmitIR);
1768
1769 if (IsPtrToVec)
1770 SpirvTy = getOrCreateSPIRVPointerType(BaseType: SpirvTy, MIRBuilder, SC);
1771
1772 return SpirvTy;
1773}
1774
1775SPIRVType *
1776SPIRVGlobalRegistry::getOrCreateSPIRVIntegerType(unsigned BitWidth,
1777 MachineIRBuilder &MIRBuilder) {
1778 return getOrCreateSPIRVType(
1779 Ty: IntegerType::get(C&: MIRBuilder.getMF().getFunction().getContext(), NumBits: BitWidth),
1780 MIRBuilder, AccessQual: SPIRV::AccessQualifier::ReadWrite, ExplicitLayoutRequired: false, EmitIR: true);
1781}
1782
1783SPIRVType *SPIRVGlobalRegistry::finishCreatingSPIRVType(const Type *LLVMTy,
1784 SPIRVType *SpirvType) {
1785 assert(CurMF == SpirvType->getMF());
1786 VRegToTypeMap[CurMF][getSPIRVTypeID(SpirvType)] = SpirvType;
1787 SPIRVToLLVMType[SpirvType] = unifyPtrType(Ty: LLVMTy);
1788 return SpirvType;
1789}
1790
1791SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVType(unsigned BitWidth,
1792 MachineInstr &I,
1793 const SPIRVInstrInfo &TII,
1794 unsigned SPIRVOPcode,
1795 Type *Ty) {
1796 if (const MachineInstr *MI = findMI(T: Ty, RequiresExplicitLayout: false, MF: CurMF))
1797 return MI;
1798 MachineBasicBlock &DepMBB = I.getMF()->front();
1799 MachineIRBuilder MIRBuilder(DepMBB, DepMBB.getFirstNonPHI());
1800 const MachineInstr *NewMI =
1801 createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
1802 auto NewTypeMI = BuildMI(BB&: MIRBuilder.getMBB(), I&: *MIRBuilder.getInsertPt(),
1803 MIMD: MIRBuilder.getDL(), MCID: TII.get(Opcode: SPIRVOPcode))
1804 .addDef(RegNo: createTypeVReg(MRI&: CurMF->getRegInfo()))
1805 .addImm(Val: BitWidth);
1806 // Don't add Encoding to FP type
1807 if (!Ty->isFloatTy()) {
1808 return NewTypeMI.addImm(Val: 0);
1809 } else {
1810 return NewTypeMI;
1811 }
1812 });
1813 add(T: Ty, RequiresExplicitLayout: false, MI: NewMI);
1814 return finishCreatingSPIRVType(LLVMTy: Ty, SpirvType: NewMI);
1815}
1816
1817SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVIntegerType(
1818 unsigned BitWidth, MachineInstr &I, const SPIRVInstrInfo &TII) {
1819 // Maybe adjust the bit width to keep the DuplicateTracker consistent.
1820 // Without such an adjustment SPIRVGlobalRegistry::getOpTypeInt() could
1821 // create, for example, the same "OpTypeInt 8" type for a series of LLVM
1822 // integer types with fewer than 8 bits, causing duplicate type definitions.
1823 if (BitWidth > 1)
1824 BitWidth = adjustOpTypeIntWidth(Width: BitWidth);
1825 Type *LLVMTy = IntegerType::get(C&: CurMF->getFunction().getContext(), NumBits: BitWidth);
1826 return getOrCreateSPIRVType(BitWidth, I, TII, SPIRVOPcode: SPIRV::OpTypeInt, Ty: LLVMTy);
1827}
1828
1829SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVFloatType(
1830 unsigned BitWidth, MachineInstr &I, const SPIRVInstrInfo &TII) {
1831 LLVMContext &Ctx = CurMF->getFunction().getContext();
1832 Type *LLVMTy;
1833 switch (BitWidth) {
1834 case 16:
1835 LLVMTy = Type::getHalfTy(C&: Ctx);
1836 break;
1837 case 32:
1838 LLVMTy = Type::getFloatTy(C&: Ctx);
1839 break;
1840 case 64:
1841 LLVMTy = Type::getDoubleTy(C&: Ctx);
1842 break;
1843 default:
1844 llvm_unreachable("Bit width is of unexpected size.");
1845 }
1846 return getOrCreateSPIRVType(BitWidth, I, TII, SPIRVOPcode: SPIRV::OpTypeFloat, Ty: LLVMTy);
1847}
1848
1849SPIRVType *
1850SPIRVGlobalRegistry::getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder,
1851 bool EmitIR) {
1852 return getOrCreateSPIRVType(
1853 Ty: IntegerType::get(C&: MIRBuilder.getMF().getFunction().getContext(), NumBits: 1),
1854 MIRBuilder, AccessQual: SPIRV::AccessQualifier::ReadWrite, ExplicitLayoutRequired: false, EmitIR);
1855}
1856
1857SPIRVType *
1858SPIRVGlobalRegistry::getOrCreateSPIRVBoolType(MachineInstr &I,
1859 const SPIRVInstrInfo &TII) {
1860 Type *Ty = IntegerType::get(C&: CurMF->getFunction().getContext(), NumBits: 1);
1861 if (const MachineInstr *MI = findMI(T: Ty, RequiresExplicitLayout: false, MF: CurMF))
1862 return MI;
1863 MachineBasicBlock &DepMBB = I.getMF()->front();
1864 MachineIRBuilder MIRBuilder(DepMBB, DepMBB.getFirstNonPHI());
1865 const MachineInstr *NewMI =
1866 createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
1867 return BuildMI(BB&: MIRBuilder.getMBB(), I&: *MIRBuilder.getInsertPt(),
1868 MIMD: MIRBuilder.getDL(), MCID: TII.get(Opcode: SPIRV::OpTypeBool))
1869 .addDef(RegNo: createTypeVReg(MRI&: CurMF->getRegInfo()));
1870 });
1871 add(T: Ty, RequiresExplicitLayout: false, MI: NewMI);
1872 return finishCreatingSPIRVType(LLVMTy: Ty, SpirvType: NewMI);
1873}
1874
1875SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVVectorType(
1876 SPIRVType *BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder,
1877 bool EmitIR) {
1878 return getOrCreateSPIRVType(
1879 Ty: FixedVectorType::get(ElementType: const_cast<Type *>(getTypeForSPIRVType(Ty: BaseType)),
1880 NumElts: NumElements),
1881 MIRBuilder, AccessQual: SPIRV::AccessQualifier::ReadWrite, ExplicitLayoutRequired: false, EmitIR);
1882}
1883
1884SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVVectorType(
1885 SPIRVType *BaseType, unsigned NumElements, MachineInstr &I,
1886 const SPIRVInstrInfo &TII) {
1887 Type *Ty = FixedVectorType::get(
1888 ElementType: const_cast<Type *>(getTypeForSPIRVType(Ty: BaseType)), NumElts: NumElements);
1889 if (const MachineInstr *MI = findMI(T: Ty, RequiresExplicitLayout: false, MF: CurMF))
1890 return MI;
1891 MachineInstr *DepMI = const_cast<MachineInstr *>(BaseType);
1892 MachineIRBuilder MIRBuilder(*DepMI->getParent(), DepMI->getIterator());
1893 const MachineInstr *NewMI =
1894 createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
1895 return BuildMI(BB&: MIRBuilder.getMBB(), I&: *MIRBuilder.getInsertPt(),
1896 MIMD: MIRBuilder.getDL(), MCID: TII.get(Opcode: SPIRV::OpTypeVector))
1897 .addDef(RegNo: createTypeVReg(MRI&: CurMF->getRegInfo()))
1898 .addUse(RegNo: getSPIRVTypeID(SpirvType: BaseType))
1899 .addImm(Val: NumElements);
1900 });
1901 add(T: Ty, RequiresExplicitLayout: false, MI: NewMI);
1902 return finishCreatingSPIRVType(LLVMTy: Ty, SpirvType: NewMI);
1903}
1904
1905SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVPointerType(
1906 const Type *BaseType, MachineInstr &I,
1907 SPIRV::StorageClass::StorageClass SC) {
1908 MachineIRBuilder MIRBuilder(I);
1909 return getOrCreateSPIRVPointerType(BaseType, MIRBuilder, SC);
1910}
1911
1912SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVPointerType(
1913 const Type *BaseType, MachineIRBuilder &MIRBuilder,
1914 SPIRV::StorageClass::StorageClass SC) {
1915 // TODO: Need to check if EmitIr should always be true.
1916 SPIRVType *SpirvBaseType = getOrCreateSPIRVType(
1917 Ty: BaseType, MIRBuilder, AccessQual: SPIRV::AccessQualifier::ReadWrite,
1918 ExplicitLayoutRequired: storageClassRequiresExplictLayout(SC), EmitIR: true);
1919 assert(SpirvBaseType);
1920 return getOrCreateSPIRVPointerTypeInternal(BaseType: SpirvBaseType, MIRBuilder, SC);
1921}
1922
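// Rebuilds the pointer type with the new storage class while keeping the same
// pointee type. The old and new storage classes must agree on whether an
// explicit layout is required, since the pointee type is reused as-is.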
1923SPIRVType *SPIRVGlobalRegistry::changePointerStorageClass(
1924 SPIRVType *PtrType, SPIRV::StorageClass::StorageClass SC, MachineInstr &I) {
1925 [[maybe_unused]] SPIRV::StorageClass::StorageClass OldSC =
1926 getPointerStorageClass(Type: PtrType);
1927 assert(storageClassRequiresExplictLayout(OldSC) ==
1928 storageClassRequiresExplictLayout(SC));
1929
1930 SPIRVType *PointeeType = getPointeeType(PtrType);
1931 MachineIRBuilder MIRBuilder(I);
1932 return getOrCreateSPIRVPointerTypeInternal(BaseType: PointeeType, MIRBuilder, SC);
1933}
1934
1935SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVPointerType(
1936 SPIRVType *BaseType, MachineIRBuilder &MIRBuilder,
1937 SPIRV::StorageClass::StorageClass SC) {
1938 const Type *LLVMType = getTypeForSPIRVType(Ty: BaseType);
1939 assert(!storageClassRequiresExplictLayout(SC));
1940 SPIRVType *R = getOrCreateSPIRVPointerType(BaseType: LLVMType, MIRBuilder, SC);
1941 assert(
1942 getPointeeType(R) == BaseType &&
1943 "The base type was not correctly laid out for the given storage class.");
1944 return R;
1945}
1946
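// Pointer types are keyed by (pointee LLVM type, address space), so pointers
// to the same pointee in different storage classes get distinct entries. The
// emitted instruction is "OpTypePointer SC %pointee".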
1947SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVPointerTypeInternal(
1948 SPIRVType *BaseType, MachineIRBuilder &MIRBuilder,
1949 SPIRV::StorageClass::StorageClass SC) {
1950 const Type *PointerElementType = getTypeForSPIRVType(Ty: BaseType);
1951 unsigned AddressSpace = storageClassToAddressSpace(SC);
1952 if (const MachineInstr *MI = findMI(PointeeTy: PointerElementType, AddressSpace, MF: CurMF))
1953 return MI;
1954 Type *Ty = TypedPointerType::get(ElementType: const_cast<Type *>(PointerElementType),
1955 AddressSpace);
1956 const MachineInstr *NewMI =
1957 createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
1958 return BuildMI(BB&: MIRBuilder.getMBB(), I: MIRBuilder.getInsertPt(),
1959 MIMD: MIRBuilder.getDebugLoc(),
1960 MCID: MIRBuilder.getTII().get(Opcode: SPIRV::OpTypePointer))
1961 .addDef(RegNo: createTypeVReg(MRI&: CurMF->getRegInfo()))
1962 .addImm(Val: static_cast<uint32_t>(SC))
1963 .addUse(RegNo: getSPIRVTypeID(SpirvType: BaseType));
1964 });
1965 add(PointeeTy: PointerElementType, AddressSpace, MI: NewMI);
1966 return finishCreatingSPIRVType(LLVMTy: Ty, SpirvType: NewMI);
1967}
1968
1969Register SPIRVGlobalRegistry::getOrCreateUndef(MachineInstr &I,
1970 SPIRVType *SpvType,
1971 const SPIRVInstrInfo &TII) {
1972 UndefValue *UV =
1973 UndefValue::get(T: const_cast<Type *>(getTypeForSPIRVType(Ty: SpvType)));
1974 Register Res = find(V: UV, MF: CurMF);
1975 if (Res.isValid())
1976 return Res;
1977
1978 LLT LLTy = LLT::scalar(SizeInBits: 64);
1979 Res = CurMF->getRegInfo().createGenericVirtualRegister(Ty: LLTy);
1980 CurMF->getRegInfo().setRegClass(Reg: Res, RC: &SPIRV::iIDRegClass);
1981 assignSPIRVTypeToVReg(SpirvType: SpvType, VReg: Res, MF: *CurMF);
1982
1983 MachineInstr *DepMI = const_cast<MachineInstr *>(SpvType);
1984 MachineIRBuilder MIRBuilder(*DepMI->getParent(), DepMI->getIterator());
1985 const MachineInstr *NewMI =
1986 createOpType(MIRBuilder, Op: [&](MachineIRBuilder &MIRBuilder) {
1987 auto MIB = BuildMI(BB&: MIRBuilder.getMBB(), I&: *MIRBuilder.getInsertPt(),
1988 MIMD: MIRBuilder.getDL(), MCID: TII.get(Opcode: SPIRV::OpUndef))
1989 .addDef(RegNo: Res)
1990 .addUse(RegNo: getSPIRVTypeID(SpirvType: SpvType));
1991 const auto &ST = CurMF->getSubtarget();
1992 constrainSelectedInstRegOperands(I&: *MIB, TII: *ST.getInstrInfo(),
1993 TRI: *ST.getRegisterInfo(),
1994 RBI: *ST.getRegBankInfo());
1995 return MIB;
1996 });
1997 add(V: UV, MI: NewMI);
1998 return Res;
1999}
2000
2001const TargetRegisterClass *
2002SPIRVGlobalRegistry::getRegClass(SPIRVType *SpvType) const {
2003 unsigned Opcode = SpvType->getOpcode();
2004 switch (Opcode) {
2005 case SPIRV::OpTypeFloat:
2006 return &SPIRV::fIDRegClass;
2007 case SPIRV::OpTypePointer:
2008 return &SPIRV::pIDRegClass;
2009 case SPIRV::OpTypeVector: {
2010 SPIRVType *ElemType = getSPIRVTypeForVReg(VReg: SpvType->getOperand(i: 1).getReg());
2011 unsigned ElemOpcode = ElemType ? ElemType->getOpcode() : 0;
2012 if (ElemOpcode == SPIRV::OpTypeFloat)
2013 return &SPIRV::vfIDRegClass;
2014 if (ElemOpcode == SPIRV::OpTypePointer)
2015 return &SPIRV::vpIDRegClass;
2016 return &SPIRV::vIDRegClass;
2017 }
2018 }
2019 return &SPIRV::iIDRegClass;
2020}
2021
2022inline unsigned getAS(SPIRVType *SpvType) {
2023 return storageClassToAddressSpace(
2024 SC: static_cast<SPIRV::StorageClass::StorageClass>(
2025 SpvType->getOperand(i: 1).getImm()));
2026}
2027
2028LLT SPIRVGlobalRegistry::getRegType(SPIRVType *SpvType) const {
2029 unsigned Opcode = SpvType ? SpvType->getOpcode() : 0;
2030 switch (Opcode) {
2031 case SPIRV::OpTypeInt:
2032 case SPIRV::OpTypeFloat:
2033 case SPIRV::OpTypeBool:
2034 return LLT::scalar(SizeInBits: getScalarOrVectorBitWidth(Type: SpvType));
2035 case SPIRV::OpTypePointer:
2036 return LLT::pointer(AddressSpace: getAS(SpvType), SizeInBits: getPointerSize());
2037 case SPIRV::OpTypeVector: {
2038 SPIRVType *ElemType = getSPIRVTypeForVReg(VReg: SpvType->getOperand(i: 1).getReg());
2039 LLT ET;
2040 switch (ElemType ? ElemType->getOpcode() : 0) {
2041 case SPIRV::OpTypePointer:
2042 ET = LLT::pointer(AddressSpace: getAS(SpvType: ElemType), SizeInBits: getPointerSize());
2043 break;
2044 case SPIRV::OpTypeInt:
2045 case SPIRV::OpTypeFloat:
2046 case SPIRV::OpTypeBool:
2047 ET = LLT::scalar(SizeInBits: getScalarOrVectorBitWidth(Type: ElemType));
2048 break;
2049 default:
2050 ET = LLT::scalar(SizeInBits: 64);
2051 }
2052 return LLT::fixed_vector(
2053 NumElements: static_cast<unsigned>(SpvType->getOperand(i: 2).getImm()), ScalarTy: ET);
2054 }
2055 }
2056 return LLT::scalar(SizeInBits: 64);
2057}
2058
2059// An aliasing list MD node contains several scope MD nodes within it. Each
2060// scope MD node has a self-reference plus an extra MD node for the aliasing
2061// domain, and may also contain an optional string operand. A domain MD node
2062// contains a self-reference with an optional string operand. Here we unfold
2063// the list, creating SPIR-V aliasing instructions.
2064// TODO: add support for an optional string operand.
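// For reference, the expected input is the standard LLVM aliasing metadata
// shape, roughly:
//   !list   = !{!scope, ...}
//   !scope  = !{!scope, !domain}   ; operand 1 is the domain node
//   !domain = !{!domain}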
2065MachineInstr *SPIRVGlobalRegistry::getOrAddMemAliasingINTELInst(
2066 MachineIRBuilder &MIRBuilder, const MDNode *AliasingListMD) {
2067 if (AliasingListMD->getNumOperands() == 0)
2068 return nullptr;
2069 if (auto L = AliasInstMDMap.find(x: AliasingListMD); L != AliasInstMDMap.end())
2070 return L->second;
2071
2072 SmallVector<MachineInstr *> ScopeList;
2073 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2074 for (const MDOperand &MDListOp : AliasingListMD->operands()) {
2075 if (MDNode *ScopeMD = dyn_cast<MDNode>(Val: MDListOp)) {
2076 if (ScopeMD->getNumOperands() < 2)
2077 return nullptr;
2078 MDNode *DomainMD = dyn_cast<MDNode>(Val: ScopeMD->getOperand(I: 1));
2079 if (!DomainMD)
2080 return nullptr;
2081 auto *Domain = [&] {
2082 auto D = AliasInstMDMap.find(x: DomainMD);
2083 if (D != AliasInstMDMap.end())
2084 return D->second;
2085 const Register Ret = MRI->createVirtualRegister(RegClass: &SPIRV::IDRegClass);
2086 auto MIB =
2087 MIRBuilder.buildInstr(Opcode: SPIRV::OpAliasDomainDeclINTEL).addDef(RegNo: Ret);
2088 return MIB.getInstr();
2089 }();
2090 AliasInstMDMap.insert(x: std::make_pair(x&: DomainMD, y&: Domain));
2091 auto *Scope = [&] {
2092 auto S = AliasInstMDMap.find(x: ScopeMD);
2093 if (S != AliasInstMDMap.end())
2094 return S->second;
2095 const Register Ret = MRI->createVirtualRegister(RegClass: &SPIRV::IDRegClass);
2096 auto MIB = MIRBuilder.buildInstr(Opcode: SPIRV::OpAliasScopeDeclINTEL)
2097 .addDef(RegNo: Ret)
2098 .addUse(RegNo: Domain->getOperand(i: 0).getReg());
2099 return MIB.getInstr();
2100 }();
2101 AliasInstMDMap.insert(x: std::make_pair(x&: ScopeMD, y&: Scope));
2102 ScopeList.push_back(Elt: Scope);
2103 }
2104 }
2105
2106 const Register Ret = MRI->createVirtualRegister(RegClass: &SPIRV::IDRegClass);
2107 auto MIB =
2108 MIRBuilder.buildInstr(Opcode: SPIRV::OpAliasScopeListDeclINTEL).addDef(RegNo: Ret);
2109 for (auto *Scope : ScopeList)
2110 MIB.addUse(RegNo: Scope->getOperand(i: 0).getReg());
2111 auto List = MIB.getInstr();
2112 AliasInstMDMap.insert(x: std::make_pair(x&: AliasingListMD, y&: List));
2113 return List;
2114}
2115
2116void SPIRVGlobalRegistry::buildMemAliasingOpDecorate(
2117 Register Reg, MachineIRBuilder &MIRBuilder, uint32_t Dec,
2118 const MDNode *AliasingListMD) {
2119 MachineInstr *AliasList =
2120 getOrAddMemAliasingINTELInst(MIRBuilder, AliasingListMD);
2121 if (!AliasList)
2122 return;
2123 MIRBuilder.buildInstr(Opcode: SPIRV::OpDecorate)
2124 .addUse(RegNo: Reg)
2125 .addImm(Val: Dec)
2126 .addUse(RegNo: AliasList->getOperand(i: 0).getReg());
2127}
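
// Replaces all uses of Old with New and keeps the deduced-element-type and
// assign-ptr-type records in sync with the replacement.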
2128void SPIRVGlobalRegistry::replaceAllUsesWith(Value *Old, Value *New,
2129 bool DeleteOld) {
2130 Old->replaceAllUsesWith(V: New);
2131 updateIfExistDeducedElementType(OldVal: Old, NewVal: New, DeleteOld);
2132 updateIfExistAssignPtrTypeInstr(OldVal: Old, NewVal: New, DeleteOld);
2133}
2134
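// Records the SPIR-V type of Arg in the IR. When both Arg and Ty are
// aggregates (and a fake use is allowed), the type is conveyed via an
// spv_value_md metadata intrinsic plus a fake_use of Arg; otherwise a regular
// spv_assign_type intrinsic is emitted.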
2135void SPIRVGlobalRegistry::buildAssignType(IRBuilder<> &B, Type *Ty,
2136 Value *Arg) {
2137 Value *OfType = getNormalizedPoisonValue(Ty);
2138 CallInst *AssignCI = nullptr;
2139 if (Arg->getType()->isAggregateType() && Ty->isAggregateType() &&
2140 allowEmitFakeUse(Arg)) {
2141 LLVMContext &Ctx = Arg->getContext();
2142 SmallVector<Metadata *, 2> ArgMDs{
2143 MDNode::get(Context&: Ctx, MDs: ValueAsMetadata::getConstant(C: OfType)),
2144 MDString::get(Context&: Ctx, Str: Arg->getName())};
2145 B.CreateIntrinsic(ID: Intrinsic::spv_value_md,
2146 Args: {MetadataAsValue::get(Context&: Ctx, MD: MDTuple::get(Context&: Ctx, MDs: ArgMDs))});
2147 AssignCI = B.CreateIntrinsic(ID: Intrinsic::fake_use, Args: {Arg});
2148 } else {
2149 AssignCI = buildIntrWithMD(IntrID: Intrinsic::spv_assign_type, Types: {Arg->getType()},
2150 Arg: OfType, Arg2: Arg, Imms: {}, B);
2151 }
2152 addAssignPtrTypeInstr(Val: Arg, AssignPtrTyCI: AssignCI);
2153}
2154
2155void SPIRVGlobalRegistry::buildAssignPtr(IRBuilder<> &B, Type *ElemTy,
2156 Value *Arg) {
2157 Value *OfType = PoisonValue::get(T: ElemTy);
2158 CallInst *AssignPtrTyCI = findAssignPtrTypeInstr(Val: Arg);
2159 Function *CurrF =
2160 B.GetInsertBlock() ? B.GetInsertBlock()->getParent() : nullptr;
2161 if (AssignPtrTyCI == nullptr ||
2162 AssignPtrTyCI->getParent()->getParent() != CurrF) {
2163 AssignPtrTyCI = buildIntrWithMD(
2164 IntrID: Intrinsic::spv_assign_ptr_type, Types: {Arg->getType()}, Arg: OfType, Arg2: Arg,
2165 Imms: {B.getInt32(C: getPointerAddressSpace(T: Arg->getType()))}, B);
2166 addDeducedElementType(Val: AssignPtrTyCI, Ty: ElemTy);
2167 addDeducedElementType(Val: Arg, Ty: ElemTy);
2168 addAssignPtrTypeInstr(Val: Arg, AssignPtrTyCI);
2169 } else {
2170 updateAssignType(AssignCI: AssignPtrTyCI, Arg, OfType);
2171 }
2172}
2173
2174void SPIRVGlobalRegistry::updateAssignType(CallInst *AssignCI, Value *Arg,
2175 Value *OfType) {
2176 AssignCI->setArgOperand(i: 1, v: buildMD(Arg: OfType));
2177 if (cast<IntrinsicInst>(Val: AssignCI)->getIntrinsicID() !=
2178 Intrinsic::spv_assign_ptr_type)
2179 return;
2180
2181 // Update the association with the pointee type.
2182 Type *ElemTy = OfType->getType();
2183 addDeducedElementType(Val: AssignCI, Ty: ElemTy);
2184 addDeducedElementType(Val: Arg, Ty: ElemTy);
2185}
2186
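// Emits an OpMemberDecorate Offset for every member of Ty. Note that the
// offsets come from a default-constructed DataLayout rather than the module's
// data layout.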
2187void SPIRVGlobalRegistry::addStructOffsetDecorations(
2188 Register Reg, StructType *Ty, MachineIRBuilder &MIRBuilder) {
2189 DataLayout DL;
2190 ArrayRef<TypeSize> Offsets = DL.getStructLayout(Ty)->getMemberOffsets();
2191 for (uint32_t I = 0; I < Ty->getNumElements(); ++I) {
2192 buildOpMemberDecorate(Reg, MIRBuilder, Dec: SPIRV::Decoration::Offset, Member: I,
2193 DecArgs: {static_cast<uint32_t>(Offsets[I])});
2194 }
2195}
2196
2197void SPIRVGlobalRegistry::addArrayStrideDecorations(
2198 Register Reg, Type *ElementType, MachineIRBuilder &MIRBuilder) {
2199 uint32_t SizeInBytes = DataLayout().getTypeSizeInBits(Ty: ElementType) / 8;
2200 buildOpDecorate(Reg, MIRBuilder, Dec: SPIRV::Decoration::ArrayStride,
2201 DecArgs: {SizeInBytes});
2202}
2203
2204bool SPIRVGlobalRegistry::hasBlockDecoration(SPIRVType *Type) const {
2205 Register Def = getSPIRVTypeID(SpirvType: Type);
2206 for (const MachineInstr &Use :
2207 Type->getMF()->getRegInfo().use_instructions(Reg: Def)) {
2208 if (Use.getOpcode() != SPIRV::OpDecorate)
2209 continue;
2210
2211 if (Use.getOperand(i: 1).getImm() == SPIRV::Decoration::Block)
2212 return true;
2213 }
2214 return false;
2215}
2216