//===- SPIRVBuiltins.cpp - SPIR-V Built-in Functions ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering builtin function calls and types using their
// demangled names and TableGen records.
//
//===----------------------------------------------------------------------===//

#include "SPIRVBuiltins.h"
#include "SPIRV.h"
#include "SPIRVSubtarget.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include <regex>
#include <string>
#include <tuple>

#define DEBUG_TYPE "spirv-builtins"

namespace llvm {
namespace SPIRV {
#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"

struct DemangledBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltinGroup Group;
  uint8_t MinNumArgs;
  uint8_t MaxNumArgs;
};

#define GET_DemangledBuiltins_DECL
#define GET_DemangledBuiltins_IMPL

struct IncomingCall {
  const std::string BuiltinName;
  const DemangledBuiltin *Builtin;

  const Register ReturnRegister;
  const SPIRVType *ReturnType;
  const SmallVectorImpl<Register> &Arguments;

  IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin,
               const Register ReturnRegister, const SPIRVType *ReturnType,
               const SmallVectorImpl<Register> &Arguments)
      : BuiltinName(std::move(BuiltinName)), Builtin(Builtin),
        ReturnRegister(ReturnRegister), ReturnType(ReturnType),
        Arguments(Arguments) {}

  bool isSpirvOp() const { return BuiltinName.rfind("__spirv_", 0) == 0; }
};

struct NativeBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Opcode;
};

#define GET_NativeBuiltins_DECL
#define GET_NativeBuiltins_IMPL

struct GroupBuiltin {
  StringRef Name;
  uint32_t Opcode;
  uint32_t GroupOperation;
  bool IsElect;
  bool IsAllOrAny;
  bool IsAllEqual;
  bool IsBallot;
  bool IsInverseBallot;
  bool IsBallotBitExtract;
  bool IsBallotFindBit;
  bool IsLogical;
  bool NoGroupOperation;
  bool HasBoolArg;
};

#define GET_GroupBuiltins_DECL
#define GET_GroupBuiltins_IMPL

struct IntelSubgroupsBuiltin {
  StringRef Name;
  uint32_t Opcode;
  bool IsBlock;
  bool IsWrite;
  bool IsMedia;
};

#define GET_IntelSubgroupsBuiltins_DECL
#define GET_IntelSubgroupsBuiltins_IMPL

struct AtomicFloatingBuiltin {
  StringRef Name;
  uint32_t Opcode;
};

#define GET_AtomicFloatingBuiltins_DECL
#define GET_AtomicFloatingBuiltins_IMPL

struct GroupUniformBuiltin {
  StringRef Name;
  uint32_t Opcode;
  bool IsLogical;
};

#define GET_GroupUniformBuiltins_DECL
#define GET_GroupUniformBuiltins_IMPL

struct GetBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltIn::BuiltIn Value;
};

using namespace BuiltIn;
#define GET_GetBuiltins_DECL
#define GET_GetBuiltins_IMPL

struct ImageQueryBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Component;
};

#define GET_ImageQueryBuiltins_DECL
#define GET_ImageQueryBuiltins_IMPL

struct IntegerDotProductBuiltin {
  StringRef Name;
  uint32_t Opcode;
  bool IsSwapReq;
};

#define GET_IntegerDotProductBuiltins_DECL
#define GET_IntegerDotProductBuiltins_IMPL

struct ConvertBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  bool IsDestinationSigned;
  bool IsSaturated;
  bool IsRounded;
  bool IsBfloat16;
  bool IsTF32;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

struct VectorLoadStoreBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Number;
  uint32_t ElementCount;
  bool IsRounded;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

using namespace FPRoundingMode;
#define GET_ConvertBuiltins_DECL
#define GET_ConvertBuiltins_IMPL

using namespace InstructionSet;
#define GET_VectorLoadStoreBuiltins_DECL
#define GET_VectorLoadStoreBuiltins_IMPL

#define GET_CLMemoryScope_DECL
#define GET_CLSamplerAddressingMode_DECL
#define GET_CLMemoryFenceFlags_DECL
#define GET_ExtendedBuiltins_DECL
#include "SPIRVGenTables.inc"
} // namespace SPIRV

//===----------------------------------------------------------------------===//
// Misc functions for looking up builtins and verifying requirements using
// TableGen records
//===----------------------------------------------------------------------===//

namespace SPIRV {
/// Parses the name part of the demangled builtin call.
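/// For example, given the demangled call
///   "float4 __spirv_ocl_fmin(float4, float4)"
/// this strips the return type, argument list, and "__spirv_ocl_" prefix and
/// yields "fmin". A "_R<type>" return-type suffix is likewise dropped, e.g.
/// "__spirv_ConvertFToS_Rint" becomes "__spirv_ConvertFToS".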
std::string lookupBuiltinNameHelper(StringRef DemangledCall,
                                    FPDecorationId *DecorationId) {
  StringRef PassPrefix = "(anonymous namespace)::";
  StringRef SpvPrefix = "__spv::";
  std::string BuiltinName = DemangledCall.str();

  // Check if the extracted name contains type information between angle
  // brackets. If so, the builtin is an instantiated template - needs to have
  // the information after angle brackets and return type removed.
  std::size_t Pos = BuiltinName.find(">(");
  if (Pos != std::string::npos) {
    BuiltinName = BuiltinName.substr(0, BuiltinName.rfind('<', Pos));
  } else {
    Pos = BuiltinName.find('(');
    if (Pos != std::string::npos)
      BuiltinName = BuiltinName.substr(0, Pos);
  }
  BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(' ') + 1);

  // Itanium Demangler result may have "(anonymous namespace)::" or "__spv::"
  // prefix.
  if (BuiltinName.find(PassPrefix) == 0)
    BuiltinName = BuiltinName.substr(PassPrefix.size());
  else if (BuiltinName.find(SpvPrefix) == 0)
    BuiltinName = BuiltinName.substr(SpvPrefix.size());

  // Account for possible "__spirv_ocl_" prefix in SPIR-V friendly LLVM IR.
  if (BuiltinName.rfind("__spirv_ocl_", 0) == 0)
    BuiltinName = BuiltinName.substr(12);

  // Check if the extracted name begins with:
  // - "__spirv_ImageSampleExplicitLod"
  // - "__spirv_ImageRead"
  // - "__spirv_ImageWrite"
  // - "__spirv_ImageQuerySizeLod"
  // - "__spirv_UDotKHR"
  // - "__spirv_SDotKHR"
  // - "__spirv_SUDotKHR"
  // - "__spirv_SDotAccSatKHR"
  // - "__spirv_UDotAccSatKHR"
  // - "__spirv_SUDotAccSatKHR"
  // - "__spirv_ReadClockKHR"
  // - "__spirv_SubgroupBlockReadINTEL"
  // - "__spirv_SubgroupImageBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockWriteINTEL"
  // - "__spirv_Convert"
  // - "__spirv_Round"
  // - "__spirv_UConvert"
  // - "__spirv_SConvert"
  // - "__spirv_FConvert"
  // - "__spirv_SatConvert"
  // and maybe contains return type information at the end "_R<type>".
  // If so, extract the plain builtin name without the type information.
  static const std::regex SpvWithR(
      "(__spirv_(ImageSampleExplicitLod|ImageRead|ImageWrite|ImageQuerySizeLod|"
      "UDotKHR|"
      "SDotKHR|SUDotKHR|SDotAccSatKHR|UDotAccSatKHR|SUDotAccSatKHR|"
      "ReadClockKHR|SubgroupBlockReadINTEL|SubgroupImageBlockReadINTEL|"
      "SubgroupImageMediaBlockReadINTEL|SubgroupImageMediaBlockWriteINTEL|"
      "Convert|Round|"
      "UConvert|SConvert|FConvert|SatConvert)[^_]*)(_R[^_]*_?(\\w+)?.*)?");
  std::smatch Match;
  if (std::regex_match(BuiltinName, Match, SpvWithR) && Match.size() > 1) {
    std::ssub_match SubMatch;
    if (DecorationId && Match.size() > 3) {
      SubMatch = Match[4];
      *DecorationId = demangledPostfixToDecorationId(SubMatch.str());
    }
    SubMatch = Match[1];
    BuiltinName = SubMatch.str();
  }

  return BuiltinName;
}
} // namespace SPIRV

/// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
/// the provided \p DemangledCall and specified \p Set.
///
/// The lookup follows this algorithm, returning the first successful match:
/// 1. Search with the plain demangled name (expecting a 1:1 match).
/// 2. Search with the prefix before or suffix after the demangled name
/// signifying the type of the first argument.
///
/// \returns Wrapper around the demangled call and found builtin definition.
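///
/// For example, OpenCL "abs(unsigned int)" has no plain "abs" record in the
/// OpenCL_std set, so the first argument's type character 'u' adds the "u_"
/// prefix and the lookup succeeds with "u_abs".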
static std::unique_ptr<const SPIRV::IncomingCall>
lookupBuiltin(StringRef DemangledCall,
              SPIRV::InstructionSet::InstructionSet Set,
              Register ReturnRegister, const SPIRVType *ReturnType,
              const SmallVectorImpl<Register> &Arguments) {
  std::string BuiltinName = SPIRV::lookupBuiltinNameHelper(DemangledCall);

  SmallVector<StringRef, 10> BuiltinArgumentTypes;
  StringRef BuiltinArgs =
      DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
  BuiltinArgs.split(BuiltinArgumentTypes, ',', -1, false);

  // Look up the builtin in the defined set. Start with the plain demangled
  // name, expecting a 1:1 match in the defined builtin set.
  const SPIRV::DemangledBuiltin *Builtin;
  if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
    return std::make_unique<SPIRV::IncomingCall>(
        BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

  // If the initial look up was unsuccessful and the demangled call takes at
  // least 1 argument, add a prefix or suffix signifying the type of the first
  // argument and repeat the search.
  if (BuiltinArgumentTypes.size() >= 1) {
    char FirstArgumentType = BuiltinArgumentTypes[0][0];
    // Prefix to be added to the builtin's name for lookup.
    // For example, OpenCL "abs" taking an unsigned value has a prefix "u_".
    std::string Prefix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "u_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "s_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      if (Set == SPIRV::InstructionSet::OpenCL_std ||
          Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "f";
      break;
    }

    // If argument-type name prefix was added, look up the builtin again.
    if (!Prefix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

    // If lookup with a prefix failed, find a suffix to be added to the
    // builtin's name for lookup. For example, OpenCL "group_reduce_max" taking
    // an unsigned value has a suffix "u".
    std::string Suffix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      Suffix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      Suffix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      Suffix = "f";
      break;
    }

    // If argument-type name suffix was added, look up the builtin again.
    if (!Suffix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
  }

  // No builtin with such name was found in the set.
  return nullptr;
}

static MachineInstr *getBlockStructInstr(Register ParamReg,
                                         MachineRegisterInfo *MRI) {
  // We expect the following sequence of instructions:
  // %0:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.alloca)
  // or       = G_GLOBAL_VALUE @block_literal_global
  // %1:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.bitcast), %0
  // %2:_(p4) = G_ADDRSPACE_CAST %1:_(pN)
  MachineInstr *MI = MRI->getUniqueVRegDef(ParamReg);
  assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
         MI->getOperand(1).isReg());
  Register BitcastReg = MI->getOperand(1).getReg();
  MachineInstr *BitcastMI = MRI->getUniqueVRegDef(BitcastReg);
  assert(isSpvIntrinsic(*BitcastMI, Intrinsic::spv_bitcast) &&
         BitcastMI->getOperand(2).isReg());
  Register ValueReg = BitcastMI->getOperand(2).getReg();
  MachineInstr *ValueMI = MRI->getUniqueVRegDef(ValueReg);
  return ValueMI;
}

// Return an integer constant corresponding to the given register and
// defined in spv_track_constant.
// TODO: maybe unify with prelegalizer pass.
static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI) {
  MachineInstr *DefMI = MRI->getUniqueVRegDef(Reg);
  assert(DefMI->getOpcode() == TargetOpcode::G_CONSTANT &&
         DefMI->getOperand(1).isCImm());
  return DefMI->getOperand(1).getCImm()->getValue().getZExtValue();
}

// Return type of the instruction result from spv_assign_type intrinsic.
// TODO: maybe unify with prelegalizer pass.
static const Type *getMachineInstrType(MachineInstr *MI) {
  MachineInstr *NextMI = MI->getNextNode();
  if (!NextMI)
    return nullptr;
  if (isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_name))
    if ((NextMI = NextMI->getNextNode()) == nullptr)
      return nullptr;
  Register ValueReg = MI->getOperand(0).getReg();
  if ((!isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_type) &&
       !isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_ptr_type)) ||
      NextMI->getOperand(1).getReg() != ValueReg)
    return nullptr;
  Type *Ty = getMDOperandAsType(NextMI->getOperand(2).getMetadata(), 0);
  assert(Ty && "Type is expected");
  return Ty;
}

static const Type *getBlockStructType(Register ParamReg,
                                      MachineRegisterInfo *MRI) {
  // In principle, this information should be passed to us from Clang via
  // an elementtype attribute. However, said attribute requires that
  // the function call be an intrinsic, which it is not. Instead, we rely on
  // being able to trace this to the declaration of a variable: OpenCL C
  // specification section 6.12.5 should guarantee that we can do this.
  MachineInstr *MI = getBlockStructInstr(ParamReg, MRI);
  if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
    return MI->getOperand(1).getGlobal()->getType();
  assert(isSpvIntrinsic(*MI, Intrinsic::spv_alloca) &&
         "Blocks in OpenCL C must be traceable to allocation site");
  return getMachineInstrType(MI);
}

//===----------------------------------------------------------------------===//
// Helper functions for building misc instructions
//===----------------------------------------------------------------------===//

/// Helper function building either a resulting scalar or vector bool register
/// depending on the expected \p ResultType.
///
/// \returns Tuple of the resulting register and its type.
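/// For example, a v4i32 \p ResultType yields a v4i1 virtual register typed as
/// a 4-element OpTypeVector of OpTypeBool, while a scalar \p ResultType yields
/// an s1 register of plain OpTypeBool.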
static std::tuple<Register, SPIRVType *>
buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType,
                  SPIRVGlobalRegistry *GR) {
  LLT Type;
  SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);

  if (ResultType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned VectorElements = ResultType->getOperand(2).getImm();
    BoolType = GR->getOrCreateSPIRVVectorType(BoolType, VectorElements,
                                              MIRBuilder, true);
    const FixedVectorType *LLVMVectorType =
        cast<FixedVectorType>(GR->getTypeForSPIRVType(BoolType));
    Type = LLT::vector(LLVMVectorType->getElementCount(), 1);
  } else {
    Type = LLT::scalar(1);
  }

  Register ResultRegister =
      MIRBuilder.getMRI()->createGenericVirtualRegister(Type);
  MIRBuilder.getMRI()->setRegClass(ResultRegister, GR->getRegClass(ResultType));
  GR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF());
  return std::make_tuple(ResultRegister, BoolType);
}

/// Helper function for building either a vector or scalar select instruction
/// depending on the expected \p ResultType.
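///
/// OpenCL relational semantics: for vector results "true" is all-ones (-1 in
/// each lane), for scalar results it is 1, hence the constants selected below.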
static bool buildSelectInst(MachineIRBuilder &MIRBuilder,
                            Register ReturnRegister, Register SourceRegister,
                            const SPIRVType *ReturnType,
                            SPIRVGlobalRegistry *GR) {
  Register TrueConst, FalseConst;

  if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned Bits = GR->getScalarOrVectorBitWidth(ReturnType);
    uint64_t AllOnes = APInt::getAllOnes(Bits).getZExtValue();
    TrueConst =
        GR->getOrCreateConsIntVector(AllOnes, MIRBuilder, ReturnType, true);
    FalseConst = GR->getOrCreateConsIntVector(0, MIRBuilder, ReturnType, true);
  } else {
    TrueConst = GR->buildConstantInt(1, MIRBuilder, ReturnType, true);
    FalseConst = GR->buildConstantInt(0, MIRBuilder, ReturnType, true);
  }

  return MIRBuilder.buildSelect(ReturnRegister, SourceRegister, TrueConst,
                                FalseConst);
}

/// Helper function for building a load instruction loading into the
/// \p DestinationReg.
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR, LLT LowLevelType,
                              Register DestinationReg = Register(0)) {
  if (!DestinationReg.isValid())
    DestinationReg = createVirtualRegister(BaseType, GR, MIRBuilder);
  // TODO: consider using correct address space and alignment (p0 is canonical
  // type for selection though).
  MachinePointerInfo PtrInfo = MachinePointerInfo();
  MIRBuilder.buildLoad(DestinationReg, PtrRegister, PtrInfo, Align());
  return DestinationReg;
}

/// Helper function for building a load instruction for loading a builtin
/// global variable of \p BuiltinValue value.
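///
/// For example, work-item queries such as get_global_id() are lowered to a
/// load from an Input-storage-class OpVariable decorated with the matching
/// BuiltIn (GlobalInvocationId in that case).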
static Register buildBuiltinVariableLoad(
    MachineIRBuilder &MIRBuilder, SPIRVType *VariableType,
    SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType,
    Register Reg = Register(0), bool isConst = true,
    const std::optional<SPIRV::LinkageType::LinkageType> &LinkageTy = {
        SPIRV::LinkageType::Import}) {
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::pIDRegClass);
  MIRBuilder.getMRI()->setType(
      NewRegister,
      LLT::pointer(storageClassToAddressSpace(SPIRV::StorageClass::Function),
                   GR->getPointerSize()));
  SPIRVType *PtrType = GR->getOrCreateSPIRVPointerType(
      VariableType, MIRBuilder, SPIRV::StorageClass::Input);
  GR->assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Set up the global OpVariable with the necessary builtin decorations.
  Register Variable = GR->buildGlobalVariable(
      NewRegister, PtrType, getLinkStringForBuiltIn(BuiltinValue), nullptr,
      SPIRV::StorageClass::Input, nullptr, /* isConst= */ isConst, LinkageTy,
      MIRBuilder, false);

  // Load the value from the global variable.
  Register LoadedRegister =
      buildLoadInst(VariableType, Variable, MIRBuilder, GR, LLType, Reg);
  MIRBuilder.getMRI()->setType(LoadedRegister, LLType);
  return LoadedRegister;
}

/// Helper external function for assigning SPIRVType to a register, ensuring
/// the register class and type are set in MRI. Defined in
/// SPIRVPreLegalizer.cpp.
extern void updateRegType(Register Reg, Type *Ty, SPIRVType *SpirvTy,
                          SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB,
                          MachineRegisterInfo &MRI);

// TODO: Move to TableGen.
static SPIRV::MemorySemantics::MemorySemantics
getSPIRVMemSemantics(std::memory_order MemOrder) {
  switch (MemOrder) {
  case std::memory_order_relaxed:
    return SPIRV::MemorySemantics::None;
  case std::memory_order_acquire:
    return SPIRV::MemorySemantics::Acquire;
  case std::memory_order_release:
    return SPIRV::MemorySemantics::Release;
  case std::memory_order_acq_rel:
    return SPIRV::MemorySemantics::AcquireRelease;
  case std::memory_order_seq_cst:
    return SPIRV::MemorySemantics::SequentiallyConsistent;
  default:
    report_fatal_error("Unknown CL memory order");
  }
}

static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
  switch (ClScope) {
  case SPIRV::CLMemoryScope::memory_scope_work_item:
    return SPIRV::Scope::Invocation;
  case SPIRV::CLMemoryScope::memory_scope_work_group:
    return SPIRV::Scope::Workgroup;
  case SPIRV::CLMemoryScope::memory_scope_device:
    return SPIRV::Scope::Device;
  case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
    return SPIRV::Scope::CrossDevice;
  case SPIRV::CLMemoryScope::memory_scope_sub_group:
    return SPIRV::Scope::Subgroup;
  }
  report_fatal_error("Unknown CL memory scope");
}

static Register buildConstantIntReg32(uint64_t Val,
                                      MachineIRBuilder &MIRBuilder,
                                      SPIRVGlobalRegistry *GR) {
  return GR->buildConstantInt(
      Val, MIRBuilder, GR->getOrCreateSPIRVIntegerType(32, MIRBuilder), true);
}

static Register buildScopeReg(Register CLScopeRegister,
                              SPIRV::Scope::Scope Scope,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR,
                              MachineRegisterInfo *MRI) {
  if (CLScopeRegister.isValid()) {
    auto CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
    Scope = getSPIRVScope(CLScope);

    if (CLScope == static_cast<unsigned>(Scope)) {
      MRI->setRegClass(CLScopeRegister, &SPIRV::iIDRegClass);
      return CLScopeRegister;
    }
  }
  return buildConstantIntReg32(Scope, MIRBuilder, GR);
}

static void setRegClassIfNull(Register Reg, MachineRegisterInfo *MRI,
                              SPIRVGlobalRegistry *GR) {
  if (MRI->getRegClassOrNull(Reg))
    return;
  SPIRVType *SpvType = GR->getSPIRVTypeForVReg(Reg);
  MRI->setRegClass(Reg,
                   SpvType ? GR->getRegClass(SpvType) : &SPIRV::iIDRegClass);
}

static Register buildMemSemanticsReg(Register SemanticsRegister,
                                     Register PtrRegister, unsigned &Semantics,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  if (SemanticsRegister.isValid()) {
    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
    std::memory_order Order =
        static_cast<std::memory_order>(getIConstVal(SemanticsRegister, MRI));
    Semantics =
        getSPIRVMemSemantics(Order) |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    if (static_cast<unsigned>(Order) == Semantics) {
      MRI->setRegClass(SemanticsRegister, &SPIRV::iIDRegClass);
      return SemanticsRegister;
    }
  }
  return buildConstantIntReg32(Semantics, MIRBuilder, GR);
}

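/// Common path for "__spirv_" wrapper calls: emits \p Opcode directly, adding
/// the call's return register and type (when \p TypeReg is valid) followed by
/// the call arguments, with any trailing \p ImmArgs encoded as immediates.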
static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode,
                               const SPIRV::IncomingCall *Call,
                               Register TypeReg,
                               ArrayRef<uint32_t> ImmArgs = {}) {
  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (TypeReg.isValid())
    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
  unsigned Sz = Call->Arguments.size() - ImmArgs.size();
  for (unsigned i = 0; i < Sz; ++i)
    MIB.addUse(Call->Arguments[i]);
  for (uint32_t ImmArg : ImmArgs)
    MIB.addImm(ImmArg);
  return true;
}

/// Helper function for translating atomic init to OpStore.
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call, Register(0));

  assert(Call->Arguments.size() == 2 &&
         "Need 2 arguments for atomic init translation");
  MIRBuilder.buildInstr(SPIRV::OpStore)
      .addUse(Call->Arguments[0])
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic load instruction.
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);

  Register PtrRegister = Call->Arguments[0];
  // TODO: if true insert call to __translate_ocl_memory_scope before
  // OpAtomicLoad and the function implementation. We can use Translator's
  // output for transcoding/atomic_explicit_arguments.cl as an example.
  Register ScopeRegister =
      Call->Arguments.size() > 1
          ? Call->Arguments[1]
          : buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
  Register MemSemanticsReg;
  if (Call->Arguments.size() > 2) {
    // TODO: Insert call to __translate_ocl_memory_order before OpAtomicLoad.
    MemSemanticsReg = Call->Arguments[2];
  } else {
    int Semantics =
        SPIRV::MemorySemantics::SequentiallyConsistent |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
  }

  MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
      .addDef(Call->ReturnRegister)
      .addUse(TypeReg)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building an atomic store instruction.
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call,
                                 MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call,
                              Register(0));

  Register ScopeRegister =
      buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
  Register PtrRegister = Call->Arguments[0];
  int Semantics =
      SPIRV::MemorySemantics::SequentiallyConsistent |
      getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
  Register MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
  MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic compare-exchange instruction.
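///
/// For the OpenCL form atomic_compare_exchange_*(object, expected, desired),
/// "expected" is a pointer, so the generated OpAtomicCompareExchange is
/// followed by an OpStore of the original value back to "expected" and an
/// OpIEqual producing the boolean result; the "cmpxchg" form takes "expected"
/// by value and returns the original value directly.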
static bool buildAtomicCompareExchangeInst(
    const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin,
    unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register ObjectPtr = Call->Arguments[0];   // Pointer (volatile A *object).
  Register ExpectedArg = Call->Arguments[1]; // Comparator (C *expected).
  Register Desired = Call->Arguments[2];     // Value (C desired).
  SPIRVType *SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired);
  LLT DesiredLLT = MRI->getType(Desired);

  assert(GR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() ==
         SPIRV::OpTypePointer);
  unsigned ExpectedType = GR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode();
  (void)ExpectedType;
  assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
                   : ExpectedType == SPIRV::OpTypePointer);
  assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt));

  SPIRVType *SpvObjectPtrTy = GR->getSPIRVTypeForVReg(ObjectPtr);
  assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected");
  auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
      SpvObjectPtrTy->getOperand(1).getImm());
  auto MemSemStorage = getMemSemanticsForStorageClass(StorageClass);

  Register MemSemEqualReg;
  Register MemSemUnequalReg;
  uint64_t MemSemEqual =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  uint64_t MemSemUnequal =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  if (Call->Arguments.size() >= 4) {
    assert(Call->Arguments.size() >= 5 &&
           "Need 5+ args for explicit atomic cmpxchg");
    auto MemOrdEq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
    auto MemOrdNeq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
    MemSemEqual = getSPIRVMemSemantics(MemOrdEq) | MemSemStorage;
    MemSemUnequal = getSPIRVMemSemantics(MemOrdNeq) | MemSemStorage;
    if (static_cast<unsigned>(MemOrdEq) == MemSemEqual)
      MemSemEqualReg = Call->Arguments[3];
    if (static_cast<unsigned>(MemOrdNeq) == MemSemUnequal)
      MemSemUnequalReg = Call->Arguments[4];
  }
  if (!MemSemEqualReg.isValid())
    MemSemEqualReg = buildConstantIntReg32(MemSemEqual, MIRBuilder, GR);
  if (!MemSemUnequalReg.isValid())
    MemSemUnequalReg = buildConstantIntReg32(MemSemUnequal, MIRBuilder, GR);

  Register ScopeReg;
  auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
  if (Call->Arguments.size() >= 6) {
    assert(Call->Arguments.size() == 6 &&
           "Extra args for explicit atomic cmpxchg");
    auto ClScope = static_cast<SPIRV::CLMemoryScope>(
        getIConstVal(Call->Arguments[5], MRI));
    Scope = getSPIRVScope(ClScope);
    if (ClScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[5];
  }
  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);

  Register Expected = IsCmpxchg
                          ? ExpectedArg
                          : buildLoadInst(SpvDesiredTy, ExpectedArg, MIRBuilder,
                                          GR, LLT::scalar(64));
  MRI->setType(Expected, DesiredLLT);
  Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT)
                            : Call->ReturnRegister;
  if (!MRI->getRegClassOrNull(Tmp))
    MRI->setRegClass(Tmp, GR->getRegClass(SpvDesiredTy));
  GR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF());

  MIRBuilder.buildInstr(Opcode)
      .addDef(Tmp)
      .addUse(GR->getSPIRVTypeID(SpvDesiredTy))
      .addUse(ObjectPtr)
      .addUse(ScopeReg)
      .addUse(MemSemEqualReg)
      .addUse(MemSemUnequalReg)
      .addUse(Desired)
      .addUse(Expected);
  if (!IsCmpxchg) {
    MIRBuilder.buildInstr(SPIRV::OpStore).addUse(ExpectedArg).addUse(Tmp);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected);
  }
  return true;
}

/// Helper function for building atomic instructions.
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register ScopeRegister =
      Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();

  assert(Call->Arguments.size() <= 4 &&
         "Too many args for explicit atomic RMW");
  ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
                                MIRBuilder, GR, MRI);

  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::None;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);
  Register ValueReg = Call->Arguments[1];
  Register ValueTypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  // Support cl_ext_float_atomics.
  if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
    if (Opcode == SPIRV::OpAtomicIAdd) {
      Opcode = SPIRV::OpAtomicFAddEXT;
    } else if (Opcode == SPIRV::OpAtomicISub) {
      // Translate OpAtomicISub applied to a floating type argument to
      // OpAtomicFAddEXT with the negative value operand.
      Opcode = SPIRV::OpAtomicFAddEXT;
      Register NegValueReg =
          MRI->createGenericVirtualRegister(MRI->getType(ValueReg));
      MRI->setRegClass(NegValueReg, GR->getRegClass(Call->ReturnType));
      GR->assignSPIRVTypeToVReg(Call->ReturnType, NegValueReg,
                                MIRBuilder.getMF());
      MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
          .addDef(NegValueReg)
          .addUse(ValueReg);
      updateRegType(NegValueReg, nullptr, Call->ReturnType, GR, MIRBuilder,
                    MIRBuilder.getMF().getRegInfo());
      ValueReg = NegValueReg;
    }
  }
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(ValueTypeReg)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(ValueReg);
  return true;
}

/// Helper function for building an atomic floating-type instruction.
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call,
                                       unsigned Opcode,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  assert(Call->Arguments.size() == 4 &&
         "Wrong number of arguments for atomic floating-type builtin");
  Register PtrReg = Call->Arguments[0];
  Register ScopeReg = Call->Arguments[1];
  Register MemSemanticsReg = Call->Arguments[2];
  Register ValueReg = Call->Arguments[3];
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(PtrReg)
      .addUse(ScopeReg)
      .addUse(MemSemanticsReg)
      .addUse(ValueReg);
  return true;
}

/// Helper function for building atomic flag instructions (e.g.
/// OpAtomicFlagTestAndSet).
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call,
                                unsigned Opcode, MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              IsSet ? TypeReg : Register(0));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);

  assert((Opcode != SPIRV::OpAtomicFlagClear ||
          (Semantics != SPIRV::MemorySemantics::Acquire &&
           Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
         "Invalid memory order argument!");

  Register ScopeRegister =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  ScopeRegister =
      buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);

  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (IsSet)
    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);

  MIB.addUse(PtrRegister).addUse(ScopeRegister).addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building barriers, i.e., memory/control ordering
/// operations.
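///
/// For example, OpenCL barrier(CLK_LOCAL_MEM_FENCE) becomes OpControlBarrier
/// with Workgroup execution and memory scopes and
/// WorkgroupMemory|SequentiallyConsistent memory semantics.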
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                             MachineIRBuilder &MIRBuilder,
                             SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const auto *ST =
      static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
  if ((Opcode == SPIRV::OpControlBarrierArriveINTEL ||
       Opcode == SPIRV::OpControlBarrierWaitINTEL) &&
      !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_split_barrier";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
  unsigned MemSemantics = SPIRV::MemorySemantics::None;

  if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;

  if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;

  if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::ImageMemory;

  if (Opcode == SPIRV::OpMemoryBarrier)
    MemSemantics = getSPIRVMemSemantics(static_cast<std::memory_order>(
                       getIConstVal(Call->Arguments[1], MRI))) |
                   MemSemantics;
  else if (Opcode == SPIRV::OpControlBarrierArriveINTEL)
    MemSemantics |= SPIRV::MemorySemantics::Release;
  else if (Opcode == SPIRV::OpControlBarrierWaitINTEL)
    MemSemantics |= SPIRV::MemorySemantics::Acquire;
  else
    MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;

  Register MemSemanticsReg =
      MemFlags == MemSemantics
          ? Call->Arguments[0]
          : buildConstantIntReg32(MemSemantics, MIRBuilder, GR);
  Register ScopeReg;
  SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
  SPIRV::Scope::Scope MemScope = Scope;
  if (Call->Arguments.size() >= 2) {
    assert(
        ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
         (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
        "Extra args for explicitly scoped barrier");
    Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
                                                           : Call->Arguments[1];
    SPIRV::CLMemoryScope CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
    MemScope = getSPIRVScope(CLScope);
    if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
        (Opcode == SPIRV::OpMemoryBarrier))
      Scope = MemScope;
    if (CLScope == static_cast<unsigned>(Scope))
      ScopeReg = ScopeArg;
  }

  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);

  auto MIB = MIRBuilder.buildInstr(Opcode).addUse(ScopeReg);
  if (Opcode != SPIRV::OpMemoryBarrier)
    MIB.addUse(buildConstantIntReg32(MemScope, MIRBuilder, GR));
  MIB.addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building extended bit operations.
static bool buildExtendedBitOpsInst(const SPIRV::IncomingCall *Call,
                                    unsigned Opcode,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const auto *ST =
      static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
  if ((Opcode == SPIRV::OpBitFieldInsert ||
       Opcode == SPIRV::OpBitFieldSExtract ||
       Opcode == SPIRV::OpBitFieldUExtract || Opcode == SPIRV::OpBitReverse) &&
      !ST->canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_bit_instructions";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  // Generate SPIRV instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

/// Helper function for building Intel's bindless image instructions.
static bool buildBindlessImageINTELInst(const SPIRV::IncomingCall *Call,
                                        unsigned Opcode,
                                        MachineIRBuilder &MIRBuilder,
                                        SPIRVGlobalRegistry *GR) {
  // Generate SPIRV instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(Call->Arguments[0]);

  return true;
}

/// Helper function for building Intel's OpBitwiseFunctionINTEL instruction.
static bool buildTernaryBitwiseFunctionINTELInst(
    const SPIRV::IncomingCall *Call, unsigned Opcode,
    MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
  // Generate SPIRV instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

static bool buildImageChannelDataTypeInst(const SPIRV::IncomingCall *Call,
                                          unsigned Opcode,
                                          MachineIRBuilder &MIRBuilder,
                                          SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

/// Helper function for building Intel's 2D block I/O instructions.
static bool build2DBlockIOINTELInst(const SPIRV::IncomingCall *Call,
                                    unsigned Opcode,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  // Generate SPIRV instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

static bool buildPipeInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                          unsigned Scope, MachineIRBuilder &MIRBuilder,
                          SPIRVGlobalRegistry *GR) {
  switch (Opcode) {
  case SPIRV::OpCommitReadPipe:
  case SPIRV::OpCommitWritePipe:
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
  case SPIRV::OpGroupCommitReadPipe:
  case SPIRV::OpGroupCommitWritePipe:
  case SPIRV::OpGroupReserveReadPipePackets:
  case SPIRV::OpGroupReserveWritePipePackets: {
    Register ScopeConstReg =
        MIRBuilder.buildConstant(LLT::scalar(32), Scope).getReg(0);
    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
    MRI->setRegClass(ScopeConstReg, &SPIRV::iIDRegClass);
    MachineInstrBuilder MIB;
    MIB = MIRBuilder.buildInstr(Opcode);
    // Add Return register and type.
    if (Opcode == SPIRV::OpGroupReserveReadPipePackets ||
        Opcode == SPIRV::OpGroupReserveWritePipePackets)
      MIB.addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType));

    MIB.addUse(ScopeConstReg);
    for (unsigned int i = 0; i < Call->Arguments.size(); ++i)
      MIB.addUse(Call->Arguments[i]);

    return true;
  }
  default:
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));
  }
}

static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
  switch (dim) {
  case SPIRV::Dim::DIM_1D:
  case SPIRV::Dim::DIM_Buffer:
    return 1;
  case SPIRV::Dim::DIM_2D:
  case SPIRV::Dim::DIM_Cube:
  case SPIRV::Dim::DIM_Rect:
    return 2;
  case SPIRV::Dim::DIM_3D:
    return 3;
  default:
    report_fatal_error("Cannot get num components for given Dim");
  }
}

/// Helper function for obtaining the number of size components.
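/// For example, a 2D arrayed image has two spatial size components plus one
/// array-index component, so this returns 3.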
static unsigned getNumSizeComponents(SPIRVType *imgType) {
  assert(imgType->getOpcode() == SPIRV::OpTypeImage);
  auto dim = static_cast<SPIRV::Dim::Dim>(imgType->getOperand(2).getImm());
  unsigned numComps = getNumComponentsForDim(dim);
  bool arrayed = imgType->getOperand(4).getImm() == 1;
  return arrayed ? numComps + 1 : numComps;
}

static bool builtinMayNeedPromotionToVec(uint32_t BuiltinNumber) {
  switch (BuiltinNumber) {
  case SPIRV::OpenCLExtInst::s_min:
  case SPIRV::OpenCLExtInst::u_min:
  case SPIRV::OpenCLExtInst::s_max:
  case SPIRV::OpenCLExtInst::u_max:
  case SPIRV::OpenCLExtInst::fmax:
  case SPIRV::OpenCLExtInst::fmin:
  case SPIRV::OpenCLExtInst::fmax_common:
  case SPIRV::OpenCLExtInst::fmin_common:
  case SPIRV::OpenCLExtInst::s_clamp:
  case SPIRV::OpenCLExtInst::fclamp:
  case SPIRV::OpenCLExtInst::u_clamp:
  case SPIRV::OpenCLExtInst::mix:
  case SPIRV::OpenCLExtInst::step:
  case SPIRV::OpenCLExtInst::smoothstep:
    return true;
  default:
    break;
  }
  return false;
}

//===----------------------------------------------------------------------===//
// Implementation functions for each builtin group
//===----------------------------------------------------------------------===//

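/// For the builtins listed in builtinMayNeedPromotionToVec, OpenCL allows
/// mixed scalar/vector operands, e.g. fmin(float4, float); this helper splats
/// such scalar arguments to the vector return type via OpCompositeConstruct
/// so that all operands match the result type.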
static SmallVector<Register>
getBuiltinCallArguments(const SPIRV::IncomingCall *Call, uint32_t BuiltinNumber,
                        MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {

  Register ReturnTypeId = GR->getSPIRVTypeID(Call->ReturnType);
  unsigned ResultElementCount =
      GR->getScalarOrVectorComponentCount(ReturnTypeId);
  bool MayNeedPromotionToVec =
      builtinMayNeedPromotionToVec(BuiltinNumber) && ResultElementCount > 1;

  if (!MayNeedPromotionToVec)
    return {Call->Arguments.begin(), Call->Arguments.end()};

  SmallVector<Register> Arguments;
  for (Register Argument : Call->Arguments) {
    Register VecArg = Argument;
    SPIRVType *ArgumentType = GR->getSPIRVTypeForVReg(Argument);
    if (ArgumentType != Call->ReturnType) {
      VecArg = createVirtualRegister(Call->ReturnType, GR, MIRBuilder);
      auto VecSplat = MIRBuilder.buildInstr(SPIRV::OpCompositeConstruct)
                          .addDef(VecArg)
                          .addUse(ReturnTypeId);
      for (unsigned I = 0; I != ResultElementCount; ++I)
        VecSplat.addUse(Argument);
    }
    Arguments.push_back(VecArg);
  }
  return Arguments;
}

static bool generateExtInst(const SPIRV::IncomingCall *Call,
                            MachineIRBuilder &MIRBuilder,
                            SPIRVGlobalRegistry *GR, const CallBase &CB) {
  // Lookup the extended instruction number in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  uint32_t Number =
      SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;
  // fmin_common and fmax_common are deprecated; use fmin and fmax with the
  // NoNans and NoInfs flags instead. Keep the original number so those flags
  // can be added later.
  uint32_t OrigNumber = Number;
  const SPIRVSubtarget &ST =
      cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
  if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2) &&
      (Number == SPIRV::OpenCLExtInst::fmin_common ||
       Number == SPIRV::OpenCLExtInst::fmax_common)) {
    Number = (Number == SPIRV::OpenCLExtInst::fmin_common)
                 ? SPIRV::OpenCLExtInst::fmin
                 : SPIRV::OpenCLExtInst::fmax;
  }

  Register ReturnTypeId = GR->getSPIRVTypeID(Call->ReturnType);
  SmallVector<Register> Arguments =
      getBuiltinCallArguments(Call, Number, MIRBuilder, GR);

  MachineInstrBuilder MIB;
  if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_fma) &&
      Number == SPIRV::OpenCLExtInst::fma) {
    // Use the SPIR-V fma instruction instead of the OpenCL extended
    // instruction if the extension is available.
    MIB = MIRBuilder.buildInstr(SPIRV::OpFmaKHR)
              .addDef(Call->ReturnRegister)
              .addUse(ReturnTypeId);
  } else {
    // Build extended instruction.
    MIB = MIRBuilder.buildInstr(SPIRV::OpExtInst)
              .addDef(Call->ReturnRegister)
              .addUse(ReturnTypeId)
              .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
              .addImm(Number);
  }

  for (Register Argument : Arguments)
    MIB.addUse(Argument);

  MIB.getInstr()->copyIRFlags(CB);
  if (OrigNumber == SPIRV::OpenCLExtInst::fmin_common ||
      OrigNumber == SPIRV::OpenCLExtInst::fmax_common) {
    // Add NoNans and NoInfs flags to the fmin/fmax instruction.
    MIB.getInstr()->setFlag(MachineInstr::MIFlag::FmNoNans);
    MIB.getInstr()->setFlag(MachineInstr::MIFlag::FmNoInfs);
  }
  return true;
}

static bool generateRelationalInst(const SPIRV::IncomingCall *Call,
                                   MachineIRBuilder &MIRBuilder,
                                   SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  Register CompareRegister;
  SPIRVType *RelationType;
  std::tie(CompareRegister, RelationType) =
      buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  // Build relational instruction.
  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(CompareRegister)
                 .addUse(GR->getSPIRVTypeID(RelationType));

  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);

  // Build select instruction.
  return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
                         Call->ReturnType, GR);
}

static bool generateGroupInst(const SPIRV::IncomingCall *Call,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const SPIRV::GroupBuiltin *GroupBuiltin =
      SPIRV::lookupGroupBuiltin(Builtin->Name);

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  if (Call->isSpirvOp()) {
    if (GroupBuiltin->NoGroupOperation) {
      SmallVector<uint32_t, 1> ImmArgs;
      if (GroupBuiltin->Opcode ==
              SPIRV::OpSubgroupMatrixMultiplyAccumulateINTEL &&
          Call->Arguments.size() > 4)
        ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[4], MRI));
      return buildOpFromWrapper(MIRBuilder, GroupBuiltin->Opcode, Call,
                                GR->getSPIRVTypeID(Call->ReturnType), ImmArgs);
    }

    // Group Operation is a literal.
    Register GroupOpReg = Call->Arguments[1];
    const MachineInstr *MI = getDefInstrMaybeConstant(GroupOpReg, MRI);
    if (!MI || MI->getOpcode() != TargetOpcode::G_CONSTANT)
      report_fatal_error(
          "Group Operation parameter must be an integer constant");
    uint64_t GrpOp = MI->getOperand(1).getCImm()->getValue().getZExtValue();
    Register ScopeReg = Call->Arguments[0];
    auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                   .addDef(Call->ReturnRegister)
                   .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                   .addUse(ScopeReg)
                   .addImm(GrpOp);
    for (unsigned i = 2; i < Call->Arguments.size(); ++i)
      MIB.addUse(Call->Arguments[i]);
    return true;
  }

  Register Arg0;
  if (GroupBuiltin->HasBoolArg) {
    SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
    Register BoolReg = Call->Arguments[0];
    SPIRVType *BoolRegType = GR->getSPIRVTypeForVReg(BoolReg);
    if (!BoolRegType)
      report_fatal_error("Can't find a register's type definition");
    MachineInstr *ArgInstruction = getDefInstrMaybeConstant(BoolReg, MRI);
    if (ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (BoolRegType->getOpcode() != SPIRV::OpTypeBool)
        Arg0 = GR->buildConstantInt(getIConstVal(BoolReg, MRI), MIRBuilder,
                                    BoolType, true);
    } else {
      if (BoolRegType->getOpcode() == SPIRV::OpTypeInt) {
        Arg0 = MRI->createGenericVirtualRegister(LLT::scalar(1));
        MRI->setRegClass(Arg0, &SPIRV::iIDRegClass);
        GR->assignSPIRVTypeToVReg(BoolType, Arg0, MIRBuilder.getMF());
        MIRBuilder.buildICmp(
            CmpInst::ICMP_NE, Arg0, BoolReg,
            GR->buildConstantInt(0, MIRBuilder, BoolRegType, true));
        updateRegType(Arg0, nullptr, BoolType, GR, MIRBuilder,
                      MIRBuilder.getMF().getRegInfo());
      } else if (BoolRegType->getOpcode() != SPIRV::OpTypeBool) {
        report_fatal_error("Expect a boolean argument");
      }
      // if BoolReg is a boolean register, we don't need to do anything
    }
  }

  Register GroupResultRegister = Call->ReturnRegister;
  SPIRVType *GroupResultType = Call->ReturnType;

  // TODO: maybe we need to check whether the result type is already boolean
  // and in this case do not insert select instruction.
  const bool HasBoolReturnTy =
      GroupBuiltin->IsElect || GroupBuiltin->IsAllOrAny ||
      GroupBuiltin->IsAllEqual || GroupBuiltin->IsLogical ||
      GroupBuiltin->IsInverseBallot || GroupBuiltin->IsBallotBitExtract;

  if (HasBoolReturnTy)
    std::tie(GroupResultRegister, GroupResultType) =
        buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  auto Scope = Builtin->Name.starts_with("sub_group") ? SPIRV::Scope::Subgroup
                                                      : SPIRV::Scope::Workgroup;
  Register ScopeRegister = buildConstantIntReg32(Scope, MIRBuilder, GR);

  Register VecReg;
  if (GroupBuiltin->Opcode == SPIRV::OpGroupBroadcast &&
      Call->Arguments.size() > 2) {
    // For OpGroupBroadcast "LocalId must be an integer datatype. It must be a
    // scalar, a vector with 2 components, or a vector with 3 components.",
    // meaning that we must create a vector from the function arguments if
    // it's a work_group_broadcast(val, local_id_x, local_id_y) or
    // work_group_broadcast(val, local_id_x, local_id_y, local_id_z) call.
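    // For example, work_group_broadcast(val, x, y) produces a G_BUILD_VECTOR
    // of (x, y) that is passed to OpGroupBroadcast as a single 2-component
    // <LocalId> operand.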
    Register ElemReg = Call->Arguments[1];
    SPIRVType *ElemType = GR->getSPIRVTypeForVReg(ElemReg);
    if (!ElemType || ElemType->getOpcode() != SPIRV::OpTypeInt)
      report_fatal_error("Expect an integer <LocalId> argument");
    unsigned VecLen = Call->Arguments.size() - 1;
    VecReg = MRI->createGenericVirtualRegister(
        LLT::fixed_vector(VecLen, MRI->getType(ElemReg)));
    MRI->setRegClass(VecReg, &SPIRV::vIDRegClass);
    SPIRVType *VecType =
        GR->getOrCreateSPIRVVectorType(ElemType, VecLen, MIRBuilder, true);
    GR->assignSPIRVTypeToVReg(VecType, VecReg, MIRBuilder.getMF());
    auto MIB =
        MIRBuilder.buildInstr(TargetOpcode::G_BUILD_VECTOR).addDef(VecReg);
    for (unsigned i = 1; i < Call->Arguments.size(); i++) {
      MIB.addUse(Call->Arguments[i]);
      setRegClassIfNull(Call->Arguments[i], MRI, GR);
    }
    updateRegType(VecReg, nullptr, VecType, GR, MIRBuilder,
                  MIRBuilder.getMF().getRegInfo());
  }

  // Build work/sub group instruction.
  auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                 .addDef(GroupResultRegister)
                 .addUse(GR->getSPIRVTypeID(GroupResultType))
                 .addUse(ScopeRegister);

  if (!GroupBuiltin->NoGroupOperation)
    MIB.addImm(GroupBuiltin->GroupOperation);
  if (Call->Arguments.size() > 0) {
    MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
    setRegClassIfNull(Call->Arguments[0], MRI, GR);
    if (VecReg.isValid())
      MIB.addUse(VecReg);
    else
      for (unsigned i = 1; i < Call->Arguments.size(); i++)
        MIB.addUse(Call->Arguments[i]);
  }

  // Build select instruction.
  if (HasBoolReturnTy)
    buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
                    Call->ReturnType, GR);
  return true;
}
1448
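// A minimal sketch of the expected lowering (illustrative, not normative):
//   uint intel_sub_group_block_read(const __global uint *p)
// becomes
//   %res = OpSubgroupBlockReadINTEL %uint %p
// while the image overload switches to the OpSubgroupImageBlockRead*INTEL
// form, as handled below.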
1449static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call,
1450 MachineIRBuilder &MIRBuilder,
1451 SPIRVGlobalRegistry *GR) {
1452 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1453 MachineFunction &MF = MIRBuilder.getMF();
1454 const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
1455 const SPIRV::IntelSubgroupsBuiltin *IntelSubgroups =
1456 SPIRV::lookupIntelSubgroupsBuiltin(Name: Builtin->Name);
1457
1458 if (IntelSubgroups->IsMedia &&
1459 !ST->canUseExtension(E: SPIRV::Extension::SPV_INTEL_media_block_io)) {
1460 std::string DiagMsg = std::string(Builtin->Name) +
1461 ": the builtin requires the following SPIR-V "
1462 "extension: SPV_INTEL_media_block_io";
1463 report_fatal_error(reason: DiagMsg.c_str(), gen_crash_diag: false);
1464 } else if (!IntelSubgroups->IsMedia &&
1465 !ST->canUseExtension(E: SPIRV::Extension::SPV_INTEL_subgroups)) {
1466 std::string DiagMsg = std::string(Builtin->Name) +
1467 ": the builtin requires the following SPIR-V "
1468 "extension: SPV_INTEL_subgroups";
1469 report_fatal_error(reason: DiagMsg.c_str(), gen_crash_diag: false);
1470 }
1471
1472 uint32_t OpCode = IntelSubgroups->Opcode;
1473 if (Call->isSpirvOp()) {
1474 bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
1475 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL &&
1476 OpCode != SPIRV::OpSubgroupImageMediaBlockWriteINTEL;
1477 return buildOpFromWrapper(MIRBuilder, Opcode: OpCode, Call,
1478 TypeReg: IsSet ? GR->getSPIRVTypeID(SpirvType: Call->ReturnType)
1479 : Register(0));
1480 }
1481
1482 if (IntelSubgroups->IsBlock) {
1483     // Minimal number of arguments set in TableGen records is 1.
1484 if (SPIRVType *Arg0Type = GR->getSPIRVTypeForVReg(VReg: Call->Arguments[0])) {
1485 if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
1486 // TODO: add required validation from the specification:
1487 // "'Image' must be an object whose type is OpTypeImage with a 'Sampled'
1488 // operand of 0 or 2. If the 'Sampled' operand is 2, then some
1489 // dimensions require a capability."
1490 switch (OpCode) {
1491 case SPIRV::OpSubgroupBlockReadINTEL:
1492 OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
1493 break;
1494 case SPIRV::OpSubgroupBlockWriteINTEL:
1495 OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
1496 break;
1497 }
1498 }
1499 }
1500 }
1501
1502   // TODO: opaque pointer types should eventually be resolved in such a way
1503   // that validation of block reads is enabled with respect to the following
1504   // specification requirement:
1505   // "'Result Type' may be a scalar or vector type, and its component type must
1506   // be equal to the type pointed to by 'Ptr'."
1507   // For example, a function parameter type should not be the default i8
1508   // pointer, but should depend on the result type of the instruction where it
1509   // is used as the pointer argument of OpSubgroupBlockReadINTEL.
1510
1511 // Build Intel subgroups instruction
1512 MachineInstrBuilder MIB =
1513 IntelSubgroups->IsWrite
1514 ? MIRBuilder.buildInstr(Opcode: OpCode)
1515 : MIRBuilder.buildInstr(Opcode: OpCode)
1516 .addDef(RegNo: Call->ReturnRegister)
1517 .addUse(RegNo: GR->getSPIRVTypeID(SpirvType: Call->ReturnType));
1518 for (size_t i = 0; i < Call->Arguments.size(); ++i)
1519 MIB.addUse(RegNo: Call->Arguments[i]);
1520 return true;
1521}
1522
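// Sketch (assuming SPV_KHR_uniform_group_instructions is enabled): a call
// such as
//   __spirv_GroupBitwiseAndKHR(Scope, GroupOperation, Value)
// is expected to become
//   %res = OpGroupBitwiseAndKHR %type %scope <GroupOperation> %value
// where the group operation must be an integer constant, as checked below.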
1523static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call,
1524 MachineIRBuilder &MIRBuilder,
1525 SPIRVGlobalRegistry *GR) {
1526 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1527 MachineFunction &MF = MIRBuilder.getMF();
1528 const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
1529 if (!ST->canUseExtension(
1530 E: SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
1531 std::string DiagMsg = std::string(Builtin->Name) +
1532 ": the builtin requires the following SPIR-V "
1533 "extension: SPV_KHR_uniform_group_instructions";
1534 report_fatal_error(reason: DiagMsg.c_str(), gen_crash_diag: false);
1535 }
1536 const SPIRV::GroupUniformBuiltin *GroupUniform =
1537 SPIRV::lookupGroupUniformBuiltin(Name: Builtin->Name);
1538 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1539
1540 Register GroupResultReg = Call->ReturnRegister;
1541 Register ScopeReg = Call->Arguments[0];
1542 Register ValueReg = Call->Arguments[2];
1543
1544 // Group Operation
1545 Register ConstGroupOpReg = Call->Arguments[1];
1546 const MachineInstr *Const = getDefInstrMaybeConstant(ConstReg&: ConstGroupOpReg, MRI);
1547 if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
1548 report_fatal_error(
1549 reason: "expect a constant group operation for a uniform group instruction",
1550 gen_crash_diag: false);
1551 const MachineOperand &ConstOperand = Const->getOperand(i: 1);
1552 if (!ConstOperand.isCImm())
1553 report_fatal_error(reason: "uniform group instructions: group operation must be an "
1554 "integer constant",
1555 gen_crash_diag: false);
1556
1557 auto MIB = MIRBuilder.buildInstr(Opcode: GroupUniform->Opcode)
1558 .addDef(RegNo: GroupResultReg)
1559 .addUse(RegNo: GR->getSPIRVTypeID(SpirvType: Call->ReturnType))
1560 .addUse(RegNo: ScopeReg);
1561 addNumImm(Imm: ConstOperand.getCImm()->getValue(), MIB);
1562 MIB.addUse(RegNo: ValueReg);
1563
1564 return true;
1565}
1566
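// Sketch (assuming SPV_KHR_shader_clock): clock_read_device() is expected
// to lower to
//   %res = OpReadClockKHR %ulong %scope_device
// with the scope deduced from the builtin name suffix below.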
1567static bool generateKernelClockInst(const SPIRV::IncomingCall *Call,
1568 MachineIRBuilder &MIRBuilder,
1569 SPIRVGlobalRegistry *GR) {
1570 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1571 MachineFunction &MF = MIRBuilder.getMF();
1572 const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
1573 if (!ST->canUseExtension(E: SPIRV::Extension::SPV_KHR_shader_clock)) {
1574 std::string DiagMsg = std::string(Builtin->Name) +
1575 ": the builtin requires the following SPIR-V "
1576 "extension: SPV_KHR_shader_clock";
1577 report_fatal_error(reason: DiagMsg.c_str(), gen_crash_diag: false);
1578 }
1579
1580 Register ResultReg = Call->ReturnRegister;
1581
1582 if (Builtin->Name == "__spirv_ReadClockKHR") {
1583 MIRBuilder.buildInstr(Opcode: SPIRV::OpReadClockKHR)
1584 .addDef(RegNo: ResultReg)
1585 .addUse(RegNo: GR->getSPIRVTypeID(SpirvType: Call->ReturnType))
1586 .addUse(RegNo: Call->Arguments[0]);
1587 } else {
1588 // Deduce the `Scope` operand from the builtin function name.
1589 SPIRV::Scope::Scope ScopeArg =
1590 StringSwitch<SPIRV::Scope::Scope>(Builtin->Name)
1591 .EndsWith(S: "device", Value: SPIRV::Scope::Scope::Device)
1592 .EndsWith(S: "work_group", Value: SPIRV::Scope::Scope::Workgroup)
1593 .EndsWith(S: "sub_group", Value: SPIRV::Scope::Scope::Subgroup);
1594 Register ScopeReg = buildConstantIntReg32(Val: ScopeArg, MIRBuilder, GR);
1595
1596 MIRBuilder.buildInstr(Opcode: SPIRV::OpReadClockKHR)
1597 .addDef(RegNo: ResultReg)
1598 .addUse(RegNo: GR->getSPIRVTypeID(SpirvType: Call->ReturnType))
1599 .addUse(RegNo: ScopeReg);
1600 }
1601
1602 return true;
1603}
1604
1605// These queries ask for a single size_t result for a given dimension index,
1606// e.g. size_t get_global_id(uint dimindex). In SPIR-V, the builtins
1607// corresponding to these values are all vec3 types, so we need to extract the
1608// correct index or return DefaultValue (0 or 1 depending on the query). We also
1609// handle extending or truncating in case size_t does not match the expected
1610// result type's bitwidth.
1611//
1612// For a constant index >= 3 we generate:
1613// %res = OpConstant %SizeT DefaultValue
1614//
1615// For other indices we generate:
1616// %g = OpVariable %ptr_V3_SizeT Input
1617// OpDecorate %g BuiltIn XXX
1618// OpDecorate %g LinkageAttributes "__spirv_BuiltInXXX"
1619// OpDecorate %g Constant
1620// %loadedVec = OpLoad %V3_SizeT %g
1621//
1622// Then, if the index is constant < 3, we generate:
1623// %res = OpCompositeExtract %SizeT %loadedVec idx
1624// If the index is dynamic, we generate:
1625// %tmp = OpVectorExtractDynamic %SizeT %loadedVec %idx
1626// %cmp = OpULessThan %bool %idx %const_3
1627// %res = OpSelect %SizeT %cmp %tmp %const_<DefaultValue>
1628//
1629// If the bitwidth of %res does not match the expected return type, we add an
1630// extend or truncate.
1631static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call,
1632 MachineIRBuilder &MIRBuilder,
1633 SPIRVGlobalRegistry *GR,
1634 SPIRV::BuiltIn::BuiltIn BuiltinValue,
1635 uint64_t DefaultValue) {
1636 Register IndexRegister = Call->Arguments[0];
1637 const unsigned ResultWidth = Call->ReturnType->getOperand(i: 1).getImm();
1638 const unsigned PointerSize = GR->getPointerSize();
1639 const SPIRVType *PointerSizeType =
1640 GR->getOrCreateSPIRVIntegerType(BitWidth: PointerSize, MIRBuilder);
1641 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1642 auto IndexInstruction = getDefInstrMaybeConstant(ConstReg&: IndexRegister, MRI);
1643
1644 // Set up the final register to do truncation or extension on at the end.
1645 Register ToTruncate = Call->ReturnRegister;
1646
1647 // If the index is constant, we can statically determine if it is in range.
1648 bool IsConstantIndex =
1649 IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;
1650
1651 // If it's out of range (max dimension is 3), we can just return the constant
1652 // default value (0 or 1 depending on which query function).
1653 if (IsConstantIndex && getIConstVal(ConstReg: IndexRegister, MRI) >= 3) {
1654 Register DefaultReg = Call->ReturnRegister;
1655 if (PointerSize != ResultWidth) {
1656 DefaultReg = MRI->createGenericVirtualRegister(Ty: LLT::scalar(SizeInBits: PointerSize));
1657 MRI->setRegClass(Reg: DefaultReg, RC: &SPIRV::iIDRegClass);
1658 GR->assignSPIRVTypeToVReg(Type: PointerSizeType, VReg: DefaultReg,
1659 MF: MIRBuilder.getMF());
1660 ToTruncate = DefaultReg;
1661 }
1662 auto NewRegister =
1663 GR->buildConstantInt(Val: DefaultValue, MIRBuilder, SpvType: PointerSizeType, EmitIR: true);
1664 MIRBuilder.buildCopy(Res: DefaultReg, Op: NewRegister);
1665 } else { // If it could be in range, we need to load from the given builtin.
1666 auto Vec3Ty =
1667 GR->getOrCreateSPIRVVectorType(BaseType: PointerSizeType, NumElements: 3, MIRBuilder, EmitIR: true);
1668 Register LoadedVector =
1669 buildBuiltinVariableLoad(MIRBuilder, VariableType: Vec3Ty, GR, BuiltinValue,
1670 LLType: LLT::fixed_vector(NumElements: 3, ScalarSizeInBits: PointerSize));
1671 // Set up the vreg to extract the result to (possibly a new temporary one).
1672 Register Extracted = Call->ReturnRegister;
1673 if (!IsConstantIndex || PointerSize != ResultWidth) {
1674 Extracted = MRI->createGenericVirtualRegister(Ty: LLT::scalar(SizeInBits: PointerSize));
1675 MRI->setRegClass(Reg: Extracted, RC: &SPIRV::iIDRegClass);
1676 GR->assignSPIRVTypeToVReg(Type: PointerSizeType, VReg: Extracted, MF: MIRBuilder.getMF());
1677 }
1678 // Use Intrinsic::spv_extractelt so dynamic vs static extraction is
1679 // handled later: extr = spv_extractelt LoadedVector, IndexRegister.
1680 MachineInstrBuilder ExtractInst = MIRBuilder.buildIntrinsic(
1681 ID: Intrinsic::spv_extractelt, Res: ArrayRef<Register>{Extracted}, HasSideEffects: true, isConvergent: false);
1682 ExtractInst.addUse(RegNo: LoadedVector).addUse(RegNo: IndexRegister);
1683
1684     // If the index is dynamic, check whether it's < 3, and then use a select.
1685 if (!IsConstantIndex) {
1686 updateRegType(Reg: Extracted, Ty: nullptr, SpirvTy: PointerSizeType, GR, MIB&: MIRBuilder, MRI&: *MRI);
1687
1688 auto IndexType = GR->getSPIRVTypeForVReg(VReg: IndexRegister);
1689 auto BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, EmitIR: true);
1690
1691 Register CompareRegister =
1692 MRI->createGenericVirtualRegister(Ty: LLT::scalar(SizeInBits: 1));
1693 MRI->setRegClass(Reg: CompareRegister, RC: &SPIRV::iIDRegClass);
1694 GR->assignSPIRVTypeToVReg(Type: BoolType, VReg: CompareRegister, MF: MIRBuilder.getMF());
1695
1696 // Use G_ICMP to check if idxVReg < 3.
1697 MIRBuilder.buildICmp(
1698 Pred: CmpInst::ICMP_ULT, Res: CompareRegister, Op0: IndexRegister,
1699 Op1: GR->buildConstantInt(Val: 3, MIRBuilder, SpvType: IndexType, EmitIR: true));
1700
1701 // Get constant for the default value (0 or 1 depending on which
1702 // function).
1703 Register DefaultRegister =
1704 GR->buildConstantInt(Val: DefaultValue, MIRBuilder, SpvType: PointerSizeType, EmitIR: true);
1705
1706 // Get a register for the selection result (possibly a new temporary one).
1707 Register SelectionResult = Call->ReturnRegister;
1708 if (PointerSize != ResultWidth) {
1709 SelectionResult =
1710 MRI->createGenericVirtualRegister(Ty: LLT::scalar(SizeInBits: PointerSize));
1711 MRI->setRegClass(Reg: SelectionResult, RC: &SPIRV::iIDRegClass);
1712 GR->assignSPIRVTypeToVReg(Type: PointerSizeType, VReg: SelectionResult,
1713 MF: MIRBuilder.getMF());
1714 }
1715 // Create the final G_SELECT to return the extracted value or the default.
1716 MIRBuilder.buildSelect(Res: SelectionResult, Tst: CompareRegister, Op0: Extracted,
1717 Op1: DefaultRegister);
1718 ToTruncate = SelectionResult;
1719 } else {
1720 ToTruncate = Extracted;
1721 }
1722 }
1723 // Alter the result's bitwidth if it does not match the SizeT value extracted.
1724 if (PointerSize != ResultWidth)
1725 MIRBuilder.buildZExtOrTrunc(Res: Call->ReturnRegister, Op: ToTruncate);
1726 return true;
1727}
1728
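// Sketch: a builtin variable is normally lowered to a load from an
// OpVariable decorated with the matching BuiltIn; a GlobalInvocationId
// query is routed through genWorkgroupQuery above, and everything else is
// loaded directly via buildBuiltinVariableLoad.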
1729static bool generateBuiltinVar(const SPIRV::IncomingCall *Call,
1730 MachineIRBuilder &MIRBuilder,
1731 SPIRVGlobalRegistry *GR) {
1732 // Lookup the builtin variable record.
1733 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1734 SPIRV::BuiltIn::BuiltIn Value =
1735 SPIRV::lookupGetBuiltin(Name: Builtin->Name, Set: Builtin->Set)->Value;
1736
1737 if (Value == SPIRV::BuiltIn::GlobalInvocationId)
1738 return genWorkgroupQuery(Call, MIRBuilder, GR, BuiltinValue: Value, DefaultValue: 0);
1739
1740 // Build a load instruction for the builtin variable.
1741 unsigned BitWidth = GR->getScalarOrVectorBitWidth(Type: Call->ReturnType);
1742 LLT LLType;
1743 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1744 LLType =
1745 LLT::fixed_vector(NumElements: Call->ReturnType->getOperand(i: 2).getImm(), ScalarSizeInBits: BitWidth);
1746 else
1747 LLType = LLT::scalar(SizeInBits: BitWidth);
1748
1749 return buildBuiltinVariableLoad(MIRBuilder, VariableType: Call->ReturnType, GR, BuiltinValue: Value,
1750 LLType, Reg: Call->ReturnRegister);
1751}
1752
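// Sketch of the builtin-to-opcode mapping handled below (an illustrative
// subset; the exact pairing comes from the TableGen records):
//   atomic_init                    -> OpStore
//   atomic_load                    -> OpAtomicLoad
//   atomic_fetch_add               -> OpAtomicIAdd
//   atomic_compare_exchange_strong -> OpAtomicCompareExchange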
1753static bool generateAtomicInst(const SPIRV::IncomingCall *Call,
1754 MachineIRBuilder &MIRBuilder,
1755 SPIRVGlobalRegistry *GR) {
1756 // Lookup the instruction opcode in the TableGen records.
1757 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1758 unsigned Opcode =
1759 SPIRV::lookupNativeBuiltin(Name: Builtin->Name, Set: Builtin->Set)->Opcode;
1760
1761 switch (Opcode) {
1762 case SPIRV::OpStore:
1763 return buildAtomicInitInst(Call, MIRBuilder);
1764 case SPIRV::OpAtomicLoad:
1765 return buildAtomicLoadInst(Call, MIRBuilder, GR);
1766 case SPIRV::OpAtomicStore:
1767 return buildAtomicStoreInst(Call, MIRBuilder, GR);
1768 case SPIRV::OpAtomicCompareExchange:
1769 case SPIRV::OpAtomicCompareExchangeWeak:
1770 return buildAtomicCompareExchangeInst(Call, Builtin, Opcode, MIRBuilder,
1771 GR);
1772 case SPIRV::OpAtomicIAdd:
1773 case SPIRV::OpAtomicISub:
1774 case SPIRV::OpAtomicOr:
1775 case SPIRV::OpAtomicXor:
1776 case SPIRV::OpAtomicAnd:
1777 case SPIRV::OpAtomicExchange:
1778 return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
1779 case SPIRV::OpMemoryBarrier:
1780 return buildBarrierInst(Call, Opcode: SPIRV::OpMemoryBarrier, MIRBuilder, GR);
1781 case SPIRV::OpAtomicFlagTestAndSet:
1782 case SPIRV::OpAtomicFlagClear:
1783 return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
1784 default:
1785 if (Call->isSpirvOp())
1786 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1787 TypeReg: GR->getSPIRVTypeID(SpirvType: Call->ReturnType));
1788 return false;
1789 }
1790}
1791
1792static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call,
1793 MachineIRBuilder &MIRBuilder,
1794 SPIRVGlobalRegistry *GR) {
1795 // Lookup the instruction opcode in the TableGen records.
1796 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1797 unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Name: Builtin->Name)->Opcode;
1798
1799 switch (Opcode) {
1800 case SPIRV::OpAtomicFAddEXT:
1801 case SPIRV::OpAtomicFMinEXT:
1802 case SPIRV::OpAtomicFMaxEXT:
1803 return buildAtomicFloatingRMWInst(Call, Opcode, MIRBuilder, GR);
1804 default:
1805 return false;
1806 }
1807}
1808
1809static bool generateBarrierInst(const SPIRV::IncomingCall *Call,
1810 MachineIRBuilder &MIRBuilder,
1811 SPIRVGlobalRegistry *GR) {
1812 // Lookup the instruction opcode in the TableGen records.
1813 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1814 unsigned Opcode =
1815 SPIRV::lookupNativeBuiltin(Name: Builtin->Name, Set: Builtin->Set)->Opcode;
1816
1817 return buildBarrierInst(Call, Opcode, MIRBuilder, GR);
1818}
1819
1820static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call,
1821 MachineIRBuilder &MIRBuilder,
1822 SPIRVGlobalRegistry *GR) {
1823 // Lookup the instruction opcode in the TableGen records.
1824 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1825 unsigned Opcode =
1826 SPIRV::lookupNativeBuiltin(Name: Builtin->Name, Set: Builtin->Set)->Opcode;
1827
1828 if (Opcode == SPIRV::OpGenericCastToPtrExplicit) {
1829 SPIRV::StorageClass::StorageClass ResSC =
1830 GR->getPointerStorageClass(VReg: Call->ReturnRegister);
1831 if (!isGenericCastablePtr(SC: ResSC))
1832 return false;
1833
1834 MIRBuilder.buildInstr(Opcode)
1835 .addDef(RegNo: Call->ReturnRegister)
1836 .addUse(RegNo: GR->getSPIRVTypeID(SpirvType: Call->ReturnType))
1837 .addUse(RegNo: Call->Arguments[0])
1838 .addImm(Val: ResSC);
1839 } else {
1840 MIRBuilder.buildInstr(Opcode: TargetOpcode::G_ADDRSPACE_CAST)
1841 .addDef(RegNo: Call->ReturnRegister)
1842 .addUse(RegNo: Call->Arguments[0]);
1843 }
1844 return true;
1845}
1846
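// Sketch of opcode selection (illustrative): dot(float4, float4) maps to
// OpDot and dot(float, float) to OpFMulS, while integer overloads map to
// the OpSDot/OpUDot/OpSUDot family when SPV_KHR_integer_dot_product or
// SPIR-V 1.6 is available. For mixed signedness the operands are swapped
// when the first one is unsigned, since OpSUDot expects the signed vector
// first.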
1847static bool generateDotOrFMulInst(const StringRef DemangledCall,
1848 const SPIRV::IncomingCall *Call,
1849 MachineIRBuilder &MIRBuilder,
1850 SPIRVGlobalRegistry *GR) {
1851 if (Call->isSpirvOp())
1852 return buildOpFromWrapper(MIRBuilder, Opcode: SPIRV::OpDot, Call,
1853 TypeReg: GR->getSPIRVTypeID(SpirvType: Call->ReturnType));
1854
1855 bool IsVec = GR->getSPIRVTypeForVReg(VReg: Call->Arguments[0])->getOpcode() ==
1856 SPIRV::OpTypeVector;
1857 // Use OpDot only in case of vector args and OpFMul in case of scalar args.
1858 uint32_t OC = IsVec ? SPIRV::OpDot : SPIRV::OpFMulS;
1859 bool IsSwapReq = false;
1860
1861 const auto *ST =
1862 static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
1863 if (GR->isScalarOrVectorOfType(VReg: Call->ReturnRegister, TypeOpcode: SPIRV::OpTypeInt) &&
1864 (ST->canUseExtension(E: SPIRV::Extension::SPV_KHR_integer_dot_product) ||
1865 ST->isAtLeastSPIRVVer(VerToCompareTo: VersionTuple(1, 6)))) {
1866 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1867 const SPIRV::IntegerDotProductBuiltin *IntDot =
1868 SPIRV::lookupIntegerDotProductBuiltin(Name: Builtin->Name);
1869 if (IntDot) {
1870 OC = IntDot->Opcode;
1871 IsSwapReq = IntDot->IsSwapReq;
1872 } else if (IsVec) {
1873       // Handle the "dot" and "dot_acc_sat" builtins that take vectors of
1874       // integers.
1875 LLVMContext &Ctx = MIRBuilder.getContext();
1876 SmallVector<StringRef, 10> TypeStrs;
1877 SPIRV::parseBuiltinTypeStr(BuiltinArgsTypeStrs&: TypeStrs, DemangledCall, Ctx);
1878 bool IsFirstSigned = TypeStrs[0].trim()[0] != 'u';
1879 bool IsSecondSigned = TypeStrs[1].trim()[0] != 'u';
1880
1881 if (Call->BuiltinName == "dot") {
1882 if (IsFirstSigned && IsSecondSigned)
1883 OC = SPIRV::OpSDot;
1884 else if (!IsFirstSigned && !IsSecondSigned)
1885 OC = SPIRV::OpUDot;
1886 else {
1887 OC = SPIRV::OpSUDot;
1888 if (!IsFirstSigned)
1889 IsSwapReq = true;
1890 }
1891 } else if (Call->BuiltinName == "dot_acc_sat") {
1892 if (IsFirstSigned && IsSecondSigned)
1893 OC = SPIRV::OpSDotAccSat;
1894 else if (!IsFirstSigned && !IsSecondSigned)
1895 OC = SPIRV::OpUDotAccSat;
1896 else {
1897 OC = SPIRV::OpSUDotAccSat;
1898 if (!IsFirstSigned)
1899 IsSwapReq = true;
1900 }
1901 }
1902 }
1903 }
1904
1905 MachineInstrBuilder MIB = MIRBuilder.buildInstr(Opcode: OC)
1906 .addDef(RegNo: Call->ReturnRegister)
1907 .addUse(RegNo: GR->getSPIRVTypeID(SpirvType: Call->ReturnType));
1908
1909 if (IsSwapReq) {
1910 MIB.addUse(RegNo: Call->Arguments[1]);
1911 MIB.addUse(RegNo: Call->Arguments[0]);
1912     // Add the remaining operands, needed for the dot_acc_sat* builtins.
1913 for (size_t i = 2; i < Call->Arguments.size(); ++i)
1914 MIB.addUse(RegNo: Call->Arguments[i]);
1915 } else {
1916 for (size_t i = 0; i < Call->Arguments.size(); ++i)
1917 MIB.addUse(RegNo: Call->Arguments[i]);
1918 }
1919
1920   // Add the Packed Vector Format operand for integer dot product builtins if
1921   // the arguments are scalar.
1922 if (!IsVec && OC != SPIRV::OpFMulS)
1923 MIB.addImm(Val: SPIRV::PackedVectorFormat4x8Bit);
1924
1925 return true;
1926}
1927
1928static bool generateWaveInst(const SPIRV::IncomingCall *Call,
1929 MachineIRBuilder &MIRBuilder,
1930 SPIRVGlobalRegistry *GR) {
1931 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1932 SPIRV::BuiltIn::BuiltIn Value =
1933 SPIRV::lookupGetBuiltin(Name: Builtin->Name, Set: Builtin->Set)->Value;
1934
1935 // For now, we only support a single Wave intrinsic with a single return type.
1936 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1937 LLT LLType = LLT::scalar(SizeInBits: GR->getScalarOrVectorBitWidth(Type: Call->ReturnType));
1938
1939 return buildBuiltinVariableLoad(
1940 MIRBuilder, VariableType: Call->ReturnType, GR, BuiltinValue: Value, LLType, Reg: Call->ReturnRegister,
1941 /* isConst= */ false, /* LinkageType= */ LinkageTy: std::nullopt);
1942}
1943
1944// We expect a builtin
1945 // Name(ptr sret([RetType]) %result, Type %operand1, Type %operand2)
1946// where %result is a pointer to where the result of the builtin execution
1947// is to be stored, and generate the following instructions:
1948 // Res = Opcode RetType Operand1 Operand2
1949// OpStore RetVariable Res
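// For instance (a sketch, assuming 32-bit operands):
//   __spirv_IAddCarry(ptr sret(%struct) %res, i32 %a, i32 %b)
// becomes
//   %r = OpIAddCarry %struct %a %b
//   OpStore %res %r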
1950static bool generateICarryBorrowInst(const SPIRV::IncomingCall *Call,
1951 MachineIRBuilder &MIRBuilder,
1952 SPIRVGlobalRegistry *GR) {
1953 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1954 unsigned Opcode =
1955 SPIRV::lookupNativeBuiltin(Name: Builtin->Name, Set: Builtin->Set)->Opcode;
1956
1957 Register SRetReg = Call->Arguments[0];
1958 SPIRVType *PtrRetType = GR->getSPIRVTypeForVReg(VReg: SRetReg);
1959 SPIRVType *RetType = GR->getPointeeType(PtrType: PtrRetType);
1960 if (!RetType)
1961 report_fatal_error(reason: "The first parameter must be a pointer");
1962 if (RetType->getOpcode() != SPIRV::OpTypeStruct)
1963 report_fatal_error(reason: "Expected struct type result for the arithmetic with "
1964 "overflow builtins");
1965
1966 SPIRVType *OpType1 = GR->getSPIRVTypeForVReg(VReg: Call->Arguments[1]);
1967 SPIRVType *OpType2 = GR->getSPIRVTypeForVReg(VReg: Call->Arguments[2]);
1968 if (!OpType1 || !OpType2 || OpType1 != OpType2)
1969 report_fatal_error(reason: "Operands must have the same type");
1970 if (OpType1->getOpcode() == SPIRV::OpTypeVector)
1971 switch (Opcode) {
1972 case SPIRV::OpIAddCarryS:
1973 Opcode = SPIRV::OpIAddCarryV;
1974 break;
1975 case SPIRV::OpISubBorrowS:
1976 Opcode = SPIRV::OpISubBorrowV;
1977 break;
1978 }
1979
1980 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1981 Register ResReg = MRI->createVirtualRegister(RegClass: &SPIRV::iIDRegClass);
1982 if (const TargetRegisterClass *DstRC =
1983 MRI->getRegClassOrNull(Reg: Call->Arguments[1])) {
1984 MRI->setRegClass(Reg: ResReg, RC: DstRC);
1985 MRI->setType(VReg: ResReg, Ty: MRI->getType(Reg: Call->Arguments[1]));
1986 } else {
1987 MRI->setType(VReg: ResReg, Ty: LLT::scalar(SizeInBits: 64));
1988 }
1989 GR->assignSPIRVTypeToVReg(Type: RetType, VReg: ResReg, MF: MIRBuilder.getMF());
1990 MIRBuilder.buildInstr(Opcode)
1991 .addDef(RegNo: ResReg)
1992 .addUse(RegNo: GR->getSPIRVTypeID(SpirvType: RetType))
1993 .addUse(RegNo: Call->Arguments[1])
1994 .addUse(RegNo: Call->Arguments[2]);
1995 MIRBuilder.buildInstr(Opcode: SPIRV::OpStore).addUse(RegNo: SRetReg).addUse(RegNo: ResReg);
1996 return true;
1997}
1998
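// Sketch: size-style queries (e.g. get_global_size) return 1 for an
// out-of-range dimension index, whereas id/offset-style queries return 0;
// the actual value is materialized by genWorkgroupQuery above.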
1999static bool generateGetQueryInst(const SPIRV::IncomingCall *Call,
2000 MachineIRBuilder &MIRBuilder,
2001 SPIRVGlobalRegistry *GR) {
2002 // Lookup the builtin record.
2003 SPIRV::BuiltIn::BuiltIn Value =
2004 SPIRV::lookupGetBuiltin(Name: Call->Builtin->Name, Set: Call->Builtin->Set)->Value;
2005 const bool IsDefaultOne = (Value == SPIRV::BuiltIn::GlobalSize ||
2006 Value == SPIRV::BuiltIn::NumWorkgroups ||
2007 Value == SPIRV::BuiltIn::WorkgroupSize ||
2008 Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
2009 return genWorkgroupQuery(Call, MIRBuilder, GR, BuiltinValue: Value, DefaultValue: IsDefaultOne ? 1 : 0);
2010}
2011
2012static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call,
2013 MachineIRBuilder &MIRBuilder,
2014 SPIRVGlobalRegistry *GR) {
2015 // Lookup the image size query component number in the TableGen records.
2016 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2017 uint32_t Component =
2018 SPIRV::lookupImageQueryBuiltin(Name: Builtin->Name, Set: Builtin->Set)->Component;
2019   // The query result may be either a vector or a scalar. If the return type
2020   // is not a vector, expect only a single size component; otherwise get the
2021   // number of expected components.
2022 unsigned NumExpectedRetComponents =
2023 Call->ReturnType->getOpcode() == SPIRV::OpTypeVector
2024 ? Call->ReturnType->getOperand(i: 2).getImm()
2025 : 1;
2026 // Get the actual number of query result/size components.
2027 SPIRVType *ImgType = GR->getSPIRVTypeForVReg(VReg: Call->Arguments[0]);
2028 unsigned NumActualRetComponents = getNumSizeComponents(imgType: ImgType);
2029 Register QueryResult = Call->ReturnRegister;
2030 SPIRVType *QueryResultType = Call->ReturnType;
2031 if (NumExpectedRetComponents != NumActualRetComponents) {
2032 unsigned Bitwidth = Call->ReturnType->getOpcode() == SPIRV::OpTypeInt
2033 ? Call->ReturnType->getOperand(i: 1).getImm()
2034 : 32;
2035 QueryResult = MIRBuilder.getMRI()->createGenericVirtualRegister(
2036 Ty: LLT::fixed_vector(NumElements: NumActualRetComponents, ScalarSizeInBits: Bitwidth));
2037 MIRBuilder.getMRI()->setRegClass(Reg: QueryResult, RC: &SPIRV::vIDRegClass);
2038 SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(BitWidth: Bitwidth, MIRBuilder);
2039 QueryResultType = GR->getOrCreateSPIRVVectorType(
2040 BaseType: IntTy, NumElements: NumActualRetComponents, MIRBuilder, EmitIR: true);
2041 GR->assignSPIRVTypeToVReg(Type: QueryResultType, VReg: QueryResult, MF: MIRBuilder.getMF());
2042 }
2043 bool IsDimBuf = ImgType->getOperand(i: 2).getImm() == SPIRV::Dim::DIM_Buffer;
2044 unsigned Opcode =
2045 IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
2046 auto MIB = MIRBuilder.buildInstr(Opcode)
2047 .addDef(RegNo: QueryResult)
2048 .addUse(RegNo: GR->getSPIRVTypeID(SpirvType: QueryResultType))
2049 .addUse(RegNo: Call->Arguments[0]);
2050 if (!IsDimBuf)
2051 MIB.addUse(RegNo: buildConstantIntReg32(Val: 0, MIRBuilder, GR)); // Lod id.
2052 if (NumExpectedRetComponents == NumActualRetComponents)
2053 return true;
2054 if (NumExpectedRetComponents == 1) {
2055 // Only 1 component is expected, build OpCompositeExtract instruction.
2056 unsigned ExtractedComposite =
2057 Component == 3 ? NumActualRetComponents - 1 : Component;
2058 assert(ExtractedComposite < NumActualRetComponents &&
2059 "Invalid composite index!");
2060 Register TypeReg = GR->getSPIRVTypeID(SpirvType: Call->ReturnType);
2061 SPIRVType *NewType = nullptr;
2062 if (QueryResultType->getOpcode() == SPIRV::OpTypeVector) {
2063 Register NewTypeReg = QueryResultType->getOperand(i: 1).getReg();
2064 if (TypeReg != NewTypeReg &&
2065 (NewType = GR->getSPIRVTypeForVReg(VReg: NewTypeReg)) != nullptr)
2066 TypeReg = NewTypeReg;
2067 }
2068 MIRBuilder.buildInstr(Opcode: SPIRV::OpCompositeExtract)
2069 .addDef(RegNo: Call->ReturnRegister)
2070 .addUse(RegNo: TypeReg)
2071 .addUse(RegNo: QueryResult)
2072 .addImm(Val: ExtractedComposite);
2073 if (NewType != nullptr)
2074 updateRegType(Reg: Call->ReturnRegister, Ty: nullptr, SpirvTy: NewType, GR, MIB&: MIRBuilder,
2075 MRI&: MIRBuilder.getMF().getRegInfo());
2076 } else {
2077 // More than 1 component is expected, fill a new vector.
2078 auto MIB = MIRBuilder.buildInstr(Opcode: SPIRV::OpVectorShuffle)
2079 .addDef(RegNo: Call->ReturnRegister)
2080 .addUse(RegNo: GR->getSPIRVTypeID(SpirvType: Call->ReturnType))
2081 .addUse(RegNo: QueryResult)
2082 .addUse(RegNo: QueryResult);
2083 for (unsigned i = 0; i < NumExpectedRetComponents; ++i)
2084 MIB.addImm(Val: i < NumActualRetComponents ? i : 0xffffffff);
2085 }
2086 return true;
2087}
2088
2089static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call,
2090 MachineIRBuilder &MIRBuilder,
2091 SPIRVGlobalRegistry *GR) {
2092 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
2093 "Image samples query result must be of int type!");
2094
2095 // Lookup the instruction opcode in the TableGen records.
2096 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2097 unsigned Opcode =
2098 SPIRV::lookupNativeBuiltin(Name: Builtin->Name, Set: Builtin->Set)->Opcode;
2099
2100 Register Image = Call->Arguments[0];
2101 SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
2102 GR->getSPIRVTypeForVReg(VReg: Image)->getOperand(i: 2).getImm());
2103 (void)ImageDimensionality;
2104
2105 switch (Opcode) {
2106 case SPIRV::OpImageQuerySamples:
2107 assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
2108 "Image must be of 2D dimensionality");
2109 break;
2110 case SPIRV::OpImageQueryLevels:
2111 assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
2112 ImageDimensionality == SPIRV::Dim::DIM_2D ||
2113 ImageDimensionality == SPIRV::Dim::DIM_3D ||
2114 ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
2115 "Image must be of 1D/2D/3D/Cube dimensionality");
2116 break;
2117 }
2118
2119 MIRBuilder.buildInstr(Opcode)
2120 .addDef(RegNo: Call->ReturnRegister)
2121 .addUse(RegNo: GR->getSPIRVTypeID(SpirvType: Call->ReturnType))
2122 .addUse(RegNo: Image);
2123 return true;
2124}
2125
2126// TODO: Move to TableGen.
2127static SPIRV::SamplerAddressingMode::SamplerAddressingMode
2128getSamplerAddressingModeFromBitmask(unsigned Bitmask) {
2129 switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
2130 case SPIRV::CLK_ADDRESS_CLAMP:
2131 return SPIRV::SamplerAddressingMode::Clamp;
2132 case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
2133 return SPIRV::SamplerAddressingMode::ClampToEdge;
2134 case SPIRV::CLK_ADDRESS_REPEAT:
2135 return SPIRV::SamplerAddressingMode::Repeat;
2136 case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
2137 return SPIRV::SamplerAddressingMode::RepeatMirrored;
2138 case SPIRV::CLK_ADDRESS_NONE:
2139 return SPIRV::SamplerAddressingMode::None;
2140 default:
2141 report_fatal_error(reason: "Unknown CL address mode");
2142 }
2143}
2144
2145static unsigned getSamplerParamFromBitmask(unsigned Bitmask) {
2146 return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
2147}
2148
2149static SPIRV::SamplerFilterMode::SamplerFilterMode
2150getSamplerFilterModeFromBitmask(unsigned Bitmask) {
2151 if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
2152 return SPIRV::SamplerFilterMode::Linear;
2153 if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
2154 return SPIRV::SamplerFilterMode::Nearest;
2155 return SPIRV::SamplerFilterMode::Nearest;
2156}
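
// For illustration (a sketch): the OpenCL sampler literal
//   CLK_NORMALIZED_COORDS_TRUE | CLK_ADDRESS_CLAMP | CLK_FILTER_LINEAR
// decodes via the three helpers above to addressing mode Clamp, a
// normalized-coordinates parameter of 1, and filter mode Linear.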
2157
2158static bool generateReadImageInst(const StringRef DemangledCall,
2159 const SPIRV::IncomingCall *Call,
2160 MachineIRBuilder &MIRBuilder,
2161 SPIRVGlobalRegistry *GR) {
2162 if (Call->isSpirvOp())
2163 return buildOpFromWrapper(MIRBuilder, Opcode: SPIRV::OpImageRead, Call,
2164 TypeReg: GR->getSPIRVTypeID(SpirvType: Call->ReturnType));
2165 Register Image = Call->Arguments[0];
2166 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2167 bool HasOclSampler = DemangledCall.contains_insensitive(Other: "ocl_sampler");
2168 bool HasMsaa = DemangledCall.contains_insensitive(Other: "msaa");
2169 if (HasOclSampler) {
2170 Register Sampler = Call->Arguments[1];
2171
2172 if (!GR->isScalarOfType(VReg: Sampler, TypeOpcode: SPIRV::OpTypeSampler) &&
2173 getDefInstrMaybeConstant(ConstReg&: Sampler, MRI)->getOperand(i: 1).isCImm()) {
2174 uint64_t SamplerMask = getIConstVal(ConstReg: Sampler, MRI);
2175 Sampler = GR->buildConstantSampler(
2176 Res: Register(), AddrMode: getSamplerAddressingModeFromBitmask(Bitmask: SamplerMask),
2177 Param: getSamplerParamFromBitmask(Bitmask: SamplerMask),
2178 FilerMode: getSamplerFilterModeFromBitmask(Bitmask: SamplerMask), MIRBuilder);
2179 }
2180 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(VReg: Image);
2181 SPIRVType *SampledImageType =
2182 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
2183 Register SampledImage = MRI->createVirtualRegister(RegClass: &SPIRV::iIDRegClass);
2184
2185 MIRBuilder.buildInstr(Opcode: SPIRV::OpSampledImage)
2186 .addDef(RegNo: SampledImage)
2187 .addUse(RegNo: GR->getSPIRVTypeID(SpirvType: SampledImageType))
2188 .addUse(RegNo: Image)
2189 .addUse(RegNo: Sampler);
2190
2191 Register Lod = GR->buildConstantFP(Val: APFloat::getZero(Sem: APFloat::IEEEsingle()),
2192 MIRBuilder);
2193
2194 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeVector) {
2195 SPIRVType *TempType =
2196 GR->getOrCreateSPIRVVectorType(BaseType: Call->ReturnType, NumElements: 4, MIRBuilder, EmitIR: true);
2197 Register TempRegister =
2198 MRI->createGenericVirtualRegister(Ty: GR->getRegType(SpvType: TempType));
2199 MRI->setRegClass(Reg: TempRegister, RC: GR->getRegClass(SpvType: TempType));
2200 GR->assignSPIRVTypeToVReg(Type: TempType, VReg: TempRegister, MF: MIRBuilder.getMF());
2201 MIRBuilder.buildInstr(Opcode: SPIRV::OpImageSampleExplicitLod)
2202 .addDef(RegNo: TempRegister)
2203 .addUse(RegNo: GR->getSPIRVTypeID(SpirvType: TempType))
2204 .addUse(RegNo: SampledImage)
2205 .addUse(RegNo: Call->Arguments[2]) // Coordinate.
2206 .addImm(Val: SPIRV::ImageOperand::Lod)
2207 .addUse(RegNo: Lod);
2208 MIRBuilder.buildInstr(Opcode: SPIRV::OpCompositeExtract)
2209 .addDef(RegNo: Call->ReturnRegister)
2210 .addUse(RegNo: GR->getSPIRVTypeID(SpirvType: Call->ReturnType))
2211 .addUse(RegNo: TempRegister)
2212 .addImm(Val: 0);
2213 } else {
2214 MIRBuilder.buildInstr(Opcode: SPIRV::OpImageSampleExplicitLod)
2215 .addDef(RegNo: Call->ReturnRegister)
2216 .addUse(RegNo: GR->getSPIRVTypeID(SpirvType: Call->ReturnType))
2217 .addUse(RegNo: SampledImage)
2218 .addUse(RegNo: Call->Arguments[2]) // Coordinate.
2219 .addImm(Val: SPIRV::ImageOperand::Lod)
2220 .addUse(RegNo: Lod);
2221 }
2222 } else if (HasMsaa) {
2223 MIRBuilder.buildInstr(Opcode: SPIRV::OpImageRead)
2224 .addDef(RegNo: Call->ReturnRegister)
2225 .addUse(RegNo: GR->getSPIRVTypeID(SpirvType: Call->ReturnType))
2226 .addUse(RegNo: Image)
2227 .addUse(RegNo: Call->Arguments[1]) // Coordinate.
2228 .addImm(Val: SPIRV::ImageOperand::Sample)
2229 .addUse(RegNo: Call->Arguments[2]);
2230 } else {
2231 MIRBuilder.buildInstr(Opcode: SPIRV::OpImageRead)
2232 .addDef(RegNo: Call->ReturnRegister)
2233 .addUse(RegNo: GR->getSPIRVTypeID(SpirvType: Call->ReturnType))
2234 .addUse(RegNo: Image)
2235 .addUse(RegNo: Call->Arguments[1]); // Coordinate.
2236 }
2237 return true;
2238}
2239
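// Sketch: write_imagef(image, coord, texel) is expected to lower to
//   OpImageWrite %image %coord %texel
// which produces no result register.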
2240static bool generateWriteImageInst(const SPIRV::IncomingCall *Call,
2241 MachineIRBuilder &MIRBuilder,
2242 SPIRVGlobalRegistry *GR) {
2243 if (Call->isSpirvOp())
2244 return buildOpFromWrapper(MIRBuilder, Opcode: SPIRV::OpImageWrite, Call,
2245 TypeReg: Register(0));
2246 MIRBuilder.buildInstr(Opcode: SPIRV::OpImageWrite)
2247 .addUse(RegNo: Call->Arguments[0]) // Image.
2248 .addUse(RegNo: Call->Arguments[1]) // Coordinate.
2249 .addUse(RegNo: Call->Arguments[2]); // Texel.
2250 return true;
2251}
2252
2253static bool generateSampleImageInst(const StringRef DemangledCall,
2254 const SPIRV::IncomingCall *Call,
2255 MachineIRBuilder &MIRBuilder,
2256 SPIRVGlobalRegistry *GR) {
2257 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2258 if (Call->Builtin->Name.contains_insensitive(
2259 Other: "__translate_sampler_initializer")) {
2260 // Build sampler literal.
2261 uint64_t Bitmask = getIConstVal(ConstReg: Call->Arguments[0], MRI);
2262 Register Sampler = GR->buildConstantSampler(
2263 Res: Call->ReturnRegister, AddrMode: getSamplerAddressingModeFromBitmask(Bitmask),
2264 Param: getSamplerParamFromBitmask(Bitmask),
2265 FilerMode: getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder);
2266 return Sampler.isValid();
2267 } else if (Call->Builtin->Name.contains_insensitive(Other: "__spirv_SampledImage")) {
2268 // Create OpSampledImage.
2269 Register Image = Call->Arguments[0];
2270 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(VReg: Image);
2271 SPIRVType *SampledImageType =
2272 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
2273 Register SampledImage =
2274 Call->ReturnRegister.isValid()
2275 ? Call->ReturnRegister
2276 : MRI->createVirtualRegister(RegClass: &SPIRV::iIDRegClass);
2277 MIRBuilder.buildInstr(Opcode: SPIRV::OpSampledImage)
2278 .addDef(RegNo: SampledImage)
2279 .addUse(RegNo: GR->getSPIRVTypeID(SpirvType: SampledImageType))
2280 .addUse(RegNo: Image)
2281 .addUse(RegNo: Call->Arguments[1]); // Sampler.
2282 return true;
2283 } else if (Call->Builtin->Name.contains_insensitive(
2284 Other: "__spirv_ImageSampleExplicitLod")) {
2285 // Sample an image using an explicit level of detail.
2286 std::string ReturnType = DemangledCall.str();
2287 if (DemangledCall.contains(Other: "_R")) {
2288 ReturnType = ReturnType.substr(pos: ReturnType.find(s: "_R") + 2);
2289 ReturnType = ReturnType.substr(pos: 0, n: ReturnType.find(c: '('));
2290 }
2291 SPIRVType *Type =
2292 Call->ReturnType
2293 ? Call->ReturnType
2294 : GR->getOrCreateSPIRVTypeByName(TypeStr: ReturnType, MIRBuilder, EmitIR: true);
2295 if (!Type) {
2296 std::string DiagMsg =
2297 "Unable to recognize SPIRV type name: " + ReturnType;
2298 report_fatal_error(reason: DiagMsg.c_str());
2299 }
2300 MIRBuilder.buildInstr(Opcode: SPIRV::OpImageSampleExplicitLod)
2301 .addDef(RegNo: Call->ReturnRegister)
2302 .addUse(RegNo: GR->getSPIRVTypeID(SpirvType: Type))
2303 .addUse(RegNo: Call->Arguments[0]) // Image.
2304 .addUse(RegNo: Call->Arguments[1]) // Coordinate.
2305 .addImm(Val: SPIRV::ImageOperand::Lod)
2306 .addUse(RegNo: Call->Arguments[3]);
2307 return true;
2308 }
2309 return false;
2310}
2311
2312static bool generateSelectInst(const SPIRV::IncomingCall *Call,
2313 MachineIRBuilder &MIRBuilder) {
2314 MIRBuilder.buildSelect(Res: Call->ReturnRegister, Tst: Call->Arguments[0],
2315 Op0: Call->Arguments[1], Op1: Call->Arguments[2]);
2316 return true;
2317}
2318
2319static bool generateConstructInst(const SPIRV::IncomingCall *Call,
2320 MachineIRBuilder &MIRBuilder,
2321 SPIRVGlobalRegistry *GR) {
2322 createContinuedInstructions(MIRBuilder, Opcode: SPIRV::OpCompositeConstruct, MinWC: 3,
2323 ContinuedOpcode: SPIRV::OpCompositeConstructContinuedINTEL,
2324 Args: Call->Arguments, ReturnRegister: Call->ReturnRegister,
2325 TypeID: GR->getSPIRVTypeID(SpirvType: Call->ReturnType));
2326 return true;
2327}
2328
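// Sketch (assuming SPV_KHR_cooperative_matrix): for a wrapper call such as
//   __spirv_CooperativeMatrixLoadKHR(%ptr, %layout, %stride, MemOperand)
// the trailing memory operand is a literal, so it is peeled off below and
// appended as an immediate rather than as a register use.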
2329static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call,
2330 MachineIRBuilder &MIRBuilder,
2331 SPIRVGlobalRegistry *GR) {
2332 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2333 unsigned Opcode =
2334 SPIRV::lookupNativeBuiltin(Name: Builtin->Name, Set: Builtin->Set)->Opcode;
2335 bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR &&
2336 Opcode != SPIRV::OpCooperativeMatrixStoreCheckedINTEL &&
2337 Opcode != SPIRV::OpCooperativeMatrixPrefetchINTEL;
2338 unsigned ArgSz = Call->Arguments.size();
2339 unsigned LiteralIdx = 0;
2340 switch (Opcode) {
2341 // Memory operand is optional and is literal.
2342 case SPIRV::OpCooperativeMatrixLoadKHR:
2343 LiteralIdx = ArgSz > 3 ? 3 : 0;
2344 break;
2345 case SPIRV::OpCooperativeMatrixStoreKHR:
2346 LiteralIdx = ArgSz > 4 ? 4 : 0;
2347 break;
2348 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
2349 LiteralIdx = ArgSz > 7 ? 7 : 0;
2350 break;
2351 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
2352 LiteralIdx = ArgSz > 8 ? 8 : 0;
2353 break;
2354 // Cooperative Matrix Operands operand is optional and is literal.
2355 case SPIRV::OpCooperativeMatrixMulAddKHR:
2356 LiteralIdx = ArgSz > 3 ? 3 : 0;
2357 break;
2358   }
2359
2360 SmallVector<uint32_t, 1> ImmArgs;
2361 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2362 if (Opcode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
2363 const uint32_t CacheLevel = getConstFromIntrinsic(Reg: Call->Arguments[3], MRI);
2364 auto MIB = MIRBuilder.buildInstr(Opcode: SPIRV::OpCooperativeMatrixPrefetchINTEL)
2365 .addUse(RegNo: Call->Arguments[0]) // pointer
2366 .addUse(RegNo: Call->Arguments[1]) // rows
2367 .addUse(RegNo: Call->Arguments[2]) // columns
2368 .addImm(Val: CacheLevel) // cache level
2369 .addUse(RegNo: Call->Arguments[4]); // memory layout
2370 if (ArgSz > 5)
2371 MIB.addUse(RegNo: Call->Arguments[5]); // stride
2372 if (ArgSz > 6) {
2373 const uint32_t MemOp = getConstFromIntrinsic(Reg: Call->Arguments[6], MRI);
2374 MIB.addImm(Val: MemOp); // memory operand
2375 }
2376 return true;
2377 }
2378 if (LiteralIdx > 0)
2379 ImmArgs.push_back(Elt: getConstFromIntrinsic(Reg: Call->Arguments[LiteralIdx], MRI));
2380 Register TypeReg = GR->getSPIRVTypeID(SpirvType: Call->ReturnType);
2381 if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {
2382 SPIRVType *CoopMatrType = GR->getSPIRVTypeForVReg(VReg: Call->Arguments[0]);
2383 if (!CoopMatrType)
2384 report_fatal_error(reason: "Can't find a register's type definition");
2385 MIRBuilder.buildInstr(Opcode)
2386 .addDef(RegNo: Call->ReturnRegister)
2387 .addUse(RegNo: TypeReg)
2388 .addUse(RegNo: CoopMatrType->getOperand(i: 0).getReg());
2389 return true;
2390 }
2391 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2392 TypeReg: IsSet ? TypeReg : Register(0), ImmArgs);
2393}
2394
2395static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call,
2396 MachineIRBuilder &MIRBuilder,
2397 SPIRVGlobalRegistry *GR) {
2398 // Lookup the instruction opcode in the TableGen records.
2399 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2400 unsigned Opcode =
2401 SPIRV::lookupNativeBuiltin(Name: Builtin->Name, Set: Builtin->Set)->Opcode;
2402 const MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2403
2404 switch (Opcode) {
2405 case SPIRV::OpSpecConstant: {
2406 // Build the SpecID decoration.
2407 unsigned SpecId =
2408 static_cast<unsigned>(getIConstVal(ConstReg: Call->Arguments[0], MRI));
2409 buildOpDecorate(Reg: Call->ReturnRegister, MIRBuilder, Dec: SPIRV::Decoration::SpecId,
2410 DecArgs: {SpecId});
2411 // Determine the constant MI.
2412 Register ConstRegister = Call->Arguments[1];
2413 const MachineInstr *Const = getDefInstrMaybeConstant(ConstReg&: ConstRegister, MRI);
2414 assert(Const &&
2415 (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
2416 Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
2417 "Argument should be either an int or floating-point constant");
2418 // Determine the opcode and built the OpSpec MI.
2419 const MachineOperand &ConstOperand = Const->getOperand(i: 1);
2420 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
2421 assert(ConstOperand.isCImm() && "Int constant operand is expected");
2422 Opcode = ConstOperand.getCImm()->getValue().getZExtValue()
2423 ? SPIRV::OpSpecConstantTrue
2424 : SPIRV::OpSpecConstantFalse;
2425 }
2426 auto MIB = MIRBuilder.buildInstr(Opcode)
2427 .addDef(RegNo: Call->ReturnRegister)
2428 .addUse(RegNo: GR->getSPIRVTypeID(SpirvType: Call->ReturnType));
2429
2430 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
2431 if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
2432 addNumImm(Imm: ConstOperand.getCImm()->getValue(), MIB);
2433 else
2434 addNumImm(Imm: ConstOperand.getFPImm()->getValueAPF().bitcastToAPInt(), MIB);
2435 }
2436 return true;
2437 }
2438 case SPIRV::OpSpecConstantComposite: {
2439 createContinuedInstructions(MIRBuilder, Opcode, MinWC: 3,
2440 ContinuedOpcode: SPIRV::OpSpecConstantCompositeContinuedINTEL,
2441 Args: Call->Arguments, ReturnRegister: Call->ReturnRegister,
2442 TypeID: GR->getSPIRVTypeID(SpirvType: Call->ReturnType));
2443 return true;
2444 }
2445 default:
2446 return false;
2447 }
2448}
2449
2450static bool generateExtendedBitOpsInst(const SPIRV::IncomingCall *Call,
2451 MachineIRBuilder &MIRBuilder,
2452 SPIRVGlobalRegistry *GR) {
2453 // Lookup the instruction opcode in the TableGen records.
2454 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2455 unsigned Opcode =
2456 SPIRV::lookupNativeBuiltin(Name: Builtin->Name, Set: Builtin->Set)->Opcode;
2457
2458 return buildExtendedBitOpsInst(Call, Opcode, MIRBuilder, GR);
2459}
2460
2461static bool generateBindlessImageINTELInst(const SPIRV::IncomingCall *Call,
2462 MachineIRBuilder &MIRBuilder,
2463 SPIRVGlobalRegistry *GR) {
2464 // Lookup the instruction opcode in the TableGen records.
2465 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2466 unsigned Opcode =
2467 SPIRV::lookupNativeBuiltin(Name: Builtin->Name, Set: Builtin->Set)->Opcode;
2468
2469 return buildBindlessImageINTELInst(Call, Opcode, MIRBuilder, GR);
2470}
2471
2472static bool generateBlockingPipesInst(const SPIRV::IncomingCall *Call,
2473 MachineIRBuilder &MIRBuilder,
2474 SPIRVGlobalRegistry *GR) {
2475 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2476 unsigned Opcode =
2477 SPIRV::lookupNativeBuiltin(Name: Builtin->Name, Set: Builtin->Set)->Opcode;
2478 return buildOpFromWrapper(MIRBuilder, Opcode, Call, TypeReg: Register(0));
2479}
2480
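// A sketch of the two shapes handled below: if the return type is void, the
// first argument is an sret pointer, the result is computed into a temporary
// and stored through that pointer, and the five literal operands come from
// arguments 2-6. Otherwise the result is returned directly and the five
// literals come from arguments 1-5.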
2481static bool buildAPFixedPointInst(const SPIRV::IncomingCall *Call,
2482 unsigned Opcode, MachineIRBuilder &MIRBuilder,
2483 SPIRVGlobalRegistry *GR) {
2484 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2485 SmallVector<uint32_t, 1> ImmArgs;
2486 Register InputReg = Call->Arguments[0];
2487 const Type *RetTy = GR->getTypeForSPIRVType(Ty: Call->ReturnType);
2488 bool IsSRet = RetTy->isVoidTy();
2489
2490 if (IsSRet) {
2491 const LLT ValTy = MRI->getType(Reg: InputReg);
2492 Register ActualRetValReg = MRI->createGenericVirtualRegister(Ty: ValTy);
2493 SPIRVType *InstructionType =
2494 GR->getPointeeType(PtrType: GR->getSPIRVTypeForVReg(VReg: InputReg));
2495 InputReg = Call->Arguments[1];
2496 auto InputType = GR->getTypeForSPIRVType(Ty: GR->getSPIRVTypeForVReg(VReg: InputReg));
2497 Register PtrInputReg;
2498 if (InputType->getTypeID() == llvm::Type::TypeID::TypedPointerTyID) {
2499 LLT InputLLT = MRI->getType(Reg: InputReg);
2500 PtrInputReg = MRI->createGenericVirtualRegister(Ty: InputLLT);
2501 SPIRVType *PtrType =
2502 GR->getPointeeType(PtrType: GR->getSPIRVTypeForVReg(VReg: InputReg));
2503 MachineMemOperand *MMO1 = MIRBuilder.getMF().getMachineMemOperand(
2504 PtrInfo: MachinePointerInfo(), F: MachineMemOperand::MOLoad,
2505 Size: InputLLT.getSizeInBytes(), BaseAlignment: Align(4));
2506 MIRBuilder.buildLoad(Res: PtrInputReg, Addr: InputReg, MMO&: *MMO1);
2507 MRI->setRegClass(Reg: PtrInputReg, RC: &SPIRV::iIDRegClass);
2508 GR->assignSPIRVTypeToVReg(Type: PtrType, VReg: PtrInputReg, MF: MIRBuilder.getMF());
2509 }
2510
2511 for (unsigned index = 2; index < 7; index++) {
2512 ImmArgs.push_back(Elt: getConstFromIntrinsic(Reg: Call->Arguments[index], MRI));
2513 }
2514
2515     // Emit the fixed-point instruction.
2516 auto MIB = MIRBuilder.buildInstr(Opcode)
2517 .addDef(RegNo: ActualRetValReg)
2518 .addUse(RegNo: GR->getSPIRVTypeID(SpirvType: InstructionType));
2519 if (PtrInputReg)
2520 MIB.addUse(RegNo: PtrInputReg);
2521 else
2522 MIB.addUse(RegNo: InputReg);
2523
2524 for (uint32_t Imm : ImmArgs)
2525 MIB.addImm(Val: Imm);
2526 unsigned Size = ValTy.getSizeInBytes();
2527     // Store the result to the sret pointer passed in Call->Arguments[0].
2528 MachineMemOperand *MMO = MIRBuilder.getMF().getMachineMemOperand(
2529 PtrInfo: MachinePointerInfo(), F: MachineMemOperand::MOStore, Size, BaseAlignment: Align(4));
2530 MRI->setRegClass(Reg: ActualRetValReg, RC: &SPIRV::pIDRegClass);
2531 MIRBuilder.buildStore(Val: ActualRetValReg, Addr: Call->Arguments[0], MMO&: *MMO);
2532 return true;
2533 } else {
2534 for (unsigned index = 1; index < 6; index++)
2535 ImmArgs.push_back(Elt: getConstFromIntrinsic(Reg: Call->Arguments[index], MRI));
2536
2537 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2538 TypeReg: GR->getSPIRVTypeID(SpirvType: Call->ReturnType), ImmArgs);
2539 }
2540}
2541
2542static bool generateAPFixedPointInst(const SPIRV::IncomingCall *Call,
2543 MachineIRBuilder &MIRBuilder,
2544 SPIRVGlobalRegistry *GR) {
2545 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2546 unsigned Opcode =
2547 SPIRV::lookupNativeBuiltin(Name: Builtin->Name, Set: Builtin->Set)->Opcode;
2548
2549 return buildAPFixedPointInst(Call, Opcode, MIRBuilder, GR);
2550}
2551
2552static bool
2553generateTernaryBitwiseFunctionINTELInst(const SPIRV::IncomingCall *Call,
2554 MachineIRBuilder &MIRBuilder,
2555 SPIRVGlobalRegistry *GR) {
2556 // Lookup the instruction opcode in the TableGen records.
2557 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2558 unsigned Opcode =
2559 SPIRV::lookupNativeBuiltin(Name: Builtin->Name, Set: Builtin->Set)->Opcode;
2560
2561 return buildTernaryBitwiseFunctionINTELInst(Call, Opcode, MIRBuilder, GR);
2562}
2563
2564static bool generateImageChannelDataTypeInst(const SPIRV::IncomingCall *Call,
2565 MachineIRBuilder &MIRBuilder,
2566 SPIRVGlobalRegistry *GR) {
2567 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2568 unsigned Opcode =
2569 SPIRV::lookupNativeBuiltin(Name: Builtin->Name, Set: Builtin->Set)->Opcode;
2570
2571 return buildImageChannelDataTypeInst(Call, Opcode, MIRBuilder, GR);
2572}
2573
2574static bool generate2DBlockIOINTELInst(const SPIRV::IncomingCall *Call,
2575 MachineIRBuilder &MIRBuilder,
2576 SPIRVGlobalRegistry *GR) {
2577 // Lookup the instruction opcode in the TableGen records.
2578 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2579 unsigned Opcode =
2580 SPIRV::lookupNativeBuiltin(Name: Builtin->Name, Set: Builtin->Set)->Opcode;
2581
2582 return build2DBlockIOINTELInst(Call, Opcode, MIRBuilder, GR);
2583}
2584
2585static bool generatePipeInst(const SPIRV::IncomingCall *Call,
2586 MachineIRBuilder &MIRBuilder,
2587 SPIRVGlobalRegistry *GR) {
2588 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2589 unsigned Opcode =
2590 SPIRV::lookupNativeBuiltin(Name: Builtin->Name, Set: Builtin->Set)->Opcode;
2591
2592 unsigned Scope = SPIRV::Scope::Workgroup;
2593 if (Builtin->Name.contains(Other: "sub_group"))
2594 Scope = SPIRV::Scope::Subgroup;
2595
2596 return buildPipeInst(Call, Opcode, Scope, MIRBuilder, GR);
2597}
2598
2599static bool generatePredicatedLoadStoreInst(const SPIRV::IncomingCall *Call,
2600 MachineIRBuilder &MIRBuilder,
2601 SPIRVGlobalRegistry *GR) {
2602 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2603 unsigned Opcode =
2604 SPIRV::lookupNativeBuiltin(Name: Builtin->Name, Set: Builtin->Set)->Opcode;
2605
2606 bool IsSet = Opcode != SPIRV::OpPredicatedStoreINTEL;
2607 unsigned ArgSz = Call->Arguments.size();
2608 SmallVector<uint32_t, 1> ImmArgs;
2609 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2610 // Memory operand is optional and is literal.
2611 if (ArgSz > 3)
2612 ImmArgs.push_back(
2613 Elt: getConstFromIntrinsic(Reg: Call->Arguments[/*Literal index*/ 3], MRI));
2614
2615 Register TypeReg = GR->getSPIRVTypeID(SpirvType: Call->ReturnType);
2616 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2617 TypeReg: IsSet ? TypeReg : Register(0), ImmArgs);
2618}
2619
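// Sketch: ndrange_2D(gws) with only a global work size is expected to
// become
//   %tmp = OpBuildNDRange %struct %gws %zero %zero
//   OpStore %dst %tmp
// where %zero is a zero constant (or zero array) standing in for the
// missing local size and offset operands.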
2620static bool buildNDRange(const SPIRV::IncomingCall *Call,
2621 MachineIRBuilder &MIRBuilder,
2622 SPIRVGlobalRegistry *GR) {
2623 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2624 SPIRVType *PtrType = GR->getSPIRVTypeForVReg(VReg: Call->Arguments[0]);
2625 assert(PtrType->getOpcode() == SPIRV::OpTypePointer &&
2626 PtrType->getOperand(2).isReg());
2627 Register TypeReg = PtrType->getOperand(i: 2).getReg();
2628 SPIRVType *StructType = GR->getSPIRVTypeForVReg(VReg: TypeReg);
2629 MachineFunction &MF = MIRBuilder.getMF();
2630 Register TmpReg = MRI->createVirtualRegister(RegClass: &SPIRV::iIDRegClass);
2631 GR->assignSPIRVTypeToVReg(Type: StructType, VReg: TmpReg, MF);
2632   // Skip the first arg; it's the destination pointer. OpBuildNDRange takes
2633   // three other arguments, so pass a zero constant for any that are absent.
2634 unsigned NumArgs = Call->Arguments.size();
2635 assert(NumArgs >= 2);
2636 Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
2637 Register LocalWorkSize =
2638 NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
2639 Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1];
2640 if (NumArgs < 4) {
2641 Register Const;
2642 SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(VReg: GlobalWorkSize);
2643 if (SpvTy->getOpcode() == SPIRV::OpTypePointer) {
2644 MachineInstr *DefInstr = MRI->getUniqueVRegDef(Reg: GlobalWorkSize);
2645 assert(DefInstr && isSpvIntrinsic(*DefInstr, Intrinsic::spv_gep) &&
2646 DefInstr->getOperand(3).isReg());
2647 Register GWSPtr = DefInstr->getOperand(i: 3).getReg();
2648 // TODO: Maybe simplify generation of the type of the fields.
2649 unsigned Size = Call->Builtin->Name == "ndrange_3D" ? 3 : 2;
2650 unsigned BitWidth = GR->getPointerSize() == 64 ? 64 : 32;
2651 Type *BaseTy = IntegerType::get(C&: MF.getFunction().getContext(), NumBits: BitWidth);
2652 Type *FieldTy = ArrayType::get(ElementType: BaseTy, NumElements: Size);
2653 SPIRVType *SpvFieldTy = GR->getOrCreateSPIRVType(
2654 Type: FieldTy, MIRBuilder, AQ: SPIRV::AccessQualifier::ReadWrite, EmitIR: true);
2655 GlobalWorkSize = MRI->createVirtualRegister(RegClass: &SPIRV::iIDRegClass);
2656 GR->assignSPIRVTypeToVReg(Type: SpvFieldTy, VReg: GlobalWorkSize, MF);
2657 MIRBuilder.buildInstr(Opcode: SPIRV::OpLoad)
2658 .addDef(RegNo: GlobalWorkSize)
2659 .addUse(RegNo: GR->getSPIRVTypeID(SpirvType: SpvFieldTy))
2660 .addUse(RegNo: GWSPtr);
2661 const SPIRVSubtarget &ST =
2662 cast<SPIRVSubtarget>(Val: MIRBuilder.getMF().getSubtarget());
2663 Const = GR->getOrCreateConstIntArray(Val: 0, Num: Size, I&: *MIRBuilder.getInsertPt(),
2664 SpvType: SpvFieldTy, TII: *ST.getInstrInfo());
2665 } else {
2666 Const = GR->buildConstantInt(Val: 0, MIRBuilder, SpvType: SpvTy, EmitIR: true);
2667 }
2668 if (!LocalWorkSize.isValid())
2669 LocalWorkSize = Const;
2670 if (!GlobalWorkOffset.isValid())
2671 GlobalWorkOffset = Const;
2672 }
2673 assert(LocalWorkSize.isValid() && GlobalWorkOffset.isValid());
2674 MIRBuilder.buildInstr(Opcode: SPIRV::OpBuildNDRange)
2675 .addDef(RegNo: TmpReg)
2676 .addUse(RegNo: TypeReg)
2677 .addUse(RegNo: GlobalWorkSize)
2678 .addUse(RegNo: LocalWorkSize)
2679 .addUse(RegNo: GlobalWorkOffset);
2680 return MIRBuilder.buildInstr(Opcode: SPIRV::OpStore)
2681 .addUse(RegNo: Call->Arguments[0])
2682 .addUse(RegNo: TmpReg);
2683}
2684
2685 // TODO: Maybe move this to the global registry.
2686static SPIRVType *
2687getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder,
2688 SPIRVGlobalRegistry *GR) {
2689 LLVMContext &Context = MIRBuilder.getMF().getFunction().getContext();
2690 unsigned SC1 = storageClassToAddressSpace(SC: SPIRV::StorageClass::Generic);
2691 Type *PtrType = PointerType::get(C&: Context, AddressSpace: SC1);
2692 return GR->getOrCreateSPIRVType(Type: PtrType, MIRBuilder,
2693 AQ: SPIRV::AccessQualifier::ReadWrite, EmitIR: true);
2694}
2695
2696static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call,
2697 MachineIRBuilder &MIRBuilder,
2698 SPIRVGlobalRegistry *GR) {
2699 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2700 const DataLayout &DL = MIRBuilder.getDataLayout();
2701 bool IsSpirvOp = Call->isSpirvOp();
2702 bool HasEvents = Call->Builtin->Name.contains(Other: "events") || IsSpirvOp;
2703 const SPIRVType *Int32Ty = GR->getOrCreateSPIRVIntegerType(BitWidth: 32, MIRBuilder);
2704
  // Build the vararg operands before emitting OpEnqueueKernel.
  // Local size arguments give the sizes of the block invoke arguments. Clang
  // generates the local size operands as an array, so unpack them here.
  SmallVector<Register, 16> LocalSizes;
  if (Call->Builtin->Name.contains("_varargs") || IsSpirvOp) {
    const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
    Register GepReg = Call->Arguments[LocalSizeArrayIdx];
    MachineInstr *GepMI = MRI->getUniqueVRegDef(GepReg);
    assert(isSpvIntrinsic(*GepMI, Intrinsic::spv_gep) &&
           GepMI->getOperand(3).isReg());
    Register ArrayReg = GepMI->getOperand(3).getReg();
    MachineInstr *ArrayMI = MRI->getUniqueVRegDef(ArrayReg);
    const Type *LocalSizeTy = getMachineInstrType(ArrayMI);
    assert(LocalSizeTy && "Local size type is expected");
    const uint64_t LocalSizeNum =
        cast<ArrayType>(LocalSizeTy)->getNumElements();
    unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
    const LLT LLType = LLT::pointer(SC, GR->getPointerSize());
    const SPIRVType *PointerSizeTy = GR->getOrCreateSPIRVPointerType(
        Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
    for (unsigned I = 0; I < LocalSizeNum; ++I) {
      Register Reg = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
      MRI->setType(Reg, LLType);
      GR->assignSPIRVTypeToVReg(PointerSizeTy, Reg, MIRBuilder.getMF());
      auto GEPInst = MIRBuilder.buildIntrinsic(
          Intrinsic::spv_gep, ArrayRef<Register>{Reg}, true, false);
      GEPInst
          .addImm(GepMI->getOperand(2).getImm()) // In-bounds flag.
          .addUse(ArrayMI->getOperand(0).getReg()) // Alloca.
          .addUse(buildConstantIntReg32(0, MIRBuilder, GR)) // Indices.
          .addUse(buildConstantIntReg32(I, MIRBuilder, GR));
      LocalSizes.push_back(Reg);
    }
  }

  // SPIRV OpEnqueueKernel instruction has 10+ arguments.
  auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Int32Ty));

  // Copy all arguments before block invoke function pointer.
  const unsigned BlockFIdx = HasEvents ? 6 : 3;
  for (unsigned i = 0; i < BlockFIdx; i++)
    MIB.addUse(Call->Arguments[i]);

  // If there are no event arguments in the original call, add dummy ones.
  if (!HasEvents) {
    MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Dummy num events.
    Register NullPtr = GR->getOrCreateConstNullPtr(
        MIRBuilder, getOrCreateSPIRVDeviceEventPointer(MIRBuilder, GR));
    MIB.addUse(NullPtr); // Dummy wait events.
    MIB.addUse(NullPtr); // Dummy ret event.
  }

  MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI);
  assert(BlockMI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  // Invoke: Pointer to invoke function.
  MIB.addGlobalAddress(BlockMI->getOperand(1).getGlobal());

  Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
  // Param: Pointer to block literal.
  MIB.addUse(BlockLiteralReg);

  Type *PType = const_cast<Type *>(getBlockStructType(BlockLiteralReg, MRI));
  // TODO: these numbers should be obtained from block literal structure.
  // Param Size: Size of block literal structure.
  MIB.addUse(buildConstantIntReg32(DL.getTypeStoreSize(PType), MIRBuilder, GR));
  // Param Alignment: Alignment of block literal structure.
  MIB.addUse(buildConstantIntReg32(DL.getPrefTypeAlign(PType).value(),
                                   MIRBuilder, GR));

  for (unsigned i = 0; i < LocalSizes.size(); i++)
    MIB.addUse(LocalSizes[i]);
  return true;
}
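
// Operand layout sketch for the instruction built above (illustrative; the
// operand names follow the OpEnqueueKernel entry in the SPIR-V specification):
//   %Ret = OpEnqueueKernel %i32 %Queue %Flags %NDRange
//          %NumEvents %WaitEvents %RetEvent    ; dummies when !HasEvents
//          %Invoke %Param %ParamSize %ParamAlign
//          %LocalSize0 ... %LocalSizeN         ; vararg forms only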

static bool generateEnqueueInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  switch (Opcode) {
  case SPIRV::OpRetainEvent:
  case SPIRV::OpReleaseEvent:
    return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
  case SPIRV::OpCreateUserEvent:
  case SPIRV::OpGetDefaultQueue:
    return MIRBuilder.buildInstr(Opcode)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  case SPIRV::OpIsValidEvent:
    return MIRBuilder.buildInstr(Opcode)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(Call->Arguments[0]);
  case SPIRV::OpSetUserEventStatus:
    return MIRBuilder.buildInstr(Opcode)
        .addUse(Call->Arguments[0])
        .addUse(Call->Arguments[1]);
  case SPIRV::OpCaptureEventProfilingInfo:
    return MIRBuilder.buildInstr(Opcode)
        .addUse(Call->Arguments[0])
        .addUse(Call->Arguments[1])
        .addUse(Call->Arguments[2]);
  case SPIRV::OpBuildNDRange:
    return buildNDRange(Call, MIRBuilder, GR);
  case SPIRV::OpEnqueueKernel:
    return buildEnqueueKernel(Call, MIRBuilder, GR);
  default:
    return false;
  }
}

static bool generateAsyncCopy(const SPIRV::IncomingCall *Call,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              IsSet ? TypeReg : Register(0));

  auto Scope = buildConstantIntReg32(SPIRV::Scope::Workgroup, MIRBuilder, GR);

  switch (Opcode) {
  case SPIRV::OpGroupAsyncCopy: {
    SPIRVType *NewType =
        Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
            ? nullptr
            : GR->getOrCreateSPIRVTypeByName("spirv.Event", MIRBuilder, true);
    Register TypeReg = GR->getSPIRVTypeID(NewType ? NewType : Call->ReturnType);
    unsigned NumArgs = Call->Arguments.size();
    Register EventReg = Call->Arguments[NumArgs - 1];
    bool Res = MIRBuilder.buildInstr(Opcode)
                   .addDef(Call->ReturnRegister)
                   .addUse(TypeReg)
                   .addUse(Scope)
                   .addUse(Call->Arguments[0])
                   .addUse(Call->Arguments[1])
                   .addUse(Call->Arguments[2])
                   .addUse(Call->Arguments.size() > 4
                               ? Call->Arguments[3]
                               : buildConstantIntReg32(1, MIRBuilder, GR))
                   .addUse(EventReg);
    if (NewType != nullptr)
      updateRegType(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
                    MIRBuilder.getMF().getRegInfo());
    return Res;
  }
  case SPIRV::OpGroupWaitEvents:
    return MIRBuilder.buildInstr(Opcode)
        .addUse(Scope)
        .addUse(Call->Arguments[0])
        .addUse(Call->Arguments[1]);
  default:
    return false;
  }
}
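
// For exposition, a typical OpenCL C call (the source-level names here are
// assumed, not taken from this file):
//   event_t E = async_work_group_copy(Dst, Src, NumElts, Evt);
// lowers along the OpGroupAsyncCopy path above to roughly:
//   %E = OpGroupAsyncCopy %EventTy %WorkgroupScope %Dst %Src %NumElts %One %Evt
// The stride operand %One is synthesized because the four-argument OpenCL
// form carries no stride; the five-argument strided form passes its own.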

static bool generateConvertInst(const StringRef DemangledCall,
                                const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  // Lookup the conversion builtin in the TableGen records.
  const SPIRV::ConvertBuiltin *Builtin =
      SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);

  if (!Builtin && Call->isSpirvOp()) {
    const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
    unsigned Opcode =
        SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));
  }

  assert(Builtin && "Conversion builtin not found.");
  if (Builtin->IsSaturated)
    buildOpDecorate(Call->ReturnRegister, MIRBuilder,
                    SPIRV::Decoration::SaturatedConversion, {});
  if (Builtin->IsRounded)
    buildOpDecorate(Call->ReturnRegister, MIRBuilder,
                    SPIRV::Decoration::FPRoundingMode,
                    {(unsigned)Builtin->RoundingMode});

  std::string NeedExtMsg;              // no errors if empty
  bool IsRightComponentsNumber = true; // check if input/output accepts vectors
  unsigned Opcode = SPIRV::OpNop;
  if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
    // Int -> ...
    if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
      // Int -> Int
      if (Builtin->IsSaturated)
        Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
                                              : SPIRV::OpSatConvertSToU;
      else
        Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
                                              : SPIRV::OpSConvert;
    } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
                                          SPIRV::OpTypeFloat)) {
      // Int -> Float
      if (Builtin->IsBfloat16) {
        const auto *ST = static_cast<const SPIRVSubtarget *>(
            &MIRBuilder.getMF().getSubtarget());
        if (!ST->canUseExtension(
                SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
          NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
        IsRightComponentsNumber =
            GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
            GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
        Opcode = SPIRV::OpConvertBF16ToFINTEL;
      } else {
        bool IsSourceSigned =
            DemangledCall[DemangledCall.find_first_of('(') + 1] != 'u';
        Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
      }
    }
  } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
                                        SPIRV::OpTypeFloat)) {
    // Float -> ...
    if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
      // Float -> Int
      if (Builtin->IsBfloat16) {
        const auto *ST = static_cast<const SPIRVSubtarget *>(
            &MIRBuilder.getMF().getSubtarget());
        if (!ST->canUseExtension(
                SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
          NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
        IsRightComponentsNumber =
            GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
            GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
        Opcode = SPIRV::OpConvertFToBF16INTEL;
      } else {
        Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
                                              : SPIRV::OpConvertFToU;
      }
    } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
                                          SPIRV::OpTypeFloat)) {
      if (Builtin->IsTF32) {
        const auto *ST = static_cast<const SPIRVSubtarget *>(
            &MIRBuilder.getMF().getSubtarget());
        if (!ST->canUseExtension(
                SPIRV::Extension::SPV_INTEL_tensor_float32_conversion))
          NeedExtMsg = "SPV_INTEL_tensor_float32_conversion";
        IsRightComponentsNumber =
            GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
            GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
        Opcode = SPIRV::OpRoundFToTF32INTEL;
      } else {
        // Float -> Float
        Opcode = SPIRV::OpFConvert;
      }
    }
  }

  if (!NeedExtMsg.empty()) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: " +
                          NeedExtMsg;
    report_fatal_error(DiagMsg.c_str(), false);
  }
  if (!IsRightComponentsNumber) {
    std::string DiagMsg =
        std::string(Builtin->Name) +
        ": result and argument must have the same number of components";
    report_fatal_error(DiagMsg.c_str(), false);
  }
  assert(Opcode != SPIRV::OpNop &&
         "Conversion between the types not implemented!");

  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(Call->Arguments[0]);
  return true;
}
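
// Worked example (illustrative; the builtin name follows the usual OpenCL
// convert_<type><N>_sat_<rounding> convention): for convert_int4_sat_rte the
// argument is a float vector and the result an int vector, so the code above
// selects OpConvertFToS, while the record's IsSaturated and IsRounded flags
// have already attached the SaturatedConversion and FPRoundingMode
// decorations to the result register.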

static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call,
                                        MachineIRBuilder &MIRBuilder,
                                        SPIRVGlobalRegistry *GR) {
  // Lookup the vector load/store builtin in the TableGen records.
  const SPIRV::VectorLoadStoreBuiltin *Builtin =
      SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
                                          Call->Builtin->Set);
  // Build extended instruction.
  auto MIB =
      MIRBuilder.buildInstr(SPIRV::OpExtInst)
          .addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
          .addImm(Builtin->Number);
  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);
  if (Builtin->Name.contains("load") && Builtin->ElementCount > 1)
    MIB.addImm(Builtin->ElementCount);
  // The rounding mode should be passed as the last argument of the MI for
  // builtins like "vstorea_halfn_r".
  if (Builtin->IsRounded)
    MIB.addImm(static_cast<uint32_t>(Builtin->RoundingMode));
  return true;
}
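
// Example mapping (illustrative): vload4 lowers to OpExtInst with the
// OpenCL.std vloadn number followed by the literal element count 4, while a
// rounded store such as vstorea_half4_rtz gets its rounding-mode literal
// appended as the final operand.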

static bool generateAFPInst(const SPIRV::IncomingCall *Call,
                            MachineIRBuilder &MIRBuilder,
                            SPIRVGlobalRegistry *GR) {
  const auto *Builtin = Call->Builtin;
  auto *MRI = MIRBuilder.getMRI();
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
  const Type *RetTy = GR->getTypeForSPIRVType(Call->ReturnType);
  bool IsVoid = RetTy->isVoidTy();
  auto MIB = MIRBuilder.buildInstr(Opcode);
  Register DestReg;
  if (IsVoid) {
    LLT PtrTy = MRI->getType(Call->Arguments[0]);
    DestReg = MRI->createGenericVirtualRegister(PtrTy);
    MRI->setRegClass(DestReg, &SPIRV::pIDRegClass);
    SPIRVType *PointeeTy =
        GR->getPointeeType(GR->getSPIRVTypeForVReg(Call->Arguments[0]));
    MIB.addDef(DestReg);
    MIB.addUse(GR->getSPIRVTypeID(PointeeTy));
  } else {
    MIB.addDef(Call->ReturnRegister);
    MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
  }
  for (unsigned i = IsVoid ? 1 : 0; i < Call->Arguments.size(); ++i) {
    Register Arg = Call->Arguments[i];
    MachineInstr *DefMI = MRI->getUniqueVRegDef(Arg);
    if (DefMI->getOpcode() == TargetOpcode::G_CONSTANT &&
        DefMI->getOperand(1).isCImm()) {
      MIB.addImm(getConstFromIntrinsic(Arg, MRI));
    } else {
      MIB.addUse(Arg);
    }
  }
  if (IsVoid) {
    LLT PtrTy = MRI->getType(Call->Arguments[0]);
    MachineMemOperand *MMO = MIRBuilder.getMF().getMachineMemOperand(
        MachinePointerInfo(), MachineMemOperand::MOStore,
        PtrTy.getSizeInBytes(), Align(4));
    MIRBuilder.buildStore(DestReg, Call->Arguments[0], *MMO);
  }
  return true;
}

static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call,
                                  MachineIRBuilder &MIRBuilder,
                                  SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
  bool IsLoad = Opcode == SPIRV::OpLoad;
  // Build the instruction.
  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (IsLoad) {
    MIB.addDef(Call->ReturnRegister);
    MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
  }
  // Add a pointer to the value to load/store.
  MIB.addUse(Call->Arguments[0]);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  // Add a value to store.
  if (!IsLoad)
    MIB.addUse(Call->Arguments[1]);
  // Add optional memory attributes and an alignment.
  unsigned NumArgs = Call->Arguments.size();
  if ((IsLoad && NumArgs >= 2) || NumArgs >= 3)
    MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
  if ((IsLoad && NumArgs >= 3) || NumArgs >= 4)
    MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
  return true;
}
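
// Operand sketch (illustrative; the literal values shown are assumptions): a
// load wrapper carrying the optional literals, e.g. a pointer argument plus
// the constants 0x2 (MemoryAccess Aligned) and 4, becomes roughly
//   %V = OpLoad %Ty %Ptr Aligned 4
// with both trailing literals recovered from their constant registers via
// getConstFromIntrinsic.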

namespace SPIRV {
// Try to find builtin function attributes by a demangled function name and
// return a tuple <builtin group, opcode, ext instruction number>, or the
// special tuple value <-1, 0, 0> if the builtin function is not found.
// Not all builtin functions are supported, only those with a ready-to-use
// opcode or instruction number defined in TableGen.
// TODO: consider a major rework of the mapping from demangled calls to
// builtin functions to unify the search and decrease the number of cases.
std::tuple<int, unsigned, unsigned>
mapBuiltinToOpcode(const StringRef DemangledCall,
                   SPIRV::InstructionSet::InstructionSet Set) {
  Register Reg;
  SmallVector<Register> Args;
  std::unique_ptr<const IncomingCall> Call =
      lookupBuiltin(DemangledCall, Set, Reg, nullptr, Args);
  if (!Call)
    return std::make_tuple(-1, 0, 0);

  switch (Call->Builtin->Group) {
  case SPIRV::Relational:
  case SPIRV::Atomic:
  case SPIRV::Barrier:
  case SPIRV::CastToPtr:
  case SPIRV::ImageMiscQuery:
  case SPIRV::SpecConstant:
  case SPIRV::Enqueue:
  case SPIRV::AsyncCopy:
  case SPIRV::LoadStore:
  case SPIRV::CoopMatr:
    if (const auto *R =
            SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set))
      return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
    break;
  case SPIRV::Extended:
    if (const auto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name,
                                                     Call->Builtin->Set))
      return std::make_tuple(Call->Builtin->Group, 0, R->Number);
    break;
  case SPIRV::VectorLoadStore:
    if (const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
                                                            Call->Builtin->Set))
      return std::make_tuple(SPIRV::Extended, 0, R->Number);
    break;
  case SPIRV::Group:
    if (const auto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name))
      return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
    break;
  case SPIRV::AtomicFloating:
    if (const auto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name))
      return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
    break;
  case SPIRV::IntelSubgroups:
    if (const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name))
      return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
    break;
  case SPIRV::GroupUniform:
    if (const auto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name))
      return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
    break;
  case SPIRV::IntegerDot:
    if (const auto *R =
            SPIRV::lookupIntegerDotProductBuiltin(Call->Builtin->Name))
      return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
    break;
  case SPIRV::WriteImage:
    return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0);
  case SPIRV::Select:
    return std::make_tuple(Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
  case SPIRV::Construct:
    return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct,
                           0);
  case SPIRV::KernelClock:
    return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
  default:
    return std::make_tuple(-1, 0, 0);
  }
  return std::make_tuple(-1, 0, 0);
}
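
// Minimal usage sketch for the function above (hypothetical caller, shown for
// illustration only):
//   auto [Group, Opcode, ExtNo] =
//       SPIRV::mapBuiltinToOpcode(DemangledName,
//                                 SPIRV::InstructionSet::OpenCL_std);
//   if (Group == -1)
//     /* not a recognized builtin */;
//   else if (Opcode != 0)
//     /* native instruction: emit Opcode directly */;
//   else
//     /* extended instruction: emit OpExtInst with ExtNo */;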

std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
                                 SPIRV::InstructionSet::InstructionSet Set,
                                 MachineIRBuilder &MIRBuilder,
                                 const Register OrigRet, const Type *OrigRetTy,
                                 const SmallVectorImpl<Register> &Args,
                                 SPIRVGlobalRegistry *GR, const CallBase &CB) {
  LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");

  // Lookup the builtin in the TableGen records.
  SPIRVType *SpvType = GR->getSPIRVTypeForVReg(OrigRet);
  assert(SpvType && "Inconsistent return register: expected valid type info");
  std::unique_ptr<const IncomingCall> Call =
      lookupBuiltin(DemangledCall, Set, OrigRet, SpvType, Args);

  if (!Call) {
    LLVM_DEBUG(dbgs() << "Builtin record was not found!\n");
    return std::nullopt;
  }
  // TODO: check if the provided args meet the builtin requirements.
  assert(Args.size() >= Call->Builtin->MinNumArgs &&
         "Too few arguments to generate the builtin");
  if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
    LLVM_DEBUG(dbgs() << "More arguments provided than required!\n");

  // Match the builtin with implementation based on the grouping.
  switch (Call->Builtin->Group) {
  case SPIRV::Extended:
    return generateExtInst(Call.get(), MIRBuilder, GR, CB);
  case SPIRV::Relational:
    return generateRelationalInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Group:
    return generateGroupInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Variable:
    return generateBuiltinVar(Call.get(), MIRBuilder, GR);
  case SPIRV::Atomic:
    return generateAtomicInst(Call.get(), MIRBuilder, GR);
  case SPIRV::AtomicFloating:
    return generateAtomicFloatingInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Barrier:
    return generateBarrierInst(Call.get(), MIRBuilder, GR);
  case SPIRV::CastToPtr:
    return generateCastToPtrInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Dot:
  case SPIRV::IntegerDot:
    return generateDotOrFMulInst(DemangledCall, Call.get(), MIRBuilder, GR);
  case SPIRV::Wave:
    return generateWaveInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ICarryBorrow:
    return generateICarryBorrowInst(Call.get(), MIRBuilder, GR);
  case SPIRV::GetQuery:
    return generateGetQueryInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ImageSizeQuery:
    return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ImageMiscQuery:
    return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ReadImage:
    return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
  case SPIRV::WriteImage:
    return generateWriteImageInst(Call.get(), MIRBuilder, GR);
  case SPIRV::SampleImage:
    return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
  case SPIRV::Select:
    return generateSelectInst(Call.get(), MIRBuilder);
  case SPIRV::Construct:
    return generateConstructInst(Call.get(), MIRBuilder, GR);
  case SPIRV::SpecConstant:
    return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Enqueue:
    return generateEnqueueInst(Call.get(), MIRBuilder, GR);
  case SPIRV::AsyncCopy:
    return generateAsyncCopy(Call.get(), MIRBuilder, GR);
  case SPIRV::Convert:
    return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
  case SPIRV::VectorLoadStore:
    return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
  case SPIRV::LoadStore:
    return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
  case SPIRV::IntelSubgroups:
    return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR);
  case SPIRV::GroupUniform:
    return generateGroupUniformInst(Call.get(), MIRBuilder, GR);
  case SPIRV::KernelClock:
    return generateKernelClockInst(Call.get(), MIRBuilder, GR);
  case SPIRV::CoopMatr:
    return generateCoopMatrInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ExtendedBitOps:
    return generateExtendedBitOpsInst(Call.get(), MIRBuilder, GR);
  case SPIRV::BindlessINTEL:
    return generateBindlessImageINTELInst(Call.get(), MIRBuilder, GR);
  case SPIRV::TernaryBitwiseINTEL:
    return generateTernaryBitwiseFunctionINTELInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Block2DLoadStore:
    return generate2DBlockIOINTELInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Pipe:
    return generatePipeInst(Call.get(), MIRBuilder, GR);
  case SPIRV::PredicatedLoadStore:
    return generatePredicatedLoadStoreInst(Call.get(), MIRBuilder, GR);
  case SPIRV::BlockingPipes:
    return generateBlockingPipesInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ArbitraryPrecisionFixedPoint:
    return generateAPFixedPointInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ImageChannelDataTypes:
    return generateImageChannelDataTypeInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ArbitraryFloatingPoint:
    return generateAFPInst(Call.get(), MIRBuilder, GR);
  }
  return false;
}

Type *parseBuiltinCallArgumentType(StringRef TypeStr, LLVMContext &Ctx) {
  // Parse strings representing OpenCL builtin types.
  if (hasBuiltinTypePrefix(TypeStr)) {
    // OpenCL builtin types in demangled call strings have the following
    // format: e.g. ocl_image2d_ro
    [[maybe_unused]] bool IsOCLBuiltinType = TypeStr.consume_front("ocl_");
    assert(IsOCLBuiltinType && "Invalid OpenCL builtin prefix");

    // Check whether this is a pointer to a builtin type, rather than a pointer
    // merely representing a builtin type. A pointer to a builtin type requires
    // additional handling in the caller of
    // parseBuiltinCallArgumentBaseType(...), as this function only retrieves
    // the base types.
    if (TypeStr.ends_with("*"))
      TypeStr = TypeStr.slice(0, TypeStr.find_first_of(" *"));

    return parseBuiltinTypeNameToTargetExtType(
        "opencl." + TypeStr.str() + "_t", Ctx);
  }

  // Parse type name in either "typeN" or "type vector[N]" format, where
  // N is the number of elements of the vector.
  Type *BaseType;
  unsigned VecElts = 0;

  BaseType = parseBasicTypeName(TypeStr, Ctx);
  if (!BaseType)
    // Unable to recognize SPIRV type name.
    return nullptr;

  // Handle "typeN*" or "type vector[N]*".
  TypeStr.consume_back("*");

  if (TypeStr.consume_front(" vector["))
    TypeStr = TypeStr.substr(0, TypeStr.find(']'));

  TypeStr.getAsInteger(10, VecElts);
  if (VecElts > 0)
    BaseType = VectorType::get(
        BaseType->isVoidTy() ? Type::getInt8Ty(Ctx) : BaseType, VecElts, false);

  return BaseType;
}
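
// Parsing examples derived from the logic above (illustrative): "float4" and
// "float vector[4]" both yield a 4-element float vector, "ocl_image2d_ro" is
// routed through parseBuiltinTypeNameToTargetExtType as "opencl.image2d_ro_t",
// and an unrecognized type name yields nullptr.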

bool parseBuiltinTypeStr(SmallVector<StringRef, 10> &BuiltinArgsTypeStrs,
                         const StringRef DemangledCall, LLVMContext &Ctx) {
  auto Pos1 = DemangledCall.find('(');
  if (Pos1 == StringRef::npos)
    return false;
  auto Pos2 = DemangledCall.find(')');
  if (Pos2 == StringRef::npos || Pos1 > Pos2)
    return false;
  DemangledCall.slice(Pos1 + 1, Pos2)
      .split(BuiltinArgsTypeStrs, ',', -1, false);
  return true;
}

Type *parseBuiltinCallArgumentBaseType(const StringRef DemangledCall,
                                       unsigned ArgIdx, LLVMContext &Ctx) {
  SmallVector<StringRef, 10> BuiltinArgsTypeStrs;
  parseBuiltinTypeStr(BuiltinArgsTypeStrs, DemangledCall, Ctx);
  if (ArgIdx >= BuiltinArgsTypeStrs.size())
    return nullptr;
  StringRef TypeStr = BuiltinArgsTypeStrs[ArgIdx].trim();
  return parseBuiltinCallArgumentType(TypeStr, Ctx);
}

struct BuiltinType {
  StringRef Name;
  uint32_t Opcode;
};

#define GET_BuiltinTypes_DECL
#define GET_BuiltinTypes_IMPL

struct OpenCLType {
  StringRef Name;
  StringRef SpirvTypeLiteral;
};

#define GET_OpenCLTypes_DECL
#define GET_OpenCLTypes_IMPL

#include "SPIRVGenTables.inc"
} // namespace SPIRV

//===----------------------------------------------------------------------===//
// Misc functions for parsing builtin types.
//===----------------------------------------------------------------------===//

static Type *parseTypeString(const StringRef Name, LLVMContext &Context) {
  if (Name.starts_with("void"))
    return Type::getVoidTy(Context);
  else if (Name.starts_with("int") || Name.starts_with("uint"))
    return Type::getInt32Ty(Context);
  else if (Name.starts_with("float"))
    return Type::getFloatTy(Context);
  else if (Name.starts_with("half"))
    return Type::getHalfTy(Context);
  report_fatal_error("Unable to recognize type!");
}

//===----------------------------------------------------------------------===//
// Implementation functions for builtin types.
//===----------------------------------------------------------------------===//

static SPIRVType *getNonParameterizedType(const TargetExtType *ExtensionType,
                                          const SPIRV::BuiltinType *TypeRecord,
                                          MachineIRBuilder &MIRBuilder,
                                          SPIRVGlobalRegistry *GR) {
  unsigned Opcode = TypeRecord->Opcode;
  // Create or get an existing type from GlobalRegistry.
  return GR->getOrCreateOpTypeByOpcode(ExtensionType, MIRBuilder, Opcode);
}

static SPIRVType *getSamplerType(MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
  // Create or get an existing type from GlobalRegistry.
  return GR->getOrCreateOpTypeSampler(MIRBuilder);
}

static SPIRVType *getPipeType(const TargetExtType *ExtensionType,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR) {
  assert(ExtensionType->getNumIntParameters() == 1 &&
         "Invalid number of parameters for SPIR-V pipe builtin!");
  // Create or get an existing type from GlobalRegistry.
  return GR->getOrCreateOpTypePipe(MIRBuilder,
                                   SPIRV::AccessQualifier::AccessQualifier(
                                       ExtensionType->getIntParameter(0)));
}

static SPIRVType *getCoopMatrType(const TargetExtType *ExtensionType,
                                  MachineIRBuilder &MIRBuilder,
                                  SPIRVGlobalRegistry *GR) {
  assert(ExtensionType->getNumIntParameters() == 4 &&
         "Invalid number of parameters for SPIR-V coop matrices builtin!");
  assert(ExtensionType->getNumTypeParameters() == 1 &&
         "SPIR-V coop matrices builtin type must have a type parameter!");
  const SPIRVType *ElemType =
      GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder,
                               SPIRV::AccessQualifier::ReadWrite, true);
  // Create or get an existing type from GlobalRegistry.
  return GR->getOrCreateOpTypeCoopMatr(
      MIRBuilder, ExtensionType, ElemType, ExtensionType->getIntParameter(0),
      ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
      ExtensionType->getIntParameter(3), true);
}

static SPIRVType *getSampledImageType(const TargetExtType *OpaqueType,
                                      MachineIRBuilder &MIRBuilder,
                                      SPIRVGlobalRegistry *GR) {
  SPIRVType *OpaqueImageType = GR->getImageType(
      OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder);
  // Create or get an existing type from GlobalRegistry.
  return GR->getOrCreateOpTypeSampledImage(OpaqueImageType, MIRBuilder);
}

static SPIRVType *getInlineSpirvType(const TargetExtType *ExtensionType,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  assert(ExtensionType->getNumIntParameters() == 3 &&
         "Inline SPIR-V type builtin takes an opcode, size, and alignment "
         "parameter");
  auto Opcode = ExtensionType->getIntParameter(0);

  SmallVector<MCOperand> Operands;
  for (Type *Param : ExtensionType->type_params()) {
    if (const TargetExtType *ParamEType = dyn_cast<TargetExtType>(Param)) {
      if (ParamEType->getName() == "spirv.IntegralConstant") {
        assert(ParamEType->getNumTypeParameters() == 1 &&
               "Inline SPIR-V integral constant builtin must have a type "
               "parameter");
        assert(ParamEType->getNumIntParameters() == 1 &&
               "Inline SPIR-V integral constant builtin must have a "
               "value parameter");

        auto OperandValue = ParamEType->getIntParameter(0);
        auto *OperandType = ParamEType->getTypeParameter(0);

        const SPIRVType *OperandSPIRVType = GR->getOrCreateSPIRVType(
            OperandType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);

        Operands.push_back(MCOperand::createReg(GR->buildConstantInt(
            OperandValue, MIRBuilder, OperandSPIRVType, true)));
        continue;
      } else if (ParamEType->getName() == "spirv.Literal") {
        assert(ParamEType->getNumTypeParameters() == 0 &&
               "Inline SPIR-V literal builtin does not take type "
               "parameters");
        assert(ParamEType->getNumIntParameters() == 1 &&
               "Inline SPIR-V literal builtin must have an integer "
               "parameter");

        auto OperandValue = ParamEType->getIntParameter(0);

        Operands.push_back(MCOperand::createImm(OperandValue));
        continue;
      }
    }
    const SPIRVType *TypeOperand = GR->getOrCreateSPIRVType(
        Param, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
    Operands.push_back(MCOperand::createReg(GR->getSPIRVTypeID(TypeOperand)));
  }

  return GR->getOrCreateUnknownType(ExtensionType, MIRBuilder, Opcode,
                                    Operands);
}

static SPIRVType *getVulkanBufferType(const TargetExtType *ExtensionType,
                                      MachineIRBuilder &MIRBuilder,
                                      SPIRVGlobalRegistry *GR) {
  assert(ExtensionType->getNumTypeParameters() == 1 &&
         "Vulkan buffers have exactly one type for the type of the buffer.");
  assert(ExtensionType->getNumIntParameters() == 2 &&
         "Vulkan buffers have 2 integer parameters: the storage class and "
         "whether the buffer is writable.");

  auto *T = ExtensionType->getTypeParameter(0);
  auto SC = static_cast<SPIRV::StorageClass::StorageClass>(
      ExtensionType->getIntParameter(0));
  bool IsWritable = ExtensionType->getIntParameter(1);
  return GR->getOrCreateVulkanBufferType(MIRBuilder, T, SC, IsWritable);
}

static SPIRVType *getVulkanPushConstantType(const TargetExtType *ExtensionType,
                                            MachineIRBuilder &MIRBuilder,
                                            SPIRVGlobalRegistry *GR) {
  assert(ExtensionType->getNumTypeParameters() == 1 &&
         "Vulkan push constants have exactly one type as argument.");
  auto *T = ExtensionType->getTypeParameter(0);
  return GR->getOrCreateVulkanPushConstantType(MIRBuilder, T);
}

static SPIRVType *getLayoutType(const TargetExtType *ExtensionType,
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  return GR->getOrCreateLayoutType(MIRBuilder, ExtensionType);
}

namespace SPIRV {
TargetExtType *parseBuiltinTypeNameToTargetExtType(std::string TypeName,
                                                   LLVMContext &Context) {
  StringRef NameWithParameters = TypeName;

  // Pointers-to-opaque-structs representing OpenCL types are first translated
  // to equivalent SPIR-V types. OpenCL builtin type names should have the
  // following format: e.g. %opencl.event_t
  if (NameWithParameters.starts_with("opencl.")) {
    const SPIRV::OpenCLType *OCLTypeRecord =
        SPIRV::lookupOpenCLType(NameWithParameters);
    if (!OCLTypeRecord)
      report_fatal_error("Missing TableGen record for OpenCL type: " +
                         NameWithParameters);
    NameWithParameters = OCLTypeRecord->SpirvTypeLiteral;
    // Continue with the SPIR-V builtin type...
  }

  // Names of the opaque structs representing SPIR-V builtins without
  // parameters should have the following format: e.g. %spirv.Event
  assert(NameWithParameters.starts_with("spirv.") &&
         "Unknown builtin opaque type!");

  // Parameterized SPIR-V builtins names follow this format:
  // e.g. %spirv.Image._void_1_0_0_0_0_0_0, %spirv.Pipe._0
  if (!NameWithParameters.contains('_'))
    return TargetExtType::get(Context, NameWithParameters);

  SmallVector<StringRef> Parameters;
  unsigned BaseNameLength = NameWithParameters.find('_') - 1;
  SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters, "_");

  SmallVector<Type *, 1> TypeParameters;
  bool HasTypeParameter = !isDigit(Parameters[0][0]);
  if (HasTypeParameter)
    TypeParameters.push_back(parseTypeString(Parameters[0], Context));
  SmallVector<unsigned> IntParameters;
  for (unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
    unsigned IntParameter = 0;
    bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
    (void)ValidLiteral;
    assert(ValidLiteral &&
           "Invalid format of SPIR-V builtin parameter literal!");
    IntParameters.push_back(IntParameter);
  }
  return TargetExtType::get(Context,
                            NameWithParameters.substr(0, BaseNameLength),
                            TypeParameters, IntParameters);
}
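
// Worked example of the parsing above (shown for clarity): the literal
// "spirv.Image._void_1_0_0_0_0_0_0" splits into the base name "spirv.Image",
// one type parameter parsed from "void", and the integer parameters
// 1, 0, 0, 0, 0, 0, 0, producing
//   target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0)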

SPIRVType *lowerBuiltinType(const Type *OpaqueType,
                            SPIRV::AccessQualifier::AccessQualifier AccessQual,
                            MachineIRBuilder &MIRBuilder,
                            SPIRVGlobalRegistry *GR) {
  // In LLVM IR, SPIR-V and OpenCL builtin types are represented either as
  // target(...) target extension types or as pointers-to-opaque-structs. The
  // approach relying on structs is deprecated and works only in the non-opaque
  // pointer mode (-opaque-pointers=0).
  // In order to maintain compatibility with LLVM IR generated by older
  // versions of Clang and the LLVM/SPIR-V Translator, pointers-to-opaque-
  // structs are "translated" to target extension types. This translation is
  // temporary and will be removed in a future release of LLVM.
  const TargetExtType *BuiltinType = dyn_cast<TargetExtType>(OpaqueType);
  if (!BuiltinType)
    BuiltinType = parseBuiltinTypeNameToTargetExtType(
        OpaqueType->getStructName().str(), MIRBuilder.getContext());

  unsigned NumStartingVRegs = MIRBuilder.getMRI()->getNumVirtRegs();

  const StringRef Name = BuiltinType->getName();
  LLVM_DEBUG(dbgs() << "Lowering builtin type: " << Name << "\n");

  SPIRVType *TargetType;
  if (Name == "spirv.Type") {
    TargetType = getInlineSpirvType(BuiltinType, MIRBuilder, GR);
  } else if (Name == "spirv.VulkanBuffer") {
    TargetType = getVulkanBufferType(BuiltinType, MIRBuilder, GR);
  } else if (Name == "spirv.Padding") {
    TargetType = GR->getOrCreatePaddingType(MIRBuilder);
  } else if (Name == "spirv.PushConstant") {
    TargetType = getVulkanPushConstantType(BuiltinType, MIRBuilder, GR);
  } else if (Name == "spirv.Layout") {
    TargetType = getLayoutType(BuiltinType, MIRBuilder, GR);
  } else {
    // Lookup the demangled builtin type in the TableGen records.
    const SPIRV::BuiltinType *TypeRecord = SPIRV::lookupBuiltinType(Name);
    if (!TypeRecord)
      report_fatal_error("Missing TableGen record for builtin type: " + Name);

    // "Lower" the BuiltinType into TargetType. The following get<...>Type
    // methods use the implementation details from TableGen records or
    // TargetExtType parameters to either create a new OpType<...> machine
    // instruction or get an existing equivalent SPIRVType from
    // GlobalRegistry.

    switch (TypeRecord->Opcode) {
    case SPIRV::OpTypeImage:
      TargetType = GR->getImageType(BuiltinType, AccessQual, MIRBuilder);
      break;
    case SPIRV::OpTypePipe:
      TargetType = getPipeType(BuiltinType, MIRBuilder, GR);
      break;
    case SPIRV::OpTypeDeviceEvent:
      TargetType = GR->getOrCreateOpTypeDeviceEvent(MIRBuilder);
      break;
    case SPIRV::OpTypeSampler:
      TargetType = getSamplerType(MIRBuilder, GR);
      break;
    case SPIRV::OpTypeSampledImage:
      TargetType = getSampledImageType(BuiltinType, MIRBuilder, GR);
      break;
    case SPIRV::OpTypeCooperativeMatrixKHR:
      TargetType = getCoopMatrType(BuiltinType, MIRBuilder, GR);
      break;
    default:
      TargetType =
          getNonParameterizedType(BuiltinType, TypeRecord, MIRBuilder, GR);
      break;
    }
  }

  // Emit OpName instruction if a new OpType<...> instruction was added
  // (equivalent type was not found in GlobalRegistry).
  if (NumStartingVRegs < MIRBuilder.getMRI()->getNumVirtRegs())
    buildOpName(GR->getSPIRVTypeID(TargetType), Name, MIRBuilder);

  return TargetType;
}
} // namespace SPIRV
} // namespace llvm