//===--- SPIRVUtils.h ---- SPIR-V Utility Functions -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains miscellaneous utility functions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_SPIRV_SPIRVUTILS_H
#define LLVM_LIB_TARGET_SPIRV_SPIRVUTILS_H

#include "MCTargetDesc/SPIRVBaseInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/TypedPointerType.h"
#include <queue>
#include <string>
#include <unordered_map>
#include <unordered_set>

#include "SPIRVTypeInst.h"

namespace llvm {
class MCInst;
class MachineFunction;
class MachineInstr;
class MachineInstrBuilder;
class MachineIRBuilder;
class MachineRegisterInfo;
class Register;
class StringRef;
class SPIRVInstrInfo;
class SPIRVSubtarget;
class SPIRVGlobalRegistry;
class SPIRVTypeInst;

// This class implements a partial ordering visitor, which visits a cyclic
// graph in a natural topological-like ordering. Topological ordering is not
// defined for directed graphs with cycles, so each cycle is treated as a
// single node and back-edges are ignored. Each cycle is visited from its entry
// in the same topological-like ordering.
//
// Note: this visitor REQUIRES a reducible graph.
//
// This means that once we visit a node, we know all of its possible ancestors
// have been visited.
//
// clang-format off
//
// Given this graph:
//
//     ,-> B -\
// A -+        +---> D ----> E -> F -> G -> H
//     `-> C -/      ^                 |
//                   +-----------------+
//
// Visit order is:
//  A, [B, C in any order], D, E, F, G, H
//
// clang-format on
//
// Changing the function CFG between the construction of the visitor and
// visiting is undefined. The visitor can be reused, but if the CFG is updated,
// the visitor must be rebuilt.
class PartialOrderingVisitor {
  DomTreeBuilder::BBDomTree DT;
  LoopInfo LI;

  std::unordered_set<BasicBlock *> Queued = {};
  std::queue<BasicBlock *> ToVisit = {};

  struct OrderInfo {
    size_t Rank;
    size_t TraversalIndex;
  };

  using BlockToOrderInfoMap = std::unordered_map<BasicBlock *, OrderInfo>;
  BlockToOrderInfoMap BlockToOrder;
  std::vector<BasicBlock *> Order = {};

  // Get all basic-blocks reachable from Start.
  std::unordered_set<BasicBlock *> getReachableFrom(BasicBlock *Start);

  // Internal function used to determine the partial ordering.
  // Visits |BB| with the current rank being |Rank|.
  size_t visit(BasicBlock *BB, size_t Rank);

  bool CanBeVisited(BasicBlock *BB) const;

public:
  size_t GetNodeRank(BasicBlock *BB) const;

  // Build the visitor to operate on the function F.
  PartialOrderingVisitor(Function &F);

  // Returns true if |LHS| comes before |RHS| in the partial ordering.
  // If |LHS| and |RHS| have the same rank, the traversal order determines the
  // order (the order is stable).
  bool compare(const BasicBlock *LHS, const BasicBlock *RHS) const;

  // Visit the function starting from the basic block |Start|, calling |Op| on
  // each visited BB. This traversal ignores back-edges, meaning it won't visit
  // any node of which |Start| is not an ancestor.
  // If |Op| returns true, the visitor continues. If |Op| returns false, the
  // visitor stops at that rank. This means that if two nodes share the same
  // rank and |Op| returns false when visiting the first, the second will still
  // be visited afterwards, but none of their successors will.
  void partialOrderVisit(BasicBlock &Start,
                         std::function<bool(BasicBlock *)> Op);
};
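
// A minimal usage sketch (illustrative only; F is assumed to be a valid,
// reducible Function):
//
//   PartialOrderingVisitor Visitor(F);
//   Visitor.partialOrderVisit(F.getEntryBlock(), [](BasicBlock *BB) {
//     // Process BB here; return false to stop descending past this rank.
//     return true;
//   });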

namespace SPIRV {
struct FPFastMathDefaultInfo {
  const Type *Ty = nullptr;
  unsigned FastMathFlags = 0;
  // With SPV_KHR_float_controls2, ContractionOff and SignedZeroInfNanPreserve
  // are deprecated, and we replace them with the appropriate FPFastMathDefault
  // flags instead. However, we have no guarantee about the order in which we
  // process execution modes. Therefore it could happen that we first process
  // ContractionOff, setting the AllowContraction bit to 0, and then process
  // FPFastMathDefault, enabling the AllowContraction bit and effectively
  // invalidating ContractionOff. Because of that, it's best to keep separate
  // bits for the different execution modes and combine them later, when we
  // emit the OpExecutionMode instructions.
  bool ContractionOff = false;
  bool SignedZeroInfNanPreserve = false;
  bool FPFastMathDefault = false;

  FPFastMathDefaultInfo() = default;
  FPFastMathDefaultInfo(const Type *Ty, unsigned FastMathFlags)
      : Ty(Ty), FastMathFlags(FastMathFlags) {}
  bool operator==(const FPFastMathDefaultInfo &Other) const {
    return Ty == Other.Ty && FastMathFlags == Other.FastMathFlags &&
           ContractionOff == Other.ContractionOff &&
           SignedZeroInfNanPreserve == Other.SignedZeroInfNanPreserve &&
           FPFastMathDefault == Other.FPFastMathDefault;
  }
};

struct FPFastMathDefaultInfoVector
    : public SmallVector<SPIRV::FPFastMathDefaultInfo, 3> {
  static size_t computeFPFastMathDefaultInfoVecIndex(size_t BitWidth) {
    switch (BitWidth) {
    case 16: // half
      return 0;
    case 32: // float
      return 1;
    case 64: // double
      return 2;
    default:
      report_fatal_error("Expected BitWidth to be 16, 32, 64", false);
    }
    llvm_unreachable(
        "Unreachable code in computeFPFastMathDefaultInfoVecIndex");
  }
};

// These functions restore the original function argument/return types for
// composite cases: the final types should still be aggregates, whereas they
// are i32 during translation to cope with aggregate flattening etc.
FunctionType *getOriginalFunctionType(const Function &F);
FunctionType *getOriginalFunctionType(const CallBase &CB);
} // namespace SPIRV

// Add the given string as a series of integer operands, inserting a null
// terminator and padding so that the operands form complete 32-bit
// little-endian words.
void addStringImm(const StringRef &Str, MCInst &Inst);
void addStringImm(const StringRef &Str, MachineInstrBuilder &MIB);
void addStringImm(const StringRef &Str, IRBuilder<> &B,
                  std::vector<Value *> &Args);

// Read the series of integer operands back as a null-terminated string using
// the reverse of the logic in addStringImm.
std::string getStringImm(const MachineInstr &MI, unsigned StartIndex);
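
// Worked example (illustrative): with the encoding above, addStringImm("CL",
// MIB) emits a single 32-bit word 0x00004C43 ('C' = 0x43 in the low byte,
// 'L' = 0x4C, then the null terminator and zero padding), and
// getStringImm(MI, Idx) recovers "CL" from the same operands.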

// Returns the string constant that the register refers to. It is assumed that
// Reg is a global value that contains a string.
std::string getStringValueFromReg(Register Reg, MachineRegisterInfo &MRI);

// Add the given numerical immediate to MIB.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB);

// Add an OpName instruction for the given target register.
void buildOpName(Register Target, const StringRef &Name,
                 MachineIRBuilder &MIRBuilder);
void buildOpName(Register Target, const StringRef &Name, MachineInstr &I,
                 const SPIRVInstrInfo &TII);

// Add an OpDecorate instruction for the given Reg.
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder,
                     SPIRV::Decoration::Decoration Dec,
                     const std::vector<uint32_t> &DecArgs,
                     StringRef StrImm = "");
void buildOpDecorate(Register Reg, MachineInstr &I, const SPIRVInstrInfo &TII,
                     SPIRV::Decoration::Decoration Dec,
                     const std::vector<uint32_t> &DecArgs,
                     StringRef StrImm = "");

// Add an OpMemberDecorate instruction for the given Reg.
void buildOpMemberDecorate(Register Reg, MachineIRBuilder &MIRBuilder,
                           SPIRV::Decoration::Decoration Dec, uint32_t Member,
                           const std::vector<uint32_t> &DecArgs,
                           StringRef StrImm = "");
void buildOpMemberDecorate(Register Reg, MachineInstr &I,
                           const SPIRVInstrInfo &TII,
                           SPIRV::Decoration::Decoration Dec, uint32_t Member,
                           const std::vector<uint32_t> &DecArgs,
                           StringRef StrImm = "");

// Add OpDecorate instructions described by the "spirv.Decorations" metadata
// node.
void buildOpSpirvDecorations(Register Reg, MachineIRBuilder &MIRBuilder,
                             const MDNode *GVarMD, const SPIRVSubtarget &ST);

// Return a valid position for the OpVariable instruction inside a function,
// i.e., at the beginning of the first block of the function.
MachineBasicBlock::iterator getOpVariableMBBIt(MachineInstr &I);

// Return a valid position for the instruction at the end of the block, before
// terminators and debug instructions.
MachineBasicBlock::iterator getInsertPtValidEnd(MachineBasicBlock *MBB);

// Returns true if a pointer to the storage class can be cast to/from a
// pointer to the Generic storage class.
constexpr bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

// Convert a SPIR-V storage class to the corresponding LLVM IR address space.
// TODO: maybe the following two functions should be handled in the subtarget
// to allow for different OpenCL vs Vulkan handling.
constexpr unsigned
storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Function:
    return 0;
  case SPIRV::StorageClass::CrossWorkgroup:
    return 1;
  case SPIRV::StorageClass::UniformConstant:
    return 2;
  case SPIRV::StorageClass::Workgroup:
    return 3;
  case SPIRV::StorageClass::Generic:
    return 4;
  case SPIRV::StorageClass::DeviceOnlyINTEL:
    return 5;
  case SPIRV::StorageClass::HostOnlyINTEL:
    return 6;
  case SPIRV::StorageClass::Input:
    return 7;
  case SPIRV::StorageClass::Output:
    return 8;
  case SPIRV::StorageClass::CodeSectionINTEL:
    return 9;
  case SPIRV::StorageClass::Private:
    return 10;
  case SPIRV::StorageClass::StorageBuffer:
    return 11;
  case SPIRV::StorageClass::Uniform:
    return 12;
  case SPIRV::StorageClass::PushConstant:
    return 13;
  default:
    report_fatal_error("Unable to get address space id");
  }
}

// Convert an LLVM IR address space to a SPIR-V storage class.
SPIRV::StorageClass::StorageClass
addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI);
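
// Illustrative round trip (the values follow the mapping above; STI is a
// hypothetical SPIRVSubtarget reference, and the reverse mapping is
// subtarget-dependent):
//
//   unsigned AS = storageClassToAddressSpace(SPIRV::StorageClass::Workgroup);
//   // AS == 3
//   SPIRV::StorageClass::StorageClass SC =
//       addressSpaceToStorageClass(AS, STI);
//   // SC == SPIRV::StorageClass::Workgroup on a typical OpenCL subtarget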

SPIRV::MemorySemantics::MemorySemantics
getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC);

SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord);

SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id);

// Find the def instruction for the given ConstReg, walking through
// spv_track_constant and ASSIGN_TYPE instructions. Updates ConstReg to the
// def of the OpConstant instruction.
MachineInstr *getDefInstrMaybeConstant(Register &ConstReg,
                                       const MachineRegisterInfo *MRI);

// Get constant integer value of the given ConstReg.
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI);

// Get constant integer value of the given ConstReg, sign-extended.
int64_t getIConstValSext(Register ConstReg, const MachineRegisterInfo *MRI);

// Check if MI is a SPIR-V specific intrinsic call.
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID);
// Check if it's a SPIR-V specific intrinsic call.
bool isSpvIntrinsic(const Value *Arg);

// Get type of i-th operand of the metadata node.
Type *getMDOperandAsType(const MDNode *N, unsigned I);

// If an OpenCL or SPIR-V builtin function name is recognized, return the
// demangled name, otherwise return an empty string.
std::string getOclOrSpirvBuiltinDemangledName(StringRef Name);

// Check if a string contains a builtin prefix.
bool hasBuiltinTypePrefix(StringRef Name);

// Check if the given LLVM type is a special opaque builtin type.
bool isSpecialOpaqueType(const Type *Ty);

// Check if the function is a SPIR-V entry point.
bool isEntryPoint(const Function &F);

// Parse a basic scalar type name from the beginning of TypeName and return the
// corresponding LLVM type.
Type *parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx);

// Sort blocks in a partial ordering, so that each block is after all its
// dominators. This should match both the SPIR-V and the MIR requirements.
// Returns true if the function was changed.
bool sortBlocks(Function &F);

// Check for peeled array structs and recursively reconstitute them. In HLSL
// CBuffers, arrays may have padding between the elements, but not after the
// last element. To represent this in LLVM IR, an array [N x T] will be
// represented as {[N-1 x {T, spirv.Padding}], T}. The function
// matchPeeledArrayPattern recognizes this pattern, retrieving the element type
// {T, spirv.Padding} and the size N.
bool matchPeeledArrayPattern(const StructType *Ty, Type *&OriginalElementType,
                             uint64_t &TotalSize);

// This function turns the type {[N-1 x {T, spirv.Padding}], T} back into
// [N x {T, spirv.Padding}] so that it can be translated into SPIR-V. The
// offset decorations will be such that there is no padding after the array
// when relevant.
Type *reconstitutePeeledArrayType(Type *Ty);
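
// Illustrative sketch of the peeled-array pattern (hypothetical values; Ctx is
// an available LLVMContext, and "spirv.Padding" is used as described above):
//
//   Type *Padding = TargetExtType::get(Ctx, "spirv.Padding");
//   StructType *Elem = StructType::get(Type::getFloatTy(Ctx), Padding);
//   // Peeled form of a 4-element padded array:
//   //   {[3 x {float, spirv.Padding}], float}
//   StructType *Peeled =
//       StructType::get(ArrayType::get(Elem, 3), Type::getFloatTy(Ctx));
//
//   Type *ElemTy = nullptr;
//   uint64_t N = 0;
//   bool Matched = matchPeeledArrayPattern(Peeled, ElemTy, N);
//   // Matched == true, ElemTy == {float, spirv.Padding}, N == 4
//   // reconstitutePeeledArrayType(Peeled) == [4 x {float, spirv.Padding}]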

inline bool hasInitializer(const GlobalVariable *GV) {
  if (!GV->hasInitializer())
    return false;
  if (const auto *Init = GV->getInitializer(); isa<UndefValue>(Init))
    return GV->isConstant() && Init->getType()->isAggregateType();
  return true;
}

// True if this is an instance of TypedPointerType.
inline bool isTypedPointerTy(const Type *T) {
  return T && T->getTypeID() == Type::TypedPointerTyID;
}

// True if this is an instance of PointerType.
inline bool isUntypedPointerTy(const Type *T) {
  return T && T->getTypeID() == Type::PointerTyID;
}

// True if this is an instance of PointerType or TypedPointerType.
inline bool isPointerTy(const Type *T) {
  return isUntypedPointerTy(T) || isTypedPointerTy(T);
}

// Get the address space of this pointer or pointer vector type for instances
// of PointerType or TypedPointerType.
inline unsigned getPointerAddressSpace(const Type *T) {
  Type *SubT = T->getScalarType();
  return SubT->getTypeID() == Type::PointerTyID
             ? cast<PointerType>(SubT)->getAddressSpace()
             : cast<TypedPointerType>(SubT)->getAddressSpace();
}
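
// Illustrative sketch (Ctx is an available LLVMContext):
//
//   Type *P = PointerType::get(Ctx, /*AddressSpace=*/4);
//   unsigned AS = getPointerAddressSpace(P); // AS == 4
//   Type *TP = TypedPointerType::get(Type::getInt8Ty(Ctx), /*AddressSpace=*/3);
//   unsigned AS2 = getPointerAddressSpace(TP); // AS2 == 3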

// Return true if the Argument is decorated with a pointee type.
inline bool hasPointeeTypeAttr(Argument *Arg) {
  return Arg->hasByValAttr() || Arg->hasByRefAttr() || Arg->hasStructRetAttr();
}

// Return the pointee type of the argument, or nullptr otherwise.
inline Type *getPointeeTypeByAttr(Argument *Arg) {
  if (Arg->hasByValAttr())
    return Arg->getParamByValType();
  if (Arg->hasStructRetAttr())
    return Arg->getParamStructRetType();
  if (Arg->hasByRefAttr())
    return Arg->getParamByRefType();
  return nullptr;
}

inline Type *reconstructFunctionType(Function *F) {
  SmallVector<Type *> ArgTys;
  for (unsigned i = 0; i < F->arg_size(); ++i)
    ArgTys.push_back(F->getArg(i)->getType());
  return FunctionType::get(F->getReturnType(), ArgTys, F->isVarArg());
}

#define TYPED_PTR_TARGET_EXT_NAME "spirv.$TypedPointerType"
inline Type *getTypedPointerWrapper(Type *ElemTy, unsigned AS) {
  return TargetExtType::get(ElemTy->getContext(), TYPED_PTR_TARGET_EXT_NAME,
                            {ElemTy}, {AS});
}

inline bool isTypedPointerWrapper(const TargetExtType *ExtTy) {
  return ExtTy->getName() == TYPED_PTR_TARGET_EXT_NAME &&
         ExtTy->getNumIntParameters() == 1 &&
         ExtTy->getNumTypeParameters() == 1;
}
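
// Illustrative sketch: wrap an element type, then recognize the wrapper (Ctx
// is an available LLVMContext):
//
//   Type *Wrapped = getTypedPointerWrapper(Type::getInt32Ty(Ctx), /*AS=*/1);
//   if (auto *ExtTy = dyn_cast<TargetExtType>(Wrapped))
//     assert(isTypedPointerWrapper(ExtTy)); // "spirv.$TypedPointerType"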

// True if this is an instance of PointerType, TypedPointerType, or the
// typed-pointer wrapper target extension type.
inline bool isPointerTyOrWrapper(const Type *Ty) {
  if (auto *ExtTy = dyn_cast<TargetExtType>(Ty))
    return isTypedPointerWrapper(ExtTy);
  return isPointerTy(Ty);
}

inline Type *applyWrappers(Type *Ty) {
  if (auto *ExtTy = dyn_cast<TargetExtType>(Ty)) {
    if (isTypedPointerWrapper(ExtTy))
      return TypedPointerType::get(applyWrappers(ExtTy->getTypeParameter(0)),
                                   ExtTy->getIntParameter(0));
  } else if (auto *VecTy = dyn_cast<VectorType>(Ty)) {
    Type *ElemTy = VecTy->getElementType();
    Type *NewElemTy = ElemTy->isTargetExtTy() ? applyWrappers(ElemTy) : ElemTy;
    if (NewElemTy != ElemTy)
      return VectorType::get(NewElemTy, VecTy->getElementCount());
  }
  return Ty;
}

inline Type *getPointeeType(const Type *Ty) {
  if (Ty) {
    if (auto PType = dyn_cast<TypedPointerType>(Ty))
      return PType->getElementType();
    else if (auto *ExtTy = dyn_cast<TargetExtType>(Ty))
      if (isTypedPointerWrapper(ExtTy))
        return ExtTy->getTypeParameter(0);
  }
  return nullptr;
}

inline bool isUntypedEquivalentToTyExt(Type *Ty1, Type *Ty2) {
  if (!isUntypedPointerTy(Ty1) || !Ty2)
    return false;
  if (auto *ExtTy = dyn_cast<TargetExtType>(Ty2))
    if (isTypedPointerWrapper(ExtTy) &&
        ExtTy->getTypeParameter(0) ==
            IntegerType::getInt8Ty(Ty1->getContext()) &&
        ExtTy->getIntParameter(0) == cast<PointerType>(Ty1)->getAddressSpace())
      return true;
  return false;
}

inline bool isEquivalentTypes(Type *Ty1, Type *Ty2) {
  return isUntypedEquivalentToTyExt(Ty1, Ty2) ||
         isUntypedEquivalentToTyExt(Ty2, Ty1);
}

inline Type *toTypedPointer(Type *Ty) {
  if (Type *NewTy = applyWrappers(Ty); NewTy != Ty)
    return NewTy;
  return isUntypedPointerTy(Ty)
             ? TypedPointerType::get(IntegerType::getInt8Ty(Ty->getContext()),
                                     getPointerAddressSpace(Ty))
             : Ty;
}
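
// Illustrative sketch: an untyped pointer becomes a TypedPointerType to i8 in
// the same address space; non-pointer types are returned unchanged (Ctx is an
// available LLVMContext):
//
//   Type *P = PointerType::get(Ctx, /*AddressSpace=*/3);
//   Type *T = toTypedPointer(P);                     // TypedPointerType(i8, 3)
//   Type *I = toTypedPointer(Type::getInt32Ty(Ctx)); // i32, unchanged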

inline Type *toTypedFunPointer(FunctionType *FTy) {
  Type *OrigRetTy = FTy->getReturnType();
  Type *RetTy = toTypedPointer(OrigRetTy);
  bool IsUntypedPtr = false;
  for (Type *PTy : FTy->params()) {
    if (isUntypedPointerTy(PTy)) {
      IsUntypedPtr = true;
      break;
    }
  }
  if (!IsUntypedPtr && RetTy == OrigRetTy)
    return FTy;
  SmallVector<Type *> ParamTys;
  for (Type *PTy : FTy->params())
    ParamTys.push_back(toTypedPointer(PTy));
  return FunctionType::get(RetTy, ParamTys, FTy->isVarArg());
}

inline const Type *unifyPtrType(const Type *Ty) {
  if (auto FTy = dyn_cast<FunctionType>(Ty))
    return toTypedFunPointer(const_cast<FunctionType *>(FTy));
  return toTypedPointer(const_cast<Type *>(Ty));
}

inline bool isVector1(Type *Ty) {
  auto *FVTy = dyn_cast<FixedVectorType>(Ty);
  return FVTy && FVTy->getNumElements() == 1;
}

// Modify an LLVM type to conform with future transformations in IRTranslator.
// At the moment the only use case is a <1 x Type> vector; extend when/if
// needed.
inline Type *normalizeType(Type *Ty) {
  auto *FVTy = dyn_cast<FixedVectorType>(Ty);
  if (!FVTy || FVTy->getNumElements() != 1)
    return Ty;
  // If it's a <1 x Type> vector type, replace it by the element type, because
  // it's not a legal vector type in LLT and IRTranslator will represent it as
  // the scalar eventually.
  return normalizeType(FVTy->getElementType());
}
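
// Illustrative sketch: <1 x i32> collapses to i32; other types pass through
// unchanged (Ctx is an available LLVMContext):
//
//   Type *V1 = FixedVectorType::get(Type::getInt32Ty(Ctx), 1);
//   Type *N = normalizeType(V1);   // i32
//   Type *V4 = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
//   Type *S = normalizeType(V4);   // <4 x i32>, unchanged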

inline PoisonValue *getNormalizedPoisonValue(Type *Ty) {
  return PoisonValue::get(normalizeType(Ty));
}

inline MetadataAsValue *buildMD(Value *Arg) {
  LLVMContext &Ctx = Arg->getContext();
  return MetadataAsValue::get(
      Ctx, MDNode::get(Ctx, ValueAsMetadata::getConstant(Arg)));
}
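
// Illustrative sketch: buildMD wraps a constant so it can be passed as a
// metadata operand when building an intrinsic call (hypothetical values; Ctx
// is an available LLVMContext):
//
//   Value *C = ConstantInt::get(Type::getInt32Ty(Ctx), 42);
//   MetadataAsValue *MD = buildMD(C);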

CallInst *buildIntrWithMD(Intrinsic::ID IntrID, ArrayRef<Type *> Types,
                          Value *Arg, Value *Arg2, ArrayRef<Constant *> Imms,
                          IRBuilder<> &B);

MachineInstr *getVRegDef(MachineRegisterInfo &MRI, Register Reg);

#define SPIRV_BACKEND_SERVICE_FUN_NAME "__spirv_backend_service_fun"
bool getVacantFunctionName(Module &M, std::string &Name);

void setRegClassType(Register Reg, const Type *Ty, SPIRVGlobalRegistry *GR,
                     MachineIRBuilder &MIRBuilder,
                     SPIRV::AccessQualifier::AccessQualifier AccessQual,
                     bool EmitIR, bool Force = false);
void setRegClassType(Register Reg, SPIRVTypeInst SpvType,
                     SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI,
                     const MachineFunction &MF, bool Force = false);
Register createVirtualRegister(SPIRVTypeInst SpvType, SPIRVGlobalRegistry *GR,
                               MachineRegisterInfo *MRI,
                               const MachineFunction &MF);
Register createVirtualRegister(SPIRVTypeInst SpvType, SPIRVGlobalRegistry *GR,
                               MachineIRBuilder &MIRBuilder);
Register createVirtualRegister(
    const Type *Ty, SPIRVGlobalRegistry *GR, MachineIRBuilder &MIRBuilder,
    SPIRV::AccessQualifier::AccessQualifier AccessQual, bool EmitIR);

// Return true if there is an opaque pointer type nested in the argument.
bool isNestedPointer(const Type *Ty);

enum FPDecorationId { NONE, RTE, RTZ, RTP, RTN, SAT };

inline FPDecorationId demangledPostfixToDecorationId(const std::string &S) {
  static std::unordered_map<std::string, FPDecorationId> Mapping = {
      {"rte", FPDecorationId::RTE},
      {"rtz", FPDecorationId::RTZ},
      {"rtp", FPDecorationId::RTP},
      {"rtn", FPDecorationId::RTN},
      {"sat", FPDecorationId::SAT}};
  auto It = Mapping.find(S);
  return It == Mapping.end() ? FPDecorationId::NONE : It->second;
}
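
// Illustrative sketch:
//
//   FPDecorationId Id = demangledPostfixToDecorationId("rte");
//   // Id == FPDecorationId::RTE; unknown postfixes map to FPDecorationId::NONE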

SmallVector<MachineInstr *, 4>
createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode,
                            unsigned MinWC, unsigned ContinuedOpcode,
                            ArrayRef<Register> Args, Register ReturnRegister,
                            Register TypeID);

// Instruction selection directed by type folding.
const std::set<unsigned> &getTypeFoldingSupportedOpcodes();
bool isTypeFoldingSupported(unsigned Opcode);

// Get loop controls from llvm.loop metadata.
SmallVector<unsigned, 1> getSpirvLoopControlOperandsFromLoopMetadata(Loop *L);
SmallVector<unsigned, 1>
getSpirvLoopControlOperandsFromLoopMetadata(MDNode *LoopMD);

// Traversing [g]MIR accounting for pseudo-instructions.
MachineInstr *passCopy(MachineInstr *Def, const MachineRegisterInfo *MRI);
MachineInstr *getDef(const MachineOperand &MO, const MachineRegisterInfo *MRI);
MachineInstr *getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI);
int64_t foldImm(const MachineOperand &MO, const MachineRegisterInfo *MRI);
unsigned getArrayComponentCount(const MachineRegisterInfo *MRI,
                                const MachineInstr *ResType);
MachineBasicBlock::iterator
getFirstValidInstructionInsertPoint(MachineBasicBlock &BB);

std::optional<SPIRV::LinkageType::LinkageType>
getSpirvLinkageTypeFor(const SPIRVSubtarget &ST, const GlobalValue &GV);
Function *getOrCreateBackendServiceFunction(Module &M);
} // namespace llvm
#endif // LLVM_LIB_TARGET_SPIRV_SPIRVUTILS_H