1//===--- SPIRVUtils.h ---- SPIR-V Utility Functions -------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains miscellaneous utility functions.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_LIB_TARGET_SPIRV_SPIRVUTILS_H
14#define LLVM_LIB_TARGET_SPIRV_SPIRVUTILS_H
15
16#include "MCTargetDesc/SPIRVBaseInfo.h"
17#include "llvm/Analysis/LoopInfo.h"
18#include "llvm/CodeGen/MachineBasicBlock.h"
19#include "llvm/IR/Dominators.h"
20#include "llvm/IR/GlobalVariable.h"
21#include "llvm/IR/IRBuilder.h"
22#include "llvm/IR/TypedPointerType.h"
23#include <queue>
24#include <string>
25#include <unordered_map>
26#include <unordered_set>
27
28#include "SPIRVTypeInst.h"
29
30namespace llvm {
31class MCInst;
32class MachineFunction;
33class MachineInstr;
34class MachineInstrBuilder;
35class MachineIRBuilder;
36class MachineRegisterInfo;
37class Register;
38class StringRef;
39class SPIRVInstrInfo;
40class SPIRVSubtarget;
41class SPIRVGlobalRegistry;
42class SPIRVTypeInst;
43
// This class implements a partial ordering visitor, which visits a cyclic graph
// in natural topological-like ordering. Topological ordering is not defined for
// directed graphs with cycles, so this assumes cycles are a single node, and
// ignores back-edges. The cycle is visited from the entry in the same
// topological-like ordering.
//
// Note: this visitor REQUIRES a reducible graph.
//
// This means once we visit a node, we know all the possible ancestors have been
// visited.
//
// clang-format off
//
// Given this graph:
//
//     ,-> B -\
// A -+        +---> D ----> E -> F -> G -> H
//     `-> C -/        ^                |
//                     +----------------+
//
// Visit order is:
//  A, [B, C in any order], D, E, F, G, H
//
// clang-format on
//
// Changing the function CFG between the construction of the visitor and
// visiting is undefined. The visitor can be reused, but if the CFG is updated,
// the visitor must be rebuilt.
class PartialOrderingVisitor {
  // Dominator tree and loop info computed once at construction time.
  DomTreeBuilder::BBDomTree DT;
  LoopInfo LI;

  // Blocks already enqueued for visiting (prevents double-queueing).
  std::unordered_set<BasicBlock *> Queued = {};
  // Work queue for the BFS-like traversal.
  std::queue<BasicBlock *> ToVisit = {};

  // Per-block ordering data: Rank is the partial-order level, TraversalIndex
  // is the tie-breaker that makes the order stable among equal ranks.
  struct OrderInfo {
    size_t Rank;
    size_t TraversalIndex;
  };

  using BlockToOrderInfoMap = std::unordered_map<BasicBlock *, OrderInfo>;
  BlockToOrderInfoMap BlockToOrder;
  // Blocks in their final computed visitation order.
  std::vector<BasicBlock *> Order = {};

  // Get all basic-blocks reachable from Start.
  std::unordered_set<BasicBlock *> getReachableFrom(BasicBlock *Start);

  // Internal function used to determine the partial ordering.
  // Visits |BB| with the current rank being |Rank|.
  size_t visit(BasicBlock *BB, size_t Rank);

  // Returns true when |BB| is ready to be visited (all non-back-edge
  // predecessors already handled).
  bool CanBeVisited(BasicBlock *BB) const;

public:
  // Returns the rank assigned to |BB| by the partial ordering.
  size_t GetNodeRank(BasicBlock *BB) const;

  // Build the visitor to operate on the function F.
  PartialOrderingVisitor(Function &F);

  // Returns true if |LHS| comes before |RHS| in the partial ordering.
  // If |LHS| and |RHS| have the same rank, the traversal order determines the
  // order (order is stable).
  bool compare(const BasicBlock *LHS, const BasicBlock *RHS) const;

  // Visit the function starting from the basic block |Start|, and calling |Op|
  // on each visited BB. This traversal ignores back-edges, meaning this won't
  // visit a node to which |Start| is not an ancestor.
  // If Op returns |true|, the visitor continues. If |Op| returns false, the
  // visitor will stop at that rank. This means if 2 nodes share the same rank,
  // and Op returns false when visiting the first, the second will be visited
  // afterwards. But none of their successors will.
  void partialOrderVisit(BasicBlock &Start,
                         std::function<bool(BasicBlock *)> Op);
};
118
119namespace SPIRV {
// Per-type record of the floating-point fast-math defaults requested for a
// kernel/entry point, gathered from execution-mode metadata.
struct FPFastMathDefaultInfo {
  const Type *Ty = nullptr;
  unsigned FastMathFlags = 0;
  // When SPV_KHR_float_controls2 is enabled, ContractionOff and
  // SignedZeroInfNanPreserve are deprecated, and we replace them with
  // FPFastMathDefault appropriate flags instead. However, we have no guarantee
  // about the order in which we will process execution modes. Therefore it
  // could happen that we first process ContractionOff, setting
  // AllowContraction bit to 0, and then we process FPFastMathDefault enabling
  // AllowContraction bit, effectively invalidating ContractionOff. Because of
  // that, it's best to keep separate bits for the different execution modes,
  // and we will try and combine them later when we emit OpExecutionMode
  // instructions.
  bool ContractionOff = false;
  bool SignedZeroInfNanPreserve = false;
  bool FPFastMathDefault = false;

  FPFastMathDefaultInfo() = default;
  FPFastMathDefaultInfo(const Type *Ty, unsigned FastMathFlags)
      : Ty(Ty), FastMathFlags(FastMathFlags) {}
  // Two records are equal only when every field (type, flags, and all three
  // execution-mode bits) matches.
  bool operator==(const FPFastMathDefaultInfo &Other) const {
    return Ty == Other.Ty && FastMathFlags == Other.FastMathFlags &&
           ContractionOff == Other.ContractionOff &&
           SignedZeroInfNanPreserve == Other.SignedZeroInfNanPreserve &&
           FPFastMathDefault == Other.FPFastMathDefault;
  }
};
146
147struct FPFastMathDefaultInfoVector
148 : public SmallVector<SPIRV::FPFastMathDefaultInfo, 3> {
149 static size_t computeFPFastMathDefaultInfoVecIndex(size_t BitWidth) {
150 switch (BitWidth) {
151 case 16: // half
152 return 0;
153 case 32: // float
154 return 1;
155 case 64: // double
156 return 2;
157 default:
158 report_fatal_error(reason: "Expected BitWidth to be 16, 32, 64", gen_crash_diag: false);
159 }
160 llvm_unreachable(
161 "Unreachable code in computeFPFastMathDefaultInfoVecIndex");
162 }
163};
164
// This code restores function args/retvalue types for composite cases
// because the final types should still be aggregate whereas they're i32
// during the translation to cope with aggregate flattening etc.
FunctionType *getOriginalFunctionType(const Function &F);
// Overload resolving the original type for a call site rather than a function.
FunctionType *getOriginalFunctionType(const CallBase &CB);
170} // namespace SPIRV
171
// Add the given string as a series of integer operands, inserting null
// terminators and padding to make sure the operands all have 32-bit
// little-endian words.
void addStringImm(const StringRef &Str, MCInst &Inst);
void addStringImm(const StringRef &Str, MachineInstrBuilder &MIB);
void addStringImm(const StringRef &Str, IRBuilder<> &B,
                  std::vector<Value *> &Args);

// Read the series of integer operands back as a null-terminated string using
// the reverse of the logic in addStringImm.
std::string getStringImm(const MachineInstr &MI, unsigned StartIndex);

// Returns the string constant that the register refers to. It is assumed that
// Reg is a global value that contains a string.
std::string getStringValueFromReg(Register Reg, MachineRegisterInfo &MRI);

// Add the given numerical immediate to MIB.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB);

// Add an OpName instruction for the given target register.
void buildOpName(Register Target, const StringRef &Name,
                 MachineIRBuilder &MIRBuilder);
void buildOpName(Register Target, const StringRef &Name, MachineInstr &I,
                 const SPIRVInstrInfo &TII);

// Add an OpDecorate instruction for the given Reg.
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder,
                     SPIRV::Decoration::Decoration Dec,
                     const std::vector<uint32_t> &DecArgs,
                     StringRef StrImm = "");
void buildOpDecorate(Register Reg, MachineInstr &I, const SPIRVInstrInfo &TII,
                     SPIRV::Decoration::Decoration Dec,
                     const std::vector<uint32_t> &DecArgs,
                     StringRef StrImm = "");

// Add an OpMemberDecorate instruction for the given Reg.
void buildOpMemberDecorate(Register Reg, MachineIRBuilder &MIRBuilder,
                           SPIRV::Decoration::Decoration Dec, uint32_t Member,
                           const std::vector<uint32_t> &DecArgs,
                           StringRef StrImm = "");
void buildOpMemberDecorate(Register Reg, MachineInstr &I,
                           const SPIRVInstrInfo &TII,
                           SPIRV::Decoration::Decoration Dec, uint32_t Member,
                           const std::vector<uint32_t> &DecArgs,
                           StringRef StrImm = "");

// Add an OpDecorate instruction by "spirv.Decorations" metadata node.
void buildOpSpirvDecorations(Register Reg, MachineIRBuilder &MIRBuilder,
                             const MDNode *GVarMD, const SPIRVSubtarget &ST);

// Return a valid position for the OpVariable instruction inside a function,
// i.e., at the beginning of the first block of the function.
MachineBasicBlock::iterator getOpVariableMBBIt(MachineInstr &I);

// Return a valid position for the instruction at the end of the block before
// terminators and debug instructions.
MachineBasicBlock::iterator getInsertPtValidEnd(MachineBasicBlock *MBB);
229
230// Returns true if a pointer to the storage class can be casted to/from a
231// pointer to the Generic storage class.
232constexpr bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
233 switch (SC) {
234 case SPIRV::StorageClass::Workgroup:
235 case SPIRV::StorageClass::CrossWorkgroup:
236 case SPIRV::StorageClass::Function:
237 return true;
238 default:
239 return false;
240 }
241}
242
243// Convert a SPIR-V storage class to the corresponding LLVM IR address space.
244// TODO: maybe the following two functions should be handled in the subtarget
245// to allow for different OpenCL vs Vulkan handling.
246constexpr unsigned
247storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC) {
248 switch (SC) {
249 case SPIRV::StorageClass::Function:
250 return 0;
251 case SPIRV::StorageClass::CrossWorkgroup:
252 return 1;
253 case SPIRV::StorageClass::UniformConstant:
254 return 2;
255 case SPIRV::StorageClass::Workgroup:
256 return 3;
257 case SPIRV::StorageClass::Generic:
258 return 4;
259 case SPIRV::StorageClass::DeviceOnlyINTEL:
260 return 5;
261 case SPIRV::StorageClass::HostOnlyINTEL:
262 return 6;
263 case SPIRV::StorageClass::Input:
264 return 7;
265 case SPIRV::StorageClass::Output:
266 return 8;
267 case SPIRV::StorageClass::CodeSectionINTEL:
268 return 9;
269 case SPIRV::StorageClass::Private:
270 return 10;
271 case SPIRV::StorageClass::StorageBuffer:
272 return 11;
273 case SPIRV::StorageClass::Uniform:
274 return 12;
275 case SPIRV::StorageClass::PushConstant:
276 return 13;
277 default:
278 report_fatal_error(reason: "Unable to get address space id");
279 }
280}
281
// Convert an LLVM IR address space to a SPIR-V storage class.
SPIRV::StorageClass::StorageClass
addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI);

// Return the memory-semantics value associated with the given storage class.
SPIRV::MemorySemantics::MemorySemantics
getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC);

// Map an LLVM atomic ordering to the SPIR-V memory-semantics equivalent.
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord);

// Map an LLVM synchronization scope ID to the SPIR-V scope equivalent.
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id);

// Find def instruction for the given ConstReg, walking through
// spv_track_constant and ASSIGN_TYPE instructions. Updates ConstReg by def
// of OpConstant instruction.
MachineInstr *getDefInstrMaybeConstant(Register &ConstReg,
                                       const MachineRegisterInfo *MRI);

// Get constant integer value of the given ConstReg.
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI);

// Get constant integer value of the given ConstReg, sign-extended.
int64_t getIConstValSext(Register ConstReg, const MachineRegisterInfo *MRI);

// Check if MI is a SPIR-V specific intrinsic call.
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID);
// Check if it's a SPIR-V specific intrinsic call.
bool isSpvIntrinsic(const Value *Arg);

// Get type of i-th operand of the metadata node.
Type *getMDOperandAsType(const MDNode *N, unsigned I);

// If OpenCL or SPIR-V builtin function name is recognized, return a demangled
// name, otherwise return an empty string.
std::string getOclOrSpirvBuiltinDemangledName(StringRef Name);

// Check if a string contains a builtin prefix.
bool hasBuiltinTypePrefix(StringRef Name);

// Check if given LLVM type is a special opaque builtin type.
bool isSpecialOpaqueType(const Type *Ty);

// Check if the function is an SPIR-V entry point.
bool isEntryPoint(const Function &F);

// Parse basic scalar type name, substring TypeName, and return LLVM type.
Type *parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx);

// Sort blocks in a partial ordering, so each block is after all its
// dominators. This should match both the SPIR-V and the MIR requirements.
// Returns true if the function was changed.
bool sortBlocks(Function &F);

// Check for peeled array structs and recursively reconstitute them. In HLSL
// CBuffers, arrays may have padding between the elements, but not after the
// last element. To represent this in LLVM IR an array [N x T] will be
// represented as {[N-1 x {T, spirv.Padding}], T}. The function
// matchPeeledArrayPattern recognizes this pattern retrieving the type {T,
// spirv.Padding}, and the size N.
bool matchPeeledArrayPattern(const StructType *Ty, Type *&OriginalElementType,
                             uint64_t &TotalSize);

// This function will turn the type {[N-1 x {T, spirv.Padding}], T} back into
// [N x {T, spirv.Padding}]. So it can be translated into SPIR-V. The offset
// decorations will be such that there will be no padding after the array when
// relevant.
Type *reconstitutePeeledArrayType(Type *Ty);
348
349inline bool hasInitializer(const GlobalVariable *GV) {
350 return GV->hasInitializer() && !isa<UndefValue>(Val: GV->getInitializer());
351}
352
353// True if this is an instance of TypedPointerType.
354inline bool isTypedPointerTy(const Type *T) {
355 return T && T->getTypeID() == Type::TypedPointerTyID;
356}
357
358// True if this is an instance of PointerType.
359inline bool isUntypedPointerTy(const Type *T) {
360 return T && T->getTypeID() == Type::PointerTyID;
361}
362
363// True if this is an instance of PointerType or TypedPointerType.
364inline bool isPointerTy(const Type *T) {
365 return isUntypedPointerTy(T) || isTypedPointerTy(T);
366}
367
368// Get the address space of this pointer or pointer vector type for instances of
369// PointerType or TypedPointerType.
370inline unsigned getPointerAddressSpace(const Type *T) {
371 Type *SubT = T->getScalarType();
372 return SubT->getTypeID() == Type::PointerTyID
373 ? cast<PointerType>(Val: SubT)->getAddressSpace()
374 : cast<TypedPointerType>(Val: SubT)->getAddressSpace();
375}
376
377// Return true if the Argument is decorated with a pointee type
378inline bool hasPointeeTypeAttr(Argument *Arg) {
379 return Arg->hasByValAttr() || Arg->hasByRefAttr() || Arg->hasStructRetAttr();
380}
381
382// Return the pointee type of the argument or nullptr otherwise
383inline Type *getPointeeTypeByAttr(Argument *Arg) {
384 if (Arg->hasByValAttr())
385 return Arg->getParamByValType();
386 if (Arg->hasStructRetAttr())
387 return Arg->getParamStructRetType();
388 if (Arg->hasByRefAttr())
389 return Arg->getParamByRefType();
390 return nullptr;
391}
392
393inline Type *reconstructFunctionType(Function *F) {
394 SmallVector<Type *> ArgTys;
395 for (unsigned i = 0; i < F->arg_size(); ++i)
396 ArgTys.push_back(Elt: F->getArg(i)->getType());
397 return FunctionType::get(Result: F->getReturnType(), Params: ArgTys, isVarArg: F->isVarArg());
398}
399
400#define TYPED_PTR_TARGET_EXT_NAME "spirv.$TypedPointerType"
401inline Type *getTypedPointerWrapper(Type *ElemTy, unsigned AS) {
402 return TargetExtType::get(Context&: ElemTy->getContext(), TYPED_PTR_TARGET_EXT_NAME,
403 Types: {ElemTy}, Ints: {AS});
404}
405
406inline bool isTypedPointerWrapper(const TargetExtType *ExtTy) {
407 return ExtTy->getName() == TYPED_PTR_TARGET_EXT_NAME &&
408 ExtTy->getNumIntParameters() == 1 &&
409 ExtTy->getNumTypeParameters() == 1;
410}
411
412// True if this is an instance of PointerType or TypedPointerType.
413inline bool isPointerTyOrWrapper(const Type *Ty) {
414 if (auto *ExtTy = dyn_cast<TargetExtType>(Val: Ty))
415 return isTypedPointerWrapper(ExtTy);
416 return isPointerTy(T: Ty);
417}
418
419inline Type *applyWrappers(Type *Ty) {
420 if (auto *ExtTy = dyn_cast<TargetExtType>(Val: Ty)) {
421 if (isTypedPointerWrapper(ExtTy))
422 return TypedPointerType::get(ElementType: applyWrappers(Ty: ExtTy->getTypeParameter(i: 0)),
423 AddressSpace: ExtTy->getIntParameter(i: 0));
424 } else if (auto *VecTy = dyn_cast<VectorType>(Val: Ty)) {
425 Type *ElemTy = VecTy->getElementType();
426 Type *NewElemTy = ElemTy->isTargetExtTy() ? applyWrappers(Ty: ElemTy) : ElemTy;
427 if (NewElemTy != ElemTy)
428 return VectorType::get(ElementType: NewElemTy, EC: VecTy->getElementCount());
429 }
430 return Ty;
431}
432
433inline Type *getPointeeType(const Type *Ty) {
434 if (Ty) {
435 if (auto PType = dyn_cast<TypedPointerType>(Val: Ty))
436 return PType->getElementType();
437 else if (auto *ExtTy = dyn_cast<TargetExtType>(Val: Ty))
438 if (isTypedPointerWrapper(ExtTy))
439 return ExtTy->getTypeParameter(i: 0);
440 }
441 return nullptr;
442}
443
444inline bool isUntypedEquivalentToTyExt(Type *Ty1, Type *Ty2) {
445 if (!isUntypedPointerTy(T: Ty1) || !Ty2)
446 return false;
447 if (auto *ExtTy = dyn_cast<TargetExtType>(Val: Ty2))
448 if (isTypedPointerWrapper(ExtTy) &&
449 ExtTy->getTypeParameter(i: 0) ==
450 IntegerType::getInt8Ty(C&: Ty1->getContext()) &&
451 ExtTy->getIntParameter(i: 0) == cast<PointerType>(Val: Ty1)->getAddressSpace())
452 return true;
453 return false;
454}
455
456inline bool isEquivalentTypes(Type *Ty1, Type *Ty2) {
457 return isUntypedEquivalentToTyExt(Ty1, Ty2) ||
458 isUntypedEquivalentToTyExt(Ty1: Ty2, Ty2: Ty1);
459}
460
461inline Type *toTypedPointer(Type *Ty) {
462 if (Type *NewTy = applyWrappers(Ty); NewTy != Ty)
463 return NewTy;
464 return isUntypedPointerTy(T: Ty)
465 ? TypedPointerType::get(ElementType: IntegerType::getInt8Ty(C&: Ty->getContext()),
466 AddressSpace: getPointerAddressSpace(T: Ty))
467 : Ty;
468}
469
470inline Type *toTypedFunPointer(FunctionType *FTy) {
471 Type *OrigRetTy = FTy->getReturnType();
472 Type *RetTy = toTypedPointer(Ty: OrigRetTy);
473 bool IsUntypedPtr = false;
474 for (Type *PTy : FTy->params()) {
475 if (isUntypedPointerTy(T: PTy)) {
476 IsUntypedPtr = true;
477 break;
478 }
479 }
480 if (!IsUntypedPtr && RetTy == OrigRetTy)
481 return FTy;
482 SmallVector<Type *> ParamTys;
483 for (Type *PTy : FTy->params())
484 ParamTys.push_back(Elt: toTypedPointer(Ty: PTy));
485 return FunctionType::get(Result: RetTy, Params: ParamTys, isVarArg: FTy->isVarArg());
486}
487
488inline const Type *unifyPtrType(const Type *Ty) {
489 if (auto FTy = dyn_cast<FunctionType>(Val: Ty))
490 return toTypedFunPointer(FTy: const_cast<FunctionType *>(FTy));
491 return toTypedPointer(Ty: const_cast<Type *>(Ty));
492}
493
494inline bool isVector1(Type *Ty) {
495 auto *FVTy = dyn_cast<FixedVectorType>(Val: Ty);
496 return FVTy && FVTy->getNumElements() == 1;
497}
498
499// Modify an LLVM type to conform with future transformations in IRTranslator.
500// At the moment use cases comprise only a <1 x Type> vector. To extend when/if
501// needed.
502inline Type *normalizeType(Type *Ty) {
503 auto *FVTy = dyn_cast<FixedVectorType>(Val: Ty);
504 if (!FVTy || FVTy->getNumElements() != 1)
505 return Ty;
506 // If it's a <1 x Type> vector type, replace it by the element type, because
507 // it's not a legal vector type in LLT and IRTranslator will represent it as
508 // the scalar eventually.
509 return normalizeType(Ty: FVTy->getElementType());
510}
511
512inline PoisonValue *getNormalizedPoisonValue(Type *Ty) {
513 return PoisonValue::get(T: normalizeType(Ty));
514}
515
516inline MetadataAsValue *buildMD(Value *Arg) {
517 LLVMContext &Ctx = Arg->getContext();
518 return MetadataAsValue::get(
519 Context&: Ctx, MD: MDNode::get(Context&: Ctx, MDs: ValueAsMetadata::getConstant(C: Arg)));
520}
521
// Build a call to the SPIR-V intrinsic |IntrID| with the given overload
// |Types|, arguments |Arg|/|Arg2| and constant immediates |Imms|, inserted
// via |B|.
CallInst *buildIntrWithMD(Intrinsic::ID IntrID, ArrayRef<Type *> Types,
                          Value *Arg, Value *Arg2, ArrayRef<Constant *> Imms,
                          IRBuilder<> &B);

// Return the defining instruction of the virtual register |Reg|.
MachineInstr *getVRegDef(MachineRegisterInfo &MRI, Register Reg);

#define SPIRV_BACKEND_SERVICE_FUN_NAME "__spirv_backend_service_fun"
// Find a function name not yet used in |M| and store it in |Name|;
// returns true on success.
bool getVacantFunctionName(Module &M, std::string &Name);

// Assign both a register class and a SPIR-V type to |Reg|. The overloads
// differ in whether the type is given as an LLVM type (built on demand) or
// as an already-created SPIR-V type instruction.
void setRegClassType(Register Reg, const Type *Ty, SPIRVGlobalRegistry *GR,
                     MachineIRBuilder &MIRBuilder,
                     SPIRV::AccessQualifier::AccessQualifier AccessQual,
                     bool EmitIR, bool Force = false);
void setRegClassType(Register Reg, SPIRVTypeInst SpvType,
                     SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI,
                     const MachineFunction &MF, bool Force = false);
// Create a virtual register carrying the given SPIR-V (or LLVM) type.
Register createVirtualRegister(SPIRVTypeInst SpvType, SPIRVGlobalRegistry *GR,
                               MachineRegisterInfo *MRI,
                               const MachineFunction &MF);
Register createVirtualRegister(SPIRVTypeInst SpvType, SPIRVGlobalRegistry *GR,
                               MachineIRBuilder &MIRBuilder);
Register createVirtualRegister(
    const Type *Ty, SPIRVGlobalRegistry *GR, MachineIRBuilder &MIRBuilder,
    SPIRV::AccessQualifier::AccessQualifier AccessQual, bool EmitIR);

// Return true if there is an opaque pointer type nested in the argument.
bool isNestedPointer(const Type *Ty);
549
// Rounding-mode / saturation decorations encoded in demangled builtin-name
// postfixes (e.g. the "rte" in "convert_float_rte").
enum FPDecorationId { NONE, RTE, RTZ, RTP, RTN, SAT };

// Map a demangled-name postfix to its FPDecorationId; returns NONE for any
// unrecognized postfix.
inline FPDecorationId demangledPostfixToDecorationId(const std::string &S) {
  // Immutable lookup table, built once on first use.
  static const std::unordered_map<std::string, FPDecorationId> Mapping = {
      {"rte", FPDecorationId::RTE},
      {"rtz", FPDecorationId::RTZ},
      {"rtp", FPDecorationId::RTP},
      {"rtn", FPDecorationId::RTN},
      {"sat", FPDecorationId::SAT}};
  auto It = Mapping.find(S);
  return It == Mapping.end() ? FPDecorationId::NONE : It->second;
}
562
// Emit an instruction with |Opcode| and, when the operand list exceeds the
// word-count limit |MinWC|, continue it with |ContinuedOpcode| instructions.
SmallVector<MachineInstr *, 4>
createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode,
                            unsigned MinWC, unsigned ContinuedOpcode,
                            ArrayRef<Register> Args, Register ReturnRegister,
                            Register TypeID);

// Instruction selection directed by type folding.
const std::set<unsigned> &getTypeFoldingSupportedOpcodes();
bool isTypeFoldingSupported(unsigned Opcode);

// Get loop controls from llvm.loop. metadata.
SmallVector<unsigned, 1> getSpirvLoopControlOperandsFromLoopMetadata(Loop *L);
SmallVector<unsigned, 1>
getSpirvLoopControlOperandsFromLoopMetadata(MDNode *LoopMD);

// Traversing [g]MIR accounting for pseudo-instructions.
MachineInstr *passCopy(MachineInstr *Def, const MachineRegisterInfo *MRI);
MachineInstr *getDef(const MachineOperand &MO, const MachineRegisterInfo *MRI);
MachineInstr *getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI);
int64_t foldImm(const MachineOperand &MO, const MachineRegisterInfo *MRI);
unsigned getArrayComponentCount(const MachineRegisterInfo *MRI,
                                const MachineInstr *ResType);
MachineBasicBlock::iterator
getFirstValidInstructionInsertPoint(MachineBasicBlock &BB);

// Compute the SPIR-V linkage type for |GV|, or std::nullopt when the global
// should not be given a Linkage decoration.
std::optional<SPIRV::LinkageType::LinkageType>
getSpirvLinkageTypeFor(const SPIRVSubtarget &ST, const GlobalValue &GV);
590} // namespace llvm
591#endif // LLVM_LIB_TARGET_SPIRV_SPIRVUTILS_H
592