//===--- SPIRVUtils.h ---- SPIR-V Utility Functions -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains miscellaneous utility functions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_SPIRV_SPIRVUTILS_H
#define LLVM_LIB_TARGET_SPIRV_SPIRVUTILS_H

#include "MCTargetDesc/SPIRVBaseInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/TypedPointerType.h"
#include <queue>
#include <string>
#include <unordered_map>
#include <unordered_set>

namespace llvm {
class MCInst;
class MachineFunction;
class MachineInstr;
class MachineInstrBuilder;
class MachineIRBuilder;
class MachineRegisterInfo;
class Register;
class StringRef;
class SPIRVInstrInfo;
class SPIRVSubtarget;
class SPIRVGlobalRegistry;
// This class implements a partial ordering visitor, which visits a cyclic
// graph in a natural topological-like ordering. Topological ordering is not
// defined for directed graphs with cycles, so this visitor treats each cycle
// as a single node and ignores back-edges. A cycle is visited from its entry
// in the same topological-like ordering.
//
// Note: this visitor REQUIRES a reducible graph.
//
// This means that once we visit a node, we know all of its possible ancestors
// have been visited.
//
// clang-format off
//
// Given this graph:
//
//      ,-> B -\
// A -+         +---> D ----> E -> F -> G -> H
//      `-> C -/      ^                 |
//                    +-----------------+
//
// Visit order is:
//  A, [B, C in any order], D, E, F, G, H
//
// clang-format on
//
// Changing the function CFG between the construction of the visitor and
// visiting is undefined. The visitor can be reused, but if the CFG is updated,
// the visitor must be rebuilt.
class PartialOrderingVisitor {
  DomTreeBuilder::BBDomTree DT;
  LoopInfo LI;

  std::unordered_set<BasicBlock *> Queued = {};
  std::queue<BasicBlock *> ToVisit = {};

  struct OrderInfo {
    size_t Rank;
    size_t TraversalIndex;
  };

  using BlockToOrderInfoMap = std::unordered_map<BasicBlock *, OrderInfo>;
  BlockToOrderInfoMap BlockToOrder;
  std::vector<BasicBlock *> Order = {};

  // Get all basic-blocks reachable from Start.
  std::unordered_set<BasicBlock *> getReachableFrom(BasicBlock *Start);

  // Internal function used to determine the partial ordering.
  // Visits |BB| with the current rank being |Rank|.
  size_t visit(BasicBlock *BB, size_t Rank);

  bool CanBeVisited(BasicBlock *BB) const;

public:
  size_t GetNodeRank(BasicBlock *BB) const;

  // Build the visitor to operate on the function F.
  PartialOrderingVisitor(Function &F);
  // Returns true if |LHS| comes before |RHS| in the partial ordering.
  // If |LHS| and |RHS| have the same rank, the traversal order determines the
  // order (the order is stable).
  bool compare(const BasicBlock *LHS, const BasicBlock *RHS) const;

  // Visit the function starting from the basic block |Start|, calling |Op| on
  // each visited BB. This traversal ignores back-edges, meaning it won't visit
  // any node of which |Start| is not an ancestor.
  // If |Op| returns true, the visitor continues. If |Op| returns false, the
  // visitor stops at that rank. This means that if two nodes share the same
  // rank and |Op| returns false when visiting the first, the second will still
  // be visited afterwards, but none of their successors will be.
  void partialOrderVisit(BasicBlock &Start,
                         std::function<bool(BasicBlock *)> Op);
};
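
// Illustrative usage sketch (not part of the API): visit every block reachable
// from the entry block, pruning descent once a hypothetical processBlock
// callback fails.
//
//   PartialOrderingVisitor Visitor(F);
//   Visitor.partialOrderVisit(F.getEntryBlock(), [&](BasicBlock *BB) {
//     return processBlock(BB); // Returning false stops past this rank.
//   });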

namespace SPIRV {
struct FPFastMathDefaultInfo {
  const Type *Ty = nullptr;
  unsigned FastMathFlags = 0;
  // With SPV_KHR_float_controls2, the ContractionOff and
  // SignedZeroInfNanPreserve execution modes are deprecated, and we replace
  // them with FPFastMathDefault with the appropriate flags instead. However,
  // we have no guarantee about the order in which we will process execution
  // modes. Therefore it could happen that we first process ContractionOff,
  // setting the AllowContraction bit to 0, and then process FPFastMathDefault,
  // enabling the AllowContraction bit, effectively invalidating
  // ContractionOff. Because of that, it's best to keep separate bits for the
  // different execution modes, and we will try to combine them later when we
  // emit OpExecutionMode instructions.
  bool ContractionOff = false;
  bool SignedZeroInfNanPreserve = false;
  bool FPFastMathDefault = false;

  FPFastMathDefaultInfo() = default;
  FPFastMathDefaultInfo(const Type *Ty, unsigned FastMathFlags)
      : Ty(Ty), FastMathFlags(FastMathFlags) {}
  bool operator==(const FPFastMathDefaultInfo &Other) const {
    return Ty == Other.Ty && FastMathFlags == Other.FastMathFlags &&
           ContractionOff == Other.ContractionOff &&
           SignedZeroInfNanPreserve == Other.SignedZeroInfNanPreserve &&
           FPFastMathDefault == Other.FPFastMathDefault;
  }
};

struct FPFastMathDefaultInfoVector
    : public SmallVector<SPIRV::FPFastMathDefaultInfo, 3> {
  static size_t computeFPFastMathDefaultInfoVecIndex(size_t BitWidth) {
    switch (BitWidth) {
    case 16: // half
      return 0;
    case 32: // float
      return 1;
    case 64: // double
      return 2;
    default:
      report_fatal_error("Expected BitWidth to be 16, 32, 64", false);
    }
    llvm_unreachable(
        "Unreachable code in computeFPFastMathDefaultInfoVecIndex");
  }
};
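
// Illustrative example: pick the per-type entry by floating-point bit width.
//
//   FPFastMathDefaultInfoVector Vec; // one entry each for half, float, double
//   size_t Idx =
//       FPFastMathDefaultInfoVector::computeFPFastMathDefaultInfoVecIndex(32);
//   // Idx == 1, i.e. the float entry.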

// These functions restore the original aggregate types of function arguments
// and return values for composite cases. They are i32 during the translation
// to cope with aggregate flattening etc., but the final types should still be
// aggregate.
FunctionType *getOriginalFunctionType(const Function &F);
FunctionType *getOriginalFunctionType(const CallBase &CB);
} // namespace SPIRV

// Add the given string as a series of integer operands, inserting null
// terminators and padding to make sure the operands form 32-bit little-endian
// words.
void addStringImm(const StringRef &Str, MCInst &Inst);
void addStringImm(const StringRef &Str, MachineInstrBuilder &MIB);
void addStringImm(const StringRef &Str, IRBuilder<> &B,
                  std::vector<Value *> &Args);

// Read the series of integer operands back as a null-terminated string using
// the reverse of the logic in addStringImm.
std::string getStringImm(const MachineInstr &MI, unsigned StartIndex);
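
// A minimal sketch of the word packing described above (an assumption drawn
// from the comments, not the actual implementation):
//
//   for (unsigned I = 0; I <= Str.size(); I += 4) { // <= so a null is emitted.
//     uint32_t Word = 0;
//     for (unsigned J = 0; J < 4 && I + J < Str.size(); ++J)
//       Word |= uint32_t(uint8_t(Str[I + J])) << (J * 8); // little-endian
//     MIB.addImm(Word);
//   }
//
// For example, "Hi!" packs into the single word 0x00216948.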

// Returns the string constant that the register refers to. It is assumed that
// Reg is a global value that contains a string.
std::string getStringValueFromReg(Register Reg, MachineRegisterInfo &MRI);

// Add the given numerical immediate to MIB.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB);

// Add an OpName instruction for the given target register.
void buildOpName(Register Target, const StringRef &Name,
                 MachineIRBuilder &MIRBuilder);
void buildOpName(Register Target, const StringRef &Name, MachineInstr &I,
                 const SPIRVInstrInfo &TII);

// Add an OpDecorate instruction for the given Reg.
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder,
                     SPIRV::Decoration::Decoration Dec,
                     const std::vector<uint32_t> &DecArgs,
                     StringRef StrImm = "");
void buildOpDecorate(Register Reg, MachineInstr &I, const SPIRVInstrInfo &TII,
                     SPIRV::Decoration::Decoration Dec,
                     const std::vector<uint32_t> &DecArgs,
                     StringRef StrImm = "");

// Add an OpMemberDecorate instruction for the given Reg.
void buildOpMemberDecorate(Register Reg, MachineIRBuilder &MIRBuilder,
                           SPIRV::Decoration::Decoration Dec, uint32_t Member,
                           const std::vector<uint32_t> &DecArgs,
                           StringRef StrImm = "");
void buildOpMemberDecorate(Register Reg, MachineInstr &I,
                           const SPIRVInstrInfo &TII,
                           SPIRV::Decoration::Decoration Dec, uint32_t Member,
                           const std::vector<uint32_t> &DecArgs,
                           StringRef StrImm = "");

// Add OpDecorate instructions from the "spirv.Decorations" metadata node.
void buildOpSpirvDecorations(Register Reg, MachineIRBuilder &MIRBuilder,
                             const MDNode *GVarMD, const SPIRVSubtarget &ST);

// Return a valid position for the OpVariable instruction inside a function,
// i.e., at the beginning of the first block of the function.
MachineBasicBlock::iterator getOpVariableMBBIt(MachineInstr &I);

// Return a valid insertion position at the end of the block, before
// terminators and debug instructions.
MachineBasicBlock::iterator getInsertPtValidEnd(MachineBasicBlock *MBB);

// Returns true if a pointer to the storage class can be cast to/from a
// pointer to the Generic storage class.
constexpr bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

// Convert a SPIR-V storage class to the corresponding LLVM IR address space.
// TODO: maybe the following two functions should be handled in the subtarget
// to allow for different OpenCL vs Vulkan handling.
constexpr unsigned
storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Function:
    return 0;
  case SPIRV::StorageClass::CrossWorkgroup:
    return 1;
  case SPIRV::StorageClass::UniformConstant:
    return 2;
  case SPIRV::StorageClass::Workgroup:
    return 3;
  case SPIRV::StorageClass::Generic:
    return 4;
  case SPIRV::StorageClass::DeviceOnlyINTEL:
    return 5;
  case SPIRV::StorageClass::HostOnlyINTEL:
    return 6;
  case SPIRV::StorageClass::Input:
    return 7;
  case SPIRV::StorageClass::Output:
    return 8;
  case SPIRV::StorageClass::CodeSectionINTEL:
    return 9;
  case SPIRV::StorageClass::Private:
    return 10;
  case SPIRV::StorageClass::StorageBuffer:
    return 11;
  case SPIRV::StorageClass::Uniform:
    return 12;
  case SPIRV::StorageClass::PushConstant:
    return 13;
  default:
    report_fatal_error("Unable to get address space id");
  }
}

// Convert an LLVM IR address space to a SPIR-V storage class.
SPIRV::StorageClass::StorageClass
addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI);

SPIRV::MemorySemantics::MemorySemantics
getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC);

SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord);

SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id);

// Find the def instruction for the given ConstReg, walking through
// spv_track_constant and ASSIGN_TYPE instructions. Updates ConstReg to the
// def register of the OpConstant instruction.
MachineInstr *getDefInstrMaybeConstant(Register &ConstReg,
                                       const MachineRegisterInfo *MRI);

// Get constant integer value of the given ConstReg.
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI);

// Get constant integer value of the given ConstReg, sign-extended.
int64_t getIConstValSext(Register ConstReg, const MachineRegisterInfo *MRI);

// Check if MI is a SPIR-V specific intrinsic call.
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID);
// Check if it's a SPIR-V specific intrinsic call.
bool isSpvIntrinsic(const Value *Arg);

// Get type of i-th operand of the metadata node.
Type *getMDOperandAsType(const MDNode *N, unsigned I);

// If OpenCL or SPIR-V builtin function name is recognized, return a demangled
// name, otherwise return an empty string.
std::string getOclOrSpirvBuiltinDemangledName(StringRef Name);

// Check if a string contains a builtin prefix.
bool hasBuiltinTypePrefix(StringRef Name);

// Check if given LLVM type is a special opaque builtin type.
bool isSpecialOpaqueType(const Type *Ty);

// Check if the function is a SPIR-V entry point.
bool isEntryPoint(const Function &F);

// Parse a basic scalar type name from the TypeName substring and return the
// LLVM type.
Type *parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx);

// Sort blocks in a partial ordering, so each block is after all its
// dominators. This should match both the SPIR-V and the MIR requirements.
// Returns true if the function was changed.
bool sortBlocks(Function &F);

// Check for peeled array structs and recursively reconstitute them. In HLSL
// CBuffers, arrays may have padding between the elements, but not after the
// last element, so in LLVM IR an array [N x T] is represented as
// {[N-1 x {T, spirv.Padding}], T}. The function matchPeeledArrayPattern
// recognizes this pattern, retrieving the element type {T, spirv.Padding} and
// the size N.
bool matchPeeledArrayPattern(const StructType *Ty, Type *&OriginalElementType,
                             uint64_t &TotalSize);

// This function turns the type {[N-1 x {T, spirv.Padding}], T} back into
// [N x {T, spirv.Padding}] so that it can be translated into SPIR-V. The
// offset decorations will be such that there is no padding after the array
// where relevant.
Type *reconstitutePeeledArrayType(Type *Ty);
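
// Illustrative example (using the notation from the comments above): a
// cbuffer array float A[3] with padded elements may arrive as
//   {[2 x {float, spirv.Padding}], float}
// matchPeeledArrayPattern then yields OriginalElementType =
// {float, spirv.Padding} and TotalSize = 3, and reconstitutePeeledArrayType
// rebuilds [3 x {float, spirv.Padding}].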

inline bool hasInitializer(const GlobalVariable *GV) {
  return GV->hasInitializer() && !isa<UndefValue>(GV->getInitializer());
}

// True if this is an instance of TypedPointerType.
inline bool isTypedPointerTy(const Type *T) {
  return T && T->getTypeID() == Type::TypedPointerTyID;
}

// True if this is an instance of PointerType.
inline bool isUntypedPointerTy(const Type *T) {
  return T && T->getTypeID() == Type::PointerTyID;
}

// True if this is an instance of PointerType or TypedPointerType.
inline bool isPointerTy(const Type *T) {
  return isUntypedPointerTy(T) || isTypedPointerTy(T);
}

// Get the address space of this pointer or pointer vector type for instances
// of PointerType or TypedPointerType.
inline unsigned getPointerAddressSpace(const Type *T) {
  Type *SubT = T->getScalarType();
  return SubT->getTypeID() == Type::PointerTyID
             ? cast<PointerType>(SubT)->getAddressSpace()
             : cast<TypedPointerType>(SubT)->getAddressSpace();
}

// Return true if the Argument is decorated with a pointee type.
inline bool hasPointeeTypeAttr(Argument *Arg) {
  return Arg->hasByValAttr() || Arg->hasByRefAttr() || Arg->hasStructRetAttr();
}

// Return the pointee type of the argument, or nullptr if there is none.
inline Type *getPointeeTypeByAttr(Argument *Arg) {
  if (Arg->hasByValAttr())
    return Arg->getParamByValType();
  if (Arg->hasStructRetAttr())
    return Arg->getParamStructRetType();
  if (Arg->hasByRefAttr())
    return Arg->getParamByRefType();
  return nullptr;
}

inline Type *reconstructFunctionType(Function *F) {
  SmallVector<Type *> ArgTys;
  for (unsigned i = 0; i < F->arg_size(); ++i)
    ArgTys.push_back(F->getArg(i)->getType());
  return FunctionType::get(F->getReturnType(), ArgTys, F->isVarArg());
}

#define TYPED_PTR_TARGET_EXT_NAME "spirv.$TypedPointerType"
inline Type *getTypedPointerWrapper(Type *ElemTy, unsigned AS) {
  return TargetExtType::get(ElemTy->getContext(), TYPED_PTR_TARGET_EXT_NAME,
                            {ElemTy}, {AS});
}

inline bool isTypedPointerWrapper(const TargetExtType *ExtTy) {
  return ExtTy->getName() == TYPED_PTR_TARGET_EXT_NAME &&
         ExtTy->getNumIntParameters() == 1 &&
         ExtTy->getNumTypeParameters() == 1;
}
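
// Illustrative example: wrapping i32 in address space 1 produces the target
// extension type target("spirv.$TypedPointerType", i32, 1), which the
// predicate above recognizes (Ctx is an assumed LLVMContext):
//
//   Type *Wrapped = getTypedPointerWrapper(Type::getInt32Ty(Ctx), 1);
//   bool IsWrapper = isTypedPointerWrapper(cast<TargetExtType>(Wrapped));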

// True if this is an instance of PointerType, TypedPointerType, or the typed
// pointer wrapper.
inline bool isPointerTyOrWrapper(const Type *Ty) {
  if (auto *ExtTy = dyn_cast<TargetExtType>(Ty))
    return isTypedPointerWrapper(ExtTy);
  return isPointerTy(Ty);
}

inline Type *applyWrappers(Type *Ty) {
  if (auto *ExtTy = dyn_cast<TargetExtType>(Ty)) {
    if (isTypedPointerWrapper(ExtTy))
      return TypedPointerType::get(applyWrappers(ExtTy->getTypeParameter(0)),
                                   ExtTy->getIntParameter(0));
  } else if (auto *VecTy = dyn_cast<VectorType>(Ty)) {
    Type *ElemTy = VecTy->getElementType();
    Type *NewElemTy = ElemTy->isTargetExtTy() ? applyWrappers(ElemTy) : ElemTy;
    if (NewElemTy != ElemTy)
      return VectorType::get(NewElemTy, VecTy->getElementCount());
  }
  return Ty;
}

inline Type *getPointeeType(const Type *Ty) {
  if (Ty) {
    if (auto *PType = dyn_cast<TypedPointerType>(Ty))
      return PType->getElementType();
    else if (auto *ExtTy = dyn_cast<TargetExtType>(Ty))
      if (isTypedPointerWrapper(ExtTy))
        return ExtTy->getTypeParameter(0);
  }
  return nullptr;
}

inline bool isUntypedEquivalentToTyExt(Type *Ty1, Type *Ty2) {
  if (!isUntypedPointerTy(Ty1) || !Ty2)
    return false;
  if (auto *ExtTy = dyn_cast<TargetExtType>(Ty2))
    if (isTypedPointerWrapper(ExtTy) &&
        ExtTy->getTypeParameter(0) ==
            IntegerType::getInt8Ty(Ty1->getContext()) &&
        ExtTy->getIntParameter(0) == cast<PointerType>(Ty1)->getAddressSpace())
      return true;
  return false;
}

inline bool isEquivalentTypes(Type *Ty1, Type *Ty2) {
  return isUntypedEquivalentToTyExt(Ty1, Ty2) ||
         isUntypedEquivalentToTyExt(Ty2, Ty1);
}

inline Type *toTypedPointer(Type *Ty) {
  if (Type *NewTy = applyWrappers(Ty); NewTy != Ty)
    return NewTy;
  return isUntypedPointerTy(Ty)
             ? TypedPointerType::get(IntegerType::getInt8Ty(Ty->getContext()),
                                     getPointerAddressSpace(Ty))
             : Ty;
}

inline Type *toTypedFunPointer(FunctionType *FTy) {
  Type *OrigRetTy = FTy->getReturnType();
  Type *RetTy = toTypedPointer(OrigRetTy);
  bool IsUntypedPtr = false;
  for (Type *PTy : FTy->params()) {
    if (isUntypedPointerTy(PTy)) {
      IsUntypedPtr = true;
      break;
    }
  }
  if (!IsUntypedPtr && RetTy == OrigRetTy)
    return FTy;
  SmallVector<Type *> ParamTys;
  for (Type *PTy : FTy->params())
    ParamTys.push_back(toTypedPointer(PTy));
  return FunctionType::get(RetTy, ParamTys, FTy->isVarArg());
}

inline const Type *unifyPtrType(const Type *Ty) {
  if (auto *FTy = dyn_cast<FunctionType>(Ty))
    return toTypedFunPointer(const_cast<FunctionType *>(FTy));
  return toTypedPointer(const_cast<Type *>(Ty));
}

inline bool isVector1(Type *Ty) {
  auto *FVTy = dyn_cast<FixedVectorType>(Ty);
  return FVTy && FVTy->getNumElements() == 1;
}

// Modify an LLVM type to conform with future transformations in IRTranslator.
// At the moment the only use case is a <1 x Type> vector; extend when/if
// needed.
inline Type *normalizeType(Type *Ty) {
  auto *FVTy = dyn_cast<FixedVectorType>(Ty);
  if (!FVTy || FVTy->getNumElements() != 1)
    return Ty;
  // If it's a <1 x Type> vector type, replace it by the element type, because
  // it's not a legal vector type in LLT and IRTranslator will eventually
  // represent it as the scalar.
  return normalizeType(FVTy->getElementType());
}
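
// Illustrative example (Ctx is an assumed LLVMContext):
//
//   normalizeType(FixedVectorType::get(Type::getFloatTy(Ctx), 1)); // -> float
//   normalizeType(Type::getInt32Ty(Ctx));                          // -> i32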

inline PoisonValue *getNormalizedPoisonValue(Type *Ty) {
  return PoisonValue::get(normalizeType(Ty));
}

inline MetadataAsValue *buildMD(Value *Arg) {
  LLVMContext &Ctx = Arg->getContext();
  return MetadataAsValue::get(
      Ctx, MDNode::get(Ctx, ValueAsMetadata::getConstant(Arg)));
}

CallInst *buildIntrWithMD(Intrinsic::ID IntrID, ArrayRef<Type *> Types,
                          Value *Arg, Value *Arg2, ArrayRef<Constant *> Imms,
                          IRBuilder<> &B);

MachineInstr *getVRegDef(MachineRegisterInfo &MRI, Register Reg);

#define SPIRV_BACKEND_SERVICE_FUN_NAME "__spirv_backend_service_fun"
bool getVacantFunctionName(Module &M, std::string &Name);

void setRegClassType(Register Reg, const Type *Ty, SPIRVGlobalRegistry *GR,
                     MachineIRBuilder &MIRBuilder,
                     SPIRV::AccessQualifier::AccessQualifier AccessQual,
                     bool EmitIR, bool Force = false);
void setRegClassType(Register Reg, const MachineInstr *SpvType,
                     SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI,
                     const MachineFunction &MF, bool Force = false);
Register createVirtualRegister(const MachineInstr *SpvType,
                               SPIRVGlobalRegistry *GR,
                               MachineRegisterInfo *MRI,
                               const MachineFunction &MF);
Register createVirtualRegister(const MachineInstr *SpvType,
                               SPIRVGlobalRegistry *GR,
                               MachineIRBuilder &MIRBuilder);
Register createVirtualRegister(
    const Type *Ty, SPIRVGlobalRegistry *GR, MachineIRBuilder &MIRBuilder,
    SPIRV::AccessQualifier::AccessQualifier AccessQual, bool EmitIR);

// Return true if there is an opaque pointer type nested in the argument.
bool isNestedPointer(const Type *Ty);

enum FPDecorationId { NONE, RTE, RTZ, RTP, RTN, SAT };

inline FPDecorationId demangledPostfixToDecorationId(const std::string &S) {
  static std::unordered_map<std::string, FPDecorationId> Mapping = {
      {"rte", FPDecorationId::RTE},
      {"rtz", FPDecorationId::RTZ},
      {"rtp", FPDecorationId::RTP},
      {"rtn", FPDecorationId::RTN},
      {"sat", FPDecorationId::SAT}};
  auto It = Mapping.find(S);
  return It == Mapping.end() ? FPDecorationId::NONE : It->second;
}
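
// Illustrative example: demangledPostfixToDecorationId("rtz") returns
// FPDecorationId::RTZ, while an unrecognized postfix such as "xyz" yields
// FPDecorationId::NONE.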

SmallVector<MachineInstr *, 4>
createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode,
                            unsigned MinWC, unsigned ContinuedOpcode,
                            ArrayRef<Register> Args, Register ReturnRegister,
                            Register TypeID);

// Instruction selection directed by type folding.
const std::set<unsigned> &getTypeFoldingSupportedOpcodes();
bool isTypeFoldingSupported(unsigned Opcode);

// Get loop controls from llvm.loop metadata.
SmallVector<unsigned, 1> getSpirvLoopControlOperandsFromLoopMetadata(Loop *L);
SmallVector<unsigned, 1>
getSpirvLoopControlOperandsFromLoopMetadata(MDNode *LoopMD);

// Traversing [g]MIR accounting for pseudo-instructions.
MachineInstr *passCopy(MachineInstr *Def, const MachineRegisterInfo *MRI);
MachineInstr *getDef(const MachineOperand &MO, const MachineRegisterInfo *MRI);
MachineInstr *getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI);
int64_t foldImm(const MachineOperand &MO, const MachineRegisterInfo *MRI);
unsigned getArrayComponentCount(const MachineRegisterInfo *MRI,
                                const MachineInstr *ResType);
MachineBasicBlock::iterator
getFirstValidInstructionInsertPoint(MachineBasicBlock &BB);

std::optional<SPIRV::LinkageType::LinkageType>
getSpirvLinkageTypeFor(const SPIRVSubtarget &ST, const GlobalValue &GV);
} // namespace llvm
#endif // LLVM_LIB_TARGET_SPIRV_SPIRVUTILS_H