//===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the IRBuilder class, which is used as a convenient way
// to create LLVM instructions with a consistent and simplified interface.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/IRBuilder.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

using namespace llvm;

/// CreateGlobalString - Make a new global variable whose initializer is an
/// array of i8 filled with the specified nul-terminated string value. If
/// Name is specified, it is the name of the global variable created.
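/// For example, CreateGlobalString("hi") emits IR along the lines of:
///   @str = private unnamed_addr constant [3 x i8] c"hi\00"
/// (the name @str is illustrative; the global is unnamed by default).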
GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
                                                  const Twine &Name,
                                                  unsigned AddressSpace,
                                                  Module *M, bool AddNull) {
  Constant *StrConstant = ConstantDataArray::getString(Context, Str, AddNull);
  if (!M)
    M = BB->getParent()->getParent();
  auto *GV = new GlobalVariable(
      *M, StrConstant->getType(), true, GlobalValue::PrivateLinkage,
      StrConstant, Name, nullptr, GlobalVariable::NotThreadLocal, AddressSpace);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(M->getDataLayout().getPrefTypeAlign(getInt8Ty()));
  return GV;
}

Type *IRBuilderBase::getCurrentFunctionReturnType() const {
  assert(BB && BB->getParent() && "No current function!");
  return BB->getParent()->getReturnType();
}

DebugLoc IRBuilderBase::getCurrentDebugLocation() const { return StoredDL; }
void IRBuilderBase::SetInstDebugLocation(Instruction *I) const {
  // We prefer to set our current debug location if any has been set, but if
  // our debug location is empty and I has a valid location, we shouldn't
  // overwrite it.
  I->setDebugLoc(StoredDL.orElse(I->getDebugLoc()));
}

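/// Cast \p V to \p DestTy, recursing element-by-element through struct and
/// array aggregates and bit- or pointer-casting the scalar leaves. The two
/// types must have matching shapes (asserted below).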
Value *IRBuilderBase::CreateAggregateCast(Value *V, Type *DestTy) {
  Type *SrcTy = V->getType();
  if (SrcTy == DestTy)
    return V;

  if (SrcTy->isAggregateType()) {
    unsigned NumElements;
    if (SrcTy->isStructTy()) {
      assert(DestTy->isStructTy() && "Expected StructType");
      assert(SrcTy->getStructNumElements() == DestTy->getStructNumElements() &&
             "Expected StructTypes with equal number of elements");
      NumElements = SrcTy->getStructNumElements();
    } else {
      assert(SrcTy->isArrayTy() && DestTy->isArrayTy() && "Expected ArrayType");
      assert(SrcTy->getArrayNumElements() == DestTy->getArrayNumElements() &&
             "Expected ArrayTypes with equal number of elements");
      NumElements = SrcTy->getArrayNumElements();
    }

    Value *Result = PoisonValue::get(DestTy);
    for (unsigned I = 0; I < NumElements; ++I) {
      Type *ElementTy = SrcTy->isStructTy() ? DestTy->getStructElementType(I)
                                            : DestTy->getArrayElementType();
      Value *Element =
          CreateAggregateCast(CreateExtractValue(V, ArrayRef(I)), ElementTy);

      Result = CreateInsertValue(Result, Element, ArrayRef(I));
    }
    return Result;
  }

  return CreateBitOrPointerCast(V, DestTy);
}

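/// Convert \p V to type \p NewTy through a chain of casts that preserves the
/// underlying bits: bitcasts where possible, ptrtoint/inttoptr pairs for
/// pointer/integer conversions, and insert/extract of subvectors when mixing
/// fixed and scalable vector types of equal size.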
Value *IRBuilderBase::CreateBitPreservingCastChain(const DataLayout &DL,
                                                   Value *V, Type *NewTy) {
  Type *OldTy = V->getType();

  if (OldTy == NewTy)
    return V;

  assert(!(isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) &&
         "Integer types must be the exact same to convert.");

  // A variant of bitcast that supports a mixture of fixed and scalable types
  // that are known to have the same size.
  auto CreateBitCastLike = [this](Value *In, Type *Ty) -> Value * {
    Type *InTy = In->getType();
    if (InTy == Ty)
      return In;

    if (isa<FixedVectorType>(InTy) && isa<ScalableVectorType>(Ty)) {
      // For vscale_range(2) expand <4 x i32> to <vscale x 4 x i16> -->
      // <4 x i32> to <vscale x 2 x i32> to <vscale x 4 x i16>
      auto *VTy = VectorType::getWithSizeAndScalar(cast<VectorType>(Ty), InTy);
      return CreateBitCast(
          CreateInsertVector(VTy, PoisonValue::get(VTy), In, getInt64(0)), Ty);
    }

    if (isa<ScalableVectorType>(InTy) && isa<FixedVectorType>(Ty)) {
      // For vscale_range(2) expand <vscale x 4 x i16> to <4 x i32> -->
      // <vscale x 4 x i16> to <vscale x 2 x i32> to <4 x i32>
      auto *VTy = VectorType::getWithSizeAndScalar(cast<VectorType>(InTy), Ty);
      return CreateExtractVector(Ty, CreateBitCast(In, VTy), getInt64(0));
    }

    return CreateBitCast(In, Ty);
  };

  // See if we need inttoptr for this type pair. May require additional bitcast.
  if (OldTy->isIntOrIntVectorTy() && NewTy->isPtrOrPtrVectorTy()) {
    // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8*
    // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*>
    // Expand <4 x i32> to <2 x i8*> --> <4 x i32> to <2 x i64> to <2 x i8*>
    // Directly handle i64 to i8*
    return CreateIntToPtr(CreateBitCastLike(V, DL.getIntPtrType(NewTy)), NewTy);
  }

  // See if we need ptrtoint for this type pair. May require additional bitcast.
  if (OldTy->isPtrOrPtrVectorTy() && NewTy->isIntOrIntVectorTy()) {
    // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128
    // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32>
    // Expand <2 x i8*> to <4 x i32> --> <2 x i8*> to <2 x i64> to <4 x i32>
    // Expand i8* to i64 --> i8* to i64 to i64
    return CreateBitCastLike(CreatePtrToInt(V, DL.getIntPtrType(OldTy)), NewTy);
  }

  if (OldTy->isPtrOrPtrVectorTy() && NewTy->isPtrOrPtrVectorTy()) {
    unsigned OldAS = OldTy->getPointerAddressSpace();
    unsigned NewAS = NewTy->getPointerAddressSpace();
    // To convert pointers between different address spaces (which the caller
    // has already verified to be convertible, i.e. they have the same pointer
    // size), we cannot use `bitcast` (which requires both operands to be in
    // the same address space) or `addrspacecast` (which is not always a no-op
    // cast). Instead, use a pair of no-op `ptrtoint`/`inttoptr` casts through
    // an integer with the same bit size.
    if (OldAS != NewAS) {
      return CreateIntToPtr(
          CreateBitCastLike(CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
                            DL.getIntPtrType(NewTy)),
          NewTy);
    }
  }

  return CreateBitCastLike(V, NewTy);
}

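/// Shared helper for intrinsic creation: emit a call to \p Callee and, when
/// the result is an FP math operator, apply fast-math flags taken from
/// \p FMFSource (or the builder's current flags when no source is given).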
CallInst *
IRBuilderBase::createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
                                const Twine &Name, FMFSource FMFSource,
                                ArrayRef<OperandBundleDef> OpBundles) {
  CallInst *CI = CreateCall(Callee, Ops, OpBundles, Name);
  if (isa<FPMathOperator>(CI))
    CI->setFastMathFlags(FMFSource.get(FMF));
  return CI;
}

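/// Emit a value of type \p Ty equal to vscale * \p Scale, using a
/// no-unsigned-wrap multiply when \p Scale is not 1.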
static Value *CreateVScaleMultiple(IRBuilderBase &B, Type *Ty, uint64_t Scale) {
  Value *VScale = B.CreateVScale(Ty);
  if (Scale == 1)
    return VScale;

  return B.CreateNUWMul(VScale, ConstantInt::get(Ty, Scale));
}

Value *IRBuilderBase::CreateElementCount(Type *Ty, ElementCount EC) {
  if (EC.isFixed() || EC.isZero())
    return ConstantInt::get(Ty, EC.getKnownMinValue());

  return CreateVScaleMultiple(*this, Ty, EC.getKnownMinValue());
}

Value *IRBuilderBase::CreateTypeSize(Type *Ty, TypeSize Size) {
  if (Size.isFixed() || Size.isZero())
    return ConstantInt::get(Ty, Size.getKnownMinValue());

  return CreateVScaleMultiple(*this, Ty, Size.getKnownMinValue());
}

Value *IRBuilderBase::CreateAllocationSize(Type *DestTy, AllocaInst *AI) {
  const DataLayout &DL = BB->getDataLayout();
  TypeSize ElemSize = DL.getTypeAllocSize(AI->getAllocatedType());
  Value *Size = CreateTypeSize(DestTy, ElemSize);
  if (AI->isArrayAllocation())
    Size = CreateMul(CreateZExtOrTrunc(AI->getArraySize(), DestTy), Size);
  return Size;
}

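/// Build a step vector (0, 1, 2, ...) of type \p DstType. For a fixed vector
/// such as <4 x i32> this folds to the constant <i32 0, i32 1, i32 2, i32 3>;
/// scalable vectors go through the llvm.stepvector intrinsic.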
Value *IRBuilderBase::CreateStepVector(Type *DstType, const Twine &Name) {
  Type *STy = DstType->getScalarType();
  if (isa<ScalableVectorType>(DstType)) {
    Type *StepVecType = DstType;
    // TODO: We expect this special case (element type < 8 bits) to be
    // temporary - once the intrinsic properly supports < 8 bits this code
    // can be removed.
    if (STy->getScalarSizeInBits() < 8)
      StepVecType =
          VectorType::get(getInt8Ty(), cast<ScalableVectorType>(DstType));
    Value *Res = CreateIntrinsic(Intrinsic::stepvector, {StepVecType}, {},
                                 nullptr, Name);
    if (StepVecType != DstType)
      Res = CreateTrunc(Res, DstType);
    return Res;
  }

  unsigned NumEls = cast<FixedVectorType>(DstType)->getNumElements();

  // Create a vector of consecutive numbers from zero to VF.
  // It's okay if the values wrap around.
  SmallVector<Constant *, 8> Indices;
  for (unsigned i = 0; i < NumEls; ++i)
    Indices.push_back(
        ConstantInt::get(STy, i, /*IsSigned=*/false, /*ImplicitTrunc=*/true));

  // Add the consecutive indices to the vector value.
  return ConstantVector::get(Indices);
}

CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
                                      MaybeAlign Align, bool isVolatile,
                                      const AAMDNodes &AAInfo) {
  Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
  Type *Tys[] = {Ptr->getType(), Size->getType()};

  CallInst *CI = CreateIntrinsic(Intrinsic::memset, Tys, Ops);

  if (Align)
    cast<MemSetInst>(CI)->setDestAlignment(*Align);
  CI->setAAMetadata(AAInfo);
  return CI;
}

CallInst *IRBuilderBase::CreateMemSetInline(Value *Dst, MaybeAlign DstAlign,
                                            Value *Val, Value *Size,
                                            bool IsVolatile,
                                            const AAMDNodes &AAInfo) {
  Value *Ops[] = {Dst, Val, Size, getInt1(IsVolatile)};
  Type *Tys[] = {Dst->getType(), Size->getType()};

  CallInst *CI = CreateIntrinsic(Intrinsic::memset_inline, Tys, Ops);

  if (DstAlign)
    cast<MemSetInst>(CI)->setDestAlignment(*DstAlign);
  CI->setAAMetadata(AAInfo);
  return CI;
}

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
    Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize,
    const AAMDNodes &AAInfo) {

  Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
  Type *Tys[] = {Ptr->getType(), Size->getType()};

  CallInst *CI =
      CreateIntrinsic(Intrinsic::memset_element_unordered_atomic, Tys, Ops);

  cast<AnyMemSetInst>(CI)->setDestAlignment(Alignment);
  CI->setAAMetadata(AAInfo);
  return CI;
}

CallInst *IRBuilderBase::CreateMemTransferInst(Intrinsic::ID IntrID, Value *Dst,
                                               MaybeAlign DstAlign, Value *Src,
                                               MaybeAlign SrcAlign, Value *Size,
                                               bool isVolatile,
                                               const AAMDNodes &AAInfo) {
  assert((IntrID == Intrinsic::memcpy || IntrID == Intrinsic::memcpy_inline ||
          IntrID == Intrinsic::memmove) &&
         "Unexpected intrinsic ID");
  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};

  CallInst *CI = CreateIntrinsic(IntrID, Tys, Ops);

  auto *MCI = cast<MemTransferInst>(CI);
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);
  MCI->setAAMetadata(AAInfo);
  return CI;
}

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, const AAMDNodes &AAInfo) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};

  CallInst *CI =
      CreateIntrinsic(Intrinsic::memcpy_element_unordered_atomic, Tys, Ops);

  // Set the alignment of the pointer args.
  auto *AMCI = cast<AnyMemCpyInst>(CI);
  AMCI->setDestAlignment(DstAlign);
  AMCI->setSourceAlignment(SrcAlign);
  AMCI->setAAMetadata(AAInfo);
  return CI;
}

/// isConstantOne - Return true only if val is constant int 1
static bool isConstantOne(const Value *Val) {
  assert(Val && "isConstantOne does not work with nullptr Val");
  const ConstantInt *CVal = dyn_cast<ConstantInt>(Val);
  return CVal && CVal->isOne();
}

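/// For illustration: with IntPtrTy = i64, a constant ArraySize of 10, and an
/// AllocSize of i64 4, the default folder collapses the size multiply and the
/// emitted call is roughly:
///   %malloccall = tail call ptr @malloc(i64 40)
/// (%malloccall is an illustrative name; the caller-supplied Name is used.)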
CallInst *IRBuilderBase::CreateMalloc(Type *IntPtrTy, Type *AllocTy,
                                      Value *AllocSize, Value *ArraySize,
                                      ArrayRef<OperandBundleDef> OpB,
                                      Function *MallocF, const Twine &Name) {
  // malloc(type) becomes:
  //       i8* malloc(typeSize)
  // malloc(type, arraySize) becomes:
  //       i8* malloc(typeSize*arraySize)
  if (!ArraySize)
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy)
    ArraySize = CreateIntCast(ArraySize, IntPtrTy, false);

  if (!isConstantOne(ArraySize)) {
    if (isConstantOne(AllocSize)) {
      AllocSize = ArraySize; // Operand * 1 = Operand
    } else {
      // Multiply type size by the array size...
      AllocSize = CreateMul(ArraySize, AllocSize, "mallocsize");
    }
  }

  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  Module *M = BB->getParent()->getParent();
  Type *BPTy = PointerType::getUnqual(Context);
  FunctionCallee MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
  CallInst *MCall = CreateCall(MallocFunc, AllocSize, OpB, Name);

  MCall->setTailCall();
  if (Function *F = dyn_cast<Function>(MallocFunc.getCallee())) {
    MCall->setCallingConv(F->getCallingConv());
    F->setReturnDoesNotAlias();
  }

  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");

  return MCall;
}

CallInst *IRBuilderBase::CreateMalloc(Type *IntPtrTy, Type *AllocTy,
                                      Value *AllocSize, Value *ArraySize,
                                      Function *MallocF, const Twine &Name) {

  return CreateMalloc(IntPtrTy, AllocTy, AllocSize, ArraySize, {}, MallocF,
                      Name);
}

/// CreateFree - Generate the IR for a call to the builtin free function.
CallInst *IRBuilderBase::CreateFree(Value *Source,
                                    ArrayRef<OperandBundleDef> Bundles) {
  assert(Source->getType()->isPointerTy() &&
         "Cannot free something of nonpointer type!");

  Module *M = BB->getParent()->getParent();

  Type *VoidTy = Type::getVoidTy(M->getContext());
  Type *VoidPtrTy = PointerType::getUnqual(M->getContext());
  // prototype free as "void free(void*)"
  FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, VoidPtrTy);
  CallInst *Result = CreateCall(FreeFunc, Source, Bundles, "");
  Result->setTailCall();
  if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
    Result->setCallingConv(F->getCallingConv());

  return Result;
}

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, const AAMDNodes &AAInfo) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};

  CallInst *CI =
      CreateIntrinsic(Intrinsic::memmove_element_unordered_atomic, Tys, Ops);

  // Set the alignment of the pointer args.
  CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign));
  CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), SrcAlign));
  CI->setAAMetadata(AAInfo);
  return CI;
}

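/// Helper for the reduction creators below: emit a call to a unary vector
/// reduction intrinsic, overloaded on the type of the source vector \p Src.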
CallInst *IRBuilderBase::getReductionIntrinsic(Intrinsic::ID ID, Value *Src) {
  Value *Ops[] = {Src};
  Type *Tys[] = {Src->getType()};
  return CreateIntrinsic(ID, Tys, Ops);
}

CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
  Value *Ops[] = {Acc, Src};
  return CreateIntrinsic(Intrinsic::vector_reduce_fadd, {Src->getType()}, Ops);
}

CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
  Value *Ops[] = {Acc, Src};
  return CreateIntrinsic(Intrinsic::vector_reduce_fmul, {Src->getType()}, Ops);
}

CallInst *IRBuilderBase::CreateAddReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_add, Src);
}

CallInst *IRBuilderBase::CreateMulReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_mul, Src);
}

CallInst *IRBuilderBase::CreateAndReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_and, Src);
}

CallInst *IRBuilderBase::CreateOrReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_or, Src);
}

CallInst *IRBuilderBase::CreateXorReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_xor, Src);
}

CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax;
  return getReductionIntrinsic(ID, Src);
}

CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin;
  return getReductionIntrinsic(ID, Src);
}

CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmax, Src);
}

CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmin, Src);
}

CallInst *IRBuilderBase::CreateFPMaximumReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmaximum, Src);
}

CallInst *IRBuilderBase::CreateFPMinimumReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fminimum, Src);
}

CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.start only applies to pointers.");
  return CreateIntrinsic(Intrinsic::lifetime_start, {Ptr->getType()}, {Ptr});
}

CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.end only applies to pointers.");
  return CreateIntrinsic(Intrinsic::lifetime_end, {Ptr->getType()}, {Ptr});
}

CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {

  assert(isa<PointerType>(Ptr->getType()) &&
         "invariant.start only applies to pointers.");
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "invariant.start requires the size to be an i64");

  Value *Ops[] = {Size, Ptr};
  // Fill in the single overloaded type: memory object type.
  Type *ObjectPtr[1] = {Ptr->getType()};
  return CreateIntrinsic(Intrinsic::invariant_start, ObjectPtr, Ops);
}

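/// Return the known alignment of \p Ptr when it resolves to a global variable
/// (looking through aliases); otherwise return an empty MaybeAlign.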
static MaybeAlign getAlign(Value *Ptr) {
  if (auto *V = dyn_cast<GlobalVariable>(Ptr))
    return V->getAlign();
  if (auto *A = dyn_cast<GlobalAlias>(Ptr))
    return getAlign(A->getAliaseeObject());
  return {};
}

CallInst *IRBuilderBase::CreateThreadLocalAddress(Value *Ptr) {
  assert(isa<GlobalValue>(Ptr) && cast<GlobalValue>(Ptr)->isThreadLocal() &&
         "threadlocal_address only applies to thread local variables.");
  CallInst *CI = CreateIntrinsic(llvm::Intrinsic::threadlocal_address,
                                 {Ptr->getType()}, {Ptr});
  if (MaybeAlign A = getAlign(Ptr)) {
    CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), *A));
    CI->addRetAttr(Attribute::getWithAlignment(CI->getContext(), *A));
  }
  return CI;
}

CallInst *
IRBuilderBase::CreateAssumption(Value *Cond,
                                ArrayRef<OperandBundleDef> OpBundles) {
  assert(Cond->getType() == getInt1Ty() &&
         "an assumption condition must be of type i1");

  Value *Ops[] = {Cond};
  Module *M = BB->getParent()->getParent();
  Function *FnAssume = Intrinsic::getOrInsertDeclaration(M, Intrinsic::assume);
  return CreateCall(FnAssume, Ops, OpBundles);
}

Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) {
  return CreateIntrinsic(Intrinsic::experimental_noalias_scope_decl, {},
                         {Scope});
}

/// Create a call to a Masked Load intrinsic.
/// \p Ty        - vector type to load
/// \p Ptr       - base pointer for the load
/// \p Alignment - alignment of the source location
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
/// \p PassThru  - pass-through value that is used to fill the masked-off lanes
///                of the result
/// \p Name      - name of the result variable
CallInst *IRBuilderBase::CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment,
                                          Value *Mask, Value *PassThru,
                                          const Twine &Name) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  assert(Ty->isVectorTy() && "Type should be vector");
  assert(Mask && "Mask should not be all-ones (null)");
  if (!PassThru)
    PassThru = PoisonValue::get(Ty);
  Type *OverloadedTypes[] = {Ty, PtrTy};
  Value *Ops[] = {Ptr, Mask, PassThru};
  CallInst *CI =
      CreateMaskedIntrinsic(Intrinsic::masked_load, Ops, OverloadedTypes, Name);
  CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), Alignment));
  return CI;
}

/// Create a call to a Masked Store intrinsic.
/// \p Val       - data to be stored,
/// \p Ptr       - base pointer for the store
/// \p Alignment - alignment of the destination location
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
                                           Align Alignment, Value *Mask) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = Val->getType();
  assert(DataTy->isVectorTy() && "Val should be a vector");
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = {DataTy, PtrTy};
  Value *Ops[] = {Val, Ptr, Mask};
  CallInst *CI =
      CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
  CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), Alignment));
  return CI;
}

/// Create a call to a Masked intrinsic, with given intrinsic Id,
/// an array of operands - Ops, and an array of overloaded types -
/// OverloadedTypes.
CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
                                               ArrayRef<Value *> Ops,
                                               ArrayRef<Type *> OverloadedTypes,
                                               const Twine &Name) {
  return CreateIntrinsic(Id, OverloadedTypes, Ops, {}, Name);
}

/// Create a call to a Masked Gather intrinsic.
/// \p Ty        - vector type to gather
/// \p Ptrs      - vector of pointers for loading
/// \p Alignment - alignment for one element
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
/// \p PassThru  - pass-through value that is used to fill the masked-off lanes
///                of the result
/// \p Name      - name of the result variable
CallInst *IRBuilderBase::CreateMaskedGather(Type *Ty, Value *Ptrs,
                                            Align Alignment, Value *Mask,
                                            Value *PassThru,
                                            const Twine &Name) {
  auto *VecTy = cast<VectorType>(Ty);
  ElementCount NumElts = VecTy->getElementCount();
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  assert(NumElts == PtrsTy->getElementCount() && "Element count mismatch");

  if (!Mask)
    Mask = getAllOnesMask(NumElts);

  if (!PassThru)
    PassThru = PoisonValue::get(Ty);

  Type *OverloadedTypes[] = {Ty, PtrsTy};
  Value *Ops[] = {Ptrs, Mask, PassThru};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  CallInst *CI = CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops,
                                       OverloadedTypes, Name);
  CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), Alignment));
  return CI;
}

/// Create a call to a Masked Scatter intrinsic.
/// \p Data      - data to be stored,
/// \p Ptrs      - the vector of pointers, where the \p Data elements should be
///                stored
/// \p Alignment - alignment for one element
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
                                             Align Alignment, Value *Mask) {
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  auto *DataTy = cast<VectorType>(Data->getType());
  ElementCount NumElts = PtrsTy->getElementCount();

  if (!Mask)
    Mask = getAllOnesMask(NumElts);

  Type *OverloadedTypes[] = {DataTy, PtrsTy};
  Value *Ops[] = {Data, Ptrs, Mask};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  CallInst *CI =
      CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
  CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), Alignment));
  return CI;
}

/// Create a call to a Masked Expand Load intrinsic.
/// \p Ty       - vector type to load
/// \p Ptr      - base pointer for the load
/// \p Align    - alignment of \p Ptr
/// \p Mask     - vector of booleans which indicates what vector lanes should
///               be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
///               of the result
/// \p Name     - name of the result variable
CallInst *IRBuilderBase::CreateMaskedExpandLoad(Type *Ty, Value *Ptr,
                                                MaybeAlign Align, Value *Mask,
                                                Value *PassThru,
                                                const Twine &Name) {
  assert(Ty->isVectorTy() && "Type should be vector");
  assert(Mask && "Mask should not be all-ones (null)");
  if (!PassThru)
    PassThru = PoisonValue::get(Ty);
  Type *OverloadedTypes[] = {Ty};
  Value *Ops[] = {Ptr, Mask, PassThru};
  CallInst *CI = CreateMaskedIntrinsic(Intrinsic::masked_expandload, Ops,
                                       OverloadedTypes, Name);
  if (Align)
    CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), *Align));
  return CI;
}

/// Create a call to a Masked Compress Store intrinsic.
/// \p Val   - data to be stored,
/// \p Ptr   - base pointer for the store
/// \p Align - alignment of \p Ptr
/// \p Mask  - vector of booleans which indicates what vector lanes should
///            be accessed in memory
CallInst *IRBuilderBase::CreateMaskedCompressStore(Value *Val, Value *Ptr,
                                                   MaybeAlign Align,
                                                   Value *Mask) {
  Type *DataTy = Val->getType();
  assert(DataTy->isVectorTy() && "Val should be a vector");
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = {DataTy};
  Value *Ops[] = {Val, Ptr, Mask};
  CallInst *CI = CreateMaskedIntrinsic(Intrinsic::masked_compressstore, Ops,
                                       OverloadedTypes);
  if (Align)
    CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), *Align));
  return CI;
}

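/// Assemble the leading fixed arguments of a gc.statepoint: statepoint ID,
/// number of patch bytes, callee, call-argument count, and flags, followed by
/// the call arguments themselves and two zero counts for the transition and
/// deopt argument lists that are nowadays carried as operand bundles.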
template <typename T0>
static std::vector<Value *>
getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
                  Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) {
  std::vector<Value *> Args;
  Args.push_back(B.getInt64(ID));
  Args.push_back(B.getInt32(NumPatchBytes));
  Args.push_back(ActualCallee);
  Args.push_back(B.getInt32(CallArgs.size()));
  Args.push_back(B.getInt32(Flags));
  llvm::append_range(Args, CallArgs);
  // GC Transition and Deopt args are now always handled via operand bundle.
  // They will be removed from the signature of gc.statepoint shortly.
  Args.push_back(B.getInt32(0));
  Args.push_back(B.getInt32(0));
  // GC args are now encoded in the gc-live operand bundle
  return Args;
}

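/// Build the operand-bundle list for a gc.statepoint: a "deopt" bundle for
/// deoptimization state, a "gc-transition" bundle for transition arguments,
/// and a "gc-live" bundle for the live GC pointers, each emitted only when
/// the corresponding arguments are present.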
template <typename T1, typename T2, typename T3>
static std::vector<OperandBundleDef>
getStatepointBundles(std::optional<ArrayRef<T1>> TransitionArgs,
                     std::optional<ArrayRef<T2>> DeoptArgs,
                     ArrayRef<T3> GCArgs) {
  std::vector<OperandBundleDef> Rval;
  if (DeoptArgs)
    Rval.emplace_back("deopt", SmallVector<Value *, 16>(*DeoptArgs));
  if (TransitionArgs)
    Rval.emplace_back("gc-transition",
                      SmallVector<Value *, 16>(*TransitionArgs));
  if (GCArgs.size())
    Rval.emplace_back("gc-live", SmallVector<Value *, 16>(GCArgs));
  return Rval;
}

template <typename T0, typename T1, typename T2, typename T3>
static CallInst *CreateGCStatepointCallCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    FunctionCallee ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
    std::optional<ArrayRef<T1>> TransitionArgs,
    std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic typed argument (the function is also vararg).
  Function *FnStatepoint = Intrinsic::getOrInsertDeclaration(
      M, Intrinsic::experimental_gc_statepoint,
      {ActualCallee.getCallee()->getType()});

  std::vector<Value *> Args = getStatepointArgs(
      *Builder, ID, NumPatchBytes, ActualCallee.getCallee(), Flags, CallArgs);

  CallInst *CI = Builder->CreateCall(
      FnStatepoint, Args,
      getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
  CI->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualCallee.getFunctionType()));
  return CI;
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    ArrayRef<Value *> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, std::nullopt /* No Transition Args */, DeoptArgs, GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    uint32_t Flags, ArrayRef<Value *> CallArgs,
    std::optional<ArrayRef<Use>> TransitionArgs,
    std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
      DeoptArgs, GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    ArrayRef<Use> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, std::nullopt, DeoptArgs, GCArgs, Name);
}

template <typename T0, typename T1, typename T2, typename T3>
static InvokeInst *CreateGCStatepointInvokeCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    FunctionCallee ActualInvokee, BasicBlock *NormalDest,
    BasicBlock *UnwindDest, uint32_t Flags, ArrayRef<T0> InvokeArgs,
    std::optional<ArrayRef<T1>> TransitionArgs,
    std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic typed argument (the function is also vararg).
  Function *FnStatepoint = Intrinsic::getOrInsertDeclaration(
      M, Intrinsic::experimental_gc_statepoint,
      {ActualInvokee.getCallee()->getType()});

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee.getCallee(),
                        Flags, InvokeArgs);

  InvokeInst *II = Builder->CreateInvoke(
      FnStatepoint, NormalDest, UnwindDest, Args,
      getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
  II->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualInvokee.getFunctionType()));
  return II;
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest,
    ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs,
      std::nullopt /* No Transition Args*/, DeoptArgs, GCArgs, Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
    ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Use>> TransitionArgs,
    std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
      InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
    std::optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, std::nullopt, DeoptArgs,
      GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
                                        Type *ResultType, const Twine &Name) {
  Intrinsic::ID ID = Intrinsic::experimental_gc_result;
  Type *Types[] = {ResultType};

  Value *Args[] = {Statepoint};
  return CreateIntrinsic(ID, Types, Args, {}, Name);
}

CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
                                          int BaseOffset, int DerivedOffset,
                                          Type *ResultType, const Twine &Name) {
  Type *Types[] = {ResultType};

  Value *Args[] = {Statepoint, getInt32(BaseOffset), getInt32(DerivedOffset)};
  return CreateIntrinsic(Intrinsic::experimental_gc_relocate, Types, Args, {},
                         Name);
}

CallInst *IRBuilderBase::CreateGCGetPointerBase(Value *DerivedPtr,
                                                const Twine &Name) {
  Type *PtrTy = DerivedPtr->getType();
  return CreateIntrinsic(Intrinsic::experimental_gc_get_pointer_base,
                         {PtrTy, PtrTy}, {DerivedPtr}, {}, Name);
}

CallInst *IRBuilderBase::CreateGCGetPointerOffset(Value *DerivedPtr,
                                                  const Twine &Name) {
  Type *PtrTy = DerivedPtr->getType();
  return CreateIntrinsic(Intrinsic::experimental_gc_get_pointer_offset, {PtrTy},
                         {DerivedPtr}, {}, Name);
}

CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
                                              FMFSource FMFSource,
                                              const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, {V->getType()});
  return createCallHelper(Fn, {V}, Name, FMFSource);
}

Value *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
                                            Value *RHS, FMFSource FMFSource,
                                            const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, {LHS->getType()});
  if (Value *V = Folder.FoldBinaryIntrinsic(ID, LHS, RHS, Fn->getReturnType(),
                                            /*FMFSource=*/nullptr))
    return V;
  return createCallHelper(Fn, {LHS, RHS}, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
                                         ArrayRef<Type *> Types,
                                         ArrayRef<Value *> Args,
                                         FMFSource FMFSource,
                                         const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, Types);
  return createCallHelper(Fn, Args, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateIntrinsic(Type *RetTy, Intrinsic::ID ID,
                                         ArrayRef<Value *> Args,
                                         FMFSource FMFSource,
                                         const Twine &Name) {
  Module *M = BB->getModule();

  SmallVector<Type *> ArgTys;
  ArgTys.reserve(Args.size());
  for (auto &I : Args)
    ArgTys.push_back(I->getType());

  Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, RetTy, ArgTys);
  return createCallHelper(Fn, Args, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateConstrainedFPBinOp(
    Intrinsic::ID ID, Value *L, Value *R, FMFSource FMFSource,
    const Twine &Name, MDNode *FPMathTag, std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *RoundingV = getConstrainedFPRounding(Rounding);
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMFSource.get(FMF);

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, RoundingV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}

CallInst *IRBuilderBase::CreateConstrainedFPIntrinsic(
    Intrinsic::ID ID, ArrayRef<Type *> Types, ArrayRef<Value *> Args,
    FMFSource FMFSource, const Twine &Name, MDNode *FPMathTag,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *RoundingV = getConstrainedFPRounding(Rounding);
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMFSource.get(FMF);

  llvm::SmallVector<Value *, 5> ExtArgs(Args);
  ExtArgs.push_back(RoundingV);
  ExtArgs.push_back(ExceptV);

  CallInst *C = CreateIntrinsic(ID, Types, ExtArgs, nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}

CallInst *IRBuilderBase::CreateConstrainedFPUnroundedBinOp(
    Intrinsic::ID ID, Value *L, Value *R, FMFSource FMFSource,
    const Twine &Name, MDNode *FPMathTag,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMFSource.get(FMF);

  CallInst *C =
      CreateIntrinsic(ID, {L->getType()}, {L, R, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}

Value *IRBuilderBase::CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
                                   const Twine &Name, MDNode *FPMathTag) {
  if (Instruction::isBinaryOp(Opc)) {
    assert(Ops.size() == 2 && "Invalid number of operands!");
    return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc),
                       Ops[0], Ops[1], Name, FPMathTag);
  }
  if (Instruction::isUnaryOp(Opc)) {
    assert(Ops.size() == 1 && "Invalid number of operands!");
    return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc),
                      Ops[0], Name, FPMathTag);
  }
  llvm_unreachable("Unexpected opcode!");
}

CallInst *IRBuilderBase::CreateConstrainedFPCast(
    Intrinsic::ID ID, Value *V, Type *DestTy, FMFSource FMFSource,
    const Twine &Name, MDNode *FPMathTag, std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMFSource.get(FMF);

  CallInst *C;
  if (Intrinsic::hasConstrainedFPRoundingModeOperand(ID)) {
    Value *RoundingV = getConstrainedFPRounding(Rounding);
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
                        nullptr, Name);
  } else
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
                        Name);

  setConstrainedFPCallAttr(C);

  if (isa<FPMathOperator>(C))
    setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}

Value *IRBuilderBase::CreateFCmpHelper(CmpInst::Predicate P, Value *LHS,
                                       Value *RHS, const Twine &Name,
                                       MDNode *FPMathTag, FMFSource FMFSource,
                                       bool IsSignaling) {
  if (IsFPConstrained) {
    auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
                          : Intrinsic::experimental_constrained_fcmp;
    return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
  }

  if (auto *V = Folder.FoldCmp(P, LHS, RHS))
    return V;
  return Insert(
      setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMFSource.get(FMF)),
      Name);
}

CallInst *IRBuilderBase::CreateConstrainedFPCmp(
    Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
    const Twine &Name, std::optional<fp::ExceptionBehavior> Except) {
  Value *PredicateV = getConstrainedFPPredicate(P);
  Value *ExceptV = getConstrainedFPExcept(Except);

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, PredicateV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  return C;
}

CallInst *IRBuilderBase::CreateConstrainedFPCall(
    Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  llvm::SmallVector<Value *, 6> UseArgs(Args);

  if (Intrinsic::hasConstrainedFPRoundingModeOperand(Callee->getIntrinsicID()))
    UseArgs.push_back(getConstrainedFPRounding(Rounding));
  UseArgs.push_back(getConstrainedFPExcept(Except));

  CallInst *C = CreateCall(Callee, UseArgs, Name);
  setConstrainedFPCallAttr(C);
  return C;
}

Value *IRBuilderBase::CreateSelectWithUnknownProfile(Value *C, Value *True,
                                                     Value *False,
                                                     StringRef PassName,
                                                     const Twine &Name) {
  Value *Ret = CreateSelectFMF(C, True, False, {}, Name);
  if (auto *SI = dyn_cast<SelectInst>(Ret)) {
    setExplicitlyUnknownBranchWeightsIfProfiled(*SI, PassName);
  }
  return Ret;
}

Value *IRBuilderBase::CreateSelectFMFWithUnknownProfile(Value *C, Value *True,
                                                        Value *False,
                                                        FMFSource FMFSource,
                                                        StringRef PassName,
                                                        const Twine &Name) {
  Value *Ret = CreateSelectFMF(C, True, False, FMFSource, Name);
  if (auto *SI = dyn_cast<SelectInst>(Ret))
    setExplicitlyUnknownBranchWeightsIfProfiled(*SI, PassName);
  return Ret;
}

Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
                                   const Twine &Name, Instruction *MDFrom) {
  return CreateSelectFMF(C, True, False, {}, Name, MDFrom);
}

Value *IRBuilderBase::CreateSelectFMF(Value *C, Value *True, Value *False,
                                      FMFSource FMFSource, const Twine &Name,
                                      Instruction *MDFrom) {
  if (auto *V = Folder.FoldSelect(C, True, False))
    return V;

  SelectInst *Sel = SelectInst::Create(C, True, False);
  if (MDFrom) {
    MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
    MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
    Sel = addBranchMetadata(Sel, Prof, Unpred);
  }
  if (isa<FPMathOperator>(Sel))
    setFPAttrs(Sel, /*MDNode=*/nullptr, FMFSource.get(FMF));
  return Insert(Sel, Name);
}

Value *IRBuilderBase::CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS,
                                    const Twine &Name) {
  assert(LHS->getType() == RHS->getType() &&
         "Pointer subtraction operand types must match!");
  Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
  Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
  Value *Difference = CreateSub(LHS_int, RHS_int);
  return CreateExactSDiv(Difference, ConstantExpr::getSizeOf(ElemTy),
                         Name);
}

Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "launder.invariant.group only applies to pointers.");
  auto *PtrType = Ptr->getType();
  Module *M = BB->getParent()->getParent();
  Function *FnLaunderInvariantGroup = Intrinsic::getOrInsertDeclaration(
      M, Intrinsic::launder_invariant_group, {PtrType});

  assert(FnLaunderInvariantGroup->getReturnType() == PtrType &&
         FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
             PtrType &&
         "LaunderInvariantGroup should take and return the same type");

  return CreateCall(FnLaunderInvariantGroup, {Ptr});
}

Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "strip.invariant.group only applies to pointers.");

  auto *PtrType = Ptr->getType();
  Module *M = BB->getParent()->getParent();
  Function *FnStripInvariantGroup = Intrinsic::getOrInsertDeclaration(
      M, Intrinsic::strip_invariant_group, {PtrType});

  assert(FnStripInvariantGroup->getReturnType() == PtrType &&
         FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
             PtrType &&
         "StripInvariantGroup should take and return the same type");

  return CreateCall(FnStripInvariantGroup, {Ptr});
}

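/// Reverse the lanes of vector \p V. Scalable vectors are handled with the
/// llvm.vector.reverse intrinsic; fixed vectors use a shufflevector whose
/// mask is <N-1, ..., 1, 0>.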
Value *IRBuilderBase::CreateVectorReverse(Value *V, const Twine &Name) {
  auto *Ty = cast<VectorType>(V->getType());
  if (isa<ScalableVectorType>(Ty)) {
    Module *M = BB->getParent()->getParent();
    Function *F =
        Intrinsic::getOrInsertDeclaration(M, Intrinsic::vector_reverse, Ty);
    return Insert(CallInst::Create(F, V), Name);
  }
  // Keep the original behaviour for fixed vectors.
  SmallVector<int, 8> ShuffleMask;
  int NumElts = Ty->getElementCount().getKnownMinValue();
  for (int i = 0; i < NumElts; ++i)
    ShuffleMask.push_back(NumElts - i - 1);
  return CreateShuffleVector(V, ShuffleMask, Name);
}

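/// Compute the shuffle mask that implements a splice of two fixed-width
/// vectors by \p Imm elements. For example, Imm = 1 with NumElts = 4 yields
/// the mask <1, 2, 3, 4>, i.e. drop the first lane of the first vector and
/// pull in the first lane of the second; Imm = -1 yields <3, 4, 5, 6>.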
| 1192 | static SmallVector<int, 8> getSpliceMask(int64_t Imm, unsigned NumElts) { |
| 1193 | unsigned Idx = (NumElts + Imm) % NumElts; |
| 1194 | SmallVector<int, 8> Mask; |
| 1195 | for (unsigned I = 0; I < NumElts; ++I) |
| 1196 | Mask.push_back(Elt: Idx + I); |
| 1197 | return Mask; |
| 1198 | } |

Value *IRBuilderBase::CreateVectorSpliceLeft(Value *V1, Value *V2,
                                             Value *Offset, const Twine &Name) {
  assert(isa<VectorType>(V1->getType()) && "Unexpected type");
  assert(V1->getType() == V2->getType() &&
         "Splice expects matching operand types!");

  // Emit a shufflevector for fixed vectors with a constant offset.
  if (auto *COffset = dyn_cast<ConstantInt>(Offset))
    if (auto *FVTy = dyn_cast<FixedVectorType>(V1->getType()))
      return CreateShuffleVector(
          V1, V2,
          getSpliceMask(COffset->getZExtValue(), FVTy->getNumElements()));

  return CreateIntrinsic(Intrinsic::vector_splice_left, V1->getType(),
                         {V1, V2, Offset}, {}, Name);
}

Value *IRBuilderBase::CreateVectorSpliceRight(Value *V1, Value *V2,
                                              Value *Offset,
                                              const Twine &Name) {
  assert(isa<VectorType>(V1->getType()) && "Unexpected type");
  assert(V1->getType() == V2->getType() &&
         "Splice expects matching operand types!");

  // Emit a shufflevector for fixed vectors with a constant offset.
  if (auto *COffset = dyn_cast<ConstantInt>(Offset))
    if (auto *FVTy = dyn_cast<FixedVectorType>(V1->getType()))
      return CreateShuffleVector(
          V1, V2,
          getSpliceMask(-COffset->getZExtValue(), FVTy->getNumElements()));

  return CreateIntrinsic(Intrinsic::vector_splice_right, V1->getType(),
                         {V1, V2, Offset}, {}, Name);
}
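
// Example (editorial sketch covering both splice helpers, with assumed
// Value *A and Value *Bv of type <4 x i32>): a constant offset of 1 turns
// B.CreateVectorSpliceLeft(A, Bv, B.getInt32(1)) into
//
//   shufflevector <4 x i32> %a, <4 x i32> %b,
//                 <4 x i32> <i32 1, i32 2, i32 3, i32 4>
//
// i.e. {a1, a2, a3, b0}; the right-splice variant negates the offset before
// building the mask. Scalable vectors and non-constant offsets fall through
// to the @llvm.vector.splice.left/right intrinsic calls instead.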

Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V,
                                        const Twine &Name) {
  auto EC = ElementCount::getFixed(NumElts);
  return CreateVectorSplat(EC, V, Name);
}

Value *IRBuilderBase::CreateVectorSplat(ElementCount EC, Value *V,
                                        const Twine &Name) {
  assert(EC.isNonZero() && "Cannot splat to an empty vector!");

  // First insert it into a poison vector so we can shuffle it.
  Value *Poison = PoisonValue::get(VectorType::get(V->getType(), EC));
  V = CreateInsertElement(Poison, V, getInt64(0), Name + ".splatinsert");

  // Shuffle the value across the desired number of elements.
  SmallVector<int, 16> Zeros;
  Zeros.resize(EC.getKnownMinValue());
  return CreateShuffleVector(V, Zeros, Name + ".splat");
}
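
// Example (editorial sketch): B.CreateVectorSplat(4, X, "x") with an i32 %x
// emits the canonical splat idiom:
//
//   %x.splatinsert = insertelement <4 x i32> poison, i32 %x, i64 0
//   %x.splat = shufflevector <4 x i32> %x.splatinsert, <4 x i32> poison,
//                            <4 x i32> zeroinitializer
//
// The all-zero mask broadcasts lane 0, and the same idiom works for scalable
// element counts because a zeroinitializer mask needs no known length.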

Value *IRBuilderBase::CreateVectorInterleave(ArrayRef<Value *> Ops,
                                             const Twine &Name) {
  assert(Ops.size() >= 2 && Ops.size() <= 8 &&
         "Unexpected number of operands to interleave");

  // Make sure all operands are the same type.
  assert(isa<VectorType>(Ops[0]->getType()) && "Unexpected type");

#ifndef NDEBUG
  for (unsigned I = 1; I < Ops.size(); I++) {
    assert(Ops[I]->getType() == Ops[0]->getType() &&
           "Vector interleave expects matching operand types!");
  }
#endif

  unsigned IID = Intrinsic::getInterleaveIntrinsicID(Ops.size());
  auto *SubvecTy = cast<VectorType>(Ops[0]->getType());
  Type *DestTy = VectorType::get(SubvecTy->getElementType(),
                                 SubvecTy->getElementCount() * Ops.size());
  return CreateIntrinsic(IID, {DestTy}, Ops, {}, Name);
}
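
// Example (editorial sketch): interleaving two <4 x i32> operands %a and %b
// via B.CreateVectorInterleave({A, Bv}) should produce
//
//   %r = call <8 x i32> @llvm.vector.interleave2.v8i32(<4 x i32> %a,
//                                                      <4 x i32> %b)
//
// The factor (2..8) selects the intrinsic, and the result type keeps the
// element type while widening the element count to Factor times that of the
// operands.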

Value *IRBuilderBase::CreatePreserveArrayAccessIndex(Type *ElTy, Value *Base,
                                                     unsigned Dimension,
                                                     unsigned LastIndex,
                                                     MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.array.access.index.");

  Value *LastIndexV = getInt32(LastIndex);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  SmallVector<Value *, 4> IdxList(Dimension, Zero);
  IdxList.push_back(LastIndexV);

  Type *ResultType = GetElementPtrInst::getGEPReturnType(Base, IdxList);

  Value *DimV = getInt32(Dimension);
  CallInst *Fn =
      CreateIntrinsic(Intrinsic::preserve_array_access_index,
                      {ResultType, BaseType}, {Base, DimV, LastIndexV});
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}
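
// Example (editorial sketch; ArrTy, Base, and DI are placeholder values): for
// a CO-RE style access &base[5] on an i32 array, a frontend might call
//
//   B.CreatePreserveArrayAccessIndex(ArrTy, Base, /*Dimension=*/1,
//                                    /*LastIndex=*/5, DI);
//
// which computes the same address as `getelementptr i32, ptr %Base, i32 0,
// i32 5` but is emitted as @llvm.preserve.array.access.index so that BPF
// relocation passes can later rewrite the offset; the elementtype attribute
// and the llvm.preserve.access.index metadata carry the type and debug info
// those passes need.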

Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
    Value *Base, unsigned FieldIndex, MDNode *DbgInfo) {
  assert(isa<PointerType>(Base->getType()) &&
         "Invalid Base ptr type for preserve.union.access.index.");
  auto *BaseType = Base->getType();

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn = CreateIntrinsic(Intrinsic::preserve_union_access_index,
                                 {BaseType, BaseType}, {Base, DIIndex});
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

Value *IRBuilderBase::CreatePreserveStructAccessIndex(
    Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.struct.access.index.");

  Value *GEPIndex = getInt32(Index);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  Type *ResultType =
      GetElementPtrInst::getGEPReturnType(Base, {Zero, GEPIndex});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn =
      CreateIntrinsic(Intrinsic::preserve_struct_access_index,
                      {ResultType, BaseType}, {Base, GEPIndex, DIIndex});
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}
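
// Example (editorial sketch): for a field access &base->f where f sits at GEP
// index 2 but debug-info field index 3 (the two may differ, e.g. with
// anonymous members), CreatePreserveStructAccessIndex computes the address of
// `getelementptr %Base, i32 0, i32 2` but emits it as
// @llvm.preserve.struct.access.index(%Base, 2, 3). The union variant above
// carries only the field index, since a union access needs no GEP offset.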

Value *IRBuilderBase::createIsFPClass(Value *FPNum, unsigned Test) {
  ConstantInt *TestV = getInt32(Test);
  return CreateIntrinsic(Intrinsic::is_fpclass, {FPNum->getType()},
                         {FPNum, TestV});
}
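
// Example (editorial sketch): Test is an FPClassTest bitmask, so
// B.createIsFPClass(X, fcNan) on a float %x emits
//
//   %r = call i1 @llvm.is.fpclass.f32(float %x, i32 3)
//
// where 3 == fcSNan | fcQNan; combining more bits (fcInf, fcZero, ...) tests
// membership in any of the selected floating-point classes.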

CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
                                                         Value *PtrValue,
                                                         Value *AlignValue,
                                                         Value *OffsetValue) {
  SmallVector<Value *, 4> Vals({PtrValue, AlignValue});
  if (OffsetValue)
    Vals.push_back(OffsetValue);
  OperandBundleDefT<Value *> AlignOpB("align", Vals);
  return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});
}

CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   unsigned Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  assert(Alignment != 0 && "Invalid Alignment");
  auto *PtrTy = cast<PointerType>(PtrValue->getType());
  Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
  Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);
  return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);
}

CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   Value *Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
}
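
// Example (editorial sketch covering the assumption helpers above):
// B.CreateAlignmentAssumption(DL, Ptr, 16) wraps the alignment in an "align"
// operand bundle on a trivially-true assume:
//
//   call void @llvm.assume(i1 true) [ "align"(ptr %Ptr, i64 16) ]
//
// An optional offset value becomes a third bundle operand, asserting that the
// pointer minus that offset is 16-byte aligned.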

CallInst *IRBuilderBase::CreateDereferenceableAssumption(Value *PtrValue,
                                                         Value *SizeValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create a dereferenceable assumption on a non-pointer?");
  SmallVector<Value *, 4> Vals({PtrValue, SizeValue});
  OperandBundleDefT<Value *> DereferenceableOpB("dereferenceable", Vals);
  return CreateAssumption(ConstantInt::getTrue(getContext()),
                          {DereferenceableOpB});
}
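
// Example (editorial sketch): B.CreateDereferenceableAssumption(Ptr,
// B.getInt64(32)) emits
//
//   call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %Ptr, i64 32) ]
//
// telling later passes that at least 32 bytes starting at %Ptr may be
// accessed, e.g. to justify speculative loads.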

IRBuilderDefaultInserter::~IRBuilderDefaultInserter() = default;
IRBuilderCallbackInserter::~IRBuilderCallbackInserter() = default;
IRBuilderFolder::~IRBuilderFolder() = default;
void ConstantFolder::anchor() {}
void NoFolder::anchor() {}