1 | //===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file implements the IRBuilder class, which is used as a convenient way |
10 | // to create LLVM instructions with a consistent and simplified interface. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #include "llvm/IR/IRBuilder.h" |
15 | #include "llvm/ADT/ArrayRef.h" |
16 | #include "llvm/IR/Constant.h" |
17 | #include "llvm/IR/Constants.h" |
18 | #include "llvm/IR/DerivedTypes.h" |
19 | #include "llvm/IR/Function.h" |
20 | #include "llvm/IR/GlobalValue.h" |
21 | #include "llvm/IR/GlobalVariable.h" |
22 | #include "llvm/IR/IntrinsicInst.h" |
23 | #include "llvm/IR/Intrinsics.h" |
24 | #include "llvm/IR/LLVMContext.h" |
25 | #include "llvm/IR/Module.h" |
26 | #include "llvm/IR/NoFolder.h" |
27 | #include "llvm/IR/Operator.h" |
28 | #include "llvm/IR/Statepoint.h" |
29 | #include "llvm/IR/Type.h" |
30 | #include "llvm/IR/Value.h" |
31 | #include "llvm/Support/Casting.h" |
32 | #include <cassert> |
33 | #include <cstdint> |
34 | #include <optional> |
35 | #include <vector> |
36 | |
37 | using namespace llvm; |
38 | |
/// CreateGlobalString - Make a new global variable with an initializer that
/// has an array-of-i8 type filled in with the nul-terminated string value
/// specified. If Name is specified, it is the name of the global variable
/// created.
43 | GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str, |
44 | const Twine &Name, |
45 | unsigned AddressSpace, |
46 | Module *M, bool AddNull) { |
  Constant *StrConstant = ConstantDataArray::getString(Context, Str, AddNull);
48 | if (!M) |
49 | M = BB->getParent()->getParent(); |
50 | auto *GV = new GlobalVariable( |
51 | *M, StrConstant->getType(), true, GlobalValue::PrivateLinkage, |
52 | StrConstant, Name, nullptr, GlobalVariable::NotThreadLocal, AddressSpace); |
53 | GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); |
  GV->setAlignment(M->getDataLayout().getPrefTypeAlign(getInt8Ty()));
55 | return GV; |
56 | } |
57 | |
58 | Type *IRBuilderBase::getCurrentFunctionReturnType() const { |
  assert(BB && BB->getParent() && "No current function!");
60 | return BB->getParent()->getReturnType(); |
61 | } |
62 | |
63 | DebugLoc IRBuilderBase::getCurrentDebugLocation() const { return StoredDL; } |
64 | void IRBuilderBase::SetInstDebugLocation(Instruction *I) const { |
65 | // We prefer to set our current debug location if any has been set, but if |
66 | // our debug location is empty and I has a valid location, we shouldn't |
67 | // overwrite it. |
  I->setDebugLoc(StoredDL.orElse(I->getDebugLoc()));
69 | } |
70 | |
71 | Value *IRBuilderBase::CreateAggregateCast(Value *V, Type *DestTy) { |
72 | Type *SrcTy = V->getType(); |
73 | if (SrcTy == DestTy) |
74 | return V; |
75 | |
76 | if (SrcTy->isAggregateType()) { |
77 | unsigned NumElements; |
78 | if (SrcTy->isStructTy()) { |
      assert(DestTy->isStructTy() && "Expected StructType");
      assert(SrcTy->getStructNumElements() == DestTy->getStructNumElements() &&
             "Expected StructTypes with equal number of elements");
      NumElements = SrcTy->getStructNumElements();
    } else {
      assert(SrcTy->isArrayTy() && DestTy->isArrayTy() && "Expected ArrayType");
      assert(SrcTy->getArrayNumElements() == DestTy->getArrayNumElements() &&
             "Expected ArrayTypes with equal number of elements");
      NumElements = SrcTy->getArrayNumElements();
    }

    Value *Result = PoisonValue::get(DestTy);
    for (unsigned I = 0; I < NumElements; ++I) {
      Type *ElementTy = SrcTy->isStructTy() ? DestTy->getStructElementType(I)
                                            : DestTy->getArrayElementType();
      Value *Element =
          CreateAggregateCast(CreateExtractValue(V, ArrayRef(I)), ElementTy);

      Result = CreateInsertValue(Result, Element, ArrayRef(I));
98 | } |
99 | return Result; |
100 | } |
101 | |
102 | return CreateBitOrPointerCast(V, DestTy); |
103 | } |
104 | |
105 | CallInst * |
106 | IRBuilderBase::createCallHelper(Function *Callee, ArrayRef<Value *> Ops, |
107 | const Twine &Name, FMFSource FMFSource, |
108 | ArrayRef<OperandBundleDef> OpBundles) { |
  CallInst *CI = CreateCall(Callee, Ops, OpBundles, Name);
  if (isa<FPMathOperator>(CI))
    CI->setFastMathFlags(FMFSource.get(FMF));
112 | return CI; |
113 | } |
114 | |
115 | static Value *CreateVScaleMultiple(IRBuilderBase &B, Type *Ty, uint64_t Scale) { |
116 | Value *VScale = B.CreateVScale(Ty); |
117 | if (Scale == 1) |
118 | return VScale; |
119 | |
  return B.CreateNUWMul(VScale, ConstantInt::get(Ty, Scale));
121 | } |
122 | |
123 | Value *IRBuilderBase::CreateElementCount(Type *Ty, ElementCount EC) { |
124 | if (EC.isFixed() || EC.isZero()) |
    return ConstantInt::get(Ty, EC.getKnownMinValue());

  return CreateVScaleMultiple(*this, Ty, EC.getKnownMinValue());
128 | } |
129 | |
130 | Value *IRBuilderBase::CreateTypeSize(Type *Ty, TypeSize Size) { |
131 | if (Size.isFixed() || Size.isZero()) |
    return ConstantInt::get(Ty, Size.getKnownMinValue());

  return CreateVScaleMultiple(*this, Ty, Size.getKnownMinValue());
135 | } |
136 | |
137 | Value *IRBuilderBase::CreateStepVector(Type *DstType, const Twine &Name) { |
138 | Type *STy = DstType->getScalarType(); |
  if (isa<ScalableVectorType>(DstType)) {
    Type *StepVecType = DstType;
    // TODO: We expect this special case (element type < 8 bits) to be
    // temporary - once the intrinsic properly supports < 8 bits this code
    // can be removed.
    if (STy->getScalarSizeInBits() < 8)
      StepVecType =
          VectorType::get(getInt8Ty(), cast<ScalableVectorType>(DstType));
    Value *Res = CreateIntrinsic(Intrinsic::stepvector, {StepVecType}, {},
                                 nullptr, Name);
    if (StepVecType != DstType)
      Res = CreateTrunc(Res, DstType);
    return Res;
  }

  unsigned NumEls = cast<FixedVectorType>(DstType)->getNumElements();

  // Create a vector of consecutive numbers from zero to VF.
  SmallVector<Constant *, 8> Indices;
  for (unsigned i = 0; i < NumEls; ++i)
    Indices.push_back(ConstantInt::get(STy, i));

  // Add the consecutive indices to the vector value.
  return ConstantVector::get(Indices);
163 | } |
164 | |
165 | CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size, |
166 | MaybeAlign Align, bool isVolatile, |
167 | const AAMDNodes &AAInfo) { |
  Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
  Type *Tys[] = {Ptr->getType(), Size->getType()};

  CallInst *CI = CreateIntrinsic(Intrinsic::memset, Tys, Ops);

  if (Align)
    cast<MemSetInst>(CI)->setDestAlignment(*Align);
175 | CI->setAAMetadata(AAInfo); |
176 | return CI; |
177 | } |
178 | |
179 | CallInst *IRBuilderBase::CreateMemSetInline(Value *Dst, MaybeAlign DstAlign, |
180 | Value *Val, Value *Size, |
181 | bool IsVolatile, |
182 | const AAMDNodes &AAInfo) { |
  Value *Ops[] = {Dst, Val, Size, getInt1(IsVolatile)};
  Type *Tys[] = {Dst->getType(), Size->getType()};

  CallInst *CI = CreateIntrinsic(Intrinsic::memset_inline, Tys, Ops);

  if (DstAlign)
    cast<MemSetInst>(CI)->setDestAlignment(*DstAlign);
190 | CI->setAAMetadata(AAInfo); |
191 | return CI; |
192 | } |
193 | |
194 | CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet( |
195 | Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize, |
196 | const AAMDNodes &AAInfo) { |
197 | |
  Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
  Type *Tys[] = {Ptr->getType(), Size->getType()};

  CallInst *CI =
      CreateIntrinsic(Intrinsic::memset_element_unordered_atomic, Tys, Ops);

  cast<AnyMemSetInst>(CI)->setDestAlignment(Alignment);
205 | CI->setAAMetadata(AAInfo); |
206 | return CI; |
207 | } |
208 | |
209 | CallInst *IRBuilderBase::CreateMemTransferInst(Intrinsic::ID IntrID, Value *Dst, |
210 | MaybeAlign DstAlign, Value *Src, |
211 | MaybeAlign SrcAlign, Value *Size, |
212 | bool isVolatile, |
213 | const AAMDNodes &AAInfo) { |
  assert((IntrID == Intrinsic::memcpy || IntrID == Intrinsic::memcpy_inline ||
          IntrID == Intrinsic::memmove) &&
         "Unexpected intrinsic ID");
  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};

  CallInst *CI = CreateIntrinsic(IntrID, Tys, Ops);

  auto *MCI = cast<MemTransferInst>(CI);
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);
227 | MCI->setAAMetadata(AAInfo); |
228 | return CI; |
229 | } |
230 | |
231 | CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy( |
232 | Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size, |
233 | uint32_t ElementSize, const AAMDNodes &AAInfo) { |
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};

  CallInst *CI =
      CreateIntrinsic(Intrinsic::memcpy_element_unordered_atomic, Tys, Ops);

  // Set the alignment of the pointer args.
  auto *AMCI = cast<AnyMemCpyInst>(CI);
246 | AMCI->setDestAlignment(DstAlign); |
247 | AMCI->setSourceAlignment(SrcAlign); |
248 | AMCI->setAAMetadata(AAInfo); |
249 | return CI; |
250 | } |
251 | |
252 | /// isConstantOne - Return true only if val is constant int 1 |
253 | static bool isConstantOne(const Value *Val) { |
  assert(Val && "isConstantOne does not work with nullptr Val");
255 | const ConstantInt *CVal = dyn_cast<ConstantInt>(Val); |
256 | return CVal && CVal->isOne(); |
257 | } |
258 | |
259 | CallInst *IRBuilderBase::CreateMalloc(Type *IntPtrTy, Type *AllocTy, |
260 | Value *AllocSize, Value *ArraySize, |
261 | ArrayRef<OperandBundleDef> OpB, |
262 | Function *MallocF, const Twine &Name) { |
263 | // malloc(type) becomes: |
264 | // i8* malloc(typeSize) |
265 | // malloc(type, arraySize) becomes: |
266 | // i8* malloc(typeSize*arraySize) |
267 | if (!ArraySize) |
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy)
    ArraySize = CreateIntCast(ArraySize, IntPtrTy, false);

  if (!isConstantOne(ArraySize)) {
    if (isConstantOne(AllocSize)) {
      AllocSize = ArraySize; // Operand * 1 = Operand
    } else {
      // Multiply type size by the array size...
      AllocSize = CreateMul(ArraySize, AllocSize, "mallocsize");
278 | } |
279 | } |
280 | |
  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  Module *M = BB->getParent()->getParent();
  Type *BPTy = PointerType::getUnqual(Context);
  FunctionCallee MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
  CallInst *MCall = CreateCall(MallocFunc, AllocSize, OpB, Name);

  MCall->setTailCall();
  if (Function *F = dyn_cast<Function>(MallocFunc.getCallee())) {
293 | MCall->setCallingConv(F->getCallingConv()); |
294 | F->setReturnDoesNotAlias(); |
295 | } |
296 | |
  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");
298 | |
299 | return MCall; |
300 | } |
301 | |
302 | CallInst *IRBuilderBase::CreateMalloc(Type *IntPtrTy, Type *AllocTy, |
303 | Value *AllocSize, Value *ArraySize, |
304 | Function *MallocF, const Twine &Name) { |
305 | |
  return CreateMalloc(IntPtrTy, AllocTy, AllocSize, ArraySize, {}, MallocF,
307 | Name); |
308 | } |
309 | |
310 | /// CreateFree - Generate the IR for a call to the builtin free function. |
311 | CallInst *IRBuilderBase::CreateFree(Value *Source, |
312 | ArrayRef<OperandBundleDef> Bundles) { |
  assert(Source->getType()->isPointerTy() &&
         "Cannot free something of non-pointer type!");
315 | |
316 | Module *M = BB->getParent()->getParent(); |
317 | |
  Type *VoidTy = Type::getVoidTy(M->getContext());
  Type *VoidPtrTy = PointerType::getUnqual(M->getContext());
  // prototype free as "void free(void*)"
  FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, VoidPtrTy);
  CallInst *Result = CreateCall(FreeFunc, Source, Bundles, "");
  Result->setTailCall();
  if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
325 | Result->setCallingConv(F->getCallingConv()); |
326 | |
327 | return Result; |
328 | } |
329 | |
330 | CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove( |
331 | Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size, |
332 | uint32_t ElementSize, const AAMDNodes &AAInfo) { |
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};

  CallInst *CI =
      CreateIntrinsic(Intrinsic::memmove_element_unordered_atomic, Tys, Ops);

  // Set the alignment of the pointer args.
  CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign));
  CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), SrcAlign));
346 | CI->setAAMetadata(AAInfo); |
347 | return CI; |
348 | } |
349 | |
350 | CallInst *IRBuilderBase::getReductionIntrinsic(Intrinsic::ID ID, Value *Src) { |
351 | Value *Ops[] = {Src}; |
  Type *Tys[] = {Src->getType()};
  return CreateIntrinsic(ID, Tys, Ops);
354 | } |
355 | |
356 | CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) { |
357 | Value *Ops[] = {Acc, Src}; |
  return CreateIntrinsic(Intrinsic::vector_reduce_fadd, {Src->getType()}, Ops);
359 | } |
360 | |
361 | CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) { |
362 | Value *Ops[] = {Acc, Src}; |
  return CreateIntrinsic(Intrinsic::vector_reduce_fmul, {Src->getType()}, Ops);
364 | } |
365 | |
366 | CallInst *IRBuilderBase::CreateAddReduce(Value *Src) { |
  return getReductionIntrinsic(Intrinsic::vector_reduce_add, Src);
}

CallInst *IRBuilderBase::CreateMulReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_mul, Src);
}

CallInst *IRBuilderBase::CreateAndReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_and, Src);
}

CallInst *IRBuilderBase::CreateOrReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_or, Src);
}

CallInst *IRBuilderBase::CreateXorReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_xor, Src);
384 | } |
385 | |
386 | CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) { |
387 | auto ID = |
388 | IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax; |
389 | return getReductionIntrinsic(ID, Src); |
390 | } |
391 | |
392 | CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) { |
393 | auto ID = |
394 | IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin; |
395 | return getReductionIntrinsic(ID, Src); |
396 | } |
397 | |
398 | CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src) { |
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmax, Src);
}

CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmin, Src);
}

CallInst *IRBuilderBase::CreateFPMaximumReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmaximum, Src);
}

CallInst *IRBuilderBase::CreateFPMinimumReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fminimum, Src);
412 | } |
413 | |
414 | CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) { |
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.start only applies to pointers.");
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.start requires the size to be an i64");
  Value *Ops[] = {Size, Ptr};
  return CreateIntrinsic(Intrinsic::lifetime_start, {Ptr->getType()}, Ops);
424 | } |
425 | |
426 | CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) { |
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.end only applies to pointers.");
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.end requires the size to be an i64");
  Value *Ops[] = {Size, Ptr};
  return CreateIntrinsic(Intrinsic::lifetime_end, {Ptr->getType()}, Ops);
436 | } |
437 | |
438 | CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) { |
439 | |
  assert(isa<PointerType>(Ptr->getType()) &&
         "invariant.start only applies to pointers.");
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "invariant.start requires the size to be an i64");

  Value *Ops[] = {Size, Ptr};
  // Fill in the single overloaded type: memory object type.
  Type *ObjectPtr[1] = {Ptr->getType()};
  return CreateIntrinsic(Intrinsic::invariant_start, ObjectPtr, Ops);
452 | } |
453 | |
454 | static MaybeAlign getAlign(Value *Ptr) { |
  if (auto *V = dyn_cast<GlobalVariable>(Ptr))
    return V->getAlign();
  if (auto *A = dyn_cast<GlobalAlias>(Ptr))
    return getAlign(A->getAliaseeObject());
459 | return {}; |
460 | } |
461 | |
462 | CallInst *IRBuilderBase::CreateThreadLocalAddress(Value *Ptr) { |
  assert(isa<GlobalValue>(Ptr) && cast<GlobalValue>(Ptr)->isThreadLocal() &&
         "threadlocal_address only applies to thread local variables.");
  CallInst *CI = CreateIntrinsic(llvm::Intrinsic::threadlocal_address,
                                 {Ptr->getType()}, {Ptr});
  if (MaybeAlign A = getAlign(Ptr)) {
    CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), *A));
    CI->addRetAttr(Attribute::getWithAlignment(CI->getContext(), *A));
470 | } |
471 | return CI; |
472 | } |
473 | |
474 | CallInst * |
475 | IRBuilderBase::CreateAssumption(Value *Cond, |
476 | ArrayRef<OperandBundleDef> OpBundles) { |
  assert(Cond->getType() == getInt1Ty() &&
         "an assumption condition must be of type i1");

  Value *Ops[] = {Cond};
  Module *M = BB->getParent()->getParent();
  Function *FnAssume = Intrinsic::getOrInsertDeclaration(M, Intrinsic::assume);
  return CreateCall(FnAssume, Ops, OpBundles);
484 | } |
485 | |
486 | Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) { |
  return CreateIntrinsic(Intrinsic::experimental_noalias_scope_decl, {},
                         {Scope});
489 | } |
490 | |
491 | /// Create a call to a Masked Load intrinsic. |
492 | /// \p Ty - vector type to load |
493 | /// \p Ptr - base pointer for the load |
494 | /// \p Alignment - alignment of the source location |
495 | /// \p Mask - vector of booleans which indicates what vector lanes should |
496 | /// be accessed in memory |
497 | /// \p PassThru - pass-through value that is used to fill the masked-off lanes |
498 | /// of the result |
499 | /// \p Name - name of the result variable |
500 | CallInst *IRBuilderBase::CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, |
501 | Value *Mask, Value *PassThru, |
502 | const Twine &Name) { |
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  assert(Ty->isVectorTy() && "Type should be vector");
  assert(Mask && "Mask should not be all-ones (null)");
  if (!PassThru)
    PassThru = PoisonValue::get(Ty);
  Type *OverloadedTypes[] = {Ty, PtrTy};
  Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
  return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops, OverloadedTypes,
                               Name);
512 | } |
513 | |
514 | /// Create a call to a Masked Store intrinsic. |
515 | /// \p Val - data to be stored, |
516 | /// \p Ptr - base pointer for the store |
517 | /// \p Alignment - alignment of the destination location |
518 | /// \p Mask - vector of booleans which indicates what vector lanes should |
519 | /// be accessed in memory |
520 | CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr, |
521 | Align Alignment, Value *Mask) { |
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = Val->getType();
  assert(DataTy->isVectorTy() && "Val should be a vector");
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = {DataTy, PtrTy};
  Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
  return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
529 | } |
530 | |
531 | /// Create a call to a Masked intrinsic, with given intrinsic Id, |
532 | /// an array of operands - Ops, and an array of overloaded types - |
533 | /// OverloadedTypes. |
534 | CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id, |
535 | ArrayRef<Value *> Ops, |
536 | ArrayRef<Type *> OverloadedTypes, |
537 | const Twine &Name) { |
  return CreateIntrinsic(Id, OverloadedTypes, Ops, {}, Name);
539 | } |
540 | |
541 | /// Create a call to a Masked Gather intrinsic. |
542 | /// \p Ty - vector type to gather |
543 | /// \p Ptrs - vector of pointers for loading |
544 | /// \p Align - alignment for one element |
545 | /// \p Mask - vector of booleans which indicates what vector lanes should |
546 | /// be accessed in memory |
547 | /// \p PassThru - pass-through value that is used to fill the masked-off lanes |
548 | /// of the result |
549 | /// \p Name - name of the result variable |
550 | CallInst *IRBuilderBase::CreateMaskedGather(Type *Ty, Value *Ptrs, |
551 | Align Alignment, Value *Mask, |
552 | Value *PassThru, |
553 | const Twine &Name) { |
  auto *VecTy = cast<VectorType>(Ty);
  ElementCount NumElts = VecTy->getElementCount();
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  assert(NumElts == PtrsTy->getElementCount() && "Element count mismatch");

  if (!Mask)
    Mask = getAllOnesMask(NumElts);

  if (!PassThru)
    PassThru = PoisonValue::get(Ty);

  Type *OverloadedTypes[] = {Ty, PtrsTy};
  Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
571 | Name); |
572 | } |
573 | |
574 | /// Create a call to a Masked Scatter intrinsic. |
575 | /// \p Data - data to be stored, |
576 | /// \p Ptrs - the vector of pointers, where the \p Data elements should be |
577 | /// stored |
578 | /// \p Align - alignment for one element |
579 | /// \p Mask - vector of booleans which indicates what vector lanes should |
580 | /// be accessed in memory |
581 | CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs, |
582 | Align Alignment, Value *Mask) { |
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  auto *DataTy = cast<VectorType>(Data->getType());
  ElementCount NumElts = PtrsTy->getElementCount();

  if (!Mask)
    Mask = getAllOnesMask(NumElts);

  Type *OverloadedTypes[] = {DataTy, PtrsTy};
  Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
596 | } |
597 | |
598 | /// Create a call to Masked Expand Load intrinsic |
599 | /// \p Ty - vector type to load |
600 | /// \p Ptr - base pointer for the load |
601 | /// \p Align - alignment of \p Ptr |
602 | /// \p Mask - vector of booleans which indicates what vector lanes should |
603 | /// be accessed in memory |
604 | /// \p PassThru - pass-through value that is used to fill the masked-off lanes |
605 | /// of the result |
606 | /// \p Name - name of the result variable |
607 | CallInst *IRBuilderBase::CreateMaskedExpandLoad(Type *Ty, Value *Ptr, |
608 | MaybeAlign Align, Value *Mask, |
609 | Value *PassThru, |
610 | const Twine &Name) { |
  assert(Ty->isVectorTy() && "Type should be vector");
  assert(Mask && "Mask should not be all-ones (null)");
  if (!PassThru)
    PassThru = PoisonValue::get(Ty);
  Type *OverloadedTypes[] = {Ty};
  Value *Ops[] = {Ptr, Mask, PassThru};
  CallInst *CI = CreateMaskedIntrinsic(Intrinsic::masked_expandload, Ops,
                                       OverloadedTypes, Name);
  if (Align)
    CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), *Align));
621 | return CI; |
622 | } |
623 | |
624 | /// Create a call to Masked Compress Store intrinsic |
625 | /// \p Val - data to be stored, |
626 | /// \p Ptr - base pointer for the store |
627 | /// \p Align - alignment of \p Ptr |
628 | /// \p Mask - vector of booleans which indicates what vector lanes should |
629 | /// be accessed in memory |
630 | CallInst *IRBuilderBase::CreateMaskedCompressStore(Value *Val, Value *Ptr, |
631 | MaybeAlign Align, |
632 | Value *Mask) { |
633 | Type *DataTy = Val->getType(); |
  assert(DataTy->isVectorTy() && "Val should be a vector");
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = {DataTy};
  Value *Ops[] = {Val, Ptr, Mask};
  CallInst *CI = CreateMaskedIntrinsic(Intrinsic::masked_compressstore, Ops,
                                       OverloadedTypes);
  if (Align)
    CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), *Align));
642 | return CI; |
643 | } |
644 | |
645 | template <typename T0> |
646 | static std::vector<Value *> |
647 | getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes, |
648 | Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) { |
649 | std::vector<Value *> Args; |
  Args.push_back(B.getInt64(ID));
  Args.push_back(B.getInt32(NumPatchBytes));
  Args.push_back(ActualCallee);
  Args.push_back(B.getInt32(CallArgs.size()));
  Args.push_back(B.getInt32(Flags));
  llvm::append_range(Args, CallArgs);
  // GC Transition and Deopt args are now always handled via operand bundle.
  // They will be removed from the signature of gc.statepoint shortly.
  Args.push_back(B.getInt32(0));
  Args.push_back(B.getInt32(0));
660 | // GC args are now encoded in the gc-live operand bundle |
661 | return Args; |
662 | } |
663 | |
664 | template<typename T1, typename T2, typename T3> |
665 | static std::vector<OperandBundleDef> |
666 | getStatepointBundles(std::optional<ArrayRef<T1>> TransitionArgs, |
667 | std::optional<ArrayRef<T2>> DeoptArgs, |
668 | ArrayRef<T3> GCArgs) { |
669 | std::vector<OperandBundleDef> Rval; |
670 | if (DeoptArgs) |
671 | Rval.emplace_back(args: "deopt" , args: SmallVector<Value *, 16>(*DeoptArgs)); |
672 | if (TransitionArgs) |
673 | Rval.emplace_back(args: "gc-transition" , |
674 | args: SmallVector<Value *, 16>(*TransitionArgs)); |
675 | if (GCArgs.size()) |
676 | Rval.emplace_back(args: "gc-live" , args: SmallVector<Value *, 16>(GCArgs)); |
677 | return Rval; |
678 | } |
679 | |
680 | template <typename T0, typename T1, typename T2, typename T3> |
681 | static CallInst *CreateGCStatepointCallCommon( |
682 | IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes, |
683 | FunctionCallee ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs, |
684 | std::optional<ArrayRef<T1>> TransitionArgs, |
685 | std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs, |
686 | const Twine &Name) { |
687 | Module *M = Builder->GetInsertBlock()->getParent()->getParent(); |
688 | // Fill in the one generic type'd argument (the function is also vararg) |
  Function *FnStatepoint = Intrinsic::getOrInsertDeclaration(
      M, Intrinsic::experimental_gc_statepoint,
      {ActualCallee.getCallee()->getType()});
692 | |
693 | std::vector<Value *> Args = getStatepointArgs( |
694 | *Builder, ID, NumPatchBytes, ActualCallee.getCallee(), Flags, CallArgs); |
695 | |
696 | CallInst *CI = Builder->CreateCall( |
697 | FnStatepoint, Args, |
698 | getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name); |
  CI->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualCallee.getFunctionType()));
702 | return CI; |
703 | } |
704 | |
705 | CallInst *IRBuilderBase::CreateGCStatepointCall( |
706 | uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee, |
707 | ArrayRef<Value *> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs, |
708 | ArrayRef<Value *> GCArgs, const Twine &Name) { |
709 | return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>( |
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, std::nullopt /* No Transition Args */, DeoptArgs, GCArgs, Name);
712 | } |
713 | |
714 | CallInst *IRBuilderBase::CreateGCStatepointCall( |
715 | uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee, |
716 | uint32_t Flags, ArrayRef<Value *> CallArgs, |
717 | std::optional<ArrayRef<Use>> TransitionArgs, |
718 | std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs, |
719 | const Twine &Name) { |
720 | return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>( |
      this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
722 | DeoptArgs, GCArgs, Name); |
723 | } |
724 | |
725 | CallInst *IRBuilderBase::CreateGCStatepointCall( |
726 | uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee, |
727 | ArrayRef<Use> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs, |
728 | ArrayRef<Value *> GCArgs, const Twine &Name) { |
729 | return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>( |
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, std::nullopt, DeoptArgs, GCArgs, Name);
732 | } |
733 | |
734 | template <typename T0, typename T1, typename T2, typename T3> |
735 | static InvokeInst *CreateGCStatepointInvokeCommon( |
736 | IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes, |
737 | FunctionCallee ActualInvokee, BasicBlock *NormalDest, |
738 | BasicBlock *UnwindDest, uint32_t Flags, ArrayRef<T0> InvokeArgs, |
739 | std::optional<ArrayRef<T1>> TransitionArgs, |
740 | std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs, |
741 | const Twine &Name) { |
742 | Module *M = Builder->GetInsertBlock()->getParent()->getParent(); |
743 | // Fill in the one generic type'd argument (the function is also vararg) |
  Function *FnStatepoint = Intrinsic::getOrInsertDeclaration(
      M, Intrinsic::experimental_gc_statepoint,
      {ActualInvokee.getCallee()->getType()});
747 | |
748 | std::vector<Value *> Args = |
749 | getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee.getCallee(), |
750 | Flags, InvokeArgs); |
751 | |
752 | InvokeInst *II = Builder->CreateInvoke( |
753 | FnStatepoint, NormalDest, UnwindDest, Args, |
754 | getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name); |
  II->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualInvokee.getFunctionType()));
758 | return II; |
759 | } |
760 | |
761 | InvokeInst *IRBuilderBase::CreateGCStatepointInvoke( |
762 | uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee, |
763 | BasicBlock *NormalDest, BasicBlock *UnwindDest, |
764 | ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Value *>> DeoptArgs, |
765 | ArrayRef<Value *> GCArgs, const Twine &Name) { |
766 | return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>( |
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs,
      std::nullopt /* No Transition Args*/, DeoptArgs, GCArgs, Name);
770 | } |
771 | |
772 | InvokeInst *IRBuilderBase::CreateGCStatepointInvoke( |
773 | uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee, |
774 | BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags, |
775 | ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Use>> TransitionArgs, |
776 | std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs, |
777 | const Twine &Name) { |
778 | return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>( |
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
780 | InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name); |
781 | } |
782 | |
783 | InvokeInst *IRBuilderBase::CreateGCStatepointInvoke( |
784 | uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee, |
785 | BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs, |
786 | std::optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs, |
787 | const Twine &Name) { |
788 | return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>( |
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, std::nullopt, DeoptArgs,
      GCArgs, Name);
792 | } |
793 | |
794 | CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint, |
795 | Type *ResultType, const Twine &Name) { |
796 | Intrinsic::ID ID = Intrinsic::experimental_gc_result; |
797 | Type *Types[] = {ResultType}; |
798 | |
799 | Value *Args[] = {Statepoint}; |
  return CreateIntrinsic(ID, Types, Args, {}, Name);
801 | } |
802 | |
803 | CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint, |
804 | int BaseOffset, int DerivedOffset, |
805 | Type *ResultType, const Twine &Name) { |
806 | Type *Types[] = {ResultType}; |
807 | |
  Value *Args[] = {Statepoint, getInt32(BaseOffset), getInt32(DerivedOffset)};
  return CreateIntrinsic(Intrinsic::experimental_gc_relocate, Types, Args, {},
                         Name);
811 | } |
812 | |
813 | CallInst *IRBuilderBase::CreateGCGetPointerBase(Value *DerivedPtr, |
814 | const Twine &Name) { |
815 | Type *PtrTy = DerivedPtr->getType(); |
  return CreateIntrinsic(Intrinsic::experimental_gc_get_pointer_base,
                         {PtrTy, PtrTy}, {DerivedPtr}, {}, Name);
818 | } |
819 | |
820 | CallInst *IRBuilderBase::CreateGCGetPointerOffset(Value *DerivedPtr, |
821 | const Twine &Name) { |
822 | Type *PtrTy = DerivedPtr->getType(); |
  return CreateIntrinsic(Intrinsic::experimental_gc_get_pointer_offset, {PtrTy},
                         {DerivedPtr}, {}, Name);
825 | } |
826 | |
827 | CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, |
828 | FMFSource FMFSource, |
829 | const Twine &Name) { |
830 | Module *M = BB->getModule(); |
  Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, {V->getType()});
  return createCallHelper(Fn, {V}, Name, FMFSource);
833 | } |
834 | |
835 | Value *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, |
836 | Value *RHS, FMFSource FMFSource, |
837 | const Twine &Name) { |
838 | Module *M = BB->getModule(); |
  Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, {LHS->getType()});
  if (Value *V = Folder.FoldBinaryIntrinsic(ID, LHS, RHS, Fn->getReturnType(),
                                            /*FMFSource=*/nullptr))
    return V;
  return createCallHelper(Fn, {LHS, RHS}, Name, FMFSource);
844 | } |
845 | |
846 | CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID, |
847 | ArrayRef<Type *> Types, |
848 | ArrayRef<Value *> Args, |
849 | FMFSource FMFSource, |
850 | const Twine &Name) { |
851 | Module *M = BB->getModule(); |
  Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, Types);
  return createCallHelper(Fn, Args, Name, FMFSource);
854 | } |
855 | |
856 | CallInst *IRBuilderBase::CreateIntrinsic(Type *RetTy, Intrinsic::ID ID, |
857 | ArrayRef<Value *> Args, |
858 | FMFSource FMFSource, |
859 | const Twine &Name) { |
860 | Module *M = BB->getModule(); |
861 | |
862 | SmallVector<Intrinsic::IITDescriptor> Table; |
  Intrinsic::getIntrinsicInfoTableEntries(ID, Table);
864 | ArrayRef<Intrinsic::IITDescriptor> TableRef(Table); |
865 | |
866 | SmallVector<Type *> ArgTys; |
  ArgTys.reserve(Args.size());
  for (auto &I : Args)
    ArgTys.push_back(I->getType());
  FunctionType *FTy = FunctionType::get(RetTy, ArgTys, false);
  SmallVector<Type *> OverloadTys;
  Intrinsic::MatchIntrinsicTypesResult Res =
      matchIntrinsicSignature(FTy, TableRef, OverloadTys);
  (void)Res;
  assert(Res == Intrinsic::MatchIntrinsicTypes_Match && TableRef.empty() &&
         "Wrong types for intrinsic!");
  // TODO: Handle varargs intrinsics.

  Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, OverloadTys);
  return createCallHelper(Fn, Args, Name, FMFSource);
881 | } |
882 | |
883 | CallInst *IRBuilderBase::CreateConstrainedFPBinOp( |
884 | Intrinsic::ID ID, Value *L, Value *R, FMFSource FMFSource, |
885 | const Twine &Name, MDNode *FPMathTag, std::optional<RoundingMode> Rounding, |
886 | std::optional<fp::ExceptionBehavior> Except) { |
887 | Value *RoundingV = getConstrainedFPRounding(Rounding); |
888 | Value *ExceptV = getConstrainedFPExcept(Except); |
889 | |
  FastMathFlags UseFMF = FMFSource.get(FMF);

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, RoundingV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(C, FPMathTag, UseFMF);
896 | return C; |
897 | } |
898 | |
899 | CallInst *IRBuilderBase::CreateConstrainedFPIntrinsic( |
900 | Intrinsic::ID ID, ArrayRef<Type *> Types, ArrayRef<Value *> Args, |
901 | FMFSource FMFSource, const Twine &Name, MDNode *FPMathTag, |
902 | std::optional<RoundingMode> Rounding, |
903 | std::optional<fp::ExceptionBehavior> Except) { |
904 | Value *RoundingV = getConstrainedFPRounding(Rounding); |
905 | Value *ExceptV = getConstrainedFPExcept(Except); |
906 | |
  FastMathFlags UseFMF = FMFSource.get(FMF);

  llvm::SmallVector<Value *, 5> ExtArgs(Args);
  ExtArgs.push_back(RoundingV);
  ExtArgs.push_back(ExceptV);

  CallInst *C = CreateIntrinsic(ID, Types, ExtArgs, nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(C, FPMathTag, UseFMF);
916 | return C; |
917 | } |
918 | |
919 | CallInst *IRBuilderBase::CreateConstrainedFPUnroundedBinOp( |
920 | Intrinsic::ID ID, Value *L, Value *R, FMFSource FMFSource, |
921 | const Twine &Name, MDNode *FPMathTag, |
922 | std::optional<fp::ExceptionBehavior> Except) { |
923 | Value *ExceptV = getConstrainedFPExcept(Except); |
924 | |
  FastMathFlags UseFMF = FMFSource.get(FMF);

  CallInst *C =
      CreateIntrinsic(ID, {L->getType()}, {L, R, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(C, FPMathTag, UseFMF);
931 | return C; |
932 | } |
933 | |
934 | Value *IRBuilderBase::CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops, |
935 | const Twine &Name, MDNode *FPMathTag) { |
  if (Instruction::isBinaryOp(Opc)) {
    assert(Ops.size() == 2 && "Invalid number of operands!");
    return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc),
                       Ops[0], Ops[1], Name, FPMathTag);
  }
  if (Instruction::isUnaryOp(Opc)) {
    assert(Ops.size() == 1 && "Invalid number of operands!");
    return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc),
                      Ops[0], Name, FPMathTag);
  }
  llvm_unreachable("Unexpected opcode!");
947 | } |
948 | |
949 | CallInst *IRBuilderBase::CreateConstrainedFPCast( |
950 | Intrinsic::ID ID, Value *V, Type *DestTy, FMFSource FMFSource, |
951 | const Twine &Name, MDNode *FPMathTag, std::optional<RoundingMode> Rounding, |
952 | std::optional<fp::ExceptionBehavior> Except) { |
953 | Value *ExceptV = getConstrainedFPExcept(Except); |
954 | |
  FastMathFlags UseFMF = FMFSource.get(FMF);

  CallInst *C;
  if (Intrinsic::hasConstrainedFPRoundingModeOperand(ID)) {
    Value *RoundingV = getConstrainedFPRounding(Rounding);
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
                        nullptr, Name);
  } else
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
                        Name);

  setConstrainedFPCallAttr(C);

  if (isa<FPMathOperator>(C))
    setFPAttrs(C, FPMathTag, UseFMF);
970 | return C; |
971 | } |
972 | |
973 | Value *IRBuilderBase::CreateFCmpHelper(CmpInst::Predicate P, Value *LHS, |
974 | Value *RHS, const Twine &Name, |
975 | MDNode *FPMathTag, FMFSource FMFSource, |
976 | bool IsSignaling) { |
977 | if (IsFPConstrained) { |
978 | auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps |
979 | : Intrinsic::experimental_constrained_fcmp; |
    return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
  }

  if (auto *V = Folder.FoldCmp(P, LHS, RHS))
    return V;
  return Insert(
      setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMFSource.get(FMF)),
      Name);
988 | } |
989 | |
990 | CallInst *IRBuilderBase::CreateConstrainedFPCmp( |
991 | Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R, |
992 | const Twine &Name, std::optional<fp::ExceptionBehavior> Except) { |
  Value *PredicateV = getConstrainedFPPredicate(P);
  Value *ExceptV = getConstrainedFPExcept(Except);

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, PredicateV, ExceptV}, nullptr, Name);
998 | setConstrainedFPCallAttr(C); |
999 | return C; |
1000 | } |
1001 | |
1002 | CallInst *IRBuilderBase::CreateConstrainedFPCall( |
1003 | Function *Callee, ArrayRef<Value *> Args, const Twine &Name, |
1004 | std::optional<RoundingMode> Rounding, |
1005 | std::optional<fp::ExceptionBehavior> Except) { |
1006 | llvm::SmallVector<Value *, 6> UseArgs(Args); |
1007 | |
  if (Intrinsic::hasConstrainedFPRoundingModeOperand(Callee->getIntrinsicID()))
    UseArgs.push_back(getConstrainedFPRounding(Rounding));
  UseArgs.push_back(getConstrainedFPExcept(Except));

  CallInst *C = CreateCall(Callee, UseArgs, Name);
1013 | setConstrainedFPCallAttr(C); |
1014 | return C; |
1015 | } |
1016 | |
1017 | Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False, |
1018 | const Twine &Name, Instruction *MDFrom) { |
  return CreateSelectFMF(C, True, False, {}, Name, MDFrom);
1020 | } |
1021 | |
1022 | Value *IRBuilderBase::CreateSelectFMF(Value *C, Value *True, Value *False, |
1023 | FMFSource FMFSource, const Twine &Name, |
1024 | Instruction *MDFrom) { |
1025 | if (auto *V = Folder.FoldSelect(C, True, False)) |
1026 | return V; |
1027 | |
  SelectInst *Sel = SelectInst::Create(C, True, False);
  if (MDFrom) {
    MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
    MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
    Sel = addBranchMetadata(Sel, Prof, Unpred);
  }
  if (isa<FPMathOperator>(Sel))
    setFPAttrs(Sel, /*MDNode=*/nullptr, FMFSource.get(FMF));
  return Insert(Sel, Name);
1037 | } |
1038 | |
1039 | Value *IRBuilderBase::CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS, |
1040 | const Twine &Name) { |
  assert(LHS->getType() == RHS->getType() &&
         "Pointer subtraction operand types must match!");
  Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
  Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
  Value *Difference = CreateSub(LHS_int, RHS_int);
  return CreateExactSDiv(Difference, ConstantExpr::getSizeOf(ElemTy),
                         Name);
1048 | } |
1049 | |
1050 | Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) { |
  assert(isa<PointerType>(Ptr->getType()) &&
         "launder.invariant.group only applies to pointers.");
  auto *PtrType = Ptr->getType();
  Module *M = BB->getParent()->getParent();
  Function *FnLaunderInvariantGroup = Intrinsic::getOrInsertDeclaration(
      M, Intrinsic::launder_invariant_group, {PtrType});

  assert(FnLaunderInvariantGroup->getReturnType() == PtrType &&
         FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
             PtrType &&
         "LaunderInvariantGroup should take and return the same type");

  return CreateCall(FnLaunderInvariantGroup, {Ptr});
1064 | } |
1065 | |
1066 | Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) { |
  assert(isa<PointerType>(Ptr->getType()) &&
         "strip.invariant.group only applies to pointers.");

  auto *PtrType = Ptr->getType();
  Module *M = BB->getParent()->getParent();
  Function *FnStripInvariantGroup = Intrinsic::getOrInsertDeclaration(
      M, Intrinsic::strip_invariant_group, {PtrType});

  assert(FnStripInvariantGroup->getReturnType() == PtrType &&
         FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
             PtrType &&
         "StripInvariantGroup should take and return the same type");

  return CreateCall(FnStripInvariantGroup, {Ptr});
1081 | } |
1082 | |
1083 | Value *IRBuilderBase::CreateVectorReverse(Value *V, const Twine &Name) { |
  auto *Ty = cast<VectorType>(V->getType());
  if (isa<ScalableVectorType>(Ty)) {
    Module *M = BB->getParent()->getParent();
    Function *F =
        Intrinsic::getOrInsertDeclaration(M, Intrinsic::vector_reverse, Ty);
    return Insert(CallInst::Create(F, V), Name);
  }
  // Keep the original behaviour for fixed vector
  SmallVector<int, 8> ShuffleMask;
  int NumElts = Ty->getElementCount().getKnownMinValue();
  for (int i = 0; i < NumElts; ++i)
    ShuffleMask.push_back(NumElts - i - 1);
  return CreateShuffleVector(V, ShuffleMask, Name);
1097 | } |
1098 | |
1099 | Value *IRBuilderBase::CreateVectorSplice(Value *V1, Value *V2, int64_t Imm, |
1100 | const Twine &Name) { |
  assert(isa<VectorType>(V1->getType()) && "Unexpected type");
  assert(V1->getType() == V2->getType() &&
         "Splice expects matching operand types!");

  if (auto *VTy = dyn_cast<ScalableVectorType>(V1->getType())) {
    Module *M = BB->getParent()->getParent();
    Function *F =
        Intrinsic::getOrInsertDeclaration(M, Intrinsic::vector_splice, VTy);

    Value *Ops[] = {V1, V2, getInt32(Imm)};
    return Insert(CallInst::Create(F, Ops), Name);
  }

  unsigned NumElts = cast<FixedVectorType>(V1->getType())->getNumElements();
  assert(((-Imm <= NumElts) || (Imm < NumElts)) &&
         "Invalid immediate for vector splice!");

  // Keep the original behaviour for fixed vector
  unsigned Idx = (NumElts + Imm) % NumElts;
  SmallVector<int, 8> Mask;
  for (unsigned I = 0; I < NumElts; ++I)
    Mask.push_back(Idx + I);
1123 | |
1124 | return CreateShuffleVector(V1, V2, Mask); |
1125 | } |
1126 | |
1127 | Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V, |
1128 | const Twine &Name) { |
  auto EC = ElementCount::getFixed(NumElts);
1130 | return CreateVectorSplat(EC, V, Name); |
1131 | } |
1132 | |
1133 | Value *IRBuilderBase::CreateVectorSplat(ElementCount EC, Value *V, |
1134 | const Twine &Name) { |
  assert(EC.isNonZero() && "Cannot splat to an empty vector!");

  // First insert it into a poison vector so we can shuffle it.
  Value *Poison = PoisonValue::get(VectorType::get(V->getType(), EC));
  V = CreateInsertElement(Poison, V, getInt64(0), Name + ".splatinsert");

  // Shuffle the value across the desired number of elements.
  SmallVector<int, 16> Zeros;
  Zeros.resize(EC.getKnownMinValue());
  return CreateShuffleVector(V, Zeros, Name + ".splat");
1145 | } |
1146 | |
1147 | Value *IRBuilderBase::CreatePreserveArrayAccessIndex( |
1148 | Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex, |
1149 | MDNode *DbgInfo) { |
1150 | auto *BaseType = Base->getType(); |
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.array.access.index.");

  Value *LastIndexV = getInt32(LastIndex);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  SmallVector<Value *, 4> IdxList(Dimension, Zero);
  IdxList.push_back(LastIndexV);

  Type *ResultType = GetElementPtrInst::getGEPReturnType(Base, IdxList);

  Value *DimV = getInt32(Dimension);
  CallInst *Fn =
      CreateIntrinsic(Intrinsic::preserve_array_access_index,
                      {ResultType, BaseType}, {Base, DimV, LastIndexV});
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
1169 | |
1170 | return Fn; |
1171 | } |
1172 | |
1173 | Value *IRBuilderBase::CreatePreserveUnionAccessIndex( |
1174 | Value *Base, unsigned FieldIndex, MDNode *DbgInfo) { |
  assert(isa<PointerType>(Base->getType()) &&
         "Invalid Base ptr type for preserve.union.access.index.");
  auto *BaseType = Base->getType();

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn = CreateIntrinsic(Intrinsic::preserve_union_access_index,
                                 {BaseType, BaseType}, {Base, DIIndex});
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
1184 | |
1185 | return Fn; |
1186 | } |
1187 | |
1188 | Value *IRBuilderBase::CreatePreserveStructAccessIndex( |
1189 | Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex, |
1190 | MDNode *DbgInfo) { |
1191 | auto *BaseType = Base->getType(); |
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.struct.access.index.");

  Value *GEPIndex = getInt32(Index);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  Type *ResultType =
      GetElementPtrInst::getGEPReturnType(Base, {Zero, GEPIndex});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn =
      CreateIntrinsic(Intrinsic::preserve_struct_access_index,
                      {ResultType, BaseType}, {Base, GEPIndex, DIIndex});
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
1208 | |
1209 | return Fn; |
1210 | } |
1211 | |
1212 | Value *IRBuilderBase::createIsFPClass(Value *FPNum, unsigned Test) { |
  ConstantInt *TestV = getInt32(Test);
  return CreateIntrinsic(Intrinsic::is_fpclass, {FPNum->getType()},
                         {FPNum, TestV});
1216 | } |
1217 | |
1218 | CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL, |
1219 | Value *PtrValue, |
1220 | Value *AlignValue, |
1221 | Value *OffsetValue) { |
1222 | SmallVector<Value *, 4> Vals({PtrValue, AlignValue}); |
1223 | if (OffsetValue) |
    Vals.push_back(OffsetValue);
  OperandBundleDefT<Value *> AlignOpB("align", Vals);
  return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});
1227 | } |
1228 | |
1229 | CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL, |
1230 | Value *PtrValue, |
1231 | unsigned Alignment, |
1232 | Value *OffsetValue) { |
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  assert(Alignment != 0 && "Invalid Alignment");
  auto *PtrTy = cast<PointerType>(PtrValue->getType());
  Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
  Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);
1239 | return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue); |
1240 | } |
1241 | |
1242 | CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL, |
1243 | Value *PtrValue, |
1244 | Value *Alignment, |
1245 | Value *OffsetValue) { |
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
1249 | } |
1250 | |
1251 | CallInst *IRBuilderBase::CreateDereferenceableAssumption(Value *PtrValue, |
1252 | Value *SizeValue) { |
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create a dereferenceable assumption on a non-pointer?");
  SmallVector<Value *, 4> Vals({PtrValue, SizeValue});
  OperandBundleDefT<Value *> DereferenceableOpB("dereferenceable", Vals);
  return CreateAssumption(ConstantInt::getTrue(getContext()),
                          {DereferenceableOpB});
1259 | } |
1260 | |
1261 | IRBuilderDefaultInserter::~IRBuilderDefaultInserter() = default; |
1262 | IRBuilderCallbackInserter::~IRBuilderCallbackInserter() = default; |
1263 | IRBuilderFolder::~IRBuilderFolder() = default; |
1264 | void ConstantFolder::anchor() {} |
1265 | void NoFolder::anchor() {} |
1266 | |