//===- PreISelIntrinsicLowering.cpp - Pre-ISel intrinsic lowering pass ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements IR lowering for the llvm.memcpy, llvm.memmove,
// llvm.memset, llvm.load.relative and llvm.objc.* intrinsics, as well as a
// number of other intrinsics (e.g. llvm.experimental.memset.pattern and the
// vector-predication intrinsics) that are best expanded before instruction
// selection.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/PreISelIntrinsicLowering.h"
#include "llvm/Analysis/ObjCARCInstKind.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/ExpandVectorPredication.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Scalar/LowerConstantIntrinsics.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/LowerMemIntrinsics.h"
#include "llvm/Transforms/Utils/LowerVectorIntrinsics.h"

using namespace llvm;

/// Threshold for expanding statically sized memory intrinsic calls in IR.
/// Calls with a known size larger than this will be expanded by the pass;
/// calls of unknown or smaller size will be left for expansion in codegen.
static cl::opt<int64_t> MemIntrinsicExpandSizeThresholdOpt(
    "mem-intrinsic-expand-size",
    cl::desc("Set minimum mem intrinsic size to expand in IR"), cl::init(-1),
    cl::Hidden);

namespace {

struct PreISelIntrinsicLowering {
  const TargetMachine *TM;
  const function_ref<TargetTransformInfo &(Function &)> LookupTTI;
  const function_ref<TargetLibraryInfo &(Function &)> LookupTLI;

  /// If this is true, assume it's preferable to leave memory intrinsic calls
  /// for replacement with a library call later. Otherwise this depends on
  /// whether TargetLowering reports that the corresponding libcall is
  /// available.
  const bool UseMemIntrinsicLibFunc;

  explicit PreISelIntrinsicLowering(
      const TargetMachine *TM_,
      function_ref<TargetTransformInfo &(Function &)> LookupTTI_,
      function_ref<TargetLibraryInfo &(Function &)> LookupTLI_,
      bool UseMemIntrinsicLibFunc_ = true)
      : TM(TM_), LookupTTI(LookupTTI_), LookupTLI(LookupTLI_),
        UseMemIntrinsicLibFunc(UseMemIntrinsicLibFunc_) {}

  static bool shouldExpandMemIntrinsicWithSize(Value *Size,
                                               const TargetTransformInfo &TTI);
  bool
  expandMemIntrinsicUses(Function &F,
                         DenseMap<Constant *, GlobalVariable *> &CMap) const;
  bool lowerIntrinsics(Module &M) const;
};

} // namespace

78
79template <class T> static bool forEachCall(Function &Intrin, T Callback) {
80 // Lowering all intrinsics in a function will delete multiple uses, so we
81 // can't use an early-inc-range. In case some remain, we don't want to look
82 // at them again. Unfortunately, Value::UseList is private, so we can't use a
83 // simple Use**. If LastUse is null, the next use to consider is
84 // Intrin.use_begin(), otherwise it's LastUse->getNext().
85 Use *LastUse = nullptr;
86 bool Changed = false;
87 while (!Intrin.use_empty() && (!LastUse || LastUse->getNext())) {
88 Use *U = LastUse ? LastUse->getNext() : &*Intrin.use_begin();
89 bool Removed = false;
90 // An intrinsic cannot have its address taken, so it cannot be an argument
91 // operand. It might be used as operand in debug metadata, though.
92 if (auto CI = dyn_cast<CallInst>(Val: U->getUser()))
93 Changed |= Removed = Callback(CI);
94 if (!Removed)
95 LastUse = U;
96 }
97 return Changed;
98}

static bool lowerLoadRelative(Function &F) {
  if (F.use_empty())
    return false;

  bool Changed = false;
  Type *Int32Ty = Type::getInt32Ty(F.getContext());

  for (Use &U : llvm::make_early_inc_range(F.uses())) {
    auto CI = dyn_cast<CallInst>(U.getUser());
    if (!CI || CI->getCalledOperand() != &F)
      continue;

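    // Lower the call to: ArgPtr + *(i32 *)(ArgPtr + Offset). A hypothetical
    // example (value names made up for illustration):
    //   %p = call ptr @llvm.load.relative.i32(ptr %base, i32 %off)
    // becomes
    //   %off.ptr = getelementptr i8, ptr %base, i32 %off
    //   %rel = load i32, ptr %off.ptr, align 4
    //   %p = getelementptr i8, ptr %base, i32 %rel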
    IRBuilder<> B(CI);
    Value *OffsetPtr =
        B.CreatePtrAdd(CI->getArgOperand(0), CI->getArgOperand(1));
    Value *OffsetI32 = B.CreateAlignedLoad(Int32Ty, OffsetPtr, Align(4));

    Value *ResultPtr = B.CreatePtrAdd(CI->getArgOperand(0), OffsetI32);

    CI->replaceAllUsesWith(ResultPtr);
    CI->eraseFromParent();
    Changed = true;
  }

  return Changed;
}

// ObjCARC knows whether an Objective-C runtime function must always be
// tail-called or must never be tail-called.
static CallInst::TailCallKind getOverridingTailCallKind(const Function &F) {
  objcarc::ARCInstKind Kind = objcarc::GetFunctionClass(&F);
  if (objcarc::IsAlwaysTail(Kind))
    return CallInst::TCK_Tail;
  else if (objcarc::IsNeverTail(Kind))
    return CallInst::TCK_NoTail;
  return CallInst::TCK_None;
}

static bool lowerObjCCall(Function &F, const char *NewFn,
                          bool setNonLazyBind = false) {
  assert(IntrinsicInst::mayLowerToFunctionCall(F.getIntrinsicID()) &&
         "Pre-ISel intrinsics do lower into regular function calls");
  if (F.use_empty())
    return false;

  // If we haven't already looked up this function, check to see if the
  // program already contains a function with this name.
  Module *M = F.getParent();
  FunctionCallee FCache = M->getOrInsertFunction(NewFn, F.getFunctionType());

  if (Function *Fn = dyn_cast<Function>(FCache.getCallee())) {
    Fn->setLinkage(F.getLinkage());
    if (setNonLazyBind && !Fn->isWeakForLinker()) {
      // If we have Native ARC, set nonlazybind attribute for these APIs for
      // performance.
      Fn->addFnAttr(Attribute::NonLazyBind);
    }
  }

  CallInst::TailCallKind OverridingTCK = getOverridingTailCallKind(F);

  for (Use &U : llvm::make_early_inc_range(F.uses())) {
    auto *CB = cast<CallBase>(U.getUser());

    if (CB->getCalledFunction() != &F) {
      assert(objcarc::getAttachedARCFunction(CB) == &F &&
             "use expected to be the argument of operand bundle "
             "\"clang.arc.attachedcall\"");
      U.set(FCache.getCallee());
      continue;
    }

    auto *CI = cast<CallInst>(CB);
    assert(CI->getCalledFunction() && "Cannot lower an indirect call!");

    IRBuilder<> Builder(CI->getParent(), CI->getIterator());
    SmallVector<Value *, 8> Args(CI->args());
    SmallVector<llvm::OperandBundleDef, 1> BundleList;
    CI->getOperandBundlesAsDefs(BundleList);
    CallInst *NewCI = Builder.CreateCall(FCache, Args, BundleList);
    NewCI->setName(CI->getName());

    // Try to set the most appropriate TailCallKind based on both the current
    // attributes and the ones that we could get from ObjCARC's special
    // knowledge of the runtime functions.
    //
    // std::max respects both requirements of notail and tail here:
    // * notail on either the call or from ObjCARC becomes notail
    // * tail on either side is stronger than none, but not notail
    CallInst::TailCallKind TCK = CI->getTailCallKind();
    NewCI->setTailCallKind(std::max(TCK, OverridingTCK));

    // Transfer the 'returned' attribute from the intrinsic to the call site.
    // By applying this only to intrinsic call sites, we avoid applying it to
    // non-ARC explicit calls to things like objc_retain which have not been
    // auto-upgraded to use the intrinsics.
    unsigned Index;
    if (F.getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
        Index)
      NewCI->addParamAttr(Index - AttributeList::FirstArgIndex,
                          Attribute::Returned);

    if (!CI->use_empty())
      CI->replaceAllUsesWith(NewCI);
    CI->eraseFromParent();
  }

  return true;
}

// TODO: Should refine based on estimated number of accesses (e.g. does it
// require splitting based on alignment)
bool PreISelIntrinsicLowering::shouldExpandMemIntrinsicWithSize(
    Value *Size, const TargetTransformInfo &TTI) {
  ConstantInt *CI = dyn_cast<ConstantInt>(Size);
  if (!CI)
    return true;
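  // An explicit command-line threshold, if given, overrides the target's
  // preferred inline size threshold.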
  uint64_t Threshold = MemIntrinsicExpandSizeThresholdOpt.getNumOccurrences()
                           ? MemIntrinsicExpandSizeThresholdOpt
                           : TTI.getMaxMemIntrinsicInlineSizeThreshold();
  uint64_t SizeVal = CI->getZExtValue();

  // Treat a threshold of 0 as a special case to force expansion of all
  // intrinsics, including size 0.
  return SizeVal > Threshold || Threshold == 0;
}

static bool canEmitLibcall(const TargetMachine *TM, Function *F,
                           RTLIB::Libcall LC) {
  // TODO: Should this consider the address space of the mem operation?
  if (!TM)
    return true;
  const TargetLowering *TLI = TM->getSubtargetImpl(*F)->getTargetLowering();
  return TLI->getLibcallName(LC) != nullptr;
}

static bool canEmitMemcpy(const TargetMachine *TM, Function *F) {
  // TODO: Should this consider the address space of the memcpy?
  if (!TM)
    return true;
  const TargetLowering *TLI = TM->getSubtargetImpl(*F)->getTargetLowering();
  return TLI->getMemcpyName() != nullptr;
}

// Return a value appropriate for use with the memset_pattern16 libcall, if
// possible and if we know how. (Adapted from the equivalent helper in
// LoopIdiomRecognize.)
static Constant *getMemSetPattern16Value(MemSetPatternInst *Inst,
                                         const TargetLibraryInfo &TLI) {
  // TODO: This could check for UndefValue because it can be merged into any
  // other valid pattern.

  // Don't emit libcalls if a non-default address space is being used.
  if (Inst->getRawDest()->getType()->getPointerAddressSpace() != 0)
    return nullptr;

  Value *V = Inst->getValue();
  Type *VTy = V->getType();
  const DataLayout &DL = Inst->getDataLayout();
  Module *M = Inst->getModule();

  if (!isLibFuncEmittable(M, &TLI, LibFunc_memset_pattern16))
    return nullptr;

  // If the value isn't a constant, we can't promote it to being in a constant
  // array. We could theoretically do a store to an alloca or something, but
  // that doesn't seem worthwhile.
  Constant *C = dyn_cast<Constant>(V);
  if (!C || isa<ConstantExpr>(C))
    return nullptr;

  // Only handle simple values that are a power of two bytes in size.
  uint64_t Size = DL.getTypeSizeInBits(VTy);
  if (!DL.typeSizeEqualsStoreSize(VTy) || !isPowerOf2_64(Size))
    return nullptr;

  // Don't care enough about darwin/ppc to implement this.
  if (DL.isBigEndian())
    return nullptr;

  // Convert to size in bytes.
  Size /= 8;

  // TODO: If C is larger than 16 bytes, we can try slicing it in half to see
  // if the top and bottom are the same (e.g. for vectors and large integers).
  if (Size > 16)
    return nullptr;

  // If the constant is exactly 16 bytes, just use it.
  if (Size == 16)
    return C;

  // Otherwise, we'll use an array of the constants.
  uint64_t ArraySize = 16 / Size;
  ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
  return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C));
}

// TODO: Handle atomic memcpy and memcpy.inline
// TODO: Pass ScalarEvolution
bool PreISelIntrinsicLowering::expandMemIntrinsicUses(
    Function &F, DenseMap<Constant *, GlobalVariable *> &CMap) const {
  Intrinsic::ID ID = F.getIntrinsicID();
  bool Changed = false;

  for (User *U : llvm::make_early_inc_range(F.users())) {
    Instruction *Inst = cast<Instruction>(U);

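    // For the plain memcpy/memmove/memset cases below: if the size is
    // statically known and above the expansion threshold, either leave the
    // call for libcall lowering (when UseMemIntrinsicLibFunc is set and the
    // target has the libcall) or expand it into an explicit loop here.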
    switch (ID) {
    case Intrinsic::memcpy: {
      auto *Memcpy = cast<MemCpyInst>(Inst);
      Function *ParentFunc = Memcpy->getFunction();
      const TargetTransformInfo &TTI = LookupTTI(*ParentFunc);
      if (shouldExpandMemIntrinsicWithSize(Memcpy->getLength(), TTI)) {
        if (UseMemIntrinsicLibFunc && canEmitMemcpy(TM, ParentFunc))
          break;

        // TODO: For optsize, emit the loop into a separate function
        expandMemCpyAsLoop(Memcpy, TTI);
        Changed = true;
        Memcpy->eraseFromParent();
      }

      break;
    }
    case Intrinsic::memcpy_inline: {
      // Only expand llvm.memcpy.inline with non-constant length in this
      // codepath, leaving the current SelectionDAG expansion for constant
      // length memcpy intrinsics undisturbed.
      auto *Memcpy = cast<MemCpyInst>(Inst);
      if (isa<ConstantInt>(Memcpy->getLength()))
        break;

      Function *ParentFunc = Memcpy->getFunction();
      const TargetTransformInfo &TTI = LookupTTI(*ParentFunc);
      expandMemCpyAsLoop(Memcpy, TTI);
      Changed = true;
      Memcpy->eraseFromParent();
      break;
    }
    case Intrinsic::memmove: {
      auto *Memmove = cast<MemMoveInst>(Inst);
      Function *ParentFunc = Memmove->getFunction();
      const TargetTransformInfo &TTI = LookupTTI(*ParentFunc);
      if (shouldExpandMemIntrinsicWithSize(Memmove->getLength(), TTI)) {
        if (UseMemIntrinsicLibFunc &&
            canEmitLibcall(TM, ParentFunc, RTLIB::MEMMOVE))
          break;

        if (expandMemMoveAsLoop(Memmove, TTI)) {
          Changed = true;
          Memmove->eraseFromParent();
        }
      }

      break;
    }
    case Intrinsic::memset: {
      auto *Memset = cast<MemSetInst>(Inst);
      Function *ParentFunc = Memset->getFunction();
      const TargetTransformInfo &TTI = LookupTTI(*ParentFunc);
      if (shouldExpandMemIntrinsicWithSize(Memset->getLength(), TTI)) {
        if (UseMemIntrinsicLibFunc &&
            canEmitLibcall(TM, ParentFunc, RTLIB::MEMSET))
          break;

        expandMemSetAsLoop(Memset);
        Changed = true;
        Memset->eraseFromParent();
      }

      break;
    }
    case Intrinsic::memset_inline: {
      // Only expand llvm.memset.inline with non-constant length in this
      // codepath, leaving the current SelectionDAG expansion for constant
      // length memset intrinsics undisturbed.
      auto *Memset = cast<MemSetInst>(Inst);
      if (isa<ConstantInt>(Memset->getLength()))
        break;

      expandMemSetAsLoop(Memset);
      Changed = true;
      Memset->eraseFromParent();
      break;
    }
    case Intrinsic::experimental_memset_pattern: {
      auto *Memset = cast<MemSetPatternInst>(Inst);
      const TargetLibraryInfo &TLI = LookupTLI(*Memset->getFunction());
      Constant *PatternValue = getMemSetPattern16Value(Memset, TLI);
      if (!PatternValue) {
        // If it isn't possible to emit a memset_pattern16 libcall, expand to
        // a loop instead.
        expandMemSetPatternAsLoop(Memset);
        Changed = true;
        Memset->eraseFromParent();
        break;
      }
      // FIXME: There is currently no profitability calculation for emitting
      // the libcall vs expanding the memset.pattern directly.
      IRBuilder<> Builder(Inst);
      Module *M = Memset->getModule();
      const DataLayout &DL = Memset->getDataLayout();

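      // The memset_pattern16 libcall (a Darwin libc extension) has the
      // prototype:
      //   void memset_pattern16(void *b, const void *pattern16, size_t len);
      // where len is the total number of bytes to fill.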
      Type *DestPtrTy = Memset->getRawDest()->getType();
      Type *SizeTTy = TLI.getSizeTType(*M);
      StringRef FuncName = "memset_pattern16";
      FunctionCallee MSP = getOrInsertLibFunc(M, TLI, LibFunc_memset_pattern16,
                                              Builder.getVoidTy(), DestPtrTy,
                                              Builder.getPtrTy(), SizeTTy);
      inferNonMandatoryLibFuncAttrs(M, FuncName, TLI);

      // Otherwise we should form a memset_pattern16. PatternValue is known
      // to be a 16-byte constant. Put the value into a mergeable global.
      assert(Memset->getRawDest()->getType()->getPointerAddressSpace() == 0 &&
             "Should have skipped if non-zero AS");
      GlobalVariable *GV;
      auto It = CMap.find(PatternValue);
      if (It != CMap.end()) {
        GV = It->second;
      } else {
        GV = new GlobalVariable(
            *M, PatternValue->getType(), /*isConstant=*/true,
            GlobalValue::PrivateLinkage, PatternValue, ".memset_pattern");
        GV->setUnnamedAddr(
            GlobalValue::UnnamedAddr::Global); // Ok to merge these.
        // TODO: Consider relaxing alignment requirement.
        GV->setAlignment(Align(16));
        CMap[PatternValue] = GV;
      }
      Value *PatternPtr = GV;
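      // The intrinsic's length counts pattern elements, while the libcall
      // takes a byte count, so scale by the pattern element's alloc size.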
      Value *NumBytes = Builder.CreateMul(
          TLI.getAsSizeT(DL.getTypeAllocSize(Memset->getValue()->getType()),
                         *M),
          Builder.CreateZExtOrTrunc(Memset->getLength(), SizeTTy));
      CallInst *MemsetPattern16Call =
          Builder.CreateCall(MSP, {Memset->getRawDest(), PatternPtr, NumBytes});
      MemsetPattern16Call->setAAMetadata(Memset->getAAMetadata());
      // Preserve any call site attributes on the destination pointer
      // argument (e.g. alignment).
      AttrBuilder ArgAttrs(Memset->getContext(),
                           Memset->getAttributes().getParamAttrs(0));
      MemsetPattern16Call->setAttributes(
          MemsetPattern16Call->getAttributes().addParamAttributes(
              Memset->getContext(), 0, ArgAttrs));
      Changed = true;
      Memset->eraseFromParent();
      break;
    }
    default:
      llvm_unreachable("unhandled intrinsic");
    }
  }

  return Changed;
}

bool PreISelIntrinsicLowering::lowerIntrinsics(Module &M) const {
  // Map each unique memset pattern constant to the global created for it, so
  // identical patterns share one global across the module.
  DenseMap<Constant *, GlobalVariable *> CMap;
  bool Changed = false;
  for (Function &F : M) {
    switch (F.getIntrinsicID()) {
    default:
      break;
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_inline:
    case Intrinsic::memmove:
    case Intrinsic::memset:
    case Intrinsic::memset_inline:
    case Intrinsic::experimental_memset_pattern:
      Changed |= expandMemIntrinsicUses(F, CMap);
      break;
    case Intrinsic::load_relative:
      Changed |= lowerLoadRelative(F);
      break;
    case Intrinsic::is_constant:
    case Intrinsic::objectsize:
      Changed |= forEachCall(F, [&](CallInst *CI) {
        Function *Parent = CI->getParent()->getParent();
        TargetLibraryInfo &TLI = LookupTLI(*Parent);
        // Intrinsics in unreachable code are not lowered.
        bool Changed = lowerConstantIntrinsics(*Parent, TLI, /*DT=*/nullptr);
        return Changed;
      });
      break;
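// Handle every vector-predication intrinsic listed in VPIntrinsics.def;
// expandVectorPredicationIntrinsic decides, based on TTI, whether the target
// supports each one natively or it must be expanded.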
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:
#include "llvm/IR/VPIntrinsics.def"
      forEachCall(F, [&](CallInst *CI) {
        Function *Parent = CI->getParent()->getParent();
        const TargetTransformInfo &TTI = LookupTTI(*Parent);
        auto *VPI = cast<VPIntrinsic>(CI);
        VPExpansionDetails ED = expandVectorPredicationIntrinsic(*VPI, TTI);
        // Expansion of VP intrinsics may change the IR but not actually
        // replace the intrinsic, so update Changed for the pass
        // and compute Removed for forEachCall.
        Changed |= ED != VPExpansionDetails::IntrinsicUnchanged;
        bool Removed = ED == VPExpansionDetails::IntrinsicReplaced;
        return Removed;
      });
      break;
    case Intrinsic::objc_autorelease:
      Changed |= lowerObjCCall(F, "objc_autorelease");
      break;
    case Intrinsic::objc_autoreleasePoolPop:
      Changed |= lowerObjCCall(F, "objc_autoreleasePoolPop");
      break;
    case Intrinsic::objc_autoreleasePoolPush:
      Changed |= lowerObjCCall(F, "objc_autoreleasePoolPush");
      break;
    case Intrinsic::objc_autoreleaseReturnValue:
      Changed |= lowerObjCCall(F, "objc_autoreleaseReturnValue");
      break;
    case Intrinsic::objc_copyWeak:
      Changed |= lowerObjCCall(F, "objc_copyWeak");
      break;
    case Intrinsic::objc_destroyWeak:
      Changed |= lowerObjCCall(F, "objc_destroyWeak");
      break;
    case Intrinsic::objc_initWeak:
      Changed |= lowerObjCCall(F, "objc_initWeak");
      break;
    case Intrinsic::objc_loadWeak:
      Changed |= lowerObjCCall(F, "objc_loadWeak");
      break;
    case Intrinsic::objc_loadWeakRetained:
      Changed |= lowerObjCCall(F, "objc_loadWeakRetained");
      break;
    case Intrinsic::objc_moveWeak:
      Changed |= lowerObjCCall(F, "objc_moveWeak");
      break;
    case Intrinsic::objc_release:
      Changed |= lowerObjCCall(F, "objc_release", true);
      break;
    case Intrinsic::objc_retain:
      Changed |= lowerObjCCall(F, "objc_retain", true);
      break;
    case Intrinsic::objc_retainAutorelease:
      Changed |= lowerObjCCall(F, "objc_retainAutorelease");
      break;
    case Intrinsic::objc_retainAutoreleaseReturnValue:
      Changed |= lowerObjCCall(F, "objc_retainAutoreleaseReturnValue");
      break;
    case Intrinsic::objc_retainAutoreleasedReturnValue:
      Changed |= lowerObjCCall(F, "objc_retainAutoreleasedReturnValue");
      break;
    case Intrinsic::objc_claimAutoreleasedReturnValue:
      Changed |= lowerObjCCall(F, "objc_claimAutoreleasedReturnValue");
      break;
    case Intrinsic::objc_retainBlock:
      Changed |= lowerObjCCall(F, "objc_retainBlock");
      break;
    case Intrinsic::objc_storeStrong:
      Changed |= lowerObjCCall(F, "objc_storeStrong");
      break;
    case Intrinsic::objc_storeWeak:
      Changed |= lowerObjCCall(F, "objc_storeWeak");
      break;
    case Intrinsic::objc_unsafeClaimAutoreleasedReturnValue:
      Changed |= lowerObjCCall(F, "objc_unsafeClaimAutoreleasedReturnValue");
      break;
    case Intrinsic::objc_retainedObject:
      Changed |= lowerObjCCall(F, "objc_retainedObject");
      break;
    case Intrinsic::objc_unretainedObject:
      Changed |= lowerObjCCall(F, "objc_unretainedObject");
      break;
    case Intrinsic::objc_unretainedPointer:
      Changed |= lowerObjCCall(F, "objc_unretainedPointer");
      break;
    case Intrinsic::objc_retain_autorelease:
      Changed |= lowerObjCCall(F, "objc_retain_autorelease");
      break;
    case Intrinsic::objc_sync_enter:
      Changed |= lowerObjCCall(F, "objc_sync_enter");
      break;
    case Intrinsic::objc_sync_exit:
      Changed |= lowerObjCCall(F, "objc_sync_exit");
      break;
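    // Scalable-vector calls to these math intrinsics cannot be scalarized by
    // SelectionDAG, so when the target would otherwise have to expand the
    // operation, lower them to a loop over the elements here.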
    case Intrinsic::exp:
    case Intrinsic::exp2:
      Changed |= forEachCall(F, [&](CallInst *CI) {
        Type *Ty = CI->getArgOperand(0)->getType();
        if (!isa<ScalableVectorType>(Ty))
          return false;
        const TargetLowering *TL =
            TM->getSubtargetImpl(F)->getTargetLowering();
        unsigned Op = TL->IntrinsicIDToISD(F.getIntrinsicID());
        if (!TL->isOperationExpand(Op, EVT::getEVT(Ty)))
          return false;
        return lowerUnaryVectorIntrinsicAsLoop(M, CI);
      });
      break;
    }
  }
  return Changed;
}

namespace {

class PreISelIntrinsicLoweringLegacyPass : public ModulePass {
public:
  static char ID;

  PreISelIntrinsicLoweringLegacyPass() : ModulePass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
  }

  bool runOnModule(Module &M) override {
    auto LookupTTI = [this](Function &F) -> TargetTransformInfo & {
      return this->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    };
    auto LookupTLI = [this](Function &F) -> TargetLibraryInfo & {
      return this->getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    };

    const auto *TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
    PreISelIntrinsicLowering Lowering(TM, LookupTTI, LookupTLI);
    return Lowering.lowerIntrinsics(M);
  }
};

} // end anonymous namespace

char PreISelIntrinsicLoweringLegacyPass::ID;

INITIALIZE_PASS_BEGIN(PreISelIntrinsicLoweringLegacyPass,
                      "pre-isel-intrinsic-lowering",
                      "Pre-ISel Intrinsic Lowering", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(PreISelIntrinsicLoweringLegacyPass,
                    "pre-isel-intrinsic-lowering",
                    "Pre-ISel Intrinsic Lowering", false, false)

ModulePass *llvm::createPreISelIntrinsicLoweringPass() {
  return new PreISelIntrinsicLoweringLegacyPass();
}

PreservedAnalyses PreISelIntrinsicLoweringPass::run(Module &M,
                                                    ModuleAnalysisManager &AM) {
  auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();

  auto LookupTTI = [&FAM](Function &F) -> TargetTransformInfo & {
    return FAM.getResult<TargetIRAnalysis>(F);
  };
  auto LookupTLI = [&FAM](Function &F) -> TargetLibraryInfo & {
    return FAM.getResult<TargetLibraryAnalysis>(F);
  };

  PreISelIntrinsicLowering Lowering(TM, LookupTTI, LookupTLI);
  if (!Lowering.lowerIntrinsics(M))
    return PreservedAnalyses::all();
  else
    return PreservedAnalyses::none();
}