1//===- CloneFunction.cpp - Clone a function into another function ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the CloneFunctionInto interface, which is used as the
10// low-level function cloner. This is used by the CloneFunction and function
11// inliner to do the dirty work of copying the body of a function around.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/ADT/SmallVector.h"
16#include "llvm/ADT/Statistic.h"
17#include "llvm/Analysis/ConstantFolding.h"
18#include "llvm/Analysis/DomTreeUpdater.h"
19#include "llvm/Analysis/InstructionSimplify.h"
20#include "llvm/Analysis/LoopInfo.h"
21#include "llvm/IR/AttributeMask.h"
22#include "llvm/IR/CFG.h"
23#include "llvm/IR/Constants.h"
24#include "llvm/IR/DebugInfo.h"
25#include "llvm/IR/DerivedTypes.h"
26#include "llvm/IR/Function.h"
27#include "llvm/IR/InstIterator.h"
28#include "llvm/IR/Instructions.h"
29#include "llvm/IR/IntrinsicInst.h"
30#include "llvm/IR/LLVMContext.h"
31#include "llvm/IR/MDBuilder.h"
32#include "llvm/IR/Metadata.h"
33#include "llvm/IR/Module.h"
34#include "llvm/Transforms/Utils/BasicBlockUtils.h"
35#include "llvm/Transforms/Utils/Cloning.h"
36#include "llvm/Transforms/Utils/Local.h"
37#include "llvm/Transforms/Utils/ValueMapper.h"
38#include <cstdint>
39#include <map>
40#include <optional>
41using namespace llvm;
42
43#define DEBUG_TYPE "clone-function"
44
45STATISTIC(RemappedAtomMax, "Highest global NextAtomGroup (after mapping)");
46
47void llvm::mapAtomInstance(const DebugLoc &DL, ValueToValueMapTy &VMap) {
48 uint64_t CurGroup = DL->getAtomGroup();
49 if (!CurGroup)
50 return;
51
52 // Try inserting a new entry. If there's already a mapping for this atom
53 // then there's nothing to do.
54 auto [It, Inserted] = VMap.AtomMap.insert(KV: {{DL.getInlinedAt(), CurGroup}, 0});
55 if (!Inserted)
56 return;
57
58 // Map entry to a new atom group.
59 uint64_t NewGroup = DL->getContext().incNextDILocationAtomGroup();
60 assert(NewGroup > CurGroup && "Next should always be greater than current");
61 It->second = NewGroup;
62
63 RemappedAtomMax = std::max<uint64_t>(a: NewGroup, b: RemappedAtomMax);
64}
65
66static void collectDebugInfoFromInstructions(const Function &F,
67 DebugInfoFinder &DIFinder) {
68 const Module *M = F.getParent();
69 if (!M)
70 return;
71 // Inspect instructions to process e.g. DILexicalBlocks of inlined functions
72 for (const Instruction &I : instructions(F))
73 DIFinder.processInstruction(M: *M, I);
74}
75
76// Create a predicate that matches the metadata that should be identity mapped
77// during function cloning.
78static MetadataPredicate
79createIdentityMDPredicate(const Function &F, CloneFunctionChangeType Changes) {
80 if (Changes >= CloneFunctionChangeType::DifferentModule)
81 return [](const Metadata *MD) { return false; };
82
83 DISubprogram *SPClonedWithinModule = F.getSubprogram();
84
85 // Don't clone inlined subprograms.
86 auto ShouldKeep = [SPClonedWithinModule](const DISubprogram *SP) -> bool {
87 return SP != SPClonedWithinModule;
88 };
89
90 return [=](const Metadata *MD) {
91 // Avoid cloning types, compile units, and (other) subprograms.
92 if (isa<DICompileUnit>(Val: MD) || isa<DIType>(Val: MD))
93 return true;
94
95 if (auto *SP = dyn_cast<DISubprogram>(Val: MD))
96 return ShouldKeep(SP);
97
98 // If a subprogram isn't going to be cloned skip its lexical blocks as well.
99 if (auto *LScope = dyn_cast<DILocalScope>(Val: MD))
100 return ShouldKeep(LScope->getSubprogram());
101
102 // Avoid cloning local variables of subprograms that won't be cloned.
103 if (auto *DV = dyn_cast<DILocalVariable>(Val: MD))
104 if (auto *S = dyn_cast_or_null<DILocalScope>(Val: DV->getScope()))
105 return ShouldKeep(S->getSubprogram());
106
107 return false;
108 };
109}
110
111/// See comments in Cloning.h.
112BasicBlock *llvm::CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap,
113 const Twine &NameSuffix, Function *F,
114 ClonedCodeInfo *CodeInfo, bool MapAtoms) {
115 BasicBlock *NewBB = BasicBlock::Create(Context&: BB->getContext(), Name: "", Parent: F);
116 if (BB->hasName())
117 NewBB->setName(BB->getName() + NameSuffix);
118
119 bool hasCalls = false, hasDynamicAllocas = false, hasMemProfMetadata = false;
120
121 // Loop over all instructions, and copy them over.
122 for (const Instruction &I : *BB) {
123 Instruction *NewInst = I.clone();
124 if (I.hasName())
125 NewInst->setName(I.getName() + NameSuffix);
126
127 NewInst->insertBefore(BB&: *NewBB, InsertPos: NewBB->end());
128 NewInst->cloneDebugInfoFrom(From: &I);
129
130 VMap[&I] = NewInst; // Add instruction map to value.
131
132 if (MapAtoms) {
133 if (const DebugLoc &DL = NewInst->getDebugLoc())
134 mapAtomInstance(DL: DL.get(), VMap);
135 }
136
137 if (isa<CallInst>(Val: I) && !I.isDebugOrPseudoInst()) {
138 hasCalls = true;
139 hasMemProfMetadata |= I.hasMetadata(KindID: LLVMContext::MD_memprof);
140 hasMemProfMetadata |= I.hasMetadata(KindID: LLVMContext::MD_callsite);
141 }
142 if (const AllocaInst *AI = dyn_cast<AllocaInst>(Val: &I)) {
143 if (!AI->isStaticAlloca()) {
144 hasDynamicAllocas = true;
145 }
146 }
147 }
148
149 if (CodeInfo) {
150 CodeInfo->ContainsCalls |= hasCalls;
151 CodeInfo->ContainsMemProfMetadata |= hasMemProfMetadata;
152 CodeInfo->ContainsDynamicAllocas |= hasDynamicAllocas;
153 }
154 return NewBB;
155}
156
157void llvm::CloneFunctionAttributesInto(Function *NewFunc,
158 const Function *OldFunc,
159 ValueToValueMapTy &VMap,
160 bool ModuleLevelChanges,
161 ValueMapTypeRemapper *TypeMapper,
162 ValueMaterializer *Materializer) {
163 // Copy all attributes other than those stored in Function's AttributeList
164 // which holds e.g. parameters and return value attributes.
165 AttributeList NewAttrs = NewFunc->getAttributes();
166 NewFunc->copyAttributesFrom(Src: OldFunc);
167 NewFunc->setAttributes(NewAttrs);
168
169 const RemapFlags FuncGlobalRefFlags =
170 ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges;
171
172 // Fix up the personality function that got copied over.
173 if (OldFunc->hasPersonalityFn())
174 NewFunc->setPersonalityFn(MapValue(V: OldFunc->getPersonalityFn(), VM&: VMap,
175 Flags: FuncGlobalRefFlags, TypeMapper,
176 Materializer));
177
178 if (OldFunc->hasPrefixData()) {
179 NewFunc->setPrefixData(MapValue(V: OldFunc->getPrefixData(), VM&: VMap,
180 Flags: FuncGlobalRefFlags, TypeMapper,
181 Materializer));
182 }
183
184 if (OldFunc->hasPrologueData()) {
185 NewFunc->setPrologueData(MapValue(V: OldFunc->getPrologueData(), VM&: VMap,
186 Flags: FuncGlobalRefFlags, TypeMapper,
187 Materializer));
188 }
189
190 SmallVector<AttributeSet, 4> NewArgAttrs(NewFunc->arg_size());
191 AttributeList OldAttrs = OldFunc->getAttributes();
192
193 // Clone any argument attributes that are present in the VMap.
194 for (const Argument &OldArg : OldFunc->args()) {
195 if (Argument *NewArg = dyn_cast<Argument>(Val&: VMap[&OldArg])) {
196 // Remap the parameter indices.
197 NewArgAttrs[NewArg->getArgNo()] =
198 OldAttrs.getParamAttrs(ArgNo: OldArg.getArgNo());
199 }
200 }
201
202 NewFunc->setAttributes(
203 AttributeList::get(C&: NewFunc->getContext(), FnAttrs: OldAttrs.getFnAttrs(),
204 RetAttrs: OldAttrs.getRetAttrs(), ArgAttrs: NewArgAttrs));
205}
206
/// Copy all function-level metadata attachments from \p OldFunc to
/// \p NewFunc, remapping each node through \p VMap. Nodes matched by
/// \p IdentityMD are reused as-is instead of being cloned.
void llvm::CloneFunctionMetadataInto(Function &NewFunc, const Function &OldFunc,
                                     ValueToValueMapTy &VMap,
                                     RemapFlags RemapFlag,
                                     ValueMapTypeRemapper *TypeMapper,
                                     ValueMaterializer *Materializer,
                                     const MetadataPredicate *IdentityMD) {
  SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
  OldFunc.getAllMetadata(MDs);
  for (const auto &[Kind, MD] : MDs) {
    NewFunc.addMetadata(Kind, *MapMetadata(MD, VMap, RemapFlag, TypeMapper,
                                           Materializer, IdentityMD));
  }
}
220
221void llvm::CloneFunctionBodyInto(Function &NewFunc, const Function &OldFunc,
222 ValueToValueMapTy &VMap, RemapFlags RemapFlag,
223 SmallVectorImpl<ReturnInst *> &Returns,
224 const char *NameSuffix,
225 ClonedCodeInfo *CodeInfo,
226 ValueMapTypeRemapper *TypeMapper,
227 ValueMaterializer *Materializer,
228 const MetadataPredicate *IdentityMD) {
229 if (OldFunc.isDeclaration())
230 return;
231
232 // Loop over all of the basic blocks in the function, cloning them as
233 // appropriate. Note that we save BE this way in order to handle cloning of
234 // recursive functions into themselves.
235 for (const BasicBlock &BB : OldFunc) {
236 // Create a new basic block and copy instructions into it!
237 BasicBlock *CBB =
238 CloneBasicBlock(BB: &BB, VMap, NameSuffix, F: &NewFunc, CodeInfo);
239
240 // Add basic block mapping.
241 VMap[&BB] = CBB;
242
243 // It is only legal to clone a function if a block address within that
244 // function is never referenced outside of the function. Given that, we
245 // want to map block addresses from the old function to block addresses in
246 // the clone. (This is different from the generic ValueMapper
247 // implementation, which generates an invalid blockaddress when
248 // cloning a function.)
249 if (BB.hasAddressTaken()) {
250 Constant *OldBBAddr = BlockAddress::get(F: const_cast<Function *>(&OldFunc),
251 BB: const_cast<BasicBlock *>(&BB));
252 VMap[OldBBAddr] = BlockAddress::get(F: &NewFunc, BB: CBB);
253 }
254
255 // Note return instructions for the caller.
256 if (ReturnInst *RI = dyn_cast<ReturnInst>(Val: CBB->getTerminator()))
257 Returns.push_back(Elt: RI);
258 }
259
260 // Loop over all of the instructions in the new function, fixing up operand
261 // references as we go. This uses VMap to do all the hard work.
262 for (Function::iterator
263 BB = cast<BasicBlock>(Val&: VMap[&OldFunc.front()])->getIterator(),
264 BE = NewFunc.end();
265 BB != BE; ++BB)
266 // Loop over all instructions, fixing each one as we find it, and any
267 // attached debug-info records.
268 for (Instruction &II : *BB) {
269 RemapInstruction(I: &II, VM&: VMap, Flags: RemapFlag, TypeMapper, Materializer,
270 IdentityMD);
271 RemapDbgRecordRange(M: II.getModule(), Range: II.getDbgRecordRange(), VM&: VMap,
272 Flags: RemapFlag, TypeMapper, Materializer, IdentityMD);
273 }
274}
275
276// Clone OldFunc into NewFunc, transforming the old arguments into references to
277// VMap values.
278void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
279 ValueToValueMapTy &VMap,
280 CloneFunctionChangeType Changes,
281 SmallVectorImpl<ReturnInst *> &Returns,
282 const char *NameSuffix, ClonedCodeInfo *CodeInfo,
283 ValueMapTypeRemapper *TypeMapper,
284 ValueMaterializer *Materializer) {
285 assert(NameSuffix && "NameSuffix cannot be null!");
286
287#ifndef NDEBUG
288 for (const Argument &I : OldFunc->args())
289 assert(VMap.count(&I) && "No mapping from source argument specified!");
290#endif
291
292 bool ModuleLevelChanges = Changes > CloneFunctionChangeType::LocalChangesOnly;
293
294 CloneFunctionAttributesInto(NewFunc, OldFunc, VMap, ModuleLevelChanges,
295 TypeMapper, Materializer);
296
297 // Everything else beyond this point deals with function instructions,
298 // so if we are dealing with a function declaration, we're done.
299 if (OldFunc->isDeclaration())
300 return;
301
302 if (Changes < CloneFunctionChangeType::DifferentModule) {
303 assert((NewFunc->getParent() == nullptr ||
304 NewFunc->getParent() == OldFunc->getParent()) &&
305 "Expected NewFunc to have the same parent, or no parent");
306 } else {
307 assert((NewFunc->getParent() == nullptr ||
308 NewFunc->getParent() != OldFunc->getParent()) &&
309 "Expected NewFunc to have different parents, or no parent");
310
311 if (Changes == CloneFunctionChangeType::DifferentModule) {
312 assert(NewFunc->getParent() &&
313 "Need parent of new function to maintain debug info invariants");
314 }
315 }
316
317 MetadataPredicate IdentityMD = createIdentityMDPredicate(F: *OldFunc, Changes);
318
319 // Cloning is always a Module level operation, since Metadata needs to be
320 // cloned.
321 const RemapFlags RemapFlag = RF_None;
322
323 CloneFunctionMetadataInto(NewFunc&: *NewFunc, OldFunc: *OldFunc, VMap, RemapFlag, TypeMapper,
324 Materializer, IdentityMD: &IdentityMD);
325
326 CloneFunctionBodyInto(NewFunc&: *NewFunc, OldFunc: *OldFunc, VMap, RemapFlag, Returns,
327 NameSuffix, CodeInfo, TypeMapper, Materializer,
328 IdentityMD: &IdentityMD);
329
330 // Only update !llvm.dbg.cu for DifferentModule (not CloneModule). In the
331 // same module, the compile unit will already be listed (or not). When
332 // cloning a module, CloneModule() will handle creating the named metadata.
333 if (Changes != CloneFunctionChangeType::DifferentModule)
334 return;
335
336 // Update !llvm.dbg.cu with compile units added to the new module if this
337 // function is being cloned in isolation.
338 //
339 // FIXME: This is making global / module-level changes, which doesn't seem
340 // like the right encapsulation Consider dropping the requirement to update
341 // !llvm.dbg.cu (either obsoleting the node, or restricting it to
342 // non-discardable compile units) instead of discovering compile units by
343 // visiting the metadata attached to global values, which would allow this
344 // code to be deleted. Alternatively, perhaps give responsibility for this
345 // update to CloneFunctionInto's callers.
346 Module *NewModule = NewFunc->getParent();
347 NamedMDNode *NMD = NewModule->getOrInsertNamedMetadata(Name: "llvm.dbg.cu");
348 // Avoid multiple insertions of the same DICompileUnit to NMD.
349 SmallPtrSet<const void *, 8> Visited(llvm::from_range, NMD->operands());
350
351 // Collect and clone all the compile units referenced from the instructions in
352 // the function (e.g. as instructions' scope).
353 DebugInfoFinder DIFinder;
354 collectDebugInfoFromInstructions(F: *OldFunc, DIFinder);
355 for (DICompileUnit *Unit : DIFinder.compile_units()) {
356 MDNode *MappedUnit =
357 MapMetadata(MD: Unit, VM&: VMap, Flags: RF_None, TypeMapper, Materializer);
358 if (Visited.insert(Ptr: MappedUnit).second)
359 NMD->addOperand(M: MappedUnit);
360 }
361}
362
363/// Return a copy of the specified function and add it to that function's
364/// module. Also, any references specified in the VMap are changed to refer to
365/// their mapped value instead of the original one. If any of the arguments to
366/// the function are in the VMap, the arguments are deleted from the resultant
367/// function. The VMap is updated to include mappings from all of the
368/// instructions and basicblocks in the function from their old to new values.
369///
370Function *llvm::CloneFunction(Function *F, ValueToValueMapTy &VMap,
371 ClonedCodeInfo *CodeInfo) {
372 std::vector<Type *> ArgTypes;
373
374 // The user might be deleting arguments to the function by specifying them in
375 // the VMap. If so, we need to not add the arguments to the arg ty vector
376 //
377 for (const Argument &I : F->args())
378 if (VMap.count(Val: &I) == 0) // Haven't mapped the argument to anything yet?
379 ArgTypes.push_back(x: I.getType());
380
381 // Create a new function type...
382 FunctionType *FTy =
383 FunctionType::get(Result: F->getFunctionType()->getReturnType(), Params: ArgTypes,
384 isVarArg: F->getFunctionType()->isVarArg());
385
386 // Create the new function...
387 Function *NewF = Function::Create(Ty: FTy, Linkage: F->getLinkage(), AddrSpace: F->getAddressSpace(),
388 N: F->getName(), M: F->getParent());
389
390 // Loop over the arguments, copying the names of the mapped arguments over...
391 Function::arg_iterator DestI = NewF->arg_begin();
392 for (const Argument &I : F->args())
393 if (VMap.count(Val: &I) == 0) { // Is this argument preserved?
394 DestI->setName(I.getName()); // Copy the name over...
395 VMap[&I] = &*DestI++; // Add mapping to VMap
396 }
397
398 SmallVector<ReturnInst *, 8> Returns; // Ignore returns cloned.
399 CloneFunctionInto(NewFunc: NewF, OldFunc: F, VMap, Changes: CloneFunctionChangeType::LocalChangesOnly,
400 Returns, NameSuffix: "", CodeInfo);
401
402 return NewF;
403}
404
405namespace {
406/// This is a private class used to implement CloneAndPruneFunctionInto.
407struct PruningFunctionCloner {
408 Function *NewFunc;
409 const Function *OldFunc;
410 ValueToValueMapTy &VMap;
411 bool ModuleLevelChanges;
412 const char *NameSuffix;
413 ClonedCodeInfo *CodeInfo;
414 bool HostFuncIsStrictFP;
415
416 Instruction *cloneInstruction(BasicBlock::const_iterator II);
417
418public:
419 PruningFunctionCloner(Function *newFunc, const Function *oldFunc,
420 ValueToValueMapTy &valueMap, bool moduleLevelChanges,
421 const char *nameSuffix, ClonedCodeInfo *codeInfo)
422 : NewFunc(newFunc), OldFunc(oldFunc), VMap(valueMap),
423 ModuleLevelChanges(moduleLevelChanges), NameSuffix(nameSuffix),
424 CodeInfo(codeInfo) {
425 HostFuncIsStrictFP =
426 newFunc->getAttributes().hasFnAttr(Kind: Attribute::StrictFP);
427 }
428
429 /// The specified block is found to be reachable, clone it and
430 /// anything that it can reach.
431 void CloneBlock(const BasicBlock *BB, BasicBlock::const_iterator StartingInst,
432 std::vector<const BasicBlock *> &ToClone);
433};
434} // namespace
435
436Instruction *
437PruningFunctionCloner::cloneInstruction(BasicBlock::const_iterator II) {
438 const Instruction &OldInst = *II;
439 Instruction *NewInst = nullptr;
440 if (HostFuncIsStrictFP) {
441 Intrinsic::ID CIID = getConstrainedIntrinsicID(Instr: OldInst);
442 if (CIID != Intrinsic::not_intrinsic) {
443 // Instead of cloning the instruction, a call to constrained intrinsic
444 // should be created.
445 // Assume the first arguments of constrained intrinsics are the same as
446 // the operands of original instruction.
447
448 // Determine overloaded types of the intrinsic.
449 SmallVector<Type *, 2> TParams;
450 SmallVector<Intrinsic::IITDescriptor, 8> Descriptor;
451 getIntrinsicInfoTableEntries(id: CIID, T&: Descriptor);
452 for (unsigned I = 0, E = Descriptor.size(); I != E; ++I) {
453 Intrinsic::IITDescriptor Operand = Descriptor[I];
454 switch (Operand.Kind) {
455 case Intrinsic::IITDescriptor::Argument:
456 if (Operand.getArgumentKind() !=
457 Intrinsic::IITDescriptor::AK_MatchType) {
458 if (I == 0)
459 TParams.push_back(Elt: OldInst.getType());
460 else
461 TParams.push_back(Elt: OldInst.getOperand(i: I - 1)->getType());
462 }
463 break;
464 case Intrinsic::IITDescriptor::SameVecWidthArgument:
465 ++I;
466 break;
467 default:
468 break;
469 }
470 }
471
472 // Create intrinsic call.
473 LLVMContext &Ctx = NewFunc->getContext();
474 Function *IFn = Intrinsic::getOrInsertDeclaration(M: NewFunc->getParent(),
475 id: CIID, Tys: TParams);
476 SmallVector<Value *, 4> Args;
477 unsigned NumOperands = OldInst.getNumOperands();
478 if (isa<CallInst>(Val: OldInst))
479 --NumOperands;
480 for (unsigned I = 0; I < NumOperands; ++I) {
481 Value *Op = OldInst.getOperand(i: I);
482 Args.push_back(Elt: Op);
483 }
484 if (const auto *CmpI = dyn_cast<FCmpInst>(Val: &OldInst)) {
485 FCmpInst::Predicate Pred = CmpI->getPredicate();
486 StringRef PredName = FCmpInst::getPredicateName(P: Pred);
487 Args.push_back(Elt: MetadataAsValue::get(Context&: Ctx, MD: MDString::get(Context&: Ctx, Str: PredName)));
488 }
489
490 // The last arguments of a constrained intrinsic are metadata that
491 // represent rounding mode (absents in some intrinsics) and exception
492 // behavior. The inlined function uses default settings.
493 if (Intrinsic::hasConstrainedFPRoundingModeOperand(QID: CIID))
494 Args.push_back(
495 Elt: MetadataAsValue::get(Context&: Ctx, MD: MDString::get(Context&: Ctx, Str: "round.tonearest")));
496 Args.push_back(
497 Elt: MetadataAsValue::get(Context&: Ctx, MD: MDString::get(Context&: Ctx, Str: "fpexcept.ignore")));
498
499 NewInst = CallInst::Create(Func: IFn, Args, NameStr: OldInst.getName() + ".strict");
500 }
501 }
502 if (!NewInst)
503 NewInst = II->clone();
504 return NewInst;
505}
506
507/// The specified block is found to be reachable, clone it and
508/// anything that it can reach.
509void PruningFunctionCloner::CloneBlock(
510 const BasicBlock *BB, BasicBlock::const_iterator StartingInst,
511 std::vector<const BasicBlock *> &ToClone) {
512 WeakTrackingVH &BBEntry = VMap[BB];
513
514 // Have we already cloned this block?
515 if (BBEntry)
516 return;
517
518 // Nope, clone it now.
519 BasicBlock *NewBB;
520 Twine NewName(BB->hasName() ? Twine(BB->getName()) + NameSuffix : "");
521 BBEntry = NewBB = BasicBlock::Create(Context&: BB->getContext(), Name: NewName, Parent: NewFunc);
522
523 // It is only legal to clone a function if a block address within that
524 // function is never referenced outside of the function. Given that, we
525 // want to map block addresses from the old function to block addresses in
526 // the clone. (This is different from the generic ValueMapper
527 // implementation, which generates an invalid blockaddress when
528 // cloning a function.)
529 //
530 // Note that we don't need to fix the mapping for unreachable blocks;
531 // the default mapping there is safe.
532 if (BB->hasAddressTaken()) {
533 Constant *OldBBAddr = BlockAddress::get(F: const_cast<Function *>(OldFunc),
534 BB: const_cast<BasicBlock *>(BB));
535 VMap[OldBBAddr] = BlockAddress::get(F: NewFunc, BB: NewBB);
536 }
537
538 bool hasCalls = false, hasDynamicAllocas = false, hasStaticAllocas = false;
539 bool hasMemProfMetadata = false;
540
541 // Keep a cursor pointing at the last place we cloned debug-info records from.
542 BasicBlock::const_iterator DbgCursor = StartingInst;
543 auto CloneDbgRecordsToHere =
544 [&DbgCursor](Instruction *NewInst, BasicBlock::const_iterator II) {
545 // Clone debug-info records onto this instruction. Iterate through any
546 // source-instructions we've cloned and then subsequently optimised
547 // away, so that their debug-info doesn't go missing.
548 for (; DbgCursor != II; ++DbgCursor)
549 NewInst->cloneDebugInfoFrom(From: &*DbgCursor, FromHere: std::nullopt, InsertAtHead: false);
550 NewInst->cloneDebugInfoFrom(From: &*II);
551 DbgCursor = std::next(x: II);
552 };
553
554 // Loop over all instructions, and copy them over, DCE'ing as we go. This
555 // loop doesn't include the terminator.
556 for (BasicBlock::const_iterator II = StartingInst, IE = --BB->end(); II != IE;
557 ++II) {
558
559 // Don't clone fake_use as it may suppress many optimizations
560 // due to inlining, especially SROA.
561 if (auto *IntrInst = dyn_cast<IntrinsicInst>(Val&: II))
562 if (IntrInst->getIntrinsicID() == Intrinsic::fake_use)
563 continue;
564
565 Instruction *NewInst = cloneInstruction(II);
566 NewInst->insertInto(ParentBB: NewBB, It: NewBB->end());
567
568 if (HostFuncIsStrictFP) {
569 // All function calls in the inlined function must get 'strictfp'
570 // attribute to prevent undesirable optimizations.
571 if (auto *Call = dyn_cast<CallInst>(Val: NewInst))
572 Call->addFnAttr(Kind: Attribute::StrictFP);
573 }
574
575 // Eagerly remap operands to the newly cloned instruction, except for PHI
576 // nodes for which we defer processing until we update the CFG.
577 if (!isa<PHINode>(Val: NewInst)) {
578 RemapInstruction(I: NewInst, VM&: VMap,
579 Flags: ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);
580
581 // Eagerly constant fold the newly cloned instruction. If successful, add
582 // a mapping to the new value. Non-constant operands may be incomplete at
583 // this stage, thus instruction simplification is performed after
584 // processing phi-nodes.
585 if (Value *V = ConstantFoldInstruction(
586 I: NewInst, DL: BB->getDataLayout())) {
587 if (isInstructionTriviallyDead(I: NewInst)) {
588 VMap[&*II] = V;
589 NewInst->eraseFromParent();
590 continue;
591 }
592 }
593 }
594
595 if (II->hasName())
596 NewInst->setName(II->getName() + NameSuffix);
597 VMap[&*II] = NewInst; // Add instruction map to value.
598 if (isa<CallInst>(Val: II) && !II->isDebugOrPseudoInst()) {
599 hasCalls = true;
600 hasMemProfMetadata |= II->hasMetadata(KindID: LLVMContext::MD_memprof);
601 hasMemProfMetadata |= II->hasMetadata(KindID: LLVMContext::MD_callsite);
602 }
603
604 CloneDbgRecordsToHere(NewInst, II);
605
606 if (CodeInfo) {
607 CodeInfo->OrigVMap[&*II] = NewInst;
608 if (auto *CB = dyn_cast<CallBase>(Val: &*II))
609 if (CB->hasOperandBundles())
610 CodeInfo->OperandBundleCallSites.push_back(x: NewInst);
611 }
612
613 if (const AllocaInst *AI = dyn_cast<AllocaInst>(Val&: II)) {
614 if (isa<ConstantInt>(Val: AI->getArraySize()))
615 hasStaticAllocas = true;
616 else
617 hasDynamicAllocas = true;
618 }
619 }
620
621 // Finally, clone over the terminator.
622 const Instruction *OldTI = BB->getTerminator();
623 bool TerminatorDone = false;
624 if (const BranchInst *BI = dyn_cast<BranchInst>(Val: OldTI)) {
625 if (BI->isConditional()) {
626 // If the condition was a known constant in the callee...
627 ConstantInt *Cond = dyn_cast<ConstantInt>(Val: BI->getCondition());
628 // Or is a known constant in the caller...
629 if (!Cond) {
630 Value *V = VMap.lookup(Val: BI->getCondition());
631 Cond = dyn_cast_or_null<ConstantInt>(Val: V);
632 }
633
634 // Constant fold to uncond branch!
635 if (Cond) {
636 BasicBlock *Dest = BI->getSuccessor(i: !Cond->getZExtValue());
637 auto *NewBI = BranchInst::Create(IfTrue: Dest, InsertBefore: NewBB);
638 NewBI->setDebugLoc(BI->getDebugLoc());
639 VMap[OldTI] = NewBI;
640 ToClone.push_back(x: Dest);
641 TerminatorDone = true;
642 }
643 }
644 } else if (const SwitchInst *SI = dyn_cast<SwitchInst>(Val: OldTI)) {
645 // If switching on a value known constant in the caller.
646 ConstantInt *Cond = dyn_cast<ConstantInt>(Val: SI->getCondition());
647 if (!Cond) { // Or known constant after constant prop in the callee...
648 Value *V = VMap.lookup(Val: SI->getCondition());
649 Cond = dyn_cast_or_null<ConstantInt>(Val: V);
650 }
651 if (Cond) { // Constant fold to uncond branch!
652 SwitchInst::ConstCaseHandle Case = *SI->findCaseValue(C: Cond);
653 BasicBlock *Dest = const_cast<BasicBlock *>(Case.getCaseSuccessor());
654 auto *NewBI = BranchInst::Create(IfTrue: Dest, InsertBefore: NewBB);
655 NewBI->setDebugLoc(SI->getDebugLoc());
656 VMap[OldTI] = NewBI;
657 ToClone.push_back(x: Dest);
658 TerminatorDone = true;
659 }
660 }
661
662 if (!TerminatorDone) {
663 Instruction *NewInst = OldTI->clone();
664 if (OldTI->hasName())
665 NewInst->setName(OldTI->getName() + NameSuffix);
666 NewInst->insertInto(ParentBB: NewBB, It: NewBB->end());
667
668 CloneDbgRecordsToHere(NewInst, OldTI->getIterator());
669
670 VMap[OldTI] = NewInst; // Add instruction map to value.
671
672 if (CodeInfo) {
673 CodeInfo->OrigVMap[OldTI] = NewInst;
674 if (auto *CB = dyn_cast<CallBase>(Val: OldTI))
675 if (CB->hasOperandBundles())
676 CodeInfo->OperandBundleCallSites.push_back(x: NewInst);
677 }
678
679 // Recursively clone any reachable successor blocks.
680 append_range(C&: ToClone, R: successors(I: BB->getTerminator()));
681 } else {
682 // If we didn't create a new terminator, clone DbgVariableRecords from the
683 // old terminator onto the new terminator.
684 Instruction *NewInst = NewBB->getTerminator();
685 assert(NewInst);
686
687 CloneDbgRecordsToHere(NewInst, OldTI->getIterator());
688 }
689
690 if (CodeInfo) {
691 CodeInfo->ContainsCalls |= hasCalls;
692 CodeInfo->ContainsMemProfMetadata |= hasMemProfMetadata;
693 CodeInfo->ContainsDynamicAllocas |= hasDynamicAllocas;
694 CodeInfo->ContainsDynamicAllocas |=
695 hasStaticAllocas && BB != &BB->getParent()->front();
696 }
697}
698
699/// This works like CloneAndPruneFunctionInto, except that it does not clone the
700/// entire function. Instead it starts at an instruction provided by the caller
701/// and copies (and prunes) only the code reachable from that instruction.
702void llvm::CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
703 const Instruction *StartingInst,
704 ValueToValueMapTy &VMap,
705 bool ModuleLevelChanges,
706 SmallVectorImpl<ReturnInst *> &Returns,
707 const char *NameSuffix,
708 ClonedCodeInfo *CodeInfo) {
709 assert(NameSuffix && "NameSuffix cannot be null!");
710
711 ValueMapTypeRemapper *TypeMapper = nullptr;
712 ValueMaterializer *Materializer = nullptr;
713
714#ifndef NDEBUG
715 // If the cloning starts at the beginning of the function, verify that
716 // the function arguments are mapped.
717 if (!StartingInst)
718 for (const Argument &II : OldFunc->args())
719 assert(VMap.count(&II) && "No mapping from source argument specified!");
720#endif
721
722 PruningFunctionCloner PFC(NewFunc, OldFunc, VMap, ModuleLevelChanges,
723 NameSuffix, CodeInfo);
724 const BasicBlock *StartingBB;
725 if (StartingInst)
726 StartingBB = StartingInst->getParent();
727 else {
728 StartingBB = &OldFunc->getEntryBlock();
729 StartingInst = &StartingBB->front();
730 }
731
732 // Clone the entry block, and anything recursively reachable from it.
733 std::vector<const BasicBlock *> CloneWorklist;
734 PFC.CloneBlock(BB: StartingBB, StartingInst: StartingInst->getIterator(), ToClone&: CloneWorklist);
735 while (!CloneWorklist.empty()) {
736 const BasicBlock *BB = CloneWorklist.back();
737 CloneWorklist.pop_back();
738 PFC.CloneBlock(BB, StartingInst: BB->begin(), ToClone&: CloneWorklist);
739 }
740
741 // Loop over all of the basic blocks in the old function. If the block was
742 // reachable, we have cloned it and the old block is now in the value map:
743 // insert it into the new function in the right order. If not, ignore it.
744 //
745 // Defer PHI resolution until rest of function is resolved.
746 SmallVector<const PHINode *, 16> PHIToResolve;
747 for (const BasicBlock &BI : *OldFunc) {
748 Value *V = VMap.lookup(Val: &BI);
749 BasicBlock *NewBB = cast_or_null<BasicBlock>(Val: V);
750 if (!NewBB)
751 continue; // Dead block.
752
753 // Move the new block to preserve the order in the original function.
754 NewBB->moveBefore(MovePos: NewFunc->end());
755
756 // Handle PHI nodes specially, as we have to remove references to dead
757 // blocks.
758 for (const PHINode &PN : BI.phis()) {
759 // PHI nodes may have been remapped to non-PHI nodes by the caller or
760 // during the cloning process.
761 if (isa<PHINode>(Val: VMap[&PN]))
762 PHIToResolve.push_back(Elt: &PN);
763 else
764 break;
765 }
766
767 // Finally, remap the terminator instructions, as those can't be remapped
768 // until all BBs are mapped.
769 RemapInstruction(I: NewBB->getTerminator(), VM&: VMap,
770 Flags: ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges,
771 TypeMapper, Materializer);
772 }
773
774 // Defer PHI resolution until rest of function is resolved, PHI resolution
775 // requires the CFG to be up-to-date.
776 for (unsigned phino = 0, e = PHIToResolve.size(); phino != e;) {
777 const PHINode *OPN = PHIToResolve[phino];
778 unsigned NumPreds = OPN->getNumIncomingValues();
779 const BasicBlock *OldBB = OPN->getParent();
780 BasicBlock *NewBB = cast<BasicBlock>(Val&: VMap[OldBB]);
781
782 // Map operands for blocks that are live and remove operands for blocks
783 // that are dead.
784 for (; phino != PHIToResolve.size() &&
785 PHIToResolve[phino]->getParent() == OldBB;
786 ++phino) {
787 OPN = PHIToResolve[phino];
788 PHINode *PN = cast<PHINode>(Val&: VMap[OPN]);
789 for (int64_t pred = NumPreds - 1; pred >= 0; --pred) {
790 Value *V = VMap.lookup(Val: PN->getIncomingBlock(i: pred));
791 if (BasicBlock *MappedBlock = cast_or_null<BasicBlock>(Val: V)) {
792 Value *InVal =
793 MapValue(V: PN->getIncomingValue(i: pred), VM&: VMap,
794 Flags: ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);
795 assert(InVal && "Unknown input value?");
796 PN->setIncomingValue(i: pred, V: InVal);
797 PN->setIncomingBlock(i: pred, BB: MappedBlock);
798 continue;
799 }
800 PN->removeIncomingValue(Idx: pred, DeletePHIIfEmpty: false);
801 }
802 }
803
804 // The loop above has removed PHI entries for those blocks that are dead
805 // and has updated others. However, if a block is live (i.e. copied over)
806 // but its terminator has been changed to not go to this block, then our
807 // phi nodes will have invalid entries. Update the PHI nodes in this
808 // case.
809 PHINode *PN = cast<PHINode>(Val: NewBB->begin());
810 NumPreds = pred_size(BB: NewBB);
811 if (NumPreds != PN->getNumIncomingValues()) {
812 assert(NumPreds < PN->getNumIncomingValues());
813 // Count how many times each predecessor comes to this block.
814 DenseMap<BasicBlock *, unsigned> PredCount;
815 for (BasicBlock *Pred : predecessors(BB: NewBB))
816 ++PredCount[Pred];
817
818 BasicBlock::iterator I = NewBB->begin();
819 DenseMap<BasicBlock *, unsigned> SeenPredCount;
820 SeenPredCount.reserve(NumEntries: PredCount.size());
821 for (; (PN = dyn_cast<PHINode>(Val&: I)); ++I) {
822 SeenPredCount.clear();
823 PN->removeIncomingValueIf(
824 Predicate: [&](unsigned Idx) {
825 BasicBlock *IncomingBlock = PN->getIncomingBlock(i: Idx);
826 auto It = PredCount.find(Val: IncomingBlock);
827 if (It == PredCount.end())
828 return true;
829 unsigned &SeenCount = SeenPredCount[IncomingBlock];
830 if (SeenCount < It->second) {
831 SeenCount++;
832 return false;
833 }
834 return true;
835 },
836 DeletePHIIfEmpty: false);
837 }
838 }
839
840 // If the loops above have made these phi nodes have 0 or 1 operand,
841 // replace them with poison or the input value. We must do this for
842 // correctness, because 0-operand phis are not valid.
843 PN = cast<PHINode>(Val: NewBB->begin());
844 if (PN->getNumIncomingValues() == 0) {
845 BasicBlock::iterator I = NewBB->begin();
846 BasicBlock::const_iterator OldI = OldBB->begin();
847 while ((PN = dyn_cast<PHINode>(Val: I++))) {
848 Value *NV = PoisonValue::get(T: PN->getType());
849 PN->replaceAllUsesWith(V: NV);
850 assert(VMap[&*OldI] == PN && "VMap mismatch");
851 VMap[&*OldI] = NV;
852 PN->eraseFromParent();
853 ++OldI;
854 }
855 }
856 }
857
858 // Drop all incompatible return attributes that cannot be applied to NewFunc
859 // during cloning, so as to allow instruction simplification to reason on the
860 // old state of the function. The original attributes are restored later.
861 AttributeList Attrs = NewFunc->getAttributes();
862 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(
863 Ty: OldFunc->getReturnType(), AS: Attrs.getRetAttrs());
864 NewFunc->removeRetAttrs(Attrs: IncompatibleAttrs);
865
866 // As phi-nodes have been now remapped, allow incremental simplification of
867 // newly-cloned instructions.
868 const DataLayout &DL = NewFunc->getDataLayout();
869 for (const BasicBlock &BB : *OldFunc) {
870 for (const Instruction &I : BB) {
871 auto *NewI = dyn_cast_or_null<Instruction>(Val: VMap.lookup(Val: &I));
872 if (!NewI)
873 continue;
874
875 if (Value *V = simplifyInstruction(I: NewI, Q: DL)) {
876 NewI->replaceAllUsesWith(V);
877
878 if (isInstructionTriviallyDead(I: NewI)) {
879 NewI->eraseFromParent();
880 } else {
881 // Did not erase it? Restore the new instruction into VMap previously
882 // dropped by `ValueIsRAUWd`.
883 VMap[&I] = NewI;
884 }
885 }
886 }
887 }
888
889 // Restore attributes.
890 NewFunc->setAttributes(Attrs);
891
892 // Remap debug records operands now that all values have been mapped.
893 // Doing this now (late) preserves use-before-defs in debug records. If
894 // we didn't do this, ValueAsMetadata(use-before-def) operands would be
895 // replaced by empty metadata. This would signal later cleanup passes to
896 // remove the debug records, potentially causing incorrect locations.
897 Function::iterator Begin = cast<BasicBlock>(Val&: VMap[StartingBB])->getIterator();
898 for (BasicBlock &BB : make_range(x: Begin, y: NewFunc->end())) {
899 for (Instruction &I : BB) {
900 RemapDbgRecordRange(M: I.getModule(), Range: I.getDbgRecordRange(), VM&: VMap,
901 Flags: ModuleLevelChanges ? RF_None
902 : RF_NoModuleLevelChanges,
903 TypeMapper, Materializer);
904 }
905 }
906
907 // Simplify conditional branches and switches with a constant operand. We try
908 // to prune these out when cloning, but if the simplification required
909 // looking through PHI nodes, those are only available after forming the full
910 // basic block. That may leave some here, and we still want to prune the dead
911 // code as early as possible.
912 for (BasicBlock &BB : make_range(x: Begin, y: NewFunc->end()))
913 ConstantFoldTerminator(BB: &BB);
914
915 // Some blocks may have become unreachable as a result. Find and delete them.
916 {
917 SmallPtrSet<BasicBlock *, 16> ReachableBlocks;
918 SmallVector<BasicBlock *, 16> Worklist;
919 Worklist.push_back(Elt: &*Begin);
920 while (!Worklist.empty()) {
921 BasicBlock *BB = Worklist.pop_back_val();
922 if (ReachableBlocks.insert(Ptr: BB).second)
923 append_range(C&: Worklist, R: successors(BB));
924 }
925
926 SmallVector<BasicBlock *, 16> UnreachableBlocks;
927 for (BasicBlock &BB : make_range(x: Begin, y: NewFunc->end()))
928 if (!ReachableBlocks.contains(Ptr: &BB))
929 UnreachableBlocks.push_back(Elt: &BB);
930 DeleteDeadBlocks(BBs: UnreachableBlocks);
931 }
932
933 // Now that the inlined function body has been fully constructed, go through
934 // and zap unconditional fall-through branches. This happens all the time when
935 // specializing code: code specialization turns conditional branches into
936 // uncond branches, and this code folds them.
937 Function::iterator I = Begin;
938 while (I != NewFunc->end()) {
939 BranchInst *BI = dyn_cast<BranchInst>(Val: I->getTerminator());
940 if (!BI || BI->isConditional()) {
941 ++I;
942 continue;
943 }
944
945 BasicBlock *Dest = BI->getSuccessor(i: 0);
946 if (!Dest->getSinglePredecessor() || Dest->hasAddressTaken()) {
947 ++I;
948 continue;
949 }
950
951 // We shouldn't be able to get single-entry PHI nodes here, as instsimplify
952 // above should have zapped all of them..
953 assert(!isa<PHINode>(Dest->begin()));
954
955 // We know all single-entry PHI nodes in the inlined function have been
956 // removed, so we just need to splice the blocks.
957 BI->eraseFromParent();
958
959 // Make all PHI nodes that referred to Dest now refer to I as their source.
960 Dest->replaceAllUsesWith(V: &*I);
961
962 // Move all the instructions in the succ to the pred.
963 I->splice(ToIt: I->end(), FromBB: Dest);
964
965 // Remove the dest block.
966 Dest->eraseFromParent();
967
968 // Do not increment I, iteratively merge all things this block branches to.
969 }
970
971 // Make a final pass over the basic blocks from the old function to gather
972 // any return instructions which survived folding. We have to do this here
973 // because we can iteratively remove and merge returns above.
974 for (Function::iterator I = cast<BasicBlock>(Val&: VMap[StartingBB])->getIterator(),
975 E = NewFunc->end();
976 I != E; ++I)
977 if (ReturnInst *RI = dyn_cast<ReturnInst>(Val: I->getTerminator()))
978 Returns.push_back(Elt: RI);
979}
980
981/// This works exactly like CloneFunctionInto,
982/// except that it does some simple constant prop and DCE on the fly. The
983/// effect of this is to copy significantly less code in cases where (for
984/// example) a function call with constant arguments is inlined, and those
985/// constant arguments cause a significant amount of code in the callee to be
986/// dead. Since this doesn't produce an exact copy of the input, it can't be
987/// used for things like CloneFunction or CloneModule.
void llvm::CloneAndPruneFunctionInto(
    Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap,
    bool ModuleLevelChanges, SmallVectorImpl<ReturnInst *> &Returns,
    const char *NameSuffix, ClonedCodeInfo *CodeInfo) {
  // Thin wrapper: clone-and-prune the entire function by starting from the
  // first instruction of OldFunc's entry block.
  CloneAndPruneIntoFromInst(NewFunc, OldFunc, StartingInst: &OldFunc->front().front(), VMap,
                            ModuleLevelChanges, Returns, NameSuffix, CodeInfo);
}
995
996/// Remaps instructions in \p Blocks using the mapping in \p VMap.
997void llvm::remapInstructionsInBlocks(ArrayRef<BasicBlock *> Blocks,
998 ValueToValueMapTy &VMap) {
999 // Rewrite the code to refer to itself.
1000 for (BasicBlock *BB : Blocks) {
1001 for (Instruction &Inst : *BB) {
1002 RemapDbgRecordRange(M: Inst.getModule(), Range: Inst.getDbgRecordRange(), VM&: VMap,
1003 Flags: RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
1004 RemapInstruction(I: &Inst, VM&: VMap,
1005 Flags: RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
1006 }
1007 }
1008}
1009
1010/// Clones a loop \p OrigLoop. Returns the loop and the blocks in \p
1011/// Blocks.
1012///
1013/// Updates LoopInfo and DominatorTree assuming the loop is dominated by block
1014/// \p LoopDomBB. Insert the new blocks before block specified in \p Before.
Loop *llvm::cloneLoopWithPreheader(BasicBlock *Before, BasicBlock *LoopDomBB,
                                   Loop *OrigLoop, ValueToValueMapTy &VMap,
                                   const Twine &NameSuffix, LoopInfo *LI,
                                   DominatorTree *DT,
                                   SmallVectorImpl<BasicBlock *> &Blocks) {
  Function *F = OrigLoop->getHeader()->getParent();
  Loop *ParentLoop = OrigLoop->getParentLoop();
  // Maps each original loop in the nest (OrigLoop and its subloops) to its
  // newly allocated clone.
  DenseMap<Loop *, Loop *> LMap;

  Loop *NewLoop = LI->AllocateLoop();
  LMap[OrigLoop] = NewLoop;
  // Insert the cloned loop at the same position in the loop tree as OrigLoop.
  if (ParentLoop)
    ParentLoop->addChildLoop(NewChild: NewLoop);
  else
    LI->addTopLevelLoop(New: NewLoop);

  BasicBlock *OrigPH = OrigLoop->getLoopPreheader();
  assert(OrigPH && "No preheader");
  BasicBlock *NewPH = CloneBasicBlock(BB: OrigPH, VMap, NameSuffix, F);
  // To rename the loop PHIs.
  VMap[OrigPH] = NewPH;
  Blocks.push_back(Elt: NewPH);

  // Update LoopInfo. The preheader is not part of the loop itself, so it
  // belongs to the parent loop, if any.
  if (ParentLoop)
    ParentLoop->addBasicBlockToLoop(NewBB: NewPH, LI&: *LI);

  // Update DominatorTree.
  DT->addNewBlock(BB: NewPH, DomBB: LoopDomBB);

  // Allocate clones for every subloop up front, mirroring the original
  // parent/child structure of the loop nest.
  for (Loop *CurLoop : OrigLoop->getLoopsInPreorder()) {
    Loop *&NewLoop = LMap[CurLoop];
    if (!NewLoop) {
      NewLoop = LI->AllocateLoop();

      // Establish the parent/child relationship.
      Loop *OrigParent = CurLoop->getParentLoop();
      assert(OrigParent && "Could not find the original parent loop");
      Loop *NewParentLoop = LMap[OrigParent];
      assert(NewParentLoop && "Could not find the new parent loop");

      NewParentLoop->addChildLoop(NewChild: NewLoop);
    }
  }

  // Clone every block of the loop body, registering each clone with the
  // clone of the loop that owned the original block.
  for (BasicBlock *BB : OrigLoop->getBlocks()) {
    Loop *CurLoop = LI->getLoopFor(BB);
    Loop *&NewLoop = LMap[CurLoop];
    assert(NewLoop && "Expecting new loop to be allocated");

    BasicBlock *NewBB = CloneBasicBlock(BB, VMap, NameSuffix, F);
    VMap[BB] = NewBB;

    // Update LoopInfo.
    NewLoop->addBasicBlockToLoop(NewBB, LI&: *LI);

    // Add DominatorTree node. After seeing all blocks, update to correct
    // IDom.
    DT->addNewBlock(BB: NewBB, DomBB: NewPH);

    Blocks.push_back(Elt: NewBB);
  }

  // Second pass, now that every block has a clone in VMap: fix up loop
  // headers and replace the provisional IDoms set above.
  for (BasicBlock *BB : OrigLoop->getBlocks()) {
    // Update loop headers.
    Loop *CurLoop = LI->getLoopFor(BB);
    if (BB == CurLoop->getHeader())
      LMap[CurLoop]->moveToHeader(BB: cast<BasicBlock>(Val&: VMap[BB]));

    // Update DominatorTree: the clone's IDom is the clone of the original
    // block's IDom (which must itself have been cloned for VMap to map it).
    BasicBlock *IDomBB = DT->getNode(BB)->getIDom()->getBlock();
    DT->changeImmediateDominator(BB: cast<BasicBlock>(Val&: VMap[BB]),
                                 NewBB: cast<BasicBlock>(Val&: VMap[IDomBB]));
  }

  // Move them physically from the end of the block list.
  F->splice(ToIt: Before->getIterator(), FromF: F, FromIt: NewPH->getIterator());
  F->splice(ToIt: Before->getIterator(), FromF: F, FromBeginIt: NewLoop->getHeader()->getIterator(),
            FromEndIt: F->end());

  return NewLoop;
}
1097
1098/// Duplicate non-Phi instructions from the beginning of block up to
1099/// StopAt instruction into a split block between BB and its predecessor.
BasicBlock *llvm::DuplicateInstructionsInSplitBetween(
    BasicBlock *BB, BasicBlock *PredBB, Instruction *StopAt,
    ValueToValueMapTy &ValueMapping, DomTreeUpdater &DTU) {

  assert(count(successors(PredBB), BB) == 1 &&
         "There must be a single edge between PredBB and BB!");
  // We are going to have to map operands from the original BB block to the new
  // copy of the block 'NewBB'. If there are PHI nodes in BB, evaluate them to
  // account for entry from PredBB.
  BasicBlock::iterator BI = BB->begin();
  for (; PHINode *PN = dyn_cast<PHINode>(Val&: BI); ++BI)
    ValueMapping[PN] = PN->getIncomingValueForBlock(BB: PredBB);

  // Split the PredBB->BB edge; the duplicated instructions are inserted into
  // the new block, just before its terminator.
  BasicBlock *NewBB = SplitEdge(From: PredBB, To: BB);
  NewBB->setName(PredBB->getName() + ".split");
  Instruction *NewTerm = NewBB->getTerminator();

  // FIXME: SplitEdge does not yet take a DTU, so we include the split edge
  // in the update set here.
  DTU.applyUpdates(Updates: {{DominatorTree::Delete, PredBB, BB},
                    {DominatorTree::Insert, PredBB, NewBB},
                    {DominatorTree::Insert, NewBB, BB}});

  // Clone the non-phi instructions of BB into NewBB, keeping track of the
  // mapping and using it to remap operands in the cloned instructions.
  // Stop once we see the terminator too. This covers the case where BB's
  // terminator gets replaced and StopAt == BB's terminator.
  // Note: BI still points at the first non-PHI instruction of BB here.
  for (; StopAt != &*BI && BB->getTerminator() != &*BI; ++BI) {
    Instruction *New = BI->clone();
    New->setName(BI->getName());
    New->insertBefore(InsertPos: NewTerm->getIterator());
    // Carry over any debug records attached to the original instruction.
    New->cloneDebugInfoFrom(From: &*BI);
    ValueMapping[&*BI] = New;

    // Remap operands to patch up intra-block references.
    for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
      if (Instruction *Inst = dyn_cast<Instruction>(Val: New->getOperand(i))) {
        auto I = ValueMapping.find(Val: Inst);
        if (I != ValueMapping.end())
          New->setOperand(i, Val: I->second);
      }

    // Remap debug variable operands.
    remapDebugVariable(Mapping&: ValueMapping, Inst: New);
  }

  return NewBB;
}
1148
1149void llvm::cloneNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
1150 DenseMap<MDNode *, MDNode *> &ClonedScopes,
1151 StringRef Ext, LLVMContext &Context) {
1152 MDBuilder MDB(Context);
1153
1154 for (MDNode *ScopeList : NoAliasDeclScopes) {
1155 for (const MDOperand &MDOp : ScopeList->operands()) {
1156 if (MDNode *MD = dyn_cast<MDNode>(Val: MDOp)) {
1157 AliasScopeNode SNANode(MD);
1158
1159 std::string Name;
1160 auto ScopeName = SNANode.getName();
1161 if (!ScopeName.empty())
1162 Name = (Twine(ScopeName) + ":" + Ext).str();
1163 else
1164 Name = std::string(Ext);
1165
1166 MDNode *NewScope = MDB.createAnonymousAliasScope(
1167 Domain: const_cast<MDNode *>(SNANode.getDomain()), Name);
1168 ClonedScopes.insert(KV: std::make_pair(x&: MD, y&: NewScope));
1169 }
1170 }
1171 }
1172}
1173
1174void llvm::adaptNoAliasScopes(Instruction *I,
1175 const DenseMap<MDNode *, MDNode *> &ClonedScopes,
1176 LLVMContext &Context) {
1177 auto CloneScopeList = [&](const MDNode *ScopeList) -> MDNode * {
1178 bool NeedsReplacement = false;
1179 SmallVector<Metadata *, 8> NewScopeList;
1180 for (const MDOperand &MDOp : ScopeList->operands()) {
1181 if (MDNode *MD = dyn_cast<MDNode>(Val: MDOp)) {
1182 if (auto *NewMD = ClonedScopes.lookup(Val: MD)) {
1183 NewScopeList.push_back(Elt: NewMD);
1184 NeedsReplacement = true;
1185 continue;
1186 }
1187 NewScopeList.push_back(Elt: MD);
1188 }
1189 }
1190 if (NeedsReplacement)
1191 return MDNode::get(Context, MDs: NewScopeList);
1192 return nullptr;
1193 };
1194
1195 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(Val: I))
1196 if (MDNode *NewScopeList = CloneScopeList(Decl->getScopeList()))
1197 Decl->setScopeList(NewScopeList);
1198
1199 auto replaceWhenNeeded = [&](unsigned MD_ID) {
1200 if (const MDNode *CSNoAlias = I->getMetadata(KindID: MD_ID))
1201 if (MDNode *NewScopeList = CloneScopeList(CSNoAlias))
1202 I->setMetadata(KindID: MD_ID, Node: NewScopeList);
1203 };
1204 replaceWhenNeeded(LLVMContext::MD_noalias);
1205 replaceWhenNeeded(LLVMContext::MD_alias_scope);
1206}
1207
1208void llvm::cloneAndAdaptNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
1209 ArrayRef<BasicBlock *> NewBlocks,
1210 LLVMContext &Context, StringRef Ext) {
1211 if (NoAliasDeclScopes.empty())
1212 return;
1213
1214 DenseMap<MDNode *, MDNode *> ClonedScopes;
1215 LLVM_DEBUG(dbgs() << "cloneAndAdaptNoAliasScopes: cloning "
1216 << NoAliasDeclScopes.size() << " node(s)\n");
1217
1218 cloneNoAliasScopes(NoAliasDeclScopes, ClonedScopes, Ext, Context);
1219 // Identify instructions using metadata that needs adaptation
1220 for (BasicBlock *NewBlock : NewBlocks)
1221 for (Instruction &I : *NewBlock)
1222 adaptNoAliasScopes(I: &I, ClonedScopes, Context);
1223}
1224
1225void llvm::cloneAndAdaptNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
1226 Instruction *IStart, Instruction *IEnd,
1227 LLVMContext &Context, StringRef Ext) {
1228 if (NoAliasDeclScopes.empty())
1229 return;
1230
1231 DenseMap<MDNode *, MDNode *> ClonedScopes;
1232 LLVM_DEBUG(dbgs() << "cloneAndAdaptNoAliasScopes: cloning "
1233 << NoAliasDeclScopes.size() << " node(s)\n");
1234
1235 cloneNoAliasScopes(NoAliasDeclScopes, ClonedScopes, Ext, Context);
1236 // Identify instructions using metadata that needs adaptation
1237 assert(IStart->getParent() == IEnd->getParent() && "different basic block ?");
1238 auto ItStart = IStart->getIterator();
1239 auto ItEnd = IEnd->getIterator();
1240 ++ItEnd; // IEnd is included, increment ItEnd to get the end of the range
1241 for (auto &I : llvm::make_range(x: ItStart, y: ItEnd))
1242 adaptNoAliasScopes(I: &I, ClonedScopes, Context);
1243}
1244
1245void llvm::identifyNoAliasScopesToClone(
1246 ArrayRef<BasicBlock *> BBs, SmallVectorImpl<MDNode *> &NoAliasDeclScopes) {
1247 for (BasicBlock *BB : BBs)
1248 for (Instruction &I : *BB)
1249 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(Val: &I))
1250 NoAliasDeclScopes.push_back(Elt: Decl->getScopeList());
1251}
1252
1253void llvm::identifyNoAliasScopesToClone(
1254 BasicBlock::iterator Start, BasicBlock::iterator End,
1255 SmallVectorImpl<MDNode *> &NoAliasDeclScopes) {
1256 for (Instruction &I : make_range(x: Start, y: End))
1257 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(Val: &I))
1258 NoAliasDeclScopes.push_back(Elt: Decl->getScopeList());
1259}
1260