//===- CloneFunction.cpp - Clone a function into another function ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the CloneFunctionInto interface, which is used as the
// low-level function cloner. This is used by the CloneFunction and function
// inliner to do the dirty work of copying the body of a function around.
//
//===----------------------------------------------------------------------===//
14
15#include "llvm/ADT/SmallVector.h"
16#include "llvm/ADT/Statistic.h"
17#include "llvm/Analysis/ConstantFolding.h"
18#include "llvm/Analysis/DomTreeUpdater.h"
19#include "llvm/Analysis/InstructionSimplify.h"
20#include "llvm/Analysis/LoopInfo.h"
21#include "llvm/IR/AttributeMask.h"
22#include "llvm/IR/CFG.h"
23#include "llvm/IR/Constants.h"
24#include "llvm/IR/DebugInfo.h"
25#include "llvm/IR/DerivedTypes.h"
26#include "llvm/IR/Function.h"
27#include "llvm/IR/InstIterator.h"
28#include "llvm/IR/Instructions.h"
29#include "llvm/IR/IntrinsicInst.h"
30#include "llvm/IR/LLVMContext.h"
31#include "llvm/IR/MDBuilder.h"
32#include "llvm/IR/Metadata.h"
33#include "llvm/IR/Module.h"
34#include "llvm/Transforms/Utils/BasicBlockUtils.h"
35#include "llvm/Transforms/Utils/Cloning.h"
36#include "llvm/Transforms/Utils/Local.h"
37#include "llvm/Transforms/Utils/ValueMapper.h"
38#include <cstdint>
39#include <map>
40#include <optional>
41using namespace llvm;
42
43#define DEBUG_TYPE "clone-function"
44
45STATISTIC(RemappedAtomMax, "Highest global NextAtomGroup (after mapping)");
46
47void llvm::mapAtomInstance(const DebugLoc &DL, ValueToValueMapTy &VMap) {
48 uint64_t CurGroup = DL->getAtomGroup();
49 if (!CurGroup)
50 return;
51
52 // Try inserting a new entry. If there's already a mapping for this atom
53 // then there's nothing to do.
54 auto [It, Inserted] = VMap.AtomMap.insert(KV: {{DL.getInlinedAt(), CurGroup}, 0});
55 if (!Inserted)
56 return;
57
58 // Map entry to a new atom group.
59 uint64_t NewGroup = DL->getContext().incNextDILocationAtomGroup();
60 assert(NewGroup > CurGroup && "Next should always be greater than current");
61 It->second = NewGroup;
62
63 RemappedAtomMax = std::max<uint64_t>(a: NewGroup, b: RemappedAtomMax);
64}
65
66static void collectDebugInfoFromInstructions(const Function &F,
67 DebugInfoFinder &DIFinder) {
68 const Module *M = F.getParent();
69 if (!M)
70 return;
71 // Inspect instructions to process e.g. DILexicalBlocks of inlined functions
72 for (const Instruction &I : instructions(F))
73 DIFinder.processInstruction(M: *M, I);
74}
75
76// Create a predicate that matches the metadata that should be identity mapped
77// during function cloning.
78static MetadataPredicate
79createIdentityMDPredicate(const Function &F, CloneFunctionChangeType Changes) {
80 if (Changes >= CloneFunctionChangeType::DifferentModule)
81 return [](const Metadata *MD) { return false; };
82
83 DISubprogram *SPClonedWithinModule = F.getSubprogram();
84
85 // Don't clone inlined subprograms.
86 auto ShouldKeep = [SPClonedWithinModule](const DISubprogram *SP) -> bool {
87 return SP != SPClonedWithinModule;
88 };
89
90 return [=](const Metadata *MD) {
91 // Avoid cloning compile units.
92 if (isa<DICompileUnit>(Val: MD))
93 return true;
94
95 if (auto *SP = dyn_cast<DISubprogram>(Val: MD))
96 return ShouldKeep(SP);
97
98 // If a subprogram isn't going to be cloned skip its lexical blocks as well.
99 if (auto *LScope = dyn_cast<DILocalScope>(Val: MD))
100 return ShouldKeep(LScope->getSubprogram());
101
102 // Avoid cloning local variables of subprograms that won't be cloned.
103 if (auto *DV = dyn_cast<DILocalVariable>(Val: MD))
104 if (auto *S = dyn_cast_or_null<DILocalScope>(Val: DV->getScope()))
105 return ShouldKeep(S->getSubprogram());
106
107 // Clone types that are local to subprograms being cloned.
108 // Avoid cloning other types.
109 auto *Type = dyn_cast<DIType>(Val: MD);
110 if (!Type)
111 return false;
112
113 // No need to clone types if subprograms are not cloned.
114 if (SPClonedWithinModule == nullptr)
115 return true;
116
117 // Scopeless types may be derived from local types (e.g. pointers to local
118 // types). They may need cloning.
119 if (const DIDerivedType *DTy = dyn_cast_or_null<DIDerivedType>(Val: Type);
120 DTy && !DTy->getScope())
121 return false;
122
123 auto *LScope = dyn_cast_or_null<DILocalScope>(Val: Type->getScope());
124 if (!LScope)
125 return true;
126
127 if (ShouldKeep(LScope->getSubprogram()))
128 return true;
129
130 return false;
131 };
132}
133
134/// See comments in Cloning.h.
135BasicBlock *llvm::CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap,
136 const Twine &NameSuffix, Function *F,
137 ClonedCodeInfo *CodeInfo, bool MapAtoms) {
138 BasicBlock *NewBB = BasicBlock::Create(Context&: BB->getContext(), Name: "", Parent: F);
139 if (BB->hasName())
140 NewBB->setName(BB->getName() + NameSuffix);
141
142 bool hasCalls = false, hasDynamicAllocas = false, hasMemProfMetadata = false;
143
144 // Loop over all instructions, and copy them over.
145 for (const Instruction &I : *BB) {
146 Instruction *NewInst = I.clone();
147 if (I.hasName())
148 NewInst->setName(I.getName() + NameSuffix);
149
150 NewInst->insertBefore(BB&: *NewBB, InsertPos: NewBB->end());
151 NewInst->cloneDebugInfoFrom(From: &I);
152
153 VMap[&I] = NewInst; // Add instruction map to value.
154
155 if (MapAtoms) {
156 if (const DebugLoc &DL = NewInst->getDebugLoc())
157 mapAtomInstance(DL: DL.get(), VMap);
158 }
159
160 if (isa<CallInst>(Val: I) && !I.isDebugOrPseudoInst()) {
161 hasCalls = true;
162 hasMemProfMetadata |= I.hasMetadata(KindID: LLVMContext::MD_memprof);
163 hasMemProfMetadata |= I.hasMetadata(KindID: LLVMContext::MD_callsite);
164 }
165 if (const AllocaInst *AI = dyn_cast<AllocaInst>(Val: &I)) {
166 if (!AI->isStaticAlloca()) {
167 hasDynamicAllocas = true;
168 }
169 }
170 }
171
172 if (CodeInfo) {
173 CodeInfo->ContainsCalls |= hasCalls;
174 CodeInfo->ContainsMemProfMetadata |= hasMemProfMetadata;
175 CodeInfo->ContainsDynamicAllocas |= hasDynamicAllocas;
176 }
177 return NewBB;
178}
179
180void llvm::CloneFunctionAttributesInto(Function *NewFunc,
181 const Function *OldFunc,
182 ValueToValueMapTy &VMap,
183 bool ModuleLevelChanges,
184 ValueMapTypeRemapper *TypeMapper,
185 ValueMaterializer *Materializer) {
186 // Copy all attributes other than those stored in Function's AttributeList
187 // which holds e.g. parameters and return value attributes.
188 AttributeList NewAttrs = NewFunc->getAttributes();
189 NewFunc->copyAttributesFrom(Src: OldFunc);
190 NewFunc->setAttributes(NewAttrs);
191
192 const RemapFlags FuncGlobalRefFlags =
193 ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges;
194
195 // Fix up the personality function that got copied over.
196 if (OldFunc->hasPersonalityFn())
197 NewFunc->setPersonalityFn(MapValue(V: OldFunc->getPersonalityFn(), VM&: VMap,
198 Flags: FuncGlobalRefFlags, TypeMapper,
199 Materializer));
200
201 if (OldFunc->hasPrefixData()) {
202 NewFunc->setPrefixData(MapValue(V: OldFunc->getPrefixData(), VM&: VMap,
203 Flags: FuncGlobalRefFlags, TypeMapper,
204 Materializer));
205 }
206
207 if (OldFunc->hasPrologueData()) {
208 NewFunc->setPrologueData(MapValue(V: OldFunc->getPrologueData(), VM&: VMap,
209 Flags: FuncGlobalRefFlags, TypeMapper,
210 Materializer));
211 }
212
213 SmallVector<AttributeSet, 4> NewArgAttrs(NewFunc->arg_size());
214 AttributeList OldAttrs = OldFunc->getAttributes();
215
216 // Clone any argument attributes that are present in the VMap.
217 for (const Argument &OldArg : OldFunc->args()) {
218 if (Argument *NewArg = dyn_cast<Argument>(Val&: VMap[&OldArg])) {
219 // Remap the parameter indices.
220 NewArgAttrs[NewArg->getArgNo()] =
221 OldAttrs.getParamAttrs(ArgNo: OldArg.getArgNo());
222 }
223 }
224
225 NewFunc->setAttributes(
226 AttributeList::get(C&: NewFunc->getContext(), FnAttrs: OldAttrs.getFnAttrs(),
227 RetAttrs: OldAttrs.getRetAttrs(), ArgAttrs: NewArgAttrs));
228}
229
230void llvm::CloneFunctionMetadataInto(Function &NewFunc, const Function &OldFunc,
231 ValueToValueMapTy &VMap,
232 RemapFlags RemapFlag,
233 ValueMapTypeRemapper *TypeMapper,
234 ValueMaterializer *Materializer,
235 const MetadataPredicate *IdentityMD) {
236 SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
237 OldFunc.getAllMetadata(MDs);
238 for (const auto &[Kind, MD] : MDs) {
239 NewFunc.addMetadata(KindID: Kind, MD&: *MapMetadata(MD, VM&: VMap, Flags: RemapFlag, TypeMapper,
240 Materializer, IdentityMD));
241 }
242}
243
244void llvm::CloneFunctionBodyInto(Function &NewFunc, const Function &OldFunc,
245 ValueToValueMapTy &VMap, RemapFlags RemapFlag,
246 SmallVectorImpl<ReturnInst *> &Returns,
247 const char *NameSuffix,
248 ClonedCodeInfo *CodeInfo,
249 ValueMapTypeRemapper *TypeMapper,
250 ValueMaterializer *Materializer,
251 const MetadataPredicate *IdentityMD) {
252 if (OldFunc.isDeclaration())
253 return;
254
255 // Loop over all of the basic blocks in the function, cloning them as
256 // appropriate. Note that we save BE this way in order to handle cloning of
257 // recursive functions into themselves.
258 for (const BasicBlock &BB : OldFunc) {
259 // Create a new basic block and copy instructions into it!
260 BasicBlock *CBB =
261 CloneBasicBlock(BB: &BB, VMap, NameSuffix, F: &NewFunc, CodeInfo);
262
263 // Add basic block mapping.
264 VMap[&BB] = CBB;
265
266 // It is only legal to clone a function if a block address within that
267 // function is never referenced outside of the function. Given that, we
268 // want to map block addresses from the old function to block addresses in
269 // the clone. (This is different from the generic ValueMapper
270 // implementation, which generates an invalid blockaddress when
271 // cloning a function.)
272 if (BB.hasAddressTaken()) {
273 Constant *OldBBAddr = BlockAddress::get(F: const_cast<Function *>(&OldFunc),
274 BB: const_cast<BasicBlock *>(&BB));
275 VMap[OldBBAddr] = BlockAddress::get(F: &NewFunc, BB: CBB);
276 }
277
278 // Note return instructions for the caller.
279 if (ReturnInst *RI = dyn_cast<ReturnInst>(Val: CBB->getTerminator()))
280 Returns.push_back(Elt: RI);
281 }
282
283 // Loop over all of the instructions in the new function, fixing up operand
284 // references as we go. This uses VMap to do all the hard work.
285 for (Function::iterator
286 BB = cast<BasicBlock>(Val&: VMap[&OldFunc.front()])->getIterator(),
287 BE = NewFunc.end();
288 BB != BE; ++BB)
289 // Loop over all instructions, fixing each one as we find it, and any
290 // attached debug-info records.
291 for (Instruction &II : *BB) {
292 RemapInstruction(I: &II, VM&: VMap, Flags: RemapFlag, TypeMapper, Materializer,
293 IdentityMD);
294 RemapDbgRecordRange(M: II.getModule(), Range: II.getDbgRecordRange(), VM&: VMap,
295 Flags: RemapFlag, TypeMapper, Materializer, IdentityMD);
296 }
297}
298
299// Clone OldFunc into NewFunc, transforming the old arguments into references to
300// VMap values.
301void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
302 ValueToValueMapTy &VMap,
303 CloneFunctionChangeType Changes,
304 SmallVectorImpl<ReturnInst *> &Returns,
305 const char *NameSuffix, ClonedCodeInfo *CodeInfo,
306 ValueMapTypeRemapper *TypeMapper,
307 ValueMaterializer *Materializer) {
308 assert(NameSuffix && "NameSuffix cannot be null!");
309
310#ifndef NDEBUG
311 for (const Argument &I : OldFunc->args())
312 assert(VMap.count(&I) && "No mapping from source argument specified!");
313#endif
314
315 bool ModuleLevelChanges = Changes > CloneFunctionChangeType::LocalChangesOnly;
316
317 CloneFunctionAttributesInto(NewFunc, OldFunc, VMap, ModuleLevelChanges,
318 TypeMapper, Materializer);
319
320 // Everything else beyond this point deals with function instructions,
321 // so if we are dealing with a function declaration, we're done.
322 if (OldFunc->isDeclaration())
323 return;
324
325 if (Changes < CloneFunctionChangeType::DifferentModule) {
326 assert((NewFunc->getParent() == nullptr ||
327 NewFunc->getParent() == OldFunc->getParent()) &&
328 "Expected NewFunc to have the same parent, or no parent");
329 } else {
330 assert((NewFunc->getParent() == nullptr ||
331 NewFunc->getParent() != OldFunc->getParent()) &&
332 "Expected NewFunc to have different parents, or no parent");
333
334 if (Changes == CloneFunctionChangeType::DifferentModule) {
335 assert(NewFunc->getParent() &&
336 "Need parent of new function to maintain debug info invariants");
337 }
338 }
339
340 MetadataPredicate IdentityMD = createIdentityMDPredicate(F: *OldFunc, Changes);
341
342 // Cloning is always a Module level operation, since Metadata needs to be
343 // cloned.
344 const RemapFlags RemapFlag = RF_None;
345
346 CloneFunctionMetadataInto(NewFunc&: *NewFunc, OldFunc: *OldFunc, VMap, RemapFlag, TypeMapper,
347 Materializer, IdentityMD: &IdentityMD);
348
349 CloneFunctionBodyInto(NewFunc&: *NewFunc, OldFunc: *OldFunc, VMap, RemapFlag, Returns,
350 NameSuffix, CodeInfo, TypeMapper, Materializer,
351 IdentityMD: &IdentityMD);
352
353 // Only update !llvm.dbg.cu for DifferentModule (not CloneModule). In the
354 // same module, the compile unit will already be listed (or not). When
355 // cloning a module, CloneModule() will handle creating the named metadata.
356 if (Changes != CloneFunctionChangeType::DifferentModule)
357 return;
358
359 // Update !llvm.dbg.cu with compile units added to the new module if this
360 // function is being cloned in isolation.
361 //
362 // FIXME: This is making global / module-level changes, which doesn't seem
363 // like the right encapsulation Consider dropping the requirement to update
364 // !llvm.dbg.cu (either obsoleting the node, or restricting it to
365 // non-discardable compile units) instead of discovering compile units by
366 // visiting the metadata attached to global values, which would allow this
367 // code to be deleted. Alternatively, perhaps give responsibility for this
368 // update to CloneFunctionInto's callers.
369 Module *NewModule = NewFunc->getParent();
370 NamedMDNode *NMD = NewModule->getOrInsertNamedMetadata(Name: "llvm.dbg.cu");
371 // Avoid multiple insertions of the same DICompileUnit to NMD.
372 SmallPtrSet<const void *, 8> Visited(llvm::from_range, NMD->operands());
373
374 // Collect and clone all the compile units referenced from the instructions in
375 // the function (e.g. as instructions' scope).
376 DebugInfoFinder DIFinder;
377 collectDebugInfoFromInstructions(F: *OldFunc, DIFinder);
378 for (DICompileUnit *Unit : DIFinder.compile_units()) {
379 MDNode *MappedUnit =
380 MapMetadata(MD: Unit, VM&: VMap, Flags: RF_None, TypeMapper, Materializer);
381 if (Visited.insert(Ptr: MappedUnit).second)
382 NMD->addOperand(M: MappedUnit);
383 }
384}
385
386/// Return a copy of the specified function and add it to that function's
387/// module. Also, any references specified in the VMap are changed to refer to
388/// their mapped value instead of the original one. If any of the arguments to
389/// the function are in the VMap, the arguments are deleted from the resultant
390/// function. The VMap is updated to include mappings from all of the
391/// instructions and basicblocks in the function from their old to new values.
392///
393Function *llvm::CloneFunction(Function *F, ValueToValueMapTy &VMap,
394 ClonedCodeInfo *CodeInfo) {
395 std::vector<Type *> ArgTypes;
396
397 // The user might be deleting arguments to the function by specifying them in
398 // the VMap. If so, we need to not add the arguments to the arg ty vector
399 //
400 for (const Argument &I : F->args())
401 if (VMap.count(Val: &I) == 0) // Haven't mapped the argument to anything yet?
402 ArgTypes.push_back(x: I.getType());
403
404 // Create a new function type...
405 FunctionType *FTy =
406 FunctionType::get(Result: F->getFunctionType()->getReturnType(), Params: ArgTypes,
407 isVarArg: F->getFunctionType()->isVarArg());
408
409 // Create the new function...
410 Function *NewF = Function::Create(Ty: FTy, Linkage: F->getLinkage(), AddrSpace: F->getAddressSpace(),
411 N: F->getName(), M: F->getParent());
412
413 // Loop over the arguments, copying the names of the mapped arguments over...
414 Function::arg_iterator DestI = NewF->arg_begin();
415 for (const Argument &I : F->args())
416 if (VMap.count(Val: &I) == 0) { // Is this argument preserved?
417 DestI->setName(I.getName()); // Copy the name over...
418 VMap[&I] = &*DestI++; // Add mapping to VMap
419 }
420
421 SmallVector<ReturnInst *, 8> Returns; // Ignore returns cloned.
422 CloneFunctionInto(NewFunc: NewF, OldFunc: F, VMap, Changes: CloneFunctionChangeType::LocalChangesOnly,
423 Returns, NameSuffix: "", CodeInfo);
424
425 return NewF;
426}
427
428namespace {
429/// This is a private class used to implement CloneAndPruneFunctionInto.
430struct PruningFunctionCloner {
431 Function *NewFunc;
432 const Function *OldFunc;
433 ValueToValueMapTy &VMap;
434 bool ModuleLevelChanges;
435 const char *NameSuffix;
436 ClonedCodeInfo *CodeInfo;
437 bool HostFuncIsStrictFP;
438
439 Instruction *cloneInstruction(BasicBlock::const_iterator II);
440
441public:
442 PruningFunctionCloner(Function *newFunc, const Function *oldFunc,
443 ValueToValueMapTy &valueMap, bool moduleLevelChanges,
444 const char *nameSuffix, ClonedCodeInfo *codeInfo)
445 : NewFunc(newFunc), OldFunc(oldFunc), VMap(valueMap),
446 ModuleLevelChanges(moduleLevelChanges), NameSuffix(nameSuffix),
447 CodeInfo(codeInfo) {
448 HostFuncIsStrictFP =
449 newFunc->getAttributes().hasFnAttr(Kind: Attribute::StrictFP);
450 }
451
452 /// The specified block is found to be reachable, clone it and
453 /// anything that it can reach.
454 void CloneBlock(const BasicBlock *BB, BasicBlock::const_iterator StartingInst,
455 std::vector<const BasicBlock *> &ToClone);
456};
457} // namespace
458
459Instruction *
460PruningFunctionCloner::cloneInstruction(BasicBlock::const_iterator II) {
461 const Instruction &OldInst = *II;
462 Instruction *NewInst = nullptr;
463 if (HostFuncIsStrictFP) {
464 Intrinsic::ID CIID = getConstrainedIntrinsicID(Instr: OldInst);
465 if (CIID != Intrinsic::not_intrinsic) {
466 // Instead of cloning the instruction, a call to constrained intrinsic
467 // should be created.
468 // Assume the first arguments of constrained intrinsics are the same as
469 // the operands of original instruction.
470
471 // Determine overloaded types of the intrinsic.
472 SmallVector<Type *, 2> TParams;
473 SmallVector<Intrinsic::IITDescriptor, 8> Descriptor;
474 getIntrinsicInfoTableEntries(id: CIID, T&: Descriptor);
475 for (unsigned I = 0, E = Descriptor.size(); I != E; ++I) {
476 Intrinsic::IITDescriptor Operand = Descriptor[I];
477 switch (Operand.Kind) {
478 case Intrinsic::IITDescriptor::Argument:
479 if (Operand.getArgumentKind() !=
480 Intrinsic::IITDescriptor::AK_MatchType) {
481 if (I == 0)
482 TParams.push_back(Elt: OldInst.getType());
483 else
484 TParams.push_back(Elt: OldInst.getOperand(i: I - 1)->getType());
485 }
486 break;
487 case Intrinsic::IITDescriptor::SameVecWidthArgument:
488 ++I;
489 break;
490 default:
491 break;
492 }
493 }
494
495 // Create intrinsic call.
496 LLVMContext &Ctx = NewFunc->getContext();
497 Function *IFn = Intrinsic::getOrInsertDeclaration(M: NewFunc->getParent(),
498 id: CIID, Tys: TParams);
499 SmallVector<Value *, 4> Args;
500 unsigned NumOperands = OldInst.getNumOperands();
501 if (isa<CallInst>(Val: OldInst))
502 --NumOperands;
503 for (unsigned I = 0; I < NumOperands; ++I) {
504 Value *Op = OldInst.getOperand(i: I);
505 Args.push_back(Elt: Op);
506 }
507 if (const auto *CmpI = dyn_cast<FCmpInst>(Val: &OldInst)) {
508 FCmpInst::Predicate Pred = CmpI->getPredicate();
509 StringRef PredName = FCmpInst::getPredicateName(P: Pred);
510 Args.push_back(Elt: MetadataAsValue::get(Context&: Ctx, MD: MDString::get(Context&: Ctx, Str: PredName)));
511 }
512
513 // The last arguments of a constrained intrinsic are metadata that
514 // represent rounding mode (absents in some intrinsics) and exception
515 // behavior. The inlined function uses default settings.
516 if (Intrinsic::hasConstrainedFPRoundingModeOperand(QID: CIID))
517 Args.push_back(
518 Elt: MetadataAsValue::get(Context&: Ctx, MD: MDString::get(Context&: Ctx, Str: "round.tonearest")));
519 Args.push_back(
520 Elt: MetadataAsValue::get(Context&: Ctx, MD: MDString::get(Context&: Ctx, Str: "fpexcept.ignore")));
521
522 NewInst = CallInst::Create(Func: IFn, Args, NameStr: OldInst.getName() + ".strict");
523 }
524 }
525 if (!NewInst)
526 NewInst = II->clone();
527 return NewInst;
528}
529
530/// The specified block is found to be reachable, clone it and
531/// anything that it can reach.
532void PruningFunctionCloner::CloneBlock(
533 const BasicBlock *BB, BasicBlock::const_iterator StartingInst,
534 std::vector<const BasicBlock *> &ToClone) {
535 WeakTrackingVH &BBEntry = VMap[BB];
536
537 // Have we already cloned this block?
538 if (BBEntry)
539 return;
540
541 // Nope, clone it now.
542 BasicBlock *NewBB;
543 Twine NewName(BB->hasName() ? Twine(BB->getName()) + NameSuffix : "");
544 BBEntry = NewBB = BasicBlock::Create(Context&: BB->getContext(), Name: NewName, Parent: NewFunc);
545
546 // It is only legal to clone a function if a block address within that
547 // function is never referenced outside of the function. Given that, we
548 // want to map block addresses from the old function to block addresses in
549 // the clone. (This is different from the generic ValueMapper
550 // implementation, which generates an invalid blockaddress when
551 // cloning a function.)
552 //
553 // Note that we don't need to fix the mapping for unreachable blocks;
554 // the default mapping there is safe.
555 if (BB->hasAddressTaken()) {
556 Constant *OldBBAddr = BlockAddress::get(F: const_cast<Function *>(OldFunc),
557 BB: const_cast<BasicBlock *>(BB));
558 VMap[OldBBAddr] = BlockAddress::get(F: NewFunc, BB: NewBB);
559 }
560
561 bool hasCalls = false, hasDynamicAllocas = false, hasStaticAllocas = false;
562 bool hasMemProfMetadata = false;
563
564 // Keep a cursor pointing at the last place we cloned debug-info records from.
565 BasicBlock::const_iterator DbgCursor = StartingInst;
566 auto CloneDbgRecordsToHere =
567 [&DbgCursor](Instruction *NewInst, BasicBlock::const_iterator II) {
568 // Clone debug-info records onto this instruction. Iterate through any
569 // source-instructions we've cloned and then subsequently optimised
570 // away, so that their debug-info doesn't go missing.
571 for (; DbgCursor != II; ++DbgCursor)
572 NewInst->cloneDebugInfoFrom(From: &*DbgCursor, FromHere: std::nullopt, InsertAtHead: false);
573 NewInst->cloneDebugInfoFrom(From: &*II);
574 DbgCursor = std::next(x: II);
575 };
576
577 // Loop over all instructions, and copy them over, DCE'ing as we go. This
578 // loop doesn't include the terminator.
579 for (BasicBlock::const_iterator II = StartingInst, IE = --BB->end(); II != IE;
580 ++II) {
581
582 // Don't clone fake_use as it may suppress many optimizations
583 // due to inlining, especially SROA.
584 if (auto *IntrInst = dyn_cast<IntrinsicInst>(Val&: II))
585 if (IntrInst->getIntrinsicID() == Intrinsic::fake_use)
586 continue;
587
588 Instruction *NewInst = cloneInstruction(II);
589 NewInst->insertInto(ParentBB: NewBB, It: NewBB->end());
590
591 if (HostFuncIsStrictFP) {
592 // All function calls in the inlined function must get 'strictfp'
593 // attribute to prevent undesirable optimizations.
594 if (auto *Call = dyn_cast<CallInst>(Val: NewInst))
595 Call->addFnAttr(Kind: Attribute::StrictFP);
596 }
597
598 // Eagerly remap operands to the newly cloned instruction, except for PHI
599 // nodes for which we defer processing until we update the CFG.
600 if (!isa<PHINode>(Val: NewInst)) {
601 RemapInstruction(I: NewInst, VM&: VMap,
602 Flags: ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);
603
604 // Eagerly constant fold the newly cloned instruction. If successful, add
605 // a mapping to the new value. Non-constant operands may be incomplete at
606 // this stage, thus instruction simplification is performed after
607 // processing phi-nodes.
608 if (Value *V = ConstantFoldInstruction(
609 I: NewInst, DL: BB->getDataLayout())) {
610 if (isInstructionTriviallyDead(I: NewInst)) {
611 VMap[&*II] = V;
612 NewInst->eraseFromParent();
613 continue;
614 }
615 }
616 }
617
618 if (II->hasName())
619 NewInst->setName(II->getName() + NameSuffix);
620 VMap[&*II] = NewInst; // Add instruction map to value.
621 if (isa<CallInst>(Val: II) && !II->isDebugOrPseudoInst()) {
622 hasCalls = true;
623 hasMemProfMetadata |= II->hasMetadata(KindID: LLVMContext::MD_memprof);
624 hasMemProfMetadata |= II->hasMetadata(KindID: LLVMContext::MD_callsite);
625 }
626
627 CloneDbgRecordsToHere(NewInst, II);
628
629 if (CodeInfo) {
630 CodeInfo->OrigVMap[&*II] = NewInst;
631 if (auto *CB = dyn_cast<CallBase>(Val: &*II))
632 if (CB->hasOperandBundles())
633 CodeInfo->OperandBundleCallSites.push_back(x: NewInst);
634 }
635
636 if (const AllocaInst *AI = dyn_cast<AllocaInst>(Val&: II)) {
637 if (isa<ConstantInt>(Val: AI->getArraySize()))
638 hasStaticAllocas = true;
639 else
640 hasDynamicAllocas = true;
641 }
642 }
643
644 // Finally, clone over the terminator.
645 const Instruction *OldTI = BB->getTerminator();
646 bool TerminatorDone = false;
647 if (const CondBrInst *BI = dyn_cast<CondBrInst>(Val: OldTI)) {
648 // If the condition was a known constant in the callee...
649 ConstantInt *Cond = dyn_cast<ConstantInt>(Val: BI->getCondition());
650 // Or is a known constant in the caller...
651 if (!Cond) {
652 Value *V = VMap.lookup(Val: BI->getCondition());
653 Cond = dyn_cast_or_null<ConstantInt>(Val: V);
654 }
655
656 // Constant fold to uncond branch!
657 if (Cond) {
658 BasicBlock *Dest = BI->getSuccessor(i: !Cond->getZExtValue());
659 auto *NewBI = UncondBrInst::Create(IfTrue: Dest, InsertBefore: NewBB);
660 NewBI->setDebugLoc(BI->getDebugLoc());
661 VMap[OldTI] = NewBI;
662 ToClone.push_back(x: Dest);
663 TerminatorDone = true;
664 }
665 } else if (const SwitchInst *SI = dyn_cast<SwitchInst>(Val: OldTI)) {
666 // If switching on a value known constant in the caller.
667 ConstantInt *Cond = dyn_cast<ConstantInt>(Val: SI->getCondition());
668 if (!Cond) { // Or known constant after constant prop in the callee...
669 Value *V = VMap.lookup(Val: SI->getCondition());
670 Cond = dyn_cast_or_null<ConstantInt>(Val: V);
671 }
672 if (Cond) { // Constant fold to uncond branch!
673 SwitchInst::ConstCaseHandle Case = *SI->findCaseValue(C: Cond);
674 BasicBlock *Dest = const_cast<BasicBlock *>(Case.getCaseSuccessor());
675 auto *NewBI = UncondBrInst::Create(IfTrue: Dest, InsertBefore: NewBB);
676 NewBI->setDebugLoc(SI->getDebugLoc());
677 VMap[OldTI] = NewBI;
678 ToClone.push_back(x: Dest);
679 TerminatorDone = true;
680 }
681 }
682
683 if (!TerminatorDone) {
684 Instruction *NewInst = OldTI->clone();
685 if (OldTI->hasName())
686 NewInst->setName(OldTI->getName() + NameSuffix);
687 NewInst->insertInto(ParentBB: NewBB, It: NewBB->end());
688
689 CloneDbgRecordsToHere(NewInst, OldTI->getIterator());
690
691 VMap[OldTI] = NewInst; // Add instruction map to value.
692
693 if (CodeInfo) {
694 CodeInfo->OrigVMap[OldTI] = NewInst;
695 if (auto *CB = dyn_cast<CallBase>(Val: OldTI))
696 if (CB->hasOperandBundles())
697 CodeInfo->OperandBundleCallSites.push_back(x: NewInst);
698 }
699
700 // Recursively clone any reachable successor blocks.
701 append_range(C&: ToClone, R: successors(I: BB->getTerminator()));
702 } else {
703 // If we didn't create a new terminator, clone DbgVariableRecords from the
704 // old terminator onto the new terminator.
705 Instruction *NewInst = NewBB->getTerminator();
706 assert(NewInst);
707
708 CloneDbgRecordsToHere(NewInst, OldTI->getIterator());
709 }
710
711 if (CodeInfo) {
712 CodeInfo->ContainsCalls |= hasCalls;
713 CodeInfo->ContainsMemProfMetadata |= hasMemProfMetadata;
714 CodeInfo->ContainsDynamicAllocas |= hasDynamicAllocas;
715 CodeInfo->ContainsDynamicAllocas |=
716 hasStaticAllocas && BB != &BB->getParent()->front();
717 }
718}
719
720/// This works like CloneAndPruneFunctionInto, except that it does not clone the
721/// entire function. Instead it starts at an instruction provided by the caller
722/// and copies (and prunes) only the code reachable from that instruction.
723void llvm::CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
724 const Instruction *StartingInst,
725 ValueToValueMapTy &VMap,
726 bool ModuleLevelChanges,
727 SmallVectorImpl<ReturnInst *> &Returns,
728 const char *NameSuffix,
729 ClonedCodeInfo *CodeInfo) {
730 assert(NameSuffix && "NameSuffix cannot be null!");
731
732 ValueMapTypeRemapper *TypeMapper = nullptr;
733 ValueMaterializer *Materializer = nullptr;
734
735#ifndef NDEBUG
736 // If the cloning starts at the beginning of the function, verify that
737 // the function arguments are mapped.
738 if (!StartingInst)
739 for (const Argument &II : OldFunc->args())
740 assert(VMap.count(&II) && "No mapping from source argument specified!");
741#endif
742
743 PruningFunctionCloner PFC(NewFunc, OldFunc, VMap, ModuleLevelChanges,
744 NameSuffix, CodeInfo);
745 const BasicBlock *StartingBB;
746 if (StartingInst)
747 StartingBB = StartingInst->getParent();
748 else {
749 StartingBB = &OldFunc->getEntryBlock();
750 StartingInst = &StartingBB->front();
751 }
752
753 // Clone the entry block, and anything recursively reachable from it.
754 std::vector<const BasicBlock *> CloneWorklist;
755 PFC.CloneBlock(BB: StartingBB, StartingInst: StartingInst->getIterator(), ToClone&: CloneWorklist);
756 while (!CloneWorklist.empty()) {
757 const BasicBlock *BB = CloneWorklist.back();
758 CloneWorklist.pop_back();
759 PFC.CloneBlock(BB, StartingInst: BB->begin(), ToClone&: CloneWorklist);
760 }
761
762 // Loop over all of the basic blocks in the old function. If the block was
763 // reachable, we have cloned it and the old block is now in the value map:
764 // insert it into the new function in the right order. If not, ignore it.
765 //
766 // Defer PHI resolution until rest of function is resolved.
767 SmallVector<const PHINode *, 16> PHIToResolve;
768 for (const BasicBlock &BI : *OldFunc) {
769 Value *V = VMap.lookup(Val: &BI);
770 BasicBlock *NewBB = cast_or_null<BasicBlock>(Val: V);
771 if (!NewBB)
772 continue; // Dead block.
773
774 // Move the new block to preserve the order in the original function.
775 NewBB->moveBefore(MovePos: NewFunc->end());
776
777 // Handle PHI nodes specially, as we have to remove references to dead
778 // blocks.
779 for (const PHINode &PN : BI.phis()) {
780 // PHI nodes may have been remapped to non-PHI nodes by the caller or
781 // during the cloning process.
782 if (isa<PHINode>(Val: VMap[&PN]))
783 PHIToResolve.push_back(Elt: &PN);
784 else
785 break;
786 }
787
788 // Finally, remap the terminator instructions, as those can't be remapped
789 // until all BBs are mapped.
790 RemapInstruction(I: NewBB->getTerminator(), VM&: VMap,
791 Flags: ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges,
792 TypeMapper, Materializer);
793 }
794
795 // Defer PHI resolution until rest of function is resolved, PHI resolution
796 // requires the CFG to be up-to-date.
797 for (unsigned phino = 0, e = PHIToResolve.size(); phino != e;) {
798 const PHINode *OPN = PHIToResolve[phino];
799 unsigned NumPreds = OPN->getNumIncomingValues();
800 const BasicBlock *OldBB = OPN->getParent();
801 BasicBlock *NewBB = cast<BasicBlock>(Val&: VMap[OldBB]);
802
803 // Map operands for blocks that are live and remove operands for blocks
804 // that are dead.
805 for (; phino != PHIToResolve.size() &&
806 PHIToResolve[phino]->getParent() == OldBB;
807 ++phino) {
808 OPN = PHIToResolve[phino];
809 PHINode *PN = cast<PHINode>(Val&: VMap[OPN]);
810 for (int64_t pred = NumPreds - 1; pred >= 0; --pred) {
811 Value *V = VMap.lookup(Val: PN->getIncomingBlock(i: pred));
812 if (BasicBlock *MappedBlock = cast_or_null<BasicBlock>(Val: V)) {
813 Value *InVal =
814 MapValue(V: PN->getIncomingValue(i: pred), VM&: VMap,
815 Flags: ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);
816 assert(InVal && "Unknown input value?");
817 PN->setIncomingValue(i: pred, V: InVal);
818 PN->setIncomingBlock(i: pred, BB: MappedBlock);
819 continue;
820 }
821 PN->removeIncomingValue(Idx: pred, DeletePHIIfEmpty: false);
822 }
823 }
824
825 // The loop above has removed PHI entries for those blocks that are dead
826 // and has updated others. However, if a block is live (i.e. copied over)
827 // but its terminator has been changed to not go to this block, then our
828 // phi nodes will have invalid entries. Update the PHI nodes in this
829 // case.
830 PHINode *PN = cast<PHINode>(Val: NewBB->begin());
831 NumPreds = pred_size(BB: NewBB);
832 if (NumPreds != PN->getNumIncomingValues()) {
833 assert(NumPreds < PN->getNumIncomingValues());
834 // Count how many times each predecessor comes to this block.
835 DenseMap<BasicBlock *, unsigned> PredCount;
836 for (BasicBlock *Pred : predecessors(BB: NewBB))
837 ++PredCount[Pred];
838
839 BasicBlock::iterator I = NewBB->begin();
840 DenseMap<BasicBlock *, unsigned> SeenPredCount;
841 SeenPredCount.reserve(NumEntries: PredCount.size());
842 for (; (PN = dyn_cast<PHINode>(Val&: I)); ++I) {
843 SeenPredCount.clear();
844 PN->removeIncomingValueIf(
845 Predicate: [&](unsigned Idx) {
846 BasicBlock *IncomingBlock = PN->getIncomingBlock(i: Idx);
847 auto It = PredCount.find(Val: IncomingBlock);
848 if (It == PredCount.end())
849 return true;
850 unsigned &SeenCount = SeenPredCount[IncomingBlock];
851 if (SeenCount < It->second) {
852 SeenCount++;
853 return false;
854 }
855 return true;
856 },
857 DeletePHIIfEmpty: false);
858 }
859 }
860
861 // If the loops above have made these phi nodes have 0 or 1 operand,
862 // replace them with poison or the input value. We must do this for
863 // correctness, because 0-operand phis are not valid.
864 PN = cast<PHINode>(Val: NewBB->begin());
865 if (PN->getNumIncomingValues() == 0) {
866 BasicBlock::iterator I = NewBB->begin();
867 BasicBlock::const_iterator OldI = OldBB->begin();
868 while ((PN = dyn_cast<PHINode>(Val: I++))) {
869 Value *NV = PoisonValue::get(T: PN->getType());
870 PN->replaceAllUsesWith(V: NV);
871 assert(VMap[&*OldI] == PN && "VMap mismatch");
872 VMap[&*OldI] = NV;
873 PN->eraseFromParent();
874 ++OldI;
875 }
876 }
877 }
878
879 // Drop all incompatible return attributes that cannot be applied to NewFunc
880 // during cloning, so as to allow instruction simplification to reason on the
881 // old state of the function. The original attributes are restored later.
882 AttributeList Attrs = NewFunc->getAttributes();
883 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(
884 Ty: OldFunc->getReturnType(), AS: Attrs.getRetAttrs());
885 NewFunc->removeRetAttrs(Attrs: IncompatibleAttrs);
886
887 // As phi-nodes have been now remapped, allow incremental simplification of
888 // newly-cloned instructions.
889 const DataLayout &DL = NewFunc->getDataLayout();
890 for (const BasicBlock &BB : *OldFunc) {
891 for (const Instruction &I : BB) {
892 auto *NewI = dyn_cast_or_null<Instruction>(Val: VMap.lookup(Val: &I));
893 if (!NewI)
894 continue;
895
896 if (Value *V = simplifyInstruction(I: NewI, Q: DL)) {
897 NewI->replaceAllUsesWith(V);
898
899 if (isInstructionTriviallyDead(I: NewI)) {
900 NewI->eraseFromParent();
901 } else {
902 // Did not erase it? Restore the new instruction into VMap previously
903 // dropped by `ValueIsRAUWd`.
904 VMap[&I] = NewI;
905 }
906 }
907 }
908 }
909
910 // Restore attributes.
911 NewFunc->setAttributes(Attrs);
912
913 // Remap debug records operands now that all values have been mapped.
914 // Doing this now (late) preserves use-before-defs in debug records. If
915 // we didn't do this, ValueAsMetadata(use-before-def) operands would be
916 // replaced by empty metadata. This would signal later cleanup passes to
917 // remove the debug records, potentially causing incorrect locations.
918 Function::iterator Begin = cast<BasicBlock>(Val&: VMap[StartingBB])->getIterator();
919 for (BasicBlock &BB : make_range(x: Begin, y: NewFunc->end())) {
920 for (Instruction &I : BB) {
921 RemapDbgRecordRange(M: I.getModule(), Range: I.getDbgRecordRange(), VM&: VMap,
922 Flags: ModuleLevelChanges ? RF_None
923 : RF_NoModuleLevelChanges,
924 TypeMapper, Materializer);
925 }
926 }
927
928 // Simplify conditional branches and switches with a constant operand. We try
929 // to prune these out when cloning, but if the simplification required
930 // looking through PHI nodes, those are only available after forming the full
931 // basic block. That may leave some here, and we still want to prune the dead
932 // code as early as possible.
933 for (BasicBlock &BB : make_range(x: Begin, y: NewFunc->end()))
934 ConstantFoldTerminator(BB: &BB);
935
936 // Some blocks may have become unreachable as a result. Find and delete them.
937 {
938 SmallPtrSet<BasicBlock *, 16> ReachableBlocks;
939 SmallVector<BasicBlock *, 16> Worklist;
940 Worklist.push_back(Elt: &*Begin);
941 while (!Worklist.empty()) {
942 BasicBlock *BB = Worklist.pop_back_val();
943 if (ReachableBlocks.insert(Ptr: BB).second)
944 append_range(C&: Worklist, R: successors(BB));
945 }
946
947 SmallVector<BasicBlock *, 16> UnreachableBlocks;
948 for (BasicBlock &BB : make_range(x: Begin, y: NewFunc->end()))
949 if (!ReachableBlocks.contains(Ptr: &BB))
950 UnreachableBlocks.push_back(Elt: &BB);
951 DeleteDeadBlocks(BBs: UnreachableBlocks);
952 }
953
954 // Now that the inlined function body has been fully constructed, go through
955 // and zap unconditional fall-through branches. This happens all the time when
956 // specializing code: code specialization turns conditional branches into
957 // uncond branches, and this code folds them.
958 Function::iterator I = Begin;
959 while (I != NewFunc->end()) {
960 UncondBrInst *BI = dyn_cast<UncondBrInst>(Val: I->getTerminator());
961 if (!BI) {
962 ++I;
963 continue;
964 }
965
966 BasicBlock *Dest = BI->getSuccessor();
967 if (!Dest->getSinglePredecessor() || Dest->hasAddressTaken()) {
968 ++I;
969 continue;
970 }
971
972 // We shouldn't be able to get single-entry PHI nodes here, as instsimplify
973 // above should have zapped all of them..
974 assert(!isa<PHINode>(Dest->begin()));
975
976 // We know all single-entry PHI nodes in the inlined function have been
977 // removed, so we just need to splice the blocks.
978 BI->eraseFromParent();
979
980 // Make all PHI nodes that referred to Dest now refer to I as their source.
981 Dest->replaceAllUsesWith(V: &*I);
982
983 // Move all the instructions in the succ to the pred.
984 I->splice(ToIt: I->end(), FromBB: Dest);
985
986 // Remove the dest block.
987 Dest->eraseFromParent();
988
989 // Do not increment I, iteratively merge all things this block branches to.
990 }
991
992 // Make a final pass over the basic blocks from the old function to gather
993 // any return instructions which survived folding. We have to do this here
994 // because we can iteratively remove and merge returns above.
995 for (Function::iterator I = cast<BasicBlock>(Val&: VMap[StartingBB])->getIterator(),
996 E = NewFunc->end();
997 I != E; ++I)
998 if (ReturnInst *RI = dyn_cast<ReturnInst>(Val: I->getTerminator()))
999 Returns.push_back(Elt: RI);
1000}
1001
1002/// This works exactly like CloneFunctionInto,
1003/// except that it does some simple constant prop and DCE on the fly. The
1004/// effect of this is to copy significantly less code in cases where (for
1005/// example) a function call with constant arguments is inlined, and those
1006/// constant arguments cause a significant amount of code in the callee to be
1007/// dead. Since this doesn't produce an exact copy of the input, it can't be
1008/// used for things like CloneFunction or CloneModule.
1009void llvm::CloneAndPruneFunctionInto(
1010 Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap,
1011 bool ModuleLevelChanges, SmallVectorImpl<ReturnInst *> &Returns,
1012 const char *NameSuffix, ClonedCodeInfo *CodeInfo) {
1013 CloneAndPruneIntoFromInst(NewFunc, OldFunc, StartingInst: &OldFunc->front().front(), VMap,
1014 ModuleLevelChanges, Returns, NameSuffix, CodeInfo);
1015}
1016
1017/// Remaps instructions in \p Blocks using the mapping in \p VMap.
1018void llvm::remapInstructionsInBlocks(ArrayRef<BasicBlock *> Blocks,
1019 ValueToValueMapTy &VMap) {
1020 // Rewrite the code to refer to itself.
1021 for (BasicBlock *BB : Blocks) {
1022 for (Instruction &Inst : *BB) {
1023 RemapDbgRecordRange(M: Inst.getModule(), Range: Inst.getDbgRecordRange(), VM&: VMap,
1024 Flags: RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
1025 RemapInstruction(I: &Inst, VM&: VMap,
1026 Flags: RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
1027 }
1028 }
1029}
1030
/// Clones a loop \p OrigLoop. Returns the loop and the blocks in \p
/// Blocks.
///
/// Updates LoopInfo and DominatorTree assuming the loop is dominated by block
/// \p LoopDomBB. Insert the new blocks before block specified in \p Before.
/// The original-to-clone block mapping is recorded in \p VMap, and the cloned
/// blocks (preheader first) are appended to \p Blocks.
Loop *llvm::cloneLoopWithPreheader(BasicBlock *Before, BasicBlock *LoopDomBB,
                                   Loop *OrigLoop, ValueToValueMapTy &VMap,
                                   const Twine &NameSuffix, LoopInfo *LI,
                                   DominatorTree *DT,
                                   SmallVectorImpl<BasicBlock *> &Blocks) {
  Function *F = OrigLoop->getHeader()->getParent();
  Loop *ParentLoop = OrigLoop->getParentLoop();
  // Maps every original loop (OrigLoop and each of its subloops) to its clone.
  DenseMap<Loop *, Loop *> LMap;

  // Allocate the clone of the outermost loop and attach it at the same
  // position OrigLoop occupies in the loop tree.
  Loop *NewLoop = LI->AllocateLoop();
  LMap[OrigLoop] = NewLoop;
  if (ParentLoop)
    ParentLoop->addChildLoop(NewChild: NewLoop);
  else
    LI->addTopLevelLoop(New: NewLoop);

  BasicBlock *OrigPH = OrigLoop->getLoopPreheader();
  assert(OrigPH && "No preheader");
  BasicBlock *NewPH = CloneBasicBlock(BB: OrigPH, VMap, NameSuffix, F);
  // To rename the loop PHIs.
  VMap[OrigPH] = NewPH;
  Blocks.push_back(Elt: NewPH);

  // Update LoopInfo. The preheader is added to the parent loop (if any),
  // not to the cloned loop itself.
  if (ParentLoop)
    ParentLoop->addBasicBlockToLoop(NewBB: NewPH, LI&: *LI);

  // Update DominatorTree.
  DT->addNewBlock(BB: NewPH, DomBB: LoopDomBB);

  // Clone the loop-tree structure: allocate a clone for every subloop of
  // OrigLoop, wiring up parent/child relationships through LMap. Preorder
  // guarantees a parent is cloned before its children are visited.
  for (Loop *CurLoop : OrigLoop->getLoopsInPreorder()) {
    Loop *&NewLoop = LMap[CurLoop];
    if (!NewLoop) {
      NewLoop = LI->AllocateLoop();

      // Establish the parent/child relationship.
      Loop *OrigParent = CurLoop->getParentLoop();
      assert(OrigParent && "Could not find the original parent loop");
      Loop *NewParentLoop = LMap[OrigParent];
      assert(NewParentLoop && "Could not find the new parent loop");

      NewParentLoop->addChildLoop(NewChild: NewLoop);
    }
  }

  // Clone every block of the loop body, registering each clone with its
  // cloned loop and giving it a provisional dominator (NewPH) for now.
  for (BasicBlock *BB : OrigLoop->getBlocks()) {
    Loop *CurLoop = LI->getLoopFor(BB);
    Loop *&NewLoop = LMap[CurLoop];
    assert(NewLoop && "Expecting new loop to be allocated");

    BasicBlock *NewBB = CloneBasicBlock(BB, VMap, NameSuffix, F);
    VMap[BB] = NewBB;

    // Update LoopInfo.
    NewLoop->addBasicBlockToLoop(NewBB, LI&: *LI);

    // Add DominatorTree node. After seeing all blocks, update to correct
    // IDom.
    DT->addNewBlock(BB: NewBB, DomBB: NewPH);

    Blocks.push_back(Elt: NewBB);
  }

  // Second pass, now that every block has a clone in VMap.
  for (BasicBlock *BB : OrigLoop->getBlocks()) {
    // Update loop headers.
    Loop *CurLoop = LI->getLoopFor(BB);
    if (BB == CurLoop->getHeader())
      LMap[CurLoop]->moveToHeader(BB: cast<BasicBlock>(Val&: VMap[BB]));

    // Update DominatorTree: replace the provisional IDom (NewPH) with the
    // clone of the original block's immediate dominator.
    BasicBlock *IDomBB = DT->getNode(BB)->getIDom()->getBlock();
    DT->changeImmediateDominator(BB: cast<BasicBlock>(Val&: VMap[BB]),
                                 NewBB: cast<BasicBlock>(Val&: VMap[IDomBB]));
  }

  // Move them physically from the end of the block list.
  F->splice(ToIt: Before->getIterator(), FromF: F, FromIt: NewPH->getIterator());
  F->splice(ToIt: Before->getIterator(), FromF: F, FromBeginIt: NewLoop->getHeader()->getIterator(),
            FromEndIt: F->end());

  return NewLoop;
}
1118
/// Duplicate non-Phi instructions from the beginning of block up to
/// StopAt instruction into a split block between BB and its predecessor.
///
/// PHI nodes of \p BB are pre-resolved in \p ValueMapping to the value they
/// carry for the PredBB edge, so the duplicated instructions see the correct
/// operands. Returns the newly created split block.
BasicBlock *llvm::DuplicateInstructionsInSplitBetween(
    BasicBlock *BB, BasicBlock *PredBB, Instruction *StopAt,
    ValueToValueMapTy &ValueMapping, DomTreeUpdater &DTU) {

  assert(count(successors(PredBB), BB) == 1 &&
         "There must be a single edge between PredBB and BB!");
  // We are going to have to map operands from the original BB block to the new
  // copy of the block 'NewBB'. If there are PHI nodes in BB, evaluate them to
  // account for entry from PredBB.
  BasicBlock::iterator BI = BB->begin();
  for (; PHINode *PN = dyn_cast<PHINode>(Val&: BI); ++BI)
    ValueMapping[PN] = PN->getIncomingValueForBlock(BB: PredBB);

  BasicBlock *NewBB = SplitEdge(From: PredBB, To: BB);
  NewBB->setName(PredBB->getName() + ".split");
  Instruction *NewTerm = NewBB->getTerminator();

  // FIXME: SplitEdge does not yet take a DTU, so we include the split edge
  // in the update set here.
  DTU.applyUpdates(Updates: {{DominatorTree::Delete, PredBB, BB},
                    {DominatorTree::Insert, PredBB, NewBB},
                    {DominatorTree::Insert, NewBB, BB}});

  // Clone the non-phi instructions of BB into NewBB, keeping track of the
  // mapping and using it to remap operands in the cloned instructions.
  // Stop once we see the terminator too. This covers the case where BB's
  // terminator gets replaced and StopAt == BB's terminator.
  // Note: BI continues from wherever the PHI loop above left it.
  for (; StopAt != &*BI && BB->getTerminator() != &*BI; ++BI) {
    Instruction *New = BI->clone();
    New->setName(BI->getName());
    // Place the clone just before NewBB's terminator and bring along any
    // debug records attached to the original instruction.
    New->insertBefore(InsertPos: NewTerm->getIterator());
    New->cloneDebugInfoFrom(From: &*BI);
    ValueMapping[&*BI] = New;

    // Remap operands to patch up intra-block references.
    for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
      if (Instruction *Inst = dyn_cast<Instruction>(Val: New->getOperand(i))) {
        auto I = ValueMapping.find(Val: Inst);
        if (I != ValueMapping.end())
          New->setOperand(i, Val: I->second);
      }

    // Remap debug variable operands.
    remapDebugVariable(Mapping&: ValueMapping, Inst: New);
  }

  return NewBB;
}
1169
1170void llvm::cloneNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
1171 DenseMap<MDNode *, MDNode *> &ClonedScopes,
1172 StringRef Ext, LLVMContext &Context) {
1173 MDBuilder MDB(Context);
1174
1175 for (MDNode *ScopeList : NoAliasDeclScopes) {
1176 for (const MDOperand &MDOp : ScopeList->operands()) {
1177 if (MDNode *MD = dyn_cast<MDNode>(Val: MDOp)) {
1178 AliasScopeNode SNANode(MD);
1179
1180 std::string Name;
1181 auto ScopeName = SNANode.getName();
1182 if (!ScopeName.empty())
1183 Name = (Twine(ScopeName) + ":" + Ext).str();
1184 else
1185 Name = std::string(Ext);
1186
1187 MDNode *NewScope = MDB.createAnonymousAliasScope(
1188 Domain: const_cast<MDNode *>(SNANode.getDomain()), Name);
1189 ClonedScopes.insert(KV: std::make_pair(x&: MD, y&: NewScope));
1190 }
1191 }
1192 }
1193}
1194
1195void llvm::adaptNoAliasScopes(Instruction *I,
1196 const DenseMap<MDNode *, MDNode *> &ClonedScopes,
1197 LLVMContext &Context) {
1198 auto CloneScopeList = [&](const MDNode *ScopeList) -> MDNode * {
1199 bool NeedsReplacement = false;
1200 SmallVector<Metadata *, 8> NewScopeList;
1201 for (const MDOperand &MDOp : ScopeList->operands()) {
1202 if (MDNode *MD = dyn_cast<MDNode>(Val: MDOp)) {
1203 if (auto *NewMD = ClonedScopes.lookup(Val: MD)) {
1204 NewScopeList.push_back(Elt: NewMD);
1205 NeedsReplacement = true;
1206 continue;
1207 }
1208 NewScopeList.push_back(Elt: MD);
1209 }
1210 }
1211 if (NeedsReplacement)
1212 return MDNode::get(Context, MDs: NewScopeList);
1213 return nullptr;
1214 };
1215
1216 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(Val: I))
1217 if (MDNode *NewScopeList = CloneScopeList(Decl->getScopeList()))
1218 Decl->setScopeList(NewScopeList);
1219
1220 auto replaceWhenNeeded = [&](unsigned MD_ID) {
1221 if (const MDNode *CSNoAlias = I->getMetadata(KindID: MD_ID))
1222 if (MDNode *NewScopeList = CloneScopeList(CSNoAlias))
1223 I->setMetadata(KindID: MD_ID, Node: NewScopeList);
1224 };
1225 replaceWhenNeeded(LLVMContext::MD_noalias);
1226 replaceWhenNeeded(LLVMContext::MD_alias_scope);
1227}
1228
1229void llvm::cloneAndAdaptNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
1230 ArrayRef<BasicBlock *> NewBlocks,
1231 LLVMContext &Context, StringRef Ext) {
1232 if (NoAliasDeclScopes.empty())
1233 return;
1234
1235 DenseMap<MDNode *, MDNode *> ClonedScopes;
1236 LLVM_DEBUG(dbgs() << "cloneAndAdaptNoAliasScopes: cloning "
1237 << NoAliasDeclScopes.size() << " node(s)\n");
1238
1239 cloneNoAliasScopes(NoAliasDeclScopes, ClonedScopes, Ext, Context);
1240 // Identify instructions using metadata that needs adaptation
1241 for (BasicBlock *NewBlock : NewBlocks)
1242 for (Instruction &I : *NewBlock)
1243 adaptNoAliasScopes(I: &I, ClonedScopes, Context);
1244}
1245
1246void llvm::cloneAndAdaptNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
1247 Instruction *IStart, Instruction *IEnd,
1248 LLVMContext &Context, StringRef Ext) {
1249 if (NoAliasDeclScopes.empty())
1250 return;
1251
1252 DenseMap<MDNode *, MDNode *> ClonedScopes;
1253 LLVM_DEBUG(dbgs() << "cloneAndAdaptNoAliasScopes: cloning "
1254 << NoAliasDeclScopes.size() << " node(s)\n");
1255
1256 cloneNoAliasScopes(NoAliasDeclScopes, ClonedScopes, Ext, Context);
1257 // Identify instructions using metadata that needs adaptation
1258 assert(IStart->getParent() == IEnd->getParent() && "different basic block ?");
1259 auto ItStart = IStart->getIterator();
1260 auto ItEnd = IEnd->getIterator();
1261 ++ItEnd; // IEnd is included, increment ItEnd to get the end of the range
1262 for (auto &I : llvm::make_range(x: ItStart, y: ItEnd))
1263 adaptNoAliasScopes(I: &I, ClonedScopes, Context);
1264}
1265
1266void llvm::identifyNoAliasScopesToClone(
1267 ArrayRef<BasicBlock *> BBs, SmallVectorImpl<MDNode *> &NoAliasDeclScopes) {
1268 for (BasicBlock *BB : BBs)
1269 for (Instruction &I : *BB)
1270 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(Val: &I))
1271 NoAliasDeclScopes.push_back(Elt: Decl->getScopeList());
1272}
1273
1274void llvm::identifyNoAliasScopesToClone(
1275 BasicBlock::iterator Start, BasicBlock::iterator End,
1276 SmallVectorImpl<MDNode *> &NoAliasDeclScopes) {
1277 for (Instruction &I : make_range(x: Start, y: End))
1278 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(Val: &I))
1279 NoAliasDeclScopes.push_back(Elt: Decl->getScopeList());
1280}
1281