1//===- CodeExtractor.cpp - Pull code region into a new function -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the interface to tear out a code region, such as an
10// individual loop or a parallel section, into a new function, replacing it with
11// a call to the new function.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Transforms/Utils/CodeExtractor.h"
16#include "llvm/ADT/ArrayRef.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/STLExtras.h"
19#include "llvm/ADT/SetVector.h"
20#include "llvm/ADT/SmallPtrSet.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/Analysis/AssumptionCache.h"
23#include "llvm/Analysis/BlockFrequencyInfo.h"
24#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
25#include "llvm/Analysis/BranchProbabilityInfo.h"
26#include "llvm/IR/Argument.h"
27#include "llvm/IR/Attributes.h"
28#include "llvm/IR/BasicBlock.h"
29#include "llvm/IR/CFG.h"
30#include "llvm/IR/Constant.h"
31#include "llvm/IR/Constants.h"
32#include "llvm/IR/DIBuilder.h"
33#include "llvm/IR/DataLayout.h"
34#include "llvm/IR/DebugInfo.h"
35#include "llvm/IR/DebugInfoMetadata.h"
36#include "llvm/IR/DerivedTypes.h"
37#include "llvm/IR/Dominators.h"
38#include "llvm/IR/Function.h"
39#include "llvm/IR/GlobalValue.h"
40#include "llvm/IR/InstIterator.h"
41#include "llvm/IR/InstrTypes.h"
42#include "llvm/IR/Instruction.h"
43#include "llvm/IR/Instructions.h"
44#include "llvm/IR/IntrinsicInst.h"
45#include "llvm/IR/Intrinsics.h"
46#include "llvm/IR/LLVMContext.h"
47#include "llvm/IR/MDBuilder.h"
48#include "llvm/IR/Module.h"
49#include "llvm/IR/PatternMatch.h"
50#include "llvm/IR/Type.h"
51#include "llvm/IR/User.h"
52#include "llvm/IR/Value.h"
53#include "llvm/IR/Verifier.h"
54#include "llvm/Support/BlockFrequency.h"
55#include "llvm/Support/BranchProbability.h"
56#include "llvm/Support/Casting.h"
57#include "llvm/Support/CommandLine.h"
58#include "llvm/Support/Debug.h"
59#include "llvm/Support/ErrorHandling.h"
60#include "llvm/Support/raw_ostream.h"
61#include "llvm/Transforms/Utils/BasicBlockUtils.h"
62#include <cassert>
63#include <cstdint>
64#include <iterator>
65#include <map>
66#include <vector>
67
68using namespace llvm;
69using namespace llvm::PatternMatch;
70using ProfileCount = Function::ProfileCount;
71
72#define DEBUG_TYPE "code-extractor"
73
// Provide a command-line option to aggregate function arguments into a struct
// for functions produced by the code extractor. This is useful when converting
// extracted functions to pthread-based code, as only one argument (void*) can
// be passed in to pthread_create().
// Hidden from -help output; OR'ed with the AggregateArgs constructor flag in
// the CodeExtractor constructor below.
static cl::opt<bool>
AggregateArgsOpt("aggregate-extracted-args", cl::Hidden,
                 cl::desc("Aggregate arguments to code-extracted functions"));
81
82/// Test whether a block is valid for extraction.
83static bool isBlockValidForExtraction(const BasicBlock &BB,
84 const SetVector<BasicBlock *> &Result,
85 bool AllowVarArgs, bool AllowAlloca) {
86 // taking the address of a basic block moved to another function is illegal
87 if (BB.hasAddressTaken())
88 return false;
89
90 // don't hoist code that uses another basicblock address, as it's likely to
91 // lead to unexpected behavior, like cross-function jumps
92 SmallPtrSet<User const *, 16> Visited;
93 SmallVector<User const *, 16> ToVisit(llvm::make_pointer_range(Range: BB));
94
95 while (!ToVisit.empty()) {
96 User const *Curr = ToVisit.pop_back_val();
97 if (!Visited.insert(Ptr: Curr).second)
98 continue;
99 if (isa<BlockAddress const>(Val: Curr))
100 return false; // even a reference to self is likely to be not compatible
101
102 if (isa<Instruction>(Val: Curr) && cast<Instruction>(Val: Curr)->getParent() != &BB)
103 continue;
104
105 for (auto const &U : Curr->operands()) {
106 if (auto *UU = dyn_cast<User>(Val: U))
107 ToVisit.push_back(Elt: UU);
108 }
109 }
110
111 // If explicitly requested, allow vastart and alloca. For invoke instructions
112 // verify that extraction is valid.
113 for (BasicBlock::const_iterator I = BB.begin(), E = BB.end(); I != E; ++I) {
114 if (isa<AllocaInst>(Val: I)) {
115 if (!AllowAlloca)
116 return false;
117 continue;
118 }
119
120 if (const auto *II = dyn_cast<InvokeInst>(Val&: I)) {
121 // Unwind destination (either a landingpad, catchswitch, or cleanuppad)
122 // must be a part of the subgraph which is being extracted.
123 if (auto *UBB = II->getUnwindDest())
124 if (!Result.count(key: UBB))
125 return false;
126 continue;
127 }
128
129 // All catch handlers of a catchswitch instruction as well as the unwind
130 // destination must be in the subgraph.
131 if (const auto *CSI = dyn_cast<CatchSwitchInst>(Val&: I)) {
132 if (auto *UBB = CSI->getUnwindDest())
133 if (!Result.count(key: UBB))
134 return false;
135 for (const auto *HBB : CSI->handlers())
136 if (!Result.count(key: const_cast<BasicBlock*>(HBB)))
137 return false;
138 continue;
139 }
140
141 // Make sure that entire catch handler is within subgraph. It is sufficient
142 // to check that catch return's block is in the list.
143 if (const auto *CPI = dyn_cast<CatchPadInst>(Val&: I)) {
144 for (const auto *U : CPI->users())
145 if (const auto *CRI = dyn_cast<CatchReturnInst>(Val: U))
146 if (!Result.count(key: const_cast<BasicBlock*>(CRI->getParent())))
147 return false;
148 continue;
149 }
150
151 // And do similar checks for cleanup handler - the entire handler must be
152 // in subgraph which is going to be extracted. For cleanup return should
153 // additionally check that the unwind destination is also in the subgraph.
154 if (const auto *CPI = dyn_cast<CleanupPadInst>(Val&: I)) {
155 for (const auto *U : CPI->users())
156 if (const auto *CRI = dyn_cast<CleanupReturnInst>(Val: U))
157 if (!Result.count(key: const_cast<BasicBlock*>(CRI->getParent())))
158 return false;
159 continue;
160 }
161 if (const auto *CRI = dyn_cast<CleanupReturnInst>(Val&: I)) {
162 if (auto *UBB = CRI->getUnwindDest())
163 if (!Result.count(key: UBB))
164 return false;
165 continue;
166 }
167
168 if (const CallInst *CI = dyn_cast<CallInst>(Val&: I)) {
169 // musttail calls have several restrictions, generally enforcing matching
170 // calling conventions between the caller parent and musttail callee.
171 // We can't usually honor them, because the extracted function has a
172 // different signature altogether, taking inputs/outputs and returning
173 // a control-flow identifier rather than the actual return value.
174 if (CI->isMustTailCall())
175 return false;
176
177 if (const Function *F = CI->getCalledFunction()) {
178 auto IID = F->getIntrinsicID();
179 if (IID == Intrinsic::vastart) {
180 if (AllowVarArgs)
181 continue;
182 else
183 return false;
184 }
185
186 // Currently, we miscompile outlined copies of eh_typid_for. There are
187 // proposals for fixing this in llvm.org/PR39545.
188 if (IID == Intrinsic::eh_typeid_for)
189 return false;
190 }
191 }
192 }
193
194 return true;
195}
196
197/// Build a set of blocks to extract if the input blocks are viable.
198static SetVector<BasicBlock *>
199buildExtractionBlockSet(ArrayRef<BasicBlock *> BBs, DominatorTree *DT,
200 bool AllowVarArgs, bool AllowAlloca) {
201 assert(!BBs.empty() && "The set of blocks to extract must be non-empty");
202 SetVector<BasicBlock *> Result;
203
204 // Loop over the blocks, adding them to our set-vector, and aborting with an
205 // empty set if we encounter invalid blocks.
206 for (BasicBlock *BB : BBs) {
207 // If this block is dead, don't process it.
208 if (DT && !DT->isReachableFromEntry(A: BB))
209 continue;
210
211 if (!Result.insert(X: BB))
212 llvm_unreachable("Repeated basic blocks in extraction input");
213 }
214
215 LLVM_DEBUG(dbgs() << "Region front block: " << Result.front()->getName()
216 << '\n');
217
218 for (auto *BB : Result) {
219 if (!isBlockValidForExtraction(BB: *BB, Result, AllowVarArgs, AllowAlloca))
220 return {};
221
222 // Make sure that the first block is not a landing pad.
223 if (BB == Result.front()) {
224 if (BB->isEHPad()) {
225 LLVM_DEBUG(dbgs() << "The first block cannot be an unwind block\n");
226 return {};
227 }
228 continue;
229 }
230
231 // All blocks other than the first must not have predecessors outside of
232 // the subgraph which is being extracted.
233 for (auto *PBB : predecessors(BB))
234 if (!Result.count(key: PBB)) {
235 LLVM_DEBUG(dbgs() << "No blocks in this region may have entries from "
236 "outside the region except for the first block!\n"
237 << "Problematic source BB: " << BB->getName() << "\n"
238 << "Problematic destination BB: " << PBB->getName()
239 << "\n");
240 return {};
241 }
242 }
243
244 return Result;
245}
246
247/// isAlignmentPreservedForAddrCast - Return true if the cast operation
248/// for specified target preserves original alignment
249static bool isAlignmentPreservedForAddrCast(const Triple &TargetTriple) {
250 switch (TargetTriple.getArch()) {
251 case Triple::ArchType::amdgcn:
252 case Triple::ArchType::r600:
253 return true;
254 // TODO: Add other architectures for which we are certain that alignment
255 // is preserved during address space cast operations.
256 default:
257 return false;
258 }
259 return false;
260}
261
// Construct a CodeExtractor for the given blocks. Note that the region block
// set is computed eagerly here via buildExtractionBlockSet(), so an invalid
// region leaves Blocks empty (making isEligible() return false). The
// AggregateArgs choice is OR'ed with the -aggregate-extracted-args flag.
CodeExtractor::CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT,
                             bool AggregateArgs, BlockFrequencyInfo *BFI,
                             BranchProbabilityInfo *BPI, AssumptionCache *AC,
                             bool AllowVarArgs, bool AllowAlloca,
                             BasicBlock *AllocationBlock, std::string Suffix,
                             bool ArgsInZeroAddressSpace)
    : DT(DT), AggregateArgs(AggregateArgs || AggregateArgsOpt), BFI(BFI),
      BPI(BPI), AC(AC), AllocationBlock(AllocationBlock),
      AllowVarArgs(AllowVarArgs),
      Blocks(buildExtractionBlockSet(BBs, DT, AllowVarArgs, AllowAlloca)),
      Suffix(Suffix), ArgsInZeroAddressSpace(ArgsInZeroAddressSpace) {}
273
274/// definedInRegion - Return true if the specified value is defined in the
275/// extracted region.
276static bool definedInRegion(const SetVector<BasicBlock *> &Blocks, Value *V) {
277 if (Instruction *I = dyn_cast<Instruction>(Val: V))
278 if (Blocks.count(key: I->getParent()))
279 return true;
280 return false;
281}
282
283/// definedInCaller - Return true if the specified value is defined in the
284/// function being code extracted, but not in the region being extracted.
285/// These values must be passed in as live-ins to the function.
286static bool definedInCaller(const SetVector<BasicBlock *> &Blocks, Value *V) {
287 if (isa<Argument>(Val: V)) return true;
288 if (Instruction *I = dyn_cast<Instruction>(Val: V))
289 if (!Blocks.count(key: I->getParent()))
290 return true;
291 return false;
292}
293
294static BasicBlock *getCommonExitBlock(const SetVector<BasicBlock *> &Blocks) {
295 BasicBlock *CommonExitBlock = nullptr;
296 auto hasNonCommonExitSucc = [&](BasicBlock *Block) {
297 for (auto *Succ : successors(BB: Block)) {
298 // Internal edges, ok.
299 if (Blocks.count(key: Succ))
300 continue;
301 if (!CommonExitBlock) {
302 CommonExitBlock = Succ;
303 continue;
304 }
305 if (CommonExitBlock != Succ)
306 return true;
307 }
308 return false;
309 };
310
311 if (any_of(Range: Blocks, P: hasNonCommonExitSucc))
312 return nullptr;
313
314 return CommonExitBlock;
315}
316
// Build the analysis cache for a function: collect every alloca up front and
// precompute per-block side-effect information, which is much faster than
// re-walking all instructions on each query.
CodeExtractorAnalysisCache::CodeExtractorAnalysisCache(Function &F) {
  for (BasicBlock &BB : F) {
    // Record all allocas (debug intrinsics skipped).
    for (Instruction &II : BB.instructionsWithoutDebug())
      if (auto *AI = dyn_cast<AllocaInst>(Val: &II))
        Allocas.push_back(Elt: AI);

    findSideEffectInfoForBlock(BB);
  }
}
326
327void CodeExtractorAnalysisCache::findSideEffectInfoForBlock(BasicBlock &BB) {
328 for (Instruction &II : BB.instructionsWithoutDebug()) {
329 unsigned Opcode = II.getOpcode();
330 Value *MemAddr = nullptr;
331 switch (Opcode) {
332 case Instruction::Store:
333 case Instruction::Load: {
334 if (Opcode == Instruction::Store) {
335 StoreInst *SI = cast<StoreInst>(Val: &II);
336 MemAddr = SI->getPointerOperand();
337 } else {
338 LoadInst *LI = cast<LoadInst>(Val: &II);
339 MemAddr = LI->getPointerOperand();
340 }
341 // Global variable can not be aliased with locals.
342 if (isa<Constant>(Val: MemAddr))
343 break;
344 Value *Base = MemAddr->stripInBoundsConstantOffsets();
345 if (!isa<AllocaInst>(Val: Base)) {
346 SideEffectingBlocks.insert(V: &BB);
347 return;
348 }
349 BaseMemAddrs[&BB].insert(V: Base);
350 break;
351 }
352 default: {
353 IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(Val: &II);
354 if (IntrInst) {
355 if (IntrInst->isLifetimeStartOrEnd())
356 break;
357 SideEffectingBlocks.insert(V: &BB);
358 return;
359 }
360 // Treat all the other cases conservatively if it has side effects.
361 if (II.mayHaveSideEffects()) {
362 SideEffectingBlocks.insert(V: &BB);
363 return;
364 }
365 }
366 }
367 }
368}
369
370bool CodeExtractorAnalysisCache::doesBlockContainClobberOfAddr(
371 BasicBlock &BB, AllocaInst *Addr) const {
372 if (SideEffectingBlocks.count(V: &BB))
373 return true;
374 auto It = BaseMemAddrs.find(Val: &BB);
375 if (It != BaseMemAddrs.end())
376 return It->second.count(V: Addr);
377 return false;
378}
379
380bool CodeExtractor::isLegalToShrinkwrapLifetimeMarkers(
381 const CodeExtractorAnalysisCache &CEAC, Instruction *Addr) const {
382 AllocaInst *AI = cast<AllocaInst>(Val: Addr->stripInBoundsConstantOffsets());
383 Function *Func = (*Blocks.begin())->getParent();
384 for (BasicBlock &BB : *Func) {
385 if (Blocks.count(key: &BB))
386 continue;
387 if (CEAC.doesBlockContainClobberOfAddr(BB, Addr: AI))
388 return false;
389 }
390 return true;
391}
392
393BasicBlock *
394CodeExtractor::findOrCreateBlockForHoisting(BasicBlock *CommonExitBlock) {
395 BasicBlock *SinglePredFromOutlineRegion = nullptr;
396 assert(!Blocks.count(CommonExitBlock) &&
397 "Expect a block outside the region!");
398 for (auto *Pred : predecessors(BB: CommonExitBlock)) {
399 if (!Blocks.count(key: Pred))
400 continue;
401 if (!SinglePredFromOutlineRegion) {
402 SinglePredFromOutlineRegion = Pred;
403 } else if (SinglePredFromOutlineRegion != Pred) {
404 SinglePredFromOutlineRegion = nullptr;
405 break;
406 }
407 }
408
409 if (SinglePredFromOutlineRegion)
410 return SinglePredFromOutlineRegion;
411
412#ifndef NDEBUG
413 auto getFirstPHI = [](BasicBlock *BB) {
414 BasicBlock::iterator I = BB->begin();
415 PHINode *FirstPhi = nullptr;
416 while (I != BB->end()) {
417 PHINode *Phi = dyn_cast<PHINode>(I);
418 if (!Phi)
419 break;
420 if (!FirstPhi) {
421 FirstPhi = Phi;
422 break;
423 }
424 }
425 return FirstPhi;
426 };
427 // If there are any phi nodes, the single pred either exists or has already
428 // be created before code extraction.
429 assert(!getFirstPHI(CommonExitBlock) && "Phi not expected");
430#endif
431
432 BasicBlock *NewExitBlock =
433 CommonExitBlock->splitBasicBlock(I: CommonExitBlock->getFirstNonPHIIt());
434
435 for (BasicBlock *Pred :
436 llvm::make_early_inc_range(Range: predecessors(BB: CommonExitBlock))) {
437 if (Blocks.count(key: Pred))
438 continue;
439 Pred->getTerminator()->replaceUsesOfWith(From: CommonExitBlock, To: NewExitBlock);
440 }
441 // Now add the old exit block to the outline region.
442 Blocks.insert(X: CommonExitBlock);
443 return CommonExitBlock;
444}
445
// Find the pair of lifetime markers for address 'Addr' that are either
// defined inside the outline region or can legally be shrinkwrapped into the
// outline region. If there are no other untracked uses of the address, return
// the pair of markers if found; otherwise return a pair of nullptr.
CodeExtractor::LifetimeMarkerInfo
CodeExtractor::getLifetimeMarkers(const CodeExtractorAnalysisCache &CEAC,
                                  Instruction *Addr,
                                  BasicBlock *ExitBlock) const {
  LifetimeMarkerInfo Info;

  // Walk all users of the address, recording exactly one start and one end
  // marker; any other use outside the region disqualifies the address.
  for (User *U : Addr->users()) {
    IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(Val: U);
    if (IntrInst) {
      // We don't model addresses with multiple start/end markers, but the
      // markers do not need to be in the region.
      if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_start) {
        if (Info.LifeStart)
          return {}; // Second start marker: unmodeled, bail.
        Info.LifeStart = IntrInst;
        continue;
      }
      if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_end) {
        if (Info.LifeEnd)
          return {}; // Second end marker: unmodeled, bail.
        Info.LifeEnd = IntrInst;
        continue;
      }
    }
    // Found untracked uses of the address, bail.
    if (!definedInRegion(Blocks, V: U))
      return {};
  }

  // Both markers are required to model the lifetime.
  if (!Info.LifeStart || !Info.LifeEnd)
    return {};

  // Markers outside the region must be moved: start sunk into the region,
  // end hoisted to the exit block.
  Info.SinkLifeStart = !definedInRegion(Blocks, V: Info.LifeStart);
  Info.HoistLifeEnd = !definedInRegion(Blocks, V: Info.LifeEnd);
  // Do legality check: shrinkwrapping is only valid if no outside block can
  // clobber the alloca.
  if ((Info.SinkLifeStart || Info.HoistLifeEnd) &&
      !isLegalToShrinkwrapLifetimeMarkers(CEAC, Addr))
    return {};

  // Check to see if we have a place to do hoisting, if not, bail.
  if (Info.HoistLifeEnd && !ExitBlock)
    return {};

  return Info;
}
495
// Collect allocas outside the region that can be sunk into the outlined
// function (SinkCands), along with lifetime.end markers that must be hoisted
// to the common exit block (HoistCands). Also computes ExitBlock, the
// region's single common exit (nullptr if there is more than one).
void CodeExtractor::findAllocas(const CodeExtractorAnalysisCache &CEAC,
                                ValueSet &SinkCands, ValueSet &HoistCands,
                                BasicBlock *&ExitBlock) const {
  Function *Func = (*Blocks.begin())->getParent();
  ExitBlock = getCommonExitBlock(Blocks);

  // Record the shrinkwrap decision encoded in a LifetimeMarkerInfo: sink the
  // start marker and/or hoist the end marker as needed. Returns false when
  // there are no usable markers (LMI.LifeStart is null).
  auto moveOrIgnoreLifetimeMarkers =
      [&](const LifetimeMarkerInfo &LMI) -> bool {
    if (!LMI.LifeStart)
      return false;
    if (LMI.SinkLifeStart) {
      LLVM_DEBUG(dbgs() << "Sinking lifetime.start: " << *LMI.LifeStart
                        << "\n");
      SinkCands.insert(X: LMI.LifeStart);
    }
    if (LMI.HoistLifeEnd) {
      LLVM_DEBUG(dbgs() << "Hoisting lifetime.end: " << *LMI.LifeEnd << "\n");
      HoistCands.insert(X: LMI.LifeEnd);
    }
    return true;
  };

  // Look up allocas in the original function in CodeExtractorAnalysisCache, as
  // this is much faster than walking all the instructions.
  for (AllocaInst *AI : CEAC.getAllocas()) {
    BasicBlock *BB = AI->getParent();
    // Allocas already inside the region need no sinking.
    if (Blocks.count(key: BB))
      continue;

    // As a prior call to extractCodeRegion() may have shrinkwrapped the alloca,
    // check whether it is actually still in the original function.
    Function *AIFunc = BB->getParent();
    if (AIFunc != Func)
      continue;

    // Simple case: the alloca itself has a single start/end marker pair.
    LifetimeMarkerInfo MarkerInfo = getLifetimeMarkers(CEAC, Addr: AI, ExitBlock);
    bool Moved = moveOrIgnoreLifetimeMarkers(MarkerInfo);
    if (Moved) {
      LLVM_DEBUG(dbgs() << "Sinking alloca: " << *AI << "\n");
      SinkCands.insert(X: AI);
      continue;
    }

    // Find bitcasts in the outlined region that have lifetime marker users
    // outside that region. Replace the lifetime marker use with an
    // outside region bitcast to avoid unnecessary alloca/reload instructions
    // and extra lifetime markers.
    SmallVector<Instruction *, 2> LifetimeBitcastUsers;
    for (User *U : AI->users()) {
      if (!definedInRegion(Blocks, V: U))
        continue;

      // Only consider in-region users that are the alloca itself modulo
      // constant offsets (i.e. address-equivalent casts/GEPs).
      if (U->stripInBoundsConstantOffsets() != AI)
        continue;

      Instruction *Bitcast = cast<Instruction>(Val: U);
      for (User *BU : Bitcast->users()) {
        auto *IntrInst = dyn_cast<LifetimeIntrinsic>(Val: BU);
        if (!IntrInst)
          continue;

        // Markers already inside the region are fine as-is.
        if (definedInRegion(Blocks, V: IntrInst))
          continue;

        LLVM_DEBUG(dbgs() << "Replace use of extracted region bitcast"
                          << *Bitcast << " in out-of-region lifetime marker "
                          << *IntrInst << "\n");
        LifetimeBitcastUsers.push_back(Elt: IntrInst);
      }
    }

    // Rewrite each such out-of-region marker to operate on a fresh cast of
    // the alloca inserted right before the marker, so the marker no longer
    // references an in-region instruction.
    for (Instruction *I : LifetimeBitcastUsers) {
      Module *M = AIFunc->getParent();
      LLVMContext &Ctx = M->getContext();
      auto *Int8PtrTy = PointerType::getUnqual(C&: Ctx);
      CastInst *CastI =
          CastInst::CreatePointerCast(S: AI, Ty: Int8PtrTy, Name: "lt.cast", InsertBefore: I->getIterator());
      I->replaceUsesOfWith(From: I->getOperand(i: 1), To: CastI);
    }

    // Follow any bitcasts: the alloca is sinkable if each address-equivalent
    // cast has its own well-formed marker pair and there are no other
    // out-of-region uses.
    SmallVector<Instruction *, 2> Bitcasts;
    SmallVector<LifetimeMarkerInfo, 2> BitcastLifetimeInfo;
    for (User *U : AI->users()) {
      if (U->stripInBoundsConstantOffsets() == AI) {
        Instruction *Bitcast = cast<Instruction>(Val: U);
        LifetimeMarkerInfo LMI = getLifetimeMarkers(CEAC, Addr: Bitcast, ExitBlock);
        if (LMI.LifeStart) {
          Bitcasts.push_back(Elt: Bitcast);
          BitcastLifetimeInfo.push_back(Elt: LMI);
          continue;
        }
      }

      // Found unknown use of AI.
      if (!definedInRegion(Blocks, V: U)) {
        Bitcasts.clear();
        break;
      }
    }

    // Either no bitcasts reference the alloca or there are unknown uses.
    if (Bitcasts.empty())
      continue;

    LLVM_DEBUG(dbgs() << "Sinking alloca (via bitcast): " << *AI << "\n");
    SinkCands.insert(X: AI);
    for (unsigned I = 0, E = Bitcasts.size(); I != E; ++I) {
      Instruction *BitcastAddr = Bitcasts[I];
      const LifetimeMarkerInfo &LMI = BitcastLifetimeInfo[I];
      assert(LMI.LifeStart &&
             "Unsafe to sink bitcast without lifetime markers");
      moveOrIgnoreLifetimeMarkers(LMI);
      // Casts defined outside the region must be sunk along with the alloca.
      if (!definedInRegion(Blocks, V: BitcastAddr)) {
        LLVM_DEBUG(dbgs() << "Sinking bitcast-of-alloca: " << *BitcastAddr
                          << "\n");
        SinkCands.insert(X: BitcastAddr);
      }
    }
  }
}
617
618bool CodeExtractor::isEligible() const {
619 if (Blocks.empty())
620 return false;
621 BasicBlock *Header = *Blocks.begin();
622 Function *F = Header->getParent();
623
624 // For functions with varargs, check that varargs handling is only done in the
625 // outlined function, i.e vastart and vaend are only used in outlined blocks.
626 if (AllowVarArgs && F->getFunctionType()->isVarArg()) {
627 auto containsVarArgIntrinsic = [](const Instruction &I) {
628 if (const CallInst *CI = dyn_cast<CallInst>(Val: &I))
629 if (const Function *Callee = CI->getCalledFunction())
630 return Callee->getIntrinsicID() == Intrinsic::vastart ||
631 Callee->getIntrinsicID() == Intrinsic::vaend;
632 return false;
633 };
634
635 for (auto &BB : *F) {
636 if (Blocks.count(key: &BB))
637 continue;
638 if (llvm::any_of(Range&: BB, P: containsVarArgIntrinsic))
639 return false;
640 }
641 }
642 // stacksave as input implies stackrestore in the outlined function.
643 // This can confuse prolog epilog insertion phase.
644 // stacksave's uses must not cross outlined function.
645 for (BasicBlock *BB : Blocks) {
646 for (Instruction &I : *BB) {
647 IntrinsicInst *II = dyn_cast<IntrinsicInst>(Val: &I);
648 if (!II)
649 continue;
650 bool IsSave = II->getIntrinsicID() == Intrinsic::stacksave;
651 bool IsRestore = II->getIntrinsicID() == Intrinsic::stackrestore;
652 if (IsSave && any_of(Range: II->users(), P: [&Blks = this->Blocks](User *U) {
653 return !definedInRegion(Blocks: Blks, V: U);
654 }))
655 return false;
656 if (IsRestore && !definedInRegion(Blocks, V: II->getArgOperand(i: 0)))
657 return false;
658 }
659 }
660 return true;
661}
662
663void CodeExtractor::findInputsOutputs(ValueSet &Inputs, ValueSet &Outputs,
664 const ValueSet &SinkCands,
665 bool CollectGlobalInputs) const {
666 for (BasicBlock *BB : Blocks) {
667 // If a used value is defined outside the region, it's an input. If an
668 // instruction is used outside the region, it's an output.
669 for (Instruction &II : *BB) {
670 for (auto &OI : II.operands()) {
671 Value *V = OI;
672 if (!SinkCands.count(key: V) &&
673 (definedInCaller(Blocks, V) ||
674 (CollectGlobalInputs && llvm::isa<llvm::GlobalVariable>(Val: V))))
675 Inputs.insert(X: V);
676 }
677
678 for (User *U : II.users())
679 if (!definedInRegion(Blocks, V: U)) {
680 Outputs.insert(X: &II);
681 break;
682 }
683 }
684 }
685}
686
/// severSplitPHINodesOfEntry - If a PHI node has multiple inputs from outside
/// of the region, we need to split the entry block of the region so that the
/// PHI node is easier to deal with.
void CodeExtractor::severSplitPHINodesOfEntry(BasicBlock *&Header) {
  unsigned NumPredsFromRegion = 0;
  unsigned NumPredsOutsideRegion = 0;

  // If Header is the function entry block it has no predecessors at all, so
  // splitting is always required (the counts stay zero and we fall through).
  if (Header != &Header->getParent()->getEntryBlock()) {
    PHINode *PN = dyn_cast<PHINode>(Val: Header->begin());
    if (!PN) return; // No PHI nodes.

    // If the header node contains any PHI nodes, check to see if there is more
    // than one entry from outside the region. If so, we need to sever the
    // header block into two.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (Blocks.count(key: PN->getIncomingBlock(i)))
        ++NumPredsFromRegion;
      else
        ++NumPredsOutsideRegion;

    // If there is one (or fewer) predecessor from outside the region, we don't
    // need to do anything special.
    if (NumPredsOutsideRegion <= 1) return;
  }

  // Otherwise, we need to split the header block into two pieces: one
  // containing PHI nodes merging values from outside of the region, and a
  // second that contains all of the code for the block and merges back any
  // incoming values from inside of the region.
  BasicBlock *NewBB = SplitBlock(Old: Header, SplitPt: Header->getFirstNonPHIIt(), DT);

  // We only want to code extract the second block now, and it becomes the new
  // header of the region.
  BasicBlock *OldPred = Header;
  Blocks.remove(X: OldPred);
  Blocks.insert(X: NewBB);
  Header = NewBB;

  // Okay, now we need to adjust the PHI nodes and any branches from within the
  // region to go to the new header block instead of the old header block.
  if (NumPredsFromRegion) {
    PHINode *PN = cast<PHINode>(Val: OldPred->begin());
    // Loop over all of the predecessors of OldPred that are in the region,
    // changing them to branch to NewBB instead.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (Blocks.count(key: PN->getIncomingBlock(i))) {
        Instruction *TI = PN->getIncomingBlock(i)->getTerminator();
        TI->replaceUsesOfWith(From: OldPred, To: NewBB);
      }

    // Okay, everything within the region is now branching to the right block, we
    // just have to update the PHI nodes now, inserting PHI nodes into NewBB.
    BasicBlock::iterator AfterPHIs;
    for (AfterPHIs = OldPred->begin(); isa<PHINode>(Val: AfterPHIs); ++AfterPHIs) {
      PHINode *PN = cast<PHINode>(Val&: AfterPHIs);
      // Create a new PHI node in the new region, which has an incoming value
      // from OldPred of PN.
      PHINode *NewPN = PHINode::Create(Ty: PN->getType(), NumReservedValues: 1 + NumPredsFromRegion,
                                       NameStr: PN->getName() + ".ce");
      NewPN->insertBefore(InsertPos: NewBB->begin());
      // All uses of the old PHI (including in-region ones) now read NewPN;
      // NewPN merges the out-of-region value (via PN) with in-region values.
      PN->replaceAllUsesWith(V: NewPN);
      NewPN->addIncoming(V: PN, BB: OldPred);

      // Loop over all of the incoming value in PN, moving them to NewPN if they
      // are from the extracted region.
      PN->removeIncomingValueIf(Predicate: [&](unsigned i) {
        if (Blocks.count(key: PN->getIncomingBlock(i))) {
          NewPN->addIncoming(V: PN->getIncomingValue(i), BB: PN->getIncomingBlock(i));
          return true;
        }
        return false;
      });
    }
  }
}
762
/// severSplitPHINodesOfExits - if PHI nodes in exit blocks have inputs from
/// the outlined region, we split these PHIs in two: one with inputs from the
/// region and another with the remaining incoming blocks; the first PHIs are
/// then placed in the outlined region.
void CodeExtractor::severSplitPHINodesOfExits() {
  for (BasicBlock *ExitBB : ExtractedFuncRetVals) {
    // Lazily-created block that will hold the region-side halves of the PHIs.
    BasicBlock *NewBB = nullptr;

    for (PHINode &PN : ExitBB->phis()) {
      // Find all incoming values from the outlining region.
      SmallVector<unsigned, 2> IncomingVals;
      for (unsigned i = 0; i < PN.getNumIncomingValues(); ++i)
        if (Blocks.count(key: PN.getIncomingBlock(i)))
          IncomingVals.push_back(Elt: i);

      // Do not process PHI if there is one (or fewer) predecessor from region.
      // If PHI has exactly one predecessor from region, only this one incoming
      // will be replaced on codeRepl block, so it should be safe to skip PHI.
      if (IncomingVals.size() <= 1)
        continue;

      // Create block for new PHIs and add it to the list of outlined if it
      // wasn't done before.
      if (!NewBB) {
        NewBB = BasicBlock::Create(Context&: ExitBB->getContext(),
                                   Name: ExitBB->getName() + ".split",
                                   Parent: ExitBB->getParent(), InsertBefore: ExitBB);
        // Retarget all in-region predecessors of ExitBB at the new block,
        // which simply falls through to ExitBB.
        SmallVector<BasicBlock *, 4> Preds(predecessors(BB: ExitBB));
        for (BasicBlock *PredBB : Preds)
          if (Blocks.count(key: PredBB))
            PredBB->getTerminator()->replaceUsesOfWith(From: ExitBB, To: NewBB);
        BranchInst::Create(IfTrue: ExitBB, InsertBefore: NewBB);
        Blocks.insert(X: NewBB);
      }

      // Split this PHI: the region-side incoming values move to a new PHI in
      // NewBB, and the original PHI keeps a single edge from NewBB.
      PHINode *NewPN = PHINode::Create(Ty: PN.getType(), NumReservedValues: IncomingVals.size(),
                                       NameStr: PN.getName() + ".ce");
      NewPN->insertBefore(InsertPos: NewBB->getFirstNonPHIIt());
      for (unsigned i : IncomingVals)
        NewPN->addIncoming(V: PN.getIncomingValue(i), BB: PN.getIncomingBlock(i));
      // Remove in reverse so earlier indices stay valid while erasing.
      for (unsigned i : reverse(C&: IncomingVals))
        PN.removeIncomingValue(Idx: i, DeletePHIIfEmpty: false);
      PN.addIncoming(V: NewPN, BB: NewBB);
    }
  }
}
810
811void CodeExtractor::splitReturnBlocks() {
812 for (BasicBlock *Block : Blocks)
813 if (ReturnInst *RI = dyn_cast<ReturnInst>(Val: Block->getTerminator())) {
814 BasicBlock *New =
815 Block->splitBasicBlock(I: RI->getIterator(), BBName: Block->getName() + ".ret");
816 if (DT) {
817 // Old dominates New. New node dominates all other nodes dominated
818 // by Old.
819 DomTreeNode *OldNode = DT->getNode(BB: Block);
820 SmallVector<DomTreeNode *, 8> Children(OldNode->begin(),
821 OldNode->end());
822
823 DomTreeNode *NewNode = DT->addNewBlock(BB: New, DomBB: Block);
824
825 for (DomTreeNode *I : Children)
826 DT->changeImmediateDominator(N: I, NewIDom: NewNode);
827 }
828 }
829}
830
831Function *CodeExtractor::constructFunctionDeclaration(
832 const ValueSet &inputs, const ValueSet &outputs, BlockFrequency EntryFreq,
833 const Twine &Name, ValueSet &StructValues, StructType *&StructTy) {
834 LLVM_DEBUG(dbgs() << "inputs: " << inputs.size() << "\n");
835 LLVM_DEBUG(dbgs() << "outputs: " << outputs.size() << "\n");
836
837 Function *oldFunction = Blocks.front()->getParent();
838 Module *M = Blocks.front()->getModule();
839
840 // Assemble the function's parameter lists.
841 std::vector<Type *> ParamTy;
842 std::vector<Type *> AggParamTy;
843 const DataLayout &DL = M->getDataLayout();
844
845 // Add the types of the input values to the function's argument list
846 for (Value *value : inputs) {
847 LLVM_DEBUG(dbgs() << "value used in func: " << *value << "\n");
848 if (AggregateArgs && !ExcludeArgsFromAggregate.contains(key: value)) {
849 AggParamTy.push_back(x: value->getType());
850 StructValues.insert(X: value);
851 } else
852 ParamTy.push_back(x: value->getType());
853 }
854
855 // Add the types of the output values to the function's argument list.
856 for (Value *output : outputs) {
857 LLVM_DEBUG(dbgs() << "instr used in func: " << *output << "\n");
858 if (AggregateArgs && !ExcludeArgsFromAggregate.contains(key: output)) {
859 AggParamTy.push_back(x: output->getType());
860 StructValues.insert(X: output);
861 } else
862 ParamTy.push_back(
863 x: PointerType::get(C&: output->getContext(), AddressSpace: DL.getAllocaAddrSpace()));
864 }
865
866 assert(
867 (ParamTy.size() + AggParamTy.size()) ==
868 (inputs.size() + outputs.size()) &&
869 "Number of scalar and aggregate params does not match inputs, outputs");
870 assert((StructValues.empty() || AggregateArgs) &&
871 "Expeced StructValues only with AggregateArgs set");
872
873 // Concatenate scalar and aggregate params in ParamTy.
874 if (!AggParamTy.empty()) {
875 StructTy = StructType::get(Context&: M->getContext(), Elements: AggParamTy);
876 ParamTy.push_back(x: PointerType::get(
877 C&: M->getContext(), AddressSpace: ArgsInZeroAddressSpace ? 0 : DL.getAllocaAddrSpace()));
878 }
879
880 Type *RetTy = getSwitchType();
881 LLVM_DEBUG({
882 dbgs() << "Function type: " << *RetTy << " f(";
883 for (Type *i : ParamTy)
884 dbgs() << *i << ", ";
885 dbgs() << ")\n";
886 });
887
888 FunctionType *funcType = FunctionType::get(
889 Result: RetTy, Params: ParamTy, isVarArg: AllowVarArgs && oldFunction->isVarArg());
890
891 // Create the new function
892 Function *newFunction =
893 Function::Create(Ty: funcType, Linkage: GlobalValue::InternalLinkage,
894 AddrSpace: oldFunction->getAddressSpace(), N: Name, M);
895
896 // Propagate personality info to the new function if there is one.
897 if (oldFunction->hasPersonalityFn())
898 newFunction->setPersonalityFn(oldFunction->getPersonalityFn());
899
900 // Inherit all of the target dependent attributes and white-listed
901 // target independent attributes.
902 // (e.g. If the extracted region contains a call to an x86.sse
903 // instruction we need to make sure that the extracted region has the
904 // "target-features" attribute allowing it to be lowered.
905 // FIXME: This should be changed to check to see if a specific
906 // attribute can not be inherited.
907 for (const auto &Attr : oldFunction->getAttributes().getFnAttrs()) {
908 if (Attr.isStringAttribute()) {
909 if (Attr.getKindAsString() == "thunk")
910 continue;
911 } else
912 switch (Attr.getKindAsEnum()) {
913 // Those attributes cannot be propagated safely. Explicitly list them
914 // here so we get a warning if new attributes are added.
915 case Attribute::AllocSize:
916 case Attribute::Builtin:
917 case Attribute::Convergent:
918 case Attribute::JumpTable:
919 case Attribute::Naked:
920 case Attribute::NoBuiltin:
921 case Attribute::NoMerge:
922 case Attribute::NoReturn:
923 case Attribute::NoSync:
924 case Attribute::ReturnsTwice:
925 case Attribute::Speculatable:
926 case Attribute::StackAlignment:
927 case Attribute::WillReturn:
928 case Attribute::AllocKind:
929 case Attribute::PresplitCoroutine:
930 case Attribute::Memory:
931 case Attribute::NoFPClass:
932 case Attribute::CoroDestroyOnlyWhenComplete:
933 case Attribute::CoroElideSafe:
934 case Attribute::NoDivergenceSource:
935 case Attribute::NoCreateUndefOrPoison:
936 continue;
937 // Those attributes should be safe to propagate to the extracted function.
938 case Attribute::AlwaysInline:
939 case Attribute::Cold:
940 case Attribute::DisableSanitizerInstrumentation:
941 case Attribute::FnRetThunkExtern:
942 case Attribute::Hot:
943 case Attribute::HybridPatchable:
944 case Attribute::NoRecurse:
945 case Attribute::InlineHint:
946 case Attribute::MinSize:
947 case Attribute::NoCallback:
948 case Attribute::NoDuplicate:
949 case Attribute::NoFree:
950 case Attribute::NoImplicitFloat:
951 case Attribute::NoInline:
952 case Attribute::NoOutline:
953 case Attribute::NonLazyBind:
954 case Attribute::NoRedZone:
955 case Attribute::NoUnwind:
956 case Attribute::NoSanitizeBounds:
957 case Attribute::NoSanitizeCoverage:
958 case Attribute::NullPointerIsValid:
959 case Attribute::OptimizeForDebugging:
960 case Attribute::OptForFuzzing:
961 case Attribute::OptimizeNone:
962 case Attribute::OptimizeForSize:
963 case Attribute::SafeStack:
964 case Attribute::ShadowCallStack:
965 case Attribute::SanitizeAddress:
966 case Attribute::SanitizeMemory:
967 case Attribute::SanitizeNumericalStability:
968 case Attribute::SanitizeThread:
969 case Attribute::SanitizeType:
970 case Attribute::SanitizeHWAddress:
971 case Attribute::SanitizeMemTag:
972 case Attribute::SanitizeRealtime:
973 case Attribute::SanitizeRealtimeBlocking:
974 case Attribute::SanitizeAllocToken:
975 case Attribute::SpeculativeLoadHardening:
976 case Attribute::StackProtect:
977 case Attribute::StackProtectReq:
978 case Attribute::StackProtectStrong:
979 case Attribute::StrictFP:
980 case Attribute::UWTable:
981 case Attribute::VScaleRange:
982 case Attribute::NoCfCheck:
983 case Attribute::MustProgress:
984 case Attribute::NoProfile:
985 case Attribute::SkipProfile:
986 case Attribute::DenormalFPEnv:
987 break;
988 // These attributes cannot be applied to functions.
989 case Attribute::Alignment:
990 case Attribute::AllocatedPointer:
991 case Attribute::AllocAlign:
992 case Attribute::ByVal:
993 case Attribute::Captures:
994 case Attribute::Dereferenceable:
995 case Attribute::DereferenceableOrNull:
996 case Attribute::ElementType:
997 case Attribute::InAlloca:
998 case Attribute::InReg:
999 case Attribute::Nest:
1000 case Attribute::NoAlias:
1001 case Attribute::NoUndef:
1002 case Attribute::NonNull:
1003 case Attribute::Preallocated:
1004 case Attribute::ReadNone:
1005 case Attribute::ReadOnly:
1006 case Attribute::Returned:
1007 case Attribute::SExt:
1008 case Attribute::StructRet:
1009 case Attribute::SwiftError:
1010 case Attribute::SwiftSelf:
1011 case Attribute::SwiftAsync:
1012 case Attribute::ZExt:
1013 case Attribute::ImmArg:
1014 case Attribute::ByRef:
1015 case Attribute::WriteOnly:
1016 case Attribute::Writable:
1017 case Attribute::DeadOnUnwind:
1018 case Attribute::Range:
1019 case Attribute::Initializes:
1020 case Attribute::NoExt:
1021 // These are not really attributes.
1022 case Attribute::None:
1023 case Attribute::EndAttrKinds:
1024 case Attribute::EmptyKey:
1025 case Attribute::TombstoneKey:
1026 case Attribute::DeadOnReturn:
1027 llvm_unreachable("Not a function attribute");
1028 }
1029
1030 newFunction->addFnAttr(Attr);
1031 }
1032
1033 // Create scalar and aggregate iterators to name all of the arguments we
1034 // inserted.
1035 Function::arg_iterator ScalarAI = newFunction->arg_begin();
1036
1037 // Set names and attributes for input and output arguments.
1038 ScalarAI = newFunction->arg_begin();
1039 for (Value *input : inputs) {
1040 if (StructValues.contains(key: input))
1041 continue;
1042
1043 ScalarAI->setName(input->getName());
1044 if (input->isSwiftError())
1045 newFunction->addParamAttr(ArgNo: ScalarAI - newFunction->arg_begin(),
1046 Kind: Attribute::SwiftError);
1047 ++ScalarAI;
1048 }
1049 for (Value *output : outputs) {
1050 if (StructValues.contains(key: output))
1051 continue;
1052
1053 ScalarAI->setName(output->getName() + ".out");
1054 ++ScalarAI;
1055 }
1056
1057 // Update the entry count of the function.
1058 if (BFI) {
1059 auto Count = BFI->getProfileCountFromFreq(Freq: EntryFreq);
1060 if (Count.has_value())
1061 newFunction->setEntryCount(
1062 Count: ProfileCount(*Count, Function::PCT_Real)); // FIXME
1063 }
1064
1065 return newFunction;
1066}
1067
1068/// If the original function has debug info, we have to add a debug location
1069/// to the new branch instruction from the artificial entry block.
1070/// We use the debug location of the first instruction in the extracted
1071/// blocks, as there is no other equivalent line in the source code.
1072static void applyFirstDebugLoc(Function *oldFunction,
1073 ArrayRef<BasicBlock *> Blocks,
1074 Instruction *BranchI) {
1075 if (oldFunction->getSubprogram()) {
1076 any_of(Range&: Blocks, P: [&BranchI](const BasicBlock *BB) {
1077 return any_of(Range: *BB, P: [&BranchI](const Instruction &I) {
1078 if (!I.getDebugLoc())
1079 return false;
1080 BranchI->setDebugLoc(I.getDebugLoc());
1081 return true;
1082 });
1083 });
1084 }
1085}
1086
1087/// Erase lifetime.start markers which reference inputs to the extraction
1088/// region, and insert the referenced memory into \p LifetimesStart.
1089///
1090/// The extraction region is defined by a set of blocks (\p Blocks), and a set
1091/// of allocas which will be moved from the caller function into the extracted
1092/// function (\p SunkAllocas).
1093static void eraseLifetimeMarkersOnInputs(const SetVector<BasicBlock *> &Blocks,
1094 const SetVector<Value *> &SunkAllocas,
1095 SetVector<Value *> &LifetimesStart) {
1096 for (BasicBlock *BB : Blocks) {
1097 for (Instruction &I : llvm::make_early_inc_range(Range&: *BB)) {
1098 auto *II = dyn_cast<LifetimeIntrinsic>(Val: &I);
1099 if (!II)
1100 continue;
1101
1102 // Get the memory operand of the lifetime marker. If the underlying
1103 // object is a sunk alloca, or is otherwise defined in the extraction
1104 // region, the lifetime marker must not be erased.
1105 Value *Mem = II->getOperand(i_nocapture: 0);
1106 if (SunkAllocas.count(key: Mem) || definedInRegion(Blocks, V: Mem))
1107 continue;
1108
1109 if (II->getIntrinsicID() == Intrinsic::lifetime_start)
1110 LifetimesStart.insert(X: Mem);
1111 II->eraseFromParent();
1112 }
1113 }
1114}
1115
1116/// Insert lifetime start/end markers surrounding the call to the new function
1117/// for objects defined in the caller.
1118static void insertLifetimeMarkersSurroundingCall(
1119 Module *M, ArrayRef<Value *> LifetimesStart, ArrayRef<Value *> LifetimesEnd,
1120 CallInst *TheCall) {
1121 Instruction *Term = TheCall->getParent()->getTerminator();
1122
1123 // Emit lifetime markers for the pointers given in \p Objects. Insert the
1124 // markers before the call if \p InsertBefore, and after the call otherwise.
1125 auto insertMarkers = [&](Intrinsic::ID MarkerFunc, ArrayRef<Value *> Objects,
1126 bool InsertBefore) {
1127 for (Value *Mem : Objects) {
1128 assert((!isa<Instruction>(Mem) || cast<Instruction>(Mem)->getFunction() ==
1129 TheCall->getFunction()) &&
1130 "Input memory not defined in original function");
1131
1132 Function *Func =
1133 Intrinsic::getOrInsertDeclaration(M, id: MarkerFunc, Tys: Mem->getType());
1134 auto Marker = CallInst::Create(Func, Args: Mem);
1135 if (InsertBefore)
1136 Marker->insertBefore(InsertPos: TheCall->getIterator());
1137 else
1138 Marker->insertBefore(InsertPos: Term->getIterator());
1139 }
1140 };
1141
1142 if (!LifetimesStart.empty()) {
1143 insertMarkers(Intrinsic::lifetime_start, LifetimesStart,
1144 /*InsertBefore=*/true);
1145 }
1146
1147 if (!LifetimesEnd.empty()) {
1148 insertMarkers(Intrinsic::lifetime_end, LifetimesEnd,
1149 /*InsertBefore=*/false);
1150 }
1151}
1152
1153void CodeExtractor::moveCodeToFunction(Function *newFunction) {
1154 auto newFuncIt = newFunction->begin();
1155 for (BasicBlock *Block : Blocks) {
1156 // Delete the basic block from the old function, and the list of blocks
1157 Block->removeFromParent();
1158
1159 // Insert this basic block into the new function
1160 // Insert the original blocks after the entry block created
1161 // for the new function. The entry block may be followed
1162 // by a set of exit blocks at this point, but these exit
1163 // blocks better be placed at the end of the new function.
1164 newFuncIt = newFunction->insert(Position: std::next(x: newFuncIt), BB: Block);
1165 }
1166}
1167
/// Rebuild the branch probabilities and !prof branch weights on the
/// terminator of \p CodeReplacer (the block that now holds the call to the
/// extracted function), from the exit-block frequencies recorded in
/// \p ExitWeights before extraction.
void CodeExtractor::calculateNewCallTerminatorWeights(
    BasicBlock *CodeReplacer,
    const DenseMap<BasicBlock *, BlockFrequency> &ExitWeights,
    BranchProbabilityInfo *BPI) {
  using Distribution = BlockFrequencyInfoImplBase::Distribution;
  using BlockNode = BlockFrequencyInfoImplBase::BlockNode;

  // Update the branch weights for the exit block.
  Instruction *TI = CodeReplacer->getTerminator();
  SmallVector<unsigned, 8> BranchWeights(TI->getNumSuccessors(), 0);

  // Block Frequency distribution with dummy node.
  Distribution BranchDist;

  // Start with every edge unknown; zero-frequency edges are pinned to zero
  // below, the rest are filled in from the normalized distribution.
  SmallVector<BranchProbability, 4> EdgeProbabilities(
      TI->getNumSuccessors(), BranchProbability::getUnknown());

  // Add each of the frequencies of the successors.
  for (unsigned i = 0, e = TI->getNumSuccessors(); i < e; ++i) {
    BlockNode ExitNode(i);
    uint64_t ExitFreq = ExitWeights.lookup(Val: TI->getSuccessor(Idx: i)).getFrequency();
    if (ExitFreq != 0)
      BranchDist.addExit(Node: ExitNode, Amount: ExitFreq);
    else
      EdgeProbabilities[i] = BranchProbability::getZero();
  }

  // Check for no total weight: nothing to normalize, publish the (all
  // zero/unknown) probabilities and skip the !prof metadata entirely.
  if (BranchDist.Total == 0) {
    BPI->setEdgeProbability(Src: CodeReplacer, Probs: EdgeProbabilities);
    return;
  }

  // Normalize the distribution so that they can fit in unsigned.
  BranchDist.normalize();

  // Create normalized branch weights and set the metadata.
  for (unsigned I = 0, E = BranchDist.Weights.size(); I < E; ++I) {
    const auto &Weight = BranchDist.Weights[I];

    // Get the weight and update the current BFI.
    BranchWeights[Weight.TargetNode.Index] = Weight.Amount;
    BranchProbability BP(Weight.Amount, BranchDist.Total);
    EdgeProbabilities[Weight.TargetNode.Index] = BP;
  }
  BPI->setEdgeProbability(Src: CodeReplacer, Probs: EdgeProbabilities);
  // Record the weights as !prof metadata so later passes that recompute
  // BPI/BFI from the IR see the same distribution.
  TI->setMetadata(
      KindID: LLVMContext::MD_prof,
      Node: MDBuilder(TI->getContext()).createBranchWeights(Weights: BranchWeights));
}
1218
1219/// Erase debug info intrinsics which refer to values in \p F but aren't in
1220/// \p F.
1221static void eraseDebugIntrinsicsWithNonLocalRefs(Function &F) {
1222 for (Instruction &I : instructions(F)) {
1223 SmallVector<DbgVariableRecord *, 4> DbgVariableRecords;
1224 findDbgUsers(V: &I, DbgVariableRecords);
1225 for (DbgVariableRecord *DVR : DbgVariableRecords)
1226 if (DVR->getFunction() != &F)
1227 DVR->eraseFromParent();
1228 }
1229}
1230
/// Fix up the debug info in the old and new functions. Following changes are
/// done.
/// 1. If a debug record points to a value that has been replaced, update the
/// record to use the new value.
/// 2. If an Input value that has been replaced was used as a location of a
/// debug record in the Parent function, then materialize a similar record in
/// the new function.
/// 3. Point line locations and debug intrinsics to the new subprogram scope
/// 4. Remove intrinsics which point to values outside of the new function.
static void fixupDebugInfoPostExtraction(Function &OldFunc, Function &NewFunc,
                                         CallInst &TheCall,
                                         const SetVector<Value *> &Inputs,
                                         ArrayRef<Value *> NewValues) {
  DISubprogram *OldSP = OldFunc.getSubprogram();
  LLVMContext &Ctx = OldFunc.getContext();

  if (!OldSP) {
    // Erase any debug info the new function contains.
    stripDebugInfo(F&: NewFunc);
    // Make sure the old function doesn't contain any non-local metadata refs.
    // NOTE(review): the comment above says "old function" but this call
    // operates on NewFunc — confirm intent.
    eraseDebugIntrinsicsWithNonLocalRefs(F&: NewFunc);
    return;
  }

  // Create a subprogram for the new function. Leave out a description of the
  // function arguments, as the parameters don't correspond to anything at the
  // source level.
  assert(OldSP->getUnit() && "Missing compile unit for subprogram");
  DIBuilder DIB(*OldFunc.getParent(), /*AllowUnresolved=*/false,
                OldSP->getUnit());
  auto SPType = DIB.createSubroutineType(ParameterTypes: DIB.getOrCreateTypeArray(Elements: {}));
  DISubprogram::DISPFlags SPFlags = DISubprogram::SPFlagDefinition |
                                    DISubprogram::SPFlagOptimized |
                                    DISubprogram::SPFlagLocalToUnit;
  auto NewSP = DIB.createFunction(
      Scope: OldSP->getUnit(), Name: NewFunc.getName(), LinkageName: NewFunc.getName(), File: OldSP->getFile(),
      /*LineNo=*/0, Ty: SPType, /*ScopeLine=*/0, Flags: DINode::FlagZero, SPFlags);
  NewFunc.setSubprogram(NewSP);

  // For a debug record (DVR) that referenced an input value: if it already
  // lives in the new function, just retarget its location operand; if it
  // lives in the parent function, materialize an equivalent declare/value
  // record in the new function's entry block instead.
  auto UpdateOrInsertDebugRecord = [&](auto *DR, Value *OldLoc, Value *NewLoc,
                                       DIExpression *Expr, bool Declare) {
    if (DR->getParent()->getParent() == &NewFunc) {
      DR->replaceVariableLocationOp(OldLoc, NewLoc);
      return;
    }
    if (Declare) {
      DIB.insertDeclare(NewLoc, DR->getVariable(), Expr, DR->getDebugLoc(),
                        &NewFunc.getEntryBlock());
      return;
    }
    DIB.insertDbgValueIntrinsic(
        Val: NewLoc, VarInfo: DR->getVariable(), Expr, DL: DR->getDebugLoc(),
        InsertPt: NewFunc.getEntryBlock().getTerminator()->getIterator());
  };
  for (auto [Input, NewVal] : zip_equal(t: Inputs, u&: NewValues)) {
    SmallVector<DbgVariableRecord *, 1> DPUsers;
    findDbgUsers(V: Input, DbgVariableRecords&: DPUsers);
    DIExpression *Expr = DIB.createExpression();

    // Iterate the debug users of the Input values. If they are in the extracted
    // function then update their location with the new value. If they are in
    // the parent function then create a similar debug record.
    for (auto *DVR : DPUsers)
      UpdateOrInsertDebugRecord(DVR, Input, NewVal, Expr, DVR->isDbgDeclare());
  }

  auto IsInvalidLocation = [&NewFunc](Value *Location) {
    // Location is invalid if it isn't a constant, an instruction or an
    // argument, or is an instruction/argument but isn't in the new function.
    if (!Location || (!isa<Constant>(Val: Location) && !isa<Argument>(Val: Location) &&
                      !isa<Instruction>(Val: Location)))
      return true;

    if (Argument *Arg = dyn_cast<Argument>(Val: Location))
      return Arg->getParent() != &NewFunc;
    if (Instruction *LocationInst = dyn_cast<Instruction>(Val: Location))
      return LocationInst->getFunction() != &NewFunc;
    return false;
  };

  // Debug intrinsics in the new function need to be updated in one of two
  // ways:
  //  1) They need to be deleted, because they describe a value in the old
  //     function.
  //  2) They need to point to fresh metadata, e.g. because they currently
  //     point to a variable in the wrong scope.
  // RemappedMetadata memoizes old-variable/label -> new-node mappings;
  // Cache is shared by all cloneScopeForSubprogram calls so cloned scopes
  // are created only once.
  SmallDenseMap<DINode *, DINode *> RemappedMetadata;
  SmallVector<DbgVariableRecord *, 4> DVRsToDelete;
  DenseMap<const MDNode *, MDNode *> Cache;

  // Lazily create (and memoize) a clone of OldVar scoped under NewSP.
  auto GetUpdatedDIVariable = [&](DILocalVariable *OldVar) {
    DINode *&NewVar = RemappedMetadata[OldVar];
    if (!NewVar) {
      DILocalScope *NewScope = DILocalScope::cloneScopeForSubprogram(
          RootScope&: *OldVar->getScope(), NewSP&: *NewSP, Ctx, Cache);
      NewVar = DIB.createAutoVariable(
          Scope: NewScope, Name: OldVar->getName(), File: OldVar->getFile(), LineNo: OldVar->getLine(),
          Ty: OldVar->getType(), /*AlwaysPreserve=*/false, Flags: DINode::FlagZero,
          AlignInBits: OldVar->getAlignInBits());
    }
    return cast<DILocalVariable>(Val: NewVar);
  };

  auto UpdateDbgLabel = [&](auto *LabelRecord) {
    // Point the label record to a fresh label within the new function if
    // the record was not inlined from some other function.
    if (LabelRecord->getDebugLoc().getInlinedAt())
      return;
    DILabel *OldLabel = LabelRecord->getLabel();
    DINode *&NewLabel = RemappedMetadata[OldLabel];
    if (!NewLabel) {
      DILocalScope *NewScope = DILocalScope::cloneScopeForSubprogram(
          RootScope&: *OldLabel->getScope(), NewSP&: *NewSP, Ctx, Cache);
      NewLabel =
          DILabel::get(Context&: Ctx, Scope: NewScope, Name: OldLabel->getName(), File: OldLabel->getFile(),
                       Line: OldLabel->getLine(), Column: OldLabel->getColumn(),
                       IsArtificial: OldLabel->isArtificial(), CoroSuspendIdx: OldLabel->getCoroSuspendIdx());
    }
    LabelRecord->setLabel(cast<DILabel>(Val: NewLabel));
  };

  // Classify every debug record attached to an instruction: labels are
  // rescoped in place, records with invalid locations are queued for
  // deletion, and the rest get a fresh variable under the new subprogram.
  auto UpdateDbgRecordsOnInst = [&](Instruction &I) -> void {
    for (DbgRecord &DR : I.getDbgRecordRange()) {
      if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(Val: &DR)) {
        UpdateDbgLabel(DLR);
        continue;
      }

      DbgVariableRecord &DVR = cast<DbgVariableRecord>(Val&: DR);
      // If any of the used locations are invalid, delete the record.
      if (any_of(Range: DVR.location_ops(), P: IsInvalidLocation)) {
        DVRsToDelete.push_back(Elt: &DVR);
        continue;
      }

      // DbgAssign intrinsics have an extra Value argument:
      if (DVR.isDbgAssign() && IsInvalidLocation(DVR.getAddress())) {
        DVRsToDelete.push_back(Elt: &DVR);
        continue;
      }

      // If the variable was in the scope of the old function, i.e. it was not
      // inlined, point the intrinsic to a fresh variable within the new
      // function.
      if (!DVR.getDebugLoc().getInlinedAt())
        DVR.setVariable(GetUpdatedDIVariable(DVR.getVariable()));
    }
  };

  for (Instruction &I : instructions(F&: NewFunc))
    UpdateDbgRecordsOnInst(I);

  // Deletion is deferred to here so the traversal above never invalidates
  // the record range it is iterating.
  for (auto *DVR : DVRsToDelete)
    DVR->getMarker()->MarkedInstr->dropOneDbgRecord(I: DVR);
  DIB.finalizeSubprogram(SP: NewSP);

  // Fix up the scope information attached to the line locations and the
  // debug assignment metadata in the new function.
  DenseMap<DIAssignID *, DIAssignID *> AssignmentIDMap;
  for (Instruction &I : instructions(F&: NewFunc)) {
    if (const DebugLoc &DL = I.getDebugLoc())
      I.setDebugLoc(
          DebugLoc::replaceInlinedAtSubprogram(DL, NewSP&: *NewSP, Ctx, Cache));
    for (DbgRecord &DR : I.getDbgRecordRange())
      DR.setDebugLoc(DebugLoc::replaceInlinedAtSubprogram(DL: DR.getDebugLoc(),
                                                          NewSP&: *NewSP, Ctx, Cache));

    // Loop info metadata may contain line locations. Fix them up.
    auto updateLoopInfoLoc = [&Ctx, &Cache, NewSP](Metadata *MD) -> Metadata * {
      if (auto *Loc = dyn_cast_or_null<DILocation>(Val: MD))
        return DebugLoc::replaceInlinedAtSubprogram(DL: Loc, NewSP&: *NewSP, Ctx, Cache);
      return MD;
    };
    updateLoopMetadataDebugLocations(I, Updater: updateLoopInfoLoc);
    at::remapAssignID(Map&: AssignmentIDMap, I);
  }
  // Give the call site a location so inlining/debug consumers always see one.
  if (!TheCall.getDebugLoc())
    TheCall.setDebugLoc(DILocation::get(Context&: Ctx, Line: 0, Column: 0, Scope: OldSP));

  eraseDebugIntrinsicsWithNonLocalRefs(F&: NewFunc);
}
1412
1413Function *
1414CodeExtractor::extractCodeRegion(const CodeExtractorAnalysisCache &CEAC) {
1415 ValueSet Inputs, Outputs;
1416 return extractCodeRegion(CEAC, Inputs, Outputs);
1417}
1418
/// Extract the region described by Blocks into a new function. Returns the
/// new function, or nullptr if the region is not eligible for extraction.
/// On return, \p inputs and \p outputs hold the values flowing into and out
/// of the region (they also become the new function's parameters).
Function *
CodeExtractor::extractCodeRegion(const CodeExtractorAnalysisCache &CEAC,
                                 ValueSet &inputs, ValueSet &outputs) {
  if (!isEligible())
    return nullptr;

  // Assumption: this is a single-entry code region, and the header is the first
  // block in the region.
  BasicBlock *header = *Blocks.begin();
  Function *oldFunction = header->getParent();

  // May split blocks / introduce PHIs; header may be replaced, hence the
  // by-reference parameter.
  normalizeCFGForExtraction(header);

  // Remove @llvm.assume calls that will be moved to the new function from the
  // old function's assumption cache.
  for (BasicBlock *Block : Blocks) {
    for (Instruction &I : llvm::make_early_inc_range(Range&: *Block)) {
      if (auto *AI = dyn_cast<AssumeInst>(Val: &I)) {
        if (AC)
          AC->unregisterAssumption(CI: AI);
        AI->eraseFromParent();
      }
    }
  }

  ValueSet SinkingCands, HoistingCands;
  BasicBlock *CommonExit = nullptr;
  findAllocas(CEAC, SinkCands&: SinkingCands, HoistCands&: HoistingCands, ExitBlock&: CommonExit);
  assert(HoistingCands.empty() || CommonExit);

  // Find inputs to, outputs from the code region.
  findInputsOutputs(Inputs&: inputs, Outputs&: outputs, SinkCands: SinkingCands);

  // Collect objects which are inputs to the extraction region and also
  // referenced by lifetime start markers within it. The effects of these
  // markers must be replicated in the calling function to prevent the stack
  // coloring pass from merging slots which store input objects.
  ValueSet LifetimesStart;
  eraseLifetimeMarkersOnInputs(Blocks, SunkAllocas: SinkingCands, LifetimesStart);

  // Hoisting may add a new exit edge, so the return values must be
  // recomputed afterwards.
  if (!HoistingCands.empty()) {
    auto *HoistToBlock = findOrCreateBlockForHoisting(CommonExitBlock: CommonExit);
    Instruction *TI = HoistToBlock->getTerminator();
    for (auto *II : HoistingCands)
      cast<Instruction>(Val: II)->moveBefore(InsertPos: TI->getIterator());
    computeExtractedFuncRetVals();
  }

  // CFG/ExitBlocks must not change hereafter

  // Calculate the entry frequency of the new function before we change the root
  // block.
  BlockFrequency EntryFreq;
  DenseMap<BasicBlock *, BlockFrequency> ExitWeights;
  if (BFI) {
    assert(BPI && "Both BPI and BFI are required to preserve profile info");
    // Entry frequency: sum of all edges entering the region from outside.
    for (BasicBlock *Pred : predecessors(BB: header)) {
      if (Blocks.count(key: Pred))
        continue;
      EntryFreq +=
          BFI->getBlockFreq(BB: Pred) * BPI->getEdgeProbability(Src: Pred, Dst: header);
    }

    // Exit weights: per exit block, sum of all edges leaving the region.
    for (BasicBlock *Succ : ExtractedFuncRetVals) {
      for (BasicBlock *Block : predecessors(BB: Succ)) {
        if (!Blocks.count(key: Block))
          continue;

        // Update the branch weight for this successor.
        BlockFrequency &BF = ExitWeights[Succ];
        BF += BFI->getBlockFreq(BB: Block) * BPI->getEdgeProbability(Src: Block, Dst: Succ);
      }
    }
  }

  // Determine position for the replacement code. Do so before header is moved
  // to the new function.
  BasicBlock *ReplIP = header;
  while (ReplIP && Blocks.count(key: ReplIP))
    ReplIP = ReplIP->getNextNode();

  // Construct new function based on inputs/outputs & add allocas for all defs.
  std::string SuffixToUse =
      Suffix.empty()
          ? (header->getName().empty() ? "extracted" : header->getName().str())
          : Suffix;

  ValueSet StructValues;
  StructType *StructTy = nullptr;
  Function *newFunction = constructFunctionDeclaration(
      inputs, outputs, EntryFreq, Name: oldFunction->getName() + "." + SuffixToUse,
      StructValues, StructTy);
  // NewValues receives, per input, the replacement value (argument or
  // struct-load) inside the new function.
  SmallVector<Value *> NewValues;

  emitFunctionBody(inputs, outputs, StructValues, newFunction, StructArgTy: StructTy, header,
                   SinkingCands, NewValues);

  std::vector<Value *> Reloads;
  CallInst *TheCall = emitReplacerCall(
      inputs, outputs, StructValues, newFunction, StructArgTy: StructTy, oldFunction, ReplIP,
      EntryFreq, LifetimesStart: LifetimesStart.getArrayRef(), Reloads);

  insertReplacerCall(oldFunction, header, codeReplacer: TheCall->getParent(), outputs,
                     Reloads, ExitWeights);

  fixupDebugInfoPostExtraction(OldFunc&: *oldFunction, NewFunc&: *newFunction, TheCall&: *TheCall, Inputs: inputs,
                               NewValues);

  LLVM_DEBUG(llvm::dbgs() << "After extractCodeRegion - newFunction:\n");
  LLVM_DEBUG(newFunction->dump());
  LLVM_DEBUG(llvm::dbgs() << "After extractCodeRegion - oldFunction:\n");
  LLVM_DEBUG(oldFunction->dump());
  LLVM_DEBUG(if (AC && verifyAssumptionCache(*oldFunction, *newFunction, AC))
                 report_fatal_error("Stale Asumption cache for old Function!"));
  return newFunction;
}
1535
/// Normalize the region's CFG so extraction is well-defined: returns are
/// moved out of the region and PHIs at the entry/exits are severed. The
/// steps are order-dependent; \p header may be replaced (hence by-reference).
void CodeExtractor::normalizeCFGForExtraction(BasicBlock *&header) {
  // If we have any return instructions in the region, split those blocks so
  // that the return is not in the region.
  splitReturnBlocks();

  // If we have to split PHI nodes of the entry or exit blocks, do so now.
  severSplitPHINodesOfEntry(Header&: header);

  // If a PHI in an exit block has multiple incoming values from the outlined
  // region, create a new PHI for those values within the region such that only
  // PHI itself becomes an output value, not each of its incoming values
  // individually.
  // severSplitPHINodesOfExits relies on the exit list computed here.
  computeExtractedFuncRetVals();
  severSplitPHINodesOfExits();
}
1551
1552void CodeExtractor::computeExtractedFuncRetVals() {
1553 ExtractedFuncRetVals.clear();
1554
1555 SmallPtrSet<BasicBlock *, 2> ExitBlocks;
1556 for (BasicBlock *Block : Blocks) {
1557 for (BasicBlock *Succ : successors(BB: Block)) {
1558 if (Blocks.count(key: Succ))
1559 continue;
1560
1561 bool IsNew = ExitBlocks.insert(Ptr: Succ).second;
1562 if (IsNew)
1563 ExtractedFuncRetVals.push_back(Elt: Succ);
1564 }
1565 }
1566}
1567
1568Type *CodeExtractor::getSwitchType() {
1569 LLVMContext &Context = Blocks.front()->getContext();
1570
1571 assert(ExtractedFuncRetVals.size() < 0xffff &&
1572 "too many exit blocks for switch");
1573 switch (ExtractedFuncRetVals.size()) {
1574 case 0:
1575 case 1:
1576 return Type::getVoidTy(C&: Context);
1577 case 2:
1578 // Conditional branch, return a bool
1579 return Type::getInt1Ty(C&: Context);
1580 default:
1581 return Type::getInt16Ty(C&: Context);
1582 }
1583}
1584
1585void CodeExtractor::emitFunctionBody(
1586 const ValueSet &inputs, const ValueSet &outputs,
1587 const ValueSet &StructValues, Function *newFunction,
1588 StructType *StructArgTy, BasicBlock *header, const ValueSet &SinkingCands,
1589 SmallVectorImpl<Value *> &NewValues) {
1590 Function *oldFunction = header->getParent();
1591 LLVMContext &Context = oldFunction->getContext();
1592
1593 // The new function needs a root node because other nodes can branch to the
1594 // head of the region, but the entry node of a function cannot have preds.
1595 BasicBlock *newFuncRoot =
1596 BasicBlock::Create(Context, Name: "newFuncRoot", Parent: newFunction);
1597
1598 // Now sink all instructions which only have non-phi uses inside the region.
1599 // Group the allocas at the start of the block, so that any bitcast uses of
1600 // the allocas are well-defined.
1601 for (auto *II : SinkingCands) {
1602 if (!isa<AllocaInst>(Val: II)) {
1603 cast<Instruction>(Val: II)->moveBefore(BB&: *newFuncRoot,
1604 I: newFuncRoot->getFirstInsertionPt());
1605 }
1606 }
1607 for (auto *II : SinkingCands) {
1608 if (auto *AI = dyn_cast<AllocaInst>(Val: II)) {
1609 AI->moveBefore(BB&: *newFuncRoot, I: newFuncRoot->getFirstInsertionPt());
1610 }
1611 }
1612
1613 Function::arg_iterator ScalarAI = newFunction->arg_begin();
1614 Argument *AggArg = StructValues.empty()
1615 ? nullptr
1616 : newFunction->getArg(i: newFunction->arg_size() - 1);
1617
1618 // Rewrite all users of the inputs in the extracted region to use the
1619 // arguments (or appropriate addressing into struct) instead.
1620 for (unsigned i = 0, e = inputs.size(), aggIdx = 0; i != e; ++i) {
1621 Value *RewriteVal;
1622 if (StructValues.contains(key: inputs[i])) {
1623 Value *Idx[2];
1624 Idx[0] = Constant::getNullValue(Ty: Type::getInt32Ty(C&: header->getContext()));
1625 Idx[1] = ConstantInt::get(Ty: Type::getInt32Ty(C&: header->getContext()), V: aggIdx);
1626 GetElementPtrInst *GEP = GetElementPtrInst::Create(
1627 PointeeType: StructArgTy, Ptr: AggArg, IdxList: Idx, NameStr: "gep_" + inputs[i]->getName(), InsertBefore: newFuncRoot);
1628 LoadInst *LoadGEP =
1629 new LoadInst(StructArgTy->getElementType(N: aggIdx), GEP,
1630 "loadgep_" + inputs[i]->getName(), newFuncRoot);
1631 // If we load pointer, we can add optional !align metadata
1632 // The existence of the !align metadata on the instruction tells
1633 // the optimizer that the value loaded is known to be aligned to
1634 // a boundary specified by the integer value in the metadata node.
1635 // Example:
1636 // %res = load ptr, ptr %input, align 8, !align !align_md_node
1637 // ^ ^
1638 // | |
1639 // alignment of %input address |
1640 // |
1641 // alignment of %res object
1642 if (StructArgTy->getElementType(N: aggIdx)->isPointerTy()) {
1643 unsigned AlignmentValue;
1644 const Triple &TargetTriple =
1645 newFunction->getParent()->getTargetTriple();
1646 const DataLayout &DL = header->getDataLayout();
1647 // Pointers without casting can provide more information about
1648 // alignment. Use pointers without casts if given target preserves
1649 // alignment information for cast the operation.
1650 if (isAlignmentPreservedForAddrCast(TargetTriple))
1651 AlignmentValue =
1652 inputs[i]->stripPointerCasts()->getPointerAlignment(DL).value();
1653 else
1654 AlignmentValue = inputs[i]->getPointerAlignment(DL).value();
1655 MDBuilder MDB(header->getContext());
1656 LoadGEP->setMetadata(
1657 KindID: LLVMContext::MD_align,
1658 Node: MDNode::get(
1659 Context&: header->getContext(),
1660 MDs: MDB.createConstant(C: ConstantInt::get(
1661 Ty: Type::getInt64Ty(C&: header->getContext()), V: AlignmentValue))));
1662 }
1663 RewriteVal = LoadGEP;
1664 ++aggIdx;
1665 } else
1666 RewriteVal = &*ScalarAI++;
1667
1668 NewValues.push_back(Elt: RewriteVal);
1669 }
1670
1671 moveCodeToFunction(newFunction);
1672
1673 for (unsigned i = 0, e = inputs.size(); i != e; ++i) {
1674 Value *RewriteVal = NewValues[i];
1675
1676 std::vector<User *> Users(inputs[i]->user_begin(), inputs[i]->user_end());
1677 for (User *use : Users)
1678 if (Instruction *inst = dyn_cast<Instruction>(Val: use))
1679 if (Blocks.count(key: inst->getParent()))
1680 inst->replaceUsesOfWith(From: inputs[i], To: RewriteVal);
1681 }
1682
1683 // Since there may be multiple exits from the original region, make the new
1684 // function return an unsigned, switch on that number. This loop iterates
1685 // over all of the blocks in the extracted region, updating any terminator
1686 // instructions in the to-be-extracted region that branch to blocks that are
1687 // not in the region to be extracted.
1688 std::map<BasicBlock *, BasicBlock *> ExitBlockMap;
1689
1690 // Iterate over the previously collected targets, and create new blocks inside
1691 // the function to branch to.
1692 for (auto P : enumerate(First&: ExtractedFuncRetVals)) {
1693 BasicBlock *OldTarget = P.value();
1694 size_t SuccNum = P.index();
1695
1696 BasicBlock *NewTarget = BasicBlock::Create(
1697 Context, Name: OldTarget->getName() + ".exitStub", Parent: newFunction);
1698 ExitBlockMap[OldTarget] = NewTarget;
1699
1700 Value *brVal = nullptr;
1701 Type *RetTy = getSwitchType();
1702 assert(ExtractedFuncRetVals.size() < 0xffff &&
1703 "too many exit blocks for switch");
1704 switch (ExtractedFuncRetVals.size()) {
1705 case 0:
1706 case 1:
1707 // No value needed.
1708 break;
1709 case 2: // Conditional branch, return a bool
1710 brVal = ConstantInt::get(Ty: RetTy, V: !SuccNum);
1711 break;
1712 default:
1713 brVal = ConstantInt::get(Ty: RetTy, V: SuccNum);
1714 break;
1715 }
1716
1717 ReturnInst::Create(C&: Context, retVal: brVal, InsertBefore: NewTarget);
1718 }
1719
1720 for (BasicBlock *Block : Blocks) {
1721 Instruction *TI = Block->getTerminator();
1722 for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
1723 if (Blocks.count(key: TI->getSuccessor(Idx: i)))
1724 continue;
1725 BasicBlock *OldTarget = TI->getSuccessor(Idx: i);
1726 // add a new basic block which returns the appropriate value
1727 BasicBlock *NewTarget = ExitBlockMap[OldTarget];
1728 assert(NewTarget && "Unknown target block!");
1729
1730 // rewrite the original branch instruction with this new target
1731 TI->setSuccessor(Idx: i, BB: NewTarget);
1732 }
1733 }
1734
1735 // Loop over all of the PHI nodes in the header and exit blocks, and change
1736 // any references to the old incoming edge to be the new incoming edge.
1737 for (BasicBlock::iterator I = header->begin(); isa<PHINode>(Val: I); ++I) {
1738 PHINode *PN = cast<PHINode>(Val&: I);
1739 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
1740 if (!Blocks.count(key: PN->getIncomingBlock(i)))
1741 PN->setIncomingBlock(i, BB: newFuncRoot);
1742 }
1743
1744 // Connect newFunction entry block to new header.
1745 BranchInst *BranchI = BranchInst::Create(IfTrue: header, InsertBefore: newFuncRoot);
1746 applyFirstDebugLoc(oldFunction, Blocks: Blocks.getArrayRef(), BranchI);
1747
1748 // Store the arguments right after the definition of output value.
  // This must be done after creating the exit stubs, to ensure that the
  // invoke result restore is placed in the outlined function.
1751 ScalarAI = newFunction->arg_begin();
1752 unsigned AggIdx = 0;
1753
1754 for (Value *Input : inputs) {
1755 if (StructValues.contains(key: Input))
1756 ++AggIdx;
1757 else
1758 ++ScalarAI;
1759 }
1760
1761 for (Value *Output : outputs) {
1762 // Find proper insertion point.
1763 // In case Output is an invoke, we insert the store at the beginning in the
1764 // 'normal destination' BB. Otherwise we insert the store right after
1765 // Output.
1766 BasicBlock::iterator InsertPt;
1767 if (auto *InvokeI = dyn_cast<InvokeInst>(Val: Output))
1768 InsertPt = InvokeI->getNormalDest()->getFirstInsertionPt();
1769 else if (auto *Phi = dyn_cast<PHINode>(Val: Output))
1770 InsertPt = Phi->getParent()->getFirstInsertionPt();
1771 else if (auto *OutI = dyn_cast<Instruction>(Val: Output))
1772 InsertPt = std::next(x: OutI->getIterator());
1773 else {
1774 // Globals don't need to be updated, just advance to the next argument.
1775 if (StructValues.contains(key: Output))
1776 ++AggIdx;
1777 else
1778 ++ScalarAI;
1779 continue;
1780 }
1781
1782 assert((InsertPt->getFunction() == newFunction ||
1783 Blocks.count(InsertPt->getParent())) &&
1784 "InsertPt should be in new function");
1785
1786 if (StructValues.contains(key: Output)) {
1787 assert(AggArg && "Number of aggregate output arguments should match "
1788 "the number of defined values");
1789 Value *Idx[2];
1790 Idx[0] = Constant::getNullValue(Ty: Type::getInt32Ty(C&: Context));
1791 Idx[1] = ConstantInt::get(Ty: Type::getInt32Ty(C&: Context), V: AggIdx);
1792 GetElementPtrInst *GEP = GetElementPtrInst::Create(
1793 PointeeType: StructArgTy, Ptr: AggArg, IdxList: Idx, NameStr: "gep_" + Output->getName(), InsertBefore: InsertPt);
1794 new StoreInst(Output, GEP, InsertPt);
1795 ++AggIdx;
1796 } else {
1797 assert(ScalarAI != newFunction->arg_end() &&
1798 "Number of scalar output arguments should match "
1799 "the number of defined values");
1800 new StoreInst(Output, &*ScalarAI, InsertPt);
1801 ++ScalarAI;
1802 }
1803 }
1804
1805 if (ExtractedFuncRetVals.empty()) {
1806 // Mark the new function `noreturn` if applicable. Terminators which resume
1807 // exception propagation are treated as returning instructions. This is to
1808 // avoid inserting traps after calls to outlined functions which unwind.
1809 if (none_of(Range&: Blocks, P: [](const BasicBlock *BB) {
1810 const Instruction *Term = BB->getTerminator();
1811 return isa<ReturnInst>(Val: Term) || isa<ResumeInst>(Val: Term);
1812 }))
1813 newFunction->setDoesNotReturn();
1814 }
1815}
1816
1817CallInst *CodeExtractor::emitReplacerCall(
1818 const ValueSet &inputs, const ValueSet &outputs,
1819 const ValueSet &StructValues, Function *newFunction,
1820 StructType *StructArgTy, Function *oldFunction, BasicBlock *ReplIP,
1821 BlockFrequency EntryFreq, ArrayRef<Value *> LifetimesStart,
1822 std::vector<Value *> &Reloads) {
1823 LLVMContext &Context = oldFunction->getContext();
1824 Module *M = oldFunction->getParent();
1825 const DataLayout &DL = M->getDataLayout();
1826
1827 // This takes place of the original loop
1828 BasicBlock *codeReplacer =
1829 BasicBlock::Create(Context, Name: "codeRepl", Parent: oldFunction, InsertBefore: ReplIP);
1830 if (AllocationBlock)
1831 assert(AllocationBlock->getParent() == oldFunction &&
1832 "AllocationBlock is not in the same function");
1833 BasicBlock *AllocaBlock =
1834 AllocationBlock ? AllocationBlock : &oldFunction->getEntryBlock();
1835
1836 // Update the entry count of the function.
1837 if (BFI)
1838 BFI->setBlockFreq(BB: codeReplacer, Freq: EntryFreq);
1839
1840 std::vector<Value *> params;
1841
1842 // Add inputs as params, or to be filled into the struct
1843 for (Value *input : inputs) {
1844 if (StructValues.contains(key: input))
1845 continue;
1846
1847 params.push_back(x: input);
1848 }
1849
1850 // Create allocas for the outputs
1851 std::vector<Value *> ReloadOutputs;
1852 for (Value *output : outputs) {
1853 if (StructValues.contains(key: output))
1854 continue;
1855
1856 AllocaInst *alloca = new AllocaInst(
1857 output->getType(), DL.getAllocaAddrSpace(), nullptr,
1858 output->getName() + ".loc", AllocaBlock->getFirstInsertionPt());
1859 params.push_back(x: alloca);
1860 ReloadOutputs.push_back(x: alloca);
1861 }
1862
1863 AllocaInst *Struct = nullptr;
1864 if (!StructValues.empty()) {
1865 Struct = new AllocaInst(StructArgTy, DL.getAllocaAddrSpace(), nullptr,
1866 "structArg", AllocaBlock->getFirstInsertionPt());
1867 if (ArgsInZeroAddressSpace && DL.getAllocaAddrSpace() != 0) {
1868 auto *StructSpaceCast = new AddrSpaceCastInst(
1869 Struct, PointerType ::get(C&: Context, AddressSpace: 0), "structArg.ascast");
1870 StructSpaceCast->insertAfter(InsertPos: Struct->getIterator());
1871 params.push_back(x: StructSpaceCast);
1872 } else {
1873 params.push_back(x: Struct);
1874 }
1875
1876 unsigned AggIdx = 0;
1877 for (Value *input : inputs) {
1878 if (!StructValues.contains(key: input))
1879 continue;
1880
1881 Value *Idx[2];
1882 Idx[0] = Constant::getNullValue(Ty: Type::getInt32Ty(C&: Context));
1883 Idx[1] = ConstantInt::get(Ty: Type::getInt32Ty(C&: Context), V: AggIdx);
1884 GetElementPtrInst *GEP = GetElementPtrInst::Create(
1885 PointeeType: StructArgTy, Ptr: Struct, IdxList: Idx, NameStr: "gep_" + input->getName());
1886 GEP->insertInto(ParentBB: codeReplacer, It: codeReplacer->end());
1887 new StoreInst(input, GEP, codeReplacer);
1888
1889 ++AggIdx;
1890 }
1891 }
1892
1893 // Emit the call to the function
1894 CallInst *call = CallInst::Create(
1895 Func: newFunction, Args: params, NameStr: ExtractedFuncRetVals.size() > 1 ? "targetBlock" : "",
1896 InsertBefore: codeReplacer);
1897
1898 // Set swifterror parameter attributes.
1899 unsigned ParamIdx = 0;
1900 unsigned AggIdx = 0;
1901 for (auto input : inputs) {
1902 if (StructValues.contains(key: input)) {
1903 ++AggIdx;
1904 } else {
1905 if (input->isSwiftError())
1906 call->addParamAttr(ArgNo: ParamIdx, Kind: Attribute::SwiftError);
1907 ++ParamIdx;
1908 }
1909 }
1910
1911 // Add debug location to the new call, if the original function has debug
1912 // info. In that case, the terminator of the entry block of the extracted
1913 // function contains the first debug location of the extracted function,
1914 // set in extractCodeRegion.
1915 if (codeReplacer->getParent()->getSubprogram()) {
1916 if (auto DL = newFunction->getEntryBlock().getTerminator()->getDebugLoc())
1917 call->setDebugLoc(DL);
1918 }
1919
1920 // Reload the outputs passed in by reference, use the struct if output is in
1921 // the aggregate or reload from the scalar argument.
1922 for (unsigned i = 0, e = outputs.size(), scalarIdx = 0; i != e; ++i) {
1923 Value *Output = nullptr;
1924 if (StructValues.contains(key: outputs[i])) {
1925 Value *Idx[2];
1926 Idx[0] = Constant::getNullValue(Ty: Type::getInt32Ty(C&: Context));
1927 Idx[1] = ConstantInt::get(Ty: Type::getInt32Ty(C&: Context), V: AggIdx);
1928 GetElementPtrInst *GEP = GetElementPtrInst::Create(
1929 PointeeType: StructArgTy, Ptr: Struct, IdxList: Idx, NameStr: "gep_reload_" + outputs[i]->getName());
1930 GEP->insertInto(ParentBB: codeReplacer, It: codeReplacer->end());
1931 Output = GEP;
1932 ++AggIdx;
1933 } else {
1934 Output = ReloadOutputs[scalarIdx];
1935 ++scalarIdx;
1936 }
1937 LoadInst *load =
1938 new LoadInst(outputs[i]->getType(), Output,
1939 outputs[i]->getName() + ".reload", codeReplacer);
1940 Reloads.push_back(x: load);
1941 }
1942
1943 // Now we can emit a switch statement using the call as a value.
1944 SwitchInst *TheSwitch =
1945 SwitchInst::Create(Value: Constant::getNullValue(Ty: Type::getInt16Ty(C&: Context)),
1946 Default: codeReplacer, NumCases: 0, InsertBefore: codeReplacer);
1947 for (auto P : enumerate(First&: ExtractedFuncRetVals)) {
1948 BasicBlock *OldTarget = P.value();
1949 size_t SuccNum = P.index();
1950
1951 TheSwitch->addCase(OnVal: ConstantInt::get(Ty: Type::getInt16Ty(C&: Context), V: SuccNum),
1952 Dest: OldTarget);
1953 }
1954
1955 // Now that we've done the deed, simplify the switch instruction.
1956 Type *OldFnRetTy = TheSwitch->getParent()->getParent()->getReturnType();
1957 switch (ExtractedFuncRetVals.size()) {
1958 case 0:
1959 // There are no successors (the block containing the switch itself), which
1960 // means that previously this was the last part of the function, and hence
1961 // this should be rewritten as a `ret` or `unreachable`.
1962 if (newFunction->doesNotReturn()) {
1963 // If fn is no return, end with an unreachable terminator.
1964 (void)new UnreachableInst(Context, TheSwitch->getIterator());
1965 } else if (OldFnRetTy->isVoidTy()) {
1966 // We have no return value.
1967 ReturnInst::Create(C&: Context, retVal: nullptr,
1968 InsertBefore: TheSwitch->getIterator()); // Return void
1969 } else if (OldFnRetTy == TheSwitch->getCondition()->getType()) {
1970 // return what we have
1971 ReturnInst::Create(C&: Context, retVal: TheSwitch->getCondition(),
1972 InsertBefore: TheSwitch->getIterator());
1973 } else {
1974 // Otherwise we must have code extracted an unwind or something, just
1975 // return whatever we want.
1976 ReturnInst::Create(C&: Context, retVal: Constant::getNullValue(Ty: OldFnRetTy),
1977 InsertBefore: TheSwitch->getIterator());
1978 }
1979
1980 TheSwitch->eraseFromParent();
1981 break;
1982 case 1:
1983 // Only a single destination, change the switch into an unconditional
1984 // branch.
1985 BranchInst::Create(IfTrue: TheSwitch->getSuccessor(idx: 1), InsertBefore: TheSwitch->getIterator());
1986 TheSwitch->eraseFromParent();
1987 break;
1988 case 2:
1989 // Only two destinations, convert to a condition branch.
1990 // Remark: This also swaps the target branches:
1991 // 0 -> false -> getSuccessor(2); 1 -> true -> getSuccessor(1)
1992 BranchInst::Create(IfTrue: TheSwitch->getSuccessor(idx: 1), IfFalse: TheSwitch->getSuccessor(idx: 2),
1993 Cond: call, InsertBefore: TheSwitch->getIterator());
1994 TheSwitch->eraseFromParent();
1995 break;
1996 default:
1997 // Otherwise, make the default destination of the switch instruction be one
1998 // of the other successors.
1999 TheSwitch->setCondition(call);
2000 TheSwitch->setDefaultDest(
2001 TheSwitch->getSuccessor(idx: ExtractedFuncRetVals.size()));
2002 // Remove redundant case
2003 TheSwitch->removeCase(
2004 I: SwitchInst::CaseIt(TheSwitch, ExtractedFuncRetVals.size() - 1));
2005 break;
2006 }
2007
2008 // Insert lifetime markers around the reloads of any output values. The
2009 // allocas output values are stored in are only in-use in the codeRepl block.
2010 insertLifetimeMarkersSurroundingCall(M, LifetimesStart: ReloadOutputs, LifetimesEnd: ReloadOutputs, TheCall: call);
2011
2012 // Replicate the effects of any lifetime start/end markers which referenced
2013 // input objects in the extraction region by placing markers around the call.
2014 insertLifetimeMarkersSurroundingCall(M: oldFunction->getParent(), LifetimesStart,
2015 LifetimesEnd: {}, TheCall: call);
2016
2017 return call;
2018}
2019
2020void CodeExtractor::insertReplacerCall(
2021 Function *oldFunction, BasicBlock *header, BasicBlock *codeReplacer,
2022 const ValueSet &outputs, ArrayRef<Value *> Reloads,
2023 const DenseMap<BasicBlock *, BlockFrequency> &ExitWeights) {
2024
2025 // Rewrite branches to basic blocks outside of the loop to new dummy blocks
2026 // within the new function. This must be done before we lose track of which
2027 // blocks were originally in the code region.
2028 std::vector<User *> Users(header->user_begin(), header->user_end());
2029 for (auto &U : Users)
2030 // The BasicBlock which contains the branch is not in the region
2031 // modify the branch target to a new block
2032 if (Instruction *I = dyn_cast<Instruction>(Val: U))
2033 if (I->isTerminator() && I->getFunction() == oldFunction &&
2034 !Blocks.count(key: I->getParent()))
2035 I->replaceUsesOfWith(From: header, To: codeReplacer);
2036
2037 // When moving the code region it is sufficient to replace all uses to the
2038 // extracted function values. Since the original definition's block
2039 // dominated its use, it will also be dominated by codeReplacer's switch
2040 // which joined multiple exit blocks.
2041 for (BasicBlock *ExitBB : ExtractedFuncRetVals)
2042 for (PHINode &PN : ExitBB->phis()) {
2043 Value *IncomingCodeReplacerVal = nullptr;
2044 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
2045 // Ignore incoming values from outside of the extracted region.
2046 if (!Blocks.count(key: PN.getIncomingBlock(i)))
2047 continue;
2048
2049 // Ensure that there is only one incoming value from codeReplacer.
2050 if (!IncomingCodeReplacerVal) {
2051 PN.setIncomingBlock(i, BB: codeReplacer);
2052 IncomingCodeReplacerVal = PN.getIncomingValue(i);
2053 } else
2054 assert(IncomingCodeReplacerVal == PN.getIncomingValue(i) &&
2055 "PHI has two incompatbile incoming values from codeRepl");
2056 }
2057 }
2058
2059 for (unsigned i = 0, e = outputs.size(); i != e; ++i) {
2060 Value *load = Reloads[i];
2061 std::vector<User *> Users(outputs[i]->user_begin(), outputs[i]->user_end());
2062 for (User *U : Users) {
2063 Instruction *inst = cast<Instruction>(Val: U);
2064 if (inst->getParent()->getParent() == oldFunction)
2065 inst->replaceUsesOfWith(From: outputs[i], To: load);
2066 }
2067 }
2068
2069 // Update the branch weights for the exit block.
2070 if (BFI && ExtractedFuncRetVals.size() > 1)
2071 calculateNewCallTerminatorWeights(CodeReplacer: codeReplacer, ExitWeights, BPI);
2072}
2073
2074bool CodeExtractor::verifyAssumptionCache(const Function &OldFunc,
2075 const Function &NewFunc,
2076 AssumptionCache *AC) {
2077 for (auto AssumeVH : AC->assumptions()) {
2078 auto *I = dyn_cast_or_null<CallInst>(Val&: AssumeVH);
2079 if (!I)
2080 continue;
2081
2082 // There shouldn't be any llvm.assume intrinsics in the new function.
2083 if (I->getFunction() != &OldFunc)
2084 return true;
2085
2086 // There shouldn't be any stale affected values in the assumption cache
2087 // that were previously in the old function, but that have now been moved
2088 // to the new function.
2089 for (auto AffectedValVH : AC->assumptionsFor(V: I->getOperand(i_nocapture: 0))) {
2090 auto *AffectedCI = dyn_cast_or_null<CallInst>(Val&: AffectedValVH);
2091 if (!AffectedCI)
2092 continue;
2093 if (AffectedCI->getFunction() != &OldFunc)
2094 return true;
2095 auto *AssumedInst = cast<Instruction>(Val: AffectedCI->getOperand(i_nocapture: 0));
2096 if (AssumedInst->getFunction() != &OldFunc)
2097 return true;
2098 }
2099 }
2100 return false;
2101}
2102
2103void CodeExtractor::excludeArgFromAggregate(Value *Arg) {
2104 ExcludeArgsFromAggregate.insert(X: Arg);
2105}
2106