1//===- CodeExtractor.cpp - Pull code region into a new function -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the interface to tear out a code region, such as an
10// individual loop or a parallel section, into a new function, replacing it with
11// a call to the new function.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Transforms/Utils/CodeExtractor.h"
16#include "llvm/ADT/ArrayRef.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/STLExtras.h"
19#include "llvm/ADT/SetVector.h"
20#include "llvm/ADT/SmallPtrSet.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/Analysis/AssumptionCache.h"
23#include "llvm/Analysis/BlockFrequencyInfo.h"
24#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
25#include "llvm/Analysis/BranchProbabilityInfo.h"
26#include "llvm/IR/Argument.h"
27#include "llvm/IR/Attributes.h"
28#include "llvm/IR/BasicBlock.h"
29#include "llvm/IR/CFG.h"
30#include "llvm/IR/Constant.h"
31#include "llvm/IR/Constants.h"
32#include "llvm/IR/DIBuilder.h"
33#include "llvm/IR/DataLayout.h"
34#include "llvm/IR/DebugInfo.h"
35#include "llvm/IR/DebugInfoMetadata.h"
36#include "llvm/IR/DerivedTypes.h"
37#include "llvm/IR/Dominators.h"
38#include "llvm/IR/Function.h"
39#include "llvm/IR/GlobalValue.h"
40#include "llvm/IR/InstIterator.h"
41#include "llvm/IR/InstrTypes.h"
42#include "llvm/IR/Instruction.h"
43#include "llvm/IR/Instructions.h"
44#include "llvm/IR/IntrinsicInst.h"
45#include "llvm/IR/Intrinsics.h"
46#include "llvm/IR/LLVMContext.h"
47#include "llvm/IR/MDBuilder.h"
48#include "llvm/IR/Module.h"
49#include "llvm/IR/PatternMatch.h"
50#include "llvm/IR/Type.h"
51#include "llvm/IR/User.h"
52#include "llvm/IR/Value.h"
53#include "llvm/IR/Verifier.h"
54#include "llvm/Support/BlockFrequency.h"
55#include "llvm/Support/BranchProbability.h"
56#include "llvm/Support/Casting.h"
57#include "llvm/Support/CommandLine.h"
58#include "llvm/Support/Debug.h"
59#include "llvm/Support/ErrorHandling.h"
60#include "llvm/Support/raw_ostream.h"
61#include "llvm/Transforms/Utils/BasicBlockUtils.h"
62#include <cassert>
63#include <cstdint>
64#include <iterator>
65#include <map>
66#include <vector>
67
68using namespace llvm;
69using namespace llvm::PatternMatch;
70using ProfileCount = Function::ProfileCount;
71
72#define DEBUG_TYPE "code-extractor"
73
74// Provide a command-line option to aggregate function arguments into a struct
75// for functions produced by the code extractor. This is useful when converting
76// extracted functions to pthread-based code, as only one argument (void*) can
77// be passed in to pthread_create().
78static cl::opt<bool>
79AggregateArgsOpt("aggregate-extracted-args", cl::Hidden,
80 cl::desc("Aggregate arguments to code-extracted functions"));
81
82/// Test whether a block is valid for extraction.
83static bool isBlockValidForExtraction(const BasicBlock &BB,
84 const SetVector<BasicBlock *> &Result,
85 bool AllowVarArgs, bool AllowAlloca) {
86 // taking the address of a basic block moved to another function is illegal
87 if (BB.hasAddressTaken())
88 return false;
89
90 // don't hoist code that uses another basicblock address, as it's likely to
91 // lead to unexpected behavior, like cross-function jumps
92 SmallPtrSet<User const *, 16> Visited;
93 SmallVector<User const *, 16> ToVisit(llvm::make_pointer_range(Range: BB));
94
95 while (!ToVisit.empty()) {
96 User const *Curr = ToVisit.pop_back_val();
97 if (!Visited.insert(Ptr: Curr).second)
98 continue;
99 if (isa<BlockAddress const>(Val: Curr))
100 return false; // even a reference to self is likely to be not compatible
101
102 if (isa<Instruction>(Val: Curr) && cast<Instruction>(Val: Curr)->getParent() != &BB)
103 continue;
104
105 for (auto const &U : Curr->operands()) {
106 if (auto *UU = dyn_cast<User>(Val: U))
107 ToVisit.push_back(Elt: UU);
108 }
109 }
110
111 // If explicitly requested, allow vastart and alloca. For invoke instructions
112 // verify that extraction is valid.
113 for (BasicBlock::const_iterator I = BB.begin(), E = BB.end(); I != E; ++I) {
114 if (isa<AllocaInst>(Val: I)) {
115 if (!AllowAlloca)
116 return false;
117 continue;
118 }
119
120 if (const auto *II = dyn_cast<InvokeInst>(Val&: I)) {
121 // Unwind destination (either a landingpad, catchswitch, or cleanuppad)
122 // must be a part of the subgraph which is being extracted.
123 if (auto *UBB = II->getUnwindDest())
124 if (!Result.count(key: UBB))
125 return false;
126 continue;
127 }
128
129 // All catch handlers of a catchswitch instruction as well as the unwind
130 // destination must be in the subgraph.
131 if (const auto *CSI = dyn_cast<CatchSwitchInst>(Val&: I)) {
132 if (auto *UBB = CSI->getUnwindDest())
133 if (!Result.count(key: UBB))
134 return false;
135 for (const auto *HBB : CSI->handlers())
136 if (!Result.count(key: const_cast<BasicBlock*>(HBB)))
137 return false;
138 continue;
139 }
140
141 // Make sure that entire catch handler is within subgraph. It is sufficient
142 // to check that catch return's block is in the list.
143 if (const auto *CPI = dyn_cast<CatchPadInst>(Val&: I)) {
144 for (const auto *U : CPI->users())
145 if (const auto *CRI = dyn_cast<CatchReturnInst>(Val: U))
146 if (!Result.count(key: const_cast<BasicBlock*>(CRI->getParent())))
147 return false;
148 continue;
149 }
150
151 // And do similar checks for cleanup handler - the entire handler must be
152 // in subgraph which is going to be extracted. For cleanup return should
153 // additionally check that the unwind destination is also in the subgraph.
154 if (const auto *CPI = dyn_cast<CleanupPadInst>(Val&: I)) {
155 for (const auto *U : CPI->users())
156 if (const auto *CRI = dyn_cast<CleanupReturnInst>(Val: U))
157 if (!Result.count(key: const_cast<BasicBlock*>(CRI->getParent())))
158 return false;
159 continue;
160 }
161 if (const auto *CRI = dyn_cast<CleanupReturnInst>(Val&: I)) {
162 if (auto *UBB = CRI->getUnwindDest())
163 if (!Result.count(key: UBB))
164 return false;
165 continue;
166 }
167
168 if (const CallInst *CI = dyn_cast<CallInst>(Val&: I)) {
169 // musttail calls have several restrictions, generally enforcing matching
170 // calling conventions between the caller parent and musttail callee.
171 // We can't usually honor them, because the extracted function has a
172 // different signature altogether, taking inputs/outputs and returning
173 // a control-flow identifier rather than the actual return value.
174 if (CI->isMustTailCall())
175 return false;
176
177 if (const Function *F = CI->getCalledFunction()) {
178 auto IID = F->getIntrinsicID();
179 if (IID == Intrinsic::vastart) {
180 if (AllowVarArgs)
181 continue;
182 else
183 return false;
184 }
185
186 // Currently, we miscompile outlined copies of eh_typid_for. There are
187 // proposals for fixing this in llvm.org/PR39545.
188 if (IID == Intrinsic::eh_typeid_for)
189 return false;
190 }
191 }
192 }
193
194 return true;
195}
196
197/// Build a set of blocks to extract if the input blocks are viable.
198static SetVector<BasicBlock *>
199buildExtractionBlockSet(ArrayRef<BasicBlock *> BBs, DominatorTree *DT,
200 bool AllowVarArgs, bool AllowAlloca) {
201 assert(!BBs.empty() && "The set of blocks to extract must be non-empty");
202 SetVector<BasicBlock *> Result;
203
204 // Loop over the blocks, adding them to our set-vector, and aborting with an
205 // empty set if we encounter invalid blocks.
206 for (BasicBlock *BB : BBs) {
207 // If this block is dead, don't process it.
208 if (DT && !DT->isReachableFromEntry(A: BB))
209 continue;
210
211 if (!Result.insert(X: BB))
212 llvm_unreachable("Repeated basic blocks in extraction input");
213 }
214
215 LLVM_DEBUG(dbgs() << "Region front block: " << Result.front()->getName()
216 << '\n');
217
218 for (auto *BB : Result) {
219 if (!isBlockValidForExtraction(BB: *BB, Result, AllowVarArgs, AllowAlloca))
220 return {};
221
222 // Make sure that the first block is not a landing pad.
223 if (BB == Result.front()) {
224 if (BB->isEHPad()) {
225 LLVM_DEBUG(dbgs() << "The first block cannot be an unwind block\n");
226 return {};
227 }
228 continue;
229 }
230
231 // All blocks other than the first must not have predecessors outside of
232 // the subgraph which is being extracted.
233 for (auto *PBB : predecessors(BB))
234 if (!Result.count(key: PBB)) {
235 LLVM_DEBUG(dbgs() << "No blocks in this region may have entries from "
236 "outside the region except for the first block!\n"
237 << "Problematic source BB: " << BB->getName() << "\n"
238 << "Problematic destination BB: " << PBB->getName()
239 << "\n");
240 return {};
241 }
242 }
243
244 return Result;
245}
246
247/// isAlignmentPreservedForAddrCast - Return true if the cast operation
248/// for specified target preserves original alignment
249static bool isAlignmentPreservedForAddrCast(const Triple &TargetTriple) {
250 switch (TargetTriple.getArch()) {
251 case Triple::ArchType::amdgcn:
252 case Triple::ArchType::r600:
253 return true;
254 // TODO: Add other architectures for which we are certain that alignment
255 // is preserved during address space cast operations.
256 default:
257 return false;
258 }
259 return false;
260}
261
262CodeExtractor::CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT,
263 bool AggregateArgs, BlockFrequencyInfo *BFI,
264 BranchProbabilityInfo *BPI, AssumptionCache *AC,
265 bool AllowVarArgs, bool AllowAlloca,
266 BasicBlock *AllocationBlock, std::string Suffix,
267 bool ArgsInZeroAddressSpace)
268 : DT(DT), AggregateArgs(AggregateArgs || AggregateArgsOpt), BFI(BFI),
269 BPI(BPI), AC(AC), AllocationBlock(AllocationBlock),
270 AllowVarArgs(AllowVarArgs),
271 Blocks(buildExtractionBlockSet(BBs, DT, AllowVarArgs, AllowAlloca)),
272 Suffix(Suffix), ArgsInZeroAddressSpace(ArgsInZeroAddressSpace) {}
273
274/// definedInRegion - Return true if the specified value is defined in the
275/// extracted region.
276static bool definedInRegion(const SetVector<BasicBlock *> &Blocks, Value *V) {
277 if (Instruction *I = dyn_cast<Instruction>(Val: V))
278 if (Blocks.count(key: I->getParent()))
279 return true;
280 return false;
281}
282
283/// definedInCaller - Return true if the specified value is defined in the
284/// function being code extracted, but not in the region being extracted.
285/// These values must be passed in as live-ins to the function.
286static bool definedInCaller(const SetVector<BasicBlock *> &Blocks, Value *V) {
287 if (isa<Argument>(Val: V)) return true;
288 if (Instruction *I = dyn_cast<Instruction>(Val: V))
289 if (!Blocks.count(key: I->getParent()))
290 return true;
291 return false;
292}
293
294static BasicBlock *getCommonExitBlock(const SetVector<BasicBlock *> &Blocks) {
295 BasicBlock *CommonExitBlock = nullptr;
296 auto hasNonCommonExitSucc = [&](BasicBlock *Block) {
297 for (auto *Succ : successors(BB: Block)) {
298 // Internal edges, ok.
299 if (Blocks.count(key: Succ))
300 continue;
301 if (!CommonExitBlock) {
302 CommonExitBlock = Succ;
303 continue;
304 }
305 if (CommonExitBlock != Succ)
306 return true;
307 }
308 return false;
309 };
310
311 if (any_of(Range: Blocks, P: hasNonCommonExitSucc))
312 return nullptr;
313
314 return CommonExitBlock;
315}
316
317CodeExtractorAnalysisCache::CodeExtractorAnalysisCache(Function &F) {
318 for (BasicBlock &BB : F) {
319 for (Instruction &II : BB)
320 if (auto *AI = dyn_cast<AllocaInst>(Val: &II))
321 Allocas.push_back(Elt: AI);
322
323 findSideEffectInfoForBlock(BB);
324 }
325}
326
327void CodeExtractorAnalysisCache::findSideEffectInfoForBlock(BasicBlock &BB) {
328 for (Instruction &II : BB) {
329 unsigned Opcode = II.getOpcode();
330 Value *MemAddr = nullptr;
331 switch (Opcode) {
332 case Instruction::Store:
333 case Instruction::Load: {
334 if (Opcode == Instruction::Store) {
335 StoreInst *SI = cast<StoreInst>(Val: &II);
336 MemAddr = SI->getPointerOperand();
337 } else {
338 LoadInst *LI = cast<LoadInst>(Val: &II);
339 MemAddr = LI->getPointerOperand();
340 }
341 // Global variable can not be aliased with locals.
342 if (isa<Constant>(Val: MemAddr))
343 break;
344 Value *Base = MemAddr->stripInBoundsConstantOffsets();
345 if (!isa<AllocaInst>(Val: Base)) {
346 SideEffectingBlocks.insert(V: &BB);
347 return;
348 }
349 BaseMemAddrs[&BB].insert(V: Base);
350 break;
351 }
352 default: {
353 IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(Val: &II);
354 if (IntrInst) {
355 if (IntrInst->isLifetimeStartOrEnd() || isa<PseudoProbeInst>(Val: IntrInst))
356 break;
357 SideEffectingBlocks.insert(V: &BB);
358 return;
359 }
360 // Treat all the other cases conservatively if it has side effects.
361 if (II.mayHaveSideEffects()) {
362 SideEffectingBlocks.insert(V: &BB);
363 return;
364 }
365 }
366 }
367 }
368}
369
370bool CodeExtractorAnalysisCache::doesBlockContainClobberOfAddr(
371 BasicBlock &BB, AllocaInst *Addr) const {
372 if (SideEffectingBlocks.count(V: &BB))
373 return true;
374 auto It = BaseMemAddrs.find(Val: &BB);
375 if (It != BaseMemAddrs.end())
376 return It->second.count(V: Addr);
377 return false;
378}
379
380bool CodeExtractor::isLegalToShrinkwrapLifetimeMarkers(
381 const CodeExtractorAnalysisCache &CEAC, Instruction *Addr) const {
382 AllocaInst *AI = cast<AllocaInst>(Val: Addr->stripInBoundsConstantOffsets());
383 Function *Func = (*Blocks.begin())->getParent();
384 for (BasicBlock &BB : *Func) {
385 if (Blocks.count(key: &BB))
386 continue;
387 if (CEAC.doesBlockContainClobberOfAddr(BB, Addr: AI))
388 return false;
389 }
390 return true;
391}
392
393BasicBlock *
394CodeExtractor::findOrCreateBlockForHoisting(BasicBlock *CommonExitBlock) {
395 BasicBlock *SinglePredFromOutlineRegion = nullptr;
396 assert(!Blocks.count(CommonExitBlock) &&
397 "Expect a block outside the region!");
398 for (auto *Pred : predecessors(BB: CommonExitBlock)) {
399 if (!Blocks.count(key: Pred))
400 continue;
401 if (!SinglePredFromOutlineRegion) {
402 SinglePredFromOutlineRegion = Pred;
403 } else if (SinglePredFromOutlineRegion != Pred) {
404 SinglePredFromOutlineRegion = nullptr;
405 break;
406 }
407 }
408
409 if (SinglePredFromOutlineRegion)
410 return SinglePredFromOutlineRegion;
411
412#ifndef NDEBUG
413 auto getFirstPHI = [](BasicBlock *BB) {
414 BasicBlock::iterator I = BB->begin();
415 PHINode *FirstPhi = nullptr;
416 while (I != BB->end()) {
417 PHINode *Phi = dyn_cast<PHINode>(I);
418 if (!Phi)
419 break;
420 if (!FirstPhi) {
421 FirstPhi = Phi;
422 break;
423 }
424 }
425 return FirstPhi;
426 };
427 // If there are any phi nodes, the single pred either exists or has already
428 // be created before code extraction.
429 assert(!getFirstPHI(CommonExitBlock) && "Phi not expected");
430#endif
431
432 BasicBlock *NewExitBlock =
433 CommonExitBlock->splitBasicBlock(I: CommonExitBlock->getFirstNonPHIIt());
434
435 for (BasicBlock *Pred :
436 llvm::make_early_inc_range(Range: predecessors(BB: CommonExitBlock))) {
437 if (Blocks.count(key: Pred))
438 continue;
439 Pred->getTerminator()->replaceUsesOfWith(From: CommonExitBlock, To: NewExitBlock);
440 }
441 // Now add the old exit block to the outline region.
442 Blocks.insert(X: CommonExitBlock);
443 return CommonExitBlock;
444}
445
446// Find the pair of life time markers for address 'Addr' that are either
447// defined inside the outline region or can legally be shrinkwrapped into the
448// outline region. If there are not other untracked uses of the address, return
449// the pair of markers if found; otherwise return a pair of nullptr.
450CodeExtractor::LifetimeMarkerInfo
451CodeExtractor::getLifetimeMarkers(const CodeExtractorAnalysisCache &CEAC,
452 Instruction *Addr,
453 BasicBlock *ExitBlock) const {
454 LifetimeMarkerInfo Info;
455
456 for (User *U : Addr->users()) {
457 IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(Val: U);
458 if (IntrInst) {
459 // We don't model addresses with multiple start/end markers, but the
460 // markers do not need to be in the region.
461 if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_start) {
462 if (Info.LifeStart)
463 return {};
464 Info.LifeStart = IntrInst;
465 continue;
466 }
467 if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_end) {
468 if (Info.LifeEnd)
469 return {};
470 Info.LifeEnd = IntrInst;
471 continue;
472 }
473 }
474 // Find untracked uses of the address, bail.
475 if (!definedInRegion(Blocks, V: U))
476 return {};
477 }
478
479 if (!Info.LifeStart || !Info.LifeEnd)
480 return {};
481
482 Info.SinkLifeStart = !definedInRegion(Blocks, V: Info.LifeStart);
483 Info.HoistLifeEnd = !definedInRegion(Blocks, V: Info.LifeEnd);
484 // Do legality check.
485 if ((Info.SinkLifeStart || Info.HoistLifeEnd) &&
486 !isLegalToShrinkwrapLifetimeMarkers(CEAC, Addr))
487 return {};
488
489 // Check to see if we have a place to do hoisting, if not, bail.
490 if (Info.HoistLifeEnd && !ExitBlock)
491 return {};
492
493 return Info;
494}
495
496void CodeExtractor::findAllocas(const CodeExtractorAnalysisCache &CEAC,
497 ValueSet &SinkCands, ValueSet &HoistCands,
498 BasicBlock *&ExitBlock) const {
499 Function *Func = (*Blocks.begin())->getParent();
500 ExitBlock = getCommonExitBlock(Blocks);
501
502 auto moveOrIgnoreLifetimeMarkers =
503 [&](const LifetimeMarkerInfo &LMI) -> bool {
504 if (!LMI.LifeStart)
505 return false;
506 if (LMI.SinkLifeStart) {
507 LLVM_DEBUG(dbgs() << "Sinking lifetime.start: " << *LMI.LifeStart
508 << "\n");
509 SinkCands.insert(X: LMI.LifeStart);
510 }
511 if (LMI.HoistLifeEnd) {
512 LLVM_DEBUG(dbgs() << "Hoisting lifetime.end: " << *LMI.LifeEnd << "\n");
513 HoistCands.insert(X: LMI.LifeEnd);
514 }
515 return true;
516 };
517
518 // Look up allocas in the original function in CodeExtractorAnalysisCache, as
519 // this is much faster than walking all the instructions.
520 for (AllocaInst *AI : CEAC.getAllocas()) {
521 BasicBlock *BB = AI->getParent();
522 if (Blocks.count(key: BB))
523 continue;
524
525 // As a prior call to extractCodeRegion() may have shrinkwrapped the alloca,
526 // check whether it is actually still in the original function.
527 Function *AIFunc = BB->getParent();
528 if (AIFunc != Func)
529 continue;
530
531 LifetimeMarkerInfo MarkerInfo = getLifetimeMarkers(CEAC, Addr: AI, ExitBlock);
532 bool Moved = moveOrIgnoreLifetimeMarkers(MarkerInfo);
533 if (Moved) {
534 LLVM_DEBUG(dbgs() << "Sinking alloca: " << *AI << "\n");
535 SinkCands.insert(X: AI);
536 continue;
537 }
538
539 // Find bitcasts in the outlined region that have lifetime marker users
540 // outside that region. Replace the lifetime marker use with an
541 // outside region bitcast to avoid unnecessary alloca/reload instructions
542 // and extra lifetime markers.
543 SmallVector<Instruction *, 2> LifetimeBitcastUsers;
544 for (User *U : AI->users()) {
545 if (!definedInRegion(Blocks, V: U))
546 continue;
547
548 if (U->stripInBoundsConstantOffsets() != AI)
549 continue;
550
551 Instruction *Bitcast = cast<Instruction>(Val: U);
552 for (User *BU : Bitcast->users()) {
553 auto *IntrInst = dyn_cast<LifetimeIntrinsic>(Val: BU);
554 if (!IntrInst)
555 continue;
556
557 if (definedInRegion(Blocks, V: IntrInst))
558 continue;
559
560 LLVM_DEBUG(dbgs() << "Replace use of extracted region bitcast"
561 << *Bitcast << " in out-of-region lifetime marker "
562 << *IntrInst << "\n");
563 LifetimeBitcastUsers.push_back(Elt: IntrInst);
564 }
565 }
566
567 for (Instruction *I : LifetimeBitcastUsers) {
568 Module *M = AIFunc->getParent();
569 LLVMContext &Ctx = M->getContext();
570 auto *Int8PtrTy = PointerType::getUnqual(C&: Ctx);
571 CastInst *CastI =
572 CastInst::CreatePointerCast(S: AI, Ty: Int8PtrTy, Name: "lt.cast", InsertBefore: I->getIterator());
573 I->replaceUsesOfWith(From: I->getOperand(i: 1), To: CastI);
574 }
575
576 // Follow any bitcasts.
577 SmallVector<Instruction *, 2> Bitcasts;
578 SmallVector<LifetimeMarkerInfo, 2> BitcastLifetimeInfo;
579 for (User *U : AI->users()) {
580 if (U->stripInBoundsConstantOffsets() == AI) {
581 Instruction *Bitcast = cast<Instruction>(Val: U);
582 LifetimeMarkerInfo LMI = getLifetimeMarkers(CEAC, Addr: Bitcast, ExitBlock);
583 if (LMI.LifeStart) {
584 Bitcasts.push_back(Elt: Bitcast);
585 BitcastLifetimeInfo.push_back(Elt: LMI);
586 continue;
587 }
588 }
589
590 // Found unknown use of AI.
591 if (!definedInRegion(Blocks, V: U)) {
592 Bitcasts.clear();
593 break;
594 }
595 }
596
597 // Either no bitcasts reference the alloca or there are unknown uses.
598 if (Bitcasts.empty())
599 continue;
600
601 LLVM_DEBUG(dbgs() << "Sinking alloca (via bitcast): " << *AI << "\n");
602 SinkCands.insert(X: AI);
603 for (unsigned I = 0, E = Bitcasts.size(); I != E; ++I) {
604 Instruction *BitcastAddr = Bitcasts[I];
605 const LifetimeMarkerInfo &LMI = BitcastLifetimeInfo[I];
606 assert(LMI.LifeStart &&
607 "Unsafe to sink bitcast without lifetime markers");
608 moveOrIgnoreLifetimeMarkers(LMI);
609 if (!definedInRegion(Blocks, V: BitcastAddr)) {
610 LLVM_DEBUG(dbgs() << "Sinking bitcast-of-alloca: " << *BitcastAddr
611 << "\n");
612 SinkCands.insert(X: BitcastAddr);
613 }
614 }
615 }
616}
617
618bool CodeExtractor::isEligible() const {
619 if (Blocks.empty())
620 return false;
621 BasicBlock *Header = *Blocks.begin();
622 Function *F = Header->getParent();
623
624 // For functions with varargs, check that varargs handling is only done in the
625 // outlined function, i.e vastart and vaend are only used in outlined blocks.
626 if (AllowVarArgs && F->getFunctionType()->isVarArg()) {
627 auto containsVarArgIntrinsic = [](const Instruction &I) {
628 if (const CallInst *CI = dyn_cast<CallInst>(Val: &I))
629 if (const Function *Callee = CI->getCalledFunction())
630 return Callee->getIntrinsicID() == Intrinsic::vastart ||
631 Callee->getIntrinsicID() == Intrinsic::vaend;
632 return false;
633 };
634
635 for (auto &BB : *F) {
636 if (Blocks.count(key: &BB))
637 continue;
638 if (llvm::any_of(Range&: BB, P: containsVarArgIntrinsic))
639 return false;
640 }
641 }
642 // stacksave as input implies stackrestore in the outlined function.
643 // This can confuse prolog epilog insertion phase.
644 // stacksave's uses must not cross outlined function.
645 for (BasicBlock *BB : Blocks) {
646 for (Instruction &I : *BB) {
647 IntrinsicInst *II = dyn_cast<IntrinsicInst>(Val: &I);
648 if (!II)
649 continue;
650 bool IsSave = II->getIntrinsicID() == Intrinsic::stacksave;
651 bool IsRestore = II->getIntrinsicID() == Intrinsic::stackrestore;
652 if (IsSave && any_of(Range: II->users(), P: [&Blks = this->Blocks](User *U) {
653 return !definedInRegion(Blocks: Blks, V: U);
654 }))
655 return false;
656 if (IsRestore && !definedInRegion(Blocks, V: II->getArgOperand(i: 0)))
657 return false;
658 }
659 }
660 return true;
661}
662
663void CodeExtractor::findInputsOutputs(ValueSet &Inputs, ValueSet &Outputs,
664 const ValueSet &SinkCands,
665 bool CollectGlobalInputs) const {
666 for (BasicBlock *BB : Blocks) {
667 // If a used value is defined outside the region, it's an input. If an
668 // instruction is used outside the region, it's an output.
669 for (Instruction &II : *BB) {
670 for (auto &OI : II.operands()) {
671 Value *V = OI;
672 if (!SinkCands.count(key: V) &&
673 (definedInCaller(Blocks, V) ||
674 (CollectGlobalInputs && llvm::isa<llvm::GlobalVariable>(Val: V))))
675 Inputs.insert(X: V);
676 }
677
678 for (User *U : II.users())
679 if (!definedInRegion(Blocks, V: U)) {
680 Outputs.insert(X: &II);
681 break;
682 }
683 }
684 }
685}
686
687/// severSplitPHINodesOfEntry - If a PHI node has multiple inputs from outside
688/// of the region, we need to split the entry block of the region so that the
689/// PHI node is easier to deal with.
690void CodeExtractor::severSplitPHINodesOfEntry(BasicBlock *&Header) {
691 unsigned NumPredsFromRegion = 0;
692 unsigned NumPredsOutsideRegion = 0;
693
694 if (Header != &Header->getParent()->getEntryBlock()) {
695 PHINode *PN = dyn_cast<PHINode>(Val: Header->begin());
696 if (!PN) return; // No PHI nodes.
697
698 // If the header node contains any PHI nodes, check to see if there is more
699 // than one entry from outside the region. If so, we need to sever the
700 // header block into two.
701 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
702 if (Blocks.count(key: PN->getIncomingBlock(i)))
703 ++NumPredsFromRegion;
704 else
705 ++NumPredsOutsideRegion;
706
707 // If there is one (or fewer) predecessor from outside the region, we don't
708 // need to do anything special.
709 if (NumPredsOutsideRegion <= 1) return;
710 }
711
712 // Otherwise, we need to split the header block into two pieces: one
713 // containing PHI nodes merging values from outside of the region, and a
714 // second that contains all of the code for the block and merges back any
715 // incoming values from inside of the region.
716 BasicBlock *NewBB = SplitBlock(Old: Header, SplitPt: Header->getFirstNonPHIIt(), DT);
717
718 // We only want to code extract the second block now, and it becomes the new
719 // header of the region.
720 BasicBlock *OldPred = Header;
721 Blocks.remove(X: OldPred);
722 Blocks.insert(X: NewBB);
723 Header = NewBB;
724
725 // Okay, now we need to adjust the PHI nodes and any branches from within the
726 // region to go to the new header block instead of the old header block.
727 if (NumPredsFromRegion) {
728 PHINode *PN = cast<PHINode>(Val: OldPred->begin());
729 // Loop over all of the predecessors of OldPred that are in the region,
730 // changing them to branch to NewBB instead.
731 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
732 if (Blocks.count(key: PN->getIncomingBlock(i))) {
733 Instruction *TI = PN->getIncomingBlock(i)->getTerminator();
734 TI->replaceUsesOfWith(From: OldPred, To: NewBB);
735 }
736
737 // Okay, everything within the region is now branching to the right block, we
738 // just have to update the PHI nodes now, inserting PHI nodes into NewBB.
739 BasicBlock::iterator AfterPHIs;
740 for (AfterPHIs = OldPred->begin(); isa<PHINode>(Val: AfterPHIs); ++AfterPHIs) {
741 PHINode *PN = cast<PHINode>(Val&: AfterPHIs);
742 // Create a new PHI node in the new region, which has an incoming value
743 // from OldPred of PN.
744 PHINode *NewPN = PHINode::Create(Ty: PN->getType(), NumReservedValues: 1 + NumPredsFromRegion,
745 NameStr: PN->getName() + ".ce");
746 NewPN->insertBefore(InsertPos: NewBB->begin());
747 PN->replaceAllUsesWith(V: NewPN);
748 NewPN->addIncoming(V: PN, BB: OldPred);
749
750 // Loop over all of the incoming value in PN, moving them to NewPN if they
751 // are from the extracted region.
752 PN->removeIncomingValueIf(Predicate: [&](unsigned i) {
753 if (Blocks.count(key: PN->getIncomingBlock(i))) {
754 NewPN->addIncoming(V: PN->getIncomingValue(i), BB: PN->getIncomingBlock(i));
755 return true;
756 }
757 return false;
758 });
759 }
760 }
761}
762
763/// severSplitPHINodesOfExits - if PHI nodes in exit blocks have inputs from
764/// outlined region, we split these PHIs on two: one with inputs from region
765/// and other with remaining incoming blocks; then first PHIs are placed in
766/// outlined region.
767void CodeExtractor::severSplitPHINodesOfExits() {
768 for (BasicBlock *ExitBB : ExtractedFuncRetVals) {
769 BasicBlock *NewBB = nullptr;
770
771 for (PHINode &PN : ExitBB->phis()) {
772 // Find all incoming values from the outlining region.
773 SmallVector<unsigned, 2> IncomingVals;
774 for (unsigned i = 0; i < PN.getNumIncomingValues(); ++i)
775 if (Blocks.count(key: PN.getIncomingBlock(i)))
776 IncomingVals.push_back(Elt: i);
777
778 // Do not process PHI if there is one (or fewer) predecessor from region.
779 // If PHI has exactly one predecessor from region, only this one incoming
780 // will be replaced on codeRepl block, so it should be safe to skip PHI.
781 if (IncomingVals.size() <= 1)
782 continue;
783
784 // Create block for new PHIs and add it to the list of outlined if it
785 // wasn't done before.
786 if (!NewBB) {
787 NewBB = BasicBlock::Create(Context&: ExitBB->getContext(),
788 Name: ExitBB->getName() + ".split",
789 Parent: ExitBB->getParent(), InsertBefore: ExitBB);
790 SmallVector<BasicBlock *, 4> Preds(predecessors(BB: ExitBB));
791 for (BasicBlock *PredBB : Preds)
792 if (Blocks.count(key: PredBB))
793 PredBB->getTerminator()->replaceUsesOfWith(From: ExitBB, To: NewBB);
794 UncondBrInst::Create(Target: ExitBB, InsertBefore: NewBB);
795 Blocks.insert(X: NewBB);
796 }
797
798 // Split this PHI.
799 PHINode *NewPN = PHINode::Create(Ty: PN.getType(), NumReservedValues: IncomingVals.size(),
800 NameStr: PN.getName() + ".ce");
801 NewPN->insertBefore(InsertPos: NewBB->getFirstNonPHIIt());
802 for (unsigned i : IncomingVals)
803 NewPN->addIncoming(V: PN.getIncomingValue(i), BB: PN.getIncomingBlock(i));
804 for (unsigned i : reverse(C&: IncomingVals))
805 PN.removeIncomingValue(Idx: i, DeletePHIIfEmpty: false);
806 PN.addIncoming(V: NewPN, BB: NewBB);
807 }
808 }
809}
810
811void CodeExtractor::splitReturnBlocks() {
812 for (BasicBlock *Block : Blocks)
813 if (ReturnInst *RI = dyn_cast<ReturnInst>(Val: Block->getTerminator())) {
814 BasicBlock *New =
815 Block->splitBasicBlock(I: RI->getIterator(), BBName: Block->getName() + ".ret");
816 if (DT) {
817 // Old dominates New. New node dominates all other nodes dominated
818 // by Old.
819 DomTreeNode *OldNode = DT->getNode(BB: Block);
820 SmallVector<DomTreeNode *, 8> Children(OldNode->begin(),
821 OldNode->end());
822
823 DomTreeNode *NewNode = DT->addNewBlock(BB: New, DomBB: Block);
824
825 for (DomTreeNode *I : Children)
826 DT->changeImmediateDominator(N: I, NewIDom: NewNode);
827 }
828 }
829}
830
831Function *CodeExtractor::constructFunctionDeclaration(
832 const ValueSet &inputs, const ValueSet &outputs, BlockFrequency EntryFreq,
833 const Twine &Name, ValueSet &StructValues, StructType *&StructTy) {
834 LLVM_DEBUG(dbgs() << "inputs: " << inputs.size() << "\n");
835 LLVM_DEBUG(dbgs() << "outputs: " << outputs.size() << "\n");
836
837 Function *oldFunction = Blocks.front()->getParent();
838 Module *M = Blocks.front()->getModule();
839
840 // Assemble the function's parameter lists.
841 std::vector<Type *> ParamTy;
842 std::vector<Type *> AggParamTy;
843 const DataLayout &DL = M->getDataLayout();
844
845 // Add the types of the input values to the function's argument list
846 for (Value *value : inputs) {
847 LLVM_DEBUG(dbgs() << "value used in func: " << *value << "\n");
848 if (AggregateArgs && !ExcludeArgsFromAggregate.contains(key: value)) {
849 AggParamTy.push_back(x: value->getType());
850 StructValues.insert(X: value);
851 } else
852 ParamTy.push_back(x: value->getType());
853 }
854
855 // Add the types of the output values to the function's argument list.
856 for (Value *output : outputs) {
857 LLVM_DEBUG(dbgs() << "instr used in func: " << *output << "\n");
858 if (AggregateArgs && !ExcludeArgsFromAggregate.contains(key: output)) {
859 AggParamTy.push_back(x: output->getType());
860 StructValues.insert(X: output);
861 } else
862 ParamTy.push_back(
863 x: PointerType::get(C&: output->getContext(), AddressSpace: DL.getAllocaAddrSpace()));
864 }
865
866 assert(
867 (ParamTy.size() + AggParamTy.size()) ==
868 (inputs.size() + outputs.size()) &&
869 "Number of scalar and aggregate params does not match inputs, outputs");
870 assert((StructValues.empty() || AggregateArgs) &&
871 "Expeced StructValues only with AggregateArgs set");
872
873 // Concatenate scalar and aggregate params in ParamTy.
874 if (!AggParamTy.empty()) {
875 StructTy = StructType::get(Context&: M->getContext(), Elements: AggParamTy);
876 ParamTy.push_back(x: PointerType::get(
877 C&: M->getContext(), AddressSpace: ArgsInZeroAddressSpace ? 0 : DL.getAllocaAddrSpace()));
878 }
879
880 Type *RetTy = getSwitchType();
881 LLVM_DEBUG({
882 dbgs() << "Function type: " << *RetTy << " f(";
883 for (Type *i : ParamTy)
884 dbgs() << *i << ", ";
885 dbgs() << ")\n";
886 });
887
888 FunctionType *funcType = FunctionType::get(
889 Result: RetTy, Params: ParamTy, isVarArg: AllowVarArgs && oldFunction->isVarArg());
890
891 // Create the new function
892 Function *newFunction =
893 Function::Create(Ty: funcType, Linkage: GlobalValue::InternalLinkage,
894 AddrSpace: oldFunction->getAddressSpace(), N: Name, M);
895
896 // Propagate personality info to the new function if there is one.
897 if (oldFunction->hasPersonalityFn())
898 newFunction->setPersonalityFn(oldFunction->getPersonalityFn());
899
900 // Inherit all of the target dependent attributes and white-listed
901 // target independent attributes.
902 // (e.g. If the extracted region contains a call to an x86.sse
903 // instruction we need to make sure that the extracted region has the
904 // "target-features" attribute allowing it to be lowered.
905 // FIXME: This should be changed to check to see if a specific
906 // attribute can not be inherited.
907 for (const auto &Attr : oldFunction->getAttributes().getFnAttrs()) {
908 if (Attr.isStringAttribute()) {
909 if (Attr.getKindAsString() == "thunk")
910 continue;
911 } else
912 switch (Attr.getKindAsEnum()) {
913 // Those attributes cannot be propagated safely. Explicitly list them
914 // here so we get a warning if new attributes are added.
915 case Attribute::AllocSize:
916 case Attribute::Builtin:
917 case Attribute::Convergent:
918 case Attribute::JumpTable:
919 case Attribute::Naked:
920 case Attribute::NoBuiltin:
921 case Attribute::NoMerge:
922 case Attribute::NoReturn:
923 case Attribute::NoSync:
924 case Attribute::ReturnsTwice:
925 case Attribute::Speculatable:
926 case Attribute::StackAlignment:
927 case Attribute::WillReturn:
928 case Attribute::AllocKind:
929 case Attribute::PresplitCoroutine:
930 case Attribute::Memory:
931 case Attribute::NoFPClass:
932 case Attribute::CoroDestroyOnlyWhenComplete:
933 case Attribute::CoroElideSafe:
934 case Attribute::NoDivergenceSource:
935 case Attribute::NoCreateUndefOrPoison:
936 continue;
937 // Those attributes should be safe to propagate to the extracted function.
938 case Attribute::AlwaysInline:
939 case Attribute::Cold:
940 case Attribute::DisableSanitizerInstrumentation:
941 case Attribute::Flatten:
942 case Attribute::FnRetThunkExtern:
943 case Attribute::Hot:
944 case Attribute::HybridPatchable:
945 case Attribute::NoRecurse:
946 case Attribute::InlineHint:
947 case Attribute::MinSize:
948 case Attribute::NoCallback:
949 case Attribute::NoDuplicate:
950 case Attribute::NoFree:
951 case Attribute::NoImplicitFloat:
952 case Attribute::NoInline:
953 case Attribute::NoOutline:
954 case Attribute::NonLazyBind:
955 case Attribute::NoRedZone:
956 case Attribute::NoUnwind:
957 case Attribute::NoSanitizeBounds:
958 case Attribute::NoSanitizeCoverage:
959 case Attribute::NullPointerIsValid:
960 case Attribute::OptimizeForDebugging:
961 case Attribute::OptForFuzzing:
962 case Attribute::OptimizeNone:
963 case Attribute::OptimizeForSize:
964 case Attribute::SafeStack:
965 case Attribute::ShadowCallStack:
966 case Attribute::SanitizeAddress:
967 case Attribute::SanitizeMemory:
968 case Attribute::SanitizeNumericalStability:
969 case Attribute::SanitizeThread:
970 case Attribute::SanitizeType:
971 case Attribute::SanitizeHWAddress:
972 case Attribute::SanitizeMemTag:
973 case Attribute::SanitizeRealtime:
974 case Attribute::SanitizeRealtimeBlocking:
975 case Attribute::SanitizeAllocToken:
976 case Attribute::SpeculativeLoadHardening:
977 case Attribute::StackProtect:
978 case Attribute::StackProtectReq:
979 case Attribute::StackProtectStrong:
980 case Attribute::StrictFP:
981 case Attribute::UWTable:
982 case Attribute::VScaleRange:
983 case Attribute::NoCfCheck:
984 case Attribute::MustProgress:
985 case Attribute::NoProfile:
986 case Attribute::SkipProfile:
987 case Attribute::DenormalFPEnv:
988 break;
989 // These attributes cannot be applied to functions.
990 case Attribute::Alignment:
991 case Attribute::AllocatedPointer:
992 case Attribute::AllocAlign:
993 case Attribute::ByVal:
994 case Attribute::Captures:
995 case Attribute::Dereferenceable:
996 case Attribute::DereferenceableOrNull:
997 case Attribute::ElementType:
998 case Attribute::InAlloca:
999 case Attribute::InReg:
1000 case Attribute::Nest:
1001 case Attribute::NoAlias:
1002 case Attribute::NoUndef:
1003 case Attribute::NonNull:
1004 case Attribute::Preallocated:
1005 case Attribute::ReadNone:
1006 case Attribute::ReadOnly:
1007 case Attribute::Returned:
1008 case Attribute::SExt:
1009 case Attribute::StructRet:
1010 case Attribute::SwiftError:
1011 case Attribute::SwiftSelf:
1012 case Attribute::SwiftAsync:
1013 case Attribute::ZExt:
1014 case Attribute::ImmArg:
1015 case Attribute::ByRef:
1016 case Attribute::WriteOnly:
1017 case Attribute::Writable:
1018 case Attribute::DeadOnUnwind:
1019 case Attribute::Range:
1020 case Attribute::Initializes:
1021 case Attribute::NoExt:
1022 // These are not really attributes.
1023 case Attribute::None:
1024 case Attribute::EndAttrKinds:
1025 case Attribute::EmptyKey:
1026 case Attribute::TombstoneKey:
1027 case Attribute::DeadOnReturn:
1028 llvm_unreachable("Not a function attribute");
1029 }
1030
1031 newFunction->addFnAttr(Attr);
1032 }
1033
1034 // Create scalar and aggregate iterators to name all of the arguments we
1035 // inserted.
1036 Function::arg_iterator ScalarAI = newFunction->arg_begin();
1037
1038 // Set names and attributes for input and output arguments.
1039 ScalarAI = newFunction->arg_begin();
1040 for (Value *input : inputs) {
1041 if (StructValues.contains(key: input))
1042 continue;
1043
1044 ScalarAI->setName(input->getName());
1045 if (input->isSwiftError())
1046 newFunction->addParamAttr(ArgNo: ScalarAI - newFunction->arg_begin(),
1047 Kind: Attribute::SwiftError);
1048 ++ScalarAI;
1049 }
1050 for (Value *output : outputs) {
1051 if (StructValues.contains(key: output))
1052 continue;
1053
1054 ScalarAI->setName(output->getName() + ".out");
1055 ++ScalarAI;
1056 }
1057
1058 // Update the entry count of the function.
1059 if (BFI) {
1060 auto Count = BFI->getProfileCountFromFreq(Freq: EntryFreq);
1061 if (Count.has_value())
1062 newFunction->setEntryCount(
1063 Count: ProfileCount(*Count, Function::PCT_Real)); // FIXME
1064 }
1065
1066 return newFunction;
1067}
1068
1069/// If the original function has debug info, we have to add a debug location
1070/// to the new branch instruction from the artificial entry block.
1071/// We use the debug location of the first instruction in the extracted
1072/// blocks, as there is no other equivalent line in the source code.
1073static void applyFirstDebugLoc(Function *oldFunction,
1074 ArrayRef<BasicBlock *> Blocks,
1075 Instruction *BranchI) {
1076 if (oldFunction->getSubprogram()) {
1077 any_of(Range&: Blocks, P: [&BranchI](const BasicBlock *BB) {
1078 return any_of(Range: *BB, P: [&BranchI](const Instruction &I) {
1079 if (!I.getDebugLoc())
1080 return false;
1081 BranchI->setDebugLoc(I.getDebugLoc());
1082 return true;
1083 });
1084 });
1085 }
1086}
1087
1088/// Erase lifetime.start markers which reference inputs to the extraction
1089/// region, and insert the referenced memory into \p LifetimesStart.
1090///
1091/// The extraction region is defined by a set of blocks (\p Blocks), and a set
1092/// of allocas which will be moved from the caller function into the extracted
1093/// function (\p SunkAllocas).
1094static void eraseLifetimeMarkersOnInputs(const SetVector<BasicBlock *> &Blocks,
1095 const SetVector<Value *> &SunkAllocas,
1096 SetVector<Value *> &LifetimesStart) {
1097 for (BasicBlock *BB : Blocks) {
1098 for (Instruction &I : llvm::make_early_inc_range(Range&: *BB)) {
1099 auto *II = dyn_cast<LifetimeIntrinsic>(Val: &I);
1100 if (!II)
1101 continue;
1102
1103 // Get the memory operand of the lifetime marker. If the underlying
1104 // object is a sunk alloca, or is otherwise defined in the extraction
1105 // region, the lifetime marker must not be erased.
1106 Value *Mem = II->getOperand(i_nocapture: 0);
1107 if (SunkAllocas.count(key: Mem) || definedInRegion(Blocks, V: Mem))
1108 continue;
1109
1110 if (II->getIntrinsicID() == Intrinsic::lifetime_start)
1111 LifetimesStart.insert(X: Mem);
1112 II->eraseFromParent();
1113 }
1114 }
1115}
1116
1117/// Insert lifetime start/end markers surrounding the call to the new function
1118/// for objects defined in the caller.
1119static void insertLifetimeMarkersSurroundingCall(
1120 Module *M, ArrayRef<Value *> LifetimesStart, ArrayRef<Value *> LifetimesEnd,
1121 CallInst *TheCall) {
1122 Instruction *Term = TheCall->getParent()->getTerminator();
1123
1124 // Emit lifetime markers for the pointers given in \p Objects. Insert the
1125 // markers before the call if \p InsertBefore, and after the call otherwise.
1126 auto insertMarkers = [&](Intrinsic::ID MarkerFunc, ArrayRef<Value *> Objects,
1127 bool InsertBefore) {
1128 for (Value *Mem : Objects) {
1129 assert((!isa<Instruction>(Mem) || cast<Instruction>(Mem)->getFunction() ==
1130 TheCall->getFunction()) &&
1131 "Input memory not defined in original function");
1132
1133 Function *Func =
1134 Intrinsic::getOrInsertDeclaration(M, id: MarkerFunc, OverloadTys: Mem->getType());
1135 auto Marker = CallInst::Create(Func, Args: Mem);
1136 if (InsertBefore)
1137 Marker->insertBefore(InsertPos: TheCall->getIterator());
1138 else
1139 Marker->insertBefore(InsertPos: Term->getIterator());
1140 }
1141 };
1142
1143 if (!LifetimesStart.empty()) {
1144 insertMarkers(Intrinsic::lifetime_start, LifetimesStart,
1145 /*InsertBefore=*/true);
1146 }
1147
1148 if (!LifetimesEnd.empty()) {
1149 insertMarkers(Intrinsic::lifetime_end, LifetimesEnd,
1150 /*InsertBefore=*/false);
1151 }
1152}
1153
1154void CodeExtractor::moveCodeToFunction(Function *newFunction) {
1155 auto newFuncIt = newFunction->begin();
1156 for (BasicBlock *Block : Blocks) {
1157 // Delete the basic block from the old function, and the list of blocks
1158 Block->removeFromParent();
1159
1160 // Insert this basic block into the new function
1161 // Insert the original blocks after the entry block created
1162 // for the new function. The entry block may be followed
1163 // by a set of exit blocks at this point, but these exit
1164 // blocks better be placed at the end of the new function.
1165 newFuncIt = newFunction->insert(Position: std::next(x: newFuncIt), BB: Block);
1166 }
1167}
1168
/// Recompute branch weights and edge probabilities for the terminator of
/// \p CodeReplacer — the branch/switch dispatching to the region's former
/// exit blocks — from the per-exit-block frequencies in \p ExitWeights.
/// Updates \p BPI and attaches !prof branch_weights metadata when there is a
/// nonzero total weight.
void CodeExtractor::calculateNewCallTerminatorWeights(
    BasicBlock *CodeReplacer,
    const DenseMap<BasicBlock *, BlockFrequency> &ExitWeights,
    BranchProbabilityInfo *BPI) {
  using Distribution = BlockFrequencyInfoImplBase::Distribution;
  using BlockNode = BlockFrequencyInfoImplBase::BlockNode;

  // Update the branch weights for the exit block.
  Instruction *TI = CodeReplacer->getTerminator();
  SmallVector<unsigned, 8> BranchWeights(TI->getNumSuccessors(), 0);

  // Block Frequency distribution with dummy node.
  Distribution BranchDist;

  // Probabilities start as "unknown"; successors with zero exit frequency are
  // pinned to zero below, the rest are filled in after normalization.
  SmallVector<BranchProbability, 4> EdgeProbabilities(
      TI->getNumSuccessors(), BranchProbability::getUnknown());

  // Add each of the frequencies of the successors.
  for (unsigned i = 0, e = TI->getNumSuccessors(); i < e; ++i) {
    BlockNode ExitNode(i);
    uint64_t ExitFreq = ExitWeights.lookup(Val: TI->getSuccessor(Idx: i)).getFrequency();
    if (ExitFreq != 0)
      BranchDist.addExit(Node: ExitNode, Amount: ExitFreq);
    else
      EdgeProbabilities[i] = BranchProbability::getZero();
  }

  // Check for no total weight. In that case every edge is zero-probability
  // and no branch_weights metadata is emitted.
  if (BranchDist.Total == 0) {
    BPI->setEdgeProbability(Src: CodeReplacer, Probs: EdgeProbabilities);
    return;
  }

  // Normalize the distribution so that they can fit in unsigned.
  BranchDist.normalize();

  // Create normalized branch weights and set the metadata.
  for (unsigned I = 0, E = BranchDist.Weights.size(); I < E; ++I) {
    const auto &Weight = BranchDist.Weights[I];

    // Get the weight and update the current BFI.
    BranchWeights[Weight.TargetNode.Index] = Weight.Amount;
    BranchProbability BP(Weight.Amount, BranchDist.Total);
    EdgeProbabilities[Weight.TargetNode.Index] = BP;
  }
  BPI->setEdgeProbability(Src: CodeReplacer, Probs: EdgeProbabilities);
  TI->setMetadata(
      KindID: LLVMContext::MD_prof,
      Node: MDBuilder(TI->getContext()).createBranchWeights(Weights: BranchWeights));
}
1219
1220/// Erase debug info intrinsics which refer to values in \p F but aren't in
1221/// \p F.
1222static void eraseDebugIntrinsicsWithNonLocalRefs(Function &F) {
1223 for (Instruction &I : instructions(F)) {
1224 SmallVector<DbgVariableRecord *, 4> DbgVariableRecords;
1225 findDbgUsers(V: &I, DbgVariableRecords);
1226 for (DbgVariableRecord *DVR : DbgVariableRecords)
1227 if (DVR->getFunction() != &F)
1228 DVR->eraseFromParent();
1229 }
1230}
1231
/// Fix up the debug info in the old and new functions. Following changes are
/// done.
/// 1. If a debug record points to a value that has been replaced, update the
/// record to use the new value.
/// 2. If an Input value that has been replaced was used as a location of a
/// debug record in the Parent function, then materialize a similar record in
/// the new function.
/// 3. Point line locations and debug intrinsics to the new subprogram scope
/// 4. Remove intrinsics which point to values outside of the new function.
static void fixupDebugInfoPostExtraction(Function &OldFunc, Function &NewFunc,
                                         CallInst &TheCall,
                                         const SetVector<Value *> &Inputs,
                                         ArrayRef<Value *> NewValues) {
  DISubprogram *OldSP = OldFunc.getSubprogram();
  LLVMContext &Ctx = OldFunc.getContext();

  if (!OldSP) {
    // Erase any debug info the new function contains.
    stripDebugInfo(F&: NewFunc);
    // Make sure the old function doesn't contain any non-local metadata refs.
    eraseDebugIntrinsicsWithNonLocalRefs(F&: NewFunc);
    return;
  }

  // Create a subprogram for the new function. Leave out a description of the
  // function arguments, as the parameters don't correspond to anything at the
  // source level.
  assert(OldSP->getUnit() && "Missing compile unit for subprogram");
  DIBuilder DIB(*OldFunc.getParent(), /*AllowUnresolved=*/false,
                OldSP->getUnit());
  auto SPType = DIB.createSubroutineType(ParameterTypes: DIB.getOrCreateTypeArray(Elements: {}));
  DISubprogram::DISPFlags SPFlags = DISubprogram::SPFlagDefinition |
                                    DISubprogram::SPFlagOptimized |
                                    DISubprogram::SPFlagLocalToUnit;
  auto NewSP = DIB.createFunction(
      Scope: OldSP->getUnit(), Name: NewFunc.getName(), LinkageName: NewFunc.getName(), File: OldSP->getFile(),
      /*LineNo=*/0, Ty: SPType, /*ScopeLine=*/0, Flags: DINode::FlagZero, SPFlags);
  NewFunc.setSubprogram(NewSP);

  // Rewrite a debug record whose location operand \p OldLoc was replaced by
  // \p NewLoc. If the record moved into the new function, repoint it in
  // place; otherwise (it stayed in the parent) materialize an equivalent
  // declare/value record in the new function's entry block.
  auto UpdateOrInsertDebugRecord = [&](auto *DR, Value *OldLoc, Value *NewLoc,
                                       DIExpression *Expr, bool Declare) {
    if (DR->getParent()->getParent() == &NewFunc) {
      DR->replaceVariableLocationOp(OldLoc, NewLoc);
      return;
    }
    if (Declare) {
      DIB.insertDeclare(NewLoc, DR->getVariable(), Expr, DR->getDebugLoc(),
                        &NewFunc.getEntryBlock());
      return;
    }
    DIB.insertDbgValueIntrinsic(
        Val: NewLoc, VarInfo: DR->getVariable(), Expr, DL: DR->getDebugLoc(),
        InsertPt: NewFunc.getEntryBlock().getTerminator()->getIterator());
  };
  for (auto [Input, NewVal] : zip_equal(t: Inputs, u&: NewValues)) {
    SmallVector<DbgVariableRecord *, 1> DPUsers;
    findDbgUsers(V: Input, DbgVariableRecords&: DPUsers);
    DIExpression *Expr = DIB.createExpression();

    // Iterate the debug users of the Input values. If they are in the
    // extracted function then update their location with the new value. If
    // they are in the parent function then create a similar debug record.
    for (auto *DVR : DPUsers)
      UpdateOrInsertDebugRecord(DVR, Input, NewVal, Expr, DVR->isDbgDeclare());
  }

  auto IsInvalidLocation = [&NewFunc](Value *Location) {
    // Location is invalid if it isn't a constant, an instruction or an
    // argument, or is an instruction/argument but isn't in the new function.
    if (!Location || (!isa<Constant>(Val: Location) && !isa<Argument>(Val: Location) &&
                      !isa<Instruction>(Val: Location)))
      return true;

    if (Argument *Arg = dyn_cast<Argument>(Val: Location))
      return Arg->getParent() != &NewFunc;
    if (Instruction *LocationInst = dyn_cast<Instruction>(Val: Location))
      return LocationInst->getFunction() != &NewFunc;
    return false;
  };

  // Debug intrinsics in the new function need to be updated in one of two
  // ways:
  //  1) They need to be deleted, because they describe a value in the old
  //     function.
  //  2) They need to point to fresh metadata, e.g. because they currently
  //     point to a variable in the wrong scope.
  SmallDenseMap<DINode *, DINode *> RemappedMetadata;
  SmallVector<DbgVariableRecord *, 4> DVRsToDelete;
  DenseMap<const MDNode *, MDNode *> Cache;

  // Map an old local variable to a fresh variable whose scope chain has been
  // cloned into the new subprogram. Results are memoized in RemappedMetadata.
  auto GetUpdatedDIVariable = [&](DILocalVariable *OldVar) {
    DINode *&NewVar = RemappedMetadata[OldVar];
    if (!NewVar) {
      DILocalScope *NewScope = DILocalScope::cloneScopeForSubprogram(
          RootScope&: *OldVar->getScope(), NewSP&: *NewSP, Ctx, Cache);
      NewVar = DIB.createAutoVariable(
          Scope: NewScope, Name: OldVar->getName(), File: OldVar->getFile(), LineNo: OldVar->getLine(),
          Ty: OldVar->getType(), /*AlwaysPreserve=*/false, Flags: DINode::FlagZero,
          AlignInBits: OldVar->getAlignInBits());
    }
    return cast<DILocalVariable>(Val: NewVar);
  };

  auto UpdateDbgLabel = [&](auto *LabelRecord) {
    // Point the label record to a fresh label within the new function if
    // the record was not inlined from some other function.
    if (LabelRecord->getDebugLoc().getInlinedAt())
      return;
    DILabel *OldLabel = LabelRecord->getLabel();
    DINode *&NewLabel = RemappedMetadata[OldLabel];
    if (!NewLabel) {
      DILocalScope *NewScope = DILocalScope::cloneScopeForSubprogram(
          RootScope&: *OldLabel->getScope(), NewSP&: *NewSP, Ctx, Cache);
      NewLabel =
          DILabel::get(Context&: Ctx, Scope: NewScope, Name: OldLabel->getName(), File: OldLabel->getFile(),
                       Line: OldLabel->getLine(), Column: OldLabel->getColumn(),
                       IsArtificial: OldLabel->isArtificial(), CoroSuspendIdx: OldLabel->getCoroSuspendIdx());
    }
    LabelRecord->setLabel(cast<DILabel>(Val: NewLabel));
  };

  // Process every debug record attached to one instruction: fix labels,
  // queue invalid records for deletion, and re-scope surviving variables.
  auto UpdateDbgRecordsOnInst = [&](Instruction &I) -> void {
    for (DbgRecord &DR : I.getDbgRecordRange()) {
      if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(Val: &DR)) {
        UpdateDbgLabel(DLR);
        continue;
      }

      DbgVariableRecord &DVR = cast<DbgVariableRecord>(Val&: DR);
      // If any of the used locations are invalid, delete the record.
      if (any_of(Range: DVR.location_ops(), P: IsInvalidLocation)) {
        DVRsToDelete.push_back(Elt: &DVR);
        continue;
      }

      // DbgAssign intrinsics have an extra Value argument:
      if (DVR.isDbgAssign() && IsInvalidLocation(DVR.getAddress())) {
        DVRsToDelete.push_back(Elt: &DVR);
        continue;
      }

      // If the variable was in the scope of the old function, i.e. it was not
      // inlined, point the intrinsic to a fresh variable within the new
      // function.
      if (!DVR.getDebugLoc().getInlinedAt())
        DVR.setVariable(GetUpdatedDIVariable(DVR.getVariable()));
    }
  };

  for (Instruction &I : instructions(F&: NewFunc))
    UpdateDbgRecordsOnInst(I);

  // Deletion is deferred to here so the iteration above never invalidates
  // the record range it is walking.
  for (auto *DVR : DVRsToDelete)
    DVR->getMarker()->MarkedInstr->dropOneDbgRecord(I: DVR);
  DIB.finalizeSubprogram(SP: NewSP);

  // Fix up the scope information attached to the line locations and the
  // debug assignment metadata in the new function.
  DenseMap<DIAssignID *, DIAssignID *> AssignmentIDMap;
  for (Instruction &I : instructions(F&: NewFunc)) {
    if (const DebugLoc &DL = I.getDebugLoc())
      I.setDebugLoc(
          DebugLoc::replaceInlinedAtSubprogram(DL, NewSP&: *NewSP, Ctx, Cache));
    for (DbgRecord &DR : I.getDbgRecordRange())
      DR.setDebugLoc(DebugLoc::replaceInlinedAtSubprogram(DL: DR.getDebugLoc(),
                                                          NewSP&: *NewSP, Ctx, Cache));

    // Loop info metadata may contain line locations. Fix them up.
    auto updateLoopInfoLoc = [&Ctx, &Cache, NewSP](Metadata *MD) -> Metadata * {
      if (auto *Loc = dyn_cast_or_null<DILocation>(Val: MD))
        return DebugLoc::replaceInlinedAtSubprogram(DL: Loc, NewSP&: *NewSP, Ctx, Cache);
      return MD;
    };
    updateLoopMetadataDebugLocations(I, Updater: updateLoopInfoLoc);
    at::remapAssignID(Map&: AssignmentIDMap, I);
  }
  // Make sure the call into the extracted function carries a debug location
  // in the old subprogram's scope.
  if (!TheCall.getDebugLoc())
    TheCall.setDebugLoc(DILocation::get(Context&: Ctx, Line: 0, Column: 0, Scope: OldSP));

  eraseDebugIntrinsicsWithNonLocalRefs(F&: NewFunc);
}
1413
1414Function *
1415CodeExtractor::extractCodeRegion(const CodeExtractorAnalysisCache &CEAC) {
1416 ValueSet Inputs, Outputs;
1417 return extractCodeRegion(CEAC, Inputs, Outputs);
1418}
1419
1420Function *
1421CodeExtractor::extractCodeRegion(const CodeExtractorAnalysisCache &CEAC,
1422 ValueSet &inputs, ValueSet &outputs) {
1423 if (!isEligible())
1424 return nullptr;
1425
1426 // Assumption: this is a single-entry code region, and the header is the first
1427 // block in the region.
1428 BasicBlock *header = *Blocks.begin();
1429 Function *oldFunction = header->getParent();
1430
1431 normalizeCFGForExtraction(header);
1432
1433 // Remove @llvm.assume calls that will be moved to the new function from the
1434 // old function's assumption cache.
1435 for (BasicBlock *Block : Blocks) {
1436 for (Instruction &I : llvm::make_early_inc_range(Range&: *Block)) {
1437 if (auto *AI = dyn_cast<AssumeInst>(Val: &I)) {
1438 if (AC)
1439 AC->unregisterAssumption(CI: AI);
1440 AI->eraseFromParent();
1441 }
1442 }
1443 }
1444
1445 ValueSet SinkingCands, HoistingCands;
1446 BasicBlock *CommonExit = nullptr;
1447 findAllocas(CEAC, SinkCands&: SinkingCands, HoistCands&: HoistingCands, ExitBlock&: CommonExit);
1448 assert(HoistingCands.empty() || CommonExit);
1449
1450 // Find inputs to, outputs from the code region.
1451 findInputsOutputs(Inputs&: inputs, Outputs&: outputs, SinkCands: SinkingCands);
1452
1453 // Collect objects which are inputs to the extraction region and also
1454 // referenced by lifetime start markers within it. The effects of these
1455 // markers must be replicated in the calling function to prevent the stack
1456 // coloring pass from merging slots which store input objects.
1457 ValueSet LifetimesStart;
1458 eraseLifetimeMarkersOnInputs(Blocks, SunkAllocas: SinkingCands, LifetimesStart);
1459
1460 if (!HoistingCands.empty()) {
1461 auto *HoistToBlock = findOrCreateBlockForHoisting(CommonExitBlock: CommonExit);
1462 Instruction *TI = HoistToBlock->getTerminator();
1463 for (auto *II : HoistingCands)
1464 cast<Instruction>(Val: II)->moveBefore(InsertPos: TI->getIterator());
1465 computeExtractedFuncRetVals();
1466 }
1467
1468 // CFG/ExitBlocks must not change hereafter
1469
1470 // Calculate the entry frequency of the new function before we change the root
1471 // block.
1472 BlockFrequency EntryFreq;
1473 DenseMap<BasicBlock *, BlockFrequency> ExitWeights;
1474 if (BFI) {
1475 assert(BPI && "Both BPI and BFI are required to preserve profile info");
1476 for (BasicBlock *Pred : predecessors(BB: header)) {
1477 if (Blocks.count(key: Pred))
1478 continue;
1479 EntryFreq +=
1480 BFI->getBlockFreq(BB: Pred) * BPI->getEdgeProbability(Src: Pred, Dst: header);
1481 }
1482
1483 for (BasicBlock *Succ : ExtractedFuncRetVals) {
1484 for (BasicBlock *Block : predecessors(BB: Succ)) {
1485 if (!Blocks.count(key: Block))
1486 continue;
1487
1488 // Update the branch weight for this successor.
1489 BlockFrequency &BF = ExitWeights[Succ];
1490 BF += BFI->getBlockFreq(BB: Block) * BPI->getEdgeProbability(Src: Block, Dst: Succ);
1491 }
1492 }
1493 }
1494
1495 // Determine position for the replacement code. Do so before header is moved
1496 // to the new function.
1497 BasicBlock *ReplIP = header;
1498 while (ReplIP && Blocks.count(key: ReplIP))
1499 ReplIP = ReplIP->getNextNode();
1500
1501 // Construct new function based on inputs/outputs & add allocas for all defs.
1502 std::string SuffixToUse =
1503 Suffix.empty()
1504 ? (header->getName().empty() ? "extracted" : header->getName().str())
1505 : Suffix;
1506
1507 ValueSet StructValues;
1508 StructType *StructTy = nullptr;
1509 Function *newFunction = constructFunctionDeclaration(
1510 inputs, outputs, EntryFreq, Name: oldFunction->getName() + "." + SuffixToUse,
1511 StructValues, StructTy);
1512 SmallVector<Value *> NewValues;
1513
1514 emitFunctionBody(inputs, outputs, StructValues, newFunction, StructArgTy: StructTy, header,
1515 SinkingCands, NewValues);
1516
1517 std::vector<Value *> Reloads;
1518 CallInst *TheCall = emitReplacerCall(
1519 inputs, outputs, StructValues, newFunction, StructArgTy: StructTy, oldFunction, ReplIP,
1520 EntryFreq, LifetimesStart: LifetimesStart.getArrayRef(), Reloads);
1521
1522 insertReplacerCall(oldFunction, header, codeReplacer: TheCall->getParent(), outputs,
1523 Reloads, ExitWeights);
1524
1525 fixupDebugInfoPostExtraction(OldFunc&: *oldFunction, NewFunc&: *newFunction, TheCall&: *TheCall, Inputs: inputs,
1526 NewValues);
1527
1528 LLVM_DEBUG(llvm::dbgs() << "After extractCodeRegion - newFunction:\n");
1529 LLVM_DEBUG(newFunction->dump());
1530 LLVM_DEBUG(llvm::dbgs() << "After extractCodeRegion - oldFunction:\n");
1531 LLVM_DEBUG(oldFunction->dump());
1532 LLVM_DEBUG(if (AC && verifyAssumptionCache(*oldFunction, *newFunction, AC))
1533 report_fatal_error("Stale Asumption cache for old Function!"));
1534 return newFunction;
1535}
1536
/// Prepare the parent function's CFG so the region can be torn out cleanly:
/// split returns out of the region, then sever entry/exit PHI nodes so each
/// boundary value crosses the region edge exactly once. May update \p header
/// when the entry block is split.
void CodeExtractor::normalizeCFGForExtraction(BasicBlock *&header) {
  // If we have any return instructions in the region, split those blocks so
  // that the return is not in the region.
  splitReturnBlocks();

  // If we have to split PHI nodes of the entry or exit blocks, do so now.
  severSplitPHINodesOfEntry(Header&: header);

  // If a PHI in an exit block has multiple incoming values from the outlined
  // region, create a new PHI for those values within the region such that only
  // PHI itself becomes an output value, not each of its incoming values
  // individually.
  // The exit blocks must be known before the exit PHIs can be severed.
  computeExtractedFuncRetVals();
  severSplitPHINodesOfExits();
}
1552
1553void CodeExtractor::computeExtractedFuncRetVals() {
1554 ExtractedFuncRetVals.clear();
1555
1556 SmallPtrSet<BasicBlock *, 2> ExitBlocks;
1557 for (BasicBlock *Block : Blocks) {
1558 for (BasicBlock *Succ : successors(BB: Block)) {
1559 if (Blocks.count(key: Succ))
1560 continue;
1561
1562 bool IsNew = ExitBlocks.insert(Ptr: Succ).second;
1563 if (IsNew)
1564 ExtractedFuncRetVals.push_back(Elt: Succ);
1565 }
1566 }
1567}
1568
1569Type *CodeExtractor::getSwitchType() {
1570 LLVMContext &Context = Blocks.front()->getContext();
1571
1572 assert(ExtractedFuncRetVals.size() < 0xffff &&
1573 "too many exit blocks for switch");
1574 switch (ExtractedFuncRetVals.size()) {
1575 case 0:
1576 case 1:
1577 return Type::getVoidTy(C&: Context);
1578 case 2:
1579 // Conditional branch, return a bool
1580 return Type::getInt1Ty(C&: Context);
1581 default:
1582 return Type::getInt16Ty(C&: Context);
1583 }
1584}
1585
1586void CodeExtractor::emitFunctionBody(
1587 const ValueSet &inputs, const ValueSet &outputs,
1588 const ValueSet &StructValues, Function *newFunction,
1589 StructType *StructArgTy, BasicBlock *header, const ValueSet &SinkingCands,
1590 SmallVectorImpl<Value *> &NewValues) {
1591 Function *oldFunction = header->getParent();
1592 LLVMContext &Context = oldFunction->getContext();
1593
1594 // The new function needs a root node because other nodes can branch to the
1595 // head of the region, but the entry node of a function cannot have preds.
1596 BasicBlock *newFuncRoot =
1597 BasicBlock::Create(Context, Name: "newFuncRoot", Parent: newFunction);
1598
1599 // Now sink all instructions which only have non-phi uses inside the region.
1600 // Group the allocas at the start of the block, so that any bitcast uses of
1601 // the allocas are well-defined.
1602 for (auto *II : SinkingCands) {
1603 if (!isa<AllocaInst>(Val: II)) {
1604 cast<Instruction>(Val: II)->moveBefore(BB&: *newFuncRoot,
1605 I: newFuncRoot->getFirstInsertionPt());
1606 }
1607 }
1608 for (auto *II : SinkingCands) {
1609 if (auto *AI = dyn_cast<AllocaInst>(Val: II)) {
1610 AI->moveBefore(BB&: *newFuncRoot, I: newFuncRoot->getFirstInsertionPt());
1611 }
1612 }
1613
1614 Function::arg_iterator ScalarAI = newFunction->arg_begin();
1615 Argument *AggArg = StructValues.empty()
1616 ? nullptr
1617 : newFunction->getArg(i: newFunction->arg_size() - 1);
1618
1619 // Rewrite all users of the inputs in the extracted region to use the
1620 // arguments (or appropriate addressing into struct) instead.
1621 for (unsigned i = 0, e = inputs.size(), aggIdx = 0; i != e; ++i) {
1622 Value *RewriteVal;
1623 if (StructValues.contains(key: inputs[i])) {
1624 Value *Idx[2];
1625 Idx[0] = Constant::getNullValue(Ty: Type::getInt32Ty(C&: header->getContext()));
1626 Idx[1] = ConstantInt::get(Ty: Type::getInt32Ty(C&: header->getContext()), V: aggIdx);
1627 GetElementPtrInst *GEP = GetElementPtrInst::Create(
1628 PointeeType: StructArgTy, Ptr: AggArg, IdxList: Idx, NameStr: "gep_" + inputs[i]->getName(), InsertBefore: newFuncRoot);
1629 LoadInst *LoadGEP =
1630 new LoadInst(StructArgTy->getElementType(N: aggIdx), GEP,
1631 "loadgep_" + inputs[i]->getName(), newFuncRoot);
1632 // If we load pointer, we can add optional !align metadata
1633 // The existence of the !align metadata on the instruction tells
1634 // the optimizer that the value loaded is known to be aligned to
1635 // a boundary specified by the integer value in the metadata node.
1636 // Example:
1637 // %res = load ptr, ptr %input, align 8, !align !align_md_node
1638 // ^ ^
1639 // | |
1640 // alignment of %input address |
1641 // |
1642 // alignment of %res object
1643 if (StructArgTy->getElementType(N: aggIdx)->isPointerTy()) {
1644 unsigned AlignmentValue;
1645 const Triple &TargetTriple =
1646 newFunction->getParent()->getTargetTriple();
1647 const DataLayout &DL = header->getDataLayout();
1648 // Pointers without casting can provide more information about
1649 // alignment. Use pointers without casts if given target preserves
1650 // alignment information for cast the operation.
1651 if (isAlignmentPreservedForAddrCast(TargetTriple))
1652 AlignmentValue =
1653 inputs[i]->stripPointerCasts()->getPointerAlignment(DL).value();
1654 else
1655 AlignmentValue = inputs[i]->getPointerAlignment(DL).value();
1656 MDBuilder MDB(header->getContext());
1657 LoadGEP->setMetadata(
1658 KindID: LLVMContext::MD_align,
1659 Node: MDNode::get(
1660 Context&: header->getContext(),
1661 MDs: MDB.createConstant(C: ConstantInt::get(
1662 Ty: Type::getInt64Ty(C&: header->getContext()), V: AlignmentValue))));
1663 }
1664 RewriteVal = LoadGEP;
1665 ++aggIdx;
1666 } else
1667 RewriteVal = &*ScalarAI++;
1668
1669 NewValues.push_back(Elt: RewriteVal);
1670 }
1671
1672 moveCodeToFunction(newFunction);
1673
1674 for (unsigned i = 0, e = inputs.size(); i != e; ++i) {
1675 Value *RewriteVal = NewValues[i];
1676
1677 std::vector<User *> Users(inputs[i]->user_begin(), inputs[i]->user_end());
1678 for (User *use : Users)
1679 if (Instruction *inst = dyn_cast<Instruction>(Val: use))
1680 if (Blocks.count(key: inst->getParent()))
1681 inst->replaceUsesOfWith(From: inputs[i], To: RewriteVal);
1682 }
1683
1684 // Since there may be multiple exits from the original region, make the new
1685 // function return an unsigned, switch on that number. This loop iterates
1686 // over all of the blocks in the extracted region, updating any terminator
1687 // instructions in the to-be-extracted region that branch to blocks that are
1688 // not in the region to be extracted.
1689 std::map<BasicBlock *, BasicBlock *> ExitBlockMap;
1690
1691 // Iterate over the previously collected targets, and create new blocks inside
1692 // the function to branch to.
1693 for (auto P : enumerate(First&: ExtractedFuncRetVals)) {
1694 BasicBlock *OldTarget = P.value();
1695 size_t SuccNum = P.index();
1696
1697 BasicBlock *NewTarget = BasicBlock::Create(
1698 Context, Name: OldTarget->getName() + ".exitStub", Parent: newFunction);
1699 ExitBlockMap[OldTarget] = NewTarget;
1700
1701 Value *brVal = nullptr;
1702 Type *RetTy = getSwitchType();
1703 assert(ExtractedFuncRetVals.size() < 0xffff &&
1704 "too many exit blocks for switch");
1705 switch (ExtractedFuncRetVals.size()) {
1706 case 0:
1707 case 1:
1708 // No value needed.
1709 break;
1710 case 2: // Conditional branch, return a bool
1711 brVal = ConstantInt::get(Ty: RetTy, V: !SuccNum);
1712 break;
1713 default:
1714 brVal = ConstantInt::get(Ty: RetTy, V: SuccNum);
1715 break;
1716 }
1717
1718 ReturnInst::Create(C&: Context, retVal: brVal, InsertBefore: NewTarget);
1719 }
1720
1721 for (BasicBlock *Block : Blocks) {
1722 Instruction *TI = Block->getTerminator();
1723 for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
1724 if (Blocks.count(key: TI->getSuccessor(Idx: i)))
1725 continue;
1726 BasicBlock *OldTarget = TI->getSuccessor(Idx: i);
1727 // add a new basic block which returns the appropriate value
1728 BasicBlock *NewTarget = ExitBlockMap[OldTarget];
1729 assert(NewTarget && "Unknown target block!");
1730
1731 // rewrite the original branch instruction with this new target
1732 TI->setSuccessor(Idx: i, BB: NewTarget);
1733 }
1734 }
1735
1736 // Loop over all of the PHI nodes in the header and exit blocks, and change
1737 // any references to the old incoming edge to be the new incoming edge.
1738 for (BasicBlock::iterator I = header->begin(); isa<PHINode>(Val: I); ++I) {
1739 PHINode *PN = cast<PHINode>(Val&: I);
1740 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
1741 if (!Blocks.count(key: PN->getIncomingBlock(i)))
1742 PN->setIncomingBlock(i, BB: newFuncRoot);
1743 }
1744
1745 // Connect newFunction entry block to new header.
1746 UncondBrInst *BranchI = UncondBrInst::Create(Target: header, InsertBefore: newFuncRoot);
1747 applyFirstDebugLoc(oldFunction, Blocks: Blocks.getArrayRef(), BranchI);
1748
  // Store the arguments right after the definition of each output value.
  // This must happen after creating the exit stubs, to ensure that the stores
  // restoring an invoke's result are placed inside the outlined function.
1752 ScalarAI = newFunction->arg_begin();
1753 unsigned AggIdx = 0;
1754
1755 for (Value *Input : inputs) {
1756 if (StructValues.contains(key: Input))
1757 ++AggIdx;
1758 else
1759 ++ScalarAI;
1760 }
1761
1762 for (Value *Output : outputs) {
1763 // Find proper insertion point.
1764 // In case Output is an invoke, we insert the store at the beginning in the
1765 // 'normal destination' BB. Otherwise we insert the store right after
1766 // Output.
1767 BasicBlock::iterator InsertPt;
1768 if (auto *InvokeI = dyn_cast<InvokeInst>(Val: Output))
1769 InsertPt = InvokeI->getNormalDest()->getFirstInsertionPt();
1770 else if (auto *Phi = dyn_cast<PHINode>(Val: Output))
1771 InsertPt = Phi->getParent()->getFirstInsertionPt();
1772 else if (auto *OutI = dyn_cast<Instruction>(Val: Output))
1773 InsertPt = std::next(x: OutI->getIterator());
1774 else {
1775 // Globals don't need to be updated, just advance to the next argument.
1776 if (StructValues.contains(key: Output))
1777 ++AggIdx;
1778 else
1779 ++ScalarAI;
1780 continue;
1781 }
1782
1783 assert((InsertPt->getFunction() == newFunction ||
1784 Blocks.count(InsertPt->getParent())) &&
1785 "InsertPt should be in new function");
1786
1787 if (StructValues.contains(key: Output)) {
1788 assert(AggArg && "Number of aggregate output arguments should match "
1789 "the number of defined values");
1790 Value *Idx[2];
1791 Idx[0] = Constant::getNullValue(Ty: Type::getInt32Ty(C&: Context));
1792 Idx[1] = ConstantInt::get(Ty: Type::getInt32Ty(C&: Context), V: AggIdx);
1793 GetElementPtrInst *GEP = GetElementPtrInst::Create(
1794 PointeeType: StructArgTy, Ptr: AggArg, IdxList: Idx, NameStr: "gep_" + Output->getName(), InsertBefore: InsertPt);
1795 new StoreInst(Output, GEP, InsertPt);
1796 ++AggIdx;
1797 } else {
1798 assert(ScalarAI != newFunction->arg_end() &&
1799 "Number of scalar output arguments should match "
1800 "the number of defined values");
1801 new StoreInst(Output, &*ScalarAI, InsertPt);
1802 ++ScalarAI;
1803 }
1804 }
1805
1806 if (ExtractedFuncRetVals.empty()) {
1807 // Mark the new function `noreturn` if applicable. Terminators which resume
1808 // exception propagation are treated as returning instructions. This is to
1809 // avoid inserting traps after calls to outlined functions which unwind.
1810 if (none_of(Range&: Blocks, P: [](const BasicBlock *BB) {
1811 const Instruction *Term = BB->getTerminator();
1812 return isa<ReturnInst>(Val: Term) || isa<ResumeInst>(Val: Term);
1813 }))
1814 newFunction->setDoesNotReturn();
1815 }
1816}
1817
/// Emit the call to the outlined function that replaces the extracted code
/// region, together with all the surrounding glue: stack slots for scalar
/// outputs, the aggregate struct for struct-passed values, reloads of the
/// outputs after the call, lifetime markers, and the dispatch (switch /
/// conditional branch / return) on the outlined function's return value that
/// recreates the region's original exits.
///
/// \param inputs        Values defined outside the region but used inside it.
/// \param outputs       Values defined inside the region but used outside it;
///                      communicated back through memory and reloaded here.
/// \param StructValues  Subset of inputs/outputs packed into \p StructArgTy
///                      instead of being passed as scalar arguments.
/// \param ReplIP        Insertion point for the replacement block.
/// \param EntryFreq     Block frequency to assign to the replacement block.
/// \param LifetimesStart Input objects whose lifetime-start markers must be
///                      replicated around the call.
/// \param Reloads       Out-parameter: populated with one reload per entry of
///                      \p outputs, in the same order.
/// \returns the emitted call to \p newFunction.
CallInst *CodeExtractor::emitReplacerCall(
    const ValueSet &inputs, const ValueSet &outputs,
    const ValueSet &StructValues, Function *newFunction,
    StructType *StructArgTy, Function *oldFunction, BasicBlock *ReplIP,
    BlockFrequency EntryFreq, ArrayRef<Value *> LifetimesStart,
    std::vector<Value *> &Reloads) {
  LLVMContext &Context = oldFunction->getContext();
  Module *M = oldFunction->getParent();
  const DataLayout &DL = M->getDataLayout();

  // This block takes the place of the original code region.
  BasicBlock *codeReplacer =
      BasicBlock::Create(Context, Name: "codeRepl", Parent: oldFunction, InsertBefore: ReplIP);
  if (AllocationBlock)
    assert(AllocationBlock->getParent() == oldFunction &&
           "AllocationBlock is not in the same function");
  // Allocas go into the caller-specified block, or the entry block by default.
  BasicBlock *AllocaBlock =
      AllocationBlock ? AllocationBlock : &oldFunction->getEntryBlock();

  // Update the entry count of the function.
  if (BFI)
    BFI->setBlockFreq(BB: codeReplacer, Freq: EntryFreq);

  std::vector<Value *> params;

  // Scalar inputs are forwarded directly as call arguments; struct-packed
  // inputs are stored into the aggregate below instead.
  for (Value *input : inputs) {
    if (StructValues.contains(key: input))
      continue;

    params.push_back(x: input);
  }

  // Create one alloca per scalar output; the outlined function stores the
  // value through the pointer and we reload it after the call.
  std::vector<Value *> ReloadOutputs;
  for (Value *output : outputs) {
    if (StructValues.contains(key: output))
      continue;

    AllocaInst *alloca = new AllocaInst(
        output->getType(), DL.getAllocaAddrSpace(), nullptr,
        output->getName() + ".loc", AllocaBlock->getFirstInsertionPt());
    params.push_back(x: alloca);
    ReloadOutputs.push_back(x: alloca);
  }

  // Allocate the aggregate and store every struct-passed input into its slot
  // before the call. Struct-passed outputs occupy the trailing slots.
  AllocaInst *Struct = nullptr;
  if (!StructValues.empty()) {
    Struct = new AllocaInst(StructArgTy, DL.getAllocaAddrSpace(), nullptr,
                            "structArg", AllocaBlock->getFirstInsertionPt());
    if (ArgsInZeroAddressSpace && DL.getAllocaAddrSpace() != 0) {
      // The callee expects the aggregate pointer in address space 0; insert
      // an addrspacecast right after the alloca.
      auto *StructSpaceCast = new AddrSpaceCastInst(
          Struct, PointerType ::get(C&: Context, AddressSpace: 0), "structArg.ascast");
      StructSpaceCast->insertAfter(InsertPos: Struct->getIterator());
      params.push_back(x: StructSpaceCast);
    } else {
      params.push_back(x: Struct);
    }

    unsigned AggIdx = 0;
    for (Value *input : inputs) {
      if (!StructValues.contains(key: input))
        continue;

      Value *Idx[2];
      Idx[0] = Constant::getNullValue(Ty: Type::getInt32Ty(C&: Context));
      Idx[1] = ConstantInt::get(Ty: Type::getInt32Ty(C&: Context), V: AggIdx);
      GetElementPtrInst *GEP = GetElementPtrInst::Create(
          PointeeType: StructArgTy, Ptr: Struct, IdxList: Idx, NameStr: "gep_" + input->getName());
      GEP->insertInto(ParentBB: codeReplacer, It: codeReplacer->end());
      new StoreInst(input, GEP, codeReplacer);

      ++AggIdx;
    }
  }

  // Emit the call to the function. The result selects the exit block to
  // branch to when there is more than one, hence the name "targetBlock".
  CallInst *call = CallInst::Create(
      Func: newFunction, Args: params, NameStr: ExtractedFuncRetVals.size() > 1 ? "targetBlock" : "",
      InsertBefore: codeReplacer);

  // Set swifterror parameter attributes. ParamIdx tracks scalar argument
  // positions only; struct-packed inputs do not occupy argument slots.
  unsigned ParamIdx = 0;
  unsigned AggIdx = 0;
  for (auto input : inputs) {
    if (StructValues.contains(key: input)) {
      ++AggIdx;
    } else {
      if (input->isSwiftError())
        call->addParamAttr(ArgNo: ParamIdx, Kind: Attribute::SwiftError);
      ++ParamIdx;
    }
  }

  // Add debug location to the new call, if the original function has debug
  // info. In that case, the terminator of the entry block of the extracted
  // function contains the first debug location of the extracted function,
  // set in extractCodeRegion.
  if (codeReplacer->getParent()->getSubprogram()) {
    if (auto DL = newFunction->getEntryBlock().getTerminator()->getDebugLoc())
      call->setDebugLoc(DL);
  }

  // Reload the outputs passed in by reference: from the aggregate if the
  // output is struct-packed (AggIdx carries over from the input slots), or
  // from its dedicated scalar alloca otherwise.
  for (unsigned i = 0, e = outputs.size(), scalarIdx = 0; i != e; ++i) {
    Value *Output = nullptr;
    if (StructValues.contains(key: outputs[i])) {
      Value *Idx[2];
      Idx[0] = Constant::getNullValue(Ty: Type::getInt32Ty(C&: Context));
      Idx[1] = ConstantInt::get(Ty: Type::getInt32Ty(C&: Context), V: AggIdx);
      GetElementPtrInst *GEP = GetElementPtrInst::Create(
          PointeeType: StructArgTy, Ptr: Struct, IdxList: Idx, NameStr: "gep_reload_" + outputs[i]->getName());
      GEP->insertInto(ParentBB: codeReplacer, It: codeReplacer->end());
      Output = GEP;
      ++AggIdx;
    } else {
      Output = ReloadOutputs[scalarIdx];
      ++scalarIdx;
    }
    LoadInst *load =
        new LoadInst(outputs[i]->getType(), Output,
                     outputs[i]->getName() + ".reload", codeReplacer);
    Reloads.push_back(x: load);
  }

  // Now we can emit a switch statement using the call as a value. The default
  // destination is temporarily the block itself; it is fixed up below.
  SwitchInst *TheSwitch =
      SwitchInst::Create(Value: Constant::getNullValue(Ty: Type::getInt16Ty(C&: Context)),
                         Default: codeReplacer, NumCases: 0, InsertBefore: codeReplacer);
  for (auto P : enumerate(First&: ExtractedFuncRetVals)) {
    BasicBlock *OldTarget = P.value();
    size_t SuccNum = P.index();

    TheSwitch->addCase(OnVal: ConstantInt::get(Ty: Type::getInt16Ty(C&: Context), V: SuccNum),
                       Dest: OldTarget);
  }

  // Now that we've done the deed, simplify the switch instruction.
  Type *OldFnRetTy = TheSwitch->getParent()->getParent()->getReturnType();
  switch (ExtractedFuncRetVals.size()) {
  case 0:
    // There are no successors (the block containing the switch itself), which
    // means that previously this was the last part of the function, and hence
    // this should be rewritten as a `ret` or `unreachable`.
    if (newFunction->doesNotReturn()) {
      // If fn is no return, end with an unreachable terminator.
      (void)new UnreachableInst(Context, TheSwitch->getIterator());
    } else if (OldFnRetTy->isVoidTy()) {
      // We have no return value.
      ReturnInst::Create(C&: Context, retVal: nullptr,
                         InsertBefore: TheSwitch->getIterator()); // Return void
    } else if (OldFnRetTy == TheSwitch->getCondition()->getType()) {
      // Return types happen to match; return what we have.
      ReturnInst::Create(C&: Context, retVal: TheSwitch->getCondition(),
                         InsertBefore: TheSwitch->getIterator());
    } else {
      // Otherwise we must have code extracted an unwind or something, just
      // return whatever we want.
      ReturnInst::Create(C&: Context, retVal: Constant::getNullValue(Ty: OldFnRetTy),
                         InsertBefore: TheSwitch->getIterator());
    }

    TheSwitch->eraseFromParent();
    break;
  case 1:
    // Only a single destination, change the switch into an unconditional
    // branch.
    UncondBrInst::Create(Target: TheSwitch->getSuccessor(idx: 1), InsertBefore: TheSwitch->getIterator());
    TheSwitch->eraseFromParent();
    break;
  case 2:
    // Only two destinations, convert to a conditional branch.
    // Remark: This also swaps the target branches:
    // 0 -> false -> getSuccessor(2); 1 -> true -> getSuccessor(1)
    CondBrInst::Create(Cond: call, IfTrue: TheSwitch->getSuccessor(idx: 1),
                       IfFalse: TheSwitch->getSuccessor(idx: 2), InsertBefore: TheSwitch->getIterator());
    TheSwitch->eraseFromParent();
    break;
  default:
    // Otherwise, make the default destination of the switch instruction be one
    // of the other successors.
    TheSwitch->setCondition(call);
    TheSwitch->setDefaultDest(
        TheSwitch->getSuccessor(idx: ExtractedFuncRetVals.size()));
    // Remove the now-redundant last case (it became the default).
    TheSwitch->removeCase(
        I: SwitchInst::CaseIt(TheSwitch, ExtractedFuncRetVals.size() - 1));
    break;
  }

  // Insert lifetime markers around the reloads of any output values. The
  // allocas output values are stored in are only in-use in the codeRepl block.
  insertLifetimeMarkersSurroundingCall(M, LifetimesStart: ReloadOutputs, LifetimesEnd: ReloadOutputs, TheCall: call);

  // Replicate the effects of any lifetime start/end markers which referenced
  // input objects in the extraction region by placing markers around the call.
  insertLifetimeMarkersSurroundingCall(M: oldFunction->getParent(), LifetimesStart,
                                       LifetimesEnd: {}, TheCall: call);

  return call;
}
2020
2021void CodeExtractor::insertReplacerCall(
2022 Function *oldFunction, BasicBlock *header, BasicBlock *codeReplacer,
2023 const ValueSet &outputs, ArrayRef<Value *> Reloads,
2024 const DenseMap<BasicBlock *, BlockFrequency> &ExitWeights) {
2025
2026 // Rewrite branches to basic blocks outside of the loop to new dummy blocks
2027 // within the new function. This must be done before we lose track of which
2028 // blocks were originally in the code region.
2029 std::vector<User *> Users(header->user_begin(), header->user_end());
2030 for (auto &U : Users)
2031 // The BasicBlock which contains the branch is not in the region
2032 // modify the branch target to a new block
2033 if (Instruction *I = dyn_cast<Instruction>(Val: U))
2034 if (I->isTerminator() && I->getFunction() == oldFunction &&
2035 !Blocks.count(key: I->getParent()))
2036 I->replaceUsesOfWith(From: header, To: codeReplacer);
2037
2038 // When moving the code region it is sufficient to replace all uses to the
2039 // extracted function values. Since the original definition's block
2040 // dominated its use, it will also be dominated by codeReplacer's switch
2041 // which joined multiple exit blocks.
2042 for (BasicBlock *ExitBB : ExtractedFuncRetVals)
2043 for (PHINode &PN : ExitBB->phis()) {
2044 Value *IncomingCodeReplacerVal = nullptr;
2045 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
2046 // Ignore incoming values from outside of the extracted region.
2047 if (!Blocks.count(key: PN.getIncomingBlock(i)))
2048 continue;
2049
2050 // Ensure that there is only one incoming value from codeReplacer.
2051 if (!IncomingCodeReplacerVal) {
2052 PN.setIncomingBlock(i, BB: codeReplacer);
2053 IncomingCodeReplacerVal = PN.getIncomingValue(i);
2054 } else
2055 assert(IncomingCodeReplacerVal == PN.getIncomingValue(i) &&
2056 "PHI has two incompatbile incoming values from codeRepl");
2057 }
2058 }
2059
2060 for (unsigned i = 0, e = outputs.size(); i != e; ++i) {
2061 Value *load = Reloads[i];
2062 std::vector<User *> Users(outputs[i]->user_begin(), outputs[i]->user_end());
2063 for (User *U : Users) {
2064 Instruction *inst = cast<Instruction>(Val: U);
2065 if (inst->getParent()->getParent() == oldFunction)
2066 inst->replaceUsesOfWith(From: outputs[i], To: load);
2067 }
2068 }
2069
2070 // Update the branch weights for the exit block.
2071 if (BFI && ExtractedFuncRetVals.size() > 1)
2072 calculateNewCallTerminatorWeights(CodeReplacer: codeReplacer, ExitWeights, BPI);
2073}
2074
2075bool CodeExtractor::verifyAssumptionCache(const Function &OldFunc,
2076 const Function &NewFunc,
2077 AssumptionCache *AC) {
2078 for (auto AssumeVH : AC->assumptions()) {
2079 auto *I = dyn_cast_or_null<CallInst>(Val&: AssumeVH);
2080 if (!I)
2081 continue;
2082
2083 // There shouldn't be any llvm.assume intrinsics in the new function.
2084 if (I->getFunction() != &OldFunc)
2085 return true;
2086
2087 // There shouldn't be any stale affected values in the assumption cache
2088 // that were previously in the old function, but that have now been moved
2089 // to the new function.
2090 for (auto AffectedValVH : AC->assumptionsFor(V: I->getOperand(i_nocapture: 0))) {
2091 auto *AffectedCI = dyn_cast_or_null<CallInst>(Val&: AffectedValVH);
2092 if (!AffectedCI)
2093 continue;
2094 if (AffectedCI->getFunction() != &OldFunc)
2095 return true;
2096 auto *AssumedInst = cast<Instruction>(Val: AffectedCI->getOperand(i_nocapture: 0));
2097 if (AssumedInst->getFunction() != &OldFunc)
2098 return true;
2099 }
2100 }
2101 return false;
2102}
2103
/// Record \p Arg in ExcludeArgsFromAggregate so later extraction steps do not
/// pack it into the aggregate argument struct (presumably it is then passed
/// as a separate scalar argument — see how StructValues is built elsewhere).
void CodeExtractor::excludeArgFromAggregate(Value *Arg) {
  ExcludeArgsFromAggregate.insert(X: Arg);
}
2107