//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memsets.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>

using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

static cl::opt<bool> EnableMemCpyOptWithoutLibcalls(
    "enable-memcpyopt-without-libcalls", cl::Hidden,
    cl::desc("Enable memcpyopt even when libcalls are disabled"));

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemMoveInstr, "Number of memmove instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");
STATISTIC(NumCallSlot, "Number of call slot optimizations performed");
STATISTIC(NumStackMove, "Number of stack-move optimizations performed");

namespace {

/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc. When we see
/// the first store, we make a range [1, 2). The second store extends the range
/// to [0, 2). The third makes a new range [2, 3). The fourth store joins the
/// two ranges into [0, 3), which is memset'able.
struct MemsetRange {
  // Start/End - A half-open range describing the span of bytes this range
  // covers. The range is closed at the start and open at the end:
  // [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  MaybeAlign Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction *, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};

} // end anonymous namespace

static bool overreadUndefContents(MemorySSA *MSSA, MemCpyInst *MemCpy,
                                  MemIntrinsic *MemSrc, BatchAAResults &BAA);

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found at least 4 stores to merge, or the range covers at least 16
  // bytes, use memset.
  if (TheStores.size() >= 4 || End - Start >= 16)
    return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2)
    return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2)
    return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost
  // always. However, merging 2 32-bit stores isn't useful on a 32-bit
  // architecture (the memset will be split into 2 32-bit stores anyway) and
  // doing so can pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size. If so, check to see whether we will end up actually reducing the
  // number of stores used.
  unsigned Bytes = unsigned(End - Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume that the remaining bytes, if any, are stored a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32,
  // etc.
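  //
  // Illustrative arithmetic (hypothetical target whose largest legal integer
  // type is i32, so MaxIntSize = 4): seven i8 stores covering bytes [0, 7)
  // give Bytes = 7, NumPointerStores = 1 and NumByteStores = 3, and since
  // 7 > 1 + 3, the range is lowered to a memset.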
  return TheStores.size() > NumPointerStores + NumByteStores;
}

namespace {

class MemsetRanges {
  using range_iterator = SmallVectorImpl<MemsetRange>::iterator;

  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;

  const DataLayout &DL;

public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator;

  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (auto *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    TypeSize StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());
    assert(!StoreSize.isScalable() && "Can't track scalable-typed stores");
    addRange(OffsetFromFirst, StoreSize.getFixedValue(),
             SI->getPointerOperand(), SI->getAlign(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlign(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr, MaybeAlign Alignment,
                Instruction *Inst);
};

} // end anonymous namespace

/// Add a new store to the MemsetRanges data structure. This adds a new range
/// for the specified store at the specified offset, merging into existing
/// ranges as appropriate.
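///
/// For example, given existing ranges [0, 2) and [4, 6), adding a store that
/// covers [1, 5) merges everything into the single range [0, 6).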
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            MaybeAlign Alignment, Instruction *Inst) {
  int64_t End = Start + Size;

  range_iterator I = partition_point(
      Ranges, [=](const MemsetRange &O) { return O.End < Start; });

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End. If End < I->Start or I == E, then we need
  // to insert a new range. Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start = Start;
    R.End = End;
    R.StartPtr = Ptr;
    R.Alignment = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start, so the store
  // overlaps the range but is not entirely contained within it.

  // See if this store extends the start of the range. In this case, it
  // couldn't possibly cause it to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start. Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOptLegacyPass Pass
//===----------------------------------------------------------------------===//

// Check that V is either not accessible by the caller, or unwinding cannot
// occur between Start and End.
static bool mayBeVisibleThroughUnwinding(Value *V, Instruction *Start,
                                         Instruction *End) {
  assert(Start->getParent() == End->getParent() && "Must be in same block");
  // The function can't unwind, so the object also can't be visible through
  // unwinding.
  if (Start->getFunction()->doesNotThrow())
    return false;

  // Object is not visible on unwind.
  // TODO: Support RequiresNoCaptureBeforeUnwind case.
  bool RequiresNoCaptureBeforeUnwind;
  if (isNotVisibleOnUnwind(getUnderlyingObject(V),
                           RequiresNoCaptureBeforeUnwind) &&
      !RequiresNoCaptureBeforeUnwind)
    return false;

  // Check whether there are any unwinding instructions in the range.
  return any_of(make_range(Start->getIterator(), End->getIterator()),
                [](const Instruction &I) { return I.mayThrow(); });
}

void MemCpyOptPass::eraseInstruction(Instruction *I) {
  MSSAU->removeMemoryAccess(I);
  EEA->removeInstruction(I);
  I->eraseFromParent();
}

// Check for mod or ref of Loc between Start and End, excluding both
// boundaries. Start and End must be in the same block. If
// SkippedLifetimeStart is provided, skip over one clobbering lifetime.start
// intrinsic and store it inside SkippedLifetimeStart.
static bool accessedBetween(BatchAAResults &AA, MemoryLocation Loc,
                            const MemoryUseOrDef *Start,
                            const MemoryUseOrDef *End,
                            Instruction **SkippedLifetimeStart = nullptr) {
  assert(Start->getBlock() == End->getBlock() && "Only local supported");
  for (const MemoryAccess &MA :
       make_range(++Start->getIterator(), End->getIterator())) {
    Instruction *I = cast<MemoryUseOrDef>(MA).getMemoryInst();
    if (isModOrRefSet(AA.getModRefInfo(I, Loc))) {
      auto *II = dyn_cast<IntrinsicInst>(I);
      if (II && II->getIntrinsicID() == Intrinsic::lifetime_start &&
          SkippedLifetimeStart && !*SkippedLifetimeStart) {
        *SkippedLifetimeStart = I;
        continue;
      }

      return true;
    }
  }
  return false;
}

// Check for mod of Loc between Start and End, excluding both boundaries.
// Start and End can be in different blocks.
static bool writtenBetween(MemorySSA *MSSA, BatchAAResults &AA,
                           MemoryLocation Loc, const MemoryUseOrDef *Start,
                           const MemoryUseOrDef *End) {
  if (isa<MemoryUse>(End)) {
    // For MemoryUses, getClobberingMemoryAccess may skip non-clobbering
    // writes. Manually check read accesses between Start and End, if they are
    // in the same block, for clobbers. Otherwise assume Loc is clobbered.
    return Start->getBlock() != End->getBlock() ||
           any_of(
               make_range(std::next(Start->getIterator()), End->getIterator()),
               [&AA, Loc](const MemoryAccess &Acc) {
                 if (isa<MemoryUse>(&Acc))
                   return false;
                 Instruction *AccInst =
                     cast<MemoryUseOrDef>(&Acc)->getMemoryInst();
                 return isModSet(AA.getModRefInfo(AccInst, Loc));
               });
  }

  // TODO: Only walk until we hit Start.
  MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
      End->getDefiningAccess(), Loc, AA);
  return !MSSA->dominates(Clobber, Start);
}

/// When scanning forward over instructions, we look for some other patterns
/// to fold away. In particular, this looks for stores to neighboring
/// locations of memory. If it sees enough consecutive ones, it attempts to
/// merge them together into a memcpy/memset.
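///
/// For example (illustrative IR), four adjacent i8 stores of 0 starting at %p
/// may become a single
///   call void @llvm.memset.p0.i64(ptr %p, i8 0, i64 4, i1 false)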
Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
                                                 Value *StartPtr,
                                                 Value *ByteVal) {
  const DataLayout &DL = StartInst->getDataLayout();

  // We can't track scalable types.
  if (auto *SI = dyn_cast<StoreInst>(StartInst))
    if (DL.getTypeStoreSize(SI->getOperand(0)->getType()).isScalable())
      return nullptr;

  // Okay, so we now have a single store of a splattable value. Scan to find
  // all subsequent stores of the same value at offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous
  // blocks are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI(StartInst);

  // Keeps track of the last memory use or def before the insertion point for
  // the new memset. The new MemoryDef for the inserted memsets will be
  // inserted after MemInsertPoint.
  MemoryUseOrDef *MemInsertPoint = nullptr;
  for (++BI; !BI->isTerminator(); ++BI) {
    auto *CurrentAcc =
        cast_or_null<MemoryUseOrDef>(MSSA->getMemoryAccess(&*BI));
    if (CurrentAcc)
      MemInsertPoint = CurrentAcc;

    // Calls that only access inaccessible memory do not block merging
    // accessible stores.
    if (auto *CB = dyn_cast<CallBase>(BI)) {
      if (CB->onlyAccessesInaccessibleMemory())
        continue;
    }

    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out. We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (auto *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple())
        break;

      Value *StoredVal = NextStore->getValueOperand();

      // Don't convert stores of non-integral pointer types to memsets (which
      // store integers).
      if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
        break;

      // We can't track ranges involving scalable types.
      if (DL.getTypeStoreSize(StoredVal->getType()).isScalable())
        break;

      // Check to see if this stored value is of the same byte-splattable
      // value.
      Value *StoredByte = isBytewiseValue(StoredVal, DL);
      // We could blindly merge this store into `StartInst` when it stores an
      // undef value, but we don't, because:
      // 1. `StartInst` can be removed anyway since it stores an `undef`.
      // 2. The resulting memset would be much larger than it needs to be.
      if (ByteVal != StoredByte)
        break;

      // Check to see if this store is to a constant offset from the start
      // ptr.
      std::optional<int64_t> Offset =
          NextStore->getPointerOperand()->getPointerOffsetFrom(StartPtr, DL);
      if (!Offset)
        break;

      Ranges.addStore(*Offset, NextStore);
    } else {
      auto *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start
      // ptr.
      std::optional<int64_t> Offset =
          MSI->getDest()->getPointerOffsetFrom(StartPtr, DL);
      if (!Offset)
        break;

      Ranges.addMemSet(*Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in. This is a very common case, of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well. We try to avoid this unless there is at least something
  // interesting, as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block. This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(&*BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memsets for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (const MemsetRange &Range : Ranges) {
    if (Range.TheStores.size() == 1)
      continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this! Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End - Range.Start,
                                   Range.Alignment);
    AMemSet->mergeDIAssignID(Range.TheStores);

    LLVM_DEBUG(dbgs() << "Replace stores:\n"; for (Instruction *SI
                                                   : Range.TheStores) dbgs()
                                              << *SI << '\n';
               dbgs() << "With: " << *AMemSet << '\n');
    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    auto *NewDef = cast<MemoryDef>(
        MemInsertPoint->getMemoryInst() == &*BI
            ? MSSAU->createMemoryAccessBefore(AMemSet, nullptr, MemInsertPoint)
            : MSSAU->createMemoryAccessAfter(AMemSet, nullptr, MemInsertPoint));
    MSSAU->insertDef(NewDef, /*RenameUses=*/true);
    MemInsertPoint = NewDef;

    // Zap all the stores.
    for (Instruction *SI : Range.TheStores)
      eraseInstruction(SI);

    ++NumMemSetInfer;
  }

  return AMemSet;
}

// This method tries to lift a store instruction before position P.
// It lifts the store, its arguments, and anything else that may alias with
// them and therefore must be lifted as well.
// The method returns true if it was successful.
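//
// For example (illustrative), given
//   %v = load i32, ptr %src
//   store i32 1, ptr %other   ; may write %src, so P is this instruction
//   store i32 %v, ptr %dst
// the final store (and anything it depends on) is lifted above P so that the
// load/store pair can be rewritten as a memcpy/memmove at P.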
bool MemCpyOptPass::moveUp(StoreInst *SI, Instruction *P, const LoadInst *LI) {
  // If the store aliases this position, bail out early.
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  if (isModOrRefSet(AA->getModRefInfo(P, StoreLoc)))
    return false;

  // Keep track of the arguments of all instructions we plan to lift
  // so we can make sure to lift them as well if appropriate.
  DenseSet<Instruction *> Args;
  auto AddArg = [&](Value *Arg) {
    auto *I = dyn_cast<Instruction>(Arg);
    if (I && I->getParent() == SI->getParent()) {
      // Cannot hoist a user of P above P.
      if (I == P)
        return false;
      Args.insert(I);
    }
    return true;
  };
  if (!AddArg(SI->getPointerOperand()))
    return false;

  // Instructions to lift before P.
  SmallVector<Instruction *, 8> ToLift{SI};

  // Memory locations of lifted instructions.
  SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};

  // Lifted calls.
  SmallVector<const CallBase *, 8> Calls;

  const MemoryLocation LoadLoc = MemoryLocation::get(LI);

  for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
    auto *C = &*I;

    // Make sure hoisting does not perform a store that was not guaranteed to
    // happen.
    if (!isGuaranteedToTransferExecutionToSuccessor(C))
      return false;

    bool MayAlias = isModOrRefSet(AA->getModRefInfo(C, std::nullopt));

    bool NeedLift = false;
    if (Args.erase(C))
      NeedLift = true;
    else if (MayAlias) {
      NeedLift = llvm::any_of(MemLocs, [C, this](const MemoryLocation &ML) {
        return isModOrRefSet(AA->getModRefInfo(C, ML));
      });

      if (!NeedLift)
        NeedLift = llvm::any_of(Calls, [C, this](const CallBase *Call) {
          return isModOrRefSet(AA->getModRefInfo(C, Call));
        });
    }

    if (!NeedLift)
      continue;

    if (MayAlias) {
      // Since LI is implicitly moved downwards past the lifted instructions,
      // none of them may modify its source.
      if (isModSet(AA->getModRefInfo(C, LoadLoc)))
        return false;
      else if (const auto *Call = dyn_cast<CallBase>(C)) {
        // If we can't lift this before P, it's game over.
        if (isModOrRefSet(AA->getModRefInfo(P, Call)))
          return false;

        Calls.push_back(Call);
      } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
        // If we can't lift this before P, it's game over.
        auto ML = MemoryLocation::get(C);
        if (isModOrRefSet(AA->getModRefInfo(P, ML)))
          return false;

        MemLocs.push_back(ML);
      } else
        // We don't know how to lift this instruction.
        return false;
    }

    ToLift.push_back(C);
    for (Value *Op : C->operands())
      if (!AddArg(Op))
        return false;
  }

  // Find the MSSA insertion point. Normally P will always have a
  // corresponding memory access before which we can insert. However, with
  // non-standard AA pipelines, there may be a mismatch between AA and MSSA,
  // in which case we will scan for a memory access before P. In either case,
  // we know for sure that at least the load will have a memory access.
  // TODO: Simplify this once P will be determined by MSSA, in which case the
  // discrepancy can no longer occur.
  MemoryUseOrDef *MemInsertPoint = nullptr;
  if (MemoryUseOrDef *MA = MSSA->getMemoryAccess(P)) {
    MemInsertPoint = cast<MemoryUseOrDef>(--MA->getIterator());
  } else {
    const Instruction *ConstP = P;
    for (const Instruction &I : make_range(++ConstP->getReverseIterator(),
                                           ++LI->getReverseIterator())) {
      if (MemoryUseOrDef *MA = MSSA->getMemoryAccess(&I)) {
        MemInsertPoint = MA;
        break;
      }
    }
  }

  // We made it, we need to lift.
  for (auto *I : llvm::reverse(ToLift)) {
    LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
    I->moveBefore(P->getIterator());
    assert(MemInsertPoint && "Must have found insert point");
    if (MemoryUseOrDef *MA = MSSA->getMemoryAccess(I)) {
      MSSAU->moveAfter(MA, MemInsertPoint);
      MemInsertPoint = MA;
    }
  }

  return true;
}

bool MemCpyOptPass::processStoreOfLoad(StoreInst *SI, LoadInst *LI,
                                       const DataLayout &DL,
                                       BasicBlock::iterator &BBI) {
  if (!LI->isSimple() || !LI->hasOneUse() || LI->getParent() != SI->getParent())
    return false;

  BatchAAResults BAA(*AA, EEA);
  auto *T = LI->getType();
  // Don't introduce calls to memcpy/memmove intrinsics out of thin air if
  // the corresponding libcalls are not available.
  // TODO: We should really distinguish between libcall availability and
  // our ability to introduce intrinsics.
  if (T->isAggregateType() &&
      (EnableMemCpyOptWithoutLibcalls ||
       (TLI->has(LibFunc_memcpy) && TLI->has(LibFunc_memmove)))) {
    MemoryLocation LoadLoc = MemoryLocation::get(LI);

    // We use alias analysis to check if an instruction may store to
    // the memory we load from in between the load and the store. If
    // such an instruction is found, we try to promote there instead
    // of at the store position.
    // TODO: Can use MSSA for this.
    Instruction *P = SI;
    for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
      if (isModSet(BAA.getModRefInfo(&I, LoadLoc))) {
        P = &I;
        break;
      }
    }

    // If we found an instruction that may write to the loaded memory,
    // we can try to promote at this position instead of the store
    // position if nothing aliases the store memory after this and the store
    // destination is not in the range.
    if (P == SI || moveUp(SI, P, LI)) {
      // If we load from memory that may alias the memory we store to,
      // memmove must be used to preserve semantics. If not, memcpy can
      // be used. Also, if we load from constant memory, memcpy can be used
      // as the constant memory won't be modified.
      bool UseMemMove = false;
      if (isModSet(AA->getModRefInfo(SI, LoadLoc)))
        UseMemMove = true;

      IRBuilder<> Builder(P);
      Value *Size =
          Builder.CreateTypeSize(Builder.getInt64Ty(), DL.getTypeStoreSize(T));
      Instruction *M;
      if (UseMemMove)
        M = Builder.CreateMemMove(SI->getPointerOperand(), SI->getAlign(),
                                  LI->getPointerOperand(), LI->getAlign(),
                                  Size);
      else
        M = Builder.CreateMemCpy(SI->getPointerOperand(), SI->getAlign(),
                                 LI->getPointerOperand(), LI->getAlign(), Size);
      M->copyMetadata(*SI, LLVMContext::MD_DIAssignID);

      LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => " << *M
                        << "\n");

      auto *LastDef = cast<MemoryDef>(MSSA->getMemoryAccess(SI));
      auto *NewAccess = MSSAU->createMemoryAccessAfter(M, nullptr, LastDef);
      MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);

      eraseInstruction(SI);
      eraseInstruction(LI);
      ++NumMemCpyInstr;

      // Make sure we do not invalidate the iterator.
      BBI = M->getIterator();
      return true;
    }
  }

  // Detect cases where we're performing call slot forwarding, but
  // happen to be using a load-store pair to implement it, rather than
  // a memcpy.
  auto GetCall = [&]() -> CallInst * {
    // We defer this expensive clobber walk until the cheap checks
    // have been done on the source inside performCallSlotOptzn.
    if (auto *LoadClobber = dyn_cast<MemoryUseOrDef>(
            MSSA->getWalker()->getClobberingMemoryAccess(LI, BAA)))
      return dyn_cast_or_null<CallInst>(LoadClobber->getMemoryInst());
    return nullptr;
  };

  bool Changed = performCallSlotOptzn(
      LI, SI, SI->getPointerOperand()->stripPointerCasts(),
      LI->getPointerOperand()->stripPointerCasts(),
      DL.getTypeStoreSize(SI->getOperand(0)->getType()),
      std::min(SI->getAlign(), LI->getAlign()), BAA, GetCall);
  if (Changed) {
    eraseInstruction(SI);
    eraseInstruction(LI);
    ++NumMemCpyInstr;
    return true;
  }

  // If this is a load-store pair from a stack slot to a stack slot, we
  // might be able to perform the stack-move optimization just as we do for
  // memcpys from an alloca to an alloca.
  if (performStackMoveOptzn(LI, SI, SI->getPointerOperand(),
                            LI->getPointerOperand(), DL.getTypeStoreSize(T),
                            BAA)) {
    // Avoid invalidating the iterator.
    BBI = SI->getNextNode()->getIterator();
    eraseInstruction(SI);
    eraseInstruction(LI);
    ++NumMemCpyInstr;
    return true;
  }

  return false;
}

bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple())
    return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach how to propagate the !nontemporal metadata to
  // memset calls. However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getDataLayout();

  Value *StoredVal = SI->getValueOperand();

  // Not all the transforms below are correct for non-integral pointers; bail
  // out until we've audited the individual pieces.
  if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
    return false;

  // Load-to-store forwarding can be interpreted as a memcpy.
  if (auto *LI = dyn_cast<LoadInst>(StoredVal))
    return processStoreOfLoad(SI, LI, DL, BBI);

  // The following code creates memset intrinsics out of thin air. Don't do
  // this if the corresponding libfunc is not available.
  // TODO: We should really distinguish between libcall availability and
  // our ability to introduce intrinsics.
  if (!(TLI->has(LibFunc_memset) || EnableMemCpyOptWithoutLibcalls))
    return false;

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset. Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset a byte
  // at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  Value *V = SI->getOperand(0);
  Value *ByteVal = isBytewiseValue(V, DL);
  if (!ByteVal)
    return false;

  if (Instruction *I =
          tryMergingIntoMemset(SI, SI->getPointerOperand(), ByteVal)) {
    BBI = I->getIterator(); // Don't invalidate the iterator.
    return true;
  }

  // If we have an aggregate, we try to promote it to memset regardless
  // of opportunity for merging, as it can expose optimization opportunities
  // in subsequent passes.
  auto *T = V->getType();
  if (!T->isAggregateType())
    return false;

  TypeSize Size = DL.getTypeStoreSize(T);
  if (Size.isScalable())
    return false;

  IRBuilder<> Builder(SI);
  auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size,
                                 SI->getAlign());
  M->copyMetadata(*SI, LLVMContext::MD_DIAssignID);

  LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");

  // The newly inserted memset is immediately overwritten by the original
  // store, so we do not need to rename uses.
  auto *StoreDef = cast<MemoryDef>(MSSA->getMemoryAccess(SI));
  auto *NewAccess = MSSAU->createMemoryAccessBefore(M, nullptr, StoreDef);
  MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/false);

  eraseInstruction(SI);
  NumMemSetInfer++;

  // Make sure we do not invalidate the iterator.
  BBI = M->getIterator();
  return true;
}

bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I =
            tryMergingIntoMemset(MSI, MSI->getDest(), MSI->getValue())) {
      BBI = I->getIterator(); // Don't invalidate the iterator.
      return true;
    }
  return false;
}

/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
                                         Instruction *cpyStore, Value *cpyDest,
                                         Value *cpySrc, TypeSize cpySize,
                                         Align cpyDestAlign,
                                         BatchAAResults &BAA,
                                         std::function<CallInst *()> GetC) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check
  // that src only holds uninitialized values at the moment of the call,
  // meaning that the memcpy can be discarded rather than moved.

  // We can't optimize scalable types.
  if (cpySize.isScalable())
    return false;

  // Require that src be an alloca. This simplifies the reasoning
  // considerably.
  auto *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  const DataLayout &DL = cpyLoad->getDataLayout();
  // We can't optimize scalable types or variable-length allocas.
  std::optional<TypeSize> SrcAllocaSize = srcAlloca->getAllocationSize(DL);
  if (!SrcAllocaSize || SrcAllocaSize->isScalable())
    return false;
  uint64_t srcSize = SrcAllocaSize->getFixedValue();

  if (cpySize < srcSize)
    return false;

  CallInst *C = GetC();
  if (!C)
    return false;

  // Lifetime marks shouldn't be operated on.
  if (Function *F = C->getCalledFunction())
    if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
      return false;

  if (C->getParent() != cpyStore->getParent()) {
    LLVM_DEBUG(dbgs() << "Call Slot: block local restriction\n");
    return false;
  }

  MemoryLocation DestLoc =
      isa<StoreInst>(cpyStore)
          ? MemoryLocation::get(cpyStore)
          : MemoryLocation::getForDest(cast<MemCpyInst>(cpyStore));

  // Check that nothing touches the dest of the copy between
  // the call and the store/memcpy.
  Instruction *SkippedLifetimeStart = nullptr;
  if (accessedBetween(BAA, DestLoc, MSSA->getMemoryAccess(C),
                      MSSA->getMemoryAccess(cpyStore), &SkippedLifetimeStart)) {
    LLVM_DEBUG(dbgs() << "Call Slot: Dest pointer modified after call\n");
    return false;
  }

  // If we need to move a lifetime.start above the call, make sure that we can
  // actually do so. If the argument is bitcasted for example, we would have to
  // move the bitcast as well, which we don't handle.
  if (SkippedLifetimeStart) {
    auto *LifetimeArg =
        dyn_cast<Instruction>(SkippedLifetimeStart->getOperand(0));
    if (LifetimeArg && LifetimeArg->getParent() == C->getParent() &&
        C->comesBefore(LifetimeArg))
      return false;
  }

  // Check that storing to the first srcSize bytes of dest will not cause a
  // trap or data race.
  bool ExplicitlyDereferenceableOnly;
  if (!isWritableObject(getUnderlyingObject(cpyDest),
                        ExplicitlyDereferenceableOnly) ||
      !isDereferenceableAndAlignedPointer(cpyDest, Align(1), APInt(64, cpySize),
                                          DL, C, AC, DT)) {
    LLVM_DEBUG(dbgs() << "Call Slot: Dest pointer not dereferenceable\n");
    return false;
  }

  // Make sure that nothing can observe cpyDest being written early. There are
  // a number of cases to consider:
  //  1. cpyDest cannot be accessed between C and cpyStore as a precondition of
  //     the transform.
  //  2. C itself may not access cpyDest (prior to the transform). This is
  //     checked further below.
  //  3. If cpyDest is accessible to the caller of this function (potentially
  //     captured and not based on an alloca), we need to ensure that we cannot
  //     unwind between C and cpyStore. This is checked here.
  //  4. If cpyDest is potentially captured, there may be accesses to it from
  //     another thread. In this case, we need to check that cpyStore is
  //     guaranteed to be executed if C is. As it is a non-atomic access, it
  //     renders accesses from other threads undefined.
  //     TODO: This is currently not checked.
  if (mayBeVisibleThroughUnwinding(cpyDest, C, cpyStore)) {
    LLVM_DEBUG(dbgs() << "Call Slot: Dest may be visible through unwinding\n");
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  Align srcAlign = srcAlloca->getAlign();
  bool isDestSufficientlyAligned = srcAlign <= cpyDestAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest)) {
    LLVM_DEBUG(dbgs() << "Call Slot: Dest not sufficiently aligned\n");
    return false;
  }

  // Check that src is not accessed except via the call and the memcpy. This
  // guarantees that it holds only undefined values when passed in (so the
  // final memcpy can be dropped), that it is not read or written between the
  // call and the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User *, 8> srcUseList(srcAlloca->users());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<AddrSpaceCastInst>(U)) {
      append_range(srcUseList, U->users());
      continue;
    }
    if (isa<LifetimeIntrinsic>(U))
      continue;

    if (U != C && U != cpyLoad) {
      LLVM_DEBUG(dbgs() << "Call slot: Source accessed by " << *U << "\n");
      return false;
    }
  }

  // Check whether src is captured by the called function, in which case there
  // may be further indirect uses of src.
  bool SrcIsCaptured = any_of(C->args(), [&](Use &U) {
    return U->stripPointerCasts() == cpySrc &&
           !C->doesNotCapture(C->getArgOperandNo(&U));
  });

  // If src is captured, then check whether there are any potential uses of
  // src through the captured pointer before the lifetime of src ends, either
  // due to a lifetime.end or a return from the function.
  if (SrcIsCaptured) {
    // Check that dest is not captured before/at the call. We have already
    // checked that src is not captured before it. If either had been
    // captured, then the call might be comparing the argument against the
    // captured dest or src pointer.
    Value *DestObj = getUnderlyingObject(cpyDest);
    if (!isIdentifiedFunctionLocal(DestObj) ||
        PointerMayBeCapturedBefore(DestObj, /* ReturnCaptures */ true, C, DT,
                                   /* IncludeI */ true))
      return false;

    MemoryLocation SrcLoc =
        MemoryLocation(srcAlloca, LocationSize::precise(srcSize));
    for (Instruction &I :
         make_range(++C->getIterator(), C->getParent()->end())) {
      // Lifetime of srcAlloca ends at lifetime.end.
      if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_end &&
            II->getArgOperand(0) == srcAlloca)
          break;
      }

      // Lifetime of srcAlloca ends at return.
      if (isa<ReturnInst>(&I))
        break;

      // Ignore the direct read of src in the load.
      if (&I == cpyLoad)
        continue;

      // Check whether this instruction may mod/ref src through the captured
      // pointer (we have already checked for direct mod/refs in the loop
      // above). Also bail if we hit a terminator, as we don't want to scan
      // into other blocks.
      if (isModOrRefSet(BAA.getModRefInfo(&I, SrcLoc)) || I.isTerminator())
        return false;
    }
  }

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  bool NeedMoveGEP = false;
  if (!DT->dominates(cpyDest, C)) {
    // Support moving a constant-index GEP before the call.
    auto *GEP = dyn_cast<GetElementPtrInst>(cpyDest);
    if (GEP && GEP->hasAllConstantIndices() &&
        DT->dominates(GEP->getPointerOperand(), C))
      NeedMoveGEP = true;
    else
      return false;
  }

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest. We rely on AA to figure this out for us.
  MemoryLocation DestWithSrcSize(cpyDest, LocationSize::precise(srcSize));
  ModRefInfo MR = BAA.getModRefInfo(C, DestWithSrcSize);
  // If necessary, perform additional analysis.
  if (isModOrRefSet(MR))
    MR = BAA.callCapturesBefore(C, DestWithSrcSize, DT);
  if (isModOrRefSet(MR))
    return false;

  // We can't create address space casts here because we don't know if they're
  // safe for the target.
  if (cpySrc->getType() != cpyDest->getType())
    return false;
  for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI)
    if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc &&
        cpySrc->getType() != C->getArgOperand(ArgI)->getType())
      return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI)
    if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc) {
      changedArgument = true;
      C->setArgOperand(ArgI, cpyDest);
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its
  // alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  if (NeedMoveGEP) {
    auto *GEP = dyn_cast<GetElementPtrInst>(cpyDest);
    GEP->moveBefore(C->getIterator());
  }

  if (SkippedLifetimeStart) {
    SkippedLifetimeStart->moveBefore(C->getIterator());
    MSSAU->moveBefore(MSSA->getMemoryAccess(SkippedLifetimeStart),
                      MSSA->getMemoryAccess(C));
  }

  combineAAMetadata(C, cpyLoad);
  if (cpyLoad != cpyStore)
    combineAAMetadata(C, cpyStore);

  ++NumCallSlot;
  return true;
}

/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
/// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
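///
/// For example (illustrative):
///   memcpy(b <- a, n)
///   memcpy(c <- b, n)
/// can, when 'a' is not written in between, be rewritten so the second copy
/// reads from 'a' directly: memcpy(c <- a, n). The first copy (and 'b') may
/// then become dead.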
bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
                                                  MemCpyInst *MDep,
                                                  BatchAAResults &BAA) {
  // We can only optimize non-volatile memcpys.
  if (MDep->isVolatile())
    return false;

  // If the dep instruction is reading from our current input, then it is a
  // noop transfer and substituting the input won't change this instruction.
  // Just ignore the input and let someone else zap MDep. This handles cases
  // like:
  //   memcpy(a <- a)
  //   memcpy(b <- a)
  // This also avoids infinite loops.
  if (BAA.isMustAlias(MDep->getDest(), MDep->getSource()))
    return false;

  int64_t MForwardOffset = 0;
  const DataLayout &DL = M->getModule()->getDataLayout();
  // We can only transform memcpys where the dest of one is the source of the
  // other, or where they are offset by a known amount.
  if (M->getSource() != MDep->getDest()) {
    std::optional<int64_t> Offset =
        M->getSource()->getPointerOffsetFrom(MDep->getDest(), DL);
    if (!Offset || *Offset < 0)
      return false;
    MForwardOffset = *Offset;
  }

  Value *CopyLength = M->getLength();

  // The lengths of the memcpys must be the same, or the preceding one must be
  // larger than the following one, or the contents of the overread must be
  // undefined bytes of a defined size.
  if (MForwardOffset != 0 || MDep->getLength() != CopyLength) {
    auto *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
    auto *MLen = dyn_cast<ConstantInt>(CopyLength);
    // This could be converted to a runtime test (%CopyLength =
    // min(max(0, MDepLen - MForwardOffset), MLen)), but it is
    // unclear if that is useful.
    if (!MDepLen || !MLen)
      return false;
    if (MDepLen->getZExtValue() < MLen->getZExtValue() + MForwardOffset) {
      if (!overreadUndefContents(MSSA, M, MDep, BAA))
        return false;
      if (MDepLen->getZExtValue() <= (uint64_t)MForwardOffset)
        return false; // Should not reach here (there is obviously no aliasing
                      // with MDep), so just bail in case it had incomplete
                      // info somehow.
      CopyLength = ConstantInt::get(CopyLength->getType(),
                                    MDepLen->getZExtValue() - MForwardOffset);
    }
  }

  IRBuilder<> Builder(M);
  auto *CopySource = MDep->getSource();
  Instruction *NewCopySource = nullptr;
  llvm::scope_exit CleanupOnRet([&] {
    if (NewCopySource && NewCopySource->use_empty())
      // Safety: It's safe here because we will only allocate more instructions
      // after finishing all BatchAA queries, but we have to be careful if we
      // want to do something like this in another place. Then we'd probably
      // have to delay instruction removal until all transforms on an
      // instruction have finished.
      eraseInstruction(NewCopySource);
  });
  MaybeAlign CopySourceAlign = MDep->getSourceAlign();
  auto MCopyLoc = MemoryLocation::getForSource(MDep);
  // Truncate the size of the MDep access to just the bytes read.
  if (MDep->getLength() != CopyLength) {
    auto *ConstLength = cast<ConstantInt>(CopyLength);
    MCopyLoc = MCopyLoc.getWithNewSize(
        LocationSize::precise(ConstLength->getZExtValue()));
  }

  // When the forwarding offset is greater than 0, we transform
  //   memcpy(d1 <- s1)
  //   memcpy(d2 <- d1+o)
  // to
  //   memcpy(d2 <- s1+o)
  if (MForwardOffset > 0) {
    // The copy destination of `M` may itself serve as the source of the copy.
    std::optional<int64_t> MDestOffset =
        M->getRawDest()->getPointerOffsetFrom(MDep->getRawSource(), DL);
    if (MDestOffset == MForwardOffset)
      CopySource = M->getDest();
    else {
      CopySource = Builder.CreateInBoundsPtrAdd(
          CopySource, Builder.getInt64(MForwardOffset));
      NewCopySource = dyn_cast<Instruction>(CopySource);
    }
    // We need to update `MCopyLoc` if an offset exists.
    MCopyLoc = MCopyLoc.getWithNewPtr(CopySource);
    if (CopySourceAlign)
      CopySourceAlign = commonAlignment(*CopySourceAlign, MForwardOffset);
  }

  // Verify that the copied-from memory doesn't change in between the two
  // transfers. For example, in:
  //   memcpy(a <- b)
  //   *b = 42;
  //   memcpy(c <- a)
  // it would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination
  // "c", then we could still perform the xform by moving M up to the first
  // memcpy.
  if (writtenBetween(MSSA, BAA, MCopyLoc, MSSA->getMemoryAccess(MDep),
                     MSSA->getMemoryAccess(M)))
    return false;

  // No need to create `memcpy(a <- a)`.
  if (BAA.isMustAlias(M->getDest(), CopySource)) {
    // Remove the instruction we're replacing.
    eraseInstruction(M);
    ++NumMemCpyInstr;
    return true;
  }

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap. In addition, if the source of the first
  // points to constant memory, they won't overlap by definition. Otherwise,
  // we still want to eliminate the intermediate value, but we have to
  // generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (isModSet(BAA.getModRefInfo(M, MemoryLocation::getForSource(MDep)))) {
    // Don't convert llvm.memcpy.inline into memmove because memmove can be
    // lowered as a call, and that is not allowed for llvm.memcpy.inline (and
    // there is no inline version of llvm.memmove).
    if (M->isForceInlined())
      return false;
    UseMemMove = true;
  }

  // If all checks passed, then we can transform M.
  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n"
                    << *MDep << '\n'
                    << *M << '\n');

  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  Instruction *NewM;
  if (UseMemMove)
    NewM = Builder.CreateMemMove(M->getDest(), M->getDestAlign(), CopySource,
                                 CopySourceAlign, CopyLength, M->isVolatile());
  else if (M->isForceInlined())
    // llvm.memcpy may be promoted to llvm.memcpy.inline, but the converse is
    // never allowed since that would allow the latter to be lowered as a call
    // to an external function.
    NewM = Builder.CreateMemCpyInline(M->getDest(), M->getDestAlign(),
                                      CopySource, CopySourceAlign, CopyLength,
                                      M->isVolatile());
  else
    NewM = Builder.CreateMemCpy(M->getDest(), M->getDestAlign(), CopySource,
                                CopySourceAlign, CopyLength, M->isVolatile());

  NewM->copyMetadata(*M, LLVMContext::MD_DIAssignID);

  assert(isa<MemoryDef>(MSSA->getMemoryAccess(M)));
  auto *LastDef = cast<MemoryDef>(MSSA->getMemoryAccess(M));
  auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, nullptr, LastDef);
  MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);

  // Remove the instruction we're replacing.
  eraseInstruction(M);
  ++NumMemCpyInstr;
  return true;
}

/// We've found that the (upward scanning) memory dependence of \p MemCpy is
/// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that
/// weren't copied over by \p MemCpy.
///
/// In other words, transform:
/// \code
///   memset(dst, c, dst_size);
///   ...
///   memcpy(dst, src, src_size);
/// \endcode
/// into:
/// \code
///   ...
///   memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
///   memcpy(dst, src, src_size);
/// \endcode
///
/// The memset is sunk to just before the memcpy to ensure that src_size is
/// present when emitting the simplified memset.
bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
                                                  MemSetInst *MemSet,
                                                  BatchAAResults &BAA) {
  // We can only transform memset/memcpy with the same destination.
  if (!BAA.isMustAlias(MemSet->getDest(), MemCpy->getDest()))
    return false;

  // Don't perform the transform if src_size may be zero. In that case, the
  // transform is essentially a complex no-op and may lead to an infinite
  // loop if BasicAA is smart enough to understand that dst and dst + src_size
  // are still MustAlias after the transform.
  Value *SrcSize = MemCpy->getLength();
  if (!isKnownNonZero(SrcSize,
                      SimplifyQuery(MemCpy->getDataLayout(), DT, AC, MemCpy)))
    return false;

  // Check that src and dst of the memcpy aren't the same. While memcpy
  // operands cannot partially overlap, exact equality is allowed.
  if (isModSet(
          BAA.getModRefInfo(MemCpy, MemoryLocation::getForSource(MemCpy))))
    return false;

  // We know that dst up to src_size is not written. We now need to make sure
  // that dst up to dst_size is not accessed. (If we did not move the memset,
  // checking for reads would be sufficient.)
  if (accessedBetween(BAA, MemoryLocation::getForDest(MemSet),
                      MSSA->getMemoryAccess(MemSet),
                      MSSA->getMemoryAccess(MemCpy)))
    return false;

  // Use the same i8* dest as the memcpy, killing the memset dest if
  // different.
  Value *Dest = MemCpy->getRawDest();
  Value *DestSize = MemSet->getLength();

  if (mayBeVisibleThroughUnwinding(Dest, MemSet, MemCpy))
    return false;

  // If the sizes are the same, simply drop the memset instead of generating
  // a replacement with zero size.
  if (DestSize == SrcSize) {
    eraseInstruction(MemSet);
    return true;
  }

  // By default, create an unaligned memset.
  Align Alignment = Align(1);
  // If Dest is aligned, and SrcSize is constant, use the minimum alignment
  // of the sum.
  const Align DestAlign = std::max(MemSet->getDestAlign().valueOrOne(),
                                   MemCpy->getDestAlign().valueOrOne());
  if (DestAlign > 1)
    if (auto *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
      Alignment = commonAlignment(DestAlign, SrcSizeC->getZExtValue());

  IRBuilder<> Builder(MemCpy);

  // Preserve the debug location of the old memset for the code emitted here
  // related to the new memset. This is correct according to the rules in
  // https://llvm.org/docs/HowToUpdateDebugInfo.html about "when to preserve
  // an instruction location", given that we move the memset within the basic
  // block.
  assert(MemSet->getParent() == MemCpy->getParent() &&
         "Preserving debug location based on moving memset within BB.");
  Builder.SetCurrentDebugLocation(MemSet->getDebugLoc());

  // If the sizes have different types, zext the smaller one.
  if (DestSize->getType() != SrcSize->getType()) {
    if (DestSize->getType()->getIntegerBitWidth() >
        SrcSize->getType()->getIntegerBitWidth())
      SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
    else
      DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
  }

  Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
  Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
  Value *MemsetLen = Builder.CreateSelect(
      Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
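  // For example (illustrative sizes): with dst_size = 16 and src_size = 9,
  // Ule is false and MemsetLen is 16 - 9 = 7, so the tail becomes
  // memset(dst + 9, c, 7).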
  // FIXME (#167968): we could explore estimating the branch_weights based on
  // value profiling data about the 2 sizes.
  if (auto *SI = dyn_cast<SelectInst>(MemsetLen))
    setExplicitlyUnknownBranchWeightsIfProfiled(*SI, DEBUG_TYPE);
  Instruction *NewMemSet =
      Builder.CreateMemSet(Builder.CreatePtrAdd(Dest, SrcSize),
                           MemSet->getOperand(1), MemsetLen, Alignment);

  assert(isa<MemoryDef>(MSSA->getMemoryAccess(MemCpy)) &&
         "MemCpy must be a MemoryDef");
  // The new memset is inserted before the memcpy, and it is known that the
  // memcpy's defining access is the memset about to be removed.
  auto *LastDef = cast<MemoryDef>(MSSA->getMemoryAccess(MemCpy));
  auto *NewAccess =
      MSSAU->createMemoryAccessBefore(NewMemSet, nullptr, LastDef);
  MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);

  eraseInstruction(MemSet);
  return true;
}

/// Determine whether the pointer V had only undefined content (due to Def),
/// either because it was freshly alloca'd or started its lifetime.
static bool hasUndefContents(MemorySSA *MSSA, BatchAAResults &AA, Value *V,
                             MemoryDef *Def) {
  if (MSSA->isLiveOnEntryDef(Def))
    return isa<AllocaInst>(getUnderlyingObject(V));

  if (auto *II = dyn_cast_or_null<IntrinsicInst>(Def->getMemoryInst()))
    if (II->getIntrinsicID() == Intrinsic::lifetime_start)
      if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(V)))
        return II->getArgOperand(0) == Alloca;

  return false;
}
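
// Example for hasUndefContents (illustrative): a memcpy whose source is a
// fresh "%a = alloca [16 x i8]" with no intervening clobber reads only
// undefined contents, since the defining access of that read is either
// live-on-entry or the alloca's lifetime.start.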
1400
1401// If the memcpy is larger than the previous, but the memory was undef prior to
1402// that, we can just ignore the tail. Technically we're only interested in the
1403// bytes from 0..MemSrcOffset and MemSrcLength+MemSrcOffset..CopySize here, but
1404// as we can't easily represent this location (hasUndefContents uses mustAlias
1405// which cannot deal with offsets), we use the full 0..CopySize range.
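// Illustrative example (hypothetical IR, not taken from a test):
//   %a = alloca [16 x i8]
//   call void @llvm.memset.p0.i64(ptr %a, i8 0, i64 8, i1 false)
//   call void @llvm.memcpy.p0.p0.i64(ptr %d, ptr %a, i64 16, i1 false)
// The memcpy over-reads bytes 8..16 of %a, but since %a is a fresh alloca
// those bytes are undef, so the over-read is benign for the check below.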
static bool overreadUndefContents(MemorySSA *MSSA, MemCpyInst *MemCpy,
                                  MemIntrinsic *MemSrc, BatchAAResults &BAA) {
  MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy);
  MemoryUseOrDef *MemSrcAccess = MSSA->getMemoryAccess(MemSrc);
  MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
      MemSrcAccess->getDefiningAccess(), MemCpyLoc, BAA);
  if (auto *MD = dyn_cast<MemoryDef>(Clobber))
    if (hasUndefContents(MSSA, BAA, MemCpy->getSource(), MD))
      return true;
  return false;
}

/// Transform memcpy to memset when its source was just memset.
/// In other words, turn:
/// \code
///   memset(dst1, c, dst1_size);
///   memcpy(dst2, dst1, dst2_size);
/// \endcode
/// into:
/// \code
///   memset(dst1, c, dst1_size);
///   memset(dst2, c, dst2_size);
/// \endcode
bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
                                               MemSetInst *MemSet,
                                               BatchAAResults &BAA) {
  Value *MemSetSize = MemSet->getLength();
  Value *CopySize = MemCpy->getLength();

  int64_t MOffset = 0;
  const DataLayout &DL = MemCpy->getModule()->getDataLayout();
  // We can only transform memcpy's where the dest of the memset is the source
  // of the memcpy, or where the two differ by a known offset.
  if (MemCpy->getSource() != MemSet->getDest()) {
    std::optional<int64_t> Offset =
        MemCpy->getSource()->getPointerOffsetFrom(MemSet->getDest(), DL);
    if (!Offset)
      return false;
    // On positive offsets, the memcpy source is at an offset into the memset'd
    // region. On negative offsets, the copy starts at an offset prior to the
    // previously memset'd area; that is, we memcpy from a partially
    // initialized region.
    MOffset = *Offset;
  }

  if (MOffset != 0 || MemSetSize != CopySize) {
    // Make sure the memcpy doesn't read any more than what the memset wrote,
    // other than undef. Likewise, the memcpy should not read from an area not
    // covered by the memset unless those bytes are undef. Don't worry about
    // sizes larger than i64.
    auto *CMemSetSize = dyn_cast<ConstantInt>(MemSetSize);
    auto *CCopySize = dyn_cast<ConstantInt>(CopySize);
    if (!CMemSetSize || !CCopySize || MOffset < 0 ||
        CCopySize->getZExtValue() + MOffset > CMemSetSize->getZExtValue()) {
      if (!overreadUndefContents(MSSA, MemCpy, MemSet, BAA))
        return false;

      if (CMemSetSize && CCopySize) {
        uint64_t MemSetSizeVal = CMemSetSize->getZExtValue();
        uint64_t MemCpySizeVal = CCopySize->getZExtValue();
        uint64_t NewSize;

        if (MOffset < 0) {
          // Offset from the beginning of the initialized region.
          uint64_t Offset = -MOffset;
          NewSize = MemCpySizeVal <= Offset ? 0 : MemCpySizeVal - Offset;
        } else if (MOffset == 0) {
          NewSize = MemSetSizeVal;
        } else {
          NewSize =
              MemSetSizeVal <= (uint64_t)MOffset ? 0 : MemSetSizeVal - MOffset;
        }
        CopySize = ConstantInt::get(CopySize->getType(), NewSize);
      } else {
        if (MOffset < 0)
          return false;
      }
    }
  }
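  // Worked example (illustrative): for memset(p, c, 16) followed by
  // memcpy(d, p + 4, 16) where everything past the memset'd bytes is undef,
  // MOffset is 4 and only MemSetSizeVal - MOffset = 16 - 4 = 12 defined bytes
  // are read, so CopySize is shrunk to 12 for the memset emitted below.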

  IRBuilder<> Builder(MemCpy);
  Value *DestPtr = MemCpy->getRawDest();
  MaybeAlign Align = MemCpy->getDestAlign();
  if (MOffset < 0) {
    DestPtr = Builder.CreatePtrAdd(DestPtr, Builder.getInt64(-MOffset));
    if (Align)
      Align = commonAlignment(*Align, -MOffset);
  }

  Instruction *NewM =
      Builder.CreateMemSet(DestPtr, MemSet->getOperand(1), CopySize, Align);
  auto *LastDef = cast<MemoryDef>(MSSA->getMemoryAccess(MemCpy));
  auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, nullptr, LastDef);
  MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);

  return true;
}

// Attempts to optimize the pattern whereby memory is copied from an alloca to
// another alloca, where the two allocas don't have conflicting mod/ref. If
// successful, the two allocas can be merged into one and the transfer can be
// deleted. This pattern is generated frequently in Rust, due to the ubiquity
// of move operations in that language.
//
// Once we determine that the optimization is safe to perform, we replace all
// uses of the destination alloca with the source alloca. We also "shrink
// wrap" the lifetime markers of the single merged alloca to before the first
// use and after the last use. Note that the "shrink wrapping" procedure is a
// safe transformation only because we restrict the scope of this optimization
// to allocas that aren't captured.
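//
// For example (illustrative IR, assuming %T is an ordinary struct type):
//   %src = alloca %T
//   %dst = alloca %T
//   ... initialize %src ...
//   call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 N, i1 false)
//   ... reads of %dst ...
// becomes a single alloca: every use of %dst is rewritten to %src, and the
// memcpy itself is deleted by the caller once this routine returns true.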
bool MemCpyOptPass::performStackMoveOptzn(Instruction *Load, Instruction *Store,
                                          Value *DestPtr, Value *SrcPtr,
                                          TypeSize Size, BatchAAResults &BAA) {
  LLVM_DEBUG(dbgs() << "Stack Move: Attempting to optimize:\n"
                    << *Store << "\n");

  AllocaInst *DestAlloca = dyn_cast<AllocaInst>(getUnderlyingObject(DestPtr));
  if (!DestAlloca)
    return false;

  AllocaInst *SrcAlloca = dyn_cast<AllocaInst>(getUnderlyingObject(SrcPtr));
  if (!SrcAlloca)
    return false;

  // Explicitly don't handle the degenerate case of a partial copy within one
  // alloca. It would always fail the dominator check later anyway, and
  // possibly the modref checks as well.
  if (SrcAlloca == DestAlloca)
    return false;

  // Make sure the two allocas are in the same address space.
  if (SrcAlloca->getAddressSpace() != DestAlloca->getAddressSpace()) {
    LLVM_DEBUG(dbgs() << "Stack Move: Address space mismatch\n");
    return false;
  }

  if (!SrcAlloca->isStaticAlloca() || !DestAlloca->isStaticAlloca())
    return false;

  // Check that the copy is a full copy with a static size.
  const DataLayout &DL = DestAlloca->getDataLayout();

  auto DestOffset = DestPtr->getPointerOffsetFrom(DestAlloca, DL);
  if (!DestOffset)
    return false;

  auto SrcOffset = SrcPtr->getPointerOffsetFrom(SrcAlloca, DL);
  if (!SrcOffset || *SrcOffset < *DestOffset || *SrcOffset < 0)
    return false;
  // The offset difference must preserve the dest alloca's alignment.
  if ((*SrcOffset - *DestOffset) % DestAlloca->getAlign().value() != 0)
    return false;
  std::optional<TypeSize> SrcSize = SrcAlloca->getAllocationSize(DL);
  std::optional<TypeSize> DestSize = DestAlloca->getAllocationSize(DL);
  if (!SrcSize || !DestSize)
    return false;
  if (*SrcSize != *DestSize)
    if (!SrcSize->isFixed() || !DestSize->isFixed())
      return false;
  // Check that the copy covers the entirety of the dest alloca.
  if (Size != *DestSize || *DestOffset != 0) {
    LLVM_DEBUG(dbgs() << "Stack Move: Destination alloca size mismatch\n");
    return false;
  }

  // Check whether it is legal to combine the allocas without breaking
  // dominance.
  bool MoveSrc = !DT->dominates(SrcAlloca, DestAlloca);
  if (MoveSrc) {
    if (!DT->dominates(DestAlloca, SrcAlloca))
      return false;
  }

  // Check that src and dest are never-captured, unescaped allocas. Along the
  // way, collect the lifetime markers (so they can be removed) and the
  // instructions carrying AA metadata (so it can be stripped).

  SmallVector<Instruction *, 4> LifetimeMarkers;
  SmallPtrSet<Instruction *, 4> AAMetadataInstrs;

  auto CaptureTrackingWithModRef =
      [&](Instruction *AI, function_ref<bool(Instruction *)> ModRefCallback,
          bool &AddressCaptured) -> bool {
    SmallVector<Instruction *, 8> Worklist;
    Worklist.push_back(AI);
    unsigned MaxUsesToExplore = getDefaultMaxUsesToExploreForCaptureTracking();
    Worklist.reserve(MaxUsesToExplore);
    SmallPtrSet<const Use *, 20> Visited;
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();
      for (const Use &U : I->uses()) {
        auto *UI = cast<Instruction>(U.getUser());

        if (Visited.size() >= MaxUsesToExplore) {
          LLVM_DEBUG(
              dbgs()
              << "Stack Move: Exceeded max uses to see ModRef, bailing\n");
          return false;
        }
        if (!Visited.insert(&U).second)
          continue;
        UseCaptureInfo CI = DetermineUseCaptureKind(U, AI);
        if (capturesAnyProvenance(CI.UseCC))
          return false;
        AddressCaptured |= capturesAddress(CI.UseCC);

        if (UI->mayReadOrWriteMemory()) {
          if (UI->isLifetimeStartOrEnd()) {
            // We note the locations of these intrinsic calls so that we can
            // delete them later if the optimization succeeds; this is safe
            // since both llvm.lifetime.start and llvm.lifetime.end effectively
            // fill all the bytes of the alloca with an undefined value, even
            // though they conceptually only mark it as alive/dead.
            LifetimeMarkers.push_back(UI);
            continue;
          }
          AAMetadataInstrs.insert(UI);

          if (!ModRefCallback(UI))
            return false;
        }

        if (capturesAnything(CI.ResultCC)) {
          Worklist.push_back(UI);
          continue;
        }
      }
    }
    return true;
  };

  // Check that the dest alloca is not Mod/Ref'd anywhere from the alloca to
  // the Store, and collect the Mod/Ref instructions for the reachability
  // check.
  ModRefInfo DestModRef = ModRefInfo::NoModRef;
  MemoryLocation DestLoc(DestAlloca, LocationSize::precise(*DestSize));
  SmallVector<BasicBlock *, 8> ReachabilityWorklist;
  auto DestModRefCallback = [&](Instruction *UI) -> bool {
    // We don't care about the store itself.
    if (UI == Store)
      return true;
    ModRefInfo Res = BAA.getModRefInfo(UI, DestLoc);
    DestModRef |= Res;
    if (isModOrRefSet(Res)) {
      // Instruction reachability checks.
      // FIXME: an Instruction-level version of isPotentiallyReachableFromMany
      // in lib/Analysis/CFG.cpp (currently only for BasicBlocks) might be
      // helpful.
      if (UI->getParent() == Store->getParent()) {
        // The same-block case is special because it's the only time we're
        // looking within a single block to see which instruction comes first.
        // Once we start looking at multiple blocks, the first instruction of
        // the block is reachable, so we only need to determine reachability
        // between whole blocks.
        BasicBlock *BB = UI->getParent();

        // If A comes before B, then B is definitively reachable from A.
        if (UI->comesBefore(Store))
          return false;

        // If the user's parent block is the entry block, no predecessor
        // exists.
        if (BB->isEntryBlock())
          return true;

        // Otherwise, continue doing the normal per-BB CFG walk.
        ReachabilityWorklist.append(succ_begin(BB), succ_end(BB));
      } else {
        ReachabilityWorklist.push_back(UI->getParent());
      }
    }
    return true;
  };

  bool DestAddressCaptured = false;
  if (!CaptureTrackingWithModRef(DestAlloca, DestModRefCallback,
                                 DestAddressCaptured))
    return false;
  // Bail out if Dest may be Mod/Ref'd before the Store.
  if (!ReachabilityWorklist.empty() &&
      isPotentiallyReachableFromMany(ReachabilityWorklist, Store->getParent(),
                                     nullptr, DT, nullptr))
    return false;

  // Check that, from after the Load to the end of the BB,
  // - if the dest has any Mod, src has no Ref, and
  // - if the dest has any Ref, src has no Mod except full-sized lifetimes.
  // Where:
  // - src is defined as the memory from max(SrcAlloca, SrcPtr minus
  //   dest_offset) to min(dest_size, SrcSize minus SrcOffset)
  // - dest_offset and dest_size could be computed by DestModRefCallback
  //   to be the bounds of the first and last mod region, which is at
  //   least as large as DestOffset to DestSize, and at most as large as
  //   SrcAlloca to SrcSize.
  // - Currently DestOffset==0 and DestSize==Size, so this math is simplified.
  MemoryLocation SrcLoc(SrcPtr, LocationSize::precise(Size));

  auto SrcModRefCallback = [&](Instruction *UI) -> bool {
    // Any Mod/Ref post-dominated by the Load doesn't matter; the Load and
    // Store themselves can also be ignored.
    if (PDT->dominates(Load, UI) || UI == Load || UI == Store)
      return true;
    ModRefInfo Res = BAA.getModRefInfo(UI, SrcLoc);
    if ((isModSet(DestModRef) && isRefSet(Res)) ||
        (isRefSet(DestModRef) && isModSet(Res)))
      return false;

    return true;
  };

  bool SrcAddressCaptured = false;
  if (!CaptureTrackingWithModRef(SrcAlloca, SrcModRefCallback,
                                 SrcAddressCaptured))
    return false;

  // If both the source and destination addresses are captured, the fact that
  // they are no longer two separate allocations may be observed.
  if (DestAddressCaptured && SrcAddressCaptured)
    return false;

  // We can now do the transformation. First move the Src if it was after Dest.
  if (MoveSrc)
    SrcAlloca->moveBefore(DestAlloca->getIterator());

  // Align the allocas appropriately.
  SrcAlloca->setAlignment(
      std::max(SrcAlloca->getAlign(), DestAlloca->getAlign()));

  // Size the allocas appropriately.
  if (*SrcSize != *DestSize) {
    // Only possible if both sizes are fixed (due to the earlier check).
    // Set Src to the type and array size of Dest if Dest was larger.
    if (DestSize->getFixedValue() > SrcSize->getFixedValue()) {
      SrcAlloca->setAllocatedType(DestAlloca->getAllocatedType());
      SrcAlloca->setOperand(0, DestAlloca->getArraySize());
    }
  }

  // Merge the two allocas.
  Value *NewDestPtr = SrcAlloca;
  if (*SrcOffset != *DestOffset) {
    IRBuilder<> Builder(DestAlloca);
    NewDestPtr = Builder.CreateInBoundsPtrAdd(
        SrcAlloca, Builder.getInt64(*SrcOffset - *DestOffset));
  }
  DestAlloca->replaceAllUsesWith(NewDestPtr);
  eraseInstruction(DestAlloca);

  // Drop metadata on the source alloca.
  SrcAlloca->dropUnknownNonDebugMetadata();

  // TODO: Reconstruct merged lifetime markers.
  // Remove all other lifetime markers, if the original lifetime intrinsics
  // exist.
  if (!LifetimeMarkers.empty()) {
    for (Instruction *I : LifetimeMarkers)
      eraseInstruction(I);
  }

  // As this transformation can cause memory accesses that didn't previously
  // alias to begin to alias one another, we remove !alias.scope, !noalias,
  // !tbaa and !tbaa_struct metadata from any uses of either alloca.
  // This is conservative, but more precision doesn't seem worthwhile
  // right now.
  for (Instruction *I : AAMetadataInstrs) {
    I->setMetadata(LLVMContext::MD_alias_scope, nullptr);
    I->setMetadata(LLVMContext::MD_noalias, nullptr);
    I->setMetadata(LLVMContext::MD_tbaa, nullptr);
    I->setMetadata(LLVMContext::MD_tbaa_struct, nullptr);
  }

  LLVM_DEBUG(dbgs() << "Stack Move: Performed stack-move optimization\n");
  NumStackMove++;
  return true;
}

static bool isZeroSize(Value *Size) {
  if (auto *I = dyn_cast<Instruction>(Size))
    if (auto *Res = simplifyInstruction(I, I->getDataLayout()))
      Size = Res;
  // Treat undef/poison size like zero.
  if (auto *C = dyn_cast<Constant>(Size))
    return isa<UndefValue>(C) || C->isNullValue();
  return false;
}

/// Perform simplification of memcpy's. If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
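/// For example (illustrative):
/// \code
///   memcpy(Y, X, N); // memcpy A
///   memcpy(Z, Y, N); // memcpy B
/// \endcode
/// can become
/// \code
///   memcpy(Y, X, N); // now potentially dead
///   memcpy(Z, X, N);
/// \endcode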
bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) {
  // We can only optimize non-volatile memcpy's.
  if (M->isVolatile())
    return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    ++BBI;
    eraseInstruction(M);
    return true;
  }

  // If the size is zero, remove the memcpy.
  if (isZeroSize(M->getLength())) {
    ++BBI;
    eraseInstruction(M);
    return true;
  }

  MemoryUseOrDef *MA = MSSA->getMemoryAccess(M);
  if (!MA)
    // Degenerate case: memcpy marked as not accessing memory.
    return false;

  // If copying from a constant, try to turn the memcpy into a memset.
  if (auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(M->getSource())))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer(),
                                           M->getDataLayout())) {
        IRBuilder<> Builder(M);
        Instruction *NewM = Builder.CreateMemSet(
            M->getRawDest(), ByteVal, M->getLength(), M->getDestAlign(), false);
        auto *LastDef = cast<MemoryDef>(MA);
        auto *NewAccess =
            MSSAU->createMemoryAccessAfter(NewM, nullptr, LastDef);
        MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);

        eraseInstruction(M);
        ++NumCpyToSet;
        return true;
      }

  BatchAAResults BAA(*AA, EEA);
  // FIXME: Not using getClobberingMemoryAccess() here due to PR54682.
  MemoryAccess *AnyClobber = MA->getDefiningAccess();
  MemoryLocation DestLoc = MemoryLocation::getForDest(M);
  const MemoryAccess *DestClobber =
      MSSA->getWalker()->getClobberingMemoryAccess(AnyClobber, DestLoc, BAA);

  // Try to turn a partially redundant memset + memcpy into a smaller memset +
  // memcpy. We don't need the memcpy size for this.
  // The memcpy must post-dominate the memset, so limit this to the same basic
  // block. A non-local generalization is likely not worthwhile.
  if (auto *MD = dyn_cast<MemoryDef>(DestClobber))
    if (auto *MDep = dyn_cast_or_null<MemSetInst>(MD->getMemoryInst()))
      if (DestClobber->getBlock() == M->getParent())
        if (processMemSetMemCpyDependence(M, MDep, BAA))
          return true;

  MemoryAccess *SrcClobber = MSSA->getWalker()->getClobberingMemoryAccess(
      AnyClobber, MemoryLocation::getForSource(M), BAA);

  // There are five possible optimizations we can do for memcpy:
  // a) memcpy-memcpy xform which exposes redundancy for DSE.
  // b) call-memcpy xform for return slot optimization.
  // c) memcpy from freshly alloca'd space or space that has just started
  //    its lifetime copies undefined data, and we can therefore eliminate
  //    the memcpy in favor of the data that was already at the destination.
  // d) memcpy from a just-memset'd source can be turned into memset.
  // e) elimination of memcpy via stack-move optimization.
  if (auto *MD = dyn_cast<MemoryDef>(SrcClobber)) {
    if (Instruction *MI = MD->getMemoryInst()) {
      if (auto *CopySize = dyn_cast<ConstantInt>(M->getLength())) {
        if (auto *C = dyn_cast<CallInst>(MI)) {
          if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(),
                                   TypeSize::getFixed(CopySize->getZExtValue()),
                                   M->getDestAlign().valueOrOne(), BAA,
                                   [C]() -> CallInst * { return C; })) {
            LLVM_DEBUG(dbgs() << "Performed call slot optimization:\n"
                              << "    call: " << *C << "\n"
                              << "    memcpy: " << *M << "\n");
            eraseInstruction(M);
            ++NumMemCpyInstr;
            return true;
          }
        }
      }
      if (auto *MDep = dyn_cast<MemCpyInst>(MI))
        if (processMemCpyMemCpyDependence(M, MDep, BAA))
          return true;
      if (auto *MDep = dyn_cast<MemSetInst>(MI)) {
        if (performMemCpyToMemSetOptzn(M, MDep, BAA)) {
          LLVM_DEBUG(dbgs() << "Converted memcpy to memset\n");
          eraseInstruction(M);
          ++NumCpyToSet;
          return true;
        }
      }
    }

    if (hasUndefContents(MSSA, BAA, M->getSource(), MD)) {
      LLVM_DEBUG(dbgs() << "Removed memcpy from undef\n");
      eraseInstruction(M);
      ++NumMemCpyInstr;
      return true;
    }
  }

  // If the transfer is from a stack slot to a stack slot, then we may be able
  // to perform the stack-move optimization. See the comments in
  // performStackMoveOptzn() for more details.
  ConstantInt *Len = dyn_cast<ConstantInt>(M->getLength());
  if (Len == nullptr)
    return false;
  if (performStackMoveOptzn(M, M, M->getDest(), M->getSource(),
                            TypeSize::getFixed(Len->getZExtValue()), BAA)) {
    // Avoid invalidating the iterator.
    BBI = M->getNextNode()->getIterator();
    eraseInstruction(M);
    ++NumMemCpyInstr;
    return true;
  }

  return false;
}

/// Memmove calls with overlapping src/dest buffers that come after a memset
/// may be removed.
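/// For example (illustrative):
/// \code
///   memset(x, c, size);
///   memmove(x, x + offset, size - offset);
/// \endcode
/// Both the source and destination ranges already hold the byte 'c', so the
/// memmove is a no-op and can be removed.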
bool MemCpyOptPass::isMemMoveMemSetDependency(MemMoveInst *M) {
  const auto &DL = M->getDataLayout();
  MemoryUseOrDef *MemMoveAccess = MSSA->getMemoryAccess(M);
  if (!MemMoveAccess)
    return false;

  // The memmove is of the form memmove(x, x + A, B).
  MemoryLocation SourceLoc = MemoryLocation::getForSource(M);
  auto *MemMoveSourceOp = M->getSource();
  auto *Source = dyn_cast<GEPOperator>(MemMoveSourceOp);
  if (!Source)
    return false;

  APInt Offset(DL.getIndexTypeSizeInBits(Source->getType()), 0);
  LocationSize MemMoveLocSize = SourceLoc.Size;
  if (Source->getPointerOperand() != M->getDest() ||
      !MemMoveLocSize.hasValue() ||
      !Source->accumulateConstantOffset(DL, Offset) || Offset.isNegative()) {
    return false;
  }

  uint64_t MemMoveSize = MemMoveLocSize.getValue();
  LocationSize TotalSize =
      LocationSize::precise(Offset.getZExtValue() + MemMoveSize);
  MemoryLocation CombinedLoc(M->getDest(), TotalSize);

  // The first dominating clobbering MemoryAccess for the combined location
  // needs to be a memset.
  BatchAAResults BAA(*AA);
  MemoryAccess *FirstDef = MemMoveAccess->getDefiningAccess();
  auto *DestClobber = dyn_cast<MemoryDef>(
      MSSA->getWalker()->getClobberingMemoryAccess(FirstDef, CombinedLoc, BAA));
  if (!DestClobber)
    return false;

  auto *MS = dyn_cast_or_null<MemSetInst>(DestClobber->getMemoryInst());
  if (!MS)
    return false;

  // The memset length must be sufficiently large.
  auto *MemSetLength = dyn_cast<ConstantInt>(MS->getLength());
  if (!MemSetLength || MemSetLength->getZExtValue() < MemMoveSize)
    return false;

  // The destination buffer must have been memset'd.
  if (!BAA.isMustAlias(MS->getDest(), M->getDest()))
    return false;

  return true;
}

/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
/// not to alias.
bool MemCpyOptPass::processMemMove(MemMoveInst *M, BasicBlock::iterator &BBI) {
  // See if the source could potentially be modified by this memmove.
  if (isModSet(AA->getModRefInfo(M, MemoryLocation::getForSource(M)))) {
    // On the off-chance the memmove clobbers src with previously memset'd
    // bytes, the memmove may be redundant.
    if (!M->isVolatile() && isMemMoveMemSetDependency(M)) {
      LLVM_DEBUG(dbgs() << "Removed redundant memmove.\n");
      ++BBI;
      eraseInstruction(M);
      ++NumMemMoveInstr;
      return true;
    }
    return false;
  }

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
                    << "\n");

  // If not, then we know we can transform this.
  Type *ArgTys[3] = {M->getRawDest()->getType(), M->getRawSource()->getType(),
                     M->getLength()->getType()};
  M->setCalledFunction(Intrinsic::getOrInsertDeclaration(
      M->getModule(), Intrinsic::memcpy, ArgTys));

  // For MemorySSA nothing really changes (except that memcpy may imply
  // stricter aliasing guarantees).

  ++NumMoveToCpy;
  return true;
}

/// This is called on every byval argument in call sites.
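/// For example (illustrative):
/// \code
///   memcpy(tmp, src, tmp_size);
///   call(byval tmp); // can often be rewritten to: call(byval src)
/// \endcode
/// provided src is sufficiently aligned and is not modified between the
/// memcpy and the call.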
bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) {
  const DataLayout &DL = CB.getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CB.getArgOperand(ArgNo);
  Type *ByValTy = CB.getParamByValType(ArgNo);
  TypeSize ByValSize = DL.getTypeAllocSize(ByValTy);
  MemoryLocation Loc(ByValArg, LocationSize::precise(ByValSize));
  MemoryUseOrDef *CallAccess = MSSA->getMemoryAccess(&CB);
  if (!CallAccess)
    return false;
  MemCpyInst *MDep = nullptr;
  BatchAAResults BAA(*AA, EEA);
  MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
      CallAccess->getDefiningAccess(), Loc, BAA);
  if (auto *MD = dyn_cast<MemoryDef>(Clobber))
    MDep = dyn_cast_or_null<MemCpyInst>(MD->getMemoryInst());

  // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of
  // the result.
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger than or equal to the size of the
  // byval.
  auto *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || !TypeSize::isKnownGE(
                 TypeSize::getFixed(C1->getValue().getZExtValue()), ByValSize))
    return false;

  // Get the alignment of the byval. If the call doesn't specify the
  // alignment, then it is some target-specific value that we can't know.
  MaybeAlign ByValAlign = CB.getParamAlign(ArgNo);
  if (!ByValAlign)
    return false;

  // If it is greater than the memcpy's source alignment, then we check to see
  // if we can force the source of the memcpy to the alignment we need. If we
  // fail, we bail out.
  MaybeAlign MemDepAlign = MDep->getSourceAlign();
  if ((!MemDepAlign || *MemDepAlign < *ByValAlign) &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL, &CB, AC,
                                 DT) < *ByValAlign)
    return false;

  // The type of the memcpy source must match the byval argument.
  if (MDep->getSource()->getType() != ByValArg->getType())
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy
  // and the byval call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to transform the second memcpy into foo(*b).
  if (writtenBetween(MSSA, BAA, MemoryLocation::getForSource(MDep),
                     MSSA->getMemoryAccess(MDep), CallAccess))
    return false;

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
                    << "  " << *MDep << "\n"
                    << "  " << CB << "\n");

  // Otherwise we're good! Update the byval argument.
  combineAAMetadata(&CB, MDep);
  CB.setArgOperand(ArgNo, MDep->getSource());
  ++NumMemCpyInstr;
  return true;
}

/// This is called on memcpy dest pointer arguments attributed as immutable
/// during the call. Try to use the memcpy source directly if all of the
/// following conditions are satisfied.
/// 1. The memcpy dst is neither modified during the call nor captured by the
///    call.
/// 2. The memcpy dst is an alloca with known alignment & size.
///    2-1. The memcpy length == the alloca size, which ensures that the new
///         pointer is dereferenceable for the required range.
///    2-2. The src pointer has alignment >= the alloca alignment or can be
///         enforced so.
/// 3. The memcpy dst and src are not modified between the memcpy and the call
///    (if the MSSA clobber check is safe).
/// 4. The memcpy src is not modified during the call. (The ModRef check shows
///    no Mod.)
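/// For example (illustrative):
/// \code
///   memcpy(A, src, sizeof(A)); // A is a local alloca
///   call(nocapture readonly A); // can become: call(nocapture readonly src)
/// \endcode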
bool MemCpyOptPass::processImmutArgument(CallBase &CB, unsigned ArgNo) {
  BatchAAResults BAA(*AA, EEA);
  Value *ImmutArg = CB.getArgOperand(ArgNo);

  // 1. Ensure the passed argument is immutable during the call.
  if (!CB.doesNotCapture(ArgNo))
    return false;

  // We know that the argument is readonly at this point, but the function
  // might still modify the same memory through a different pointer. Exclude
  // this either via noalias, or alias analysis.
  if (!CB.paramHasAttr(ArgNo, Attribute::NoAlias) &&
      isModSet(
          BAA.getModRefInfo(&CB, MemoryLocation::getBeforeOrAfter(ImmutArg))))
    return false;

  const DataLayout &DL = CB.getDataLayout();

  // 2. Check that the arg is an alloca.
  // TODO: Even if the arg gets back to branches, we can remove the memcpy if
  // all the alloca alignments can be enforced to the source alignment.
  auto *AI = dyn_cast<AllocaInst>(ImmutArg->stripPointerCasts());
  if (!AI)
    return false;

  std::optional<TypeSize> AllocaSize = AI->getAllocationSize(DL);
  // Can't handle unknown-size allocas
  // (e.g. Variable Length Array, Scalable Vector).
  if (!AllocaSize || AllocaSize->isScalable())
    return false;
  MemoryLocation Loc(ImmutArg, LocationSize::precise(*AllocaSize));
  MemoryUseOrDef *CallAccess = MSSA->getMemoryAccess(&CB);
  if (!CallAccess)
    return false;

  MemCpyInst *MDep = nullptr;
  MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
      CallAccess->getDefiningAccess(), Loc, BAA);
  if (auto *MD = dyn_cast<MemoryDef>(Clobber))
    MDep = dyn_cast_or_null<MemCpyInst>(MD->getMemoryInst());

  // If the immut argument isn't fed by a memcpy, ignore it. If it is fed by
  // a memcpy, check that the arg equals the memcpy dest.
  if (!MDep || MDep->isVolatile() || AI != MDep->getDest())
    return false;

  // The type of the memcpy source must match the immut argument.
  if (MDep->getSource()->getType() != ImmutArg->getType())
    return false;

  // 2-1. The length of the memcpy must be equal to the size of the alloca.
  auto *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  if (!MDepLen || AllocaSize != MDepLen->getValue())
    return false;

  // 2-2. The memcpy source alignment must be larger than or equal to the
  // alloca's alignment. If not, we check to see if we can force the source of
  // the memcpy to the alignment we need. If we fail, we bail out.
  Align MemDepAlign = MDep->getSourceAlign().valueOrOne();
  Align AllocaAlign = AI->getAlign();
  if (MemDepAlign < AllocaAlign &&
      getOrEnforceKnownAlignment(MDep->getSource(), AllocaAlign, DL, &CB, AC,
                                 DT) < AllocaAlign)
    return false;

  // 3. Verify that the source doesn't change in between the memcpy and
  // the call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to transform the second memcpy into foo(*b).
  if (writtenBetween(MSSA, BAA, MemoryLocation::getForSource(MDep),
                     MSSA->getMemoryAccess(MDep), CallAccess))
    return false;

  // 4. The memcpy src must not be modified during the call.
  if (isModSet(BAA.getModRefInfo(&CB, MemoryLocation::getForSource(MDep))))
    return false;

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to Immut src:\n"
                    << "  " << *MDep << "\n"
                    << "  " << CB << "\n");

  // Otherwise we're good! Update the immut argument.
  combineAAMetadata(&CB, MDep);
  CB.setArgOperand(ArgNo, MDep->getSource());
  ++NumMemCpyInstr;
  return true;
}

/// Executes one iteration of MemCpyOptPass.
bool MemCpyOptPass::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (BasicBlock &BB : F) {
    // Skip unreachable blocks. For example, processStore assumes that an
    // instruction in a BB can't be dominated by a later instruction in the
    // same BB (which is a scenario that can happen for an unreachable BB that
    // has itself as a predecessor).
    if (!DT->isReachableFromEntry(&BB))
      continue;

    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = &*BI++;

      bool RepeatInstruction = false;

      if (auto *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (auto *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (auto *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M, BI);
      else if (auto *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M, BI);
      else if (auto *CB = dyn_cast<CallBase>(I)) {
        for (unsigned i = 0, e = CB->arg_size(); i != e; ++i) {
          if (CB->isByValArgument(i))
            MadeChange |= processByValArgument(*CB, i);
          else if (CB->onlyReadsMemory(i))
            MadeChange |= processImmutArgument(*CB, i);
        }
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB.begin())
          --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto *AA = &AM.getResult<AAManager>(F);
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *PDT = &AM.getResult<PostDominatorTreeAnalysis>(F);
  auto *MSSA = &AM.getResult<MemorySSAAnalysis>(F);

  bool MadeChange = runImpl(F, &TLI, AA, AC, DT, PDT, &MSSA->getMSSA());
  if (!MadeChange)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<MemorySSAAnalysis>();
  return PA;
}

bool MemCpyOptPass::runImpl(Function &F, TargetLibraryInfo *TLI_,
                            AliasAnalysis *AA_, AssumptionCache *AC_,
                            DominatorTree *DT_, PostDominatorTree *PDT_,
                            MemorySSA *MSSA_) {
  bool MadeChange = false;
  TLI = TLI_;
  AA = AA_;
  AC = AC_;
  DT = DT_;
  PDT = PDT_;
  MSSA = MSSA_;
  MemorySSAUpdater MSSAU_(MSSA_);
  MSSAU = &MSSAU_;
  EarliestEscapeAnalysis EEA_(*DT);
  EEA = &EEA_;

  while (true) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  if (VerifyMemorySSA)
    MSSA_->verifyMemorySSA();

  return MadeChange;
}
