1//===--- ExpandMemCmp.cpp - Expand memcmp() to load/stores ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass tries to expand memcmp() calls into optimally-sized loads and
10// compares for the target.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Transforms/Scalar/ExpandMemCmp.h"
15#include "llvm/ADT/Statistic.h"
16#include "llvm/Analysis/ConstantFolding.h"
17#include "llvm/Analysis/DomTreeUpdater.h"
18#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
19#include "llvm/Analysis/ProfileSummaryInfo.h"
20#include "llvm/Analysis/TargetLibraryInfo.h"
21#include "llvm/Analysis/TargetTransformInfo.h"
22#include "llvm/Analysis/ValueTracking.h"
23#include "llvm/IR/Dominators.h"
24#include "llvm/IR/IRBuilder.h"
25#include "llvm/IR/PatternMatch.h"
26#include "llvm/IR/ProfDataUtils.h"
27#include "llvm/Transforms/Utils/BasicBlockUtils.h"
28#include "llvm/Transforms/Utils/Local.h"
29#include "llvm/Transforms/Utils/SizeOpts.h"
30#include <optional>
31
32using namespace llvm;
33using namespace llvm::PatternMatch;
34
35#define DEBUG_TYPE "expand-memcmp"
36
STATISTIC(NumMemCmpCalls, "Number of memcmp calls");
STATISTIC(NumMemCmpNotConstant, "Number of memcmp calls without constant size");
STATISTIC(NumMemCmpGreaterThanMax,
          "Number of memcmp calls with size greater than max size");
STATISTIC(NumMemCmpInlined, "Number of inlined memcmp calls");

// Command-line overrides for the expansion thresholds; presumably consulted
// where the pass builds its MemCmpExpansionOptions (not visible in this
// chunk) — the target supplies the defaults otherwise.
static cl::opt<unsigned> MemCmpEqZeroNumLoadsPerBlock(
    "memcmp-num-loads-per-block", cl::Hidden, cl::init(Val: 1),
    cl::desc("The number of loads per basic block for inline expansion of "
             "memcmp that is only being compared against zero."));

static cl::opt<unsigned> MaxLoadsPerMemcmp(
    "max-loads-per-memcmp", cl::Hidden,
    cl::desc("Set maximum number of loads used in expanded memcmp"));

static cl::opt<unsigned> MaxLoadsPerMemcmpOptSize(
    "max-loads-per-memcmp-opt-size", cl::Hidden,
    cl::desc("Set maximum number of loads used in expanded memcmp for -Os/Oz"));
55
56namespace {
57
58
// This class provides helper functions to expand a memcmp library call into an
// inline expansion.
class MemCmpExpansion {
  // The block (and its PHI nodes) that computes the final -1/+1 memcmp
  // result once a pair of differing loaded values has been found.
  struct ResultBlock {
    BasicBlock *BB = nullptr;
    PHINode *PhiSrc1 = nullptr;
    PHINode *PhiSrc2 = nullptr;

    ResultBlock() = default;
  };

  // The memcmp call being expanded.
  CallInst *const CI = nullptr;
  ResultBlock ResBlock;
  // Number of bytes to compare (the constant third argument of memcmp).
  const uint64_t Size;
  // Largest load size (in bytes) used by the chosen decomposition.
  unsigned MaxLoadSize = 0;
  // Number of loads in the sequence that are wider than one byte.
  uint64_t NumLoadsNonOneByte = 0;
  // How many loads to emit per block when only equality with zero matters.
  const uint64_t NumLoadsPerBlockForZeroCmp;
  std::vector<BasicBlock *> LoadCmpBlocks;
  // Block the expansion falls through to; holds the result PHI (PhiRes).
  BasicBlock *EndBlock = nullptr;
  PHINode *PhiRes = nullptr;
  // True if the memcmp result is only compared (not) equal to zero.
  const bool IsUsedForZeroCmp;
  const DataLayout &DL;
  DomTreeUpdater *DTU = nullptr;
  IRBuilder<> Builder;
  // Represents the decomposition in blocks of the expansion. For example,
  // comparing 33 bytes on X86+sse can be done with 2x16-byte loads and
  // 1x1-byte load, which would be represented as [{16, 0}, {16, 16}, {1, 32}].
  struct LoadEntry {
    LoadEntry(unsigned LoadSize, uint64_t Offset)
        : LoadSize(LoadSize), Offset(Offset) {
    }

    // The size of the load for this block, in bytes.
    unsigned LoadSize;
    // The offset of this load from the base pointer, in bytes.
    uint64_t Offset;
  };
  using LoadEntryVector = SmallVector<LoadEntry, 8>;
  LoadEntryVector LoadSequence;

  void createLoadCmpBlocks();
  void createResultBlock();
  void setupResultBlockPHINodes();
  void setupEndBlockPHINodes();
  Value *getCompareLoadPairs(unsigned BlockIndex, unsigned &LoadIndex);
  void emitLoadCompareBlock(unsigned BlockIndex);
  void emitLoadCompareBlockMultipleLoads(unsigned BlockIndex,
                                         unsigned &LoadIndex);
  void emitLoadCompareByteBlock(unsigned BlockIndex, unsigned OffsetBytes);
  void emitMemCmpResultBlock();
  Value *getMemCmpExpansionZeroCase();
  Value *getMemCmpEqZeroOneBlock();
  Value *getMemCmpOneBlock();
  // A (possibly constant-folded) pair of values loaded from the two memcmp
  // sources at the same offset.
  struct LoadPair {
    Value *Lhs = nullptr;
    Value *Rhs = nullptr;
  };
  LoadPair getLoadPair(Type *LoadSizeType, Type *BSwapSizeType,
                       Type *CmpSizeType, unsigned OffsetBytes);

  static LoadEntryVector
  computeGreedyLoadSequence(uint64_t Size, llvm::ArrayRef<unsigned> LoadSizes,
                            unsigned MaxNumLoads, unsigned &NumLoadsNonOneByte);
  static LoadEntryVector
  computeOverlappingLoadSequence(uint64_t Size, unsigned MaxLoadSize,
                                 unsigned MaxNumLoads,
                                 unsigned &NumLoadsNonOneByte);

  static void optimiseLoadSequence(
      LoadEntryVector &LoadSequence,
      const TargetTransformInfo::MemCmpExpansionOptions &Options,
      bool IsUsedForZeroCmp);

public:
  MemCmpExpansion(CallInst *CI, uint64_t Size,
                  const TargetTransformInfo::MemCmpExpansionOptions &Options,
                  const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout,
                  DomTreeUpdater *DTU);

  unsigned getNumBlocks();
  uint64_t getNumLoads() const { return LoadSequence.size(); }

  Value *getMemCmpExpansion();
};
143
144MemCmpExpansion::LoadEntryVector MemCmpExpansion::computeGreedyLoadSequence(
145 uint64_t Size, llvm::ArrayRef<unsigned> LoadSizes,
146 const unsigned MaxNumLoads, unsigned &NumLoadsNonOneByte) {
147 NumLoadsNonOneByte = 0;
148 LoadEntryVector LoadSequence;
149 uint64_t Offset = 0;
150 while (Size && !LoadSizes.empty()) {
151 const unsigned LoadSize = LoadSizes.front();
152 const uint64_t NumLoadsForThisSize = Size / LoadSize;
153 if (LoadSequence.size() + NumLoadsForThisSize > MaxNumLoads) {
154 // Do not expand if the total number of loads is larger than what the
155 // target allows. Note that it's important that we exit before completing
156 // the expansion to avoid using a ton of memory to store the expansion for
157 // large sizes.
158 return {};
159 }
160 if (NumLoadsForThisSize > 0) {
161 for (uint64_t I = 0; I < NumLoadsForThisSize; ++I) {
162 LoadSequence.push_back(Elt: {LoadSize, Offset});
163 Offset += LoadSize;
164 }
165 if (LoadSize > 1)
166 ++NumLoadsNonOneByte;
167 Size = Size % LoadSize;
168 }
169 LoadSizes = LoadSizes.drop_front();
170 }
171 return LoadSequence;
172}
173
// Decompose `Size` bytes into loads that are all MaxLoadSize wide: a run of
// non-overlapping loads followed by one final load that overlaps the previous
// one so that the tail needs no smaller loads. Returns an empty sequence when
// the greedy decomposition should be used instead.
MemCmpExpansion::LoadEntryVector
MemCmpExpansion::computeOverlappingLoadSequence(uint64_t Size,
                                                const unsigned MaxLoadSize,
                                                const unsigned MaxNumLoads,
                                                unsigned &NumLoadsNonOneByte) {
  // These are already handled by the greedy approach.
  if (Size < 2 || MaxLoadSize < 2)
    return {};

  // We try to do as many non-overlapping loads as possible starting from the
  // beginning.
  const uint64_t NumNonOverlappingLoads = Size / MaxLoadSize;
  assert(NumNonOverlappingLoads && "there must be at least one load");
  // There remain 0 to (MaxLoadSize - 1) bytes to load, this will be done with
  // an overlapping load.
  Size = Size - NumNonOverlappingLoads * MaxLoadSize;
  // Bail if we do not need an overlapping load, this is already handled by
  // the greedy approach.
  if (Size == 0)
    return {};
  // Bail if the number of loads (non-overlapping + potential overlapping one)
  // is larger than the max allowed.
  if ((NumNonOverlappingLoads + 1) > MaxNumLoads)
    return {};

  // Add non-overlapping loads.
  LoadEntryVector LoadSequence;
  uint64_t Offset = 0;
  for (uint64_t I = 0; I < NumNonOverlappingLoads; ++I) {
    LoadSequence.push_back(Elt: {MaxLoadSize, Offset});
    Offset += MaxLoadSize;
  }

  // Add the last overlapping load: it is moved back so that it stays
  // MaxLoadSize wide while ending exactly at the last byte (re-reading some
  // bytes of the previous load).
  assert(Size > 0 && Size < MaxLoadSize && "broken invariant");
  LoadSequence.push_back(Elt: {MaxLoadSize, Offset - (MaxLoadSize - Size)});
  // Every load emitted here is MaxLoadSize (>= 2) bytes wide.
  NumLoadsNonOneByte = 1;
  return LoadSequence;
}
213
214void MemCmpExpansion::optimiseLoadSequence(
215 LoadEntryVector &LoadSequence,
216 const TargetTransformInfo::MemCmpExpansionOptions &Options,
217 bool IsUsedForZeroCmp) {
218 // This part of code attempts to optimize the LoadSequence by merging allowed
219 // subsequences into single loads of allowed sizes from
220 // `MemCmpExpansionOptions::AllowedTailExpansions`. If it is for zero
221 // comparison or if no allowed tail expansions are specified, we exit early.
222 if (IsUsedForZeroCmp || Options.AllowedTailExpansions.empty())
223 return;
224
225 while (LoadSequence.size() >= 2) {
226 auto Last = LoadSequence[LoadSequence.size() - 1];
227 auto PreLast = LoadSequence[LoadSequence.size() - 2];
228
229 // Exit the loop if the two sequences are not contiguous
230 if (PreLast.Offset + PreLast.LoadSize != Last.Offset)
231 break;
232
233 auto LoadSize = Last.LoadSize + PreLast.LoadSize;
234 if (find(Range: Options.AllowedTailExpansions, Val: LoadSize) ==
235 Options.AllowedTailExpansions.end())
236 break;
237
238 // Remove the last two sequences and replace with the combined sequence
239 LoadSequence.pop_back();
240 LoadSequence.pop_back();
241 LoadSequence.emplace_back(Args&: PreLast.Offset, Args&: LoadSize);
242 }
243}
244
// Initialize the basic block structure required for expansion of memcmp call
// with given maximum load size and memcmp size parameter.
// This structure includes:
// 1. A list of load compare blocks - LoadCmpBlocks.
// 2. An EndBlock, split from original instruction point, which is the block to
// return from.
// 3. ResultBlock, block to branch to for early exit when a
// LoadCmpBlock finds a difference.
MemCmpExpansion::MemCmpExpansion(
    CallInst *const CI, uint64_t Size,
    const TargetTransformInfo::MemCmpExpansionOptions &Options,
    const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout,
    DomTreeUpdater *DTU)
    : CI(CI), Size(Size), NumLoadsPerBlockForZeroCmp(Options.NumLoadsPerBlock),
      IsUsedForZeroCmp(IsUsedForZeroCmp), DL(TheDataLayout), DTU(DTU),
      Builder(CI) {
  assert(Size > 0 && "zero blocks");
  // Scale the max size down if the target can load more bytes than we need.
  // NOTE(review): this assumes Options.LoadSizes is sorted in decreasing
  // order (the front entry after trimming is taken as the maximum) — confirm
  // against the targets' MemCmpExpansionOptions.
  llvm::ArrayRef<unsigned> LoadSizes(Options.LoadSizes);
  while (!LoadSizes.empty() && LoadSizes.front() > Size) {
    LoadSizes = LoadSizes.drop_front();
  }
  assert(!LoadSizes.empty() && "cannot load Size bytes");
  MaxLoadSize = LoadSizes.front();
  // Compute the decomposition.
  unsigned GreedyNumLoadsNonOneByte = 0;
  LoadSequence = computeGreedyLoadSequence(Size, LoadSizes, MaxNumLoads: Options.MaxNumLoads,
                                           NumLoadsNonOneByte&: GreedyNumLoadsNonOneByte);
  NumLoadsNonOneByte = GreedyNumLoadsNonOneByte;
  assert(LoadSequence.size() <= Options.MaxNumLoads && "broken invariant");
  // If we allow overlapping loads and the load sequence is not already optimal,
  // use overlapping loads.
  if (Options.AllowOverlappingLoads &&
      (LoadSequence.empty() || LoadSequence.size() > 2)) {
    unsigned OverlappingNumLoadsNonOneByte = 0;
    auto OverlappingLoads = computeOverlappingLoadSequence(
        Size, MaxLoadSize, MaxNumLoads: Options.MaxNumLoads, NumLoadsNonOneByte&: OverlappingNumLoadsNonOneByte);
    // Prefer the overlapping sequence only when it actually uses fewer loads
    // (an empty result means no overlapping decomposition was possible).
    if (!OverlappingLoads.empty() &&
        (LoadSequence.empty() ||
         OverlappingLoads.size() < LoadSequence.size())) {
      LoadSequence = OverlappingLoads;
      NumLoadsNonOneByte = OverlappingNumLoadsNonOneByte;
    }
  }
  assert(LoadSequence.size() <= Options.MaxNumLoads && "broken invariant");
  optimiseLoadSequence(LoadSequence, Options, IsUsedForZeroCmp);
}
292
293unsigned MemCmpExpansion::getNumBlocks() {
294 if (IsUsedForZeroCmp)
295 return getNumLoads() / NumLoadsPerBlockForZeroCmp +
296 (getNumLoads() % NumLoadsPerBlockForZeroCmp != 0 ? 1 : 0);
297 return getNumLoads();
298}
299
300void MemCmpExpansion::createLoadCmpBlocks() {
301 for (unsigned i = 0; i < getNumBlocks(); i++) {
302 BasicBlock *BB = BasicBlock::Create(Context&: CI->getContext(), Name: "loadbb",
303 Parent: EndBlock->getParent(), InsertBefore: EndBlock);
304 LoadCmpBlocks.push_back(x: BB);
305 }
306}
307
308void MemCmpExpansion::createResultBlock() {
309 ResBlock.BB = BasicBlock::Create(Context&: CI->getContext(), Name: "res_block",
310 Parent: EndBlock->getParent(), InsertBefore: EndBlock);
311}
312
// Load a LoadSizeType value from each memcmp source at `OffsetBytes`,
// optionally byte-swapping (through BSwapSizeType) and zero-extending (to
// CmpSizeType) both values so that they can be compared with memcmp
// semantics. Loads from constant memory are folded to constants when
// possible.
MemCmpExpansion::LoadPair MemCmpExpansion::getLoadPair(Type *LoadSizeType,
                                                       Type *BSwapSizeType,
                                                       Type *CmpSizeType,
                                                       unsigned OffsetBytes) {
  // Get the memory source at offset `OffsetBytes`.
  Value *LhsSource = CI->getArgOperand(i: 0);
  Value *RhsSource = CI->getArgOperand(i: 1);
  Align LhsAlign = LhsSource->getPointerAlignment(DL);
  Align RhsAlign = RhsSource->getPointerAlignment(DL);
  if (OffsetBytes > 0) {
    auto *ByteType = Type::getInt8Ty(C&: CI->getContext());
    LhsSource = Builder.CreateConstGEP1_64(Ty: ByteType, Ptr: LhsSource, Idx0: OffsetBytes);
    RhsSource = Builder.CreateConstGEP1_64(Ty: ByteType, Ptr: RhsSource, Idx0: OffsetBytes);
    // The alignment known at `base + OffsetBytes` is the common alignment of
    // the base pointer and the byte offset.
    LhsAlign = commonAlignment(A: LhsAlign, Offset: OffsetBytes);
    RhsAlign = commonAlignment(A: RhsAlign, Offset: OffsetBytes);
  }

  // Create a constant or a load from the source.
  Value *Lhs = nullptr;
  if (auto *C = dyn_cast<Constant>(Val: LhsSource))
    Lhs = ConstantFoldLoadFromConstPtr(C, Ty: LoadSizeType, DL);
  if (!Lhs)
    Lhs = Builder.CreateAlignedLoad(Ty: LoadSizeType, Ptr: LhsSource, Align: LhsAlign);

  Value *Rhs = nullptr;
  if (auto *C = dyn_cast<Constant>(Val: RhsSource))
    Rhs = ConstantFoldLoadFromConstPtr(C, Ty: LoadSizeType, DL);
  if (!Rhs)
    Rhs = Builder.CreateAlignedLoad(Ty: LoadSizeType, Ptr: RhsSource, Align: RhsAlign);

  // Zero extend if Byte Swap intrinsic has different type
  if (BSwapSizeType && LoadSizeType != BSwapSizeType) {
    Lhs = Builder.CreateZExt(V: Lhs, DestTy: BSwapSizeType);
    Rhs = Builder.CreateZExt(V: Rhs, DestTy: BSwapSizeType);
  }

  // Swap bytes if required.
  if (BSwapSizeType) {
    Function *Bswap = Intrinsic::getOrInsertDeclaration(
        M: CI->getModule(), id: Intrinsic::bswap, OverloadTys: BSwapSizeType);
    Lhs = Builder.CreateCall(Callee: Bswap, Args: Lhs);
    Rhs = Builder.CreateCall(Callee: Bswap, Args: Rhs);
  }

  // Zero extend if required.
  if (CmpSizeType != nullptr && CmpSizeType != Lhs->getType()) {
    Lhs = Builder.CreateZExt(V: Lhs, DestTy: CmpSizeType);
    Rhs = Builder.CreateZExt(V: Rhs, DestTy: CmpSizeType);
  }
  return {.Lhs: Lhs, .Rhs: Rhs};
}
364
// This function creates the IR instructions for loading and comparing 1 byte.
// It loads 1 byte from each source of the memcmp parameters at the given
// OffsetBytes. It then subtracts the two loaded values and adds this result to
// the final phi node for selecting the memcmp result.
void MemCmpExpansion::emitLoadCompareByteBlock(unsigned BlockIndex,
                                               unsigned OffsetBytes) {
  BasicBlock *BB = LoadCmpBlocks[BlockIndex];
  Builder.SetInsertPoint(BB);
  // Both bytes are widened to i32 so that the subtraction directly yields
  // the i32 memcmp result for this byte.
  const LoadPair Loads =
      getLoadPair(LoadSizeType: Type::getInt8Ty(C&: CI->getContext()), BSwapSizeType: nullptr,
                  CmpSizeType: Type::getInt32Ty(C&: CI->getContext()), OffsetBytes);
  Value *Diff = Builder.CreateSub(LHS: Loads.Lhs, RHS: Loads.Rhs);

  PhiRes->addIncoming(V: Diff, BB);

  if (BlockIndex < (LoadCmpBlocks.size() - 1)) {
    // Early exit branch if difference found to EndBlock. Otherwise, continue to
    // next LoadCmpBlock.
    Value *Cmp = Builder.CreateICmp(P: ICmpInst::ICMP_NE, LHS: Diff,
                                    RHS: ConstantInt::get(Ty: Diff->getType(), V: 0));
    Builder.CreateCondBr(Cond: Cmp, True: EndBlock, False: LoadCmpBlocks[BlockIndex + 1]);
    if (DTU)
      DTU->applyUpdates(
          Updates: {{DominatorTree::Insert, BB, EndBlock},
           {DominatorTree::Insert, BB, LoadCmpBlocks[BlockIndex + 1]}});
  } else {
    // The last block has an unconditional branch to EndBlock.
    Builder.CreateBr(Dest: EndBlock);
    if (DTU)
      DTU->applyUpdates(Updates: {{DominatorTree::Insert, BB, EndBlock}});
  }
}
397
/// Generate an equality comparison for one or more pairs of loaded values.
/// This is used in the case where the memcmp() call is compared equal or not
/// equal to zero. Consumes up to NumLoadsPerBlockForZeroCmp entries of
/// LoadSequence starting at LoadIndex (which is advanced accordingly) and
/// returns an i1 that is true iff any compared pair differs.
Value *MemCmpExpansion::getCompareLoadPairs(unsigned BlockIndex,
                                            unsigned &LoadIndex) {
  assert(LoadIndex < getNumLoads() &&
         "getCompareLoadPairs() called with no remaining loads");
  std::vector<Value *> XorList, OrList;
  Value *Diff = nullptr;

  const unsigned NumLoads =
      std::min(a: getNumLoads() - LoadIndex, b: NumLoadsPerBlockForZeroCmp);

  // For a single-block expansion, start inserting before the memcmp call.
  if (LoadCmpBlocks.empty())
    Builder.SetInsertPoint(CI);
  else
    Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);

  Value *Cmp = nullptr;
  // If we have multiple loads per block, we need to generate a composite
  // comparison using xor+or. The type for the combinations is the largest load
  // type.
  IntegerType *const MaxLoadType =
      NumLoads == 1 ? nullptr
                    : IntegerType::get(C&: CI->getContext(), NumBits: MaxLoadSize * 8);

  for (unsigned i = 0; i < NumLoads; ++i, ++LoadIndex) {
    const LoadEntry &CurLoadEntry = LoadSequence[LoadIndex];
    // No byte swap needed: equality is endianness-independent.
    const LoadPair Loads = getLoadPair(
        LoadSizeType: IntegerType::get(C&: CI->getContext(), NumBits: CurLoadEntry.LoadSize * 8), BSwapSizeType: nullptr,
        CmpSizeType: MaxLoadType, OffsetBytes: CurLoadEntry.Offset);

    if (NumLoads != 1) {
      // If we have multiple loads per block, we need to generate a composite
      // comparison using xor+or.
      Diff = Builder.CreateXor(LHS: Loads.Lhs, RHS: Loads.Rhs);
      Diff = Builder.CreateZExt(V: Diff, DestTy: MaxLoadType);
      XorList.push_back(x: Diff);
    } else {
      // If there's only one load per block, we just compare the loaded values.
      Cmp = Builder.CreateICmpNE(LHS: Loads.Lhs, RHS: Loads.Rhs);
    }
  }

  // OR adjacent elements pairwise, halving the list each round; the odd
  // element (if any) is carried over unchanged.
  auto pairWiseOr = [&](std::vector<Value *> &InList) -> std::vector<Value *> {
    std::vector<Value *> OutList;
    for (unsigned i = 0; i < InList.size() - 1; i = i + 2) {
      Value *Or = Builder.CreateOr(LHS: InList[i], RHS: InList[i + 1]);
      OutList.push_back(x: Or);
    }
    if (InList.size() % 2 != 0)
      OutList.push_back(x: InList.back());
    return OutList;
  };

  if (!Cmp) {
    // Pairwise OR the XOR results.
    OrList = pairWiseOr(XorList);

    // Pairwise OR the OR results until one result left.
    while (OrList.size() != 1) {
      OrList = pairWiseOr(OrList);
    }

    assert(Diff && "Failed to find comparison diff");
    Cmp = Builder.CreateICmpNE(LHS: OrList[0], RHS: ConstantInt::get(Ty: Diff->getType(), V: 0));
  }

  return Cmp;
}
469
470void MemCmpExpansion::emitLoadCompareBlockMultipleLoads(unsigned BlockIndex,
471 unsigned &LoadIndex) {
472 Value *Cmp = getCompareLoadPairs(BlockIndex, LoadIndex);
473
474 BasicBlock *NextBB = (BlockIndex == (LoadCmpBlocks.size() - 1))
475 ? EndBlock
476 : LoadCmpBlocks[BlockIndex + 1];
477 // Early exit branch if difference found to ResultBlock. Otherwise,
478 // continue to next LoadCmpBlock or EndBlock.
479 BasicBlock *BB = Builder.GetInsertBlock();
480 CondBrInst *CmpBr = Builder.CreateCondBr(Cond: Cmp, True: ResBlock.BB, False: NextBB);
481 setExplicitlyUnknownBranchWeightsIfProfiled(I&: *CmpBr, DEBUG_TYPE,
482 F: CI->getFunction());
483 if (DTU)
484 DTU->applyUpdates(Updates: {{DominatorTree::Insert, BB, ResBlock.BB},
485 {DominatorTree::Insert, BB, NextBB}});
486
487 // Add a phi edge for the last LoadCmpBlock to Endblock with a value of 0
488 // since early exit to ResultBlock was not taken (no difference was found in
489 // any of the bytes).
490 if (BlockIndex == LoadCmpBlocks.size() - 1) {
491 Value *Zero = ConstantInt::get(Ty: Type::getInt32Ty(C&: CI->getContext()), V: 0);
492 PhiRes->addIncoming(V: Zero, BB: LoadCmpBlocks[BlockIndex]);
493 }
494}
495
496// This function creates the IR intructions for loading and comparing using the
497// given LoadSize. It loads the number of bytes specified by LoadSize from each
498// source of the memcmp parameters. It then does a subtract to see if there was
499// a difference in the loaded values. If a difference is found, it branches
500// with an early exit to the ResultBlock for calculating which source was
501// larger. Otherwise, it falls through to the either the next LoadCmpBlock or
502// the EndBlock if this is the last LoadCmpBlock. Loading 1 byte is handled with
503// a special case through emitLoadCompareByteBlock. The special handling can
504// simply subtract the loaded values and add it to the result phi node.
505void MemCmpExpansion::emitLoadCompareBlock(unsigned BlockIndex) {
506 // There is one load per block in this case, BlockIndex == LoadIndex.
507 const LoadEntry &CurLoadEntry = LoadSequence[BlockIndex];
508
509 if (CurLoadEntry.LoadSize == 1) {
510 MemCmpExpansion::emitLoadCompareByteBlock(BlockIndex, OffsetBytes: CurLoadEntry.Offset);
511 return;
512 }
513
514 Type *LoadSizeType =
515 IntegerType::get(C&: CI->getContext(), NumBits: CurLoadEntry.LoadSize * 8);
516 Type *BSwapSizeType =
517 DL.isLittleEndian()
518 ? IntegerType::get(C&: CI->getContext(),
519 NumBits: PowerOf2Ceil(A: CurLoadEntry.LoadSize * 8))
520 : nullptr;
521 Type *MaxLoadType = IntegerType::get(
522 C&: CI->getContext(),
523 NumBits: std::max(a: MaxLoadSize, b: (unsigned)PowerOf2Ceil(A: CurLoadEntry.LoadSize)) * 8);
524 assert(CurLoadEntry.LoadSize <= MaxLoadSize && "Unexpected load type");
525
526 Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);
527
528 const LoadPair Loads = getLoadPair(LoadSizeType, BSwapSizeType, CmpSizeType: MaxLoadType,
529 OffsetBytes: CurLoadEntry.Offset);
530
531 // Add the loaded values to the phi nodes for calculating memcmp result only
532 // if result is not used in a zero equality.
533 if (!IsUsedForZeroCmp) {
534 ResBlock.PhiSrc1->addIncoming(V: Loads.Lhs, BB: LoadCmpBlocks[BlockIndex]);
535 ResBlock.PhiSrc2->addIncoming(V: Loads.Rhs, BB: LoadCmpBlocks[BlockIndex]);
536 }
537
538 Value *Cmp = Builder.CreateICmp(P: ICmpInst::ICMP_EQ, LHS: Loads.Lhs, RHS: Loads.Rhs);
539 BasicBlock *NextBB = (BlockIndex == (LoadCmpBlocks.size() - 1))
540 ? EndBlock
541 : LoadCmpBlocks[BlockIndex + 1];
542 // Early exit branch if difference found to ResultBlock. Otherwise, continue
543 // to next LoadCmpBlock or EndBlock.
544 BasicBlock *BB = Builder.GetInsertBlock();
545 CondBrInst *CmpBr = Builder.CreateCondBr(Cond: Cmp, True: NextBB, False: ResBlock.BB);
546 setExplicitlyUnknownBranchWeightsIfProfiled(I&: *CmpBr, DEBUG_TYPE,
547 F: CI->getFunction());
548 if (DTU)
549 DTU->applyUpdates(Updates: {{DominatorTree::Insert, BB, NextBB},
550 {DominatorTree::Insert, BB, ResBlock.BB}});
551
552 // Add a phi edge for the last LoadCmpBlock to Endblock with a value of 0
553 // since early exit to ResultBlock was not taken (no difference was found in
554 // any of the bytes).
555 if (BlockIndex == LoadCmpBlocks.size() - 1) {
556 Value *Zero = ConstantInt::get(Ty: Type::getInt32Ty(C&: CI->getContext()), V: 0);
557 PhiRes->addIncoming(V: Zero, BB: LoadCmpBlocks[BlockIndex]);
558 }
559}
560
// This function populates the ResultBlock with a sequence to calculate the
// memcmp result. It compares the two loaded source values and returns -1 if
// src1 < src2 and 1 if src1 > src2.
void MemCmpExpansion::emitMemCmpResultBlock() {
  // Special case: if memcmp result is used in a zero equality, result does not
  // need to be calculated and can simply return 1.
  if (IsUsedForZeroCmp) {
    BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
    Builder.SetInsertPoint(TheBB: ResBlock.BB, IP: InsertPt);
    Value *Res = ConstantInt::get(Ty: Type::getInt32Ty(C&: CI->getContext()), V: 1);
    PhiRes->addIncoming(V: Res, BB: ResBlock.BB);
    Builder.CreateBr(Dest: EndBlock);
    if (DTU)
      DTU->applyUpdates(Updates: {{DominatorTree::Insert, ResBlock.BB, EndBlock}});
    return;
  }
  BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
  Builder.SetInsertPoint(TheBB: ResBlock.BB, IP: InsertPt);

  // The PHIs hold byte-swapped values, so an unsigned comparison gives the
  // memcmp (lexicographic byte) ordering.
  Value *Cmp = Builder.CreateICmp(P: ICmpInst::ICMP_ULT, LHS: ResBlock.PhiSrc1,
                                  RHS: ResBlock.PhiSrc2);

  // ResultBlock is only reached when a difference was found, so the result
  // is either -1 (src1 < src2) or +1 (src1 > src2), never 0.
  Value *Res =
      Builder.CreateSelect(C: Cmp, True: Constant::getAllOnesValue(Ty: Builder.getInt32Ty()),
                           False: ConstantInt::get(Ty: Builder.getInt32Ty(), V: 1));
  setExplicitlyUnknownBranchWeightsIfProfiled(I&: *cast<Instruction>(Val: Res),
                                              DEBUG_TYPE, F: CI->getFunction());

  PhiRes->addIncoming(V: Res, BB: ResBlock.BB);
  Builder.CreateBr(Dest: EndBlock);
  if (DTU)
    DTU->applyUpdates(Updates: {{DominatorTree::Insert, ResBlock.BB, EndBlock}});
}
594
595void MemCmpExpansion::setupResultBlockPHINodes() {
596 Type *MaxLoadType = IntegerType::get(C&: CI->getContext(), NumBits: MaxLoadSize * 8);
597 Builder.SetInsertPoint(ResBlock.BB);
598 // Note: this assumes one load per block.
599 ResBlock.PhiSrc1 =
600 Builder.CreatePHI(Ty: MaxLoadType, NumReservedValues: NumLoadsNonOneByte, Name: "phi.src1");
601 ResBlock.PhiSrc2 =
602 Builder.CreatePHI(Ty: MaxLoadType, NumReservedValues: NumLoadsNonOneByte, Name: "phi.src2");
603}
604
605void MemCmpExpansion::setupEndBlockPHINodes() {
606 Builder.SetInsertPoint(TheBB: EndBlock, IP: EndBlock->begin());
607 PhiRes = Builder.CreatePHI(Ty: Type::getInt32Ty(C&: CI->getContext()), NumReservedValues: 2, Name: "phi.res");
608}
609
610Value *MemCmpExpansion::getMemCmpExpansionZeroCase() {
611 unsigned LoadIndex = 0;
612 // This loop populates each of the LoadCmpBlocks with the IR sequence to
613 // handle multiple loads per block.
614 for (unsigned I = 0; I < getNumBlocks(); ++I) {
615 emitLoadCompareBlockMultipleLoads(BlockIndex: I, LoadIndex);
616 }
617
618 emitMemCmpResultBlock();
619 return PhiRes;
620}
621
622/// A memcmp expansion that compares equality with 0 and only has one block of
623/// load and compare can bypass the compare, branch, and phi IR that is required
624/// in the general case.
625Value *MemCmpExpansion::getMemCmpEqZeroOneBlock() {
626 unsigned LoadIndex = 0;
627 Value *Cmp = getCompareLoadPairs(BlockIndex: 0, LoadIndex);
628 assert(LoadIndex == getNumLoads() && "some entries were not consumed");
629 return Builder.CreateZExt(V: Cmp, DestTy: Type::getInt32Ty(C&: CI->getContext()));
630}
631
/// A memcmp expansion that only has one block of load and compare can bypass
/// the compare, branch, and phi IR that is required in the general case.
/// This function also analyses users of memcmp, and if there is only one user
/// from which we can conclude that only 2 out of 3 memcmp outcomes really
/// matter, then it generates more efficient code with only one comparison.
/// May erase the memcmp call (and its single user) and return nullptr.
Value *MemCmpExpansion::getMemCmpOneBlock() {
  // Byte-swap to big-endian byte order, except for the single-byte case
  // where byte order is irrelevant.
  bool NeedsBSwap = DL.isLittleEndian() && Size != 1;
  Type *LoadSizeType = IntegerType::get(C&: CI->getContext(), NumBits: Size * 8);
  Type *BSwapSizeType =
      NeedsBSwap ? IntegerType::get(C&: CI->getContext(), NumBits: PowerOf2Ceil(A: Size * 8))
                 : nullptr;
  Type *MaxLoadType =
      IntegerType::get(C&: CI->getContext(),
                       NumBits: std::max(a: MaxLoadSize, b: (unsigned)PowerOf2Ceil(A: Size)) * 8);

  // The i8 and i16 cases don't need compares. We zext the loaded values and
  // subtract them to get the suitable negative, zero, or positive i32 result.
  if (Size == 1 || Size == 2) {
    const LoadPair Loads = getLoadPair(LoadSizeType, BSwapSizeType,
                                       CmpSizeType: Builder.getInt32Ty(), /*Offset*/ OffsetBytes: 0);
    return Builder.CreateSub(LHS: Loads.Lhs, RHS: Loads.Rhs);
  }

  const LoadPair Loads = getLoadPair(LoadSizeType, BSwapSizeType, CmpSizeType: MaxLoadType,
                                     /*Offset*/ OffsetBytes: 0);

  // If a user of memcmp cares only about two outcomes, for example:
  //    bool result = memcmp(a, b, NBYTES) > 0;
  // We can generate more optimal code with a smaller number of operations
  if (CI->hasOneUser()) {
    auto *UI = cast<Instruction>(Val: *CI->user_begin());
    CmpPredicate Pred = ICmpInst::Predicate::BAD_ICMP_PREDICATE;
    bool NeedsZExt = false;
    // This is a special case because instead of checking if the result is less
    // than zero:
    //    bool result = memcmp(a, b, NBYTES) < 0;
    // Compiler is clever enough to generate the following code:
    //    bool result = memcmp(a, b, NBYTES) >> 31;
    if (match(V: UI,
              P: m_LShr(L: m_Value(),
                      R: m_SpecificInt(V: CI->getType()->getIntegerBitWidth() - 1)))) {
      Pred = ICmpInst::ICMP_SLT;
      NeedsZExt = true;
    } else if (match(V: UI, P: m_SpecificICmp(MatchPred: ICmpInst::ICMP_SGT, L: m_Specific(V: CI),
                                         R: m_AllOnes()))) {
      // Adjust predicate as if it compared with 0.
      Pred = ICmpInst::ICMP_SGE;
    } else if (match(V: UI, P: m_SpecificICmp(MatchPred: ICmpInst::ICMP_SLT, L: m_Specific(V: CI),
                                         R: m_One()))) {
      // Adjust predicate as if it compared with 0.
      Pred = ICmpInst::ICMP_SLE;
    } else {
      // In case of a successful match this call will set `Pred` variable
      match(V: UI, P: m_ICmp(Pred, L: m_Specific(V: CI), R: m_Zero()));
    }
    // Generate new code and remove the original memcmp call and the user
    if (ICmpInst::isSigned(Pred)) {
      // The loaded (byte-swapped) values compare as unsigned integers, so a
      // signed predicate on the memcmp result maps to the corresponding
      // unsigned comparison of the loaded values.
      Value *Cmp = Builder.CreateICmp(P: ICmpInst::getUnsignedPredicate(Pred),
                                      LHS: Loads.Lhs, RHS: Loads.Rhs);
      auto *Result = NeedsZExt ? Builder.CreateZExt(V: Cmp, DestTy: UI->getType()) : Cmp;
      UI->replaceAllUsesWith(V: Result);
      UI->eraseFromParent();
      CI->eraseFromParent();
      return nullptr;
    }
  }

  // The result of memcmp is negative, zero, or positive.
  return Builder.CreateIntrinsic(RetTy: Builder.getInt32Ty(), ID: Intrinsic::ucmp,
                                 Args: {Loads.Lhs, Loads.Rhs});
}
703
// This function expands the memcmp call into an inline expansion and returns
// the memcmp result. Returns nullptr if the memcmp is already replaced.
Value *MemCmpExpansion::getMemCmpExpansion() {
  // Create the basic block framework for a multi-block expansion.
  if (getNumBlocks() != 1) {
    BasicBlock *StartBlock = CI->getParent();
    // Everything from CI onwards moves to "endblock"; the load/compare
    // blocks are inserted between StartBlock and EndBlock.
    EndBlock = SplitBlock(Old: StartBlock, SplitPt: CI, DTU, /*LI=*/nullptr,
                          /*MSSAU=*/nullptr, BBName: "endblock");
    setupEndBlockPHINodes();
    createResultBlock();

    // If return value of memcmp is not used in a zero equality, we need to
    // calculate which source was larger. The calculation requires the
    // two loaded source values of each load compare block.
    // These will be saved in the phi nodes created by setupResultBlockPHINodes.
    if (!IsUsedForZeroCmp) setupResultBlockPHINodes();

    // Create the number of required load compare basic blocks.
    createLoadCmpBlocks();

    // Update the terminator added by SplitBlock to branch to the first
    // LoadCmpBlock.
    StartBlock->getTerminator()->setSuccessor(Idx: 0, BB: LoadCmpBlocks[0]);
    if (DTU)
      DTU->applyUpdates(Updates: {{DominatorTree::Insert, StartBlock, LoadCmpBlocks[0]},
                         {DominatorTree::Delete, StartBlock, EndBlock}});
  }

  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  if (IsUsedForZeroCmp)
    return getNumBlocks() == 1 ? getMemCmpEqZeroOneBlock()
                               : getMemCmpExpansionZeroCase();

  if (getNumBlocks() == 1)
    return getMemCmpOneBlock();

  for (unsigned I = 0; I < getNumBlocks(); ++I) {
    emitLoadCompareBlock(BlockIndex: I);
  }

  emitMemCmpResultBlock();
  return PhiRes;
}
748
749// This function checks to see if an expansion of memcmp can be generated.
750// It checks for constant compare size that is less than the max inline size.
751// If an expansion cannot occur, returns false to leave as a library call.
752// Otherwise, the library call is replaced with a new IR instruction sequence.
753/// We want to transform:
754/// %call = call signext i32 @memcmp(i8* %0, i8* %1, i64 15)
755/// To:
756/// loadbb:
757/// %0 = bitcast i32* %buffer2 to i8*
758/// %1 = bitcast i32* %buffer1 to i8*
759/// %2 = bitcast i8* %1 to i64*
760/// %3 = bitcast i8* %0 to i64*
761/// %4 = load i64, i64* %2
762/// %5 = load i64, i64* %3
763/// %6 = call i64 @llvm.bswap.i64(i64 %4)
764/// %7 = call i64 @llvm.bswap.i64(i64 %5)
765/// %8 = sub i64 %6, %7
766/// %9 = icmp ne i64 %8, 0
767/// br i1 %9, label %res_block, label %loadbb1
768/// res_block: ; preds = %loadbb2,
769/// %loadbb1, %loadbb
770/// %phi.src1 = phi i64 [ %6, %loadbb ], [ %22, %loadbb1 ], [ %36, %loadbb2 ]
771/// %phi.src2 = phi i64 [ %7, %loadbb ], [ %23, %loadbb1 ], [ %37, %loadbb2 ]
772/// %10 = icmp ult i64 %phi.src1, %phi.src2
773/// %11 = select i1 %10, i32 -1, i32 1
774/// br label %endblock
775/// loadbb1: ; preds = %loadbb
776/// %12 = bitcast i32* %buffer2 to i8*
777/// %13 = bitcast i32* %buffer1 to i8*
778/// %14 = bitcast i8* %13 to i32*
779/// %15 = bitcast i8* %12 to i32*
780/// %16 = getelementptr i32, i32* %14, i32 2
781/// %17 = getelementptr i32, i32* %15, i32 2
782/// %18 = load i32, i32* %16
783/// %19 = load i32, i32* %17
784/// %20 = call i32 @llvm.bswap.i32(i32 %18)
785/// %21 = call i32 @llvm.bswap.i32(i32 %19)
786/// %22 = zext i32 %20 to i64
787/// %23 = zext i32 %21 to i64
788/// %24 = sub i64 %22, %23
789/// %25 = icmp ne i64 %24, 0
790/// br i1 %25, label %res_block, label %loadbb2
791/// loadbb2: ; preds = %loadbb1
792/// %26 = bitcast i32* %buffer2 to i8*
793/// %27 = bitcast i32* %buffer1 to i8*
794/// %28 = bitcast i8* %27 to i16*
795/// %29 = bitcast i8* %26 to i16*
796/// %30 = getelementptr i16, i16* %28, i16 6
797/// %31 = getelementptr i16, i16* %29, i16 6
798/// %32 = load i16, i16* %30
799/// %33 = load i16, i16* %31
800/// %34 = call i16 @llvm.bswap.i16(i16 %32)
801/// %35 = call i16 @llvm.bswap.i16(i16 %33)
802/// %36 = zext i16 %34 to i64
803/// %37 = zext i16 %35 to i64
804/// %38 = sub i64 %36, %37
805/// %39 = icmp ne i64 %38, 0
806/// br i1 %39, label %res_block, label %loadbb3
807/// loadbb3: ; preds = %loadbb2
808/// %40 = bitcast i32* %buffer2 to i8*
809/// %41 = bitcast i32* %buffer1 to i8*
810/// %42 = getelementptr i8, i8* %41, i8 14
811/// %43 = getelementptr i8, i8* %40, i8 14
812/// %44 = load i8, i8* %42
813/// %45 = load i8, i8* %43
814/// %46 = zext i8 %44 to i32
815/// %47 = zext i8 %45 to i32
816/// %48 = sub i32 %46, %47
817/// br label %endblock
818/// endblock: ; preds = %res_block,
819/// %loadbb3
820/// %phi.res = phi i32 [ %48, %loadbb3 ], [ %11, %res_block ]
821/// ret i32 %phi.res
822static bool expandMemCmp(CallInst *CI, const TargetTransformInfo *TTI,
823 const DataLayout *DL, ProfileSummaryInfo *PSI,
824 BlockFrequencyInfo *BFI, DomTreeUpdater *DTU,
825 const bool IsBCmp) {
826 NumMemCmpCalls++;
827
828 // Early exit from expansion if -Oz.
829 if (CI->getFunction()->hasMinSize())
830 return false;
831
832 // Early exit from expansion if size is not a constant.
833 ConstantInt *SizeCast = dyn_cast<ConstantInt>(Val: CI->getArgOperand(i: 2));
834 if (!SizeCast) {
835 NumMemCmpNotConstant++;
836 return false;
837 }
838 const uint64_t SizeVal = SizeCast->getZExtValue();
839
840 if (SizeVal == 0) {
841 return false;
842 }
843 // TTI call to check if target would like to expand memcmp. Also, get the
844 // available load sizes.
845 const bool IsUsedForZeroCmp =
846 IsBCmp || isOnlyUsedInZeroEqualityComparison(CxtI: CI);
847 bool OptForSize = llvm::shouldOptimizeForSize(BB: CI->getParent(), PSI, BFI);
848 auto Options = TTI->enableMemCmpExpansion(OptSize: OptForSize,
849 IsZeroCmp: IsUsedForZeroCmp);
850 if (!Options) return false;
851
852 if (MemCmpEqZeroNumLoadsPerBlock.getNumOccurrences())
853 Options.NumLoadsPerBlock = MemCmpEqZeroNumLoadsPerBlock;
854
855 if (OptForSize &&
856 MaxLoadsPerMemcmpOptSize.getNumOccurrences())
857 Options.MaxNumLoads = MaxLoadsPerMemcmpOptSize;
858
859 if (!OptForSize && MaxLoadsPerMemcmp.getNumOccurrences())
860 Options.MaxNumLoads = MaxLoadsPerMemcmp;
861
862 MemCmpExpansion Expansion(CI, SizeVal, Options, IsUsedForZeroCmp, *DL, DTU);
863
864 // Don't expand if this will require more loads than desired by the target.
865 if (Expansion.getNumLoads() == 0) {
866 NumMemCmpGreaterThanMax++;
867 return false;
868 }
869
870 NumMemCmpInlined++;
871
872 if (Value *Res = Expansion.getMemCmpExpansion()) {
873 // Replace call with result of expansion and erase call.
874 CI->replaceAllUsesWith(V: Res);
875 CI->eraseFromParent();
876 }
877
878 return true;
879}
880
881// Returns true if a change was made.
882static bool runOnBlock(BasicBlock &BB, const TargetLibraryInfo *TLI,
883 const TargetTransformInfo *TTI, const DataLayout &DL,
884 ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI,
885 DomTreeUpdater *DTU);
886
887static PreservedAnalyses runImpl(Function &F, const TargetLibraryInfo *TLI,
888 const TargetTransformInfo *TTI,
889 ProfileSummaryInfo *PSI,
890 BlockFrequencyInfo *BFI, DominatorTree *DT);
891
892bool runOnBlock(BasicBlock &BB, const TargetLibraryInfo *TLI,
893 const TargetTransformInfo *TTI, const DataLayout &DL,
894 ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI,
895 DomTreeUpdater *DTU) {
896 for (Instruction &I : BB) {
897 CallInst *CI = dyn_cast<CallInst>(Val: &I);
898 if (!CI) {
899 continue;
900 }
901 LibFunc Func;
902 if (TLI->getLibFunc(CB: *CI, F&: Func) &&
903 (Func == LibFunc_memcmp || Func == LibFunc_bcmp) &&
904 expandMemCmp(CI, TTI, DL: &DL, PSI, BFI, DTU, IsBCmp: Func == LibFunc_bcmp)) {
905 return true;
906 }
907 }
908 return false;
909}
910
911PreservedAnalyses runImpl(Function &F, const TargetLibraryInfo *TLI,
912 const TargetTransformInfo *TTI,
913 ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI,
914 DominatorTree *DT) {
915 std::optional<DomTreeUpdater> DTU;
916 if (DT)
917 DTU.emplace(args&: DT, args: DomTreeUpdater::UpdateStrategy::Lazy);
918
919 const DataLayout& DL = F.getDataLayout();
920 bool MadeChanges = false;
921 for (auto BBIt = F.begin(); BBIt != F.end();) {
922 if (runOnBlock(BB&: *BBIt, TLI, TTI, DL, PSI, BFI, DTU: DTU ? &*DTU : nullptr)) {
923 MadeChanges = true;
924 // If changes were made, restart the function from the beginning, since
925 // the structure of the function was changed.
926 BBIt = F.begin();
927 } else {
928 ++BBIt;
929 }
930 }
931 if (MadeChanges)
932 for (BasicBlock &BB : F)
933 SimplifyInstructionsInBlock(BB: &BB);
934 if (!MadeChanges)
935 return PreservedAnalyses::all();
936 PreservedAnalyses PA;
937 PA.preserve<DominatorTreeAnalysis>();
938 return PA;
939}
940
941} // namespace
942
943PreservedAnalyses ExpandMemCmpPass::run(Function &F,
944 FunctionAnalysisManager &FAM) {
945 // Don't expand memcmp in sanitized functions — sanitizers intercept memcmp
946 // calls to check for memory errors, and expanding would bypass that.
947 if (F.hasFnAttribute(Kind: Attribute::SanitizeAddress) ||
948 F.hasFnAttribute(Kind: Attribute::SanitizeMemory) ||
949 F.hasFnAttribute(Kind: Attribute::SanitizeThread) ||
950 F.hasFnAttribute(Kind: Attribute::SanitizeHWAddress))
951 return PreservedAnalyses::all();
952
953 const auto &TLI = FAM.getResult<TargetLibraryAnalysis>(IR&: F);
954 const auto &TTI = FAM.getResult<TargetIRAnalysis>(IR&: F);
955 auto *PSI = FAM.getResult<ModuleAnalysisManagerFunctionProxy>(IR&: F)
956 .getCachedResult<ProfileSummaryAnalysis>(IR&: *F.getParent());
957 BlockFrequencyInfo *BFI = (PSI && PSI->hasProfileSummary())
958 ? &FAM.getResult<BlockFrequencyAnalysis>(IR&: F)
959 : nullptr;
960 auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(IR&: F);
961
962 return runImpl(F, TLI: &TLI, TTI: &TTI, PSI, BFI, DT);
963}
964