1//===- AArch64TargetTransformInfo.h - AArch64 specific TTI ------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
/// This file defines a TargetTransformInfoImplBase conforming object specific
10/// AArch64 target machine. It uses the target's detailed information to
11/// provide more precise answers to certain TTI queries, while letting the
12/// target independent and default TTI implementations handle the rest.
13///
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64TARGETTRANSFORMINFO_H
17#define LLVM_LIB_TARGET_AARCH64_AARCH64TARGETTRANSFORMINFO_H
18
19#include "AArch64.h"
20#include "AArch64Subtarget.h"
21#include "AArch64TargetMachine.h"
22#include "llvm/Analysis/TargetTransformInfo.h"
23#include "llvm/CodeGen/BasicTTIImpl.h"
24#include "llvm/IR/FMF.h"
25#include "llvm/IR/Function.h"
26#include "llvm/IR/Intrinsics.h"
27#include "llvm/Support/InstructionCost.h"
28#include <cstdint>
29#include <optional>
30
31namespace llvm {
32
33class APInt;
34class Instruction;
35class IntrinsicInst;
36class Loop;
37class SCEV;
38class ScalarEvolution;
39class Type;
40class Value;
41class VectorType;
42
43class AArch64TTIImpl final : public BasicTTIImplBase<AArch64TTIImpl> {
44 using BaseT = BasicTTIImplBase<AArch64TTIImpl>;
45 using TTI = TargetTransformInfo;
46
47 friend BaseT;
48
49 const AArch64Subtarget *ST;
50 const AArch64TargetLowering *TLI;
51
52 static const FeatureBitset InlineInverseFeatures;
53
54 const AArch64Subtarget *getST() const { return ST; }
55 const AArch64TargetLowering *getTLI() const { return TLI; }
56
57 enum MemIntrinsicType {
58 VECTOR_LDST_TWO_ELEMENTS,
59 VECTOR_LDST_THREE_ELEMENTS,
60 VECTOR_LDST_FOUR_ELEMENTS
61 };
62
63 /// Given a add/sub/mul operation, detect a widening addl/subl/mull pattern
64 /// where both operands can be treated like extends. Returns the minimal type
65 /// needed to compute the operation.
66 Type *isBinExtWideningInstruction(unsigned Opcode, Type *DstTy,
67 ArrayRef<const Value *> Args,
68 Type *SrcOverrideTy = nullptr) const;
69 /// Given a add/sub operation with a single extend operand, detect a
70 /// widening addw/subw pattern.
71 bool isSingleExtWideningInstruction(unsigned Opcode, Type *DstTy,
72 ArrayRef<const Value *> Args,
73 Type *SrcOverrideTy = nullptr) const;
74
75 // A helper function called by 'getVectorInstrCost'.
76 //
77 // 'Val' and 'Index' are forwarded from 'getVectorInstrCost';
78 // \param ScalarUserAndIdx encodes the information about extracts from a
79 /// vector with 'Scalar' being the value being extracted,'User' being the user
80 /// of the extract(nullptr if user is not known before vectorization) and
81 /// 'Idx' being the extract lane.
82 InstructionCost getVectorInstrCostHelper(
83 unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
84 const Instruction *I = nullptr, Value *Scalar = nullptr,
85 ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx = {},
86 TTI::VectorInstrContext VIC = TTI::VectorInstrContext::None) const;
87
88public:
89 explicit AArch64TTIImpl(const AArch64TargetMachine *TM, const Function &F)
90 : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
91 TLI(ST->getTargetLowering()) {}
92
93 bool areInlineCompatible(const Function *Caller,
94 const Function *Callee) const override;
95
96 bool areTypesABICompatible(const Function *Caller, const Function *Callee,
97 ArrayRef<Type *> Types) const override;
98
99 unsigned getInlineCallPenalty(const Function *F, const CallBase &Call,
100 unsigned DefaultCallPenalty) const override;
101
102 APInt getFeatureMask(const Function &F) const override;
103 APInt getPriorityMask(const Function &F) const override;
104
105 bool isMultiversionedFunction(const Function &F) const override;
106
107 /// \name Scalar TTI Implementations
108 /// @{
109
110 using BaseT::getIntImmCost;
111 InstructionCost getIntImmCost(int64_t Val) const;
112 InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
113 TTI::TargetCostKind CostKind) const override;
114 InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
115 const APInt &Imm, Type *Ty,
116 TTI::TargetCostKind CostKind,
117 Instruction *Inst = nullptr) const override;
118 InstructionCost
119 getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
120 Type *Ty, TTI::TargetCostKind CostKind) const override;
121 TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override;
122
123 /// @}
124
125 /// \name Vector TTI Implementations
126 /// @{
127
128 bool enableInterleavedAccessVectorization() const override { return true; }
129
130 bool enableMaskedInterleavedAccessVectorization() const override {
131 return ST->hasSVE();
132 }
133
134 unsigned getNumberOfRegisters(unsigned ClassID) const override {
135 bool Vector = (ClassID == 1);
136 if (Vector) {
137 if (ST->hasNEON())
138 return 32;
139 return 0;
140 }
141 return 31;
142 }
143
144 InstructionCost
145 getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
146 TTI::TargetCostKind CostKind) const override;
147
148 std::optional<Instruction *>
149 instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override;
150
151 std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
152 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
153 APInt &UndefElts2, APInt &UndefElts3,
154 std::function<void(Instruction *, unsigned, APInt, APInt &)>
155 SimplifyAndSetOp) const override;
156
157 TypeSize
158 getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const override;
159
160 unsigned getMinVectorRegisterBitWidth() const override {
161 return ST->getMinVectorRegisterBitWidth();
162 }
163
164 std::optional<unsigned> getVScaleForTuning() const override {
165 return ST->getVScaleForTuning();
166 }
167
168 bool shouldMaximizeVectorBandwidth(
169 TargetTransformInfo::RegisterKind K) const override;
170
171 /// Try to return an estimate cost factor that can be used as a multiplier
172 /// when scalarizing an operation for a vector with ElementCount \p VF.
173 /// For scalable vectors this currently takes the most pessimistic view based
174 /// upon the maximum possible value for vscale.
175 unsigned getMaxNumElements(ElementCount VF) const {
176 if (!VF.isScalable())
177 return VF.getFixedValue();
178
179 return VF.getKnownMinValue() * ST->getVScaleForTuning();
180 }
181
182 unsigned getMaxInterleaveFactor(ElementCount VF) const override;
183
184 bool prefersVectorizedAddressing() const override;
185
186 /// Check whether Opcode1 has less throughput according to the scheduling
187 /// model than Opcode2.
188 bool hasKnownLowerThroughputFromSchedulingModel(unsigned Opcode1,
189 unsigned Opcode2) const;
190
191 InstructionCost
192 getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA,
193 TTI::TargetCostKind CostKind) const override;
194
195 InstructionCost getMaskedMemoryOpCost(const MemIntrinsicCostAttributes &MICA,
196 TTI::TargetCostKind CostKind) const;
197
198 InstructionCost getGatherScatterOpCost(const MemIntrinsicCostAttributes &MICA,
199 TTI::TargetCostKind CostKind) const;
200
201 bool isExtPartOfAvgExpr(const Instruction *ExtUser, Type *Dst,
202 Type *Src) const;
203
204 InstructionCost
205 getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
206 TTI::CastContextHint CCH, TTI::TargetCostKind CostKind,
207 const Instruction *I = nullptr) const override;
208
209 InstructionCost
210 getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
211 unsigned Index,
212 TTI::TargetCostKind CostKind) const override;
213
214 InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
215 const Instruction *I = nullptr) const override;
216
217 InstructionCost
218 getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind,
219 unsigned Index, const Value *Op0, const Value *Op1,
220 TTI::VectorInstrContext VIC =
221 TTI::VectorInstrContext::None) const override;
222
223 /// \param ScalarUserAndIdx encodes the information about extracts from a
224 /// vector with 'Scalar' being the value being extracted,'User' being the user
225 /// of the extract(nullptr if user is not known before vectorization) and
226 /// 'Idx' being the extract lane.
227 InstructionCost getVectorInstrCost(
228 unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
229 Value *Scalar,
230 ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx,
231 TTI::VectorInstrContext VIC =
232 TTI::VectorInstrContext::None) const override;
233
234 InstructionCost
235 getVectorInstrCost(const Instruction &I, Type *Val,
236 TTI::TargetCostKind CostKind, unsigned Index,
237 TTI::VectorInstrContext VIC =
238 TTI::VectorInstrContext::None) const override;
239
240 InstructionCost
241 getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val,
242 TTI::TargetCostKind CostKind,
243 unsigned Index) const override;
244
245 InstructionCost
246 getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF,
247 TTI::TargetCostKind CostKind) const override;
248
249 InstructionCost
250 getArithmeticReductionCostSVE(unsigned Opcode, VectorType *ValTy,
251 TTI::TargetCostKind CostKind) const;
252
253 InstructionCost getSpliceCost(VectorType *Tp, int Index,
254 TTI::TargetCostKind CostKind) const;
255
256 InstructionCost getArithmeticInstrCost(
257 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
258 TTI::OperandValueInfo Op1Info = {.Kind: TTI::OK_AnyValue, .Properties: TTI::OP_None},
259 TTI::OperandValueInfo Op2Info = {.Kind: TTI::OK_AnyValue, .Properties: TTI::OP_None},
260 ArrayRef<const Value *> Args = {},
261 const Instruction *CxtI = nullptr) const override;
262
263 InstructionCost
264 getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE, const SCEV *Ptr,
265 TTI::TargetCostKind CostKind) const override;
266
267 InstructionCost getCmpSelInstrCost(
268 unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
269 TTI::TargetCostKind CostKind,
270 TTI::OperandValueInfo Op1Info = {.Kind: TTI::OK_AnyValue, .Properties: TTI::OP_None},
271 TTI::OperandValueInfo Op2Info = {.Kind: TTI::OK_AnyValue, .Properties: TTI::OP_None},
272 const Instruction *I = nullptr) const override;
273
274 TTI::MemCmpExpansionOptions
275 enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const override;
276 bool useNeonVector(const Type *Ty) const;
277
278 InstructionCost getMemoryOpCost(
279 unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
280 TTI::TargetCostKind CostKind,
281 TTI::OperandValueInfo OpInfo = {.Kind: TTI::OK_AnyValue, .Properties: TTI::OP_None},
282 const Instruction *I = nullptr) const override;
283
284 InstructionCost
285 getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const override;
286
287 void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
288 TTI::UnrollingPreferences &UP,
289 OptimizationRemarkEmitter *ORE) const override;
290
291 void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
292 TTI::PeelingPreferences &PP) const override;
293
294 Value *
295 getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst, Type *ExpectedType,
296 bool CanCreate = true) const override;
297
298 bool getTgtMemIntrinsic(IntrinsicInst *Inst,
299 MemIntrinsicInfo &Info) const override;
300
301 bool isElementTypeLegalForScalableVector(Type *Ty) const override {
302 if (Ty->isPointerTy())
303 return true;
304
305 if (Ty->isBFloatTy() && ST->hasBF16())
306 return true;
307
308 if (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())
309 return true;
310
311 if (Ty->isIntegerTy(Bitwidth: 1) || Ty->isIntegerTy(Bitwidth: 8) || Ty->isIntegerTy(Bitwidth: 16) ||
312 Ty->isIntegerTy(Bitwidth: 32) || Ty->isIntegerTy(Bitwidth: 64))
313 return true;
314
315 return false;
316 }
317
318 bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) const {
319 if (!ST->isSVEorStreamingSVEAvailable())
320 return false;
321
322 // For fixed vectors, avoid scalarization if using SVE for them.
323 if (isa<FixedVectorType>(Val: DataType) && !ST->useSVEForFixedLengthVectors() &&
324 DataType->getPrimitiveSizeInBits() != 128)
325 return false; // Fall back to scalarization of masked operations.
326
327 return isElementTypeLegalForScalableVector(Ty: DataType->getScalarType());
328 }
329
330 bool isLegalMaskedLoad(Type *DataType, Align Alignment,
331 unsigned /*AddressSpace*/,
332 TTI::MaskKind /*MaskKind*/) const override {
333 return isLegalMaskedLoadStore(DataType, Alignment);
334 }
335
336 bool isLegalMaskedStore(Type *DataType, Align Alignment,
337 unsigned /*AddressSpace*/,
338 TTI::MaskKind /*MaskKind*/) const override {
339 return isLegalMaskedLoadStore(DataType, Alignment);
340 }
341
342 bool isElementTypeLegalForCompressStore(Type *Ty) const {
343 return Ty->isFloatTy() || Ty->isDoubleTy() || Ty->isIntegerTy(Bitwidth: 32) ||
344 Ty->isIntegerTy(Bitwidth: 64);
345 }
346
347 bool isLegalMaskedCompressStore(Type *DataType,
348 Align Alignment) const override {
349 if (!ST->isSVEAvailable())
350 return false;
351
352 if (isa<FixedVectorType>(Val: DataType) &&
353 DataType->getPrimitiveSizeInBits() < 128)
354 return false;
355
356 return isElementTypeLegalForCompressStore(Ty: DataType->getScalarType());
357 }
358
359 bool isLegalMaskedGatherScatter(Type *DataType) const {
360 if (!ST->isSVEAvailable())
361 return false;
362
363 // For fixed vectors, scalarize if not using SVE for them.
364 auto *DataTypeFVTy = dyn_cast<FixedVectorType>(Val: DataType);
365 if (DataTypeFVTy && (!ST->useSVEForFixedLengthVectors() ||
366 DataTypeFVTy->getNumElements() < 2))
367 return false;
368
369 return isElementTypeLegalForScalableVector(Ty: DataType->getScalarType());
370 }
371
372 bool isLegalMaskedGather(Type *DataType, Align Alignment) const override {
373 return isLegalMaskedGatherScatter(DataType);
374 }
375
376 bool isLegalMaskedScatter(Type *DataType, Align Alignment) const override {
377 return isLegalMaskedGatherScatter(DataType);
378 }
379
380 bool isLegalBroadcastLoad(Type *ElementTy,
381 ElementCount NumElements) const override {
382 // Return true if we can generate a `ld1r` splat load instruction.
383 if (!ST->hasNEON() || NumElements.isScalable())
384 return false;
385 switch (unsigned ElementBits = ElementTy->getScalarSizeInBits()) {
386 case 8:
387 case 16:
388 case 32:
389 case 64: {
390 // We accept bit-widths >= 64bits and elements {8,16,32,64} bits.
391 unsigned VectorBits = NumElements.getFixedValue() * ElementBits;
392 return VectorBits >= 64;
393 }
394 }
395 return false;
396 }
397
398 std::optional<bool> isLegalNTStoreLoad(Type *DataType,
399 Align Alignment) const {
400 // Currently we only support NT load and store lowering for little-endian
401 // targets.
402 //
403 // Coordinated with LDNP and STNP constraints in
404 // `llvm/lib/Target/AArch64/AArch64InstrInfo.td` and
405 // `AArch64ISelLowering.cpp`
406 if (!ST->isLittleEndian())
407 return false;
408
409 // NOTE: The logic below is mostly geared towards LV, which calls it with
410 // vectors with 2 elements. We might want to improve that, if other
411 // users show up.
412 // Nontemporal vector loads/stores can be directly lowered to LDNP/STNP, if
413 // the vector can be halved so that each half fits into a register. That's
414 // the case if the element type fits into a register and the number of
415 // elements is a power of 2 > 1.
416 if (auto *DataTypeTy = dyn_cast<FixedVectorType>(Val: DataType)) {
417 unsigned NumElements = DataTypeTy->getNumElements();
418 unsigned EltSize = DataTypeTy->getElementType()->getScalarSizeInBits();
419 return NumElements > 1 && isPowerOf2_64(Value: NumElements) && EltSize >= 8 &&
420 EltSize <= 128 && isPowerOf2_64(Value: EltSize);
421 }
422 return std::nullopt;
423 }
424
425 bool isLegalNTStore(Type *DataType, Align Alignment) const override {
426 if (auto Result = isLegalNTStoreLoad(DataType, Alignment))
427 return *Result;
428 // Fallback to target independent logic
429 return BaseT::isLegalNTStore(DataType, Alignment);
430 }
431
432 bool isLegalNTLoad(Type *DataType, Align Alignment) const override {
433 if (auto Result = isLegalNTStoreLoad(DataType, Alignment))
434 return *Result;
435 // Fallback to target independent logic
436 return BaseT::isLegalNTLoad(DataType, Alignment);
437 }
438
439 InstructionCost getPartialReductionCost(
440 unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType,
441 ElementCount VF, TTI::PartialReductionExtendKind OpAExtend,
442 TTI::PartialReductionExtendKind OpBExtend, std::optional<unsigned> BinOp,
443 TTI::TargetCostKind CostKind,
444 std::optional<FastMathFlags> FMF) const override;
445
446 bool enableOrderedReductions() const override { return true; }
447
448 InstructionCost getInterleavedMemoryOpCost(
449 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
450 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
451 bool UseMaskForCond = false, bool UseMaskForGaps = false) const override;
452
453 bool shouldConsiderAddressTypePromotion(
454 const Instruction &I,
455 bool &AllowPromotionWithoutCommonHeader) const override;
456
457 bool shouldExpandReduction(const IntrinsicInst *II) const override {
458 return false;
459 }
460
461 unsigned getGISelRematGlobalCost() const override { return 2; }
462
463 unsigned getMinTripCountTailFoldingThreshold() const override {
464 return ST->hasSVE() ? 5 : 0;
465 }
466
467 TailFoldingStyle getPreferredTailFoldingStyle() const override {
468 return ST->hasSVE() ? TailFoldingStyle::DataAndControlFlow
469 : TailFoldingStyle::DataWithoutLaneMask;
470 }
471
472 bool preferFixedOverScalableIfEqualCost(bool IsEpilogue) const override;
473
474 unsigned getEpilogueVectorizationMinVF() const override;
475
476 bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const override;
477
478 bool supportsScalableVectors() const override {
479 return ST->isSVEorStreamingSVEAvailable();
480 }
481
482 bool enableScalableVectorization() const override;
483
484 bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
485 ElementCount VF) const override;
486
487 bool preferPredicatedReductionSelect() const override { return ST->hasSVE(); }
488
489 /// FP16 and BF16 operations are lowered to fptrunc(op(fpext, fpext) if the
490 /// architecture features are not present.
491 std::optional<InstructionCost> getFP16BF16PromoteCost(
492 Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info,
493 TTI::OperandValueInfo Op2Info, bool IncludeTrunc, bool CanUseSVE,
494 std::function<InstructionCost(Type *)> InstCost) const;
495
496 InstructionCost
497 getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
498 std::optional<FastMathFlags> FMF,
499 TTI::TargetCostKind CostKind) const override;
500
501 InstructionCost
502 getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy,
503 VectorType *ValTy, std::optional<FastMathFlags> FMF,
504 TTI::TargetCostKind CostKind) const override;
505
506 InstructionCost getMulAccReductionCost(
507 bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty,
508 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const override;
509
510 InstructionCost
511 getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy,
512 ArrayRef<int> Mask, TTI::TargetCostKind CostKind, int Index,
513 VectorType *SubTp, ArrayRef<const Value *> Args = {},
514 const Instruction *CxtI = nullptr) const override;
515
516 InstructionCost
517 getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts,
518 bool Insert, bool Extract,
519 TTI::TargetCostKind CostKind,
520 bool ForPoisonSrc = true, ArrayRef<Value *> VL = {},
521 TTI::VectorInstrContext VIC =
522 TTI::VectorInstrContext::None) const override;
523
524 /// Return the cost of the scaling factor used in the addressing
525 /// mode represented by AM for this target, for a load/store
526 /// of the specified type.
527 /// If the AM is supported, the return value must be >= 0.
528 /// If the AM is not supported, it returns an invalid cost.
529 InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
530 StackOffset BaseOffset, bool HasBaseReg,
531 int64_t Scale,
532 unsigned AddrSpace) const override;
533
534 bool enableSelectOptimize() const override {
535 return ST->enableSelectOptimize();
536 }
537
538 bool shouldTreatInstructionLikeSelect(const Instruction *I) const override;
539
540 unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
541 Type *ScalarValTy) const override {
542 // We can vectorize store v4i8.
543 if (ScalarMemTy->isIntegerTy(Bitwidth: 8) && isPowerOf2_32(Value: VF) && VF >= 4)
544 return 4;
545
546 return BaseT::getStoreMinimumVF(VF, ScalarMemTy, ScalarValTy);
547 }
548
549 std::optional<unsigned> getMinPageSize() const override { return 4096; }
550
551 bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
552 const TargetTransformInfo::LSRCost &C2) const override;
553
554 bool isProfitableToSinkOperands(Instruction *I,
555 SmallVectorImpl<Use *> &Ops) const override;
556
557 bool enableAggressiveInterleaving(bool) const override {
558 return ST->enableAggressiveInterleaving();
559 }
560 /// @}
561};
562
563} // end namespace llvm
564
565#endif // LLVM_LIB_TARGET_AARCH64_AARCH64TARGETTRANSFORMINFO_H
566