1//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "PPCTargetTransformInfo.h"
10#include "llvm/Analysis/CodeMetrics.h"
11#include "llvm/Analysis/TargetLibraryInfo.h"
12#include "llvm/Analysis/TargetTransformInfo.h"
13#include "llvm/CodeGen/BasicTTIImpl.h"
14#include "llvm/CodeGen/TargetLowering.h"
15#include "llvm/CodeGen/TargetSchedule.h"
16#include "llvm/IR/IntrinsicsPowerPC.h"
17#include "llvm/IR/ProfDataUtils.h"
18#include "llvm/Support/CommandLine.h"
19#include "llvm/Transforms/InstCombine/InstCombiner.h"
20#include "llvm/Transforms/Utils/Local.h"
21#include <optional>
22
23using namespace llvm;
24
25#define DEBUG_TYPE "ppctti"
26
static cl::opt<bool> PPCEVL("ppc-evl",
                            cl::desc("Allow EVL type vp.load/vp.store"),
                            cl::init(false), cl::Hidden);

static cl::opt<bool> Pwr9EVL("ppc-pwr9-evl",
                             cl::desc("Allow vp.load and vp.store for pwr9"),
                             cl::init(false), cl::Hidden);

static cl::opt<bool> VecMaskCost("ppc-vec-mask-cost",
                                 cl::desc("add masking cost for i1 vectors"),
                                 cl::init(true), cl::Hidden);

static cl::opt<bool>
    DisablePPCConstHoist("disable-ppc-constant-hoisting",
                         cl::desc("disable constant hoisting on PPC"),
                         cl::init(false), cl::Hidden);

static cl::opt<bool>
    EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
                    cl::desc("Enable using coldcc calling conv for cold "
                             "internal functions"));

static cl::opt<bool>
    LsrNoInsnsCost("ppc-lsr-no-insns-cost", cl::Hidden, cl::init(false),
                   cl::desc("Do not add instruction count to lsr cost model"));

// The latency of mtctr is only justified if there are more than 4
// comparisons that will be removed as a result.
static cl::opt<unsigned>
    SmallCTRLoopThreshold("min-ctr-loop-threshold", cl::init(4), cl::Hidden,
                          cl::desc("Loops with a constant trip count smaller "
                                   "than this value will not use the count "
                                   "register."));
56
57//===----------------------------------------------------------------------===//
58//
59// PPC cost model.
60//
61//===----------------------------------------------------------------------===//
62
63TargetTransformInfo::PopcntSupportKind
64PPCTTIImpl::getPopcntSupport(unsigned TyWidth) const {
65 assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
66 if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
67 return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
68 TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
69 return TTI::PSK_Software;
70}
71
72std::optional<Instruction *>
73PPCTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
74 Intrinsic::ID IID = II.getIntrinsicID();
75 switch (IID) {
76 default:
77 break;
78 case Intrinsic::ppc_altivec_lvx:
79 case Intrinsic::ppc_altivec_lvxl:
80 // Turn PPC lvx -> load if the pointer is known aligned.
81 if (getOrEnforceKnownAlignment(
82 V: II.getArgOperand(i: 0), PrefAlign: Align(16), DL: IC.getDataLayout(), CxtI: &II,
83 AC: &IC.getAssumptionCache(), DT: &IC.getDominatorTree()) >= 16) {
84 Value *Ptr = II.getArgOperand(i: 0);
85 return new LoadInst(II.getType(), Ptr, "", false, Align(16));
86 }
87 break;
88 case Intrinsic::ppc_vsx_lxvw4x:
89 case Intrinsic::ppc_vsx_lxvd2x: {
90 // Turn PPC VSX loads into normal loads.
91 Value *Ptr = II.getArgOperand(i: 0);
92 return new LoadInst(II.getType(), Ptr, Twine(""), false, Align(1));
93 }
94 case Intrinsic::ppc_altivec_stvx:
95 case Intrinsic::ppc_altivec_stvxl:
96 // Turn stvx -> store if the pointer is known aligned.
97 if (getOrEnforceKnownAlignment(
98 V: II.getArgOperand(i: 1), PrefAlign: Align(16), DL: IC.getDataLayout(), CxtI: &II,
99 AC: &IC.getAssumptionCache(), DT: &IC.getDominatorTree()) >= 16) {
100 Value *Ptr = II.getArgOperand(i: 1);
101 return new StoreInst(II.getArgOperand(i: 0), Ptr, false, Align(16));
102 }
103 break;
104 case Intrinsic::ppc_vsx_stxvw4x:
105 case Intrinsic::ppc_vsx_stxvd2x: {
106 // Turn PPC VSX stores into normal stores.
107 Value *Ptr = II.getArgOperand(i: 1);
108 return new StoreInst(II.getArgOperand(i: 0), Ptr, false, Align(1));
109 }
110 case Intrinsic::ppc_altivec_vperm:
111 // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
112 // Note that ppc_altivec_vperm has a big-endian bias, so when creating
113 // a vectorshuffle for little endian, we must undo the transformation
114 // performed on vec_perm in altivec.h. That is, we must complement
115 // the permutation mask with respect to 31 and reverse the order of
116 // V1 and V2.
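    // As an illustrative sketch only: on a big-endian target,
    //   vperm(V1, V2, <0, 1, ..., 15>)
    // selects exactly the sixteen bytes of V1, so it becomes an identity
    // shufflevector of the byte-cast operands; on a little-endian target the
    // same result is produced only after complementing each mask element with
    // respect to 31 and swapping V1 and V2, as described above.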
117 if (Constant *Mask = dyn_cast<Constant>(Val: II.getArgOperand(i: 2))) {
118 assert(cast<FixedVectorType>(Mask->getType())->getNumElements() == 16 &&
119 "Bad type for intrinsic!");
120
121 // Check that all of the elements are integer constants or undefs.
122 bool AllEltsOk = true;
123 for (unsigned I = 0; I != 16; ++I) {
124 Constant *Elt = Mask->getAggregateElement(Elt: I);
125 if (!Elt || !(isa<ConstantInt>(Val: Elt) || isa<UndefValue>(Val: Elt))) {
126 AllEltsOk = false;
127 break;
128 }
129 }
130
131 if (AllEltsOk) {
132 // Cast the input vectors to byte vectors.
133 Value *Op0 =
134 IC.Builder.CreateBitCast(V: II.getArgOperand(i: 0), DestTy: Mask->getType());
135 Value *Op1 =
136 IC.Builder.CreateBitCast(V: II.getArgOperand(i: 1), DestTy: Mask->getType());
137 Value *Result = PoisonValue::get(T: Op0->getType());
138
139 // Only extract each element once.
140 Value *ExtractedElts[32];
141 memset(s: ExtractedElts, c: 0, n: sizeof(ExtractedElts));
142
143 for (unsigned I = 0; I != 16; ++I) {
144 if (isa<UndefValue>(Val: Mask->getAggregateElement(Elt: I)))
145 continue;
146 unsigned Idx =
147 cast<ConstantInt>(Val: Mask->getAggregateElement(Elt: I))->getZExtValue();
148 Idx &= 31; // Match the hardware behavior.
149 if (DL.isLittleEndian())
150 Idx = 31 - Idx;
151
152 if (!ExtractedElts[Idx]) {
153 Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
154 Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
155 ExtractedElts[Idx] = IC.Builder.CreateExtractElement(
156 Vec: Idx < 16 ? Op0ToUse : Op1ToUse, Idx: IC.Builder.getInt32(C: Idx & 15));
157 }
158
159 // Insert this value into the result vector.
160 Result = IC.Builder.CreateInsertElement(Vec: Result, NewElt: ExtractedElts[Idx],
161 Idx: IC.Builder.getInt32(C: I));
162 }
163 return CastInst::Create(Instruction::BitCast, S: Result, Ty: II.getType());
164 }
165 }
166 break;
167 }
168 return std::nullopt;
169}
170
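// Cost of materializing an integer immediate into a GPR. A rough summary of
// the tiers implemented below (illustrative, not an exhaustive ISA mapping):
//   * Imm == 0 is free.
//   * A value that fits in a signed 16-bit field costs TCC_Basic (one li).
//   * A 32-bit value whose low 16 bits are zero costs TCC_Basic (one lis).
//   * Any other 32-bit value costs 2 * TCC_Basic (e.g. lis followed by ori).
//   * Everything else is charged 4 * TCC_Basic as an approximation of the
//     longer 64-bit materialization sequences.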
171InstructionCost PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
172 TTI::TargetCostKind CostKind) const {
173 if (DisablePPCConstHoist)
174 return BaseT::getIntImmCost(Imm, Ty, CostKind);
175
176 assert(Ty->isIntegerTy());
177
178 unsigned BitSize = Ty->getPrimitiveSizeInBits();
179 if (BitSize == 0)
180 return ~0U;
181
182 if (Imm == 0)
183 return TTI::TCC_Free;
184
185 if (Imm.getBitWidth() <= 64) {
186 if (isInt<16>(x: Imm.getSExtValue()))
187 return TTI::TCC_Basic;
188
189 if (isInt<32>(x: Imm.getSExtValue())) {
190 // A constant that can be materialized using lis.
191 if ((Imm.getZExtValue() & 0xFFFF) == 0)
192 return TTI::TCC_Basic;
193
194 return 2 * TTI::TCC_Basic;
195 }
196 }
197
198 return 4 * TTI::TCC_Basic;
199}
200
201InstructionCost
202PPCTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
203 const APInt &Imm, Type *Ty,
204 TTI::TargetCostKind CostKind) const {
205 if (DisablePPCConstHoist)
206 return BaseT::getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
207
208 assert(Ty->isIntegerTy());
209
210 unsigned BitSize = Ty->getPrimitiveSizeInBits();
211 if (BitSize == 0)
212 return ~0U;
213
214 switch (IID) {
215 default:
216 return TTI::TCC_Free;
217 case Intrinsic::sadd_with_overflow:
218 case Intrinsic::uadd_with_overflow:
219 case Intrinsic::ssub_with_overflow:
220 case Intrinsic::usub_with_overflow:
221 if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(x: Imm.getSExtValue()))
222 return TTI::TCC_Free;
223 break;
224 case Intrinsic::experimental_stackmap:
225 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(x: Imm.getSExtValue())))
226 return TTI::TCC_Free;
227 break;
228 case Intrinsic::experimental_patchpoint_void:
229 case Intrinsic::experimental_patchpoint:
230 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(x: Imm.getSExtValue())))
231 return TTI::TCC_Free;
232 break;
233 }
234 return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind);
235}
236
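// Immediate cost when the constant is an operand of a specific instruction.
// A rough sketch of what the "free" flags below are meant to capture (the
// instruction names are the usual PPC encodings, stated here as an aid, not
// taken from this file):
//   * RunFree:      a mask that is a (possibly complemented) shifted run of
//                   ones can be consumed by rotate-and-mask instructions such
//                   as rlwinm/rldicl.
//   * ShiftedFree:  an immediate with the low 16 bits clear folds into the
//                   shifted-immediate forms (addis/oris/xoris).
//   * UnsignedFree: an unsigned 16-bit immediate folds into the unsigned
//                   compare forms (e.g. cmplwi).
//   * ZeroFree:     comparing or selecting against zero is free, since
//                   record-form instructions set CR0 as a side effect.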
237InstructionCost PPCTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
238 const APInt &Imm, Type *Ty,
239 TTI::TargetCostKind CostKind,
240 Instruction *Inst) const {
241 if (DisablePPCConstHoist)
242 return BaseT::getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);
243
244 assert(Ty->isIntegerTy());
245
246 unsigned BitSize = Ty->getPrimitiveSizeInBits();
247 if (BitSize == 0)
248 return ~0U;
249
250 unsigned ImmIdx = ~0U;
251 bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
252 ZeroFree = false;
253 switch (Opcode) {
254 default:
255 return TTI::TCC_Free;
256 case Instruction::GetElementPtr:
257 // Always hoist the base address of a GetElementPtr. This prevents the
258 // creation of new constants for every base constant that gets constant
259 // folded with the offset.
260 if (Idx == 0)
261 return 2 * TTI::TCC_Basic;
262 return TTI::TCC_Free;
263 case Instruction::And:
264 RunFree = true; // (for the rotate-and-mask instructions)
265 [[fallthrough]];
266 case Instruction::Add:
267 case Instruction::Or:
268 case Instruction::Xor:
269 ShiftedFree = true;
270 [[fallthrough]];
271 case Instruction::Sub:
272 case Instruction::Mul:
273 case Instruction::Shl:
274 case Instruction::LShr:
275 case Instruction::AShr:
276 ImmIdx = 1;
277 break;
278 case Instruction::ICmp:
279 UnsignedFree = true;
280 ImmIdx = 1;
281 // Zero comparisons can use record-form instructions.
282 [[fallthrough]];
283 case Instruction::Select:
284 ZeroFree = true;
285 break;
286 case Instruction::PHI:
287 case Instruction::Call:
288 case Instruction::Ret:
289 case Instruction::Load:
290 case Instruction::Store:
291 break;
292 }
293
294 if (ZeroFree && Imm == 0)
295 return TTI::TCC_Free;
296
297 if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
298 if (isInt<16>(x: Imm.getSExtValue()))
299 return TTI::TCC_Free;
300
301 if (RunFree) {
302 if (Imm.getBitWidth() <= 32 &&
303 (isShiftedMask_32(Value: Imm.getZExtValue()) ||
304 isShiftedMask_32(Value: ~Imm.getZExtValue())))
305 return TTI::TCC_Free;
306
307 if (ST->isPPC64() &&
308 (isShiftedMask_64(Value: Imm.getZExtValue()) ||
309 isShiftedMask_64(Value: ~Imm.getZExtValue())))
310 return TTI::TCC_Free;
311 }
312
313 if (UnsignedFree && isUInt<16>(x: Imm.getZExtValue()))
314 return TTI::TCC_Free;
315
316 if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
317 return TTI::TCC_Free;
318 }
319
320 return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind);
321}
322
// Check if the given type is an MMA vector type. The valid MMA types are
// v256i1 and v512i1.
325static bool isMMAType(Type *Ty) {
326 return Ty->isVectorTy() && (Ty->getScalarSizeInBits() == 1) &&
327 (Ty->getPrimitiveSizeInBits() > 128);
328}
329
330InstructionCost
331PPCTTIImpl::getInstructionCost(const User *U, ArrayRef<const Value *> Operands,
332 TTI::TargetCostKind CostKind) const {
  // getCastInstrCost and getMemoryOpCost already apply the vector cost
  // adjustment, so defer to the base implementation for casts, loads and
  // stores.
335 if (isa<CastInst>(Val: U) || isa<LoadInst>(Val: U) || isa<StoreInst>(Val: U))
336 return BaseT::getInstructionCost(U, Operands, CostKind);
337
338 if (U->getType()->isVectorTy()) {
339 // Instructions that need to be split should cost more.
340 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty: U->getType());
341 return LT.first * BaseT::getInstructionCost(U, Operands, CostKind);
342 }
343
344 return BaseT::getInstructionCost(U, Operands, CostKind);
345}
346
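// Decide whether turning this loop into a CTR-based hardware loop is
// profitable. In outline: loops with a small constant trip count and a small
// body are rejected (the mtctr latency would not pay off), loops that already
// contain hardware-loop intrinsics are rejected, and loops with an exit edge
// whose branch weights show it is taken more often than the backedge are
// rejected. On success the count type (i64 or i32) and a loop decrement of 1
// are filled in for the caller.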
347bool PPCTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
348 AssumptionCache &AC,
349 TargetLibraryInfo *LibInfo,
350 HardwareLoopInfo &HWLoopInfo) const {
351 const PPCTargetMachine &TM = ST->getTargetMachine();
352 TargetSchedModel SchedModel;
353 SchedModel.init(TSInfo: ST);
354
355 // FIXME: Sure there is no other way to get TTI? This should be cheap though.
356 TargetTransformInfo TTI =
357 TM.getTargetTransformInfo(F: *L->getHeader()->getParent());
358
  // Do not convert short loops with a small constant trip count into CTR
  // loops.
360 unsigned ConstTripCount = SE.getSmallConstantTripCount(L);
361 if (ConstTripCount && ConstTripCount < SmallCTRLoopThreshold) {
362 SmallPtrSet<const Value *, 32> EphValues;
363 CodeMetrics::collectEphemeralValues(L, AC: &AC, EphValues);
364 CodeMetrics Metrics;
365 for (BasicBlock *BB : L->blocks())
366 Metrics.analyzeBasicBlock(BB, TTI, EphValues);
367 // 6 is an approximate latency for the mtctr instruction.
368 if (Metrics.NumInsts <= (6 * SchedModel.getIssueWidth()))
369 return false;
370 }
371
  // Check that there are no hardware-loop-related intrinsics already in the
  // loop.
373 for (auto *BB : L->getBlocks())
374 for (auto &I : *BB)
375 if (auto *Call = dyn_cast<IntrinsicInst>(Val: &I))
376 if (Call->getIntrinsicID() == Intrinsic::set_loop_iterations ||
377 Call->getIntrinsicID() == Intrinsic::loop_decrement)
378 return false;
379
380 SmallVector<BasicBlock*, 4> ExitingBlocks;
381 L->getExitingBlocks(ExitingBlocks);
382
383 // If there is an exit edge known to be frequently taken,
384 // we should not transform this loop.
385 for (auto &BB : ExitingBlocks) {
386 Instruction *TI = BB->getTerminator();
387 if (!TI) continue;
388
389 if (CondBrInst *BI = dyn_cast<CondBrInst>(Val: TI)) {
390 uint64_t TrueWeight = 0, FalseWeight = 0;
391 if (!extractBranchWeights(I: *BI, TrueVal&: TrueWeight, FalseVal&: FalseWeight))
392 continue;
393
394 // If the exit path is more frequent than the loop path,
395 // we return here without further analysis for this loop.
396 bool TrueIsExit = !L->contains(BB: BI->getSuccessor(i: 0));
397 if (( TrueIsExit && FalseWeight < TrueWeight) ||
398 (!TrueIsExit && FalseWeight > TrueWeight))
399 return false;
400 }
401 }
402
403 LLVMContext &C = L->getHeader()->getContext();
404 HWLoopInfo.CountType = TM.isPPC64() ?
405 Type::getInt64Ty(C) : Type::getInt32Ty(C);
406 HWLoopInfo.LoopDecrement = ConstantInt::get(Ty: HWLoopInfo.CountType, V: 1);
407 return true;
408}
409
410void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
411 TTI::UnrollingPreferences &UP,
412 OptimizationRemarkEmitter *ORE) const {
413 if (ST->getCPUDirective() == PPC::DIR_A2) {
414 // The A2 is in-order with a deep pipeline, and concatenation unrolling
415 // helps expose latency-hiding opportunities to the instruction scheduler.
416 UP.Partial = UP.Runtime = true;
417
418 // We unroll a lot on the A2 (hundreds of instructions), and the benefits
419 // often outweigh the cost of a division to compute the trip count.
420 UP.AllowExpensiveTripCount = true;
421 }
422
423 BaseT::getUnrollingPreferences(L, SE, UP, ORE);
424}
425
426void PPCTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
427 TTI::PeelingPreferences &PP) const {
428 BaseT::getPeelingPreferences(L, SE, PP);
429}
// This function returns true to allow using the coldcc calling convention.
// Returning true results in coldcc being used for functions that are cold at
// all call sites, when the callers of those functions are not calling any
// other non-coldcc functions.
434bool PPCTTIImpl::useColdCCForColdCall(Function &F) const {
435 return EnablePPCColdCC;
436}
437
438bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) const {
439 // On the A2, always unroll aggressively.
440 if (ST->getCPUDirective() == PPC::DIR_A2)
441 return true;
442
443 return LoopHasReductions;
444}
445
446PPCTTIImpl::TTI::MemCmpExpansionOptions
447PPCTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
448 TTI::MemCmpExpansionOptions Options;
449 if (getST()->hasAltivec())
450 Options.LoadSizes = {16, 8, 4, 2, 1};
451 else
452 Options.LoadSizes = {8, 4, 2, 1};
453
454 Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
455 return Options;
456}
457
458bool PPCTTIImpl::enableInterleavedAccessVectorization() const { return true; }
459
460unsigned PPCTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
461 assert(ClassID == GPRRC || ClassID == FPRRC ||
462 ClassID == VRRC || ClassID == VSXRC);
463 if (ST->hasVSX()) {
464 assert(ClassID == GPRRC || ClassID == VSXRC || ClassID == VRRC);
465 return ClassID == VSXRC ? 64 : 32;
466 }
467 assert(ClassID == GPRRC || ClassID == FPRRC || ClassID == VRRC);
468 return 32;
469}
470
471unsigned PPCTTIImpl::getRegisterClassForType(bool Vector, Type *Ty) const {
472 if (Vector)
473 return ST->hasVSX() ? VSXRC : VRRC;
474 if (Ty &&
475 (Ty->getScalarType()->isFloatTy() || Ty->getScalarType()->isDoubleTy()))
476 return ST->hasVSX() ? VSXRC : FPRRC;
477 if (Ty && (Ty->getScalarType()->isFP128Ty() ||
478 Ty->getScalarType()->isPPC_FP128Ty()))
479 return VRRC;
480 if (Ty && Ty->getScalarType()->isHalfTy())
481 return VSXRC;
482 return GPRRC;
483}
484
485const char* PPCTTIImpl::getRegisterClassName(unsigned ClassID) const {
486
487 switch (ClassID) {
488 default:
489 llvm_unreachable("unknown register class");
490 return "PPC::unknown register class";
491 case GPRRC: return "PPC::GPRRC";
492 case FPRRC: return "PPC::FPRRC";
493 case VRRC: return "PPC::VRRC";
494 case VSXRC: return "PPC::VSXRC";
495 }
496}
497
498TypeSize
499PPCTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
500 switch (K) {
501 case TargetTransformInfo::RGK_Scalar:
502 return TypeSize::getFixed(ExactSize: ST->isPPC64() ? 64 : 32);
503 case TargetTransformInfo::RGK_FixedWidthVector:
504 return TypeSize::getFixed(ExactSize: ST->hasAltivec() ? 128 : 0);
505 case TargetTransformInfo::RGK_ScalableVector:
506 return TypeSize::getScalable(MinimumSize: 0);
507 }
508
509 llvm_unreachable("Unsupported register kind");
510}
511
512unsigned PPCTTIImpl::getCacheLineSize() const {
513 // Starting with P7 we have a cache line size of 128.
514 unsigned Directive = ST->getCPUDirective();
515 // Assume that Future CPU has the same cache line size as the others.
516 if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
517 Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 ||
518 Directive == PPC::DIR_PWR11 || Directive == PPC::DIR_PWR_FUTURE)
519 return 128;
520
521 // On other processors return a default of 64 bytes.
522 return 64;
523}
524
525unsigned PPCTTIImpl::getPrefetchDistance() const {
526 return 300;
527}
528
529unsigned PPCTTIImpl::getMaxInterleaveFactor(ElementCount VF) const {
530 unsigned Directive = ST->getCPUDirective();
531 // The 440 has no SIMD support, but floating-point instructions
532 // have a 5-cycle latency, so unroll by 5x for latency hiding.
533 if (Directive == PPC::DIR_440)
534 return 5;
535
536 // The A2 has no SIMD support, but floating-point instructions
537 // have a 6-cycle latency, so unroll by 6x for latency hiding.
538 if (Directive == PPC::DIR_A2)
539 return 6;
540
541 // FIXME: For lack of any better information, do no harm...
542 if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
543 return 1;
544
545 // For P7 and P8, floating-point instructions have a 6-cycle latency and
546 // there are two execution units, so unroll by 12x for latency hiding.
547 // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready
548 // FIXME: the same for P10 as previous gen until POWER10 scheduling is ready
549 // Assume that future is the same as the others.
550 if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
551 Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 ||
552 Directive == PPC::DIR_PWR11 || Directive == PPC::DIR_PWR_FUTURE)
553 return 12;
554
555 // For most things, modern systems have two execution units (and
556 // out-of-order execution).
557 return 2;
558}
559
// Returns a cost adjustment factor for vector instructions on targets where
// the vector and scalar units overlap, which reduces the overall throughput
// of vector code relative to scalar code.
// An invalid instruction cost is returned if the type is an MMA vector type.
564InstructionCost PPCTTIImpl::vectorCostAdjustmentFactor(unsigned Opcode,
565 Type *Ty1,
566 Type *Ty2) const {
567 // If the vector type is of an MMA type (v256i1, v512i1), an invalid
568 // instruction cost is returned. This is to signify to other cost computing
569 // functions to return the maximum instruction cost in order to prevent any
570 // opportunities for the optimizer to produce MMA types within the IR.
571 if (isMMAType(Ty: Ty1))
572 return InstructionCost::getInvalid();
573
574 if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy())
575 return InstructionCost(1);
576
577 std::pair<InstructionCost, MVT> LT1 = getTypeLegalizationCost(Ty: Ty1);
578 // If type legalization involves splitting the vector, we don't want to
579 // double the cost at every step - only the last step.
580 if (LT1.first != 1 || !LT1.second.isVector())
581 return InstructionCost(1);
582
583 int ISD = TLI->InstructionOpcodeToISD(Opcode);
584 if (TLI->isOperationExpand(Op: ISD, VT: LT1.second))
585 return InstructionCost(1);
586
587 if (Ty2) {
588 std::pair<InstructionCost, MVT> LT2 = getTypeLegalizationCost(Ty: Ty2);
589 if (LT2.first != 1 || !LT2.second.isVector())
590 return InstructionCost(1);
591 }
592
593 return InstructionCost(2);
594}
595
596InstructionCost PPCTTIImpl::getArithmeticInstrCost(
597 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
598 TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
599 ArrayRef<const Value *> Args, const Instruction *CxtI) const {
600 assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");
601
602 InstructionCost CostFactor = vectorCostAdjustmentFactor(Opcode, Ty1: Ty, Ty2: nullptr);
603 if (!CostFactor.isValid())
604 return InstructionCost::getMax();
605
606 // TODO: Handle more cost kinds.
607 if (CostKind != TTI::TCK_RecipThroughput)
608 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info: Op1Info,
609 Opd2Info: Op2Info, Args, CxtI);
610
611 // Fallback to the default implementation.
612 InstructionCost Cost = BaseT::getArithmeticInstrCost(
613 Opcode, Ty, CostKind, Opd1Info: Op1Info, Opd2Info: Op2Info);
614 return Cost * CostFactor;
615}
616
617InstructionCost PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
618 VectorType *DstTy, VectorType *SrcTy,
619 ArrayRef<int> Mask,
620 TTI::TargetCostKind CostKind,
621 int Index, VectorType *SubTp,
622 ArrayRef<const Value *> Args,
623 const Instruction *CxtI) const {
624
625 InstructionCost CostFactor =
626 vectorCostAdjustmentFactor(Opcode: Instruction::ShuffleVector, Ty1: SrcTy, Ty2: nullptr);
627 if (!CostFactor.isValid())
628 return InstructionCost::getMax();
629
630 // Legalize the type.
631 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty: SrcTy);
632
  // PPC, for both Altivec and VSX, supports cheap arbitrary permutations
634 // (at least in the sense that there need only be one non-loop-invariant
635 // instruction). We need one such shuffle instruction for each actual
636 // register (this is not true for arbitrary shuffles, but is true for the
637 // structured types of shuffles covered by TTI::ShuffleKind).
638 return LT.first * CostFactor;
639}
640
641InstructionCost PPCTTIImpl::getCFInstrCost(unsigned Opcode,
642 TTI::TargetCostKind CostKind,
643 const Instruction *I) const {
644 if (CostKind != TTI::TCK_RecipThroughput)
645 return Opcode == Instruction::PHI ? 0 : 1;
646 // Branches are assumed to be predicted.
647 return 0;
648}
649
650InstructionCost PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
651 Type *Src,
652 TTI::CastContextHint CCH,
653 TTI::TargetCostKind CostKind,
654 const Instruction *I) const {
655 assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");
656
657 InstructionCost CostFactor = vectorCostAdjustmentFactor(Opcode, Ty1: Dst, Ty2: Src);
658 if (!CostFactor.isValid())
659 return InstructionCost::getMax();
660
661 InstructionCost Cost =
662 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
663 Cost *= CostFactor;
664 // TODO: Allow non-throughput costs that aren't binary.
665 if (CostKind != TTI::TCK_RecipThroughput)
666 return Cost == 0 ? 0 : 1;
667 return Cost;
668}
669
670InstructionCost PPCTTIImpl::getCmpSelInstrCost(
671 unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
672 TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info,
673 TTI::OperandValueInfo Op2Info, const Instruction *I) const {
674 InstructionCost CostFactor =
675 vectorCostAdjustmentFactor(Opcode, Ty1: ValTy, Ty2: nullptr);
676 if (!CostFactor.isValid())
677 return InstructionCost::getMax();
678
679 InstructionCost Cost = BaseT::getCmpSelInstrCost(
680 Opcode, ValTy, CondTy, VecPred, CostKind, Op1Info, Op2Info, I);
681 // TODO: Handle other cost kinds.
682 if (CostKind != TTI::TCK_RecipThroughput)
683 return Cost;
684 return Cost * CostFactor;
685}
686
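// Cost of inserting into or extracting from a vector lane. The code below
// layers several PPC-specific adjustments on top of the base cost: free
// extraction of a double from the lane that already aliases the scalar FP
// register, cheaper insert/extract paths on P9/P10 (direct moves and element
// insert/extract instructions, with extra masking cost for i1 elements or a
// variable index), and a large load-hit-store penalty when only Altivec is
// available and the operation has to go through memory.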
687InstructionCost PPCTTIImpl::getVectorInstrCost(
688 unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
689 const Value *Op0, const Value *Op1, TTI::VectorInstrContext VIC) const {
690 assert(Val->isVectorTy() && "This must be a vector type");
691
692 int ISD = TLI->InstructionOpcodeToISD(Opcode);
693 assert(ISD && "Invalid opcode");
694
695 InstructionCost CostFactor = vectorCostAdjustmentFactor(Opcode, Ty1: Val, Ty2: nullptr);
696 if (!CostFactor.isValid())
697 return InstructionCost::getMax();
698
699 InstructionCost Cost =
700 BaseT::getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1, VIC);
701 Cost *= CostFactor;
702
703 if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
704 // Double-precision scalars are already located in index #0 (or #1 if LE).
705 if (ISD == ISD::EXTRACT_VECTOR_ELT &&
706 Index == (ST->isLittleEndian() ? 1 : 0))
707 return 0;
708
709 return Cost;
710 }
711 if (Val->getScalarType()->isIntegerTy()) {
712 unsigned EltSize = Val->getScalarSizeInBits();
    // Computing on 1-bit values requires extra mask or compare operations.
    unsigned MaskCostForOneBitSize = (VecMaskCost && EltSize == 1) ? 1 : 0;
    // Computing with a non-constant index requires extra mask or compare
    // operations.
    unsigned MaskCostForIdx = (Index != -1U) ? 0 : 1;
    if (ST->hasP9Altivec()) {
      // P10 has a VX-form insert which can handle a non-constant index; the
      // MaskCostForIdx is for masking the index.
      // P9 has an insert for constant indices: a move-to VSR and a
      // permute/insert. Assume vector operation cost for both (the cost will
      // be 2x on P9).
722 if (ISD == ISD::INSERT_VECTOR_ELT) {
723 if (ST->hasP10Vector())
724 return CostFactor + MaskCostForIdx;
725 if (Index != -1U)
726 return 2 * CostFactor;
727 } else if (ISD == ISD::EXTRACT_VECTOR_ELT) {
728 // It's an extract. Maybe we can do a cheap move-from VSR.
729 unsigned EltSize = Val->getScalarSizeInBits();
730 // P9 has both mfvsrd and mfvsrld for 64 bit integer.
731 if (EltSize == 64 && Index != -1U)
732 return 1;
733 if (EltSize == 32) {
734 unsigned MfvsrwzIndex = ST->isLittleEndian() ? 2 : 1;
735 if (Index == MfvsrwzIndex)
736 return 1;
737
          // For other indices, including non-constant ones, P9 has a VX-form
          // extract; the MaskCostForIdx is for masking the index.
740 return CostFactor + MaskCostForIdx;
741 }
742
743 // We need a vector extract (or mfvsrld). Assume vector operation cost.
744 // The cost of the load constant for a vector extract is disregarded
745 // (invariant, easily schedulable).
746 return CostFactor + MaskCostForOneBitSize + MaskCostForIdx;
747 }
748 } else if (ST->hasDirectMove() && Index != -1U) {
749 // Assume permute has standard cost.
750 // Assume move-to/move-from VSR have 2x standard cost.
751 if (ISD == ISD::INSERT_VECTOR_ELT)
752 return 3;
753 return 3 + MaskCostForOneBitSize;
754 }
755 }
756
757 // Estimated cost of a load-hit-store delay. This was obtained
758 // experimentally as a minimum needed to prevent unprofitable
759 // vectorization for the paq8p benchmark. It may need to be
760 // raised further if other unprofitable cases remain.
761 unsigned LHSPenalty = 2;
762 if (ISD == ISD::INSERT_VECTOR_ELT)
763 LHSPenalty += 7;
764
765 // Vector element insert/extract with Altivec is very expensive,
766 // because they require store and reload with the attendant
767 // processor stall for load-hit-store. Until VSX is available,
768 // these need to be estimated as very costly.
769 if (ISD == ISD::EXTRACT_VECTOR_ELT ||
770 ISD == ISD::INSERT_VECTOR_ELT)
771 return LHSPenalty + Cost;
772
773 return Cost;
774}
775
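// Memory operation cost. On top of the base implementation this accounts for
// a few PPC-specific cases: scalar 32/64-bit loads into VSRs that are cheap
// with VSX/P8Vector, the permutation-based unaligned load sequence used for
// Altivec types before P8, and the cost of decomposing genuinely unaligned
// accesses into aligned pieces (plus scalarization overhead for unaligned
// vector stores).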
776InstructionCost PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
777 Align Alignment,
778 unsigned AddressSpace,
779 TTI::TargetCostKind CostKind,
780 TTI::OperandValueInfo OpInfo,
781 const Instruction *I) const {
782
783 InstructionCost CostFactor = vectorCostAdjustmentFactor(Opcode, Ty1: Src, Ty2: nullptr);
784 if (!CostFactor.isValid())
785 return InstructionCost::getMax();
786
787 if (TLI->getValueType(DL, Ty: Src, AllowUnknown: true) == MVT::Other)
788 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
789 CostKind);
790 // Legalize the type.
791 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty: Src);
792 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
793 "Invalid Opcode");
794
795 InstructionCost Cost =
796 BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind);
797 // TODO: Handle other cost kinds.
798 if (CostKind != TTI::TCK_RecipThroughput)
799 return Cost;
800
801 Cost *= CostFactor;
802
803 bool IsAltivecType = ST->hasAltivec() &&
804 (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
805 LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
806 bool IsVSXType = ST->hasVSX() &&
807 (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);
808
809 // VSX has 32b/64b load instructions. Legalization can handle loading of
810 // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and
811 // PPCTargetLowering can't compute the cost appropriately. So here we
812 // explicitly check this case. There are also corresponding store
813 // instructions.
814 unsigned MemBits = Src->getPrimitiveSizeInBits();
815 unsigned SrcBytes = LT.second.getStoreSize();
816 if (ST->hasVSX() && IsAltivecType) {
817 if (MemBits == 64 || (ST->hasP8Vector() && MemBits == 32))
818 return 1;
819
820 // Use lfiwax/xxspltw
821 if (Opcode == Instruction::Load && MemBits == 32 && Alignment < SrcBytes)
822 return 2;
823 }
824
825 // Aligned loads and stores are easy.
826 if (!SrcBytes || Alignment >= SrcBytes)
827 return Cost;
828
829 // If we can use the permutation-based load sequence, then this is also
830 // relatively cheap (not counting loop-invariant instructions): one load plus
831 // one permute (the last load in a series has extra cost, but we're
832 // neglecting that here). Note that on the P7, we could do unaligned loads
833 // for Altivec types using the VSX instructions, but that's more expensive
834 // than using the permutation-based load sequence. On the P8, that's no
835 // longer true.
836 if (Opcode == Instruction::Load && (!ST->hasP8Vector() && IsAltivecType) &&
837 Alignment >= LT.second.getScalarType().getStoreSize())
838 return Cost + LT.first; // Add the cost of the permutations.
839
840 // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
841 // P7, unaligned vector loads are more expensive than the permutation-based
842 // load sequence, so that might be used instead, but regardless, the net cost
843 // is about the same (not counting loop-invariant instructions).
844 if (IsVSXType || (ST->hasVSX() && IsAltivecType))
845 return Cost;
846
847 // Newer PPC supports unaligned memory access.
848 if (TLI->allowsMisalignedMemoryAccesses(VT: LT.second, AddrSpace: 0))
849 return Cost;
850
851 // PPC in general does not support unaligned loads and stores. They'll need
852 // to be decomposed based on the alignment factor.
853
854 // Add the cost of each scalar load or store.
855 Cost += LT.first * ((SrcBytes / Alignment.value()) - 1);
856
857 // For a vector type, there is also scalarization overhead (only for
858 // stores, loads are expanded using the vector-load + permutation sequence,
859 // which is much less expensive).
860 if (Src->isVectorTy() && Opcode == Instruction::Store)
861 for (int I = 0, E = cast<FixedVectorType>(Val: Src)->getNumElements(); I < E;
862 ++I)
863 Cost +=
864 getVectorInstrCost(Opcode: Instruction::ExtractElement, Val: Src, CostKind, Index: I,
865 Op0: nullptr, Op1: nullptr, VIC: TTI::VectorInstrContext::None);
866
867 return Cost;
868}
869
870InstructionCost PPCTTIImpl::getInterleavedMemoryOpCost(
871 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
872 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
873 bool UseMaskForCond, bool UseMaskForGaps) const {
874 InstructionCost CostFactor =
875 vectorCostAdjustmentFactor(Opcode, Ty1: VecTy, Ty2: nullptr);
876 if (!CostFactor.isValid())
877 return InstructionCost::getMax();
878
879 if (UseMaskForCond || UseMaskForGaps)
880 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
881 Alignment, AddressSpace, CostKind,
882 UseMaskForCond, UseMaskForGaps);
883
884 assert(isa<VectorType>(VecTy) &&
885 "Expect a vector type for interleaved memory op");
886
887 // Legalize the type.
888 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty: VecTy);
889
  // First, the cost of the load/store operation itself.
891 InstructionCost Cost =
892 getMemoryOpCost(Opcode, Src: VecTy, Alignment, AddressSpace, CostKind);
893
  // PPC, for both Altivec and VSX, supports cheap arbitrary permutations
895 // (at least in the sense that there need only be one non-loop-invariant
896 // instruction). For each result vector, we need one shuffle per incoming
897 // vector (except that the first shuffle can take two incoming vectors
898 // because it does not need to take itself).
  Cost += Factor * (LT.first - 1);
900
901 return Cost;
902}
903
904InstructionCost
905PPCTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
906 TTI::TargetCostKind CostKind) const {
907
908 if (!VPIntrinsic::isVPIntrinsic(ICA.getID()))
909 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
910
911 if (ICA.getID() == Intrinsic::vp_load) {
912 MemIntrinsicCostAttributes MICA(Intrinsic::masked_load, ICA.getReturnType(),
913 Align(1), 0);
914 return getMemIntrinsicInstrCost(MICA, CostKind);
915 }
916
917 if (ICA.getID() == Intrinsic::vp_store) {
918 MemIntrinsicCostAttributes MICA(Intrinsic::masked_store,
919 ICA.getArgTypes()[0], Align(1), 0);
920 return getMemIntrinsicInstrCost(MICA, CostKind);
921 }
922
923 return InstructionCost::getInvalid();
924}
925
926bool PPCTTIImpl::areInlineCompatible(const Function *Caller,
927 const Function *Callee) const {
928 const TargetMachine &TM = getTLI()->getTargetMachine();
929
930 const FeatureBitset &CallerBits =
931 TM.getSubtargetImpl(*Caller)->getFeatureBits();
932 const FeatureBitset &CalleeBits =
933 TM.getSubtargetImpl(*Callee)->getFeatureBits();
934
  // Check that the targets' feature sets are exactly the same. We can revisit
  // this to see if it can be improved.
937 return CallerBits == CalleeBits;
938}
939
940bool PPCTTIImpl::areTypesABICompatible(const Function *Caller,
941 const Function *Callee,
942 ArrayRef<Type *> Types) const {
943
944 // We need to ensure that argument promotion does not
945 // attempt to promote pointers to MMA types (__vector_pair
946 // and __vector_quad) since these types explicitly cannot be
947 // passed as arguments. Both of these types are larger than
948 // the 128-bit Altivec vectors and have a scalar size of 1 bit.
949 if (!BaseT::areTypesABICompatible(Caller, Callee, Types))
950 return false;
951
952 return llvm::none_of(Range&: Types, P: [](Type *Ty) {
953 if (Ty->isSized())
954 return Ty->isIntOrIntVectorTy(BitWidth: 1) && Ty->getPrimitiveSizeInBits() > 128;
955 return false;
956 });
957}
958
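// Called to determine whether the latch compare of this loop can be saved.
// Nested loops are visited first and stop the search if any of them is a
// candidate. A loop qualifies when it can be analyzed and converted into a
// hardware (CTR) loop; in that case *BI is set to the exit branch, whose
// compare is expected to become redundant once the loop uses the
// decrement-and-branch (bdnz) form.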
959bool PPCTTIImpl::canSaveCmp(Loop *L, CondBrInst **BI, ScalarEvolution *SE,
960 LoopInfo *LI, DominatorTree *DT,
961 AssumptionCache *AC,
962 TargetLibraryInfo *LibInfo) const {
963 // Process nested loops first.
964 for (Loop *I : *L)
965 if (canSaveCmp(L: I, BI, SE, LI, DT, AC, LibInfo))
966 return false; // Stop search.
967
968 HardwareLoopInfo HWLoopInfo(L);
969
970 if (!HWLoopInfo.canAnalyze(LI&: *LI))
971 return false;
972
973 if (!isHardwareLoopProfitable(L, SE&: *SE, AC&: *AC, LibInfo, HWLoopInfo))
974 return false;
975
976 if (!HWLoopInfo.isHardwareLoopCandidate(SE&: *SE, LI&: *LI, DT&: *DT))
977 return false;
978
979 *BI = HWLoopInfo.ExitBranch;
980 return true;
981}
982
983bool PPCTTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
984 const TargetTransformInfo::LSRCost &C2) const {
  // The PowerPC default behaviour here is "instruction count first priority".
  // If LsrNoInsnsCost is set, call the default implementation instead.
987 if (!LsrNoInsnsCost)
988 return std::tie(args: C1.Insns, args: C1.NumRegs, args: C1.AddRecCost, args: C1.NumIVMuls,
989 args: C1.NumBaseAdds, args: C1.ScaleCost, args: C1.ImmCost, args: C1.SetupCost) <
990 std::tie(args: C2.Insns, args: C2.NumRegs, args: C2.AddRecCost, args: C2.NumIVMuls,
991 args: C2.NumBaseAdds, args: C2.ScaleCost, args: C2.ImmCost, args: C2.SetupCost);
992 return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
993}
994
995bool PPCTTIImpl::isNumRegsMajorCostOfLSR() const { return false; }
996
997bool PPCTTIImpl::shouldBuildRelLookupTables() const {
998 const PPCTargetMachine &TM = ST->getTargetMachine();
999 // XCOFF hasn't implemented lowerRelativeReference, disable non-ELF for now.
1000 if (!TM.isELFv2ABI())
1001 return false;
1002 return BaseT::shouldBuildRelLookupTables();
1003}
1004
1005bool PPCTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
1006 MemIntrinsicInfo &Info) const {
1007 switch (Inst->getIntrinsicID()) {
1008 case Intrinsic::ppc_altivec_lvx:
1009 case Intrinsic::ppc_altivec_lvxl:
1010 case Intrinsic::ppc_altivec_lvebx:
1011 case Intrinsic::ppc_altivec_lvehx:
1012 case Intrinsic::ppc_altivec_lvewx:
1013 case Intrinsic::ppc_vsx_lxvd2x:
1014 case Intrinsic::ppc_vsx_lxvw4x:
1015 case Intrinsic::ppc_vsx_lxvd2x_be:
1016 case Intrinsic::ppc_vsx_lxvw4x_be:
1017 case Intrinsic::ppc_vsx_lxvl:
1018 case Intrinsic::ppc_vsx_lxvll:
1019 case Intrinsic::ppc_vsx_lxvp: {
1020 Info.PtrVal = Inst->getArgOperand(i: 0);
1021 Info.ReadMem = true;
1022 Info.WriteMem = false;
1023 return true;
1024 }
1025 case Intrinsic::ppc_altivec_stvx:
1026 case Intrinsic::ppc_altivec_stvxl:
1027 case Intrinsic::ppc_altivec_stvebx:
1028 case Intrinsic::ppc_altivec_stvehx:
1029 case Intrinsic::ppc_altivec_stvewx:
1030 case Intrinsic::ppc_vsx_stxvd2x:
1031 case Intrinsic::ppc_vsx_stxvw4x:
1032 case Intrinsic::ppc_vsx_stxvd2x_be:
1033 case Intrinsic::ppc_vsx_stxvw4x_be:
1034 case Intrinsic::ppc_vsx_stxvl:
1035 case Intrinsic::ppc_vsx_stxvll:
1036 case Intrinsic::ppc_vsx_stxvp: {
1037 Info.PtrVal = Inst->getArgOperand(i: 1);
1038 Info.ReadMem = false;
1039 Info.WriteMem = true;
1040 return true;
1041 }
1042 case Intrinsic::ppc_stbcx:
1043 case Intrinsic::ppc_sthcx:
1044 case Intrinsic::ppc_stdcx:
1045 case Intrinsic::ppc_stwcx: {
1046 Info.PtrVal = Inst->getArgOperand(i: 0);
1047 Info.ReadMem = false;
1048 Info.WriteMem = true;
1049 return true;
1050 }
1051 default:
1052 break;
1053 }
1054
1055 return false;
1056}
1057
1058bool PPCTTIImpl::supportsTailCallFor(const CallBase *CB) const {
1059 return TLI->supportsTailCallFor(CB);
1060}
1061
1062// Target hook used by CodeGen to decide whether to expand vector predication
1063// intrinsics into scalar operations or to use special ISD nodes to represent
1064// them. The Target will not see the intrinsics.
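// A sketch of the policy below (the lxvl/stxvl mapping is an assumption about
// the intended lowering, not something this file spells out): on 64-bit
// Power10/Future, or Power9 with -ppc-pwr9-evl, a vp.load or vp.store of a
// 128-bit integer vector whose mask is all-true is reported as Legal so that
// it can be selected to the load/store-with-length instructions; everything
// else falls back to the default legalization.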
1065TargetTransformInfo::VPLegalization
1066PPCTTIImpl::getVPLegalizationStrategy(const VPIntrinsic &PI) const {
1067 using VPLegalization = TargetTransformInfo::VPLegalization;
1068 unsigned Directive = ST->getCPUDirective();
1069 VPLegalization DefaultLegalization = BaseT::getVPLegalizationStrategy(PI);
1070 if (Directive != PPC::DIR_PWR10 && Directive != PPC::DIR_PWR_FUTURE &&
1071 (!Pwr9EVL || Directive != PPC::DIR_PWR9))
1072 return DefaultLegalization;
1073
1074 if (!ST->isPPC64())
1075 return DefaultLegalization;
1076
1077 unsigned IID = PI.getIntrinsicID();
1078 if (IID != Intrinsic::vp_load && IID != Intrinsic::vp_store)
1079 return DefaultLegalization;
1080
1081 bool IsLoad = IID == Intrinsic::vp_load;
1082 Type *VecTy = IsLoad ? PI.getType() : PI.getOperand(i_nocapture: 0)->getType();
1083 EVT VT = TLI->getValueType(DL, Ty: VecTy, AllowUnknown: true);
1084 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
1085 VT != MVT::v16i8)
1086 return DefaultLegalization;
1087
1088 auto IsAllTrueMask = [](Value *MaskVal) {
1089 if (Value *SplattedVal = getSplatValue(V: MaskVal))
1090 if (auto *ConstValue = dyn_cast<Constant>(Val: SplattedVal))
1091 return ConstValue->isAllOnesValue();
1092 return false;
1093 };
1094 unsigned MaskIx = IsLoad ? 1 : 2;
1095 if (!IsAllTrueMask(PI.getOperand(i_nocapture: MaskIx)))
1096 return DefaultLegalization;
1097
1098 return VPLegalization(VPLegalization::Legal, VPLegalization::Legal);
1099}
1100
1101bool PPCTTIImpl::hasActiveVectorLength() const {
1102 if (!PPCEVL || !ST->isPPC64())
1103 return false;
1104 unsigned CPU = ST->getCPUDirective();
1105 return CPU == PPC::DIR_PWR10 || CPU == PPC::DIR_PWR_FUTURE ||
1106 (Pwr9EVL && CPU == PPC::DIR_PWR9);
1107}
1108
1109bool PPCTTIImpl::isLegalMaskedLoad(Type *DataType, Align Alignment,
1110 unsigned AddressSpace,
1111 TTI::MaskKind MaskKind) const {
1112 if (!hasActiveVectorLength())
1113 return false;
1114
1115 auto IsLegalLoadWithLengthType = [](EVT VT) {
1116 if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8)
1117 return false;
1118 return true;
1119 };
1120
1121 return IsLegalLoadWithLengthType(TLI->getValueType(DL, Ty: DataType, AllowUnknown: true));
1122}
1123
1124bool PPCTTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment,
1125 unsigned AddressSpace,
1126 TTI::MaskKind MaskKind) const {
1127 return isLegalMaskedLoad(DataType, Alignment, AddressSpace);
1128}
1129
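// Cost of masked load/store intrinsics. In brief: anything other than a
// masked load or store of a fixed vector no wider than 128 bits with a legal
// element type gets the base cost; otherwise the cost is a scalar compare,
// a scalar select, and one (adjusted) vector load/store-with-length, usually
// plus one more scalar shift to form the length operand.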
1130InstructionCost
1131PPCTTIImpl::getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA,
1132 TTI::TargetCostKind CostKind) const {
1133
1134 InstructionCost BaseCost = BaseT::getMemIntrinsicInstrCost(MICA, CostKind);
1135
1136 unsigned Opcode;
1137 switch (MICA.getID()) {
1138 case Intrinsic::masked_load:
1139 Opcode = Instruction::Load;
1140 break;
1141 case Intrinsic::masked_store:
1142 Opcode = Instruction::Store;
1143 break;
1144 default:
1145 return BaseCost;
1146 }
1147
1148 Type *DataTy = MICA.getDataType();
1149 Align Alignment = MICA.getAlignment();
1150 unsigned AddressSpace = MICA.getAddressSpace();
1151
1152 auto VecTy = dyn_cast<FixedVectorType>(Val: DataTy);
1153 if (!VecTy)
1154 return BaseCost;
1155 if (Opcode == Instruction::Load) {
1156 if (!isLegalMaskedLoad(DataType: VecTy->getScalarType(), Alignment, AddressSpace))
1157 return BaseCost;
1158 } else {
1159 if (!isLegalMaskedStore(DataType: VecTy->getScalarType(), Alignment, AddressSpace))
1160 return BaseCost;
1161 }
1162 if (VecTy->getPrimitiveSizeInBits() > 128)
1163 return BaseCost;
1164
  // The cost is 1 (scalar compare) + 1 (scalar select) +
  // 1 * vectorCostAdjustmentFactor (vector load/store with length),
  // plus possibly 1 more for a scalar shift to form the length operand.
1168 InstructionCost Cost =
1169 1 + 1 + vectorCostAdjustmentFactor(Opcode, Ty1: DataTy, Ty2: nullptr);
1170 if (ST->getCPUDirective() != PPC::DIR_PWR_FUTURE ||
1171 VecTy->getScalarSizeInBits() != 8)
1172 Cost += 1; // need shift for length
1173 return Cost;
1174}
1175