1 | //===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | |
9 | #include "PPCTargetTransformInfo.h" |
10 | #include "llvm/Analysis/CodeMetrics.h" |
11 | #include "llvm/Analysis/TargetLibraryInfo.h" |
12 | #include "llvm/Analysis/TargetTransformInfo.h" |
13 | #include "llvm/CodeGen/BasicTTIImpl.h" |
14 | #include "llvm/CodeGen/CostTable.h" |
15 | #include "llvm/CodeGen/TargetLowering.h" |
16 | #include "llvm/CodeGen/TargetSchedule.h" |
17 | #include "llvm/IR/IntrinsicsPowerPC.h" |
18 | #include "llvm/IR/ProfDataUtils.h" |
19 | #include "llvm/Support/CommandLine.h" |
20 | #include "llvm/Support/Debug.h" |
21 | #include "llvm/Transforms/InstCombine/InstCombiner.h" |
22 | #include "llvm/Transforms/Utils/Local.h" |
23 | #include <optional> |
24 | |
25 | using namespace llvm; |
26 | |
27 | #define DEBUG_TYPE "ppctti" |
28 | |
static cl::opt<bool> VecMaskCost("ppc-vec-mask-cost",
cl::desc("add masking cost for i1 vectors"), cl::init(true), cl::Hidden);

static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);

static cl::opt<bool>
EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
                cl::desc("Enable using coldcc calling conv for cold "
                         "internal functions"));

static cl::opt<bool>
LsrNoInsnsCost("ppc-lsr-no-insns-cost", cl::Hidden, cl::init(false),
               cl::desc("Do not add instruction count to lsr cost model"));

// The latency of mtctr is only justified if there are more than 4
// comparisons that will be removed as a result.
static cl::opt<unsigned>
SmallCTRLoopThreshold("min-ctr-loop-threshold", cl::init(4), cl::Hidden,
                      cl::desc("Loops with a constant trip count smaller than "
                               "this value will not use the count register."));
50 | |
51 | //===----------------------------------------------------------------------===// |
52 | // |
53 | // PPC cost model. |
54 | // |
55 | //===----------------------------------------------------------------------===// |
56 | |
57 | TargetTransformInfo::PopcntSupportKind |
58 | PPCTTIImpl::getPopcntSupport(unsigned TyWidth) { |
59 | assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2" ); |
60 | if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64) |
61 | return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ? |
62 | TTI::PSK_SlowHardware : TTI::PSK_FastHardware; |
63 | return TTI::PSK_Software; |
64 | } |
65 | |
66 | std::optional<Instruction *> |
67 | PPCTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const { |
68 | Intrinsic::ID IID = II.getIntrinsicID(); |
69 | switch (IID) { |
70 | default: |
71 | break; |
72 | case Intrinsic::ppc_altivec_lvx: |
73 | case Intrinsic::ppc_altivec_lvxl: |
74 | // Turn PPC lvx -> load if the pointer is known aligned. |
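    // For illustration (simplified IR, assuming the pointer is proven
    // 16-byte aligned):
    //   %v = call <4 x i32> @llvm.ppc.altivec.lvx(ptr %p)
    // becomes
    //   %v = load <4 x i32>, ptr %p, align 16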
    if (getOrEnforceKnownAlignment(
            II.getArgOperand(0), Align(16), IC.getDataLayout(), &II,
            &IC.getAssumptionCache(), &IC.getDominatorTree()) >= 16) {
      Value *Ptr = II.getArgOperand(0);
      return new LoadInst(II.getType(), Ptr, "", false, Align(16));
80 | } |
81 | break; |
82 | case Intrinsic::ppc_vsx_lxvw4x: |
83 | case Intrinsic::ppc_vsx_lxvd2x: { |
84 | // Turn PPC VSX loads into normal loads. |
    Value *Ptr = II.getArgOperand(0);
    return new LoadInst(II.getType(), Ptr, Twine(""), false, Align(1));
87 | } |
88 | case Intrinsic::ppc_altivec_stvx: |
89 | case Intrinsic::ppc_altivec_stvxl: |
90 | // Turn stvx -> store if the pointer is known aligned. |
    if (getOrEnforceKnownAlignment(
            II.getArgOperand(1), Align(16), IC.getDataLayout(), &II,
            &IC.getAssumptionCache(), &IC.getDominatorTree()) >= 16) {
      Value *Ptr = II.getArgOperand(1);
      return new StoreInst(II.getArgOperand(0), Ptr, false, Align(16));
96 | } |
97 | break; |
98 | case Intrinsic::ppc_vsx_stxvw4x: |
99 | case Intrinsic::ppc_vsx_stxvd2x: { |
100 | // Turn PPC VSX stores into normal stores. |
    Value *Ptr = II.getArgOperand(1);
    return new StoreInst(II.getArgOperand(0), Ptr, false, Align(1));
103 | } |
104 | case Intrinsic::ppc_altivec_vperm: |
105 | // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant. |
106 | // Note that ppc_altivec_vperm has a big-endian bias, so when creating |
107 | // a vectorshuffle for little endian, we must undo the transformation |
108 | // performed on vec_perm in altivec.h. That is, we must complement |
109 | // the permutation mask with respect to 31 and reverse the order of |
110 | // V1 and V2. |
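    // For example (an illustrative trace of the code below, little-endian):
    // a constant mask byte of 30 gives Idx = 31 - 30 = 1, so byte 1 is
    // extracted from the swapped operand and inserted at that mask element's
    // position in the result.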
    if (Constant *Mask = dyn_cast<Constant>(II.getArgOperand(2))) {
      assert(cast<FixedVectorType>(Mask->getType())->getNumElements() == 16 &&
             "Bad type for intrinsic!");
114 | |
115 | // Check that all of the elements are integer constants or undefs. |
116 | bool AllEltsOk = true; |
117 | for (unsigned i = 0; i != 16; ++i) { |
        Constant *Elt = Mask->getAggregateElement(i);
        if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
120 | AllEltsOk = false; |
121 | break; |
122 | } |
123 | } |
124 | |
125 | if (AllEltsOk) { |
126 | // Cast the input vectors to byte vectors. |
        Value *Op0 =
            IC.Builder.CreateBitCast(II.getArgOperand(0), Mask->getType());
        Value *Op1 =
            IC.Builder.CreateBitCast(II.getArgOperand(1), Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));
136 | |
137 | for (unsigned i = 0; i != 16; ++i) { |
          if (isa<UndefValue>(Mask->getAggregateElement(i)))
            continue;
          unsigned Idx =
              cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
          Idx &= 31; // Match the hardware behavior.
          if (DL.isLittleEndian())
            Idx = 31 - Idx;

          if (!ExtractedElts[Idx]) {
            Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
            Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
            ExtractedElts[Idx] = IC.Builder.CreateExtractElement(
                Idx < 16 ? Op0ToUse : Op1ToUse, IC.Builder.getInt32(Idx & 15));
          }

          // Insert this value into the result vector.
          Result = IC.Builder.CreateInsertElement(Result, ExtractedElts[Idx],
                                                  IC.Builder.getInt32(i));
        }
        return CastInst::Create(Instruction::BitCast, Result, II.getType());
158 | } |
159 | } |
160 | break; |
161 | } |
162 | return std::nullopt; |
163 | } |
164 | |
165 | InstructionCost PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty, |
166 | TTI::TargetCostKind CostKind) { |
167 | if (DisablePPCConstHoist) |
168 | return BaseT::getIntImmCost(Imm, Ty, CostKind); |
169 | |
170 | assert(Ty->isIntegerTy()); |
171 | |
172 | unsigned BitSize = Ty->getPrimitiveSizeInBits(); |
173 | if (BitSize == 0) |
174 | return ~0U; |
175 | |
176 | if (Imm == 0) |
177 | return TTI::TCC_Free; |
178 | |
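  // Rough mapping of the checks below (illustrative values, not exhaustive):
  //   42         -> fits a signed 16-bit "li"             -> TCC_Basic
  //   0x12340000 -> low halfword zero, a single "lis"     -> TCC_Basic
  //   0x12345678 -> "lis" + "ori" pair                    -> 2 * TCC_Basic
  //   constants needing more than 32 bits                 -> 4 * TCC_Basic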
179 | if (Imm.getBitWidth() <= 64) { |
    if (isInt<16>(Imm.getSExtValue()))
181 | return TTI::TCC_Basic; |
182 | |
    if (isInt<32>(Imm.getSExtValue())) {
184 | // A constant that can be materialized using lis. |
185 | if ((Imm.getZExtValue() & 0xFFFF) == 0) |
186 | return TTI::TCC_Basic; |
187 | |
188 | return 2 * TTI::TCC_Basic; |
189 | } |
190 | } |
191 | |
192 | return 4 * TTI::TCC_Basic; |
193 | } |
194 | |
195 | InstructionCost PPCTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, |
196 | const APInt &Imm, Type *Ty, |
197 | TTI::TargetCostKind CostKind) { |
198 | if (DisablePPCConstHoist) |
199 | return BaseT::getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind); |
200 | |
201 | assert(Ty->isIntegerTy()); |
202 | |
203 | unsigned BitSize = Ty->getPrimitiveSizeInBits(); |
204 | if (BitSize == 0) |
205 | return ~0U; |
206 | |
207 | switch (IID) { |
208 | default: |
209 | return TTI::TCC_Free; |
210 | case Intrinsic::sadd_with_overflow: |
211 | case Intrinsic::uadd_with_overflow: |
212 | case Intrinsic::ssub_with_overflow: |
213 | case Intrinsic::usub_with_overflow: |
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
215 | return TTI::TCC_Free; |
216 | break; |
217 | case Intrinsic::experimental_stackmap: |
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
219 | return TTI::TCC_Free; |
220 | break; |
221 | case Intrinsic::experimental_patchpoint_void: |
222 | case Intrinsic::experimental_patchpoint: |
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
224 | return TTI::TCC_Free; |
225 | break; |
226 | } |
227 | return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind); |
228 | } |
229 | |
230 | InstructionCost PPCTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx, |
231 | const APInt &Imm, Type *Ty, |
232 | TTI::TargetCostKind CostKind, |
233 | Instruction *Inst) { |
234 | if (DisablePPCConstHoist) |
235 | return BaseT::getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst); |
236 | |
237 | assert(Ty->isIntegerTy()); |
238 | |
239 | unsigned BitSize = Ty->getPrimitiveSizeInBits(); |
240 | if (BitSize == 0) |
241 | return ~0U; |
242 | |
243 | unsigned ImmIdx = ~0U; |
244 | bool ShiftedFree = false, RunFree = false, UnsignedFree = false, |
245 | ZeroFree = false; |
246 | switch (Opcode) { |
247 | default: |
248 | return TTI::TCC_Free; |
249 | case Instruction::GetElementPtr: |
250 | // Always hoist the base address of a GetElementPtr. This prevents the |
251 | // creation of new constants for every base constant that gets constant |
252 | // folded with the offset. |
253 | if (Idx == 0) |
254 | return 2 * TTI::TCC_Basic; |
255 | return TTI::TCC_Free; |
256 | case Instruction::And: |
257 | RunFree = true; // (for the rotate-and-mask instructions) |
258 | [[fallthrough]]; |
259 | case Instruction::Add: |
260 | case Instruction::Or: |
261 | case Instruction::Xor: |
262 | ShiftedFree = true; |
263 | [[fallthrough]]; |
264 | case Instruction::Sub: |
265 | case Instruction::Mul: |
266 | case Instruction::Shl: |
267 | case Instruction::LShr: |
268 | case Instruction::AShr: |
269 | ImmIdx = 1; |
270 | break; |
271 | case Instruction::ICmp: |
272 | UnsignedFree = true; |
273 | ImmIdx = 1; |
274 | // Zero comparisons can use record-form instructions. |
275 | [[fallthrough]]; |
276 | case Instruction::Select: |
277 | ZeroFree = true; |
278 | break; |
279 | case Instruction::PHI: |
280 | case Instruction::Call: |
281 | case Instruction::Ret: |
282 | case Instruction::Load: |
283 | case Instruction::Store: |
284 | break; |
285 | } |
286 | |
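  // Illustrative examples of the checks below (assumed, not from the source):
  //   and  %x, 0x00FFFF00 -> shifted mask, a rotate-and-mask candidate -> free
  //   add  %x, 0x12340000 -> low halfword zero (addis-style immediate) -> free
  //   icmp ult %x, 1000   -> fits an unsigned 16-bit compare immediate -> free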
287 | if (ZeroFree && Imm == 0) |
288 | return TTI::TCC_Free; |
289 | |
  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }
312 | |
313 | return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind); |
314 | } |
315 | |
// Check if the current Type is an MMA vector type. The only valid MMA vector
// types are v256i1 and v512i1.
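// For example, the MMA accumulator type __vector_quad lowers to v512i1 and
// __vector_pair lowers to v256i1; both satisfy this predicate.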
318 | static bool isMMAType(Type *Ty) { |
319 | return Ty->isVectorTy() && (Ty->getScalarSizeInBits() == 1) && |
320 | (Ty->getPrimitiveSizeInBits() > 128); |
321 | } |
322 | |
323 | InstructionCost PPCTTIImpl::getInstructionCost(const User *U, |
324 | ArrayRef<const Value *> Operands, |
325 | TTI::TargetCostKind CostKind) { |
326 | // We already implement getCastInstrCost and getMemoryOpCost where we perform |
327 | // the vector adjustment there. |
  if (isa<CastInst>(U) || isa<LoadInst>(U) || isa<StoreInst>(U))
329 | return BaseT::getInstructionCost(U, Operands, CostKind); |
330 | |
331 | if (U->getType()->isVectorTy()) { |
332 | // Instructions that need to be split should cost more. |
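    // For example, a <8 x i32> operation on a 128-bit Altivec/VSX register
    // file legalizes to two <4 x i32> operations, so LT.first is 2 and the
    // base cost is doubled.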
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(U->getType());
334 | return LT.first * BaseT::getInstructionCost(U, Operands, CostKind); |
335 | } |
336 | |
337 | return BaseT::getInstructionCost(U, Operands, CostKind); |
338 | } |
339 | |
340 | bool PPCTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, |
341 | AssumptionCache &AC, |
342 | TargetLibraryInfo *LibInfo, |
343 | HardwareLoopInfo &HWLoopInfo) { |
344 | const PPCTargetMachine &TM = ST->getTargetMachine(); |
345 | TargetSchedModel SchedModel; |
  SchedModel.init(ST);

  // Do not convert short loops with a small constant trip count into CTR
  // loops.
349 | unsigned ConstTripCount = SE.getSmallConstantTripCount(L); |
350 | if (ConstTripCount && ConstTripCount < SmallCTRLoopThreshold) { |
351 | SmallPtrSet<const Value *, 32> EphValues; |
    CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
    CodeMetrics Metrics;
    for (BasicBlock *BB : L->blocks())
      Metrics.analyzeBasicBlock(BB, *this, EphValues);
356 | // 6 is an approximate latency for the mtctr instruction. |
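    // For example, with an issue width of 4 this rejects loops of roughly 24
    // instructions or fewer (an approximation, since NumInsts is itself an
    // estimate).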
357 | if (Metrics.NumInsts <= (6 * SchedModel.getIssueWidth())) |
358 | return false; |
359 | } |
360 | |
  // Check that there are no hardware-loop-related intrinsics in the loop.
  for (auto *BB : L->getBlocks())
    for (auto &I : *BB)
      if (auto *Call = dyn_cast<IntrinsicInst>(&I))
        if (Call->getIntrinsicID() == Intrinsic::set_loop_iterations ||
            Call->getIntrinsicID() == Intrinsic::loop_decrement)
367 | return false; |
368 | |
369 | SmallVector<BasicBlock*, 4> ExitingBlocks; |
370 | L->getExitingBlocks(ExitingBlocks); |
371 | |
372 | // If there is an exit edge known to be frequently taken, |
373 | // we should not transform this loop. |
374 | for (auto &BB : ExitingBlocks) { |
375 | Instruction *TI = BB->getTerminator(); |
376 | if (!TI) continue; |
377 | |
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      uint64_t TrueWeight = 0, FalseWeight = 0;
      if (!BI->isConditional() ||
          !extractBranchWeights(*BI, TrueWeight, FalseWeight))
        continue;

      // If the exit path is more frequent than the loop path,
      // we return here without further analysis for this loop.
      bool TrueIsExit = !L->contains(BI->getSuccessor(0));
      if ((TrueIsExit && FalseWeight < TrueWeight) ||
          (!TrueIsExit && FalseWeight > TrueWeight))
        return false;
390 | } |
391 | } |
392 | |
393 | LLVMContext &C = L->getHeader()->getContext(); |
394 | HWLoopInfo.CountType = TM.isPPC64() ? |
395 | Type::getInt64Ty(C) : Type::getInt32Ty(C); |
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
397 | return true; |
398 | } |
399 | |
void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
401 | TTI::UnrollingPreferences &UP, |
402 | OptimizationRemarkEmitter *ORE) { |
403 | if (ST->getCPUDirective() == PPC::DIR_A2) { |
404 | // The A2 is in-order with a deep pipeline, and concatenation unrolling |
405 | // helps expose latency-hiding opportunities to the instruction scheduler. |
406 | UP.Partial = UP.Runtime = true; |
407 | |
408 | // We unroll a lot on the A2 (hundreds of instructions), and the benefits |
409 | // often outweigh the cost of a division to compute the trip count. |
410 | UP.AllowExpensiveTripCount = true; |
411 | } |
412 | |
413 | BaseT::getUnrollingPreferences(L, SE, UP, ORE); |
414 | } |
415 | |
416 | void PPCTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE, |
417 | TTI::PeelingPreferences &PP) { |
418 | BaseT::getPeelingPreferences(L, SE, PP); |
419 | } |
420 | // This function returns true to allow using coldcc calling convention. |
421 | // Returning true results in coldcc being used for functions which are cold at |
422 | // all call sites when the callers of the functions are not calling any other |
423 | // non coldcc functions. |
424 | bool PPCTTIImpl::useColdCCForColdCall(Function &F) { |
425 | return EnablePPCColdCC; |
426 | } |
427 | |
428 | bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) { |
429 | // On the A2, always unroll aggressively. |
430 | if (ST->getCPUDirective() == PPC::DIR_A2) |
431 | return true; |
432 | |
433 | return LoopHasReductions; |
434 | } |
435 | |
436 | PPCTTIImpl::TTI::MemCmpExpansionOptions |
437 | PPCTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const { |
438 | TTI::MemCmpExpansionOptions Options; |
439 | Options.LoadSizes = {8, 4, 2, 1}; |
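  // For example, with these sizes a 15-byte memcmp can be expanded into 8-,
  // 4-, 2- and 1-byte load/compare steps, subject to MaxNumLoads below.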
440 | Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize); |
441 | return Options; |
442 | } |
443 | |
444 | bool PPCTTIImpl::enableInterleavedAccessVectorization() { |
445 | return true; |
446 | } |
447 | |
448 | unsigned PPCTTIImpl::getNumberOfRegisters(unsigned ClassID) const { |
449 | assert(ClassID == GPRRC || ClassID == FPRRC || |
450 | ClassID == VRRC || ClassID == VSXRC); |
451 | if (ST->hasVSX()) { |
452 | assert(ClassID == GPRRC || ClassID == VSXRC || ClassID == VRRC); |
453 | return ClassID == VSXRC ? 64 : 32; |
454 | } |
455 | assert(ClassID == GPRRC || ClassID == FPRRC || ClassID == VRRC); |
456 | return 32; |
457 | } |
458 | |
459 | unsigned PPCTTIImpl::getRegisterClassForType(bool Vector, Type *Ty) const { |
460 | if (Vector) |
461 | return ST->hasVSX() ? VSXRC : VRRC; |
462 | else if (Ty && (Ty->getScalarType()->isFloatTy() || |
463 | Ty->getScalarType()->isDoubleTy())) |
464 | return ST->hasVSX() ? VSXRC : FPRRC; |
465 | else if (Ty && (Ty->getScalarType()->isFP128Ty() || |
466 | Ty->getScalarType()->isPPC_FP128Ty())) |
467 | return VRRC; |
468 | else if (Ty && Ty->getScalarType()->isHalfTy()) |
469 | return VSXRC; |
470 | else |
471 | return GPRRC; |
472 | } |
473 | |
474 | const char* PPCTTIImpl::getRegisterClassName(unsigned ClassID) const { |
475 | |
476 | switch (ClassID) { |
477 | default: |
478 | llvm_unreachable("unknown register class" ); |
479 | return "PPC::unknown register class" ; |
480 | case GPRRC: return "PPC::GPRRC" ; |
481 | case FPRRC: return "PPC::FPRRC" ; |
482 | case VRRC: return "PPC::VRRC" ; |
483 | case VSXRC: return "PPC::VSXRC" ; |
484 | } |
485 | } |
486 | |
487 | TypeSize |
488 | PPCTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const { |
489 | switch (K) { |
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(ST->isPPC64() ? 64 : 32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    return TypeSize::getFixed(ST->hasAltivec() ? 128 : 0);
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
496 | } |
497 | |
498 | llvm_unreachable("Unsupported register kind" ); |
499 | } |
500 | |
501 | unsigned PPCTTIImpl::getCacheLineSize() const { |
502 | // Starting with P7 we have a cache line size of 128. |
503 | unsigned Directive = ST->getCPUDirective(); |
504 | // Assume that Future CPU has the same cache line size as the others. |
505 | if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 || |
506 | Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 || |
507 | Directive == PPC::DIR_PWR11 || Directive == PPC::DIR_PWR_FUTURE) |
508 | return 128; |
509 | |
510 | // On other processors return a default of 64 bytes. |
511 | return 64; |
512 | } |
513 | |
514 | unsigned PPCTTIImpl::getPrefetchDistance() const { |
515 | return 300; |
516 | } |
517 | |
518 | unsigned PPCTTIImpl::getMaxInterleaveFactor(ElementCount VF) { |
519 | unsigned Directive = ST->getCPUDirective(); |
520 | // The 440 has no SIMD support, but floating-point instructions |
521 | // have a 5-cycle latency, so unroll by 5x for latency hiding. |
522 | if (Directive == PPC::DIR_440) |
523 | return 5; |
524 | |
525 | // The A2 has no SIMD support, but floating-point instructions |
526 | // have a 6-cycle latency, so unroll by 6x for latency hiding. |
527 | if (Directive == PPC::DIR_A2) |
528 | return 6; |
529 | |
530 | // FIXME: For lack of any better information, do no harm... |
531 | if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500) |
532 | return 1; |
533 | |
534 | // For P7 and P8, floating-point instructions have a 6-cycle latency and |
535 | // there are two execution units, so unroll by 12x for latency hiding. |
536 | // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready |
537 | // FIXME: the same for P10 as previous gen until POWER10 scheduling is ready |
538 | // Assume that future is the same as the others. |
539 | if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 || |
540 | Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 || |
541 | Directive == PPC::DIR_PWR11 || Directive == PPC::DIR_PWR_FUTURE) |
542 | return 12; |
543 | |
544 | // For most things, modern systems have two execution units (and |
545 | // out-of-order execution). |
546 | return 2; |
547 | } |
548 | |
// Returns a cost adjustment factor to adjust the cost of vector instructions
// on targets where there is overlap between the vector and scalar units,
// thereby reducing the overall throughput of vector code relative to scalar
// code. An invalid instruction cost is returned if the type is an MMA vector
// type.
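// For example, on a subtarget where vectorsUseTwoUnits() is true, a legal
// (unsplit, non-expanded) vector operation is reported at twice the base
// cost, while scalar operations and vectors that must be split keep a factor
// of 1.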
553 | InstructionCost PPCTTIImpl::vectorCostAdjustmentFactor(unsigned Opcode, |
554 | Type *Ty1, Type *Ty2) { |
555 | // If the vector type is of an MMA type (v256i1, v512i1), an invalid |
556 | // instruction cost is returned. This is to signify to other cost computing |
557 | // functions to return the maximum instruction cost in order to prevent any |
558 | // opportunities for the optimizer to produce MMA types within the IR. |
  if (isMMAType(Ty1))
560 | return InstructionCost::getInvalid(); |
561 | |
562 | if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy()) |
563 | return InstructionCost(1); |
564 | |
  std::pair<InstructionCost, MVT> LT1 = getTypeLegalizationCost(Ty1);
566 | // If type legalization involves splitting the vector, we don't want to |
567 | // double the cost at every step - only the last step. |
568 | if (LT1.first != 1 || !LT1.second.isVector()) |
569 | return InstructionCost(1); |
570 | |
571 | int ISD = TLI->InstructionOpcodeToISD(Opcode); |
  if (TLI->isOperationExpand(ISD, LT1.second))
573 | return InstructionCost(1); |
574 | |
575 | if (Ty2) { |
    std::pair<InstructionCost, MVT> LT2 = getTypeLegalizationCost(Ty2);
577 | if (LT2.first != 1 || !LT2.second.isVector()) |
578 | return InstructionCost(1); |
579 | } |
580 | |
581 | return InstructionCost(2); |
582 | } |
583 | |
584 | InstructionCost PPCTTIImpl::getArithmeticInstrCost( |
585 | unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, |
586 | TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info, |
587 | ArrayRef<const Value *> Args, |
588 | const Instruction *CxtI) { |
589 | assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode" ); |
590 | |
  InstructionCost CostFactor = vectorCostAdjustmentFactor(Opcode, Ty, nullptr);
592 | if (!CostFactor.isValid()) |
593 | return InstructionCost::getMax(); |
594 | |
595 | // TODO: Handle more cost kinds. |
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Args, CxtI);

  // Fallback to the default implementation.
  InstructionCost Cost = BaseT::getArithmeticInstrCost(
      Opcode, Ty, CostKind, Op1Info, Op2Info);
603 | return Cost * CostFactor; |
604 | } |
605 | |
606 | InstructionCost PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, |
607 | ArrayRef<int> Mask, |
608 | TTI::TargetCostKind CostKind, |
609 | int Index, Type *SubTp, |
610 | ArrayRef<const Value *> Args, |
611 | const Instruction *CxtI) { |
612 | |
  InstructionCost CostFactor =
      vectorCostAdjustmentFactor(Instruction::ShuffleVector, Tp, nullptr);
615 | if (!CostFactor.isValid()) |
616 | return InstructionCost::getMax(); |
617 | |
618 | // Legalize the type. |
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
620 | |
  // PPC, for both Altivec and VSX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). We need one such shuffle instruction for each actual
  // register (this is not true for arbitrary shuffles, but is true for the
  // structured types of shuffles covered by TTI::ShuffleKind).
626 | return LT.first * CostFactor; |
627 | } |
628 | |
629 | InstructionCost PPCTTIImpl::getCFInstrCost(unsigned Opcode, |
630 | TTI::TargetCostKind CostKind, |
631 | const Instruction *I) { |
632 | if (CostKind != TTI::TCK_RecipThroughput) |
633 | return Opcode == Instruction::PHI ? 0 : 1; |
634 | // Branches are assumed to be predicted. |
635 | return 0; |
636 | } |
637 | |
638 | InstructionCost PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, |
639 | Type *Src, |
640 | TTI::CastContextHint CCH, |
641 | TTI::TargetCostKind CostKind, |
642 | const Instruction *I) { |
643 | assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode" ); |
644 | |
  InstructionCost CostFactor = vectorCostAdjustmentFactor(Opcode, Dst, Src);
646 | if (!CostFactor.isValid()) |
647 | return InstructionCost::getMax(); |
648 | |
649 | InstructionCost Cost = |
650 | BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I); |
651 | Cost *= CostFactor; |
652 | // TODO: Allow non-throughput costs that aren't binary. |
653 | if (CostKind != TTI::TCK_RecipThroughput) |
654 | return Cost == 0 ? 0 : 1; |
655 | return Cost; |
656 | } |
657 | |
658 | InstructionCost PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, |
659 | Type *CondTy, |
660 | CmpInst::Predicate VecPred, |
661 | TTI::TargetCostKind CostKind, |
662 | const Instruction *I) { |
  InstructionCost CostFactor =
      vectorCostAdjustmentFactor(Opcode, ValTy, nullptr);
665 | if (!CostFactor.isValid()) |
666 | return InstructionCost::getMax(); |
667 | |
668 | InstructionCost Cost = |
669 | BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I); |
670 | // TODO: Handle other cost kinds. |
671 | if (CostKind != TTI::TCK_RecipThroughput) |
672 | return Cost; |
673 | return Cost * CostFactor; |
674 | } |
675 | |
676 | InstructionCost PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, |
677 | TTI::TargetCostKind CostKind, |
678 | unsigned Index, Value *Op0, |
679 | Value *Op1) { |
680 | assert(Val->isVectorTy() && "This must be a vector type" ); |
681 | |
682 | int ISD = TLI->InstructionOpcodeToISD(Opcode); |
683 | assert(ISD && "Invalid opcode" ); |
684 | |
  InstructionCost CostFactor = vectorCostAdjustmentFactor(Opcode, Val, nullptr);
686 | if (!CostFactor.isValid()) |
687 | return InstructionCost::getMax(); |
688 | |
689 | InstructionCost Cost = |
690 | BaseT::getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1); |
691 | Cost *= CostFactor; |
692 | |
693 | if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) { |
694 | // Double-precision scalars are already located in index #0 (or #1 if LE). |
695 | if (ISD == ISD::EXTRACT_VECTOR_ELT && |
696 | Index == (ST->isLittleEndian() ? 1 : 0)) |
697 | return 0; |
698 | |
699 | return Cost; |
700 | |
701 | } else if (Val->getScalarType()->isIntegerTy()) { |
702 | unsigned EltSize = Val->getScalarSizeInBits(); |
703 | // Computing on 1 bit values requires extra mask or compare operations. |
704 | unsigned MaskCostForOneBitSize = (VecMaskCost && EltSize == 1) ? 1 : 0; |
705 | // Computing on non const index requires extra mask or compare operations. |
706 | unsigned MaskCostForIdx = (Index != -1U) ? 0 : 1; |
707 | if (ST->hasP9Altivec()) { |
708 | // P10 has vxform insert which can handle non const index. The |
709 | // MaskCostForIdx is for masking the index. |
710 | // P9 has insert for const index. A move-to VSR and a permute/insert. |
711 | // Assume vector operation cost for both (cost will be 2x on P9). |
712 | if (ISD == ISD::INSERT_VECTOR_ELT) { |
713 | if (ST->hasP10Vector()) |
714 | return CostFactor + MaskCostForIdx; |
715 | else if (Index != -1U) |
716 | return 2 * CostFactor; |
717 | } else if (ISD == ISD::EXTRACT_VECTOR_ELT) { |
718 | // It's an extract. Maybe we can do a cheap move-from VSR. |
719 | unsigned EltSize = Val->getScalarSizeInBits(); |
720 | // P9 has both mfvsrd and mfvsrld for 64 bit integer. |
721 | if (EltSize == 64 && Index != -1U) |
722 | return 1; |
723 | else if (EltSize == 32) { |
724 | unsigned MfvsrwzIndex = ST->isLittleEndian() ? 2 : 1; |
725 | if (Index == MfvsrwzIndex) |
726 | return 1; |
727 | |
        // For other indices, including non-constant ones, P9 has a vxform
        // extract. The MaskCostForIdx is for masking the index.
730 | return CostFactor + MaskCostForIdx; |
731 | } |
732 | |
733 | // We need a vector extract (or mfvsrld). Assume vector operation cost. |
734 | // The cost of the load constant for a vector extract is disregarded |
735 | // (invariant, easily schedulable). |
736 | return CostFactor + MaskCostForOneBitSize + MaskCostForIdx; |
737 | } |
738 | } else if (ST->hasDirectMove() && Index != -1U) { |
739 | // Assume permute has standard cost. |
740 | // Assume move-to/move-from VSR have 2x standard cost. |
741 | if (ISD == ISD::INSERT_VECTOR_ELT) |
742 | return 3; |
743 | return 3 + MaskCostForOneBitSize; |
744 | } |
745 | } |
746 | |
747 | // Estimated cost of a load-hit-store delay. This was obtained |
748 | // experimentally as a minimum needed to prevent unprofitable |
749 | // vectorization for the paq8p benchmark. It may need to be |
750 | // raised further if other unprofitable cases remain. |
751 | unsigned LHSPenalty = 2; |
752 | if (ISD == ISD::INSERT_VECTOR_ELT) |
753 | LHSPenalty += 7; |
754 | |
755 | // Vector element insert/extract with Altivec is very expensive, |
756 | // because they require store and reload with the attendant |
757 | // processor stall for load-hit-store. Until VSX is available, |
758 | // these need to be estimated as very costly. |
759 | if (ISD == ISD::EXTRACT_VECTOR_ELT || |
760 | ISD == ISD::INSERT_VECTOR_ELT) |
761 | return LHSPenalty + Cost; |
762 | |
763 | return Cost; |
764 | } |
765 | |
766 | InstructionCost PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, |
767 | MaybeAlign Alignment, |
768 | unsigned AddressSpace, |
769 | TTI::TargetCostKind CostKind, |
770 | TTI::OperandValueInfo OpInfo, |
771 | const Instruction *I) { |
772 | |
773 | InstructionCost CostFactor = vectorCostAdjustmentFactor(Opcode, Ty1: Src, Ty2: nullptr); |
774 | if (!CostFactor.isValid()) |
775 | return InstructionCost::getMax(); |
776 | |
  if (TLI->getValueType(DL, Src, true) == MVT::Other)
778 | return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, |
779 | CostKind); |
780 | // Legalize the type. |
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
782 | assert((Opcode == Instruction::Load || Opcode == Instruction::Store) && |
783 | "Invalid Opcode" ); |
784 | |
785 | InstructionCost Cost = |
786 | BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind); |
787 | // TODO: Handle other cost kinds. |
788 | if (CostKind != TTI::TCK_RecipThroughput) |
789 | return Cost; |
790 | |
791 | Cost *= CostFactor; |
792 | |
793 | bool IsAltivecType = ST->hasAltivec() && |
794 | (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 || |
795 | LT.second == MVT::v4i32 || LT.second == MVT::v4f32); |
796 | bool IsVSXType = ST->hasVSX() && |
797 | (LT.second == MVT::v2f64 || LT.second == MVT::v2i64); |
798 | |
799 | // VSX has 32b/64b load instructions. Legalization can handle loading of |
800 | // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and |
801 | // PPCTargetLowering can't compute the cost appropriately. So here we |
802 | // explicitly check this case. There are also corresponding store |
803 | // instructions. |
  unsigned MemBits = Src->getPrimitiveSizeInBits();
  if (ST->hasVSX() && IsAltivecType &&
      (MemBits == 64 || (ST->hasP8Vector() && MemBits == 32)))
807 | return 1; |
808 | |
809 | // Aligned loads and stores are easy. |
810 | unsigned SrcBytes = LT.second.getStoreSize(); |
811 | if (!SrcBytes || !Alignment || *Alignment >= SrcBytes) |
812 | return Cost; |
813 | |
814 | // If we can use the permutation-based load sequence, then this is also |
815 | // relatively cheap (not counting loop-invariant instructions): one load plus |
816 | // one permute (the last load in a series has extra cost, but we're |
817 | // neglecting that here). Note that on the P7, we could do unaligned loads |
818 | // for Altivec types using the VSX instructions, but that's more expensive |
819 | // than using the permutation-based load sequence. On the P8, that's no |
820 | // longer true. |
821 | if (Opcode == Instruction::Load && (!ST->hasP8Vector() && IsAltivecType) && |
822 | *Alignment >= LT.second.getScalarType().getStoreSize()) |
823 | return Cost + LT.first; // Add the cost of the permutations. |
824 | |
825 | // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the |
826 | // P7, unaligned vector loads are more expensive than the permutation-based |
827 | // load sequence, so that might be used instead, but regardless, the net cost |
828 | // is about the same (not counting loop-invariant instructions). |
829 | if (IsVSXType || (ST->hasVSX() && IsAltivecType)) |
830 | return Cost; |
831 | |
832 | // Newer PPC supports unaligned memory access. |
  if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
834 | return Cost; |
835 | |
836 | // PPC in general does not support unaligned loads and stores. They'll need |
837 | // to be decomposed based on the alignment factor. |
838 | |
839 | // Add the cost of each scalar load or store. |
840 | assert(Alignment); |
841 | Cost += LT.first * ((SrcBytes / Alignment->value()) - 1); |
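  // For example, a 16-byte vector access with a known 4-byte alignment adds
  // (16 / 4 - 1) = 3 extra legalized operations' worth of cost here.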
842 | |
843 | // For a vector type, there is also scalarization overhead (only for |
844 | // stores, loads are expanded using the vector-load + permutation sequence, |
845 | // which is much less expensive). |
846 | if (Src->isVectorTy() && Opcode == Instruction::Store) |
    for (int i = 0, e = cast<FixedVectorType>(Src)->getNumElements(); i < e;
         ++i)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, CostKind, i,
                                 nullptr, nullptr);
851 | |
852 | return Cost; |
853 | } |
854 | |
855 | InstructionCost PPCTTIImpl::getInterleavedMemoryOpCost( |
856 | unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices, |
857 | Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, |
858 | bool UseMaskForCond, bool UseMaskForGaps) { |
  InstructionCost CostFactor =
      vectorCostAdjustmentFactor(Opcode, VecTy, nullptr);
861 | if (!CostFactor.isValid()) |
862 | return InstructionCost::getMax(); |
863 | |
864 | if (UseMaskForCond || UseMaskForGaps) |
865 | return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, |
866 | Alignment, AddressSpace, CostKind, |
867 | UseMaskForCond, UseMaskForGaps); |
868 | |
869 | assert(isa<VectorType>(VecTy) && |
870 | "Expect a vector type for interleaved memory op" ); |
871 | |
872 | // Legalize the type. |
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(VecTy);
874 | |
875 | // Firstly, the cost of load/store operation. |
  InstructionCost Cost = getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment),
                                         AddressSpace, CostKind);
878 | |
  // PPC, for both Altivec and VSX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). For each result vector, we need one shuffle per incoming
  // vector (except that the first shuffle can take two incoming vectors
  // because it does not need to take itself).
  Cost += Factor * (LT.first - 1);
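  // For example, with Factor == 4 and a type that legalizes to two registers
  // (LT.first == 2), this adds 4 * (2 - 1) = 4 shuffles on top of the memory
  // cost.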
885 | |
886 | return Cost; |
887 | } |
888 | |
889 | InstructionCost |
890 | PPCTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, |
891 | TTI::TargetCostKind CostKind) { |
892 | return BaseT::getIntrinsicInstrCost(ICA, CostKind); |
893 | } |
894 | |
895 | bool PPCTTIImpl::areTypesABICompatible(const Function *Caller, |
896 | const Function *Callee, |
897 | const ArrayRef<Type *> &Types) const { |
898 | |
899 | // We need to ensure that argument promotion does not |
900 | // attempt to promote pointers to MMA types (__vector_pair |
901 | // and __vector_quad) since these types explicitly cannot be |
902 | // passed as arguments. Both of these types are larger than |
903 | // the 128-bit Altivec vectors and have a scalar size of 1 bit. |
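  // For example, an argument-promotion attempt that would turn a pointer
  // parameter into a by-value v512i1 (__vector_quad) value is rejected by the
  // check below.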
904 | if (!BaseT::areTypesABICompatible(Caller, Callee, Types)) |
905 | return false; |
906 | |
  return llvm::none_of(Types, [](Type *Ty) {
    if (Ty->isSized())
      return Ty->isIntOrIntVectorTy(1) && Ty->getPrimitiveSizeInBits() > 128;
910 | return false; |
911 | }); |
912 | } |
913 | |
914 | bool PPCTTIImpl::canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, |
915 | LoopInfo *LI, DominatorTree *DT, |
916 | AssumptionCache *AC, TargetLibraryInfo *LibInfo) { |
917 | // Process nested loops first. |
918 | for (Loop *I : *L) |
    if (canSaveCmp(I, BI, SE, LI, DT, AC, LibInfo))
920 | return false; // Stop search. |
921 | |
922 | HardwareLoopInfo HWLoopInfo(L); |
923 | |
  if (!HWLoopInfo.canAnalyze(*LI))
    return false;

  if (!isHardwareLoopProfitable(L, *SE, *AC, LibInfo, HWLoopInfo))
    return false;

  if (!HWLoopInfo.isHardwareLoopCandidate(*SE, *LI, *DT))
931 | return false; |
932 | |
933 | *BI = HWLoopInfo.ExitBranch; |
934 | return true; |
935 | } |
936 | |
937 | bool PPCTTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1, |
938 | const TargetTransformInfo::LSRCost &C2) { |
  // The PowerPC default is "instruction count first": compare Insns before
  // the other cost components. If LsrNoInsnsCost is set, fall back to the
  // default implementation.
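  // For example, an LSR solution costing 3 instructions and 6 registers is
  // preferred over one costing 4 instructions and 2 registers, because Insns
  // compares first.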
941 | if (!LsrNoInsnsCost) |
    return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost, C1.NumIVMuls,
                    C1.NumBaseAdds, C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
           std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost, C2.NumIVMuls,
                    C2.NumBaseAdds, C2.ScaleCost, C2.ImmCost, C2.SetupCost);
946 | else |
947 | return TargetTransformInfoImplBase::isLSRCostLess(C1, C2); |
948 | } |
949 | |
950 | bool PPCTTIImpl::isNumRegsMajorCostOfLSR() { |
951 | return false; |
952 | } |
953 | |
954 | bool PPCTTIImpl::shouldBuildRelLookupTables() const { |
955 | const PPCTargetMachine &TM = ST->getTargetMachine(); |
956 | // XCOFF hasn't implemented lowerRelativeReference, disable non-ELF for now. |
957 | if (!TM.isELFv2ABI()) |
958 | return false; |
959 | return BaseT::shouldBuildRelLookupTables(); |
960 | } |
961 | |
962 | bool PPCTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst, |
963 | MemIntrinsicInfo &Info) { |
964 | switch (Inst->getIntrinsicID()) { |
965 | case Intrinsic::ppc_altivec_lvx: |
966 | case Intrinsic::ppc_altivec_lvxl: |
967 | case Intrinsic::ppc_altivec_lvebx: |
968 | case Intrinsic::ppc_altivec_lvehx: |
969 | case Intrinsic::ppc_altivec_lvewx: |
970 | case Intrinsic::ppc_vsx_lxvd2x: |
971 | case Intrinsic::ppc_vsx_lxvw4x: |
972 | case Intrinsic::ppc_vsx_lxvd2x_be: |
973 | case Intrinsic::ppc_vsx_lxvw4x_be: |
974 | case Intrinsic::ppc_vsx_lxvl: |
975 | case Intrinsic::ppc_vsx_lxvll: |
976 | case Intrinsic::ppc_vsx_lxvp: { |
    Info.PtrVal = Inst->getArgOperand(0);
978 | Info.ReadMem = true; |
979 | Info.WriteMem = false; |
980 | return true; |
981 | } |
982 | case Intrinsic::ppc_altivec_stvx: |
983 | case Intrinsic::ppc_altivec_stvxl: |
984 | case Intrinsic::ppc_altivec_stvebx: |
985 | case Intrinsic::ppc_altivec_stvehx: |
986 | case Intrinsic::ppc_altivec_stvewx: |
987 | case Intrinsic::ppc_vsx_stxvd2x: |
988 | case Intrinsic::ppc_vsx_stxvw4x: |
989 | case Intrinsic::ppc_vsx_stxvd2x_be: |
990 | case Intrinsic::ppc_vsx_stxvw4x_be: |
991 | case Intrinsic::ppc_vsx_stxvl: |
992 | case Intrinsic::ppc_vsx_stxvll: |
993 | case Intrinsic::ppc_vsx_stxvp: { |
    Info.PtrVal = Inst->getArgOperand(1);
995 | Info.ReadMem = false; |
996 | Info.WriteMem = true; |
997 | return true; |
998 | } |
999 | case Intrinsic::ppc_stbcx: |
1000 | case Intrinsic::ppc_sthcx: |
1001 | case Intrinsic::ppc_stdcx: |
1002 | case Intrinsic::ppc_stwcx: { |
    Info.PtrVal = Inst->getArgOperand(0);
1004 | Info.ReadMem = false; |
1005 | Info.WriteMem = true; |
1006 | return true; |
1007 | } |
1008 | default: |
1009 | break; |
1010 | } |
1011 | |
1012 | return false; |
1013 | } |
1014 | |
1015 | bool PPCTTIImpl::hasActiveVectorLength(unsigned Opcode, Type *DataType, |
1016 | Align Alignment) const { |
  // Only load and store instructions can have a variable vector length on
  // Power.
1018 | if (Opcode != Instruction::Load && Opcode != Instruction::Store) |
1019 | return false; |
1020 | // Loads/stores with length instructions use bits 0-7 of the GPR operand and |
1021 | // therefore cannot be used in 32-bit mode. |
1022 | if ((!ST->hasP9Vector() && !ST->hasP10Vector()) || !ST->isPPC64()) |
1023 | return false; |
  if (isa<FixedVectorType>(DataType)) {
1025 | unsigned VecWidth = DataType->getPrimitiveSizeInBits(); |
1026 | return VecWidth == 128; |
1027 | } |
1028 | Type *ScalarTy = DataType->getScalarType(); |
1029 | |
1030 | if (ScalarTy->isPointerTy()) |
1031 | return true; |
1032 | |
1033 | if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy()) |
1034 | return true; |
1035 | |
1036 | if (!ScalarTy->isIntegerTy()) |
1037 | return false; |
1038 | |
1039 | unsigned IntWidth = ScalarTy->getIntegerBitWidth(); |
1040 | return IntWidth == 8 || IntWidth == 16 || IntWidth == 32 || IntWidth == 64; |
1041 | } |
1042 | |
1043 | InstructionCost PPCTTIImpl::getVPMemoryOpCost(unsigned Opcode, Type *Src, |
1044 | Align Alignment, |
1045 | unsigned AddressSpace, |
1046 | TTI::TargetCostKind CostKind, |
1047 | const Instruction *I) { |
1048 | InstructionCost Cost = BaseT::getVPMemoryOpCost(Opcode, Src, Alignment, |
1049 | AddressSpace, CostKind, I); |
  if (TLI->getValueType(DL, Src, true) == MVT::Other)
1051 | return Cost; |
1052 | // TODO: Handle other cost kinds. |
1053 | if (CostKind != TTI::TCK_RecipThroughput) |
1054 | return Cost; |
1055 | |
1056 | assert((Opcode == Instruction::Load || Opcode == Instruction::Store) && |
1057 | "Invalid Opcode" ); |
1058 | |
  auto *SrcVTy = dyn_cast<FixedVectorType>(Src);
1060 | assert(SrcVTy && "Expected a vector type for VP memory operations" ); |
1061 | |
  if (hasActiveVectorLength(Opcode, Src, Alignment)) {
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(SrcVTy);
1064 | |
    InstructionCost CostFactor =
        vectorCostAdjustmentFactor(Opcode, Src, nullptr);
1067 | if (!CostFactor.isValid()) |
1068 | return InstructionCost::getMax(); |
1069 | |
1070 | InstructionCost Cost = LT.first * CostFactor; |
1071 | assert(Cost.isValid() && "Expected valid cost" ); |
1072 | |
1073 | // On P9 but not on P10, if the op is misaligned then it will cause a |
1074 | // pipeline flush. Otherwise the VSX masked memops cost the same as unmasked |
1075 | // ones. |
1076 | const Align DesiredAlignment(16); |
1077 | if (Alignment >= DesiredAlignment || ST->getCPUDirective() != PPC::DIR_PWR9) |
1078 | return Cost; |
1079 | |
    // Since alignment may be underestimated, we try to compute the probability
1081 | // that the actual address is aligned to the desired boundary. For example |
1082 | // an 8-byte aligned load is assumed to be actually 16-byte aligned half the |
1083 | // time, while a 4-byte aligned load has a 25% chance of being 16-byte |
1084 | // aligned. |
1085 | float AlignmentProb = ((float)Alignment.value()) / DesiredAlignment.value(); |
1086 | float MisalignmentProb = 1.0 - AlignmentProb; |
1087 | return (MisalignmentProb * P9PipelineFlushEstimate) + |
1088 | (AlignmentProb * *Cost.getValue()); |
1089 | } |
1090 | |
1091 | // Usually we should not get to this point, but the following is an attempt to |
1092 | // model the cost of legalization. Currently we can only lower intrinsics with |
1093 | // evl but no mask, on Power 9/10. Otherwise, we must scalarize. |
  return getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind);
1095 | } |
1096 | |
1097 | bool PPCTTIImpl::supportsTailCallFor(const CallBase *CB) const { |
1098 | return TLI->supportsTailCallFor(CB); |
1099 | } |
1100 | |