1 | //===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | |
9 | #include "PPCTargetTransformInfo.h" |
10 | #include "llvm/Analysis/CodeMetrics.h" |
11 | #include "llvm/Analysis/TargetLibraryInfo.h" |
12 | #include "llvm/Analysis/TargetTransformInfo.h" |
13 | #include "llvm/CodeGen/BasicTTIImpl.h" |
14 | #include "llvm/CodeGen/TargetLowering.h" |
15 | #include "llvm/CodeGen/TargetSchedule.h" |
16 | #include "llvm/IR/IntrinsicsPowerPC.h" |
17 | #include "llvm/IR/ProfDataUtils.h" |
18 | #include "llvm/Support/CommandLine.h" |
19 | #include "llvm/Transforms/InstCombine/InstCombiner.h" |
20 | #include "llvm/Transforms/Utils/Local.h" |
21 | #include <optional> |
22 | |
23 | using namespace llvm; |
24 | |
25 | #define DEBUG_TYPE "ppctti" |
26 | |
static cl::opt<bool> VecMaskCost("ppc-vec-mask-cost",
    cl::desc("add masking cost for i1 vectors"), cl::init(true), cl::Hidden);
29 | |
static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
    cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);
32 | |
static cl::opt<bool>
EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
                cl::desc("Enable using coldcc calling conv for cold "
                         "internal functions"));
37 | |
static cl::opt<bool>
LsrNoInsnsCost("ppc-lsr-no-insns-cost", cl::Hidden, cl::init(false),
               cl::desc("Do not add instruction count to lsr cost model"));
41 | |
42 | // The latency of mtctr is only justified if there are more than 4 |
43 | // comparisons that will be removed as a result. |
static cl::opt<unsigned>
SmallCTRLoopThreshold("min-ctr-loop-threshold", cl::init(4), cl::Hidden,
                      cl::desc("Loops with a constant trip count smaller than "
                               "this value will not use the count register."));
48 | |
49 | //===----------------------------------------------------------------------===// |
50 | // |
51 | // PPC cost model. |
52 | // |
53 | //===----------------------------------------------------------------------===// |
54 | |
55 | TargetTransformInfo::PopcntSupportKind |
56 | PPCTTIImpl::getPopcntSupport(unsigned TyWidth) const { |
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
58 | if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64) |
59 | return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ? |
60 | TTI::PSK_SlowHardware : TTI::PSK_FastHardware; |
61 | return TTI::PSK_Software; |
62 | } |
63 | |
64 | std::optional<Instruction *> |
65 | PPCTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const { |
66 | Intrinsic::ID IID = II.getIntrinsicID(); |
67 | switch (IID) { |
68 | default: |
69 | break; |
70 | case Intrinsic::ppc_altivec_lvx: |
71 | case Intrinsic::ppc_altivec_lvxl: |
72 | // Turn PPC lvx -> load if the pointer is known aligned. |
    if (getOrEnforceKnownAlignment(
            II.getArgOperand(0), Align(16), IC.getDataLayout(), &II,
            &IC.getAssumptionCache(), &IC.getDominatorTree()) >= 16) {
      Value *Ptr = II.getArgOperand(0);
      return new LoadInst(II.getType(), Ptr, "", false, Align(16));
78 | } |
79 | break; |
80 | case Intrinsic::ppc_vsx_lxvw4x: |
81 | case Intrinsic::ppc_vsx_lxvd2x: { |
82 | // Turn PPC VSX loads into normal loads. |
    Value *Ptr = II.getArgOperand(0);
    return new LoadInst(II.getType(), Ptr, Twine(""), false, Align(1));
85 | } |
86 | case Intrinsic::ppc_altivec_stvx: |
87 | case Intrinsic::ppc_altivec_stvxl: |
88 | // Turn stvx -> store if the pointer is known aligned. |
    if (getOrEnforceKnownAlignment(
            II.getArgOperand(1), Align(16), IC.getDataLayout(), &II,
            &IC.getAssumptionCache(), &IC.getDominatorTree()) >= 16) {
      Value *Ptr = II.getArgOperand(1);
      return new StoreInst(II.getArgOperand(0), Ptr, false, Align(16));
94 | } |
95 | break; |
96 | case Intrinsic::ppc_vsx_stxvw4x: |
97 | case Intrinsic::ppc_vsx_stxvd2x: { |
98 | // Turn PPC VSX stores into normal stores. |
    Value *Ptr = II.getArgOperand(1);
    return new StoreInst(II.getArgOperand(0), Ptr, false, Align(1));
101 | } |
102 | case Intrinsic::ppc_altivec_vperm: |
103 | // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant. |
104 | // Note that ppc_altivec_vperm has a big-endian bias, so when creating |
105 | // a vectorshuffle for little endian, we must undo the transformation |
106 | // performed on vec_perm in altivec.h. That is, we must complement |
107 | // the permutation mask with respect to 31 and reverse the order of |
108 | // V1 and V2. |
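    // A minimal sketch of the rewrite (assuming a constant mask): on big
    // endian, vperm(V1, V2, <0, 1, ..., 15>) selects the 16 bytes of V1 in
    // order; on little endian each mask byte M becomes 31 - M and the V1/V2
    // operands are swapped below, which together undo the transformation
    // vec_perm applies in altivec.h.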
    if (Constant *Mask = dyn_cast<Constant>(II.getArgOperand(2))) {
      assert(cast<FixedVectorType>(Mask->getType())->getNumElements() == 16 &&
             "Bad type for intrinsic!");
112 | |
113 | // Check that all of the elements are integer constants or undefs. |
114 | bool AllEltsOk = true; |
115 | for (unsigned I = 0; I != 16; ++I) { |
        Constant *Elt = Mask->getAggregateElement(I);
        if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
118 | AllEltsOk = false; |
119 | break; |
120 | } |
121 | } |
122 | |
123 | if (AllEltsOk) { |
124 | // Cast the input vectors to byte vectors. |
        Value *Op0 =
            IC.Builder.CreateBitCast(II.getArgOperand(0), Mask->getType());
        Value *Op1 =
            IC.Builder.CreateBitCast(II.getArgOperand(1), Mask->getType());
        Value *Result = PoisonValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));
134 | |
135 | for (unsigned I = 0; I != 16; ++I) { |
          if (isa<UndefValue>(Mask->getAggregateElement(I)))
            continue;
          unsigned Idx =
              cast<ConstantInt>(Mask->getAggregateElement(I))->getZExtValue();
140 | Idx &= 31; // Match the hardware behavior. |
141 | if (DL.isLittleEndian()) |
142 | Idx = 31 - Idx; |
143 | |
144 | if (!ExtractedElts[Idx]) { |
145 | Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0; |
146 | Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1; |
            ExtractedElts[Idx] = IC.Builder.CreateExtractElement(
                Idx < 16 ? Op0ToUse : Op1ToUse, IC.Builder.getInt32(Idx & 15));
149 | } |
150 | |
151 | // Insert this value into the result vector. |
          Result = IC.Builder.CreateInsertElement(Result, ExtractedElts[Idx],
                                                  IC.Builder.getInt32(I));
154 | } |
        return CastInst::Create(Instruction::BitCast, Result, II.getType());
156 | } |
157 | } |
158 | break; |
159 | } |
160 | return std::nullopt; |
161 | } |
162 | |
163 | InstructionCost PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty, |
164 | TTI::TargetCostKind CostKind) const { |
165 | if (DisablePPCConstHoist) |
166 | return BaseT::getIntImmCost(Imm, Ty, CostKind); |
167 | |
168 | assert(Ty->isIntegerTy()); |
169 | |
170 | unsigned BitSize = Ty->getPrimitiveSizeInBits(); |
171 | if (BitSize == 0) |
172 | return ~0U; |
173 | |
174 | if (Imm == 0) |
175 | return TTI::TCC_Free; |
176 | |
177 | if (Imm.getBitWidth() <= 64) { |
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
182 | // A constant that can be materialized using lis. |
183 | if ((Imm.getZExtValue() & 0xFFFF) == 0) |
184 | return TTI::TCC_Basic; |
185 | |
186 | return 2 * TTI::TCC_Basic; |
187 | } |
188 | } |
189 | |
190 | return 4 * TTI::TCC_Basic; |
191 | } |
192 | |
193 | InstructionCost |
194 | PPCTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, |
195 | const APInt &Imm, Type *Ty, |
196 | TTI::TargetCostKind CostKind) const { |
197 | if (DisablePPCConstHoist) |
198 | return BaseT::getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind); |
199 | |
200 | assert(Ty->isIntegerTy()); |
201 | |
202 | unsigned BitSize = Ty->getPrimitiveSizeInBits(); |
203 | if (BitSize == 0) |
204 | return ~0U; |
205 | |
206 | switch (IID) { |
207 | default: |
208 | return TTI::TCC_Free; |
209 | case Intrinsic::sadd_with_overflow: |
210 | case Intrinsic::uadd_with_overflow: |
211 | case Intrinsic::ssub_with_overflow: |
212 | case Intrinsic::usub_with_overflow: |
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
214 | return TTI::TCC_Free; |
215 | break; |
216 | case Intrinsic::experimental_stackmap: |
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
218 | return TTI::TCC_Free; |
219 | break; |
220 | case Intrinsic::experimental_patchpoint_void: |
221 | case Intrinsic::experimental_patchpoint: |
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
223 | return TTI::TCC_Free; |
224 | break; |
225 | } |
226 | return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind); |
227 | } |
228 | |
229 | InstructionCost PPCTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx, |
230 | const APInt &Imm, Type *Ty, |
231 | TTI::TargetCostKind CostKind, |
232 | Instruction *Inst) const { |
233 | if (DisablePPCConstHoist) |
234 | return BaseT::getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst); |
235 | |
236 | assert(Ty->isIntegerTy()); |
237 | |
238 | unsigned BitSize = Ty->getPrimitiveSizeInBits(); |
239 | if (BitSize == 0) |
240 | return ~0U; |
241 | |
242 | unsigned ImmIdx = ~0U; |
243 | bool ShiftedFree = false, RunFree = false, UnsignedFree = false, |
244 | ZeroFree = false; |
245 | switch (Opcode) { |
246 | default: |
247 | return TTI::TCC_Free; |
248 | case Instruction::GetElementPtr: |
249 | // Always hoist the base address of a GetElementPtr. This prevents the |
250 | // creation of new constants for every base constant that gets constant |
251 | // folded with the offset. |
252 | if (Idx == 0) |
253 | return 2 * TTI::TCC_Basic; |
254 | return TTI::TCC_Free; |
255 | case Instruction::And: |
256 | RunFree = true; // (for the rotate-and-mask instructions) |
257 | [[fallthrough]]; |
258 | case Instruction::Add: |
259 | case Instruction::Or: |
260 | case Instruction::Xor: |
261 | ShiftedFree = true; |
262 | [[fallthrough]]; |
263 | case Instruction::Sub: |
264 | case Instruction::Mul: |
265 | case Instruction::Shl: |
266 | case Instruction::LShr: |
267 | case Instruction::AShr: |
268 | ImmIdx = 1; |
269 | break; |
270 | case Instruction::ICmp: |
271 | UnsignedFree = true; |
272 | ImmIdx = 1; |
273 | // Zero comparisons can use record-form instructions. |
274 | [[fallthrough]]; |
275 | case Instruction::Select: |
276 | ZeroFree = true; |
277 | break; |
278 | case Instruction::PHI: |
279 | case Instruction::Call: |
280 | case Instruction::Ret: |
281 | case Instruction::Load: |
282 | case Instruction::Store: |
283 | break; |
284 | } |
285 | |
286 | if (ZeroFree && Imm == 0) |
287 | return TTI::TCC_Free; |
288 | |
289 | if (Idx == ImmIdx && Imm.getBitWidth() <= 64) { |
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
306 | return TTI::TCC_Free; |
307 | |
308 | if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0) |
309 | return TTI::TCC_Free; |
310 | } |
311 | |
312 | return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind); |
313 | } |
314 | |
// Check if the current Type is an MMA vector type. The valid MMA types are
// v256i1 and v512i1.
317 | static bool isMMAType(Type *Ty) { |
318 | return Ty->isVectorTy() && (Ty->getScalarSizeInBits() == 1) && |
319 | (Ty->getPrimitiveSizeInBits() > 128); |
320 | } |
321 | |
322 | InstructionCost |
323 | PPCTTIImpl::getInstructionCost(const User *U, ArrayRef<const Value *> Operands, |
324 | TTI::TargetCostKind CostKind) const { |
  // We already implement getCastInstrCost and getMemoryOpCost, which perform
  // the vector adjustment themselves.
  if (isa<CastInst>(U) || isa<LoadInst>(U) || isa<StoreInst>(U))
328 | return BaseT::getInstructionCost(U, Operands, CostKind); |
329 | |
330 | if (U->getType()->isVectorTy()) { |
331 | // Instructions that need to be split should cost more. |
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(U->getType());
333 | return LT.first * BaseT::getInstructionCost(U, Operands, CostKind); |
334 | } |
335 | |
336 | return BaseT::getInstructionCost(U, Operands, CostKind); |
337 | } |
338 | |
339 | bool PPCTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, |
340 | AssumptionCache &AC, |
341 | TargetLibraryInfo *LibInfo, |
342 | HardwareLoopInfo &HWLoopInfo) const { |
343 | const PPCTargetMachine &TM = ST->getTargetMachine(); |
344 | TargetSchedModel SchedModel; |
  SchedModel.init(ST);
346 | |
347 | // FIXME: Sure there is no other way to get TTI? This should be cheap though. |
  TargetTransformInfo TTI =
      TM.getTargetTransformInfo(*L->getHeader()->getParent());
350 | |
  // Do not convert short loops with a small constant trip count to CTR loops.
352 | unsigned ConstTripCount = SE.getSmallConstantTripCount(L); |
353 | if (ConstTripCount && ConstTripCount < SmallCTRLoopThreshold) { |
354 | SmallPtrSet<const Value *, 32> EphValues; |
    CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
356 | CodeMetrics Metrics; |
357 | for (BasicBlock *BB : L->blocks()) |
358 | Metrics.analyzeBasicBlock(BB, TTI, EphValues); |
359 | // 6 is an approximate latency for the mtctr instruction. |
360 | if (Metrics.NumInsts <= (6 * SchedModel.getIssueWidth())) |
361 | return false; |
362 | } |
363 | |
  // Check that there are no hardware-loop-related intrinsics in the loop.
365 | for (auto *BB : L->getBlocks()) |
366 | for (auto &I : *BB) |
      if (auto *Call = dyn_cast<IntrinsicInst>(&I))
368 | if (Call->getIntrinsicID() == Intrinsic::set_loop_iterations || |
369 | Call->getIntrinsicID() == Intrinsic::loop_decrement) |
370 | return false; |
371 | |
372 | SmallVector<BasicBlock*, 4> ExitingBlocks; |
373 | L->getExitingBlocks(ExitingBlocks); |
374 | |
375 | // If there is an exit edge known to be frequently taken, |
376 | // we should not transform this loop. |
377 | for (auto &BB : ExitingBlocks) { |
378 | Instruction *TI = BB->getTerminator(); |
379 | if (!TI) continue; |
380 | |
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
382 | uint64_t TrueWeight = 0, FalseWeight = 0; |
      if (!BI->isConditional() ||
          !extractBranchWeights(*BI, TrueWeight, FalseWeight))
385 | continue; |
386 | |
387 | // If the exit path is more frequent than the loop path, |
388 | // we return here without further analysis for this loop. |
      bool TrueIsExit = !L->contains(BI->getSuccessor(0));
390 | if (( TrueIsExit && FalseWeight < TrueWeight) || |
391 | (!TrueIsExit && FalseWeight > TrueWeight)) |
392 | return false; |
393 | } |
394 | } |
395 | |
396 | LLVMContext &C = L->getHeader()->getContext(); |
397 | HWLoopInfo.CountType = TM.isPPC64() ? |
398 | Type::getInt64Ty(C) : Type::getInt32Ty(C); |
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
400 | return true; |
401 | } |
402 | |
void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP,
                                         OptimizationRemarkEmitter *ORE) const {
406 | if (ST->getCPUDirective() == PPC::DIR_A2) { |
407 | // The A2 is in-order with a deep pipeline, and concatenation unrolling |
408 | // helps expose latency-hiding opportunities to the instruction scheduler. |
409 | UP.Partial = UP.Runtime = true; |
410 | |
411 | // We unroll a lot on the A2 (hundreds of instructions), and the benefits |
412 | // often outweigh the cost of a division to compute the trip count. |
413 | UP.AllowExpensiveTripCount = true; |
414 | } |
415 | |
416 | BaseT::getUnrollingPreferences(L, SE, UP, ORE); |
417 | } |
418 | |
419 | void PPCTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE, |
420 | TTI::PeelingPreferences &PP) const { |
421 | BaseT::getPeelingPreferences(L, SE, PP); |
422 | } |

// This function returns true to allow using the coldcc calling convention.
// Returning true results in coldcc being used for functions which are cold at
// all call sites when their callers do not call any other non-coldcc
// functions.
427 | bool PPCTTIImpl::useColdCCForColdCall(Function &F) const { |
428 | return EnablePPCColdCC; |
429 | } |
430 | |
431 | bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) const { |
432 | // On the A2, always unroll aggressively. |
433 | if (ST->getCPUDirective() == PPC::DIR_A2) |
434 | return true; |
435 | |
436 | return LoopHasReductions; |
437 | } |
438 | |
439 | PPCTTIImpl::TTI::MemCmpExpansionOptions |
440 | PPCTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const { |
441 | TTI::MemCmpExpansionOptions Options; |
442 | Options.LoadSizes = {8, 4, 2, 1}; |
443 | Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize); |
444 | return Options; |
445 | } |
446 | |
447 | bool PPCTTIImpl::enableInterleavedAccessVectorization() const { return true; } |
448 | |
449 | unsigned PPCTTIImpl::getNumberOfRegisters(unsigned ClassID) const { |
450 | assert(ClassID == GPRRC || ClassID == FPRRC || |
451 | ClassID == VRRC || ClassID == VSXRC); |
452 | if (ST->hasVSX()) { |
453 | assert(ClassID == GPRRC || ClassID == VSXRC || ClassID == VRRC); |
454 | return ClassID == VSXRC ? 64 : 32; |
455 | } |
456 | assert(ClassID == GPRRC || ClassID == FPRRC || ClassID == VRRC); |
457 | return 32; |
458 | } |
459 | |
460 | unsigned PPCTTIImpl::getRegisterClassForType(bool Vector, Type *Ty) const { |
461 | if (Vector) |
462 | return ST->hasVSX() ? VSXRC : VRRC; |
463 | if (Ty && |
464 | (Ty->getScalarType()->isFloatTy() || Ty->getScalarType()->isDoubleTy())) |
465 | return ST->hasVSX() ? VSXRC : FPRRC; |
466 | if (Ty && (Ty->getScalarType()->isFP128Ty() || |
467 | Ty->getScalarType()->isPPC_FP128Ty())) |
468 | return VRRC; |
469 | if (Ty && Ty->getScalarType()->isHalfTy()) |
470 | return VSXRC; |
471 | return GPRRC; |
472 | } |
473 | |
474 | const char* PPCTTIImpl::getRegisterClassName(unsigned ClassID) const { |
475 | |
476 | switch (ClassID) { |
477 | default: |
478 | llvm_unreachable("unknown register class" ); |
479 | return "PPC::unknown register class" ; |
480 | case GPRRC: return "PPC::GPRRC" ; |
481 | case FPRRC: return "PPC::FPRRC" ; |
482 | case VRRC: return "PPC::VRRC" ; |
483 | case VSXRC: return "PPC::VSXRC" ; |
484 | } |
485 | } |
486 | |
487 | TypeSize |
488 | PPCTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const { |
489 | switch (K) { |
490 | case TargetTransformInfo::RGK_Scalar: |
    return TypeSize::getFixed(ST->isPPC64() ? 64 : 32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    return TypeSize::getFixed(ST->hasAltivec() ? 128 : 0);
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
496 | } |
497 | |
498 | llvm_unreachable("Unsupported register kind" ); |
499 | } |
500 | |
501 | unsigned PPCTTIImpl::getCacheLineSize() const { |
502 | // Starting with P7 we have a cache line size of 128. |
503 | unsigned Directive = ST->getCPUDirective(); |
504 | // Assume that Future CPU has the same cache line size as the others. |
505 | if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 || |
506 | Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 || |
507 | Directive == PPC::DIR_PWR11 || Directive == PPC::DIR_PWR_FUTURE) |
508 | return 128; |
509 | |
510 | // On other processors return a default of 64 bytes. |
511 | return 64; |
512 | } |
513 | |
514 | unsigned PPCTTIImpl::getPrefetchDistance() const { |
515 | return 300; |
516 | } |
517 | |
518 | unsigned PPCTTIImpl::getMaxInterleaveFactor(ElementCount VF) const { |
519 | unsigned Directive = ST->getCPUDirective(); |
520 | // The 440 has no SIMD support, but floating-point instructions |
521 | // have a 5-cycle latency, so unroll by 5x for latency hiding. |
522 | if (Directive == PPC::DIR_440) |
523 | return 5; |
524 | |
525 | // The A2 has no SIMD support, but floating-point instructions |
526 | // have a 6-cycle latency, so unroll by 6x for latency hiding. |
527 | if (Directive == PPC::DIR_A2) |
528 | return 6; |
529 | |
530 | // FIXME: For lack of any better information, do no harm... |
531 | if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500) |
532 | return 1; |
533 | |
534 | // For P7 and P8, floating-point instructions have a 6-cycle latency and |
535 | // there are two execution units, so unroll by 12x for latency hiding. |
536 | // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready |
537 | // FIXME: the same for P10 as previous gen until POWER10 scheduling is ready |
538 | // Assume that future is the same as the others. |
539 | if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 || |
540 | Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 || |
541 | Directive == PPC::DIR_PWR11 || Directive == PPC::DIR_PWR_FUTURE) |
542 | return 12; |
543 | |
544 | // For most things, modern systems have two execution units (and |
545 | // out-of-order execution). |
546 | return 2; |
547 | } |
548 | |
549 | // Returns a cost adjustment factor to adjust the cost of vector instructions |
// on targets where there is overlap between the vector and scalar units,
551 | // thereby reducing the overall throughput of vector code wrt. scalar code. |
552 | // An invalid instruction cost is returned if the type is an MMA vector type. |
553 | InstructionCost PPCTTIImpl::vectorCostAdjustmentFactor(unsigned Opcode, |
554 | Type *Ty1, |
555 | Type *Ty2) const { |
556 | // If the vector type is of an MMA type (v256i1, v512i1), an invalid |
557 | // instruction cost is returned. This is to signify to other cost computing |
558 | // functions to return the maximum instruction cost in order to prevent any |
559 | // opportunities for the optimizer to produce MMA types within the IR. |
  if (isMMAType(Ty1))
561 | return InstructionCost::getInvalid(); |
562 | |
563 | if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy()) |
564 | return InstructionCost(1); |
565 | |
  std::pair<InstructionCost, MVT> LT1 = getTypeLegalizationCost(Ty1);
567 | // If type legalization involves splitting the vector, we don't want to |
568 | // double the cost at every step - only the last step. |
569 | if (LT1.first != 1 || !LT1.second.isVector()) |
570 | return InstructionCost(1); |
571 | |
572 | int ISD = TLI->InstructionOpcodeToISD(Opcode); |
  if (TLI->isOperationExpand(ISD, LT1.second))
574 | return InstructionCost(1); |
575 | |
576 | if (Ty2) { |
    std::pair<InstructionCost, MVT> LT2 = getTypeLegalizationCost(Ty2);
578 | if (LT2.first != 1 || !LT2.second.isVector()) |
579 | return InstructionCost(1); |
580 | } |
581 | |
582 | return InstructionCost(2); |
583 | } |
584 | |
585 | InstructionCost PPCTTIImpl::getArithmeticInstrCost( |
586 | unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, |
587 | TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info, |
588 | ArrayRef<const Value *> Args, const Instruction *CxtI) const { |
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  InstructionCost CostFactor = vectorCostAdjustmentFactor(Opcode, Ty, nullptr);
592 | if (!CostFactor.isValid()) |
593 | return InstructionCost::getMax(); |
594 | |
595 | // TODO: Handle more cost kinds. |
596 | if (CostKind != TTI::TCK_RecipThroughput) |
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Args, CxtI);

  // Fallback to the default implementation.
  InstructionCost Cost = BaseT::getArithmeticInstrCost(
      Opcode, Ty, CostKind, Op1Info, Op2Info);
603 | return Cost * CostFactor; |
604 | } |
605 | |
606 | InstructionCost PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, |
607 | VectorType *DstTy, VectorType *SrcTy, |
608 | ArrayRef<int> Mask, |
609 | TTI::TargetCostKind CostKind, |
610 | int Index, VectorType *SubTp, |
611 | ArrayRef<const Value *> Args, |
612 | const Instruction *CxtI) const { |
613 | |
614 | InstructionCost CostFactor = |
      vectorCostAdjustmentFactor(Instruction::ShuffleVector, SrcTy, nullptr);
616 | if (!CostFactor.isValid()) |
617 | return InstructionCost::getMax(); |
618 | |
619 | // Legalize the type. |
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(SrcTy);
621 | |
  // PPC, for both Altivec/VSX, supports cheap arbitrary permutations
623 | // (at least in the sense that there need only be one non-loop-invariant |
624 | // instruction). We need one such shuffle instruction for each actual |
625 | // register (this is not true for arbitrary shuffles, but is true for the |
626 | // structured types of shuffles covered by TTI::ShuffleKind). |
627 | return LT.first * CostFactor; |
628 | } |
629 | |
630 | InstructionCost PPCTTIImpl::getCFInstrCost(unsigned Opcode, |
631 | TTI::TargetCostKind CostKind, |
632 | const Instruction *I) const { |
633 | if (CostKind != TTI::TCK_RecipThroughput) |
634 | return Opcode == Instruction::PHI ? 0 : 1; |
635 | // Branches are assumed to be predicted. |
636 | return 0; |
637 | } |
638 | |
639 | InstructionCost PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, |
640 | Type *Src, |
641 | TTI::CastContextHint CCH, |
642 | TTI::TargetCostKind CostKind, |
643 | const Instruction *I) const { |
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  InstructionCost CostFactor = vectorCostAdjustmentFactor(Opcode, Dst, Src);
647 | if (!CostFactor.isValid()) |
648 | return InstructionCost::getMax(); |
649 | |
650 | InstructionCost Cost = |
651 | BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I); |
652 | Cost *= CostFactor; |
653 | // TODO: Allow non-throughput costs that aren't binary. |
654 | if (CostKind != TTI::TCK_RecipThroughput) |
655 | return Cost == 0 ? 0 : 1; |
656 | return Cost; |
657 | } |
658 | |
659 | InstructionCost PPCTTIImpl::getCmpSelInstrCost( |
660 | unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, |
661 | TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info, |
662 | TTI::OperandValueInfo Op2Info, const Instruction *I) const { |
663 | InstructionCost CostFactor = |
      vectorCostAdjustmentFactor(Opcode, ValTy, nullptr);
665 | if (!CostFactor.isValid()) |
666 | return InstructionCost::getMax(); |
667 | |
668 | InstructionCost Cost = BaseT::getCmpSelInstrCost( |
669 | Opcode, ValTy, CondTy, VecPred, CostKind, Op1Info, Op2Info, I); |
670 | // TODO: Handle other cost kinds. |
671 | if (CostKind != TTI::TCK_RecipThroughput) |
672 | return Cost; |
673 | return Cost * CostFactor; |
674 | } |
675 | |
676 | InstructionCost PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, |
677 | TTI::TargetCostKind CostKind, |
678 | unsigned Index, const Value *Op0, |
679 | const Value *Op1) const { |
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  InstructionCost CostFactor = vectorCostAdjustmentFactor(Opcode, Val, nullptr);
686 | if (!CostFactor.isValid()) |
687 | return InstructionCost::getMax(); |
688 | |
689 | InstructionCost Cost = |
690 | BaseT::getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1); |
691 | Cost *= CostFactor; |
692 | |
693 | if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) { |
694 | // Double-precision scalars are already located in index #0 (or #1 if LE). |
695 | if (ISD == ISD::EXTRACT_VECTOR_ELT && |
696 | Index == (ST->isLittleEndian() ? 1 : 0)) |
697 | return 0; |
698 | |
699 | return Cost; |
700 | } |
701 | if (Val->getScalarType()->isIntegerTy()) { |
702 | unsigned EltSize = Val->getScalarSizeInBits(); |
703 | // Computing on 1 bit values requires extra mask or compare operations. |
704 | unsigned MaskCostForOneBitSize = (VecMaskCost && EltSize == 1) ? 1 : 0; |
705 | // Computing on non const index requires extra mask or compare operations. |
706 | unsigned MaskCostForIdx = (Index != -1U) ? 0 : 1; |
707 | if (ST->hasP9Altivec()) { |
708 | // P10 has vxform insert which can handle non const index. The |
709 | // MaskCostForIdx is for masking the index. |
710 | // P9 has insert for const index. A move-to VSR and a permute/insert. |
711 | // Assume vector operation cost for both (cost will be 2x on P9). |
712 | if (ISD == ISD::INSERT_VECTOR_ELT) { |
713 | if (ST->hasP10Vector()) |
714 | return CostFactor + MaskCostForIdx; |
715 | if (Index != -1U) |
716 | return 2 * CostFactor; |
717 | } else if (ISD == ISD::EXTRACT_VECTOR_ELT) { |
718 | // It's an extract. Maybe we can do a cheap move-from VSR. |
719 | unsigned EltSize = Val->getScalarSizeInBits(); |
720 | // P9 has both mfvsrd and mfvsrld for 64 bit integer. |
721 | if (EltSize == 64 && Index != -1U) |
722 | return 1; |
723 | if (EltSize == 32) { |
724 | unsigned MfvsrwzIndex = ST->isLittleEndian() ? 2 : 1; |
725 | if (Index == MfvsrwzIndex) |
726 | return 1; |
727 | |
        // For other indices, such as a non-constant one, P9 has the vxform
        // extract. The MaskCostForIdx is for masking the index.
730 | return CostFactor + MaskCostForIdx; |
731 | } |
732 | |
733 | // We need a vector extract (or mfvsrld). Assume vector operation cost. |
734 | // The cost of the load constant for a vector extract is disregarded |
735 | // (invariant, easily schedulable). |
736 | return CostFactor + MaskCostForOneBitSize + MaskCostForIdx; |
737 | } |
738 | } else if (ST->hasDirectMove() && Index != -1U) { |
739 | // Assume permute has standard cost. |
740 | // Assume move-to/move-from VSR have 2x standard cost. |
741 | if (ISD == ISD::INSERT_VECTOR_ELT) |
742 | return 3; |
743 | return 3 + MaskCostForOneBitSize; |
744 | } |
745 | } |
746 | |
747 | // Estimated cost of a load-hit-store delay. This was obtained |
748 | // experimentally as a minimum needed to prevent unprofitable |
749 | // vectorization for the paq8p benchmark. It may need to be |
750 | // raised further if other unprofitable cases remain. |
751 | unsigned LHSPenalty = 2; |
752 | if (ISD == ISD::INSERT_VECTOR_ELT) |
753 | LHSPenalty += 7; |
754 | |
755 | // Vector element insert/extract with Altivec is very expensive, |
756 | // because they require store and reload with the attendant |
757 | // processor stall for load-hit-store. Until VSX is available, |
758 | // these need to be estimated as very costly. |
759 | if (ISD == ISD::EXTRACT_VECTOR_ELT || |
760 | ISD == ISD::INSERT_VECTOR_ELT) |
761 | return LHSPenalty + Cost; |
762 | |
763 | return Cost; |
764 | } |
765 | |
766 | InstructionCost PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, |
767 | Align Alignment, |
768 | unsigned AddressSpace, |
769 | TTI::TargetCostKind CostKind, |
770 | TTI::OperandValueInfo OpInfo, |
771 | const Instruction *I) const { |
772 | |
  InstructionCost CostFactor = vectorCostAdjustmentFactor(Opcode, Src, nullptr);
  if (!CostFactor.isValid())
    return InstructionCost::getMax();

  if (TLI->getValueType(DL, Src, true) == MVT::Other)
778 | return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, |
779 | CostKind); |
780 | // Legalize the type. |
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");
784 | |
785 | InstructionCost Cost = |
786 | BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind); |
787 | // TODO: Handle other cost kinds. |
788 | if (CostKind != TTI::TCK_RecipThroughput) |
789 | return Cost; |
790 | |
791 | Cost *= CostFactor; |
792 | |
793 | bool IsAltivecType = ST->hasAltivec() && |
794 | (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 || |
795 | LT.second == MVT::v4i32 || LT.second == MVT::v4f32); |
796 | bool IsVSXType = ST->hasVSX() && |
797 | (LT.second == MVT::v2f64 || LT.second == MVT::v2i64); |
798 | |
799 | // VSX has 32b/64b load instructions. Legalization can handle loading of |
800 | // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and |
801 | // PPCTargetLowering can't compute the cost appropriately. So here we |
802 | // explicitly check this case. There are also corresponding store |
803 | // instructions. |
804 | unsigned MemBits = Src->getPrimitiveSizeInBits(); |
805 | unsigned SrcBytes = LT.second.getStoreSize(); |
806 | if (ST->hasVSX() && IsAltivecType) { |
807 | if (MemBits == 64 || (ST->hasP8Vector() && MemBits == 32)) |
808 | return 1; |
809 | |
810 | // Use lfiwax/xxspltw |
811 | if (Opcode == Instruction::Load && MemBits == 32 && Alignment < SrcBytes) |
812 | return 2; |
813 | } |
814 | |
815 | // Aligned loads and stores are easy. |
816 | if (!SrcBytes || Alignment >= SrcBytes) |
817 | return Cost; |
818 | |
819 | // If we can use the permutation-based load sequence, then this is also |
820 | // relatively cheap (not counting loop-invariant instructions): one load plus |
821 | // one permute (the last load in a series has extra cost, but we're |
822 | // neglecting that here). Note that on the P7, we could do unaligned loads |
823 | // for Altivec types using the VSX instructions, but that's more expensive |
824 | // than using the permutation-based load sequence. On the P8, that's no |
825 | // longer true. |
826 | if (Opcode == Instruction::Load && (!ST->hasP8Vector() && IsAltivecType) && |
827 | Alignment >= LT.second.getScalarType().getStoreSize()) |
828 | return Cost + LT.first; // Add the cost of the permutations. |
829 | |
830 | // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the |
831 | // P7, unaligned vector loads are more expensive than the permutation-based |
832 | // load sequence, so that might be used instead, but regardless, the net cost |
833 | // is about the same (not counting loop-invariant instructions). |
834 | if (IsVSXType || (ST->hasVSX() && IsAltivecType)) |
835 | return Cost; |
836 | |
837 | // Newer PPC supports unaligned memory access. |
  if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
839 | return Cost; |
840 | |
841 | // PPC in general does not support unaligned loads and stores. They'll need |
842 | // to be decomposed based on the alignment factor. |
843 | |
844 | // Add the cost of each scalar load or store. |
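  // For example, a 16-byte access with 4-byte alignment is charged
  // (16 / 4) - 1 = 3 additional scalar memory operations, scaled by the
  // number of legalized registers (LT.first).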
845 | Cost += LT.first * ((SrcBytes / Alignment.value()) - 1); |
846 | |
847 | // For a vector type, there is also scalarization overhead (only for |
848 | // stores, loads are expanded using the vector-load + permutation sequence, |
849 | // which is much less expensive). |
850 | if (Src->isVectorTy() && Opcode == Instruction::Store) |
    for (int I = 0, E = cast<FixedVectorType>(Src)->getNumElements(); I < E;
         ++I)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, CostKind, I,
                                 nullptr, nullptr);
855 | |
856 | return Cost; |
857 | } |
858 | |
859 | InstructionCost PPCTTIImpl::getInterleavedMemoryOpCost( |
860 | unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices, |
861 | Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, |
862 | bool UseMaskForCond, bool UseMaskForGaps) const { |
863 | InstructionCost CostFactor = |
      vectorCostAdjustmentFactor(Opcode, VecTy, nullptr);
865 | if (!CostFactor.isValid()) |
866 | return InstructionCost::getMax(); |
867 | |
868 | if (UseMaskForCond || UseMaskForGaps) |
869 | return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, |
870 | Alignment, AddressSpace, CostKind, |
871 | UseMaskForCond, UseMaskForGaps); |
872 | |
873 | assert(isa<VectorType>(VecTy) && |
874 | "Expect a vector type for interleaved memory op" ); |
875 | |
876 | // Legalize the type. |
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(VecTy);
878 | |
  // First, the cost of the load/store operation.
880 | InstructionCost Cost = |
      getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace, CostKind);
882 | |
  // PPC, for both Altivec/VSX, supports cheap arbitrary permutations
884 | // (at least in the sense that there need only be one non-loop-invariant |
885 | // instruction). For each result vector, we need one shuffle per incoming |
886 | // vector (except that the first shuffle can take two incoming vectors |
887 | // because it does not need to take itself). |
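  // For example, a factor-2 interleave whose wide type legalizes to two
  // registers (LT.first == 2) pays 2 * (2 - 1) = 2 extra shuffles on top of
  // the memory cost.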
  Cost += Factor * (LT.first - 1);
889 | |
890 | return Cost; |
891 | } |
892 | |
893 | InstructionCost |
894 | PPCTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, |
895 | TTI::TargetCostKind CostKind) const { |
896 | return BaseT::getIntrinsicInstrCost(ICA, CostKind); |
897 | } |
898 | |
899 | bool PPCTTIImpl::areInlineCompatible(const Function *Caller, |
900 | const Function *Callee) const { |
901 | const TargetMachine &TM = getTLI()->getTargetMachine(); |
902 | |
903 | const FeatureBitset &CallerBits = |
904 | TM.getSubtargetImpl(*Caller)->getFeatureBits(); |
905 | const FeatureBitset &CalleeBits = |
906 | TM.getSubtargetImpl(*Callee)->getFeatureBits(); |
907 | |
  // Check that the target features are exactly the same. We can revisit to
  // see if we can improve this.
910 | return CallerBits == CalleeBits; |
911 | } |
912 | |
913 | bool PPCTTIImpl::areTypesABICompatible(const Function *Caller, |
914 | const Function *Callee, |
915 | const ArrayRef<Type *> &Types) const { |
916 | |
917 | // We need to ensure that argument promotion does not |
918 | // attempt to promote pointers to MMA types (__vector_pair |
919 | // and __vector_quad) since these types explicitly cannot be |
920 | // passed as arguments. Both of these types are larger than |
921 | // the 128-bit Altivec vectors and have a scalar size of 1 bit. |
922 | if (!BaseT::areTypesABICompatible(Caller, Callee, Types)) |
923 | return false; |
924 | |
  return llvm::none_of(Types, [](Type *Ty) {
    if (Ty->isSized())
      return Ty->isIntOrIntVectorTy(1) && Ty->getPrimitiveSizeInBits() > 128;
928 | return false; |
929 | }); |
930 | } |
931 | |
932 | bool PPCTTIImpl::canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, |
933 | LoopInfo *LI, DominatorTree *DT, |
934 | AssumptionCache *AC, |
935 | TargetLibraryInfo *LibInfo) const { |
936 | // Process nested loops first. |
937 | for (Loop *I : *L) |
    if (canSaveCmp(I, BI, SE, LI, DT, AC, LibInfo))
939 | return false; // Stop search. |
940 | |
941 | HardwareLoopInfo HWLoopInfo(L); |
942 | |
  if (!HWLoopInfo.canAnalyze(*LI))
    return false;

  if (!isHardwareLoopProfitable(L, *SE, *AC, LibInfo, HWLoopInfo))
    return false;

  if (!HWLoopInfo.isHardwareLoopCandidate(*SE, *LI, *DT))
    return false;
950 | return false; |
951 | |
952 | *BI = HWLoopInfo.ExitBranch; |
953 | return true; |
954 | } |
955 | |
956 | bool PPCTTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1, |
957 | const TargetTransformInfo::LSRCost &C2) const { |
958 | // PowerPC default behaviour here is "instruction number 1st priority". |
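  // For example, a candidate needing 3 instructions but 10 registers is
  // preferred over one needing 4 instructions and 6 registers; NumRegs and
  // the later fields only break ties on Insns.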
959 | // If LsrNoInsnsCost is set, call default implementation. |
960 | if (!LsrNoInsnsCost) |
    return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost, C1.NumIVMuls,
                    C1.NumBaseAdds, C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
           std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost, C2.NumIVMuls,
                    C2.NumBaseAdds, C2.ScaleCost, C2.ImmCost, C2.SetupCost);
965 | return TargetTransformInfoImplBase::isLSRCostLess(C1, C2); |
966 | } |
967 | |
968 | bool PPCTTIImpl::isNumRegsMajorCostOfLSR() const { return false; } |
969 | |
970 | bool PPCTTIImpl::shouldBuildRelLookupTables() const { |
971 | const PPCTargetMachine &TM = ST->getTargetMachine(); |
972 | // XCOFF hasn't implemented lowerRelativeReference, disable non-ELF for now. |
973 | if (!TM.isELFv2ABI()) |
974 | return false; |
975 | return BaseT::shouldBuildRelLookupTables(); |
976 | } |
977 | |
978 | bool PPCTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst, |
979 | MemIntrinsicInfo &Info) const { |
980 | switch (Inst->getIntrinsicID()) { |
981 | case Intrinsic::ppc_altivec_lvx: |
982 | case Intrinsic::ppc_altivec_lvxl: |
983 | case Intrinsic::ppc_altivec_lvebx: |
984 | case Intrinsic::ppc_altivec_lvehx: |
985 | case Intrinsic::ppc_altivec_lvewx: |
986 | case Intrinsic::ppc_vsx_lxvd2x: |
987 | case Intrinsic::ppc_vsx_lxvw4x: |
988 | case Intrinsic::ppc_vsx_lxvd2x_be: |
989 | case Intrinsic::ppc_vsx_lxvw4x_be: |
990 | case Intrinsic::ppc_vsx_lxvl: |
991 | case Intrinsic::ppc_vsx_lxvll: |
992 | case Intrinsic::ppc_vsx_lxvp: { |
    Info.PtrVal = Inst->getArgOperand(0);
994 | Info.ReadMem = true; |
995 | Info.WriteMem = false; |
996 | return true; |
997 | } |
998 | case Intrinsic::ppc_altivec_stvx: |
999 | case Intrinsic::ppc_altivec_stvxl: |
1000 | case Intrinsic::ppc_altivec_stvebx: |
1001 | case Intrinsic::ppc_altivec_stvehx: |
1002 | case Intrinsic::ppc_altivec_stvewx: |
1003 | case Intrinsic::ppc_vsx_stxvd2x: |
1004 | case Intrinsic::ppc_vsx_stxvw4x: |
1005 | case Intrinsic::ppc_vsx_stxvd2x_be: |
1006 | case Intrinsic::ppc_vsx_stxvw4x_be: |
1007 | case Intrinsic::ppc_vsx_stxvl: |
1008 | case Intrinsic::ppc_vsx_stxvll: |
1009 | case Intrinsic::ppc_vsx_stxvp: { |
    Info.PtrVal = Inst->getArgOperand(1);
1011 | Info.ReadMem = false; |
1012 | Info.WriteMem = true; |
1013 | return true; |
1014 | } |
1015 | case Intrinsic::ppc_stbcx: |
1016 | case Intrinsic::ppc_sthcx: |
1017 | case Intrinsic::ppc_stdcx: |
1018 | case Intrinsic::ppc_stwcx: { |
    Info.PtrVal = Inst->getArgOperand(0);
1020 | Info.ReadMem = false; |
1021 | Info.WriteMem = true; |
1022 | return true; |
1023 | } |
1024 | default: |
1025 | break; |
1026 | } |
1027 | |
1028 | return false; |
1029 | } |
1030 | |
1031 | bool PPCTTIImpl::supportsTailCallFor(const CallBase *CB) const { |
1032 | return TLI->supportsTailCallFor(CB); |
1033 | } |
1034 | |