//===- HexagonSubtarget.cpp - Hexagon Subtarget Information ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Hexagon specific subclass of TargetSubtarget.
//
//===----------------------------------------------------------------------===//

#include "HexagonSubtarget.h"
#include "HexagonInstrInfo.h"
#include "HexagonRegisterInfo.h"
#include "MCTargetDesc/HexagonMCTargetDesc.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <map>
#include <optional>

using namespace llvm;

#define DEBUG_TYPE "hexagon-subtarget"

#define GET_SUBTARGETINFO_CTOR
#define GET_SUBTARGETINFO_TARGET_DESC
#include "HexagonGenSubtargetInfo.inc"

static cl::opt<bool> EnableBSBSched("enable-bsb-sched", cl::Hidden,
                                    cl::init(true));

static cl::opt<bool> EnableTCLatencySched("enable-tc-latency-sched", cl::Hidden,
                                           cl::init(false));

static cl::opt<bool>
    EnableDotCurSched("enable-cur-sched", cl::Hidden, cl::init(true),
                      cl::desc("Enable the scheduler to generate .cur"));

static cl::opt<bool>
    DisableHexagonMISched("disable-hexagon-misched", cl::Hidden,
                          cl::desc("Disable Hexagon MI Scheduling"));

static cl::opt<bool> OverrideLongCalls(
    "hexagon-long-calls", cl::Hidden,
    cl::desc("If present, forces/disables the use of long calls"));

static cl::opt<bool>
    EnablePredicatedCalls("hexagon-pred-calls", cl::Hidden,
                          cl::desc("Consider calls to be predicable"));

static cl::opt<bool> SchedPredsCloser("sched-preds-closer", cl::Hidden,
                                      cl::init(true));

static cl::opt<bool> SchedRetvalOptimization("sched-retval-optimization",
                                             cl::Hidden, cl::init(true));

static cl::opt<bool> EnableCheckBankConflict(
    "hexagon-check-bank-conflict", cl::Hidden, cl::init(true),
    cl::desc("Enable checking for cache bank conflicts"));

HexagonSubtarget::HexagonSubtarget(const Triple &TT, StringRef CPU,
                                   StringRef FS, const TargetMachine &TM)
    : HexagonGenSubtargetInfo(TT, CPU, /*TuneCPU*/ CPU, FS),
      OptLevel(TM.getOptLevel()),
      CPUString(std::string(Hexagon_MC::selectHexagonCPU(CPU))),
      TargetTriple(TT), InstrInfo(initializeSubtargetDependencies(CPU, FS)),
      RegInfo(getHwMode()), TLInfo(TM, *this),
      InstrItins(getInstrItineraryForCPU(CPUString)) {
  Hexagon_MC::addArchSubtarget(this, FS);
  // Beware of the default constructor of InstrItineraryData: it will
  // reset all members to 0.
  assert(InstrItins.Itineraries != nullptr && "InstrItins not initialized");
}

HexagonSubtarget &
HexagonSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) {
  std::optional<Hexagon::ArchEnum> ArchVer = Hexagon::getCpu(CPUString);
  if (ArchVer)
    HexagonArchVersion = *ArchVer;
  else
    llvm_unreachable("Unrecognized Hexagon processor version");

  UseHVX128BOps = false;
  UseHVX64BOps = false;
  UseAudioOps = false;
  UseLongCalls = false;

  SubtargetFeatures Features(FS);

  // Turn on QFloat if the HVX version is v68+.
  // The function ParseSubtargetFeatures will set feature bits and initialize
  // subtarget's variables all in one, so there isn't a good way to preprocess
  // the feature string, other than by tinkering with it directly.
  auto IsQFloatFS = [](StringRef F) {
    return F == "+hvx-qfloat" || F == "-hvx-qfloat";
  };
  if (!llvm::count_if(Features.getFeatures(), IsQFloatFS)) {
    auto getHvxVersion = [&Features](StringRef FS) -> StringRef {
      for (StringRef F : llvm::reverse(Features.getFeatures())) {
        if (F.starts_with("+hvxv"))
          return F;
      }
      for (StringRef F : llvm::reverse(Features.getFeatures())) {
        if (F == "-hvx")
          return StringRef();
        if (F.starts_with("+hvx") || F == "-hvx")
          return F.take_front(4); // Return "+hvx" or "-hvx".
      }
      return StringRef();
    };

    bool AddQFloat = false;
    StringRef HvxVer = getHvxVersion(FS);
    if (HvxVer.starts_with("+hvxv")) {
      int Ver = 0;
      if (!HvxVer.drop_front(5).consumeInteger(10, Ver) && Ver >= 68)
        AddQFloat = true;
    } else if (HvxVer == "+hvx") {
      if (hasV68Ops())
        AddQFloat = true;
    }

    if (AddQFloat)
      Features.AddFeature("+hvx-qfloat");
  }

  std::string FeatureString = Features.getString();
  ParseSubtargetFeatures(CPUString, /*TuneCPU*/ CPUString, FeatureString);

  if (useHVXV68Ops())
    UseHVXFloatingPoint = UseHVXIEEEFPOps || UseHVXQFloatOps;

  if (UseHVXQFloatOps && UseHVXIEEEFPOps && UseHVXFloatingPoint)
    LLVM_DEBUG(
        dbgs() << "Behavior is undefined for simultaneous qfloat and ieee hvx codegen...");

  if (OverrideLongCalls.getPosition())
    UseLongCalls = OverrideLongCalls;

  UseBSBScheduling = hasV60Ops() && EnableBSBSched;

  if (isTinyCore()) {
    // Tiny core has a single thread, so back-to-back scheduling is enabled by
    // default.
    if (!EnableBSBSched.getPosition())
      UseBSBScheduling = false;
  }

  FeatureBitset FeatureBits = getFeatureBits();
  if (HexagonDisableDuplex)
    setFeatureBits(FeatureBits.reset(Hexagon::FeatureDuplex));
  setFeatureBits(Hexagon_MC::completeHVXFeatures(FeatureBits));

  return *this;
}

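// Return true if Ty (or, for vector types, its element type) is an element
// type that HVX operations support. With IncludeBool set, i1 is accepted as
// well.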
bool HexagonSubtarget::isHVXElementType(MVT Ty, bool IncludeBool) const {
  if (!useHVXOps())
    return false;
  if (Ty.isVector())
    Ty = Ty.getVectorElementType();
  if (IncludeBool && Ty == MVT::i1)
    return true;
  ArrayRef<MVT> ElemTypes = getHVXElementTypes();
  return llvm::is_contained(ElemTypes, Ty);
}

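// Return true if VecTy is a fixed-width vector whose total size matches a
// single HVX vector or a vector pair and whose element type is a supported
// HVX element type. With IncludeBool set, boolean vectors whose element
// count matches a legal single HVX vector are accepted too.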
bool HexagonSubtarget::isHVXVectorType(EVT VecTy, bool IncludeBool) const {
  if (!VecTy.isSimple())
    return false;
  if (!VecTy.isVector() || !useHVXOps() || VecTy.isScalableVector())
    return false;
  MVT ElemTy = VecTy.getSimpleVT().getVectorElementType();
  if (!IncludeBool && ElemTy == MVT::i1)
    return false;

  unsigned HwLen = getVectorLength();
  unsigned NumElems = VecTy.getVectorNumElements();
  ArrayRef<MVT> ElemTypes = getHVXElementTypes();

  if (IncludeBool && ElemTy == MVT::i1) {
    // Boolean HVX vector types are formed from regular HVX vector types
    // by replacing the element type with i1.
    for (MVT T : ElemTypes)
      if (NumElems * T.getSizeInBits() == 8 * HwLen)
        return true;
    return false;
  }

  unsigned VecWidth = VecTy.getSizeInBits();
  if (VecWidth != 8 * HwLen && VecWidth != 16 * HwLen)
    return false;
  return llvm::is_contained(ElemTypes, ElemTy);
}

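// Return true if the IR vector type VecTy can be handled by HVX, either
// directly or after widening its element count to the next power of 2.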
bool HexagonSubtarget::isTypeForHVX(Type *VecTy, bool IncludeBool) const {
  if (!VecTy->isVectorTy() || isa<ScalableVectorType>(VecTy))
    return false;
  // Avoid types like <2 x i32*>.
  Type *ScalTy = VecTy->getScalarType();
  if (!ScalTy->isIntegerTy() &&
      !(ScalTy->isFloatingPointTy() && useHVXFloatingPoint()))
    return false;
  // The given type may be something like <17 x i32>, which is not MVT,
  // but can be represented as (non-simple) EVT.
  EVT Ty = EVT::getEVT(VecTy, /*HandleUnknown*/ false);
  if (!Ty.getVectorElementType().isSimple())
    return false;

  auto isHvxTy = [this, IncludeBool](MVT SimpleTy) {
    if (isHVXVectorType(SimpleTy, IncludeBool))
      return true;
    auto Action = getTargetLowering()->getPreferredVectorAction(SimpleTy);
    return Action == TargetLoweringBase::TypeWidenVector;
  };

  // Round up EVT to have power-of-2 elements, and keep checking if it
  // qualifies for HVX, dividing it in half after each step.
  MVT ElemTy = Ty.getVectorElementType().getSimpleVT();
  unsigned VecLen = PowerOf2Ceil(Ty.getVectorNumElements());
  while (VecLen > 1) {
    MVT SimpleTy = MVT::getVectorVT(ElemTy, VecLen);
    if (SimpleTy.isValid() && isHvxTy(SimpleTy))
      return true;
    VecLen /= 2;
  }

  return false;
}

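// Remove output dependences on the USR overflow bit (USR_OVF): such WAW
// edges between instructions that may set the flag would otherwise
// over-constrain the schedule.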
void HexagonSubtarget::UsrOverflowMutation::apply(ScheduleDAGInstrs *DAG) {
  for (SUnit &SU : DAG->SUnits) {
    if (!SU.isInstr())
      continue;
    SmallVector<SDep, 4> Erase;
    for (auto &D : SU.Preds)
      if (D.getKind() == SDep::Output && D.getReg() == Hexagon::USR_OVF)
        Erase.push_back(D);
    for (auto &E : Erase)
      SU.removePred(E);
  }
}

void HexagonSubtarget::HVXMemLatencyMutation::apply(ScheduleDAGInstrs *DAG) {
  for (SUnit &SU : DAG->SUnits) {
    // Update the latency of chain edges between v60 vector load or store
    // instructions to be 1. These instructions cannot be scheduled in the
    // same packet.
    MachineInstr &MI1 = *SU.getInstr();
    auto *QII = static_cast<const HexagonInstrInfo*>(DAG->TII);
    bool IsStoreMI1 = MI1.mayStore();
    bool IsLoadMI1 = MI1.mayLoad();
    if (!QII->isHVXVec(MI1) || !(IsStoreMI1 || IsLoadMI1))
      continue;
    for (SDep &SI : SU.Succs) {
      if (SI.getKind() != SDep::Order || SI.getLatency() != 0)
        continue;
      MachineInstr &MI2 = *SI.getSUnit()->getInstr();
      if (!QII->isHVXVec(MI2))
        continue;
      if ((IsStoreMI1 && MI2.mayStore()) || (IsLoadMI1 && MI2.mayLoad())) {
        SI.setLatency(1);
        SU.setHeightDirty();
        // Change the dependence in the opposite direction too.
        for (SDep &PI : SI.getSUnit()->Preds) {
          if (PI.getSUnit() != &SU || PI.getKind() != SDep::Order)
            continue;
          PI.setLatency(1);
          SI.getSUnit()->setDepthDirty();
        }
      }
    }
  }
}

// Check if a call and subsequent A2_tfrpi instructions should maintain
// scheduling affinity. We are looking for the TFRI to be consumed in
// the next instruction. This should help reduce the instances of
// double register pairs being allocated and scheduled before a call
// when not used until after the call. This situation is exacerbated
// by the fact that we allocate the pair from the callee saves list,
// leading to excess spills and restores.
bool HexagonSubtarget::CallMutation::shouldTFRICallBind(
    const HexagonInstrInfo &HII, const SUnit &Inst1,
    const SUnit &Inst2) const {
  if (Inst1.getInstr()->getOpcode() != Hexagon::A2_tfrpi)
    return false;

  // TypeXTYPE are 64 bit operations.
  unsigned Type = HII.getType(*Inst2.getInstr());
  return Type == HexagonII::TypeS_2op || Type == HexagonII::TypeS_3op ||
         Type == HexagonII::TypeALU64 || Type == HexagonII::TypeM;
}

void HexagonSubtarget::CallMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
  ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
  SUnit* LastSequentialCall = nullptr;
  // Map from virtual register to physical register from the copy.
  DenseMap<unsigned, unsigned> VRegHoldingReg;
  // Map from the physical register to the instruction that uses virtual
  // register. This is used to create the barrier edge.
  DenseMap<unsigned, SUnit *> LastVRegUse;
  auto &TRI = *DAG->MF.getSubtarget().getRegisterInfo();
  auto &HII = *DAG->MF.getSubtarget<HexagonSubtarget>().getInstrInfo();

  // Currently we only catch the situation when a compare gets scheduled
  // before the preceding call.
  for (unsigned su = 0, e = DAG->SUnits.size(); su != e; ++su) {
    // Remember the call.
    if (DAG->SUnits[su].getInstr()->isCall())
      LastSequentialCall = &DAG->SUnits[su];
    // Look for a compare that defines a predicate.
    else if (DAG->SUnits[su].getInstr()->isCompare() && LastSequentialCall)
      DAG->addEdge(&DAG->SUnits[su], SDep(LastSequentialCall, SDep::Barrier));
    // Look for call and tfri* instructions.
    else if (SchedPredsCloser && LastSequentialCall && su > 1 && su < e-1 &&
             shouldTFRICallBind(HII, DAG->SUnits[su], DAG->SUnits[su+1]))
      DAG->addEdge(&DAG->SUnits[su], SDep(&DAG->SUnits[su-1], SDep::Barrier));
    // Prevent redundant register copies due to reads and writes of physical
    // registers. The original motivation for this was the code generated
    // between two calls, which is caused by both the return value and the
    // argument for the next call being in %r0.
    // Example:
    //   1: <call1>
    //   2: %vreg = COPY %r0
    //   3: <use of %vreg>
    //   4: %r0 = ...
    //   5: <call2>
    // The scheduler would often swap 3 and 4, so an additional register is
    // needed. This code inserts a Barrier dependence between 3 & 4 to prevent
    // this.
    // The code below checks for all the physical registers, not just R0/D0/V0.
    else if (SchedRetvalOptimization) {
      const MachineInstr *MI = DAG->SUnits[su].getInstr();
      if (MI->isCopy() && MI->getOperand(1).getReg().isPhysical()) {
        // %vregX = COPY %r0
        VRegHoldingReg[MI->getOperand(0).getReg()] = MI->getOperand(1).getReg();
        LastVRegUse.erase(MI->getOperand(1).getReg());
      } else {
        for (const MachineOperand &MO : MI->operands()) {
          if (!MO.isReg())
            continue;
          if (MO.isUse() && !MI->isCopy() &&
              VRegHoldingReg.count(MO.getReg())) {
            // <use of %vregX>
            LastVRegUse[VRegHoldingReg[MO.getReg()]] = &DAG->SUnits[su];
          } else if (MO.isDef() && MO.getReg().isPhysical()) {
            for (MCRegAliasIterator AI(MO.getReg(), &TRI, true); AI.isValid();
                 ++AI) {
              if (auto It = LastVRegUse.find(*AI); It != LastVRegUse.end()) {
                if (It->second != &DAG->SUnits[su])
                  // %r0 = ...
                  DAG->addEdge(&DAG->SUnits[su],
                               SDep(It->second, SDep::Barrier));
                LastVRegUse.erase(It);
              }
            }
          }
        }
      }
    }
  }
}

void HexagonSubtarget::BankConflictMutation::apply(ScheduleDAGInstrs *DAG) {
  if (!EnableCheckBankConflict)
    return;

  const auto &HII = static_cast<const HexagonInstrInfo&>(*DAG->TII);

  // Create artificial edges between loads that could likely cause a bank
  // conflict. Since such loads would normally not have any dependency
  // between them, we cannot rely on existing edges.
  for (unsigned i = 0, e = DAG->SUnits.size(); i != e; ++i) {
    SUnit &S0 = DAG->SUnits[i];
    MachineInstr &L0 = *S0.getInstr();
    if (!L0.mayLoad() || L0.mayStore() ||
        HII.getAddrMode(L0) != HexagonII::BaseImmOffset)
      continue;
    int64_t Offset0;
    LocationSize Size0 = LocationSize::precise(0);
    MachineOperand *BaseOp0 = HII.getBaseAndOffset(L0, Offset0, Size0);
    // If the access size is longer than the L1 cache line, skip the check.
    if (BaseOp0 == nullptr || !BaseOp0->isReg() || !Size0.hasValue() ||
        Size0.getValue() >= 32)
      continue;
    // Scan only up to 32 instructions ahead (to avoid n^2 complexity).
    for (unsigned j = i+1, m = std::min(i+32, e); j != m; ++j) {
      SUnit &S1 = DAG->SUnits[j];
      MachineInstr &L1 = *S1.getInstr();
      if (!L1.mayLoad() || L1.mayStore() ||
          HII.getAddrMode(L1) != HexagonII::BaseImmOffset)
        continue;
      int64_t Offset1;
      LocationSize Size1 = LocationSize::precise(0);
      MachineOperand *BaseOp1 = HII.getBaseAndOffset(L1, Offset1, Size1);
      if (BaseOp1 == nullptr || !BaseOp1->isReg() || !Size1.hasValue() ||
          Size1.getValue() >= 32 || BaseOp0->getReg() != BaseOp1->getReg())
        continue;
      // Check bits 3 and 4 of the offset: if they differ, a bank conflict
      // is unlikely.
      if (((Offset0 ^ Offset1) & 0x18) != 0)
        continue;
      // Bits 3 and 4 are the same, add an artificial edge and set extra
      // latency.
      SDep A(&S0, SDep::Artificial);
      A.setLatency(1);
      S1.addPred(A, true);
    }
  }
}

/// Enable use of alias analysis during code generation (during MI
/// scheduling, DAGCombine, etc.).
bool HexagonSubtarget::useAA() const {
  if (OptLevel != CodeGenOptLevel::None)
    return true;
  return false;
}

/// Perform target specific adjustments to the latency of a schedule
/// dependency.
void HexagonSubtarget::adjustSchedDependency(
    SUnit *Src, int SrcOpIdx, SUnit *Dst, int DstOpIdx, SDep &Dep,
    const TargetSchedModel *SchedModel) const {
  if (!Src->isInstr() || !Dst->isInstr())
    return;

  MachineInstr *SrcInst = Src->getInstr();
  MachineInstr *DstInst = Dst->getInstr();
  const HexagonInstrInfo *QII = getInstrInfo();

  // Instructions with .new operands have zero latency.
  SmallSet<SUnit *, 4> ExclSrc;
  SmallSet<SUnit *, 4> ExclDst;
  if (QII->canExecuteInBundle(*SrcInst, *DstInst) &&
      isBestZeroLatency(Src, Dst, QII, ExclSrc, ExclDst)) {
    Dep.setLatency(0);
    return;
  }

  // Set the latency for a copy to zero since we hope that it will get
  // removed.
  if (DstInst->isCopy())
    Dep.setLatency(0);

  // If it's a REG_SEQUENCE/COPY, use its destination instruction to determine
  // the correct latency.
  // If there are multiple uses of the def of COPY/REG_SEQUENCE, set the latency
  // only if the latencies on all the uses are equal, otherwise set it to
  // default.
  if ((DstInst->isRegSequence() || DstInst->isCopy())) {
    Register DReg = DstInst->getOperand(0).getReg();
    std::optional<unsigned> DLatency;
    for (const auto &DDep : Dst->Succs) {
      MachineInstr *DDst = DDep.getSUnit()->getInstr();
      int UseIdx = -1;
      for (unsigned OpNum = 0; OpNum < DDst->getNumOperands(); OpNum++) {
        const MachineOperand &MO = DDst->getOperand(OpNum);
        if (MO.isReg() && MO.getReg() && MO.isUse() && MO.getReg() == DReg) {
          UseIdx = OpNum;
          break;
        }
      }

      if (UseIdx == -1)
        continue;

      std::optional<unsigned> Latency =
          InstrInfo.getOperandLatency(&InstrItins, *SrcInst, 0, *DDst, UseIdx);

      // Set DLatency for the first time.
      if (!DLatency)
        DLatency = Latency;

      // For multiple uses, if the Latency is different across uses, reset
      // DLatency.
      if (DLatency != Latency) {
        DLatency = std::nullopt;
        break;
      }
    }
    Dep.setLatency(DLatency.value_or(0));
  }

  // Try to schedule uses near definitions to generate .cur.
  ExclSrc.clear();
  ExclDst.clear();
  if (EnableDotCurSched && QII->isToBeScheduledASAP(*SrcInst, *DstInst) &&
      isBestZeroLatency(Src, Dst, QII, ExclSrc, ExclDst)) {
    Dep.setLatency(0);
    return;
  }
  int Latency = Dep.getLatency();
  bool IsArtificial = Dep.isArtificial();
  Latency = updateLatency(*SrcInst, *DstInst, IsArtificial, Latency);
  Dep.setLatency(Latency);
}

void HexagonSubtarget::getPostRAMutations(
    std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
  Mutations.push_back(std::make_unique<UsrOverflowMutation>());
  Mutations.push_back(std::make_unique<HVXMemLatencyMutation>());
  Mutations.push_back(std::make_unique<BankConflictMutation>());
}

void HexagonSubtarget::getSMSMutations(
    std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
  Mutations.push_back(std::make_unique<UsrOverflowMutation>());
  Mutations.push_back(std::make_unique<HVXMemLatencyMutation>());
}

// Pin the vtable to this file.
void HexagonSubtarget::anchor() {}

bool HexagonSubtarget::enableMachineScheduler() const {
  if (DisableHexagonMISched.getNumOccurrences())
    return !DisableHexagonMISched;
  return true;
}

bool HexagonSubtarget::usePredicatedCalls() const {
  return EnablePredicatedCalls;
}

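// Compute the final latency for a dependence edge: artificial edges get a
// latency of 1, and on V60+ the latency is halved (rounding up) for HVX
// vector producers or when BSB scheduling is in effect.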
int HexagonSubtarget::updateLatency(MachineInstr &SrcInst,
                                    MachineInstr &DstInst, bool IsArtificial,
                                    int Latency) const {
  if (IsArtificial)
    return 1;
  if (!hasV60Ops())
    return Latency;

  auto &QII = static_cast<const HexagonInstrInfo &>(*getInstrInfo());
  // BSB scheduling.
  if (QII.isHVXVec(SrcInst) || useBSBScheduling())
    Latency = (Latency + 1) >> 1;
  return Latency;
}

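// Recompute the latency of the register dependence edges between Src and
// Dst from the instruction itineraries, updating both directions of each
// edge (for example, to undo an earlier zero-latency assignment).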
void HexagonSubtarget::restoreLatency(SUnit *Src, SUnit *Dst) const {
  MachineInstr *SrcI = Src->getInstr();
  for (auto &I : Src->Succs) {
    if (!I.isAssignedRegDep() || I.getSUnit() != Dst)
      continue;
    Register DepR = I.getReg();
    int DefIdx = -1;
    for (unsigned OpNum = 0; OpNum < SrcI->getNumOperands(); OpNum++) {
      const MachineOperand &MO = SrcI->getOperand(OpNum);
      bool IsSameOrSubReg = false;
      if (MO.isReg()) {
        Register MOReg = MO.getReg();
        if (DepR.isVirtual()) {
          IsSameOrSubReg = (MOReg == DepR);
        } else {
          IsSameOrSubReg = getRegisterInfo()->isSubRegisterEq(DepR, MOReg);
        }
        if (MO.isDef() && IsSameOrSubReg)
          DefIdx = OpNum;
      }
    }
    assert(DefIdx >= 0 && "Def Reg not found in Src MI");
    MachineInstr *DstI = Dst->getInstr();
    SDep T = I;
    for (unsigned OpNum = 0; OpNum < DstI->getNumOperands(); OpNum++) {
      const MachineOperand &MO = DstI->getOperand(OpNum);
      if (MO.isReg() && MO.isUse() && MO.getReg() == DepR) {
        std::optional<unsigned> Latency = InstrInfo.getOperandLatency(
            &InstrItins, *SrcI, DefIdx, *DstI, OpNum);

        // For some instructions (ex: COPY), we might not get a latency at all
        // as they don't have any Itinerary class associated with them.
        if (!Latency)
          Latency = 0;
        bool IsArtificial = I.isArtificial();
        Latency = updateLatency(*SrcI, *DstI, IsArtificial, *Latency);
        I.setLatency(*Latency);
      }
    }

    // Update the latency of opposite edge too.
    T.setSUnit(Src);
    auto F = find(Dst->Preds, T);
    assert(F != Dst->Preds.end());
    F->setLatency(I.getLatency());
  }
}

/// Change the latency between the two SUnits.
void HexagonSubtarget::changeLatency(SUnit *Src, SUnit *Dst, unsigned Lat)
      const {
  for (auto &I : Src->Succs) {
    if (!I.isAssignedRegDep() || I.getSUnit() != Dst)
      continue;
    SDep T = I;
    I.setLatency(Lat);

    // Update the latency of opposite edge too.
    T.setSUnit(Src);
    auto F = find(Dst->Preds, T);
    assert(F != Dst->Preds.end());
    F->setLatency(Lat);
  }
}

/// If the SUnit has a zero latency edge, return the other SUnit.
static SUnit *getZeroLatency(SUnit *N, SmallVector<SDep, 4> &Deps) {
  for (auto &I : Deps)
    if (I.isAssignedRegDep() && I.getLatency() == 0 &&
        !I.getSUnit()->getInstr()->isPseudo())
      return I.getSUnit();
  return nullptr;
}

// Return true if these are the best two instructions to schedule
// together with a zero latency. Only one dependence should have a zero
// latency. If there are multiple choices, choose the best, and change
// the others, if needed.
bool HexagonSubtarget::isBestZeroLatency(SUnit *Src, SUnit *Dst,
      const HexagonInstrInfo *TII, SmallSet<SUnit*, 4> &ExclSrc,
      SmallSet<SUnit*, 4> &ExclDst) const {
  MachineInstr &SrcInst = *Src->getInstr();
  MachineInstr &DstInst = *Dst->getInstr();

  // Ignore Boundary SU nodes as these have null instructions.
  if (Dst->isBoundaryNode())
    return false;

  if (SrcInst.isPHI() || DstInst.isPHI())
    return false;

  if (!TII->isToBeScheduledASAP(SrcInst, DstInst) &&
      !TII->canExecuteInBundle(SrcInst, DstInst))
    return false;

  // The architecture doesn't allow three dependent instructions in the same
  // packet. So, if the destination has a zero latency successor, then it's
  // not a candidate for a zero latency predecessor.
  if (getZeroLatency(Dst, Dst->Succs) != nullptr)
    return false;

  // Check if the Dst instruction is the best candidate first.
  SUnit *Best = nullptr;
  SUnit *DstBest = nullptr;
  SUnit *SrcBest = getZeroLatency(Dst, Dst->Preds);
  if (SrcBest == nullptr || Src->NodeNum >= SrcBest->NodeNum) {
    // Check that Src doesn't have a better candidate.
    DstBest = getZeroLatency(Src, Src->Succs);
    if (DstBest == nullptr || Dst->NodeNum <= DstBest->NodeNum)
      Best = Dst;
  }
  if (Best != Dst)
    return false;

  // The caller frequently adds the same dependence twice. If so, then
  // return true for this case too.
  if ((Src == SrcBest && Dst == DstBest) ||
      (SrcBest == nullptr && Dst == DstBest) ||
      (Src == SrcBest && Dst == nullptr))
    return true;

  // Reassign the latency for the previous bests, which requires setting
  // the dependence edge in both directions.
  if (SrcBest != nullptr) {
    if (!hasV60Ops())
      changeLatency(SrcBest, Dst, 1);
    else
      restoreLatency(SrcBest, Dst);
  }
  if (DstBest != nullptr) {
    if (!hasV60Ops())
      changeLatency(Src, DstBest, 1);
    else
      restoreLatency(Src, DstBest);
  }

  // Attempt to find another opportunity for zero latency in a different
  // dependence.
  if (SrcBest && DstBest)
    // If there is an edge from SrcBest to DstBest, then try to change that
    // to 0 now.
    changeLatency(SrcBest, DstBest, 0);
  else if (DstBest) {
    // Check if the previous best destination instruction has a new zero
    // latency dependence opportunity.
    ExclSrc.insert(Src);
    for (auto &I : DstBest->Preds)
      if (ExclSrc.count(I.getSUnit()) == 0 &&
          isBestZeroLatency(I.getSUnit(), DstBest, TII, ExclSrc, ExclDst))
        changeLatency(I.getSUnit(), DstBest, 0);
  } else if (SrcBest) {
    // Check if previous best source instruction has a new zero latency
    // dependence opportunity.
    ExclDst.insert(Dst);
    for (auto &I : SrcBest->Succs)
      if (ExclDst.count(I.getSUnit()) == 0 &&
          isBestZeroLatency(SrcBest, I.getSUnit(), TII, ExclSrc, ExclDst))
        changeLatency(SrcBest, I.getSUnit(), 0);
  }

  return true;
}

unsigned HexagonSubtarget::getL1CacheLineSize() const {
  return 32;
}

unsigned HexagonSubtarget::getL1PrefetchDistance() const {
  return 32;
}

bool HexagonSubtarget::enableSubRegLiveness() const { return true; }

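// Map a Hexagon machine opcode to the corresponding LLVM intrinsic ID using
// the tables included from HexagonDepInstrIntrinsics.inc. HVX instructions
// map to different intrinsics for the 64-byte and 128-byte vector lengths.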
Intrinsic::ID HexagonSubtarget::getIntrinsicId(unsigned Opc) const {
  struct Scalar {
    unsigned Opcode;
    Intrinsic::ID IntId;
  };
  struct Hvx {
    unsigned Opcode;
    Intrinsic::ID Int64Id, Int128Id;
  };

  static Scalar ScalarInts[] = {
#define GET_SCALAR_INTRINSICS
#include "HexagonDepInstrIntrinsics.inc"
#undef GET_SCALAR_INTRINSICS
  };

  static Hvx HvxInts[] = {
#define GET_HVX_INTRINSICS
#include "HexagonDepInstrIntrinsics.inc"
#undef GET_HVX_INTRINSICS
  };

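  // Sort both tables by opcode on the first call so the lookups below can
  // use binary search.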
  const auto CmpOpcode = [](auto A, auto B) { return A.Opcode < B.Opcode; };
  [[maybe_unused]] static bool SortedScalar =
      (llvm::sort(ScalarInts, CmpOpcode), true);
  [[maybe_unused]] static bool SortedHvx =
      (llvm::sort(HvxInts, CmpOpcode), true);

  auto [BS, ES] = std::make_pair(std::begin(ScalarInts), std::end(ScalarInts));
  auto [BH, EH] = std::make_pair(std::begin(HvxInts), std::end(HvxInts));

  auto FoundScalar = std::lower_bound(BS, ES, Scalar{Opc, 0}, CmpOpcode);
  if (FoundScalar != ES && FoundScalar->Opcode == Opc)
    return FoundScalar->IntId;

  auto FoundHvx = std::lower_bound(BH, EH, Hvx{Opc, 0, 0}, CmpOpcode);
  if (FoundHvx != EH && FoundHvx->Opcode == Opc) {
    unsigned HwLen = getVectorLength();
    if (HwLen == 64)
      return FoundHvx->Int64Id;
    if (HwLen == 128)
      return FoundHvx->Int128Id;
  }

  std::string error = "Invalid opcode (" + std::to_string(Opc) + ")";
  llvm_unreachable(error.c_str());
  return 0;
}