//===- RegisterCoalescer.cpp - Generic Register Coalescing Interface ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the generic RegisterCoalescer interface, which is the
// common interface used by all clients and implementations of register
// coalescing.
//
//===----------------------------------------------------------------------===//

#include "RegisterCoalescer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePassManager.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/RegisterCoalescerPass.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <limits>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "regalloc"

STATISTIC(numJoins, "Number of interval joins performed");
STATISTIC(numCrossRCs, "Number of cross class joins performed");
STATISTIC(numCommutes, "Number of instruction commuting performed");
STATISTIC(numExtends, "Number of copies extended");
STATISTIC(NumReMats, "Number of instructions re-materialized");
STATISTIC(NumInflated, "Number of register classes inflated");
STATISTIC(NumLaneConflicts, "Number of dead lane conflicts tested");
STATISTIC(NumLaneResolves, "Number of dead lane conflicts resolved");
STATISTIC(NumShrinkToUses, "Number of shrinkToUses called");

static cl::opt<bool> EnableJoining("join-liveintervals",
                                   cl::desc("Coalesce copies (default=true)"),
                                   cl::init(true), cl::Hidden);

static cl::opt<bool> UseTerminalRule("terminal-rule",
                                     cl::desc("Apply the terminal rule"),
                                     cl::init(true), cl::Hidden);

/// Temporary flag to test critical edge unsplitting.
static cl::opt<bool> EnableJoinSplits(
    "join-splitedges",
    cl::desc("Coalesce copies on split edges (default=subtarget)"), cl::Hidden);

/// Temporary flag to test global copy optimization.
static cl::opt<cl::boolOrDefault> EnableGlobalCopies(
    "join-globalcopies",
    cl::desc("Coalesce copies that span blocks (default=subtarget)"),
    cl::init(cl::BOU_UNSET), cl::Hidden);

static cl::opt<bool> VerifyCoalescing(
    "verify-coalescing",
    cl::desc("Verify machine instrs before and after register coalescing"),
    cl::Hidden);

static cl::opt<unsigned> LateRematUpdateThreshold(
    "late-remat-update-threshold", cl::Hidden,
    cl::desc("During rematerialization for a copy, if the def instruction has "
             "many other copy uses to be rematerialized, delay the multiple "
             "separate live interval updates and do them all at once after "
             "all those rematerializations are done. This saves a lot of "
             "repeated work."),
    cl::init(100));

static cl::opt<unsigned> LargeIntervalSizeThreshold(
    "large-interval-size-threshold", cl::Hidden,
    cl::desc("If the number of valnos in an interval is larger than the "
             "threshold, it is regarded as a large interval."),
    cl::init(100));

static cl::opt<unsigned> LargeIntervalFreqThreshold(
    "large-interval-freq-threshold", cl::Hidden,
    cl::desc("For a large interval, if it has been coalesced with other live "
             "intervals more times than the threshold, stop its coalescing to "
             "control the compile time."),
    cl::init(256));

namespace {

class JoinVals;

class RegisterCoalescer : private LiveRangeEdit::Delegate {
  MachineFunction *MF = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const TargetInstrInfo *TII = nullptr;
  LiveIntervals *LIS = nullptr;
  SlotIndexes *SI = nullptr;
  const MachineLoopInfo *Loops = nullptr;
  RegisterClassInfo RegClassInfo;

  /// Position and VReg of a PHI instruction during coalescing.
  struct PHIValPos {
    SlotIndex SI;    ///< Slot where this PHI occurs.
    Register Reg;    ///< VReg the PHI occurs in.
    unsigned SubReg; ///< Qualifying subregister for Reg.
  };

  /// Map from debug instruction number to PHI position during coalescing.
  DenseMap<unsigned, PHIValPos> PHIValToPos;
  /// Index of, for each VReg, which debug instruction numbers and
  /// corresponding PHIs are sensitive to coalescing. Each VReg may have
  /// multiple PHI defs, at different positions.
  DenseMap<Register, SmallVector<unsigned, 2>> RegToPHIIdx;

  /// Debug variable location tracking -- for each VReg, maintain an
  /// ordered-by-slot-index set of DBG_VALUEs, to help quick
  /// identification of whether coalescing may change location validity.
  using DbgValueLoc = std::pair<SlotIndex, MachineInstr *>;
  DenseMap<Register, std::vector<DbgValueLoc>> DbgVRegToValues;

  /// A LaneMask to remember on which subregister live ranges we need to call
  /// shrinkToUses() later.
  LaneBitmask ShrinkMask;

  /// True if the main range of the currently coalesced intervals should be
  /// checked for smaller live intervals.
  bool ShrinkMainRange = false;

  /// True if the coalescer should aggressively coalesce global copies
  /// in favor of keeping local copies.
  bool JoinGlobalCopies = false;

  /// True if the coalescer should aggressively coalesce fall-thru
  /// blocks exclusively containing copies.
  bool JoinSplitEdges = false;

  /// Copy instructions yet to be coalesced.
  SmallVector<MachineInstr *, 8> WorkList;
  SmallVector<MachineInstr *, 8> LocalWorkList;

  /// Set of instruction pointers that have been erased, and
  /// that may be present in WorkList.
  SmallPtrSet<MachineInstr *, 8> ErasedInstrs;

  /// Dead instructions that are about to be deleted.
  SmallVector<MachineInstr *, 8> DeadDefs;

  /// Virtual registers to be considered for register class inflation.
  SmallVector<Register, 8> InflateRegs;

  /// The collection of live intervals which should have been updated
  /// immediately after rematerialization but delayed until
  /// lateLiveIntervalUpdate is called.
  DenseSet<Register> ToBeUpdated;

  /// Record how many times a large live interval with many valnos
  /// has been tried to join with other live intervals.
  DenseMap<Register, unsigned long> LargeLIVisitCounter;

  /// Recursively eliminate dead defs in DeadDefs.
  void eliminateDeadDefs(LiveRangeEdit *Edit = nullptr);

  /// LiveRangeEdit callback for eliminateDeadDefs().
  void LRE_WillEraseInstruction(MachineInstr *MI) override;

  /// Coalesce the LocalWorkList.
  void coalesceLocals();

  /// Join compatible live intervals
  void joinAllIntervals();

  /// Coalesce copies in the specified MBB, putting
  /// copies that cannot yet be coalesced into WorkList.
  void copyCoalesceInMBB(MachineBasicBlock *MBB);

  /// Tries to coalesce all copies in CurrList. Returns true if any progress
  /// was made.
  bool copyCoalesceWorkList(MutableArrayRef<MachineInstr *> CurrList);

  /// If one def has many copy-like uses, and those copy uses are all
  /// rematerialized, the live interval updates needed for those
  /// rematerializations will be delayed and done all at once instead
  /// of being done multiple times. This is to save compile time because
  /// live interval updates are costly.
  void lateLiveIntervalUpdate();

  /// Check if the incoming value defined by a COPY at \p SLRQ in the subrange
  /// has no value defined in the predecessors. If the incoming value is the
  /// same as defined by the copy itself, the value is considered undefined.
  bool copyValueUndefInPredecessors(LiveRange &S, const MachineBasicBlock *MBB,
                                    LiveQueryResult SLRQ);

  /// Set necessary undef flags on subregister uses after pruning out undef
  /// lane segments from the subrange.
  void setUndefOnPrunedSubRegUses(LiveInterval &LI, Register Reg,
                                  LaneBitmask PrunedLanes);

  /// Attempt to join intervals corresponding to SrcReg/DstReg, which are the
  /// src/dst of the copy instruction CopyMI. This returns true if the copy
  /// was successfully coalesced away. If it is not currently possible to
  /// coalesce this interval, but it may be possible if other things get
  /// coalesced, then it returns true by reference in 'Again'.
  bool joinCopy(MachineInstr *CopyMI, bool &Again,
                SmallPtrSetImpl<MachineInstr *> &CurrentErasedInstrs);

  /// Attempt to join these two intervals. On failure, this
  /// returns false. The output "SrcInt" will not have been modified, so we
  /// can use this information below to update aliases.
  bool joinIntervals(CoalescerPair &CP);

  /// Attempt joining two virtual registers. Return true on success.
  bool joinVirtRegs(CoalescerPair &CP);

  /// If a live interval has many valnos and is coalesced with other
  /// live intervals many times, we regard such a live interval as having
  /// high compile-time cost.
  bool isHighCostLiveInterval(LiveInterval &LI);

  /// Attempt joining with a reserved physreg.
  bool joinReservedPhysReg(CoalescerPair &CP);

  /// Add the LiveRange @p ToMerge as a subregister liverange of @p LI.
  /// Subranges in @p LI which only partially interfere with the desired
  /// LaneMask are split as necessary. @p LaneMask are the lanes that
  /// @p ToMerge will occupy in the coalesced register. @p LI has its subrange
  /// lanemasks already adjusted to the coalesced register.
  void mergeSubRangeInto(LiveInterval &LI, const LiveRange &ToMerge,
                         LaneBitmask LaneMask, CoalescerPair &CP,
                         unsigned DstIdx);

  /// Join the liveranges of two subregisters. Joins @p RRange into
  /// @p LRange; @p RRange may be invalid afterwards.
  void joinSubRegRanges(LiveRange &LRange, LiveRange &RRange,
                        LaneBitmask LaneMask, const CoalescerPair &CP);

  /// We found a non-trivially-coalescable copy. If the source value number is
  /// defined by a copy from the destination reg, see if we can merge these two
  /// destination reg valno# into a single value number, eliminating a copy.
  /// This returns true if an interval was modified.
  bool adjustCopiesBackFrom(const CoalescerPair &CP, MachineInstr *CopyMI);

  /// Return true if there are definitions of IntB
  /// other than BValNo val# that can reach uses of AValNo val# of IntA.
  bool hasOtherReachingDefs(LiveInterval &IntA, LiveInterval &IntB,
                            VNInfo *AValNo, VNInfo *BValNo);

  /// We found a non-trivially-coalescable copy.
  /// If the source value number is defined by a commutable instruction and
  /// its other operand is coalesced to the copy dest register, see if we
  /// can transform the copy into a noop by commuting the definition.
  /// This returns a pair of two flags:
  /// - the first element is true if an interval was modified,
  /// - the second element is true if the destination interval needs
  ///   to be shrunk after deleting the copy.
  std::pair<bool, bool> removeCopyByCommutingDef(const CoalescerPair &CP,
                                                 MachineInstr *CopyMI);

  /// We found a copy which can be moved to its less frequent predecessor.
  bool removePartialRedundancy(const CoalescerPair &CP, MachineInstr &CopyMI);

  /// If the source of a copy is defined by a CheapAsAMove computation,
  /// replace the copy by rematerializing the definition.
  bool reMaterializeDef(const CoalescerPair &CP, MachineInstr *CopyMI,
                        bool &IsDefCopy);

  /// Return true if a copy involving a physreg should be joined.
  bool canJoinPhys(const CoalescerPair &CP);

  /// Replace all defs and uses of SrcReg to DstReg and update the subregister
  /// number if it is not zero. If DstReg is a physical register and the
  /// existing subregister number of the def / use being updated is not zero,
  /// make sure to set it to the correct physical subregister.
  void updateRegDefsUses(Register SrcReg, Register DstReg, unsigned SubIdx);

  /// If the given machine operand reads only undefined lanes add an undef
  /// flag.
  /// This can happen when undef uses were previously concealed by a copy
  /// which we coalesced. Example:
  ///   %0:sub0<def,read-undef> = ...
  ///   %1 = COPY %0          <-- Coalescing COPY reveals undef
  ///      = use %1:sub1      <-- hidden undef use
  void addUndefFlag(const LiveInterval &Int, SlotIndex UseIdx,
                    MachineOperand &MO, unsigned SubRegIdx);

  /// Handle copies of undef values. If the undef value is an incoming
  /// PHI value, it will convert @p CopyMI to an IMPLICIT_DEF.
  /// Returns nullptr if @p CopyMI was not in any way eliminable. Otherwise,
  /// it returns @p CopyMI (which could be an IMPLICIT_DEF at this point).
  MachineInstr *eliminateUndefCopy(MachineInstr *CopyMI);

  /// Check whether or not we should apply the terminal rule on the
  /// destination (Dst) of \p Copy.
  /// When the terminal rule applies, Copy is not profitable to
  /// coalesce.
  /// Dst is terminal if it has exactly one affinity (Dst, Src) and
  /// at least one interference (Dst, Dst2). If Dst is terminal, the
  /// terminal rule consists in checking that at least one of the
  /// interfering nodes, say Dst2, has an affinity of equal or greater
  /// weight with Src.
  /// In that case, Dst2 and Dst will not both be able to be coalesced
  /// with Src. Since Dst2 exposes more coalescing opportunities than
  /// Dst, we can drop \p Copy.
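  ///
  /// A small illustrative sketch (the register names are made up, not taken
  /// from any real input):
  ///   %dst  = COPY %src   ; the only affinity of %dst is (%dst, %src)
  ///   %dst2 = COPY %src   ; (%dst2, %src) has equal or greater weight,
  ///                       ; and %dst interferes with %dst2
  /// At most one of %dst/%dst2 can end up sharing a register with %src, and
  /// coalescing %dst2 exposes at least as many opportunities, so the copy
  /// defining %dst may be skipped.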
  bool applyTerminalRule(const MachineInstr &Copy) const;

  /// Wrapper method for \see LiveIntervals::shrinkToUses.
  /// This method does the proper fixing of the live-ranges when the
  /// aforementioned method returns true.
  void shrinkToUses(LiveInterval *LI,
                    SmallVectorImpl<MachineInstr *> *Dead = nullptr) {
    NumShrinkToUses++;
    if (LIS->shrinkToUses(LI, Dead)) {
      /// Check whether or not \p LI is composed by multiple connected
      /// components and if that is the case, fix that.
      SmallVector<LiveInterval *, 8> SplitLIs;
      LIS->splitSeparateComponents(*LI, SplitLIs);
    }
  }

  /// Wrapper Method to do all the necessary work when an Instruction is
  /// deleted.
  /// Optimizations should use this to make sure that deleted instructions
  /// are always accounted for.
  void deleteInstr(MachineInstr *MI) {
    ErasedInstrs.insert(MI);
    LIS->RemoveMachineInstrFromMaps(*MI);
    MI->eraseFromParent();
  }

  /// Walk over function and initialize the DbgVRegToValues map.
  void buildVRegToDbgValueMap(MachineFunction &MF);

  /// Test whether, after merging, any DBG_VALUEs would refer to a
  /// different value number than before merging, and whether this can
  /// be resolved. If not, mark the DBG_VALUE as being undef.
  void checkMergingChangesDbgValues(CoalescerPair &CP, LiveRange &LHS,
                                    JoinVals &LHSVals, LiveRange &RHS,
                                    JoinVals &RHSVals);

  void checkMergingChangesDbgValuesImpl(Register Reg, LiveRange &OtherRange,
                                        LiveRange &RegRange, JoinVals &Vals2);

public:
  // For legacy pass only.
  RegisterCoalescer() = default;
  RegisterCoalescer &operator=(RegisterCoalescer &&Other) = default;

  RegisterCoalescer(LiveIntervals *LIS, SlotIndexes *SI,
                    const MachineLoopInfo *Loops)
      : LIS(LIS), SI(SI), Loops(Loops) {}

  bool run(MachineFunction &MF);
};

class RegisterCoalescerLegacy : public MachineFunctionPass {
public:
  static char ID; ///< Class identification, replacement for typeinfo

  RegisterCoalescerLegacy() : MachineFunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  MachineFunctionProperties getClearedProperties() const override {
    return MachineFunctionProperties().setIsSSA();
  }

  /// This is the pass entry point.
  bool runOnMachineFunction(MachineFunction &) override;
};

} // end anonymous namespace

char RegisterCoalescerLegacy::ID = 0;

char &llvm::RegisterCoalescerID = RegisterCoalescerLegacy::ID;

INITIALIZE_PASS_BEGIN(RegisterCoalescerLegacy, "register-coalescer",
                      "Register Coalescer", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervalsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SlotIndexesWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfoWrapperPass)
INITIALIZE_PASS_END(RegisterCoalescerLegacy, "register-coalescer",
                    "Register Coalescer", false, false)

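/// A hedged sketch of the patterns isMoveInstr() recognizes (the register
/// names and subregister indices below are illustrative only):
///   %1:sub0 = COPY %2:sub1             -> Dst=%1, DstSub=sub0,
///                                         Src=%2, SrcSub=sub1
///   %1 = SUBREG_TO_REG 0, %2, %subreg.sub0
///                                      -> Dst=%1, DstSub=sub0 (composed with
///                                         %1's own subreg index, if any),
///                                         Src=%2, SrcSub=0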
[[nodiscard]] static bool isMoveInstr(const TargetRegisterInfo &tri,
                                      const MachineInstr *MI, Register &Src,
                                      Register &Dst, unsigned &SrcSub,
                                      unsigned &DstSub) {
  if (MI->isCopy()) {
    Dst = MI->getOperand(0).getReg();
    DstSub = MI->getOperand(0).getSubReg();
    Src = MI->getOperand(1).getReg();
    SrcSub = MI->getOperand(1).getSubReg();
  } else if (MI->isSubregToReg()) {
    Dst = MI->getOperand(0).getReg();
    DstSub = tri.composeSubRegIndices(MI->getOperand(0).getSubReg(),
                                      MI->getOperand(3).getImm());
    Src = MI->getOperand(2).getReg();
    SrcSub = MI->getOperand(2).getSubReg();
  } else
    return false;
  return true;
}

/// Return true if this block should be vacated by the coalescer to eliminate
/// branches. The important cases to handle in the coalescer are critical edges
/// split during phi elimination which contain only copies. Simple blocks that
/// contain non-branches should also be vacated, but this can be handled by an
/// earlier pass similar to early if-conversion.
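///
/// For example, a block of this shape (an illustrative sketch, not taken
/// from any particular input) qualifies, since it contains only copy-like
/// instructions followed by an unconditional branch:
///   bb.3:
///     %5 = COPY %7
///     ...                ; more copies only
///     <unconditional branch to bb.4>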
static bool isSplitEdge(const MachineBasicBlock *MBB) {
  if (MBB->pred_size() != 1 || MBB->succ_size() != 1)
    return false;

  for (const auto &MI : *MBB) {
    if (!MI.isCopyLike() && !MI.isUnconditionalBranch())
      return false;
  }
  return true;
}

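// A hedged example of what setRegisters() computes (the register numbers and
// the sub_32bit index are made up): for %1 = COPY %2.sub_32bit, the pair is
// flipped so that SrcReg=%1 is merged into the sub_32bit lanes of DstReg=%2,
// i.e. SrcIdx=sub_32bit, DstIdx=0, Flipped=true.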
bool CoalescerPair::setRegisters(const MachineInstr *MI) {
  SrcReg = DstReg = Register();
  SrcIdx = DstIdx = 0;
  NewRC = nullptr;
  Flipped = CrossClass = false;

  Register Src, Dst;
  unsigned SrcSub = 0, DstSub = 0;
  if (!isMoveInstr(TRI, MI, Src, Dst, SrcSub, DstSub))
    return false;
  Partial = SrcSub || DstSub;

  // If one register is a physreg, it must be Dst.
  if (Src.isPhysical()) {
    if (Dst.isPhysical())
      return false;
    std::swap(Src, Dst);
    std::swap(SrcSub, DstSub);
    Flipped = true;
  }

  const MachineRegisterInfo &MRI = MI->getMF()->getRegInfo();
  const TargetRegisterClass *SrcRC = MRI.getRegClass(Src);

  if (Dst.isPhysical()) {
    // Eliminate DstSub on a physreg.
    if (DstSub) {
      Dst = TRI.getSubReg(Dst, DstSub);
      if (!Dst)
        return false;
      DstSub = 0;
    }

    // Eliminate SrcSub by picking a corresponding Dst superregister.
    if (SrcSub) {
      Dst = TRI.getMatchingSuperReg(Dst, SrcSub, SrcRC);
      if (!Dst)
        return false;
    } else if (!SrcRC->contains(Dst)) {
      return false;
    }
  } else {
    // Both registers are virtual.
    const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);

    // Both registers have subreg indices.
    if (SrcSub && DstSub) {
      // Copies between different sub-registers are never coalescable.
      if (Src == Dst && SrcSub != DstSub)
        return false;

      NewRC = TRI.getCommonSuperRegClass(SrcRC, SrcSub, DstRC, DstSub, SrcIdx,
                                         DstIdx);
      if (!NewRC)
        return false;
    } else if (DstSub) {
      // SrcReg will be merged with a sub-register of DstReg.
      SrcIdx = DstSub;
      NewRC = TRI.getMatchingSuperRegClass(DstRC, SrcRC, DstSub);
    } else if (SrcSub) {
      // DstReg will be merged with a sub-register of SrcReg.
      DstIdx = SrcSub;
      NewRC = TRI.getMatchingSuperRegClass(SrcRC, DstRC, SrcSub);
    } else {
      // This is a straight copy without sub-registers.
      NewRC = TRI.getCommonSubClass(DstRC, SrcRC);
    }

    // The combined constraint may be impossible to satisfy.
    if (!NewRC)
      return false;

    // Prefer SrcReg to be a sub-register of DstReg.
    // FIXME: Coalescer should support subregs symmetrically.
    if (DstIdx && !SrcIdx) {
      std::swap(Src, Dst);
      std::swap(SrcIdx, DstIdx);
      Flipped = !Flipped;
    }

    CrossClass = NewRC != DstRC || NewRC != SrcRC;
  }
  // Check our invariants
  assert(Src.isVirtual() && "Src must be virtual");
  assert(!(Dst.isPhysical() && DstSub) && "Cannot have a physical SubIdx");
  SrcReg = Src;
  DstReg = Dst;
  return true;
}

bool CoalescerPair::flip() {
  if (DstReg.isPhysical())
    return false;
  std::swap(SrcReg, DstReg);
  std::swap(SrcIdx, DstIdx);
  Flipped = !Flipped;
  return true;
}

bool CoalescerPair::isCoalescable(const MachineInstr *MI) const {
  if (!MI)
    return false;
  Register Src, Dst;
  unsigned SrcSub = 0, DstSub = 0;
  if (!isMoveInstr(TRI, MI, Src, Dst, SrcSub, DstSub))
    return false;

  // Find the virtual register that is SrcReg.
  if (Dst == SrcReg) {
    std::swap(Src, Dst);
    std::swap(SrcSub, DstSub);
  } else if (Src != SrcReg) {
    return false;
  }

  // Now check that Dst matches DstReg.
  if (DstReg.isPhysical()) {
    if (!Dst.isPhysical())
      return false;
    assert(!DstIdx && !SrcIdx && "Inconsistent CoalescerPair state.");
    // DstSub could be set for a physreg from INSERT_SUBREG.
    if (DstSub)
      Dst = TRI.getSubReg(Dst, DstSub);
    // Full copy of Src.
    if (!SrcSub)
      return DstReg == Dst;
    // This is a partial register copy. Check that the parts match.
    return Register(TRI.getSubReg(DstReg, SrcSub)) == Dst;
  }

  // DstReg is virtual.
  if (DstReg != Dst)
    return false;
  // Registers match, do the subregisters line up?
  return TRI.composeSubRegIndices(SrcIdx, SrcSub) ==
         TRI.composeSubRegIndices(DstIdx, DstSub);
}

void RegisterCoalescerLegacy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addUsedIfAvailable<SlotIndexesWrapperPass>();
  AU.addRequired<LiveIntervalsWrapperPass>();
  AU.addPreserved<LiveIntervalsWrapperPass>();
  AU.addPreserved<SlotIndexesWrapperPass>();
  AU.addRequired<MachineLoopInfoWrapperPass>();
  AU.addPreserved<MachineLoopInfoWrapperPass>();
  AU.addPreservedID(MachineDominatorsID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void RegisterCoalescer::eliminateDeadDefs(LiveRangeEdit *Edit) {
  if (Edit) {
    Edit->eliminateDeadDefs(DeadDefs);
    return;
  }
  SmallVector<Register, 8> NewRegs;
  LiveRangeEdit(nullptr, NewRegs, *MF, *LIS, nullptr, this)
      .eliminateDeadDefs(DeadDefs);
}

void RegisterCoalescer::LRE_WillEraseInstruction(MachineInstr *MI) {
  // MI may be in WorkList. Make sure we don't visit it.
  ErasedInstrs.insert(MI);
}

bool RegisterCoalescer::adjustCopiesBackFrom(const CoalescerPair &CP,
                                             MachineInstr *CopyMI) {
  assert(!CP.isPartial() && "This doesn't work for partial copies.");
  assert(!CP.isPhys() && "This doesn't work for physreg copies.");

  LiveInterval &IntA =
      LIS->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
  LiveInterval &IntB =
      LIS->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());
  SlotIndex CopyIdx = LIS->getInstructionIndex(*CopyMI).getRegSlot();

  // We have a non-trivially-coalescable copy with IntA being the source and
  // IntB being the dest, thus this defines a value number in IntB. If the
  // source value number (in IntA) is defined by a copy from B, see if we can
  // merge these two pieces of B into a single value number, eliminating a copy.
  // For example:
  //
  //  A3 = B0
  //    ...
  //  B1 = A3      <- this copy
  //
  // In this case, B0 can be extended to where the B1 copy lives, allowing the
  // B1 value number to be replaced with B0 (which simplifies the B
  // liveinterval).

  // BValNo is a value number in B that is defined by a copy from A. 'B1' in
  // the example above.
  LiveInterval::iterator BS = IntB.FindSegmentContaining(CopyIdx);
  if (BS == IntB.end())
    return false;
  VNInfo *BValNo = BS->valno;

  // Get the location that B is defined at. Two options: either this value has
  // an unknown definition point or it is defined at CopyIdx. If unknown, we
  // can't process it.
  if (BValNo->def != CopyIdx)
    return false;

  // AValNo is the value number in A that defines the copy, A3 in the example.
  SlotIndex CopyUseIdx = CopyIdx.getRegSlot(true);
  LiveInterval::iterator AS = IntA.FindSegmentContaining(CopyUseIdx);
  // The live segment might not exist after fun with physreg coalescing.
  if (AS == IntA.end())
    return false;
  VNInfo *AValNo = AS->valno;

  // If AValNo is defined as a copy from IntB, we can potentially process this.
  // Get the instruction that defines this value number.
  MachineInstr *ACopyMI = LIS->getInstructionFromIndex(AValNo->def);
  // Don't allow any partial copies, even if isCoalescable() allows them.
  if (!CP.isCoalescable(ACopyMI) || !ACopyMI->isFullCopy())
    return false;

  // Get the Segment in IntB that this value number starts with.
  LiveInterval::iterator ValS =
      IntB.FindSegmentContaining(AValNo->def.getPrevSlot());
  if (ValS == IntB.end())
    return false;

  // Make sure that the end of the live segment is inside the same block as
  // CopyMI.
  MachineInstr *ValSEndInst =
      LIS->getInstructionFromIndex(ValS->end.getPrevSlot());
  if (!ValSEndInst || ValSEndInst->getParent() != CopyMI->getParent())
    return false;

  // Okay, we now know that ValS ends in the same block that the CopyMI
  // live-range starts. If there are no intervening live segments between them
  // in IntB, we can merge them.
  if (ValS + 1 != BS)
    return false;

  LLVM_DEBUG(dbgs() << "Extending: " << printReg(IntB.reg(), TRI));

  SlotIndex FillerStart = ValS->end, FillerEnd = BS->start;
  // We are about to delete CopyMI, so need to remove it as the 'instruction
  // that defines this value #'. Update the valnum with the new defining
  // instruction #.
  BValNo->def = FillerStart;

  // Okay, we can merge them. We need to insert a new liverange:
  // [ValS.end, BS.begin) of either value number, then we merge the
  // two value numbers.
  IntB.addSegment(LiveInterval::Segment(FillerStart, FillerEnd, BValNo));

  // Okay, merge "B1" into the same value number as "B0".
  if (BValNo != ValS->valno)
    IntB.MergeValueNumberInto(BValNo, ValS->valno);

  // Do the same for the subregister segments.
  for (LiveInterval::SubRange &S : IntB.subranges()) {
    // Check for SubRange Segments of the form [1234r,1234d:0) which can be
    // removed to prevent creating bogus SubRange Segments.
    LiveInterval::iterator SS = S.FindSegmentContaining(CopyIdx);
    if (SS != S.end() && SlotIndex::isSameInstr(SS->start, SS->end)) {
      S.removeSegment(*SS, true);
      continue;
    }
    // The subrange may have ended before FillerStart. If so, extend it.
    if (!S.getVNInfoAt(FillerStart)) {
      SlotIndex BBStart =
          LIS->getMBBStartIdx(LIS->getMBBFromIndex(FillerStart));
      S.extendInBlock(BBStart, FillerStart);
    }
    VNInfo *SubBValNo = S.getVNInfoAt(CopyIdx);
    S.addSegment(LiveInterval::Segment(FillerStart, FillerEnd, SubBValNo));
    VNInfo *SubValSNo = S.getVNInfoAt(AValNo->def.getPrevSlot());
    if (SubBValNo != SubValSNo)
      S.MergeValueNumberInto(SubBValNo, SubValSNo);
  }

  LLVM_DEBUG(dbgs() << " result = " << IntB << '\n');

  // If the source instruction was killing the source register before the
  // merge, unset the isKill marker given the live range has been extended.
  int UIdx =
      ValSEndInst->findRegisterUseOperandIdx(IntB.reg(), /*TRI=*/nullptr, true);
  if (UIdx != -1) {
    ValSEndInst->getOperand(UIdx).setIsKill(false);
  }

  // Rewrite the copy.
  CopyMI->substituteRegister(IntA.reg(), IntB.reg(), 0, *TRI);
  // If the copy instruction was killing the destination register or any
  // subrange before the merge trim the live range.
  bool RecomputeLiveRange = AS->end == CopyIdx;
  if (!RecomputeLiveRange) {
    for (LiveInterval::SubRange &S : IntA.subranges()) {
      LiveInterval::iterator SS = S.FindSegmentContaining(CopyUseIdx);
      if (SS != S.end() && SS->end == CopyIdx) {
        RecomputeLiveRange = true;
        break;
      }
    }
  }
  if (RecomputeLiveRange)
    shrinkToUses(&IntA);

  ++numExtends;
  return true;
}

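// An illustrative sketch of what hasOtherReachingDefs() guards against (the
// slot indices are made up): if AValNo's segment in IntA is [96r,144r) and
// IntB holds a segment of some value other than BValNo overlapping it, say
// [112r,160r), then that other def of IntB could reach uses of AValNo after
// the registers are merged, so the caller must give up.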
bool RegisterCoalescer::hasOtherReachingDefs(LiveInterval &IntA,
                                             LiveInterval &IntB,
                                             VNInfo *AValNo, VNInfo *BValNo) {
  // If AValNo has PHI kills, conservatively assume that IntB defs can reach
  // the PHI values.
  if (LIS->hasPHIKill(IntA, AValNo))
    return true;

  for (LiveRange::Segment &ASeg : IntA.segments) {
    if (ASeg.valno != AValNo)
      continue;
    LiveInterval::iterator BI = llvm::upper_bound(IntB, ASeg.start);
    if (BI != IntB.begin())
      --BI;
    for (; BI != IntB.end() && ASeg.end >= BI->start; ++BI) {
      if (BI->valno == BValNo)
        continue;
      if (BI->start <= ASeg.start && BI->end > ASeg.start)
        return true;
      if (BI->start > ASeg.start && BI->start < ASeg.end)
        return true;
    }
  }
  return false;
}

/// Copy segments with value number @p SrcValNo from liverange @p Src to live
/// range @p Dst and use value number @p DstValNo there.
static std::pair<bool, bool> addSegmentsWithValNo(LiveRange &Dst,
                                                  VNInfo *DstValNo,
                                                  const LiveRange &Src,
                                                  const VNInfo *SrcValNo) {
  bool Changed = false;
  bool MergedWithDead = false;
  for (const LiveRange::Segment &S : Src.segments) {
    if (S.valno != SrcValNo)
      continue;
    // This is adding a segment from Src that ends in a copy that is about
    // to be removed. This segment is going to be merged with a pre-existing
    // segment in Dst. This works, except in cases when the corresponding
    // segment in Dst is dead. For example: adding [192r,208r:1) from Src
    // to [208r,208d:1) in Dst would create [192r,208d:1) in Dst.
    // Recognize such cases, so that the segments can be shrunk.
    LiveRange::Segment Added = LiveRange::Segment(S.start, S.end, DstValNo);
    LiveRange::Segment &Merged = *Dst.addSegment(Added);
    if (Merged.end.isDead())
      MergedWithDead = true;
    Changed = true;
  }
  return std::make_pair(Changed, MergedWithDead);
}

std::pair<bool, bool>
RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
                                            MachineInstr *CopyMI) {
  assert(!CP.isPhys());

  LiveInterval &IntA =
      LIS->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
  LiveInterval &IntB =
      LIS->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());

  // We found a non-trivially-coalescable copy with IntA being the source and
  // IntB being the dest, thus this defines a value number in IntB. If the
  // source value number (in IntA) is defined by a commutable instruction and
  // its other operand is coalesced to the copy dest register, see if we can
  // transform the copy into a noop by commuting the definition. For example,
  //
  //  A3 = op A2 killed B0
  //    ...
  //  B1 = A3      <- this copy
  //    ...
  //     = op A3   <- more uses
  //
  // ==>
  //
  //  B2 = op B0 killed A2
  //    ...
  //  B1 = B2      <- now an identity copy
  //    ...
  //     = op B2   <- more uses

  // BValNo is a value number in B that is defined by a copy from A. 'B1' in
  // the example above.
  SlotIndex CopyIdx = LIS->getInstructionIndex(*CopyMI).getRegSlot();
  VNInfo *BValNo = IntB.getVNInfoAt(CopyIdx);
  assert(BValNo != nullptr && BValNo->def == CopyIdx);

  // AValNo is the value number in A that defines the copy, A3 in the example.
  VNInfo *AValNo = IntA.getVNInfoAt(CopyIdx.getRegSlot(true));
  assert(AValNo && !AValNo->isUnused() && "COPY source not live");
  if (AValNo->isPHIDef())
    return {false, false};
  MachineInstr *DefMI = LIS->getInstructionFromIndex(AValNo->def);
  if (!DefMI)
    return {false, false};
  if (!DefMI->isCommutable())
    return {false, false};
  // If DefMI is a two-address instruction then commuting it will change the
  // destination register.
  int DefIdx = DefMI->findRegisterDefOperandIdx(IntA.reg(), /*TRI=*/nullptr);
  assert(DefIdx != -1);
  unsigned UseOpIdx;
  if (!DefMI->isRegTiedToUseOperand(DefIdx, &UseOpIdx))
    return {false, false};

  // If DefMI only defines the register partially, we can't replace uses of the
  // full register with the new destination register after commuting it.
  if (IntA.reg().isVirtual() &&
      none_of(DefMI->all_defs(), [&](const MachineOperand &DefMO) {
        return DefMO.getReg() == IntA.reg() && !DefMO.getSubReg();
      }))
    return {false, false};

  // FIXME: The code below tries to commute 'UseOpIdx' operand with some other
  // commutable operand which is expressed by the 'CommuteAnyOperandIndex'
  // value passed to the method. That _other_ operand is chosen by
  // the findCommutedOpIndices() method.
  //
  // That is obviously an area for improvement in case of instructions having
  // more than 2 operands. For example, if some instruction has 3 commutable
  // operands then all possible variants (i.e. op#1<->op#2, op#1<->op#3,
  // op#2<->op#3) of commute transformation should be considered/tried here.
  unsigned NewDstIdx = TargetInstrInfo::CommuteAnyOperandIndex;
  if (!TII->findCommutedOpIndices(*DefMI, UseOpIdx, NewDstIdx))
    return {false, false};

  MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
  Register NewReg = NewDstMO.getReg();
  if (NewReg != IntB.reg() || !IntB.Query(AValNo->def).isKill())
    return {false, false};

  // Make sure there are no other definitions of IntB that would reach the
  // uses which the new definition can reach.
  if (hasOtherReachingDefs(IntA, IntB, AValNo, BValNo))
    return {false, false};

  // If some of the uses of IntA.reg are already coalesced away, return false;
  // it's not possible to determine whether it's safe to perform the coalescing.
  for (MachineOperand &MO : MRI->use_nodbg_operands(IntA.reg())) {
    MachineInstr *UseMI = MO.getParent();
    unsigned OpNo = &MO - &UseMI->getOperand(0);
    SlotIndex UseIdx = LIS->getInstructionIndex(*UseMI);
    LiveInterval::iterator US = IntA.FindSegmentContaining(UseIdx);
    if (US == IntA.end() || US->valno != AValNo)
      continue;
    // If this use is tied to a def, we can't rewrite the register.
    if (UseMI->isRegTiedToDefOperand(OpNo))
      return {false, false};
  }

  LLVM_DEBUG(dbgs() << "\tremoveCopyByCommutingDef: " << AValNo->def << '\t'
                    << *DefMI);

  // At this point we have decided that it is legal to do this
  // transformation. Start by commuting the instruction.
  MachineBasicBlock *MBB = DefMI->getParent();
  MachineInstr *NewMI =
      TII->commuteInstruction(*DefMI, false, UseOpIdx, NewDstIdx);
  if (!NewMI)
    return {false, false};
  if (IntA.reg().isVirtual() && IntB.reg().isVirtual() &&
      !MRI->constrainRegClass(IntB.reg(), MRI->getRegClass(IntA.reg())))
    return {false, false};
  if (NewMI != DefMI) {
    LIS->ReplaceMachineInstrInMaps(*DefMI, *NewMI);
    MachineBasicBlock::iterator Pos = DefMI;
    MBB->insert(Pos, NewMI);
    MBB->erase(DefMI);
  }

  // If ALR and BLR overlap and the end of BLR extends beyond the end of ALR,
  // e.g.
  //  A = or A, B
  //  ...
  //  B = A
  //  ...
  //  C = killed A
  //  ...
  //    = B

  // Update uses of IntA of the specific Val# with IntB.
  for (MachineOperand &UseMO :
       llvm::make_early_inc_range(MRI->use_operands(IntA.reg()))) {
    if (UseMO.isUndef())
      continue;
    MachineInstr *UseMI = UseMO.getParent();
    if (UseMI->isDebugInstr()) {
      // FIXME These don't have an instruction index. Not clear we have enough
      // info to decide whether to do this replacement or not. For now do it.
      UseMO.setReg(NewReg);
      continue;
    }
    SlotIndex UseIdx = LIS->getInstructionIndex(*UseMI).getRegSlot(true);
    LiveInterval::iterator US = IntA.FindSegmentContaining(UseIdx);
    assert(US != IntA.end() && "Use must be live");
    if (US->valno != AValNo)
      continue;
    // Kill flags are no longer accurate. They are recomputed after RA.
    UseMO.setIsKill(false);
    if (NewReg.isPhysical())
      UseMO.substPhysReg(NewReg, *TRI);
    else
      UseMO.setReg(NewReg);
    if (UseMI == CopyMI)
      continue;
    if (!UseMI->isCopy())
      continue;
    if (UseMI->getOperand(0).getReg() != IntB.reg() ||
        UseMI->getOperand(0).getSubReg())
      continue;

    // This copy will become a noop. If it's defining a new val#, merge it into
    // BValNo.
    SlotIndex DefIdx = UseIdx.getRegSlot();
    VNInfo *DVNI = IntB.getVNInfoAt(DefIdx);
    if (!DVNI)
      continue;
    LLVM_DEBUG(dbgs() << "\t\tnoop: " << DefIdx << '\t' << *UseMI);
    assert(DVNI->def == DefIdx);
    BValNo = IntB.MergeValueNumberInto(DVNI, BValNo);
    for (LiveInterval::SubRange &S : IntB.subranges()) {
      VNInfo *SubDVNI = S.getVNInfoAt(DefIdx);
      if (!SubDVNI)
        continue;
      VNInfo *SubBValNo = S.getVNInfoAt(CopyIdx);
      assert(SubBValNo->def == CopyIdx);
      S.MergeValueNumberInto(SubDVNI, SubBValNo);
    }

    deleteInstr(UseMI);
  }

  // Extend BValNo by merging in IntA live segments of AValNo. Val# definition
  // is updated.
  bool ShrinkB = false;
  BumpPtrAllocator &Allocator = LIS->getVNInfoAllocator();
  if (IntA.hasSubRanges() || IntB.hasSubRanges()) {
    if (!IntA.hasSubRanges()) {
      LaneBitmask Mask = MRI->getMaxLaneMaskForVReg(IntA.reg());
      IntA.createSubRangeFrom(Allocator, Mask, IntA);
    } else if (!IntB.hasSubRanges()) {
      LaneBitmask Mask = MRI->getMaxLaneMaskForVReg(IntB.reg());
      IntB.createSubRangeFrom(Allocator, Mask, IntB);
    }
    SlotIndex AIdx = CopyIdx.getRegSlot(true);
    LaneBitmask MaskA;
    const SlotIndexes &Indexes = *LIS->getSlotIndexes();
    for (LiveInterval::SubRange &SA : IntA.subranges()) {
      VNInfo *ASubValNo = SA.getVNInfoAt(AIdx);
      // Even if we are dealing with a full copy, some lanes can
      // still be undefined.
      // E.g.,
      //   undef A.subLow = ...
      //   B = COPY A <== A.subHigh is undefined here and does
      //                  not have a value number.
      if (!ASubValNo)
        continue;
      MaskA |= SA.LaneMask;

      IntB.refineSubRanges(
          Allocator, SA.LaneMask,
          [&Allocator, &SA, CopyIdx, ASubValNo,
           &ShrinkB](LiveInterval::SubRange &SR) {
            VNInfo *BSubValNo = SR.empty() ? SR.getNextValue(CopyIdx, Allocator)
                                           : SR.getVNInfoAt(CopyIdx);
            assert(BSubValNo != nullptr);
            auto P = addSegmentsWithValNo(SR, BSubValNo, SA, ASubValNo);
            ShrinkB |= P.second;
            if (P.first)
              BSubValNo->def = ASubValNo->def;
          },
          Indexes, *TRI);
    }
    // Go over all subranges of IntB that have not been covered by IntA,
    // and delete the segments starting at CopyIdx. This can happen if
    // IntA has undef lanes that are defined in IntB.
    for (LiveInterval::SubRange &SB : IntB.subranges()) {
      if ((SB.LaneMask & MaskA).any())
        continue;
      if (LiveRange::Segment *S = SB.getSegmentContaining(CopyIdx))
        if (S->start.getBaseIndex() == CopyIdx.getBaseIndex())
          SB.removeSegment(*S, true);
    }
  }

  BValNo->def = AValNo->def;
  auto P = addSegmentsWithValNo(IntB, BValNo, IntA, AValNo);
  ShrinkB |= P.second;
  LLVM_DEBUG(dbgs() << "\t\textended: " << IntB << '\n');

  LIS->removeVRegDefAt(IntA, AValNo->def);

  LLVM_DEBUG(dbgs() << "\t\ttrimmed: " << IntA << '\n');
  ++numCommutes;
  return {true, ShrinkB};
}

/// For copy B = A in BB2, if A is defined by A = B in BB0 which is a
/// predecessor of BB2, and if B is not redefined on the way from A = B
/// in BB0 to B = A in BB2, then B = A in BB2 is partially redundant if the
/// execution goes through the path from BB0 to BB2. We may move B = A
/// to the predecessor that lacks such a reversed copy.
/// So we will transform the program from:
///   BB0:
///      A = B;    BB1:
///       ...         ...
///     /     \      /
///             BB2:
///               ...
///               B = A;
///
/// to:
///
///   BB0:        BB1:
///      A = B;        ...
///       ...          B = A;
///     /     \      /
///             BB2:
///               ...
///
/// A special case is when BB0 and BB2 are the same BB which is the only
/// BB in a loop:
///   BB1:
///        ...
///   BB0/BB2:  ----
///        B = A;   |
///        ...      |
///        A = B;   |
///          |-------
///          |
/// We may hoist B = A from BB0/BB2 to BB1.
///
/// The major preconditions for correctness to remove such partial
/// redundancy include:
/// 1. A in B = A in BB2 is defined by a PHI in BB2, and one operand of
///    the PHI is defined by the reversed copy A = B in BB0.
/// 2. No B is referenced from the start of BB2 to B = A.
/// 3. No B is defined from A = B to the end of BB0.
/// 4. BB1 has only one successor.
///
/// 2 and 4 implicitly ensure B is not live at the end of BB1.
/// 4 guarantees BB2 is hotter than BB1, so we can only move a copy to a
/// colder place, which not only prevents an endless loop but also makes
/// sure the movement of the copy is beneficial.
bool RegisterCoalescer::removePartialRedundancy(const CoalescerPair &CP,
                                                MachineInstr &CopyMI) {
  assert(!CP.isPhys());
  if (!CopyMI.isFullCopy())
    return false;

  MachineBasicBlock &MBB = *CopyMI.getParent();
  // If this block is the target of an invoke/inlineasm_br, moving the copy
  // into the predecessor is trickier, and we don't handle it.
  if (MBB.isEHPad() || MBB.isInlineAsmBrIndirectTarget())
    return false;

  if (MBB.pred_size() != 2)
    return false;

  LiveInterval &IntA =
      LIS->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
  LiveInterval &IntB =
      LIS->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());

  // A is defined by PHI at the entry of MBB.
  SlotIndex CopyIdx = LIS->getInstructionIndex(CopyMI).getRegSlot(true);
  VNInfo *AValNo = IntA.getVNInfoAt(CopyIdx);
  assert(AValNo && !AValNo->isUnused() && "COPY source not live");
  if (!AValNo->isPHIDef())
    return false;

  // No B is referenced before CopyMI in MBB.
  if (IntB.overlaps(LIS->getMBBStartIdx(&MBB), CopyIdx))
    return false;

  // MBB has two predecessors: one contains A = B so no copy will be inserted
  // for it. The other one will have a copy moved from MBB.
  bool FoundReverseCopy = false;
  MachineBasicBlock *CopyLeftBB = nullptr;
  for (MachineBasicBlock *Pred : MBB.predecessors()) {
    VNInfo *PVal = IntA.getVNInfoBefore(LIS->getMBBEndIdx(Pred));
    MachineInstr *DefMI = LIS->getInstructionFromIndex(PVal->def);
    if (!DefMI || !DefMI->isFullCopy()) {
      CopyLeftBB = Pred;
      continue;
    }
    // Check DefMI is a reverse copy and it is in BB Pred.
    if (DefMI->getOperand(0).getReg() != IntA.reg() ||
        DefMI->getOperand(1).getReg() != IntB.reg() ||
        DefMI->getParent() != Pred) {
      CopyLeftBB = Pred;
      continue;
    }
    // If there is any other def of B after DefMI and before the end of Pred,
    // we need to keep the copy of B = A at the end of Pred if we remove
    // B = A from MBB.
    bool ValB_Changed = false;
    for (auto *VNI : IntB.valnos) {
      if (VNI->isUnused())
        continue;
      if (PVal->def < VNI->def && VNI->def < LIS->getMBBEndIdx(Pred)) {
        ValB_Changed = true;
        break;
      }
    }
    if (ValB_Changed) {
      CopyLeftBB = Pred;
      continue;
    }
    FoundReverseCopy = true;
  }

  // If no reverse copy is found in predecessors, nothing to do.
  if (!FoundReverseCopy)
    return false;

  // If CopyLeftBB is nullptr, it means every predecessor of MBB contains a
  // reverse copy, and CopyMI can be removed trivially if only IntA/IntB is
  // updated. If CopyLeftBB is not nullptr, move CopyMI from MBB to CopyLeftBB
  // and update IntA/IntB.
  //
  // If CopyLeftBB is not nullptr, ensure CopyLeftBB has a single succ so
  // MBB is hotter than CopyLeftBB.
  if (CopyLeftBB && CopyLeftBB->succ_size() > 1)
    return false;

  // Now (almost sure it's) ok to move copy.
| 1191 | if (CopyLeftBB) { |
| 1192 | // Position in CopyLeftBB where we should insert new copy. |
| 1193 | auto InsPos = CopyLeftBB->getFirstTerminator(); |
| 1194 | |
| 1195 | // Make sure that B isn't referenced in the terminators (if any) at the end |
| 1196 | // of the predecessor since we're about to insert a new definition of B |
| 1197 | // before them. |
| 1198 | if (InsPos != CopyLeftBB->end()) { |
| 1199 | SlotIndex InsPosIdx = LIS->getInstructionIndex(Instr: *InsPos).getRegSlot(EC: true); |
| 1200 | if (IntB.overlaps(Start: InsPosIdx, End: LIS->getMBBEndIdx(mbb: CopyLeftBB))) |
| 1201 | return false; |
| 1202 | } |
| 1203 | |
| 1204 | LLVM_DEBUG(dbgs() << "\tremovePartialRedundancy: Move the copy to " |
| 1205 | << printMBBReference(*CopyLeftBB) << '\t' << CopyMI); |
| 1206 | |
| 1207 | // Insert new copy to CopyLeftBB. |
| 1208 | MachineInstr *NewCopyMI = BuildMI(BB&: *CopyLeftBB, I: InsPos, MIMD: CopyMI.getDebugLoc(), |
| 1209 | MCID: TII->get(Opcode: TargetOpcode::COPY), DestReg: IntB.reg()) |
| 1210 | .addReg(RegNo: IntA.reg()); |
| 1211 | SlotIndex NewCopyIdx = |
| 1212 | LIS->InsertMachineInstrInMaps(MI&: *NewCopyMI).getRegSlot(); |
| 1213 | IntB.createDeadDef(Def: NewCopyIdx, VNIAlloc&: LIS->getVNInfoAllocator()); |
| 1214 | for (LiveInterval::SubRange &SR : IntB.subranges()) |
| 1215 | SR.createDeadDef(Def: NewCopyIdx, VNIAlloc&: LIS->getVNInfoAllocator()); |
| 1216 | |
| 1217 | // If the newly created Instruction has an address of an instruction that |
| 1218 | // was deleted before (object recycled by the allocator) it needs to be |
| 1219 | // removed from the deleted list. |
| 1220 | ErasedInstrs.erase(Ptr: NewCopyMI); |
| 1221 | } else { |
| 1222 | LLVM_DEBUG(dbgs() << "\tremovePartialRedundancy: Remove the copy from " |
| 1223 | << printMBBReference(MBB) << '\t' << CopyMI); |
| 1224 | } |
| 1225 | |
| 1226 | const bool IsUndefCopy = CopyMI.getOperand(i: 1).isUndef(); |
| 1227 | |
| 1228 | // Remove CopyMI. |
| 1229 | // Note: This is fine to remove the copy before updating the live-ranges. |
| 1230 | // While updating the live-ranges, we only look at slot indices and |
| 1231 | // never go back to the instruction. |
| 1232 | // Mark instructions as deleted. |
| 1233 | deleteInstr(MI: &CopyMI); |
| 1234 | |
| 1235 | // Update the liveness. |
| 1236 | SmallVector<SlotIndex, 8> EndPoints; |
| 1237 | VNInfo *BValNo = IntB.Query(CopyIdx).valueOutOrDead(); |
| 1238 | LIS->pruneValue(*static_cast<LiveRange *>(&IntB), CopyIdx.getRegSlot(), |
| 1239 | &EndPoints); |
| 1240 | BValNo->markUnused(); |
| 1241 | |
| 1242 | if (IsUndefCopy) { |
| 1243 | // We're introducing an undef phi def, and need to set undef on any users of |
| 1244 | // the previously local def to avoid artificially extending the lifetime |
| 1245 | // through the block. |
| 1246 | for (MachineOperand &MO : MRI->use_nodbg_operands(IntB.reg())) { |
| 1247 | const MachineInstr &MI = *MO.getParent(); |
| 1248 | SlotIndex UseIdx = LIS->getInstructionIndex(MI); |
| 1249 | if (!IntB.liveAt(UseIdx)) |
| 1250 | MO.setIsUndef(true); |
| 1251 | } |
| 1252 | } |
| 1253 | |
| 1254 | // Extend IntB to the EndPoints of its original live interval. |
| 1255 | LIS->extendToIndices(IntB, EndPoints); |
| 1256 | |
| 1257 | // Now, do the same for its subranges. |
| 1258 | for (LiveInterval::SubRange &SR : IntB.subranges()) { |
| 1259 | EndPoints.clear(); |
| 1260 | VNInfo *BValNo = SR.Query(CopyIdx).valueOutOrDead(); |
| 1261 | assert(BValNo && "All sublanes should be live"); |
| 1262 | LIS->pruneValue(SR, CopyIdx.getRegSlot(), &EndPoints); |
| 1263 | BValNo->markUnused(); |
| 1264 | // We can have a situation where the result of the original copy is live, |
| 1265 | // but is immediately dead in this subrange, e.g. [336r,336d:0). That makes |
| 1266 | // the copy appear as an endpoint from pruneValue(), but we don't want it |
| 1267 | // to because the copy has been removed. We can go ahead and remove that |
| 1268 | // endpoint; there is no other situation here that there could be a use at |
| 1269 | // the same place as we know that the copy is a full copy. |
| 1270 | for (unsigned I = 0; I != EndPoints.size();) { |
| 1271 | if (SlotIndex::isSameInstr(EndPoints[I], CopyIdx)) { |
| 1272 | EndPoints[I] = EndPoints.back(); |
| 1273 | EndPoints.pop_back(); |
| 1274 | continue; |
| 1275 | } |
| 1276 | ++I; |
| 1277 | } |
| 1278 | SmallVector<SlotIndex, 8> Undefs; |
| 1279 | IntB.computeSubRangeUndefs(Undefs, SR.LaneMask, *MRI, |
| 1280 | *LIS->getSlotIndexes()); |
| 1281 | LIS->extendToIndices(SR, EndPoints, Undefs); |
| 1282 | } |
| 1283 | // If any dead defs were extended, truncate them. |
| 1284 | shrinkToUses(&IntB); |
| 1285 | |
| 1286 | // Finally, update the live-range of IntA. |
| 1287 | shrinkToUses(&IntA); |
| 1288 | return true; |
| 1289 | } |
| 1290 | |
| 1291 | bool RegisterCoalescer::reMaterializeDef(const CoalescerPair &CP, |
| 1292 | MachineInstr *CopyMI, |
| 1293 | bool &IsDefCopy) { |
| 1294 | IsDefCopy = false; |
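| | // Normalize the (SrcReg, DstReg) view of the copy: when the coalescer pair |
| | // is flipped, CP's destination is the copy's source and vice versa. |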
| 1295 | Register SrcReg = CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg(); |
| 1296 | unsigned SrcIdx = CP.isFlipped() ? CP.getDstIdx() : CP.getSrcIdx(); |
| 1297 | Register DstReg = CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg(); |
| 1298 | unsigned DstIdx = CP.isFlipped() ? CP.getSrcIdx() : CP.getDstIdx(); |
| 1299 | if (SrcReg.isPhysical()) |
| 1300 | return false; |
| 1301 | |
| 1302 | LiveInterval &SrcInt = LIS->getInterval(SrcReg); |
| 1303 | SlotIndex CopyIdx = LIS->getInstructionIndex(*CopyMI); |
| 1304 | VNInfo *ValNo = SrcInt.Query(CopyIdx).valueIn(); |
| 1305 | if (!ValNo) |
| 1306 | return false; |
| 1307 | if (ValNo->isPHIDef() || ValNo->isUnused()) |
| 1308 | return false; |
| 1309 | MachineInstr *DefMI = LIS->getInstructionFromIndex(ValNo->def); |
| 1310 | if (!DefMI) |
| 1311 | return false; |
| 1312 | if (DefMI->isCopyLike()) { |
| 1313 | IsDefCopy = true; |
| 1314 | return false; |
| 1315 | } |
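| | // Only rematerialize defs that are as cheap as a move, rematerializable, |
| | // and safe to move; duplicating anything else at the copy is not worth it. |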
| 1316 | if (!TII->isAsCheapAsAMove(*DefMI)) |
| 1317 | return false; |
| 1318 | |
| 1319 | if (!TII->isReMaterializable(*DefMI)) |
| 1320 | return false; |
| 1321 | |
| 1322 | bool SawStore = false; |
| 1323 | if (!DefMI->isSafeToMove(SawStore)) |
| 1324 | return false; |
| 1325 | const MCInstrDesc &MCID = DefMI->getDesc(); |
| 1326 | if (MCID.getNumDefs() != 1) |
| 1327 | return false; |
| 1328 | |
| 1329 | // If both SrcIdx and DstIdx are set, correct rematerialization would widen |
| 1330 | // the register substantially (beyond both source and dest size). This is bad |
| 1331 | // for performance since it can cascade through a function, introducing many |
| 1332 | // extra spills and fills (e.g. ARM can easily end up copying QQQQPR registers |
| 1333 | // around after a few subreg copies). |
| 1334 | if (SrcIdx && DstIdx) |
| 1335 | return false; |
| 1336 | |
| 1337 | // Only support subregister destinations when the def is read-undef. |
| 1338 | MachineOperand &DstOperand = CopyMI->getOperand(0); |
| 1339 | Register CopyDstReg = DstOperand.getReg(); |
| 1340 | if (DstOperand.getSubReg() && !DstOperand.isUndef()) |
| 1341 | return false; |
| 1342 | |
| 1343 | // In the physical register case, checking that the def is read-undef is not |
| 1344 | // enough. We're widening the def and need to avoid clobbering other live |
| 1345 | // values in the unused register pieces. |
| 1346 | // |
| 1347 | // TODO: Targets may support rewriting the rematerialized instruction to only |
| 1348 | // touch relevant lanes, in which case we don't need any liveness check. |
| 1349 | if (CopyDstReg.isPhysical() && CP.isPartial()) { |
| 1350 | for (MCRegUnit Unit : TRI->regunits(DstReg)) { |
| 1351 | // Ignore the register units we are writing anyway. |
| 1352 | if (is_contained(TRI->regunits(CopyDstReg), Unit)) |
| 1353 | continue; |
| 1354 | |
| 1355 | // Check if the other lanes we are defining are live at the |
| 1356 | // rematerialization point. |
| 1357 | LiveRange &LR = LIS->getRegUnit(Unit); |
| 1358 | if (LR.liveAt(CopyIdx)) |
| 1359 | return false; |
| 1360 | } |
| 1361 | } |
| 1362 | |
| 1363 | const unsigned DefSubIdx = DefMI->getOperand(0).getSubReg(); |
| 1364 | const TargetRegisterClass *DefRC = TII->getRegClass(MCID, 0); |
| 1365 | if (!DefMI->isImplicitDef()) { |
| 1366 | if (DstReg.isPhysical()) { |
| 1367 | Register NewDstReg = DstReg; |
| 1368 | |
| 1369 | unsigned NewDstIdx = TRI->composeSubRegIndices(CP.getSrcIdx(), DefSubIdx); |
| 1370 | if (NewDstIdx) |
| 1371 | NewDstReg = TRI->getSubReg(DstReg, NewDstIdx); |
| 1372 | |
| 1373 | // Finally, make sure that the physical subregister that will be |
| 1374 | // constructed later is permitted for the instruction. |
| 1375 | if (!DefRC->contains(NewDstReg)) |
| 1376 | return false; |
| 1377 | } else { |
| 1378 | // Theoretically, some stack frame reference could exist. Just make sure |
| 1379 | // it hasn't actually happened. |
| 1380 | assert(DstReg.isVirtual() && |
| 1381 | "Only expect to deal with virtual or physical registers"); |
| 1382 | } |
| 1383 | } |
| 1384 | |
| 1385 | if (!VirtRegAuxInfo::allUsesAvailableAt(DefMI, CopyIdx, *LIS, *MRI, *TII)) |
| 1386 | return false; |
| 1387 | |
| 1388 | DebugLoc DL = CopyMI->getDebugLoc(); |
| 1389 | MachineBasicBlock *MBB = CopyMI->getParent(); |
| 1390 | MachineBasicBlock::iterator MII = |
| 1391 | std::next(MachineBasicBlock::iterator(CopyMI)); |
| 1392 | LiveRangeEdit::Remat RM(ValNo); |
| 1393 | RM.OrigMI = DefMI; |
| 1394 | SmallVector<Register, 8> NewRegs; |
| 1395 | LiveRangeEdit Edit(&SrcInt, NewRegs, *MF, *LIS, nullptr, this); |
| 1396 | Edit.rematerializeAt(*MBB, MII, DstReg, RM, *TRI, false, SrcIdx, CopyMI); |
| 1397 | MachineInstr &NewMI = *std::prev(MII); |
| 1398 | NewMI.setDebugLoc(DL); |
| 1399 | |
| 1400 | // In a situation like the following: |
| 1401 | // %0:subreg = instr ; DefMI, subreg = DstIdx |
| 1402 | // %1 = copy %0:subreg ; CopyMI, SrcIdx = 0 |
| 1403 | // instead of widening %1 to the register class of %0 simply do: |
| 1404 | // %1 = instr |
| 1405 | const TargetRegisterClass *NewRC = CP.getNewRC(); |
| 1406 | if (DstIdx != 0) { |
| 1407 | MachineOperand &DefMO = NewMI.getOperand(0); |
| 1408 | if (DefMO.getSubReg() == DstIdx) { |
| 1409 | assert(SrcIdx == 0 && CP.isFlipped() && |
| 1410 | "Shouldn't have SrcIdx+DstIdx at this point"); |
| 1411 | const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg); |
| 1412 | const TargetRegisterClass *CommonRC = |
| 1413 | TRI->getCommonSubClass(DefRC, DstRC); |
| 1414 | if (CommonRC != nullptr) { |
| 1415 | NewRC = CommonRC; |
| 1416 | |
| 1417 | // Instruction might contain "undef %0:subreg" as use operand: |
| 1418 | // %0:subreg = instr op_1, ..., op_N, undef %0:subreg, op_N+2, ... |
| 1419 | // |
| 1420 | // Need to check all operands. |
| 1421 | for (MachineOperand &MO : NewMI.operands()) { |
| 1422 | if (MO.isReg() && MO.getReg() == DstReg && MO.getSubReg() == DstIdx) { |
| 1423 | MO.setSubReg(0); |
| 1424 | } |
| 1425 | } |
| 1426 | |
| 1427 | DstIdx = 0; |
| 1428 | DefMO.setIsUndef(false); // Only subregs can have def+undef. |
| 1429 | } |
| 1430 | } |
| 1431 | } |
| 1432 | |
| 1433 | // CopyMI may have implicit operands, save them so that we can transfer them |
| 1434 | // over to the newly materialized instruction after CopyMI is removed. |
| 1435 | SmallVector<MachineOperand, 4> ImplicitOps; |
| 1436 | ImplicitOps.reserve(CopyMI->getNumOperands() - |
| 1437 | CopyMI->getDesc().getNumOperands()); |
| 1438 | for (unsigned I = CopyMI->getDesc().getNumOperands(), |
| 1439 | E = CopyMI->getNumOperands(); |
| 1440 | I != E; ++I) { |
| 1441 | MachineOperand &MO = CopyMI->getOperand(I); |
| 1442 | if (MO.isReg()) { |
| 1443 | assert(MO.isImplicit() && |
| 1444 | "No explicit operands after implicit operands."); |
| 1445 | assert((MO.getReg().isPhysical() || |
| 1446 | (MO.getSubReg() == 0 && MO.getReg() == DstOperand.getReg())) && |
| 1447 | "unexpected implicit virtual register def"); |
| 1448 | ImplicitOps.push_back(MO); |
| 1449 | } |
| 1450 | } |
| 1451 | |
| 1452 | CopyMI->eraseFromParent(); |
| 1453 | ErasedInstrs.insert(CopyMI); |
| 1454 | |
| 1455 | // NewMI may have dead implicit defs (e.g. EFLAGS for MOV<bits>r0 on X86). |
| 1456 | // We need to remember these so we can add intervals once we insert |
| 1457 | // NewMI into SlotIndexes. |
| 1458 | // |
| 1459 | // We also expect to have tied implicit-defs of super registers originating |
| 1460 | // from SUBREG_TO_REG, such as: |
| 1461 | // $edi = MOV32r0 implicit-def dead $eflags, implicit-def $rdi |
| 1462 | // undef %0.sub_32bit = MOV32r0 implicit-def dead $eflags, implicit-def %0 |
| 1463 | // |
| 1464 | // The implicit-def of the super register may have been reduced to |
| 1465 | // subregisters depending on the uses. |
| 1466 | SmallVector<std::pair<unsigned, Register>, 4> NewMIImplDefs; |
| 1467 | for (unsigned i = NewMI.getDesc().getNumOperands(), |
| 1468 | e = NewMI.getNumOperands(); |
| 1469 | i != e; ++i) { |
| 1470 | MachineOperand &MO = NewMI.getOperand(i); |
| 1471 | if (MO.isReg() && MO.isDef()) { |
| 1472 | assert(MO.isImplicit()); |
| 1473 | if (MO.getReg().isPhysical()) { |
| 1474 | assert(MO.isImplicit() && MO.getReg().isPhysical() && |
| 1475 | (MO.isDead() || |
| 1476 | (DefSubIdx && |
| 1477 | ((TRI->getSubReg(MO.getReg(), DefSubIdx) == |
| 1478 | MCRegister((unsigned)NewMI.getOperand(0).getReg())) || |
| 1479 | TRI->isSubRegisterEq(NewMI.getOperand(0).getReg(), |
| 1480 | MO.getReg()))))); |
| 1481 | NewMIImplDefs.push_back({i, MO.getReg()}); |
| 1482 | } else { |
| 1483 | assert(MO.getReg() == NewMI.getOperand(0).getReg()); |
| 1484 | |
| 1485 | // We're only expecting another def of the main output, so the range |
| 1486 | // should get updated with the regular output range. |
| 1487 | // |
| 1488 | // FIXME: The range updating below probably needs updating to look at |
| 1489 | // the super register if subranges are tracked. |
| 1490 | assert(!MRI->shouldTrackSubRegLiveness(DstReg) && |
| 1491 | "subrange update for implicit-def of super register may not be " |
| 1492 | "properly handled"); |
| 1493 | } |
| 1494 | } |
| 1495 | } |
| 1496 | |
| 1497 | if (DstReg.isVirtual()) { |
| 1498 | unsigned NewIdx = NewMI.getOperand(0).getSubReg(); |
| 1499 | |
| 1500 | if (DefRC != nullptr) { |
| 1501 | if (NewIdx) |
| 1502 | NewRC = TRI->getMatchingSuperRegClass(NewRC, DefRC, NewIdx); |
| 1503 | else |
| 1504 | NewRC = TRI->getCommonSubClass(NewRC, DefRC); |
| 1505 | assert(NewRC && "subreg chosen for remat incompatible with instruction"); |
| 1506 | } |
| 1507 | |
| 1508 | // Remap subranges to new lanemask and change register class. |
| 1509 | LiveInterval &DstInt = LIS->getInterval(DstReg); |
| 1510 | for (LiveInterval::SubRange &SR : DstInt.subranges()) { |
| 1511 | SR.LaneMask = TRI->composeSubRegIndexLaneMask(DstIdx, SR.LaneMask); |
| 1512 | } |
| 1513 | MRI->setRegClass(DstReg, NewRC); |
| 1514 | |
| 1515 | // Update machine operands and add flags. |
| 1516 | updateRegDefsUses(DstReg, DstReg, DstIdx); |
| 1517 | NewMI.getOperand(0).setSubReg(NewIdx); |
| 1518 | // updateRegDefsUses can add an "undef" flag to the definition, since |
| 1519 | // it will replace DstReg with DstReg.DstIdx. If NewIdx is 0, make |
| 1520 | // sure that "undef" is not set. |
| 1521 | if (NewIdx == 0) |
| 1522 | NewMI.getOperand(0).setIsUndef(false); |
| 1523 | |
| 1524 | // In a situation like the following: |
| 1525 | // |
| 1526 | // undef %2.subreg:reg = INST %1:reg ; DefMI (rematerializable), |
| 1527 | // ; Defines only some of lanes, |
| 1528 | // ; so DefSubIdx = NewIdx = subreg |
| 1529 | // %3:reg = COPY %2 ; Copy full reg |
| 1530 | // .... = SOMEINSTR %3:reg ; Use full reg |
| 1531 | // |
| 1532 | // there are no subranges for %3 so after rematerialization we need |
| 1533 | // to explicitly create them. Undefined subranges are removed later on. |
| 1534 | if (NewIdx && !DstInt.hasSubRanges() && |
| 1535 | MRI->shouldTrackSubRegLiveness(DstReg)) { |
| 1536 | LaneBitmask FullMask = MRI->getMaxLaneMaskForVReg(DstReg); |
| 1537 | LaneBitmask UsedLanes = TRI->getSubRegIndexLaneMask(NewIdx); |
| 1538 | LaneBitmask UnusedLanes = FullMask & ~UsedLanes; |
| 1539 | VNInfo::Allocator &Alloc = LIS->getVNInfoAllocator(); |
| 1540 | DstInt.createSubRangeFrom(Alloc, UsedLanes, DstInt); |
| 1541 | DstInt.createSubRangeFrom(Alloc, UnusedLanes, DstInt); |
| 1542 | } |
| 1543 | |
| 1544 | // Add dead subregister definitions if we are defining the whole register |
| 1545 | // but only part of it is live. |
| 1546 | // This can happen if the rematerialized instruction defines more of the |
| 1547 | // register than is actually used. |
| 1548 | // An example would be: |
| 1549 | // %1 = LOAD CONSTANTS 5, 8 ; Loading both 5 and 8 in different subregs |
| 1550 | // ; Copying only part of the register here, but the rest is undef. |
| 1551 | // %2:sub_16bit<def, read-undef> = COPY %1:sub_16bit |
| 1552 | // ==> |
| 1553 | // ; Materialize all the constants but only using one |
| 1554 | // %2 = LOAD_CONSTANTS 5, 8 |
| 1555 | // |
| 1556 | // at this point for the part that wasn't defined before we could have |
| 1557 | // subranges missing the definition. |
| 1558 | if (NewIdx == 0 && DstInt.hasSubRanges()) { |
| 1559 | SlotIndex CurrIdx = LIS->getInstructionIndex(NewMI); |
| 1560 | SlotIndex DefIndex = |
| 1561 | CurrIdx.getRegSlot(NewMI.getOperand(0).isEarlyClobber()); |
| 1562 | LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(DstReg); |
| 1563 | VNInfo::Allocator &Alloc = LIS->getVNInfoAllocator(); |
| 1564 | for (LiveInterval::SubRange &SR : DstInt.subranges()) { |
| 1565 | if (!SR.liveAt(DefIndex)) |
| 1566 | SR.createDeadDef(DefIndex, Alloc); |
| 1567 | MaxMask &= ~SR.LaneMask; |
| 1568 | } |
| 1569 | if (MaxMask.any()) { |
| 1570 | LiveInterval::SubRange *SR = DstInt.createSubRange(Alloc, MaxMask); |
| 1571 | SR->createDeadDef(DefIndex, Alloc); |
| 1572 | } |
| 1573 | } |
| 1574 | |
| 1575 | // Make sure that the subrange for resultant undef is removed |
| 1576 | // For example: |
| 1577 | // %1:sub1<def,read-undef> = LOAD CONSTANT 1 |
| 1578 | // %2 = COPY %1 |
| 1579 | // ==> |
| 1580 | // %2:sub1<def, read-undef> = LOAD CONSTANT 1 |
| 1581 | // ; Correct but need to remove the subrange for %2:sub0 |
| 1582 | // ; as it is now undef |
| 1583 | if (NewIdx != 0 && DstInt.hasSubRanges()) { |
| 1584 | // The affected subregister segments can be removed. |
| 1585 | SlotIndex CurrIdx = LIS->getInstructionIndex(NewMI); |
| 1586 | LaneBitmask DstMask = TRI->getSubRegIndexLaneMask(NewIdx); |
| 1587 | bool UpdatedSubRanges = false; |
| 1588 | SlotIndex DefIndex = |
| 1589 | CurrIdx.getRegSlot(NewMI.getOperand(0).isEarlyClobber()); |
| 1590 | VNInfo::Allocator &Alloc = LIS->getVNInfoAllocator(); |
| 1591 | |
| 1592 | // Refine the subranges that are now defined by the remat. |
| 1593 | // This will split existing subranges if necessary. |
| 1594 | DstInt.refineSubRanges( |
| 1595 | Alloc, DstMask, |
| 1596 | [&DefIndex, &Alloc](LiveInterval::SubRange &SR) { |
| 1597 | // We know that this lane is defined by this instruction, |
| 1598 | // but at this point it might not be live because it was not defined |
| 1599 | // by the original instruction. This happens when the |
| 1600 | // rematerialization widens the defined register. Assign that lane a |
| 1601 | // dead def so that the interferences are properly modeled. |
| 1602 | if (!SR.liveAt(DefIndex)) |
| 1603 | SR.createDeadDef(DefIndex, Alloc); |
| 1604 | }, |
| 1605 | *LIS->getSlotIndexes(), *TRI); |
| 1606 | |
| 1607 | for (LiveInterval::SubRange &SR : DstInt.subranges()) { |
| 1608 | if ((SR.LaneMask & DstMask).none()) { |
| 1609 | LLVM_DEBUG(dbgs() |
| 1610 | << "Removing undefined SubRange " |
| 1611 | << PrintLaneMask(SR.LaneMask) << " : " << SR << "\n"); |
| 1612 | |
| 1613 | if (VNInfo *RmValNo = SR.getVNInfoAt(CurrIdx.getRegSlot())) { |
| 1614 | // VNI is in ValNo - remove any segments in this SubRange that have |
| 1615 | // this ValNo |
| 1616 | SR.removeValNo(RmValNo); |
| 1617 | } |
| 1618 | |
| 1619 | // We may not have a defined value at this point, but still need to |
| 1620 | // clear out any empty subranges tentatively created by |
| 1621 | // updateRegDefsUses. The original subrange def may have only undefed |
| 1622 | // some lanes. |
| 1623 | UpdatedSubRanges = true; |
| 1624 | } |
| 1625 | } |
| 1626 | if (UpdatedSubRanges) |
| 1627 | DstInt.removeEmptySubRanges(); |
| 1628 | } |
| 1629 | } else if (NewMI.getOperand(0).getReg() != CopyDstReg) { |
| 1630 | // The new instruction may be defining a sub-register of what's actually |
| 1631 | // been asked for. If so it must implicitly define the whole thing. |
| 1632 | assert(DstReg.isPhysical() && |
| 1633 | "Only expect virtual or physical registers in remat"); |
| 1634 | |
| 1635 | // When we're rematerializing into a not-quite-right register we already add |
| 1636 | // the real definition as an implicit-def, but we should also be marking the |
| 1637 | // "official" register as dead, since nothing else is going to use it as a |
| 1638 | // result of this remat. Not doing this can affect pressure tracking. |
| 1639 | NewMI.getOperand(0).setIsDead(true); |
| 1640 | |
| 1641 | bool HasDefMatchingCopy = false; |
| 1642 | for (auto [OpIndex, Reg] : NewMIImplDefs) { |
| 1643 | if (Reg != DstReg) |
| 1644 | continue; |
| 1645 | // Also, if CopyDstReg is a sub-register of DstReg (and it is defined), we |
| 1646 | // must mark DstReg as dead since it is not going to be used as a result of |
| 1647 | // this remat. |
| 1648 | if (DstReg != CopyDstReg) |
| 1649 | NewMI.getOperand(OpIndex).setIsDead(true); |
| 1650 | else |
| 1651 | HasDefMatchingCopy = true; |
| 1652 | } |
| 1653 | |
| 1654 | // If NewMI does not already have an implicit-def CopyDstReg add one now. |
| 1655 | if (!HasDefMatchingCopy) |
| 1656 | NewMI.addOperand(MachineOperand::CreateReg( |
| 1657 | CopyDstReg, true /*IsDef*/, true /*IsImp*/, false /*IsKill*/)); |
| 1658 | |
| 1659 | // Record small dead def live-ranges for all the subregisters |
| 1660 | // of the destination register. |
| 1661 | // Otherwise, variables that live through may miss some |
| 1662 | // interferences, thus creating an invalid allocation. |
| 1663 | // E.g., i386 code: |
| 1664 | // %1 = somedef ; %1 GR8 |
| 1665 | // %2 = remat ; %2 GR32 |
| 1666 | // CL = COPY %2.sub_8bit |
| 1667 | // = somedef %1 ; %1 GR8 |
| 1668 | // => |
| 1669 | // %1 = somedef ; %1 GR8 |
| 1670 | // dead ECX = remat ; implicit-def CL |
| 1671 | // = somedef %1 ; %1 GR8 |
| 1672 | // %1 will see the interferences with CL but not with CH since |
| 1673 | // no live-ranges would have been created for ECX. |
| 1674 | // Fix that! |
| 1675 | SlotIndex NewMIIdx = LIS->getInstructionIndex(NewMI); |
| 1676 | for (MCRegUnit Unit : TRI->regunits(NewMI.getOperand(0).getReg())) |
| 1677 | if (LiveRange *LR = LIS->getCachedRegUnit(Unit)) |
| 1678 | LR->createDeadDef(NewMIIdx.getRegSlot(), LIS->getVNInfoAllocator()); |
| 1679 | } |
| 1680 | |
| 1681 | NewMI.setRegisterDefReadUndef(NewMI.getOperand(0).getReg()); |
| 1682 | |
| 1683 | // Transfer over implicit operands to the rematerialized instruction. |
| 1684 | for (MachineOperand &MO : ImplicitOps) |
| 1685 | NewMI.addOperand(MO); |
| 1686 | |
| 1687 | SlotIndex NewMIIdx = LIS->getInstructionIndex(NewMI); |
| 1688 | for (Register Reg : make_second_range(NewMIImplDefs)) { |
| 1689 | for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) |
| 1690 | if (LiveRange *LR = LIS->getCachedRegUnit(Unit)) |
| 1691 | LR->createDeadDef(NewMIIdx.getRegSlot(), LIS->getVNInfoAllocator()); |
| 1692 | } |
| 1693 | |
| 1694 | LLVM_DEBUG(dbgs() << "Remat: " << NewMI); |
| 1695 | ++NumReMats; |
| 1696 | |
| 1697 | // If the virtual SrcReg is completely eliminated, update all DBG_VALUEs |
| 1698 | // to describe DstReg instead. |
| 1699 | if (MRI->use_nodbg_empty(SrcReg)) { |
| 1700 | for (MachineOperand &UseMO : |
| 1701 | llvm::make_early_inc_range(MRI->use_operands(SrcReg))) { |
| 1702 | MachineInstr *UseMI = UseMO.getParent(); |
| 1703 | if (UseMI->isDebugInstr()) { |
| 1704 | if (DstReg.isPhysical()) |
| 1705 | UseMO.substPhysReg(DstReg, *TRI); |
| 1706 | else |
| 1707 | UseMO.setReg(DstReg); |
| 1708 | // Move the debug value directly after the def of the rematerialized |
| 1709 | // value in DstReg. |
| 1710 | MBB->splice(std::next(NewMI.getIterator()), UseMI->getParent(), UseMI); |
| 1711 | LLVM_DEBUG(dbgs() << "\t\tupdated: " << *UseMI); |
| 1712 | } |
| 1713 | } |
| 1714 | } |
| 1715 | |
| 1716 | if (ToBeUpdated.count(SrcReg)) |
| 1717 | return true; |
| 1718 | |
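| | // Count the remaining copy-like uses of SrcReg. If there are many, defer |
| | // the expensive live-range shrinking until the remaining copies have been |
| | // coalesced; ToBeUpdated records the deferral. |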
| 1719 | unsigned NumCopyUses = 0; |
| 1720 | for (MachineOperand &UseMO : MRI->use_nodbg_operands(SrcReg)) { |
| 1721 | if (UseMO.getParent()->isCopyLike()) |
| 1722 | NumCopyUses++; |
| 1723 | } |
| 1724 | if (NumCopyUses < LateRematUpdateThreshold) { |
| 1725 | // The source interval can become smaller because we removed a use. |
| 1726 | shrinkToUses(&SrcInt, &DeadDefs); |
| 1727 | if (!DeadDefs.empty()) |
| 1728 | eliminateDeadDefs(&Edit); |
| 1729 | } else { |
| 1730 | ToBeUpdated.insert(SrcReg); |
| 1731 | } |
| 1732 | return true; |
| 1733 | } |
| 1734 | |
| 1735 | MachineInstr *RegisterCoalescer::eliminateUndefCopy(MachineInstr *CopyMI) { |
| 1736 | // ProcessImplicitDefs may leave some copies of <undef> values; it only |
| 1737 | // removes local variables. When we have a copy like: |
| 1738 | // |
| 1739 | // %1 = COPY undef %2 |
| 1740 | // |
| 1741 | // We delete the copy and remove the corresponding value number from %1. |
| 1742 | // Any uses of that value number are marked as <undef>. |
| 1743 | |
| 1744 | // Note that we do not query CoalescerPair here but redo isMoveInstr as the |
| 1745 | // CoalescerPair may have a new register class with adjusted subreg indices |
| 1746 | // at this point. |
| 1747 | Register SrcReg, DstReg; |
| 1748 | unsigned SrcSubIdx = 0, DstSubIdx = 0; |
| 1749 | if (!isMoveInstr(*TRI, CopyMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx)) |
| 1750 | return nullptr; |
| 1751 | |
| 1752 | SlotIndex Idx = LIS->getInstructionIndex(*CopyMI); |
| 1753 | const LiveInterval &SrcLI = LIS->getInterval(SrcReg); |
| 1754 | // CopyMI is undef iff SrcReg is not live before the instruction. |
| 1755 | if (SrcSubIdx != 0 && SrcLI.hasSubRanges()) { |
| 1756 | LaneBitmask SrcMask = TRI->getSubRegIndexLaneMask(SrcSubIdx); |
| 1757 | for (const LiveInterval::SubRange &SR : SrcLI.subranges()) { |
| 1758 | if ((SR.LaneMask & SrcMask).none()) |
| 1759 | continue; |
| 1760 | if (SR.liveAt(Idx)) |
| 1761 | return nullptr; |
| 1762 | } |
| 1763 | } else if (SrcLI.liveAt(Idx)) |
| 1764 | return nullptr; |
| 1765 | |
| 1766 | // If the undef copy defines a live-out value (i.e. an input to a PHI def), |
| 1767 | // then replace it with an IMPLICIT_DEF. |
| 1768 | LiveInterval &DstLI = LIS->getInterval(DstReg); |
| 1769 | SlotIndex RegIndex = Idx.getRegSlot(); |
| 1770 | LiveRange::Segment *Seg = DstLI.getSegmentContaining(RegIndex); |
| 1771 | assert(Seg != nullptr && "No segment for defining instruction"); |
| 1772 | VNInfo *V = DstLI.getVNInfoAt(Seg->end); |
| 1773 | |
| 1774 | // The source interval may also have been on an undef use, in which case the |
| 1775 | // copy introduced a live value. |
| 1776 | if (((V && V->isPHIDef()) || (!V && !DstLI.liveAt(Idx)))) { |
| 1777 | for (unsigned i = CopyMI->getNumOperands(); i != 0; --i) { |
| 1778 | MachineOperand &MO = CopyMI->getOperand(i - 1); |
| 1779 | if (MO.isReg()) { |
| 1780 | if (MO.isUse()) |
| 1781 | CopyMI->removeOperand(i - 1); |
| 1782 | } else { |
| 1783 | assert(MO.isImm() && |
| 1784 | CopyMI->getOpcode() == TargetOpcode::SUBREG_TO_REG); |
| 1785 | CopyMI->removeOperand(i - 1); |
| 1786 | } |
| 1787 | } |
| 1788 | |
| 1789 | CopyMI->setDesc(TII->get(TargetOpcode::IMPLICIT_DEF)); |
| 1790 | LLVM_DEBUG(dbgs() << "\tReplaced copy of <undef> value with an " |
| 1791 | "implicit def\n"); |
| 1792 | return CopyMI; |
| 1793 | } |
| 1794 | |
| 1795 | // Remove any DstReg segments starting at the instruction. |
| 1796 | LLVM_DEBUG(dbgs() << "\tEliminating copy of <undef> value\n" ); |
| 1797 | |
| 1798 | // Remove value or merge with previous one in case of a subregister def. |
| 1799 | if (VNInfo *PrevVNI = DstLI.getVNInfoAt(Idx)) { |
| 1800 | VNInfo *VNI = DstLI.getVNInfoAt(RegIndex); |
| 1801 | DstLI.MergeValueNumberInto(VNI, PrevVNI); |
| 1802 | |
| 1803 | // The affected subregister segments can be removed. |
| 1804 | LaneBitmask DstMask = TRI->getSubRegIndexLaneMask(DstSubIdx); |
| 1805 | for (LiveInterval::SubRange &SR : DstLI.subranges()) { |
| 1806 | if ((SR.LaneMask & DstMask).none()) |
| 1807 | continue; |
| 1808 | |
| 1809 | VNInfo *SVNI = SR.getVNInfoAt(RegIndex); |
| 1810 | assert(SVNI != nullptr && SlotIndex::isSameInstr(SVNI->def, RegIndex)); |
| 1811 | SR.removeValNo(SVNI); |
| 1812 | } |
| 1813 | DstLI.removeEmptySubRanges(); |
| 1814 | } else |
| 1815 | LIS->removeVRegDefAt(DstLI, RegIndex); |
| 1816 | |
| 1817 | // Mark uses as undef. |
| 1818 | for (MachineOperand &MO : MRI->reg_nodbg_operands(DstReg)) { |
| 1819 | if (MO.isDef() /*|| MO.isUndef()*/) |
| 1820 | continue; |
| 1821 | const MachineInstr &MI = *MO.getParent(); |
| 1822 | SlotIndex UseIdx = LIS->getInstructionIndex(MI); |
| 1823 | LaneBitmask UseMask = TRI->getSubRegIndexLaneMask(MO.getSubReg()); |
| 1824 | bool isLive; |
| 1825 | if (!UseMask.all() && DstLI.hasSubRanges()) { |
| 1826 | isLive = false; |
| 1827 | for (const LiveInterval::SubRange &SR : DstLI.subranges()) { |
| 1828 | if ((SR.LaneMask & UseMask).none()) |
| 1829 | continue; |
| 1830 | if (SR.liveAt(UseIdx)) { |
| 1831 | isLive = true; |
| 1832 | break; |
| 1833 | } |
| 1834 | } |
| 1835 | } else |
| 1836 | isLive = DstLI.liveAt(UseIdx); |
| 1837 | if (isLive) |
| 1838 | continue; |
| 1839 | MO.setIsUndef(true); |
| 1840 | LLVM_DEBUG(dbgs() << "\tnew undef: " << UseIdx << '\t' << MI); |
| 1841 | } |
| 1842 | |
| 1843 | // A def of a subregister may be a use of the other subregisters, so |
| 1844 | // deleting a def of a subregister may also remove uses. Since CopyMI |
| 1845 | // is still part of the function (but about to be erased), mark all |
| 1846 | // defs of DstReg in it as <undef>, so that shrinkToUses would |
| 1847 | // ignore them. |
| 1848 | for (MachineOperand &MO : CopyMI->all_defs()) |
| 1849 | if (MO.getReg() == DstReg) |
| 1850 | MO.setIsUndef(true); |
| 1851 | LIS->shrinkToUses(&DstLI); |
| 1852 | |
| 1853 | return CopyMI; |
| 1854 | } |
| 1855 | |
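| | // Check whether the lanes MO reads (for a subregister def, the lanes it |
| | // preserves) are live at UseIdx in any subrange of Int, and mark MO with |
| | // the undef flag if they are not. |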
| 1856 | void RegisterCoalescer::addUndefFlag(const LiveInterval &Int, SlotIndex UseIdx, |
| 1857 | MachineOperand &MO, unsigned SubRegIdx) { |
| 1858 | LaneBitmask Mask = TRI->getSubRegIndexLaneMask(SubRegIdx); |
| 1859 | if (MO.isDef()) |
| 1860 | Mask = ~Mask; |
| 1861 | bool IsUndef = true; |
| 1862 | for (const LiveInterval::SubRange &S : Int.subranges()) { |
| 1863 | if ((S.LaneMask & Mask).none()) |
| 1864 | continue; |
| 1865 | if (S.liveAt(UseIdx)) { |
| 1866 | IsUndef = false; |
| 1867 | break; |
| 1868 | } |
| 1869 | } |
| 1870 | if (IsUndef) { |
| 1871 | MO.setIsUndef(true); |
| 1872 | // We found out some subregister use is actually reading an undefined |
| 1873 | // value. In some cases the whole vreg has become undefined at this |
| 1874 | // point so we have to potentially shrink the main range if the |
| 1875 | // use was ending a live segment there. |
| 1876 | LiveQueryResult Q = Int.Query(UseIdx); |
| 1877 | if (Q.valueOut() == nullptr) |
| 1878 | ShrinkMainRange = true; |
| 1879 | } |
| 1880 | } |
| 1881 | |
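| | // Rewrite all defs and uses of SrcReg to DstReg (with SubIdx composed into |
| | // the operands' subregister indices), adding undef flags where subregister |
| | // liveness shows that an operand now reads undefined lanes. |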
| 1882 | void RegisterCoalescer::updateRegDefsUses(Register SrcReg, Register DstReg, |
| 1883 | unsigned SubIdx) { |
| 1884 | bool DstIsPhys = DstReg.isPhysical(); |
| 1885 | LiveInterval *DstInt = DstIsPhys ? nullptr : &LIS->getInterval(DstReg); |
| 1886 | |
| 1887 | if (DstInt && DstInt->hasSubRanges() && DstReg != SrcReg) { |
| 1888 | for (MachineOperand &MO : MRI->reg_operands(DstReg)) { |
| 1889 | if (MO.isUndef()) |
| 1890 | continue; |
| 1891 | unsigned SubReg = MO.getSubReg(); |
| 1892 | if (SubReg == 0 && MO.isDef()) |
| 1893 | continue; |
| 1894 | |
| 1895 | MachineInstr &MI = *MO.getParent(); |
| 1896 | if (MI.isDebugInstr()) |
| 1897 | continue; |
| 1898 | SlotIndex UseIdx = LIS->getInstructionIndex(MI).getRegSlot(true); |
| 1899 | addUndefFlag(*DstInt, UseIdx, MO, SubReg); |
| 1900 | } |
| 1901 | } |
| 1902 | |
| 1903 | SmallPtrSet<MachineInstr *, 8> Visited; |
| 1904 | for (MachineRegisterInfo::reg_instr_iterator I = MRI->reg_instr_begin(SrcReg), |
| 1905 | E = MRI->reg_instr_end(); |
| 1906 | I != E;) { |
| 1907 | MachineInstr *UseMI = &*(I++); |
| 1908 | |
| 1909 | // Each instruction can only be rewritten once because sub-register |
| 1910 | // composition is not always idempotent. When SrcReg != DstReg, rewriting |
| 1911 | // the UseMI operands removes them from the SrcReg use-def chain, but when |
| 1912 | // SrcReg is DstReg we could encounter UseMI twice if it has multiple |
| 1913 | // operands mentioning the virtual register. |
| 1914 | if (SrcReg == DstReg && !Visited.insert(UseMI).second) |
| 1915 | continue; |
| 1916 | |
| 1917 | SmallVector<unsigned, 8> Ops; |
| 1918 | bool Reads, Writes; |
| 1919 | std::tie(Reads, Writes) = UseMI->readsWritesVirtualRegister(SrcReg, &Ops); |
| 1920 | |
| 1921 | // If SrcReg wasn't read, it may still be the case that DstReg is live-in |
| 1922 | // because SrcReg is a sub-register. |
| 1923 | if (DstInt && !Reads && SubIdx && !UseMI->isDebugInstr()) |
| 1924 | Reads = DstInt->liveAt(LIS->getInstructionIndex(*UseMI)); |
| 1925 | |
| 1926 | // Replace SrcReg with DstReg in all UseMI operands. |
| 1927 | for (unsigned Op : Ops) { |
| 1928 | MachineOperand &MO = UseMI->getOperand(Op); |
| 1929 | |
| 1930 | // Adjust <undef> flags in case of sub-register joins. We don't want to |
| 1931 | // turn a full def into a read-modify-write sub-register def and vice |
| 1932 | // versa. |
| 1933 | if (SubIdx && MO.isDef()) |
| 1934 | MO.setIsUndef(!Reads); |
| 1935 | |
| 1936 | // A subreg use of a partially undef (super) register may be a complete |
| 1937 | // undef use now and then has to be marked that way. |
| 1938 | if (MO.isUse() && !MO.isUndef() && !DstIsPhys) { |
| 1939 | unsigned SubUseIdx = TRI->composeSubRegIndices(SubIdx, MO.getSubReg()); |
| 1940 | if (SubUseIdx != 0 && MRI->shouldTrackSubRegLiveness(DstReg)) { |
| 1941 | if (!DstInt->hasSubRanges()) { |
| 1942 | BumpPtrAllocator &Allocator = LIS->getVNInfoAllocator(); |
| 1943 | LaneBitmask FullMask = MRI->getMaxLaneMaskForVReg(DstInt->reg()); |
| 1944 | LaneBitmask UsedLanes = TRI->getSubRegIndexLaneMask(SubIdx); |
| 1945 | LaneBitmask UnusedLanes = FullMask & ~UsedLanes; |
| 1946 | DstInt->createSubRangeFrom(Allocator, UsedLanes, *DstInt); |
| 1947 | // The unused lanes are just empty live-ranges at this point. |
| 1948 | // It is the caller's responsibility to set the proper |
| 1949 | // dead segments if there is an actual dead def of the |
| 1950 | // unused lanes. This may happen with rematerialization. |
| 1951 | DstInt->createSubRange(Allocator, UnusedLanes); |
| 1952 | } |
| 1953 | SlotIndex MIIdx = UseMI->isDebugInstr() |
| 1954 | ? LIS->getSlotIndexes()->getIndexBefore(*UseMI) |
| 1955 | : LIS->getInstructionIndex(*UseMI); |
| 1956 | SlotIndex UseIdx = MIIdx.getRegSlot(true); |
| 1957 | addUndefFlag(*DstInt, UseIdx, MO, SubUseIdx); |
| 1958 | } |
| 1959 | } |
| 1960 | |
| 1961 | if (DstIsPhys) |
| 1962 | MO.substPhysReg(DstReg, *TRI); |
| 1963 | else |
| 1964 | MO.substVirtReg(DstReg, SubIdx, *TRI); |
| 1965 | } |
| 1966 | |
| 1967 | LLVM_DEBUG({ |
| 1968 | dbgs() << "\t\tupdated: "; |
| 1969 | if (!UseMI->isDebugInstr()) |
| 1970 | dbgs() << LIS->getInstructionIndex(*UseMI) << "\t"; |
| 1971 | dbgs() << *UseMI; |
| 1972 | }); |
| 1973 | } |
| 1974 | } |
| 1975 | |
| 1976 | bool RegisterCoalescer::canJoinPhys(const CoalescerPair &CP) { |
| 1977 | // Always join simple intervals that are defined by a single copy from a |
| 1978 | // reserved register. This doesn't increase register pressure, so it is |
| 1979 | // always beneficial. |
| 1980 | if (!MRI->isReserved(CP.getDstReg())) { |
| 1981 | LLVM_DEBUG(dbgs() << "\tCan only merge into reserved registers.\n"); |
| 1982 | return false; |
| 1983 | } |
| 1984 | |
| 1985 | LiveInterval &JoinVInt = LIS->getInterval(CP.getSrcReg()); |
| 1986 | if (JoinVInt.containsOneValue()) |
| 1987 | return true; |
| 1988 | |
| 1989 | LLVM_DEBUG( |
| 1990 | dbgs() << "\tCannot join complex intervals into reserved register.\n"); |
| 1991 | return false; |
| 1992 | } |
| 1993 | |
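| | // Return true if no predecessor of MBB provides a live value for S other |
| | // than a self loop that carries back the value defined at the copy itself. |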
| 1994 | bool RegisterCoalescer::copyValueUndefInPredecessors( |
| 1995 | LiveRange &S, const MachineBasicBlock *MBB, LiveQueryResult SLRQ) { |
| 1996 | for (const MachineBasicBlock *Pred : MBB->predecessors()) { |
| 1997 | SlotIndex PredEnd = LIS->getMBBEndIdx(Pred); |
| 1998 | if (VNInfo *V = S.getVNInfoAt(PredEnd.getPrevSlot())) { |
| 1999 | // If this is a self loop, we may be reading the same value. |
| 2000 | if (V->id != SLRQ.valueOutOrDead()->id) |
| 2001 | return false; |
| 2002 | } |
| 2003 | } |
| 2004 | |
| 2005 | return true; |
| 2006 | } |
| 2007 | |
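| | // After pruning undef lanes from LI, propagate the undef flag to remaining |
| | // subregister uses that read those lanes, then shrink LI to its real uses. |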
| 2008 | void RegisterCoalescer::setUndefOnPrunedSubRegUses(LiveInterval &LI, |
| 2009 | Register Reg, |
| 2010 | LaneBitmask PrunedLanes) { |
| 2011 | // If we had other instructions in the segment reading the undef sublane |
| 2012 | // value, we need to mark them with undef. |
| 2013 | for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) { |
| 2014 | unsigned SubRegIdx = MO.getSubReg(); |
| 2015 | if (SubRegIdx == 0 || MO.isUndef()) |
| 2016 | continue; |
| 2017 | |
| 2018 | LaneBitmask SubRegMask = TRI->getSubRegIndexLaneMask(SubRegIdx); |
| 2019 | SlotIndex Pos = LIS->getInstructionIndex(*MO.getParent()); |
| 2020 | for (LiveInterval::SubRange &S : LI.subranges()) { |
| 2021 | if (!S.liveAt(Pos) && (PrunedLanes & SubRegMask).any()) { |
| 2022 | MO.setIsUndef(); |
| 2023 | break; |
| 2024 | } |
| 2025 | } |
| 2026 | } |
| 2027 | |
| 2028 | LI.removeEmptySubRanges(); |
| 2029 | |
| 2030 | // A def of a subregister may be a use of other register lanes. Replacing |
| 2031 | // such a def with a def of a different register will eliminate the use, |
| 2032 | // and may cause the recorded live range to be larger than the actual |
| 2033 | // liveness in the program IR. |
| 2034 | LIS->shrinkToUses(&LI); |
| 2035 | } |
| 2036 | |
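| | // Attempt to join the intervals of the copy CopyMI. Returns true if the |
| | // copy was coalesced away or otherwise erased; sets Again when the copy |
| | // could not be joined now but may become joinable later. |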
| 2037 | bool RegisterCoalescer::joinCopy( |
| 2038 | MachineInstr *CopyMI, bool &Again, |
| 2039 | SmallPtrSetImpl<MachineInstr *> &CurrentErasedInstrs) { |
| 2040 | Again = false; |
| 2041 | LLVM_DEBUG(dbgs() << LIS->getInstructionIndex(*CopyMI) << '\t' << *CopyMI); |
| 2042 | |
| 2043 | CoalescerPair CP(*TRI); |
| 2044 | if (!CP.setRegisters(CopyMI)) { |
| 2045 | LLVM_DEBUG(dbgs() << "\tNot coalescable.\n"); |
| 2046 | return false; |
| 2047 | } |
| 2048 | |
| 2049 | if (CP.getNewRC()) { |
| 2050 | if (RegClassInfo.getNumAllocatableRegs(CP.getNewRC()) == 0) { |
| 2051 | LLVM_DEBUG(dbgs() << "\tNo " << TRI->getRegClassName(CP.getNewRC()) |
| 2052 | << " are available for allocation\n"); |
| 2053 | return false; |
| 2054 | } |
| 2055 | |
| 2056 | auto SrcRC = MRI->getRegClass(CP.getSrcReg()); |
| 2057 | auto DstRC = MRI->getRegClass(CP.getDstReg()); |
| 2058 | unsigned SrcIdx = CP.getSrcIdx(); |
| 2059 | unsigned DstIdx = CP.getDstIdx(); |
| 2060 | if (CP.isFlipped()) { |
| 2061 | std::swap(SrcIdx, DstIdx); |
| 2062 | std::swap(SrcRC, DstRC); |
| 2063 | } |
| 2064 | if (!TRI->shouldCoalesce(CopyMI, SrcRC, SrcIdx, DstRC, DstIdx, |
| 2065 | CP.getNewRC(), *LIS)) { |
| 2066 | LLVM_DEBUG(dbgs() << "\tSubtarget bailed on coalescing.\n"); |
| 2067 | return false; |
| 2068 | } |
| 2069 | } |
| 2070 | |
| 2071 | // Dead code elimination. This really should be handled by MachineDCE, but |
| 2072 | // sometimes dead copies slip through, and we can't generate invalid live |
| 2073 | // ranges. |
| 2074 | if (!CP.isPhys() && CopyMI->allDefsAreDead()) { |
| 2075 | LLVM_DEBUG(dbgs() << "\tCopy is dead.\n" ); |
| 2076 | DeadDefs.push_back(CopyMI); |
| 2077 | eliminateDeadDefs(); |
| 2078 | return true; |
| 2079 | } |
| 2080 | |
| 2081 | // Eliminate undefs. |
| 2082 | if (!CP.isPhys()) { |
| 2083 | // If this is an IMPLICIT_DEF, leave it alone, but don't try to coalesce. |
| 2084 | if (MachineInstr *UndefMI = eliminateUndefCopy(CopyMI)) { |
| 2085 | if (UndefMI->isImplicitDef()) |
| 2086 | return false; |
| 2087 | deleteInstr(CopyMI); |
| 2088 | return false; // Not coalescable. |
| 2089 | } |
| 2090 | } |
| 2091 | |
| 2092 | // Coalesced copies are normally removed immediately, but transformations |
| 2093 | // like removeCopyByCommutingDef() can inadvertently create identity copies. |
| 2094 | // When that happens, just join the values and remove the copy. |
| 2095 | if (CP.getSrcReg() == CP.getDstReg()) { |
| 2096 | LiveInterval &LI = LIS->getInterval(CP.getSrcReg()); |
| 2097 | LLVM_DEBUG(dbgs() << "\tCopy already coalesced: " << LI << '\n'); |
| 2098 | const SlotIndex CopyIdx = LIS->getInstructionIndex(*CopyMI); |
| 2099 | LiveQueryResult LRQ = LI.Query(CopyIdx); |
| 2100 | if (VNInfo *DefVNI = LRQ.valueDefined()) { |
| 2101 | VNInfo *ReadVNI = LRQ.valueIn(); |
| 2102 | assert(ReadVNI && "No value before copy and no <undef> flag."); |
| 2103 | assert(ReadVNI != DefVNI && "Cannot read and define the same value."); |
| 2104 | |
| 2105 | // Track incoming undef lanes we need to eliminate from the subrange. |
| 2106 | LaneBitmask PrunedLanes; |
| 2107 | MachineBasicBlock *MBB = CopyMI->getParent(); |
| 2108 | |
| 2109 | // Process subregister liveranges. |
| 2110 | for (LiveInterval::SubRange &S : LI.subranges()) { |
| 2111 | LiveQueryResult SLRQ = S.Query(CopyIdx); |
| 2112 | if (VNInfo *SDefVNI = SLRQ.valueDefined()) { |
| 2113 | if (VNInfo *SReadVNI = SLRQ.valueIn()) |
| 2114 | SDefVNI = S.MergeValueNumberInto(SDefVNI, SReadVNI); |
| 2115 | |
| 2116 | // If this copy introduced an undef subrange from an incoming value, |
| 2117 | // we need to eliminate the undef live-in values from the subrange. |
| 2118 | if (copyValueUndefInPredecessors(S, MBB, SLRQ)) { |
| 2119 | LLVM_DEBUG(dbgs() << "Incoming sublane value is undef at copy\n"); |
| 2120 | PrunedLanes |= S.LaneMask; |
| 2121 | S.removeValNo(SDefVNI); |
| 2122 | } |
| 2123 | } |
| 2124 | } |
| 2125 | |
| 2126 | LI.MergeValueNumberInto(DefVNI, ReadVNI); |
| 2127 | if (PrunedLanes.any()) { |
| 2128 | LLVM_DEBUG(dbgs() << "Pruning undef incoming lanes: " << PrunedLanes |
| 2129 | << '\n'); |
| 2130 | setUndefOnPrunedSubRegUses(LI, CP.getSrcReg(), PrunedLanes); |
| 2131 | } |
| 2132 | |
| 2133 | LLVM_DEBUG(dbgs() << "\tMerged values: " << LI << '\n'); |
| 2134 | } |
| 2135 | deleteInstr(CopyMI); |
| 2136 | return true; |
| 2137 | } |
| 2138 | |
| 2139 | // Enforce policies. |
| 2140 | if (CP.isPhys()) { |
| 2141 | LLVM_DEBUG(dbgs() << "\tConsidering merging " |
| 2142 | << printReg(CP.getSrcReg(), TRI) << " with " |
| 2143 | << printReg(CP.getDstReg(), TRI, CP.getSrcIdx()) << '\n'); |
| 2144 | if (!canJoinPhys(CP)) { |
| 2145 | // Before giving up coalescing, try rematerializing the source of |
| 2146 | // the copy instead if it is cheap. |
| 2147 | bool IsDefCopy = false; |
| 2148 | if (reMaterializeDef(CP, CopyMI, IsDefCopy)) |
| 2149 | return true; |
| 2150 | if (IsDefCopy) |
| 2151 | Again = true; // May be possible to coalesce later. |
| 2152 | return false; |
| 2153 | } |
| 2154 | } else { |
| 2155 | // When possible, let DstReg be the larger interval. |
| 2156 | if (!CP.isPartial() && LIS->getInterval(CP.getSrcReg()).size() > |
| 2157 | LIS->getInterval(CP.getDstReg()).size()) |
| 2158 | CP.flip(); |
| 2159 | |
| 2160 | LLVM_DEBUG({ |
| 2161 | dbgs() << "\tConsidering merging to " |
| 2162 | << TRI->getRegClassName(CP.getNewRC()) << " with " ; |
| 2163 | if (CP.getDstIdx() && CP.getSrcIdx()) |
| 2164 | dbgs() << printReg(CP.getDstReg()) << " in " |
| 2165 | << TRI->getSubRegIndexName(CP.getDstIdx()) << " and " |
| 2166 | << printReg(CP.getSrcReg()) << " in " |
| 2167 | << TRI->getSubRegIndexName(CP.getSrcIdx()) << '\n'; |
| 2168 | else |
| 2169 | dbgs() << printReg(CP.getSrcReg(), TRI) << " in " |
| 2170 | << printReg(CP.getDstReg(), TRI, CP.getSrcIdx()) << '\n'; |
| 2171 | }); |
| 2172 | } |
| 2173 | |
| 2174 | ShrinkMask = LaneBitmask::getNone(); |
| 2175 | ShrinkMainRange = false; |
| 2176 | |
| 2177 | // Okay, attempt to join these two intervals. On failure, this returns false. |
| 2178 | // Otherwise, if one of the intervals being joined is a physreg, this method |
| 2179 | // always canonicalizes DstInt to be it. The output "SrcInt" will not have |
| 2180 | // been modified, so we can use this information below to update aliases. |
| 2181 | if (!joinIntervals(CP)) { |
| 2182 | // Coalescing failed. |
| 2183 | |
| 2184 | // Try rematerializing the definition of the source if it is cheap. |
| 2185 | bool IsDefCopy = false; |
| 2186 | if (reMaterializeDef(CP, CopyMI, IsDefCopy)) |
| 2187 | return true; |
| 2188 | |
| 2189 | // If we can eliminate the copy without merging the live segments, do so |
| 2190 | // now. |
| 2191 | if (!CP.isPartial() && !CP.isPhys()) { |
| 2192 | bool Changed = adjustCopiesBackFrom(CP, CopyMI); |
| 2193 | bool Shrink = false; |
| 2194 | if (!Changed) |
| 2195 | std::tie(Changed, Shrink) = removeCopyByCommutingDef(CP, CopyMI); |
| 2196 | if (Changed) { |
| 2197 | deleteInstr(CopyMI); |
| 2198 | if (Shrink) { |
| 2199 | Register DstReg = CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg(); |
| 2200 | LiveInterval &DstLI = LIS->getInterval(DstReg); |
| 2201 | shrinkToUses(&DstLI); |
| 2202 | LLVM_DEBUG(dbgs() << "\t\tshrunk: " << DstLI << '\n'); |
| 2203 | } |
| 2204 | LLVM_DEBUG(dbgs() << "\tTrivial!\n"); |
| 2205 | return true; |
| 2206 | } |
| 2207 | } |
| 2208 | |
| 2209 | // Try and see if we can partially eliminate the copy by moving the copy to |
| 2210 | // its predecessor. |
| 2211 | if (!CP.isPartial() && !CP.isPhys()) |
| 2212 | if (removePartialRedundancy(CP, *CopyMI)) |
| 2213 | return true; |
| 2214 | |
| 2215 | // Otherwise, we are unable to join the intervals. |
| 2216 | LLVM_DEBUG(dbgs() << "\tInterference!\n"); |
| 2217 | Again = true; // May be possible to coalesce later. |
| 2218 | return false; |
| 2219 | } |
| 2220 | |
| 2221 | // Coalescing to a virtual register that is of a sub-register class of the |
| 2222 | // other. Make sure the resulting register is set to the right register class. |
| 2223 | if (CP.isCrossClass()) { |
| 2224 | ++numCrossRCs; |
| 2225 | MRI->setRegClass(CP.getDstReg(), CP.getNewRC()); |
| 2226 | } |
| 2227 | |
| 2228 | // Removing sub-register copies can ease the register class constraints. |
| 2229 | // Make sure we attempt to inflate the register class of DstReg. |
| 2230 | if (!CP.isPhys() && RegClassInfo.isProperSubClass(CP.getNewRC())) |
| 2231 | InflateRegs.push_back(CP.getDstReg()); |
| 2232 | |
| 2233 | // CopyMI has been erased by joinIntervals at this point. Remove it from |
| 2234 | // ErasedInstrs since copyCoalesceWorkList() won't add a successful join back |
| 2235 | // to the work list. This keeps ErasedInstrs from growing needlessly. |
| 2236 | if (ErasedInstrs.erase(CopyMI)) |
| 2237 | // But we may encounter the instruction again in this iteration. |
| 2238 | CurrentErasedInstrs.insert(CopyMI); |
| 2239 | |
| 2240 | // Rewrite all SrcReg operands to DstReg. |
| 2241 | // Also update DstReg operands to include DstIdx if it is set. |
| 2242 | if (CP.getDstIdx()) |
| 2243 | updateRegDefsUses(CP.getDstReg(), CP.getDstReg(), CP.getDstIdx()); |
| 2244 | updateRegDefsUses(CP.getSrcReg(), CP.getDstReg(), CP.getSrcIdx()); |
| 2245 | |
| 2246 | // Shrink subregister ranges if necessary. |
| 2247 | if (ShrinkMask.any()) { |
| 2248 | LiveInterval &LI = LIS->getInterval(CP.getDstReg()); |
| 2249 | for (LiveInterval::SubRange &S : LI.subranges()) { |
| 2250 | if ((S.LaneMask & ShrinkMask).none()) |
| 2251 | continue; |
| 2252 | LLVM_DEBUG(dbgs() << "Shrink LaneUses (Lane " << PrintLaneMask(S.LaneMask) |
| 2253 | << ")\n"); |
| 2254 | LIS->shrinkToUses(S, LI.reg()); |
| 2255 | ShrinkMainRange = true; |
| 2256 | } |
| 2257 | LI.removeEmptySubRanges(); |
| 2258 | } |
| 2259 | |
| 2260 | // CP.getSrcReg()'s live interval has been merged into CP.getDstReg's live |
| 2261 | // interval. Since CP.getSrcReg() is in the ToBeUpdated set and its live |
| 2262 | // interval is not up-to-date, we need to update the merged live interval here. |
| 2263 | if (ToBeUpdated.count(CP.getSrcReg())) |
| 2264 | ShrinkMainRange = true; |
| 2265 | |
| 2266 | if (ShrinkMainRange) { |
| 2267 | LiveInterval &LI = LIS->getInterval(CP.getDstReg()); |
| 2268 | shrinkToUses(&LI); |
| 2269 | } |
| 2270 | |
| 2271 | // SrcReg is guaranteed to be the register whose live interval is being |
| 2272 | // merged. |
| 2273 | LIS->removeInterval(CP.getSrcReg()); |
| 2274 | |
| 2275 | // Update regalloc hint. |
| 2276 | TRI->updateRegAllocHint(CP.getSrcReg(), CP.getDstReg(), *MF); |
| 2277 | |
| 2278 | LLVM_DEBUG({ |
| 2279 | dbgs() << "\tSuccess: " << printReg(CP.getSrcReg(), TRI, CP.getSrcIdx()) |
| 2280 | << " -> " << printReg(CP.getDstReg(), TRI, CP.getDstIdx()) << '\n'; |
| 2281 | dbgs() << "\tResult = " ; |
| 2282 | if (CP.isPhys()) |
| 2283 | dbgs() << printReg(CP.getDstReg(), TRI); |
| 2284 | else |
| 2285 | dbgs() << LIS->getInterval(CP.getDstReg()); |
| 2286 | dbgs() << '\n'; |
| 2287 | }); |
| 2288 | |
| 2289 | ++numJoins; |
| 2290 | return true; |
| 2291 | } |
| 2292 | |
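| | // Attempt to join SrcReg into the reserved physical register DstReg by |
| | // deleting the identity copy; the reserved register's live range is kept |
| | // as a set of dead defs instead of being merged. |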
| 2293 | bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) { |
| 2294 | Register DstReg = CP.getDstReg(); |
| 2295 | Register SrcReg = CP.getSrcReg(); |
| 2296 | assert(CP.isPhys() && "Must be a physreg copy"); |
| 2297 | assert(MRI->isReserved(DstReg) && "Not a reserved register"); |
| 2298 | LiveInterval &RHS = LIS->getInterval(SrcReg); |
| 2299 | LLVM_DEBUG(dbgs() << "\t\tRHS = " << RHS << '\n'); |
| 2300 | |
| 2301 | assert(RHS.containsOneValue() && "Invalid join with reserved register"); |
| 2302 | |
| 2303 | // Optimization for reserved registers like ESP. We can only merge with a |
| 2304 | // reserved physreg if RHS has a single value that is a copy of DstReg. |
| 2305 | // The live range of the reserved register will look like a set of dead defs |
| 2306 | // - we don't properly track the live range of reserved registers. |
| 2307 | |
| 2308 | // Deny any overlapping intervals. This depends on all the reserved |
| 2309 | // register live ranges looking like dead defs. |
| 2310 | if (!MRI->isConstantPhysReg(DstReg)) { |
| 2311 | for (MCRegUnit Unit : TRI->regunits(DstReg)) { |
| 2312 | // Abort if not all the regunits are reserved. |
| 2313 | for (MCRegUnitRootIterator RI(Unit, TRI); RI.isValid(); ++RI) { |
| 2314 | if (!MRI->isReserved(*RI)) |
| 2315 | return false; |
| 2316 | } |
| 2317 | if (RHS.overlaps(LIS->getRegUnit(Unit))) { |
| 2318 | LLVM_DEBUG(dbgs() << "\t\tInterference: " << printRegUnit(Unit, TRI) |
| 2319 | << '\n'); |
| 2320 | return false; |
| 2321 | } |
| 2322 | } |
| 2323 | |
| 2324 | // We must also check for overlaps with regmask clobbers. |
| 2325 | BitVector RegMaskUsable; |
| 2326 | if (LIS->checkRegMaskInterference(RHS, RegMaskUsable) && |
| 2327 | !RegMaskUsable.test(DstReg.id())) { |
| 2328 | LLVM_DEBUG(dbgs() << "\t\tRegMask interference\n"); |
| 2329 | return false; |
| 2330 | } |
| 2331 | } |
| 2332 | |
| 2333 | // Skip any value computations, we are not adding new values to the |
| 2334 | // reserved register. Also skip merging the live ranges, the reserved |
| 2335 | // register live range doesn't need to be accurate as long as all the |
| 2336 | // defs are there. |
| 2337 | |
| 2338 | // Delete the identity copy. |
| 2339 | MachineInstr *CopyMI; |
| 2340 | if (CP.isFlipped()) { |
| 2341 | // Physreg is copied into vreg |
| 2342 | // %y = COPY %physreg_x |
| 2343 | // ... //< no other def of %physreg_x here |
| 2344 | // use %y |
| 2345 | // => |
| 2346 | // ... |
| 2347 | // use %physreg_x |
| 2348 | CopyMI = MRI->getVRegDef(SrcReg); |
| 2349 | deleteInstr(CopyMI); |
| 2350 | } else { |
| 2351 | // VReg is copied into physreg: |
| 2352 | // %y = def |
| 2353 | // ... //< no other def or use of %physreg_x here |
| 2354 | // %physreg_x = COPY %y |
| 2355 | // => |
| 2356 | // %physreg_x = def |
| 2357 | // ... |
| 2358 | if (!MRI->hasOneNonDBGUse(SrcReg)) { |
| 2359 | LLVM_DEBUG(dbgs() << "\t\tMultiple vreg uses!\n"); |
| 2360 | return false; |
| 2361 | } |
| 2362 | |
| 2363 | if (!LIS->intervalIsInOneMBB(RHS)) { |
| 2364 | LLVM_DEBUG(dbgs() << "\t\tComplex control flow!\n"); |
| 2365 | return false; |
| 2366 | } |
| 2367 | |
| 2368 | MachineInstr &DestMI = *MRI->getVRegDef(SrcReg); |
| 2369 | CopyMI = &*MRI->use_instr_nodbg_begin(SrcReg); |
| 2370 | SlotIndex CopyRegIdx = LIS->getInstructionIndex(*CopyMI).getRegSlot(); |
| 2371 | SlotIndex DestRegIdx = LIS->getInstructionIndex(DestMI).getRegSlot(); |
| 2372 | |
| 2373 | if (!MRI->isConstantPhysReg(DstReg)) { |
| 2374 | // We checked above that there are no interfering defs of the physical |
| 2375 | // register. However, for this case, where we intend to move up the def of |
| 2376 | // the physical register, we also need to check for interfering uses. |
| 2377 | SlotIndexes *Indexes = LIS->getSlotIndexes(); |
| 2378 | for (SlotIndex SI = Indexes->getNextNonNullIndex(DestRegIdx); |
| 2379 | SI != CopyRegIdx; SI = Indexes->getNextNonNullIndex(SI)) { |
| 2380 | MachineInstr *MI = LIS->getInstructionFromIndex(SI); |
| 2381 | if (MI->readsRegister(DstReg, TRI)) { |
| 2382 | LLVM_DEBUG(dbgs() << "\t\tInterference (read): " << *MI); |
| 2383 | return false; |
| 2384 | } |
| 2385 | } |
| 2386 | } |
| 2387 | |
| 2388 | // We're going to remove the copy which defines a physical reserved |
| 2389 | // register, so remove its valno, etc. |
| 2390 | LLVM_DEBUG(dbgs() << "\t\tRemoving phys reg def of " |
| 2391 | << printReg(DstReg, TRI) << " at " << CopyRegIdx << "\n" ); |
| 2392 | |
| 2393 | LIS->removePhysRegDefAt(DstReg.asMCReg(), CopyRegIdx); |
| 2394 | deleteInstr(CopyMI); |
| 2395 | |
| 2396 | // Create a new dead def at the new def location. |
| 2397 | for (MCRegUnit Unit : TRI->regunits(DstReg)) { |
| 2398 | LiveRange &LR = LIS->getRegUnit(Unit); |
| 2399 | LR.createDeadDef(DestRegIdx, LIS->getVNInfoAllocator()); |
| 2400 | } |
| 2401 | } |
| 2402 | |
| 2403 | // We don't track kills for reserved registers. |
| 2404 | MRI->clearKillFlags(CP.getSrcReg()); |
| 2405 | |
| 2406 | return true; |
| 2407 | } |
| 2408 | |
| 2409 | //===----------------------------------------------------------------------===// |
| 2410 | // Interference checking and interval joining |
| 2411 | //===----------------------------------------------------------------------===// |
| 2412 | // |
| 2413 | // In the easiest case, the two live ranges being joined are disjoint, and |
| 2414 | // there is no interference to consider. It is quite common, though, to have |
| 2415 | // overlapping live ranges, and we need to check if the interference can be |
| 2416 | // resolved. |
| 2417 | // |
| 2418 | // The live range of a single SSA value forms a sub-tree of the dominator tree. |
| 2419 | // This means that two SSA values overlap if and only if the def of one value |
| 2420 | // is contained in the live range of the other value. As a special case, the |
| 2421 | // overlapping values can be defined at the same index. |
| 2422 | // |
| 2423 | // The interference from an overlapping def can be resolved in these cases: |
| 2424 | // |
| 2425 | // 1. Coalescable copies. The value is defined by a copy that would become an |
| 2426 | // identity copy after joining SrcReg and DstReg. The copy instruction will |
| 2427 | // be removed, and the value will be merged with the source value. |
| 2428 | // |
| 2429 | // There can be several copies back and forth, causing many values to be |
| 2430 | // merged into one. We compute a list of ultimate values in the joined live |
| 2431 | // range as well as a mapping from the old value numbers. |
//
// 2. IMPLICIT_DEF. This instruction is only inserted to ensure all PHI
//    predecessors have a live out value. It doesn't cause real interference,
//    and can be merged into the value it overlaps. Like a coalescable copy, it
//    can be erased after joining.
//
// 3. Copy of external value. The overlapping def may be a copy of a value that
//    is already in the other register. This is like a coalescable copy, but
//    the live range of the source register must be trimmed after erasing the
//    copy instruction:
//
//      %src = COPY %ext
//      %dst = COPY %ext  <-- Remove this COPY, trim the live range of %ext.
//
// 4. Clobbering undefined lanes. Vector registers are sometimes built by
//    defining one lane at a time:
//
//      %dst:ssub0<def,read-undef> = FOO
//      %src = BAR
//      %dst:ssub1 = COPY %src
//
//    The live range of %src overlaps the %dst value defined by FOO, but
//    merging %src into %dst:ssub1 is only going to clobber the ssub1 lane
//    which was undef anyway.
//
//    The value mapping is more complicated in this case. The final live range
//    will have different value numbers for both FOO and BAR, but there is no
//    simple mapping from old to new values. It may even be necessary to add
//    new PHI values.
//
// 5. Clobbering dead lanes. A def may clobber a lane of a vector register that
//    is live, but never read. This can happen because we don't compute
//    individual live ranges per lane.
//
//      %dst = FOO
//      %src = BAR
//      %dst:ssub1 = COPY %src
//
//    This kind of interference is only resolved locally. If the clobbered
//    lane value escapes the block, the join is aborted.

namespace {

/// Track information about values in a single virtual register about to be
/// joined. Objects of this class are always created in pairs - one for each
/// side of the CoalescerPair (or one for each lane of a side of the coalescer
/// pair).
class JoinVals {
  /// Live range we work on.
  LiveRange &LR;

  /// (Main) register we work on.
  const Register Reg;

  /// Reg (and therefore the values in this liverange) will end up as
  /// subregister SubIdx in the coalesced register. Either CP.DstIdx or
  /// CP.SrcIdx.
  const unsigned SubIdx;

  /// The lane mask that this live range will occupy in the coalesced
  /// register. May be smaller than the lanemask produced by SubIdx when
  /// merging subranges.
  const LaneBitmask LaneMask;

  /// This is true when joining sub register ranges, false when joining main
  /// ranges.
  const bool SubRangeJoin;

  /// Whether the current LiveInterval tracks subregister liveness.
  const bool TrackSubRegLiveness;

  /// Values that will be present in the final live range.
  SmallVectorImpl<VNInfo *> &NewVNInfo;

  const CoalescerPair &CP;
  LiveIntervals *LIS;
  SlotIndexes *Indexes;
  const TargetRegisterInfo *TRI;

  /// Value number assignments. Maps value numbers in LI to entries in
  /// NewVNInfo. This is suitable for passing to LiveInterval::join().
  SmallVector<int, 8> Assignments;

public:
  /// Conflict resolution for overlapping values.
  enum ConflictResolution {
    /// No overlap, simply keep this value.
    CR_Keep,

    /// Merge this value into OtherVNI and erase the defining instruction.
    /// Used for IMPLICIT_DEF, coalescable copies, and copies from external
    /// values.
    CR_Erase,

    /// Merge this value into OtherVNI but keep the defining instruction.
    /// This is for the special case where OtherVNI is defined by the same
    /// instruction.
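    /// For example, this can happen when both sides have a PHI-def at the
    /// start of the same basic block; the two block-live-in values become one.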
    CR_Merge,

    /// Keep this value, and have it replace OtherVNI where possible. This
    /// complicates value mapping since OtherVNI maps to two different values
    /// before and after this def.
    /// Used when clobbering undefined or dead lanes.
    CR_Replace,

    /// Unresolved conflict. Visit later when all values have been mapped.
    CR_Unresolved,

    /// Unresolvable conflict. Abort the join.
    CR_Impossible
  };

private:
  /// Per-value info for LI. The lane bit masks are all relative to the final
  /// joined register, so they can be compared directly between SrcReg and
  /// DstReg.
  struct Val {
    ConflictResolution Resolution = CR_Keep;

    /// Lanes written by this def, 0 for unanalyzed values.
    LaneBitmask WriteLanes;

    /// Lanes with defined values in this register. Other lanes are undef and
    /// safe to clobber.
    LaneBitmask ValidLanes;

    /// Value in LI being redefined by this def.
    VNInfo *RedefVNI = nullptr;

    /// Value in the other live range that overlaps this def, if any.
    VNInfo *OtherVNI = nullptr;

    /// Is this value an IMPLICIT_DEF that can be erased?
    ///
    /// IMPLICIT_DEF values should only exist at the end of a basic block that
    /// is a predecessor to a phi-value. These IMPLICIT_DEF instructions can be
    /// safely erased if they are overlapping a live value in the other live
    /// interval.
    ///
    /// Weird control flow graphs and incomplete PHI handling in
    /// ProcessImplicitDefs can very rarely create IMPLICIT_DEF values with
    /// longer live ranges. Such IMPLICIT_DEF values should be treated like
    /// normal values.
    bool ErasableImplicitDef = false;

    /// True when the live range of this value will be pruned because of an
    /// overlapping CR_Replace value in the other live range.
    bool Pruned = false;

    /// True once Pruned above has been computed.
    bool PrunedComputed = false;

    /// True if this value is determined to be identical to OtherVNI
    /// (in valuesIdentical). This is used with CR_Erase where the erased
    /// copy is redundant, i.e. the source value is already the same as
    /// the destination. In such cases the subranges need to be updated
    /// properly. See comment at pruneSubRegValues for more info.
    bool Identical = false;

    Val() = default;

    bool isAnalyzed() const { return WriteLanes.any(); }

    /// Mark this value as an IMPLICIT_DEF which must be kept as if it were an
    /// ordinary value.
    void mustKeepImplicitDef(const TargetRegisterInfo &TRI,
                             const MachineInstr &ImpDef) {
      assert(ImpDef.isImplicitDef());
      ErasableImplicitDef = false;
      ValidLanes =
          TRI.getSubRegIndexLaneMask(ImpDef.getOperand(0).getSubReg());
    }
  };

  /// One entry per value number in LI.
  SmallVector<Val, 8> Vals;

  /// Compute the bitmask of lanes actually written by DefMI.
  /// Set Redef if there are any partial register definitions that depend on the
  /// previous value of the register.
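  ///
  /// For example (illustrative), "%reg:ssub1 = FOO" writes only the ssub1
  /// lanes; if its def operand is not marked <read-undef>, the instruction
  /// also depends on the old value of the register, so Redef is set.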
  LaneBitmask computeWriteLanes(const MachineInstr *DefMI, bool &Redef) const;

  /// Find the ultimate value that VNI was copied from.
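  /// For example, given full copies "%2 = COPY %1" and "%3 = COPY %2" (all
  /// virtual registers), following the chain from a value of %3 ends at the
  /// def of %1. The walk stops at partial copies, physical-register sources,
  /// and PHI-defs.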
  std::pair<const VNInfo *, Register> followCopyChain(const VNInfo *VNI) const;

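  /// Check if Value0 and Value1 can be proven to be copies of the same
  /// ultimate source value, following full-copy chains on both sides, e.g.
  /// "%other = COPY %ext" and "%this = COPY %ext".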
  bool valuesIdentical(VNInfo *Value0, VNInfo *Value1,
                       const JoinVals &Other) const;

  /// Analyze ValNo in this live range, and set all fields of Vals[ValNo].
  /// Return a conflict resolution when possible, but leave the hard cases as
  /// CR_Unresolved.
  /// Recursively calls computeAssignment() on this and Other, guaranteeing that
  /// both OtherVNI and RedefVNI have been analyzed and mapped before returning.
  /// The recursion always goes upwards in the dominator tree, making loops
  /// impossible.
  ConflictResolution analyzeValue(unsigned ValNo, JoinVals &Other);

  /// Compute the value assignment for ValNo in LR.
  /// This may be called recursively by analyzeValue(), but never for a ValNo on
  /// the stack.
  void computeAssignment(unsigned ValNo, JoinVals &Other);

  /// Assuming ValNo is going to clobber some valid lanes in Other.LR, compute
  /// the extent of the tainted lanes in the block.
  ///
  /// Multiple values in Other.LR can be affected since partial redefinitions
  /// can preserve previously tainted lanes.
  ///
  ///   1 %dst = VLOAD           <-- Define all lanes in %dst
  ///   2 %src = FOO             <-- ValNo to be joined with %dst:ssub0
  ///   3 %dst:ssub1 = BAR       <-- Partial redef doesn't clear taint in ssub0
  ///   4 %dst:ssub0 = COPY %src <-- Conflict resolved, ssub0 wasn't read
  ///
  /// For each ValNo in Other that is affected, add an (EndIndex, TaintedLanes)
  /// entry to TaintedVals.
  ///
  /// Returns false if the tainted lanes extend beyond the basic block.
  bool
  taintExtent(unsigned ValNo, LaneBitmask TaintedLanes, JoinVals &Other,
              SmallVectorImpl<std::pair<SlotIndex, LaneBitmask>> &TaintExtent);

  /// Return true if MI uses any of the given Lanes from Reg.
  /// This does not include partial redefinitions of Reg.
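  /// For example (with SubIdx == 0), if Lanes covers only the ssub0 lanes, an
  /// instruction reading Reg.ssub0 reports true, while one reading only
  /// Reg.ssub1 does not.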
  bool usesLanes(const MachineInstr &MI, Register, unsigned, LaneBitmask) const;

  /// Determine if ValNo is a copy of a value number in LR or Other.LR that will
  /// be pruned:
  ///
  ///   %dst = COPY %src
  ///   %src = COPY %dst  <-- This value to be pruned.
  ///   %dst = COPY %src  <-- This value is a copy of a pruned value.
  bool isPrunedValue(unsigned ValNo, JoinVals &Other);

public:
  JoinVals(LiveRange &LR, Register Reg, unsigned SubIdx, LaneBitmask LaneMask,
           SmallVectorImpl<VNInfo *> &newVNInfo, const CoalescerPair &cp,
           LiveIntervals *lis, const TargetRegisterInfo *TRI, bool SubRangeJoin,
           bool TrackSubRegLiveness)
      : LR(LR), Reg(Reg), SubIdx(SubIdx), LaneMask(LaneMask),
        SubRangeJoin(SubRangeJoin), TrackSubRegLiveness(TrackSubRegLiveness),
        NewVNInfo(newVNInfo), CP(cp), LIS(lis), Indexes(LIS->getSlotIndexes()),
        TRI(TRI), Assignments(LR.getNumValNums(), -1),
        Vals(LR.getNumValNums()) {}

  /// Analyze defs in LR and compute a value mapping in NewVNInfo.
  /// Returns false if any conflicts were impossible to resolve.
  bool mapValues(JoinVals &Other);

  /// Try to resolve conflicts that require all values to be mapped.
  /// Returns false if any conflicts were impossible to resolve.
  bool resolveConflicts(JoinVals &Other);

  /// Prune the live range of values in Other.LR where they would conflict with
  /// CR_Replace values in LR. Collect end points for restoring the live range
  /// after joining.
  void pruneValues(JoinVals &Other, SmallVectorImpl<SlotIndex> &EndPoints,
                   bool changeInstrs);

  /// Removes subranges starting at copies that get removed. This sometimes
  /// happens when undefined subranges are copied around. These ranges contain
  /// no useful information and can be removed.
  void pruneSubRegValues(LiveInterval &LI, LaneBitmask &ShrinkMask);

  /// Pruning values in subranges can lead to removing segments in these
  /// subranges started by IMPLICIT_DEFs. The corresponding segments in
  /// the main range also need to be removed. This function will mark
  /// the corresponding values in the main range as pruned, so that
  /// eraseInstrs can do the final cleanup.
  /// The parameter @p LI must be the interval whose main range is the
  /// live range LR.
  void pruneMainSegments(LiveInterval &LI, bool &ShrinkMainRange);

  /// Erase any machine instructions that have been coalesced away.
  /// Add erased instructions to ErasedInstrs.
  /// Add foreign virtual registers to ShrinkRegs if their live range ended at
  /// the erased instrs.
  void eraseInstrs(SmallPtrSetImpl<MachineInstr *> &ErasedInstrs,
                   SmallVectorImpl<Register> &ShrinkRegs,
                   LiveInterval *LI = nullptr);

  /// Remove liverange defs at places where implicit defs will be removed.
  void removeImplicitDefs();

  /// Get the value assignments suitable for passing to LiveInterval::join.
  const int *getAssignments() const { return Assignments.data(); }

  /// Get the conflict resolution for a value number.
  ConflictResolution getResolution(unsigned Num) const {
    return Vals[Num].Resolution;
  }
};

} // end anonymous namespace

LaneBitmask JoinVals::computeWriteLanes(const MachineInstr *DefMI,
                                        bool &Redef) const {
  LaneBitmask L;
  for (const MachineOperand &MO : DefMI->all_defs()) {
    if (MO.getReg() != Reg)
      continue;
    L |= TRI->getSubRegIndexLaneMask(
        TRI->composeSubRegIndices(SubIdx, MO.getSubReg()));
    if (MO.readsReg())
      Redef = true;
  }
  return L;
}

std::pair<const VNInfo *, Register>
JoinVals::followCopyChain(const VNInfo *VNI) const {
  Register TrackReg = Reg;

  while (!VNI->isPHIDef()) {
    SlotIndex Def = VNI->def;
    MachineInstr *MI = Indexes->getInstructionFromIndex(Def);
    assert(MI && "No defining instruction");
    if (!MI->isFullCopy())
      return std::make_pair(VNI, TrackReg);
    Register SrcReg = MI->getOperand(1).getReg();
    if (!SrcReg.isVirtual())
      return std::make_pair(VNI, TrackReg);

    const LiveInterval &LI = LIS->getInterval(SrcReg);
    const VNInfo *ValueIn;
    // No subrange involved.
    if (!SubRangeJoin || !LI.hasSubRanges()) {
      LiveQueryResult LRQ = LI.Query(Def);
      ValueIn = LRQ.valueIn();
    } else {
      // Query subranges. Ensure that all matching ones take us to the same def
      // (allowing some of them to be undef).
      ValueIn = nullptr;
      for (const LiveInterval::SubRange &S : LI.subranges()) {
        // Transform lanemask to a mask in the joined live interval.
        LaneBitmask SMask = TRI->composeSubRegIndexLaneMask(SubIdx, S.LaneMask);
        if ((SMask & LaneMask).none())
          continue;
        LiveQueryResult LRQ = S.Query(Def);
        if (!ValueIn) {
          ValueIn = LRQ.valueIn();
          continue;
        }
        if (LRQ.valueIn() && ValueIn != LRQ.valueIn())
          return std::make_pair(VNI, TrackReg);
      }
    }
    if (ValueIn == nullptr) {
      // Reaching an undefined value is legitimate, for example:
      //
      // 1 undef %0.sub1 = ...  ;; %0.sub0 == undef
      // 2 %1 = COPY %0         ;; %1 is defined here.
      // 3 %0 = COPY %1         ;; Now %0.sub0 has a definition,
      //                        ;; but it's equivalent to "undef".
      return std::make_pair(nullptr, SrcReg);
    }
    VNI = ValueIn;
    TrackReg = SrcReg;
  }
  return std::make_pair(VNI, TrackReg);
}

bool JoinVals::valuesIdentical(VNInfo *Value0, VNInfo *Value1,
                               const JoinVals &Other) const {
  const VNInfo *Orig0;
  Register Reg0;
  std::tie(Orig0, Reg0) = followCopyChain(Value0);
  if (Orig0 == Value1 && Reg0 == Other.Reg)
    return true;

  const VNInfo *Orig1;
  Register Reg1;
  std::tie(Orig1, Reg1) = Other.followCopyChain(Value1);
  // If both values are undefined, and the source registers are the same
  // register, the values are identical. Filter out cases where only one
  // value is defined.
  if (Orig0 == nullptr || Orig1 == nullptr)
    return Orig0 == Orig1 && Reg0 == Reg1;

  // The values are equal if they are defined at the same place and use the
  // same register. Note that we cannot compare VNInfos directly as some of
  // them might be from a copy created in mergeSubRangeInto() while the other
  // is from the original LiveInterval.
  return Orig0->def == Orig1->def && Reg0 == Reg1;
}

JoinVals::ConflictResolution JoinVals::analyzeValue(unsigned ValNo,
                                                    JoinVals &Other) {
  Val &V = Vals[ValNo];
  assert(!V.isAnalyzed() && "Value has already been analyzed!");
  VNInfo *VNI = LR.getValNumInfo(ValNo);
  if (VNI->isUnused()) {
    V.WriteLanes = LaneBitmask::getAll();
    return CR_Keep;
  }

  // Get the instruction defining this value, compute the lanes written.
  const MachineInstr *DefMI = nullptr;
  if (VNI->isPHIDef()) {
    // Conservatively assume that all lanes in a PHI are valid.
    LaneBitmask Lanes = SubRangeJoin ? LaneBitmask::getLane(0)
                                     : TRI->getSubRegIndexLaneMask(SubIdx);
    V.ValidLanes = V.WriteLanes = Lanes;
  } else {
    DefMI = Indexes->getInstructionFromIndex(VNI->def);
    assert(DefMI != nullptr);
    if (SubRangeJoin) {
      // We don't care about the lanes when joining subregister ranges.
      V.WriteLanes = V.ValidLanes = LaneBitmask::getLane(0);
      if (DefMI->isImplicitDef()) {
        V.ValidLanes = LaneBitmask::getNone();
        V.ErasableImplicitDef = true;
      }
    } else {
      bool Redef = false;
      V.ValidLanes = V.WriteLanes = computeWriteLanes(DefMI, Redef);

      // If this is a read-modify-write instruction, there may be more valid
      // lanes than the ones written by this instruction.
      // This only covers partial redef operands. DefMI may have normal use
      // operands reading the register. They don't contribute valid lanes.
      //
      // This adds ssub1 to the set of valid lanes in %src:
      //
      //   %src:ssub1 = FOO
      //
      // This leaves only ssub1 valid, making any other lanes undef:
      //
      //   %src:ssub1<def,read-undef> = FOO %src:ssub2
      //
      // The <read-undef> flag on the def operand means that old lane values are
      // not important.
      if (Redef) {
        V.RedefVNI = LR.Query(VNI->def).valueIn();
        assert((TrackSubRegLiveness || V.RedefVNI) &&
               "Instruction is reading nonexistent value");
        if (V.RedefVNI != nullptr) {
          computeAssignment(V.RedefVNI->id, Other);
          V.ValidLanes |= Vals[V.RedefVNI->id].ValidLanes;
        }
      }

      // An IMPLICIT_DEF writes undef values.
      if (DefMI->isImplicitDef()) {
        // We normally expect IMPLICIT_DEF values to be live only until the end
        // of their block. If the value is really live longer and gets pruned in
        // another block, this flag is cleared again.
        //
        // Clearing the valid lanes is deferred until it is sure this can be
        // erased.
        V.ErasableImplicitDef = true;
      }
    }
  }

  // Find the value in Other that overlaps VNI->def, if any.
  LiveQueryResult OtherLRQ = Other.LR.Query(VNI->def);

  // It is possible that both values are defined by the same instruction, or
  // the values are PHIs defined in the same block. When that happens, the two
  // values should be merged into one, but not into any preceding value.
  // The first value defined or visited gets CR_Keep, the other gets CR_Merge.
  if (VNInfo *OtherVNI = OtherLRQ.valueDefined()) {
    assert(SlotIndex::isSameInstr(VNI->def, OtherVNI->def) && "Broken LRQ");

    // One value stays, the other is merged. Keep the earlier one, or the first
    // one we see.
    if (OtherVNI->def < VNI->def)
      Other.computeAssignment(OtherVNI->id, *this);
    else if (VNI->def < OtherVNI->def && OtherLRQ.valueIn()) {
      // This is an early-clobber def overlapping a live-in value in the other
      // register. Not mergeable.
      V.OtherVNI = OtherLRQ.valueIn();
      return CR_Impossible;
    }
    V.OtherVNI = OtherVNI;
    Val &OtherV = Other.Vals[OtherVNI->id];
    // Keep this value, check for conflicts when analyzing OtherVNI. Avoid
    // revisiting OtherVNI->id in JoinVals::computeAssignment() below before it
    // is assigned.
    if (!OtherV.isAnalyzed() || Other.Assignments[OtherVNI->id] == -1)
      return CR_Keep;
    // Both sides have been analyzed now.
    // Allow overlapping PHI values. Any real interference would show up in a
    // predecessor, the PHI itself can't introduce any conflicts.
    if (VNI->isPHIDef())
      return CR_Merge;
    if ((V.ValidLanes & OtherV.ValidLanes).any())
      // Overlapping lanes can't be resolved.
      return CR_Impossible;
    return CR_Merge;
  }

  // No simultaneous def. Is Other live at the def?
  V.OtherVNI = OtherLRQ.valueIn();
  if (!V.OtherVNI)
    // No overlap, no conflict.
    return CR_Keep;

  assert(!SlotIndex::isSameInstr(VNI->def, V.OtherVNI->def) && "Broken LRQ");

  // We have overlapping values, or possibly a kill of Other.
  // Recursively compute assignments up the dominator tree.
  Other.computeAssignment(V.OtherVNI->id, *this);
  Val &OtherV = Other.Vals[V.OtherVNI->id];

  if (OtherV.ErasableImplicitDef) {
    // Check if OtherV is an IMPLICIT_DEF that extends beyond its basic block.
    // This shouldn't normally happen, but ProcessImplicitDefs can leave such
    // IMPLICIT_DEF instructions behind, and there is nothing wrong with it
    // technically.
    //
    // When it happens, treat that IMPLICIT_DEF as a normal value, and don't try
    // to erase the IMPLICIT_DEF instruction.
    //
    // Additionally we must keep an IMPLICIT_DEF if we're redefining an incoming
    // value.

    MachineInstr *OtherImpDef =
        Indexes->getInstructionFromIndex(V.OtherVNI->def);
    MachineBasicBlock *OtherMBB = OtherImpDef->getParent();
    if (DefMI &&
        (DefMI->getParent() != OtherMBB || LIS->isLiveInToMBB(LR, OtherMBB))) {
      LLVM_DEBUG(dbgs() << "IMPLICIT_DEF defined at " << V.OtherVNI->def
                        << " extends into "
                        << printMBBReference(*DefMI->getParent())
                        << ", keeping it.\n");
      OtherV.mustKeepImplicitDef(*TRI, *OtherImpDef);
    } else if (OtherMBB->hasEHPadSuccessor()) {
      // If OtherV is defined in a basic block that has EH pad successors then
      // we get the same problem not just if OtherV is live beyond its basic
      // block, but beyond the last call instruction in its basic block. Handle
      // this case conservatively.
      LLVM_DEBUG(
          dbgs() << "IMPLICIT_DEF defined at " << V.OtherVNI->def
                 << " may be live into EH pad successors, keeping it.\n");
      OtherV.mustKeepImplicitDef(*TRI, *OtherImpDef);
    } else {
      // We deferred clearing these lanes in case we needed to save them.
      OtherV.ValidLanes &= ~OtherV.WriteLanes;
    }
  }

  // Allow overlapping PHI values. Any real interference would show up in a
  // predecessor, the PHI itself can't introduce any conflicts.
  if (VNI->isPHIDef())
    return CR_Replace;

  // Check for simple erasable conflicts.
  if (DefMI->isImplicitDef())
    return CR_Erase;

  // Include the non-conflict where DefMI is a coalescable copy that kills
  // OtherVNI. We still want the copy erased and value numbers merged.
  if (CP.isCoalescable(DefMI)) {
    // Some of the lanes copied from OtherVNI may be undef, making them undef
    // here too.
    V.ValidLanes &= ~V.WriteLanes | OtherV.ValidLanes;
    return CR_Erase;
  }

  // This may not be a real conflict if DefMI simply kills Other and defines
  // VNI.
  if (OtherLRQ.isKill() && OtherLRQ.endPoint() <= VNI->def)
    return CR_Keep;

  // Handle the case where VNI and OtherVNI can be proven to be identical:
  //
  //   %other = COPY %ext
  //   %this  = COPY %ext <-- Erase this copy
  //
  if (DefMI->isFullCopy() && !CP.isPartial() &&
      valuesIdentical(VNI, V.OtherVNI, Other)) {
    V.Identical = true;
    return CR_Erase;
  }

  // The remaining checks apply to the lanes, which aren't tracked here. When
  // joining subranges, those lane conflicts were already decided to be OK on
  // the main range via the CR_Replace conditions below, so return CR_Replace.
  if (SubRangeJoin)
    return CR_Replace;

  // If the lanes written by this instruction were all undef in OtherVNI, it is
  // still safe to join the live ranges. This can't be done with a simple value
  // mapping, though - OtherVNI will map to multiple values:
  //
  //   1 %dst:ssub0 = FOO                <-- OtherVNI
  //   2 %src = BAR                      <-- VNI
  //   3 %dst:ssub1 = COPY killed %src   <-- Eliminate this copy.
  //   4 BAZ killed %dst
  //   5 QUUX killed %src
  //
  // Here OtherVNI will map to itself in [1;2), but to VNI in [2;5). CR_Replace
  // handles this complex value mapping.
  if ((V.WriteLanes & OtherV.ValidLanes).none())
    return CR_Replace;

  // If the other live range is killed by DefMI and the live ranges are still
  // overlapping, it must be because we're looking at an early clobber def:
  //
  //   %dst<def,early-clobber> = ASM killed %src
  //
  // In this case, it is illegal to merge the two live ranges since the early
  // clobber def would clobber %src before it was read.
  if (OtherLRQ.isKill()) {
    // The case where the def doesn't overlap the kill is handled above.
    assert(VNI->def.isEarlyClobber() &&
           "Only early clobber defs can overlap a kill");
    return CR_Impossible;
  }

  // VNI is clobbering live lanes in OtherVNI, but there is still the
  // possibility that no instructions actually read the clobbered lanes.
  // If we're clobbering all the lanes in OtherVNI, at least one must be read.
  // Otherwise Other.LR wouldn't be live here.
  if ((TRI->getSubRegIndexLaneMask(Other.SubIdx) & ~V.WriteLanes).none())
    return CR_Impossible;

  if (TrackSubRegLiveness) {
    auto &OtherLI = LIS->getInterval(Other.Reg);
    // If OtherVNI does not have subranges, it means all the lanes of OtherVNI
    // share the same live range, so we just need to check whether they have
    // any conflict bit in their LaneMask.
    if (!OtherLI.hasSubRanges()) {
      LaneBitmask OtherMask = TRI->getSubRegIndexLaneMask(Other.SubIdx);
      return (OtherMask & V.WriteLanes).none() ? CR_Replace : CR_Impossible;
    }

    // If we are clobbering some active lanes of OtherVNI at VNI->def, it is
    // impossible to resolve the conflict. Otherwise, we can just replace
    // OtherVNI because there is no real conflict.
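    //
    // For example (illustrative): if VNI writes only the ssub1 lanes and the
    // only subrange of OtherLI live across VNI->def covers ssub0, the masks
    // don't intersect and the loop below finds no real conflict.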
    for (LiveInterval::SubRange &OtherSR : OtherLI.subranges()) {
      LaneBitmask OtherMask =
          TRI->composeSubRegIndexLaneMask(Other.SubIdx, OtherSR.LaneMask);
      if ((OtherMask & V.WriteLanes).none())
        continue;

      auto OtherSRQ = OtherSR.Query(VNI->def);
      if (OtherSRQ.valueIn() && OtherSRQ.endPoint() > VNI->def) {
        // VNI is clobbering some lanes of OtherVNI; this is a real conflict.
        return CR_Impossible;
      }
    }

    // VNI is NOT clobbering any lane of OtherVNI, just replace OtherVNI.
    return CR_Replace;
  }

  // We need to verify that no instructions are reading the clobbered lanes.
  // To save compile time, we'll only check that locally. Don't allow the
  // tainted value to escape the basic block.
  MachineBasicBlock *MBB = Indexes->getMBBFromIndex(VNI->def);
  if (OtherLRQ.endPoint() >= Indexes->getMBBEndIdx(MBB))
    return CR_Impossible;

  // There are still some things that could go wrong besides clobbered lanes
  // being read, for example OtherVNI may be only partially redefined in MBB,
  // and some clobbered lanes could escape the block. Save this analysis for
  // resolveConflicts() when all values have been mapped. We need to know
  // RedefVNI and WriteLanes for any later defs in MBB, and we can't compute
  // that now - the recursive analyzeValue() calls must go upwards in the
  // dominator tree.
  return CR_Unresolved;
}

void JoinVals::computeAssignment(unsigned ValNo, JoinVals &Other) {
  Val &V = Vals[ValNo];
  if (V.isAnalyzed()) {
    // Recursion should always move up the dominator tree, so ValNo is not
    // supposed to reappear before it has been assigned.
    assert(Assignments[ValNo] != -1 && "Bad recursion?");
    return;
  }
  switch ((V.Resolution = analyzeValue(ValNo, Other))) {
  case CR_Erase:
  case CR_Merge:
    // Merge this ValNo into OtherVNI.
    assert(V.OtherVNI && "OtherVNI not assigned, can't merge.");
    assert(Other.Vals[V.OtherVNI->id].isAnalyzed() && "Missing recursion");
    Assignments[ValNo] = Other.Assignments[V.OtherVNI->id];
    LLVM_DEBUG(dbgs() << "\t\tmerge " << printReg(Reg) << ':' << ValNo << '@'
                      << LR.getValNumInfo(ValNo)->def << " into "
                      << printReg(Other.Reg) << ':' << V.OtherVNI->id << '@'
                      << V.OtherVNI->def << " --> @"
                      << NewVNInfo[Assignments[ValNo]]->def << '\n');
    break;
  case CR_Replace:
  case CR_Unresolved: {
    // The other value is going to be pruned if this join is successful.
    assert(V.OtherVNI && "OtherVNI not assigned, can't prune");
    Val &OtherV = Other.Vals[V.OtherVNI->id];
    OtherV.Pruned = true;
    [[fallthrough]];
  }
  default:
    // This value number needs to go in the final joined live range.
    Assignments[ValNo] = NewVNInfo.size();
    NewVNInfo.push_back(LR.getValNumInfo(ValNo));
    break;
  }
}

bool JoinVals::mapValues(JoinVals &Other) {
  for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
    computeAssignment(i, Other);
    if (Vals[i].Resolution == CR_Impossible) {
      LLVM_DEBUG(dbgs() << "\t\tinterference at " << printReg(Reg) << ':' << i
                        << '@' << LR.getValNumInfo(i)->def << '\n');
      return false;
    }
  }
  return true;
}

bool JoinVals::taintExtent(
    unsigned ValNo, LaneBitmask TaintedLanes, JoinVals &Other,
    SmallVectorImpl<std::pair<SlotIndex, LaneBitmask>> &TaintExtent) {
  VNInfo *VNI = LR.getValNumInfo(ValNo);
  MachineBasicBlock *MBB = Indexes->getMBBFromIndex(VNI->def);
  SlotIndex MBBEnd = Indexes->getMBBEndIdx(MBB);

  // Scan Other.LR from VNI.def to MBBEnd.
  LiveInterval::iterator OtherI = Other.LR.find(VNI->def);
  assert(OtherI != Other.LR.end() && "No conflict?");
  do {
    // OtherI is pointing to a tainted value. Abort the join if the tainted
    // lanes escape the block.
    SlotIndex End = OtherI->end;
    if (End >= MBBEnd) {
      LLVM_DEBUG(dbgs() << "\t\ttaints global " << printReg(Other.Reg) << ':'
                        << OtherI->valno->id << '@' << OtherI->start << '\n');
      return false;
    }
    LLVM_DEBUG(dbgs() << "\t\ttaints local " << printReg(Other.Reg) << ':'
                      << OtherI->valno->id << '@' << OtherI->start << " to "
                      << End << '\n');
    // A dead def is not a problem.
    if (End.isDead())
      break;
    TaintExtent.push_back(std::make_pair(End, TaintedLanes));

    // Check for another def in the MBB.
    if (++OtherI == Other.LR.end() || OtherI->start >= MBBEnd)
      break;

    // Lanes written by the new def are no longer tainted.
    const Val &OV = Other.Vals[OtherI->valno->id];
    TaintedLanes &= ~OV.WriteLanes;
    if (!OV.RedefVNI)
      break;
  } while (TaintedLanes.any());
  return true;
}

bool JoinVals::usesLanes(const MachineInstr &MI, Register Reg, unsigned SubIdx,
                         LaneBitmask Lanes) const {
  if (MI.isDebugOrPseudoInstr())
    return false;
  for (const MachineOperand &MO : MI.all_uses()) {
    if (MO.getReg() != Reg)
      continue;
    if (!MO.readsReg())
      continue;
    unsigned S = TRI->composeSubRegIndices(SubIdx, MO.getSubReg());
    if ((Lanes & TRI->getSubRegIndexLaneMask(S)).any())
      return true;
  }
  return false;
}

bool JoinVals::resolveConflicts(JoinVals &Other) {
  for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
    Val &V = Vals[i];
    assert(V.Resolution != CR_Impossible && "Unresolvable conflict");
    if (V.Resolution != CR_Unresolved)
      continue;
    LLVM_DEBUG(dbgs() << "\t\tconflict at " << printReg(Reg) << ':' << i << '@'
                      << LR.getValNumInfo(i)->def << ' '
                      << PrintLaneMask(LaneMask) << '\n');
    if (SubRangeJoin)
      return false;

    ++NumLaneConflicts;
    assert(V.OtherVNI && "Inconsistent conflict resolution.");
    VNInfo *VNI = LR.getValNumInfo(i);
    const Val &OtherV = Other.Vals[V.OtherVNI->id];

    // VNI is known to clobber some lanes in OtherVNI. If we go ahead with the
    // join, those lanes will be tainted with a wrong value. Get the extent of
    // the tainted lanes.
    LaneBitmask TaintedLanes = V.WriteLanes & OtherV.ValidLanes;
    SmallVector<std::pair<SlotIndex, LaneBitmask>, 8> TaintExtent;
    if (!taintExtent(i, TaintedLanes, Other, TaintExtent))
      // Tainted lanes would extend beyond the basic block.
      return false;

    assert(!TaintExtent.empty() && "There should be at least one conflict.");

    // Now look at the instructions from VNI->def to TaintExtent (inclusive).
    MachineBasicBlock *MBB = Indexes->getMBBFromIndex(VNI->def);
    MachineBasicBlock::iterator MI = MBB->begin();
    if (!VNI->isPHIDef()) {
      MI = Indexes->getInstructionFromIndex(VNI->def);
      if (!VNI->def.isEarlyClobber()) {
        // No need to check the instruction defining VNI for reads.
        ++MI;
      }
    }
    assert(!SlotIndex::isSameInstr(VNI->def, TaintExtent.front().first) &&
           "Interference ends on VNI->def. Should have been handled earlier");
    MachineInstr *LastMI =
        Indexes->getInstructionFromIndex(TaintExtent.front().first);
    assert(LastMI && "Range must end at a proper instruction");
    unsigned TaintNum = 0;
    while (true) {
      assert(MI != MBB->end() && "Bad LastMI");
      if (usesLanes(*MI, Other.Reg, Other.SubIdx, TaintedLanes)) {
        LLVM_DEBUG(dbgs() << "\t\ttainted lanes used by: " << *MI);
        return false;
      }
      // LastMI is the last instruction to use the current value.
      if (&*MI == LastMI) {
        if (++TaintNum == TaintExtent.size())
          break;
        LastMI = Indexes->getInstructionFromIndex(TaintExtent[TaintNum].first);
        assert(LastMI && "Range must end at a proper instruction");
        TaintedLanes = TaintExtent[TaintNum].second;
      }
      ++MI;
    }

    // The tainted lanes are unused.
    V.Resolution = CR_Replace;
    ++NumLaneResolves;
  }
  return true;
}

bool JoinVals::isPrunedValue(unsigned ValNo, JoinVals &Other) {
  Val &V = Vals[ValNo];
  if (V.Pruned || V.PrunedComputed)
    return V.Pruned;

  if (V.Resolution != CR_Erase && V.Resolution != CR_Merge)
    return V.Pruned;

  // Follow copies up the dominator tree and check if any intermediate value
  // has been pruned.
  V.PrunedComputed = true;
  V.Pruned = Other.isPrunedValue(V.OtherVNI->id, *this);
  return V.Pruned;
}

void JoinVals::pruneValues(JoinVals &Other,
                           SmallVectorImpl<SlotIndex> &EndPoints,
                           bool changeInstrs) {
  for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
    SlotIndex Def = LR.getValNumInfo(i)->def;
    switch (Vals[i].Resolution) {
    case CR_Keep:
      break;
    case CR_Replace: {
      // This value takes precedence over the value in Other.LR.
      LIS->pruneValue(Other.LR, Def, &EndPoints);
      // Check if we're replacing an IMPLICIT_DEF value. The IMPLICIT_DEF
      // instructions are only inserted to provide a live-out value for PHI
      // predecessors, so the instruction should simply go away once its value
      // has been replaced.
      Val &OtherV = Other.Vals[Vals[i].OtherVNI->id];
      bool EraseImpDef =
          OtherV.ErasableImplicitDef && OtherV.Resolution == CR_Keep;
      if (!Def.isBlock()) {
        if (changeInstrs) {
          // Remove <def,read-undef> flags. This def is now a partial redef.
          // Also remove dead flags since the joined live range will
          // continue past this instruction.
          for (MachineOperand &MO :
               Indexes->getInstructionFromIndex(Def)->all_defs()) {
            if (MO.getReg() == Reg) {
              if (MO.getSubReg() != 0 && MO.isUndef() && !EraseImpDef)
                MO.setIsUndef(false);
              MO.setIsDead(false);
            }
          }
        }
        // This value will reach instructions below, but we need to make sure
        // the live range also reaches the instruction at Def.
        if (!EraseImpDef)
          EndPoints.push_back(Def);
      }
      LLVM_DEBUG(dbgs() << "\t\tpruned " << printReg(Other.Reg) << " at " << Def
                        << ": " << Other.LR << '\n');
      break;
    }
    case CR_Erase:
    case CR_Merge:
      if (isPrunedValue(i, Other)) {
        // This value is ultimately a copy of a pruned value in LR or Other.LR.
        // We can no longer trust the value mapping computed by
        // computeAssignment(), the value that was originally copied could have
        // been replaced.
        LIS->pruneValue(LR, Def, &EndPoints);
        LLVM_DEBUG(dbgs() << "\t\tpruned all of " << printReg(Reg) << " at "
                          << Def << ": " << LR << '\n');
      }
      break;
    case CR_Unresolved:
    case CR_Impossible:
      llvm_unreachable("Unresolved conflicts");
    }
  }
}

// Check if the segment consists of a copied live-through value (i.e. the copy
// in the block only extended the liveness of an undef value, which we may
// need to handle).
static bool isLiveThrough(const LiveQueryResult Q) {
  return Q.valueIn() && Q.valueIn()->isPHIDef() && Q.valueIn() == Q.valueOut();
}

/// Consider the following situation when coalescing the copy between
/// %31 and %45 at 800. (The vertical lines represent live range segments.)
///
///                    Main range          Subrange 0004 (sub2)
///                    %31    %45            %31    %45
///  544    %45 = COPY %28            +                    +
///                                   | v1                 | v1
///  560B bb.1:                       +                    +
///  624        = %45.sub2            | v2                 | v2
///  800    %31 = COPY %45     +      +             +      +
///                            | v0                 | v0
///  816    %31.sub1 = ...     +                    |
///  880    %30 = COPY %31     | v1                 +
///  928    %45 = COPY %30     |      +                    +
///                            |      | v0                 | v0  <--+
///  992B   ; backedge -> bb.1 |      +                    +        |
/// 1040        = %31.sub0     +                                    |
///                                               This value must remain
///                                               live-out!
///
/// Assuming that %31 is coalesced into %45, the copy at 928 becomes
/// redundant, since it copies the value from %45 back into it. The
/// conflict resolution for the main range determines that %45.v0 is
/// to be erased, which is ok since %31.v1 is identical to it.
/// The problem happens with the subrange for sub2: it has to be live
/// on exit from the block, but since 928 was actually a point of
/// definition of %45.sub2, %45.sub2 was not live immediately prior
/// to that definition. As a result, when 928 was erased, the value v0
/// for %45.sub2 was pruned in pruneSubRegValues. Consequently, an
/// IMPLICIT_DEF was inserted as a "backedge" definition for %45.sub2,
/// providing an incorrect value to the use at 624.
///
/// Since the main-range values %31.v1 and %45.v0 were proved to be
/// identical, the corresponding values in subranges must also be the
/// same. A redundant copy is removed because it's not needed, and not
/// because it copied an undefined value, so any liveness that originated
/// from that copy cannot disappear. When pruning a value that started
/// at the removed copy, the corresponding identical value must be
/// extended to replace it.
void JoinVals::pruneSubRegValues(LiveInterval &LI, LaneBitmask &ShrinkMask) {
  // Look for values being erased.
  bool DidPrune = false;
  for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
    Val &V = Vals[i];
    // We should trigger in all cases in which eraseInstrs() does something;
    // the message printed below helps diagnose any mismatch with it.
    if (V.Resolution != CR_Erase &&
        (V.Resolution != CR_Keep || !V.ErasableImplicitDef || !V.Pruned))
      continue;

    // Check subranges at the point where the copy will be removed.
    SlotIndex Def = LR.getValNumInfo(i)->def;
    SlotIndex OtherDef;
    if (V.Identical)
      OtherDef = V.OtherVNI->def;

    // Print message so mismatches with eraseInstrs() can be diagnosed.
    LLVM_DEBUG(dbgs() << "\t\tExpecting instruction removal at " << Def
                      << '\n');
    for (LiveInterval::SubRange &S : LI.subranges()) {
      LiveQueryResult Q = S.Query(Def);

      // If a subrange starts at the copy then an undefined value has been
      // copied and we must remove that subrange value as well.
      VNInfo *ValueOut = Q.valueOutOrDead();
      if (ValueOut != nullptr &&
          (Q.valueIn() == nullptr ||
           (V.Identical && V.Resolution == CR_Erase && ValueOut->def == Def))) {
        LLVM_DEBUG(dbgs() << "\t\tPrune sublane " << PrintLaneMask(S.LaneMask)
                          << " at " << Def << "\n");
        SmallVector<SlotIndex, 8> EndPoints;
        LIS->pruneValue(S, Def, &EndPoints);
        DidPrune = true;
        // Mark value number as unused.
        ValueOut->markUnused();

        if (V.Identical && S.Query(OtherDef).valueOutOrDead()) {
          // If V is identical to V.OtherVNI (and S was live at OtherDef),
          // then we can't simply prune V from S. V needs to be replaced
          // with V.OtherVNI.
          LIS->extendToIndices(S, EndPoints);
        }

        // We may need to eliminate the subrange if the copy introduced a live
        // out undef value.
        if (ValueOut->isPHIDef())
          ShrinkMask |= S.LaneMask;
        continue;
      }

      // If a subrange ends at the copy, then a value was copied but only
      // partially used later. Shrink the subregister range appropriately.
      //
      // Ultimately this calls shrinkToUses, so assuming ShrinkMask is
      // conservatively correct.
      if ((Q.valueIn() != nullptr && Q.valueOut() == nullptr) ||
          (V.Resolution == CR_Erase && isLiveThrough(Q))) {
        LLVM_DEBUG(dbgs() << "\t\tDead uses at sublane "
                          << PrintLaneMask(S.LaneMask) << " at " << Def
                          << "\n");
        ShrinkMask |= S.LaneMask;
      }
    }
  }
  if (DidPrune)
    LI.removeEmptySubRanges();
}

/// Check if any of the subranges of @p LI contain a definition at @p Def.
static bool isDefInSubRange(LiveInterval &LI, SlotIndex Def) {
  for (LiveInterval::SubRange &SR : LI.subranges()) {
    if (VNInfo *VNI = SR.Query(Def).valueOutOrDead())
      if (VNI->def == Def)
        return true;
  }
  return false;
}

void JoinVals::pruneMainSegments(LiveInterval &LI, bool &ShrinkMainRange) {
  assert(&static_cast<LiveRange &>(LI) == &LR);

  for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
    if (Vals[i].Resolution != CR_Keep)
      continue;
    VNInfo *VNI = LR.getValNumInfo(i);
    if (VNI->isUnused() || VNI->isPHIDef() || isDefInSubRange(LI, VNI->def))
      continue;
    Vals[i].Pruned = true;
    ShrinkMainRange = true;
  }
}

void JoinVals::removeImplicitDefs() {
  for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
    Val &V = Vals[i];
    if (V.Resolution != CR_Keep || !V.ErasableImplicitDef || !V.Pruned)
      continue;

    VNInfo *VNI = LR.getValNumInfo(i);
    VNI->markUnused();
    LR.removeValNo(VNI);
  }
}

void JoinVals::eraseInstrs(SmallPtrSetImpl<MachineInstr *> &ErasedInstrs,
                           SmallVectorImpl<Register> &ShrinkRegs,
                           LiveInterval *LI) {
  for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
    // Get the def location before markUnused() below invalidates it.
    VNInfo *VNI = LR.getValNumInfo(i);
    SlotIndex Def = VNI->def;
    switch (Vals[i].Resolution) {
    case CR_Keep: {
      // If an IMPLICIT_DEF value is pruned, it doesn't serve a purpose any
      // longer. The IMPLICIT_DEF instructions are only inserted by
      // PHIElimination to guarantee that all PHI predecessors have a value.
      if (!Vals[i].ErasableImplicitDef || !Vals[i].Pruned)
        break;
      // Remove value number i from LR.
      // For intervals with subranges, removing a segment from the main range
      // may require extending the previous segment: for each definition of
      // a subregister, there will be a corresponding def in the main range.
      // That def may fall in the middle of a segment from another subrange.
      // In such cases, removing this def from the main range must be
      // complemented by extending the main range to account for the liveness
      // of the other subrange.
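      //
      // For example (illustrative SlotIndexes): if the value being removed
      // covers [16r,24r) in the main range and some subrange is live across
      // 16r (say [0B,32r)), the previous main-range segment ending at 16r is
      // extended up to 24r so the main range still covers the subrange's
      // liveness there.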
      // The new end point of the main range segment to be extended.
      SlotIndex NewEnd;
      if (LI != nullptr) {
        LiveRange::iterator I = LR.FindSegmentContaining(Def);
        assert(I != LR.end());
        // Do not extend beyond the end of the segment being removed.
        // The segment may have been pruned in preparation for joining
        // live ranges.
        NewEnd = I->end;
      }

      LR.removeValNo(VNI);
      // Note that this VNInfo is reused and still referenced in NewVNInfo,
      // make it appear like an unused value number.
      VNI->markUnused();

      if (LI != nullptr && LI->hasSubRanges()) {
        assert(static_cast<LiveRange *>(LI) == &LR);
        // Determine the end point based on the subrange information:
        // minimum of (earliest def of next segment,
        //             latest end point of containing segment)
        SlotIndex ED, LE;
        for (LiveInterval::SubRange &SR : LI->subranges()) {
          LiveRange::iterator I = SR.find(Def);
          if (I == SR.end())
            continue;
          if (I->start > Def)
            ED = ED.isValid() ? std::min(ED, I->start) : I->start;
          else
            LE = LE.isValid() ? std::max(LE, I->end) : I->end;
        }
        if (LE.isValid())
          NewEnd = std::min(NewEnd, LE);
        if (ED.isValid())
          NewEnd = std::min(NewEnd, ED);

        // We only want to do the extension if there was a subrange that
        // was live across Def.
        if (LE.isValid()) {
          LiveRange::iterator S = LR.find(Def);
          if (S != LR.begin())
            std::prev(S)->end = NewEnd;
        }
      }
      LLVM_DEBUG({
        dbgs() << "\t\tremoved " << i << '@' << Def << ": " << LR << '\n';
        if (LI != nullptr)
          dbgs() << "\t\t LHS = " << *LI << '\n';
      });
      [[fallthrough]];
    }

    case CR_Erase: {
      MachineInstr *MI = Indexes->getInstructionFromIndex(Def);
      assert(MI && "No instruction to erase");
      if (MI->isCopy()) {
        Register Reg = MI->getOperand(1).getReg();
        if (Reg.isVirtual() && Reg != CP.getSrcReg() && Reg != CP.getDstReg())
          ShrinkRegs.push_back(Reg);
      }
      ErasedInstrs.insert(MI);
      LLVM_DEBUG(dbgs() << "\t\terased:\t" << Def << '\t' << *MI);
      LIS->RemoveMachineInstrFromMaps(*MI);
      MI->eraseFromParent();
      break;
    }
    default:
      break;
    }
  }
}

void RegisterCoalescer::joinSubRegRanges(LiveRange &LRange, LiveRange &RRange,
                                         LaneBitmask LaneMask,
                                         const CoalescerPair &CP) {
  SmallVector<VNInfo *, 16> NewVNInfo;
  JoinVals RHSVals(RRange, CP.getSrcReg(), CP.getSrcIdx(), LaneMask, NewVNInfo,
                   CP, LIS, TRI, true, true);
  JoinVals LHSVals(LRange, CP.getDstReg(), CP.getDstIdx(), LaneMask, NewVNInfo,
                   CP, LIS, TRI, true, true);

  // Compute NewVNInfo and resolve conflicts (see also joinVirtRegs())
  // We should be able to resolve all conflicts here as we could successfully do
  // it on the mainrange already. There is however a problem when multiple
  // ranges get mapped to the "overflow" lane mask bit which creates unexpected
  // interferences.
  if (!LHSVals.mapValues(RHSVals) || !RHSVals.mapValues(LHSVals)) {
    // We already determined that it is legal to merge the intervals, so this
    // should never fail.
    llvm_unreachable("*** Couldn't join subrange!\n");
  }
  if (!LHSVals.resolveConflicts(RHSVals) ||
      !RHSVals.resolveConflicts(LHSVals)) {
    // We already determined that it is legal to merge the intervals, so this
    // should never fail.
    llvm_unreachable("*** Couldn't join subrange!\n");
  }

  // The merging algorithm in LiveInterval::join() can't handle conflicting
  // value mappings, so we need to remove any live ranges that overlap a
  // CR_Replace resolution. Collect a set of end points that can be used to
  // restore the live range after joining.
  SmallVector<SlotIndex, 8> EndPoints;
  LHSVals.pruneValues(RHSVals, EndPoints, false);
  RHSVals.pruneValues(LHSVals, EndPoints, false);

  LHSVals.removeImplicitDefs();
  RHSVals.removeImplicitDefs();

  assert(LRange.verify() && RRange.verify());

  // Join RRange into LHS.
  LRange.join(RRange, LHSVals.getAssignments(), RHSVals.getAssignments(),
              NewVNInfo);

  LLVM_DEBUG(dbgs() << "\t\tjoined lanes: " << PrintLaneMask(LaneMask) << ' '
                    << LRange << "\n");
  if (EndPoints.empty())
    return;

  // Recompute the parts of the live range we had to remove because of
  // CR_Replace conflicts.
  LLVM_DEBUG({
    dbgs() << "\t\trestoring liveness to " << EndPoints.size() << " points: ";
    for (unsigned i = 0, n = EndPoints.size(); i != n; ++i) {
      dbgs() << EndPoints[i];
      if (i != n - 1)
        dbgs() << ',';
    }
    dbgs() << ": " << LRange << '\n';
  });
  LIS->extendToIndices(LRange, EndPoints);
}

void RegisterCoalescer::mergeSubRangeInto(LiveInterval &LI,
                                          const LiveRange &ToMerge,
                                          LaneBitmask LaneMask,
                                          CoalescerPair &CP,
                                          unsigned ComposeSubRegIdx) {
  BumpPtrAllocator &Allocator = LIS->getVNInfoAllocator();
  LI.refineSubRanges(
      Allocator, LaneMask,
      [this, &Allocator, &ToMerge, &CP](LiveInterval::SubRange &SR) {
        if (SR.empty()) {
          SR.assign(ToMerge, Allocator);
        } else {
          // joinSubRegRanges() destroys the merged range, so we need a copy.
          LiveRange RangeCopy(ToMerge, Allocator);
          joinSubRegRanges(SR, RangeCopy, SR.LaneMask, CP);
        }
      },
      *LIS->getSlotIndexes(), *TRI, ComposeSubRegIdx);
}
| 3669 | |
| 3670 | bool RegisterCoalescer::isHighCostLiveInterval(LiveInterval &LI) { |
| 3671 | if (LI.valnos.size() < LargeIntervalSizeThreshold) |
| 3672 | return false; |
| 3673 | auto &Counter = LargeLIVisitCounter[LI.reg()]; |
| 3674 | if (Counter < LargeIntervalFreqThreshold) { |
| 3675 | Counter++; |
| 3676 | return false; |
| 3677 | } |
| 3678 | return true; |
| 3679 | } |
| 3680 | |
| 3681 | bool RegisterCoalescer::joinVirtRegs(CoalescerPair &CP) { |
| 3682 | SmallVector<VNInfo *, 16> NewVNInfo; |
  LiveInterval &RHS = LIS->getInterval(CP.getSrcReg());
  LiveInterval &LHS = LIS->getInterval(CP.getDstReg());
  bool TrackSubRegLiveness = MRI->shouldTrackSubRegLiveness(*CP.getNewRC());
| 3686 | JoinVals RHSVals(RHS, CP.getSrcReg(), CP.getSrcIdx(), LaneBitmask::getNone(), |
| 3687 | NewVNInfo, CP, LIS, TRI, false, TrackSubRegLiveness); |
| 3688 | JoinVals LHSVals(LHS, CP.getDstReg(), CP.getDstIdx(), LaneBitmask::getNone(), |
| 3689 | NewVNInfo, CP, LIS, TRI, false, TrackSubRegLiveness); |
| 3690 | |
| 3691 | LLVM_DEBUG(dbgs() << "\t\tRHS = " << RHS << "\n\t\tLHS = " << LHS << '\n'); |
| 3692 | |
  if (isHighCostLiveInterval(LHS) || isHighCostLiveInterval(RHS))
| 3694 | return false; |
| 3695 | |
| 3696 | // First compute NewVNInfo and the simple value mappings. |
| 3697 | // Detect impossible conflicts early. |
  if (!LHSVals.mapValues(RHSVals) || !RHSVals.mapValues(LHSVals))
| 3699 | return false; |
| 3700 | |
| 3701 | // Some conflicts can only be resolved after all values have been mapped. |
  if (!LHSVals.resolveConflicts(RHSVals) || !RHSVals.resolveConflicts(LHSVals))
| 3703 | return false; |
| 3704 | |
| 3705 | // All clear, the live ranges can be merged. |
| 3706 | if (RHS.hasSubRanges() || LHS.hasSubRanges()) { |
| 3707 | BumpPtrAllocator &Allocator = LIS->getVNInfoAllocator(); |
| 3708 | |
| 3709 | // Transform lanemasks from the LHS to masks in the coalesced register and |
| 3710 | // create initial subranges if necessary. |
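    // Illustrative example (x86-style names, not from the source): when
    // coalescing %1:gr32 into %2:gr64.sub_32bit, DstIdx is sub_32bit, so the
    // lanes covered by the existing LHS liveness are
    // getSubRegIndexLaneMask(sub_32bit) rather than the full lane mask of the
    // new register class.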
| 3711 | unsigned DstIdx = CP.getDstIdx(); |
| 3712 | if (!LHS.hasSubRanges()) { |
      LaneBitmask Mask = DstIdx == 0 ? CP.getNewRC()->getLaneMask()
                                     : TRI->getSubRegIndexLaneMask(DstIdx);
| 3715 | // LHS must support subregs or we wouldn't be in this codepath. |
| 3716 | assert(Mask.any()); |
      LHS.createSubRangeFrom(Allocator, Mask, LHS);
| 3718 | } else if (DstIdx != 0) { |
| 3719 | // Transform LHS lanemasks to new register class if necessary. |
| 3720 | for (LiveInterval::SubRange &R : LHS.subranges()) { |
        LaneBitmask Mask = TRI->composeSubRegIndexLaneMask(DstIdx, R.LaneMask);
| 3722 | R.LaneMask = Mask; |
| 3723 | } |
| 3724 | } |
| 3725 | LLVM_DEBUG(dbgs() << "\t\tLHST = " << printReg(CP.getDstReg()) << ' ' << LHS |
| 3726 | << '\n'); |
| 3727 | |
| 3728 | // Determine lanemasks of RHS in the coalesced register and merge subranges. |
| 3729 | unsigned SrcIdx = CP.getSrcIdx(); |
| 3730 | if (!RHS.hasSubRanges()) { |
      LaneBitmask Mask = SrcIdx == 0 ? CP.getNewRC()->getLaneMask()
                                     : TRI->getSubRegIndexLaneMask(SrcIdx);
      mergeSubRangeInto(LHS, RHS, Mask, CP, DstIdx);
| 3734 | } else { |
| 3735 | // Pair up subranges and merge. |
| 3736 | for (LiveInterval::SubRange &R : RHS.subranges()) { |
        LaneBitmask Mask = TRI->composeSubRegIndexLaneMask(SrcIdx, R.LaneMask);
        mergeSubRangeInto(LHS, R, Mask, CP, DstIdx);
| 3739 | } |
| 3740 | } |
| 3741 | LLVM_DEBUG(dbgs() << "\tJoined SubRanges " << LHS << "\n" ); |
| 3742 | |
| 3743 | // Pruning implicit defs from subranges may result in the main range |
| 3744 | // having stale segments. |
    LHSVals.pruneMainSegments(LHS, ShrinkMainRange);
| 3746 | |
    LHSVals.pruneSubRegValues(LHS, ShrinkMask);
    RHSVals.pruneSubRegValues(LHS, ShrinkMask);
| 3749 | } else if (TrackSubRegLiveness && !CP.getDstIdx() && CP.getSrcIdx()) { |
    LHS.createSubRangeFrom(LIS->getVNInfoAllocator(),
                           CP.getNewRC()->getLaneMask(), LHS);
    mergeSubRangeInto(LHS, RHS, TRI->getSubRegIndexLaneMask(CP.getSrcIdx()), CP,
                      CP.getDstIdx());
    LHSVals.pruneMainSegments(LHS, ShrinkMainRange);
    LHSVals.pruneSubRegValues(LHS, ShrinkMask);
| 3756 | } |
| 3757 | |
| 3758 | // The merging algorithm in LiveInterval::join() can't handle conflicting |
| 3759 | // value mappings, so we need to remove any live ranges that overlap a |
| 3760 | // CR_Replace resolution. Collect a set of end points that can be used to |
| 3761 | // restore the live range after joining. |
| 3762 | SmallVector<SlotIndex, 8> EndPoints; |
  LHSVals.pruneValues(RHSVals, EndPoints, /*changeInstrs=*/true);
  RHSVals.pruneValues(LHSVals, EndPoints, /*changeInstrs=*/true);
| 3765 | |
| 3766 | // Erase COPY and IMPLICIT_DEF instructions. This may cause some external |
| 3767 | // registers to require trimming. |
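  // E.g. erasing a COPY whose source value existed only to feed that copy can
  // leave the source interval extending past its last remaining use; such
  // registers are collected in ShrinkRegs and trimmed back to their uses
  // below.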
| 3768 | SmallVector<Register, 8> ShrinkRegs; |
  LHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs, &LHS);
  RHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs);
| 3771 | while (!ShrinkRegs.empty()) |
    shrinkToUses(&LIS->getInterval(ShrinkRegs.pop_back_val()));
| 3773 | |
| 3774 | // Scan and mark undef any DBG_VALUEs that would refer to a different value. |
| 3775 | checkMergingChangesDbgValues(CP, LHS, LHSVals, RHS, RHSVals); |
| 3776 | |
| 3777 | // If the RHS covers any PHI locations that were tracked for debug-info, we |
| 3778 | // must update tracking information to reflect the join. |
  auto RegIt = RegToPHIIdx.find(CP.getSrcReg());
| 3780 | if (RegIt != RegToPHIIdx.end()) { |
| 3781 | // Iterate over all the debug instruction numbers assigned this register. |
| 3782 | for (unsigned InstID : RegIt->second) { |
      auto PHIIt = PHIValToPos.find(InstID);
| 3784 | assert(PHIIt != PHIValToPos.end()); |
| 3785 | const SlotIndex &SI = PHIIt->second.SI; |
| 3786 | |
| 3787 | // Does the RHS cover the position of this PHI? |
      auto LII = RHS.find(SI);
| 3789 | if (LII == RHS.end() || LII->start > SI) |
| 3790 | continue; |
| 3791 | |
| 3792 | // Accept two kinds of subregister movement: |
| 3793 | // * When we merge from one register class into a larger register: |
| 3794 | // %1:gr16 = some-inst |
| 3795 | // -> |
| 3796 | // %2:gr32.sub_16bit = some-inst |
| 3797 | // * When the PHI is already in a subregister, and the larger class |
| 3798 | // is coalesced: |
| 3799 | // %2:gr32.sub_16bit = some-inst |
| 3800 | // %3:gr32 = COPY %2 |
| 3801 | // -> |
| 3802 | // %3:gr32.sub_16bit = some-inst |
| 3803 | // Test for subregister move: |
| 3804 | if (CP.getSrcIdx() != 0 || CP.getDstIdx() != 0) |
| 3805 | // If we're moving between different subregisters, ignore this join. |
| 3806 | // The PHI will not get a location, dropping variable locations. |
| 3807 | if (PHIIt->second.SubReg && PHIIt->second.SubReg != CP.getSrcIdx()) |
| 3808 | continue; |
| 3809 | |
| 3810 | // Update our tracking of where the PHI is. |
| 3811 | PHIIt->second.Reg = CP.getDstReg(); |
| 3812 | |
| 3813 | // If we merge into a sub-register of a larger class (test above), |
| 3814 | // update SubReg. |
| 3815 | if (CP.getSrcIdx() != 0) |
| 3816 | PHIIt->second.SubReg = CP.getSrcIdx(); |
| 3817 | } |
| 3818 | |
    // Rebuild the register index in RegToPHIIdx to account for PHIs tracking
    // different VRegs now. Copy out the old collection of debug instruction
    // numbers and erase the stale map entry:
| 3822 | auto InstrNums = RegIt->second; |
    RegToPHIIdx.erase(RegIt);
| 3824 | |
| 3825 | // There might already be PHIs being tracked in the destination VReg. Insert |
| 3826 | // into an existing tracking collection, or insert a new one. |
    RegIt = RegToPHIIdx.find(CP.getDstReg());
    if (RegIt != RegToPHIIdx.end())
      llvm::append_range(RegIt->second, InstrNums);
    else
      RegToPHIIdx.insert({CP.getDstReg(), InstrNums});
| 3832 | } |
| 3833 | |
| 3834 | // Join RHS into LHS. |
  LHS.join(RHS, LHSVals.getAssignments(), RHSVals.getAssignments(), NewVNInfo);
| 3836 | |
| 3837 | // Kill flags are going to be wrong if the live ranges were overlapping. |
| 3838 | // Eventually, we should simply clear all kill flags when computing live |
| 3839 | // ranges. They are reinserted after register allocation. |
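  // E.g. a use of RHS that was flagged 'killed' may now lie in the middle of
  // the joined LHS range, so the stale flag would be wrong.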
  MRI->clearKillFlags(LHS.reg());
  MRI->clearKillFlags(RHS.reg());
| 3842 | |
| 3843 | if (!EndPoints.empty()) { |
| 3844 | // Recompute the parts of the live range we had to remove because of |
| 3845 | // CR_Replace conflicts. |
| 3846 | LLVM_DEBUG({ |
| 3847 | dbgs() << "\t\trestoring liveness to " << EndPoints.size() << " points: " ; |
| 3848 | for (unsigned i = 0, n = EndPoints.size(); i != n; ++i) { |
| 3849 | dbgs() << EndPoints[i]; |
| 3850 | if (i != n - 1) |
| 3851 | dbgs() << ','; |
| 3852 | } |
| 3853 | dbgs() << ": " << LHS << '\n'; |
| 3854 | }); |
    LIS->extendToIndices((LiveRange &)LHS, EndPoints);
| 3856 | } |
| 3857 | |
| 3858 | return true; |
| 3859 | } |
| 3860 | |
| 3861 | bool RegisterCoalescer::joinIntervals(CoalescerPair &CP) { |
| 3862 | return CP.isPhys() ? joinReservedPhysReg(CP) : joinVirtRegs(CP); |
| 3863 | } |
| 3864 | |
| 3865 | void RegisterCoalescer::buildVRegToDbgValueMap(MachineFunction &MF) { |
| 3866 | const SlotIndexes &Slots = *LIS->getSlotIndexes(); |
| 3867 | SmallVector<MachineInstr *, 8> ToInsert; |
| 3868 | |
| 3869 | // After collecting a block of DBG_VALUEs into ToInsert, enter them into the |
| 3870 | // vreg => DbgValueLoc map. |
| 3871 | auto CloseNewDVRange = [this, &ToInsert](SlotIndex Slot) { |
| 3872 | for (auto *X : ToInsert) { |
| 3873 | for (const auto &Op : X->debug_operands()) { |
| 3874 | if (Op.isReg() && Op.getReg().isVirtual()) |
        DbgVRegToValues[Op.getReg()].push_back({Slot, X});
| 3876 | } |
| 3877 | } |
| 3878 | |
| 3879 | ToInsert.clear(); |
| 3880 | }; |
| 3881 | |
| 3882 | // Iterate over all instructions, collecting them into the ToInsert vector. |
| 3883 | // Once a non-debug instruction is found, record the slot index of the |
| 3884 | // collected DBG_VALUEs. |
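  // Illustrative shape of the mapping (hypothetical MIR):
  //   DBG_VALUE %0, ...
  //   DBG_VALUE %1, ...
  //   %2 = ADD ...          ; slot index S
  // Both DBG_VALUEs are recorded against S, the index of the next non-debug
  // instruction; trailing DBG_VALUEs use the block's end index instead.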
| 3885 | for (auto &MBB : MF) { |
    SlotIndex CurrentSlot = Slots.getMBBStartIdx(&MBB);
| 3887 | |
| 3888 | for (auto &MI : MBB) { |
| 3889 | if (MI.isDebugValue()) { |
        if (any_of(MI.debug_operands(), [](const MachineOperand &MO) {
              return MO.isReg() && MO.getReg().isVirtual();
            }))
          ToInsert.push_back(&MI);
| 3894 | } else if (!MI.isDebugOrPseudoInstr()) { |
| 3895 | CurrentSlot = Slots.getInstructionIndex(MI); |
| 3896 | CloseNewDVRange(CurrentSlot); |
| 3897 | } |
| 3898 | } |
| 3899 | |
| 3900 | // Close range of DBG_VALUEs at the end of blocks. |
    CloseNewDVRange(Slots.getMBBEndIdx(&MBB));
| 3902 | } |
| 3903 | |
| 3904 | // Sort all DBG_VALUEs we've seen by slot number. |
| 3905 | for (auto &Pair : DbgVRegToValues) |
    llvm::sort(Pair.second);
| 3907 | } |
| 3908 | |
| 3909 | void RegisterCoalescer::checkMergingChangesDbgValues(CoalescerPair &CP, |
| 3910 | LiveRange &LHS, |
| 3911 | JoinVals &LHSVals, |
| 3912 | LiveRange &RHS, |
| 3913 | JoinVals &RHSVals) { |
| 3914 | auto ScanForDstReg = [&](Register Reg) { |
    checkMergingChangesDbgValuesImpl(Reg, RHS, LHS, LHSVals);
| 3916 | }; |
| 3917 | |
| 3918 | auto ScanForSrcReg = [&](Register Reg) { |
    checkMergingChangesDbgValuesImpl(Reg, LHS, RHS, RHSVals);
| 3920 | }; |
| 3921 | |
| 3922 | // Scan for unsound updates of both the source and destination register. |
| 3923 | ScanForSrcReg(CP.getSrcReg()); |
| 3924 | ScanForDstReg(CP.getDstReg()); |
| 3925 | } |
| 3926 | |
| 3927 | void RegisterCoalescer::checkMergingChangesDbgValuesImpl(Register Reg, |
| 3928 | LiveRange &OtherLR, |
| 3929 | LiveRange &RegLR, |
| 3930 | JoinVals &RegVals) { |
| 3931 | // Are there any DBG_VALUEs to examine? |
  auto VRegMapIt = DbgVRegToValues.find(Reg);
| 3933 | if (VRegMapIt == DbgVRegToValues.end()) |
| 3934 | return; |
| 3935 | |
| 3936 | auto &DbgValueSet = VRegMapIt->second; |
| 3937 | auto DbgValueSetIt = DbgValueSet.begin(); |
| 3938 | auto SegmentIt = OtherLR.begin(); |
| 3939 | |
| 3940 | bool LastUndefResult = false; |
| 3941 | SlotIndex LastUndefIdx; |
| 3942 | |
| 3943 | // If the "Other" register is live at a slot Idx, test whether Reg can |
| 3944 | // safely be merged with it, or should be marked undef. |
| 3945 | auto ShouldUndef = [&RegVals, &RegLR, &LastUndefResult, |
| 3946 | &LastUndefIdx](SlotIndex Idx) -> bool { |
| 3947 | // Our worst-case performance typically happens with asan, causing very |
| 3948 | // many DBG_VALUEs of the same location. Cache a copy of the most recent |
| 3949 | // result for this edge-case. |
| 3950 | if (LastUndefIdx == Idx) |
| 3951 | return LastUndefResult; |
| 3952 | |
| 3953 | // If the other range was live, and Reg's was not, the register coalescer |
| 3954 | // will not have tried to resolve any conflicts. We don't know whether |
| 3955 | // the DBG_VALUE will refer to the same value number, so it must be made |
| 3956 | // undef. |
    auto OtherIt = RegLR.find(Idx);
| 3958 | if (OtherIt == RegLR.end()) |
| 3959 | return true; |
| 3960 | |
| 3961 | // Both the registers were live: examine the conflict resolution record for |
    // the value number Reg refers to. CR_Keep means that this value number
| 3963 | // "won" and the merged register definitely refers to that value. CR_Erase |
| 3964 | // means the value number was a redundant copy of the other value, which |
| 3965 | // was coalesced and Reg deleted. It's safe to refer to the other register |
| 3966 | // (which will be the source of the copy). |
    auto Resolution = RegVals.getResolution(OtherIt->valno->id);
| 3968 | LastUndefResult = |
| 3969 | Resolution != JoinVals::CR_Keep && Resolution != JoinVals::CR_Erase; |
| 3970 | LastUndefIdx = Idx; |
| 3971 | return LastUndefResult; |
| 3972 | }; |
| 3973 | |
| 3974 | // Iterate over both the live-range of the "Other" register, and the set of |
| 3975 | // DBG_VALUEs for Reg at the same time. Advance whichever one has the lowest |
| 3976 | // slot index. This relies on the DbgValueSet being ordered. |
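  // This is an ordinary sorted merge-join: each step advances either the
  // DBG_VALUE cursor or the segment cursor, so the scan is linear in the
  // combined length of the two sequences.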
| 3977 | while (DbgValueSetIt != DbgValueSet.end() && SegmentIt != OtherLR.end()) { |
| 3978 | if (DbgValueSetIt->first < SegmentIt->end) { |
| 3979 | // "Other" is live and there is a DBG_VALUE of Reg: test if we should |
| 3980 | // set it undef. |
| 3981 | if (DbgValueSetIt->first >= SegmentIt->start) { |
| 3982 | bool HasReg = DbgValueSetIt->second->hasDebugOperandForReg(Reg); |
| 3983 | bool ShouldUndefReg = ShouldUndef(DbgValueSetIt->first); |
| 3984 | if (HasReg && ShouldUndefReg) { |
| 3985 | // Mark undef, erase record of this DBG_VALUE to avoid revisiting. |
| 3986 | DbgValueSetIt->second->setDebugValueUndef(); |
| 3987 | continue; |
| 3988 | } |
| 3989 | } |
| 3990 | ++DbgValueSetIt; |
| 3991 | } else { |
| 3992 | ++SegmentIt; |
| 3993 | } |
| 3994 | } |
| 3995 | } |
| 3996 | |
| 3997 | namespace { |
| 3998 | |
| 3999 | /// Information concerning MBB coalescing priority. |
| 4000 | struct MBBPriorityInfo { |
| 4001 | MachineBasicBlock *MBB; |
| 4002 | unsigned Depth; |
| 4003 | bool IsSplit; |
| 4004 | |
| 4005 | MBBPriorityInfo(MachineBasicBlock *mbb, unsigned depth, bool issplit) |
| 4006 | : MBB(mbb), Depth(depth), IsSplit(issplit) {} |
| 4007 | }; |
| 4008 | |
| 4009 | } // end anonymous namespace |
| 4010 | |
| 4011 | /// C-style comparator that sorts first based on the loop depth of the basic |
| 4012 | /// block (the unsigned), and then on the MBB number. |
| 4013 | /// |
| 4014 | /// EnableGlobalCopies assumes that the primary sort key is loop depth. |
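/// For example, a block at loop depth 2 sorts before any block at depth 1,
/// regardless of how many CFG edges either block has or of their block
/// numbers.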
| 4015 | static int compareMBBPriority(const MBBPriorityInfo *LHS, |
| 4016 | const MBBPriorityInfo *RHS) { |
| 4017 | // Deeper loops first |
| 4018 | if (LHS->Depth != RHS->Depth) |
| 4019 | return LHS->Depth > RHS->Depth ? -1 : 1; |
| 4020 | |
| 4021 | // Try to unsplit critical edges next. |
| 4022 | if (LHS->IsSplit != RHS->IsSplit) |
| 4023 | return LHS->IsSplit ? -1 : 1; |
| 4024 | |
| 4025 | // Prefer blocks that are more connected in the CFG. This takes care of |
| 4026 | // the most difficult copies first while intervals are short. |
| 4027 | unsigned cl = LHS->MBB->pred_size() + LHS->MBB->succ_size(); |
| 4028 | unsigned cr = RHS->MBB->pred_size() + RHS->MBB->succ_size(); |
| 4029 | if (cl != cr) |
| 4030 | return cl > cr ? -1 : 1; |
| 4031 | |
| 4032 | // As a last resort, sort by block number. |
| 4033 | return LHS->MBB->getNumber() < RHS->MBB->getNumber() ? -1 : 1; |
| 4034 | } |
| 4035 | |
| 4036 | /// \returns true if the given copy uses or defines a local live range. |
| 4037 | static bool isLocalCopy(MachineInstr *Copy, const LiveIntervals *LIS) { |
| 4038 | if (!Copy->isCopy()) |
| 4039 | return false; |
| 4040 | |
  if (Copy->getOperand(1).isUndef())
| 4042 | return false; |
| 4043 | |
  Register SrcReg = Copy->getOperand(1).getReg();
  Register DstReg = Copy->getOperand(0).getReg();
| 4046 | if (SrcReg.isPhysical() || DstReg.isPhysical()) |
| 4047 | return false; |
| 4048 | |
  return LIS->intervalIsInOneMBB(LIS->getInterval(SrcReg)) ||
         LIS->intervalIsInOneMBB(LIS->getInterval(DstReg));
| 4051 | } |
| 4052 | |
| 4053 | void RegisterCoalescer::lateLiveIntervalUpdate() { |
| 4054 | for (Register reg : ToBeUpdated) { |
    if (!LIS->hasInterval(reg))
      continue;
    LiveInterval &LI = LIS->getInterval(reg);
    shrinkToUses(&LI, &DeadDefs);
| 4059 | if (!DeadDefs.empty()) |
| 4060 | eliminateDeadDefs(); |
| 4061 | } |
| 4062 | ToBeUpdated.clear(); |
| 4063 | } |
| 4064 | |
| 4065 | bool RegisterCoalescer::copyCoalesceWorkList( |
| 4066 | MutableArrayRef<MachineInstr *> CurrList) { |
| 4067 | bool Progress = false; |
| 4068 | SmallPtrSet<MachineInstr *, 4> CurrentErasedInstrs; |
| 4069 | for (MachineInstr *&MI : CurrList) { |
| 4070 | if (!MI) |
| 4071 | continue; |
| 4072 | // Skip instruction pointers that have already been erased, for example by |
| 4073 | // dead code elimination. |
    if (ErasedInstrs.count(MI) || CurrentErasedInstrs.count(MI)) {
| 4075 | MI = nullptr; |
| 4076 | continue; |
| 4077 | } |
| 4078 | bool Again = false; |
    bool Success = joinCopy(MI, Again, CurrentErasedInstrs);
| 4080 | Progress |= Success; |
| 4081 | if (Success || !Again) |
| 4082 | MI = nullptr; |
| 4083 | } |
  // Null out any worklist entries for instructions that were erased above but
  // are not recorded in `ErasedInstrs`.
| 4085 | if (!CurrentErasedInstrs.empty()) { |
| 4086 | for (MachineInstr *&MI : CurrList) { |
      if (MI && CurrentErasedInstrs.count(MI))
| 4088 | MI = nullptr; |
| 4089 | } |
| 4090 | for (MachineInstr *&MI : WorkList) { |
      if (MI && CurrentErasedInstrs.count(MI))
| 4092 | MI = nullptr; |
| 4093 | } |
| 4094 | } |
| 4095 | return Progress; |
| 4096 | } |
| 4097 | |
| 4098 | /// Check if DstReg is a terminal node. |
| 4099 | /// I.e., it does not have any affinity other than \p Copy. |
| 4100 | static bool isTerminalReg(Register DstReg, const MachineInstr &Copy, |
| 4101 | const MachineRegisterInfo *MRI) { |
| 4102 | assert(Copy.isCopyLike()); |
  // Check if the destination of this copy has any other affinity.
  for (const MachineInstr &MI : MRI->reg_nodbg_instructions(DstReg))
| 4105 | if (&MI != &Copy && MI.isCopyLike()) |
| 4106 | return false; |
| 4107 | return true; |
| 4108 | } |
| 4109 | |
| 4110 | bool RegisterCoalescer::applyTerminalRule(const MachineInstr &Copy) const { |
| 4111 | assert(Copy.isCopyLike()); |
| 4112 | if (!UseTerminalRule) |
| 4113 | return false; |
| 4114 | Register SrcReg, DstReg; |
| 4115 | unsigned SrcSubReg = 0, DstSubReg = 0; |
  if (!isMoveInstr(*TRI, &Copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
| 4117 | return false; |
| 4118 | // Check if the destination of this copy has any other affinity. |
| 4119 | if (DstReg.isPhysical() || |
| 4120 | // If SrcReg is a physical register, the copy won't be coalesced. |
| 4121 | // Ignoring it may have other side effect (like missing |
| 4122 | // rematerialization). So keep it. |
| 4123 | SrcReg.isPhysical() || !isTerminalReg(DstReg, Copy, MRI)) |
| 4124 | return false; |
| 4125 | |
| 4126 | // DstReg is a terminal node. Check if it interferes with any other |
| 4127 | // copy involving SrcReg. |
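  // Illustrative scenario (hypothetical vregs): with '%d = COPY %s' being
  // this terminal copy and another '%o = COPY %s' in the same block, if the
  // interval of %o overlaps that of %d, joining %d first could block the
  // other join, so this copy is pushed to the end of the work list.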
| 4128 | const MachineBasicBlock *OrigBB = Copy.getParent(); |
  const LiveInterval &DstLI = LIS->getInterval(DstReg);
  for (const MachineInstr &MI : MRI->reg_nodbg_instructions(SrcReg)) {
| 4131 | // Technically we should check if the weight of the new copy is |
| 4132 | // interesting compared to the other one and update the weight |
| 4133 | // of the copies accordingly. However, this would only work if |
| 4134 | // we would gather all the copies first then coalesce, whereas |
| 4135 | // right now we interleave both actions. |
| 4136 | // For now, just consider the copies that are in the same block. |
| 4137 | if (&MI == &Copy || !MI.isCopyLike() || MI.getParent() != OrigBB) |
| 4138 | continue; |
| 4139 | Register OtherSrcReg, OtherReg; |
| 4140 | unsigned OtherSrcSubReg = 0, OtherSubReg = 0; |
    if (!isMoveInstr(*TRI, &MI, OtherSrcReg, OtherReg, OtherSrcSubReg,
                     OtherSubReg))
| 4143 | return false; |
| 4144 | if (OtherReg == SrcReg) |
| 4145 | OtherReg = OtherSrcReg; |
| 4146 | // Check if OtherReg is a non-terminal. |
    if (OtherReg.isPhysical() || isTerminalReg(OtherReg, MI, MRI))
| 4148 | continue; |
    // Check whether OtherReg interferes with DstReg.
    if (LIS->getInterval(OtherReg).overlaps(DstLI)) {
| 4151 | LLVM_DEBUG(dbgs() << "Apply terminal rule for: " << printReg(DstReg) |
| 4152 | << '\n'); |
| 4153 | return true; |
| 4154 | } |
| 4155 | } |
| 4156 | return false; |
| 4157 | } |
| 4158 | |
| 4159 | void RegisterCoalescer::copyCoalesceInMBB(MachineBasicBlock *MBB) { |
  LLVM_DEBUG(dbgs() << MBB->getName() << ":\n");
| 4161 | |
| 4162 | // Collect all copy-like instructions in MBB. Don't start coalescing anything |
| 4163 | // yet, it might invalidate the iterator. |
| 4164 | const unsigned PrevSize = WorkList.size(); |
| 4165 | if (JoinGlobalCopies) { |
| 4166 | SmallVector<MachineInstr *, 2> LocalTerminals; |
| 4167 | SmallVector<MachineInstr *, 2> GlobalTerminals; |
| 4168 | // Coalesce copies top-down to propagate coalescing and rematerialization |
| 4169 | // forward. |
| 4170 | for (MachineInstr &MI : *MBB) { |
| 4171 | if (!MI.isCopyLike()) |
| 4172 | continue; |
      bool ApplyTerminalRule = applyTerminalRule(MI);
      if (isLocalCopy(&MI, LIS)) {
| 4175 | if (ApplyTerminalRule) |
          LocalTerminals.push_back(&MI);
        else
          LocalWorkList.push_back(&MI);
| 4179 | } else { |
| 4180 | if (ApplyTerminalRule) |
          GlobalTerminals.push_back(&MI);
        else
          WorkList.push_back(&MI);
| 4184 | } |
| 4185 | } |
| 4186 | // Append the copies evicted by the terminal rule at the end of the list. |
    LocalWorkList.append(LocalTerminals.begin(), LocalTerminals.end());
    WorkList.append(GlobalTerminals.begin(), GlobalTerminals.end());
| 4189 | } else { |
| 4190 | SmallVector<MachineInstr *, 2> Terminals; |
| 4191 | // Coalesce copies top-down to propagate coalescing and rematerialization |
| 4192 | // forward. |
| 4193 | for (MachineInstr &MII : *MBB) |
| 4194 | if (MII.isCopyLike()) { |
        if (applyTerminalRule(MII))
          Terminals.push_back(&MII);
        else
          WorkList.push_back(&MII);
| 4199 | } |
| 4200 | // Append the copies evicted by the terminal rule at the end of the list. |
    WorkList.append(Terminals.begin(), Terminals.end());
| 4202 | } |
| 4203 | // Try coalescing the collected copies immediately, and remove the nulls. |
| 4204 | // This prevents the WorkList from getting too large since most copies are |
| 4205 | // joinable on the first attempt. |
| 4206 | MutableArrayRef<MachineInstr *> CurrList(WorkList.begin() + PrevSize, |
| 4207 | WorkList.end()); |
| 4208 | if (copyCoalesceWorkList(CurrList)) |
    WorkList.erase(std::remove(WorkList.begin() + PrevSize, WorkList.end(),
                               nullptr),
                   WorkList.end());
| 4212 | } |
| 4213 | |
| 4214 | void RegisterCoalescer::coalesceLocals() { |
  copyCoalesceWorkList(LocalWorkList);
| 4216 | for (MachineInstr *MI : LocalWorkList) { |
| 4217 | if (MI) |
      WorkList.push_back(MI);
| 4219 | } |
| 4220 | LocalWorkList.clear(); |
| 4221 | } |
| 4222 | |
| 4223 | void RegisterCoalescer::joinAllIntervals() { |
| 4224 | LLVM_DEBUG(dbgs() << "********** JOINING INTERVALS ***********\n" ); |
| 4225 | assert(WorkList.empty() && LocalWorkList.empty() && "Old data still around." ); |
| 4226 | |
| 4227 | std::vector<MBBPriorityInfo> MBBs; |
  MBBs.reserve(MF->size());
| 4229 | for (MachineBasicBlock &MBB : *MF) { |
    MBBs.push_back(MBBPriorityInfo(&MBB, Loops->getLoopDepth(&MBB),
                                   JoinSplitEdges && isSplitEdge(&MBB)));
| 4232 | } |
  array_pod_sort(MBBs.begin(), MBBs.end(), compareMBBPriority);
| 4234 | |
| 4235 | // Coalesce intervals in MBB priority order. |
| 4236 | unsigned CurrDepth = std::numeric_limits<unsigned>::max(); |
| 4237 | for (MBBPriorityInfo &MBB : MBBs) { |
| 4238 | // Try coalescing the collected local copies for deeper loops. |
| 4239 | if (JoinGlobalCopies && MBB.Depth < CurrDepth) { |
| 4240 | coalesceLocals(); |
| 4241 | CurrDepth = MBB.Depth; |
| 4242 | } |
    copyCoalesceInMBB(MBB.MBB);
| 4244 | } |
| 4245 | lateLiveIntervalUpdate(); |
| 4246 | coalesceLocals(); |
| 4247 | |
| 4248 | // Joining intervals can allow other intervals to be joined. Iteratively join |
| 4249 | // until we make no progress. |
  while (copyCoalesceWorkList(WorkList))
| 4251 | /* empty */; |
| 4252 | lateLiveIntervalUpdate(); |
| 4253 | } |
| 4254 | |
| 4255 | PreservedAnalyses |
| 4256 | RegisterCoalescerPass::run(MachineFunction &MF, |
| 4257 | MachineFunctionAnalysisManager &MFAM) { |
| 4258 | MFPropsModifier _(*this, MF); |
  auto &LIS = MFAM.getResult<LiveIntervalsAnalysis>(MF);
  auto &Loops = MFAM.getResult<MachineLoopAnalysis>(MF);
  auto *SI = MFAM.getCachedResult<SlotIndexesAnalysis>(MF);
| 4262 | RegisterCoalescer Impl(&LIS, SI, &Loops); |
| 4263 | if (!Impl.run(MF)) |
| 4264 | return PreservedAnalyses::all(); |
| 4265 | auto PA = getMachineFunctionPassPreservedAnalyses(); |
| 4266 | PA.preserveSet<CFGAnalyses>(); |
| 4267 | PA.preserve<LiveIntervalsAnalysis>(); |
| 4268 | PA.preserve<SlotIndexesAnalysis>(); |
| 4269 | PA.preserve<MachineLoopAnalysis>(); |
| 4270 | PA.preserve<MachineDominatorTreeAnalysis>(); |
| 4271 | return PA; |
| 4272 | } |
| 4273 | |
| 4274 | bool RegisterCoalescerLegacy::runOnMachineFunction(MachineFunction &MF) { |
| 4275 | auto *LIS = &getAnalysis<LiveIntervalsWrapperPass>().getLIS(); |
| 4276 | auto *Loops = &getAnalysis<MachineLoopInfoWrapperPass>().getLI(); |
| 4277 | auto *SIWrapper = getAnalysisIfAvailable<SlotIndexesWrapperPass>(); |
| 4278 | SlotIndexes *SI = SIWrapper ? &SIWrapper->getSI() : nullptr; |
| 4279 | RegisterCoalescer Impl(LIS, SI, Loops); |
| 4280 | return Impl.run(MF); |
| 4281 | } |
| 4282 | |
| 4283 | bool RegisterCoalescer::run(MachineFunction &fn) { |
| 4284 | LLVM_DEBUG(dbgs() << "********** REGISTER COALESCER **********\n" |
| 4285 | << "********** Function: " << fn.getName() << '\n'); |
| 4286 | |
  // Variables changed between a setjmp and a longjmp can have an undefined
  // value after the longjmp. This behavior can be observed if such a variable
  // is spilled, since longjmp won't restore the value in the spill slot.
  // RegisterCoalescer should not run in functions with a setjmp to avoid
  // merging such undefined variables with predictable ones.
| 4292 | // |
| 4293 | // TODO: Could specifically disable coalescing registers live across setjmp |
| 4294 | // calls |
| 4295 | if (fn.exposesReturnsTwice()) { |
    LLVM_DEBUG(
        dbgs() << "* Skipped as it calls a function that returns twice.\n");
| 4298 | return false; |
| 4299 | } |
| 4300 | |
| 4301 | MF = &fn; |
| 4302 | MRI = &fn.getRegInfo(); |
| 4303 | const TargetSubtargetInfo &STI = fn.getSubtarget(); |
| 4304 | TRI = STI.getRegisterInfo(); |
| 4305 | TII = STI.getInstrInfo(); |
| 4306 | if (EnableGlobalCopies == cl::BOU_UNSET) |
| 4307 | JoinGlobalCopies = STI.enableJoinGlobalCopies(); |
| 4308 | else |
| 4309 | JoinGlobalCopies = (EnableGlobalCopies == cl::BOU_TRUE); |
| 4310 | |
| 4311 | // If there are PHIs tracked by debug-info, they will need updating during |
| 4312 | // coalescing. Build an index of those PHIs to ease updating. |
| 4313 | SlotIndexes *Slots = LIS->getSlotIndexes(); |
| 4314 | for (const auto &DebugPHI : MF->DebugPHIPositions) { |
| 4315 | MachineBasicBlock *MBB = DebugPHI.second.MBB; |
| 4316 | Register Reg = DebugPHI.second.Reg; |
| 4317 | unsigned SubReg = DebugPHI.second.SubReg; |
    SlotIndex SI = Slots->getMBBStartIdx(MBB);
    PHIValPos P = {SI, Reg, SubReg};
    PHIValToPos.insert(std::make_pair(DebugPHI.first, P));
    RegToPHIIdx[Reg].push_back(DebugPHI.first);
| 4322 | } |
| 4323 | |
| 4324 | // The MachineScheduler does not currently require JoinSplitEdges. This will |
| 4325 | // either be enabled unconditionally or replaced by a more general live range |
| 4326 | // splitting optimization. |
| 4327 | JoinSplitEdges = EnableJoinSplits; |
| 4328 | |
| 4329 | if (VerifyCoalescing) |
    MF->verify(LIS, SI, "Before register coalescing", &errs());
| 4331 | |
| 4332 | DbgVRegToValues.clear(); |
  buildVRegToDbgValueMap(fn);
| 4334 | |
  RegClassInfo.runOnMachineFunction(fn);
| 4336 | |
| 4337 | // Join (coalesce) intervals if requested. |
| 4338 | if (EnableJoining) |
| 4339 | joinAllIntervals(); |
| 4340 | |
| 4341 | // After deleting a lot of copies, register classes may be less constrained. |
| 4342 | // Removing sub-register operands may allow GR32_ABCD -> GR32 and DPR_VFP2 -> |
| 4343 | // DPR inflation. |
  array_pod_sort(InflateRegs.begin(), InflateRegs.end());
  InflateRegs.erase(llvm::unique(InflateRegs), InflateRegs.end());
  LLVM_DEBUG(dbgs() << "Trying to inflate " << InflateRegs.size()
                    << " regs.\n");
| 4348 | for (Register Reg : InflateRegs) { |
    if (MRI->reg_nodbg_empty(Reg))
| 4350 | continue; |
| 4351 | if (MRI->recomputeRegClass(Reg)) { |
| 4352 | LLVM_DEBUG(dbgs() << printReg(Reg) << " inflated to " |
| 4353 | << TRI->getRegClassName(MRI->getRegClass(Reg)) << '\n'); |
| 4354 | ++NumInflated; |
| 4355 | |
| 4356 | LiveInterval &LI = LIS->getInterval(Reg); |
| 4357 | if (LI.hasSubRanges()) { |
| 4358 | // If the inflated register class does not support subregisters anymore |
| 4359 | // remove the subranges. |
        if (!MRI->shouldTrackSubRegLiveness(Reg)) {
| 4361 | LI.clearSubRanges(); |
| 4362 | } else { |
| 4363 | #ifndef NDEBUG |
| 4364 | LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg); |
| 4365 | // If subranges are still supported, then the same subregs |
| 4366 | // should still be supported. |
| 4367 | for (LiveInterval::SubRange &S : LI.subranges()) { |
| 4368 | assert((S.LaneMask & ~MaxMask).none()); |
| 4369 | } |
| 4370 | #endif |
| 4371 | } |
| 4372 | } |
| 4373 | } |
| 4374 | } |
| 4375 | |
| 4376 | // After coalescing, update any PHIs that are being tracked by debug-info |
| 4377 | // with their new VReg locations. |
| 4378 | for (auto &p : MF->DebugPHIPositions) { |
    auto it = PHIValToPos.find(p.first);
| 4380 | assert(it != PHIValToPos.end()); |
| 4381 | p.second.Reg = it->second.Reg; |
| 4382 | p.second.SubReg = it->second.SubReg; |
| 4383 | } |
| 4384 | |
| 4385 | PHIValToPos.clear(); |
| 4386 | RegToPHIIdx.clear(); |
| 4387 | |
| 4388 | LLVM_DEBUG(LIS->dump()); |
| 4389 | |
| 4390 | if (VerifyCoalescing) |
    MF->verify(LIS, SI, "After register coalescing", &errs());
| 4392 | return true; |
| 4393 | } |
| 4394 | |