| 1 | //===- HexagonGenInsert.cpp -----------------------------------------------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | |
| 9 | #include "BitTracker.h" |
| 10 | #include "Hexagon.h" |
| 11 | #include "HexagonBitTracker.h" |
| 12 | #include "HexagonInstrInfo.h" |
| 13 | #include "HexagonRegisterInfo.h" |
| 14 | #include "HexagonSubtarget.h" |
| 15 | #include "llvm/ADT/BitVector.h" |
| 16 | #include "llvm/ADT/DenseMap.h" |
| 17 | #include "llvm/ADT/GraphTraits.h" |
| 18 | #include "llvm/ADT/PostOrderIterator.h" |
| 19 | #include "llvm/ADT/STLExtras.h" |
| 20 | #include "llvm/ADT/SmallSet.h" |
| 21 | #include "llvm/ADT/SmallVector.h" |
| 22 | #include "llvm/ADT/StringRef.h" |
| 23 | #include "llvm/CodeGen/MachineBasicBlock.h" |
| 24 | #include "llvm/CodeGen/MachineDominators.h" |
| 25 | #include "llvm/CodeGen/MachineFunction.h" |
| 26 | #include "llvm/CodeGen/MachineFunctionPass.h" |
| 27 | #include "llvm/CodeGen/MachineInstr.h" |
| 28 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
| 29 | #include "llvm/CodeGen/MachineOperand.h" |
| 30 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
| 31 | #include "llvm/CodeGen/TargetRegisterInfo.h" |
| 32 | #include "llvm/IR/DebugLoc.h" |
| 33 | #include "llvm/InitializePasses.h" |
| 34 | #include "llvm/Pass.h" |
| 35 | #include "llvm/Support/CommandLine.h" |
| 36 | #include "llvm/Support/Debug.h" |
| 37 | #include "llvm/Support/MathExtras.h" |
| 38 | #include "llvm/Support/Timer.h" |
| 39 | #include "llvm/Support/raw_ostream.h" |
| 40 | #include <algorithm> |
| 41 | #include <cassert> |
| 42 | #include <cstdint> |
| 43 | #include <iterator> |
| 44 | #include <utility> |
| 45 | #include <vector> |
| 46 | |
| 47 | #define DEBUG_TYPE "hexinsert" |
| 48 | |
| 49 | using namespace llvm; |
| 50 | |
| 51 | static cl::opt<unsigned> |
| 52 | VRegIndexCutoff("insert-vreg-cutoff" , cl::init(Val: ~0U), cl::Hidden, |
| 53 | cl::desc("Vreg# cutoff for insert generation." )); |
| 54 | // The distance cutoff is selected based on the precheckin-perf results: |
| 55 | // cutoffs 20, 25, 35, and 40 are worse than 30. |
| 56 | static cl::opt<unsigned> |
| 57 | VRegDistCutoff("insert-dist-cutoff" , cl::init(Val: 30U), cl::Hidden, |
| 58 | cl::desc("Vreg distance cutoff for insert " |
| 59 | "generation." )); |
| 60 | |
| 61 | // Limit the container sizes for extreme cases where we run out of memory. |
| 62 | static cl::opt<unsigned> |
| 63 | MaxORLSize("insert-max-orl" , cl::init(Val: 4096), cl::Hidden, |
| 64 | cl::desc("Maximum size of OrderedRegisterList" )); |
| 65 | static cl::opt<unsigned> MaxIFMSize("insert-max-ifmap" , cl::init(Val: 1024), |
| 66 | cl::Hidden, |
| 67 | cl::desc("Maximum size of IFMap" )); |
| 68 | |
| 69 | static cl::opt<bool> OptTiming("insert-timing" , cl::Hidden, |
| 70 | cl::desc("Enable timing of insert generation" )); |
| 71 | static cl::opt<bool> |
| 72 | OptTimingDetail("insert-timing-detail" , cl::Hidden, |
| 73 | cl::desc("Enable detailed timing of insert " |
| 74 | "generation" )); |
| 75 | |
| 76 | static cl::opt<bool> OptSelectAll0("insert-all0" , cl::init(Val: false), cl::Hidden); |
| 77 | static cl::opt<bool> OptSelectHas0("insert-has0" , cl::init(Val: false), cl::Hidden); |
| 78 | // Whether to construct constant values via "insert". Could eliminate constant |
| 79 | // extenders, but often not practical. |
| 80 | static cl::opt<bool> OptConst("insert-const" , cl::init(Val: false), cl::Hidden); |
| 81 | |
| 82 | // The preprocessor gets confused when the DEBUG macro is passed larger |
| 83 | // chunks of code. Use this function to detect debugging. |
| 84 | inline static bool isDebug() { |
| 85 | #ifndef NDEBUG |
| 86 | return DebugFlag && isCurrentDebugType(DEBUG_TYPE); |
| 87 | #else |
| 88 | return false; |
| 89 | #endif |
| 90 | } |
| 91 | |
| 92 | namespace { |
| 93 | |
| 94 | // Set of virtual registers, based on BitVector. |
| 95 | struct RegisterSet : private BitVector { |
| 96 | RegisterSet() = default; |
| 97 | explicit RegisterSet(unsigned s, bool t = false) : BitVector(s, t) {} |
| 98 | RegisterSet(const RegisterSet &RS) = default; |
| 99 | RegisterSet &operator=(const RegisterSet &RS) = default; |
| 100 | |
| 101 | using BitVector::clear; |
| 102 | |
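|  | // Note (added for clarity): find_first/find_next return 0 when the set is |
|  | // exhausted. This works because 0 is never a valid virtual register number |
|  | // (index2VirtReg sets the high bit), so callers can iterate with |
|  | //   for (unsigned R = S.find_first(); R; R = S.find_next(R)) ... |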
| 103 | unsigned find_first() const { |
| 104 | int First = BitVector::find_first(); |
| 105 | if (First < 0) |
| 106 | return 0; |
| 107 | return x2v(x: First); |
| 108 | } |
| 109 | |
| 110 | unsigned find_next(unsigned Prev) const { |
| 111 | int Next = BitVector::find_next(Prev: v2x(v: Prev)); |
| 112 | if (Next < 0) |
| 113 | return 0; |
| 114 | return x2v(x: Next); |
| 115 | } |
| 116 | |
| 117 | RegisterSet &insert(unsigned R) { |
| 118 | unsigned Idx = v2x(v: R); |
| 119 | ensure(Idx); |
| 120 | return static_cast<RegisterSet&>(BitVector::set(Idx)); |
| 121 | } |
| 122 | RegisterSet &remove(unsigned R) { |
| 123 | unsigned Idx = v2x(v: R); |
| 124 | if (Idx >= size()) |
| 125 | return *this; |
| 126 | return static_cast<RegisterSet&>(BitVector::reset(Idx)); |
| 127 | } |
| 128 | |
| 129 | RegisterSet &insert(const RegisterSet &Rs) { |
| 130 | return static_cast<RegisterSet&>(BitVector::operator|=(RHS: Rs)); |
| 131 | } |
| 132 | RegisterSet &remove(const RegisterSet &Rs) { |
| 133 | return static_cast<RegisterSet&>(BitVector::reset(RHS: Rs)); |
| 134 | } |
| 135 | |
| 136 | reference operator[](unsigned R) { |
| 137 | unsigned Idx = v2x(v: R); |
| 138 | ensure(Idx); |
| 139 | return BitVector::operator[](Idx); |
| 140 | } |
| 141 | bool operator[](unsigned R) const { |
| 142 | unsigned Idx = v2x(v: R); |
| 143 | assert(Idx < size()); |
| 144 | return BitVector::operator[](Idx); |
| 145 | } |
| 146 | bool has(unsigned R) const { |
| 147 | unsigned Idx = v2x(v: R); |
| 148 | if (Idx >= size()) |
| 149 | return false; |
| 150 | return BitVector::test(Idx); |
| 151 | } |
| 152 | |
| 153 | bool empty() const { |
| 154 | return !BitVector::any(); |
| 155 | } |
| 156 | bool includes(const RegisterSet &Rs) const { |
| 157 | // A.BitVector::test(B) <=> A-B != {} |
| 158 | return !Rs.BitVector::test(RHS: *this); |
| 159 | } |
| 160 | bool intersects(const RegisterSet &Rs) const { |
| 161 | return BitVector::anyCommon(RHS: Rs); |
| 162 | } |
| 163 | |
| 164 | private: |
| 165 | void ensure(unsigned Idx) { |
| 166 | if (size() <= Idx) |
| 167 | resize(N: std::max(a: Idx+1, b: 32U)); |
| 168 | } |
| 169 | |
| 170 | static inline unsigned v2x(unsigned v) { |
| 171 | return Register(v).virtRegIndex(); |
| 172 | } |
| 173 | |
| 174 | static inline unsigned x2v(unsigned x) { |
| 175 | return Register::index2VirtReg(Index: x); |
| 176 | } |
| 177 | }; |
| 178 | |
| 179 | struct PrintRegSet { |
| 180 | PrintRegSet(const RegisterSet &S, const TargetRegisterInfo *RI) |
| 181 | : RS(S), TRI(RI) {} |
| 182 | |
| 183 | friend raw_ostream &operator<< (raw_ostream &OS, |
| 184 | const PrintRegSet &P); |
| 185 | |
| 186 | private: |
| 187 | const RegisterSet &RS; |
| 188 | const TargetRegisterInfo *TRI; |
| 189 | }; |
| 190 | |
| 191 | raw_ostream &operator<< (raw_ostream &OS, const PrintRegSet &P) { |
| 192 | OS << '{'; |
| 193 | for (unsigned R = P.RS.find_first(); R; R = P.RS.find_next(Prev: R)) |
| 194 | OS << ' ' << printReg(Reg: R, TRI: P.TRI); |
| 195 | OS << " }" ; |
| 196 | return OS; |
| 197 | } |
| 198 | |
| 199 | // A convenience class to associate unsigned numbers (such as virtual |
| 200 | // registers) with unsigned numbers. |
| 201 | struct UnsignedMap : public DenseMap<unsigned,unsigned> { |
| 202 | UnsignedMap() = default; |
| 203 | |
| 204 | private: |
| 205 | using BaseType = DenseMap<unsigned, unsigned>; |
| 206 | }; |
| 207 | |
| 208 | // A utility to establish an ordering between virtual registers: |
| 209 | // VRegA < VRegB <=> RegisterOrdering[VRegA] < RegisterOrdering[VRegB] |
| 210 | // This is meant as a cache for the ordering of virtual registers defined |
| 211 | // by a potentially expensive comparison function, or obtained by a |
| 212 | // procedure that should not be repeated each time two registers are compared. |
| 213 | struct RegisterOrdering : public UnsignedMap { |
| 214 | RegisterOrdering() = default; |
| 215 | |
| 216 | unsigned operator[](unsigned VR) const { |
| 217 | const_iterator F = find(Val: VR); |
| 218 | assert(F != end()); |
| 219 | return F->second; |
| 220 | } |
| 221 | |
| 222 | // Add operator(), so that objects of this class can be used as |
| 223 | // comparators in std::sort et al. |
| 224 | bool operator() (unsigned VR1, unsigned VR2) const { |
| 225 | return operator[](VR: VR1) < operator[](VR: VR2); |
| 226 | } |
| 227 | }; |
| 228 | |
| 229 | // Ordering of bit values. This class does not have operator[], but |
| 230 | // it supplies a comparison operator() for use in std:: algorithms. |
| 231 | // The order is as follows: |
| 232 | // - 0 < 1 < ref |
| 233 | // - ref1 < ref2, if ord(ref1.Reg) < ord(ref2.Reg), |
| 234 | // or ord(ref1.Reg) == ord(ref2.Reg), and ref1.Pos < ref2.Pos. |
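|  | // Illustrative example (assuming ord(%10) < ord(%11)): |
|  | //   0 < 1 < %10[2] < %10[5] < %11[0] |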
| 235 | struct BitValueOrdering { |
| 236 | BitValueOrdering(const RegisterOrdering &RB) : BaseOrd(RB) {} |
| 237 | |
| 238 | bool operator() (const BitTracker::BitValue &V1, |
| 239 | const BitTracker::BitValue &V2) const; |
| 240 | |
| 241 | const RegisterOrdering &BaseOrd; |
| 242 | }; |
| 243 | |
| 244 | } // end anonymous namespace |
| 245 | |
| 246 | bool BitValueOrdering::operator() (const BitTracker::BitValue &V1, |
| 247 | const BitTracker::BitValue &V2) const { |
| 248 | if (V1 == V2) |
| 249 | return false; |
| 250 | // V1==0 => true, V2==0 => false |
| 251 | if (V1.is(T: 0) || V2.is(T: 0)) |
| 252 | return V1.is(T: 0); |
| 253 | // Neither of V1,V2 is 0, and V1!=V2. |
| 254 | // V2==1 => false, V1==1 => true |
| 255 | if (V2.is(T: 1) || V1.is(T: 1)) |
| 256 | return !V2.is(T: 1); |
| 257 | // Both V1,V2 are refs. |
| 258 | unsigned Ind1 = BaseOrd[V1.RefI.Reg], Ind2 = BaseOrd[V2.RefI.Reg]; |
| 259 | if (Ind1 != Ind2) |
| 260 | return Ind1 < Ind2; |
| 261 | // V1.Pos==V2.Pos would imply V1==V2, which was excluded above. |
| 262 | assert(V1.RefI.Pos != V2.RefI.Pos && "Bit values should be different" ); |
| 263 | return V1.RefI.Pos < V2.RefI.Pos; |
| 264 | } |
| 265 | |
| 266 | namespace { |
| 267 | |
| 268 | // Cache for the BitTracker's cell map. Map lookup has a logarithmic |
| 269 | // complexity, so this class memoizes the lookup results to reduce |
| 270 | // the access time for repeated lookups of the same cell. |
| 271 | struct CellMapShadow { |
| 272 | CellMapShadow(const BitTracker &T) : BT(T) {} |
| 273 | |
| 274 | const BitTracker::RegisterCell &lookup(unsigned VR) { |
| 275 | unsigned RInd = Register(VR).virtRegIndex(); |
| 276 | // Grow the vector to cover RInd (at least 32 elements, with some slack). |
| 277 | if (RInd >= CVect.size()) |
| 278 | CVect.resize(new_size: std::max(a: RInd+16, b: 32U), x: nullptr); |
| 279 | const BitTracker::RegisterCell *CP = CVect[RInd]; |
| 280 | if (CP == nullptr) |
| 281 | CP = CVect[RInd] = &BT.lookup(Reg: VR); |
| 282 | return *CP; |
| 283 | } |
| 284 | |
| 285 | const BitTracker &BT; |
| 286 | |
| 287 | private: |
| 288 | using CellVectType = std::vector<const BitTracker::RegisterCell *>; |
| 289 | |
| 290 | CellVectType CVect; |
| 291 | }; |
| 292 | |
| 293 | // Comparator class for lexicographic ordering of virtual registers |
| 294 | // according to the corresponding BitTracker::RegisterCell objects. |
| 295 | struct RegisterCellLexCompare { |
| 296 | RegisterCellLexCompare(const BitValueOrdering &BO, CellMapShadow &M) |
| 297 | : BitOrd(BO), CM(M) {} |
| 298 | |
| 299 | bool operator() (unsigned VR1, unsigned VR2) const; |
| 300 | |
| 301 | private: |
| 302 | const BitValueOrdering &BitOrd; |
| 303 | CellMapShadow &CM; |
| 304 | }; |
| 305 | |
| 306 | // Comparator class for lexicographic ordering of virtual registers |
| 307 | // according to the specified bits of the corresponding BitTracker:: |
| 308 | // RegisterCell objects. |
| 309 | // Specifically, this class will be used to compare bit B of a register |
| 310 | // cell for a selected virtual register R with bit N of any register |
| 311 | // other than R. |
| 312 | struct RegisterCellBitCompareSel { |
| 313 | RegisterCellBitCompareSel(unsigned R, unsigned B, unsigned N, |
| 314 | const BitValueOrdering &BO, CellMapShadow &M) |
| 315 | : SelR(R), SelB(B), BitN(N), BitOrd(BO), CM(M) {} |
| 316 | |
| 317 | bool operator() (unsigned VR1, unsigned VR2) const; |
| 318 | |
| 319 | private: |
| 320 | const unsigned SelR, SelB; |
| 321 | const unsigned BitN; |
| 322 | const BitValueOrdering &BitOrd; |
| 323 | CellMapShadow &CM; |
| 324 | }; |
| 325 | |
| 326 | } // end anonymous namespace |
| 327 | |
| 328 | bool RegisterCellLexCompare::operator() (unsigned VR1, unsigned VR2) const { |
| 329 | // Ordering of registers, made up from two given orderings: |
| 330 | // - the ordering of the register numbers, and |
| 331 | // - the ordering of register cells. |
| 332 | // Def. R1 < R2 if: |
| 333 | // - cell(R1) < cell(R2), or |
| 334 | // - cell(R1) == cell(R2), and index(R1) < index(R2). |
| 335 | // |
| 336 | // For register cells, the ordering is lexicographic, with index 0 being |
| 337 | // the most significant. |
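|  | // Illustrative: if cell(R1) = { 0, 1, ref %10 } and cell(R2) = { 0, 1, 1, 0 }, |
|  | // they differ first at index 2, where 1 < ref, so R2 < R1. |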
| 338 | if (VR1 == VR2) |
| 339 | return false; |
| 340 | |
| 341 | const BitTracker::RegisterCell &RC1 = CM.lookup(VR: VR1), &RC2 = CM.lookup(VR: VR2); |
| 342 | uint16_t W1 = RC1.width(), W2 = RC2.width(); |
| 343 | for (uint16_t i = 0, w = std::min(a: W1, b: W2); i < w; ++i) { |
| 344 | const BitTracker::BitValue &V1 = RC1[i], &V2 = RC2[i]; |
| 345 | if (V1 != V2) |
| 346 | return BitOrd(V1, V2); |
| 347 | } |
| 348 | // Cells are equal up until the common length. |
| 349 | if (W1 != W2) |
| 350 | return W1 < W2; |
| 351 | |
| 352 | return BitOrd.BaseOrd[VR1] < BitOrd.BaseOrd[VR2]; |
| 353 | } |
| 354 | |
| 355 | bool RegisterCellBitCompareSel::operator() (unsigned VR1, unsigned VR2) const { |
| 356 | if (VR1 == VR2) |
| 357 | return false; |
| 358 | const BitTracker::RegisterCell &RC1 = CM.lookup(VR: VR1); |
| 359 | const BitTracker::RegisterCell &RC2 = CM.lookup(VR: VR2); |
| 360 | uint16_t W1 = RC1.width(), W2 = RC2.width(); |
| 361 | uint16_t Bit1 = (VR1 == SelR) ? SelB : BitN; |
| 362 | uint16_t Bit2 = (VR2 == SelR) ? SelB : BitN; |
| 363 | // If Bit1 exceeds the width of VR1, then: |
| 364 | // - return false, if at the same time Bit2 exceeds VR2, or |
| 365 | // - return true, otherwise. |
| 366 | // (I.e. "a bit value that does not exist is less than any bit value |
| 367 | // that does exist".) |
| 368 | if (W1 <= Bit1) |
| 369 | return Bit2 < W2; |
| 370 | // If Bit1 is within VR1, but Bit2 is not within VR2, return false. |
| 371 | if (W2 <= Bit2) |
| 372 | return false; |
| 373 | |
| 374 | const BitTracker::BitValue &V1 = RC1[Bit1], V2 = RC2[Bit2]; |
| 375 | if (V1 != V2) |
| 376 | return BitOrd(V1, V2); |
| 377 | return false; |
| 378 | } |
| 379 | |
| 380 | namespace { |
| 381 | |
| 382 | class OrderedRegisterList { |
| 383 | using ListType = std::vector<unsigned>; |
| 384 | const unsigned MaxSize; |
| 385 | |
| 386 | public: |
| 387 | OrderedRegisterList(const RegisterOrdering &RO) |
| 388 | : MaxSize(MaxORLSize), Ord(RO) {} |
| 389 | |
| 390 | void insert(unsigned VR); |
| 391 | void remove(unsigned VR); |
| 392 | |
| 393 | unsigned operator[](unsigned Idx) const { |
| 394 | assert(Idx < Seq.size()); |
| 395 | return Seq[Idx]; |
| 396 | } |
| 397 | |
| 398 | unsigned size() const { |
| 399 | return Seq.size(); |
| 400 | } |
| 401 | |
| 402 | using iterator = ListType::iterator; |
| 403 | using const_iterator = ListType::const_iterator; |
| 404 | |
| 405 | iterator begin() { return Seq.begin(); } |
| 406 | iterator end() { return Seq.end(); } |
| 407 | const_iterator begin() const { return Seq.begin(); } |
| 408 | const_iterator end() const { return Seq.end(); } |
| 409 | |
| 410 | // Convenience function to convert an iterator to the corresponding index. |
| 411 | unsigned idx(iterator It) const { return It-begin(); } |
| 412 | |
| 413 | private: |
| 414 | ListType Seq; |
| 415 | const RegisterOrdering &Ord; |
| 416 | }; |
| 417 | |
| 418 | struct PrintORL { |
| 419 | PrintORL(const OrderedRegisterList &L, const TargetRegisterInfo *RI) |
| 420 | : RL(L), TRI(RI) {} |
| 421 | |
| 422 | friend raw_ostream &operator<< (raw_ostream &OS, const PrintORL &P); |
| 423 | |
| 424 | private: |
| 425 | const OrderedRegisterList &RL; |
| 426 | const TargetRegisterInfo *TRI; |
| 427 | }; |
| 428 | |
| 429 | raw_ostream &operator<< (raw_ostream &OS, const PrintORL &P) { |
| 430 | OS << '('; |
| 431 | OrderedRegisterList::const_iterator B = P.RL.begin(), E = P.RL.end(); |
| 432 | for (OrderedRegisterList::const_iterator I = B; I != E; ++I) { |
| 433 | if (I != B) |
| 434 | OS << ", " ; |
| 435 | OS << printReg(Reg: *I, TRI: P.TRI); |
| 436 | } |
| 437 | OS << ')'; |
| 438 | return OS; |
| 439 | } |
| 440 | |
| 441 | } // end anonymous namespace |
| 442 | |
| 443 | void OrderedRegisterList::insert(unsigned VR) { |
| 444 | iterator L = llvm::lower_bound(Range&: Seq, Value&: VR, C: Ord); |
| 445 | if (L == Seq.end()) |
| 446 | Seq.push_back(x: VR); |
| 447 | else |
| 448 | Seq.insert(position: L, x: VR); |
| 449 | |
| 450 | unsigned S = Seq.size(); |
| 451 | if (S > MaxSize) |
| 452 | Seq.resize(new_size: MaxSize); |
| 453 | assert(Seq.size() <= MaxSize); |
| 454 | } |
| 455 | |
| 456 | void OrderedRegisterList::remove(unsigned VR) { |
| 457 | iterator L = llvm::lower_bound(Range&: Seq, Value&: VR, C: Ord); |
| 458 | if (L != Seq.end()) |
| 459 | Seq.erase(position: L); |
| 460 | } |
| 461 | |
| 462 | namespace { |
| 463 | |
| 464 | // A record of the insert form. The fields correspond to the operands |
| 465 | // of the "insert" instruction: |
| 466 | // ... = insert(SrcR, InsR, #Wdh, #Off) |
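|  | // Illustrative semantics: the result equals SrcR with bits [Off+Wdh-1:Off] |
|  | // replaced by the low Wdh bits of InsR; e.g. insert(SrcR, InsR, #8, #16) |
|  | // copies InsR[7:0] into bits [23:16] of the result. |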
| 467 | struct IFRecord { |
| 468 | IFRecord(unsigned SR = 0, unsigned IR = 0, uint16_t W = 0, uint16_t O = 0) |
| 469 | : SrcR(SR), InsR(IR), Wdh(W), Off(O) {} |
| 470 | |
| 471 | unsigned SrcR, InsR; |
| 472 | uint16_t Wdh, Off; |
| 473 | }; |
| 474 | |
| 475 | struct PrintIFR { |
| 476 | PrintIFR(const IFRecord &R, const TargetRegisterInfo *RI) |
| 477 | : IFR(R), TRI(RI) {} |
| 478 | |
| 479 | private: |
| 480 | friend raw_ostream &operator<< (raw_ostream &OS, const PrintIFR &P); |
| 481 | |
| 482 | const IFRecord &IFR; |
| 483 | const TargetRegisterInfo *TRI; |
| 484 | }; |
| 485 | |
| 486 | raw_ostream &operator<< (raw_ostream &OS, const PrintIFR &P) { |
| 487 | unsigned SrcR = P.IFR.SrcR, InsR = P.IFR.InsR; |
| 488 | OS << '(' << printReg(Reg: SrcR, TRI: P.TRI) << ',' << printReg(Reg: InsR, TRI: P.TRI) |
| 489 | << ",#" << P.IFR.Wdh << ",#" << P.IFR.Off << ')'; |
| 490 | return OS; |
| 491 | } |
| 492 | |
| 493 | using IFRecordWithRegSet = std::pair<IFRecord, RegisterSet>; |
| 494 | |
| 495 | } // end anonymous namespace |
| 496 | |
| 497 | namespace { |
| 498 | |
| 499 | class HexagonGenInsert : public MachineFunctionPass { |
| 500 | public: |
| 501 | static char ID; |
| 502 | |
| 503 | HexagonGenInsert() : MachineFunctionPass(ID) {} |
| 504 | |
| 505 | StringRef getPassName() const override { |
| 506 | return "Hexagon generate \"insert\" instructions" ; |
| 507 | } |
| 508 | |
| 509 | void getAnalysisUsage(AnalysisUsage &AU) const override { |
| 510 | AU.addRequired<MachineDominatorTreeWrapperPass>(); |
| 511 | AU.addPreserved<MachineDominatorTreeWrapperPass>(); |
| 512 | MachineFunctionPass::getAnalysisUsage(AU); |
| 513 | } |
| 514 | |
| 515 | bool runOnMachineFunction(MachineFunction &MF) override; |
| 516 | |
| 517 | private: |
| 518 | using PairMapType = DenseMap<std::pair<unsigned, unsigned>, unsigned>; |
| 519 | |
| 520 | void buildOrderingMF(RegisterOrdering &RO) const; |
| 521 | void buildOrderingBT(RegisterOrdering &RB, RegisterOrdering &RO) const; |
| 522 | bool isIntClass(const TargetRegisterClass *RC) const; |
| 523 | bool isConstant(unsigned VR) const; |
| 524 | bool isSmallConstant(unsigned VR) const; |
| 525 | bool isValidInsertForm(unsigned DstR, unsigned SrcR, unsigned InsR, |
| 526 | uint16_t L, uint16_t S) const; |
| 527 | bool findSelfReference(unsigned VR) const; |
| 528 | bool findNonSelfReference(unsigned VR) const; |
| 529 | void getInstrDefs(const MachineInstr *MI, RegisterSet &Defs) const; |
| 530 | void getInstrUses(const MachineInstr *MI, RegisterSet &Uses) const; |
| 531 | unsigned distance(const MachineBasicBlock *FromB, |
| 532 | const MachineBasicBlock *ToB, const UnsignedMap &RPO, |
| 533 | PairMapType &M) const; |
| 534 | unsigned distance(MachineBasicBlock::const_iterator FromI, |
| 535 | MachineBasicBlock::const_iterator ToI, const UnsignedMap &RPO, |
| 536 | PairMapType &M) const; |
| 537 | bool findRecordInsertForms(unsigned VR, OrderedRegisterList &AVs); |
| 538 | void collectInBlock(MachineBasicBlock *B, OrderedRegisterList &AVs); |
| 539 | void findRemovableRegisters(unsigned VR, IFRecord IF, |
| 540 | RegisterSet &RMs) const; |
| 541 | void computeRemovableRegisters(); |
| 542 | |
| 543 | void pruneEmptyLists(); |
| 544 | void pruneCoveredSets(unsigned VR); |
| 545 | void pruneUsesTooFar(unsigned VR, const UnsignedMap &RPO, PairMapType &M); |
| 546 | void pruneRegCopies(unsigned VR); |
| 547 | void pruneCandidates(); |
| 548 | void selectCandidates(); |
| 549 | bool generateInserts(); |
| 550 | |
| 551 | bool removeDeadCode(MachineDomTreeNode *N); |
| 552 | |
| 553 | // IFRecord coupled with a set of potentially removable registers: |
| 554 | using IFListType = std::vector<IFRecordWithRegSet>; |
| 555 | using IFMapType = DenseMap<unsigned, IFListType>; // vreg -> IFListType |
| 556 | |
| 557 | void dump_map() const; |
| 558 | |
| 559 | const HexagonInstrInfo *HII = nullptr; |
| 560 | const HexagonRegisterInfo *HRI = nullptr; |
| 561 | |
| 562 | MachineFunction *MFN; |
| 563 | MachineRegisterInfo *MRI; |
| 564 | MachineDominatorTree *MDT; |
| 565 | CellMapShadow *CMS; |
| 566 | |
| 567 | RegisterOrdering BaseOrd; |
| 568 | RegisterOrdering CellOrd; |
| 569 | IFMapType IFMap; |
| 570 | }; |
| 571 | |
| 572 | } // end anonymous namespace |
| 573 | |
| 574 | char HexagonGenInsert::ID = 0; |
| 575 | |
| 576 | void HexagonGenInsert::dump_map() const { |
| 577 | for (const auto &I : IFMap) { |
| 578 | dbgs() << " " << printReg(Reg: I.first, TRI: HRI) << ":\n" ; |
| 579 | const IFListType &LL = I.second; |
| 580 | for (const auto &J : LL) |
| 581 | dbgs() << " " << PrintIFR(J.first, HRI) << ", " |
| 582 | << PrintRegSet(J.second, HRI) << '\n'; |
| 583 | } |
| 584 | } |
| 585 | |
| 586 | void HexagonGenInsert::buildOrderingMF(RegisterOrdering &RO) const { |
| 587 | unsigned Index = 0; |
| 588 | |
| 589 | for (const MachineBasicBlock &B : *MFN) { |
| 590 | if (!CMS->BT.reached(B: &B)) |
| 591 | continue; |
| 592 | |
| 593 | for (const MachineInstr &MI : B) { |
| 594 | for (const MachineOperand &MO : MI.operands()) { |
| 595 | if (MO.isReg() && MO.isDef()) { |
| 596 | Register R = MO.getReg(); |
| 597 | assert(MO.getSubReg() == 0 && "Unexpected subregister in definition" ); |
| 598 | if (R.isVirtual()) |
| 599 | RO.insert(KV: std::make_pair(x&: R, y: Index++)); |
| 600 | } |
| 601 | } |
| 602 | } |
| 603 | } |
| 604 | // Since some virtual registers may have had their def and uses eliminated, |
| 605 | // they are no longer referenced in the code, and so they will not appear |
| 606 | // in the map. |
| 607 | } |
| 608 | |
| 609 | void HexagonGenInsert::buildOrderingBT(RegisterOrdering &RB, |
| 610 | RegisterOrdering &RO) const { |
| 611 | // Create a vector of all virtual registers (collect them from the base |
| 612 | // ordering RB), and then sort it using the RegisterCell comparator. |
| 613 | BitValueOrdering BVO(RB); |
| 614 | RegisterCellLexCompare LexCmp(BVO, *CMS); |
| 615 | |
| 616 | using SortableVectorType = std::vector<unsigned>; |
| 617 | |
| 618 | SortableVectorType VRs; |
| 619 | for (auto &I : RB) |
| 620 | VRs.push_back(x: I.first); |
| 621 | llvm::sort(C&: VRs, Comp: LexCmp); |
| 622 | // Transfer the results to the outgoing register ordering. |
| 623 | for (unsigned i = 0, n = VRs.size(); i < n; ++i) |
| 624 | RO.insert(KV: std::make_pair(x&: VRs[i], y&: i)); |
| 625 | } |
| 626 | |
| 627 | inline bool HexagonGenInsert::isIntClass(const TargetRegisterClass *RC) const { |
| 628 | return RC == &Hexagon::IntRegsRegClass || RC == &Hexagon::DoubleRegsRegClass; |
| 629 | } |
| 630 | |
| 631 | bool HexagonGenInsert::isConstant(unsigned VR) const { |
| 632 | const BitTracker::RegisterCell &RC = CMS->lookup(VR); |
| 633 | uint16_t W = RC.width(); |
| 634 | for (uint16_t i = 0; i < W; ++i) { |
| 635 | const BitTracker::BitValue &BV = RC[i]; |
| 636 | if (BV.is(T: 0) || BV.is(T: 1)) |
| 637 | continue; |
| 638 | return false; |
| 639 | } |
| 640 | return true; |
| 641 | } |
| 642 | |
| 643 | bool HexagonGenInsert::isSmallConstant(unsigned VR) const { |
| 644 | const BitTracker::RegisterCell &RC = CMS->lookup(VR); |
| 645 | uint16_t W = RC.width(); |
| 646 | if (W > 64) |
| 647 | return false; |
| 648 | uint64_t V = 0, B = 1; |
| 649 | for (uint16_t i = 0; i < W; ++i) { |
| 650 | const BitTracker::BitValue &BV = RC[i]; |
| 651 | if (BV.is(T: 1)) |
| 652 | V |= B; |
| 653 | else if (!BV.is(T: 0)) |
| 654 | return false; |
| 655 | B <<= 1; |
| 656 | } |
| 657 | |
| 658 | // For 32-bit registers, consider: Rd = #s16. |
| 659 | if (W == 32) |
| 660 | return isInt<16>(x: V); |
| 661 | |
| 662 | // For 64-bit registers, it's Rdd = #s8 or Rdd = combine(#s8,#s8) |
| 663 | return isInt<8>(x: Lo_32(Value: V)) && isInt<8>(x: Hi_32(Value: V)); |
| 664 | } |
| 665 | |
| 666 | bool HexagonGenInsert::isValidInsertForm(unsigned DstR, unsigned SrcR, |
| 667 | unsigned InsR, uint16_t L, uint16_t S) const { |
| 668 | const TargetRegisterClass *DstRC = MRI->getRegClass(Reg: DstR); |
| 669 | const TargetRegisterClass *SrcRC = MRI->getRegClass(Reg: SrcR); |
| 670 | const TargetRegisterClass *InsRC = MRI->getRegClass(Reg: InsR); |
| 671 | // Only integer (32-/64-bit) register classes. |
| 672 | if (!isIntClass(RC: DstRC) || !isIntClass(RC: SrcRC) || !isIntClass(RC: InsRC)) |
| 673 | return false; |
| 674 | // The "source" register must be of the same class as DstR. |
| 675 | if (DstRC != SrcRC) |
| 676 | return false; |
| 677 | if (DstRC == InsRC) |
| 678 | return true; |
| 679 | // A 64-bit register can only be generated from other 64-bit registers. |
| 680 | if (DstRC == &Hexagon::DoubleRegsRegClass) |
| 681 | return false; |
| 682 | // Otherwise, L and S cannot span a 32-bit word boundary. |
| 683 | if (S < 32 && S+L > 32) |
| 684 | return false; |
| 685 | return true; |
| 686 | } |
| 687 | |
| 688 | bool HexagonGenInsert::findSelfReference(unsigned VR) const { |
| 689 | const BitTracker::RegisterCell &RC = CMS->lookup(VR); |
| 690 | for (uint16_t i = 0, w = RC.width(); i < w; ++i) { |
| 691 | const BitTracker::BitValue &V = RC[i]; |
| 692 | if (V.Type == BitTracker::BitValue::Ref && V.RefI.Reg == VR) |
| 693 | return true; |
| 694 | } |
| 695 | return false; |
| 696 | } |
| 697 | |
| 698 | bool HexagonGenInsert::findNonSelfReference(unsigned VR) const { |
| 699 | BitTracker::RegisterCell RC = CMS->lookup(VR); |
| 700 | for (uint16_t i = 0, w = RC.width(); i < w; ++i) { |
| 701 | const BitTracker::BitValue &V = RC[i]; |
| 702 | if (V.Type == BitTracker::BitValue::Ref && V.RefI.Reg != VR) |
| 703 | return true; |
| 704 | } |
| 705 | return false; |
| 706 | } |
| 707 | |
| 708 | void HexagonGenInsert::getInstrDefs(const MachineInstr *MI, |
| 709 | RegisterSet &Defs) const { |
| 710 | for (const MachineOperand &MO : MI->operands()) { |
| 711 | if (!MO.isReg() || !MO.isDef()) |
| 712 | continue; |
| 713 | Register R = MO.getReg(); |
| 714 | if (!R.isVirtual()) |
| 715 | continue; |
| 716 | Defs.insert(R); |
| 717 | } |
| 718 | } |
| 719 | |
| 720 | void HexagonGenInsert::getInstrUses(const MachineInstr *MI, |
| 721 | RegisterSet &Uses) const { |
| 722 | for (const MachineOperand &MO : MI->operands()) { |
| 723 | if (!MO.isReg() || !MO.isUse()) |
| 724 | continue; |
| 725 | Register R = MO.getReg(); |
| 726 | if (!R.isVirtual()) |
| 727 | continue; |
| 728 | Uses.insert(R); |
| 729 | } |
| 730 | } |
| 731 | |
| 732 | unsigned HexagonGenInsert::distance(const MachineBasicBlock *FromB, |
| 733 | const MachineBasicBlock *ToB, const UnsignedMap &RPO, |
| 734 | PairMapType &M) const { |
| 735 | // Forward distance from the end of a block to the beginning of it does |
| 736 | // not make sense. This function should not be called with FromB == ToB. |
| 737 | assert(FromB != ToB); |
| 738 | |
| 739 | unsigned FromN = FromB->getNumber(), ToN = ToB->getNumber(); |
| 740 | // If we have already computed it, return the cached result. |
| 741 | PairMapType::iterator F = M.find(Val: std::make_pair(x&: FromN, y&: ToN)); |
| 742 | if (F != M.end()) |
| 743 | return F->second; |
| 744 | unsigned ToRPO = RPO.lookup(Val: ToN); |
| 745 | |
| 746 | unsigned MaxD = 0; |
| 747 | |
| 748 | for (const MachineBasicBlock *PB : ToB->predecessors()) { |
| 749 | // Skip back edges. Also, if FromB is a predecessor of ToB, the distance |
| 750 | // along that path will be 0, and we don't need to do any calculations |
| 751 | // on it. |
| 752 | if (PB == FromB || RPO.lookup(Val: PB->getNumber()) >= ToRPO) |
| 753 | continue; |
| 754 | unsigned D = PB->size() + distance(FromB, ToB: PB, RPO, M); |
| 755 | if (D > MaxD) |
| 756 | MaxD = D; |
| 757 | } |
| 758 | |
| 759 | // Memoize the result for later lookup. |
| 760 | M.insert(KV: std::make_pair(x: std::make_pair(x&: FromN, y&: ToN), y&: MaxD)); |
| 761 | return MaxD; |
| 762 | } |
| 763 | |
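|  | // The result approximates the number of instructions on the longest path |
|  | // from FromI to ToI: the tail of FromI's block, plus the longest |
|  | // inter-block distance, plus ToI's offset within its own block. |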
| 764 | unsigned HexagonGenInsert::distance(MachineBasicBlock::const_iterator FromI, |
| 765 | MachineBasicBlock::const_iterator ToI, const UnsignedMap &RPO, |
| 766 | PairMapType &M) const { |
| 767 | const MachineBasicBlock *FB = FromI->getParent(), *TB = ToI->getParent(); |
| 768 | if (FB == TB) |
| 769 | return std::distance(first: FromI, last: ToI); |
| 770 | unsigned D1 = std::distance(first: TB->begin(), last: ToI); |
| 771 | unsigned D2 = distance(FromB: FB, ToB: TB, RPO, M); |
| 772 | unsigned D3 = std::distance(first: FromI, last: FB->end()); |
| 773 | return D1+D2+D3; |
| 774 | } |
| 775 | |
| 776 | bool HexagonGenInsert::findRecordInsertForms(unsigned VR, |
| 777 | OrderedRegisterList &AVs) { |
| 778 | if (isDebug()) { |
| 779 | dbgs() << __func__ << ": " << printReg(Reg: VR, TRI: HRI) |
| 780 | << " AVs: " << PrintORL(AVs, HRI) << "\n" ; |
| 781 | } |
| 782 | if (AVs.size() == 0) |
| 783 | return false; |
| 784 | |
| 785 | using iterator = OrderedRegisterList::iterator; |
| 786 | |
| 787 | BitValueOrdering BVO(BaseOrd); |
| 788 | const BitTracker::RegisterCell &RC = CMS->lookup(VR); |
| 789 | uint16_t W = RC.width(); |
| 790 | |
| 791 | using RSRecord = std::pair<unsigned, uint16_t>; // (reg,shift) |
| 792 | using RSListType = std::vector<RSRecord>; |
| 793 | // Have a map, with key being the matching prefix length, and the value |
| 794 | // being the list of pairs (R,S), where R's prefix matches VR at S. |
| 795 | // (DenseMap<uint16_t,RSListType> fails to instantiate.) |
| 796 | using LRSMapType = DenseMap<unsigned, RSListType>; |
| 797 | LRSMapType LM; |
| 798 | |
| 799 | // Conceptually, rotate the cell RC right (i.e. towards the LSB) by S, |
| 800 | // and find matching prefixes from AVs with the rotated RC. Such a prefix |
| 801 | // would match a string of bits (of length L) in RC starting at S. |
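|  | // Illustrative: for W=8 and S=3, we search AVs for registers whose low bits |
|  | // match RC[3], RC[4], ... for as long a prefix L as possible. |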
| 802 | for (uint16_t S = 0; S < W; ++S) { |
| 803 | iterator B = AVs.begin(), E = AVs.end(); |
| 804 | // The registers in AVs are ordered according to the lexical order of |
| 805 | // the corresponding register cells. This means that the range of |
| 806 | // registers in AVs that match a prefix of length L+1 will be contained in |
| 807 | // the range that matches a prefix of length L. This means that we can |
| 808 | // keep narrowing the search space as the prefix length goes up. This |
| 809 | // helps reduce the overall complexity of the search. |
| 810 | uint16_t L; |
| 811 | for (L = 0; L < W-S; ++L) { |
| 812 | // Compare against VR's bits starting at S, which emulates rotation |
| 813 | // of VR by S. |
| 814 | RegisterCellBitCompareSel RCB(VR, S+L, L, BVO, *CMS); |
| 815 | iterator NewB = std::lower_bound(first: B, last: E, val: VR, comp: RCB); |
| 816 | iterator NewE = std::upper_bound(first: NewB, last: E, val: VR, comp: RCB); |
| 817 | // For the registers that are eliminated from the next range, L is |
| 818 | // the longest prefix matching VR at position S (their prefixes |
| 819 | // differ from VR at S+L). If L>0, record this information for later |
| 820 | // use. |
| 821 | if (L > 0) { |
| 822 | for (iterator I = B; I != NewB; ++I) |
| 823 | LM[L].push_back(x: std::make_pair(x&: *I, y&: S)); |
| 824 | for (iterator I = NewE; I != E; ++I) |
| 825 | LM[L].push_back(x: std::make_pair(x&: *I, y&: S)); |
| 826 | } |
| 827 | B = NewB, E = NewE; |
| 828 | if (B == E) |
| 829 | break; |
| 830 | } |
| 831 | // Record the final register range. If this range is non-empty, then |
| 832 | // L=W-S. |
| 833 | assert(B == E || L == W-S); |
| 834 | if (B != E) { |
| 835 | for (iterator I = B; I != E; ++I) |
| 836 | LM[L].push_back(x: std::make_pair(x&: *I, y&: S)); |
| 837 | // If B!=E, then we found a range of registers whose prefixes cover the |
| 838 | // rest of VR from position S. There is no need to further advance S. |
| 839 | break; |
| 840 | } |
| 841 | } |
| 842 | |
| 843 | if (isDebug()) { |
| 844 | dbgs() << "Prefixes matching register " << printReg(Reg: VR, TRI: HRI) << "\n" ; |
| 845 | for (const auto &I : LM) { |
| 846 | dbgs() << " L=" << I.first << ':'; |
| 847 | const RSListType &LL = I.second; |
| 848 | for (const auto &J : LL) |
| 849 | dbgs() << " (" << printReg(Reg: J.first, TRI: HRI) << ",@" << J.second << ')'; |
| 850 | dbgs() << '\n'; |
| 851 | } |
| 852 | } |
| 853 | |
| 854 | bool Recorded = false; |
| 855 | |
| 856 | for (unsigned SrcR : AVs) { |
| 857 | int FDi = -1, LDi = -1; // First/last different bit. |
| 858 | const BitTracker::RegisterCell &AC = CMS->lookup(VR: SrcR); |
| 859 | uint16_t AW = AC.width(); |
| 860 | for (uint16_t i = 0, w = std::min(a: W, b: AW); i < w; ++i) { |
| 861 | if (RC[i] == AC[i]) |
| 862 | continue; |
| 863 | if (FDi == -1) |
| 864 | FDi = i; |
| 865 | LDi = i; |
| 866 | } |
| 867 | if (FDi == -1) |
| 868 | continue; // TODO (future): Record identical registers. |
| 869 | // Look for a register whose prefix could patch the range [FD..LD] |
| 870 | // where VR and SrcR differ. |
| 871 | uint16_t FD = FDi, LD = LDi; // Switch to unsigned type. |
| 872 | uint16_t MinL = LD-FD+1; |
| 873 | for (uint16_t L = MinL; L < W; ++L) { |
| 874 | LRSMapType::iterator F = LM.find(Val: L); |
| 875 | if (F == LM.end()) |
| 876 | continue; |
| 877 | RSListType &LL = F->second; |
| 878 | for (const auto &I : LL) { |
| 879 | uint16_t S = I.second; |
| 880 | // MinL is the minimum length of the prefix. Any length above MinL |
| 881 | // allows some flexibility as to where the prefix can start: |
| 882 | // given the extra length EL=L-MinL, the prefix must start between |
| 883 | // max(0,FD-EL) and FD. |
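|  | // Illustrative: with FD=5 and LD=9, MinL=5; a recorded prefix of length |
|  | // L=7 (EL=2) is usable only if it starts at S in [3..5]. |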
| 884 | if (S > FD) // Starts too late. |
| 885 | continue; |
| 886 | uint16_t EL = L-MinL; |
| 887 | uint16_t LowS = (EL < FD) ? FD-EL : 0; |
| 888 | if (S < LowS) // Starts too early. |
| 889 | continue; |
| 890 | unsigned InsR = I.first; |
| 891 | if (!isValidInsertForm(DstR: VR, SrcR, InsR, L, S)) |
| 892 | continue; |
| 893 | if (isDebug()) { |
| 894 | dbgs() << printReg(Reg: VR, TRI: HRI) << " = insert(" << printReg(Reg: SrcR, TRI: HRI) |
| 895 | << ',' << printReg(Reg: InsR, TRI: HRI) << ",#" << L << ",#" |
| 896 | << S << ")\n" ; |
| 897 | } |
| 898 | IFRecordWithRegSet RR(IFRecord(SrcR, InsR, L, S), RegisterSet()); |
| 899 | IFMap[VR].push_back(x: RR); |
| 900 | Recorded = true; |
| 901 | } |
| 902 | } |
| 903 | } |
| 904 | |
| 905 | return Recorded; |
| 906 | } |
| 907 | |
| 908 | void HexagonGenInsert::collectInBlock(MachineBasicBlock *B, |
| 909 | OrderedRegisterList &AVs) { |
| 910 | if (isDebug()) |
| 911 | dbgs() << "visiting block " << printMBBReference(MBB: *B) << "\n" ; |
| 912 | |
| 913 | // First, check if this block is reachable at all. If not, the bit tracker |
| 914 | // will not have any information about registers in it. |
| 915 | if (!CMS->BT.reached(B)) |
| 916 | return; |
| 917 | |
| 918 | bool DoConst = OptConst; |
| 919 | // Keep a separate set of registers defined in this block, so that we |
| 920 | // can remove them from the list of available registers once all DT |
| 921 | // successors have been processed. |
| 922 | RegisterSet BlockDefs, InsDefs; |
| 923 | for (MachineInstr &MI : *B) { |
| 924 | InsDefs.clear(); |
| 925 | getInstrDefs(MI: &MI, Defs&: InsDefs); |
| 926 | // Leave those alone. They are more transparent than "insert". |
| 927 | bool Skip = MI.isCopy() || MI.isRegSequence(); |
| 928 | |
| 929 | if (!Skip) { |
| 930 | // Visit all defined registers, and attempt to find the corresponding |
| 931 | // "insert" representations. |
| 932 | for (unsigned VR = InsDefs.find_first(); VR; VR = InsDefs.find_next(Prev: VR)) { |
| 933 | // Do not collect registers that are known to be compile-time |
| 934 | // constants, unless requested. |
| 935 | if (!DoConst && isConstant(VR)) |
| 936 | continue; |
| 937 | // If VR's cell contains a reference to VR, then VR cannot be defined |
| 938 | // via "insert". If VR is a constant that can be generated in a single |
| 939 | // instruction (without constant extenders), generating it via insert |
| 940 | // makes no sense. |
| 941 | if (findSelfReference(VR) || isSmallConstant(VR)) |
| 942 | continue; |
| 943 | |
| 944 | findRecordInsertForms(VR, AVs); |
| 945 | // Stop if the map size is too large. |
| 946 | if (IFMap.size() > MaxIFMSize) |
| 947 | return; |
| 948 | } |
| 949 | } |
| 950 | |
| 951 | // Insert the defined registers into the list of available registers |
| 952 | // after they have been processed. |
| 953 | for (unsigned VR = InsDefs.find_first(); VR; VR = InsDefs.find_next(Prev: VR)) |
| 954 | AVs.insert(VR); |
| 955 | BlockDefs.insert(Rs: InsDefs); |
| 956 | } |
| 957 | |
| 958 | for (auto *DTN : children<MachineDomTreeNode*>(G: MDT->getNode(BB: B))) { |
| 959 | MachineBasicBlock *SB = DTN->getBlock(); |
| 960 | collectInBlock(B: SB, AVs); |
| 961 | } |
| 962 | |
| 963 | for (unsigned VR = BlockDefs.find_first(); VR; VR = BlockDefs.find_next(Prev: VR)) |
| 964 | AVs.remove(VR); |
| 965 | } |
| 966 | |
| 967 | void HexagonGenInsert::findRemovableRegisters(unsigned VR, IFRecord IF, |
| 968 | RegisterSet &RMs) const { |
| 969 | // For a given register VR and an insert form, find the registers that are |
| 970 | // used by the current definition of VR, and which would no longer be |
| 971 | // needed for it after the definition of VR is replaced with the insert |
| 972 | // form. These are the registers that could potentially become dead. |
| 973 | RegisterSet Regs[2]; |
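|  | // Regs[S] holds the current BFS frontier; Regs[1-S] collects the registers |
|  | // used by the defining instructions of the current frontier. |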
| 974 | |
| 975 | unsigned S = 0; // Register set selector. |
| 976 | Regs[S].insert(R: VR); |
| 977 | |
| 978 | while (!Regs[S].empty()) { |
| 979 | // Breadth-first search. |
| 980 | unsigned OtherS = 1-S; |
| 981 | Regs[OtherS].clear(); |
| 982 | for (unsigned R = Regs[S].find_first(); R; R = Regs[S].find_next(Prev: R)) { |
| 983 | Regs[S].remove(R); |
| 984 | if (R == IF.SrcR || R == IF.InsR) |
| 985 | continue; |
| 986 | // Check if a given register has bits that are references to any other |
| 987 | // registers. This is to detect situations where the instruction that |
| 988 | // defines register R takes register Q as an operand, but R itself does |
| 989 | // not contain any bits from Q. Loads are examples of how this could |
| 990 | // happen: |
| 991 | // R = load Q |
| 992 | // In this case (assuming we do not have any knowledge about the loaded |
| 993 | // value), we must not treat R as a "conveyance" of the bits from Q. |
| 994 | // (The information in BT about R's bits would have them as constants, |
| 995 | // in case of zero-extending loads, or refs to R.) |
| 996 | if (!findNonSelfReference(VR: R)) |
| 997 | continue; |
| 998 | RMs.insert(R); |
| 999 | const MachineInstr *DefI = MRI->getVRegDef(Reg: R); |
| 1000 | assert(DefI); |
| 1001 | // Do not iterate past PHI nodes to avoid infinite loops. This can |
| 1002 | // make the final set a bit less accurate, but the removable register |
| 1003 | // sets are an approximation anyway. |
| 1004 | if (DefI->isPHI()) |
| 1005 | continue; |
| 1006 | getInstrUses(MI: DefI, Uses&: Regs[OtherS]); |
| 1007 | } |
| 1008 | S = OtherS; |
| 1009 | } |
| 1010 | // The register VR is added to the list as a side-effect of the algorithm, |
| 1011 | // but it is not "potentially removable". A potentially removable register |
| 1012 | // is one that may become unused (dead) after conversion to the insert form |
| 1013 | // IF, and obviously VR (or its replacement) will not become dead by |
| 1014 | // applying IF. |
| 1015 | RMs.remove(R: VR); |
| 1016 | } |
| 1017 | |
| 1018 | void HexagonGenInsert::computeRemovableRegisters() { |
| 1019 | for (auto &I : IFMap) { |
| 1020 | IFListType &LL = I.second; |
| 1021 | for (auto &J : LL) |
| 1022 | findRemovableRegisters(VR: I.first, IF: J.first, RMs&: J.second); |
| 1023 | } |
| 1024 | } |
| 1025 | |
| 1026 | void HexagonGenInsert::pruneEmptyLists() { |
| 1027 | // Remove all entries from the map, where the register has no insert forms |
| 1028 | // associated with it. |
| 1029 | using IterListType = SmallVector<IFMapType::iterator, 16>; |
| 1030 | IterListType Prune; |
| 1031 | for (IFMapType::iterator I = IFMap.begin(), E = IFMap.end(); I != E; ++I) { |
| 1032 | if (I->second.empty()) |
| 1033 | Prune.push_back(Elt: I); |
| 1034 | } |
| 1035 | for (const auto &It : Prune) |
| 1036 | IFMap.erase(I: It); |
| 1037 | } |
| 1038 | |
| 1039 | void HexagonGenInsert::pruneCoveredSets(unsigned VR) { |
| 1040 | IFMapType::iterator F = IFMap.find(Val: VR); |
| 1041 | assert(F != IFMap.end()); |
| 1042 | IFListType &LL = F->second; |
| 1043 | |
| 1044 | // First, examine the IF candidates for register VR whose removable-register |
| 1045 | // sets are empty. This means that a given candidate will not help |
| 1046 | // eliminate any registers, but since "insert" is not a constant-extendable |
| 1047 | // instruction, using such a candidate may reduce code size if the |
| 1048 | // definition of VR is constant-extended. |
| 1049 | // If there exists a candidate with a non-empty set, the ones with empty |
| 1050 | // sets will not be used and can be removed. |
| 1051 | MachineInstr *DefVR = MRI->getVRegDef(Reg: VR); |
| 1052 | bool DefEx = HII->isConstExtended(MI: *DefVR); |
| 1053 | bool HasNE = false; |
| 1054 | for (const auto &I : LL) { |
| 1055 | if (I.second.empty()) |
| 1056 | continue; |
| 1057 | HasNE = true; |
| 1058 | break; |
| 1059 | } |
| 1060 | if (!DefEx || HasNE) { |
| 1061 | // The definition of VR is not constant-extended, or there is a candidate |
| 1062 | // with a non-empty set. Remove all candidates with empty sets. |
| 1063 | auto IsEmpty = [] (const IFRecordWithRegSet &IR) -> bool { |
| 1064 | return IR.second.empty(); |
| 1065 | }; |
| 1066 | llvm::erase_if(C&: LL, P: IsEmpty); |
| 1067 | } else { |
| 1068 | // The definition of VR is constant-extended, and all candidates have |
| 1069 | // empty removable-register sets. Pick the maximum candidate, and remove |
| 1070 | // all others. The "maximum" does not have any special meaning here, it |
| 1071 | // is only so that the candidate that will remain on the list is |
| 1072 | // selected deterministically. |
| 1073 | IFRecord MaxIF = LL[0].first; |
| 1074 | for (unsigned i = 1, n = LL.size(); i < n; ++i) { |
| 1075 | // If LL[MaxI] < LL[i], then MaxI = i. |
| 1076 | const IFRecord &IF = LL[i].first; |
| 1077 | unsigned M0 = BaseOrd[MaxIF.SrcR], M1 = BaseOrd[MaxIF.InsR]; |
| 1078 | unsigned R0 = BaseOrd[IF.SrcR], R1 = BaseOrd[IF.InsR]; |
| 1079 | if (M0 > R0) |
| 1080 | continue; |
| 1081 | if (M0 == R0) { |
| 1082 | if (M1 > R1) |
| 1083 | continue; |
| 1084 | if (M1 == R1) { |
| 1085 | if (MaxIF.Wdh > IF.Wdh) |
| 1086 | continue; |
| 1087 | if (MaxIF.Wdh == IF.Wdh && MaxIF.Off >= IF.Off) |
| 1088 | continue; |
| 1089 | } |
| 1090 | } |
| 1091 | // MaxIF < IF. |
| 1092 | MaxIF = IF; |
| 1093 | } |
| 1094 | // Remove everything except the maximum candidate. All register sets |
| 1095 | // are empty, so no need to preserve anything. |
| 1096 | LL.clear(); |
| 1097 | LL.push_back(x: std::make_pair(x&: MaxIF, y: RegisterSet())); |
| 1098 | } |
| 1099 | |
| 1100 | // Now, remove those whose sets of potentially removable registers are |
| 1101 | // contained in another IF candidate for VR. For example, given these |
| 1102 | // candidates for %45, |
| 1103 | // %45: |
| 1104 | // (%44,%41,#9,#8), { %42 } |
| 1105 | // (%43,%41,#9,#8), { %42 %44 } |
| 1106 | // remove the first one, since it is contained in the second one. |
| 1107 | for (unsigned i = 0, n = LL.size(); i < n; ) { |
| 1108 | const RegisterSet &RMi = LL[i].second; |
| 1109 | unsigned j = 0; |
| 1110 | while (j < n) { |
| 1111 | if (j != i && LL[j].second.includes(Rs: RMi)) |
| 1112 | break; |
| 1113 | j++; |
| 1114 | } |
| 1115 | if (j == n) { // RMi not contained in anything else. |
| 1116 | i++; |
| 1117 | continue; |
| 1118 | } |
| 1119 | LL.erase(position: LL.begin()+i); |
| 1120 | n = LL.size(); |
| 1121 | } |
| 1122 | } |
| 1123 | |
| 1124 | void HexagonGenInsert::pruneUsesTooFar(unsigned VR, const UnsignedMap &RPO, |
| 1125 | PairMapType &M) { |
| 1126 | IFMapType::iterator F = IFMap.find(Val: VR); |
| 1127 | assert(F != IFMap.end()); |
| 1128 | IFListType &LL = F->second; |
| 1129 | unsigned Cutoff = VRegDistCutoff; |
| 1130 | const MachineInstr *DefV = MRI->getVRegDef(Reg: VR); |
| 1131 | |
| 1132 | for (unsigned i = LL.size(); i > 0; --i) { |
| 1133 | unsigned SR = LL[i-1].first.SrcR, IR = LL[i-1].first.InsR; |
| 1134 | const MachineInstr *DefS = MRI->getVRegDef(Reg: SR); |
| 1135 | const MachineInstr *DefI = MRI->getVRegDef(Reg: IR); |
| 1136 | unsigned DSV = distance(FromI: DefS, ToI: DefV, RPO, M); |
| 1137 | if (DSV < Cutoff) { |
| 1138 | unsigned DIV = distance(FromI: DefI, ToI: DefV, RPO, M); |
| 1139 | if (DIV < Cutoff) |
| 1140 | continue; |
| 1141 | } |
| 1142 | LL.erase(position: LL.begin()+(i-1)); |
| 1143 | } |
| 1144 | } |
| 1145 | |
| 1146 | void HexagonGenInsert::pruneRegCopies(unsigned VR) { |
| 1147 | IFMapType::iterator F = IFMap.find(Val: VR); |
| 1148 | assert(F != IFMap.end()); |
| 1149 | IFListType &LL = F->second; |
| 1150 | |
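|  | // An "insert" that overwrites a full 32-bit word (width 32 at offset 0 or |
|  | // 32) just copies InsR into that word, so it offers no benefit over a |
|  | // plain register copy. |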
| 1151 | auto IsCopy = [] (const IFRecordWithRegSet &IR) -> bool { |
| 1152 | return IR.first.Wdh == 32 && (IR.first.Off == 0 || IR.first.Off == 32); |
| 1153 | }; |
| 1154 | llvm::erase_if(C&: LL, P: IsCopy); |
| 1155 | } |
| 1156 | |
| 1157 | void HexagonGenInsert::pruneCandidates() { |
| 1158 | // Remove candidates that are not beneficial, regardless of the final |
| 1159 | // selection method. |
| 1160 | // First, remove candidates whose potentially removable set is a subset |
| 1161 | // of another candidate's set. |
| 1162 | for (const auto &I : IFMap) |
| 1163 | pruneCoveredSets(VR: I.first); |
| 1164 | |
| 1165 | UnsignedMap RPO; |
| 1166 | |
| 1167 | using RPOTType = ReversePostOrderTraversal<const MachineFunction *>; |
| 1168 | |
| 1169 | RPOTType RPOT(MFN); |
| 1170 | unsigned RPON = 0; |
| 1171 | for (const auto &I : RPOT) |
| 1172 | RPO[I->getNumber()] = RPON++; |
| 1173 | |
| 1174 | PairMapType Memo; // Memoization map for distance calculation. |
| 1175 | // Remove candidates that would use registers defined too far away. |
| 1176 | for (const auto &I : IFMap) |
| 1177 | pruneUsesTooFar(VR: I.first, RPO, M&: Memo); |
| 1178 | |
| 1179 | pruneEmptyLists(); |
| 1180 | |
| 1181 | for (const auto &I : IFMap) |
| 1182 | pruneRegCopies(VR: I.first); |
| 1183 | } |
| 1184 | |
| 1185 | namespace { |
| 1186 | |
| 1187 | // Class for comparing IF candidates for registers that have multiple of |
| 1188 | // them. The smaller the candidate, according to this ordering, the better. |
| 1189 | // First, compare the number of zeros in the associated potentially |
| 1190 | // removable register sets. "Zero" indicates that the register is very likely to |
| 1191 | // become dead after this transformation. |
| 1192 | // Second, compare "averages", i.e. use-count per size. The lower wins. |
| 1193 | // After that, it does not really matter which one is smaller. Resolve |
| 1194 | // the tie in some deterministic way. |
| 1195 | struct IFOrdering { |
| 1196 | IFOrdering(const UnsignedMap &UC, const RegisterOrdering &BO) |
| 1197 | : UseC(UC), BaseOrd(BO) {} |
| 1198 | |
| 1199 | bool operator() (const IFRecordWithRegSet &A, |
| 1200 | const IFRecordWithRegSet &B) const; |
| 1201 | |
| 1202 | private: |
| 1203 | void stats(const RegisterSet &Rs, unsigned &Size, unsigned &Zero, |
| 1204 | unsigned &Sum) const; |
| 1205 | |
| 1206 | const UnsignedMap &UseC; |
| 1207 | const RegisterOrdering &BaseOrd; |
| 1208 | }; |
| 1209 | |
| 1210 | } // end anonymous namespace |
| 1211 | |
| 1212 | bool IFOrdering::operator() (const IFRecordWithRegSet &A, |
| 1213 | const IFRecordWithRegSet &B) const { |
| 1214 | unsigned SizeA = 0, ZeroA = 0, SumA = 0; |
| 1215 | unsigned SizeB = 0, ZeroB = 0, SumB = 0; |
| 1216 | stats(Rs: A.second, Size&: SizeA, Zero&: ZeroA, Sum&: SumA); |
| 1217 | stats(Rs: B.second, Size&: SizeB, Zero&: ZeroB, Sum&: SumB); |
| 1218 | |
| 1219 | // We will pick the minimum element. The more zeros, the better. |
| 1220 | if (ZeroA != ZeroB) |
| 1221 | return ZeroA > ZeroB; |
| 1222 | // Compare SumA/SizeA with SumB/SizeB, lower is better. |
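|  | // (Cross-multiplied to avoid integer division; e.g. 3/4 vs 2/3 becomes |
|  | // 9 vs 8.) |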
| 1223 | uint64_t AvgA = SumA*SizeB, AvgB = SumB*SizeA; |
| 1224 | if (AvgA != AvgB) |
| 1225 | return AvgA < AvgB; |
| 1226 | |
| 1227 | // The sets compare identical so far. Resort to comparing the IF records. |
| 1228 | // The actual values don't matter, this is only for determinism. |
| 1229 | unsigned OSA = BaseOrd[A.first.SrcR], OSB = BaseOrd[B.first.SrcR]; |
| 1230 | if (OSA != OSB) |
| 1231 | return OSA < OSB; |
| 1232 | unsigned OIA = BaseOrd[A.first.InsR], OIB = BaseOrd[B.first.InsR]; |
| 1233 | if (OIA != OIB) |
| 1234 | return OIA < OIB; |
| 1235 | if (A.first.Wdh != B.first.Wdh) |
| 1236 | return A.first.Wdh < B.first.Wdh; |
| 1237 | return A.first.Off < B.first.Off; |
| 1238 | } |
| 1239 | |
| 1240 | void IFOrdering::stats(const RegisterSet &Rs, unsigned &Size, unsigned &Zero, |
| 1241 | unsigned &Sum) const { |
| 1242 | for (unsigned R = Rs.find_first(); R; R = Rs.find_next(Prev: R)) { |
| 1243 | UnsignedMap::const_iterator F = UseC.find(Val: R); |
| 1244 | assert(F != UseC.end()); |
| 1245 | unsigned UC = F->second; |
| 1246 | if (UC == 0) |
| 1247 | Zero++; |
| 1248 | Sum += UC; |
| 1249 | Size++; |
| 1250 | } |
| 1251 | } |
| 1252 | |
| 1253 | void HexagonGenInsert::selectCandidates() { |
| 1254 | // Some registers may have multiple valid candidates. Pick the best one |
| 1255 | // (or decide not to use any). |
| 1256 | |
| 1257 | // Compute the "removability" measure of R: |
| 1258 | // For each potentially removable register R, record the number of |
| 1259 | // registers with IF candidates, where R appears in at least one set. |
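|  | // Illustrative: if %10 appears in the removable set of some candidate for |
|  | // three different registers, then RemC[%10] == 3. |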
| 1260 | RegisterSet AllRMs; |
| 1261 | UnsignedMap UseC, RemC; |
| 1262 | IFMapType::iterator End = IFMap.end(); |
| 1263 | |
| 1264 | for (IFMapType::iterator I = IFMap.begin(); I != End; ++I) { |
| 1265 | const IFListType &LL = I->second; |
| 1266 | RegisterSet TT; |
| 1267 | for (const auto &J : LL) |
| 1268 | TT.insert(Rs: J.second); |
| 1269 | for (unsigned R = TT.find_first(); R; R = TT.find_next(Prev: R)) |
| 1270 | RemC[R]++; |
| 1271 | AllRMs.insert(Rs: TT); |
| 1272 | } |
| 1273 | |
| 1274 | for (unsigned R = AllRMs.find_first(); R; R = AllRMs.find_next(Prev: R)) { |
| 1275 | using use_iterator = MachineRegisterInfo::use_nodbg_iterator; |
| 1276 | using InstrSet = SmallSet<const MachineInstr *, 16>; |
| 1277 | |
| 1278 | InstrSet UIs; |
| 1279 | // Count the number of instructions in which R is used, not the |
| 1280 | // number of operands. |
| 1281 | use_iterator E = MRI->use_nodbg_end(); |
| 1282 | for (use_iterator I = MRI->use_nodbg_begin(RegNo: R); I != E; ++I) |
| 1283 | UIs.insert(Ptr: I->getParent()); |
| 1284 | unsigned C = UIs.size(); |
| 1285 | // Calculate a measure, which is the number of instructions using R, |
| 1286 | // minus the "removability" count computed earlier. |
| 1287 | unsigned D = RemC[R]; |
| 1288 | UseC[R] = (C > D) ? C-D : 0; // "doz": difference or zero |
| 1289 | } |
| 1290 | |
| 1291 | bool SelectAll0 = OptSelectAll0, SelectHas0 = OptSelectHas0; |
| 1292 | if (!SelectAll0 && !SelectHas0) |
| 1293 | SelectAll0 = true; |
| 1294 | |
| 1295 | // The smaller the number UseC for a given register R, the "less used" |
| 1296 | // R is aside from the opportunities for removal offered by generating |
| 1297 | // "insert" instructions. |
| 1298 | // Iterate over the IF map, and for those registers that have multiple |
| 1299 | // candidates, pick the minimum one according to IFOrdering. |
| 1300 | IFOrdering IFO(UseC, BaseOrd); |
| 1301 | for (IFMapType::iterator I = IFMap.begin(); I != End; ++I) { |
| 1302 | IFListType &LL = I->second; |
| 1303 | if (LL.empty()) |
| 1304 | continue; |
| 1305 | // Get the minimum element, remember it and clear the list. If the |
| 1306 | // element found is adequate, we will put it back on the list, |
| 1307 | // otherwise the list will remain empty, and the entry for this register |
| 1308 | // will be removed (i.e. this register will not be replaced by insert). |
| 1309 | IFListType::iterator MinI = llvm::min_element(Range&: LL, C: IFO); |
| 1310 | assert(MinI != LL.end()); |
| 1311 | IFRecordWithRegSet M = *MinI; |
| 1312 | LL.clear(); |
| 1313 | |
| 1314 | // We want to make sure that this replacement will have a chance to be |
| 1315 | // beneficial, and that means that we want to have indication that some |
| 1316 | // register will be removed. The most likely registers to be eliminated |
| 1317 | // are the use operands in the definition of I->first. Accept/reject a |
| 1318 | // candidate based on how many of its uses it can potentially eliminate. |
| 1319 | |
| 1320 | RegisterSet Us; |
| 1321 | const MachineInstr *DefI = MRI->getVRegDef(Reg: I->first); |
| 1322 | getInstrUses(MI: DefI, Uses&: Us); |
| 1323 | bool Accept = false; |
| 1324 | |
| 1325 | if (SelectAll0) { |
| 1326 | bool All0 = true; |
| 1327 | for (unsigned R = Us.find_first(); R; R = Us.find_next(Prev: R)) { |
| 1328 | if (UseC[R] == 0) |
| 1329 | continue; |
| 1330 | All0 = false; |
| 1331 | break; |
| 1332 | } |
| 1333 | Accept = All0; |
| 1334 | } else if (SelectHas0) { |
| 1335 | bool Has0 = false; |
| 1336 | for (unsigned R = Us.find_first(); R; R = Us.find_next(Prev: R)) { |
| 1337 | if (UseC[R] != 0) |
| 1338 | continue; |
| 1339 | Has0 = true; |
| 1340 | break; |
| 1341 | } |
| 1342 | Accept = Has0; |
| 1343 | } |
| 1344 | if (Accept) |
| 1345 | LL.push_back(x: M); |
| 1346 | } |
| 1347 | |
| 1348 | // Remove candidates that add uses of removable registers, unless the |
| 1349 | // removable registers are among replacement candidates. |
| 1350 | // Recompute the removable registers, since some candidates may have |
| 1351 | // been eliminated. |
| 1352 | AllRMs.clear(); |
| 1353 | for (IFMapType::iterator I = IFMap.begin(); I != End; ++I) { |
| 1354 | const IFListType &LL = I->second; |
| 1355 | if (!LL.empty()) |
| 1356 | AllRMs.insert(Rs: LL[0].second); |
| 1357 | } |
| 1358 | for (IFMapType::iterator I = IFMap.begin(); I != End; ++I) { |
| 1359 | IFListType &LL = I->second; |
| 1360 | if (LL.empty()) |
| 1361 | continue; |
| 1362 | unsigned SR = LL[0].first.SrcR, IR = LL[0].first.InsR; |
| 1363 | if (AllRMs[SR] || AllRMs[IR]) |
| 1364 | LL.clear(); |
| 1365 | } |
| 1366 | |
| 1367 | pruneEmptyLists(); |
| 1368 | } |
| 1369 | |
| 1370 | bool HexagonGenInsert::generateInserts() { |
| 1371 | // Create a new register for each one from IFMap, and store them in the |
| 1372 | // map. |
| 1373 | UnsignedMap RegMap; |
| 1374 | for (auto &I : IFMap) { |
| 1375 | unsigned VR = I.first; |
| 1376 | const TargetRegisterClass *RC = MRI->getRegClass(Reg: VR); |
| 1377 | Register NewVR = MRI->createVirtualRegister(RegClass: RC); |
| 1378 | RegMap[VR] = NewVR; |
| 1379 | } |
| 1380 | |
| 1381 | // We can generate the "insert" instructions using potentially stale |
| 1382 | // registers: SrcR and InsR for a given VR may be among other registers that |
| 1383 | // are also replaced. This is fine, we will do the mass "rauw" a bit later. |
| 1384 | for (auto &I : IFMap) { |
| 1385 | MachineInstr *MI = MRI->getVRegDef(Reg: I.first); |
| 1386 | MachineBasicBlock &B = *MI->getParent(); |
| 1387 | DebugLoc DL = MI->getDebugLoc(); |
| 1388 | unsigned NewR = RegMap[I.first]; |
| 1389 | bool R32 = MRI->getRegClass(Reg: NewR) == &Hexagon::IntRegsRegClass; |
| 1390 | const MCInstrDesc &D = R32 ? HII->get(Opcode: Hexagon::S2_insert) |
| 1391 | : HII->get(Opcode: Hexagon::S2_insertp); |
| 1392 | IFRecord IF = I.second[0].first; |
| 1393 | unsigned Wdh = IF.Wdh, Off = IF.Off; |
| 1394 | unsigned InsS = 0; |
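    // A 32-bit insert can only take its bits from a 32-bit source. If InsR
    // is a 64-bit register, select its low or high subregister and rebase
    // the offset into that half.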
    if (R32 && MRI->getRegClass(IF.InsR) == &Hexagon::DoubleRegsRegClass) {
      InsS = Hexagon::isub_lo;
      if (Off >= 32) {
        InsS = Hexagon::isub_hi;
        Off -= 32;
      }
    }
    // Advance to the proper location for inserting instructions. This could
    // be B.end().
    MachineBasicBlock::iterator At = MI;
    if (MI->isPHI())
      At = B.getFirstNonPHI();

    BuildMI(B, At, DL, D, NewR)
      .addReg(IF.SrcR)
      .addReg(IF.InsR, 0, InsS)
      .addImm(Wdh)
      .addImm(Off);

    MRI->clearKillFlags(IF.SrcR);
    MRI->clearKillFlags(IF.InsR);
  }

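  // Now that all "insert" instructions are in place, replace each original
  // register with its new counterpart and delete the old definitions. This
  // also rewrites any stale SrcR/InsR operands created in the loop above.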
  for (const auto &I : IFMap) {
    MachineInstr *DefI = MRI->getVRegDef(I.first);
    MRI->replaceRegWith(I.first, RegMap[I.first]);
    DefI->eraseFromParent();
  }

  return true;
}

bool HexagonGenInsert::removeDeadCode(MachineDomTreeNode *N) {
  bool Changed = false;

  for (auto *DTN : children<MachineDomTreeNode*>(N))
    Changed |= removeDeadCode(DTN);

  MachineBasicBlock *B = N->getBlock();
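  // Collect the instructions bottom-up into a separate list first: erasing
  // directly while walking the block would invalidate the iterator, and
  // visiting users before their definitions lets chains of dead
  // instructions be removed in a single pass.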
  std::vector<MachineInstr*> Instrs;
  for (MachineInstr &MI : llvm::reverse(*B))
    Instrs.push_back(&MI);

  for (MachineInstr *MI : Instrs) {
    unsigned Opc = MI->getOpcode();
    // Do not touch lifetime markers. This is why the target-independent DCE
    // cannot be used.
    if (Opc == TargetOpcode::LIFETIME_START ||
        Opc == TargetOpcode::LIFETIME_END)
      continue;
    bool Store = false;
    if (MI->isInlineAsm() || !MI->isSafeToMove(Store))
      continue;

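    // The instruction is removable only if every register it defines is a
    // virtual register with no remaining non-debug uses.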
    bool AllDead = true;
    SmallVector<unsigned,2> Regs;
    for (const MachineOperand &MO : MI->operands()) {
      if (!MO.isReg() || !MO.isDef())
        continue;
      Register R = MO.getReg();
      if (!R.isVirtual() || !MRI->use_nodbg_empty(R)) {
        AllDead = false;
        break;
      }
      Regs.push_back(R);
    }
    if (!AllDead)
      continue;

    B->erase(MI);
    for (unsigned Reg : Regs)
      MRI->markUsesInDebugValueAsUndef(Reg);
    Changed = true;
  }

  return Changed;
}

bool HexagonGenInsert::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  bool Timing = OptTiming, TimingDetail = Timing && OptTimingDetail;
  bool Changed = false;

  // At most one of the selection options (-insert-all0 / -insert-has0)
  // may be set.
  assert(!OptSelectAll0 || !OptSelectHas0);

  IFMap.clear();
  BaseOrd.clear();
  CellOrd.clear();

  const auto &ST = MF.getSubtarget<HexagonSubtarget>();
  HII = ST.getInstrInfo();
  HRI = ST.getRegisterInfo();
  MFN = &MF;
  MRI = &MF.getRegInfo();
  MDT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();

  // Clean up before any further processing, so that dead code does not
  // get used in a newly generated "insert" instruction. Have a custom
  // version of DCE that preserves lifetime markers. Without it, merging
  // of stack objects can fail to recognize and merge disjoint objects
  // leading to unnecessary stack growth.
  Changed = removeDeadCode(MDT->getRootNode());

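  // Run the bit tracker over the function. CellMapShadow caches the
  // register cells computed by the tracker; the candidate search below
  // queries them through CMS.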
  const HexagonEvaluator HE(*HRI, *MRI, *HII, MF);
  BitTracker BTLoc(HE, MF);
  BTLoc.trace(isDebug());
  BTLoc.run();
  CellMapShadow MS(BTLoc);
  CMS = &MS;

  buildOrderingMF(BaseOrd);
  buildOrderingBT(BaseOrd, CellOrd);

  if (isDebug()) {
    dbgs() << "Cell ordering:\n";
    for (const auto &I : CellOrd) {
      unsigned VR = I.first, Pos = I.second;
      dbgs() << printReg(VR, HRI) << " -> " << Pos << "\n";
    }
  }

  // Collect candidates for conversion into the insert forms.
  MachineBasicBlock *RootB = MDT->getRoot();
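  // AvailR keeps the registers whose definitions dominate the point being
  // visited, ordered by the cell ordering computed above.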
  OrderedRegisterList AvailR(CellOrd);

  const char *const TGName = "hexinsert";
  const char *const TGDesc = "Generate Insert Instructions";

  {
    NamedRegionTimer _T("collection", "collection", TGName, TGDesc,
                        TimingDetail);
    collectInBlock(RootB, AvailR);
    // Complete the information gathered in IFMap.
    computeRemovableRegisters();
  }

  if (isDebug()) {
    dbgs() << "Candidates after collection:\n";
    dump_map();
  }

  if (IFMap.empty())
    return Changed;

  {
    NamedRegionTimer _T("pruning", "pruning", TGName, TGDesc, TimingDetail);
    pruneCandidates();
  }

  if (isDebug()) {
    dbgs() << "Candidates after pruning:\n";
    dump_map();
  }

  if (IFMap.empty())
    return Changed;

  {
    NamedRegionTimer _T("selection", "selection", TGName, TGDesc, TimingDetail);
    selectCandidates();
  }

  if (isDebug()) {
    dbgs() << "Candidates after selection:\n";
    dump_map();
  }

  // Filter out vregs beyond the cutoff.
  if (VRegIndexCutoff.getPosition()) {
    unsigned Cutoff = VRegIndexCutoff;

    using IterListType = SmallVector<IFMapType::iterator, 16>;

    IterListType Out;
    for (IFMapType::iterator I = IFMap.begin(), E = IFMap.end(); I != E; ++I) {
      unsigned Idx = Register(I->first).virtRegIndex();
      if (Idx >= Cutoff)
        Out.push_back(I);
    }
    for (const auto &It : Out)
      IFMap.erase(It);
  }
  if (IFMap.empty())
    return Changed;

  {
    NamedRegionTimer _T("generation", "generation", TGName, TGDesc,
                        TimingDetail);
    generateInserts();
  }

  return true;
}

FunctionPass *llvm::createHexagonGenInsert() {
  return new HexagonGenInsert();
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

INITIALIZE_PASS_BEGIN(HexagonGenInsert, "hexinsert",
                      "Hexagon generate \"insert\" instructions", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
INITIALIZE_PASS_END(HexagonGenInsert, "hexinsert",
                    "Hexagon generate \"insert\" instructions", false, false)