//===-- SystemZISelLowering.h - SystemZ DAG lowering interface --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that SystemZ uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZISELLOWERING_H
#define LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZISELLOWERING_H

#include "SystemZ.h"
#include "SystemZInstrInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include <optional>

namespace llvm {
namespace SystemZISD {
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,

  // Return with a glue operand. Operand 0 is the chain operand.
  RET_GLUE,

  // Calls a function. Operand 0 is the chain operand and operand 1
  // is the target address. The arguments start at operand 2.
  // There is an optional glue operand at the end.
  CALL,
  SIBCALL,

  // TLS calls. Like regular calls, except operand 1 is the TLS symbol.
  // (The call target is implicitly __tls_get_offset.)
  TLS_GDCALL,
  TLS_LDCALL,

  // Wraps a TargetGlobalAddress that should be loaded using PC-relative
  // accesses (LARL). Operand 0 is the address.
  PCREL_WRAPPER,

  // Used in cases where an offset is applied to a TargetGlobalAddress.
  // Operand 0 is the full TargetGlobalAddress and operand 1 is a
  // PCREL_WRAPPER for an anchor point. This is used so that we can
  // cheaply refer to either the full address or the anchor point
  // as a register base.
  PCREL_OFFSET,

  // Integer comparisons. There are three operands: the two values
  // to compare, and an integer of type SystemZICMP.
  ICMP,

  // Floating-point comparisons. The two operands are the values to compare.
  FCMP,

  // Test under mask. The first operand is ANDed with the second operand
  // and the condition codes are set on the result. The third operand is
  // a boolean that is true if the condition codes need to distinguish
  // between CCMASK_TM_MIXED_MSB_0 and CCMASK_TM_MIXED_MSB_1 (which the
  // register forms do but the memory forms don't).
  TM,

  // Branches if a condition is true. Operand 0 is the chain operand;
  // operand 1 is the 4-bit condition-code mask, with bit N in
  // big-endian order meaning "branch if CC=N"; operand 2 is the
  // target block and operand 3 is the flag operand.
  BR_CCMASK,
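  // For example (illustrative), a mask of 0x8 (binary 1000) means
  // "branch if CC=0", a mask of 0x6 (binary 0110) means "branch if
  // CC=1 or CC=2", and a mask of 0xf is an unconditional branch.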

  // Selects between operand 0 and operand 1. Operand 2 is the
  // mask of condition-code values for which operand 0 should be
  // chosen over operand 1; it has the same form as BR_CCMASK.
  // Operand 3 is the flag operand.
  SELECT_CCMASK,

  // Evaluates to the gap between the stack pointer and the
  // base of the dynamically-allocatable area.
  ADJDYNALLOC,

  // For allocating stack space when using stack clash protection.
  // Allocation is performed block by block, and each block is probed.
  PROBED_ALLOCA,

  // Count the number of bits set in each byte of operand 0.
  POPCNT,

  // Wrappers around the ISD opcodes of the same name. The output is GR128.
  // Input operands may be GR64 or GR32, depending on the instruction.
  SMUL_LOHI,
  UMUL_LOHI,
  SDIVREM,
  UDIVREM,

  // Add/subtract with overflow/carry. These have the same operands as
  // the corresponding standard operations, except with the carry flag
  // replaced by a condition code value.
  SADDO, SSUBO, UADDO, USUBO, ADDCARRY, SUBCARRY,

  // Set the condition code from a boolean value in operand 0.
  // Operand 1 is a mask of all condition-code values that may result from
  // this operation; operand 2 is a mask of the condition-code values that
  // may result if the boolean is true.
  // Note that this operation is always optimized away; we never generate
  // any code for it.
  GET_CCMASK,

  // Use a series of MVCs to copy bytes from one memory location to another.
  // The operands are:
  // - the target address
  // - the source address
  // - the constant length
  //
  // This isn't a memory opcode because we'd need to attach two
  // MachineMemOperands rather than one.
  MVC,

  // Similar to MVC, but for logic operations (AND, OR, XOR).
  NC,
  OC,
  XC,

  // Use CLC to compare two blocks of memory, with the same comments
  // as for MVC.
  CLC,

  // Use MVC to set a block of memory after storing the first byte.
  MEMSET_MVC,

  // Use an MVST-based sequence to implement stpcpy().
  STPCPY,

  // Use a CLST-based sequence to implement strcmp(). The two input operands
  // are the addresses of the strings to compare.
  STRCMP,

  // Use an SRST-based sequence to search a block of memory. The first
  // operand is the end address, the second is the start, and the third
  // is the character to search for. CC is set to 1 on success and 2
  // on failure.
  SEARCH_STRING,

  // Store the CC value in bits 29 and 28 of an integer.
  IPM,
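  // (In other words, the result is CC shifted left by 28; for example,
  // CC=2 yields 0x20000000 in the low 32 bits.)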

  // Transaction begin. The first operand is the chain, the second
  // the TDB pointer, and the third the immediate control field.
  // Returns CC value and chain.
  TBEGIN,
  TBEGIN_NOFLOAT,

  // Transaction end. Just the chain operand. Returns CC value and chain.
  TEND,

  // Create a vector constant by filling byte N of the result with bit
  // 15-N of the single operand.
  BYTE_MASK,
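  // For example (illustrative), an operand of 0x8001 sets byte 0 (from
  // bit 15) and byte 15 (from bit 0) of the result to 0xff and all other
  // bytes to 0.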

  // Create a vector constant by replicating an element-sized RISBG-style
  // mask. The first operand specifies the starting set bit and the second
  // operand specifies the ending set bit. Both operands count from the MSB
  // of the element.
  ROTATE_MASK,
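  // For example (illustrative), with 32-bit elements, a start bit of 1
  // and an end bit of 30 produce elements of 0x7ffffffe (bits 1 to 30
  // set, counting from the MSB).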

  // Replicate a GPR scalar value into all elements of a vector.
  REPLICATE,

  // Create a vector from two i64 GPRs.
  JOIN_DWORDS,

  // Replicate one element of a vector into all elements. The first operand
  // is the vector and the second is the index of the element to replicate.
  SPLAT,

  // Interleave elements from the high half of operand 0 and the high half
  // of operand 1.
  MERGE_HIGH,

  // Likewise for the low halves.
  MERGE_LOW,

  // Concatenate the vectors in the first two operands, shift them left
  // by the third operand, and take the first half of the result.
  SHL_DOUBLE,

  // Take one element of the first v2i64 operand and one element of the
  // second v2i64 operand and concatenate them to form a v2i64 result.
  // The third operand is a 4-bit value of the form 0A0B, where A and B
  // are the element selectors for the first and second operands
  // respectively.
  PERMUTE_DWORDS,
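  // For example (illustrative), a selector of 0b0100 (A=1, B=0) produces
  // { op0[1], op1[0] }.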

  // Perform a general vector permute on vector operands 0 and 1.
  // Each byte of operand 2 controls the corresponding byte of the result,
  // in the same way as a byte-level VECTOR_SHUFFLE mask.
  PERMUTE,

  // Pack vector operands 0 and 1 into a single vector with half-sized
  // elements.
  PACK,

  // Likewise, but saturate the result and set CC. PACKS_CC does signed
  // saturation and PACKLS_CC does unsigned saturation.
  PACKS_CC,
  PACKLS_CC,

  // Unpack the first half of vector operand 0 into double-sized elements.
  // UNPACK_HIGH sign-extends and UNPACKL_HIGH zero-extends.
  UNPACK_HIGH,
  UNPACKL_HIGH,

  // Likewise for the second half.
  UNPACK_LOW,
  UNPACKL_LOW,

  // Shift/rotate each element of vector operand 0 by the number of bits
  // specified by scalar operand 1.
  VSHL_BY_SCALAR,
  VSRL_BY_SCALAR,
  VSRA_BY_SCALAR,
  VROTL_BY_SCALAR,

  // Concatenate the vectors in the first two operands, shift them left/right
  // bitwise by the third operand, and take the first/last half of the result.
  SHL_DOUBLE_BIT,
  SHR_DOUBLE_BIT,

  // For each element of the output type, sum across all sub-elements of
  // operand 0 belonging to the corresponding element, and add in the
  // rightmost sub-element of the corresponding element of operand 1.
  VSUM,
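  // For example (illustrative), with a v4i32 result and v16i8 operands,
  // result element I is the sum of bytes 4*I to 4*I+3 of operand 0 plus
  // byte 4*I+3 of operand 1.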

  // Compute carry/borrow indication for add/subtract.
  VACC, VSCBI,
  // Add/subtract with carry/borrow.
  VAC, VSBI,
  // Compute carry/borrow indication for add/subtract with carry/borrow.
  VACCC, VSBCBI,

  // High-word multiply-and-add.
  VMAH, VMALH,
  // Widen and multiply even/odd vector elements.
  VME, VMLE, VMO, VMLO,

  // Compare integer vector operands 0 and 1 to produce the usual 0/-1
  // vector result. VICMPE is for equality, VICMPH for "signed greater than"
  // and VICMPHL for "unsigned greater than".
  VICMPE,
  VICMPH,
  VICMPHL,

  // Likewise, but also set the condition codes on the result.
  VICMPES,
  VICMPHS,
  VICMPHLS,

  // Compare floating-point vector operands 0 and 1 to produce the usual 0/-1
  // vector result. VFCMPE is for "ordered and equal", VFCMPH for "ordered and
  // greater than" and VFCMPHE for "ordered and greater than or equal to".
  VFCMPE,
  VFCMPH,
  VFCMPHE,

  // Likewise, but also set the condition codes on the result.
  VFCMPES,
  VFCMPHS,
  VFCMPHES,

  // Test floating-point data class for vectors.
  VFTCI,

  // Extend the even f32 elements of vector operand 0 to produce a vector
  // of f64 elements.
  VEXTEND,

  // Round the f64 elements of vector operand 0 to f32s and store them in the
  // even elements of the result.
  VROUND,

  // AND the two vector operands together and set CC based on the result.
  VTM,

  // i128 high integer comparisons.
  SCMP128HI,
  UCMP128HI,

  // String operations that set CC as a side-effect.
  VFAE_CC,
  VFAEZ_CC,
  VFEE_CC,
  VFEEZ_CC,
  VFENE_CC,
  VFENEZ_CC,
  VISTR_CC,
  VSTRC_CC,
  VSTRCZ_CC,
  VSTRS_CC,
  VSTRSZ_CC,

  // Test Data Class.
  //
  // Operand 0: the value to test
  // Operand 1: the bit mask
  TDC,

  // z/OS XPLINK ADA Entry
  // Wraps a TargetGlobalAddress that should be loaded from a function's
  // Associated Data Area (ADA). The ADA is passed to the function by the
  // caller in R5, the register defined for this purpose by the XPLINK ABI.
  // Operand 0: the GlobalValue/External Symbol
  // Operand 1: the ADA register
  // Operand 2: the offset (0 for the first and 8 for the second element in
  //            the function descriptor)
  ADA_ENTRY,

  // Strict variants of scalar floating-point comparisons.
  // Quiet and signaling versions.
  FIRST_STRICTFP_OPCODE,
  STRICT_FCMP = FIRST_STRICTFP_OPCODE,
  STRICT_FCMPS,

  // Strict variants of vector floating-point comparisons.
  // Quiet and signaling versions.
  STRICT_VFCMPE,
  STRICT_VFCMPH,
  STRICT_VFCMPHE,
  STRICT_VFCMPES,
  STRICT_VFCMPHS,
  STRICT_VFCMPHES,

  // Strict variants of VEXTEND and VROUND.
  STRICT_VEXTEND,
  STRICT_VROUND,
  LAST_STRICTFP_OPCODE = STRICT_VROUND,

  // Wrappers around the inner loop of an 8- or 16-bit ATOMIC_SWAP or
  // ATOMIC_LOAD_<op>.
  //
  // Operand 0: the address of the containing 32-bit-aligned field
  // Operand 1: the second operand of <op>, in the high bits of an i32
  //            for everything except ATOMIC_SWAPW
  // Operand 2: how many bits to rotate the i32 left to bring the first
  //            operand into the high bits
  // Operand 3: the negative of operand 2, for rotating the other way
  // Operand 4: the width of the field in bits (8 or 16)
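  //
  // For example (illustrative): for a byte-sized field at offset 1 within
  // its aligned word, operand 2 would be 8 (rotating the i32 left by 8
  // bits brings byte 1 into the high bits) and operand 3 would be -8.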
  FIRST_MEMORY_OPCODE,
  ATOMIC_SWAPW = FIRST_MEMORY_OPCODE,
  ATOMIC_LOADW_ADD,
  ATOMIC_LOADW_SUB,
  ATOMIC_LOADW_AND,
  ATOMIC_LOADW_OR,
  ATOMIC_LOADW_XOR,
  ATOMIC_LOADW_NAND,
  ATOMIC_LOADW_MIN,
  ATOMIC_LOADW_MAX,
  ATOMIC_LOADW_UMIN,
  ATOMIC_LOADW_UMAX,

  // A wrapper around the inner loop of an ATOMIC_CMP_SWAP.
  //
  // Operand 0: the address of the containing 32-bit-aligned field
  // Operand 1: the compare value, in the low bits of an i32
  // Operand 2: the swap value, in the low bits of an i32
  // Operand 3: how many bits to rotate the i32 left to bring the first
  //            operand into the high bits
  // Operand 4: the negative of operand 3, for rotating the other way
  // Operand 5: the width of the field in bits (8 or 16)
  ATOMIC_CMP_SWAPW,

  // Atomic compare-and-swap returning CC value.
  // Val, CC, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
  ATOMIC_CMP_SWAP,

  // 128-bit atomic load.
  // Val, OUTCHAIN = ATOMIC_LOAD_128(INCHAIN, ptr)
  ATOMIC_LOAD_128,

  // 128-bit atomic store.
  // OUTCHAIN = ATOMIC_STORE_128(INCHAIN, val, ptr)
  ATOMIC_STORE_128,

  // 128-bit atomic compare-and-swap.
  // Val, CC, OUTCHAIN = ATOMIC_CMP_SWAP_128(INCHAIN, ptr, cmp, swap)
  ATOMIC_CMP_SWAP_128,

  // Byte swapping load/store. Same operands as regular load/store.
  LRV, STRV,

  // Element swapping load/store. Same operands as regular load/store.
  VLER, VSTER,

  // Use STORE CLOCK FAST to store the current TOD clock value.
  STCKF,

  // Prefetch from the second operand using the 4-bit control code in
  // the first operand. The code is 1 for a load prefetch and 2 for
  // a store prefetch.
  PREFETCH,
  LAST_MEMORY_OPCODE = PREFETCH,
};

// Return true if OPCODE is some kind of PC-relative address.
inline bool isPCREL(unsigned Opcode) {
  return Opcode == PCREL_WRAPPER || Opcode == PCREL_OFFSET;
}
} // end namespace SystemZISD

namespace SystemZICMP {
// Describes whether an integer comparison needs to be signed or unsigned,
// or whether either type is OK.
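// (For example, an equality comparison can use either signed or unsigned
// compare instructions, so it is classified as Any.)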
enum {
  Any,
  UnsignedOnly,
  SignedOnly
};
} // end namespace SystemZICMP

class SystemZSubtarget;

class SystemZTargetLowering : public TargetLowering {
public:
  explicit SystemZTargetLowering(const TargetMachine &TM,
                                 const SystemZSubtarget &STI);

  bool useSoftFloat() const override;

  // Override TargetLowering.
  MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
    return MVT::i32;
  }
  unsigned getVectorIdxWidth(const DataLayout &DL) const override {
    // Only the lower 12 bits of an element index are used, so we don't
    // want to clobber the upper 32 bits of a GPR unnecessarily.
    return 32;
  }
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override {
    // Widen subvectors to the full width rather than promoting integer
    // elements. This is better because:
    //
    // (a) it means that we can handle the ABI for passing and returning
    //     sub-128 vectors without having to handle them as legal types.
    //
    // (b) we don't have instructions to extend on load and truncate on store,
    //     so promoting the integers is less efficient.
    //
    // (c) there are no multiplication instructions for the widest integer
    //     type (v2i64).
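    //
    // For example (illustrative), v4i8 is widened to v16i8 rather than
    // promoted to v4i32.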
    if (VT.getScalarSizeInBits() % 8 == 0)
      return TypeWidenVector;
    return TargetLoweringBase::getPreferredVectorAction(VT);
  }
  unsigned
  getNumRegisters(LLVMContext &Context, EVT VT,
                  std::optional<MVT> RegisterVT) const override {
    // i128 inline assembly operand.
    if (VT == MVT::i128 && RegisterVT && *RegisterVT == MVT::Untyped)
      return 1;
    return TargetLowering::getNumRegisters(Context, VT);
  }
  MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
                                    EVT VT) const override {
    // 128-bit single-element vector types are passed like other vectors,
    // not like their element type.
    if (VT.isVector() && VT.getSizeInBits() == 128 &&
        VT.getVectorNumElements() == 1)
      return MVT::v16i8;
    return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
  }
  bool isCheapToSpeculateCtlz(Type *) const override { return true; }
  bool isCheapToSpeculateCttz(Type *) const override { return true; }
  bool preferZeroCompareBranch() const override { return true; }
  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override {
    ConstantInt *Mask = dyn_cast<ConstantInt>(AndI.getOperand(1));
    return Mask && Mask->getValue().isIntN(16);
  }
  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &,
                         EVT) const override;
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;
  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;
  bool ShouldShrinkFPConstant(EVT VT) const override {
    // Do not shrink 64-bit FP constpool entries since LDEB is slower than
    // LD, and having the full constant in memory enables reg/mem opcodes.
    return VT != MVT::f64;
  }
  MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
                                      MachineBasicBlock *MBB) const;

  MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const;

  bool hasInlineStackProbe(const MachineFunction &MF) const override;
  AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const override;
  AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const override;
  AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const override;
  bool isLegalICmpImmediate(int64_t Imm) const override;
  bool isLegalAddImmediate(int64_t Imm) const override;
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment,
                                      MachineMemOperand::Flags Flags,
                                      unsigned *Fast) const override;
  bool
  findOptimalMemOpLowering(std::vector<EVT> &MemOps, unsigned Limit,
                           const MemOp &Op, unsigned DstAS, unsigned SrcAS,
                           const AttributeList &FuncAttributes) const override;
  EVT getOptimalMemOpType(const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;
  bool isTruncateFree(Type *, Type *) const override;
  bool isTruncateFree(EVT, EVT) const override;

  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                            bool MathUsed) const override {
    // Form add and sub with overflow intrinsics regardless of any extra
    // users of the math result.
    return VT == MVT::i32 || VT == MVT::i64;
  }

  bool shouldConsiderGEPOffsetSplit() const override { return true; }

  bool shouldExpandCmpUsingSelects(EVT VT) const override { return true; }

  const char *getTargetNodeName(unsigned Opcode) const override;
  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;
  TargetLowering::ConstraintType
  getConstraintType(StringRef Constraint) const override;
  TargetLowering::ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;
  void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  InlineAsm::ConstraintCode
  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode.size() == 1) {
      switch (ConstraintCode[0]) {
      default:
        break;
      case 'o':
        return InlineAsm::ConstraintCode::o;
      case 'Q':
        return InlineAsm::ConstraintCode::Q;
      case 'R':
        return InlineAsm::ConstraintCode::R;
      case 'S':
        return InlineAsm::ConstraintCode::S;
      case 'T':
        return InlineAsm::ConstraintCode::T;
      }
    } else if (ConstraintCode.size() == 2 && ConstraintCode[0] == 'Z') {
      switch (ConstraintCode[1]) {
      default:
        break;
      case 'Q':
        return InlineAsm::ConstraintCode::ZQ;
      case 'R':
        return InlineAsm::ConstraintCode::ZR;
      case 'S':
        return InlineAsm::ConstraintCode::ZS;
      case 'T':
        return InlineAsm::ConstraintCode::ZT;
      }
    }
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  /// Override to support customized stack guard loading.
  bool useLoadStackGuardNode(const Module &M) const override { return true; }
  void insertSSPDeclarations(Module &M) const override {}

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *BB) const override;
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  void LowerOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results,
                             SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;
  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
  bool allowTruncateForTailCall(Type *, Type *) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool splitValueIntoRegisterParts(
      SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
      unsigned NumParts, MVT PartVT,
      std::optional<CallingConv::ID> CC) const override;
  SDValue joinRegisterPartsIntoValue(
      SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
      unsigned NumParts, MVT PartVT, EVT ValueVT,
      std::optional<CallingConv::ID> CC) const override;
  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;
  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  std::pair<SDValue, SDValue>
  makeExternalCall(SDValue Chain, SelectionDAG &DAG, const char *CalleeName,
                   EVT RetVT, ArrayRef<SDValue> Ops, CallingConv::ID CallConv,
                   bool IsSigned, SDLoc DL, bool DoesNotReturn,
                   bool IsReturnValueUsed) const;

  SDValue useLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, MVT VT, SDValue Arg,
                     SDLoc DL, SDValue Chain, bool IsStrict) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context, const Type *RetTy) const override;
  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Determine which bits of Op are known to be either zero or one and
  /// return them in the Known bitset.
  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  /// Determine the number of bits in the operation that are sign bits.
  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(
      SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
      bool PoisonOnly, unsigned Depth) const override;

  ISD::NodeType getExtendForAtomicOps() const override {
    return ISD::ANY_EXTEND;
  }
  ISD::NodeType getExtendForAtomicCmpSwapArg() const override {
    return ISD::ZERO_EXTEND;
  }

  bool supportSwiftError() const override { return true; }

  unsigned getStackProbeSize(const MachineFunction &MF) const;
  bool hasAndNot(SDValue Y) const override;

private:
  const SystemZSubtarget &Subtarget;

  // Implement LowerOperation for individual opcodes.
  SDValue getVectorCmp(SelectionDAG &DAG, unsigned Opcode,
                       const SDLoc &DL, EVT VT,
                       SDValue CmpOp0, SDValue CmpOp1, SDValue Chain) const;
  SDValue lowerVectorSETCC(SelectionDAG &DAG, const SDLoc &DL,
                           EVT VT, ISD::CondCode CC,
                           SDValue CmpOp0, SDValue CmpOp1,
                           SDValue Chain = SDValue(),
                           bool IsSignaling = false) const;
  SDValue lowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTRICT_FSETCC(SDValue Op, SelectionDAG &DAG,
                             bool IsSignaling) const;
  SDValue lowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGlobalAddress(GlobalAddressSDNode *Node,
                             SelectionDAG &DAG) const;
  SDValue lowerTLSGetOffset(GlobalAddressSDNode *Node,
                            SelectionDAG &DAG, unsigned Opcode,
                            SDValue GOTOffset) const;
  SDValue lowerThreadPointer(const SDLoc &DL, SelectionDAG &DAG) const;
  SDValue lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
                                SelectionDAG &DAG) const;
  SDValue lowerBlockAddress(BlockAddressSDNode *Node,
                            SelectionDAG &DAG) const;
  SDValue lowerJumpTable(JumpTableSDNode *JT, SelectionDAG &DAG) const;
  SDValue lowerConstantPool(ConstantPoolSDNode *CP, SelectionDAG &DAG) const;
  SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART_ELF(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART_XPLINK(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerDYNAMIC_STACKALLOC_ELF(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerDYNAMIC_STACKALLOC_XPLINK(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMULH(SDValue Op, SelectionDAG &DAG, unsigned Opcode) const;
  SDValue lowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerXALUO(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerUADDSUBO_CARRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECREDUCE_ADD(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_LDST_I128(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_LOAD_OP(SDValue Op, SelectionDAG &DAG,
                              unsigned Opcode) const;
  SDValue lowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  bool isVectorElementLoad(SDValue Op) const;
  SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                      SmallVectorImpl<SDValue> &Elems) const;
  SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSIGN_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerZERO_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShift(SDValue Op, SelectionDAG &DAG, unsigned ByScalar) const;
  SDValue lowerFSHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFSHR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue lower_FP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lower_INT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerLoadF16(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerStoreF16(SDValue Op, SelectionDAG &DAG) const;

  SDValue lowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;

  bool canTreatAsByteVector(EVT VT) const;
  SDValue combineExtract(const SDLoc &DL, EVT ElemVT, EVT VecVT,
                         SDValue OrigOp, unsigned Index, DAGCombinerInfo &DCI,
                         bool Force) const;
  SDValue combineTruncateExtract(const SDLoc &DL, EVT TruncVT, SDValue Op,
                                 DAGCombinerInfo &DCI) const;
  SDValue combineZERO_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSIGN_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSIGN_EXTEND_INREG(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineMERGE(SDNode *N, DAGCombinerInfo &DCI) const;
  bool canLoadStoreByteSwapped(EVT VT) const;
  SDValue combineLOAD(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSTORE(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineVECTOR_SHUFFLE(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineEXTRACT_VECTOR_ELT(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineJOIN_DWORDS(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineFP_ROUND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineFP_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineINT_TO_FP(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineFCOPYSIGN(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineBSWAP(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSETCC(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineBR_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSELECT_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineGET_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineShiftToMulAddHigh(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineMUL(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineIntDIVREM(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineINTRINSIC(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue unwrapAddress(SDValue N) const override;

  // If the last instruction before MBBI in MBB was some form of COMPARE,
  // try to replace it with a COMPARE AND BRANCH just before MBBI.
  // CCMask and Target are the BRC-like operands for the branch.
  // Return true if the change was made.
  bool convertPrevCompareToBranch(MachineBasicBlock *MBB,
                                  MachineBasicBlock::iterator MBBI,
                                  unsigned CCMask,
                                  MachineBasicBlock *Target) const;

  // Implement EmitInstrWithCustomInserter for individual operation types.
  MachineBasicBlock *emitAdjCallStack(MachineInstr &MI,
                                      MachineBasicBlock *BB) const;
  MachineBasicBlock *emitSelect(MachineInstr &MI, MachineBasicBlock *BB) const;
  MachineBasicBlock *emitCondStore(MachineInstr &MI, MachineBasicBlock *BB,
                                   unsigned StoreOpcode, unsigned STOCOpcode,
                                   bool Invert) const;
  MachineBasicBlock *emitICmp128Hi(MachineInstr &MI, MachineBasicBlock *BB,
                                   bool Unsigned) const;
  MachineBasicBlock *emitPair128(MachineInstr &MI,
                                 MachineBasicBlock *MBB) const;
  MachineBasicBlock *emitExt128(MachineInstr &MI, MachineBasicBlock *MBB,
                                bool ClearEven) const;
  MachineBasicBlock *emitAtomicLoadBinary(MachineInstr &MI,
                                          MachineBasicBlock *BB,
                                          unsigned BinOpcode,
                                          bool Invert = false) const;
  MachineBasicBlock *emitAtomicLoadMinMax(MachineInstr &MI,
                                          MachineBasicBlock *MBB,
                                          unsigned CompareOpcode,
                                          unsigned KeepOldMask) const;
  MachineBasicBlock *emitAtomicCmpSwapW(MachineInstr &MI,
                                        MachineBasicBlock *BB) const;
  MachineBasicBlock *emitMemMemWrapper(MachineInstr &MI, MachineBasicBlock *BB,
                                       unsigned Opcode,
                                       bool IsMemset = false) const;
  MachineBasicBlock *emitStringWrapper(MachineInstr &MI, MachineBasicBlock *BB,
                                       unsigned Opcode) const;
  MachineBasicBlock *emitTransactionBegin(MachineInstr &MI,
                                          MachineBasicBlock *MBB,
                                          unsigned Opcode, bool NoFloat) const;
  MachineBasicBlock *emitLoadAndTestCmp0(MachineInstr &MI,
                                         MachineBasicBlock *MBB,
                                         unsigned Opcode) const;
  MachineBasicBlock *emitProbedAlloca(MachineInstr &MI,
                                      MachineBasicBlock *MBB) const;

  SDValue getBackchainAddress(SDValue SP, SelectionDAG &DAG) const;

  MachineMemOperand::Flags
  getTargetMMOFlags(const Instruction &I) const override;
  const TargetRegisterClass *getRepRegClassFor(MVT VT) const override;

private:
  bool isInternal(const Function *Fn) const;
  mutable std::map<const Function *, bool> IsInternalCache;
  void verifyNarrowIntegerArgs_Call(const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const Function *F, SDValue Callee) const;
  void verifyNarrowIntegerArgs_Ret(const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const Function *F) const;
  bool
  verifyNarrowIntegerArgs(const SmallVectorImpl<ISD::OutputArg> &Outs) const;
};

struct SystemZVectorConstantInfo {
private:
  APInt IntBits;   // The 128 bits as an integer.
  APInt SplatBits; // Smallest splat value.
  APInt SplatUndef; // Bits corresponding to undef operands of the BVN.
  unsigned SplatBitSize = 0;
  bool isFP128 = false;

public:
  unsigned Opcode = 0;
  SmallVector<unsigned, 2> OpVals;
  MVT VecVT;
  SystemZVectorConstantInfo(APInt IntImm);
  SystemZVectorConstantInfo(APFloat FPImm)
      : SystemZVectorConstantInfo(FPImm.bitcastToAPInt()) {
    isFP128 = (&FPImm.getSemantics() == &APFloat::IEEEquad());
  }
  SystemZVectorConstantInfo(BuildVectorSDNode *BVN);
  bool isVectorConstantLegal(const SystemZSubtarget &Subtarget);
};
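
// Illustrative use of SystemZVectorConstantInfo (a sketch, not code from
// this file): construct it from a BUILD_VECTOR node and, if
// isVectorConstantLegal() returns true, materialize the constant with
// VCI.Opcode (e.g. SystemZISD::BYTE_MASK, ROTATE_MASK or REPLICATE), the
// immediate operands in VCI.OpVals and the vector type VCI.VecVT:
//
//   SystemZVectorConstantInfo VCI(BVN);
//   if (VCI.isVectorConstantLegal(Subtarget))
//     /* build DAG.getNode(VCI.Opcode, DL, VCI.VecVT, ...) from VCI.OpVals */;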

} // end namespace llvm

#endif