//===-- SystemZISelLowering.h - SystemZ DAG lowering interface --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that SystemZ uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZISELLOWERING_H
#define LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZISELLOWERING_H

#include "SystemZ.h"
#include "SystemZInstrInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include <optional>

namespace llvm {
namespace SystemZISD {
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,

  // Return with a glue operand. Operand 0 is the chain operand.
  RET_GLUE,

  // Calls a function. Operand 0 is the chain operand and operand 1
  // is the target address. The arguments start at operand 2.
  // There is an optional glue operand at the end.
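  // The operand list is therefore (chain, target, arg0, arg1, ..., [glue]).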
  CALL,
  SIBCALL,

  // TLS calls. Like regular calls, except operand 1 is the TLS symbol.
  // (The call target is implicitly __tls_get_offset.)
  TLS_GDCALL,
  TLS_LDCALL,

  // Wraps a TargetGlobalAddress that should be loaded using PC-relative
  // accesses (LARL). Operand 0 is the address.
  PCREL_WRAPPER,

  // Used in cases where an offset is applied to a TargetGlobalAddress.
  // Operand 0 is the full TargetGlobalAddress and operand 1 is a
  // PCREL_WRAPPER for an anchor point. This is used so that we can
  // cheaply refer to either the full address or the anchor point
  // as a register base.
  PCREL_OFFSET,

  // Integer comparisons. There are three operands: the two values
  // to compare, and an integer of type SystemZICMP.
  ICMP,

  // Floating-point comparisons. The two operands are the values to compare.
  FCMP,

  // Test under mask. The first operand is ANDed with the second operand
  // and the condition codes are set on the result. The third operand is
  // a boolean that is true if the condition codes need to distinguish
  // between CCMASK_TM_MIXED_MSB_0 and CCMASK_TM_MIXED_MSB_1 (which the
  // register forms do but the memory forms don't).
  TM,

  // Branches if a condition is true. Operand 0 is the chain operand;
  // operand 1 is the 4-bit condition-code mask, with bit N in
  // big-endian order meaning "branch if CC=N"; operand 2 is the
  // target block and operand 3 is the flag operand.
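  // For example, a mask of 0x8 (binary 1000) means "branch if CC=0" and
  // a mask of 0x6 (binary 0110) means "branch if CC=1 or CC=2".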
  BR_CCMASK,

  // Selects between operand 0 and operand 1. Operand 2 is the
  // mask of condition-code values for which operand 0 should be
  // chosen over operand 1; it has the same form as BR_CCMASK.
  // Operand 3 is the flag operand.
  SELECT_CCMASK,

  // Evaluates to the gap between the stack pointer and the
  // base of the dynamically-allocatable area.
  ADJDYNALLOC,

  // For allocating stack space when using the stack clash protector.
  // Allocation is performed in blocks, and each block is probed.
  PROBED_ALLOCA,

  // Count number of bits set in operand 0 per byte.
  POPCNT,

  // Wrappers around the ISD opcodes of the same name. The output is GR128.
  // Input operands may be GR64 or GR32, depending on the instruction.
  SMUL_LOHI,
  UMUL_LOHI,
  SDIVREM,
  UDIVREM,

  // Add/subtract with overflow/carry. These have the same operands as
  // the corresponding standard operations, except with the carry flag
  // replaced by a condition code value.
  SADDO, SSUBO, UADDO, USUBO, ADDCARRY, SUBCARRY,

  // Set the condition code from a boolean value in operand 0.
  // Operand 1 is a mask of all condition-code values that may result from
  // this operation; operand 2 is a mask of the condition-code values that
  // may result if the boolean is true.
  // Note that this operation is always optimized away; we never generate
  // any code for it.
  GET_CCMASK,

  // Use a series of MVCs to copy bytes from one memory location to another.
  // The operands are:
  // - the target address
  // - the source address
  // - the constant length
  //
  // This isn't a memory opcode because we'd need to attach two
  // MachineMemOperands rather than one.
  MVC,

  // Similar to MVC, but for logic operations (AND, OR, XOR).
  NC,
  OC,
  XC,

  // Use CLC to compare two blocks of memory, with the same comments
  // as for MVC.
  CLC,

  // Use MVC to set a block of memory after storing the first byte.
  MEMSET_MVC,

  // Use an MVST-based sequence to implement stpcpy().
  STPCPY,

  // Use a CLST-based sequence to implement strcmp(). The two input operands
  // are the addresses of the strings to compare.
  STRCMP,

  // Use an SRST-based sequence to search a block of memory. The first
  // operand is the end address, the second is the start, and the third
  // is the character to search for. CC is set to 1 on success and 2
  // on failure.
  SEARCH_STRING,

  // Store the CC value in bits 29 and 28 of an integer.
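  // (Bits 31 and 30 of the result are zero, so the CC value can be
  // extracted with a logical right shift by 28.)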
  IPM,

  // Transaction begin. The first operand is the chain, the second
  // the TDB pointer, and the third the immediate control field.
  // Returns CC value and chain.
  TBEGIN,
  TBEGIN_NOFLOAT,

  // Transaction end. Just the chain operand. Returns CC value and chain.
  TEND,

  // Create a vector constant by filling byte N of the result with bit
  // 15-N of the single operand.
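  // For example, an operand of 0x8001 sets bytes 0 and 15 of the result
  // to 0xff and all other bytes to 0x00.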
  BYTE_MASK,

  // Create a vector constant by replicating an element-sized RISBG-style mask.
  // The first operand specifies the starting set bit and the second operand
  // specifies the ending set bit. Both operands count from the MSB of the
  // element.
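  // For example, with 32-bit elements, operands (2, 5) produce elements
  // of value 0x3c000000 (bits 2 through 5 from the MSB set).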
  ROTATE_MASK,

  // Replicate a GPR scalar value into all elements of a vector.
  REPLICATE,

  // Create a vector from two i64 GPRs.
  JOIN_DWORDS,

  // Replicate one element of a vector into all elements. The first operand
  // is the vector and the second is the index of the element to replicate.
  SPLAT,

  // Interleave elements from the high half of operand 0 and the high half
  // of operand 1.
  MERGE_HIGH,

  // Likewise for the low halves.
  MERGE_LOW,

  // Concatenate the vectors in the first two operands, shift them left
  // by the third operand, and take the first half of the result.
  SHL_DOUBLE,

  // Take one element of the first v2i64 operand and one element of the
  // second v2i64 operand and concatenate them to form a v2i64 result.
  // The third operand is a 4-bit value of the form 0A0B, where A and B
  // are the element selectors for the first and second operands
  // respectively.
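  // For example, a selector of 0b0100 (A=1, B=0) takes element 1 of
  // operand 0 and element 0 of operand 1.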
  PERMUTE_DWORDS,

  // Perform a general vector permute on vector operands 0 and 1.
  // Each byte of operand 2 controls the corresponding byte of the result,
  // in the same way as a byte-level VECTOR_SHUFFLE mask.
  PERMUTE,

  // Pack vector operands 0 and 1 into a single vector with half-sized elements.
  PACK,

  // Likewise, but saturate the result and set CC. PACKS_CC does signed
  // saturation and PACKLS_CC does unsigned saturation.
  PACKS_CC,
  PACKLS_CC,

  // Unpack the first half of vector operand 0 into double-sized elements.
  // UNPACK_HIGH sign-extends and UNPACKL_HIGH zero-extends.
  UNPACK_HIGH,
  UNPACKL_HIGH,

  // Likewise for the second half.
  UNPACK_LOW,
  UNPACKL_LOW,

  // Shift/rotate each element of vector operand 0 by the number of bits
  // specified by scalar operand 1.
  VSHL_BY_SCALAR,
  VSRL_BY_SCALAR,
  VSRA_BY_SCALAR,
  VROTL_BY_SCALAR,

  // For each element of the output type, sum across all sub-elements of
  // operand 0 belonging to the corresponding element, and add in the
  // rightmost sub-element of the corresponding element of operand 1.
  VSUM,

  // Compute carry/borrow indication for add/subtract.
  VACC, VSCBI,
  // Add/subtract with carry/borrow.
  VAC, VSBI,
  // Compute carry/borrow indication for add/subtract with carry/borrow.
  VACCC, VSBCBI,

  // Compare integer vector operands 0 and 1 to produce the usual 0/-1
  // vector result. VICMPE is for equality, VICMPH for "signed greater than"
  // and VICMPHL for "unsigned greater than".
  VICMPE,
  VICMPH,
  VICMPHL,

  // Likewise, but also set the condition codes on the result.
  VICMPES,
  VICMPHS,
  VICMPHLS,

  // Compare floating-point vector operands 0 and 1 to produce the usual 0/-1
  // vector result. VFCMPE is for "ordered and equal", VFCMPH for "ordered and
  // greater than" and VFCMPHE for "ordered and greater than or equal to".
  VFCMPE,
  VFCMPH,
  VFCMPHE,

  // Likewise, but also set the condition codes on the result.
  VFCMPES,
  VFCMPHS,
  VFCMPHES,

  // Test floating-point data class for vectors.
  VFTCI,

  // Extend the even f32 elements of vector operand 0 to produce a vector
  // of f64 elements.
  VEXTEND,

  // Round the f64 elements of vector operand 0 to f32s and store them in the
  // even elements of the result.
  VROUND,

  // AND the two vector operands together and set CC based on the result.
  VTM,

  // i128 high integer comparisons.
  SCMP128HI,
  UCMP128HI,

  // String operations that set CC as a side-effect.
  VFAE_CC,
  VFAEZ_CC,
  VFEE_CC,
  VFEEZ_CC,
  VFENE_CC,
  VFENEZ_CC,
  VISTR_CC,
  VSTRC_CC,
  VSTRCZ_CC,
  VSTRS_CC,
  VSTRSZ_CC,

  // Test Data Class.
  //
  // Operand 0: the value to test
  // Operand 1: the bit mask
  TDC,

  // z/OS XPLINK ADA Entry
  // Wraps a TargetGlobalAddress that should be loaded from a function's
  // AssociatedData Area (ADA). The ADA is passed to the function by the
  // caller in the XPLINK ABI-defined register R5.
  // Operand 0: the GlobalValue/External Symbol
  // Operand 1: the ADA register
  // Operand 2: the offset (0 for the first and 8 for the second element in the
  //            function descriptor)
  ADA_ENTRY,

  // Strict variants of scalar floating-point comparisons.
  // Quiet and signaling versions.
  STRICT_FCMP = ISD::FIRST_TARGET_STRICTFP_OPCODE,
  STRICT_FCMPS,

  // Strict variants of vector floating-point comparisons.
  // Quiet and signaling versions.
  STRICT_VFCMPE,
  STRICT_VFCMPH,
  STRICT_VFCMPHE,
  STRICT_VFCMPES,
  STRICT_VFCMPHS,
  STRICT_VFCMPHES,

  // Strict variants of VEXTEND and VROUND.
  STRICT_VEXTEND,
  STRICT_VROUND,

  // Wrappers around the inner loop of an 8- or 16-bit ATOMIC_SWAP or
  // ATOMIC_LOAD_<op>.
  //
  // Operand 0: the address of the containing 32-bit-aligned field
  // Operand 1: the second operand of <op>, in the high bits of an i32
  //            for everything except ATOMIC_SWAPW
  // Operand 2: how many bits to rotate the i32 left to bring the first
  //            operand into the high bits
  // Operand 3: the negative of operand 2, for rotating the other way
  // Operand 4: the width of the field in bits (8 or 16)
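  //
  // For example, for an 8-bit field in the second byte of the word,
  // operand 2 would be 8 (a left rotate of 8 brings that byte into the
  // high bits) and operand 3 would be -8.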
  ATOMIC_SWAPW = ISD::FIRST_TARGET_MEMORY_OPCODE,
  ATOMIC_LOADW_ADD,
  ATOMIC_LOADW_SUB,
  ATOMIC_LOADW_AND,
  ATOMIC_LOADW_OR,
  ATOMIC_LOADW_XOR,
  ATOMIC_LOADW_NAND,
  ATOMIC_LOADW_MIN,
  ATOMIC_LOADW_MAX,
  ATOMIC_LOADW_UMIN,
  ATOMIC_LOADW_UMAX,

  // A wrapper around the inner loop of an ATOMIC_CMP_SWAP.
  //
  // Operand 0: the address of the containing 32-bit-aligned field
  // Operand 1: the compare value, in the low bits of an i32
  // Operand 2: the swap value, in the low bits of an i32
  // Operand 3: how many bits to rotate the i32 left to bring the first
  //            operand into the high bits
  // Operand 4: the negative of operand 3, for rotating the other way
  // Operand 5: the width of the field in bits (8 or 16)
  ATOMIC_CMP_SWAPW,

  // Atomic compare-and-swap returning CC value.
  // Val, CC, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
  ATOMIC_CMP_SWAP,

  // 128-bit atomic load.
  // Val, OUTCHAIN = ATOMIC_LOAD_128(INCHAIN, ptr)
  ATOMIC_LOAD_128,

  // 128-bit atomic store.
  // OUTCHAIN = ATOMIC_STORE_128(INCHAIN, val, ptr)
  ATOMIC_STORE_128,

  // 128-bit atomic compare-and-swap.
  // Val, CC, OUTCHAIN = ATOMIC_CMP_SWAP_128(INCHAIN, ptr, cmp, swap)
  ATOMIC_CMP_SWAP_128,

  // Byte swapping load/store. Same operands as regular load/store.
  LRV, STRV,

  // Element swapping load/store. Same operands as regular load/store.
  VLER, VSTER,

  // Use STORE CLOCK FAST to store current TOD clock value.
  STCKF,

  // Prefetch from the second operand using the 4-bit control code in
  // the first operand. The code is 1 for a load prefetch and 2 for
  // a store prefetch.
  PREFETCH
};

// Return true if OPCODE is some kind of PC-relative address.
inline bool isPCREL(unsigned Opcode) {
  return Opcode == PCREL_WRAPPER || Opcode == PCREL_OFFSET;
}
} // end namespace SystemZISD

namespace SystemZICMP {
// Describes whether an integer comparison needs to be signed or unsigned,
// or whether either type is OK.
enum {
  Any,
  UnsignedOnly,
  SignedOnly
};
} // end namespace SystemZICMP

class SystemZSubtarget;

class SystemZTargetLowering : public TargetLowering {
public:
  explicit SystemZTargetLowering(const TargetMachine &TM,
                                 const SystemZSubtarget &STI);

  bool useSoftFloat() const override;

  // Override TargetLowering.
  MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
    return MVT::i32;
  }
  MVT getVectorIdxTy(const DataLayout &DL) const override {
    // Only the lower 12 bits of an element index are used, so we don't
    // want to clobber the upper 32 bits of a GPR unnecessarily.
    return MVT::i32;
  }
  TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT)
      const override {
    // Widen subvectors to the full width rather than promoting integer
    // elements. This is better because:
    //
    // (a) it means that we can handle the ABI for passing and returning
    //     sub-128 vectors without having to handle them as legal types.
    //
    // (b) we don't have instructions to extend on load and truncate on store,
    //     so promoting the integers is less efficient.
    //
    // (c) there are no multiplication instructions for the widest integer
    //     type (v2i64).
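    //
    // For example, v4i8 is widened to v16i8 rather than promoted to v4i32.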
    if (VT.getScalarSizeInBits() % 8 == 0)
      return TypeWidenVector;
    return TargetLoweringBase::getPreferredVectorAction(VT);
  }
  unsigned
  getNumRegisters(LLVMContext &Context, EVT VT,
                  std::optional<MVT> RegisterVT) const override {
    // i128 inline assembly operand.
    if (VT == MVT::i128 && RegisterVT && *RegisterVT == MVT::Untyped)
      return 1;
    return TargetLowering::getNumRegisters(Context, VT);
  }
  MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
                                    EVT VT) const override {
    // 128-bit single-element vector types are passed like other vectors,
    // not like their element type.
    if (VT.isVector() && VT.getSizeInBits() == 128 &&
        VT.getVectorNumElements() == 1)
      return MVT::v16i8;
    return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
  }
  bool isCheapToSpeculateCtlz(Type *) const override { return true; }
  bool isCheapToSpeculateCttz(Type *) const override { return true; }
  bool preferZeroCompareBranch() const override { return true; }
  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override {
    ConstantInt *Mask = dyn_cast<ConstantInt>(AndI.getOperand(1));
    return Mask && Mask->getValue().isIntN(16);
  }
  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &,
                         EVT) const override;
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;
  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;
  bool ShouldShrinkFPConstant(EVT VT) const override {
    // Do not shrink 64-bit FP constpool entries since LDEB is slower than
    // LD, and having the full constant in memory enables reg/mem opcodes.
    return VT != MVT::f64;
  }
  bool hasInlineStackProbe(const MachineFunction &MF) const override;
  AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const override;
  AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const override;
  AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const override;
  bool isLegalICmpImmediate(int64_t Imm) const override;
  bool isLegalAddImmediate(int64_t Imm) const override;
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment,
                                      MachineMemOperand::Flags Flags,
                                      unsigned *Fast) const override;
  bool
  findOptimalMemOpLowering(std::vector<EVT> &MemOps, unsigned Limit,
                           const MemOp &Op, unsigned DstAS, unsigned SrcAS,
                           const AttributeList &FuncAttributes) const override;
  EVT getOptimalMemOpType(const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;
  bool isTruncateFree(Type *, Type *) const override;
  bool isTruncateFree(EVT, EVT) const override;

  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                            bool MathUsed) const override {
    // Form add and sub with overflow intrinsics regardless of any extra
    // users of the math result.
    return VT == MVT::i32 || VT == MVT::i64;
  }

  bool shouldConsiderGEPOffsetSplit() const override { return true; }

  bool shouldExpandCmpUsingSelects() const override { return true; }

  const char *getTargetNodeName(unsigned Opcode) const override;
  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;
  TargetLowering::ConstraintType
  getConstraintType(StringRef Constraint) const override;
  TargetLowering::ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;
  void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  InlineAsm::ConstraintCode
  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode.size() == 1) {
      switch (ConstraintCode[0]) {
      default:
        break;
      case 'o':
        return InlineAsm::ConstraintCode::o;
      case 'Q':
        return InlineAsm::ConstraintCode::Q;
      case 'R':
        return InlineAsm::ConstraintCode::R;
      case 'S':
        return InlineAsm::ConstraintCode::S;
      case 'T':
        return InlineAsm::ConstraintCode::T;
      }
    } else if (ConstraintCode.size() == 2 && ConstraintCode[0] == 'Z') {
      switch (ConstraintCode[1]) {
      default:
        break;
      case 'Q':
        return InlineAsm::ConstraintCode::ZQ;
      case 'R':
        return InlineAsm::ConstraintCode::ZR;
      case 'S':
        return InlineAsm::ConstraintCode::ZS;
      case 'T':
        return InlineAsm::ConstraintCode::ZT;
      }
    }
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  /// Override to support customized stack guard loading.
  bool useLoadStackGuardNode() const override {
    return true;
  }
  void insertSSPDeclarations(Module &M) const override {
  }

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *BB) const override;
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  void LowerOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results,
                             SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;
  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
  bool allowTruncateForTailCall(Type *, Type *) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool splitValueIntoRegisterParts(
      SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
      unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC)
      const override;
  SDValue joinRegisterPartsIntoValue(
      SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
      unsigned NumParts, MVT PartVT, EVT ValueVT,
      std::optional<CallingConv::ID> CC) const override;
  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;
  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  std::pair<SDValue, SDValue>
  makeExternalCall(SDValue Chain, SelectionDAG &DAG, const char *CalleeName,
                   EVT RetVT, ArrayRef<SDValue> Ops, CallingConv::ID CallConv,
                   bool IsSigned, SDLoc DL, bool DoesNotReturn,
                   bool IsReturnValueUsed) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;
  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Determine which of the bits specified in Mask are known to be either
  /// zero or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  /// Determine the number of bits in the operation that are sign bits.
  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(
      SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
      bool PoisonOnly, unsigned Depth) const override;

  ISD::NodeType getExtendForAtomicOps() const override {
    return ISD::ANY_EXTEND;
  }
  ISD::NodeType getExtendForAtomicCmpSwapArg() const override {
    return ISD::ZERO_EXTEND;
  }

  bool supportSwiftError() const override {
    return true;
  }

  unsigned getStackProbeSize(const MachineFunction &MF) const;

private:
  const SystemZSubtarget &Subtarget;

  // Implement LowerOperation for individual opcodes.
  SDValue getVectorCmp(SelectionDAG &DAG, unsigned Opcode,
                       const SDLoc &DL, EVT VT,
                       SDValue CmpOp0, SDValue CmpOp1, SDValue Chain) const;
  SDValue lowerVectorSETCC(SelectionDAG &DAG, const SDLoc &DL,
                           EVT VT, ISD::CondCode CC,
                           SDValue CmpOp0, SDValue CmpOp1,
                           SDValue Chain = SDValue(),
                           bool IsSignaling = false) const;
  SDValue lowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTRICT_FSETCC(SDValue Op, SelectionDAG &DAG,
                             bool IsSignaling) const;
  SDValue lowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGlobalAddress(GlobalAddressSDNode *Node,
                             SelectionDAG &DAG) const;
  SDValue lowerTLSGetOffset(GlobalAddressSDNode *Node,
                            SelectionDAG &DAG, unsigned Opcode,
                            SDValue GOTOffset) const;
  SDValue lowerThreadPointer(const SDLoc &DL, SelectionDAG &DAG) const;
  SDValue lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
                                SelectionDAG &DAG) const;
  SDValue lowerBlockAddress(BlockAddressSDNode *Node,
                            SelectionDAG &DAG) const;
  SDValue lowerJumpTable(JumpTableSDNode *JT, SelectionDAG &DAG) const;
  SDValue lowerConstantPool(ConstantPoolSDNode *CP, SelectionDAG &DAG) const;
  SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART_ELF(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART_XPLINK(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerDYNAMIC_STACKALLOC_ELF(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerDYNAMIC_STACKALLOC_XPLINK(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerXALUO(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerUADDSUBO_CARRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECREDUCE_ADD(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_LDST_I128(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_LOAD_OP(SDValue Op, SelectionDAG &DAG,
                              unsigned Opcode) const;
  SDValue lowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  bool isVectorElementLoad(SDValue Op) const;
  SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                      SmallVectorImpl<SDValue> &Elems) const;
  SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSIGN_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerZERO_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShift(SDValue Op, SelectionDAG &DAG, unsigned ByScalar) const;
  SDValue lowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;

  bool canTreatAsByteVector(EVT VT) const;
  SDValue combineExtract(const SDLoc &DL, EVT ElemVT, EVT VecVT,
                         SDValue OrigOp, unsigned Index, DAGCombinerInfo &DCI,
                         bool Force) const;
  SDValue combineTruncateExtract(const SDLoc &DL, EVT TruncVT, SDValue Op,
                                 DAGCombinerInfo &DCI) const;
  SDValue combineZERO_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSIGN_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSIGN_EXTEND_INREG(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineMERGE(SDNode *N, DAGCombinerInfo &DCI) const;
  bool canLoadStoreByteSwapped(EVT VT) const;
  SDValue combineLOAD(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSTORE(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineVECTOR_SHUFFLE(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineEXTRACT_VECTOR_ELT(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineJOIN_DWORDS(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineFP_ROUND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineFP_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineINT_TO_FP(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineBSWAP(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineBR_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSELECT_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineGET_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineIntDIVREM(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineINTRINSIC(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue unwrapAddress(SDValue N) const override;

  // If the last instruction before MBBI in MBB was some form of COMPARE,
  // try to replace it with a COMPARE AND BRANCH just before MBBI.
  // CCMask and Target are the BRC-like operands for the branch.
  // Return true if the change was made.
  bool convertPrevCompareToBranch(MachineBasicBlock *MBB,
                                  MachineBasicBlock::iterator MBBI,
                                  unsigned CCMask,
                                  MachineBasicBlock *Target) const;

  // Implement EmitInstrWithCustomInserter for individual operation types.
  MachineBasicBlock *emitAdjCallStack(MachineInstr &MI,
                                      MachineBasicBlock *BB) const;
  MachineBasicBlock *emitSelect(MachineInstr &MI, MachineBasicBlock *BB) const;
  MachineBasicBlock *emitCondStore(MachineInstr &MI, MachineBasicBlock *BB,
                                   unsigned StoreOpcode, unsigned STOCOpcode,
                                   bool Invert) const;
  MachineBasicBlock *emitICmp128Hi(MachineInstr &MI, MachineBasicBlock *BB,
                                   bool Unsigned) const;
  MachineBasicBlock *emitPair128(MachineInstr &MI,
                                 MachineBasicBlock *MBB) const;
  MachineBasicBlock *emitExt128(MachineInstr &MI, MachineBasicBlock *MBB,
                                bool ClearEven) const;
  MachineBasicBlock *emitAtomicLoadBinary(MachineInstr &MI,
                                          MachineBasicBlock *BB,
                                          unsigned BinOpcode,
                                          bool Invert = false) const;
  MachineBasicBlock *emitAtomicLoadMinMax(MachineInstr &MI,
                                          MachineBasicBlock *MBB,
                                          unsigned CompareOpcode,
                                          unsigned KeepOldMask) const;
  MachineBasicBlock *emitAtomicCmpSwapW(MachineInstr &MI,
                                        MachineBasicBlock *BB) const;
  MachineBasicBlock *emitMemMemWrapper(MachineInstr &MI, MachineBasicBlock *BB,
                                       unsigned Opcode,
                                       bool IsMemset = false) const;
  MachineBasicBlock *emitStringWrapper(MachineInstr &MI, MachineBasicBlock *BB,
                                       unsigned Opcode) const;
  MachineBasicBlock *emitTransactionBegin(MachineInstr &MI,
                                          MachineBasicBlock *MBB,
                                          unsigned Opcode, bool NoFloat) const;
  MachineBasicBlock *emitLoadAndTestCmp0(MachineInstr &MI,
                                         MachineBasicBlock *MBB,
                                         unsigned Opcode) const;
  MachineBasicBlock *emitProbedAlloca(MachineInstr &MI,
                                      MachineBasicBlock *MBB) const;

  SDValue getBackchainAddress(SDValue SP, SelectionDAG &DAG) const;

  MachineMemOperand::Flags
  getTargetMMOFlags(const Instruction &I) const override;
  const TargetRegisterClass *getRepRegClassFor(MVT VT) const override;
};

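// Analyzes a vector constant (given as an integer splat, an FP immediate,
// or a BUILD_VECTOR node). If the constant can be materialized with a
// single SystemZ instruction, isVectorConstantLegal() returns true and
// fills in Opcode, OpVals and VecVT accordingly.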
struct SystemZVectorConstantInfo {
private:
  APInt IntBits;             // The 128 bits as an integer.
  APInt SplatBits;           // Smallest splat value.
  APInt SplatUndef;          // Bits corresponding to undef operands of the BVN.
  unsigned SplatBitSize = 0;
  bool isFP128 = false;
public:
  unsigned Opcode = 0;
  SmallVector<unsigned, 2> OpVals;
  MVT VecVT;
  SystemZVectorConstantInfo(APInt IntImm);
  SystemZVectorConstantInfo(APFloat FPImm)
      : SystemZVectorConstantInfo(FPImm.bitcastToAPInt()) {
    isFP128 = (&FPImm.getSemantics() == &APFloat::IEEEquad());
  }
  SystemZVectorConstantInfo(BuildVectorSDNode *BVN);
  bool isVectorConstantLegal(const SystemZSubtarget &Subtarget);
};

} // end namespace llvm

#endif