//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/AssignmentTrackingAnalysis.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundleIterator.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/SwiftErrorValueTracking.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cstddef>
#include <limits>
#include <optional>
#include <tuple>

using namespace llvm;
using namespace PatternMatch;
using namespace SwitchCG;

#define DEBUG_TYPE "isel"

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<bool>
    InsertAssertAlign("insert-assert-align", cl::init(true),
                      cl::desc("Insert the experimental `assertalign` node."),
                      cl::ReallyHidden);

static cl::opt<unsigned, true>
    LimitFPPrecision("limit-float-precision",
                     cl::desc("Generate low-precision inline sequences "
                              "for some float libcalls"),
                     cl::location(LimitFloatPrecision), cl::Hidden,
                     cl::init(0));

static cl::opt<unsigned> SwitchPeelThreshold(
    "switch-peel-threshold", cl::Hidden, cl::init(66),
    cl::desc("Set the case probability threshold for peeling the case from a "
             "switch statement. A value greater than 100 will void this "
             "optimization"));

// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
//   %buffer = alloca [4096 x i8]
//   %data = load [4096 x i8]* %argPtr
//   store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;

static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      SDValue InChain,
                                      std::optional<CallingConv::ID> CC);

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent. If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
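/// For example, on a 32-bit target an i64 value arrives as two i32 parts,
/// which this function reassembles into a single i64 node.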
static SDValue
getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
                 unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V,
                 SDValue InChain,
                 std::optional<CallingConv::ID> CC = std::nullopt,
                 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
  // Let the target assemble the parts if it wants to
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (SDValue Val = TLI.joinRegisterPartsIntoValue(DAG, DL, Parts, NumParts,
                                                   PartVT, ValueVT, CC))
    return Val;

  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
                                  InChain, CC);

  assert(NumParts > 0 && "No parts to assemble!");
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
      unsigned RoundParts = llvm::bit_floor(NumParts);
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

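      // E.g. assembling an i128 from four i32 parts: the low two parts form
      // an i64 Lo and the high two an i64 Hi, which BUILD_PAIR recombines.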
      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2, PartVT, HalfVT, V,
                              InChain);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2, RoundParts / 2,
                              PartVT, HalfVT, V, InChain);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
                              OddVT, V, InChain, CC);

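        // E.g. an i96 from three i32 parts: the two round parts form a 64-bit
        // piece and the odd part the remaining 32 bits; the high piece is
        // extended to i96, shifted past the low piece's width, and OR'd in.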
        // Combine the round and odd parts.
        Lo = Val;
        if (DAG.getDataLayout().isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                         DAG.getConstant(Lo.getValueSizeInBits(), DL,
                                         TLI.getShiftAmountTy(
                                             TotalVT, DAG.getDataLayout())));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V,
                             InChain, CC);
    }
  }

  // There is now one part, held in Val. Correct it to match ValueVT.
  // PartEVT is the type of the register class that holds the value.
  // ValueVT is the type of the inline asm operation.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
      ValueVT.bitsLT(PartEVT)) {
    // For an FP value in an integer part, we need to truncate to the right
    // width first.
    PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
    Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
  }

  // Handle types that have the same size.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle types with different sizes.
  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp)
        Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUND's are always exact here.
    if (ValueVT.bitsLT(Val.getValueType())) {

      SDValue NoChange =
          DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));

      if (DAG.getMachineFunction().getFunction().getAttributes().hasFnAttr(
              llvm::Attribute::StrictFP)) {
        return DAG.getNode(ISD::STRICT_FP_ROUND, DL,
                           DAG.getVTList(ValueVT, MVT::Other), InChain, Val,
                           NoChange);
      }

      return DAG.getNode(ISD::FP_ROUND, DL, ValueVT, Val, NoChange);
    }

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  // Handle MMX to a narrower integer type by bitcasting MMX to integer and
  // then truncating.
  if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
      ValueVT.bitsLT(PartEVT)) {
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
    return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  report_fatal_error("Unknown mismatch in getCopyFromParts!");
}

static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (!I)
    return Ctx.emitError(ErrMsg);

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (CI->isInlineAsm()) {
      return Ctx.diagnose(DiagnosticInfoInlineAsm(
          *CI, ErrMsg + ", possible invalid constraint for vector type"));
    }

  return Ctx.emitError(I, ErrMsg);
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent. If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      SDValue InChain,
                                      std::optional<CallingConv::ID> CallConv) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const bool IsABIRegCopy = CallConv.has_value();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs;

    if (IsABIRegCopy) {
      NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
          *DAG.getContext(), *CallConv, ValueVT, IntermediateVT,
          NumIntermediates, RegisterVT);
    } else {
      NumRegs =
          TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                     NumIntermediates, RegisterVT);
    }

    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT.getSizeInBits() ==
           Parts[0].getSimpleValueType().getSizeInBits() &&
           "Part type sizes don't match!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1, PartVT, IntermediateVT,
                                  V, InChain, CallConv);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
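      // E.g. four parts feeding two intermediates gives Factor == 2: each
      // intermediate value is assembled from two consecutive parts.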
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor, PartVT,
                                  IntermediateVT, V, InChain, CallConv);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    EVT BuiltVectorTy =
        IntermediateVT.isVector()
            ? EVT::getVectorVT(
                  *DAG.getContext(), IntermediateVT.getScalarType(),
                  IntermediateVT.getVectorElementCount() * NumParts)
            : EVT::getVectorVT(*DAG.getContext(),
                               IntermediateVT.getScalarType(),
                               NumIntermediates);
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, BuiltVectorTy, Ops);
  }

  // There is now one part, held in Val. Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    // If the parts vector has more elements than the value vector, then we
    // have a vector widening case (e.g. <2 x float> -> <4 x float>).
    // Extract the elements we want.
    if (PartEVT.getVectorElementCount() != ValueVT.getVectorElementCount()) {
      assert((PartEVT.getVectorElementCount().getKnownMinValue() >
              ValueVT.getVectorElementCount().getKnownMinValue()) &&
             (PartEVT.getVectorElementCount().isScalable() ==
              ValueVT.getVectorElementCount().isScalable()) &&
             "Cannot narrow, it would be a lossy transformation");
      PartEVT =
          EVT::getVectorVT(*DAG.getContext(), PartEVT.getVectorElementType(),
                           ValueVT.getVectorElementCount());
      Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, PartEVT, Val,
                        DAG.getVectorIdxConstant(0, DL));
      if (PartEVT == ValueVT)
        return Val;
      if (PartEVT.isInteger() && ValueVT.isFloatingPoint())
        return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

      // Vector/Vector bitcast (e.g. <2 x bfloat> -> <2 x half>).
      if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
        return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    }

    // Promoted vector extract
    return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  if (ValueVT.getVectorNumElements() != 1) {
    // Certain ABIs require that vectors are passed as integers. For vectors
    // of the same size, this is an obvious bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    } else if (ValueVT.bitsLT(PartEVT)) {
      const uint64_t ValueSize = ValueVT.getFixedSizeInBits();
      EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
      // Drop the extra bits.
      Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
      return DAG.getBitcast(ValueVT, Val);
    }

    diagnosePossiblyInvalidConstraint(
        *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
    return DAG.getUNDEF(ValueVT);
  }

  // Handle cases such as i8 -> <1 x i1>
  EVT ValueSVT = ValueVT.getVectorElementType();
  if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT) {
    unsigned ValueSize = ValueSVT.getSizeInBits();
    if (ValueSize == PartEVT.getSizeInBits()) {
      Val = DAG.getNode(ISD::BITCAST, DL, ValueSVT, Val);
    } else if (ValueSVT.isFloatingPoint() && PartEVT.isInteger()) {
      // It's possible a scalar floating point type gets softened to integer
      // and then promoted to a larger integer. If PartEVT is the larger
      // integer we need to truncate it and then bitcast to the FP type.
      assert(ValueSVT.bitsLT(PartEVT) && "Unexpected types");
      EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
      Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
      Val = DAG.getBitcast(ValueSVT, Val);
    } else {
      Val = ValueVT.isFloatingPoint()
                ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
                : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
    }
  }

  return DAG.getBuildVector(ValueVT, DL, Val);
}

static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 std::optional<CallingConv::ID> CallConv);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts. If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
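/// E.g. on a 32-bit target an i64 value is split into two i32 parts here;
/// this is the inverse of getCopyFromParts.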
static void
getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
               unsigned NumParts, MVT PartVT, const Value *V,
               std::optional<CallingConv::ID> CallConv = std::nullopt,
               ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  // Let the target split the parts if it wants to
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.splitValueIntoRegisterParts(DAG, DL, Val, Parts, NumParts, PartVT,
                                      CallConv))
    return;
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
                                CallConv);

  unsigned OrigNumParts = NumParts;
  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
         "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  unsigned PartBits = PartVT.getSizeInBits();
  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      if (ValueVT.isFloatingPoint()) {
        // FP values need to be bitcast, then extended if they are being put
        // into a larger container.
        ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      }
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT) {
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2. Split off and copy the tail.
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = llvm::bit_floor(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
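    // The odd tail holds the value's topmost bits; shift them down so they
    // can be copied out as their own, smaller run of parts.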
    SDValue OddVal =
        DAG.getNode(ISD::SRL, DL, ValueVT, Val,
                    DAG.getShiftAmountConstant(RoundBits, ValueVT, DL));

    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
                   CallConv);

    if (DAG.getDataLayout().isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2. Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

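  // E.g. with four parts: the first pass splits the value into halves placed
  // at Parts[0] and Parts[2]; the second pass splits each half into quarters.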
  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i + StepSize / 2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0, DL));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (DAG.getDataLayout().isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}

static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val,
                                     const SDLoc &DL, EVT PartVT) {
  if (!PartVT.isVector())
    return SDValue();

  EVT ValueVT = Val.getValueType();
  EVT PartEVT = PartVT.getVectorElementType();
  EVT ValueEVT = ValueVT.getVectorElementType();
  ElementCount PartNumElts = PartVT.getVectorElementCount();
  ElementCount ValueNumElts = ValueVT.getVectorElementCount();

  // We only support widening vectors with equivalent element types and
  // fixed/scalable properties. If a target needs to widen a fixed-length type
  // to a scalable one, it should be possible to use INSERT_SUBVECTOR below.
  if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
      PartNumElts.isScalable() != ValueNumElts.isScalable())
    return SDValue();

  // Try bf16 as well, because some targets share its ABI with fp16.
  if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
    assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
           "Cannot widen to illegal type");
    Val = DAG.getNode(ISD::BITCAST, DL,
                      ValueVT.changeVectorElementType(MVT::f16), Val);
  } else if (PartEVT != ValueEVT) {
    return SDValue();
  }

  // Widening a scalable vector to another scalable vector is done by inserting
  // the vector into a larger undef one.
  if (PartNumElts.isScalable())
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
                       Val, DAG.getVectorIdxConstant(0, DL));

  // Vector widening case, e.g. <2 x float> -> <4 x float>. Shuffle in
  // undef elements.
  SmallVector<SDValue, 16> Ops;
  DAG.ExtractVectorElements(Val, Ops);
  SDValue EltUndef = DAG.getUNDEF(PartEVT);
  Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);

  // FIXME: Use CONCAT for 2x -> 4x.
  return DAG.getBuildVector(PartVT, DL, Ops);
}

/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 std::optional<CallingConv::ID> CallConv) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const bool IsABIRegCopy = CallConv.has_value();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
      Val = Widened;
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                   ValueVT.getVectorElementType()) &&
               PartEVT.getVectorElementCount() ==
                   ValueVT.getVectorElementCount()) {

      // Promoted vector extract
      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    } else if (PartEVT.isVector() &&
               PartEVT.getVectorElementType() !=
                   ValueVT.getVectorElementType() &&
               TLI.getTypeAction(*DAG.getContext(), ValueVT) ==
                   TargetLowering::TypeWidenVector) {
      // Combination of widening and promotion.
      EVT WidenVT =
          EVT::getVectorVT(*DAG.getContext(), ValueVT.getVectorElementType(),
                           PartVT.getVectorElementCount());
      SDValue Widened = widenVectorToPartType(DAG, Val, DL, WidenVT);
      Val = DAG.getAnyExtOrTrunc(Widened, DL, PartVT);
    } else {
      // Don't extract an integer from a float vector. This can happen if the
      // FP type gets softened to integer and then promoted. The promotion
      // prevents it from being picked up by the earlier bitcast case.
      if (ValueVT.getVectorElementCount().isScalar() &&
          (!ValueVT.isFloatingPoint() || !PartVT.isInteger())) {
        // If we reach this condition and PartVT is FP, this means that
        // ValueVT is also FP and both have a different size, otherwise we
        // would have bitcasted them. Producing an EXTRACT_VECTOR_ELT here
        // would be invalid since that would mean the smaller FP type has to
        // be extended to the larger one.
        if (PartVT.isFloatingPoint()) {
          Val = DAG.getBitcast(ValueVT.getScalarType(), Val);
          Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
        } else
          Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
                            DAG.getVectorIdxConstant(0, DL));
      } else {
        uint64_t ValueSize = ValueVT.getFixedSizeInBits();
        assert(PartVT.getFixedSizeInBits() > ValueSize &&
               "lossy conversion of vector to scalar type");
        EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
        Val = DAG.getBitcast(IntermediateType, Val);
        Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
      }
    }

    assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs;
  if (IsABIRegCopy) {
    NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
        *DAG.getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
        RegisterVT);
  } else {
    NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
  }

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  assert(IntermediateVT.isScalableVector() == ValueVT.isScalableVector() &&
         "Mixing scalable and fixed vectors when copying in parts");

  std::optional<ElementCount> DestEltCnt;

  if (IntermediateVT.isVector())
    DestEltCnt = IntermediateVT.getVectorElementCount() * NumIntermediates;
  else
    DestEltCnt = ElementCount::getFixed(NumIntermediates);

  EVT BuiltVectorTy = EVT::getVectorVT(
      *DAG.getContext(), IntermediateVT.getScalarType(), *DestEltCnt);

  if (ValueVT == BuiltVectorTy) {
    // Nothing to do.
  } else if (ValueVT.getSizeInBits() == BuiltVectorTy.getSizeInBits()) {
    // Bitconvert vector->vector case.
    Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
  } else {
    if (BuiltVectorTy.getVectorElementType().bitsGT(
            ValueVT.getVectorElementType())) {
      // Integer promotion.
      ValueVT = EVT::getVectorVT(*DAG.getContext(),
                                 BuiltVectorTy.getVectorElementType(),
                                 ValueVT.getVectorElementCount());
      Val = DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
    }

    if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy)) {
      Val = Widened;
    }
  }

  assert(Val.getValueType() == BuiltVectorTy && "Unexpected vector value type");

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector()) {
      // This does something sensible for scalable vectors - see the
      // definition of EXTRACT_SUBVECTOR for further details.
      unsigned IntermediateNumElts = IntermediateVT.getVectorMinNumElements();
      Ops[i] =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
                      DAG.getVectorIdxConstant(i * IntermediateNumElts, DL));
    } else {
      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
                           DAG.getVectorIdxConstant(i, DL));
    }
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each value into
    // legal parts.
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
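    // E.g. two intermediates copied into four parts gives Factor == 2: each
    // intermediate is split across two consecutive parts.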
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
                     CallConv);
  }
}

RegsForValue::RegsForValue(const SmallVector<Register, 4> &regs, MVT regvt,
                           EVT valuevt, std::optional<CallingConv::ID> CC)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
      RegCount(1, regs.size()), CallConv(CC) {}

RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL, Register Reg, Type *Ty,
                           std::optional<CallingConv::ID> CC) {
  ComputeValueVTs(TLI, DL, Ty, ValueVTs);

  CallConv = CC;

  for (EVT ValueVT : ValueVTs) {
    unsigned NumRegs =
        isABIMangled()
            ? TLI.getNumRegistersForCallingConv(Context, *CC, ValueVT)
            : TLI.getNumRegisters(Context, ValueVT);
    MVT RegisterVT =
        isABIMangled()
            ? TLI.getRegisterTypeForCallingConv(Context, *CC, ValueVT)
            : TLI.getRegisterType(Context, ValueVT);
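    // The pieces of one value are assumed to occupy consecutive registers
    // starting at Reg, so record each register and then step Reg past them.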
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    RegCount.push_back(NumRegs);
    Reg = Reg.id() + NumRegs;
  }
}

SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      const SDLoc &dl, SDValue &Chain,
                                      SDValue *Glue, const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = RegCount[Value];
    MVT RegisterVT = isABIMangled()
                         ? TLI.getRegisterTypeForCallingConv(
                               *DAG.getContext(), *CallConv, RegVTs[Value])
                         : RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Glue) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part + i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part + i], RegisterVT, *Glue);
        *Glue = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!Regs[Part + i].isVirtual() || !RegisterVT.isInteger())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
          FuncInfo.GetLiveOutRegInfo(Regs[Part + i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getScalarSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();

      if (NumZeroBits == RegSize) {
        // The current value is a zero.
        // Explicitly express that as it would be easier for
        // optimizations to kick in.
        Parts[i] = DAG.getConstant(0, dl, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent. For
      // now, just use the tightest assertzext/assertsext possible.
      bool isSExt;
      EVT FromVT(MVT::Other);
      if (NumZeroBits) {
        FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
        isSExt = false;
      } else if (NumSignBits > 1) {
        FromVT =
            EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
        isSExt = true;
      } else {
        continue;
      }
      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
                                     RegisterVT, ValueVT, V, Chain, CallConv);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}

void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                                 const SDLoc &dl, SDValue &Chain, SDValue *Glue,
                                 const Value *V,
                                 ISD::NodeType PreferredExtendType) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = PreferredExtendType;

  // Get the list of the value's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumParts = RegCount[Value];

    MVT RegisterVT = isABIMangled()
                         ? TLI.getRegisterTypeForCallingConv(
                               *DAG.getContext(), *CallConv, RegVTs[Value])
                         : RegVTs[Value];

    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
                   NumParts, RegisterVT, V, CallConv, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Glue) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Glue);
      *Glue = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Glue)
    // If NumRegs > 1 && Glue is used then the use of the last CopyToReg is
    // flagged to it. That is, the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    // c1, f1 = CopyToReg
    // c2, f2 = CopyToReg
    // c3     = TokenFactor c1, c2
    // ...
    //        = op c3, ..., f2
    Chain = Chains[NumRegs - 1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

void RegsForValue::AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching,
                                        unsigned MatchingIdx, const SDLoc &dl,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  InlineAsm::Flag Flag(Code, Regs.size());
  if (HasMatching)
    Flag.setMatchingOp(MatchingIdx);
  else if (!Regs.empty() && Regs.front().isVirtual()) {
    // Put the register class of the virtual registers in the flag word. That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag.setRegClass(RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
  Ops.push_back(Res);

  if (Code == InlineAsm::Kind::Clobber) {
    // Clobbers should always have a 1:1 mapping with registers, and may
    // reference registers that have illegal (e.g. vector) types. Hence, we
    // shouldn't try to apply any sort of splitting logic to them.
    assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
           "No 1:1 mapping from clobbers to regs?");
    Register SP = TLI.getStackPointerRegisterToSaveRestore();
    (void)SP;
    for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
      Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
      assert(
          (Regs[I] != SP ||
           DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
          "If we clobbered the stack pointer, MFI should know about it.");
    }
    return;
  }

  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    MVT RegisterVT = RegVTs[Value];
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value],
                                           RegisterVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      Register TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
    }
  }
}

SmallVector<std::pair<Register, TypeSize>, 4>
RegsForValue::getRegsAndSizes() const {
  SmallVector<std::pair<Register, TypeSize>, 4> OutVec;
  unsigned I = 0;
  for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
    unsigned RegCount = std::get<0>(CountAndVT);
    MVT RegisterVT = std::get<1>(CountAndVT);
    TypeSize RegisterSize = RegisterVT.getSizeInBits();
    for (unsigned E = I + RegCount; I != E; ++I)
      OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
  }
  return OutVec;
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, BatchAAResults *aa,
                               AssumptionCache *ac,
                               const TargetLibraryInfo *li) {
  BatchAA = aa;
  AC = ac;
  GFI = gfi;
  LibInfo = li;
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
  SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
  AssignmentTrackingEnabled = isAssignmentTrackingEnabled(
      *DAG.getMachineFunction().getFunction().getParent());
}

void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
  StatepointLowering.clear();
}

void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

// Update DAG root to include dependencies on Pending chains.
SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
  SDValue Root = DAG.getRoot();

  if (Pending.empty())
    return Root;

  // Add current root to PendingChains, unless we already indirectly
  // depend on it.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = Pending.size();
    for (; i != e; ++i) {
      assert(Pending[i].getNode()->getNumOperands() > 1);
      if (Pending[i].getNode()->getOperand(0) == Root)
        break; // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      Pending.push_back(Root);
  }

  if (Pending.size() == 1)
    Root = Pending[0];
  else
    Root = DAG.getTokenFactor(getCurSDLoc(), Pending);

  DAG.setRoot(Root);
  Pending.clear();
  return Root;
}

SDValue SelectionDAGBuilder::getMemoryRoot() {
  return updateRoot(PendingLoads);
}

SDValue SelectionDAGBuilder::getRoot() {
  // Chain up all pending constrained intrinsics together with all
  // pending loads, by simply appending them to PendingLoads and
  // then calling getMemoryRoot().
  PendingLoads.reserve(PendingLoads.size() +
                       PendingConstrainedFP.size() +
                       PendingConstrainedFPStrict.size());
  PendingLoads.append(PendingConstrainedFP.begin(),
                      PendingConstrainedFP.end());
  PendingLoads.append(PendingConstrainedFPStrict.begin(),
                      PendingConstrainedFPStrict.end());
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  return getMemoryRoot();
}

SDValue SelectionDAGBuilder::getControlRoot() {
  // We need to emit pending fpexcept.strict constrained intrinsics,
  // so append them to the PendingExports list.
  PendingExports.append(PendingConstrainedFPStrict.begin(),
                        PendingConstrainedFPStrict.end());
  PendingConstrainedFPStrict.clear();
  return updateRoot(PendingExports);
}
| 1173 | |
| 1174 | void SelectionDAGBuilder::handleDebugDeclare(Value *Address, |
| 1175 | DILocalVariable *Variable, |
| 1176 | DIExpression *Expression, |
| 1177 | DebugLoc DL) { |
| 1178 | assert(Variable && "Missing variable" ); |
| 1179 | |
| 1180 | // Check if address has undef value. |
| 1181 | if (!Address || isa<UndefValue>(Val: Address) || |
| 1182 | (Address->use_empty() && !isa<Argument>(Val: Address))) { |
| 1183 | LLVM_DEBUG( |
| 1184 | dbgs() |
| 1185 | << "dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n" ); |
| 1186 | return; |
| 1187 | } |
| 1188 | |
| 1189 | bool IsParameter = Variable->isParameter() || isa<Argument>(Val: Address); |
| 1190 | |
| 1191 | SDValue &N = NodeMap[Address]; |
| 1192 | if (!N.getNode() && isa<Argument>(Val: Address)) |
| 1193 | // Check unused arguments map. |
| 1194 | N = UnusedArgNodeMap[Address]; |
| 1195 | SDDbgValue *SDV; |
| 1196 | if (N.getNode()) { |
| 1197 | if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Val: Address)) |
| 1198 | Address = BCI->getOperand(i_nocapture: 0); |
| 1199 | // Parameters are handled specially. |
| 1200 | auto *FINode = dyn_cast<FrameIndexSDNode>(Val: N.getNode()); |
| 1201 | if (IsParameter && FINode) { |
| 1202 | // Byval parameter. We have a frame index at this point. |
| 1203 | SDV = DAG.getFrameIndexDbgValue(Var: Variable, Expr: Expression, FI: FINode->getIndex(), |
| 1204 | /*IsIndirect*/ true, DL, O: SDNodeOrder); |
| 1205 | } else if (isa<Argument>(Val: Address)) { |
| 1206 | // Address is an argument, so try to emit its dbg value using |
| 1207 | // virtual register info from the FuncInfo.ValueMap. |
| 1208 | EmitFuncArgumentDbgValue(V: Address, Variable, Expr: Expression, DL, |
| 1209 | Kind: FuncArgumentDbgValueKind::Declare, N); |
| 1210 | return; |
| 1211 | } else { |
| 1212 | SDV = DAG.getDbgValue(Var: Variable, Expr: Expression, N: N.getNode(), R: N.getResNo(), |
| 1213 | IsIndirect: true, DL, O: SDNodeOrder); |
| 1214 | } |
| 1215 | DAG.AddDbgValue(DB: SDV, isParameter: IsParameter); |
| 1216 | } else { |
| 1217 | // If Address is an argument then try to emit its dbg value using |
| 1218 | // virtual register info from the FuncInfo.ValueMap. |
| 1219 | if (!EmitFuncArgumentDbgValue(V: Address, Variable, Expr: Expression, DL, |
| 1220 | Kind: FuncArgumentDbgValueKind::Declare, N)) { |
| 1221 | LLVM_DEBUG(dbgs() << "dbg_declare: Dropping debug info" |
| 1222 | << " (could not emit func-arg dbg_value)\n" ); |
| 1223 | } |
| 1224 | } |
| 1225 | } |
| 1226 | |
| 1227 | void SelectionDAGBuilder::visitDbgInfo(const Instruction &I) { |
| 1228 | // Add SDDbgValue nodes for any var locs here. Do so before updating |
| 1229 | // SDNodeOrder, as this mapping is {Inst -> Locs BEFORE Inst}. |
| 1230 | if (FunctionVarLocs const *FnVarLocs = DAG.getFunctionVarLocs()) { |
| 1233 | for (auto It = FnVarLocs->locs_begin(Before: &I), End = FnVarLocs->locs_end(Before: &I); |
| 1234 | It != End; ++It) { |
| 1235 | auto *Var = FnVarLocs->getDILocalVariable(ID: It->VariableID); |
| 1236 | dropDanglingDebugInfo(Variable: Var, Expr: It->Expr); |
| 1237 | if (It->Values.isKillLocation(Expression: It->Expr)) { |
| 1238 | handleKillDebugValue(Var, Expr: It->Expr, DbgLoc: It->DL, Order: SDNodeOrder); |
| 1239 | continue; |
| 1240 | } |
| 1241 | SmallVector<Value *> Values(It->Values.location_ops()); |
| 1242 | if (!handleDebugValue(Values, Var, Expr: It->Expr, DbgLoc: It->DL, Order: SDNodeOrder, |
| 1243 | IsVariadic: It->Values.hasArgList())) { |
| 1244 | addDanglingDebugInfo(Values, Var, Expr: It->Expr,
| 1245 | IsVariadic: Values.size() > 1, DL: It->DL, Order: SDNodeOrder);
| 1248 | } |
| 1249 | } |
| 1250 | } |
| 1251 | |
| 1252 | // We must skip DbgVariableRecords if they've already been processed above as
| 1253 | // we have just emitted the debug values resulting from assignment tracking
| 1254 | // analysis, making any existing DbgVariableRecords redundant (and probably
| 1255 | // less correct). We still need to process DbgLabelRecords. This does sink
| 1256 | // DbgLabelRecords to the bottom of the group of debug records. That shouldn't
| 1257 | // be important as it does so deterministically and ordering between
| 1258 | // DbgLabelRecords and DbgVariableRecords is immaterial (other than for MIR/IR
| 1259 | // printing).
| 1260 | bool SkipDbgVariableRecords = DAG.getFunctionVarLocs(); |
| 1261 | // Check whether there is any debug-info attached to this instruction, in the
| 1262 | // form of DbgRecord non-instruction debug-info records.
| 1263 | for (DbgRecord &DR : I.getDbgRecordRange()) { |
| 1264 | if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(Val: &DR)) { |
| 1265 | assert(DLR->getLabel() && "Missing label");
| 1266 | SDDbgLabel *SDV = |
| 1267 | DAG.getDbgLabel(Label: DLR->getLabel(), DL: DLR->getDebugLoc(), O: SDNodeOrder); |
| 1268 | DAG.AddDbgLabel(DB: SDV); |
| 1269 | continue; |
| 1270 | } |
| 1271 | |
| 1272 | if (SkipDbgVariableRecords) |
| 1273 | continue; |
| 1274 | DbgVariableRecord &DVR = cast<DbgVariableRecord>(Val&: DR); |
| 1275 | DILocalVariable *Variable = DVR.getVariable(); |
| 1276 | DIExpression *Expression = DVR.getExpression(); |
| 1277 | dropDanglingDebugInfo(Variable, Expr: Expression); |
| 1278 | |
| 1279 | if (DVR.getType() == DbgVariableRecord::LocationType::Declare) { |
| 1280 | if (FuncInfo.PreprocessedDVRDeclares.contains(Ptr: &DVR)) |
| 1281 | continue; |
| 1282 | LLVM_DEBUG(dbgs() << "SelectionDAG visiting dbg_declare: " << DVR |
| 1283 | << "\n" ); |
| 1284 | handleDebugDeclare(Address: DVR.getVariableLocationOp(OpIdx: 0), Variable, Expression, |
| 1285 | DL: DVR.getDebugLoc()); |
| 1286 | continue; |
| 1287 | } |
| 1288 | |
| 1289 | // A DbgVariableRecord with no locations is a kill location. |
| 1290 | SmallVector<Value *, 4> Values(DVR.location_ops()); |
| 1291 | if (Values.empty()) { |
| 1292 | handleKillDebugValue(Var: Variable, Expr: Expression, DbgLoc: DVR.getDebugLoc(), |
| 1293 | Order: SDNodeOrder); |
| 1294 | continue; |
| 1295 | } |
| 1296 | |
| 1297 | // A DbgVariableRecord with an undef or absent location is also a kill |
| 1298 | // location. |
| 1299 | if (llvm::any_of(Range&: Values, |
| 1300 | P: [](Value *V) { return !V || isa<UndefValue>(Val: V); })) { |
| 1301 | handleKillDebugValue(Var: Variable, Expr: Expression, DbgLoc: DVR.getDebugLoc(), |
| 1302 | Order: SDNodeOrder); |
| 1303 | continue; |
| 1304 | } |
| 1305 | |
| 1306 | bool IsVariadic = DVR.hasArgList(); |
| 1307 | if (!handleDebugValue(Values, Var: Variable, Expr: Expression, DbgLoc: DVR.getDebugLoc(), |
| 1308 | Order: SDNodeOrder, IsVariadic)) { |
| 1309 | addDanglingDebugInfo(Values, Var: Variable, Expr: Expression, IsVariadic, |
| 1310 | DL: DVR.getDebugLoc(), Order: SDNodeOrder); |
| 1311 | } |
| 1312 | } |
| 1313 | } |
| 1314 | |
| 1315 | void SelectionDAGBuilder::visit(const Instruction &I) { |
| 1316 | visitDbgInfo(I); |
| 1317 | |
| 1318 | // Set up outgoing PHI node register values before emitting the terminator. |
| 1319 | if (I.isTerminator()) { |
| 1320 | HandlePHINodesInSuccessorBlocks(LLVMBB: I.getParent()); |
| 1321 | } |
| 1322 | |
| 1323 | ++SDNodeOrder; |
| 1324 | CurInst = &I; |
| 1325 | |
| 1326 | // Set up an inserted-node listener only if required.
| 1327 | bool NodeInserted = false; |
| 1328 | std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener; |
| 1329 | MDNode *PCSectionsMD = I.getMetadata(KindID: LLVMContext::MD_pcsections); |
| 1330 | MDNode *MMRA = I.getMetadata(KindID: LLVMContext::MD_mmra); |
| 1331 | if (PCSectionsMD || MMRA) { |
| 1332 | InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>( |
| 1333 | args&: DAG, args: [&](SDNode *) { NodeInserted = true; }); |
| 1334 | } |
| 1335 | |
| 1336 | visit(Opcode: I.getOpcode(), I); |
| 1337 | |
| 1338 | if (!I.isTerminator() && !HasTailCall && |
| 1339 | !isa<GCStatepointInst>(Val: I)) // statepoints handle their exports internally |
| 1340 | CopyToExportRegsIfNeeded(V: &I); |
| 1341 | |
| 1342 | // Handle metadata. |
| 1343 | if (PCSectionsMD || MMRA) { |
| 1344 | auto It = NodeMap.find(Val: &I); |
| 1345 | if (It != NodeMap.end()) { |
| 1346 | if (PCSectionsMD) |
| 1347 | DAG.addPCSections(Node: It->second.getNode(), MD: PCSectionsMD); |
| 1348 | if (MMRA) |
| 1349 | DAG.addMMRAMetadata(Node: It->second.getNode(), MMRA); |
| 1350 | } else if (NodeInserted) { |
| 1351 | // This should not happen; if it does, don't let it go unnoticed so we can
| 1352 | // fix it. The relevant visit*() function is probably missing a setValue().
| 1353 | errs() << "warning: losing !pcsections and/or !mmra metadata ["
| 1354 | << I.getModule()->getName() << "]\n";
| 1355 | LLVM_DEBUG(I.dump()); |
| 1356 | assert(false); |
| 1357 | } |
| 1358 | } |
| 1359 | |
| 1360 | CurInst = nullptr; |
| 1361 | } |
| 1362 | |
| 1363 | void SelectionDAGBuilder::visitPHI(const PHINode &) { |
| 1364 | llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!" ); |
| 1365 | } |
| 1366 | |
| 1367 | void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) { |
| 1368 | // Note: this doesn't use InstVisitor, because it has to work with |
| 1369 | // ConstantExprs in addition to instructions.
| 1370 | switch (Opcode) { |
| 1371 | default: llvm_unreachable("Unknown instruction type encountered!");
| 1372 | // Build the switch statement using the Instruction.def file. |
| 1373 | #define HANDLE_INST(NUM, OPCODE, CLASS) \ |
| 1374 | case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break; |
| 1375 | #include "llvm/IR/Instruction.def" |
| 1376 | } |
| 1377 | } |
| 1378 | |
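|      | // Emit a variadic DBG_VALUE_LIST whose operands are all poison, explicitly
|      | // terminating any earlier location for the variable rather than silently
|      | // dropping it.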
| 1379 | static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG, |
| 1380 | DILocalVariable *Variable, |
| 1381 | DebugLoc DL, unsigned Order, |
| 1382 | SmallVectorImpl<Value *> &Values, |
| 1383 | DIExpression *Expression) { |
| 1384 | // For variadic dbg_values we will now insert poison. |
| 1385 | // FIXME: We can potentially recover these! |
| 1386 | SmallVector<SDDbgOperand, 2> Locs; |
| 1387 | for (const Value *V : Values) { |
| 1388 | auto *Poison = PoisonValue::get(T: V->getType()); |
| 1389 | Locs.push_back(Elt: SDDbgOperand::fromConst(Const: Poison)); |
| 1390 | } |
| 1391 | SDDbgValue *SDV = DAG.getDbgValueList(Var: Variable, Expr: Expression, Locs, Dependencies: {}, |
| 1392 | /*IsIndirect=*/false, DL, O: Order, |
| 1393 | /*IsVariadic=*/true); |
| 1394 | DAG.AddDbgValue(DB: SDV, /*isParameter=*/false); |
| 1395 | return true; |
| 1396 | } |
| 1397 | |
| 1398 | void SelectionDAGBuilder::addDanglingDebugInfo(SmallVectorImpl<Value *> &Values, |
| 1399 | DILocalVariable *Var, |
| 1400 | DIExpression *Expr, |
| 1401 | bool IsVariadic, DebugLoc DL, |
| 1402 | unsigned Order) { |
| 1403 | if (IsVariadic) { |
| 1404 | handleDanglingVariadicDebugInfo(DAG, Variable: Var, DL, Order, Values, Expression: Expr); |
| 1405 | return; |
| 1406 | } |
| 1407 | // TODO: Dangling debug info will eventually either be resolved or produce |
| 1408 | // a poison DBG_VALUE. However in the resolution case, a gap may appear |
| 1409 | // between the original dbg.value location and its resolved DBG_VALUE, |
| 1410 | // which we should ideally fill with an extra poison DBG_VALUE. |
| 1411 | assert(Values.size() == 1); |
| 1412 | DanglingDebugInfoMap[Values[0]].emplace_back(args&: Var, args&: Expr, args&: DL, args&: Order); |
| 1413 | } |
| 1414 | |
| 1415 | void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable, |
| 1416 | const DIExpression *Expr) { |
| 1417 | auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) { |
| 1418 | DIVariable *DanglingVariable = DDI.getVariable(); |
| 1419 | DIExpression *DanglingExpr = DDI.getExpression(); |
| 1420 | if (DanglingVariable == Variable && Expr->fragmentsOverlap(Other: DanglingExpr)) { |
| 1421 | LLVM_DEBUG(dbgs() << "Dropping dangling debug info for " |
| 1422 | << printDDI(nullptr, DDI) << "\n");
| 1423 | return true; |
| 1424 | } |
| 1425 | return false; |
| 1426 | }; |
| 1427 | |
| 1428 | for (auto &DDIMI : DanglingDebugInfoMap) { |
| 1429 | DanglingDebugInfoVector &DDIV = DDIMI.second; |
| 1430 | |
| 1431 | // If debug info is to be dropped, run it through final checks to see |
| 1432 | // whether it can be salvaged. |
| 1433 | for (auto &DDI : DDIV) |
| 1434 | if (isMatchingDbgValue(DDI)) |
| 1435 | salvageUnresolvedDbgValue(V: DDIMI.first, DDI); |
| 1436 | |
| 1437 | erase_if(C&: DDIV, P: isMatchingDbgValue); |
| 1438 | } |
| 1439 | } |
| 1440 | |
| 1441 | // resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V, |
| 1442 | // generate the debug data structures now that we've seen its definition. |
| 1443 | void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V, |
| 1444 | SDValue Val) { |
| 1445 | auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(Key: V); |
| 1446 | if (DanglingDbgInfoIt == DanglingDebugInfoMap.end()) |
| 1447 | return; |
| 1448 | |
| 1449 | DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second; |
| 1450 | for (auto &DDI : DDIV) { |
| 1451 | DebugLoc DL = DDI.getDebugLoc(); |
| 1452 | unsigned ValSDNodeOrder = Val.getNode()->getIROrder(); |
| 1453 | unsigned DbgSDNodeOrder = DDI.getSDNodeOrder(); |
| 1454 | DILocalVariable *Variable = DDI.getVariable(); |
| 1455 | DIExpression *Expr = DDI.getExpression(); |
| 1456 | assert(Variable->isValidLocationForIntrinsic(DL) && |
| 1457 | "Expected inlined-at fields to agree" ); |
| 1458 | SDDbgValue *SDV; |
| 1459 | if (Val.getNode()) { |
| 1460 | // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a |
| 1461 | // FuncArgumentDbgValue (it would be hoisted to the function entry, and if |
| 1462 | // we couldn't resolve it directly when examining the DbgValue intrinsic |
| 1463 | // in the first place we should not be more successful here). Unless we |
| 1464 | // have some test case that proves this to be correct we should avoid
| 1465 | // calling EmitFuncArgumentDbgValue here. |
| 1466 | if (!EmitFuncArgumentDbgValue(V, Variable, Expr, DL, |
| 1467 | Kind: FuncArgumentDbgValueKind::Value, N: Val)) { |
| 1468 | LLVM_DEBUG(dbgs() << "Resolve dangling debug info for " |
| 1469 | << printDDI(V, DDI) << "\n" ); |
| 1470 | LLVM_DEBUG(dbgs() << " By mapping to:\n " ; Val.dump()); |
| 1471 | // Increase the SDNodeOrder for the DbgValue here to make sure it is |
| 1472 | // inserted after the definition of Val when emitting the instructions |
| 1473 | // after ISel. An alternative could be to teach |
| 1474 | // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly. |
| 1475 | LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs() |
| 1476 | << "changing SDNodeOrder from " << DbgSDNodeOrder << " to " |
| 1477 | << ValSDNodeOrder << "\n");
| 1478 | SDV = getDbgValue(N: Val, Variable, Expr, dl: DL, |
| 1479 | DbgSDNodeOrder: std::max(a: DbgSDNodeOrder, b: ValSDNodeOrder)); |
| 1480 | DAG.AddDbgValue(DB: SDV, isParameter: false); |
| 1481 | } else |
| 1482 | LLVM_DEBUG(dbgs() << "Resolved dangling debug info for " |
| 1483 | << printDDI(V, DDI) |
| 1484 | << " in EmitFuncArgumentDbgValue\n" ); |
| 1485 | } else { |
| 1486 | LLVM_DEBUG(dbgs() << "Dropping debug info for " << printDDI(V, DDI) |
| 1487 | << "\n" ); |
| 1488 | auto Poison = PoisonValue::get(T: V->getType()); |
| 1489 | auto SDV = |
| 1490 | DAG.getConstantDbgValue(Var: Variable, Expr, C: Poison, DL, O: DbgSDNodeOrder); |
| 1491 | DAG.AddDbgValue(DB: SDV, isParameter: false); |
| 1492 | } |
| 1493 | } |
| 1494 | DDIV.clear(); |
| 1495 | } |
| 1496 | |
| 1497 | void SelectionDAGBuilder::salvageUnresolvedDbgValue(const Value *V, |
| 1498 | DanglingDebugInfo &DDI) { |
| 1499 | // TODO: For the variadic implementation, instead of only checking the fail |
| 1500 | // state of `handleDebugValue`, we need to know specifically which values
| 1501 | // were invalid, so that we attempt to salvage only those values when
| 1502 | // processing a DIArgList.
| 1503 | const Value *OrigV = V; |
| 1504 | DILocalVariable *Var = DDI.getVariable(); |
| 1505 | DIExpression *Expr = DDI.getExpression(); |
| 1506 | DebugLoc DL = DDI.getDebugLoc(); |
| 1507 | unsigned SDOrder = DDI.getSDNodeOrder(); |
| 1508 | |
| 1509 | // Currently we consider only dbg.value intrinsics -- we tell the salvager |
| 1510 | // that DW_OP_stack_value is desired. |
| 1511 | bool StackValue = true; |
| 1512 | |
| 1513 | // Can this Value be encoded without any further work?
| 1514 | if (handleDebugValue(Values: V, Var, Expr, DbgLoc: DL, Order: SDOrder, /*IsVariadic=*/false)) |
| 1515 | return; |
| 1516 | |
| 1517 | // Attempt to salvage back through as many instructions as possible. Bail if |
| 1518 | // a non-instruction is seen, such as a constant expression or global |
| 1519 | // variable. FIXME: Further work could recover those too. |
| 1520 | while (isa<Instruction>(Val: V)) { |
| 1521 | const Instruction &VAsInst = *cast<const Instruction>(Val: V); |
| 1522 | // Temporary "0", awaiting real implementation. |
| 1523 | SmallVector<uint64_t, 16> Ops; |
| 1524 | SmallVector<Value *, 4> AdditionalValues; |
| 1525 | V = salvageDebugInfoImpl(I&: const_cast<Instruction &>(VAsInst), |
| 1526 | CurrentLocOps: Expr->getNumLocationOperands(), Ops, |
| 1527 | AdditionalValues); |
| 1528 | // If we cannot salvage any further, and haven't yet found a suitable debug |
| 1529 | // expression, bail out. |
| 1530 | if (!V) |
| 1531 | break; |
| 1532 | |
| 1533 | // TODO: If AdditionalValues isn't empty, then the salvage can only be |
| 1534 | // represented with a DBG_VALUE_LIST, so we give up. When we have support |
| 1535 | // here for variadic dbg_values, remove that condition. |
| 1536 | if (!AdditionalValues.empty()) |
| 1537 | break; |
| 1538 | |
| 1539 | // The new value and expression now represent this debug info.
| 1540 | Expr = DIExpression::appendOpsToArg(Expr, Ops, ArgNo: 0, StackValue); |
| 1541 | |
| 1542 | // Some kind of simplification occurred: check whether the operand of the |
| 1543 | // salvaged debug expression can be encoded in this DAG. |
| 1544 | if (handleDebugValue(Values: V, Var, Expr, DbgLoc: DL, Order: SDOrder, /*IsVariadic=*/false)) { |
| 1545 | LLVM_DEBUG( |
| 1546 | dbgs() << "Salvaged debug location info for:\n " << *Var << "\n" |
| 1547 | << *OrigV << "\nBy stripping back to:\n " << *V << "\n");
| 1548 | return; |
| 1549 | } |
| 1550 | } |
| 1551 | |
| 1552 | // This was the final opportunity to salvage this debug information, and it |
| 1553 | // couldn't be done. Place a poison DBG_VALUE at this location to terminate |
| 1554 | // any earlier variable location. |
| 1555 | assert(OrigV && "V shouldn't be null");
| 1556 | auto *Poison = PoisonValue::get(T: OrigV->getType()); |
| 1557 | auto *SDV = DAG.getConstantDbgValue(Var, Expr, C: Poison, DL, O: SDNodeOrder); |
| 1558 | DAG.AddDbgValue(DB: SDV, isParameter: false); |
| 1559 | LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n " |
| 1560 | << printDDI(OrigV, DDI) << "\n");
| 1561 | } |
| 1562 | |
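|      | /// Emit a kill location for Var: a non-variadic dbg_value of poison with the
|      | /// expression converted to an undef expression, terminating any earlier
|      | /// location range for the variable.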
| 1563 | void SelectionDAGBuilder::handleKillDebugValue(DILocalVariable *Var, |
| 1564 | DIExpression *Expr, |
| 1565 | DebugLoc DbgLoc, |
| 1566 | unsigned Order) { |
| 1567 | Value *Poison = PoisonValue::get(T: Type::getInt1Ty(C&: *Context)); |
| 1568 | DIExpression *NewExpr = |
| 1569 | const_cast<DIExpression *>(DIExpression::convertToUndefExpression(Expr)); |
| 1570 | handleDebugValue(Values: Poison, Var, Expr: NewExpr, DbgLoc, Order, |
| 1571 | /*IsVariadic*/ false); |
| 1572 | } |
| 1573 | |
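|      | /// Try to describe Values for Var in the DAG right now. Returns false if no
|      | /// SDDbgOperand could be produced for some value, in which case the caller is
|      | /// expected to let the debug info dangle or drop it.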
| 1574 | bool SelectionDAGBuilder::handleDebugValue(ArrayRef<const Value *> Values, |
| 1575 | DILocalVariable *Var, |
| 1576 | DIExpression *Expr, DebugLoc DbgLoc, |
| 1577 | unsigned Order, bool IsVariadic) { |
| 1578 | if (Values.empty()) |
| 1579 | return true; |
| 1580 | |
| 1581 | // Filter EntryValue locations out early. |
| 1582 | if (visitEntryValueDbgValue(Values, Variable: Var, Expr, DbgLoc)) |
| 1583 | return true; |
| 1584 | |
| 1585 | SmallVector<SDDbgOperand> LocationOps; |
| 1586 | SmallVector<SDNode *> Dependencies; |
| 1587 | for (const Value *V : Values) { |
| 1588 | // Constant value. |
| 1589 | if (isa<ConstantInt>(Val: V) || isa<ConstantFP>(Val: V) || isa<UndefValue>(Val: V) || |
| 1590 | isa<ConstantPointerNull>(Val: V)) { |
| 1591 | LocationOps.emplace_back(Args: SDDbgOperand::fromConst(Const: V)); |
| 1592 | continue; |
| 1593 | } |
| 1594 | |
| 1595 | // Look through IntToPtr constants. |
| 1596 | if (auto *CE = dyn_cast<ConstantExpr>(Val: V)) |
| 1597 | if (CE->getOpcode() == Instruction::IntToPtr) { |
| 1598 | LocationOps.emplace_back(Args: SDDbgOperand::fromConst(Const: CE->getOperand(i_nocapture: 0))); |
| 1599 | continue; |
| 1600 | } |
| 1601 | |
| 1602 | // If the Value is a frame index, we can create a FrameIndex debug value |
| 1603 | // without relying on the DAG at all. |
| 1604 | if (const AllocaInst *AI = dyn_cast<AllocaInst>(Val: V)) { |
| 1605 | auto SI = FuncInfo.StaticAllocaMap.find(Val: AI); |
| 1606 | if (SI != FuncInfo.StaticAllocaMap.end()) { |
| 1607 | LocationOps.emplace_back(Args: SDDbgOperand::fromFrameIdx(FrameIdx: SI->second)); |
| 1608 | continue; |
| 1609 | } |
| 1610 | } |
| 1611 | |
| 1612 | // Do not use getValue() in here; we don't want to generate code at |
| 1613 | // this point if it hasn't been done yet. |
| 1614 | SDValue N = NodeMap[V]; |
| 1615 | if (!N.getNode() && isa<Argument>(Val: V)) // Check unused arguments map. |
| 1616 | N = UnusedArgNodeMap[V]; |
| 1617 | |
| 1618 | if (N.getNode()) { |
| 1619 | // Only emit func arg dbg value for non-variadic dbg.values for now. |
| 1620 | if (!IsVariadic && |
| 1621 | EmitFuncArgumentDbgValue(V, Variable: Var, Expr, DL: DbgLoc, |
| 1622 | Kind: FuncArgumentDbgValueKind::Value, N)) |
| 1623 | return true; |
| 1624 | if (auto *FISDN = dyn_cast<FrameIndexSDNode>(Val: N.getNode())) { |
| 1625 | // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can |
| 1626 | // describe stack slot locations. |
| 1627 | // |
| 1628 | // Consider "int x = 0; int *px = &x;". There are two kinds of |
| 1629 | // interesting debug values here after optimization: |
| 1630 | // |
| 1631 | // dbg.value(i32* %px, !"int *px", !DIExpression()), and |
| 1632 | // dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref)) |
| 1633 | // |
| 1634 | // Both describe the direct values of their associated variables. |
| 1635 | Dependencies.push_back(Elt: N.getNode()); |
| 1636 | LocationOps.emplace_back(Args: SDDbgOperand::fromFrameIdx(FrameIdx: FISDN->getIndex())); |
| 1637 | continue; |
| 1638 | } |
| 1639 | LocationOps.emplace_back( |
| 1640 | Args: SDDbgOperand::fromNode(Node: N.getNode(), ResNo: N.getResNo())); |
| 1641 | continue; |
| 1642 | } |
| 1643 | |
| 1644 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 1645 | // Special rules apply for the first dbg.values of parameter variables in a
| 1646 | // function: they reference Argument Values, the variable is a parameter, and
| 1647 | // there is no inlined-at location (the parameter belongs to the current
| 1648 | // function). We need to let them dangle until they get an SDNode.
| 1649 | bool IsParamOfFunc = |
| 1650 | isa<Argument>(Val: V) && Var->isParameter() && !DbgLoc.getInlinedAt(); |
| 1651 | if (IsParamOfFunc) |
| 1652 | return false; |
| 1653 | |
| 1654 | // The value is not used in this block yet (or it would have an SDNode). |
| 1655 | // We still want the value to appear for the user if possible -- if it has |
| 1656 | // an associated VReg, we can refer to that instead. |
| 1657 | auto VMI = FuncInfo.ValueMap.find(Val: V); |
| 1658 | if (VMI != FuncInfo.ValueMap.end()) { |
| 1659 | Register Reg = VMI->second; |
| 1660 | // If this is a PHI node, it may be split up into several MI PHI nodes |
| 1661 | // (in FunctionLoweringInfo::set). |
| 1662 | RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, |
| 1663 | V->getType(), std::nullopt); |
| 1664 | if (RFV.occupiesMultipleRegs()) { |
| 1665 | // FIXME: We could potentially support variadic dbg_values here. |
| 1666 | if (IsVariadic) |
| 1667 | return false; |
| 1668 | unsigned Offset = 0; |
| 1669 | unsigned BitsToDescribe = 0; |
| 1670 | if (auto VarSize = Var->getSizeInBits()) |
| 1671 | BitsToDescribe = *VarSize; |
| 1672 | if (auto Fragment = Expr->getFragmentInfo()) |
| 1673 | BitsToDescribe = Fragment->SizeInBits; |
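|      | // Illustrative example: an i64 value split across two i32 registers yields
|      | // two DBG_VALUEs whose expressions carry DW_OP_LLVM_fragment (0, 32) and
|      | // (32, 32) respectively.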
| 1674 | for (const auto &RegAndSize : RFV.getRegsAndSizes()) { |
| 1675 | // Bail out if all bits are described already. |
| 1676 | if (Offset >= BitsToDescribe) |
| 1677 | break; |
| 1678 | // TODO: handle scalable vectors. |
| 1679 | unsigned RegisterSize = RegAndSize.second; |
| 1680 | unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe) |
| 1681 | ? BitsToDescribe - Offset |
| 1682 | : RegisterSize; |
| 1683 | auto FragmentExpr = DIExpression::createFragmentExpression( |
| 1684 | Expr, OffsetInBits: Offset, SizeInBits: FragmentSize); |
| 1685 | if (!FragmentExpr) |
| 1686 | continue; |
| 1687 | SDDbgValue *SDV = DAG.getVRegDbgValue( |
| 1688 | Var, Expr: *FragmentExpr, VReg: RegAndSize.first, IsIndirect: false, DL: DbgLoc, O: Order); |
| 1689 | DAG.AddDbgValue(DB: SDV, isParameter: false); |
| 1690 | Offset += RegisterSize; |
| 1691 | } |
| 1692 | return true; |
| 1693 | } |
| 1694 | // We can use simple vreg locations for variadic dbg_values as well. |
| 1695 | LocationOps.emplace_back(Args: SDDbgOperand::fromVReg(VReg: Reg)); |
| 1696 | continue; |
| 1697 | } |
| 1698 | // We failed to create an SDDbgOperand for V.
| 1699 | return false; |
| 1700 | } |
| 1701 | |
| 1702 | // We have created an SDDbgOperand for each Value in Values.
| 1703 | assert(!LocationOps.empty()); |
| 1704 | SDDbgValue *SDV = |
| 1705 | DAG.getDbgValueList(Var, Expr, Locs: LocationOps, Dependencies, |
| 1706 | /*IsIndirect=*/false, DL: DbgLoc, O: Order, IsVariadic); |
| 1707 | DAG.AddDbgValue(DB: SDV, /*isParameter=*/false); |
| 1708 | return true; |
| 1709 | } |
| 1710 | |
| 1711 | void SelectionDAGBuilder::resolveOrClearDbgInfo() { |
| 1712 | // Try to fixup any remaining dangling debug info -- and drop it if we can't. |
| 1713 | for (auto &Pair : DanglingDebugInfoMap) |
| 1714 | for (auto &DDI : Pair.second) |
| 1715 | salvageUnresolvedDbgValue(V: const_cast<Value *>(Pair.first), DDI); |
| 1716 | clearDanglingDebugInfo(); |
| 1717 | } |
| 1718 | |
| 1719 | /// getCopyFromRegs - If there was a virtual register allocated for the value V,
| 1720 | /// emit CopyFromReg of the specified type Ty. Return empty SDValue() otherwise. |
| 1721 | SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) { |
| 1722 | DenseMap<const Value *, Register>::iterator It = FuncInfo.ValueMap.find(Val: V); |
| 1723 | SDValue Result; |
| 1724 | |
| 1725 | if (It != FuncInfo.ValueMap.end()) { |
| 1726 | Register InReg = It->second; |
| 1727 | |
| 1728 | RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(), |
| 1729 | DAG.getDataLayout(), InReg, Ty, |
| 1730 | std::nullopt); // This is not an ABI copy. |
| 1731 | SDValue Chain = DAG.getEntryNode(); |
| 1732 | Result = RFV.getCopyFromRegs(DAG, FuncInfo, dl: getCurSDLoc(), Chain, Glue: nullptr, |
| 1733 | V); |
| 1734 | resolveDanglingDebugInfo(V, Val: Result); |
| 1735 | } |
| 1736 | |
| 1737 | return Result; |
| 1738 | } |
| 1739 | |
| 1740 | /// getValue - Return an SDValue for the given Value. |
| 1741 | SDValue SelectionDAGBuilder::getValue(const Value *V) { |
| 1742 | // If we already have an SDValue for this value, use it. It's important |
| 1743 | // to do this first, so that we don't create a CopyFromReg if we already |
| 1744 | // have a regular SDValue. |
| 1745 | SDValue &N = NodeMap[V]; |
| 1746 | if (N.getNode()) return N; |
| 1747 | |
| 1748 | // If there's a virtual register allocated and initialized for this |
| 1749 | // value, use it. |
| 1750 | if (SDValue copyFromReg = getCopyFromRegs(V, Ty: V->getType())) |
| 1751 | return copyFromReg; |
| 1752 | |
| 1753 | // Otherwise create a new SDValue and remember it. |
| 1754 | SDValue Val = getValueImpl(V); |
| 1755 | NodeMap[V] = Val; |
| 1756 | resolveDanglingDebugInfo(V, Val); |
| 1757 | return Val; |
| 1758 | } |
| 1759 | |
| 1760 | /// getNonRegisterValue - Return an SDValue for the given Value, but |
| 1761 | /// don't look in FuncInfo.ValueMap for a virtual register. |
| 1762 | SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) { |
| 1763 | // If we already have an SDValue for this value, use it. |
| 1764 | SDValue &N = NodeMap[V]; |
| 1765 | if (N.getNode()) { |
| 1766 | if (isIntOrFPConstant(V: N)) { |
| 1767 | // Remove the debug location from the node as the node is about to be used |
| 1768 | // in a location which may differ from the original debug location. This |
| 1769 | // is relevant to Constant and ConstantFP nodes because they can appear |
| 1770 | // as constant expressions inside PHI nodes. |
| 1771 | N->setDebugLoc(DebugLoc()); |
| 1772 | } |
| 1773 | return N; |
| 1774 | } |
| 1775 | |
| 1776 | // Otherwise create a new SDValue and remember it. |
| 1777 | SDValue Val = getValueImpl(V); |
| 1778 | NodeMap[V] = Val; |
| 1779 | resolveDanglingDebugInfo(V, Val); |
| 1780 | return Val; |
| 1781 | } |
| 1782 | |
| 1783 | /// getValueImpl - Helper function for getValue and getNonRegisterValue. |
| 1784 | /// Create an SDValue for the given value. |
| 1785 | SDValue SelectionDAGBuilder::getValueImpl(const Value *V) { |
| 1786 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 1787 | |
| 1788 | if (const Constant *C = dyn_cast<Constant>(Val: V)) { |
| 1789 | EVT VT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: V->getType(), AllowUnknown: true); |
| 1790 | |
| 1791 | if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val: C)) { |
| 1792 | SDLoc DL = getCurSDLoc(); |
| 1793 | |
| 1794 | // DAG.getConstant() may attempt to legalise the vector constant which can |
| 1795 | // significantly change the combines applied to the DAG. To reduce the |
| 1796 | // divergence when enabling ConstantInt based vectors we try to construct |
| 1797 | // the DAG in the same way as shufflevector based splats. TODO: The |
| 1798 | // divergence sometimes leads to better optimisations. Ideally we should |
| 1799 | // prevent DAG.getConstant() from legalising too early but there are some |
| 1800 | // degradations preventing this. |
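|      | // Illustrative example: splatting ConstantInt 1 into <vscale x 4 x i32> is
|      | // built as a SPLAT_VECTOR of an i32 constant 1, mirroring what a
|      | // shufflevector-based splat would produce.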
| 1801 | if (VT.isScalableVector()) |
| 1802 | return DAG.getNode( |
| 1803 | Opcode: ISD::SPLAT_VECTOR, DL, VT, |
| 1804 | Operand: DAG.getConstant(Val: CI->getValue(), DL, VT: VT.getVectorElementType())); |
| 1805 | if (VT.isFixedLengthVector()) |
| 1806 | return DAG.getSplatBuildVector( |
| 1807 | VT, DL, |
| 1808 | Op: DAG.getConstant(Val: CI->getValue(), DL, VT: VT.getVectorElementType())); |
| 1809 | return DAG.getConstant(Val: *CI, DL, VT); |
| 1810 | } |
| 1811 | |
| 1812 | if (const GlobalValue *GV = dyn_cast<GlobalValue>(Val: C)) |
| 1813 | return DAG.getGlobalAddress(GV, DL: getCurSDLoc(), VT); |
| 1814 | |
| 1815 | if (const ConstantPtrAuth *CPA = dyn_cast<ConstantPtrAuth>(Val: C)) { |
| 1816 | return DAG.getNode(Opcode: ISD::PtrAuthGlobalAddress, DL: getCurSDLoc(), VT, |
| 1817 | N1: getValue(V: CPA->getPointer()), N2: getValue(V: CPA->getKey()), |
| 1818 | N3: getValue(V: CPA->getAddrDiscriminator()), |
| 1819 | N4: getValue(V: CPA->getDiscriminator())); |
| 1820 | } |
| 1821 | |
| 1822 | if (isa<ConstantPointerNull>(Val: C)) { |
| 1823 | unsigned AS = V->getType()->getPointerAddressSpace(); |
| 1824 | return DAG.getConstant(Val: 0, DL: getCurSDLoc(), |
| 1825 | VT: TLI.getPointerTy(DL: DAG.getDataLayout(), AS)); |
| 1826 | } |
| 1827 | |
| 1828 | if (match(V: C, P: m_VScale())) |
| 1829 | return DAG.getVScale(DL: getCurSDLoc(), VT, MulImm: APInt(VT.getSizeInBits(), 1)); |
| 1830 | |
| 1831 | if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Val: C)) |
| 1832 | return DAG.getConstantFP(V: *CFP, DL: getCurSDLoc(), VT); |
| 1833 | |
| 1834 | if (isa<UndefValue>(Val: C) && !V->getType()->isAggregateType()) |
| 1835 | return isa<PoisonValue>(Val: C) ? DAG.getPOISON(VT) : DAG.getUNDEF(VT); |
| 1836 | |
| 1837 | if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(Val: C)) { |
| 1838 | visit(Opcode: CE->getOpcode(), I: *CE); |
| 1839 | SDValue N1 = NodeMap[V]; |
| 1840 | assert(N1.getNode() && "visit didn't populate the NodeMap!");
| 1841 | return N1; |
| 1842 | } |
| 1843 | |
| 1844 | if (isa<ConstantStruct>(Val: C) || isa<ConstantArray>(Val: C)) { |
| 1845 | SmallVector<SDValue, 4> Constants; |
| 1846 | for (const Use &U : C->operands()) { |
| 1847 | SDNode *Val = getValue(V: U).getNode(); |
| 1848 | // If the operand is an empty aggregate, there are no values. |
| 1849 | if (!Val) continue; |
| 1850 | // Add each leaf value from the operand to the Constants list |
| 1851 | // to form a flattened list of all the values. |
| 1852 | for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i) |
| 1853 | Constants.push_back(Elt: SDValue(Val, i)); |
| 1854 | } |
| 1855 | |
| 1856 | return DAG.getMergeValues(Ops: Constants, dl: getCurSDLoc()); |
| 1857 | } |
| 1858 | |
| 1859 | if (const ConstantDataSequential *CDS = |
| 1860 | dyn_cast<ConstantDataSequential>(Val: C)) { |
| 1861 | SmallVector<SDValue, 4> Ops; |
| 1862 | for (uint64_t i = 0, e = CDS->getNumElements(); i != e; ++i) {
| 1863 | SDNode *Val = getValue(V: CDS->getElementAsConstant(i)).getNode();
| 1864 | // Add each leaf value from the element to the Ops list to form a
| 1865 | // flattened list of all the values.
| 1866 | for (unsigned j = 0, NumVals = Val->getNumValues(); j != NumVals; ++j)
| 1867 | Ops.push_back(Elt: SDValue(Val, j));
| 1868 | } |
| 1869 | |
| 1870 | if (isa<ArrayType>(Val: CDS->getType())) |
| 1871 | return DAG.getMergeValues(Ops, dl: getCurSDLoc()); |
| 1872 | return DAG.getBuildVector(VT, DL: getCurSDLoc(), Ops); |
| 1873 | } |
| 1874 | |
| 1875 | if (C->getType()->isStructTy() || C->getType()->isArrayTy()) { |
| 1876 | assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) && |
| 1877 | "Unknown struct or array constant!" ); |
| 1878 | |
| 1879 | SmallVector<EVT, 4> ValueVTs; |
| 1880 | ComputeValueVTs(TLI, DL: DAG.getDataLayout(), Ty: C->getType(), ValueVTs); |
| 1881 | unsigned NumElts = ValueVTs.size(); |
| 1882 | if (NumElts == 0) |
| 1883 | return SDValue(); // empty struct |
| 1884 | SmallVector<SDValue, 4> Constants(NumElts); |
| 1885 | for (unsigned i = 0; i != NumElts; ++i) { |
| 1886 | EVT EltVT = ValueVTs[i]; |
| 1887 | if (isa<UndefValue>(Val: C)) |
| 1888 | Constants[i] = DAG.getUNDEF(VT: EltVT); |
| 1889 | else if (EltVT.isFloatingPoint()) |
| 1890 | Constants[i] = DAG.getConstantFP(Val: 0, DL: getCurSDLoc(), VT: EltVT); |
| 1891 | else |
| 1892 | Constants[i] = DAG.getConstant(Val: 0, DL: getCurSDLoc(), VT: EltVT); |
| 1893 | } |
| 1894 | |
| 1895 | return DAG.getMergeValues(Ops: Constants, dl: getCurSDLoc()); |
| 1896 | } |
| 1897 | |
| 1898 | if (const BlockAddress *BA = dyn_cast<BlockAddress>(Val: C)) |
| 1899 | return DAG.getBlockAddress(BA, VT); |
| 1900 | |
| 1901 | if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(Val: C)) |
| 1902 | return getValue(V: Equiv->getGlobalValue()); |
| 1903 | |
| 1904 | if (const auto *NC = dyn_cast<NoCFIValue>(Val: C)) |
| 1905 | return getValue(V: NC->getGlobalValue()); |
| 1906 | |
| 1907 | if (VT == MVT::aarch64svcount) { |
| 1908 | assert(C->isNullValue() && "Can only zero this target type!");
| 1909 | return DAG.getNode(Opcode: ISD::BITCAST, DL: getCurSDLoc(), VT, |
| 1910 | Operand: DAG.getConstant(Val: 0, DL: getCurSDLoc(), VT: MVT::nxv16i1)); |
| 1911 | } |
| 1912 | |
| 1913 | if (VT.isRISCVVectorTuple()) { |
| 1914 | assert(C->isNullValue() && "Can only zero this target type!");
| 1915 | return DAG.getNode( |
| 1916 | Opcode: ISD::BITCAST, DL: getCurSDLoc(), VT, |
| 1917 | Operand: DAG.getNode( |
| 1918 | Opcode: ISD::SPLAT_VECTOR, DL: getCurSDLoc(), |
| 1919 | VT: EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::i8, |
| 1920 | NumElements: VT.getSizeInBits().getKnownMinValue() / 8, IsScalable: true), |
| 1921 | Operand: DAG.getConstant(Val: 0, DL: getCurSDLoc(), VT: MVT::getIntegerVT(BitWidth: 8)))); |
| 1922 | } |
| 1923 | |
| 1924 | VectorType *VecTy = cast<VectorType>(Val: V->getType()); |
| 1925 | |
| 1926 | // Now that we know the number and type of the elements, get that number of |
| 1927 | // elements into the Ops array based on what kind of constant it is. |
| 1928 | if (const ConstantVector *CV = dyn_cast<ConstantVector>(Val: C)) { |
| 1929 | SmallVector<SDValue, 16> Ops; |
| 1930 | unsigned NumElements = cast<FixedVectorType>(Val: VecTy)->getNumElements(); |
| 1931 | for (unsigned i = 0; i != NumElements; ++i) |
| 1932 | Ops.push_back(Elt: getValue(V: CV->getOperand(i_nocapture: i))); |
| 1933 | |
| 1934 | return DAG.getBuildVector(VT, DL: getCurSDLoc(), Ops); |
| 1935 | } |
| 1936 | |
| 1937 | if (isa<ConstantAggregateZero>(Val: C)) { |
| 1938 | EVT EltVT = |
| 1939 | TLI.getValueType(DL: DAG.getDataLayout(), Ty: VecTy->getElementType()); |
| 1940 | |
| 1941 | SDValue Op; |
| 1942 | if (EltVT.isFloatingPoint()) |
| 1943 | Op = DAG.getConstantFP(Val: 0, DL: getCurSDLoc(), VT: EltVT); |
| 1944 | else |
| 1945 | Op = DAG.getConstant(Val: 0, DL: getCurSDLoc(), VT: EltVT); |
| 1946 | |
| 1947 | return DAG.getSplat(VT, DL: getCurSDLoc(), Op); |
| 1948 | } |
| 1949 | |
| 1950 | llvm_unreachable("Unknown vector constant" ); |
| 1951 | } |
| 1952 | |
| 1953 | // If this is a static alloca, generate it as the frameindex instead of |
| 1954 | // a computation.
| 1955 | if (const AllocaInst *AI = dyn_cast<AllocaInst>(Val: V)) { |
| 1956 | DenseMap<const AllocaInst*, int>::iterator SI = |
| 1957 | FuncInfo.StaticAllocaMap.find(Val: AI); |
| 1958 | if (SI != FuncInfo.StaticAllocaMap.end()) |
| 1959 | return DAG.getFrameIndex( |
| 1960 | FI: SI->second, VT: TLI.getValueType(DL: DAG.getDataLayout(), Ty: AI->getType())); |
| 1961 | } |
| 1962 | |
| 1963 | // If this is an instruction which fast-isel has deferred, select it now. |
| 1964 | if (const Instruction *Inst = dyn_cast<Instruction>(Val: V)) { |
| 1965 | Register InReg = FuncInfo.InitializeRegForValue(V: Inst); |
| 1966 | |
| 1967 | RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg, |
| 1968 | Inst->getType(), std::nullopt); |
| 1969 | SDValue Chain = DAG.getEntryNode(); |
| 1970 | return RFV.getCopyFromRegs(DAG, FuncInfo, dl: getCurSDLoc(), Chain, Glue: nullptr, V); |
| 1971 | } |
| 1972 | |
| 1973 | if (const MetadataAsValue *MD = dyn_cast<MetadataAsValue>(Val: V)) |
| 1974 | return DAG.getMDNode(MD: cast<MDNode>(Val: MD->getMetadata())); |
| 1975 | |
| 1976 | if (const auto *BB = dyn_cast<BasicBlock>(Val: V)) |
| 1977 | return DAG.getBasicBlock(MBB: FuncInfo.getMBB(BB)); |
| 1978 | |
| 1979 | llvm_unreachable("Can't get register for value!" ); |
| 1980 | } |
| 1981 | |
| 1982 | void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) { |
| 1983 | auto Pers = classifyEHPersonality(Pers: FuncInfo.Fn->getPersonalityFn()); |
| 1984 | bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX; |
| 1985 | bool IsCoreCLR = Pers == EHPersonality::CoreCLR; |
| 1986 | bool IsSEH = isAsynchronousEHPersonality(Pers); |
| 1987 | MachineBasicBlock *CatchPadMBB = FuncInfo.MBB; |
| 1988 | if (IsSEH) { |
| 1989 | // For SEH, EHCont Guard needs to know that this catchpad is a target. |
| 1990 | CatchPadMBB->setIsEHContTarget(true); |
| 1991 | DAG.getMachineFunction().setHasEHContTarget(true); |
| 1992 | } else |
| 1993 | CatchPadMBB->setIsEHScopeEntry(); |
| 1994 | // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues. |
| 1995 | if (IsMSVCCXX || IsCoreCLR) |
| 1996 | CatchPadMBB->setIsEHFuncletEntry(); |
| 1997 | } |
| 1998 | |
| 1999 | void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) { |
| 2000 | // Update machine-CFG edge. |
| 2001 | MachineBasicBlock *TargetMBB = FuncInfo.getMBB(BB: I.getSuccessor()); |
| 2002 | FuncInfo.MBB->addSuccessor(Succ: TargetMBB); |
| 2003 | |
| 2004 | auto Pers = classifyEHPersonality(Pers: FuncInfo.Fn->getPersonalityFn()); |
| 2005 | bool IsSEH = isAsynchronousEHPersonality(Pers); |
| 2006 | if (IsSEH) { |
| 2007 | // If this is not a fall-through branch or optimizations are switched off, |
| 2008 | // emit the branch. |
| 2009 | if (TargetMBB != NextBlock(MBB: FuncInfo.MBB) || |
| 2010 | TM.getOptLevel() == CodeGenOptLevel::None) |
| 2011 | DAG.setRoot(DAG.getNode(Opcode: ISD::BR, DL: getCurSDLoc(), VT: MVT::Other, |
| 2012 | N1: getControlRoot(), N2: DAG.getBasicBlock(MBB: TargetMBB))); |
| 2013 | return; |
| 2014 | } |
| 2015 | |
| 2016 | // For non-SEH, EHCont Guard needs to know that this catchret is a target. |
| 2017 | TargetMBB->setIsEHContTarget(true); |
| 2018 | DAG.getMachineFunction().setHasEHContTarget(true); |
| 2019 | |
| 2020 | // Figure out the funclet membership for the catchret's successor. |
| 2021 | // This will be used by the FuncletLayout pass to determine how to order the |
| 2022 | // BB's. |
| 2023 | // A 'catchret' returns to the outer scope's color. |
| 2024 | Value *ParentPad = I.getCatchSwitchParentPad(); |
| 2025 | const BasicBlock *SuccessorColor; |
| 2026 | if (isa<ConstantTokenNone>(Val: ParentPad)) |
| 2027 | SuccessorColor = &FuncInfo.Fn->getEntryBlock(); |
| 2028 | else |
| 2029 | SuccessorColor = cast<Instruction>(Val: ParentPad)->getParent(); |
| 2030 | assert(SuccessorColor && "No parent funclet for catchret!");
| 2031 | MachineBasicBlock *SuccessorColorMBB = FuncInfo.getMBB(BB: SuccessorColor); |
| 2032 | assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
| 2033 | |
| 2034 | // Create the terminator node. |
| 2035 | SDValue Ret = DAG.getNode(Opcode: ISD::CATCHRET, DL: getCurSDLoc(), VT: MVT::Other, |
| 2036 | N1: getControlRoot(), N2: DAG.getBasicBlock(MBB: TargetMBB), |
| 2037 | N3: DAG.getBasicBlock(MBB: SuccessorColorMBB)); |
| 2038 | DAG.setRoot(Ret); |
| 2039 | } |
| 2040 | |
| 2041 | void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) { |
| 2042 | // Don't emit any special code for the cleanuppad instruction. It just marks |
| 2043 | // the start of an EH scope/funclet. |
| 2044 | FuncInfo.MBB->setIsEHScopeEntry(); |
| 2045 | auto Pers = classifyEHPersonality(Pers: FuncInfo.Fn->getPersonalityFn()); |
| 2046 | if (Pers != EHPersonality::Wasm_CXX) { |
| 2047 | FuncInfo.MBB->setIsEHFuncletEntry(); |
| 2048 | FuncInfo.MBB->setIsCleanupFuncletEntry(); |
| 2049 | } |
| 2050 | } |
| 2051 | |
| 2052 | /// When an invoke or a cleanupret unwinds to the next EH pad, there are |
| 2053 | /// many places it could ultimately go. In the IR, we have a single unwind |
| 2054 | /// destination, but in the machine CFG, we enumerate all the possible blocks. |
| 2055 | /// This function skips over imaginary basic blocks that hold catchswitch |
| 2056 | /// instructions, and finds all the "real" machine |
| 2057 | /// basic block destinations. As those destinations may not be successors of |
| 2058 | /// EHPadBB, here we also calculate the edge probability to those destinations. |
| 2059 | /// The passed-in Prob is the edge probability to EHPadBB. |
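|      | ///
|      | /// Illustrative IR: given
|      | ///
|      | ///   %cs = catchswitch within none [label %catch1, label %catch2]
|      | ///            unwind label %cleanup
|      | ///
|      | /// the catchpads %catch1 and %catch2 are added as destinations, and the walk
|      | /// continues through %cleanup's pad for any further destinations.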
| 2060 | static void findUnwindDestinations( |
| 2061 | FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, |
| 2062 | BranchProbability Prob, |
| 2063 | SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>> |
| 2064 | &UnwindDests) { |
| 2065 | EHPersonality Personality = |
| 2066 | classifyEHPersonality(Pers: FuncInfo.Fn->getPersonalityFn()); |
| 2067 | bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX; |
| 2068 | bool IsCoreCLR = Personality == EHPersonality::CoreCLR; |
| 2069 | bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX; |
| 2070 | bool IsSEH = isAsynchronousEHPersonality(Pers: Personality); |
| 2071 | |
| 2072 | while (EHPadBB) { |
| 2073 | BasicBlock::const_iterator Pad = EHPadBB->getFirstNonPHIIt(); |
| 2074 | BasicBlock *NewEHPadBB = nullptr; |
| 2075 | if (isa<LandingPadInst>(Val: Pad)) { |
| 2076 | // Stop on landingpads. They are not funclets. |
| 2077 | UnwindDests.emplace_back(Args: FuncInfo.getMBB(BB: EHPadBB), Args&: Prob); |
| 2078 | break; |
| 2079 | } else if (isa<CleanupPadInst>(Val: Pad)) { |
| 2080 | // Stop on cleanup pads. Cleanups are always funclet entries for all known |
| 2081 | // personalities except Wasm. And in Wasm this becomes a catch_all(_ref), |
| 2082 | // which always catches an exception. |
| 2083 | UnwindDests.emplace_back(Args: FuncInfo.getMBB(BB: EHPadBB), Args&: Prob); |
| 2084 | UnwindDests.back().first->setIsEHScopeEntry(); |
| 2085 | // In Wasm, EH scopes are not funclets |
| 2086 | if (!IsWasmCXX) |
| 2087 | UnwindDests.back().first->setIsEHFuncletEntry(); |
| 2088 | break; |
| 2089 | } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Val&: Pad)) { |
| 2090 | // Add the catchpad handlers to the possible destinations. |
| 2091 | for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) { |
| 2092 | UnwindDests.emplace_back(Args: FuncInfo.getMBB(BB: CatchPadBB), Args&: Prob); |
| 2093 | // For MSVC++ and the CLR, catchblocks are funclets and need prologues. |
| 2094 | if (IsMSVCCXX || IsCoreCLR) |
| 2095 | UnwindDests.back().first->setIsEHFuncletEntry(); |
| 2096 | if (!IsSEH) |
| 2097 | UnwindDests.back().first->setIsEHScopeEntry(); |
| 2098 | } |
| 2099 | NewEHPadBB = CatchSwitch->getUnwindDest(); |
| 2100 | } else { |
| 2101 | continue; |
| 2102 | } |
| 2103 | |
| 2104 | BranchProbabilityInfo *BPI = FuncInfo.BPI; |
| 2105 | if (BPI && NewEHPadBB) |
| 2106 | Prob *= BPI->getEdgeProbability(Src: EHPadBB, Dst: NewEHPadBB); |
| 2107 | EHPadBB = NewEHPadBB; |
| 2108 | } |
| 2109 | } |
| 2110 | |
| 2111 | void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) { |
| 2112 | // Update successor info. |
| 2113 | SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests; |
| 2114 | auto UnwindDest = I.getUnwindDest(); |
| 2115 | BranchProbabilityInfo *BPI = FuncInfo.BPI; |
| 2116 | BranchProbability UnwindDestProb = |
| 2117 | (BPI && UnwindDest) |
| 2118 | ? BPI->getEdgeProbability(Src: FuncInfo.MBB->getBasicBlock(), Dst: UnwindDest) |
| 2119 | : BranchProbability::getZero(); |
| 2120 | findUnwindDestinations(FuncInfo, EHPadBB: UnwindDest, Prob: UnwindDestProb, UnwindDests); |
| 2121 | for (auto &UnwindDest : UnwindDests) { |
| 2122 | UnwindDest.first->setIsEHPad(); |
| 2123 | addSuccessorWithProb(Src: FuncInfo.MBB, Dst: UnwindDest.first, Prob: UnwindDest.second); |
| 2124 | } |
| 2125 | FuncInfo.MBB->normalizeSuccProbs(); |
| 2126 | |
| 2127 | // Create the terminator node. |
| 2128 | MachineBasicBlock *CleanupPadMBB = |
| 2129 | FuncInfo.getMBB(BB: I.getCleanupPad()->getParent()); |
| 2130 | SDValue Ret = DAG.getNode(Opcode: ISD::CLEANUPRET, DL: getCurSDLoc(), VT: MVT::Other, |
| 2131 | N1: getControlRoot(), N2: DAG.getBasicBlock(MBB: CleanupPadMBB)); |
| 2132 | DAG.setRoot(Ret); |
| 2133 | } |
| 2134 | |
| 2135 | void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) { |
| 2136 | report_fatal_error(reason: "visitCatchSwitch not yet implemented!");
| 2137 | } |
| 2138 | |
| 2139 | void SelectionDAGBuilder::visitRet(const ReturnInst &I) { |
| 2140 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 2141 | auto &DL = DAG.getDataLayout(); |
| 2142 | SDValue Chain = getControlRoot(); |
| 2143 | SmallVector<ISD::OutputArg, 8> Outs; |
| 2144 | SmallVector<SDValue, 8> OutVals; |
| 2145 | |
| 2146 | // Calls to @llvm.experimental.deoptimize don't generate a return value, so |
| 2147 | // lower |
| 2148 | // |
| 2149 | // %val = call <ty> @llvm.experimental.deoptimize() |
| 2150 | // ret <ty> %val |
| 2151 | // |
| 2152 | // differently. |
| 2153 | if (I.getParent()->getTerminatingDeoptimizeCall()) { |
| 2154 | LowerDeoptimizingReturn(); |
| 2155 | return; |
| 2156 | } |
| 2157 | |
| 2158 | if (!FuncInfo.CanLowerReturn) { |
| 2159 | Register DemoteReg = FuncInfo.DemoteRegister; |
| 2160 | |
| 2161 | // Emit a store of the return value through the virtual register. |
| 2162 | // Leave Outs empty so that LowerReturn won't try to load return |
| 2163 | // registers the usual way. |
| 2164 | MVT PtrValueVT = TLI.getPointerTy(DL, AS: DL.getAllocaAddrSpace()); |
| 2165 | SDValue RetPtr = |
| 2166 | DAG.getCopyFromReg(Chain, dl: getCurSDLoc(), Reg: DemoteReg, VT: PtrValueVT); |
| 2167 | SDValue RetOp = getValue(V: I.getOperand(i_nocapture: 0)); |
| 2168 | |
| 2169 | SmallVector<EVT, 4> ValueVTs, MemVTs; |
| 2170 | SmallVector<uint64_t, 4> Offsets; |
| 2171 | ComputeValueVTs(TLI, DL, Ty: I.getOperand(i_nocapture: 0)->getType(), ValueVTs, MemVTs: &MemVTs, |
| 2172 | FixedOffsets: &Offsets, StartingOffset: 0); |
| 2173 | unsigned NumValues = ValueVTs.size(); |
| 2174 | |
| 2175 | SmallVector<SDValue, 4> Chains(NumValues); |
| 2176 | Align BaseAlign = DL.getPrefTypeAlign(Ty: I.getOperand(i_nocapture: 0)->getType()); |
| 2177 | for (unsigned i = 0; i != NumValues; ++i) { |
| 2178 | // An aggregate return value cannot wrap around the address space, so |
| 2179 | // offsets to its parts don't wrap either. |
| 2180 | SDValue Ptr = DAG.getObjectPtrOffset(SL: getCurSDLoc(), Ptr: RetPtr, |
| 2181 | Offset: TypeSize::getFixed(ExactSize: Offsets[i])); |
| 2182 | |
| 2183 | SDValue Val = RetOp.getValue(R: RetOp.getResNo() + i); |
| 2184 | if (MemVTs[i] != ValueVTs[i]) |
| 2185 | Val = DAG.getPtrExtOrTrunc(Op: Val, DL: getCurSDLoc(), VT: MemVTs[i]); |
| 2186 | Chains[i] = DAG.getStore( |
| 2187 | Chain, dl: getCurSDLoc(), Val, |
| 2188 | // FIXME: better loc info would be nice. |
| 2189 | Ptr, PtrInfo: MachinePointerInfo::getUnknownStack(MF&: DAG.getMachineFunction()), |
| 2190 | Alignment: commonAlignment(A: BaseAlign, Offset: Offsets[i])); |
| 2191 | } |
| 2192 | |
| 2193 | Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: getCurSDLoc(), |
| 2194 | VT: MVT::Other, Ops: Chains); |
| 2195 | } else if (I.getNumOperands() != 0) { |
| 2196 | SmallVector<EVT, 4> ValueVTs; |
| 2197 | ComputeValueVTs(TLI, DL, Ty: I.getOperand(i_nocapture: 0)->getType(), ValueVTs); |
| 2198 | unsigned NumValues = ValueVTs.size(); |
| 2199 | if (NumValues) { |
| 2200 | SDValue RetOp = getValue(V: I.getOperand(i_nocapture: 0)); |
| 2201 | |
| 2202 | const Function *F = I.getParent()->getParent(); |
| 2203 | |
| 2204 | bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters( |
| 2205 | Ty: I.getOperand(i_nocapture: 0)->getType(), CallConv: F->getCallingConv(), |
| 2206 | /*IsVarArg*/ isVarArg: false, DL); |
| 2207 | |
| 2208 | ISD::NodeType ExtendKind = ISD::ANY_EXTEND; |
| 2209 | if (F->getAttributes().hasRetAttr(Kind: Attribute::SExt)) |
| 2210 | ExtendKind = ISD::SIGN_EXTEND; |
| 2211 | else if (F->getAttributes().hasRetAttr(Kind: Attribute::ZExt)) |
| 2212 | ExtendKind = ISD::ZERO_EXTEND; |
| 2213 | |
| 2214 | LLVMContext &Context = F->getContext(); |
| 2215 | bool RetInReg = F->getAttributes().hasRetAttr(Kind: Attribute::InReg); |
| 2216 | |
| 2217 | for (unsigned j = 0; j != NumValues; ++j) { |
| 2218 | EVT VT = ValueVTs[j]; |
| 2219 | |
| 2220 | if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) |
| 2221 | VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind); |
| 2222 | |
| 2223 | CallingConv::ID CC = F->getCallingConv(); |
| 2224 | |
| 2225 | unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT); |
| 2226 | MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT); |
| 2227 | SmallVector<SDValue, 4> Parts(NumParts); |
| 2228 | getCopyToParts(DAG, DL: getCurSDLoc(), |
| 2229 | Val: SDValue(RetOp.getNode(), RetOp.getResNo() + j), |
| 2230 | Parts: &Parts[0], NumParts, PartVT, V: &I, CallConv: CC, ExtendKind); |
| 2231 | |
| 2232 | // 'inreg' on a function refers to the return value.
| 2233 | ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy(); |
| 2234 | if (RetInReg) |
| 2235 | Flags.setInReg(); |
| 2236 | |
| 2237 | if (I.getOperand(i_nocapture: 0)->getType()->isPointerTy()) { |
| 2238 | Flags.setPointer(); |
| 2239 | Flags.setPointerAddrSpace( |
| 2240 | cast<PointerType>(Val: I.getOperand(i_nocapture: 0)->getType())->getAddressSpace()); |
| 2241 | } |
| 2242 | |
| 2243 | if (NeedsRegBlock) { |
| 2244 | Flags.setInConsecutiveRegs(); |
| 2245 | if (j == NumValues - 1) |
| 2246 | Flags.setInConsecutiveRegsLast(); |
| 2247 | } |
| 2248 | |
| 2249 | // Propagate extension type if any |
| 2250 | if (ExtendKind == ISD::SIGN_EXTEND) |
| 2251 | Flags.setSExt(); |
| 2252 | else if (ExtendKind == ISD::ZERO_EXTEND) |
| 2253 | Flags.setZExt(); |
| 2254 | else if (F->getAttributes().hasRetAttr(Kind: Attribute::NoExt)) |
| 2255 | Flags.setNoExt(); |
| 2256 | |
| 2257 | for (unsigned i = 0; i < NumParts; ++i) { |
| 2258 | Outs.push_back(Elt: ISD::OutputArg(Flags, |
| 2259 | Parts[i].getValueType().getSimpleVT(), |
| 2260 | VT, /*isfixed=*/true, 0, 0)); |
| 2261 | OutVals.push_back(Elt: Parts[i]); |
| 2262 | } |
| 2263 | } |
| 2264 | } |
| 2265 | } |
| 2266 | |
| 2267 | // Push in the swifterror virtual register as the last element of Outs. This
| 2268 | // makes sure the swifterror virtual register will be returned in the
| 2269 | // swifterror physical register.
| 2270 | const Function *F = I.getParent()->getParent(); |
| 2271 | if (TLI.supportSwiftError() && |
| 2272 | F->getAttributes().hasAttrSomewhere(Kind: Attribute::SwiftError)) { |
| 2273 | assert(SwiftError.getFunctionArg() && "Need a swift error argument");
| 2274 | ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy(); |
| 2275 | Flags.setSwiftError(); |
| 2276 | Outs.push_back(Elt: ISD::OutputArg( |
| 2277 | Flags, /*vt=*/TLI.getPointerTy(DL), /*argvt=*/EVT(TLI.getPointerTy(DL)), |
| 2278 | /*isfixed=*/true, /*origidx=*/1, /*partOffs=*/0)); |
| 2279 | // Create SDNode for the swifterror virtual register. |
| 2280 | OutVals.push_back( |
| 2281 | Elt: DAG.getRegister(Reg: SwiftError.getOrCreateVRegUseAt( |
| 2282 | &I, FuncInfo.MBB, SwiftError.getFunctionArg()), |
| 2283 | VT: EVT(TLI.getPointerTy(DL)))); |
| 2284 | } |
| 2285 | |
| 2286 | bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg(); |
| 2287 | CallingConv::ID CallConv = |
| 2288 | DAG.getMachineFunction().getFunction().getCallingConv(); |
| 2289 | Chain = DAG.getTargetLoweringInfo().LowerReturn( |
| 2290 | Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG); |
| 2291 | |
| 2292 | // Verify that the target's LowerReturn behaved as expected. |
| 2293 | assert(Chain.getNode() && Chain.getValueType() == MVT::Other && |
| 2294 | "LowerReturn didn't return a valid chain!" ); |
| 2295 | |
| 2296 | // Update the DAG with the new chain value resulting from return lowering. |
| 2297 | DAG.setRoot(Chain); |
| 2298 | } |
| 2299 | |
| 2300 | /// CopyToExportRegsIfNeeded - If the given value has virtual registers |
| 2301 | /// created for it, emit nodes to copy the value into the virtual |
| 2302 | /// registers. |
| 2303 | void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) { |
| 2304 | // Skip empty types |
| 2305 | if (V->getType()->isEmptyTy()) |
| 2306 | return; |
| 2307 | |
| 2308 | DenseMap<const Value *, Register>::iterator VMI = FuncInfo.ValueMap.find(Val: V); |
| 2309 | if (VMI != FuncInfo.ValueMap.end()) { |
| 2310 | assert((!V->use_empty() || isa<CallBrInst>(V)) && |
| 2311 | "Unused value assigned virtual registers!" ); |
| 2312 | CopyValueToVirtualRegister(V, Reg: VMI->second); |
| 2313 | } |
| 2314 | } |
| 2315 | |
| 2316 | /// ExportFromCurrentBlock - If this condition isn't known to be exported from |
| 2317 | /// the current basic block, add it to ValueMap now so that we'll get a |
| 2318 | /// CopyTo/FromReg. |
| 2319 | void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) { |
| 2320 | // No need to export constants. |
| 2321 | if (!isa<Instruction>(Val: V) && !isa<Argument>(Val: V)) return; |
| 2322 | |
| 2323 | // Already exported? |
| 2324 | if (FuncInfo.isExportedInst(V)) return; |
| 2325 | |
| 2326 | Register Reg = FuncInfo.InitializeRegForValue(V); |
| 2327 | CopyValueToVirtualRegister(V, Reg); |
| 2328 | } |
| 2329 | |
| 2330 | bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V, |
| 2331 | const BasicBlock *FromBB) { |
| 2332 | // The operands of the setcc have to be in this block. We don't know |
| 2333 | // how to export them from some other block. |
| 2334 | if (const Instruction *VI = dyn_cast<Instruction>(Val: V)) { |
| 2335 | // Can export from current BB. |
| 2336 | if (VI->getParent() == FromBB) |
| 2337 | return true; |
| 2338 | |
| 2339 | // Is already exported, noop. |
| 2340 | return FuncInfo.isExportedInst(V); |
| 2341 | } |
| 2342 | |
| 2343 | // If this is an argument, we can export it if the BB is the entry block or |
| 2344 | // if it is already exported. |
| 2345 | if (isa<Argument>(Val: V)) { |
| 2346 | if (FromBB->isEntryBlock()) |
| 2347 | return true; |
| 2348 | |
| 2349 | // Otherwise, can only export this if it is already exported. |
| 2350 | return FuncInfo.isExportedInst(V); |
| 2351 | } |
| 2352 | |
| 2353 | // Otherwise, constants can always be exported. |
| 2354 | return true; |
| 2355 | } |
| 2356 | |
| 2357 | /// Return the branch probability calculated by BranchProbabilityInfo for IR blocks.
| 2358 | BranchProbability |
| 2359 | SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src, |
| 2360 | const MachineBasicBlock *Dst) const { |
| 2361 | BranchProbabilityInfo *BPI = FuncInfo.BPI; |
| 2362 | const BasicBlock *SrcBB = Src->getBasicBlock(); |
| 2363 | const BasicBlock *DstBB = Dst->getBasicBlock(); |
| 2364 | if (!BPI) { |
| 2365 | // If BPI is not available, set the default probability to 1 / N, where N
| 2366 | // is the number of successors.
| 2367 | auto SuccSize = std::max<uint32_t>(a: succ_size(BB: SrcBB), b: 1); |
| 2368 | return BranchProbability(1, SuccSize); |
| 2369 | } |
| 2370 | return BPI->getEdgeProbability(Src: SrcBB, Dst: DstBB); |
| 2371 | } |
| 2372 | |
| 2373 | void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src, |
| 2374 | MachineBasicBlock *Dst, |
| 2375 | BranchProbability Prob) { |
| 2376 | if (!FuncInfo.BPI) |
| 2377 | Src->addSuccessorWithoutProb(Succ: Dst); |
| 2378 | else { |
| 2379 | if (Prob.isUnknown()) |
| 2380 | Prob = getEdgeProbability(Src, Dst); |
| 2381 | Src->addSuccessor(Succ: Dst, Prob); |
| 2382 | } |
| 2383 | } |
| 2384 | |
static bool InBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}

/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
/// This function emits a branch and is used at the leaves of an OR or an
/// AND operator tree.
void
SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
                                                  MachineBasicBlock *TBB,
                                                  MachineBasicBlock *FBB,
                                                  MachineBasicBlock *CurBB,
                                                  MachineBasicBlock *SwitchBB,
                                                  BranchProbability TProb,
                                                  BranchProbability FProb,
                                                  bool InvertCond) {
  const BasicBlock *BB = CurBB->getBasicBlock();

  // If the leaf of the tree is a comparison, merge the condition into
  // the CaseBlock.
  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    // The operands of the cmp have to be in this block. We don't know
    // how to export them from some other block. If this is the first block
    // of the sequence, no exporting is needed.
    if (CurBB == SwitchBB ||
        (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
         isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
      ISD::CondCode Condition;
      if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
        ICmpInst::Predicate Pred =
            InvertCond ? IC->getInversePredicate() : IC->getPredicate();
        Condition = getICmpCondCode(Pred);
      } else {
        const FCmpInst *FC = cast<FCmpInst>(Cond);
        FCmpInst::Predicate Pred =
            InvertCond ? FC->getInversePredicate() : FC->getPredicate();
        Condition = getFCmpCondCode(Pred);
        if (TM.Options.NoNaNsFPMath)
          Condition = getFCmpCodeWithoutNaN(Condition);
      }

      CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
                   TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
      SL->SwitchCases.push_back(CB);
      return;
    }
  }

  // Create a CaseBlock record representing this branch.
  ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
  CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
               nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
  SL->SwitchCases.push_back(CB);
}

// Collect dependencies on V recursively. This is used for the cost analysis in
// `shouldKeepJumpConditionsTogether`.
static bool collectInstructionDeps(
    SmallMapVector<const Instruction *, bool, 8> *Deps, const Value *V,
    SmallMapVector<const Instruction *, bool, 8> *Necessary = nullptr,
    unsigned Depth = 0) {
  // Return false if we have an incomplete count.
  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return false;

  auto *I = dyn_cast<Instruction>(V);
  if (I == nullptr)
    return true;

  if (Necessary != nullptr) {
    // This instruction is necessary for the other side of the condition so
    // don't count it.
    if (Necessary->contains(I))
      return true;
  }

  // Already added this dep.
  if (!Deps->try_emplace(I, false).second)
    return true;

  for (unsigned OpIdx = 0, E = I->getNumOperands(); OpIdx < E; ++OpIdx)
    if (!collectInstructionDeps(Deps, I->getOperand(OpIdx), Necessary,
                                Depth + 1))
      return false;
  return true;
}


bool SelectionDAGBuilder::shouldKeepJumpConditionsTogether(
    const FunctionLoweringInfo &FuncInfo, const BranchInst &I,
    Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs,
    TargetLoweringBase::CondMergingParams Params) const {
  if (I.getNumSuccessors() != 2)
    return false;

  if (!I.isConditional())
    return false;

  if (Params.BaseCost < 0)
    return false;

  // Baseline cost.
  InstructionCost CostThresh = Params.BaseCost;

  BranchProbabilityInfo *BPI = nullptr;
  if (Params.LikelyBias || Params.UnlikelyBias)
    BPI = FuncInfo.BPI;
  if (BPI != nullptr) {
    // See if we are either likely to get an early out or compute both lhs/rhs
    // of the condition.
    BasicBlock *IfFalse = I.getSuccessor(0);
    BasicBlock *IfTrue = I.getSuccessor(1);

    std::optional<bool> Likely;
    if (BPI->isEdgeHot(I.getParent(), IfTrue))
      Likely = true;
    else if (BPI->isEdgeHot(I.getParent(), IfFalse))
      Likely = false;

    if (Likely) {
      if (Opc == (*Likely ? Instruction::And : Instruction::Or))
        // It's likely we will have to compute both the lhs and rhs of the
        // condition.
        CostThresh += Params.LikelyBias;
      else {
        if (Params.UnlikelyBias < 0)
          return false;
        // It's likely we will get an early out.
        CostThresh -= Params.UnlikelyBias;
      }
    }
  }

  if (CostThresh <= 0)
    return false;

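  // Illustrative example (hypothetical parameter values, not the actual
  // target defaults): with BaseCost = 4, LikelyBias = 2, and UnlikelyBias = 2,
  // a branch that is likely to compute both sides gets a threshold of 6
  // latency units, while one that is likely to take the early out must keep
  // its rhs-only cost within 2 units to stay merged.
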
  // Collect "all" instructions that the lhs condition depends on. Use a map
  // for stable iteration (to avoid the non-determinism of iterating over a
  // SmallPtrSet). The `bool` value is just a dummy.
  SmallMapVector<const Instruction *, bool, 8> LhsDeps, RhsDeps;
  collectInstructionDeps(&LhsDeps, Lhs);
  // Collect "all" instructions that the rhs condition depends on AND that
  // aren't dependencies of the lhs. This gives us an estimate of which
  // instructions we stand to save by splitting the condition.
  if (!collectInstructionDeps(&RhsDeps, Rhs, &LhsDeps))
    return false;
  // Add the compare instruction itself unless it's a dependency of the LHS.
  if (const auto *RhsI = dyn_cast<Instruction>(Rhs))
    if (!LhsDeps.contains(RhsI))
      RhsDeps.try_emplace(RhsI, false);
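  // For example (illustrative IR), given
  //   %a   = load i32, ptr %p
  //   %lhs = icmp eq i32 %a, 0
  //   %t   = add i32 %a, %b
  //   %rhs = icmp slt i32 %t, 0
  //   %c   = select i1 %lhs, i1 true, i1 %rhs   ; logical or
  // LhsDeps is {%a, %lhs} and RhsDeps is {%t, %rhs}: %a is excluded from
  // RhsDeps because the lhs needs it anyway, so only %t and %rhs count
  // against the threshold.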

  const auto &TLI = DAG.getTargetLoweringInfo();
  const auto &TTI =
      TLI.getTargetMachine().getTargetTransformInfo(*I.getFunction());

  InstructionCost CostOfIncluding = 0;
  // See if this instruction will need to be computed independently of whether
  // the RHS is.
  Value *BrCond = I.getCondition();
  auto ShouldCountInsn = [&RhsDeps, &BrCond](const Instruction *Ins) {
    for (const auto *U : Ins->users()) {
      // If the user is independent of the RHS calculation we don't need to
      // count it.
      if (auto *UIns = dyn_cast<Instruction>(U))
        if (UIns != BrCond && !RhsDeps.contains(UIns))
          return false;
    }
    return true;
  };

  // Prune instructions from RHS Deps that are dependencies of unrelated
  // instructions. The value (SelectionDAG::MaxRecursionDepth) is fairly
  // arbitrary and just meant to cap how much time we spend in the pruning
  // loop. It's highly unlikely to come into effect.
  const unsigned MaxPruneIters = SelectionDAG::MaxRecursionDepth;
  // Stop after a certain point. There is no incorrectness from including too
  // many instructions.
  for (unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
    const Instruction *ToDrop = nullptr;
    for (const auto &InsPair : RhsDeps) {
      if (!ShouldCountInsn(InsPair.first)) {
        ToDrop = InsPair.first;
        break;
      }
    }
    if (ToDrop == nullptr)
      break;
    RhsDeps.erase(ToDrop);
  }

  for (const auto &InsPair : RhsDeps) {
    // Finally accumulate latency that we can only attribute to computing the
    // RHS condition. Use latency because we are essentially trying to
    // calculate the cost of the dependency chain.
    // Possible TODO: We could try to estimate ILP and make this more precise.
    CostOfIncluding +=
        TTI.getInstructionCost(InsPair.first, TargetTransformInfo::TCK_Latency);

    if (CostOfIncluding > CostThresh)
      return false;
  }
  return true;
}


void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
                                               MachineBasicBlock *TBB,
                                               MachineBasicBlock *FBB,
                                               MachineBasicBlock *CurBB,
                                               MachineBasicBlock *SwitchBB,
                                               Instruction::BinaryOps Opc,
                                               BranchProbability TProb,
                                               BranchProbability FProb,
                                               bool InvertCond) {
  // Skip over a NOT that is not part of the tree, and remember to invert the
  // op and operands at the next level.
  Value *NotCond;
  if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
      InBlock(NotCond, CurBB->getBasicBlock())) {
    FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
                         !InvertCond);
    return;
  }

  const Instruction *BOp = dyn_cast<Instruction>(Cond);
  const Value *BOpOp0, *BOpOp1;
  // Compute the effective opcode for Cond, taking into account whether it
  // needs to be inverted, e.g.
  //   and (not (or A, B)), C
  // gets lowered as
  //   and (and (not A, not B), C)
  Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
  if (BOp) {
    BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
               ? Instruction::And
               : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
                      ? Instruction::Or
                      : (Instruction::BinaryOps)0);
    if (InvertCond) {
      if (BOpc == Instruction::And)
        BOpc = Instruction::Or;
      else if (BOpc == Instruction::Or)
        BOpc = Instruction::And;
    }
  }

  // If this node is not part of the or/and tree, emit it as a branch.
  // Note that all nodes in the tree should have the same opcode.
  bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
  if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
      !InBlock(BOpOp0, CurBB->getBasicBlock()) ||
      !InBlock(BOpOp1, CurBB->getBasicBlock())) {
    EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
                                 TProb, FProb, InvertCond);
    return;
  }

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI(CurBB);
  MachineFunction &MF = DAG.getMachineFunction();
  MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    // BB1:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
    //   = TrueProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
    // A/(1+B) and 2B/(1+B). This choice assumes that
    //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
    // Another choice is to assume TrueProb for BB1 equals TrueProb for
    // TmpBB, but the math is more complicated.
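    // For example, with A = TProb = 3/10 and B = FProb = 7/10, BB1 gets
    // probabilities 3/20 and 17/20, and TmpBB gets 3/17 and 14/17 after
    // normalization; the overall TrueProb is then 3/20 + (17/20)*(3/17)
    // = 3/10, as required.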

    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
    // Emit the LHS condition.
    FindMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
    SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    // BB1:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    // This requires creation of TmpBB after CurBB.

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
    //   = FalseProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
    // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
    // TrueProb for BB1 * FalseProb for TmpBB.
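    // For example, with A = TProb = 3/10 and B = FProb = 7/10, BB1 gets
    // probabilities 13/20 and 7/20, and TmpBB gets 6/13 and 7/13 after
    // normalization; the overall FalseProb is then 7/20 + (13/20)*(7/13)
    // = 7/10, as required.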

    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;
    // Emit the LHS condition.
    FindMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
    SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
  }
}

/// If the set of cases should be emitted as a series of branches, return true.
/// If we should emit this as a bunch of and/or'd together conditions, return
/// false.
bool
SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
  if (Cases.size() != 2) return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  // Handle: (X != null) | (Y != null) --> (X|Y) != 0
  // Handle: (X == null) & (Y == null) --> (X|Y) == 0
  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].CC == Cases[1].CC &&
      isa<Constant>(Cases[0].CmpRHS) &&
      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
    if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
      return false;
    if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
      return false;
  }

  return true;
}

void SelectionDAGBuilder::visitBr(const BranchInst &I) {
  MachineBasicBlock *BrMBB = FuncInfo.MBB;

  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.getMBB(I.getSuccessor(0));

  if (I.isUnconditional()) {
    // Update machine-CFG edges.
    BrMBB->addSuccessor(Succ0MBB);

    // If this is not a fall-through branch or optimizations are switched off,
    // emit the branch.
    if (Succ0MBB != NextBlock(BrMBB) ||
        TM.getOptLevel() == CodeGenOptLevel::None) {
      auto Br = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
                            getControlRoot(), DAG.getBasicBlock(Succ0MBB));
      setValue(&I, Br);
      DAG.setRoot(Br);
    }

    return;
  }

  // If this condition is one of the special cases we handle, do special stuff
  // now.
  const Value *CondVal = I.getCondition();
  MachineBasicBlock *Succ1MBB = FuncInfo.getMBB(I.getSuccessor(1));

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // As long as jumps are not expensive (exceptions for multi-use logic ops,
  // unpredictable branches, and vector extracts because those jumps are likely
  // expensive for any target), this should improve performance.
  // For example, instead of something like:
  //     cmp A, B
  //     C = seteq
  //     cmp D, E
  //     F = setle
  //     or C, F
  //     jnz foo
  // Emit:
  //     cmp A, B
  //     je foo
  //     cmp D, E
  //     jle foo
  bool IsUnpredictable = I.hasMetadata(LLVMContext::MD_unpredictable);
  const Instruction *BOp = dyn_cast<Instruction>(CondVal);
  if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
      BOp->hasOneUse() && !IsUnpredictable) {
    Value *Vec;
    const Value *BOp0, *BOp1;
    Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
    if (match(BOp, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::And;
    else if (match(BOp, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::Or;

    if (Opcode &&
        !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
          match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value()))) &&
        !shouldKeepJumpConditionsTogether(
            FuncInfo, I, Opcode, BOp0, BOp1,
            DAG.getTargetLoweringInfo().getJumpConditionMergingParams(
                Opcode, BOp0, BOp1))) {
      FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, Opcode,
                           getEdgeProbability(BrMBB, Succ0MBB),
                           getEdgeProbability(BrMBB, Succ1MBB),
                           /*InvertCond=*/false);
      // If the compares in later blocks need to use values not currently
      // exported from this block, export them now. This block should always
      // be the first entry.
      assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (ShouldEmitAsBranches(SL->SwitchCases)) {
        for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
          ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS);
          ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS);
        }

        // Emit the branch for this block.
        visitSwitchCase(SL->SwitchCases[0], BrMBB);
        SL->SwitchCases.erase(SL->SwitchCases.begin());
        return;
      }

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      // SwitchCases.
      for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
        FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB);

      SL->SwitchCases.clear();
    }
  }

  // Create a CaseBlock record representing this branch.
  CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
               nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc(),
               BranchProbability::getUnknown(), BranchProbability::getUnknown(),
               IsUnpredictable);

  // Use visitSwitchCase to actually insert the fast branch sequence for this
  // cond branch.
  visitSwitchCase(CB, BrMBB);
}

/// visitSwitchCase - Emits the necessary code to represent a single node in
/// the binary search tree resulting from lowering a switch instruction.
void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
                                          MachineBasicBlock *SwitchBB) {
  SDValue Cond;
  SDValue CondLHS = getValue(CB.CmpLHS);
  SDLoc dl = CB.DL;

  if (CB.CC == ISD::SETTRUE) {
    // Branch or fall through to TrueBB.
    addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
    SwitchBB->normalizeSuccProbs();
    if (CB.TrueBB != NextBlock(SwitchBB)) {
      DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(),
                              DAG.getBasicBlock(CB.TrueBB)));
    }
    return;
  }

  auto &TLI = DAG.getTargetLoweringInfo();
  EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());

  // Build the setcc now.
  if (!CB.CmpMHS) {
    // Fold "(X == true)" to X and "(X == false)" to !X to
    // handle common cases produced by branch lowering.
    if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
        CB.CC == ISD::SETEQ)
      Cond = CondLHS;
    else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
             CB.CC == ISD::SETEQ) {
      SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
      Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
    } else {
      SDValue CondRHS = getValue(CB.CmpRHS);

      // If a pointer's DAG type is larger than its memory type then the DAG
      // values are zero-extended. This breaks signed comparisons so truncate
      // back to the underlying type before doing the compare.
      if (CondLHS.getValueType() != MemVT) {
        CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);
        CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);
      }
      Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC);
    }
  } else {
    assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
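
    // A range [Low, High] is lowered as the unsigned comparison
    // (X - Low) <=u (High - Low); e.g. the range [5, 12] becomes
    // (X - 5) <=u 7. When Low is the signed minimum value, the subtraction
    // is unnecessary and a single signed X <=s High suffices.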
    const APInt &Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt &High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    SDValue CmpOp = getValue(CB.CmpMHS);
    EVT VT = CmpOp.getValueType();

    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
                          ISD::SETLE);
    } else {
      SDValue SUB = DAG.getNode(ISD::SUB, dl,
                                VT, CmpOp, DAG.getConstant(Low, dl, VT));
      Cond = DAG.getSetCC(dl, MVT::i1, SUB,
                          DAG.getConstant(High - Low, dl, VT), ISD::SETULE);
    }
  }

  // Update successor info.
  addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
  // TrueBB and FalseBB are always different unless the incoming IR is
  // degenerate. This only happens when running llc on weird IR.
  if (CB.TrueBB != CB.FalseBB)
    addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
  SwitchBB->normalizeSuccProbs();

  // If the lhs block is the next block, invert the condition so that we can
  // fall through to the lhs instead of the rhs block.
  if (CB.TrueBB == NextBlock(SwitchBB)) {
    std::swap(CB.TrueBB, CB.FalseBB);
    SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
    Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
  }

  SDNodeFlags Flags;
  Flags.setUnpredictable(CB.IsUnpredictable);
  SDValue BrCond = DAG.getNode(ISD::BRCOND, dl, MVT::Other, getControlRoot(),
                               Cond, DAG.getBasicBlock(CB.TrueBB), Flags);

  setValue(CurInst, BrCond);

  // Insert the false branch. Do this even if it's a fall through branch,
  // this makes it easier to do DAG optimizations which require inverting
  // the branch condition.
  BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
                       DAG.getBasicBlock(CB.FalseBB));

  DAG.setRoot(BrCond);
}

/// visitJumpTable - Emit JumpTable node in the current MBB.
void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {
  // Emit the code for the jump table.
  assert(JT.SL && "Should set SDLoc for SelectionDAG!");
  assert(JT.Reg && "Should lower JT Header first!");
  EVT PTy = DAG.getTargetLoweringInfo().getJumpTableRegTy(DAG.getDataLayout());
  SDValue Index = DAG.getCopyFromReg(getControlRoot(), *JT.SL, JT.Reg, PTy);
  SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
  SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, *JT.SL, MVT::Other,
                                    Index.getValue(1), Table, Index);
  DAG.setRoot(BrJumpTable);
}

/// visitJumpTableHeader - This function emits the code needed to produce an
/// index into the jump table from the value of the switch condition.
void SelectionDAGBuilder::visitJumpTableHeader(SwitchCG::JumpTable &JT,
                                               JumpTableHeader &JTH,
                                               MachineBasicBlock *SwitchBB) {
  assert(JT.SL && "Should set SDLoc for SelectionDAG!");
  const SDLoc &dl = *JT.SL;

  // Subtract the lowest switch case value from the value being switched on.
  SDValue SwitchOp = getValue(JTH.SValue);
  EVT VT = SwitchOp.getValueType();
  SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
                            DAG.getConstant(JTH.First, dl, VT));

  // The SDNode we just created, which holds the value being switched on minus
  // the smallest case value, needs to be copied to a virtual register so it
  // can be used as an index into the jump table in a subsequent basic block.
  // This value may be smaller or larger than the target's pointer type, and
  // therefore require extension or truncation.
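  // For example, for a switch over cases {10, 11, 13, 14} lowered as a jump
  // table, the header computes Index = X - 10 and (below) branches to the
  // default block when Index >u 4, i.e. JTH.Last - JTH.First.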
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SwitchOp =
      DAG.getZExtOrTrunc(Sub, dl, TLI.getJumpTableRegTy(DAG.getDataLayout()));

  Register JumpTableReg =
      FuncInfo.CreateReg(TLI.getJumpTableRegTy(DAG.getDataLayout()));
  SDValue CopyTo =
      DAG.getCopyToReg(getControlRoot(), dl, JumpTableReg, SwitchOp);
  JT.Reg = JumpTableReg;

  if (!JTH.FallthroughUnreachable) {
    // Emit the range check for the jump table, and branch to the default block
    // for the switch statement if the value being switched on exceeds the
    // largest case in the switch.
    SDValue CMP = DAG.getSetCC(
        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                   Sub.getValueType()),
        Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);

    SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
                                 MVT::Other, CopyTo, CMP,
                                 DAG.getBasicBlock(JT.Default));

    // Avoid emitting unnecessary branches to the next block.
    if (JT.MBB != NextBlock(SwitchBB))
      BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
                           DAG.getBasicBlock(JT.MBB));

    DAG.setRoot(BrCond);
  } else {
    // Avoid emitting unnecessary branches to the next block.
    if (JT.MBB != NextBlock(SwitchBB))
      DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
                              DAG.getBasicBlock(JT.MBB)));
    else
      DAG.setRoot(CopyTo);
  }
}

/// Create a LOAD_STACK_GUARD node, and let it carry the target specific global
/// variable if there exists one.
static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue &Chain) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
  EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
  MachineFunction &MF = DAG.getMachineFunction();
  Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
  MachineSDNode *Node =
      DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
  if (Global) {
    MachinePointerInfo MPInfo(Global);
    auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
                 MachineMemOperand::MODereferenceable;
    MachineMemOperand *MemRef = MF.getMachineMemOperand(
        MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlign(PtrTy));
    DAG.setNodeMemRefs(Node, {MemRef});
  }
  if (PtrTy != PtrMemTy)
    return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy);
  return SDValue(Node, 0);
}

/// Codegen a new tail for a stack protector check ParentMBB which has had its
/// tail spliced into a stack protector check success bb.
///
/// For a high level explanation of how this fits into the stack protector
/// generation see the comment on the declaration of class
/// StackProtectorDescriptor.
void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
                                                  MachineBasicBlock *ParentBB) {
  // First create the loads to the guard/stack slot for the comparison.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  auto &DL = DAG.getDataLayout();
  EVT PtrTy = TLI.getFrameIndexTy(DL);
  EVT PtrMemTy = TLI.getPointerMemTy(DL, DL.getAllocaAddrSpace());

  MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
  int FI = MFI.getStackProtectorIndex();

  SDValue Guard;
  SDLoc dl = getCurSDLoc();
  SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
  const Module &M = *ParentBB->getParent()->getFunction().getParent();
  Align Align = DL.getPrefTypeAlign(
      PointerType::get(M.getContext(), DL.getAllocaAddrSpace()));

  // Generate code to load the content of the guard slot.
  SDValue GuardVal = DAG.getLoad(
      PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
      MachineMemOperand::MOVolatile);

  if (TLI.useStackGuardXorFP())
    GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);

  // If we're using function-based instrumentation, call the guard check
  // function.
  if (SPD.shouldEmitFunctionBasedCheckStackProtector()) {
    // Get the guard check function from the target and verify it exists since
    // we're using function-based instrumentation.
    const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M);
    assert(GuardCheckFn && "Guard check function is null");

    // The target provides a guard check function to validate the guard value.
    // Generate a call to that function with the content of the guard slot as
    // argument.
    FunctionType *FnTy = GuardCheckFn->getFunctionType();
    assert(FnTy->getNumParams() == 1 && "Invalid function signature");

    TargetLowering::ArgListTy Args;
    TargetLowering::ArgListEntry Entry;
    Entry.Node = GuardVal;
    Entry.Ty = FnTy->getParamType(0);
    if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
      Entry.IsInReg = true;
    Args.push_back(Entry);

    TargetLowering::CallLoweringInfo CLI(DAG);
    CLI.setDebugLoc(getCurSDLoc())
        .setChain(DAG.getEntryNode())
        .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
                   getValue(GuardCheckFn), std::move(Args));

    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
    DAG.setRoot(Result.second);
    return;
  }

  // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
  // Otherwise, emit a volatile load to retrieve the stack guard value.
  SDValue Chain = DAG.getEntryNode();
  if (TLI.useLoadStackGuardNode(M)) {
    Guard = getLoadStackGuard(DAG, dl, Chain);
  } else {
    const Value *IRGuard = TLI.getSDagStackGuard(M);
    SDValue GuardPtr = getValue(IRGuard);

    Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
                        MachinePointerInfo(IRGuard, 0), Align,
                        MachineMemOperand::MOVolatile);
  }

  // Perform the comparison via a getsetcc.
  SDValue Cmp = DAG.getSetCC(
      dl, TLI.getSetCCResultType(DL, *DAG.getContext(), Guard.getValueType()),
      Guard, GuardVal, ISD::SETNE);

  // If the guard/stackslot do not equal, branch to failure MBB.
  SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
                               MVT::Other, GuardVal.getOperand(0),
                               Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
  // Otherwise branch to success MBB.
  SDValue Br = DAG.getNode(ISD::BR, dl,
                           MVT::Other, BrCond,
                           DAG.getBasicBlock(SPD.getSuccessMBB()));

  DAG.setRoot(Br);
}

/// Codegen the failure basic block for a stack protector check.
///
/// A failure stack protector machine basic block consists simply of a call to
/// __stack_chk_fail().
///
/// For a high level explanation of how this fits into the stack protector
/// generation see the comment on the declaration of class
/// StackProtectorDescriptor.
void SelectionDAGBuilder::visitSPDescriptorFailure(
    StackProtectorDescriptor &SPD) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  MachineBasicBlock *ParentBB = SPD.getParentMBB();
  const Module &M = *ParentBB->getParent()->getFunction().getParent();
  SDValue Chain;

  // For -Oz builds with a guard check function, we use function-based
  // instrumentation. Otherwise, if we have a guard check function, we call it
  // in the failure block.
  auto *GuardCheckFn = TLI.getSSPStackGuardCheck(M);
  if (GuardCheckFn && !SPD.shouldEmitFunctionBasedCheckStackProtector()) {
    // First create the loads to the guard/stack slot for the comparison.
    auto &DL = DAG.getDataLayout();
    EVT PtrTy = TLI.getFrameIndexTy(DL);
    EVT PtrMemTy = TLI.getPointerMemTy(DL, DL.getAllocaAddrSpace());

    MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
    int FI = MFI.getStackProtectorIndex();

    SDLoc dl = getCurSDLoc();
    SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
    Align Align = DL.getPrefTypeAlign(
        PointerType::get(M.getContext(), DL.getAllocaAddrSpace()));

    // Generate code to load the content of the guard slot.
    SDValue GuardVal = DAG.getLoad(
        PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
        MachineMemOperand::MOVolatile);

    if (TLI.useStackGuardXorFP())
      GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);

    // The target provides a guard check function to validate the guard value.
    // Generate a call to that function with the content of the guard slot as
    // argument.
    FunctionType *FnTy = GuardCheckFn->getFunctionType();
    assert(FnTy->getNumParams() == 1 && "Invalid function signature");

    TargetLowering::ArgListTy Args;
    TargetLowering::ArgListEntry Entry;
    Entry.Node = GuardVal;
    Entry.Ty = FnTy->getParamType(0);
    if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
      Entry.IsInReg = true;
    Args.push_back(Entry);

    TargetLowering::CallLoweringInfo CLI(DAG);
    CLI.setDebugLoc(getCurSDLoc())
        .setChain(DAG.getEntryNode())
        .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
                   getValue(GuardCheckFn), std::move(Args));

    Chain = TLI.LowerCallTo(CLI).second;
  } else {
    TargetLowering::MakeLibCallOptions CallOptions;
    CallOptions.setDiscardResult(true);
    Chain = TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
                            {}, CallOptions, getCurSDLoc())
                .second;
  }

  // Emit a trap instruction if we are required to do so.
  const TargetOptions &TargetOpts = DAG.getTarget().Options;
  if (TargetOpts.TrapUnreachable && !TargetOpts.NoTrapAfterNoreturn)
    Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);

  DAG.setRoot(Chain);
}

/// visitBitTestHeader - This function emits the code needed to produce a value
/// suitable for "bit tests".
void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
                                             MachineBasicBlock *SwitchBB) {
  SDLoc dl = getCurSDLoc();

  // Subtract the minimum value.
  SDValue SwitchOp = getValue(B.SValue);
  EVT VT = SwitchOp.getValueType();
  SDValue RangeSub =
      DAG.getNode(ISD::SUB, dl, VT, SwitchOp, DAG.getConstant(B.First, dl, VT));

  // Determine the type of the test operands.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  bool UsePtrType = false;
  if (!TLI.isTypeLegal(VT)) {
    UsePtrType = true;
  } else {
    for (const BitTestCase &Case : B.Cases)
      if (!isUIntN(VT.getSizeInBits(), Case.Mask)) {
        // Switch table case ranges are encoded into a series of masks. Just
        // use the pointer type, it's guaranteed to fit.
        UsePtrType = true;
        break;
      }
  }
  SDValue Sub = RangeSub;
  if (UsePtrType) {
    VT = TLI.getPointerTy(DAG.getDataLayout());
    Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
  }

  B.RegVT = VT.getSimpleVT();
  B.Reg = FuncInfo.CreateReg(B.RegVT);
  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);

  MachineBasicBlock *MBB = B.Cases[0].ThisBB;

  if (!B.FallthroughUnreachable)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);
  SwitchBB->normalizeSuccProbs();

  SDValue Root = CopyTo;
  if (!B.FallthroughUnreachable) {
    // Conditional branch to the default block.
    SDValue RangeCmp = DAG.getSetCC(dl,
        TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                               RangeSub.getValueType()),
        RangeSub, DAG.getConstant(B.Range, dl, RangeSub.getValueType()),
        ISD::SETUGT);

    Root = DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
                       DAG.getBasicBlock(B.Default));
  }

  // Avoid emitting unnecessary branches to the next block.
  if (MBB != NextBlock(SwitchBB))
    Root = DAG.getNode(ISD::BR, dl, MVT::Other, Root, DAG.getBasicBlock(MBB));

  DAG.setRoot(Root);
}

/// visitBitTestCase - This function produces one "bit test".
void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
                                           MachineBasicBlock *NextMBB,
                                           BranchProbability BranchProbToNext,
                                           Register Reg, BitTestCase &B,
                                           MachineBasicBlock *SwitchBB) {
  SDLoc dl = getCurSDLoc();
  MVT VT = BB.RegVT;
  SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
  SDValue Cmp;
  unsigned PopCount = llvm::popcount(B.Mask);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (PopCount == 1) {
    // Testing for a single bit; just compare the shift count with what it
    // would need to be to shift a 1 bit in that position.
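    // E.g. for Mask == 0b0100 this emits ShiftOp == 2 (the countr_zero of the
    // mask), since shifting 1 left by 2 is the only way to land on the set
    // bit.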
    Cmp = DAG.getSetCC(
        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
        ShiftOp, DAG.getConstant(llvm::countr_zero(B.Mask), dl, VT),
        ISD::SETEQ);
  } else if (PopCount == BB.Range) {
    // There is only one zero bit in the range, test for it directly.
    Cmp = DAG.getSetCC(
        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
        ShiftOp, DAG.getConstant(llvm::countr_one(B.Mask), dl, VT), ISD::SETNE);
  } else {
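    // General case: test (1 << ShiftOp) & Mask != 0. E.g. with Mask == 0b1010
    // the test succeeds for shift amounts 1 and 3.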
    // Make desired shift.
    SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
                                    DAG.getConstant(1, dl, VT), ShiftOp);

    // Emit bit tests and jumps.
    SDValue AndOp = DAG.getNode(ISD::AND, dl,
                                VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
    Cmp = DAG.getSetCC(
        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
        AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
  }

  // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
  addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
  // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
  // one as they are relative probabilities (and thus work more like weights),
  // and hence we need to normalize them to let the sum of them become one.
  SwitchBB->normalizeSuccProbs();

  SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
                              MVT::Other, getControlRoot(),
                              Cmp, DAG.getBasicBlock(B.TargetBB));

  // Avoid emitting unnecessary branches to the next block.
  if (NextMBB != NextBlock(SwitchBB))
    BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
                        DAG.getBasicBlock(NextMBB));

  DAG.setRoot(BrAnd);
}

void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
  MachineBasicBlock *InvokeMBB = FuncInfo.MBB;

  // Retrieve successors. Look through artificial IR level blocks like
  // catchswitch for successors.
  MachineBasicBlock *Return = FuncInfo.getMBB(I.getSuccessor(0));
  const BasicBlock *EHPadBB = I.getSuccessor(1);
  MachineBasicBlock *EHPadMBB = FuncInfo.getMBB(EHPadBB);

  // Deopt and ptrauth bundles are lowered in helper functions, and we don't
  // have to do anything here to lower funclet bundles.
  if (I.hasOperandBundlesOtherThan(
          {LLVMContext::OB_deopt, LLVMContext::OB_gc_transition,
           LLVMContext::OB_gc_live, LLVMContext::OB_funclet,
           LLVMContext::OB_cfguardtarget, LLVMContext::OB_ptrauth,
           LLVMContext::OB_clang_arc_attachedcall}))
    reportFatalUsageError(
        "cannot lower invokes with arbitrary operand bundles!");

  const Value *Callee(I.getCalledOperand());
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    visitInlineAsm(I, EHPadBB);
  else if (Fn && Fn->isIntrinsic()) {
    switch (Fn->getIntrinsicID()) {
    default:
      llvm_unreachable("Cannot invoke this intrinsic");
    case Intrinsic::donothing:
      // Ignore invokes to @llvm.donothing: jump directly to the next BB.
    case Intrinsic::seh_try_begin:
    case Intrinsic::seh_scope_begin:
    case Intrinsic::seh_try_end:
    case Intrinsic::seh_scope_end:
      if (EHPadMBB)
        // This is a block referenced by the EH table, so the dtor funclet is
        // not removed by optimizations.
        EHPadMBB->setMachineBlockAddressTaken();
      break;
    case Intrinsic::experimental_patchpoint_void:
    case Intrinsic::experimental_patchpoint:
      visitPatchpoint(I, EHPadBB);
      break;
    case Intrinsic::experimental_gc_statepoint:
      LowerStatepoint(cast<GCStatepointInst>(I), EHPadBB);
      break;
    // wasm_throw, wasm_rethrow: This is usually done in visitTargetIntrinsic,
    // but these intrinsics are special because they can be invoked, so we
    // manually lower them to DAG nodes here.
    case Intrinsic::wasm_throw: {
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      std::array<SDValue, 4> Ops = {
          getControlRoot(), // inchain for the terminator node
          DAG.getTargetConstant(Intrinsic::wasm_throw, getCurSDLoc(),
                                TLI.getPointerTy(DAG.getDataLayout())),
          getValue(I.getArgOperand(0)), // tag
          getValue(I.getArgOperand(1))  // thrown value
      };
      SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
      DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
      break;
    }
    case Intrinsic::wasm_rethrow: {
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      std::array<SDValue, 2> Ops = {
          getControlRoot(), // inchain for the terminator node
          DAG.getTargetConstant(Intrinsic::wasm_rethrow, getCurSDLoc(),
                                TLI.getPointerTy(DAG.getDataLayout()))};
      SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
      DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
      break;
    }
    }
  } else if (I.hasDeoptState()) {
    // Currently we do not lower any intrinsic calls with deopt operand
    // bundles. Eventually we will support lowering the
    // @llvm.experimental.deoptimize intrinsic, and right now there are no
    // plans to support other intrinsics with deopt state.
    LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
  } else if (I.countOperandBundlesOfType(LLVMContext::OB_ptrauth)) {
    LowerCallSiteWithPtrAuthBundle(cast<CallBase>(I), EHPadBB);
  } else {
    LowerCallTo(I, getValue(Callee), false, false, EHPadBB);
  }

  // If the value of the invoke is used outside of its defining block, make it
  // available as a virtual register.
  // We already took care of the exported value for the statepoint instruction
  // during the call to LowerStatepoint.
  if (!isa<GCStatepointInst>(I)) {
    CopyToExportRegsIfNeeded(&I);
  }

  SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  BranchProbability EHPadBBProb =
      BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
          : BranchProbability::getZero();
  findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);

  // Update successor info.
  addSuccessorWithProb(InvokeMBB, Return);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
  }
  InvokeMBB->normalizeSuccProbs();

  // Drop into normal successor.
  DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
                          DAG.getBasicBlock(Return)));
}

void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
  MachineBasicBlock *CallBrMBB = FuncInfo.MBB;

  // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
  // have to do anything here to lower funclet bundles.
  if (I.hasOperandBundlesOtherThan(
          {LLVMContext::OB_deopt, LLVMContext::OB_funclet}))
    reportFatalUsageError(
        "cannot lower callbrs with arbitrary operand bundles!");

  assert(I.isInlineAsm() && "Only know how to handle inlineasm callbr");
  visitInlineAsm(I);
  CopyToExportRegsIfNeeded(&I);

  // Retrieve successors.
  SmallPtrSet<BasicBlock *, 8> Dests;
  Dests.insert(I.getDefaultDest());
  MachineBasicBlock *Return = FuncInfo.getMBB(I.getDefaultDest());

  // Update successor info.
  addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne());
  for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
    BasicBlock *Dest = I.getIndirectDest(i);
    MachineBasicBlock *Target = FuncInfo.getMBB(Dest);
    Target->setIsInlineAsmBrIndirectTarget();
    // If we introduce a type of asm goto statement that is permitted to use an
    // indirect call instruction to jump to its labels, then we should add a
    // call to Target->setMachineBlockAddressTaken() here, to mark the target
    // block as requiring a BTI.

    Target->setLabelMustBeEmitted();
    // Don't add duplicate machine successors.
    if (Dests.insert(Dest).second)
      addSuccessorWithProb(CallBrMBB, Target, BranchProbability::getZero());
  }
  CallBrMBB->normalizeSuccProbs();

  // Drop into default successor.
  DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
                          MVT::Other, getControlRoot(),
                          DAG.getBasicBlock(Return)));
}

void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
}

void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
  assert(FuncInfo.MBB->isEHPad() &&
         "Call to landingpad not in landing pad!");

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother to create these DAG nodes.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return;

  // If the landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of the
  // exception pointer or selector value from a token-typed landingpad is not
  // currently supported.
  if (LP.getType()->isTokenTy())
    return;

  SmallVector<EVT, 2> ValueVTs;
  SDLoc dl = getCurSDLoc();
  ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
  assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");

  // Get the two live-in registers as SDValues. The physregs have already been
  // copied into virtual registers.
  SDValue Ops[2];
  if (FuncInfo.ExceptionPointerVirtReg) {
    Ops[0] = DAG.getZExtOrTrunc(
        DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                           FuncInfo.ExceptionPointerVirtReg,
                           TLI.getPointerTy(DAG.getDataLayout())),
        dl, ValueVTs[0]);
  } else {
    Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
  }
  Ops[1] = DAG.getZExtOrTrunc(
      DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                         FuncInfo.ExceptionSelectorVirtReg,
                         TLI.getPointerTy(DAG.getDataLayout())),
      dl, ValueVTs[1]);

  // Merge into one.
  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
                            DAG.getVTList(ValueVTs), Ops);
  setValue(&LP, Res);
}

| 3549 | |
| 3550 | void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First, |
| 3551 | MachineBasicBlock *Last) { |
| 3552 | // Update JTCases. |
| 3553 | for (JumpTableBlock &JTB : SL->JTCases) |
| 3554 | if (JTB.first.HeaderBB == First) |
| 3555 | JTB.first.HeaderBB = Last; |
| 3556 | |
| 3557 | // Update BitTestCases. |
| 3558 | for (BitTestBlock &BTB : SL->BitTestCases) |
| 3559 | if (BTB.Parent == First) |
| 3560 | BTB.Parent = Last; |
| 3561 | } |
| 3562 | |
| 3563 | void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) { |
| 3564 | MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB; |
| 3565 | |
| 3566 | // Update machine-CFG edges with unique successors. |
| 3567 | SmallSet<BasicBlock*, 32> Done; |
| 3568 | for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) { |
| 3569 | BasicBlock *BB = I.getSuccessor(i); |
| 3570 | bool Inserted = Done.insert(Ptr: BB).second; |
| 3571 | if (!Inserted) |
| 3572 | continue; |
| 3573 | |
| 3574 | MachineBasicBlock *Succ = FuncInfo.getMBB(BB); |
| 3575 | addSuccessorWithProb(Src: IndirectBrMBB, Dst: Succ); |
| 3576 | } |
| 3577 | IndirectBrMBB->normalizeSuccProbs(); |
| 3578 | |
| 3579 | DAG.setRoot(DAG.getNode(Opcode: ISD::BRIND, DL: getCurSDLoc(), |
| 3580 | VT: MVT::Other, N1: getControlRoot(), |
| 3581 | N2: getValue(V: I.getAddress()))); |
| 3582 | } |
| 3583 | |
| 3584 | void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) { |
| 3585 | if (!I.shouldLowerToTrap(TrapUnreachable: DAG.getTarget().Options.TrapUnreachable, |
| 3586 | NoTrapAfterNoreturn: DAG.getTarget().Options.NoTrapAfterNoreturn)) |
| 3587 | return; |
| 3588 | |
| 3589 | DAG.setRoot(DAG.getNode(Opcode: ISD::TRAP, DL: getCurSDLoc(), VT: MVT::Other, Operand: DAG.getRoot())); |
| 3590 | } |
| 3591 | |
| 3592 | void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) { |
| 3593 | SDNodeFlags Flags; |
| 3594 | if (auto *FPOp = dyn_cast<FPMathOperator>(Val: &I)) |
| 3595 | Flags.copyFMF(FPMO: *FPOp); |
| 3596 | |
| 3597 | SDValue Op = getValue(V: I.getOperand(i: 0)); |
| 3598 | SDValue UnNodeValue = DAG.getNode(Opcode, DL: getCurSDLoc(), VT: Op.getValueType(), |
| 3599 | Operand: Op, Flags); |
| 3600 | setValue(V: &I, NewN: UnNodeValue); |
| 3601 | } |
| 3602 | |
| 3603 | void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) { |
| 3604 | SDNodeFlags Flags; |
| 3605 | if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(Val: &I)) { |
| 3606 | Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap()); |
| 3607 | Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap()); |
| 3608 | } |
| 3609 | if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(Val: &I)) |
| 3610 | Flags.setExact(ExactOp->isExact()); |
| 3611 | if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(Val: &I)) |
| 3612 | Flags.setDisjoint(DisjointOp->isDisjoint()); |
| 3613 | if (auto *FPOp = dyn_cast<FPMathOperator>(Val: &I)) |
| 3614 | Flags.copyFMF(FPMO: *FPOp); |
| 3615 | |
| 3616 | SDValue Op1 = getValue(V: I.getOperand(i: 0)); |
| 3617 | SDValue Op2 = getValue(V: I.getOperand(i: 1)); |
| 3618 | SDValue BinNodeValue = DAG.getNode(Opcode, DL: getCurSDLoc(), VT: Op1.getValueType(), |
| 3619 | N1: Op1, N2: Op2, Flags); |
| 3620 | setValue(V: &I, NewN: BinNodeValue); |
| 3621 | } |
| 3622 | |
| 3623 | void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) { |
| 3624 | SDValue Op1 = getValue(V: I.getOperand(i: 0)); |
| 3625 | SDValue Op2 = getValue(V: I.getOperand(i: 1)); |
| 3626 | |
| 3627 | EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy( |
| 3628 | LHSTy: Op1.getValueType(), DL: DAG.getDataLayout()); |
| 3629 | |
| 3630 | // Coerce the shift amount to the right type if we can. This exposes the |
| 3631 | // truncate or zext to optimization early. |
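| | // (For example, on x86 the preferred scalar shift-amount type is i8, so the
| | // i64 amount of a 64-bit shift is truncated to i8 here rather than during
| | // legalization.)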
| 3632 | if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) { |
| 3633 | assert(ShiftTy.getSizeInBits() >= Log2_32_Ceil(Op1.getValueSizeInBits()) &&
| 3634 | "Unexpected shift type");
| 3635 | Op2 = DAG.getZExtOrTrunc(Op: Op2, DL: getCurSDLoc(), VT: ShiftTy); |
| 3636 | } |
| 3637 | |
| 3638 | bool nuw = false; |
| 3639 | bool nsw = false; |
| 3640 | bool exact = false; |
| 3641 | |
| 3642 | if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) { |
| 3643 | |
| 3644 | if (const OverflowingBinaryOperator *OFBinOp = |
| 3645 | dyn_cast<const OverflowingBinaryOperator>(Val: &I)) { |
| 3646 | nuw = OFBinOp->hasNoUnsignedWrap(); |
| 3647 | nsw = OFBinOp->hasNoSignedWrap(); |
| 3648 | } |
| 3649 | if (const PossiblyExactOperator *ExactOp = |
| 3650 | dyn_cast<const PossiblyExactOperator>(Val: &I)) |
| 3651 | exact = ExactOp->isExact(); |
| 3652 | } |
| 3653 | SDNodeFlags Flags; |
| 3654 | Flags.setExact(exact); |
| 3655 | Flags.setNoSignedWrap(nsw); |
| 3656 | Flags.setNoUnsignedWrap(nuw); |
| 3657 | SDValue Res = DAG.getNode(Opcode, DL: getCurSDLoc(), VT: Op1.getValueType(), N1: Op1, N2: Op2, |
| 3658 | Flags); |
| 3659 | setValue(V: &I, NewN: Res); |
| 3660 | } |
| 3661 | |
| 3662 | void SelectionDAGBuilder::visitSDiv(const User &I) { |
| 3663 | SDValue Op1 = getValue(V: I.getOperand(i: 0)); |
| 3664 | SDValue Op2 = getValue(V: I.getOperand(i: 1)); |
| 3665 | |
| 3666 | SDNodeFlags Flags; |
| 3667 | Flags.setExact(isa<PossiblyExactOperator>(Val: &I) && |
| 3668 | cast<PossiblyExactOperator>(Val: &I)->isExact()); |
| 3669 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::SDIV, DL: getCurSDLoc(), VT: Op1.getValueType(), N1: Op1, |
| 3670 | N2: Op2, Flags)); |
| 3671 | } |
| 3672 | |
| 3673 | void SelectionDAGBuilder::visitICmp(const ICmpInst &I) { |
| 3674 | ICmpInst::Predicate predicate = I.getPredicate(); |
| 3675 | SDValue Op1 = getValue(V: I.getOperand(i_nocapture: 0)); |
| 3676 | SDValue Op2 = getValue(V: I.getOperand(i_nocapture: 1)); |
| 3677 | ISD::CondCode Opcode = getICmpCondCode(Pred: predicate); |
| 3678 | |
| 3679 | auto &TLI = DAG.getTargetLoweringInfo(); |
| 3680 | EVT MemVT = |
| 3681 | TLI.getMemValueType(DL: DAG.getDataLayout(), Ty: I.getOperand(i_nocapture: 0)->getType()); |
| 3682 | |
| 3683 | // If a pointer's DAG type is larger than its memory type then the DAG values |
| 3684 | // are zero-extended. This breaks signed comparisons so truncate back to the |
| 3685 | // underlying type before doing the compare. |
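| | // (This arises on targets such as arm64_32, where a 32-bit pointer is
| | // carried zero-extended in a 64-bit DAG value: a signed "icmp slt" on two
| | // such pointers must compare the truncated 32-bit values.)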
| 3686 | if (Op1.getValueType() != MemVT) { |
| 3687 | Op1 = DAG.getPtrExtOrTrunc(Op: Op1, DL: getCurSDLoc(), VT: MemVT); |
| 3688 | Op2 = DAG.getPtrExtOrTrunc(Op: Op2, DL: getCurSDLoc(), VT: MemVT); |
| 3689 | } |
| 3690 | |
| 3691 | SDNodeFlags Flags; |
| 3692 | Flags.setSameSign(I.hasSameSign()); |
| 3693 | SelectionDAG::FlagInserter FlagsInserter(DAG, Flags); |
| 3694 | |
| 3695 | EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DL: DAG.getDataLayout(), |
| 3696 | Ty: I.getType()); |
| 3697 | setValue(V: &I, NewN: DAG.getSetCC(DL: getCurSDLoc(), VT: DestVT, LHS: Op1, RHS: Op2, Cond: Opcode)); |
| 3698 | } |
| 3699 | |
| 3700 | void SelectionDAGBuilder::visitFCmp(const FCmpInst &I) { |
| 3701 | FCmpInst::Predicate predicate = I.getPredicate(); |
| 3702 | SDValue Op1 = getValue(V: I.getOperand(i_nocapture: 0)); |
| 3703 | SDValue Op2 = getValue(V: I.getOperand(i_nocapture: 1)); |
| 3704 | |
| 3705 | ISD::CondCode Condition = getFCmpCondCode(Pred: predicate); |
| 3706 | auto *FPMO = cast<FPMathOperator>(Val: &I); |
| 3707 | if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath) |
| 3708 | Condition = getFCmpCodeWithoutNaN(CC: Condition); |
| 3709 | |
| 3710 | SDNodeFlags Flags; |
| 3711 | Flags.copyFMF(FPMO: *FPMO); |
| 3712 | SelectionDAG::FlagInserter FlagsInserter(DAG, Flags); |
| 3713 | |
| 3714 | EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DL: DAG.getDataLayout(), |
| 3715 | Ty: I.getType()); |
| 3716 | setValue(V: &I, NewN: DAG.getSetCC(DL: getCurSDLoc(), VT: DestVT, LHS: Op1, RHS: Op2, Cond: Condition)); |
| 3717 | } |
| 3718 | |
| 3719 | // Check that every user of the select's condition is itself a select, i.e.
| 3720 | // the condition is consumed only by selects (which necessarily share it).
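| | // For example, with "%c = icmp slt i32 %a, %b" and
| | // "%r = select i1 %c, i32 %a, i32 %b", the smin transform in visitSelect is
| | // only profitable if %c has no users besides selects, since the compare
| | // would otherwise remain live.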
| 3721 | static bool hasOnlySelectUsers(const Value *Cond) { |
| 3722 | return llvm::all_of(Range: Cond->users(), P: [](const Value *V) { |
| 3723 | return isa<SelectInst>(Val: V); |
| 3724 | }); |
| 3725 | } |
| 3726 | |
| 3727 | void SelectionDAGBuilder::visitSelect(const User &I) { |
| 3728 | SmallVector<EVT, 4> ValueVTs; |
| 3729 | ComputeValueVTs(TLI: DAG.getTargetLoweringInfo(), DL: DAG.getDataLayout(), Ty: I.getType(), |
| 3730 | ValueVTs); |
| 3731 | unsigned NumValues = ValueVTs.size(); |
| 3732 | if (NumValues == 0) return; |
| 3733 | |
| 3734 | SmallVector<SDValue, 4> Values(NumValues); |
| 3735 | SDValue Cond = getValue(V: I.getOperand(i: 0)); |
| 3736 | SDValue LHSVal = getValue(V: I.getOperand(i: 1)); |
| 3737 | SDValue RHSVal = getValue(V: I.getOperand(i: 2)); |
| 3738 | SmallVector<SDValue, 1> BaseOps(1, Cond); |
| 3739 | ISD::NodeType OpCode = |
| 3740 | Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT; |
| 3741 | |
| 3742 | bool IsUnaryAbs = false; |
| 3743 | bool Negate = false; |
| 3744 | |
| 3745 | SDNodeFlags Flags; |
| 3746 | if (auto *FPOp = dyn_cast<FPMathOperator>(Val: &I)) |
| 3747 | Flags.copyFMF(FPMO: *FPOp); |
| 3748 | |
| 3749 | Flags.setUnpredictable( |
| 3750 | cast<SelectInst>(Val: I).getMetadata(KindID: LLVMContext::MD_unpredictable)); |
| 3751 | |
| 3752 | // Min/max matching is only viable if all output VTs are the same. |
| 3753 | if (all_equal(Range&: ValueVTs)) { |
| 3754 | EVT VT = ValueVTs[0]; |
| 3755 | LLVMContext &Ctx = *DAG.getContext(); |
| 3756 | auto &TLI = DAG.getTargetLoweringInfo(); |
| 3757 | |
| 3758 | // We care about the legality of the operation after it has been type |
| 3759 | // legalized. |
| 3760 | while (TLI.getTypeAction(Context&: Ctx, VT) != TargetLoweringBase::TypeLegal) |
| 3761 | VT = TLI.getTypeToTransformTo(Context&: Ctx, VT); |
| 3762 | |
| 3763 | // If the vselect is legal, assume we want to leave this as a vector setcc + |
| 3764 | // vselect. Otherwise, if this is going to be scalarized, we want to see if |
| 3765 | // min/max is legal on the scalar type. |
| 3766 | bool UseScalarMinMax = VT.isVector() && |
| 3767 | !TLI.isOperationLegalOrCustom(Op: ISD::VSELECT, VT); |
| 3768 | |
| 3769 | // ValueTracking's select pattern matching does not account for -0.0, |
| 3770 | // so we can't lower to FMINIMUM/FMAXIMUM because those nodes specify that |
| 3771 | // -0.0 is less than +0.0. |
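| | // (minnum/maxnum may return either value for the pair (-0.0, +0.0), whereas
| | // FMINIMUM/FMAXIMUM are required to treat -0.0 as less than +0.0.)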
| 3772 | const Value *LHS, *RHS; |
| 3773 | auto SPR = matchSelectPattern(V: &I, LHS, RHS); |
| 3774 | ISD::NodeType Opc = ISD::DELETED_NODE; |
| 3775 | switch (SPR.Flavor) { |
| 3776 | case SPF_UMAX: Opc = ISD::UMAX; break; |
| 3777 | case SPF_UMIN: Opc = ISD::UMIN; break; |
| 3778 | case SPF_SMAX: Opc = ISD::SMAX; break; |
| 3779 | case SPF_SMIN: Opc = ISD::SMIN; break; |
| 3780 | case SPF_FMINNUM: |
| 3781 | switch (SPR.NaNBehavior) { |
| 3782 | case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
| 3783 | case SPNB_RETURNS_NAN: break; |
| 3784 | case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break; |
| 3785 | case SPNB_RETURNS_ANY: |
| 3786 | if (TLI.isOperationLegalOrCustom(Op: ISD::FMINNUM, VT) || |
| 3787 | (UseScalarMinMax && |
| 3788 | TLI.isOperationLegalOrCustom(Op: ISD::FMINNUM, VT: VT.getScalarType()))) |
| 3789 | Opc = ISD::FMINNUM; |
| 3790 | break; |
| 3791 | } |
| 3792 | break; |
| 3793 | case SPF_FMAXNUM: |
| 3794 | switch (SPR.NaNBehavior) { |
| 3795 | case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
| 3796 | case SPNB_RETURNS_NAN: break; |
| 3797 | case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break; |
| 3798 | case SPNB_RETURNS_ANY: |
| 3799 | if (TLI.isOperationLegalOrCustom(Op: ISD::FMAXNUM, VT) || |
| 3800 | (UseScalarMinMax && |
| 3801 | TLI.isOperationLegalOrCustom(Op: ISD::FMAXNUM, VT: VT.getScalarType()))) |
| 3802 | Opc = ISD::FMAXNUM; |
| 3803 | break; |
| 3804 | } |
| 3805 | break; |
| 3806 | case SPF_NABS: |
| 3807 | Negate = true; |
| 3808 | [[fallthrough]]; |
| 3809 | case SPF_ABS: |
| 3810 | IsUnaryAbs = true; |
| 3811 | Opc = ISD::ABS; |
| 3812 | break; |
| 3813 | default: break; |
| 3814 | } |
| 3815 | |
| 3816 | if (!IsUnaryAbs && Opc != ISD::DELETED_NODE && |
| 3817 | (TLI.isOperationLegalOrCustom(Op: Opc, VT) || |
| 3818 | (UseScalarMinMax && |
| 3819 | TLI.isOperationLegalOrCustom(Op: Opc, VT: VT.getScalarType()))) && |
| 3820 | // If the underlying comparison instruction is used by any other |
| 3821 | // instruction, the consumed instructions won't be destroyed, so it is |
| 3822 | // not profitable to convert to a min/max. |
| 3823 | hasOnlySelectUsers(Cond: cast<SelectInst>(Val: I).getCondition())) { |
| 3824 | OpCode = Opc; |
| 3825 | LHSVal = getValue(V: LHS); |
| 3826 | RHSVal = getValue(V: RHS); |
| 3827 | BaseOps.clear(); |
| 3828 | } |
| 3829 | |
| 3830 | if (IsUnaryAbs) { |
| 3831 | OpCode = Opc; |
| 3832 | LHSVal = getValue(V: LHS); |
| 3833 | BaseOps.clear(); |
| 3834 | } |
| 3835 | } |
| 3836 | |
| 3837 | if (IsUnaryAbs) { |
| 3838 | for (unsigned i = 0; i != NumValues; ++i) { |
| 3839 | SDLoc dl = getCurSDLoc(); |
| 3840 | EVT VT = LHSVal.getNode()->getValueType(ResNo: LHSVal.getResNo() + i); |
| 3841 | Values[i] = |
| 3842 | DAG.getNode(Opcode: OpCode, DL: dl, VT, Operand: LHSVal.getValue(R: LHSVal.getResNo() + i)); |
| 3843 | if (Negate) |
| 3844 | Values[i] = DAG.getNegative(Val: Values[i], DL: dl, VT); |
| 3845 | } |
| 3846 | } else { |
| 3847 | for (unsigned i = 0; i != NumValues; ++i) { |
| 3848 | SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end()); |
| 3849 | Ops.push_back(Elt: SDValue(LHSVal.getNode(), LHSVal.getResNo() + i)); |
| 3850 | Ops.push_back(Elt: SDValue(RHSVal.getNode(), RHSVal.getResNo() + i)); |
| 3851 | Values[i] = DAG.getNode( |
| 3852 | Opcode: OpCode, DL: getCurSDLoc(), |
| 3853 | VT: LHSVal.getNode()->getValueType(ResNo: LHSVal.getResNo() + i), Ops, Flags); |
| 3854 | } |
| 3855 | } |
| 3856 | |
| 3857 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::MERGE_VALUES, DL: getCurSDLoc(), |
| 3858 | VTList: DAG.getVTList(VTs: ValueVTs), Ops: Values)); |
| 3859 | } |
| 3860 | |
| 3861 | void SelectionDAGBuilder::visitTrunc(const User &I) { |
| 3862 | // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest). |
| 3863 | SDValue N = getValue(V: I.getOperand(i: 0)); |
| 3864 | EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DL: DAG.getDataLayout(), |
| 3865 | Ty: I.getType()); |
| 3866 | SDNodeFlags Flags; |
| 3867 | if (auto *Trunc = dyn_cast<TruncInst>(Val: &I)) { |
| 3868 | Flags.setNoSignedWrap(Trunc->hasNoSignedWrap()); |
| 3869 | Flags.setNoUnsignedWrap(Trunc->hasNoUnsignedWrap()); |
| 3870 | } |
| 3871 | |
| 3872 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::TRUNCATE, DL: getCurSDLoc(), VT: DestVT, Operand: N, Flags)); |
| 3873 | } |
| 3874 | |
| 3875 | void SelectionDAGBuilder::visitZExt(const User &I) { |
| 3876 | // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest). |
| 3877 | // ZExt also can't be a cast to bool for the same reason, so there is not much to do.
| 3878 | SDValue N = getValue(V: I.getOperand(i: 0)); |
| 3879 | auto &TLI = DAG.getTargetLoweringInfo(); |
| 3880 | EVT DestVT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 3881 | |
| 3882 | SDNodeFlags Flags; |
| 3883 | if (auto *PNI = dyn_cast<PossiblyNonNegInst>(Val: &I)) |
| 3884 | Flags.setNonNeg(PNI->hasNonNeg()); |
| 3885 | |
| 3886 | // Eagerly use nonneg information to canonicalize towards sign_extend if |
| 3887 | // that is the target's preference. |
| 3888 | // TODO: Let the target do this later. |
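| | // (64-bit RISC-V is the typical example: i32 values are kept sign-extended
| | // in 64-bit registers, so isSExtCheaperThanZExt() is true for i32 -> i64
| | // and a "zext nneg" lowers to SIGN_EXTEND.)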
| 3889 | if (Flags.hasNonNeg() && |
| 3890 | TLI.isSExtCheaperThanZExt(FromTy: N.getValueType(), ToTy: DestVT)) { |
| 3891 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: getCurSDLoc(), VT: DestVT, Operand: N)); |
| 3892 | return; |
| 3893 | } |
| 3894 | |
| 3895 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: getCurSDLoc(), VT: DestVT, Operand: N, Flags)); |
| 3896 | } |
| 3897 | |
| 3898 | void SelectionDAGBuilder::visitSExt(const User &I) { |
| 3899 | // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest). |
| 3900 | // SExt also can't be a cast to bool for the same reason, so there is not much to do.
| 3901 | SDValue N = getValue(V: I.getOperand(i: 0)); |
| 3902 | EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DL: DAG.getDataLayout(), |
| 3903 | Ty: I.getType()); |
| 3904 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: getCurSDLoc(), VT: DestVT, Operand: N)); |
| 3905 | } |
| 3906 | |
| 3907 | void SelectionDAGBuilder::visitFPTrunc(const User &I) { |
| 3908 | // FPTrunc is never a no-op cast, no need to check |
| 3909 | SDValue N = getValue(V: I.getOperand(i: 0)); |
| 3910 | SDLoc dl = getCurSDLoc(); |
| 3911 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 3912 | EVT DestVT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 3913 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::FP_ROUND, DL: dl, VT: DestVT, N1: N, |
| 3914 | N2: DAG.getTargetConstant( |
| 3915 | Val: 0, DL: dl, VT: TLI.getPointerTy(DL: DAG.getDataLayout())))); |
| 3916 | } |
| 3917 | |
| 3918 | void SelectionDAGBuilder::visitFPExt(const User &I) { |
| 3919 | // FPExt is never a no-op cast, no need to check |
| 3920 | SDValue N = getValue(V: I.getOperand(i: 0)); |
| 3921 | EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DL: DAG.getDataLayout(), |
| 3922 | Ty: I.getType()); |
| 3923 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::FP_EXTEND, DL: getCurSDLoc(), VT: DestVT, Operand: N)); |
| 3924 | } |
| 3925 | |
| 3926 | void SelectionDAGBuilder::visitFPToUI(const User &I) { |
| 3927 | // FPToUI is never a no-op cast, no need to check |
| 3928 | SDValue N = getValue(V: I.getOperand(i: 0)); |
| 3929 | EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DL: DAG.getDataLayout(), |
| 3930 | Ty: I.getType()); |
| 3931 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::FP_TO_UINT, DL: getCurSDLoc(), VT: DestVT, Operand: N)); |
| 3932 | } |
| 3933 | |
| 3934 | void SelectionDAGBuilder::visitFPToSI(const User &I) { |
| 3935 | // FPToSI is never a no-op cast, no need to check |
| 3936 | SDValue N = getValue(V: I.getOperand(i: 0)); |
| 3937 | EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DL: DAG.getDataLayout(), |
| 3938 | Ty: I.getType()); |
| 3939 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::FP_TO_SINT, DL: getCurSDLoc(), VT: DestVT, Operand: N)); |
| 3940 | } |
| 3941 | |
| 3942 | void SelectionDAGBuilder::visitUIToFP(const User &I) { |
| 3943 | // UIToFP is never a no-op cast, no need to check |
| 3944 | SDValue N = getValue(V: I.getOperand(i: 0)); |
| 3945 | EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DL: DAG.getDataLayout(), |
| 3946 | Ty: I.getType()); |
| 3947 | SDNodeFlags Flags; |
| 3948 | if (auto *PNI = dyn_cast<PossiblyNonNegInst>(Val: &I)) |
| 3949 | Flags.setNonNeg(PNI->hasNonNeg()); |
| 3950 | |
| 3951 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::UINT_TO_FP, DL: getCurSDLoc(), VT: DestVT, Operand: N, Flags)); |
| 3952 | } |
| 3953 | |
| 3954 | void SelectionDAGBuilder::visitSIToFP(const User &I) { |
| 3955 | // SIToFP is never a no-op cast, no need to check |
| 3956 | SDValue N = getValue(V: I.getOperand(i: 0)); |
| 3957 | EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DL: DAG.getDataLayout(), |
| 3958 | Ty: I.getType()); |
| 3959 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: getCurSDLoc(), VT: DestVT, Operand: N)); |
| 3960 | } |
| 3961 | |
| 3962 | void SelectionDAGBuilder::visitPtrToInt(const User &I) { |
| 3963 | // What to do depends on the size of the integer and the size of the pointer. |
| 3964 | // We can either truncate, zero extend, or no-op, accordingly. |
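| | // E.g. ptrtoint of a 32-bit pointer to i64 zero-extends, to i16 it
| | // truncates, and to i32 it is a no-op.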
| 3965 | SDValue N = getValue(V: I.getOperand(i: 0)); |
| 3966 | auto &TLI = DAG.getTargetLoweringInfo(); |
| 3967 | EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DL: DAG.getDataLayout(), |
| 3968 | Ty: I.getType()); |
| 3969 | EVT PtrMemVT = |
| 3970 | TLI.getMemValueType(DL: DAG.getDataLayout(), Ty: I.getOperand(i: 0)->getType()); |
| 3971 | N = DAG.getPtrExtOrTrunc(Op: N, DL: getCurSDLoc(), VT: PtrMemVT); |
| 3972 | N = DAG.getZExtOrTrunc(Op: N, DL: getCurSDLoc(), VT: DestVT); |
| 3973 | setValue(V: &I, NewN: N); |
| 3974 | } |
| 3975 | |
| 3976 | void SelectionDAGBuilder::visitIntToPtr(const User &I) { |
| 3977 | // What to do depends on the size of the integer and the size of the pointer. |
| 3978 | // We can either truncate, zero extend, or no-op, accordingly. |
| 3979 | SDValue N = getValue(V: I.getOperand(i: 0)); |
| 3980 | auto &TLI = DAG.getTargetLoweringInfo(); |
| 3981 | EVT DestVT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 3982 | EVT PtrMemVT = TLI.getMemValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 3983 | N = DAG.getZExtOrTrunc(Op: N, DL: getCurSDLoc(), VT: PtrMemVT); |
| 3984 | N = DAG.getPtrExtOrTrunc(Op: N, DL: getCurSDLoc(), VT: DestVT); |
| 3985 | setValue(V: &I, NewN: N); |
| 3986 | } |
| 3987 | |
| 3988 | void SelectionDAGBuilder::visitBitCast(const User &I) { |
| 3989 | SDValue N = getValue(V: I.getOperand(i: 0)); |
| 3990 | SDLoc dl = getCurSDLoc(); |
| 3991 | EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DL: DAG.getDataLayout(), |
| 3992 | Ty: I.getType()); |
| 3993 | |
| 3994 | // BitCast assures us that source and destination are the same size so this is |
| 3995 | // either a BITCAST or a no-op. |
| 3996 | if (DestVT != N.getValueType()) |
| 3997 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, |
| 3998 | VT: DestVT, Operand: N)); // convert types. |
| 3999 | // Check if the original LLVM IR Operand was a ConstantInt, because getValue() |
| 4000 | // might fold any kind of constant expression to an integer constant and that |
| 4001 | // is not what we are looking for. Only recognize a bitcast of a genuine |
| 4002 | // constant integer as an opaque constant. |
| 4003 | else if (ConstantInt *C = dyn_cast<ConstantInt>(Val: I.getOperand(i: 0)))
| 4004 | setValue(V: &I, NewN: DAG.getConstant(Val: C->getValue(), DL: dl, VT: DestVT, /*isTarget=*/false, |
| 4005 | /*isOpaque*/true)); |
| 4006 | else |
| 4007 | setValue(V: &I, NewN: N); // noop cast. |
| 4008 | } |
| 4009 | |
| 4010 | void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) { |
| 4011 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 4012 | const Value *SV = I.getOperand(i: 0); |
| 4013 | SDValue N = getValue(V: SV); |
| 4014 | EVT DestVT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 4015 | |
| 4016 | unsigned SrcAS = SV->getType()->getPointerAddressSpace(); |
| 4017 | unsigned DestAS = I.getType()->getPointerAddressSpace(); |
| 4018 | |
| 4019 | if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS)) |
| 4020 | N = DAG.getAddrSpaceCast(dl: getCurSDLoc(), VT: DestVT, Ptr: N, SrcAS, DestAS); |
| 4021 | |
| 4022 | setValue(V: &I, NewN: N); |
| 4023 | } |
| 4024 | |
| 4025 | void SelectionDAGBuilder::visitInsertElement(const User &I) { |
| 4026 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 4027 | SDValue InVec = getValue(V: I.getOperand(i: 0)); |
| 4028 | SDValue InVal = getValue(V: I.getOperand(i: 1)); |
| 4029 | SDValue InIdx = DAG.getZExtOrTrunc(Op: getValue(V: I.getOperand(i: 2)), DL: getCurSDLoc(), |
| 4030 | VT: TLI.getVectorIdxTy(DL: DAG.getDataLayout())); |
| 4031 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: getCurSDLoc(), |
| 4032 | VT: TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()), |
| 4033 | N1: InVec, N2: InVal, N3: InIdx)); |
| 4034 | } |
| 4035 | |
| 4036 | void SelectionDAGBuilder::visitExtractElement(const User &I) {
| 4037 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 4038 | SDValue InVec = getValue(V: I.getOperand(i: 0)); |
| 4039 | SDValue InIdx = DAG.getZExtOrTrunc(Op: getValue(V: I.getOperand(i: 1)), DL: getCurSDLoc(), |
| 4040 | VT: TLI.getVectorIdxTy(DL: DAG.getDataLayout())); |
| 4041 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: getCurSDLoc(), |
| 4042 | VT: TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()), |
| 4043 | N1: InVec, N2: InIdx)); |
| 4044 | } |
| 4045 | |
| 4046 | void SelectionDAGBuilder::visitShuffleVector(const User &I) { |
| 4047 | SDValue Src1 = getValue(V: I.getOperand(i: 0)); |
| 4048 | SDValue Src2 = getValue(V: I.getOperand(i: 1)); |
| 4049 | ArrayRef<int> Mask; |
| 4050 | if (auto *SVI = dyn_cast<ShuffleVectorInst>(Val: &I)) |
| 4051 | Mask = SVI->getShuffleMask(); |
| 4052 | else |
| 4053 | Mask = cast<ConstantExpr>(Val: I).getShuffleMask(); |
| 4054 | SDLoc DL = getCurSDLoc(); |
| 4055 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 4056 | EVT VT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 4057 | EVT SrcVT = Src1.getValueType(); |
| 4058 | |
| 4059 | if (all_of(Range&: Mask, P: [](int Elem) { return Elem == 0; }) && |
| 4060 | VT.isScalableVector()) { |
| 4061 | // Canonical splat form of first element of first input vector. |
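| | // E.g. "shufflevector <vscale x 4 x i32> %v, <vscale x 4 x i32> poison,
| | // <vscale x 4 x i32> zeroinitializer" becomes a SPLAT_VECTOR of element 0
| | // of %v.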
| 4062 | SDValue FirstElt = |
| 4063 | DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: SrcVT.getScalarType(), N1: Src1, |
| 4064 | N2: DAG.getVectorIdxConstant(Val: 0, DL)); |
| 4065 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::SPLAT_VECTOR, DL, VT, Operand: FirstElt)); |
| 4066 | return; |
| 4067 | } |
| 4068 | |
| 4069 | // For now, we only handle splats for scalable vectors. |
| 4070 | // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation |
| 4071 | // for targets that support a SPLAT_VECTOR for non-scalable vector types. |
| 4072 | assert(!VT.isScalableVector() && "Unsupported scalable vector shuffle");
| 4073 | |
| 4074 | unsigned SrcNumElts = SrcVT.getVectorNumElements(); |
| 4075 | unsigned MaskNumElts = Mask.size(); |
| 4076 | |
| 4077 | if (SrcNumElts == MaskNumElts) { |
| 4078 | setValue(V: &I, NewN: DAG.getVectorShuffle(VT, dl: DL, N1: Src1, N2: Src2, Mask)); |
| 4079 | return; |
| 4080 | } |
| 4081 | |
| 4082 | // Normalize the shuffle vector since mask and vector length don't match. |
| 4083 | if (SrcNumElts < MaskNumElts) { |
| 4084 | // The mask is longer than the source vectors. We can use CONCAT_VECTORS to
| 4085 | // make the source vectors and the mask the same length.
| 4086 | |
| 4087 | if (MaskNumElts % SrcNumElts == 0) { |
| 4088 | // Mask length is a multiple of the source vector length. |
| 4089 | // Check if the shuffle is some kind of concatenation of the input |
| 4090 | // vectors. |
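| | // E.g. with two <4 x i32> sources, the mask <0,1,2,3,4,5,6,7> is
| | // CONCAT_VECTORS(Src1, Src2) and <4,5,6,7,4,5,6,7> is
| | // CONCAT_VECTORS(Src2, Src2).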
| 4091 | unsigned NumConcat = MaskNumElts / SrcNumElts; |
| 4092 | bool IsConcat = true; |
| 4093 | SmallVector<int, 8> ConcatSrcs(NumConcat, -1); |
| 4094 | for (unsigned i = 0; i != MaskNumElts; ++i) { |
| 4095 | int Idx = Mask[i]; |
| 4096 | if (Idx < 0) |
| 4097 | continue; |
| 4098 | // Ensure the indices in each SrcVT sized piece are sequential and that |
| 4099 | // the same source is used for the whole piece. |
| 4100 | if ((Idx % SrcNumElts != (i % SrcNumElts)) || |
| 4101 | (ConcatSrcs[i / SrcNumElts] >= 0 && |
| 4102 | ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) { |
| 4103 | IsConcat = false; |
| 4104 | break; |
| 4105 | } |
| 4106 | // Remember which source this index came from. |
| 4107 | ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts; |
| 4108 | } |
| 4109 | |
| 4110 | // The shuffle is concatenating multiple vectors together. Just emit |
| 4111 | // a CONCAT_VECTORS operation. |
| 4112 | if (IsConcat) { |
| 4113 | SmallVector<SDValue, 8> ConcatOps; |
| 4114 | for (auto Src : ConcatSrcs) { |
| 4115 | if (Src < 0) |
| 4116 | ConcatOps.push_back(Elt: DAG.getUNDEF(VT: SrcVT)); |
| 4117 | else if (Src == 0) |
| 4118 | ConcatOps.push_back(Elt: Src1); |
| 4119 | else |
| 4120 | ConcatOps.push_back(Elt: Src2); |
| 4121 | } |
| 4122 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT, Ops: ConcatOps)); |
| 4123 | return; |
| 4124 | } |
| 4125 | } |
| 4126 | |
| 4127 | unsigned PaddedMaskNumElts = alignTo(Value: MaskNumElts, Align: SrcNumElts); |
| 4128 | unsigned NumConcat = PaddedMaskNumElts / SrcNumElts; |
| 4129 | EVT PaddedVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: VT.getScalarType(), |
| 4130 | NumElements: PaddedMaskNumElts); |
| 4131 | |
| 4132 | // Pad both vectors with undefs to make them the same length as the mask. |
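| | // E.g. a 6-element mask over <4 x i32> sources becomes an 8-element
| | // shuffle of two padded <8 x i32> inputs; the extra result lanes are
| | // removed below with EXTRACT_SUBVECTOR.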
| 4133 | SDValue UndefVal = DAG.getUNDEF(VT: SrcVT); |
| 4134 | |
| 4135 | SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal); |
| 4136 | SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal); |
| 4137 | MOps1[0] = Src1; |
| 4138 | MOps2[0] = Src2; |
| 4139 | |
| 4140 | Src1 = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: PaddedVT, Ops: MOps1); |
| 4141 | Src2 = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: PaddedVT, Ops: MOps2); |
| 4142 | |
| 4143 | // Readjust mask for new input vector length. |
| 4144 | SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1); |
| 4145 | for (unsigned i = 0; i != MaskNumElts; ++i) { |
| 4146 | int Idx = Mask[i]; |
| 4147 | if (Idx >= (int)SrcNumElts) |
| 4148 | Idx -= SrcNumElts - PaddedMaskNumElts; |
| 4149 | MappedOps[i] = Idx; |
| 4150 | } |
| 4151 | |
| 4152 | SDValue Result = DAG.getVectorShuffle(VT: PaddedVT, dl: DL, N1: Src1, N2: Src2, Mask: MappedOps); |
| 4153 | |
| 4154 | // If the concatenated vector was padded, extract a subvector with the |
| 4155 | // correct number of elements. |
| 4156 | if (MaskNumElts != PaddedMaskNumElts) |
| 4157 | Result = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT, N1: Result, |
| 4158 | N2: DAG.getVectorIdxConstant(Val: 0, DL)); |
| 4159 | |
| 4160 | setValue(V: &I, NewN: Result); |
| 4161 | return; |
| 4162 | } |
| 4163 | |
| 4164 | assert(SrcNumElts > MaskNumElts); |
| 4165 | |
| 4166 | // Analyze the access pattern of the vector to see if we can extract |
| 4167 | // two subvectors and do the shuffle. |
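| | // E.g. a 4-element mask <8,9,10,11> over <16 x i32> sources can be lowered
| | // as EXTRACT_SUBVECTOR(Src1, 8) shuffled with the identity mask <0,1,2,3>.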
| 4168 | int StartIdx[2] = {-1, -1}; // StartIdx to extract from |
| 4169 | bool CanExtract = true;
| 4170 | for (int Idx : Mask) { |
| 4171 | unsigned Input = 0; |
| 4172 | if (Idx < 0) |
| 4173 | continue; |
| 4174 | |
| 4175 | if (Idx >= (int)SrcNumElts) { |
| 4176 | Input = 1; |
| 4177 | Idx -= SrcNumElts; |
| 4178 | } |
| 4179 | |
| 4180 | // If all the indices come from the same MaskNumElts sized portion of |
| 4181 | // the sources we can use extract. Also make sure the extract wouldn't |
| 4182 | // extract past the end of the source. |
| 4183 | int NewStartIdx = alignDown(Value: Idx, Align: MaskNumElts); |
| 4184 | if (NewStartIdx + MaskNumElts > SrcNumElts || |
| 4185 | (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx)) |
| 4186 | CanExtract = false; |
| 4187 | // Make sure we always update StartIdx as we use it to track if all |
| 4188 | // elements are undef. |
| 4189 | StartIdx[Input] = NewStartIdx; |
| 4190 | } |
| 4191 | |
| 4192 | if (StartIdx[0] < 0 && StartIdx[1] < 0) { |
| 4193 | setValue(V: &I, NewN: DAG.getUNDEF(VT)); // Vectors are not used. |
| 4194 | return; |
| 4195 | } |
| 4196 | if (CanExtract) { |
| 4197 | // Extract appropriate subvector and generate a vector shuffle |
| 4198 | for (unsigned Input = 0; Input < 2; ++Input) { |
| 4199 | SDValue &Src = Input == 0 ? Src1 : Src2; |
| 4200 | if (StartIdx[Input] < 0) |
| 4201 | Src = DAG.getUNDEF(VT); |
| 4202 | else { |
| 4203 | Src = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT, N1: Src, |
| 4204 | N2: DAG.getVectorIdxConstant(Val: StartIdx[Input], DL)); |
| 4205 | } |
| 4206 | } |
| 4207 | |
| 4208 | // Calculate new mask. |
| 4209 | SmallVector<int, 8> MappedOps(Mask); |
| 4210 | for (int &Idx : MappedOps) { |
| 4211 | if (Idx >= (int)SrcNumElts) |
| 4212 | Idx -= SrcNumElts + StartIdx[1] - MaskNumElts; |
| 4213 | else if (Idx >= 0) |
| 4214 | Idx -= StartIdx[0]; |
| 4215 | } |
| 4216 | |
| 4217 | setValue(V: &I, NewN: DAG.getVectorShuffle(VT, dl: DL, N1: Src1, N2: Src2, Mask: MappedOps)); |
| 4218 | return; |
| 4219 | } |
| 4220 | |
| 4221 | // We can't use either concat vectors or extract subvectors, so fall back to
| 4222 | // replacing the shuffle with per-element extracts and a build vector.
| 4224 | EVT EltVT = VT.getVectorElementType(); |
| 4225 | SmallVector<SDValue,8> Ops; |
| 4226 | for (int Idx : Mask) { |
| 4227 | SDValue Res; |
| 4228 | |
| 4229 | if (Idx < 0) { |
| 4230 | Res = DAG.getUNDEF(VT: EltVT); |
| 4231 | } else { |
| 4232 | SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2; |
| 4233 | if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts; |
| 4234 | |
| 4235 | Res = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: EltVT, N1: Src, |
| 4236 | N2: DAG.getVectorIdxConstant(Val: Idx, DL)); |
| 4237 | } |
| 4238 | |
| 4239 | Ops.push_back(Elt: Res); |
| 4240 | } |
| 4241 | |
| 4242 | setValue(V: &I, NewN: DAG.getBuildVector(VT, DL, Ops)); |
| 4243 | } |
| 4244 | |
| 4245 | void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) { |
| 4246 | ArrayRef<unsigned> Indices = I.getIndices(); |
| 4247 | const Value *Op0 = I.getOperand(i_nocapture: 0); |
| 4248 | const Value *Op1 = I.getOperand(i_nocapture: 1); |
| 4249 | Type *AggTy = I.getType(); |
| 4250 | Type *ValTy = Op1->getType(); |
| 4251 | bool IntoUndef = isa<UndefValue>(Val: Op0); |
| 4252 | bool FromUndef = isa<UndefValue>(Val: Op1); |
| 4253 | |
| 4254 | unsigned LinearIndex = ComputeLinearIndex(Ty: AggTy, Indices); |
| 4255 | |
| 4256 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 4257 | SmallVector<EVT, 4> AggValueVTs; |
| 4258 | ComputeValueVTs(TLI, DL: DAG.getDataLayout(), Ty: AggTy, ValueVTs&: AggValueVTs); |
| 4259 | SmallVector<EVT, 4> ValValueVTs; |
| 4260 | ComputeValueVTs(TLI, DL: DAG.getDataLayout(), Ty: ValTy, ValueVTs&: ValValueVTs); |
| 4261 | |
| 4262 | unsigned NumAggValues = AggValueVTs.size(); |
| 4263 | unsigned NumValValues = ValValueVTs.size(); |
| 4264 | SmallVector<SDValue, 4> Values(NumAggValues); |
| 4265 | |
| 4266 | // Ignore an insertvalue that produces an empty object |
| 4267 | if (!NumAggValues) { |
| 4268 | setValue(V: &I, NewN: DAG.getUNDEF(VT: MVT(MVT::Other))); |
| 4269 | return; |
| 4270 | } |
| 4271 | |
| 4272 | SDValue Agg = getValue(V: Op0); |
| 4273 | unsigned i = 0; |
| 4274 | // Copy the beginning value(s) from the original aggregate. |
| 4275 | for (; i != LinearIndex; ++i) |
| 4276 | Values[i] = IntoUndef ? DAG.getUNDEF(VT: AggValueVTs[i]) : |
| 4277 | SDValue(Agg.getNode(), Agg.getResNo() + i); |
| 4278 | // Copy values from the inserted value(s). |
| 4279 | if (NumValValues) { |
| 4280 | SDValue Val = getValue(V: Op1); |
| 4281 | for (; i != LinearIndex + NumValValues; ++i) |
| 4282 | Values[i] = FromUndef ? DAG.getUNDEF(VT: AggValueVTs[i]) : |
| 4283 | SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex); |
| 4284 | } |
| 4285 | // Copy remaining value(s) from the original aggregate. |
| 4286 | for (; i != NumAggValues; ++i) |
| 4287 | Values[i] = IntoUndef ? DAG.getUNDEF(VT: AggValueVTs[i]) : |
| 4288 | SDValue(Agg.getNode(), Agg.getResNo() + i); |
| 4289 | |
| 4290 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::MERGE_VALUES, DL: getCurSDLoc(), |
| 4291 | VTList: DAG.getVTList(VTs: AggValueVTs), Ops: Values)); |
| 4292 | } |
| 4293 | |
| 4294 | void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
| 4295 | ArrayRef<unsigned> Indices = I.getIndices(); |
| 4296 | const Value *Op0 = I.getOperand(i_nocapture: 0); |
| 4297 | Type *AggTy = Op0->getType(); |
| 4298 | Type *ValTy = I.getType(); |
| 4299 | bool OutOfUndef = isa<UndefValue>(Val: Op0); |
| 4300 | |
| 4301 | unsigned LinearIndex = ComputeLinearIndex(Ty: AggTy, Indices); |
| 4302 | |
| 4303 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 4304 | SmallVector<EVT, 4> ValValueVTs; |
| 4305 | ComputeValueVTs(TLI, DL: DAG.getDataLayout(), Ty: ValTy, ValueVTs&: ValValueVTs); |
| 4306 | |
| 4307 | unsigned NumValValues = ValValueVTs.size(); |
| 4308 | |
| 4309 | // Ignore an extractvalue that produces an empty object
| 4310 | if (!NumValValues) { |
| 4311 | setValue(V: &I, NewN: DAG.getUNDEF(VT: MVT(MVT::Other))); |
| 4312 | return; |
| 4313 | } |
| 4314 | |
| 4315 | SmallVector<SDValue, 4> Values(NumValValues); |
| 4316 | |
| 4317 | SDValue Agg = getValue(V: Op0); |
| 4318 | // Copy out the selected value(s). |
| 4319 | for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i) |
| 4320 | Values[i - LinearIndex] = |
| 4321 | OutOfUndef ? |
| 4322 | DAG.getUNDEF(VT: Agg.getNode()->getValueType(ResNo: Agg.getResNo() + i)) : |
| 4323 | SDValue(Agg.getNode(), Agg.getResNo() + i); |
| 4324 | |
| 4325 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::MERGE_VALUES, DL: getCurSDLoc(), |
| 4326 | VTList: DAG.getVTList(VTs: ValValueVTs), Ops: Values)); |
| 4327 | } |
| 4328 | |
| 4329 | void SelectionDAGBuilder::visitGetElementPtr(const User &I) { |
| 4330 | Value *Op0 = I.getOperand(i: 0); |
| 4331 | // Note that the pointer operand may be a vector of pointers. Take the scalar |
| 4332 | // element which holds a pointer. |
| 4333 | unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace(); |
| 4334 | SDValue N = getValue(V: Op0); |
| 4335 | SDLoc dl = getCurSDLoc(); |
| 4336 | auto &TLI = DAG.getTargetLoweringInfo(); |
| 4337 | GEPNoWrapFlags NW = cast<GEPOperator>(Val: I).getNoWrapFlags(); |
| 4338 | |
| 4339 | // For a vector GEP, keep the prefix scalar as long as possible, then |
| 4340 | // convert any scalars encountered after the first vector operand to vectors. |
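| | // E.g. in "getelementptr i32, ptr %base, <4 x i64> %idx" the address stays
| | // scalar until the vector index is reached, at which point it is splatted
| | // to <4 x ptr>.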
| 4341 | bool IsVectorGEP = I.getType()->isVectorTy(); |
| 4342 | ElementCount VectorElementCount = |
| 4343 | IsVectorGEP ? cast<VectorType>(Val: I.getType())->getElementCount() |
| 4344 | : ElementCount::getFixed(MinVal: 0); |
| 4345 | |
| 4346 | for (gep_type_iterator GTI = gep_type_begin(GEP: &I), E = gep_type_end(GEP: &I); |
| 4347 | GTI != E; ++GTI) { |
| 4348 | const Value *Idx = GTI.getOperand(); |
| 4349 | if (StructType *StTy = GTI.getStructTypeOrNull()) { |
| 4350 | unsigned Field = cast<Constant>(Val: Idx)->getUniqueInteger().getZExtValue(); |
| 4351 | if (Field) { |
| 4352 | // N = N + Offset |
| 4353 | uint64_t Offset = |
| 4354 | DAG.getDataLayout().getStructLayout(Ty: StTy)->getElementOffset(Idx: Field); |
| 4355 | |
| 4356 | // In an inbounds GEP with an offset that is nonnegative even when |
| 4357 | // interpreted as signed, assume there is no unsigned overflow. |
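| | // (inbounds implies nusw, and a nusw addition of a nonnegative offset
| | // cannot wrap in the unsigned sense either.)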
| 4358 | SDNodeFlags Flags; |
| 4359 | if (NW.hasNoUnsignedWrap() || |
| 4360 | (int64_t(Offset) >= 0 && NW.hasNoUnsignedSignedWrap())) |
| 4361 | Flags |= SDNodeFlags::NoUnsignedWrap; |
| 4362 | |
| 4363 | N = DAG.getMemBasePlusOffset( |
| 4364 | Base: N, Offset: DAG.getConstant(Val: Offset, DL: dl, VT: N.getValueType()), DL: dl, Flags); |
| 4365 | } |
| 4366 | } else { |
| 4367 | // IdxSize is the width of the arithmetic according to IR semantics. |
| 4368 | // In SelectionDAG, we may prefer to do arithmetic in a wider bitwidth |
| 4369 | // (and fix up the result later). |
| 4370 | unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS); |
| 4371 | MVT IdxTy = MVT::getIntegerVT(BitWidth: IdxSize); |
| 4372 | TypeSize ElementSize = |
| 4373 | GTI.getSequentialElementStride(DL: DAG.getDataLayout()); |
| 4374 | // We intentionally mask away the high bits here; ElementSize may not |
| 4375 | // fit in IdxTy. |
| 4376 | APInt ElementMul(IdxSize, ElementSize.getKnownMinValue(), |
| 4377 | /*isSigned=*/false, /*implicitTrunc=*/true); |
| 4378 | bool ElementScalable = ElementSize.isScalable(); |
| 4379 | |
| 4380 | // If this is a scalar constant or a splat vector of constants, |
| 4381 | // handle it quickly. |
| 4382 | const auto *C = dyn_cast<Constant>(Val: Idx); |
| 4383 | if (C && isa<VectorType>(Val: C->getType())) |
| 4384 | C = C->getSplatValue(); |
| 4385 | |
| 4386 | const auto *CI = dyn_cast_or_null<ConstantInt>(Val: C); |
| 4387 | if (CI && CI->isZero()) |
| 4388 | continue; |
| 4389 | if (CI && !ElementScalable) { |
| 4390 | APInt Offs = ElementMul * CI->getValue().sextOrTrunc(width: IdxSize); |
| 4391 | LLVMContext &Context = *DAG.getContext(); |
| 4392 | SDValue OffsVal; |
| 4393 | if (N.getValueType().isVector()) |
| 4394 | OffsVal = DAG.getConstant( |
| 4395 | Val: Offs, DL: dl, VT: EVT::getVectorVT(Context, VT: IdxTy, EC: VectorElementCount)); |
| 4396 | else |
| 4397 | OffsVal = DAG.getConstant(Val: Offs, DL: dl, VT: IdxTy); |
| 4398 | |
| 4399 | // In an inbounds GEP with an offset that is nonnegative even when |
| 4400 | // interpreted as signed, assume there is no unsigned overflow. |
| 4401 | SDNodeFlags Flags; |
| 4402 | if (NW.hasNoUnsignedWrap() || |
| 4403 | (Offs.isNonNegative() && NW.hasNoUnsignedSignedWrap())) |
| 4404 | Flags.setNoUnsignedWrap(true); |
| 4405 | |
| 4406 | OffsVal = DAG.getSExtOrTrunc(Op: OffsVal, DL: dl, VT: N.getValueType()); |
| 4407 | |
| 4408 | N = DAG.getMemBasePlusOffset(Base: N, Offset: OffsVal, DL: dl, Flags); |
| 4409 | continue; |
| 4410 | } |
| 4411 | |
| 4412 | // N = N + Idx * ElementMul; |
| 4413 | SDValue IdxN = getValue(V: Idx); |
| 4414 | |
| 4415 | if (IdxN.getValueType().isVector() != N.getValueType().isVector()) { |
| 4416 | if (N.getValueType().isVector()) { |
| 4417 | EVT VT = EVT::getVectorVT(Context&: *Context, VT: IdxN.getValueType(), |
| 4418 | EC: VectorElementCount); |
| 4419 | IdxN = DAG.getSplat(VT, DL: dl, Op: IdxN); |
| 4420 | } else { |
| 4421 | EVT VT = |
| 4422 | EVT::getVectorVT(Context&: *Context, VT: N.getValueType(), EC: VectorElementCount); |
| 4423 | N = DAG.getSplat(VT, DL: dl, Op: N); |
| 4424 | } |
| 4425 | } |
| 4426 | |
| 4427 | // If the index is smaller or larger than intptr_t, truncate or extend |
| 4428 | // it. |
| 4429 | IdxN = DAG.getSExtOrTrunc(Op: IdxN, DL: dl, VT: N.getValueType()); |
| 4430 | |
| 4431 | SDNodeFlags ScaleFlags; |
| 4432 | // The multiplication of an index by the type size does not wrap the |
| 4433 | // pointer index type in a signed sense (mul nsw). |
| 4434 | ScaleFlags.setNoSignedWrap(NW.hasNoUnsignedSignedWrap()); |
| 4435 | |
| 4436 | // The multiplication of an index by the type size does not wrap the |
| 4437 | // pointer index type in an unsigned sense (mul nuw). |
| 4438 | ScaleFlags.setNoUnsignedWrap(NW.hasNoUnsignedWrap()); |
| 4439 | |
| 4440 | if (ElementScalable) { |
| 4441 | EVT VScaleTy = N.getValueType().getScalarType(); |
| 4442 | SDValue VScale = DAG.getNode( |
| 4443 | Opcode: ISD::VSCALE, DL: dl, VT: VScaleTy, |
| 4444 | Operand: DAG.getConstant(Val: ElementMul.getZExtValue(), DL: dl, VT: VScaleTy)); |
| 4445 | if (N.getValueType().isVector()) |
| 4446 | VScale = DAG.getSplatVector(VT: N.getValueType(), DL: dl, Op: VScale); |
| 4447 | IdxN = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: N.getValueType(), N1: IdxN, N2: VScale, |
| 4448 | Flags: ScaleFlags); |
| 4449 | } else { |
| 4450 | // If this is a multiply by a power of two, turn it into a shl |
| 4451 | // immediately. This is a very common case. |
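| | // E.g. indexing an array of i64 scales the index by 8, which becomes a
| | // left shift by 3.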
| 4452 | if (ElementMul != 1) { |
| 4453 | if (ElementMul.isPowerOf2()) { |
| 4454 | unsigned Amt = ElementMul.logBase2(); |
| 4455 | IdxN = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: N.getValueType(), N1: IdxN, |
| 4456 | N2: DAG.getConstant(Val: Amt, DL: dl, VT: IdxN.getValueType()), |
| 4457 | Flags: ScaleFlags); |
| 4458 | } else { |
| 4459 | SDValue Scale = DAG.getConstant(Val: ElementMul.getZExtValue(), DL: dl, |
| 4460 | VT: IdxN.getValueType()); |
| 4461 | IdxN = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: N.getValueType(), N1: IdxN, N2: Scale, |
| 4462 | Flags: ScaleFlags); |
| 4463 | } |
| 4464 | } |
| 4465 | } |
| 4466 | |
| 4467 | // The successive addition of the current address, truncated to the |
| 4468 | // pointer index type and interpreted as an unsigned number, and each |
| 4469 | // offset, also interpreted as an unsigned number, does not wrap the |
| 4470 | // pointer index type (add nuw). |
| 4471 | SDNodeFlags AddFlags; |
| 4472 | AddFlags.setNoUnsignedWrap(NW.hasNoUnsignedWrap()); |
| 4473 | |
| 4474 | N = DAG.getMemBasePlusOffset(Base: N, Offset: IdxN, DL: dl, Flags: AddFlags); |
| 4475 | } |
| 4476 | } |
| 4477 | |
| 4478 | if (IsVectorGEP && !N.getValueType().isVector()) { |
| 4479 | EVT VT = EVT::getVectorVT(Context&: *Context, VT: N.getValueType(), EC: VectorElementCount); |
| 4480 | N = DAG.getSplat(VT, DL: dl, Op: N); |
| 4481 | } |
| 4482 | |
| 4483 | MVT PtrTy = TLI.getPointerTy(DL: DAG.getDataLayout(), AS); |
| 4484 | MVT PtrMemTy = TLI.getPointerMemTy(DL: DAG.getDataLayout(), AS); |
| 4485 | if (IsVectorGEP) { |
| 4486 | PtrTy = MVT::getVectorVT(VT: PtrTy, EC: VectorElementCount); |
| 4487 | PtrMemTy = MVT::getVectorVT(VT: PtrMemTy, EC: VectorElementCount); |
| 4488 | } |
| 4489 | |
| 4490 | if (PtrMemTy != PtrTy && !cast<GEPOperator>(Val: I).isInBounds()) |
| 4491 | N = DAG.getPtrExtendInReg(Op: N, DL: dl, VT: PtrMemTy); |
| 4492 | |
| 4493 | setValue(V: &I, NewN: N); |
| 4494 | } |
| 4495 | |
| 4496 | void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) { |
| 4497 | // If this is a fixed sized alloca in the entry block of the function, |
| 4498 | // allocate it statically on the stack. |
| 4499 | if (FuncInfo.StaticAllocaMap.count(Val: &I)) |
| 4500 | return; // getValue will auto-populate this. |
| 4501 | |
| 4502 | SDLoc dl = getCurSDLoc(); |
| 4503 | Type *Ty = I.getAllocatedType(); |
| 4504 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 4505 | auto &DL = DAG.getDataLayout(); |
| 4506 | TypeSize TySize = DL.getTypeAllocSize(Ty); |
| 4507 | MaybeAlign Alignment = std::max(a: DL.getPrefTypeAlign(Ty), b: I.getAlign()); |
| 4508 | |
| 4509 | SDValue AllocSize = getValue(V: I.getArraySize()); |
| 4510 | |
| 4511 | EVT IntPtr = TLI.getPointerTy(DL, AS: I.getAddressSpace()); |
| 4512 | if (AllocSize.getValueType() != IntPtr) |
| 4513 | AllocSize = DAG.getZExtOrTrunc(Op: AllocSize, DL: dl, VT: IntPtr); |
| 4514 | |
| 4515 | if (TySize.isScalable()) |
| 4516 | AllocSize = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: IntPtr, N1: AllocSize, |
| 4517 | N2: DAG.getVScale(DL: dl, VT: IntPtr, |
| 4518 | MulImm: APInt(IntPtr.getScalarSizeInBits(), |
| 4519 | TySize.getKnownMinValue()))); |
| 4520 | else { |
| 4521 | SDValue TySizeValue = |
| 4522 | DAG.getConstant(Val: TySize.getFixedValue(), DL: dl, VT: MVT::getIntegerVT(BitWidth: 64)); |
| 4523 | AllocSize = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: IntPtr, N1: AllocSize, |
| 4524 | N2: DAG.getZExtOrTrunc(Op: TySizeValue, DL: dl, VT: IntPtr)); |
| 4525 | } |
| 4526 | |
| 4527 | // Handle alignment. If the requested alignment is less than or equal to
| 4528 | // the stack alignment, ignore it. If it is greater than the stack
| 4529 | // alignment, we record it in the DYNAMIC_STACKALLOC node.
| 4530 | Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign(); |
| 4531 | if (*Alignment <= StackAlign) |
| 4532 | Alignment = std::nullopt; |
| 4533 | |
| 4534 | const uint64_t StackAlignMask = StackAlign.value() - 1U; |
| 4535 | // Round the size of the allocation up to the stack alignment size by adding
| 4536 | // SA-1 to the size. This doesn't overflow because we're computing
| 4537 | // an address inside an alloca.
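| | // E.g. with a 16-byte stack alignment this computes
| | // (AllocSize + 15) & ~15.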
| 4538 | AllocSize = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: AllocSize.getValueType(), N1: AllocSize, |
| 4539 | N2: DAG.getConstant(Val: StackAlignMask, DL: dl, VT: IntPtr), |
| 4540 | Flags: SDNodeFlags::NoUnsignedWrap); |
| 4541 | |
| 4542 | // Mask out the low bits for alignment purposes. |
| 4543 | AllocSize = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: AllocSize.getValueType(), N1: AllocSize, |
| 4544 | N2: DAG.getSignedConstant(Val: ~StackAlignMask, DL: dl, VT: IntPtr)); |
| 4545 | |
| 4546 | SDValue Ops[] = { |
| 4547 | getRoot(), AllocSize, |
| 4548 | DAG.getConstant(Val: Alignment ? Alignment->value() : 0, DL: dl, VT: IntPtr)}; |
| 4549 | SDVTList VTs = DAG.getVTList(VT1: AllocSize.getValueType(), VT2: MVT::Other); |
| 4550 | SDValue DSA = DAG.getNode(Opcode: ISD::DYNAMIC_STACKALLOC, DL: dl, VTList: VTs, Ops); |
| 4551 | setValue(V: &I, NewN: DSA); |
| 4552 | DAG.setRoot(DSA.getValue(R: 1)); |
| 4553 | |
| 4554 | assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects()); |
| 4555 | } |
| 4556 | |
| 4557 | static const MDNode *getRangeMetadata(const Instruction &I) { |
| 4558 | return I.getMetadata(KindID: LLVMContext::MD_range); |
| 4559 | } |
| 4560 | |
| 4561 | static std::optional<ConstantRange> getRange(const Instruction &I) { |
| 4562 | if (const auto *CB = dyn_cast<CallBase>(Val: &I)) |
| 4563 | if (std::optional<ConstantRange> CR = CB->getRange()) |
| 4564 | return CR; |
| 4565 | if (const MDNode *Range = getRangeMetadata(I)) |
| 4566 | return getConstantRangeFromMetadata(RangeMD: *Range); |
| 4567 | return std::nullopt; |
| 4568 | } |
| 4569 | |
| 4570 | void SelectionDAGBuilder::visitLoad(const LoadInst &I) { |
| 4571 | if (I.isAtomic()) |
| 4572 | return visitAtomicLoad(I); |
| 4573 | |
| 4574 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 4575 | const Value *SV = I.getOperand(i_nocapture: 0); |
| 4576 | if (TLI.supportSwiftError()) { |
| 4577 | // Swifterror values can come from either a function parameter with |
| 4578 | // swifterror attribute or an alloca with swifterror attribute. |
| 4579 | if (const Argument *Arg = dyn_cast<Argument>(Val: SV)) { |
| 4580 | if (Arg->hasSwiftErrorAttr()) |
| 4581 | return visitLoadFromSwiftError(I); |
| 4582 | } |
| 4583 | |
| 4584 | if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(Val: SV)) { |
| 4585 | if (Alloca->isSwiftError()) |
| 4586 | return visitLoadFromSwiftError(I); |
| 4587 | } |
| 4588 | } |
| 4589 | |
| 4590 | SDValue Ptr = getValue(V: SV); |
| 4591 | |
| 4592 | Type *Ty = I.getType(); |
| 4593 | SmallVector<EVT, 4> ValueVTs, MemVTs; |
| 4594 | SmallVector<TypeSize, 4> Offsets; |
| 4595 | ComputeValueVTs(TLI, DL: DAG.getDataLayout(), Ty, ValueVTs, MemVTs: &MemVTs, Offsets: &Offsets); |
| 4596 | unsigned NumValues = ValueVTs.size(); |
| 4597 | if (NumValues == 0) |
| 4598 | return; |
| 4599 | |
| 4600 | Align Alignment = I.getAlign(); |
| 4601 | AAMDNodes AAInfo = I.getAAMetadata(); |
| 4602 | const MDNode *Ranges = getRangeMetadata(I); |
| 4603 | bool isVolatile = I.isVolatile(); |
| 4604 | MachineMemOperand::Flags MMOFlags = |
| 4605 | TLI.getLoadMemOperandFlags(LI: I, DL: DAG.getDataLayout(), AC, LibInfo); |
| 4606 | |
| 4607 | SDValue Root; |
| 4608 | bool ConstantMemory = false; |
| 4609 | if (isVolatile) |
| 4610 | // Serialize volatile loads with other side effects. |
| 4611 | Root = getRoot(); |
| 4612 | else if (NumValues > MaxParallelChains) |
| 4613 | Root = getMemoryRoot(); |
| 4614 | else if (BatchAA && |
| 4615 | BatchAA->pointsToConstantMemory(Loc: MemoryLocation( |
| 4616 | SV, |
| 4617 | LocationSize::precise(Value: DAG.getDataLayout().getTypeStoreSize(Ty)), |
| 4618 | AAInfo))) { |
| 4619 | // Do not serialize (non-volatile) loads of constant memory with anything. |
| 4620 | Root = DAG.getEntryNode(); |
| 4621 | ConstantMemory = true; |
| 4622 | MMOFlags |= MachineMemOperand::MOInvariant; |
| 4623 | } else { |
| 4624 | // Do not serialize non-volatile loads against each other. |
| 4625 | Root = DAG.getRoot(); |
| 4626 | } |
| 4627 | |
| 4628 | SDLoc dl = getCurSDLoc(); |
| 4629 | |
| 4630 | if (isVolatile) |
| 4631 | Root = TLI.prepareVolatileOrAtomicLoad(Chain: Root, DL: dl, DAG); |
| 4632 | |
| 4633 | SmallVector<SDValue, 4> Values(NumValues); |
| 4634 | SmallVector<SDValue, 4> Chains(std::min(a: MaxParallelChains, b: NumValues)); |
| 4635 | |
| 4636 | unsigned ChainI = 0; |
| 4637 | for (unsigned i = 0; i != NumValues; ++i, ++ChainI) { |
| 4638 | // Serializing loads here may result in excessive register pressure, and |
| 4639 | // TokenFactor places arbitrary choke points on the scheduler. SD scheduling |
| 4640 | // could recover a bit by hoisting nodes upward in the chain by recognizing |
| 4641 | // they are side-effect free or do not alias. The optimizer should really |
| 4642 | // avoid this case by converting large object/array copies to llvm.memcpy |
| 4643 | // (MaxParallelChains should always remain as a failsafe).
| 4644 | if (ChainI == MaxParallelChains) { |
| 4645 | assert(PendingLoads.empty() && "PendingLoads must be serialized first");
| 4646 | SDValue Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, |
| 4647 | Ops: ArrayRef(Chains.data(), ChainI)); |
| 4648 | Root = Chain; |
| 4649 | ChainI = 0; |
| 4650 | } |
| 4651 | |
| 4652 | // TODO: MachinePointerInfo only supports a fixed length offset. |
| 4653 | MachinePointerInfo PtrInfo = |
| 4654 | !Offsets[i].isScalable() || Offsets[i].isZero() |
| 4655 | ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue()) |
| 4656 | : MachinePointerInfo(); |
| 4657 | |
| 4658 | SDValue A = DAG.getObjectPtrOffset(SL: dl, Ptr, Offset: Offsets[i]); |
| 4659 | SDValue L = DAG.getLoad(VT: MemVTs[i], dl, Chain: Root, Ptr: A, PtrInfo, Alignment, |
| 4660 | MMOFlags, AAInfo, Ranges); |
| 4661 | Chains[ChainI] = L.getValue(R: 1); |
| 4662 | |
| 4663 | if (MemVTs[i] != ValueVTs[i]) |
| 4664 | L = DAG.getPtrExtOrTrunc(Op: L, DL: dl, VT: ValueVTs[i]); |
| 4665 | |
| 4666 | Values[i] = L; |
| 4667 | } |
| 4668 | |
| 4669 | if (!ConstantMemory) { |
| 4670 | SDValue Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, |
| 4671 | Ops: ArrayRef(Chains.data(), ChainI)); |
| 4672 | if (isVolatile) |
| 4673 | DAG.setRoot(Chain); |
| 4674 | else |
| 4675 | PendingLoads.push_back(Elt: Chain); |
| 4676 | } |
| 4677 | |
| 4678 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::MERGE_VALUES, DL: dl, |
| 4679 | VTList: DAG.getVTList(VTs: ValueVTs), Ops: Values)); |
| 4680 | } |
| 4681 | |
| 4682 | void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) { |
| 4683 | assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
| 4684 | "call visitStoreToSwiftError when backend supports swifterror");
| 4685 | |
| 4686 | SmallVector<EVT, 4> ValueVTs; |
| 4687 | SmallVector<uint64_t, 4> Offsets; |
| 4688 | const Value *SrcV = I.getOperand(i_nocapture: 0); |
| 4689 | ComputeValueVTs(TLI: DAG.getTargetLoweringInfo(), DL: DAG.getDataLayout(), |
| 4690 | Ty: SrcV->getType(), ValueVTs, FixedOffsets: &Offsets, StartingOffset: 0); |
| 4691 | assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
| 4692 | "expect a single EVT for swifterror");
| 4693 | |
| 4694 | SDValue Src = getValue(V: SrcV); |
| 4695 | // Create a virtual register, then update the virtual register. |
| 4696 | Register VReg = |
| 4697 | SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand()); |
| 4698 | // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue |
| 4699 | // Chain can be getRoot or getControlRoot. |
| 4700 | SDValue CopyNode = DAG.getCopyToReg(Chain: getRoot(), dl: getCurSDLoc(), Reg: VReg, |
| 4701 | N: SDValue(Src.getNode(), Src.getResNo())); |
| 4702 | DAG.setRoot(CopyNode); |
| 4703 | } |
| 4704 | |
| 4705 | void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) { |
| 4706 | assert(DAG.getTargetLoweringInfo().supportSwiftError() && |
| 4707 | "call visitLoadFromSwiftError when backend supports swifterror" ); |
| 4708 | |
| 4709 | assert(!I.isVolatile() &&
| 4710 | !I.hasMetadata(LLVMContext::MD_nontemporal) &&
| 4711 | !I.hasMetadata(LLVMContext::MD_invariant_load) &&
| 4712 | "load_from_swift_error does not support volatile, non-temporal, or invariant loads");
| 4713 | |
| 4714 | const Value *SV = I.getOperand(i_nocapture: 0); |
| 4715 | Type *Ty = I.getType(); |
| 4716 | assert( |
| 4717 | (!BatchAA || |
| 4718 | !BatchAA->pointsToConstantMemory(MemoryLocation( |
| 4719 | SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)), |
| 4720 | I.getAAMetadata()))) &&
| 4721 | "load_from_swift_error should not be constant memory");
| 4722 | |
| 4723 | SmallVector<EVT, 4> ValueVTs; |
| 4724 | SmallVector<uint64_t, 4> Offsets; |
| 4725 | ComputeValueVTs(TLI: DAG.getTargetLoweringInfo(), DL: DAG.getDataLayout(), Ty, |
| 4726 | ValueVTs, FixedOffsets: &Offsets, StartingOffset: 0); |
| 4727 | assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
| 4728 | "expect a single EVT for swifterror");
| 4729 | |
| 4730 | // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT |
| 4731 | SDValue L = DAG.getCopyFromReg( |
| 4732 | Chain: getRoot(), dl: getCurSDLoc(), |
| 4733 | Reg: SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), VT: ValueVTs[0]); |
| 4734 | |
| 4735 | setValue(V: &I, NewN: L); |
| 4736 | } |
| 4737 | |
| 4738 | void SelectionDAGBuilder::visitStore(const StoreInst &I) { |
| 4739 | if (I.isAtomic()) |
| 4740 | return visitAtomicStore(I); |
| 4741 | |
| 4742 | const Value *SrcV = I.getOperand(i_nocapture: 0); |
| 4743 | const Value *PtrV = I.getOperand(i_nocapture: 1); |
| 4744 | |
| 4745 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 4746 | if (TLI.supportSwiftError()) { |
| 4747 | // Swifterror values can come from either a function parameter with |
| 4748 | // swifterror attribute or an alloca with swifterror attribute. |
| 4749 | if (const Argument *Arg = dyn_cast<Argument>(Val: PtrV)) { |
| 4750 | if (Arg->hasSwiftErrorAttr()) |
| 4751 | return visitStoreToSwiftError(I); |
| 4752 | } |
| 4753 | |
| 4754 | if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(Val: PtrV)) { |
| 4755 | if (Alloca->isSwiftError()) |
| 4756 | return visitStoreToSwiftError(I); |
| 4757 | } |
| 4758 | } |
| 4759 | |
| 4760 | SmallVector<EVT, 4> ValueVTs, MemVTs; |
| 4761 | SmallVector<TypeSize, 4> Offsets; |
| 4762 | ComputeValueVTs(TLI: DAG.getTargetLoweringInfo(), DL: DAG.getDataLayout(), |
| 4763 | Ty: SrcV->getType(), ValueVTs, MemVTs: &MemVTs, Offsets: &Offsets); |
| 4764 | unsigned NumValues = ValueVTs.size(); |
| 4765 | if (NumValues == 0) |
| 4766 | return; |
| 4767 | |
// Get the lowered operands. Note that we do this after
// checking if NumValues is zero, because with zero values
// the operands won't have entries in the map.
| 4771 | SDValue Src = getValue(V: SrcV); |
| 4772 | SDValue Ptr = getValue(V: PtrV); |
| 4773 | |
| 4774 | SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot(); |
| 4775 | SmallVector<SDValue, 4> Chains(std::min(a: MaxParallelChains, b: NumValues)); |
| 4776 | SDLoc dl = getCurSDLoc(); |
| 4777 | Align Alignment = I.getAlign(); |
| 4778 | AAMDNodes AAInfo = I.getAAMetadata(); |
| 4779 | |
| 4780 | auto MMOFlags = TLI.getStoreMemOperandFlags(SI: I, DL: DAG.getDataLayout()); |
| 4781 | |
| 4782 | unsigned ChainI = 0; |
| 4783 | for (unsigned i = 0; i != NumValues; ++i, ++ChainI) { |
| 4784 | // See visitLoad comments. |
| 4785 | if (ChainI == MaxParallelChains) { |
| 4786 | SDValue Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, |
| 4787 | Ops: ArrayRef(Chains.data(), ChainI)); |
| 4788 | Root = Chain; |
| 4789 | ChainI = 0; |
| 4790 | } |
| 4791 | |
| 4792 | // TODO: MachinePointerInfo only supports a fixed length offset. |
| 4793 | MachinePointerInfo PtrInfo = |
| 4794 | !Offsets[i].isScalable() || Offsets[i].isZero() |
| 4795 | ? MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue()) |
| 4796 | : MachinePointerInfo(); |
| 4797 | |
| 4798 | SDValue Add = DAG.getObjectPtrOffset(SL: dl, Ptr, Offset: Offsets[i]); |
| 4799 | SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i); |
| 4800 | if (MemVTs[i] != ValueVTs[i]) |
| 4801 | Val = DAG.getPtrExtOrTrunc(Op: Val, DL: dl, VT: MemVTs[i]); |
| 4802 | SDValue St = |
| 4803 | DAG.getStore(Chain: Root, dl, Val, Ptr: Add, PtrInfo, Alignment, MMOFlags, AAInfo); |
| 4804 | Chains[ChainI] = St; |
| 4805 | } |
| 4806 | |
| 4807 | SDValue StoreNode = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, |
| 4808 | Ops: ArrayRef(Chains.data(), ChainI)); |
| 4809 | setValue(V: &I, NewN: StoreNode); |
| 4810 | DAG.setRoot(StoreNode); |
| 4811 | } |
| 4812 | |
| 4813 | void SelectionDAGBuilder::visitMaskedStore(const CallInst &I, |
| 4814 | bool IsCompressing) { |
| 4815 | SDLoc sdl = getCurSDLoc(); |
| 4816 | |
| 4817 | auto getMaskedStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0, |
| 4818 | Align &Alignment) { |
| 4819 | // llvm.masked.store.*(Src0, Ptr, alignment, Mask) |
| 4820 | Src0 = I.getArgOperand(i: 0); |
| 4821 | Ptr = I.getArgOperand(i: 1); |
| 4822 | Alignment = cast<ConstantInt>(Val: I.getArgOperand(i: 2))->getAlignValue(); |
| 4823 | Mask = I.getArgOperand(i: 3); |
| 4824 | }; |
| 4825 | auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0, |
| 4826 | Align &Alignment) { |
| 4827 | // llvm.masked.compressstore.*(Src0, Ptr, Mask) |
| 4828 | Src0 = I.getArgOperand(i: 0); |
| 4829 | Ptr = I.getArgOperand(i: 1); |
| 4830 | Mask = I.getArgOperand(i: 2); |
| 4831 | Alignment = I.getParamAlign(ArgNo: 1).valueOrOne(); |
| 4832 | }; |
| 4833 | |
| 4834 | Value *PtrOperand, *MaskOperand, *Src0Operand; |
| 4835 | Align Alignment; |
| 4836 | if (IsCompressing) |
| 4837 | getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment); |
| 4838 | else |
| 4839 | getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment); |
| 4840 | |
| 4841 | SDValue Ptr = getValue(V: PtrOperand); |
| 4842 | SDValue Src0 = getValue(V: Src0Operand); |
| 4843 | SDValue Mask = getValue(V: MaskOperand); |
| 4844 | SDValue Offset = DAG.getUNDEF(VT: Ptr.getValueType()); |
| 4845 | |
| 4846 | EVT VT = Src0.getValueType(); |
| 4847 | |
| 4848 | auto MMOFlags = MachineMemOperand::MOStore; |
| 4849 | if (I.hasMetadata(KindID: LLVMContext::MD_nontemporal)) |
| 4850 | MMOFlags |= MachineMemOperand::MONonTemporal; |
| 4851 | |
| 4852 | MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( |
| 4853 | PtrInfo: MachinePointerInfo(PtrOperand), F: MMOFlags, |
| 4854 | Size: LocationSize::beforeOrAfterPointer(), BaseAlignment: Alignment, AAInfo: I.getAAMetadata()); |
| 4855 | |
| 4856 | const auto &TLI = DAG.getTargetLoweringInfo(); |
| 4857 | const auto &TTI = |
| 4858 | TLI.getTargetMachine().getTargetTransformInfo(F: *I.getFunction()); |
| 4859 | SDValue StoreNode = |
| 4860 | !IsCompressing && TTI.hasConditionalLoadStoreForType( |
| 4861 | Ty: I.getArgOperand(i: 0)->getType(), /*IsStore=*/true) |
| 4862 | ? TLI.visitMaskedStore(DAG, DL: sdl, Chain: getMemoryRoot(), MMO, Ptr, Val: Src0, |
| 4863 | Mask) |
| 4864 | : DAG.getMaskedStore(Chain: getMemoryRoot(), dl: sdl, Val: Src0, Base: Ptr, Offset, Mask, |
| 4865 | MemVT: VT, MMO, AM: ISD::UNINDEXED, /*Truncating=*/IsTruncating: false, |
| 4866 | IsCompressing); |
| 4867 | DAG.setRoot(StoreNode); |
| 4868 | setValue(V: &I, NewN: StoreNode); |
| 4869 | } |
| 4870 | |
| 4871 | // Get a uniform base for the Gather/Scatter intrinsic. |
| 4872 | // The first argument of the Gather/Scatter intrinsic is a vector of pointers. |
| 4873 | // We try to represent it as a base pointer + vector of indices. |
| 4874 | // Usually, the vector of pointers comes from a 'getelementptr' instruction. |
// The first operand of the GEP may be a single pointer or a vector of
// pointers. Example:
| 4877 | // %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind |
| 4878 | // or |
| 4879 | // %gep.ptr = getelementptr i32, i32* %ptr, <8 x i32> %ind |
| 4880 | // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, .. |
| 4881 | // |
// When the first GEP operand is a single pointer, it is the uniform base we
// are looking for. If the first operand of the GEP is a splat vector, we
// extract the splat value and use it as a uniform base.
| 4885 | // In all other cases the function returns 'false'. |
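// For example, with the second (scalar base) form above, getUniformBase
// would produce Base = %ptr, Index = %ind, IndexType = SIGNED_SCALED and
// Scale = 4 (the alloc size of the i32 result element type), so lane L
// addresses %ptr + 4 * %ind[L].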
| 4886 | static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index, |
| 4887 | ISD::MemIndexType &IndexType, SDValue &Scale, |
| 4888 | SelectionDAGBuilder *SDB, const BasicBlock *CurBB, |
| 4889 | uint64_t ElemSize) { |
| 4890 | SelectionDAG& DAG = SDB->DAG; |
| 4891 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 4892 | const DataLayout &DL = DAG.getDataLayout(); |
| 4893 | |
assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
| 4895 | |
| 4896 | // Handle splat constant pointer. |
| 4897 | if (auto *C = dyn_cast<Constant>(Val: Ptr)) { |
| 4898 | C = C->getSplatValue(); |
| 4899 | if (!C) |
| 4900 | return false; |
| 4901 | |
| 4902 | Base = SDB->getValue(V: C); |
| 4903 | |
| 4904 | ElementCount NumElts = cast<VectorType>(Val: Ptr->getType())->getElementCount(); |
| 4905 | EVT VT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: TLI.getPointerTy(DL), EC: NumElts); |
| 4906 | Index = DAG.getConstant(Val: 0, DL: SDB->getCurSDLoc(), VT); |
| 4907 | IndexType = ISD::SIGNED_SCALED; |
| 4908 | Scale = DAG.getTargetConstant(Val: 1, DL: SDB->getCurSDLoc(), VT: TLI.getPointerTy(DL)); |
| 4909 | return true; |
| 4910 | } |
| 4911 | |
| 4912 | const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Val: Ptr); |
| 4913 | if (!GEP || GEP->getParent() != CurBB) |
| 4914 | return false; |
| 4915 | |
| 4916 | if (GEP->getNumOperands() != 2) |
| 4917 | return false; |
| 4918 | |
| 4919 | const Value *BasePtr = GEP->getPointerOperand(); |
| 4920 | const Value *IndexVal = GEP->getOperand(i_nocapture: GEP->getNumOperands() - 1); |
| 4921 | |
| 4922 | // Make sure the base is scalar and the index is a vector. |
| 4923 | if (BasePtr->getType()->isVectorTy() || !IndexVal->getType()->isVectorTy()) |
| 4924 | return false; |
| 4925 | |
| 4926 | TypeSize ScaleVal = DL.getTypeAllocSize(Ty: GEP->getResultElementType()); |
| 4927 | if (ScaleVal.isScalable()) |
| 4928 | return false; |
| 4929 | |
| 4930 | // Target may not support the required addressing mode. |
| 4931 | if (ScaleVal != 1 && |
| 4932 | !TLI.isLegalScaleForGatherScatter(Scale: ScaleVal.getFixedValue(), ElemSize)) |
| 4933 | return false; |
| 4934 | |
| 4935 | Base = SDB->getValue(V: BasePtr); |
| 4936 | Index = SDB->getValue(V: IndexVal); |
| 4937 | IndexType = ISD::SIGNED_SCALED; |
| 4938 | |
| 4939 | Scale = |
| 4940 | DAG.getTargetConstant(Val: ScaleVal, DL: SDB->getCurSDLoc(), VT: TLI.getPointerTy(DL)); |
| 4941 | return true; |
| 4942 | } |
| 4943 | |
| 4944 | void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) { |
| 4945 | SDLoc sdl = getCurSDLoc(); |
| 4946 | |
| 4947 | // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask) |
| 4948 | const Value *Ptr = I.getArgOperand(i: 1); |
| 4949 | SDValue Src0 = getValue(V: I.getArgOperand(i: 0)); |
| 4950 | SDValue Mask = getValue(V: I.getArgOperand(i: 3)); |
| 4951 | EVT VT = Src0.getValueType(); |
| 4952 | Align Alignment = cast<ConstantInt>(Val: I.getArgOperand(i: 2)) |
| 4953 | ->getMaybeAlignValue() |
| 4954 | .value_or(u: DAG.getEVTAlign(MemoryVT: VT.getScalarType())); |
| 4955 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 4956 | |
| 4957 | SDValue Base; |
| 4958 | SDValue Index; |
| 4959 | ISD::MemIndexType IndexType; |
| 4960 | SDValue Scale; |
| 4961 | bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, SDB: this, |
| 4962 | CurBB: I.getParent(), ElemSize: VT.getScalarStoreSize()); |
| 4963 | |
| 4964 | unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace(); |
| 4965 | MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( |
| 4966 | PtrInfo: MachinePointerInfo(AS), F: MachineMemOperand::MOStore, |
| 4967 | Size: LocationSize::beforeOrAfterPointer(), BaseAlignment: Alignment, AAInfo: I.getAAMetadata()); |
| 4968 | if (!UniformBase) { |
| 4969 | Base = DAG.getConstant(Val: 0, DL: sdl, VT: TLI.getPointerTy(DL: DAG.getDataLayout())); |
| 4970 | Index = getValue(V: Ptr); |
| 4971 | IndexType = ISD::SIGNED_SCALED; |
| 4972 | Scale = DAG.getTargetConstant(Val: 1, DL: sdl, VT: TLI.getPointerTy(DL: DAG.getDataLayout())); |
| 4973 | } |
| 4974 | |
| 4975 | EVT IdxVT = Index.getValueType(); |
| 4976 | EVT EltTy = IdxVT.getVectorElementType(); |
| 4977 | if (TLI.shouldExtendGSIndex(VT: IdxVT, EltTy)) { |
| 4978 | EVT NewIdxVT = IdxVT.changeVectorElementType(EltVT: EltTy); |
| 4979 | Index = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: sdl, VT: NewIdxVT, Operand: Index); |
| 4980 | } |
| 4981 | |
| 4982 | SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale }; |
| 4983 | SDValue Scatter = DAG.getMaskedScatter(VTs: DAG.getVTList(VT: MVT::Other), MemVT: VT, dl: sdl, |
| 4984 | Ops, MMO, IndexType, IsTruncating: false); |
| 4985 | DAG.setRoot(Scatter); |
| 4986 | setValue(V: &I, NewN: Scatter); |
| 4987 | } |
| 4988 | |
| 4989 | void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) { |
| 4990 | SDLoc sdl = getCurSDLoc(); |
| 4991 | |
| 4992 | auto getMaskedLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0, |
| 4993 | Align &Alignment) { |
| 4994 | // @llvm.masked.load.*(Ptr, alignment, Mask, Src0) |
| 4995 | Ptr = I.getArgOperand(i: 0); |
| 4996 | Alignment = cast<ConstantInt>(Val: I.getArgOperand(i: 1))->getAlignValue(); |
| 4997 | Mask = I.getArgOperand(i: 2); |
| 4998 | Src0 = I.getArgOperand(i: 3); |
| 4999 | }; |
| 5000 | auto getExpandingLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0, |
| 5001 | Align &Alignment) { |
| 5002 | // @llvm.masked.expandload.*(Ptr, Mask, Src0) |
| 5003 | Ptr = I.getArgOperand(i: 0); |
| 5004 | Alignment = I.getParamAlign(ArgNo: 0).valueOrOne(); |
| 5005 | Mask = I.getArgOperand(i: 1); |
| 5006 | Src0 = I.getArgOperand(i: 2); |
| 5007 | }; |
| 5008 | |
| 5009 | Value *PtrOperand, *MaskOperand, *Src0Operand; |
| 5010 | Align Alignment; |
| 5011 | if (IsExpanding) |
| 5012 | getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment); |
| 5013 | else |
| 5014 | getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment); |
| 5015 | |
| 5016 | SDValue Ptr = getValue(V: PtrOperand); |
| 5017 | SDValue Src0 = getValue(V: Src0Operand); |
| 5018 | SDValue Mask = getValue(V: MaskOperand); |
| 5019 | SDValue Offset = DAG.getUNDEF(VT: Ptr.getValueType()); |
| 5020 | |
| 5021 | EVT VT = Src0.getValueType(); |
| 5022 | AAMDNodes AAInfo = I.getAAMetadata(); |
| 5023 | const MDNode *Ranges = getRangeMetadata(I); |
| 5024 | |
| 5025 | // Do not serialize masked loads of constant memory with anything. |
| 5026 | MemoryLocation ML = MemoryLocation::getAfter(Ptr: PtrOperand, AATags: AAInfo); |
| 5027 | bool AddToChain = !BatchAA || !BatchAA->pointsToConstantMemory(Loc: ML); |
| 5028 | |
| 5029 | SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode(); |
| 5030 | |
| 5031 | auto MMOFlags = MachineMemOperand::MOLoad; |
| 5032 | if (I.hasMetadata(KindID: LLVMContext::MD_nontemporal)) |
| 5033 | MMOFlags |= MachineMemOperand::MONonTemporal; |
| 5034 | |
| 5035 | MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( |
| 5036 | PtrInfo: MachinePointerInfo(PtrOperand), F: MMOFlags, |
| 5037 | Size: LocationSize::beforeOrAfterPointer(), BaseAlignment: Alignment, AAInfo, Ranges); |
| 5038 | |
| 5039 | const auto &TLI = DAG.getTargetLoweringInfo(); |
| 5040 | const auto &TTI = |
| 5041 | TLI.getTargetMachine().getTargetTransformInfo(F: *I.getFunction()); |
// Load and Res may end up referring to different nodes; both are output
// variables of the lowering below.
| 5044 | SDValue Load; |
| 5045 | SDValue Res; |
| 5046 | if (!IsExpanding && TTI.hasConditionalLoadStoreForType(Ty: Src0Operand->getType(), |
| 5047 | /*IsStore=*/false)) |
| 5048 | Res = TLI.visitMaskedLoad(DAG, DL: sdl, Chain: InChain, MMO, NewLoad&: Load, Ptr, PassThru: Src0, Mask); |
| 5049 | else |
| 5050 | Res = Load = |
| 5051 | DAG.getMaskedLoad(VT, dl: sdl, Chain: InChain, Base: Ptr, Offset, Mask, Src0, MemVT: VT, MMO, |
| 5052 | AM: ISD::UNINDEXED, ISD::NON_EXTLOAD, IsExpanding); |
| 5053 | if (AddToChain) |
| 5054 | PendingLoads.push_back(Elt: Load.getValue(R: 1)); |
| 5055 | setValue(V: &I, NewN: Res); |
| 5056 | } |
| 5057 | |
| 5058 | void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) { |
| 5059 | SDLoc sdl = getCurSDLoc(); |
| 5060 | |
| 5061 | // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0) |
| 5062 | const Value *Ptr = I.getArgOperand(i: 0); |
| 5063 | SDValue Src0 = getValue(V: I.getArgOperand(i: 3)); |
| 5064 | SDValue Mask = getValue(V: I.getArgOperand(i: 2)); |
| 5065 | |
| 5066 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 5067 | EVT VT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 5068 | Align Alignment = cast<ConstantInt>(Val: I.getArgOperand(i: 1)) |
| 5069 | ->getMaybeAlignValue() |
| 5070 | .value_or(u: DAG.getEVTAlign(MemoryVT: VT.getScalarType())); |
| 5071 | |
| 5072 | const MDNode *Ranges = getRangeMetadata(I); |
| 5073 | |
| 5074 | SDValue Root = DAG.getRoot(); |
| 5075 | SDValue Base; |
| 5076 | SDValue Index; |
| 5077 | ISD::MemIndexType IndexType; |
| 5078 | SDValue Scale; |
| 5079 | bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, SDB: this, |
| 5080 | CurBB: I.getParent(), ElemSize: VT.getScalarStoreSize()); |
| 5081 | unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace(); |
| 5082 | MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( |
| 5083 | PtrInfo: MachinePointerInfo(AS), F: MachineMemOperand::MOLoad, |
| 5084 | Size: LocationSize::beforeOrAfterPointer(), BaseAlignment: Alignment, AAInfo: I.getAAMetadata(), |
| 5085 | Ranges); |
| 5086 | |
| 5087 | if (!UniformBase) { |
| 5088 | Base = DAG.getConstant(Val: 0, DL: sdl, VT: TLI.getPointerTy(DL: DAG.getDataLayout())); |
| 5089 | Index = getValue(V: Ptr); |
| 5090 | IndexType = ISD::SIGNED_SCALED; |
| 5091 | Scale = DAG.getTargetConstant(Val: 1, DL: sdl, VT: TLI.getPointerTy(DL: DAG.getDataLayout())); |
| 5092 | } |
| 5093 | |
| 5094 | EVT IdxVT = Index.getValueType(); |
| 5095 | EVT EltTy = IdxVT.getVectorElementType(); |
| 5096 | if (TLI.shouldExtendGSIndex(VT: IdxVT, EltTy)) { |
| 5097 | EVT NewIdxVT = IdxVT.changeVectorElementType(EltVT: EltTy); |
| 5098 | Index = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: sdl, VT: NewIdxVT, Operand: Index); |
| 5099 | } |
| 5100 | |
| 5101 | SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale }; |
| 5102 | SDValue Gather = DAG.getMaskedGather(VTs: DAG.getVTList(VT1: VT, VT2: MVT::Other), MemVT: VT, dl: sdl, |
| 5103 | Ops, MMO, IndexType, ExtTy: ISD::NON_EXTLOAD); |
| 5104 | |
| 5105 | PendingLoads.push_back(Elt: Gather.getValue(R: 1)); |
| 5106 | setValue(V: &I, NewN: Gather); |
| 5107 | } |
| 5108 | |
| 5109 | void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) { |
| 5110 | SDLoc dl = getCurSDLoc(); |
| 5111 | AtomicOrdering SuccessOrdering = I.getSuccessOrdering(); |
| 5112 | AtomicOrdering FailureOrdering = I.getFailureOrdering(); |
| 5113 | SyncScope::ID SSID = I.getSyncScopeID(); |
| 5114 | |
| 5115 | SDValue InChain = getRoot(); |
| 5116 | |
| 5117 | MVT MemVT = getValue(V: I.getCompareOperand()).getSimpleValueType(); |
| 5118 | SDVTList VTs = DAG.getVTList(VT1: MemVT, VT2: MVT::i1, VT3: MVT::Other); |
| 5119 | |
| 5120 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 5121 | auto Flags = TLI.getAtomicMemOperandFlags(AI: I, DL: DAG.getDataLayout()); |
| 5122 | |
| 5123 | MachineFunction &MF = DAG.getMachineFunction(); |
| 5124 | MachineMemOperand *MMO = MF.getMachineMemOperand( |
| 5125 | PtrInfo: MachinePointerInfo(I.getPointerOperand()), F: Flags, Size: MemVT.getStoreSize(), |
| 5126 | BaseAlignment: DAG.getEVTAlign(MemoryVT: MemVT), AAInfo: AAMDNodes(), Ranges: nullptr, SSID, Ordering: SuccessOrdering, |
| 5127 | FailureOrdering); |
| 5128 | |
| 5129 | SDValue L = DAG.getAtomicCmpSwap(Opcode: ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, |
| 5130 | dl, MemVT, VTs, Chain: InChain, |
| 5131 | Ptr: getValue(V: I.getPointerOperand()), |
| 5132 | Cmp: getValue(V: I.getCompareOperand()), |
| 5133 | Swp: getValue(V: I.getNewValOperand()), MMO); |
| 5134 | |
| 5135 | SDValue OutChain = L.getValue(R: 2); |
| 5136 | |
| 5137 | setValue(V: &I, NewN: L); |
| 5138 | DAG.setRoot(OutChain); |
| 5139 | } |
| 5140 | |
| 5141 | void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) { |
| 5142 | SDLoc dl = getCurSDLoc(); |
| 5143 | ISD::NodeType NT; |
| 5144 | switch (I.getOperation()) { |
default: llvm_unreachable("Unknown atomicrmw operation");
| 5146 | case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break; |
| 5147 | case AtomicRMWInst::Add: NT = ISD::ATOMIC_LOAD_ADD; break; |
| 5148 | case AtomicRMWInst::Sub: NT = ISD::ATOMIC_LOAD_SUB; break; |
| 5149 | case AtomicRMWInst::And: NT = ISD::ATOMIC_LOAD_AND; break; |
| 5150 | case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break; |
| 5151 | case AtomicRMWInst::Or: NT = ISD::ATOMIC_LOAD_OR; break; |
| 5152 | case AtomicRMWInst::Xor: NT = ISD::ATOMIC_LOAD_XOR; break; |
| 5153 | case AtomicRMWInst::Max: NT = ISD::ATOMIC_LOAD_MAX; break; |
| 5154 | case AtomicRMWInst::Min: NT = ISD::ATOMIC_LOAD_MIN; break; |
| 5155 | case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break; |
| 5156 | case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break; |
| 5157 | case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break; |
| 5158 | case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break; |
| 5159 | case AtomicRMWInst::FMax: NT = ISD::ATOMIC_LOAD_FMAX; break; |
| 5160 | case AtomicRMWInst::FMin: NT = ISD::ATOMIC_LOAD_FMIN; break; |
| 5161 | case AtomicRMWInst::FMaximum: |
| 5162 | NT = ISD::ATOMIC_LOAD_FMAXIMUM; |
| 5163 | break; |
| 5164 | case AtomicRMWInst::FMinimum: |
| 5165 | NT = ISD::ATOMIC_LOAD_FMINIMUM; |
| 5166 | break; |
| 5167 | case AtomicRMWInst::UIncWrap: |
| 5168 | NT = ISD::ATOMIC_LOAD_UINC_WRAP; |
| 5169 | break; |
| 5170 | case AtomicRMWInst::UDecWrap: |
| 5171 | NT = ISD::ATOMIC_LOAD_UDEC_WRAP; |
| 5172 | break; |
| 5173 | case AtomicRMWInst::USubCond: |
| 5174 | NT = ISD::ATOMIC_LOAD_USUB_COND; |
| 5175 | break; |
| 5176 | case AtomicRMWInst::USubSat: |
| 5177 | NT = ISD::ATOMIC_LOAD_USUB_SAT; |
| 5178 | break; |
| 5179 | } |
| 5180 | AtomicOrdering Ordering = I.getOrdering(); |
| 5181 | SyncScope::ID SSID = I.getSyncScopeID(); |
| 5182 | |
| 5183 | SDValue InChain = getRoot(); |
| 5184 | |
| 5185 | auto MemVT = getValue(V: I.getValOperand()).getSimpleValueType(); |
| 5186 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 5187 | auto Flags = TLI.getAtomicMemOperandFlags(AI: I, DL: DAG.getDataLayout()); |
| 5188 | |
| 5189 | MachineFunction &MF = DAG.getMachineFunction(); |
| 5190 | MachineMemOperand *MMO = MF.getMachineMemOperand( |
| 5191 | PtrInfo: MachinePointerInfo(I.getPointerOperand()), F: Flags, Size: MemVT.getStoreSize(), |
| 5192 | BaseAlignment: DAG.getEVTAlign(MemoryVT: MemVT), AAInfo: AAMDNodes(), Ranges: nullptr, SSID, Ordering); |
| 5193 | |
| 5194 | SDValue L = |
| 5195 | DAG.getAtomic(Opcode: NT, dl, MemVT, Chain: InChain, |
| 5196 | Ptr: getValue(V: I.getPointerOperand()), Val: getValue(V: I.getValOperand()), |
| 5197 | MMO); |
| 5198 | |
| 5199 | SDValue OutChain = L.getValue(R: 1); |
| 5200 | |
| 5201 | setValue(V: &I, NewN: L); |
| 5202 | DAG.setRoot(OutChain); |
| 5203 | } |
| 5204 | |
| 5205 | void SelectionDAGBuilder::visitFence(const FenceInst &I) { |
| 5206 | SDLoc dl = getCurSDLoc(); |
| 5207 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 5208 | SDValue Ops[3]; |
| 5209 | Ops[0] = getRoot(); |
| 5210 | Ops[1] = DAG.getTargetConstant(Val: (unsigned)I.getOrdering(), DL: dl, |
| 5211 | VT: TLI.getFenceOperandTy(DL: DAG.getDataLayout())); |
| 5212 | Ops[2] = DAG.getTargetConstant(Val: I.getSyncScopeID(), DL: dl, |
| 5213 | VT: TLI.getFenceOperandTy(DL: DAG.getDataLayout())); |
| 5214 | SDValue N = DAG.getNode(Opcode: ISD::ATOMIC_FENCE, DL: dl, VT: MVT::Other, Ops); |
| 5215 | setValue(V: &I, NewN: N); |
| 5216 | DAG.setRoot(N); |
| 5217 | } |
| 5218 | |
| 5219 | void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) { |
| 5220 | SDLoc dl = getCurSDLoc(); |
| 5221 | AtomicOrdering Order = I.getOrdering(); |
| 5222 | SyncScope::ID SSID = I.getSyncScopeID(); |
| 5223 | |
| 5224 | SDValue InChain = getRoot(); |
| 5225 | |
| 5226 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 5227 | EVT VT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 5228 | EVT MemVT = TLI.getMemValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 5229 | |
| 5230 | if (!TLI.supportsUnalignedAtomics() && |
| 5231 | I.getAlign().value() < MemVT.getSizeInBits() / 8) |
report_fatal_error(reason: "Cannot generate unaligned atomic load");
| 5233 | |
| 5234 | auto Flags = TLI.getLoadMemOperandFlags(LI: I, DL: DAG.getDataLayout(), AC, LibInfo); |
| 5235 | |
| 5236 | const MDNode *Ranges = getRangeMetadata(I); |
| 5237 | MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( |
| 5238 | PtrInfo: MachinePointerInfo(I.getPointerOperand()), F: Flags, Size: MemVT.getStoreSize(), |
| 5239 | BaseAlignment: I.getAlign(), AAInfo: AAMDNodes(), Ranges, SSID, Ordering: Order); |
| 5240 | |
| 5241 | InChain = TLI.prepareVolatileOrAtomicLoad(Chain: InChain, DL: dl, DAG); |
| 5242 | |
| 5243 | SDValue Ptr = getValue(V: I.getPointerOperand()); |
| 5244 | SDValue L = |
| 5245 | DAG.getAtomicLoad(ExtType: ISD::NON_EXTLOAD, dl, MemVT, VT: MemVT, Chain: InChain, Ptr, MMO); |
| 5246 | |
| 5247 | SDValue OutChain = L.getValue(R: 1); |
| 5248 | if (MemVT != VT) |
| 5249 | L = DAG.getPtrExtOrTrunc(Op: L, DL: dl, VT); |
| 5250 | |
| 5251 | setValue(V: &I, NewN: L); |
| 5252 | DAG.setRoot(OutChain); |
| 5253 | } |
| 5254 | |
| 5255 | void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) { |
| 5256 | SDLoc dl = getCurSDLoc(); |
| 5257 | |
| 5258 | AtomicOrdering Ordering = I.getOrdering(); |
| 5259 | SyncScope::ID SSID = I.getSyncScopeID(); |
| 5260 | |
| 5261 | SDValue InChain = getRoot(); |
| 5262 | |
| 5263 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 5264 | EVT MemVT = |
| 5265 | TLI.getMemValueType(DL: DAG.getDataLayout(), Ty: I.getValueOperand()->getType()); |
| 5266 | |
| 5267 | if (!TLI.supportsUnalignedAtomics() && |
| 5268 | I.getAlign().value() < MemVT.getSizeInBits() / 8) |
report_fatal_error(reason: "Cannot generate unaligned atomic store");
| 5270 | |
| 5271 | auto Flags = TLI.getStoreMemOperandFlags(SI: I, DL: DAG.getDataLayout()); |
| 5272 | |
| 5273 | MachineFunction &MF = DAG.getMachineFunction(); |
| 5274 | MachineMemOperand *MMO = MF.getMachineMemOperand( |
| 5275 | PtrInfo: MachinePointerInfo(I.getPointerOperand()), F: Flags, Size: MemVT.getStoreSize(), |
| 5276 | BaseAlignment: I.getAlign(), AAInfo: AAMDNodes(), Ranges: nullptr, SSID, Ordering); |
| 5277 | |
| 5278 | SDValue Val = getValue(V: I.getValueOperand()); |
| 5279 | if (Val.getValueType() != MemVT) |
| 5280 | Val = DAG.getPtrExtOrTrunc(Op: Val, DL: dl, VT: MemVT); |
| 5281 | SDValue Ptr = getValue(V: I.getPointerOperand()); |
| 5282 | |
| 5283 | SDValue OutChain = |
| 5284 | DAG.getAtomic(Opcode: ISD::ATOMIC_STORE, dl, MemVT, Chain: InChain, Ptr: Val, Val: Ptr, MMO); |
| 5285 | |
| 5286 | setValue(V: &I, NewN: OutChain); |
| 5287 | DAG.setRoot(OutChain); |
| 5288 | } |
| 5289 | |
| 5290 | /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC |
| 5291 | /// node. |
| 5292 | void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I, |
| 5293 | unsigned Intrinsic) { |
| 5294 | // Ignore the callsite's attributes. A specific call site may be marked with |
| 5295 | // readnone, but the lowering code will expect the chain based on the |
| 5296 | // definition. |
| 5297 | const Function *F = I.getCalledFunction(); |
| 5298 | bool HasChain = !F->doesNotAccessMemory(); |
| 5299 | bool OnlyLoad = |
| 5300 | HasChain && F->onlyReadsMemory() && F->willReturn() && F->doesNotThrow(); |
| 5301 | |
| 5302 | // Build the operand list. |
| 5303 | SmallVector<SDValue, 8> Ops; |
| 5304 | if (HasChain) { // If this intrinsic has side-effects, chainify it. |
| 5305 | if (OnlyLoad) { |
| 5306 | // We don't need to serialize loads against other loads. |
| 5307 | Ops.push_back(Elt: DAG.getRoot()); |
| 5308 | } else { |
| 5309 | Ops.push_back(Elt: getRoot()); |
| 5310 | } |
| 5311 | } |
| 5312 | |
// Info is set by getTgtMemIntrinsic.
| 5314 | TargetLowering::IntrinsicInfo Info; |
| 5315 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 5316 | bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, |
| 5317 | DAG.getMachineFunction(), |
| 5318 | Intrinsic); |
| 5319 | |
// Add the intrinsic ID as an integer operand if it's not a target-specific
// memory node, i.e. also when getTgtMemIntrinsic selected a generic
// INTRINSIC_VOID or INTRINSIC_W_CHAIN opcode.
| 5321 | if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID || |
| 5322 | Info.opc == ISD::INTRINSIC_W_CHAIN) |
| 5323 | Ops.push_back(Elt: DAG.getTargetConstant(Val: Intrinsic, DL: getCurSDLoc(), |
| 5324 | VT: TLI.getPointerTy(DL: DAG.getDataLayout()))); |
| 5325 | |
| 5326 | // Add all operands of the call to the operand list. |
| 5327 | for (unsigned i = 0, e = I.arg_size(); i != e; ++i) { |
| 5328 | const Value *Arg = I.getArgOperand(i); |
| 5329 | if (!I.paramHasAttr(ArgNo: i, Kind: Attribute::ImmArg)) { |
| 5330 | Ops.push_back(Elt: getValue(V: Arg)); |
| 5331 | continue; |
| 5332 | } |
| 5333 | |
| 5334 | // Use TargetConstant instead of a regular constant for immarg. |
| 5335 | EVT VT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: Arg->getType(), AllowUnknown: true); |
| 5336 | if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val: Arg)) { |
| 5337 | assert(CI->getBitWidth() <= 64 && |
| 5338 | "large intrinsic immediates not handled" ); |
| 5339 | Ops.push_back(Elt: DAG.getTargetConstant(Val: *CI, DL: SDLoc(), VT)); |
| 5340 | } else { |
| 5341 | Ops.push_back( |
| 5342 | Elt: DAG.getTargetConstantFP(Val: *cast<ConstantFP>(Val: Arg), DL: SDLoc(), VT)); |
| 5343 | } |
| 5344 | } |
| 5345 | |
| 5346 | SmallVector<EVT, 4> ValueVTs; |
| 5347 | ComputeValueVTs(TLI, DL: DAG.getDataLayout(), Ty: I.getType(), ValueVTs); |
| 5348 | |
| 5349 | if (HasChain) |
| 5350 | ValueVTs.push_back(Elt: MVT::Other); |
| 5351 | |
| 5352 | SDVTList VTs = DAG.getVTList(VTs: ValueVTs); |
| 5353 | |
| 5354 | // Propagate fast-math-flags from IR to node(s). |
| 5355 | SDNodeFlags Flags; |
| 5356 | if (auto *FPMO = dyn_cast<FPMathOperator>(Val: &I)) |
| 5357 | Flags.copyFMF(FPMO: *FPMO); |
| 5358 | SelectionDAG::FlagInserter FlagsInserter(DAG, Flags); |
| 5359 | |
| 5360 | // Create the node. |
| 5361 | SDValue Result; |
| 5362 | |
| 5363 | if (auto Bundle = I.getOperandBundle(ID: LLVMContext::OB_convergencectrl)) { |
| 5364 | auto *Token = Bundle->Inputs[0].get(); |
| 5365 | SDValue ConvControlToken = getValue(V: Token); |
| 5366 | assert(Ops.back().getValueType() != MVT::Glue && |
| 5367 | "Did not expected another glue node here." ); |
| 5368 | ConvControlToken = |
| 5369 | DAG.getNode(Opcode: ISD::CONVERGENCECTRL_GLUE, DL: {}, VT: MVT::Glue, Operand: ConvControlToken); |
| 5370 | Ops.push_back(Elt: ConvControlToken); |
| 5371 | } |
| 5372 | |
| 5373 | // In some cases, custom collection of operands from CallInst I may be needed. |
| 5374 | TLI.CollectTargetIntrinsicOperands(I, Ops, DAG); |
| 5375 | if (IsTgtIntrinsic) { |
// This is a target intrinsic that touches memory.
| 5377 | // |
| 5378 | // TODO: We currently just fallback to address space 0 if getTgtMemIntrinsic |
| 5379 | // didn't yield anything useful. |
| 5380 | MachinePointerInfo MPI; |
| 5381 | if (Info.ptrVal) |
| 5382 | MPI = MachinePointerInfo(Info.ptrVal, Info.offset); |
| 5383 | else if (Info.fallbackAddressSpace) |
| 5384 | MPI = MachinePointerInfo(*Info.fallbackAddressSpace); |
| 5385 | EVT MemVT = Info.memVT; |
| 5386 | LocationSize Size = LocationSize::precise(Value: Info.size); |
| 5387 | if (Size.hasValue() && !Size.getValue()) |
| 5388 | Size = LocationSize::precise(Value: MemVT.getStoreSize()); |
| 5389 | Align Alignment = Info.align.value_or(u: DAG.getEVTAlign(MemoryVT: MemVT)); |
| 5390 | MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( |
| 5391 | PtrInfo: MPI, F: Info.flags, Size, BaseAlignment: Alignment, AAInfo: I.getAAMetadata(), /*Ranges=*/nullptr, |
| 5392 | SSID: Info.ssid, Ordering: Info.order, FailureOrdering: Info.failureOrder); |
| 5393 | Result = |
| 5394 | DAG.getMemIntrinsicNode(Opcode: Info.opc, dl: getCurSDLoc(), VTList: VTs, Ops, MemVT, MMO); |
| 5395 | } else if (!HasChain) { |
| 5396 | Result = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: getCurSDLoc(), VTList: VTs, Ops); |
| 5397 | } else if (!I.getType()->isVoidTy()) { |
| 5398 | Result = DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL: getCurSDLoc(), VTList: VTs, Ops); |
| 5399 | } else { |
| 5400 | Result = DAG.getNode(Opcode: ISD::INTRINSIC_VOID, DL: getCurSDLoc(), VTList: VTs, Ops); |
| 5401 | } |
| 5402 | |
| 5403 | if (HasChain) { |
| 5404 | SDValue Chain = Result.getValue(R: Result.getNode()->getNumValues()-1); |
| 5405 | if (OnlyLoad) |
| 5406 | PendingLoads.push_back(Elt: Chain); |
| 5407 | else |
| 5408 | DAG.setRoot(Chain); |
| 5409 | } |
| 5410 | |
| 5411 | if (!I.getType()->isVoidTy()) { |
| 5412 | if (!isa<VectorType>(Val: I.getType())) |
| 5413 | Result = lowerRangeToAssertZExt(DAG, I, Op: Result); |
| 5414 | |
| 5415 | MaybeAlign Alignment = I.getRetAlign(); |
| 5416 | |
| 5417 | // Insert `assertalign` node if there's an alignment. |
| 5418 | if (InsertAssertAlign && Alignment) { |
| 5419 | Result = |
| 5420 | DAG.getAssertAlign(DL: getCurSDLoc(), V: Result, A: Alignment.valueOrOne()); |
| 5421 | } |
| 5422 | } |
| 5423 | |
| 5424 | setValue(V: &I, NewN: Result); |
| 5425 | } |
| 5426 | |
| 5427 | /// GetSignificand - Get the significand and build it into a floating-point |
| 5428 | /// number with exponent of 1: |
| 5429 | /// |
| 5430 | /// Op = (Op & 0x007fffff) | 0x3f800000; |
| 5431 | /// |
/// where Op is the i32 bit pattern of the floating-point value.
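/// For example, 3.1415927f has bit pattern 0x40490fdb; masking in the low 23
/// bits and OR-ing in 0x3f800000 gives 0x3fc90fdb, i.e. ~1.5707964f, the
/// significand rescaled into [1.0, 2.0).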
| 5433 | static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) { |
| 5434 | SDValue t1 = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, N1: Op, |
| 5435 | N2: DAG.getConstant(Val: 0x007fffff, DL: dl, VT: MVT::i32)); |
| 5436 | SDValue t2 = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: MVT::i32, N1: t1, |
| 5437 | N2: DAG.getConstant(Val: 0x3f800000, DL: dl, VT: MVT::i32)); |
| 5438 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f32, Operand: t2); |
| 5439 | } |
| 5440 | |
| 5441 | /// GetExponent - Get the exponent: |
| 5442 | /// |
| 5443 | /// (float)(int)(((Op & 0x7f800000) >> 23) - 127); |
| 5444 | /// |
/// where Op is the i32 bit pattern of the floating-point value.
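/// For example, 3.1415927f (bit pattern 0x40490fdb) has exponent field
/// (0x40490fdb & 0x7f800000) >> 23 == 128, so GetExponent returns
/// (float)(128 - 127) == 1.0f.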
| 5446 | static SDValue GetExponent(SelectionDAG &DAG, SDValue Op, |
| 5447 | const TargetLowering &TLI, const SDLoc &dl) { |
| 5448 | SDValue t0 = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, N1: Op, |
| 5449 | N2: DAG.getConstant(Val: 0x7f800000, DL: dl, VT: MVT::i32)); |
| 5450 | SDValue t1 = DAG.getNode( |
| 5451 | Opcode: ISD::SRL, DL: dl, VT: MVT::i32, N1: t0, |
| 5452 | N2: DAG.getConstant(Val: 23, DL: dl, |
| 5453 | VT: TLI.getShiftAmountTy(LHSTy: MVT::i32, DL: DAG.getDataLayout()))); |
| 5454 | SDValue t2 = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, N1: t1, |
| 5455 | N2: DAG.getConstant(Val: 127, DL: dl, VT: MVT::i32)); |
| 5456 | return DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::f32, Operand: t2); |
| 5457 | } |
| 5458 | |
| 5459 | /// getF32Constant - Get 32-bit floating point constant. |
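/// For example, getF32Constant(DAG, 0x3f800000, dl) materializes 1.0f, and
/// getF32Constant(DAG, 0x40549a78, dl) materializes log2(10) ~= 3.3219281f.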
| 5460 | static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt, |
| 5461 | const SDLoc &dl) { |
| 5462 | return DAG.getConstantFP(Val: APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), DL: dl, |
| 5463 | VT: MVT::f32); |
| 5464 | } |
| 5465 | |
| 5466 | static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl, |
| 5467 | SelectionDAG &DAG) { |
| 5468 | // TODO: What fast-math-flags should be set on the floating-point nodes? |
| 5469 | |
// IntegerPartOfX = (int32_t)t0;
| 5471 | SDValue IntegerPartOfX = DAG.getNode(Opcode: ISD::FP_TO_SINT, DL: dl, VT: MVT::i32, Operand: t0); |
| 5472 | |
| 5473 | // FractionalPartOfX = t0 - (float)IntegerPartOfX; |
| 5474 | SDValue t1 = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::f32, Operand: IntegerPartOfX); |
| 5475 | SDValue X = DAG.getNode(Opcode: ISD::FSUB, DL: dl, VT: MVT::f32, N1: t0, N2: t1); |
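// The identity being exploited is 2^t0 == 2^IntegerPartOfX * 2^X with
// X = t0 - (float)IntegerPartOfX; e.g. 2^3.3219281 == 2^3 * 2^0.3219281
// ~= 8 * 1.25 == 10.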
| 5476 | |
| 5477 | // IntegerPartOfX <<= 23; |
| 5478 | IntegerPartOfX = |
| 5479 | DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: MVT::i32, N1: IntegerPartOfX, |
| 5480 | N2: DAG.getConstant(Val: 23, DL: dl, |
| 5481 | VT: DAG.getTargetLoweringInfo().getShiftAmountTy( |
| 5482 | LHSTy: MVT::i32, DL: DAG.getDataLayout()))); |
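// Shifting the integer part up by 23 bits places it in the f32 exponent
// field; the final integer ADD below then scales the polynomial result by
// 2^IntegerPartOfX without any floating-point operations.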
| 5483 | |
| 5484 | SDValue TwoToFractionalPartOfX; |
| 5485 | if (LimitFloatPrecision <= 6) { |
| 5486 | // For floating-point precision of 6: |
| 5487 | // |
| 5488 | // TwoToFractionalPartOfX = |
| 5489 | // 0.997535578f + |
| 5490 | // (0.735607626f + 0.252464424f * x) * x; |
| 5491 | // |
| 5492 | // error 0.0144103317, which is 6 bits |
| 5493 | SDValue t2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: X, |
| 5494 | N2: getF32Constant(DAG, Flt: 0x3e814304, dl)); |
| 5495 | SDValue t3 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t2, |
| 5496 | N2: getF32Constant(DAG, Flt: 0x3f3c50c8, dl)); |
| 5497 | SDValue t4 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t3, N2: X); |
| 5498 | TwoToFractionalPartOfX = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t4, |
| 5499 | N2: getF32Constant(DAG, Flt: 0x3f7f5e7e, dl)); |
| 5500 | } else if (LimitFloatPrecision <= 12) { |
| 5501 | // For floating-point precision of 12: |
| 5502 | // |
| 5503 | // TwoToFractionalPartOfX = |
| 5504 | // 0.999892986f + |
| 5505 | // (0.696457318f + |
| 5506 | // (0.224338339f + 0.792043434e-1f * x) * x) * x; |
| 5507 | // |
| 5508 | // error 0.000107046256, which is 13 to 14 bits |
| 5509 | SDValue t2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: X, |
| 5510 | N2: getF32Constant(DAG, Flt: 0x3da235e3, dl)); |
| 5511 | SDValue t3 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t2, |
| 5512 | N2: getF32Constant(DAG, Flt: 0x3e65b8f3, dl)); |
| 5513 | SDValue t4 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t3, N2: X); |
| 5514 | SDValue t5 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t4, |
| 5515 | N2: getF32Constant(DAG, Flt: 0x3f324b07, dl)); |
| 5516 | SDValue t6 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t5, N2: X); |
| 5517 | TwoToFractionalPartOfX = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t6, |
| 5518 | N2: getF32Constant(DAG, Flt: 0x3f7ff8fd, dl)); |
| 5519 | } else { // LimitFloatPrecision <= 18 |
| 5520 | // For floating-point precision of 18: |
| 5521 | // |
| 5522 | // TwoToFractionalPartOfX = |
| 5523 | // 0.999999982f + |
| 5524 | // (0.693148872f + |
| 5525 | // (0.240227044f + |
| 5526 | // (0.554906021e-1f + |
| 5527 | // (0.961591928e-2f + |
| 5528 | // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x; |
| 5529 | // error 2.47208000*10^(-7), which is better than 18 bits |
| 5530 | SDValue t2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: X, |
| 5531 | N2: getF32Constant(DAG, Flt: 0x3924b03e, dl)); |
| 5532 | SDValue t3 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t2, |
| 5533 | N2: getF32Constant(DAG, Flt: 0x3ab24b87, dl)); |
| 5534 | SDValue t4 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t3, N2: X); |
| 5535 | SDValue t5 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t4, |
| 5536 | N2: getF32Constant(DAG, Flt: 0x3c1d8c17, dl)); |
| 5537 | SDValue t6 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t5, N2: X); |
| 5538 | SDValue t7 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t6, |
| 5539 | N2: getF32Constant(DAG, Flt: 0x3d634a1d, dl)); |
| 5540 | SDValue t8 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t7, N2: X); |
| 5541 | SDValue t9 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t8, |
| 5542 | N2: getF32Constant(DAG, Flt: 0x3e75fe14, dl)); |
| 5543 | SDValue t10 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t9, N2: X); |
| 5544 | SDValue t11 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t10, |
| 5545 | N2: getF32Constant(DAG, Flt: 0x3f317234, dl)); |
| 5546 | SDValue t12 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t11, N2: X); |
| 5547 | TwoToFractionalPartOfX = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t12, |
| 5548 | N2: getF32Constant(DAG, Flt: 0x3f800000, dl)); |
| 5549 | } |
| 5550 | |
| 5551 | // Add the exponent into the result in integer domain. |
| 5552 | SDValue t13 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::i32, Operand: TwoToFractionalPartOfX); |
| 5553 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f32, |
| 5554 | Operand: DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::i32, N1: t13, N2: IntegerPartOfX)); |
| 5555 | } |
| 5556 | |
| 5557 | /// expandExp - Lower an exp intrinsic. Handles the special sequences for |
| 5558 | /// limited-precision mode. |
| 5559 | static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, |
| 5560 | const TargetLowering &TLI, SDNodeFlags Flags) { |
| 5561 | if (Op.getValueType() == MVT::f32 && |
| 5562 | LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { |
| 5563 | |
| 5564 | // Put the exponent in the right bit position for later addition to the |
| 5565 | // final result: |
| 5566 | // |
| 5567 | // t0 = Op * log2(e) |
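//
// using the identity exp(Op) == 2^(Op * log2(e)); numbers::log2ef below is
// log2(e) ~= 1.4426950f.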
| 5568 | |
| 5569 | // TODO: What fast-math-flags should be set here? |
| 5570 | SDValue t0 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: Op, |
| 5571 | N2: DAG.getConstantFP(Val: numbers::log2ef, DL: dl, VT: MVT::f32)); |
| 5572 | return getLimitedPrecisionExp2(t0, dl, DAG); |
| 5573 | } |
| 5574 | |
| 5575 | // No special expansion. |
| 5576 | return DAG.getNode(Opcode: ISD::FEXP, DL: dl, VT: Op.getValueType(), Operand: Op, Flags); |
| 5577 | } |
| 5578 | |
| 5579 | /// expandLog - Lower a log intrinsic. Handles the special sequences for |
| 5580 | /// limited-precision mode. |
| 5581 | static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, |
| 5582 | const TargetLowering &TLI, SDNodeFlags Flags) { |
| 5583 | // TODO: What fast-math-flags should be set on the floating-point nodes? |
| 5584 | |
| 5585 | if (Op.getValueType() == MVT::f32 && |
| 5586 | LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { |
| 5587 | SDValue Op1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::i32, Operand: Op); |
| 5588 | |
| 5589 | // Scale the exponent by log(2). |
| 5590 | SDValue Exp = GetExponent(DAG, Op: Op1, TLI, dl); |
| 5591 | SDValue LogOfExponent = |
| 5592 | DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: Exp, |
| 5593 | N2: DAG.getConstantFP(Val: numbers::ln2f, DL: dl, VT: MVT::f32)); |
| 5594 | |
| 5595 | // Get the significand and build it into a floating-point number with |
| 5596 | // exponent of 1. |
| 5597 | SDValue X = GetSignificand(DAG, Op: Op1, dl); |
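// The decomposition used here: log(x) == log(2^e * m) == e * ln(2) + log(m)
// with the significand m in [1, 2), so the polynomial below only needs to
// approximate log over [1, 2).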
| 5598 | |
| 5599 | SDValue LogOfMantissa; |
| 5600 | if (LimitFloatPrecision <= 6) { |
| 5601 | // For floating-point precision of 6: |
| 5602 | // |
| 5603 | // LogofMantissa = |
| 5604 | // -1.1609546f + |
| 5605 | // (1.4034025f - 0.23903021f * x) * x; |
| 5606 | // |
| 5607 | // error 0.0034276066, which is better than 8 bits |
| 5608 | SDValue t0 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: X, |
| 5609 | N2: getF32Constant(DAG, Flt: 0xbe74c456, dl)); |
| 5610 | SDValue t1 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t0, |
| 5611 | N2: getF32Constant(DAG, Flt: 0x3fb3a2b1, dl)); |
| 5612 | SDValue t2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t1, N2: X); |
| 5613 | LogOfMantissa = DAG.getNode(Opcode: ISD::FSUB, DL: dl, VT: MVT::f32, N1: t2, |
| 5614 | N2: getF32Constant(DAG, Flt: 0x3f949a29, dl)); |
| 5615 | } else if (LimitFloatPrecision <= 12) { |
| 5616 | // For floating-point precision of 12: |
| 5617 | // |
| 5618 | // LogOfMantissa = |
| 5619 | // -1.7417939f + |
| 5620 | // (2.8212026f + |
| 5621 | // (-1.4699568f + |
| 5622 | // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x; |
| 5623 | // |
| 5624 | // error 0.000061011436, which is 14 bits |
| 5625 | SDValue t0 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: X, |
| 5626 | N2: getF32Constant(DAG, Flt: 0xbd67b6d6, dl)); |
| 5627 | SDValue t1 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t0, |
| 5628 | N2: getF32Constant(DAG, Flt: 0x3ee4f4b8, dl)); |
| 5629 | SDValue t2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t1, N2: X); |
| 5630 | SDValue t3 = DAG.getNode(Opcode: ISD::FSUB, DL: dl, VT: MVT::f32, N1: t2, |
| 5631 | N2: getF32Constant(DAG, Flt: 0x3fbc278b, dl)); |
| 5632 | SDValue t4 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t3, N2: X); |
| 5633 | SDValue t5 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t4, |
| 5634 | N2: getF32Constant(DAG, Flt: 0x40348e95, dl)); |
| 5635 | SDValue t6 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t5, N2: X); |
| 5636 | LogOfMantissa = DAG.getNode(Opcode: ISD::FSUB, DL: dl, VT: MVT::f32, N1: t6, |
| 5637 | N2: getF32Constant(DAG, Flt: 0x3fdef31a, dl)); |
| 5638 | } else { // LimitFloatPrecision <= 18 |
| 5639 | // For floating-point precision of 18: |
| 5640 | // |
| 5641 | // LogOfMantissa = |
| 5642 | // -2.1072184f + |
| 5643 | // (4.2372794f + |
| 5644 | // (-3.7029485f + |
| 5645 | // (2.2781945f + |
| 5646 | // (-0.87823314f + |
| 5647 | // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x; |
| 5648 | // |
| 5649 | // error 0.0000023660568, which is better than 18 bits |
| 5650 | SDValue t0 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: X, |
| 5651 | N2: getF32Constant(DAG, Flt: 0xbc91e5ac, dl)); |
| 5652 | SDValue t1 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t0, |
| 5653 | N2: getF32Constant(DAG, Flt: 0x3e4350aa, dl)); |
| 5654 | SDValue t2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t1, N2: X); |
| 5655 | SDValue t3 = DAG.getNode(Opcode: ISD::FSUB, DL: dl, VT: MVT::f32, N1: t2, |
| 5656 | N2: getF32Constant(DAG, Flt: 0x3f60d3e3, dl)); |
| 5657 | SDValue t4 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t3, N2: X); |
| 5658 | SDValue t5 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t4, |
| 5659 | N2: getF32Constant(DAG, Flt: 0x4011cdf0, dl)); |
| 5660 | SDValue t6 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t5, N2: X); |
| 5661 | SDValue t7 = DAG.getNode(Opcode: ISD::FSUB, DL: dl, VT: MVT::f32, N1: t6, |
| 5662 | N2: getF32Constant(DAG, Flt: 0x406cfd1c, dl)); |
| 5663 | SDValue t8 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t7, N2: X); |
| 5664 | SDValue t9 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t8, |
| 5665 | N2: getF32Constant(DAG, Flt: 0x408797cb, dl)); |
| 5666 | SDValue t10 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t9, N2: X); |
| 5667 | LogOfMantissa = DAG.getNode(Opcode: ISD::FSUB, DL: dl, VT: MVT::f32, N1: t10, |
| 5668 | N2: getF32Constant(DAG, Flt: 0x4006dcab, dl)); |
| 5669 | } |
| 5670 | |
| 5671 | return DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: LogOfExponent, N2: LogOfMantissa); |
| 5672 | } |
| 5673 | |
| 5674 | // No special expansion. |
| 5675 | return DAG.getNode(Opcode: ISD::FLOG, DL: dl, VT: Op.getValueType(), Operand: Op, Flags); |
| 5676 | } |
| 5677 | |
| 5678 | /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for |
| 5679 | /// limited-precision mode. |
| 5680 | static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, |
| 5681 | const TargetLowering &TLI, SDNodeFlags Flags) { |
| 5682 | // TODO: What fast-math-flags should be set on the floating-point nodes? |
| 5683 | |
| 5684 | if (Op.getValueType() == MVT::f32 && |
| 5685 | LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { |
| 5686 | SDValue Op1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::i32, Operand: Op); |
| 5687 | |
| 5688 | // Get the exponent. |
| 5689 | SDValue LogOfExponent = GetExponent(DAG, Op: Op1, TLI, dl); |
| 5690 | |
| 5691 | // Get the significand and build it into a floating-point number with |
| 5692 | // exponent of 1. |
| 5693 | SDValue X = GetSignificand(DAG, Op: Op1, dl); |
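// The decomposition used here: log2(x) == log2(2^e * m) == e + log2(m)
// with the significand m in [1, 2).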
| 5694 | |
// Different possible minimax approximations of log2 over the significand
// range [1,2], for various degrees of accuracy.
| 5697 | SDValue Log2ofMantissa; |
| 5698 | if (LimitFloatPrecision <= 6) { |
| 5699 | // For floating-point precision of 6: |
| 5700 | // |
| 5701 | // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x; |
| 5702 | // |
| 5703 | // error 0.0049451742, which is more than 7 bits |
| 5704 | SDValue t0 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: X, |
| 5705 | N2: getF32Constant(DAG, Flt: 0xbeb08fe0, dl)); |
| 5706 | SDValue t1 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t0, |
| 5707 | N2: getF32Constant(DAG, Flt: 0x40019463, dl)); |
| 5708 | SDValue t2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t1, N2: X); |
| 5709 | Log2ofMantissa = DAG.getNode(Opcode: ISD::FSUB, DL: dl, VT: MVT::f32, N1: t2, |
| 5710 | N2: getF32Constant(DAG, Flt: 0x3fd6633d, dl)); |
| 5711 | } else if (LimitFloatPrecision <= 12) { |
| 5712 | // For floating-point precision of 12: |
| 5713 | // |
| 5714 | // Log2ofMantissa = |
| 5715 | // -2.51285454f + |
| 5716 | // (4.07009056f + |
| 5717 | // (-2.12067489f + |
| 5718 | // (.645142248f - 0.816157886e-1f * x) * x) * x) * x; |
| 5719 | // |
| 5720 | // error 0.0000876136000, which is better than 13 bits |
| 5721 | SDValue t0 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: X, |
| 5722 | N2: getF32Constant(DAG, Flt: 0xbda7262e, dl)); |
| 5723 | SDValue t1 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t0, |
| 5724 | N2: getF32Constant(DAG, Flt: 0x3f25280b, dl)); |
| 5725 | SDValue t2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t1, N2: X); |
| 5726 | SDValue t3 = DAG.getNode(Opcode: ISD::FSUB, DL: dl, VT: MVT::f32, N1: t2, |
| 5727 | N2: getF32Constant(DAG, Flt: 0x4007b923, dl)); |
| 5728 | SDValue t4 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t3, N2: X); |
| 5729 | SDValue t5 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t4, |
| 5730 | N2: getF32Constant(DAG, Flt: 0x40823e2f, dl)); |
| 5731 | SDValue t6 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t5, N2: X); |
| 5732 | Log2ofMantissa = DAG.getNode(Opcode: ISD::FSUB, DL: dl, VT: MVT::f32, N1: t6, |
| 5733 | N2: getF32Constant(DAG, Flt: 0x4020d29c, dl)); |
| 5734 | } else { // LimitFloatPrecision <= 18 |
| 5735 | // For floating-point precision of 18: |
| 5736 | // |
| 5737 | // Log2ofMantissa = |
| 5738 | // -3.0400495f + |
| 5739 | // (6.1129976f + |
| 5740 | // (-5.3420409f + |
| 5741 | // (3.2865683f + |
| 5742 | // (-1.2669343f + |
| 5743 | // (0.27515199f - |
| 5744 | // 0.25691327e-1f * x) * x) * x) * x) * x) * x; |
| 5745 | // |
| 5746 | // error 0.0000018516, which is better than 18 bits |
| 5747 | SDValue t0 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: X, |
| 5748 | N2: getF32Constant(DAG, Flt: 0xbcd2769e, dl)); |
| 5749 | SDValue t1 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t0, |
| 5750 | N2: getF32Constant(DAG, Flt: 0x3e8ce0b9, dl)); |
| 5751 | SDValue t2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t1, N2: X); |
| 5752 | SDValue t3 = DAG.getNode(Opcode: ISD::FSUB, DL: dl, VT: MVT::f32, N1: t2, |
| 5753 | N2: getF32Constant(DAG, Flt: 0x3fa22ae7, dl)); |
| 5754 | SDValue t4 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t3, N2: X); |
| 5755 | SDValue t5 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t4, |
| 5756 | N2: getF32Constant(DAG, Flt: 0x40525723, dl)); |
| 5757 | SDValue t6 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t5, N2: X); |
| 5758 | SDValue t7 = DAG.getNode(Opcode: ISD::FSUB, DL: dl, VT: MVT::f32, N1: t6, |
| 5759 | N2: getF32Constant(DAG, Flt: 0x40aaf200, dl)); |
| 5760 | SDValue t8 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t7, N2: X); |
| 5761 | SDValue t9 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t8, |
| 5762 | N2: getF32Constant(DAG, Flt: 0x40c39dad, dl)); |
| 5763 | SDValue t10 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t9, N2: X); |
| 5764 | Log2ofMantissa = DAG.getNode(Opcode: ISD::FSUB, DL: dl, VT: MVT::f32, N1: t10, |
| 5765 | N2: getF32Constant(DAG, Flt: 0x4042902c, dl)); |
| 5766 | } |
| 5767 | |
| 5768 | return DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: LogOfExponent, N2: Log2ofMantissa); |
| 5769 | } |
| 5770 | |
| 5771 | // No special expansion. |
| 5772 | return DAG.getNode(Opcode: ISD::FLOG2, DL: dl, VT: Op.getValueType(), Operand: Op, Flags); |
| 5773 | } |
| 5774 | |
| 5775 | /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for |
| 5776 | /// limited-precision mode. |
| 5777 | static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, |
| 5778 | const TargetLowering &TLI, SDNodeFlags Flags) { |
| 5779 | // TODO: What fast-math-flags should be set on the floating-point nodes? |
| 5780 | |
| 5781 | if (Op.getValueType() == MVT::f32 && |
| 5782 | LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { |
| 5783 | SDValue Op1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::i32, Operand: Op); |
| 5784 | |
| 5785 | // Scale the exponent by log10(2) [0.30102999f]. |
| 5786 | SDValue Exp = GetExponent(DAG, Op: Op1, TLI, dl); |
| 5787 | SDValue LogOfExponent = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: Exp, |
| 5788 | N2: getF32Constant(DAG, Flt: 0x3e9a209a, dl)); |
| 5789 | |
| 5790 | // Get the significand and build it into a floating-point number with |
| 5791 | // exponent of 1. |
| 5792 | SDValue X = GetSignificand(DAG, Op: Op1, dl); |
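// The decomposition used here: log10(x) == log10(2^e * m)
// == e * log10(2) + log10(m) with the significand m in [1, 2).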
| 5793 | |
| 5794 | SDValue Log10ofMantissa; |
| 5795 | if (LimitFloatPrecision <= 6) { |
| 5796 | // For floating-point precision of 6: |
| 5797 | // |
| 5798 | // Log10ofMantissa = |
| 5799 | // -0.50419619f + |
| 5800 | // (0.60948995f - 0.10380950f * x) * x; |
| 5801 | // |
| 5802 | // error 0.0014886165, which is 6 bits |
| 5803 | SDValue t0 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: X, |
| 5804 | N2: getF32Constant(DAG, Flt: 0xbdd49a13, dl)); |
| 5805 | SDValue t1 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t0, |
| 5806 | N2: getF32Constant(DAG, Flt: 0x3f1c0789, dl)); |
| 5807 | SDValue t2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t1, N2: X); |
| 5808 | Log10ofMantissa = DAG.getNode(Opcode: ISD::FSUB, DL: dl, VT: MVT::f32, N1: t2, |
| 5809 | N2: getF32Constant(DAG, Flt: 0x3f011300, dl)); |
| 5810 | } else if (LimitFloatPrecision <= 12) { |
| 5811 | // For floating-point precision of 12: |
| 5812 | // |
| 5813 | // Log10ofMantissa = |
| 5814 | // -0.64831180f + |
| 5815 | // (0.91751397f + |
| 5816 | // (-0.31664806f + 0.47637168e-1f * x) * x) * x; |
| 5817 | // |
| 5818 | // error 0.00019228036, which is better than 12 bits |
| 5819 | SDValue t0 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: X, |
| 5820 | N2: getF32Constant(DAG, Flt: 0x3d431f31, dl)); |
| 5821 | SDValue t1 = DAG.getNode(Opcode: ISD::FSUB, DL: dl, VT: MVT::f32, N1: t0, |
| 5822 | N2: getF32Constant(DAG, Flt: 0x3ea21fb2, dl)); |
| 5823 | SDValue t2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t1, N2: X); |
| 5824 | SDValue t3 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t2, |
| 5825 | N2: getF32Constant(DAG, Flt: 0x3f6ae232, dl)); |
| 5826 | SDValue t4 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t3, N2: X); |
| 5827 | Log10ofMantissa = DAG.getNode(Opcode: ISD::FSUB, DL: dl, VT: MVT::f32, N1: t4, |
| 5828 | N2: getF32Constant(DAG, Flt: 0x3f25f7c3, dl)); |
| 5829 | } else { // LimitFloatPrecision <= 18 |
| 5830 | // For floating-point precision of 18: |
| 5831 | // |
| 5832 | // Log10ofMantissa = |
| 5833 | // -0.84299375f + |
| 5834 | // (1.5327582f + |
| 5835 | // (-1.0688956f + |
| 5836 | // (0.49102474f + |
| 5837 | // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x; |
| 5838 | // |
| 5839 | // error 0.0000037995730, which is better than 18 bits |
| 5840 | SDValue t0 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: X, |
| 5841 | N2: getF32Constant(DAG, Flt: 0x3c5d51ce, dl)); |
| 5842 | SDValue t1 = DAG.getNode(Opcode: ISD::FSUB, DL: dl, VT: MVT::f32, N1: t0, |
| 5843 | N2: getF32Constant(DAG, Flt: 0x3e00685a, dl)); |
| 5844 | SDValue t2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t1, N2: X); |
| 5845 | SDValue t3 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t2, |
| 5846 | N2: getF32Constant(DAG, Flt: 0x3efb6798, dl)); |
| 5847 | SDValue t4 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t3, N2: X); |
| 5848 | SDValue t5 = DAG.getNode(Opcode: ISD::FSUB, DL: dl, VT: MVT::f32, N1: t4, |
| 5849 | N2: getF32Constant(DAG, Flt: 0x3f88d192, dl)); |
| 5850 | SDValue t6 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t5, N2: X); |
| 5851 | SDValue t7 = DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: t6, |
| 5852 | N2: getF32Constant(DAG, Flt: 0x3fc4316c, dl)); |
| 5853 | SDValue t8 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: t7, N2: X); |
| 5854 | Log10ofMantissa = DAG.getNode(Opcode: ISD::FSUB, DL: dl, VT: MVT::f32, N1: t8, |
| 5855 | N2: getF32Constant(DAG, Flt: 0x3f57ce70, dl)); |
| 5856 | } |
| 5857 | |
| 5858 | return DAG.getNode(Opcode: ISD::FADD, DL: dl, VT: MVT::f32, N1: LogOfExponent, N2: Log10ofMantissa); |
| 5859 | } |
| 5860 | |
| 5861 | // No special expansion. |
| 5862 | return DAG.getNode(Opcode: ISD::FLOG10, DL: dl, VT: Op.getValueType(), Operand: Op, Flags); |
| 5863 | } |
| 5864 | |
| 5865 | /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for |
| 5866 | /// limited-precision mode. |
| 5867 | static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, |
| 5868 | const TargetLowering &TLI, SDNodeFlags Flags) { |
| 5869 | if (Op.getValueType() == MVT::f32 && |
| 5870 | LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) |
| 5871 | return getLimitedPrecisionExp2(t0: Op, dl, DAG); |
| 5872 | |
| 5873 | // No special expansion. |
| 5874 | return DAG.getNode(Opcode: ISD::FEXP2, DL: dl, VT: Op.getValueType(), Operand: Op, Flags); |
| 5875 | } |
| 5876 | |
| 5877 | /// expandPow - Lower a pow intrinsic. Handles the special sequences for
| 5878 | /// limited-precision mode with x == 10.0f. |
| 5879 | static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS, |
| 5880 | SelectionDAG &DAG, const TargetLowering &TLI, |
| 5881 | SDNodeFlags Flags) { |
| 5882 | bool IsExp10 = false; |
| 5883 | if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 && |
| 5884 | LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { |
| 5885 | if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(Val&: LHS)) { |
| 5886 | APFloat Ten(10.0f); |
| 5887 | IsExp10 = LHSC->isExactlyValue(V: Ten); |
| 5888 | } |
| 5889 | } |
| 5890 | |
| 5891 | // TODO: What fast-math-flags should be set on the FMUL node? |
| 5892 | if (IsExp10) { |
| 5893 | // Put the exponent in the right bit position for later addition to the |
| 5894 | // final result: |
| 5895 | // |
| 5896 | // #define LOG2OF10 3.3219281f |
| 5897 | // t0 = Op * LOG2OF10; |
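|      | //
|      | // (0x40549a78 is log2(10) ~= 3.3219281 as an f32 bit pattern, so the
|      | // exp2 below computes pow(10, RHS) = exp2(RHS * log2(10)).)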
| 5898 | SDValue t0 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::f32, N1: RHS, |
| 5899 | N2: getF32Constant(DAG, Flt: 0x40549a78, dl)); |
| 5900 | return getLimitedPrecisionExp2(t0, dl, DAG); |
| 5901 | } |
| 5902 | |
| 5903 | // No special expansion. |
| 5904 | return DAG.getNode(Opcode: ISD::FPOW, DL: dl, VT: LHS.getValueType(), N1: LHS, N2: RHS, Flags); |
| 5905 | } |
| 5906 | |
| 5907 | /// ExpandPowI - Expand a llvm.powi intrinsic. |
| 5908 | static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, |
| 5909 | SelectionDAG &DAG) { |
| 5910 | // If RHS is a constant, we can expand this out to a multiplication tree if |
| 5911 | // it's beneficial on the target, otherwise we end up lowering to a call to |
| 5912 | // __powidf2 (for example). |
| 5913 | if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Val&: RHS)) { |
| 5914 | unsigned Val = RHSC->getSExtValue(); |
| 5915 | |
| 5916 | // powi(x, 0) -> 1.0 |
| 5917 | if (Val == 0) |
| 5918 | return DAG.getConstantFP(Val: 1.0, DL, VT: LHS.getValueType()); |
| 5919 | |
| 5920 | if (DAG.getTargetLoweringInfo().isBeneficialToExpandPowI( |
| 5921 | Exponent: Val, OptForSize: DAG.shouldOptForSize())) { |
| 5922 | // Get the exponent as a positive value. |
| 5923 | if ((int)Val < 0) |
| 5924 | Val = -Val; |
| 5925 | // We use the simple binary decomposition method to generate the multiply |
| 5926 | // sequence. There are more optimal ways to do this (for example, |
| 5927 | // powi(x,15) generates one more multiply than it should), but this has |
| 5928 | // the benefit of being both really simple and much better than a libcall. |
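|      | // For example, powi(x, 13) decomposes 13 = 0b1101: CurSquare walks
|      | // x, x^2, x^4, x^8 and the set bits select Res = x * x^4 * x^8 = x^13
|      | // (the final squaring to x^16 is computed but unused).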
| 5929 | SDValue Res; // Logically starts equal to 1.0 |
| 5930 | SDValue CurSquare = LHS; |
| 5931 | // TODO: Intrinsics should have fast-math-flags that propagate to these |
| 5932 | // nodes. |
| 5933 | while (Val) { |
| 5934 | if (Val & 1) { |
| 5935 | if (Res.getNode()) |
| 5936 | Res = |
| 5937 | DAG.getNode(Opcode: ISD::FMUL, DL, VT: Res.getValueType(), N1: Res, N2: CurSquare); |
| 5938 | else |
| 5939 | Res = CurSquare; // 1.0*CurSquare. |
| 5940 | } |
| 5941 | |
| 5942 | CurSquare = DAG.getNode(Opcode: ISD::FMUL, DL, VT: CurSquare.getValueType(), |
| 5943 | N1: CurSquare, N2: CurSquare); |
| 5944 | Val >>= 1; |
| 5945 | } |
| 5946 | |
| 5947 | // If the original was negative, invert the result, producing 1/(x*x*x). |
| 5948 | if (RHSC->getSExtValue() < 0) |
| 5949 | Res = DAG.getNode(Opcode: ISD::FDIV, DL, VT: LHS.getValueType(), |
| 5950 | N1: DAG.getConstantFP(Val: 1.0, DL, VT: LHS.getValueType()), N2: Res); |
| 5951 | return Res; |
| 5952 | } |
| 5953 | } |
| 5954 | |
| 5955 | // Otherwise, expand to a libcall. |
| 5956 | return DAG.getNode(Opcode: ISD::FPOWI, DL, VT: LHS.getValueType(), N1: LHS, N2: RHS); |
| 5957 | } |
| 5958 | |
| 5959 | static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL, |
| 5960 | SDValue LHS, SDValue RHS, SDValue Scale, |
| 5961 | SelectionDAG &DAG, const TargetLowering &TLI) { |
| 5962 | EVT VT = LHS.getValueType(); |
| 5963 | bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT; |
| 5964 | bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT; |
| 5965 | LLVMContext &Ctx = *DAG.getContext(); |
| 5966 | |
| 5967 | // If the type is legal but the operation isn't, this node might survive all |
| 5968 | // the way to operation legalization. If we end up there and we do not have |
| 5969 | // the ability to widen the type (if VT*2 is not legal), we cannot expand the |
| 5970 | // node. |
| 5971 | |
| 5972 | // Coax the legalizer into expanding the node during type legalization instead |
| 5973 | // by bumping the size by one bit. This will force it to Promote, enabling the |
| 5974 | // early expansion and avoiding the need to expand later. |
| 5975 | |
| 5976 | // We don't have to do this if Scale is 0; that can always be expanded, unless |
| 5977 | // it's a saturating signed operation. Those can experience true integer |
| 5978 | // division overflow, a case which we must avoid. |
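|      | // For example, a saturating signed i8 division of -128 by -1 at scale 0
|      | // yields 128, which does not fit in i8 and must saturate to 127 rather
|      | // than overflow.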
| 5979 | |
| 5980 | // FIXME: We wouldn't have to do this (or any of the early |
| 5981 | // expansion/promotion) if it was possible to expand a libcall of an |
| 5982 | // illegal type during operation legalization. But it's not, so things |
| 5983 | // get a bit hacky. |
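|      | // For example, an sdiv.fix.sat on legal i32 with a nonzero scale is widened
|      | // here to i33, which the type legalizer must then promote (typically to
|      | // i64), expanding the operation early rather than leaving it for operation
|      | // legalization.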
| 5984 | unsigned ScaleInt = Scale->getAsZExtVal(); |
| 5985 | if ((ScaleInt > 0 || (Saturating && Signed)) && |
| 5986 | (TLI.isTypeLegal(VT) || |
| 5987 | (VT.isVector() && TLI.isTypeLegal(VT: VT.getVectorElementType())))) { |
| 5988 | TargetLowering::LegalizeAction Action = TLI.getFixedPointOperationAction( |
| 5989 | Op: Opcode, VT, Scale: ScaleInt); |
| 5990 | if (Action != TargetLowering::Legal && Action != TargetLowering::Custom) { |
| 5991 | EVT PromVT; |
| 5992 | if (VT.isScalarInteger()) |
| 5993 | PromVT = EVT::getIntegerVT(Context&: Ctx, BitWidth: VT.getSizeInBits() + 1); |
| 5994 | else if (VT.isVector()) { |
| 5995 | PromVT = VT.getVectorElementType(); |
| 5996 | PromVT = EVT::getIntegerVT(Context&: Ctx, BitWidth: PromVT.getSizeInBits() + 1); |
| 5997 | PromVT = EVT::getVectorVT(Context&: Ctx, VT: PromVT, EC: VT.getVectorElementCount()); |
| 5998 | } else |
| 5999 | llvm_unreachable("Wrong VT for DIVFIX?" ); |
| 6000 | LHS = DAG.getExtOrTrunc(IsSigned: Signed, Op: LHS, DL, VT: PromVT); |
| 6001 | RHS = DAG.getExtOrTrunc(IsSigned: Signed, Op: RHS, DL, VT: PromVT); |
| 6002 | EVT ShiftTy = TLI.getShiftAmountTy(LHSTy: PromVT, DL: DAG.getDataLayout()); |
| 6003 | // For saturating operations, we need to shift up the LHS to get the |
| 6004 | // proper saturation width, and then shift down again afterwards. |
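|      | // E.g. when i8 is promoted to i9, doubling the dividend doubles the
|      | // quotient, so saturating at the i9 bounds and shifting back down is
|      | // equivalent to saturating at the original i8 bounds.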
| 6005 | if (Saturating) |
| 6006 | LHS = DAG.getNode(Opcode: ISD::SHL, DL, VT: PromVT, N1: LHS, |
| 6007 | N2: DAG.getConstant(Val: 1, DL, VT: ShiftTy)); |
| 6008 | SDValue Res = DAG.getNode(Opcode, DL, VT: PromVT, N1: LHS, N2: RHS, N3: Scale); |
| 6009 | if (Saturating) |
| 6010 | Res = DAG.getNode(Opcode: Signed ? ISD::SRA : ISD::SRL, DL, VT: PromVT, N1: Res, |
| 6011 | N2: DAG.getConstant(Val: 1, DL, VT: ShiftTy)); |
| 6012 | return DAG.getZExtOrTrunc(Op: Res, DL, VT); |
| 6013 | } |
| 6014 | } |
| 6015 | |
| 6016 | return DAG.getNode(Opcode, DL, VT, N1: LHS, N2: RHS, N3: Scale); |
| 6017 | } |
| 6018 | |
| 6019 | // getUnderlyingArgRegs - Find underlying registers used for a truncated, |
| 6020 | // bitcasted, or split argument. Returns a list of <Register, size in bits>.
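|      | // (e.g. an i64 argument split by the calling convention into two i32
|      | // CopyFromReg values joined by a BUILD_PAIR yields two <Reg, 32> entries).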
| 6021 | static void |
| 6022 | getUnderlyingArgRegs(SmallVectorImpl<std::pair<Register, TypeSize>> &Regs, |
| 6023 | const SDValue &N) { |
| 6024 | switch (N.getOpcode()) { |
| 6025 | case ISD::CopyFromReg: { |
| 6026 | SDValue Op = N.getOperand(i: 1); |
| 6027 | Regs.emplace_back(Args: cast<RegisterSDNode>(Val&: Op)->getReg(), |
| 6028 | Args: Op.getValueType().getSizeInBits()); |
| 6029 | return; |
| 6030 | } |
| 6031 | case ISD::BITCAST: |
| 6032 | case ISD::AssertZext: |
| 6033 | case ISD::AssertSext: |
| 6034 | case ISD::TRUNCATE: |
| 6035 | getUnderlyingArgRegs(Regs, N: N.getOperand(i: 0)); |
| 6036 | return; |
| 6037 | case ISD::BUILD_PAIR: |
| 6038 | case ISD::BUILD_VECTOR: |
| 6039 | case ISD::CONCAT_VECTORS: |
| 6040 | for (SDValue Op : N->op_values()) |
| 6041 | getUnderlyingArgRegs(Regs, N: Op); |
| 6042 | return; |
| 6043 | default: |
| 6044 | return; |
| 6045 | } |
| 6046 | } |
| 6047 | |
| 6048 | /// If the DbgValueInst is a dbg_value of a function argument, create the |
| 6049 | /// corresponding DBG_VALUE machine instruction for it now. At the end of |
| 6050 | /// instruction selection, they will be inserted into the entry BB.
| 6051 | /// We don't currently support this for variadic dbg_values, as they shouldn't |
| 6052 | /// appear for function arguments or in the prologue. |
| 6053 | bool SelectionDAGBuilder::EmitFuncArgumentDbgValue( |
| 6054 | const Value *V, DILocalVariable *Variable, DIExpression *Expr, |
| 6055 | DILocation *DL, FuncArgumentDbgValueKind Kind, const SDValue &N) { |
| 6056 | const Argument *Arg = dyn_cast<Argument>(Val: V); |
| 6057 | if (!Arg) |
| 6058 | return false; |
| 6059 | |
| 6060 | MachineFunction &MF = DAG.getMachineFunction(); |
| 6061 | const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo(); |
| 6062 | |
| 6063 | // Helper to create DBG_INSTR_REFs or DBG_VALUEs, depending on what kind |
| 6064 | // we've been asked to pursue. |
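|      | // In instruction-referencing mode the result is roughly
|      | //   DBG_INSTR_REF !var, !DIExpression(DW_OP_LLVM_arg, 0, ...), %vreg
|      | // while the classic path emits a plain DBG_VALUE of the register.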
| 6065 | auto MakeVRegDbgValue = [&](Register Reg, DIExpression *FragExpr, |
| 6066 | bool Indirect) { |
| 6067 | if (Reg.isVirtual() && MF.useDebugInstrRef()) { |
| 6068 | // For VRegs, in instruction referencing mode, create a DBG_INSTR_REF |
| 6069 | // pointing at the VReg, which will be patched up later. |
| 6070 | auto &Inst = TII->get(Opcode: TargetOpcode::DBG_INSTR_REF); |
| 6071 | SmallVector<MachineOperand, 1> MOs({MachineOperand::CreateReg( |
| 6072 | /* Reg */ Reg, /* isDef */ false, /* isImp */ false, |
| 6073 | /* isKill */ false, /* isDead */ false, |
| 6074 | /* isUndef */ false, /* isEarlyClobber */ false, |
| 6075 | /* SubReg */ 0, /* isDebug */ true)}); |
| 6076 | |
| 6077 | auto *NewDIExpr = FragExpr; |
| 6078 | // We don't have an "Indirect" field in DBG_INSTR_REF, fold that into |
| 6079 | // the DIExpression. |
| 6080 | if (Indirect) |
| 6081 | NewDIExpr = DIExpression::prepend(Expr: FragExpr, Flags: DIExpression::DerefBefore); |
| 6082 | SmallVector<uint64_t, 2> Ops({dwarf::DW_OP_LLVM_arg, 0}); |
| 6083 | NewDIExpr = DIExpression::prependOpcodes(Expr: NewDIExpr, Ops); |
| 6084 | return BuildMI(MF, DL, MCID: Inst, IsIndirect: false, MOs, Variable, Expr: NewDIExpr); |
| 6085 | } else { |
| 6086 | // Create a completely standard DBG_VALUE. |
| 6087 | auto &Inst = TII->get(Opcode: TargetOpcode::DBG_VALUE); |
| 6088 | return BuildMI(MF, DL, MCID: Inst, IsIndirect: Indirect, Reg, Variable, Expr: FragExpr); |
| 6089 | } |
| 6090 | }; |
| 6091 | |
| 6092 | if (Kind == FuncArgumentDbgValueKind::Value) { |
| 6093 | // ArgDbgValues are hoisted to the beginning of the entry block. So we |
| 6094 | // should only emit as ArgDbgValue if the dbg.value intrinsic is found in |
| 6095 | // the entry block. |
| 6096 | bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front(); |
| 6097 | if (!IsInEntryBlock) |
| 6098 | return false; |
| 6099 | |
| 6100 | // ArgDbgValues are hoisted to the beginning of the entry block. So we |
| 6101 | // should only emit as ArgDbgValue if the dbg.value intrinsic describes a |
| 6102 | // variable that also is a param. |
| 6103 | // |
| 6104 | // However, if we are at the top of the entry block already, we can still
| 6105 | // emit using ArgDbgValue. This might catch some situations when the |
| 6106 | // dbg.value refers to an argument that isn't used in the entry block, so |
| 6107 | // any CopyToReg node would be optimized out and the only way to express |
| 6108 | // this DBG_VALUE is by using the physical reg (or FI) as done in this |
| 6109 | // method.
| 6113 | bool VariableIsFunctionInputArg = Variable->isParameter() && |
| 6114 | !DL->getInlinedAt(); |
| 6115 | bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder; |
| 6116 | if (!IsInPrologue && !VariableIsFunctionInputArg) |
| 6117 | return false; |
| 6118 | |
| 6119 | // Here we assume that a function argument on IR level only can be used to |
| 6120 | // describe one input parameter on source level. If we for example have |
| 6121 | // source code like this |
| 6122 | // |
| 6123 | // struct A { long x, y; }; |
| 6124 | // void foo(struct A a, long b) { |
| 6125 | // ... |
| 6126 | // b = a.x; |
| 6127 | // ... |
| 6128 | // } |
| 6129 | // |
| 6130 | // and IR like this |
| 6131 | // |
| 6132 | // define void @foo(i32 %a1, i32 %a2, i32 %b) { |
| 6133 | // entry: |
| 6134 | // call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment |
| 6135 | // call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment |
| 6136 | // call void @llvm.dbg.value(metadata i32 %b, "b", |
| 6137 | // ... |
| 6138 | // call void @llvm.dbg.value(metadata i32 %a1, "b" |
| 6139 | // ... |
| 6140 | // |
| 6141 | // then the last dbg.value is describing a parameter "b" using a value that |
| 6142 | // is an argument. But since we have already used %a1 to describe a parameter,
| 6143 | // we should not handle that last dbg.value here (that would result in an |
| 6144 | // incorrect hoisting of the DBG_VALUE to the function entry). |
| 6145 | // Notice that we allow one dbg.value per IR-level argument, to accommodate
| 6146 | // for the situation with fragments above. |
| 6147 | // If there is no node for the value being handled, we return true to skip |
| 6148 | // the normal generation of debug info, as it would kill existing debug |
| 6149 | // info for the parameter in case of duplicates. |
| 6150 | if (VariableIsFunctionInputArg) { |
| 6151 | unsigned ArgNo = Arg->getArgNo(); |
| 6152 | if (ArgNo >= FuncInfo.DescribedArgs.size()) |
| 6153 | FuncInfo.DescribedArgs.resize(N: ArgNo + 1, t: false); |
| 6154 | else if (!IsInPrologue && FuncInfo.DescribedArgs.test(Idx: ArgNo)) |
| 6155 | return !NodeMap[V].getNode(); |
| 6156 | FuncInfo.DescribedArgs.set(ArgNo); |
| 6157 | } |
| 6158 | } |
| 6159 | |
| 6160 | bool IsIndirect = false; |
| 6161 | std::optional<MachineOperand> Op; |
| 6162 | // Some arguments' frame index is recorded during argument lowering. |
| 6163 | int FI = FuncInfo.getArgumentFrameIndex(A: Arg); |
| 6164 | if (FI != std::numeric_limits<int>::max()) |
| 6165 | Op = MachineOperand::CreateFI(Idx: FI); |
| 6166 | |
| 6167 | SmallVector<std::pair<Register, TypeSize>, 8> ArgRegsAndSizes; |
| 6168 | if (!Op && N.getNode()) { |
| 6169 | getUnderlyingArgRegs(Regs&: ArgRegsAndSizes, N); |
| 6170 | Register Reg; |
| 6171 | if (ArgRegsAndSizes.size() == 1) |
| 6172 | Reg = ArgRegsAndSizes.front().first; |
| 6173 | |
| 6174 | if (Reg && Reg.isVirtual()) { |
| 6175 | MachineRegisterInfo &RegInfo = MF.getRegInfo(); |
| 6176 | Register PR = RegInfo.getLiveInPhysReg(VReg: Reg); |
| 6177 | if (PR) |
| 6178 | Reg = PR; |
| 6179 | } |
| 6180 | if (Reg) { |
| 6181 | Op = MachineOperand::CreateReg(Reg, isDef: false); |
| 6182 | IsIndirect = Kind != FuncArgumentDbgValueKind::Value; |
| 6183 | } |
| 6184 | } |
| 6185 | |
| 6186 | if (!Op && N.getNode()) { |
| 6187 | // Check if frame index is available. |
| 6188 | SDValue LCandidate = peekThroughBitcasts(V: N); |
| 6189 | if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(Val: LCandidate.getNode())) |
| 6190 | if (FrameIndexSDNode *FINode = |
| 6191 | dyn_cast<FrameIndexSDNode>(Val: LNode->getBasePtr().getNode())) |
| 6192 | Op = MachineOperand::CreateFI(Idx: FINode->getIndex()); |
| 6193 | } |
| 6194 | |
| 6195 | if (!Op) { |
| 6196 | // Create a DBG_VALUE for each decomposed value in SplitRegs to cover Reg.
| 6197 | auto splitMultiRegDbgValue = [&](ArrayRef<std::pair<Register, TypeSize>> |
| 6198 | SplitRegs) { |
| 6199 | unsigned Offset = 0; |
| 6200 | for (const auto &RegAndSize : SplitRegs) { |
| 6201 | // If the expression is already a fragment, the current register |
| 6202 | // offset+size might extend beyond the fragment. In this case, only |
| 6203 | // the register bits that are inside the fragment are relevant. |
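|      | // For example, a 96-bit fragment spread over two 64-bit registers leaves
|      | // only the low 32 bits of the second register inside the fragment.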
| 6204 | int RegFragmentSizeInBits = RegAndSize.second; |
| 6205 | if (auto ExprFragmentInfo = Expr->getFragmentInfo()) { |
| 6206 | uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits; |
| 6207 | // The register is entirely outside the expression fragment, |
| 6208 | // so is irrelevant for debug info. |
| 6209 | if (Offset >= ExprFragmentSizeInBits) |
| 6210 | break; |
| 6211 | // The register is partially outside the expression fragment, only |
| 6212 | // the low bits within the fragment are relevant for debug info. |
| 6213 | if (Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) { |
| 6214 | RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset; |
| 6215 | } |
| 6216 | } |
| 6217 | |
| 6218 | auto FragmentExpr = DIExpression::createFragmentExpression( |
| 6219 | Expr, OffsetInBits: Offset, SizeInBits: RegFragmentSizeInBits); |
| 6220 | Offset += RegAndSize.second; |
| 6221 | // If a valid fragment expression cannot be created, the variable's |
| 6222 | // correct value cannot be determined and so it is set as poison. |
| 6223 | if (!FragmentExpr) { |
| 6224 | SDDbgValue *SDV = DAG.getConstantDbgValue( |
| 6225 | Var: Variable, Expr, C: PoisonValue::get(T: V->getType()), DL, O: SDNodeOrder); |
| 6226 | DAG.AddDbgValue(DB: SDV, isParameter: false); |
| 6227 | continue; |
| 6228 | } |
| 6229 | MachineInstr *NewMI = |
| 6230 | MakeVRegDbgValue(RegAndSize.first, *FragmentExpr, |
| 6231 | Kind != FuncArgumentDbgValueKind::Value); |
| 6232 | FuncInfo.ArgDbgValues.push_back(Elt: NewMI); |
| 6233 | } |
| 6234 | }; |
| 6235 | |
| 6236 | // Check if ValueMap has reg number. |
| 6237 | DenseMap<const Value *, Register>::const_iterator |
| 6238 | VMI = FuncInfo.ValueMap.find(Val: V); |
| 6239 | if (VMI != FuncInfo.ValueMap.end()) { |
| 6240 | const auto &TLI = DAG.getTargetLoweringInfo(); |
| 6241 | RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second, |
| 6242 | V->getType(), std::nullopt); |
| 6243 | if (RFV.occupiesMultipleRegs()) { |
| 6244 | splitMultiRegDbgValue(RFV.getRegsAndSizes()); |
| 6245 | return true; |
| 6246 | } |
| 6247 | |
| 6248 | Op = MachineOperand::CreateReg(Reg: VMI->second, isDef: false); |
| 6249 | IsIndirect = Kind != FuncArgumentDbgValueKind::Value; |
| 6250 | } else if (ArgRegsAndSizes.size() > 1) { |
| 6251 | // This was split due to the calling convention, and no virtual register |
| 6252 | // mapping exists for the value. |
| 6253 | splitMultiRegDbgValue(ArgRegsAndSizes); |
| 6254 | return true; |
| 6255 | } |
| 6256 | } |
| 6257 | |
| 6258 | if (!Op) |
| 6259 | return false; |
| 6260 | |
| 6261 | assert(Variable->isValidLocationForIntrinsic(DL) && |
| 6262 | "Expected inlined-at fields to agree" ); |
| 6263 | MachineInstr *NewMI = nullptr; |
| 6264 | |
| 6265 | if (Op->isReg()) |
| 6266 | NewMI = MakeVRegDbgValue(Op->getReg(), Expr, IsIndirect); |
| 6267 | else |
| 6268 | NewMI = BuildMI(MF, DL, MCID: TII->get(Opcode: TargetOpcode::DBG_VALUE), IsIndirect: true, MOs: *Op, |
| 6269 | Variable, Expr); |
| 6270 | |
| 6271 | // Record the new instruction in ArgDbgValues for insertion in the entry block.
| 6272 | FuncInfo.ArgDbgValues.push_back(Elt: NewMI); |
| 6273 | return true; |
| 6274 | } |
| 6275 | |
| 6276 | /// Return the appropriate SDDbgValue based on N. |
| 6277 | SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N, |
| 6278 | DILocalVariable *Variable, |
| 6279 | DIExpression *Expr, |
| 6280 | const DebugLoc &dl, |
| 6281 | unsigned DbgSDNodeOrder) { |
| 6282 | if (auto *FISDN = dyn_cast<FrameIndexSDNode>(Val: N.getNode())) { |
| 6283 | // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe |
| 6284 | // stack slot locations. |
| 6285 | // |
| 6286 | // Consider "int x = 0; int *px = &x;". There are two kinds of interesting |
| 6287 | // debug values here after optimization: |
| 6288 | // |
| 6289 | // dbg.value(i32* %px, !"int *px", !DIExpression()), and |
| 6290 | // dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref)) |
| 6291 | // |
| 6292 | // Both describe the direct values of their associated variables. |
| 6293 | return DAG.getFrameIndexDbgValue(Var: Variable, Expr, FI: FISDN->getIndex(), |
| 6294 | /*IsIndirect*/ false, DL: dl, O: DbgSDNodeOrder); |
| 6295 | } |
| 6296 | return DAG.getDbgValue(Var: Variable, Expr, N: N.getNode(), R: N.getResNo(), |
| 6297 | /*IsIndirect*/ false, DL: dl, O: DbgSDNodeOrder); |
| 6298 | } |
| 6299 | |
| 6300 | static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) { |
| 6301 | switch (Intrinsic) { |
| 6302 | case Intrinsic::smul_fix: |
| 6303 | return ISD::SMULFIX; |
| 6304 | case Intrinsic::umul_fix: |
| 6305 | return ISD::UMULFIX; |
| 6306 | case Intrinsic::smul_fix_sat: |
| 6307 | return ISD::SMULFIXSAT; |
| 6308 | case Intrinsic::umul_fix_sat: |
| 6309 | return ISD::UMULFIXSAT; |
| 6310 | case Intrinsic::sdiv_fix: |
| 6311 | return ISD::SDIVFIX; |
| 6312 | case Intrinsic::udiv_fix: |
| 6313 | return ISD::UDIVFIX; |
| 6314 | case Intrinsic::sdiv_fix_sat: |
| 6315 | return ISD::SDIVFIXSAT; |
| 6316 | case Intrinsic::udiv_fix_sat: |
| 6317 | return ISD::UDIVFIXSAT; |
| 6318 | default: |
| 6319 | llvm_unreachable("Unhandled fixed point intrinsic" ); |
| 6320 | } |
| 6321 | } |
| 6322 | |
| 6323 | /// Given a @llvm.call.preallocated.setup, return the corresponding |
| 6324 | /// preallocated call. |
| 6325 | static const CallBase *FindPreallocatedCall(const Value *PreallocatedSetup) { |
| 6326 | assert(cast<CallBase>(PreallocatedSetup) |
| 6327 | ->getCalledFunction() |
| 6328 | ->getIntrinsicID() == Intrinsic::call_preallocated_setup && |
| 6329 | "expected call_preallocated_setup Value" ); |
| 6330 | for (const auto *U : PreallocatedSetup->users()) { |
| 6331 | auto *UseCall = cast<CallBase>(Val: U); |
| 6332 | const Function *Fn = UseCall->getCalledFunction(); |
| 6333 | if (!Fn || Fn->getIntrinsicID() != Intrinsic::call_preallocated_arg) { |
| 6334 | return UseCall; |
| 6335 | } |
| 6336 | } |
| 6337 | llvm_unreachable("expected corresponding call to preallocated setup/arg" ); |
| 6338 | } |
| 6339 | |
| 6340 | /// If DI is a debug value with an EntryValue expression, lower it using the |
| 6341 | /// corresponding physical register of the associated Argument value |
| 6342 | /// (guaranteed to exist by the verifier). |
| 6343 | bool SelectionDAGBuilder::visitEntryValueDbgValue( |
| 6344 | ArrayRef<const Value *> Values, DILocalVariable *Variable, |
| 6345 | DIExpression *Expr, DebugLoc DbgLoc) { |
| 6346 | if (!Expr->isEntryValue() || !hasSingleElement(C&: Values)) |
| 6347 | return false; |
| 6348 | |
| 6349 | // These properties are guaranteed by the verifier. |
| 6350 | const Argument *Arg = cast<Argument>(Val: Values[0]); |
| 6351 | assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync)); |
| 6352 | |
| 6353 | auto ArgIt = FuncInfo.ValueMap.find(Val: Arg); |
| 6354 | if (ArgIt == FuncInfo.ValueMap.end()) { |
| 6355 | LLVM_DEBUG( |
| 6356 | dbgs() << "Dropping dbg.value: expression is entry_value but " |
| 6357 | "couldn't find an associated register for the Argument\n" ); |
| 6358 | return true; |
| 6359 | } |
| 6360 | Register ArgVReg = ArgIt->getSecond(); |
| 6361 | |
| 6362 | for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins()) |
| 6363 | if (ArgVReg == VirtReg || ArgVReg == PhysReg) { |
| 6364 | SDDbgValue *SDV = DAG.getVRegDbgValue( |
| 6365 | Var: Variable, Expr, VReg: PhysReg, IsIndirect: false /*IsIndirect*/, DL: DbgLoc, O: SDNodeOrder);
| 6366 | DAG.AddDbgValue(DB: SDV, isParameter: false /*treat as dbg.declare byval parameter*/); |
| 6367 | return true; |
| 6368 | } |
| 6369 | LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but " |
| 6370 | "couldn't find a physical register\n" ); |
| 6371 | return true; |
| 6372 | } |
| 6373 | |
| 6374 | /// Lower a call to a convergence control intrinsic.
| 6375 | void SelectionDAGBuilder::visitConvergenceControl(const CallInst &I, |
| 6376 | unsigned Intrinsic) { |
| 6377 | SDLoc sdl = getCurSDLoc(); |
| 6378 | switch (Intrinsic) { |
| 6379 | case Intrinsic::experimental_convergence_anchor: |
| 6380 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::CONVERGENCECTRL_ANCHOR, DL: sdl, VT: MVT::Untyped)); |
| 6381 | break; |
| 6382 | case Intrinsic::experimental_convergence_entry: |
| 6383 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::CONVERGENCECTRL_ENTRY, DL: sdl, VT: MVT::Untyped)); |
| 6384 | break; |
| 6385 | case Intrinsic::experimental_convergence_loop: { |
| 6386 | auto Bundle = I.getOperandBundle(ID: LLVMContext::OB_convergencectrl); |
| 6387 | auto *Token = Bundle->Inputs[0].get(); |
| 6388 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::CONVERGENCECTRL_LOOP, DL: sdl, VT: MVT::Untyped, |
| 6389 | Operand: getValue(V: Token))); |
| 6390 | break; |
| 6391 | } |
| 6392 | } |
| 6393 | } |
| 6394 | |
| 6395 | void SelectionDAGBuilder::visitVectorHistogram(const CallInst &I, |
| 6396 | unsigned IntrinsicID) { |
| 6397 | // For now, we're only lowering an 'add' histogram. |
| 6398 | // We can add others later, e.g. saturating adds, min/max. |
| 6399 | assert(IntrinsicID == Intrinsic::experimental_vector_histogram_add && |
| 6400 | "Tried to lower unsupported histogram type" ); |
| 6401 | SDLoc sdl = getCurSDLoc(); |
| 6402 | Value *Ptr = I.getOperand(i_nocapture: 0); |
| 6403 | SDValue Inc = getValue(V: I.getOperand(i_nocapture: 1)); |
| 6404 | SDValue Mask = getValue(V: I.getOperand(i_nocapture: 2)); |
| 6405 | |
| 6406 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 6407 | DataLayout TargetDL = DAG.getDataLayout(); |
| 6408 | EVT VT = Inc.getValueType(); |
| 6409 | Align Alignment = DAG.getEVTAlign(MemoryVT: VT); |
| 6410 | |
| 6411 | const MDNode *Ranges = getRangeMetadata(I); |
| 6412 | |
| 6413 | SDValue Root = DAG.getRoot(); |
| 6414 | SDValue Base; |
| 6415 | SDValue Index; |
| 6416 | ISD::MemIndexType IndexType; |
| 6417 | SDValue Scale; |
| 6418 | bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, SDB: this, |
| 6419 | CurBB: I.getParent(), ElemSize: VT.getScalarStoreSize()); |
| 6420 | |
| 6421 | unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace(); |
| 6422 | |
| 6423 | MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( |
| 6424 | PtrInfo: MachinePointerInfo(AS), |
| 6425 | F: MachineMemOperand::MOLoad | MachineMemOperand::MOStore, |
| 6426 | Size: MemoryLocation::UnknownSize, BaseAlignment: Alignment, AAInfo: I.getAAMetadata(), Ranges); |
| 6427 | |
| 6428 | if (!UniformBase) { |
| 6429 | Base = DAG.getConstant(Val: 0, DL: sdl, VT: TLI.getPointerTy(DL: DAG.getDataLayout())); |
| 6430 | Index = getValue(V: Ptr); |
| 6431 | IndexType = ISD::SIGNED_SCALED; |
| 6432 | Scale = |
| 6433 | DAG.getTargetConstant(Val: 1, DL: sdl, VT: TLI.getPointerTy(DL: DAG.getDataLayout())); |
| 6434 | } |
| 6435 | |
| 6436 | EVT IdxVT = Index.getValueType(); |
| 6437 | EVT EltTy = IdxVT.getVectorElementType(); |
| 6438 | if (TLI.shouldExtendGSIndex(VT: IdxVT, EltTy)) { |
| 6439 | EVT NewIdxVT = IdxVT.changeVectorElementType(EltVT: EltTy); |
| 6440 | Index = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: sdl, VT: NewIdxVT, Operand: Index); |
| 6441 | } |
| 6442 | |
| 6443 | SDValue ID = DAG.getTargetConstant(Val: IntrinsicID, DL: sdl, VT: MVT::i32); |
| 6444 | |
| 6445 | SDValue Ops[] = {Root, Inc, Mask, Base, Index, Scale, ID}; |
| 6446 | SDValue Histogram = DAG.getMaskedHistogram(VTs: DAG.getVTList(VT: MVT::Other), MemVT: VT, dl: sdl, |
| 6447 | Ops, MMO, IndexType); |
| 6448 | |
| 6449 | setValue(V: &I, NewN: Histogram); |
| 6450 | DAG.setRoot(Histogram); |
| 6451 | } |
| 6452 | |
| 6453 | void SelectionDAGBuilder::visitVectorExtractLastActive(const CallInst &I,
| 6454 | unsigned Intrinsic) { |
| 6455 | assert(Intrinsic == Intrinsic::experimental_vector_extract_last_active && |
| 6456 | "Tried lowering invalid vector extract last" ); |
| 6457 | SDLoc sdl = getCurSDLoc(); |
| 6458 | const DataLayout &Layout = DAG.getDataLayout(); |
| 6459 | SDValue Data = getValue(V: I.getOperand(i_nocapture: 0)); |
| 6460 | SDValue Mask = getValue(V: I.getOperand(i_nocapture: 1)); |
| 6461 | |
| 6462 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 6463 | EVT ResVT = TLI.getValueType(DL: Layout, Ty: I.getType()); |
| 6464 | |
| 6465 | EVT ExtVT = TLI.getVectorIdxTy(DL: Layout); |
| 6466 | SDValue Idx = DAG.getNode(Opcode: ISD::VECTOR_FIND_LAST_ACTIVE, DL: sdl, VT: ExtVT, Operand: Mask); |
| 6467 | SDValue Result = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: sdl, VT: ResVT, N1: Data, N2: Idx); |
| 6468 | |
| 6469 | Value *Default = I.getOperand(i_nocapture: 2); |
| 6470 | if (!isa<PoisonValue>(Val: Default) && !isa<UndefValue>(Val: Default)) { |
| 6471 | SDValue PassThru = getValue(V: Default); |
| 6472 | EVT BoolVT = Mask.getValueType().getScalarType(); |
| 6473 | SDValue AnyActive = DAG.getNode(Opcode: ISD::VECREDUCE_OR, DL: sdl, VT: BoolVT, Operand: Mask); |
| 6474 | Result = DAG.getSelect(DL: sdl, VT: ResVT, Cond: AnyActive, LHS: Result, RHS: PassThru); |
| 6475 | } |
| 6476 | |
| 6477 | setValue(V: &I, NewN: Result); |
| 6478 | } |
| 6479 | |
| 6480 | /// Lower the call to the specified intrinsic function. |
| 6481 | void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, |
| 6482 | unsigned Intrinsic) { |
| 6483 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 6484 | SDLoc sdl = getCurSDLoc(); |
| 6485 | DebugLoc dl = getCurDebugLoc(); |
| 6486 | SDValue Res; |
| 6487 | |
| 6488 | SDNodeFlags Flags; |
| 6489 | if (auto *FPOp = dyn_cast<FPMathOperator>(Val: &I)) |
| 6490 | Flags.copyFMF(FPMO: *FPOp); |
| 6491 | |
| 6492 | switch (Intrinsic) { |
| 6493 | default: |
| 6494 | // By default, turn this into a target intrinsic node. |
| 6495 | visitTargetIntrinsic(I, Intrinsic); |
| 6496 | return; |
| 6497 | case Intrinsic::vscale: { |
| 6498 | EVT VT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 6499 | setValue(V: &I, NewN: DAG.getVScale(DL: sdl, VT, MulImm: APInt(VT.getSizeInBits(), 1))); |
| 6500 | return; |
| 6501 | } |
| 6502 | case Intrinsic::vastart: visitVAStart(I); return; |
| 6503 | case Intrinsic::vaend: visitVAEnd(I); return; |
| 6504 | case Intrinsic::vacopy: visitVACopy(I); return; |
| 6505 | case Intrinsic::returnaddress: |
| 6506 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::RETURNADDR, DL: sdl, |
| 6507 | VT: TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()), |
| 6508 | Operand: getValue(V: I.getArgOperand(i: 0)))); |
| 6509 | return; |
| 6510 | case Intrinsic::addressofreturnaddress: |
| 6511 | setValue(V: &I, |
| 6512 | NewN: DAG.getNode(Opcode: ISD::ADDROFRETURNADDR, DL: sdl, |
| 6513 | VT: TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()))); |
| 6514 | return; |
| 6515 | case Intrinsic::sponentry: |
| 6516 | setValue(V: &I, |
| 6517 | NewN: DAG.getNode(Opcode: ISD::SPONENTRY, DL: sdl, |
| 6518 | VT: TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()))); |
| 6519 | return; |
| 6520 | case Intrinsic::frameaddress: |
| 6521 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::FRAMEADDR, DL: sdl, |
| 6522 | VT: TLI.getFrameIndexTy(DL: DAG.getDataLayout()), |
| 6523 | Operand: getValue(V: I.getArgOperand(i: 0)))); |
| 6524 | return; |
| 6525 | case Intrinsic::read_volatile_register: |
| 6526 | case Intrinsic::read_register: { |
| 6527 | Value *Reg = I.getArgOperand(i: 0); |
| 6528 | SDValue Chain = getRoot(); |
| 6529 | SDValue RegName = |
| 6530 | DAG.getMDNode(MD: cast<MDNode>(Val: cast<MetadataAsValue>(Val: Reg)->getMetadata())); |
| 6531 | EVT VT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 6532 | Res = DAG.getNode(Opcode: ISD::READ_REGISTER, DL: sdl, |
| 6533 | VTList: DAG.getVTList(VT1: VT, VT2: MVT::Other), N1: Chain, N2: RegName); |
| 6534 | setValue(V: &I, NewN: Res); |
| 6535 | DAG.setRoot(Res.getValue(R: 1)); |
| 6536 | return; |
| 6537 | } |
| 6538 | case Intrinsic::write_register: { |
| 6539 | Value *Reg = I.getArgOperand(i: 0); |
| 6540 | Value *RegValue = I.getArgOperand(i: 1); |
| 6541 | SDValue Chain = getRoot(); |
| 6542 | SDValue RegName = |
| 6543 | DAG.getMDNode(MD: cast<MDNode>(Val: cast<MetadataAsValue>(Val: Reg)->getMetadata())); |
| 6544 | DAG.setRoot(DAG.getNode(Opcode: ISD::WRITE_REGISTER, DL: sdl, VT: MVT::Other, N1: Chain, |
| 6545 | N2: RegName, N3: getValue(V: RegValue))); |
| 6546 | return; |
| 6547 | } |
| 6548 | case Intrinsic::memcpy: |
| 6549 | case Intrinsic::memcpy_inline: { |
| 6550 | const auto &MCI = cast<MemCpyInst>(Val: I); |
| 6551 | SDValue Dst = getValue(V: I.getArgOperand(i: 0)); |
| 6552 | SDValue Src = getValue(V: I.getArgOperand(i: 1)); |
| 6553 | SDValue Size = getValue(V: I.getArgOperand(i: 2)); |
| 6554 | assert((!MCI.isForceInlined() || isa<ConstantSDNode>(Size)) && |
| 6555 | "memcpy_inline needs constant size" ); |
| 6556 | // @llvm.memcpy.inline defines 0 and 1 to both mean no alignment. |
| 6557 | Align DstAlign = MCI.getDestAlign().valueOrOne(); |
| 6558 | Align SrcAlign = MCI.getSourceAlign().valueOrOne(); |
| 6559 | Align Alignment = std::min(a: DstAlign, b: SrcAlign); |
| 6560 | bool isVol = MCI.isVolatile(); |
| 6561 | // FIXME: Support passing different dest/src alignments to the memcpy DAG |
| 6562 | // node. |
| 6563 | SDValue Root = isVol ? getRoot() : getMemoryRoot(); |
| 6564 | SDValue MC = DAG.getMemcpy(Chain: Root, dl: sdl, Dst, Src, Size, Alignment, isVol, |
| 6565 | AlwaysInline: MCI.isForceInlined(), CI: &I, OverrideTailCall: std::nullopt, |
| 6566 | DstPtrInfo: MachinePointerInfo(I.getArgOperand(i: 0)), |
| 6567 | SrcPtrInfo: MachinePointerInfo(I.getArgOperand(i: 1)), |
| 6568 | AAInfo: I.getAAMetadata(), BatchAA); |
| 6569 | updateDAGForMaybeTailCall(MaybeTC: MC); |
| 6570 | return; |
| 6571 | } |
| 6572 | case Intrinsic::memset: |
| 6573 | case Intrinsic::memset_inline: { |
| 6574 | const auto &MSII = cast<MemSetInst>(Val: I); |
| 6575 | SDValue Dst = getValue(V: I.getArgOperand(i: 0)); |
| 6576 | SDValue Value = getValue(V: I.getArgOperand(i: 1)); |
| 6577 | SDValue Size = getValue(V: I.getArgOperand(i: 2)); |
| 6578 | assert((!MSII.isForceInlined() || isa<ConstantSDNode>(Size)) && |
| 6579 | "memset_inline needs constant size" ); |
| 6580 | // @llvm.memset defines 0 and 1 to both mean no alignment. |
| 6581 | Align DstAlign = MSII.getDestAlign().valueOrOne(); |
| 6582 | bool isVol = MSII.isVolatile(); |
| 6583 | SDValue Root = isVol ? getRoot() : getMemoryRoot(); |
| 6584 | SDValue MC = DAG.getMemset( |
| 6585 | Chain: Root, dl: sdl, Dst, Src: Value, Size, Alignment: DstAlign, isVol, AlwaysInline: MSII.isForceInlined(), |
| 6586 | CI: &I, DstPtrInfo: MachinePointerInfo(I.getArgOperand(i: 0)), AAInfo: I.getAAMetadata()); |
| 6587 | updateDAGForMaybeTailCall(MaybeTC: MC); |
| 6588 | return; |
| 6589 | } |
| 6590 | case Intrinsic::memmove: { |
| 6591 | const auto &MMI = cast<MemMoveInst>(Val: I); |
| 6592 | SDValue Op1 = getValue(V: I.getArgOperand(i: 0)); |
| 6593 | SDValue Op2 = getValue(V: I.getArgOperand(i: 1)); |
| 6594 | SDValue Op3 = getValue(V: I.getArgOperand(i: 2)); |
| 6595 | // @llvm.memmove defines 0 and 1 to both mean no alignment. |
| 6596 | Align DstAlign = MMI.getDestAlign().valueOrOne(); |
| 6597 | Align SrcAlign = MMI.getSourceAlign().valueOrOne(); |
| 6598 | Align Alignment = std::min(a: DstAlign, b: SrcAlign); |
| 6599 | bool isVol = MMI.isVolatile(); |
| 6600 | // FIXME: Support passing different dest/src alignments to the memmove DAG |
| 6601 | // node. |
| 6602 | SDValue Root = isVol ? getRoot() : getMemoryRoot(); |
| 6603 | SDValue MM = DAG.getMemmove(Chain: Root, dl: sdl, Dst: Op1, Src: Op2, Size: Op3, Alignment, isVol, CI: &I, |
| 6604 | /* OverrideTailCall */ std::nullopt, |
| 6605 | DstPtrInfo: MachinePointerInfo(I.getArgOperand(i: 0)), |
| 6606 | SrcPtrInfo: MachinePointerInfo(I.getArgOperand(i: 1)), |
| 6607 | AAInfo: I.getAAMetadata(), BatchAA); |
| 6608 | updateDAGForMaybeTailCall(MaybeTC: MM); |
| 6609 | return; |
| 6610 | } |
| 6611 | case Intrinsic::memcpy_element_unordered_atomic: { |
| 6612 | auto &MI = cast<AnyMemCpyInst>(Val: I); |
| 6613 | SDValue Dst = getValue(V: MI.getRawDest()); |
| 6614 | SDValue Src = getValue(V: MI.getRawSource()); |
| 6615 | SDValue Length = getValue(V: MI.getLength()); |
| 6616 | |
| 6617 | Type *LengthTy = MI.getLength()->getType(); |
| 6618 | unsigned ElemSz = MI.getElementSizeInBytes(); |
| 6619 | bool isTC = I.isTailCall() && isInTailCallPosition(Call: I, TM: DAG.getTarget()); |
| 6620 | SDValue MC = |
| 6621 | DAG.getAtomicMemcpy(Chain: getRoot(), dl: sdl, Dst, Src, Size: Length, SizeTy: LengthTy, ElemSz, |
| 6622 | isTailCall: isTC, DstPtrInfo: MachinePointerInfo(MI.getRawDest()), |
| 6623 | SrcPtrInfo: MachinePointerInfo(MI.getRawSource())); |
| 6624 | updateDAGForMaybeTailCall(MaybeTC: MC); |
| 6625 | return; |
| 6626 | } |
| 6627 | case Intrinsic::memmove_element_unordered_atomic: { |
| 6628 | auto &MI = cast<AnyMemMoveInst>(Val: I); |
| 6629 | SDValue Dst = getValue(V: MI.getRawDest()); |
| 6630 | SDValue Src = getValue(V: MI.getRawSource()); |
| 6631 | SDValue Length = getValue(V: MI.getLength()); |
| 6632 | |
| 6633 | Type *LengthTy = MI.getLength()->getType(); |
| 6634 | unsigned ElemSz = MI.getElementSizeInBytes(); |
| 6635 | bool isTC = I.isTailCall() && isInTailCallPosition(Call: I, TM: DAG.getTarget()); |
| 6636 | SDValue MC = |
| 6637 | DAG.getAtomicMemmove(Chain: getRoot(), dl: sdl, Dst, Src, Size: Length, SizeTy: LengthTy, ElemSz, |
| 6638 | isTailCall: isTC, DstPtrInfo: MachinePointerInfo(MI.getRawDest()), |
| 6639 | SrcPtrInfo: MachinePointerInfo(MI.getRawSource())); |
| 6640 | updateDAGForMaybeTailCall(MaybeTC: MC); |
| 6641 | return; |
| 6642 | } |
| 6643 | case Intrinsic::memset_element_unordered_atomic: { |
| 6644 | auto &MI = cast<AnyMemSetInst>(Val: I); |
| 6645 | SDValue Dst = getValue(V: MI.getRawDest()); |
| 6646 | SDValue Val = getValue(V: MI.getValue()); |
| 6647 | SDValue Length = getValue(V: MI.getLength()); |
| 6648 | |
| 6649 | Type *LengthTy = MI.getLength()->getType(); |
| 6650 | unsigned ElemSz = MI.getElementSizeInBytes(); |
| 6651 | bool isTC = I.isTailCall() && isInTailCallPosition(Call: I, TM: DAG.getTarget()); |
| 6652 | SDValue MC = |
| 6653 | DAG.getAtomicMemset(Chain: getRoot(), dl: sdl, Dst, Value: Val, Size: Length, SizeTy: LengthTy, ElemSz, |
| 6654 | isTailCall: isTC, DstPtrInfo: MachinePointerInfo(MI.getRawDest())); |
| 6655 | updateDAGForMaybeTailCall(MaybeTC: MC); |
| 6656 | return; |
| 6657 | } |
| 6658 | case Intrinsic::call_preallocated_setup: { |
| 6659 | const CallBase *PreallocatedCall = FindPreallocatedCall(PreallocatedSetup: &I); |
| 6660 | SDValue SrcValue = DAG.getSrcValue(v: PreallocatedCall); |
| 6661 | SDValue Res = DAG.getNode(Opcode: ISD::PREALLOCATED_SETUP, DL: sdl, VT: MVT::Other, |
| 6662 | N1: getRoot(), N2: SrcValue); |
| 6663 | setValue(V: &I, NewN: Res); |
| 6664 | DAG.setRoot(Res); |
| 6665 | return; |
| 6666 | } |
| 6667 | case Intrinsic::call_preallocated_arg: { |
| 6668 | const CallBase *PreallocatedCall = FindPreallocatedCall(PreallocatedSetup: I.getOperand(i_nocapture: 0)); |
| 6669 | SDValue SrcValue = DAG.getSrcValue(v: PreallocatedCall); |
| 6670 | SDValue Ops[3]; |
| 6671 | Ops[0] = getRoot(); |
| 6672 | Ops[1] = SrcValue; |
| 6673 | Ops[2] = DAG.getTargetConstant(Val: *cast<ConstantInt>(Val: I.getArgOperand(i: 1)), DL: sdl, |
| 6674 | VT: MVT::i32); // arg index |
| 6675 | SDValue Res = DAG.getNode( |
| 6676 | Opcode: ISD::PREALLOCATED_ARG, DL: sdl, |
| 6677 | VTList: DAG.getVTList(VT1: TLI.getPointerTy(DL: DAG.getDataLayout()), VT2: MVT::Other), Ops); |
| 6678 | setValue(V: &I, NewN: Res); |
| 6679 | DAG.setRoot(Res.getValue(R: 1)); |
| 6680 | return; |
| 6681 | } |
| 6682 | |
| 6683 | case Intrinsic::eh_typeid_for: { |
| 6684 | // Find the type id for the given typeinfo. |
| 6685 | GlobalValue *GV = ExtractTypeInfo(V: I.getArgOperand(i: 0)); |
| 6686 | unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(TI: GV); |
| 6687 | Res = DAG.getConstant(Val: TypeID, DL: sdl, VT: MVT::i32); |
| 6688 | setValue(V: &I, NewN: Res); |
| 6689 | return; |
| 6690 | } |
| 6691 | |
| 6692 | case Intrinsic::eh_return_i32: |
| 6693 | case Intrinsic::eh_return_i64: |
| 6694 | DAG.getMachineFunction().setCallsEHReturn(true); |
| 6695 | DAG.setRoot(DAG.getNode(Opcode: ISD::EH_RETURN, DL: sdl, |
| 6696 | VT: MVT::Other, |
| 6697 | N1: getControlRoot(), |
| 6698 | N2: getValue(V: I.getArgOperand(i: 0)), |
| 6699 | N3: getValue(V: I.getArgOperand(i: 1)))); |
| 6700 | return; |
| 6701 | case Intrinsic::eh_unwind_init: |
| 6702 | DAG.getMachineFunction().setCallsUnwindInit(true); |
| 6703 | return; |
| 6704 | case Intrinsic::eh_dwarf_cfa: |
| 6705 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::EH_DWARF_CFA, DL: sdl, |
| 6706 | VT: TLI.getPointerTy(DL: DAG.getDataLayout()), |
| 6707 | Operand: getValue(V: I.getArgOperand(i: 0)))); |
| 6708 | return; |
| 6709 | case Intrinsic::eh_sjlj_callsite: { |
| 6710 | ConstantInt *CI = cast<ConstantInt>(Val: I.getArgOperand(i: 0)); |
| 6711 | assert(FuncInfo.getCurrentCallSite() == 0 && "Overlapping call sites!");
| 6712 | |
| 6713 | FuncInfo.setCurrentCallSite(CI->getZExtValue()); |
| 6714 | return; |
| 6715 | } |
| 6716 | case Intrinsic::eh_sjlj_functioncontext: { |
| 6717 | // Get and store the index of the function context. |
| 6718 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); |
| 6719 | AllocaInst *FnCtx = |
| 6720 | cast<AllocaInst>(Val: I.getArgOperand(i: 0)->stripPointerCasts()); |
| 6721 | int FI = FuncInfo.StaticAllocaMap[FnCtx]; |
| 6722 | MFI.setFunctionContextIndex(FI); |
| 6723 | return; |
| 6724 | } |
| 6725 | case Intrinsic::eh_sjlj_setjmp: { |
| 6726 | SDValue Ops[2]; |
| 6727 | Ops[0] = getRoot(); |
| 6728 | Ops[1] = getValue(V: I.getArgOperand(i: 0)); |
| 6729 | SDValue Op = DAG.getNode(Opcode: ISD::EH_SJLJ_SETJMP, DL: sdl, |
| 6730 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::Other), Ops); |
| 6731 | setValue(V: &I, NewN: Op.getValue(R: 0)); |
| 6732 | DAG.setRoot(Op.getValue(R: 1)); |
| 6733 | return; |
| 6734 | } |
| 6735 | case Intrinsic::eh_sjlj_longjmp: |
| 6736 | DAG.setRoot(DAG.getNode(Opcode: ISD::EH_SJLJ_LONGJMP, DL: sdl, VT: MVT::Other, |
| 6737 | N1: getRoot(), N2: getValue(V: I.getArgOperand(i: 0)))); |
| 6738 | return; |
| 6739 | case Intrinsic::eh_sjlj_setup_dispatch: |
| 6740 | DAG.setRoot(DAG.getNode(Opcode: ISD::EH_SJLJ_SETUP_DISPATCH, DL: sdl, VT: MVT::Other, |
| 6741 | Operand: getRoot())); |
| 6742 | return; |
| 6743 | case Intrinsic::masked_gather: |
| 6744 | visitMaskedGather(I); |
| 6745 | return; |
| 6746 | case Intrinsic::masked_load: |
| 6747 | visitMaskedLoad(I); |
| 6748 | return; |
| 6749 | case Intrinsic::masked_scatter: |
| 6750 | visitMaskedScatter(I); |
| 6751 | return; |
| 6752 | case Intrinsic::masked_store: |
| 6753 | visitMaskedStore(I); |
| 6754 | return; |
| 6755 | case Intrinsic::masked_expandload: |
| 6756 | visitMaskedLoad(I, IsExpanding: true /* IsExpanding */); |
| 6757 | return; |
| 6758 | case Intrinsic::masked_compressstore: |
| 6759 | visitMaskedStore(I, IsCompressing: true /* IsCompressing */); |
| 6760 | return; |
| 6761 | case Intrinsic::powi: |
| 6762 | setValue(V: &I, NewN: ExpandPowI(DL: sdl, LHS: getValue(V: I.getArgOperand(i: 0)), |
| 6763 | RHS: getValue(V: I.getArgOperand(i: 1)), DAG)); |
| 6764 | return; |
| 6765 | case Intrinsic::log: |
| 6766 | setValue(V: &I, NewN: expandLog(dl: sdl, Op: getValue(V: I.getArgOperand(i: 0)), DAG, TLI, Flags)); |
| 6767 | return; |
| 6768 | case Intrinsic::log2: |
| 6769 | setValue(V: &I, |
| 6770 | NewN: expandLog2(dl: sdl, Op: getValue(V: I.getArgOperand(i: 0)), DAG, TLI, Flags)); |
| 6771 | return; |
| 6772 | case Intrinsic::log10: |
| 6773 | setValue(V: &I, |
| 6774 | NewN: expandLog10(dl: sdl, Op: getValue(V: I.getArgOperand(i: 0)), DAG, TLI, Flags)); |
| 6775 | return; |
| 6776 | case Intrinsic::exp: |
| 6777 | setValue(V: &I, NewN: expandExp(dl: sdl, Op: getValue(V: I.getArgOperand(i: 0)), DAG, TLI, Flags)); |
| 6778 | return; |
| 6779 | case Intrinsic::exp2: |
| 6780 | setValue(V: &I, |
| 6781 | NewN: expandExp2(dl: sdl, Op: getValue(V: I.getArgOperand(i: 0)), DAG, TLI, Flags)); |
| 6782 | return; |
| 6783 | case Intrinsic::pow: |
| 6784 | setValue(V: &I, NewN: expandPow(dl: sdl, LHS: getValue(V: I.getArgOperand(i: 0)), |
| 6785 | RHS: getValue(V: I.getArgOperand(i: 1)), DAG, TLI, Flags)); |
| 6786 | return; |
| 6787 | case Intrinsic::sqrt: |
| 6788 | case Intrinsic::fabs: |
| 6789 | case Intrinsic::sin: |
| 6790 | case Intrinsic::cos: |
| 6791 | case Intrinsic::tan: |
| 6792 | case Intrinsic::asin: |
| 6793 | case Intrinsic::acos: |
| 6794 | case Intrinsic::atan: |
| 6795 | case Intrinsic::sinh: |
| 6796 | case Intrinsic::cosh: |
| 6797 | case Intrinsic::tanh: |
| 6798 | case Intrinsic::exp10: |
| 6799 | case Intrinsic::floor: |
| 6800 | case Intrinsic::ceil: |
| 6801 | case Intrinsic::trunc: |
| 6802 | case Intrinsic::rint: |
| 6803 | case Intrinsic::nearbyint: |
| 6804 | case Intrinsic::round: |
| 6805 | case Intrinsic::roundeven: |
| 6806 | case Intrinsic::canonicalize: { |
| 6807 | unsigned Opcode; |
| 6808 | // clang-format off |
| 6809 | switch (Intrinsic) { |
| 6810 | default: llvm_unreachable("Impossible intrinsic" ); // Can't reach here. |
| 6811 | case Intrinsic::sqrt: Opcode = ISD::FSQRT; break; |
| 6812 | case Intrinsic::fabs: Opcode = ISD::FABS; break; |
| 6813 | case Intrinsic::sin: Opcode = ISD::FSIN; break; |
| 6814 | case Intrinsic::cos: Opcode = ISD::FCOS; break; |
| 6815 | case Intrinsic::tan: Opcode = ISD::FTAN; break; |
| 6816 | case Intrinsic::asin: Opcode = ISD::FASIN; break; |
| 6817 | case Intrinsic::acos: Opcode = ISD::FACOS; break; |
| 6818 | case Intrinsic::atan: Opcode = ISD::FATAN; break; |
| 6819 | case Intrinsic::sinh: Opcode = ISD::FSINH; break; |
| 6820 | case Intrinsic::cosh: Opcode = ISD::FCOSH; break; |
| 6821 | case Intrinsic::tanh: Opcode = ISD::FTANH; break; |
| 6822 | case Intrinsic::exp10: Opcode = ISD::FEXP10; break; |
| 6823 | case Intrinsic::floor: Opcode = ISD::FFLOOR; break; |
| 6824 | case Intrinsic::ceil: Opcode = ISD::FCEIL; break; |
| 6825 | case Intrinsic::trunc: Opcode = ISD::FTRUNC; break; |
| 6826 | case Intrinsic::rint: Opcode = ISD::FRINT; break; |
| 6827 | case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break; |
| 6828 | case Intrinsic::round: Opcode = ISD::FROUND; break; |
| 6829 | case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN; break; |
| 6830 | case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break; |
| 6831 | } |
| 6832 | // clang-format on |
| 6833 | |
| 6834 | setValue(V: &I, NewN: DAG.getNode(Opcode, DL: sdl, |
| 6835 | VT: getValue(V: I.getArgOperand(i: 0)).getValueType(), |
| 6836 | Operand: getValue(V: I.getArgOperand(i: 0)), Flags)); |
| 6837 | return; |
| 6838 | } |
| 6839 | case Intrinsic::atan2: |
| 6840 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::FATAN2, DL: sdl, |
| 6841 | VT: getValue(V: I.getArgOperand(i: 0)).getValueType(), |
| 6842 | N1: getValue(V: I.getArgOperand(i: 0)), |
| 6843 | N2: getValue(V: I.getArgOperand(i: 1)), Flags)); |
| 6844 | return; |
| 6845 | case Intrinsic::lround: |
| 6846 | case Intrinsic::llround: |
| 6847 | case Intrinsic::lrint: |
| 6848 | case Intrinsic::llrint: { |
| 6849 | unsigned Opcode; |
| 6850 | // clang-format off |
| 6851 | switch (Intrinsic) { |
| 6852 | default: llvm_unreachable("Impossible intrinsic" ); // Can't reach here. |
| 6853 | case Intrinsic::lround: Opcode = ISD::LROUND; break; |
| 6854 | case Intrinsic::llround: Opcode = ISD::LLROUND; break; |
| 6855 | case Intrinsic::lrint: Opcode = ISD::LRINT; break; |
| 6856 | case Intrinsic::llrint: Opcode = ISD::LLRINT; break; |
| 6857 | } |
| 6858 | // clang-format on |
| 6859 | |
| 6860 | EVT RetVT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 6861 | setValue(V: &I, NewN: DAG.getNode(Opcode, DL: sdl, VT: RetVT, |
| 6862 | Operand: getValue(V: I.getArgOperand(i: 0)))); |
| 6863 | return; |
| 6864 | } |
| 6865 | case Intrinsic::minnum: |
| 6866 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::FMINNUM, DL: sdl, |
| 6867 | VT: getValue(V: I.getArgOperand(i: 0)).getValueType(), |
| 6868 | N1: getValue(V: I.getArgOperand(i: 0)), |
| 6869 | N2: getValue(V: I.getArgOperand(i: 1)), Flags)); |
| 6870 | return; |
| 6871 | case Intrinsic::maxnum: |
| 6872 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::FMAXNUM, DL: sdl, |
| 6873 | VT: getValue(V: I.getArgOperand(i: 0)).getValueType(), |
| 6874 | N1: getValue(V: I.getArgOperand(i: 0)), |
| 6875 | N2: getValue(V: I.getArgOperand(i: 1)), Flags)); |
| 6876 | return; |
| 6877 | case Intrinsic::minimum: |
| 6878 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::FMINIMUM, DL: sdl, |
| 6879 | VT: getValue(V: I.getArgOperand(i: 0)).getValueType(), |
| 6880 | N1: getValue(V: I.getArgOperand(i: 0)), |
| 6881 | N2: getValue(V: I.getArgOperand(i: 1)), Flags)); |
| 6882 | return; |
| 6883 | case Intrinsic::maximum: |
| 6884 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::FMAXIMUM, DL: sdl, |
| 6885 | VT: getValue(V: I.getArgOperand(i: 0)).getValueType(), |
| 6886 | N1: getValue(V: I.getArgOperand(i: 0)), |
| 6887 | N2: getValue(V: I.getArgOperand(i: 1)), Flags)); |
| 6888 | return; |
| 6889 | case Intrinsic::minimumnum: |
| 6890 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::FMINIMUMNUM, DL: sdl, |
| 6891 | VT: getValue(V: I.getArgOperand(i: 0)).getValueType(), |
| 6892 | N1: getValue(V: I.getArgOperand(i: 0)), |
| 6893 | N2: getValue(V: I.getArgOperand(i: 1)), Flags)); |
| 6894 | return; |
| 6895 | case Intrinsic::maximumnum: |
| 6896 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::FMAXIMUMNUM, DL: sdl, |
| 6897 | VT: getValue(V: I.getArgOperand(i: 0)).getValueType(), |
| 6898 | N1: getValue(V: I.getArgOperand(i: 0)), |
| 6899 | N2: getValue(V: I.getArgOperand(i: 1)), Flags)); |
| 6900 | return; |
| 6901 | case Intrinsic::copysign: |
| 6902 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::FCOPYSIGN, DL: sdl, |
| 6903 | VT: getValue(V: I.getArgOperand(i: 0)).getValueType(), |
| 6904 | N1: getValue(V: I.getArgOperand(i: 0)), |
| 6905 | N2: getValue(V: I.getArgOperand(i: 1)), Flags)); |
| 6906 | return; |
| 6907 | case Intrinsic::ldexp: |
| 6908 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::FLDEXP, DL: sdl, |
| 6909 | VT: getValue(V: I.getArgOperand(i: 0)).getValueType(), |
| 6910 | N1: getValue(V: I.getArgOperand(i: 0)), |
| 6911 | N2: getValue(V: I.getArgOperand(i: 1)), Flags)); |
| 6912 | return; |
| 6913 | case Intrinsic::modf: |
| 6914 | case Intrinsic::sincos: |
| 6915 | case Intrinsic::sincospi: |
| 6916 | case Intrinsic::frexp: { |
| 6917 | unsigned Opcode; |
| 6918 | switch (Intrinsic) { |
| 6919 | default: |
| 6920 | llvm_unreachable("unexpected intrinsic" ); |
| 6921 | case Intrinsic::sincos: |
| 6922 | Opcode = ISD::FSINCOS; |
| 6923 | break; |
| 6924 | case Intrinsic::sincospi: |
| 6925 | Opcode = ISD::FSINCOSPI; |
| 6926 | break; |
| 6927 | case Intrinsic::modf: |
| 6928 | Opcode = ISD::FMODF; |
| 6929 | break; |
| 6930 | case Intrinsic::frexp: |
| 6931 | Opcode = ISD::FFREXP; |
| 6932 | break; |
| 6933 | } |
| 6934 | SmallVector<EVT, 2> ValueVTs; |
| 6935 | ComputeValueVTs(TLI, DL: DAG.getDataLayout(), Ty: I.getType(), ValueVTs); |
| 6936 | SDVTList VTs = DAG.getVTList(VTs: ValueVTs); |
| 6937 | setValue( |
| 6938 | V: &I, NewN: DAG.getNode(Opcode, DL: sdl, VTList: VTs, Ops: getValue(V: I.getArgOperand(i: 0)), Flags)); |
| 6939 | return; |
| 6940 | } |
| 6941 | case Intrinsic::arithmetic_fence: { |
| 6942 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::ARITH_FENCE, DL: sdl, |
| 6943 | VT: getValue(V: I.getArgOperand(i: 0)).getValueType(), |
| 6944 | Operand: getValue(V: I.getArgOperand(i: 0)), Flags)); |
| 6945 | return; |
| 6946 | } |
| 6947 | case Intrinsic::fma: |
| 6948 | setValue(V: &I, NewN: DAG.getNode( |
| 6949 | Opcode: ISD::FMA, DL: sdl, VT: getValue(V: I.getArgOperand(i: 0)).getValueType(), |
| 6950 | N1: getValue(V: I.getArgOperand(i: 0)), N2: getValue(V: I.getArgOperand(i: 1)), |
| 6951 | N3: getValue(V: I.getArgOperand(i: 2)), Flags)); |
| 6952 | return; |
| 6953 | #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \ |
| 6954 | case Intrinsic::INTRINSIC: |
| 6955 | #include "llvm/IR/ConstrainedOps.def" |
| 6956 | visitConstrainedFPIntrinsic(FPI: cast<ConstrainedFPIntrinsic>(Val: I)); |
| 6957 | return; |
| 6958 | #define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID: |
| 6959 | #include "llvm/IR/VPIntrinsics.def" |
| 6960 | visitVectorPredicationIntrinsic(VPIntrin: cast<VPIntrinsic>(Val: I)); |
| 6961 | return; |
| 6962 | case Intrinsic::fptrunc_round: { |
| 6963 | // Get the last argument, the rounding-mode metadata, and convert it to an
| 6964 | // integer constant for the node.
| 6965 | Metadata *MD = cast<MetadataAsValue>(Val: I.getArgOperand(i: 1))->getMetadata(); |
| 6966 | std::optional<RoundingMode> RoundMode = |
| 6967 | convertStrToRoundingMode(cast<MDString>(Val: MD)->getString()); |
| 6968 | |
| 6969 | EVT VT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 6970 | |
| 6971 | // Propagate fast-math-flags from IR to node(s). |
| 6972 | SDNodeFlags Flags; |
| 6973 | Flags.copyFMF(FPMO: *cast<FPMathOperator>(Val: &I)); |
| 6974 | SelectionDAG::FlagInserter FlagsInserter(DAG, Flags); |
| 6975 | |
| 6976 | SDValue Result = DAG.getNode(
| 6978 | Opcode: ISD::FPTRUNC_ROUND, DL: sdl, VT, N1: getValue(V: I.getArgOperand(i: 0)), |
| 6979 | N2: DAG.getTargetConstant(Val: (int)*RoundMode, DL: sdl, VT: MVT::i32)); |
| 6980 | setValue(V: &I, NewN: Result); |
| 6981 | |
| 6982 | return; |
| 6983 | } |
| 6984 | case Intrinsic::fmuladd: { |
| 6985 | EVT VT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 6986 | if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict && |
| 6987 | TLI.isFMAFasterThanFMulAndFAdd(MF: DAG.getMachineFunction(), VT)) { |
| 6988 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::FMA, DL: sdl, |
| 6989 | VT: getValue(V: I.getArgOperand(i: 0)).getValueType(), |
| 6990 | N1: getValue(V: I.getArgOperand(i: 0)), |
| 6991 | N2: getValue(V: I.getArgOperand(i: 1)), |
| 6992 | N3: getValue(V: I.getArgOperand(i: 2)), Flags)); |
| 6993 | } else { |
| 6994 | // TODO: Intrinsic calls should have fast-math-flags. |
| 6995 | SDValue Mul = DAG.getNode( |
| 6996 | Opcode: ISD::FMUL, DL: sdl, VT: getValue(V: I.getArgOperand(i: 0)).getValueType(), |
| 6997 | N1: getValue(V: I.getArgOperand(i: 0)), N2: getValue(V: I.getArgOperand(i: 1)), Flags); |
| 6998 | SDValue Add = DAG.getNode(Opcode: ISD::FADD, DL: sdl, |
| 6999 | VT: getValue(V: I.getArgOperand(i: 0)).getValueType(), |
| 7000 | N1: Mul, N2: getValue(V: I.getArgOperand(i: 2)), Flags); |
| 7001 | setValue(V: &I, NewN: Add); |
| 7002 | } |
| 7003 | return; |
| 7004 | } |
| 7005 | case Intrinsic::convert_to_fp16: |
| 7006 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::BITCAST, DL: sdl, VT: MVT::i16, |
| 7007 | Operand: DAG.getNode(Opcode: ISD::FP_ROUND, DL: sdl, VT: MVT::f16, |
| 7008 | N1: getValue(V: I.getArgOperand(i: 0)), |
| 7009 | N2: DAG.getTargetConstant(Val: 0, DL: sdl, |
| 7010 | VT: MVT::i32)))); |
| 7011 | return; |
| 7012 | case Intrinsic::convert_from_fp16: |
| 7013 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::FP_EXTEND, DL: sdl, |
| 7014 | VT: TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()), |
| 7015 | Operand: DAG.getNode(Opcode: ISD::BITCAST, DL: sdl, VT: MVT::f16, |
| 7016 | Operand: getValue(V: I.getArgOperand(i: 0))))); |
| 7017 | return; |
| 7018 | case Intrinsic::fptosi_sat: { |
| 7019 | EVT VT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 7020 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::FP_TO_SINT_SAT, DL: sdl, VT, |
| 7021 | N1: getValue(V: I.getArgOperand(i: 0)), |
| 7022 | N2: DAG.getValueType(VT.getScalarType()))); |
| 7023 | return; |
| 7024 | } |
| 7025 | case Intrinsic::fptoui_sat: { |
| 7026 | EVT VT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 7027 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::FP_TO_UINT_SAT, DL: sdl, VT, |
| 7028 | N1: getValue(V: I.getArgOperand(i: 0)), |
| 7029 | N2: DAG.getValueType(VT.getScalarType()))); |
| 7030 | return; |
| 7031 | } |
| 7032 | case Intrinsic::set_rounding: |
| 7033 | Res = DAG.getNode(Opcode: ISD::SET_ROUNDING, DL: sdl, VT: MVT::Other, |
| 7034 | Ops: {getRoot(), getValue(V: I.getArgOperand(i: 0))}); |
| 7035 | setValue(V: &I, NewN: Res); |
| 7036 | DAG.setRoot(Res.getValue(R: 0)); |
| 7037 | return; |
| 7038 | case Intrinsic::is_fpclass: { |
| 7039 | const DataLayout DLayout = DAG.getDataLayout(); |
| 7040 | EVT DestVT = TLI.getValueType(DL: DLayout, Ty: I.getType()); |
| 7041 | EVT ArgVT = TLI.getValueType(DL: DLayout, Ty: I.getArgOperand(i: 0)->getType()); |
| 7042 | FPClassTest Test = static_cast<FPClassTest>( |
| 7043 | cast<ConstantInt>(Val: I.getArgOperand(i: 1))->getZExtValue()); |
| 7044 | MachineFunction &MF = DAG.getMachineFunction(); |
| 7045 | const Function &F = MF.getFunction(); |
| 7046 | SDValue Op = getValue(V: I.getArgOperand(i: 0)); |
| 7047 | SDNodeFlags Flags; |
| 7048 | Flags.setNoFPExcept( |
| 7049 | !F.getAttributes().hasFnAttr(Kind: llvm::Attribute::StrictFP)); |
| 7050 | // If ISD::IS_FPCLASS should be expanded, do it right now, because the |
| 7051 | // expansion can use illegal types. Expanding it early allows these types |
| 7052 | // to be legalized prior to selection. |
| 7053 | if (!TLI.isOperationLegal(Op: ISD::IS_FPCLASS, VT: ArgVT) && |
| 7054 | !TLI.isOperationCustom(Op: ISD::IS_FPCLASS, VT: ArgVT)) { |
| 7055 | SDValue Result = TLI.expandIS_FPCLASS(ResultVT: DestVT, Op, Test, Flags, DL: sdl, DAG); |
| 7056 | setValue(V: &I, NewN: Result); |
| 7057 | return; |
| 7058 | } |
| 7059 | |
| 7060 | SDValue Check = DAG.getTargetConstant(Val: Test, DL: sdl, VT: MVT::i32); |
| 7061 | SDValue V = DAG.getNode(Opcode: ISD::IS_FPCLASS, DL: sdl, VT: DestVT, Ops: {Op, Check}, Flags); |
| 7062 | setValue(V: &I, NewN: V); |
| 7063 | return; |
| 7064 | } |
| 7065 | case Intrinsic::get_fpenv: { |
| 7066 | const DataLayout DLayout = DAG.getDataLayout(); |
| 7067 | EVT EnvVT = TLI.getValueType(DL: DLayout, Ty: I.getType()); |
| 7068 | Align TempAlign = DAG.getEVTAlign(MemoryVT: EnvVT); |
| 7069 | SDValue Chain = getRoot(); |
| 7070 | // Use GET_FPENV if it is legal or custom. Otherwise use the memory-based |
| 7071 | // node with temporary storage on the stack. |
| 7072 | if (TLI.isOperationLegalOrCustom(Op: ISD::GET_FPENV, VT: EnvVT)) { |
| 7073 | Res = DAG.getNode( |
| 7074 | Opcode: ISD::GET_FPENV, DL: sdl, |
| 7075 | VTList: DAG.getVTList(VT1: TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()), |
| 7076 | VT2: MVT::Other), |
| 7077 | N: Chain); |
| 7078 | } else { |
| 7079 | SDValue Temp = DAG.CreateStackTemporary(VT: EnvVT, minAlign: TempAlign.value()); |
| 7080 | int SPFI = cast<FrameIndexSDNode>(Val: Temp.getNode())->getIndex(); |
| 7081 | auto MPI = |
| 7082 | MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI: SPFI); |
| 7083 | MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( |
| 7084 | PtrInfo: MPI, F: MachineMemOperand::MOStore, Size: LocationSize::beforeOrAfterPointer(), |
| 7085 | BaseAlignment: TempAlign); |
| 7086 | Chain = DAG.getGetFPEnv(Chain, dl: sdl, Ptr: Temp, MemVT: EnvVT, MMO); |
| 7087 | Res = DAG.getLoad(VT: EnvVT, dl: sdl, Chain, Ptr: Temp, PtrInfo: MPI); |
| 7088 | } |
| 7089 | setValue(V: &I, NewN: Res); |
| 7090 | DAG.setRoot(Res.getValue(R: 1)); |
| 7091 | return; |
| 7092 | } |
| 7093 | case Intrinsic::set_fpenv: { |
| 7094 | const DataLayout DLayout = DAG.getDataLayout(); |
| 7095 | SDValue Env = getValue(V: I.getArgOperand(i: 0)); |
| 7096 | EVT EnvVT = Env.getValueType(); |
| 7097 | Align TempAlign = DAG.getEVTAlign(MemoryVT: EnvVT); |
| 7098 | SDValue Chain = getRoot(); |
| 7099 | // If SET_FPENV is custom or legal, use it. Otherwise load the |
| 7100 | // environment from memory. |
| 7101 | if (TLI.isOperationLegalOrCustom(Op: ISD::SET_FPENV, VT: EnvVT)) { |
| 7102 | Chain = DAG.getNode(Opcode: ISD::SET_FPENV, DL: sdl, VT: MVT::Other, N1: Chain, N2: Env); |
| 7103 | } else { |
| 7104 | // Allocate stack space, copy the environment bits into it, and use this |
| 7105 | // memory in SET_FPENV_MEM. |
| 7106 | SDValue Temp = DAG.CreateStackTemporary(VT: EnvVT, minAlign: TempAlign.value()); |
| 7107 | int SPFI = cast<FrameIndexSDNode>(Val: Temp.getNode())->getIndex(); |
| 7108 | auto MPI = |
| 7109 | MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI: SPFI); |
| 7110 | Chain = DAG.getStore(Chain, dl: sdl, Val: Env, Ptr: Temp, PtrInfo: MPI, Alignment: TempAlign, |
| 7111 | MMOFlags: MachineMemOperand::MOStore); |
| 7112 | MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( |
| 7113 | PtrInfo: MPI, F: MachineMemOperand::MOLoad, Size: LocationSize::beforeOrAfterPointer(), |
| 7114 | BaseAlignment: TempAlign); |
| 7115 | Chain = DAG.getSetFPEnv(Chain, dl: sdl, Ptr: Temp, MemVT: EnvVT, MMO); |
| 7116 | } |
| 7117 | DAG.setRoot(Chain); |
| 7118 | return; |
| 7119 | } |
| 7120 | case Intrinsic::reset_fpenv: |
| 7121 | DAG.setRoot(DAG.getNode(Opcode: ISD::RESET_FPENV, DL: sdl, VT: MVT::Other, Operand: getRoot())); |
| 7122 | return; |
| 7123 | case Intrinsic::get_fpmode: |
| 7124 | Res = DAG.getNode( |
| 7125 | Opcode: ISD::GET_FPMODE, DL: sdl, |
| 7126 | VTList: DAG.getVTList(VT1: TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()), |
| 7127 | VT2: MVT::Other), |
| 7128 | N: DAG.getRoot()); |
| 7129 | setValue(V: &I, NewN: Res); |
| 7130 | DAG.setRoot(Res.getValue(R: 1)); |
| 7131 | return; |
| 7132 | case Intrinsic::set_fpmode: |
| 7133 | Res = DAG.getNode(Opcode: ISD::SET_FPMODE, DL: sdl, VT: MVT::Other, N1: {DAG.getRoot()}, |
| 7134 | N2: getValue(V: I.getArgOperand(i: 0))); |
| 7135 | DAG.setRoot(Res); |
| 7136 | return; |
| 7137 | case Intrinsic::reset_fpmode: { |
| 7138 | Res = DAG.getNode(Opcode: ISD::RESET_FPMODE, DL: sdl, VT: MVT::Other, Operand: getRoot()); |
| 7139 | DAG.setRoot(Res); |
| 7140 | return; |
| 7141 | } |
| 7142 | case Intrinsic::pcmarker: { |
| 7143 | SDValue Tmp = getValue(V: I.getArgOperand(i: 0)); |
| 7144 | DAG.setRoot(DAG.getNode(Opcode: ISD::PCMARKER, DL: sdl, VT: MVT::Other, N1: getRoot(), N2: Tmp)); |
| 7145 | return; |
| 7146 | } |
| 7147 | case Intrinsic::readcyclecounter: { |
| 7148 | SDValue Op = getRoot(); |
| 7149 | Res = DAG.getNode(Opcode: ISD::READCYCLECOUNTER, DL: sdl, |
| 7150 | VTList: DAG.getVTList(VT1: MVT::i64, VT2: MVT::Other), N: Op); |
| 7151 | setValue(V: &I, NewN: Res); |
| 7152 | DAG.setRoot(Res.getValue(R: 1)); |
| 7153 | return; |
| 7154 | } |
| 7155 | case Intrinsic::readsteadycounter: { |
| 7156 | SDValue Op = getRoot(); |
| 7157 | Res = DAG.getNode(Opcode: ISD::READSTEADYCOUNTER, DL: sdl, |
| 7158 | VTList: DAG.getVTList(VT1: MVT::i64, VT2: MVT::Other), N: Op); |
| 7159 | setValue(V: &I, NewN: Res); |
| 7160 | DAG.setRoot(Res.getValue(R: 1)); |
| 7161 | return; |
| 7162 | } |
| 7163 | case Intrinsic::bitreverse: |
| 7164 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::BITREVERSE, DL: sdl, |
| 7165 | VT: getValue(V: I.getArgOperand(i: 0)).getValueType(), |
| 7166 | Operand: getValue(V: I.getArgOperand(i: 0)))); |
| 7167 | return; |
| 7168 | case Intrinsic::bswap: |
| 7169 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::BSWAP, DL: sdl, |
| 7170 | VT: getValue(V: I.getArgOperand(i: 0)).getValueType(), |
| 7171 | Operand: getValue(V: I.getArgOperand(i: 0)))); |
| 7172 | return; |
| 7173 | case Intrinsic::cttz: { |
| 7174 | SDValue Arg = getValue(V: I.getArgOperand(i: 0)); |
| 7175 | ConstantInt *CI = cast<ConstantInt>(Val: I.getArgOperand(i: 1)); |
| 7176 | EVT Ty = Arg.getValueType(); |
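| | // The second argument is the is-zero-poison flag; when it is nonzero, the |
| | // _ZERO_UNDEF variant is used. |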
| 7177 | setValue(V: &I, NewN: DAG.getNode(Opcode: CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF, |
| 7178 | DL: sdl, VT: Ty, Operand: Arg)); |
| 7179 | return; |
| 7180 | } |
| 7181 | case Intrinsic::ctlz: { |
| 7182 | SDValue Arg = getValue(V: I.getArgOperand(i: 0)); |
| 7183 | ConstantInt *CI = cast<ConstantInt>(Val: I.getArgOperand(i: 1)); |
| 7184 | EVT Ty = Arg.getValueType(); |
| 7185 | setValue(V: &I, NewN: DAG.getNode(Opcode: CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF, |
| 7186 | DL: sdl, VT: Ty, Operand: Arg)); |
| 7187 | return; |
| 7188 | } |
| 7189 | case Intrinsic::ctpop: { |
| 7190 | SDValue Arg = getValue(V: I.getArgOperand(i: 0)); |
| 7191 | EVT Ty = Arg.getValueType(); |
| 7192 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::CTPOP, DL: sdl, VT: Ty, Operand: Arg)); |
| 7193 | return; |
| 7194 | } |
| 7195 | case Intrinsic::fshl: |
| 7196 | case Intrinsic::fshr: { |
| 7197 | bool IsFSHL = Intrinsic == Intrinsic::fshl; |
| 7198 | SDValue X = getValue(V: I.getArgOperand(i: 0)); |
| 7199 | SDValue Y = getValue(V: I.getArgOperand(i: 1)); |
| 7200 | SDValue Z = getValue(V: I.getArgOperand(i: 2)); |
| 7201 | EVT VT = X.getValueType(); |
| 7202 | |
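| | // A funnel shift of a value with itself is a rotate. |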
| 7203 | if (X == Y) { |
| 7204 | auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR; |
| 7205 | setValue(V: &I, NewN: DAG.getNode(Opcode: RotateOpcode, DL: sdl, VT, N1: X, N2: Z)); |
| 7206 | } else { |
| 7207 | auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR; |
| 7208 | setValue(V: &I, NewN: DAG.getNode(Opcode: FunnelOpcode, DL: sdl, VT, N1: X, N2: Y, N3: Z)); |
| 7209 | } |
| 7210 | return; |
| 7211 | } |
| 7212 | case Intrinsic::sadd_sat: { |
| 7213 | SDValue Op1 = getValue(V: I.getArgOperand(i: 0)); |
| 7214 | SDValue Op2 = getValue(V: I.getArgOperand(i: 1)); |
| 7215 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::SADDSAT, DL: sdl, VT: Op1.getValueType(), N1: Op1, N2: Op2)); |
| 7216 | return; |
| 7217 | } |
| 7218 | case Intrinsic::uadd_sat: { |
| 7219 | SDValue Op1 = getValue(V: I.getArgOperand(i: 0)); |
| 7220 | SDValue Op2 = getValue(V: I.getArgOperand(i: 1)); |
| 7221 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::UADDSAT, DL: sdl, VT: Op1.getValueType(), N1: Op1, N2: Op2)); |
| 7222 | return; |
| 7223 | } |
| 7224 | case Intrinsic::ssub_sat: { |
| 7225 | SDValue Op1 = getValue(V: I.getArgOperand(i: 0)); |
| 7226 | SDValue Op2 = getValue(V: I.getArgOperand(i: 1)); |
| 7227 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::SSUBSAT, DL: sdl, VT: Op1.getValueType(), N1: Op1, N2: Op2)); |
| 7228 | return; |
| 7229 | } |
| 7230 | case Intrinsic::usub_sat: { |
| 7231 | SDValue Op1 = getValue(V: I.getArgOperand(i: 0)); |
| 7232 | SDValue Op2 = getValue(V: I.getArgOperand(i: 1)); |
| 7233 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::USUBSAT, DL: sdl, VT: Op1.getValueType(), N1: Op1, N2: Op2)); |
| 7234 | return; |
| 7235 | } |
| 7236 | case Intrinsic::sshl_sat: { |
| 7237 | SDValue Op1 = getValue(V: I.getArgOperand(i: 0)); |
| 7238 | SDValue Op2 = getValue(V: I.getArgOperand(i: 1)); |
| 7239 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::SSHLSAT, DL: sdl, VT: Op1.getValueType(), N1: Op1, N2: Op2)); |
| 7240 | return; |
| 7241 | } |
| 7242 | case Intrinsic::ushl_sat: { |
| 7243 | SDValue Op1 = getValue(V: I.getArgOperand(i: 0)); |
| 7244 | SDValue Op2 = getValue(V: I.getArgOperand(i: 1)); |
| 7245 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::USHLSAT, DL: sdl, VT: Op1.getValueType(), N1: Op1, N2: Op2)); |
| 7246 | return; |
| 7247 | } |
| 7248 | case Intrinsic::smul_fix: |
| 7249 | case Intrinsic::umul_fix: |
| 7250 | case Intrinsic::smul_fix_sat: |
| 7251 | case Intrinsic::umul_fix_sat: { |
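| | // The third operand is the fixed-point scale, i.e. the number of |
| | // fractional bits in the operands. |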
| 7252 | SDValue Op1 = getValue(V: I.getArgOperand(i: 0)); |
| 7253 | SDValue Op2 = getValue(V: I.getArgOperand(i: 1)); |
| 7254 | SDValue Op3 = getValue(V: I.getArgOperand(i: 2)); |
| 7255 | setValue(V: &I, NewN: DAG.getNode(Opcode: FixedPointIntrinsicToOpcode(Intrinsic), DL: sdl, |
| 7256 | VT: Op1.getValueType(), N1: Op1, N2: Op2, N3: Op3)); |
| 7257 | return; |
| 7258 | } |
| 7259 | case Intrinsic::sdiv_fix: |
| 7260 | case Intrinsic::udiv_fix: |
| 7261 | case Intrinsic::sdiv_fix_sat: |
| 7262 | case Intrinsic::udiv_fix_sat: { |
| 7263 | SDValue Op1 = getValue(V: I.getArgOperand(i: 0)); |
| 7264 | SDValue Op2 = getValue(V: I.getArgOperand(i: 1)); |
| 7265 | SDValue Op3 = getValue(V: I.getArgOperand(i: 2)); |
| 7266 | setValue(V: &I, NewN: expandDivFix(Opcode: FixedPointIntrinsicToOpcode(Intrinsic), DL: sdl, |
| 7267 | LHS: Op1, RHS: Op2, Scale: Op3, DAG, TLI)); |
| 7268 | return; |
| 7269 | } |
| 7270 | case Intrinsic::smax: { |
| 7271 | SDValue Op1 = getValue(V: I.getArgOperand(i: 0)); |
| 7272 | SDValue Op2 = getValue(V: I.getArgOperand(i: 1)); |
| 7273 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::SMAX, DL: sdl, VT: Op1.getValueType(), N1: Op1, N2: Op2)); |
| 7274 | return; |
| 7275 | } |
| 7276 | case Intrinsic::smin: { |
| 7277 | SDValue Op1 = getValue(V: I.getArgOperand(i: 0)); |
| 7278 | SDValue Op2 = getValue(V: I.getArgOperand(i: 1)); |
| 7279 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::SMIN, DL: sdl, VT: Op1.getValueType(), N1: Op1, N2: Op2)); |
| 7280 | return; |
| 7281 | } |
| 7282 | case Intrinsic::umax: { |
| 7283 | SDValue Op1 = getValue(V: I.getArgOperand(i: 0)); |
| 7284 | SDValue Op2 = getValue(V: I.getArgOperand(i: 1)); |
| 7285 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::UMAX, DL: sdl, VT: Op1.getValueType(), N1: Op1, N2: Op2)); |
| 7286 | return; |
| 7287 | } |
| 7288 | case Intrinsic::umin: { |
| 7289 | SDValue Op1 = getValue(V: I.getArgOperand(i: 0)); |
| 7290 | SDValue Op2 = getValue(V: I.getArgOperand(i: 1)); |
| 7291 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::UMIN, DL: sdl, VT: Op1.getValueType(), N1: Op1, N2: Op2)); |
| 7292 | return; |
| 7293 | } |
| 7294 | case Intrinsic::abs: { |
| 7295 | // TODO: Preserve "int min is poison" arg in SDAG? |
| 7296 | SDValue Op1 = getValue(V: I.getArgOperand(i: 0)); |
| 7297 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::ABS, DL: sdl, VT: Op1.getValueType(), Operand: Op1)); |
| 7298 | return; |
| 7299 | } |
| 7300 | case Intrinsic::scmp: { |
| 7301 | SDValue Op1 = getValue(V: I.getArgOperand(i: 0)); |
| 7302 | SDValue Op2 = getValue(V: I.getArgOperand(i: 1)); |
| 7303 | EVT DestVT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 7304 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::SCMP, DL: sdl, VT: DestVT, N1: Op1, N2: Op2)); |
| 7305 | break; |
| 7306 | } |
| 7307 | case Intrinsic::ucmp: { |
| 7308 | SDValue Op1 = getValue(V: I.getArgOperand(i: 0)); |
| 7309 | SDValue Op2 = getValue(V: I.getArgOperand(i: 1)); |
| 7310 | EVT DestVT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 7311 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::UCMP, DL: sdl, VT: DestVT, N1: Op1, N2: Op2)); |
| 7312 | break; |
| 7313 | } |
| 7314 | case Intrinsic::stacksave: { |
| 7315 | SDValue Op = getRoot(); |
| 7316 | EVT VT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 7317 | Res = DAG.getNode(Opcode: ISD::STACKSAVE, DL: sdl, VTList: DAG.getVTList(VT1: VT, VT2: MVT::Other), N: Op); |
| 7318 | setValue(V: &I, NewN: Res); |
| 7319 | DAG.setRoot(Res.getValue(R: 1)); |
| 7320 | return; |
| 7321 | } |
| 7322 | case Intrinsic::stackrestore: |
| 7323 | Res = getValue(V: I.getArgOperand(i: 0)); |
| 7324 | DAG.setRoot(DAG.getNode(Opcode: ISD::STACKRESTORE, DL: sdl, VT: MVT::Other, N1: getRoot(), N2: Res)); |
| 7325 | return; |
| 7326 | case Intrinsic::get_dynamic_area_offset: { |
| 7327 | SDValue Op = getRoot(); |
| 7328 | EVT ResTy = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 7329 | Res = DAG.getNode(Opcode: ISD::GET_DYNAMIC_AREA_OFFSET, DL: sdl, VTList: DAG.getVTList(VT: ResTy), |
| 7330 | N: Op); |
| 7331 | DAG.setRoot(Op); |
| 7332 | setValue(V: &I, NewN: Res); |
| 7333 | return; |
| 7334 | } |
| 7335 | case Intrinsic::stackguard: { |
| 7336 | MachineFunction &MF = DAG.getMachineFunction(); |
| 7337 | const Module &M = *MF.getFunction().getParent(); |
| 7338 | EVT PtrTy = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 7339 | SDValue Chain = getRoot(); |
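| | // Load the guard either via the target's LOAD_STACK_GUARD pseudo or with |
| | // an ordinary volatile load of the guard variable. |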
| 7340 | if (TLI.useLoadStackGuardNode(M)) { |
| 7341 | Res = getLoadStackGuard(DAG, DL: sdl, Chain); |
| 7342 | Res = DAG.getPtrExtOrTrunc(Op: Res, DL: sdl, VT: PtrTy); |
| 7343 | } else { |
| 7344 | const Value *Global = TLI.getSDagStackGuard(M); |
| 7345 | Align Align = DAG.getDataLayout().getPrefTypeAlign(Ty: Global->getType()); |
| 7346 | Res = DAG.getLoad(VT: PtrTy, dl: sdl, Chain, Ptr: getValue(V: Global), |
| 7347 | PtrInfo: MachinePointerInfo(Global, 0), Alignment: Align, |
| 7348 | MMOFlags: MachineMemOperand::MOVolatile); |
| 7349 | } |
| 7350 | if (TLI.useStackGuardXorFP()) |
| 7351 | Res = TLI.emitStackGuardXorFP(DAG, Val: Res, DL: sdl); |
| 7352 | DAG.setRoot(Chain); |
| 7353 | setValue(V: &I, NewN: Res); |
| 7354 | return; |
| 7355 | } |
| 7356 | case Intrinsic::stackprotector: { |
| 7357 | // Emit code into the DAG to store the stack guard onto the stack. |
| 7358 | MachineFunction &MF = DAG.getMachineFunction(); |
| 7359 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 7360 | const Module &M = *MF.getFunction().getParent(); |
| 7361 | SDValue Src, Chain = getRoot(); |
| 7362 | |
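| | // The guard value comes from LOAD_STACK_GUARD when the target supports |
| | // it, otherwise from the intrinsic's first argument. |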
| 7363 | if (TLI.useLoadStackGuardNode(M)) |
| 7364 | Src = getLoadStackGuard(DAG, DL: sdl, Chain); |
| 7365 | else |
| 7366 | Src = getValue(V: I.getArgOperand(i: 0)); // The guard's value. |
| 7367 | |
| 7368 | AllocaInst *Slot = cast<AllocaInst>(Val: I.getArgOperand(i: 1)); |
| 7369 | |
| 7370 | int FI = FuncInfo.StaticAllocaMap[Slot]; |
| 7371 | MFI.setStackProtectorIndex(FI); |
| 7372 | EVT PtrTy = TLI.getFrameIndexTy(DL: DAG.getDataLayout()); |
| 7373 | |
| 7374 | SDValue FIN = DAG.getFrameIndex(FI, VT: PtrTy); |
| 7375 | |
| 7376 | // Store the stack protector onto the stack. |
| 7377 | Res = DAG.getStore( |
| 7378 | Chain, dl: sdl, Val: Src, Ptr: FIN, |
| 7379 | PtrInfo: MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI), |
| 7380 | Alignment: MaybeAlign(), MMOFlags: MachineMemOperand::MOVolatile); |
| 7381 | setValue(V: &I, NewN: Res); |
| 7382 | DAG.setRoot(Res); |
| 7383 | return; |
| 7384 | } |
| 7385 | case Intrinsic::objectsize: |
| 7386 | llvm_unreachable("llvm.objectsize.* should have been lowered already" ); |
| 7387 | |
| 7388 | case Intrinsic::is_constant: |
| 7389 | llvm_unreachable("llvm.is.constant.* should have been lowered already" ); |
| 7390 | |
| 7391 | case Intrinsic::annotation: |
| 7392 | case Intrinsic::ptr_annotation: |
| 7393 | case Intrinsic::launder_invariant_group: |
| 7394 | case Intrinsic::strip_invariant_group: |
| 7395 | // Drop the intrinsic, but forward the value |
| 7396 | setValue(V: &I, NewN: getValue(V: I.getOperand(i_nocapture: 0))); |
| 7397 | return; |
| 7398 | |
| 7399 | case Intrinsic::type_test: |
| 7400 | case Intrinsic::public_type_test: |
| 7401 | setValue(V: &I, NewN: getValue(V: ConstantInt::getTrue(Ty: I.getType()))); |
| 7402 | return; |
| 7403 | |
| 7404 | case Intrinsic::assume: |
| 7405 | case Intrinsic::experimental_noalias_scope_decl: |
| 7406 | case Intrinsic::var_annotation: |
| 7407 | case Intrinsic::sideeffect: |
| 7408 | // Discard annotate attributes, noalias scope declarations, assumptions, and |
| 7409 | // artificial side-effects. |
| 7410 | return; |
| 7411 | |
| 7412 | case Intrinsic::codeview_annotation: { |
| 7413 | // Emit a label associated with this metadata. |
| 7414 | MachineFunction &MF = DAG.getMachineFunction(); |
| 7415 | MCSymbol *Label = MF.getContext().createTempSymbol(Name: "annotation", AlwaysAddSuffix: true); |
| 7416 | Metadata *MD = cast<MetadataAsValue>(Val: I.getArgOperand(i: 0))->getMetadata(); |
| 7417 | MF.addCodeViewAnnotation(Label, MD: cast<MDNode>(Val: MD)); |
| 7418 | Res = DAG.getLabelNode(Opcode: ISD::ANNOTATION_LABEL, dl: sdl, Root: getRoot(), Label); |
| 7419 | DAG.setRoot(Res); |
| 7420 | return; |
| 7421 | } |
| 7422 | |
| 7423 | case Intrinsic::init_trampoline: { |
| 7424 | const Function *F = cast<Function>(Val: I.getArgOperand(i: 1)->stripPointerCasts()); |
| 7425 | |
| 7426 | SDValue Ops[6]; |
| 7427 | Ops[0] = getRoot(); |
| 7428 | Ops[1] = getValue(V: I.getArgOperand(i: 0)); |
| 7429 | Ops[2] = getValue(V: I.getArgOperand(i: 1)); |
| 7430 | Ops[3] = getValue(V: I.getArgOperand(i: 2)); |
| 7431 | Ops[4] = DAG.getSrcValue(v: I.getArgOperand(i: 0)); |
| 7432 | Ops[5] = DAG.getSrcValue(v: F); |
| 7433 | |
| 7434 | Res = DAG.getNode(Opcode: ISD::INIT_TRAMPOLINE, DL: sdl, VT: MVT::Other, Ops); |
| 7435 | |
| 7436 | DAG.setRoot(Res); |
| 7437 | return; |
| 7438 | } |
| 7439 | case Intrinsic::adjust_trampoline: |
| 7440 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::ADJUST_TRAMPOLINE, DL: sdl, |
| 7441 | VT: TLI.getPointerTy(DL: DAG.getDataLayout()), |
| 7442 | Operand: getValue(V: I.getArgOperand(i: 0)))); |
| 7443 | return; |
| 7444 | case Intrinsic::gcroot: { |
| 7445 | assert(DAG.getMachineFunction().getFunction().hasGC() && |
| 7446 | "only valid in functions with gc specified, enforced by Verifier" ); |
| 7447 | assert(GFI && "implied by previous" ); |
| 7448 | const Value *Alloca = I.getArgOperand(i: 0)->stripPointerCasts(); |
| 7449 | const Constant *TypeMap = cast<Constant>(Val: I.getArgOperand(i: 1)); |
| 7450 | |
| 7451 | FrameIndexSDNode *FI = cast<FrameIndexSDNode>(Val: getValue(V: Alloca).getNode()); |
| 7452 | GFI->addStackRoot(Num: FI->getIndex(), Metadata: TypeMap); |
| 7453 | return; |
| 7454 | } |
| 7455 | case Intrinsic::gcread: |
| 7456 | case Intrinsic::gcwrite: |
| 7457 | llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!" ); |
| 7458 | case Intrinsic::get_rounding: |
| 7459 | Res = DAG.getNode(Opcode: ISD::GET_ROUNDING, DL: sdl, ResultTys: {MVT::i32, MVT::Other}, Ops: getRoot()); |
| 7460 | setValue(V: &I, NewN: Res); |
| 7461 | DAG.setRoot(Res.getValue(R: 1)); |
| 7462 | return; |
| 7463 | |
| 7464 | case Intrinsic::expect: |
| 7465 | case Intrinsic::expect_with_probability: |
| 7466 | // Just replace __builtin_expect(exp, c) and |
| 7467 | // __builtin_expect_with_probability(exp, c, p) with exp. |
| 7468 | setValue(V: &I, NewN: getValue(V: I.getArgOperand(i: 0))); |
| 7469 | return; |
| 7470 | |
| 7471 | case Intrinsic::ubsantrap: |
| 7472 | case Intrinsic::debugtrap: |
| 7473 | case Intrinsic::trap: { |
| 7474 | StringRef TrapFuncName = |
| 7475 | I.getAttributes().getFnAttr(Kind: "trap-func-name").getValueAsString(); |
| 7476 | if (TrapFuncName.empty()) { |
| 7477 | switch (Intrinsic) { |
| 7478 | case Intrinsic::trap: |
| 7479 | DAG.setRoot(DAG.getNode(Opcode: ISD::TRAP, DL: sdl, VT: MVT::Other, Operand: getRoot())); |
| 7480 | break; |
| 7481 | case Intrinsic::debugtrap: |
| 7482 | DAG.setRoot(DAG.getNode(Opcode: ISD::DEBUGTRAP, DL: sdl, VT: MVT::Other, Operand: getRoot())); |
| 7483 | break; |
| 7484 | case Intrinsic::ubsantrap: |
| 7485 | DAG.setRoot(DAG.getNode( |
| 7486 | Opcode: ISD::UBSANTRAP, DL: sdl, VT: MVT::Other, N1: getRoot(), |
| 7487 | N2: DAG.getTargetConstant( |
| 7488 | Val: cast<ConstantInt>(Val: I.getArgOperand(i: 0))->getZExtValue(), DL: sdl, |
| 7489 | VT: MVT::i32))); |
| 7490 | break; |
| 7491 | default: llvm_unreachable("unknown trap intrinsic" ); |
| 7492 | } |
| 7493 | DAG.addNoMergeSiteInfo(Node: DAG.getRoot().getNode(), |
| 7494 | NoMerge: I.hasFnAttr(Kind: Attribute::NoMerge)); |
| 7495 | return; |
| 7496 | } |
| 7497 | TargetLowering::ArgListTy Args; |
| 7498 | if (Intrinsic == Intrinsic::ubsantrap) { |
| 7499 | Args.push_back(x: TargetLoweringBase::ArgListEntry()); |
| 7500 | Args[0].Val = I.getArgOperand(i: 0); |
| 7501 | Args[0].Node = getValue(V: Args[0].Val); |
| 7502 | Args[0].Ty = Args[0].Val->getType(); |
| 7503 | } |
| 7504 | |
| 7505 | TargetLowering::CallLoweringInfo CLI(DAG); |
| 7506 | CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee( |
| 7507 | CC: CallingConv::C, ResultType: I.getType(), |
| 7508 | Target: DAG.getExternalSymbol(Sym: TrapFuncName.data(), |
| 7509 | VT: TLI.getPointerTy(DL: DAG.getDataLayout())), |
| 7510 | ArgsList: std::move(Args)); |
| 7511 | CLI.NoMerge = I.hasFnAttr(Kind: Attribute::NoMerge); |
| 7512 | std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI); |
| 7513 | DAG.setRoot(Result.second); |
| 7514 | return; |
| 7515 | } |
| 7516 | |
| 7517 | case Intrinsic::allow_runtime_check: |
| 7518 | case Intrinsic::allow_ubsan_check: |
| 7519 | setValue(V: &I, NewN: getValue(V: ConstantInt::getTrue(Ty: I.getType()))); |
| 7520 | return; |
| 7521 | |
| 7522 | case Intrinsic::uadd_with_overflow: |
| 7523 | case Intrinsic::sadd_with_overflow: |
| 7524 | case Intrinsic::usub_with_overflow: |
| 7525 | case Intrinsic::ssub_with_overflow: |
| 7526 | case Intrinsic::umul_with_overflow: |
| 7527 | case Intrinsic::smul_with_overflow: { |
| 7528 | ISD::NodeType Op; |
| 7529 | switch (Intrinsic) { |
| 7530 | default: llvm_unreachable("Impossible intrinsic" ); // Can't reach here. |
| 7531 | case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break; |
| 7532 | case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break; |
| 7533 | case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break; |
| 7534 | case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break; |
| 7535 | case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break; |
| 7536 | case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break; |
| 7537 | } |
| 7538 | SDValue Op1 = getValue(V: I.getArgOperand(i: 0)); |
| 7539 | SDValue Op2 = getValue(V: I.getArgOperand(i: 1)); |
| 7540 | |
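| | // Overflow is reported in a second result: an i1 for scalars, or a vector |
| | // of i1 with matching element count for vector operands. |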
| 7541 | EVT ResultVT = Op1.getValueType(); |
| 7542 | EVT OverflowVT = MVT::i1; |
| 7543 | if (ResultVT.isVector()) |
| 7544 | OverflowVT = EVT::getVectorVT( |
| 7545 | Context&: *Context, VT: OverflowVT, EC: ResultVT.getVectorElementCount()); |
| 7546 | |
| 7547 | SDVTList VTs = DAG.getVTList(VT1: ResultVT, VT2: OverflowVT); |
| 7548 | setValue(V: &I, NewN: DAG.getNode(Opcode: Op, DL: sdl, VTList: VTs, N1: Op1, N2: Op2)); |
| 7549 | return; |
| 7550 | } |
| 7551 | case Intrinsic::prefetch: { |
| 7552 | SDValue Ops[5]; |
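| | // Operands: chain, address, rw (0 = read, 1 = write), locality, and |
| | // cache-type constants. |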
| 7553 | unsigned rw = cast<ConstantInt>(Val: I.getArgOperand(i: 1))->getZExtValue(); |
| 7554 | auto Flags = rw == 0 ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore; |
| 7555 | Ops[0] = DAG.getRoot(); |
| 7556 | Ops[1] = getValue(V: I.getArgOperand(i: 0)); |
| 7557 | Ops[2] = DAG.getTargetConstant(Val: *cast<ConstantInt>(Val: I.getArgOperand(i: 1)), DL: sdl, |
| 7558 | VT: MVT::i32); |
| 7559 | Ops[3] = DAG.getTargetConstant(Val: *cast<ConstantInt>(Val: I.getArgOperand(i: 2)), DL: sdl, |
| 7560 | VT: MVT::i32); |
| 7561 | Ops[4] = DAG.getTargetConstant(Val: *cast<ConstantInt>(Val: I.getArgOperand(i: 3)), DL: sdl, |
| 7562 | VT: MVT::i32); |
| 7563 | SDValue Result = DAG.getMemIntrinsicNode( |
| 7564 | Opcode: ISD::PREFETCH, dl: sdl, VTList: DAG.getVTList(VT: MVT::Other), Ops, |
| 7565 | MemVT: EVT::getIntegerVT(Context&: *Context, BitWidth: 8), PtrInfo: MachinePointerInfo(I.getArgOperand(i: 0)), |
| 7566 | /* align */ Alignment: std::nullopt, Flags); |
| 7567 | |
| 7568 | // Chain the prefetch in parallel with any pending loads, to stay out of |
| 7569 | // the way of later optimizations. |
| 7570 | PendingLoads.push_back(Elt: Result); |
| 7571 | Result = getRoot(); |
| 7572 | DAG.setRoot(Result); |
| 7573 | return; |
| 7574 | } |
| 7575 | case Intrinsic::lifetime_start: |
| 7576 | case Intrinsic::lifetime_end: { |
| 7577 | bool IsStart = (Intrinsic == Intrinsic::lifetime_start); |
| 7578 | // Stack coloring is not enabled at -O0, so discard the region information. |
| 7579 | if (TM.getOptLevel() == CodeGenOptLevel::None) |
| 7580 | return; |
| 7581 | |
| 7582 | const int64_t ObjectSize = |
| 7583 | cast<ConstantInt>(Val: I.getArgOperand(i: 0))->getSExtValue(); |
| 7584 | Value *const ObjectPtr = I.getArgOperand(i: 1); |
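| | // The pointer may be derived from more than one alloca (e.g. through |
| | // selects or PHIs), so collect every underlying object. |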
| 7585 | SmallVector<const Value *, 4> Allocas; |
| 7586 | getUnderlyingObjects(V: ObjectPtr, Objects&: Allocas); |
| 7587 | |
| 7588 | for (const Value *Alloca : Allocas) { |
| 7589 | const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(Val: Alloca); |
| 7590 | |
| 7591 | // Could not find an Alloca. |
| 7592 | if (!LifetimeObject) |
| 7593 | continue; |
| 7594 | |
| 7595 | // First check that the Alloca is static, otherwise it won't have a |
| 7596 | // valid frame index. |
| 7597 | auto SI = FuncInfo.StaticAllocaMap.find(Val: LifetimeObject); |
| 7598 | if (SI == FuncInfo.StaticAllocaMap.end()) |
| 7599 | return; |
| 7600 | |
| 7601 | const int FrameIndex = SI->second; |
| 7602 | int64_t Offset; |
| 7603 | if (GetPointerBaseWithConstantOffset( |
| 7604 | Ptr: ObjectPtr, Offset, DL: DAG.getDataLayout()) != LifetimeObject) |
| 7605 | Offset = -1; // Cannot determine offset from alloca to lifetime object. |
| 7606 | Res = DAG.getLifetimeNode(IsStart, dl: sdl, Chain: getRoot(), FrameIndex, Size: ObjectSize, |
| 7607 | Offset); |
| 7608 | DAG.setRoot(Res); |
| 7609 | } |
| 7610 | return; |
| 7611 | } |
| 7612 | case Intrinsic::pseudoprobe: { |
| 7613 | auto Guid = cast<ConstantInt>(Val: I.getArgOperand(i: 0))->getZExtValue(); |
| 7614 | auto Index = cast<ConstantInt>(Val: I.getArgOperand(i: 1))->getZExtValue(); |
| 7615 | auto Attr = cast<ConstantInt>(Val: I.getArgOperand(i: 2))->getZExtValue(); |
| 7616 | Res = DAG.getPseudoProbeNode(Dl: sdl, Chain: getRoot(), Guid, Index, Attr); |
| 7617 | DAG.setRoot(Res); |
| 7618 | return; |
| 7619 | } |
| 7620 | case Intrinsic::invariant_start: |
| 7621 | // Discard region information. |
| 7622 | setValue(V: &I, |
| 7623 | NewN: DAG.getUNDEF(VT: TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()))); |
| 7624 | return; |
| 7625 | case Intrinsic::invariant_end: |
| 7626 | // Discard region information. |
| 7627 | return; |
| 7628 | case Intrinsic::clear_cache: { |
| 7629 | SDValue InputChain = DAG.getRoot(); |
| 7630 | SDValue StartVal = getValue(V: I.getArgOperand(i: 0)); |
| 7631 | SDValue EndVal = getValue(V: I.getArgOperand(i: 1)); |
| 7632 | Res = DAG.getNode(Opcode: ISD::CLEAR_CACHE, DL: sdl, VTList: DAG.getVTList(VT: MVT::Other), |
| 7633 | Ops: {InputChain, StartVal, EndVal}); |
| 7634 | setValue(V: &I, NewN: Res); |
| 7635 | DAG.setRoot(Res); |
| 7636 | return; |
| 7637 | } |
| 7638 | case Intrinsic::donothing: |
| 7639 | case Intrinsic::seh_try_begin: |
| 7640 | case Intrinsic::seh_scope_begin: |
| 7641 | case Intrinsic::seh_try_end: |
| 7642 | case Intrinsic::seh_scope_end: |
| 7643 | // ignore |
| 7644 | return; |
| 7645 | case Intrinsic::experimental_stackmap: |
| 7646 | visitStackmap(I); |
| 7647 | return; |
| 7648 | case Intrinsic::experimental_patchpoint_void: |
| 7649 | case Intrinsic::experimental_patchpoint: |
| 7650 | visitPatchpoint(CB: I); |
| 7651 | return; |
| 7652 | case Intrinsic::experimental_gc_statepoint: |
| 7653 | LowerStatepoint(I: cast<GCStatepointInst>(Val: I)); |
| 7654 | return; |
| 7655 | case Intrinsic::experimental_gc_result: |
| 7656 | visitGCResult(I: cast<GCResultInst>(Val: I)); |
| 7657 | return; |
| 7658 | case Intrinsic::experimental_gc_relocate: |
| 7659 | visitGCRelocate(Relocate: cast<GCRelocateInst>(Val: I)); |
| 7660 | return; |
| 7661 | case Intrinsic::instrprof_cover: |
| 7662 | llvm_unreachable("instrprof failed to lower a cover"); |
| 7663 | case Intrinsic::instrprof_increment: |
| 7664 | llvm_unreachable("instrprof failed to lower an increment"); |
| 7665 | case Intrinsic::instrprof_timestamp: |
| 7666 | llvm_unreachable("instrprof failed to lower a timestamp"); |
| 7667 | case Intrinsic::instrprof_value_profile: |
| 7668 | llvm_unreachable("instrprof failed to lower a value profiling call"); |
| 7669 | case Intrinsic::instrprof_mcdc_parameters: |
| 7670 | llvm_unreachable("instrprof failed to lower mcdc parameters"); |
| 7671 | case Intrinsic::instrprof_mcdc_tvbitmap_update: |
| 7672 | llvm_unreachable("instrprof failed to lower an mcdc tvbitmap update"); |
| 7673 | case Intrinsic::localescape: { |
| 7674 | MachineFunction &MF = DAG.getMachineFunction(); |
| 7675 | const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo(); |
| 7676 | |
| 7677 | // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission |
| 7678 | // is the same on all targets. |
| 7679 | for (unsigned Idx = 0, E = I.arg_size(); Idx < E; ++Idx) { |
| 7680 | Value *Arg = I.getArgOperand(i: Idx)->stripPointerCasts(); |
| 7681 | if (isa<ConstantPointerNull>(Val: Arg)) |
| 7682 | continue; // Skip null pointers. They represent a hole in index space. |
| 7683 | AllocaInst *Slot = cast<AllocaInst>(Val: Arg); |
| 7684 | assert(FuncInfo.StaticAllocaMap.count(Slot) && |
| 7685 | "can only escape static allocas" ); |
| 7686 | int FI = FuncInfo.StaticAllocaMap[Slot]; |
| 7687 | MCSymbol *FrameAllocSym = MF.getContext().getOrCreateFrameAllocSymbol( |
| 7688 | FuncName: GlobalValue::dropLLVMManglingEscape(Name: MF.getName()), Idx); |
| 7689 | BuildMI(BB&: *FuncInfo.MBB, I: FuncInfo.InsertPt, MIMD: dl, |
| 7690 | MCID: TII->get(Opcode: TargetOpcode::LOCAL_ESCAPE)) |
| 7691 | .addSym(Sym: FrameAllocSym) |
| 7692 | .addFrameIndex(Idx: FI); |
| 7693 | } |
| 7694 | |
| 7695 | return; |
| 7696 | } |
| 7697 | |
| 7698 | case Intrinsic::localrecover: { |
| 7699 | // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx) |
| 7700 | MachineFunction &MF = DAG.getMachineFunction(); |
| 7701 | |
| 7702 | // Get the symbol that defines the frame offset. |
| 7703 | auto *Fn = cast<Function>(Val: I.getArgOperand(i: 0)->stripPointerCasts()); |
| 7704 | auto *Idx = cast<ConstantInt>(Val: I.getArgOperand(i: 2)); |
| 7705 | unsigned IdxVal = |
| 7706 | unsigned(Idx->getLimitedValue(Limit: std::numeric_limits<int>::max())); |
| 7707 | MCSymbol *FrameAllocSym = MF.getContext().getOrCreateFrameAllocSymbol( |
| 7708 | FuncName: GlobalValue::dropLLVMManglingEscape(Name: Fn->getName()), Idx: IdxVal); |
| 7709 | |
| 7710 | Value *FP = I.getArgOperand(i: 1); |
| 7711 | SDValue FPVal = getValue(V: FP); |
| 7712 | EVT PtrVT = FPVal.getValueType(); |
| 7713 | |
| 7714 | // Create a MCSymbol for the label to avoid any target lowering |
| 7715 | // that would make this PC relative. |
| 7716 | SDValue OffsetSym = DAG.getMCSymbol(Sym: FrameAllocSym, VT: PtrVT); |
| 7717 | SDValue OffsetVal = |
| 7718 | DAG.getNode(Opcode: ISD::LOCAL_RECOVER, DL: sdl, VT: PtrVT, Operand: OffsetSym); |
| 7719 | |
| 7720 | // Add the offset to the FP. |
| 7721 | SDValue Add = DAG.getMemBasePlusOffset(Base: FPVal, Offset: OffsetVal, DL: sdl); |
| 7722 | setValue(V: &I, NewN: Add); |
| 7723 | |
| 7724 | return; |
| 7725 | } |
| 7726 | |
| 7727 | case Intrinsic::fake_use: { |
| 7728 | Value *V = I.getArgOperand(i: 0); |
| 7729 | SDValue Ops[2]; |
| 7730 | // For Values not declared or previously used in this basic block, the |
| 7731 | // NodeMap will not have an entry, and `getValue` will assert if V has no |
| 7732 | // valid register value. |
| 7733 | auto FakeUseValue = [&]() -> SDValue { |
| 7734 | SDValue &N = NodeMap[V]; |
| 7735 | if (N.getNode()) |
| 7736 | return N; |
| 7737 | |
| 7738 | // If there's a virtual register allocated and initialized for this |
| 7739 | // value, use it. |
| 7740 | if (SDValue copyFromReg = getCopyFromRegs(V, Ty: V->getType())) |
| 7741 | return copyFromReg; |
| 7742 | // FIXME: Do we want to preserve constants? It seems pointless. |
| 7743 | if (isa<Constant>(Val: V)) |
| 7744 | return getValue(V); |
| 7745 | return SDValue(); |
| 7746 | }(); |
| 7747 | // Do not translate a fake use with an undef operand or any other empty |
| 7748 | // SDValue. |
| 7749 | if (!FakeUseValue || FakeUseValue.isUndef()) |
| 7750 | return; |
| 7751 | Ops[0] = getRoot(); |
| 7752 | Ops[1] = FakeUseValue; |
| 7753 | DAG.setRoot(DAG.getNode(Opcode: ISD::FAKE_USE, DL: sdl, VT: MVT::Other, Ops)); |
| 7756 | return; |
| 7757 | } |
| 7758 | |
| 7759 | case Intrinsic::eh_exceptionpointer: |
| 7760 | case Intrinsic::eh_exceptioncode: { |
| 7761 | // Get the exception pointer vreg, copy from it, and resize it to fit. |
| 7762 | const auto *CPI = cast<CatchPadInst>(Val: I.getArgOperand(i: 0)); |
| 7763 | MVT PtrVT = TLI.getPointerTy(DL: DAG.getDataLayout()); |
| 7764 | const TargetRegisterClass *PtrRC = TLI.getRegClassFor(VT: PtrVT); |
| 7765 | Register VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, RC: PtrRC); |
| 7766 | SDValue N = DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl: sdl, Reg: VReg, VT: PtrVT); |
| 7767 | if (Intrinsic == Intrinsic::eh_exceptioncode) |
| 7768 | N = DAG.getZExtOrTrunc(Op: N, DL: sdl, VT: MVT::i32); |
| 7769 | setValue(V: &I, NewN: N); |
| 7770 | return; |
| 7771 | } |
| 7772 | case Intrinsic::xray_customevent: { |
| 7773 | // Here we want to make sure that the intrinsic behaves as if it has a |
| 7774 | // specific calling convention. |
| 7775 | const auto &Triple = DAG.getTarget().getTargetTriple(); |
| 7776 | if (!Triple.isAArch64(PointerWidth: 64) && Triple.getArch() != Triple::x86_64) |
| 7777 | return; |
| 7778 | |
| 7779 | SmallVector<SDValue, 8> Ops; |
| 7780 | |
| 7781 | // We want to say that we always want the arguments in registers. |
| 7782 | SDValue LogEntryVal = getValue(V: I.getArgOperand(i: 0)); |
| 7783 | SDValue StrSizeVal = getValue(V: I.getArgOperand(i: 1)); |
| 7784 | SDVTList NodeTys = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue); |
| 7785 | SDValue Chain = getRoot(); |
| 7786 | Ops.push_back(Elt: LogEntryVal); |
| 7787 | Ops.push_back(Elt: StrSizeVal); |
| 7788 | Ops.push_back(Elt: Chain); |
| 7789 | |
| 7790 | // We need to enforce the calling convention for the callsite so that |
| 7791 | // argument ordering is enforced correctly, and so that register allocation |
| 7792 | // can see that some registers may be clobbered and must be preserved |
| 7793 | // across calls to the intrinsic. |
| 7794 | MachineSDNode *MN = DAG.getMachineNode(Opcode: TargetOpcode::PATCHABLE_EVENT_CALL, |
| 7795 | dl: sdl, VTs: NodeTys, Ops); |
| 7796 | SDValue patchableNode = SDValue(MN, 0); |
| 7797 | DAG.setRoot(patchableNode); |
| 7798 | setValue(V: &I, NewN: patchableNode); |
| 7799 | return; |
| 7800 | } |
| 7801 | case Intrinsic::xray_typedevent: { |
| 7802 | // Here we want to make sure that the intrinsic behaves as if it has a |
| 7803 | // specific calling convention. |
| 7804 | const auto &Triple = DAG.getTarget().getTargetTriple(); |
| 7805 | if (!Triple.isAArch64(PointerWidth: 64) && Triple.getArch() != Triple::x86_64) |
| 7806 | return; |
| 7807 | |
| 7808 | SmallVector<SDValue, 8> Ops; |
| 7809 | |
| 7810 | // We want to say that we always want the arguments in registers. |
| 7811 | // It's unclear to me how manipulating the selection DAG here forces callers |
| 7812 | // to provide arguments in registers instead of on the stack. |
| 7813 | SDValue LogTypeId = getValue(V: I.getArgOperand(i: 0)); |
| 7814 | SDValue LogEntryVal = getValue(V: I.getArgOperand(i: 1)); |
| 7815 | SDValue StrSizeVal = getValue(V: I.getArgOperand(i: 2)); |
| 7816 | SDVTList NodeTys = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue); |
| 7817 | SDValue Chain = getRoot(); |
| 7818 | Ops.push_back(Elt: LogTypeId); |
| 7819 | Ops.push_back(Elt: LogEntryVal); |
| 7820 | Ops.push_back(Elt: StrSizeVal); |
| 7821 | Ops.push_back(Elt: Chain); |
| 7822 | |
| 7823 | // We need to enforce the calling convention for the callsite so that |
| 7824 | // argument ordering is enforced correctly, and so that register allocation |
| 7825 | // can see that some registers may be clobbered and must be preserved |
| 7826 | // across calls to the intrinsic. |
| 7827 | MachineSDNode *MN = DAG.getMachineNode( |
| 7828 | Opcode: TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, dl: sdl, VTs: NodeTys, Ops); |
| 7829 | SDValue patchableNode = SDValue(MN, 0); |
| 7830 | DAG.setRoot(patchableNode); |
| 7831 | setValue(V: &I, NewN: patchableNode); |
| 7832 | return; |
| 7833 | } |
| 7834 | case Intrinsic::experimental_deoptimize: |
| 7835 | LowerDeoptimizeCall(CI: &I); |
| 7836 | return; |
| 7837 | case Intrinsic::stepvector: |
| 7838 | visitStepVector(I); |
| 7839 | return; |
| 7840 | case Intrinsic::vector_reduce_fadd: |
| 7841 | case Intrinsic::vector_reduce_fmul: |
| 7842 | case Intrinsic::vector_reduce_add: |
| 7843 | case Intrinsic::vector_reduce_mul: |
| 7844 | case Intrinsic::vector_reduce_and: |
| 7845 | case Intrinsic::vector_reduce_or: |
| 7846 | case Intrinsic::vector_reduce_xor: |
| 7847 | case Intrinsic::vector_reduce_smax: |
| 7848 | case Intrinsic::vector_reduce_smin: |
| 7849 | case Intrinsic::vector_reduce_umax: |
| 7850 | case Intrinsic::vector_reduce_umin: |
| 7851 | case Intrinsic::vector_reduce_fmax: |
| 7852 | case Intrinsic::vector_reduce_fmin: |
| 7853 | case Intrinsic::vector_reduce_fmaximum: |
| 7854 | case Intrinsic::vector_reduce_fminimum: |
| 7855 | visitVectorReduce(I, Intrinsic); |
| 7856 | return; |
| 7857 | |
| 7858 | case Intrinsic::icall_branch_funnel: { |
| 7859 | SmallVector<SDValue, 16> Ops; |
| 7860 | Ops.push_back(Elt: getValue(V: I.getArgOperand(i: 0))); |
| 7861 | |
| 7862 | int64_t Offset; |
| 7863 | auto *Base = dyn_cast<GlobalObject>(Val: GetPointerBaseWithConstantOffset( |
| 7864 | Ptr: I.getArgOperand(i: 1), Offset, DL: DAG.getDataLayout())); |
| 7865 | if (!Base) |
| 7866 | report_fatal_error( |
| 7867 | reason: "llvm.icall.branch.funnel operand must be a GlobalValue" ); |
| 7868 | Ops.push_back(Elt: DAG.getTargetGlobalAddress(GV: Base, DL: sdl, VT: MVT::i64, offset: 0)); |
| 7869 | |
| 7870 | struct BranchFunnelTarget { |
| 7871 | int64_t Offset; |
| 7872 | SDValue Target; |
| 7873 | }; |
| 7874 | SmallVector<BranchFunnelTarget, 8> Targets; |
| 7875 | |
| 7876 | for (unsigned Op = 1, N = I.arg_size(); Op != N; Op += 2) { |
| 7877 | auto *ElemBase = dyn_cast<GlobalObject>(Val: GetPointerBaseWithConstantOffset( |
| 7878 | Ptr: I.getArgOperand(i: Op), Offset, DL: DAG.getDataLayout())); |
| 7879 | if (ElemBase != Base) |
| 7880 | report_fatal_error(reason: "all llvm.icall.branch.funnel operands must refer " |
| 7881 | "to the same GlobalValue" ); |
| 7882 | |
| 7883 | SDValue Val = getValue(V: I.getArgOperand(i: Op + 1)); |
| 7884 | auto *GA = dyn_cast<GlobalAddressSDNode>(Val); |
| 7885 | if (!GA) |
| 7886 | report_fatal_error( |
| 7887 | reason: "llvm.icall.branch.funnel operand must be a GlobalValue" ); |
| 7888 | Targets.push_back(Elt: {.Offset: Offset, .Target: DAG.getTargetGlobalAddress( |
| 7889 | GV: GA->getGlobal(), DL: sdl, VT: Val.getValueType(), |
| 7890 | offset: GA->getOffset())}); |
| 7891 | } |
| 7892 | llvm::sort(C&: Targets, |
| 7893 | Comp: [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) { |
| 7894 | return T1.Offset < T2.Offset; |
| 7895 | }); |
| 7896 | |
| 7897 | for (auto &T : Targets) { |
| 7898 | Ops.push_back(Elt: DAG.getTargetConstant(Val: T.Offset, DL: sdl, VT: MVT::i32)); |
| 7899 | Ops.push_back(Elt: T.Target); |
| 7900 | } |
| 7901 | |
| 7902 | Ops.push_back(Elt: DAG.getRoot()); // Chain |
| 7903 | SDValue N(DAG.getMachineNode(Opcode: TargetOpcode::ICALL_BRANCH_FUNNEL, dl: sdl, |
| 7904 | VT: MVT::Other, Ops), |
| 7905 | 0); |
| 7906 | DAG.setRoot(N); |
| 7907 | setValue(V: &I, NewN: N); |
| 7908 | HasTailCall = true; |
| 7909 | return; |
| 7910 | } |
| 7911 | |
| 7912 | case Intrinsic::wasm_landingpad_index: |
| 7913 | // The information this intrinsic contained has been transferred to |
| 7914 | // MachineFunction in SelectionDAGISel::PrepareEHLandingPad, so we can |
| 7915 | // safely delete it now. |
| 7916 | return; |
| 7917 | |
| 7918 | case Intrinsic::aarch64_settag: |
| 7919 | case Intrinsic::aarch64_settag_zero: { |
| 7920 | const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); |
| 7921 | bool ZeroMemory = Intrinsic == Intrinsic::aarch64_settag_zero; |
| 7922 | SDValue Val = TSI.EmitTargetCodeForSetTag( |
| 7923 | DAG, dl: sdl, Chain: getRoot(), Addr: getValue(V: I.getArgOperand(i: 0)), |
| 7924 | Size: getValue(V: I.getArgOperand(i: 1)), DstPtrInfo: MachinePointerInfo(I.getArgOperand(i: 0)), |
| 7925 | ZeroData: ZeroMemory); |
| 7926 | DAG.setRoot(Val); |
| 7927 | setValue(V: &I, NewN: Val); |
| 7928 | return; |
| 7929 | } |
| 7930 | case Intrinsic::amdgcn_cs_chain: { |
| 7931 | // At this point we don't care if it's amdgpu_cs_chain or |
| 7932 | // amdgpu_cs_chain_preserve. |
| 7933 | CallingConv::ID CC = CallingConv::AMDGPU_CS_Chain; |
| 7934 | |
| 7935 | Type *RetTy = I.getType(); |
| 7936 | assert(RetTy->isVoidTy() && "Should not return" ); |
| 7937 | |
| 7938 | SDValue Callee = getValue(V: I.getOperand(i_nocapture: 0)); |
| 7939 | |
| 7940 | // We only have 2 actual args: one for the SGPRs and one for the VGPRs. |
| 7941 | // We'll also tack the value of the EXEC mask at the end. |
| 7942 | TargetLowering::ArgListTy Args; |
| 7943 | Args.reserve(n: 3); |
| 7944 | |
| 7945 | for (unsigned Idx : {2, 3, 1}) { |
| 7946 | TargetLowering::ArgListEntry Arg; |
| 7947 | Arg.Node = getValue(V: I.getOperand(i_nocapture: Idx)); |
| 7948 | Arg.Ty = I.getOperand(i_nocapture: Idx)->getType(); |
| 7949 | Arg.setAttributes(Call: &I, ArgIdx: Idx); |
| 7950 | Args.push_back(x: Arg); |
| 7951 | } |
| 7952 | |
| 7953 | assert(Args[0].IsInReg && "SGPR args should be marked inreg"); |
| 7954 | assert(!Args[1].IsInReg && "VGPR args should not be marked inreg"); |
| 7955 | Args[2].IsInReg = true; // EXEC should be inreg |
| 7956 | |
| 7957 | // Forward the flags and any additional arguments. |
| 7958 | for (unsigned Idx = 4; Idx < I.arg_size(); ++Idx) { |
| 7959 | TargetLowering::ArgListEntry Arg; |
| 7960 | Arg.Node = getValue(V: I.getOperand(i_nocapture: Idx)); |
| 7961 | Arg.Ty = I.getOperand(i_nocapture: Idx)->getType(); |
| 7962 | Arg.setAttributes(Call: &I, ArgIdx: Idx); |
| 7963 | Args.push_back(x: Arg); |
| 7964 | } |
| 7965 | |
| 7966 | TargetLowering::CallLoweringInfo CLI(DAG); |
| 7967 | CLI.setDebugLoc(getCurSDLoc()) |
| 7968 | .setChain(getRoot()) |
| 7969 | .setCallee(CC, ResultType: RetTy, Target: Callee, ArgsList: std::move(Args)) |
| 7970 | .setNoReturn(true) |
| 7971 | .setTailCall(true) |
| 7972 | .setConvergent(I.isConvergent()); |
| 7973 | CLI.CB = &I; |
| 7974 | std::pair<SDValue, SDValue> Result = |
| 7975 | lowerInvokable(CLI, /*EHPadBB*/ nullptr); |
| 7976 | (void)Result; |
| 7977 | assert(!Result.first.getNode() && !Result.second.getNode() && |
| 7978 | "Should've lowered as tail call" ); |
| 7979 | |
| 7980 | HasTailCall = true; |
| 7981 | return; |
| 7982 | } |
| 7983 | case Intrinsic::ptrmask: { |
| 7984 | SDValue Ptr = getValue(V: I.getOperand(i_nocapture: 0)); |
| 7985 | SDValue Mask = getValue(V: I.getOperand(i_nocapture: 1)); |
| 7986 | |
| 7987 | // On arm64_32, pointers are 32 bits when stored in memory, but |
| 7988 | // zero-extended to 64 bits when in registers. Thus the mask is 32 bits to |
| 7989 | // match the index type, but the pointer is 64 bits, so the mask must be |
| 7990 | // zero-extended up to 64 bits to match the pointer. |
| 7991 | EVT PtrVT = |
| 7992 | TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getOperand(i_nocapture: 0)->getType()); |
| 7993 | EVT MemVT = |
| 7994 | TLI.getMemValueType(DL: DAG.getDataLayout(), Ty: I.getOperand(i_nocapture: 0)->getType()); |
| 7995 | assert(PtrVT == Ptr.getValueType()); |
| 7996 | if (Mask.getValueType().getFixedSizeInBits() < MemVT.getFixedSizeInBits()) { |
| 7997 | // For AMDGPU buffer descriptors the mask is 48 bits, but the pointer is |
| 7998 | // 128-bit, so we have to pad the mask with ones for unused bits. |
| 7999 | auto HighOnes = DAG.getNode( |
| 8000 | Opcode: ISD::SHL, DL: sdl, VT: PtrVT, N1: DAG.getAllOnesConstant(DL: sdl, VT: PtrVT), |
| 8001 | N2: DAG.getShiftAmountConstant(Val: Mask.getValueType().getFixedSizeInBits(), |
| 8002 | VT: PtrVT, DL: sdl)); |
| 8003 | Mask = DAG.getNode(Opcode: ISD::OR, DL: sdl, VT: PtrVT, |
| 8004 | N1: DAG.getZExtOrTrunc(Op: Mask, DL: sdl, VT: PtrVT), N2: HighOnes); |
| 8005 | } else if (Mask.getValueType() != PtrVT) |
| 8006 | Mask = DAG.getPtrExtOrTrunc(Op: Mask, DL: sdl, VT: PtrVT); |
| 8007 | |
| 8008 | assert(Mask.getValueType() == PtrVT); |
| 8009 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::AND, DL: sdl, VT: PtrVT, N1: Ptr, N2: Mask)); |
| 8010 | return; |
| 8011 | } |
| 8012 | case Intrinsic::threadlocal_address: { |
| 8013 | setValue(V: &I, NewN: getValue(V: I.getOperand(i_nocapture: 0))); |
| 8014 | return; |
| 8015 | } |
| 8016 | case Intrinsic::get_active_lane_mask: { |
| 8017 | EVT CCVT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 8018 | SDValue Index = getValue(V: I.getOperand(i_nocapture: 0)); |
| 8019 | SDValue TripCount = getValue(V: I.getOperand(i_nocapture: 1)); |
| 8020 | EVT ElementVT = Index.getValueType(); |
| 8021 | |
| 8022 | if (!TLI.shouldExpandGetActiveLaneMask(VT: CCVT, OpVT: ElementVT)) { |
| 8023 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::GET_ACTIVE_LANE_MASK, DL: sdl, VT: CCVT, N1: Index, |
| 8024 | N2: TripCount)); |
| 8025 | return; |
| 8026 | } |
| 8027 | |
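| | // Expand: lane i is active iff Index + i < TripCount; the saturating add |
| | // keeps the compare correct when Index + i would otherwise wrap. |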
| 8028 | EVT VecTy = EVT::getVectorVT(Context&: *DAG.getContext(), VT: ElementVT, |
| 8029 | EC: CCVT.getVectorElementCount()); |
| 8030 | |
| 8031 | SDValue VectorIndex = DAG.getSplat(VT: VecTy, DL: sdl, Op: Index); |
| 8032 | SDValue VectorTripCount = DAG.getSplat(VT: VecTy, DL: sdl, Op: TripCount); |
| 8033 | SDValue VectorStep = DAG.getStepVector(DL: sdl, ResVT: VecTy); |
| 8034 | SDValue VectorInduction = DAG.getNode( |
| 8035 | Opcode: ISD::UADDSAT, DL: sdl, VT: VecTy, N1: VectorIndex, N2: VectorStep); |
| 8036 | SDValue SetCC = DAG.getSetCC(DL: sdl, VT: CCVT, LHS: VectorInduction, |
| 8037 | RHS: VectorTripCount, Cond: ISD::CondCode::SETULT); |
| 8038 | setValue(V: &I, NewN: SetCC); |
| 8039 | return; |
| 8040 | } |
| 8041 | case Intrinsic::experimental_get_vector_length: { |
| 8042 | assert(cast<ConstantInt>(I.getOperand(1))->getSExtValue() > 0 && |
| 8043 | "Expected positive VF" ); |
| 8044 | unsigned VF = cast<ConstantInt>(Val: I.getOperand(i_nocapture: 1))->getZExtValue(); |
| 8045 | bool IsScalable = cast<ConstantInt>(Val: I.getOperand(i_nocapture: 2))->isOne(); |
| 8046 | |
| 8047 | SDValue Count = getValue(V: I.getOperand(i_nocapture: 0)); |
| 8048 | EVT CountVT = Count.getValueType(); |
| 8049 | |
| 8050 | if (!TLI.shouldExpandGetVectorLength(CountVT, VF, IsScalable)) { |
| 8051 | visitTargetIntrinsic(I, Intrinsic); |
| 8052 | return; |
| 8053 | } |
| 8054 | |
| 8055 | // Expand to a umin between the trip count and the maximum number of |
| 8056 | // elements the type can hold. |
| 8057 | EVT VT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 8058 | |
| 8059 | // Extend the trip count to at least the result VT. |
| 8060 | if (CountVT.bitsLT(VT)) { |
| 8061 | Count = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: sdl, VT, Operand: Count); |
| 8062 | CountVT = VT; |
| 8063 | } |
| 8064 | |
| 8065 | SDValue MaxEVL = DAG.getElementCount(DL: sdl, VT: CountVT, |
| 8066 | EC: ElementCount::get(MinVal: VF, Scalable: IsScalable)); |
| 8067 | |
| 8068 | SDValue UMin = DAG.getNode(Opcode: ISD::UMIN, DL: sdl, VT: CountVT, N1: Count, N2: MaxEVL); |
| 8069 | // Clip to the result type if needed. |
| 8070 | SDValue Trunc = DAG.getNode(Opcode: ISD::TRUNCATE, DL: sdl, VT, Operand: UMin); |
| 8071 | |
| 8072 | setValue(V: &I, NewN: Trunc); |
| 8073 | return; |
| 8074 | } |
| 8075 | case Intrinsic::experimental_vector_partial_reduce_add: { |
| 8076 | if (!TLI.shouldExpandPartialReductionIntrinsic(I: cast<IntrinsicInst>(Val: &I))) { |
| 8077 | visitTargetIntrinsic(I, Intrinsic); |
| 8078 | return; |
| 8079 | } |
| 8080 | SDValue Acc = getValue(V: I.getOperand(i_nocapture: 0)); |
| 8081 | SDValue Input = getValue(V: I.getOperand(i_nocapture: 1)); |
| 8082 | setValue(V: &I, |
| 8083 | NewN: DAG.getNode(Opcode: ISD::PARTIAL_REDUCE_UMLA, DL: sdl, VT: Acc.getValueType(), N1: Acc, |
| 8084 | N2: Input, N3: DAG.getConstant(Val: 1, DL: sdl, VT: Input.getValueType()))); |
| 8085 | return; |
| 8086 | } |
| 8087 | case Intrinsic::experimental_cttz_elts: { |
| 8088 | auto DL = getCurSDLoc(); |
| 8089 | SDValue Op = getValue(V: I.getOperand(i_nocapture: 0)); |
| 8090 | EVT OpVT = Op.getValueType(); |
| 8091 | |
| 8092 | if (!TLI.shouldExpandCttzElements(VT: OpVT)) { |
| 8093 | visitTargetIntrinsic(I, Intrinsic); |
| 8094 | return; |
| 8095 | } |
| 8096 | |
| 8097 | if (OpVT.getScalarType() != MVT::i1) { |
| 8098 | // Compare the input vector elements to zero and use the result to count trailing zeros. |
| 8099 | SDValue AllZero = DAG.getConstant(Val: 0, DL, VT: OpVT); |
| 8100 | OpVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::i1, |
| 8101 | EC: OpVT.getVectorElementCount()); |
| 8102 | Op = DAG.getSetCC(DL, VT: OpVT, LHS: Op, RHS: AllZero, Cond: ISD::SETNE); |
| 8103 | } |
| 8104 | |
| 8105 | // If the zero-is-poison flag is set, we can assume the upper limit |
| 8106 | // of the result is VF-1. |
| 8107 | bool ZeroIsPoison = |
| 8108 | !cast<ConstantSDNode>(Val: getValue(V: I.getOperand(i_nocapture: 1)))->isZero(); |
| 8109 | ConstantRange VScaleRange(1, true); // Dummy value. |
| 8110 | if (isa<ScalableVectorType>(Val: I.getOperand(i_nocapture: 0)->getType())) |
| 8111 | VScaleRange = getVScaleRange(F: I.getCaller(), BitWidth: 64); |
| 8112 | unsigned EltWidth = TLI.getBitWidthForCttzElements( |
| 8113 | RetTy: I.getType(), EC: OpVT.getVectorElementCount(), ZeroIsPoison, VScaleRange: &VScaleRange); |
| 8114 | |
| 8115 | MVT NewEltTy = MVT::getIntegerVT(BitWidth: EltWidth); |
| 8116 | |
| 8117 | // Create the new vector type and get the vector length. |
| 8118 | EVT NewVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: NewEltTy, |
| 8119 | EC: OpVT.getVectorElementCount()); |
| 8120 | |
| 8121 | SDValue VL = |
| 8122 | DAG.getElementCount(DL, VT: NewEltTy, EC: OpVT.getVectorElementCount()); |
| 8123 | |
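| | // And[i] below is VL - i for set mask lanes and 0 elsewhere, so |
| | // VL - umax(And) is the index of the first set lane, or VL when the mask |
| | // is all zeros. |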
| 8124 | SDValue StepVec = DAG.getStepVector(DL, ResVT: NewVT); |
| 8125 | SDValue SplatVL = DAG.getSplat(VT: NewVT, DL, Op: VL); |
| 8126 | SDValue StepVL = DAG.getNode(Opcode: ISD::SUB, DL, VT: NewVT, N1: SplatVL, N2: StepVec); |
| 8127 | SDValue Ext = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL, VT: NewVT, Operand: Op); |
| 8128 | SDValue And = DAG.getNode(Opcode: ISD::AND, DL, VT: NewVT, N1: StepVL, N2: Ext); |
| 8129 | SDValue Max = DAG.getNode(Opcode: ISD::VECREDUCE_UMAX, DL, VT: NewEltTy, Operand: And); |
| 8130 | SDValue Sub = DAG.getNode(Opcode: ISD::SUB, DL, VT: NewEltTy, N1: VL, N2: Max); |
| 8131 | |
| 8132 | EVT RetTy = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 8133 | SDValue Ret = DAG.getZExtOrTrunc(Op: Sub, DL, VT: RetTy); |
| 8134 | |
| 8135 | setValue(V: &I, NewN: Ret); |
| 8136 | return; |
| 8137 | } |
| 8138 | case Intrinsic::vector_insert: { |
| 8139 | SDValue Vec = getValue(V: I.getOperand(i_nocapture: 0)); |
| 8140 | SDValue SubVec = getValue(V: I.getOperand(i_nocapture: 1)); |
| 8141 | SDValue Index = getValue(V: I.getOperand(i_nocapture: 2)); |
| 8142 | |
| 8143 | // The intrinsic's index type is i64, but the SDNode requires an index type |
| 8144 | // suitable for the target. Convert the index as required. |
| 8145 | MVT VectorIdxTy = TLI.getVectorIdxTy(DL: DAG.getDataLayout()); |
| 8146 | if (Index.getValueType() != VectorIdxTy) |
| 8147 | Index = DAG.getVectorIdxConstant(Val: Index->getAsZExtVal(), DL: sdl); |
| 8148 | |
| 8149 | EVT ResultVT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 8150 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::INSERT_SUBVECTOR, DL: sdl, VT: ResultVT, N1: Vec, N2: SubVec, |
| 8151 | N3: Index)); |
| 8152 | return; |
| 8153 | } |
| 8154 | case Intrinsic::vector_extract: { |
| 8155 | SDValue Vec = getValue(V: I.getOperand(i_nocapture: 0)); |
| 8156 | SDValue Index = getValue(V: I.getOperand(i_nocapture: 1)); |
| 8157 | EVT ResultVT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType()); |
| 8158 | |
| 8159 | // The intrinsic's index type is i64, but the SDNode requires an index type |
| 8160 | // suitable for the target. Convert the index as required. |
| 8161 | MVT VectorIdxTy = TLI.getVectorIdxTy(DL: DAG.getDataLayout()); |
| 8162 | if (Index.getValueType() != VectorIdxTy) |
| 8163 | Index = DAG.getVectorIdxConstant(Val: Index->getAsZExtVal(), DL: sdl); |
| 8164 | |
| 8165 | setValue(V: &I, |
| 8166 | NewN: DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: sdl, VT: ResultVT, N1: Vec, N2: Index)); |
| 8167 | return; |
| 8168 | } |
| 8169 | case Intrinsic::experimental_vector_match: { |
| 8170 | SDValue Op1 = getValue(V: I.getOperand(i_nocapture: 0)); |
| 8171 | SDValue Op2 = getValue(V: I.getOperand(i_nocapture: 1)); |
| 8172 | SDValue Mask = getValue(V: I.getOperand(i_nocapture: 2)); |
| 8173 | EVT Op1VT = Op1.getValueType(); |
| 8174 | EVT Op2VT = Op2.getValueType(); |
| 8175 | EVT ResVT = Mask.getValueType(); |
| 8176 | unsigned SearchSize = Op2VT.getVectorNumElements(); |
| 8177 | |
| 8178 | // If the target has native support for this vector match operation, lower |
| 8179 | // the intrinsic untouched; otherwise, expand it below. |
| 8180 | if (!TLI.shouldExpandVectorMatch(VT: Op1VT, SearchSize)) { |
| 8181 | visitTargetIntrinsic(I, Intrinsic); |
| 8182 | return; |
| 8183 | } |
| 8184 | |
| 8185 | SDValue Ret = DAG.getConstant(Val: 0, DL: sdl, VT: ResVT); |
| 8186 | |
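| | // Splat each element of the search vector in turn, compare it against the |
| | // input, and OR the per-element comparison results together. |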
| 8187 | for (unsigned i = 0; i < SearchSize; ++i) { |
| 8188 | SDValue Op2Elem = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: sdl, |
| 8189 | VT: Op2VT.getVectorElementType(), N1: Op2, |
| 8190 | N2: DAG.getVectorIdxConstant(Val: i, DL: sdl)); |
| 8191 | SDValue Splat = DAG.getNode(Opcode: ISD::SPLAT_VECTOR, DL: sdl, VT: Op1VT, Operand: Op2Elem); |
| 8192 | SDValue Cmp = DAG.getSetCC(DL: sdl, VT: ResVT, LHS: Op1, RHS: Splat, Cond: ISD::SETEQ); |
| 8193 | Ret = DAG.getNode(Opcode: ISD::OR, DL: sdl, VT: ResVT, N1: Ret, N2: Cmp); |
| 8194 | } |
| 8195 | |
| 8196 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::AND, DL: sdl, VT: ResVT, N1: Ret, N2: Mask)); |
| 8197 | return; |
| 8198 | } |
| 8199 | case Intrinsic::vector_reverse: |
| 8200 | visitVectorReverse(I); |
| 8201 | return; |
| 8202 | case Intrinsic::vector_splice: |
| 8203 | visitVectorSplice(I); |
| 8204 | return; |
| 8205 | case Intrinsic::callbr_landingpad: |
| 8206 | visitCallBrLandingPad(I); |
| 8207 | return; |
| 8208 | case Intrinsic::vector_interleave2: |
| 8209 | visitVectorInterleave(I, Factor: 2); |
| 8210 | return; |
| 8211 | case Intrinsic::vector_interleave3: |
| 8212 | visitVectorInterleave(I, Factor: 3); |
| 8213 | return; |
| 8214 | case Intrinsic::vector_interleave4: |
| 8215 | visitVectorInterleave(I, Factor: 4); |
| 8216 | return; |
| 8217 | case Intrinsic::vector_interleave5: |
| 8218 | visitVectorInterleave(I, Factor: 5); |
| 8219 | return; |
| 8220 | case Intrinsic::vector_interleave6: |
| 8221 | visitVectorInterleave(I, Factor: 6); |
| 8222 | return; |
| 8223 | case Intrinsic::vector_interleave7: |
| 8224 | visitVectorInterleave(I, Factor: 7); |
| 8225 | return; |
| 8226 | case Intrinsic::vector_interleave8: |
| 8227 | visitVectorInterleave(I, Factor: 8); |
| 8228 | return; |
| 8229 | case Intrinsic::vector_deinterleave2: |
| 8230 | visitVectorDeinterleave(I, Factor: 2); |
| 8231 | return; |
| 8232 | case Intrinsic::vector_deinterleave3: |
| 8233 | visitVectorDeinterleave(I, Factor: 3); |
| 8234 | return; |
| 8235 | case Intrinsic::vector_deinterleave4: |
| 8236 | visitVectorDeinterleave(I, Factor: 4); |
| 8237 | return; |
| 8238 | case Intrinsic::vector_deinterleave5: |
| 8239 | visitVectorDeinterleave(I, Factor: 5); |
| 8240 | return; |
| 8241 | case Intrinsic::vector_deinterleave6: |
| 8242 | visitVectorDeinterleave(I, Factor: 6); |
| 8243 | return; |
| 8244 | case Intrinsic::vector_deinterleave7: |
| 8245 | visitVectorDeinterleave(I, Factor: 7); |
| 8246 | return; |
| 8247 | case Intrinsic::vector_deinterleave8: |
| 8248 | visitVectorDeinterleave(I, Factor: 8); |
| 8249 | return; |
| 8250 | case Intrinsic::experimental_vector_compress: |
| 8251 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::VECTOR_COMPRESS, DL: sdl, |
| 8252 | VT: getValue(V: I.getArgOperand(i: 0)).getValueType(), |
| 8253 | N1: getValue(V: I.getArgOperand(i: 0)), |
| 8254 | N2: getValue(V: I.getArgOperand(i: 1)), |
| 8255 | N3: getValue(V: I.getArgOperand(i: 2)), Flags)); |
| 8256 | return; |
| 8257 | case Intrinsic::experimental_convergence_anchor: |
| 8258 | case Intrinsic::experimental_convergence_entry: |
| 8259 | case Intrinsic::experimental_convergence_loop: |
| 8260 | visitConvergenceControl(I, Intrinsic); |
| 8261 | return; |
| 8262 | case Intrinsic::experimental_vector_histogram_add: { |
| 8263 | visitVectorHistogram(I, IntrinsicID: Intrinsic); |
| 8264 | return; |
| 8265 | } |
| 8266 | case Intrinsic::experimental_vector_extract_last_active: { |
| 8267 | visitVectorExtractLastActive(I, Intrinsic); |
| 8268 | return; |
| 8269 | } |
| 8270 | } |
| 8271 | } |
| 8272 | |
| 8273 | void SelectionDAGBuilder::visitConstrainedFPIntrinsic( |
| 8274 | const ConstrainedFPIntrinsic &FPI) { |
| 8275 | SDLoc sdl = getCurSDLoc(); |
| 8276 | |
| 8277 | // We do not need to serialize constrained FP intrinsics against |
| 8278 | // each other or against (nonvolatile) loads, so they can be |
| 8279 | // chained like loads. |
| 8280 | SDValue Chain = DAG.getRoot(); |
| 8281 | SmallVector<SDValue, 4> Opers; |
| 8282 | Opers.push_back(Elt: Chain); |
| 8283 | for (unsigned I = 0, E = FPI.getNonMetadataArgCount(); I != E; ++I) |
| 8284 | Opers.push_back(Elt: getValue(V: FPI.getArgOperand(i: I))); |
| 8285 | |
| 8286 | auto pushOutChain = [this](SDValue Result, fp::ExceptionBehavior EB) { |
| 8287 | assert(Result.getNode()->getNumValues() == 2); |
| 8288 | |
| 8289 | // Push node to the appropriate list so that future instructions can be |
| 8290 | // chained up correctly. |
| 8291 | SDValue OutChain = Result.getValue(R: 1); |
| 8292 | switch (EB) { |
| 8293 | case fp::ExceptionBehavior::ebIgnore: |
| 8294 | // The only reason why ebIgnore nodes still need to be chained is that |
| 8295 | // they might depend on the current rounding mode, and therefore must |
| 8296 | // not be moved across instructions that may change that mode. |
| 8297 | [[fallthrough]]; |
| 8298 | case fp::ExceptionBehavior::ebMayTrap: |
| 8299 | // These must not be moved across calls or instructions that may change |
| 8300 | // floating-point exception masks. |
| 8301 | PendingConstrainedFP.push_back(Elt: OutChain); |
| 8302 | break; |
| 8303 | case fp::ExceptionBehavior::ebStrict: |
| 8304 | // These must not be moved across calls or instructions that may change |
| 8305 | // floating-point exception masks or read floating-point exception flags. |
| 8306 | // In addition, they cannot be optimized out even if unused. |
| 8307 | PendingConstrainedFPStrict.push_back(Elt: OutChain); |
| 8308 | break; |
| 8309 | } |
| 8310 | }; |
| 8311 | |
| 8312 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 8313 | EVT VT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: FPI.getType()); |
| 8314 | SDVTList VTs = DAG.getVTList(VT1: VT, VT2: MVT::Other); |
| 8315 | fp::ExceptionBehavior EB = *FPI.getExceptionBehavior(); |
| 8316 | |
| 8317 | SDNodeFlags Flags; |
| 8318 | if (EB == fp::ExceptionBehavior::ebIgnore) |
| 8319 | Flags.setNoFPExcept(true); |
| 8320 | |
| 8321 | if (auto *FPOp = dyn_cast<FPMathOperator>(Val: &FPI)) |
| 8322 | Flags.copyFMF(FPMO: *FPOp); |
| 8323 | |
| 8324 | unsigned Opcode; |
| 8325 | switch (FPI.getIntrinsicID()) { |
| 8326 | default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. |
| 8327 | #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ |
| 8328 | case Intrinsic::INTRINSIC: \ |
| 8329 | Opcode = ISD::STRICT_##DAGN; \ |
| 8330 | break; |
| 8331 | #include "llvm/IR/ConstrainedOps.def" |
| 8332 | case Intrinsic::experimental_constrained_fmuladd: { |
| 8333 | Opcode = ISD::STRICT_FMA; |
| 8334 | // Break fmuladd into fmul and fadd. |
| 8335 | if (TM.Options.AllowFPOpFusion == FPOpFusion::Strict || |
| 8336 | !TLI.isFMAFasterThanFMulAndFAdd(MF: DAG.getMachineFunction(), VT)) { |
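| | // The FMUL's out-chain feeds the FADD, so the two strict operations stay |
| | // ordered with respect to the FP environment: |
| | //   fmuladd(a, b, c) -> fadd(fmul(a, b), c) |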
| 8337 | Opers.pop_back(); |
| 8338 | SDValue Mul = DAG.getNode(Opcode: ISD::STRICT_FMUL, DL: sdl, VTList: VTs, Ops: Opers, Flags); |
| 8339 | pushOutChain(Mul, EB); |
| 8340 | Opcode = ISD::STRICT_FADD; |
| 8341 | Opers.clear(); |
| 8342 | Opers.push_back(Elt: Mul.getValue(R: 1)); |
| 8343 | Opers.push_back(Elt: Mul.getValue(R: 0)); |
| 8344 | Opers.push_back(Elt: getValue(V: FPI.getArgOperand(i: 2))); |
| 8345 | } |
| 8346 | break; |
| 8347 | } |
| 8348 | } |
| 8349 | |
| 8350 | // A few strict DAG nodes carry additional operands that are not |
| 8351 | // set up by the default code above. |
| 8352 | switch (Opcode) { |
| 8353 | default: break; |
| 8354 | case ISD::STRICT_FP_ROUND: |
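| | // STRICT_FP_ROUND carries a trailing TRUNC flag; 0 is the conservative |
| | // value, indicating a normal rounding that may change the value. |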
| 8355 | Opers.push_back( |
| 8356 | Elt: DAG.getTargetConstant(Val: 0, DL: sdl, VT: TLI.getPointerTy(DL: DAG.getDataLayout()))); |
| 8357 | break; |
| 8358 | case ISD::STRICT_FSETCC: |
| 8359 | case ISD::STRICT_FSETCCS: { |
| 8360 | auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(Val: &FPI); |
| 8361 | ISD::CondCode Condition = getFCmpCondCode(Pred: FPCmp->getPredicate()); |
| 8362 | if (TM.Options.NoNaNsFPMath) |
| 8363 | Condition = getFCmpCodeWithoutNaN(CC: Condition); |
| 8364 | Opers.push_back(Elt: DAG.getCondCode(Cond: Condition)); |
| 8365 | break; |
| 8366 | } |
| 8367 | } |
| 8368 | |
| 8369 | SDValue Result = DAG.getNode(Opcode, DL: sdl, VTList: VTs, Ops: Opers, Flags); |
| 8370 | pushOutChain(Result, EB); |
| 8371 | |
| 8372 | SDValue FPResult = Result.getValue(R: 0); |
| 8373 | setValue(V: &FPI, NewN: FPResult); |
| 8374 | } |
| 8375 | |
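| | /// Map a VP intrinsic to the opcode of its corresponding VP SDNode, taking |
| | /// immediate arguments (e.g. is-zero-poison flags) and fast-math flags into |
| | /// account. |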
| 8376 | static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin) { |
| 8377 | std::optional<unsigned> ResOPC; |
| 8378 | switch (VPIntrin.getIntrinsicID()) { |
| 8379 | case Intrinsic::vp_ctlz: { |
| 8380 | bool IsZeroUndef = cast<ConstantInt>(Val: VPIntrin.getArgOperand(i: 1))->isOne(); |
| 8381 | ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ; |
| 8382 | break; |
| 8383 | } |
| 8384 | case Intrinsic::vp_cttz: { |
| 8385 | bool IsZeroUndef = cast<ConstantInt>(Val: VPIntrin.getArgOperand(i: 1))->isOne(); |
| 8386 | ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ; |
| 8387 | break; |
| 8388 | } |
| 8389 | case Intrinsic::vp_cttz_elts: { |
| 8390 | bool IsZeroPoison = cast<ConstantInt>(Val: VPIntrin.getArgOperand(i: 1))->isOne(); |
| 8391 | ResOPC = IsZeroPoison ? ISD::VP_CTTZ_ELTS_ZERO_UNDEF : ISD::VP_CTTZ_ELTS; |
| 8392 | break; |
| 8393 | } |
| 8394 | #define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) \ |
| 8395 | case Intrinsic::VPID: \ |
| 8396 | ResOPC = ISD::VPSD; \ |
| 8397 | break; |
| 8398 | #include "llvm/IR/VPIntrinsics.def" |
| 8399 | } |
| 8400 | |
| 8401 | if (!ResOPC) |
| 8402 | llvm_unreachable( |
| 8403 | "Inconsistency: no SDNode available for this VPIntrinsic!" ); |
| 8404 | |
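| | // Sequential (ordered) FP reductions can be relaxed to their unordered |
| | // counterparts when reassociation is allowed, since the strict evaluation |
| | // order is then not observable. |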
| 8405 | if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD || |
| 8406 | *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) { |
| 8407 | if (VPIntrin.getFastMathFlags().allowReassoc()) |
| 8408 | return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD |
| 8409 | : ISD::VP_REDUCE_FMUL; |
| 8410 | } |
| 8411 | |
| 8412 | return *ResOPC; |
| 8413 | } |
| 8414 | |
| 8415 | void SelectionDAGBuilder::visitVPLoad( |
| 8416 | const VPIntrinsic &VPIntrin, EVT VT, |
| 8417 | const SmallVectorImpl<SDValue> &OpValues) { |
| 8418 | SDLoc DL = getCurSDLoc(); |
| 8419 | Value *PtrOperand = VPIntrin.getArgOperand(i: 0); |
| 8420 | MaybeAlign Alignment = VPIntrin.getPointerAlignment(); |
| 8421 | AAMDNodes AAInfo = VPIntrin.getAAMetadata(); |
| 8422 | const MDNode *Ranges = getRangeMetadata(I: VPIntrin); |
| 8423 | SDValue LD; |
| 8424 | if (!Alignment) |
| 8425 | Alignment = DAG.getEVTAlign(MemoryVT: VT); |
| 8426 | // Do not serialize variable-length loads of constant memory with |
| 8427 | // anything. |
| 8428 | MemoryLocation ML = MemoryLocation::getAfter(Ptr: PtrOperand, AATags: AAInfo); |
| 8429 | bool AddToChain = !BatchAA || !BatchAA->pointsToConstantMemory(Loc: ML); |
| 8430 | SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode(); |
| 8431 | MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( |
| 8432 | PtrInfo: MachinePointerInfo(PtrOperand), F: MachineMemOperand::MOLoad, |
| 8433 | Size: LocationSize::beforeOrAfterPointer(), BaseAlignment: *Alignment, AAInfo, Ranges); |
| 8434 | LD = DAG.getLoadVP(VT, dl: DL, Chain: InChain, Ptr: OpValues[0], Mask: OpValues[1], EVL: OpValues[2], |
| 8435 | MMO, IsExpanding: false); |
| 8436 | if (AddToChain) |
| 8437 | PendingLoads.push_back(Elt: LD.getValue(R: 1)); |
| 8438 | setValue(V: &VPIntrin, NewN: LD); |
| 8439 | } |
| 8440 | |
| 8441 | void SelectionDAGBuilder::visitVPGather( |
| 8442 | const VPIntrinsic &VPIntrin, EVT VT, |
| 8443 | const SmallVectorImpl<SDValue> &OpValues) { |
| 8444 | SDLoc DL = getCurSDLoc(); |
| 8445 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 8446 | Value *PtrOperand = VPIntrin.getArgOperand(i: 0); |
| 8447 | MaybeAlign Alignment = VPIntrin.getPointerAlignment(); |
| 8448 | AAMDNodes AAInfo = VPIntrin.getAAMetadata(); |
| 8449 | const MDNode *Ranges = getRangeMetadata(I: VPIntrin); |
| 8450 | SDValue LD; |
| 8451 | if (!Alignment) |
| 8452 | Alignment = DAG.getEVTAlign(MemoryVT: VT.getScalarType()); |
| 8453 | unsigned AS = |
| 8454 | PtrOperand->getType()->getScalarType()->getPointerAddressSpace(); |
| 8455 | MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( |
| 8456 | PtrInfo: MachinePointerInfo(AS), F: MachineMemOperand::MOLoad, |
| 8457 | Size: LocationSize::beforeOrAfterPointer(), BaseAlignment: *Alignment, AAInfo, Ranges); |
| 8458 | SDValue Base, Index, Scale; |
| 8459 | ISD::MemIndexType IndexType; |
| 8460 | bool UniformBase = getUniformBase(Ptr: PtrOperand, Base, Index, IndexType, Scale, |
| 8461 | SDB: this, CurBB: VPIntrin.getParent(), |
| 8462 | ElemSize: VT.getScalarStoreSize()); |
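| | // If no common (uniform) base can be split out of the pointer operand, |
| | // gather directly from the vector of pointers: base 0, the pointers |
| | // themselves as the index, and a scale of 1. |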
| 8463 | if (!UniformBase) { |
| 8464 | Base = DAG.getConstant(Val: 0, DL, VT: TLI.getPointerTy(DL: DAG.getDataLayout())); |
| 8465 | Index = getValue(V: PtrOperand); |
| 8466 | IndexType = ISD::SIGNED_SCALED; |
| 8467 | Scale = DAG.getTargetConstant(Val: 1, DL, VT: TLI.getPointerTy(DL: DAG.getDataLayout())); |
| 8468 | } |
| 8469 | EVT IdxVT = Index.getValueType(); |
| 8470 | EVT EltTy = IdxVT.getVectorElementType(); |
| 8471 | if (TLI.shouldExtendGSIndex(VT: IdxVT, EltTy)) { |
| 8472 | EVT NewIdxVT = IdxVT.changeVectorElementType(EltVT: EltTy); |
| 8473 | Index = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL, VT: NewIdxVT, Operand: Index); |
| 8474 | } |
| 8475 | LD = DAG.getGatherVP( |
| 8476 | VTs: DAG.getVTList(VT1: VT, VT2: MVT::Other), VT, dl: DL, |
| 8477 | Ops: {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO, |
| 8478 | IndexType); |
| 8479 | PendingLoads.push_back(Elt: LD.getValue(R: 1)); |
| 8480 | setValue(V: &VPIntrin, NewN: LD); |
| 8481 | } |
| 8482 | |
| 8483 | void SelectionDAGBuilder::visitVPStore( |
| 8484 | const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) { |
| 8485 | SDLoc DL = getCurSDLoc(); |
| 8486 | Value *PtrOperand = VPIntrin.getArgOperand(i: 1); |
| 8487 | EVT VT = OpValues[0].getValueType(); |
| 8488 | MaybeAlign Alignment = VPIntrin.getPointerAlignment(); |
| 8489 | AAMDNodes AAInfo = VPIntrin.getAAMetadata(); |
| 8490 | SDValue ST; |
| 8491 | if (!Alignment) |
| 8492 | Alignment = DAG.getEVTAlign(MemoryVT: VT); |
| 8493 | SDValue Ptr = OpValues[1]; |
| 8494 | SDValue Offset = DAG.getUNDEF(VT: Ptr.getValueType()); |
| 8495 | MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( |
| 8496 | PtrInfo: MachinePointerInfo(PtrOperand), F: MachineMemOperand::MOStore, |
| 8497 | Size: LocationSize::beforeOrAfterPointer(), BaseAlignment: *Alignment, AAInfo); |
| 8498 | ST = DAG.getStoreVP(Chain: getMemoryRoot(), dl: DL, Val: OpValues[0], Ptr, Offset, |
| 8499 | Mask: OpValues[2], EVL: OpValues[3], MemVT: VT, MMO, AM: ISD::UNINDEXED, |
| 8500 | /* IsTruncating */ false, /*IsCompressing*/ false); |
| 8501 | DAG.setRoot(ST); |
| 8502 | setValue(V: &VPIntrin, NewN: ST); |
| 8503 | } |
| 8504 | |
| 8505 | void SelectionDAGBuilder::visitVPScatter( |
| 8506 | const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) { |
| 8507 | SDLoc DL = getCurSDLoc(); |
| 8508 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 8509 | Value *PtrOperand = VPIntrin.getArgOperand(i: 1); |
| 8510 | EVT VT = OpValues[0].getValueType(); |
| 8511 | MaybeAlign Alignment = VPIntrin.getPointerAlignment(); |
| 8512 | AAMDNodes AAInfo = VPIntrin.getAAMetadata(); |
| 8513 | SDValue ST; |
| 8514 | if (!Alignment) |
| 8515 | Alignment = DAG.getEVTAlign(MemoryVT: VT.getScalarType()); |
| 8516 | unsigned AS = |
| 8517 | PtrOperand->getType()->getScalarType()->getPointerAddressSpace(); |
| 8518 | MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( |
| 8519 | PtrInfo: MachinePointerInfo(AS), F: MachineMemOperand::MOStore, |
| 8520 | Size: LocationSize::beforeOrAfterPointer(), BaseAlignment: *Alignment, AAInfo); |
| 8521 | SDValue Base, Index, Scale; |
| 8522 | ISD::MemIndexType IndexType; |
| 8523 | bool UniformBase = getUniformBase(Ptr: PtrOperand, Base, Index, IndexType, Scale, |
| 8524 | SDB: this, CurBB: VPIntrin.getParent(), |
| 8525 | ElemSize: VT.getScalarStoreSize()); |
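| | // As with VP gathers, fall back to base 0 with the pointer vector as the |
| | // index when no uniform base can be extracted. |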
| 8526 | if (!UniformBase) { |
| 8527 | Base = DAG.getConstant(Val: 0, DL, VT: TLI.getPointerTy(DL: DAG.getDataLayout())); |
| 8528 | Index = getValue(V: PtrOperand); |
| 8529 | IndexType = ISD::SIGNED_SCALED; |
| 8530 | Scale = |
| 8531 | DAG.getTargetConstant(Val: 1, DL, VT: TLI.getPointerTy(DL: DAG.getDataLayout())); |
| 8532 | } |
| 8533 | EVT IdxVT = Index.getValueType(); |
| 8534 | EVT EltTy = IdxVT.getVectorElementType(); |
| 8535 | if (TLI.shouldExtendGSIndex(VT: IdxVT, EltTy)) { |
| 8536 | EVT NewIdxVT = IdxVT.changeVectorElementType(EltVT: EltTy); |
| 8537 | Index = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL, VT: NewIdxVT, Operand: Index); |
| 8538 | } |
| 8539 | ST = DAG.getScatterVP(VTs: DAG.getVTList(VT: MVT::Other), VT, dl: DL, |
| 8540 | Ops: {getMemoryRoot(), OpValues[0], Base, Index, Scale, |
| 8541 | OpValues[2], OpValues[3]}, |
| 8542 | MMO, IndexType); |
| 8543 | DAG.setRoot(ST); |
| 8544 | setValue(V: &VPIntrin, NewN: ST); |
| 8545 | } |
| 8546 | |
| 8547 | void SelectionDAGBuilder::visitVPStridedLoad( |
| 8548 | const VPIntrinsic &VPIntrin, EVT VT, |
| 8549 | const SmallVectorImpl<SDValue> &OpValues) { |
| 8550 | SDLoc DL = getCurSDLoc(); |
| 8551 | Value *PtrOperand = VPIntrin.getArgOperand(i: 0); |
| 8552 | MaybeAlign Alignment = VPIntrin.getPointerAlignment(); |
| 8553 | if (!Alignment) |
| 8554 | Alignment = DAG.getEVTAlign(MemoryVT: VT.getScalarType()); |
| 8555 | AAMDNodes AAInfo = VPIntrin.getAAMetadata(); |
| 8556 | const MDNode *Ranges = getRangeMetadata(I: VPIntrin); |
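| | // As with VP loads, a load from provably constant memory is chained to the |
| | // entry node rather than the current root, so it is not serialized against |
| | // other memory operations. |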
| 8557 | MemoryLocation ML = MemoryLocation::getAfter(Ptr: PtrOperand, AATags: AAInfo); |
| 8558 | bool AddToChain = !BatchAA || !BatchAA->pointsToConstantMemory(Loc: ML); |
| 8559 | SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode(); |
| 8560 | unsigned AS = PtrOperand->getType()->getPointerAddressSpace(); |
| 8561 | MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( |
| 8562 | PtrInfo: MachinePointerInfo(AS), F: MachineMemOperand::MOLoad, |
| 8563 | Size: LocationSize::beforeOrAfterPointer(), BaseAlignment: *Alignment, AAInfo, Ranges); |
| 8564 | |
| 8565 | SDValue LD = DAG.getStridedLoadVP(VT, DL, Chain: InChain, Ptr: OpValues[0], Stride: OpValues[1], |
| 8566 | Mask: OpValues[2], EVL: OpValues[3], MMO, |
| 8567 | IsExpanding: false); |
| 8568 | |
| 8569 | if (AddToChain) |
| 8570 | PendingLoads.push_back(Elt: LD.getValue(R: 1)); |
| 8571 | setValue(V: &VPIntrin, NewN: LD); |
| 8572 | } |
| 8573 | |
| 8574 | void SelectionDAGBuilder::visitVPStridedStore( |
| 8575 | const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) { |
| 8576 | SDLoc DL = getCurSDLoc(); |
| 8577 | Value *PtrOperand = VPIntrin.getArgOperand(i: 1); |
| 8578 | EVT VT = OpValues[0].getValueType(); |
| 8579 | MaybeAlign Alignment = VPIntrin.getPointerAlignment(); |
| 8580 | if (!Alignment) |
| 8581 | Alignment = DAG.getEVTAlign(MemoryVT: VT.getScalarType()); |
| 8582 | AAMDNodes AAInfo = VPIntrin.getAAMetadata(); |
| 8583 | unsigned AS = PtrOperand->getType()->getPointerAddressSpace(); |
| 8584 | MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( |
| 8585 | PtrInfo: MachinePointerInfo(AS), F: MachineMemOperand::MOStore, |
| 8586 | Size: LocationSize::beforeOrAfterPointer(), BaseAlignment: *Alignment, AAInfo); |
| 8587 | |
| 8588 | SDValue ST = DAG.getStridedStoreVP( |
| 8589 | Chain: getMemoryRoot(), DL, Val: OpValues[0], Ptr: OpValues[1], |
| 8590 | Offset: DAG.getUNDEF(VT: OpValues[1].getValueType()), Stride: OpValues[2], Mask: OpValues[3], |
| 8591 | EVL: OpValues[4], MemVT: VT, MMO, AM: ISD::UNINDEXED, /*IsTruncating*/ false, |
| 8592 | /*IsCompressing*/ false); |
| 8593 | |
| 8594 | DAG.setRoot(ST); |
| 8595 | setValue(V: &VPIntrin, NewN: ST); |
| 8596 | } |
| 8597 | |
| 8598 | void SelectionDAGBuilder::visitVPCmp(const VPCmpIntrinsic &VPIntrin) { |
| 8599 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 8600 | SDLoc DL = getCurSDLoc(); |
| 8601 | |
| 8602 | ISD::CondCode Condition; |
| 8603 | CmpInst::Predicate CondCode = VPIntrin.getPredicate(); |
| 8604 | bool IsFP = VPIntrin.getOperand(i_nocapture: 0)->getType()->isFPOrFPVectorTy(); |
| 8605 | if (IsFP) { |
| 8606 | // FIXME: Regular fcmps are FPMathOperators and may carry fast-math (nnan) |
| 8607 | // flags, but calls that don't return a floating-point type, such as |
| 8608 | // vp.fcmp, can't be FPMathOperators. The same limitation affects constrained fcmp. |
| 8609 | Condition = getFCmpCondCode(Pred: CondCode); |
| 8610 | if (TM.Options.NoNaNsFPMath) |
| 8611 | Condition = getFCmpCodeWithoutNaN(CC: Condition); |
| 8612 | } else { |
| 8613 | Condition = getICmpCondCode(Pred: CondCode); |
| 8614 | } |
| 8615 | |
| 8616 | SDValue Op1 = getValue(V: VPIntrin.getOperand(i_nocapture: 0)); |
| 8617 | SDValue Op2 = getValue(V: VPIntrin.getOperand(i_nocapture: 1)); |
| 8618 | // #2 is the condition code |
| 8619 | SDValue MaskOp = getValue(V: VPIntrin.getOperand(i_nocapture: 3)); |
| 8620 | SDValue EVL = getValue(V: VPIntrin.getOperand(i_nocapture: 4)); |
| 8621 | MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy(); |
| 8622 | assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) && |
| 8623 | "Unexpected target EVL type" ); |
| 8624 | EVL = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: EVLParamVT, Operand: EVL); |
| 8625 | |
| 8626 | EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DL: DAG.getDataLayout(), |
| 8627 | Ty: VPIntrin.getType()); |
| 8628 | setValue(V: &VPIntrin, |
| 8629 | NewN: DAG.getSetCCVP(DL, VT: DestVT, LHS: Op1, RHS: Op2, Cond: Condition, Mask: MaskOp, EVL)); |
| 8630 | } |
| 8631 | |
| 8632 | void SelectionDAGBuilder::visitVectorPredicationIntrinsic( |
| 8633 | const VPIntrinsic &VPIntrin) { |
| 8634 | SDLoc DL = getCurSDLoc(); |
| 8635 | unsigned Opcode = getISDForVPIntrinsic(VPIntrin); |
| 8636 | |
| 8637 | auto IID = VPIntrin.getIntrinsicID(); |
| 8638 | |
| 8639 | if (const auto *CmpI = dyn_cast<VPCmpIntrinsic>(Val: &VPIntrin)) |
| 8640 | return visitVPCmp(VPIntrin: *CmpI); |
| 8641 | |
| 8642 | SmallVector<EVT, 4> ValueVTs; |
| 8643 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 8644 | ComputeValueVTs(TLI, DL: DAG.getDataLayout(), Ty: VPIntrin.getType(), ValueVTs); |
| 8645 | SDVTList VTs = DAG.getVTList(VTs: ValueVTs); |
| 8646 | |
| 8647 | auto EVLParamPos = VPIntrinsic::getVectorLengthParamPos(IntrinsicID: IID); |
| 8648 | |
| 8649 | MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy(); |
| 8650 | assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) && |
| 8651 | "Unexpected target EVL type" ); |
| 8652 | |
| 8653 | // Request operands. |
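| | // The EVL operand is an i32 in IR; widen it to the target's preferred |
| | // explicit-vector-length type on the way in. |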
| 8654 | SmallVector<SDValue, 7> OpValues; |
| 8655 | for (unsigned I = 0; I < VPIntrin.arg_size(); ++I) { |
| 8656 | auto Op = getValue(V: VPIntrin.getArgOperand(i: I)); |
| 8657 | if (I == EVLParamPos) |
| 8658 | Op = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: EVLParamVT, Operand: Op); |
| 8659 | OpValues.push_back(Elt: Op); |
| 8660 | } |
| 8661 | |
| 8662 | switch (Opcode) { |
| 8663 | default: { |
| 8664 | SDNodeFlags SDFlags; |
| 8665 | if (auto *FPMO = dyn_cast<FPMathOperator>(Val: &VPIntrin)) |
| 8666 | SDFlags.copyFMF(FPMO: *FPMO); |
| 8667 | SDValue Result = DAG.getNode(Opcode, DL, VTList: VTs, Ops: OpValues, Flags: SDFlags); |
| 8668 | setValue(V: &VPIntrin, NewN: Result); |
| 8669 | break; |
| 8670 | } |
| 8671 | case ISD::VP_LOAD: |
| 8672 | visitVPLoad(VPIntrin, VT: ValueVTs[0], OpValues); |
| 8673 | break; |
| 8674 | case ISD::VP_GATHER: |
| 8675 | visitVPGather(VPIntrin, VT: ValueVTs[0], OpValues); |
| 8676 | break; |
| 8677 | case ISD::EXPERIMENTAL_VP_STRIDED_LOAD: |
| 8678 | visitVPStridedLoad(VPIntrin, VT: ValueVTs[0], OpValues); |
| 8679 | break; |
| 8680 | case ISD::VP_STORE: |
| 8681 | visitVPStore(VPIntrin, OpValues); |
| 8682 | break; |
| 8683 | case ISD::VP_SCATTER: |
| 8684 | visitVPScatter(VPIntrin, OpValues); |
| 8685 | break; |
| 8686 | case ISD::EXPERIMENTAL_VP_STRIDED_STORE: |
| 8687 | visitVPStridedStore(VPIntrin, OpValues); |
| 8688 | break; |
| 8689 | case ISD::VP_FMULADD: { |
| 8690 | assert(OpValues.size() == 5 && "Unexpected number of operands"); |
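| | // Mirror the fmuladd policy: emit a fused VP_FMA when fusion is allowed and |
| | // profitable, otherwise split into VP_FMUL followed by VP_FADD, threading |
| | // the mask (OpValues[3]) and EVL (OpValues[4]) through both halves. |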
| 8691 | SDNodeFlags SDFlags; |
| 8692 | if (auto *FPMO = dyn_cast<FPMathOperator>(Val: &VPIntrin)) |
| 8693 | SDFlags.copyFMF(FPMO: *FPMO); |
| 8694 | if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict && |
| 8695 | TLI.isFMAFasterThanFMulAndFAdd(MF: DAG.getMachineFunction(), ValueVTs[0])) { |
| 8696 | setValue(V: &VPIntrin, NewN: DAG.getNode(Opcode: ISD::VP_FMA, DL, VTList: VTs, Ops: OpValues, Flags: SDFlags)); |
| 8697 | } else { |
| 8698 | SDValue Mul = DAG.getNode( |
| 8699 | Opcode: ISD::VP_FMUL, DL, VTList: VTs, |
| 8700 | Ops: {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, Flags: SDFlags); |
| 8701 | SDValue Add = |
| 8702 | DAG.getNode(Opcode: ISD::VP_FADD, DL, VTList: VTs, |
| 8703 | Ops: {Mul, OpValues[2], OpValues[3], OpValues[4]}, Flags: SDFlags); |
| 8704 | setValue(V: &VPIntrin, NewN: Add); |
| 8705 | } |
| 8706 | break; |
| 8707 | } |
| 8708 | case ISD::VP_IS_FPCLASS: { |
| 8709 | const DataLayout DLayout = DAG.getDataLayout(); |
| 8710 | EVT DestVT = TLI.getValueType(DL: DLayout, Ty: VPIntrin.getType()); |
| 8711 | auto Constant = OpValues[1]->getAsZExtVal(); |
| 8712 | SDValue Check = DAG.getTargetConstant(Val: Constant, DL, VT: MVT::i32); |
| 8713 | SDValue V = DAG.getNode(Opcode: ISD::VP_IS_FPCLASS, DL, VT: DestVT, |
| 8714 | Ops: {OpValues[0], Check, OpValues[2], OpValues[3]}); |
| 8715 | setValue(V: &VPIntrin, NewN: V); |
| 8716 | return; |
| 8717 | } |
| 8718 | case ISD::VP_INTTOPTR: { |
| 8719 | SDValue N = OpValues[0]; |
| 8720 | EVT DestVT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: VPIntrin.getType()); |
| 8721 | EVT PtrMemVT = TLI.getMemValueType(DL: DAG.getDataLayout(), Ty: VPIntrin.getType()); |
| 8722 | N = DAG.getVPPtrExtOrTrunc(DL: getCurSDLoc(), VT: DestVT, Op: N, Mask: OpValues[1], |
| 8723 | EVL: OpValues[2]); |
| 8724 | N = DAG.getVPZExtOrTrunc(DL: getCurSDLoc(), VT: PtrMemVT, Op: N, Mask: OpValues[1], |
| 8725 | EVL: OpValues[2]); |
| 8726 | setValue(V: &VPIntrin, NewN: N); |
| 8727 | break; |
| 8728 | } |
| 8729 | case ISD::VP_PTRTOINT: { |
| 8730 | SDValue N = OpValues[0]; |
| 8731 | EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DL: DAG.getDataLayout(), |
| 8732 | Ty: VPIntrin.getType()); |
| 8733 | EVT PtrMemVT = TLI.getMemValueType(DL: DAG.getDataLayout(), |
| 8734 | Ty: VPIntrin.getOperand(i_nocapture: 0)->getType()); |
| 8735 | N = DAG.getVPPtrExtOrTrunc(DL: getCurSDLoc(), VT: PtrMemVT, Op: N, Mask: OpValues[1], |
| 8736 | EVL: OpValues[2]); |
| 8737 | N = DAG.getVPZExtOrTrunc(DL: getCurSDLoc(), VT: DestVT, Op: N, Mask: OpValues[1], |
| 8738 | EVL: OpValues[2]); |
| 8739 | setValue(V: &VPIntrin, NewN: N); |
| 8740 | break; |
| 8741 | } |
| 8742 | case ISD::VP_ABS: |
| 8743 | case ISD::VP_CTLZ: |
| 8744 | case ISD::VP_CTLZ_ZERO_UNDEF: |
| 8745 | case ISD::VP_CTTZ: |
| 8746 | case ISD::VP_CTTZ_ZERO_UNDEF: |
| 8747 | case ISD::VP_CTTZ_ELTS_ZERO_UNDEF: |
| 8748 | case ISD::VP_CTTZ_ELTS: { |
| 8749 | SDValue Result = |
| 8750 | DAG.getNode(Opcode, DL, VTList: VTs, Ops: {OpValues[0], OpValues[2], OpValues[3]}); |
| 8751 | setValue(V: &VPIntrin, NewN: Result); |
| 8752 | break; |
| 8753 | } |
| 8754 | } |
| 8755 | } |
| 8756 | |
| 8757 | SDValue SelectionDAGBuilder::lowerStartEH(SDValue Chain, |
| 8758 | const BasicBlock *EHPadBB, |
| 8759 | MCSymbol *&BeginLabel) { |
| 8760 | MachineFunction &MF = DAG.getMachineFunction(); |
| 8761 | |
| 8762 | // Insert a label before the invoke call to mark the try range. This can be |
| 8763 | // used to detect deletion of the invoke via the MachineModuleInfo. |
| 8764 | BeginLabel = MF.getContext().createTempSymbol(); |
| 8765 | |
| 8766 | // For SjLj, keep track of which landing pads go with which invokes |
| 8767 | // so as to maintain the ordering of pads in the LSDA. |
| 8768 | unsigned CallSiteIndex = FuncInfo.getCurrentCallSite(); |
| 8769 | if (CallSiteIndex) { |
| 8770 | MF.setCallSiteBeginLabel(BeginLabel, Site: CallSiteIndex); |
| 8771 | LPadToCallSiteMap[FuncInfo.getMBB(BB: EHPadBB)].push_back(Elt: CallSiteIndex); |
| 8772 | |
| 8773 | // Now that the call site is handled, stop tracking it. |
| 8774 | FuncInfo.setCurrentCallSite(0); |
| 8775 | } |
| 8776 | |
| 8777 | return DAG.getEHLabel(dl: getCurSDLoc(), Root: Chain, Label: BeginLabel); |
| 8778 | } |
| 8779 | |
| 8780 | SDValue SelectionDAGBuilder::lowerEndEH(SDValue Chain, const InvokeInst *II, |
| 8781 | const BasicBlock *EHPadBB, |
| 8782 | MCSymbol *BeginLabel) { |
| 8783 | assert(BeginLabel && "BeginLabel should've been set"); |
| 8784 | |
| 8785 | MachineFunction &MF = DAG.getMachineFunction(); |
| 8786 | |
| 8787 | // Insert a label at the end of the invoke call to mark the try range. This |
| 8788 | // can be used to detect deletion of the invoke via the MachineModuleInfo. |
| 8789 | MCSymbol *EndLabel = MF.getContext().createTempSymbol(); |
| 8790 | Chain = DAG.getEHLabel(dl: getCurSDLoc(), Root: Chain, Label: EndLabel); |
| 8791 | |
| 8792 | // Inform MachineModuleInfo of range. |
| 8793 | auto Pers = classifyEHPersonality(Pers: FuncInfo.Fn->getPersonalityFn()); |
| 8794 | // Some platforms (e.g. wasm) use funclet-style IR but do not actually use |
| 8795 | // outlined funclets or their LSDA info style. |
| 8796 | if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) { |
| 8797 | assert(II && "II should've been set"); |
| 8798 | WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo(); |
| 8799 | EHInfo->addIPToStateRange(II, InvokeBegin: BeginLabel, InvokeEnd: EndLabel); |
| 8800 | } else if (!isScopedEHPersonality(Pers)) { |
| 8801 | assert(EHPadBB); |
| 8802 | MF.addInvoke(LandingPad: FuncInfo.getMBB(BB: EHPadBB), BeginLabel, EndLabel); |
| 8803 | } |
| 8804 | |
| 8805 | return Chain; |
| 8806 | } |
| 8807 | |
| 8808 | std::pair<SDValue, SDValue> |
| 8809 | SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI, |
| 8810 | const BasicBlock *EHPadBB) { |
| 8811 | MCSymbol *BeginLabel = nullptr; |
| 8812 | |
| 8813 | if (EHPadBB) { |
| 8814 | // Both PendingLoads and PendingExports must be flushed here; |
| 8815 | // this call might not return. |
| 8816 | (void)getRoot(); |
| 8817 | DAG.setRoot(lowerStartEH(Chain: getControlRoot(), EHPadBB, BeginLabel)); |
| 8818 | CLI.setChain(getRoot()); |
| 8819 | } |
| 8820 | |
| 8821 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 8822 | std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI); |
| 8823 | |
| 8824 | assert((CLI.IsTailCall || Result.second.getNode()) && |
| 8825 | "Non-null chain expected with non-tail call!"); |
| 8826 | assert((Result.second.getNode() || !Result.first.getNode()) && |
| 8827 | "Null value expected with tail call!"); |
| 8828 | |
| 8829 | if (!Result.second.getNode()) { |
| 8830 | // As a special case, a null chain means that a tail call has been emitted |
| 8831 | // and the DAG root is already updated. |
| 8832 | HasTailCall = true; |
| 8833 | |
| 8834 | // Since there's no actual continuation from this block, nothing can be |
| 8835 | // relying on us setting vregs for them. |
| 8836 | PendingExports.clear(); |
| 8837 | } else { |
| 8838 | DAG.setRoot(Result.second); |
| 8839 | } |
| 8840 | |
| 8841 | if (EHPadBB) { |
| 8842 | DAG.setRoot(lowerEndEH(Chain: getRoot(), II: cast_or_null<InvokeInst>(Val: CLI.CB), EHPadBB, |
| 8843 | BeginLabel)); |
| 8844 | Result.second = getRoot(); |
| 8845 | } |
| 8846 | |
| 8847 | return Result; |
| 8848 | } |
| 8849 | |
| 8850 | void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee, |
| 8851 | bool isTailCall, bool isMustTailCall, |
| 8852 | const BasicBlock *EHPadBB, |
| 8853 | const TargetLowering::PtrAuthInfo *PAI) { |
| 8854 | auto &DL = DAG.getDataLayout(); |
| 8855 | FunctionType *FTy = CB.getFunctionType(); |
| 8856 | Type *RetTy = CB.getType(); |
| 8857 | |
| 8858 | TargetLowering::ArgListTy Args; |
| 8859 | Args.reserve(n: CB.arg_size()); |
| 8860 | |
| 8861 | const Value *SwiftErrorVal = nullptr; |
| 8862 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 8863 | |
| 8864 | if (isTailCall) { |
| 8865 | // Avoid emitting tail calls in functions with the disable-tail-calls |
| 8866 | // attribute. |
| 8867 | auto *Caller = CB.getParent()->getParent(); |
| 8868 | if (Caller->getFnAttribute(Kind: "disable-tail-calls").getValueAsString() == |
| 8869 | "true" && !isMustTailCall) |
| 8870 | isTailCall = false; |
| 8871 | |
| 8872 | // We can't tail call inside a function with a swifterror argument. Lowering |
| 8873 | // does not support this yet. It would have to move into the swifterror |
| 8874 | // register before the call. |
| 8875 | if (TLI.supportSwiftError() && |
| 8876 | Caller->getAttributes().hasAttrSomewhere(Kind: Attribute::SwiftError)) |
| 8877 | isTailCall = false; |
| 8878 | } |
| 8879 | |
| 8880 | for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) { |
| 8881 | TargetLowering::ArgListEntry Entry; |
| 8882 | const Value *V = *I; |
| 8883 | |
| 8884 | // Skip empty types |
| 8885 | if (V->getType()->isEmptyTy()) |
| 8886 | continue; |
| 8887 | |
| 8888 | SDValue ArgNode = getValue(V); |
| 8889 | Entry.Node = ArgNode; Entry.Ty = V->getType(); |
| 8890 | |
| 8891 | Entry.setAttributes(Call: &CB, ArgIdx: I - CB.arg_begin()); |
| 8892 | |
| 8893 | // Use swifterror virtual register as input to the call. |
| 8894 | if (Entry.IsSwiftError && TLI.supportSwiftError()) { |
| 8895 | SwiftErrorVal = V; |
| 8896 | // Find the virtual register for the actual swifterror argument and use |
| 8897 | // it, rather than the Value itself, as the call operand. |
| 8898 | Entry.Node = |
| 8899 | DAG.getRegister(Reg: SwiftError.getOrCreateVRegUseAt(&CB, FuncInfo.MBB, V), |
| 8900 | VT: EVT(TLI.getPointerTy(DL))); |
| 8901 | } |
| 8902 | |
| 8903 | Args.push_back(x: Entry); |
| 8904 | |
| 8905 | // If we have an explicit sret argument that is an Instruction (i.e., it |
| 8906 | // might point to function-local memory), we can't meaningfully tail-call. |
| 8907 | if (Entry.IsSRet && isa<Instruction>(Val: V)) |
| 8908 | isTailCall = false; |
| 8909 | } |
| 8910 | |
| 8911 | // If call site has a cfguardtarget operand bundle, create and add an |
| 8912 | // additional ArgListEntry. |
| 8913 | if (auto Bundle = CB.getOperandBundle(ID: LLVMContext::OB_cfguardtarget)) { |
| 8914 | TargetLowering::ArgListEntry Entry; |
| 8915 | Value *V = Bundle->Inputs[0]; |
| 8916 | SDValue ArgNode = getValue(V); |
| 8917 | Entry.Node = ArgNode; |
| 8918 | Entry.Ty = V->getType(); |
| 8919 | Entry.IsCFGuardTarget = true; |
| 8920 | Args.push_back(x: Entry); |
| 8921 | } |
| 8922 | |
| 8923 | // Check if target-independent constraints permit a tail call here. |
| 8924 | // Target-dependent constraints are checked within TLI->LowerCallTo. |
| 8925 | if (isTailCall && !isInTailCallPosition(Call: CB, TM: DAG.getTarget())) |
| 8926 | isTailCall = false; |
| 8927 | |
| 8928 | // Disable tail calls if there is a swifterror argument. Targets have not |
| 8929 | // been updated to support tail calls. |
| 8930 | if (TLI.supportSwiftError() && SwiftErrorVal) |
| 8931 | isTailCall = false; |
| 8932 | |
| 8933 | ConstantInt *CFIType = nullptr; |
| 8934 | if (CB.isIndirectCall()) { |
| 8935 | if (auto Bundle = CB.getOperandBundle(ID: LLVMContext::OB_kcfi)) { |
| 8936 | if (!TLI.supportKCFIBundles()) |
| 8937 | report_fatal_error( |
| 8938 | reason: "Target doesn't support calls with kcfi operand bundles." ); |
| 8939 | CFIType = cast<ConstantInt>(Val: Bundle->Inputs[0]); |
| 8940 | assert(CFIType->getType()->isIntegerTy(32) && "Invalid CFI type"); |
| 8941 | } |
| 8942 | } |
| 8943 | |
| 8944 | SDValue ConvControlToken; |
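| | // Carry the convergence control token, if any, into the call lowering. |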
| 8945 | if (auto Bundle = CB.getOperandBundle(ID: LLVMContext::OB_convergencectrl)) { |
| 8946 | auto *Token = Bundle->Inputs[0].get(); |
| 8947 | ConvControlToken = getValue(V: Token); |
| 8948 | } |
| 8949 | |
| 8950 | TargetLowering::CallLoweringInfo CLI(DAG); |
| 8951 | CLI.setDebugLoc(getCurSDLoc()) |
| 8952 | .setChain(getRoot()) |
| 8953 | .setCallee(ResultType: RetTy, FTy, Target: Callee, ArgsList: std::move(Args), Call: CB) |
| 8954 | .setTailCall(isTailCall) |
| 8955 | .setConvergent(CB.isConvergent()) |
| 8956 | .setIsPreallocated( |
| 8957 | CB.countOperandBundlesOfType(ID: LLVMContext::OB_preallocated) != 0) |
| 8958 | .setCFIType(CFIType) |
| 8959 | .setConvergenceControlToken(ConvControlToken); |
| 8960 | |
| 8961 | // Set the pointer authentication info if we have it. |
| 8962 | if (PAI) { |
| 8963 | if (!TLI.supportPtrAuthBundles()) |
| 8964 | report_fatal_error( |
| 8965 | reason: "This target doesn't support calls with ptrauth operand bundles." ); |
| 8966 | CLI.setPtrAuth(*PAI); |
| 8967 | } |
| 8968 | |
| 8969 | std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB); |
| 8970 | |
| 8971 | if (Result.first.getNode()) { |
| 8972 | Result.first = lowerRangeToAssertZExt(DAG, I: CB, Op: Result.first); |
| 8973 | setValue(V: &CB, NewN: Result.first); |
| 8974 | } |
| 8975 | |
| 8976 | // The last element of CLI.InVals has the SDValue for swifterror return. |
| 8977 | // Here we copy it to a virtual register and update SwiftErrorMap for |
| 8978 | // book-keeping. |
| 8979 | if (SwiftErrorVal && TLI.supportSwiftError()) { |
| 8980 | // Get the last element of InVals. |
| 8981 | SDValue Src = CLI.InVals.back(); |
| 8982 | Register VReg = |
| 8983 | SwiftError.getOrCreateVRegDefAt(&CB, FuncInfo.MBB, SwiftErrorVal); |
| 8984 | SDValue CopyNode = CLI.DAG.getCopyToReg(Chain: Result.second, dl: CLI.DL, Reg: VReg, N: Src); |
| 8985 | DAG.setRoot(CopyNode); |
| 8986 | } |
| 8987 | } |
| 8988 | |
| 8989 | static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, |
| 8990 | SelectionDAGBuilder &Builder) { |
| 8991 | // Check to see if this load can be trivially constant folded, e.g. if the |
| 8992 | // input is from a string literal. |
| 8993 | if (const Constant *LoadInput = dyn_cast<Constant>(Val: PtrVal)) { |
| 8994 | // Cast pointer to the type we really want to load. |
| 8995 | Type *LoadTy = |
| 8996 | Type::getIntNTy(C&: PtrVal->getContext(), N: LoadVT.getScalarSizeInBits()); |
| 8997 | if (LoadVT.isVector()) |
| 8998 | LoadTy = FixedVectorType::get(ElementType: LoadTy, NumElts: LoadVT.getVectorNumElements()); |
| 8999 | if (const Constant *LoadCst = |
| 9000 | ConstantFoldLoadFromConstPtr(C: const_cast<Constant *>(LoadInput), |
| 9001 | Ty: LoadTy, DL: Builder.DAG.getDataLayout())) |
| 9002 | return Builder.getValue(V: LoadCst); |
| 9003 | } |
| 9004 | |
| 9005 | // Otherwise, we have to emit the load. If the pointer is to unfoldable but |
| 9006 | // still constant memory, the input chain can be the entry node. |
| 9007 | SDValue Root; |
| 9008 | bool ConstantMemory = false; |
| 9009 | |
| 9010 | // Do not serialize (non-volatile) loads of constant memory with anything. |
| 9011 | if (Builder.BatchAA && Builder.BatchAA->pointsToConstantMemory(P: PtrVal)) { |
| 9012 | Root = Builder.DAG.getEntryNode(); |
| 9013 | ConstantMemory = true; |
| 9014 | } else { |
| 9015 | // Do not serialize non-volatile loads against each other. |
| 9016 | Root = Builder.DAG.getRoot(); |
| 9017 | } |
| 9018 | |
| 9019 | SDValue Ptr = Builder.getValue(V: PtrVal); |
| 9020 | SDValue LoadVal = |
| 9021 | Builder.DAG.getLoad(VT: LoadVT, dl: Builder.getCurSDLoc(), Chain: Root, Ptr, |
| 9022 | PtrInfo: MachinePointerInfo(PtrVal), Alignment: Align(1)); |
| 9023 | |
| 9024 | if (!ConstantMemory) |
| 9025 | Builder.PendingLoads.push_back(Elt: LoadVal.getValue(R: 1)); |
| 9026 | return LoadVal; |
| 9027 | } |
| 9028 | |
| 9029 | /// Record the value for an instruction that produces an integer result, |
| 9030 | /// converting the type where necessary. |
| 9031 | void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I, |
| 9032 | SDValue Value, |
| 9033 | bool IsSigned) { |
| 9034 | EVT VT = DAG.getTargetLoweringInfo().getValueType(DL: DAG.getDataLayout(), |
| 9035 | Ty: I.getType(), AllowUnknown: true); |
| 9036 | Value = DAG.getExtOrTrunc(IsSigned, Op: Value, DL: getCurSDLoc(), VT); |
| 9037 | setValue(V: &I, NewN: Value); |
| 9038 | } |
| 9039 | |
| 9040 | /// See if we can lower a memcmp/bcmp call into an optimized form. If so, return |
| 9041 | /// true and lower it. Otherwise return false, and it will be lowered like a |
| 9042 | /// normal call. |
| 9043 | /// The caller already checked that \p I calls the appropriate LibFunc with a |
| 9044 | /// correct prototype. |
| 9045 | bool SelectionDAGBuilder::visitMemCmpBCmpCall(const CallInst &I) { |
| 9046 | const Value *LHS = I.getArgOperand(i: 0), *RHS = I.getArgOperand(i: 1); |
| 9047 | const Value *Size = I.getArgOperand(i: 2); |
| 9048 | const ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(Val: getValue(V: Size)); |
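| | // memcmp(x, y, 0) and bcmp(x, y, 0) trivially evaluate to 0. |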
| 9049 | if (CSize && CSize->getZExtValue() == 0) { |
| 9050 | EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DL: DAG.getDataLayout(), |
| 9051 | Ty: I.getType(), AllowUnknown: true); |
| 9052 | setValue(V: &I, NewN: DAG.getConstant(Val: 0, DL: getCurSDLoc(), VT: CallVT)); |
| 9053 | return true; |
| 9054 | } |
| 9055 | |
| 9056 | const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); |
| 9057 | std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp( |
| 9058 | DAG, dl: getCurSDLoc(), Chain: DAG.getRoot(), Op1: getValue(V: LHS), Op2: getValue(V: RHS), |
| 9059 | Op3: getValue(V: Size), Op1PtrInfo: MachinePointerInfo(LHS), Op2PtrInfo: MachinePointerInfo(RHS)); |
| 9060 | if (Res.first.getNode()) { |
| 9061 | processIntegerCallValue(I, Value: Res.first, IsSigned: true); |
| 9062 | PendingLoads.push_back(Elt: Res.second); |
| 9063 | return true; |
| 9064 | } |
| 9065 | |
| 9066 | // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0 |
| 9067 | // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0 |
| 9068 | if (!CSize || !isOnlyUsedInZeroEqualityComparison(CxtI: &I)) |
| 9069 | return false; |
| 9070 | |
| 9071 | // If the target has a fast compare for the given size, it will return a |
| 9072 | // preferred load type for that size. Require that the load VT is legal and |
| 9073 | // that the target supports unaligned loads of that type. Otherwise, return |
| 9074 | // INVALID. |
| 9075 | auto hasFastLoadsAndCompare = [&](unsigned NumBits) { |
| 9076 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 9077 | MVT LVT = TLI.hasFastEqualityCompare(NumBits); |
| 9078 | if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) { |
| 9079 | // TODO: Handle 5 byte compare as 4-byte + 1 byte. |
| 9080 | // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads. |
| 9081 | // TODO: Check alignment of src and dest ptrs. |
| 9082 | unsigned DstAS = LHS->getType()->getPointerAddressSpace(); |
| 9083 | unsigned SrcAS = RHS->getType()->getPointerAddressSpace(); |
| 9084 | if (!TLI.isTypeLegal(VT: LVT) || |
| 9085 | !TLI.allowsMisalignedMemoryAccesses(LVT, AddrSpace: SrcAS) || |
| 9086 | !TLI.allowsMisalignedMemoryAccesses(LVT, AddrSpace: DstAS)) |
| 9087 | LVT = MVT::INVALID_SIMPLE_VALUE_TYPE; |
| 9088 | } |
| 9089 | |
| 9090 | return LVT; |
| 9091 | }; |
| 9092 | |
| 9093 | // This turns into unaligned loads. We only do this if the target natively |
| 9094 | // supports the MVT we'll be loading or if it is small enough (<= 4) that |
| 9095 | // we'll only produce a small number of byte loads. |
| 9096 | MVT LoadVT; |
| 9097 | unsigned NumBitsToCompare = CSize->getZExtValue() * 8; |
| 9098 | switch (NumBitsToCompare) { |
| 9099 | default: |
| 9100 | return false; |
| 9101 | case 16: |
| 9102 | LoadVT = MVT::i16; |
| 9103 | break; |
| 9104 | case 32: |
| 9105 | LoadVT = MVT::i32; |
| 9106 | break; |
| 9107 | case 64: |
| 9108 | case 128: |
| 9109 | case 256: |
| 9110 | LoadVT = hasFastLoadsAndCompare(NumBitsToCompare); |
| 9111 | break; |
| 9112 | } |
| 9113 | |
| 9114 | if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE) |
| 9115 | return false; |
| 9116 | |
| 9117 | SDValue LoadL = getMemCmpLoad(PtrVal: LHS, LoadVT, Builder&: *this); |
| 9118 | SDValue LoadR = getMemCmpLoad(PtrVal: RHS, LoadVT, Builder&: *this); |
| 9119 | |
| 9120 | // Bitcast to a wide integer type if the loads are vectors. |
| 9121 | if (LoadVT.isVector()) { |
| 9122 | EVT CmpVT = EVT::getIntegerVT(Context&: LHS->getContext(), BitWidth: LoadVT.getSizeInBits()); |
| 9123 | LoadL = DAG.getBitcast(VT: CmpVT, V: LoadL); |
| 9124 | LoadR = DAG.getBitcast(VT: CmpVT, V: LoadR); |
| 9125 | } |
| 9126 | |
| 9127 | SDValue Cmp = DAG.getSetCC(DL: getCurSDLoc(), VT: MVT::i1, LHS: LoadL, RHS: LoadR, Cond: ISD::SETNE); |
| 9128 | processIntegerCallValue(I, Value: Cmp, IsSigned: false); |
| 9129 | return true; |
| 9130 | } |
| 9131 | |
| 9132 | /// See if we can lower a memchr call into an optimized form. If so, return |
| 9133 | /// true and lower it. Otherwise return false, and it will be lowered like a |
| 9134 | /// normal call. |
| 9135 | /// The caller already checked that \p I calls the appropriate LibFunc with a |
| 9136 | /// correct prototype. |
| 9137 | bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) { |
| 9138 | const Value *Src = I.getArgOperand(i: 0); |
| 9139 | const Value *Char = I.getArgOperand(i: 1); |
| 9140 | const Value *Length = I.getArgOperand(i: 2); |
| 9141 | |
| 9142 | const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); |
| 9143 | std::pair<SDValue, SDValue> Res = |
| 9144 | TSI.EmitTargetCodeForMemchr(DAG, dl: getCurSDLoc(), Chain: DAG.getRoot(), |
| 9145 | Src: getValue(V: Src), Char: getValue(V: Char), Length: getValue(V: Length), |
| 9146 | SrcPtrInfo: MachinePointerInfo(Src)); |
| 9147 | if (Res.first.getNode()) { |
| 9148 | setValue(V: &I, NewN: Res.first); |
| 9149 | PendingLoads.push_back(Elt: Res.second); |
| 9150 | return true; |
| 9151 | } |
| 9152 | |
| 9153 | return false; |
| 9154 | } |
| 9155 | |
| 9156 | /// See if we can lower a mempcpy call into an optimized form. If so, return |
| 9157 | /// true and lower it. Otherwise return false, and it will be lowered like a |
| 9158 | /// normal call. |
| 9159 | /// The caller already checked that \p I calls the appropriate LibFunc with a |
| 9160 | /// correct prototype. |
| 9161 | bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) { |
| 9162 | SDValue Dst = getValue(V: I.getArgOperand(i: 0)); |
| 9163 | SDValue Src = getValue(V: I.getArgOperand(i: 1)); |
| 9164 | SDValue Size = getValue(V: I.getArgOperand(i: 2)); |
| 9165 | |
| 9166 | Align DstAlign = DAG.InferPtrAlign(Ptr: Dst).valueOrOne(); |
| 9167 | Align SrcAlign = DAG.InferPtrAlign(Ptr: Src).valueOrOne(); |
| 9168 | // DAG::getMemcpy needs Alignment to be defined. |
| 9169 | Align Alignment = std::min(a: DstAlign, b: SrcAlign); |
| 9170 | |
| 9171 | SDLoc sdl = getCurSDLoc(); |
| 9172 | |
| 9173 | // The memcpy must not be emitted as a tail call here, because the return |
| 9174 | // pointer still needs to be adjusted by the size of the copied memory |
| 9175 | // afterwards. |
| 9176 | SDValue Root = getMemoryRoot(); |
| 9177 | SDValue MC = DAG.getMemcpy( |
| 9178 | Chain: Root, dl: sdl, Dst, Src, Size, Alignment, isVol: false, AlwaysInline: false, /*CI=*/nullptr, |
| 9179 | OverrideTailCall: std::nullopt, DstPtrInfo: MachinePointerInfo(I.getArgOperand(i: 0)), |
| 9180 | SrcPtrInfo: MachinePointerInfo(I.getArgOperand(i: 1)), AAInfo: I.getAAMetadata()); |
| 9181 | assert(MC.getNode() != nullptr && |
| 9182 | "** memcpy should not be lowered as TailCall in mempcpy context **" ); |
| 9183 | DAG.setRoot(MC); |
| 9184 | |
| 9185 | // Check if Size needs to be truncated or extended. |
| 9186 | Size = DAG.getSExtOrTrunc(Op: Size, DL: sdl, VT: Dst.getValueType()); |
| 9187 | |
| 9188 | // Adjust return pointer to point just past the last dst byte. |
| 9189 | SDValue DstPlusSize = DAG.getMemBasePlusOffset(Base: Dst, Offset: Size, DL: sdl); |
| 9190 | setValue(V: &I, NewN: DstPlusSize); |
| 9191 | return true; |
| 9192 | } |
| 9193 | |
| 9194 | /// See if we can lower a strcpy call into an optimized form. If so, return |
| 9195 | /// true and lower it, otherwise return false and it will be lowered like a |
| 9196 | /// normal call. |
| 9197 | /// The caller already checked that \p I calls the appropriate LibFunc with a |
| 9198 | /// correct prototype. |
| 9199 | bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) { |
| 9200 | const Value *Arg0 = I.getArgOperand(i: 0), *Arg1 = I.getArgOperand(i: 1); |
| 9201 | |
| 9202 | const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); |
| 9203 | std::pair<SDValue, SDValue> Res = |
| 9204 | TSI.EmitTargetCodeForStrcpy(DAG, DL: getCurSDLoc(), Chain: getRoot(), |
| 9205 | Dest: getValue(V: Arg0), Src: getValue(V: Arg1), |
| 9206 | DestPtrInfo: MachinePointerInfo(Arg0), |
| 9207 | SrcPtrInfo: MachinePointerInfo(Arg1), isStpcpy); |
| 9208 | if (Res.first.getNode()) { |
| 9209 | setValue(V: &I, NewN: Res.first); |
| 9210 | DAG.setRoot(Res.second); |
| 9211 | return true; |
| 9212 | } |
| 9213 | |
| 9214 | return false; |
| 9215 | } |
| 9216 | |
| 9217 | /// See if we can lower a strcmp call into an optimized form. If so, return |
| 9218 | /// true and lower it, otherwise return false and it will be lowered like a |
| 9219 | /// normal call. |
| 9220 | /// The caller already checked that \p I calls the appropriate LibFunc with a |
| 9221 | /// correct prototype. |
| 9222 | bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) { |
| 9223 | const Value *Arg0 = I.getArgOperand(i: 0), *Arg1 = I.getArgOperand(i: 1); |
| 9224 | |
| 9225 | const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); |
| 9226 | std::pair<SDValue, SDValue> Res = |
| 9227 | TSI.EmitTargetCodeForStrcmp(DAG, dl: getCurSDLoc(), Chain: DAG.getRoot(), |
| 9228 | Op1: getValue(V: Arg0), Op2: getValue(V: Arg1), |
| 9229 | Op1PtrInfo: MachinePointerInfo(Arg0), |
| 9230 | Op2PtrInfo: MachinePointerInfo(Arg1)); |
| 9231 | if (Res.first.getNode()) { |
| 9232 | processIntegerCallValue(I, Value: Res.first, IsSigned: true); |
| 9233 | PendingLoads.push_back(Elt: Res.second); |
| 9234 | return true; |
| 9235 | } |
| 9236 | |
| 9237 | return false; |
| 9238 | } |
| 9239 | |
| 9240 | /// See if we can lower a strlen call into an optimized form. If so, return |
| 9241 | /// true and lower it, otherwise return false and it will be lowered like a |
| 9242 | /// normal call. |
| 9243 | /// The caller already checked that \p I calls the appropriate LibFunc with a |
| 9244 | /// correct prototype. |
| 9245 | bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) { |
| 9246 | const Value *Arg0 = I.getArgOperand(i: 0); |
| 9247 | |
| 9248 | const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); |
| 9249 | std::pair<SDValue, SDValue> Res = |
| 9250 | TSI.EmitTargetCodeForStrlen(DAG, DL: getCurSDLoc(), Chain: DAG.getRoot(), |
| 9251 | Src: getValue(V: Arg0), SrcPtrInfo: MachinePointerInfo(Arg0)); |
| 9252 | if (Res.first.getNode()) { |
| 9253 | processIntegerCallValue(I, Value: Res.first, IsSigned: false); |
| 9254 | PendingLoads.push_back(Elt: Res.second); |
| 9255 | return true; |
| 9256 | } |
| 9257 | |
| 9258 | return false; |
| 9259 | } |
| 9260 | |
| 9261 | /// See if we can lower a strnlen call into an optimized form. If so, return |
| 9262 | /// true and lower it, otherwise return false and it will be lowered like a |
| 9263 | /// normal call. |
| 9264 | /// The caller already checked that \p I calls the appropriate LibFunc with a |
| 9265 | /// correct prototype. |
| 9266 | bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) { |
| 9267 | const Value *Arg0 = I.getArgOperand(i: 0), *Arg1 = I.getArgOperand(i: 1); |
| 9268 | |
| 9269 | const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); |
| 9270 | std::pair<SDValue, SDValue> Res = |
| 9271 | TSI.EmitTargetCodeForStrnlen(DAG, DL: getCurSDLoc(), Chain: DAG.getRoot(), |
| 9272 | Src: getValue(V: Arg0), MaxLength: getValue(V: Arg1), |
| 9273 | SrcPtrInfo: MachinePointerInfo(Arg0)); |
| 9274 | if (Res.first.getNode()) { |
| 9275 | processIntegerCallValue(I, Value: Res.first, IsSigned: false); |
| 9276 | PendingLoads.push_back(Elt: Res.second); |
| 9277 | return true; |
| 9278 | } |
| 9279 | |
| 9280 | return false; |
| 9281 | } |
| 9282 | |
| 9283 | /// See if we can lower a unary floating-point operation into an SDNode with |
| 9284 | /// the specified Opcode. If so, return true and lower it, otherwise return |
| 9285 | /// false and it will be lowered like a normal call. |
| 9286 | /// The caller already checked that \p I calls the appropriate LibFunc with a |
| 9287 | /// correct prototype. |
| 9288 | bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I, |
| 9289 | unsigned Opcode) { |
| 9290 | // We already checked this call's prototype; verify it doesn't modify errno. |
| 9291 | if (!I.onlyReadsMemory()) |
| 9292 | return false; |
| 9293 | |
| 9294 | SDNodeFlags Flags; |
| 9295 | Flags.copyFMF(FPMO: cast<FPMathOperator>(Val: I)); |
| 9296 | |
| 9297 | SDValue Tmp = getValue(V: I.getArgOperand(i: 0)); |
| 9298 | setValue(V: &I, |
| 9299 | NewN: DAG.getNode(Opcode, DL: getCurSDLoc(), VT: Tmp.getValueType(), Operand: Tmp, Flags)); |
| 9300 | return true; |
| 9301 | } |
| 9302 | |
| 9303 | /// See if we can lower a binary floating-point operation into an SDNode with |
| 9304 | /// the specified Opcode. If so, return true and lower it. Otherwise return |
| 9305 | /// false, and it will be lowered like a normal call. |
| 9306 | /// The caller already checked that \p I calls the appropriate LibFunc with a |
| 9307 | /// correct prototype. |
| 9308 | bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I, |
| 9309 | unsigned Opcode) { |
| 9310 | // We already checked this call's prototype; verify it doesn't modify errno. |
| 9311 | if (!I.onlyReadsMemory()) |
| 9312 | return false; |
| 9313 | |
| 9314 | SDNodeFlags Flags; |
| 9315 | Flags.copyFMF(FPMO: cast<FPMathOperator>(Val: I)); |
| 9316 | |
| 9317 | SDValue Tmp0 = getValue(V: I.getArgOperand(i: 0)); |
| 9318 | SDValue Tmp1 = getValue(V: I.getArgOperand(i: 1)); |
| 9319 | EVT VT = Tmp0.getValueType(); |
| 9320 | setValue(V: &I, NewN: DAG.getNode(Opcode, DL: getCurSDLoc(), VT, N1: Tmp0, N2: Tmp1, Flags)); |
| 9321 | return true; |
| 9322 | } |
| 9323 | |
| 9324 | void SelectionDAGBuilder::visitCall(const CallInst &I) { |
| 9325 | // Handle inline assembly differently. |
| 9326 | if (I.isInlineAsm()) { |
| 9327 | visitInlineAsm(Call: I); |
| 9328 | return; |
| 9329 | } |
| 9330 | |
| 9331 | diagnoseDontCall(CI: I); |
| 9332 | |
| 9333 | if (Function *F = I.getCalledFunction()) { |
| 9334 | if (F->isDeclaration()) { |
| 9335 | // Is this an LLVM intrinsic? |
| 9336 | if (unsigned IID = F->getIntrinsicID()) { |
| 9337 | visitIntrinsicCall(I, Intrinsic: IID); |
| 9338 | return; |
| 9339 | } |
| 9340 | } |
| 9341 | |
| 9342 | // Check for well-known libc/libm calls. If the function is internal, it |
| 9343 | // can't be a library call. Don't do the check if marked as nobuiltin for |
| 9344 | // some reason or the call site requires strict floating point semantics. |
| 9345 | LibFunc Func; |
| 9346 | if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() && |
| 9347 | F->hasName() && LibInfo->getLibFunc(FDecl: *F, F&: Func) && |
| 9348 | LibInfo->hasOptimizedCodeGen(F: Func)) { |
| 9349 | switch (Func) { |
| 9350 | default: break; |
| 9351 | case LibFunc_bcmp: |
| 9352 | if (visitMemCmpBCmpCall(I)) |
| 9353 | return; |
| 9354 | break; |
| 9355 | case LibFunc_copysign: |
| 9356 | case LibFunc_copysignf: |
| 9357 | case LibFunc_copysignl: |
| 9358 | // We already checked this call's prototype; verify it doesn't modify |
| 9359 | // errno. |
| 9360 | if (I.onlyReadsMemory()) { |
| 9361 | SDValue LHS = getValue(V: I.getArgOperand(i: 0)); |
| 9362 | SDValue RHS = getValue(V: I.getArgOperand(i: 1)); |
| 9363 | setValue(V: &I, NewN: DAG.getNode(Opcode: ISD::FCOPYSIGN, DL: getCurSDLoc(), |
| 9364 | VT: LHS.getValueType(), N1: LHS, N2: RHS)); |
| 9365 | return; |
| 9366 | } |
| 9367 | break; |
| 9368 | case LibFunc_fabs: |
| 9369 | case LibFunc_fabsf: |
| 9370 | case LibFunc_fabsl: |
| 9371 | if (visitUnaryFloatCall(I, Opcode: ISD::FABS)) |
| 9372 | return; |
| 9373 | break; |
| 9374 | case LibFunc_fmin: |
| 9375 | case LibFunc_fminf: |
| 9376 | case LibFunc_fminl: |
| 9377 | if (visitBinaryFloatCall(I, Opcode: ISD::FMINNUM)) |
| 9378 | return; |
| 9379 | break; |
| 9380 | case LibFunc_fmax: |
| 9381 | case LibFunc_fmaxf: |
| 9382 | case LibFunc_fmaxl: |
| 9383 | if (visitBinaryFloatCall(I, Opcode: ISD::FMAXNUM)) |
| 9384 | return; |
| 9385 | break; |
| 9386 | case LibFunc_fminimum_num: |
| 9387 | case LibFunc_fminimum_numf: |
| 9388 | case LibFunc_fminimum_numl: |
| 9389 | if (visitBinaryFloatCall(I, Opcode: ISD::FMINIMUMNUM)) |
| 9390 | return; |
| 9391 | break; |
| 9392 | case LibFunc_fmaximum_num: |
| 9393 | case LibFunc_fmaximum_numf: |
| 9394 | case LibFunc_fmaximum_numl: |
| 9395 | if (visitBinaryFloatCall(I, Opcode: ISD::FMAXIMUMNUM)) |
| 9396 | return; |
| 9397 | break; |
| 9398 | case LibFunc_sin: |
| 9399 | case LibFunc_sinf: |
| 9400 | case LibFunc_sinl: |
| 9401 | if (visitUnaryFloatCall(I, Opcode: ISD::FSIN)) |
| 9402 | return; |
| 9403 | break; |
| 9404 | case LibFunc_cos: |
| 9405 | case LibFunc_cosf: |
| 9406 | case LibFunc_cosl: |
| 9407 | if (visitUnaryFloatCall(I, Opcode: ISD::FCOS)) |
| 9408 | return; |
| 9409 | break; |
| 9410 | case LibFunc_tan: |
| 9411 | case LibFunc_tanf: |
| 9412 | case LibFunc_tanl: |
| 9413 | if (visitUnaryFloatCall(I, Opcode: ISD::FTAN)) |
| 9414 | return; |
| 9415 | break; |
| 9416 | case LibFunc_asin: |
| 9417 | case LibFunc_asinf: |
| 9418 | case LibFunc_asinl: |
| 9419 | if (visitUnaryFloatCall(I, Opcode: ISD::FASIN)) |
| 9420 | return; |
| 9421 | break; |
| 9422 | case LibFunc_acos: |
| 9423 | case LibFunc_acosf: |
| 9424 | case LibFunc_acosl: |
| 9425 | if (visitUnaryFloatCall(I, Opcode: ISD::FACOS)) |
| 9426 | return; |
| 9427 | break; |
| 9428 | case LibFunc_atan: |
| 9429 | case LibFunc_atanf: |
| 9430 | case LibFunc_atanl: |
| 9431 | if (visitUnaryFloatCall(I, Opcode: ISD::FATAN)) |
| 9432 | return; |
| 9433 | break; |
| 9434 | case LibFunc_atan2: |
| 9435 | case LibFunc_atan2f: |
| 9436 | case LibFunc_atan2l: |
| 9437 | if (visitBinaryFloatCall(I, Opcode: ISD::FATAN2)) |
| 9438 | return; |
| 9439 | break; |
| 9440 | case LibFunc_sinh: |
| 9441 | case LibFunc_sinhf: |
| 9442 | case LibFunc_sinhl: |
| 9443 | if (visitUnaryFloatCall(I, Opcode: ISD::FSINH)) |
| 9444 | return; |
| 9445 | break; |
| 9446 | case LibFunc_cosh: |
| 9447 | case LibFunc_coshf: |
| 9448 | case LibFunc_coshl: |
| 9449 | if (visitUnaryFloatCall(I, Opcode: ISD::FCOSH)) |
| 9450 | return; |
| 9451 | break; |
| 9452 | case LibFunc_tanh: |
| 9453 | case LibFunc_tanhf: |
| 9454 | case LibFunc_tanhl: |
| 9455 | if (visitUnaryFloatCall(I, Opcode: ISD::FTANH)) |
| 9456 | return; |
| 9457 | break; |
| 9458 | case LibFunc_sqrt: |
| 9459 | case LibFunc_sqrtf: |
| 9460 | case LibFunc_sqrtl: |
| 9461 | case LibFunc_sqrt_finite: |
| 9462 | case LibFunc_sqrtf_finite: |
| 9463 | case LibFunc_sqrtl_finite: |
| 9464 | if (visitUnaryFloatCall(I, Opcode: ISD::FSQRT)) |
| 9465 | return; |
| 9466 | break; |
| 9467 | case LibFunc_floor: |
| 9468 | case LibFunc_floorf: |
| 9469 | case LibFunc_floorl: |
| 9470 | if (visitUnaryFloatCall(I, Opcode: ISD::FFLOOR)) |
| 9471 | return; |
| 9472 | break; |
| 9473 | case LibFunc_nearbyint: |
| 9474 | case LibFunc_nearbyintf: |
| 9475 | case LibFunc_nearbyintl: |
| 9476 | if (visitUnaryFloatCall(I, Opcode: ISD::FNEARBYINT)) |
| 9477 | return; |
| 9478 | break; |
| 9479 | case LibFunc_ceil: |
| 9480 | case LibFunc_ceilf: |
| 9481 | case LibFunc_ceill: |
| 9482 | if (visitUnaryFloatCall(I, Opcode: ISD::FCEIL)) |
| 9483 | return; |
| 9484 | break; |
| 9485 | case LibFunc_rint: |
| 9486 | case LibFunc_rintf: |
| 9487 | case LibFunc_rintl: |
| 9488 | if (visitUnaryFloatCall(I, Opcode: ISD::FRINT)) |
| 9489 | return; |
| 9490 | break; |
| 9491 | case LibFunc_round: |
| 9492 | case LibFunc_roundf: |
| 9493 | case LibFunc_roundl: |
| 9494 | if (visitUnaryFloatCall(I, Opcode: ISD::FROUND)) |
| 9495 | return; |
| 9496 | break; |
| 9497 | case LibFunc_trunc: |
| 9498 | case LibFunc_truncf: |
| 9499 | case LibFunc_truncl: |
| 9500 | if (visitUnaryFloatCall(I, Opcode: ISD::FTRUNC)) |
| 9501 | return; |
| 9502 | break; |
| 9503 | case LibFunc_log2: |
| 9504 | case LibFunc_log2f: |
| 9505 | case LibFunc_log2l: |
| 9506 | if (visitUnaryFloatCall(I, Opcode: ISD::FLOG2)) |
| 9507 | return; |
| 9508 | break; |
| 9509 | case LibFunc_exp2: |
| 9510 | case LibFunc_exp2f: |
| 9511 | case LibFunc_exp2l: |
| 9512 | if (visitUnaryFloatCall(I, Opcode: ISD::FEXP2)) |
| 9513 | return; |
| 9514 | break; |
| 9515 | case LibFunc_exp10: |
| 9516 | case LibFunc_exp10f: |
| 9517 | case LibFunc_exp10l: |
| 9518 | if (visitUnaryFloatCall(I, Opcode: ISD::FEXP10)) |
| 9519 | return; |
| 9520 | break; |
| 9521 | case LibFunc_ldexp: |
| 9522 | case LibFunc_ldexpf: |
| 9523 | case LibFunc_ldexpl: |
| 9524 | if (visitBinaryFloatCall(I, Opcode: ISD::FLDEXP)) |
| 9525 | return; |
| 9526 | break; |
| 9527 | case LibFunc_memcmp: |
| 9528 | if (visitMemCmpBCmpCall(I)) |
| 9529 | return; |
| 9530 | break; |
| 9531 | case LibFunc_mempcpy: |
| 9532 | if (visitMemPCpyCall(I)) |
| 9533 | return; |
| 9534 | break; |
| 9535 | case LibFunc_memchr: |
| 9536 | if (visitMemChrCall(I)) |
| 9537 | return; |
| 9538 | break; |
| 9539 | case LibFunc_strcpy: |
| 9540 | if (visitStrCpyCall(I, isStpcpy: false)) |
| 9541 | return; |
| 9542 | break; |
| 9543 | case LibFunc_stpcpy: |
| 9544 | if (visitStrCpyCall(I, isStpcpy: true)) |
| 9545 | return; |
| 9546 | break; |
| 9547 | case LibFunc_strcmp: |
| 9548 | if (visitStrCmpCall(I)) |
| 9549 | return; |
| 9550 | break; |
| 9551 | case LibFunc_strlen: |
| 9552 | if (visitStrLenCall(I)) |
| 9553 | return; |
| 9554 | break; |
| 9555 | case LibFunc_strnlen: |
| 9556 | if (visitStrNLenCall(I)) |
| 9557 | return; |
| 9558 | break; |
| 9559 | } |
| 9560 | } |
| 9561 | } |
| 9562 | |
| 9563 | if (I.countOperandBundlesOfType(ID: LLVMContext::OB_ptrauth)) { |
| 9564 | LowerCallSiteWithPtrAuthBundle(CB: cast<CallBase>(Val: I), /*EHPadBB=*/nullptr); |
| 9565 | return; |
| 9566 | } |
| 9567 | |
| 9568 | // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't |
| 9569 | // have to do anything here to lower funclet bundles. |
| 9570 | // CFGuardTarget bundles are lowered in LowerCallTo. |
| 9571 | if (I.hasOperandBundlesOtherThan( |
| 9572 | IDs: {LLVMContext::OB_deopt, LLVMContext::OB_funclet, |
| 9573 | LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated, |
| 9574 | LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_kcfi, |
| 9575 | LLVMContext::OB_convergencectrl})) |
| 9576 | reportFatalUsageError(reason: "cannot lower calls with arbitrary operand bundles!" ); |
| 9577 | |
| 9578 | SDValue Callee = getValue(V: I.getCalledOperand()); |
| 9579 | |
| 9580 | if (I.hasDeoptState()) |
| 9581 | LowerCallSiteWithDeoptBundle(Call: &I, Callee, EHPadBB: nullptr); |
| 9582 | else |
    // Check if we can potentially perform a tail call. More detailed checking
    // is done within LowerCallTo, after more information about the call is
    // known.
| 9586 | LowerCallTo(CB: I, Callee, isTailCall: I.isTailCall(), isMustTailCall: I.isMustTailCall()); |
| 9587 | } |
| 9588 | |
| 9589 | void SelectionDAGBuilder::LowerCallSiteWithPtrAuthBundle( |
| 9590 | const CallBase &CB, const BasicBlock *EHPadBB) { |
| 9591 | auto PAB = CB.getOperandBundle(Name: "ptrauth" ); |
| 9592 | const Value *CalleeV = CB.getCalledOperand(); |
| 9593 | |
| 9594 | // Gather the call ptrauth data from the operand bundle: |
| 9595 | // [ i32 <key>, i64 <discriminator> ] |
| 9596 | const auto *Key = cast<ConstantInt>(Val: PAB->Inputs[0]); |
| 9597 | const Value *Discriminator = PAB->Inputs[1]; |
| 9598 | |
| 9599 | assert(Key->getType()->isIntegerTy(32) && "Invalid ptrauth key" ); |
| 9600 | assert(Discriminator->getType()->isIntegerTy(64) && |
| 9601 | "Invalid ptrauth discriminator" ); |
| 9602 | |
| 9603 | // Look through ptrauth constants to find the raw callee. |
| 9604 | // Do a direct unauthenticated call if we found it and everything matches. |
| 9605 | if (const auto *CalleeCPA = dyn_cast<ConstantPtrAuth>(Val: CalleeV)) |
| 9606 | if (CalleeCPA->isKnownCompatibleWith(Key, Discriminator, |
| 9607 | DL: DAG.getDataLayout())) |
| 9608 | return LowerCallTo(CB, Callee: getValue(V: CalleeCPA->getPointer()), isTailCall: CB.isTailCall(), |
| 9609 | isMustTailCall: CB.isMustTailCall(), EHPadBB); |
| 9610 | |
| 9611 | // Functions should never be ptrauth-called directly. |
| 9612 | assert(!isa<Function>(CalleeV) && "invalid direct ptrauth call" ); |
| 9613 | |
| 9614 | // Otherwise, do an authenticated indirect call. |
| 9615 | TargetLowering::PtrAuthInfo PAI = {.Key: Key->getZExtValue(), |
| 9616 | .Discriminator: getValue(V: Discriminator)}; |
| 9617 | |
| 9618 | LowerCallTo(CB, Callee: getValue(V: CalleeV), isTailCall: CB.isTailCall(), isMustTailCall: CB.isMustTailCall(), |
| 9619 | EHPadBB, PAI: &PAI); |
| 9620 | } |
| 9621 | |
| 9622 | namespace { |
| 9623 | |
| 9624 | /// AsmOperandInfo - This contains information for each constraint that we are |
| 9625 | /// lowering. |
| 9626 | class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo { |
| 9627 | public: |
| 9628 | /// CallOperand - If this is the result output operand or a clobber |
| 9629 | /// this is null, otherwise it is the incoming operand to the CallInst. |
| 9630 | /// This gets modified as the asm is processed. |
| 9631 | SDValue CallOperand; |
| 9632 | |
  /// AssignedRegs - If this is a register or register class operand, this
  /// contains the set of registers corresponding to the operand.
| 9635 | RegsForValue AssignedRegs; |
| 9636 | |
| 9637 | explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info) |
| 9638 | : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) { |
| 9639 | } |
| 9640 | |
| 9641 | /// Whether or not this operand accesses memory |
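  /// (e.g. an indirect operand, or any constraint code that maps to
  /// TargetLowering::C_Memory).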
| 9642 | bool hasMemory(const TargetLowering &TLI) const { |
| 9643 | // Indirect operand accesses access memory. |
| 9644 | if (isIndirect) |
| 9645 | return true; |
| 9646 | |
| 9647 | for (const auto &Code : Codes) |
| 9648 | if (TLI.getConstraintType(Constraint: Code) == TargetLowering::C_Memory) |
| 9649 | return true; |
| 9650 | |
| 9651 | return false; |
| 9652 | } |
| 9653 | }; |
| 9654 | |
| 9655 | |
| 9656 | } // end anonymous namespace |
| 9657 | |
| 9658 | /// Make sure that the output operand \p OpInfo and its corresponding input |
| 9659 | /// operand \p MatchingOpInfo have compatible constraint types (otherwise error |
| 9660 | /// out). |
| 9661 | static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo, |
| 9662 | SDISelAsmOperandInfo &MatchingOpInfo, |
| 9663 | SelectionDAG &DAG) { |
| 9664 | if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT) |
| 9665 | return; |
| 9666 | |
| 9667 | const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo(); |
| 9668 | const auto &TLI = DAG.getTargetLoweringInfo(); |
| 9669 | |
| 9670 | std::pair<unsigned, const TargetRegisterClass *> MatchRC = |
| 9671 | TLI.getRegForInlineAsmConstraint(TRI, Constraint: OpInfo.ConstraintCode, |
| 9672 | VT: OpInfo.ConstraintVT); |
| 9673 | std::pair<unsigned, const TargetRegisterClass *> InputRC = |
| 9674 | TLI.getRegForInlineAsmConstraint(TRI, Constraint: MatchingOpInfo.ConstraintCode, |
| 9675 | VT: MatchingOpInfo.ConstraintVT); |
| 9676 | const bool OutOpIsIntOrFP = |
| 9677 | OpInfo.ConstraintVT.isInteger() || OpInfo.ConstraintVT.isFloatingPoint(); |
| 9678 | const bool InOpIsIntOrFP = MatchingOpInfo.ConstraintVT.isInteger() || |
| 9679 | MatchingOpInfo.ConstraintVT.isFloatingPoint(); |
| 9680 | if ((OutOpIsIntOrFP != InOpIsIntOrFP) || (MatchRC.second != InputRC.second)) { |
| 9681 | // FIXME: error out in a more elegant fashion |
| 9682 | report_fatal_error(reason: "Unsupported asm: input constraint" |
| 9683 | " with a matching output constraint of" |
| 9684 | " incompatible type!" ); |
| 9685 | } |
| 9686 | MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT; |
| 9687 | } |
| 9688 | |
| 9689 | /// Get a direct memory input to behave well as an indirect operand. |
| 9690 | /// This may introduce stores, hence the need for a \p Chain. |
| 9691 | /// \return The (possibly updated) chain. |
| 9692 | static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location, |
| 9693 | SDISelAsmOperandInfo &OpInfo, |
| 9694 | SelectionDAG &DAG) { |
| 9695 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 9696 | |
| 9697 | // If we don't have an indirect input, put it in the constpool if we can, |
| 9698 | // otherwise spill it to a stack slot. |
| 9699 | // TODO: This isn't quite right. We need to handle these according to |
| 9700 | // the addressing mode that the constraint wants. Also, this may take |
| 9701 | // an additional register for the computation and we don't want that |
| 9702 | // either. |
| 9703 | |
| 9704 | // If the operand is a float, integer, or vector constant, spill to a |
| 9705 | // constant pool entry to get its address. |
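  // For example, a direct "m" input holding the constant 42 is rewritten to
  // the address of a constant-pool entry containing 42.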
| 9706 | const Value *OpVal = OpInfo.CallOperandVal; |
| 9707 | if (isa<ConstantFP>(Val: OpVal) || isa<ConstantInt>(Val: OpVal) || |
| 9708 | isa<ConstantVector>(Val: OpVal) || isa<ConstantDataVector>(Val: OpVal)) { |
| 9709 | OpInfo.CallOperand = DAG.getConstantPool( |
| 9710 | C: cast<Constant>(Val: OpVal), VT: TLI.getPointerTy(DL: DAG.getDataLayout())); |
| 9711 | return Chain; |
| 9712 | } |
| 9713 | |
| 9714 | // Otherwise, create a stack slot and emit a store to it before the asm. |
| 9715 | Type *Ty = OpVal->getType(); |
| 9716 | auto &DL = DAG.getDataLayout(); |
| 9717 | TypeSize TySize = DL.getTypeAllocSize(Ty); |
| 9718 | MachineFunction &MF = DAG.getMachineFunction(); |
| 9719 | const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); |
| 9720 | int StackID = 0; |
| 9721 | if (TySize.isScalable()) |
| 9722 | StackID = TFI->getStackIDForScalableVectors(); |
| 9723 | int SSFI = MF.getFrameInfo().CreateStackObject(Size: TySize.getKnownMinValue(), |
| 9724 | Alignment: DL.getPrefTypeAlign(Ty), isSpillSlot: false, |
| 9725 | Alloca: nullptr, ID: StackID); |
| 9726 | SDValue StackSlot = DAG.getFrameIndex(FI: SSFI, VT: TLI.getFrameIndexTy(DL)); |
| 9727 | Chain = DAG.getTruncStore(Chain, dl: Location, Val: OpInfo.CallOperand, Ptr: StackSlot, |
| 9728 | PtrInfo: MachinePointerInfo::getFixedStack(MF, FI: SSFI), |
| 9729 | SVT: TLI.getMemValueType(DL, Ty)); |
| 9730 | OpInfo.CallOperand = StackSlot; |
| 9731 | |
| 9732 | return Chain; |
| 9733 | } |
| 9734 | |
| 9735 | /// GetRegistersForValue - Assign registers (virtual or physical) for the |
| 9736 | /// specified operand. We prefer to assign virtual registers, to allow the |
| 9737 | /// register allocator to handle the assignment process. However, if the asm |
| 9738 | /// uses features that we can't model on machineinstrs, we have SDISel do the |
| 9739 | /// allocation. This produces generally horrible, but correct, code. |
| 9740 | /// |
| 9741 | /// OpInfo describes the operand |
| 9742 | /// RefOpInfo describes the matching operand if any, the operand otherwise |
| 9743 | static std::optional<unsigned> |
| 9744 | getRegistersForValue(SelectionDAG &DAG, const SDLoc &DL, |
| 9745 | SDISelAsmOperandInfo &OpInfo, |
| 9746 | SDISelAsmOperandInfo &RefOpInfo) { |
| 9747 | LLVMContext &Context = *DAG.getContext(); |
| 9748 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 9749 | |
| 9750 | MachineFunction &MF = DAG.getMachineFunction(); |
| 9751 | SmallVector<Register, 4> Regs; |
| 9752 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
| 9753 | |
| 9754 | // No work to do for memory/address operands. |
| 9755 | if (OpInfo.ConstraintType == TargetLowering::C_Memory || |
| 9756 | OpInfo.ConstraintType == TargetLowering::C_Address) |
| 9757 | return std::nullopt; |
| 9758 | |
| 9759 | // If this is a constraint for a single physreg, or a constraint for a |
| 9760 | // register class, find it. |
| 9761 | unsigned AssignedReg; |
| 9762 | const TargetRegisterClass *RC; |
| 9763 | std::tie(args&: AssignedReg, args&: RC) = TLI.getRegForInlineAsmConstraint( |
| 9764 | TRI: &TRI, Constraint: RefOpInfo.ConstraintCode, VT: RefOpInfo.ConstraintVT); |
| 9765 | // RC is unset only on failure. Return immediately. |
| 9766 | if (!RC) |
| 9767 | return std::nullopt; |
| 9768 | |
| 9769 | // Get the actual register value type. This is important, because the user |
| 9770 | // may have asked for (e.g.) the AX register in i32 type. We need to |
| 9771 | // remember that AX is actually i16 to get the right extension. |
| 9772 | const MVT RegVT = *TRI.legalclasstypes_begin(RC: *RC); |
| 9773 | |
| 9774 | if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) { |
| 9775 | // If this is an FP operand in an integer register (or visa versa), or more |
| 9776 | // generally if the operand value disagrees with the register class we plan |
| 9777 | // to stick it in, fix the operand type. |
| 9778 | // |
| 9779 | // If this is an input value, the bitcast to the new type is done now. |
| 9780 | // Bitcast for output value is done at the end of visitInlineAsm(). |
| 9781 | if ((OpInfo.Type == InlineAsm::isOutput || |
| 9782 | OpInfo.Type == InlineAsm::isInput) && |
| 9783 | !TRI.isTypeLegalForClass(RC: *RC, T: OpInfo.ConstraintVT)) { |
| 9784 | // Try to convert to the first EVT that the reg class contains. If the |
| 9785 | // types are identical size, use a bitcast to convert (e.g. two differing |
| 9786 | // vector types). Note: output bitcast is done at the end of |
| 9787 | // visitInlineAsm(). |
| 9788 | if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) { |
| 9789 | // Exclude indirect inputs while they are unsupported because the code |
| 9790 | // to perform the load is missing and thus OpInfo.CallOperand still |
| 9791 | // refers to the input address rather than the pointed-to value. |
| 9792 | if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect) |
| 9793 | OpInfo.CallOperand = |
| 9794 | DAG.getNode(Opcode: ISD::BITCAST, DL, VT: RegVT, Operand: OpInfo.CallOperand); |
| 9795 | OpInfo.ConstraintVT = RegVT; |
| 9796 | // If the operand is an FP value and we want it in integer registers, |
| 9797 | // use the corresponding integer type. This turns an f64 value into |
| 9798 | // i64, which can be passed with two i32 values on a 32-bit machine. |
| 9799 | } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) { |
| 9800 | MVT VT = MVT::getIntegerVT(BitWidth: OpInfo.ConstraintVT.getSizeInBits()); |
| 9801 | if (OpInfo.Type == InlineAsm::isInput) |
| 9802 | OpInfo.CallOperand = |
| 9803 | DAG.getNode(Opcode: ISD::BITCAST, DL, VT, Operand: OpInfo.CallOperand); |
| 9804 | OpInfo.ConstraintVT = VT; |
| 9805 | } |
| 9806 | } |
| 9807 | } |
| 9808 | |
| 9809 | // No need to allocate a matching input constraint since the constraint it's |
| 9810 | // matching to has already been allocated. |
| 9811 | if (OpInfo.isMatchingInputConstraint()) |
| 9812 | return std::nullopt; |
| 9813 | |
| 9814 | EVT ValueVT = OpInfo.ConstraintVT; |
| 9815 | if (OpInfo.ConstraintVT == MVT::Other) |
| 9816 | ValueVT = RegVT; |
| 9817 | |
| 9818 | // Initialize NumRegs. |
| 9819 | unsigned NumRegs = 1; |
| 9820 | if (OpInfo.ConstraintVT != MVT::Other) |
| 9821 | NumRegs = TLI.getNumRegisters(Context, VT: OpInfo.ConstraintVT, RegisterVT: RegVT); |
| 9822 | |
| 9823 | // If this is a constraint for a specific physical register, like {r17}, |
| 9824 | // assign it now. |
| 9825 | |
  // If this is associated with a specific register, initialize the iterator
  // to the correct place. If virtual, make sure we have enough registers.
| 9828 | |
| 9829 | // Initialize iterator if necessary |
| 9830 | TargetRegisterClass::iterator I = RC->begin(); |
| 9831 | MachineRegisterInfo &RegInfo = MF.getRegInfo(); |
| 9832 | |
| 9833 | // Do not check for single registers. |
| 9834 | if (AssignedReg) { |
| 9835 | I = std::find(first: I, last: RC->end(), val: AssignedReg); |
| 9836 | if (I == RC->end()) { |
| 9837 | // RC does not contain the selected register, which indicates a |
| 9838 | // mismatch between the register and the required type/bitwidth. |
| 9839 | return {AssignedReg}; |
| 9840 | } |
| 9841 | } |
| 9842 | |
| 9843 | for (; NumRegs; --NumRegs, ++I) { |
| 9844 | assert(I != RC->end() && "Ran out of registers to allocate!" ); |
| 9845 | Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RegClass: RC); |
| 9846 | Regs.push_back(Elt: R); |
| 9847 | } |
| 9848 | |
| 9849 | OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT); |
| 9850 | return std::nullopt; |
| 9851 | } |
| 9852 | |
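/// Find the index in \p AsmNodeOperands of the flag word for inline asm
/// operand number \p OperandNo, by walking past the definitions already
/// emitted.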
| 9853 | static unsigned |
| 9854 | findMatchingInlineAsmOperand(unsigned OperandNo, |
| 9855 | const std::vector<SDValue> &AsmNodeOperands) { |
| 9856 | // Scan until we find the definition we already emitted of this operand. |
| 9857 | unsigned CurOp = InlineAsm::Op_FirstOperand; |
| 9858 | for (; OperandNo; --OperandNo) { |
| 9859 | // Advance to the next operand. |
| 9860 | unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal(); |
| 9861 | const InlineAsm::Flag F(OpFlag); |
| 9862 | assert( |
| 9863 | (F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isMemKind()) && |
| 9864 | "Skipped past definitions?" ); |
| 9865 | CurOp += F.getNumOperandRegisters() + 1; |
| 9866 | } |
| 9867 | return CurOp; |
| 9868 | } |
| 9869 | |
| 9870 | namespace { |
| 9871 | |
class ExtraFlags {
  unsigned Flags = 0;

public:
  explicit ExtraFlags(const CallBase &Call) {
| 9877 | const InlineAsm *IA = cast<InlineAsm>(Val: Call.getCalledOperand()); |
| 9878 | if (IA->hasSideEffects()) |
| 9879 | Flags |= InlineAsm::Extra_HasSideEffects; |
| 9880 | if (IA->isAlignStack()) |
| 9881 | Flags |= InlineAsm::Extra_IsAlignStack; |
| 9882 | if (Call.isConvergent()) |
| 9883 | Flags |= InlineAsm::Extra_IsConvergent; |
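    // Extra_AsmDialect is a one-bit field, so multiplying by the dialect
    // (0 = AT&T, 1 = Intel) sets the bit only for the Intel dialect.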
| 9884 | Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect; |
| 9885 | } |
| 9886 | |
| 9887 | void update(const TargetLowering::AsmOperandInfo &OpInfo) { |
| 9888 | // Ideally, we would only check against memory constraints. However, the |
| 9889 | // meaning of an Other constraint can be target-specific and we can't easily |
| 9890 | // reason about it. Therefore, be conservative and set MayLoad/MayStore |
| 9891 | // for Other constraints as well. |
| 9892 | if (OpInfo.ConstraintType == TargetLowering::C_Memory || |
| 9893 | OpInfo.ConstraintType == TargetLowering::C_Other) { |
| 9894 | if (OpInfo.Type == InlineAsm::isInput) |
| 9895 | Flags |= InlineAsm::Extra_MayLoad; |
| 9896 | else if (OpInfo.Type == InlineAsm::isOutput) |
| 9897 | Flags |= InlineAsm::Extra_MayStore; |
| 9898 | else if (OpInfo.Type == InlineAsm::isClobber) |
| 9899 | Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore); |
| 9900 | } |
| 9901 | } |
| 9902 | |
  unsigned get() const { return Flags; }
| 9904 | }; |
| 9905 | |
| 9906 | } // end anonymous namespace |
| 9907 | |
| 9908 | static bool isFunction(SDValue Op) { |
| 9909 | if (Op && Op.getOpcode() == ISD::GlobalAddress) { |
| 9910 | if (auto *GA = dyn_cast<GlobalAddressSDNode>(Val&: Op)) { |
| 9911 | auto Fn = dyn_cast_or_null<Function>(Val: GA->getGlobal()); |
| 9912 | |
      // A normal "call dllimport func" instruction (non-inline-asm) forces
      // indirect access via its call opcode, and the asm printer then emits
      // the indirect symbol form (e.g. "*sym") based on that opcode. Inline
      // asm cannot do that today (in fact, this is similar to the "Data
      // Access" case), so we ignore dllimport functions here.
| 9918 | if (Fn && !Fn->hasDLLImportStorageClass()) |
| 9919 | return true; |
| 9920 | } |
| 9921 | } |
| 9922 | return false; |
| 9923 | } |
| 9924 | |
| 9925 | /// visitInlineAsm - Handle a call to an InlineAsm object. |
| 9926 | void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call, |
| 9927 | const BasicBlock *EHPadBB) { |
| 9928 | const InlineAsm *IA = cast<InlineAsm>(Val: Call.getCalledOperand()); |
| 9929 | |
| 9930 | /// ConstraintOperands - Information about all of the constraints. |
| 9931 | SmallVector<SDISelAsmOperandInfo, 16> ConstraintOperands; |
| 9932 | |
| 9933 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 9934 | TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints( |
| 9935 | DL: DAG.getDataLayout(), TRI: DAG.getSubtarget().getRegisterInfo(), Call); |
| 9936 | |
| 9937 | // First Pass: Calculate HasSideEffects and ExtraFlags (AlignStack, |
| 9938 | // AsmDialect, MayLoad, MayStore). |
| 9939 | bool HasSideEffect = IA->hasSideEffects(); |
  ExtraFlags ExtraInfo(Call);
| 9941 | |
| 9942 | for (auto &T : TargetConstraints) { |
| 9943 | ConstraintOperands.push_back(Elt: SDISelAsmOperandInfo(T)); |
| 9944 | SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back(); |
| 9945 | |
| 9946 | if (OpInfo.CallOperandVal) |
| 9947 | OpInfo.CallOperand = getValue(V: OpInfo.CallOperandVal); |
| 9948 | |
| 9949 | if (!HasSideEffect) |
| 9950 | HasSideEffect = OpInfo.hasMemory(TLI); |
| 9951 | |
| 9952 | // Determine if this InlineAsm MayLoad or MayStore based on the constraints. |
| 9953 | // FIXME: Could we compute this on OpInfo rather than T? |
| 9954 | |
| 9955 | // Compute the constraint code and ConstraintType to use. |
| 9956 | TLI.ComputeConstraintToUse(OpInfo&: T, Op: SDValue()); |
| 9957 | |
| 9958 | if (T.ConstraintType == TargetLowering::C_Immediate && |
| 9959 | OpInfo.CallOperand && !isa<ConstantSDNode>(Val: OpInfo.CallOperand)) |
      // We've delayed emitting a diagnostic like the "n" constraint because
      // inlining could turn the operand into an integer constant.
| 9962 | return emitInlineAsmError(Call, Message: "constraint '" + Twine(T.ConstraintCode) + |
| 9963 | "' expects an integer constant " |
| 9964 | "expression" ); |
| 9965 | |
| 9966 | ExtraInfo.update(OpInfo: T); |
| 9967 | } |
| 9968 | |
| 9969 | // We won't need to flush pending loads if this asm doesn't touch |
| 9970 | // memory and is nonvolatile. |
| 9971 | SDValue Glue, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot(); |
| 9972 | |
| 9973 | bool EmitEHLabels = isa<InvokeInst>(Val: Call); |
| 9974 | if (EmitEHLabels) { |
| 9975 | assert(EHPadBB && "InvokeInst must have an EHPadBB" ); |
| 9976 | } |
| 9977 | bool IsCallBr = isa<CallBrInst>(Val: Call); |
| 9978 | |
| 9979 | if (IsCallBr || EmitEHLabels) { |
| 9980 | // If this is a callbr or invoke we need to flush pending exports since |
| 9981 | // inlineasm_br and invoke are terminators. |
| 9982 | // We need to do this before nodes are glued to the inlineasm_br node. |
| 9983 | Chain = getControlRoot(); |
| 9984 | } |
| 9985 | |
| 9986 | MCSymbol *BeginLabel = nullptr; |
| 9987 | if (EmitEHLabels) { |
| 9988 | Chain = lowerStartEH(Chain, EHPadBB, BeginLabel); |
| 9989 | } |
| 9990 | |
| 9991 | int OpNo = -1; |
| 9992 | SmallVector<StringRef> AsmStrs; |
| 9993 | IA->collectAsmStrs(AsmStrs); |
| 9994 | |
| 9995 | // Second pass over the constraints: compute which constraint option to use. |
| 9996 | for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) { |
| 9997 | if (OpInfo.hasArg() || OpInfo.Type == InlineAsm::isOutput) |
| 9998 | OpNo++; |
| 9999 | |
| 10000 | // If this is an output operand with a matching input operand, look up the |
| 10001 | // matching input. If their types mismatch, e.g. one is an integer, the |
| 10002 | // other is floating point, or their sizes are different, flag it as an |
| 10003 | // error. |
| 10004 | if (OpInfo.hasMatchingInput()) { |
| 10005 | SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput]; |
| 10006 | patchMatchingInput(OpInfo, MatchingOpInfo&: Input, DAG); |
| 10007 | } |
| 10008 | |
| 10009 | // Compute the constraint code and ConstraintType to use. |
| 10010 | TLI.ComputeConstraintToUse(OpInfo, Op: OpInfo.CallOperand, DAG: &DAG); |
| 10011 | |
| 10012 | if ((OpInfo.ConstraintType == TargetLowering::C_Memory && |
| 10013 | OpInfo.Type == InlineAsm::isClobber) || |
| 10014 | OpInfo.ConstraintType == TargetLowering::C_Address) |
| 10015 | continue; |
| 10016 | |
| 10017 | // In Linux PIC model, there are 4 cases about value/label addressing: |
| 10018 | // |
| 10019 | // 1: Function call or Label jmp inside the module. |
| 10020 | // 2: Data access (such as global variable, static variable) inside module. |
| 10021 | // 3: Function call or Label jmp outside the module. |
| 10022 | // 4: Data access (such as global variable) outside the module. |
| 10023 | // |
    // Because the current LLVM inline asm design does not "recognize" the
    // asm text, it is hard for us to treat memory addressing differently for
    // the same value/address used in different instructions. For example, in
    // the PIC model, a call may go through the PLT or be directly
    // pc-relative, while a lea/mov of a function address may use the GOT.
    //
    // Here we try to "recognize" function calls for cases 1 and 3 in inline
    // asm, and adjust the constraint for them.
    //
    // TODO: Inline asm currently discourages jumping to labels outside the
    // module, so we don't handle jumps to function labels yet; this should be
    // enhanced (especially for the PIC model) if meaningful requirements
    // arise.
| 10036 | if (OpInfo.isIndirect && isFunction(Op: OpInfo.CallOperand) && |
| 10037 | TLI.isInlineAsmTargetBranch(AsmStrs, OpNo) && |
| 10038 | TM.getCodeModel() != CodeModel::Large) { |
| 10039 | OpInfo.isIndirect = false; |
| 10040 | OpInfo.ConstraintType = TargetLowering::C_Address; |
| 10041 | } |
| 10042 | |
| 10043 | // If this is a memory input, and if the operand is not indirect, do what we |
| 10044 | // need to provide an address for the memory input. |
| 10045 | if (OpInfo.ConstraintType == TargetLowering::C_Memory && |
| 10046 | !OpInfo.isIndirect) { |
| 10047 | assert((OpInfo.isMultipleAlternative || |
| 10048 | (OpInfo.Type == InlineAsm::isInput)) && |
| 10049 | "Can only indirectify direct input operands!" ); |
| 10050 | |
| 10051 | // Memory operands really want the address of the value. |
| 10052 | Chain = getAddressForMemoryInput(Chain, Location: getCurSDLoc(), OpInfo, DAG); |
| 10053 | |
| 10054 | // There is no longer a Value* corresponding to this operand. |
| 10055 | OpInfo.CallOperandVal = nullptr; |
| 10056 | |
| 10057 | // It is now an indirect operand. |
| 10058 | OpInfo.isIndirect = true; |
| 10059 | } |
| 10060 | |
| 10061 | } |
| 10062 | |
| 10063 | // AsmNodeOperands - The operands for the ISD::INLINEASM node. |
| 10064 | std::vector<SDValue> AsmNodeOperands; |
| 10065 | AsmNodeOperands.push_back(x: SDValue()); // reserve space for input chain |
| 10066 | AsmNodeOperands.push_back(x: DAG.getTargetExternalSymbol( |
| 10067 | Sym: IA->getAsmString().data(), VT: TLI.getProgramPointerTy(DL: DAG.getDataLayout()))); |
| 10068 | |
| 10069 | // If we have a !srcloc metadata node associated with it, we want to attach |
| 10070 | // this to the ultimately generated inline asm machineinstr. To do this, we |
| 10071 | // pass in the third operand as this (potentially null) inline asm MDNode. |
| 10072 | const MDNode *SrcLoc = Call.getMetadata(Kind: "srcloc" ); |
| 10073 | AsmNodeOperands.push_back(x: DAG.getMDNode(MD: SrcLoc)); |
| 10074 | |
| 10075 | // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore |
| 10076 | // bits as operand 3. |
| 10077 | AsmNodeOperands.push_back(x: DAG.getTargetConstant( |
| 10078 | Val: ExtraInfo.get(), DL: getCurSDLoc(), VT: TLI.getPointerTy(DL: DAG.getDataLayout()))); |
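  // At this point the fixed operand layout is:
  //   Op_InputChain (0), Op_AsmString (1), Op_MDNode (2), Op_ExtraInfo (3).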
| 10079 | |
  // Third pass: Loop over operands to prepare DAG-level operands. As part of
  // this, assign virtual and physical registers for inputs and outputs.
| 10082 | for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) { |
| 10083 | // Assign Registers. |
| 10084 | SDISelAsmOperandInfo &RefOpInfo = |
| 10085 | OpInfo.isMatchingInputConstraint() |
| 10086 | ? ConstraintOperands[OpInfo.getMatchedOperand()] |
| 10087 | : OpInfo; |
| 10088 | const auto RegError = |
| 10089 | getRegistersForValue(DAG, DL: getCurSDLoc(), OpInfo, RefOpInfo); |
| 10090 | if (RegError) { |
| 10091 | const MachineFunction &MF = DAG.getMachineFunction(); |
| 10092 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
| 10093 | const char *RegName = TRI.getName(RegNo: *RegError); |
| 10094 | emitInlineAsmError(Call, Message: "register '" + Twine(RegName) + |
| 10095 | "' allocated for constraint '" + |
| 10096 | Twine(OpInfo.ConstraintCode) + |
| 10097 | "' does not match required type" ); |
| 10098 | return; |
| 10099 | } |
| 10100 | |
| 10101 | auto DetectWriteToReservedRegister = [&]() { |
| 10102 | const MachineFunction &MF = DAG.getMachineFunction(); |
| 10103 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
| 10104 | for (Register Reg : OpInfo.AssignedRegs.Regs) { |
| 10105 | if (Reg.isPhysical() && TRI.isInlineAsmReadOnlyReg(MF, PhysReg: Reg)) { |
| 10106 | const char *RegName = TRI.getName(RegNo: Reg); |
| 10107 | emitInlineAsmError(Call, Message: "write to reserved register '" + |
| 10108 | Twine(RegName) + "'" ); |
| 10109 | return true; |
| 10110 | } |
| 10111 | } |
| 10112 | return false; |
| 10113 | }; |
| 10114 | assert((OpInfo.ConstraintType != TargetLowering::C_Address || |
| 10115 | (OpInfo.Type == InlineAsm::isInput && |
| 10116 | !OpInfo.isMatchingInputConstraint())) && |
| 10117 | "Only address as input operand is allowed." ); |
| 10118 | |
| 10119 | switch (OpInfo.Type) { |
| 10120 | case InlineAsm::isOutput: |
| 10121 | if (OpInfo.ConstraintType == TargetLowering::C_Memory) { |
| 10122 | const InlineAsm::ConstraintCode ConstraintID = |
| 10123 | TLI.getInlineAsmMemConstraint(ConstraintCode: OpInfo.ConstraintCode); |
| 10124 | assert(ConstraintID != InlineAsm::ConstraintCode::Unknown && |
| 10125 | "Failed to convert memory constraint code to constraint id." ); |
| 10126 | |
| 10127 | // Add information to the INLINEASM node to know about this output. |
| 10128 | InlineAsm::Flag OpFlags(InlineAsm::Kind::Mem, 1); |
| 10129 | OpFlags.setMemConstraint(ConstraintID); |
| 10130 | AsmNodeOperands.push_back(x: DAG.getTargetConstant(Val: OpFlags, DL: getCurSDLoc(), |
| 10131 | VT: MVT::i32)); |
| 10132 | AsmNodeOperands.push_back(x: OpInfo.CallOperand); |
| 10133 | } else { |
| 10134 | // Otherwise, this outputs to a register (directly for C_Register / |
| 10135 | // C_RegisterClass, and a target-defined fashion for |
| 10136 | // C_Immediate/C_Other). Find a register that we can use. |
| 10137 | if (OpInfo.AssignedRegs.Regs.empty()) { |
| 10138 | emitInlineAsmError( |
| 10139 | Call, Message: "couldn't allocate output register for constraint '" + |
| 10140 | Twine(OpInfo.ConstraintCode) + "'" ); |
| 10141 | return; |
| 10142 | } |
| 10143 | |
| 10144 | if (DetectWriteToReservedRegister()) |
| 10145 | return; |
| 10146 | |
| 10147 | // Add information to the INLINEASM node to know that this register is |
| 10148 | // set. |
| 10149 | OpInfo.AssignedRegs.AddInlineAsmOperands( |
| 10150 | Code: OpInfo.isEarlyClobber ? InlineAsm::Kind::RegDefEarlyClobber |
| 10151 | : InlineAsm::Kind::RegDef, |
| 10152 | HasMatching: false, MatchingIdx: 0, dl: getCurSDLoc(), DAG, Ops&: AsmNodeOperands); |
| 10153 | } |
| 10154 | break; |
| 10155 | |
| 10156 | case InlineAsm::isInput: |
| 10157 | case InlineAsm::isLabel: { |
| 10158 | SDValue InOperandVal = OpInfo.CallOperand; |
| 10159 | |
| 10160 | if (OpInfo.isMatchingInputConstraint()) { |
| 10161 | // If this is required to match an output register we have already set, |
| 10162 | // just use its register. |
| 10163 | auto CurOp = findMatchingInlineAsmOperand(OperandNo: OpInfo.getMatchedOperand(), |
| 10164 | AsmNodeOperands); |
| 10165 | InlineAsm::Flag Flag(AsmNodeOperands[CurOp]->getAsZExtVal()); |
| 10166 | if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) { |
| 10167 | if (OpInfo.isIndirect) { |
| 10168 | // This happens on gcc/testsuite/gcc.dg/pr8788-1.c |
| 10169 | emitInlineAsmError(Call, Message: "inline asm not supported yet: " |
| 10170 | "don't know how to handle tied " |
| 10171 | "indirect register inputs" ); |
| 10172 | return; |
| 10173 | } |
| 10174 | |
| 10175 | SmallVector<Register, 4> Regs; |
| 10176 | MachineFunction &MF = DAG.getMachineFunction(); |
| 10177 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
| 10178 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
| 10179 | auto *R = cast<RegisterSDNode>(Val&: AsmNodeOperands[CurOp+1]); |
| 10180 | Register TiedReg = R->getReg(); |
| 10181 | MVT RegVT = R->getSimpleValueType(ResNo: 0); |
| 10182 | const TargetRegisterClass *RC = |
| 10183 | TiedReg.isVirtual() ? MRI.getRegClass(Reg: TiedReg) |
| 10184 | : RegVT != MVT::Untyped ? TLI.getRegClassFor(VT: RegVT) |
| 10185 | : TRI.getMinimalPhysRegClass(Reg: TiedReg); |
| 10186 | for (unsigned i = 0, e = Flag.getNumOperandRegisters(); i != e; ++i) |
| 10187 | Regs.push_back(Elt: MRI.createVirtualRegister(RegClass: RC)); |
| 10188 | |
| 10189 | RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType()); |
| 10190 | |
| 10191 | SDLoc dl = getCurSDLoc(); |
          // Use the produced MatchedRegs object to copy the input value into
          // the freshly created virtual registers.
| 10193 | MatchedRegs.getCopyToRegs(Val: InOperandVal, DAG, dl, Chain, Glue: &Glue, V: &Call); |
| 10194 | MatchedRegs.AddInlineAsmOperands(Code: InlineAsm::Kind::RegUse, HasMatching: true, |
| 10195 | MatchingIdx: OpInfo.getMatchedOperand(), dl, DAG, |
| 10196 | Ops&: AsmNodeOperands); |
| 10197 | break; |
| 10198 | } |
| 10199 | |
| 10200 | assert(Flag.isMemKind() && "Unknown matching constraint!" ); |
| 10201 | assert(Flag.getNumOperandRegisters() == 1 && |
| 10202 | "Unexpected number of operands" ); |
| 10203 | // Add information to the INLINEASM node to know about this input. |
| 10204 | // See InlineAsm.h isUseOperandTiedToDef. |
| 10205 | Flag.clearMemConstraint(); |
| 10206 | Flag.setMatchingOp(OpInfo.getMatchedOperand()); |
| 10207 | AsmNodeOperands.push_back(x: DAG.getTargetConstant( |
| 10208 | Val: Flag, DL: getCurSDLoc(), VT: TLI.getPointerTy(DL: DAG.getDataLayout()))); |
| 10209 | AsmNodeOperands.push_back(x: AsmNodeOperands[CurOp+1]); |
| 10210 | break; |
| 10211 | } |
| 10212 | |
| 10213 | // Treat indirect 'X' constraint as memory. |
| 10214 | if (OpInfo.ConstraintType == TargetLowering::C_Other && |
| 10215 | OpInfo.isIndirect) |
| 10216 | OpInfo.ConstraintType = TargetLowering::C_Memory; |
| 10217 | |
| 10218 | if (OpInfo.ConstraintType == TargetLowering::C_Immediate || |
| 10219 | OpInfo.ConstraintType == TargetLowering::C_Other) { |
| 10220 | std::vector<SDValue> Ops; |
| 10221 | TLI.LowerAsmOperandForConstraint(Op: InOperandVal, Constraint: OpInfo.ConstraintCode, |
| 10222 | Ops, DAG); |
| 10223 | if (Ops.empty()) { |
| 10224 | if (OpInfo.ConstraintType == TargetLowering::C_Immediate) |
| 10225 | if (isa<ConstantSDNode>(Val: InOperandVal)) { |
| 10226 | emitInlineAsmError(Call, Message: "value out of range for constraint '" + |
| 10227 | Twine(OpInfo.ConstraintCode) + "'" ); |
| 10228 | return; |
| 10229 | } |
| 10230 | |
| 10231 | emitInlineAsmError(Call, |
| 10232 | Message: "invalid operand for inline asm constraint '" + |
| 10233 | Twine(OpInfo.ConstraintCode) + "'" ); |
| 10234 | return; |
| 10235 | } |
| 10236 | |
| 10237 | // Add information to the INLINEASM node to know about this input. |
| 10238 | InlineAsm::Flag ResOpType(InlineAsm::Kind::Imm, Ops.size()); |
| 10239 | AsmNodeOperands.push_back(x: DAG.getTargetConstant( |
| 10240 | Val: ResOpType, DL: getCurSDLoc(), VT: TLI.getPointerTy(DL: DAG.getDataLayout()))); |
| 10241 | llvm::append_range(C&: AsmNodeOperands, R&: Ops); |
| 10242 | break; |
| 10243 | } |
| 10244 | |
| 10245 | if (OpInfo.ConstraintType == TargetLowering::C_Memory) { |
| 10246 | assert((OpInfo.isIndirect || |
| 10247 | OpInfo.ConstraintType != TargetLowering::C_Memory) && |
| 10248 | "Operand must be indirect to be a mem!" ); |
| 10249 | assert(InOperandVal.getValueType() == |
| 10250 | TLI.getPointerTy(DAG.getDataLayout()) && |
| 10251 | "Memory operands expect pointer values" ); |
| 10252 | |
| 10253 | const InlineAsm::ConstraintCode ConstraintID = |
| 10254 | TLI.getInlineAsmMemConstraint(ConstraintCode: OpInfo.ConstraintCode); |
| 10255 | assert(ConstraintID != InlineAsm::ConstraintCode::Unknown && |
| 10256 | "Failed to convert memory constraint code to constraint id." ); |
| 10257 | |
| 10258 | // Add information to the INLINEASM node to know about this input. |
| 10259 | InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1); |
| 10260 | ResOpType.setMemConstraint(ConstraintID); |
| 10261 | AsmNodeOperands.push_back(x: DAG.getTargetConstant(Val: ResOpType, |
| 10262 | DL: getCurSDLoc(), |
| 10263 | VT: MVT::i32)); |
| 10264 | AsmNodeOperands.push_back(x: InOperandVal); |
| 10265 | break; |
| 10266 | } |
| 10267 | |
| 10268 | if (OpInfo.ConstraintType == TargetLowering::C_Address) { |
| 10269 | const InlineAsm::ConstraintCode ConstraintID = |
| 10270 | TLI.getInlineAsmMemConstraint(ConstraintCode: OpInfo.ConstraintCode); |
| 10271 | assert(ConstraintID != InlineAsm::ConstraintCode::Unknown && |
| 10272 | "Failed to convert memory constraint code to constraint id." ); |
| 10273 | |
| 10274 | InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1); |
| 10275 | |
| 10276 | SDValue AsmOp = InOperandVal; |
| 10277 | if (isFunction(Op: InOperandVal)) { |
| 10278 | auto *GA = cast<GlobalAddressSDNode>(Val&: InOperandVal); |
| 10279 | ResOpType = InlineAsm::Flag(InlineAsm::Kind::Func, 1); |
| 10280 | AsmOp = DAG.getTargetGlobalAddress(GV: GA->getGlobal(), DL: getCurSDLoc(), |
| 10281 | VT: InOperandVal.getValueType(), |
| 10282 | offset: GA->getOffset()); |
| 10283 | } |
| 10284 | |
| 10285 | // Add information to the INLINEASM node to know about this input. |
| 10286 | ResOpType.setMemConstraint(ConstraintID); |
| 10287 | |
| 10288 | AsmNodeOperands.push_back( |
| 10289 | x: DAG.getTargetConstant(Val: ResOpType, DL: getCurSDLoc(), VT: MVT::i32)); |
| 10290 | |
| 10291 | AsmNodeOperands.push_back(x: AsmOp); |
| 10292 | break; |
| 10293 | } |
| 10294 | |
| 10295 | if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass && |
| 10296 | OpInfo.ConstraintType != TargetLowering::C_Register) { |
| 10297 | emitInlineAsmError(Call, Message: "unknown asm constraint '" + |
| 10298 | Twine(OpInfo.ConstraintCode) + "'" ); |
| 10299 | return; |
| 10300 | } |
| 10301 | |
| 10302 | // TODO: Support this. |
| 10303 | if (OpInfo.isIndirect) { |
| 10304 | emitInlineAsmError( |
| 10305 | Call, Message: "Don't know how to handle indirect register inputs yet " |
| 10306 | "for constraint '" + |
| 10307 | Twine(OpInfo.ConstraintCode) + "'" ); |
| 10308 | return; |
| 10309 | } |
| 10310 | |
| 10311 | // Copy the input into the appropriate registers. |
| 10312 | if (OpInfo.AssignedRegs.Regs.empty()) { |
| 10313 | emitInlineAsmError(Call, |
| 10314 | Message: "couldn't allocate input reg for constraint '" + |
| 10315 | Twine(OpInfo.ConstraintCode) + "'" ); |
| 10316 | return; |
| 10317 | } |
| 10318 | |
| 10319 | if (DetectWriteToReservedRegister()) |
| 10320 | return; |
| 10321 | |
| 10322 | SDLoc dl = getCurSDLoc(); |
| 10323 | |
| 10324 | OpInfo.AssignedRegs.getCopyToRegs(Val: InOperandVal, DAG, dl, Chain, Glue: &Glue, |
| 10325 | V: &Call); |
| 10326 | |
| 10327 | OpInfo.AssignedRegs.AddInlineAsmOperands(Code: InlineAsm::Kind::RegUse, HasMatching: false, |
| 10328 | MatchingIdx: 0, dl, DAG, Ops&: AsmNodeOperands); |
| 10329 | break; |
| 10330 | } |
| 10331 | case InlineAsm::isClobber: |
| 10332 | // Add the clobbered value to the operand list, so that the register |
| 10333 | // allocator is aware that the physreg got clobbered. |
| 10334 | if (!OpInfo.AssignedRegs.Regs.empty()) |
| 10335 | OpInfo.AssignedRegs.AddInlineAsmOperands(Code: InlineAsm::Kind::Clobber, |
| 10336 | HasMatching: false, MatchingIdx: 0, dl: getCurSDLoc(), DAG, |
| 10337 | Ops&: AsmNodeOperands); |
| 10338 | break; |
| 10339 | } |
| 10340 | } |
| 10341 | |
| 10342 | // Finish up input operands. Set the input chain and add the flag last. |
| 10343 | AsmNodeOperands[InlineAsm::Op_InputChain] = Chain; |
| 10344 | if (Glue.getNode()) AsmNodeOperands.push_back(x: Glue); |
| 10345 | |
| 10346 | unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM; |
| 10347 | Chain = DAG.getNode(Opcode: ISDOpc, DL: getCurSDLoc(), |
| 10348 | VTList: DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue), Ops: AsmNodeOperands); |
| 10349 | Glue = Chain.getValue(R: 1); |
| 10350 | |
| 10351 | // Do additional work to generate outputs. |
| 10352 | |
| 10353 | SmallVector<EVT, 1> ResultVTs; |
| 10354 | SmallVector<SDValue, 1> ResultValues; |
| 10355 | SmallVector<SDValue, 8> OutChains; |
| 10356 | |
| 10357 | llvm::Type *CallResultType = Call.getType(); |
| 10358 | ArrayRef<Type *> ResultTypes; |
| 10359 | if (StructType *StructResult = dyn_cast<StructType>(Val: CallResultType)) |
| 10360 | ResultTypes = StructResult->elements(); |
| 10361 | else if (!CallResultType->isVoidTy()) |
| 10362 | ResultTypes = ArrayRef(CallResultType); |
| 10363 | |
| 10364 | auto CurResultType = ResultTypes.begin(); |
| 10365 | auto handleRegAssign = [&](SDValue V) { |
| 10366 | assert(CurResultType != ResultTypes.end() && "Unexpected value" ); |
| 10367 | assert((*CurResultType)->isSized() && "Unexpected unsized type" ); |
| 10368 | EVT ResultVT = TLI.getValueType(DL: DAG.getDataLayout(), Ty: *CurResultType); |
| 10369 | ++CurResultType; |
    // If the type of the inline asm call site return value is different
    // from, but has the same size as, the type of the asm output, bitcast
    // it. One example of this is vectors with different width / number of
    // elements. This can happen for register classes that can contain
    // multiple different value types. The preg or vreg allocated may not
    // have the same VT as was expected.
| 10376 | // |
| 10377 | // This can also happen for a return value that disagrees with the register |
| 10378 | // class it is put in, eg. a double in a general-purpose register on a |
| 10379 | // 32-bit machine. |
| 10380 | if (ResultVT != V.getValueType() && |
| 10381 | ResultVT.getSizeInBits() == V.getValueSizeInBits()) |
| 10382 | V = DAG.getNode(Opcode: ISD::BITCAST, DL: getCurSDLoc(), VT: ResultVT, Operand: V); |
| 10383 | else if (ResultVT != V.getValueType() && ResultVT.isInteger() && |
| 10384 | V.getValueType().isInteger()) { |
| 10385 | // If a result value was tied to an input value, the computed result |
| 10386 | // may have a wider width than the expected result. Extract the |
| 10387 | // relevant portion. |
| 10388 | V = DAG.getNode(Opcode: ISD::TRUNCATE, DL: getCurSDLoc(), VT: ResultVT, Operand: V); |
| 10389 | } |
| 10390 | assert(ResultVT == V.getValueType() && "Asm result value mismatch!" ); |
| 10391 | ResultVTs.push_back(Elt: ResultVT); |
| 10392 | ResultValues.push_back(Elt: V); |
| 10393 | }; |
| 10394 | |
| 10395 | // Deal with output operands. |
| 10396 | for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) { |
| 10397 | if (OpInfo.Type == InlineAsm::isOutput) { |
| 10398 | SDValue Val; |
| 10399 | // Skip trivial output operands. |
| 10400 | if (OpInfo.AssignedRegs.Regs.empty()) |
| 10401 | continue; |
| 10402 | |
| 10403 | switch (OpInfo.ConstraintType) { |
| 10404 | case TargetLowering::C_Register: |
| 10405 | case TargetLowering::C_RegisterClass: |
| 10406 | Val = OpInfo.AssignedRegs.getCopyFromRegs(DAG, FuncInfo, dl: getCurSDLoc(), |
| 10407 | Chain, Glue: &Glue, V: &Call); |
| 10408 | break; |
| 10409 | case TargetLowering::C_Immediate: |
| 10410 | case TargetLowering::C_Other: |
| 10411 | Val = TLI.LowerAsmOutputForConstraint(Chain, Glue, DL: getCurSDLoc(), |
| 10412 | OpInfo, DAG); |
| 10413 | break; |
| 10414 | case TargetLowering::C_Memory: |
| 10415 | break; // Already handled. |
| 10416 | case TargetLowering::C_Address: |
| 10417 | break; // Silence warning. |
| 10418 | case TargetLowering::C_Unknown: |
| 10419 | assert(false && "Unexpected unknown constraint" ); |
| 10420 | } |
| 10421 | |
| 10422 | // Indirect output manifest as stores. Record output chains. |
| 10423 | if (OpInfo.isIndirect) { |
| 10424 | const Value *Ptr = OpInfo.CallOperandVal; |
| 10425 | assert(Ptr && "Expected value CallOperandVal for indirect asm operand" ); |
| 10426 | SDValue Store = DAG.getStore(Chain, dl: getCurSDLoc(), Val, Ptr: getValue(V: Ptr), |
| 10427 | PtrInfo: MachinePointerInfo(Ptr)); |
| 10428 | OutChains.push_back(Elt: Store); |
| 10429 | } else { |
        // Generate CopyFromRegs results for the associated registers.
| 10431 | assert(!Call.getType()->isVoidTy() && "Bad inline asm!" ); |
| 10432 | if (Val.getOpcode() == ISD::MERGE_VALUES) { |
| 10433 | for (const SDValue &V : Val->op_values()) |
| 10434 | handleRegAssign(V); |
| 10435 | } else |
| 10436 | handleRegAssign(Val); |
| 10437 | } |
| 10438 | } |
| 10439 | } |
| 10440 | |
| 10441 | // Set results. |
| 10442 | if (!ResultValues.empty()) { |
| 10443 | assert(CurResultType == ResultTypes.end() && |
| 10444 | "Mismatch in number of ResultTypes" ); |
| 10445 | assert(ResultValues.size() == ResultTypes.size() && |
| 10446 | "Mismatch in number of output operands in asm result" ); |
| 10447 | |
| 10448 | SDValue V = DAG.getNode(Opcode: ISD::MERGE_VALUES, DL: getCurSDLoc(), |
| 10449 | VTList: DAG.getVTList(VTs: ResultVTs), Ops: ResultValues); |
| 10450 | setValue(V: &Call, NewN: V); |
| 10451 | } |
| 10452 | |
| 10453 | // Collect store chains. |
| 10454 | if (!OutChains.empty()) |
| 10455 | Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: getCurSDLoc(), VT: MVT::Other, Ops: OutChains); |
| 10456 | |
| 10457 | if (EmitEHLabels) { |
| 10458 | Chain = lowerEndEH(Chain, II: cast<InvokeInst>(Val: &Call), EHPadBB, BeginLabel); |
| 10459 | } |
| 10460 | |
  // Only update the root if the inline assembly has a memory effect.
| 10462 | if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr || |
| 10463 | EmitEHLabels) |
| 10464 | DAG.setRoot(Chain); |
| 10465 | } |
| 10466 | |
| 10467 | void SelectionDAGBuilder::emitInlineAsmError(const CallBase &Call, |
| 10468 | const Twine &Message) { |
| 10469 | LLVMContext &Ctx = *DAG.getContext(); |
| 10470 | Ctx.diagnose(DI: DiagnosticInfoInlineAsm(Call, Message)); |
| 10471 | |
| 10472 | // Make sure we leave the DAG in a valid state |
| 10473 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 10474 | SmallVector<EVT, 1> ValueVTs; |
| 10475 | ComputeValueVTs(TLI, DL: DAG.getDataLayout(), Ty: Call.getType(), ValueVTs); |
| 10476 | |
| 10477 | if (ValueVTs.empty()) |
| 10478 | return; |
| 10479 | |
| 10480 | SmallVector<SDValue, 1> Ops; |
| 10481 | for (const EVT &VT : ValueVTs) |
| 10482 | Ops.push_back(Elt: DAG.getUNDEF(VT)); |
| 10483 | |
| 10484 | setValue(V: &Call, NewN: DAG.getMergeValues(Ops, dl: getCurSDLoc())); |
| 10485 | } |
| 10486 | |
| 10487 | void SelectionDAGBuilder::visitVAStart(const CallInst &I) { |
| 10488 | DAG.setRoot(DAG.getNode(Opcode: ISD::VASTART, DL: getCurSDLoc(), |
| 10489 | VT: MVT::Other, N1: getRoot(), |
| 10490 | N2: getValue(V: I.getArgOperand(i: 0)), |
| 10491 | N3: DAG.getSrcValue(v: I.getArgOperand(i: 0)))); |
| 10492 | } |
| 10493 | |
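/// Lower a va_arg instruction. For example, "%v = va_arg ptr %ap, i32"
/// becomes an ISD::VAARG node; its chain result re-roots the DAG because
/// va_arg also updates the list pointer in memory.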
| 10494 | void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) { |
| 10495 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 10496 | const DataLayout &DL = DAG.getDataLayout(); |
| 10497 | SDValue V = DAG.getVAArg( |
| 10498 | VT: TLI.getMemValueType(DL: DAG.getDataLayout(), Ty: I.getType()), dl: getCurSDLoc(), |
| 10499 | Chain: getRoot(), Ptr: getValue(V: I.getOperand(i_nocapture: 0)), SV: DAG.getSrcValue(v: I.getOperand(i_nocapture: 0)), |
| 10500 | Align: DL.getABITypeAlign(Ty: I.getType()).value()); |
| 10501 | DAG.setRoot(V.getValue(R: 1)); |
| 10502 | |
| 10503 | if (I.getType()->isPointerTy()) |
| 10504 | V = DAG.getPtrExtOrTrunc( |
| 10505 | Op: V, DL: getCurSDLoc(), VT: TLI.getValueType(DL: DAG.getDataLayout(), Ty: I.getType())); |
| 10506 | setValue(V: &I, NewN: V); |
| 10507 | } |
| 10508 | |
| 10509 | void SelectionDAGBuilder::visitVAEnd(const CallInst &I) { |
| 10510 | DAG.setRoot(DAG.getNode(Opcode: ISD::VAEND, DL: getCurSDLoc(), |
| 10511 | VT: MVT::Other, N1: getRoot(), |
| 10512 | N2: getValue(V: I.getArgOperand(i: 0)), |
| 10513 | N3: DAG.getSrcValue(v: I.getArgOperand(i: 0)))); |
| 10514 | } |
| 10515 | |
| 10516 | void SelectionDAGBuilder::visitVACopy(const CallInst &I) { |
| 10517 | DAG.setRoot(DAG.getNode(Opcode: ISD::VACOPY, DL: getCurSDLoc(), |
| 10518 | VT: MVT::Other, N1: getRoot(), |
| 10519 | N2: getValue(V: I.getArgOperand(i: 0)), |
| 10520 | N3: getValue(V: I.getArgOperand(i: 1)), |
| 10521 | N4: DAG.getSrcValue(v: I.getArgOperand(i: 0)), |
| 10522 | N5: DAG.getSrcValue(v: I.getArgOperand(i: 1)))); |
| 10523 | } |
| 10524 | |
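/// Wrap \p Op in AssertZext when range metadata proves the high bits are
/// zero. For example, !range metadata of [0, 256) yields an AssertZext with
/// an i8 assertion type, recording that only the low 8 bits may be set.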
| 10525 | SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG, |
| 10526 | const Instruction &I, |
| 10527 | SDValue Op) { |
| 10528 | std::optional<ConstantRange> CR = getRange(I); |
| 10529 | |
| 10530 | if (!CR || CR->isFullSet() || CR->isEmptySet() || CR->isUpperWrapped()) |
| 10531 | return Op; |
| 10532 | |
| 10533 | APInt Lo = CR->getUnsignedMin(); |
| 10534 | if (!Lo.isMinValue()) |
| 10535 | return Op; |
| 10536 | |
| 10537 | APInt Hi = CR->getUnsignedMax(); |
| 10538 | unsigned Bits = std::max(a: Hi.getActiveBits(), |
| 10539 | b: static_cast<unsigned>(IntegerType::MIN_INT_BITS)); |
| 10540 | |
| 10541 | EVT SmallVT = EVT::getIntegerVT(Context&: *DAG.getContext(), BitWidth: Bits); |
| 10542 | |
| 10543 | SDLoc SL = getCurSDLoc(); |
| 10544 | |
| 10545 | SDValue ZExt = DAG.getNode(Opcode: ISD::AssertZext, DL: SL, VT: Op.getValueType(), N1: Op, |
| 10546 | N2: DAG.getValueType(SmallVT)); |
| 10547 | unsigned NumVals = Op.getNode()->getNumValues(); |
| 10548 | if (NumVals == 1) |
| 10549 | return ZExt; |
| 10550 | |
| 10551 | SmallVector<SDValue, 4> Ops; |
| 10552 | |
| 10553 | Ops.push_back(Elt: ZExt); |
| 10554 | for (unsigned I = 1; I != NumVals; ++I) |
| 10555 | Ops.push_back(Elt: Op.getValue(R: I)); |
| 10556 | |
| 10557 | return DAG.getMergeValues(Ops, dl: SL); |
| 10558 | } |
| 10559 | |
/// Populate a CallLoweringInfo (into \p CLI) based on the properties of
| 10561 | /// the call being lowered. |
| 10562 | /// |
| 10563 | /// This is a helper for lowering intrinsics that follow a target calling |
| 10564 | /// convention or require stack pointer adjustment. Only a subset of the |
| 10565 | /// intrinsic's operands need to participate in the calling convention. |
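/// For example, a patchpoint lowers only the slice
/// [\p ArgIdx, \p ArgIdx + \p NumArgs) of the intrinsic's operands as real
/// call arguments; its id/nbytes/callee operands stay outside the calling
/// convention.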
| 10566 | void SelectionDAGBuilder::populateCallLoweringInfo( |
| 10567 | TargetLowering::CallLoweringInfo &CLI, const CallBase *Call, |
| 10568 | unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy, |
| 10569 | AttributeSet RetAttrs, bool IsPatchPoint) { |
| 10570 | TargetLowering::ArgListTy Args; |
| 10571 | Args.reserve(n: NumArgs); |
| 10572 | |
| 10573 | // Populate the argument list. |
| 10574 | // Attributes for args start at offset 1, after the return attribute. |
| 10575 | for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; |
| 10576 | ArgI != ArgE; ++ArgI) { |
| 10577 | const Value *V = Call->getOperand(i_nocapture: ArgI); |
| 10578 | |
| 10579 | assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic." ); |
| 10580 | |
| 10581 | TargetLowering::ArgListEntry Entry; |
| 10582 | Entry.Node = getValue(V); |
| 10583 | Entry.Ty = V->getType(); |
| 10584 | Entry.setAttributes(Call, ArgIdx: ArgI); |
| 10585 | Args.push_back(x: Entry); |
| 10586 | } |
| 10587 | |
| 10588 | CLI.setDebugLoc(getCurSDLoc()) |
| 10589 | .setChain(getRoot()) |
| 10590 | .setCallee(CC: Call->getCallingConv(), ResultType: ReturnTy, Target: Callee, ArgsList: std::move(Args), |
| 10591 | ResultAttrs: RetAttrs) |
| 10592 | .setDiscardResult(Call->use_empty()) |
| 10593 | .setIsPatchPoint(IsPatchPoint) |
| 10594 | .setIsPreallocated( |
| 10595 | Call->countOperandBundlesOfType(ID: LLVMContext::OB_preallocated) != 0); |
| 10596 | } |
| 10597 | |
| 10598 | /// Add a stack map intrinsic call's live variable operands to a stackmap |
| 10599 | /// or patchpoint target node's operand list. |
| 10600 | /// |
| 10601 | /// Constants are converted to TargetConstants purely as an optimization to |
| 10602 | /// avoid constant materialization and register allocation. |
| 10603 | /// |
| 10604 | /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not |
/// generate address computation nodes, and so FinalizeISel can convert the
| 10606 | /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids |
| 10607 | /// address materialization and register allocation, but may also be required |
| 10608 | /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an |
| 10609 | /// alloca in the entry block, then the runtime may assume that the alloca's |
| 10610 | /// StackMap location can be read immediately after compilation and that the |
| 10611 | /// location is valid at any point during execution (this is similar to the |
| 10612 | /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were |
| 10613 | /// only available in a register, then the runtime would need to trap when |
| 10614 | /// execution reaches the StackMap in order to read the alloca's location. |
| 10615 | static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx, |
| 10616 | const SDLoc &DL, SmallVectorImpl<SDValue> &Ops, |
| 10617 | SelectionDAGBuilder &Builder) { |
| 10618 | SelectionDAG &DAG = Builder.DAG; |
| 10619 | for (unsigned I = StartIdx; I < Call.arg_size(); I++) { |
    SDValue Op = Builder.getValue(Call.getArgOperand(I));

    // Things on the stack are pointer-typed, meaning that they are already
    // legal and can be emitted directly to target nodes.
    if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
      Ops.push_back(DAG.getTargetFrameIndex(FI->getIndex(), Op.getValueType()));
    } else {
      // Otherwise emit a target independent node to be legalised.
      Ops.push_back(Builder.getValue(Call.getArgOperand(I)));
    }
| 10629 | } |
| 10630 | } |
| 10631 | } |
| 10632 | |
| 10633 | /// Lower llvm.experimental.stackmap. |
| 10634 | void SelectionDAGBuilder::visitStackmap(const CallInst &CI) { |
| 10635 | // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>, |
| 10636 | // [live variables...]) |
| 10637 | |
  assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");

  SDValue Chain, InGlue, Callee;
  SmallVector<SDValue, 32> Ops;

  SDLoc DL = getCurSDLoc();
  Callee = getValue(CI.getCalledOperand());

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPs (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // chain, flag = CALLSEQ_START(chain, 0, 0)
  // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
  // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
  //
  Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
  InGlue = Chain.getValue(1);

  // Add the STACKMAP operands, starting with DAG house-keeping.
  Ops.push_back(Chain);
  Ops.push_back(InGlue);

  // Add the <id>, <numShadowBytes> operands.
  //
  // These do not require legalisation, and can be emitted directly to target
  // constant nodes.
  SDValue ID = getValue(CI.getArgOperand(0));
  assert(ID.getValueType() == MVT::i64);
  SDValue IDConst =
      DAG.getTargetConstant(ID->getAsZExtVal(), DL, ID.getValueType());
  Ops.push_back(IDConst);

  SDValue Shad = getValue(CI.getArgOperand(1));
  assert(Shad.getValueType() == MVT::i32);
  SDValue ShadConst =
      DAG.getTargetConstant(Shad->getAsZExtVal(), DL, Shad.getValueType());
  Ops.push_back(ShadConst);

  // Add the live variables.
  addStackMapLiveVars(CI, 2, DL, Ops, *this);

  // Create the STACKMAP node.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(ISD::STACKMAP, DL, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, DL);
| 10688 | |
| 10689 | // Stackmaps don't generate values, so nothing goes into the NodeMap. |
| 10690 | |
| 10691 | // Set the root to the target-lowered call chain. |
| 10692 | DAG.setRoot(Chain); |
| 10693 | |
| 10694 | // Inform the Frame Information that we have a stackmap in this function. |
| 10695 | FuncInfo.MF->getFrameInfo().setHasStackMap(); |
| 10696 | } |
| 10697 | |
| 10698 | /// Lower llvm.experimental.patchpoint directly to its target opcode. |
| 10699 | void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB, |
| 10700 | const BasicBlock *EHPadBB) { |
| 10701 | // <ty> @llvm.experimental.patchpoint.<ty>(i64 <id>, |
| 10702 | // i32 <numBytes>, |
| 10703 | // i8* <target>, |
| 10704 | // i32 <numArgs>, |
| 10705 | // [Args...], |
| 10706 | // [live variables...]) |
| 10707 | |
| 10708 | CallingConv::ID CC = CB.getCallingConv(); |
| 10709 | bool IsAnyRegCC = CC == CallingConv::AnyReg; |
| 10710 | bool HasDef = !CB.getType()->isVoidTy(); |
| 10711 | SDLoc dl = getCurSDLoc(); |
  SDValue Callee = getValue(CB.getArgOperand(PatchPointOpers::TargetPos));

  // Handle immediate and symbolic callees.
  if (auto *ConstCallee = dyn_cast<ConstantSDNode>(Callee))
    Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
                                   /*isTarget=*/true);
  else if (auto *SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
                                        SDLoc(SymbolicCallee),
                                        SymbolicCallee->getValueType(0));

  // Get the real number of arguments participating in the call <numArgs>
  SDValue NArgVal = getValue(CB.getArgOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NArgVal->getAsZExtVal();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // Intrinsics include all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(CB.arg_size() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  Type *ReturnTy =
      IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CB.getType();

  TargetLowering::CallLoweringInfo CLI(DAG);
  populateCallLoweringInfo(CLI, &CB, NumMetaOpers, NumCallArgs, Callee,
                           ReturnTy, CB.getAttributes().getRetAttrs(),
                           /*IsPatchPoint=*/true);
  std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);

  SDNode *CallEnd = Result.second.getNode();
  if (CallEnd->getOpcode() == ISD::EH_LABEL)
    CallEnd = CallEnd->getOperand(0).getNode();
  if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
    CallEnd = CallEnd->getOperand(0).getNode();

  /// Get a call instruction from the call sequence chain.
  /// Tail calls are not allowed.
  assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
         "Expected a callseq node.");
  SDNode *Call = CallEnd->getOperand(0).getNode();
  bool HasGlue = Call->getGluedNode();

  // Replace the target specific call node with the patchable intrinsic.
  SmallVector<SDValue, 8> Ops;

  // Push the chain.
  Ops.push_back(*(Call->op_begin()));

  // Optionally, push the glue (if any).
  if (HasGlue)
    Ops.push_back(*(Call->op_end() - 1));

  // Push the register mask info.
  if (HasGlue)
    Ops.push_back(*(Call->op_end() - 2));
  else
    Ops.push_back(*(Call->op_end() - 1));

  // Add the <id> and <numBytes> constants.
  SDValue IDVal = getValue(CB.getArgOperand(PatchPointOpers::IDPos));
  Ops.push_back(DAG.getTargetConstant(IDVal->getAsZExtVal(), dl, MVT::i64));
  SDValue NBytesVal = getValue(CB.getArgOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(DAG.getTargetConstant(NBytesVal->getAsZExtVal(), dl, MVT::i32));

  // Add the callee.
  Ops.push_back(Callee);

  // Adjust <numArgs> to account for any arguments that have been passed on the
  // stack instead.
  // Call Node: Chain, Target, {Args}, RegMask, [Glue]
  unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
  NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
  Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));

  // Add the calling convention
  Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC)
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
      Ops.push_back(getValue(CB.getArgOperand(i)));

  // Push the arguments from the call instruction.
  SDNode::op_iterator e = HasGlue ? Call->op_end() - 2 : Call->op_end() - 1;
  Ops.append(Call->op_begin() + 2, e);

  // Push live variables for the stack map.
  addStackMapLiveVars(CB, NumMetaOpers + NumArgs, dl, Ops, *this);
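
  // At this point Ops holds, in order: the chain, the optional glue, the
  // register mask, <id>, <numBytes>, the callee, <numArgs>, <cc>, the
  // AnyRegCC arguments (if any), the register arguments taken from the
  // lowered call, and finally the stack map live variables.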
| 10803 | |
  SDVTList NodeTys;
  if (IsAnyRegCC && HasDef) {
    // Create the return types based on the intrinsic definition
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    SmallVector<EVT, 3> ValueVTs;
    ComputeValueVTs(TLI, DAG.getDataLayout(), CB.getType(), ValueVTs);
    assert(ValueVTs.size() == 1 && "Expected only one return value type.");

    // There is always a chain and a glue type at the end
    ValueVTs.push_back(MVT::Other);
    ValueVTs.push_back(MVT::Glue);
    NodeTys = DAG.getVTList(ValueVTs);
  } else
    NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

  // Replace the target specific call node with a PATCHPOINT node.
  SDValue PPV = DAG.getNode(ISD::PATCHPOINT, dl, NodeTys, Ops);

  // Update the NodeMap.
  if (HasDef) {
    if (IsAnyRegCC)
      setValue(&CB, SDValue(PPV.getNode(), 0));
    else
      setValue(&CB, Result.first);
  }

  // Fixup the consumers of the intrinsic. The chain and glue may be used in the
  // call sequence. Furthermore the location of the chain and glue can change
  // when the AnyReg calling convention is used and the intrinsic returns a
  // value.
  if (IsAnyRegCC && HasDef) {
    SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
    SDValue To[] = {PPV.getValue(1), PPV.getValue(2)};
    DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
  } else
    DAG.ReplaceAllUsesWith(Call, PPV.getNode());
  DAG.DeleteNode(Call);
| 10841 | |
| 10842 | // Inform the Frame Information that we have a patchpoint in this function. |
| 10843 | FuncInfo.MF->getFrameInfo().setHasPatchPoint(); |
| 10844 | } |
| 10845 | |
| 10846 | void SelectionDAGBuilder::visitVectorReduce(const CallInst &I, |
| 10847 | unsigned Intrinsic) { |
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Op1 = getValue(I.getArgOperand(0));
  SDValue Op2;
  if (I.arg_size() > 1)
    Op2 = getValue(I.getArgOperand(1));
  SDLoc dl = getCurSDLoc();
  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
  SDValue Res;
  SDNodeFlags SDFlags;
  if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
    SDFlags.copyFMF(*FPMO);

  switch (Intrinsic) {
  case Intrinsic::vector_reduce_fadd:
    if (SDFlags.hasAllowReassociation())
      Res = DAG.getNode(ISD::FADD, dl, VT, Op1,
                        DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags),
                        SDFlags);
    else
      Res = DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags);
    break;
  case Intrinsic::vector_reduce_fmul:
    if (SDFlags.hasAllowReassociation())
      Res = DAG.getNode(ISD::FMUL, dl, VT, Op1,
                        DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags),
                        SDFlags);
    else
      Res = DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags);
    break;
  case Intrinsic::vector_reduce_add:
    Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
    break;
  case Intrinsic::vector_reduce_mul:
    Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
    break;
  case Intrinsic::vector_reduce_and:
    Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
    break;
  case Intrinsic::vector_reduce_or:
    Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
    break;
  case Intrinsic::vector_reduce_xor:
    Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
    break;
  case Intrinsic::vector_reduce_smax:
    Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
    break;
  case Intrinsic::vector_reduce_smin:
    Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
    break;
  case Intrinsic::vector_reduce_umax:
    Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
    break;
  case Intrinsic::vector_reduce_umin:
    Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
    break;
  case Intrinsic::vector_reduce_fmax:
    Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
    break;
  case Intrinsic::vector_reduce_fmin:
    Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
    break;
  case Intrinsic::vector_reduce_fmaximum:
    Res = DAG.getNode(ISD::VECREDUCE_FMAXIMUM, dl, VT, Op1, SDFlags);
    break;
  case Intrinsic::vector_reduce_fminimum:
    Res = DAG.getNode(ISD::VECREDUCE_FMINIMUM, dl, VT, Op1, SDFlags);
    break;
  default:
    llvm_unreachable("Unhandled vector reduce intrinsic");
  }
  setValue(&I, Res);
| 10920 | } |
| 10921 | |
| 10922 | /// Returns an AttributeList representing the attributes applied to the return |
| 10923 | /// value of the given call. |
| 10924 | static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) { |
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeList::get(CLI.RetTy->getContext(),
                            AttributeList::ReturnIndex, Attrs);
| 10935 | } |
| 10936 | |
| 10937 | /// TargetLowering::LowerCallTo - This is the default LowerCallTo |
| 10938 | /// implementation, which just calls LowerCall. |
| 10939 | /// FIXME: When all targets are |
| 10940 | /// migrated to using LowerCall, this hook should be integrated into SDISel. |
| 10941 | std::pair<SDValue, SDValue> |
| 10942 | TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const { |
| 10943 | // Handle the incoming return values from the call. |
| 10944 | CLI.Ins.clear(); |
| 10945 | SmallVector<EVT, 4> RetTys; |
| 10946 | SmallVector<TypeSize, 4> Offsets; |
| 10947 | auto &DL = CLI.DAG.getDataLayout(); |
  ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);

  if (CLI.IsPostTypeLegalization) {
    // If we are lowering a libcall after legalization, split the return type.
    SmallVector<EVT, 4> OldRetTys;
    SmallVector<TypeSize, 4> OldOffsets;
    RetTys.swap(OldRetTys);
    Offsets.swap(OldOffsets);

    for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
      EVT RetVT = OldRetTys[i];
      uint64_t Offset = OldOffsets[i];
      MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
      unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
      unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
      RetTys.append(NumRegs, RegisterVT);
      for (unsigned j = 0; j != NumRegs; ++j)
        Offsets.push_back(TypeSize::getFixed(Offset + j * RegisterVTByteSZ));
    }
  }

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);

  bool CanLowerReturn =
      this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
                           CLI.IsVarArg, Outs, CLI.RetTy->getContext(),
                           CLI.RetTy);
| 10975 | |
| 10976 | SDValue DemoteStackSlot; |
| 10977 | int DemoteStackIdx = -100; |
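  // Sketch of the sret demotion performed below (illustrative types): a call
  //
  //   %agg = call { i64, i64, i64 } @f()
  //
  // whose return type the target cannot lower in registers is rewritten,
  // conceptually, as a call void @f(ptr sret %slot) with %slot a fresh stack
  // object; the aggregate is reassembled by loads from %slot after the call
  // (see the !CanLowerReturn block following LowerCall).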
| 10978 | if (!CanLowerReturn) { |
| 10979 | // FIXME: equivalent assert? |
| 10980 | // assert(!CS.hasInAllocaArgument() && |
| 10981 | // "sret demotion is incompatible with inalloca"); |
    uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
    Align Alignment = DL.getPrefTypeAlign(CLI.RetTy);
    MachineFunction &MF = CLI.DAG.getMachineFunction();
    DemoteStackIdx =
        MF.getFrameInfo().CreateStackObject(TySize, Alignment, false);
    Type *StackSlotPtrType =
        PointerType::get(CLI.RetTy->getContext(), DL.getAllocaAddrSpace());

    DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
    ArgListEntry Entry;
    Entry.Node = DemoteStackSlot;
    Entry.Ty = StackSlotPtrType;
    Entry.IsSExt = false;
    Entry.IsZExt = false;
    Entry.IsInReg = false;
    Entry.IsSRet = true;
    Entry.IsNest = false;
    Entry.IsByVal = false;
    Entry.IsByRef = false;
    Entry.IsReturned = false;
    Entry.IsSwiftSelf = false;
    Entry.IsSwiftAsync = false;
    Entry.IsSwiftError = false;
    Entry.IsCFGuardTarget = false;
    Entry.Alignment = Alignment;
    CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
    CLI.NumFixedArgs += 1;
    CLI.getArgs()[0].IndirectType = CLI.RetTy;
    CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
| 11011 | |
| 11012 | // sret demotion isn't compatible with tail-calls, since the sret argument |
| 11013 | // points into the callers stack frame. |
| 11014 | CLI.IsTailCall = false; |
| 11015 | } else { |
| 11016 | bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters( |
        CLI.RetTy, CLI.CallConv, CLI.IsVarArg, DL);
    for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
      ISD::ArgFlagsTy Flags;
      if (NeedsRegBlock) {
        Flags.setInConsecutiveRegs();
        if (I == RetTys.size() - 1)
          Flags.setInConsecutiveRegsLast();
      }
      EVT VT = RetTys[I];
      MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
                                                     CLI.CallConv, VT);
      unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
                                                       CLI.CallConv, VT);
      for (unsigned i = 0; i != NumRegs; ++i) {
        ISD::InputArg MyFlags;
        MyFlags.Flags = Flags;
        MyFlags.VT = RegisterVT;
        MyFlags.ArgVT = VT;
        MyFlags.Used = CLI.IsReturnValueUsed;
        if (CLI.RetTy->isPointerTy()) {
          MyFlags.Flags.setPointer();
          MyFlags.Flags.setPointerAddrSpace(
              cast<PointerType>(CLI.RetTy)->getAddressSpace());
        }
        if (CLI.RetSExt)
          MyFlags.Flags.setSExt();
        if (CLI.RetZExt)
          MyFlags.Flags.setZExt();
        if (CLI.IsInReg)
          MyFlags.Flags.setInReg();
        CLI.Ins.push_back(MyFlags);
| 11048 | } |
| 11049 | } |
| 11050 | } |
| 11051 | |
| 11052 | // We push in swifterror return as the last element of CLI.Ins. |
| 11053 | ArgListTy &Args = CLI.getArgs(); |
| 11054 | if (supportSwiftError()) { |
| 11055 | for (const ArgListEntry &Arg : Args) { |
| 11056 | if (Arg.IsSwiftError) { |
| 11057 | ISD::InputArg MyFlags; |
| 11058 | MyFlags.VT = getPointerTy(DL); |
| 11059 | MyFlags.ArgVT = EVT(getPointerTy(DL)); |
| 11060 | MyFlags.Flags.setSwiftError(); |
        CLI.Ins.push_back(MyFlags);
| 11062 | } |
| 11063 | } |
| 11064 | } |
| 11065 | |
| 11066 | // Handle all of the outgoing arguments. |
| 11067 | CLI.Outs.clear(); |
| 11068 | CLI.OutVals.clear(); |
| 11069 | for (unsigned i = 0, e = Args.size(); i != e; ++i) { |
| 11070 | SmallVector<EVT, 4> ValueVTs; |
    ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
| 11072 | // FIXME: Split arguments if CLI.IsPostTypeLegalization |
| 11073 | Type *FinalType = Args[i].Ty; |
| 11074 | if (Args[i].IsByVal) |
| 11075 | FinalType = Args[i].IndirectType; |
| 11076 | bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters( |
        FinalType, CLI.CallConv, CLI.IsVarArg, DL);
| 11078 | for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues; |
| 11079 | ++Value) { |
| 11080 | EVT VT = ValueVTs[Value]; |
      EVT VT = ValueVTs[Value];
      Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
      SDValue Op = SDValue(Args[i].Node.getNode(),
                           Args[i].Node.getResNo() + Value);
| 11084 | ISD::ArgFlagsTy Flags; |
| 11085 | |
| 11086 | // Certain targets (such as MIPS), may have a different ABI alignment |
| 11087 | // for a type depending on the context. Give the target a chance to |
| 11088 | // specify the alignment it wants. |
| 11089 | const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL)); |
| 11090 | Flags.setOrigAlign(OriginalAlignment); |
| 11091 | |
| 11092 | if (Args[i].Ty->isPointerTy()) { |
| 11093 | Flags.setPointer(); |
| 11094 | Flags.setPointerAddrSpace( |
            cast<PointerType>(Args[i].Ty)->getAddressSpace());
| 11096 | } |
| 11097 | if (Args[i].IsZExt) |
| 11098 | Flags.setZExt(); |
| 11099 | if (Args[i].IsSExt) |
| 11100 | Flags.setSExt(); |
| 11101 | if (Args[i].IsNoExt) |
| 11102 | Flags.setNoExt(); |
| 11103 | if (Args[i].IsInReg) { |
| 11104 | // If we are using vectorcall calling convention, a structure that is |
| 11105 | // passed InReg - is surely an HVA |
| 11106 | if (CLI.CallConv == CallingConv::X86_VectorCall && |
            isa<StructType>(FinalType)) {
| 11108 | // The first value of a structure is marked |
| 11109 | if (0 == Value) |
| 11110 | Flags.setHvaStart(); |
| 11111 | Flags.setHva(); |
| 11112 | } |
| 11113 | // Set InReg Flag |
| 11114 | Flags.setInReg(); |
| 11115 | } |
| 11116 | if (Args[i].IsSRet) |
| 11117 | Flags.setSRet(); |
| 11118 | if (Args[i].IsSwiftSelf) |
| 11119 | Flags.setSwiftSelf(); |
| 11120 | if (Args[i].IsSwiftAsync) |
| 11121 | Flags.setSwiftAsync(); |
| 11122 | if (Args[i].IsSwiftError) |
| 11123 | Flags.setSwiftError(); |
| 11124 | if (Args[i].IsCFGuardTarget) |
| 11125 | Flags.setCFGuardTarget(); |
| 11126 | if (Args[i].IsByVal) |
| 11127 | Flags.setByVal(); |
| 11128 | if (Args[i].IsByRef) |
| 11129 | Flags.setByRef(); |
| 11130 | if (Args[i].IsPreallocated) { |
| 11131 | Flags.setPreallocated(); |
| 11132 | // Set the byval flag for CCAssignFn callbacks that don't know about |
| 11133 | // preallocated. This way we can know how many bytes we should've |
| 11134 | // allocated and how many bytes a callee cleanup function will pop. If |
| 11135 | // we port preallocated to more targets, we'll have to add custom |
| 11136 | // preallocated handling in the various CC lowering callbacks. |
| 11137 | Flags.setByVal(); |
| 11138 | } |
| 11139 | if (Args[i].IsInAlloca) { |
| 11140 | Flags.setInAlloca(); |
| 11141 | // Set the byval flag for CCAssignFn callbacks that don't know about |
| 11142 | // inalloca. This way we can know how many bytes we should've allocated |
| 11143 | // and how many bytes a callee cleanup function will pop. If we port |
| 11144 | // inalloca to more targets, we'll have to add custom inalloca handling |
| 11145 | // in the various CC lowering callbacks. |
| 11146 | Flags.setByVal(); |
| 11147 | } |
| 11148 | Align MemAlign; |
| 11149 | if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) { |
        unsigned FrameSize = DL.getTypeAllocSize(Args[i].IndirectType);
        Flags.setByValSize(FrameSize);

        // Size and alignment should be passed from the FE. The BE will guess
        // if this info is not there but there are cases it cannot get right.
        if (auto MA = Args[i].Alignment)
          MemAlign = *MA;
        else
          MemAlign = getByValTypeAlignment(Args[i].IndirectType, DL);
| 11158 | } else if (auto MA = Args[i].Alignment) { |
| 11159 | MemAlign = *MA; |
| 11160 | } else { |
| 11161 | MemAlign = OriginalAlignment; |
| 11162 | } |
| 11163 | Flags.setMemAlign(MemAlign); |
| 11164 | if (Args[i].IsNest) |
| 11165 | Flags.setNest(); |
| 11166 | if (NeedsRegBlock) |
| 11167 | Flags.setInConsecutiveRegs(); |
| 11168 | |
      MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
                                                 CLI.CallConv, VT);
      unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
                                                        CLI.CallConv, VT);
| 11173 | SmallVector<SDValue, 4> Parts(NumParts); |
| 11174 | ISD::NodeType ExtendKind = ISD::ANY_EXTEND; |
| 11175 | |
| 11176 | if (Args[i].IsSExt) |
| 11177 | ExtendKind = ISD::SIGN_EXTEND; |
| 11178 | else if (Args[i].IsZExt) |
| 11179 | ExtendKind = ISD::ZERO_EXTEND; |
| 11180 | |
| 11181 | // Conservatively only handle 'returned' on non-vectors that can be lowered, |
| 11182 | // for now. |
| 11183 | if (Args[i].IsReturned && !Op.getValueType().isVector() && |
| 11184 | CanLowerReturn) { |
| 11185 | assert((CLI.RetTy == Args[i].Ty || |
| 11186 | (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() && |
| 11187 | CLI.RetTy->getPointerAddressSpace() == |
| 11188 | Args[i].Ty->getPointerAddressSpace())) && |
               RetTys.size() == NumValues && "unexpected use of 'returned'");
| 11190 | // Before passing 'returned' to the target lowering code, ensure that |
| 11191 | // either the register MVT and the actual EVT are the same size or that |
| 11192 | // the return value and argument are extended in the same way; in these |
| 11193 | // cases it's safe to pass the argument register value unchanged as the |
| 11194 | // return register value (although it's at the target's option whether |
| 11195 | // to do so) |
| 11196 | // TODO: allow code generation to take advantage of partially preserved |
| 11197 | // registers rather than clobbering the entire register when the |
| 11198 | // parameter extension method is not compatible with the return |
| 11199 | // extension method |
| 11200 | if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) || |
| 11201 | (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt && |
| 11202 | CLI.RetZExt == Args[i].IsZExt)) |
| 11203 | Flags.setReturned(); |
| 11204 | } |
| 11205 | |
      getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, CLI.CB,
                     CLI.CallConv, ExtendKind);
| 11208 | |
| 11209 | for (unsigned j = 0; j != NumParts; ++j) { |
| 11210 | // if it isn't first piece, alignment must be 1 |
| 11211 | // For scalable vectors the scalable part is currently handled |
| 11212 | // by individual targets, so we just use the known minimum size here. |
| 11213 | ISD::OutputArg MyFlags( |
| 11214 | Flags, Parts[j].getValueType().getSimpleVT(), VT, |
| 11215 | i < CLI.NumFixedArgs, i, |
| 11216 | j * Parts[j].getValueType().getStoreSize().getKnownMinValue()); |
| 11217 | if (NumParts > 1 && j == 0) |
| 11218 | MyFlags.Flags.setSplit(); |
| 11219 | else if (j != 0) { |
| 11220 | MyFlags.Flags.setOrigAlign(Align(1)); |
| 11221 | if (j == NumParts - 1) |
| 11222 | MyFlags.Flags.setSplitEnd(); |
| 11223 | } |
| 11224 | |
        CLI.Outs.push_back(MyFlags);
        CLI.OutVals.push_back(Parts[j]);
| 11227 | } |
| 11228 | |
| 11229 | if (NeedsRegBlock && Value == NumValues - 1) |
| 11230 | CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast(); |
| 11231 | } |
| 11232 | } |
| 11233 | |
| 11234 | SmallVector<SDValue, 4> InVals; |
| 11235 | CLI.Chain = LowerCall(CLI, InVals); |
| 11236 | |
| 11237 | // Update CLI.InVals to use outside of this function. |
| 11238 | CLI.InVals = InVals; |
| 11239 | |
| 11240 | // Verify that the target's LowerCall behaved as expected. |
  assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
         "LowerCall didn't return a valid chain!");
  assert((!CLI.IsTailCall || InVals.empty()) &&
         "LowerCall emitted a return value for a tail call!");
  assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
         "LowerCall didn't emit the correct number of values!");
| 11247 | |
| 11248 | // For a tail call, the return value is merely live-out and there aren't |
| 11249 | // any nodes in the DAG representing it. Return a special value to |
| 11250 | // indicate that a tail call has been emitted and no more Instructions |
| 11251 | // should be processed in the current block. |
| 11252 | if (CLI.IsTailCall) { |
| 11253 | CLI.DAG.setRoot(CLI.Chain); |
    return std::make_pair(SDValue(), SDValue());
| 11255 | } |
| 11256 | |
| 11257 | #ifndef NDEBUG |
| 11258 | for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) { |
    assert(InVals[i].getNode() && "LowerCall emitted a null value!");
    assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
           "LowerCall emitted a value with the wrong type!");
| 11262 | } |
| 11263 | #endif |
| 11264 | |
| 11265 | SmallVector<SDValue, 4> ReturnValues; |
| 11266 | if (!CanLowerReturn) { |
| 11267 | // The instruction result is the result of loading from the |
| 11268 | // hidden sret parameter. |
    MVT PtrVT = getPointerTy(DL, DL.getAllocaAddrSpace());

    unsigned NumValues = RetTys.size();
    ReturnValues.resize(NumValues);
    SmallVector<SDValue, 4> Chains(NumValues);

    // An aggregate return value cannot wrap around the address space, so
    // offsets to its parts don't wrap either.
    MachineFunction &MF = CLI.DAG.getMachineFunction();
    Align HiddenSRetAlign = MF.getFrameInfo().getObjectAlign(DemoteStackIdx);
    for (unsigned i = 0; i < NumValues; ++i) {
      SDValue Add = CLI.DAG.getMemBasePlusOffset(
          DemoteStackSlot, CLI.DAG.getConstant(Offsets[i], CLI.DL, PtrVT),
          CLI.DL, SDNodeFlags::NoUnsignedWrap);
      SDValue L = CLI.DAG.getLoad(
          RetTys[i], CLI.DL, CLI.Chain, Add,
          MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
                                            DemoteStackIdx, Offsets[i]),
          HiddenSRetAlign);
      ReturnValues[i] = L;
      Chains[i] = L.getValue(1);
    }

    CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
| 11293 | } else { |
| 11294 | // Collect the legal value parts into potentially illegal values |
| 11295 | // that correspond to the original function's return values. |
| 11296 | std::optional<ISD::NodeType> AssertOp; |
| 11297 | if (CLI.RetSExt) |
| 11298 | AssertOp = ISD::AssertSext; |
| 11299 | else if (CLI.RetZExt) |
| 11300 | AssertOp = ISD::AssertZext; |
| 11301 | unsigned CurReg = 0; |
| 11302 | for (EVT VT : RetTys) { |
      MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
                                                     CLI.CallConv, VT);
      unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
                                                       CLI.CallConv, VT);

      ReturnValues.push_back(getCopyFromParts(
          CLI.DAG, CLI.DL, &InVals[CurReg], NumRegs, RegisterVT, VT, nullptr,
          CLI.Chain, CLI.CallConv, AssertOp));
      CurReg += NumRegs;
    }

    // For a function returning void, there is no return value. We can't create
    // such a node, so we just return a null return value in that case. In
    // that case, nothing will actually look at the value.
    if (ReturnValues.empty())
      return std::make_pair(SDValue(), CLI.Chain);
  }

  SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
                                CLI.DAG.getVTList(RetTys), ReturnValues);
  return std::make_pair(Res, CLI.Chain);
| 11324 | } |
| 11325 | |
| 11326 | /// Places new result values for the node in Results (their number |
| 11327 | /// and types must exactly match those of the original return values of |
| 11328 | /// the node), or leaves Results empty, which indicates that the node is not |
| 11329 | /// to be custom lowered after all. |
| 11330 | void TargetLowering::LowerOperationWrapper(SDNode *N, |
| 11331 | SmallVectorImpl<SDValue> &Results, |
| 11332 | SelectionDAG &DAG) const { |
  SDValue Res = LowerOperation(SDValue(N, 0), DAG);

  if (!Res.getNode())
    return;

  // If the original node has one result, take the return value from
  // LowerOperation as is. It might not be result number 0.
  if (N->getNumValues() == 1) {
    Results.push_back(Res);
    return;
  }

  // If the original node has multiple results, then the return node should
  // have the same number of results.
  assert((N->getNumValues() == Res->getNumValues()) &&
         "Lowering returned the wrong number of results!");

  // Place the new result values based on the result number of N.
  for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
    Results.push_back(Res.getValue(I));
| 11353 | } |
| 11354 | |
| 11355 | SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { |
| 11356 | llvm_unreachable("LowerOperation not implemented for this target!" ); |
| 11357 | } |
| 11358 | |
| 11359 | void SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, |
| 11360 | Register Reg, |
| 11361 | ISD::NodeType ExtendType) { |
| 11362 | SDValue Op = getNonRegisterValue(V); |
| 11363 | assert((Op.getOpcode() != ISD::CopyFromReg || |
| 11364 | cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) && |
| 11365 | "Copy from a reg to the same reg!" ); |
| 11366 | assert(!Reg.isPhysical() && "Is a physreg" ); |
| 11367 | |
| 11368 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 11369 | // If this is an InlineAsm we have to match the registers required, not the |
| 11370 | // notional registers required by the type. |
| 11371 | |
| 11372 | RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(), |
| 11373 | std::nullopt); // This is not an ABI copy. |
| 11374 | SDValue Chain = DAG.getEntryNode(); |
| 11375 | |
| 11376 | if (ExtendType == ISD::ANY_EXTEND) { |
| 11377 | auto PreferredExtendIt = FuncInfo.PreferredExtendType.find(Val: V); |
| 11378 | if (PreferredExtendIt != FuncInfo.PreferredExtendType.end()) |
| 11379 | ExtendType = PreferredExtendIt->second; |
| 11380 | } |
| 11381 | RFV.getCopyToRegs(Val: Op, DAG, dl: getCurSDLoc(), Chain, Glue: nullptr, V, PreferredExtendType: ExtendType); |
| 11382 | PendingExports.push_back(Elt: Chain); |
| 11383 | } |
| 11384 | |
| 11385 | #include "llvm/CodeGen/SelectionDAGISel.h" |
| 11386 | |
| 11387 | /// isOnlyUsedInEntryBlock - If the specified argument is only used in the |
| 11388 | /// entry block, return true. This includes arguments used by switches, since |
| 11389 | /// the switch may expand into multiple basic blocks. |
| 11390 | static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) { |
| 11391 | // With FastISel active, we may be splitting blocks, so force creation |
| 11392 | // of virtual registers for all non-dead arguments. |
| 11393 | if (FastISel) |
| 11394 | return A->use_empty(); |
| 11395 | |
| 11396 | const BasicBlock &Entry = A->getParent()->front(); |
| 11397 | for (const User *U : A->users()) |
    if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
| 11399 | return false; // Use not in entry block. |
| 11400 | |
| 11401 | return true; |
| 11402 | } |
| 11403 | |
| 11404 | using ArgCopyElisionMapTy = |
| 11405 | DenseMap<const Argument *, |
| 11406 | std::pair<const AllocaInst *, const StoreInst *>>; |
| 11407 | |
| 11408 | /// Scan the entry block of the function in FuncInfo for arguments that look |
| 11409 | /// like copies into a local alloca. Record any copied arguments in |
| 11410 | /// ArgCopyElisionCandidates. |
| 11411 | static void |
| 11412 | findArgumentCopyElisionCandidates(const DataLayout &DL, |
| 11413 | FunctionLoweringInfo *FuncInfo, |
| 11414 | ArgCopyElisionMapTy &ArgCopyElisionCandidates) { |
| 11415 | // Record the state of every static alloca used in the entry block. Argument |
| 11416 | // allocas are all used in the entry block, so we need approximately as many |
| 11417 | // entries as we have arguments. |
| 11418 | enum StaticAllocaInfo { Unknown, Clobbered, Elidable }; |
| 11419 | SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas; |
| 11420 | unsigned NumArgs = FuncInfo->Fn->arg_size(); |
  StaticAllocas.reserve(NumArgs * 2);

  auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
    if (!V)
      return nullptr;
    V = V->stripPointerCasts();
    const auto *AI = dyn_cast<AllocaInst>(V);
    if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
      return nullptr;
    auto Iter = StaticAllocas.insert({AI, Unknown});
    return &Iter.first->second;
  };

  // Look for stores of arguments to static allocas. Look through bitcasts and
  // GEPs to handle type coercions, as long as the alloca is fully initialized
  // by the store. Any non-store use of an alloca escapes it and any subsequent
  // unanalyzed store might write it.
  // FIXME: Handle structs initialized with multiple stores.
  for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
    // Look for stores, and handle non-store uses conservatively.
    const auto *SI = dyn_cast<StoreInst>(&I);
| 11442 | if (!SI) { |
| 11443 | // We will look through cast uses, so ignore them completely. |
| 11444 | if (I.isCast()) |
| 11445 | continue; |
| 11446 | // Ignore debug info and pseudo op intrinsics, they don't escape or store |
| 11447 | // to allocas. |
| 11448 | if (I.isDebugOrPseudoInst()) |
| 11449 | continue; |
| 11450 | // This is an unknown instruction. Assume it escapes or writes to all |
| 11451 | // static alloca operands. |
| 11452 | for (const Use &U : I.operands()) { |
| 11453 | if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U)) |
| 11454 | *Info = StaticAllocaInfo::Clobbered; |
| 11455 | } |
| 11456 | continue; |
| 11457 | } |
| 11458 | |
| 11459 | // If the stored value is a static alloca, mark it as escaped. |
| 11460 | if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand())) |
| 11461 | *Info = StaticAllocaInfo::Clobbered; |
| 11462 | |
| 11463 | // Check if the destination is a static alloca. |
| 11464 | const Value *Dst = SI->getPointerOperand()->stripPointerCasts(); |
| 11465 | StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst); |
| 11466 | if (!Info) |
| 11467 | continue; |
    const AllocaInst *AI = cast<AllocaInst>(Dst);
| 11469 | |
| 11470 | // Skip allocas that have been initialized or clobbered. |
| 11471 | if (*Info != StaticAllocaInfo::Unknown) |
| 11472 | continue; |
| 11473 | |
| 11474 | // Check if the stored value is an argument, and that this store fully |
| 11475 | // initializes the alloca. |
| 11476 | // If the argument type has padding bits we can't directly forward a pointer |
| 11477 | // as the upper bits may contain garbage. |
| 11478 | // Don't elide copies from the same argument twice. |
| 11479 | const Value *Val = SI->getValueOperand()->stripPointerCasts(); |
| 11480 | const auto *Arg = dyn_cast<Argument>(Val); |
| 11481 | if (!Arg || Arg->hasPassPointeeByValueCopyAttr() || |
| 11482 | Arg->getType()->isEmptyTy() || |
        DL.getTypeStoreSize(Arg->getType()) !=
            DL.getTypeAllocSize(AI->getAllocatedType()) ||
        !DL.typeSizeEqualsStoreSize(Arg->getType()) ||
        ArgCopyElisionCandidates.count(Arg)) {
      *Info = StaticAllocaInfo::Clobbered;
      continue;
    }

    LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
                      << '\n');

    // Mark this alloca and store for argument copy elision.
    *Info = StaticAllocaInfo::Elidable;
    ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
| 11497 | |
| 11498 | // Stop scanning if we've seen all arguments. This will happen early in -O0 |
| 11499 | // builds, which is useful, because -O0 builds have large entry blocks and |
| 11500 | // many allocas. |
| 11501 | if (ArgCopyElisionCandidates.size() == NumArgs) |
| 11502 | break; |
| 11503 | } |
| 11504 | } |
| 11505 | |
| 11506 | /// Try to elide argument copies from memory into a local alloca. Succeeds if |
| 11507 | /// ArgVal is a load from a suitable fixed stack object. |
| 11508 | static void tryToElideArgumentCopy( |
| 11509 | FunctionLoweringInfo &FuncInfo, SmallVectorImpl<SDValue> &Chains, |
| 11510 | DenseMap<int, int> &ArgCopyElisionFrameIndexMap, |
| 11511 | SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs, |
| 11512 | ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg, |
| 11513 | ArrayRef<SDValue> ArgVals, bool &ArgHasUses) { |
| 11514 | // Check if this is a load from a fixed stack object. |
  auto *LNode = dyn_cast<LoadSDNode>(ArgVals[0]);
  if (!LNode)
    return;
  auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
| 11519 | if (!FINode) |
| 11520 | return; |
| 11521 | |
| 11522 | // Check that the fixed stack object is the right size and alignment. |
| 11523 | // Look at the alignment that the user wrote on the alloca instead of looking |
| 11524 | // at the stack object. |
  auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
| 11526 | assert(ArgCopyIter != ArgCopyElisionCandidates.end()); |
| 11527 | const AllocaInst *AI = ArgCopyIter->second.first; |
| 11528 | int FixedIndex = FINode->getIndex(); |
| 11529 | int &AllocaIndex = FuncInfo.StaticAllocaMap[AI]; |
| 11530 | int OldIndex = AllocaIndex; |
| 11531 | MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo(); |
  if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
    LLVM_DEBUG(
        dbgs() << "  argument copy elision failed due to bad fixed stack "
                  "object size\n");
    return;
  }
  Align RequiredAlignment = AI->getAlign();
  if (MFI.getObjectAlign(FixedIndex) < RequiredAlignment) {
    LLVM_DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
                         "greater than stack argument alignment ("
                      << DebugStr(RequiredAlignment) << " vs "
                      << DebugStr(MFI.getObjectAlign(FixedIndex)) << ")\n");
    return;
  }
| 11544 | return; |
| 11545 | } |
| 11546 | |
| 11547 | // Perform the elision. Delete the old stack object and replace its only use |
| 11548 | // in the variable info map. Mark the stack object as mutable and aliased. |
| 11549 | LLVM_DEBUG({ |
| 11550 | dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n' |
| 11551 | << " Replacing frame index " << OldIndex << " with " << FixedIndex |
| 11552 | << '\n'; |
| 11553 | }); |
  MFI.RemoveStackObject(OldIndex);
  MFI.setIsImmutableObjectIndex(FixedIndex, false);
  MFI.setIsAliasedObjectIndex(FixedIndex, true);
  AllocaIndex = FixedIndex;
  ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
  for (SDValue ArgVal : ArgVals)
    Chains.push_back(ArgVal.getValue(1));
| 11561 | |
| 11562 | // Avoid emitting code for the store implementing the copy. |
| 11563 | const StoreInst *SI = ArgCopyIter->second.second; |
  ElidedArgCopyInstrs.insert(SI);

  // Check for uses of the argument again so that we can avoid exporting ArgVal
  // if it isn't used by anything other than the store.
| 11568 | for (const Value *U : Arg.users()) { |
| 11569 | if (U != SI) { |
| 11570 | ArgHasUses = true; |
| 11571 | break; |
| 11572 | } |
| 11573 | } |
| 11574 | } |
| 11575 | |
| 11576 | void SelectionDAGISel::LowerArguments(const Function &F) { |
| 11577 | SelectionDAG &DAG = SDB->DAG; |
| 11578 | SDLoc dl = SDB->getCurSDLoc(); |
| 11579 | const DataLayout &DL = DAG.getDataLayout(); |
| 11580 | SmallVector<ISD::InputArg, 16> Ins; |
| 11581 | |
| 11582 | // In Naked functions we aren't going to save any registers. |
  if (F.hasFnAttribute(Attribute::Naked))
    return;

  if (!FuncInfo->CanLowerReturn) {
    // Put in an sret pointer parameter before all the other parameters.
    MVT ValueVT = TLI->getPointerTy(DL, DL.getAllocaAddrSpace());

    ISD::ArgFlagsTy Flags;
    Flags.setSRet();
    MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVT);
    ISD::InputArg RetArg(Flags, RegisterVT, ValueVT, true,
                         ISD::InputArg::NoArgIndex, 0);
    Ins.push_back(RetArg);
| 11596 | } |
| 11597 | |
| 11598 | // Look for stores of arguments to static allocas. Mark such arguments with a |
| 11599 | // flag to ask the target to give us the memory location of that argument if |
| 11600 | // available. |
| 11601 | ArgCopyElisionMapTy ArgCopyElisionCandidates; |
  findArgumentCopyElisionCandidates(DL, FuncInfo.get(),
                                    ArgCopyElisionCandidates);
| 11604 | |
| 11605 | // Set up the incoming argument description vector. |
| 11606 | for (const Argument &Arg : F.args()) { |
| 11607 | unsigned ArgNo = Arg.getArgNo(); |
| 11608 | SmallVector<EVT, 4> ValueVTs; |
    ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
    bool isArgValueUsed = !Arg.use_empty();
    unsigned PartBase = 0;
    Type *FinalType = Arg.getType();
    if (Arg.hasAttribute(Attribute::ByVal))
      FinalType = Arg.getParamByValType();
    bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
        FinalType, F.getCallingConv(), F.isVarArg(), DL);
    for (unsigned Value = 0, NumValues = ValueVTs.size();
         Value != NumValues; ++Value) {
      EVT VT = ValueVTs[Value];
      Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
      ISD::ArgFlagsTy Flags;

      if (Arg.getType()->isPointerTy()) {
        Flags.setPointer();
        Flags.setPointerAddrSpace(
            cast<PointerType>(Arg.getType())->getAddressSpace());
      }
      if (Arg.hasAttribute(Attribute::ZExt))
        Flags.setZExt();
      if (Arg.hasAttribute(Attribute::SExt))
        Flags.setSExt();
      if (Arg.hasAttribute(Attribute::InReg)) {
        // If we are using vectorcall calling convention, a structure that is
        // passed InReg - is surely an HVA
        if (F.getCallingConv() == CallingConv::X86_VectorCall &&
            isa<StructType>(Arg.getType())) {
| 11638 | // The first value of a structure is marked |
| 11639 | if (0 == Value) |
| 11640 | Flags.setHvaStart(); |
| 11641 | Flags.setHva(); |
| 11642 | } |
| 11643 | // Set InReg Flag |
| 11644 | Flags.setInReg(); |
| 11645 | } |
      if (Arg.hasAttribute(Attribute::StructRet))
        Flags.setSRet();
      if (Arg.hasAttribute(Attribute::SwiftSelf))
        Flags.setSwiftSelf();
      if (Arg.hasAttribute(Attribute::SwiftAsync))
        Flags.setSwiftAsync();
      if (Arg.hasAttribute(Attribute::SwiftError))
        Flags.setSwiftError();
      if (Arg.hasAttribute(Attribute::ByVal))
        Flags.setByVal();
      if (Arg.hasAttribute(Attribute::ByRef))
        Flags.setByRef();
      if (Arg.hasAttribute(Attribute::InAlloca)) {
        Flags.setInAlloca();
        // Set the byval flag for CCAssignFn callbacks that don't know about
        // inalloca. This way we can know how many bytes we should've allocated
        // and how many bytes a callee cleanup function will pop. If we port
        // inalloca to more targets, we'll have to add custom inalloca handling
        // in the various CC lowering callbacks.
        Flags.setByVal();
      }
      if (Arg.hasAttribute(Attribute::Preallocated)) {
| 11668 | Flags.setPreallocated(); |
| 11669 | // Set the byval flag for CCAssignFn callbacks that don't know about |
| 11670 | // preallocated. This way we can know how many bytes we should've |
| 11671 | // allocated and how many bytes a callee cleanup function will pop. If |
| 11672 | // we port preallocated to more targets, we'll have to add custom |
| 11673 | // preallocated handling in the various CC lowering callbacks. |
| 11674 | Flags.setByVal(); |
| 11675 | } |
| 11676 | |
| 11677 | // Certain targets (such as MIPS), may have a different ABI alignment |
| 11678 | // for a type depending on the context. Give the target a chance to |
| 11679 | // specify the alignment it wants. |
| 11680 | const Align OriginalAlignment( |
| 11681 | TLI->getABIAlignmentForCallingConv(ArgTy, DL)); |
| 11682 | Flags.setOrigAlign(OriginalAlignment); |
| 11683 | |
| 11684 | Align MemAlign; |
| 11685 | Type *ArgMemTy = nullptr; |
| 11686 | if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated() || |
| 11687 | Flags.isByRef()) { |
| 11688 | if (!ArgMemTy) |
| 11689 | ArgMemTy = Arg.getPointeeInMemoryValueType(); |
| 11690 | |
        uint64_t MemSize = DL.getTypeAllocSize(ArgMemTy);

        // For in-memory arguments, size and alignment should be passed from FE.
        // BE will guess if this info is not there but there are cases it cannot
        // get right.
        if (auto ParamAlign = Arg.getParamStackAlign())
          MemAlign = *ParamAlign;
        else if ((ParamAlign = Arg.getParamAlign()))
          MemAlign = *ParamAlign;
        else
          MemAlign = TLI->getByValTypeAlignment(ArgMemTy, DL);
| 11702 | if (Flags.isByRef()) |
| 11703 | Flags.setByRefSize(MemSize); |
| 11704 | else |
| 11705 | Flags.setByValSize(MemSize); |
| 11706 | } else if (auto ParamAlign = Arg.getParamStackAlign()) { |
| 11707 | MemAlign = *ParamAlign; |
| 11708 | } else { |
| 11709 | MemAlign = OriginalAlignment; |
| 11710 | } |
| 11711 | Flags.setMemAlign(MemAlign); |
| 11712 | |
      if (Arg.hasAttribute(Attribute::Nest))
        Flags.setNest();
      if (NeedsRegBlock)
        Flags.setInConsecutiveRegs();
      if (ArgCopyElisionCandidates.count(&Arg))
        Flags.setCopyElisionCandidate();
      if (Arg.hasAttribute(Attribute::Returned))
        Flags.setReturned();

      MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
          *CurDAG->getContext(), F.getCallingConv(), VT);
      unsigned NumRegs = TLI->getNumRegistersForCallingConv(
          *CurDAG->getContext(), F.getCallingConv(), VT);
| 11726 | for (unsigned i = 0; i != NumRegs; ++i) { |
| 11727 | // For scalable vectors, use the minimum size; individual targets |
| 11728 | // are responsible for handling scalable vector arguments and |
| 11729 | // return values. |
| 11730 | ISD::InputArg MyFlags( |
| 11731 | Flags, RegisterVT, VT, isArgValueUsed, ArgNo, |
| 11732 | PartBase + i * RegisterVT.getStoreSize().getKnownMinValue()); |
| 11733 | if (NumRegs > 1 && i == 0) |
| 11734 | MyFlags.Flags.setSplit(); |
| 11735 | // if it isn't first piece, alignment must be 1 |
| 11736 | else if (i > 0) { |
| 11737 | MyFlags.Flags.setOrigAlign(Align(1)); |
| 11738 | if (i == NumRegs - 1) |
| 11739 | MyFlags.Flags.setSplitEnd(); |
| 11740 | } |
        Ins.push_back(MyFlags);
| 11742 | } |
| 11743 | if (NeedsRegBlock && Value == NumValues - 1) |
| 11744 | Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast(); |
| 11745 | PartBase += VT.getStoreSize().getKnownMinValue(); |
| 11746 | } |
| 11747 | } |
| 11748 | |
| 11749 | // Call the target to set up the argument values. |
| 11750 | SmallVector<SDValue, 8> InVals; |
| 11751 | SDValue NewRoot = TLI->LowerFormalArguments( |
| 11752 | DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals); |
| 11753 | |
| 11754 | // Verify that the target's LowerFormalArguments behaved as expected. |
  assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
         "LowerFormalArguments didn't return a valid chain!");
  assert(InVals.size() == Ins.size() &&
         "LowerFormalArguments didn't emit the correct number of values!");
  LLVM_DEBUG({
    for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
      assert(InVals[i].getNode() &&
             "LowerFormalArguments emitted a null value!");
      assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
             "LowerFormalArguments emitted a value with the wrong type!");
| 11765 | } |
| 11766 | }); |
| 11767 | |
| 11768 | // Update the DAG with the new chain value resulting from argument lowering. |
| 11769 | DAG.setRoot(NewRoot); |
| 11770 | |
| 11771 | // Set up the argument values. |
| 11772 | unsigned i = 0; |
| 11773 | if (!FuncInfo->CanLowerReturn) { |
| 11774 | // Create a virtual register for the sret pointer, and put in a copy |
| 11775 | // from the sret argument into it. |
    MVT VT = TLI->getPointerTy(DL, DL.getAllocaAddrSpace());
    MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
    std::optional<ISD::NodeType> AssertOp;
    SDValue ArgValue =
        getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT, nullptr, NewRoot,
                         F.getCallingConv(), AssertOp);

    MachineFunction &MF = SDB->DAG.getMachineFunction();
    MachineRegisterInfo &RegInfo = MF.getRegInfo();
    Register SRetReg =
        RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
    FuncInfo->DemoteRegister = SRetReg;
    NewRoot =
        SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
| 11790 | DAG.setRoot(NewRoot); |
| 11791 | |
| 11792 | // i indexes lowered arguments. Bump it past the hidden sret argument. |
| 11793 | ++i; |
| 11794 | } |
| 11795 | |
| 11796 | SmallVector<SDValue, 4> Chains; |
| 11797 | DenseMap<int, int> ArgCopyElisionFrameIndexMap; |
| 11798 | for (const Argument &Arg : F.args()) { |
| 11799 | SmallVector<SDValue, 4> ArgValues; |
| 11800 | SmallVector<EVT, 4> ValueVTs; |
    ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
| 11802 | unsigned NumValues = ValueVTs.size(); |
| 11803 | if (NumValues == 0) |
| 11804 | continue; |
| 11805 | |
| 11806 | bool ArgHasUses = !Arg.use_empty(); |
| 11807 | |
| 11808 | // Elide the copying store if the target loaded this argument from a |
| 11809 | // suitable fixed stack object. |
| 11810 | if (Ins[i].Flags.isCopyElisionCandidate()) { |
| 11811 | unsigned NumParts = 0; |
| 11812 | for (EVT VT : ValueVTs) |
        NumParts += TLI->getNumRegistersForCallingConv(*CurDAG->getContext(),
                                                       F.getCallingConv(), VT);

      tryToElideArgumentCopy(*FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
                             ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
                             ArrayRef(&InVals[i], NumParts), ArgHasUses);
| 11819 | } |
| 11820 | |
    // If this argument is unused, remember its value; it is used to generate
    // debugging information.
| 11823 | bool isSwiftErrorArg = |
| 11824 | TLI->supportSwiftError() && |
        Arg.hasAttribute(Attribute::SwiftError);
    if (!ArgHasUses && !isSwiftErrorArg) {
      SDB->setUnusedArgValue(&Arg, InVals[i]);

      // Also remember any frame index for use in FastISel.
      if (FrameIndexSDNode *FI =
              dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
        FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
| 11833 | } |
| 11834 | |
| 11835 | for (unsigned Val = 0; Val != NumValues; ++Val) { |
| 11836 | EVT VT = ValueVTs[Val]; |
      MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
                                                      F.getCallingConv(), VT);
      unsigned NumParts = TLI->getNumRegistersForCallingConv(
          *CurDAG->getContext(), F.getCallingConv(), VT);
| 11841 | |
      // Even an apparently unused swifterror argument needs to be returned, so
      // we generate a copy for it that can be used on return from the
      // function.
| 11845 | if (ArgHasUses || isSwiftErrorArg) { |
| 11846 | std::optional<ISD::NodeType> AssertOp; |
        if (Arg.hasAttribute(Attribute::SExt))
          AssertOp = ISD::AssertSext;
        else if (Arg.hasAttribute(Attribute::ZExt))
          AssertOp = ISD::AssertZext;
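        // For example (illustrative): a 'signext i8' argument arriving in a
        // 32-bit register becomes (AssertSext reg, i8), letting later nodes
        // assume the upper 24 bits are copies of the sign bit.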
| 11851 | |
        SDValue OutVal =
            getCopyFromParts(DAG, dl, &InVals[i], NumParts, PartVT, VT,
                             nullptr, NewRoot, F.getCallingConv(), AssertOp);
| 11855 | |
| 11856 | FPClassTest NoFPClass = Arg.getNoFPClass(); |
| 11857 | if (NoFPClass != fcNone) { |
          SDValue SDNoFPClass = DAG.getTargetConstant(
              static_cast<uint64_t>(NoFPClass), dl, MVT::i32);
          OutVal = DAG.getNode(ISD::AssertNoFPClass, dl, OutVal.getValueType(),
                               OutVal, SDNoFPClass);
        }
        ArgValues.push_back(OutVal);
| 11864 | } |
| 11865 | |
| 11866 | i += NumParts; |
| 11867 | } |
| 11868 | |
| 11869 | // We don't need to do anything else for unused arguments. |
| 11870 | if (ArgValues.empty()) |
| 11871 | continue; |
| 11872 | |
| 11873 | // Note down frame index. |
    if (FrameIndexSDNode *FI =
            dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
      FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());

    SDValue Res = DAG.getMergeValues(ArrayRef(ArgValues.data(), NumValues),
                                     SDB->getCurSDLoc());
| 11880 | |
    SDB->setValue(&Arg, Res);
| 11882 | if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) { |
| 11883 | // We want to associate the argument with the frame index, among |
| 11884 | // involved operands, that correspond to the lowest address. The |
| 11885 | // getCopyFromParts function, called earlier, is swapping the order of |
| 11886 | // the operands to BUILD_PAIR depending on endianness. The result of |
| 11887 | // that swapping is that the least significant bits of the argument will |
| 11888 | // be in the first operand of the BUILD_PAIR node, and the most |
| 11889 | // significant bits will be in the second operand. |
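      // For example (illustrative): an i64 argument reloaded as two i32 loads
      // becomes BUILD_PAIR(lo, hi); on a big-endian target the second operand
      // (the most significant half) is the one loaded from the lower address,
      // hence the operand index chosen below.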
      unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
      if (LoadSDNode *LNode =
              dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
        if (FrameIndexSDNode *FI =
                dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
          FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
| 11896 | } |
| 11897 | |
| 11898 | // Analyses past this point are naive and don't expect an assertion. |
| 11899 | if (Res.getOpcode() == ISD::AssertZext) |
      Res = Res.getOperand(0);
| 11901 | |
| 11902 | // Update the SwiftErrorVRegDefMap. |
| 11903 | if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) { |
      Register Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
      if (Reg.isVirtual())
        SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(),
                                   Reg);
| 11908 | } |
| 11909 | |
| 11910 | // If this argument is live outside of the entry block, insert a copy from |
| 11911 | // wherever we got it to the vreg that other BB's will reference it as. |
| 11912 | if (Res.getOpcode() == ISD::CopyFromReg) { |
| 11913 | // If we can, though, try to skip creating an unnecessary vreg. |
| 11914 | // FIXME: This isn't very clean... it would be nice to make this more |
| 11915 | // general. |
      Register Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
| 11917 | if (Reg.isVirtual()) { |
| 11918 | FuncInfo->ValueMap[&Arg] = Reg; |
| 11919 | continue; |
| 11920 | } |
| 11921 | } |
    if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
      FuncInfo->InitializeRegForValue(&Arg);
      SDB->CopyToExportRegsIfNeeded(&Arg);
| 11925 | } |
| 11926 | } |
| 11927 | |
| 11928 | if (!Chains.empty()) { |
    Chains.push_back(NewRoot);
    NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
| 11931 | } |
| 11932 | |
| 11933 | DAG.setRoot(NewRoot); |
| 11934 | |
  assert(i == InVals.size() && "Argument register count mismatch!");
| 11936 | |
| 11937 | // If any argument copy elisions occurred and we have debug info, update the |
| 11938 | // stale frame indices used in the dbg.declare variable info table. |
| 11939 | if (!ArgCopyElisionFrameIndexMap.empty()) { |
| 11940 | for (MachineFunction::VariableDbgInfo &VI : |
| 11941 | MF->getInStackSlotVariableDbgInfo()) { |
      auto I = ArgCopyElisionFrameIndexMap.find(VI.getStackSlot());
      if (I != ArgCopyElisionFrameIndexMap.end())
        VI.updateStackSlot(I->second);
| 11945 | } |
| 11946 | } |
| 11947 | |
| 11948 | // Finally, if the target has anything special to do, allow it to do so. |
| 11949 | emitFunctionEntryCode(); |
| 11950 | } |
| 11951 | |
/// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
/// ensure constants are generated when needed. Remember the virtual registers
/// that need to be added to the Machine PHI nodes as input. We cannot just
/// directly add them, because expansion might result in multiple MBBs for one
/// BB. As such, the start of the BB might correspond to a different MBB than
/// the end.
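///
/// For example (illustrative): lowering a switch terminator may split its
/// machine block into several MBBs (range checks, jump-table dispatch), so a
/// PHI in a successor naming that IR block must take its input from whichever
/// MBB actually ends with the branch, not the MBB where the IR block began.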
| 11958 | void |
| 11959 | SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) { |
| 11960 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 11961 | |
| 11962 | SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled; |
| 11963 | |
| 11964 | // Check PHI nodes in successors that expect a value to be available from this |
| 11965 | // block. |
  for (const BasicBlock *SuccBB : successors(LLVMBB->getTerminator())) {
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.getMBB(SuccBB);
| 11969 | |
| 11970 | // If this terminator has multiple identical successors (common for |
| 11971 | // switches), only handle each succ once. |
    if (!SuccsHandled.insert(SuccMBB).second)
| 11973 | continue; |
| 11974 | |
| 11975 | MachineBasicBlock::iterator MBBI = SuccMBB->begin(); |
| 11976 | |
| 11977 | // At this point we know that there is a 1-1 correspondence between LLVM PHI |
| 11978 | // nodes and Machine PHI nodes, but the incoming operands have not been |
| 11979 | // emitted yet. |
| 11980 | for (const PHINode &PN : SuccBB->phis()) { |
      // Ignore dead PHIs.
| 11982 | if (PN.use_empty()) |
| 11983 | continue; |
| 11984 | |
| 11985 | // Skip empty types |
| 11986 | if (PN.getType()->isEmptyTy()) |
| 11987 | continue; |
| 11988 | |
| 11989 | Register Reg; |
      const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);

      if (const auto *C = dyn_cast<Constant>(PHIOp)) {
| 11993 | Register &RegOut = ConstantsOut[C]; |
| 11994 | if (!RegOut) { |
          RegOut = FuncInfo.CreateRegs(&PN);
          // We need to zero/sign extend ConstantInt phi operands to match
          // assumptions in FunctionLoweringInfo::ComputePHILiveOutRegInfo.
          ISD::NodeType ExtendType = ISD::ANY_EXTEND;
          if (auto *CI = dyn_cast<ConstantInt>(C))
            ExtendType = TLI.signExtendConstant(CI) ? ISD::SIGN_EXTEND
                                                    : ISD::ZERO_EXTEND;
          CopyValueToVirtualRegister(C, RegOut, ExtendType);
| 12003 | } |
| 12004 | Reg = RegOut; |
| 12005 | } else { |
        DenseMap<const Value *, Register>::iterator I =
            FuncInfo.ValueMap.find(PHIOp);
        if (I != FuncInfo.ValueMap.end())
          Reg = I->second;
        else {
          assert(isa<AllocaInst>(PHIOp) &&
                 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
                 "Didn't codegen value into a register!??");
          Reg = FuncInfo.CreateRegs(&PN);
          CopyValueToVirtualRegister(PHIOp, Reg);
        }
| 12017 | } |
| 12018 | |
      // Remember that this register needs to be added to the machine PHI node
      // as the input for this MBB.
      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
      for (EVT VT : ValueVTs) {
        const unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
        for (unsigned i = 0; i != NumRegisters; ++i)
          FuncInfo.PHINodesToUpdate.emplace_back(&*MBBI++, Reg + i);
| 12027 | Reg += NumRegisters; |
| 12028 | } |
| 12029 | } |
| 12030 | } |
| 12031 | |
| 12032 | ConstantsOut.clear(); |
| 12033 | } |
| 12034 | |
| 12035 | MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) { |
| 12036 | MachineFunction::iterator I(MBB); |
| 12037 | if (++I == FuncInfo.MF->end()) |
| 12038 | return nullptr; |
| 12039 | return &*I; |
| 12040 | } |
| 12041 | |
| 12042 | /// During lowering new call nodes can be created (such as memset, etc.). |
| 12043 | /// Those will become new roots of the current DAG, but complications arise |
| 12044 | /// when they are tail calls. In such cases, the call lowering will update |
| 12045 | /// the root, but the builder still needs to know that a tail call has been |
| 12046 | /// lowered in order to avoid generating an additional return. |
| 12047 | void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) { |
| 12048 | // If the node is null, we do have a tail call. |
| 12049 | if (MaybeTC.getNode() != nullptr) |
| 12050 | DAG.setRoot(MaybeTC); |
| 12051 | else |
| 12052 | HasTailCall = true; |
| 12053 | } |
| 12054 | |
| 12055 | void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond, |
| 12056 | MachineBasicBlock *SwitchMBB, |
| 12057 | MachineBasicBlock *DefaultMBB) { |
| 12058 | MachineFunction *CurMF = FuncInfo.MF; |
| 12059 | MachineBasicBlock *NextMBB = nullptr; |
| 12060 | MachineFunction::iterator BBI(W.MBB); |
| 12061 | if (++BBI != FuncInfo.MF->end()) |
| 12062 | NextMBB = &*BBI; |
| 12063 | |
| 12064 | unsigned Size = W.LastCluster - W.FirstCluster + 1; |
| 12065 | |
| 12066 | BranchProbabilityInfo *BPI = FuncInfo.BPI; |
| 12067 | |
| 12068 | if (Size == 2 && W.MBB == SwitchMBB) { |
    // If any two of the cases have the same destination, and if one value
    // is the same as the other, but has one bit unset that the other has set,
    // use bit manipulation to do two compares at once. For example:
    // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
| 12073 | // TODO: This could be extended to merge any 2 cases in switches with 3 |
| 12074 | // cases. |
| 12075 | // TODO: Handle cases where W.CaseBB != SwitchBB. |
| 12076 | CaseCluster &Small = *W.FirstCluster; |
| 12077 | CaseCluster &Big = *W.LastCluster; |
| 12078 | |
| 12079 | if (Small.Low == Small.High && Big.Low == Big.High && |
| 12080 | Small.MBB == Big.MBB) { |
| 12081 | const APInt &SmallValue = Small.Low->getValue(); |
| 12082 | const APInt &BigValue = Big.Low->getValue(); |
| 12083 | |
| 12084 | // Check that there is only one bit different. |
| 12085 | APInt CommonBit = BigValue ^ SmallValue; |
| 12086 | if (CommonBit.isPowerOf2()) { |
        SDValue CondLHS = getValue(Cond);
| 12088 | EVT VT = CondLHS.getValueType(); |
| 12089 | SDLoc DL = getCurSDLoc(); |
| 12090 | |
        SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
                                 DAG.getConstant(CommonBit, DL, VT));
        SDValue Cond = DAG.getSetCC(
            DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
            ISD::SETEQ);
| 12096 | |
| 12097 | // Update successor info. |
| 12098 | // Both Small and Big will jump to Small.BB, so we sum up the |
| 12099 | // probabilities. |
        addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
        if (BPI)
          addSuccessorWithProb(
              SwitchMBB, DefaultMBB,
              // The default destination is the first successor in IR.
              BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
        else
          addSuccessorWithProb(SwitchMBB, DefaultMBB);
| 12108 | |
| 12109 | // Insert the true branch. |
        SDValue BrCond =
            DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
                        DAG.getBasicBlock(Small.MBB));
        // Insert the false branch.
        BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
                             DAG.getBasicBlock(DefaultMBB));
| 12116 | |
| 12117 | DAG.setRoot(BrCond); |
| 12118 | return; |
| 12119 | } |
| 12120 | } |
| 12121 | } |
| 12122 | |
| 12123 | if (TM.getOptLevel() != CodeGenOptLevel::None) { |
| 12124 | // Here, we order cases by probability so the most likely case will be |
| 12125 | // checked first. However, two clusters can have the same probability in |
| 12126 | // which case their relative ordering is non-deterministic. So we use Low |
| 12127 | // as a tie-breaker as clusters are guaranteed to never overlap. |
    llvm::sort(W.FirstCluster, W.LastCluster + 1,
               [](const CaseCluster &a, const CaseCluster &b) {
                 return a.Prob != b.Prob
                            ? a.Prob > b.Prob
                            : a.Low->getValue().slt(b.Low->getValue());
               });
| 12134 | |
| 12135 | // Rearrange the case blocks so that the last one falls through if possible |
| 12136 | // without changing the order of probabilities. |
| 12137 | for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) { |
| 12138 | --I; |
| 12139 | if (I->Prob > W.LastCluster->Prob) |
| 12140 | break; |
| 12141 | if (I->Kind == CC_Range && I->MBB == NextMBB) { |
        std::swap(*I, *W.LastCluster);
| 12143 | break; |
| 12144 | } |
| 12145 | } |
| 12146 | } |
| 12147 | |
| 12148 | // Compute total probability. |
| 12149 | BranchProbability DefaultProb = W.DefaultProb; |
| 12150 | BranchProbability UnhandledProbs = DefaultProb; |
| 12151 | for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I) |
| 12152 | UnhandledProbs += I->Prob; |
| 12153 | |
| 12154 | MachineBasicBlock *CurMBB = W.MBB; |
| 12155 | for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) { |
| 12156 | bool FallthroughUnreachable = false; |
| 12157 | MachineBasicBlock *Fallthrough; |
| 12158 | if (I == W.LastCluster) { |
| 12159 | // For the last cluster, fall through to the default destination. |
| 12160 | Fallthrough = DefaultMBB; |
      FallthroughUnreachable = isa<UnreachableInst>(
          DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
    } else {
      Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
      CurMF->insert(BBI, Fallthrough);
      // Put Cond in a virtual register to make it available from the new blocks.
      ExportFromCurrentBlock(Cond);
| 12168 | } |
| 12169 | UnhandledProbs -= I->Prob; |
| 12170 | |
| 12171 | switch (I->Kind) { |
| 12172 | case CC_JumpTable: { |
| 12173 | // FIXME: Optimize away range check based on pivot comparisons. |
| 12174 | JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first; |
| 12175 | SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second; |
| 12176 | |
| 12177 | // The jump block hasn't been inserted yet; insert it here. |
| 12178 | MachineBasicBlock *JumpMBB = JT->MBB; |
      CurMF->insert(BBI, JumpMBB);
| 12180 | |
| 12181 | auto JumpProb = I->Prob; |
| 12182 | auto FallthroughProb = UnhandledProbs; |
| 12183 | |
| 12184 | // If the default statement is a target of the jump table, we evenly |
| 12185 | // distribute the default probability to successors of CurMBB. Also |
| 12186 | // update the probability on the edge from JumpMBB to Fallthrough. |
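      //
      // For example (illustrative): with DefaultProb = 1/4, half of it (1/8)
      // is attributed to reaching the default through the jump table, so the
      // jump edge gains 1/8 and the fallthrough edge (which still performs
      // the range check) gives up the same 1/8.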
| 12187 | for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(), |
| 12188 | SE = JumpMBB->succ_end(); |
| 12189 | SI != SE; ++SI) { |
| 12190 | if (*SI == DefaultMBB) { |
| 12191 | JumpProb += DefaultProb / 2; |
| 12192 | FallthroughProb -= DefaultProb / 2; |
          JumpMBB->setSuccProbability(SI, DefaultProb / 2);
| 12194 | JumpMBB->normalizeSuccProbs(); |
| 12195 | break; |
| 12196 | } |
| 12197 | } |
| 12198 | |
| 12199 | // If the default clause is unreachable, propagate that knowledge into |
| 12200 | // JTH->FallthroughUnreachable which will use it to suppress the range |
| 12201 | // check. |
| 12202 | // |
| 12203 | // However, don't do this if we're doing branch target enforcement, |
| 12204 | // because a table branch _without_ a range check can be a tempting JOP |
| 12205 | // gadget - out-of-bounds inputs that are impossible in correct |
| 12206 | // execution become possible again if an attacker can influence the |
| 12207 | // control flow. So if an attacker doesn't already have a BTI bypass |
| 12208 | // available, we don't want them to be able to get one out of this |
| 12209 | // table branch. |
| 12210 | if (FallthroughUnreachable) { |
| 12211 | Function &CurFunc = CurMF->getFunction(); |
        if (!CurFunc.hasFnAttribute("branch-target-enforcement"))
| 12213 | JTH->FallthroughUnreachable = true; |
| 12214 | } |
| 12215 | |
| 12216 | if (!JTH->FallthroughUnreachable) |
        addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
      addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
      CurMBB->normalizeSuccProbs();
| 12220 | |
| 12221 | // The jump table header will be inserted in our current block, do the |
| 12222 | // range check, and fall through to our fallthrough block. |
| 12223 | JTH->HeaderBB = CurMBB; |
| 12224 | JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader. |
| 12225 | |
| 12226 | // If we're in the right place, emit the jump table header right now. |
| 12227 | if (CurMBB == SwitchMBB) { |
        visitJumpTableHeader(*JT, *JTH, SwitchMBB);
| 12229 | JTH->Emitted = true; |
| 12230 | } |
| 12231 | break; |
| 12232 | } |
| 12233 | case CC_BitTests: { |
| 12234 | // FIXME: Optimize away range check based on pivot comparisons. |
| 12235 | BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex]; |
| 12236 | |
| 12237 | // The bit test blocks haven't been inserted yet; insert them here. |
| 12238 | for (BitTestCase &BTC : BTB->Cases) |
        CurMF->insert(BBI, BTC.ThisBB);
| 12240 | |
| 12241 | // Fill in fields of the BitTestBlock. |
| 12242 | BTB->Parent = CurMBB; |
| 12243 | BTB->Default = Fallthrough; |
| 12244 | |
| 12245 | BTB->DefaultProb = UnhandledProbs; |
| 12246 | // If the cases in bit test don't form a contiguous range, we evenly |
| 12247 | // distribute the probability on the edge to Fallthrough to two |
| 12248 | // successors of CurMBB. |
| 12249 | if (!BTB->ContiguousRange) { |
| 12250 | BTB->Prob += DefaultProb / 2; |
| 12251 | BTB->DefaultProb -= DefaultProb / 2; |
| 12252 | } |
| 12253 | |
| 12254 | if (FallthroughUnreachable) |
| 12255 | BTB->FallthroughUnreachable = true; |
| 12256 | |
| 12257 | // If we're in the right place, emit the bit test header right now. |
| 12258 | if (CurMBB == SwitchMBB) { |
        visitBitTestHeader(*BTB, SwitchMBB);
| 12260 | BTB->Emitted = true; |
| 12261 | } |
| 12262 | break; |
| 12263 | } |
| 12264 | case CC_Range: { |
| 12265 | const Value *RHS, *LHS, *MHS; |
| 12266 | ISD::CondCode CC; |
| 12267 | if (I->Low == I->High) { |
| 12268 | // Check Cond == I->Low. |
| 12269 | CC = ISD::SETEQ; |
| 12270 | LHS = Cond; |
        RHS = I->Low;
| 12272 | MHS = nullptr; |
| 12273 | } else { |
| 12274 | // Check I->Low <= Cond <= I->High. |
| 12275 | CC = ISD::SETLE; |
| 12276 | LHS = I->Low; |
| 12277 | MHS = Cond; |
| 12278 | RHS = I->High; |
| 12279 | } |
| 12280 | |
| 12281 | // If Fallthrough is unreachable, fold away the comparison. |
| 12282 | if (FallthroughUnreachable) |
| 12283 | CC = ISD::SETTRUE; |
| 12284 | |
| 12285 | // The false probability is the sum of all unhandled cases. |
| 12286 | CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB, |
| 12287 | getCurSDLoc(), I->Prob, UnhandledProbs); |
| 12288 | |
| 12289 | if (CurMBB == SwitchMBB) |
        visitSwitchCase(CB, SwitchMBB);
      else
        SL->SwitchCases.push_back(CB);
| 12293 | |
| 12294 | break; |
| 12295 | } |
| 12296 | } |
| 12297 | CurMBB = Fallthrough; |
| 12298 | } |
| 12299 | } |
| 12300 | |
| 12301 | void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList, |
| 12302 | const SwitchWorkListItem &W, |
| 12303 | Value *Cond, |
| 12304 | MachineBasicBlock *SwitchMBB) { |
| 12305 | assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) && |
| 12306 | "Clusters not sorted?" ); |
| 12307 | assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!" ); |
| 12308 | |
| 12309 | auto [LastLeft, FirstRight, LeftProb, RightProb] = |
| 12310 | SL->computeSplitWorkItemInfo(W); |
| 12311 | |
| 12312 | // Use the first element on the right as pivot since we will make less-than |
| 12313 | // comparisons against it. |
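  //
  // For example (illustrative): clusters for the values {0, 3, 5, 9} split at
  // Pivot = 5 lower to
  //   if (Cond < 5) goto LeftMBB;   // handles {0, 3}
  //   else          goto RightMBB;  // handles {5, 9}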
| 12314 | CaseClusterIt PivotCluster = FirstRight; |
| 12315 | assert(PivotCluster > W.FirstCluster); |
| 12316 | assert(PivotCluster <= W.LastCluster); |
| 12317 | |
| 12318 | CaseClusterIt FirstLeft = W.FirstCluster; |
| 12319 | CaseClusterIt LastRight = W.LastCluster; |
| 12320 | |
| 12321 | const ConstantInt *Pivot = PivotCluster->Low; |
| 12322 | |
| 12323 | // New blocks will be inserted immediately after the current one. |
| 12324 | MachineFunction::iterator BBI(W.MBB); |
| 12325 | ++BBI; |
| 12326 | |
| 12327 | // We will branch to the LHS if Value < Pivot. If LHS is a single cluster, |
| 12328 | // we can branch to its destination directly if it's squeezed exactly in |
| 12329 | // between the known lower bound and Pivot - 1. |
| 12330 | MachineBasicBlock *LeftMBB; |
| 12331 | if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range && |
| 12332 | FirstLeft->Low == W.GE && |
| 12333 | (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) { |
| 12334 | LeftMBB = FirstLeft->MBB; |
| 12335 | } else { |
    LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, LeftMBB);
    WorkList.push_back(
        {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
    // Put Cond in a virtual register to make it available from the new blocks.
    ExportFromCurrentBlock(Cond);
| 12342 | } |
| 12343 | |
| 12344 | // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a |
| 12345 | // single cluster, RHS.Low == Pivot, and we can branch to its destination |
| 12346 | // directly if RHS.High equals the current upper bound. |
| 12347 | MachineBasicBlock *RightMBB; |
| 12348 | if (FirstRight == LastRight && FirstRight->Kind == CC_Range && |
| 12349 | W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) { |
| 12350 | RightMBB = FirstRight->MBB; |
| 12351 | } else { |
    RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, RightMBB);
    WorkList.push_back(
        {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
    // Put Cond in a virtual register to make it available from the new blocks.
    ExportFromCurrentBlock(Cond);
| 12358 | } |
| 12359 | |
| 12360 | // Create the CaseBlock record that will be used to lower the branch. |
| 12361 | CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB, |
| 12362 | getCurSDLoc(), LeftProb, RightProb); |
| 12363 | |
| 12364 | if (W.MBB == SwitchMBB) |
    visitSwitchCase(CB, SwitchMBB);
  else
    SL->SwitchCases.push_back(CB);
| 12368 | } |
| 12369 | |
// Scale CaseProb after peeling a case with the probability of PeeledCaseProb
// from the switch statement.
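//
// For example (illustrative): peeling a case of probability 3/4 leaves the
// remaining switch with probability 1/4, so a remaining case that had overall
// probability 1/8 is rescaled to (1/8) / (1/4) = 1/2 within the peeled-off
// switch.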
| 12372 | static BranchProbability scaleCaseProbality(BranchProbability CaseProb, |
| 12373 | BranchProbability PeeledCaseProb) { |
| 12374 | if (PeeledCaseProb == BranchProbability::getOne()) |
| 12375 | return BranchProbability::getZero(); |
| 12376 | BranchProbability SwitchProb = PeeledCaseProb.getCompl(); |
| 12377 | |
| 12378 | uint32_t Numerator = CaseProb.getNumerator(); |
  uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
  return BranchProbability(Numerator, std::max(Numerator, Denominator));
| 12381 | } |
| 12382 | |
| 12383 | // Try to peel the top probability case if it exceeds the threshold. |
| 12384 | // Return current MachineBasicBlock for the switch statement if the peeling |
| 12385 | // does not occur. |
| 12386 | // If the peeling is performed, return the newly created MachineBasicBlock |
| 12387 | // for the peeled switch statement. Also update Clusters to remove the peeled |
| 12388 | // case. PeeledCaseProb is the BranchProbability for the peeled case. |
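//
// For example (illustrative): with SwitchPeelThreshold = 66, a switch whose
// hottest case has probability 70% is peeled into a direct compare-and-branch
// for that case, followed by a smaller switch over the remaining 30%.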
| 12389 | MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster( |
| 12390 | const SwitchInst &SI, CaseClusterVector &Clusters, |
| 12391 | BranchProbability &PeeledCaseProb) { |
| 12392 | MachineBasicBlock *SwitchMBB = FuncInfo.MBB; |
| 12393 | // Don't perform if there is only one cluster or optimizing for size. |
| 12394 | if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 || |
| 12395 | TM.getOptLevel() == CodeGenOptLevel::None || |
| 12396 | SwitchMBB->getParent()->getFunction().hasMinSize()) |
| 12397 | return SwitchMBB; |
| 12398 | |
| 12399 | BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100); |
| 12400 | unsigned PeeledCaseIndex = 0; |
| 12401 | bool SwitchPeeled = false; |
| 12402 | for (unsigned Index = 0; Index < Clusters.size(); ++Index) { |
| 12403 | CaseCluster &CC = Clusters[Index]; |
| 12404 | if (CC.Prob < TopCaseProb) |
| 12405 | continue; |
| 12406 | TopCaseProb = CC.Prob; |
| 12407 | PeeledCaseIndex = Index; |
| 12408 | SwitchPeeled = true; |
| 12409 | } |
| 12410 | if (!SwitchPeeled) |
| 12411 | return SwitchMBB; |
| 12412 | |
| 12413 | LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: " |
                    << TopCaseProb << "\n");
| 12415 | |
| 12416 | // Record the MBB for the peeled switch statement. |
| 12417 | MachineFunction::iterator BBI(SwitchMBB); |
| 12418 | ++BBI; |
| 12419 | MachineBasicBlock *PeeledSwitchMBB = |
      FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
  FuncInfo.MF->insert(BBI, PeeledSwitchMBB);

  ExportFromCurrentBlock(SI.getCondition());
  auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
  SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
                          nullptr, nullptr, TopCaseProb.getCompl()};
  lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);

  Clusters.erase(PeeledCaseIt);
| 12430 | for (CaseCluster &CC : Clusters) { |
    LLVM_DEBUG(
        dbgs() << "Scale the probability for one cluster, before scaling: "
               << CC.Prob << "\n");
    CC.Prob = scaleCaseProbality(CC.Prob, TopCaseProb);
    LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
| 12436 | } |
| 12437 | PeeledCaseProb = TopCaseProb; |
| 12438 | return PeeledSwitchMBB; |
| 12439 | } |
| 12440 | |
| 12441 | void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) { |
| 12442 | // Extract cases from the switch. |
| 12443 | BranchProbabilityInfo *BPI = FuncInfo.BPI; |
| 12444 | CaseClusterVector Clusters; |
  Clusters.reserve(SI.getNumCases());
  for (auto I : SI.cases()) {
    MachineBasicBlock *Succ = FuncInfo.getMBB(I.getCaseSuccessor());
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
| 12453 | } |
| 12454 | |
  MachineBasicBlock *DefaultMBB = FuncInfo.getMBB(SI.getDefaultDest());
| 12456 | |
| 12457 | // Cluster adjacent cases with the same destination. We do this at all |
| 12458 | // optimization levels because it's cheap to do and will make codegen faster |
| 12459 | // if there are many clusters. |
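  //
  // For example (illustrative):
  //   case 0: case 1: case 2: goto bb1;  case 7: goto bb2;
  // becomes two clusters, [0,2] -> bb1 and [7,7] -> bb2.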
| 12460 | sortAndRangeify(Clusters); |
| 12461 | |
  // The branch probability of the peeled case.
| 12463 | BranchProbability PeeledCaseProb = BranchProbability::getZero(); |
| 12464 | MachineBasicBlock *PeeledSwitchMBB = |
| 12465 | peelDominantCaseCluster(SI, Clusters, PeeledCaseProb); |
| 12466 | |
| 12467 | // If there is only the default destination, jump there directly. |
| 12468 | MachineBasicBlock *SwitchMBB = FuncInfo.MBB; |
| 12469 | if (Clusters.empty()) { |
| 12470 | assert(PeeledSwitchMBB == SwitchMBB); |
    SwitchMBB->addSuccessor(DefaultMBB);
    if (DefaultMBB != NextBlock(SwitchMBB)) {
      DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
                              getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
| 12475 | } |
| 12476 | return; |
| 12477 | } |
| 12478 | |
  SL->findJumpTables(Clusters, &SI, getCurSDLoc(), DefaultMBB, DAG.getPSI(),
                     DAG.getBFI());
  SL->findBitTestClusters(Clusters, &SI);
| 12482 | |
| 12483 | LLVM_DEBUG({ |
| 12484 | dbgs() << "Case clusters: " ; |
| 12485 | for (const CaseCluster &C : Clusters) { |
| 12486 | if (C.Kind == CC_JumpTable) |
| 12487 | dbgs() << "JT:" ; |
| 12488 | if (C.Kind == CC_BitTests) |
| 12489 | dbgs() << "BT:" ; |
| 12490 | |
| 12491 | C.Low->getValue().print(dbgs(), true); |
| 12492 | if (C.Low != C.High) { |
| 12493 | dbgs() << '-'; |
| 12494 | C.High->getValue().print(dbgs(), true); |
| 12495 | } |
| 12496 | dbgs() << ' '; |
| 12497 | } |
| 12498 | dbgs() << '\n'; |
| 12499 | }); |
| 12500 | |
| 12501 | assert(!Clusters.empty()); |
| 12502 | SwitchWorkList WorkList; |
| 12503 | CaseClusterIt First = Clusters.begin(); |
| 12504 | CaseClusterIt Last = Clusters.end() - 1; |
  auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
  // Scale the branch probability for DefaultMBB if the peel occurs and
  // DefaultMBB is not replaced.
  if (PeeledCaseProb != BranchProbability::getZero() &&
      DefaultMBB == FuncInfo.getMBB(SI.getDefaultDest()))
    DefaultProb = scaleCaseProbality(DefaultProb, PeeledCaseProb);
  WorkList.push_back(
      {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
| 12513 | |
| 12514 | while (!WorkList.empty()) { |
| 12515 | SwitchWorkListItem W = WorkList.pop_back_val(); |
| 12516 | unsigned NumClusters = W.LastCluster - W.FirstCluster + 1; |
| 12517 | |
| 12518 | if (NumClusters > 3 && TM.getOptLevel() != CodeGenOptLevel::None && |
| 12519 | !DefaultMBB->getParent()->getFunction().hasMinSize()) { |
| 12520 | // For optimized builds, lower large range as a balanced binary tree. |
      splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
| 12522 | continue; |
| 12523 | } |
| 12524 | |
    lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
| 12526 | } |
| 12527 | } |
| 12528 | |
| 12529 | void SelectionDAGBuilder::visitStepVector(const CallInst &I) { |
| 12530 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 12531 | auto DL = getCurSDLoc(); |
  EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
  setValue(&I, DAG.getStepVector(DL, ResultVT));
| 12534 | } |
| 12535 | |
| 12536 | void SelectionDAGBuilder::visitVectorReverse(const CallInst &I) { |
| 12537 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
| 12539 | |
| 12540 | SDLoc DL = getCurSDLoc(); |
  SDValue V = getValue(I.getOperand(0));
  assert(VT == V.getValueType() && "Malformed vector.reverse!");
| 12543 | |
| 12544 | if (VT.isScalableVector()) { |
    setValue(&I, DAG.getNode(ISD::VECTOR_REVERSE, DL, VT, V));
| 12546 | return; |
| 12547 | } |
| 12548 | |
| 12549 | // Use VECTOR_SHUFFLE for the fixed-length vector |
| 12550 | // to maintain existing behavior. |
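  // For example (illustrative): a <4 x i32> input uses the mask <3, 2, 1, 0>.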
| 12551 | SmallVector<int, 8> Mask; |
| 12552 | unsigned NumElts = VT.getVectorMinNumElements(); |
| 12553 | for (unsigned i = 0; i != NumElts; ++i) |
    Mask.push_back(NumElts - 1 - i);
| 12555 | |
  setValue(&I, DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), Mask));
| 12557 | } |
| 12558 | |
| 12559 | void SelectionDAGBuilder::visitVectorDeinterleave(const CallInst &I, |
| 12560 | unsigned Factor) { |
| 12561 | auto DL = getCurSDLoc(); |
  SDValue InVec = getValue(I.getOperand(0));
| 12563 | |
| 12564 | SmallVector<EVT, 4> ValueVTs; |
  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
                  ValueVTs);
| 12567 | |
| 12568 | EVT OutVT = ValueVTs[0]; |
| 12569 | unsigned OutNumElts = OutVT.getVectorMinNumElements(); |
| 12570 | |
| 12571 | SmallVector<SDValue, 4> SubVecs(Factor); |
| 12572 | for (unsigned i = 0; i != Factor; ++i) { |
    assert(ValueVTs[i] == OutVT && "Expected VTs to be the same");
    SubVecs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec,
                             DAG.getVectorIdxConstant(OutNumElts * i, DL));
| 12576 | } |
| 12577 | |
| 12578 | // Use VECTOR_SHUFFLE for fixed-length vectors with factor of 2 to benefit |
| 12579 | // from existing legalisation and combines. |
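  //
  // For example (illustrative): with OutNumElts == 4, the even-lane mask is
  // <0, 2, 4, 6> and the odd-lane mask is <1, 3, 5, 7>.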
| 12580 | if (OutVT.isFixedLengthVector() && Factor == 2) { |
    SDValue Even = DAG.getVectorShuffle(OutVT, DL, SubVecs[0], SubVecs[1],
                                        createStrideMask(0, 2, OutNumElts));
    SDValue Odd = DAG.getVectorShuffle(OutVT, DL, SubVecs[0], SubVecs[1],
                                       createStrideMask(1, 2, OutNumElts));
    SDValue Res = DAG.getMergeValues({Even, Odd}, getCurSDLoc());
    setValue(&I, Res);
| 12587 | return; |
| 12588 | } |
| 12589 | |
  SDValue Res = DAG.getNode(ISD::VECTOR_DEINTERLEAVE, DL,
                            DAG.getVTList(ValueVTs), SubVecs);
  setValue(&I, Res);
| 12593 | } |
| 12594 | |
| 12595 | void SelectionDAGBuilder::visitVectorInterleave(const CallInst &I, |
| 12596 | unsigned Factor) { |
| 12597 | auto DL = getCurSDLoc(); |
| 12598 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
  EVT InVT = getValue(I.getOperand(0)).getValueType();
  EVT OutVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
| 12601 | |
| 12602 | SmallVector<SDValue, 8> InVecs(Factor); |
| 12603 | for (unsigned i = 0; i < Factor; ++i) { |
    InVecs[i] = getValue(I.getOperand(i));
    assert(InVecs[i].getValueType() == InVecs[0].getValueType() &&
           "Expected VTs to be the same");
| 12607 | } |
| 12608 | |
| 12609 | // Use VECTOR_SHUFFLE for fixed-length vectors with factor of 2 to benefit |
| 12610 | // from existing legalisation and combines. |
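  //
  // For example (illustrative): two <4 x i32> inputs are concatenated and
  // shuffled with the mask <0, 4, 1, 5, 2, 6, 3, 7>.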
| 12611 | if (OutVT.isFixedLengthVector() && Factor == 2) { |
| 12612 | unsigned NumElts = InVT.getVectorMinNumElements(); |
    SDValue V = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, InVecs);
    setValue(&I, DAG.getVectorShuffle(OutVT, DL, V, DAG.getUNDEF(OutVT),
                                      createInterleaveMask(NumElts, 2)));
| 12616 | return; |
| 12617 | } |
| 12618 | |
| 12619 | SmallVector<EVT, 8> ValueVTs(Factor, InVT); |
| 12620 | SDValue Res = |
      DAG.getNode(ISD::VECTOR_INTERLEAVE, DL, DAG.getVTList(ValueVTs), InVecs);
| 12622 | |
| 12623 | SmallVector<SDValue, 8> Results(Factor); |
| 12624 | for (unsigned i = 0; i < Factor; ++i) |
    Results[i] = Res.getValue(i);
| 12626 | |
  Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Results);
  setValue(&I, Res);
| 12629 | } |
| 12630 | |
| 12631 | void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) { |
| 12632 | SmallVector<EVT, 4> ValueVTs; |
  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
                  ValueVTs);
| 12635 | unsigned NumValues = ValueVTs.size(); |
| 12636 | if (NumValues == 0) return; |
| 12637 | |
| 12638 | SmallVector<SDValue, 4> Values(NumValues); |
  SDValue Op = getValue(I.getOperand(0));
| 12640 | |
| 12641 | for (unsigned i = 0; i != NumValues; ++i) |
    Values[i] = DAG.getNode(ISD::FREEZE, getCurSDLoc(), ValueVTs[i],
                            SDValue(Op.getNode(), Op.getResNo() + i));

  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                           DAG.getVTList(ValueVTs), Values));
| 12647 | } |
| 12648 | |
| 12649 | void SelectionDAGBuilder::visitVectorSplice(const CallInst &I) { |
| 12650 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
| 12652 | |
| 12653 | SDLoc DL = getCurSDLoc(); |
  SDValue V1 = getValue(I.getOperand(0));
  SDValue V2 = getValue(I.getOperand(1));
  int64_t Imm = cast<ConstantInt>(I.getOperand(2))->getSExtValue();
| 12657 | |
| 12658 | // VECTOR_SHUFFLE doesn't support a scalable mask so use a dedicated node. |
| 12659 | if (VT.isScalableVector()) { |
    setValue(
        &I, DAG.getNode(ISD::VECTOR_SPLICE, DL, VT, V1, V2,
                        DAG.getSignedConstant(
                            Imm, DL, TLI.getVectorIdxTy(DAG.getDataLayout()))));
| 12664 | return; |
| 12665 | } |
| 12666 | |
| 12667 | unsigned NumElts = VT.getVectorNumElements(); |
| 12668 | |
| 12669 | uint64_t Idx = (NumElts + Imm) % NumElts; |
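  // For example (illustrative): NumElts == 8 with Imm == -3 gives Idx == 5
  // and the mask <5, 6, 7, 8, 9, 10, 11, 12>, where indices >= 8 select from
  // V2.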
| 12670 | |
| 12671 | // Use VECTOR_SHUFFLE to maintain original behaviour for fixed-length vectors. |
| 12672 | SmallVector<int, 8> Mask; |
| 12673 | for (unsigned i = 0; i < NumElts; ++i) |
    Mask.push_back(Idx + i);
  setValue(&I, DAG.getVectorShuffle(VT, DL, V1, V2, Mask));
| 12676 | } |
| 12677 | |
| 12678 | // Consider the following MIR after SelectionDAG, which produces output in |
| 12679 | // phyregs in the first case or virtregs in the second case. |
| 12680 | // |
| 12681 | // INLINEASM_BR ..., implicit-def $ebx, ..., implicit-def $edx |
| 12682 | // %5:gr32 = COPY $ebx |
| 12683 | // %6:gr32 = COPY $edx |
| 12684 | // %1:gr32 = COPY %6:gr32 |
| 12685 | // %0:gr32 = COPY %5:gr32 |
| 12686 | // |
| 12687 | // INLINEASM_BR ..., def %5:gr32, ..., def %6:gr32 |
| 12688 | // %1:gr32 = COPY %6:gr32 |
| 12689 | // %0:gr32 = COPY %5:gr32 |
| 12690 | // |
| 12691 | // Given %0, we'd like to return $ebx in the first case and %5 in the second. |
| 12692 | // Given %1, we'd like to return $edx in the first case and %6 in the second. |
| 12693 | // |
| 12694 | // If a callbr has outputs, it will have a single mapping in FuncInfo.ValueMap |
| 12695 | // to a single virtreg (such as %0). The remaining outputs monotonically |
| 12696 | // increase in virtreg number from there. If a callbr has no outputs, then it |
| 12697 | // should not have a corresponding callbr landingpad; in fact, the callbr |
| 12698 | // landingpad would not even be able to refer to such a callbr. |
| 12699 | static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg) { |
  MachineInstr *MI = MRI.def_begin(Reg)->getParent();
  // There is definitely at least one copy.
  assert(MI->getOpcode() == TargetOpcode::COPY &&
         "start of copy chain MUST be COPY");
  Reg = MI->getOperand(1).getReg();
  MI = MRI.def_begin(Reg)->getParent();
  // There may be an optional second copy.
  if (MI->getOpcode() == TargetOpcode::COPY) {
    assert(Reg.isVirtual() && "expected COPY of virtual register");
    Reg = MI->getOperand(1).getReg();
    assert(Reg.isPhysical() && "expected COPY of physical register");
    MI = MRI.def_begin(Reg)->getParent();
  }
| 12712 | } |
| 12713 | // The start of the chain must be an INLINEASM_BR. |
  assert(MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
         "end of copy chain MUST be INLINEASM_BR");
| 12716 | return Reg; |
| 12717 | } |
| 12718 | |
| 12719 | // We must do this walk rather than the simpler |
| 12720 | // setValue(&I, getCopyFromRegs(CBR, CBR->getType())); |
| 12721 | // otherwise we will end up with copies of virtregs only valid along direct |
| 12722 | // edges. |
| 12723 | void SelectionDAGBuilder::visitCallBrLandingPad(const CallInst &I) { |
| 12724 | SmallVector<EVT, 8> ResultVTs; |
| 12725 | SmallVector<SDValue, 8> ResultValues; |
| 12726 | const auto *CBR = |
      cast<CallBrInst>(I.getParent()->getUniquePredecessor()->getTerminator());
| 12728 | |
| 12729 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 12730 | const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo(); |
| 12731 | MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); |
| 12732 | |
| 12733 | Register InitialDef = FuncInfo.ValueMap[CBR]; |
| 12734 | SDValue Chain = DAG.getRoot(); |
| 12735 | |
| 12736 | // Re-parse the asm constraints string. |
| 12737 | TargetLowering::AsmOperandInfoVector TargetConstraints = |
      TLI.ParseConstraints(DAG.getDataLayout(), TRI, *CBR);
| 12739 | for (auto &T : TargetConstraints) { |
| 12740 | SDISelAsmOperandInfo OpInfo(T); |
| 12741 | if (OpInfo.Type != InlineAsm::isOutput) |
| 12742 | continue; |
| 12743 | |
| 12744 | // Pencil in OpInfo.ConstraintType and OpInfo.ConstraintVT based on the |
| 12745 | // individual constraint. |
    TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
| 12747 | |
| 12748 | switch (OpInfo.ConstraintType) { |
| 12749 | case TargetLowering::C_Register: |
| 12750 | case TargetLowering::C_RegisterClass: { |
| 12751 | // Fill in OpInfo.AssignedRegs.Regs. |
      getRegistersForValue(DAG, getCurSDLoc(), OpInfo, OpInfo);
| 12753 | |
| 12754 | // getRegistersForValue may produce 1 to many registers based on whether |
| 12755 | // the OpInfo.ConstraintVT is legal on the target or not. |
| 12756 | for (Register &Reg : OpInfo.AssignedRegs.Regs) { |
        Register OriginalDef = FollowCopyChain(MRI, InitialDef++);
        if (OriginalDef.isPhysical())
          FuncInfo.MBB->addLiveIn(OriginalDef);
| 12760 | // Update the assigned registers to use the original defs. |
| 12761 | Reg = OriginalDef; |
| 12762 | } |
| 12763 | |
      SDValue V = OpInfo.AssignedRegs.getCopyFromRegs(
          DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, CBR);
      ResultValues.push_back(V);
      ResultVTs.push_back(OpInfo.ConstraintVT);
| 12768 | break; |
| 12769 | } |
| 12770 | case TargetLowering::C_Other: { |
| 12771 | SDValue Flag; |
      SDValue V = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(),
                                                  OpInfo, DAG);
      ++InitialDef;
      ResultValues.push_back(V);
      ResultVTs.push_back(OpInfo.ConstraintVT);
| 12777 | break; |
| 12778 | } |
| 12779 | default: |
| 12780 | break; |
| 12781 | } |
| 12782 | } |
  SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                          DAG.getVTList(ResultVTs), ResultValues);
  setValue(&I, V);
| 12786 | } |
| 12787 | |