| 1 | //===--- ExpandIRInsts.cpp - Expand IR instructions -----------------------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // This pass expands certain instructions at the IR level. |
| 9 | // |
| 10 | // The following expansions are implemented: |
| 11 | // - Expansion of ‘fptoui .. to’, ‘fptosi .. to’, ‘uitofp .. to’, ‘sitofp |
| 12 | // .. to’ instructions with a bitwidth above a threshold. This is |
| 13 | //     useful for targets like x86_64 that cannot lower fp conversions |
| 14 | // with more than 128 bits. |
| 15 | // |
| 16 | // - Expansion of ‘frem‘ for types MVT::f16, MVT::f32, and MVT::f64 for |
| 17 | // targets which use "Expand" as the legalization action for the |
| 18 | // corresponding type. |
| 19 | // |
| 20 | // - Expansion of ‘udiv‘, ‘sdiv‘, ‘urem‘, and ‘srem‘ instructions with |
| 21 | // a bitwidth above a threshold into a call to auto-generated |
| 22 | // functions. This is useful for targets like x86_64 that cannot |
| 23 | // lower divisions with more than 128 bits or targets like x86_32 that |
| 24 | // cannot lower divisions with more than 64 bits. |
| 25 | // |
| 26 | // Instructions with vector types are scalarized first if their scalar |
| 27 | // types can be expanded. Scalable vector types are not supported. |
| 28 | //===----------------------------------------------------------------------===// |
| 29 | |
| 30 | #include "llvm/CodeGen/ExpandIRInsts.h" |
| 31 | #include "llvm/ADT/SmallVector.h" |
| 32 | #include "llvm/Analysis/AssumptionCache.h" |
| 33 | #include "llvm/Analysis/GlobalsModRef.h" |
| 34 | #include "llvm/Analysis/SimplifyQuery.h" |
| 35 | #include "llvm/Analysis/ValueTracking.h" |
| 36 | #include "llvm/CodeGen/ISDOpcodes.h" |
| 37 | #include "llvm/CodeGen/Passes.h" |
| 38 | #include "llvm/CodeGen/TargetLowering.h" |
| 39 | #include "llvm/CodeGen/TargetPassConfig.h" |
| 40 | #include "llvm/CodeGen/TargetSubtargetInfo.h" |
| 41 | #include "llvm/IR/IRBuilder.h" |
| 42 | #include "llvm/IR/InstIterator.h" |
| 43 | #include "llvm/IR/Module.h" |
| 44 | #include "llvm/IR/PassManager.h" |
| 45 | #include "llvm/InitializePasses.h" |
| 46 | #include "llvm/Pass.h" |
| 47 | #include "llvm/Support/CommandLine.h" |
| 48 | #include "llvm/Support/ErrorHandling.h" |
| 49 | #include "llvm/Target/TargetMachine.h" |
| 50 | #include "llvm/Transforms/Utils/BasicBlockUtils.h" |
| 51 | #include "llvm/Transforms/Utils/IntegerDivision.h" |
| 52 | #include <llvm/Support/Casting.h> |
| 53 | #include <optional> |
| 54 | |
| 55 | #define DEBUG_TYPE "expand-ir-insts" |
| 56 | |
| 57 | using namespace llvm; |
| 58 | |
| 59 | static cl::opt<unsigned> |
| 60 | ExpandFpConvertBits("expand-fp-convert-bits" , cl::Hidden, |
| 61 | cl::init(Val: llvm::IntegerType::MAX_INT_BITS), |
| 62 | cl::desc("fp convert instructions on integers with " |
| 63 | "more than <N> bits are expanded." )); |
| 64 | |
| 65 | static cl::opt<unsigned> |
| 66 | ExpandDivRemBits("expand-div-rem-bits" , cl::Hidden, |
| 67 | cl::init(Val: llvm::IntegerType::MAX_INT_BITS), |
| 68 | cl::desc("div and rem instructions on integers with " |
| 69 | "more than <N> bits are expanded." )); |
| 70 | |
| 71 | namespace { |
| 72 | bool isConstantPowerOfTwo(llvm::Value *V, bool SignedOp) { |
| 73 | auto *C = dyn_cast<ConstantInt>(Val: V); |
| 74 | if (!C) |
| 75 | return false; |
| 76 | |
| 77 | APInt Val = C->getValue(); |
| 78 | if (SignedOp && Val.isNegative()) |
| 79 | Val = -Val; |
| 80 | return Val.isPowerOf2(); |
| 81 | } |
| 82 | |
| 83 | bool isSigned(unsigned int Opcode) { |
| 84 | return Opcode == Instruction::SDiv || Opcode == Instruction::SRem; |
| 85 | } |
| 86 | |
| 87 | /// This class implements a precise expansion of the frem instruction. |
| 88 | /// The generated code is based on the fmod implementation in the AMD device |
| 89 | /// libs. |
| 90 | class FRemExpander { |
| 91 | /// The IRBuilder to use for the expansion. |
| 92 | IRBuilder<> &B; |
| 93 | |
| 94 | /// Floating point type of the return value and the arguments of the FRem |
| 95 | /// instructions that should be expanded. |
| 96 | Type *FremTy; |
| 97 | |
| 98 | /// Floating point type to use for the computation. This may be |
| 99 | /// wider than the \p FremTy. |
| 100 | Type *ComputeFpTy; |
| 101 | |
| 102 | /// Integer type used to hold the exponents returned by frexp. |
| 103 | Type *ExTy; |
| 104 | |
| 105 | /// How many bits of the quotient to compute per iteration of the |
| 106 | /// algorithm, stored as a value of type \p ExTy. |
| 107 | Value *Bits; |
| 108 | |
| 109 | /// Constant 1 of type \p ExTy. |
| 110 | Value *One; |
| 111 | |
| 112 | /// The frem argument/return types that can be expanded by this class. |
| 113 | // TODO: The expansion could work for other floating point types |
| 114 | // as well, but this would require additional testing. |
| 115 | static constexpr std::array<MVT, 3> ExpandableTypes{MVT::f16, MVT::f32, |
| 116 | MVT::f64}; |
| 117 | |
| 118 | public: |
| 119 | static bool canExpandType(Type *Ty) { |
| 120 | EVT VT = EVT::getEVT(Ty); |
| 121 | assert(VT.isSimple() && "Can expand only simple types" ); |
| 122 | |
| 123 | return is_contained(Range: ExpandableTypes, Element: VT.getSimpleVT()); |
| 124 | } |
| 125 | |
| 126 | static bool shouldExpandFremType(const TargetLowering &TLI, EVT VT) { |
| 127 | assert(!VT.isVector() && "Cannot handle vector type; must scalarize first" ); |
| 128 | return TLI.getOperationAction(Op: ISD::FREM, VT) == |
| 129 | TargetLowering::LegalizeAction::Expand; |
| 130 | } |
| 131 | |
| 132 | static bool shouldExpandFremType(const TargetLowering &TLI, Type *Ty) { |
| 133 | // Consider scalar type for simplicity. It seems unlikely that a |
| 134 | // vector type can be legalized without expansion if the scalar |
| 135 | // type cannot. |
| 136 | return shouldExpandFremType(TLI, VT: EVT::getEVT(Ty: Ty->getScalarType())); |
| 137 | } |
| 138 | |
| 139 | /// Return true if the pass should expand frem instructions of any type |
| 140 | /// for the target represented by \p TLI. |
| 141 | static bool shouldExpandAnyFremType(const TargetLowering &TLI) { |
| 142 | return any_of(Range: ExpandableTypes, |
| 143 | P: [&](MVT V) { return shouldExpandFremType(TLI, VT: EVT(V)); }); |
| 144 | } |
| 145 | |
| 146 | static FRemExpander create(IRBuilder<> &B, Type *Ty) { |
| 147 | assert(canExpandType(Ty) && "Expected supported floating point type" ); |
| 148 | |
| 149 | // The type to use for the computation of the remainder. This may be |
| 150 | // wider than the input/result type which affects the ... |
| 151 | Type *ComputeTy = Ty; |
| 152 | // ... maximum number of iterations of the remainder computation loop |
| 153 | // to use. This value is for the case in which the computation |
| 154 | // uses the same input/result type. |
| 155 | unsigned MaxIter = 2; |
| 156 | |
| 157 | if (Ty->isHalfTy()) { |
| 158 | // Use the wider type and less iterations. |
| 159 | ComputeTy = B.getFloatTy(); |
| 160 | MaxIter = 1; |
| 161 | } |
| 162 | |
| 163 | unsigned Precision = |
| 164 | llvm::APFloat::semanticsPrecision(Ty->getFltSemantics()); |
| 165 | return FRemExpander{B, Ty, Precision / MaxIter, ComputeTy}; |
| 166 | } |
| 167 | |
| 168 | /// Build the FRem expansion for the numerator \p X and the |
| 169 | /// denumerator \p Y. The type of X and Y must match \p FremTy. The |
| 170 | /// code will be generated at the insertion point of \p B and the |
| 171 | /// insertion point will be reset at exit. |
| 172 | Value *buildFRem(Value *X, Value *Y, std::optional<SimplifyQuery> &SQ) const; |
| 173 | |
| 174 | /// Build an approximate FRem expansion for the numerator \p X and |
| 175 | /// the denumerator \p Y at the insertion point of builder \p B. |
| 176 | /// The type of X and Y must match \p FremTy. |
| 177 | Value *buildApproxFRem(Value *X, Value *Y) const; |
| 178 | |
| 179 | private: |
| 180 | FRemExpander(IRBuilder<> &B, Type *FremTy, unsigned Bits, Type *ComputeFpTy) |
| 181 | : B(B), FremTy(FremTy), ComputeFpTy(ComputeFpTy), ExTy(B.getInt32Ty()), |
| 182 | Bits(ConstantInt::get(Ty: ExTy, V: Bits)), One(ConstantInt::get(Ty: ExTy, V: 1)) {}; |
| 183 | |
| 184 | Value *createRcp(Value *V, const Twine &Name) const { |
| 185 | // Leave it to later optimizations to turn this into an rcp |
| 186 | // instruction if available. |
| 187 | return B.CreateFDiv(L: ConstantFP::get(Ty: ComputeFpTy, V: 1.0), R: V, Name); |
| 188 | } |
| 189 | |
| 190 | // Helper function to build the UPDATE_AX code which is common to the |
| 191 | // loop body and the "final iteration". |
| 192 | Value *buildUpdateAx(Value *Ax, Value *Ay, Value *Ayinv) const { |
| 193 | // Build: |
| 194 | // float q = rint(ax * ayinv); |
| 195 | // ax = fma(-q, ay, ax); |
| 196 | // int clt = ax < 0.0f; |
| 197 | // float axp = ax + ay; |
| 198 | // ax = clt ? axp : ax; |
| 199 | Value *Q = B.CreateUnaryIntrinsic(ID: Intrinsic::rint, V: B.CreateFMul(L: Ax, R: Ayinv), |
| 200 | FMFSource: {}, Name: "q" ); |
| 201 | Value *AxUpdate = B.CreateFMA(Factor1: B.CreateFNeg(V: Q), Factor2: Ay, Summand: Ax, FMFSource: {}, Name: "ax" ); |
| 202 | Value *Clt = B.CreateFCmp(P: CmpInst::FCMP_OLT, LHS: AxUpdate, |
| 203 | RHS: ConstantFP::getZero(Ty: ComputeFpTy), Name: "clt" ); |
| 204 | Value *Axp = B.CreateFAdd(L: AxUpdate, R: Ay, Name: "axp" ); |
| 205 | return B.CreateSelect(C: Clt, True: Axp, False: AxUpdate, Name: "ax" ); |
| 206 | } |
| 207 | |
| 208 | /// Build code to extract the exponent and mantissa of \p Src. |
| 209 | /// Return the exponent minus one for use as a loop bound and |
| 210 | /// the mantissa taken to the given \p NewExp power. |
| 211 | std::pair<Value *, Value *> buildExpAndPower(Value *Src, Value *NewExp, |
| 212 | const Twine &ExName, |
| 213 | const Twine &PowName) const { |
| 214 | // Build: |
| 215 | // ExName = frexp_exp(Src) - 1; |
| 216 | // PowName = fldexp(frexp_mant(ExName), NewExp); |
| 217 | Type *Ty = Src->getType(); |
| 218 | Type *ExTy = B.getInt32Ty(); |
| 219 | Value *Frexp = B.CreateIntrinsic(ID: Intrinsic::frexp, Types: {Ty, ExTy}, Args: Src); |
| 220 | Value *Mant = B.CreateExtractValue(Agg: Frexp, Idxs: {0}); |
| 221 | Value *Exp = B.CreateExtractValue(Agg: Frexp, Idxs: {1}); |
| 222 | |
| 223 | Exp = B.CreateSub(LHS: Exp, RHS: One, Name: ExName); |
| 224 | Value *Pow = B.CreateLdexp(Src: Mant, Exp: NewExp, FMFSource: {}, Name: PowName); |
| 225 | |
| 226 | return {Pow, Exp}; |
| 227 | } |
| 228 | |
| 229 | /// Build the main computation of the remainder for the case in which |
| 230 | /// Ax > Ay, where Ax = |X|, Ay = |Y|, and X is the numerator and Y the |
| 231 | /// denumerator. Add the incoming edge from the computation result |
| 232 | /// to \p RetPhi. |
| 233 | void buildRemainderComputation(Value *AxInitial, Value *AyInitial, Value *X, |
| 234 | PHINode *RetPhi, FastMathFlags FMF) const { |
| 235 | IRBuilder<>::FastMathFlagGuard Guard(B); |
| 236 | B.setFastMathFlags(FMF); |
| 237 | |
| 238 | // Build: |
| 239 | // ex = frexp_exp(ax) - 1; |
| 240 | // ax = fldexp(frexp_mant(ax), bits); |
| 241 | // ey = frexp_exp(ay) - 1; |
| 242 | // ay = fledxp(frexp_mant(ay), 1); |
| 243 | auto [Ax, Ex] = buildExpAndPower(Src: AxInitial, NewExp: Bits, ExName: "ex" , PowName: "ax" ); |
| 244 | auto [Ay, Ey] = buildExpAndPower(Src: AyInitial, NewExp: One, ExName: "ey" , PowName: "ay" ); |
| 245 | |
| 246 | // Build: |
| 247 | // int nb = ex - ey; |
| 248 | // float ayinv = 1.0/ay; |
| 249 | Value *Nb = B.CreateSub(LHS: Ex, RHS: Ey, Name: "nb" ); |
| 250 | Value *Ayinv = createRcp(V: Ay, Name: "ayinv" ); |
| 251 | |
| 252 | // Build: while (nb > bits) |
| 253 | BasicBlock * = B.GetInsertBlock(); |
| 254 | Function *Fun = PreheaderBB->getParent(); |
| 255 | auto *LoopBB = BasicBlock::Create(Context&: B.getContext(), Name: "frem.loop_body" , Parent: Fun); |
| 256 | auto *ExitBB = BasicBlock::Create(Context&: B.getContext(), Name: "frem.loop_exit" , Parent: Fun); |
| 257 | |
| 258 | B.CreateCondBr(Cond: B.CreateICmp(P: CmpInst::ICMP_SGT, LHS: Nb, RHS: Bits), True: LoopBB, False: ExitBB); |
| 259 | |
| 260 | // Build loop body: |
| 261 | // UPDATE_AX |
| 262 | // ax = fldexp(ax, bits); |
| 263 | // nb -= bits; |
| 264 | // One iteration of the loop is factored out. The code shared by |
| 265 | // the loop and this "iteration" is denoted by UPDATE_AX. |
| 266 | B.SetInsertPoint(LoopBB); |
| 267 | PHINode *NbIv = B.CreatePHI(Ty: Nb->getType(), NumReservedValues: 2, Name: "nb_iv" ); |
| 268 | NbIv->addIncoming(V: Nb, BB: PreheaderBB); |
| 269 | |
| 270 | auto *AxPhi = B.CreatePHI(Ty: ComputeFpTy, NumReservedValues: 2, Name: "ax_loop_phi" ); |
| 271 | AxPhi->addIncoming(V: Ax, BB: PreheaderBB); |
| 272 | |
| 273 | Value *AxPhiUpdate = buildUpdateAx(Ax: AxPhi, Ay, Ayinv); |
| 274 | AxPhiUpdate = B.CreateLdexp(Src: AxPhiUpdate, Exp: Bits, FMFSource: {}, Name: "ax_update" ); |
| 275 | AxPhi->addIncoming(V: AxPhiUpdate, BB: LoopBB); |
| 276 | NbIv->addIncoming(V: B.CreateSub(LHS: NbIv, RHS: Bits, Name: "nb_update" ), BB: LoopBB); |
| 277 | |
| 278 | B.CreateCondBr(Cond: B.CreateICmp(P: CmpInst::ICMP_SGT, LHS: NbIv, RHS: Bits), True: LoopBB, False: ExitBB); |
| 279 | |
| 280 | // Build final iteration |
| 281 | // ax = fldexp(ax, nb - bits + 1); |
| 282 | // UPDATE_AX |
| 283 | B.SetInsertPoint(ExitBB); |
| 284 | |
| 285 | auto *AxPhiExit = B.CreatePHI(Ty: ComputeFpTy, NumReservedValues: 2, Name: "ax_exit_phi" ); |
| 286 | AxPhiExit->addIncoming(V: Ax, BB: PreheaderBB); |
| 287 | AxPhiExit->addIncoming(V: AxPhi, BB: LoopBB); |
| 288 | auto *NbExitPhi = B.CreatePHI(Ty: Nb->getType(), NumReservedValues: 2, Name: "nb_exit_phi" ); |
| 289 | NbExitPhi->addIncoming(V: NbIv, BB: LoopBB); |
| 290 | NbExitPhi->addIncoming(V: Nb, BB: PreheaderBB); |
| 291 | |
| 292 | Value *AxFinal = B.CreateLdexp( |
| 293 | Src: AxPhiExit, Exp: B.CreateAdd(LHS: B.CreateSub(LHS: NbExitPhi, RHS: Bits), RHS: One), FMFSource: {}, Name: "ax" ); |
| 294 | AxFinal = buildUpdateAx(Ax: AxFinal, Ay, Ayinv); |
| 295 | |
| 296 | // Build: |
| 297 | // ax = fldexp(ax, ey); |
| 298 | // ret = copysign(ax,x); |
| 299 | AxFinal = B.CreateLdexp(Src: AxFinal, Exp: Ey, FMFSource: {}, Name: "ax" ); |
| 300 | if (ComputeFpTy != FremTy) |
| 301 | AxFinal = B.CreateFPTrunc(V: AxFinal, DestTy: FremTy); |
| 302 | Value *Ret = B.CreateCopySign(LHS: AxFinal, RHS: X); |
| 303 | |
| 304 | RetPhi->addIncoming(V: Ret, BB: ExitBB); |
| 305 | } |
| 306 | |
| 307 | /// Build the else-branch of the conditional in the FRem |
| 308 | /// expansion, i.e. the case in wich Ax <= Ay, where Ax = |X|, Ay |
| 309 | /// = |Y|, and X is the numerator and Y the denumerator. Add the |
| 310 | /// incoming edge from the result to \p RetPhi. |
| 311 | void buildElseBranch(Value *Ax, Value *Ay, Value *X, PHINode *RetPhi) const { |
| 312 | // Build: |
| 313 | // ret = ax == ay ? copysign(0.0f, x) : x; |
| 314 | Value *ZeroWithXSign = B.CreateCopySign(LHS: ConstantFP::getZero(Ty: FremTy), RHS: X); |
| 315 | Value *Ret = B.CreateSelect(C: B.CreateFCmpOEQ(LHS: Ax, RHS: Ay), True: ZeroWithXSign, False: X); |
| 316 | |
| 317 | RetPhi->addIncoming(V: Ret, BB: B.GetInsertBlock()); |
| 318 | } |
| 319 | |
| 320 | /// Return a value that is NaN if one of the corner cases concerning |
| 321 | /// the inputs \p X and \p Y is detected, and \p Ret otherwise. |
| 322 | Value *handleInputCornerCases(Value *Ret, Value *X, Value *Y, |
| 323 | std::optional<SimplifyQuery> &SQ, |
| 324 | bool NoInfs) const { |
| 325 | // Build: |
| 326 | // ret = (y == 0.0f || isnan(y)) ? QNAN : ret; |
| 327 | // ret = isfinite(x) ? ret : QNAN; |
| 328 | Value *Nan = ConstantFP::getQNaN(Ty: FremTy); |
| 329 | Ret = B.CreateSelect(C: B.CreateFCmpUEQ(LHS: Y, RHS: ConstantFP::getZero(Ty: FremTy)), True: Nan, |
| 330 | False: Ret); |
| 331 | Value *XFinite = |
| 332 | NoInfs || (SQ && isKnownNeverInfinity(V: X, SQ: *SQ)) |
| 333 | ? B.getTrue() |
| 334 | : B.CreateFCmpULT(LHS: B.CreateUnaryIntrinsic(ID: Intrinsic::fabs, V: X), |
| 335 | RHS: ConstantFP::getInfinity(Ty: FremTy)); |
| 336 | Ret = B.CreateSelect(C: XFinite, True: Ret, False: Nan); |
| 337 | |
| 338 | return Ret; |
| 339 | } |
| 340 | }; |
| 341 | |
| 342 | Value *FRemExpander::buildApproxFRem(Value *X, Value *Y) const { |
| 343 | IRBuilder<>::FastMathFlagGuard Guard(B); |
| 344 | // Propagating the approximate functions flag to the |
| 345 | // division leads to an unacceptable drop in precision |
| 346 | // on AMDGPU. |
| 347 | // TODO Find out if any flags might be worth propagating. |
| 348 | B.clearFastMathFlags(); |
| 349 | |
| 350 | Value *Quot = B.CreateFDiv(L: X, R: Y); |
| 351 | Value *Trunc = B.CreateUnaryIntrinsic(ID: Intrinsic::trunc, V: Quot, FMFSource: {}); |
| 352 | Value *Neg = B.CreateFNeg(V: Trunc); |
| 353 | |
| 354 | return B.CreateFMA(Factor1: Neg, Factor2: Y, Summand: X); |
| 355 | } |
| 356 | |
| 357 | Value *FRemExpander::buildFRem(Value *X, Value *Y, |
| 358 | std::optional<SimplifyQuery> &SQ) const { |
| 359 | assert(X->getType() == FremTy && Y->getType() == FremTy); |
| 360 | |
| 361 | FastMathFlags FMF = B.getFastMathFlags(); |
| 362 | |
| 363 | // This function generates the following code structure: |
| 364 | // if (abs(x) > abs(y)) |
| 365 | // { ret = compute remainder } |
| 366 | // else |
| 367 | // { ret = x or 0 with sign of x } |
| 368 | // Adjust ret to NaN/inf in input |
| 369 | // return ret |
| 370 | Value *Ax = B.CreateUnaryIntrinsic(ID: Intrinsic::fabs, V: X, FMFSource: {}, Name: "ax" ); |
| 371 | Value *Ay = B.CreateUnaryIntrinsic(ID: Intrinsic::fabs, V: Y, FMFSource: {}, Name: "ay" ); |
| 372 | if (ComputeFpTy != X->getType()) { |
| 373 | Ax = B.CreateFPExt(V: Ax, DestTy: ComputeFpTy, Name: "ax" ); |
| 374 | Ay = B.CreateFPExt(V: Ay, DestTy: ComputeFpTy, Name: "ay" ); |
| 375 | } |
| 376 | Value *AxAyCmp = B.CreateFCmpOGT(LHS: Ax, RHS: Ay); |
| 377 | |
| 378 | PHINode *RetPhi = B.CreatePHI(Ty: FremTy, NumReservedValues: 2, Name: "ret" ); |
| 379 | Value *Ret = RetPhi; |
| 380 | |
| 381 | // We would return NaN in all corner cases handled here. |
| 382 | // Hence, if NaNs are excluded, keep the result as it is. |
| 383 | if (!FMF.noNaNs()) |
| 384 | Ret = handleInputCornerCases(Ret, X, Y, SQ, NoInfs: FMF.noInfs()); |
| 385 | |
| 386 | Function *Fun = B.GetInsertBlock()->getParent(); |
| 387 | auto *ThenBB = BasicBlock::Create(Context&: B.getContext(), Name: "frem.compute" , Parent: Fun); |
| 388 | auto *ElseBB = BasicBlock::Create(Context&: B.getContext(), Name: "frem.else" , Parent: Fun); |
| 389 | SplitBlockAndInsertIfThenElse(Cond: AxAyCmp, SplitBefore: RetPhi, ThenBlock: &ThenBB, ElseBlock: &ElseBB); |
| 390 | |
| 391 | auto SavedInsertPt = B.GetInsertPoint(); |
| 392 | |
| 393 | // Build remainder computation for "then" branch |
| 394 | // |
| 395 | // The ordered comparison ensures that ax and ay are not NaNs |
| 396 | // in the then-branch. Furthermore, y cannot be an infinity and the |
| 397 | // check at the end of the function ensures that the result will not |
| 398 | // be used if x is an infinity. |
| 399 | FastMathFlags ComputeFMF = FMF; |
| 400 | ComputeFMF.setNoInfs(); |
| 401 | ComputeFMF.setNoNaNs(); |
| 402 | |
| 403 | B.SetInsertPoint(ThenBB); |
| 404 | buildRemainderComputation(AxInitial: Ax, AyInitial: Ay, X, RetPhi, FMF); |
| 405 | B.CreateBr(Dest: RetPhi->getParent()); |
| 406 | |
| 407 | // Build "else"-branch |
| 408 | B.SetInsertPoint(ElseBB); |
| 409 | buildElseBranch(Ax, Ay, X, RetPhi); |
| 410 | B.CreateBr(Dest: RetPhi->getParent()); |
| 411 | |
| 412 | B.SetInsertPoint(SavedInsertPt); |
| 413 | |
| 414 | return Ret; |
| 415 | } |
| 416 | } // namespace |
| 417 | |
| 418 | static bool expandFRem(BinaryOperator &I, std::optional<SimplifyQuery> &SQ) { |
| 419 | LLVM_DEBUG(dbgs() << "Expanding instruction: " << I << '\n'); |
| 420 | |
| 421 | Type *Ty = I.getType(); |
| 422 | assert(FRemExpander::canExpandType(Ty) && |
| 423 | "Expected supported floating point type" ); |
| 424 | |
| 425 | FastMathFlags FMF = I.getFastMathFlags(); |
| 426 | // TODO Make use of those flags for optimization? |
| 427 | FMF.setAllowReciprocal(false); |
| 428 | FMF.setAllowContract(false); |
| 429 | |
| 430 | IRBuilder<> B(&I); |
| 431 | B.setFastMathFlags(FMF); |
| 432 | B.SetCurrentDebugLocation(I.getDebugLoc()); |
| 433 | |
| 434 | const FRemExpander Expander = FRemExpander::create(B, Ty); |
| 435 | Value *Ret = FMF.approxFunc() |
| 436 | ? Expander.buildApproxFRem(X: I.getOperand(i_nocapture: 0), Y: I.getOperand(i_nocapture: 1)) |
| 437 | : Expander.buildFRem(X: I.getOperand(i_nocapture: 0), Y: I.getOperand(i_nocapture: 1), SQ); |
| 438 | |
| 439 | I.replaceAllUsesWith(V: Ret); |
| 440 | Ret->takeName(V: &I); |
| 441 | I.eraseFromParent(); |
| 442 | |
| 443 | return true; |
| 444 | } |
| 445 | // clang-format off: preserve formatting of the following example |
| 446 | |
| 447 | /// Generate code to convert a fp number to integer, replacing FPToS(U)I with |
| 448 | /// the generated code. This currently generates code similarly to compiler-rt's |
| 449 | /// implementations. |
| 450 | /// |
| 451 | /// An example IR generated from compiler-rt/fixsfdi.c looks like below: |
| 452 | /// define dso_local i64 @foo(float noundef %a) local_unnamed_addr #0 { |
| 453 | /// entry: |
| 454 | /// %0 = bitcast float %a to i32 |
| 455 | /// %conv.i = zext i32 %0 to i64 |
| 456 | /// %tobool.not = icmp sgt i32 %0, -1 |
| 457 | /// %conv = select i1 %tobool.not, i64 1, i64 -1 |
| 458 | /// %and = lshr i64 %conv.i, 23 |
| 459 | /// %shr = and i64 %and, 255 |
| 460 | /// %and2 = and i64 %conv.i, 8388607 |
| 461 | /// %or = or i64 %and2, 8388608 |
| 462 | /// %cmp = icmp ult i64 %shr, 127 |
| 463 | /// br i1 %cmp, label %cleanup, label %if.end |
| 464 | /// |
| 465 | /// if.end: ; preds = %entry |
| 466 | /// %sub = add nuw nsw i64 %shr, 4294967169 |
| 467 | /// %conv5 = and i64 %sub, 4294967232 |
| 468 | /// %cmp6.not = icmp eq i64 %conv5, 0 |
| 469 | /// br i1 %cmp6.not, label %if.end12, label %if.then8 |
| 470 | /// |
| 471 | /// if.then8: ; preds = %if.end |
| 472 | /// %cond11 = select i1 %tobool.not, i64 9223372036854775807, i64 |
| 473 | /// -9223372036854775808 br label %cleanup |
| 474 | /// |
| 475 | /// if.end12: ; preds = %if.end |
| 476 | /// %cmp13 = icmp ult i64 %shr, 150 |
| 477 | /// br i1 %cmp13, label %if.then15, label %if.else |
| 478 | /// |
| 479 | /// if.then15: ; preds = %if.end12 |
| 480 | /// %sub16 = sub nuw nsw i64 150, %shr |
| 481 | /// %shr17 = lshr i64 %or, %sub16 |
| 482 | /// %mul = mul nsw i64 %shr17, %conv |
| 483 | /// br label %cleanup |
| 484 | /// |
| 485 | /// if.else: ; preds = %if.end12 |
| 486 | /// %sub18 = add nsw i64 %shr, -150 |
| 487 | /// %shl = shl i64 %or, %sub18 |
| 488 | /// %mul19 = mul nsw i64 %shl, %conv |
| 489 | /// br label %cleanup |
| 490 | /// |
| 491 | /// cleanup: ; preds = %entry, |
| 492 | /// %if.else, %if.then15, %if.then8 |
| 493 | /// %retval.0 = phi i64 [ %cond11, %if.then8 ], [ %mul, %if.then15 ], [ |
| 494 | /// %mul19, %if.else ], [ 0, %entry ] ret i64 %retval.0 |
| 495 | /// } |
| 496 | /// |
| 497 | /// Replace fp to integer with generated code. |
| 498 | static void expandFPToI(Instruction *FPToI) { |
| 499 | // clang-format on |
| 500 | IRBuilder<> Builder(FPToI); |
| 501 | auto *FloatVal = FPToI->getOperand(i: 0); |
| 502 | IntegerType *IntTy = cast<IntegerType>(Val: FPToI->getType()); |
| 503 | |
| 504 | unsigned BitWidth = FPToI->getType()->getIntegerBitWidth(); |
| 505 | unsigned FPMantissaWidth = FloatVal->getType()->getFPMantissaWidth() - 1; |
| 506 | |
| 507 | // FIXME: fp16's range is covered by i32. So `fptoi half` can convert |
| 508 | // to i32 first following a sext/zext to target integer type. |
| 509 | Value *A1 = nullptr; |
| 510 | if (FloatVal->getType()->isHalfTy() && BitWidth >= 32) { |
| 511 | if (FPToI->getOpcode() == Instruction::FPToUI) { |
| 512 | Value *A0 = Builder.CreateFPToUI(V: FloatVal, DestTy: Builder.getInt32Ty()); |
| 513 | A1 = Builder.CreateZExt(V: A0, DestTy: IntTy); |
| 514 | } else { // FPToSI |
| 515 | Value *A0 = Builder.CreateFPToSI(V: FloatVal, DestTy: Builder.getInt32Ty()); |
| 516 | A1 = Builder.CreateSExt(V: A0, DestTy: IntTy); |
| 517 | } |
| 518 | FPToI->replaceAllUsesWith(V: A1); |
| 519 | FPToI->dropAllReferences(); |
| 520 | FPToI->eraseFromParent(); |
| 521 | return; |
| 522 | } |
| 523 | |
| 524 | // fp80 conversion is implemented by fpext to fp128 first then do the |
| 525 | // conversion. |
| 526 | FPMantissaWidth = FPMantissaWidth == 63 ? 112 : FPMantissaWidth; |
| 527 | unsigned FloatWidth = |
| 528 | PowerOf2Ceil(A: FloatVal->getType()->getScalarSizeInBits()); |
| 529 | unsigned ExponentWidth = FloatWidth - FPMantissaWidth - 1; |
| 530 | unsigned ExponentBias = (1 << (ExponentWidth - 1)) - 1; |
| 531 | IntegerType *FloatIntTy = Builder.getIntNTy(N: FloatWidth); |
| 532 | Value *ImplicitBit = ConstantInt::get( |
| 533 | Ty: FloatIntTy, V: APInt::getOneBitSet(numBits: FloatWidth, BitNo: FPMantissaWidth)); |
| 534 | Value *SignificandMask = ConstantInt::get( |
| 535 | Ty: FloatIntTy, V: APInt::getLowBitsSet(numBits: FloatWidth, loBitsSet: FPMantissaWidth)); |
| 536 | |
| 537 | BasicBlock *Entry = Builder.GetInsertBlock(); |
| 538 | Function *F = Entry->getParent(); |
| 539 | Entry->setName(Twine(Entry->getName(), "fp-to-i-entry" )); |
| 540 | BasicBlock *End = |
| 541 | Entry->splitBasicBlock(I: Builder.GetInsertPoint(), BBName: "fp-to-i-cleanup" ); |
| 542 | BasicBlock *CheckSaturateBB = BasicBlock::Create( |
| 543 | Context&: Builder.getContext(), Name: "fp-to-i-if-check.saturate" , Parent: F, InsertBefore: End); |
| 544 | BasicBlock *SaturateBB = |
| 545 | BasicBlock::Create(Context&: Builder.getContext(), Name: "fp-to-i-if-saturate" , Parent: F, InsertBefore: End); |
| 546 | BasicBlock *CheckExpSizeBB = BasicBlock::Create( |
| 547 | Context&: Builder.getContext(), Name: "fp-to-i-if-check.exp.size" , Parent: F, InsertBefore: End); |
| 548 | BasicBlock *ExpSmallBB = |
| 549 | BasicBlock::Create(Context&: Builder.getContext(), Name: "fp-to-i-if-exp.small" , Parent: F, InsertBefore: End); |
| 550 | BasicBlock *ExpLargeBB = |
| 551 | BasicBlock::Create(Context&: Builder.getContext(), Name: "fp-to-i-if-exp.large" , Parent: F, InsertBefore: End); |
| 552 | |
| 553 | Entry->getTerminator()->eraseFromParent(); |
| 554 | |
| 555 | // entry: |
| 556 | Builder.SetInsertPoint(Entry); |
| 557 | Value *FloatVal0 = FloatVal; |
| 558 | // fp80 conversion is implemented by fpext to fp128 first then do the |
| 559 | // conversion. |
| 560 | if (FloatVal->getType()->isX86_FP80Ty()) |
| 561 | FloatVal0 = |
| 562 | Builder.CreateFPExt(V: FloatVal, DestTy: Type::getFP128Ty(C&: Builder.getContext())); |
| 563 | Value *ARep = Builder.CreateBitCast(V: FloatVal0, DestTy: FloatIntTy); |
| 564 | Value *PosOrNeg = |
| 565 | Builder.CreateICmpSGT(LHS: ARep, RHS: ConstantInt::getSigned(Ty: FloatIntTy, V: -1)); |
| 566 | Value *Sign = Builder.CreateSelect(C: PosOrNeg, True: ConstantInt::getSigned(Ty: IntTy, V: 1), |
| 567 | False: ConstantInt::getSigned(Ty: IntTy, V: -1), Name: "sign" ); |
| 568 | Value *And = |
| 569 | Builder.CreateLShr(LHS: ARep, RHS: Builder.getIntN(N: FloatWidth, C: FPMantissaWidth)); |
| 570 | Value *BiasedExp = Builder.CreateAnd( |
| 571 | LHS: And, RHS: Builder.getIntN(N: FloatWidth, C: (1 << ExponentWidth) - 1), Name: "biased.exp" ); |
| 572 | Value *Abs = Builder.CreateAnd(LHS: ARep, RHS: SignificandMask); |
| 573 | Value *Significand = Builder.CreateOr(LHS: Abs, RHS: ImplicitBit, Name: "significand" ); |
| 574 | Value *ExpIsNegative = Builder.CreateICmpULT( |
| 575 | LHS: BiasedExp, RHS: Builder.getIntN(N: FloatWidth, C: ExponentBias), Name: "exp.is.negative" ); |
| 576 | Builder.CreateCondBr(Cond: ExpIsNegative, True: End, False: CheckSaturateBB); |
| 577 | |
| 578 | // check.saturate: |
| 579 | Builder.SetInsertPoint(CheckSaturateBB); |
| 580 | Value *Add1 = Builder.CreateAdd( |
| 581 | LHS: BiasedExp, |
| 582 | RHS: ConstantInt::getSigned(Ty: FloatIntTy, |
| 583 | V: -static_cast<int64_t>(ExponentBias + BitWidth))); |
| 584 | Value *Cmp3 = Builder.CreateICmpULT( |
| 585 | LHS: Add1, |
| 586 | RHS: ConstantInt::getSigned(Ty: FloatIntTy, V: -static_cast<int64_t>(BitWidth))); |
| 587 | Builder.CreateCondBr(Cond: Cmp3, True: SaturateBB, False: CheckExpSizeBB); |
| 588 | |
| 589 | // saturate: |
| 590 | Builder.SetInsertPoint(SaturateBB); |
| 591 | Value *SignedMax = |
| 592 | ConstantInt::get(Ty: IntTy, V: APInt::getSignedMaxValue(numBits: BitWidth)); |
| 593 | Value *SignedMin = |
| 594 | ConstantInt::get(Ty: IntTy, V: APInt::getSignedMinValue(numBits: BitWidth)); |
| 595 | Value *Saturated = |
| 596 | Builder.CreateSelect(C: PosOrNeg, True: SignedMax, False: SignedMin, Name: "saturated" ); |
| 597 | Builder.CreateBr(Dest: End); |
| 598 | |
| 599 | // if.end9: |
| 600 | Builder.SetInsertPoint(CheckExpSizeBB); |
| 601 | Value *ExpSmallerMantissaWidth = Builder.CreateICmpULT( |
| 602 | LHS: BiasedExp, RHS: Builder.getIntN(N: FloatWidth, C: ExponentBias + FPMantissaWidth), |
| 603 | Name: "exp.smaller.mantissa.width" ); |
| 604 | Builder.CreateCondBr(Cond: ExpSmallerMantissaWidth, True: ExpSmallBB, False: ExpLargeBB); |
| 605 | |
| 606 | // exp.small: |
| 607 | Builder.SetInsertPoint(ExpSmallBB); |
| 608 | Value *Sub13 = Builder.CreateSub( |
| 609 | LHS: Builder.getIntN(N: FloatWidth, C: ExponentBias + FPMantissaWidth), RHS: BiasedExp); |
| 610 | Value *Shr14 = |
| 611 | Builder.CreateZExtOrTrunc(V: Builder.CreateLShr(LHS: Significand, RHS: Sub13), DestTy: IntTy); |
| 612 | Value *Mul = Builder.CreateMul(LHS: Shr14, RHS: Sign); |
| 613 | Builder.CreateBr(Dest: End); |
| 614 | |
| 615 | // exp.large: |
| 616 | Builder.SetInsertPoint(ExpLargeBB); |
| 617 | Value *Sub15 = Builder.CreateAdd( |
| 618 | LHS: BiasedExp, |
| 619 | RHS: ConstantInt::getSigned( |
| 620 | Ty: FloatIntTy, V: -static_cast<int64_t>(ExponentBias + FPMantissaWidth))); |
| 621 | Value *SignificandCast = Builder.CreateZExtOrTrunc(V: Significand, DestTy: IntTy); |
| 622 | Value *Shl = Builder.CreateShl(LHS: SignificandCast, |
| 623 | RHS: Builder.CreateZExtOrTrunc(V: Sub15, DestTy: IntTy)); |
| 624 | Value *Mul16 = Builder.CreateMul(LHS: Shl, RHS: Sign); |
| 625 | Builder.CreateBr(Dest: End); |
| 626 | |
| 627 | // cleanup: |
| 628 | Builder.SetInsertPoint(TheBB: End, IP: End->begin()); |
| 629 | PHINode *Retval0 = Builder.CreatePHI(Ty: FPToI->getType(), NumReservedValues: 4); |
| 630 | |
| 631 | Retval0->addIncoming(V: Saturated, BB: SaturateBB); |
| 632 | Retval0->addIncoming(V: Mul, BB: ExpSmallBB); |
| 633 | Retval0->addIncoming(V: Mul16, BB: ExpLargeBB); |
| 634 | Retval0->addIncoming(V: Builder.getIntN(N: BitWidth, C: 0), BB: Entry); |
| 635 | |
| 636 | FPToI->replaceAllUsesWith(V: Retval0); |
| 637 | FPToI->dropAllReferences(); |
| 638 | FPToI->eraseFromParent(); |
| 639 | } |
| 640 | |
| 641 | // clang-format off: preserve formatting of the following example |
| 642 | |
| 643 | /// Generate code to convert a fp number to integer, replacing S(U)IToFP with |
| 644 | /// the generated code. This currently generates code similarly to compiler-rt's |
| 645 | /// implementations. This implementation has an implicit assumption that integer |
| 646 | /// width is larger than fp. |
| 647 | /// |
| 648 | /// An example IR generated from compiler-rt/floatdisf.c looks like below: |
| 649 | /// define dso_local float @__floatdisf(i64 noundef %a) local_unnamed_addr #0 { |
| 650 | /// entry: |
| 651 | /// %cmp = icmp eq i64 %a, 0 |
| 652 | /// br i1 %cmp, label %return, label %if.end |
| 653 | /// |
| 654 | /// if.end: ; preds = %entry |
| 655 | /// %shr = ashr i64 %a, 63 |
| 656 | /// %xor = xor i64 %shr, %a |
| 657 | /// %sub = sub nsw i64 %xor, %shr |
| 658 | /// %0 = tail call i64 @llvm.ctlz.i64(i64 %sub, i1 true), !range !5 |
| 659 | /// %cast = trunc i64 %0 to i32 |
| 660 | /// %sub1 = sub nuw nsw i32 64, %cast |
| 661 | /// %sub2 = xor i32 %cast, 63 |
| 662 | /// %cmp3 = icmp ult i32 %cast, 40 |
| 663 | /// br i1 %cmp3, label %if.then4, label %if.else |
| 664 | /// |
| 665 | /// if.then4: ; preds = %if.end |
| 666 | /// switch i32 %sub1, label %sw.default [ |
| 667 | /// i32 25, label %sw.bb |
| 668 | /// i32 26, label %sw.epilog |
| 669 | /// ] |
| 670 | /// |
| 671 | /// sw.bb: ; preds = %if.then4 |
| 672 | /// %shl = shl i64 %sub, 1 |
| 673 | /// br label %sw.epilog |
| 674 | /// |
| 675 | /// sw.default: ; preds = %if.then4 |
| 676 | /// %sub5 = sub nsw i64 38, %0 |
| 677 | /// %sh_prom = and i64 %sub5, 4294967295 |
| 678 | /// %shr6 = lshr i64 %sub, %sh_prom |
| 679 | /// %shr9 = lshr i64 274877906943, %0 |
| 680 | /// %and = and i64 %shr9, %sub |
| 681 | /// %cmp10 = icmp ne i64 %and, 0 |
| 682 | /// %conv11 = zext i1 %cmp10 to i64 |
| 683 | /// %or = or i64 %shr6, %conv11 |
| 684 | /// br label %sw.epilog |
| 685 | /// |
| 686 | /// sw.epilog: ; preds = %sw.default, |
| 687 | /// %if.then4, %sw.bb |
/// %a.addr.0 = phi i64 [ %or, %sw.default ], [ %sub, %if.then4 ], [ %shl, %sw.bb ]
/// %1 = lshr i64 %a.addr.0, 2
/// %2 = and i64 %1, 1
/// %or16 = or i64 %2, %a.addr.0
/// %inc = add nsw i64 %or16, 1
/// %3 = and i64 %inc, 67108864
| 691 | /// %tobool.not = icmp eq i64 %3, 0 |
| 692 | /// %spec.select.v = select i1 %tobool.not, i64 2, i64 3 |
| 693 | /// %spec.select = ashr i64 %inc, %spec.select.v |
| 694 | /// %spec.select56 = select i1 %tobool.not, i32 %sub2, i32 %sub1 |
| 695 | /// br label %if.end26 |
| 696 | /// |
| 697 | /// if.else: ; preds = %if.end |
| 698 | /// %sub23 = add nuw nsw i64 %0, 4294967256 |
| 699 | /// %sh_prom24 = and i64 %sub23, 4294967295 |
| 700 | /// %shl25 = shl i64 %sub, %sh_prom24 |
| 701 | /// br label %if.end26 |
| 702 | /// |
| 703 | /// if.end26: ; preds = %sw.epilog, |
| 704 | /// %if.else |
| 705 | /// %a.addr.1 = phi i64 [ %shl25, %if.else ], [ %spec.select, %sw.epilog ] |
| 706 | /// %e.0 = phi i32 [ %sub2, %if.else ], [ %spec.select56, %sw.epilog ] |
| 707 | /// %conv27 = trunc i64 %shr to i32 |
| 708 | /// %and28 = and i32 %conv27, -2147483648 |
| 709 | /// %add = shl nuw nsw i32 %e.0, 23 |
| 710 | /// %shl29 = add nuw nsw i32 %add, 1065353216 |
| 711 | /// %conv31 = trunc i64 %a.addr.1 to i32 |
| 712 | /// %and32 = and i32 %conv31, 8388607 |
| 713 | /// %or30 = or i32 %and32, %and28 |
| 714 | /// %or33 = or i32 %or30, %shl29 |
| 715 | /// %4 = bitcast i32 %or33 to float |
| 716 | /// br label %return |
| 717 | /// |
| 718 | /// return: ; preds = %entry, |
| 719 | /// %if.end26 |
| 720 | /// %retval.0 = phi float [ %4, %if.end26 ], [ 0.000000e+00, %entry ] |
| 721 | /// ret float %retval.0 |
| 722 | /// } |
| 723 | /// |
| 724 | /// Replace integer to fp with generated code. |
| 725 | static void expandIToFP(Instruction *IToFP) { |
| 726 | // clang-format on |
| 727 | IRBuilder<> Builder(IToFP); |
| 728 | auto *IntVal = IToFP->getOperand(i: 0); |
| 729 | IntegerType *IntTy = cast<IntegerType>(Val: IntVal->getType()); |
| 730 | |
| 731 | unsigned BitWidth = IntVal->getType()->getIntegerBitWidth(); |
| 732 | unsigned FPMantissaWidth = IToFP->getType()->getFPMantissaWidth() - 1; |
| 733 | // fp80 conversion is implemented by conversion tp fp128 first following |
| 734 | // a fptrunc to fp80. |
| 735 | FPMantissaWidth = FPMantissaWidth == 63 ? 112 : FPMantissaWidth; |
| 736 | // FIXME: As there is no related builtins added in compliler-rt, |
| 737 | // here currently utilized the fp32 <-> fp16 lib calls to implement. |
| 738 | FPMantissaWidth = FPMantissaWidth == 10 ? 23 : FPMantissaWidth; |
| 739 | FPMantissaWidth = FPMantissaWidth == 7 ? 23 : FPMantissaWidth; |
| 740 | unsigned FloatWidth = PowerOf2Ceil(A: FPMantissaWidth); |
| 741 | bool IsSigned = IToFP->getOpcode() == Instruction::SIToFP; |
| 742 | |
| 743 | assert(BitWidth > FloatWidth && "Unexpected conversion. expandIToFP() " |
| 744 | "assumes integer width is larger than fp." ); |
| 745 | |
| 746 | Value *Temp1 = |
| 747 | Builder.CreateShl(LHS: Builder.getIntN(N: BitWidth, C: 1), |
| 748 | RHS: Builder.getIntN(N: BitWidth, C: FPMantissaWidth + 3)); |
| 749 | |
| 750 | BasicBlock *Entry = Builder.GetInsertBlock(); |
| 751 | Function *F = Entry->getParent(); |
| 752 | Entry->setName(Twine(Entry->getName(), "itofp-entry" )); |
| 753 | BasicBlock *End = |
| 754 | Entry->splitBasicBlock(I: Builder.GetInsertPoint(), BBName: "itofp-return" ); |
| 755 | BasicBlock *IfEnd = |
| 756 | BasicBlock::Create(Context&: Builder.getContext(), Name: "itofp-if-end" , Parent: F, InsertBefore: End); |
| 757 | BasicBlock *IfThen4 = |
| 758 | BasicBlock::Create(Context&: Builder.getContext(), Name: "itofp-if-then4" , Parent: F, InsertBefore: End); |
| 759 | BasicBlock *SwBB = |
| 760 | BasicBlock::Create(Context&: Builder.getContext(), Name: "itofp-sw-bb" , Parent: F, InsertBefore: End); |
| 761 | BasicBlock *SwDefault = |
| 762 | BasicBlock::Create(Context&: Builder.getContext(), Name: "itofp-sw-default" , Parent: F, InsertBefore: End); |
| 763 | BasicBlock *SwEpilog = |
| 764 | BasicBlock::Create(Context&: Builder.getContext(), Name: "itofp-sw-epilog" , Parent: F, InsertBefore: End); |
| 765 | BasicBlock *IfThen20 = |
| 766 | BasicBlock::Create(Context&: Builder.getContext(), Name: "itofp-if-then20" , Parent: F, InsertBefore: End); |
| 767 | BasicBlock *IfElse = |
| 768 | BasicBlock::Create(Context&: Builder.getContext(), Name: "itofp-if-else" , Parent: F, InsertBefore: End); |
| 769 | BasicBlock *IfEnd26 = |
| 770 | BasicBlock::Create(Context&: Builder.getContext(), Name: "itofp-if-end26" , Parent: F, InsertBefore: End); |
| 771 | |
| 772 | Entry->getTerminator()->eraseFromParent(); |
| 773 | |
| 774 | Function *CTLZ = |
| 775 | Intrinsic::getOrInsertDeclaration(M: F->getParent(), id: Intrinsic::ctlz, Tys: IntTy); |
| 776 | ConstantInt *True = Builder.getTrue(); |
| 777 | |
| 778 | // entry: |
| 779 | Builder.SetInsertPoint(Entry); |
| 780 | Value *Cmp = Builder.CreateICmpEQ(LHS: IntVal, RHS: ConstantInt::getSigned(Ty: IntTy, V: 0)); |
| 781 | Builder.CreateCondBr(Cond: Cmp, True: End, False: IfEnd); |
| 782 | |
| 783 | // if.end: |
| 784 | Builder.SetInsertPoint(IfEnd); |
| 785 | Value *Shr = |
| 786 | Builder.CreateAShr(LHS: IntVal, RHS: Builder.getIntN(N: BitWidth, C: BitWidth - 1)); |
| 787 | Value *Xor = Builder.CreateXor(LHS: Shr, RHS: IntVal); |
| 788 | Value *Sub = Builder.CreateSub(LHS: Xor, RHS: Shr); |
| 789 | Value *Call = Builder.CreateCall(Callee: CTLZ, Args: {IsSigned ? Sub : IntVal, True}); |
| 790 | Value *Cast = Builder.CreateTrunc(V: Call, DestTy: Builder.getInt32Ty()); |
| 791 | int BitWidthNew = FloatWidth == 128 ? BitWidth : 32; |
| 792 | Value *Sub1 = Builder.CreateSub(LHS: Builder.getIntN(N: BitWidthNew, C: BitWidth), |
| 793 | RHS: FloatWidth == 128 ? Call : Cast); |
| 794 | Value *Sub2 = Builder.CreateSub(LHS: Builder.getIntN(N: BitWidthNew, C: BitWidth - 1), |
| 795 | RHS: FloatWidth == 128 ? Call : Cast); |
| 796 | Value *Cmp3 = Builder.CreateICmpSGT( |
| 797 | LHS: Sub1, RHS: Builder.getIntN(N: BitWidthNew, C: FPMantissaWidth + 1)); |
| 798 | Builder.CreateCondBr(Cond: Cmp3, True: IfThen4, False: IfElse); |
| 799 | |
| 800 | // if.then4: |
| 801 | Builder.SetInsertPoint(IfThen4); |
| 802 | llvm::SwitchInst *SI = Builder.CreateSwitch(V: Sub1, Dest: SwDefault); |
| 803 | SI->addCase(OnVal: Builder.getIntN(N: BitWidthNew, C: FPMantissaWidth + 2), Dest: SwBB); |
| 804 | SI->addCase(OnVal: Builder.getIntN(N: BitWidthNew, C: FPMantissaWidth + 3), Dest: SwEpilog); |
| 805 | |
| 806 | // sw.bb: |
| 807 | Builder.SetInsertPoint(SwBB); |
| 808 | Value *Shl = |
| 809 | Builder.CreateShl(LHS: IsSigned ? Sub : IntVal, RHS: Builder.getIntN(N: BitWidth, C: 1)); |
| 810 | Builder.CreateBr(Dest: SwEpilog); |
| 811 | |
| 812 | // sw.default: |
| 813 | Builder.SetInsertPoint(SwDefault); |
| 814 | Value *Sub5 = Builder.CreateSub( |
| 815 | LHS: Builder.getIntN(N: BitWidthNew, C: BitWidth - FPMantissaWidth - 3), |
| 816 | RHS: FloatWidth == 128 ? Call : Cast); |
| 817 | Value *ShProm = Builder.CreateZExt(V: Sub5, DestTy: IntTy); |
| 818 | Value *Shr6 = Builder.CreateLShr(LHS: IsSigned ? Sub : IntVal, |
| 819 | RHS: FloatWidth == 128 ? Sub5 : ShProm); |
| 820 | Value *Sub8 = |
| 821 | Builder.CreateAdd(LHS: FloatWidth == 128 ? Call : Cast, |
| 822 | RHS: Builder.getIntN(N: BitWidthNew, C: FPMantissaWidth + 3)); |
| 823 | Value *ShProm9 = Builder.CreateZExt(V: Sub8, DestTy: IntTy); |
| 824 | Value *Shr9 = Builder.CreateLShr(LHS: ConstantInt::getSigned(Ty: IntTy, V: -1), |
| 825 | RHS: FloatWidth == 128 ? Sub8 : ShProm9); |
| 826 | Value *And = Builder.CreateAnd(LHS: Shr9, RHS: IsSigned ? Sub : IntVal); |
| 827 | Value *Cmp10 = Builder.CreateICmpNE(LHS: And, RHS: Builder.getIntN(N: BitWidth, C: 0)); |
| 828 | Value *Conv11 = Builder.CreateZExt(V: Cmp10, DestTy: IntTy); |
| 829 | Value *Or = Builder.CreateOr(LHS: Shr6, RHS: Conv11); |
| 830 | Builder.CreateBr(Dest: SwEpilog); |
| 831 | |
| 832 | // sw.epilog: |
| 833 | Builder.SetInsertPoint(SwEpilog); |
| 834 | PHINode *AAddr0 = Builder.CreatePHI(Ty: IntTy, NumReservedValues: 3); |
| 835 | AAddr0->addIncoming(V: Or, BB: SwDefault); |
| 836 | AAddr0->addIncoming(V: IsSigned ? Sub : IntVal, BB: IfThen4); |
| 837 | AAddr0->addIncoming(V: Shl, BB: SwBB); |
| 838 | Value *A0 = Builder.CreateTrunc(V: AAddr0, DestTy: Builder.getInt32Ty()); |
| 839 | Value *A1 = Builder.CreateLShr(LHS: A0, RHS: Builder.getInt32(C: 2)); |
| 840 | Value *A2 = Builder.CreateAnd(LHS: A1, RHS: Builder.getInt32(C: 1)); |
| 841 | Value *Conv16 = Builder.CreateZExt(V: A2, DestTy: IntTy); |
| 842 | Value *Or17 = Builder.CreateOr(LHS: AAddr0, RHS: Conv16); |
| 843 | Value *Inc = Builder.CreateAdd(LHS: Or17, RHS: Builder.getIntN(N: BitWidth, C: 1)); |
| 844 | Value *Shr18 = nullptr; |
| 845 | if (IsSigned) |
| 846 | Shr18 = Builder.CreateAShr(LHS: Inc, RHS: Builder.getIntN(N: BitWidth, C: 2)); |
| 847 | else |
| 848 | Shr18 = Builder.CreateLShr(LHS: Inc, RHS: Builder.getIntN(N: BitWidth, C: 2)); |
| 849 | Value *A3 = Builder.CreateAnd(LHS: Inc, RHS: Temp1, Name: "a3" ); |
| 850 | Value *PosOrNeg = Builder.CreateICmpEQ(LHS: A3, RHS: Builder.getIntN(N: BitWidth, C: 0)); |
| 851 | Value * = Builder.CreateTrunc(V: Shr18, DestTy: Builder.getIntNTy(N: FloatWidth)); |
| 852 | Value * = Builder.CreateLShr(LHS: Shr18, RHS: Builder.getIntN(N: BitWidth, C: 32)); |
| 853 | Value * = nullptr; |
| 854 | if (FloatWidth > 80) |
| 855 | ExtractT64 = Builder.CreateTrunc(V: Sub2, DestTy: Builder.getInt64Ty()); |
| 856 | else |
| 857 | ExtractT64 = Builder.CreateTrunc(V: Extract63, DestTy: Builder.getInt32Ty()); |
| 858 | Builder.CreateCondBr(Cond: PosOrNeg, True: IfEnd26, False: IfThen20); |
| 859 | |
| 860 | // if.then20 |
| 861 | Builder.SetInsertPoint(IfThen20); |
| 862 | Value *Shr21 = nullptr; |
| 863 | if (IsSigned) |
| 864 | Shr21 = Builder.CreateAShr(LHS: Inc, RHS: Builder.getIntN(N: BitWidth, C: 3)); |
| 865 | else |
| 866 | Shr21 = Builder.CreateLShr(LHS: Inc, RHS: Builder.getIntN(N: BitWidth, C: 3)); |
| 867 | Value * = Builder.CreateTrunc(V: Shr21, DestTy: Builder.getIntNTy(N: FloatWidth)); |
| 868 | Value * = Builder.CreateLShr(LHS: Shr21, RHS: Builder.getIntN(N: BitWidth, C: 32)); |
| 869 | Value * = nullptr; |
| 870 | if (FloatWidth > 80) |
| 871 | ExtractT62 = Builder.CreateTrunc(V: Sub1, DestTy: Builder.getInt64Ty()); |
| 872 | else |
| 873 | ExtractT62 = Builder.CreateTrunc(V: Extract, DestTy: Builder.getInt32Ty()); |
| 874 | Builder.CreateBr(Dest: IfEnd26); |
| 875 | |
| 876 | // if.else: |
| 877 | Builder.SetInsertPoint(IfElse); |
| 878 | Value *Sub24 = Builder.CreateAdd( |
| 879 | LHS: FloatWidth == 128 ? Call : Cast, |
| 880 | RHS: ConstantInt::getSigned(Ty: Builder.getIntNTy(N: BitWidthNew), |
| 881 | V: -(int)(BitWidth - FPMantissaWidth - 1))); |
| 882 | Value *ShProm25 = Builder.CreateZExt(V: Sub24, DestTy: IntTy); |
| 883 | Value *Shl26 = Builder.CreateShl(LHS: IsSigned ? Sub : IntVal, |
| 884 | RHS: FloatWidth == 128 ? Sub24 : ShProm25); |
| 885 | Value * = Builder.CreateTrunc(V: Shl26, DestTy: Builder.getIntNTy(N: FloatWidth)); |
| 886 | Value * = Builder.CreateLShr(LHS: Shl26, RHS: Builder.getIntN(N: BitWidth, C: 32)); |
| 887 | Value * = nullptr; |
| 888 | if (FloatWidth > 80) |
| 889 | ExtractT66 = Builder.CreateTrunc(V: Sub2, DestTy: Builder.getInt64Ty()); |
| 890 | else |
| 891 | ExtractT66 = Builder.CreateTrunc(V: Extract65, DestTy: Builder.getInt32Ty()); |
| 892 | Builder.CreateBr(Dest: IfEnd26); |
| 893 | |
| 894 | // if.end26: |
| 895 | Builder.SetInsertPoint(IfEnd26); |
| 896 | PHINode *AAddr1Off0 = Builder.CreatePHI(Ty: Builder.getIntNTy(N: FloatWidth), NumReservedValues: 3); |
| 897 | AAddr1Off0->addIncoming(V: ExtractT, BB: IfThen20); |
| 898 | AAddr1Off0->addIncoming(V: ExtractT60, BB: SwEpilog); |
| 899 | AAddr1Off0->addIncoming(V: ExtractT61, BB: IfElse); |
| 900 | PHINode *AAddr1Off32 = nullptr; |
| 901 | if (FloatWidth > 32) { |
| 902 | AAddr1Off32 = |
| 903 | Builder.CreatePHI(Ty: Builder.getIntNTy(N: FloatWidth > 80 ? 64 : 32), NumReservedValues: 3); |
| 904 | AAddr1Off32->addIncoming(V: ExtractT62, BB: IfThen20); |
| 905 | AAddr1Off32->addIncoming(V: ExtractT64, BB: SwEpilog); |
| 906 | AAddr1Off32->addIncoming(V: ExtractT66, BB: IfElse); |
| 907 | } |
| 908 | PHINode *E0 = nullptr; |
| 909 | if (FloatWidth <= 80) { |
| 910 | E0 = Builder.CreatePHI(Ty: Builder.getIntNTy(N: BitWidthNew), NumReservedValues: 3); |
| 911 | E0->addIncoming(V: Sub1, BB: IfThen20); |
| 912 | E0->addIncoming(V: Sub2, BB: SwEpilog); |
| 913 | E0->addIncoming(V: Sub2, BB: IfElse); |
| 914 | } |
| 915 | Value *And29 = nullptr; |
| 916 | if (FloatWidth > 80) { |
| 917 | Value *Temp2 = Builder.CreateShl(LHS: Builder.getIntN(N: BitWidth, C: 1), |
| 918 | RHS: Builder.getIntN(N: BitWidth, C: 63)); |
| 919 | And29 = Builder.CreateAnd(LHS: Shr, RHS: Temp2, Name: "and29" ); |
| 920 | } else { |
| 921 | Value *Conv28 = Builder.CreateTrunc(V: Shr, DestTy: Builder.getInt32Ty()); |
| 922 | And29 = Builder.CreateAnd( |
| 923 | LHS: Conv28, RHS: ConstantInt::get(Context&: Builder.getContext(), V: APInt::getSignMask(BitWidth: 32))); |
| 924 | } |
| 925 | unsigned TempMod = FPMantissaWidth % 32; |
| 926 | Value *And34 = nullptr; |
| 927 | Value *Shl30 = nullptr; |
| 928 | if (FloatWidth > 80) { |
| 929 | TempMod += 32; |
| 930 | Value *Add = Builder.CreateShl(LHS: AAddr1Off32, RHS: Builder.getInt64(C: TempMod)); |
| 931 | Shl30 = Builder.CreateAdd( |
| 932 | LHS: Add, RHS: Builder.getInt64(C: ((1ull << (62ull - TempMod)) - 1ull) << TempMod)); |
| 933 | And34 = Builder.CreateZExt(V: Shl30, DestTy: Builder.getInt128Ty()); |
| 934 | } else { |
| 935 | Value *Add = Builder.CreateShl(LHS: E0, RHS: Builder.getInt32(C: TempMod)); |
| 936 | Shl30 = Builder.CreateAdd( |
| 937 | LHS: Add, RHS: Builder.getInt32(C: ((1 << (30 - TempMod)) - 1) << TempMod)); |
| 938 | And34 = Builder.CreateAnd(LHS: FloatWidth > 32 ? AAddr1Off32 : AAddr1Off0, |
| 939 | RHS: Builder.getInt32(C: (1 << TempMod) - 1)); |
| 940 | } |
| 941 | Value *Or35 = nullptr; |
| 942 | if (FloatWidth > 80) { |
| 943 | Value *And29Trunc = Builder.CreateTrunc(V: And29, DestTy: Builder.getInt128Ty()); |
| 944 | Value *Or31 = Builder.CreateOr(LHS: And29Trunc, RHS: And34); |
| 945 | Value *Or34 = Builder.CreateShl(LHS: Or31, RHS: Builder.getIntN(N: 128, C: 64)); |
| 946 | Value *Temp3 = Builder.CreateShl(LHS: Builder.getIntN(N: 128, C: 1), |
| 947 | RHS: Builder.getIntN(N: 128, C: FPMantissaWidth)); |
| 948 | Value *Temp4 = Builder.CreateSub(LHS: Temp3, RHS: Builder.getIntN(N: 128, C: 1)); |
| 949 | Value *A6 = Builder.CreateAnd(LHS: AAddr1Off0, RHS: Temp4); |
| 950 | Or35 = Builder.CreateOr(LHS: Or34, RHS: A6); |
| 951 | } else { |
| 952 | Value *Or31 = Builder.CreateOr(LHS: And34, RHS: And29); |
| 953 | Or35 = Builder.CreateOr(LHS: IsSigned ? Or31 : And34, RHS: Shl30); |
| 954 | } |
| 955 | Value *A4 = nullptr; |
| 956 | if (IToFP->getType()->isDoubleTy()) { |
| 957 | Value *ZExt1 = Builder.CreateZExt(V: Or35, DestTy: Builder.getIntNTy(N: FloatWidth)); |
| 958 | Value *Shl1 = Builder.CreateShl(LHS: ZExt1, RHS: Builder.getIntN(N: FloatWidth, C: 32)); |
| 959 | Value *And1 = |
| 960 | Builder.CreateAnd(LHS: AAddr1Off0, RHS: Builder.getIntN(N: FloatWidth, C: 0xFFFFFFFF)); |
| 961 | Value *Or1 = Builder.CreateOr(LHS: Shl1, RHS: And1); |
| 962 | A4 = Builder.CreateBitCast(V: Or1, DestTy: IToFP->getType()); |
| 963 | } else if (IToFP->getType()->isX86_FP80Ty()) { |
| 964 | Value *A40 = |
| 965 | Builder.CreateBitCast(V: Or35, DestTy: Type::getFP128Ty(C&: Builder.getContext())); |
| 966 | A4 = Builder.CreateFPTrunc(V: A40, DestTy: IToFP->getType()); |
| 967 | } else if (IToFP->getType()->isHalfTy() || IToFP->getType()->isBFloatTy()) { |
| 968 | // Deal with "half" situation. This is a workaround since we don't have |
| 969 | // floattihf.c currently as referring. |
| 970 | Value *A40 = |
| 971 | Builder.CreateBitCast(V: Or35, DestTy: Type::getFloatTy(C&: Builder.getContext())); |
| 972 | A4 = Builder.CreateFPTrunc(V: A40, DestTy: IToFP->getType()); |
| 973 | } else // float type |
| 974 | A4 = Builder.CreateBitCast(V: Or35, DestTy: IToFP->getType()); |
| 975 | Builder.CreateBr(Dest: End); |
| 976 | |
| 977 | // return: |
| 978 | Builder.SetInsertPoint(TheBB: End, IP: End->begin()); |
| 979 | PHINode *Retval0 = Builder.CreatePHI(Ty: IToFP->getType(), NumReservedValues: 2); |
| 980 | Retval0->addIncoming(V: A4, BB: IfEnd26); |
| 981 | Retval0->addIncoming(V: ConstantFP::getZero(Ty: IToFP->getType(), Negative: false), BB: Entry); |
| 982 | |
| 983 | IToFP->replaceAllUsesWith(V: Retval0); |
| 984 | IToFP->dropAllReferences(); |
| 985 | IToFP->eraseFromParent(); |
| 986 | } |
| 987 | |
| 988 | static void scalarize(Instruction *I, |
| 989 | SmallVectorImpl<Instruction *> &Worklist) { |
| 990 | VectorType *VTy = cast<FixedVectorType>(Val: I->getType()); |
| 991 | |
| 992 | IRBuilder<> Builder(I); |
| 993 | |
| 994 | unsigned NumElements = VTy->getElementCount().getFixedValue(); |
| 995 | Value *Result = PoisonValue::get(T: VTy); |
| 996 | for (unsigned Idx = 0; Idx < NumElements; ++Idx) { |
| 997 | Value *Ext = Builder.CreateExtractElement(Vec: I->getOperand(i: 0), Idx); |
| 998 | |
| 999 | Value *NewOp = nullptr; |
| 1000 | if (auto *BinOp = dyn_cast<BinaryOperator>(Val: I)) |
| 1001 | NewOp = Builder.CreateBinOp( |
| 1002 | Opc: BinOp->getOpcode(), LHS: Ext, |
| 1003 | RHS: Builder.CreateExtractElement(Vec: I->getOperand(i: 1), Idx)); |
| 1004 | else if (auto *CastI = dyn_cast<CastInst>(Val: I)) |
| 1005 | NewOp = Builder.CreateCast(Op: CastI->getOpcode(), V: Ext, |
| 1006 | DestTy: I->getType()->getScalarType()); |
| 1007 | else |
| 1008 | llvm_unreachable("Unsupported instruction type" ); |
| 1009 | |
| 1010 | Result = Builder.CreateInsertElement(Vec: Result, NewElt: NewOp, Idx); |
| 1011 | if (auto *ScalarizedI = dyn_cast<Instruction>(Val: NewOp)) { |
| 1012 | ScalarizedI->copyIRFlags(V: I, IncludeWrapFlags: true); |
| 1013 | Worklist.push_back(Elt: ScalarizedI); |
| 1014 | } |
| 1015 | } |
| 1016 | |
| 1017 | I->replaceAllUsesWith(V: Result); |
| 1018 | I->dropAllReferences(); |
| 1019 | I->eraseFromParent(); |
| 1020 | } |
| 1021 | |
| 1022 | static void addToWorklist(Instruction &I, |
| 1023 | SmallVector<Instruction *, 4> &Worklist) { |
| 1024 | if (I.getOperand(i: 0)->getType()->isVectorTy()) |
| 1025 | scalarize(I: &I, Worklist); |
| 1026 | else |
| 1027 | Worklist.push_back(Elt: &I); |
| 1028 | } |
| 1029 | |
/// Expand all supported instructions in \p F whose bit widths exceed the
/// target's legal limits (fp<->int conversions, div/rem) or whose frem type
/// the target asks to expand. Vector candidates are scalarized first.
///
/// \p AC may be null; when present it seeds the SimplifyQuery used by the
/// frem expansion.
/// NOTE(review): \p Libcalls is not referenced in this body as visible here;
/// presumably threaded through for the expansion helpers — confirm.
/// \returns true if the function was modified.
static bool runImpl(Function &F, const TargetLowering &TLI,
                    const LibcallLoweringInfo &Libcalls, AssumptionCache *AC) {
  SmallVector<Instruction *, 4> Worklist;

  // Thresholds come from the target but can be overridden by command-line
  // options; their sentinel value MAX_INT_BITS means "not set".
  unsigned MaxLegalFpConvertBitWidth =
      TLI.getMaxLargeFPConvertBitWidthSupported();
  if (ExpandFpConvertBits != llvm::IntegerType::MAX_INT_BITS)
    MaxLegalFpConvertBitWidth = ExpandFpConvertBits;

  unsigned MaxLegalDivRemBitWidth = TLI.getMaxDivRemBitWidthSupported();
  if (ExpandDivRemBits != llvm::IntegerType::MAX_INT_BITS)
    MaxLegalDivRemBitWidth = ExpandDivRemBits;

  // A threshold at/above MAX_INT_BITS can never be exceeded, so that
  // expansion is effectively disabled.
  bool DisableExpandLargeFp =
      MaxLegalFpConvertBitWidth >= llvm::IntegerType::MAX_INT_BITS;
  bool DisableExpandLargeDivRem =
      MaxLegalDivRemBitWidth >= llvm::IntegerType::MAX_INT_BITS;
  bool DisableFrem = !FRemExpander::shouldExpandAnyFremType(TLI);

  // Fast path: nothing to do for this target/configuration.
  if (DisableExpandLargeFp && DisableFrem && DisableExpandLargeDivRem)
    return false;

  // Predicate: should this instruction be expanded by this pass?
  auto ShouldHandleInst = [&](Instruction &I) {
    Type *Ty = I.getType();
    // TODO: This pass doesn't handle scalable vectors.
    if (Ty->isScalableTy())
      return false;

    switch (I.getOpcode()) {
    case Instruction::FRem:
      return !DisableFrem && FRemExpander::shouldExpandFremType(TLI, Ty);
    case Instruction::FPToUI:
    case Instruction::FPToSI:
      // fp-to-int: the *result* integer width decides.
      return !DisableExpandLargeFp &&
             cast<IntegerType>(Val: Ty->getScalarType())->getIntegerBitWidth() >
                 MaxLegalFpConvertBitWidth;
    case Instruction::UIToFP:
    case Instruction::SIToFP:
      // int-to-fp: the *source* integer width decides.
      return !DisableExpandLargeFp &&
             cast<IntegerType>(Val: I.getOperand(i: 0)->getType()->getScalarType())
                     ->getIntegerBitWidth() > MaxLegalFpConvertBitWidth;
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::URem:
    case Instruction::SRem:
      return !DisableExpandLargeDivRem &&
                 cast<IntegerType>(Val: Ty->getScalarType())->getIntegerBitWidth() >
                     MaxLegalDivRemBitWidth
             // The backend has peephole optimizations for powers of two.
             // TODO: We don't consider vectors here.
             && !isConstantPowerOfTwo(V: I.getOperand(i: 1), SignedOp: isSigned(Opcode: I.getOpcode()));
    }

    return false;
  };

  // First pass: collect (and scalarize) candidates. Pre-increment the
  // iterator because scalarize() erases the current instruction.
  bool Modified = false;
  for (auto It = inst_begin(F: &F), End = inst_end(F); It != End;) {
    Instruction &I = *It++;
    if (!ShouldHandleInst(I))
      continue;

    addToWorklist(I, Worklist);
    Modified = true;
  }

  // Second pass: expand every collected scalar instruction.
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();

    switch (I->getOpcode()) {
    case Instruction::FRem: {
      // Seed a SimplifyQuery with the AssumptionCache (when available) so
      // the frem expansion can exploit known facts about the operands.
      auto SQ = [&]() -> std::optional<SimplifyQuery> {
        if (AC) {
          auto Res = std::make_optional<SimplifyQuery>(
              args: I->getModule()->getDataLayout(), args&: I);
          Res->AC = AC;
          return Res;
        }
        return {};
      }();

      expandFRem(I&: cast<BinaryOperator>(Val&: *I), SQ);
      break;
    }

    case Instruction::FPToUI:
    case Instruction::FPToSI:
      expandFPToI(FPToI: I);
      break;

    case Instruction::UIToFP:
    case Instruction::SIToFP:
      expandIToFP(IToFP: I);
      break;

    case Instruction::UDiv:
    case Instruction::SDiv:
      expandDivision(Div: cast<BinaryOperator>(Val: I));
      break;
    case Instruction::URem:
    case Instruction::SRem:
      expandRemainder(Rem: cast<BinaryOperator>(Val: I));
      break;
    }
  }

  return Modified;
}
| 1138 | |
| 1139 | namespace { |
| 1140 | class ExpandIRInstsLegacyPass : public FunctionPass { |
| 1141 | CodeGenOptLevel OptLevel; |
| 1142 | |
| 1143 | public: |
| 1144 | static char ID; |
| 1145 | |
| 1146 | ExpandIRInstsLegacyPass(CodeGenOptLevel OptLevel) |
| 1147 | : FunctionPass(ID), OptLevel(OptLevel) {} |
| 1148 | |
| 1149 | ExpandIRInstsLegacyPass() : ExpandIRInstsLegacyPass(CodeGenOptLevel::None) {}; |
| 1150 | |
| 1151 | bool runOnFunction(Function &F) override { |
| 1152 | auto *TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>(); |
| 1153 | const TargetSubtargetInfo *Subtarget = TM->getSubtargetImpl(F); |
| 1154 | auto *TLI = Subtarget->getTargetLowering(); |
| 1155 | AssumptionCache *AC = nullptr; |
| 1156 | |
| 1157 | const LibcallLoweringInfo &Libcalls = |
| 1158 | getAnalysis<LibcallLoweringInfoWrapper>().getLibcallLowering( |
| 1159 | M: *F.getParent(), Subtarget: *Subtarget); |
| 1160 | |
| 1161 | if (OptLevel != CodeGenOptLevel::None && !F.hasOptNone()) |
| 1162 | AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); |
| 1163 | return runImpl(F, TLI: *TLI, Libcalls, AC); |
| 1164 | } |
| 1165 | |
| 1166 | void getAnalysisUsage(AnalysisUsage &AU) const override { |
| 1167 | AU.addRequired<LibcallLoweringInfoWrapper>(); |
| 1168 | AU.addRequired<TargetPassConfig>(); |
| 1169 | if (OptLevel != CodeGenOptLevel::None) |
| 1170 | AU.addRequired<AssumptionCacheTracker>(); |
| 1171 | AU.addPreserved<AAResultsWrapperPass>(); |
| 1172 | AU.addPreserved<GlobalsAAWrapperPass>(); |
| 1173 | AU.addRequired<LibcallLoweringInfoWrapper>(); |
| 1174 | } |
| 1175 | }; |
| 1176 | } // namespace |
| 1177 | |
/// Construct the new-PM pass. \p TM supplies the per-function subtarget and
/// lowering info in run(); \p OptLevel gates the use of AssumptionAnalysis.
ExpandIRInstsPass::ExpandIRInstsPass(const TargetMachine &TM,
                                     CodeGenOptLevel OptLevel)
    : TM(&TM), OptLevel(OptLevel) {}
| 1181 | |
| 1182 | void ExpandIRInstsPass::printPipeline( |
| 1183 | raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) { |
| 1184 | static_cast<PassInfoMixin<ExpandIRInstsPass> *>(this)->printPipeline( |
| 1185 | OS, MapClassName2PassName); |
| 1186 | OS << '<'; |
| 1187 | OS << "O" << (int)OptLevel; |
| 1188 | OS << '>'; |
| 1189 | } |
| 1190 | |
/// New-PM entry point. Fetches TLI from the TargetMachine's subtarget and
/// requires the module-level libcall lowering analysis to be cached; emits a
/// diagnostic and returns all-preserved when it is missing.
PreservedAnalyses ExpandIRInstsPass::run(Function &F,
                                         FunctionAnalysisManager &FAM) {
  const TargetSubtargetInfo *STI = TM->getSubtargetImpl(F);
  auto &TLI = *STI->getTargetLowering();
  // Only pay for assumption tracking when optimizing.
  AssumptionCache *AC = nullptr;
  if (OptLevel != CodeGenOptLevel::None)
    AC = &FAM.getResult<AssumptionAnalysis>(IR&: F);

  auto &MAMProxy = FAM.getResult<ModuleAnalysisManagerFunctionProxy>(IR&: F);

  // A function pass cannot compute a module analysis itself, so the result
  // must already be cached by the pipeline; only a cached lookup is legal.
  const LibcallLoweringModuleAnalysisResult *LibcallLowering =
      MAMProxy.getCachedResult<LibcallLoweringModuleAnalysis>(IR&: *F.getParent());

  if (!LibcallLowering) {
    F.getContext().emitError(ErrorStr: "'" + LibcallLoweringModuleAnalysis::name() +
                             "' analysis required");
    return PreservedAnalyses::all();
  }

  const LibcallLoweringInfo &Libcalls =
      LibcallLowering->getLibcallLowering(Subtarget: *STI);

  // Conservatively invalidate everything when the IR changed.
  return runImpl(F, TLI, Libcalls, AC) ? PreservedAnalyses::none()
                                       : PreservedAnalyses::all();
}
| 1216 | |
| 1217 | char ExpandIRInstsLegacyPass::ID = 0; |
| 1218 | INITIALIZE_PASS_BEGIN(ExpandIRInstsLegacyPass, "expand-ir-insts" , |
| 1219 | "Expand certain fp instructions" , false, false) |
| 1220 | INITIALIZE_PASS_DEPENDENCY(LibcallLoweringInfoWrapper) |
| 1221 | INITIALIZE_PASS_END(ExpandIRInstsLegacyPass, "expand-ir-insts" , |
| 1222 | "Expand IR instructions" , false, false) |
| 1223 | |
/// Factory for the legacy-PM wrapper of this pass.
FunctionPass *llvm::createExpandIRInstsPass(CodeGenOptLevel OptLevel) {
  return new ExpandIRInstsLegacyPass(OptLevel);
}
| 1227 | |