//===-- lib/CodeGen/GlobalISel/InlineAsmLowering.cpp ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering from LLVM IR inline asm to MIR INLINEASM.
///
//===----------------------------------------------------------------------===//
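//
// Example (an illustrative sketch, not part of the upstream header): an IR
// call such as
//   %res = call i32 asm "mov $0, $1", "=r,r"(i32 %in)
// is lowered to a single MIR instruction of roughly the form
//   INLINEASM &"mov $0, $1" <extra-info>, <flag> def %res, <flag> %in
// where each constraint contributes a flag immediate followed by its
// operand(s).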

#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Module.h"

#define DEBUG_TYPE "inline-asm-lowering"

using namespace llvm;

void InlineAsmLowering::anchor() {}

namespace {

/// GISelAsmOperandInfo - This contains information for each constraint that we
/// are lowering.
class GISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
public:
  /// Regs - If this is a register or register class operand, this
  /// contains the set of assigned registers corresponding to the operand.
  SmallVector<Register, 1> Regs;

  explicit GISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &Info)
      : TargetLowering::AsmOperandInfo(Info) {}
};

using GISelAsmOperandInfoVector = SmallVector<GISelAsmOperandInfo, 16>;

class ExtraFlags {
  unsigned Flags = 0;

public:
  explicit ExtraFlags(const CallBase &CB) {
    const InlineAsm *IA = cast<InlineAsm>(CB.getCalledOperand());
    if (IA->hasSideEffects())
      Flags |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      Flags |= InlineAsm::Extra_IsAlignStack;
    if (IA->canThrow())
      Flags |= InlineAsm::Extra_MayUnwind;
    if (CB.isConvergent())
      Flags |= InlineAsm::Extra_IsConvergent;
    Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
  }

  void update(const TargetLowering::AsmOperandInfo &OpInfo) {
    // Ideally, we would only check against memory constraints. However, the
    // meaning of an Other constraint can be target-specific and we can't
    // easily reason about it. Therefore, be conservative and set
    // MayLoad/MayStore for Other constraints as well.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
        OpInfo.ConstraintType == TargetLowering::C_Other) {
      if (OpInfo.Type == InlineAsm::isInput)
        Flags |= InlineAsm::Extra_MayLoad;
      else if (OpInfo.Type == InlineAsm::isOutput)
        Flags |= InlineAsm::Extra_MayStore;
      else if (OpInfo.Type == InlineAsm::isClobber)
        Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
    }
  }

  unsigned get() const { return Flags; }
};
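
// For example (illustrative, not from the original source): lowering
//   call void asm sideeffect "nop", ""()
// sets Extra_HasSideEffects in the constructor above, so with the default
// AT&T dialect and no memory constraints the extra-info immediate attached
// to the INLINEASM instruction is just that one bit.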

} // namespace

/// Assign virtual/physical registers for the specified register operand.
static void getRegistersForValue(MachineFunction &MF,
                                 MachineIRBuilder &MIRBuilder,
                                 GISelAsmOperandInfo &OpInfo,
                                 GISelAsmOperandInfo &RefOpInfo) {

  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();

  // No work to do for memory operations.
  if (OpInfo.ConstraintType == TargetLowering::C_Memory)
    return;

  // If this is a constraint for a single physreg, or a constraint for a
  // register class, find it.
  Register AssignedReg;
  const TargetRegisterClass *RC;
  std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
      &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
  // RC is unset only on failure. Return immediately.
  if (!RC)
    return;

  // No need to allocate a matching input constraint since the constraint it's
  // matching to has already been allocated.
  if (OpInfo.isMatchingInputConstraint())
    return;

  // Initialize NumRegs.
  unsigned NumRegs = 1;
  if (OpInfo.ConstraintVT != MVT::Other)
    NumRegs =
        TLI.getNumRegisters(MF.getFunction().getContext(), OpInfo.ConstraintVT);

  // If this is a constraint for a specific physical register, but the type of
  // the operand requires more than one register to be passed, we allocate the
  // required amount of physical registers, starting from the selected physical
  // register.
  // For this, first retrieve a register iterator for the given register class.
  TargetRegisterClass::iterator I = RC->begin();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Advance the iterator to the assigned register (if set).
  if (AssignedReg) {
    for (; *I != AssignedReg; ++I)
      assert(I != RC->end() && "AssignedReg should be a member of provided RC");
  }

  // Finally, assign the registers. If AssignedReg isn't set, create virtual
  // registers with the provided register class.
  for (; NumRegs; --NumRegs, ++I) {
    assert(I != RC->end() && "Ran out of registers to allocate!");
    Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
    OpInfo.Regs.push_back(R);
  }
}
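
// For example (an illustrative sketch, with AArch64 assumed as the target):
// a "{x0}" constraint with an i128 operand yields AssignedReg = X0 and
// NumRegs = 2, so the loop above assigns two consecutive physical registers
// from the register class order, starting at X0.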

static void computeConstraintToUse(const TargetLowering *TLI,
                                   TargetLowering::AsmOperandInfo &OpInfo) {
  assert(!OpInfo.Codes.empty() && "Must have at least one constraint");

  // Single-letter constraints ('r') are very common.
  if (OpInfo.Codes.size() == 1) {
    OpInfo.ConstraintCode = OpInfo.Codes[0];
    OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode);
  } else {
    TargetLowering::ConstraintGroup G = TLI->getConstraintPreferences(OpInfo);
    if (G.empty())
      return;
    // FIXME: prefer immediate constraints if the target allows it
    unsigned BestIdx = 0;
    for (const unsigned E = G.size();
         BestIdx < E && (G[BestIdx].second == TargetLowering::C_Other ||
                         G[BestIdx].second == TargetLowering::C_Immediate);
         ++BestIdx)
      ;
    OpInfo.ConstraintCode = G[BestIdx].first;
    OpInfo.ConstraintType = G[BestIdx].second;
  }

  // 'X' matches anything.
  if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
    // Labels and constants are handled elsewhere ('X' is the only thing
    // that matches labels). For Functions, the type here is the type of
    // the result, which is not what we want to look at; leave them alone.
    Value *Val = OpInfo.CallOperandVal;
    if (isa<BasicBlock>(Val) || isa<ConstantInt>(Val) || isa<Function>(Val))
      return;

    // Otherwise, try to resolve it to something we know about by looking at
    // the actual operand type.
    if (const char *Repl = TLI->LowerXConstraint(OpInfo.ConstraintVT)) {
      OpInfo.ConstraintCode = Repl;
      OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode);
    }
  }
}
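
// For example (an illustrative sketch): given a multi-alternative constraint
// such as "ir" on a non-constant value, the loop above skips over the
// immediate-style alternatives (C_Other/C_Immediate) in the preference group
// and settles on 'r', so the operand is lowered as a register use.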

static unsigned getNumOpRegs(const MachineInstr &I, unsigned OpIdx) {
  const InlineAsm::Flag F(I.getOperand(OpIdx).getImm());
  return F.getNumOperandRegisters();
}

static bool buildAnyextOrCopy(Register Dst, Register Src,
                              MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI =
      MIRBuilder.getMF().getSubtarget().getRegisterInfo();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  auto SrcTy = MRI->getType(Src);
  if (!SrcTy.isValid()) {
    LLVM_DEBUG(dbgs() << "Source type for copy is not valid\n");
    return false;
  }
  unsigned SrcSize = TRI->getRegSizeInBits(Src, *MRI);
  unsigned DstSize = TRI->getRegSizeInBits(Dst, *MRI);

  if (DstSize < SrcSize) {
    LLVM_DEBUG(dbgs() << "Input can't fit in destination reg class\n");
    return false;
  }

  // Attempt to anyext small scalar sources.
  if (DstSize > SrcSize) {
    if (!SrcTy.isScalar()) {
      LLVM_DEBUG(dbgs() << "Can't extend non-scalar input to size of "
                           "destination register class\n");
      return false;
    }
    Src = MIRBuilder.buildAnyExt(LLT::scalar(DstSize), Src).getReg(0);
  }

  MIRBuilder.buildCopy(Dst, Src);
  return true;
}
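
// For example (illustrative): copying an s16 source into a 32-bit destination
// register first builds a G_ANYEXT to s32 and then a COPY, an s32 source is
// copied directly, and a non-scalar source narrower than the destination is
// rejected.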

bool InlineAsmLowering::lowerInlineAsm(
    MachineIRBuilder &MIRBuilder, const CallBase &Call,
    std::function<ArrayRef<Register>(const Value &Val)> GetOrCreateVRegs)
    const {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());

  /// ConstraintOperands - Information about all of the constraints.
  GISelAsmOperandInfoVector ConstraintOperands;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getDataLayout();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(DL, TRI, Call);

  ExtraFlags ExtraInfo(Call);
  unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
  unsigned ResNo = 0; // ResNo - The result number of the next output.
  for (auto &T : TargetConstraints) {
    ConstraintOperands.push_back(GISelAsmOperandInfo(T));
    GISelAsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Compute the value type for each operand.
    if (OpInfo.hasArg()) {
      OpInfo.CallOperandVal = Call.getArgOperand(ArgNo);

      if (isa<BasicBlock>(OpInfo.CallOperandVal)) {
        LLVM_DEBUG(dbgs() << "Basic block input operands not supported yet\n");
        return false;
      }

      Type *OpTy = OpInfo.CallOperandVal->getType();

      // If this is an indirect operand, the operand is a pointer to the
      // accessed type.
      if (OpInfo.isIndirect) {
        OpTy = Call.getParamElementType(ArgNo);
        assert(OpTy && "Indirect operand must have elementtype attribute");
      }

      // FIXME: Support aggregate input operands
      if (!OpTy->isSingleValueType()) {
        LLVM_DEBUG(
            dbgs() << "Aggregate input operands are not supported yet\n");
        return false;
      }

      OpInfo.ConstraintVT =
          TLI->getAsmOperandValueType(DL, OpTy, /*AllowUnknown=*/true)
              .getSimpleVT();
      ++ArgNo;
    } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
      assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
      if (StructType *STy = dyn_cast<StructType>(Call.getType())) {
        OpInfo.ConstraintVT =
            TLI->getSimpleValueType(DL, STy->getElementType(ResNo));
      } else {
        assert(ResNo == 0 && "Asm only has one result!");
        OpInfo.ConstraintVT =
            TLI->getAsmOperandValueType(DL, Call.getType()).getSimpleVT();
      }
      ++ResNo;
    } else {
      assert(OpInfo.Type != InlineAsm::isLabel &&
             "GlobalISel currently doesn't support callbr");
      OpInfo.ConstraintVT = MVT::Other;
    }

    if (OpInfo.ConstraintVT == MVT::i64x8)
      return false;

    // Compute the constraint code and ConstraintType to use.
    computeConstraintToUse(TLI, OpInfo);

    // The selected constraint type might expose new side effects.
    ExtraInfo.update(OpInfo);
  }

  // At this point, all operand types are decided.
  // Create the MachineInstr, but don't insert it yet since input
  // operands still need to insert instructions before this one.
  auto Inst = MIRBuilder.buildInstrNoInsert(TargetOpcode::INLINEASM)
                  .addExternalSymbol(IA->getAsmString().data())
                  .addImm(ExtraInfo.get());
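
  // Illustrative sketch of the layout being built (an assumption spelled out
  // for readability, not an upstream comment): each constraint handled below
  // appends a flag immediate followed by its operands, so for "=r,r" the
  // finished instruction looks roughly like
  //   INLINEASM &"..." <extra-info>, 10 /*regdef*/, def %1, 9 /*reguse*/, %2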

  // Starting from this operand: flag followed by register(s) will be added as
  // operands to Inst for each constraint. Used for matching input constraints.
  unsigned StartIdx = Inst->getNumOperands();

  // Collects the output operands for later processing.
  GISelAsmOperandInfoVector OutputOperands;

  for (auto &OpInfo : ConstraintOperands) {
    GISelAsmOperandInfo &RefOpInfo =
        OpInfo.isMatchingInputConstraint()
            ? ConstraintOperands[OpInfo.getMatchedOperand()]
            : OpInfo;

    // Assign registers for register operands.
    getRegistersForValue(MF, MIRBuilder, OpInfo, RefOpInfo);

    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        const InlineAsm::ConstraintCode ConstraintID =
            TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
               "Failed to convert memory constraint code to constraint id.");

        // Add information to the INLINEASM instruction to know about this
        // output.
        InlineAsm::Flag Flag(InlineAsm::Kind::Mem, 1);
        Flag.setMemConstraint(ConstraintID);
        Inst.addImm(Flag);
        ArrayRef<Register> SourceRegs =
            GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(
            SourceRegs.size() == 1 &&
            "Expected the memory output to fit into a single virtual register");
        Inst.addReg(SourceRegs[0]);
      } else {
        // Otherwise, this outputs to a register (directly for C_Register /
        // C_RegisterClass / C_Other).
        assert(OpInfo.ConstraintType == TargetLowering::C_Register ||
               OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
               OpInfo.ConstraintType == TargetLowering::C_Other);

        // Find a register that we can use.
        if (OpInfo.Regs.empty()) {
          LLVM_DEBUG(dbgs()
                     << "Couldn't allocate output register for constraint\n");
          return false;
        }

        // Add information to the INLINEASM instruction to know that this
        // register is set.
        InlineAsm::Flag Flag(OpInfo.isEarlyClobber
                                 ? InlineAsm::Kind::RegDefEarlyClobber
                                 : InlineAsm::Kind::RegDef,
                             OpInfo.Regs.size());
        if (OpInfo.Regs.front().isVirtual()) {
          // Put the register class of the virtual registers in the flag word.
          // That way, later passes can recompute register class constraints
          // for inline assembly as well as normal instructions. Don't do this
          // for tied operands that can use the regclass information from the
          // def.
          const TargetRegisterClass *RC =
              MRI->getRegClass(OpInfo.Regs.front());
          Flag.setRegClass(RC->getID());
        }

        Inst.addImm(Flag);

        for (Register Reg : OpInfo.Regs) {
          Inst.addReg(Reg, RegState::Define |
                               getImplRegState(Reg.isPhysical()) |
                               getEarlyClobberRegState(OpInfo.isEarlyClobber));
        }

        // Remember this output operand for later processing.
        OutputOperands.push_back(OpInfo);
      }

      break;
    case InlineAsm::isInput:
    case InlineAsm::isLabel: {
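      // Illustrative note (an assumption, not an upstream comment): a
      // matching input constraint such as "0" in
      //   %r = call i32 asm "inc $0", "=r,0"(i32 %x)
      // refers back to output operand 0, and the input handled here is tied
      // to the register that was already created for that output.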
      if (OpInfo.isMatchingInputConstraint()) {
        unsigned DefIdx = OpInfo.getMatchedOperand();
        // Find operand with register def that corresponds to DefIdx.
        unsigned InstFlagIdx = StartIdx;
        for (unsigned i = 0; i < DefIdx; ++i)
          InstFlagIdx += getNumOpRegs(*Inst, InstFlagIdx) + 1;
        assert(getNumOpRegs(*Inst, InstFlagIdx) == 1 && "Wrong flag");

        const InlineAsm::Flag MatchedOperandFlag(
            Inst->getOperand(InstFlagIdx).getImm());
        if (MatchedOperandFlag.isMemKind()) {
          LLVM_DEBUG(dbgs() << "Matching input constraint to mem operand not "
                               "supported. This should be target specific.\n");
          return false;
        }
        if (!MatchedOperandFlag.isRegDefKind() &&
            !MatchedOperandFlag.isRegDefEarlyClobberKind()) {
          LLVM_DEBUG(dbgs() << "Unknown matching constraint\n");
          return false;
        }

        // We want to tie input to register in next operand.
        unsigned DefRegIdx = InstFlagIdx + 1;
        Register Def = Inst->getOperand(DefRegIdx).getReg();

        ArrayRef<Register> SrcRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(SrcRegs.size() == 1 && "Single register is expected here");

        // When Def is physreg: use given input.
        Register In = SrcRegs[0];
        // When Def is vreg: copy input to new vreg with same reg class as Def.
        if (Def.isVirtual()) {
          In = MRI->createVirtualRegister(MRI->getRegClass(Def));
          if (!buildAnyextOrCopy(In, SrcRegs[0], MIRBuilder))
            return false;
        }

        // Add Flag and input register operand (In) to Inst. Tie In to Def.
        InlineAsm::Flag UseFlag(InlineAsm::Kind::RegUse, 1);
        UseFlag.setMatchingOp(DefIdx);
        Inst.addImm(UseFlag);
        Inst.addReg(In);
        Inst->tieOperands(DefRegIdx, Inst->getNumOperands() - 1);
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Other &&
          OpInfo.isIndirect) {
        LLVM_DEBUG(dbgs() << "Indirect input operands with unknown constraint "
                             "not supported yet\n");
        return false;
      }
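
      // Illustrative note (an assumption, not an upstream comment): an
      // immediate-style constraint such as
      //   call void asm "int $0", "i"(i32 3)
      // is lowered below via lowerAsmOperandForConstraint into plain
      // immediate (or global-address) machine operands instead of registers.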
      if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
          OpInfo.ConstraintType == TargetLowering::C_Other) {

        std::vector<MachineOperand> Ops;
        if (!lowerAsmOperandForConstraint(OpInfo.CallOperandVal,
                                          OpInfo.ConstraintCode, Ops,
                                          MIRBuilder)) {
          LLVM_DEBUG(dbgs() << "Don't support constraint: "
                            << OpInfo.ConstraintCode << " yet\n");
          return false;
        }

        assert(Ops.size() > 0 &&
               "Expected constraint to be lowered to at least one operand");

        // Add information to the INLINEASM node to know about this input.
        const unsigned OpFlags =
            InlineAsm::Flag(InlineAsm::Kind::Imm, Ops.size());
        Inst.addImm(OpFlags);
        Inst.add(Ops);
        break;
      }

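      // Illustrative note (an assumption, not an upstream comment): for a
      // memory-constrained input such as
      //   call void asm sideeffect "push $0", "m"(i32 %x)
      // the value is not already a pointer, so it is spilled to a fresh stack
      // slot below and the INLINEASM receives a pointer to that slot; an
      // indirect "*m" operand is already a pointer and is passed through.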
      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        const InlineAsm::ConstraintCode ConstraintID =
            TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        InlineAsm::Flag OpFlags(InlineAsm::Kind::Mem, 1);
        OpFlags.setMemConstraint(ConstraintID);
        Inst.addImm(OpFlags);

        if (OpInfo.isIndirect) {
          // Already indirect.
          ArrayRef<Register> SourceRegs =
              GetOrCreateVRegs(*OpInfo.CallOperandVal);
          if (SourceRegs.size() != 1) {
            LLVM_DEBUG(dbgs() << "Expected the memory input to fit into a "
                                 "single virtual register for constraint '"
                              << OpInfo.ConstraintCode << "'\n");
            return false;
          }
          Inst.addReg(SourceRegs[0]);
          break;
        }

        // Needs to be made indirect. Store the value on the stack and use
        // a pointer to it.
        Value *OpVal = OpInfo.CallOperandVal;
        TypeSize Bytes = DL.getTypeStoreSize(OpVal->getType());
        Align Alignment = DL.getPrefTypeAlign(OpVal->getType());
        int FrameIdx =
            MF.getFrameInfo().CreateStackObject(Bytes, Alignment,
                                                /*isSpillSlot=*/false);

        unsigned AddrSpace = DL.getAllocaAddrSpace();
        LLT FramePtrTy =
            LLT::pointer(AddrSpace, DL.getPointerSizeInBits(AddrSpace));
        auto Ptr = MIRBuilder.buildFrameIndex(FramePtrTy, FrameIdx).getReg(0);
        ArrayRef<Register> SourceRegs =
            GetOrCreateVRegs(*OpInfo.CallOperandVal);
        if (SourceRegs.size() != 1) {
          LLVM_DEBUG(dbgs() << "Expected the memory input to fit into a "
                               "single virtual register for constraint '"
                            << OpInfo.ConstraintCode << "'\n");
          return false;
        }
        MIRBuilder.buildStore(SourceRegs[0], Ptr,
                              MachinePointerInfo::getFixedStack(MF, FrameIdx),
                              Alignment);
        Inst.addReg(Ptr);
        break;
      }

      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
              OpInfo.ConstraintType == TargetLowering::C_Register) &&
             "Unknown constraint type!");

      if (OpInfo.isIndirect) {
        LLVM_DEBUG(dbgs() << "Can't handle indirect register inputs yet "
                             "for constraint '"
                          << OpInfo.ConstraintCode << "'\n");
        return false;
      }

      // Copy the input into the appropriate registers.
      if (OpInfo.Regs.empty()) {
        LLVM_DEBUG(
            dbgs()
            << "Couldn't allocate input register for register constraint\n");
        return false;
      }

      unsigned NumRegs = OpInfo.Regs.size();
      ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
      assert(NumRegs == SourceRegs.size() &&
             "Expected the number of input registers to match the number of "
             "source registers");

      if (NumRegs > 1) {
        LLVM_DEBUG(dbgs() << "Input operands with multiple input registers "
                             "are not supported yet\n");
        return false;
      }

      InlineAsm::Flag Flag(InlineAsm::Kind::RegUse, NumRegs);
      if (OpInfo.Regs.front().isVirtual()) {
        // Put the register class of the virtual registers in the flag word.
        const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front());
        Flag.setRegClass(RC->getID());
      }
      Inst.addImm(Flag);
      if (!buildAnyextOrCopy(OpInfo.Regs[0], SourceRegs[0], MIRBuilder))
        return false;
      Inst.addReg(OpInfo.Regs[0]);
      break;
    }

    case InlineAsm::isClobber: {
      const unsigned NumRegs = OpInfo.Regs.size();
      if (NumRegs > 0) {
        unsigned Flag = InlineAsm::Flag(InlineAsm::Kind::Clobber, NumRegs);
        Inst.addImm(Flag);

        for (Register Reg : OpInfo.Regs) {
          Inst.addReg(Reg, RegState::Define | RegState::EarlyClobber |
                               getImplRegState(Reg.isPhysical()));
        }
      }
      break;
    }
    }
  }

  if (auto Bundle = Call.getOperandBundle(LLVMContext::OB_convergencectrl)) {
    auto *Token = Bundle->Inputs[0].get();
    ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*Token);
    assert(SourceRegs.size() == 1 &&
           "Expected the control token to fit into a single virtual register");
    Inst.addUse(SourceRegs[0], RegState::Implicit);
  }

  if (const MDNode *SrcLoc = Call.getMetadata("srcloc"))
    Inst.addMetadata(SrcLoc);

  // Add rounding control registers as implicit def for inline asm.
  if (MF.getFunction().hasFnAttribute(Attribute::StrictFP)) {
    ArrayRef<MCPhysReg> RCRegs = TLI->getRoundingControlRegisters();
    for (MCPhysReg Reg : RCRegs)
      Inst.addReg(Reg, RegState::ImplicitDefine);
  }

  // All inputs are handled, insert the instruction now.
  MIRBuilder.insertInstr(Inst);

  // Finally, copy the output operands into the output registers.
  ArrayRef<Register> ResRegs = GetOrCreateVRegs(Call);
  if (ResRegs.size() != OutputOperands.size()) {
    LLVM_DEBUG(dbgs() << "Expected the number of output registers to match the "
                         "number of destination registers\n");
    return false;
  }
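
  // Illustrative note (an assumption, not an upstream comment): for
  //   %r = call i16 asm "...", "=r"()
  // on a target whose general registers are 32 bits wide, the loop below
  // copies the asm-defined register into an s32 vreg and then builds a
  // G_TRUNC down to the s16 result.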
  for (unsigned i = 0, e = ResRegs.size(); i < e; ++i) {
    GISelAsmOperandInfo &OpInfo = OutputOperands[i];

    if (OpInfo.Regs.empty())
      continue;

    switch (OpInfo.ConstraintType) {
    case TargetLowering::C_Register:
    case TargetLowering::C_RegisterClass: {
      if (OpInfo.Regs.size() > 1) {
        LLVM_DEBUG(dbgs() << "Output operands with multiple defining "
                             "registers are not supported yet\n");
        return false;
      }

      Register SrcReg = OpInfo.Regs[0];
      unsigned SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
      LLT ResTy = MRI->getType(ResRegs[i]);
      if (ResTy.isScalar() && ResTy.getSizeInBits() < SrcSize) {
        // First copy the non-typed virtual register into a generic virtual
        // register, then truncate down to the width of the result.
        Register Tmp1Reg =
            MRI->createGenericVirtualRegister(LLT::scalar(SrcSize));
        MIRBuilder.buildCopy(Tmp1Reg, SrcReg);
        MIRBuilder.buildTrunc(ResRegs[i], Tmp1Reg);
      } else if (ResTy.getSizeInBits() == SrcSize) {
        MIRBuilder.buildCopy(ResRegs[i], SrcReg);
      } else {
        LLVM_DEBUG(dbgs() << "Unhandled output operand with "
                             "mismatched register size\n");
        return false;
      }

      break;
    }
    case TargetLowering::C_Immediate:
    case TargetLowering::C_Other:
      LLVM_DEBUG(
          dbgs() << "Cannot lower target specific output constraints yet\n");
      return false;
    case TargetLowering::C_Memory:
      break; // Already handled.
    case TargetLowering::C_Address:
      break; // Silence warning.
    case TargetLowering::C_Unknown:
      LLVM_DEBUG(dbgs() << "Unexpected unknown constraint\n");
      return false;
    }
  }

  return true;
}

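// Illustrative note (an assumption, not an upstream comment): this default
// implementation only understands the single-letter constraints handled
// below; e.g. for
//   call void asm "foo $0", "n"(i32 42)
// it produces a single immediate MachineOperand with value 42. Targets may
// override this hook for target-specific constraint letters.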
bool InlineAsmLowering::lowerAsmOperandForConstraint(
    Value *Val, StringRef Constraint, std::vector<MachineOperand> &Ops,
    MachineIRBuilder &MIRBuilder) const {
  if (Constraint.size() > 1)
    return false;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default:
    return false;
  case 's': // Integer immediate not known at compile time
    if (const auto *GV = dyn_cast<GlobalValue>(Val)) {
      Ops.push_back(MachineOperand::CreateGA(GV, /*Offset=*/0));
      return true;
    }
    return false;
  case 'i': // Simple Integer or Relocatable Constant
    if (const auto *GV = dyn_cast<GlobalValue>(Val)) {
      Ops.push_back(MachineOperand::CreateGA(GV, /*Offset=*/0));
      return true;
    }
    [[fallthrough]];
  case 'n': // Immediate integer with a known value.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
      assert(CI->getBitWidth() <= 64 &&
             "expected immediate to fit into 64-bits");
      // Boolean constants should be zero-extended, others are sign-extended.
      bool IsBool = CI->getBitWidth() == 1;
      int64_t ExtVal = IsBool ? CI->getZExtValue() : CI->getSExtValue();
      Ops.push_back(MachineOperand::CreateImm(ExtVal));
      return true;
    }
    return false;
  }
}