//===-- lib/CodeGen/GlobalISel/InlineAsmLowering.cpp ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering from LLVM IR inline asm to MIR INLINEASM
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Module.h"

#define DEBUG_TYPE "inline-asm-lowering"

using namespace llvm;

void InlineAsmLowering::anchor() {}

namespace {

/// GISelAsmOperandInfo - This contains information for each constraint that we
/// are lowering.
class GISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
public:
  /// Regs - If this is a register or register class operand, this
  /// contains the set of assigned registers corresponding to the operand.
  SmallVector<Register, 1> Regs;

  explicit GISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &Info)
      : TargetLowering::AsmOperandInfo(Info) {}
};

using GISelAsmOperandInfoVector = SmallVector<GISelAsmOperandInfo, 16>;

class ExtraFlags {
  unsigned Flags = 0;

public:
  explicit ExtraFlags(const CallBase &CB) {
    const InlineAsm *IA = cast<InlineAsm>(CB.getCalledOperand());
    if (IA->hasSideEffects())
      Flags |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      Flags |= InlineAsm::Extra_IsAlignStack;
    if (CB.isConvergent())
      Flags |= InlineAsm::Extra_IsConvergent;
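    // getDialect() is AD_ATT (0) or AD_Intel (1), so the multiplication
    // below either leaves the Extra_AsmDialect bit clear or sets it; e.g.
    // (illustrative IR) `call void asm inteldialect "nop", ""()` sets it,
    // while a plain `asm "nop"` leaves it clear.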
    Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
  }

  void update(const TargetLowering::AsmOperandInfo &OpInfo) {
    // Ideally, we would only check against memory constraints. However, the
    // meaning of an Other constraint can be target-specific and we can't
    // easily reason about it. Therefore, be conservative and set
    // MayLoad/MayStore for Other constraints as well.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
        OpInfo.ConstraintType == TargetLowering::C_Other) {
      if (OpInfo.Type == InlineAsm::isInput)
        Flags |= InlineAsm::Extra_MayLoad;
      else if (OpInfo.Type == InlineAsm::isOutput)
        Flags |= InlineAsm::Extra_MayStore;
      else if (OpInfo.Type == InlineAsm::isClobber)
        Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
    }
  }

  unsigned get() const { return Flags; }
};

} // namespace

/// Assign virtual/physical registers for the specified register operand.
static void getRegistersForValue(MachineFunction &MF,
                                 MachineIRBuilder &MIRBuilder,
                                 GISelAsmOperandInfo &OpInfo,
                                 GISelAsmOperandInfo &RefOpInfo) {

  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();

  // No work to do for memory operations.
  if (OpInfo.ConstraintType == TargetLowering::C_Memory)
    return;

  // If this is a constraint for a single physreg, or a constraint for a
  // register class, find it.
  Register AssignedReg;
  const TargetRegisterClass *RC;
  std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
      &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
  // RC is unset only on failure. Return immediately.
  if (!RC)
    return;

  // No need to allocate a matching input constraint since the constraint it's
  // matching to has already been allocated.
  if (OpInfo.isMatchingInputConstraint())
    return;

  // Initialize NumRegs.
  unsigned NumRegs = 1;
  if (OpInfo.ConstraintVT != MVT::Other)
    NumRegs = TLI.getNumRegisters(MF.getFunction().getContext(),
                                  OpInfo.ConstraintVT);

  // If this is a constraint for a specific physical register, but the type of
  // the operand requires more than one register to be passed, we allocate the
  // required amount of physical registers, starting from the selected physical
  // register.
  // For this, first retrieve a register iterator for the given register class.
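  // Illustrative example: on a 32-bit target, an i64 operand constrained to
  // "{r0}" reports NumRegs == 2, so r0 and the register that follows it in
  // the class's iteration order are both claimed (the exact registers are
  // target-specific).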
  TargetRegisterClass::iterator I = RC->begin();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Advance the iterator to the assigned register (if set).
  if (AssignedReg) {
    for (; *I != AssignedReg; ++I)
      assert(I != RC->end() && "AssignedReg should be a member of provided RC");
  }

  // Finally, assign the registers. If AssignedReg isn't set, create virtual
  // registers with the provided register class.
  for (; NumRegs; --NumRegs, ++I) {
    assert(I != RC->end() && "Ran out of registers to allocate!");
    Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
    OpInfo.Regs.push_back(R);
  }
}

static void computeConstraintToUse(const TargetLowering *TLI,
                                   TargetLowering::AsmOperandInfo &OpInfo) {
  assert(!OpInfo.Codes.empty() && "Must have at least one constraint");

  // Single-letter constraints ('r') are very common.
  if (OpInfo.Codes.size() == 1) {
    OpInfo.ConstraintCode = OpInfo.Codes[0];
    OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode);
  } else {
    TargetLowering::ConstraintGroup G = TLI->getConstraintPreferences(OpInfo);
    if (G.empty())
      return;
    // FIXME: prefer immediate constraints if the target allows it
    unsigned BestIdx = 0;
    for (const unsigned E = G.size();
         BestIdx < E && (G[BestIdx].second == TargetLowering::C_Other ||
                         G[BestIdx].second == TargetLowering::C_Immediate);
         ++BestIdx)
      ;
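    // In other words, take the first alternative that is not C_Other /
    // C_Immediate; e.g. for a multi-alternative constraint like "Ir" this
    // currently selects the register form 'r' over the immediate form 'I'.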
    OpInfo.ConstraintCode = G[BestIdx].first;
    OpInfo.ConstraintType = G[BestIdx].second;
  }

  // 'X' matches anything.
  if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
    // Labels and constants are handled elsewhere ('X' is the only thing
    // that matches labels). For Functions, the type here is the type of
    // the result, which is not what we want to look at; leave them alone.
    Value *Val = OpInfo.CallOperandVal;
    if (isa<BasicBlock>(Val) || isa<ConstantInt>(Val) || isa<Function>(Val))
      return;

    // Otherwise, try to resolve it to something we know about by looking at
    // the actual operand type.
    if (const char *Repl = TLI->LowerXConstraint(OpInfo.ConstraintVT)) {
      OpInfo.ConstraintCode = Repl;
      OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode);
    }
  }
}

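// Operands of an INLINEASM instruction come in groups: an immediate "flag"
// word followed by the register operands it describes, e.g. (illustrative):
//   INLINEASM &"..." [attdialect], <flag0>, %def0, <flag1>, %use1, ...
// Stepping from one group to the next therefore skips one flag word plus
// getNumOperandRegisters() registers.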
static unsigned getNumOpRegs(const MachineInstr &I, unsigned OpIdx) {
  const InlineAsm::Flag F(I.getOperand(OpIdx).getImm());
  return F.getNumOperandRegisters();
}

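/// Copy \p Src into \p Dst, first widening \p Src with G_ANYEXT when \p Dst
/// is wider. For example (illustrative), an s16 input feeding a def in a
/// 32-bit register class becomes a G_ANYEXT to s32 followed by a COPY.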
static bool buildAnyextOrCopy(Register Dst, Register Src,
                              MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI =
      MIRBuilder.getMF().getSubtarget().getRegisterInfo();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  auto SrcTy = MRI->getType(Src);
  if (!SrcTy.isValid()) {
    LLVM_DEBUG(dbgs() << "Source type for copy is not valid\n");
    return false;
  }
  unsigned SrcSize = TRI->getRegSizeInBits(Src, *MRI);
  unsigned DstSize = TRI->getRegSizeInBits(Dst, *MRI);

  if (DstSize < SrcSize) {
    LLVM_DEBUG(dbgs() << "Input can't fit in destination reg class\n");
    return false;
  }

  // Attempt to anyext small scalar sources.
  if (DstSize > SrcSize) {
    if (!SrcTy.isScalar()) {
      LLVM_DEBUG(dbgs() << "Can't extend non-scalar input to size of "
                           "destination register class\n");
      return false;
    }
    Src = MIRBuilder.buildAnyExt(LLT::scalar(DstSize), Src).getReg(0);
  }

  MIRBuilder.buildCopy(Dst, Src);
  return true;
}

bool InlineAsmLowering::lowerInlineAsm(
    MachineIRBuilder &MIRBuilder, const CallBase &Call,
    std::function<ArrayRef<Register>(const Value &Val)> GetOrCreateVRegs)
    const {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());

  /// ConstraintOperands - Information about all of the constraints.
  GISelAsmOperandInfoVector ConstraintOperands;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getDataLayout();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(DL, TRI, Call);

  ExtraFlags ExtraInfo(Call);
  unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
  unsigned ResNo = 0; // ResNo - The result number of the next output.
  for (auto &T : TargetConstraints) {
    ConstraintOperands.push_back(GISelAsmOperandInfo(T));
    GISelAsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Compute the value type for each operand.
    if (OpInfo.hasArg()) {
      OpInfo.CallOperandVal = Call.getArgOperand(ArgNo);

      if (isa<BasicBlock>(OpInfo.CallOperandVal)) {
        LLVM_DEBUG(dbgs() << "Basic block input operands not supported yet\n");
        return false;
      }

      Type *OpTy = OpInfo.CallOperandVal->getType();

      // If this is an indirect operand, the operand is a pointer to the
      // accessed type.
      if (OpInfo.isIndirect) {
        OpTy = Call.getParamElementType(ArgNo);
        assert(OpTy && "Indirect operand must have elementtype attribute");
      }

      // FIXME: Support aggregate input operands
      if (!OpTy->isSingleValueType()) {
        LLVM_DEBUG(
            dbgs() << "Aggregate input operands are not supported yet\n");
        return false;
      }

      OpInfo.ConstraintVT =
          TLI->getAsmOperandValueType(DL, OpTy, /*AllowUnknown=*/true)
              .getSimpleVT();
      ++ArgNo;
    } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
      assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
      if (StructType *STy = dyn_cast<StructType>(Call.getType())) {
        OpInfo.ConstraintVT =
            TLI->getSimpleValueType(DL, STy->getElementType(ResNo));
      } else {
        assert(ResNo == 0 && "Asm only has one result!");
        OpInfo.ConstraintVT =
            TLI->getAsmOperandValueType(DL, Call.getType()).getSimpleVT();
      }
      ++ResNo;
    } else {
      assert(OpInfo.Type != InlineAsm::isLabel &&
             "GlobalISel currently doesn't support callbr");
      OpInfo.ConstraintVT = MVT::Other;
    }

    if (OpInfo.ConstraintVT == MVT::i64x8)
      return false;

    // Compute the constraint code and ConstraintType to use.
    computeConstraintToUse(TLI, OpInfo);

    // The selected constraint type might expose new side effects.
    ExtraInfo.update(OpInfo);
  }

  // At this point, all operand types are decided.
  // Create the MachineInstr, but don't insert it yet, since lowering the
  // input operands may still need to insert instructions before it.
  auto Inst = MIRBuilder.buildInstrNoInsert(TargetOpcode::INLINEASM)
                  .addExternalSymbol(IA->getAsmString().data())
                  .addImm(ExtraInfo.get());
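
  // At this point Inst looks roughly like (illustrative MIR):
  //   INLINEASM &"<asm string>" [sideeffect] [attdialect]
  // The loop below appends one flag/register group per constraint.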

  // Starting from this operand: flag followed by register(s) will be added as
  // operands to Inst for each constraint. Used for matching input constraints.
  unsigned StartIdx = Inst->getNumOperands();

  // Collects the output operands for later processing.
  GISelAsmOperandInfoVector OutputOperands;

  for (auto &OpInfo : ConstraintOperands) {
    GISelAsmOperandInfo &RefOpInfo =
        OpInfo.isMatchingInputConstraint()
            ? ConstraintOperands[OpInfo.getMatchedOperand()]
            : OpInfo;

    // Assign registers for register operands.
    getRegistersForValue(MF, MIRBuilder, OpInfo, RefOpInfo);

    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        const InlineAsm::ConstraintCode ConstraintID =
            TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
               "Failed to convert memory constraint code to constraint id.");

        // Add information to the INLINEASM instruction to know about this
        // output.
        InlineAsm::Flag Flag(InlineAsm::Kind::Mem, 1);
        Flag.setMemConstraint(ConstraintID);
        Inst.addImm(Flag);
        ArrayRef<Register> SourceRegs =
            GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(
            SourceRegs.size() == 1 &&
            "Expected the memory output to fit into a single virtual register");
        Inst.addReg(SourceRegs[0]);
      } else {
        // Otherwise, this outputs to a register (directly for C_Register /
        // C_RegisterClass / C_Other).
        assert(OpInfo.ConstraintType == TargetLowering::C_Register ||
               OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
               OpInfo.ConstraintType == TargetLowering::C_Other);

        // Find a register that we can use.
        if (OpInfo.Regs.empty()) {
          LLVM_DEBUG(dbgs()
                     << "Couldn't allocate output register for constraint\n");
          return false;
        }

        // Add information to the INLINEASM instruction to know that this
        // register is set.
        InlineAsm::Flag Flag(OpInfo.isEarlyClobber
                                 ? InlineAsm::Kind::RegDefEarlyClobber
                                 : InlineAsm::Kind::RegDef,
                             OpInfo.Regs.size());
        if (OpInfo.Regs.front().isVirtual()) {
          // Put the register class of the virtual registers in the flag word.
          // That way, later passes can recompute register class constraints
          // for inline assembly as well as normal instructions. Don't do this
          // for tied operands that can use the regclass information from the
          // def.
          const TargetRegisterClass *RC =
              MRI->getRegClass(OpInfo.Regs.front());
          Flag.setRegClass(RC->getID());
        }

        Inst.addImm(Flag);

        for (Register Reg : OpInfo.Regs) {
          Inst.addReg(Reg,
                      RegState::Define | getImplRegState(Reg.isPhysical()) |
                          (OpInfo.isEarlyClobber ? RegState::EarlyClobber : 0));
        }

        // Remember this output operand for later processing.
        OutputOperands.push_back(OpInfo);
      }

      break;
    case InlineAsm::isInput:
    case InlineAsm::isLabel: {
      if (OpInfo.isMatchingInputConstraint()) {
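        // A matching input constraint, e.g. the "0" in "=r,0", must reuse
        // the register assigned to the operand it matches. Locate that def's
        // flag word among the operands already appended to Inst, then tie
        // the input register to the def register.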
        unsigned DefIdx = OpInfo.getMatchedOperand();
        // Find operand with register def that corresponds to DefIdx.
        unsigned InstFlagIdx = StartIdx;
        for (unsigned i = 0; i < DefIdx; ++i)
          InstFlagIdx += getNumOpRegs(*Inst, InstFlagIdx) + 1;
        assert(getNumOpRegs(*Inst, InstFlagIdx) == 1 && "Wrong flag");

        const InlineAsm::Flag MatchedOperandFlag(
            Inst->getOperand(InstFlagIdx).getImm());
        if (MatchedOperandFlag.isMemKind()) {
          LLVM_DEBUG(dbgs() << "Matching input constraint to mem operand not "
                               "supported. This should be target specific.\n");
          return false;
        }
        if (!MatchedOperandFlag.isRegDefKind() &&
            !MatchedOperandFlag.isRegDefEarlyClobberKind()) {
          LLVM_DEBUG(dbgs() << "Unknown matching constraint\n");
          return false;
        }

        // We want to tie the input to the def register in the next operand.
        unsigned DefRegIdx = InstFlagIdx + 1;
        Register Def = Inst->getOperand(DefRegIdx).getReg();

        ArrayRef<Register> SrcRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(SrcRegs.size() == 1 && "Single register is expected here");

        // When Def is a physreg: use the given input directly.
        Register In = SrcRegs[0];
        // When Def is a vreg: copy the input into a new vreg with the same
        // register class as Def.
        if (Def.isVirtual()) {
          In = MRI->createVirtualRegister(MRI->getRegClass(Def));
          if (!buildAnyextOrCopy(In, SrcRegs[0], MIRBuilder))
            return false;
        }

        // Add Flag and the input register operand (In) to Inst. Tie In to Def.
        InlineAsm::Flag UseFlag(InlineAsm::Kind::RegUse, 1);
        UseFlag.setMatchingOp(DefIdx);
        Inst.addImm(UseFlag);
        Inst.addReg(In);
        Inst->tieOperands(DefRegIdx, Inst->getNumOperands() - 1);
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Other &&
          OpInfo.isIndirect) {
        LLVM_DEBUG(dbgs() << "Indirect input operands with unknown constraint "
                             "not supported yet\n");
        return false;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
          OpInfo.ConstraintType == TargetLowering::C_Other) {

        std::vector<MachineOperand> Ops;
        if (!lowerAsmOperandForConstraint(OpInfo.CallOperandVal,
                                          OpInfo.ConstraintCode, Ops,
                                          MIRBuilder)) {
          LLVM_DEBUG(dbgs() << "Don't support constraint: "
                            << OpInfo.ConstraintCode << " yet\n");
          return false;
        }

        assert(Ops.size() > 0 &&
               "Expected constraint to be lowered to at least one operand");

        // Add information to the INLINEASM node to know about this input.
        const unsigned OpFlags =
            InlineAsm::Flag(InlineAsm::Kind::Imm, Ops.size());
        Inst.addImm(OpFlags);
        Inst.add(Ops);
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {

        if (!OpInfo.isIndirect) {
          LLVM_DEBUG(dbgs()
                     << "Cannot indirectify memory input operands yet\n");
          return false;
        }

        assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");

        const InlineAsm::ConstraintCode ConstraintID =
            TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        InlineAsm::Flag OpFlags(InlineAsm::Kind::Mem, 1);
        OpFlags.setMemConstraint(ConstraintID);
        Inst.addImm(OpFlags);
        ArrayRef<Register> SourceRegs =
            GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(
            SourceRegs.size() == 1 &&
            "Expected the memory input to fit into a single virtual register");
        Inst.addReg(SourceRegs[0]);
        break;
      }

      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
              OpInfo.ConstraintType == TargetLowering::C_Register) &&
             "Unknown constraint type!");

      if (OpInfo.isIndirect) {
        LLVM_DEBUG(dbgs() << "Can't handle indirect register inputs yet "
                             "for constraint '"
                          << OpInfo.ConstraintCode << "'\n");
        return false;
      }

      // Copy the input into the appropriate registers.
      if (OpInfo.Regs.empty()) {
        LLVM_DEBUG(
            dbgs()
            << "Couldn't allocate input register for register constraint\n");
        return false;
      }

      unsigned NumRegs = OpInfo.Regs.size();
      ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
      assert(NumRegs == SourceRegs.size() &&
             "Expected the number of input registers to match the number of "
             "source registers");

      if (NumRegs > 1) {
        LLVM_DEBUG(dbgs() << "Input operands with multiple input registers "
                             "are not supported yet\n");
        return false;
      }

      InlineAsm::Flag Flag(InlineAsm::Kind::RegUse, NumRegs);
      if (OpInfo.Regs.front().isVirtual()) {
        // Put the register class of the virtual registers in the flag word.
        const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front());
        Flag.setRegClass(RC->getID());
      }
      Inst.addImm(Flag);
      if (!buildAnyextOrCopy(OpInfo.Regs[0], SourceRegs[0], MIRBuilder))
        return false;
      Inst.addReg(OpInfo.Regs[0]);
      break;
    }

    case InlineAsm::isClobber: {
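      // Clobbers come from constraints such as "~{cc}" or "~{d0}"; each
      // clobbered physical register is recorded as an early-clobber implicit
      // def so the register allocator keeps live values out of it.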

      const unsigned NumRegs = OpInfo.Regs.size();
      if (NumRegs > 0) {
        unsigned Flag = InlineAsm::Flag(InlineAsm::Kind::Clobber, NumRegs);
        Inst.addImm(Flag);

        for (Register Reg : OpInfo.Regs) {
          Inst.addReg(Reg, RegState::Define | RegState::EarlyClobber |
                               getImplRegState(Reg.isPhysical()));
        }
      }
      break;
    }
    }
  }

  // Add rounding control registers as implicit def for inline asm.
  if (MF.getFunction().hasFnAttribute(Attribute::StrictFP)) {
    ArrayRef<MCPhysReg> RCRegs = TLI->getRoundingControlRegisters();
    for (MCPhysReg Reg : RCRegs)
      Inst.addReg(Reg, RegState::ImplicitDefine);
  }

  if (auto Bundle = Call.getOperandBundle(LLVMContext::OB_convergencectrl)) {
    auto *Token = Bundle->Inputs[0].get();
    ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*Token);
    assert(SourceRegs.size() == 1 &&
           "Expected the control token to fit into a single virtual register");
    Inst.addUse(SourceRegs[0], RegState::Implicit);
  }

  if (const MDNode *SrcLoc = Call.getMetadata("srcloc"))
    Inst.addMetadata(SrcLoc);

  // All inputs are handled, insert the instruction now.
  MIRBuilder.insertInstr(Inst);

  // Finally, copy the output operands into the output registers.
  ArrayRef<Register> ResRegs = GetOrCreateVRegs(Call);
  if (ResRegs.size() != OutputOperands.size()) {
    LLVM_DEBUG(dbgs() << "Expected the number of output registers to match "
                         "the number of destination registers\n");
    return false;
  }
  for (unsigned int i = 0, e = ResRegs.size(); i < e; ++i) {
    GISelAsmOperandInfo &OpInfo = OutputOperands[i];

    if (OpInfo.Regs.empty())
      continue;

    switch (OpInfo.ConstraintType) {
    case TargetLowering::C_Register:
    case TargetLowering::C_RegisterClass: {
      if (OpInfo.Regs.size() > 1) {
        LLVM_DEBUG(dbgs() << "Output operands with multiple defining "
                             "registers are not supported yet\n");
        return false;
      }

      Register SrcReg = OpInfo.Regs[0];
      unsigned SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
      LLT ResTy = MRI->getType(ResRegs[i]);
      if (ResTy.isScalar() && ResTy.getSizeInBits() < SrcSize) {
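        // Illustrative example: an i8 result constrained to a 32-bit
        // register class is first copied into an s32 generic vreg and then
        // truncated down to s8.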
        // First copy the non-typed virtual register into a generic virtual
        // register.
        Register Tmp1Reg =
            MRI->createGenericVirtualRegister(LLT::scalar(SrcSize));
        MIRBuilder.buildCopy(Tmp1Reg, SrcReg);
        // Then truncate it down to the result type.
        MIRBuilder.buildTrunc(ResRegs[i], Tmp1Reg);
      } else if (ResTy.getSizeInBits() == SrcSize) {
        MIRBuilder.buildCopy(ResRegs[i], SrcReg);
      } else {
        LLVM_DEBUG(dbgs() << "Unhandled output operand with "
                             "mismatched register size\n");
        return false;
      }

      break;
    }
    case TargetLowering::C_Immediate:
    case TargetLowering::C_Other:
      LLVM_DEBUG(
          dbgs() << "Cannot lower target specific output constraints yet\n");
      return false;
    case TargetLowering::C_Memory:
      break; // Already handled.
    case TargetLowering::C_Address:
      break; // Silence warning.
    case TargetLowering::C_Unknown:
      LLVM_DEBUG(dbgs() << "Unexpected unknown constraint\n");
      return false;
    }
  }

  return true;
}

bool InlineAsmLowering::lowerAsmOperandForConstraint(
    Value *Val, StringRef Constraint, std::vector<MachineOperand> &Ops,
    MachineIRBuilder &MIRBuilder) const {
  if (Constraint.size() > 1)
    return false;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default:
    return false;
  case 'i': // Simple Integer or Relocatable Constant
  case 'n': // Immediate integer with a known value.
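    // E.g. (illustrative IR) the operand of `call void asm "...", "n"(i32
    // 42)` lowers to an immediate machine operand with value 42; an i1 true
    // is zero-extended to 1 rather than sign-extended to -1.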
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
      assert(CI->getBitWidth() <= 64 &&
             "expected immediate to fit into 64-bits");
      // Boolean constants should be zero-extended, others are sign-extended.
      bool IsBool = CI->getBitWidth() == 1;
      int64_t ExtVal = IsBool ? CI->getZExtValue() : CI->getSExtValue();
      Ops.push_back(MachineOperand::CreateImm(ExtVal));
      return true;
    }
    return false;
  }
}