| 1 | //===-- ARM/ARMMCCodeEmitter.cpp - Convert ARM code to machine code -------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // This file implements the ARMMCCodeEmitter class. |
| 10 | // |
| 11 | //===----------------------------------------------------------------------===// |
| 12 | |
| 13 | #include "MCTargetDesc/ARMAddressingModes.h" |
| 14 | #include "MCTargetDesc/ARMBaseInfo.h" |
| 15 | #include "MCTargetDesc/ARMFixupKinds.h" |
| 16 | #include "MCTargetDesc/ARMMCAsmInfo.h" |
| 17 | #include "llvm/ADT/APFloat.h" |
| 18 | #include "llvm/ADT/APInt.h" |
| 19 | #include "llvm/ADT/SmallVector.h" |
| 20 | #include "llvm/ADT/Statistic.h" |
| 21 | #include "llvm/MC/MCCodeEmitter.h" |
| 22 | #include "llvm/MC/MCContext.h" |
| 23 | #include "llvm/MC/MCExpr.h" |
| 24 | #include "llvm/MC/MCFixup.h" |
| 25 | #include "llvm/MC/MCInst.h" |
| 26 | #include "llvm/MC/MCInstrDesc.h" |
| 27 | #include "llvm/MC/MCInstrInfo.h" |
| 28 | #include "llvm/MC/MCRegisterInfo.h" |
| 29 | #include "llvm/MC/MCSubtargetInfo.h" |
| 30 | #include "llvm/Support/Casting.h" |
| 31 | #include "llvm/Support/Compiler.h" |
| 32 | #include "llvm/Support/EndianStream.h" |
| 33 | #include "llvm/Support/MathExtras.h" |
| 34 | #include "llvm/TargetParser/Triple.h" |
| 35 | #include <cassert> |
| 36 | #include <cstdint> |
| 37 | #include <cstdlib> |
| 38 | |
| 39 | using namespace llvm; |
| 40 | |
| 41 | #define DEBUG_TYPE "mccodeemitter" |
| 42 | |
STATISTIC(MCNumEmitted, "Number of MC instructions emitted.");
STATISTIC(MCNumCPRelocations, "Number of constant pool relocations created.");
| 45 | |
| 46 | namespace { |
| 47 | |
| 48 | class ARMMCCodeEmitter : public MCCodeEmitter { |
| 49 | const MCInstrInfo &MCII; |
| 50 | MCContext &CTX; |
| 51 | bool IsLittleEndian; |
| 52 | |
| 53 | public: |
| 54 | ARMMCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx, bool IsLittle) |
| 55 | : MCII(mcii), CTX(ctx), IsLittleEndian(IsLittle) { |
| 56 | } |
| 57 | ARMMCCodeEmitter(const ARMMCCodeEmitter &) = delete; |
| 58 | ARMMCCodeEmitter &operator=(const ARMMCCodeEmitter &) = delete; |
| 59 | ~ARMMCCodeEmitter() override = default; |
| 60 | |
| 61 | bool isThumb(const MCSubtargetInfo &STI) const { |
| 62 | return STI.hasFeature(Feature: ARM::ModeThumb); |
| 63 | } |
| 64 | |
| 65 | bool isThumb2(const MCSubtargetInfo &STI) const { |
| 66 | return isThumb(STI) && STI.hasFeature(Feature: ARM::FeatureThumb2); |
| 67 | } |
| 68 | |
| 69 | bool isTargetMachO(const MCSubtargetInfo &STI) const { |
| 70 | const Triple &TT = STI.getTargetTriple(); |
| 71 | return TT.isOSBinFormatMachO(); |
| 72 | } |
| 73 | |
| 74 | unsigned getMachineSoImmOpValue(unsigned SoImm) const; |
| 75 | |
| 76 | // getBinaryCodeForInstr - TableGen'erated function for getting the |
| 77 | // binary encoding for an instruction. |
| 78 | uint64_t getBinaryCodeForInstr(const MCInst &MI, |
| 79 | SmallVectorImpl<MCFixup> &Fixups, |
| 80 | const MCSubtargetInfo &STI) const; |
| 81 | |
| 82 | /// getMachineOpValue - Return binary encoding of operand. If the machine |
| 83 | /// operand requires relocation, record the relocation and return zero. |
  unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
| 85 | SmallVectorImpl<MCFixup> &Fixups, |
| 86 | const MCSubtargetInfo &STI) const; |
| 87 | |
  /// getHiLoImmOpValue - Return the encoding for either the high or low
  /// 16 bits, or the high/middle-high/middle-low/low 8 bits, of the specified
  /// operand. This is used for operands with :lower16:, :upper16:,
  /// :lower0_7:, :lower8_15:, :upper0_7:, and :upper8_15: prefixes.
| 92 | uint32_t getHiLoImmOpValue(const MCInst &MI, unsigned OpIdx, |
| 93 | SmallVectorImpl<MCFixup> &Fixups, |
| 94 | const MCSubtargetInfo &STI) const; |
| 95 | |
| 96 | bool EncodeAddrModeOpValues(const MCInst &MI, unsigned OpIdx, |
| 97 | unsigned &Reg, unsigned &Imm, |
| 98 | SmallVectorImpl<MCFixup> &Fixups, |
| 99 | const MCSubtargetInfo &STI) const; |
| 100 | |
| 101 | /// getThumbBLTargetOpValue - Return encoding info for Thumb immediate |
| 102 | /// BL branch target. |
| 103 | uint32_t getThumbBLTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 104 | SmallVectorImpl<MCFixup> &Fixups, |
| 105 | const MCSubtargetInfo &STI) const; |
| 106 | |
| 107 | /// getThumbBLXTargetOpValue - Return encoding info for Thumb immediate |
| 108 | /// BLX branch target. |
| 109 | uint32_t getThumbBLXTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 110 | SmallVectorImpl<MCFixup> &Fixups, |
| 111 | const MCSubtargetInfo &STI) const; |
| 112 | |
| 113 | /// getThumbBRTargetOpValue - Return encoding info for Thumb branch target. |
| 114 | uint32_t getThumbBRTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 115 | SmallVectorImpl<MCFixup> &Fixups, |
| 116 | const MCSubtargetInfo &STI) const; |
| 117 | |
| 118 | /// getThumbBCCTargetOpValue - Return encoding info for Thumb branch target. |
| 119 | uint32_t getThumbBCCTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 120 | SmallVectorImpl<MCFixup> &Fixups, |
| 121 | const MCSubtargetInfo &STI) const; |
| 122 | |
| 123 | /// getThumbCBTargetOpValue - Return encoding info for Thumb branch target. |
| 124 | uint32_t getThumbCBTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 125 | SmallVectorImpl<MCFixup> &Fixups, |
| 126 | const MCSubtargetInfo &STI) const; |
| 127 | |
| 128 | /// getBranchTargetOpValue - Return encoding info for 24-bit immediate |
| 129 | /// branch target. |
| 130 | uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 131 | SmallVectorImpl<MCFixup> &Fixups, |
| 132 | const MCSubtargetInfo &STI) const; |
| 133 | |
| 134 | /// getThumbBranchTargetOpValue - Return encoding info for 24-bit |
| 135 | /// immediate Thumb2 direct branch target. |
| 136 | uint32_t getThumbBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 137 | SmallVectorImpl<MCFixup> &Fixups, |
| 138 | const MCSubtargetInfo &STI) const; |
| 139 | |
| 140 | /// getARMBranchTargetOpValue - Return encoding info for 24-bit immediate |
| 141 | /// branch target. |
| 142 | uint32_t getARMBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 143 | SmallVectorImpl<MCFixup> &Fixups, |
| 144 | const MCSubtargetInfo &STI) const; |
| 145 | uint32_t getARMBLTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 146 | SmallVectorImpl<MCFixup> &Fixups, |
| 147 | const MCSubtargetInfo &STI) const; |
| 148 | uint32_t getARMBLXTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 149 | SmallVectorImpl<MCFixup> &Fixups, |
| 150 | const MCSubtargetInfo &STI) const; |
| 151 | |
| 152 | /// getAdrLabelOpValue - Return encoding info for 12-bit immediate |
| 153 | /// ADR label target. |
| 154 | uint32_t getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx, |
| 155 | SmallVectorImpl<MCFixup> &Fixups, |
| 156 | const MCSubtargetInfo &STI) const; |
| 157 | uint32_t getThumbAdrLabelOpValue(const MCInst &MI, unsigned OpIdx, |
| 158 | SmallVectorImpl<MCFixup> &Fixups, |
| 159 | const MCSubtargetInfo &STI) const; |
| 160 | uint32_t getT2AdrLabelOpValue(const MCInst &MI, unsigned OpIdx, |
| 161 | SmallVectorImpl<MCFixup> &Fixups, |
| 162 | const MCSubtargetInfo &STI) const; |
| 163 | |
| 164 | uint32_t getITMaskOpValue(const MCInst &MI, unsigned OpIdx, |
| 165 | SmallVectorImpl<MCFixup> &Fixups, |
| 166 | const MCSubtargetInfo &STI) const; |
| 167 | |
| 168 | /// getMVEShiftImmOpValue - Return encoding info for the 'sz:imm5' |
| 169 | /// operand. |
| 170 | uint32_t getMVEShiftImmOpValue(const MCInst &MI, unsigned OpIdx, |
| 171 | SmallVectorImpl<MCFixup> &Fixups, |
| 172 | const MCSubtargetInfo &STI) const; |
| 173 | |
| 174 | /// getAddrModeImm12OpValue - Return encoding info for 'reg +/- imm12' |
| 175 | /// operand. |
| 176 | uint32_t getAddrModeImm12OpValue(const MCInst &MI, unsigned OpIdx, |
| 177 | SmallVectorImpl<MCFixup> &Fixups, |
| 178 | const MCSubtargetInfo &STI) const; |
| 179 | |
| 180 | /// getThumbAddrModeRegRegOpValue - Return encoding for 'reg + reg' operand. |
| 181 | uint32_t getThumbAddrModeRegRegOpValue(const MCInst &MI, unsigned OpIdx, |
| 182 | SmallVectorImpl<MCFixup> &Fixups, |
| 183 | const MCSubtargetInfo &STI) const; |
| 184 | |
| 185 | /// getT2AddrModeImm8s4OpValue - Return encoding info for 'reg +/- imm8<<2' |
| 186 | /// operand. |
| 187 | uint32_t getT2AddrModeImm8s4OpValue(const MCInst &MI, unsigned OpIdx, |
| 188 | SmallVectorImpl<MCFixup> &Fixups, |
| 189 | const MCSubtargetInfo &STI) const; |
| 190 | |
| 191 | /// getT2AddrModeImm7s4OpValue - Return encoding info for 'reg +/- imm7<<2' |
| 192 | /// operand. |
| 193 | uint32_t getT2AddrModeImm7s4OpValue(const MCInst &MI, unsigned OpIdx, |
| 194 | SmallVectorImpl<MCFixup> &Fixups, |
| 195 | const MCSubtargetInfo &STI) const; |
| 196 | |
| 197 | /// getT2AddrModeImm0_1020s4OpValue - Return encoding info for 'reg + imm8<<2' |
| 198 | /// operand. |
| 199 | uint32_t getT2AddrModeImm0_1020s4OpValue(const MCInst &MI, unsigned OpIdx, |
| 200 | SmallVectorImpl<MCFixup> &Fixups, |
| 201 | const MCSubtargetInfo &STI) const; |
| 202 | |
| 203 | /// getT2ScaledImmOpValue - Return encoding info for '+/- immX<<Y' |
| 204 | /// operand. |
| 205 | template<unsigned Bits, unsigned Shift> |
| 206 | uint32_t getT2ScaledImmOpValue(const MCInst &MI, unsigned OpIdx, |
| 207 | SmallVectorImpl<MCFixup> &Fixups, |
| 208 | const MCSubtargetInfo &STI) const; |
| 209 | |
| 210 | /// getMveAddrModeRQOpValue - Return encoding info for 'reg, vreg' |
| 211 | /// operand. |
| 212 | uint32_t getMveAddrModeRQOpValue(const MCInst &MI, unsigned OpIdx, |
| 213 | SmallVectorImpl<MCFixup> &Fixups, |
| 214 | const MCSubtargetInfo &STI) const; |
| 215 | |
| 216 | /// getMveAddrModeQOpValue - Return encoding info for 'reg +/- imm7<<{shift}' |
| 217 | /// operand. |
| 218 | template<int shift> |
| 219 | uint32_t getMveAddrModeQOpValue(const MCInst &MI, unsigned OpIdx, |
| 220 | SmallVectorImpl<MCFixup> &Fixups, |
| 221 | const MCSubtargetInfo &STI) const; |
| 222 | |
| 223 | /// getLdStSORegOpValue - Return encoding info for 'reg +/- reg shop imm' |
| 224 | /// operand as needed by load/store instructions. |
| 225 | uint32_t getLdStSORegOpValue(const MCInst &MI, unsigned OpIdx, |
| 226 | SmallVectorImpl<MCFixup> &Fixups, |
| 227 | const MCSubtargetInfo &STI) const; |
| 228 | |
| 229 | /// getLdStmModeOpValue - Return encoding for load/store multiple mode. |
| 230 | uint32_t getLdStmModeOpValue(const MCInst &MI, unsigned OpIdx, |
| 231 | SmallVectorImpl<MCFixup> &Fixups, |
| 232 | const MCSubtargetInfo &STI) const { |
| 233 | ARM_AM::AMSubMode Mode = (ARM_AM::AMSubMode)MI.getOperand(i: OpIdx).getImm(); |
| 234 | switch (Mode) { |
| 235 | default: llvm_unreachable("Unknown addressing sub-mode!" ); |
| 236 | case ARM_AM::da: return 0; |
| 237 | case ARM_AM::ia: return 1; |
| 238 | case ARM_AM::db: return 2; |
| 239 | case ARM_AM::ib: return 3; |
| 240 | } |
| 241 | } |
| 242 | |
  /// getShiftOp - Return the shift type encoding (bits [6:5] of the shifter
  /// operand) for the given shift opcode.
| 245 | unsigned getShiftOp(ARM_AM::ShiftOpc ShOpc) const { |
| 246 | switch (ShOpc) { |
| 247 | case ARM_AM::no_shift: |
| 248 | case ARM_AM::lsl: return 0; |
| 249 | case ARM_AM::lsr: return 1; |
| 250 | case ARM_AM::asr: return 2; |
| 251 | case ARM_AM::ror: |
| 252 | case ARM_AM::rrx: return 3; |
| 253 | default: |
| 254 | llvm_unreachable("Invalid ShiftOpc!" ); |
| 255 | } |
| 256 | } |
| 257 | |
| 258 | /// getAddrMode2OffsetOpValue - Return encoding for am2offset operands. |
| 259 | uint32_t getAddrMode2OffsetOpValue(const MCInst &MI, unsigned OpIdx, |
| 260 | SmallVectorImpl<MCFixup> &Fixups, |
| 261 | const MCSubtargetInfo &STI) const; |
| 262 | |
| 263 | /// getPostIdxRegOpValue - Return encoding for postidx_reg operands. |
| 264 | uint32_t getPostIdxRegOpValue(const MCInst &MI, unsigned OpIdx, |
| 265 | SmallVectorImpl<MCFixup> &Fixups, |
| 266 | const MCSubtargetInfo &STI) const; |
| 267 | |
| 268 | /// getAddrMode3OffsetOpValue - Return encoding for am3offset operands. |
| 269 | uint32_t getAddrMode3OffsetOpValue(const MCInst &MI, unsigned OpIdx, |
| 270 | SmallVectorImpl<MCFixup> &Fixups, |
| 271 | const MCSubtargetInfo &STI) const; |
| 272 | |
| 273 | /// getAddrMode3OpValue - Return encoding for addrmode3 operands. |
| 274 | uint32_t getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx, |
| 275 | SmallVectorImpl<MCFixup> &Fixups, |
| 276 | const MCSubtargetInfo &STI) const; |
| 277 | |
| 278 | /// getAddrModeThumbSPOpValue - Return encoding info for 'reg +/- imm12' |
| 279 | /// operand. |
| 280 | uint32_t getAddrModeThumbSPOpValue(const MCInst &MI, unsigned OpIdx, |
| 281 | SmallVectorImpl<MCFixup> &Fixups, |
| 282 | const MCSubtargetInfo &STI) const; |
| 283 | |
| 284 | /// getAddrModeISOpValue - Encode the t_addrmode_is# operands. |
| 285 | uint32_t getAddrModeISOpValue(const MCInst &MI, unsigned OpIdx, |
| 286 | SmallVectorImpl<MCFixup> &Fixups, |
| 287 | const MCSubtargetInfo &STI) const; |
| 288 | |
| 289 | /// getAddrModePCOpValue - Return encoding for t_addrmode_pc operands. |
| 290 | uint32_t getAddrModePCOpValue(const MCInst &MI, unsigned OpIdx, |
| 291 | SmallVectorImpl<MCFixup> &Fixups, |
| 292 | const MCSubtargetInfo &STI) const; |
| 293 | |
| 294 | /// getAddrMode5OpValue - Return encoding info for 'reg +/- (imm8 << 2)' operand. |
| 295 | uint32_t getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx, |
| 296 | SmallVectorImpl<MCFixup> &Fixups, |
| 297 | const MCSubtargetInfo &STI) const; |
| 298 | |
| 299 | /// getAddrMode5FP16OpValue - Return encoding info for 'reg +/- (imm8 << 1)' operand. |
| 300 | uint32_t getAddrMode5FP16OpValue(const MCInst &MI, unsigned OpIdx, |
| 301 | SmallVectorImpl<MCFixup> &Fixups, |
| 302 | const MCSubtargetInfo &STI) const; |
| 303 | |
| 304 | /// getCCOutOpValue - Return encoding of the 's' bit. |
| 305 | unsigned getCCOutOpValue(const MCInst &MI, unsigned Op, |
| 306 | SmallVectorImpl<MCFixup> &Fixups, |
| 307 | const MCSubtargetInfo &STI) const { |
| 308 | // The operand is either reg0 or CPSR. The 's' bit is encoded as '0' or |
| 309 | // '1' respectively. |
| 310 | return MI.getOperand(i: Op).getReg() == ARM::CPSR; |
| 311 | } |
| 312 | |
| 313 | unsigned getModImmOpValue(const MCInst &MI, unsigned Op, |
| 314 | SmallVectorImpl<MCFixup> &Fixups, |
| 315 | const MCSubtargetInfo &ST) const; |
| 316 | |
| 317 | /// getT2SOImmOpValue - Return an encoded 12-bit shifted-immediate value. |
| 318 | unsigned getT2SOImmOpValue(const MCInst &MI, unsigned Op, |
| 319 | SmallVectorImpl<MCFixup> &Fixups, |
| 320 | const MCSubtargetInfo &STI) const; |
| 321 | |
| 322 | unsigned getT2AddrModeSORegOpValue(const MCInst &MI, unsigned OpNum, |
| 323 | SmallVectorImpl<MCFixup> &Fixups, |
| 324 | const MCSubtargetInfo &STI) const; |
| 325 | template<unsigned Bits, unsigned Shift> |
| 326 | unsigned getT2AddrModeImmOpValue(const MCInst &MI, unsigned OpNum, |
| 327 | SmallVectorImpl<MCFixup> &Fixups, |
| 328 | const MCSubtargetInfo &STI) const; |
| 329 | unsigned getT2AddrModeImm8OffsetOpValue(const MCInst &MI, unsigned OpNum, |
| 330 | SmallVectorImpl<MCFixup> &Fixups, |
| 331 | const MCSubtargetInfo &STI) const; |
| 332 | |
| 333 | /// getSORegOpValue - Return an encoded so_reg shifted register value. |
| 334 | unsigned getSORegRegOpValue(const MCInst &MI, unsigned Op, |
| 335 | SmallVectorImpl<MCFixup> &Fixups, |
| 336 | const MCSubtargetInfo &STI) const; |
| 337 | unsigned getSORegImmOpValue(const MCInst &MI, unsigned Op, |
| 338 | SmallVectorImpl<MCFixup> &Fixups, |
| 339 | const MCSubtargetInfo &STI) const; |
| 340 | unsigned getT2SORegOpValue(const MCInst &MI, unsigned Op, |
| 341 | SmallVectorImpl<MCFixup> &Fixups, |
| 342 | const MCSubtargetInfo &STI) const; |
| 343 | |
| 344 | unsigned getNEONVcvtImm32OpValue(const MCInst &MI, unsigned Op, |
| 345 | SmallVectorImpl<MCFixup> &Fixups, |
| 346 | const MCSubtargetInfo &STI) const { |
| 347 | return 64 - MI.getOperand(i: Op).getImm(); |
| 348 | } |
| 349 | |
| 350 | unsigned getBitfieldInvertedMaskOpValue(const MCInst &MI, unsigned Op, |
| 351 | SmallVectorImpl<MCFixup> &Fixups, |
| 352 | const MCSubtargetInfo &STI) const; |
| 353 | |
| 354 | unsigned getRegisterListOpValue(const MCInst &MI, unsigned Op, |
| 355 | SmallVectorImpl<MCFixup> &Fixups, |
| 356 | const MCSubtargetInfo &STI) const; |
| 357 | unsigned getAddrMode6AddressOpValue(const MCInst &MI, unsigned Op, |
| 358 | SmallVectorImpl<MCFixup> &Fixups, |
| 359 | const MCSubtargetInfo &STI) const; |
| 360 | unsigned getAddrMode6OneLane32AddressOpValue(const MCInst &MI, unsigned Op, |
| 361 | SmallVectorImpl<MCFixup> &Fixups, |
| 362 | const MCSubtargetInfo &STI) const; |
| 363 | unsigned getAddrMode6DupAddressOpValue(const MCInst &MI, unsigned Op, |
| 364 | SmallVectorImpl<MCFixup> &Fixups, |
| 365 | const MCSubtargetInfo &STI) const; |
| 366 | unsigned getAddrMode6OffsetOpValue(const MCInst &MI, unsigned Op, |
| 367 | SmallVectorImpl<MCFixup> &Fixups, |
| 368 | const MCSubtargetInfo &STI) const; |
| 369 | |
| 370 | unsigned getShiftRight8Imm(const MCInst &MI, unsigned Op, |
| 371 | SmallVectorImpl<MCFixup> &Fixups, |
| 372 | const MCSubtargetInfo &STI) const; |
| 373 | unsigned getShiftRight16Imm(const MCInst &MI, unsigned Op, |
| 374 | SmallVectorImpl<MCFixup> &Fixups, |
| 375 | const MCSubtargetInfo &STI) const; |
| 376 | unsigned getShiftRight32Imm(const MCInst &MI, unsigned Op, |
| 377 | SmallVectorImpl<MCFixup> &Fixups, |
| 378 | const MCSubtargetInfo &STI) const; |
| 379 | unsigned getShiftRight64Imm(const MCInst &MI, unsigned Op, |
| 380 | SmallVectorImpl<MCFixup> &Fixups, |
| 381 | const MCSubtargetInfo &STI) const; |
| 382 | |
| 383 | unsigned getThumbSRImmOpValue(const MCInst &MI, unsigned Op, |
| 384 | SmallVectorImpl<MCFixup> &Fixups, |
| 385 | const MCSubtargetInfo &STI) const; |
| 386 | |
| 387 | unsigned NEONThumb2DataIPostEncoder(const MCInst &MI, |
| 388 | unsigned EncodedValue, |
| 389 | const MCSubtargetInfo &STI) const; |
| 390 | unsigned NEONThumb2LoadStorePostEncoder(const MCInst &MI, |
| 391 | unsigned EncodedValue, |
| 392 | const MCSubtargetInfo &STI) const; |
| 393 | unsigned NEONThumb2DupPostEncoder(const MCInst &MI, |
| 394 | unsigned EncodedValue, |
| 395 | const MCSubtargetInfo &STI) const; |
| 396 | unsigned NEONThumb2V8PostEncoder(const MCInst &MI, |
| 397 | unsigned EncodedValue, |
| 398 | const MCSubtargetInfo &STI) const; |
| 399 | |
| 400 | unsigned VFPThumb2PostEncoder(const MCInst &MI, |
| 401 | unsigned EncodedValue, |
| 402 | const MCSubtargetInfo &STI) const; |
| 403 | |
| 404 | uint32_t getPowerTwoOpValue(const MCInst &MI, unsigned OpIdx, |
| 405 | SmallVectorImpl<MCFixup> &Fixups, |
| 406 | const MCSubtargetInfo &STI) const; |
| 407 | |
| 408 | void encodeInstruction(const MCInst &MI, SmallVectorImpl<char> &CB, |
| 409 | SmallVectorImpl<MCFixup> &Fixups, |
| 410 | const MCSubtargetInfo &STI) const override; |
| 411 | |
| 412 | template <bool isNeg, ARM::Fixups fixup> |
| 413 | uint32_t getBFTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 414 | SmallVectorImpl<MCFixup> &Fixups, |
| 415 | const MCSubtargetInfo &STI) const; |
| 416 | |
| 417 | uint32_t getBFAfterTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 418 | SmallVectorImpl<MCFixup> &Fixups, |
| 419 | const MCSubtargetInfo &STI) const; |
| 420 | |
| 421 | uint32_t getVPTMaskOpValue(const MCInst &MI, unsigned OpIdx, |
| 422 | SmallVectorImpl<MCFixup> &Fixups, |
| 423 | const MCSubtargetInfo &STI) const; |
| 424 | uint32_t getRestrictedCondCodeOpValue(const MCInst &MI, unsigned OpIdx, |
| 425 | SmallVectorImpl<MCFixup> &Fixups, |
| 426 | const MCSubtargetInfo &STI) const; |
| 427 | template <unsigned size> |
| 428 | uint32_t getMVEPairVectorIndexOpValue(const MCInst &MI, unsigned OpIdx, |
| 429 | SmallVectorImpl<MCFixup> &Fixups, |
| 430 | const MCSubtargetInfo &STI) const; |
| 431 | }; |
| 432 | |
| 433 | } // end anonymous namespace |
| 434 | |
| 435 | static void addFixup(SmallVectorImpl<MCFixup> &Fixups, uint32_t Offset, |
| 436 | const MCExpr *Value, uint16_t Kind) { |
| 437 | bool PCRel = false; |
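  // Branch, PC-relative load/ADR, and low-overhead-loop fixups resolve
  // against the PC, so flag them as PC-relative; everything else (e.g.
  // movw/movt immediate fixups) stays absolute.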
| 438 | switch (Kind) { |
| 439 | case ARM::fixup_arm_ldst_pcrel_12: |
| 440 | case ARM::fixup_t2_ldst_pcrel_12: |
| 441 | case ARM::fixup_arm_pcrel_10_unscaled: |
| 442 | case ARM::fixup_arm_pcrel_10: |
| 443 | case ARM::fixup_t2_pcrel_10: |
| 444 | case ARM::fixup_arm_pcrel_9: |
| 445 | case ARM::fixup_t2_pcrel_9: |
| 446 | case ARM::fixup_thumb_adr_pcrel_10: |
| 447 | case ARM::fixup_arm_adr_pcrel_12: |
| 448 | case ARM::fixup_t2_adr_pcrel_12: |
| 449 | case ARM::fixup_arm_condbranch: |
| 450 | case ARM::fixup_arm_uncondbranch: |
| 451 | case ARM::fixup_t2_condbranch: |
| 452 | case ARM::fixup_t2_uncondbranch: |
| 453 | case ARM::fixup_arm_thumb_br: |
| 454 | case ARM::fixup_arm_uncondbl: |
| 455 | case ARM::fixup_arm_condbl: |
| 456 | case ARM::fixup_arm_blx: |
| 457 | case ARM::fixup_arm_thumb_bl: |
| 458 | case ARM::fixup_arm_thumb_blx: |
| 459 | case ARM::fixup_arm_thumb_cb: |
| 460 | case ARM::fixup_arm_thumb_cp: |
| 461 | case ARM::fixup_arm_thumb_bcc: |
| 462 | case ARM::fixup_bf_branch: |
| 463 | case ARM::fixup_bf_target: |
| 464 | case ARM::fixup_bfl_target: |
| 465 | case ARM::fixup_bfc_target: |
| 466 | case ARM::fixup_wls: |
| 467 | case ARM::fixup_le: |
| 468 | PCRel = true; |
| 469 | } |
| 470 | Fixups.push_back(Elt: MCFixup::create(Offset, Value, Kind, PCRel)); |
| 471 | } |
| 472 | |
| 473 | /// NEONThumb2DataIPostEncoder - Post-process encoded NEON data-processing |
| 474 | /// instructions, and rewrite them to their Thumb2 form if we are currently in |
| 475 | /// Thumb2 mode. |
| 476 | unsigned ARMMCCodeEmitter::NEONThumb2DataIPostEncoder(const MCInst &MI, |
| 477 | unsigned EncodedValue, |
| 478 | const MCSubtargetInfo &STI) const { |
| 479 | if (isThumb2(STI)) { |
    // NEON Thumb2 data-processing encodings are very simple: bit 24 is moved
    // to bit 12 of the high half-word (i.e. bit 28), and bits 27-24 are
    // set to 1111.
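    // For example, an ARM-mode encoding of the form 0xF2xxxxxx (bit 24
    // clear) becomes 0xEFxxxxxx, and 0xF3xxxxxx (bit 24 set) becomes
    // 0xFFxxxxxx.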
| 483 | unsigned Bit24 = EncodedValue & 0x01000000; |
| 484 | unsigned Bit28 = Bit24 << 4; |
| 485 | EncodedValue &= 0xEFFFFFFF; |
| 486 | EncodedValue |= Bit28; |
| 487 | EncodedValue |= 0x0F000000; |
| 488 | } |
| 489 | |
| 490 | return EncodedValue; |
| 491 | } |
| 492 | |
| 493 | /// NEONThumb2LoadStorePostEncoder - Post-process encoded NEON load/store |
| 494 | /// instructions, and rewrite them to their Thumb2 form if we are currently in |
| 495 | /// Thumb2 mode. |
| 496 | unsigned ARMMCCodeEmitter::NEONThumb2LoadStorePostEncoder(const MCInst &MI, |
| 497 | unsigned EncodedValue, |
| 498 | const MCSubtargetInfo &STI) const { |
| 499 | if (isThumb2(STI)) { |
| 500 | EncodedValue &= 0xF0FFFFFF; |
| 501 | EncodedValue |= 0x09000000; |
| 502 | } |
| 503 | |
| 504 | return EncodedValue; |
| 505 | } |
| 506 | |
| 507 | /// NEONThumb2DupPostEncoder - Post-process encoded NEON vdup |
| 508 | /// instructions, and rewrite them to their Thumb2 form if we are currently in |
| 509 | /// Thumb2 mode. |
| 510 | unsigned ARMMCCodeEmitter::NEONThumb2DupPostEncoder(const MCInst &MI, |
| 511 | unsigned EncodedValue, |
| 512 | const MCSubtargetInfo &STI) const { |
| 513 | if (isThumb2(STI)) { |
| 514 | EncodedValue &= 0x00FFFFFF; |
| 515 | EncodedValue |= 0xEE000000; |
| 516 | } |
| 517 | |
| 518 | return EncodedValue; |
| 519 | } |
| 520 | |
| 521 | /// Post-process encoded NEON v8 instructions, and rewrite them to Thumb2 form |
| 522 | /// if we are in Thumb2. |
| 523 | unsigned ARMMCCodeEmitter::NEONThumb2V8PostEncoder(const MCInst &MI, |
| 524 | unsigned EncodedValue, |
| 525 | const MCSubtargetInfo &STI) const { |
| 526 | if (isThumb2(STI)) { |
| 527 | EncodedValue |= 0xC000000; // Set bits 27-26 |
| 528 | } |
| 529 | |
| 530 | return EncodedValue; |
| 531 | } |
| 532 | |
| 533 | /// VFPThumb2PostEncoder - Post-process encoded VFP instructions and rewrite |
| 534 | /// them to their Thumb2 form if we are currently in Thumb2 mode. |
| 535 | unsigned ARMMCCodeEmitter:: |
| 536 | VFPThumb2PostEncoder(const MCInst &MI, unsigned EncodedValue, |
| 537 | const MCSubtargetInfo &STI) const { |
| 538 | if (isThumb2(STI)) { |
| 539 | EncodedValue &= 0x0FFFFFFF; |
| 540 | EncodedValue |= 0xE0000000; |
| 541 | } |
| 542 | return EncodedValue; |
| 543 | } |
| 544 | |
| 545 | /// getMachineOpValue - Return binary encoding of operand. If the machine |
| 546 | /// operand requires relocation, record the relocation and return zero. |
| 547 | unsigned ARMMCCodeEmitter:: |
| 548 | getMachineOpValue(const MCInst &MI, const MCOperand &MO, |
| 549 | SmallVectorImpl<MCFixup> &Fixups, |
| 550 | const MCSubtargetInfo &STI) const { |
| 551 | if (MO.isReg()) { |
| 552 | MCRegister Reg = MO.getReg(); |
| 553 | unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg); |
| 554 | |
| 555 | // In NEON, Q registers are encoded as 2x their register number, |
| 556 | // because they're using the same indices as the D registers they |
| 557 | // overlap. In MVE, there are no 64-bit vector instructions, so |
| 558 | // the encodings all refer to Q-registers by their literal |
| 559 | // register number. |
| 560 | |
| 561 | if (STI.hasFeature(Feature: ARM::HasMVEIntegerOps)) |
| 562 | return RegNo; |
| 563 | |
| 564 | switch (Reg.id()) { |
| 565 | default: |
| 566 | return RegNo; |
| 567 | case ARM::Q0: case ARM::Q1: case ARM::Q2: case ARM::Q3: |
| 568 | case ARM::Q4: case ARM::Q5: case ARM::Q6: case ARM::Q7: |
| 569 | case ARM::Q8: case ARM::Q9: case ARM::Q10: case ARM::Q11: |
| 570 | case ARM::Q12: case ARM::Q13: case ARM::Q14: case ARM::Q15: |
| 571 | return 2 * RegNo; |
| 572 | } |
| 573 | } else if (MO.isImm()) { |
| 574 | return static_cast<unsigned>(MO.getImm()); |
| 575 | } else if (MO.isDFPImm()) { |
| 576 | return static_cast<unsigned>(APFloat(bit_cast<double>(from: MO.getDFPImm())) |
| 577 | .bitcastToAPInt() |
| 578 | .getHiBits(numBits: 32) |
| 579 | .getLimitedValue()); |
| 580 | } |
| 581 | |
| 582 | llvm_unreachable("Unable to encode MCOperand!" ); |
| 583 | } |
| 584 | |
/// EncodeAddrModeOpValues - Encode a 'reg +/- imm' operand. Returns true for
/// an add and false for a subtract (including #-0), and sets Reg/Imm to the
/// encoded base register and the absolute value of the immediate.
| 586 | bool ARMMCCodeEmitter:: |
| 587 | EncodeAddrModeOpValues(const MCInst &MI, unsigned OpIdx, unsigned &Reg, |
| 588 | unsigned &Imm, SmallVectorImpl<MCFixup> &Fixups, |
| 589 | const MCSubtargetInfo &STI) const { |
| 590 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
| 591 | const MCOperand &MO1 = MI.getOperand(i: OpIdx + 1); |
| 592 | |
| 593 | Reg = CTX.getRegisterInfo()->getEncodingValue(Reg: MO.getReg()); |
| 594 | |
| 595 | int32_t SImm = MO1.getImm(); |
| 596 | bool isAdd = true; |
| 597 | |
| 598 | // Special value for #-0 |
| 599 | if (SImm == INT32_MIN) { |
| 600 | SImm = 0; |
| 601 | isAdd = false; |
| 602 | } |
| 603 | |
| 604 | // Immediate is always encoded as positive. The 'U' bit controls add vs sub. |
| 605 | if (SImm < 0) { |
| 606 | SImm = -SImm; |
| 607 | isAdd = false; |
| 608 | } |
| 609 | |
| 610 | Imm = SImm; |
| 611 | return isAdd; |
| 612 | } |
| 613 | |
| 614 | /// getBranchTargetOpValue - Helper function to get the branch target operand, |
| 615 | /// which is either an immediate or requires a fixup. |
| 616 | static uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 617 | unsigned FixupKind, |
| 618 | SmallVectorImpl<MCFixup> &Fixups, |
| 619 | const MCSubtargetInfo &STI) { |
| 620 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
| 621 | |
| 622 | // If the destination is an immediate, we have nothing to do. |
| 623 | if (MO.isImm()) return MO.getImm(); |
| 624 | assert(MO.isExpr() && "Unexpected branch target type!" ); |
| 625 | const MCExpr *Expr = MO.getExpr(); |
| 626 | MCFixupKind Kind = MCFixupKind(FixupKind); |
| 627 | addFixup(Fixups, Offset: 0, Value: Expr, Kind); |
| 628 | |
| 629 | // All of the information is in the fixup. |
| 630 | return 0; |
| 631 | } |
| 632 | |
| 633 | // Thumb BL and BLX use a strange offset encoding where bits 22 and 21 are |
| 634 | // determined by negating them and XOR'ing them with bit 23. |
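// In ARM ARM terms, J1 = NOT(I1) EOR S and J2 = NOT(I2) EOR S, where I1 and
// I2 are bits 22 and 21 of the halfword offset and S is its sign bit.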
| 635 | static int32_t encodeThumbBLOffset(int32_t offset) { |
| 636 | offset >>= 1; |
| 637 | uint32_t S = (offset & 0x800000) >> 23; |
| 638 | uint32_t J1 = (offset & 0x400000) >> 22; |
| 639 | uint32_t J2 = (offset & 0x200000) >> 21; |
| 640 | J1 = (~J1 & 0x1); |
| 641 | J2 = (~J2 & 0x1); |
| 642 | J1 ^= S; |
| 643 | J2 ^= S; |
| 644 | |
| 645 | offset &= ~0x600000; |
| 646 | offset |= J1 << 22; |
| 647 | offset |= J2 << 21; |
| 648 | |
| 649 | return offset; |
| 650 | } |
| 651 | |
| 652 | /// getThumbBLTargetOpValue - Return encoding info for immediate branch target. |
| 653 | uint32_t ARMMCCodeEmitter:: |
| 654 | getThumbBLTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 655 | SmallVectorImpl<MCFixup> &Fixups, |
| 656 | const MCSubtargetInfo &STI) const { |
| 657 | const MCOperand MO = MI.getOperand(i: OpIdx); |
| 658 | if (MO.isExpr()) |
| 659 | return ::getBranchTargetOpValue(MI, OpIdx, FixupKind: ARM::fixup_arm_thumb_bl, |
| 660 | Fixups, STI); |
| 661 | return encodeThumbBLOffset(offset: MO.getImm()); |
| 662 | } |
| 663 | |
| 664 | /// getThumbBLXTargetOpValue - Return encoding info for Thumb immediate |
| 665 | /// BLX branch target. |
| 666 | uint32_t ARMMCCodeEmitter:: |
| 667 | getThumbBLXTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 668 | SmallVectorImpl<MCFixup> &Fixups, |
| 669 | const MCSubtargetInfo &STI) const { |
| 670 | const MCOperand MO = MI.getOperand(i: OpIdx); |
| 671 | if (MO.isExpr()) |
| 672 | return ::getBranchTargetOpValue(MI, OpIdx, FixupKind: ARM::fixup_arm_thumb_blx, |
| 673 | Fixups, STI); |
| 674 | return encodeThumbBLOffset(offset: MO.getImm()); |
| 675 | } |
| 676 | |
| 677 | /// getThumbBRTargetOpValue - Return encoding info for Thumb branch target. |
| 678 | uint32_t ARMMCCodeEmitter:: |
| 679 | getThumbBRTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 680 | SmallVectorImpl<MCFixup> &Fixups, |
| 681 | const MCSubtargetInfo &STI) const { |
| 682 | const MCOperand MO = MI.getOperand(i: OpIdx); |
| 683 | if (MO.isExpr()) |
| 684 | return ::getBranchTargetOpValue(MI, OpIdx, FixupKind: ARM::fixup_arm_thumb_br, |
| 685 | Fixups, STI); |
| 686 | return (MO.getImm() >> 1); |
| 687 | } |
| 688 | |
| 689 | /// getThumbBCCTargetOpValue - Return encoding info for Thumb branch target. |
| 690 | uint32_t ARMMCCodeEmitter:: |
| 691 | getThumbBCCTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 692 | SmallVectorImpl<MCFixup> &Fixups, |
| 693 | const MCSubtargetInfo &STI) const { |
| 694 | const MCOperand MO = MI.getOperand(i: OpIdx); |
| 695 | if (MO.isExpr()) |
| 696 | return ::getBranchTargetOpValue(MI, OpIdx, FixupKind: ARM::fixup_arm_thumb_bcc, |
| 697 | Fixups, STI); |
| 698 | return (MO.getImm() >> 1); |
| 699 | } |
| 700 | |
| 701 | /// getThumbCBTargetOpValue - Return encoding info for Thumb branch target. |
| 702 | uint32_t ARMMCCodeEmitter:: |
| 703 | getThumbCBTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 704 | SmallVectorImpl<MCFixup> &Fixups, |
| 705 | const MCSubtargetInfo &STI) const { |
| 706 | const MCOperand MO = MI.getOperand(i: OpIdx); |
| 707 | if (MO.isExpr()) |
| 708 | return ::getBranchTargetOpValue(MI, OpIdx, FixupKind: ARM::fixup_arm_thumb_cb, Fixups, STI); |
| 709 | return (MO.getImm() >> 1); |
| 710 | } |
| 711 | |
/// Return true if this branch has non-always (conditional) predication.
| 713 | static bool HasConditionalBranch(const MCInst &MI) { |
| 714 | int NumOp = MI.getNumOperands(); |
| 715 | if (NumOp >= 2) { |
| 716 | for (int i = 0; i < NumOp-1; ++i) { |
| 717 | const MCOperand &MCOp1 = MI.getOperand(i); |
| 718 | const MCOperand &MCOp2 = MI.getOperand(i: i + 1); |
| 719 | if (MCOp1.isImm() && MCOp2.isReg() && |
| 720 | (!MCOp2.getReg() || MCOp2.getReg() == ARM::CPSR)) { |
| 721 | if (ARMCC::CondCodes(MCOp1.getImm()) != ARMCC::AL) |
| 722 | return true; |
| 723 | } |
| 724 | } |
| 725 | } |
| 726 | return false; |
| 727 | } |
| 728 | |
| 729 | /// getBranchTargetOpValue - Return encoding info for 24-bit immediate branch |
| 730 | /// target. |
| 731 | uint32_t ARMMCCodeEmitter:: |
| 732 | getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 733 | SmallVectorImpl<MCFixup> &Fixups, |
| 734 | const MCSubtargetInfo &STI) const { |
| 735 | // FIXME: This really, really shouldn't use TargetMachine. We don't want |
| 736 | // coupling between MC and TM anywhere we can help it. |
| 737 | if (isThumb2(STI)) |
| 738 | return |
| 739 | ::getBranchTargetOpValue(MI, OpIdx, FixupKind: ARM::fixup_t2_condbranch, Fixups, STI); |
| 740 | return getARMBranchTargetOpValue(MI, OpIdx, Fixups, STI); |
| 741 | } |
| 742 | |
/// getARMBranchTargetOpValue - Return encoding info for 24-bit immediate
/// branch target.
| 745 | uint32_t ARMMCCodeEmitter:: |
| 746 | getARMBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 747 | SmallVectorImpl<MCFixup> &Fixups, |
| 748 | const MCSubtargetInfo &STI) const { |
| 749 | const MCOperand MO = MI.getOperand(i: OpIdx); |
| 750 | if (MO.isExpr()) { |
| 751 | if (HasConditionalBranch(MI)) |
| 752 | return ::getBranchTargetOpValue(MI, OpIdx, |
| 753 | FixupKind: ARM::fixup_arm_condbranch, Fixups, STI); |
| 754 | return ::getBranchTargetOpValue(MI, OpIdx, |
| 755 | FixupKind: ARM::fixup_arm_uncondbranch, Fixups, STI); |
| 756 | } |
| 757 | |
| 758 | return MO.getImm() >> 2; |
| 759 | } |
| 760 | |
| 761 | uint32_t ARMMCCodeEmitter:: |
| 762 | getARMBLTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 763 | SmallVectorImpl<MCFixup> &Fixups, |
| 764 | const MCSubtargetInfo &STI) const { |
| 765 | const MCOperand MO = MI.getOperand(i: OpIdx); |
| 766 | if (MO.isExpr()) { |
| 767 | if (HasConditionalBranch(MI)) |
| 768 | return ::getBranchTargetOpValue(MI, OpIdx, |
| 769 | FixupKind: ARM::fixup_arm_condbl, Fixups, STI); |
| 770 | return ::getBranchTargetOpValue(MI, OpIdx, FixupKind: ARM::fixup_arm_uncondbl, Fixups, STI); |
| 771 | } |
| 772 | |
| 773 | return MO.getImm() >> 2; |
| 774 | } |
| 775 | |
| 776 | uint32_t ARMMCCodeEmitter:: |
| 777 | getARMBLXTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 778 | SmallVectorImpl<MCFixup> &Fixups, |
| 779 | const MCSubtargetInfo &STI) const { |
| 780 | const MCOperand MO = MI.getOperand(i: OpIdx); |
| 781 | if (MO.isExpr()) |
| 782 | return ::getBranchTargetOpValue(MI, OpIdx, FixupKind: ARM::fixup_arm_blx, Fixups, STI); |
| 783 | |
| 784 | return MO.getImm() >> 1; |
| 785 | } |
| 786 | |
/// getThumbBranchTargetOpValue - Return encoding info for 24-bit immediate
/// Thumb2 direct branch target.
| 789 | uint32_t ARMMCCodeEmitter::getThumbBranchTargetOpValue( |
| 790 | const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups, |
| 791 | const MCSubtargetInfo &STI) const { |
| 792 | unsigned Val = 0; |
| 793 | const MCOperand MO = MI.getOperand(i: OpIdx); |
| 794 | |
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_uncondbranch,
                                    Fixups, STI);
  Val = MO.getImm() >> 1;
| 799 | |
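  // Convert the I1/I2 bits of the offset into the J1/J2 bits of the Thumb2
  // B.W (T4) encoding: J = NOT(I) when the sign bit S is clear, and J = I
  // when it is set.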
| 800 | bool I = (Val & 0x800000); |
| 801 | bool J1 = (Val & 0x400000); |
| 802 | bool J2 = (Val & 0x200000); |
| 803 | if (I ^ J1) |
| 804 | Val &= ~0x400000; |
| 805 | else |
| 806 | Val |= 0x400000; |
| 807 | |
| 808 | if (I ^ J2) |
| 809 | Val &= ~0x200000; |
| 810 | else |
| 811 | Val |= 0x200000; |
| 812 | |
| 813 | return Val; |
| 814 | } |
| 815 | |
| 816 | /// getAdrLabelOpValue - Return encoding info for 12-bit shifted-immediate |
| 817 | /// ADR label target. |
| 818 | uint32_t ARMMCCodeEmitter:: |
| 819 | getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx, |
| 820 | SmallVectorImpl<MCFixup> &Fixups, |
| 821 | const MCSubtargetInfo &STI) const { |
| 822 | const MCOperand MO = MI.getOperand(i: OpIdx); |
| 823 | if (MO.isExpr()) |
| 824 | return ::getBranchTargetOpValue(MI, OpIdx, FixupKind: ARM::fixup_arm_adr_pcrel_12, |
| 825 | Fixups, STI); |
| 826 | int64_t offset = MO.getImm(); |
| 827 | uint32_t Val = 0x2000; |
| 828 | |
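  // The returned value uses bit 13 for the add form and bit 12 for the sub
  // form of ADR; bits {11-0} hold the so_imm-encoded offset.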
  int SoImmVal;
  if (offset == INT32_MIN) {
    Val = 0x1000;
    SoImmVal = 0;
  } else if (offset < 0) {
    Val = 0x1000;
    offset *= -1;
    SoImmVal = ARM_AM::getSOImmVal(offset);
    if (SoImmVal == -1) {
      Val = 0x2000;
      offset *= -1;
      SoImmVal = ARM_AM::getSOImmVal(offset);
    }
  } else {
    SoImmVal = ARM_AM::getSOImmVal(offset);
    if (SoImmVal == -1) {
      Val = 0x1000;
      offset *= -1;
      SoImmVal = ARM_AM::getSOImmVal(offset);
    }
  }
| 850 | |
| 851 | assert(SoImmVal != -1 && "Not a valid so_imm value!" ); |
| 852 | |
| 853 | Val |= SoImmVal; |
| 854 | return Val; |
| 855 | } |
| 856 | |
| 857 | /// getT2AdrLabelOpValue - Return encoding info for 12-bit immediate ADR label |
| 858 | /// target. |
| 859 | uint32_t ARMMCCodeEmitter:: |
| 860 | getT2AdrLabelOpValue(const MCInst &MI, unsigned OpIdx, |
| 861 | SmallVectorImpl<MCFixup> &Fixups, |
| 862 | const MCSubtargetInfo &STI) const { |
| 863 | const MCOperand MO = MI.getOperand(i: OpIdx); |
| 864 | if (MO.isExpr()) |
| 865 | return ::getBranchTargetOpValue(MI, OpIdx, FixupKind: ARM::fixup_t2_adr_pcrel_12, |
| 866 | Fixups, STI); |
| 867 | int32_t Val = MO.getImm(); |
| 868 | if (Val == INT32_MIN) |
| 869 | Val = 0x1000; |
| 870 | else if (Val < 0) { |
| 871 | Val *= -1; |
| 872 | Val |= 0x1000; |
| 873 | } |
| 874 | return Val; |
| 875 | } |
| 876 | |
| 877 | /// getITMaskOpValue - Return the architectural encoding of an IT |
| 878 | /// predication mask, given the MCOperand format. |
| 879 | uint32_t ARMMCCodeEmitter:: |
| 880 | getITMaskOpValue(const MCInst &MI, unsigned OpIdx, |
| 881 | SmallVectorImpl<MCFixup> &Fixups, |
| 882 | const MCSubtargetInfo &STI) const { |
| 883 | const MCOperand MaskMO = MI.getOperand(i: OpIdx); |
| 884 | assert(MaskMO.isImm() && "Unexpected operand type!" ); |
| 885 | |
| 886 | unsigned Mask = MaskMO.getImm(); |
| 887 | |
| 888 | // IT masks are encoded as a sequence of replacement low-order bits |
| 889 | // for the condition code. So if the low bit of the starting |
| 890 | // condition code is 1, then we have to flip all the bits above the |
| 891 | // terminating bit (which is the lowest 1 bit). |
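  // For example, for ITET with a condition whose low bit is set (e.g. NE),
  // the operand mask 0b1010 (0 = Then, 1 = Else, terminating 1 in bit 1)
  // becomes the architectural mask 0b0110 after flipping bits 3-2.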
| 892 | assert(OpIdx > 0 && "IT mask appears first!" ); |
| 893 | const MCOperand CondMO = MI.getOperand(i: OpIdx-1); |
| 894 | assert(CondMO.isImm() && "Unexpected operand type!" ); |
| 895 | if (CondMO.getImm() & 1) { |
| 896 | unsigned LowBit = Mask & -Mask; |
| 897 | unsigned BitsAboveLowBit = 0xF & (-LowBit << 1); |
| 898 | Mask ^= BitsAboveLowBit; |
| 899 | } |
| 900 | |
| 901 | return Mask; |
| 902 | } |
| 903 | |
| 904 | /// getThumbAdrLabelOpValue - Return encoding info for 8-bit immediate ADR label |
| 905 | /// target. |
| 906 | uint32_t ARMMCCodeEmitter:: |
| 907 | getThumbAdrLabelOpValue(const MCInst &MI, unsigned OpIdx, |
| 908 | SmallVectorImpl<MCFixup> &Fixups, |
| 909 | const MCSubtargetInfo &STI) const { |
| 910 | const MCOperand MO = MI.getOperand(i: OpIdx); |
| 911 | if (MO.isExpr()) |
| 912 | return ::getBranchTargetOpValue(MI, OpIdx, FixupKind: ARM::fixup_thumb_adr_pcrel_10, |
| 913 | Fixups, STI); |
| 914 | return MO.getImm(); |
| 915 | } |
| 916 | |
| 917 | /// getThumbAddrModeRegRegOpValue - Return encoding info for 'reg + reg' |
| 918 | /// operand. |
| 919 | uint32_t ARMMCCodeEmitter:: |
| 920 | getThumbAddrModeRegRegOpValue(const MCInst &MI, unsigned OpIdx, |
| 921 | SmallVectorImpl<MCFixup> &, |
| 922 | const MCSubtargetInfo &STI) const { |
| 923 | // [Rn, Rm] |
| 924 | // {5-3} = Rm |
| 925 | // {2-0} = Rn |
| 926 | const MCOperand &MO1 = MI.getOperand(i: OpIdx); |
| 927 | const MCOperand &MO2 = MI.getOperand(i: OpIdx + 1); |
| 928 | unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(Reg: MO1.getReg()); |
| 929 | unsigned Rm = CTX.getRegisterInfo()->getEncodingValue(Reg: MO2.getReg()); |
| 930 | return (Rm << 3) | Rn; |
| 931 | } |
| 932 | |
| 933 | /// getMVEShiftImmOpValue - Return encoding info for the 'sz:imm5' |
| 934 | /// operand. |
| 935 | uint32_t |
| 936 | ARMMCCodeEmitter::getMVEShiftImmOpValue(const MCInst &MI, unsigned OpIdx, |
| 937 | SmallVectorImpl<MCFixup> &Fixups, |
| 938 | const MCSubtargetInfo &STI) const { |
| 939 | // {4-0} = szimm5 |
| 940 | // The value we are trying to encode is an immediate between either the |
| 941 | // range of [1-7] or [1-15] depending on whether we are dealing with the |
| 942 | // u8/s8 or the u16/s16 variants respectively. |
| 943 | // This value is encoded as follows, if ShiftImm is the value within those |
| 944 | // ranges then the encoding szimm5 = ShiftImm + size, where size is either 8 |
| 945 | // or 16. |
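  // For example, a u16/s16 variant with ShiftImm == 3 is encoded as
  // 16 + 3, i.e. szimm5 == 0b10011.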
| 946 | |
| 947 | unsigned Size, ShiftImm; |
  switch (MI.getOpcode()) {
| 949 | case ARM::MVE_VSHLL_imms16bh: |
| 950 | case ARM::MVE_VSHLL_imms16th: |
| 951 | case ARM::MVE_VSHLL_immu16bh: |
| 952 | case ARM::MVE_VSHLL_immu16th: |
| 953 | Size = 16; |
| 954 | break; |
| 955 | case ARM::MVE_VSHLL_imms8bh: |
| 956 | case ARM::MVE_VSHLL_imms8th: |
| 957 | case ARM::MVE_VSHLL_immu8bh: |
| 958 | case ARM::MVE_VSHLL_immu8th: |
| 959 | Size = 8; |
| 960 | break; |
| 961 | default: |
| 962 | llvm_unreachable("Use of operand not supported by this instruction" ); |
| 963 | } |
| 964 | ShiftImm = MI.getOperand(i: OpIdx).getImm(); |
| 965 | return Size + ShiftImm; |
| 966 | } |
| 967 | |
| 968 | /// getAddrModeImm12OpValue - Return encoding info for 'reg +/- imm12' operand. |
| 969 | uint32_t ARMMCCodeEmitter:: |
| 970 | getAddrModeImm12OpValue(const MCInst &MI, unsigned OpIdx, |
| 971 | SmallVectorImpl<MCFixup> &Fixups, |
| 972 | const MCSubtargetInfo &STI) const { |
| 973 | // {17-13} = reg |
| 974 | // {12} = (U)nsigned (add == '1', sub == '0') |
| 975 | // {11-0} = imm12 |
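  // For example, [r3, #-20] encodes as Reg == 3, U == 0, imm12 == 20,
  // i.e. (3 << 13) | 20 == 0x6014.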
| 976 | unsigned Reg = 0, Imm12 = 0; |
| 977 | bool isAdd = true; |
  // If the first operand isn't a register, we have a label reference.
| 979 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
| 980 | if (MO.isReg()) { |
| 981 | const MCOperand &MO1 = MI.getOperand(i: OpIdx + 1); |
| 982 | if (MO1.isImm()) { |
| 983 | isAdd = EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm&: Imm12, Fixups, STI); |
| 984 | } else if (MO1.isExpr()) { |
| 985 | assert(!isThumb(STI) && !isThumb2(STI) && |
| 986 | "Thumb mode requires different encoding" ); |
| 987 | Reg = CTX.getRegisterInfo()->getEncodingValue(Reg: MO.getReg()); |
| 988 | isAdd = false; // 'U' bit is set as part of the fixup. |
| 989 | MCFixupKind Kind = ARM::fixup_arm_ldst_abs_12; |
| 990 | addFixup(Fixups, Offset: 0, Value: MO1.getExpr(), Kind); |
| 991 | } |
| 992 | } else if (MO.isExpr()) { |
| 993 | Reg = CTX.getRegisterInfo()->getEncodingValue(Reg: ARM::PC); // Rn is PC. |
| 994 | isAdd = false; // 'U' bit is set as part of the fixup. |
| 995 | MCFixupKind Kind; |
| 996 | if (isThumb2(STI)) |
| 997 | Kind = ARM::fixup_t2_ldst_pcrel_12; |
| 998 | else |
| 999 | Kind = ARM::fixup_arm_ldst_pcrel_12; |
| 1000 | addFixup(Fixups, Offset: 0, Value: MO.getExpr(), Kind); |
| 1001 | |
| 1002 | ++MCNumCPRelocations; |
| 1003 | } else { |
| 1004 | Reg = ARM::PC; |
| 1005 | int32_t Offset = MO.getImm(); |
| 1006 | if (Offset == INT32_MIN) { |
| 1007 | Offset = 0; |
| 1008 | isAdd = false; |
| 1009 | } else if (Offset < 0) { |
| 1010 | Offset *= -1; |
| 1011 | isAdd = false; |
| 1012 | } |
| 1013 | Imm12 = Offset; |
| 1014 | } |
| 1015 | uint32_t Binary = Imm12 & 0xfff; |
| 1016 | // Immediate is always encoded as positive. The 'U' bit controls add vs sub. |
| 1017 | if (isAdd) |
| 1018 | Binary |= (1 << 12); |
| 1019 | Binary |= (Reg << 13); |
| 1020 | return Binary; |
| 1021 | } |
| 1022 | |
| 1023 | template<unsigned Bits, unsigned Shift> |
| 1024 | uint32_t ARMMCCodeEmitter:: |
| 1025 | getT2ScaledImmOpValue(const MCInst &MI, unsigned OpIdx, |
| 1026 | SmallVectorImpl<MCFixup> &Fixups, |
| 1027 | const MCSubtargetInfo &STI) const { |
| 1028 | // FIXME: The immediate operand should have already been encoded like this |
| 1029 | // before ever getting here. The encoder method should just need to combine |
| 1030 | // the MI operands for the register and the offset into a single |
| 1031 | // representation for the complex operand in the .td file. This isn't just |
| 1032 | // style, unfortunately. As-is, we can't represent the distinct encoding |
| 1033 | // for #-0. |
| 1034 | |
| 1035 | // {Bits} = (U)nsigned (add == '1', sub == '0') |
| 1036 | // {(Bits-1)-0} = immediate |
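  // For example, with Bits == 7 and Shift == 2, an offset of -8 encodes as
  // U == 0 and imm7 == 2 (the byte offset scaled down by 4).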
| 1037 | int32_t Imm = MI.getOperand(i: OpIdx).getImm(); |
| 1038 | bool isAdd = Imm >= 0; |
| 1039 | |
| 1040 | // Immediate is always encoded as positive. The 'U' bit controls add vs sub. |
| 1041 | if (Imm < 0) |
| 1042 | Imm = -(uint32_t)Imm; |
| 1043 | |
| 1044 | Imm >>= Shift; |
| 1045 | |
| 1046 | uint32_t Binary = Imm & ((1U << Bits) - 1); |
| 1047 | // Immediate is always encoded as positive. The 'U' bit controls add vs sub. |
| 1048 | if (isAdd) |
| 1049 | Binary |= (1U << Bits); |
| 1050 | return Binary; |
| 1051 | } |
| 1052 | |
| 1053 | /// getMveAddrModeRQOpValue - Return encoding info for 'reg, vreg' |
| 1054 | /// operand. |
| 1055 | uint32_t ARMMCCodeEmitter:: |
| 1056 | getMveAddrModeRQOpValue(const MCInst &MI, unsigned OpIdx, |
| 1057 | SmallVectorImpl<MCFixup> &Fixups, |
| 1058 | const MCSubtargetInfo &STI) const { |
| 1059 | // {6-3} Rn |
| 1060 | // {2-0} Qm |
| 1061 | const MCOperand &M0 = MI.getOperand(i: OpIdx); |
| 1062 | const MCOperand &M1 = MI.getOperand(i: OpIdx + 1); |
| 1063 | |
| 1064 | unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(Reg: M0.getReg()); |
| 1065 | unsigned Qm = CTX.getRegisterInfo()->getEncodingValue(Reg: M1.getReg()); |
| 1066 | |
| 1067 | assert(Qm < 8 && "Qm is supposed to be encodable in 3 bits" ); |
| 1068 | |
| 1069 | return (Rn << 3) | Qm; |
| 1070 | } |
| 1071 | |
/// getMveAddrModeQOpValue - Return encoding info for 'reg +/- imm7<<{shift}'
/// operand.
| 1074 | template<int shift> |
| 1075 | uint32_t ARMMCCodeEmitter:: |
| 1076 | getMveAddrModeQOpValue(const MCInst &MI, unsigned OpIdx, |
| 1077 | SmallVectorImpl<MCFixup> &Fixups, |
| 1078 | const MCSubtargetInfo &STI) const { |
| 1079 | // {10-8} Qm |
| 1080 | // {7-0} Imm |
| 1081 | const MCOperand &M0 = MI.getOperand(i: OpIdx); |
| 1082 | const MCOperand &M1 = MI.getOperand(i: OpIdx + 1); |
| 1083 | |
| 1084 | unsigned Qm = CTX.getRegisterInfo()->getEncodingValue(Reg: M0.getReg()); |
| 1085 | int32_t Imm = M1.getImm(); |
| 1086 | |
| 1087 | bool isAdd = Imm >= 0; |
| 1088 | |
| 1089 | Imm >>= shift; |
| 1090 | |
| 1091 | if (!isAdd) |
| 1092 | Imm = -(uint32_t)Imm; |
| 1093 | |
| 1094 | Imm &= 0x7f; |
| 1095 | |
| 1096 | if (isAdd) |
| 1097 | Imm |= 0x80; |
| 1098 | |
| 1099 | assert(Qm < 8 && "Qm is supposed to be encodable in 3 bits" ); |
| 1100 | |
| 1101 | return (Qm << 8) | Imm; |
| 1102 | } |
| 1103 | |
| 1104 | /// getT2AddrModeImm8s4OpValue - Return encoding info for |
| 1105 | /// 'reg +/- imm8<<2' operand. |
| 1106 | uint32_t ARMMCCodeEmitter:: |
| 1107 | getT2AddrModeImm8s4OpValue(const MCInst &MI, unsigned OpIdx, |
| 1108 | SmallVectorImpl<MCFixup> &Fixups, |
| 1109 | const MCSubtargetInfo &STI) const { |
| 1110 | // {12-9} = reg |
| 1111 | // {8} = (U)nsigned (add == '1', sub == '0') |
| 1112 | // {7-0} = imm8 |
| 1113 | unsigned Reg, Imm8; |
| 1114 | bool isAdd = true; |
  // If the first operand isn't a register, we have a label reference.
  const MCOperand &MO = MI.getOperand(OpIdx);
  if (!MO.isReg()) {
    Reg = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC.
    Imm8 = 0;
    isAdd = false; // 'U' bit is set as part of the fixup.
| 1121 | |
| 1122 | assert(MO.isExpr() && "Unexpected machine operand type!" ); |
| 1123 | const MCExpr *Expr = MO.getExpr(); |
| 1124 | MCFixupKind Kind = ARM::fixup_t2_pcrel_10; |
| 1125 | addFixup(Fixups, Offset: 0, Value: Expr, Kind); |
| 1126 | |
| 1127 | ++MCNumCPRelocations; |
| 1128 | } else |
| 1129 | isAdd = EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm&: Imm8, Fixups, STI); |
| 1130 | |
| 1131 | // FIXME: The immediate operand should have already been encoded like this |
| 1132 | // before ever getting here. The encoder method should just need to combine |
| 1133 | // the MI operands for the register and the offset into a single |
| 1134 | // representation for the complex operand in the .td file. This isn't just |
| 1135 | // style, unfortunately. As-is, we can't represent the distinct encoding |
| 1136 | // for #-0. |
| 1137 | assert(((Imm8 & 0x3) == 0) && "Not a valid immediate!" ); |
| 1138 | uint32_t Binary = (Imm8 >> 2) & 0xff; |
| 1139 | // Immediate is always encoded as positive. The 'U' bit controls add vs sub. |
| 1140 | if (isAdd) |
| 1141 | Binary |= (1 << 8); |
| 1142 | Binary |= (Reg << 9); |
| 1143 | return Binary; |
| 1144 | } |
| 1145 | |
| 1146 | /// getT2AddrModeImm7s4OpValue - Return encoding info for |
| 1147 | /// 'reg +/- imm7<<2' operand. |
| 1148 | uint32_t |
| 1149 | ARMMCCodeEmitter::getT2AddrModeImm7s4OpValue(const MCInst &MI, unsigned OpIdx, |
| 1150 | SmallVectorImpl<MCFixup> &Fixups, |
| 1151 | const MCSubtargetInfo &STI) const { |
| 1152 | // {11-8} = reg |
| 1153 | // {7} = (A)dd (add == '1', sub == '0') |
| 1154 | // {6-0} = imm7 |
  unsigned Reg, Imm7;
  bool isAdd = EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm7, Fixups, STI);
| 1158 | |
| 1159 | // FIXME: The immediate operand should have already been encoded like this |
| 1160 | // before ever getting here. The encoder method should just need to combine |
| 1161 | // the MI operands for the register and the offset into a single |
| 1162 | // representation for the complex operand in the .td file. This isn't just |
| 1163 | // style, unfortunately. As-is, we can't represent the distinct encoding |
| 1164 | // for #-0. |
| 1165 | uint32_t Binary = (Imm7 >> 2) & 0xff; |
| 1166 | // Immediate is always encoded as positive. The 'A' bit controls add vs sub. |
| 1167 | if (isAdd) |
| 1168 | Binary |= (1 << 7); |
| 1169 | Binary |= (Reg << 8); |
| 1170 | return Binary; |
| 1171 | } |
| 1172 | |
| 1173 | /// getT2AddrModeImm0_1020s4OpValue - Return encoding info for |
| 1174 | /// 'reg + imm8<<2' operand. |
| 1175 | uint32_t ARMMCCodeEmitter:: |
| 1176 | getT2AddrModeImm0_1020s4OpValue(const MCInst &MI, unsigned OpIdx, |
| 1177 | SmallVectorImpl<MCFixup> &Fixups, |
| 1178 | const MCSubtargetInfo &STI) const { |
| 1179 | // {11-8} = reg |
| 1180 | // {7-0} = imm8 |
| 1181 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
| 1182 | const MCOperand &MO1 = MI.getOperand(i: OpIdx + 1); |
| 1183 | unsigned Reg = CTX.getRegisterInfo()->getEncodingValue(Reg: MO.getReg()); |
| 1184 | unsigned Imm8 = MO1.getImm(); |
| 1185 | return (Reg << 8) | Imm8; |
| 1186 | } |
| 1187 | |
| 1188 | uint32_t ARMMCCodeEmitter::getHiLoImmOpValue(const MCInst &MI, unsigned OpIdx, |
| 1189 | SmallVectorImpl<MCFixup> &Fixups, |
| 1190 | const MCSubtargetInfo &STI) const { |
| 1191 | // {20-16} = imm{15-12} |
| 1192 | // {11-0} = imm{11-0} |
| 1193 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
| 1194 | if (MO.isImm()) |
| 1195 | // Hi / lo bits already extracted during earlier passes. |
| 1196 | return static_cast<unsigned>(MO.getImm()); |
| 1197 | |
| 1198 | // Handle :upper16:, :lower16:, :upper8_15:, :upper0_7:, :lower8_15: |
| 1199 | // :lower0_7: assembly prefixes. |
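  // For a constant operand, e.g. movw r0, :lower16:(0x12345678) encodes the
  // immediate 0x5678, while movt r0, :upper16:(0x12345678) encodes 0x1234.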
| 1200 | const MCExpr *E = MO.getExpr(); |
| 1201 | MCFixupKind Kind; |
| 1202 | if (E->getKind() == MCExpr::Specifier) { |
| 1203 | auto *ARM16Expr = cast<MCSpecifierExpr>(Val: E); |
| 1204 | E = ARM16Expr->getSubExpr(); |
| 1205 | |
| 1206 | if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: E)) { |
| 1207 | const int64_t Value = MCE->getValue(); |
| 1208 | if (Value > UINT32_MAX) |
| 1209 | report_fatal_error(reason: "constant value truncated (limited to 32-bit)" ); |
| 1210 | |
| 1211 | switch (ARM16Expr->getSpecifier()) { |
| 1212 | case ARM::S_HI16: |
| 1213 | return (int32_t(Value) & 0xffff0000) >> 16; |
| 1214 | case ARM::S_LO16: |
| 1215 | return (int32_t(Value) & 0x0000ffff); |
| 1216 | |
| 1217 | case ARM::S_HI_8_15: |
| 1218 | return (int32_t(Value) & 0xff000000) >> 24; |
| 1219 | case ARM::S_HI_0_7: |
| 1220 | return (int32_t(Value) & 0x00ff0000) >> 16; |
| 1221 | case ARM::S_LO_8_15: |
| 1222 | return (int32_t(Value) & 0x0000ff00) >> 8; |
| 1223 | case ARM::S_LO_0_7: |
| 1224 | return (int32_t(Value) & 0x000000ff); |
| 1225 | |
| 1226 | default: llvm_unreachable("Unsupported ARMFixup" ); |
| 1227 | } |
| 1228 | } |
| 1229 | |
| 1230 | switch (ARM16Expr->getSpecifier()) { |
| 1231 | default: llvm_unreachable("Unsupported ARMFixup" ); |
| 1232 | case ARM::S_HI16: |
| 1233 | Kind = MCFixupKind(isThumb(STI) ? ARM::fixup_t2_movt_hi16 |
| 1234 | : ARM::fixup_arm_movt_hi16); |
| 1235 | break; |
| 1236 | case ARM::S_LO16: |
| 1237 | Kind = MCFixupKind(isThumb(STI) ? ARM::fixup_t2_movw_lo16 |
| 1238 | : ARM::fixup_arm_movw_lo16); |
| 1239 | break; |
| 1240 | case ARM::S_HI_8_15: |
| 1241 | if (!isThumb(STI)) |
| 1242 | llvm_unreachable(":upper_8_15: not supported in Arm state" ); |
| 1243 | Kind = ARM::fixup_arm_thumb_upper_8_15; |
| 1244 | break; |
| 1245 | case ARM::S_HI_0_7: |
| 1246 | if (!isThumb(STI)) |
| 1247 | llvm_unreachable(":upper_0_7: not supported in Arm state" ); |
| 1248 | Kind = ARM::fixup_arm_thumb_upper_0_7; |
| 1249 | break; |
| 1250 | case ARM::S_LO_8_15: |
| 1251 | if (!isThumb(STI)) |
| 1252 | llvm_unreachable(":lower_8_15: not supported in Arm state" ); |
| 1253 | Kind = ARM::fixup_arm_thumb_lower_8_15; |
| 1254 | break; |
| 1255 | case ARM::S_LO_0_7: |
| 1256 | if (!isThumb(STI)) |
| 1257 | llvm_unreachable(":lower_0_7: not supported in Arm state" ); |
| 1258 | Kind = ARM::fixup_arm_thumb_lower_0_7; |
| 1259 | break; |
| 1260 | } |
| 1261 | |
| 1262 | addFixup(Fixups, Offset: 0, Value: E, Kind); |
| 1263 | return 0; |
| 1264 | } |
  // If the expression doesn't have :upper16:, :lower16: on it, it's just a
  // plain immediate expression. Previously those evaluated to the lower 16
  // bits of the expression regardless of whether we had a movt or a movw,
  // but that led to misleading results. This is disallowed in the AsmParser
  // in validateInstruction(), so it should never happen here. The same holds
  // for thumb1 :upper8_15:, :upper0_7:, :lower8_15: or :lower0_7: with movs
  // or adds.
  llvm_unreachable("expression without :upper16:, :lower16:, :upper8_15:, "
                   ":upper0_7:, :lower8_15: or :lower0_7:");
| 1273 | } |
| 1274 | |
| 1275 | uint32_t ARMMCCodeEmitter:: |
| 1276 | getLdStSORegOpValue(const MCInst &MI, unsigned OpIdx, |
| 1277 | SmallVectorImpl<MCFixup> &Fixups, |
| 1278 | const MCSubtargetInfo &STI) const { |
| 1279 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
| 1280 | const MCOperand &MO1 = MI.getOperand(i: OpIdx+1); |
| 1281 | const MCOperand &MO2 = MI.getOperand(i: OpIdx+2); |
| 1282 | unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(Reg: MO.getReg()); |
| 1283 | unsigned Rm = CTX.getRegisterInfo()->getEncodingValue(Reg: MO1.getReg()); |
| 1284 | unsigned ShImm = ARM_AM::getAM2Offset(AM2Opc: MO2.getImm()); |
| 1285 | bool isAdd = ARM_AM::getAM2Op(AM2Opc: MO2.getImm()) == ARM_AM::add; |
| 1286 | ARM_AM::ShiftOpc ShOp = ARM_AM::getAM2ShiftOpc(AM2Opc: MO2.getImm()); |
| 1287 | unsigned SBits = getShiftOp(ShOpc: ShOp); |
| 1288 | |
| 1289 | // While "lsr #32" and "asr #32" exist, they are encoded with a 0 in the shift |
| 1290 | // amount. However, it would be an easy mistake to make so check here. |
| 1291 | assert((ShImm & ~0x1f) == 0 && "Out of range shift amount" ); |
| 1292 | |
| 1293 | // {16-13} = Rn |
| 1294 | // {12} = isAdd |
| 1295 | // {11-0} = shifter |
| 1296 | // {3-0} = Rm |
| 1297 | // {4} = 0 |
| 1298 | // {6-5} = type |
| 1299 | // {11-7} = imm |
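  // For example, [r1, r2, lsl #3] encodes as Rm == 2, type == 0b00 (lsl),
  // imm == 3, U == 1 and Rn == 1, i.e. 0x3182.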
| 1300 | uint32_t Binary = Rm; |
| 1301 | Binary |= Rn << 13; |
| 1302 | Binary |= SBits << 5; |
| 1303 | Binary |= ShImm << 7; |
| 1304 | if (isAdd) |
| 1305 | Binary |= 1 << 12; |
| 1306 | return Binary; |
| 1307 | } |
| 1308 | |
| 1309 | uint32_t ARMMCCodeEmitter:: |
| 1310 | getAddrMode2OffsetOpValue(const MCInst &MI, unsigned OpIdx, |
| 1311 | SmallVectorImpl<MCFixup> &Fixups, |
| 1312 | const MCSubtargetInfo &STI) const { |
  // {13} 1 == Rm, 0 == imm12
| 1314 | // {12} isAdd |
| 1315 | // {11-0} imm12/Rm |
| 1316 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
| 1317 | const MCOperand &MO1 = MI.getOperand(i: OpIdx+1); |
| 1318 | unsigned Imm = MO1.getImm(); |
| 1319 | bool isAdd = ARM_AM::getAM2Op(AM2Opc: Imm) == ARM_AM::add; |
| 1320 | bool isReg = MO.getReg().isValid(); |
| 1321 | uint32_t Binary = ARM_AM::getAM2Offset(AM2Opc: Imm); |
| 1322 | // if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm12 |
| 1323 | if (isReg) { |
| 1324 | ARM_AM::ShiftOpc ShOp = ARM_AM::getAM2ShiftOpc(AM2Opc: Imm); |
| 1325 | Binary <<= 7; // Shift amount is bits [11:7] |
| 1326 | Binary |= getShiftOp(ShOpc: ShOp) << 5; // Shift type is bits [6:5] |
| 1327 | Binary |= CTX.getRegisterInfo()->getEncodingValue(Reg: MO.getReg()); // Rm is bits [3:0] |
| 1328 | } |
| 1329 | return Binary | (isAdd << 12) | (isReg << 13); |
| 1330 | } |
| 1331 | |
| 1332 | uint32_t ARMMCCodeEmitter:: |
| 1333 | getPostIdxRegOpValue(const MCInst &MI, unsigned OpIdx, |
| 1334 | SmallVectorImpl<MCFixup> &Fixups, |
| 1335 | const MCSubtargetInfo &STI) const { |
| 1336 | // {4} isAdd |
| 1337 | // {3-0} Rm |
| 1338 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
| 1339 | const MCOperand &MO1 = MI.getOperand(i: OpIdx+1); |
| 1340 | bool isAdd = MO1.getImm() != 0; |
| 1341 | return CTX.getRegisterInfo()->getEncodingValue(Reg: MO.getReg()) | (isAdd << 4); |
| 1342 | } |
| 1343 | |
| 1344 | uint32_t ARMMCCodeEmitter:: |
| 1345 | getAddrMode3OffsetOpValue(const MCInst &MI, unsigned OpIdx, |
| 1346 | SmallVectorImpl<MCFixup> &Fixups, |
| 1347 | const MCSubtargetInfo &STI) const { |
| 1348 | // {9} 1 == imm8, 0 == Rm |
| 1349 | // {8} isAdd |
| 1350 | // {7-4} imm7_4/zero |
| 1351 | // {3-0} imm3_0/Rm |
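// For example (assuming ARM_AM::getAM3Offset returns the offset magnitude),
// a plain immediate offset of #-12 gives isImm = 1, isAdd = 0, Imm8 = 12,
// i.e. 12 | (1 << 9).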
| 1352 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
| 1353 | const MCOperand &MO1 = MI.getOperand(i: OpIdx+1); |
| 1354 | unsigned Imm = MO1.getImm(); |
| 1355 | bool isAdd = ARM_AM::getAM3Op(AM3Opc: Imm) == ARM_AM::add; |
| 1356 | bool isImm = !MO.getReg().isValid(); |
| 1357 | uint32_t Imm8 = ARM_AM::getAM3Offset(AM3Opc: Imm); |
| 1358 | // if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm8 |
| 1359 | if (!isImm) |
| 1360 | Imm8 = CTX.getRegisterInfo()->getEncodingValue(Reg: MO.getReg()); |
| 1361 | return Imm8 | (isAdd << 8) | (isImm << 9); |
| 1362 | } |
| 1363 | |
| 1364 | uint32_t ARMMCCodeEmitter:: |
| 1365 | getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx, |
| 1366 | SmallVectorImpl<MCFixup> &Fixups, |
| 1367 | const MCSubtargetInfo &STI) const { |
| 1368 | // {13} 1 == imm8, 0 == Rm |
| 1369 | // {12-9} Rn |
| 1370 | // {8} isAdd |
| 1371 | // {7-4} imm7_4/zero |
| 1372 | // {3-0} imm3_0/Rm |
| 1373 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
| 1374 | const MCOperand &MO1 = MI.getOperand(i: OpIdx+1); |
| 1375 | const MCOperand &MO2 = MI.getOperand(i: OpIdx+2); |
| 1376 | |
| 1377 | // If the first operand isn't a register, we have a label reference.
| 1378 | if (!MO.isReg()) { |
| 1379 | unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(Reg: ARM::PC); // Rn is PC. |
| 1380 | |
| 1381 | assert(MO.isExpr() && "Unexpected machine operand type!" ); |
| 1382 | const MCExpr *Expr = MO.getExpr(); |
| 1383 | MCFixupKind Kind = ARM::fixup_arm_pcrel_10_unscaled; |
| 1384 | addFixup(Fixups, Offset: 0, Value: Expr, Kind); |
| 1385 | |
| 1386 | ++MCNumCPRelocations; |
| 1387 | return (Rn << 9) | (1 << 13); |
| 1388 | } |
| 1389 | unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(Reg: MO.getReg()); |
| 1390 | unsigned Imm = MO2.getImm(); |
| 1391 | bool isAdd = ARM_AM::getAM3Op(AM3Opc: Imm) == ARM_AM::add; |
| 1392 | bool isImm = !MO1.getReg().isValid(); |
| 1393 | uint32_t Imm8 = ARM_AM::getAM3Offset(AM3Opc: Imm); |
| 1394 | // if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm8 |
| 1395 | if (!isImm) |
| 1396 | Imm8 = CTX.getRegisterInfo()->getEncodingValue(Reg: MO1.getReg()); |
| 1397 | return (Rn << 9) | Imm8 | (isAdd << 8) | (isImm << 13); |
| 1398 | } |
| 1399 | |
| 1400 | /// getAddrModeThumbSPOpValue - Encode the t_addrmode_sp operands. |
| 1401 | uint32_t ARMMCCodeEmitter:: |
| 1402 | getAddrModeThumbSPOpValue(const MCInst &MI, unsigned OpIdx, |
| 1403 | SmallVectorImpl<MCFixup> &Fixups, |
| 1404 | const MCSubtargetInfo &STI) const { |
| 1405 | // [SP, #imm] |
| 1406 | // {7-0} = imm8 |
| 1407 | const MCOperand &MO1 = MI.getOperand(i: OpIdx + 1); |
| 1408 | assert(MI.getOperand(OpIdx).getReg() == ARM::SP && |
| 1409 | "Unexpected base register!" ); |
| 1410 | |
| 1411 | // The immediate is already shifted for the implicit zeroes, so no change |
| 1412 | // here. |
| 1413 | return MO1.getImm() & 0xff; |
| 1414 | } |
| 1415 | |
| 1416 | /// getAddrModeISOpValue - Encode the t_addrmode_is# operands. |
| 1417 | uint32_t ARMMCCodeEmitter:: |
| 1418 | getAddrModeISOpValue(const MCInst &MI, unsigned OpIdx, |
| 1419 | SmallVectorImpl<MCFixup> &Fixups, |
| 1420 | const MCSubtargetInfo &STI) const { |
| 1421 | // [Rn, #imm] |
| 1422 | // {7-3} = imm5 |
| 1423 | // {2-0} = Rn |
| 1424 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
| 1425 | const MCOperand &MO1 = MI.getOperand(i: OpIdx + 1); |
| 1426 | unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(Reg: MO.getReg()); |
| 1427 | unsigned Imm5 = MO1.getImm(); |
| 1428 | return ((Imm5 & 0x1f) << 3) | Rn; |
| 1429 | } |
| 1430 | |
| 1431 | /// getAddrModePCOpValue - Return encoding for t_addrmode_pc operands. |
| 1432 | uint32_t ARMMCCodeEmitter:: |
| 1433 | getAddrModePCOpValue(const MCInst &MI, unsigned OpIdx, |
| 1434 | SmallVectorImpl<MCFixup> &Fixups, |
| 1435 | const MCSubtargetInfo &STI) const { |
| 1436 | const MCOperand MO = MI.getOperand(i: OpIdx); |
| 1437 | if (MO.isExpr()) |
| 1438 | return ::getBranchTargetOpValue(MI, OpIdx, FixupKind: ARM::fixup_arm_thumb_cp, Fixups, STI); |
| 1439 | return (MO.getImm() >> 2); |
| 1440 | } |
| 1441 | |
| 1442 | /// getAddrMode5OpValue - Return encoding info for 'reg +/- (imm8 << 2)' operand. |
| 1443 | uint32_t ARMMCCodeEmitter:: |
| 1444 | getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx, |
| 1445 | SmallVectorImpl<MCFixup> &Fixups, |
| 1446 | const MCSubtargetInfo &STI) const { |
| 1447 | // {12-9} = reg |
| 1448 | // {8} = (U)nsigned (add == '1', sub == '0') |
| 1449 | // {7-0} = imm8 |
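// For example, [r2, #-20] under the 'reg +/- (imm8 << 2)' form noted above
// would encode (assuming the AM5 opcode stores the scaled offset) as
// imm8 = 5 with the U bit clear: Binary = 5 | (2 << 9).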
| 1450 | unsigned Reg, Imm8; |
| 1451 | bool isAdd; |
| 1452 | // If the first operand isn't a register, we have a label reference.
| 1453 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
| 1454 | if (!MO.isReg()) { |
| 1455 | Reg = CTX.getRegisterInfo()->getEncodingValue(Reg: ARM::PC); // Rn is PC. |
| 1456 | Imm8 = 0; |
| 1457 | isAdd = false; // 'U' bit is handled as part of the fixup. |
| 1458 | |
| 1459 | assert(MO.isExpr() && "Unexpected machine operand type!" ); |
| 1460 | const MCExpr *Expr = MO.getExpr(); |
| 1461 | MCFixupKind Kind; |
| 1462 | if (isThumb2(STI)) |
| 1463 | Kind = ARM::fixup_t2_pcrel_10; |
| 1464 | else |
| 1465 | Kind = ARM::fixup_arm_pcrel_10; |
| 1466 | addFixup(Fixups, Offset: 0, Value: Expr, Kind); |
| 1467 | |
| 1468 | ++MCNumCPRelocations; |
| 1469 | } else { |
| 1470 | EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm&: Imm8, Fixups, STI); |
| 1471 | isAdd = ARM_AM::getAM5Op(AM5Opc: Imm8) == ARM_AM::add; |
| 1472 | } |
| 1473 | |
| 1474 | uint32_t Binary = ARM_AM::getAM5Offset(AM5Opc: Imm8); |
| 1475 | // Immediate is always encoded as positive. The 'U' bit controls add vs sub. |
| 1476 | if (isAdd) |
| 1477 | Binary |= (1 << 8); |
| 1478 | Binary |= (Reg << 9); |
| 1479 | return Binary; |
| 1480 | } |
| 1481 | |
| 1482 | /// getAddrMode5FP16OpValue - Return encoding info for 'reg +/- (imm8 << 1)' operand. |
| 1483 | uint32_t ARMMCCodeEmitter:: |
| 1484 | getAddrMode5FP16OpValue(const MCInst &MI, unsigned OpIdx, |
| 1485 | SmallVectorImpl<MCFixup> &Fixups, |
| 1486 | const MCSubtargetInfo &STI) const { |
| 1487 | // {12-9} = reg |
| 1488 | // {8} = (U)nsigned (add == '1', sub == '0') |
| 1489 | // {7-0} = imm8 |
| 1490 | unsigned Reg, Imm8; |
| 1491 | bool isAdd; |
| 1492 | // If the first operand isn't a register, we have a label reference.
| 1493 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
| 1494 | if (!MO.isReg()) { |
| 1495 | Reg = CTX.getRegisterInfo()->getEncodingValue(Reg: ARM::PC); // Rn is PC. |
| 1496 | Imm8 = 0; |
| 1497 | isAdd = false; // 'U' bit is handled as part of the fixup. |
| 1498 | |
| 1499 | assert(MO.isExpr() && "Unexpected machine operand type!" ); |
| 1500 | const MCExpr *Expr = MO.getExpr(); |
| 1501 | MCFixupKind Kind; |
| 1502 | if (isThumb2(STI)) |
| 1503 | Kind = ARM::fixup_t2_pcrel_9; |
| 1504 | else |
| 1505 | Kind = ARM::fixup_arm_pcrel_9; |
| 1506 | addFixup(Fixups, Offset: 0, Value: Expr, Kind); |
| 1507 | |
| 1508 | ++MCNumCPRelocations; |
| 1509 | } else { |
| 1510 | EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm&: Imm8, Fixups, STI); |
| 1511 | isAdd = ARM_AM::getAM5Op(AM5Opc: Imm8) == ARM_AM::add; |
| 1512 | } |
| 1513 | |
| 1514 | uint32_t Binary = ARM_AM::getAM5Offset(AM5Opc: Imm8); |
| 1515 | // Immediate is always encoded as positive. The 'U' bit controls add vs sub. |
| 1516 | if (isAdd) |
| 1517 | Binary |= (1 << 8); |
| 1518 | Binary |= (Reg << 9); |
| 1519 | return Binary; |
| 1520 | } |
| 1521 | |
| 1522 | unsigned ARMMCCodeEmitter::getModImmOpValue(const MCInst &MI, unsigned Op, |
| 1523 | SmallVectorImpl<MCFixup> &Fixups, |
| 1524 | const MCSubtargetInfo &ST) const { |
| 1525 | const MCOperand &MO = MI.getOperand(i: Op); |
| 1526 | |
| 1527 | // Support for fixups (MCFixup) |
| 1528 | if (MO.isExpr()) { |
| 1529 | const MCExpr *Expr = MO.getExpr(); |
| 1530 | // Fixups resolve to plain values that need to be encoded. |
| 1531 | MCFixupKind Kind = ARM::fixup_arm_mod_imm; |
| 1532 | addFixup(Fixups, Offset: 0, Value: Expr, Kind); |
| 1533 | return 0; |
| 1534 | } |
| 1535 | |
| 1536 | // Immediate is already in its encoded format |
| 1537 | return MO.getImm(); |
| 1538 | } |
| 1539 | |
| 1540 | unsigned ARMMCCodeEmitter::getT2SOImmOpValue(const MCInst &MI, unsigned Op, |
| 1541 | SmallVectorImpl<MCFixup> &Fixups, |
| 1542 | const MCSubtargetInfo &STI) const { |
| 1543 | const MCOperand &MO = MI.getOperand(i: Op); |
| 1544 | |
| 1545 | // Support for fixups (MCFixup) |
| 1546 | if (MO.isExpr()) { |
| 1547 | const MCExpr *Expr = MO.getExpr(); |
| 1548 | // Fixups resolve to plain values that need to be encoded. |
| 1549 | MCFixupKind Kind = ARM::fixup_t2_so_imm; |
| 1550 | addFixup(Fixups, Offset: 0, Value: Expr, Kind); |
| 1551 | return 0; |
| 1552 | } |
| 1553 | unsigned SoImm = MO.getImm(); |
| 1554 | unsigned Encoded = ARM_AM::getT2SOImmVal(Arg: SoImm); |
| 1555 | assert(Encoded != ~0U && "Not a Thumb2 so_imm value?" ); |
| 1556 | return Encoded; |
| 1557 | } |
| 1558 | |
| 1559 | unsigned ARMMCCodeEmitter:: |
| 1560 | getSORegRegOpValue(const MCInst &MI, unsigned OpIdx, |
| 1561 | SmallVectorImpl<MCFixup> &Fixups, |
| 1562 | const MCSubtargetInfo &STI) const { |
| 1563 | // Sub-operands are [reg, reg, imm]. The first register is Rm, the reg to be |
| 1564 | // shifted. The second is Rs, the amount to shift by, and the third specifies |
| 1565 | // the type of the shift. |
| 1566 | // |
| 1567 | // {3-0} = Rm. |
| 1568 | // {4} = 1 |
| 1569 | // {6-5} = type |
| 1570 | // {11-8} = Rs |
| 1571 | // {7} = 0 |
| 1572 | |
| 1573 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
| 1574 | const MCOperand &MO1 = MI.getOperand(i: OpIdx + 1); |
| 1575 | const MCOperand &MO2 = MI.getOperand(i: OpIdx + 2); |
| 1576 | ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Op: MO2.getImm()); |
| 1577 | |
| 1578 | // Encode Rm. |
| 1579 | unsigned Binary = CTX.getRegisterInfo()->getEncodingValue(Reg: MO.getReg()); |
| 1580 | |
| 1581 | // Encode the shift opcode. |
| 1582 | unsigned SBits = 0; |
| 1583 | MCRegister Rs = MO1.getReg(); |
| 1584 | if (Rs) { |
| 1585 | // Set shift operand (bit[7:4]). |
| 1586 | // LSL - 0001 |
| 1587 | // LSR - 0011 |
| 1588 | // ASR - 0101 |
| 1589 | // ROR - 0111 |
| 1590 | switch (SOpc) { |
| 1591 | default: llvm_unreachable("Unknown shift opc!" ); |
| 1592 | case ARM_AM::lsl: SBits = 0x1; break; |
| 1593 | case ARM_AM::lsr: SBits = 0x3; break; |
| 1594 | case ARM_AM::asr: SBits = 0x5; break; |
| 1595 | case ARM_AM::ror: SBits = 0x7; break; |
| 1596 | } |
| 1597 | } |
| 1598 | |
| 1599 | Binary |= SBits << 4; |
| 1600 | |
| 1601 | // Encode the shift operation Rs. |
| 1602 | // Encode Rs bit[11:8]. |
| 1603 | assert(ARM_AM::getSORegOffset(MO2.getImm()) == 0); |
| 1604 | return Binary | (CTX.getRegisterInfo()->getEncodingValue(Reg: Rs) << ARMII::RegRsShift); |
| 1605 | } |
| 1606 | |
| 1607 | unsigned ARMMCCodeEmitter:: |
| 1608 | getSORegImmOpValue(const MCInst &MI, unsigned OpIdx, |
| 1609 | SmallVectorImpl<MCFixup> &Fixups, |
| 1610 | const MCSubtargetInfo &STI) const { |
| 1611 | // Sub-operands are [reg, imm]. The first register is Rm, the reg to be |
| 1612 | // shifted. The second is the amount to shift by. |
| 1613 | // |
| 1614 | // {3-0} = Rm. |
| 1615 | // {4} = 0 |
| 1616 | // {6-5} = type |
| 1617 | // {11-7} = imm |
| 1618 | |
| 1619 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
| 1620 | const MCOperand &MO1 = MI.getOperand(i: OpIdx + 1); |
| 1621 | ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Op: MO1.getImm()); |
| 1622 | |
| 1623 | // Encode Rm. |
| 1624 | unsigned Binary = CTX.getRegisterInfo()->getEncodingValue(Reg: MO.getReg()); |
| 1625 | |
| 1626 | // Encode the shift opcode. |
| 1627 | unsigned SBits = 0; |
| 1628 | |
| 1629 | // Set shift operand (bit[6:4]). |
| 1630 | // LSL - 000 |
| 1631 | // LSR - 010 |
| 1632 | // ASR - 100 |
| 1633 | // ROR - 110 |
| 1634 | // RRX - 110 and bit[11:8] clear. |
| 1635 | switch (SOpc) { |
| 1636 | default: llvm_unreachable("Unknown shift opc!" ); |
| 1637 | case ARM_AM::lsl: SBits = 0x0; break; |
| 1638 | case ARM_AM::lsr: SBits = 0x2; break; |
| 1639 | case ARM_AM::asr: SBits = 0x4; break; |
| 1640 | case ARM_AM::ror: SBits = 0x6; break; |
| 1641 | case ARM_AM::rrx: |
| 1642 | Binary |= 0x60; |
| 1643 | return Binary; |
| 1644 | } |
| 1645 | |
| 1646 | // Encode shift_imm bit[11:7]. |
| 1647 | Binary |= SBits << 4; |
| 1648 | unsigned Offset = ARM_AM::getSORegOffset(Op: MO1.getImm()); |
| 1649 | assert(Offset < 32 && "Offset must be in range 0-31!" ); |
| 1650 | return Binary | (Offset << 7); |
| 1651 | } |
| 1652 | |
| 1653 | |
| 1654 | unsigned ARMMCCodeEmitter:: |
| 1655 | getT2AddrModeSORegOpValue(const MCInst &MI, unsigned OpNum, |
| 1656 | SmallVectorImpl<MCFixup> &Fixups, |
| 1657 | const MCSubtargetInfo &STI) const { |
| 1658 | const MCOperand &MO1 = MI.getOperand(i: OpNum); |
| 1659 | const MCOperand &MO2 = MI.getOperand(i: OpNum+1); |
| 1660 | const MCOperand &MO3 = MI.getOperand(i: OpNum+2); |
| 1661 | |
| 1662 | // Encoded as [Rn, Rm, imm]. |
| 1663 | // FIXME: Needs fixup support. |
| 1664 | unsigned Value = CTX.getRegisterInfo()->getEncodingValue(Reg: MO1.getReg()); |
| 1665 | Value <<= 4; |
| 1666 | Value |= CTX.getRegisterInfo()->getEncodingValue(Reg: MO2.getReg()); |
| 1667 | Value <<= 2; |
| 1668 | Value |= MO3.getImm(); |
| 1669 | |
| 1670 | return Value; |
| 1671 | } |
| 1672 | |
| 1673 | template<unsigned Bits, unsigned Shift> |
| 1674 | unsigned ARMMCCodeEmitter:: |
| 1675 | getT2AddrModeImmOpValue(const MCInst &MI, unsigned OpNum, |
| 1676 | SmallVectorImpl<MCFixup> &Fixups, |
| 1677 | const MCSubtargetInfo &STI) const { |
| 1678 | const MCOperand &MO1 = MI.getOperand(i: OpNum); |
| 1679 | const MCOperand &MO2 = MI.getOperand(i: OpNum+1); |
| 1680 | |
| 1681 | // FIXME: Needs fixup support. |
| 1682 | unsigned Value = CTX.getRegisterInfo()->getEncodingValue(Reg: MO1.getReg()); |
| 1683 | |
| 1684 | // If the immediate is Bits bits long, we need Bits+1 bits in order
| 1685 | // to represent the (inverse of the) sign bit.
| 1686 | Value <<= (Bits + 1); |
| 1687 | int32_t tmp = (int32_t)MO2.getImm(); |
| 1688 | if (tmp == INT32_MIN) { // represents subtracting zero rather than adding it |
| 1689 | tmp = 0; |
| 1690 | } else if (tmp < 0) { |
| 1691 | tmp = abs(x: tmp); |
| 1692 | } else { |
| 1693 | Value |= (1U << Bits); // Set the ADD bit |
| 1694 | } |
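// For illustration: with Bits = 8, Shift = 0 and an offset operand of -4,
// tmp becomes 4, the add bit (1 << Bits) stays clear, and the low 8 bits of
// Value hold 4.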
| 1695 | Value |= (tmp >> Shift) & ((1U << Bits) - 1); |
| 1696 | return Value; |
| 1697 | } |
| 1698 | |
| 1699 | unsigned ARMMCCodeEmitter:: |
| 1700 | getT2AddrModeImm8OffsetOpValue(const MCInst &MI, unsigned OpNum, |
| 1701 | SmallVectorImpl<MCFixup> &Fixups, |
| 1702 | const MCSubtargetInfo &STI) const { |
| 1703 | const MCOperand &MO1 = MI.getOperand(i: OpNum); |
| 1704 | |
| 1705 | // FIXME: Needs fixup support. |
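// The result is the 8-bit offset magnitude with bit 8 acting as the add bit,
// e.g. #+4 encodes as 0x104 and #-4 as 0x004.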
| 1706 | unsigned Value = 0; |
| 1707 | auto tmp = static_cast<uint32_t>(MO1.getImm()); |
| 1708 | if (static_cast<int32_t>(tmp) < 0) |
| 1709 | tmp = -tmp; |
| 1710 | else |
| 1711 | Value |= 256; // Set the ADD bit |
| 1712 | Value |= tmp & 255; |
| 1713 | return Value; |
| 1714 | } |
| 1715 | |
| 1716 | unsigned ARMMCCodeEmitter:: |
| 1717 | getT2SORegOpValue(const MCInst &MI, unsigned OpIdx, |
| 1718 | SmallVectorImpl<MCFixup> &Fixups, |
| 1719 | const MCSubtargetInfo &STI) const { |
| 1720 | // Sub-operands are [reg, imm]. The first register is Rm, the reg to be |
| 1721 | // shifted. The second is the amount to shift by. |
| 1722 | // |
| 1723 | // {3-0} = Rm. |
| 1724 | // {4} = 0 |
| 1725 | // {6-5} = type |
| 1726 | // {11-7} = imm |
| 1727 | |
| 1728 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
| 1729 | const MCOperand &MO1 = MI.getOperand(i: OpIdx + 1); |
| 1730 | ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Op: MO1.getImm()); |
| 1731 | |
| 1732 | // Encode Rm. |
| 1733 | unsigned Binary = CTX.getRegisterInfo()->getEncodingValue(Reg: MO.getReg()); |
| 1734 | |
| 1735 | // Encode the shift opcode. |
| 1736 | unsigned SBits = 0; |
| 1737 | // Set shift operand (bit[6:4]). |
| 1738 | // LSL - 000 |
| 1739 | // LSR - 010 |
| 1740 | // ASR - 100 |
| 1741 | // ROR - 110 |
| 1742 | switch (SOpc) { |
| 1743 | default: llvm_unreachable("Unknown shift opc!" ); |
| 1744 | case ARM_AM::lsl: SBits = 0x0; break; |
| 1745 | case ARM_AM::lsr: SBits = 0x2; break; |
| 1746 | case ARM_AM::asr: SBits = 0x4; break; |
| 1747 | case ARM_AM::rrx: [[fallthrough]]; |
| 1748 | case ARM_AM::ror: SBits = 0x6; break; |
| 1749 | } |
| 1750 | |
| 1751 | Binary |= SBits << 4; |
| 1752 | if (SOpc == ARM_AM::rrx) |
| 1753 | return Binary; |
| 1754 | |
| 1755 | // Encode shift_imm bit[11:7]. |
| 1756 | return Binary | ARM_AM::getSORegOffset(Op: MO1.getImm()) << 7; |
| 1757 | } |
| 1758 | |
| 1759 | unsigned ARMMCCodeEmitter:: |
| 1760 | getBitfieldInvertedMaskOpValue(const MCInst &MI, unsigned Op, |
| 1761 | SmallVectorImpl<MCFixup> &Fixups, |
| 1762 | const MCSubtargetInfo &STI) const { |
| 1763 | // 10 bits. The lower 5 bits are the lsb of the mask, and the upper 5 bits
| 1764 | // are the msb of the mask.
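// For example, a mask operand of 0xFFFF00FF inverts to 0x0000FF00, giving
// lsb = 8 and msb = 15, which encodes as 8 | (15 << 5).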
| 1765 | const MCOperand &MO = MI.getOperand(i: Op); |
| 1766 | uint32_t v = ~MO.getImm(); |
| 1767 | uint32_t lsb = llvm::countr_zero(Val: v); |
| 1768 | uint32_t msb = llvm::Log2_32(Value: v); |
| 1769 | assert(v != 0 && lsb < 32 && msb < 32 && "Illegal bitfield mask!" ); |
| 1770 | return lsb | (msb << 5); |
| 1771 | } |
| 1772 | |
| 1773 | unsigned ARMMCCodeEmitter:: |
| 1774 | getRegisterListOpValue(const MCInst &MI, unsigned Op, |
| 1775 | SmallVectorImpl<MCFixup> &Fixups, |
| 1776 | const MCSubtargetInfo &STI) const { |
| 1777 | // VLDM/VSTM/VSCCLRM: |
| 1778 | // {12-8} = Vd |
| 1779 | // {7-0} = Number of registers |
| 1780 | // |
| 1781 | // LDM/STM: |
| 1782 | // {15-0} = Bitfield of GPRs. |
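// For example, an LDM/STM list {r0, r2, r3} sets bits 0, 2 and 3 of the
// bitfield, giving Binary = 0xD.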
| 1783 | MCRegister Reg = MI.getOperand(i: Op).getReg(); |
| 1784 | bool SPRRegs = ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg); |
| 1785 | bool DPRRegs = ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg); |
| 1786 | |
| 1787 | unsigned Binary = 0; |
| 1788 | |
| 1789 | if (SPRRegs || DPRRegs || Reg == ARM::VPR) { |
| 1790 | // VLDM/VSTM/VSCCLRM |
| 1791 | unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg); |
| 1792 | unsigned NumRegs = (MI.getNumOperands() - Op) & 0xff; |
| 1793 | Binary |= (RegNo & 0x1f) << 8; |
| 1794 | |
| 1795 | if (MI.getOpcode() == ARM::VSCCLRMD) |
| 1796 | // Ignore VPR |
| 1797 | --NumRegs; |
| 1798 | else if (MI.getOpcode() == ARM::VSCCLRMS) { |
| 1799 | // The register list can contain both S registers and D registers, with D |
| 1800 | // registers counting as two registers. VPR doesn't count towards the |
| 1801 | // number of registers. |
| 1802 | NumRegs = 0; |
| 1803 | for (unsigned I = Op, E = MI.getNumOperands(); I < E; ++I) { |
| 1804 | Reg = MI.getOperand(i: I).getReg(); |
| 1805 | if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg)) |
| 1806 | NumRegs += 1; |
| 1807 | else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) |
| 1808 | NumRegs += 2; |
| 1809 | } |
| 1810 | } |
| 1811 | if (SPRRegs) |
| 1812 | Binary |= NumRegs; |
| 1813 | else |
| 1814 | Binary |= NumRegs * 2; |
| 1815 | } else { |
| 1816 | const MCRegisterInfo &MRI = *CTX.getRegisterInfo(); |
| 1817 | assert(is_sorted(drop_begin(MI, Op), |
| 1818 | [&](const MCOperand &LHS, const MCOperand &RHS) { |
| 1819 | return MRI.getEncodingValue(LHS.getReg()) < |
| 1820 | MRI.getEncodingValue(RHS.getReg()); |
| 1821 | })); |
| 1822 | for (unsigned I = Op, E = MI.getNumOperands(); I < E; ++I) { |
| 1823 | unsigned RegNo = MRI.getEncodingValue(Reg: MI.getOperand(i: I).getReg()); |
| 1824 | Binary |= 1 << RegNo; |
| 1825 | } |
| 1826 | } |
| 1827 | |
| 1828 | return Binary; |
| 1829 | } |
| 1830 | |
| 1831 | /// getAddrMode6AddressOpValue - Encode an addrmode6 register number along |
| 1832 | /// with the alignment operand. |
| 1833 | unsigned ARMMCCodeEmitter:: |
| 1834 | getAddrMode6AddressOpValue(const MCInst &MI, unsigned Op, |
| 1835 | SmallVectorImpl<MCFixup> &Fixups, |
| 1836 | const MCSubtargetInfo &STI) const { |
| 1837 | const MCOperand &Reg = MI.getOperand(i: Op); |
| 1838 | const MCOperand &Imm = MI.getOperand(i: Op + 1); |
| 1839 | |
| 1840 | unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg: Reg.getReg()); |
| 1841 | unsigned Align = 0; |
| 1842 | |
| 1843 | switch (Imm.getImm()) { |
| 1844 | default: break; |
| 1845 | case 2: |
| 1846 | case 4: |
| 1847 | case 8: Align = 0x01; break; |
| 1848 | case 16: Align = 0x02; break; |
| 1849 | case 32: Align = 0x03; break; |
| 1850 | } |
| 1851 | |
| 1852 | return RegNo | (Align << 4); |
| 1853 | } |
| 1854 | |
| 1855 | /// getAddrMode6OneLane32AddressOpValue - Encode an addrmode6 register number |
| 1856 | /// along with the alignment operand for use in VST1 and VLD1 with size 32. |
| 1857 | unsigned ARMMCCodeEmitter:: |
| 1858 | getAddrMode6OneLane32AddressOpValue(const MCInst &MI, unsigned Op, |
| 1859 | SmallVectorImpl<MCFixup> &Fixups, |
| 1860 | const MCSubtargetInfo &STI) const { |
| 1861 | const MCOperand &Reg = MI.getOperand(i: Op); |
| 1862 | const MCOperand &Imm = MI.getOperand(i: Op + 1); |
| 1863 | |
| 1864 | unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg: Reg.getReg()); |
| 1865 | unsigned Align = 0; |
| 1866 | |
| 1867 | switch (Imm.getImm()) { |
| 1868 | default: break; |
| 1869 | case 8: |
| 1870 | case 16: |
| 1871 | case 32: // Default '0' value for invalid alignments of 8, 16, 32 bytes. |
| 1872 | case 2: Align = 0x00; break; |
| 1873 | case 4: Align = 0x03; break; |
| 1874 | } |
| 1875 | |
| 1876 | return RegNo | (Align << 4); |
| 1877 | } |
| 1878 | |
| 1879 | |
| 1880 | /// getAddrMode6DupAddressOpValue - Encode an addrmode6 register number and |
| 1881 | /// alignment operand for use in VLD-dup instructions. This is the same as |
| 1882 | /// getAddrMode6AddressOpValue except for the alignment encoding, which is |
| 1883 | /// different for VLD4-dup. |
| 1884 | unsigned ARMMCCodeEmitter:: |
| 1885 | getAddrMode6DupAddressOpValue(const MCInst &MI, unsigned Op, |
| 1886 | SmallVectorImpl<MCFixup> &Fixups, |
| 1887 | const MCSubtargetInfo &STI) const { |
| 1888 | const MCOperand &Reg = MI.getOperand(i: Op); |
| 1889 | const MCOperand &Imm = MI.getOperand(i: Op + 1); |
| 1890 | |
| 1891 | unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg: Reg.getReg()); |
| 1892 | unsigned Align = 0; |
| 1893 | |
| 1894 | switch (Imm.getImm()) { |
| 1895 | default: break; |
| 1896 | case 2: |
| 1897 | case 4: |
| 1898 | case 8: Align = 0x01; break; |
| 1899 | case 16: Align = 0x03; break; |
| 1900 | } |
| 1901 | |
| 1902 | return RegNo | (Align << 4); |
| 1903 | } |
| 1904 | |
| 1905 | unsigned ARMMCCodeEmitter:: |
| 1906 | getAddrMode6OffsetOpValue(const MCInst &MI, unsigned Op, |
| 1907 | SmallVectorImpl<MCFixup> &Fixups, |
| 1908 | const MCSubtargetInfo &STI) const { |
| 1909 | const MCOperand &MO = MI.getOperand(i: Op); |
| 1910 | if (!MO.getReg()) |
| 1911 | return 0x0D; |
| 1912 | return CTX.getRegisterInfo()->getEncodingValue(Reg: MO.getReg()); |
| 1913 | } |
| 1914 | |
| 1915 | unsigned ARMMCCodeEmitter:: |
| 1916 | getShiftRight8Imm(const MCInst &MI, unsigned Op, |
| 1917 | SmallVectorImpl<MCFixup> &Fixups, |
| 1918 | const MCSubtargetInfo &STI) const { |
| 1919 | return 8 - MI.getOperand(i: Op).getImm(); |
| 1920 | } |
| 1921 | |
| 1922 | unsigned ARMMCCodeEmitter:: |
| 1923 | getShiftRight16Imm(const MCInst &MI, unsigned Op, |
| 1924 | SmallVectorImpl<MCFixup> &Fixups, |
| 1925 | const MCSubtargetInfo &STI) const { |
| 1926 | return 16 - MI.getOperand(i: Op).getImm(); |
| 1927 | } |
| 1928 | |
| 1929 | unsigned ARMMCCodeEmitter:: |
| 1930 | getShiftRight32Imm(const MCInst &MI, unsigned Op, |
| 1931 | SmallVectorImpl<MCFixup> &Fixups, |
| 1932 | const MCSubtargetInfo &STI) const { |
| 1933 | return 32 - MI.getOperand(i: Op).getImm(); |
| 1934 | } |
| 1935 | |
| 1936 | unsigned ARMMCCodeEmitter:: |
| 1937 | getShiftRight64Imm(const MCInst &MI, unsigned Op, |
| 1938 | SmallVectorImpl<MCFixup> &Fixups, |
| 1939 | const MCSubtargetInfo &STI) const { |
| 1940 | return 64 - MI.getOperand(i: Op).getImm(); |
| 1941 | } |
| 1942 | |
| 1943 | void ARMMCCodeEmitter::encodeInstruction(const MCInst &MI, |
| 1944 | SmallVectorImpl<char> &CB, |
| 1945 | SmallVectorImpl<MCFixup> &Fixups, |
| 1946 | const MCSubtargetInfo &STI) const { |
| 1947 | // Pseudo instructions don't get encoded. |
| 1948 | const MCInstrDesc &Desc = MCII.get(Opcode: MI.getOpcode()); |
| 1949 | uint64_t TSFlags = Desc.TSFlags; |
| 1950 | if ((TSFlags & ARMII::FormMask) == ARMII::Pseudo) |
| 1951 | return; |
| 1952 | |
| 1953 | int Size; |
| 1954 | if (Desc.getSize() == 2 || Desc.getSize() == 4) |
| 1955 | Size = Desc.getSize(); |
| 1956 | else |
| 1957 | llvm_unreachable("Unexpected instruction size!" ); |
| 1958 | |
| 1959 | auto Endian = |
| 1960 | IsLittleEndian ? llvm::endianness::little : llvm::endianness::big; |
| 1961 | uint32_t Binary = getBinaryCodeForInstr(MI, Fixups, STI); |
| 1962 | if (Size == 2) { |
| 1963 | support::endian::write<uint16_t>(Out&: CB, V: Binary, E: Endian); |
| 1964 | } else if (isThumb(STI)) { |
| 1965 | // Thumb 32-bit wide instructions need to emit the high order halfword |
| 1966 | // first. |
| 1967 | support::endian::write<uint16_t>(Out&: CB, V: Binary >> 16, E: Endian); |
| 1968 | support::endian::write<uint16_t>(Out&: CB, V: Binary & 0xffff, E: Endian); |
| 1969 | } else { |
| 1970 | support::endian::write<uint32_t>(Out&: CB, V: Binary, E: Endian); |
| 1971 | } |
| 1972 | ++MCNumEmitted; // Keep track of the # of mi's emitted. |
| 1973 | } |
| 1974 | |
| 1975 | template <bool isNeg, ARM::Fixups fixup> |
| 1976 | uint32_t |
| 1977 | ARMMCCodeEmitter::getBFTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 1978 | SmallVectorImpl<MCFixup> &Fixups, |
| 1979 | const MCSubtargetInfo &STI) const { |
| 1980 | const MCOperand MO = MI.getOperand(i: OpIdx); |
| 1981 | if (MO.isExpr()) |
| 1982 | return ::getBranchTargetOpValue(MI, OpIdx, FixupKind: fixup, Fixups, STI); |
| 1983 | return isNeg ? -(MO.getImm() >> 1) : (MO.getImm() >> 1); |
| 1984 | } |
| 1985 | |
| 1986 | uint32_t |
| 1987 | ARMMCCodeEmitter::getBFAfterTargetOpValue(const MCInst &MI, unsigned OpIdx, |
| 1988 | SmallVectorImpl<MCFixup> &Fixups, |
| 1989 | const MCSubtargetInfo &STI) const { |
| 1990 | const MCOperand MO = MI.getOperand(i: OpIdx); |
| 1991 | const MCOperand BranchMO = MI.getOperand(i: 0); |
| 1992 | |
| 1993 | if (MO.isExpr()) { |
| 1994 | assert(BranchMO.isExpr()); |
| 1995 | const MCExpr *DiffExpr = MCBinaryExpr::createSub( |
| 1996 | LHS: MO.getExpr(), RHS: BranchMO.getExpr(), Ctx&: CTX); |
| 1997 | MCFixupKind Kind = ARM::fixup_bfcsel_else_target; |
| 1998 | addFixup(Fixups, Offset: 0, Value: DiffExpr, Kind); |
| 1999 | return 0; |
| 2000 | } |
| 2001 | |
| 2002 | assert(MO.isImm() && BranchMO.isImm()); |
| 2003 | int Diff = MO.getImm() - BranchMO.getImm(); |
| 2004 | assert(Diff == 4 || Diff == 2); |
| 2005 | |
| 2006 | return Diff == 4; |
| 2007 | } |
| 2008 | |
| 2009 | uint32_t ARMMCCodeEmitter::getVPTMaskOpValue(const MCInst &MI, unsigned OpIdx, |
| 2010 | SmallVectorImpl<MCFixup> &Fixups, |
| 2011 | const MCSubtargetInfo &STI) const {
| 2012 | const MCOperand MO = MI.getOperand(i: OpIdx); |
| 2013 | assert(MO.isImm() && "Unexpected operand type!" ); |
| 2014 | |
| 2015 | int Value = MO.getImm(); |
| 2016 | int Imm = 0; |
| 2017 | |
| 2018 | // VPT Masks are actually encoded as a series of invert/don't invert bits, |
| 2019 | // rather than true/false bits. |
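// For example, tracing the loop below, a 4-bit mask value of 0b1010 is
// re-encoded as 0b1110.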
| 2020 | unsigned PrevBit = 0; |
| 2021 | for (int i = 3; i >= 0; --i) { |
| 2022 | unsigned Bit = (Value >> i) & 1; |
| 2023 | |
| 2024 | // Check if we are at the end of the mask. |
| 2025 | if ((Value & ~(~0U << i)) == 0) { |
| 2026 | Imm |= (1 << i); |
| 2027 | break; |
| 2028 | } |
| 2029 | |
| 2030 | // Convert the bit in the mask based on the previous bit. |
| 2031 | if (Bit != PrevBit) |
| 2032 | Imm |= (1 << i); |
| 2033 | |
| 2034 | PrevBit = Bit; |
| 2035 | } |
| 2036 | |
| 2037 | return Imm; |
| 2038 | } |
| 2039 | |
| 2040 | uint32_t ARMMCCodeEmitter::getRestrictedCondCodeOpValue( |
| 2041 | const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups, |
| 2042 | const MCSubtargetInfo &STI) const { |
| 2043 | |
| 2044 | const MCOperand MO = MI.getOperand(i: OpIdx); |
| 2045 | assert(MO.isImm() && "Unexpected operand type!" ); |
| 2046 | |
| 2047 | switch (MO.getImm()) { |
| 2048 | default: |
| 2049 | assert(0 && "Unexpected Condition!" ); |
| 2050 | return 0; |
| 2051 | case ARMCC::HS: |
| 2052 | case ARMCC::EQ: |
| 2053 | return 0; |
| 2054 | case ARMCC::HI: |
| 2055 | case ARMCC::NE: |
| 2056 | return 1; |
| 2057 | case ARMCC::GE: |
| 2058 | return 4; |
| 2059 | case ARMCC::LT: |
| 2060 | return 5; |
| 2061 | case ARMCC::GT: |
| 2062 | return 6; |
| 2063 | case ARMCC::LE: |
| 2064 | return 7; |
| 2065 | } |
| 2066 | } |
| 2067 | |
| 2068 | uint32_t ARMMCCodeEmitter:: |
| 2069 | getPowerTwoOpValue(const MCInst &MI, unsigned OpIdx, |
| 2070 | SmallVectorImpl<MCFixup> &Fixups, |
| 2071 | const MCSubtargetInfo &STI) const { |
| 2072 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
| 2073 | assert(MO.isImm() && "Unexpected operand type!" ); |
| 2074 | return llvm::countr_zero(Val: (uint64_t)MO.getImm()); |
| 2075 | } |
| 2076 | |
| 2077 | template <unsigned start> |
| 2078 | uint32_t ARMMCCodeEmitter:: |
| 2079 | getMVEPairVectorIndexOpValue(const MCInst &MI, unsigned OpIdx, |
| 2080 | SmallVectorImpl<MCFixup> &Fixups, |
| 2081 | const MCSubtargetInfo &STI) const { |
| 2082 | const MCOperand MO = MI.getOperand(i: OpIdx); |
| 2083 | assert(MO.isImm() && "Unexpected operand type!" ); |
| 2084 | |
| 2085 | int Value = MO.getImm(); |
| 2086 | return Value - start; |
| 2087 | } |
| 2088 | |
| 2089 | #include "ARMGenMCCodeEmitter.inc" |
| 2090 | |
| 2091 | MCCodeEmitter *llvm::createARMLEMCCodeEmitter(const MCInstrInfo &MCII, |
| 2092 | MCContext &Ctx) { |
| 2093 | return new ARMMCCodeEmitter(MCII, Ctx, true); |
| 2094 | } |
| 2095 | |
| 2096 | MCCodeEmitter *llvm::createARMBEMCCodeEmitter(const MCInstrInfo &MCII, |
| 2097 | MCContext &Ctx) { |
| 2098 | return new ARMMCCodeEmitter(MCII, Ctx, false); |
| 2099 | } |
| 2100 | |