1 | //===-- ARM/ARMMCCodeEmitter.cpp - Convert ARM code to machine code -------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file implements the ARMMCCodeEmitter class. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "MCTargetDesc/ARMAddressingModes.h" |
14 | #include "MCTargetDesc/ARMBaseInfo.h" |
15 | #include "MCTargetDesc/ARMFixupKinds.h" |
16 | #include "MCTargetDesc/ARMMCAsmInfo.h" |
17 | #include "llvm/ADT/APFloat.h" |
18 | #include "llvm/ADT/APInt.h" |
19 | #include "llvm/ADT/SmallVector.h" |
20 | #include "llvm/ADT/Statistic.h" |
21 | #include "llvm/MC/MCCodeEmitter.h" |
22 | #include "llvm/MC/MCContext.h" |
23 | #include "llvm/MC/MCExpr.h" |
24 | #include "llvm/MC/MCFixup.h" |
25 | #include "llvm/MC/MCInst.h" |
26 | #include "llvm/MC/MCInstrDesc.h" |
27 | #include "llvm/MC/MCInstrInfo.h" |
28 | #include "llvm/MC/MCRegisterInfo.h" |
29 | #include "llvm/MC/MCSubtargetInfo.h" |
30 | #include "llvm/Support/Casting.h" |
31 | #include "llvm/Support/Compiler.h" |
32 | #include "llvm/Support/EndianStream.h" |
33 | #include "llvm/Support/ErrorHandling.h" |
34 | #include "llvm/Support/MathExtras.h" |
35 | #include "llvm/TargetParser/Triple.h" |
36 | #include <cassert> |
37 | #include <cstdint> |
38 | #include <cstdlib> |
39 | |
40 | using namespace llvm; |
41 | |
42 | #define DEBUG_TYPE "mccodeemitter" |
43 | |
44 | STATISTIC(MCNumEmitted, "Number of MC instructions emitted." ); |
45 | STATISTIC(MCNumCPRelocations, "Number of constant pool relocations created." ); |
46 | |
47 | namespace { |
48 | |
49 | class ARMMCCodeEmitter : public MCCodeEmitter { |
50 | const MCInstrInfo &MCII; |
51 | MCContext &CTX; |
52 | bool IsLittleEndian; |
53 | |
54 | public: |
55 | ARMMCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx, bool IsLittle) |
56 | : MCII(mcii), CTX(ctx), IsLittleEndian(IsLittle) { |
57 | } |
58 | ARMMCCodeEmitter(const ARMMCCodeEmitter &) = delete; |
59 | ARMMCCodeEmitter &operator=(const ARMMCCodeEmitter &) = delete; |
60 | ~ARMMCCodeEmitter() override = default; |
61 | |
62 | bool isThumb(const MCSubtargetInfo &STI) const { |
    return STI.hasFeature(ARM::ModeThumb);
64 | } |
65 | |
66 | bool isThumb2(const MCSubtargetInfo &STI) const { |
    return isThumb(STI) && STI.hasFeature(ARM::FeatureThumb2);
68 | } |
69 | |
70 | bool isTargetMachO(const MCSubtargetInfo &STI) const { |
71 | const Triple &TT = STI.getTargetTriple(); |
72 | return TT.isOSBinFormatMachO(); |
73 | } |
74 | |
75 | unsigned getMachineSoImmOpValue(unsigned SoImm) const; |
76 | |
77 | // getBinaryCodeForInstr - TableGen'erated function for getting the |
78 | // binary encoding for an instruction. |
79 | uint64_t getBinaryCodeForInstr(const MCInst &MI, |
80 | SmallVectorImpl<MCFixup> &Fixups, |
81 | const MCSubtargetInfo &STI) const; |
82 | |
83 | /// getMachineOpValue - Return binary encoding of operand. If the machine |
84 | /// operand requires relocation, record the relocation and return zero. |
  unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
86 | SmallVectorImpl<MCFixup> &Fixups, |
87 | const MCSubtargetInfo &STI) const; |
88 | |
  /// getHiLoImmOpValue - Return the encoding for either the high/low 16 bits,
  /// or the high/middle-high/middle-low/low 8 bits of the specified operand.
  /// This is used for operands with :lower16:, :upper16:, :lower0_7:,
  /// :lower8_15:, :higher0_7:, and :higher8_15: prefixes.
93 | uint32_t getHiLoImmOpValue(const MCInst &MI, unsigned OpIdx, |
94 | SmallVectorImpl<MCFixup> &Fixups, |
95 | const MCSubtargetInfo &STI) const; |
96 | |
97 | bool EncodeAddrModeOpValues(const MCInst &MI, unsigned OpIdx, |
98 | unsigned &Reg, unsigned &Imm, |
99 | SmallVectorImpl<MCFixup> &Fixups, |
100 | const MCSubtargetInfo &STI) const; |
101 | |
102 | /// getThumbBLTargetOpValue - Return encoding info for Thumb immediate |
103 | /// BL branch target. |
104 | uint32_t getThumbBLTargetOpValue(const MCInst &MI, unsigned OpIdx, |
105 | SmallVectorImpl<MCFixup> &Fixups, |
106 | const MCSubtargetInfo &STI) const; |
107 | |
108 | /// getThumbBLXTargetOpValue - Return encoding info for Thumb immediate |
109 | /// BLX branch target. |
110 | uint32_t getThumbBLXTargetOpValue(const MCInst &MI, unsigned OpIdx, |
111 | SmallVectorImpl<MCFixup> &Fixups, |
112 | const MCSubtargetInfo &STI) const; |
113 | |
114 | /// getThumbBRTargetOpValue - Return encoding info for Thumb branch target. |
115 | uint32_t getThumbBRTargetOpValue(const MCInst &MI, unsigned OpIdx, |
116 | SmallVectorImpl<MCFixup> &Fixups, |
117 | const MCSubtargetInfo &STI) const; |
118 | |
119 | /// getThumbBCCTargetOpValue - Return encoding info for Thumb branch target. |
120 | uint32_t getThumbBCCTargetOpValue(const MCInst &MI, unsigned OpIdx, |
121 | SmallVectorImpl<MCFixup> &Fixups, |
122 | const MCSubtargetInfo &STI) const; |
123 | |
124 | /// getThumbCBTargetOpValue - Return encoding info for Thumb branch target. |
125 | uint32_t getThumbCBTargetOpValue(const MCInst &MI, unsigned OpIdx, |
126 | SmallVectorImpl<MCFixup> &Fixups, |
127 | const MCSubtargetInfo &STI) const; |
128 | |
129 | /// getBranchTargetOpValue - Return encoding info for 24-bit immediate |
130 | /// branch target. |
131 | uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, |
132 | SmallVectorImpl<MCFixup> &Fixups, |
133 | const MCSubtargetInfo &STI) const; |
134 | |
135 | /// getThumbBranchTargetOpValue - Return encoding info for 24-bit |
136 | /// immediate Thumb2 direct branch target. |
137 | uint32_t getThumbBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, |
138 | SmallVectorImpl<MCFixup> &Fixups, |
139 | const MCSubtargetInfo &STI) const; |
140 | |
141 | /// getARMBranchTargetOpValue - Return encoding info for 24-bit immediate |
142 | /// branch target. |
143 | uint32_t getARMBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, |
144 | SmallVectorImpl<MCFixup> &Fixups, |
145 | const MCSubtargetInfo &STI) const; |
146 | uint32_t getARMBLTargetOpValue(const MCInst &MI, unsigned OpIdx, |
147 | SmallVectorImpl<MCFixup> &Fixups, |
148 | const MCSubtargetInfo &STI) const; |
149 | uint32_t getARMBLXTargetOpValue(const MCInst &MI, unsigned OpIdx, |
150 | SmallVectorImpl<MCFixup> &Fixups, |
151 | const MCSubtargetInfo &STI) const; |
152 | |
153 | /// getAdrLabelOpValue - Return encoding info for 12-bit immediate |
154 | /// ADR label target. |
155 | uint32_t getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx, |
156 | SmallVectorImpl<MCFixup> &Fixups, |
157 | const MCSubtargetInfo &STI) const; |
158 | uint32_t getThumbAdrLabelOpValue(const MCInst &MI, unsigned OpIdx, |
159 | SmallVectorImpl<MCFixup> &Fixups, |
160 | const MCSubtargetInfo &STI) const; |
161 | uint32_t getT2AdrLabelOpValue(const MCInst &MI, unsigned OpIdx, |
162 | SmallVectorImpl<MCFixup> &Fixups, |
163 | const MCSubtargetInfo &STI) const; |
164 | |
165 | uint32_t getITMaskOpValue(const MCInst &MI, unsigned OpIdx, |
166 | SmallVectorImpl<MCFixup> &Fixups, |
167 | const MCSubtargetInfo &STI) const; |
168 | |
169 | /// getMVEShiftImmOpValue - Return encoding info for the 'sz:imm5' |
170 | /// operand. |
171 | uint32_t getMVEShiftImmOpValue(const MCInst &MI, unsigned OpIdx, |
172 | SmallVectorImpl<MCFixup> &Fixups, |
173 | const MCSubtargetInfo &STI) const; |
174 | |
175 | /// getAddrModeImm12OpValue - Return encoding info for 'reg +/- imm12' |
176 | /// operand. |
177 | uint32_t getAddrModeImm12OpValue(const MCInst &MI, unsigned OpIdx, |
178 | SmallVectorImpl<MCFixup> &Fixups, |
179 | const MCSubtargetInfo &STI) const; |
180 | |
181 | /// getThumbAddrModeRegRegOpValue - Return encoding for 'reg + reg' operand. |
182 | uint32_t getThumbAddrModeRegRegOpValue(const MCInst &MI, unsigned OpIdx, |
183 | SmallVectorImpl<MCFixup> &Fixups, |
184 | const MCSubtargetInfo &STI) const; |
185 | |
186 | /// getT2AddrModeImm8s4OpValue - Return encoding info for 'reg +/- imm8<<2' |
187 | /// operand. |
188 | uint32_t getT2AddrModeImm8s4OpValue(const MCInst &MI, unsigned OpIdx, |
189 | SmallVectorImpl<MCFixup> &Fixups, |
190 | const MCSubtargetInfo &STI) const; |
191 | |
192 | /// getT2AddrModeImm7s4OpValue - Return encoding info for 'reg +/- imm7<<2' |
193 | /// operand. |
194 | uint32_t getT2AddrModeImm7s4OpValue(const MCInst &MI, unsigned OpIdx, |
195 | SmallVectorImpl<MCFixup> &Fixups, |
196 | const MCSubtargetInfo &STI) const; |
197 | |
198 | /// getT2AddrModeImm0_1020s4OpValue - Return encoding info for 'reg + imm8<<2' |
199 | /// operand. |
200 | uint32_t getT2AddrModeImm0_1020s4OpValue(const MCInst &MI, unsigned OpIdx, |
201 | SmallVectorImpl<MCFixup> &Fixups, |
202 | const MCSubtargetInfo &STI) const; |
203 | |
204 | /// getT2ScaledImmOpValue - Return encoding info for '+/- immX<<Y' |
205 | /// operand. |
206 | template<unsigned Bits, unsigned Shift> |
207 | uint32_t getT2ScaledImmOpValue(const MCInst &MI, unsigned OpIdx, |
208 | SmallVectorImpl<MCFixup> &Fixups, |
209 | const MCSubtargetInfo &STI) const; |
210 | |
211 | /// getMveAddrModeRQOpValue - Return encoding info for 'reg, vreg' |
212 | /// operand. |
213 | uint32_t getMveAddrModeRQOpValue(const MCInst &MI, unsigned OpIdx, |
214 | SmallVectorImpl<MCFixup> &Fixups, |
215 | const MCSubtargetInfo &STI) const; |
216 | |
217 | /// getMveAddrModeQOpValue - Return encoding info for 'reg +/- imm7<<{shift}' |
218 | /// operand. |
219 | template<int shift> |
220 | uint32_t getMveAddrModeQOpValue(const MCInst &MI, unsigned OpIdx, |
221 | SmallVectorImpl<MCFixup> &Fixups, |
222 | const MCSubtargetInfo &STI) const; |
223 | |
224 | /// getLdStSORegOpValue - Return encoding info for 'reg +/- reg shop imm' |
225 | /// operand as needed by load/store instructions. |
226 | uint32_t getLdStSORegOpValue(const MCInst &MI, unsigned OpIdx, |
227 | SmallVectorImpl<MCFixup> &Fixups, |
228 | const MCSubtargetInfo &STI) const; |
229 | |
230 | /// getLdStmModeOpValue - Return encoding for load/store multiple mode. |
231 | uint32_t getLdStmModeOpValue(const MCInst &MI, unsigned OpIdx, |
232 | SmallVectorImpl<MCFixup> &Fixups, |
233 | const MCSubtargetInfo &STI) const { |
    ARM_AM::AMSubMode Mode = (ARM_AM::AMSubMode)MI.getOperand(OpIdx).getImm();
    switch (Mode) {
    default: llvm_unreachable("Unknown addressing sub-mode!");
237 | case ARM_AM::da: return 0; |
238 | case ARM_AM::ia: return 1; |
239 | case ARM_AM::db: return 2; |
240 | case ARM_AM::ib: return 3; |
241 | } |
242 | } |
243 | |
244 | /// getShiftOp - Return the shift opcode (bit[6:5]) of the immediate value. |
245 | /// |
246 | unsigned getShiftOp(ARM_AM::ShiftOpc ShOpc) const { |
247 | switch (ShOpc) { |
248 | case ARM_AM::no_shift: |
249 | case ARM_AM::lsl: return 0; |
250 | case ARM_AM::lsr: return 1; |
251 | case ARM_AM::asr: return 2; |
252 | case ARM_AM::ror: |
253 | case ARM_AM::rrx: return 3; |
254 | default: |
255 | llvm_unreachable("Invalid ShiftOpc!" ); |
256 | } |
257 | } |
258 | |
259 | /// getAddrMode2OffsetOpValue - Return encoding for am2offset operands. |
260 | uint32_t getAddrMode2OffsetOpValue(const MCInst &MI, unsigned OpIdx, |
261 | SmallVectorImpl<MCFixup> &Fixups, |
262 | const MCSubtargetInfo &STI) const; |
263 | |
264 | /// getPostIdxRegOpValue - Return encoding for postidx_reg operands. |
265 | uint32_t getPostIdxRegOpValue(const MCInst &MI, unsigned OpIdx, |
266 | SmallVectorImpl<MCFixup> &Fixups, |
267 | const MCSubtargetInfo &STI) const; |
268 | |
269 | /// getAddrMode3OffsetOpValue - Return encoding for am3offset operands. |
270 | uint32_t getAddrMode3OffsetOpValue(const MCInst &MI, unsigned OpIdx, |
271 | SmallVectorImpl<MCFixup> &Fixups, |
272 | const MCSubtargetInfo &STI) const; |
273 | |
274 | /// getAddrMode3OpValue - Return encoding for addrmode3 operands. |
275 | uint32_t getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx, |
276 | SmallVectorImpl<MCFixup> &Fixups, |
277 | const MCSubtargetInfo &STI) const; |
278 | |
279 | /// getAddrModeThumbSPOpValue - Return encoding info for 'reg +/- imm12' |
280 | /// operand. |
281 | uint32_t getAddrModeThumbSPOpValue(const MCInst &MI, unsigned OpIdx, |
282 | SmallVectorImpl<MCFixup> &Fixups, |
283 | const MCSubtargetInfo &STI) const; |
284 | |
285 | /// getAddrModeISOpValue - Encode the t_addrmode_is# operands. |
286 | uint32_t getAddrModeISOpValue(const MCInst &MI, unsigned OpIdx, |
287 | SmallVectorImpl<MCFixup> &Fixups, |
288 | const MCSubtargetInfo &STI) const; |
289 | |
290 | /// getAddrModePCOpValue - Return encoding for t_addrmode_pc operands. |
291 | uint32_t getAddrModePCOpValue(const MCInst &MI, unsigned OpIdx, |
292 | SmallVectorImpl<MCFixup> &Fixups, |
293 | const MCSubtargetInfo &STI) const; |
294 | |
295 | /// getAddrMode5OpValue - Return encoding info for 'reg +/- (imm8 << 2)' operand. |
296 | uint32_t getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx, |
297 | SmallVectorImpl<MCFixup> &Fixups, |
298 | const MCSubtargetInfo &STI) const; |
299 | |
300 | /// getAddrMode5FP16OpValue - Return encoding info for 'reg +/- (imm8 << 1)' operand. |
301 | uint32_t getAddrMode5FP16OpValue(const MCInst &MI, unsigned OpIdx, |
302 | SmallVectorImpl<MCFixup> &Fixups, |
303 | const MCSubtargetInfo &STI) const; |
304 | |
305 | /// getCCOutOpValue - Return encoding of the 's' bit. |
306 | unsigned getCCOutOpValue(const MCInst &MI, unsigned Op, |
307 | SmallVectorImpl<MCFixup> &Fixups, |
308 | const MCSubtargetInfo &STI) const { |
309 | // The operand is either reg0 or CPSR. The 's' bit is encoded as '0' or |
310 | // '1' respectively. |
    return MI.getOperand(Op).getReg() == ARM::CPSR;
312 | } |
313 | |
314 | unsigned getModImmOpValue(const MCInst &MI, unsigned Op, |
315 | SmallVectorImpl<MCFixup> &Fixups, |
316 | const MCSubtargetInfo &ST) const { |
    const MCOperand &MO = MI.getOperand(Op);
318 | |
319 | // Support for fixups (MCFixup) |
320 | if (MO.isExpr()) { |
321 | const MCExpr *Expr = MO.getExpr(); |
322 | // Fixups resolve to plain values that need to be encoded. |
323 | MCFixupKind Kind = MCFixupKind(ARM::fixup_arm_mod_imm); |
      Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
325 | return 0; |
326 | } |
327 | |
328 | // Immediate is already in its encoded format |
329 | return MO.getImm(); |
330 | } |
331 | |
332 | /// getT2SOImmOpValue - Return an encoded 12-bit shifted-immediate value. |
333 | unsigned getT2SOImmOpValue(const MCInst &MI, unsigned Op, |
334 | SmallVectorImpl<MCFixup> &Fixups, |
335 | const MCSubtargetInfo &STI) const { |
    const MCOperand &MO = MI.getOperand(Op);
337 | |
338 | // Support for fixups (MCFixup) |
339 | if (MO.isExpr()) { |
340 | const MCExpr *Expr = MO.getExpr(); |
341 | // Fixups resolve to plain values that need to be encoded. |
342 | MCFixupKind Kind = MCFixupKind(ARM::fixup_t2_so_imm); |
      Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
344 | return 0; |
345 | } |
346 | unsigned SoImm = MO.getImm(); |
    unsigned Encoded = ARM_AM::getT2SOImmVal(SoImm);
    assert(Encoded != ~0U && "Not a Thumb2 so_imm value?");
349 | return Encoded; |
350 | } |
351 | |
352 | unsigned getT2AddrModeSORegOpValue(const MCInst &MI, unsigned OpNum, |
353 | SmallVectorImpl<MCFixup> &Fixups, |
354 | const MCSubtargetInfo &STI) const; |
355 | template<unsigned Bits, unsigned Shift> |
356 | unsigned getT2AddrModeImmOpValue(const MCInst &MI, unsigned OpNum, |
357 | SmallVectorImpl<MCFixup> &Fixups, |
358 | const MCSubtargetInfo &STI) const; |
359 | unsigned getT2AddrModeImm8OffsetOpValue(const MCInst &MI, unsigned OpNum, |
360 | SmallVectorImpl<MCFixup> &Fixups, |
361 | const MCSubtargetInfo &STI) const; |
362 | |
363 | /// getSORegOpValue - Return an encoded so_reg shifted register value. |
364 | unsigned getSORegRegOpValue(const MCInst &MI, unsigned Op, |
365 | SmallVectorImpl<MCFixup> &Fixups, |
366 | const MCSubtargetInfo &STI) const; |
367 | unsigned getSORegImmOpValue(const MCInst &MI, unsigned Op, |
368 | SmallVectorImpl<MCFixup> &Fixups, |
369 | const MCSubtargetInfo &STI) const; |
370 | unsigned getT2SORegOpValue(const MCInst &MI, unsigned Op, |
371 | SmallVectorImpl<MCFixup> &Fixups, |
372 | const MCSubtargetInfo &STI) const; |
373 | |
374 | unsigned getNEONVcvtImm32OpValue(const MCInst &MI, unsigned Op, |
375 | SmallVectorImpl<MCFixup> &Fixups, |
376 | const MCSubtargetInfo &STI) const { |
    return 64 - MI.getOperand(Op).getImm();
378 | } |
379 | |
380 | unsigned getBitfieldInvertedMaskOpValue(const MCInst &MI, unsigned Op, |
381 | SmallVectorImpl<MCFixup> &Fixups, |
382 | const MCSubtargetInfo &STI) const; |
383 | |
384 | unsigned getRegisterListOpValue(const MCInst &MI, unsigned Op, |
385 | SmallVectorImpl<MCFixup> &Fixups, |
386 | const MCSubtargetInfo &STI) const; |
387 | unsigned getAddrMode6AddressOpValue(const MCInst &MI, unsigned Op, |
388 | SmallVectorImpl<MCFixup> &Fixups, |
389 | const MCSubtargetInfo &STI) const; |
390 | unsigned getAddrMode6OneLane32AddressOpValue(const MCInst &MI, unsigned Op, |
391 | SmallVectorImpl<MCFixup> &Fixups, |
392 | const MCSubtargetInfo &STI) const; |
393 | unsigned getAddrMode6DupAddressOpValue(const MCInst &MI, unsigned Op, |
394 | SmallVectorImpl<MCFixup> &Fixups, |
395 | const MCSubtargetInfo &STI) const; |
396 | unsigned getAddrMode6OffsetOpValue(const MCInst &MI, unsigned Op, |
397 | SmallVectorImpl<MCFixup> &Fixups, |
398 | const MCSubtargetInfo &STI) const; |
399 | |
400 | unsigned getShiftRight8Imm(const MCInst &MI, unsigned Op, |
401 | SmallVectorImpl<MCFixup> &Fixups, |
402 | const MCSubtargetInfo &STI) const; |
403 | unsigned getShiftRight16Imm(const MCInst &MI, unsigned Op, |
404 | SmallVectorImpl<MCFixup> &Fixups, |
405 | const MCSubtargetInfo &STI) const; |
406 | unsigned getShiftRight32Imm(const MCInst &MI, unsigned Op, |
407 | SmallVectorImpl<MCFixup> &Fixups, |
408 | const MCSubtargetInfo &STI) const; |
409 | unsigned getShiftRight64Imm(const MCInst &MI, unsigned Op, |
410 | SmallVectorImpl<MCFixup> &Fixups, |
411 | const MCSubtargetInfo &STI) const; |
412 | |
413 | unsigned getThumbSRImmOpValue(const MCInst &MI, unsigned Op, |
414 | SmallVectorImpl<MCFixup> &Fixups, |
415 | const MCSubtargetInfo &STI) const; |
416 | |
417 | unsigned NEONThumb2DataIPostEncoder(const MCInst &MI, |
418 | unsigned EncodedValue, |
419 | const MCSubtargetInfo &STI) const; |
420 | unsigned NEONThumb2LoadStorePostEncoder(const MCInst &MI, |
421 | unsigned EncodedValue, |
422 | const MCSubtargetInfo &STI) const; |
423 | unsigned NEONThumb2DupPostEncoder(const MCInst &MI, |
424 | unsigned EncodedValue, |
425 | const MCSubtargetInfo &STI) const; |
426 | unsigned NEONThumb2V8PostEncoder(const MCInst &MI, |
427 | unsigned EncodedValue, |
428 | const MCSubtargetInfo &STI) const; |
429 | |
430 | unsigned VFPThumb2PostEncoder(const MCInst &MI, |
431 | unsigned EncodedValue, |
432 | const MCSubtargetInfo &STI) const; |
433 | |
434 | uint32_t getPowerTwoOpValue(const MCInst &MI, unsigned OpIdx, |
435 | SmallVectorImpl<MCFixup> &Fixups, |
436 | const MCSubtargetInfo &STI) const; |
437 | |
438 | void encodeInstruction(const MCInst &MI, SmallVectorImpl<char> &CB, |
439 | SmallVectorImpl<MCFixup> &Fixups, |
440 | const MCSubtargetInfo &STI) const override; |
441 | |
442 | template <bool isNeg, ARM::Fixups fixup> |
443 | uint32_t getBFTargetOpValue(const MCInst &MI, unsigned OpIdx, |
444 | SmallVectorImpl<MCFixup> &Fixups, |
445 | const MCSubtargetInfo &STI) const; |
446 | |
447 | uint32_t getBFAfterTargetOpValue(const MCInst &MI, unsigned OpIdx, |
448 | SmallVectorImpl<MCFixup> &Fixups, |
449 | const MCSubtargetInfo &STI) const; |
450 | |
451 | uint32_t getVPTMaskOpValue(const MCInst &MI, unsigned OpIdx, |
452 | SmallVectorImpl<MCFixup> &Fixups, |
453 | const MCSubtargetInfo &STI) const; |
454 | uint32_t getRestrictedCondCodeOpValue(const MCInst &MI, unsigned OpIdx, |
455 | SmallVectorImpl<MCFixup> &Fixups, |
456 | const MCSubtargetInfo &STI) const; |
457 | template <unsigned size> |
458 | uint32_t getMVEPairVectorIndexOpValue(const MCInst &MI, unsigned OpIdx, |
459 | SmallVectorImpl<MCFixup> &Fixups, |
460 | const MCSubtargetInfo &STI) const; |
461 | }; |
462 | |
463 | } // end anonymous namespace |
464 | |
465 | /// NEONThumb2DataIPostEncoder - Post-process encoded NEON data-processing |
466 | /// instructions, and rewrite them to their Thumb2 form if we are currently in |
467 | /// Thumb2 mode. |
468 | unsigned ARMMCCodeEmitter::NEONThumb2DataIPostEncoder(const MCInst &MI, |
469 | unsigned EncodedValue, |
470 | const MCSubtargetInfo &STI) const { |
471 | if (isThumb2(STI)) { |
    // NEON Thumb2 data-processing encodings are very simple: bit 24 is moved
473 | // to bit 12 of the high half-word (i.e. bit 28), and bits 27-24 are |
474 | // set to 1111. |
475 | unsigned Bit24 = EncodedValue & 0x01000000; |
476 | unsigned Bit28 = Bit24 << 4; |
477 | EncodedValue &= 0xEFFFFFFF; |
478 | EncodedValue |= Bit28; |
479 | EncodedValue |= 0x0F000000; |
480 | } |
481 | |
482 | return EncodedValue; |
483 | } |
484 | |
485 | /// NEONThumb2LoadStorePostEncoder - Post-process encoded NEON load/store |
486 | /// instructions, and rewrite them to their Thumb2 form if we are currently in |
487 | /// Thumb2 mode. |
488 | unsigned ARMMCCodeEmitter::NEONThumb2LoadStorePostEncoder(const MCInst &MI, |
489 | unsigned EncodedValue, |
490 | const MCSubtargetInfo &STI) const { |
491 | if (isThumb2(STI)) { |
492 | EncodedValue &= 0xF0FFFFFF; |
493 | EncodedValue |= 0x09000000; |
494 | } |
495 | |
496 | return EncodedValue; |
497 | } |
498 | |
499 | /// NEONThumb2DupPostEncoder - Post-process encoded NEON vdup |
500 | /// instructions, and rewrite them to their Thumb2 form if we are currently in |
501 | /// Thumb2 mode. |
502 | unsigned ARMMCCodeEmitter::NEONThumb2DupPostEncoder(const MCInst &MI, |
503 | unsigned EncodedValue, |
504 | const MCSubtargetInfo &STI) const { |
505 | if (isThumb2(STI)) { |
506 | EncodedValue &= 0x00FFFFFF; |
507 | EncodedValue |= 0xEE000000; |
508 | } |
509 | |
510 | return EncodedValue; |
511 | } |
512 | |
513 | /// Post-process encoded NEON v8 instructions, and rewrite them to Thumb2 form |
514 | /// if we are in Thumb2. |
515 | unsigned ARMMCCodeEmitter::NEONThumb2V8PostEncoder(const MCInst &MI, |
516 | unsigned EncodedValue, |
517 | const MCSubtargetInfo &STI) const { |
518 | if (isThumb2(STI)) { |
519 | EncodedValue |= 0xC000000; // Set bits 27-26 |
520 | } |
521 | |
522 | return EncodedValue; |
523 | } |
524 | |
525 | /// VFPThumb2PostEncoder - Post-process encoded VFP instructions and rewrite |
526 | /// them to their Thumb2 form if we are currently in Thumb2 mode. |
527 | unsigned ARMMCCodeEmitter:: |
528 | VFPThumb2PostEncoder(const MCInst &MI, unsigned EncodedValue, |
529 | const MCSubtargetInfo &STI) const { |
530 | if (isThumb2(STI)) { |
531 | EncodedValue &= 0x0FFFFFFF; |
532 | EncodedValue |= 0xE0000000; |
533 | } |
534 | return EncodedValue; |
535 | } |
536 | |
537 | /// getMachineOpValue - Return binary encoding of operand. If the machine |
538 | /// operand requires relocation, record the relocation and return zero. |
539 | unsigned ARMMCCodeEmitter:: |
540 | getMachineOpValue(const MCInst &MI, const MCOperand &MO, |
541 | SmallVectorImpl<MCFixup> &Fixups, |
542 | const MCSubtargetInfo &STI) const { |
543 | if (MO.isReg()) { |
544 | MCRegister Reg = MO.getReg(); |
545 | unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg); |
546 | |
547 | // In NEON, Q registers are encoded as 2x their register number, |
548 | // because they're using the same indices as the D registers they |
549 | // overlap. In MVE, there are no 64-bit vector instructions, so |
550 | // the encodings all refer to Q-registers by their literal |
551 | // register number. |
552 | |
    if (STI.hasFeature(ARM::HasMVEIntegerOps))
554 | return RegNo; |
555 | |
556 | switch (Reg.id()) { |
557 | default: |
558 | return RegNo; |
559 | case ARM::Q0: case ARM::Q1: case ARM::Q2: case ARM::Q3: |
560 | case ARM::Q4: case ARM::Q5: case ARM::Q6: case ARM::Q7: |
561 | case ARM::Q8: case ARM::Q9: case ARM::Q10: case ARM::Q11: |
562 | case ARM::Q12: case ARM::Q13: case ARM::Q14: case ARM::Q15: |
563 | return 2 * RegNo; |
564 | } |
565 | } else if (MO.isImm()) { |
566 | return static_cast<unsigned>(MO.getImm()); |
567 | } else if (MO.isDFPImm()) { |
    return static_cast<unsigned>(APFloat(bit_cast<double>(MO.getDFPImm()))
                                     .bitcastToAPInt()
                                     .getHiBits(32)
                                     .getLimitedValue());
572 | } |
573 | |
574 | llvm_unreachable("Unable to encode MCOperand!" ); |
575 | } |
576 | |
577 | /// getAddrModeImmOpValue - Return encoding info for 'reg +/- imm' operand. |
578 | bool ARMMCCodeEmitter:: |
579 | EncodeAddrModeOpValues(const MCInst &MI, unsigned OpIdx, unsigned &Reg, |
580 | unsigned &Imm, SmallVectorImpl<MCFixup> &Fixups, |
581 | const MCSubtargetInfo &STI) const { |
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);

  Reg = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
586 | |
587 | int32_t SImm = MO1.getImm(); |
588 | bool isAdd = true; |
589 | |
590 | // Special value for #-0 |
591 | if (SImm == INT32_MIN) { |
592 | SImm = 0; |
593 | isAdd = false; |
594 | } |
595 | |
596 | // Immediate is always encoded as positive. The 'U' bit controls add vs sub. |
597 | if (SImm < 0) { |
598 | SImm = -SImm; |
599 | isAdd = false; |
600 | } |
601 | |
602 | Imm = SImm; |
603 | return isAdd; |
604 | } |
605 | |
606 | /// getBranchTargetOpValue - Helper function to get the branch target operand, |
607 | /// which is either an immediate or requires a fixup. |
608 | static uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, |
609 | unsigned FixupKind, |
610 | SmallVectorImpl<MCFixup> &Fixups, |
611 | const MCSubtargetInfo &STI) { |
  const MCOperand &MO = MI.getOperand(OpIdx);
613 | |
614 | // If the destination is an immediate, we have nothing to do. |
615 | if (MO.isImm()) return MO.getImm(); |
616 | assert(MO.isExpr() && "Unexpected branch target type!" ); |
617 | const MCExpr *Expr = MO.getExpr(); |
618 | MCFixupKind Kind = MCFixupKind(FixupKind); |
619 | Fixups.push_back(Elt: MCFixup::create(Offset: 0, Value: Expr, Kind, Loc: MI.getLoc())); |
620 | |
621 | // All of the information is in the fixup. |
622 | return 0; |
623 | } |
624 | |
625 | // Thumb BL and BLX use a strange offset encoding where bits 22 and 21 are |
626 | // determined by negating them and XOR'ing them with bit 23. |
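// As a worked example of the arithmetic below: a byte offset of 0x100 is
// halved to 0x80; with S == 0 the inverted J1/J2 bits both become 1, so the
// function returns 0x600080.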
627 | static int32_t encodeThumbBLOffset(int32_t offset) { |
628 | offset >>= 1; |
629 | uint32_t S = (offset & 0x800000) >> 23; |
630 | uint32_t J1 = (offset & 0x400000) >> 22; |
631 | uint32_t J2 = (offset & 0x200000) >> 21; |
632 | J1 = (~J1 & 0x1); |
633 | J2 = (~J2 & 0x1); |
634 | J1 ^= S; |
635 | J2 ^= S; |
636 | |
637 | offset &= ~0x600000; |
638 | offset |= J1 << 22; |
639 | offset |= J2 << 21; |
640 | |
641 | return offset; |
642 | } |
643 | |
644 | /// getThumbBLTargetOpValue - Return encoding info for immediate branch target. |
645 | uint32_t ARMMCCodeEmitter:: |
646 | getThumbBLTargetOpValue(const MCInst &MI, unsigned OpIdx, |
647 | SmallVectorImpl<MCFixup> &Fixups, |
648 | const MCSubtargetInfo &STI) const { |
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_bl,
                                    Fixups, STI);
  return encodeThumbBLOffset(MO.getImm());
654 | } |
655 | |
656 | /// getThumbBLXTargetOpValue - Return encoding info for Thumb immediate |
657 | /// BLX branch target. |
658 | uint32_t ARMMCCodeEmitter:: |
659 | getThumbBLXTargetOpValue(const MCInst &MI, unsigned OpIdx, |
660 | SmallVectorImpl<MCFixup> &Fixups, |
661 | const MCSubtargetInfo &STI) const { |
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_blx,
                                    Fixups, STI);
  return encodeThumbBLOffset(MO.getImm());
667 | } |
668 | |
669 | /// getThumbBRTargetOpValue - Return encoding info for Thumb branch target. |
670 | uint32_t ARMMCCodeEmitter:: |
671 | getThumbBRTargetOpValue(const MCInst &MI, unsigned OpIdx, |
672 | SmallVectorImpl<MCFixup> &Fixups, |
673 | const MCSubtargetInfo &STI) const { |
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_br,
                                    Fixups, STI);
678 | return (MO.getImm() >> 1); |
679 | } |
680 | |
681 | /// getThumbBCCTargetOpValue - Return encoding info for Thumb branch target. |
682 | uint32_t ARMMCCodeEmitter:: |
683 | getThumbBCCTargetOpValue(const MCInst &MI, unsigned OpIdx, |
684 | SmallVectorImpl<MCFixup> &Fixups, |
685 | const MCSubtargetInfo &STI) const { |
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_bcc,
                                    Fixups, STI);
690 | return (MO.getImm() >> 1); |
691 | } |
692 | |
693 | /// getThumbCBTargetOpValue - Return encoding info for Thumb branch target. |
694 | uint32_t ARMMCCodeEmitter:: |
695 | getThumbCBTargetOpValue(const MCInst &MI, unsigned OpIdx, |
696 | SmallVectorImpl<MCFixup> &Fixups, |
697 | const MCSubtargetInfo &STI) const { |
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_cb, Fixups,
                                    STI);
701 | return (MO.getImm() >> 1); |
702 | } |
703 | |
704 | /// Return true if this branch has a non-always predication |
705 | static bool HasConditionalBranch(const MCInst &MI) { |
706 | int NumOp = MI.getNumOperands(); |
707 | if (NumOp >= 2) { |
708 | for (int i = 0; i < NumOp-1; ++i) { |
709 | const MCOperand &MCOp1 = MI.getOperand(i); |
      const MCOperand &MCOp2 = MI.getOperand(i + 1);
711 | if (MCOp1.isImm() && MCOp2.isReg() && |
712 | (!MCOp2.getReg() || MCOp2.getReg() == ARM::CPSR)) { |
713 | if (ARMCC::CondCodes(MCOp1.getImm()) != ARMCC::AL) |
714 | return true; |
715 | } |
716 | } |
717 | } |
718 | return false; |
719 | } |
720 | |
721 | /// getBranchTargetOpValue - Return encoding info for 24-bit immediate branch |
722 | /// target. |
723 | uint32_t ARMMCCodeEmitter:: |
724 | getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, |
725 | SmallVectorImpl<MCFixup> &Fixups, |
726 | const MCSubtargetInfo &STI) const { |
727 | // FIXME: This really, really shouldn't use TargetMachine. We don't want |
728 | // coupling between MC and TM anywhere we can help it. |
729 | if (isThumb2(STI)) |
730 | return |
      ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_condbranch, Fixups, STI);
732 | return getARMBranchTargetOpValue(MI, OpIdx, Fixups, STI); |
733 | } |
734 | |
/// getARMBranchTargetOpValue - Return encoding info for 24-bit immediate
/// branch target.
737 | uint32_t ARMMCCodeEmitter:: |
738 | getARMBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, |
739 | SmallVectorImpl<MCFixup> &Fixups, |
740 | const MCSubtargetInfo &STI) const { |
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr()) {
    if (HasConditionalBranch(MI))
      return ::getBranchTargetOpValue(MI, OpIdx,
                                      ARM::fixup_arm_condbranch, Fixups, STI);
    return ::getBranchTargetOpValue(MI, OpIdx,
                                    ARM::fixup_arm_uncondbranch, Fixups, STI);
748 | } |
749 | |
750 | return MO.getImm() >> 2; |
751 | } |
752 | |
753 | uint32_t ARMMCCodeEmitter:: |
754 | getARMBLTargetOpValue(const MCInst &MI, unsigned OpIdx, |
755 | SmallVectorImpl<MCFixup> &Fixups, |
756 | const MCSubtargetInfo &STI) const { |
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr()) {
    if (HasConditionalBranch(MI))
      return ::getBranchTargetOpValue(MI, OpIdx,
                                      ARM::fixup_arm_condbl, Fixups, STI);
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_uncondbl, Fixups,
                                    STI);
763 | } |
764 | |
765 | return MO.getImm() >> 2; |
766 | } |
767 | |
768 | uint32_t ARMMCCodeEmitter:: |
769 | getARMBLXTargetOpValue(const MCInst &MI, unsigned OpIdx, |
770 | SmallVectorImpl<MCFixup> &Fixups, |
771 | const MCSubtargetInfo &STI) const { |
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_blx, Fixups, STI);
775 | |
776 | return MO.getImm() >> 1; |
777 | } |
778 | |
/// getThumbBranchTargetOpValue - Return encoding info for 24-bit immediate
/// Thumb2 direct branch target.
781 | uint32_t ARMMCCodeEmitter::getThumbBranchTargetOpValue( |
782 | const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups, |
783 | const MCSubtargetInfo &STI) const { |
784 | unsigned Val = 0; |
  const MCOperand MO = MI.getOperand(OpIdx);

  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_uncondbranch,
                                    Fixups, STI);
  else
790 | Val = MO.getImm() >> 1; |
791 | |
792 | bool I = (Val & 0x800000); |
793 | bool J1 = (Val & 0x400000); |
794 | bool J2 = (Val & 0x200000); |
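  // The Thumb2 encoding stores J1 = !(I1 ^ S) and J2 = !(I2 ^ S), so fold the
  // sign bit I (bit 23 of the shifted value) into bits 22 and 21 here.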
795 | if (I ^ J1) |
796 | Val &= ~0x400000; |
797 | else |
798 | Val |= 0x400000; |
799 | |
800 | if (I ^ J2) |
801 | Val &= ~0x200000; |
802 | else |
803 | Val |= 0x200000; |
804 | |
805 | return Val; |
806 | } |
807 | |
808 | /// getAdrLabelOpValue - Return encoding info for 12-bit shifted-immediate |
809 | /// ADR label target. |
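/// For example (assuming ARM_AM::getSOImmVal returns small in-range values
/// unchanged), an immediate of -16 selects the SUB form and encodes as
/// 0x1000 | getSOImmVal(16) = 0x1010.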
810 | uint32_t ARMMCCodeEmitter:: |
811 | getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx, |
812 | SmallVectorImpl<MCFixup> &Fixups, |
813 | const MCSubtargetInfo &STI) const { |
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_adr_pcrel_12,
817 | Fixups, STI); |
818 | int64_t offset = MO.getImm(); |
819 | uint32_t Val = 0x2000; |
820 | |
821 | int SoImmVal; |
822 | if (offset == INT32_MIN) { |
823 | Val = 0x1000; |
824 | SoImmVal = 0; |
825 | } else if (offset < 0) { |
826 | Val = 0x1000; |
827 | offset *= -1; |
    SoImmVal = ARM_AM::getSOImmVal(offset);
    if (SoImmVal == -1) {
      Val = 0x2000;
      offset *= -1;
      SoImmVal = ARM_AM::getSOImmVal(offset);
    }
  } else {
    SoImmVal = ARM_AM::getSOImmVal(offset);
    if (SoImmVal == -1) {
      Val = 0x1000;
      offset *= -1;
      SoImmVal = ARM_AM::getSOImmVal(offset);
    }
840 | } |
841 | } |
842 | |
843 | assert(SoImmVal != -1 && "Not a valid so_imm value!" ); |
844 | |
845 | Val |= SoImmVal; |
846 | return Val; |
847 | } |
848 | |
849 | /// getT2AdrLabelOpValue - Return encoding info for 12-bit immediate ADR label |
850 | /// target. |
851 | uint32_t ARMMCCodeEmitter:: |
852 | getT2AdrLabelOpValue(const MCInst &MI, unsigned OpIdx, |
853 | SmallVectorImpl<MCFixup> &Fixups, |
854 | const MCSubtargetInfo &STI) const { |
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_adr_pcrel_12,
858 | Fixups, STI); |
859 | int32_t Val = MO.getImm(); |
860 | if (Val == INT32_MIN) |
861 | Val = 0x1000; |
862 | else if (Val < 0) { |
863 | Val *= -1; |
864 | Val |= 0x1000; |
865 | } |
866 | return Val; |
867 | } |
868 | |
869 | /// getITMaskOpValue - Return the architectural encoding of an IT |
870 | /// predication mask, given the MCOperand format. |
871 | uint32_t ARMMCCodeEmitter:: |
872 | getITMaskOpValue(const MCInst &MI, unsigned OpIdx, |
873 | SmallVectorImpl<MCFixup> &Fixups, |
874 | const MCSubtargetInfo &STI) const { |
  const MCOperand MaskMO = MI.getOperand(OpIdx);
  assert(MaskMO.isImm() && "Unexpected operand type!");
877 | |
878 | unsigned Mask = MaskMO.getImm(); |
879 | |
880 | // IT masks are encoded as a sequence of replacement low-order bits |
881 | // for the condition code. So if the low bit of the starting |
882 | // condition code is 1, then we have to flip all the bits above the |
883 | // terminating bit (which is the lowest 1 bit). |
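  // For example, with an odd condition code and Mask = 0b0100: LowBit is
  // 0b0100, BitsAboveLowBit is 0b1000, and the returned mask becomes 0b1100.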
884 | assert(OpIdx > 0 && "IT mask appears first!" ); |
885 | const MCOperand CondMO = MI.getOperand(i: OpIdx-1); |
886 | assert(CondMO.isImm() && "Unexpected operand type!" ); |
887 | if (CondMO.getImm() & 1) { |
888 | unsigned LowBit = Mask & -Mask; |
889 | unsigned BitsAboveLowBit = 0xF & (-LowBit << 1); |
890 | Mask ^= BitsAboveLowBit; |
891 | } |
892 | |
893 | return Mask; |
894 | } |
895 | |
896 | /// getThumbAdrLabelOpValue - Return encoding info for 8-bit immediate ADR label |
897 | /// target. |
898 | uint32_t ARMMCCodeEmitter:: |
899 | getThumbAdrLabelOpValue(const MCInst &MI, unsigned OpIdx, |
900 | SmallVectorImpl<MCFixup> &Fixups, |
901 | const MCSubtargetInfo &STI) const { |
  const MCOperand MO = MI.getOperand(OpIdx);
  if (MO.isExpr())
    return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_thumb_adr_pcrel_10,
905 | Fixups, STI); |
906 | return MO.getImm(); |
907 | } |
908 | |
909 | /// getThumbAddrModeRegRegOpValue - Return encoding info for 'reg + reg' |
910 | /// operand. |
911 | uint32_t ARMMCCodeEmitter:: |
912 | getThumbAddrModeRegRegOpValue(const MCInst &MI, unsigned OpIdx, |
913 | SmallVectorImpl<MCFixup> &, |
914 | const MCSubtargetInfo &STI) const { |
915 | // [Rn, Rm] |
916 | // {5-3} = Rm |
917 | // {2-0} = Rn |
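  // For example, [r1, r4] (encodings 1 and 4) yields (4 << 3) | 1 = 0x21.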
  const MCOperand &MO1 = MI.getOperand(OpIdx);
  const MCOperand &MO2 = MI.getOperand(OpIdx + 1);
  unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO1.getReg());
  unsigned Rm = CTX.getRegisterInfo()->getEncodingValue(MO2.getReg());
922 | return (Rm << 3) | Rn; |
923 | } |
924 | |
925 | /// getMVEShiftImmOpValue - Return encoding info for the 'sz:imm5' |
926 | /// operand. |
927 | uint32_t |
928 | ARMMCCodeEmitter::getMVEShiftImmOpValue(const MCInst &MI, unsigned OpIdx, |
929 | SmallVectorImpl<MCFixup> &Fixups, |
930 | const MCSubtargetInfo &STI) const { |
931 | // {4-0} = szimm5 |
932 | // The value we are trying to encode is an immediate between either the |
933 | // range of [1-7] or [1-15] depending on whether we are dealing with the |
934 | // u8/s8 or the u16/s16 variants respectively. |
935 | // This value is encoded as follows, if ShiftImm is the value within those |
936 | // ranges then the encoding szimm5 = ShiftImm + size, where size is either 8 |
937 | // or 16. |
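  // For example, a u16 variant with ShiftImm == 3 encodes szimm5 as
  // 16 + 3 == 19 (0b10011).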
938 | |
939 | unsigned Size, ShiftImm; |
  switch (MI.getOpcode()) {
941 | case ARM::MVE_VSHLL_imms16bh: |
942 | case ARM::MVE_VSHLL_imms16th: |
943 | case ARM::MVE_VSHLL_immu16bh: |
944 | case ARM::MVE_VSHLL_immu16th: |
945 | Size = 16; |
946 | break; |
947 | case ARM::MVE_VSHLL_imms8bh: |
948 | case ARM::MVE_VSHLL_imms8th: |
949 | case ARM::MVE_VSHLL_immu8bh: |
950 | case ARM::MVE_VSHLL_immu8th: |
951 | Size = 8; |
952 | break; |
953 | default: |
954 | llvm_unreachable("Use of operand not supported by this instruction" ); |
955 | } |
956 | ShiftImm = MI.getOperand(i: OpIdx).getImm(); |
957 | return Size + ShiftImm; |
958 | } |
959 | |
960 | /// getAddrModeImm12OpValue - Return encoding info for 'reg +/- imm12' operand. |
961 | uint32_t ARMMCCodeEmitter:: |
962 | getAddrModeImm12OpValue(const MCInst &MI, unsigned OpIdx, |
963 | SmallVectorImpl<MCFixup> &Fixups, |
964 | const MCSubtargetInfo &STI) const { |
965 | // {17-13} = reg |
966 | // {12} = (U)nsigned (add == '1', sub == '0') |
967 | // {11-0} = imm12 |
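  // For example, [r1, #-4] (Rn encoding 1) has isAdd == false and Imm12 == 4,
  // giving (1 << 13) | 4 = 0x2004.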
968 | unsigned Reg = 0, Imm12 = 0; |
969 | bool isAdd = true; |
  // If the first operand isn't a register, we have a label reference.
  const MCOperand &MO = MI.getOperand(OpIdx);
  if (MO.isReg()) {
    const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
    if (MO1.isImm()) {
      isAdd = EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm12, Fixups, STI);
976 | } else if (MO1.isExpr()) { |
      assert(!isThumb(STI) && !isThumb2(STI) &&
             "Thumb mode requires different encoding");
      Reg = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
      isAdd = false; // 'U' bit is set as part of the fixup.
      MCFixupKind Kind = MCFixupKind(ARM::fixup_arm_ldst_abs_12);
      Fixups.push_back(MCFixup::create(0, MO1.getExpr(), Kind, MI.getLoc()));
983 | } |
984 | } else if (MO.isExpr()) { |
    Reg = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC.
986 | isAdd = false; // 'U' bit is set as part of the fixup. |
987 | MCFixupKind Kind; |
988 | if (isThumb2(STI)) |
989 | Kind = MCFixupKind(ARM::fixup_t2_ldst_pcrel_12); |
990 | else |
991 | Kind = MCFixupKind(ARM::fixup_arm_ldst_pcrel_12); |
    Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));
993 | |
994 | ++MCNumCPRelocations; |
995 | } else { |
996 | Reg = ARM::PC; |
997 | int32_t Offset = MO.getImm(); |
998 | if (Offset == INT32_MIN) { |
999 | Offset = 0; |
1000 | isAdd = false; |
1001 | } else if (Offset < 0) { |
1002 | Offset *= -1; |
1003 | isAdd = false; |
1004 | } |
1005 | Imm12 = Offset; |
1006 | } |
1007 | uint32_t Binary = Imm12 & 0xfff; |
1008 | // Immediate is always encoded as positive. The 'U' bit controls add vs sub. |
1009 | if (isAdd) |
1010 | Binary |= (1 << 12); |
1011 | Binary |= (Reg << 13); |
1012 | return Binary; |
1013 | } |
1014 | |
1015 | template<unsigned Bits, unsigned Shift> |
1016 | uint32_t ARMMCCodeEmitter:: |
1017 | getT2ScaledImmOpValue(const MCInst &MI, unsigned OpIdx, |
1018 | SmallVectorImpl<MCFixup> &Fixups, |
1019 | const MCSubtargetInfo &STI) const { |
1020 | // FIXME: The immediate operand should have already been encoded like this |
1021 | // before ever getting here. The encoder method should just need to combine |
1022 | // the MI operands for the register and the offset into a single |
1023 | // representation for the complex operand in the .td file. This isn't just |
1024 | // style, unfortunately. As-is, we can't represent the distinct encoding |
1025 | // for #-0. |
1026 | |
1027 | // {Bits} = (U)nsigned (add == '1', sub == '0') |
1028 | // {(Bits-1)-0} = immediate |
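  // For example, with Bits == 7 and Shift == 2, an immediate of -8 leaves the
  // U bit (bit 7) clear and encodes the magnitude as 8 >> 2 == 2.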
  int32_t Imm = MI.getOperand(OpIdx).getImm();
1030 | bool isAdd = Imm >= 0; |
1031 | |
1032 | // Immediate is always encoded as positive. The 'U' bit controls add vs sub. |
1033 | if (Imm < 0) |
1034 | Imm = -(uint32_t)Imm; |
1035 | |
1036 | Imm >>= Shift; |
1037 | |
1038 | uint32_t Binary = Imm & ((1U << Bits) - 1); |
1039 | // Immediate is always encoded as positive. The 'U' bit controls add vs sub. |
1040 | if (isAdd) |
1041 | Binary |= (1U << Bits); |
1042 | return Binary; |
1043 | } |
1044 | |
1045 | /// getMveAddrModeRQOpValue - Return encoding info for 'reg, vreg' |
1046 | /// operand. |
1047 | uint32_t ARMMCCodeEmitter:: |
1048 | getMveAddrModeRQOpValue(const MCInst &MI, unsigned OpIdx, |
1049 | SmallVectorImpl<MCFixup> &Fixups, |
1050 | const MCSubtargetInfo &STI) const { |
1051 | // {6-3} Rn |
1052 | // {2-0} Qm |
  const MCOperand &M0 = MI.getOperand(OpIdx);
  const MCOperand &M1 = MI.getOperand(OpIdx + 1);

  unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(M0.getReg());
  unsigned Qm = CTX.getRegisterInfo()->getEncodingValue(M1.getReg());
1058 | |
1059 | assert(Qm < 8 && "Qm is supposed to be encodable in 3 bits" ); |
1060 | |
1061 | return (Rn << 3) | Qm; |
1062 | } |
1063 | |
/// getMveAddrModeQOpValue - Return encoding info for 'reg +/- imm7<<{shift}'
/// operand.
1066 | template<int shift> |
1067 | uint32_t ARMMCCodeEmitter:: |
1068 | getMveAddrModeQOpValue(const MCInst &MI, unsigned OpIdx, |
1069 | SmallVectorImpl<MCFixup> &Fixups, |
1070 | const MCSubtargetInfo &STI) const { |
1071 | // {10-8} Qm |
1072 | // {7-0} Imm |
  const MCOperand &M0 = MI.getOperand(OpIdx);
  const MCOperand &M1 = MI.getOperand(OpIdx + 1);

  unsigned Qm = CTX.getRegisterInfo()->getEncodingValue(M0.getReg());
1077 | int32_t Imm = M1.getImm(); |
1078 | |
1079 | bool isAdd = Imm >= 0; |
1080 | |
1081 | Imm >>= shift; |
1082 | |
1083 | if (!isAdd) |
1084 | Imm = -(uint32_t)Imm; |
1085 | |
1086 | Imm &= 0x7f; |
1087 | |
1088 | if (isAdd) |
1089 | Imm |= 0x80; |
1090 | |
1091 | assert(Qm < 8 && "Qm is supposed to be encodable in 3 bits" ); |
1092 | |
1093 | return (Qm << 8) | Imm; |
1094 | } |
1095 | |
1096 | /// getT2AddrModeImm8s4OpValue - Return encoding info for |
1097 | /// 'reg +/- imm8<<2' operand. |
1098 | uint32_t ARMMCCodeEmitter:: |
1099 | getT2AddrModeImm8s4OpValue(const MCInst &MI, unsigned OpIdx, |
1100 | SmallVectorImpl<MCFixup> &Fixups, |
1101 | const MCSubtargetInfo &STI) const { |
1102 | // {12-9} = reg |
1103 | // {8} = (U)nsigned (add == '1', sub == '0') |
1104 | // {7-0} = imm8 |
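  // For example, [r2, #8] has Reg == 2, Imm8 == 8 and isAdd == true, giving
  // (2 << 9) | (1 << 8) | (8 >> 2) = 0x502.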
1105 | unsigned Reg, Imm8; |
1106 | bool isAdd = true; |
  // If the first operand isn't a register, we have a label reference.
  const MCOperand &MO = MI.getOperand(OpIdx);
  if (!MO.isReg()) {
    Reg = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC.
    Imm8 = 0;
    isAdd = false; // 'U' bit is set as part of the fixup.

    assert(MO.isExpr() && "Unexpected machine operand type!");
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind = MCFixupKind(ARM::fixup_t2_pcrel_10);
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
1118 | |
1119 | ++MCNumCPRelocations; |
1120 | } else |
    isAdd = EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm8, Fixups, STI);
1122 | |
1123 | // FIXME: The immediate operand should have already been encoded like this |
1124 | // before ever getting here. The encoder method should just need to combine |
1125 | // the MI operands for the register and the offset into a single |
1126 | // representation for the complex operand in the .td file. This isn't just |
1127 | // style, unfortunately. As-is, we can't represent the distinct encoding |
1128 | // for #-0. |
1129 | assert(((Imm8 & 0x3) == 0) && "Not a valid immediate!" ); |
1130 | uint32_t Binary = (Imm8 >> 2) & 0xff; |
1131 | // Immediate is always encoded as positive. The 'U' bit controls add vs sub. |
1132 | if (isAdd) |
1133 | Binary |= (1 << 8); |
1134 | Binary |= (Reg << 9); |
1135 | return Binary; |
1136 | } |
1137 | |
1138 | /// getT2AddrModeImm7s4OpValue - Return encoding info for |
1139 | /// 'reg +/- imm7<<2' operand. |
1140 | uint32_t |
1141 | ARMMCCodeEmitter::getT2AddrModeImm7s4OpValue(const MCInst &MI, unsigned OpIdx, |
1142 | SmallVectorImpl<MCFixup> &Fixups, |
1143 | const MCSubtargetInfo &STI) const { |
1144 | // {11-8} = reg |
1145 | // {7} = (A)dd (add == '1', sub == '0') |
1146 | // {6-0} = imm7 |
1147 | unsigned Reg, Imm7; |
  // If the first operand isn't a register, we have a label reference.
  bool isAdd = EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm7, Fixups, STI);
1150 | |
1151 | // FIXME: The immediate operand should have already been encoded like this |
1152 | // before ever getting here. The encoder method should just need to combine |
1153 | // the MI operands for the register and the offset into a single |
1154 | // representation for the complex operand in the .td file. This isn't just |
1155 | // style, unfortunately. As-is, we can't represent the distinct encoding |
1156 | // for #-0. |
1157 | uint32_t Binary = (Imm7 >> 2) & 0xff; |
1158 | // Immediate is always encoded as positive. The 'A' bit controls add vs sub. |
1159 | if (isAdd) |
1160 | Binary |= (1 << 7); |
1161 | Binary |= (Reg << 8); |
1162 | return Binary; |
1163 | } |
1164 | |
1165 | /// getT2AddrModeImm0_1020s4OpValue - Return encoding info for |
1166 | /// 'reg + imm8<<2' operand. |
1167 | uint32_t ARMMCCodeEmitter:: |
1168 | getT2AddrModeImm0_1020s4OpValue(const MCInst &MI, unsigned OpIdx, |
1169 | SmallVectorImpl<MCFixup> &Fixups, |
1170 | const MCSubtargetInfo &STI) const { |
1171 | // {11-8} = reg |
1172 | // {7-0} = imm8 |
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  unsigned Reg = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
  unsigned Imm8 = MO1.getImm();
1177 | return (Reg << 8) | Imm8; |
1178 | } |
1179 | |
1180 | uint32_t ARMMCCodeEmitter::getHiLoImmOpValue(const MCInst &MI, unsigned OpIdx, |
1181 | SmallVectorImpl<MCFixup> &Fixups, |
1182 | const MCSubtargetInfo &STI) const { |
1183 | // {20-16} = imm{15-12} |
1184 | // {11-0} = imm{11-0} |
  const MCOperand &MO = MI.getOperand(OpIdx);
1186 | if (MO.isImm()) |
1187 | // Hi / lo bits already extracted during earlier passes. |
1188 | return static_cast<unsigned>(MO.getImm()); |
1189 | |
  // Handle :upper16:, :lower16:, :upper8_15:, :upper0_7:, :lower8_15: and
  // :lower0_7: assembly prefixes.
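  // For a constant subexpression the relevant bits are extracted directly
  // below; e.g. :upper16:(0x12345678) yields 0x1234 and :lower16: yields
  // 0x5678.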
1192 | const MCExpr *E = MO.getExpr(); |
1193 | MCFixupKind Kind; |
1194 | if (E->getKind() == MCExpr::Specifier) { |
    auto *ARM16Expr = cast<MCSpecifierExpr>(E);
1196 | E = ARM16Expr->getSubExpr(); |
1197 | |
    if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(E)) {
      const int64_t Value = MCE->getValue();
      if (Value > UINT32_MAX)
        report_fatal_error("constant value truncated (limited to 32-bit)");
1202 | |
1203 | switch (ARM16Expr->getSpecifier()) { |
1204 | case ARM::S_HI16: |
1205 | return (int32_t(Value) & 0xffff0000) >> 16; |
1206 | case ARM::S_LO16: |
1207 | return (int32_t(Value) & 0x0000ffff); |
1208 | |
1209 | case ARM::S_HI_8_15: |
1210 | return (int32_t(Value) & 0xff000000) >> 24; |
1211 | case ARM::S_HI_0_7: |
1212 | return (int32_t(Value) & 0x00ff0000) >> 16; |
1213 | case ARM::S_LO_8_15: |
1214 | return (int32_t(Value) & 0x0000ff00) >> 8; |
1215 | case ARM::S_LO_0_7: |
1216 | return (int32_t(Value) & 0x000000ff); |
1217 | |
1218 | default: llvm_unreachable("Unsupported ARMFixup" ); |
1219 | } |
1220 | } |
1221 | |
1222 | switch (ARM16Expr->getSpecifier()) { |
1223 | default: llvm_unreachable("Unsupported ARMFixup" ); |
1224 | case ARM::S_HI16: |
1225 | Kind = MCFixupKind(isThumb(STI) ? ARM::fixup_t2_movt_hi16 |
1226 | : ARM::fixup_arm_movt_hi16); |
1227 | break; |
1228 | case ARM::S_LO16: |
1229 | Kind = MCFixupKind(isThumb(STI) ? ARM::fixup_t2_movw_lo16 |
1230 | : ARM::fixup_arm_movw_lo16); |
1231 | break; |
1232 | case ARM::S_HI_8_15: |
1233 | if (!isThumb(STI)) |
1234 | llvm_unreachable(":upper_8_15: not supported in Arm state" ); |
1235 | Kind = MCFixupKind(ARM::fixup_arm_thumb_upper_8_15); |
1236 | break; |
1237 | case ARM::S_HI_0_7: |
1238 | if (!isThumb(STI)) |
1239 | llvm_unreachable(":upper_0_7: not supported in Arm state" ); |
1240 | Kind = MCFixupKind(ARM::fixup_arm_thumb_upper_0_7); |
1241 | break; |
1242 | case ARM::S_LO_8_15: |
1243 | if (!isThumb(STI)) |
1244 | llvm_unreachable(":lower_8_15: not supported in Arm state" ); |
1245 | Kind = MCFixupKind(ARM::fixup_arm_thumb_lower_8_15); |
1246 | break; |
1247 | case ARM::S_LO_0_7: |
1248 | if (!isThumb(STI)) |
1249 | llvm_unreachable(":lower_0_7: not supported in Arm state" ); |
1250 | Kind = MCFixupKind(ARM::fixup_arm_thumb_lower_0_7); |
1251 | break; |
1252 | } |
1253 | |
    Fixups.push_back(MCFixup::create(0, E, Kind, MI.getLoc()));
1255 | return 0; |
1256 | } |
  // If the expression doesn't have :upper16:, :lower16: on it, it's just a
  // plain immediate expression. Previously those evaluated to the lower 16
  // bits of the expression, regardless of whether we had a movt or a movw,
  // but that led to misleading results. This is disallowed in the AsmParser
  // in validateInstruction(), so it should never happen. The same holds for
  // thumb1 :upper8_15:, :upper0_7:, :lower8_15: or :lower0_7: with movs or
  // adds.
  llvm_unreachable("expression without :upper16:, :lower16:, :upper8_15:, "
                   ":upper0_7:, :lower8_15: or :lower0_7:");
1265 | } |
1266 | |
1267 | uint32_t ARMMCCodeEmitter:: |
1268 | getLdStSORegOpValue(const MCInst &MI, unsigned OpIdx, |
1269 | SmallVectorImpl<MCFixup> &Fixups, |
1270 | const MCSubtargetInfo &STI) const { |
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx+1);
  const MCOperand &MO2 = MI.getOperand(OpIdx+2);
  unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
  unsigned Rm = CTX.getRegisterInfo()->getEncodingValue(MO1.getReg());
  unsigned ShImm = ARM_AM::getAM2Offset(MO2.getImm());
  bool isAdd = ARM_AM::getAM2Op(MO2.getImm()) == ARM_AM::add;
  ARM_AM::ShiftOpc ShOp = ARM_AM::getAM2ShiftOpc(MO2.getImm());
  unsigned SBits = getShiftOp(ShOp);
1280 | |
1281 | // While "lsr #32" and "asr #32" exist, they are encoded with a 0 in the shift |
1282 | // amount. However, it would be an easy mistake to make so check here. |
1283 | assert((ShImm & ~0x1f) == 0 && "Out of range shift amount" ); |
1284 | |
1285 | // {16-13} = Rn |
1286 | // {12} = isAdd |
1287 | // {11-0} = shifter |
1288 | // {3-0} = Rm |
1289 | // {4} = 0 |
1290 | // {6-5} = type |
1291 | // {11-7} = imm |
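  // For example, [r1, r2, lsl #3] with the add bit set encodes as
  // (1 << 13) | (1 << 12) | (3 << 7) | (0 << 5) | 2 = 0x3182.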
1292 | uint32_t Binary = Rm; |
1293 | Binary |= Rn << 13; |
1294 | Binary |= SBits << 5; |
1295 | Binary |= ShImm << 7; |
1296 | if (isAdd) |
1297 | Binary |= 1 << 12; |
1298 | return Binary; |
1299 | } |
1300 | |
1301 | uint32_t ARMMCCodeEmitter:: |
1302 | getAddrMode2OffsetOpValue(const MCInst &MI, unsigned OpIdx, |
1303 | SmallVectorImpl<MCFixup> &Fixups, |
1304 | const MCSubtargetInfo &STI) const { |
1305 | // {13} 1 == imm12, 0 == Rm |
1306 | // {12} isAdd |
1307 | // {11-0} imm12/Rm |
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx+1);
  unsigned Imm = MO1.getImm();
  bool isAdd = ARM_AM::getAM2Op(Imm) == ARM_AM::add;
  bool isReg = MO.getReg().isValid();
  uint32_t Binary = ARM_AM::getAM2Offset(Imm);
  // if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm12
  if (isReg) {
    ARM_AM::ShiftOpc ShOp = ARM_AM::getAM2ShiftOpc(Imm);
    Binary <<= 7;                    // Shift amount is bits [11:7]
    Binary |= getShiftOp(ShOp) << 5; // Shift type is bits [6:5]
    Binary |= CTX.getRegisterInfo()->getEncodingValue(MO.getReg()); // Rm is bits [3:0]
1320 | } |
1321 | return Binary | (isAdd << 12) | (isReg << 13); |
1322 | } |
1323 | |
1324 | uint32_t ARMMCCodeEmitter:: |
1325 | getPostIdxRegOpValue(const MCInst &MI, unsigned OpIdx, |
1326 | SmallVectorImpl<MCFixup> &Fixups, |
1327 | const MCSubtargetInfo &STI) const { |
1328 | // {4} isAdd |
1329 | // {3-0} Rm |
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx+1);
  bool isAdd = MO1.getImm() != 0;
  return CTX.getRegisterInfo()->getEncodingValue(MO.getReg()) | (isAdd << 4);
1334 | } |
1335 | |
1336 | uint32_t ARMMCCodeEmitter:: |
1337 | getAddrMode3OffsetOpValue(const MCInst &MI, unsigned OpIdx, |
1338 | SmallVectorImpl<MCFixup> &Fixups, |
1339 | const MCSubtargetInfo &STI) const { |
1340 | // {9} 1 == imm8, 0 == Rm |
1341 | // {8} isAdd |
1342 | // {7-4} imm7_4/zero |
1343 | // {3-0} imm3_0/Rm |
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx+1);
  unsigned Imm = MO1.getImm();
  bool isAdd = ARM_AM::getAM3Op(Imm) == ARM_AM::add;
  bool isImm = !MO.getReg().isValid();
  uint32_t Imm8 = ARM_AM::getAM3Offset(Imm);
  // if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm8
  if (!isImm)
    Imm8 = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
1353 | return Imm8 | (isAdd << 8) | (isImm << 9); |
1354 | } |
1355 | |
1356 | uint32_t ARMMCCodeEmitter:: |
1357 | getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx, |
1358 | SmallVectorImpl<MCFixup> &Fixups, |
1359 | const MCSubtargetInfo &STI) const { |
1360 | // {13} 1 == imm8, 0 == Rm |
1361 | // {12-9} Rn |
1362 | // {8} isAdd |
1363 | // {7-4} imm7_4/zero |
1364 | // {3-0} imm3_0/Rm |
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx+1);
  const MCOperand &MO2 = MI.getOperand(OpIdx+2);

  // If the first operand isn't a register, we have a label reference.
  if (!MO.isReg()) {
    unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC.

    assert(MO.isExpr() && "Unexpected machine operand type!");
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind = MCFixupKind(ARM::fixup_arm_pcrel_10_unscaled);
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
1377 | |
1378 | ++MCNumCPRelocations; |
1379 | return (Rn << 9) | (1 << 13); |
1380 | } |
1381 | unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(Reg: MO.getReg()); |
1382 | unsigned Imm = MO2.getImm(); |
1383 | bool isAdd = ARM_AM::getAM3Op(AM3Opc: Imm) == ARM_AM::add; |
1384 | bool isImm = !MO1.getReg().isValid(); |
1385 | uint32_t Imm8 = ARM_AM::getAM3Offset(AM3Opc: Imm); |
// If reg +/- reg, Rm will be valid. Otherwise, we have reg +/- imm8.
1387 | if (!isImm) |
1388 | Imm8 = CTX.getRegisterInfo()->getEncodingValue(Reg: MO1.getReg()); |
1389 | return (Rn << 9) | Imm8 | (isAdd << 8) | (isImm << 13); |
1390 | } |
1391 | |
1392 | /// getAddrModeThumbSPOpValue - Encode the t_addrmode_sp operands. |
1393 | uint32_t ARMMCCodeEmitter:: |
1394 | getAddrModeThumbSPOpValue(const MCInst &MI, unsigned OpIdx, |
1395 | SmallVectorImpl<MCFixup> &Fixups, |
1396 | const MCSubtargetInfo &STI) const { |
1397 | // [SP, #imm] |
1398 | // {7-0} = imm8 |
1399 | const MCOperand &MO1 = MI.getOperand(i: OpIdx + 1); |
1400 | assert(MI.getOperand(OpIdx).getReg() == ARM::SP && |
1401 | "Unexpected base register!" ); |
1402 | |
1403 | // The immediate is already shifted for the implicit zeroes, so no change |
1404 | // here. |
1405 | return MO1.getImm() & 0xff; |
1406 | } |
1407 | |
1408 | /// getAddrModeISOpValue - Encode the t_addrmode_is# operands. |
1409 | uint32_t ARMMCCodeEmitter:: |
1410 | getAddrModeISOpValue(const MCInst &MI, unsigned OpIdx, |
1411 | SmallVectorImpl<MCFixup> &Fixups, |
1412 | const MCSubtargetInfo &STI) const { |
1413 | // [Rn, #imm] |
1414 | // {7-3} = imm5 |
1415 | // {2-0} = Rn |
1416 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
1417 | const MCOperand &MO1 = MI.getOperand(i: OpIdx + 1); |
1418 | unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(Reg: MO.getReg()); |
1419 | unsigned Imm5 = MO1.getImm(); |
1420 | return ((Imm5 & 0x1f) << 3) | Rn; |
1421 | } |
1422 | |
1423 | /// getAddrModePCOpValue - Return encoding for t_addrmode_pc operands. |
1424 | uint32_t ARMMCCodeEmitter:: |
1425 | getAddrModePCOpValue(const MCInst &MI, unsigned OpIdx, |
1426 | SmallVectorImpl<MCFixup> &Fixups, |
1427 | const MCSubtargetInfo &STI) const { |
1428 | const MCOperand MO = MI.getOperand(i: OpIdx); |
1429 | if (MO.isExpr()) |
1430 | return ::getBranchTargetOpValue(MI, OpIdx, FixupKind: ARM::fixup_arm_thumb_cp, Fixups, STI); |
1431 | return (MO.getImm() >> 2); |
1432 | } |
1433 | |
/// getAddrMode5OpValue - Return encoding info for 'reg +/- (imm8 << 2)'
/// operand.
1435 | uint32_t ARMMCCodeEmitter:: |
1436 | getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx, |
1437 | SmallVectorImpl<MCFixup> &Fixups, |
1438 | const MCSubtargetInfo &STI) const { |
1439 | // {12-9} = reg |
1440 | // {8} = (U)nsigned (add == '1', sub == '0') |
1441 | // {7-0} = imm8 |
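// For example, [r2, #-8] encodes r2 in {12-9}, leaves the U bit {8} clear,
// and stores imm8 = 2 in {7-0}, since the offset is in words (8 >> 2).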
1442 | unsigned Reg, Imm8; |
1443 | bool isAdd; |
// If the first operand isn't a register, we have a label reference.
1445 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
1446 | if (!MO.isReg()) { |
1447 | Reg = CTX.getRegisterInfo()->getEncodingValue(Reg: ARM::PC); // Rn is PC. |
1448 | Imm8 = 0; |
1449 | isAdd = false; // 'U' bit is handled as part of the fixup. |
1450 | |
1451 | assert(MO.isExpr() && "Unexpected machine operand type!" ); |
1452 | const MCExpr *Expr = MO.getExpr(); |
1453 | MCFixupKind Kind; |
1454 | if (isThumb2(STI)) |
1455 | Kind = MCFixupKind(ARM::fixup_t2_pcrel_10); |
1456 | else |
1457 | Kind = MCFixupKind(ARM::fixup_arm_pcrel_10); |
1458 | Fixups.push_back(Elt: MCFixup::create(Offset: 0, Value: Expr, Kind, Loc: MI.getLoc())); |
1459 | |
1460 | ++MCNumCPRelocations; |
1461 | } else { |
1462 | EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm&: Imm8, Fixups, STI); |
1463 | isAdd = ARM_AM::getAM5Op(AM5Opc: Imm8) == ARM_AM::add; |
1464 | } |
1465 | |
1466 | uint32_t Binary = ARM_AM::getAM5Offset(AM5Opc: Imm8); |
1467 | // Immediate is always encoded as positive. The 'U' bit controls add vs sub. |
1468 | if (isAdd) |
1469 | Binary |= (1 << 8); |
1470 | Binary |= (Reg << 9); |
1471 | return Binary; |
1472 | } |
1473 | |
/// getAddrMode5FP16OpValue - Return encoding info for 'reg +/- (imm8 << 1)'
/// operand.
1475 | uint32_t ARMMCCodeEmitter:: |
1476 | getAddrMode5FP16OpValue(const MCInst &MI, unsigned OpIdx, |
1477 | SmallVectorImpl<MCFixup> &Fixups, |
1478 | const MCSubtargetInfo &STI) const { |
1479 | // {12-9} = reg |
1480 | // {8} = (U)nsigned (add == '1', sub == '0') |
1481 | // {7-0} = imm8 |
1482 | unsigned Reg, Imm8; |
1483 | bool isAdd; |
// If the first operand isn't a register, we have a label reference.
1485 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
1486 | if (!MO.isReg()) { |
1487 | Reg = CTX.getRegisterInfo()->getEncodingValue(Reg: ARM::PC); // Rn is PC. |
1488 | Imm8 = 0; |
1489 | isAdd = false; // 'U' bit is handled as part of the fixup. |
1490 | |
1491 | assert(MO.isExpr() && "Unexpected machine operand type!" ); |
1492 | const MCExpr *Expr = MO.getExpr(); |
1493 | MCFixupKind Kind; |
1494 | if (isThumb2(STI)) |
1495 | Kind = MCFixupKind(ARM::fixup_t2_pcrel_9); |
1496 | else |
1497 | Kind = MCFixupKind(ARM::fixup_arm_pcrel_9); |
1498 | Fixups.push_back(Elt: MCFixup::create(Offset: 0, Value: Expr, Kind, Loc: MI.getLoc())); |
1499 | |
1500 | ++MCNumCPRelocations; |
1501 | } else { |
1502 | EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm&: Imm8, Fixups, STI); |
1503 | isAdd = ARM_AM::getAM5Op(AM5Opc: Imm8) == ARM_AM::add; |
1504 | } |
1505 | |
1506 | uint32_t Binary = ARM_AM::getAM5Offset(AM5Opc: Imm8); |
1507 | // Immediate is always encoded as positive. The 'U' bit controls add vs sub. |
1508 | if (isAdd) |
1509 | Binary |= (1 << 8); |
1510 | Binary |= (Reg << 9); |
1511 | return Binary; |
1512 | } |
1513 | |
1514 | unsigned ARMMCCodeEmitter:: |
1515 | getSORegRegOpValue(const MCInst &MI, unsigned OpIdx, |
1516 | SmallVectorImpl<MCFixup> &Fixups, |
1517 | const MCSubtargetInfo &STI) const { |
1518 | // Sub-operands are [reg, reg, imm]. The first register is Rm, the reg to be |
1519 | // shifted. The second is Rs, the amount to shift by, and the third specifies |
1520 | // the type of the shift. |
1521 | // |
1522 | // {3-0} = Rm. |
1523 | // {4} = 1 |
1524 | // {6-5} = type |
1525 | // {11-8} = Rs |
1526 | // {7} = 0 |
1527 | |
1528 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
1529 | const MCOperand &MO1 = MI.getOperand(i: OpIdx + 1); |
1530 | const MCOperand &MO2 = MI.getOperand(i: OpIdx + 2); |
1531 | ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Op: MO2.getImm()); |
1532 | |
1533 | // Encode Rm. |
1534 | unsigned Binary = CTX.getRegisterInfo()->getEncodingValue(Reg: MO.getReg()); |
1535 | |
1536 | // Encode the shift opcode. |
1537 | unsigned SBits = 0; |
1538 | MCRegister Rs = MO1.getReg(); |
1539 | if (Rs) { |
1540 | // Set shift operand (bit[7:4]). |
1541 | // LSL - 0001 |
1542 | // LSR - 0011 |
1543 | // ASR - 0101 |
1544 | // ROR - 0111 |
1545 | switch (SOpc) { |
1546 | default: llvm_unreachable("Unknown shift opc!" ); |
1547 | case ARM_AM::lsl: SBits = 0x1; break; |
1548 | case ARM_AM::lsr: SBits = 0x3; break; |
1549 | case ARM_AM::asr: SBits = 0x5; break; |
1550 | case ARM_AM::ror: SBits = 0x7; break; |
1551 | } |
1552 | } |
1553 | |
1554 | Binary |= SBits << 4; |
1555 | |
// Encode the shift register Rs in bits [11:8].
1558 | assert(ARM_AM::getSORegOffset(MO2.getImm()) == 0); |
1559 | return Binary | (CTX.getRegisterInfo()->getEncodingValue(Reg: Rs) << ARMII::RegRsShift); |
1560 | } |
1561 | |
1562 | unsigned ARMMCCodeEmitter:: |
1563 | getSORegImmOpValue(const MCInst &MI, unsigned OpIdx, |
1564 | SmallVectorImpl<MCFixup> &Fixups, |
1565 | const MCSubtargetInfo &STI) const { |
1566 | // Sub-operands are [reg, imm]. The first register is Rm, the reg to be |
1567 | // shifted. The second is the amount to shift by. |
1568 | // |
1569 | // {3-0} = Rm. |
1570 | // {4} = 0 |
1571 | // {6-5} = type |
1572 | // {11-7} = imm |
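// For example, an operand like 'r2, lsr #3' encodes Rm = r2 in {3-0}, leaves
// {4} clear, uses type 01 (LSR) in {6-5}, and stores the shift amount 3 in
// {11-7}.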
1573 | |
1574 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
1575 | const MCOperand &MO1 = MI.getOperand(i: OpIdx + 1); |
1576 | ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Op: MO1.getImm()); |
1577 | |
1578 | // Encode Rm. |
1579 | unsigned Binary = CTX.getRegisterInfo()->getEncodingValue(Reg: MO.getReg()); |
1580 | |
1581 | // Encode the shift opcode. |
1582 | unsigned SBits = 0; |
1583 | |
1584 | // Set shift operand (bit[6:4]). |
1585 | // LSL - 000 |
1586 | // LSR - 010 |
1587 | // ASR - 100 |
1588 | // ROR - 110 |
// RRX - 110 and bit[11:7] clear.
1590 | switch (SOpc) { |
1591 | default: llvm_unreachable("Unknown shift opc!" ); |
1592 | case ARM_AM::lsl: SBits = 0x0; break; |
1593 | case ARM_AM::lsr: SBits = 0x2; break; |
1594 | case ARM_AM::asr: SBits = 0x4; break; |
1595 | case ARM_AM::ror: SBits = 0x6; break; |
1596 | case ARM_AM::rrx: |
1597 | Binary |= 0x60; |
1598 | return Binary; |
1599 | } |
1600 | |
1601 | // Encode shift_imm bit[11:7]. |
1602 | Binary |= SBits << 4; |
1603 | unsigned Offset = ARM_AM::getSORegOffset(Op: MO1.getImm()); |
1604 | assert(Offset < 32 && "Offset must be in range 0-31!" ); |
1605 | return Binary | (Offset << 7); |
1606 | } |
1607 | |
1609 | unsigned ARMMCCodeEmitter:: |
1610 | getT2AddrModeSORegOpValue(const MCInst &MI, unsigned OpNum, |
1611 | SmallVectorImpl<MCFixup> &Fixups, |
1612 | const MCSubtargetInfo &STI) const { |
1613 | const MCOperand &MO1 = MI.getOperand(i: OpNum); |
1614 | const MCOperand &MO2 = MI.getOperand(i: OpNum+1); |
1615 | const MCOperand &MO3 = MI.getOperand(i: OpNum+2); |
1616 | |
1617 | // Encoded as [Rn, Rm, imm]. |
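// The result packs the 2-bit shift amount in {1-0}, Rm in {5-2}, and Rn in
// {9-6}.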
1618 | // FIXME: Needs fixup support. |
1619 | unsigned Value = CTX.getRegisterInfo()->getEncodingValue(Reg: MO1.getReg()); |
1620 | Value <<= 4; |
1621 | Value |= CTX.getRegisterInfo()->getEncodingValue(Reg: MO2.getReg()); |
1622 | Value <<= 2; |
1623 | Value |= MO3.getImm(); |
1624 | |
1625 | return Value; |
1626 | } |
1627 | |
1628 | template<unsigned Bits, unsigned Shift> |
1629 | unsigned ARMMCCodeEmitter:: |
1630 | getT2AddrModeImmOpValue(const MCInst &MI, unsigned OpNum, |
1631 | SmallVectorImpl<MCFixup> &Fixups, |
1632 | const MCSubtargetInfo &STI) const { |
1633 | const MCOperand &MO1 = MI.getOperand(i: OpNum); |
1634 | const MCOperand &MO2 = MI.getOperand(i: OpNum+1); |
1635 | |
1636 | // FIXME: Needs fixup support. |
1637 | unsigned Value = CTX.getRegisterInfo()->getEncodingValue(Reg: MO1.getReg()); |
1638 | |
1639 | // If the immediate is B bits long, we need B+1 bits in order |
1640 | // to represent the (inverse of the) sign bit. |
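// For instance, taking Bits = 8 and Shift = 0, an offset of -5 leaves the
// add bit clear and stores the magnitude 5 in the low bits, while +5 also
// sets the add bit at bit position Bits.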
1641 | Value <<= (Bits + 1); |
1642 | int32_t tmp = (int32_t)MO2.getImm(); |
1643 | if (tmp == INT32_MIN) { // represents subtracting zero rather than adding it |
1644 | tmp = 0; |
1645 | } else if (tmp < 0) { |
1646 | tmp = abs(x: tmp); |
1647 | } else { |
1648 | Value |= (1U << Bits); // Set the ADD bit |
1649 | } |
1650 | Value |= (tmp >> Shift) & ((1U << Bits) - 1); |
1651 | return Value; |
1652 | } |
1653 | |
1654 | unsigned ARMMCCodeEmitter:: |
1655 | getT2AddrModeImm8OffsetOpValue(const MCInst &MI, unsigned OpNum, |
1656 | SmallVectorImpl<MCFixup> &Fixups, |
1657 | const MCSubtargetInfo &STI) const { |
1658 | const MCOperand &MO1 = MI.getOperand(i: OpNum); |
1659 | |
1660 | // FIXME: Needs fixup support. |
1661 | unsigned Value = 0; |
1662 | auto tmp = static_cast<uint32_t>(MO1.getImm()); |
1663 | if (static_cast<int32_t>(tmp) < 0) |
1664 | tmp = -tmp; |
1665 | else |
1666 | Value |= 256; // Set the ADD bit |
1667 | Value |= tmp & 255; |
1668 | return Value; |
1669 | } |
1670 | |
1671 | unsigned ARMMCCodeEmitter:: |
1672 | getT2SORegOpValue(const MCInst &MI, unsigned OpIdx, |
1673 | SmallVectorImpl<MCFixup> &Fixups, |
1674 | const MCSubtargetInfo &STI) const { |
1675 | // Sub-operands are [reg, imm]. The first register is Rm, the reg to be |
1676 | // shifted. The second is the amount to shift by. |
1677 | // |
1678 | // {3-0} = Rm. |
1679 | // {4} = 0 |
1680 | // {6-5} = type |
1681 | // {11-7} = imm |
1682 | |
1683 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
1684 | const MCOperand &MO1 = MI.getOperand(i: OpIdx + 1); |
1685 | ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Op: MO1.getImm()); |
1686 | |
1687 | // Encode Rm. |
1688 | unsigned Binary = CTX.getRegisterInfo()->getEncodingValue(Reg: MO.getReg()); |
1689 | |
1690 | // Encode the shift opcode. |
1691 | unsigned SBits = 0; |
1692 | // Set shift operand (bit[6:4]). |
1693 | // LSL - 000 |
1694 | // LSR - 010 |
1695 | // ASR - 100 |
1696 | // ROR - 110 |
1697 | switch (SOpc) { |
1698 | default: llvm_unreachable("Unknown shift opc!" ); |
1699 | case ARM_AM::lsl: SBits = 0x0; break; |
1700 | case ARM_AM::lsr: SBits = 0x2; break; |
1701 | case ARM_AM::asr: SBits = 0x4; break; |
1702 | case ARM_AM::rrx: [[fallthrough]]; |
1703 | case ARM_AM::ror: SBits = 0x6; break; |
1704 | } |
1705 | |
1706 | Binary |= SBits << 4; |
1707 | if (SOpc == ARM_AM::rrx) |
1708 | return Binary; |
1709 | |
1710 | // Encode shift_imm bit[11:7]. |
1711 | return Binary | ARM_AM::getSORegOffset(Op: MO1.getImm()) << 7; |
1712 | } |
1713 | |
1714 | unsigned ARMMCCodeEmitter:: |
1715 | getBitfieldInvertedMaskOpValue(const MCInst &MI, unsigned Op, |
1716 | SmallVectorImpl<MCFixup> &Fixups, |
1717 | const MCSubtargetInfo &STI) const { |
// 10 bits: the low 5 bits are the lsb of the mask, the high 5 bits are the
// msb of the mask.
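// For example, a field covering bits 8-11 arrives as the inverted mask
// ~0xF00; v is then 0xF00, giving lsb = 8 and msb = 11, encoded as
// 8 | (11 << 5).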
1720 | const MCOperand &MO = MI.getOperand(i: Op); |
1721 | uint32_t v = ~MO.getImm(); |
1722 | uint32_t lsb = llvm::countr_zero(Val: v); |
1723 | uint32_t msb = llvm::Log2_32(Value: v); |
1724 | assert(v != 0 && lsb < 32 && msb < 32 && "Illegal bitfield mask!" ); |
1725 | return lsb | (msb << 5); |
1726 | } |
1727 | |
1728 | unsigned ARMMCCodeEmitter:: |
1729 | getRegisterListOpValue(const MCInst &MI, unsigned Op, |
1730 | SmallVectorImpl<MCFixup> &Fixups, |
1731 | const MCSubtargetInfo &STI) const { |
1732 | // VLDM/VSTM/VSCCLRM: |
1733 | // {12-8} = Vd |
1734 | // {7-0} = Number of registers |
1735 | // |
1736 | // LDM/STM: |
1737 | // {15-0} = Bitfield of GPRs. |
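// For example, 'push {r4, r5, lr}' sets bits 4, 5, and 14 of the GPR
// bitfield, while 'vpush {d8-d10}' places D8's encoding in {12-8} and the
// doubled register count (6) in {7-0}.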
1738 | MCRegister Reg = MI.getOperand(i: Op).getReg(); |
1739 | bool SPRRegs = ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg); |
1740 | bool DPRRegs = ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg); |
1741 | |
1742 | unsigned Binary = 0; |
1743 | |
1744 | if (SPRRegs || DPRRegs || Reg == ARM::VPR) { |
1745 | // VLDM/VSTM/VSCCLRM |
1746 | unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg); |
1747 | unsigned NumRegs = (MI.getNumOperands() - Op) & 0xff; |
1748 | Binary |= (RegNo & 0x1f) << 8; |
1749 | |
1750 | if (MI.getOpcode() == ARM::VSCCLRMD) |
1751 | // Ignore VPR |
1752 | --NumRegs; |
1753 | else if (MI.getOpcode() == ARM::VSCCLRMS) { |
1754 | // The register list can contain both S registers and D registers, with D |
1755 | // registers counting as two registers. VPR doesn't count towards the |
1756 | // number of registers. |
1757 | NumRegs = 0; |
1758 | for (unsigned I = Op, E = MI.getNumOperands(); I < E; ++I) { |
1759 | Reg = MI.getOperand(i: I).getReg(); |
1760 | if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg)) |
1761 | NumRegs += 1; |
1762 | else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) |
1763 | NumRegs += 2; |
1764 | } |
1765 | } |
1766 | if (SPRRegs) |
1767 | Binary |= NumRegs; |
1768 | else |
1769 | Binary |= NumRegs * 2; |
1770 | } else { |
1771 | const MCRegisterInfo &MRI = *CTX.getRegisterInfo(); |
1772 | assert(is_sorted(drop_begin(MI, Op), |
1773 | [&](const MCOperand &LHS, const MCOperand &RHS) { |
1774 | return MRI.getEncodingValue(LHS.getReg()) < |
1775 | MRI.getEncodingValue(RHS.getReg()); |
1776 | })); |
1777 | for (unsigned I = Op, E = MI.getNumOperands(); I < E; ++I) { |
1778 | unsigned RegNo = MRI.getEncodingValue(Reg: MI.getOperand(i: I).getReg()); |
1779 | Binary |= 1 << RegNo; |
1780 | } |
1781 | } |
1782 | |
1783 | return Binary; |
1784 | } |
1785 | |
1786 | /// getAddrMode6AddressOpValue - Encode an addrmode6 register number along |
1787 | /// with the alignment operand. |
1788 | unsigned ARMMCCodeEmitter:: |
1789 | getAddrMode6AddressOpValue(const MCInst &MI, unsigned Op, |
1790 | SmallVectorImpl<MCFixup> &Fixups, |
1791 | const MCSubtargetInfo &STI) const { |
1792 | const MCOperand &Reg = MI.getOperand(i: Op); |
1793 | const MCOperand &Imm = MI.getOperand(i: Op + 1); |
1794 | |
1795 | unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg: Reg.getReg()); |
1796 | unsigned Align = 0; |
1797 | |
1798 | switch (Imm.getImm()) { |
1799 | default: break; |
1800 | case 2: |
1801 | case 4: |
1802 | case 8: Align = 0x01; break; |
1803 | case 16: Align = 0x02; break; |
1804 | case 32: Align = 0x03; break; |
1805 | } |
1806 | |
1807 | return RegNo | (Align << 4); |
1808 | } |
1809 | |
1810 | /// getAddrMode6OneLane32AddressOpValue - Encode an addrmode6 register number |
1811 | /// along with the alignment operand for use in VST1 and VLD1 with size 32. |
1812 | unsigned ARMMCCodeEmitter:: |
1813 | getAddrMode6OneLane32AddressOpValue(const MCInst &MI, unsigned Op, |
1814 | SmallVectorImpl<MCFixup> &Fixups, |
1815 | const MCSubtargetInfo &STI) const { |
1816 | const MCOperand &Reg = MI.getOperand(i: Op); |
1817 | const MCOperand &Imm = MI.getOperand(i: Op + 1); |
1818 | |
1819 | unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg: Reg.getReg()); |
1820 | unsigned Align = 0; |
1821 | |
1822 | switch (Imm.getImm()) { |
1823 | default: break; |
1824 | case 8: |
1825 | case 16: |
1826 | case 32: // Default '0' value for invalid alignments of 8, 16, 32 bytes. |
1827 | case 2: Align = 0x00; break; |
1828 | case 4: Align = 0x03; break; |
1829 | } |
1830 | |
1831 | return RegNo | (Align << 4); |
1832 | } |
1833 | |
1835 | /// getAddrMode6DupAddressOpValue - Encode an addrmode6 register number and |
1836 | /// alignment operand for use in VLD-dup instructions. This is the same as |
1837 | /// getAddrMode6AddressOpValue except for the alignment encoding, which is |
1838 | /// different for VLD4-dup. |
1839 | unsigned ARMMCCodeEmitter:: |
1840 | getAddrMode6DupAddressOpValue(const MCInst &MI, unsigned Op, |
1841 | SmallVectorImpl<MCFixup> &Fixups, |
1842 | const MCSubtargetInfo &STI) const { |
1843 | const MCOperand &Reg = MI.getOperand(i: Op); |
1844 | const MCOperand &Imm = MI.getOperand(i: Op + 1); |
1845 | |
1846 | unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg: Reg.getReg()); |
1847 | unsigned Align = 0; |
1848 | |
1849 | switch (Imm.getImm()) { |
1850 | default: break; |
1851 | case 2: |
1852 | case 4: |
1853 | case 8: Align = 0x01; break; |
1854 | case 16: Align = 0x03; break; |
1855 | } |
1856 | |
1857 | return RegNo | (Align << 4); |
1858 | } |
1859 | |
1860 | unsigned ARMMCCodeEmitter:: |
1861 | getAddrMode6OffsetOpValue(const MCInst &MI, unsigned Op, |
1862 | SmallVectorImpl<MCFixup> &Fixups, |
1863 | const MCSubtargetInfo &STI) const { |
1864 | const MCOperand &MO = MI.getOperand(i: Op); |
1865 | if (!MO.getReg()) |
1866 | return 0x0D; |
1867 | return CTX.getRegisterInfo()->getEncodingValue(Reg: MO.getReg()); |
1868 | } |
1869 | |
1870 | unsigned ARMMCCodeEmitter:: |
1871 | getShiftRight8Imm(const MCInst &MI, unsigned Op, |
1872 | SmallVectorImpl<MCFixup> &Fixups, |
1873 | const MCSubtargetInfo &STI) const { |
1874 | return 8 - MI.getOperand(i: Op).getImm(); |
1875 | } |
1876 | |
1877 | unsigned ARMMCCodeEmitter:: |
1878 | getShiftRight16Imm(const MCInst &MI, unsigned Op, |
1879 | SmallVectorImpl<MCFixup> &Fixups, |
1880 | const MCSubtargetInfo &STI) const { |
1881 | return 16 - MI.getOperand(i: Op).getImm(); |
1882 | } |
1883 | |
1884 | unsigned ARMMCCodeEmitter:: |
1885 | getShiftRight32Imm(const MCInst &MI, unsigned Op, |
1886 | SmallVectorImpl<MCFixup> &Fixups, |
1887 | const MCSubtargetInfo &STI) const { |
1888 | return 32 - MI.getOperand(i: Op).getImm(); |
1889 | } |
1890 | |
1891 | unsigned ARMMCCodeEmitter:: |
1892 | getShiftRight64Imm(const MCInst &MI, unsigned Op, |
1893 | SmallVectorImpl<MCFixup> &Fixups, |
1894 | const MCSubtargetInfo &STI) const { |
1895 | return 64 - MI.getOperand(i: Op).getImm(); |
1896 | } |
1897 | |
1898 | void ARMMCCodeEmitter::encodeInstruction(const MCInst &MI, |
1899 | SmallVectorImpl<char> &CB, |
1900 | SmallVectorImpl<MCFixup> &Fixups, |
1901 | const MCSubtargetInfo &STI) const { |
1902 | // Pseudo instructions don't get encoded. |
1903 | const MCInstrDesc &Desc = MCII.get(Opcode: MI.getOpcode()); |
1904 | uint64_t TSFlags = Desc.TSFlags; |
1905 | if ((TSFlags & ARMII::FormMask) == ARMII::Pseudo) |
1906 | return; |
1907 | |
1908 | int Size; |
1909 | if (Desc.getSize() == 2 || Desc.getSize() == 4) |
1910 | Size = Desc.getSize(); |
1911 | else |
1912 | llvm_unreachable("Unexpected instruction size!" ); |
1913 | |
1914 | auto Endian = |
1915 | IsLittleEndian ? llvm::endianness::little : llvm::endianness::big; |
1916 | uint32_t Binary = getBinaryCodeForInstr(MI, Fixups, STI); |
1917 | if (Size == 2) { |
1918 | support::endian::write<uint16_t>(Out&: CB, V: Binary, E: Endian); |
1919 | } else if (isThumb(STI)) { |
1920 | // Thumb 32-bit wide instructions need to emit the high order halfword |
1921 | // first. |
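// For example, a 32-bit encoding 0xAABBCCDD is emitted as the halfword
// 0xAABB followed by 0xCCDD, each written in the target endianness.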
1922 | support::endian::write<uint16_t>(Out&: CB, V: Binary >> 16, E: Endian); |
1923 | support::endian::write<uint16_t>(Out&: CB, V: Binary & 0xffff, E: Endian); |
1924 | } else { |
1925 | support::endian::write<uint32_t>(Out&: CB, V: Binary, E: Endian); |
1926 | } |
1927 | ++MCNumEmitted; // Keep track of the # of mi's emitted. |
1928 | } |
1929 | |
1930 | template <bool isNeg, ARM::Fixups fixup> |
1931 | uint32_t |
1932 | ARMMCCodeEmitter::getBFTargetOpValue(const MCInst &MI, unsigned OpIdx, |
1933 | SmallVectorImpl<MCFixup> &Fixups, |
1934 | const MCSubtargetInfo &STI) const { |
1935 | const MCOperand MO = MI.getOperand(i: OpIdx); |
1936 | if (MO.isExpr()) |
1937 | return ::getBranchTargetOpValue(MI, OpIdx, FixupKind: fixup, Fixups, STI); |
1938 | return isNeg ? -(MO.getImm() >> 1) : (MO.getImm() >> 1); |
1939 | } |
1940 | |
1941 | uint32_t |
1942 | ARMMCCodeEmitter::getBFAfterTargetOpValue(const MCInst &MI, unsigned OpIdx, |
1943 | SmallVectorImpl<MCFixup> &Fixups, |
1944 | const MCSubtargetInfo &STI) const { |
1945 | const MCOperand MO = MI.getOperand(i: OpIdx); |
1946 | const MCOperand BranchMO = MI.getOperand(i: 0); |
1947 | |
1948 | if (MO.isExpr()) { |
1949 | assert(BranchMO.isExpr()); |
1950 | const MCExpr *DiffExpr = MCBinaryExpr::createSub( |
1951 | LHS: MO.getExpr(), RHS: BranchMO.getExpr(), Ctx&: CTX); |
1952 | MCFixupKind Kind = MCFixupKind(ARM::fixup_bfcsel_else_target); |
1953 | Fixups.push_back(Elt: llvm::MCFixup::create(Offset: 0, Value: DiffExpr, Kind, Loc: MI.getLoc())); |
1954 | return 0; |
1955 | } |
1956 | |
1957 | assert(MO.isImm() && BranchMO.isImm()); |
1958 | int Diff = MO.getImm() - BranchMO.getImm(); |
1959 | assert(Diff == 4 || Diff == 2); |
1960 | |
1961 | return Diff == 4; |
1962 | } |
1963 | |
1964 | uint32_t ARMMCCodeEmitter::getVPTMaskOpValue(const MCInst &MI, unsigned OpIdx, |
1965 | SmallVectorImpl<MCFixup> &Fixups, |
const MCSubtargetInfo &STI) const {
1967 | const MCOperand MO = MI.getOperand(i: OpIdx); |
1968 | assert(MO.isImm() && "Unexpected operand type!" ); |
1969 | |
1970 | int Value = MO.getImm(); |
1971 | int Imm = 0; |
1972 | |
1973 | // VPT Masks are actually encoded as a series of invert/don't invert bits, |
1974 | // rather than true/false bits. |
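// Walking from the MSB down, each output bit is set when the mask bit
// differs from the previous one; once all remaining lower bits of the input
// are zero, a terminating 1 is emitted at that position and the loop stops.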
1975 | unsigned PrevBit = 0; |
1976 | for (int i = 3; i >= 0; --i) { |
1977 | unsigned Bit = (Value >> i) & 1; |
1978 | |
1979 | // Check if we are at the end of the mask. |
1980 | if ((Value & ~(~0U << i)) == 0) { |
1981 | Imm |= (1 << i); |
1982 | break; |
1983 | } |
1984 | |
1985 | // Convert the bit in the mask based on the previous bit. |
1986 | if (Bit != PrevBit) |
1987 | Imm |= (1 << i); |
1988 | |
1989 | PrevBit = Bit; |
1990 | } |
1991 | |
1992 | return Imm; |
1993 | } |
1994 | |
1995 | uint32_t ARMMCCodeEmitter::getRestrictedCondCodeOpValue( |
1996 | const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups, |
1997 | const MCSubtargetInfo &STI) const { |
1998 | |
1999 | const MCOperand MO = MI.getOperand(i: OpIdx); |
2000 | assert(MO.isImm() && "Unexpected operand type!" ); |
2001 | |
2002 | switch (MO.getImm()) { |
2003 | default: |
2004 | assert(0 && "Unexpected Condition!" ); |
2005 | return 0; |
2006 | case ARMCC::HS: |
2007 | case ARMCC::EQ: |
2008 | return 0; |
2009 | case ARMCC::HI: |
2010 | case ARMCC::NE: |
2011 | return 1; |
2012 | case ARMCC::GE: |
2013 | return 4; |
2014 | case ARMCC::LT: |
2015 | return 5; |
2016 | case ARMCC::GT: |
2017 | return 6; |
2018 | case ARMCC::LE: |
2019 | return 7; |
2020 | } |
2021 | } |
2022 | |
2023 | uint32_t ARMMCCodeEmitter:: |
2024 | getPowerTwoOpValue(const MCInst &MI, unsigned OpIdx, |
2025 | SmallVectorImpl<MCFixup> &Fixups, |
2026 | const MCSubtargetInfo &STI) const { |
2027 | const MCOperand &MO = MI.getOperand(i: OpIdx); |
2028 | assert(MO.isImm() && "Unexpected operand type!" ); |
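// For example, an immediate of 8 (a power of two) encodes as 3.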
2029 | return llvm::countr_zero(Val: (uint64_t)MO.getImm()); |
2030 | } |
2031 | |
2032 | template <unsigned start> |
2033 | uint32_t ARMMCCodeEmitter:: |
2034 | getMVEPairVectorIndexOpValue(const MCInst &MI, unsigned OpIdx, |
2035 | SmallVectorImpl<MCFixup> &Fixups, |
2036 | const MCSubtargetInfo &STI) const { |
2037 | const MCOperand MO = MI.getOperand(i: OpIdx); |
2038 | assert(MO.isImm() && "Unexpected operand type!" ); |
2039 | |
2040 | int Value = MO.getImm(); |
2041 | return Value - start; |
2042 | } |
2043 | |
2044 | #include "ARMGenMCCodeEmitter.inc" |
2045 | |
2046 | MCCodeEmitter *llvm::createARMLEMCCodeEmitter(const MCInstrInfo &MCII, |
2047 | MCContext &Ctx) { |
2048 | return new ARMMCCodeEmitter(MCII, Ctx, true); |
2049 | } |
2050 | |
2051 | MCCodeEmitter *llvm::createARMBEMCCodeEmitter(const MCInstrInfo &MCII, |
2052 | MCContext &Ctx) { |
2053 | return new ARMMCCodeEmitter(MCII, Ctx, false); |
2054 | } |
2055 | |