| 1 | //===- AArch64AddressingModes.h - AArch64 Addressing Modes ------*- C++ -*-===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // This file contains the AArch64 addressing mode implementation stuff. |
| 10 | // |
| 11 | //===----------------------------------------------------------------------===// |
| 12 | |
| 13 | #ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64ADDRESSINGMODES_H |
| 14 | #define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64ADDRESSINGMODES_H |
| 15 | |
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/bit.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <array>
#include <cassert>
#include <cstdint>
#include <limits>
#include <type_traits>
| 22 | |
| 23 | namespace llvm { |
| 24 | |
| 25 | /// AArch64_AM - AArch64 Addressing Mode Stuff |
| 26 | namespace AArch64_AM { |
| 27 | |
| 28 | //===----------------------------------------------------------------------===// |
| 29 | // Shifts |
| 30 | // |
| 31 | |
/// ShiftExtendType - The kinds of shift and register-extend operations an
/// AArch64 operand may carry. Shifts (LSL..MSL) and extends (UXTB..SXTX)
/// share one enum because operand encodings accept either family.
enum ShiftExtendType {
  InvalidShiftExtend = -1, // Sentinel for an unrecognized encoding.
  LSL = 0, // Logical shift left.
  LSR,     // Logical shift right.
  ASR,     // Arithmetic shift right.
  ROR,     // Rotate right.
  MSL,     // Shift left inserting ones (SIMD modified immediates).

  UXTB,    // Unsigned extend byte.
  UXTH,    // Unsigned extend halfword.
  UXTW,    // Unsigned extend word.
  UXTX,    // Unsigned extend doubleword.

  SXTB,    // Signed extend byte.
  SXTH,    // Signed extend halfword.
  SXTW,    // Signed extend word.
  SXTX,    // Signed extend doubleword.
};
| 50 | |
| 51 | /// isSignExtendShiftType - Returns true if \p Type is sign extending. |
| 52 | static inline bool isSignExtendShiftType(AArch64_AM::ShiftExtendType Type) { |
| 53 | switch (Type) { |
| 54 | case AArch64_AM::SXTB: |
| 55 | case AArch64_AM::SXTH: |
| 56 | case AArch64_AM::SXTW: |
| 57 | case AArch64_AM::SXTX: |
| 58 | return true; |
| 59 | default: |
| 60 | return false; |
| 61 | } |
| 62 | } |
| 63 | |
| 64 | /// getShiftName - Get the string encoding for the shift type. |
| 65 | static inline const char *getShiftExtendName(AArch64_AM::ShiftExtendType ST) { |
| 66 | switch (ST) { |
| 67 | default: llvm_unreachable("unhandled shift type!" ); |
| 68 | case AArch64_AM::LSL: return "lsl" ; |
| 69 | case AArch64_AM::LSR: return "lsr" ; |
| 70 | case AArch64_AM::ASR: return "asr" ; |
| 71 | case AArch64_AM::ROR: return "ror" ; |
| 72 | case AArch64_AM::MSL: return "msl" ; |
| 73 | case AArch64_AM::UXTB: return "uxtb" ; |
| 74 | case AArch64_AM::UXTH: return "uxth" ; |
| 75 | case AArch64_AM::UXTW: return "uxtw" ; |
| 76 | case AArch64_AM::UXTX: return "uxtx" ; |
| 77 | case AArch64_AM::SXTB: return "sxtb" ; |
| 78 | case AArch64_AM::SXTH: return "sxth" ; |
| 79 | case AArch64_AM::SXTW: return "sxtw" ; |
| 80 | case AArch64_AM::SXTX: return "sxtx" ; |
| 81 | } |
| 82 | return nullptr; |
| 83 | } |
| 84 | |
| 85 | /// getShiftType - Extract the shift type. |
| 86 | static inline AArch64_AM::ShiftExtendType getShiftType(unsigned Imm) { |
| 87 | switch ((Imm >> 6) & 0x7) { |
| 88 | default: return AArch64_AM::InvalidShiftExtend; |
| 89 | case 0: return AArch64_AM::LSL; |
| 90 | case 1: return AArch64_AM::LSR; |
| 91 | case 2: return AArch64_AM::ASR; |
| 92 | case 3: return AArch64_AM::ROR; |
| 93 | case 4: return AArch64_AM::MSL; |
| 94 | } |
| 95 | } |
| 96 | |
/// getShiftValue - Extract the 6-bit shift amount (bits {5-0}) from an
/// encoded shifter immediate.
static inline unsigned getShiftValue(unsigned Imm) {
  return Imm % 64;
}
| 101 | |
| 102 | /// getShifterImm - Encode the shift type and amount: |
| 103 | /// imm: 6-bit shift amount |
| 104 | /// shifter: 000 ==> lsl |
| 105 | /// 001 ==> lsr |
| 106 | /// 010 ==> asr |
| 107 | /// 011 ==> ror |
| 108 | /// 100 ==> msl |
| 109 | /// {8-6} = shifter |
| 110 | /// {5-0} = imm |
| 111 | static inline unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, |
| 112 | unsigned Imm) { |
| 113 | assert((Imm & 0x3f) == Imm && "Illegal shifted immediate value!" ); |
| 114 | unsigned STEnc = 0; |
| 115 | switch (ST) { |
| 116 | default: llvm_unreachable("Invalid shift requested" ); |
| 117 | case AArch64_AM::LSL: STEnc = 0; break; |
| 118 | case AArch64_AM::LSR: STEnc = 1; break; |
| 119 | case AArch64_AM::ASR: STEnc = 2; break; |
| 120 | case AArch64_AM::ROR: STEnc = 3; break; |
| 121 | case AArch64_AM::MSL: STEnc = 4; break; |
| 122 | } |
| 123 | return (STEnc << 6) | (Imm & 0x3f); |
| 124 | } |
| 125 | |
| 126 | //===----------------------------------------------------------------------===// |
| 127 | // Extends |
| 128 | // |
| 129 | |
/// getArithShiftValue - Extract the 3-bit arithmetic shift amount
/// (bits {2-0}) from an encoded arith-extend immediate.
static inline unsigned getArithShiftValue(unsigned Imm) {
  return Imm % 8;
}
| 134 | |
| 135 | /// getExtendType - Extract the extend type for operands of arithmetic ops. |
| 136 | static inline AArch64_AM::ShiftExtendType getExtendType(unsigned Imm) { |
| 137 | assert((Imm & 0x7) == Imm && "invalid immediate!" ); |
| 138 | switch (Imm) { |
| 139 | default: llvm_unreachable("Compiler bug!" ); |
| 140 | case 0: return AArch64_AM::UXTB; |
| 141 | case 1: return AArch64_AM::UXTH; |
| 142 | case 2: return AArch64_AM::UXTW; |
| 143 | case 3: return AArch64_AM::UXTX; |
| 144 | case 4: return AArch64_AM::SXTB; |
| 145 | case 5: return AArch64_AM::SXTH; |
| 146 | case 6: return AArch64_AM::SXTW; |
| 147 | case 7: return AArch64_AM::SXTX; |
| 148 | } |
| 149 | } |
| 150 | |
| 151 | static inline AArch64_AM::ShiftExtendType getArithExtendType(unsigned Imm) { |
| 152 | return getExtendType(Imm: (Imm >> 3) & 0x7); |
| 153 | } |
| 154 | |
| 155 | /// Mapping from extend bits to required operation: |
| 156 | /// shifter: 000 ==> uxtb |
| 157 | /// 001 ==> uxth |
| 158 | /// 010 ==> uxtw |
| 159 | /// 011 ==> uxtx |
| 160 | /// 100 ==> sxtb |
| 161 | /// 101 ==> sxth |
| 162 | /// 110 ==> sxtw |
| 163 | /// 111 ==> sxtx |
| 164 | inline unsigned getExtendEncoding(AArch64_AM::ShiftExtendType ET) { |
| 165 | switch (ET) { |
| 166 | default: llvm_unreachable("Invalid extend type requested" ); |
| 167 | case AArch64_AM::UXTB: return 0; break; |
| 168 | case AArch64_AM::UXTH: return 1; break; |
| 169 | case AArch64_AM::UXTW: return 2; break; |
| 170 | case AArch64_AM::UXTX: return 3; break; |
| 171 | case AArch64_AM::SXTB: return 4; break; |
| 172 | case AArch64_AM::SXTH: return 5; break; |
| 173 | case AArch64_AM::SXTW: return 6; break; |
| 174 | case AArch64_AM::SXTX: return 7; break; |
| 175 | } |
| 176 | } |
| 177 | |
| 178 | /// getArithExtendImm - Encode the extend type and shift amount for an |
| 179 | /// arithmetic instruction: |
| 180 | /// imm: 3-bit extend amount |
| 181 | /// {5-3} = shifter |
| 182 | /// {2-0} = imm3 |
| 183 | static inline unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, |
| 184 | unsigned Imm) { |
| 185 | assert((Imm & 0x7) == Imm && "Illegal shifted immediate value!" ); |
| 186 | return (getExtendEncoding(ET) << 3) | (Imm & 0x7); |
| 187 | } |
| 188 | |
/// getMemDoShift - Extract the "do shift" flag (bit {0}) from a load/store
/// extend immediate.
static inline bool getMemDoShift(unsigned Imm) {
  return Imm & 0x1;
}
| 194 | |
| 195 | /// getExtendType - Extract the extend type for the offset operand of |
| 196 | /// loads/stores. |
| 197 | static inline AArch64_AM::ShiftExtendType getMemExtendType(unsigned Imm) { |
| 198 | return getExtendType(Imm: (Imm >> 1) & 0x7); |
| 199 | } |
| 200 | |
| 201 | /// getExtendImm - Encode the extend type and amount for a load/store inst: |
| 202 | /// doshift: should the offset be scaled by the access size |
| 203 | /// shifter: 000 ==> uxtb |
| 204 | /// 001 ==> uxth |
| 205 | /// 010 ==> uxtw |
| 206 | /// 011 ==> uxtx |
| 207 | /// 100 ==> sxtb |
| 208 | /// 101 ==> sxth |
| 209 | /// 110 ==> sxtw |
| 210 | /// 111 ==> sxtx |
| 211 | /// {3-1} = shifter |
| 212 | /// {0} = doshift |
| 213 | static inline unsigned getMemExtendImm(AArch64_AM::ShiftExtendType ET, |
| 214 | bool DoShift) { |
| 215 | return (getExtendEncoding(ET) << 1) | unsigned(DoShift); |
| 216 | } |
| 217 | |
/// ror - Rotate \p elt right by one position within a \p size-bit field.
/// Assumes elt occupies only the low \p size bits.
static inline uint64_t ror(uint64_t elt, unsigned size) {
  const uint64_t Wrapped = elt & 1;
  return (elt >> 1) | (Wrapped << (size - 1));
}
| 221 | |
| 222 | /// processLogicalImmediate - Determine if an immediate value can be encoded |
| 223 | /// as the immediate operand of a logical instruction for the given register |
| 224 | /// size. If so, return true with "encoding" set to the encoded value in |
| 225 | /// the form N:immr:imms. |
| 226 | static inline bool processLogicalImmediate(uint64_t Imm, unsigned RegSize, |
| 227 | uint64_t &Encoding) { |
| 228 | if (Imm == 0ULL || Imm == ~0ULL || |
| 229 | (RegSize != 64 && |
| 230 | (Imm >> RegSize != 0 || Imm == (~0ULL >> (64 - RegSize))))) |
| 231 | return false; |
| 232 | |
| 233 | // First, determine the element size. |
| 234 | unsigned Size = RegSize; |
| 235 | |
| 236 | do { |
| 237 | Size /= 2; |
| 238 | uint64_t Mask = (1ULL << Size) - 1; |
| 239 | |
| 240 | if ((Imm & Mask) != ((Imm >> Size) & Mask)) { |
| 241 | Size *= 2; |
| 242 | break; |
| 243 | } |
| 244 | } while (Size > 2); |
| 245 | |
| 246 | // Second, determine the rotation to make the element be: 0^m 1^n. |
| 247 | uint32_t CTO, I; |
| 248 | uint64_t Mask = ((uint64_t)-1LL) >> (64 - Size); |
| 249 | Imm &= Mask; |
| 250 | |
| 251 | if (isShiftedMask_64(Value: Imm)) { |
| 252 | I = llvm::countr_zero(Val: Imm); |
| 253 | assert(I < 64 && "undefined behavior" ); |
| 254 | CTO = llvm::countr_one(Value: Imm >> I); |
| 255 | } else { |
| 256 | Imm |= ~Mask; |
| 257 | if (!isShiftedMask_64(Value: ~Imm)) |
| 258 | return false; |
| 259 | |
| 260 | unsigned CLO = llvm::countl_one(Value: Imm); |
| 261 | I = 64 - CLO; |
| 262 | CTO = CLO + llvm::countr_one(Value: Imm) - (64 - Size); |
| 263 | } |
| 264 | |
| 265 | // Encode in Immr the number of RORs it would take to get *from* 0^m 1^n |
| 266 | // to our target value, where I is the number of RORs to go the opposite |
| 267 | // direction. |
| 268 | assert(Size > I && "I should be smaller than element size" ); |
| 269 | unsigned Immr = (Size - I) & (Size - 1); |
| 270 | |
| 271 | // If size has a 1 in the n'th bit, create a value that has zeroes in |
| 272 | // bits [0, n] and ones above that. |
| 273 | uint64_t NImms = ~(Size-1) << 1; |
| 274 | |
| 275 | // Or the CTO value into the low bits, which must be below the Nth bit |
| 276 | // bit mentioned above. |
| 277 | NImms |= (CTO-1); |
| 278 | |
| 279 | // Extract the seventh bit and toggle it to create the N field. |
| 280 | unsigned N = ((NImms >> 6) & 1) ^ 1; |
| 281 | |
| 282 | Encoding = (N << 12) | (Immr << 6) | (NImms & 0x3f); |
| 283 | return true; |
| 284 | } |
| 285 | |
| 286 | /// isLogicalImmediate - Return true if the immediate is valid for a logical |
| 287 | /// immediate instruction of the given register size. Return false otherwise. |
| 288 | static inline bool isLogicalImmediate(uint64_t imm, unsigned regSize) { |
| 289 | uint64_t encoding; |
| 290 | return processLogicalImmediate(Imm: imm, RegSize: regSize, Encoding&: encoding); |
| 291 | } |
| 292 | |
| 293 | /// encodeLogicalImmediate - Return the encoded immediate value for a logical |
| 294 | /// immediate instruction of the given register size. |
| 295 | static inline uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize) { |
| 296 | uint64_t encoding = 0; |
| 297 | bool res = processLogicalImmediate(Imm: imm, RegSize: regSize, Encoding&: encoding); |
| 298 | assert(res && "invalid logical immediate" ); |
| 299 | (void)res; |
| 300 | return encoding; |
| 301 | } |
| 302 | |
| 303 | /// decodeLogicalImmediate - Decode a logical immediate value in the form |
| 304 | /// "N:immr:imms" (where the immr and imms fields are each 6 bits) into the |
| 305 | /// integer value it represents with regSize bits. |
| 306 | static inline uint64_t decodeLogicalImmediate(uint64_t val, unsigned regSize) { |
| 307 | // Extract the N, imms, and immr fields. |
| 308 | unsigned N = (val >> 12) & 1; |
| 309 | unsigned immr = (val >> 6) & 0x3f; |
| 310 | unsigned imms = val & 0x3f; |
| 311 | |
| 312 | assert((regSize == 64 || N == 0) && "undefined logical immediate encoding" ); |
| 313 | int len = 31 - llvm::countl_zero(Val: (N << 6) | (~imms & 0x3f)); |
| 314 | assert(len >= 0 && "undefined logical immediate encoding" ); |
| 315 | unsigned size = (1 << len); |
| 316 | unsigned R = immr & (size - 1); |
| 317 | unsigned S = imms & (size - 1); |
| 318 | assert(S != size - 1 && "undefined logical immediate encoding" ); |
| 319 | uint64_t pattern = (1ULL << (S + 1)) - 1; |
| 320 | for (unsigned i = 0; i < R; ++i) |
| 321 | pattern = ror(elt: pattern, size); |
| 322 | |
| 323 | // Replicate the pattern to fill the regSize. |
| 324 | while (size != regSize) { |
| 325 | pattern |= (pattern << size); |
| 326 | size *= 2; |
| 327 | } |
| 328 | return pattern; |
| 329 | } |
| 330 | |
| 331 | /// isValidDecodeLogicalImmediate - Check to see if the logical immediate value |
| 332 | /// in the form "N:immr:imms" (where the immr and imms fields are each 6 bits) |
| 333 | /// is a valid encoding for an integer value with regSize bits. |
| 334 | static inline bool isValidDecodeLogicalImmediate(uint64_t val, |
| 335 | unsigned regSize) { |
| 336 | // Extract the N and imms fields needed for checking. |
| 337 | unsigned N = (val >> 12) & 1; |
| 338 | unsigned imms = val & 0x3f; |
| 339 | |
| 340 | if (regSize == 32 && N != 0) // undefined logical immediate encoding |
| 341 | return false; |
| 342 | int len = 31 - llvm::countl_zero(Val: (N << 6) | (~imms & 0x3f)); |
| 343 | if (len < 0) // undefined logical immediate encoding |
| 344 | return false; |
| 345 | unsigned size = (1 << len); |
| 346 | unsigned S = imms & (size - 1); |
| 347 | if (S == size - 1) // undefined logical immediate encoding |
| 348 | return false; |
| 349 | |
| 350 | return true; |
| 351 | } |
| 352 | |
| 353 | //===----------------------------------------------------------------------===// |
| 354 | // Floating-point Immediates |
| 355 | // |
| 356 | static inline float getFPImmFloat(unsigned Imm) { |
| 357 | // We expect an 8-bit binary encoding of a floating-point number here. |
| 358 | |
| 359 | uint8_t Sign = (Imm >> 7) & 0x1; |
| 360 | uint8_t Exp = (Imm >> 4) & 0x7; |
| 361 | uint8_t Mantissa = Imm & 0xf; |
| 362 | |
| 363 | // 8-bit FP IEEE Float Encoding |
| 364 | // abcd efgh aBbbbbbc defgh000 00000000 00000000 |
| 365 | // |
| 366 | // where B = NOT(b); |
| 367 | |
| 368 | uint32_t I = 0; |
| 369 | I |= Sign << 31; |
| 370 | I |= ((Exp & 0x4) != 0 ? 0 : 1) << 30; |
| 371 | I |= ((Exp & 0x4) != 0 ? 0x1f : 0) << 25; |
| 372 | I |= (Exp & 0x3) << 23; |
| 373 | I |= Mantissa << 19; |
| 374 | return bit_cast<float>(from: I); |
| 375 | } |
| 376 | |
| 377 | /// getFP16Imm - Return an 8-bit floating-point version of the 16-bit |
| 378 | /// floating-point value. If the value cannot be represented as an 8-bit |
| 379 | /// floating-point value, then return -1. |
| 380 | static inline int getFP16Imm(const APInt &Imm) { |
| 381 | uint32_t Sign = Imm.lshr(shiftAmt: 15).getZExtValue() & 1; |
| 382 | int32_t Exp = (Imm.lshr(shiftAmt: 10).getSExtValue() & 0x1f) - 15; // -14 to 15 |
| 383 | int32_t Mantissa = Imm.getZExtValue() & 0x3ff; // 10 bits |
| 384 | |
| 385 | // We can handle 4 bits of mantissa. |
| 386 | // mantissa = (16+UInt(e:f:g:h))/16. |
| 387 | if (Mantissa & 0x3f) |
| 388 | return -1; |
| 389 | Mantissa >>= 6; |
| 390 | |
| 391 | // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3 |
| 392 | if (Exp < -3 || Exp > 4) |
| 393 | return -1; |
| 394 | Exp = ((Exp+3) & 0x7) ^ 4; |
| 395 | |
| 396 | return ((int)Sign << 7) | (Exp << 4) | Mantissa; |
| 397 | } |
| 398 | |
| 399 | static inline int getFP16Imm(const APFloat &FPImm) { |
| 400 | return getFP16Imm(Imm: FPImm.bitcastToAPInt()); |
| 401 | } |
| 402 | |
| 403 | /// getFP32Imm - Return an 8-bit floating-point version of the 32-bit |
| 404 | /// floating-point value. If the value cannot be represented as an 8-bit |
| 405 | /// floating-point value, then return -1. |
| 406 | static inline int getFP32Imm(const APInt &Imm) { |
| 407 | uint32_t Sign = Imm.lshr(shiftAmt: 31).getZExtValue() & 1; |
| 408 | int32_t Exp = (Imm.lshr(shiftAmt: 23).getSExtValue() & 0xff) - 127; // -126 to 127 |
| 409 | int64_t Mantissa = Imm.getZExtValue() & 0x7fffff; // 23 bits |
| 410 | |
| 411 | // We can handle 4 bits of mantissa. |
| 412 | // mantissa = (16+UInt(e:f:g:h))/16. |
| 413 | if (Mantissa & 0x7ffff) |
| 414 | return -1; |
| 415 | Mantissa >>= 19; |
| 416 | if ((Mantissa & 0xf) != Mantissa) |
| 417 | return -1; |
| 418 | |
| 419 | // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3 |
| 420 | if (Exp < -3 || Exp > 4) |
| 421 | return -1; |
| 422 | Exp = ((Exp+3) & 0x7) ^ 4; |
| 423 | |
| 424 | return ((int)Sign << 7) | (Exp << 4) | Mantissa; |
| 425 | } |
| 426 | |
| 427 | static inline int getFP32Imm(const APFloat &FPImm) { |
| 428 | return getFP32Imm(Imm: FPImm.bitcastToAPInt()); |
| 429 | } |
| 430 | |
| 431 | /// getFP64Imm - Return an 8-bit floating-point version of the 64-bit |
| 432 | /// floating-point value. If the value cannot be represented as an 8-bit |
| 433 | /// floating-point value, then return -1. |
| 434 | static inline int getFP64Imm(const APInt &Imm) { |
| 435 | uint64_t Sign = Imm.lshr(shiftAmt: 63).getZExtValue() & 1; |
| 436 | int64_t Exp = (Imm.lshr(shiftAmt: 52).getSExtValue() & 0x7ff) - 1023; // -1022 to 1023 |
| 437 | uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffULL; |
| 438 | |
| 439 | // We can handle 4 bits of mantissa. |
| 440 | // mantissa = (16+UInt(e:f:g:h))/16. |
| 441 | if (Mantissa & 0xffffffffffffULL) |
| 442 | return -1; |
| 443 | Mantissa >>= 48; |
| 444 | if ((Mantissa & 0xf) != Mantissa) |
| 445 | return -1; |
| 446 | |
| 447 | // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3 |
| 448 | if (Exp < -3 || Exp > 4) |
| 449 | return -1; |
| 450 | Exp = ((Exp+3) & 0x7) ^ 4; |
| 451 | |
| 452 | return ((int)Sign << 7) | (Exp << 4) | Mantissa; |
| 453 | } |
| 454 | |
| 455 | static inline int getFP64Imm(const APFloat &FPImm) { |
| 456 | return getFP64Imm(Imm: FPImm.bitcastToAPInt()); |
| 457 | } |
| 458 | |
| 459 | //===--------------------------------------------------------------------===// |
| 460 | // AdvSIMD Modified Immediates |
| 461 | //===--------------------------------------------------------------------===// |
| 462 | |
// 0x00 0x00 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh
static inline bool isAdvSIMDModImmType1(uint64_t Imm) {
  const uint64_t Lo = Imm & 0xffffffffULL;
  // Both 32-bit halves must match and only byte 0 of each may be set.
  return (Imm >> 32) == Lo && (Lo & ~0xffULL) == 0;
}

static inline uint8_t encodeAdvSIMDModImmType1(uint64_t Imm) {
  // The encoded immediate is the repeated byte itself.
  return static_cast<uint8_t>(Imm & 0xffULL);
}

static inline uint64_t decodeAdvSIMDModImmType1(uint8_t Imm) {
  // Replicate the byte into bits [7:0] of each 32-bit half.
  const uint64_t Rep = Imm;
  return Rep | (Rep << 32);
}
| 477 | |
// 0x00 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh 0x00
static inline bool isAdvSIMDModImmType2(uint64_t Imm) {
  const uint64_t Lo = Imm & 0xffffffffULL;
  // Both 32-bit halves must match and only byte 1 of each may be set.
  return (Imm >> 32) == Lo && (Lo & ~0xff00ULL) == 0;
}

static inline uint8_t encodeAdvSIMDModImmType2(uint64_t Imm) {
  // The repeated byte lives in bits [15:8].
  return static_cast<uint8_t>(Imm >> 8);
}

static inline uint64_t decodeAdvSIMDModImmType2(uint8_t Imm) {
  // Place the byte at bits [15:8] of each 32-bit half.
  const uint64_t Rep = uint64_t(Imm) << 8;
  return Rep | (Rep << 32);
}
| 492 | |
// 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh 0x00 0x00
static inline bool isAdvSIMDModImmType3(uint64_t Imm) {
  const uint64_t Lo = Imm & 0xffffffffULL;
  // Both 32-bit halves must match and only byte 2 of each may be set.
  return (Imm >> 32) == Lo && (Lo & ~0xff0000ULL) == 0;
}

static inline uint8_t encodeAdvSIMDModImmType3(uint64_t Imm) {
  // The repeated byte lives in bits [23:16].
  return static_cast<uint8_t>(Imm >> 16);
}

static inline uint64_t decodeAdvSIMDModImmType3(uint8_t Imm) {
  // Place the byte at bits [23:16] of each 32-bit half.
  const uint64_t Rep = uint64_t(Imm) << 16;
  return Rep | (Rep << 32);
}
| 507 | |
// abcdefgh 0x00 0x00 0x00 abcdefgh 0x00 0x00 0x00
static inline bool isAdvSIMDModImmType4(uint64_t Imm) {
  const uint64_t Lo = Imm & 0xffffffffULL;
  // Both 32-bit halves must match and only byte 3 of each may be set.
  return (Imm >> 32) == Lo && (Lo & ~0xff000000ULL) == 0;
}

static inline uint8_t encodeAdvSIMDModImmType4(uint64_t Imm) {
  // The repeated byte lives in bits [31:24].
  return static_cast<uint8_t>(Imm >> 24);
}

static inline uint64_t decodeAdvSIMDModImmType4(uint8_t Imm) {
  // Place the byte at bits [31:24] of each 32-bit half.
  const uint64_t Rep = uint64_t(Imm) << 24;
  return Rep | (Rep << 32);
}
| 522 | |
// 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh
static inline bool isAdvSIMDModImmType5(uint64_t Imm) {
  // All four 16-bit lanes must be equal and hold the byte in bits [7:0].
  const uint64_t Lane = Imm & 0xffffULL;
  return Imm == Lane * 0x0001000100010001ULL && (Lane & 0xff00ULL) == 0;
}

static inline uint8_t encodeAdvSIMDModImmType5(uint64_t Imm) {
  return static_cast<uint8_t>(Imm & 0xffULL);
}

static inline uint64_t decodeAdvSIMDModImmType5(uint8_t Imm) {
  // Splat the byte into the low byte of each 16-bit lane.
  return uint64_t(Imm) * 0x0001000100010001ULL;
}
| 538 | |
// abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00
static inline bool isAdvSIMDModImmType6(uint64_t Imm) {
  // All four 16-bit lanes must be equal and hold the byte in bits [15:8].
  const uint64_t Lane = Imm & 0xffffULL;
  return Imm == Lane * 0x0001000100010001ULL && (Lane & 0x00ffULL) == 0;
}

static inline uint8_t encodeAdvSIMDModImmType6(uint64_t Imm) {
  return static_cast<uint8_t>(Imm >> 8);
}

static inline uint64_t decodeAdvSIMDModImmType6(uint8_t Imm) {
  // Splat the byte into the high byte of each 16-bit lane.
  return (uint64_t(Imm) << 8) * 0x0001000100010001ULL;
}
| 554 | |
// 0x00 0x00 abcdefgh 0xFF 0x00 0x00 abcdefgh 0xFF
static inline bool isAdvSIMDModImmType7(uint64_t Imm) {
  const uint64_t Lo = Imm & 0xffffffffULL;
  // Both halves match; byte 0 is all-ones, byte 1 is free, bytes 2-3 zero.
  return (Imm >> 32) == Lo && (Lo & 0xffff00ffULL) == 0x000000ffULL;
}

static inline uint8_t encodeAdvSIMDModImmType7(uint64_t Imm) {
  return static_cast<uint8_t>(Imm >> 8);
}

static inline uint64_t decodeAdvSIMDModImmType7(uint8_t Imm) {
  // Byte at bits [15:8], ones shifted in below, duplicated in both halves.
  const uint64_t Rep = (uint64_t(Imm) << 8) | 0xffULL;
  return Rep | (Rep << 32);
}
| 569 | |
// 0x00 abcdefgh 0xFF 0xFF 0x00 abcdefgh 0xFF 0xFF
static inline bool isAdvSIMDModImmType8(uint64_t Imm) {
  const uint64_t Lo = Imm & 0xffffffffULL;
  // Both halves match; bytes 0-1 all-ones, byte 2 free, byte 3 zero.
  return (Imm >> 32) == Lo && (Lo & 0xff00ffffULL) == 0x0000ffffULL;
}

static inline uint64_t decodeAdvSIMDModImmType8(uint8_t Imm) {
  // Byte at bits [23:16], ones shifted in below, duplicated in both halves.
  const uint64_t Rep = (uint64_t(Imm) << 16) | 0xffffULL;
  return Rep | (Rep << 32);
}

static inline uint8_t encodeAdvSIMDModImmType8(uint64_t Imm) {
  return static_cast<uint8_t>(Imm >> 16);
}
| 584 | |
// abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh
static inline bool isAdvSIMDModImmType9(uint64_t Imm) {
  // Every byte of the immediate must equal the lowest byte.
  return Imm == (Imm & 0xffULL) * 0x0101010101010101ULL;
}

static inline uint8_t encodeAdvSIMDModImmType9(uint64_t Imm) {
  return static_cast<uint8_t>(Imm & 0xffULL);
}

static inline uint64_t decodeAdvSIMDModImmType9(uint8_t Imm) {
  // Splat the byte across all eight byte lanes.
  return uint64_t(Imm) * 0x0101010101010101ULL;
}
| 603 | |
// aaaaaaaa bbbbbbbb cccccccc dddddddd eeeeeeee ffffffff gggggggg hhhhhhhh
// cmode: 1110, op: 1
// Each byte of the 64-bit immediate must be entirely zeros or entirely ones.
static inline bool isAdvSIMDModImmType10(uint64_t Imm) {
#if defined(_MSC_VER) && _MSC_VER == 1937 && !defined(__clang__) && \
    defined(_M_ARM64)
  // The MSVC compiler 19.37 for ARM64 has an optimization bug that
  // causes an incorrect behavior with the original version. Work around
  // by using a slightly different variation.
  // https://developercommunity.visualstudio.com/t/C-ARM64-compiler-optimization-bug/10481261
  constexpr uint64_t Mask = 0xFFULL;
  uint64_t ByteA = (Imm >> 56) & Mask;
  uint64_t ByteB = (Imm >> 48) & Mask;
  uint64_t ByteC = (Imm >> 40) & Mask;
  uint64_t ByteD = (Imm >> 32) & Mask;
  uint64_t ByteE = (Imm >> 24) & Mask;
  uint64_t ByteF = (Imm >> 16) & Mask;
  uint64_t ByteG = (Imm >> 8) & Mask;
  uint64_t ByteH = Imm & Mask;

  return (ByteA == 0ULL || ByteA == Mask) && (ByteB == 0ULL || ByteB == Mask) &&
         (ByteC == 0ULL || ByteC == Mask) && (ByteD == 0ULL || ByteD == Mask) &&
         (ByteE == 0ULL || ByteE == Mask) && (ByteF == 0ULL || ByteF == Mask) &&
         (ByteG == 0ULL || ByteG == Mask) && (ByteH == 0ULL || ByteH == Mask);
#else
  // Isolate each byte in place; a byte is valid if it equals 0 or its mask.
  uint64_t ByteA = Imm & 0xff00000000000000ULL;
  uint64_t ByteB = Imm & 0x00ff000000000000ULL;
  uint64_t ByteC = Imm & 0x0000ff0000000000ULL;
  uint64_t ByteD = Imm & 0x000000ff00000000ULL;
  uint64_t ByteE = Imm & 0x00000000ff000000ULL;
  uint64_t ByteF = Imm & 0x0000000000ff0000ULL;
  uint64_t ByteG = Imm & 0x000000000000ff00ULL;
  uint64_t ByteH = Imm & 0x00000000000000ffULL;

  return (ByteA == 0ULL || ByteA == 0xff00000000000000ULL) &&
         (ByteB == 0ULL || ByteB == 0x00ff000000000000ULL) &&
         (ByteC == 0ULL || ByteC == 0x0000ff0000000000ULL) &&
         (ByteD == 0ULL || ByteD == 0x000000ff00000000ULL) &&
         (ByteE == 0ULL || ByteE == 0x00000000ff000000ULL) &&
         (ByteF == 0ULL || ByteF == 0x0000000000ff0000ULL) &&
         (ByteG == 0ULL || ByteG == 0x000000000000ff00ULL) &&
         (ByteH == 0ULL || ByteH == 0x00000000000000ffULL);
#endif
}

// Collapse each all-ones/all-zeros byte to a single bit, MSB-first.
static inline uint8_t encodeAdvSIMDModImmType10(uint64_t Imm) {
  uint8_t BitA = (Imm & 0xff00000000000000ULL) != 0;
  uint8_t BitB = (Imm & 0x00ff000000000000ULL) != 0;
  uint8_t BitC = (Imm & 0x0000ff0000000000ULL) != 0;
  uint8_t BitD = (Imm & 0x000000ff00000000ULL) != 0;
  uint8_t BitE = (Imm & 0x00000000ff000000ULL) != 0;
  uint8_t BitF = (Imm & 0x0000000000ff0000ULL) != 0;
  uint8_t BitG = (Imm & 0x000000000000ff00ULL) != 0;
  uint8_t BitH = (Imm & 0x00000000000000ffULL) != 0;

  uint8_t EncVal = BitA;
  EncVal <<= 1;
  EncVal |= BitB;
  EncVal <<= 1;
  EncVal |= BitC;
  EncVal <<= 1;
  EncVal |= BitD;
  EncVal <<= 1;
  EncVal |= BitE;
  EncVal <<= 1;
  EncVal |= BitF;
  EncVal <<= 1;
  EncVal |= BitG;
  EncVal <<= 1;
  EncVal |= BitH;
  return EncVal;
}

// Expand each of the eight encoded bits back into a full byte.
static inline uint64_t decodeAdvSIMDModImmType10(uint8_t Imm) {
  uint64_t EncVal = 0;
  if (Imm & 0x80) EncVal |= 0xff00000000000000ULL;
  if (Imm & 0x40) EncVal |= 0x00ff000000000000ULL;
  if (Imm & 0x20) EncVal |= 0x0000ff0000000000ULL;
  if (Imm & 0x10) EncVal |= 0x000000ff00000000ULL;
  if (Imm & 0x08) EncVal |= 0x00000000ff000000ULL;
  if (Imm & 0x04) EncVal |= 0x0000000000ff0000ULL;
  if (Imm & 0x02) EncVal |= 0x000000000000ff00ULL;
  if (Imm & 0x01) EncVal |= 0x00000000000000ffULL;
  return EncVal;
}
| 688 | |
// aBbbbbbc defgh000 0x00 0x00 aBbbbbbc defgh000 0x00 0x00
static inline bool isAdvSIMDModImmType11(uint64_t Imm) {
  const uint64_t Lo = Imm & 0xffffffffULL;
  // Bits [30:25] of each half must be all-ones (0x1f) or exactly 0x20,
  // i.e. B == NOT(b) in the FP8 expansion; the low 19 bits must be zero.
  const uint64_t BString = (Lo & 0x7E000000ULL) >> 25;
  return (Imm >> 32) == Lo && (BString == 0x1f || BString == 0x20) &&
         (Lo & 0x0007ffffULL) == 0;
}

static inline uint8_t encodeAdvSIMDModImmType11(uint64_t Imm) {
  // Gather the a, b, c, d, e, f, g, h bits from the low 32-bit half.
  uint8_t Enc = 0;
  Enc |= ((Imm >> 31) & 1) << 7; // a
  Enc |= ((Imm >> 29) & 1) << 6; // b
  Enc |= ((Imm >> 24) & 1) << 5; // c
  Enc |= ((Imm >> 23) & 1) << 4; // d
  Enc |= ((Imm >> 22) & 1) << 3; // e
  Enc |= ((Imm >> 21) & 1) << 2; // f
  Enc |= ((Imm >> 20) & 1) << 1; // g
  Enc |= (Imm >> 19) & 1;        // h
  return Enc;
}

static inline uint64_t decodeAdvSIMDModImmType11(uint8_t Imm) {
  // Rebuild one 32-bit half, then duplicate it into both halves.
  uint64_t Word = (Imm & 0x40) ? 0x3e000000ULL : 0x40000000ULL; // Bbbbbb
  if (Imm & 0x80) Word |= 0x80000000ULL; // a
  if (Imm & 0x20) Word |= 0x01000000ULL; // c
  if (Imm & 0x10) Word |= 0x00800000ULL; // d
  if (Imm & 0x08) Word |= 0x00400000ULL; // e
  if (Imm & 0x04) Word |= 0x00200000ULL; // f
  if (Imm & 0x02) Word |= 0x00100000ULL; // g
  if (Imm & 0x01) Word |= 0x00080000ULL; // h
  return (Word << 32) | Word;
}
| 738 | |
// aBbbbbbb bbcdefgh 0x00 0x00 0x00 0x00 0x00 0x00
static inline bool isAdvSIMDModImmType12(uint64_t Imm) {
  // Bits [62:54] must be all-ones (0xff) or exactly 0x100 (B == NOT(b));
  // the low 48 bits must be zero.
  const uint64_t BString = (Imm >> 54) & 0x1ffULL;
  return (BString == 0xff || BString == 0x100) &&
         (Imm & 0x0000ffffffffffffULL) == 0;
}

static inline uint8_t encodeAdvSIMDModImmType12(uint64_t Imm) {
  // Gather the a, b, c, d, e, f, g, h bits from the top of the immediate.
  uint8_t Enc = 0;
  Enc |= ((Imm >> 63) & 1) << 7; // a
  Enc |= ((Imm >> 54) & 1) << 6; // b
  Enc |= ((Imm >> 53) & 1) << 5; // c
  Enc |= ((Imm >> 52) & 1) << 4; // d
  Enc |= ((Imm >> 51) & 1) << 3; // e
  Enc |= ((Imm >> 50) & 1) << 2; // f
  Enc |= ((Imm >> 49) & 1) << 1; // g
  Enc |= (Imm >> 48) & 1;        // h
  return Enc;
}

static inline uint64_t decodeAdvSIMDModImmType12(uint8_t Imm) {
  // Expand b into Bbbbbbbbb, then place the remaining bits below it.
  uint64_t EncVal =
      (Imm & 0x40) ? 0x3fc0000000000000ULL : 0x4000000000000000ULL;
  if (Imm & 0x80) EncVal |= 0x8000000000000000ULL; // a
  if (Imm & 0x20) EncVal |= 0x0020000000000000ULL; // c
  if (Imm & 0x10) EncVal |= 0x0010000000000000ULL; // d
  if (Imm & 0x08) EncVal |= 0x0008000000000000ULL; // e
  if (Imm & 0x04) EncVal |= 0x0004000000000000ULL; // f
  if (Imm & 0x02) EncVal |= 0x0002000000000000ULL; // g
  if (Imm & 0x01) EncVal |= 0x0001000000000000ULL; // h
  return EncVal;
}
| 787 | |
| 788 | /// Returns true if Imm is the concatenation of a repeating pattern of type T. |
| 789 | template <typename T> |
| 790 | static inline bool isSVEMaskOfIdenticalElements(int64_t Imm) { |
| 791 | auto Parts = bit_cast<std::array<T, sizeof(int64_t) / sizeof(T)>>(Imm); |
| 792 | return llvm::all_equal(Parts); |
| 793 | } |
| 794 | |
/// Returns true if Imm is valid for CPY/DUP.
template <typename T>
static inline bool isSVECpyImm(int64_t Imm) {
  // Imm is interpreted as a signed value, which means top bits must be all ones
  // (sign bits if the immediate value is negative and passed in a larger
  // container), or all zeroes.
  int64_t Mask = ~int64_t(std::numeric_limits<std::make_unsigned_t<T>>::max());
  if ((Imm & Mask) != 0 && (Imm & Mask) != Mask)
    return false;

  // Imm is a signed 8-bit value.
  // Top bits must be zeroes or sign bits.
  // The comparison checks that truncating to 8 bits and truncating to the
  // element type T agree, i.e. the low byte sign-extends to the value seen
  // in a T-sized element.
  if (Imm & 0xff)
    return int8_t(Imm) == T(Imm);

  // Imm is a signed 16-bit value and multiple of 256.
  // Top bits must be zeroes or sign bits.
  // (The low byte is zero here, so this is the "shifted by 8" form.)
  if (Imm & 0xff00)
    return int16_t(Imm) == T(Imm);

  return Imm == 0;
}
| 817 | |
| 818 | /// Returns true if Imm is valid for ADD/SUB. |
| 819 | template <typename T> |
| 820 | static inline bool isSVEAddSubImm(int64_t Imm) { |
| 821 | bool IsInt8t = std::is_same<int8_t, std::make_signed_t<T>>::value || |
| 822 | std::is_same<int8_t, T>::value; |
| 823 | return uint8_t(Imm) == Imm || (!IsInt8t && uint16_t(Imm & ~0xff) == Imm); |
| 824 | } |
| 825 | |
| 826 | /// Return true if Imm is valid for DUPM and has no single CPY/DUP equivalent. |
| 827 | static inline bool isSVEMoveMaskPreferredLogicalImmediate(int64_t Imm) { |
| 828 | if (isSVECpyImm<int64_t>(Imm)) |
| 829 | return false; |
| 830 | |
| 831 | auto S = bit_cast<std::array<int32_t, 2>>(from: Imm); |
| 832 | auto H = bit_cast<std::array<int16_t, 4>>(from: Imm); |
| 833 | auto B = bit_cast<std::array<int8_t, 8>>(from: Imm); |
| 834 | |
| 835 | if (isSVEMaskOfIdenticalElements<int32_t>(Imm) && isSVECpyImm<int32_t>(Imm: S[0])) |
| 836 | return false; |
| 837 | if (isSVEMaskOfIdenticalElements<int16_t>(Imm) && isSVECpyImm<int16_t>(Imm: H[0])) |
| 838 | return false; |
| 839 | if (isSVEMaskOfIdenticalElements<int8_t>(Imm) && isSVECpyImm<int8_t>(Imm: B[0])) |
| 840 | return false; |
| 841 | return isLogicalImmediate(imm: Imm, regSize: 64); |
| 842 | } |
| 843 | |
/// Return true if Value fits in a single 16-bit chunk at some legal MOVZ
/// shift (0, 16, 32 or 48, bounded by the register width).
inline static bool isAnyMOVZMovAlias(uint64_t Value, int RegWidth) {
  for (int Shift = 0; Shift + 16 <= RegWidth; Shift += 16) {
    uint64_t Chunk = 0xffffULL << Shift;
    if ((Value & ~Chunk) == 0)
      return true;
  }
  return false;
}
| 851 | |
/// Return true if Value is representable as "MOVZ <reg>, #imm16, lsl #Shift".
inline static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth) {
  if (RegWidth == 32)
    Value &= 0xffffffffULL;

  // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
  if (Value == 0)
    return Shift == 0;

  // All set bits must lie inside the 16-bit chunk at the given shift.
  return (Value & ~(0xffffULL << Shift)) == 0;
}
| 862 | |
| 863 | inline static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth) { |
| 864 | // MOVZ takes precedence over MOVN. |
| 865 | if (isAnyMOVZMovAlias(Value, RegWidth)) |
| 866 | return false; |
| 867 | |
| 868 | Value = ~Value; |
| 869 | if (RegWidth == 32) |
| 870 | Value &= 0xffffffffULL; |
| 871 | |
| 872 | return isMOVZMovAlias(Value, Shift, RegWidth); |
| 873 | } |
| 874 | |
| 875 | inline static bool isAnyMOVWMovAlias(uint64_t Value, int RegWidth) { |
| 876 | if (isAnyMOVZMovAlias(Value, RegWidth)) |
| 877 | return true; |
| 878 | |
| 879 | // It's not a MOVZ, but it might be a MOVN. |
| 880 | Value = ~Value; |
| 881 | if (RegWidth == 32) |
| 882 | Value &= 0xffffffffULL; |
| 883 | |
| 884 | return isAnyMOVZMovAlias(Value, RegWidth); |
| 885 | } |
| 886 | |
| 887 | static inline bool isSVECpyDupImm(int SizeInBits, int64_t Val, int32_t &Imm, |
| 888 | int32_t &Shift) { |
| 889 | switch (SizeInBits) { |
| 890 | case 8: |
| 891 | // All immediates are supported. |
| 892 | Shift = 0; |
| 893 | Imm = Val & 0xFF; |
| 894 | return true; |
| 895 | case 16: |
| 896 | case 32: |
| 897 | case 64: |
| 898 | // Support 8bit signed immediates. |
| 899 | if (Val >= -128 && Val <= 127) { |
| 900 | Shift = 0; |
| 901 | Imm = Val & 0xFF; |
| 902 | return true; |
| 903 | } |
| 904 | // Support 16bit signed immediates that are a multiple of 256. |
| 905 | if (Val >= -32768 && Val <= 32512 && Val % 256 == 0) { |
| 906 | Shift = 8; |
| 907 | Imm = (Val >> 8) & 0xFF; |
| 908 | return true; |
| 909 | } |
| 910 | break; |
| 911 | default: |
| 912 | break; |
| 913 | } |
| 914 | return false; |
| 915 | } |
| 916 | |
| 917 | static inline bool isSVELogicalImm(unsigned SizeInBits, uint64_t ImmVal, |
| 918 | uint64_t &Encoding) { |
| 919 | // Shift mask depending on type size. |
| 920 | switch (SizeInBits) { |
| 921 | case 8: |
| 922 | ImmVal &= 0xFF; |
| 923 | ImmVal |= ImmVal << 8; |
| 924 | ImmVal |= ImmVal << 16; |
| 925 | ImmVal |= ImmVal << 32; |
| 926 | break; |
| 927 | case 16: |
| 928 | ImmVal &= 0xFFFF; |
| 929 | ImmVal |= ImmVal << 16; |
| 930 | ImmVal |= ImmVal << 32; |
| 931 | break; |
| 932 | case 32: |
| 933 | ImmVal &= 0xFFFFFFFF; |
| 934 | ImmVal |= ImmVal << 32; |
| 935 | break; |
| 936 | case 64: |
| 937 | break; |
| 938 | default: |
| 939 | llvm_unreachable("Unexpected size" ); |
| 940 | } |
| 941 | |
| 942 | return processLogicalImmediate(Imm: ImmVal, RegSize: 64, Encoding); |
| 943 | } |
| 944 | |
| 945 | } // end namespace AArch64_AM |
| 946 | |
| 947 | } // end namespace llvm |
| 948 | |
| 949 | #endif |
| 950 | |