//===-- X86FixupVectorConstants.cpp - optimize constant generation -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file examines all full size vector constant pool loads and attempts to
// replace them with smaller constant pool entries, including:
// * Converting AVX512 memory-fold instructions to their broadcast-fold form.
// * Using vzload scalar loads.
// * Broadcasting of full width loads.
// * Sign/Zero extension of full width loads.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrFoldTables.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineConstantPool.h"

using namespace llvm;

#define DEBUG_TYPE "x86-fixup-vector-constants"

STATISTIC(NumInstChanges, "Number of instruction changes");

namespace {
class X86FixupVectorConstantsPass : public MachineFunctionPass {
public:
  static char ID;

  X86FixupVectorConstantsPass() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override {
    return "X86 Fixup Vector Constants";
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
  bool processInstruction(MachineFunction &MF, MachineBasicBlock &MBB,
                          MachineInstr &MI);

  // This pass runs after regalloc and doesn't support VReg operands.
  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().setNoVRegs();
  }

private:
  const X86InstrInfo *TII = nullptr;
  const X86Subtarget *ST = nullptr;
  const MCSchedModel *SM = nullptr;
};
} // end anonymous namespace

char X86FixupVectorConstantsPass::ID = 0;

INITIALIZE_PASS(X86FixupVectorConstantsPass, DEBUG_TYPE, DEBUG_TYPE, false,
                false)

FunctionPass *llvm::createX86FixupVectorConstants() {
  return new X86FixupVectorConstantsPass();
}

/// Normally, we only allow poison in vector splats. However, as this is part
/// of the backend and works with the DAG representation, which currently only
/// natively represents undef values, we need to accept undefs here.
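/// e.g. <i32 42, undef, i32 42, undef> is treated as a splat of i32 42, while
/// <i32 42, i32 7> has no common defined value and returns nullptr.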
static Constant *getSplatValueAllowUndef(const ConstantVector *C) {
  Constant *Res = nullptr;
  for (Value *Op : C->operands()) {
    Constant *OpC = cast<Constant>(Op);
    if (isa<UndefValue>(OpC))
      continue;
    if (!Res)
      Res = OpC;
    else if (Res != OpC)
      return nullptr;
  }
  return Res;
}

// Attempt to extract the full width of the constant's data as raw bits.
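// Elements are packed in little-endian element order: element I occupies the
// bits starting at (I * element-bit-width).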
static std::optional<APInt> extractConstantBits(const Constant *C) {
  unsigned NumBits = C->getType()->getPrimitiveSizeInBits();

  if (isa<UndefValue>(C))
    return APInt::getZero(NumBits);

  if (auto *CInt = dyn_cast<ConstantInt>(C)) {
    if (isa<VectorType>(CInt->getType()))
      return APInt::getSplat(NumBits, CInt->getValue());

    return CInt->getValue();
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (isa<VectorType>(CFP->getType()))
      return APInt::getSplat(NumBits, CFP->getValue().bitcastToAPInt());

    return CFP->getValue().bitcastToAPInt();
  }

  if (auto *CV = dyn_cast<ConstantVector>(C)) {
    if (auto *CVSplat = getSplatValueAllowUndef(CV)) {
      if (std::optional<APInt> Bits = extractConstantBits(CVSplat)) {
        assert((NumBits % Bits->getBitWidth()) == 0 && "Illegal splat");
        return APInt::getSplat(NumBits, *Bits);
      }
    }

    APInt Bits = APInt::getZero(NumBits);
    for (unsigned I = 0, E = CV->getNumOperands(); I != E; ++I) {
      Constant *Elt = CV->getOperand(I);
      std::optional<APInt> SubBits = extractConstantBits(Elt);
      if (!SubBits)
        return std::nullopt;
      assert(NumBits == (E * SubBits->getBitWidth()) &&
             "Illegal vector element size");
      Bits.insertBits(*SubBits, I * SubBits->getBitWidth());
    }
    return Bits;
  }

  if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) {
    bool IsInteger = CDS->getElementType()->isIntegerTy();
    bool IsFloat = CDS->getElementType()->isHalfTy() ||
                   CDS->getElementType()->isBFloatTy() ||
                   CDS->getElementType()->isFloatTy() ||
                   CDS->getElementType()->isDoubleTy();
    if (IsInteger || IsFloat) {
      APInt Bits = APInt::getZero(NumBits);
      unsigned EltBits = CDS->getElementType()->getPrimitiveSizeInBits();
      for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I) {
        if (IsInteger)
          Bits.insertBits(CDS->getElementAsAPInt(I), I * EltBits);
        else
          Bits.insertBits(CDS->getElementAsAPFloat(I).bitcastToAPInt(),
                          I * EltBits);
      }
      return Bits;
    }
  }

  return std::nullopt;
}
147
148static std::optional<APInt> extractConstantBits(const Constant *C,
149 unsigned NumBits) {
150 if (std::optional<APInt> Bits = extractConstantBits(C))
151 return Bits->zextOrTrunc(width: NumBits);
152 return std::nullopt;
153}
154
155// Attempt to compute the splat width of bits data by normalizing the splat to
156// remove undefs.
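// e.g. with SplatBitWidth = 64, <i32 1, i32 2, undef, i32 2> normalizes to the
// repeating (i32 1, i32 2) pair, with the undef lane filled from its defined
// counterpart.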
static std::optional<APInt> getSplatableConstant(const Constant *C,
                                                 unsigned SplatBitWidth) {
  const Type *Ty = C->getType();
  assert((Ty->getPrimitiveSizeInBits() % SplatBitWidth) == 0 &&
         "Illegal splat width");

  if (std::optional<APInt> Bits = extractConstantBits(C))
    if (Bits->isSplat(SplatBitWidth))
      return Bits->trunc(SplatBitWidth);

  // Detect general splats with undefs.
  // TODO: Do we need to handle NumEltsBits > SplatBitWidth splitting?
  if (auto *CV = dyn_cast<ConstantVector>(C)) {
    unsigned NumOps = CV->getNumOperands();
    unsigned NumEltsBits = Ty->getScalarSizeInBits();
    unsigned NumScaleOps = SplatBitWidth / NumEltsBits;
    if ((SplatBitWidth % NumEltsBits) == 0) {
      // Collect the elements and ensure that within the repeated splat
      // sequence they either match or are undef.
      SmallVector<Constant *, 16> Sequence(NumScaleOps, nullptr);
      for (unsigned Idx = 0; Idx != NumOps; ++Idx) {
        if (Constant *Elt = CV->getAggregateElement(Idx)) {
          if (isa<UndefValue>(Elt))
            continue;
          unsigned SplatIdx = Idx % NumScaleOps;
          if (!Sequence[SplatIdx] || Sequence[SplatIdx] == Elt) {
            Sequence[SplatIdx] = Elt;
            continue;
          }
        }
        return std::nullopt;
      }
      // Extract the constant bits forming the splat and insert into the bits
      // data, leave undef as zero.
      APInt SplatBits = APInt::getZero(SplatBitWidth);
      for (unsigned I = 0; I != NumScaleOps; ++I) {
        if (!Sequence[I])
          continue;
        if (std::optional<APInt> Bits = extractConstantBits(Sequence[I])) {
          SplatBits.insertBits(*Bits, I * Bits->getBitWidth());
          continue;
        }
        return std::nullopt;
      }
      return SplatBits;
    }
  }

  return std::nullopt;
}

// Split raw bits into a constant vector of elements of a specific bit width.
// NOTE: We don't always bother converting to scalars if the vector length is 1.
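// e.g. 64 bits of data with NumSclBits == 16 yields a 4-element
// ConstantDataVector (FP typed if SclTy is a 16-bit FP type, integer
// otherwise).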
static Constant *rebuildConstant(LLVMContext &Ctx, Type *SclTy,
                                 const APInt &Bits, unsigned NumSclBits) {
  unsigned BitWidth = Bits.getBitWidth();

  if (NumSclBits == 8) {
    SmallVector<uint8_t> RawBits;
    for (unsigned I = 0; I != BitWidth; I += 8)
      RawBits.push_back(Bits.extractBits(8, I).getZExtValue());
    return ConstantDataVector::get(Ctx, RawBits);
  }

  if (NumSclBits == 16) {
    SmallVector<uint16_t> RawBits;
    for (unsigned I = 0; I != BitWidth; I += 16)
      RawBits.push_back(Bits.extractBits(16, I).getZExtValue());
    if (SclTy->is16bitFPTy())
      return ConstantDataVector::getFP(SclTy, RawBits);
    return ConstantDataVector::get(Ctx, RawBits);
  }

  if (NumSclBits == 32) {
    SmallVector<uint32_t> RawBits;
    for (unsigned I = 0; I != BitWidth; I += 32)
      RawBits.push_back(Bits.extractBits(32, I).getZExtValue());
    if (SclTy->isFloatTy())
      return ConstantDataVector::getFP(SclTy, RawBits);
    return ConstantDataVector::get(Ctx, RawBits);
  }

  assert(NumSclBits == 64 && "Unhandled vector element width");

  SmallVector<uint64_t> RawBits;
  for (unsigned I = 0; I != BitWidth; I += 64)
    RawBits.push_back(Bits.extractBits(64, I).getZExtValue());
  if (SclTy->isDoubleTy())
    return ConstantDataVector::getFP(SclTy, RawBits);
  return ConstantDataVector::get(Ctx, RawBits);
}

// Attempt to rebuild a normalized splat vector constant of the requested splat
// width, built up of potentially smaller scalar values.
static Constant *rebuildSplatCst(const Constant *C, unsigned /*NumBits*/,
                                 unsigned /*NumElts*/, unsigned SplatBitWidth) {
  // TODO: Truncate to NumBits once ConvertToBroadcastAVX512 supports this.
  std::optional<APInt> Splat = getSplatableConstant(C, SplatBitWidth);
  if (!Splat)
    return nullptr;

  // Determine scalar size to use for the constant splat vector, clamping as we
  // might have found a splat smaller than the original constant data.
  Type *SclTy = C->getType()->getScalarType();
  unsigned NumSclBits = SclTy->getPrimitiveSizeInBits();
  NumSclBits = std::min<unsigned>(NumSclBits, SplatBitWidth);

  // Fallback to i64 / double.
  NumSclBits = (NumSclBits == 8 || NumSclBits == 16 || NumSclBits == 32)
                   ? NumSclBits
                   : 64;

  // Extract per-element bits.
  return rebuildConstant(C->getContext(), SclTy, *Splat, NumSclBits);
}

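// Attempt to rebuild a vzload-style constant: keep just the lower
// ScalarBitWidth bits when the remaining upper bits of the full-width constant
// are all zero, letting a scalar load with implicit zero-extension replace the
// full vector load.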
static Constant *rebuildZeroUpperCst(const Constant *C, unsigned NumBits,
                                     unsigned /*NumElts*/,
                                     unsigned ScalarBitWidth) {
  Type *SclTy = C->getType()->getScalarType();
  unsigned NumSclBits = SclTy->getPrimitiveSizeInBits();
  LLVMContext &Ctx = C->getContext();

  if (NumBits > ScalarBitWidth) {
    // Determine if the upper bits are all zero.
    if (std::optional<APInt> Bits = extractConstantBits(C, NumBits)) {
      if (Bits->countLeadingZeros() >= (NumBits - ScalarBitWidth)) {
        // If the original constant was made of smaller elements, try to retain
        // those types.
        if (ScalarBitWidth > NumSclBits && (ScalarBitWidth % NumSclBits) == 0)
          return rebuildConstant(Ctx, SclTy, *Bits, NumSclBits);

        // Fallback to raw integer bits.
        APInt RawBits = Bits->zextOrTrunc(ScalarBitWidth);
        return ConstantInt::get(Ctx, RawBits);
      }
    }
  }

  return nullptr;
}

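// Attempt to rebuild an extension-load constant: succeeds only if every
// DstEltBitWidth element of the original constant is representable in
// SrcEltBitWidth bits after accounting for sign/zero extension, e.g.
// <4 x i32> <1, -2, 3, -4> can be rebuilt as a sign-extending load of
// <4 x i8> <1, -2, 3, -4>.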
static Constant *rebuildExtCst(const Constant *C, bool IsSExt,
                               unsigned NumBits, unsigned NumElts,
                               unsigned SrcEltBitWidth) {
  unsigned DstEltBitWidth = NumBits / NumElts;
  assert((NumBits % NumElts) == 0 && (NumBits % SrcEltBitWidth) == 0 &&
         (DstEltBitWidth % SrcEltBitWidth) == 0 &&
         (DstEltBitWidth > SrcEltBitWidth) && "Illegal extension width");

  if (std::optional<APInt> Bits = extractConstantBits(C, NumBits)) {
    assert((Bits->getBitWidth() / DstEltBitWidth) == NumElts &&
           (Bits->getBitWidth() % DstEltBitWidth) == 0 &&
           "Unexpected constant extension");

    // Ensure every vector element can be represented by the src bitwidth.
    APInt TruncBits = APInt::getZero(NumElts * SrcEltBitWidth);
    for (unsigned I = 0; I != NumElts; ++I) {
      APInt Elt = Bits->extractBits(DstEltBitWidth, I * DstEltBitWidth);
      if ((IsSExt && Elt.getSignificantBits() > SrcEltBitWidth) ||
          (!IsSExt && Elt.getActiveBits() > SrcEltBitWidth))
        return nullptr;
      TruncBits.insertBits(Elt.trunc(SrcEltBitWidth), I * SrcEltBitWidth);
    }

    Type *Ty = C->getType();
    return rebuildConstant(Ty->getContext(), Ty->getScalarType(), TruncBits,
                           SrcEltBitWidth);
  }

  return nullptr;
}
static Constant *rebuildSExtCst(const Constant *C, unsigned NumBits,
                                unsigned NumElts, unsigned SrcEltBitWidth) {
  return rebuildExtCst(C, /*IsSExt=*/true, NumBits, NumElts, SrcEltBitWidth);
}
static Constant *rebuildZExtCst(const Constant *C, unsigned NumBits,
                                unsigned NumElts, unsigned SrcEltBitWidth) {
  return rebuildExtCst(C, /*IsSExt=*/false, NumBits, NumElts, SrcEltBitWidth);
}

bool X86FixupVectorConstantsPass::processInstruction(MachineFunction &MF,
                                                     MachineBasicBlock &MBB,
                                                     MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  MachineConstantPool *CP = MI.getParent()->getParent()->getConstantPool();
  bool HasSSE2 = ST->hasSSE2();
  bool HasSSE41 = ST->hasSSE41();
  bool HasAVX2 = ST->hasAVX2();
  bool HasDQI = ST->hasDQI();
  bool HasBWI = ST->hasBWI();
  bool HasVLX = ST->hasVLX();
  bool MultiDomain = ST->hasAVX512() || ST->hasNoDomainDelayMov();
  bool OptSize = MF.getFunction().hasOptSize();

  struct FixupEntry {
    int Op;
    int NumCstElts;
    int MemBitWidth;
    std::function<Constant *(const Constant *, unsigned, unsigned, unsigned)>
        RebuildConstant;
  };

  auto NewOpcPreferable = [&](const FixupEntry &Fixup,
                              unsigned RegBitWidth) -> bool {
    if (SM->hasInstrSchedModel()) {
      unsigned NewOpc = Fixup.Op;
      auto *OldDesc = SM->getSchedClassDesc(TII->get(Opc).getSchedClass());
      auto *NewDesc = SM->getSchedClassDesc(TII->get(NewOpc).getSchedClass());
      unsigned BitsSaved = RegBitWidth - (Fixup.NumCstElts * Fixup.MemBitWidth);

      // Compare tput/lat - avoid any regressions, but allow an extra cycle of
      // latency in exchange for each 128-bit (or less) constant pool reduction
      // (this is a very simple cost:benefit estimate - there will probably be
      // better ways to calculate this).
      double OldTput = MCSchedModel::getReciprocalThroughput(*ST, *OldDesc);
      double NewTput = MCSchedModel::getReciprocalThroughput(*ST, *NewDesc);
      if (OldTput != NewTput)
        return NewTput < OldTput;

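      // One extra latency cycle is tolerated per 128 bits saved (rounded up);
      // e.g. shrinking a 512-bit load to a 64-bit broadcast saves 448 bits and
      // tolerates up to (448 + 127) / 128 = 4 extra cycles.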
      int LatTol = (BitsSaved + 127) / 128;
      int OldLat = MCSchedModel::computeInstrLatency(*ST, *OldDesc);
      int NewLat = MCSchedModel::computeInstrLatency(*ST, *NewDesc);
      if (OldLat != NewLat)
        return NewLat < (OldLat + LatTol);
    }

    // We either were unable to get tput/lat or all values were equal.
    // Prefer the new opcode for reduced constant pool size.
    return true;
  };

  auto FixupConstant = [&](ArrayRef<FixupEntry> Fixups, unsigned RegBitWidth,
                           unsigned OperandNo) {
#ifdef EXPENSIVE_CHECKS
    assert(llvm::is_sorted(Fixups,
                           [](const FixupEntry &A, const FixupEntry &B) {
                             return (A.NumCstElts * A.MemBitWidth) <
                                    (B.NumCstElts * B.MemBitWidth);
                           }) &&
           "Constant fixup table not sorted in ascending constant size");
#endif
    assert(MI.getNumOperands() >= (OperandNo + X86::AddrNumOperands) &&
           "Unexpected number of operands!");
    if (auto *C = X86::getConstantFromPool(MI, OperandNo)) {
      unsigned CstBitWidth = C->getType()->getPrimitiveSizeInBits();
      RegBitWidth = RegBitWidth ? RegBitWidth : CstBitWidth;
      for (const FixupEntry &Fixup : Fixups) {
        // Always use the smallest possible constant load with opt/minsize,
        // otherwise use the smallest instruction that doesn't affect
        // performance.
        // TODO: If constant has been hoisted from loop, use smallest constant.
        if (Fixup.Op && (OptSize || NewOpcPreferable(Fixup, RegBitWidth))) {
          // Construct a suitable constant and adjust the MI to use the new
          // constant pool entry.
          if (Constant *NewCst = Fixup.RebuildConstant(
                  C, RegBitWidth, Fixup.NumCstElts, Fixup.MemBitWidth)) {
            unsigned NewCPI =
                CP->getConstantPoolIndex(NewCst, Align(Fixup.MemBitWidth / 8));
            MI.setDesc(TII->get(Fixup.Op));
            MI.getOperand(OperandNo + X86::AddrDisp).setIndex(NewCPI);
            return true;
          }
        }
      }
    }
    return false;
  };

  // Attempt to detect a suitable vzload/broadcast/vextload from increasing
  // constant bitwidths. Prefer vzload/broadcast/vextload for same bitwidth:
  // - vzload shouldn't ever need a shuffle port to zero the upper elements and
  //   the fp/int domain versions are equally available so we don't introduce a
  //   domain crossing penalty.
  // - broadcasts sometimes need a shuffle port (especially for 8/16-bit
  //   variants), AVX1 only has fp domain broadcasts but AVX2+ have good fp/int
  //   domain equivalents.
  // - vextload always needs a shuffle port and is only ever int domain.
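  // e.g. a <4 x float> splat constant <1.0, 1.0, 1.0, 1.0> loaded via
  // VMOVAPSrm can become VBROADCASTSSrm of a single 4-byte pool entry, while
  // <1.0, 0.0, 0.0, 0.0> can use VMOVSSrm since the upper bits are known zero.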
  switch (Opc) {
  /* FP Loads */
  case X86::MOVAPDrm:
  case X86::MOVAPSrm:
  case X86::MOVUPDrm:
  case X86::MOVUPSrm: {
    // TODO: SSE3 MOVDDUP Handling
    FixupEntry Fixups[] = {
        {X86::MOVSSrm, 1, 32, rebuildZeroUpperCst},
        {HasSSE2 ? X86::MOVSDrm : 0, 1, 64, rebuildZeroUpperCst}};
    return FixupConstant(Fixups, 128, 1);
  }
  case X86::VMOVAPDrm:
  case X86::VMOVAPSrm:
  case X86::VMOVUPDrm:
  case X86::VMOVUPSrm: {
    FixupEntry Fixups[] = {
        {MultiDomain ? X86::VPMOVSXBQrm : 0, 2, 8, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXBQrm : 0, 2, 8, rebuildZExtCst},
        {X86::VMOVSSrm, 1, 32, rebuildZeroUpperCst},
        {X86::VBROADCASTSSrm, 1, 32, rebuildSplatCst},
        {MultiDomain ? X86::VPMOVSXBDrm : 0, 4, 8, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXBDrm : 0, 4, 8, rebuildZExtCst},
        {MultiDomain ? X86::VPMOVSXWQrm : 0, 2, 16, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXWQrm : 0, 2, 16, rebuildZExtCst},
        {X86::VMOVSDrm, 1, 64, rebuildZeroUpperCst},
        {X86::VMOVDDUPrm, 1, 64, rebuildSplatCst},
        {MultiDomain ? X86::VPMOVSXWDrm : 0, 4, 16, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXWDrm : 0, 4, 16, rebuildZExtCst},
        {MultiDomain ? X86::VPMOVSXDQrm : 0, 2, 32, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXDQrm : 0, 2, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 128, 1);
  }
  case X86::VMOVAPDYrm:
  case X86::VMOVAPSYrm:
  case X86::VMOVUPDYrm:
  case X86::VMOVUPSYrm: {
    FixupEntry Fixups[] = {
        {X86::VBROADCASTSSYrm, 1, 32, rebuildSplatCst},
        {HasAVX2 && MultiDomain ? X86::VPMOVSXBQYrm : 0, 4, 8, rebuildSExtCst},
        {HasAVX2 && MultiDomain ? X86::VPMOVZXBQYrm : 0, 4, 8, rebuildZExtCst},
        {X86::VBROADCASTSDYrm, 1, 64, rebuildSplatCst},
        {HasAVX2 && MultiDomain ? X86::VPMOVSXBDYrm : 0, 8, 8, rebuildSExtCst},
        {HasAVX2 && MultiDomain ? X86::VPMOVZXBDYrm : 0, 8, 8, rebuildZExtCst},
        {HasAVX2 && MultiDomain ? X86::VPMOVSXWQYrm : 0, 4, 16, rebuildSExtCst},
        {HasAVX2 && MultiDomain ? X86::VPMOVZXWQYrm : 0, 4, 16, rebuildZExtCst},
        {X86::VBROADCASTF128rm, 1, 128, rebuildSplatCst},
        {HasAVX2 && MultiDomain ? X86::VPMOVSXWDYrm : 0, 8, 16, rebuildSExtCst},
        {HasAVX2 && MultiDomain ? X86::VPMOVZXWDYrm : 0, 8, 16, rebuildZExtCst},
        {HasAVX2 && MultiDomain ? X86::VPMOVSXDQYrm : 0, 4, 32, rebuildSExtCst},
        {HasAVX2 && MultiDomain ? X86::VPMOVZXDQYrm : 0, 4, 32,
         rebuildZExtCst}};
    return FixupConstant(Fixups, 256, 1);
  }
  case X86::VMOVAPDZ128rm:
  case X86::VMOVAPSZ128rm:
  case X86::VMOVUPDZ128rm:
  case X86::VMOVUPSZ128rm: {
    FixupEntry Fixups[] = {
        {MultiDomain ? X86::VPMOVSXBQZ128rm : 0, 2, 8, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXBQZ128rm : 0, 2, 8, rebuildZExtCst},
        {X86::VMOVSSZrm, 1, 32, rebuildZeroUpperCst},
        {X86::VBROADCASTSSZ128rm, 1, 32, rebuildSplatCst},
        {MultiDomain ? X86::VPMOVSXBDZ128rm : 0, 4, 8, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXBDZ128rm : 0, 4, 8, rebuildZExtCst},
        {MultiDomain ? X86::VPMOVSXWQZ128rm : 0, 2, 16, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXWQZ128rm : 0, 2, 16, rebuildZExtCst},
        {X86::VMOVSDZrm, 1, 64, rebuildZeroUpperCst},
        {X86::VMOVDDUPZ128rm, 1, 64, rebuildSplatCst},
        {MultiDomain ? X86::VPMOVSXWDZ128rm : 0, 4, 16, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXWDZ128rm : 0, 4, 16, rebuildZExtCst},
        {MultiDomain ? X86::VPMOVSXDQZ128rm : 0, 2, 32, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXDQZ128rm : 0, 2, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 128, 1);
  }
  case X86::VMOVAPDZ256rm:
  case X86::VMOVAPSZ256rm:
  case X86::VMOVUPDZ256rm:
  case X86::VMOVUPSZ256rm: {
    FixupEntry Fixups[] = {
        {X86::VBROADCASTSSZ256rm, 1, 32, rebuildSplatCst},
        {MultiDomain ? X86::VPMOVSXBQZ256rm : 0, 4, 8, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXBQZ256rm : 0, 4, 8, rebuildZExtCst},
        {X86::VBROADCASTSDZ256rm, 1, 64, rebuildSplatCst},
        {MultiDomain ? X86::VPMOVSXBDZ256rm : 0, 8, 8, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXBDZ256rm : 0, 8, 8, rebuildZExtCst},
        {MultiDomain ? X86::VPMOVSXWQZ256rm : 0, 4, 16, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXWQZ256rm : 0, 4, 16, rebuildZExtCst},
        {X86::VBROADCASTF32X4Z256rm, 1, 128, rebuildSplatCst},
        {MultiDomain ? X86::VPMOVSXWDZ256rm : 0, 8, 16, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXWDZ256rm : 0, 8, 16, rebuildZExtCst},
        {MultiDomain ? X86::VPMOVSXDQZ256rm : 0, 4, 32, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXDQZ256rm : 0, 4, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 256, 1);
  }
  case X86::VMOVAPDZrm:
  case X86::VMOVAPSZrm:
  case X86::VMOVUPDZrm:
  case X86::VMOVUPSZrm: {
    FixupEntry Fixups[] = {
        {X86::VBROADCASTSSZrm, 1, 32, rebuildSplatCst},
        {X86::VBROADCASTSDZrm, 1, 64, rebuildSplatCst},
        {MultiDomain ? X86::VPMOVSXBQZrm : 0, 8, 8, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXBQZrm : 0, 8, 8, rebuildZExtCst},
        {X86::VBROADCASTF32X4Zrm, 1, 128, rebuildSplatCst},
        {MultiDomain ? X86::VPMOVSXBDZrm : 0, 16, 8, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXBDZrm : 0, 16, 8, rebuildZExtCst},
        {MultiDomain ? X86::VPMOVSXWQZrm : 0, 8, 16, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXWQZrm : 0, 8, 16, rebuildZExtCst},
        {X86::VBROADCASTF64X4Zrm, 1, 256, rebuildSplatCst},
        {MultiDomain ? X86::VPMOVSXWDZrm : 0, 16, 16, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXWDZrm : 0, 16, 16, rebuildZExtCst},
        {MultiDomain ? X86::VPMOVSXDQZrm : 0, 8, 32, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXDQZrm : 0, 8, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 512, 1);
  }
  /* Integer Loads */
  case X86::MOVDQArm:
  case X86::MOVDQUrm: {
    FixupEntry Fixups[] = {
        {HasSSE41 ? X86::PMOVSXBQrm : 0, 2, 8, rebuildSExtCst},
        {HasSSE41 ? X86::PMOVZXBQrm : 0, 2, 8, rebuildZExtCst},
        {X86::MOVDI2PDIrm, 1, 32, rebuildZeroUpperCst},
        {HasSSE41 ? X86::PMOVSXBDrm : 0, 4, 8, rebuildSExtCst},
        {HasSSE41 ? X86::PMOVZXBDrm : 0, 4, 8, rebuildZExtCst},
        {HasSSE41 ? X86::PMOVSXWQrm : 0, 2, 16, rebuildSExtCst},
        {HasSSE41 ? X86::PMOVZXWQrm : 0, 2, 16, rebuildZExtCst},
        {X86::MOVQI2PQIrm, 1, 64, rebuildZeroUpperCst},
        {HasSSE41 ? X86::PMOVSXBWrm : 0, 8, 8, rebuildSExtCst},
        {HasSSE41 ? X86::PMOVZXBWrm : 0, 8, 8, rebuildZExtCst},
        {HasSSE41 ? X86::PMOVSXWDrm : 0, 4, 16, rebuildSExtCst},
        {HasSSE41 ? X86::PMOVZXWDrm : 0, 4, 16, rebuildZExtCst},
        {HasSSE41 ? X86::PMOVSXDQrm : 0, 2, 32, rebuildSExtCst},
        {HasSSE41 ? X86::PMOVZXDQrm : 0, 2, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 128, 1);
  }
  case X86::VMOVDQArm:
  case X86::VMOVDQUrm: {
    FixupEntry Fixups[] = {
        {HasAVX2 ? X86::VPBROADCASTBrm : 0, 1, 8, rebuildSplatCst},
        {HasAVX2 ? X86::VPBROADCASTWrm : 0, 1, 16, rebuildSplatCst},
        {X86::VPMOVSXBQrm, 2, 8, rebuildSExtCst},
        {X86::VPMOVZXBQrm, 2, 8, rebuildZExtCst},
        {X86::VMOVDI2PDIrm, 1, 32, rebuildZeroUpperCst},
        {HasAVX2 ? X86::VPBROADCASTDrm : X86::VBROADCASTSSrm, 1, 32,
         rebuildSplatCst},
        {X86::VPMOVSXBDrm, 4, 8, rebuildSExtCst},
        {X86::VPMOVZXBDrm, 4, 8, rebuildZExtCst},
        {X86::VPMOVSXWQrm, 2, 16, rebuildSExtCst},
        {X86::VPMOVZXWQrm, 2, 16, rebuildZExtCst},
        {X86::VMOVQI2PQIrm, 1, 64, rebuildZeroUpperCst},
        {HasAVX2 ? X86::VPBROADCASTQrm : X86::VMOVDDUPrm, 1, 64,
         rebuildSplatCst},
        {X86::VPMOVSXBWrm, 8, 8, rebuildSExtCst},
        {X86::VPMOVZXBWrm, 8, 8, rebuildZExtCst},
        {X86::VPMOVSXWDrm, 4, 16, rebuildSExtCst},
        {X86::VPMOVZXWDrm, 4, 16, rebuildZExtCst},
        {X86::VPMOVSXDQrm, 2, 32, rebuildSExtCst},
        {X86::VPMOVZXDQrm, 2, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 128, 1);
  }
  case X86::VMOVDQAYrm:
  case X86::VMOVDQUYrm: {
    FixupEntry Fixups[] = {
        {HasAVX2 ? X86::VPBROADCASTBYrm : 0, 1, 8, rebuildSplatCst},
        {HasAVX2 ? X86::VPBROADCASTWYrm : 0, 1, 16, rebuildSplatCst},
        {HasAVX2 ? X86::VPBROADCASTDYrm : X86::VBROADCASTSSYrm, 1, 32,
         rebuildSplatCst},
        {HasAVX2 ? X86::VPMOVSXBQYrm : 0, 4, 8, rebuildSExtCst},
        {HasAVX2 ? X86::VPMOVZXBQYrm : 0, 4, 8, rebuildZExtCst},
        {HasAVX2 ? X86::VPBROADCASTQYrm : X86::VBROADCASTSDYrm, 1, 64,
         rebuildSplatCst},
        {HasAVX2 ? X86::VPMOVSXBDYrm : 0, 8, 8, rebuildSExtCst},
        {HasAVX2 ? X86::VPMOVZXBDYrm : 0, 8, 8, rebuildZExtCst},
        {HasAVX2 ? X86::VPMOVSXWQYrm : 0, 4, 16, rebuildSExtCst},
        {HasAVX2 ? X86::VPMOVZXWQYrm : 0, 4, 16, rebuildZExtCst},
        {HasAVX2 ? X86::VBROADCASTI128rm : X86::VBROADCASTF128rm, 1, 128,
         rebuildSplatCst},
        {HasAVX2 ? X86::VPMOVSXBWYrm : 0, 16, 8, rebuildSExtCst},
        {HasAVX2 ? X86::VPMOVZXBWYrm : 0, 16, 8, rebuildZExtCst},
        {HasAVX2 ? X86::VPMOVSXWDYrm : 0, 8, 16, rebuildSExtCst},
        {HasAVX2 ? X86::VPMOVZXWDYrm : 0, 8, 16, rebuildZExtCst},
        {HasAVX2 ? X86::VPMOVSXDQYrm : 0, 4, 32, rebuildSExtCst},
        {HasAVX2 ? X86::VPMOVZXDQYrm : 0, 4, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 256, 1);
  }
  case X86::VMOVDQA32Z128rm:
  case X86::VMOVDQA64Z128rm:
  case X86::VMOVDQU32Z128rm:
  case X86::VMOVDQU64Z128rm: {
    FixupEntry Fixups[] = {
        {HasBWI ? X86::VPBROADCASTBZ128rm : 0, 1, 8, rebuildSplatCst},
        {HasBWI ? X86::VPBROADCASTWZ128rm : 0, 1, 16, rebuildSplatCst},
        {X86::VPMOVSXBQZ128rm, 2, 8, rebuildSExtCst},
        {X86::VPMOVZXBQZ128rm, 2, 8, rebuildZExtCst},
        {X86::VMOVDI2PDIZrm, 1, 32, rebuildZeroUpperCst},
        {X86::VPBROADCASTDZ128rm, 1, 32, rebuildSplatCst},
        {X86::VPMOVSXBDZ128rm, 4, 8, rebuildSExtCst},
        {X86::VPMOVZXBDZ128rm, 4, 8, rebuildZExtCst},
        {X86::VPMOVSXWQZ128rm, 2, 16, rebuildSExtCst},
        {X86::VPMOVZXWQZ128rm, 2, 16, rebuildZExtCst},
        {X86::VMOVQI2PQIZrm, 1, 64, rebuildZeroUpperCst},
        {X86::VPBROADCASTQZ128rm, 1, 64, rebuildSplatCst},
        {HasBWI ? X86::VPMOVSXBWZ128rm : 0, 8, 8, rebuildSExtCst},
        {HasBWI ? X86::VPMOVZXBWZ128rm : 0, 8, 8, rebuildZExtCst},
        {X86::VPMOVSXWDZ128rm, 4, 16, rebuildSExtCst},
        {X86::VPMOVZXWDZ128rm, 4, 16, rebuildZExtCst},
        {X86::VPMOVSXDQZ128rm, 2, 32, rebuildSExtCst},
        {X86::VPMOVZXDQZ128rm, 2, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 128, 1);
  }
  case X86::VMOVDQA32Z256rm:
  case X86::VMOVDQA64Z256rm:
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQU64Z256rm: {
    FixupEntry Fixups[] = {
        {HasBWI ? X86::VPBROADCASTBZ256rm : 0, 1, 8, rebuildSplatCst},
        {HasBWI ? X86::VPBROADCASTWZ256rm : 0, 1, 16, rebuildSplatCst},
        {X86::VPBROADCASTDZ256rm, 1, 32, rebuildSplatCst},
        {X86::VPMOVSXBQZ256rm, 4, 8, rebuildSExtCst},
        {X86::VPMOVZXBQZ256rm, 4, 8, rebuildZExtCst},
        {X86::VPBROADCASTQZ256rm, 1, 64, rebuildSplatCst},
        {X86::VPMOVSXBDZ256rm, 8, 8, rebuildSExtCst},
        {X86::VPMOVZXBDZ256rm, 8, 8, rebuildZExtCst},
        {X86::VPMOVSXWQZ256rm, 4, 16, rebuildSExtCst},
        {X86::VPMOVZXWQZ256rm, 4, 16, rebuildZExtCst},
        {X86::VBROADCASTI32X4Z256rm, 1, 128, rebuildSplatCst},
        {HasBWI ? X86::VPMOVSXBWZ256rm : 0, 16, 8, rebuildSExtCst},
        {HasBWI ? X86::VPMOVZXBWZ256rm : 0, 16, 8, rebuildZExtCst},
        {X86::VPMOVSXWDZ256rm, 8, 16, rebuildSExtCst},
        {X86::VPMOVZXWDZ256rm, 8, 16, rebuildZExtCst},
        {X86::VPMOVSXDQZ256rm, 4, 32, rebuildSExtCst},
        {X86::VPMOVZXDQZ256rm, 4, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 256, 1);
  }
  case X86::VMOVDQA32Zrm:
  case X86::VMOVDQA64Zrm:
  case X86::VMOVDQU32Zrm:
  case X86::VMOVDQU64Zrm: {
    FixupEntry Fixups[] = {
        {HasBWI ? X86::VPBROADCASTBZrm : 0, 1, 8, rebuildSplatCst},
        {HasBWI ? X86::VPBROADCASTWZrm : 0, 1, 16, rebuildSplatCst},
        {X86::VPBROADCASTDZrm, 1, 32, rebuildSplatCst},
        {X86::VPBROADCASTQZrm, 1, 64, rebuildSplatCst},
        {X86::VPMOVSXBQZrm, 8, 8, rebuildSExtCst},
        {X86::VPMOVZXBQZrm, 8, 8, rebuildZExtCst},
        {X86::VBROADCASTI32X4Zrm, 1, 128, rebuildSplatCst},
        {X86::VPMOVSXBDZrm, 16, 8, rebuildSExtCst},
        {X86::VPMOVZXBDZrm, 16, 8, rebuildZExtCst},
        {X86::VPMOVSXWQZrm, 8, 16, rebuildSExtCst},
        {X86::VPMOVZXWQZrm, 8, 16, rebuildZExtCst},
        {X86::VBROADCASTI64X4Zrm, 1, 256, rebuildSplatCst},
        {HasBWI ? X86::VPMOVSXBWZrm : 0, 32, 8, rebuildSExtCst},
        {HasBWI ? X86::VPMOVZXBWZrm : 0, 32, 8, rebuildZExtCst},
        {X86::VPMOVSXWDZrm, 16, 16, rebuildSExtCst},
        {X86::VPMOVZXWDZrm, 16, 16, rebuildZExtCst},
        {X86::VPMOVSXDQZrm, 8, 32, rebuildSExtCst},
        {X86::VPMOVZXDQZrm, 8, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 512, 1);
  }
  }

  auto ConvertToBroadcast = [&](unsigned OpSrc, int BW) {
    if (OpSrc) {
      if (const X86FoldTableEntry *Mem2Bcst =
              llvm::lookupBroadcastFoldTableBySize(OpSrc, BW)) {
        unsigned OpBcst = Mem2Bcst->DstOp;
        unsigned OpNoBcst = Mem2Bcst->Flags & TB_INDEX_MASK;
        FixupEntry Fixups[] = {{(int)OpBcst, 1, BW, rebuildSplatCst}};
        // TODO: Add support for RegBitWidth, but currently rebuildSplatCst
        // doesn't require it (defaults to Constant::getPrimitiveSizeInBits).
        return FixupConstant(Fixups, 0, OpNoBcst);
      }
    }
    return false;
  };

  // Attempt to find an AVX512 mapping from a full width memory-fold
  // instruction to a broadcast-fold instruction variant.
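  // e.g. a VPADDDZrm whose constant pool operand is a 32-bit splat can likely
  // be remapped to its broadcast-fold form (VPADDDZrmb) with a 4-byte pool
  // entry (an illustrative pair; the exact mapping comes from the fold table).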
  if ((MI.getDesc().TSFlags & X86II::EncodingMask) == X86II::EVEX)
    return ConvertToBroadcast(Opc, 32) || ConvertToBroadcast(Opc, 64);

  // Reverse the X86InstrInfo::setExecutionDomainCustom EVEX->VEX logic
  // conversion to see if we can convert to a broadcasted (integer) logic op.
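  // e.g. on a VLX-without-DQI target, a VPANDrm whose memory operand is a
  // 32-bit splat constant maps to VPANDDZ128rm and can then be folded to its
  // broadcast form by ConvertToBroadcast below.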
  if (HasVLX && !HasDQI) {
    unsigned OpSrc32 = 0, OpSrc64 = 0;
    switch (Opc) {
    case X86::VANDPDrm:
    case X86::VANDPSrm:
    case X86::VPANDrm:
      OpSrc32 = X86::VPANDDZ128rm;
      OpSrc64 = X86::VPANDQZ128rm;
      break;
    case X86::VANDPDYrm:
    case X86::VANDPSYrm:
    case X86::VPANDYrm:
      OpSrc32 = X86::VPANDDZ256rm;
      OpSrc64 = X86::VPANDQZ256rm;
      break;
    case X86::VANDNPDrm:
    case X86::VANDNPSrm:
    case X86::VPANDNrm:
      OpSrc32 = X86::VPANDNDZ128rm;
      OpSrc64 = X86::VPANDNQZ128rm;
      break;
    case X86::VANDNPDYrm:
    case X86::VANDNPSYrm:
    case X86::VPANDNYrm:
      OpSrc32 = X86::VPANDNDZ256rm;
      OpSrc64 = X86::VPANDNQZ256rm;
      break;
    case X86::VORPDrm:
    case X86::VORPSrm:
    case X86::VPORrm:
      OpSrc32 = X86::VPORDZ128rm;
      OpSrc64 = X86::VPORQZ128rm;
      break;
    case X86::VORPDYrm:
    case X86::VORPSYrm:
    case X86::VPORYrm:
      OpSrc32 = X86::VPORDZ256rm;
      OpSrc64 = X86::VPORQZ256rm;
      break;
    case X86::VXORPDrm:
    case X86::VXORPSrm:
    case X86::VPXORrm:
      OpSrc32 = X86::VPXORDZ128rm;
      OpSrc64 = X86::VPXORQZ128rm;
      break;
    case X86::VXORPDYrm:
    case X86::VXORPSYrm:
    case X86::VPXORYrm:
      OpSrc32 = X86::VPXORDZ256rm;
      OpSrc64 = X86::VPXORQZ256rm;
      break;
    }
    if (OpSrc32 || OpSrc64)
      return ConvertToBroadcast(OpSrc32, 32) || ConvertToBroadcast(OpSrc64, 64);
  }

  return false;
}

bool X86FixupVectorConstantsPass::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "Start X86FixupVectorConstants\n";);
  bool Changed = false;
  ST = &MF.getSubtarget<X86Subtarget>();
  TII = ST->getInstrInfo();
  SM = &ST->getSchedModel();

  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (processInstruction(MF, MBB, MI)) {
        ++NumInstChanges;
        Changed = true;
      }
    }
  }
  LLVM_DEBUG(dbgs() << "End X86FixupVectorConstants\n";);
  return Changed;
}