//===- InferAlignment.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Infer alignment for loads, stores and other memory operations based on
// trailing-zero known-bits information.
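//
// For example, a load through a pointer that is known to have at least three
// trailing zero bits can have its alignment raised to 8 if it is currently
// lower.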
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/InferAlignment.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

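/// Try to improve the alignment of the memory access \p I (currently only
/// loads and stores are handled). \p Fn computes a candidate alignment from
/// the pointer operand, the current alignment and the preferred type
/// alignment; the instruction is only updated if the new alignment is
/// strictly greater than the old one.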
static bool tryToImproveAlign(
    const DataLayout &DL, Instruction *I,
    function_ref<Align(Value *PtrOp, Align OldAlign, Align PrefAlign)> Fn) {
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    Value *PtrOp = LI->getPointerOperand();
    Align OldAlign = LI->getAlign();
    Align NewAlign = Fn(PtrOp, OldAlign, DL.getPrefTypeAlign(LI->getType()));
    if (NewAlign > OldAlign) {
      LI->setAlignment(NewAlign);
      return true;
    }
  } else if (auto *SI = dyn_cast<StoreInst>(I)) {
    Value *PtrOp = SI->getPointerOperand();
    Value *ValOp = SI->getValueOperand();
    Align OldAlign = SI->getAlign();
    Align NewAlign = Fn(PtrOp, OldAlign, DL.getPrefTypeAlign(ValOp->getType()));
    if (NewAlign > OldAlign) {
      SI->setAlignment(NewAlign);
      return true;
    }
  }
  // TODO: Also handle memory intrinsics.
  return false;
}

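/// Infer better alignments for the loads and stores in \p F, using \p AC and
/// \p DT when computing known bits. Returns true if any alignment was changed.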
bool inferAlignment(Function &F, AssumptionCache &AC, DominatorTree &DT) {
  const DataLayout &DL = F.getDataLayout();
  bool Changed = false;

  // Enforce preferred type alignment if possible. We do this as a separate
  // pass first, because it may improve the alignments we infer below.
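  // tryEnforceAlignment may raise the alignment of the underlying alloca or
  // global variable to the preferred alignment when it is safe to do so.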
  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      Changed |= tryToImproveAlign(
          DL, &I, [&](Value *PtrOp, Align OldAlign, Align PrefAlign) {
            if (PrefAlign > OldAlign)
              return std::max(OldAlign,
                              tryEnforceAlignment(PtrOp, PrefAlign, DL));
            return OldAlign;
          });
    }
  }

  // Compute alignment from known bits.
  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      Changed |= tryToImproveAlign(
          DL, &I, [&](Value *PtrOp, Align OldAlign, Align PrefAlign) {
            KnownBits Known = computeKnownBits(PtrOp, DL, 0, &AC, &I, &DT);
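            // The number of known trailing zero bits is the log2 of the
            // provable alignment. Avoid excessively large values, such as
            // those computed for a null pointer; LLVM does not support
            // alignments larger than 1 << MaxAlignmentExponent.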
            unsigned TrailZ = std::min(Known.countMinTrailingZeros(),
                                       +Value::MaxAlignmentExponent);
            return Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));
          });
    }
  }

  return Changed;
}

PreservedAnalyses InferAlignmentPass::run(Function &F,
                                          FunctionAnalysisManager &AM) {
  AssumptionCache &AC = AM.getResult<AssumptionAnalysis>(F);
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  inferAlignment(F, AC, DT);
  // Changes to alignment shouldn't invalidate analyses.
  return PreservedAnalyses::all();
}