//===- BoundsChecking.cpp - Instrumentation for run-time bounds checking --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation/BoundsChecking.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "bounds-checking"

static cl::opt<bool> SingleTrapBB("bounds-checking-single-trap",
                                  cl::desc("Use one trap block per function"));

static cl::opt<bool> DebugTrapBB("bounds-checking-unique-traps",
                                 cl::desc("Always use one trap per check"));

STATISTIC(ChecksAdded, "Bounds checks added");
STATISTIC(ChecksSkipped, "Bounds checks skipped");
STATISTIC(ChecksUnable, "Bounds checks unable to add");

using BuilderTy = IRBuilder<TargetFolder>;

/// Gets the conditions under which memory accessing instructions will overflow.
///
/// \p Ptr is the pointer that will be read/written, and \p InstVal is either
/// the result from the load or the value being stored. It is used to determine
/// the size of the memory block that is touched.
///
/// Returns the condition under which the access will overflow.
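///
/// For example (illustrative only): for `store i64 %v, ptr %p` the needed
/// size is 8 bytes, so the returned condition is true exactly when fewer than
/// 8 bytes remain between the pointer's offset and the end of the underlying
/// object, or when the offset itself is negative or past the end.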
static Value *getBoundsCheckCond(Value *Ptr, Value *InstVal,
                                 const DataLayout &DL, TargetLibraryInfo &TLI,
                                 ObjectSizeOffsetEvaluator &ObjSizeEval,
                                 BuilderTy &IRB, ScalarEvolution &SE) {
  TypeSize NeededSize = DL.getTypeStoreSize(InstVal->getType());
  LLVM_DEBUG(dbgs() << "Instrument " << *Ptr << " for " << Twine(NeededSize)
                    << " bytes\n");

  SizeOffsetValue SizeOffset = ObjSizeEval.compute(Ptr);

  if (!SizeOffset.bothKnown()) {
    ++ChecksUnable;
    return nullptr;
  }

  Value *Size = SizeOffset.Size;
  Value *Offset = SizeOffset.Offset;
  ConstantInt *SizeCI = dyn_cast<ConstantInt>(Size);

  Type *IndexTy = DL.getIndexType(Ptr->getType());
  Value *NeededSizeVal = IRB.CreateTypeSize(IndexTy, NeededSize);

  auto SizeRange = SE.getUnsignedRange(SE.getSCEV(Size));
  auto OffsetRange = SE.getUnsignedRange(SE.getSCEV(Offset));
  auto NeededSizeRange = SE.getUnsignedRange(SE.getSCEV(NeededSizeVal));

  // three checks are required to ensure safety:
  // . Offset >= 0  (since the offset is given from the base ptr)
  // . Size >= Offset  (unsigned)
  // . Size - Offset >= NeededSize  (unsigned)
  //
  // optimization: if Size >= 0 (signed), skip 1st check
  // FIXME: add NSW/NUW here?  -- we don't care if the subtraction overflows
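  //
  // Worked example (illustrative): for a 16-byte object, Offset == 12 and
  // NeededSize == 8 gives Size - Offset == 4 < 8, so the access traps; with
  // Offset == 4 none of the conditions hold and no trap is emitted.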
  Value *ObjSize = IRB.CreateSub(Size, Offset);
  Value *Cmp2 = SizeRange.getUnsignedMin().uge(OffsetRange.getUnsignedMax())
                    ? ConstantInt::getFalse(Ptr->getContext())
                    : IRB.CreateICmpULT(Size, Offset);
  Value *Cmp3 = SizeRange.sub(OffsetRange)
                        .getUnsignedMin()
                        .uge(NeededSizeRange.getUnsignedMax())
                    ? ConstantInt::getFalse(Ptr->getContext())
                    : IRB.CreateICmpULT(ObjSize, NeededSizeVal);
  Value *Or = IRB.CreateOr(Cmp2, Cmp3);
  if ((!SizeCI || SizeCI->getValue().slt(0)) &&
      !SizeRange.getSignedMin().isNonNegative()) {
    Value *Cmp1 = IRB.CreateICmpSLT(Offset, ConstantInt::get(IndexTy, 0));
    Or = IRB.CreateOr(Cmp1, Or);
  }

  return Or;
}

/// Adds run-time bounds checks to memory accessing instructions.
///
/// \p Or is the condition that should guard the trap.
///
/// \p GetTrapBB is a callable that returns the trap BB to use on failure.
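///
/// If \p Or is a constant false the check is skipped entirely. Otherwise the
/// block is split at the builder's insert point and the original block ends
/// with a branch to the trap block: conditional on \p Or, or unconditional
/// when \p Or is a constant true.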
template <typename GetTrapBBT>
static void insertBoundsCheck(Value *Or, BuilderTy &IRB, GetTrapBBT GetTrapBB) {
  // check if the comparison is always false
  ConstantInt *C = dyn_cast_or_null<ConstantInt>(Or);
  if (C) {
    ++ChecksSkipped;
    // If the condition is a constant zero, the check can never fail, so
    // there is nothing to do.
    if (!C->getZExtValue())
      return;
  }
  ++ChecksAdded;

  BasicBlock::iterator SplitI = IRB.GetInsertPoint();
  BasicBlock *OldBB = SplitI->getParent();
  BasicBlock *Cont = OldBB->splitBasicBlock(SplitI);
  OldBB->getTerminator()->eraseFromParent();

  if (C) {
    // The condition is a constant true: branch to the trap unconditionally.
    // FIXME: We should really handle this differently to bypass splitting
    // the block.
    BranchInst::Create(GetTrapBB(IRB), OldBB);
    return;
  }

  // Create the conditional branch.
  BranchInst::Create(GetTrapBB(IRB), Cont, Or, OldBB);
}

static bool addBoundsChecking(Function &F, TargetLibraryInfo &TLI,
                              ScalarEvolution &SE) {
  if (F.hasFnAttribute(Attribute::NoSanitizeBounds))
    return false;

  const DataLayout &DL = F.getDataLayout();
  ObjectSizeOpts EvalOpts;
  EvalOpts.RoundToAlign = true;
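  // Only emit a check when the evaluator can determine both the size of the
  // underlying object and the pointer's offset into it exactly; when either
  // is unknown, getBoundsCheckCond() bails out and bumps ChecksUnable.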
  EvalOpts.EvalMode = ObjectSizeOpts::Mode::ExactUnderlyingSizeAndOffset;
  ObjectSizeOffsetEvaluator ObjSizeEval(DL, &TLI, F.getContext(), EvalOpts);

  // check HANDLE_MEMORY_INST in include/llvm/Instruction.def for memory
  // touching instructions
  SmallVector<std::pair<Instruction *, Value *>, 4> TrapInfo;
  for (Instruction &I : instructions(F)) {
    Value *Or = nullptr;
    BuilderTy IRB(I.getParent(), BasicBlock::iterator(&I), TargetFolder(DL));
    if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
      if (!LI->isVolatile())
        Or = getBoundsCheckCond(LI->getPointerOperand(), LI, DL, TLI,
                                ObjSizeEval, IRB, SE);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(&I)) {
      if (!SI->isVolatile())
        Or = getBoundsCheckCond(SI->getPointerOperand(), SI->getValueOperand(),
                                DL, TLI, ObjSizeEval, IRB, SE);
    } else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
      if (!AI->isVolatile())
        Or =
            getBoundsCheckCond(AI->getPointerOperand(), AI->getCompareOperand(),
                               DL, TLI, ObjSizeEval, IRB, SE);
    } else if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
      if (!AI->isVolatile())
        Or = getBoundsCheckCond(AI->getPointerOperand(), AI->getValOperand(),
                                DL, TLI, ObjSizeEval, IRB, SE);
    }
    if (Or)
      TrapInfo.push_back(std::make_pair(&I, Or));
  }

  // Create a trapping basic block on demand using a callback. Depending on
  // flags, this will either create a single block for the entire function or
  // will create a fresh block every time it is called.
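  // With -bounds-checking-unique-traps, each check gets a fresh block calling
  // llvm.ubsantrap (passing Fn->size() as the immediate argument); otherwise
  // llvm.trap is used, and the block is reused across the whole function when
  // -bounds-checking-single-trap is set.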
  BasicBlock *TrapBB = nullptr;
  auto GetTrapBB = [&TrapBB](BuilderTy &IRB) {
    Function *Fn = IRB.GetInsertBlock()->getParent();
    auto DebugLoc = IRB.getCurrentDebugLocation();
    IRBuilder<>::InsertPointGuard Guard(IRB);

    if (TrapBB && SingleTrapBB && !DebugTrapBB)
      return TrapBB;

    TrapBB = BasicBlock::Create(Fn->getContext(), "trap", Fn);
    IRB.SetInsertPoint(TrapBB);

    Intrinsic::ID IntrID = DebugTrapBB ? Intrinsic::ubsantrap : Intrinsic::trap;
    auto *F = Intrinsic::getDeclaration(Fn->getParent(), IntrID);

    CallInst *TrapCall;
    if (DebugTrapBB) {
      TrapCall =
          IRB.CreateCall(F, ConstantInt::get(IRB.getInt8Ty(), Fn->size()));
    } else {
      TrapCall = IRB.CreateCall(F, {});
    }

    TrapCall->setDoesNotReturn();
    TrapCall->setDoesNotThrow();
    TrapCall->setDebugLoc(DebugLoc);
    IRB.CreateUnreachable();

    return TrapBB;
  };

  // Add the checks.
  for (const auto &Entry : TrapInfo) {
    Instruction *Inst = Entry.first;
    BuilderTy IRB(Inst->getParent(), BasicBlock::iterator(Inst),
                  TargetFolder(DL));
    insertBoundsCheck(Entry.second, IRB, GetTrapBB);
  }

  return !TrapInfo.empty();
}

PreservedAnalyses BoundsCheckingPass::run(Function &F,
                                          FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);

  if (!addBoundsChecking(F, TLI, SE))
    return PreservedAnalyses::all();

  return PreservedAnalyses::none();
}
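
// Note: this pass is registered as "bounds-checking" with the new pass
// manager (e.g. `opt -passes=bounds-checking`); clang's -fsanitize=local-bounds
// is the usual front-end way to enable it.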