//===-- Lint.cpp - Check for common errors in LLVM IR --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass statically checks for common and easily-identified constructs
// which produce undefined or likely unintended behavior in LLVM IR.
//
// It is not a guarantee of correctness, in two ways. First, it isn't
// comprehensive. There are checks which could be done statically which are
// not yet implemented. Some of these are indicated by TODO comments, but
// those aren't comprehensive either. Second, many conditions cannot be
// checked statically. This pass does no dynamic instrumentation, so it
// can't check for all possible problems.
//
// Another limitation is that it assumes all code will be executed. A store
// through a null pointer in a basic block which is never reached is harmless,
// but this pass will warn about it anyway. This is the main reason why most
// of these checks live here instead of in the Verifier pass.
//
// Optimization passes may make conditions that this pass checks for more or
// less obvious. If an optimization pass appears to be introducing a warning,
// it may be that the optimization pass is merely exposing an existing
// condition in the code.
//
// This code may be run before instcombine. In many cases, instcombine checks
// for the same kinds of things and turns instructions with undefined behavior
// into unreachable (or equivalent). Because of this, this pass makes some
// effort to look through bitcasts and so on.
//
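// As a small illustration of the store-through-null case mentioned above,
// IR such as
//
//   store i32 0, ptr null
//
// is accepted by the Verifier (the store might never execute), but this pass
// reports "Undefined behavior: Null pointer dereference".
//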
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Lint.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScopedNoAliasAA.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <string>

using namespace llvm;

static const char LintAbortOnErrorArgName[] = "lint-abort-on-error";
static cl::opt<bool>
    LintAbortOnError(LintAbortOnErrorArgName, cl::init(false),
                     cl::desc("In the Lint pass, abort on errors."));

namespace {
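// Flags that describe how an instruction references memory. They may be
// combined bitwise (e.g. MemRef::Read | MemRef::Write) and are interpreted
// by visitMemoryReference.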
namespace MemRef {
static const unsigned Read = 1;
static const unsigned Write = 2;
static const unsigned Callee = 4;
static const unsigned Branchee = 8;
} // end namespace MemRef

class Lint : public InstVisitor<Lint> {
  friend class InstVisitor<Lint>;

  void visitFunction(Function &F);

  void visitCallBase(CallBase &CB);
  void visitMemoryReference(Instruction &I, const MemoryLocation &Loc,
                            MaybeAlign Alignment, Type *Ty, unsigned Flags);

  void visitReturnInst(ReturnInst &I);
  void visitLoadInst(LoadInst &I);
  void visitStoreInst(StoreInst &I);
  void visitXor(BinaryOperator &I);
  void visitSub(BinaryOperator &I);
  void visitLShr(BinaryOperator &I);
  void visitAShr(BinaryOperator &I);
  void visitShl(BinaryOperator &I);
  void visitSDiv(BinaryOperator &I);
  void visitUDiv(BinaryOperator &I);
  void visitSRem(BinaryOperator &I);
  void visitURem(BinaryOperator &I);
  void visitAllocaInst(AllocaInst &I);
  void visitVAArgInst(VAArgInst &I);
  void visitIndirectBrInst(IndirectBrInst &I);
  void visitExtractElementInst(ExtractElementInst &I);
  void visitInsertElementInst(InsertElementInst &I);
  void visitUnreachableInst(UnreachableInst &I);

  Value *findValue(Value *V, bool OffsetOk) const;
  Value *findValueImpl(Value *V, bool OffsetOk,
                       SmallPtrSetImpl<Value *> &Visited) const;

public:
  Module *Mod;
  const DataLayout *DL;
  AliasAnalysis *AA;
  AssumptionCache *AC;
  DominatorTree *DT;
  TargetLibraryInfo *TLI;

  std::string Messages;
  raw_string_ostream MessagesStr;

  Lint(Module *Mod, const DataLayout *DL, AliasAnalysis *AA,
       AssumptionCache *AC, DominatorTree *DT, TargetLibraryInfo *TLI)
      : Mod(Mod), DL(DL), AA(AA), AC(AC), DT(DT), TLI(TLI),
        MessagesStr(Messages) {}

  void WriteValues(ArrayRef<const Value *> Vs) {
    for (const Value *V : Vs) {
      if (!V)
        continue;
      if (isa<Instruction>(V)) {
        MessagesStr << *V << '\n';
      } else {
        V->printAsOperand(MessagesStr, true, Mod);
        MessagesStr << '\n';
      }
    }
  }

  /// A check failed, so print out the condition and the message.
  ///
  /// This provides a nice place to put a breakpoint if you want to see why
  /// something is not correct.
  void CheckFailed(const Twine &Message) { MessagesStr << Message << '\n'; }

  /// A check failed (with values to print).
  ///
  /// This calls the Message-only version so that the above is easier to set
  /// a breakpoint on.
  template <typename T1, typename... Ts>
  void CheckFailed(const Twine &Message, const T1 &V1, const Ts &...Vs) {
    CheckFailed(Message);
    WriteValues({V1, Vs...});
  }
};
} // end anonymous namespace

// Check - We know that cond should be true; if not, print an error message.
#define Check(C, ...)                                                          \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return;                                                                  \
    }                                                                          \
  } while (false)
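
// Note that a failed Check only records a message and returns from the
// current visit method; the messages are printed by LintPass::run, which
// aborts only when -lint-abort-on-error is set.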

void Lint::visitFunction(Function &F) {
  // This isn't undefined behavior, it's just a little unusual, and it's a
  // fairly common mistake to neglect to name a function.
  Check(F.hasName() || F.hasLocalLinkage(),
        "Unusual: Unnamed function with non-local linkage", &F);

  // TODO: Check for irreducible control flow.
}

void Lint::visitCallBase(CallBase &I) {
  Value *Callee = I.getCalledOperand();

  visitMemoryReference(I, MemoryLocation::getAfter(Callee), std::nullopt,
                       nullptr, MemRef::Callee);

  if (Function *F = dyn_cast<Function>(findValue(Callee,
                                                 /*OffsetOk=*/false))) {
    Check(I.getCallingConv() == F->getCallingConv(),
          "Undefined behavior: Caller and callee calling convention differ",
          &I);

    FunctionType *FT = F->getFunctionType();
    unsigned NumActualArgs = I.arg_size();

    Check(FT->isVarArg() ? FT->getNumParams() <= NumActualArgs
                         : FT->getNumParams() == NumActualArgs,
          "Undefined behavior: Call argument count mismatches callee "
          "argument count",
          &I);

    Check(FT->getReturnType() == I.getType(),
          "Undefined behavior: Call return type mismatches "
          "callee return type",
          &I);

    // Check argument types (in case the callee was casted) and attributes.
    // TODO: Verify that caller and callee attributes are compatible.
    Function::arg_iterator PI = F->arg_begin(), PE = F->arg_end();
    auto AI = I.arg_begin(), AE = I.arg_end();
    for (; AI != AE; ++AI) {
      Value *Actual = *AI;
      if (PI != PE) {
        Argument *Formal = &*PI++;
        Check(Formal->getType() == Actual->getType(),
              "Undefined behavior: Call argument type mismatches "
              "callee parameter type",
              &I);

        // Check that noalias arguments don't alias other arguments. This is
        // not fully precise because we don't know the sizes of the
        // dereferenced memory regions.
        if (Formal->hasNoAliasAttr() && Actual->getType()->isPointerTy()) {
          AttributeList PAL = I.getAttributes();
          unsigned ArgNo = 0;
          for (auto *BI = I.arg_begin(); BI != AE; ++BI, ++ArgNo) {
            // Skip ByVal arguments since they will be memcpy'd to the callee's
            // stack so we're not really passing the pointer anyway.
            if (PAL.hasParamAttr(ArgNo, Attribute::ByVal))
              continue;
            // If both arguments are readonly, they have no dependence.
            if (Formal->onlyReadsMemory() && I.onlyReadsMemory(ArgNo))
              continue;
            // Skip readnone arguments since those are guaranteed not to be
            // dereferenced anyway.
            if (I.doesNotAccessMemory(ArgNo))
              continue;
            if (AI != BI && (*BI)->getType()->isPointerTy()) {
              AliasResult Result = AA->alias(*AI, *BI);
              Check(Result != AliasResult::MustAlias &&
                        Result != AliasResult::PartialAlias,
                    "Unusual: noalias argument aliases another argument", &I);
            }
          }
        }

        // Check that an sret argument points to valid memory.
        if (Formal->hasStructRetAttr() && Actual->getType()->isPointerTy()) {
          Type *Ty = Formal->getParamStructRetType();
          MemoryLocation Loc(
              Actual, LocationSize::precise(DL->getTypeStoreSize(Ty)));
          visitMemoryReference(I, Loc, DL->getABITypeAlign(Ty), Ty,
                               MemRef::Read | MemRef::Write);
        }
      }
    }
  }

  if (const auto *CI = dyn_cast<CallInst>(&I)) {
    if (CI->isTailCall()) {
      const AttributeList &PAL = CI->getAttributes();
      unsigned ArgNo = 0;
      for (Value *Arg : I.args()) {
        // Skip ByVal arguments since they will be memcpy'd to the callee's
        // stack anyway.
        if (PAL.hasParamAttr(ArgNo++, Attribute::ByVal))
          continue;
        Value *Obj = findValue(Arg, /*OffsetOk=*/true);
        Check(!isa<AllocaInst>(Obj),
              "Undefined behavior: Call with \"tail\" keyword references "
              "alloca",
              &I);
      }
    }
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I))
    switch (II->getIntrinsicID()) {
    default:
      break;

      // TODO: Check more intrinsics

    case Intrinsic::memcpy:
    case Intrinsic::memcpy_inline: {
      MemCpyInst *MCI = cast<MemCpyInst>(&I);
      visitMemoryReference(I, MemoryLocation::getForDest(MCI),
                           MCI->getDestAlign(), nullptr, MemRef::Write);
      visitMemoryReference(I, MemoryLocation::getForSource(MCI),
                           MCI->getSourceAlign(), nullptr, MemRef::Read);

      // Check that the memcpy arguments don't overlap. The AliasAnalysis API
      // isn't expressive enough for what we really want to do. Known partial
      // overlap is not distinguished from the case where nothing is known.
      auto Size = LocationSize::afterPointer();
      if (const ConstantInt *Len =
              dyn_cast<ConstantInt>(findValue(MCI->getLength(),
                                              /*OffsetOk=*/false)))
        if (Len->getValue().isIntN(32))
          Size = LocationSize::precise(Len->getValue().getZExtValue());
      Check(AA->alias(MCI->getSource(), Size, MCI->getDest(), Size) !=
                AliasResult::MustAlias,
            "Undefined behavior: memcpy source and destination overlap", &I);
      break;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MMI = cast<MemMoveInst>(&I);
      visitMemoryReference(I, MemoryLocation::getForDest(MMI),
                           MMI->getDestAlign(), nullptr, MemRef::Write);
      visitMemoryReference(I, MemoryLocation::getForSource(MMI),
                           MMI->getSourceAlign(), nullptr, MemRef::Read);
      break;
    }
    case Intrinsic::memset: {
      MemSetInst *MSI = cast<MemSetInst>(&I);
      visitMemoryReference(I, MemoryLocation::getForDest(MSI),
                           MSI->getDestAlign(), nullptr, MemRef::Write);
      break;
    }
    case Intrinsic::memset_inline: {
      MemSetInlineInst *MSII = cast<MemSetInlineInst>(&I);
      visitMemoryReference(I, MemoryLocation::getForDest(MSII),
                           MSII->getDestAlign(), nullptr, MemRef::Write);
      break;
    }

    case Intrinsic::vastart:
      // vastart in non-varargs function is rejected by the verifier
      visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI),
                           std::nullopt, nullptr, MemRef::Read | MemRef::Write);
      break;
    case Intrinsic::vacopy:
      visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI),
                           std::nullopt, nullptr, MemRef::Write);
      visitMemoryReference(I, MemoryLocation::getForArgument(&I, 1, TLI),
                           std::nullopt, nullptr, MemRef::Read);
      break;
    case Intrinsic::vaend:
      visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI),
                           std::nullopt, nullptr, MemRef::Read | MemRef::Write);
      break;

    case Intrinsic::stackrestore:
      // Stackrestore doesn't read or write memory, but it sets the
      // stack pointer, which the compiler may read from or write to
      // at any time, so check it for both readability and writeability.
      visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI),
                           std::nullopt, nullptr, MemRef::Read | MemRef::Write);
      break;
    case Intrinsic::get_active_lane_mask:
      if (auto *TripCount = dyn_cast<ConstantInt>(I.getArgOperand(1)))
        Check(!TripCount->isZero(),
              "get_active_lane_mask: operand #2 "
              "must be greater than 0",
              &I);
      break;
    }
}

void Lint::visitReturnInst(ReturnInst &I) {
  Function *F = I.getParent()->getParent();
  Check(!F->doesNotReturn(),
        "Unusual: Return statement in function with noreturn attribute", &I);

  if (Value *V = I.getReturnValue()) {
    Value *Obj = findValue(V, /*OffsetOk=*/true);
    Check(!isa<AllocaInst>(Obj), "Unusual: Returning alloca value", &I);
  }
}

// TODO: Check that the reference is in bounds.
// TODO: Check readnone/readonly function attributes.
void Lint::visitMemoryReference(Instruction &I, const MemoryLocation &Loc,
                                MaybeAlign Align, Type *Ty, unsigned Flags) {
  // If no memory is being referenced, it doesn't matter if the pointer
  // is valid.
  if (Loc.Size.isZero())
    return;

  Value *Ptr = const_cast<Value *>(Loc.Ptr);
  Value *UnderlyingObject = findValue(Ptr, /*OffsetOk=*/true);
  Check(!isa<ConstantPointerNull>(UnderlyingObject),
        "Undefined behavior: Null pointer dereference", &I);
  Check(!isa<UndefValue>(UnderlyingObject),
        "Undefined behavior: Undef pointer dereference", &I);
  Check(!isa<ConstantInt>(UnderlyingObject) ||
            !cast<ConstantInt>(UnderlyingObject)->isMinusOne(),
        "Unusual: All-ones pointer dereference", &I);
  Check(!isa<ConstantInt>(UnderlyingObject) ||
            !cast<ConstantInt>(UnderlyingObject)->isOne(),
        "Unusual: Address one pointer dereference", &I);

  if (Flags & MemRef::Write) {
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(UnderlyingObject))
      Check(!GV->isConstant(), "Undefined behavior: Write to read-only memory",
            &I);
    Check(!isa<Function>(UnderlyingObject) &&
              !isa<BlockAddress>(UnderlyingObject),
          "Undefined behavior: Write to text section", &I);
  }
  if (Flags & MemRef::Read) {
    Check(!isa<Function>(UnderlyingObject), "Unusual: Load from function body",
          &I);
    Check(!isa<BlockAddress>(UnderlyingObject),
          "Undefined behavior: Load from block address", &I);
  }
  if (Flags & MemRef::Callee) {
    Check(!isa<BlockAddress>(UnderlyingObject),
          "Undefined behavior: Call to block address", &I);
  }
  if (Flags & MemRef::Branchee) {
    Check(!isa<Constant>(UnderlyingObject) ||
              isa<BlockAddress>(UnderlyingObject),
          "Undefined behavior: Branch to non-blockaddress", &I);
  }

  // Check for buffer overflows and misalignment.
  // Only handles memory references that read/write something simple like an
  // alloca instruction or a global variable.
  int64_t Offset = 0;
  if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, *DL)) {
    // OK, so the access is to a constant offset from Ptr. Check that Ptr is
    // something we can handle and if so extract the size of this base object
    // along with its alignment.
    uint64_t BaseSize = MemoryLocation::UnknownSize;
    MaybeAlign BaseAlign;

    if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
      Type *ATy = AI->getAllocatedType();
      if (!AI->isArrayAllocation() && ATy->isSized())
        BaseSize = DL->getTypeAllocSize(ATy);
      BaseAlign = AI->getAlign();
    } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
      // If the global may be defined differently in another compilation unit
      // then don't warn about funky memory accesses.
      if (GV->hasDefinitiveInitializer()) {
        Type *GTy = GV->getValueType();
        if (GTy->isSized())
          BaseSize = DL->getTypeAllocSize(GTy);
        BaseAlign = GV->getAlign();
        if (!BaseAlign && GTy->isSized())
          BaseAlign = DL->getABITypeAlign(GTy);
      }
    }

    // Accesses from before the start or after the end of the object are not
    // defined.
    Check(!Loc.Size.hasValue() || BaseSize == MemoryLocation::UnknownSize ||
              (Offset >= 0 && Offset + Loc.Size.getValue() <= BaseSize),
          "Undefined behavior: Buffer overflow", &I);

    // Accesses that say that the memory is more aligned than it is are not
    // defined.
    if (!Align && Ty && Ty->isSized())
      Align = DL->getABITypeAlign(Ty);
    if (BaseAlign && Align)
      Check(*Align <= commonAlignment(*BaseAlign, Offset),
            "Undefined behavior: Memory reference address is misaligned", &I);
  }
}

void Lint::visitLoadInst(LoadInst &I) {
  visitMemoryReference(I, MemoryLocation::get(&I), I.getAlign(), I.getType(),
                       MemRef::Read);
}

void Lint::visitStoreInst(StoreInst &I) {
  visitMemoryReference(I, MemoryLocation::get(&I), I.getAlign(),
                       I.getOperand(0)->getType(), MemRef::Write);
}

void Lint::visitXor(BinaryOperator &I) {
  Check(!isa<UndefValue>(I.getOperand(0)) || !isa<UndefValue>(I.getOperand(1)),
        "Undefined result: xor(undef, undef)", &I);
}

void Lint::visitSub(BinaryOperator &I) {
  Check(!isa<UndefValue>(I.getOperand(0)) || !isa<UndefValue>(I.getOperand(1)),
        "Undefined result: sub(undef, undef)", &I);
}

void Lint::visitLShr(BinaryOperator &I) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(I.getOperand(1),
                                                        /*OffsetOk=*/false)))
    Check(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
          "Undefined result: Shift count out of range", &I);
}

void Lint::visitAShr(BinaryOperator &I) {
  if (ConstantInt *CI =
          dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
    Check(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
          "Undefined result: Shift count out of range", &I);
}

void Lint::visitShl(BinaryOperator &I) {
  if (ConstantInt *CI =
          dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
    Check(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
          "Undefined result: Shift count out of range", &I);
}

static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
                   AssumptionCache *AC) {
  // Assume undef could be zero.
  if (isa<UndefValue>(V))
    return true;

  VectorType *VecTy = dyn_cast<VectorType>(V->getType());
  if (!VecTy) {
    KnownBits Known =
        computeKnownBits(V, DL, 0, AC, dyn_cast<Instruction>(V), DT);
    return Known.isZero();
  }

  // Per-component check doesn't work with zeroinitializer.
  Constant *C = dyn_cast<Constant>(V);
  if (!C)
    return false;

  if (C->isZeroValue())
    return true;

  // For a vector, KnownZero will only be true if all values are zero, so check
  // this per component.
  for (unsigned I = 0, N = cast<FixedVectorType>(VecTy)->getNumElements();
       I != N; ++I) {
    Constant *Elem = C->getAggregateElement(I);
    if (isa<UndefValue>(Elem))
      return true;

    KnownBits Known = computeKnownBits(Elem, DL);
    if (Known.isZero())
      return true;
  }

  return false;
}

void Lint::visitSDiv(BinaryOperator &I) {
  Check(!isZero(I.getOperand(1), I.getDataLayout(), DT, AC),
        "Undefined behavior: Division by zero", &I);
}

void Lint::visitUDiv(BinaryOperator &I) {
  Check(!isZero(I.getOperand(1), I.getDataLayout(), DT, AC),
        "Undefined behavior: Division by zero", &I);
}

void Lint::visitSRem(BinaryOperator &I) {
  Check(!isZero(I.getOperand(1), I.getDataLayout(), DT, AC),
        "Undefined behavior: Division by zero", &I);
}

void Lint::visitURem(BinaryOperator &I) {
  Check(!isZero(I.getOperand(1), I.getDataLayout(), DT, AC),
        "Undefined behavior: Division by zero", &I);
}

void Lint::visitAllocaInst(AllocaInst &I) {
  if (isa<ConstantInt>(I.getArraySize()))
    // This isn't undefined behavior, it's just an obvious pessimization.
    Check(&I.getParent()->getParent()->getEntryBlock() == I.getParent(),
          "Pessimization: Static alloca outside of entry block", &I);

  // TODO: Check for an unusual size (MSB set?)
}

void Lint::visitVAArgInst(VAArgInst &I) {
  visitMemoryReference(I, MemoryLocation::get(&I), std::nullopt, nullptr,
                       MemRef::Read | MemRef::Write);
}

void Lint::visitIndirectBrInst(IndirectBrInst &I) {
  visitMemoryReference(I, MemoryLocation::getAfter(I.getAddress()),
                       std::nullopt, nullptr, MemRef::Branchee);

  Check(I.getNumDestinations() != 0,
        "Undefined behavior: indirectbr with no destinations", &I);
}

void Lint::visitExtractElementInst(ExtractElementInst &I) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(I.getIndexOperand(),
                                                        /*OffsetOk=*/false)))
    Check(
        CI->getValue().ult(
            cast<FixedVectorType>(I.getVectorOperandType())->getNumElements()),
        "Undefined result: extractelement index out of range", &I);
}

void Lint::visitInsertElementInst(InsertElementInst &I) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(I.getOperand(2),
                                                        /*OffsetOk=*/false)))
    Check(CI->getValue().ult(
              cast<FixedVectorType>(I.getType())->getNumElements()),
          "Undefined result: insertelement index out of range", &I);
}

void Lint::visitUnreachableInst(UnreachableInst &I) {
  // This isn't undefined behavior, it's merely suspicious.
  Check(&I == &I.getParent()->front() ||
            std::prev(I.getIterator())->mayHaveSideEffects(),
        "Unusual: unreachable immediately preceded by instruction without "
        "side effects",
        &I);
}

/// findValue - Look through bitcasts and simple memory reference patterns
/// to identify an equivalent, but more informative, value. If OffsetOk
/// is true, look through getelementptrs with non-zero offsets too.
///
/// Most analysis passes don't require this logic, because instcombine
/// will simplify most of these kinds of things away. But it's a goal of
/// this Lint pass to be useful even on non-optimized IR.
Value *Lint::findValue(Value *V, bool OffsetOk) const {
  SmallPtrSet<Value *, 4> Visited;
  return findValueImpl(V, OffsetOk, Visited);
}

/// findValueImpl - Implementation helper for findValue.
Value *Lint::findValueImpl(Value *V, bool OffsetOk,
                           SmallPtrSetImpl<Value *> &Visited) const {
  // Detect self-referential values.
  if (!Visited.insert(V).second)
    return PoisonValue::get(V->getType());

  // TODO: Look through sext or zext cast, when the result is known to
  // be interpreted as signed or unsigned, respectively.
  // TODO: Look through eliminable cast pairs.
  // TODO: Look through calls with unique return values.
  // TODO: Look through vector insert/extract/shuffle.
  V = OffsetOk ? getUnderlyingObject(V) : V->stripPointerCasts();
  if (LoadInst *L = dyn_cast<LoadInst>(V)) {
    BasicBlock::iterator BBI = L->getIterator();
    BasicBlock *BB = L->getParent();
    SmallPtrSet<BasicBlock *, 4> VisitedBlocks;
    BatchAAResults BatchAA(*AA);
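    // Scan backwards through the block (and, when we reach its top, continue
    // into a unique predecessor) for an earlier instruction that makes the
    // loaded value available.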
    for (;;) {
      if (!VisitedBlocks.insert(BB).second)
        break;
      if (Value *U =
              FindAvailableLoadedValue(L, BB, BBI, DefMaxInstsToScan, &BatchAA))
        return findValueImpl(U, OffsetOk, Visited);
      if (BBI != BB->begin())
        break;
      BB = BB->getUniquePredecessor();
      if (!BB)
        break;
      BBI = BB->end();
    }
  } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    if (Value *W = PN->hasConstantValue())
      return findValueImpl(W, OffsetOk, Visited);
  } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
    if (CI->isNoopCast(*DL))
      return findValueImpl(CI->getOperand(0), OffsetOk, Visited);
  } else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {
    if (Value *W =
            FindInsertedValue(Ex->getAggregateOperand(), Ex->getIndices()))
      if (W != V)
        return findValueImpl(W, OffsetOk, Visited);
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    // Same as above, but for ConstantExpr instead of Instruction.
    if (Instruction::isCast(CE->getOpcode())) {
      if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
                               CE->getOperand(0)->getType(), CE->getType(),
                               *DL))
        return findValueImpl(CE->getOperand(0), OffsetOk, Visited);
    }
  }

  // As a last resort, try SimplifyInstruction or constant folding.
  if (Instruction *Inst = dyn_cast<Instruction>(V)) {
    if (Value *W = simplifyInstruction(Inst, {*DL, TLI, DT, AC}))
      return findValueImpl(W, OffsetOk, Visited);
  } else if (auto *C = dyn_cast<Constant>(V)) {
    Value *W = ConstantFoldConstant(C, *DL, TLI);
    if (W != V)
      return findValueImpl(W, OffsetOk, Visited);
  }

  return V;
}

PreservedAnalyses LintPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto *Mod = F.getParent();
  auto *DL = &F.getDataLayout();
  auto *AA = &AM.getResult<AAManager>(F);
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *TLI = &AM.getResult<TargetLibraryAnalysis>(F);
  Lint L(Mod, DL, AA, AC, DT, TLI);
  L.visit(F);
  dbgs() << L.MessagesStr.str();
  if (LintAbortOnError && !L.MessagesStr.str().empty())
    report_fatal_error(Twine("Linter found errors, aborting. (enabled by --") +
                           LintAbortOnErrorArgName + ")",
                       false);
  return PreservedAnalyses::all();
}

//===----------------------------------------------------------------------===//
//  Implement the public interfaces to this file...
//===----------------------------------------------------------------------===//

/// lintFunction - Check a function for errors, printing messages on stderr.
///
void llvm::lintFunction(const Function &f) {
  Function &F = const_cast<Function &>(f);
  assert(!F.isDeclaration() && "Cannot lint external functions");

  FunctionAnalysisManager FAM;
  FAM.registerPass([&] { return TargetLibraryAnalysis(); });
  FAM.registerPass([&] { return DominatorTreeAnalysis(); });
  FAM.registerPass([&] { return AssumptionAnalysis(); });
  FAM.registerPass([&] {
    AAManager AA;
    AA.registerFunctionAnalysis<BasicAA>();
    AA.registerFunctionAnalysis<ScopedNoAliasAA>();
    AA.registerFunctionAnalysis<TypeBasedAA>();
    return AA;
  });
  LintPass().run(F, FAM);
}

/// lintModule - Check a module for errors, printing messages on stderr.
///
void llvm::lintModule(const Module &M) {
  for (const Function &F : M) {
    if (!F.isDeclaration())
      lintFunction(F);
  }
}