//===- Store.cpp - Interface for maps from Locations to Values -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the types Store and StoreManager.
//
//===----------------------------------------------------------------------===//

#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Type.h"
#include "clang/Basic/LLVM.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/StoreRef.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstdint>
#include <optional>

using namespace clang;
using namespace ento;

StoreManager::StoreManager(ProgramStateManager &stateMgr)
    : svalBuilder(stateMgr.getSValBuilder()), StateMgr(stateMgr),
      MRMgr(svalBuilder.getRegionManager()), Ctx(stateMgr.getContext()) {}

StoreRef StoreManager::enterStackFrame(Store OldStore,
                                       const CallEvent &Call,
                                       const StackFrameContext *LCtx) {
  StoreRef Store = StoreRef(OldStore, *this);

  SmallVector<CallEvent::FrameBindingTy, 16> InitialBindings;
  Call.getInitialStackFrameContents(LCtx, InitialBindings);

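  // E.g. (illustrative): for a member call 'obj.f(x)', the bindings pair the
  // callee's parameter regions (and the 'this' region) with the caller-side
  // argument values, so the callee starts executing with its formals bound.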
  for (const auto &I : InitialBindings)
    Store = Bind(Store.getStore(), I.first.castAs<Loc>(), I.second);

  return Store;
}

const ElementRegion *StoreManager::MakeElementRegion(const SubRegion *Base,
                                                     QualType EleTy,
                                                     uint64_t index) {
  NonLoc idx = svalBuilder.makeArrayIndex(index);
  return MRMgr.getElementRegion(EleTy, idx, Base, svalBuilder.getContext());
}

const ElementRegion *StoreManager::GetElementZeroRegion(const SubRegion *R,
                                                        QualType T) {
  NonLoc idx = svalBuilder.makeZeroArrayIndex();
  assert(!T.isNull());
  return MRMgr.getElementRegion(T, idx, R, Ctx);
}

std::optional<const MemRegion *> StoreManager::castRegion(const MemRegion *R,
                                                          QualType CastToTy) {
  ASTContext &Ctx = StateMgr.getContext();

  // Handle casts to Objective-C objects.
  if (CastToTy->isObjCObjectPointerType())
    return R->StripCasts();

  if (CastToTy->isBlockPointerType()) {
    // FIXME: We may need different solutions, depending on the symbol
    // involved. Blocks can be cast to/from 'id', as they can be treated
    // as Objective-C objects. This could possibly be handled by enhancing
    // our reasoning of downcasts of symbolic objects.
    if (isa<CodeTextRegion, SymbolicRegion>(R))
      return R;

    // We don't know what to make of it. Return a NULL region, which
    // will be interpreted as UnknownVal.
    return std::nullopt;
  }

  // Now assume we are casting from pointer to pointer. Other cases should
  // already be handled.
  QualType PointeeTy = CastToTy->getPointeeType();
  QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
  CanonPointeeTy = CanonPointeeTy.getLocalUnqualifiedType();

  // Handle casts to void*. We just pass the region through.
  if (CanonPointeeTy == Ctx.VoidTy)
    return R;

  const auto IsSameRegionType = [&Ctx](const MemRegion *R, QualType OtherTy) {
    if (const auto *TR = dyn_cast<TypedValueRegion>(R)) {
      QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
      if (OtherTy == ObjTy.getLocalUnqualifiedType())
        return true;
    }
    return false;
  };

  // Handle casts from compatible types.
  if (R->isBoundable() && IsSameRegionType(R, CanonPointeeTy))
    return R;

  // Process region cast according to the kind of the region being cast.
  switch (R->getKind()) {
  case MemRegion::CXXThisRegionKind:
  case MemRegion::CodeSpaceRegionKind:
  case MemRegion::StackLocalsSpaceRegionKind:
  case MemRegion::StackArgumentsSpaceRegionKind:
  case MemRegion::HeapSpaceRegionKind:
  case MemRegion::UnknownSpaceRegionKind:
  case MemRegion::StaticGlobalSpaceRegionKind:
  case MemRegion::GlobalInternalSpaceRegionKind:
  case MemRegion::GlobalSystemSpaceRegionKind:
  case MemRegion::GlobalImmutableSpaceRegionKind: {
    llvm_unreachable("Invalid region cast");
  }

  case MemRegion::FunctionCodeRegionKind:
  case MemRegion::BlockCodeRegionKind:
  case MemRegion::BlockDataRegionKind:
  case MemRegion::StringRegionKind:
    // FIXME: Need to handle arbitrary downcasts.
  case MemRegion::SymbolicRegionKind:
  case MemRegion::AllocaRegionKind:
  case MemRegion::CompoundLiteralRegionKind:
  case MemRegion::FieldRegionKind:
  case MemRegion::ObjCIvarRegionKind:
  case MemRegion::ObjCStringRegionKind:
  case MemRegion::NonParamVarRegionKind:
  case MemRegion::ParamVarRegionKind:
  case MemRegion::CXXTempObjectRegionKind:
  case MemRegion::CXXLifetimeExtendedObjectRegionKind:
  case MemRegion::CXXBaseObjectRegionKind:
  case MemRegion::CXXDerivedObjectRegionKind:
    return MakeElementRegion(cast<SubRegion>(R), PointeeTy);

  case MemRegion::ElementRegionKind: {
    // If we are casting from an ElementRegion to another type, the
    // algorithm is as follows:
    //
    // (1) Compute the "raw offset" of the ElementRegion from the
    //     base region. This is done by calling 'getAsRawOffset()'.
    //
    // (2a) If we get a 'RegionRawOffset' after calling
    //      'getAsRawOffset()', determine if the absolute offset
    //      can be exactly divided into chunks of the size of the
    //      casted-pointee type. If so, create a new ElementRegion with
    //      the pointee-cast type as the new ElementType and the index
    //      being the offset divided by the chunk size. If not, create
    //      a new ElementRegion at offset 0 off the raw offset region.
    //
    // (2b) If we don't get a 'RegionRawOffset' after calling
    //      'getAsRawOffset()', it means that we are at offset 0.
    //
    // FIXME: Handle symbolic raw offsets.
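    //
    // For example (illustrative, assuming a 4-byte 'int'): casting the
    // location '&buf[8]' of a 'char buf[16]' to 'int *' computes a raw
    // offset of 8 bytes; 8 divides evenly by sizeof(int), so the result
    // is Element{buf, 2, int}.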

    const ElementRegion *elementR = cast<ElementRegion>(R);
    const RegionRawOffset &rawOff = elementR->getAsArrayOffset();
    const MemRegion *baseR = rawOff.getRegion();

    // If we cannot compute a raw offset, throw up our hands and return
    // a NULL MemRegion*.
    if (!baseR)
      return std::nullopt;

    CharUnits off = rawOff.getOffset();

    if (off.isZero()) {
      // Edge case: we are at 0 bytes off the beginning of baseR. We check
      // to see if the type we are casting to is the same as the type of the
      // base region. If so, just return the base region.
      if (IsSameRegionType(baseR, CanonPointeeTy))
        return baseR;
      // Otherwise, create a new ElementRegion at offset 0.
      return MakeElementRegion(cast<SubRegion>(baseR), PointeeTy);
    }

    // We have a non-zero offset from the base region. We want to determine
    // if the offset can be evenly divided by sizeof(PointeeTy). If so,
    // we create an ElementRegion whose index is that value. Otherwise, we
    // create two ElementRegions, one that reflects a raw offset and the
    // other that reflects the cast.
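    // For example (illustrative, assuming a 4-byte 'int'): a raw offset of
    // 5 bytes cast to 'int *' becomes Element{Element{base, 5, char}, 0, int},
    // layering the typed region on top of a raw byte-offset region.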

    // Compute the index for the new ElementRegion.
    int64_t newIndex = 0;
    const MemRegion *newSuperR = nullptr;

    // We can only compute sizeof(PointeeTy) if it is a complete type.
    if (!PointeeTy->isIncompleteType()) {
      // Compute the size in **bytes**.
      CharUnits pointeeTySize = Ctx.getTypeSizeInChars(PointeeTy);
      if (!pointeeTySize.isZero()) {
        // Is the offset a multiple of the size? If so, we can layer the
        // ElementRegion (with elementType == PointeeTy) directly on top of
        // the base region.
        if (off % pointeeTySize == 0) {
          newIndex = off / pointeeTySize;
          newSuperR = baseR;
        }
      }
    }

    if (!newSuperR) {
      // Create an intermediate ElementRegion to represent the raw byte.
      // This will be the super region of the final ElementRegion.
      newSuperR = MakeElementRegion(cast<SubRegion>(baseR), Ctx.CharTy,
                                    off.getQuantity());
    }

    return MakeElementRegion(cast<SubRegion>(newSuperR), PointeeTy, newIndex);
  }
  }

  llvm_unreachable("unreachable");
}

static bool regionMatchesCXXRecordType(SVal V, QualType Ty) {
  const MemRegion *MR = V.getAsRegion();
  if (!MR)
    return true;

  const auto *TVR = dyn_cast<TypedValueRegion>(MR);
  if (!TVR)
    return true;

  const CXXRecordDecl *RD = TVR->getValueType()->getAsCXXRecordDecl();
  if (!RD)
    return true;

  const CXXRecordDecl *Expected = Ty->getPointeeCXXRecordDecl();
  if (!Expected)
    Expected = Ty->getAsCXXRecordDecl();

  return Expected->getCanonicalDecl() == RD->getCanonicalDecl();
}

SVal StoreManager::evalDerivedToBase(SVal Derived, const CastExpr *Cast) {
  // Early return to avoid doing the wrong thing in the face of
  // reinterpret_cast.
  if (!regionMatchesCXXRecordType(Derived, Cast->getSubExpr()->getType()))
    return UnknownVal();

  // Walk through the cast path to create nested CXXBaseRegions.
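  // E.g. (illustrative): given
  //   struct A {}; struct B : A {}; struct C : B {};
  // a cast from 'C *' to 'A *' carries the path (B, A), so a region 'c' of
  // type C becomes Base{Base{c, B}, A}.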
  SVal Result = Derived;
  for (const CXXBaseSpecifier *Base : Cast->path()) {
    Result = evalDerivedToBase(Result, Base->getType(), Base->isVirtual());
  }
  return Result;
}

SVal StoreManager::evalDerivedToBase(SVal Derived, const CXXBasePath &Path) {
  // Walk through the path to create nested CXXBaseRegions.
  SVal Result = Derived;
  for (const auto &I : Path)
    Result = evalDerivedToBase(Result, I.Base->getType(),
                               I.Base->isVirtual());
  return Result;
}

SVal StoreManager::evalDerivedToBase(SVal Derived, QualType BaseType,
                                     bool IsVirtual) {
  const MemRegion *DerivedReg = Derived.getAsRegion();
  if (!DerivedReg)
    return Derived;

  const CXXRecordDecl *BaseDecl = BaseType->getPointeeCXXRecordDecl();
  if (!BaseDecl)
    BaseDecl = BaseType->getAsCXXRecordDecl();
  assert(BaseDecl && "not a C++ object?");

  if (const auto *AlreadyDerivedReg =
          dyn_cast<CXXDerivedObjectRegion>(DerivedReg)) {
    if (const auto *SR =
            dyn_cast<SymbolicRegion>(AlreadyDerivedReg->getSuperRegion()))
      if (SR->getSymbol()->getType()->getPointeeCXXRecordDecl() == BaseDecl)
        return loc::MemRegionVal(SR);

    DerivedReg = AlreadyDerivedReg->getSuperRegion();
  }

  const MemRegion *BaseReg = MRMgr.getCXXBaseObjectRegion(
      BaseDecl, cast<SubRegion>(DerivedReg), IsVirtual);

  return loc::MemRegionVal(BaseReg);
}

/// Returns the static type of the given region, if it represents a C++ class
/// object.
///
/// This handles both fully-typed regions, where the dynamic type is known, and
/// symbolic regions, where the dynamic type is merely bounded (and even then,
/// only ostensibly!), but does not take advantage of any dynamic type info.
static const CXXRecordDecl *getCXXRecordType(const MemRegion *MR) {
  if (const auto *TVR = dyn_cast<TypedValueRegion>(MR))
    return TVR->getValueType()->getAsCXXRecordDecl();
  if (const auto *SR = dyn_cast<SymbolicRegion>(MR))
    return SR->getSymbol()->getType()->getPointeeCXXRecordDecl();
  return nullptr;
}

std::optional<SVal> StoreManager::evalBaseToDerived(SVal Base,
                                                    QualType TargetType) {
  const MemRegion *MR = Base.getAsRegion();
  if (!MR)
    return UnknownVal();

  // Assume the derived class is a pointer or a reference to a CXX record.
  TargetType = TargetType->getPointeeType();
  assert(!TargetType.isNull());
  const CXXRecordDecl *TargetClass = TargetType->getAsCXXRecordDecl();
  if (!TargetClass && !TargetType->isVoidType())
    return UnknownVal();

  // Drill down the CXXBaseObject chains, which represent upcasts (casts from
  // derived to base).
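  // E.g. (illustrative): if MR is Base{Base{d, B}, A} for a region 'd' of
  // type D and TargetType is 'D *', each iteration strips one
  // CXXBaseObjectRegion until it reaches 'd', whose type matches the target.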
  while (const CXXRecordDecl *MRClass = getCXXRecordType(MR)) {
    // If we found the derived class, the cast succeeds.
    if (MRClass == TargetClass)
      return loc::MemRegionVal(MR);

    // We skip over incomplete types. They must be the result of an earlier
    // reinterpret_cast, as one can only dynamic_cast between types in the
    // same class hierarchy.
    if (!TargetType->isVoidType() && MRClass->hasDefinition()) {
      // Static upcasts are marked as DerivedToBase casts by Sema, so this
      // will only happen when multiple or virtual inheritance is involved.
      CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/true,
                         /*DetectVirtual=*/false);
      if (MRClass->isDerivedFrom(TargetClass, Paths))
        return evalDerivedToBase(loc::MemRegionVal(MR), Paths.front());
    }

    if (const auto *BaseR = dyn_cast<CXXBaseObjectRegion>(MR)) {
      // Drill down the chain to get the derived classes.
      MR = BaseR->getSuperRegion();
      continue;
    }

    // If this is a cast to void*, return the region.
    if (TargetType->isVoidType())
      return loc::MemRegionVal(MR);

    // Strange use of reinterpret_cast can give us paths we don't reason
    // about well, by putting in ElementRegions where we'd expect
    // CXXBaseObjectRegions. If it's a valid reinterpret_cast (i.e. if the
    // derived class has a zero offset from the base class), then it's safe
    // to strip the cast; if it's invalid, -Wreinterpret-base-class should
    // catch it. In the interest of performance, the analyzer will silently
    // do the wrong thing in the invalid case (because offsets for subregions
    // will be wrong).
    const MemRegion *Uncasted =
        MR->StripCasts(/*StripBaseAndDerivedCasts=*/false);
    if (Uncasted == MR) {
      // We reached the bottom of the hierarchy and did not find the derived
      // class. We must be casting the base to derived, so the cast should
      // fail.
      break;
    }

    MR = Uncasted;
  }

  // If we're casting a symbolic base pointer to a derived class, use
  // CXXDerivedObjectRegion to represent the cast. If it's a pointer to an
  // unrelated type, it must be a weird reinterpret_cast and we have to
  // be fine with ElementRegion. TODO: Should we instead make
  // Derived{TargetClass, Element{SourceClass, SR}}?
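  // E.g. (illustrative): casting SymRegion{$p} of type 'A *' to 'B *' where
  // 'B' derives from 'A' yields Derived{B, SymRegion{$p}}; casting it to an
  // unrelated 'T *' falls back to Element{SymRegion{$p}, 0, T}.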
  if (const auto *SR = dyn_cast<SymbolicRegion>(MR)) {
    QualType T = SR->getSymbol()->getType();
    const CXXRecordDecl *SourceClass = T->getPointeeCXXRecordDecl();
    if (TargetClass && SourceClass && TargetClass->isDerivedFrom(SourceClass))
      return loc::MemRegionVal(
          MRMgr.getCXXDerivedObjectRegion(TargetClass, SR));
    return loc::MemRegionVal(GetElementZeroRegion(SR, TargetType));
  }

  // We failed if the region we ended up with has perfect type info.
  if (isa<TypedValueRegion>(MR))
    return std::nullopt;

  return UnknownVal();
}

SVal StoreManager::getLValueFieldOrIvar(const Decl *D, SVal Base) {
  if (Base.isUnknownOrUndef())
    return Base;

  Loc BaseL = Base.castAs<Loc>();
  const SubRegion *BaseR = nullptr;

  switch (BaseL.getKind()) {
  case loc::MemRegionValKind:
    BaseR = cast<SubRegion>(BaseL.castAs<loc::MemRegionVal>().getRegion());
    break;

  case loc::GotoLabelKind:
    // These are abnormal cases. Flag an undefined value.
    return UndefinedVal();

  case loc::ConcreteIntKind:
    // While these seem funny, this can happen through casts.
    // FIXME: What we should return is the field offset, not base. For
    // example, add the field offset to the integer value. That way things
    // like this work properly: &(((struct foo *) 0xa)->f)
    // However, that's not easy to fix without reducing our ability
    // to catch null pointer dereferences. E.g., ((struct foo *)0x0)->f = 7
    // is a null dereference even though we're dereferencing the offset of
    // 'f' rather than null. Coming up with an approach that computes offsets
    // over null pointers properly while still being able to catch null
    // dereferences might be worth it.
    return Base;

  default:
    llvm_unreachable("Unhandled Base.");
  }

  // NOTE: We must have this check first because ObjCIvarDecl is a subclass
  // of FieldDecl.
  if (const auto *ID = dyn_cast<ObjCIvarDecl>(D))
    return loc::MemRegionVal(MRMgr.getObjCIvarRegion(ID, BaseR));

  return loc::MemRegionVal(MRMgr.getFieldRegion(cast<FieldDecl>(D), BaseR));
}

SVal StoreManager::getLValueIvar(const ObjCIvarDecl *decl, SVal base) {
  return getLValueFieldOrIvar(decl, base);
}

SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
                                    SVal Base) {

  // Special case: if the index is 0, return the same value as if this were
  // not an array dereference.
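  // E.g. (illustrative): for 'int *p', the lvalue 'p[0]' denotes the same
  // location as '*p', so the base value can be returned unchanged.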
  if (Offset.isZeroConstant()) {
    QualType BT = Base.getType(this->Ctx);
    if (!BT.isNull() && !elementType.isNull()) {
      QualType PointeeTy = BT->getPointeeType();
      if (!PointeeTy.isNull() &&
          PointeeTy.getCanonicalType() == elementType.getCanonicalType())
        return Base;
    }
  }

  // If the base is an unknown or undefined value, just return it back.
  // FIXME: For absolute pointer addresses, we just return that value back as
  // well, although in reality we should return the offset added to that
  // value. See also the similar FIXME in getLValueFieldOrIvar().
  if (Base.isUnknownOrUndef() || isa<loc::ConcreteInt>(Base))
    return Base;

  if (isa<loc::GotoLabel>(Base))
    return UnknownVal();

  const SubRegion *BaseRegion =
      Base.castAs<loc::MemRegionVal>().getRegionAs<SubRegion>();

  // A pointer of any type can be cast and used as an array base.
  const auto *ElemR = dyn_cast<ElementRegion>(BaseRegion);

  // Convert the offset to the appropriate size and signedness.
  auto Off = svalBuilder.convertToArrayIndex(Offset).getAs<NonLoc>();
  if (!Off) {
    // Handle cases when a LazyCompoundVal is used for an array index.
    // Such a case is possible if the code does:
    //   char b[4];
    //   a[__builtin_bit_cast(int, b)];
    // Return UnknownVal, since we cannot model it.
    return UnknownVal();
  }

  Offset = Off.value();

  if (!ElemR) {
    // If the base region is not an ElementRegion, create one.
    // This can happen in the following example:
    //
    //   char *p = __builtin_alloca(10);
    //   p[1] = 8;
    //
    // Observe that 'p' binds to an AllocaRegion.
    return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
                                                    BaseRegion, Ctx));
  }

  SVal BaseIdx = ElemR->getIndex();

  if (!isa<nonloc::ConcreteInt>(BaseIdx))
    return UnknownVal();

  const llvm::APSInt &BaseIdxI =
      BaseIdx.castAs<nonloc::ConcreteInt>().getValue();

  // Only allow non-integer offsets if the base region has no offset itself.
  // FIXME: This is a somewhat arbitrary restriction. We should be using
  // SValBuilder here to add the two offsets without checking their types.
  if (!isa<nonloc::ConcreteInt>(Offset)) {
    if (isa<ElementRegion>(BaseRegion->StripCasts()))
      return UnknownVal();

    return loc::MemRegionVal(MRMgr.getElementRegion(
        elementType, Offset, cast<SubRegion>(ElemR->getSuperRegion()), Ctx));
  }

  const llvm::APSInt &OffI = Offset.castAs<nonloc::ConcreteInt>().getValue();
  assert(BaseIdxI.isSigned());
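  // E.g. (illustrative): indexing Element{arr, 2, int} with a concrete
  // offset of 3 folds into Element{arr, 5, int} rather than layering a
  // second ElementRegion on top.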

  // Compute the new index.
  nonloc::ConcreteInt NewIdx(
      svalBuilder.getBasicValueFactory().getValue(BaseIdxI + OffI));

  // Construct the new ElementRegion.
  const SubRegion *ArrayR = cast<SubRegion>(ElemR->getSuperRegion());
  return loc::MemRegionVal(MRMgr.getElementRegion(elementType, NewIdx, ArrayR,
                                                  Ctx));
}

StoreManager::BindingsHandler::~BindingsHandler() = default;

bool StoreManager::FindUniqueBinding::HandleBinding(StoreManager &SMgr,
                                                    Store store,
                                                    const MemRegion *R,
                                                    SVal val) {
  SymbolRef SymV = val.getAsLocSymbol();
  if (!SymV || SymV != Sym)
    return true;

  if (Binding) {
    First = false;
    return false;
  } else
    Binding = R;

  return true;
}