//===- Context.cpp - State Tracking for llubi -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file tracks the global states (e.g., memory) of the interpreter.
//
//===----------------------------------------------------------------------===//

#include "Context.h"
#include "llvm/Support/MathExtras.h"

namespace llvm::ubi {

18Context::Context(Module &M)
19 : Ctx(M.getContext()), M(M), DL(M.getDataLayout()),
20 TLIImpl(M.getTargetTriple()) {}
21
22Context::~Context() = default;
23
24bool Context::initGlobalValues() {
25 // Register all function and block targets that may be used by indirect calls
26 // and branches.
27 for (Function &F : M) {
28 if (F.hasAddressTaken()) {
29 // TODO: Use precise alignment for function pointers if it is necessary.
30 auto FuncObj = allocate(Size: 0, Align: F.getPointerAlignment(DL).value(), Name: F.getName(),
31 AS: DL.getProgramAddressSpace(), InitKind: MemInitKind::Zeroed);
32 if (!FuncObj)
33 return false;
34 ValidFuncTargets.try_emplace(Key: FuncObj->getAddress(),
35 Args: std::make_pair(x: &F, y&: FuncObj));
36 FuncAddrMap.try_emplace(Key: &F, Args: deriveFromMemoryObject(Obj: FuncObj));
37 }
38
39 for (BasicBlock &BB : F) {
40 if (!BB.hasAddressTaken())
41 continue;
42 auto BlockObj = allocate(Size: 0, Align: 1, Name: BB.getName(), AS: DL.getProgramAddressSpace(),
43 InitKind: MemInitKind::Zeroed);
44 if (!BlockObj)
45 return false;
46 ValidBlockTargets.try_emplace(Key: BlockObj->getAddress(),
47 Args: std::make_pair(x: &BB, y&: BlockObj));
48 BlockAddrMap.try_emplace(Key: &BB, Args: deriveFromMemoryObject(Obj: BlockObj));
49 }
50 }
51 // TODO: initialize global variables.
52 return true;
53}
54
55AnyValue Context::getConstantValueImpl(Constant *C) {
56 if (isa<PoisonValue>(Val: C))
57 return AnyValue::getPoisonValue(Ctx&: *this, Ty: C->getType());
58
59 if (isa<ConstantAggregateZero>(Val: C))
60 return AnyValue::getNullValue(Ctx&: *this, Ty: C->getType());
61
62 if (isa<ConstantPointerNull>(Val: C))
63 return Pointer::null(
64 BitWidth: DL.getPointerSizeInBits(AS: C->getType()->getPointerAddressSpace()));
65
66 if (auto *CI = dyn_cast<ConstantInt>(Val: C)) {
67 if (auto *VecTy = dyn_cast<VectorType>(Val: CI->getType()))
68 return std::vector<AnyValue>(getEVL(EC: VecTy->getElementCount()),
69 AnyValue(CI->getValue()));
70 return CI->getValue();
71 }
72
73 if (auto *CFP = dyn_cast<ConstantFP>(Val: C)) {
74 if (auto *VecTy = dyn_cast<VectorType>(Val: CFP->getType()))
75 return std::vector<AnyValue>(getEVL(EC: VecTy->getElementCount()),
76 AnyValue(CFP->getValue()));
77 return CFP->getValue();
78 }
79
80 if (auto *CDS = dyn_cast<ConstantDataSequential>(Val: C)) {
81 std::vector<AnyValue> Elts;
82 Elts.reserve(n: CDS->getNumElements());
83 for (uint32_t I = 0, E = CDS->getNumElements(); I != E; ++I)
84 Elts.push_back(x: getConstantValue(C: CDS->getElementAsConstant(i: I)));
85 return std::move(Elts);
86 }
87
88 if (auto *CA = dyn_cast<ConstantAggregate>(Val: C)) {
89 std::vector<AnyValue> Elts;
90 Elts.reserve(n: CA->getNumOperands());
91 for (uint32_t I = 0, E = CA->getNumOperands(); I != E; ++I)
92 Elts.push_back(x: getConstantValue(C: CA->getOperand(i_nocapture: I)));
93 return std::move(Elts);
94 }
95
96 if (auto *BA = dyn_cast<BlockAddress>(Val: C))
97 return BlockAddrMap.at(Val: BA->getBasicBlock());
98
99 if (auto *F = dyn_cast<Function>(Val: C))
100 return FuncAddrMap.at(Val: F);
101
102 llvm_unreachable("Unrecognized constant");
103}
104
105const AnyValue &Context::getConstantValue(Constant *C) {
106 auto It = ConstCache.find(x: C);
107 if (It != ConstCache.end())
108 return It->second;
109
110 return ConstCache.emplace(args&: C, args: getConstantValueImpl(C)).first->second;
111}
112
113AnyValue Context::fromBytes(ConstBytesView Bytes, Type *Ty,
114 uint32_t OffsetInBits, bool CheckPaddingBits) {
115 uint32_t NumBits = DL.getTypeSizeInBits(Ty).getFixedValue();
116 uint32_t NewOffsetInBits = OffsetInBits + NumBits;
117 if (CheckPaddingBits)
118 NewOffsetInBits = alignTo(Value: NewOffsetInBits, Align: 8);
119 bool NeedsPadding = NewOffsetInBits != OffsetInBits + NumBits;
120 uint32_t NumBitsToExtract = NewOffsetInBits - OffsetInBits;
121 SmallVector<uint64_t> RawBits(alignTo(Value: NumBitsToExtract, Align: 8));
122 for (uint32_t I = 0; I < NumBitsToExtract; I += 8) {
123 // Try to form a 'logical' byte that represents the bits in the range
124 // [BitsStart, BitsEnd].
125 uint32_t NumBitsInByte = std::min(a: 8U, b: NumBitsToExtract - I);
126 uint32_t BitsStart = OffsetInBits + I;
127 uint32_t BitsEnd = BitsStart + NumBitsInByte - 1;
128 Byte LogicalByte;
129 // Check whether it is a cross-byte access.
130 if (((BitsStart ^ BitsEnd) & ~7) == 0)
131 LogicalByte = Bytes[BitsStart / 8].lshr(Shift: BitsStart % 8);
132 else
133 LogicalByte =
134 Byte::fshr(Low: Bytes[BitsStart / 8], High: Bytes[BitsEnd / 8], ShAmt: BitsStart % 8);
135
136 uint32_t Mask = (1U << NumBitsInByte) - 1;
137 // If any of the bits in the byte is poison, the whole value is poison.
138 if (~LogicalByte.ConcreteMask & ~LogicalByte.Value & Mask) {
139 OffsetInBits = NewOffsetInBits;
140 return AnyValue::poison();
141 }
142 uint8_t RandomBits = 0;
143 if (UndefBehavior == UndefValueBehavior::NonDeterministic &&
144 (~LogicalByte.ConcreteMask & Mask)) {
145 // This byte contains undef bits.
146 // We don't use std::uniform_int_distribution here because it produces
147 // different results across different library implementations. Instead,
148 // we directly use the low bits from Rng.
149 RandomBits = static_cast<uint8_t>(Rng());
150 }
151 uint8_t ActualBits = ((LogicalByte.Value & LogicalByte.ConcreteMask) |
152 (RandomBits & ~LogicalByte.ConcreteMask)) &
153 Mask;
154 RawBits[I / 64] |= static_cast<APInt::WordType>(ActualBits) << (I % 64);
155 }
156 OffsetInBits = NewOffsetInBits;
157
158 APInt Bits(NumBitsToExtract, RawBits);
159
160 // Padding bits for non-byte-sized scalar types must be zero.
161 if (NeedsPadding) {
162 if (!Bits.isIntN(N: NumBits))
163 return AnyValue::poison();
164 Bits = Bits.trunc(width: NumBits);
165 }
166
167 if (Ty->isIntegerTy())
168 return Bits;
169 if (Ty->isFloatingPointTy())
170 return APFloat(Ty->getFltSemantics(), Bits);
171 assert(Ty->isPointerTy() && "Expect a pointer type");
172 // TODO: recover provenance
173 return Pointer(Bits);
174}
175
176AnyValue Context::fromBytes(ArrayRef<Byte> Bytes, Type *Ty) {
177 assert(Bytes.size() == getEffectiveTypeStoreSize(Ty) &&
178 "Invalid byte array size for the type");
179 if (Ty->isIntegerTy() || Ty->isFloatingPointTy() || Ty->isPointerTy())
180 return fromBytes(Bytes: ConstBytesView(Bytes, DL), Ty, /*OffsetInBits=*/0,
181 /*CheckPaddingBits=*/true);
182
183 if (auto *VecTy = dyn_cast<VectorType>(Val: Ty)) {
184 Type *ElemTy = VecTy->getElementType();
185 uint32_t ElemBits = DL.getTypeSizeInBits(Ty: ElemTy).getFixedValue();
186 uint32_t NumElements = getEVL(EC: VecTy->getElementCount());
187 // Check padding bits. <N x iM> acts as if an integer type with N * M bits.
188 uint32_t VecBits = ElemBits * NumElements;
189 uint32_t AlignedVecBits = alignTo(Value: VecBits, Align: 8);
190 ConstBytesView View(Bytes, DL);
191 if (VecBits != AlignedVecBits) {
192 const Byte &PaddingByte = View[Bytes.size() - 1];
193 uint32_t Mask = (~0U << (VecBits % 8)) & 255U;
194 // Make sure all high padding bits are zero.
195 if ((PaddingByte.ConcreteMask & ~PaddingByte.Value & Mask) != Mask)
196 return AnyValue::getPoisonValue(Ctx&: *this, Ty);
197 }
198
199 std::vector<AnyValue> ValVec;
200 ValVec.reserve(n: NumElements);
201 // For little endian element zero is put in the least significant bits of
202 // the integer, and for big endian element zero is put in the most
203 // significant bits.
204 for (uint32_t I = 0; I != NumElements; ++I)
205 ValVec.push_back(x: fromBytes(Bytes: View, Ty: ElemTy,
206 OffsetInBits: DL.isLittleEndian()
207 ? I * ElemBits
208 : VecBits - ElemBits - I * ElemBits,
209 /*CheckPaddingBits=*/false));
210 return AnyValue(std::move(ValVec));
211 }
212 if (auto *ArrTy = dyn_cast<ArrayType>(Val: Ty)) {
213 Type *ElemTy = ArrTy->getElementType();
214 uint64_t Stride = getEffectiveTypeAllocSize(Ty: ElemTy);
215 uint64_t StoreSize = getEffectiveTypeStoreSize(Ty: ElemTy);
216 uint32_t NumElements = ArrTy->getNumElements();
217 std::vector<AnyValue> ValVec;
218 ValVec.reserve(n: NumElements);
219 for (uint32_t I = 0; I != NumElements; ++I)
220 ValVec.push_back(x: fromBytes(Bytes: Bytes.slice(N: I * Stride, M: StoreSize), Ty: ElemTy));
221 return AnyValue(std::move(ValVec));
222 }
223 if (auto *StructTy = dyn_cast<StructType>(Val: Ty)) {
224 const StructLayout *Layout = DL.getStructLayout(Ty: StructTy);
225 std::vector<AnyValue> ValVec;
226 uint32_t NumElements = StructTy->getNumElements();
227 ValVec.reserve(n: NumElements);
228 for (uint32_t I = 0; I != NumElements; ++I) {
229 Type *ElemTy = StructTy->getElementType(N: I);
230 ValVec.push_back(x: fromBytes(
231 Bytes: Bytes.slice(N: getEffectiveTypeSize(Size: Layout->getElementOffset(Idx: I)),
232 M: getEffectiveTypeStoreSize(Ty: ElemTy)),
233 Ty: ElemTy));
234 }
235 return AnyValue(std::move(ValVec));
236 }
237 llvm_unreachable("Unsupported first class type.");
238}
239
240void Context::toBytes(const AnyValue &Val, Type *Ty, uint32_t OffsetInBits,
241 MutableBytesView Bytes, bool PaddingBits) {
242 uint32_t NumBits = DL.getTypeSizeInBits(Ty).getFixedValue();
243 uint32_t NewOffsetInBits = OffsetInBits + NumBits;
244 if (PaddingBits)
245 NewOffsetInBits = alignTo(Value: NewOffsetInBits, Align: 8);
246 bool NeedsPadding = NewOffsetInBits != OffsetInBits + NumBits;
247 auto WriteBits = [&](const APInt &Bits) {
248 for (uint32_t I = 0, E = Bits.getBitWidth(); I < E; I += 8) {
249 uint32_t NumBitsInByte = std::min(a: 8U, b: E - I);
250 uint32_t BitsStart = OffsetInBits + I;
251 uint32_t BitsEnd = BitsStart + NumBitsInByte - 1;
252 uint8_t BitsVal =
253 static_cast<uint8_t>(Bits.extractBitsAsZExtValue(numBits: NumBitsInByte, bitPosition: I));
254
255 Bytes[BitsStart / 8].writeBits(
256 Mask: static_cast<uint8_t>(((1U << NumBitsInByte) - 1) << (BitsStart % 8)),
257 Val: static_cast<uint8_t>(BitsVal << (BitsStart % 8)));
258 // If it is a cross-byte access, write the remaining bits to the next
259 // byte.
260 if (((BitsStart ^ BitsEnd) & ~7) != 0)
261 Bytes[BitsEnd / 8].writeBits(
262 Mask: static_cast<uint8_t>((1U << (BitsEnd % 8 + 1)) - 1),
263 Val: static_cast<uint8_t>(BitsVal >> (8 - (BitsStart % 8))));
264 }
265 };
266 if (Val.isPoison()) {
267 for (uint32_t I = 0, E = NewOffsetInBits - OffsetInBits; I < E;) {
268 uint32_t NumBitsInByte = std::min(a: 8 - (OffsetInBits + I) % 8, b: E - I);
269 assert(((OffsetInBits ^ (OffsetInBits + NumBitsInByte - 1)) & ~7) == 0 &&
270 "Across byte boundary.");
271 Bytes[(OffsetInBits + I) / 8].poisonBits(Mask: static_cast<uint8_t>(
272 ((1U << NumBitsInByte) - 1) << ((OffsetInBits + I) % 8)));
273 I += NumBitsInByte;
274 }
275 } else if (Ty->isIntegerTy()) {
276 auto &Bits = Val.asInteger();
277 WriteBits(NeedsPadding ? Bits.zext(width: NewOffsetInBits - OffsetInBits) : Bits);
278 } else if (Ty->isFloatingPointTy()) {
279 auto Bits = Val.asFloat().bitcastToAPInt();
280 WriteBits(NeedsPadding ? Bits.zext(width: NewOffsetInBits - OffsetInBits) : Bits);
281 } else if (Ty->isPointerTy()) {
282 auto &Bits = Val.asPointer().address();
283 WriteBits(NeedsPadding ? Bits.zext(width: NewOffsetInBits - OffsetInBits) : Bits);
284 // TODO: save metadata of the pointer.
285 } else {
286 llvm_unreachable("Unsupported scalar type.");
287 }
288}
289
290void Context::toBytes(const AnyValue &Val, Type *Ty,
291 MutableArrayRef<Byte> Bytes) {
292 assert(Bytes.size() == getEffectiveTypeStoreSize(Ty) &&
293 "Invalid byte array size for the type");
294 if (Ty->isIntegerTy() || Ty->isFloatingPointTy() || Ty->isPointerTy()) {
295 toBytes(Val, Ty, /*OffsetInBits=*/0, Bytes: MutableBytesView(Bytes, DL),
296 /*PaddingBits=*/true);
297 return;
298 }
299
300 if (auto *VecTy = dyn_cast<VectorType>(Val: Ty)) {
301 Type *ElemTy = VecTy->getElementType();
302 uint32_t ElemBits = DL.getTypeSizeInBits(Ty: ElemTy).getFixedValue();
303 uint32_t NumElements = getEVL(EC: VecTy->getElementCount());
304 // Zero padding bits. <N x iM> acts as if an integer type with N * M bits.
305 uint32_t VecBits = ElemBits * NumElements;
306 uint32_t AlignedVecBits = alignTo(Value: VecBits, Align: 8);
307 MutableBytesView View(Bytes, DL);
308 if (VecBits != AlignedVecBits) {
309 Byte &PaddingByte = View[Bytes.size() - 1];
310 uint32_t Mask = (~0U << (VecBits % 8)) & 255U;
311 PaddingByte.zeroBits(Mask);
312 }
313 // For little endian element zero is put in the least significant bits of
314 // the integer, and for big endian element zero is put in the most
315 // significant bits.
316 if (DL.isLittleEndian()) {
317 for (const auto &[I, Val] : enumerate(First: Val.asAggregate()))
318 toBytes(Val, Ty: ElemTy, OffsetInBits: ElemBits * I, Bytes: View, /*PaddingBits=*/false);
319 } else {
320 for (const auto &[I, Val] : enumerate(First: reverse(C: Val.asAggregate())))
321 toBytes(Val, Ty: ElemTy, OffsetInBits: ElemBits * I, Bytes: View, /*PaddingBits=*/false);
322 }
323 return;
324 }
325
326 // Fill padding bytes due to alignment requirement.
327 auto FillUndefBytes = [&](uint64_t Begin, uint64_t End) {
328 fill(Range: Bytes.slice(N: Begin, M: End - Begin), Value: Byte::undef());
329 };
330 if (auto *ArrTy = dyn_cast<ArrayType>(Val: Ty)) {
331 Type *ElemTy = ArrTy->getElementType();
332 uint64_t Offset = 0;
333 uint64_t Stride = getEffectiveTypeAllocSize(Ty: ElemTy);
334 uint64_t StoreSize = getEffectiveTypeStoreSize(Ty: ElemTy);
335 for (const auto &SubVal : Val.asAggregate()) {
336 toBytes(Val: SubVal, Ty: ElemTy, Bytes: Bytes.slice(N: Offset, M: StoreSize));
337 FillUndefBytes(Offset + StoreSize, Offset + Stride);
338 Offset += Stride;
339 }
340 return;
341 }
342 if (auto *StructTy = dyn_cast<StructType>(Val: Ty)) {
343 const StructLayout *Layout = DL.getStructLayout(Ty: StructTy);
344 uint64_t LastAccessedOffset = 0;
345 for (uint32_t I = 0, E = Val.asAggregate().size(); I != E; ++I) {
346 Type *ElemTy = StructTy->getElementType(N: I);
347 uint64_t ElemOffset = getEffectiveTypeSize(Size: Layout->getElementOffset(Idx: I));
348 uint64_t ElemStoreSize = getEffectiveTypeStoreSize(Ty: ElemTy);
349 FillUndefBytes(LastAccessedOffset, ElemOffset);
350 toBytes(Val: Val.asAggregate()[I], Ty: ElemTy,
351 Bytes: Bytes.slice(N: ElemOffset, M: ElemStoreSize));
352 LastAccessedOffset = ElemOffset + ElemStoreSize;
353 }
354 FillUndefBytes(LastAccessedOffset, getEffectiveTypeStoreSize(Ty: StructTy));
355 return;
356 }
357
358 llvm_unreachable("Unsupported first class type.");
359}
360
361AnyValue Context::load(MemoryObject &MO, uint64_t Offset, Type *ValTy) {
362 return fromBytes(
363 Bytes: MO.getBytes().slice(N: Offset, M: getEffectiveTypeStoreSize(Ty: ValTy)), Ty: ValTy);
364}
365
366void Context::store(MemoryObject &MO, uint64_t Offset, const AnyValue &Val,
367 Type *ValTy) {
368 toBytes(Val, Ty: ValTy,
369 Bytes: MO.getBytes().slice(N: Offset, M: getEffectiveTypeStoreSize(Ty: ValTy)));
370}
371
372void Context::storeRawBytes(MemoryObject &MO, uint64_t Offset, const void *Data,
373 uint64_t Size) {
374 for (uint64_t I = 0; I != Size; ++I)
375 MO[Offset + I] = Byte::concrete(Val: static_cast<const uint8_t *>(Data)[I]);
376}
377
378void Context::freeze(AnyValue &Val, Type *Ty) {
379 if (Val.isPoison()) {
380 uint32_t Bits = DL.getTypeSizeInBits(Ty);
381 APInt RandomVal = APInt::getZero(numBits: Bits);
382 if (UndefBehavior == UndefValueBehavior::NonDeterministic) {
383 SmallVector<APInt::WordType> RandomWords;
384 uint32_t NumWords = APInt::getNumWords(BitWidth: Bits);
385 RandomWords.reserve(N: NumWords);
386 static_assert(decltype(Rng)::word_size >=
387 std::numeric_limits<APInt::WordType>::digits,
388 "Unexpected Rng result type.");
389 for (uint32_t I = 0; I != NumWords; ++I)
390 RandomWords.push_back(Elt: static_cast<APInt::WordType>(Rng()));
391 RandomVal = APInt(Bits, RandomWords);
392 }
393 if (Ty->isIntegerTy())
394 Val = AnyValue(RandomVal);
395 else if (Ty->isFloatingPointTy())
396 Val = AnyValue(APFloat(Ty->getFltSemantics(), RandomVal));
397 else if (Ty->isPointerTy())
398 Val = AnyValue(Pointer(RandomVal));
399 else
400 llvm_unreachable("Unsupported scalar type for poison value");
401 return;
402 }
403 if (Val.isAggregate()) {
404 auto &SubVals = Val.asAggregate();
405 if (auto *VecTy = dyn_cast<VectorType>(Val: Ty)) {
406 Type *ElemTy = VecTy->getElementType();
407 for (auto &SubVal : SubVals)
408 freeze(Val&: SubVal, Ty: ElemTy);
409 } else if (auto *ArrTy = dyn_cast<ArrayType>(Val: Ty)) {
410 Type *ElemTy = ArrTy->getElementType();
411 for (auto &SubVal : SubVals)
412 freeze(Val&: SubVal, Ty: ElemTy);
413 } else if (auto *StructTy = dyn_cast<StructType>(Val: Ty)) {
414 for (uint32_t I = 0, E = SubVals.size(); I != E; ++I)
415 freeze(Val&: SubVals[I], Ty: StructTy->getElementType(N: I));
416 } else {
417 llvm_unreachable("Invalid aggregate type");
418 }
419 }
420}
421
422MemoryObject::~MemoryObject() = default;
423MemoryObject::MemoryObject(uint64_t Addr, uint64_t Size, StringRef Name,
424 unsigned AS, MemInitKind InitKind)
425 : Address(Addr), Size(Size), Name(Name), AS(AS),
426 State(InitKind != MemInitKind::Poisoned ? MemoryObjectState::Alive
427 : MemoryObjectState::Dead) {
428 switch (InitKind) {
429 case MemInitKind::Zeroed:
430 Bytes.resize(N: Size, NV: Byte::concrete(Val: 0));
431 break;
432 case MemInitKind::Uninitialized:
433 Bytes.resize(N: Size, NV: Byte::undef());
434 break;
435 case MemInitKind::Poisoned:
436 Bytes.resize(N: Size, NV: Byte::poison());
437 break;
438 }
439}
440
441IntrusiveRefCntPtr<MemoryObject> Context::allocate(uint64_t Size,
442 uint64_t Align,
443 StringRef Name, unsigned AS,
444 MemInitKind InitKind) {
445 // Even if the memory object is zero-sized, it still occupies a byte to obtain
446 // a unique address.
447 uint64_t AllocateSize = std::max(a: Size, b: (uint64_t)1);
448 if (MaxMem != 0 && SaturatingAdd(X: UsedMem, Y: AllocateSize) >= MaxMem)
449 return nullptr;
450 uint64_t AlignedAddr = alignTo(Value: AllocationBase, Align);
451 auto MemObj =
452 makeIntrusiveRefCnt<MemoryObject>(A&: AlignedAddr, A&: Size, A&: Name, A&: AS, A&: InitKind);
453 MemoryObjects[AlignedAddr] = MemObj;
454 AllocationBase = AlignedAddr + AllocateSize;
455 UsedMem += AllocateSize;
456 return MemObj;
457}
458
459bool Context::free(uint64_t Address) {
460 auto It = MemoryObjects.find(x: Address);
461 if (It == MemoryObjects.end())
462 return false;
463 UsedMem -= std::max(a: It->second->getSize(), b: (uint64_t)1);
464 It->second->markAsFreed();
465 MemoryObjects.erase(position: It);
466 return true;
467}
468
469Pointer Context::deriveFromMemoryObject(IntrusiveRefCntPtr<MemoryObject> Obj) {
470 assert(Obj && "Cannot determine the address space of a null memory object");
471 return Pointer(Obj, APInt(DL.getPointerSizeInBits(AS: Obj->getAddressSpace()),
472 Obj->getAddress()));
473}
474
475Function *Context::getTargetFunction(const Pointer &Ptr) {
476 if (Ptr.address().getActiveBits() > 64)
477 return nullptr;
478 auto It = ValidFuncTargets.find(Val: Ptr.address().getZExtValue());
479 if (It == ValidFuncTargets.end())
480 return nullptr;
481 // TODO: check the provenance of pointer.
482 return It->second.first;
483}
484BasicBlock *Context::getTargetBlock(const Pointer &Ptr) {
485 if (Ptr.address().getActiveBits() > 64)
486 return nullptr;
487 auto It = ValidBlockTargets.find(Val: Ptr.address().getZExtValue());
488 if (It == ValidBlockTargets.end())
489 return nullptr;
490 // TODO: check the provenance of pointer.
491 return It->second.first;
492}
493
494uint64_t Context::getEffectiveTypeAllocSize(Type *Ty) {
495 // FIXME: It is incorrect for overaligned scalable vector types.
496 return getEffectiveTypeSize(Size: DL.getTypeAllocSize(Ty));
497}
498uint64_t Context::getEffectiveTypeStoreSize(Type *Ty) {
499 return getEffectiveTypeSize(Size: DL.getTypeStoreSize(Ty));
500}
501
502void MemoryObject::markAsFreed() {
503 State = MemoryObjectState::Freed;
504 Bytes.clear();
505}

} // namespace llvm::ubi