//===- HWAddressSanitizer.cpp - memory access error detector --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file is a part of HWAddressSanitizer, a basic correctness checker for
/// memory accesses based on tagged addressing.
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/StackSafetyAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/RandomNumberGenerator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Instrumentation.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/MemoryTaggingSupport.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <optional>
#include <random>

using namespace llvm;

#define DEBUG_TYPE "hwasan"

const char kHwasanModuleCtorName[] = "hwasan.module_ctor";
const char kHwasanNoteName[] = "hwasan.note";
const char kHwasanInitName[] = "__hwasan_init";
const char kHwasanPersonalityThunkName[] = "__hwasan_personality_thunk";

const char kHwasanShadowMemoryDynamicAddress[] =
    "__hwasan_shadow_memory_dynamic_address";

// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

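// Each shadow byte describes one 2^kDefaultShadowScale == 16-byte granule of
// application memory.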
static const size_t kDefaultShadowScale = 4;

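// The dynamic shadow base is always aligned to 2^kShadowBaseAlignment bytes;
// emitPrologue relies on this when it recovers the base by rounding the
// thread-local slot value up to that boundary.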
static const unsigned kShadowBaseAlignment = 32;

namespace {
enum class OffsetKind {
  kFixed = 0,
  kGlobal,
  kIfunc,
  kTls,
};
}

static cl::opt<std::string>
    ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix",
                                 cl::desc("Prefix for memory access callbacks"),
                                 cl::Hidden, cl::init("__hwasan_"));

static cl::opt<bool> ClKasanMemIntrinCallbackPrefix(
    "hwasan-kernel-mem-intrinsic-prefix",
    cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClInstrumentWithCalls(
    "hwasan-instrument-with-calls",
    cl::desc("instrument reads and writes with callbacks"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClInstrumentWrites("hwasan-instrument-writes",
                       cl::desc("instrument write instructions"), cl::Hidden,
                       cl::init(true));

static cl::opt<bool> ClInstrumentAtomics(
    "hwasan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool> ClInstrumentByval("hwasan-instrument-byval",
                                       cl::desc("instrument byval arguments"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClRecover("hwasan-recover",
              cl::desc("Enable recovery mode (continue-after-error)."),
              cl::Hidden, cl::init(false));

static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
                                       cl::desc("instrument stack (allocas)"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true),
                     cl::desc("Use Stack Safety analysis results"),
                     cl::Optional);

static cl::opt<size_t> ClMaxLifetimes(
    "hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3),
    cl::ReallyHidden,
    cl::desc("How many lifetime ends to handle for a single alloca."),
    cl::Optional);

static cl::opt<bool>
    ClUseAfterScope("hwasan-use-after-scope",
                    cl::desc("detect use after scope within function"),
                    cl::Hidden, cl::init(true));

static cl::opt<bool> ClGenerateTagsWithCalls(
    "hwasan-generate-tags-with-calls",
    cl::desc("generate new tags with runtime library calls"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"),
                               cl::Hidden, cl::init(false));

static cl::opt<int> ClMatchAllTag(
    "hwasan-match-all-tag",
    cl::desc("don't report bad accesses via pointers with this tag"),
    cl::Hidden, cl::init(-1));

static cl::opt<bool>
    ClEnableKhwasan("hwasan-kernel",
                    cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
                    cl::Hidden, cl::init(false));

// These flags allow changing the shadow mapping and control how shadow memory
// is accessed. The shadow mapping looks like:
//    Shadow = (Mem >> scale) + offset

static cl::opt<uint64_t>
    ClMappingOffset("hwasan-mapping-offset",
                    cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
                    cl::Hidden);

static cl::opt<OffsetKind> ClMappingOffsetDynamic(
    "hwasan-mapping-offset-dynamic",
    cl::desc("HWASan shadow mapping dynamic offset location"), cl::Hidden,
    cl::values(clEnumValN(OffsetKind::kGlobal, "global", "Use global"),
               clEnumValN(OffsetKind::kIfunc, "ifunc", "Use ifunc global"),
               clEnumValN(OffsetKind::kTls, "tls", "Use TLS")));

static cl::opt<bool>
    ClFrameRecords("hwasan-with-frame-record",
                   cl::desc("Use ring buffer for stack allocations"),
                   cl::Hidden);

static cl::opt<int> ClHotPercentileCutoff("hwasan-percentile-cutoff-hot",
                                          cl::desc("Hot percentile cutoff."));

static cl::opt<float>
    ClRandomKeepRate("hwasan-random-rate",
                     cl::desc("Probability value in the range [0.0, 1.0] "
                              "to keep instrumentation of a function. "
                              "Note: instrumentation can be skipped randomly "
                              "OR because of the hot percentile cutoff, if "
                              "both are supplied."));

STATISTIC(NumTotalFuncs, "Number of total funcs");
STATISTIC(NumInstrumentedFuncs, "Number of instrumented funcs");
STATISTIC(NumNoProfileSummaryFuncs, "Number of funcs without PS");

// Mode for selecting how to insert frame record info into the stack ring
// buffer.
enum RecordStackHistoryMode {
  // Do not record frame record info.
  none,

  // Insert instructions into the prologue for storing into the stack ring
  // buffer directly.
  instr,

  // Add a call to __hwasan_add_frame_record in the runtime.
  libcall,
};

static cl::opt<RecordStackHistoryMode> ClRecordStackHistory(
    "hwasan-record-stack-history",
    cl::desc("Record stack frames with tagged allocations in a thread-local "
             "ring buffer"),
    cl::values(clEnumVal(none, "Do not record stack ring history"),
               clEnumVal(instr, "Insert instructions into the prologue for "
                                "storing into the stack ring buffer directly"),
               clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for "
                                  "storing into the stack ring buffer")),
    cl::Hidden, cl::init(instr));

static cl::opt<bool>
    ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
                              cl::desc("instrument memory intrinsics"),
                              cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClInstrumentLandingPads("hwasan-instrument-landing-pads",
                            cl::desc("instrument landing pads"), cl::Hidden,
                            cl::init(false));

static cl::opt<bool> ClUseShortGranules(
    "hwasan-use-short-granules",
    cl::desc("use short granules in allocas and outlined checks"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClInstrumentPersonalityFunctions(
    "hwasan-instrument-personality-functions",
    cl::desc("instrument personality functions"), cl::Hidden);

static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
                                       cl::desc("inline all checks"),
                                       cl::Hidden, cl::init(false));

static cl::opt<bool> ClInlineFastPathChecks("hwasan-inline-fast-path-checks",
                                            cl::desc("inline fast path checks"),
                                            cl::Hidden, cl::init(false));

// Enabled from clang by "-fsanitize-hwaddress-experimental-aliasing".
static cl::opt<bool> ClUsePageAliases("hwasan-experimental-use-page-aliases",
                                      cl::desc("Use page aliasing in HWASan"),
                                      cl::Hidden, cl::init(false));

namespace {

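// Return the value of a command-line flag if it was set explicitly on the
// command line, and the given default otherwise.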
template <typename T> T optOr(cl::opt<T> &Opt, T Other) {
  return Opt.getNumOccurrences() ? Opt : Other;
}

bool shouldUsePageAliases(const Triple &TargetTriple) {
  return ClUsePageAliases && TargetTriple.getArch() == Triple::x86_64;
}

bool shouldInstrumentStack(const Triple &TargetTriple) {
  return !shouldUsePageAliases(TargetTriple) && ClInstrumentStack;
}

bool shouldInstrumentWithCalls(const Triple &TargetTriple) {
  return optOr(ClInstrumentWithCalls,
               TargetTriple.getArch() == Triple::x86_64);
}

bool mightUseStackSafetyAnalysis(bool DisableOptimization) {
  return optOr(ClUseStackSafety, !DisableOptimization);
}

bool shouldUseStackSafetyAnalysis(const Triple &TargetTriple,
                                  bool DisableOptimization) {
  return shouldInstrumentStack(TargetTriple) &&
         mightUseStackSafetyAnalysis(DisableOptimization);
}

bool shouldDetectUseAfterScope(const Triple &TargetTriple) {
  return ClUseAfterScope && shouldInstrumentStack(TargetTriple);
}

/// An instrumentation pass implementing detection of addressability bugs
/// using tagged pointers.
class HWAddressSanitizer {
public:
  HWAddressSanitizer(Module &M, bool CompileKernel, bool Recover,
                     const StackSafetyGlobalInfo *SSI)
      : M(M), SSI(SSI) {
    this->Recover = optOr(ClRecover, Recover);
    this->CompileKernel = optOr(ClEnableKhwasan, CompileKernel);
    this->Rng = ClRandomKeepRate.getNumOccurrences() ? M.createRNG(DEBUG_TYPE)
                                                     : nullptr;

    initializeModule();
  }

  void sanitizeFunction(Function &F, FunctionAnalysisManager &FAM);

private:
  struct ShadowTagCheckInfo {
    Instruction *TagMismatchTerm = nullptr;
    Value *PtrLong = nullptr;
    Value *AddrLong = nullptr;
    Value *PtrTag = nullptr;
    Value *MemTag = nullptr;
  };

  bool selectiveInstrumentationShouldSkip(Function &F,
                                          FunctionAnalysisManager &FAM) const;
  void initializeModule();
  void createHwasanCtorComdat();

  void initializeCallbacks(Module &M);

  Value *getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val);

  Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
  Value *getShadowNonTls(IRBuilder<> &IRB);

  void untagPointerOperand(Instruction *I, Value *Addr);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);

  int64_t getAccessInfo(bool IsWrite, unsigned AccessSizeIndex);
  ShadowTagCheckInfo insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
                                          DomTreeUpdater &DTU, LoopInfo *LI);
  void instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
                                  unsigned AccessSizeIndex,
                                  Instruction *InsertBefore,
                                  DomTreeUpdater &DTU, LoopInfo *LI);
  void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                 unsigned AccessSizeIndex,
                                 Instruction *InsertBefore, DomTreeUpdater &DTU,
                                 LoopInfo *LI);
  bool ignoreMemIntrinsic(OptimizationRemarkEmitter &ORE, MemIntrinsic *MI);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  bool instrumentMemAccess(InterestingMemoryOperand &O, DomTreeUpdater &DTU,
                           LoopInfo *LI, const DataLayout &DL);
  bool ignoreAccessWithoutRemark(Instruction *Inst, Value *Ptr);
  bool ignoreAccess(OptimizationRemarkEmitter &ORE, Instruction *Inst,
                    Value *Ptr);

  void getInterestingMemoryOperands(
      OptimizationRemarkEmitter &ORE, Instruction *I,
      const TargetLibraryInfo &TLI,
      SmallVectorImpl<InterestingMemoryOperand> &Interesting);

  void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
  Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
  Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
  bool instrumentStack(memtag::StackInfo &Info, Value *StackTag, Value *UARTag,
                       const DominatorTree &DT, const PostDominatorTree &PDT,
                       const LoopInfo &LI);
  bool instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
  Value *getNextTagWithCall(IRBuilder<> &IRB);
  Value *getStackBaseTag(IRBuilder<> &IRB);
  Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, unsigned AllocaNo);
  Value *getUARTag(IRBuilder<> &IRB);

  Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB);
  Value *applyTagMask(IRBuilder<> &IRB, Value *OldTag);
  unsigned retagMask(unsigned AllocaNo);

  void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);

  void instrumentGlobal(GlobalVariable *GV, uint8_t Tag);
  void instrumentGlobals();

  Value *getCachedFP(IRBuilder<> &IRB);
  Value *getFrameRecordInfo(IRBuilder<> &IRB);

  void instrumentPersonalityFunctions();

  LLVMContext *C;
  Module &M;
  const StackSafetyGlobalInfo *SSI;
  Triple TargetTriple;
  std::unique_ptr<RandomNumberGenerator> Rng;

  /// This struct defines the shadow mapping using the rule:
  ///   If `kFixed`, then
  ///     shadow = (mem >> Scale) + Offset.
  ///   If `kGlobal`, then
  ///     extern char *__hwasan_shadow_memory_dynamic_address;
  ///     shadow = (mem >> Scale) + __hwasan_shadow_memory_dynamic_address
  ///   If `kIfunc`, then
  ///     extern char __hwasan_shadow[];
  ///     shadow = (mem >> Scale) + &__hwasan_shadow
  ///   If `kTls`, then
  ///     extern char *__hwasan_tls;
  ///     shadow = (mem >> Scale) + align_up(__hwasan_tls, kShadowBaseAlignment)
  ///
  /// If WithFrameRecord is true, then __hwasan_tls will be used to access the
  /// ring buffer for storing stack allocations on targets that support it.
  class ShadowMapping {
    OffsetKind Kind;
    uint64_t Offset;
    uint8_t Scale;
    bool WithFrameRecord;

    void SetFixed(uint64_t O) {
      Kind = OffsetKind::kFixed;
      Offset = O;
    }

  public:
    void init(Triple &TargetTriple, bool InstrumentWithCalls,
              bool CompileKernel);
    Align getObjectAlignment() const { return Align(1ULL << Scale); }
    bool isInGlobal() const { return Kind == OffsetKind::kGlobal; }
    bool isInIfunc() const { return Kind == OffsetKind::kIfunc; }
    bool isInTls() const { return Kind == OffsetKind::kTls; }
    bool isFixed() const { return Kind == OffsetKind::kFixed; }
    uint8_t scale() const { return Scale; };
    uint64_t offset() const {
      assert(isFixed());
      return Offset;
    };
    bool withFrameRecord() const { return WithFrameRecord; };
  };

  ShadowMapping Mapping;

  Type *VoidTy = Type::getVoidTy(M.getContext());
  Type *IntptrTy = M.getDataLayout().getIntPtrType(M.getContext());
  PointerType *PtrTy = PointerType::getUnqual(M.getContext());
  Type *Int8Ty = Type::getInt8Ty(M.getContext());
  Type *Int32Ty = Type::getInt32Ty(M.getContext());
  Type *Int64Ty = Type::getInt64Ty(M.getContext());

  bool CompileKernel;
  bool Recover;
  bool OutlinedChecks;
  bool InlineFastPath;
  bool UseShortGranules;
  bool InstrumentLandingPads;
  bool InstrumentWithCalls;
  bool InstrumentStack;
  bool InstrumentGlobals;
  bool DetectUseAfterScope;
  bool UsePageAliases;
  bool UseMatchAllCallback;

  std::optional<uint8_t> MatchAllTag;

  unsigned PointerTagShift;
  uint64_t TagMaskByte;

  Function *HwasanCtorFunction;

  FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
  FunctionCallee HwasanMemoryAccessCallbackSized[2];

  FunctionCallee HwasanMemmove, HwasanMemcpy, HwasanMemset;
  FunctionCallee HwasanHandleVfork;

  FunctionCallee HwasanTagMemoryFunc;
  FunctionCallee HwasanGenerateTagFunc;
  FunctionCallee HwasanRecordFrameRecordFunc;

  Constant *ShadowGlobal;

  Value *ShadowBase = nullptr;
  Value *StackBaseTag = nullptr;
  Value *CachedFP = nullptr;
  GlobalValue *ThreadPtrGlobal = nullptr;
};

} // end anonymous namespace

PreservedAnalyses HWAddressSanitizerPass::run(Module &M,
                                              ModuleAnalysisManager &MAM) {
  // Return early if the nosanitize_hwaddress module flag is present for the
  // module.
  if (checkIfAlreadyInstrumented(M, "nosanitize_hwaddress"))
    return PreservedAnalyses::all();
  const StackSafetyGlobalInfo *SSI = nullptr;
  const Triple &TargetTriple = M.getTargetTriple();
  if (shouldUseStackSafetyAnalysis(TargetTriple, Options.DisableOptimization))
    SSI = &MAM.getResult<StackSafetyGlobalAnalysis>(M);

  HWAddressSanitizer HWASan(M, Options.CompileKernel, Options.Recover, SSI);
  auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  for (Function &F : M)
    HWASan.sanitizeFunction(F, FAM);

  PreservedAnalyses PA = PreservedAnalyses::none();
  // DominatorTreeAnalysis, PostDominatorTreeAnalysis, and LoopAnalysis
  // are incrementally updated throughout this pass whenever
  // SplitBlockAndInsertIfThen is called.
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<PostDominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  // GlobalsAA is considered stateless and does not get invalidated unless
  // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
  // make changes that require GlobalsAA to be invalidated.
  PA.abandon<GlobalsAA>();
  return PA;
}

void HWAddressSanitizerPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<HWAddressSanitizerPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  OS << '<';
  if (Options.CompileKernel)
    OS << "kernel;";
  if (Options.Recover)
    OS << "recover";
  OS << '>';
}

void HWAddressSanitizer::createHwasanCtorComdat() {
  std::tie(HwasanCtorFunction, std::ignore) =
      getOrCreateSanitizerCtorAndInitFunctions(
          M, kHwasanModuleCtorName, kHwasanInitName,
          /*InitArgTypes=*/{},
          /*InitArgs=*/{},
          // This callback is invoked when the functions are created the first
          // time. Hook them into the global ctors list in that case:
          [&](Function *Ctor, FunctionCallee) {
            Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
            Ctor->setComdat(CtorComdat);
            appendToGlobalCtors(M, Ctor, 0, Ctor);
          });

  // Create a note that contains pointers to the list of global
  // descriptors. Adding a note to the output file will cause the linker to
  // create a PT_NOTE program header pointing to the note that we can use to
  // find the descriptor list starting from the program headers. A function
  // provided by the runtime initializes the shadow memory for the globals by
  // accessing the descriptor list via the note. The dynamic loader needs to
  // call this function whenever a library is loaded.
  //
  // The reason why we use a note for this instead of a more conventional
  // approach of having a global constructor pass a descriptor list pointer to
  // the runtime is because of an order of initialization problem. With
  // constructors we can encounter the following problematic scenario:
  //
  // 1) library A depends on library B and also interposes one of B's symbols
  // 2) B's constructors are called before A's (as required for correctness)
  // 3) during construction, B accesses one of its "own" globals (actually
  //    interposed by A) and triggers a HWASAN failure due to the initialization
  //    for A not having happened yet
  //
  // Even without interposition it is possible to run into similar situations in
  // cases where two libraries mutually depend on each other.
  //
  // We only need one note per binary, so put everything for the note in a
  // comdat. This needs to be a comdat with an .init_array section to prevent
  // newer versions of lld from discarding the note.
  //
  // Create the note even if we aren't instrumenting globals. This ensures that
  // binaries linked from object files with both instrumented and
  // non-instrumented globals will end up with a note, even if a comdat from an
  // object file with non-instrumented globals is selected. The note is harmless
  // if the runtime doesn't support it, since it will just be ignored.
  Comdat *NoteComdat = M.getOrInsertComdat(kHwasanModuleCtorName);

  Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
  auto *Start =
      new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
                         nullptr, "__start_hwasan_globals");
  Start->setVisibility(GlobalValue::HiddenVisibility);
  auto *Stop =
      new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
                         nullptr, "__stop_hwasan_globals");
  Stop->setVisibility(GlobalValue::HiddenVisibility);

  // Null-terminated so actually 8 bytes, which are required in order to align
  // the note properly.
  auto *Name = ConstantDataArray::get(*C, "LLVM\0\0\0");

  auto *NoteTy = StructType::get(Int32Ty, Int32Ty, Int32Ty, Name->getType(),
                                 Int32Ty, Int32Ty);
  auto *Note =
      new GlobalVariable(M, NoteTy, /*isConstant=*/true,
                         GlobalValue::PrivateLinkage, nullptr, kHwasanNoteName);
  Note->setSection(".note.hwasan.globals");
  Note->setComdat(NoteComdat);
  Note->setAlignment(Align(4));

  // The pointers in the note need to be relative so that the note ends up being
  // placed in rodata, which is the standard location for notes.
  auto CreateRelPtr = [&](Constant *Ptr) {
    return ConstantExpr::getTrunc(
        ConstantExpr::getSub(ConstantExpr::getPtrToInt(Ptr, Int64Ty),
                             ConstantExpr::getPtrToInt(Note, Int64Ty)),
        Int32Ty);
  };
  Note->setInitializer(ConstantStruct::getAnon(
      {ConstantInt::get(Int32Ty, 8),                           // n_namesz
       ConstantInt::get(Int32Ty, 8),                           // n_descsz
       ConstantInt::get(Int32Ty, ELF::NT_LLVM_HWASAN_GLOBALS), // n_type
       Name, CreateRelPtr(Start), CreateRelPtr(Stop)}));
  appendToCompilerUsed(M, Note);

  // Create a zero-length global in hwasan_globals so that the linker will
  // always create start and stop symbols.
  auto *Dummy = new GlobalVariable(
      M, Int8Arr0Ty, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
      Constant::getNullValue(Int8Arr0Ty), "hwasan.dummy.global");
  Dummy->setSection("hwasan_globals");
  Dummy->setComdat(NoteComdat);
  Dummy->setMetadata(LLVMContext::MD_associated,
                     MDNode::get(*C, ValueAsMetadata::get(Note)));
  appendToCompilerUsed(M, Dummy);
}

/// Module-level initialization.
///
/// Inserts a call to __hwasan_init into the module's constructor list.
void HWAddressSanitizer::initializeModule() {
  LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
  TargetTriple = M.getTargetTriple();

  // HWASan may do short granule checks on function arguments read from the
  // argument memory (last byte of the granule), which invalidates writeonly.
  for (Function &F : M.functions())
    removeASanIncompatibleFnAttributes(F, /*ReadsArgMem=*/true);

  // x86_64 currently has two modes:
  // - Intel LAM (default)
  // - pointer aliasing (heap only)
  bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
  UsePageAliases = shouldUsePageAliases(TargetTriple);
  InstrumentWithCalls = shouldInstrumentWithCalls(TargetTriple);
  InstrumentStack = shouldInstrumentStack(TargetTriple);
  DetectUseAfterScope = shouldDetectUseAfterScope(TargetTriple);
  PointerTagShift = IsX86_64 ? 57 : 56;
  TagMaskByte = IsX86_64 ? 0x3F : 0xFF;

  Mapping.init(TargetTriple, InstrumentWithCalls, CompileKernel);

  C = &(M.getContext());
  IRBuilder<> IRB(*C);

  HwasanCtorFunction = nullptr;

  // Older versions of Android do not have the required runtime support for
  // short granules, global or personality function instrumentation. On other
  // platforms we currently require using the latest version of the runtime.
  bool NewRuntime =
      !TargetTriple.isAndroid() || !TargetTriple.isAndroidVersionLT(30);

  UseShortGranules = optOr(ClUseShortGranules, NewRuntime);
  OutlinedChecks = (TargetTriple.isAArch64() || TargetTriple.isRISCV64()) &&
                   TargetTriple.isOSBinFormatELF() &&
                   !optOr(ClInlineAllChecks, Recover);

  // These platforms may prefer less inlining to reduce binary size.
  InlineFastPath = optOr(ClInlineFastPathChecks, !(TargetTriple.isAndroid() ||
                                                   TargetTriple.isOSFuchsia()));

  if (ClMatchAllTag.getNumOccurrences()) {
    if (ClMatchAllTag != -1) {
      MatchAllTag = ClMatchAllTag & 0xFF;
    }
  } else if (CompileKernel) {
    MatchAllTag = 0xFF;
  }
  UseMatchAllCallback = !CompileKernel && MatchAllTag.has_value();

  // If we don't have personality function support, fall back to landing pads.
  InstrumentLandingPads = optOr(ClInstrumentLandingPads, !NewRuntime);

  InstrumentGlobals =
      !CompileKernel && !UsePageAliases && optOr(ClGlobals, NewRuntime);

  if (!CompileKernel) {
    createHwasanCtorComdat();

    if (InstrumentGlobals)
      instrumentGlobals();

    bool InstrumentPersonalityFunctions =
        optOr(ClInstrumentPersonalityFunctions, NewRuntime);
    if (InstrumentPersonalityFunctions)
      instrumentPersonalityFunctions();
  }

  if (!TargetTriple.isAndroid()) {
    ThreadPtrGlobal = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
      auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
                                    GlobalValue::ExternalLinkage, nullptr,
                                    "__hwasan_tls", nullptr,
                                    GlobalVariable::InitialExecTLSModel);
      appendToCompilerUsed(M, GV);
      return GV;
    });
  }
}

void HWAddressSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  const std::string MatchAllStr = UseMatchAllCallback ? "_match_all" : "";
  FunctionType *HwasanMemoryAccessCallbackSizedFnTy,
      *HwasanMemoryAccessCallbackFnTy, *HwasanMemTransferFnTy,
      *HwasanMemsetFnTy;
  if (UseMatchAllCallback) {
    HwasanMemoryAccessCallbackSizedFnTy =
        FunctionType::get(VoidTy, {IntptrTy, IntptrTy, Int8Ty}, false);
    HwasanMemoryAccessCallbackFnTy =
        FunctionType::get(VoidTy, {IntptrTy, Int8Ty}, false);
    HwasanMemTransferFnTy =
        FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy, Int8Ty}, false);
    HwasanMemsetFnTy =
        FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy, Int8Ty}, false);
  } else {
    HwasanMemoryAccessCallbackSizedFnTy =
        FunctionType::get(VoidTy, {IntptrTy, IntptrTy}, false);
    HwasanMemoryAccessCallbackFnTy =
        FunctionType::get(VoidTy, {IntptrTy}, false);
    HwasanMemTransferFnTy =
        FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy}, false);
    HwasanMemsetFnTy =
        FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy}, false);
  }

  for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
    const std::string TypeStr = AccessIsWrite ? "store" : "load";
    const std::string EndingStr = Recover ? "_noabort" : "";

    HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
        ClMemoryAccessCallbackPrefix + TypeStr + "N" + MatchAllStr + EndingStr,
        HwasanMemoryAccessCallbackSizedFnTy);

    for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
         AccessSizeIndex++) {
      HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
          M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + TypeStr +
                                    itostr(1ULL << AccessSizeIndex) +
                                    MatchAllStr + EndingStr,
                                HwasanMemoryAccessCallbackFnTy);
    }
  }

  const std::string MemIntrinCallbackPrefix =
      (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
          ? std::string("")
          : ClMemoryAccessCallbackPrefix;

  HwasanMemmove = M.getOrInsertFunction(
      MemIntrinCallbackPrefix + "memmove" + MatchAllStr, HwasanMemTransferFnTy);
  HwasanMemcpy = M.getOrInsertFunction(
      MemIntrinCallbackPrefix + "memcpy" + MatchAllStr, HwasanMemTransferFnTy);
  HwasanMemset = M.getOrInsertFunction(
      MemIntrinCallbackPrefix + "memset" + MatchAllStr, HwasanMemsetFnTy);

  HwasanTagMemoryFunc = M.getOrInsertFunction("__hwasan_tag_memory", VoidTy,
                                              PtrTy, Int8Ty, IntptrTy);
  HwasanGenerateTagFunc =
      M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);

  HwasanRecordFrameRecordFunc =
      M.getOrInsertFunction("__hwasan_add_frame_record", VoidTy, Int64Ty);

  ShadowGlobal =
      M.getOrInsertGlobal("__hwasan_shadow", ArrayType::get(Int8Ty, 0));

  HwasanHandleVfork =
      M.getOrInsertFunction("__hwasan_handle_vfork", VoidTy, IntptrTy);
}

Value *HWAddressSanitizer::getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val) {
  // An empty inline asm with input reg == output reg.
  // An opaque no-op cast, basically.
  // This prevents code bloat as a result of rematerializing trivial definitions
  // such as constants or global addresses at every load and store.
  InlineAsm *Asm =
      InlineAsm::get(FunctionType::get(PtrTy, {Val->getType()}, false),
                     StringRef(""), StringRef("=r,0"),
                     /*hasSideEffects=*/false);
  return IRB.CreateCall(Asm, {Val}, ".hwasan.shadow");
}

Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
  return getOpaqueNoopCast(IRB, ShadowGlobal);
}

Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
  if (Mapping.isFixed()) {
    return getOpaqueNoopCast(
        IRB, ConstantExpr::getIntToPtr(
                 ConstantInt::get(IntptrTy, Mapping.offset()), PtrTy));
  }

  if (Mapping.isInIfunc())
    return getDynamicShadowIfunc(IRB);

  Value *GlobalDynamicAddress =
      IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal(
          kHwasanShadowMemoryDynamicAddress, PtrTy);
  return IRB.CreateLoad(PtrTy, GlobalDynamicAddress);
}

bool HWAddressSanitizer::ignoreAccessWithoutRemark(Instruction *Inst,
                                                   Value *Ptr) {
  // Do not instrument accesses from different address spaces; we cannot deal
  // with them.
  Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
  if (PtrTy->getPointerAddressSpace() != 0)
    return true;

  // Ignore swifterror addresses.
  // swifterror memory addresses are mem2reg promoted by instruction
  // selection. As such they cannot have regular uses like an instrumentation
  // function and it makes no sense to track them as memory.
  if (Ptr->isSwiftError())
    return true;

  if (findAllocaForValue(Ptr)) {
    if (!InstrumentStack)
      return true;
    if (SSI && SSI->stackAccessIsSafe(*Inst))
      return true;
  }

  if (isa<GlobalVariable>(getUnderlyingObject(Ptr))) {
    if (!InstrumentGlobals)
      return true;
    // TODO: Optimize inbound global accesses, like Asan `instrumentMop`.
  }

  return false;
}

bool HWAddressSanitizer::ignoreAccess(OptimizationRemarkEmitter &ORE,
                                      Instruction *Inst, Value *Ptr) {
  bool Ignored = ignoreAccessWithoutRemark(Inst, Ptr);
  if (Ignored) {
    ORE.emit(
        [&]() { return OptimizationRemark(DEBUG_TYPE, "ignoreAccess", Inst); });
  } else {
    ORE.emit([&]() {
      return OptimizationRemarkMissed(DEBUG_TYPE, "ignoreAccess", Inst);
    });
  }
  return Ignored;
}

void HWAddressSanitizer::getInterestingMemoryOperands(
    OptimizationRemarkEmitter &ORE, Instruction *I,
    const TargetLibraryInfo &TLI,
    SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
  // Skip memory accesses inserted by another instrumentation.
  if (I->hasMetadata(LLVMContext::MD_nosanitize))
    return;

  // Do not instrument the load fetching the dynamic shadow address.
  if (ShadowBase == I)
    return;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads || ignoreAccess(ORE, I, LI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
                             LI->getType(), LI->getAlign());
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites || ignoreAccess(ORE, I, SI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
                             SI->getValueOperand()->getType(), SI->getAlign());
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(ORE, I, RMW->getPointerOperand()))
      return;
    Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
                             RMW->getValOperand()->getType(), std::nullopt);
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(ORE, I, XCHG->getPointerOperand()))
      return;
    Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
                             XCHG->getCompareOperand()->getType(),
                             std::nullopt);
  } else if (auto *CI = dyn_cast<CallInst>(I)) {
    for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
      if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
          ignoreAccess(ORE, I, CI->getArgOperand(ArgNo)))
        continue;
      Type *Ty = CI->getParamByValType(ArgNo);
      Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
    }
    maybeMarkSanitizerLibraryCallNoBuiltin(CI, &TLI);
  }
}

static unsigned getPointerOperandIndex(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperandIndex();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperandIndex();
  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
    return RMW->getPointerOperandIndex();
  if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
    return XCHG->getPointerOperandIndex();
  report_fatal_error("Unexpected instruction");
  return -1;
}

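// Map an access size in bits (8, 16, 32, 64, 128) to its index into the
// HwasanMemoryAccessCallback table.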
static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = llvm::countr_zero(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}

void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
  if (TargetTriple.isAArch64() || TargetTriple.getArch() == Triple::x86_64 ||
      TargetTriple.isRISCV64())
    return;

  IRBuilder<> IRB(I);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  Value *UntaggedPtr =
      IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
  I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
}

Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
  // Mem >> Scale
  Value *Shadow = IRB.CreateLShr(Mem, Mapping.scale());
  if (Mapping.isFixed() && Mapping.offset() == 0)
    return IRB.CreateIntToPtr(Shadow, PtrTy);
  // (Mem >> Scale) + Offset
  return IRB.CreatePtrAdd(ShadowBase, Shadow);
}

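// Pack the parameters of a check into a single integer. Outlined checks take
// it as an intrinsic argument, and inline checks encode it into the trap
// immediate, from which the runtime decodes how to report the failure.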
int64_t HWAddressSanitizer::getAccessInfo(bool IsWrite,
                                          unsigned AccessSizeIndex) {
  return (CompileKernel << HWASanAccessInfo::CompileKernelShift) |
         (MatchAllTag.has_value() << HWASanAccessInfo::HasMatchAllShift) |
         (MatchAllTag.value_or(0) << HWASanAccessInfo::MatchAllShift) |
         (Recover << HWASanAccessInfo::RecoverShift) |
         (IsWrite << HWASanAccessInfo::IsWriteShift) |
         (AccessSizeIndex << HWASanAccessInfo::AccessSizeShift);
}

HWAddressSanitizer::ShadowTagCheckInfo
HWAddressSanitizer::insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
                                         DomTreeUpdater &DTU, LoopInfo *LI) {
  ShadowTagCheckInfo R;

  IRBuilder<> IRB(InsertBefore);

  R.PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
  R.PtrTag =
      IRB.CreateTrunc(IRB.CreateLShr(R.PtrLong, PointerTagShift), Int8Ty);
  R.AddrLong = untagPointer(IRB, R.PtrLong);
  Value *Shadow = memToShadow(R.AddrLong, IRB);
  R.MemTag = IRB.CreateLoad(Int8Ty, Shadow);
  Value *TagMismatch = IRB.CreateICmpNE(R.PtrTag, R.MemTag);

  if (MatchAllTag.has_value()) {
    Value *TagNotIgnored = IRB.CreateICmpNE(
        R.PtrTag, ConstantInt::get(R.PtrTag->getType(), *MatchAllTag));
    TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
  }

  R.TagMismatchTerm = SplitBlockAndInsertIfThen(
      TagMismatch, InsertBefore, false,
      MDBuilder(*C).createUnlikelyBranchWeights(), &DTU, LI);

  return R;
}

void HWAddressSanitizer::instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
                                                    unsigned AccessSizeIndex,
                                                    Instruction *InsertBefore,
                                                    DomTreeUpdater &DTU,
                                                    LoopInfo *LI) {
  assert(!UsePageAliases);
  const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);

  if (InlineFastPath)
    InsertBefore =
        insertShadowTagCheck(Ptr, InsertBefore, DTU, LI).TagMismatchTerm;

  IRBuilder<> IRB(InsertBefore);
  bool UseFixedShadowIntrinsic = false;
  // The memaccess fixed shadow intrinsic is only supported on AArch64,
  // which allows a 16-bit immediate to be left-shifted by 32.
  // Since kShadowBaseAlignment == 32, and Linux by default will not
  // mmap above 48-bits, practically any valid shadow offset is
  // representable.
  // In particular, an offset of 4TB (1024 << 32) is representable, and
  // ought to be good enough for anybody.
  if (TargetTriple.isAArch64() && Mapping.isFixed()) {
    uint16_t OffsetShifted = Mapping.offset() >> 32;
    UseFixedShadowIntrinsic =
        static_cast<uint64_t>(OffsetShifted) << 32 == Mapping.offset();
  }

  if (UseFixedShadowIntrinsic) {
    IRB.CreateIntrinsic(
        UseShortGranules
            ? Intrinsic::hwasan_check_memaccess_shortgranules_fixedshadow
            : Intrinsic::hwasan_check_memaccess_fixedshadow,
        {Ptr, ConstantInt::get(Int32Ty, AccessInfo),
         ConstantInt::get(Int64Ty, Mapping.offset())});
  } else {
    IRB.CreateIntrinsic(
        UseShortGranules ? Intrinsic::hwasan_check_memaccess_shortgranules
                         : Intrinsic::hwasan_check_memaccess,
        {ShadowBase, Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
  }
}

void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                                   unsigned AccessSizeIndex,
                                                   Instruction *InsertBefore,
                                                   DomTreeUpdater &DTU,
                                                   LoopInfo *LI) {
  assert(!UsePageAliases);
  const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);

  ShadowTagCheckInfo TCI = insertShadowTagCheck(Ptr, InsertBefore, DTU, LI);

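  // A pointer/memory tag mismatch can still be benign if the shadow byte
  // denotes a short granule: values 1..15 mean only that many leading bytes
  // of the 16-byte granule are addressable and the real tag is stored in the
  // granule's last byte. Verify those conditions before reporting a failure.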
  IRBuilder<> IRB(TCI.TagMismatchTerm);
  Value *OutOfShortGranuleTagRange =
      IRB.CreateICmpUGT(TCI.MemTag, ConstantInt::get(Int8Ty, 15));
  Instruction *CheckFailTerm = SplitBlockAndInsertIfThen(
      OutOfShortGranuleTagRange, TCI.TagMismatchTerm, !Recover,
      MDBuilder(*C).createUnlikelyBranchWeights(), &DTU, LI);

  IRB.SetInsertPoint(TCI.TagMismatchTerm);
  Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(TCI.PtrLong, 15), Int8Ty);
  PtrLowBits = IRB.CreateAdd(
      PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
  Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, TCI.MemTag);
  SplitBlockAndInsertIfThen(PtrLowBitsOOB, TCI.TagMismatchTerm, false,
                            MDBuilder(*C).createUnlikelyBranchWeights(), &DTU,
                            LI, CheckFailTerm->getParent());

  IRB.SetInsertPoint(TCI.TagMismatchTerm);
  Value *InlineTagAddr = IRB.CreateOr(TCI.AddrLong, 15);
  InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, PtrTy);
  Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
  Value *InlineTagMismatch = IRB.CreateICmpNE(TCI.PtrTag, InlineTag);
  SplitBlockAndInsertIfThen(InlineTagMismatch, TCI.TagMismatchTerm, false,
                            MDBuilder(*C).createUnlikelyBranchWeights(), &DTU,
                            LI, CheckFailTerm->getParent());

  IRB.SetInsertPoint(CheckFailTerm);
  InlineAsm *Asm;
  switch (TargetTriple.getArch()) {
  case Triple::x86_64:
    // The signal handler will find the data address in rdi.
    Asm = InlineAsm::get(
        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
        "int3\nnopl " +
            itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)) +
            "(%rax)",
        "{rdi}",
        /*hasSideEffects=*/true);
    break;
  case Triple::aarch64:
  case Triple::aarch64_be:
    // The signal handler will find the data address in x0.
    Asm = InlineAsm::get(
        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
        "brk #" + itostr(0x900 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
        "{x0}",
        /*hasSideEffects=*/true);
    break;
  case Triple::riscv64:
    // The signal handler will find the data address in x10.
    Asm = InlineAsm::get(
        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
        "ebreak\naddiw x0, x11, " +
            itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
        "{x10}",
        /*hasSideEffects=*/true);
    break;
  default:
    report_fatal_error("unsupported architecture");
  }
  IRB.CreateCall(Asm, TCI.PtrLong);
  if (Recover)
    cast<BranchInst>(CheckFailTerm)
        ->setSuccessor(0, TCI.TagMismatchTerm->getParent());
}

bool HWAddressSanitizer::ignoreMemIntrinsic(OptimizationRemarkEmitter &ORE,
                                            MemIntrinsic *MI) {
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
    return (!ClInstrumentWrites || ignoreAccess(ORE, MTI, MTI->getDest())) &&
           (!ClInstrumentReads || ignoreAccess(ORE, MTI, MTI->getSource()));
  }
  if (isa<MemSetInst>(MI))
    return !ClInstrumentWrites || ignoreAccess(ORE, MI, MI->getDest());
  return false;
}

void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
  IRBuilder<> IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    SmallVector<Value *, 4> Args{
        MI->getOperand(0), MI->getOperand(1),
        IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};

    if (UseMatchAllCallback)
      Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
    IRB.CreateCall(isa<MemMoveInst>(MI) ? HwasanMemmove : HwasanMemcpy, Args);
  } else if (isa<MemSetInst>(MI)) {
    SmallVector<Value *, 4> Args{
        MI->getOperand(0),
        IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
        IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
    if (UseMatchAllCallback)
      Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
    IRB.CreateCall(HwasanMemset, Args);
  }
  MI->eraseFromParent();
}

bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O,
                                             DomTreeUpdater &DTU, LoopInfo *LI,
                                             const DataLayout &DL) {
  Value *Addr = O.getPtr();

  LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");

  // If the pointer is statically known to be zero, the tag check will pass
  // since:
  // 1) it has a zero tag
  // 2) the shadow memory corresponding to address 0 is initialized to zero and
  //    never updated.
  // We can therefore elide the tag check.
  llvm::KnownBits Known(DL.getPointerTypeSizeInBits(Addr->getType()));
  llvm::computeKnownBits(Addr, Known, DL);
  if (Known.isZero())
    return false;

  if (O.MaybeMask)
    return false; // FIXME

  IRBuilder<> IRB(O.getInsn());
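  // Use the fixed-size fast path only for power-of-two sizes of at most 16
  // bytes whose alignment (natural alignment, in the case of atomics) keeps
  // the access within a single shadow granule; everything else goes through
  // the generic sized callback.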
  if (!O.TypeStoreSize.isScalable() && isPowerOf2_64(O.TypeStoreSize) &&
      (O.TypeStoreSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
      (!O.Alignment || *O.Alignment >= Mapping.getObjectAlignment() ||
       *O.Alignment >= O.TypeStoreSize / 8)) {
    size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeStoreSize);
    if (InstrumentWithCalls) {
      SmallVector<Value *, 2> Args{IRB.CreatePointerCast(Addr, IntptrTy)};
      if (UseMatchAllCallback)
        Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
      IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
                     Args);
    } else if (OutlinedChecks) {
      instrumentMemAccessOutline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
                                 DTU, LI);
    } else {
      instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
                                DTU, LI);
    }
  } else {
    SmallVector<Value *, 3> Args{
        IRB.CreatePointerCast(Addr, IntptrTy),
        IRB.CreateUDiv(IRB.CreateTypeSize(IntptrTy, O.TypeStoreSize),
                       ConstantInt::get(IntptrTy, 8))};
    if (UseMatchAllCallback)
      Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
    IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite], Args);
  }
  untagPointerOperand(O.getInsn(), Addr);

  return true;
}

void HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag,
                                   size_t Size) {
  size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
  if (!UseShortGranules)
    Size = AlignedSize;

  Tag = IRB.CreateTrunc(Tag, Int8Ty);
  if (InstrumentWithCalls) {
    IRB.CreateCall(HwasanTagMemoryFunc,
                   {IRB.CreatePointerCast(AI, PtrTy), Tag,
                    ConstantInt::get(IntptrTy, AlignedSize)});
  } else {
    size_t ShadowSize = Size >> Mapping.scale();
    Value *AddrLong = untagPointer(IRB, IRB.CreatePointerCast(AI, IntptrTy));
    Value *ShadowPtr = memToShadow(AddrLong, IRB);
    // If this memset is not inlined, it will be intercepted in the hwasan
    // runtime library. That's OK, because the interceptor skips the checks if
    // the address is in the shadow region.
    // FIXME: the interceptor is not as fast as real memset. Consider lowering
    // llvm.memset right here into either a sequence of stores, or a call to
    // hwasan_tag_memory.
    if (ShadowSize)
      IRB.CreateMemSet(ShadowPtr, Tag, ShadowSize, Align(1));
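    // Short granule: the shadow byte of a partially used trailing granule
    // holds the number of addressable bytes, and the tag itself is stored in
    // the last byte of that granule.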
    if (Size != AlignedSize) {
      const uint8_t SizeRemainder = Size % Mapping.getObjectAlignment().value();
      IRB.CreateStore(ConstantInt::get(Int8Ty, SizeRemainder),
                      IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
      IRB.CreateStore(
          Tag, IRB.CreateConstGEP1_32(Int8Ty, IRB.CreatePointerCast(AI, PtrTy),
                                      AlignedSize - 1));
    }
  }
}

unsigned HWAddressSanitizer::retagMask(unsigned AllocaNo) {
  if (TargetTriple.getArch() == Triple::x86_64)
    return AllocaNo & TagMaskByte;

  // A list of 8-bit numbers that have at most one run of non-zero bits.
  // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
  // masks.
  // The list does not include the value 255, which is used for UAR.
  //
  // Because we are more likely to use earlier elements of this list than later
  // ones, it is sorted in increasing order of probability of collision with a
  // mask allocated (temporally) nearby. The program that generated this list
  // can be found at:
  // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
  static const unsigned FastMasks[] = {
      0,   128, 64,  192, 32,  96,  224, 112, 240, 48,  16,  120,
      248, 56,  24,  8,   124, 252, 60,  28,  12,  4,   126, 254,
      62,  30,  14,  6,   2,   127, 63,  31,  15,  7,   3,   1};
  return FastMasks[AllocaNo % std::size(FastMasks)];
}

Value *HWAddressSanitizer::applyTagMask(IRBuilder<> &IRB, Value *OldTag) {
  if (TagMaskByte == 0xFF)
    return OldTag; // No need to clear the tag byte.
  return IRB.CreateAnd(OldTag,
                       ConstantInt::get(OldTag->getType(), TagMaskByte));
}

Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
  return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
}

Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
  if (ClGenerateTagsWithCalls)
    return nullptr;
  if (StackBaseTag)
    return StackBaseTag;
  // Extract some entropy from the frame pointer for the tags.
  // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
  // between functions).
  Value *FramePointerLong = getCachedFP(IRB);
  Value *StackTag =
      applyTagMask(IRB, IRB.CreateXor(FramePointerLong,
                                      IRB.CreateLShr(FramePointerLong, 20)));
  StackTag->setName("hwasan.stack.base.tag");
  return StackTag;
}

Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
                                        unsigned AllocaNo) {
  if (ClGenerateTagsWithCalls)
    return getNextTagWithCall(IRB);
  return IRB.CreateXor(
      StackTag, ConstantInt::get(StackTag->getType(), retagMask(AllocaNo)));
}

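// Compute the tag that is written over an alloca's memory when it goes out of
// scope ("use after return" tag); it is derived from the tag bits of the
// frame pointer.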
1272Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB) {
1273 Value *FramePointerLong = getCachedFP(IRB);
1274 Value *UARTag =
1275 applyTagMask(IRB, OldTag: IRB.CreateLShr(LHS: FramePointerLong, RHS: PointerTagShift));
1276
1277 UARTag->setName("hwasan.uar.tag");
1278 return UARTag;
1279}
1280
1281// Add a tag to an address.
1282Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
1283 Value *PtrLong, Value *Tag) {
1284 assert(!UsePageAliases);
1285 Value *TaggedPtrLong;
1286 if (CompileKernel) {
1287 // Kernel addresses have 0xFF in the most significant byte.
1288 Value *ShiftedTag =
1289 IRB.CreateOr(LHS: IRB.CreateShl(LHS: Tag, RHS: PointerTagShift),
1290 RHS: ConstantInt::get(Ty: IntptrTy, V: (1ULL << PointerTagShift) - 1));
1291 TaggedPtrLong = IRB.CreateAnd(LHS: PtrLong, RHS: ShiftedTag);
1292 } else {
1293 // Userspace can simply do OR (tag << PointerTagShift);
1294 Value *ShiftedTag = IRB.CreateShl(LHS: Tag, RHS: PointerTagShift);
1295 TaggedPtrLong = IRB.CreateOr(LHS: PtrLong, RHS: ShiftedTag);
1296 }
1297 return IRB.CreateIntToPtr(V: TaggedPtrLong, DestTy: Ty);
1298}
1299
1300// Remove tag from an address.
1301Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
1302 assert(!UsePageAliases);
1303 Value *UntaggedPtrLong;
1304 if (CompileKernel) {
1305 // Kernel addresses have 0xFF in the most significant byte.
1306 UntaggedPtrLong =
1307 IRB.CreateOr(LHS: PtrLong, RHS: ConstantInt::get(Ty: PtrLong->getType(),
1308 V: TagMaskByte << PointerTagShift));
1309 } else {
1310 // Userspace addresses have 0x00.
1311 UntaggedPtrLong = IRB.CreateAnd(
1312 LHS: PtrLong, RHS: ConstantInt::get(Ty: PtrLong->getType(),
1313 V: ~(TagMaskByte << PointerTagShift)));
1314 }
1315 return UntaggedPtrLong;
1316}
1317
1318Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB) {
1319 // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
1320 // in Bionic's libc/platform/bionic/tls_defines.h.
1321 constexpr int SanitizerSlot = 6;
1322 if (TargetTriple.isAArch64() && TargetTriple.isAndroid())
1323 return memtag::getAndroidSlotPtr(IRB, Slot: SanitizerSlot);
1324 return ThreadPtrGlobal;
1325}
1326
1327Value *HWAddressSanitizer::getCachedFP(IRBuilder<> &IRB) {
1328 if (!CachedFP)
1329 CachedFP = memtag::getFP(IRB);
1330 return CachedFP;
1331}
1332
1333Value *HWAddressSanitizer::getFrameRecordInfo(IRBuilder<> &IRB) {
1334 // Prepare ring buffer data.
1335 Value *PC = memtag::getPC(TargetTriple, IRB);
1336 Value *FP = getCachedFP(IRB);
1337
1338 // Mix FP and PC.
1339 // Assumptions:
1340 // PC is 0x0000PPPPPPPPPPPP (48 bits are meaningful, others are zero)
1341 // FP is 0xfffffffffffFFFF0 (4 lower bits are zero)
1342 // We only really need ~20 lower non-zero bits (FFFF), so we mix like this:
1343 // 0xFFFFPPPPPPPPPPPP
1344 //
1345 // FP works because in AArch64FrameLowering::getFrameIndexReference, we
1346 // prefer FP-relative offsets for functions compiled with HWASan.
1347 FP = IRB.CreateShl(LHS: FP, RHS: 44);
1348 return IRB.CreateOr(LHS: PC, RHS: FP);
1349}
1350
1351void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
1352 if (!Mapping.isInTls())
1353 ShadowBase = getShadowNonTls(IRB);
1354 else if (!WithFrameRecord && TargetTriple.isAndroid())
1355 ShadowBase = getDynamicShadowIfunc(IRB);
1356
1357 if (!WithFrameRecord && ShadowBase)
1358 return;
1359
1360 Value *SlotPtr = nullptr;
1361 Value *ThreadLong = nullptr;
1362 Value *ThreadLongMaybeUntagged = nullptr;
1363
1364 auto getThreadLongMaybeUntagged = [&]() {
1365 if (!SlotPtr)
1366 SlotPtr = getHwasanThreadSlotPtr(IRB);
1367 if (!ThreadLong)
1368 ThreadLong = IRB.CreateLoad(Ty: IntptrTy, Ptr: SlotPtr);
1369 // Extract the address field from ThreadLong. Unnecessary on AArch64 with
1370 // TBI.
1371 return TargetTriple.isAArch64() ? ThreadLong
1372 : untagPointer(IRB, PtrLong: ThreadLong);
1373 };
1374
1375 if (WithFrameRecord) {
1376 switch (ClRecordStackHistory) {
1377 case libcall: {
1378 // Emit a runtime call into hwasan rather than emitting instructions for
1379 // recording stack history.
1380 Value *FrameRecordInfo = getFrameRecordInfo(IRB);
1381 IRB.CreateCall(Callee: HwasanRecordFrameRecordFunc, Args: {FrameRecordInfo});
1382 break;
1383 }
1384 case instr: {
1385 ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();
1386
      StackBaseTag = IRB.CreateAShr(ThreadLong, 3);

      // Store data to ring buffer.
      Value *FrameRecordInfo = getFrameRecordInfo(IRB);
      Value *RecordPtr =
          IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IRB.getPtrTy(0));
      IRB.CreateStore(FrameRecordInfo, RecordPtr);

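      // Advance the ring buffer write cursor by one 8-byte record and publish
      // it back to the TLS slot.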
      IRB.CreateStore(memtag::incrementThreadLong(IRB, ThreadLong, 8), SlotPtr);
      break;
    }
    case none: {
      llvm_unreachable(
          "A stack history recording mode should've been selected.");
    }
    }
  }

  if (!ShadowBase) {
    if (!ThreadLongMaybeUntagged)
      ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();

    // Get the shadow base address by aligning ThreadLongMaybeUntagged up.
    // Note: this is not correct if the value is already aligned.
    // The runtime library makes sure this never happens.
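    // For example, with kShadowBaseAlignment == 32 this computes
    // (x | 0xffffffff) + 1, i.e. it rounds x up to the next 2^32 boundary
    // (and, as noted above, would bump an already-aligned x by a full 2^32).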
    ShadowBase = IRB.CreateAdd(
        IRB.CreateOr(ThreadLongMaybeUntagged,
                     ConstantInt::get(IntptrTy,
                                      (1ULL << kShadowBaseAlignment) - 1)),
        ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
    ShadowBase = IRB.CreateIntToPtr(ShadowBase, PtrTy);
  }
}

bool HWAddressSanitizer::instrumentLandingPads(
    SmallVectorImpl<Instruction *> &LandingPadVec) {
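  // A landing pad is reached after unwinding over frames whose stack memory
  // may still be tagged. Reuse the __hwasan_handle_vfork runtime hook with
  // the current stack pointer so the runtime can untag the now-dead stack
  // region below it.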
  for (auto *LP : LandingPadVec) {
    IRBuilder<> IRB(LP->getNextNonDebugInstruction());
    IRB.CreateCall(
        HwasanHandleVfork,
        {memtag::readRegister(
            IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp" : "sp")});
  }
  return true;
}

bool HWAddressSanitizer::instrumentStack(memtag::StackInfo &SInfo,
                                         Value *StackTag, Value *UARTag,
                                         const DominatorTree &DT,
                                         const PostDominatorTree &PDT,
                                         const LoopInfo &LI) {
  // Ideally, we want to calculate the tagged stack base pointer, and rewrite
  // all alloca addresses using that. Unfortunately, offsets are not known yet
  // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
  // temp, shift-OR it into each alloca address and xor with the retag mask.
  // This generates one extra instruction per alloca use.
  unsigned int I = 0;

  for (auto &KV : SInfo.AllocasToInstrument) {
    auto N = I++;
    auto *AI = KV.first;
    memtag::AllocaInfo &Info = KV.second;
    IRBuilder<> IRB(AI->getNextNonDebugInstruction());

    // Replace uses of the alloca with the tagged address.
    Value *Tag = getAllocaTag(IRB, StackTag, N);
    Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
    Value *AINoTagLong = untagPointer(IRB, AILong);
    Value *Replacement = tagPointer(IRB, AI->getType(), AINoTagLong, Tag);
    std::string Name =
        AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
    Replacement->setName(Name + ".hwasan");

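    // Round the alloca size up to the tag granule so that tagging and
    // untagging always cover whole granules.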
    size_t Size = memtag::getAllocaSizeInBytes(*AI);
    size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());

    Value *AICast = IRB.CreatePointerCast(AI, PtrTy);

    auto HandleLifetime = [&](IntrinsicInst *II) {
      // Set the lifetime intrinsic to cover the whole alloca. This reduces the
      // set of assumptions we need to make about the lifetime. Without this we
      // would need to ensure that we can track the lifetime pointer to a
      // constant offset from the alloca, and would still need to change the
      // size to include the extra alignment we use for the untagging to make
      // the size consistent.
      //
      // The check for standard lifetime below makes sure that we have exactly
      // one set of start / end in any execution (i.e. the ends are not
      // reachable from each other), so this will not cause any problems.
      II->setArgOperand(0, ConstantInt::get(Int64Ty, AlignedSize));
      II->setArgOperand(1, AICast);
    };
    llvm::for_each(Info.LifetimeStart, HandleLifetime);
    llvm::for_each(Info.LifetimeEnd, HandleLifetime);

    AI->replaceUsesWithIf(Replacement, [AICast, AILong](const Use &U) {
      auto *User = U.getUser();
      return User != AILong && User != AICast && !isa<LifetimeIntrinsic>(User);
    });

    memtag::annotateDebugRecords(Info, retagMask(N));

    auto TagEnd = [&](Instruction *Node) {
      IRB.SetInsertPoint(Node);
      // When untagging, use the `AlignedSize` because we need to set the tags
      // for the entire alloca to original. If we used `Size` here, we would
      // keep the last granule tagged, and store zero in the last byte of the
      // last granule, due to how short granules are implemented.
      tagAlloca(IRB, AI, UARTag, AlignedSize);
    };
    // Calls to functions that may return twice (e.g. setjmp) confuse the
    // postdominator analysis, and will leave us to keep memory tagged after
    // function return. Work around this by always untagging at every return
    // statement if return_twice functions are called.
    bool StandardLifetime =
        !SInfo.CallsReturnTwice && SInfo.UnrecognizedLifetimes.empty() &&
        memtag::isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd, &DT,
                                   &LI, ClMaxLifetimes);
    if (DetectUseAfterScope && StandardLifetime) {
      IntrinsicInst *Start = Info.LifetimeStart[0];
      IRB.SetInsertPoint(Start->getNextNode());
      tagAlloca(IRB, AI, Tag, Size);
      if (!memtag::forAllReachableExits(DT, PDT, LI, Start, Info.LifetimeEnd,
                                        SInfo.RetVec, TagEnd)) {
        for (auto *End : Info.LifetimeEnd)
          End->eraseFromParent();
      }
    } else {
      tagAlloca(IRB, AI, Tag, Size);
      for (auto *RI : SInfo.RetVec)
        TagEnd(RI);
      // We inserted tagging outside of the lifetimes, so we have to remove
      // them.
      for (auto &II : Info.LifetimeStart)
        II->eraseFromParent();
      for (auto &II : Info.LifetimeEnd)
        II->eraseFromParent();
    }
    memtag::alignAndPadAlloca(Info, Mapping.getObjectAlignment());
  }
  for (auto &I : SInfo.UnrecognizedLifetimes)
    I->eraseFromParent();
  return true;
}

static void emitRemark(const Function &F, OptimizationRemarkEmitter &ORE,
                       bool Skip) {
  if (Skip) {
    ORE.emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "Skip", &F)
             << "Skipped: F=" << ore::NV("Function", &F);
    });
  } else {
    ORE.emit([&]() {
      return OptimizationRemarkMissed(DEBUG_TYPE, "Sanitize", &F)
             << "Sanitized: F=" << ore::NV("Function", &F);
    });
  }
}

bool HWAddressSanitizer::selectiveInstrumentationShouldSkip(
    Function &F, FunctionAnalysisManager &FAM) const {
  auto SkipHot = [&]() {
    if (!ClHotPercentileCutoff.getNumOccurrences())
      return false;
    auto &MAMProxy = FAM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
    ProfileSummaryInfo *PSI =
        MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
    if (!PSI || !PSI->hasProfileSummary()) {
      ++NumNoProfileSummaryFuncs;
      return false;
    }
    return PSI->isFunctionHotInCallGraphNthPercentile(
        ClHotPercentileCutoff, &F, FAM.getResult<BlockFrequencyAnalysis>(F));
  };

  auto SkipRandom = [&]() {
    if (!ClRandomKeepRate.getNumOccurrences())
      return false;
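    // A single Bernoulli draw with probability ClRandomKeepRate decides
    // whether to keep (instrument) the function; skipping is the
    // complementary outcome.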
    std::bernoulli_distribution D(ClRandomKeepRate);
    return !D(*Rng);
  };

  bool Skip = SkipRandom() || SkipHot();
  emitRemark(F, FAM.getResult<OptimizationRemarkEmitterAnalysis>(F), Skip);
  return Skip;
}

void HWAddressSanitizer::sanitizeFunction(Function &F,
                                          FunctionAnalysisManager &FAM) {
  if (&F == HwasanCtorFunction)
    return;

  // Do not apply any instrumentation for naked functions.
  if (F.hasFnAttribute(Attribute::Naked))
    return;

  if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
    return;

  if (F.empty())
    return;

  NumTotalFuncs++;

  OptimizationRemarkEmitter &ORE =
      FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  if (selectiveInstrumentationShouldSkip(F, FAM))
    return;

  NumInstrumentedFuncs++;

  LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");

  SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
  SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
  SmallVector<Instruction *, 8> LandingPadVec;
  const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);

  memtag::StackInfoBuilder SIB(SSI, DEBUG_TYPE);
  for (auto &Inst : instructions(F)) {
    if (InstrumentStack) {
      SIB.visit(ORE, Inst);
    }

    if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
      LandingPadVec.push_back(&Inst);

    getInterestingMemoryOperands(ORE, &Inst, TLI, OperandsToInstrument);

    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
      if (!ignoreMemIntrinsic(ORE, MI))
        IntrinToInstrument.push_back(MI);
  }

  memtag::StackInfo &SInfo = SIB.get();

  initializeCallbacks(*F.getParent());

  if (!LandingPadVec.empty())
    instrumentLandingPads(LandingPadVec);

  if (SInfo.AllocasToInstrument.empty() && F.hasPersonalityFn() &&
      F.getPersonalityFn()->getName() == kHwasanPersonalityThunkName) {
    // __hwasan_personality_thunk is a no-op for functions without an
    // instrumented stack, so we can drop it.
    F.setPersonalityFn(nullptr);
  }

  if (SInfo.AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
      IntrinToInstrument.empty())
    return;

  assert(!ShadowBase);

  BasicBlock::iterator InsertPt = F.getEntryBlock().begin();
  IRBuilder<> EntryIRB(&F.getEntryBlock(), InsertPt);
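  // Emit the prologue: materialize the shadow base and, when frame records
  // are enabled and there are allocas to instrument, a frame record in the
  // thread's ring buffer.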
  emitPrologue(EntryIRB,
               /*WithFrameRecord*/ ClRecordStackHistory != none &&
                   Mapping.withFrameRecord() &&
                   !SInfo.AllocasToInstrument.empty());

  if (!SInfo.AllocasToInstrument.empty()) {
    const DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
    const PostDominatorTree &PDT = FAM.getResult<PostDominatorTreeAnalysis>(F);
    const LoopInfo &LI = FAM.getResult<LoopAnalysis>(F);
    Value *StackTag = getStackBaseTag(EntryIRB);
    Value *UARTag = getUARTag(EntryIRB);
    instrumentStack(SInfo, StackTag, UARTag, DT, PDT, LI);
  }

  // If we split the entry block, move any allocas that were originally in the
  // entry block back into the entry block so that they aren't treated as
  // dynamic allocas.
  if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
    InsertPt = F.getEntryBlock().begin();
    for (Instruction &I :
         llvm::make_early_inc_range(*EntryIRB.GetInsertBlock())) {
      if (auto *AI = dyn_cast<AllocaInst>(&I))
        if (isa<ConstantInt>(AI->getArraySize()))
          I.moveBefore(F.getEntryBlock(), InsertPt);
    }
  }

  DominatorTree *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
  PostDominatorTree *PDT = FAM.getCachedResult<PostDominatorTreeAnalysis>(F);
  LoopInfo *LI = FAM.getCachedResult<LoopAnalysis>(F);
  DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Lazy);
  const DataLayout &DL = F.getDataLayout();
  for (auto &Operand : OperandsToInstrument)
    instrumentMemAccess(Operand, DTU, LI, DL);
  DTU.flush();

  if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
    for (auto *Inst : IntrinToInstrument)
      instrumentMemIntrinsic(Inst);
  }

  ShadowBase = nullptr;
  StackBaseTag = nullptr;
  CachedFP = nullptr;
}

void HWAddressSanitizer::instrumentGlobal(GlobalVariable *GV, uint8_t Tag) {
  assert(!UsePageAliases);
  Constant *Initializer = GV->getInitializer();
  uint64_t SizeInBytes =
      M.getDataLayout().getTypeAllocSize(Initializer->getType());
  uint64_t NewSize = alignTo(SizeInBytes, Mapping.getObjectAlignment());
  if (SizeInBytes != NewSize) {
    // Pad the initializer out to the next multiple of 16 bytes and add the
    // required short granule tag.
    std::vector<uint8_t> Init(NewSize - SizeInBytes, 0);
    Init.back() = Tag;
    Constant *Padding = ConstantDataArray::get(*C, Init);
    Initializer = ConstantStruct::getAnon({Initializer, Padding});
  }

  auto *NewGV = new GlobalVariable(M, Initializer->getType(), GV->isConstant(),
                                   GlobalValue::ExternalLinkage, Initializer,
                                   GV->getName() + ".hwasan");
  NewGV->copyAttributesFrom(GV);
  NewGV->setLinkage(GlobalValue::PrivateLinkage);
  NewGV->copyMetadata(GV, 0);
  NewGV->setAlignment(
      std::max(GV->getAlign().valueOrOne(), Mapping.getObjectAlignment()));

  // It is invalid to ICF two globals that have different tags. In the case
  // where the size of the global is a multiple of the tag granularity, the
  // contents of the globals may be the same but the tags (i.e. symbol values)
  // may be different, and the symbols are not considered during ICF. In the
  // case where the size is not a multiple of the granularity, the short
  // granule tags would discriminate two globals with different tags, but
  // there would otherwise be nothing stopping such a global from being
  // incorrectly ICF'd with an uninstrumented (i.e. tag 0) global that
  // happened to have the short granule tag in the last byte.
  NewGV->setUnnamedAddr(GlobalValue::UnnamedAddr::None);

  // Descriptor format (assuming little-endian):
  //   bytes 0-3: relative address of global
  //   bytes 4-6: size of global (16MB ought to be enough for anyone, but in
  //              case it isn't, we create multiple descriptors)
  //   byte 7: tag
  auto *DescriptorTy = StructType::get(Int32Ty, Int32Ty);
  const uint64_t MaxDescriptorSize = 0xfffff0;
  for (uint64_t DescriptorPos = 0; DescriptorPos < SizeInBytes;
       DescriptorPos += MaxDescriptorSize) {
    auto *Descriptor =
        new GlobalVariable(M, DescriptorTy, true, GlobalValue::PrivateLinkage,
                           nullptr, GV->getName() + ".hwasan.descriptor");
    auto *GVRelPtr = ConstantExpr::getTrunc(
        ConstantExpr::getAdd(
            ConstantExpr::getSub(
                ConstantExpr::getPtrToInt(NewGV, Int64Ty),
                ConstantExpr::getPtrToInt(Descriptor, Int64Ty)),
            ConstantInt::get(Int64Ty, DescriptorPos)),
        Int32Ty);
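    // Pack the descriptor's second word: the 24-bit size goes in the low bits
    // and the tag in the top byte.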
    uint32_t Size = std::min(SizeInBytes - DescriptorPos, MaxDescriptorSize);
    auto *SizeAndTag = ConstantInt::get(Int32Ty, Size | (uint32_t(Tag) << 24));
    Descriptor->setComdat(NewGV->getComdat());
    Descriptor->setInitializer(ConstantStruct::getAnon({GVRelPtr, SizeAndTag}));
    Descriptor->setSection("hwasan_globals");
    Descriptor->setMetadata(LLVMContext::MD_associated,
                            MDNode::get(*C, ValueAsMetadata::get(NewGV)));
    appendToCompilerUsed(M, Descriptor);
  }

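  // Alias the original symbol to the tagged address of the new global: the
  // tag is folded into the pointer's high bits via PointerTagShift, so
  // references to the original name resolve to a tagged address.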
  Constant *Aliasee = ConstantExpr::getIntToPtr(
      ConstantExpr::getAdd(
          ConstantExpr::getPtrToInt(NewGV, Int64Ty),
          ConstantInt::get(Int64Ty, uint64_t(Tag) << PointerTagShift)),
      GV->getType());
  auto *Alias = GlobalAlias::create(GV->getValueType(), GV->getAddressSpace(),
                                    GV->getLinkage(), "", Aliasee, &M);
  Alias->setVisibility(GV->getVisibility());
  Alias->takeName(GV);
  GV->replaceAllUsesWith(Alias);
  GV->eraseFromParent();
}

void HWAddressSanitizer::instrumentGlobals() {
  std::vector<GlobalVariable *> Globals;
  for (GlobalVariable &GV : M.globals()) {
    if (GV.hasSanitizerMetadata() && GV.getSanitizerMetadata().NoHWAddress)
      continue;

    if (GV.isDeclarationForLinker() || GV.getName().starts_with("llvm.") ||
        GV.isThreadLocal())
      continue;

    // Common symbols can't have aliases point to them, so they can't be
    // tagged.
    if (GV.hasCommonLinkage())
      continue;

    // Globals with custom sections may be used in __start_/__stop_
    // enumeration, which would be broken both by adding tags and potentially
    // by the extra padding/alignment that we insert.
    if (GV.hasSection())
      continue;

    Globals.push_back(&GV);
  }

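  // Seed the tag sequence from a hash of the source file name, presumably so
  // that tag assignment is deterministic for a given translation unit while
  // still differing between translation units.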
  MD5 Hasher;
  Hasher.update(M.getSourceFileName());
  MD5::MD5Result Hash;
  Hasher.final(Hash);
  uint8_t Tag = Hash[0];

  assert(TagMaskByte >= 16);

  for (GlobalVariable *GV : Globals) {
    // Don't allow globals to be tagged with something that looks like a
    // short-granule tag, otherwise we lose inter-granule overflow detection,
    // as the fast path shadow-vs-address check succeeds.
    if (Tag < 16 || Tag > TagMaskByte)
      Tag = 16;
    instrumentGlobal(GV, Tag++);
  }
}

void HWAddressSanitizer::instrumentPersonalityFunctions() {
  // We need to untag stack frames as we unwind past them. That is the job of
  // the personality function wrapper, which either wraps an existing
  // personality function or acts as a personality function on its own. Each
  // function that has a personality function or that can be unwound past has
  // its personality function changed to a thunk that calls the personality
  // function wrapper in the runtime.
  MapVector<Constant *, std::vector<Function *>> PersonalityFns;
  for (Function &F : M) {
    if (F.isDeclaration() || !F.hasFnAttribute(Attribute::SanitizeHWAddress))
      continue;

    if (F.hasPersonalityFn()) {
      PersonalityFns[F.getPersonalityFn()->stripPointerCasts()].push_back(&F);
    } else if (!F.hasFnAttribute(Attribute::NoUnwind)) {
      PersonalityFns[nullptr].push_back(&F);
    }
  }

  if (PersonalityFns.empty())
    return;

  FunctionCallee HwasanPersonalityWrapper = M.getOrInsertFunction(
      "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty,
      PtrTy, PtrTy, PtrTy, PtrTy, PtrTy);
  FunctionCallee UnwindGetGR = M.getOrInsertFunction("_Unwind_GetGR", VoidTy);
  FunctionCallee UnwindGetCFA = M.getOrInsertFunction("_Unwind_GetCFA", VoidTy);

  for (auto &P : PersonalityFns) {
    std::string ThunkName = kHwasanPersonalityThunkName;
    if (P.first)
      ThunkName += ("." + P.first->getName()).str();
    FunctionType *ThunkFnTy = FunctionType::get(
        Int32Ty, {Int32Ty, Int32Ty, Int64Ty, PtrTy, PtrTy}, false);
    bool IsLocal = P.first && (!isa<GlobalValue>(P.first) ||
                               cast<GlobalValue>(P.first)->hasLocalLinkage());
    auto *ThunkFn = Function::Create(ThunkFnTy,
                                     IsLocal ? GlobalValue::InternalLinkage
                                             : GlobalValue::LinkOnceODRLinkage,
                                     ThunkName, &M);
    // TODO: think about other attributes as well.
    if (any_of(P.second, [](const Function *F) {
          return F->hasFnAttribute("branch-target-enforcement");
        })) {
      ThunkFn->addFnAttr("branch-target-enforcement");
    }
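    // Non-local thunks are emitted linkonce_odr with hidden visibility in a
    // comdat, so identical thunks from different translation units can be
    // folded by the linker.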
    if (!IsLocal) {
      ThunkFn->setVisibility(GlobalValue::HiddenVisibility);
      ThunkFn->setComdat(M.getOrInsertComdat(ThunkName));
    }

    auto *BB = BasicBlock::Create(*C, "entry", ThunkFn);
    IRBuilder<> IRB(BB);
    CallInst *WrapperCall = IRB.CreateCall(
        HwasanPersonalityWrapper,
        {ThunkFn->getArg(0), ThunkFn->getArg(1), ThunkFn->getArg(2),
         ThunkFn->getArg(3), ThunkFn->getArg(4),
         P.first ? P.first : Constant::getNullValue(PtrTy),
         UnwindGetGR.getCallee(), UnwindGetCFA.getCallee()});
    WrapperCall->setTailCall();
    IRB.CreateRet(WrapperCall);

    for (Function *F : P.second)
      F->setPersonalityFn(ThunkFn);
  }
}

void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple,
                                             bool InstrumentWithCalls,
                                             bool CompileKernel) {
  // Start with defaults.
  Scale = kDefaultShadowScale;
  Kind = OffsetKind::kTls;
  WithFrameRecord = true;

  // Tune for the target.
  if (TargetTriple.isOSFuchsia()) {
    // Fuchsia is always PIE, which means that the beginning of the address
    // space is always available.
    SetFixed(0);
  } else if (CompileKernel || InstrumentWithCalls) {
    SetFixed(0);
    WithFrameRecord = false;
  }

  WithFrameRecord = optOr(ClFrameRecords, WithFrameRecord);

  // Apply the last of ClMappingOffset and ClMappingOffsetDynamic.
  Kind = optOr(ClMappingOffsetDynamic, Kind);
  if (ClMappingOffset.getNumOccurrences() > 0 &&
      !(ClMappingOffsetDynamic.getNumOccurrences() > 0 &&
        ClMappingOffsetDynamic.getPosition() > ClMappingOffset.getPosition())) {
    SetFixed(ClMappingOffset);
  }
}