1//===- HWAddressSanitizer.cpp - memory access error detector --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
/// This file is a part of HWAddressSanitizer, a basic correctness checker for
/// memory accesses based on tagged addressing.
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
15#include "llvm/ADT/MapVector.h"
16#include "llvm/ADT/STLExtras.h"
17#include "llvm/ADT/SmallVector.h"
18#include "llvm/ADT/Statistic.h"
19#include "llvm/ADT/StringExtras.h"
20#include "llvm/ADT/StringRef.h"
21#include "llvm/Analysis/BlockFrequencyInfo.h"
22#include "llvm/Analysis/DomTreeUpdater.h"
23#include "llvm/Analysis/GlobalsModRef.h"
24#include "llvm/Analysis/OptimizationRemarkEmitter.h"
25#include "llvm/Analysis/PostDominators.h"
26#include "llvm/Analysis/ProfileSummaryInfo.h"
27#include "llvm/Analysis/StackSafetyAnalysis.h"
28#include "llvm/Analysis/TargetLibraryInfo.h"
29#include "llvm/Analysis/ValueTracking.h"
30#include "llvm/BinaryFormat/Dwarf.h"
31#include "llvm/BinaryFormat/ELF.h"
32#include "llvm/IR/Attributes.h"
33#include "llvm/IR/BasicBlock.h"
34#include "llvm/IR/Constant.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DataLayout.h"
37#include "llvm/IR/DerivedTypes.h"
38#include "llvm/IR/Dominators.h"
39#include "llvm/IR/Function.h"
40#include "llvm/IR/IRBuilder.h"
41#include "llvm/IR/InlineAsm.h"
42#include "llvm/IR/InstIterator.h"
43#include "llvm/IR/Instruction.h"
44#include "llvm/IR/Instructions.h"
45#include "llvm/IR/IntrinsicInst.h"
46#include "llvm/IR/Intrinsics.h"
47#include "llvm/IR/LLVMContext.h"
48#include "llvm/IR/MDBuilder.h"
49#include "llvm/IR/Module.h"
50#include "llvm/IR/Type.h"
51#include "llvm/IR/Value.h"
52#include "llvm/Support/Casting.h"
53#include "llvm/Support/CommandLine.h"
54#include "llvm/Support/Debug.h"
55#include "llvm/Support/MD5.h"
56#include "llvm/Support/RandomNumberGenerator.h"
57#include "llvm/Support/raw_ostream.h"
58#include "llvm/TargetParser/Triple.h"
59#include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
60#include "llvm/Transforms/Utils/BasicBlockUtils.h"
61#include "llvm/Transforms/Utils/Instrumentation.h"
62#include "llvm/Transforms/Utils/Local.h"
63#include "llvm/Transforms/Utils/MemoryTaggingSupport.h"
64#include "llvm/Transforms/Utils/ModuleUtils.h"
65#include "llvm/Transforms/Utils/PromoteMemToReg.h"
66#include <optional>
67#include <random>
68
69using namespace llvm;
70
71#define DEBUG_TYPE "hwasan"
72
73const char kHwasanModuleCtorName[] = "hwasan.module_ctor";
74const char kHwasanNoteName[] = "hwasan.note";
75const char kHwasanInitName[] = "__hwasan_init";
76const char kHwasanPersonalityThunkName[] = "__hwasan_personality_thunk";
77
78const char kHwasanShadowMemoryDynamicAddress[] =
79 "__hwasan_shadow_memory_dynamic_address";
80
// Access sizes are powers of two: 1, 2, 4, 8, 16.
82static const size_t kNumberOfAccessSizes = 5;
83
84static const size_t kDefaultShadowScale = 4;
85
86static const unsigned kShadowBaseAlignment = 32;
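
// With the default shadow scale of 4, one shadow byte describes a 16-byte
// granule of application memory. For example (an illustrative address, not a
// real mapping), an access at Mem = 0x1000 is checked against the shadow byte
// at ShadowBase + (0x1000 >> 4) = ShadowBase + 0x100.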
87
88namespace {
89enum class OffsetKind {
90 kFixed = 0,
91 kGlobal,
92 kIfunc,
93 kTls,
94};
95}
96
97static cl::opt<std::string>
98 ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix",
99 cl::desc("Prefix for memory access callbacks"),
100 cl::Hidden, cl::init(Val: "__hwasan_"));
101
102static cl::opt<bool> ClKasanMemIntrinCallbackPrefix(
103 "hwasan-kernel-mem-intrinsic-prefix",
104 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
105 cl::init(Val: false));
106
107static cl::opt<bool> ClInstrumentWithCalls(
108 "hwasan-instrument-with-calls",
109 cl::desc("instrument reads and writes with callbacks"), cl::Hidden,
110 cl::init(Val: false));
111
112static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
113 cl::desc("instrument read instructions"),
114 cl::Hidden, cl::init(Val: true));
115
116static cl::opt<bool>
117 ClInstrumentWrites("hwasan-instrument-writes",
118 cl::desc("instrument write instructions"), cl::Hidden,
119 cl::init(Val: true));
120
121static cl::opt<bool> ClInstrumentAtomics(
122 "hwasan-instrument-atomics",
123 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
124 cl::init(Val: true));
125
126static cl::opt<bool> ClInstrumentByval("hwasan-instrument-byval",
127 cl::desc("instrument byval arguments"),
128 cl::Hidden, cl::init(Val: true));
129
130static cl::opt<bool>
131 ClRecover("hwasan-recover",
132 cl::desc("Enable recovery mode (continue-after-error)."),
133 cl::Hidden, cl::init(Val: false));
134
135static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
136 cl::desc("instrument stack (allocas)"),
137 cl::Hidden, cl::init(Val: true));
138
static cl::opt<bool>
    ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true),
                     cl::desc("Use Stack Safety analysis results"),
                     cl::Optional);
143
static cl::opt<size_t> ClMaxLifetimes(
    "hwasan-max-lifetimes-for-alloca", cl::init(3), cl::ReallyHidden,
    cl::desc("How many lifetime ends to handle for a single alloca."),
    cl::Optional);
149
150static cl::opt<bool>
151 ClUseAfterScope("hwasan-use-after-scope",
152 cl::desc("detect use after scope within function"),
153 cl::Hidden, cl::init(Val: true));
154
155static cl::opt<bool> ClGenerateTagsWithCalls(
156 "hwasan-generate-tags-with-calls",
157 cl::desc("generate new tags with runtime library calls"), cl::Hidden,
158 cl::init(Val: false));
159
160static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"),
161 cl::Hidden, cl::init(Val: false));
162
163static cl::opt<bool> ClAllGlobals(
164 "hwasan-all-globals",
165 cl::desc(
166 "Instrument globals, even those within user-defined sections. Warning: "
167 "This may break existing code which walks globals via linker-generated "
168 "symbols, expects certain globals to be contiguous with each other, or "
169 "makes other assumptions which are invalidated by HWASan "
170 "instrumentation."),
171 cl::Hidden, cl::init(Val: false));
172
173static cl::opt<int> ClMatchAllTag(
174 "hwasan-match-all-tag",
175 cl::desc("don't report bad accesses via pointers with this tag"),
176 cl::Hidden, cl::init(Val: -1));
177
178static cl::opt<bool>
179 ClEnableKhwasan("hwasan-kernel",
180 cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
181 cl::Hidden, cl::init(Val: false));
182
// These flags allow changing the shadow mapping and control how shadow memory
// is accessed. The shadow mapping looks like:
//    Shadow = (Mem >> scale) + offset
186
187static cl::opt<uint64_t>
188 ClMappingOffset("hwasan-mapping-offset",
189 cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
190 cl::Hidden);
191
192static cl::opt<OffsetKind> ClMappingOffsetDynamic(
193 "hwasan-mapping-offset-dynamic",
194 cl::desc("HWASan shadow mapping dynamic offset location"), cl::Hidden,
195 cl::values(clEnumValN(OffsetKind::kGlobal, "global", "Use global"),
196 clEnumValN(OffsetKind::kIfunc, "ifunc", "Use ifunc global"),
197 clEnumValN(OffsetKind::kTls, "tls", "Use TLS")));
198
199static cl::opt<bool>
200 ClFrameRecords("hwasan-with-frame-record",
201 cl::desc("Use ring buffer for stack allocations"),
202 cl::Hidden);
203
204static cl::opt<int> ClHotPercentileCutoff("hwasan-percentile-cutoff-hot",
205 cl::desc("Hot percentile cutoff."));
206
static cl::opt<float>
    ClRandomKeepRate("hwasan-random-rate",
                     cl::desc("Probability in the range [0.0, 1.0] of keeping "
                              "instrumentation for a function. "
                              "Note: if both this and the hot percentile "
                              "cutoff are supplied, instrumentation can be "
                              "skipped for either reason."));
214
215static cl::opt<bool> ClStaticLinking(
216 "hwasan-static-linking",
217 cl::desc("Don't use .note.hwasan.globals section to instrument globals "
218 "from loadable libraries. "
219 "Note: in static binaries, the global variables section can be "
220 "accessed directly via linker-provided "
221 "__start_hwasan_globals and __stop_hwasan_globals symbols"),
222 cl::Hidden, cl::init(Val: false));
223
224STATISTIC(NumTotalFuncs, "Number of total funcs");
225STATISTIC(NumInstrumentedFuncs, "Number of instrumented funcs");
STATISTIC(NumNoProfileSummaryFuncs, "Number of funcs without profile summary");
227
228// Mode for selecting how to insert frame record info into the stack ring
229// buffer.
230enum RecordStackHistoryMode {
231 // Do not record frame record info.
232 none,
233
234 // Insert instructions into the prologue for storing into the stack ring
235 // buffer directly.
236 instr,
237
238 // Add a call to __hwasan_add_frame_record in the runtime.
239 libcall,
240};
241
242static cl::opt<RecordStackHistoryMode> ClRecordStackHistory(
243 "hwasan-record-stack-history",
244 cl::desc("Record stack frames with tagged allocations in a thread-local "
245 "ring buffer"),
246 cl::values(clEnumVal(none, "Do not record stack ring history"),
247 clEnumVal(instr, "Insert instructions into the prologue for "
248 "storing into the stack ring buffer directly"),
249 clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for "
250 "storing into the stack ring buffer")),
251 cl::Hidden, cl::init(Val: instr));
252
253static cl::opt<bool>
254 ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
255 cl::desc("instrument memory intrinsics"),
256 cl::Hidden, cl::init(Val: true));
257
258static cl::opt<bool>
259 ClInstrumentLandingPads("hwasan-instrument-landing-pads",
260 cl::desc("instrument landing pads"), cl::Hidden,
261 cl::init(Val: false));
262
263static cl::opt<bool> ClUseShortGranules(
264 "hwasan-use-short-granules",
265 cl::desc("use short granules in allocas and outlined checks"), cl::Hidden,
266 cl::init(Val: false));
267
268static cl::opt<bool> ClInstrumentPersonalityFunctions(
269 "hwasan-instrument-personality-functions",
270 cl::desc("instrument personality functions"), cl::Hidden);
271
272static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
273 cl::desc("inline all checks"),
274 cl::Hidden, cl::init(Val: false));
275
static cl::opt<bool> ClInlineFastPathChecks("hwasan-inline-fast-path-checks",
                                            cl::desc("inline the fast path of "
                                                     "checks"),
                                            cl::Hidden, cl::init(false));
279
280// Enabled from clang by "-fsanitize-hwaddress-experimental-aliasing".
281static cl::opt<bool> ClUsePageAliases("hwasan-experimental-use-page-aliases",
282 cl::desc("Use page aliasing in HWASan"),
283 cl::Hidden, cl::init(Val: false));
284
285namespace {
286
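// Returns the command-line flag's value if it was explicitly passed, and
// Other (the computed default) otherwise. For example, optOr(ClRecover,
// Recover) lets -hwasan-recover override the pass parameter.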
287template <typename T> T optOr(cl::opt<T> &Opt, T Other) {
288 return Opt.getNumOccurrences() ? Opt : Other;
289}
290
291bool shouldUsePageAliases(const Triple &TargetTriple) {
292 return ClUsePageAliases && TargetTriple.getArch() == Triple::x86_64;
293}
294
295bool shouldInstrumentStack(const Triple &TargetTriple) {
296 return !shouldUsePageAliases(TargetTriple) && ClInstrumentStack;
297}
298
299bool shouldInstrumentWithCalls(const Triple &TargetTriple) {
300 return optOr(Opt&: ClInstrumentWithCalls, Other: TargetTriple.getArch() == Triple::x86_64);
301}
302
303bool mightUseStackSafetyAnalysis(bool DisableOptimization) {
304 return optOr(Opt&: ClUseStackSafety, Other: !DisableOptimization);
305}
306
307bool shouldUseStackSafetyAnalysis(const Triple &TargetTriple,
308 bool DisableOptimization) {
309 return shouldInstrumentStack(TargetTriple) &&
310 mightUseStackSafetyAnalysis(DisableOptimization);
311}
312
313bool shouldDetectUseAfterScope(const Triple &TargetTriple) {
314 return ClUseAfterScope && shouldInstrumentStack(TargetTriple);
315}
316
317/// An instrumentation pass implementing detection of addressability bugs
318/// using tagged pointers.
319class HWAddressSanitizer {
320public:
321 HWAddressSanitizer(Module &M, bool CompileKernel, bool Recover,
322 const StackSafetyGlobalInfo *SSI)
323 : M(M), SSI(SSI) {
324 this->Recover = optOr(Opt&: ClRecover, Other: Recover);
325 this->CompileKernel = optOr(Opt&: ClEnableKhwasan, Other: CompileKernel);
326 this->Rng = ClRandomKeepRate.getNumOccurrences() ? M.createRNG(DEBUG_TYPE)
327 : nullptr;
328
329 initializeModule();
330 }
331
332 void sanitizeFunction(Function &F, FunctionAnalysisManager &FAM);
333
334private:
335 struct ShadowTagCheckInfo {
336 Instruction *TagMismatchTerm = nullptr;
337 Value *PtrLong = nullptr;
338 Value *AddrLong = nullptr;
339 Value *PtrTag = nullptr;
340 Value *MemTag = nullptr;
341 };
342
343 bool selectiveInstrumentationShouldSkip(Function &F,
344 FunctionAnalysisManager &FAM) const;
345 void initializeModule();
346 void createHwasanCtorComdat();
347 void createHwasanNote();
348
349 void initializeCallbacks(Module &M);
350
351 Value *getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val);
352
353 Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
354 Value *getShadowNonTls(IRBuilder<> &IRB);
355
356 void untagPointerOperand(Instruction *I, Value *Addr);
357 Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
358
359 int64_t getAccessInfo(bool IsWrite, unsigned AccessSizeIndex);
360 ShadowTagCheckInfo insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
361 DomTreeUpdater &DTU, LoopInfo *LI);
362 void instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
363 unsigned AccessSizeIndex,
364 Instruction *InsertBefore,
365 DomTreeUpdater &DTU, LoopInfo *LI);
366 void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
367 unsigned AccessSizeIndex,
368 Instruction *InsertBefore, DomTreeUpdater &DTU,
369 LoopInfo *LI);
370 bool ignoreMemIntrinsic(OptimizationRemarkEmitter &ORE, MemIntrinsic *MI);
371 void instrumentMemIntrinsic(MemIntrinsic *MI);
372 bool instrumentMemAccess(InterestingMemoryOperand &O, DomTreeUpdater &DTU,
373 LoopInfo *LI, const DataLayout &DL);
374 bool ignoreAccessWithoutRemark(Instruction *Inst, Value *Ptr);
375 bool ignoreAccess(OptimizationRemarkEmitter &ORE, Instruction *Inst,
376 Value *Ptr);
377
378 void getInterestingMemoryOperands(
379 OptimizationRemarkEmitter &ORE, Instruction *I,
380 const TargetLibraryInfo &TLI,
381 SmallVectorImpl<InterestingMemoryOperand> &Interesting);
382
383 void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
384 Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
385 Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
386 void instrumentStack(memtag::StackInfo &Info, Value *StackTag, Value *UARTag,
387 const DominatorTree &DT, const PostDominatorTree &PDT,
388 const LoopInfo &LI);
389 void instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
390 Value *getNextTagWithCall(IRBuilder<> &IRB);
391 Value *getStackBaseTag(IRBuilder<> &IRB);
392 Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, unsigned AllocaNo);
393 Value *getUARTag(IRBuilder<> &IRB);
394
395 Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB);
396 Value *applyTagMask(IRBuilder<> &IRB, Value *OldTag);
397 unsigned retagMask(unsigned AllocaNo);
398
399 void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);
400
401 void instrumentGlobal(GlobalVariable *GV, uint8_t Tag);
402 void instrumentGlobals();
403
404 Value *getCachedFP(IRBuilder<> &IRB);
405 Value *getFrameRecordInfo(IRBuilder<> &IRB);
406
407 void instrumentPersonalityFunctions();
408
409 LLVMContext *C;
410 Module &M;
411 const StackSafetyGlobalInfo *SSI;
412 Triple TargetTriple;
413 std::unique_ptr<RandomNumberGenerator> Rng;
414
415 /// This struct defines the shadow mapping using the rule:
416 /// If `kFixed`, then
417 /// shadow = (mem >> Scale) + Offset.
418 /// If `kGlobal`, then
419 /// extern char* __hwasan_shadow_memory_dynamic_address;
420 /// shadow = (mem >> Scale) + __hwasan_shadow_memory_dynamic_address
421 /// If `kIfunc`, then
422 /// extern char __hwasan_shadow[];
423 /// shadow = (mem >> Scale) + &__hwasan_shadow
424 /// If `kTls`, then
425 /// extern char *__hwasan_tls;
426 /// shadow = (mem>>Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment)
427 ///
428 /// If WithFrameRecord is true, then __hwasan_tls will be used to access the
429 /// ring buffer for storing stack allocations on targets that support it.
430 class ShadowMapping {
431 OffsetKind Kind;
432 uint64_t Offset;
433 uint8_t Scale;
434 bool WithFrameRecord;
435
436 void SetFixed(uint64_t O) {
437 Kind = OffsetKind::kFixed;
438 Offset = O;
439 }
440
441 public:
442 void init(Triple &TargetTriple, bool InstrumentWithCalls,
443 bool CompileKernel);
444 Align getObjectAlignment() const { return Align(1ULL << Scale); }
445 bool isInGlobal() const { return Kind == OffsetKind::kGlobal; }
446 bool isInIfunc() const { return Kind == OffsetKind::kIfunc; }
447 bool isInTls() const { return Kind == OffsetKind::kTls; }
448 bool isFixed() const { return Kind == OffsetKind::kFixed; }
449 uint8_t scale() const { return Scale; };
450 uint64_t offset() const {
451 assert(isFixed());
452 return Offset;
453 };
454 bool withFrameRecord() const { return WithFrameRecord; };
455 };
456
457 ShadowMapping Mapping;
458
459 Type *VoidTy = Type::getVoidTy(C&: M.getContext());
460 Type *IntptrTy = M.getDataLayout().getIntPtrType(C&: M.getContext());
461 PointerType *PtrTy = PointerType::getUnqual(C&: M.getContext());
462 Type *Int8Ty = Type::getInt8Ty(C&: M.getContext());
463 Type *Int32Ty = Type::getInt32Ty(C&: M.getContext());
464 Type *Int64Ty = Type::getInt64Ty(C&: M.getContext());
465
466 bool CompileKernel;
467 bool Recover;
468 bool OutlinedChecks;
469 bool InlineFastPath;
470 bool UseShortGranules;
471 bool InstrumentLandingPads;
472 bool InstrumentWithCalls;
473 bool InstrumentStack;
474 bool InstrumentGlobals;
475 bool DetectUseAfterScope;
476 bool UsePageAliases;
477 bool UseMatchAllCallback;
478
479 std::optional<uint8_t> MatchAllTag;
480
481 unsigned PointerTagShift;
482 uint64_t TagMaskByte;
483
484 Function *HwasanCtorFunction;
485
486 FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
487 FunctionCallee HwasanMemoryAccessCallbackSized[2];
488
489 FunctionCallee HwasanMemmove, HwasanMemcpy, HwasanMemset;
490 FunctionCallee HwasanHandleVfork;
491
492 FunctionCallee HwasanTagMemoryFunc;
493 FunctionCallee HwasanGenerateTagFunc;
494 FunctionCallee HwasanRecordFrameRecordFunc;
495
496 Constant *ShadowGlobal;
497
498 Value *ShadowBase = nullptr;
499 Value *StackBaseTag = nullptr;
500 Value *CachedFP = nullptr;
501 GlobalValue *ThreadPtrGlobal = nullptr;
502};
503
504} // end anonymous namespace
505
506PreservedAnalyses HWAddressSanitizerPass::run(Module &M,
507 ModuleAnalysisManager &MAM) {
  // Return early if the nosanitize_hwaddress module flag is present.
509 if (checkIfAlreadyInstrumented(M, Flag: "nosanitize_hwaddress"))
510 return PreservedAnalyses::all();
511 const StackSafetyGlobalInfo *SSI = nullptr;
512 const Triple &TargetTriple = M.getTargetTriple();
513 if (shouldUseStackSafetyAnalysis(TargetTriple, DisableOptimization: Options.DisableOptimization))
514 SSI = &MAM.getResult<StackSafetyGlobalAnalysis>(IR&: M);
515
516 HWAddressSanitizer HWASan(M, Options.CompileKernel, Options.Recover, SSI);
517 auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(IR&: M).getManager();
518 for (Function &F : M)
519 HWASan.sanitizeFunction(F, FAM);
520
521 PreservedAnalyses PA = PreservedAnalyses::none();
522 // DominatorTreeAnalysis, PostDominatorTreeAnalysis, and LoopAnalysis
523 // are incrementally updated throughout this pass whenever
524 // SplitBlockAndInsertIfThen is called.
525 PA.preserve<DominatorTreeAnalysis>();
526 PA.preserve<PostDominatorTreeAnalysis>();
527 PA.preserve<LoopAnalysis>();
528 // GlobalsAA is considered stateless and does not get invalidated unless
529 // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
530 // make changes that require GlobalsAA to be invalidated.
531 PA.abandon<GlobalsAA>();
532 return PA;
}

void HWAddressSanitizerPass::printPipeline(
535 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
536 static_cast<PassInfoMixin<HWAddressSanitizerPass> *>(this)->printPipeline(
537 OS, MapClassName2PassName);
538 OS << '<';
539 if (Options.CompileKernel)
540 OS << "kernel;";
541 if (Options.Recover)
542 OS << "recover";
543 OS << '>';
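  // With both options set, this prints e.g. "hwasan<kernel;recover>".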
544}
545
546void HWAddressSanitizer::createHwasanNote() {
547 // Create a note that contains pointers to the list of global
548 // descriptors. Adding a note to the output file will cause the linker to
549 // create a PT_NOTE program header pointing to the note that we can use to
550 // find the descriptor list starting from the program headers. A function
551 // provided by the runtime initializes the shadow memory for the globals by
552 // accessing the descriptor list via the note. The dynamic loader needs to
553 // call this function whenever a library is loaded.
554 //
  // We use a note for this instead of the more conventional approach of
  // having a global constructor pass a descriptor list pointer to the runtime
  // because of an initialization-order problem. With constructors we can
  // encounter the following problematic scenario:
559 //
560 // 1) library A depends on library B and also interposes one of B's symbols
561 // 2) B's constructors are called before A's (as required for correctness)
562 // 3) during construction, B accesses one of its "own" globals (actually
563 // interposed by A) and triggers a HWASAN failure due to the initialization
564 // for A not having happened yet
565 //
566 // Even without interposition it is possible to run into similar situations in
567 // cases where two libraries mutually depend on each other.
568 //
569 // We only need one note per binary, so put everything for the note in a
570 // comdat. This needs to be a comdat with an .init_array section to prevent
571 // newer versions of lld from discarding the note.
572 //
573 // Create the note even if we aren't instrumenting globals. This ensures that
574 // binaries linked from object files with both instrumented and
575 // non-instrumented globals will end up with a note, even if a comdat from an
576 // object file with non-instrumented globals is selected. The note is harmless
577 // if the runtime doesn't support it, since it will just be ignored.
578 Comdat *NoteComdat = M.getOrInsertComdat(Name: kHwasanModuleCtorName);
579
580 Type *Int8Arr0Ty = ArrayType::get(ElementType: Int8Ty, NumElements: 0);
581 auto *Start =
582 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
583 nullptr, "__start_hwasan_globals");
584 Start->setVisibility(GlobalValue::HiddenVisibility);
585 auto *Stop =
586 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
587 nullptr, "__stop_hwasan_globals");
588 Stop->setVisibility(GlobalValue::HiddenVisibility);
589
  // The name is null-terminated, so it occupies 8 bytes in total, which is
  // required in order to align the note properly.
592 auto *Name = ConstantDataArray::get(Context&: *C, Elts: "LLVM\0\0\0");
593
594 auto *NoteTy = StructType::get(elt1: Int32Ty, elts: Int32Ty, elts: Int32Ty, elts: Name->getType(),
595 elts: Int32Ty, elts: Int32Ty);
596 auto *Note =
597 new GlobalVariable(M, NoteTy, /*isConstant=*/true,
598 GlobalValue::PrivateLinkage, nullptr, kHwasanNoteName);
599 Note->setSection(".note.hwasan.globals");
600 Note->setComdat(NoteComdat);
601 Note->setAlignment(Align(4));
602
603 // The pointers in the note need to be relative so that the note ends up being
604 // placed in rodata, which is the standard location for notes.
605 auto CreateRelPtr = [&](Constant *Ptr) {
606 return ConstantExpr::getTrunc(
607 C: ConstantExpr::getSub(C1: ConstantExpr::getPtrToInt(C: Ptr, Ty: Int64Ty),
608 C2: ConstantExpr::getPtrToInt(C: Note, Ty: Int64Ty)),
609 Ty: Int32Ty);
610 };
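  // The resulting note payload is: n_namesz = 8, n_descsz = 8,
  // n_type = NT_LLVM_HWASAN_GLOBALS, the name "LLVM" (null-padded to 8
  // bytes), and a descriptor holding two 32-bit note-relative pointers to
  // __start_hwasan_globals and __stop_hwasan_globals.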
611 Note->setInitializer(ConstantStruct::getAnon(
612 V: {ConstantInt::get(Ty: Int32Ty, V: 8), // n_namesz
613 ConstantInt::get(Ty: Int32Ty, V: 8), // n_descsz
614 ConstantInt::get(Ty: Int32Ty, V: ELF::NT_LLVM_HWASAN_GLOBALS), // n_type
615 Name, CreateRelPtr(Start), CreateRelPtr(Stop)}));
616 appendToCompilerUsed(M, Values: Note);
617
618 // Create a zero-length global in hwasan_globals so that the linker will
619 // always create start and stop symbols.
620 auto *Dummy = new GlobalVariable(
621 M, Int8Arr0Ty, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
622 Constant::getNullValue(Ty: Int8Arr0Ty), "hwasan.dummy.global");
623 Dummy->setSection("hwasan_globals");
624 Dummy->setComdat(NoteComdat);
625 Dummy->setMetadata(KindID: LLVMContext::MD_associated,
626 Node: MDNode::get(Context&: *C, MDs: ValueAsMetadata::get(V: Note)));
627 appendToCompilerUsed(M, Values: Dummy);
628}
629
630void HWAddressSanitizer::createHwasanCtorComdat() {
631 std::tie(args&: HwasanCtorFunction, args: std::ignore) =
632 getOrCreateSanitizerCtorAndInitFunctions(
633 M, CtorName: kHwasanModuleCtorName, InitName: kHwasanInitName,
634 /*InitArgTypes=*/{},
635 /*InitArgs=*/{},
636 // This callback is invoked when the functions are created the first
637 // time. Hook them into the global ctors list in that case:
638 FunctionsCreatedCallback: [&](Function *Ctor, FunctionCallee) {
639 Comdat *CtorComdat = M.getOrInsertComdat(Name: kHwasanModuleCtorName);
640 Ctor->setComdat(CtorComdat);
641 appendToGlobalCtors(M, F: Ctor, Priority: 0, Data: Ctor);
642 });
643
644 // Do not create .note.hwasan.globals for static binaries, as it is only
645 // needed for instrumenting globals from dynamic libraries. In static
646 // binaries, the global variables section can be accessed directly via the
647 // __start_hwasan_globals and __stop_hwasan_globals symbols inserted by the
648 // linker.
649 if (!ClStaticLinking)
650 createHwasanNote();
651}
652
/// Module-level initialization.
///
/// Inserts a call to __hwasan_init into the module's constructor list.
656void HWAddressSanitizer::initializeModule() {
657 LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
658 TargetTriple = M.getTargetTriple();
659
  // HWASan may do short-granule checks that read the last byte of a granule
  // from argument memory, which is incompatible with writeonly attributes.
662 for (Function &F : M.functions())
663 removeASanIncompatibleFnAttributes(F, /*ReadsArgMem=*/true);
664
665 // x86_64 currently has two modes:
666 // - Intel LAM (default)
667 // - pointer aliasing (heap only)
668 bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
669 UsePageAliases = shouldUsePageAliases(TargetTriple);
670 InstrumentWithCalls = shouldInstrumentWithCalls(TargetTriple);
671 InstrumentStack = shouldInstrumentStack(TargetTriple);
672 DetectUseAfterScope = shouldDetectUseAfterScope(TargetTriple);
673 PointerTagShift = IsX86_64 ? 57 : 56;
674 TagMaskByte = IsX86_64 ? 0x3F : 0xFF;
675
676 Mapping.init(TargetTriple, InstrumentWithCalls, CompileKernel);
677
678 C = &(M.getContext());
679 IRBuilder<> IRB(*C);
680
681 HwasanCtorFunction = nullptr;
682
  // Older versions of Android do not have the required runtime support for
  // short granules, globals, or personality function instrumentation. On
  // other platforms we currently require using the latest version of the
  // runtime.
686 bool NewRuntime =
687 !TargetTriple.isAndroid() || !TargetTriple.isAndroidVersionLT(Major: 30);
688
689 UseShortGranules = optOr(Opt&: ClUseShortGranules, Other: NewRuntime);
690 OutlinedChecks = (TargetTriple.isAArch64() || TargetTriple.isRISCV64()) &&
691 TargetTriple.isOSBinFormatELF() &&
692 !optOr(Opt&: ClInlineAllChecks, Other: Recover);
693
694 // These platforms may prefer less inlining to reduce binary size.
695 InlineFastPath = optOr(Opt&: ClInlineFastPathChecks, Other: !(TargetTriple.isAndroid() ||
696 TargetTriple.isOSFuchsia()));
697
698 if (ClMatchAllTag.getNumOccurrences()) {
699 if (ClMatchAllTag != -1) {
700 MatchAllTag = ClMatchAllTag & 0xFF;
701 }
702 } else if (CompileKernel) {
703 MatchAllTag = 0xFF;
704 }
705 UseMatchAllCallback = !CompileKernel && MatchAllTag.has_value();
706
707 // If we don't have personality function support, fall back to landing pads.
708 InstrumentLandingPads = optOr(Opt&: ClInstrumentLandingPads, Other: !NewRuntime);
709
710 InstrumentGlobals =
711 !CompileKernel && !UsePageAliases && optOr(Opt&: ClGlobals, Other: NewRuntime);
712
713 if (!CompileKernel) {
714 if (InstrumentGlobals)
715 instrumentGlobals();
716
717 createHwasanCtorComdat();
718
719 bool InstrumentPersonalityFunctions =
720 optOr(Opt&: ClInstrumentPersonalityFunctions, Other: NewRuntime);
721 if (InstrumentPersonalityFunctions)
722 instrumentPersonalityFunctions();
723 }
724
725 if (!TargetTriple.isAndroid()) {
726 ThreadPtrGlobal = M.getOrInsertGlobal(Name: "__hwasan_tls", Ty: IntptrTy, CreateGlobalCallback: [&] {
727 auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
728 GlobalValue::ExternalLinkage, nullptr,
729 "__hwasan_tls", nullptr,
730 GlobalVariable::InitialExecTLSModel);
731 appendToCompilerUsed(M, Values: GV);
732 return GV;
733 });
734 }
735}
736
737void HWAddressSanitizer::initializeCallbacks(Module &M) {
738 IRBuilder<> IRB(*C);
739 const std::string MatchAllStr = UseMatchAllCallback ? "_match_all" : "";
740 FunctionType *HwasanMemoryAccessCallbackSizedFnTy,
741 *HwasanMemoryAccessCallbackFnTy, *HwasanMemTransferFnTy,
742 *HwasanMemsetFnTy;
743 if (UseMatchAllCallback) {
744 HwasanMemoryAccessCallbackSizedFnTy =
745 FunctionType::get(Result: VoidTy, Params: {IntptrTy, IntptrTy, Int8Ty}, isVarArg: false);
746 HwasanMemoryAccessCallbackFnTy =
747 FunctionType::get(Result: VoidTy, Params: {IntptrTy, Int8Ty}, isVarArg: false);
748 HwasanMemTransferFnTy =
749 FunctionType::get(Result: PtrTy, Params: {PtrTy, PtrTy, IntptrTy, Int8Ty}, isVarArg: false);
750 HwasanMemsetFnTy =
751 FunctionType::get(Result: PtrTy, Params: {PtrTy, Int32Ty, IntptrTy, Int8Ty}, isVarArg: false);
752 } else {
753 HwasanMemoryAccessCallbackSizedFnTy =
754 FunctionType::get(Result: VoidTy, Params: {IntptrTy, IntptrTy}, isVarArg: false);
755 HwasanMemoryAccessCallbackFnTy =
756 FunctionType::get(Result: VoidTy, Params: {IntptrTy}, isVarArg: false);
757 HwasanMemTransferFnTy =
758 FunctionType::get(Result: PtrTy, Params: {PtrTy, PtrTy, IntptrTy}, isVarArg: false);
759 HwasanMemsetFnTy =
760 FunctionType::get(Result: PtrTy, Params: {PtrTy, Int32Ty, IntptrTy}, isVarArg: false);
761 }
762
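  // The callback names are assembled from the pieces above, e.g.
  // __hwasan_load4, __hwasan_store8_noabort, or __hwasan_loadN_match_all.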
763 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
764 const std::string TypeStr = AccessIsWrite ? "store" : "load";
765 const std::string EndingStr = Recover ? "_noabort" : "";
766
767 HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
768 Name: ClMemoryAccessCallbackPrefix + TypeStr + "N" + MatchAllStr + EndingStr,
769 T: HwasanMemoryAccessCallbackSizedFnTy);
770
771 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
772 AccessSizeIndex++) {
773 HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
774 M.getOrInsertFunction(Name: ClMemoryAccessCallbackPrefix + TypeStr +
775 itostr(X: 1ULL << AccessSizeIndex) +
776 MatchAllStr + EndingStr,
777 T: HwasanMemoryAccessCallbackFnTy);
778 }
779 }
780
781 const std::string MemIntrinCallbackPrefix =
782 (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
783 ? std::string("")
784 : ClMemoryAccessCallbackPrefix;
785
786 HwasanMemmove = M.getOrInsertFunction(
787 Name: MemIntrinCallbackPrefix + "memmove" + MatchAllStr, T: HwasanMemTransferFnTy);
788 HwasanMemcpy = M.getOrInsertFunction(
789 Name: MemIntrinCallbackPrefix + "memcpy" + MatchAllStr, T: HwasanMemTransferFnTy);
790 HwasanMemset = M.getOrInsertFunction(
791 Name: MemIntrinCallbackPrefix + "memset" + MatchAllStr, T: HwasanMemsetFnTy);
792
793 HwasanTagMemoryFunc = M.getOrInsertFunction(Name: "__hwasan_tag_memory", RetTy: VoidTy,
794 Args: PtrTy, Args: Int8Ty, Args: IntptrTy);
795 HwasanGenerateTagFunc =
796 M.getOrInsertFunction(Name: "__hwasan_generate_tag", RetTy: Int8Ty);
797
798 HwasanRecordFrameRecordFunc =
799 M.getOrInsertFunction(Name: "__hwasan_add_frame_record", RetTy: VoidTy, Args: Int64Ty);
800
801 ShadowGlobal =
802 M.getOrInsertGlobal(Name: "__hwasan_shadow", Ty: ArrayType::get(ElementType: Int8Ty, NumElements: 0));
803
804 HwasanHandleVfork =
805 M.getOrInsertFunction(Name: "__hwasan_handle_vfork", RetTy: VoidTy, Args: IntptrTy);
806}
807
808Value *HWAddressSanitizer::getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val) {
809 // An empty inline asm with input reg == output reg.
810 // An opaque no-op cast, basically.
811 // This prevents code bloat as a result of rematerializing trivial definitions
812 // such as constants or global addresses at every load and store.
813 InlineAsm *Asm =
814 InlineAsm::get(Ty: FunctionType::get(Result: PtrTy, Params: {Val->getType()}, isVarArg: false),
815 AsmString: StringRef(""), Constraints: StringRef("=r,0"),
816 /*hasSideEffects=*/false);
817 return IRB.CreateCall(Callee: Asm, Args: {Val}, Name: ".hwasan.shadow");
818}
819
820Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
821 return getOpaqueNoopCast(IRB, Val: ShadowGlobal);
822}
823
824Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
825 if (Mapping.isFixed()) {
826 return getOpaqueNoopCast(
827 IRB, Val: ConstantExpr::getIntToPtr(
828 C: ConstantInt::get(Ty: IntptrTy, V: Mapping.offset()), Ty: PtrTy));
829 }
830
831 if (Mapping.isInIfunc())
832 return getDynamicShadowIfunc(IRB);
833
834 Value *GlobalDynamicAddress =
835 IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal(
836 Name: kHwasanShadowMemoryDynamicAddress, Ty: PtrTy);
837 return IRB.CreateLoad(Ty: PtrTy, Ptr: GlobalDynamicAddress);
838}
839
840bool HWAddressSanitizer::ignoreAccessWithoutRemark(Instruction *Inst,
841 Value *Ptr) {
842 // Do not instrument accesses from different address spaces; we cannot deal
843 // with them.
844 Type *PtrTy = cast<PointerType>(Val: Ptr->getType()->getScalarType());
845 if (PtrTy->getPointerAddressSpace() != 0)
846 return true;
847
848 // Ignore swifterror addresses.
849 // swifterror memory addresses are mem2reg promoted by instruction
850 // selection. As such they cannot have regular uses like an instrumentation
851 // function and it makes no sense to track them as memory.
852 if (Ptr->isSwiftError())
853 return true;
854
855 if (findAllocaForValue(V: Ptr)) {
856 if (!InstrumentStack)
857 return true;
858 if (SSI && SSI->stackAccessIsSafe(I: *Inst))
859 return true;
860 }
861
862 if (isa<GlobalVariable>(Val: getUnderlyingObject(V: Ptr))) {
863 if (!InstrumentGlobals)
864 return true;
865 // TODO: Optimize inbound global accesses, like Asan `instrumentMop`.
866 }
867
868 return false;
869}
870
871bool HWAddressSanitizer::ignoreAccess(OptimizationRemarkEmitter &ORE,
872 Instruction *Inst, Value *Ptr) {
873 bool Ignored = ignoreAccessWithoutRemark(Inst, Ptr);
874 if (Ignored) {
875 ORE.emit(
876 RemarkBuilder: [&]() { return OptimizationRemark(DEBUG_TYPE, "ignoreAccess", Inst); });
877 } else {
878 ORE.emit(RemarkBuilder: [&]() {
879 return OptimizationRemarkMissed(DEBUG_TYPE, "ignoreAccess", Inst);
880 });
881 }
882 return Ignored;
883}
884
885void HWAddressSanitizer::getInterestingMemoryOperands(
886 OptimizationRemarkEmitter &ORE, Instruction *I,
887 const TargetLibraryInfo &TLI,
888 SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
889 // Skip memory accesses inserted by another instrumentation.
890 if (I->hasMetadata(KindID: LLVMContext::MD_nosanitize))
891 return;
892
893 // Do not instrument the load fetching the dynamic shadow address.
894 if (ShadowBase == I)
895 return;
896
897 if (LoadInst *LI = dyn_cast<LoadInst>(Val: I)) {
898 if (!ClInstrumentReads || ignoreAccess(ORE, Inst: I, Ptr: LI->getPointerOperand()))
899 return;
900 Interesting.emplace_back(Args&: I, Args: LI->getPointerOperandIndex(), Args: false,
901 Args: LI->getType(), Args: LI->getAlign());
902 } else if (StoreInst *SI = dyn_cast<StoreInst>(Val: I)) {
903 if (!ClInstrumentWrites || ignoreAccess(ORE, Inst: I, Ptr: SI->getPointerOperand()))
904 return;
905 Interesting.emplace_back(Args&: I, Args: SI->getPointerOperandIndex(), Args: true,
906 Args: SI->getValueOperand()->getType(), Args: SI->getAlign());
907 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Val: I)) {
908 if (!ClInstrumentAtomics || ignoreAccess(ORE, Inst: I, Ptr: RMW->getPointerOperand()))
909 return;
910 Interesting.emplace_back(Args&: I, Args: RMW->getPointerOperandIndex(), Args: true,
911 Args: RMW->getValOperand()->getType(), Args: std::nullopt);
912 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(Val: I)) {
913 if (!ClInstrumentAtomics || ignoreAccess(ORE, Inst: I, Ptr: XCHG->getPointerOperand()))
914 return;
915 Interesting.emplace_back(Args&: I, Args: XCHG->getPointerOperandIndex(), Args: true,
916 Args: XCHG->getCompareOperand()->getType(),
917 Args: std::nullopt);
918 } else if (auto *CI = dyn_cast<CallInst>(Val: I)) {
919 for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
920 if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
921 ignoreAccess(ORE, Inst: I, Ptr: CI->getArgOperand(i: ArgNo)))
922 continue;
923 Type *Ty = CI->getParamByValType(ArgNo);
924 Interesting.emplace_back(Args&: I, Args&: ArgNo, Args: false, Args&: Ty, Args: Align(1));
925 }
926 maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI: &TLI);
927 }
928}
929
930static unsigned getPointerOperandIndex(Instruction *I) {
931 if (LoadInst *LI = dyn_cast<LoadInst>(Val: I))
932 return LI->getPointerOperandIndex();
933 if (StoreInst *SI = dyn_cast<StoreInst>(Val: I))
934 return SI->getPointerOperandIndex();
935 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Val: I))
936 return RMW->getPointerOperandIndex();
937 if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(Val: I))
938 return XCHG->getPointerOperandIndex();
939 report_fatal_error(reason: "Unexpected instruction");
940 return -1;
941}
942
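// Maps an access size in bits to an index into HwasanMemoryAccessCallback:
// 8 -> 0, 16 -> 1, 32 -> 2, 64 -> 3, 128 -> 4.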
943static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
944 size_t Res = llvm::countr_zero(Val: TypeSize / 8);
945 assert(Res < kNumberOfAccessSizes);
946 return Res;
947}
948
949void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
950 if (TargetTriple.isAArch64() || TargetTriple.getArch() == Triple::x86_64 ||
951 TargetTriple.isRISCV64())
952 return;
953
954 IRBuilder<> IRB(I);
955 Value *AddrLong = IRB.CreatePointerCast(V: Addr, DestTy: IntptrTy);
956 Value *UntaggedPtr =
957 IRB.CreateIntToPtr(V: untagPointer(IRB, PtrLong: AddrLong), DestTy: Addr->getType());
958 I->setOperand(i: getPointerOperandIndex(I), Val: UntaggedPtr);
959}
960
961Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
962 // Mem >> Scale
963 Value *Shadow = IRB.CreateLShr(LHS: Mem, RHS: Mapping.scale());
964 if (Mapping.isFixed() && Mapping.offset() == 0)
965 return IRB.CreateIntToPtr(V: Shadow, DestTy: PtrTy);
966 // (Mem >> Scale) + Offset
967 return IRB.CreatePtrAdd(Ptr: ShadowBase, Offset: Shadow);
968}
969
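// Packs the parameters of a check into a single integer. The same encoding is
// used by the outlined-check intrinsics and, masked with
// HWASanAccessInfo::RuntimeMask, by the inline trap instructions below, so
// the runtime can recover what kind of access failed.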
970int64_t HWAddressSanitizer::getAccessInfo(bool IsWrite,
971 unsigned AccessSizeIndex) {
972 return (CompileKernel << HWASanAccessInfo::CompileKernelShift) |
973 (MatchAllTag.has_value() << HWASanAccessInfo::HasMatchAllShift) |
974 (MatchAllTag.value_or(u: 0) << HWASanAccessInfo::MatchAllShift) |
975 (Recover << HWASanAccessInfo::RecoverShift) |
976 (IsWrite << HWASanAccessInfo::IsWriteShift) |
977 (AccessSizeIndex << HWASanAccessInfo::AccessSizeShift);
978}
979
980HWAddressSanitizer::ShadowTagCheckInfo
981HWAddressSanitizer::insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
982 DomTreeUpdater &DTU, LoopInfo *LI) {
983 ShadowTagCheckInfo R;
984
985 IRBuilder<> IRB(InsertBefore);
986
987 R.PtrLong = IRB.CreatePointerCast(V: Ptr, DestTy: IntptrTy);
988 R.PtrTag =
989 IRB.CreateTrunc(V: IRB.CreateLShr(LHS: R.PtrLong, RHS: PointerTagShift), DestTy: Int8Ty);
990 R.AddrLong = untagPointer(IRB, PtrLong: R.PtrLong);
991 Value *Shadow = memToShadow(Mem: R.AddrLong, IRB);
992 R.MemTag = IRB.CreateLoad(Ty: Int8Ty, Ptr: Shadow);
993 Value *TagMismatch = IRB.CreateICmpNE(LHS: R.PtrTag, RHS: R.MemTag);
994
995 if (MatchAllTag.has_value()) {
996 Value *TagNotIgnored = IRB.CreateICmpNE(
997 LHS: R.PtrTag, RHS: ConstantInt::get(Ty: R.PtrTag->getType(), V: *MatchAllTag));
998 TagMismatch = IRB.CreateAnd(LHS: TagMismatch, RHS: TagNotIgnored);
999 }
1000
1001 R.TagMismatchTerm = SplitBlockAndInsertIfThen(
1002 Cond: TagMismatch, SplitBefore: InsertBefore, Unreachable: false,
1003 BranchWeights: MDBuilder(*C).createUnlikelyBranchWeights(), DTU: &DTU, LI);
1004
1005 return R;
1006}
1007
1008void HWAddressSanitizer::instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
1009 unsigned AccessSizeIndex,
1010 Instruction *InsertBefore,
1011 DomTreeUpdater &DTU,
1012 LoopInfo *LI) {
1013 assert(!UsePageAliases);
1014 const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
1015
1016 if (InlineFastPath)
1017 InsertBefore =
1018 insertShadowTagCheck(Ptr, InsertBefore, DTU, LI).TagMismatchTerm;
1019
1020 IRBuilder<> IRB(InsertBefore);
1021 bool UseFixedShadowIntrinsic = false;
1022 // The memaccess fixed shadow intrinsic is only supported on AArch64,
1023 // which allows a 16-bit immediate to be left-shifted by 32.
1024 // Since kShadowBaseAlignment == 32, and Linux by default will not
1025 // mmap above 48-bits, practically any valid shadow offset is
1026 // representable.
1027 // In particular, an offset of 4TB (1024 << 32) is representable, and
1028 // ought to be good enough for anybody.
1029 if (TargetTriple.isAArch64() && Mapping.isFixed()) {
1030 uint16_t OffsetShifted = Mapping.offset() >> 32;
1031 UseFixedShadowIntrinsic =
1032 static_cast<uint64_t>(OffsetShifted) << 32 == Mapping.offset();
1033 }
1034
1035 if (UseFixedShadowIntrinsic) {
1036 IRB.CreateIntrinsic(
1037 ID: UseShortGranules
1038 ? Intrinsic::hwasan_check_memaccess_shortgranules_fixedshadow
1039 : Intrinsic::hwasan_check_memaccess_fixedshadow,
1040 Args: {Ptr, ConstantInt::get(Ty: Int32Ty, V: AccessInfo),
1041 ConstantInt::get(Ty: Int64Ty, V: Mapping.offset())});
1042 } else {
1043 IRB.CreateIntrinsic(
1044 ID: UseShortGranules ? Intrinsic::hwasan_check_memaccess_shortgranules
1045 : Intrinsic::hwasan_check_memaccess,
1046 Args: {ShadowBase, Ptr, ConstantInt::get(Ty: Int32Ty, V: AccessInfo)});
1047 }
1048}
1049
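// Emits the fully inlined check sequence: load the shadow tag and compare it
// with the pointer tag; on mismatch, treat shadow values 1..15 as short
// granules, in which case the access must end within the granule's valid
// bytes and the pointer tag must match the tag stored in the granule's last
// byte. Any remaining mismatch traps via architecture-specific inline asm
// that encodes AccessInfo for the signal handler.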
1050void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
1051 unsigned AccessSizeIndex,
1052 Instruction *InsertBefore,
1053 DomTreeUpdater &DTU,
1054 LoopInfo *LI) {
1055 assert(!UsePageAliases);
1056 const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
1057
1058 ShadowTagCheckInfo TCI = insertShadowTagCheck(Ptr, InsertBefore, DTU, LI);
1059
1060 IRBuilder<> IRB(TCI.TagMismatchTerm);
1061 Value *OutOfShortGranuleTagRange =
1062 IRB.CreateICmpUGT(LHS: TCI.MemTag, RHS: ConstantInt::get(Ty: Int8Ty, V: 15));
1063 Instruction *CheckFailTerm = SplitBlockAndInsertIfThen(
1064 Cond: OutOfShortGranuleTagRange, SplitBefore: TCI.TagMismatchTerm, Unreachable: !Recover,
1065 BranchWeights: MDBuilder(*C).createUnlikelyBranchWeights(), DTU: &DTU, LI);
1066
1067 IRB.SetInsertPoint(TCI.TagMismatchTerm);
1068 Value *PtrLowBits = IRB.CreateTrunc(V: IRB.CreateAnd(LHS: TCI.PtrLong, RHS: 15), DestTy: Int8Ty);
1069 PtrLowBits = IRB.CreateAdd(
1070 LHS: PtrLowBits, RHS: ConstantInt::get(Ty: Int8Ty, V: (1 << AccessSizeIndex) - 1));
1071 Value *PtrLowBitsOOB = IRB.CreateICmpUGE(LHS: PtrLowBits, RHS: TCI.MemTag);
1072 SplitBlockAndInsertIfThen(Cond: PtrLowBitsOOB, SplitBefore: TCI.TagMismatchTerm, Unreachable: false,
1073 BranchWeights: MDBuilder(*C).createUnlikelyBranchWeights(), DTU: &DTU,
1074 LI, ThenBlock: CheckFailTerm->getParent());
1075
1076 IRB.SetInsertPoint(TCI.TagMismatchTerm);
1077 Value *InlineTagAddr = IRB.CreateOr(LHS: TCI.AddrLong, RHS: 15);
1078 InlineTagAddr = IRB.CreateIntToPtr(V: InlineTagAddr, DestTy: PtrTy);
1079 Value *InlineTag = IRB.CreateLoad(Ty: Int8Ty, Ptr: InlineTagAddr);
1080 Value *InlineTagMismatch = IRB.CreateICmpNE(LHS: TCI.PtrTag, RHS: InlineTag);
1081 SplitBlockAndInsertIfThen(Cond: InlineTagMismatch, SplitBefore: TCI.TagMismatchTerm, Unreachable: false,
1082 BranchWeights: MDBuilder(*C).createUnlikelyBranchWeights(), DTU: &DTU,
1083 LI, ThenBlock: CheckFailTerm->getParent());
1084
1085 IRB.SetInsertPoint(CheckFailTerm);
1086 InlineAsm *Asm;
1087 switch (TargetTriple.getArch()) {
1088 case Triple::x86_64:
1089 // The signal handler will find the data address in rdi.
1090 Asm = InlineAsm::get(
1091 Ty: FunctionType::get(Result: VoidTy, Params: {TCI.PtrLong->getType()}, isVarArg: false),
1092 AsmString: "int3\nnopl " +
1093 itostr(X: 0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)) +
1094 "(%rax)",
1095 Constraints: "{rdi}",
1096 /*hasSideEffects=*/true);
1097 break;
1098 case Triple::aarch64:
1099 case Triple::aarch64_be:
1100 // The signal handler will find the data address in x0.
1101 Asm = InlineAsm::get(
1102 Ty: FunctionType::get(Result: VoidTy, Params: {TCI.PtrLong->getType()}, isVarArg: false),
1103 AsmString: "brk #" + itostr(X: 0x900 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
1104 Constraints: "{x0}",
1105 /*hasSideEffects=*/true);
1106 break;
1107 case Triple::riscv64:
1108 // The signal handler will find the data address in x10.
1109 Asm = InlineAsm::get(
1110 Ty: FunctionType::get(Result: VoidTy, Params: {TCI.PtrLong->getType()}, isVarArg: false),
1111 AsmString: "ebreak\naddiw x0, x11, " +
1112 itostr(X: 0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
1113 Constraints: "{x10}",
1114 /*hasSideEffects=*/true);
1115 break;
1116 default:
1117 report_fatal_error(reason: "unsupported architecture");
1118 }
1119 IRB.CreateCall(Callee: Asm, Args: TCI.PtrLong);
1120 if (Recover)
1121 cast<BranchInst>(Val: CheckFailTerm)
1122 ->setSuccessor(idx: 0, NewSucc: TCI.TagMismatchTerm->getParent());
1123}
1124
1125bool HWAddressSanitizer::ignoreMemIntrinsic(OptimizationRemarkEmitter &ORE,
1126 MemIntrinsic *MI) {
1127 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Val: MI)) {
1128 return (!ClInstrumentWrites || ignoreAccess(ORE, Inst: MTI, Ptr: MTI->getDest())) &&
1129 (!ClInstrumentReads || ignoreAccess(ORE, Inst: MTI, Ptr: MTI->getSource()));
1130 }
1131 if (isa<MemSetInst>(Val: MI))
1132 return !ClInstrumentWrites || ignoreAccess(ORE, Inst: MI, Ptr: MI->getDest());
1133 return false;
1134}
1135
1136void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
1137 IRBuilder<> IRB(MI);
1138 if (isa<MemTransferInst>(Val: MI)) {
1139 SmallVector<Value *, 4> Args{
1140 MI->getOperand(i_nocapture: 0), MI->getOperand(i_nocapture: 1),
1141 IRB.CreateIntCast(V: MI->getOperand(i_nocapture: 2), DestTy: IntptrTy, isSigned: false)};
1142
1143 if (UseMatchAllCallback)
1144 Args.emplace_back(Args: ConstantInt::get(Ty: Int8Ty, V: *MatchAllTag));
1145 IRB.CreateCall(Callee: isa<MemMoveInst>(Val: MI) ? HwasanMemmove : HwasanMemcpy, Args);
1146 } else if (isa<MemSetInst>(Val: MI)) {
1147 SmallVector<Value *, 4> Args{
1148 MI->getOperand(i_nocapture: 0),
1149 IRB.CreateIntCast(V: MI->getOperand(i_nocapture: 1), DestTy: IRB.getInt32Ty(), isSigned: false),
1150 IRB.CreateIntCast(V: MI->getOperand(i_nocapture: 2), DestTy: IntptrTy, isSigned: false)};
1151 if (UseMatchAllCallback)
1152 Args.emplace_back(Args: ConstantInt::get(Ty: Int8Ty, V: *MatchAllTag));
1153 IRB.CreateCall(Callee: HwasanMemset, Args);
1154 }
1155 MI->eraseFromParent();
1156}
1157
1158bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O,
1159 DomTreeUpdater &DTU, LoopInfo *LI,
1160 const DataLayout &DL) {
1161 Value *Addr = O.getPtr();
1162
1163 LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");
1164
1165 // If the pointer is statically known to be zero, the tag check will pass
1166 // since:
1167 // 1) it has a zero tag
1168 // 2) the shadow memory corresponding to address 0 is initialized to zero and
1169 // never updated.
1170 // We can therefore elide the tag check.
1171 llvm::KnownBits Known(DL.getPointerTypeSizeInBits(Addr->getType()));
1172 llvm::computeKnownBits(V: Addr, Known, DL);
1173 if (Known.isZero())
1174 return false;
1175
1176 if (O.MaybeMask)
1177 return false; // FIXME
1178
1179 IRBuilder<> IRB(O.getInsn());
1180 if (!O.TypeStoreSize.isScalable() && isPowerOf2_64(Value: O.TypeStoreSize) &&
1181 (O.TypeStoreSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
1182 (!O.Alignment || *O.Alignment >= Mapping.getObjectAlignment() ||
1183 *O.Alignment >= O.TypeStoreSize / 8)) {
1184 size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize: O.TypeStoreSize);
1185 if (InstrumentWithCalls) {
1186 SmallVector<Value *, 2> Args{IRB.CreatePointerCast(V: Addr, DestTy: IntptrTy)};
1187 if (UseMatchAllCallback)
1188 Args.emplace_back(Args: ConstantInt::get(Ty: Int8Ty, V: *MatchAllTag));
1189 IRB.CreateCall(Callee: HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
1190 Args);
1191 } else if (OutlinedChecks) {
1192 instrumentMemAccessOutline(Ptr: Addr, IsWrite: O.IsWrite, AccessSizeIndex, InsertBefore: O.getInsn(),
1193 DTU, LI);
1194 } else {
1195 instrumentMemAccessInline(Ptr: Addr, IsWrite: O.IsWrite, AccessSizeIndex, InsertBefore: O.getInsn(),
1196 DTU, LI);
1197 }
1198 } else {
1199 SmallVector<Value *, 3> Args{
1200 IRB.CreatePointerCast(V: Addr, DestTy: IntptrTy),
1201 IRB.CreateUDiv(LHS: IRB.CreateTypeSize(Ty: IntptrTy, Size: O.TypeStoreSize),
1202 RHS: ConstantInt::get(Ty: IntptrTy, V: 8))};
1203 if (UseMatchAllCallback)
1204 Args.emplace_back(Args: ConstantInt::get(Ty: Int8Ty, V: *MatchAllTag));
1205 IRB.CreateCall(Callee: HwasanMemoryAccessCallbackSized[O.IsWrite], Args);
1206 }
1207 untagPointerOperand(I: O.getInsn(), Addr);
1208
1209 return true;
1210}
1211
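// Writes Tag into the shadow for an alloca. With short granules and the
// default 16-byte granule size, a 20-byte alloca produces shadow bytes
// [Tag, 4]: the second granule's shadow holds the number of valid bytes, and
// the real tag is stored in the last byte of the padded (32-byte) allocation.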
1212void HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag,
1213 size_t Size) {
1214 size_t AlignedSize = alignTo(Size, A: Mapping.getObjectAlignment());
1215 if (!UseShortGranules)
1216 Size = AlignedSize;
1217
1218 Tag = IRB.CreateTrunc(V: Tag, DestTy: Int8Ty);
1219 if (InstrumentWithCalls) {
1220 IRB.CreateCall(Callee: HwasanTagMemoryFunc,
1221 Args: {IRB.CreatePointerCast(V: AI, DestTy: PtrTy), Tag,
1222 ConstantInt::get(Ty: IntptrTy, V: AlignedSize)});
1223 } else {
1224 size_t ShadowSize = Size >> Mapping.scale();
1225 Value *AddrLong = untagPointer(IRB, PtrLong: IRB.CreatePointerCast(V: AI, DestTy: IntptrTy));
1226 Value *ShadowPtr = memToShadow(Mem: AddrLong, IRB);
1227 // If this memset is not inlined, it will be intercepted in the hwasan
1228 // runtime library. That's OK, because the interceptor skips the checks if
1229 // the address is in the shadow region.
1230 // FIXME: the interceptor is not as fast as real memset. Consider lowering
1231 // llvm.memset right here into either a sequence of stores, or a call to
1232 // hwasan_tag_memory.
1233 if (ShadowSize)
1234 IRB.CreateMemSet(Ptr: ShadowPtr, Val: Tag, Size: ShadowSize, Align: Align(1));
1235 if (Size != AlignedSize) {
1236 const uint8_t SizeRemainder = Size % Mapping.getObjectAlignment().value();
1237 IRB.CreateStore(Val: ConstantInt::get(Ty: Int8Ty, V: SizeRemainder),
1238 Ptr: IRB.CreateConstGEP1_32(Ty: Int8Ty, Ptr: ShadowPtr, Idx0: ShadowSize));
1239 IRB.CreateStore(
1240 Val: Tag, Ptr: IRB.CreateConstGEP1_32(Ty: Int8Ty, Ptr: IRB.CreatePointerCast(V: AI, DestTy: PtrTy),
1241 Idx0: AlignedSize - 1));
1242 }
1243 }
1244}
1245
1246unsigned HWAddressSanitizer::retagMask(unsigned AllocaNo) {
1247 if (TargetTriple.getArch() == Triple::x86_64)
1248 return AllocaNo & TagMaskByte;
1249
1250 // A list of 8-bit numbers that have at most one run of non-zero bits.
1251 // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
1252 // masks.
1253 // The list does not include the value 255, which is used for UAR.
1254 //
1255 // Because we are more likely to use earlier elements of this list than later
1256 // ones, it is sorted in increasing order of probability of collision with a
1257 // mask allocated (temporally) nearby. The program that generated this list
1258 // can be found at:
1259 // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
1260 static const unsigned FastMasks[] = {
1261 0, 128, 64, 192, 32, 96, 224, 112, 240, 48, 16, 120,
1262 248, 56, 24, 8, 124, 252, 60, 28, 12, 4, 126, 254,
1263 62, 30, 14, 6, 2, 127, 63, 31, 15, 7, 3, 1};
1264 return FastMasks[AllocaNo % std::size(FastMasks)];
1265}
1266
1267Value *HWAddressSanitizer::applyTagMask(IRBuilder<> &IRB, Value *OldTag) {
1268 if (TagMaskByte == 0xFF)
1269 return OldTag; // No need to clear the tag byte.
1270 return IRB.CreateAnd(LHS: OldTag,
1271 RHS: ConstantInt::get(Ty: OldTag->getType(), V: TagMaskByte));
1272}
1273
1274Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
1275 return IRB.CreateZExt(V: IRB.CreateCall(Callee: HwasanGenerateTagFunc), DestTy: IntptrTy);
1276}
1277
1278Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
1279 if (ClGenerateTagsWithCalls)
1280 return nullptr;
1281 if (StackBaseTag)
1282 return StackBaseTag;
  // Extract some entropy from the frame pointer for the tags.
  // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
  // between functions).
1286 Value *FramePointerLong = getCachedFP(IRB);
1287 Value *StackTag =
1288 applyTagMask(IRB, OldTag: IRB.CreateXor(LHS: FramePointerLong,
1289 RHS: IRB.CreateLShr(LHS: FramePointerLong, RHS: 20)));
1290 StackTag->setName("hwasan.stack.base.tag");
1291 return StackTag;
1292}
1293
1294Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
1295 unsigned AllocaNo) {
1296 if (ClGenerateTagsWithCalls)
1297 return getNextTagWithCall(IRB);
1298 return IRB.CreateXor(
1299 LHS: StackTag, RHS: ConstantInt::get(Ty: StackTag->getType(), V: retagMask(AllocaNo)));
1300}
1301
1302Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB) {
1303 Value *FramePointerLong = getCachedFP(IRB);
1304 Value *UARTag =
1305 applyTagMask(IRB, OldTag: IRB.CreateLShr(LHS: FramePointerLong, RHS: PointerTagShift));
1306
1307 UARTag->setName("hwasan.uar.tag");
1308 return UARTag;
1309}
1310
1311// Add a tag to an address.
1312Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
1313 Value *PtrLong, Value *Tag) {
1314 assert(!UsePageAliases);
1315 Value *TaggedPtrLong;
1316 if (CompileKernel) {
1317 // Kernel addresses have 0xFF in the most significant byte.
1318 Value *ShiftedTag =
1319 IRB.CreateOr(LHS: IRB.CreateShl(LHS: Tag, RHS: PointerTagShift),
1320 RHS: ConstantInt::get(Ty: IntptrTy, V: (1ULL << PointerTagShift) - 1));
1321 TaggedPtrLong = IRB.CreateAnd(LHS: PtrLong, RHS: ShiftedTag);
1322 } else {
1323 // Userspace can simply do OR (tag << PointerTagShift);
1324 Value *ShiftedTag = IRB.CreateShl(LHS: Tag, RHS: PointerTagShift);
1325 TaggedPtrLong = IRB.CreateOr(LHS: PtrLong, RHS: ShiftedTag);
1326 }
1327 return IRB.CreateIntToPtr(V: TaggedPtrLong, DestTy: Ty);
1328}
1329
1330// Remove tag from an address.
1331Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
1332 assert(!UsePageAliases);
1333 Value *UntaggedPtrLong;
1334 if (CompileKernel) {
1335 // Kernel addresses have 0xFF in the most significant byte.
1336 UntaggedPtrLong =
1337 IRB.CreateOr(LHS: PtrLong, RHS: ConstantInt::get(Ty: PtrLong->getType(),
1338 V: TagMaskByte << PointerTagShift));
1339 } else {
1340 // Userspace addresses have 0x00.
1341 UntaggedPtrLong = IRB.CreateAnd(
1342 LHS: PtrLong, RHS: ConstantInt::get(Ty: PtrLong->getType(),
1343 V: ~(TagMaskByte << PointerTagShift)));
1344 }
1345 return UntaggedPtrLong;
1346}
1347
1348Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB) {
1349 // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
1350 // in Bionic's libc/platform/bionic/tls_defines.h.
1351 constexpr int SanitizerSlot = 6;
1352 if (TargetTriple.isAArch64() && TargetTriple.isAndroid())
1353 return memtag::getAndroidSlotPtr(IRB, Slot: SanitizerSlot);
1354 return ThreadPtrGlobal;
1355}
1356
1357Value *HWAddressSanitizer::getCachedFP(IRBuilder<> &IRB) {
1358 if (!CachedFP)
1359 CachedFP = memtag::getFP(IRB);
1360 return CachedFP;
1361}
1362
1363Value *HWAddressSanitizer::getFrameRecordInfo(IRBuilder<> &IRB) {
1364 // Prepare ring buffer data.
1365 Value *PC = memtag::getPC(TargetTriple, IRB);
1366 Value *FP = getCachedFP(IRB);
1367
1368 // Mix FP and PC.
1369 // Assumptions:
1370 // PC is 0x0000PPPPPPPPPPPP (48 bits are meaningful, others are zero)
1371 // FP is 0xfffffffffffFFFF0 (4 lower bits are zero)
1372 // We only really need ~20 lower non-zero bits (FFFF), so we mix like this:
1373 // 0xFFFFPPPPPPPPPPPP
1374 //
1375 // FP works because in AArch64FrameLowering::getFrameIndexReference, we
1376 // prefer FP-relative offsets for functions compiled with HWASan.
1377 FP = IRB.CreateShl(LHS: FP, RHS: 44);
1378 return IRB.CreateOr(LHS: PC, RHS: FP);
1379}
1380
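// Emits the per-function prologue: materializes the shadow base (fixed
// offset, ifunc, global, or derived from __hwasan_tls / the Android TLS
// slot) and, if requested, pushes a frame record (mixed PC and FP) into the
// thread-local ring buffer, either inline or via __hwasan_add_frame_record.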
1381void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
1382 if (!Mapping.isInTls())
1383 ShadowBase = getShadowNonTls(IRB);
1384 else if (!WithFrameRecord && TargetTriple.isAndroid())
1385 ShadowBase = getDynamicShadowIfunc(IRB);
1386
1387 if (!WithFrameRecord && ShadowBase)
1388 return;
1389
1390 Value *SlotPtr = nullptr;
1391 Value *ThreadLong = nullptr;
1392 Value *ThreadLongMaybeUntagged = nullptr;
1393
1394 auto getThreadLongMaybeUntagged = [&]() {
1395 if (!SlotPtr)
1396 SlotPtr = getHwasanThreadSlotPtr(IRB);
1397 if (!ThreadLong)
1398 ThreadLong = IRB.CreateLoad(Ty: IntptrTy, Ptr: SlotPtr);
1399 // Extract the address field from ThreadLong. Unnecessary on AArch64 with
1400 // TBI.
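    // (TBI = Top Byte Ignore: AArch64 loads and stores disregard the top byte
    // of the address, so a tagged ThreadLong can be dereferenced directly.)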
    return TargetTriple.isAArch64() ? ThreadLong
                                    : untagPointer(IRB, ThreadLong);
  };

  if (WithFrameRecord) {
    switch (ClRecordStackHistory) {
    case libcall: {
      // Emit a runtime call into hwasan rather than emitting instructions for
      // recording stack history.
      Value *FrameRecordInfo = getFrameRecordInfo(IRB);
      IRB.CreateCall(HwasanRecordFrameRecordFunc, {FrameRecordInfo});
      break;
    }
    case instr: {
      ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();

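      // The ring buffer pointer advances by 8 bytes per frame record, so the
      // value changes from frame to frame; shifting it right by 3 appears to
      // serve as a cheap source of per-frame entropy for the stack base tag.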
      StackBaseTag = IRB.CreateAShr(ThreadLong, 3);

      // Store data to ring buffer.
      Value *FrameRecordInfo = getFrameRecordInfo(IRB);
      Value *RecordPtr =
          IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IRB.getPtrTy(0));
      IRB.CreateStore(FrameRecordInfo, RecordPtr);

      IRB.CreateStore(memtag::incrementThreadLong(IRB, ThreadLong, 8), SlotPtr);
      break;
    }
    case none: {
      llvm_unreachable(
          "A stack history recording mode should've been selected.");
    }
    }
  }

  if (!ShadowBase) {
    if (!ThreadLongMaybeUntagged)
      ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();

    // Get shadow base address by aligning RecordPtr up.
    // Note: this is not correct if the pointer is already aligned.
    // Runtime library will make sure this never happens.
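    // ((x | (2^kShadowBaseAlignment - 1)) + 1) rounds x up to the next
    // multiple of 2^kShadowBaseAlignment (2^32 here): the OR fills the low
    // bits with ones and the +1 carries into the next aligned boundary.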
    ShadowBase = IRB.CreateAdd(
        IRB.CreateOr(
            ThreadLongMaybeUntagged,
            ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
        ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
    ShadowBase = IRB.CreateIntToPtr(ShadowBase, PtrTy);
  }
}

void HWAddressSanitizer::instrumentLandingPads(
    SmallVectorImpl<Instruction *> &LandingPadVec) {
  for (auto *LP : LandingPadVec) {
    IRBuilder<> IRB(LP->getNextNode());
    IRB.CreateCall(
        HwasanHandleVfork,
        {memtag::readRegister(
            IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp" : "sp")});
  }
}

void HWAddressSanitizer::instrumentStack(memtag::StackInfo &SInfo,
                                         Value *StackTag, Value *UARTag,
                                         const DominatorTree &DT,
                                         const PostDominatorTree &PDT,
                                         const LoopInfo &LI) {
  // Ideally, we would calculate a tagged stack base pointer and rewrite all
  // alloca addresses using that. Unfortunately, offsets are not known yet
  // (unless we use an ASan-style mega-alloca). Instead we keep the base tag in
  // a temp, shift-OR it into each alloca address and XOR it with the retag
  // mask. This generates one extra instruction per alloca use.
  unsigned int I = 0;

  for (auto &KV : SInfo.AllocasToInstrument) {
    auto N = I++;
    auto *AI = KV.first;
    memtag::AllocaInfo &Info = KV.second;
    IRBuilder<> IRB(AI->getNextNode());

    // Replace uses of the alloca with tagged address.
    Value *Tag = getAllocaTag(IRB, StackTag, N);
    Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
    Value *AINoTagLong = untagPointer(IRB, AILong);
    Value *Replacement = tagPointer(IRB, AI->getType(), AINoTagLong, Tag);
    std::string Name =
        AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
    Replacement->setName(Name + ".hwasan");

    size_t Size = memtag::getAllocaSizeInBytes(*AI);
    size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());

    AI->replaceUsesWithIf(Replacement, [AILong](const Use &U) {
      auto *User = U.getUser();
      return User != AILong && !isa<LifetimeIntrinsic>(User);
    });

    memtag::annotateDebugRecords(Info, retagMask(N));

    auto TagEnd = [&](Instruction *Node) {
      IRB.SetInsertPoint(Node);
      // When untagging, use the `AlignedSize` because we need to set the tags
      // for the entire alloca to original. If we used `Size` here, we would
      // keep the last granule tagged, and store zero in the last byte of the
      // last granule, due to how short granules are implemented.
      tagAlloca(IRB, AI, UARTag, AlignedSize);
    };
    // Calls to functions that may return twice (e.g. setjmp) confuse the
    // postdominator analysis, and would leave memory tagged after the function
    // returns. Work around this by always untagging at every return statement
    // if return_twice functions are called.
    if (DetectUseAfterScope && !SInfo.CallsReturnTwice &&
        memtag::isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd, &DT,
                                   &LI, ClMaxLifetimes)) {
      for (IntrinsicInst *Start : Info.LifetimeStart) {
        IRB.SetInsertPoint(Start->getNextNode());
        tagAlloca(IRB, AI, Tag, Size);
      }
      if (!memtag::forAllReachableExits(DT, PDT, LI, Info, SInfo.RetVec,
                                        TagEnd)) {
        for (auto *End : Info.LifetimeEnd)
          End->eraseFromParent();
      }
    } else {
      tagAlloca(IRB, AI, Tag, Size);
      for (auto *RI : SInfo.RetVec)
        TagEnd(RI);
      // We inserted tagging outside of the lifetimes, so we have to remove
      // them.
      for (auto &II : Info.LifetimeStart)
        II->eraseFromParent();
      for (auto &II : Info.LifetimeEnd)
        II->eraseFromParent();
    }
    memtag::alignAndPadAlloca(Info, Mapping.getObjectAlignment());
  }
}

static void emitRemark(const Function &F, OptimizationRemarkEmitter &ORE,
                       bool Skip) {
  if (Skip) {
    ORE.emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "Skip", &F)
             << "Skipped: F=" << ore::NV("Function", &F);
    });
  } else {
    ORE.emit([&]() {
      return OptimizationRemarkMissed(DEBUG_TYPE, "Sanitize", &F)
             << "Sanitized: F=" << ore::NV("Function", &F);
    });
  }
}

bool HWAddressSanitizer::selectiveInstrumentationShouldSkip(
    Function &F, FunctionAnalysisManager &FAM) const {
  auto SkipHot = [&]() {
    if (!ClHotPercentileCutoff.getNumOccurrences())
      return false;
    auto &MAMProxy = FAM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
    ProfileSummaryInfo *PSI =
        MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
    if (!PSI || !PSI->hasProfileSummary()) {
      ++NumNoProfileSummaryFuncs;
      return false;
    }
    return PSI->isFunctionHotInCallGraphNthPercentile(
        ClHotPercentileCutoff, &F, FAM.getResult<BlockFrequencyAnalysis>(F));
  };

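  // ClRandomKeepRate is the probability of instrumenting ("keeping") a
  // function; a failed Bernoulli draw means the function is skipped.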
  auto SkipRandom = [&]() {
    if (!ClRandomKeepRate.getNumOccurrences())
      return false;
    std::bernoulli_distribution D(ClRandomKeepRate);
    return !D(*Rng);
  };

  bool Skip = SkipRandom() || SkipHot();
  emitRemark(F, FAM.getResult<OptimizationRemarkEmitterAnalysis>(F), Skip);
  return Skip;
}

void HWAddressSanitizer::sanitizeFunction(Function &F,
                                          FunctionAnalysisManager &FAM) {
  if (&F == HwasanCtorFunction)
    return;

  // Do not apply any instrumentation for naked functions.
  if (F.hasFnAttribute(Attribute::Naked))
    return;

  if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
    return;

  if (F.empty())
    return;

  if (F.isPresplitCoroutine())
    return;

  NumTotalFuncs++;

  OptimizationRemarkEmitter &ORE =
      FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  if (selectiveInstrumentationShouldSkip(F, FAM))
    return;

  NumInstrumentedFuncs++;

  LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");

  SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
  SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
  SmallVector<Instruction *, 8> LandingPadVec;
  const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);

  memtag::StackInfoBuilder SIB(SSI, DEBUG_TYPE);
  for (auto &Inst : instructions(F)) {
    if (InstrumentStack) {
      SIB.visit(ORE, Inst);
    }

    if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
      LandingPadVec.push_back(&Inst);

    getInterestingMemoryOperands(ORE, &Inst, TLI, OperandsToInstrument);

    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
      if (!ignoreMemIntrinsic(ORE, MI))
        IntrinToInstrument.push_back(MI);
  }

  memtag::StackInfo &SInfo = SIB.get();

  initializeCallbacks(*F.getParent());

  if (!LandingPadVec.empty())
    instrumentLandingPads(LandingPadVec);

  if (SInfo.AllocasToInstrument.empty() && F.hasPersonalityFn() &&
      F.getPersonalityFn()->getName() == kHwasanPersonalityThunkName) {
    // __hwasan_personality_thunk is a no-op for functions without an
    // instrumented stack, so we can drop it.
    F.setPersonalityFn(nullptr);
  }

  if (SInfo.AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
      IntrinToInstrument.empty())
    return;

  assert(!ShadowBase);

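  // A frame record is only needed when stack history recording is enabled,
  // the chosen mapping supports it, and this function actually has allocas to
  // instrument; otherwise the prologue only needs to compute the shadow base.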
  BasicBlock::iterator InsertPt = F.getEntryBlock().begin();
  IRBuilder<> EntryIRB(&F.getEntryBlock(), InsertPt);
  emitPrologue(EntryIRB,
               /*WithFrameRecord*/ ClRecordStackHistory != none &&
                   Mapping.withFrameRecord() &&
                   !SInfo.AllocasToInstrument.empty());

  if (!SInfo.AllocasToInstrument.empty()) {
    const DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
    const PostDominatorTree &PDT = FAM.getResult<PostDominatorTreeAnalysis>(F);
    const LoopInfo &LI = FAM.getResult<LoopAnalysis>(F);
    Value *StackTag = getStackBaseTag(EntryIRB);
    Value *UARTag = getUARTag(EntryIRB);
    instrumentStack(SInfo, StackTag, UARTag, DT, PDT, LI);
  }

  // If we split the entry block, move any allocas that were originally in the
  // entry block back into the entry block so that they aren't treated as
  // dynamic allocas.
  if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
    InsertPt = F.getEntryBlock().begin();
    for (Instruction &I :
         llvm::make_early_inc_range(*EntryIRB.GetInsertBlock())) {
      if (auto *AI = dyn_cast<AllocaInst>(&I))
        if (isa<ConstantInt>(AI->getArraySize()))
          I.moveBefore(F.getEntryBlock(), InsertPt);
    }
  }

  DominatorTree *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
  PostDominatorTree *PDT = FAM.getCachedResult<PostDominatorTreeAnalysis>(F);
  LoopInfo *LI = FAM.getCachedResult<LoopAnalysis>(F);
  DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Lazy);
  const DataLayout &DL = F.getDataLayout();
  for (auto &Operand : OperandsToInstrument)
    instrumentMemAccess(Operand, DTU, LI, DL);
  DTU.flush();

  if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
    for (auto *Inst : IntrinToInstrument)
      instrumentMemIntrinsic(Inst);
  }

  ShadowBase = nullptr;
  StackBaseTag = nullptr;
  CachedFP = nullptr;
}

void HWAddressSanitizer::instrumentGlobal(GlobalVariable *GV, uint8_t Tag) {
  assert(!UsePageAliases);
  Constant *Initializer = GV->getInitializer();
  uint64_t SizeInBytes =
      M.getDataLayout().getTypeAllocSize(Initializer->getType());
  uint64_t NewSize = alignTo(SizeInBytes, Mapping.getObjectAlignment());
  if (SizeInBytes != NewSize) {
    // Pad the initializer out to the next multiple of 16 bytes and add the
    // required short granule tag.
    std::vector<uint8_t> Init(NewSize - SizeInBytes, 0);
    Init.back() = Tag;
    Constant *Padding = ConstantDataArray::get(*C, Init);
    Initializer = ConstantStruct::getAnon({Initializer, Padding});
  }

  auto *NewGV = new GlobalVariable(M, Initializer->getType(), GV->isConstant(),
                                   GlobalValue::ExternalLinkage, Initializer,
                                   GV->getName() + ".hwasan");
  NewGV->copyAttributesFrom(GV);
  NewGV->setLinkage(GlobalValue::PrivateLinkage);
  NewGV->copyMetadata(GV, 0);
  NewGV->setAlignment(
      std::max(GV->getAlign().valueOrOne(), Mapping.getObjectAlignment()));

  // It is invalid to ICF two globals that have different tags. In the case
  // where the size of the global is a multiple of the tag granularity the
  // contents of the globals may be the same but the tags (i.e. symbol values)
  // may be different, and the symbols are not considered during ICF. In the
  // case where the size is not a multiple of the granularity, the short
  // granule tags would discriminate two globals with different tags, but
  // there would otherwise be nothing stopping such a global from being
  // incorrectly ICF'd with an uninstrumented (i.e. tag 0) global that
  // happened to have the short granule tag in the last byte.
  NewGV->setUnnamedAddr(GlobalValue::UnnamedAddr::None);

  // Descriptor format (assuming little-endian):
  // bytes 0-3: relative address of global
  // bytes 4-6: size of global (16MB ought to be enough for anyone, but in case
  // it isn't, we create multiple descriptors)
  // byte 7: tag
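  // The two 32-bit descriptor words are emitted below as {GVRelPtr,
  // SizeAndTag}; e.g. a 0x1004-byte global with tag 0x2a packs to
  // SizeAndTag == 0x2a001004.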
  auto *DescriptorTy = StructType::get(Int32Ty, Int32Ty);
  const uint64_t MaxDescriptorSize = 0xfffff0;
  for (uint64_t DescriptorPos = 0; DescriptorPos < SizeInBytes;
       DescriptorPos += MaxDescriptorSize) {
    auto *Descriptor =
        new GlobalVariable(M, DescriptorTy, true, GlobalValue::PrivateLinkage,
                           nullptr, GV->getName() + ".hwasan.descriptor");
    auto *GVRelPtr = ConstantExpr::getTrunc(
        ConstantExpr::getAdd(
            ConstantExpr::getSub(
                ConstantExpr::getPtrToInt(NewGV, Int64Ty),
                ConstantExpr::getPtrToInt(Descriptor, Int64Ty)),
            ConstantInt::get(Int64Ty, DescriptorPos)),
        Int32Ty);
    uint32_t Size = std::min(SizeInBytes - DescriptorPos, MaxDescriptorSize);
    auto *SizeAndTag = ConstantInt::get(Int32Ty, Size | (uint32_t(Tag) << 24));
    Descriptor->setComdat(NewGV->getComdat());
    Descriptor->setInitializer(ConstantStruct::getAnon({GVRelPtr, SizeAndTag}));
    Descriptor->setSection("hwasan_globals");
    Descriptor->setMetadata(LLVMContext::MD_associated,
                            MDNode::get(*C, ValueAsMetadata::get(NewGV)));
    appendToCompilerUsed(M, Descriptor);
  }

  Constant *Aliasee = ConstantExpr::getIntToPtr(
      ConstantExpr::getAdd(
          ConstantExpr::getPtrToInt(NewGV, Int64Ty),
          ConstantInt::get(Int64Ty, uint64_t(Tag) << PointerTagShift)),
      GV->getType());
  auto *Alias = GlobalAlias::create(GV->getValueType(), GV->getAddressSpace(),
                                    GV->getLinkage(), "", Aliasee, &M);
  Alias->setVisibility(GV->getVisibility());
  Alias->takeName(GV);
  GV->replaceAllUsesWith(Alias);
  GV->eraseFromParent();
}

void HWAddressSanitizer::instrumentGlobals() {
  std::vector<GlobalVariable *> Globals;
  for (GlobalVariable &GV : M.globals()) {
    if (GV.hasSanitizerMetadata() && GV.getSanitizerMetadata().NoHWAddress)
      continue;

    if (GV.isDeclarationForLinker() || GV.getName().starts_with("llvm.") ||
        GV.isThreadLocal())
      continue;

    // Common symbols can't have aliases point to them, so they can't be tagged.
    if (GV.hasCommonLinkage())
      continue;

    if (ClAllGlobals) {
      // Avoid instrumenting intrinsic global variables.
      if (GV.getSection() == "llvm.metadata")
        continue;
    } else {
      // Globals with custom sections may be used in __start_/__stop_
      // enumeration, which would be broken both by adding tags and potentially
      // by the extra padding/alignment that we insert.
      if (GV.hasSection())
        continue;
    }

    Globals.push_back(&GV);
  }

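  // Derive the starting tag from an MD5 of the source file name: deterministic
  // for a given translation unit, and likely to differ between different TUs.
  // Only the first byte of the digest is used.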
  MD5 Hasher;
  Hasher.update(M.getSourceFileName());
  MD5::MD5Result Hash;
  Hasher.final(Hash);
  uint8_t Tag = Hash[0];

  assert(TagMaskByte >= 16);

  for (GlobalVariable *GV : Globals) {
    // Don't allow globals to be tagged with something that looks like a
    // short-granule tag, otherwise we lose inter-granule overflow detection,
    // as the fast path shadow-vs-address check succeeds.
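    // Tags therefore cycle through [16, TagMaskByte]; values below 16 are
    // reserved for short-granule sizes in the shadow encoding.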
    if (Tag < 16 || Tag > TagMaskByte)
      Tag = 16;
    instrumentGlobal(GV, Tag++);
  }
}

void HWAddressSanitizer::instrumentPersonalityFunctions() {
  // We need to untag stack frames as we unwind past them. That is the job of
  // the personality function wrapper, which either wraps an existing
  // personality function or acts as a personality function on its own. Each
  // function that has a personality function or that can be unwound past has
  // its personality function changed to a thunk that calls the personality
  // function wrapper in the runtime.
  MapVector<Constant *, std::vector<Function *>> PersonalityFns;
  for (Function &F : M) {
    if (F.isDeclaration() || !F.hasFnAttribute(Attribute::SanitizeHWAddress))
      continue;

    if (F.hasPersonalityFn()) {
      PersonalityFns[F.getPersonalityFn()->stripPointerCasts()].push_back(&F);
    } else if (!F.hasFnAttribute(Attribute::NoUnwind)) {
      PersonalityFns[nullptr].push_back(&F);
    }
  }

  if (PersonalityFns.empty())
    return;

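  // The wrapper receives the five standard personality-routine arguments,
  // followed by the original personality function (or null) and the addresses
  // of _Unwind_GetGR and _Unwind_GetCFA, which the runtime can use to locate
  // the frame being unwound.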
  FunctionCallee HwasanPersonalityWrapper = M.getOrInsertFunction(
      "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty, PtrTy,
      PtrTy, PtrTy, PtrTy, PtrTy);
  FunctionCallee UnwindGetGR = M.getOrInsertFunction("_Unwind_GetGR", VoidTy);
  FunctionCallee UnwindGetCFA = M.getOrInsertFunction("_Unwind_GetCFA", VoidTy);

  for (auto &P : PersonalityFns) {
    std::string ThunkName = kHwasanPersonalityThunkName;
    if (P.first)
      ThunkName += ("." + P.first->getName()).str();
    FunctionType *ThunkFnTy = FunctionType::get(
        Int32Ty, {Int32Ty, Int32Ty, Int64Ty, PtrTy, PtrTy}, false);
    bool IsLocal = P.first && (!isa<GlobalValue>(P.first) ||
                               cast<GlobalValue>(P.first)->hasLocalLinkage());
    auto *ThunkFn = Function::Create(ThunkFnTy,
                                     IsLocal ? GlobalValue::InternalLinkage
                                             : GlobalValue::LinkOnceODRLinkage,
                                     ThunkName, &M);
    // TODO: think about other attributes as well.
    if (any_of(P.second, [](const Function *F) {
          return F->hasFnAttribute("branch-target-enforcement");
        })) {
      ThunkFn->addFnAttr("branch-target-enforcement");
    }
    if (!IsLocal) {
      ThunkFn->setVisibility(GlobalValue::HiddenVisibility);
      ThunkFn->setComdat(M.getOrInsertComdat(ThunkName));
    }

    auto *BB = BasicBlock::Create(*C, "entry", ThunkFn);
    IRBuilder<> IRB(BB);
    CallInst *WrapperCall = IRB.CreateCall(
        HwasanPersonalityWrapper,
        {ThunkFn->getArg(0), ThunkFn->getArg(1), ThunkFn->getArg(2),
         ThunkFn->getArg(3), ThunkFn->getArg(4),
         P.first ? P.first : Constant::getNullValue(PtrTy),
         UnwindGetGR.getCallee(), UnwindGetCFA.getCallee()});
    WrapperCall->setTailCall();
    IRB.CreateRet(WrapperCall);

    for (Function *F : P.second)
      F->setPersonalityFn(ThunkFn);
  }
}

void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple,
                                             bool InstrumentWithCalls,
                                             bool CompileKernel) {
  // Start with defaults.
  Scale = kDefaultShadowScale;
  Kind = OffsetKind::kTls;
  WithFrameRecord = true;

  // Tune for the target.
  if (TargetTriple.isOSFuchsia()) {
    // Fuchsia is always PIE, which means that the beginning of the address
    // space is always available.
    SetFixed(0);
  } else if (CompileKernel || InstrumentWithCalls) {
    SetFixed(0);
    WithFrameRecord = false;
  }

  WithFrameRecord = optOr(ClFrameRecords, WithFrameRecord);

  // Apply the last of ClMappingOffset and ClMappingOffsetDynamic.
  Kind = optOr(ClMappingOffsetDynamic, Kind);
  if (ClMappingOffset.getNumOccurrences() > 0 &&
      !(ClMappingOffsetDynamic.getNumOccurrences() > 0 &&
        ClMappingOffsetDynamic.getPosition() > ClMappingOffset.getPosition())) {
    SetFixed(ClMappingOffset);
  }
}