1//===- SafeStack.cpp - Safe Stack Insertion -------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass splits the stack into the safe stack (kept as-is for LLVM backend)
10// and the unsafe stack (explicitly allocated and managed through the runtime
11// support library).
12//
13// http://clang.llvm.org/docs/SafeStack.html
14//
15//===----------------------------------------------------------------------===//
16
17#include "llvm/CodeGen/SafeStack.h"
18#include "SafeStackLayout.h"
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/ArrayRef.h"
21#include "llvm/ADT/SmallPtrSet.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/Statistic.h"
24#include "llvm/Analysis/AssumptionCache.h"
25#include "llvm/Analysis/BranchProbabilityInfo.h"
26#include "llvm/Analysis/DomTreeUpdater.h"
27#include "llvm/Analysis/InlineCost.h"
28#include "llvm/Analysis/LoopInfo.h"
29#include "llvm/Analysis/ScalarEvolution.h"
30#include "llvm/Analysis/ScalarEvolutionExpressions.h"
31#include "llvm/Analysis/StackLifetime.h"
32#include "llvm/Analysis/TargetLibraryInfo.h"
33#include "llvm/CodeGen/TargetLowering.h"
34#include "llvm/CodeGen/TargetPassConfig.h"
35#include "llvm/CodeGen/TargetSubtargetInfo.h"
36#include "llvm/IR/Argument.h"
37#include "llvm/IR/Attributes.h"
38#include "llvm/IR/ConstantRange.h"
39#include "llvm/IR/Constants.h"
40#include "llvm/IR/DIBuilder.h"
41#include "llvm/IR/DataLayout.h"
42#include "llvm/IR/DerivedTypes.h"
43#include "llvm/IR/Dominators.h"
44#include "llvm/IR/Function.h"
45#include "llvm/IR/IRBuilder.h"
46#include "llvm/IR/InstIterator.h"
47#include "llvm/IR/Instruction.h"
48#include "llvm/IR/Instructions.h"
49#include "llvm/IR/IntrinsicInst.h"
50#include "llvm/IR/Intrinsics.h"
51#include "llvm/IR/MDBuilder.h"
52#include "llvm/IR/Metadata.h"
53#include "llvm/IR/Module.h"
54#include "llvm/IR/Type.h"
55#include "llvm/IR/Use.h"
56#include "llvm/IR/Value.h"
57#include "llvm/InitializePasses.h"
58#include "llvm/Pass.h"
59#include "llvm/Support/Casting.h"
60#include "llvm/Support/Debug.h"
61#include "llvm/Support/ErrorHandling.h"
62#include "llvm/Support/raw_ostream.h"
63#include "llvm/Target/TargetMachine.h"
64#include "llvm/Transforms/Utils/BasicBlockUtils.h"
65#include "llvm/Transforms/Utils/Cloning.h"
66#include "llvm/Transforms/Utils/Local.h"
67#include <algorithm>
68#include <cassert>
69#include <cstdint>
70#include <optional>
71#include <string>
72
73using namespace llvm;
74using namespace llvm::safestack;
75
76#define DEBUG_TYPE "safe-stack"
77
// Per-function / per-object counters reported with -stats.
STATISTIC(NumFunctions, "Total number of functions");
STATISTIC(NumUnsafeStackFunctions, "Number of functions with unsafe stack");
STATISTIC(NumUnsafeStackRestorePointsFunctions,
          "Number of functions that use setjmp or exceptions");

STATISTIC(NumAllocas, "Total number of allocas");
STATISTIC(NumUnsafeStaticAllocas, "Number of unsafe static allocas");
STATISTIC(NumUnsafeDynamicAllocas, "Number of unsafe dynamic allocas");
STATISTIC(NumUnsafeByValArguments, "Number of unsafe byval arguments");
STATISTIC(NumUnsafeStackRestorePoints, "Number of setjmps and landingpads");

/// Use __safestack_pointer_address even if the platform has a faster way of
/// accessing the safe stack pointer.
static cl::opt<bool>
    SafeStackUsePointerAddress("safestack-use-pointer-address",
                               cl::init(Val: false), cl::Hidden);

/// Enable stack-slot coloring: unsafe static allocas whose lifetimes do not
/// overlap may share unsafe-stack space (on by default).
static cl::opt<bool> ClColoring("safe-stack-coloring",
                                cl::desc("enable safe stack coloring"),
                                cl::Hidden, cl::init(Val: true));
98
99namespace {
100
/// The SafeStack pass splits the stack of each function into the safe
/// stack, which is only accessed through memory safe dereferences (as
/// determined statically), and the unsafe stack, which contains all
/// local variables that are accessed in ways that we can't prove to
/// be safe.
class SafeStack {
  /// The function being transformed.
  Function &F;
  /// Target hooks used to locate the unsafe stack pointer and stack guard.
  const TargetLoweringBase &TL;
  /// Runtime library call lowering (e.g. stack-check-fail symbol).
  const LibcallLoweringInfo &Libcalls;
  const DataLayout &DL;
  /// Optional dominator-tree updater, passed through to CFG splits made
  /// for the stack-guard check.
  DomTreeUpdater *DTU;
  /// Used to prove that pointer arithmetic stays within an alloca.
  ScalarEvolution &SE;

  /// Pointer type in the alloca address space.
  Type *StackPtrTy;
  /// Integer type matching the width of a stack address (used for ptrmask
  /// constants).
  Type *AddrTy;
  /// i32, used for frame offsets and metadata constants.
  Type *Int32Ty;

  /// Location holding the current unsafe stack top; initialized in run().
  Value *UnsafeStackPtr = nullptr;

  /// Unsafe stack alignment. Each stack frame must ensure that the stack is
  /// aligned to this value. We need to re-align the unsafe stack if the
  /// alignment of any object on the stack exceeds this value.
  ///
  /// 16 seems like a reasonable upper bound on the alignment of objects that we
  /// might expect to appear on the stack on most common targets.
  static constexpr Align StackAlignment = Align::Constant<16>();

  /// Return the value of the stack canary.
  Value *getStackGuard(IRBuilder<> &IRB, Function &F);

  /// Load stack guard from the frame and check if it has changed.
  void checkStackGuard(IRBuilder<> &IRB, Function &F, Instruction &RI,
                       AllocaInst *StackGuardSlot, Value *StackGuard);

  /// Find all static allocas, dynamic allocas, return instructions and
  /// stack restore points (exception unwind blocks and setjmp calls) in the
  /// given function and append them to the respective vectors.
  void findInsts(Function &F, SmallVectorImpl<AllocaInst *> &StaticAllocas,
                 SmallVectorImpl<AllocaInst *> &DynamicAllocas,
                 SmallVectorImpl<Argument *> &ByValArguments,
                 SmallVectorImpl<Instruction *> &Returns,
                 SmallVectorImpl<Instruction *> &StackRestorePoints);

  /// Calculate the allocation size of a given alloca. Returns 0 if the
  /// size can not be statically determined.
  uint64_t getStaticAllocaAllocationSize(const AllocaInst* AI);

  /// Allocate space for all static allocas in \p StaticAllocas,
  /// replace allocas with pointers into the unsafe stack.
  ///
  /// \returns A pointer to the top of the unsafe stack after all unsafe static
  /// allocas are allocated.
  Value *moveStaticAllocasToUnsafeStack(IRBuilder<> &IRB, Function &F,
                                        ArrayRef<AllocaInst *> StaticAllocas,
                                        ArrayRef<Argument *> ByValArguments,
                                        Instruction *BasePointer,
                                        AllocaInst *StackGuardSlot);

  /// Generate code to restore the stack after all stack restore points
  /// in \p StackRestorePoints.
  ///
  /// \returns A local variable in which to maintain the dynamic top of the
  /// unsafe stack if needed.
  AllocaInst *
  createStackRestorePoints(IRBuilder<> &IRB, Function &F,
                           ArrayRef<Instruction *> StackRestorePoints,
                           Value *StaticTop, bool NeedDynamicTop);

  /// Replace all allocas in \p DynamicAllocas with code to allocate
  /// space dynamically on the unsafe stack and store the dynamic unsafe stack
  /// top to \p DynamicTop if non-null.
  void moveDynamicAllocasToUnsafeStack(Function &F, Value *UnsafeStackPtr,
                                       AllocaInst *DynamicTop,
                                       ArrayRef<AllocaInst *> DynamicAllocas);

  /// Return true if all uses of the given allocation are provably safe, so
  /// the object may stay on the (regular) safe stack.
  bool IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize);

  /// Return true if the given memory intrinsic cannot write outside the
  /// object via the operand \p U.
  bool IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
                          const Value *AllocaPtr, uint64_t AllocaSize);
  /// TypeSize overload; scalable sizes are conservatively unsafe.
  bool IsAccessSafe(Value *Addr, TypeSize Size, const Value *AllocaPtr,
                    uint64_t AllocaSize);
  /// Return true if an access of \p Size bytes at \p Addr provably stays
  /// within [AllocaPtr, AllocaPtr + AllocaSize).
  bool IsAccessSafe(Value *Addr, uint64_t Size, const Value *AllocaPtr,
                    uint64_t AllocaSize);

  /// Heuristic deciding whether the __safestack_pointer_address call should
  /// be inlined into this function.
  bool ShouldInlinePointerAddress(CallInst &CI);
  /// Attempt to inline the call producing UnsafeStackPtr, if any.
  void TryInlinePointerAddress();

public:
  SafeStack(Function &F, const TargetLoweringBase &TL,
            const LibcallLoweringInfo &Libcalls, const DataLayout &DL,
            DomTreeUpdater *DTU, ScalarEvolution &SE)
      : F(F), TL(TL), Libcalls(Libcalls), DL(DL), DTU(DTU), SE(SE),
        StackPtrTy(DL.getAllocaPtrType(Ctx&: F.getContext())),
        AddrTy(DL.getAddressType(PtrTy: StackPtrTy)),
        Int32Ty(Type::getInt32Ty(C&: F.getContext())) {}

  // Run the transformation on the associated function.
  // Returns whether the function was changed.
  bool run();
};
201
202uint64_t SafeStack::getStaticAllocaAllocationSize(const AllocaInst* AI) {
203 if (auto Size = AI->getAllocationSize(DL))
204 if (Size->isFixed())
205 return Size->getFixedValue();
206 return 0;
207}
208
209bool SafeStack::IsAccessSafe(Value *Addr, TypeSize AccessSize,
210 const Value *AllocaPtr, uint64_t AllocaSize) {
211 if (AccessSize.isScalable()) {
212 // In case we don't know the size at compile time we cannot verify if the
213 // access is safe.
214 return false;
215 }
216 return IsAccessSafe(Addr, Size: AccessSize.getFixedValue(), AllocaPtr, AllocaSize);
217}
218
/// Return true when the \p AccessSize-byte access at \p Addr provably stays
/// within the \p AllocaSize bytes starting at \p AllocaPtr.
bool SafeStack::IsAccessSafe(Value *Addr, uint64_t AccessSize,
                             const Value *AllocaPtr, uint64_t AllocaSize) {
  // The address must be based directly on the alloca (or byval argument)
  // itself; a different (or unknown) pointer base means we cannot reason
  // about the access here.
  const SCEV *AddrExpr = SE.getSCEV(V: Addr);
  const auto *Base = dyn_cast<SCEVUnknown>(Val: SE.getPointerBase(V: AddrExpr));
  if (!Base || Base->getValue() != AllocaPtr) {
    LLVM_DEBUG(
        dbgs() << "[SafeStack] "
               << (isa<AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ")
               << *AllocaPtr << "\n"
               << "SCEV " << *AddrExpr << " not directly based on alloca\n");
    return false;
  }

  // Strip the base to obtain the byte-offset expression, then check that the
  // unsigned range of [offset, offset + AccessSize) is contained in
  // [0, AllocaSize).
  const SCEV *Expr = SE.removePointerBase(S: AddrExpr);
  uint64_t BitWidth = SE.getTypeSizeInBits(Ty: Expr->getType());
  ConstantRange AccessStartRange = SE.getUnsignedRange(S: Expr);
  ConstantRange SizeRange =
      ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AccessSize));
  ConstantRange AccessRange = AccessStartRange.add(Other: SizeRange);
  ConstantRange AllocaRange =
      ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AllocaSize));
  bool Safe = AllocaRange.contains(CR: AccessRange);

  LLVM_DEBUG(
      dbgs() << "[SafeStack] "
             << (isa<AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ")
             << *AllocaPtr << "\n"
             << "            Access " << *Addr << "\n"
             << "            SCEV " << *Expr
             << " U: " << SE.getUnsignedRange(Expr)
             << ", S: " << SE.getSignedRange(Expr) << "\n"
             << "            Range " << AccessRange << "\n"
             << "            AllocaRange " << AllocaRange << "\n"
             << "            " << (Safe ? "safe" : "unsafe") << "\n");

  return Safe;
}
256
257bool SafeStack::IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
258 const Value *AllocaPtr,
259 uint64_t AllocaSize) {
260 if (auto MTI = dyn_cast<MemTransferInst>(Val: MI)) {
261 if (MTI->getRawSource() != U && MTI->getRawDest() != U)
262 return true;
263 } else {
264 if (MI->getRawDest() != U)
265 return true;
266 }
267
268 auto Len = MI->getLengthInBytes();
269 // Non-constant size => unsafe. FIXME: try SCEV getRange.
270 if (!Len) return false;
271 return IsAccessSafe(Addr: U, AccessSize: Len->getZExtValue(), AllocaPtr, AllocaSize);
272}
273
274/// Check whether a given allocation must be put on the safe
275/// stack or not. The function analyzes all uses of AI and checks whether it is
276/// only accessed in a memory safe way (as decided statically).
/// Check whether a given allocation must be put on the safe
/// stack or not. The function analyzes all uses of AI and checks whether it is
/// only accessed in a memory safe way (as decided statically).
bool SafeStack::IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize) {
  // Go through all uses of this alloca and check whether all accesses to the
  // allocated object are statically known to be memory safe and, hence, the
  // object can be placed on the safe stack.
  SmallPtrSet<const Value *, 16> Visited;
  SmallVector<const Value *, 8> WorkList;
  WorkList.push_back(Elt: AllocaPtr);

  // A DFS search through all uses of the alloca in bitcasts/PHI/GEPs/etc.
  while (!WorkList.empty()) {
    const Value *V = WorkList.pop_back_val();
    for (const Use &UI : V->uses()) {
      auto I = cast<const Instruction>(Val: UI.getUser());
      assert(V == UI.get());

      switch (I->getOpcode()) {
      case Instruction::Load:
        // Safe iff the loaded bytes provably stay within the object.
        if (!IsAccessSafe(Addr: UI, AccessSize: DL.getTypeStoreSize(Ty: I->getType()), AllocaPtr,
                          AllocaSize))
          return false;
        break;

      case Instruction::VAArg:
        // "va-arg" from a pointer is safe.
        break;
      case Instruction::Store:
        if (V == I->getOperand(i: 0)) {
          // Stored the pointer - conservatively assume it may be unsafe.
          LLVM_DEBUG(dbgs()
                     << "[SafeStack] Unsafe alloca: " << *AllocaPtr
                     << "\n            store of address: " << *I << "\n");
          return false;
        }

        // Storing *through* the pointer: safe iff the stored bytes stay
        // within the object.
        if (!IsAccessSafe(Addr: UI, AccessSize: DL.getTypeStoreSize(Ty: I->getOperand(i: 0)->getType()),
                          AllocaPtr, AllocaSize))
          return false;
        break;

      case Instruction::Ret:
        // Information leak.
        return false;

      case Instruction::Call:
      case Instruction::Invoke: {
        const CallBase &CS = *cast<CallBase>(Val: I);

        // Lifetime markers carry no access and are dropped later anyway.
        if (I->isLifetimeStartOrEnd())
          continue;

        if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Val: I)) {
          if (!IsMemIntrinsicSafe(MI, U: UI, AllocaPtr, AllocaSize)) {
            LLVM_DEBUG(dbgs()
                       << "[SafeStack] Unsafe alloca: " << *AllocaPtr
                       << "\n            unsafe memintrinsic: " << *I << "\n");
            return false;
          }
          continue;
        }

        // LLVM 'nocapture' attribute is only set for arguments whose address
        // is not stored, passed around, or used in any other non-trivial way.
        // We assume that passing a pointer to an object as a 'nocapture
        // readnone' argument is safe.
        // FIXME: a more precise solution would require an interprocedural
        // analysis here, which would look at all uses of an argument inside
        // the function being called.
        auto B = CS.arg_begin(), E = CS.arg_end();
        for (const auto *A = B; A != E; ++A)
          if (A->get() == V)
            if (!(CS.doesNotCapture(OpNo: A - B) && (CS.doesNotAccessMemory(OpNo: A - B) ||
                                            CS.doesNotAccessMemory()))) {
              LLVM_DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr
                                << "\n            unsafe call: " << *I << "\n");
              return false;
            }
        continue;
      }

      default:
        // Pointer-producing instructions (GEP, bitcast, PHI, select, ...):
        // follow the derived pointer's uses as well. Visited prevents
        // re-walking shared sub-DAGs and PHI cycles.
        if (Visited.insert(Ptr: I).second)
          WorkList.push_back(Elt: cast<const Instruction>(Val: I));
      }
    }
  }

  // All uses of the alloca are safe, we can place it on the safe stack.
  return true;
}
366
367Value *SafeStack::getStackGuard(IRBuilder<> &IRB, Function &F) {
368 Value *StackGuardVar = TL.getIRStackGuard(IRB, Libcalls);
369 Module *M = F.getParent();
370
371 if (!StackGuardVar) {
372 TL.insertSSPDeclarations(M&: *M, Libcalls);
373 return IRB.CreateIntrinsic(ID: Intrinsic::stackguard, Args: {});
374 }
375
376 return IRB.CreateLoad(Ty: StackPtrTy, Ptr: StackGuardVar, Name: "StackGuard");
377}
378
379void SafeStack::findInsts(Function &F,
380 SmallVectorImpl<AllocaInst *> &StaticAllocas,
381 SmallVectorImpl<AllocaInst *> &DynamicAllocas,
382 SmallVectorImpl<Argument *> &ByValArguments,
383 SmallVectorImpl<Instruction *> &Returns,
384 SmallVectorImpl<Instruction *> &StackRestorePoints) {
385 for (Instruction &I : instructions(F: &F)) {
386 if (auto AI = dyn_cast<AllocaInst>(Val: &I)) {
387 ++NumAllocas;
388
389 uint64_t Size = getStaticAllocaAllocationSize(AI);
390 if (IsSafeStackAlloca(AllocaPtr: AI, AllocaSize: Size))
391 continue;
392
393 if (AI->isStaticAlloca()) {
394 ++NumUnsafeStaticAllocas;
395 StaticAllocas.push_back(Elt: AI);
396 } else {
397 ++NumUnsafeDynamicAllocas;
398 DynamicAllocas.push_back(Elt: AI);
399 }
400 } else if (auto RI = dyn_cast<ReturnInst>(Val: &I)) {
401 if (CallInst *CI = I.getParent()->getTerminatingMustTailCall())
402 Returns.push_back(Elt: CI);
403 else
404 Returns.push_back(Elt: RI);
405 } else if (auto CI = dyn_cast<CallInst>(Val: &I)) {
406 // setjmps require stack restore.
407 if (CI->getCalledFunction() && CI->canReturnTwice())
408 StackRestorePoints.push_back(Elt: CI);
409 } else if (auto LP = dyn_cast<LandingPadInst>(Val: &I)) {
410 // Exception landing pads require stack restore.
411 StackRestorePoints.push_back(Elt: LP);
412 } else if (auto II = dyn_cast<IntrinsicInst>(Val: &I)) {
413 if (II->getIntrinsicID() == Intrinsic::gcroot)
414 report_fatal_error(
415 reason: "gcroot intrinsic not compatible with safestack attribute");
416 }
417 }
418 for (Argument &Arg : F.args()) {
419 if (!Arg.hasByValAttr())
420 continue;
421 uint64_t Size = DL.getTypeStoreSize(Ty: Arg.getParamByValType());
422 if (IsSafeStackAlloca(AllocaPtr: &Arg, AllocaSize: Size))
423 continue;
424
425 ++NumUnsafeByValArguments;
426 ByValArguments.push_back(Elt: &Arg);
427 }
428}
429
430AllocaInst *
431SafeStack::createStackRestorePoints(IRBuilder<> &IRB, Function &F,
432 ArrayRef<Instruction *> StackRestorePoints,
433 Value *StaticTop, bool NeedDynamicTop) {
434 assert(StaticTop && "The stack top isn't set.");
435
436 if (StackRestorePoints.empty())
437 return nullptr;
438
439 // We need the current value of the shadow stack pointer to restore
440 // after longjmp or exception catching.
441
442 // FIXME: On some platforms this could be handled by the longjmp/exception
443 // runtime itself.
444
445 AllocaInst *DynamicTop = nullptr;
446 if (NeedDynamicTop) {
447 // If we also have dynamic alloca's, the stack pointer value changes
448 // throughout the function. For now we store it in an alloca.
449 DynamicTop = IRB.CreateAlloca(Ty: StackPtrTy, /*ArraySize=*/nullptr,
450 Name: "unsafe_stack_dynamic_ptr");
451 IRB.CreateStore(Val: StaticTop, Ptr: DynamicTop);
452 }
453
454 // Restore current stack pointer after longjmp/exception catch.
455 for (Instruction *I : StackRestorePoints) {
456 ++NumUnsafeStackRestorePoints;
457
458 IRB.SetInsertPoint(I->getNextNode());
459 Value *CurrentTop =
460 DynamicTop ? IRB.CreateLoad(Ty: StackPtrTy, Ptr: DynamicTop) : StaticTop;
461 IRB.CreateStore(Val: CurrentTop, Ptr: UnsafeStackPtr);
462 }
463
464 return DynamicTop;
465}
466
/// Before the return \p RI, reload the guard from \p StackGuardSlot, compare
/// it with the original \p StackGuard value, and on mismatch branch to a
/// stack-check-fail call (the new block is unreachable-terminated).
void SafeStack::checkStackGuard(IRBuilder<> &IRB, Function &F, Instruction &RI,
                                AllocaInst *StackGuardSlot, Value *StackGuard) {
  Value *V = IRB.CreateLoad(Ty: StackPtrTy, Ptr: StackGuardSlot);
  Value *Cmp = IRB.CreateICmpNE(LHS: StackGuard, RHS: V);

  // Weight the branch so the failure path is treated as very unlikely.
  auto SuccessProb = BranchProbabilityInfo::getBranchProbStackProtector(IsLikely: true);
  auto FailureProb = BranchProbabilityInfo::getBranchProbStackProtector(IsLikely: false);
  MDNode *Weights = MDBuilder(F.getContext())
                        .createBranchWeights(TrueWeight: SuccessProb.getNumerator(),
                                             FalseWeight: FailureProb.getNumerator());
  // Split before RI; the "then" block ends in unreachable since the
  // check-fail routine does not return. DTU keeps the dominator tree valid.
  Instruction *CheckTerm =
      SplitBlockAndInsertIfThen(Cond: Cmp, SplitBefore: &RI, /* Unreachable */ true, BranchWeights: Weights, DTU);
  IRBuilder<> IRBFail(CheckTerm);
  // FIXME: respect -fsanitize-trap / -ftrap-function here?
  RTLIB::LibcallImpl StackChkFailImpl =
      Libcalls.getLibcallImpl(Call: RTLIB::STACKPROTECTOR_CHECK_FAIL);
  if (StackChkFailImpl == RTLIB::Unsupported) {
    F.getContext().emitError(
        ErrorStr: "no libcall available for stackprotector check fail");
    return;
  }

  StringRef StackChkFailName =
      RTLIB::RuntimeLibcallsInfo::getLibcallImplName(CallImpl: StackChkFailImpl);

  FunctionCallee StackChkFail =
      F.getParent()->getOrInsertFunction(Name: StackChkFailName, RetTy: IRB.getVoidTy());
  IRBFail.CreateCall(Callee: StackChkFail, Args: {});
}
496
497/// We explicitly compute and set the unsafe stack layout for all unsafe
498/// static alloca instructions. We save the unsafe "base pointer" in the
499/// prologue into a local variable and restore it in the epilogue.
/// We explicitly compute and set the unsafe stack layout for all unsafe
/// static alloca instructions. We save the unsafe "base pointer" in the
/// prologue into a local variable and restore it in the epilogue.
Value *SafeStack::moveStaticAllocasToUnsafeStack(
    IRBuilder<> &IRB, Function &F, ArrayRef<AllocaInst *> StaticAllocas,
    ArrayRef<Argument *> ByValArguments, Instruction *BasePointer,
    AllocaInst *StackGuardSlot) {
  if (StaticAllocas.empty() && ByValArguments.empty())
    return BasePointer;

  DIBuilder DIB(*F.getParent());

  // Compute lifetimes of the unsafe allocas so that allocas with disjoint
  // live ranges may share frame slots (when coloring is enabled).
  StackLifetime SSC(F, StaticAllocas, StackLifetime::LivenessType::May);
  static const StackLifetime::LiveRange NoColoringRange(1, true);
  if (ClColoring)
    SSC.run();

  // Lifetime markers are meaningless once the object is no longer an alloca;
  // erase them (and any bitcast operand they leave behind without uses).
  for (const auto *I : SSC.getMarkers()) {
    auto *Op = dyn_cast<Instruction>(Val: I->getOperand(i_nocapture: 1));
    const_cast<IntrinsicInst *>(I)->eraseFromParent();
    // Remove the operand bitcast, too, if it has no more uses left.
    if (Op && Op->use_empty())
      Op->eraseFromParent();
  }

  // Unsafe stack always grows down.
  StackLayout SSL(StackAlignment);
  if (StackGuardSlot) {
    // The guard slot must stay valid for the whole frame.
    SSL.addObject(V: StackGuardSlot, Size: getStaticAllocaAllocationSize(AI: StackGuardSlot),
                  Alignment: StackGuardSlot->getAlign(), Range: SSC.getFullLiveRange());
  }

  for (Argument *Arg : ByValArguments) {
    Type *Ty = Arg->getParamByValType();
    uint64_t Size = DL.getTypeStoreSize(Ty);
    if (Size == 0)
      Size = 1; // Don't create zero-sized stack objects.

    // Ensure the object is properly aligned.
    Align Align = DL.getPrefTypeAlign(Ty);
    if (auto A = Arg->getParamAlign())
      Align = std::max(a: Align, b: *A);
    // Byval copies are live for the whole frame.
    SSL.addObject(V: Arg, Size, Alignment: Align, Range: SSC.getFullLiveRange());
  }

  for (AllocaInst *AI : StaticAllocas) {
    uint64_t Size = getStaticAllocaAllocationSize(AI);
    if (Size == 0)
      Size = 1; // Don't create zero-sized stack objects.

    SSL.addObject(V: AI, Size, Alignment: AI->getAlign(),
                  Range: ClColoring ? SSC.getLiveRange(AI) : NoColoringRange);
  }

  SSL.computeLayout();
  Align FrameAlignment = SSL.getFrameAlignment();

  // FIXME: tell SSL that we start at a less-then-MaxAlignment aligned location
  // (AlignmentSkew).
  if (FrameAlignment > StackAlignment) {
    // Re-align the base pointer according to the max requested alignment.
    IRB.SetInsertPoint(BasePointer->getNextNode());
    BasePointer = IRB.CreateIntrinsic(
        RetTy: StackPtrTy, ID: Intrinsic::ptrmask,
        Args: {BasePointer, ConstantInt::get(Ty: AddrTy, V: ~(FrameAlignment.value() - 1))});
  }

  IRB.SetInsertPoint(BasePointer->getNextNode());

  if (StackGuardSlot) {
    // Offsets are subtracted from the base pointer: the stack grows down.
    unsigned Offset = SSL.getObjectOffset(V: StackGuardSlot);
    Value *Off =
        IRB.CreatePtrAdd(Ptr: BasePointer, Offset: ConstantInt::get(Ty: Int32Ty, V: -Offset));
    Value *NewAI =
        IRB.CreateBitCast(V: Off, DestTy: StackGuardSlot->getType(), Name: "StackGuardSlot");

    // Replace alloc with the new location.
    StackGuardSlot->replaceAllUsesWith(V: NewAI);
    StackGuardSlot->eraseFromParent();
  }

  for (Argument *Arg : ByValArguments) {
    unsigned Offset = SSL.getObjectOffset(V: Arg);
    MaybeAlign Align(SSL.getObjectAlignment(V: Arg));
    Type *Ty = Arg->getParamByValType();

    uint64_t Size = DL.getTypeStoreSize(Ty);
    if (Size == 0)
      Size = 1; // Don't create zero-sized stack objects.

    Value *Off =
        IRB.CreatePtrAdd(Ptr: BasePointer, Offset: ConstantInt::get(Ty: Int32Ty, V: -Offset));
    Value *NewArg = IRB.CreateBitCast(V: Off, DestTy: Arg->getType(),
                                     Name: Arg->getName() + ".unsafe-byval");

    // Replace alloc with the new location.
    replaceDbgDeclare(Address: Arg, NewAddress: BasePointer, Builder&: DIB, DIExprFlags: DIExpression::ApplyOffset,
                      Offset: -Offset);
    Arg->replaceAllUsesWith(V: NewArg);
    // Copy the argument's bytes into its unsafe-stack slot.
    IRB.SetInsertPoint(cast<Instruction>(Val: NewArg)->getNextNode());
    IRB.CreateMemCpy(Dst: Off, DstAlign: Align, Src: Arg, SrcAlign: Arg->getParamAlign(), Size);
  }

  // Allocate space for every unsafe static AllocaInst on the unsafe stack.
  for (AllocaInst *AI : StaticAllocas) {
    IRB.SetInsertPoint(AI);
    unsigned Offset = SSL.getObjectOffset(V: AI);

    replaceDbgDeclare(Address: AI, NewAddress: BasePointer, Builder&: DIB, DIExprFlags: DIExpression::ApplyOffset, Offset: -Offset);
    replaceDbgValueForAlloca(AI, NewAllocaAddress: BasePointer, Builder&: DIB, Offset: -Offset);

    // Replace uses of the alloca with the new location.
    // Insert address calculation close to each use to work around PR27844.
    std::string Name = std::string(AI->getName()) + ".unsafe";
    while (!AI->use_empty()) {
      Use &U = *AI->use_begin();
      Instruction *User = cast<Instruction>(Val: U.getUser());

      // Drop lifetime markers now that this is no longer an alloca.
      // SafeStack has already performed its own stack coloring.
      if (User->isLifetimeStartOrEnd()) {
        User->eraseFromParent();
        continue;
      }

      // For PHI uses the address must be computed in the incoming block,
      // before its terminator; everywhere else, directly before the user.
      Instruction *InsertBefore;
      if (auto *PHI = dyn_cast<PHINode>(Val: User))
        InsertBefore = PHI->getIncomingBlock(U)->getTerminator();
      else
        InsertBefore = User;

      IRBuilder<> IRBUser(InsertBefore);
      Value *Off =
          IRBUser.CreatePtrAdd(Ptr: BasePointer, Offset: ConstantInt::get(Ty: Int32Ty, V: -Offset));
      Value *Replacement =
          IRBUser.CreateAddrSpaceCast(V: Off, DestTy: AI->getType(), Name);

      if (auto *PHI = dyn_cast<PHINode>(Val: User))
        // PHI nodes may have multiple incoming edges from the same BB (why??),
        // all must be updated at once with the same incoming value.
        PHI->setIncomingValueForBlock(BB: PHI->getIncomingBlock(U), V: Replacement);
      else
        U.set(Replacement);
    }

    AI->eraseFromParent();
  }

  // Re-align BasePointer so that our callees would see it aligned as
  // expected.
  // FIXME: no need to update BasePointer in leaf functions.
  unsigned FrameSize = alignTo(Size: SSL.getFrameSize(), A: StackAlignment);

  // Record the unsafe frame size as function metadata.
  MDBuilder MDB(F.getContext());
  SmallVector<Metadata *, 2> Data;
  Data.push_back(Elt: MDB.createString(Str: "unsafe-stack-size"));
  Data.push_back(Elt: MDB.createConstant(C: ConstantInt::get(Ty: Int32Ty, V: FrameSize)));
  MDNode *MD = MDTuple::get(Context&: F.getContext(), MDs: Data);
  F.setMetadata(KindID: LLVMContext::MD_annotation, Node: MD);

  // Update shadow stack pointer in the function epilogue.
  IRB.SetInsertPoint(BasePointer->getNextNode());

  Value *StaticTop =
      IRB.CreatePtrAdd(Ptr: BasePointer, Offset: ConstantInt::get(Ty: Int32Ty, V: -FrameSize),
                       Name: "unsafe_stack_static_top");
  IRB.CreateStore(Val: StaticTop, Ptr: UnsafeStackPtr);
  return StaticTop;
}
666
/// Replace each dynamic alloca with explicit unsafe-stack pointer bumping,
/// and rewrite stacksave/stackrestore to read/write the unsafe stack pointer.
void SafeStack::moveDynamicAllocasToUnsafeStack(
    Function &F, Value *UnsafeStackPtr, AllocaInst *DynamicTop,
    ArrayRef<AllocaInst *> DynamicAllocas) {
  DIBuilder DIB(*F.getParent());

  for (AllocaInst *AI : DynamicAllocas) {
    IRBuilder<> IRB(AI);

    // Compute the new SP value (after AI).
    Value *Size = IRB.CreateAllocationSize(DestTy: AddrTy, AI);
    Value *SP = IRB.CreateLoad(Ty: StackPtrTy, Ptr: UnsafeStackPtr);
    // The unsafe stack grows down: subtract the allocation size.
    SP = IRB.CreatePtrAdd(Ptr: SP, Offset: IRB.CreateNeg(V: Size));

    // Align the SP value to satisfy the AllocaInst and stack alignments.
    auto Align = std::max(a: AI->getAlign(), b: StackAlignment);

    // Round down to the alignment by masking off the low address bits.
    Value *NewTop = IRB.CreateIntrinsic(
        RetTy: StackPtrTy, ID: Intrinsic::ptrmask,
        Args: {SP, ConstantInt::getSigned(Ty: AddrTy, V: ~uint64_t(Align.value() - 1))});

    // Save the stack pointer.
    IRB.CreateStore(Val: NewTop, Ptr: UnsafeStackPtr);
    if (DynamicTop)
      IRB.CreateStore(Val: NewTop, Ptr: DynamicTop);

    Value *NewAI = IRB.CreatePointerCast(V: NewTop, DestTy: AI->getType());
    if (AI->hasName() && isa<Instruction>(Val: NewAI))
      NewAI->takeName(V: AI);

    replaceDbgDeclare(Address: AI, NewAddress: NewAI, Builder&: DIB, DIExprFlags: DIExpression::ApplyOffset, Offset: 0);
    AI->replaceAllUsesWith(V: NewAI);
    AI->eraseFromParent();
  }

  if (!DynamicAllocas.empty()) {
    // Now go through the instructions again, replacing stacksave/stackrestore.
    for (Instruction &I : llvm::make_early_inc_range(Range: instructions(F: &F))) {
      auto *II = dyn_cast<IntrinsicInst>(Val: &I);
      if (!II)
        continue;

      if (II->getIntrinsicID() == Intrinsic::stacksave) {
        // stacksave => load of the current unsafe stack pointer.
        IRBuilder<> IRB(II);
        Instruction *LI = IRB.CreateLoad(Ty: StackPtrTy, Ptr: UnsafeStackPtr);
        LI->takeName(V: II);
        II->replaceAllUsesWith(V: LI);
        II->eraseFromParent();
      } else if (II->getIntrinsicID() == Intrinsic::stackrestore) {
        // stackrestore => store of the saved value back into the pointer.
        IRBuilder<> IRB(II);
        Instruction *SI = IRB.CreateStore(Val: II->getArgOperand(i: 0), Ptr: UnsafeStackPtr);
        SI->takeName(V: II);
        assert(II->use_empty());
        II->eraseFromParent();
      }
    }
  }
}
724
725bool SafeStack::ShouldInlinePointerAddress(CallInst &CI) {
726 Function *Callee = CI.getCalledFunction();
727 if (CI.hasFnAttr(Kind: Attribute::AlwaysInline) &&
728 isInlineViable(Callee&: *Callee).isSuccess())
729 return true;
730 if (Callee->isInterposable() || Callee->hasFnAttribute(Kind: Attribute::NoInline) ||
731 CI.isNoInline())
732 return false;
733 return true;
734}
735
736void SafeStack::TryInlinePointerAddress() {
737 auto *CI = dyn_cast<CallInst>(Val: UnsafeStackPtr);
738 if (!CI)
739 return;
740
741 if(F.hasOptNone())
742 return;
743
744 Function *Callee = CI->getCalledFunction();
745 if (!Callee || Callee->isDeclaration())
746 return;
747
748 if (!ShouldInlinePointerAddress(CI&: *CI))
749 return;
750
751 InlineFunctionInfo IFI;
752 InlineFunction(CB&: *CI, IFI);
753}
754
/// Drive the whole transformation for one function. Returns true iff the IR
/// was changed.
bool SafeStack::run() {
  assert(F.hasFnAttribute(Attribute::SafeStack) &&
         "Can't run SafeStack on a function without the attribute");
  assert(!F.isDeclaration() && "Can't run SafeStack on a function declaration");

  ++NumFunctions;

  SmallVector<AllocaInst *, 16> StaticAllocas;
  SmallVector<AllocaInst *, 4> DynamicAllocas;
  SmallVector<Argument *, 4> ByValArguments;
  SmallVector<Instruction *, 4> Returns;

  // Collect all points where stack gets unwound and needs to be restored
  // This is only necessary because the runtime (setjmp and unwind code) is
  // not aware of the unsafe stack and won't unwind/restore it properly.
  // To work around this problem without changing the runtime, we insert
  // instrumentation to restore the unsafe stack pointer when necessary.
  SmallVector<Instruction *, 4> StackRestorePoints;

  // Find all static and dynamic alloca instructions that must be moved to the
  // unsafe stack, all return instructions and stack restore points.
  findInsts(F, StaticAllocas, DynamicAllocas, ByValArguments, Returns,
            StackRestorePoints);

  if (StaticAllocas.empty() && DynamicAllocas.empty() &&
      ByValArguments.empty() && StackRestorePoints.empty())
    return false; // Nothing to do in this function.

  if (!StaticAllocas.empty() || !DynamicAllocas.empty() ||
      !ByValArguments.empty())
    ++NumUnsafeStackFunctions; // This function has the unsafe stack.

  if (!StackRestorePoints.empty())
    ++NumUnsafeStackRestorePointsFunctions;

  // All prologue code is emitted at the first insertion point of the entry
  // block.
  IRBuilder<> IRB(&F.front(), F.begin()->getFirstInsertionPt());
  // Calls must always have a debug location, or else inlining breaks. So
  // we explicitly set an artificial debug location here.
  if (DISubprogram *SP = F.getSubprogram())
    IRB.SetCurrentDebugLocation(
        DILocation::get(Context&: SP->getContext(), Line: SP->getScopeLine(), Column: 0, Scope: SP));
  if (SafeStackUsePointerAddress) {
    // FIXME: A more correct implementation of SafeStackUsePointerAddress would
    // change the libcall availability in RuntimeLibcallsInfo
    StringRef SafestackPointerAddressName =
        RTLIB::RuntimeLibcallsInfo::getLibcallImplName(
            CallImpl: RTLIB::impl___safestack_pointer_address);

    FunctionCallee Fn = F.getParent()->getOrInsertFunction(
        Name: SafestackPointerAddressName, RetTy: IRB.getPtrTy(AddrSpace: 0));
    UnsafeStackPtr = IRB.CreateCall(Callee: Fn);
  } else {
    UnsafeStackPtr = TL.getSafeStackPointerLocation(IRB, Libcalls);
    if (!UnsafeStackPtr) {
      F.getContext().emitError(
          ErrorStr: "no location available for safestack pointer address");
      // Poison keeps the IR well-formed after the error has been reported.
      UnsafeStackPtr = PoisonValue::get(T: StackPtrTy);
    }
  }

  // Load the current stack pointer (we'll also use it as a base pointer).
  // FIXME: use a dedicated register for it ?
  Instruction *BasePointer =
      IRB.CreateLoad(Ty: StackPtrTy, Ptr: UnsafeStackPtr, isVolatile: false, Name: "unsafe_stack_ptr");
  assert(BasePointer->getType() == StackPtrTy);

  AllocaInst *StackGuardSlot = nullptr;
  // FIXME: implement weaker forms of stack protector.
  if (F.hasFnAttribute(Kind: Attribute::StackProtect) ||
      F.hasFnAttribute(Kind: Attribute::StackProtectStrong) ||
      F.hasFnAttribute(Kind: Attribute::StackProtectReq)) {
    // Store the canary in the prologue and verify it before every return.
    Value *StackGuard = getStackGuard(IRB, F);
    StackGuardSlot = IRB.CreateAlloca(Ty: StackPtrTy, ArraySize: nullptr);
    IRB.CreateStore(Val: StackGuard, Ptr: StackGuardSlot);

    for (Instruction *RI : Returns) {
      IRBuilder<> IRBRet(RI);
      checkStackGuard(IRB&: IRBRet, F, RI&: *RI, StackGuardSlot, StackGuard);
    }
  }

  // The top of the unsafe stack after all unsafe static allocas are
  // allocated.
  Value *StaticTop = moveStaticAllocasToUnsafeStack(
      IRB, F, StaticAllocas, ByValArguments, BasePointer, StackGuardSlot);

  // Safe stack object that stores the current unsafe stack top. It is updated
  // as unsafe dynamic (non-constant-sized) allocas are allocated and freed.
  // This is only needed if we need to restore stack pointer after longjmp
  // or exceptions, and we have dynamic allocations.
  // FIXME: a better alternative might be to store the unsafe stack pointer
  // before setjmp / invoke instructions.
  AllocaInst *DynamicTop = createStackRestorePoints(
      IRB, F, StackRestorePoints, StaticTop, NeedDynamicTop: !DynamicAllocas.empty());

  // Handle dynamic allocas.
  moveDynamicAllocasToUnsafeStack(F, UnsafeStackPtr, DynamicTop,
                                  DynamicAllocas);

  // Restore the unsafe stack pointer before each return.
  for (Instruction *RI : Returns) {
    IRB.SetInsertPoint(RI);
    IRB.CreateStore(Val: BasePointer, Ptr: UnsafeStackPtr);
  }

  TryInlinePointerAddress();

  LLVM_DEBUG(dbgs() << "[SafeStack] safestack applied\n");
  return true;
}
865
866class SafeStackLegacyPass : public FunctionPass {
867 const TargetMachine *TM = nullptr;
868
869public:
870 static char ID; // Pass identification, replacement for typeid..
871
872 SafeStackLegacyPass() : FunctionPass(ID) {}
873
874 void getAnalysisUsage(AnalysisUsage &AU) const override {
875 AU.addRequired<LibcallLoweringInfoWrapper>();
876 AU.addRequired<TargetPassConfig>();
877 AU.addRequired<TargetLibraryInfoWrapperPass>();
878 AU.addRequired<AssumptionCacheTracker>();
879 AU.addPreserved<DominatorTreeWrapperPass>();
880 }
881
882 bool runOnFunction(Function &F) override {
883 LLVM_DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n");
884
885 if (!F.hasFnAttribute(Kind: Attribute::SafeStack)) {
886 LLVM_DEBUG(dbgs() << "[SafeStack] safestack is not requested"
887 " for this function\n");
888 return false;
889 }
890
891 if (F.isDeclaration()) {
892 LLVM_DEBUG(dbgs() << "[SafeStack] function definition"
893 " is not available\n");
894 return false;
895 }
896
897 TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
898 const TargetSubtargetInfo *Subtarget = TM->getSubtargetImpl(F);
899 auto *TL = Subtarget->getTargetLowering();
900 if (!TL)
901 report_fatal_error(reason: "TargetLowering instance is required");
902
903 const LibcallLoweringInfo &Libcalls =
904 getAnalysis<LibcallLoweringInfoWrapper>().getLibcallLowering(
905 M: *F.getParent(), Subtarget: *Subtarget);
906
907 auto *DL = &F.getDataLayout();
908 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
909 auto &ACT = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
910
911 // Compute DT and LI only for functions that have the attribute.
912 // This is only useful because the legacy pass manager doesn't let us
913 // compute analyzes lazily.
914
915 DominatorTree *DT;
916 bool ShouldPreserveDominatorTree;
917 std::optional<DominatorTree> LazilyComputedDomTree;
918
919 // Do we already have a DominatorTree available from the previous pass?
920 // Note that we should *NOT* require it, to avoid the case where we end up
921 // not needing it, but the legacy PM would have computed it for us anyways.
922 if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>()) {
923 DT = &DTWP->getDomTree();
924 ShouldPreserveDominatorTree = true;
925 } else {
926 // Otherwise, we need to compute it.
927 LazilyComputedDomTree.emplace(args&: F);
928 DT = &*LazilyComputedDomTree;
929 ShouldPreserveDominatorTree = false;
930 }
931
932 // Likewise, lazily compute loop info.
933 LoopInfo LI(*DT);
934
935 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
936
937 ScalarEvolution SE(F, TLI, ACT, *DT, LI);
938
939 return SafeStack(F, *TL, Libcalls, *DL,
940 ShouldPreserveDominatorTree ? &DTU : nullptr, SE)
941 .run();
942 }
943};
944
945} // end anonymous namespace
946
947PreservedAnalyses SafeStackPass::run(Function &F,
948 FunctionAnalysisManager &FAM) {
949 LLVM_DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n");
950
951 if (!F.hasFnAttribute(Kind: Attribute::SafeStack)) {
952 LLVM_DEBUG(dbgs() << "[SafeStack] safestack is not requested"
953 " for this function\n");
954 return PreservedAnalyses::all();
955 }
956
957 if (F.isDeclaration()) {
958 LLVM_DEBUG(dbgs() << "[SafeStack] function definition"
959 " is not available\n");
960 return PreservedAnalyses::all();
961 }
962
963 const TargetSubtargetInfo *Subtarget = TM->getSubtargetImpl(F);
964 auto *TL = Subtarget->getTargetLowering();
965
966 auto &DL = F.getDataLayout();
967
968 // preserve DominatorTree
969 auto &DT = FAM.getResult<DominatorTreeAnalysis>(IR&: F);
970 auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(IR&: F);
971
972 auto &MAMProxy = FAM.getResult<ModuleAnalysisManagerFunctionProxy>(IR&: F);
973 const LibcallLoweringModuleAnalysisResult *LibcallLowering =
974 MAMProxy.getCachedResult<LibcallLoweringModuleAnalysis>(IR&: *F.getParent());
975
976 if (!LibcallLowering) {
977 F.getContext().emitError(ErrorStr: "'" + LibcallLoweringModuleAnalysis::name() +
978 "' analysis required");
979 return PreservedAnalyses::all();
980 }
981
982 const LibcallLoweringInfo &Libcalls =
983 LibcallLowering->getLibcallLowering(Subtarget: *Subtarget);
984
985 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
986
987 bool Changed = SafeStack(F, *TL, Libcalls, DL, &DTU, SE).run();
988
989 if (!Changed)
990 return PreservedAnalyses::all();
991 PreservedAnalyses PA;
992 PA.preserve<DominatorTreeAnalysis>();
993 return PA;
994}
995
char SafeStackLegacyPass::ID = 0;

// Register the legacy pass and the analyses it pulls in via getAnalysisUsage.
INITIALIZE_PASS_BEGIN(SafeStackLegacyPass, DEBUG_TYPE,
                      "Safe Stack instrumentation pass", false, false)
INITIALIZE_PASS_DEPENDENCY(LibcallLoweringInfoWrapper)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(SafeStackLegacyPass, DEBUG_TYPE,
                    "Safe Stack instrumentation pass", false, false)
1005
1006FunctionPass *llvm::createSafeStackPass() { return new SafeStackLegacyPass(); }
1007