//===- AArch64SLSHardening.cpp - Harden Straight Line Misspeculation -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass to insert code to mitigate against side channel
// vulnerabilities that may happen under straight line miss-speculation.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/IndirectThunks.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <climits>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "aarch64-sls-hardening"

#define AARCH64_SLS_HARDENING_NAME "AArch64 sls hardening pass"

// Common name prefix of all thunks generated by this pass.
//
// The generic form is
//   __llvm_slsblr_thunk_xN            for BLR thunks
//   __llvm_slsblr_thunk_(aaz|abz)_xN  for BLRAAZ and BLRABZ thunks
//   __llvm_slsblr_thunk_(aa|ab)_xN_xM for BLRAA and BLRAB thunks
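// For example, a "BLR x1" call is redirected to __llvm_slsblr_thunk_x1, and a
// "BLRAA x1, x2" call is redirected to __llvm_slsblr_thunk_aa_x1_x2.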
static constexpr StringRef CommonNamePrefix = "__llvm_slsblr_thunk_";

namespace {

struct ThunkKind {
  enum ThunkKindId {
    ThunkBR,
    ThunkBRAA,
    ThunkBRAB,
    ThunkBRAAZ,
    ThunkBRABZ,
  };

  ThunkKindId Id;
  StringRef NameInfix;
  bool HasXmOperand;
  bool NeedsPAuth;

  // Opcode to perform indirect jump from inside the thunk.
  unsigned BROpcode;

  static const ThunkKind BR;
  static const ThunkKind BRAA;
  static const ThunkKind BRAB;
  static const ThunkKind BRAAZ;
  static const ThunkKind BRABZ;
};

// Set of inserted thunks.
class ThunksSet {
public:
  static constexpr unsigned NumXRegisters = 32;

  // Given Xn register, returns n.
  static unsigned indexOfXReg(Register Xn);
  // Given n, returns Xn register.
  static Register xRegByIndex(unsigned N);

  ThunksSet &operator|=(const ThunksSet &Other) {
    BLRThunks |= Other.BLRThunks;
    BLRAAZThunks |= Other.BLRAAZThunks;
    BLRABZThunks |= Other.BLRABZThunks;
    for (unsigned I = 0; I < NumXRegisters; ++I)
      BLRAAThunks[I] |= Other.BLRAAThunks[I];
    for (unsigned I = 0; I < NumXRegisters; ++I)
      BLRABThunks[I] |= Other.BLRABThunks[I];

    return *this;
  }

  bool get(ThunkKind::ThunkKindId Kind, Register Xn, Register Xm) {
    reg_bitmask_t XnBit = reg_bitmask_t(1) << indexOfXReg(Xn);
    return getBitmask(Kind, Xm) & XnBit;
  }

  void set(ThunkKind::ThunkKindId Kind, Register Xn, Register Xm) {
    reg_bitmask_t XnBit = reg_bitmask_t(1) << indexOfXReg(Xn);
    getBitmask(Kind, Xm) |= XnBit;
  }

private:
  typedef uint32_t reg_bitmask_t;
  static_assert(NumXRegisters <= sizeof(reg_bitmask_t) * CHAR_BIT,
                "Bitmask is not wide enough to hold all Xn registers");

  // Bitmasks representing operands used, with n-th bit corresponding to Xn
  // register operand. If the instruction has a second operand (Xm), an array
  // of bitmasks is used, indexed by m.
  // Indexes corresponding to the forbidden x16, x17 and x30 registers are
  // always unset; for simplicity, there are no holes in the numbering.
  reg_bitmask_t BLRThunks = 0;
  reg_bitmask_t BLRAAZThunks = 0;
  reg_bitmask_t BLRABZThunks = 0;
  reg_bitmask_t BLRAAThunks[NumXRegisters] = {};
  reg_bitmask_t BLRABThunks[NumXRegisters] = {};

  reg_bitmask_t &getBitmask(ThunkKind::ThunkKindId Kind, Register Xm) {
    switch (Kind) {
    case ThunkKind::ThunkBR:
      return BLRThunks;
    case ThunkKind::ThunkBRAAZ:
      return BLRAAZThunks;
    case ThunkKind::ThunkBRABZ:
      return BLRABZThunks;
    case ThunkKind::ThunkBRAA:
      return BLRAAThunks[indexOfXReg(Xm)];
    case ThunkKind::ThunkBRAB:
      return BLRABThunks[indexOfXReg(Xm)];
    }
    llvm_unreachable("Unknown ThunkKindId enum");
  }
};

struct SLSHardeningInserter : ThunkInserter<SLSHardeningInserter, ThunksSet> {
public:
  const char *getThunkPrefix() { return CommonNamePrefix.data(); }
  bool mayUseThunk(const MachineFunction &MF) {
    ComdatThunks &= !MF.getSubtarget<AArch64Subtarget>().hardenSlsNoComdat();
    // We are inserting barriers aside from thunk calls, so
    // check hardenSlsRetBr() as well.
    return MF.getSubtarget<AArch64Subtarget>().hardenSlsBlr() ||
           MF.getSubtarget<AArch64Subtarget>().hardenSlsRetBr();
  }
  ThunksSet insertThunks(MachineModuleInfo &MMI, MachineFunction &MF,
                         ThunksSet ExistingThunks);
  void populateThunk(MachineFunction &MF);

private:
  bool ComdatThunks = true;

  bool hardenReturnsAndBRs(MachineModuleInfo &MMI, MachineBasicBlock &MBB);
  bool hardenBLRs(MachineModuleInfo &MMI, MachineBasicBlock &MBB,
                  ThunksSet &Thunks);

  void convertBLRToBL(MachineModuleInfo &MMI, MachineBasicBlock &MBB,
                      MachineBasicBlock::instr_iterator MBBI,
                      ThunksSet &Thunks);
};

} // end anonymous namespace

const ThunkKind ThunkKind::BR = {ThunkBR, "", /*HasXmOperand=*/false,
                                 /*NeedsPAuth=*/false, AArch64::BR};
const ThunkKind ThunkKind::BRAA = {ThunkBRAA, "aa_", /*HasXmOperand=*/true,
                                   /*NeedsPAuth=*/true, AArch64::BRAA};
const ThunkKind ThunkKind::BRAB = {ThunkBRAB, "ab_", /*HasXmOperand=*/true,
                                   /*NeedsPAuth=*/true, AArch64::BRAB};
const ThunkKind ThunkKind::BRAAZ = {ThunkBRAAZ, "aaz_", /*HasXmOperand=*/false,
                                    /*NeedsPAuth=*/true, AArch64::BRAAZ};
const ThunkKind ThunkKind::BRABZ = {ThunkBRABZ, "abz_", /*HasXmOperand=*/false,
                                    /*NeedsPAuth=*/true, AArch64::BRABZ};

// Returns thunk kind to emit, or nullptr if not a BLR* instruction.
static const ThunkKind *getThunkKind(unsigned OriginalOpcode) {
  switch (OriginalOpcode) {
  case AArch64::BLR:
  case AArch64::BLRNoIP:
    return &ThunkKind::BR;
  case AArch64::BLRAA:
    return &ThunkKind::BRAA;
  case AArch64::BLRAB:
    return &ThunkKind::BRAB;
  case AArch64::BLRAAZ:
    return &ThunkKind::BRAAZ;
  case AArch64::BLRABZ:
    return &ThunkKind::BRABZ;
  }
  return nullptr;
}

static bool isBLR(const MachineInstr &MI) {
  return getThunkKind(MI.getOpcode()) != nullptr;
}

unsigned ThunksSet::indexOfXReg(Register Reg) {
  assert(AArch64::GPR64RegClass.contains(Reg));
  assert(Reg != AArch64::X16 && Reg != AArch64::X17 && Reg != AArch64::LR);

  // Most Xn registers have consecutive ids, except for FP and XZR.
  unsigned Result = (unsigned)Reg - (unsigned)AArch64::X0;
  if (Reg == AArch64::FP)
    Result = 29;
  else if (Reg == AArch64::XZR)
    Result = 31;

  assert(Result < NumXRegisters && "Internal register numbering changed");
  assert(AArch64::GPR64RegClass.getRegister(Result).id() == Reg &&
         "Internal register numbering changed");

  return Result;
}

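// For example, xRegByIndex(0) is X0, xRegByIndex(29) is FP (alias of x29) and
// xRegByIndex(31) is XZR, mirroring the numbering used by indexOfXReg() above.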
Register ThunksSet::xRegByIndex(unsigned N) {
  return AArch64::GPR64RegClass.getRegister(N);
}

static void insertSpeculationBarrier(const AArch64Subtarget *ST,
                                     MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     DebugLoc DL,
                                     bool AlwaysUseISBDSB = false) {
  assert(MBBI != MBB.begin() &&
         "Must not insert SpeculationBarrierEndBB as only instruction in MBB.");
  assert(std::prev(MBBI)->isBarrier() &&
         "SpeculationBarrierEndBB must only follow unconditional control flow "
         "instructions.");
  assert(std::prev(MBBI)->isTerminator() &&
         "SpeculationBarrierEndBB must only follow terminators.");
  const TargetInstrInfo *TII = ST->getInstrInfo();
  unsigned BarrierOpc = ST->hasSB() && !AlwaysUseISBDSB
                            ? AArch64::SpeculationBarrierSBEndBB
                            : AArch64::SpeculationBarrierISBDSBEndBB;
  if (MBBI == MBB.end() ||
      (MBBI->getOpcode() != AArch64::SpeculationBarrierSBEndBB &&
       MBBI->getOpcode() != AArch64::SpeculationBarrierISBDSBEndBB))
    BuildMI(MBB, MBBI, DL, TII->get(BarrierOpc));
}
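// Note: when the barrier pseudos are expanded later, SpeculationBarrierSBEndBB
// becomes a single SB instruction, while SpeculationBarrierISBDSBEndBB becomes
// a DSB SY followed by an ISB.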

ThunksSet SLSHardeningInserter::insertThunks(MachineModuleInfo &MMI,
                                             MachineFunction &MF,
                                             ThunksSet ExistingThunks) {
  const AArch64Subtarget *ST = &MF.getSubtarget<AArch64Subtarget>();

  for (auto &MBB : MF) {
    if (ST->hardenSlsRetBr())
      hardenReturnsAndBRs(MMI, MBB);
    if (ST->hardenSlsBlr())
      hardenBLRs(MMI, MBB, ExistingThunks);
  }
  return ExistingThunks;
}

bool SLSHardeningInserter::hardenReturnsAndBRs(MachineModuleInfo &MMI,
                                               MachineBasicBlock &MBB) {
  const AArch64Subtarget *ST =
      &MBB.getParent()->getSubtarget<AArch64Subtarget>();
  bool Modified = false;
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator(), E = MBB.end();
  MachineBasicBlock::iterator NextMBBI;
  for (; MBBI != E; MBBI = NextMBBI) {
    MachineInstr &MI = *MBBI;
    NextMBBI = std::next(MBBI);
    if (MI.isReturn() || isIndirectBranchOpcode(MI.getOpcode())) {
      assert(MI.isTerminator());
      insertSpeculationBarrier(ST, MBB, std::next(MBBI), MI.getDebugLoc());
      Modified = true;
    }
  }
  return Modified;
}
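// For example, a block that previously ended in "ret" now ends in
// "ret; dsb sy; isb" (or "ret; sb" when the SB extension is available).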

// Currently, the longest possible thunk name is
//   __llvm_slsblr_thunk_aa_xNN_xMM
// which is 31 characters (without the '\0' character).
static SmallString<32> createThunkName(const ThunkKind &Kind, Register Xn,
                                       Register Xm) {
  unsigned N = ThunksSet::indexOfXReg(Xn);
  if (!Kind.HasXmOperand)
    return formatv("{0}{1}x{2}", CommonNamePrefix, Kind.NameInfix, N);

  unsigned M = ThunksSet::indexOfXReg(Xm);
  return formatv("{0}{1}x{2}_x{3}", CommonNamePrefix, Kind.NameInfix, N, M);
}
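// For example, createThunkName(ThunkKind::BR, AArch64::X1,
// AArch64::NoRegister) yields "__llvm_slsblr_thunk_x1", while
// createThunkName(ThunkKind::BRAA, AArch64::X1, AArch64::X2) yields
// "__llvm_slsblr_thunk_aa_x1_x2".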

static std::tuple<const ThunkKind &, Register, Register>
parseThunkName(StringRef ThunkName) {
  assert(ThunkName.starts_with(CommonNamePrefix) &&
         "Should be filtered out by ThunkInserter");
  // Thunk name suffix, such as "x1" or "aa_x2_x3".
  StringRef NameSuffix = ThunkName.drop_front(CommonNamePrefix.size());

  // Parse thunk kind based on thunk name infix.
  const ThunkKind &Kind = *StringSwitch<const ThunkKind *>(NameSuffix)
                               .StartsWith("aa_", &ThunkKind::BRAA)
                               .StartsWith("ab_", &ThunkKind::BRAB)
                               .StartsWith("aaz_", &ThunkKind::BRAAZ)
                               .StartsWith("abz_", &ThunkKind::BRABZ)
                               .Default(&ThunkKind::BR);

  auto ParseRegName = [](StringRef Name) {
    unsigned N;

    assert(Name.starts_with("x") && "xN register name expected");
    bool Fail = Name.drop_front(1).getAsInteger(/*Radix=*/10, N);
    assert(!Fail && N < ThunksSet::NumXRegisters && "Unexpected register");
    (void)Fail;

    return ThunksSet::xRegByIndex(N);
  };

  // For example, "x1" or "x2_x3".
  StringRef RegsStr = NameSuffix.drop_front(Kind.NameInfix.size());
  StringRef XnStr, XmStr;
  std::tie(XnStr, XmStr) = RegsStr.split('_');

  // Parse register operands.
  Register Xn = ParseRegName(XnStr);
  Register Xm = Kind.HasXmOperand ? ParseRegName(XmStr) : AArch64::NoRegister;

  return std::make_tuple(std::ref(Kind), Xn, Xm);
}
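// For example, parseThunkName("__llvm_slsblr_thunk_aa_x1_x2") returns
// (ThunkKind::BRAA, AArch64::X1, AArch64::X2); this is the inverse of
// createThunkName() above.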

void SLSHardeningInserter::populateThunk(MachineFunction &MF) {
  assert(MF.getFunction().hasComdat() == ComdatThunks &&
         "ComdatThunks value changed since MF creation");
  Register Xn, Xm;
  auto KindAndRegs = parseThunkName(MF.getName());
  const ThunkKind &Kind = std::get<0>(KindAndRegs);
  std::tie(std::ignore, Xn, Xm) = KindAndRegs;

  const TargetInstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();

  // Depending on whether this pass is in the same FunctionPassManager as the
  // IR->MIR conversion, the thunk may be completely empty, or contain a single
  // basic block with a single return instruction. Normalise it to contain a
  // single empty basic block.
  if (MF.size() == 1) {
    assert(MF.front().size() == 1);
    assert(MF.front().front().getOpcode() == AArch64::RET);
    MF.front().erase(MF.front().begin());
  } else {
    assert(MF.size() == 0);
    MF.push_back(MF.CreateMachineBasicBlock());
  }

  MachineBasicBlock *Entry = &MF.front();
  Entry->clear();

  // These thunks need to consist of the following instructions:
  //   __llvm_slsblr_thunk_...:
  //       MOV x16, xN ; BR* instructions are not compatible with "BTI c"
  //                   ; branch target unless xN is x16 or x17.
  //       BR* ...     ; One of: BR x16
  //                   ;         BRA(A|B) x16, xM
  //                   ;         BRA(A|B)Z x16
  //       barrierInsts
  Entry->addLiveIn(Xn);
  // MOV X16, Reg == ORR X16, XZR, Reg, LSL #0
  BuildMI(Entry, DebugLoc(), TII->get(AArch64::ORRXrs), AArch64::X16)
      .addReg(AArch64::XZR)
      .addReg(Xn)
      .addImm(0);
  MachineInstrBuilder Builder =
      BuildMI(Entry, DebugLoc(), TII->get(Kind.BROpcode)).addReg(AArch64::X16);
  if (Xm != AArch64::NoRegister) {
    Entry->addLiveIn(Xm);
    Builder.addReg(Xm);
  }

  // Make sure the thunks do not make use of the SB extension in case there is
  // a function somewhere that will call to it and that, for some reason, has
  // the SB extension disabled locally, even though it is enabled for the
  // module otherwise. Therefore set AlwaysUseISBDSB to true.
  insertSpeculationBarrier(&MF.getSubtarget<AArch64Subtarget>(), *Entry,
                           Entry->end(), DebugLoc(), true /*AlwaysUseISBDSB*/);
}
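// For example, the populated body of __llvm_slsblr_thunk_aa_x1_x2 is:
//   mov  x16, x1
//   braa x16, x2
//   dsb  sy
//   isb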

void SLSHardeningInserter::convertBLRToBL(
    MachineModuleInfo &MMI, MachineBasicBlock &MBB,
    MachineBasicBlock::instr_iterator MBBI, ThunksSet &Thunks) {
  // Transform a BLR* instruction (one of BLR, BLRAA/BLRAB or BLRAAZ/BLRABZ) to
  // a BL to the thunk containing BR, BRAA/BRAB or BRAAZ/BRABZ, respectively.
  //
  // Before:
  //   |-----------------------------|
  //   | ...                         |
  //   | instI                       |
  //   | BLR* xN or BLR* xN, xM      |
  //   | instJ                       |
  //   | ...                         |
  //   |-----------------------------|
  //
  // After:
  //   |-----------------------------|
  //   | ...                         |
  //   | instI                       |
  //   | BL __llvm_slsblr_thunk_...  |
  //   | instJ                       |
  //   | ...                         |
  //   |-----------------------------|
  //
  //   __llvm_slsblr_thunk_...:
  //   |-----------------------------|
  //   | MOV x16, xN                 |
  //   | BR* x16 or BR* x16, xM      |
  //   | barrierInsts                |
  //   |-----------------------------|
  //
  // This function needs to transform the BLR* instruction into a BL with the
  // correct thunk name, lazily creating the thunk if it does not exist yet.
  //
  // Since linkers are allowed to clobber X16 and X17 on function calls, the
  // above mitigation only works if the original BLR* instruction had neither
  // X16 nor X17 as one of its operands. Code generation before must make sure
  // that no such BLR* instruction was produced if the mitigation is enabled.

  MachineInstr &BLR = *MBBI;
  assert(isBLR(BLR));
  const ThunkKind &Kind = *getThunkKind(BLR.getOpcode());

  unsigned NumRegOperands = Kind.HasXmOperand ? 2 : 1;
  assert(BLR.getNumExplicitOperands() == NumRegOperands &&
         "Expected one or two register inputs");
  Register Xn = BLR.getOperand(0).getReg();
  Register Xm =
      Kind.HasXmOperand ? BLR.getOperand(1).getReg() : AArch64::NoRegister;

  DebugLoc DL = BLR.getDebugLoc();

  MachineFunction &MF = *MBBI->getMF();
  MCContext &Context = MBB.getParent()->getContext();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();

  auto ThunkName = createThunkName(Kind, Xn, Xm);
  MCSymbol *Sym = Context.getOrCreateSymbol(ThunkName);

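  // Note: the authenticated variants request the "+pauth" target attribute for
  // the generated thunk (Kind.NeedsPAuth), since the BRA* instruction placed
  // in the thunk body requires the PAuth feature.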
  if (!Thunks.get(Kind.Id, Xn, Xm)) {
    StringRef TargetAttrs = Kind.NeedsPAuth ? "+pauth" : "";
    Thunks.set(Kind.Id, Xn, Xm);
    createThunkFunction(MMI, ThunkName, ComdatThunks, TargetAttrs);
  }

  MachineInstr *BL = BuildMI(MBB, MBBI, DL, TII->get(AArch64::BL)).addSym(Sym);

  // Now copy the implicit operands from BLR to BL and copy other necessary
  // info.
  // However, both BLR and BL instructions implicitly use SP and implicitly
  // define LR. Blindly copying implicit operands would result in the SP and LR
  // operands being present multiple times. While this may not be too much of
  // an issue, let's avoid that for cleanliness by removing those implicit
  // operands from the BL created above before we copy over all implicit
  // operands from the BLR.
  int ImpLROpIdx = -1;
  int ImpSPOpIdx = -1;
  for (unsigned OpIdx = BL->getNumExplicitOperands();
       OpIdx < BL->getNumOperands(); OpIdx++) {
    MachineOperand Op = BL->getOperand(OpIdx);
    if (!Op.isReg())
      continue;
    if (Op.getReg() == AArch64::LR && Op.isDef())
      ImpLROpIdx = OpIdx;
    if (Op.getReg() == AArch64::SP && !Op.isDef())
      ImpSPOpIdx = OpIdx;
  }
  assert(ImpLROpIdx != -1);
  assert(ImpSPOpIdx != -1);
  int FirstOpIdxToRemove = std::max(ImpLROpIdx, ImpSPOpIdx);
  int SecondOpIdxToRemove = std::min(ImpLROpIdx, ImpSPOpIdx);
  BL->removeOperand(FirstOpIdxToRemove);
  BL->removeOperand(SecondOpIdxToRemove);
  // Now copy over the implicit operands from the original BLR
  BL->copyImplicitOps(MF, BLR);
  MF.moveCallSiteInfo(&BLR, BL);
  // Also add the register operands of the original BLR* instruction
  // as being used in the called thunk.
  for (unsigned OpIdx = 0; OpIdx < NumRegOperands; ++OpIdx) {
    MachineOperand &Op = BLR.getOperand(OpIdx);
    BL->addOperand(MachineOperand::CreateReg(Op.getReg(), /*isDef=*/false,
                                             /*isImp=*/true, Op.isKill()));
  }
  // Remove BLR instruction
  MBB.erase(MBBI);
}

bool SLSHardeningInserter::hardenBLRs(MachineModuleInfo &MMI,
                                      MachineBasicBlock &MBB,
                                      ThunksSet &Thunks) {
  bool Modified = false;
  MachineBasicBlock::instr_iterator MBBI = MBB.instr_begin(),
                                    E = MBB.instr_end();
  MachineBasicBlock::instr_iterator NextMBBI;
  for (; MBBI != E; MBBI = NextMBBI) {
    MachineInstr &MI = *MBBI;
    NextMBBI = std::next(MBBI);
    if (isBLR(MI)) {
      convertBLRToBL(MMI, MBB, MBBI, Thunks);
      Modified = true;
    }
  }
  return Modified;
}

namespace {
class AArch64SLSHardening : public ThunkInserterPass<SLSHardeningInserter> {
public:
  static char ID;

  AArch64SLSHardening() : ThunkInserterPass(ID) {}

  StringRef getPassName() const override { return AARCH64_SLS_HARDENING_NAME; }
};

} // end anonymous namespace

char AArch64SLSHardening::ID = 0;

INITIALIZE_PASS(AArch64SLSHardening, "aarch64-sls-hardening",
                AARCH64_SLS_HARDENING_NAME, false, false)

FunctionPass *llvm::createAArch64SLSHardeningPass() {
  return new AArch64SLSHardening();
}