//===------ BPFPreserveStaticOffset.cpp -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// TLDR: replaces llvm.preserve.static.offset + GEP + load / store
//       with llvm.bpf.getelementptr.and.load / store
//
// This file implements the BPFPreserveStaticOffsetPass transformation.
// This transformation addresses two BPF verifier specific issues:
//
// (a) Access to the fields of some structural types is allowed only
//     using load and store instructions with static immediate offsets.
//
//     Examples of such types are `struct __sk_buff` and `struct
//     bpf_sock_ops`. This is so because offsets of the fields of
//     these structures do not match real offsets in the running
//     kernel. During BPF program load LDX and STX instructions
//     referring to the fields of these types are rewritten so that
//     offsets match real offsets. For this rewrite to happen field
//     offsets have to be encoded as immediate operands of the
//     instructions.
//
//     See the kernel/bpf/verifier.c:convert_ctx_access function in
//     the Linux kernel source tree for details.
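//
//     For instance (illustrative), for the following C code:
//
//       __u32 x = skb->len; /* skb points to struct __sk_buff */
//
//     the load must be a single LDX with the offset of the `len`
//     field encoded as an immediate, so that the kernel can patch it
//     to the real offset during program load.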
//
// (b) Pointers to context parameters of BPF programs must not be
//     modified before access.
//
//     During BPF program verification a tag PTR_TO_CTX is tracked for
//     register values. If a register with such a tag is modified, the
//     BPF program is no longer allowed to read or write memory using
//     this register. See the kernel/bpf/verifier.c:check_mem_access
//     function in the Linux kernel source tree for details.
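//
//     For instance (illustrative), the verifier rejects:
//
//       char *p = (char *)skb + 4; /* modifies a PTR_TO_CTX value */
//       __u32 x = *(__u32 *)p;     /* access via modified pointer  */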
//
// The following sequence of IR instructions:
//
//     %x = getelementptr %ptr, %constant_offset
//     %y = load %x
//
// is translated as a single machine instruction:
//
//     LDW %ptr, %constant_offset
//
// In order for cases (a) and (b) to work the sequence %x-%y above has
// to be preserved by the IR passes.
//
// However, several optimization passes might sink the `load`
// instruction or hoist the `getelementptr` instruction so that the
// instructions are no longer in sequence. Examples of such passes
// are: SimplifyCFGPass, InstCombinePass, GVNPass.
// After such a modification the verifier would reject the BPF program.
//
// To avoid this issue patterns like (load/store (getelementptr ...))
// are replaced by calls to BPF specific intrinsic functions:
// - llvm.bpf.getelementptr.and.load
// - llvm.bpf.getelementptr.and.store
//
// These calls are lowered back to (load/store (getelementptr ...))
// by the BPFCheckAndAdjustIR pass right before the translation from
// IR to machine instructions.
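//
// For illustration, a sketch of the rewritten form (names and
// constant argument values below are illustrative; see fillCommonArgs
// below for the exact encoding):
//
//     %x = getelementptr inbounds %struct.foo, ptr %p, i32 0, i32 1
//     %y = load i32, ptr %x
//
// becomes a call similar to:
//
//     %y = call i32 @llvm.bpf.getelementptr.and.load.i32(
//              ptr %p, i1 false, i8 0, i8 1, i8 2, i1 true, i32 0, i32 1)
//
// where the pointer is followed by the volatile flag, atomic
// ordering, sync scope, alignment (as log2) and inbounds flag, and
// then by the GEP indices.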
//
// The transformation is split into the following steps:
// - When IR is generated from the AST, calls to the intrinsic function
//   llvm.preserve.static.offset are inserted.
// - BPFPreserveStaticOffsetPass is executed as early as possible
//   with AllowPartial set to true; this handles marked GEP chains
//   with constant offsets.
// - BPFPreserveStaticOffsetPass is executed at ScalarOptimizerLateEPCallback
//   with AllowPartial set to false; this handles marked GEP chains
//   with offsets that became constant after loop unrolling, e.g.
//   to handle the following code:
//
//       struct context { int x[4]; } __attribute__((preserve_static_offset));
//
//       struct context *ctx = ...;
//       #pragma clang loop unroll(full)
//       for (int i = 0; i < 4; ++i)
//         foo(ctx->x[i]);
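//
//   After full unrolling each iteration accesses ctx->x[i] with a
//   constant i (ctx->x[0] ... ctx->x[3]), so the marked GEP chains
//   become foldable by the late pass run.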
//
// The early BPFPreserveStaticOffsetPass run is necessary to allow
// additional GVN / CSE opportunities after function inlining.
// The relative order of optimizations applied to a function is:
// - early stage (1)
// - ...
// - function inlining (2)
// - ...
// - loop unrolling
// - ...
// - ScalarOptimizerLateEPCallback (3)
//
// When function A is inlined into function B all optimizations for A
// are already done, while some passes remain for B. If
// BPFPreserveStaticOffsetPass is run at (3) but not at (1),
// the code after (2) would contain a mix of
// (load (gep %p)) and (gep.and.load %p) usages:
// - the (load (gep %p)) would come from the calling function;
// - the (gep.and.load %p) would come from the callee function;
// thus hampering the CSE / GVN passes run after inlining.

#include "BPF.h"
#include "BPFCORE.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsBPF.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"

#define DEBUG_TYPE "bpf-preserve-static-offset"

using namespace llvm;

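// Index of the first GEP index operand in the argument lists of the
// bpf.getelementptr.and.load / bpf.getelementptr.and.store intrinsic
// calls produced below. Loads take (ptr, volatile, ordering,
// syncscope, align-shift, inbounds, idx...); stores carry the stored
// value as operand 0, shifting everything by one (see fillCommonArgs).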
static const unsigned GepAndLoadFirstIdxArg = 6;
static const unsigned GepAndStoreFirstIdxArg = 7;

static bool isIntrinsicCall(Value *I, Intrinsic::ID Id) {
  if (auto *Call = dyn_cast<CallInst>(I))
    if (Function *Func = Call->getCalledFunction())
      return Func->getIntrinsicID() == Id;
  return false;
}

static bool isPreserveStaticOffsetCall(Value *I) {
  return isIntrinsicCall(I, Intrinsic::preserve_static_offset);
}

static CallInst *isGEPAndLoad(Value *I) {
  if (isIntrinsicCall(I, Intrinsic::bpf_getelementptr_and_load))
    return cast<CallInst>(I);
  return nullptr;
}

static CallInst *isGEPAndStore(Value *I) {
  if (isIntrinsicCall(I, Intrinsic::bpf_getelementptr_and_store))
    return cast<CallInst>(I);
  return nullptr;
}

template <class T = Instruction>
static DILocation *mergeDILocations(SmallVector<T *> &Insns) {
  DILocation *Merged = (*Insns.begin())->getDebugLoc();
  for (T *I : Insns)
    Merged = DILocation::getMergedLocation(Merged, I->getDebugLoc());
  return Merged;
}

static CallInst *makeIntrinsicCall(Module *M,
                                   Intrinsic::BPFIntrinsics Intrinsic,
                                   ArrayRef<Type *> Types,
                                   ArrayRef<Value *> Args) {

  Function *Fn = Intrinsic::getDeclaration(M, Intrinsic, Types);
  return CallInst::Create(Fn, Args);
}

static void setParamElementType(CallInst *Call, unsigned ArgNo, Type *Type) {
  LLVMContext &C = Call->getContext();
  Call->addParamAttr(ArgNo, Attribute::get(C, Attribute::ElementType, Type));
}

static void setParamReadNone(CallInst *Call, unsigned ArgNo) {
  LLVMContext &C = Call->getContext();
  Call->addParamAttr(ArgNo, Attribute::get(C, Attribute::ReadNone));
}

static void setParamReadOnly(CallInst *Call, unsigned ArgNo) {
  LLVMContext &C = Call->getContext();
  Call->addParamAttr(ArgNo, Attribute::get(C, Attribute::ReadOnly));
}

static void setParamWriteOnly(CallInst *Call, unsigned ArgNo) {
  LLVMContext &C = Call->getContext();
  Call->addParamAttr(ArgNo, Attribute::get(C, Attribute::WriteOnly));
}

namespace {
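// State accumulated while merging a chain of GEP instructions into a
// single equivalent GEP:
// - InBounds is the conjunction of the members' inbounds flags;
// - SourceElementType and Indices describe the merged GEP;
// - Members lists the original GEP instructions in the chain.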
struct GEPChainInfo {
  bool InBounds;
  Type *SourceElementType;
  SmallVector<Value *> Indices;
  SmallVector<GetElementPtrInst *> Members;

  GEPChainInfo() { reset(); }

  void reset() {
    InBounds = true;
    SourceElementType = nullptr;
    Indices.clear();
    Members.clear();
  }
};
} // Anonymous namespace

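// Pack the operands common to the gep.and.load / gep.and.store
// intrinsics: the base pointer, volatile flag, atomic ordering, sync
// scope, alignment (encoded as its log2) and inbounds flag, followed
// by the flattened GEP indices. T is expected to be LoadInst or
// StoreInst.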
template <class T = std::disjunction<LoadInst, StoreInst>>
static void fillCommonArgs(LLVMContext &C, SmallVector<Value *> &Args,
                           GEPChainInfo &GEP, T *Insn) {
  Type *Int8Ty = Type::getInt8Ty(C);
  Type *Int1Ty = Type::getInt1Ty(C);
  // Implementation of Align guarantees that ShiftValue < 64
  unsigned AlignShiftValue = Log2_64(Insn->getAlign().value());
  Args.push_back(GEP.Members[0]->getPointerOperand());
  Args.push_back(ConstantInt::get(Int1Ty, Insn->isVolatile()));
  Args.push_back(ConstantInt::get(Int8Ty, (unsigned)Insn->getOrdering()));
  Args.push_back(ConstantInt::get(Int8Ty, (unsigned)Insn->getSyncScopeID()));
  Args.push_back(ConstantInt::get(Int8Ty, AlignShiftValue));
  Args.push_back(ConstantInt::get(Int1Ty, GEP.InBounds));
  Args.append(GEP.Indices.begin(), GEP.Indices.end());
}

static Instruction *makeGEPAndLoad(Module *M, GEPChainInfo &GEP,
                                   LoadInst *Load) {
  SmallVector<Value *> Args;
  fillCommonArgs(M->getContext(), Args, GEP, Load);
  CallInst *Call = makeIntrinsicCall(M, Intrinsic::bpf_getelementptr_and_load,
                                     {Load->getType()}, Args);
  setParamElementType(Call, 0, GEP.SourceElementType);
  Call->applyMergedLocation(mergeDILocations(GEP.Members),
                            Load->getDebugLoc());
  Call->setName((*GEP.Members.rbegin())->getName());
  if (Load->isUnordered()) {
    Call->setOnlyReadsMemory();
    Call->setOnlyAccessesArgMemory();
    setParamReadOnly(Call, 0);
  }
  for (unsigned I = GepAndLoadFirstIdxArg; I < Args.size(); ++I)
    Call->addParamAttr(I, Attribute::ImmArg);
  Call->setAAMetadata(Load->getAAMetadata());
  return Call;
}

static Instruction *makeGEPAndStore(Module *M, GEPChainInfo &GEP,
                                    StoreInst *Store) {
  SmallVector<Value *> Args;
  Args.push_back(Store->getValueOperand());
  fillCommonArgs(M->getContext(), Args, GEP, Store);
  CallInst *Call =
      makeIntrinsicCall(M, Intrinsic::bpf_getelementptr_and_store,
                        {Store->getValueOperand()->getType()}, Args);
  setParamElementType(Call, 1, GEP.SourceElementType);
  if (Store->getValueOperand()->getType()->isPointerTy())
    setParamReadNone(Call, 0);
  Call->applyMergedLocation(mergeDILocations(GEP.Members),
                            Store->getDebugLoc());
  if (Store->isUnordered()) {
    Call->setOnlyWritesMemory();
    Call->setOnlyAccessesArgMemory();
    setParamWriteOnly(Call, 1);
  }
  for (unsigned I = GepAndStoreFirstIdxArg; I < Args.size(); ++I)
    Call->addParamAttr(I, Attribute::ImmArg);
  Call->setAAMetadata(Store->getAAMetadata());
  return Call;
}

static unsigned getOperandAsUnsigned(CallInst *Call, unsigned ArgNo) {
  if (auto *Int = dyn_cast<ConstantInt>(Call->getOperand(ArgNo)))
    return Int->getValue().getZExtValue();
  std::string Report;
  raw_string_ostream ReportS(Report);
  ReportS << "Expecting ConstantInt as argument #" << ArgNo << " of " << *Call
          << "\n";
  report_fatal_error(StringRef(Report));
}

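// Reconstruct a GEP from a gep.and.load / gep.and.store call. Delta
// accounts for the position of the pointer operand within the call:
// 0 for loads, 1 for stores (operand 0 of a store call is the stored
// value).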
static GetElementPtrInst *reconstructGEP(CallInst *Call, int Delta) {
  SmallVector<Value *> Indices;
  Indices.append(Call->data_operands_begin() + 6 + Delta,
                 Call->data_operands_end());
  Type *GEPPointeeType = Call->getParamElementType(Delta);
  auto *GEP =
      GetElementPtrInst::Create(GEPPointeeType, Call->getOperand(Delta),
                                ArrayRef<Value *>(Indices), Call->getName());
  GEP->setIsInBounds(getOperandAsUnsigned(Call, 5 + Delta));
  return GEP;
}

template <class T = std::disjunction<LoadInst, StoreInst>>
static void reconstructCommon(CallInst *Call, GetElementPtrInst *GEP, T *Insn,
                              int Delta) {
  Insn->setVolatile(getOperandAsUnsigned(Call, 1 + Delta));
  Insn->setOrdering((AtomicOrdering)getOperandAsUnsigned(Call, 2 + Delta));
  Insn->setSyncScopeID(getOperandAsUnsigned(Call, 3 + Delta));
  unsigned AlignShiftValue = getOperandAsUnsigned(Call, 4 + Delta);
  Insn->setAlignment(Align(1ULL << AlignShiftValue));
  GEP->setDebugLoc(Call->getDebugLoc());
  Insn->setDebugLoc(Call->getDebugLoc());
  Insn->setAAMetadata(Call->getAAMetadata());
}

std::pair<GetElementPtrInst *, LoadInst *>
BPFPreserveStaticOffsetPass::reconstructLoad(CallInst *Call) {
  GetElementPtrInst *GEP = reconstructGEP(Call, 0);
  Type *ReturnType = Call->getFunctionType()->getReturnType();
  auto *Load = new LoadInst(ReturnType, GEP, "",
                            /* These would be set in reconstructCommon */
                            false, Align(1));
  reconstructCommon(Call, GEP, Load, 0);
  return std::pair{GEP, Load};
}

std::pair<GetElementPtrInst *, StoreInst *>
BPFPreserveStaticOffsetPass::reconstructStore(CallInst *Call) {
  GetElementPtrInst *GEP = reconstructGEP(Call, 1);
  auto *Store = new StoreInst(Call->getOperand(0), GEP,
                              /* These would be set in reconstructCommon */
                              false, Align(1));
  reconstructCommon(Call, GEP, Store, 1);
  return std::pair{GEP, Store};
}

static bool isZero(Value *V) {
  auto *CI = dyn_cast<ConstantInt>(V);
  return CI && CI->isZero();
}

// Given a chain of GEP instructions collect information necessary to
// merge this chain as a single GEP instruction of form:
//   getelementptr %<type>, ptr %p, i32 0, <field_idx1>, <field_idx2>, ...
static bool foldGEPChainAsStructAccess(SmallVector<GetElementPtrInst *> &GEPs,
                                       GEPChainInfo &Info) {
  if (GEPs.empty())
    return false;

  if (!all_of(GEPs, [=](GetElementPtrInst *GEP) {
        return GEP->hasAllConstantIndices();
      }))
    return false;

  GetElementPtrInst *First = GEPs[0];
  Info.InBounds = First->isInBounds();
  Info.SourceElementType = First->getSourceElementType();
  Type *ResultElementType = First->getResultElementType();
  Info.Indices.append(First->idx_begin(), First->idx_end());
  Info.Members.push_back(First);

  for (auto *Iter = GEPs.begin() + 1; Iter != GEPs.end(); ++Iter) {
    GetElementPtrInst *GEP = *Iter;
    if (!isZero(*GEP->idx_begin())) {
      Info.reset();
      return false;
    }
    if (!GEP->getSourceElementType() ||
        GEP->getSourceElementType() != ResultElementType) {
      Info.reset();
      return false;
    }
    Info.InBounds &= GEP->isInBounds();
    Info.Indices.append(GEP->idx_begin() + 1, GEP->idx_end());
    Info.Members.push_back(GEP);
    ResultElementType = GEP->getResultElementType();
  }

  return true;
}

// Given a chain of GEP instructions collect information necessary to
// merge this chain as a single GEP instruction of form:
//   getelementptr i8, ptr %p, i64 %offset
static bool foldGEPChainAsU8Access(SmallVector<GetElementPtrInst *> &GEPs,
                                   GEPChainInfo &Info) {
  if (GEPs.empty())
    return false;

  GetElementPtrInst *First = GEPs[0];
  const DataLayout &DL = First->getDataLayout();
  LLVMContext &C = First->getContext();
  Type *PtrTy = First->getType()->getScalarType();
  APInt Offset(DL.getIndexTypeSizeInBits(PtrTy), 0);
  for (GetElementPtrInst *GEP : GEPs) {
    if (!GEP->accumulateConstantOffset(DL, Offset)) {
      Info.reset();
      return false;
    }
    Info.InBounds &= GEP->isInBounds();
    Info.Members.push_back(GEP);
  }
  Info.SourceElementType = Type::getInt8Ty(C);
  Info.Indices.push_back(ConstantInt::get(C, Offset));

  return true;
}

static void reportNonStaticGEPChain(Instruction *Insn) {
  auto Msg = DiagnosticInfoUnsupported(
      *Insn->getFunction(),
      Twine("Non-constant offset in access to a field of a type marked "
            "with preserve_static_offset might be rejected by BPF verifier")
          .concat(Insn->getDebugLoc()
                      ? ""
                      : " (pass -g option to get exact location)"),
      Insn->getDebugLoc(), DS_Warning);
  Insn->getContext().diagnose(Msg);
}

static bool allZeroIndices(SmallVector<GetElementPtrInst *> &GEPs) {
  return GEPs.empty() || all_of(GEPs, [=](GetElementPtrInst *GEP) {
           return GEP->hasAllZeroIndices();
         });
}

static bool tryToReplaceWithGEPBuiltin(Instruction *LoadOrStoreTemplate,
                                       SmallVector<GetElementPtrInst *> &GEPs,
                                       Instruction *InsnToReplace) {
  GEPChainInfo GEPChain;
  if (!foldGEPChainAsStructAccess(GEPs, GEPChain) &&
      !foldGEPChainAsU8Access(GEPs, GEPChain)) {
    return false;
  }
  Module *M = InsnToReplace->getModule();
  if (auto *Load = dyn_cast<LoadInst>(LoadOrStoreTemplate)) {
    Instruction *Replacement = makeGEPAndLoad(M, GEPChain, Load);
    Replacement->insertBefore(InsnToReplace);
    InsnToReplace->replaceAllUsesWith(Replacement);
  }
  if (auto *Store = dyn_cast<StoreInst>(LoadOrStoreTemplate)) {
    Instruction *Replacement = makeGEPAndStore(M, GEPChain, Store);
    Replacement->insertBefore(InsnToReplace);
  }
  return true;
}

// Check if U->getPointerOperand() == I
static bool isPointerOperand(Value *I, User *U) {
  if (auto *L = dyn_cast<LoadInst>(U))
    return L->getPointerOperand() == I;
  if (auto *S = dyn_cast<StoreInst>(U))
    return S->getPointerOperand() == I;
  if (auto *GEP = dyn_cast<GetElementPtrInst>(U))
    return GEP->getPointerOperand() == I;
  if (auto *Call = isGEPAndLoad(U))
    return Call->getArgOperand(0) == I;
  if (auto *Call = isGEPAndStore(U))
    return Call->getArgOperand(1) == I;
  return false;
}

static bool isInlineableCall(User *U) {
  if (auto *Call = dyn_cast<CallInst>(U))
    return Call->hasFnAttr(Attribute::InlineHint);
  return false;
}

static void rewriteAccessChain(Instruction *Insn,
                               SmallVector<GetElementPtrInst *> &GEPs,
                               SmallVector<Instruction *> &Visited,
                               bool AllowPartial, bool &StillUsed);

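// Visit users of Insn and recurse via rewriteAccessChain for those
// that extend the GEP chain: instructions using Insn as a pointer
// operand, nested llvm.preserve.static.offset calls, and calls that
// might later be inlined.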
static void rewriteUses(Instruction *Insn,
                        SmallVector<GetElementPtrInst *> &GEPs,
                        SmallVector<Instruction *> &Visited, bool AllowPartial,
                        bool &StillUsed) {
  for (User *U : Insn->users()) {
    auto *UI = dyn_cast<Instruction>(U);
    if (UI && (isPointerOperand(Insn, UI) || isPreserveStaticOffsetCall(UI) ||
               isInlineableCall(UI)))
      rewriteAccessChain(UI, GEPs, Visited, AllowPartial, StillUsed);
    else
      LLVM_DEBUG({
        llvm::dbgs() << "unsupported usage in BPFPreserveStaticOffsetPass:\n";
        llvm::dbgs() << "  Insn: " << *Insn << "\n";
        llvm::dbgs() << "  User: " << *U << "\n";
      });
  }
}

// A DFS traversal of GEP chain trees starting from the root marker call.
//
// Recursion descends through GEP instructions and
// llvm.preserve.static.offset calls. Recursion stops at any other
// instruction. If a load or store instruction is reached it is replaced
// by a call to the `llvm.bpf.getelementptr.and.load` or
// `llvm.bpf.getelementptr.and.store` intrinsic.
// If `llvm.bpf.getelementptr.and.load/store` is reached the accumulated
// GEPs are merged into the intrinsic call.
// If nested calls to `llvm.preserve.static.offset` are encountered these
// calls are marked for deletion.
//
// Parameters description:
// - Insn - current position in the tree
// - GEPs - GEP instructions for the current branch
// - Visited - a list of visited instructions in DFS order,
//   order is important for unused instruction deletion.
// - AllowPartial - when true GEP chains that can't be folded are
//   not reported, otherwise a diagnostic message is shown for such
//   chains.
// - StillUsed - set to true if one of the GEP chains could not be
//   folded; makes sense when AllowPartial is false, means that the
//   root preserve.static.offset call is still in use and should remain
//   until the next run of this pass.
static void rewriteAccessChain(Instruction *Insn,
                               SmallVector<GetElementPtrInst *> &GEPs,
                               SmallVector<Instruction *> &Visited,
                               bool AllowPartial, bool &StillUsed) {
  auto MarkAndTraverseUses = [&]() {
    Visited.push_back(Insn);
    rewriteUses(Insn, GEPs, Visited, AllowPartial, StillUsed);
  };
  auto TryToReplace = [&](Instruction *LoadOrStore) {
    // Do nothing for (preserve.static.offset (load/store ..)) or for
    // GEPs with zero indices. Such constructs lead to zero offset and
    // are simplified by other passes.
    if (allZeroIndices(GEPs))
      return;
    if (tryToReplaceWithGEPBuiltin(LoadOrStore, GEPs, Insn)) {
      Visited.push_back(Insn);
      return;
    }
    if (!AllowPartial)
      reportNonStaticGEPChain(Insn);
    StillUsed = true;
  };
  if (isa<LoadInst>(Insn) || isa<StoreInst>(Insn)) {
    TryToReplace(Insn);
  } else if (isGEPAndLoad(Insn)) {
    auto [GEP, Load] =
        BPFPreserveStaticOffsetPass::reconstructLoad(cast<CallInst>(Insn));
    GEPs.push_back(GEP);
    TryToReplace(Load);
    GEPs.pop_back();
    delete Load;
    delete GEP;
  } else if (isGEPAndStore(Insn)) {
    // This case can't be merged with the above because
    // `delete Load` / `delete Store` wants a concrete type,
    // the destructor of Instruction is protected.
    auto [GEP, Store] =
        BPFPreserveStaticOffsetPass::reconstructStore(cast<CallInst>(Insn));
    GEPs.push_back(GEP);
    TryToReplace(Store);
    GEPs.pop_back();
    delete Store;
    delete GEP;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(Insn)) {
    GEPs.push_back(GEP);
    MarkAndTraverseUses();
    GEPs.pop_back();
  } else if (isPreserveStaticOffsetCall(Insn)) {
    MarkAndTraverseUses();
  } else if (isInlineableCall(Insn)) {
    // Preserve the preserve.static.offset call for parameters of
    // functions that might be inlined. These would be removed on a
    // second pass after inlining.
    // Might happen when a pointer to a preserve_static_offset
    // structure is passed as a parameter of a function that would be
    // inlined inside a loop that would be unrolled.
    if (AllowPartial)
      StillUsed = true;
  } else {
    SmallString<128> Buf;
    raw_svector_ostream BufStream(Buf);
    BufStream << *Insn;
    report_fatal_error(
        Twine("Unexpected rewriteAccessChain Insn = ").concat(Buf));
  }
}

static void removeMarkerCall(Instruction *Marker) {
  Marker->replaceAllUsesWith(Marker->getOperand(0));
  Marker->eraseFromParent();
}

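// Rewrite all GEP chains reachable from a single marker call, then
// delete the visited instructions that became unused. Returns true if
// the marker is still in use, i.e. some chain could not be folded.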
static bool rewriteAccessChain(Instruction *Marker, bool AllowPartial,
                               SmallPtrSetImpl<Instruction *> &RemovedMarkers) {
  SmallVector<GetElementPtrInst *> GEPs;
  SmallVector<Instruction *> Visited;
  bool StillUsed = false;
  rewriteUses(Marker, GEPs, Visited, AllowPartial, StillUsed);
  // Check if Visited instructions could be removed, iterate in
  // reverse to unblock instructions higher in the chain.
  for (auto V = Visited.rbegin(); V != Visited.rend(); ++V) {
    if (isPreserveStaticOffsetCall(*V)) {
      removeMarkerCall(*V);
      RemovedMarkers.insert(*V);
    } else if ((*V)->use_empty()) {
      (*V)->eraseFromParent();
    }
  }
  return StillUsed;
}

static std::vector<Instruction *>
collectPreserveStaticOffsetCalls(Function &F) {
  std::vector<Instruction *> Calls;
  for (Instruction &Insn : instructions(F))
    if (isPreserveStaticOffsetCall(&Insn))
      Calls.push_back(&Insn);
  return Calls;
}

bool isPreserveArrayIndex(Value *V) {
  return isIntrinsicCall(V, Intrinsic::preserve_array_access_index);
}

bool isPreserveStructIndex(Value *V) {
  return isIntrinsicCall(V, Intrinsic::preserve_struct_access_index);
}

bool isPreserveUnionIndex(Value *V) {
  return isIntrinsicCall(V, Intrinsic::preserve_union_access_index);
}

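// Strip the preserve.{array,struct,union}.access.index calls reachable
// from a marker call through pointer operands; their replacement is
// delegated to the BPFCoreSharedInfo helpers.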
static void removePAICalls(Instruction *Marker) {
  auto IsPointerOperand = [](Value *Op, User *U) {
    if (auto *GEP = dyn_cast<GetElementPtrInst>(U))
      return GEP->getPointerOperand() == Op;
    if (isPreserveStaticOffsetCall(U) || isPreserveArrayIndex(U) ||
        isPreserveStructIndex(U) || isPreserveUnionIndex(U))
      return cast<CallInst>(U)->getArgOperand(0) == Op;
    return false;
  };

  SmallVector<Value *, 32> WorkList;
  WorkList.push_back(Marker);
  do {
    Value *V = WorkList.pop_back_val();
    for (User *U : V->users())
      if (IsPointerOperand(V, U))
        WorkList.push_back(U);
    auto *Call = dyn_cast<CallInst>(V);
    if (!Call)
      continue;
    if (isPreserveArrayIndex(V))
      BPFCoreSharedInfo::removeArrayAccessCall(Call);
    else if (isPreserveStructIndex(V))
      BPFCoreSharedInfo::removeStructAccessCall(Call);
    else if (isPreserveUnionIndex(V))
      BPFCoreSharedInfo::removeUnionAccessCall(Call);
  } while (!WorkList.empty());
}

// Look for sequences:
// - llvm.preserve.static.offset -> getelementptr... -> load
// - llvm.preserve.static.offset -> getelementptr... -> store
// and replace those with calls to intrinsics:
// - llvm.bpf.getelementptr.and.load
// - llvm.bpf.getelementptr.and.store
static bool rewriteFunction(Function &F, bool AllowPartial) {
  LLVM_DEBUG(dbgs() << "********** BPFPreserveStaticOffsetPass (AllowPartial="
                    << AllowPartial << ") ************\n");

  auto MarkerCalls = collectPreserveStaticOffsetCalls(F);
  SmallPtrSet<Instruction *, 16> RemovedMarkers;

  LLVM_DEBUG(dbgs() << "There are " << MarkerCalls.size()
                    << " preserve.static.offset calls\n");

  if (MarkerCalls.empty())
    return false;

  for (auto *Call : MarkerCalls)
    removePAICalls(Call);

  for (auto *Call : MarkerCalls) {
    if (RemovedMarkers.contains(Call))
      continue;
    bool StillUsed = rewriteAccessChain(Call, AllowPartial, RemovedMarkers);
    if (!StillUsed || !AllowPartial)
      removeMarkerCall(Call);
  }

  return true;
}

PreservedAnalyses
llvm::BPFPreserveStaticOffsetPass::run(Function &F,
                                       FunctionAnalysisManager &AM) {
  return rewriteFunction(F, AllowPartial) ? PreservedAnalyses::none()
                                          : PreservedAnalyses::all();
}