//===- WholeProgramDevirt.cpp - Whole program virtual call optimization ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements whole program optimization of virtual calls in cases
// where we know (via !type metadata) that the list of callees is fixed. This
// includes the following:
// - Single implementation devirtualization: if a virtual call has a single
//   possible callee, replace all calls with a direct call to that callee.
// - Virtual constant propagation: if the virtual function's return type is an
//   integer <=64 bits and all possible callees are readnone, for each class and
//   each list of constant arguments: evaluate the function, store the return
//   value alongside the virtual table, and rewrite each virtual call as a load
//   from the virtual table.
// - Uniform return value optimization: if the conditions for virtual constant
//   propagation hold and each function returns the same constant value, replace
//   each virtual call with that constant.
// - Unique return value optimization for i1 return values: if the conditions
//   for virtual constant propagation hold and a single vtable's function
//   returns 0, or a single vtable's function returns 1, replace each virtual
//   call with a comparison of the vptr against that vtable's address.
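//
//   For illustration (a sketch, not part of the original header), single
//   implementation devirtualization rewrites an indirect virtual call such as
//
//     %vfn = load ptr, ptr %vtable.slot
//     %r = call i32 %vfn(ptr %obj)
//
//   into a direct call to the only possible callee (mangled name hypothetical):
//
//     %r = call i32 @_ZN1A1fEv(ptr %obj)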
//
// This pass is intended to be used during the regular and thin LTO pipelines:
//
// During regular LTO, the pass determines the best optimization for each
// virtual call and applies the resolutions directly to virtual calls that are
// eligible for virtual call optimization (i.e. calls that use either of the
// llvm.assume(llvm.type.test) or llvm.type.checked.load intrinsics).
//
// During hybrid Regular/ThinLTO, the pass operates in two phases:
// - Export phase: this is run during the thin link over a single merged module
//   that contains all vtables with !type metadata that participate in the link.
//   The pass computes a resolution for each virtual call and stores it in the
//   type identifier summary.
// - Import phase: this is run during the thin backends over the individual
//   modules. The pass applies the resolutions previously computed during the
//   export phase to each eligible virtual call.
//
// During ThinLTO, the pass operates in two phases:
// - Export phase: this is run during the thin link over the index which
//   contains a summary of all vtables with !type metadata that participate in
//   the link. It computes a resolution for each virtual call and stores it in
//   the type identifier summary. Only single implementation devirtualization
//   is supported.
// - Import phase: (same as with the hybrid case above).
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/WholeProgramDevirt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TypeMetadataUtils.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSummaryIndexYAML.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/GlobPattern.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/FunctionAttrs.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/CallPromotionUtils.h"
#include "llvm/Transforms/Utils/Evaluator.h"
#include <algorithm>
#include <cstddef>
#include <map>
#include <set>
#include <string>

using namespace llvm;
using namespace wholeprogramdevirt;

#define DEBUG_TYPE "wholeprogramdevirt"

STATISTIC(NumDevirtTargets, "Number of whole program devirtualization targets");
STATISTIC(NumSingleImpl, "Number of single implementation devirtualizations");
STATISTIC(NumBranchFunnel, "Number of branch funnels");
STATISTIC(NumUniformRetVal, "Number of uniform return value optimizations");
STATISTIC(NumUniqueRetVal, "Number of unique return value optimizations");
STATISTIC(NumVirtConstProp1Bit,
          "Number of 1 bit virtual constant propagations");
STATISTIC(NumVirtConstProp, "Number of virtual constant propagations");

static cl::opt<PassSummaryAction> ClSummaryAction(
    "wholeprogramdevirt-summary-action",
    cl::desc("What to do with the summary when running this pass"),
    cl::values(clEnumValN(PassSummaryAction::None, "none", "Do nothing"),
               clEnumValN(PassSummaryAction::Import, "import",
                          "Import typeid resolutions from summary and globals"),
               clEnumValN(PassSummaryAction::Export, "export",
                          "Export typeid resolutions to summary and globals")),
    cl::Hidden);

static cl::opt<std::string> ClReadSummary(
    "wholeprogramdevirt-read-summary",
    cl::desc(
        "Read summary from given bitcode or YAML file before running pass"),
    cl::Hidden);

static cl::opt<std::string> ClWriteSummary(
    "wholeprogramdevirt-write-summary",
    cl::desc("Write summary to given bitcode or YAML file after running pass. "
             "Output file format is deduced from extension: *.bc means writing "
             "bitcode, otherwise YAML"),
    cl::Hidden);

static cl::opt<unsigned>
    ClThreshold("wholeprogramdevirt-branch-funnel-threshold", cl::Hidden,
                cl::init(10),
                cl::desc("Maximum number of call targets per "
                         "call site to enable branch funnels"));

static cl::opt<bool>
    PrintSummaryDevirt("wholeprogramdevirt-print-index-based", cl::Hidden,
                       cl::desc("Print index-based devirtualization messages"));

/// Provide a way to force enable whole program visibility in tests.
/// This is needed to support legacy tests that don't contain
/// !vcall_visibility metadata (the mere presence of type tests
/// previously implied hidden visibility).
static cl::opt<bool>
    WholeProgramVisibility("whole-program-visibility", cl::Hidden,
                           cl::desc("Enable whole program visibility"));

/// Provide a way to force disable whole program visibility for debugging or
/// workarounds, when enabled via the linker.
static cl::opt<bool> DisableWholeProgramVisibility(
    "disable-whole-program-visibility", cl::Hidden,
    cl::desc("Disable whole program visibility (overrides enabling options)"));

/// Provide a way to prevent certain functions from being devirtualized.
static cl::list<std::string>
    SkipFunctionNames("wholeprogramdevirt-skip",
                      cl::desc("Prevent function(s) from being devirtualized"),
                      cl::Hidden, cl::CommaSeparated);

/// With Clang, a pure virtual class's deleting destructor is emitted as a
/// `llvm.trap` intrinsic followed by an unreachable IR instruction. In the
/// context of whole program devirtualization, the deleting destructor of a pure
/// virtual class won't be invoked by the source code, so it is safe to skip as
/// a devirtualize target.
///
/// However, not all unreachable functions are safe to skip. In some cases, the
/// program intends to run such functions and terminate; for instance, a unit
/// test may run a death test. A non-test program might (or is allowed to)
/// invoke such functions to report failures (whether/when that is good
/// practice is a different topic).
///
/// This option is enabled to keep an unreachable function as a possible
/// devirtualize target, conservatively preserving the program behavior.
///
/// TODO: Make a pure virtual class's deleting destructor precisely identifiable
/// in Clang's codegen for more devirtualization in LLVM.
static cl::opt<bool> WholeProgramDevirtKeepUnreachableFunction(
    "wholeprogramdevirt-keep-unreachable-function",
    cl::desc("Regard unreachable functions as possible devirtualize targets."),
    cl::Hidden, cl::init(true));

/// If explicitly specified, the devirt module pass will stop transformation
/// once the total number of devirtualizations reaches the cutoff value. Setting
/// this option to 0 explicitly will perform no devirtualizations.
static cl::opt<unsigned> WholeProgramDevirtCutoff(
    "wholeprogramdevirt-cutoff",
    cl::desc("Max number of devirtualizations for devirt module pass"),
    cl::init(0));

/// Mechanism to add runtime checking of devirtualization decisions, optionally
/// trapping or falling back to indirect call on any that are not correct.
/// Trapping mode is useful for debugging undefined behavior leading to failures
/// with WPD. Fallback mode is useful for ensuring safety when whole program
/// visibility may be compromised.
enum WPDCheckMode { None, Trap, Fallback };
static cl::opt<WPDCheckMode> DevirtCheckMode(
    "wholeprogramdevirt-check", cl::Hidden,
    cl::desc("Type of checking for incorrect devirtualizations"),
    cl::values(clEnumValN(WPDCheckMode::None, "none", "No checking"),
               clEnumValN(WPDCheckMode::Trap, "trap", "Trap when incorrect"),
               clEnumValN(WPDCheckMode::Fallback, "fallback",
                          "Fallback to indirect when incorrect")));
namespace {
struct PatternList {
  std::vector<GlobPattern> Patterns;
  template <class T> void init(const T &StringList) {
    for (const auto &S : StringList)
      if (Expected<GlobPattern> Pat = GlobPattern::create(S))
        Patterns.push_back(std::move(*Pat));
  }
  bool match(StringRef S) {
    for (const GlobPattern &P : Patterns)
      if (P.match(S))
        return true;
    return false;
  }
};
} // namespace
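
// For illustration (hypothetical usage, not from the original source): with
// -wholeprogramdevirt-skip=_ZN4Base*, PatternList::match("_ZN4Base1fEv")
// returns true, so Base's virtual functions are never chosen as
// devirtualization targets, while match("_ZN7Derived1fEv") returns false.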

// Find the minimum offset that we may store a value of size Size bits at. If
// IsAfter is set, look for an offset after the object, otherwise look for an
// offset before the object.
uint64_t
wholeprogramdevirt::findLowestOffset(ArrayRef<VirtualCallTarget> Targets,
                                     bool IsAfter, uint64_t Size) {
  // Find a minimum offset taking into account only vtable sizes.
  uint64_t MinByte = 0;
  for (const VirtualCallTarget &Target : Targets) {
    if (IsAfter)
      MinByte = std::max(MinByte, Target.minAfterBytes());
    else
      MinByte = std::max(MinByte, Target.minBeforeBytes());
  }

  // Build a vector of arrays of bytes covering, for each target, a slice of the
  // used region (see AccumBitVector::BytesUsed in
  // llvm/Transforms/IPO/WholeProgramDevirt.h) starting at MinByte. Effectively,
  // this aligns the used regions to start at MinByte.
  //
  // In this example, A, B and C are vtables, # is a byte already allocated for
  // a virtual function pointer, AAAA... (etc.) are the used regions for the
  // vtables and Offset(X) is the value computed for the Offset variable below
  // for X.
  //
  //                    Offset(A)
  //                    |       |
  //                            |MinByte
  // A: ################AAAAAAAA|AAAAAAAA
  // B: ########BBBBBBBBBBBBBBBB|BBBB
  // C: ########################|CCCCCCCCCCCCCCCC
  //            |   Offset(B)   |
  //
  // This code produces the slices of A, B and C that appear after the divider
  // at MinByte.
  std::vector<ArrayRef<uint8_t>> Used;
  for (const VirtualCallTarget &Target : Targets) {
    ArrayRef<uint8_t> VTUsed = IsAfter ? Target.TM->Bits->After.BytesUsed
                                       : Target.TM->Bits->Before.BytesUsed;
    uint64_t Offset = IsAfter ? MinByte - Target.minAfterBytes()
                              : MinByte - Target.minBeforeBytes();

    // Disregard used regions that are smaller than Offset. These are
    // effectively all-free regions that do not need to be checked.
    if (VTUsed.size() > Offset)
      Used.push_back(VTUsed.slice(Offset));
  }

  if (Size == 1) {
    // Find a free bit in each member of Used.
    for (unsigned I = 0;; ++I) {
      uint8_t BitsUsed = 0;
      for (auto &&B : Used)
        if (I < B.size())
          BitsUsed |= B[I];
      if (BitsUsed != 0xff)
        return (MinByte + I) * 8 + llvm::countr_zero(uint8_t(~BitsUsed));
    }
  } else {
    // Find a free (Size/8) byte region in each member of Used.
    // FIXME: see if alignment helps.
    for (unsigned I = 0;; ++I) {
      for (auto &&B : Used) {
        unsigned Byte = 0;
        while ((I + Byte) < B.size() && Byte < (Size / 8)) {
          if (B[I + Byte])
            goto NextI;
          ++Byte;
        }
      }
      // Rounding up ensures the constant is always stored at an address we
      // can directly load from without misalignment.
      return alignTo((MinByte + I) * 8, Size);
    NextI:;
    }
  }
}
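
// A worked example (not from the original source): with Size == 1 and
// Used == {{0xff, 0x1f}, {0xff, 0x3f}}, the loop above stops at I == 1 with
// BitsUsed == 0x3f, so the result is (MinByte + 1) * 8 + countr_zero(0xc0),
// i.e. bit 6 of the byte at MinByte + 1.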

void wholeprogramdevirt::setBeforeReturnValues(
    MutableArrayRef<VirtualCallTarget> Targets, uint64_t AllocBefore,
    unsigned BitWidth, int64_t &OffsetByte, uint64_t &OffsetBit) {
  if (BitWidth == 1)
    OffsetByte = -(AllocBefore / 8 + 1);
  else
    OffsetByte = -((AllocBefore + 7) / 8 + (BitWidth + 7) / 8);
  OffsetBit = AllocBefore % 8;

  for (VirtualCallTarget &Target : Targets) {
    if (BitWidth == 1)
      Target.setBeforeBit(AllocBefore);
    else
      Target.setBeforeBytes(AllocBefore, (BitWidth + 7) / 8);
  }
}
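
// Worked example (not from the original source): with AllocBefore == 16 bits
// already allocated before the address point and BitWidth == 32, this computes
// OffsetByte == -((16 + 7) / 8 + (32 + 7) / 8) == -6 and OffsetBit == 0, i.e.
// the return value is stored starting 6 bytes before the vtable's address
// point.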

void wholeprogramdevirt::setAfterReturnValues(
    MutableArrayRef<VirtualCallTarget> Targets, uint64_t AllocAfter,
    unsigned BitWidth, int64_t &OffsetByte, uint64_t &OffsetBit) {
  if (BitWidth == 1)
    OffsetByte = AllocAfter / 8;
  else
    OffsetByte = (AllocAfter + 7) / 8;
  OffsetBit = AllocAfter % 8;

  for (VirtualCallTarget &Target : Targets) {
    if (BitWidth == 1)
      Target.setAfterBit(AllocAfter);
    else
      Target.setAfterBytes(AllocAfter, (BitWidth + 7) / 8);
  }
}

VirtualCallTarget::VirtualCallTarget(GlobalValue *Fn, const TypeMemberInfo *TM)
    : Fn(Fn), TM(TM),
      IsBigEndian(Fn->getDataLayout().isBigEndian()),
      WasDevirt(false) {}

namespace {

// Tracks the number of devirted calls in the IR transformation.
static unsigned NumDevirtCalls = 0;

// A slot in a set of virtual tables. The TypeID identifies the set of virtual
// tables, and the ByteOffset is the offset in bytes from the address point to
// the virtual function pointer.
struct VTableSlot {
  Metadata *TypeID;
  uint64_t ByteOffset;
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<VTableSlot> {
  static VTableSlot getEmptyKey() {
    return {DenseMapInfo<Metadata *>::getEmptyKey(),
            DenseMapInfo<uint64_t>::getEmptyKey()};
  }
  static VTableSlot getTombstoneKey() {
    return {DenseMapInfo<Metadata *>::getTombstoneKey(),
            DenseMapInfo<uint64_t>::getTombstoneKey()};
  }
  static unsigned getHashValue(const VTableSlot &I) {
    return DenseMapInfo<Metadata *>::getHashValue(I.TypeID) ^
           DenseMapInfo<uint64_t>::getHashValue(I.ByteOffset);
  }
  static bool isEqual(const VTableSlot &LHS,
                      const VTableSlot &RHS) {
    return LHS.TypeID == RHS.TypeID && LHS.ByteOffset == RHS.ByteOffset;
  }
};

template <> struct DenseMapInfo<VTableSlotSummary> {
  static VTableSlotSummary getEmptyKey() {
    return {DenseMapInfo<StringRef>::getEmptyKey(),
            DenseMapInfo<uint64_t>::getEmptyKey()};
  }
  static VTableSlotSummary getTombstoneKey() {
    return {DenseMapInfo<StringRef>::getTombstoneKey(),
            DenseMapInfo<uint64_t>::getTombstoneKey()};
  }
  static unsigned getHashValue(const VTableSlotSummary &I) {
    return DenseMapInfo<StringRef>::getHashValue(I.TypeID) ^
           DenseMapInfo<uint64_t>::getHashValue(I.ByteOffset);
  }
  static bool isEqual(const VTableSlotSummary &LHS,
                      const VTableSlotSummary &RHS) {
    return LHS.TypeID == RHS.TypeID && LHS.ByteOffset == RHS.ByteOffset;
  }
};

} // end namespace llvm

// Returns true if the function must be unreachable based on ValueInfo.
//
// In particular, identifies a function as unreachable when the following
// conditions all hold:
// 1) All summaries are live.
// 2) All function summaries indicate it's unreachable.
// 3) There is no non-function with the same GUID (which is rare).
static bool mustBeUnreachableFunction(ValueInfo TheFnVI) {
  if (WholeProgramDevirtKeepUnreachableFunction)
    return false;

  if ((!TheFnVI) || TheFnVI.getSummaryList().empty()) {
    // Returns false if ValueInfo is absent, or the summary list is empty
    // (e.g., function declarations).
    return false;
  }

  for (const auto &Summary : TheFnVI.getSummaryList()) {
    // Conservatively returns false if any non-live functions are seen.
    // In general either all summaries should be live or all should be dead.
    if (!Summary->isLive())
      return false;
    if (auto *FS = dyn_cast<FunctionSummary>(Summary->getBaseObject())) {
      if (!FS->fflags().MustBeUnreachable)
        return false;
    }
    // Be conservative if a non-function has the same GUID (which is rare).
    else
      return false;
  }
  // All function summaries are live and all of them agree that the function is
  // unreachable.
  return true;
}

namespace {
// A virtual call site. VTable is the loaded virtual table pointer, and CB is
// the indirect virtual call.
struct VirtualCallSite {
  Value *VTable = nullptr;
  CallBase &CB;

  // If non-null, this field points to the associated unsafe use count stored in
  // the DevirtModule::NumUnsafeUsesForTypeTest map below. See the description
  // of that field for details.
  unsigned *NumUnsafeUses = nullptr;

  void
  emitRemark(const StringRef OptName, const StringRef TargetName,
             function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter) {
    Function *F = CB.getCaller();
    DebugLoc DLoc = CB.getDebugLoc();
    BasicBlock *Block = CB.getParent();

    using namespace ore;
    OREGetter(F).emit(OptimizationRemark(DEBUG_TYPE, OptName, DLoc, Block)
                      << NV("Optimization", OptName)
                      << ": devirtualized a call to "
                      << NV("FunctionName", TargetName));
  }

  void replaceAndErase(
      const StringRef OptName, const StringRef TargetName, bool RemarksEnabled,
      function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter,
      Value *New) {
    if (RemarksEnabled)
      emitRemark(OptName, TargetName, OREGetter);
    CB.replaceAllUsesWith(New);
    if (auto *II = dyn_cast<InvokeInst>(&CB)) {
      BranchInst::Create(II->getNormalDest(), CB.getIterator());
      II->getUnwindDest()->removePredecessor(II->getParent());
    }
    CB.eraseFromParent();
    // This use is no longer unsafe.
    if (NumUnsafeUses)
      --*NumUnsafeUses;
  }
};

// Call site information collected for a specific VTableSlot and possibly a list
// of constant integer arguments. The grouping by arguments is handled by the
// VTableSlotInfo class.
struct CallSiteInfo {
  /// The set of call sites for this slot. Used during regular LTO and the
  /// import phase of ThinLTO (as well as the export phase of ThinLTO for any
  /// call sites that appear in the merged module itself); in each of these
  /// cases we are directly operating on the call sites at the IR level.
  std::vector<VirtualCallSite> CallSites;

  /// Whether all call sites represented by this CallSiteInfo, including those
  /// in summaries, have been devirtualized. This starts off as true because a
  /// default constructed CallSiteInfo represents no call sites.
  ///
  /// If at the end of the pass there are still undevirtualized calls, we will
  /// need to add a use of llvm.type.test to each of the function summaries in
  /// the vector.
  bool AllCallSitesDevirted = true;

  // These fields are used during the export phase of ThinLTO and reflect
  // information collected from function summaries.

  /// CFI-specific: a vector containing the list of function summaries that use
  /// the llvm.type.checked.load intrinsic and therefore will require
  /// resolutions for llvm.type.test in order to implement CFI checks if
  /// devirtualization was unsuccessful.
  std::vector<FunctionSummary *> SummaryTypeCheckedLoadUsers;

  /// A vector containing the list of function summaries that use
  /// assume(llvm.type.test).
  std::vector<FunctionSummary *> SummaryTypeTestAssumeUsers;

  bool isExported() const {
    return !SummaryTypeCheckedLoadUsers.empty() ||
           !SummaryTypeTestAssumeUsers.empty();
  }

  void addSummaryTypeCheckedLoadUser(FunctionSummary *FS) {
    SummaryTypeCheckedLoadUsers.push_back(FS);
    AllCallSitesDevirted = false;
  }

  void addSummaryTypeTestAssumeUser(FunctionSummary *FS) {
    SummaryTypeTestAssumeUsers.push_back(FS);
    AllCallSitesDevirted = false;
  }

  void markDevirt() { AllCallSitesDevirted = true; }
};

// Call site information collected for a specific VTableSlot.
struct VTableSlotInfo {
  // The set of call sites which do not have all constant integer arguments
  // (excluding "this").
  CallSiteInfo CSInfo;

  // The set of call sites with all constant integer arguments (excluding
  // "this"), grouped by argument list.
  std::map<std::vector<uint64_t>, CallSiteInfo> ConstCSInfo;

  void addCallSite(Value *VTable, CallBase &CB, unsigned *NumUnsafeUses);

private:
  CallSiteInfo &findCallSiteInfo(CallBase &CB);
};

CallSiteInfo &VTableSlotInfo::findCallSiteInfo(CallBase &CB) {
  std::vector<uint64_t> Args;
  auto *CBType = dyn_cast<IntegerType>(CB.getType());
  if (!CBType || CBType->getBitWidth() > 64 || CB.arg_empty())
    return CSInfo;
  for (auto &&Arg : drop_begin(CB.args())) {
    auto *CI = dyn_cast<ConstantInt>(Arg);
    if (!CI || CI->getBitWidth() > 64)
      return CSInfo;
    Args.push_back(CI->getZExtValue());
  }
  return ConstCSInfo[Args];
}
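
// For example (hypothetical call site, not from the original source): an i32
// virtual call p->foo(3, 7) whose non-"this" arguments are the ConstantInts 3
// and 7 is grouped under ConstCSInfo[{3, 7}] (drop_begin skips the "this"
// argument), whereas a call with any non-constant argument is filed under the
// generic CSInfo.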

void VTableSlotInfo::addCallSite(Value *VTable, CallBase &CB,
                                 unsigned *NumUnsafeUses) {
  auto &CSI = findCallSiteInfo(CB);
  CSI.AllCallSitesDevirted = false;
  CSI.CallSites.push_back({VTable, CB, NumUnsafeUses});
}

struct DevirtModule {
  Module &M;
  function_ref<AAResults &(Function &)> AARGetter;
  function_ref<DominatorTree &(Function &)> LookupDomTree;

  ModuleSummaryIndex *ExportSummary;
  const ModuleSummaryIndex *ImportSummary;

  IntegerType *Int8Ty;
  PointerType *Int8PtrTy;
  IntegerType *Int32Ty;
  IntegerType *Int64Ty;
  IntegerType *IntPtrTy;
  /// Sizeless array type, used for imported vtables. This provides a signal
  /// to analyzers that these imports may alias, as they do for example
  /// when multiple unique return values occur in the same vtable.
  ArrayType *Int8Arr0Ty;

  bool RemarksEnabled;
  function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter;

  MapVector<VTableSlot, VTableSlotInfo> CallSlots;

  // Calls that have already been optimized. We may add a call to multiple
  // VTableSlotInfos if vtable loads are coalesced, and we need to make sure not
  // to optimize a call more than once.
  SmallPtrSet<CallBase *, 8> OptimizedCalls;

  // Store calls that had their ptrauth bundle removed. They are to be deleted
  // at the end of the optimization.
  SmallVector<CallBase *, 8> CallsWithPtrAuthBundleRemoved;

  // This map keeps track of the number of "unsafe" uses of a loaded function
  // pointer. The key is the associated llvm.type.test intrinsic call generated
  // by this pass. An unsafe use is one that calls the loaded function pointer
  // directly. Every time we eliminate an unsafe use (for example, by
  // devirtualizing it or by applying virtual constant propagation), we
  // decrement the value stored in this map. If a value reaches zero, we can
  // eliminate the type check by RAUWing the associated llvm.type.test call with
  // true.
  std::map<CallInst *, unsigned> NumUnsafeUsesForTypeTest;
  PatternList FunctionsToSkip;

  DevirtModule(Module &M, function_ref<AAResults &(Function &)> AARGetter,
               function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter,
               function_ref<DominatorTree &(Function &)> LookupDomTree,
               ModuleSummaryIndex *ExportSummary,
               const ModuleSummaryIndex *ImportSummary)
      : M(M), AARGetter(AARGetter), LookupDomTree(LookupDomTree),
        ExportSummary(ExportSummary), ImportSummary(ImportSummary),
        Int8Ty(Type::getInt8Ty(M.getContext())),
        Int8PtrTy(PointerType::getUnqual(M.getContext())),
        Int32Ty(Type::getInt32Ty(M.getContext())),
        Int64Ty(Type::getInt64Ty(M.getContext())),
        IntPtrTy(M.getDataLayout().getIntPtrType(M.getContext(), 0)),
        Int8Arr0Ty(ArrayType::get(Type::getInt8Ty(M.getContext()), 0)),
        RemarksEnabled(areRemarksEnabled()), OREGetter(OREGetter) {
    assert(!(ExportSummary && ImportSummary));
    FunctionsToSkip.init(SkipFunctionNames);
  }

  bool areRemarksEnabled();

  void
  scanTypeTestUsers(Function *TypeTestFunc,
                    DenseMap<Metadata *, std::set<TypeMemberInfo>> &TypeIdMap);
  void scanTypeCheckedLoadUsers(Function *TypeCheckedLoadFunc);

  void buildTypeIdentifierMap(
      std::vector<VTableBits> &Bits,
      DenseMap<Metadata *, std::set<TypeMemberInfo>> &TypeIdMap);

  bool
  tryFindVirtualCallTargets(std::vector<VirtualCallTarget> &TargetsForSlot,
                            const std::set<TypeMemberInfo> &TypeMemberInfos,
                            uint64_t ByteOffset,
                            ModuleSummaryIndex *ExportSummary);

  void applySingleImplDevirt(VTableSlotInfo &SlotInfo, Constant *TheFn,
                             bool &IsExported);
  bool trySingleImplDevirt(ModuleSummaryIndex *ExportSummary,
                           MutableArrayRef<VirtualCallTarget> TargetsForSlot,
                           VTableSlotInfo &SlotInfo,
                           WholeProgramDevirtResolution *Res);

  void applyICallBranchFunnel(VTableSlotInfo &SlotInfo, Constant *JT,
                              bool &IsExported);
  void tryICallBranchFunnel(MutableArrayRef<VirtualCallTarget> TargetsForSlot,
                            VTableSlotInfo &SlotInfo,
                            WholeProgramDevirtResolution *Res, VTableSlot Slot);

  bool tryEvaluateFunctionsWithArgs(
      MutableArrayRef<VirtualCallTarget> TargetsForSlot,
      ArrayRef<uint64_t> Args);

  void applyUniformRetValOpt(CallSiteInfo &CSInfo, StringRef FnName,
                             uint64_t TheRetVal);
  bool tryUniformRetValOpt(MutableArrayRef<VirtualCallTarget> TargetsForSlot,
                           CallSiteInfo &CSInfo,
                           WholeProgramDevirtResolution::ByArg *Res);

  // Returns the global symbol name that is used to export information about the
  // given vtable slot and list of arguments.
  std::string getGlobalName(VTableSlot Slot, ArrayRef<uint64_t> Args,
                            StringRef Name);

  bool shouldExportConstantsAsAbsoluteSymbols();

  // This function is called during the export phase to create a symbol
  // definition containing information about the given vtable slot and list of
  // arguments.
  void exportGlobal(VTableSlot Slot, ArrayRef<uint64_t> Args, StringRef Name,
                    Constant *C);
  void exportConstant(VTableSlot Slot, ArrayRef<uint64_t> Args, StringRef Name,
                      uint32_t Const, uint32_t &Storage);

  // This function is called during the import phase to create a reference to
  // the symbol definition created during the export phase.
  Constant *importGlobal(VTableSlot Slot, ArrayRef<uint64_t> Args,
                         StringRef Name);
  Constant *importConstant(VTableSlot Slot, ArrayRef<uint64_t> Args,
                           StringRef Name, IntegerType *IntTy,
                           uint32_t Storage);

  Constant *getMemberAddr(const TypeMemberInfo *M);

  void applyUniqueRetValOpt(CallSiteInfo &CSInfo, StringRef FnName, bool IsOne,
                            Constant *UniqueMemberAddr);
  bool tryUniqueRetValOpt(unsigned BitWidth,
                          MutableArrayRef<VirtualCallTarget> TargetsForSlot,
                          CallSiteInfo &CSInfo,
                          WholeProgramDevirtResolution::ByArg *Res,
                          VTableSlot Slot, ArrayRef<uint64_t> Args);

  void applyVirtualConstProp(CallSiteInfo &CSInfo, StringRef FnName,
                             Constant *Byte, Constant *Bit);
  bool tryVirtualConstProp(MutableArrayRef<VirtualCallTarget> TargetsForSlot,
                           VTableSlotInfo &SlotInfo,
                           WholeProgramDevirtResolution *Res, VTableSlot Slot);

  void rebuildGlobal(VTableBits &B);

  // Apply the summary resolution for Slot to all virtual calls in SlotInfo.
  void importResolution(VTableSlot Slot, VTableSlotInfo &SlotInfo);

  // If we were able to eliminate all unsafe uses for a type checked load,
  // eliminate the associated type tests by replacing them with true.
  void removeRedundantTypeTests();

  bool run();

  // Look up the corresponding ValueInfo entry of `TheFn` in `ExportSummary`.
  //
  // Caller guarantees that `ExportSummary` is not nullptr.
  static ValueInfo lookUpFunctionValueInfo(Function *TheFn,
                                           ModuleSummaryIndex *ExportSummary);

  // Returns true if the function definition must be unreachable.
  //
  // Note if this helper function returns true, `F` is guaranteed
  // to be unreachable; if it returns false, `F` might still
  // be unreachable but not covered by this helper function.
  //
  // Implementation-wise, if the function definition is present, the IR is
  // analyzed; if not, the function flags are looked up in ExportSummary as a
  // fallback.
  static bool mustBeUnreachableFunction(Function *const F,
                                        ModuleSummaryIndex *ExportSummary);

  // Lower the module using the action and summary passed as command line
  // arguments. For testing purposes only.
  static bool
  runForTesting(Module &M, function_ref<AAResults &(Function &)> AARGetter,
                function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter,
                function_ref<DominatorTree &(Function &)> LookupDomTree);
};

struct DevirtIndex {
  ModuleSummaryIndex &ExportSummary;
  // The set in which to record GUIDs exported from their module by
  // devirtualization, used by client to ensure they are not internalized.
  std::set<GlobalValue::GUID> &ExportedGUIDs;
  // A map in which to record the information necessary to locate the WPD
  // resolution for local targets in case they are exported by cross module
  // importing.
  std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap;

  MapVector<VTableSlotSummary, VTableSlotInfo> CallSlots;

  PatternList FunctionsToSkip;

  DevirtIndex(
      ModuleSummaryIndex &ExportSummary,
      std::set<GlobalValue::GUID> &ExportedGUIDs,
      std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap)
      : ExportSummary(ExportSummary), ExportedGUIDs(ExportedGUIDs),
        LocalWPDTargetsMap(LocalWPDTargetsMap) {
    FunctionsToSkip.init(SkipFunctionNames);
  }

  bool tryFindVirtualCallTargets(std::vector<ValueInfo> &TargetsForSlot,
                                 const TypeIdCompatibleVtableInfo TIdInfo,
                                 uint64_t ByteOffset);

  bool trySingleImplDevirt(MutableArrayRef<ValueInfo> TargetsForSlot,
                           VTableSlotSummary &SlotSummary,
                           VTableSlotInfo &SlotInfo,
                           WholeProgramDevirtResolution *Res,
                           std::set<ValueInfo> &DevirtTargets);

  void run();
};
} // end anonymous namespace

PreservedAnalyses WholeProgramDevirtPass::run(Module &M,
                                              ModuleAnalysisManager &AM) {
  auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  auto AARGetter = [&](Function &F) -> AAResults & {
    return FAM.getResult<AAManager>(F);
  };
  auto OREGetter = [&](Function *F) -> OptimizationRemarkEmitter & {
    return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F);
  };
  auto LookupDomTree = [&FAM](Function &F) -> DominatorTree & {
    return FAM.getResult<DominatorTreeAnalysis>(F);
  };
  if (UseCommandLine) {
    if (!DevirtModule::runForTesting(M, AARGetter, OREGetter, LookupDomTree))
      return PreservedAnalyses::all();
    return PreservedAnalyses::none();
  }
  if (!DevirtModule(M, AARGetter, OREGetter, LookupDomTree, ExportSummary,
                    ImportSummary)
           .run())
    return PreservedAnalyses::all();
  return PreservedAnalyses::none();
}

// Enable whole program visibility if enabled by client (e.g. linker) or
// internal option, and not force disabled.
bool llvm::hasWholeProgramVisibility(bool WholeProgramVisibilityEnabledInLTO) {
  return (WholeProgramVisibilityEnabledInLTO || WholeProgramVisibility) &&
         !DisableWholeProgramVisibility;
}

static bool
typeIDVisibleToRegularObj(StringRef TypeID,
                          function_ref<bool(StringRef)> IsVisibleToRegularObj) {
  // TypeID for member function pointer type is an internal construct
  // and won't exist in IsVisibleToRegularObj. The full TypeID
  // will be present and participate in invalidation.
  if (TypeID.ends_with(".virtual"))
    return false;

  // TypeID that doesn't start with Itanium mangling (_ZTS) will be
  // non-externally visible types which cannot interact with
  // external native files. See CodeGenModule::CreateMetadataIdentifierImpl.
  if (!TypeID.consume_front("_ZTS"))
    return false;

  // TypeID is keyed off the type name symbol (_ZTS). However, the native
  // object may not contain this symbol if it does not contain a key
  // function for the base type and thus only contains a reference to the
  // type info (_ZTI). To catch this case we query using the type info
  // symbol corresponding to the TypeID.
  std::string typeInfo = ("_ZTI" + TypeID).str();
  return IsVisibleToRegularObj(typeInfo);
}
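
// For example (hypothetical type, not from the original source): for a class A
// whose TypeID is "_ZTS1A", consume_front leaves "1A" and the visibility query
// is issued for the type info symbol "_ZTI1A".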

static bool
skipUpdateDueToValidation(GlobalVariable &GV,
                          function_ref<bool(StringRef)> IsVisibleToRegularObj) {
  SmallVector<MDNode *, 2> Types;
  GV.getMetadata(LLVMContext::MD_type, Types);

  for (auto Type : Types)
    if (auto *TypeID = dyn_cast<MDString>(Type->getOperand(1).get()))
      return typeIDVisibleToRegularObj(TypeID->getString(),
                                       IsVisibleToRegularObj);

  return false;
}

/// If whole program visibility is asserted, then upgrade all public vcall
/// visibility metadata on vtable definitions to linkage unit visibility in
/// Module IR (for regular or hybrid LTO).
void llvm::updateVCallVisibilityInModule(
    Module &M, bool WholeProgramVisibilityEnabledInLTO,
    const DenseSet<GlobalValue::GUID> &DynamicExportSymbols,
    bool ValidateAllVtablesHaveTypeInfos,
    function_ref<bool(StringRef)> IsVisibleToRegularObj) {
  if (!hasWholeProgramVisibility(WholeProgramVisibilityEnabledInLTO))
    return;
  for (GlobalVariable &GV : M.globals()) {
    // Add linkage unit visibility to any variable with type metadata; these are
    // the vtable definitions. We won't have an existing vcall_visibility
    // metadata on vtable definitions with public visibility.
    if (GV.hasMetadata(LLVMContext::MD_type) &&
        GV.getVCallVisibility() == GlobalObject::VCallVisibilityPublic &&
        // Don't upgrade the visibility for symbols exported to the dynamic
        // linker, as we have no information on their eventual use.
        !DynamicExportSymbols.count(GV.getGUID()) &&
        // With validation enabled, we want to exclude symbols visible to
        // regular objects. Local symbols will be in this group due to the
        // current implementation but those with VCallVisibilityTranslationUnit
        // will have already been marked in clang so are unaffected.
        !(ValidateAllVtablesHaveTypeInfos &&
          skipUpdateDueToValidation(GV, IsVisibleToRegularObj)))
      GV.setVCallVisibilityMetadata(GlobalObject::VCallVisibilityLinkageUnit);
  }
}

void llvm::updatePublicTypeTestCalls(Module &M,
                                     bool WholeProgramVisibilityEnabledInLTO) {
  Function *PublicTypeTestFunc =
      Intrinsic::getDeclarationIfExists(&M, Intrinsic::public_type_test);
  if (!PublicTypeTestFunc)
    return;
  if (hasWholeProgramVisibility(WholeProgramVisibilityEnabledInLTO)) {
    Function *TypeTestFunc =
        Intrinsic::getOrInsertDeclaration(&M, Intrinsic::type_test);
    for (Use &U : make_early_inc_range(PublicTypeTestFunc->uses())) {
      auto *CI = cast<CallInst>(U.getUser());
      auto *NewCI = CallInst::Create(
          TypeTestFunc, {CI->getArgOperand(0), CI->getArgOperand(1)}, {}, "",
          CI->getIterator());
      CI->replaceAllUsesWith(NewCI);
      CI->eraseFromParent();
    }
  } else {
    auto *True = ConstantInt::getTrue(M.getContext());
    for (Use &U : make_early_inc_range(PublicTypeTestFunc->uses())) {
      auto *CI = cast<CallInst>(U.getUser());
      CI->replaceAllUsesWith(True);
      CI->eraseFromParent();
    }
  }
}
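
// Schematically (illustrative IR, not from the original source), with whole
// program visibility
//
//   %p = call i1 @llvm.public.type.test(ptr %vtable, metadata !"_ZTS1A")
//
// is rewritten above to
//
//   %p = call i1 @llvm.type.test(ptr %vtable, metadata !"_ZTS1A")
//
// and without whole program visibility it is folded to true.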

/// Based on the typeID string, get all associated vtable GUIDs that are
/// visible to regular objects.
void llvm::getVisibleToRegularObjVtableGUIDs(
    ModuleSummaryIndex &Index,
    DenseSet<GlobalValue::GUID> &VisibleToRegularObjSymbols,
    function_ref<bool(StringRef)> IsVisibleToRegularObj) {
  for (const auto &typeID : Index.typeIdCompatibleVtableMap()) {
    if (typeIDVisibleToRegularObj(typeID.first, IsVisibleToRegularObj))
      for (const TypeIdOffsetVtableInfo &P : typeID.second)
        VisibleToRegularObjSymbols.insert(P.VTableVI.getGUID());
  }
}

/// If whole program visibility is asserted, then upgrade all public vcall
/// visibility metadata on vtable definition summaries to linkage unit
/// visibility in Module summary index (for ThinLTO).
void llvm::updateVCallVisibilityInIndex(
    ModuleSummaryIndex &Index, bool WholeProgramVisibilityEnabledInLTO,
    const DenseSet<GlobalValue::GUID> &DynamicExportSymbols,
    const DenseSet<GlobalValue::GUID> &VisibleToRegularObjSymbols) {
  if (!hasWholeProgramVisibility(WholeProgramVisibilityEnabledInLTO))
    return;
  for (auto &P : Index) {
    // Don't upgrade the visibility for symbols exported to the dynamic
    // linker, as we have no information on their eventual use.
    if (DynamicExportSymbols.count(P.first))
      continue;
    for (auto &S : P.second.SummaryList) {
      auto *GVar = dyn_cast<GlobalVarSummary>(S.get());
      if (!GVar ||
          GVar->getVCallVisibility() != GlobalObject::VCallVisibilityPublic)
        continue;
      // With validation enabled, we want to exclude symbols visible to regular
      // objects. Local symbols will be in this group due to the current
      // implementation but those with VCallVisibilityTranslationUnit will have
      // already been marked in clang so are unaffected.
      if (VisibleToRegularObjSymbols.count(P.first))
        continue;
      GVar->setVCallVisibility(GlobalObject::VCallVisibilityLinkageUnit);
    }
  }
}

void llvm::runWholeProgramDevirtOnIndex(
    ModuleSummaryIndex &Summary, std::set<GlobalValue::GUID> &ExportedGUIDs,
    std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap) {
  DevirtIndex(Summary, ExportedGUIDs, LocalWPDTargetsMap).run();
}

void llvm::updateIndexWPDForExports(
    ModuleSummaryIndex &Summary,
    function_ref<bool(StringRef, ValueInfo)> isExported,
    std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap) {
  for (auto &T : LocalWPDTargetsMap) {
    auto &VI = T.first;
    // This was enforced earlier during trySingleImplDevirt.
    assert(VI.getSummaryList().size() == 1 &&
           "Devirt of local target has more than one copy");
    auto &S = VI.getSummaryList()[0];
    if (!isExported(S->modulePath(), VI))
      continue;

    // It's been exported by a cross module import.
    for (auto &SlotSummary : T.second) {
      auto *TIdSum = Summary.getTypeIdSummary(SlotSummary.TypeID);
      assert(TIdSum);
      auto WPDRes = TIdSum->WPDRes.find(SlotSummary.ByteOffset);
      assert(WPDRes != TIdSum->WPDRes.end());
      WPDRes->second.SingleImplName = ModuleSummaryIndex::getGlobalNameForLocal(
          WPDRes->second.SingleImplName,
          Summary.getModuleHash(S->modulePath()));
    }
  }
}

static Error checkCombinedSummaryForTesting(ModuleSummaryIndex *Summary) {
  // Check that summary index contains regular LTO module when performing
  // export to prevent occasional use of index from pure ThinLTO compilation
  // (-fno-split-lto-module). This kind of summary index is passed to
  // DevirtIndex::run, not to DevirtModule::run used by opt/runForTesting.
  const auto &ModPaths = Summary->modulePaths();
  if (ClSummaryAction != PassSummaryAction::Import &&
      !ModPaths.contains(ModuleSummaryIndex::getRegularLTOModuleName()))
    return createStringError(
        errc::invalid_argument,
        "combined summary should contain Regular LTO module");
  return ErrorSuccess();
}

bool DevirtModule::runForTesting(
    Module &M, function_ref<AAResults &(Function &)> AARGetter,
    function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter,
    function_ref<DominatorTree &(Function &)> LookupDomTree) {
  std::unique_ptr<ModuleSummaryIndex> Summary =
      std::make_unique<ModuleSummaryIndex>(/*HaveGVs=*/false);

  // Handle the command-line summary arguments. This code is for testing
  // purposes only, so we handle errors directly.
  if (!ClReadSummary.empty()) {
    ExitOnError ExitOnErr("-wholeprogramdevirt-read-summary: " + ClReadSummary +
                          ": ");
    auto ReadSummaryFile =
        ExitOnErr(errorOrToExpected(MemoryBuffer::getFile(ClReadSummary)));
    if (Expected<std::unique_ptr<ModuleSummaryIndex>> SummaryOrErr =
            getModuleSummaryIndex(*ReadSummaryFile)) {
      Summary = std::move(*SummaryOrErr);
      ExitOnErr(checkCombinedSummaryForTesting(Summary.get()));
    } else {
      // Try YAML if we've failed with bitcode.
      consumeError(SummaryOrErr.takeError());
      yaml::Input In(ReadSummaryFile->getBuffer());
      In >> *Summary;
      ExitOnErr(errorCodeToError(In.error()));
    }
  }

  bool Changed =
      DevirtModule(M, AARGetter, OREGetter, LookupDomTree,
                   ClSummaryAction == PassSummaryAction::Export ? Summary.get()
                                                                : nullptr,
                   ClSummaryAction == PassSummaryAction::Import ? Summary.get()
                                                                : nullptr)
          .run();

  if (!ClWriteSummary.empty()) {
    ExitOnError ExitOnErr(
        "-wholeprogramdevirt-write-summary: " + ClWriteSummary + ": ");
    std::error_code EC;
    if (StringRef(ClWriteSummary).ends_with(".bc")) {
      raw_fd_ostream OS(ClWriteSummary, EC, sys::fs::OF_None);
      ExitOnErr(errorCodeToError(EC));
      writeIndexToFile(*Summary, OS);
    } else {
      raw_fd_ostream OS(ClWriteSummary, EC, sys::fs::OF_TextWithCRLF);
      ExitOnErr(errorCodeToError(EC));
      yaml::Output Out(OS);
      Out << *Summary;
    }
  }

  return Changed;
}
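
// A typical testing invocation of the hooks above might look like this
// (illustrative command line, file names hypothetical):
//
//   opt -passes=wholeprogramdevirt -whole-program-visibility \
//       -wholeprogramdevirt-summary-action=export \
//       -wholeprogramdevirt-write-summary=summary.yaml in.ll -S -o out.ll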

void DevirtModule::buildTypeIdentifierMap(
    std::vector<VTableBits> &Bits,
    DenseMap<Metadata *, std::set<TypeMemberInfo>> &TypeIdMap) {
  DenseMap<GlobalVariable *, VTableBits *> GVToBits;
  Bits.reserve(M.global_size());
  SmallVector<MDNode *, 2> Types;
  for (GlobalVariable &GV : M.globals()) {
    Types.clear();
    GV.getMetadata(LLVMContext::MD_type, Types);
    if (GV.isDeclaration() || Types.empty())
      continue;

    VTableBits *&BitsPtr = GVToBits[&GV];
    if (!BitsPtr) {
      Bits.emplace_back();
      Bits.back().GV = &GV;
      Bits.back().ObjectSize =
          M.getDataLayout().getTypeAllocSize(GV.getInitializer()->getType());
      BitsPtr = &Bits.back();
    }

    for (MDNode *Type : Types) {
      auto TypeID = Type->getOperand(1).get();

      uint64_t Offset =
          cast<ConstantInt>(
              cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())
              ->getZExtValue();

      TypeIdMap[TypeID].insert({BitsPtr, Offset});
    }
  }
}
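
// For reference, the !type metadata decoded above typically looks like this on
// a vtable definition (illustrative IR, names hypothetical):
//
//   @_ZTV1A = constant { [3 x ptr] } { ... }, !type !0
//   !0 = !{i64 16, !"_ZTS1A"}
//
// where operand 0 is the byte offset of the address point within the global
// and operand 1 is the type identifier.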

bool DevirtModule::tryFindVirtualCallTargets(
    std::vector<VirtualCallTarget> &TargetsForSlot,
    const std::set<TypeMemberInfo> &TypeMemberInfos, uint64_t ByteOffset,
    ModuleSummaryIndex *ExportSummary) {
  for (const TypeMemberInfo &TM : TypeMemberInfos) {
    if (!TM.Bits->GV->isConstant())
      return false;

    // We cannot perform whole program devirtualization analysis on a vtable
    // with public LTO visibility.
    if (TM.Bits->GV->getVCallVisibility() ==
        GlobalObject::VCallVisibilityPublic)
      return false;

    Function *Fn = nullptr;
    Constant *C = nullptr;
    std::tie(Fn, C) =
        getFunctionAtVTableOffset(TM.Bits->GV, TM.Offset + ByteOffset, M);

    if (!Fn)
      return false;

    if (FunctionsToSkip.match(Fn->getName()))
      return false;

    // We can disregard __cxa_pure_virtual as a possible call target, as
    // calls to pure virtuals are UB.
    if (Fn->getName() == "__cxa_pure_virtual")
      continue;

    // We can disregard unreachable functions as possible call targets, as
    // unreachable functions shouldn't be called.
    if (mustBeUnreachableFunction(Fn, ExportSummary))
      continue;

    // Save the symbol used in the vtable to use as the devirtualization
    // target.
    auto GV = dyn_cast<GlobalValue>(C);
    assert(GV);
    TargetsForSlot.push_back({GV, &TM});
  }

  // Give up if we couldn't find any targets.
  return !TargetsForSlot.empty();
}

bool DevirtIndex::tryFindVirtualCallTargets(
    std::vector<ValueInfo> &TargetsForSlot,
    const TypeIdCompatibleVtableInfo TIdInfo, uint64_t ByteOffset) {
  for (const TypeIdOffsetVtableInfo &P : TIdInfo) {
    // Find a representative copy of the vtable initializer.
    // We can have multiple available_externally, linkonce_odr and weak_odr
    // vtable initializers. We can also have multiple external vtable
    // initializers in the case of comdats, which we cannot check here.
    // The linker should give an error in this case.
    //
    // Also, handle the case of same-named local vtables with the same path
    // and therefore the same GUID. This can happen if there isn't enough
    // distinguishing path when compiling the source file. In that case we
    // conservatively return false early.
    const GlobalVarSummary *VS = nullptr;
    bool LocalFound = false;
    for (const auto &S : P.VTableVI.getSummaryList()) {
      if (GlobalValue::isLocalLinkage(S->linkage())) {
        if (LocalFound)
          return false;
        LocalFound = true;
      }
      auto *CurVS = cast<GlobalVarSummary>(S->getBaseObject());
      if (!CurVS->vTableFuncs().empty() ||
          // Previously clang did not attach the necessary type metadata to
          // available_externally vtables, in which case there would not
          // be any vtable functions listed in the summary and we need
          // to treat this case conservatively (in case the bitcode is old).
          // However, we will also not have any vtable functions in the
          // case of a pure virtual base class. In that case we do want
          // to set VS to avoid treating it conservatively.
          !GlobalValue::isAvailableExternallyLinkage(S->linkage())) {
        VS = CurVS;
        // We cannot perform whole program devirtualization analysis on a
        // vtable with public LTO visibility.
        if (VS->getVCallVisibility() == GlobalObject::VCallVisibilityPublic)
          return false;
      }
    }
    // There will be no VS if all copies are available_externally having no
    // type metadata. In that case we can't safely perform WPD.
    if (!VS)
      return false;
    if (!VS->isLive())
      continue;
    for (auto VTP : VS->vTableFuncs()) {
      if (VTP.VTableOffset != P.AddressPointOffset + ByteOffset)
        continue;

      if (mustBeUnreachableFunction(VTP.FuncVI))
        continue;

      TargetsForSlot.push_back(VTP.FuncVI);
    }
  }

  // Give up if we couldn't find any targets.
  return !TargetsForSlot.empty();
}

void DevirtModule::applySingleImplDevirt(VTableSlotInfo &SlotInfo,
                                         Constant *TheFn, bool &IsExported) {
  // Don't devirtualize the function if we're told to skip it
  // in -wholeprogramdevirt-skip.
  if (FunctionsToSkip.match(TheFn->stripPointerCasts()->getName()))
    return;
  auto Apply = [&](CallSiteInfo &CSInfo) {
    for (auto &&VCallSite : CSInfo.CallSites) {
      if (!OptimizedCalls.insert(&VCallSite.CB).second)
        continue;

      // Stop when the number of devirted calls reaches the cutoff.
      if (WholeProgramDevirtCutoff.getNumOccurrences() > 0 &&
          NumDevirtCalls >= WholeProgramDevirtCutoff)
        return;

      if (RemarksEnabled)
        VCallSite.emitRemark("single-impl",
                             TheFn->stripPointerCasts()->getName(), OREGetter);
      NumSingleImpl++;
      NumDevirtCalls++;
      auto &CB = VCallSite.CB;
      assert(!CB.getCalledFunction() && "devirtualizing direct call?");
      IRBuilder<> Builder(&CB);
      Value *Callee =
          Builder.CreateBitCast(TheFn, CB.getCalledOperand()->getType());

      // If trap checking is enabled, add support to compare the virtual
      // function pointer to the devirtualized target. In case of a mismatch,
      // perform a debug trap.
      if (DevirtCheckMode == WPDCheckMode::Trap) {
        auto *Cond = Builder.CreateICmpNE(CB.getCalledOperand(), Callee);
        Instruction *ThenTerm = SplitBlockAndInsertIfThen(
            Cond, &CB, /*Unreachable=*/false,
            MDBuilder(M.getContext()).createUnlikelyBranchWeights());
        Builder.SetInsertPoint(ThenTerm);
        Function *TrapFn =
            Intrinsic::getOrInsertDeclaration(&M, Intrinsic::debugtrap);
        auto *CallTrap = Builder.CreateCall(TrapFn);
        CallTrap->setDebugLoc(CB.getDebugLoc());
      }

      // If fallback checking is enabled, add support to compare the virtual
      // function pointer to the devirtualized target. In case of a mismatch,
      // fall back to indirect call.
      if (DevirtCheckMode == WPDCheckMode::Fallback) {
        MDNode *Weights = MDBuilder(M.getContext()).createLikelyBranchWeights();
        // Version the indirect call site. If the called value is equal to the
        // given callee, 'NewInst' will be executed, otherwise the original call
        // site will be executed.
        CallBase &NewInst = versionCallSite(CB, Callee, Weights);
        NewInst.setCalledOperand(Callee);
        // Since the new call site is direct, we must clear metadata that
        // is only appropriate for indirect calls. This includes !prof and
        // !callees metadata.
        NewInst.setMetadata(LLVMContext::MD_prof, nullptr);
        NewInst.setMetadata(LLVMContext::MD_callees, nullptr);
        // Additionally, we should remove them from the fallback indirect call,
        // so that we don't attempt to perform indirect call promotion later.
        CB.setMetadata(LLVMContext::MD_prof, nullptr);
        CB.setMetadata(LLVMContext::MD_callees, nullptr);
      }

      // In either trapping or non-checking mode, devirtualize original call.
      else {
        // Devirtualize unconditionally.
        CB.setCalledOperand(Callee);
        // Since the call site is now direct, we must clear metadata that
        // is only appropriate for indirect calls. This includes !prof and
        // !callees metadata.
        CB.setMetadata(LLVMContext::MD_prof, nullptr);
        CB.setMetadata(LLVMContext::MD_callees, nullptr);
        if (CB.getCalledOperand() &&
            CB.getOperandBundle(LLVMContext::OB_ptrauth)) {
          auto *NewCS = CallBase::removeOperandBundle(
              &CB, LLVMContext::OB_ptrauth, CB.getIterator());
          CB.replaceAllUsesWith(NewCS);
          // Schedule for deletion at the end of pass run.
          CallsWithPtrAuthBundleRemoved.push_back(&CB);
        }
      }

      // This use is no longer unsafe.
      if (VCallSite.NumUnsafeUses)
        --*VCallSite.NumUnsafeUses;
    }
    if (CSInfo.isExported())
      IsExported = true;
    CSInfo.markDevirt();
  };
  Apply(SlotInfo.CSInfo);
  for (auto &P : SlotInfo.ConstCSInfo)
    Apply(P.second);
}
1286 | |
1287 | static bool AddCalls(VTableSlotInfo &SlotInfo, const ValueInfo &Callee) { |
1288 | // We can't add calls if we haven't seen a definition |
1289 | if (Callee.getSummaryList().empty()) |
1290 | return false; |
1291 | |
1292 | // Insert calls into the summary index so that the devirtualized targets |
1293 | // are eligible for import. |
1294 | // FIXME: Annotate type tests with hotness. For now, mark these as hot |
1295 | // to better ensure we have the opportunity to inline them. |
1296 | bool IsExported = false; |
1297 | auto &S = Callee.getSummaryList()[0]; |
1298 | CalleeInfo CI(CalleeInfo::HotnessType::Hot, /* HasTailCall = */ false, |
1299 | /* RelBF = */ 0); |
1300 | auto AddCalls = [&](CallSiteInfo &CSInfo) { |
1301 | for (auto *FS : CSInfo.SummaryTypeCheckedLoadUsers) { |
1302 | FS->addCall(E: {Callee, CI}); |
1303 | IsExported |= S->modulePath() != FS->modulePath(); |
1304 | } |
1305 | for (auto *FS : CSInfo.SummaryTypeTestAssumeUsers) { |
1306 | FS->addCall(E: {Callee, CI}); |
1307 | IsExported |= S->modulePath() != FS->modulePath(); |
1308 | } |
1309 | }; |
1310 | AddCalls(SlotInfo.CSInfo); |
1311 | for (auto &P : SlotInfo.ConstCSInfo) |
1312 | AddCalls(P.second); |
1313 | return IsExported; |
1314 | } |
1315 | |
1316 | bool DevirtModule::trySingleImplDevirt( |
1317 | ModuleSummaryIndex *ExportSummary, |
1318 | MutableArrayRef<VirtualCallTarget> TargetsForSlot, VTableSlotInfo &SlotInfo, |
1319 | WholeProgramDevirtResolution *Res) { |
1320 | // See if the program contains a single implementation of this virtual |
1321 | // function. |
1322 | auto *TheFn = TargetsForSlot[0].Fn; |
1323 | for (auto &&Target : TargetsForSlot) |
1324 | if (TheFn != Target.Fn) |
1325 | return false; |
1326 | |
1327 | // If so, update each call site to call that implementation directly. |
1328 | if (RemarksEnabled || AreStatisticsEnabled()) |
1329 | TargetsForSlot[0].WasDevirt = true; |
1330 | |
1331 | bool IsExported = false; |
1332 | applySingleImplDevirt(SlotInfo, TheFn, IsExported); |
1333 | if (!IsExported) |
1334 | return false; |
1335 | |
1336 | // If the only implementation has local linkage, we must promote to external |
1337 | // to make it visible to thin LTO objects. We can only get here during the |
1338 | // ThinLTO export phase. |
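  // For example (hypothetical name), an internal function @_ZL3foov would be
  // renamed to @_ZL3foov.llvm.merged and given external linkage with hidden
  // visibility, so other ThinLTO modules can reference it.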
  if (TheFn->hasLocalLinkage()) {
    std::string NewName = (TheFn->getName() + ".llvm.merged").str();

    // Since we are renaming the function, any comdats with the same name must
    // also be renamed. This is required when targeting COFF, as the comdat
    // name must match one of the names of the symbols in the comdat.
    if (Comdat *C = TheFn->getComdat()) {
      if (C->getName() == TheFn->getName()) {
        Comdat *NewC = M.getOrInsertComdat(NewName);
        NewC->setSelectionKind(C->getSelectionKind());
        for (GlobalObject &GO : M.global_objects())
          if (GO.getComdat() == C)
            GO.setComdat(NewC);
      }
    }

    TheFn->setLinkage(GlobalValue::ExternalLinkage);
    TheFn->setVisibility(GlobalValue::HiddenVisibility);
    TheFn->setName(NewName);
  }
  if (ValueInfo TheFnVI = ExportSummary->getValueInfo(TheFn->getGUID()))
    // Any needed promotion of 'TheFn' has already been done during
    // LTO unit split, so we can ignore the return value of AddCalls.
    AddCalls(SlotInfo, TheFnVI);

  Res->TheKind = WholeProgramDevirtResolution::SingleImpl;
  Res->SingleImplName = std::string(TheFn->getName());

  return true;
}

bool DevirtIndex::trySingleImplDevirt(MutableArrayRef<ValueInfo> TargetsForSlot,
                                      VTableSlotSummary &SlotSummary,
                                      VTableSlotInfo &SlotInfo,
                                      WholeProgramDevirtResolution *Res,
                                      std::set<ValueInfo> &DevirtTargets) {
  // See if the program contains a single implementation of this virtual
  // function.
  auto TheFn = TargetsForSlot[0];
  for (auto &&Target : TargetsForSlot)
    if (TheFn != Target)
      return false;

  // Don't devirtualize if we don't have a target definition.
  auto Size = TheFn.getSummaryList().size();
  if (!Size)
    return false;

  // Don't devirtualize the function if we're told to skip it
  // in -wholeprogramdevirt-skip.
  if (FunctionsToSkip.match(TheFn.name()))
    return false;

  // If the summary list contains multiple summaries where at least one is
  // a local, give up, as we won't know which (possibly promoted) name to use.
  for (const auto &S : TheFn.getSummaryList())
    if (GlobalValue::isLocalLinkage(S->linkage()) && Size > 1)
      return false;

  // Collect functions devirtualized at least for one call site for stats.
  if (PrintSummaryDevirt || AreStatisticsEnabled())
    DevirtTargets.insert(TheFn);

  auto &S = TheFn.getSummaryList()[0];
  bool IsExported = AddCalls(SlotInfo, TheFn);
  if (IsExported)
    ExportedGUIDs.insert(TheFn.getGUID());

  // Record in summary for use in devirtualization during the ThinLTO import
  // step.
  Res->TheKind = WholeProgramDevirtResolution::SingleImpl;
  if (GlobalValue::isLocalLinkage(S->linkage())) {
    if (IsExported)
      // If the target is a local function and we are exporting it by
      // devirtualizing a call in another module, we need to record the
      // promoted name.
      Res->SingleImplName = ModuleSummaryIndex::getGlobalNameForLocal(
          TheFn.name(), ExportSummary.getModuleHash(S->modulePath()));
    else {
      LocalWPDTargetsMap[TheFn].push_back(SlotSummary);
      Res->SingleImplName = std::string(TheFn.name());
    }
  } else
    Res->SingleImplName = std::string(TheFn.name());

  // The name will be empty if this thin link was driven off of a serialized
  // combined index (e.g. llvm-lto). However, WPD is not supported/invoked for
  // the legacy LTO API anyway.
  assert(!Res->SingleImplName.empty());

  return true;
}

void DevirtModule::tryICallBranchFunnel(
    MutableArrayRef<VirtualCallTarget> TargetsForSlot, VTableSlotInfo &SlotInfo,
    WholeProgramDevirtResolution *Res, VTableSlot Slot) {
  Triple T(M.getTargetTriple());
  if (T.getArch() != Triple::x86_64)
    return;

  if (TargetsForSlot.size() > ClThreshold)
    return;

  bool HasNonDevirt = !SlotInfo.CSInfo.AllCallSitesDevirted;
  if (!HasNonDevirt)
    for (auto &P : SlotInfo.ConstCSInfo)
      if (!P.second.AllCallSitesDevirted) {
        HasNonDevirt = true;
        break;
      }

  if (!HasNonDevirt)
    return;

  // If any GV is AvailableExternally, do not generate a branch funnel.
  // NOTE: This avoids a crash in LowerTypeTests. If the branch funnel were
  // generated, then because GV.isDeclarationForLinker() holds, the GV's
  // GlobalTypeMember would not be recorded in GlobalTypeMembers[&GV] in
  // LowerTypeTestsModule::lower(), and buildBitSetsFromDisjointSet would
  // crash on the null GlobalTypeMembers[&GV]. Even if the entry were saved
  // to avoid that crash, the Verifier or SelectionDAGBuilder would still
  // report a fatal error later, because the operand linkage consistency
  // check for llvm.icall.branch.funnel cannot pass.
  for (auto &T : TargetsForSlot) {
    if (T.TM->Bits->GV->hasAvailableExternallyLinkage())
      return;
  }

  FunctionType *FT =
      FunctionType::get(Type::getVoidTy(M.getContext()), {Int8PtrTy}, true);
  Function *JT;
  if (isa<MDString>(Slot.TypeID)) {
    JT = Function::Create(FT, Function::ExternalLinkage,
                          M.getDataLayout().getProgramAddressSpace(),
                          getGlobalName(Slot, {}, "branch_funnel"), &M);
    JT->setVisibility(GlobalValue::HiddenVisibility);
  } else {
    JT = Function::Create(FT, Function::InternalLinkage,
                          M.getDataLayout().getProgramAddressSpace(),
                          "branch_funnel", &M);
  }
  JT->addParamAttr(0, Attribute::Nest);

  std::vector<Value *> JTArgs;
  JTArgs.push_back(JT->arg_begin());
  for (auto &T : TargetsForSlot) {
    JTArgs.push_back(getMemberAddr(T.TM));
    JTArgs.push_back(T.Fn);
  }

  BasicBlock *BB = BasicBlock::Create(M.getContext(), "", JT, nullptr);
  Function *Intr = Intrinsic::getOrInsertDeclaration(
      &M, llvm::Intrinsic::icall_branch_funnel, {});

  auto *CI = CallInst::Create(Intr, JTArgs, "", BB);
  CI->setTailCallKind(CallInst::TCK_MustTail);
  ReturnInst::Create(M.getContext(), nullptr, BB);
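  // The generated funnel has the following shape (illustrative only; the
  // actual symbol name is derived from the type id):
  //
  //   define hidden void @__typeid_..._branch_funnel(ptr nest %vt, ...) {
  //     musttail call void (...) @llvm.icall.branch.funnel(ptr %vt,
  //         ptr @vtableA, ptr @implA, ptr @vtableB, ptr @implB, ...)
  //     ret void
  //   }
  //
  // The backend lowers the intrinsic to a comparison tree over the vtable
  // addresses, ending in tail calls to the matching implementations.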

  bool IsExported = false;
  applyICallBranchFunnel(SlotInfo, JT, IsExported);
  if (IsExported)
    Res->TheKind = WholeProgramDevirtResolution::BranchFunnel;
}

void DevirtModule::applyICallBranchFunnel(VTableSlotInfo &SlotInfo,
                                          Constant *JT, bool &IsExported) {
  auto Apply = [&](CallSiteInfo &CSInfo) {
    if (CSInfo.isExported())
      IsExported = true;
    if (CSInfo.AllCallSitesDevirted)
      return;

    std::map<CallBase *, CallBase *> CallBases;
    for (auto &&VCallSite : CSInfo.CallSites) {
      CallBase &CB = VCallSite.CB;

      if (CallBases.find(&CB) != CallBases.end()) {
        // When finding devirtualizable calls, it's possible to find the same
        // vtable passed to multiple llvm.type.test or llvm.type.checked.load
        // calls, which can cause duplicate call sites to be recorded in
        // [Const]CallSites. If we've already found one of these
        // call instances, just ignore it. It will be replaced later.
        continue;
      }

      // Jump tables are only profitable if the retpoline mitigation is
      // enabled.
      Attribute FSAttr = CB.getCaller()->getFnAttribute("target-features");
      if (!FSAttr.isValid() ||
          !FSAttr.getValueAsString().contains("+retpoline"))
        continue;

      NumBranchFunnel++;
      if (RemarksEnabled)
        VCallSite.emitRemark("branch-funnel",
                             JT->stripPointerCasts()->getName(), OREGetter);

      // Pass the address of the vtable in the nest register, which is r10 on
      // x86_64.
      std::vector<Type *> NewArgs;
      NewArgs.push_back(Int8PtrTy);
      append_range(NewArgs, CB.getFunctionType()->params());
      FunctionType *NewFT =
          FunctionType::get(CB.getFunctionType()->getReturnType(), NewArgs,
                            CB.getFunctionType()->isVarArg());
      IRBuilder<> IRB(&CB);
      std::vector<Value *> Args;
      Args.push_back(VCallSite.VTable);
      llvm::append_range(Args, CB.args());

      CallBase *NewCS = nullptr;
      if (isa<CallInst>(CB))
        NewCS = IRB.CreateCall(NewFT, JT, Args);
      else
        NewCS =
            IRB.CreateInvoke(NewFT, JT, cast<InvokeInst>(CB).getNormalDest(),
                             cast<InvokeInst>(CB).getUnwindDest(), Args);
      NewCS->setCallingConv(CB.getCallingConv());

      AttributeList Attrs = CB.getAttributes();
      std::vector<AttributeSet> NewArgAttrs;
      NewArgAttrs.push_back(AttributeSet::get(
          M.getContext(), ArrayRef<Attribute>{Attribute::get(
                              M.getContext(), Attribute::Nest)}));
      for (unsigned I = 0; I + 2 < Attrs.getNumAttrSets(); ++I)
        NewArgAttrs.push_back(Attrs.getParamAttrs(I));
      NewCS->setAttributes(
          AttributeList::get(M.getContext(), Attrs.getFnAttrs(),
                             Attrs.getRetAttrs(), NewArgAttrs));

      CallBases[&CB] = NewCS;

      // This use is no longer unsafe.
      if (VCallSite.NumUnsafeUses)
        --*VCallSite.NumUnsafeUses;
    }
    // Don't mark as devirtualized because there may be callers compiled
    // without retpoline mitigation, which would mean that they are lowered to
    // llvm.type.test and therefore require an llvm.type.test resolution for
    // the type identifier.

    for (auto &[Old, New] : CallBases) {
      Old->replaceAllUsesWith(New);
      Old->eraseFromParent();
    }
  };
  Apply(SlotInfo.CSInfo);
  for (auto &P : SlotInfo.ConstCSInfo)
    Apply(P.second);
}

bool DevirtModule::tryEvaluateFunctionsWithArgs(
    MutableArrayRef<VirtualCallTarget> TargetsForSlot,
    ArrayRef<uint64_t> Args) {
  // Evaluate each function and store the result in each target's RetVal
  // field.
  for (VirtualCallTarget &Target : TargetsForSlot) {
    // TODO: Skip for now if the vtable symbol was an alias to a function,
    // need to evaluate whether it would be correct to analyze the aliasee
    // function for this optimization.
    auto Fn = dyn_cast<Function>(Target.Fn);
    if (!Fn)
      return false;

    if (Fn->arg_size() != Args.size() + 1)
      return false;

    Evaluator Eval(M.getDataLayout(), nullptr);
    SmallVector<Constant *, 2> EvalArgs;
    EvalArgs.push_back(
        Constant::getNullValue(Fn->getFunctionType()->getParamType(0)));
    for (unsigned I = 0; I != Args.size(); ++I) {
      auto *ArgTy =
          dyn_cast<IntegerType>(Fn->getFunctionType()->getParamType(I + 1));
      if (!ArgTy)
        return false;
      EvalArgs.push_back(ConstantInt::get(ArgTy, Args[I]));
    }

    Constant *RetVal;
    if (!Eval.EvaluateFunction(Fn, RetVal, EvalArgs) ||
        !isa<ConstantInt>(RetVal))
      return false;
    Target.RetVal = cast<ConstantInt>(RetVal)->getZExtValue();
  }
  return true;
}

void DevirtModule::applyUniformRetValOpt(CallSiteInfo &CSInfo, StringRef FnName,
                                         uint64_t TheRetVal) {
  for (auto Call : CSInfo.CallSites) {
    if (!OptimizedCalls.insert(&Call.CB).second)
      continue;
    NumUniformRetVal++;
    Call.replaceAndErase(
        "uniform-ret-val", FnName, RemarksEnabled, OREGetter,
        ConstantInt::get(cast<IntegerType>(Call.CB.getType()), TheRetVal));
  }
  CSInfo.markDevirt();
}

bool DevirtModule::tryUniformRetValOpt(
    MutableArrayRef<VirtualCallTarget> TargetsForSlot, CallSiteInfo &CSInfo,
    WholeProgramDevirtResolution::ByArg *Res) {
  // Uniform return value optimization. If all functions return the same
  // constant, replace all calls with that constant.
  uint64_t TheRetVal = TargetsForSlot[0].RetVal;
  for (const VirtualCallTarget &Target : TargetsForSlot)
    if (Target.RetVal != TheRetVal)
      return false;

  if (CSInfo.isExported()) {
    Res->TheKind = WholeProgramDevirtResolution::ByArg::UniformRetVal;
    Res->Info = TheRetVal;
  }

  applyUniformRetValOpt(CSInfo, TargetsForSlot[0].Fn->getName(), TheRetVal);
  if (RemarksEnabled || AreStatisticsEnabled())
    for (auto &&Target : TargetsForSlot)
      Target.WasDevirt = true;
  return true;
}

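// Returns a symbol name of the form
//   __typeid_<typeid>_<byteoffset>[_<arg>]*_<name>
// e.g. (with a hypothetical type id) "__typeid__ZTS1A_8_unique_member".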
std::string DevirtModule::getGlobalName(VTableSlot Slot,
                                        ArrayRef<uint64_t> Args,
                                        StringRef Name) {
  std::string FullName = "__typeid_";
  raw_string_ostream OS(FullName);
  OS << cast<MDString>(Slot.TypeID)->getString() << '_' << Slot.ByteOffset;
  for (uint64_t Arg : Args)
    OS << '_' << Arg;
  OS << '_' << Name;
  return FullName;
}

bool DevirtModule::shouldExportConstantsAsAbsoluteSymbols() {
  Triple T(M.getTargetTriple());
  return T.isX86() && T.getObjectFormat() == Triple::ELF;
}

void DevirtModule::exportGlobal(VTableSlot Slot, ArrayRef<uint64_t> Args,
                                StringRef Name, Constant *C) {
  GlobalAlias *GA = GlobalAlias::create(Int8Ty, 0, GlobalValue::ExternalLinkage,
                                        getGlobalName(Slot, Args, Name), C, &M);
  GA->setVisibility(GlobalValue::HiddenVisibility);
}

void DevirtModule::exportConstant(VTableSlot Slot, ArrayRef<uint64_t> Args,
                                  StringRef Name, uint32_t Const,
                                  uint32_t &Storage) {
  if (shouldExportConstantsAsAbsoluteSymbols()) {
    exportGlobal(
        Slot, Args, Name,
        ConstantExpr::getIntToPtr(ConstantInt::get(Int32Ty, Const), Int8PtrTy));
    return;
  }

  Storage = Const;
}

Constant *DevirtModule::importGlobal(VTableSlot Slot, ArrayRef<uint64_t> Args,
                                     StringRef Name) {
  GlobalVariable *GV =
      M.getOrInsertGlobal(getGlobalName(Slot, Args, Name), Int8Arr0Ty);
  GV->setVisibility(GlobalValue::HiddenVisibility);
  return GV;
}

Constant *DevirtModule::importConstant(VTableSlot Slot, ArrayRef<uint64_t> Args,
                                       StringRef Name, IntegerType *IntTy,
                                       uint32_t Storage) {
  if (!shouldExportConstantsAsAbsoluteSymbols())
    return ConstantInt::get(IntTy, Storage);

  Constant *C = importGlobal(Slot, Args, Name);
  auto *GV = cast<GlobalVariable>(C->stripPointerCasts());
  C = ConstantExpr::getPtrToInt(C, IntTy);

  // We only need to set metadata if the global is newly created, in which
  // case it would not have hidden visibility.
  if (GV->hasMetadata(LLVMContext::MD_absolute_symbol))
    return C;

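  // !absolute_symbol metadata attaches a [Min, Max) range of possible values
  // to the symbol. The special pair {-1, -1} denotes the full set. For
  // example, an imported i8 constant gets the range [0, 0x100).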
  auto SetAbsRange = [&](uint64_t Min, uint64_t Max) {
    auto *MinC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Min));
    auto *MaxC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Max));
    GV->setMetadata(LLVMContext::MD_absolute_symbol,
                    MDNode::get(M.getContext(), {MinC, MaxC}));
  };
  unsigned AbsWidth = IntTy->getBitWidth();
  if (AbsWidth == IntPtrTy->getBitWidth())
    SetAbsRange(~0ull, ~0ull); // Full set.
  else
    SetAbsRange(0, 1ull << AbsWidth);
  return C;
}

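// Illustrative sketch (hypothetical names): if, among all targets, only the
// implementation reached through B's vtable returns 1 for the given constant
// arguments, a call such as
//
//   %r = call i1 %vfn(ptr %obj)
//
// is replaced by a comparison of the vtable pointer itself:
//
//   %r = icmp eq ptr %vtable, getelementptr(i8, ptr @_ZTV1B, i64 16)
//
// (zero-extended to the call's return type if needed).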
void DevirtModule::applyUniqueRetValOpt(CallSiteInfo &CSInfo, StringRef FnName,
                                        bool IsOne,
                                        Constant *UniqueMemberAddr) {
  for (auto &&Call : CSInfo.CallSites) {
    if (!OptimizedCalls.insert(&Call.CB).second)
      continue;
    IRBuilder<> B(&Call.CB);
    Value *Cmp =
        B.CreateICmp(IsOne ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE, Call.VTable,
                     B.CreateBitCast(UniqueMemberAddr, Call.VTable->getType()));
    Cmp = B.CreateZExt(Cmp, Call.CB.getType());
    NumUniqueRetVal++;
    Call.replaceAndErase("unique-ret-val", FnName, RemarksEnabled, OREGetter,
                         Cmp);
  }
  CSInfo.markDevirt();
}

Constant *DevirtModule::getMemberAddr(const TypeMemberInfo *M) {
  return ConstantExpr::getGetElementPtr(Int8Ty, M->Bits->GV,
                                        ConstantInt::get(Int64Ty, M->Offset));
}

bool DevirtModule::tryUniqueRetValOpt(
    unsigned BitWidth, MutableArrayRef<VirtualCallTarget> TargetsForSlot,
    CallSiteInfo &CSInfo, WholeProgramDevirtResolution::ByArg *Res,
    VTableSlot Slot, ArrayRef<uint64_t> Args) {
  // IsOne controls whether we look for a 0 or a 1.
  auto tryUniqueRetValOptFor = [&](bool IsOne) {
    const TypeMemberInfo *UniqueMember = nullptr;
    for (const VirtualCallTarget &Target : TargetsForSlot) {
      if (Target.RetVal == (IsOne ? 1 : 0)) {
        if (UniqueMember)
          return false;
        UniqueMember = Target.TM;
      }
    }

    // We should have found a unique member or bailed out by now. We already
    // checked for a uniform return value in tryUniformRetValOpt.
    assert(UniqueMember);

    Constant *UniqueMemberAddr = getMemberAddr(UniqueMember);
    if (CSInfo.isExported()) {
      Res->TheKind = WholeProgramDevirtResolution::ByArg::UniqueRetVal;
      Res->Info = IsOne;

      exportGlobal(Slot, Args, "unique_member", UniqueMemberAddr);
    }

    // Replace each call with the comparison.
    applyUniqueRetValOpt(CSInfo, TargetsForSlot[0].Fn->getName(), IsOne,
                         UniqueMemberAddr);

    // Update devirtualization statistics for targets.
    if (RemarksEnabled || AreStatisticsEnabled())
      for (auto &&Target : TargetsForSlot)
        Target.WasDevirt = true;

    return true;
  };

  if (BitWidth == 1) {
    if (tryUniqueRetValOptFor(true))
      return true;
    if (tryUniqueRetValOptFor(false))
      return true;
  }
  return false;
}

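// Illustrative sketch (hypothetical offsets): once the evaluated return
// values have been stored alongside each vtable, a call such as
//
//   %r = call i32 %vfn(ptr %obj, i32 5)
//
// becomes a load at a fixed offset from the vtable address:
//
//   %addr = getelementptr i8, ptr %vtable, i32 -4
//   %r = load i32, ptr %addr
//
// For an i1 return type the value is stored as a single bit within a byte,
// so the rewrite loads the byte, masks the bit, and compares against zero.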
void DevirtModule::applyVirtualConstProp(CallSiteInfo &CSInfo, StringRef FnName,
                                         Constant *Byte, Constant *Bit) {
  for (auto Call : CSInfo.CallSites) {
    if (!OptimizedCalls.insert(&Call.CB).second)
      continue;
    auto *RetType = cast<IntegerType>(Call.CB.getType());
    IRBuilder<> B(&Call.CB);
    Value *Addr = B.CreatePtrAdd(Call.VTable, Byte);
    if (RetType->getBitWidth() == 1) {
      Value *Bits = B.CreateLoad(Int8Ty, Addr);
      Value *BitsAndBit = B.CreateAnd(Bits, Bit);
      auto IsBitSet = B.CreateICmpNE(BitsAndBit, ConstantInt::get(Int8Ty, 0));
      NumVirtConstProp1Bit++;
      Call.replaceAndErase("virtual-const-prop-1-bit", FnName, RemarksEnabled,
                           OREGetter, IsBitSet);
    } else {
      Value *Val = B.CreateLoad(RetType, Addr);
      NumVirtConstProp++;
      Call.replaceAndErase("virtual-const-prop", FnName, RemarksEnabled,
                           OREGetter, Val);
    }
  }
  CSInfo.markDevirt();
}

bool DevirtModule::tryVirtualConstProp(
    MutableArrayRef<VirtualCallTarget> TargetsForSlot, VTableSlotInfo &SlotInfo,
    WholeProgramDevirtResolution *Res, VTableSlot Slot) {
  // TODO: Skip for now if the vtable symbol was an alias to a function,
  // need to evaluate whether it would be correct to analyze the aliasee
  // function for this optimization.
  auto Fn = dyn_cast<Function>(TargetsForSlot[0].Fn);
  if (!Fn)
    return false;
  // This only works if the function returns an integer.
  auto RetType = dyn_cast<IntegerType>(Fn->getReturnType());
  if (!RetType)
    return false;
  unsigned BitWidth = RetType->getBitWidth();

  // TODO: Since we can evaluate these constants at compile time, we can save
  // some space by calculating the smallest range of values that all these
  // constants can fit in, then only allocating enough space to fit those
  // values. At each call site, we can get the original type by doing a
  // sign/zero extension. For example, if we would store an i64, but we can
  // see that all the values fit into an i16, then we can store an i16
  // before/after the vtable and at each call site do a s/zext.
  if (BitWidth > 64)
    return false;

  Align TypeAlignment = M.getDataLayout().getABIIntegerTypeAlignment(BitWidth);

  // Make sure that each function is defined, does not access memory, takes at
  // least one argument, does not use its first argument (which we assume is
  // 'this'), and has the same return type.
  //
  // Note that we test whether this copy of the function is readnone, rather
  // than testing function attributes, which must hold for any copy of the
  // function, even a less optimized version substituted at link time. This is
  // sound because the virtual constant propagation optimizations effectively
  // inline all implementations of the virtual function into each call site,
  // rather than using function attributes to perform local optimization.
  for (VirtualCallTarget &Target : TargetsForSlot) {
    // TODO: Skip for now if the vtable symbol was an alias to a function,
    // need to evaluate whether it would be correct to analyze the aliasee
    // function for this optimization.
    auto Fn = dyn_cast<Function>(Target.Fn);
    if (!Fn)
      return false;

    if (Fn->isDeclaration() ||
        !computeFunctionBodyMemoryAccess(*Fn, AARGetter(*Fn))
             .doesNotAccessMemory() ||
        Fn->arg_empty() || !Fn->arg_begin()->use_empty() ||
        Fn->getReturnType() != RetType)
      return false;

    // This only works if the integer size is at most the alignment of the
    // vtable. If the table is underaligned, then we can't guarantee that the
    // constant will always be aligned to the integer type alignment. For
    // example, if the table is `align 1`, we can never guarantee that an i32
    // stored before/after the vtable is 32-bit aligned without changing the
    // alignment of the new global.
    GlobalVariable *GV = Target.TM->Bits->GV;
    Align TableAlignment = M.getDataLayout().getValueOrABITypeAlignment(
        GV->getAlign(), GV->getValueType());
    if (TypeAlignment > TableAlignment)
      return false;
  }

  for (auto &&CSByConstantArg : SlotInfo.ConstCSInfo) {
    if (!tryEvaluateFunctionsWithArgs(TargetsForSlot, CSByConstantArg.first))
      continue;

    WholeProgramDevirtResolution::ByArg *ResByArg = nullptr;
    if (Res)
      ResByArg = &Res->ResByArg[CSByConstantArg.first];

    if (tryUniformRetValOpt(TargetsForSlot, CSByConstantArg.second, ResByArg))
      continue;

    if (tryUniqueRetValOpt(BitWidth, TargetsForSlot, CSByConstantArg.second,
                           ResByArg, Slot, CSByConstantArg.first))
      continue;

    // Find an allocation offset in bits in all vtables associated with the
    // type.
    // TODO: If there would be "holes" in the vtable that were added by
    // padding, we could place i1s there to reduce any extra padding that
    // would be introduced by the i1s.
    uint64_t AllocBefore =
        findLowestOffset(TargetsForSlot, /*IsAfter=*/false, BitWidth);
    uint64_t AllocAfter =
        findLowestOffset(TargetsForSlot, /*IsAfter=*/true, BitWidth);

    // Calculate the total amount of padding needed to store a value at both
    // ends of the object.
    uint64_t TotalPaddingBefore = 0, TotalPaddingAfter = 0;
    for (auto &&Target : TargetsForSlot) {
      TotalPaddingBefore += std::max<int64_t>(
          (AllocBefore + 7) / 8 - Target.allocatedBeforeBytes() - 1, 0);
      TotalPaddingAfter += std::max<int64_t>(
          (AllocAfter + 7) / 8 - Target.allocatedAfterBytes() - 1, 0);
    }

    // If the amount of padding is too large, give up.
    // FIXME: do something smarter here.
    if (std::min(TotalPaddingBefore, TotalPaddingAfter) > 128)
      continue;

    // Calculate the offset to the value as a (possibly negative) byte offset
    // and (if applicable) a bit offset, and store the values in the targets.
    int64_t OffsetByte;
    uint64_t OffsetBit;
    if (TotalPaddingBefore <= TotalPaddingAfter)
      setBeforeReturnValues(TargetsForSlot, AllocBefore, BitWidth, OffsetByte,
                            OffsetBit);
    else
      setAfterReturnValues(TargetsForSlot, AllocAfter, BitWidth, OffsetByte,
                           OffsetBit);

    // In an earlier check we forbade constant propagation from operating on
    // tables whose alignment is less than the alignment needed for loading
    // the constant. Thus, the address we take the offset from will always be
    // aligned to at least this integer alignment. Now, we need to ensure that
    // the offset is also aligned to this integer alignment to ensure we
    // always have an aligned load.
    assert(OffsetByte % TypeAlignment.value() == 0);

    if (RemarksEnabled || AreStatisticsEnabled())
      for (auto &&Target : TargetsForSlot)
        Target.WasDevirt = true;

    if (CSByConstantArg.second.isExported()) {
      ResByArg->TheKind = WholeProgramDevirtResolution::ByArg::VirtualConstProp;
      exportConstant(Slot, CSByConstantArg.first, "byte", OffsetByte,
                     ResByArg->Byte);
      exportConstant(Slot, CSByConstantArg.first, "bit", 1ULL << OffsetBit,
                     ResByArg->Bit);
    }

    // Rewrite each call to a load from OffsetByte/OffsetBit.
    Constant *ByteConst = ConstantInt::get(Int32Ty, OffsetByte);
    Constant *BitConst = ConstantInt::get(Int8Ty, 1ULL << OffsetBit);
    applyVirtualConstProp(CSByConstantArg.second,
                          TargetsForSlot[0].Fn->getName(), ByteConst, BitConst);
  }
  return true;
}

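// Illustrative sketch (names hypothetical): a vtable @_ZTV1A with two bytes
// of 'before' data and four bytes of 'after' data is rebuilt as
//
//   @anon = private constant { [2 x i8], <original type>, [4 x i8] }
//       { [2 x i8] <before>, <original initializer>, [4 x i8] <after> }
//   @_ZTV1A = alias ..., getelementptr(..., ptr @anon, i32 0, i32 1)
//
// so existing references continue to point at the original vtable data.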
void DevirtModule::rebuildGlobal(VTableBits &B) {
  if (B.Before.Bytes.empty() && B.After.Bytes.empty())
    return;

  // Align the before byte array to the global's minimum alignment so that we
  // don't break any alignment requirements on the global.
  Align Alignment = M.getDataLayout().getValueOrABITypeAlignment(
      B.GV->getAlign(), B.GV->getValueType());
  B.Before.Bytes.resize(alignTo(B.Before.Bytes.size(), Alignment));

  // Before was stored in reverse order; flip it now.
  for (size_t I = 0, Size = B.Before.Bytes.size(); I != Size / 2; ++I)
    std::swap(B.Before.Bytes[I], B.Before.Bytes[Size - 1 - I]);

  // Build an anonymous global containing the before bytes, followed by the
  // original initializer, followed by the after bytes.
  auto NewInit = ConstantStruct::getAnon(
      {ConstantDataArray::get(M.getContext(), B.Before.Bytes),
       B.GV->getInitializer(),
       ConstantDataArray::get(M.getContext(), B.After.Bytes)});
  auto NewGV =
      new GlobalVariable(M, NewInit->getType(), B.GV->isConstant(),
                         GlobalVariable::PrivateLinkage, NewInit, "", B.GV);
  NewGV->setSection(B.GV->getSection());
  NewGV->setComdat(B.GV->getComdat());
  NewGV->setAlignment(B.GV->getAlign());

  // Copy the original vtable's metadata to the anonymous global, adjusting
  // offsets as required.
  NewGV->copyMetadata(B.GV, B.Before.Bytes.size());

  // Build an alias named after the original global, pointing at the second
  // element (the original initializer).
  auto Alias = GlobalAlias::create(
      B.GV->getInitializer()->getType(), 0, B.GV->getLinkage(), "",
      ConstantExpr::getInBoundsGetElementPtr(
          NewInit->getType(), NewGV,
          ArrayRef<Constant *>{ConstantInt::get(Int32Ty, 0),
                               ConstantInt::get(Int32Ty, 1)}),
      &M);
  Alias->setVisibility(B.GV->getVisibility());
  Alias->takeName(B.GV);

  B.GV->replaceAllUsesWith(Alias);
  B.GV->eraseFromParent();
}

bool DevirtModule::areRemarksEnabled() {
  const auto &FL = M.getFunctionList();
  for (const Function &Fn : FL) {
    if (Fn.empty())
      continue;
    auto DI = OptimizationRemark(DEBUG_TYPE, "", DebugLoc(), &Fn.front());
    return DI.isEnabled();
  }
  return false;
}

void DevirtModule::scanTypeTestUsers(
    Function *TypeTestFunc,
    DenseMap<Metadata *, std::set<TypeMemberInfo>> &TypeIdMap) {
  // Find all virtual calls via a virtual table pointer %p under an assumption
  // of the form llvm.assume(llvm.type.test(%p, %md)). This indicates that %p
  // points to a member of the type identifier %md. Group calls by (type ID,
  // offset) pair (effectively the identity of the virtual function) and store
  // to CallSlots.
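  // The pattern being matched looks like (illustrative):
  //
  //   %vtable = load ptr, ptr %obj
  //   %ok = call i1 @llvm.type.test(ptr %vtable, metadata !"_ZTS1A")
  //   call void @llvm.assume(i1 %ok)
  //   %vfn = load ptr, ptr %vtable      ; possibly at an offset
  //   call void %vfn(ptr %obj)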
  for (Use &U : llvm::make_early_inc_range(TypeTestFunc->uses())) {
    auto *CI = dyn_cast<CallInst>(U.getUser());
    if (!CI)
      continue;

    // Search for virtual calls based on %p and add them to DevirtCalls.
    SmallVector<DevirtCallSite, 1> DevirtCalls;
    SmallVector<CallInst *, 1> Assumes;
    auto &DT = LookupDomTree(*CI->getFunction());
    findDevirtualizableCallsForTypeTest(DevirtCalls, Assumes, CI, DT);

    Metadata *TypeId =
        cast<MetadataAsValue>(CI->getArgOperand(1))->getMetadata();
    // If we found any, add them to CallSlots.
    if (!Assumes.empty()) {
      Value *Ptr = CI->getArgOperand(0)->stripPointerCasts();
      for (DevirtCallSite Call : DevirtCalls)
        CallSlots[{TypeId, Call.Offset}].addCallSite(Ptr, Call.CB, nullptr);
    }

    auto RemoveTypeTestAssumes = [&]() {
      // We no longer need the assumes or the type test.
      for (auto *Assume : Assumes)
        Assume->eraseFromParent();
      // We can't use RecursivelyDeleteTriviallyDeadInstructions here because
      // we may use the vtable argument later.
      if (CI->use_empty())
        CI->eraseFromParent();
    };

    // At this point we could remove all type test assume sequences, as they
    // were originally inserted for WPD. However, we can keep these in the
    // code stream for later analysis (e.g. to help drive more efficient ICP
    // sequences). They will eventually be removed by a second LowerTypeTests
    // invocation that cleans them up. In order to do this correctly, the
    // first LowerTypeTests invocation needs to know that they have "Unknown"
    // type test resolution, so that they aren't treated as Unsat and lowered
    // to False, which will break any uses on assumes. Below we remove any
    // type test assumes that will not be treated as Unknown by LTT.

    // The type test assumes will be treated by LTT as Unsat if the type id is
    // not used on a global (in which case it has no entry in the TypeIdMap).
    if (!TypeIdMap.count(TypeId))
      RemoveTypeTestAssumes();

    // For ThinLTO importing, we need to remove the type test assumes if this
    // is an MDString type id without a corresponding TypeIdSummary. Any
    // non-MDString type ids are ignored and treated as Unknown by LTT, so
    // their type test assumes can be kept. If the MDString type id is missing
    // a TypeIdSummary (e.g. because there was no use on a vcall, preventing
    // the exporting phase of WPD from analyzing it), then it would be treated
    // as Unsat by LTT and we need to remove its type test assumes here. If
    // not used on a vcall we don't need them for later optimization use in
    // any case.
    else if (ImportSummary && isa<MDString>(TypeId)) {
      const TypeIdSummary *TidSummary =
          ImportSummary->getTypeIdSummary(cast<MDString>(TypeId)->getString());
      if (!TidSummary)
        RemoveTypeTestAssumes();
      else
        // If one was created it should not be Unsat, because if we reached
        // here the type id was used on a global.
        assert(TidSummary->TTRes.TheKind != TypeTestResolution::Unsat);
    }
  }
}

void DevirtModule::scanTypeCheckedLoadUsers(Function *TypeCheckedLoadFunc) {
  Function *TypeTestFunc =
      Intrinsic::getOrInsertDeclaration(&M, Intrinsic::type_test);

  for (Use &U : llvm::make_early_inc_range(TypeCheckedLoadFunc->uses())) {
    auto *CI = dyn_cast<CallInst>(U.getUser());
    if (!CI)
      continue;

    Value *Ptr = CI->getArgOperand(0);
    Value *Offset = CI->getArgOperand(1);
    Value *TypeIdValue = CI->getArgOperand(2);
    Metadata *TypeId = cast<MetadataAsValue>(TypeIdValue)->getMetadata();

    SmallVector<DevirtCallSite, 1> DevirtCalls;
    SmallVector<Instruction *, 1> LoadedPtrs;
    SmallVector<Instruction *, 1> Preds;
    bool HasNonCallUses = false;
    auto &DT = LookupDomTree(*CI->getFunction());
    findDevirtualizableCallsForTypeCheckedLoad(DevirtCalls, LoadedPtrs, Preds,
                                               HasNonCallUses, CI, DT);

    // Start by generating "pessimistic" code that explicitly loads the
    // function pointer from the vtable and performs the type check. If
    // possible, we will eliminate the load and the type check later.
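    // For the non-relative case this produces (illustrative):
    //
    //   %fptr.addr = getelementptr i8, ptr %vtable, i64 %offset
    //   %fptr = load ptr, ptr %fptr.addr
    //   %ok = call i1 @llvm.type.test(ptr %vtable, metadata !"_ZTS1A")
    //
    // replacing the extractvalue uses of the original intrinsic's result pair.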

    // If possible, only generate the load at the point where it is used.
    // This helps avoid unnecessary spills.
    IRBuilder<> LoadB(
        (LoadedPtrs.size() == 1 && !HasNonCallUses) ? LoadedPtrs[0] : CI);

    Value *LoadedValue = nullptr;
    if (TypeCheckedLoadFunc->getIntrinsicID() ==
        Intrinsic::type_checked_load_relative) {
      Function *LoadRelFunc = Intrinsic::getOrInsertDeclaration(
          &M, Intrinsic::load_relative, {Int32Ty});
      LoadedValue = LoadB.CreateCall(LoadRelFunc, {Ptr, Offset});
    } else {
      Value *GEP = LoadB.CreatePtrAdd(Ptr, Offset);
      LoadedValue = LoadB.CreateLoad(Int8PtrTy, GEP);
    }

    for (Instruction *LoadedPtr : LoadedPtrs) {
      LoadedPtr->replaceAllUsesWith(LoadedValue);
      LoadedPtr->eraseFromParent();
    }

    // Likewise for the type test.
    IRBuilder<> CallB((Preds.size() == 1 && !HasNonCallUses) ? Preds[0] : CI);
    CallInst *TypeTestCall = CallB.CreateCall(TypeTestFunc, {Ptr, TypeIdValue});

    for (Instruction *Pred : Preds) {
      Pred->replaceAllUsesWith(TypeTestCall);
      Pred->eraseFromParent();
    }

    // We have already erased any extractvalue instructions that refer to the
    // intrinsic call, but the intrinsic may have other non-extractvalue uses
    // (although this is unlikely). In that case, explicitly build a pair and
    // RAUW it.
    if (!CI->use_empty()) {
      Value *Pair = PoisonValue::get(CI->getType());
      IRBuilder<> B(CI);
      Pair = B.CreateInsertValue(Pair, LoadedValue, {0});
      Pair = B.CreateInsertValue(Pair, TypeTestCall, {1});
      CI->replaceAllUsesWith(Pair);
    }

    // The number of unsafe uses is initially the number of uses.
    auto &NumUnsafeUses = NumUnsafeUsesForTypeTest[TypeTestCall];
    NumUnsafeUses = DevirtCalls.size();

    // If the function pointer has a non-call user, we cannot eliminate the
    // type check, as one of those users may eventually call the pointer.
    // Increment the unsafe use count to make sure it cannot reach zero.
    if (HasNonCallUses)
      ++NumUnsafeUses;
    for (DevirtCallSite Call : DevirtCalls) {
      CallSlots[{TypeId, Call.Offset}].addCallSite(Ptr, Call.CB,
                                                   &NumUnsafeUses);
    }

    CI->eraseFromParent();
  }
}

void DevirtModule::importResolution(VTableSlot Slot, VTableSlotInfo &SlotInfo) {
  auto *TypeId = dyn_cast<MDString>(Slot.TypeID);
  if (!TypeId)
    return;
  const TypeIdSummary *TidSummary =
      ImportSummary->getTypeIdSummary(TypeId->getString());
  if (!TidSummary)
    return;
  auto ResI = TidSummary->WPDRes.find(Slot.ByteOffset);
  if (ResI == TidSummary->WPDRes.end())
    return;
  const WholeProgramDevirtResolution &Res = ResI->second;

  if (Res.TheKind == WholeProgramDevirtResolution::SingleImpl) {
    assert(!Res.SingleImplName.empty());
    // The type of the function in the declaration is irrelevant because every
    // call site will cast it to the correct type.
    Constant *SingleImpl =
        cast<Constant>(M.getOrInsertFunction(Res.SingleImplName,
                                             Type::getVoidTy(M.getContext()))
                           .getCallee());

    // This is the import phase so we should not be exporting anything.
    bool IsExported = false;
    applySingleImplDevirt(SlotInfo, SingleImpl, IsExported);
    assert(!IsExported);
  }

  for (auto &CSByConstantArg : SlotInfo.ConstCSInfo) {
    auto I = Res.ResByArg.find(CSByConstantArg.first);
    if (I == Res.ResByArg.end())
      continue;
    auto &ResByArg = I->second;
    // FIXME: We should figure out what to do about the "function name"
    // argument to the apply* functions, as the function names are unavailable
    // during the importing phase. For now we just pass the empty string. This
    // does not impact correctness because the function names are just used
    // for remarks.
    switch (ResByArg.TheKind) {
    case WholeProgramDevirtResolution::ByArg::UniformRetVal:
      applyUniformRetValOpt(CSByConstantArg.second, "", ResByArg.Info);
      break;
    case WholeProgramDevirtResolution::ByArg::UniqueRetVal: {
      Constant *UniqueMemberAddr =
          importGlobal(Slot, CSByConstantArg.first, "unique_member");
      applyUniqueRetValOpt(CSByConstantArg.second, "", ResByArg.Info,
                           UniqueMemberAddr);
      break;
    }
    case WholeProgramDevirtResolution::ByArg::VirtualConstProp: {
      Constant *Byte = importConstant(Slot, CSByConstantArg.first, "byte",
                                      Int32Ty, ResByArg.Byte);
      Constant *Bit = importConstant(Slot, CSByConstantArg.first, "bit", Int8Ty,
                                     ResByArg.Bit);
      applyVirtualConstProp(CSByConstantArg.second, "", Byte, Bit);
      break;
    }
    default:
      break;
    }
  }

  if (Res.TheKind == WholeProgramDevirtResolution::BranchFunnel) {
    // The type of the function is irrelevant, because it's bitcast at call
    // sites anyhow.
    Constant *JT = cast<Constant>(
        M.getOrInsertFunction(getGlobalName(Slot, {}, "branch_funnel"),
                              Type::getVoidTy(M.getContext()))
            .getCallee());
    bool IsExported = false;
    applyICallBranchFunnel(SlotInfo, JT, IsExported);
    assert(!IsExported);
  }
}

void DevirtModule::removeRedundantTypeTests() {
  auto True = ConstantInt::getTrue(M.getContext());
  for (auto &&U : NumUnsafeUsesForTypeTest) {
    if (U.second == 0) {
      U.first->replaceAllUsesWith(True);
      U.first->eraseFromParent();
    }
  }
}

ValueInfo
DevirtModule::lookUpFunctionValueInfo(Function *TheFn,
                                      ModuleSummaryIndex *ExportSummary) {
  assert((ExportSummary != nullptr) &&
         "Caller guarantees ExportSummary is not nullptr");

  const auto TheFnGUID = TheFn->getGUID();
  const auto TheFnGUIDWithExportedName =
      GlobalValue::getGUIDAssumingExternalLinkage(TheFn->getName());
  // Look up ValueInfo with the GUID in the current linkage.
  ValueInfo TheFnVI = ExportSummary->getValueInfo(TheFnGUID);
  // If no entry is found and the GUID is different from the GUID computed
  // using the exported name, look up ValueInfo with the exported name
  // unconditionally. This is a fallback.
  //
  // The reason to have a fallback:
  // 1. LTO could enable global value internalization via
  //    `enable-lto-internalization`.
  // 2. The GUID in ExportedSummary is computed using the exported name.
  if ((!TheFnVI) && (TheFnGUID != TheFnGUIDWithExportedName)) {
    TheFnVI = ExportSummary->getValueInfo(TheFnGUIDWithExportedName);
  }
  return TheFnVI;
}

bool DevirtModule::mustBeUnreachableFunction(
    Function *const F, ModuleSummaryIndex *ExportSummary) {
  if (WholeProgramDevirtKeepUnreachableFunction)
    return false;
  // First, learn unreachability by analyzing function IR.
  if (!F->isDeclaration()) {
    // A function must be unreachable if its entry block ends with an
    // 'unreachable'.
    return isa<UnreachableInst>(F->getEntryBlock().getTerminator());
  }
  // Learn unreachability from ExportSummary if ExportSummary is present.
  return ExportSummary &&
         ::mustBeUnreachableFunction(
             DevirtModule::lookUpFunctionValueInfo(F, ExportSummary));
}

2323 | bool DevirtModule::run() { |
2324 | // If only some of the modules were split, we cannot correctly perform |
2325 | // this transformation. We already checked for the presense of type tests |
2326 | // with partially split modules during the thin link, and would have emitted |
2327 | // an error if any were found, so here we can simply return. |
2328 | if ((ExportSummary && ExportSummary->partiallySplitLTOUnits()) || |
2329 | (ImportSummary && ImportSummary->partiallySplitLTOUnits())) |
2330 | return false; |
2331 | |
2332 | Function *TypeTestFunc = |
2333 | Intrinsic::getDeclarationIfExists(M: &M, id: Intrinsic::type_test); |
2334 | Function *TypeCheckedLoadFunc = |
2335 | Intrinsic::getDeclarationIfExists(M: &M, id: Intrinsic::type_checked_load); |
2336 | Function *TypeCheckedLoadRelativeFunc = Intrinsic::getDeclarationIfExists( |
2337 | M: &M, id: Intrinsic::type_checked_load_relative); |
2338 | Function *AssumeFunc = |
2339 | Intrinsic::getDeclarationIfExists(M: &M, id: Intrinsic::assume); |
2340 | |
2341 | // Normally if there are no users of the devirtualization intrinsics in the |
2342 | // module, this pass has nothing to do. But if we are exporting, we also need |
2343 | // to handle any users that appear only in the function summaries. |
2344 | if (!ExportSummary && |
2345 | (!TypeTestFunc || TypeTestFunc->use_empty() || !AssumeFunc || |
2346 | AssumeFunc->use_empty()) && |
2347 | (!TypeCheckedLoadFunc || TypeCheckedLoadFunc->use_empty()) && |
2348 | (!TypeCheckedLoadRelativeFunc || |
2349 | TypeCheckedLoadRelativeFunc->use_empty())) |
2350 | return false; |
2351 | |
2352 | // Rebuild type metadata into a map for easy lookup. |
2353 | std::vector<VTableBits> Bits; |
2354 | DenseMap<Metadata *, std::set<TypeMemberInfo>> TypeIdMap; |
2355 | buildTypeIdentifierMap(Bits, TypeIdMap); |
2356 | |
2357 | if (TypeTestFunc && AssumeFunc) |
2358 | scanTypeTestUsers(TypeTestFunc, TypeIdMap); |
2359 | |
2360 | if (TypeCheckedLoadFunc) |
2361 | scanTypeCheckedLoadUsers(TypeCheckedLoadFunc); |
2362 | |
2363 | if (TypeCheckedLoadRelativeFunc) |
2364 | scanTypeCheckedLoadUsers(TypeCheckedLoadFunc: TypeCheckedLoadRelativeFunc); |
2365 | |
2366 | if (ImportSummary) { |
2367 | for (auto &S : CallSlots) |
2368 | importResolution(Slot: S.first, SlotInfo&: S.second); |
2369 | |
2370 | removeRedundantTypeTests(); |
2371 | |
2372 | // We have lowered or deleted the type intrinsics, so we will no longer have |
2373 | // enough information to reason about the liveness of virtual function |
2374 | // pointers in GlobalDCE. |
2375 | for (GlobalVariable &GV : M.globals()) |
2376 | GV.eraseMetadata(KindID: LLVMContext::MD_vcall_visibility); |
2377 | |
2378 | // The rest of the code is only necessary when exporting or during regular |
2379 | // LTO, so we are done. |
2380 | return true; |
2381 | } |
2382 | |
2383 | if (TypeIdMap.empty()) |
2384 | return true; |
2385 | |
2386 | // Collect information from summary about which calls to try to devirtualize. |
2387 | if (ExportSummary) { |
2388 | DenseMap<GlobalValue::GUID, TinyPtrVector<Metadata *>> MetadataByGUID; |
2389 | for (auto &P : TypeIdMap) { |
2390 | if (auto *TypeId = dyn_cast<MDString>(Val: P.first)) |
2391 | MetadataByGUID[GlobalValue::getGUIDAssumingExternalLinkage( |
2392 | GlobalName: TypeId->getString())] |
2393 | .push_back(NewVal: TypeId); |
2394 | } |
2395 | |
2396 | for (auto &P : *ExportSummary) { |
2397 | for (auto &S : P.second.SummaryList) { |
2398 | auto *FS = dyn_cast<FunctionSummary>(Val: S.get()); |
2399 | if (!FS) |
2400 | continue; |
2401 | // FIXME: Only add live functions. |
2402 | for (FunctionSummary::VFuncId VF : FS->type_test_assume_vcalls()) { |
2403 | for (Metadata *MD : MetadataByGUID[VF.GUID]) { |
2404 | CallSlots[{.TypeID: MD, .ByteOffset: VF.Offset}].CSInfo.addSummaryTypeTestAssumeUser(FS); |
2405 | } |
2406 | } |
2407 | for (FunctionSummary::VFuncId VF : FS->type_checked_load_vcalls()) { |
2408 | for (Metadata *MD : MetadataByGUID[VF.GUID]) { |
2409 | CallSlots[{.TypeID: MD, .ByteOffset: VF.Offset}].CSInfo.addSummaryTypeCheckedLoadUser(FS); |
2410 | } |
2411 | } |
2412 | for (const FunctionSummary::ConstVCall &VC : |
2413 | FS->type_test_assume_const_vcalls()) { |
2414 | for (Metadata *MD : MetadataByGUID[VC.VFunc.GUID]) { |
2415 | CallSlots[{.TypeID: MD, .ByteOffset: VC.VFunc.Offset}] |
2416 | .ConstCSInfo[VC.Args] |
2417 | .addSummaryTypeTestAssumeUser(FS); |
2418 | } |
2419 | } |
2420 | for (const FunctionSummary::ConstVCall &VC : |
2421 | FS->type_checked_load_const_vcalls()) { |
2422 | for (Metadata *MD : MetadataByGUID[VC.VFunc.GUID]) { |
2423 | CallSlots[{.TypeID: MD, .ByteOffset: VC.VFunc.Offset}] |
2424 | .ConstCSInfo[VC.Args] |
2425 | .addSummaryTypeCheckedLoadUser(FS); |
2426 | } |
2427 | } |
2428 | } |
2429 | } |
2430 | } |
2431 | |
2432 | // For each (type, offset) pair: |
2433 | bool DidVirtualConstProp = false; |
2434 | std::map<std::string, GlobalValue *> DevirtTargets; |
2435 | for (auto &S : CallSlots) { |
2436 | // Search each of the members of the type identifier for the virtual |
2437 | // function implementation at offset S.first.ByteOffset, and add to |
2438 | // TargetsForSlot. |
2439 | std::vector<VirtualCallTarget> TargetsForSlot; |
2440 | WholeProgramDevirtResolution *Res = nullptr; |
2441 | const std::set<TypeMemberInfo> &TypeMemberInfos = TypeIdMap[S.first.TypeID]; |
2442 | if (ExportSummary && isa<MDString>(Val: S.first.TypeID) && |
2443 | TypeMemberInfos.size()) |
2444 | // For any type id used on a global's type metadata, create the type id |
2445 | // summary resolution regardless of whether we can devirtualize, so that |
2446 | // lower type tests knows the type id is not Unsat. If it was not used on |
2447 | // a global's type metadata, the TypeIdMap entry set will be empty, and |
2448 | // we don't want to create an entry (with the default Unknown type |
2449 | // resolution), which can prevent detection of the Unsat. |
2450 | Res = &ExportSummary |
2451 | ->getOrInsertTypeIdSummary( |
2452 | TypeId: cast<MDString>(Val: S.first.TypeID)->getString()) |
2453 | .WPDRes[S.first.ByteOffset]; |
2454 | if (tryFindVirtualCallTargets(TargetsForSlot, TypeMemberInfos, |
2455 | ByteOffset: S.first.ByteOffset, ExportSummary)) { |
2456 | |
2457 | if (!trySingleImplDevirt(ExportSummary, TargetsForSlot, SlotInfo&: S.second, Res)) { |
2458 | DidVirtualConstProp |= |
2459 | tryVirtualConstProp(TargetsForSlot, SlotInfo&: S.second, Res, Slot: S.first); |
2460 | |
2461 | tryICallBranchFunnel(TargetsForSlot, SlotInfo&: S.second, Res, Slot: S.first); |
2462 | } |
2463 | |
2464 | // Collect functions devirtualized at least for one call site for stats. |
2465 | if (RemarksEnabled || AreStatisticsEnabled()) |
2466 | for (const auto &T : TargetsForSlot) |
2467 | if (T.WasDevirt) |
2468 | DevirtTargets[std::string(T.Fn->getName())] = T.Fn; |
2469 | } |
2470 | |
2471 | // CFI-specific: if we are exporting and any llvm.type.checked.load |
2472 | // intrinsics were *not* devirtualized, we need to add the resulting |
2473 | // llvm.type.test intrinsics to the function summaries so that the |
2474 | // LowerTypeTests pass will export them. |
2475 | if (ExportSummary && isa<MDString>(Val: S.first.TypeID)) { |
2476 | auto GUID = GlobalValue::getGUIDAssumingExternalLinkage( |
2477 | GlobalName: cast<MDString>(Val: S.first.TypeID)->getString()); |
2478 | auto AddTypeTestsForTypeCheckedLoads = [&](CallSiteInfo &CSI) { |
2479 | if (!CSI.AllCallSitesDevirted) |
2480 | for (auto *FS : CSI.SummaryTypeCheckedLoadUsers) |
FS->addTypeTest(GUID);
2482 | }; |
2483 | AddTypeTestsForTypeCheckedLoads(S.second.CSInfo); |
2484 | for (auto &CCS : S.second.ConstCSInfo) |
2485 | AddTypeTestsForTypeCheckedLoads(CCS.second); |
2486 | } |
2487 | } |
2488 | |
2489 | if (RemarksEnabled) { |
2490 | // Generate remarks for each devirtualized function. |
2491 | for (const auto &DT : DevirtTargets) { |
2492 | GlobalValue *GV = DT.second; |
auto F = dyn_cast<Function>(GV);
if (!F) {
auto A = dyn_cast<GlobalAlias>(GV);
assert(A && isa<Function>(A->getAliasee()));
F = dyn_cast<Function>(A->getAliasee());
2498 | assert(F); |
2499 | } |
2500 | |
2501 | using namespace ore; |
OREGetter(F).emit(OptimizationRemark(DEBUG_TYPE, "Devirtualized", F)
<< "devirtualized "
<< NV("FunctionName", DT.first));
2505 | } |
2506 | } |
2507 | |
2508 | NumDevirtTargets += DevirtTargets.size(); |
2509 | |
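// Type tests whose assume uses were all removed during devirtualization are
// now redundant. As a sketch (with an illustrative type id name), a remaining
//   %p = call i1 @llvm.type.test(ptr %vtable, metadata !"_ZTS1A")
// has its uses replaced with "i1 true".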
2510 | removeRedundantTypeTests(); |
2511 | |
2512 | // Rebuild each global we touched as part of virtual constant propagation to |
2513 | // include the before and after bytes. |
2514 | if (DidVirtualConstProp) |
2515 | for (VTableBits &B : Bits) |
2516 | rebuildGlobal(B); |
2517 | |
2518 | // We have lowered or deleted the type intrinsics, so we will no longer have |
2519 | // enough information to reason about the liveness of virtual function |
2520 | // pointers in GlobalDCE. |
2521 | for (GlobalVariable &GV : M.globals()) |
GV.eraseMetadata(LLVMContext::MD_vcall_visibility);
2523 | |
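// These calls were replaced during devirtualization by equivalent direct
// calls without their ptrauth operand bundle; delete the now-unused
// originals.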
2524 | for (auto *CI : CallsWithPtrAuthBundleRemoved) |
2525 | CI->eraseFromParent(); |
2526 | |
2527 | return true; |
2528 | } |
2529 | |
2530 | void DevirtIndex::run() { |
2531 | if (ExportSummary.typeIdCompatibleVtableMap().empty()) |
2532 | return; |
2533 | |
2534 | DenseMap<GlobalValue::GUID, std::vector<StringRef>> NameByGUID; |
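// The summary records virtual calls by GUID only, so build a map from each
// GUID back to the type identifier names that produced it. Keep a vector per
// GUID, since distinct names could in principle share a GUID.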
2535 | for (const auto &P : ExportSummary.typeIdCompatibleVtableMap()) { |
NameByGUID[GlobalValue::getGUIDAssumingExternalLinkage(P.first)].push_back(
P.first);
// Create the type id summary resolution regardless of whether we can
// devirtualize, so that the LowerTypeTests pass knows the type id is used
// on a global and is not Unsat. We do this here rather than in the loop
// over the CallSlots, since that handling will only see type tests that
// directly feed assumes, and we would miss any that aren't currently
// handled by WPD (such as type tests that feed assumes via phis).
ExportSummary.getOrInsertTypeIdSummary(P.first);
2545 | } |
2546 | |
// Collect information from the summary about which calls to try to
// devirtualize.
2548 | for (auto &P : ExportSummary) { |
2549 | for (auto &S : P.second.SummaryList) { |
auto *FS = dyn_cast<FunctionSummary>(S.get());
2551 | if (!FS) |
2552 | continue; |
2553 | // FIXME: Only add live functions. |
2554 | for (FunctionSummary::VFuncId VF : FS->type_test_assume_vcalls()) { |
2555 | for (StringRef Name : NameByGUID[VF.GUID]) { |
CallSlots[{Name, VF.Offset}].CSInfo.addSummaryTypeTestAssumeUser(FS);
2557 | } |
2558 | } |
2559 | for (FunctionSummary::VFuncId VF : FS->type_checked_load_vcalls()) { |
2560 | for (StringRef Name : NameByGUID[VF.GUID]) { |
CallSlots[{Name, VF.Offset}].CSInfo.addSummaryTypeCheckedLoadUser(FS);
2562 | } |
2563 | } |
2564 | for (const FunctionSummary::ConstVCall &VC : |
2565 | FS->type_test_assume_const_vcalls()) { |
2566 | for (StringRef Name : NameByGUID[VC.VFunc.GUID]) { |
CallSlots[{Name, VC.VFunc.Offset}]
2568 | .ConstCSInfo[VC.Args] |
2569 | .addSummaryTypeTestAssumeUser(FS); |
2570 | } |
2571 | } |
2572 | for (const FunctionSummary::ConstVCall &VC : |
2573 | FS->type_checked_load_const_vcalls()) { |
2574 | for (StringRef Name : NameByGUID[VC.VFunc.GUID]) { |
CallSlots[{Name, VC.VFunc.Offset}]
2576 | .ConstCSInfo[VC.Args] |
2577 | .addSummaryTypeCheckedLoadUser(FS); |
2578 | } |
2579 | } |
2580 | } |
2581 | } |
2582 | |
2583 | std::set<ValueInfo> DevirtTargets; |
2584 | // For each (type, offset) pair: |
2585 | for (auto &S : CallSlots) { |
2586 | // Search each of the members of the type identifier for the virtual |
2587 | // function implementation at offset S.first.ByteOffset, and add to |
2588 | // TargetsForSlot. |
2589 | std::vector<ValueInfo> TargetsForSlot; |
auto TidSummary =
ExportSummary.getTypeIdCompatibleVtableSummary(S.first.TypeID);
2591 | assert(TidSummary); |
2592 | // The type id summary would have been created while building the NameByGUID |
2593 | // map earlier. |
2594 | WholeProgramDevirtResolution *Res = |
&ExportSummary.getTypeIdSummary(S.first.TypeID)
2596 | ->WPDRes[S.first.ByteOffset]; |
if (tryFindVirtualCallTargets(TargetsForSlot, *TidSummary,
S.first.ByteOffset)) {
2599 | |
if (!trySingleImplDevirt(TargetsForSlot, S.first, S.second, Res,
2601 | DevirtTargets)) |
2602 | continue; |
2603 | } |
2604 | } |
2605 | |
// Optionally have the thin link print a message for each devirtualized
// function.
2608 | if (PrintSummaryDevirt) |
2609 | for (const auto &DT : DevirtTargets) |
2610 | errs() << "Devirtualized call to " << DT << "\n" ; |
2611 | |
2612 | NumDevirtTargets += DevirtTargets.size(); |
2613 | } |
2614 | |