//===- ThinLTOBitcodeWriter.cpp - Bitcode writing pass for ThinLTO --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/ThinLTOBitcodeWriter.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/ModuleSummaryAnalysis.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TypeMetadataUtils.h"
#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Object/ModuleSymbolTable.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/FunctionAttrs.h"
#include "llvm/Transforms/IPO/FunctionImport.h"
#include "llvm/Transforms/IPO/LowerTypeTests.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
using namespace llvm;

namespace {

// Determine if a promotion alias should be created for a symbol name.
static bool allowPromotionAlias(const std::string &Name) {
  // Promotion aliases are used only in inline assembly. It's safe to
  // simply skip unusual names. Subset of MCAsmInfo::isAcceptableChar()
  // and MCAsmInfoXCOFF::isAcceptableChar().
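  // For example, "foo.bar_1" is acceptable, while a name containing '$',
  // '@', or whitespace is rejected.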
  for (const char &C : Name) {
    if (isAlnum(C) || C == '_' || C == '.')
      continue;
    return false;
  }
  return true;
}

// Promote each local-linkage entity defined by ExportM and used by ImportM by
// changing visibility and appending the given ModuleId.
void promoteInternals(Module &ExportM, Module &ImportM, StringRef ModuleId,
                      SetVector<GlobalValue *> &PromoteExtra) {
  DenseMap<const Comdat *, Comdat *> RenamedComdats;
  for (auto &ExportGV : ExportM.global_values()) {
    if (!ExportGV.hasLocalLinkage())
      continue;

    auto Name = ExportGV.getName();
    GlobalValue *ImportGV = nullptr;
    if (!PromoteExtra.count(&ExportGV)) {
      ImportGV = ImportM.getNamedValue(Name);
      if (!ImportGV)
        continue;
      ImportGV->removeDeadConstantUsers();
      if (ImportGV->use_empty()) {
        ImportGV->eraseFromParent();
        continue;
      }
    }

    std::string OldName = Name.str();
    std::string NewName = (Name + ModuleId).str();

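    // If the comdat shares the global's name, schedule a renamed comdat as
    // well; the replacement comdats are applied to all global objects in a
    // second pass below.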
    if (const auto *C = ExportGV.getComdat())
      if (C->getName() == Name)
        RenamedComdats.try_emplace(C, ExportM.getOrInsertComdat(NewName));

    ExportGV.setName(NewName);
    ExportGV.setLinkage(GlobalValue::ExternalLinkage);
    ExportGV.setVisibility(GlobalValue::HiddenVisibility);

    if (ImportGV) {
      ImportGV->setName(NewName);
      ImportGV->setVisibility(GlobalValue::HiddenVisibility);
    }

    if (isa<Function>(&ExportGV) && allowPromotionAlias(OldName)) {
      // Create a local alias with the original name to avoid breaking
      // references from inline assembly.
      std::string Alias =
          ".lto_set_conditional " + OldName + "," + NewName + "\n";
      ExportM.appendModuleInlineAsm(Alias);
    }
  }

  if (!RenamedComdats.empty())
    for (auto &GO : ExportM.global_objects())
      if (auto *C = GO.getComdat()) {
        auto Replacement = RenamedComdats.find(C);
        if (Replacement != RenamedComdats.end())
          GO.setComdat(Replacement->second);
      }
}

// Promote all internal (i.e. distinct) type ids used by the module by replacing
// them with external type ids formed using the module id.
//
// Note that this needs to be done before we clone the module because each clone
// will receive its own set of distinct metadata nodes.
void promoteTypeIds(Module &M, StringRef ModuleId) {
  DenseMap<Metadata *, Metadata *> LocalToGlobal;
  auto ExternalizeTypeId = [&](CallInst *CI, unsigned ArgNo) {
    Metadata *MD =
        cast<MetadataAsValue>(CI->getArgOperand(ArgNo))->getMetadata();

    if (isa<MDNode>(MD) && cast<MDNode>(MD)->isDistinct()) {
      Metadata *&GlobalMD = LocalToGlobal[MD];
      if (!GlobalMD) {
        std::string NewName = (Twine(LocalToGlobal.size()) + ModuleId).str();
        GlobalMD = MDString::get(M.getContext(), NewName);
      }

      CI->setArgOperand(ArgNo,
                        MetadataAsValue::get(M.getContext(), GlobalMD));
    }
  };

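  // Externalize the distinct type-id operand of every type intrinsic call.
  // The metadata operand index differs per intrinsic: operand 1 for the type
  // tests, operand 2 for the type checked loads.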
  if (Function *TypeTestFunc =
          Intrinsic::getDeclarationIfExists(&M, Intrinsic::type_test)) {
    for (const Use &U : TypeTestFunc->uses()) {
      auto CI = cast<CallInst>(U.getUser());
      ExternalizeTypeId(CI, 1);
    }
  }

  if (Function *PublicTypeTestFunc =
          Intrinsic::getDeclarationIfExists(&M, Intrinsic::public_type_test)) {
    for (const Use &U : PublicTypeTestFunc->uses()) {
      auto CI = cast<CallInst>(U.getUser());
      ExternalizeTypeId(CI, 1);
    }
  }

  if (Function *TypeCheckedLoadFunc =
          Intrinsic::getDeclarationIfExists(&M, Intrinsic::type_checked_load)) {
    for (const Use &U : TypeCheckedLoadFunc->uses()) {
      auto CI = cast<CallInst>(U.getUser());
      ExternalizeTypeId(CI, 2);
    }
  }

  if (Function *TypeCheckedLoadRelativeFunc = Intrinsic::getDeclarationIfExists(
          &M, Intrinsic::type_checked_load_relative)) {
    for (const Use &U : TypeCheckedLoadRelativeFunc->uses()) {
      auto CI = cast<CallInst>(U.getUser());
      ExternalizeTypeId(CI, 2);
    }
  }

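  // Re-attach the !type metadata on global objects, substituting the new
  // global MDString for any distinct type-id node that was externalized above.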
  for (GlobalObject &GO : M.global_objects()) {
    SmallVector<MDNode *, 1> MDs;
    GO.getMetadata(LLVMContext::MD_type, MDs);

    GO.eraseMetadata(LLVMContext::MD_type);
    for (auto *MD : MDs) {
      auto I = LocalToGlobal.find(MD->getOperand(1));
      if (I == LocalToGlobal.end()) {
        GO.addMetadata(LLVMContext::MD_type, *MD);
        continue;
      }
      GO.addMetadata(
          LLVMContext::MD_type,
          *MDNode::get(M.getContext(), {MD->getOperand(0), I->second}));
    }
  }
}

// Drop unused globals, and drop type information from function declarations.
// FIXME: If we made functions typeless then there would be no need to do this.
void simplifyExternals(Module &M) {
  FunctionType *EmptyFT =
      FunctionType::get(Type::getVoidTy(M.getContext()), false);

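  // Declarations with no remaining uses are deleted outright; any other
  // external function declaration (except intrinsics) is replaced by a
  // typeless "void()" declaration that keeps only its function attributes.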
  for (Function &F : llvm::make_early_inc_range(M)) {
    if (F.isDeclaration() && F.use_empty()) {
      F.eraseFromParent();
      continue;
    }

    if (!F.isDeclaration() || F.getFunctionType() == EmptyFT ||
        // Changing the type of an intrinsic may invalidate the IR.
        F.getName().starts_with("llvm."))
      continue;

    Function *NewF =
        Function::Create(EmptyFT, GlobalValue::ExternalLinkage,
                         F.getAddressSpace(), "", &M);
    NewF->copyAttributesFrom(&F);
    // Only copy function attributes.
    NewF->setAttributes(AttributeList::get(M.getContext(),
                                           AttributeList::FunctionIndex,
                                           F.getAttributes().getFnAttrs()));
    NewF->takeName(&F);
    F.replaceAllUsesWith(NewF);
    F.eraseFromParent();
  }

  for (GlobalIFunc &I : llvm::make_early_inc_range(M.ifuncs())) {
    if (I.use_empty())
      I.eraseFromParent();
    else
      assert(I.getResolverFunction() && "ifunc misses its resolver function");
  }

  for (GlobalVariable &GV : llvm::make_early_inc_range(M.globals())) {
    if (GV.isDeclaration() && GV.use_empty()) {
      GV.eraseFromParent();
      continue;
    }
  }
}

static void
filterModule(Module *M,
             function_ref<bool(const GlobalValue *)> ShouldKeepDefinition) {
  std::vector<GlobalValue *> V;
  for (GlobalValue &GV : M->global_values())
    if (!ShouldKeepDefinition(&GV))
      V.push_back(&GV);

  for (GlobalValue *GV : V)
    if (!convertToDeclaration(*GV))
      GV->eraseFromParent();
}

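// Invoke Fn on every Function referenced (directly or through nested constant
// expressions) by the constant C, stopping at other global values. This is
// used below to walk the virtual functions stored in vtable initializers.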
void forEachVirtualFunction(Constant *C, function_ref<void(Function *)> Fn) {
  if (auto *F = dyn_cast<Function>(C))
    return Fn(F);
  if (isa<GlobalValue>(C))
    return;
  for (Value *Op : C->operands())
    forEachVirtualFunction(cast<Constant>(Op), Fn);
}

// Clone any @llvm[.compiler].used over to the new module and append
// values whose defs were cloned into that module.
static void cloneUsedGlobalVariables(const Module &SrcM, Module &DestM,
                                     bool CompilerUsed) {
  SmallVector<GlobalValue *, 4> Used, NewUsed;
  // First collect those in the llvm[.compiler].used set.
  collectUsedGlobalVariables(SrcM, Used, CompilerUsed);
  // Next build a set of the equivalent values defined in DestM.
  for (auto *V : Used) {
    auto *GV = DestM.getNamedValue(V->getName());
    if (GV && !GV->isDeclaration())
      NewUsed.push_back(GV);
  }
  // Finally, add them to a llvm[.compiler].used variable in DestM.
  if (CompilerUsed)
    appendToCompilerUsed(DestM, NewUsed);
  else
    appendToUsed(DestM, NewUsed);
}

#ifndef NDEBUG
static bool enableUnifiedLTO(Module &M) {
  bool UnifiedLTO = false;
  if (auto *MD =
          mdconst::extract_or_null<ConstantInt>(M.getModuleFlag("UnifiedLTO")))
    UnifiedLTO = MD->getZExtValue();
  return UnifiedLTO;
}
#endif

bool mustEmitToMergedModule(const GlobalValue *GV) {
  // The __cfi_check definition is filled in by the CrossDSOCFI pass which
  // runs only in the merged module.
  return GV->getName() == "__cfi_check";
}

// If it's possible to split M into regular and thin LTO parts, do so and write
// a multi-module bitcode file with the two parts to OS. Otherwise, write only a
// regular LTO bitcode file to OS.
void splitAndWriteThinLTOBitcode(
    raw_ostream &OS, raw_ostream *ThinLinkOS,
    function_ref<AAResults &(Function &)> AARGetter, Module &M,
    const bool ShouldPreserveUseListOrder) {
  std::string ModuleId = getUniqueModuleId(&M);
  if (ModuleId.empty()) {
    assert(!enableUnifiedLTO(M));
    // We couldn't generate a module ID for this module, write it out as a
    // regular LTO module with an index for summary-based dead stripping.
    ProfileSummaryInfo PSI(M);
    M.addModuleFlag(Module::Error, "ThinLTO", uint32_t(0));
    ModuleSummaryIndex Index = buildModuleSummaryIndex(M, nullptr, &PSI);
    WriteBitcodeToFile(M, OS, ShouldPreserveUseListOrder, &Index,
                       /*GenerateHash=*/false);

    if (ThinLinkOS)
      // We don't have a ThinLTO part, but still write the module to the
      // ThinLinkOS if requested so that the expected output file is produced.
      WriteBitcodeToFile(M, *ThinLinkOS, ShouldPreserveUseListOrder, &Index,
                         /*GenerateHash=*/false);

    return;
  }

  promoteTypeIds(M, ModuleId);

  // Returns whether a global or its associated global has attached type
  // metadata. The former may participate in CFI or whole-program
  // devirtualization, so they need to appear in the merged module instead of
  // the thin LTO module. Similarly, globals that are associated with globals
  // with type metadata need to appear in the merged module because they will
  // reference the global's section directly.
  auto HasTypeMetadata = [](const GlobalObject *GO) {
    if (MDNode *MD = GO->getMetadata(LLVMContext::MD_associated))
      if (auto *AssocVM = dyn_cast_or_null<ValueAsMetadata>(MD->getOperand(0)))
        if (auto *AssocGO = dyn_cast<GlobalObject>(AssocVM->getValue()))
          if (AssocGO->hasMetadata(LLVMContext::MD_type))
            return true;
    return GO->hasMetadata(LLVMContext::MD_type);
  };

  // Collect the set of virtual functions that are eligible for virtual
  // constant propagation. Each eligible function must not access memory, must
  // return an integer of width <=64 bits, must take at least one argument,
  // must not use its first argument (assumed to be "this") and all arguments
  // other than the first one must be of <=64 bit integer type.
  //
  // Note that we test whether this copy of the function is readnone, rather
  // than testing function attributes, which must hold for any copy of the
  // function, even a less optimized version substituted at link time. This is
  // sound because the virtual constant propagation optimizations effectively
  // inline all implementations of the virtual function into each call site,
  // rather than using function attributes to perform local optimization.
  DenseSet<const Function *> EligibleVirtualFns;
  // If any member of a comdat lives in MergedM, put all members of that
  // comdat in MergedM to keep the comdat together.
  DenseSet<const Comdat *> MergedMComdats;
  for (GlobalVariable &GV : M.globals())
    if (!GV.isDeclaration() && HasTypeMetadata(&GV)) {
      if (const auto *C = GV.getComdat())
        MergedMComdats.insert(C);
      forEachVirtualFunction(GV.getInitializer(), [&](Function *F) {
        auto *RT = dyn_cast<IntegerType>(F->getReturnType());
        if (!RT || RT->getBitWidth() > 64 || F->arg_empty() ||
            !F->arg_begin()->use_empty())
          return;
        for (auto &Arg : drop_begin(F->args())) {
          auto *ArgT = dyn_cast<IntegerType>(Arg.getType());
          if (!ArgT || ArgT->getBitWidth() > 64)
            return;
        }
        if (!F->isDeclaration() &&
            computeFunctionBodyMemoryAccess(*F, AARGetter(*F))
                .doesNotAccessMemory())
          EligibleVirtualFns.insert(F);
      });
    }

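  // Clone into MergedM everything that must live in the regular LTO half:
  // members of comdats anchored there, __cfi_check, functions eligible for
  // virtual constant propagation, and global variables (or aliases to them)
  // that carry type metadata.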
  ValueToValueMapTy VMap;
  std::unique_ptr<Module> MergedM(
      CloneModule(M, VMap, [&](const GlobalValue *GV) -> bool {
        if (const auto *C = GV->getComdat())
          if (MergedMComdats.count(C))
            return true;
        if (mustEmitToMergedModule(GV))
          return true;
        if (auto *F = dyn_cast<Function>(GV))
          return EligibleVirtualFns.count(F);
        if (auto *GVar =
                dyn_cast_or_null<GlobalVariable>(GV->getAliaseeObject()))
          return HasTypeMetadata(GVar);
        return false;
      }));
  StripDebugInfo(*MergedM);
  MergedM->setModuleInlineAsm("");

  // Clone any llvm.*used globals to ensure the included values are
  // not deleted.
  cloneUsedGlobalVariables(M, *MergedM, /*CompilerUsed*/ false);
  cloneUsedGlobalVariables(M, *MergedM, /*CompilerUsed*/ true);

  for (Function &F : *MergedM)
    if (!F.isDeclaration() && !mustEmitToMergedModule(&F)) {
      // Reset the linkage of all functions eligible for virtual constant
      // propagation. The canonical definitions live in the thin LTO module so
      // that they can be imported.
      F.setLinkage(GlobalValue::AvailableExternallyLinkage);
      F.setComdat(nullptr);
    }

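  // Record every externally visible (or address-taken) function that has type
  // metadata, plus aliases to such functions; these are described to the
  // regular LTO link through the "cfi.functions" metadata emitted below.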
  SetVector<GlobalValue *> CfiFunctions;
  for (auto &F : M)
    if ((!F.hasLocalLinkage() || F.hasAddressTaken()) && HasTypeMetadata(&F))
      CfiFunctions.insert(&F);
  for (auto &A : M.aliases())
    if (auto *F = dyn_cast<Function>(A.getAliasee()))
      if (HasTypeMetadata(F))
        CfiFunctions.insert(&A);

  // Remove all globals with type metadata, globals with comdats that live in
  // MergedM, and aliases pointing to such globals from the thin LTO module.
  filterModule(&M, [&](const GlobalValue *GV) {
    if (auto *GVar = dyn_cast_or_null<GlobalVariable>(GV->getAliaseeObject()))
      if (HasTypeMetadata(GVar))
        return false;
    if (const auto *C = GV->getComdat())
      if (MergedMComdats.count(C))
        return false;
    if (mustEmitToMergedModule(GV))
      return false;
    return true;
  });

  promoteInternals(*MergedM, M, ModuleId, CfiFunctions);
  promoteInternals(M, *MergedM, ModuleId, CfiFunctions);

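  // Build one "cfi.functions" entry per recorded function: its (possibly
  // promoted) name, its CFI linkage kind, and its type metadata.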
  auto &Ctx = MergedM->getContext();
  SmallVector<MDNode *, 8> CfiFunctionMDs;
  for (auto *V : CfiFunctions) {
    Function &F = *cast<Function>(V->getAliaseeObject());
    SmallVector<MDNode *, 2> Types;
    F.getMetadata(LLVMContext::MD_type, Types);

    SmallVector<Metadata *, 4> Elts;
    Elts.push_back(MDString::get(Ctx, V->getName()));
    CfiFunctionLinkage Linkage;
    if (lowertypetests::isJumpTableCanonical(&F))
      Linkage = CFL_Definition;
    else if (F.hasExternalWeakLinkage())
      Linkage = CFL_WeakDeclaration;
    else
      Linkage = CFL_Declaration;
    Elts.push_back(ConstantAsMetadata::get(
        llvm::ConstantInt::get(Type::getInt8Ty(Ctx), Linkage)));
    append_range(Elts, Types);
    CfiFunctionMDs.push_back(MDTuple::get(Ctx, Elts));
  }

  if (!CfiFunctionMDs.empty()) {
    NamedMDNode *NMD = MergedM->getOrInsertNamedMetadata("cfi.functions");
    for (auto *MD : CfiFunctionMDs)
      NMD->addOperand(MD);
  }

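  // Group function aliases by aliasee and describe them to the merged module
  // through "aliases" metadata: each entry lists the aliasee name followed by
  // the names of its aliases.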
  MapVector<Function *, std::vector<GlobalAlias *>> FunctionAliases;
  for (auto &A : M.aliases()) {
    if (!isa<Function>(A.getAliasee()))
      continue;

    auto *F = cast<Function>(A.getAliasee());
    FunctionAliases[F].push_back(&A);
  }

  if (!FunctionAliases.empty()) {
    NamedMDNode *NMD = MergedM->getOrInsertNamedMetadata("aliases");
    for (auto &Alias : FunctionAliases) {
      SmallVector<Metadata *> Elts;
      Elts.push_back(MDString::get(Ctx, Alias.first->getName()));
      for (auto *A : Alias.second)
        Elts.push_back(MDString::get(Ctx, A->getName()));
      NMD->addOperand(MDTuple::get(Ctx, Elts));
    }
  }

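  // Record any symbol versioning (".symver") directives from module inline
  // assembly that refer to used functions, as "symvers" metadata in the
  // merged module.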
  SmallVector<MDNode *, 8> Symvers;
  ModuleSymbolTable::CollectAsmSymvers(M, [&](StringRef Name, StringRef Alias) {
    Function *F = M.getFunction(Name);
    if (!F || F->use_empty())
      return;

    Symvers.push_back(MDTuple::get(
        Ctx, {MDString::get(Ctx, Name), MDString::get(Ctx, Alias)}));
  });

  if (!Symvers.empty()) {
    NamedMDNode *NMD = MergedM->getOrInsertNamedMetadata("symvers");
    for (auto *MD : Symvers)
      NMD->addOperand(MD);
  }

  simplifyExternals(*MergedM);

  // FIXME: Try to re-use BSI and PFI from the original module here.
  ProfileSummaryInfo PSI(M);
  ModuleSummaryIndex Index = buildModuleSummaryIndex(M, nullptr, &PSI);

  // Mark the merged module as requiring full LTO. We still want an index for
  // it though, so that it can participate in summary-based dead stripping.
  MergedM->addModuleFlag(Module::Error, "ThinLTO", uint32_t(0));
  ModuleSummaryIndex MergedMIndex =
      buildModuleSummaryIndex(*MergedM, nullptr, &PSI);

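  // Emit both halves into a single multi-module bitcode file: the ThinLTO
  // module followed by the merged regular LTO module, plus the combined
  // symbol table and string table.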
  SmallVector<char, 0> Buffer;

  BitcodeWriter W(Buffer);
  // Save the module hash produced for the full bitcode, which will
  // be used in the backends, and use that in the minimized bitcode
  // produced for the full link.
  ModuleHash ModHash = {{0}};
  W.writeModule(M, ShouldPreserveUseListOrder, &Index,
                /*GenerateHash=*/true, &ModHash);
  W.writeModule(*MergedM, ShouldPreserveUseListOrder, &MergedMIndex);
  W.writeSymtab();
  W.writeStrtab();
  OS << Buffer;

  // If a minimized bitcode module was requested for the thin link, only
  // the information that is needed by the thin link will be written to the
  // given OS (the merged module will be written as usual).
  if (ThinLinkOS) {
    Buffer.clear();
    BitcodeWriter W2(Buffer);
    StripDebugInfo(M);
    W2.writeThinLinkBitcode(M, Index, ModHash);
    W2.writeModule(*MergedM, /*ShouldPreserveUseListOrder=*/false,
                   &MergedMIndex);
    W2.writeSymtab();
    W2.writeStrtab();
    *ThinLinkOS << Buffer;
  }
}

// Check whether LTO unit splitting has been enabled.
bool enableSplitLTOUnit(Module &M) {
  bool EnableSplitLTOUnit = false;
  if (auto *MD = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("EnableSplitLTOUnit")))
    EnableSplitLTOUnit = MD->getZExtValue();
  return EnableSplitLTOUnit;
}

// Returns whether this module needs to be split (if splitting is enabled).
bool requiresSplit(Module &M) {
  for (auto &GO : M.global_objects()) {
    if (GO.hasMetadata(LLVMContext::MD_type))
      return true;
    if (mustEmitToMergedModule(&GO))
      return true;
  }
  return false;
}

bool writeThinLTOBitcode(raw_ostream &OS, raw_ostream *ThinLinkOS,
                         function_ref<AAResults &(Function &)> AARGetter,
                         Module &M, const ModuleSummaryIndex *Index,
                         const bool ShouldPreserveUseListOrder) {
  std::unique_ptr<ModuleSummaryIndex> NewIndex = nullptr;
  // See if this module needs to be split. If so, we try to split it
  // or at least promote type ids to enable WPD.
  if (requiresSplit(M)) {
    if (enableSplitLTOUnit(M)) {
      splitAndWriteThinLTOBitcode(OS, ThinLinkOS, AARGetter, M,
                                  ShouldPreserveUseListOrder);
      return true;
    }
    // Promote type ids as needed for index-based WPD.
    std::string ModuleId = getUniqueModuleId(&M);
    if (!ModuleId.empty()) {
      promoteTypeIds(M, ModuleId);
      // Need to rebuild the index so that it contains type metadata
      // for the newly promoted type ids.
      // FIXME: Probably should not bother building the index at all
      // in the caller of writeThinLTOBitcode (which does so via the
      // ModuleSummaryIndexAnalysis pass), since we have to rebuild it
      // anyway whenever there is type metadata (here or in
      // splitAndWriteThinLTOBitcode). Just always build it once via
      // buildModuleSummaryIndex when the Module(s) are ready.
      ProfileSummaryInfo PSI(M);
      NewIndex = std::make_unique<ModuleSummaryIndex>(
          buildModuleSummaryIndex(M, nullptr, &PSI));
      Index = NewIndex.get();
    }
  }

  // Write it out as an unsplit ThinLTO module.

  // Save the module hash produced for the full bitcode, which will
  // be used in the backends, and use that in the minimized bitcode
  // produced for the full link.
  ModuleHash ModHash = {{0}};
  WriteBitcodeToFile(M, OS, ShouldPreserveUseListOrder, Index,
                     /*GenerateHash=*/true, &ModHash);
  // If a minimized bitcode module was requested for the thin link, only
  // the information that is needed by the thin link will be written to the
  // given OS.
  if (ThinLinkOS && Index)
    writeThinLinkBitcodeToFile(M, *ThinLinkOS, *Index, ModHash);
  return false;
}

} // anonymous namespace

PreservedAnalyses
llvm::ThinLTOBitcodeWriterPass::run(Module &M, ModuleAnalysisManager &AM) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();

  M.removeDebugIntrinsicDeclarations();

  bool Changed = writeThinLTOBitcode(
      OS, ThinLinkOS,
      [&FAM](Function &F) -> AAResults & {
        return FAM.getResult<AAManager>(F);
      },
      M, &AM.getResult<ModuleSummaryIndexAnalysis>(M),
      ShouldPreserveUseListOrder);

  return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}