1//===- InstrProf.cpp - Instrumented profiling format support --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains support for clang's instrumentation based PGO and
10// coverage.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/ProfileData/InstrProf.h"
15#include "llvm/ADT/ArrayRef.h"
16#include "llvm/ADT/SmallVector.h"
17#include "llvm/ADT/StringExtras.h"
18#include "llvm/ADT/StringRef.h"
19#include "llvm/Config/config.h"
20#include "llvm/IR/Constant.h"
21#include "llvm/IR/Constants.h"
22#include "llvm/IR/Function.h"
23#include "llvm/IR/GlobalValue.h"
24#include "llvm/IR/GlobalVariable.h"
25#include "llvm/IR/Instruction.h"
26#include "llvm/IR/LLVMContext.h"
27#include "llvm/IR/MDBuilder.h"
28#include "llvm/IR/Metadata.h"
29#include "llvm/IR/Module.h"
30#include "llvm/IR/ProfDataUtils.h"
31#include "llvm/IR/Type.h"
32#include "llvm/ProfileData/InstrProfReader.h"
33#include "llvm/ProfileData/SampleProf.h"
34#include "llvm/Support/Casting.h"
35#include "llvm/Support/CommandLine.h"
36#include "llvm/Support/Compiler.h"
37#include "llvm/Support/Compression.h"
38#include "llvm/Support/Debug.h"
39#include "llvm/Support/Endian.h"
40#include "llvm/Support/Error.h"
41#include "llvm/Support/ErrorHandling.h"
42#include "llvm/Support/LEB128.h"
43#include "llvm/Support/MathExtras.h"
44#include "llvm/Support/Path.h"
45#include "llvm/Support/SwapByteOrder.h"
46#include "llvm/Support/VirtualFileSystem.h"
47#include "llvm/Support/raw_ostream.h"
48#include "llvm/TargetParser/Triple.h"
49#include <algorithm>
50#include <cassert>
51#include <cstddef>
52#include <cstdint>
53#include <cstring>
54#include <memory>
55#include <string>
56#include <system_error>
57#include <type_traits>
58#include <utility>
59#include <vector>
60
61using namespace llvm;
62
63#define DEBUG_TYPE "instrprof"
64
// Use the full module build path as the prefix for profile counter names of
// static functions (see getPGOFuncName). Disabling this keeps only the source
// basename, which is more portable across build trees but less unique.
static cl::opt<bool> StaticFuncFullModulePrefix(
    "static-func-full-module-prefix", cl::init(Val: true), cl::Hidden,
    cl::desc("Use full module build paths in the profile counter names for "
             "static functions."));

// This option is tailored to users that have different top-level directory in
// profile-gen and profile-use compilation. Users need to specify the number
// of levels to strip. A value larger than the number of directories in the
// source file will strip all the directory names and only leave the basename.
//
// Note current ThinLTO module importing for the indirect-calls assumes
// the source directory name not being stripped. A non-zero option value here
// can potentially prevent some inter-module indirect-call-promotions.
static cl::opt<unsigned> StaticFuncStripDirNamePrefix(
    "static-func-strip-dirname-prefix", cl::init(Val: 0), cl::Hidden,
    cl::desc("Strip specified level of directory name from source path in "
             "the profile counter name for static functions."));
82
83static std::string getInstrProfErrString(instrprof_error Err,
84 const std::string &ErrMsg = "") {
85 std::string Msg;
86 raw_string_ostream OS(Msg);
87
88 switch (Err) {
89 case instrprof_error::success:
90 OS << "success";
91 break;
92 case instrprof_error::eof:
93 OS << "end of File";
94 break;
95 case instrprof_error::unrecognized_format:
96 OS << "unrecognized instrumentation profile encoding format";
97 break;
98 case instrprof_error::bad_magic:
99 OS << "invalid instrumentation profile data (bad magic)";
100 break;
101 case instrprof_error::bad_header:
102 OS << "invalid instrumentation profile data (file header is corrupt)";
103 break;
104 case instrprof_error::unsupported_version:
105 OS << "unsupported instrumentation profile format version";
106 break;
107 case instrprof_error::unsupported_hash_type:
108 OS << "unsupported instrumentation profile hash type";
109 break;
110 case instrprof_error::too_large:
111 OS << "too much profile data";
112 break;
113 case instrprof_error::truncated:
114 OS << "truncated profile data";
115 break;
116 case instrprof_error::malformed:
117 OS << "malformed instrumentation profile data";
118 break;
119 case instrprof_error::missing_correlation_info:
120 OS << "debug info/binary for correlation is required";
121 break;
122 case instrprof_error::unexpected_correlation_info:
123 OS << "debug info/binary for correlation is not necessary";
124 break;
125 case instrprof_error::unable_to_correlate_profile:
126 OS << "unable to correlate profile";
127 break;
128 case instrprof_error::invalid_prof:
129 OS << "invalid profile created. Please file a bug "
130 "at: " BUG_REPORT_URL
131 " and include the profraw files that caused this error.";
132 break;
133 case instrprof_error::unknown_function:
134 OS << "no profile data available for function";
135 break;
136 case instrprof_error::hash_mismatch:
137 OS << "function control flow change detected (hash mismatch)";
138 break;
139 case instrprof_error::count_mismatch:
140 OS << "function basic block count change detected (counter mismatch)";
141 break;
142 case instrprof_error::bitmap_mismatch:
143 OS << "function bitmap size change detected (bitmap size mismatch)";
144 break;
145 case instrprof_error::counter_overflow:
146 OS << "counter overflow";
147 break;
148 case instrprof_error::value_site_count_mismatch:
149 OS << "function value site count change detected (counter mismatch)";
150 break;
151 case instrprof_error::compress_failed:
152 OS << "failed to compress data (zlib)";
153 break;
154 case instrprof_error::uncompress_failed:
155 OS << "failed to uncompress data (zlib)";
156 break;
157 case instrprof_error::empty_raw_profile:
158 OS << "empty raw profile file";
159 break;
160 case instrprof_error::zlib_unavailable:
161 OS << "profile uses zlib compression but the profile reader was built "
162 "without zlib support";
163 break;
164 case instrprof_error::raw_profile_version_mismatch:
165 OS << "raw profile version mismatch";
166 break;
167 case instrprof_error::counter_value_too_large:
168 OS << "excessively large counter value suggests corrupted profile data";
169 break;
170 }
171
172 // If optional error message is not empty, append it to the message.
173 if (!ErrMsg.empty())
174 OS << ": " << ErrMsg;
175
176 return OS.str();
177}
178
namespace {

// FIXME: This class is only here to support the transition to llvm::Error. It
// will be removed once this transition is complete. Clients should prefer to
// deal with the Error value directly, rather than converting to error_code.
//
// std::error_category adapter so instrprof_error values can round-trip
// through std::error_code; messages are produced by getInstrProfErrString().
class InstrProfErrorCategoryType : public std::error_category {
  // Category name reported by std::error_code::category().name().
  const char *name() const noexcept override { return "llvm.instrprof"; }

  // Map the stored integer value back to its human-readable description.
  std::string message(int IE) const override {
    return getInstrProfErrString(Err: static_cast<instrprof_error>(IE));
  }
};

} // end anonymous namespace
193
// Return the process-wide singleton error category for instrprof
// std::error_codes; initialized lazily and thread-safely on first use.
const std::error_category &llvm::instrprof_category() {
  static InstrProfErrorCategoryType ErrorCategory;
  return ErrorCategory;
}
198
namespace {

// Section names for non-COFF object formats (ELF, Mach-O, ...), indexed by
// InstrProfSectKind; entries are generated from InstrProfData.inc.
const char *InstrProfSectNameCommon[] = {
#define INSTR_PROF_SECT_ENTRY(Kind, SectNameCommon, SectNameCoff, Prefix) \
  SectNameCommon,
#include "llvm/ProfileData/InstrProfData.inc"
};

// COFF-specific section names, indexed by InstrProfSectKind.
const char *InstrProfSectNameCoff[] = {
#define INSTR_PROF_SECT_ENTRY(Kind, SectNameCommon, SectNameCoff, Prefix) \
  SectNameCoff,
#include "llvm/ProfileData/InstrProfData.inc"
};

// Mach-O segment prefixes (prepended when AddSegmentInfo is requested),
// indexed by InstrProfSectKind.
const char *InstrProfSectNamePrefix[] = {
#define INSTR_PROF_SECT_ENTRY(Kind, SectNameCommon, SectNameCoff, Prefix) \
  Prefix,
#include "llvm/ProfileData/InstrProfData.inc"
};

} // namespace
220
221namespace llvm {
222
// Compress the concatenated name strings (with zlib, when available) before
// emitting them into the binary; see collectGlobalObjectNameStrings().
cl::opt<bool> DoInstrProfNameCompression(
    "enable-name-compression",
    cl::desc("Enable name/filename string compression"), cl::init(Val: true));

cl::opt<bool> EnableVTableValueProfiling(
    "enable-vtable-value-profiling", cl::init(Val: false),
    cl::desc("If true, the virtual table address will be instrumented to know "
             "the types of a C++ pointer. The information is used in indirect "
             "call promotion to do selective vtable-based comparison."));

cl::opt<bool> EnableVTableProfileUse(
    "enable-vtable-profile-use", cl::init(Val: false),
    cl::desc("If ThinLTO and WPD is enabled and this option is true, vtable "
             "profiles will be used by ICP pass for more efficient indirect "
             "call sequence. If false, type profiles won't be used."));
238
239std::string getInstrProfSectionName(InstrProfSectKind IPSK,
240 Triple::ObjectFormatType OF,
241 bool AddSegmentInfo) {
242 std::string SectName;
243
244 if (OF == Triple::MachO && AddSegmentInfo)
245 SectName = InstrProfSectNamePrefix[IPSK];
246
247 if (OF == Triple::COFF)
248 SectName += InstrProfSectNameCoff[IPSK];
249 else
250 SectName += InstrProfSectNameCommon[IPSK];
251
252 if (OF == Triple::MachO && IPSK == IPSK_data && AddSegmentInfo)
253 SectName += ",regular,live_support";
254
255 return SectName;
256}
257
// Render this error (plus any attached detail string) for diagnostics.
std::string InstrProfError::message() const {
  return getInstrProfErrString(Err, ErrMsg: Msg);
}

// Anchor for llvm::ErrorInfo's RTTI-style classification.
char InstrProfError::ID = 0;
263
// File-backed output: seeking is possible, so patch() can rewrite earlier
// bytes in place via seek().
ProfOStream::ProfOStream(raw_fd_ostream &FD)
    : IsFDOStream(true), OS(FD), LE(FD, llvm::endianness::little) {}

// In-memory output: patch() edits the backing string directly instead.
ProfOStream::ProfOStream(raw_string_ostream &STR)
    : IsFDOStream(false), OS(STR), LE(STR, llvm::endianness::little) {}

// Current byte offset in the underlying stream.
uint64_t ProfOStream::tell() const { return OS.tell(); }
// All fixed-width writes are little-endian, as required by the profile format.
void ProfOStream::write(uint64_t V) { LE.write<uint64_t>(Val: V); }
void ProfOStream::write32(uint32_t V) { LE.write<uint32_t>(Val: V); }
void ProfOStream::writeByte(uint8_t V) { LE.write<uint8_t>(Val: V); }
274
275void ProfOStream::patch(ArrayRef<PatchItem> P) {
276 using namespace support;
277
278 if (IsFDOStream) {
279 raw_fd_ostream &FDOStream = static_cast<raw_fd_ostream &>(OS);
280 const uint64_t LastPos = FDOStream.tell();
281 for (const auto &K : P) {
282 FDOStream.seek(off: K.Pos);
283 for (uint64_t Elem : K.D)
284 write(V: Elem);
285 }
286 // Reset the stream to the last position after patching so that users
287 // don't accidentally overwrite data. This makes it consistent with
288 // the string stream below which replaces the data directly.
289 FDOStream.seek(off: LastPos);
290 } else {
291 raw_string_ostream &SOStream = static_cast<raw_string_ostream &>(OS);
292 std::string &Data = SOStream.str(); // with flush
293 for (const auto &K : P) {
294 for (int I = 0, E = K.D.size(); I != E; I++) {
295 uint64_t Bytes =
296 endian::byte_swap<uint64_t>(value: K.D[I], endian: llvm::endianness::little);
297 Data.replace(pos: K.Pos + I * sizeof(uint64_t), n1: sizeof(uint64_t),
298 s: (const char *)&Bytes, n2: sizeof(uint64_t));
299 }
300 }
301 }
302}
303
304std::string getPGOFuncName(StringRef Name, GlobalValue::LinkageTypes Linkage,
305 StringRef FileName,
306 [[maybe_unused]] uint64_t Version) {
307 // Value names may be prefixed with a binary '1' to indicate
308 // that the backend should not modify the symbols due to any platform
309 // naming convention. Do not include that '1' in the PGO profile name.
310 if (Name[0] == '\1')
311 Name = Name.substr(Start: 1);
312
313 std::string NewName = std::string(Name);
314 if (llvm::GlobalValue::isLocalLinkage(Linkage)) {
315 // For local symbols, prepend the main file name to distinguish them.
316 // Do not include the full path in the file name since there's no guarantee
317 // that it will stay the same, e.g., if the files are checked out from
318 // version control in different locations.
319 if (FileName.empty())
320 NewName = NewName.insert(pos: 0, s: "<unknown>:");
321 else
322 NewName = NewName.insert(pos1: 0, str: FileName.str() + ":");
323 }
324 return NewName;
325}
326
327// Strip NumPrefix level of directory name from PathNameStr. If the number of
328// directory separators is less than NumPrefix, strip all the directories and
329// leave base file name only.
330static StringRef stripDirPrefix(StringRef PathNameStr, uint32_t NumPrefix) {
331 uint32_t Count = NumPrefix;
332 uint32_t Pos = 0, LastPos = 0;
333 for (const auto &CI : PathNameStr) {
334 ++Pos;
335 if (llvm::sys::path::is_separator(value: CI)) {
336 LastPos = Pos;
337 --Count;
338 }
339 if (Count == 0)
340 break;
341 }
342 return PathNameStr.substr(Start: LastPos);
343}
344
345static StringRef getStrippedSourceFileName(const GlobalObject &GO) {
346 StringRef FileName(GO.getParent()->getSourceFileName());
347 uint32_t StripLevel = StaticFuncFullModulePrefix ? 0 : (uint32_t)-1;
348 if (StripLevel < StaticFuncStripDirNamePrefix)
349 StripLevel = StaticFuncStripDirNamePrefix;
350 if (StripLevel)
351 FileName = stripDirPrefix(PathNameStr: FileName, NumPrefix: StripLevel);
352 return FileName;
353}
354
// The PGO name has the format [<filepath>;]<mangled-name> where <filepath>; is
// provided if linkage is local and is used to discriminate possibly identical
// mangled names. ";" is used because it is unlikely to be found in either
// <filepath> or <mangled-name>.
//
// Older compilers used getPGOFuncName() which has the format
// [<filepath>:]<mangled-name>. This caused trouble for Objective-C functions
// which commonly have :'s in their names. We still need to compute this name to
// lookup functions from profiles built by older compilers.
//
// Thin wrapper over GlobalValue::getGlobalIdentifier, which implements the
// format described above.
static std::string
getIRPGONameForGlobalObject(const GlobalObject &GO,
                            GlobalValue::LinkageTypes Linkage,
                            StringRef FileName) {
  return GlobalValue::getGlobalIdentifier(Name: GO.getName(), Linkage, FileName);
}
370
371static std::optional<std::string> lookupPGONameFromMetadata(MDNode *MD) {
372 if (MD != nullptr) {
373 StringRef S = cast<MDString>(Val: MD->getOperand(I: 0))->getString();
374 return S.str();
375 }
376 return {};
377}
378
379// Returns the PGO object name. This function has some special handling
380// when called in LTO optimization. The following only applies when calling in
381// LTO passes (when \c InLTO is true): LTO's internalization privatizes many
382// global linkage symbols. This happens after value profile annotation, but
383// those internal linkage functions should not have a source prefix.
384// Additionally, for ThinLTO mode, exported internal functions are promoted
385// and renamed. We need to ensure that the original internal PGO name is
386// used when computing the GUID that is compared against the profiled GUIDs.
387// To differentiate compiler generated internal symbols from original ones,
388// PGOFuncName meta data are created and attached to the original internal
389// symbols in the value profile annotation step
390// (PGOUseFunc::annotateIndirectCallSites). If a symbol does not have the meta
391// data, its original linkage must be non-internal.
392static std::string getIRPGOObjectName(const GlobalObject &GO, bool InLTO,
393 MDNode *PGONameMetadata) {
394 if (!InLTO) {
395 auto FileName = getStrippedSourceFileName(GO);
396 return getIRPGONameForGlobalObject(GO, Linkage: GO.getLinkage(), FileName);
397 }
398
399 // In LTO mode (when InLTO is true), first check if there is a meta data.
400 if (auto IRPGOFuncName = lookupPGONameFromMetadata(MD: PGONameMetadata))
401 return *IRPGOFuncName;
402
403 // If there is no meta data, the function must be a global before the value
404 // profile annotation pass. Its current linkage may be internal if it is
405 // internalized in LTO mode.
406 return getIRPGONameForGlobalObject(GO, Linkage: GlobalValue::ExternalLinkage, FileName: "");
407}
408
// Returns the IRPGO function name and does special handling when called
// in LTO optimization. See the comments of `getIRPGOObjectName` for details.
std::string getIRPGOFuncName(const Function &F, bool InLTO) {
  return getIRPGOObjectName(GO: F, InLTO, PGONameMetadata: getPGOFuncNameMetadata(F));
}
414
415// Please use getIRPGOFuncName for LLVM IR instrumentation. This function is
416// for front-end (Clang, etc) instrumentation.
417// The implementation is kept for profile matching from older profiles.
418// This is similar to `getIRPGOFuncName` except that this function calls
419// 'getPGOFuncName' to get a name and `getIRPGOFuncName` calls
420// 'getIRPGONameForGlobalObject'. See the difference between two callees in the
421// comments of `getIRPGONameForGlobalObject`.
422std::string getPGOFuncName(const Function &F, bool InLTO, uint64_t Version) {
423 if (!InLTO) {
424 auto FileName = getStrippedSourceFileName(GO: F);
425 return getPGOFuncName(Name: F.getName(), Linkage: F.getLinkage(), FileName, Version);
426 }
427
428 // In LTO mode (when InLTO is true), first check if there is a meta data.
429 if (auto PGOFuncName = lookupPGONameFromMetadata(MD: getPGOFuncNameMetadata(F)))
430 return *PGOFuncName;
431
432 // If there is no meta data, the function must be a global before the value
433 // profile annotation pass. Its current linkage may be internal if it is
434 // internalized in LTO mode.
435 return getPGOFuncName(Name: F.getName(), Linkage: GlobalValue::ExternalLinkage, FileName: "");
436}
437
// Returns the PGO name for global variable \p V (e.g. a vtable).
std::string getPGOName(const GlobalVariable &V, bool InLTO) {
  // PGONameMetadata should be set by compiler at profile use time
  // and read by symtab creation to look up symbols corresponding to
  // a MD5 hash.
  return getIRPGOObjectName(GO: V, InLTO, PGONameMetadata: V.getMetadata(Kind: getPGONameMetadataName()));
}
444
445// See getIRPGOObjectName() for a discription of the format.
446std::pair<StringRef, StringRef> getParsedIRPGOName(StringRef IRPGOName) {
447 auto [FileName, MangledName] = IRPGOName.split(Separator: GlobalIdentifierDelimiter);
448 if (MangledName.empty())
449 return std::make_pair(x: StringRef(), y&: IRPGOName);
450 return std::make_pair(x&: FileName, y&: MangledName);
451}
452
453StringRef getFuncNameWithoutPrefix(StringRef PGOFuncName, StringRef FileName) {
454 if (FileName.empty())
455 return PGOFuncName;
456 // Drop the file name including ':' or ';'. See getIRPGONameForGlobalObject as
457 // well.
458 if (PGOFuncName.starts_with(Prefix: FileName))
459 PGOFuncName = PGOFuncName.drop_front(N: FileName.size() + 1);
460 return PGOFuncName;
461}
462
463// \p FuncName is the string used as profile lookup key for the function. A
464// symbol is created to hold the name. Return the legalized symbol name.
465std::string getPGOFuncNameVarName(StringRef FuncName,
466 GlobalValue::LinkageTypes Linkage) {
467 std::string VarName = std::string(getInstrProfNameVarPrefix());
468 VarName += FuncName;
469
470 if (!GlobalValue::isLocalLinkage(Linkage))
471 return VarName;
472
473 // Now fix up illegal chars in local VarName that may upset the assembler.
474 const char InvalidChars[] = "-:;<>/\"'";
475 size_t FoundPos = VarName.find_first_of(s: InvalidChars);
476 while (FoundPos != std::string::npos) {
477 VarName[FoundPos] = '_';
478 FoundPos = VarName.find_first_of(s: InvalidChars, pos: FoundPos + 1);
479 }
480 return VarName;
481}
482
// Returns true when \p M targets a GPU, which needs different PGO symbol
// visibility handling than host targets.
bool isGPUProfTarget(const Module &M) {
  const Triple &T = M.getTargetTriple();
  return T.isGPU();
}
487
// Adjust \p FuncNameVar's visibility so each executable gets its own copy.
void setPGOFuncVisibility(Module &M, GlobalVariable *FuncNameVar) {
  // Hide the symbol so that we correctly get a copy for each executable.
  if (!GlobalValue::isLocalLinkage(Linkage: FuncNameVar->getLinkage()))
    FuncNameVar->setVisibility(GlobalValue::HiddenVisibility);
}
493
494GlobalVariable *createPGOFuncNameVar(Module &M,
495 GlobalValue::LinkageTypes Linkage,
496 StringRef PGOFuncName) {
497 // We generally want to match the function's linkage, but available_externally
498 // and extern_weak both have the wrong semantics, and anything that doesn't
499 // need to link across compilation units doesn't need to be visible at all.
500 if (Linkage == GlobalValue::ExternalWeakLinkage)
501 Linkage = GlobalValue::LinkOnceAnyLinkage;
502 else if (Linkage == GlobalValue::AvailableExternallyLinkage)
503 Linkage = GlobalValue::LinkOnceODRLinkage;
504 else if (Linkage == GlobalValue::InternalLinkage ||
505 Linkage == GlobalValue::ExternalLinkage)
506 Linkage = GlobalValue::PrivateLinkage;
507
508 auto *Value =
509 ConstantDataArray::getString(Context&: M.getContext(), Initializer: PGOFuncName, AddNull: false);
510 auto *FuncNameVar =
511 new GlobalVariable(M, Value->getType(), true, Linkage, Value,
512 getPGOFuncNameVarName(FuncName: PGOFuncName, Linkage));
513
514 setPGOFuncVisibility(M, FuncNameVar);
515 return FuncNameVar;
516}
517
// Convenience overload: derive the module and linkage from \p F itself.
GlobalVariable *createPGOFuncNameVar(Function &F, StringRef PGOFuncName) {
  return createPGOFuncNameVar(M&: *F.getParent(), Linkage: F.getLinkage(), PGOFuncName);
}
521
522Error InstrProfSymtab::create(Module &M, bool InLTO, bool AddCanonical) {
523 for (Function &F : M) {
524 // Function may not have a name: like using asm("") to overwrite the name.
525 // Ignore in this case.
526 if (!F.hasName())
527 continue;
528 auto IRPGOFuncName = getIRPGOFuncName(F, InLTO);
529 if (Error E = addFuncWithName(F, PGOFuncName: IRPGOFuncName, AddCanonical))
530 return E;
531 // Also use getPGOFuncName() so that we can find records from older profiles
532 auto PGOFuncName = getPGOFuncName(F, InLTO);
533 if (PGOFuncName != IRPGOFuncName)
534 if (Error E = addFuncWithName(F, PGOFuncName, AddCanonical))
535 return E;
536 }
537
538 for (GlobalVariable &G : M.globals()) {
539 if (!G.hasName() || !G.hasMetadata(KindID: LLVMContext::MD_type))
540 continue;
541 if (Error E = addVTableWithName(V&: G, PGOVTableName: getPGOName(V: G, InLTO)))
542 return E;
543 }
544
545 Sorted = false;
546 finalizeSymtab();
547 return Error::success();
548}
549
550Error InstrProfSymtab::addVTableWithName(GlobalVariable &VTable,
551 StringRef VTablePGOName) {
552 auto NameToGUIDMap = [&](StringRef Name) -> Error {
553 if (Error E = addSymbolName(SymbolName: Name))
554 return E;
555
556 bool Inserted = true;
557 std::tie(args: std::ignore, args&: Inserted) = MD5VTableMap.try_emplace(
558 Key: GlobalValue::getGUIDAssumingExternalLinkage(GlobalName: Name), Args: &VTable);
559 if (!Inserted)
560 LLVM_DEBUG(dbgs() << "GUID conflict within one module");
561 return Error::success();
562 };
563 if (Error E = NameToGUIDMap(VTablePGOName))
564 return E;
565
566 StringRef CanonicalName = getCanonicalName(PGOName: VTablePGOName);
567 if (CanonicalName != VTablePGOName)
568 return NameToGUIDMap(CanonicalName);
569
570 return Error::success();
571}
572
573Error readAndDecodeStrings(StringRef NameStrings,
574 std::function<Error(StringRef)> NameCallback) {
575 const uint8_t *P = NameStrings.bytes_begin();
576 const uint8_t *EndP = NameStrings.bytes_end();
577 while (P < EndP) {
578 uint32_t N;
579 uint64_t UncompressedSize = decodeULEB128(p: P, n: &N);
580 P += N;
581 uint64_t CompressedSize = decodeULEB128(p: P, n: &N);
582 P += N;
583 const bool IsCompressed = (CompressedSize != 0);
584 SmallVector<uint8_t, 128> UncompressedNameStrings;
585 StringRef NameStrings;
586 if (IsCompressed) {
587 if (!llvm::compression::zlib::isAvailable())
588 return make_error<InstrProfError>(Args: instrprof_error::zlib_unavailable);
589
590 if (Error E = compression::zlib::decompress(Input: ArrayRef(P, CompressedSize),
591 Output&: UncompressedNameStrings,
592 UncompressedSize)) {
593 consumeError(Err: std::move(E));
594 return make_error<InstrProfError>(Args: instrprof_error::uncompress_failed);
595 }
596 P += CompressedSize;
597 NameStrings = toStringRef(Input: UncompressedNameStrings);
598 } else {
599 NameStrings =
600 StringRef(reinterpret_cast<const char *>(P), UncompressedSize);
601 P += UncompressedSize;
602 }
603 // Now parse the name strings.
604 SmallVector<StringRef, 0> Names;
605 NameStrings.split(A&: Names, Separator: getInstrProfNameSeparator());
606 for (StringRef &Name : Names)
607 if (Error E = NameCallback(Name))
608 return E;
609
610 while (P < EndP && *P == 0)
611 P++;
612 }
613 return Error::success();
614}
615
// Build the function-name table from an encoded (possibly compressed) name
// blob; see readAndDecodeStrings for the format.
Error InstrProfSymtab::create(StringRef NameStrings) {
  return readAndDecodeStrings(NameStrings,
                              NameCallback: [&](StringRef S) { return addFuncName(FuncName: S); });
}

// Populate both the function-name and the vtable-name tables from their
// respective encoded blobs.
Error InstrProfSymtab::create(StringRef FuncNameStrings,
                              StringRef VTableNameStrings) {
  if (Error E = readAndDecodeStrings(
          NameStrings: FuncNameStrings, NameCallback: [&](StringRef S) { return addFuncName(FuncName: S); }))
    return E;

  return readAndDecodeStrings(NameStrings: VTableNameStrings,
                              NameCallback: [&](StringRef S) { return addVTableName(VTableName: S); });
}

// Decode vtable names from a compressed blob and register each one.
Error InstrProfSymtab::initVTableNamesFromCompressedStrings(
    StringRef CompressedVTableStrings) {
  return readAndDecodeStrings(NameStrings: CompressedVTableStrings,
                              NameCallback: [&](StringRef S) { return addVTableName(VTableName: S); });
}
636
// Canonicalize \p PGOName for profile matching by stripping compiler-added
// suffixes while keeping ".__uniq.".
StringRef InstrProfSymtab::getCanonicalName(StringRef PGOName) {
  // In ThinLTO, local function may have been promoted to global and have
  // suffix ".llvm." added to the function name. We need to add the
  // stripped function name to the symbol table so that we can find a match
  // from profile.
  //
  // ".__uniq." suffix is used to differentiate internal linkage functions in
  // different modules and should be kept. This is the only suffix with the
  // pattern ".xxx" which is kept before matching, other suffixes ".llvm." and
  // ".part" will be stripped.
  //
  // Leverage the common canonicalization logic from FunctionSamples. Instead of
  // removing all suffixes except ".__uniq.", explicitly specify the ones to be
  // removed. This avoids the issue of colliding the canonical names of
  // coroutine function with its await suspend wrappers or with its post-split
  // clones. i.e. coro function foo, its wrappers
  // (foo.__await_suspend_wrapper__init, and foo.__await_suspend_wrapper__final)
  // and its post-split clones (foo.resume, foo.cleanup) are all canonicalized
  // to "foo" otherwise, which can make the symtab lookup return unexpected
  // result.
  const SmallVector<StringRef> SuffixesToRemove{".llvm.", ".part."};
  return FunctionSamples::getCanonicalFnName(FnName: PGOName, Suffixes: SuffixesToRemove);
}
660
661Error InstrProfSymtab::addFuncWithName(Function &F, StringRef PGOFuncName,
662 bool AddCanonical) {
663 auto NameToGUIDMap = [&](StringRef Name) -> Error {
664 if (Error E = addFuncName(FuncName: Name))
665 return E;
666 MD5FuncMap.emplace_back(args: Function::getGUIDAssumingExternalLinkage(GlobalName: Name), args: &F);
667 return Error::success();
668 };
669 if (Error E = NameToGUIDMap(PGOFuncName))
670 return E;
671
672 if (!AddCanonical)
673 return Error::success();
674
675 StringRef CanonicalFuncName = getCanonicalName(PGOName: PGOFuncName);
676 if (CanonicalFuncName != PGOFuncName)
677 return NameToGUIDMap(CanonicalFuncName);
678
679 return Error::success();
680}
681
// Map a runtime vtable address back to its hash value.
uint64_t InstrProfSymtab::getVTableHashFromAddress(uint64_t Address) const {
  // Given a runtime address, look up the hash value in the interval map, and
  // fallback to value 0 if a hash value is not found.
  return VTableAddrMap.lookup(x: Address, NotFound: 0);
}
687
688uint64_t InstrProfSymtab::getFunctionHashFromAddress(uint64_t Address) const {
689 finalizeSymtab();
690 auto It = partition_point(Range&: AddrToMD5Map, P: [=](std::pair<uint64_t, uint64_t> A) {
691 return A.first < Address;
692 });
693 // Raw function pointer collected by value profiler may be from
694 // external functions that are not instrumented. They won't have
695 // mapping data to be used by the deserializer. Force the value to
696 // be 0 in this case.
697 if (It != AddrToMD5Map.end() && It->first == Address)
698 return (uint64_t)It->second;
699 return 0;
700}
701
702void InstrProfSymtab::dumpNames(raw_ostream &OS) const {
703 SmallVector<StringRef, 0> Sorted(NameTab.keys());
704 llvm::sort(C&: Sorted);
705 for (StringRef S : Sorted)
706 OS << S << '\n';
707}
708
709Error collectGlobalObjectNameStrings(ArrayRef<std::string> NameStrs,
710 bool DoCompression, std::string &Result) {
711 assert(!NameStrs.empty() && "No name data to emit");
712
713 uint8_t Header[20], *P = Header;
714 std::string UncompressedNameStrings =
715 join(Begin: NameStrs.begin(), End: NameStrs.end(), Separator: getInstrProfNameSeparator());
716
717 assert(StringRef(UncompressedNameStrings)
718 .count(getInstrProfNameSeparator()) == (NameStrs.size() - 1) &&
719 "PGO name is invalid (contains separator token)");
720
721 unsigned EncLen = encodeULEB128(Value: UncompressedNameStrings.length(), p: P);
722 P += EncLen;
723
724 auto WriteStringToResult = [&](size_t CompressedLen, StringRef InputStr) {
725 EncLen = encodeULEB128(Value: CompressedLen, p: P);
726 P += EncLen;
727 char *HeaderStr = reinterpret_cast<char *>(&Header[0]);
728 unsigned HeaderLen = P - &Header[0];
729 Result.append(s: HeaderStr, n: HeaderLen);
730 Result += InputStr;
731 return Error::success();
732 };
733
734 if (!DoCompression) {
735 return WriteStringToResult(0, UncompressedNameStrings);
736 }
737
738 SmallVector<uint8_t, 128> CompressedNameStrings;
739 compression::zlib::compress(Input: arrayRefFromStringRef(Input: UncompressedNameStrings),
740 CompressedBuffer&: CompressedNameStrings,
741 Level: compression::zlib::BestSizeCompression);
742
743 return WriteStringToResult(CompressedNameStrings.size(),
744 toStringRef(Input: CompressedNameStrings));
745}
746
// Extract the raw name string held by a PGO name variable (as created by
// createPGOFuncNameVar); handles both C-string and plain array initializers.
StringRef getPGOFuncNameVarInitializer(GlobalVariable *NameVar) {
  auto *Arr = cast<ConstantDataArray>(Val: NameVar->getInitializer());
  StringRef NameStr =
      Arr->isCString() ? Arr->getAsCString() : Arr->getAsString();
  return NameStr;
}
753
754Error collectPGOFuncNameStrings(ArrayRef<GlobalVariable *> NameVars,
755 std::string &Result, bool DoCompression) {
756 std::vector<std::string> NameStrs;
757 for (auto *NameVar : NameVars) {
758 NameStrs.push_back(x: std::string(getPGOFuncNameVarInitializer(NameVar)));
759 }
760 return collectGlobalObjectNameStrings(
761 NameStrs, DoCompression: compression::zlib::isAvailable() && DoCompression, Result);
762}
763
764Error collectVTableStrings(ArrayRef<GlobalVariable *> VTables,
765 std::string &Result, bool DoCompression) {
766 std::vector<std::string> VTableNameStrs;
767 for (auto *VTable : VTables)
768 VTableNameStrs.push_back(x: getPGOName(V: *VTable));
769 return collectGlobalObjectNameStrings(
770 NameStrs: VTableNameStrs, DoCompression: compression::zlib::isAvailable() && DoCompression,
771 Result);
772}
773
774void InstrProfRecord::accumulateCounts(CountSumOrPercent &Sum) const {
775 uint64_t FuncSum = 0;
776 Sum.NumEntries += Counts.size();
777 for (uint64_t Count : Counts)
778 FuncSum += Count;
779 Sum.CountSum += FuncSum;
780
781 for (uint32_t VK = IPVK_First; VK <= IPVK_Last; ++VK) {
782 uint64_t KindSum = 0;
783 uint32_t NumValueSites = getNumValueSites(ValueKind: VK);
784 for (size_t I = 0; I < NumValueSites; ++I) {
785 for (const auto &V : getValueArrayForSite(ValueKind: VK, Site: I))
786 KindSum += V.Count;
787 }
788 Sum.ValueCounts[VK] += KindSum;
789 }
790}
791
// Accumulate the overlap score between this site's value data and \p Input's
// for value kind \p ValueKind, at both whole-program (\p Overlap) and
// per-function (\p FuncLevelOverlap) granularity. Both records are sorted by
// target value and walked like a sorted-list intersection; only targets
// present in both records contribute to the score.
void InstrProfValueSiteRecord::overlap(InstrProfValueSiteRecord &Input,
                                       uint32_t ValueKind,
                                       OverlapStats &Overlap,
                                       OverlapStats &FuncLevelOverlap) {
  this->sortByTargetValues();
  Input.sortByTargetValues();
  double Score = 0.0f, FuncLevelScore = 0.0f;
  auto I = ValueData.begin();
  auto IE = ValueData.end();
  auto J = Input.ValueData.begin();
  auto JE = Input.ValueData.end();
  // Two-pointer walk over both sorted sequences.
  while (I != IE && J != JE) {
    if (I->Value == J->Value) {
      // Target present in both profiles: score the counter agreement against
      // the program-level and function-level totals.
      Score += OverlapStats::score(Val1: I->Count, Val2: J->Count,
                                   Sum1: Overlap.Base.ValueCounts[ValueKind],
                                   Sum2: Overlap.Test.ValueCounts[ValueKind]);
      FuncLevelScore += OverlapStats::score(
          Val1: I->Count, Val2: J->Count, Sum1: FuncLevelOverlap.Base.ValueCounts[ValueKind],
          Sum2: FuncLevelOverlap.Test.ValueCounts[ValueKind]);
      ++I;
    } else if (I->Value < J->Value) {
      // This record's target is smaller: advance only I.
      ++I;
      continue;
    }
    ++J;
  }
  Overlap.Overlap.ValueCounts[ValueKind] += Score;
  FuncLevelOverlap.Overlap.ValueCounts[ValueKind] += FuncLevelScore;
}
821
822// Return false on mismatch.
823void InstrProfRecord::overlapValueProfData(uint32_t ValueKind,
824 InstrProfRecord &Other,
825 OverlapStats &Overlap,
826 OverlapStats &FuncLevelOverlap) {
827 uint32_t ThisNumValueSites = getNumValueSites(ValueKind);
828 assert(ThisNumValueSites == Other.getNumValueSites(ValueKind));
829 if (!ThisNumValueSites)
830 return;
831
832 std::vector<InstrProfValueSiteRecord> &ThisSiteRecords =
833 getOrCreateValueSitesForKind(ValueKind);
834 MutableArrayRef<InstrProfValueSiteRecord> OtherSiteRecords =
835 Other.getValueSitesForKind(ValueKind);
836 for (uint32_t I = 0; I < ThisNumValueSites; I++)
837 ThisSiteRecords[I].overlap(Input&: OtherSiteRecords[I], ValueKind, Overlap,
838 FuncLevelOverlap);
839}
840
// Compute the overlap between this record (the "base" profile) and \p Other
// (the "test" profile), accumulating program-level results into \p Overlap and
// per-function results into \p FuncLevelOverlap. \p ValueCutoff gates whether
// the function-level overlap is considered significant enough to record.
void InstrProfRecord::overlap(InstrProfRecord &Other, OverlapStats &Overlap,
                              OverlapStats &FuncLevelOverlap,
                              uint64_t ValueCutoff) {
  // FuncLevel CountSum for other should already computed and nonzero.
  assert(FuncLevelOverlap.Test.CountSum >= 1.0f);
  accumulateCounts(Sum&: FuncLevelOverlap.Base);
  // A differing counter count means bad data or a hash collision.
  bool Mismatch = (Counts.size() != Other.Counts.size());

  // Check if the value profiles mismatch.
  if (!Mismatch) {
    for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind) {
      uint32_t ThisNumValueSites = getNumValueSites(ValueKind: Kind);
      uint32_t OtherNumValueSites = Other.getNumValueSites(ValueKind: Kind);
      if (ThisNumValueSites != OtherNumValueSites) {
        Mismatch = true;
        break;
      }
    }
  }
  // Mismatched records contribute to the mismatch statistics only.
  if (Mismatch) {
    Overlap.addOneMismatch(MismatchFunc: FuncLevelOverlap.Test);
    return;
  }

  // Compute overlap for value counts.
  for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
    overlapValueProfData(ValueKind: Kind, Other, Overlap, FuncLevelOverlap);

  double Score = 0.0;
  uint64_t MaxCount = 0;
  // Compute overlap for edge counts.
  for (size_t I = 0, E = Other.Counts.size(); I < E; ++I) {
    Score += OverlapStats::score(Val1: Counts[I], Val2: Other.Counts[I],
                                 Sum1: Overlap.Base.CountSum, Sum2: Overlap.Test.CountSum);
    MaxCount = std::max(a: Other.Counts[I], b: MaxCount);
  }
  Overlap.Overlap.CountSum += Score;
  Overlap.Overlap.NumEntries += 1;

  // Only record function-level overlap when the test profile's hottest
  // counter reaches the cutoff; cold functions are skipped.
  if (MaxCount >= ValueCutoff) {
    double FuncScore = 0.0;
    for (size_t I = 0, E = Other.Counts.size(); I < E; ++I)
      FuncScore += OverlapStats::score(Val1: Counts[I], Val2: Other.Counts[I],
                                       Sum1: FuncLevelOverlap.Base.CountSum,
                                       Sum2: FuncLevelOverlap.Test.CountSum);
    FuncLevelOverlap.Overlap.CountSum = FuncScore;
    FuncLevelOverlap.Overlap.NumEntries = Other.Counts.size();
    FuncLevelOverlap.Valid = true;
  }
}
891
// Merge the value data of \p Input into this site record, scaling incoming
// counts by \p Weight. Entries with the same target value have their counts
// combined (saturating); unmatched entries from either side are kept. The
// result is sorted by target value. \p Warn is invoked on counter overflow.
void InstrProfValueSiteRecord::merge(InstrProfValueSiteRecord &Input,
                                     uint64_t Weight,
                                     function_ref<void(instrprof_error)> Warn) {
  // Sort both sides so the merge below is a linear two-pointer walk.
  this->sortByTargetValues();
  Input.sortByTargetValues();
  auto I = ValueData.begin();
  auto IE = ValueData.end();
  std::vector<InstrProfValueData> Merged;
  Merged.reserve(n: std::max(a: ValueData.size(), b: Input.ValueData.size()));
  for (const InstrProfValueData &J : Input.ValueData) {
    // Copy over our entries whose target value sorts before J's.
    while (I != IE && I->Value < J.Value) {
      Merged.push_back(x: *I);
      ++I;
    }
    if (I != IE && I->Value == J.Value) {
      // Same target on both sides: add J's weighted count into ours,
      // saturating at UINT64_MAX.
      bool Overflowed;
      I->Count = SaturatingMultiplyAdd(X: J.Count, Y: Weight, A: I->Count, ResultOverflowed: &Overflowed);
      if (Overflowed)
        Warn(instrprof_error::counter_overflow);
      Merged.push_back(x: *I);
      ++I;
      continue;
    }
    // Target only present in Input; take it as-is (already weighted by the
    // SaturatingMultiplyAdd path only when matched, so raw count here).
    Merged.push_back(x: J);
  }
  // Append any of our entries that sort after everything in Input.
  Merged.insert(position: Merged.end(), first: I, last: IE);
  ValueData = std::move(Merged);
}
920
921void InstrProfValueSiteRecord::scale(uint64_t N, uint64_t D,
922 function_ref<void(instrprof_error)> Warn) {
923 for (InstrProfValueData &I : ValueData) {
924 bool Overflowed;
925 I.Count = SaturatingMultiply(X: I.Count, Y: N, ResultOverflowed: &Overflowed) / D;
926 if (Overflowed)
927 Warn(instrprof_error::counter_overflow);
928 }
929}
930
931// Merge Value Profile data from Src record to this record for ValueKind.
932// Scale merged value counts by \p Weight.
933void InstrProfRecord::mergeValueProfData(
934 uint32_t ValueKind, InstrProfRecord &Src, uint64_t Weight,
935 function_ref<void(instrprof_error)> Warn) {
936 uint32_t ThisNumValueSites = getNumValueSites(ValueKind);
937 uint32_t OtherNumValueSites = Src.getNumValueSites(ValueKind);
938 if (ThisNumValueSites != OtherNumValueSites) {
939 Warn(instrprof_error::value_site_count_mismatch);
940 return;
941 }
942 if (!ThisNumValueSites)
943 return;
944 std::vector<InstrProfValueSiteRecord> &ThisSiteRecords =
945 getOrCreateValueSitesForKind(ValueKind);
946 MutableArrayRef<InstrProfValueSiteRecord> OtherSiteRecords =
947 Src.getValueSitesForKind(ValueKind);
948 for (uint32_t I = 0; I < ThisNumValueSites; I++)
949 ThisSiteRecords[I].merge(Input&: OtherSiteRecords[I], Weight, Warn);
950}
951
// Merge \p Other into this record, scaling Other's counters by \p Weight.
// Counters are added (saturating at the max representable count), bitmap
// bytes are ORed, and value profile data is merged per kind. Errors are
// reported through \p Warn rather than aborting the merge.
void InstrProfRecord::merge(InstrProfRecord &Other, uint64_t Weight,
                            function_ref<void(instrprof_error)> Warn) {
  // If the number of counters doesn't match we either have bad data
  // or a hash collision.
  if (Counts.size() != Other.Counts.size()) {
    Warn(instrprof_error::count_mismatch);
    return;
  }

  // Special handling of the first count as the PseudoCount.
  CountPseudoKind OtherKind = Other.getCountPseudoKind();
  CountPseudoKind ThisKind = getCountPseudoKind();
  if (OtherKind != NotPseudo || ThisKind != NotPseudo) {
    // We don't allow the merge of a profile with pseudo counts and
    // a normal profile (i.e. without pseudo counts).
    // Profile supplementation should be done after the profile merge.
    if (OtherKind == NotPseudo || ThisKind == NotPseudo) {
      Warn(instrprof_error::count_mismatch);
      return;
    }
    // Both are pseudo: keep the hotter classification.
    if (OtherKind == PseudoHot || ThisKind == PseudoHot)
      setPseudoCount(PseudoHot);
    else
      setPseudoCount(PseudoWarm);
    return;
  }

  // Add Other's weighted counters into ours, clamping to the maximum
  // representable counter value and warning on overflow.
  for (size_t I = 0, E = Other.Counts.size(); I < E; ++I) {
    bool Overflowed;
    uint64_t Value =
        SaturatingMultiplyAdd(X: Other.Counts[I], Y: Weight, A: Counts[I], ResultOverflowed: &Overflowed);
    if (Value > getInstrMaxCountValue()) {
      Value = getInstrMaxCountValue();
      Overflowed = true;
    }
    Counts[I] = Value;
    if (Overflowed)
      Warn(instrprof_error::counter_overflow);
  }

  // If the number of bitmap bytes doesn't match we either have bad data
  // or a hash collision.
  if (BitmapBytes.size() != Other.BitmapBytes.size()) {
    Warn(instrprof_error::bitmap_mismatch);
    return;
  }

  // Bitmap bytes are merged by simply ORing them together.
  for (size_t I = 0, E = Other.BitmapBytes.size(); I < E; ++I) {
    BitmapBytes[I] = Other.BitmapBytes[I] | BitmapBytes[I];
  }

  // Finally merge the value profile data for every kind.
  for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
    mergeValueProfData(ValueKind: Kind, Src&: Other, Weight, Warn);
}
1007
1008void InstrProfRecord::scaleValueProfData(
1009 uint32_t ValueKind, uint64_t N, uint64_t D,
1010 function_ref<void(instrprof_error)> Warn) {
1011 for (auto &R : getValueSitesForKind(ValueKind))
1012 R.scale(N, D, Warn);
1013}
1014
1015void InstrProfRecord::scale(uint64_t N, uint64_t D,
1016 function_ref<void(instrprof_error)> Warn) {
1017 assert(D != 0 && "D cannot be 0");
1018 for (auto &Count : this->Counts) {
1019 bool Overflowed;
1020 Count = SaturatingMultiply(X: Count, Y: N, ResultOverflowed: &Overflowed) / D;
1021 if (Count > getInstrMaxCountValue()) {
1022 Count = getInstrMaxCountValue();
1023 Overflowed = true;
1024 }
1025 if (Overflowed)
1026 Warn(instrprof_error::counter_overflow);
1027 }
1028 for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
1029 scaleValueProfData(ValueKind: Kind, N, D, Warn);
1030}
1031
1032// Map indirect call target name hash to name string.
1033uint64_t InstrProfRecord::remapValue(uint64_t Value, uint32_t ValueKind,
1034 InstrProfSymtab *SymTab) {
1035 if (!SymTab)
1036 return Value;
1037
1038 if (ValueKind == IPVK_IndirectCallTarget)
1039 return SymTab->getFunctionHashFromAddress(Address: Value);
1040
1041 if (ValueKind == IPVK_VTableTarget)
1042 return SymTab->getVTableHashFromAddress(Address: Value);
1043
1044 return Value;
1045}
1046
1047void InstrProfRecord::addValueData(uint32_t ValueKind, uint32_t Site,
1048 ArrayRef<InstrProfValueData> VData,
1049 InstrProfSymtab *ValueMap) {
1050 // Remap values.
1051 std::vector<InstrProfValueData> RemappedVD;
1052 RemappedVD.reserve(n: VData.size());
1053 for (const auto &V : VData) {
1054 uint64_t NewValue = remapValue(Value: V.Value, ValueKind, SymTab: ValueMap);
1055 RemappedVD.push_back(x: {.Value: NewValue, .Count: V.Count});
1056 }
1057
1058 std::vector<InstrProfValueSiteRecord> &ValueSites =
1059 getOrCreateValueSitesForKind(ValueKind);
1060 assert(ValueSites.size() == Site);
1061
1062 // Add a new value site with remapped value profiling data.
1063 ValueSites.emplace_back(args: std::move(RemappedVD));
1064}
1065
// Build BalancedPartitioning function nodes from temporal profile traces.
// Each function (IDT) gets the set of utility nodes (UNs) covering the
// interval from its first appearance in a trace to the trace's end; UNs are
// minted at exponentially spaced timestamps so early positions are weighted
// more finely. Nodes are appended to \p Nodes ordered by earliest timestamp.
void TemporalProfTraceTy::createBPFunctionNodes(
    ArrayRef<TemporalProfTraceTy> Traces, std::vector<BPFunctionNode> &Nodes,
    bool RemoveOutlierUNs) {
  using IDT = BPFunctionNode::IDT;
  using UtilityNodeT = BPFunctionNode::UtilityNodeT;
  // Utility node IDs are globally unique across all traces.
  UtilityNodeT MaxUN = 0;
  DenseMap<IDT, size_t> IdToFirstTimestamp;
  DenseMap<IDT, UtilityNodeT> IdToFirstUN;
  DenseMap<IDT, SmallVector<UtilityNodeT>> IdToUNs;
  // TODO: We need to use the Trace.Weight field to give more weight to more
  // important utilities
  for (auto &Trace : Traces) {
    size_t CutoffTimestamp = 1;
    for (size_t Timestamp = 0; Timestamp < Trace.FunctionNameRefs.size();
         Timestamp++) {
      IDT Id = Trace.FunctionNameRefs[Timestamp];
      // Record the earliest timestamp each function is seen at (across all
      // traces) for the final ordering below.
      auto [It, WasInserted] = IdToFirstTimestamp.try_emplace(Key: Id, Args&: Timestamp);
      if (!WasInserted)
        It->getSecond() = std::min<size_t>(a: It->getSecond(), b: Timestamp);
      // Mint a fresh UN whenever the timestamp crosses the (doubling) cutoff,
      // giving exponentially coarser UNs later in the trace.
      if (Timestamp >= CutoffTimestamp) {
        ++MaxUN;
        CutoffTimestamp = 2 * Timestamp;
      }
      IdToFirstUN.try_emplace(Key: Id, Args&: MaxUN);
    }
    // Each function in this trace gets every UN from its first appearance to
    // the end of the trace.
    for (auto &[Id, FirstUN] : IdToFirstUN)
      for (auto UN = FirstUN; UN <= MaxUN; ++UN)
        IdToUNs[Id].push_back(Elt: UN);
    // Start the next trace on a fresh UN.
    ++MaxUN;
    IdToFirstUN.clear();
  }

  if (RemoveOutlierUNs) {
    DenseMap<UtilityNodeT, unsigned> UNFrequency;
    for (auto &[Id, UNs] : IdToUNs)
      for (auto &UN : UNs)
        ++UNFrequency[UN];
    // Filter out utility nodes that are too infrequent or too prevalent to make
    // BalancedPartitioning more effective.
    for (auto &[Id, UNs] : IdToUNs)
      llvm::erase_if(C&: UNs, P: [&](auto &UN) {
        unsigned Freq = UNFrequency[UN];
        return Freq <= 1 || 2 * Freq > IdToUNs.size();
      });
  }

  for (auto &[Id, UNs] : IdToUNs)
    Nodes.emplace_back(args&: Id, args&: UNs);

  // Since BalancedPartitioning is sensitive to the initial order, we explicitly
  // order nodes by their earliest timestamp.
  llvm::sort(C&: Nodes, Comp: [&](auto &L, auto &R) {
    return std::make_pair(IdToFirstTimestamp[L.Id], L.Id) <
           std::make_pair(IdToFirstTimestamp[R.Id], R.Id);
  });
}
1122
1123#define INSTR_PROF_COMMON_API_IMPL
1124#include "llvm/ProfileData/InstrProfData.inc"
1125
1126/*!
1127 * ValueProfRecordClosure Interface implementation for InstrProfRecord
1128 * class. These C wrappers are used as adaptors so that C++ code can be
1129 * invoked as callbacks.
1130 */
1131uint32_t getNumValueKindsInstrProf(const void *Record) {
1132 return reinterpret_cast<const InstrProfRecord *>(Record)->getNumValueKinds();
1133}
1134
1135uint32_t getNumValueSitesInstrProf(const void *Record, uint32_t VKind) {
1136 return reinterpret_cast<const InstrProfRecord *>(Record)
1137 ->getNumValueSites(ValueKind: VKind);
1138}
1139
1140uint32_t getNumValueDataInstrProf(const void *Record, uint32_t VKind) {
1141 return reinterpret_cast<const InstrProfRecord *>(Record)
1142 ->getNumValueData(ValueKind: VKind);
1143}
1144
1145uint32_t getNumValueDataForSiteInstrProf(const void *R, uint32_t VK,
1146 uint32_t S) {
1147 const auto *IPR = reinterpret_cast<const InstrProfRecord *>(R);
1148 return IPR->getValueArrayForSite(ValueKind: VK, Site: S).size();
1149}
1150
1151void getValueForSiteInstrProf(const void *R, InstrProfValueData *Dst,
1152 uint32_t K, uint32_t S) {
1153 const auto *IPR = reinterpret_cast<const InstrProfRecord *>(R);
1154 llvm::copy(Range: IPR->getValueArrayForSite(ValueKind: K, Site: S), Out: Dst);
1155}
1156
1157ValueProfData *allocValueProfDataInstrProf(size_t TotalSizeInBytes) {
1158 ValueProfData *VD = new (::operator new(TotalSizeInBytes)) ValueProfData();
1159 memset(s: VD, c: 0, n: TotalSizeInBytes);
1160 return VD;
1161}
1162
// Shared closure template binding the C callback adaptors above. The Record
// slot is nullptr here; callers install the record they operate on (see
// ValueProfData::getSize / serializeFrom).
static ValueProfRecordClosure InstrProfRecordClosure = {
    .Record: nullptr,
    .GetNumValueKinds: getNumValueKindsInstrProf,
    .GetNumValueSites: getNumValueSitesInstrProf,
    .GetNumValueData: getNumValueDataInstrProf,
    .GetNumValueDataForSite: getNumValueDataForSiteInstrProf,
    .RemapValueData: nullptr,
    .GetValueForSite: getValueForSiteInstrProf,
    .AllocValueProfData: allocValueProfDataInstrProf};
1172
1173// Wrapper implementation using the closure mechanism.
1174uint32_t ValueProfData::getSize(const InstrProfRecord &Record) {
1175 auto Closure = InstrProfRecordClosure;
1176 Closure.Record = &Record;
1177 return getValueProfDataSize(Closure: &Closure);
1178}
1179
1180// Wrapper implementation using the closure mechanism.
1181std::unique_ptr<ValueProfData>
1182ValueProfData::serializeFrom(const InstrProfRecord &Record) {
1183 InstrProfRecordClosure.Record = &Record;
1184
1185 std::unique_ptr<ValueProfData> VPD(
1186 serializeValueProfDataFrom(Closure: &InstrProfRecordClosure, DstData: nullptr));
1187 return VPD;
1188}
1189
1190void ValueProfRecord::deserializeTo(InstrProfRecord &Record,
1191 InstrProfSymtab *SymTab) {
1192 Record.reserveSites(ValueKind: Kind, NumValueSites);
1193
1194 InstrProfValueData *ValueData = getValueProfRecordValueData(This: this);
1195 for (uint64_t VSite = 0; VSite < NumValueSites; ++VSite) {
1196 uint8_t ValueDataCount = this->SiteCountArray[VSite];
1197 ArrayRef<InstrProfValueData> VDs(ValueData, ValueDataCount);
1198 Record.addValueData(ValueKind: Kind, Site: VSite, VData: VDs, ValueMap: SymTab);
1199 ValueData += ValueDataCount;
1200 }
1201}
1202
// For writing/serializing, Old is the host endianness, and New is
// byte order intended on disk. For Reading/deserialization, Old
// is the on-disk source endianness, and New is the host endianness.
void ValueProfRecord::swapBytes(llvm::endianness Old, llvm::endianness New) {
  using namespace support;

  if (Old == New)
    return;

  // When reading (Old is foreign), the header fields must be swapped to host
  // order *before* they are used to compute the record's extent below.
  if (llvm::endianness::native != Old) {
    sys::swapByteOrder<uint32_t>(Value&: NumValueSites);
    sys::swapByteOrder<uint32_t>(Value&: Kind);
  }
  uint32_t ND = getValueProfRecordNumValueData(This: this);
  InstrProfValueData *VD = getValueProfRecordValueData(This: this);

  // No need to swap byte array: SiteCountArrray.
  for (uint32_t I = 0; I < ND; I++) {
    sys::swapByteOrder<uint64_t>(Value&: VD[I].Value);
    sys::swapByteOrder<uint64_t>(Value&: VD[I].Count);
  }
  // When writing (Old is host), the header fields were still in host order
  // for the extent computation above, so swap them last.
  if (llvm::endianness::native == Old) {
    sys::swapByteOrder<uint32_t>(Value&: NumValueSites);
    sys::swapByteOrder<uint32_t>(Value&: Kind);
  }
}
1229
1230void ValueProfData::deserializeTo(InstrProfRecord &Record,
1231 InstrProfSymtab *SymTab) {
1232 if (NumValueKinds == 0)
1233 return;
1234
1235 ValueProfRecord *VR = getFirstValueProfRecord(This: this);
1236 for (uint32_t K = 0; K < NumValueKinds; K++) {
1237 VR->deserializeTo(Record, SymTab);
1238 VR = getValueProfRecordNext(This: VR);
1239 }
1240}
1241
1242static std::unique_ptr<ValueProfData> allocValueProfData(uint32_t TotalSize) {
1243 return std::unique_ptr<ValueProfData>(new (::operator new(TotalSize))
1244 ValueProfData());
1245}
1246
1247Error ValueProfData::checkIntegrity() {
1248 if (NumValueKinds > IPVK_Last + 1)
1249 return make_error<InstrProfError>(
1250 Args: instrprof_error::malformed, Args: "number of value profile kinds is invalid");
1251 // Total size needs to be multiple of quadword size.
1252 if (TotalSize % sizeof(uint64_t))
1253 return make_error<InstrProfError>(
1254 Args: instrprof_error::malformed, Args: "total size is not multiples of quardword");
1255
1256 ValueProfRecord *VR = getFirstValueProfRecord(This: this);
1257 for (uint32_t K = 0; K < this->NumValueKinds; K++) {
1258 if (VR->Kind > IPVK_Last)
1259 return make_error<InstrProfError>(Args: instrprof_error::malformed,
1260 Args: "value kind is invalid");
1261 VR = getValueProfRecordNext(This: VR);
1262 if ((char *)VR - (char *)this > (ptrdiff_t)TotalSize)
1263 return make_error<InstrProfError>(
1264 Args: instrprof_error::malformed,
1265 Args: "value profile address is greater than total size");
1266 }
1267 return Error::success();
1268}
1269
// Materialize a ValueProfData from the raw byte stream at \p D (which must
// end before \p BufferEnd), converting from \p Endianness to host byte order
// and validating the result. Returns truncated/too_large/malformed errors on
// bad input.
Expected<std::unique_ptr<ValueProfData>>
ValueProfData::getValueProfData(const unsigned char *D,
                                const unsigned char *const BufferEnd,
                                llvm::endianness Endianness) {
  using namespace support;

  // There must be at least room for the fixed header.
  if (D + sizeof(ValueProfData) > BufferEnd)
    return make_error<InstrProfError>(Args: instrprof_error::truncated);

  // Read TotalSize (first header field) without advancing D.
  const unsigned char *Header = D;
  uint32_t TotalSize = endian::readNext<uint32_t>(memory&: Header, endian: Endianness);

  if (D + TotalSize > BufferEnd)
    return make_error<InstrProfError>(Args: instrprof_error::too_large);

  // Copy the whole region, then fix up endianness in place.
  std::unique_ptr<ValueProfData> VPD = allocValueProfData(TotalSize);
  memcpy(dest: VPD.get(), src: D, n: TotalSize);
  // Byte swap.
  VPD->swapBytesToHost(Endianness);

  Error E = VPD->checkIntegrity();
  if (E)
    return std::move(E);

  return std::move(VPD);
}
1296
1297void ValueProfData::swapBytesToHost(llvm::endianness Endianness) {
1298 using namespace support;
1299
1300 if (Endianness == llvm::endianness::native)
1301 return;
1302
1303 sys::swapByteOrder<uint32_t>(Value&: TotalSize);
1304 sys::swapByteOrder<uint32_t>(Value&: NumValueKinds);
1305
1306 ValueProfRecord *VR = getFirstValueProfRecord(This: this);
1307 for (uint32_t K = 0; K < NumValueKinds; K++) {
1308 VR->swapBytes(Old: Endianness, New: llvm::endianness::native);
1309 VR = getValueProfRecordNext(This: VR);
1310 }
1311}
1312
1313void ValueProfData::swapBytesFromHost(llvm::endianness Endianness) {
1314 using namespace support;
1315
1316 if (Endianness == llvm::endianness::native)
1317 return;
1318
1319 ValueProfRecord *VR = getFirstValueProfRecord(This: this);
1320 for (uint32_t K = 0; K < NumValueKinds; K++) {
1321 ValueProfRecord *NVR = getValueProfRecordNext(This: VR);
1322 VR->swapBytes(Old: llvm::endianness::native, New: Endianness);
1323 VR = NVR;
1324 }
1325 sys::swapByteOrder<uint32_t>(Value&: TotalSize);
1326 sys::swapByteOrder<uint32_t>(Value&: NumValueKinds);
1327}
1328
1329void annotateValueSite(Module &M, Instruction &Inst,
1330 const InstrProfRecord &InstrProfR,
1331 InstrProfValueKind ValueKind, uint32_t SiteIdx,
1332 uint32_t MaxMDCount) {
1333 auto VDs = InstrProfR.getValueArrayForSite(ValueKind, Site: SiteIdx);
1334 if (VDs.empty())
1335 return;
1336 uint64_t Sum = 0;
1337 for (const InstrProfValueData &V : VDs)
1338 Sum = SaturatingAdd(X: Sum, Y: V.Count);
1339 annotateValueSite(M, Inst, VDs, Sum, ValueKind, MaxMDCount);
1340}
1341
1342void annotateValueSite(Module &M, Instruction &Inst,
1343 ArrayRef<InstrProfValueData> VDs,
1344 uint64_t Sum, InstrProfValueKind ValueKind,
1345 uint32_t MaxMDCount) {
1346 if (VDs.empty())
1347 return;
1348 LLVMContext &Ctx = M.getContext();
1349 MDBuilder MDHelper(Ctx);
1350 SmallVector<Metadata *, 3> Vals;
1351 // Tag
1352 Vals.push_back(Elt: MDHelper.createString(Str: MDProfLabels::ValueProfile));
1353 // Value Kind
1354 Vals.push_back(Elt: MDHelper.createConstant(
1355 C: ConstantInt::get(Ty: Type::getInt32Ty(C&: Ctx), V: ValueKind)));
1356 // Total Count
1357 Vals.push_back(
1358 Elt: MDHelper.createConstant(C: ConstantInt::get(Ty: Type::getInt64Ty(C&: Ctx), V: Sum)));
1359
1360 // Value Profile Data
1361 uint32_t MDCount = MaxMDCount;
1362 for (const auto &VD : VDs) {
1363 Vals.push_back(Elt: MDHelper.createConstant(
1364 C: ConstantInt::get(Ty: Type::getInt64Ty(C&: Ctx), V: VD.Value)));
1365 Vals.push_back(Elt: MDHelper.createConstant(
1366 C: ConstantInt::get(Ty: Type::getInt64Ty(C&: Ctx), V: VD.Count)));
1367 if (--MDCount == 0)
1368 break;
1369 }
1370 Inst.setMetadata(KindID: LLVMContext::MD_prof, Node: MDNode::get(Context&: Ctx, MDs: Vals));
1371}
1372
1373MDNode *mayHaveValueProfileOfKind(const Instruction &Inst,
1374 InstrProfValueKind ValueKind) {
1375 MDNode *MD = Inst.getMetadata(KindID: LLVMContext::MD_prof);
1376 if (!MD)
1377 return nullptr;
1378
1379 if (MD->getNumOperands() < 5)
1380 return nullptr;
1381
1382 MDString *Tag = cast<MDString>(Val: MD->getOperand(I: 0));
1383 if (!Tag || Tag->getString() != MDProfLabels::ValueProfile)
1384 return nullptr;
1385
1386 // Now check kind:
1387 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD: MD->getOperand(I: 1));
1388 if (!KindInt)
1389 return nullptr;
1390 if (KindInt->getZExtValue() != ValueKind)
1391 return nullptr;
1392
1393 return MD;
1394}
1395
// Extract up to \p MaxNumValueData value-profile entries of kind
// \p ValueKind from \p Inst's !prof metadata. \p TotalC receives the total
// count recorded at the site (only written when metadata is present and well
// formed). Entries marked with NOMORE_ICP_MAGICNUM are skipped unless
// \p GetNoICPValue is set. Returns an empty vector when no (valid) metadata
// of this kind is attached.
SmallVector<InstrProfValueData, 4>
getValueProfDataFromInst(const Instruction &Inst, InstrProfValueKind ValueKind,
                         uint32_t MaxNumValueData, uint64_t &TotalC,
                         bool GetNoICPValue) {
  // Four inline elements seem to work well in practice. With MaxNumValueData,
  // this array won't grow very big anyway.
  SmallVector<InstrProfValueData, 4> ValueData;
  MDNode *MD = mayHaveValueProfileOfKind(Inst, ValueKind);
  if (!MD)
    return ValueData;
  const unsigned NOps = MD->getNumOperands();
  // Get total count
  ConstantInt *TotalCInt = mdconst::dyn_extract<ConstantInt>(MD: MD->getOperand(I: 2));
  if (!TotalCInt)
    return ValueData;
  TotalC = TotalCInt->getZExtValue();

  // Operands from index 3 onward are (value, count) pairs.
  ValueData.reserve(N: (NOps - 3) / 2);
  for (unsigned I = 3; I < NOps; I += 2) {
    if (ValueData.size() >= MaxNumValueData)
      break;
    ConstantInt *Value = mdconst::dyn_extract<ConstantInt>(MD: MD->getOperand(I));
    ConstantInt *Count =
        mdconst::dyn_extract<ConstantInt>(MD: MD->getOperand(I: I + 1));
    // Any malformed pair invalidates the whole node.
    if (!Value || !Count) {
      ValueData.clear();
      return ValueData;
    }
    uint64_t CntValue = Count->getZExtValue();
    // Skip entries flagged as "no more indirect-call promotion" unless the
    // caller explicitly asked for them.
    if (!GetNoICPValue && (CntValue == NOMORE_ICP_MAGICNUM))
      continue;
    InstrProfValueData V;
    V.Value = Value->getZExtValue();
    V.Count = CntValue;
    ValueData.push_back(Elt: V);
  }
  return ValueData;
}
1434
1435MDNode *getPGOFuncNameMetadata(const Function &F) {
1436 return F.getMetadata(Kind: getPGOFuncNameMetadataName());
1437}
1438
1439static void createPGONameMetadata(GlobalObject &GO, StringRef MetadataName,
1440 StringRef PGOName) {
1441 // Only for internal linkage functions or global variables. The name is not
1442 // the same as PGO name for these global objects.
1443 if (GO.getName() == PGOName)
1444 return;
1445
1446 // Don't create duplicated metadata.
1447 if (GO.getMetadata(Kind: MetadataName))
1448 return;
1449
1450 LLVMContext &C = GO.getContext();
1451 MDNode *N = MDNode::get(Context&: C, MDs: MDString::get(Context&: C, Str: PGOName));
1452 GO.setMetadata(Kind: MetadataName, Node: N);
1453}
1454
1455void createPGOFuncNameMetadata(Function &F, StringRef PGOFuncName) {
1456 return createPGONameMetadata(GO&: F, MetadataName: getPGOFuncNameMetadataName(), PGOName: PGOFuncName);
1457}
1458
1459void createPGONameMetadata(GlobalObject &GO, StringRef PGOName) {
1460 return createPGONameMetadata(GO, MetadataName: getPGONameMetadataName(), PGOName);
1461}
1462
1463bool needsComdatForCounter(const GlobalObject &GO, const Module &M) {
1464 if (GO.hasComdat())
1465 return true;
1466
1467 if (!M.getTargetTriple().supportsCOMDAT())
1468 return false;
1469
1470 // See createPGOFuncNameVar for more details. To avoid link errors, profile
1471 // counters for function with available_externally linkage needs to be changed
1472 // to linkonce linkage. On ELF based systems, this leads to weak symbols to be
1473 // created. Without using comdat, duplicate entries won't be removed by the
1474 // linker leading to increased data segement size and raw profile size. Even
1475 // worse, since the referenced counter from profile per-function data object
1476 // will be resolved to the common strong definition, the profile counts for
1477 // available_externally functions will end up being duplicated in raw profile
1478 // data. This can result in distorted profile as the counts of those dups
1479 // will be accumulated by the profile merger.
1480 GlobalValue::LinkageTypes Linkage = GO.getLinkage();
1481 if (Linkage != GlobalValue::ExternalWeakLinkage &&
1482 Linkage != GlobalValue::AvailableExternallyLinkage)
1483 return false;
1484
1485 return true;
1486}
1487
1488// Check if INSTR_PROF_RAW_VERSION_VAR is defined.
1489bool isIRPGOFlagSet(const Module *M) {
1490 const GlobalVariable *IRInstrVar =
1491 M->getNamedGlobal(INSTR_PROF_QUOTE(INSTR_PROF_RAW_VERSION_VAR));
1492 if (!IRInstrVar || IRInstrVar->hasLocalLinkage())
1493 return false;
1494
1495 // For CSPGO+LTO, this variable might be marked as non-prevailing and we only
1496 // have the decl.
1497 if (IRInstrVar->isDeclaration())
1498 return true;
1499
1500 // Check if the flag is set.
1501 if (!IRInstrVar->hasInitializer())
1502 return false;
1503
1504 auto *InitVal = dyn_cast_or_null<ConstantInt>(Val: IRInstrVar->getInitializer());
1505 if (!InitVal)
1506 return false;
1507 return (InitVal->getZExtValue() & VARIANT_MASK_IR_PROF) != 0;
1508}
1509
1510// Check if we can safely rename this Comdat function.
1511bool canRenameComdatFunc(const Function &F, bool CheckAddressTaken) {
1512 if (F.getName().empty())
1513 return false;
1514 if (!needsComdatForCounter(GO: F, M: *(F.getParent())))
1515 return false;
1516 // Unsafe to rename the address-taken function (which can be used in
1517 // function comparison).
1518 if (CheckAddressTaken && F.hasAddressTaken())
1519 return false;
1520 // Only safe to do if this function may be discarded if it is not used
1521 // in the compilation unit.
1522 if (!GlobalValue::isDiscardableIfUnused(Linkage: F.getLinkage()))
1523 return false;
1524
1525 // For AvailableExternallyLinkage functions.
1526 if (!F.hasComdat()) {
1527 assert(F.getLinkage() == GlobalValue::AvailableExternallyLinkage);
1528 return true;
1529 }
1530 return true;
1531}
1532
1533// Create the variable for the profile file name.
1534void createProfileFileNameVar(Module &M, StringRef InstrProfileOutput) {
1535 if (InstrProfileOutput.empty())
1536 return;
1537 Constant *ProfileNameConst =
1538 ConstantDataArray::getString(Context&: M.getContext(), Initializer: InstrProfileOutput, AddNull: true);
1539 GlobalVariable *ProfileNameVar = new GlobalVariable(
1540 M, ProfileNameConst->getType(), true, GlobalValue::WeakAnyLinkage,
1541 ProfileNameConst, INSTR_PROF_QUOTE(INSTR_PROF_PROFILE_NAME_VAR));
1542 ProfileNameVar->setVisibility(GlobalValue::HiddenVisibility);
1543 Triple TT(M.getTargetTriple());
1544 if (TT.supportsCOMDAT()) {
1545 ProfileNameVar->setLinkage(GlobalValue::ExternalLinkage);
1546 ProfileNameVar->setComdat(M.getOrInsertComdat(
1547 Name: StringRef(INSTR_PROF_QUOTE(INSTR_PROF_PROFILE_NAME_VAR))));
1548 }
1549}
1550
1551Error OverlapStats::accumulateCounts(const std::string &BaseFilename,
1552 const std::string &TestFilename,
1553 bool IsCS) {
1554 auto GetProfileSum = [IsCS](const std::string &Filename,
1555 CountSumOrPercent &Sum) -> Error {
1556 // This function is only used from llvm-profdata that doesn't use any kind
1557 // of VFS. Just create a default RealFileSystem to read profiles.
1558 auto FS = vfs::getRealFileSystem();
1559 auto ReaderOrErr = InstrProfReader::create(Path: Filename, FS&: *FS);
1560 if (Error E = ReaderOrErr.takeError()) {
1561 return E;
1562 }
1563 auto Reader = std::move(ReaderOrErr.get());
1564 Reader->accumulateCounts(Sum, IsCS);
1565 return Error::success();
1566 };
1567 auto Ret = GetProfileSum(BaseFilename, Base);
1568 if (Ret)
1569 return Ret;
1570 Ret = GetProfileSum(TestFilename, Test);
1571 if (Ret)
1572 return Ret;
1573 this->BaseFilename = &BaseFilename;
1574 this->TestFilename = &TestFilename;
1575 Valid = true;
1576 return Error::success();
1577}
1578
1579void OverlapStats::addOneMismatch(const CountSumOrPercent &MismatchFunc) {
1580 Mismatch.NumEntries += 1;
1581 Mismatch.CountSum += MismatchFunc.CountSum / Test.CountSum;
1582 for (unsigned I = 0; I < IPVK_Last - IPVK_First + 1; I++) {
1583 if (Test.ValueCounts[I] >= 1.0f)
1584 Mismatch.ValueCounts[I] +=
1585 MismatchFunc.ValueCounts[I] / Test.ValueCounts[I];
1586 }
1587}
1588
1589void OverlapStats::addOneUnique(const CountSumOrPercent &UniqueFunc) {
1590 Unique.NumEntries += 1;
1591 Unique.CountSum += UniqueFunc.CountSum / Test.CountSum;
1592 for (unsigned I = 0; I < IPVK_Last - IPVK_First + 1; I++) {
1593 if (Test.ValueCounts[I] >= 1.0f)
1594 Unique.ValueCounts[I] += UniqueFunc.ValueCounts[I] / Test.ValueCounts[I];
1595 }
1596}
1597
// Print a human-readable overlap report to \p OS. At ProgramLevel the report
// covers whole profiles (identified by file names); otherwise it covers a
// single function. No output is produced for invalid (uncomputed) stats.
void OverlapStats::dump(raw_fd_ostream &OS) const {
  if (!Valid)
    return;

  // Entries are functions at program level, edge counters at function level.
  const char *EntryName =
      (Level == ProgramLevel ? "functions" : "edge counters");
  if (Level == ProgramLevel) {
    OS << "Profile overlap information for base_profile: " << *BaseFilename
       << " and test_profile: " << *TestFilename << "\nProgram level:\n";
  } else {
    OS << "Function level:\n"
       << "  Function: " << FuncName << " (Hash=" << FuncHash << ")\n";
  }

  OS << "  # of " << EntryName << " overlap: " << Overlap.NumEntries << "\n";
  if (Mismatch.NumEntries)
    OS << "  # of " << EntryName << " mismatch: " << Mismatch.NumEntries
       << "\n";
  if (Unique.NumEntries)
    OS << "  # of " << EntryName
       << " only in test_profile: " << Unique.NumEntries << "\n";

  // Edge-count statistics (percentages of the respective CountSum).
  OS << "  Edge profile overlap: " << format(Fmt: "%.3f%%", Vals: Overlap.CountSum * 100)
     << "\n";
  if (Mismatch.NumEntries)
    OS << "  Mismatched count percentage (Edge): "
       << format(Fmt: "%.3f%%", Vals: Mismatch.CountSum * 100) << "\n";
  if (Unique.NumEntries)
    OS << "  Percentage of Edge profile only in test_profile: "
       << format(Fmt: "%.3f%%", Vals: Unique.CountSum * 100) << "\n";
  OS << "  Edge profile base count sum: " << format(Fmt: "%.0f", Vals: Base.CountSum)
     << "\n"
     << "  Edge profile test count sum: " << format(Fmt: "%.0f", Vals: Test.CountSum)
     << "\n";

  // Per-value-kind statistics; kinds absent from both profiles are skipped.
  for (unsigned I = 0; I < IPVK_Last - IPVK_First + 1; I++) {
    if (Base.ValueCounts[I] < 1.0f && Test.ValueCounts[I] < 1.0f)
      continue;
    char ProfileKindName[20] = {0};
    switch (I) {
    case IPVK_IndirectCallTarget:
      strncpy(dest: ProfileKindName, src: "IndirectCall", n: 19);
      break;
    case IPVK_MemOPSize:
      strncpy(dest: ProfileKindName, src: "MemOP", n: 19);
      break;
    case IPVK_VTableTarget:
      strncpy(dest: ProfileKindName, src: "VTable", n: 19);
      break;
    default:
      snprintf(s: ProfileKindName, maxlen: 19, format: "VP[%d]", I);
      break;
    }
    OS << "  " << ProfileKindName
       << " profile overlap: " << format(Fmt: "%.3f%%", Vals: Overlap.ValueCounts[I] * 100)
       << "\n";
    if (Mismatch.NumEntries)
      OS << "  Mismatched count percentage (" << ProfileKindName
         << "): " << format(Fmt: "%.3f%%", Vals: Mismatch.ValueCounts[I] * 100) << "\n";
    if (Unique.NumEntries)
      OS << "  Percentage of " << ProfileKindName
         << " profile only in test_profile: "
         << format(Fmt: "%.3f%%", Vals: Unique.ValueCounts[I] * 100) << "\n";
    OS << "  " << ProfileKindName
       << " profile base count sum: " << format(Fmt: "%.0f", Vals: Base.ValueCounts[I])
       << "\n"
       << "  " << ProfileKindName
       << " profile test count sum: " << format(Fmt: "%.0f", Vals: Test.ValueCounts[I])
       << "\n";
  }
}
1669
1670namespace IndexedInstrProf {
1671Expected<Header> Header::readFromBuffer(const unsigned char *Buffer) {
1672 using namespace support;
1673 static_assert(std::is_standard_layout_v<Header>,
1674 "Use standard layout for Header for simplicity");
1675 Header H;
1676
1677 H.Magic = endian::readNext<uint64_t, llvm::endianness::little>(memory&: Buffer);
1678 // Check the magic number.
1679 if (H.Magic != IndexedInstrProf::Magic)
1680 return make_error<InstrProfError>(Args: instrprof_error::bad_magic);
1681
1682 // Read the version.
1683 H.Version = endian::readNext<uint64_t, llvm::endianness::little>(memory&: Buffer);
1684 if (H.getIndexedProfileVersion() >
1685 IndexedInstrProf::ProfVersion::CurrentVersion)
1686 return make_error<InstrProfError>(Args: instrprof_error::unsupported_version);
1687
1688 static_assert(IndexedInstrProf::ProfVersion::CurrentVersion == Version13,
1689 "Please update the reader as needed when a new field is added "
1690 "or when indexed profile version gets bumped.");
1691
1692 Buffer += sizeof(uint64_t); // Skip Header.Unused field.
1693 H.HashType = endian::readNext<uint64_t, llvm::endianness::little>(memory&: Buffer);
1694 H.HashOffset = endian::readNext<uint64_t, llvm::endianness::little>(memory&: Buffer);
1695 if (H.getIndexedProfileVersion() >= 8)
1696 H.MemProfOffset =
1697 endian::readNext<uint64_t, llvm::endianness::little>(memory&: Buffer);
1698 if (H.getIndexedProfileVersion() >= 9)
1699 H.BinaryIdOffset =
1700 endian::readNext<uint64_t, llvm::endianness::little>(memory&: Buffer);
1701 // Version 11 is handled by this condition.
1702 if (H.getIndexedProfileVersion() >= 10)
1703 H.TemporalProfTracesOffset =
1704 endian::readNext<uint64_t, llvm::endianness::little>(memory&: Buffer);
1705 if (H.getIndexedProfileVersion() >= 12)
1706 H.VTableNamesOffset =
1707 endian::readNext<uint64_t, llvm::endianness::little>(memory&: Buffer);
1708 return H;
1709}
1710
// Returns the numeric indexed-profile format version extracted from the raw
// on-disk Version field via the GET_VERSION macro (presumably masking off
// feature-flag bits; the macro is defined elsewhere — confirm in
// InstrProfData.inc). The result is what readFromBuffer() and size() compare
// against ProfVersion enumerators.
uint64_t Header::getIndexedProfileVersion() const {
  return GET_VERSION(Version);
}
1714
1715size_t Header::size() const {
1716 switch (getIndexedProfileVersion()) {
1717 // To retain backward compatibility, new fields must be appended to the end
1718 // of the header, and byte offset of existing fields shouldn't change when
1719 // indexed profile version gets incremented.
1720 static_assert(
1721 IndexedInstrProf::ProfVersion::CurrentVersion == Version13,
1722 "Please update the size computation below if a new field has "
1723 "been added to the header; for a version bump without new "
1724 "fields, add a case statement to fall through to the latest version.");
1725 case 13ull:
1726 case 12ull:
1727 return 72;
1728 case 11ull:
1729 [[fallthrough]];
1730 case 10ull:
1731 return 64;
1732 case 9ull:
1733 return 56;
1734 case 8ull:
1735 return 48;
1736 default: // Version7 (when the backwards compatible header was introduced).
1737 return 40;
1738 }
1739}
1740
1741} // namespace IndexedInstrProf
1742
1743} // end namespace llvm
1744