//===- Writer.cpp ---------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "Writer.h"
#include "Config.h"
#include "InputChunks.h"
#include "InputElement.h"
#include "MapFile.h"
#include "OutputSections.h"
#include "OutputSegment.h"
#include "Relocations.h"
#include "SymbolTable.h"
#include "SyntheticSections.h"
#include "WriterUtils.h"
#include "lld/Common/Arrays.h"
#include "lld/Common/CommonLinkerContext.h"
#include "lld/Common/Strings.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/BinaryFormat/Wasm.h"
#include "llvm/Support/FileOutputBuffer.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Parallel.h"
#include "llvm/Support/RandomNumberGenerator.h"
#include "llvm/Support/SHA1.h"
#include "llvm/Support/xxhash.h"

#include <array>
#include <cstdarg>
#include <functional>
#include <list>
#include <optional>
38
39#define DEBUG_TYPE "lld"
40
41using namespace llvm;
42using namespace llvm::wasm;
43
44namespace lld::wasm {
// Alignment (in bytes) enforced for the explicit stack region and for the
// heap base address computed in layoutMemory().
static constexpr int stackAlignment = 16;
static constexpr int heapAlignment = 16;
47
48namespace {
49
50// The writer writes a SymbolTable result to a file.
51class Writer {
52public:
53 void run();
54
55private:
56 void openFile();
57
58 bool needsPassiveInitialization(const OutputSegment *segment);
59 bool hasPassiveInitializedSegments();
60
61 void createSyntheticInitFunctions();
62 void createInitMemoryFunction();
63 void createStartFunction();
64 void createApplyDataRelocationsFunction();
65 void createApplyGlobalRelocationsFunction();
66 void createApplyTLSRelocationsFunction();
67 void createApplyGlobalTLSRelocationsFunction();
68 void createCallCtorsFunction();
69 void createInitTLSFunction();
70 void createCommandExportWrappers();
71 void createCommandExportWrapper(uint32_t functionIndex, DefinedFunction *f);
72
73 void assignIndexes();
74 void populateSymtab();
75 void populateProducers();
76 void populateTargetFeatures();
77 // populateTargetFeatures happens early on so some checks are delayed
78 // until imports and exports are finalized. There are run unstead
79 // in checkImportExportTargetFeatures
80 void checkImportExportTargetFeatures();
81 void calculateInitFunctions();
82 void calculateImports();
83 void calculateExports();
84 void calculateCustomSections();
85 void calculateTypes();
86 void createOutputSegments();
87 OutputSegment *createOutputSegment(StringRef name);
88 void combineOutputSegments();
89 void layoutMemory();
90 void createHeader();
91
92 void addSection(OutputSection *sec);
93
94 void addSections();
95
96 void createCustomSections();
97 void createSyntheticSections();
98 void createSyntheticSectionsPostLayout();
99 void finalizeSections();
100
101 // Custom sections
102 void createRelocSections();
103
104 void writeHeader();
105 void writeSections();
106 void writeBuildId();
107
108 uint64_t fileSize = 0;
109
110 std::vector<WasmInitEntry> initFunctions;
111 llvm::MapVector<StringRef, std::vector<InputChunk *>> customSectionMapping;
112
113 // Stable storage for command export wrapper function name strings.
114 std::list<std::string> commandExportWrapperNames;
115
116 // Elements that are used to construct the final output
117 std::string header;
118 std::vector<OutputSection *> outputSections;
119
120 std::unique_ptr<FileOutputBuffer> buffer;
121
122 std::vector<OutputSegment *> segments;
123 llvm::SmallDenseMap<StringRef, OutputSegment *> segmentMap;
124};
125
126} // anonymous namespace
127
128void Writer::calculateCustomSections() {
129 log(msg: "calculateCustomSections");
130 bool stripDebug = ctx.arg.stripDebug || ctx.arg.stripAll;
131 for (ObjFile *file : ctx.objectFiles) {
132 for (InputChunk *section : file->customSections) {
133 // Exclude COMDAT sections that are not selected for inclusion
134 if (section->discarded)
135 continue;
136 // Ignore empty custom sections. In particular objcopy/strip will
137 // sometimes replace stripped sections with empty custom sections to
138 // avoid section re-numbering.
139 if (section->getSize() == 0)
140 continue;
141 StringRef name = section->name;
142 // These custom sections are known the linker and synthesized rather than
143 // blindly copied.
144 if (name == "linking" || name == "name" || name == "producers" ||
145 name == "target_features" || name.starts_with(Prefix: "reloc."))
146 continue;
147 // These custom sections are generated by `clang -fembed-bitcode`.
148 // These are used by the rust toolchain to ship LTO data along with
149 // compiled object code, but they don't want this included in the linker
150 // output.
151 if (name == ".llvmbc" || name == ".llvmcmd")
152 continue;
153 // Strip debug section in that option was specified.
154 if (stripDebug && name.starts_with(Prefix: ".debug_"))
155 continue;
156 // Otherwise include custom sections by default and concatenate their
157 // contents.
158 customSectionMapping[name].push_back(x: section);
159 }
160 }
161}
162
163void Writer::createCustomSections() {
164 log(msg: "createCustomSections");
165 for (auto &pair : customSectionMapping) {
166 StringRef name = pair.first;
167 LLVM_DEBUG(dbgs() << "createCustomSection: " << name << "\n");
168
169 OutputSection *sec = make<CustomSection>(args: std::string(name), args&: pair.second);
170 if (ctx.arg.relocatable || ctx.arg.emitRelocs) {
171 auto *sym = make<OutputSectionSymbol>(args&: sec);
172 out.linkingSec->addToSymtab(sym);
173 sec->sectionSym = sym;
174 }
175 addSection(sec);
176 }
177}
178
179// Create relocations sections in the final output.
180// These are only created when relocatable output is requested.
181void Writer::createRelocSections() {
182 log(msg: "createRelocSections");
183 // Don't use iterator here since we are adding to OutputSection
184 size_t origSize = outputSections.size();
185 for (size_t i = 0; i < origSize; i++) {
186 LLVM_DEBUG(dbgs() << "check section " << i << "\n");
187 OutputSection *sec = outputSections[i];
188
189 // Count the number of needed sections.
190 uint32_t count = sec->getNumRelocations();
191 if (!count)
192 continue;
193
194 StringRef name;
195 if (sec->type == WASM_SEC_DATA)
196 name = "reloc.DATA";
197 else if (sec->type == WASM_SEC_CODE)
198 name = "reloc.CODE";
199 else if (sec->type == WASM_SEC_CUSTOM)
200 name = saver().save(S: "reloc." + sec->name);
201 else
202 llvm_unreachable(
203 "relocations only supported for code, data, or custom sections");
204
205 addSection(sec: make<RelocSection>(args&: name, args&: sec));
206 }
207}
208
209void Writer::populateProducers() {
210 for (ObjFile *file : ctx.objectFiles) {
211 const WasmProducerInfo &info = file->getWasmObj()->getProducerInfo();
212 out.producersSec->addInfo(info);
213 }
214}
215
216void Writer::writeHeader() {
217 memcpy(dest: buffer->getBufferStart(), src: header.data(), n: header.size());
218}
219
220void Writer::writeSections() {
221 uint8_t *buf = buffer->getBufferStart();
222 parallelForEach(R&: outputSections, Fn: [buf](OutputSection *s) {
223 assert(s->isNeeded());
224 s->writeTo(buf);
225 });
226}
227
228// Computes a hash value of Data using a given hash function.
229// In order to utilize multiple cores, we first split data into 1MB
230// chunks, compute a hash for each chunk, and then compute a hash value
231// of the hash values.
232
233static void
234computeHash(llvm::MutableArrayRef<uint8_t> hashBuf,
235 llvm::ArrayRef<uint8_t> data,
236 std::function<void(uint8_t *dest, ArrayRef<uint8_t> arr)> hashFn) {
237 std::vector<ArrayRef<uint8_t>> chunks = split(arr: data, chunkSize: 1024 * 1024);
238 std::vector<uint8_t> hashes(chunks.size() * hashBuf.size());
239
240 // Compute hash values.
241 parallelFor(Begin: 0, End: chunks.size(), Fn: [&](size_t i) {
242 hashFn(hashes.data() + i * hashBuf.size(), chunks[i]);
243 });
244
245 // Write to the final output buffer.
246 hashFn(hashBuf.data(), hashes);
247}
248
249static void makeUUID(unsigned version, llvm::ArrayRef<uint8_t> fileHash,
250 llvm::MutableArrayRef<uint8_t> output) {
251 assert((version == 4 || version == 5) && "Unknown UUID version");
252 assert(output.size() == 16 && "Wrong size for UUID output");
253 if (version == 5) {
254 // Build a valid v5 UUID from a hardcoded (randomly-generated) namespace
255 // UUID, and the computed hash of the output.
256 std::array<uint8_t, 16> namespaceUUID{0xA1, 0xFA, 0x48, 0x2D, 0x0E, 0x22,
257 0x03, 0x8D, 0x33, 0x8B, 0x52, 0x1C,
258 0xD6, 0xD2, 0x12, 0xB2};
259 SHA1 sha;
260 sha.update(Data: namespaceUUID);
261 sha.update(Data: fileHash);
262 auto s = sha.final();
263 std::copy(first: s.data(), last: &s.data()[output.size()], result: output.data());
264 } else if (version == 4) {
265 if (auto ec = llvm::getRandomBytes(Buffer: output.data(), Size: output.size()))
266 error(msg: "entropy source failure: " + ec.message());
267 }
268 // Set the UUID version and variant fields.
269 // The version is the upper nibble of byte 6 (0b0101xxxx or 0b0100xxxx)
270 output[6] = (static_cast<uint8_t>(version) << 4) | (output[6] & 0xF);
271
272 // The variant is DCE 1.1/ISO 11578 (0b10xxxxxx)
273 output[8] &= 0xBF;
274 output[8] |= 0x80;
275}
276
277void Writer::writeBuildId() {
278 if (!out.buildIdSec->isNeeded())
279 return;
280 if (ctx.arg.buildId == BuildIdKind::Hexstring) {
281 out.buildIdSec->writeBuildId(buf: ctx.arg.buildIdVector);
282 return;
283 }
284
285 // Compute a hash of all sections of the output file.
286 size_t hashSize = out.buildIdSec->hashSize;
287 std::vector<uint8_t> buildId(hashSize);
288 llvm::ArrayRef<uint8_t> buf{buffer->getBufferStart(), size_t(fileSize)};
289
290 switch (ctx.arg.buildId) {
291 case BuildIdKind::Fast: {
292 std::vector<uint8_t> fileHash(8);
293 computeHash(hashBuf: fileHash, data: buf, hashFn: [](uint8_t *dest, ArrayRef<uint8_t> arr) {
294 support::endian::write64le(P: dest, V: xxh3_64bits(data: arr));
295 });
296 makeUUID(version: 5, fileHash, output: buildId);
297 break;
298 }
299 case BuildIdKind::Sha1:
300 computeHash(hashBuf: buildId, data: buf, hashFn: [&](uint8_t *dest, ArrayRef<uint8_t> arr) {
301 memcpy(dest: dest, src: SHA1::hash(Data: arr).data(), n: hashSize);
302 });
303 break;
304 case BuildIdKind::Uuid:
305 makeUUID(version: 4, fileHash: {}, output: buildId);
306 break;
307 default:
308 llvm_unreachable("unknown BuildIdKind");
309 }
310 out.buildIdSec->writeBuildId(buf: buildId);
311}
312
313static void setGlobalPtr(DefinedGlobal *g, uint64_t memoryPtr) {
314 LLVM_DEBUG(dbgs() << "setGlobalPtr " << g->getName() << " -> " << memoryPtr << "\n");
315 g->global->setPointerValue(memoryPtr);
316}
317
318static void checkPageAligned(StringRef name, uint64_t value) {
319 if (value != alignTo(Value: value, Align: ctx.arg.pageSize))
320 error(msg: name + " must be aligned to the page size (" +
321 Twine(ctx.arg.pageSize) + " bytes)");
322}
323
324// Fix the memory layout of the output binary. This assigns memory offsets
325// to each of the input data sections as well as the explicit stack region.
326// The default memory layout is as follows, from low to high.
327//
328// - initialized data (starting at ctx.arg.globalBase)
329// - BSS data (not currently implemented in llvm)
330// - explicit stack (ctx.arg.ZStackSize)
331// - heap start / unallocated
332//
333// The --stack-first option means that stack is placed before any static data.
334// This can be useful since it means that stack overflow traps immediately
335// rather than overwriting global data, but also increases code size since all
336// static data loads and stores requires larger offsets.
337void Writer::layoutMemory() {
338 uint64_t memoryPtr = 0;
339
340 auto placeStack = [&]() {
341 if (ctx.arg.relocatable || ctx.isPic)
342 return;
343 memoryPtr = alignTo(Value: memoryPtr, Align: stackAlignment);
344 if (ctx.sym.stackLow)
345 ctx.sym.stackLow->setVA(memoryPtr);
346 if (ctx.arg.zStackSize != alignTo(Value: ctx.arg.zStackSize, Align: stackAlignment))
347 error(msg: "stack size must be " + Twine(stackAlignment) + "-byte aligned");
348 log(msg: "mem: stack size = " + Twine(ctx.arg.zStackSize));
349 log(msg: "mem: stack base = " + Twine(memoryPtr));
350 memoryPtr += ctx.arg.zStackSize;
351 setGlobalPtr(g: cast<DefinedGlobal>(Val: ctx.sym.stackPointer), memoryPtr);
352 if (ctx.sym.stackHigh)
353 ctx.sym.stackHigh->setVA(memoryPtr);
354 log(msg: "mem: stack top = " + Twine(memoryPtr));
355 };
356
357 if (ctx.arg.stackFirst) {
358 placeStack();
359 if (ctx.arg.globalBase) {
360 if (ctx.arg.globalBase < memoryPtr) {
361 error(msg: "--global-base cannot be less than stack size when --stack-first is used");
362 return;
363 }
364 memoryPtr = ctx.arg.globalBase;
365 }
366 } else {
367 memoryPtr = ctx.arg.globalBase;
368 }
369
370 log(msg: "mem: global base = " + Twine(memoryPtr));
371 if (ctx.sym.globalBase)
372 ctx.sym.globalBase->setVA(memoryPtr);
373
374 uint64_t dataStart = memoryPtr;
375
376 // Arbitrarily set __dso_handle handle to point to the start of the data
377 // segments.
378 if (ctx.sym.dsoHandle)
379 ctx.sym.dsoHandle->setVA(dataStart);
380
381 out.dylinkSec->memAlign = 0;
382 for (OutputSegment *seg : segments) {
383 out.dylinkSec->memAlign = std::max(a: out.dylinkSec->memAlign, b: seg->alignment);
384 memoryPtr = alignTo(Value: memoryPtr, Align: 1ULL << seg->alignment);
385 seg->startVA = memoryPtr;
386 log(msg: formatv(Fmt: "mem: {0,-15} offset={1,-8} size={2,-8} align={3}", Vals&: seg->name,
387 Vals&: memoryPtr, Vals&: seg->size, Vals&: seg->alignment));
388
389 if (!ctx.arg.relocatable && seg->isTLS()) {
390 if (ctx.sym.tlsSize) {
391 auto *tlsSize = cast<DefinedGlobal>(Val: ctx.sym.tlsSize);
392 setGlobalPtr(g: tlsSize, memoryPtr: seg->size);
393 }
394 if (ctx.sym.tlsAlign) {
395 auto *tlsAlign = cast<DefinedGlobal>(Val: ctx.sym.tlsAlign);
396 setGlobalPtr(g: tlsAlign, memoryPtr: int64_t{1} << seg->alignment);
397 }
398 if (!ctx.arg.sharedMemory && ctx.sym.tlsBase) {
399 auto *tlsBase = cast<DefinedGlobal>(Val: ctx.sym.tlsBase);
400 setGlobalPtr(g: tlsBase, memoryPtr);
401 }
402 }
403
404 memoryPtr += seg->size;
405 }
406
407 // Make space for the memory initialization flag
408 if (ctx.arg.sharedMemory && hasPassiveInitializedSegments()) {
409 memoryPtr = alignTo(Value: memoryPtr, Align: 4);
410 ctx.sym.initMemoryFlag = symtab->addSyntheticDataSymbol(
411 name: "__wasm_init_memory_flag", flags: WASM_SYMBOL_VISIBILITY_HIDDEN);
412 ctx.sym.initMemoryFlag->markLive();
413 ctx.sym.initMemoryFlag->setVA(memoryPtr);
414 log(msg: formatv(Fmt: "mem: {0,-15} offset={1,-8} size={2,-8} align={3}",
415 Vals: "__wasm_init_memory_flag", Vals&: memoryPtr, Vals: 4, Vals: 4));
416 memoryPtr += 4;
417 }
418
419 if (ctx.sym.dataEnd)
420 ctx.sym.dataEnd->setVA(memoryPtr);
421
422 uint64_t staticDataSize = memoryPtr - dataStart;
423 log(msg: "mem: static data = " + Twine(staticDataSize));
424 if (ctx.isPic)
425 out.dylinkSec->memSize = staticDataSize;
426
427 if (!ctx.arg.stackFirst)
428 placeStack();
429
430 if (ctx.sym.heapBase) {
431 // Set `__heap_base` to follow the end of the stack or global data. The
432 // fact that this comes last means that a malloc/brk implementation can
433 // grow the heap at runtime.
434 // We'll align the heap base here because memory allocators might expect
435 // __heap_base to be aligned already.
436 memoryPtr = alignTo(Value: memoryPtr, Align: heapAlignment);
437 log(msg: "mem: heap base = " + Twine(memoryPtr));
438 ctx.sym.heapBase->setVA(memoryPtr);
439 }
440
441 uint64_t maxMemorySetting = 1ULL << 32;
442 if (ctx.arg.is64.value_or(u: false)) {
443 // TODO: Update once we decide on a reasonable limit here:
444 // https://github.com/WebAssembly/memory64/issues/33
445 maxMemorySetting = 1ULL << 34;
446 }
447
448 if (ctx.arg.initialHeap != 0) {
449 checkPageAligned(name: "initial heap", value: ctx.arg.initialHeap);
450 uint64_t maxInitialHeap = maxMemorySetting - memoryPtr;
451 if (ctx.arg.initialHeap > maxInitialHeap)
452 error(msg: "initial heap too large, cannot be greater than " +
453 Twine(maxInitialHeap));
454 memoryPtr += ctx.arg.initialHeap;
455 }
456
457 if (ctx.arg.initialMemory != 0) {
458 checkPageAligned(name: "initial memory", value: ctx.arg.initialMemory);
459 if (memoryPtr > ctx.arg.initialMemory)
460 error(msg: "initial memory too small, " + Twine(memoryPtr) + " bytes needed");
461 if (ctx.arg.initialMemory > maxMemorySetting)
462 error(msg: "initial memory too large, cannot be greater than " +
463 Twine(maxMemorySetting));
464 memoryPtr = ctx.arg.initialMemory;
465 }
466
467 memoryPtr = alignTo(Value: memoryPtr, Align: ctx.arg.pageSize);
468
469 out.memorySec->numMemoryPages = memoryPtr / ctx.arg.pageSize;
470 log(msg: "mem: total pages = " + Twine(out.memorySec->numMemoryPages));
471
472 if (ctx.sym.heapEnd) {
473 // Set `__heap_end` to follow the end of the statically allocated linear
474 // memory. The fact that this comes last means that a malloc/brk
475 // implementation can grow the heap at runtime.
476 log(msg: "mem: heap end = " + Twine(memoryPtr));
477 ctx.sym.heapEnd->setVA(memoryPtr);
478 }
479
480 uint64_t maxMemory = 0;
481 if (ctx.arg.maxMemory != 0) {
482 checkPageAligned(name: "maximum memory", value: ctx.arg.maxMemory);
483 if (memoryPtr > ctx.arg.maxMemory)
484 error(msg: "maximum memory too small, " + Twine(memoryPtr) + " bytes needed");
485 if (ctx.arg.maxMemory > maxMemorySetting)
486 error(msg: "maximum memory too large, cannot be greater than " +
487 Twine(maxMemorySetting));
488
489 maxMemory = ctx.arg.maxMemory;
490 } else if (ctx.arg.noGrowableMemory) {
491 maxMemory = memoryPtr;
492 }
493
494 // If no maxMemory config was supplied but we are building with
495 // shared memory, we need to pick a sensible upper limit.
496 if (ctx.arg.sharedMemory && maxMemory == 0) {
497 if (ctx.isPic)
498 maxMemory = maxMemorySetting;
499 else
500 maxMemory = memoryPtr;
501 }
502
503 if (maxMemory != 0) {
504 out.memorySec->maxMemoryPages = maxMemory / ctx.arg.pageSize;
505 log(msg: "mem: max pages = " + Twine(out.memorySec->maxMemoryPages));
506 }
507}
508
509void Writer::addSection(OutputSection *sec) {
510 if (!sec->isNeeded())
511 return;
512 log(msg: "addSection: " + toString(section: *sec));
513 sec->sectionIndex = outputSections.size();
514 outputSections.push_back(x: sec);
515}
516
517// If a section name is valid as a C identifier (which is rare because of
518// the leading '.'), linkers are expected to define __start_<secname> and
519// __stop_<secname> symbols. They are at beginning and end of the section,
520// respectively. This is not requested by the ELF standard, but GNU ld and
521// gold provide the feature, and used by many programs.
522static void addStartStopSymbols(const OutputSegment *seg) {
523 StringRef name = seg->name;
524 if (!isValidCIdentifier(s: name))
525 return;
526 LLVM_DEBUG(dbgs() << "addStartStopSymbols: " << name << "\n");
527 uint64_t start = seg->startVA;
528 uint64_t stop = start + seg->size;
529 symtab->addOptionalDataSymbol(name: saver().save(S: "__start_" + name), value: start);
530 symtab->addOptionalDataSymbol(name: saver().save(S: "__stop_" + name), value: stop);
531}
532
533void Writer::addSections() {
534 addSection(sec: out.dylinkSec);
535 addSection(sec: out.typeSec);
536 addSection(sec: out.importSec);
537 addSection(sec: out.functionSec);
538 addSection(sec: out.tableSec);
539 addSection(sec: out.memorySec);
540 addSection(sec: out.tagSec);
541 addSection(sec: out.globalSec);
542 addSection(sec: out.exportSec);
543 addSection(sec: out.startSec);
544 addSection(sec: out.elemSec);
545 addSection(sec: out.dataCountSec);
546
547 addSection(sec: make<CodeSection>(args&: out.functionSec->inputFunctions));
548 addSection(sec: make<DataSection>(args&: segments));
549
550 createCustomSections();
551
552 addSection(sec: out.linkingSec);
553 if (ctx.arg.emitRelocs || ctx.arg.relocatable) {
554 createRelocSections();
555 }
556
557 addSection(sec: out.nameSec);
558 addSection(sec: out.producersSec);
559 addSection(sec: out.targetFeaturesSec);
560 addSection(sec: out.buildIdSec);
561}
562
563void Writer::finalizeSections() {
564 for (OutputSection *s : outputSections) {
565 s->setOffset(fileSize);
566 s->finalizeContents();
567 fileSize += s->getSize();
568 }
569}
570
571void Writer::populateTargetFeatures() {
572 StringMap<std::string> used;
573 StringMap<std::string> disallowed;
574 SmallSet<std::string, 8> &allowed = out.targetFeaturesSec->features;
575 bool tlsUsed = false;
576
577 if (ctx.isPic) {
578 // This should not be necessary because all PIC objects should
579 // contain the `mutable-globals` feature.
580 // TODO (https://github.com/llvm/llvm-project/issues/51681)
581 allowed.insert(V: "mutable-globals");
582 }
583
584 if (ctx.arg.extraFeatures.has_value()) {
585 auto &extraFeatures = *ctx.arg.extraFeatures;
586 allowed.insert_range(R&: extraFeatures);
587 }
588
589 // Only infer used features if user did not specify features
590 bool inferFeatures = !ctx.arg.features.has_value();
591
592 if (!inferFeatures) {
593 auto &explicitFeatures = *ctx.arg.features;
594 allowed.insert_range(R&: explicitFeatures);
595 if (!ctx.arg.checkFeatures)
596 goto done;
597 }
598
599 // Find the sets of used and disallowed features
600 for (ObjFile *file : ctx.objectFiles) {
601 StringRef fileName(file->getName());
602 for (auto &feature : file->getWasmObj()->getTargetFeatures()) {
603 switch (feature.Prefix) {
604 case WASM_FEATURE_PREFIX_USED:
605 used.insert(KV: {feature.Name, std::string(fileName)});
606 break;
607 case WASM_FEATURE_PREFIX_DISALLOWED:
608 disallowed.insert(KV: {feature.Name, std::string(fileName)});
609 break;
610 default:
611 error(msg: "Unrecognized feature policy prefix " +
612 std::to_string(val: feature.Prefix));
613 }
614 }
615
616 // Find TLS data segments
617 auto isTLS = [](InputChunk *segment) {
618 return segment->live && segment->isTLS();
619 };
620 tlsUsed = tlsUsed || llvm::any_of(Range&: file->segments, P: isTLS);
621 }
622
623 if (inferFeatures)
624 for (const auto &key : used.keys())
625 allowed.insert(V: std::string(key));
626
627 if (!ctx.arg.checkFeatures)
628 goto done;
629
630 if (ctx.arg.sharedMemory) {
631 if (disallowed.contains(Key: "shared-mem"))
632 error(msg: "--shared-memory is disallowed by " + disallowed["shared-mem"] +
633 " because it was not compiled with 'atomics' or 'bulk-memory' "
634 "features.");
635
636 for (auto feature : {"atomics", "bulk-memory"})
637 if (!allowed.contains(V: feature))
638 error(msg: StringRef("'") + feature +
639 "' feature must be used in order to use shared memory");
640 }
641
642 if (tlsUsed) {
643 for (auto feature : {"atomics", "bulk-memory"})
644 if (!allowed.contains(V: feature))
645 error(msg: StringRef("'") + feature +
646 "' feature must be used in order to use thread-local storage");
647 }
648
649 // Validate that used features are allowed in output
650 if (!inferFeatures) {
651 for (const auto &feature : used.keys()) {
652 if (!allowed.contains(V: std::string(feature)))
653 error(msg: Twine("Target feature '") + feature + "' used by " +
654 used[feature] + " is not allowed.");
655 }
656 }
657
658 // Validate the disallowed constraints for each file
659 for (ObjFile *file : ctx.objectFiles) {
660 StringRef fileName(file->getName());
661 SmallSet<std::string, 8> objectFeatures;
662 for (const auto &feature : file->getWasmObj()->getTargetFeatures()) {
663 if (feature.Prefix == WASM_FEATURE_PREFIX_DISALLOWED)
664 continue;
665 objectFeatures.insert(V: feature.Name);
666 if (disallowed.contains(Key: feature.Name))
667 error(msg: Twine("Target feature '") + feature.Name + "' used in " +
668 fileName + " is disallowed by " + disallowed[feature.Name] +
669 ". Use --no-check-features to suppress.");
670 }
671 }
672
673done:
674 // Normally we don't include bss segments in the binary. In particular if
675 // memory is not being imported then we can assume its zero initialized.
676 // In the case the memory is imported, and we can use the memory.fill
677 // instruction, then we can also avoid including the segments.
678 // Finally, if we are emitting relocations, they may refer to locations within
679 // the bss segments, so these segments need to exist in the binary.
680 if (ctx.arg.emitRelocs ||
681 (ctx.arg.memoryImport.has_value() && !allowed.contains(V: "bulk-memory")))
682 ctx.emitBssSegments = true;
683
684 if (allowed.contains(V: "extended-const"))
685 ctx.arg.extendedConst = true;
686
687 for (auto &feature : allowed)
688 log(msg: "Allowed feature: " + feature);
689}
690
691void Writer::checkImportExportTargetFeatures() {
692 if (ctx.arg.relocatable || !ctx.arg.checkFeatures)
693 return;
694
695 if (!out.targetFeaturesSec->features.contains(V: "mutable-globals")) {
696 for (const Symbol *sym : out.importSec->importedSymbols) {
697 if (auto *global = dyn_cast<GlobalSymbol>(Val: sym)) {
698 if (global->getGlobalType()->Mutable) {
699 error(msg: Twine("mutable global imported but 'mutable-globals' feature "
700 "not present in inputs: `") +
701 toString(sym: *sym) + "`. Use --no-check-features to suppress.");
702 }
703 }
704 }
705 for (const Symbol *sym : out.exportSec->exportedSymbols) {
706 if (auto *global = dyn_cast<GlobalSymbol>(Val: sym)) {
707 if (global->getGlobalType()->Mutable) {
708 error(msg: Twine("mutable global exported but 'mutable-globals' feature "
709 "not present in inputs: `") +
710 toString(sym: *sym) + "`. Use --no-check-features to suppress.");
711 }
712 }
713 }
714 }
715}
716
717static bool shouldImport(Symbol *sym) {
718 // We don't generate imports for data symbols. They however can be imported
719 // as GOT entries.
720 if (isa<DataSymbol>(Val: sym))
721 return false;
722 if (!sym->isLive())
723 return false;
724 if (!sym->isUsedInRegularObj)
725 return false;
726
727 // When a symbol is weakly defined in a shared library we need to allow
728 // it to be overridden by another module so need to both import
729 // and export the symbol.
730 if (ctx.arg.shared && sym->isWeak() && !sym->isUndefined() &&
731 !sym->isHidden())
732 return true;
733 if (sym->isShared())
734 return true;
735 if (!sym->isUndefined())
736 return false;
737 if (sym->isWeak() && !ctx.arg.relocatable && !ctx.isPic)
738 return false;
739
740 // In PIC mode we only need to import functions when they are called directly.
741 // Indirect usage all goes via GOT imports.
742 if (ctx.isPic) {
743 if (auto *f = dyn_cast<UndefinedFunction>(Val: sym))
744 if (!f->isCalledDirectly)
745 return false;
746 }
747
748 if (ctx.isPic || ctx.arg.relocatable || ctx.arg.importUndefined ||
749 ctx.arg.unresolvedSymbols == UnresolvedPolicy::ImportDynamic)
750 return true;
751 if (ctx.arg.allowUndefinedSymbols.contains(key: sym->getName()))
752 return true;
753
754 return sym->isImported();
755}
756
757void Writer::calculateImports() {
758 // Some inputs require that the indirect function table be assigned to table
759 // number 0, so if it is present and is an import, allocate it before any
760 // other tables.
761 if (ctx.sym.indirectFunctionTable &&
762 shouldImport(sym: ctx.sym.indirectFunctionTable))
763 out.importSec->addImport(sym: ctx.sym.indirectFunctionTable);
764
765 for (Symbol *sym : symtab->symbols()) {
766 if (!shouldImport(sym))
767 continue;
768 if (sym == ctx.sym.indirectFunctionTable)
769 continue;
770 LLVM_DEBUG(dbgs() << "import: " << sym->getName() << "\n");
771 out.importSec->addImport(sym);
772 }
773}
774
775void Writer::calculateExports() {
776 if (ctx.arg.relocatable)
777 return;
778
779 if (!ctx.arg.relocatable && ctx.arg.memoryExport.has_value()) {
780 out.exportSec->exports.push_back(
781 x: WasmExport{.Name: *ctx.arg.memoryExport, .Kind: WASM_EXTERNAL_MEMORY, .Index: 0});
782 }
783
784 unsigned globalIndex =
785 out.importSec->getNumImportedGlobals() + out.globalSec->numGlobals();
786
787 bool hasMutableGlobals =
788 out.targetFeaturesSec->features.contains(V: "mutable-globals");
789
790 for (Symbol *sym : symtab->symbols()) {
791 if (!sym->isExported())
792 continue;
793 if (!sym->isLive())
794 continue;
795 if (isa<SharedFunctionSymbol>(Val: sym) || sym->isShared())
796 continue;
797
798 StringRef name = sym->getName();
799 LLVM_DEBUG(dbgs() << "Export: " << name << "\n");
800 WasmExport export_;
801 if (auto *f = dyn_cast<DefinedFunction>(Val: sym)) {
802 if (std::optional<StringRef> exportName = f->function->getExportName()) {
803 name = *exportName;
804 }
805 export_ = {.Name: name, .Kind: WASM_EXTERNAL_FUNCTION, .Index: f->getExportedFunctionIndex()};
806 } else if (auto *g = dyn_cast<DefinedGlobal>(Val: sym)) {
807 if (!hasMutableGlobals && g->getGlobalType()->Mutable && !g->getFile() &&
808 !g->isExportedExplicit()) {
809 // Avoid exporting mutable globals are linker synthesized (e.g.
810 // __stack_pointer or __tls_base) unless they are explicitly exported
811 // from the command line.
812 // Without this check `--export-all` would cause any program using the
813 // stack pointer to export a mutable global even if none of the input
814 // files were built with the `mutable-globals` feature.
815 continue;
816 }
817 export_ = {.Name: name, .Kind: WASM_EXTERNAL_GLOBAL, .Index: g->getGlobalIndex()};
818 } else if (auto *t = dyn_cast<DefinedTag>(Val: sym)) {
819 export_ = {.Name: name, .Kind: WASM_EXTERNAL_TAG, .Index: t->getTagIndex()};
820 } else if (auto *d = dyn_cast<DefinedData>(Val: sym)) {
821 out.globalSec->dataAddressGlobals.push_back(x: d);
822 export_ = {.Name: name, .Kind: WASM_EXTERNAL_GLOBAL, .Index: globalIndex++};
823 } else {
824 auto *t = cast<DefinedTable>(Val: sym);
825 export_ = {.Name: name, .Kind: WASM_EXTERNAL_TABLE, .Index: t->getTableNumber()};
826 }
827
828 out.exportSec->exports.push_back(x: export_);
829 out.exportSec->exportedSymbols.push_back(x: sym);
830 }
831}
832
833void Writer::populateSymtab() {
834 if (!ctx.arg.relocatable && !ctx.arg.emitRelocs)
835 return;
836
837 for (Symbol *sym : symtab->symbols())
838 if (sym->isUsedInRegularObj && sym->isLive() && !sym->isShared())
839 out.linkingSec->addToSymtab(sym);
840
841 for (ObjFile *file : ctx.objectFiles) {
842 LLVM_DEBUG(dbgs() << "Local symtab entries: " << file->getName() << "\n");
843 for (Symbol *sym : file->getSymbols())
844 if (sym->isLocal() && !isa<SectionSymbol>(Val: sym) && sym->isLive())
845 out.linkingSec->addToSymtab(sym);
846 }
847}
848
849void Writer::calculateTypes() {
850 // The output type section is the union of the following sets:
851 // 1. Any signature used in the TYPE relocation
852 // 2. The signatures of all imported functions
853 // 3. The signatures of all defined functions
854 // 4. The signatures of all imported tags
855 // 5. The signatures of all defined tags
856
857 for (ObjFile *file : ctx.objectFiles) {
858 ArrayRef<WasmSignature> types = file->getWasmObj()->types();
859 for (uint32_t i = 0; i < types.size(); i++)
860 if (file->typeIsUsed[i])
861 file->typeMap[i] = out.typeSec->registerType(sig: types[i]);
862 }
863
864 for (const Symbol *sym : out.importSec->importedSymbols) {
865 if (auto *f = dyn_cast<FunctionSymbol>(Val: sym))
866 out.typeSec->registerType(sig: *f->signature);
867 else if (auto *t = dyn_cast<TagSymbol>(Val: sym))
868 out.typeSec->registerType(sig: *t->signature);
869 }
870
871 for (const InputFunction *f : out.functionSec->inputFunctions)
872 out.typeSec->registerType(sig: f->signature);
873
874 for (const InputTag *t : out.tagSec->inputTags)
875 out.typeSec->registerType(sig: t->signature);
876}
877
// In a command-style link, create a wrapper for each exported symbol
// which calls the constructors and destructors.
void Writer::createCommandExportWrappers() {
  // This logic doesn't currently support Emscripten-style PIC mode.
  assert(!ctx.isPic);

  // If there are no ctors and there's no libc `__wasm_call_dtors` to
  // call, don't wrap the exports.
  if (initFunctions.empty() && ctx.sym.callDtors == nullptr)
    return;

  // Collect every exported, defined function; these are the symbols that
  // will be replaced by wrappers.
  std::vector<DefinedFunction *> toWrap;

  for (Symbol *sym : symtab->symbols())
    if (sym->isExported())
      if (auto *f = dyn_cast<DefinedFunction>(Val: sym))
        toWrap.push_back(x: f);

  for (auto *f : toWrap) {
    // The wrapper's name string is stored in commandExportWrapperNames so
    // it outlives this function; the SyntheticFunction created below keeps
    // referring to it. NOTE(review): this relies on the container not
    // invalidating references to existing elements when it grows — confirm
    // the member's declared type provides that guarantee.
    auto funcNameStr = (f->getName() + ".command_export").str();
    commandExportWrapperNames.push_back(x: funcNameStr);
    const std::string &funcName = commandExportWrapperNames.back();

    // The wrapper shares the wrapped function's signature and takes over
    // its export name (explicit export name if present, symbol name
    // otherwise).
    auto func = make<SyntheticFunction>(args: *f->getSignature(), args: funcName);
    if (f->function->getExportName())
      func->setExportName(f->function->getExportName()->str());
    else
      func->setExportName(f->getName().str());

    DefinedFunction *def =
        symtab->addSyntheticFunction(name: funcName, flags: f->flags, function: func);
    def->markLive();

    // The wrapper becomes the exported, visible symbol...
    def->flags |= WASM_SYMBOL_EXPORTED;
    def->flags &= ~WASM_SYMBOL_VISIBILITY_HIDDEN;
    def->forceExport = f->forceExport;

    // ...while the original function is hidden and un-exported.
    f->flags |= WASM_SYMBOL_VISIBILITY_HIDDEN;
    f->flags &= ~WASM_SYMBOL_EXPORTED;
    f->forceExport = false;

    out.functionSec->addFunction(func);

    // Emit the wrapper body: call ctors, forward arguments to the original
    // function (by index), then call dtors.
    createCommandExportWrapper(functionIndex: f->getFunctionIndex(), f: def);
  }
}
924
925static void finalizeIndirectFunctionTable() {
926 if (!ctx.sym.indirectFunctionTable)
927 return;
928
929 if (shouldImport(sym: ctx.sym.indirectFunctionTable) &&
930 !ctx.sym.indirectFunctionTable->hasTableNumber()) {
931 // Processing -Bsymbolic relocations resulted in a late requirement that the
932 // indirect function table be present, and we are running in --import-table
933 // mode. Add the table now to the imports section. Otherwise it will be
934 // added to the tables section later in assignIndexes.
935 out.importSec->addImport(sym: ctx.sym.indirectFunctionTable);
936 }
937
938 uint32_t tableSize = ctx.arg.tableBase + out.elemSec->numEntries();
939 WasmLimits limits = {.Flags: 0, .Minimum: tableSize, .Maximum: 0, .PageSize: 0};
940 if (ctx.sym.indirectFunctionTable->isDefined() && !ctx.arg.growableTable) {
941 limits.Flags |= WASM_LIMITS_FLAG_HAS_MAX;
942 limits.Maximum = limits.Minimum;
943 }
944 if (ctx.arg.is64.value_or(u: false))
945 limits.Flags |= WASM_LIMITS_FLAG_IS_64;
946 ctx.sym.indirectFunctionTable->setLimits(limits);
947}
948
949static void scanRelocations() {
950 for (ObjFile *file : ctx.objectFiles) {
951 LLVM_DEBUG(dbgs() << "scanRelocations: " << file->getName() << "\n");
952 for (InputChunk *chunk : file->functions)
953 scanRelocations(chunk);
954 for (InputChunk *chunk : file->segments)
955 scanRelocations(chunk);
956 for (auto &p : file->customSections)
957 scanRelocations(chunk: p);
958 }
959}
960
961void Writer::assignIndexes() {
962 // Seal the import section, since other index spaces such as function and
963 // global are effected by the number of imports.
964 out.importSec->seal();
965
966 for (InputFunction *func : ctx.syntheticFunctions)
967 out.functionSec->addFunction(func);
968
969 for (ObjFile *file : ctx.objectFiles) {
970 LLVM_DEBUG(dbgs() << "Functions: " << file->getName() << "\n");
971 for (InputFunction *func : file->functions)
972 out.functionSec->addFunction(func);
973 }
974
975 for (InputGlobal *global : ctx.syntheticGlobals)
976 out.globalSec->addGlobal(global);
977
978 for (ObjFile *file : ctx.objectFiles) {
979 LLVM_DEBUG(dbgs() << "Globals: " << file->getName() << "\n");
980 for (InputGlobal *global : file->globals)
981 out.globalSec->addGlobal(global);
982 }
983
984 for (ObjFile *file : ctx.objectFiles) {
985 LLVM_DEBUG(dbgs() << "Tags: " << file->getName() << "\n");
986 for (InputTag *tag : file->tags)
987 out.tagSec->addTag(tag);
988 }
989
990 for (ObjFile *file : ctx.objectFiles) {
991 LLVM_DEBUG(dbgs() << "Tables: " << file->getName() << "\n");
992 for (InputTable *table : file->tables)
993 out.tableSec->addTable(table);
994 }
995
996 for (InputTable *table : ctx.syntheticTables)
997 out.tableSec->addTable(table);
998
999 out.globalSec->assignIndexes();
1000 out.tableSec->assignIndexes();
1001}
1002
1003static StringRef getOutputDataSegmentName(const InputChunk &seg) {
1004 // We always merge .tbss and .tdata into a single TLS segment so all TLS
1005 // symbols are be relative to single __tls_base.
1006 if (seg.isTLS())
1007 return ".tdata";
1008 if (!ctx.arg.mergeDataSegments)
1009 return seg.name;
1010 if (seg.name.starts_with(Prefix: ".text."))
1011 return ".text";
1012 if (seg.name.starts_with(Prefix: ".data."))
1013 return ".data";
1014 if (seg.name.starts_with(Prefix: ".bss."))
1015 return ".bss";
1016 if (seg.name.starts_with(Prefix: ".rodata."))
1017 return ".rodata";
1018 return seg.name;
1019}
1020
1021OutputSegment *Writer::createOutputSegment(StringRef name) {
1022 LLVM_DEBUG(dbgs() << "new segment: " << name << "\n");
1023 OutputSegment *s = make<OutputSegment>(args&: name);
1024 if (ctx.arg.sharedMemory)
1025 s->initFlags = WASM_DATA_SEGMENT_IS_PASSIVE;
1026 if (!ctx.arg.relocatable && name.starts_with(Prefix: ".bss"))
1027 s->isBss = true;
1028 segments.push_back(x: s);
1029 return s;
1030}
1031
1032void Writer::createOutputSegments() {
1033 for (ObjFile *file : ctx.objectFiles) {
1034 for (InputChunk *segment : file->segments) {
1035 if (!segment->live)
1036 continue;
1037 StringRef name = getOutputDataSegmentName(seg: *segment);
1038 OutputSegment *s = nullptr;
1039 // When running in relocatable mode we can't merge segments that are part
1040 // of comdat groups since the ultimate linker needs to be able exclude or
1041 // include them individually.
1042 if (ctx.arg.relocatable && !segment->getComdatName().empty()) {
1043 s = createOutputSegment(name);
1044 } else {
1045 if (!segmentMap.contains(Val: name))
1046 segmentMap[name] = createOutputSegment(name);
1047 s = segmentMap[name];
1048 }
1049 s->addInputSegment(inSeg: segment);
1050 }
1051 }
1052
1053 // Sort segments by type, placing .bss last
1054 llvm::stable_sort(Range&: segments,
1055 C: [](const OutputSegment *a, const OutputSegment *b) {
1056 auto order = [](StringRef name) {
1057 return StringSwitch<int>(name)
1058 .StartsWith(S: ".tdata", Value: 0)
1059 .StartsWith(S: ".rodata", Value: 1)
1060 .StartsWith(S: ".data", Value: 2)
1061 .StartsWith(S: ".bss", Value: 4)
1062 .Default(Value: 3);
1063 };
1064 return order(a->name) < order(b->name);
1065 });
1066
1067 for (size_t i = 0; i < segments.size(); ++i)
1068 segments[i]->index = i;
1069
1070 // Merge MergeInputSections into a single MergeSyntheticSection.
1071 LLVM_DEBUG(dbgs() << "-- finalize input semgments\n");
1072 for (OutputSegment *seg : segments)
1073 seg->finalizeInputSegments();
1074}
1075
void Writer::combineOutputSegments() {
  // With PIC code we currently only support a single active data segment since
  // we only have a single __memory_base to use as our base address. This pass
  // combines all data segments into a single .data segment.
  // This restriction does not apply when the extended const extension is
  // available: https://github.com/WebAssembly/extended-const
  assert(!ctx.arg.extendedConst);
  assert(ctx.isPic && !ctx.arg.sharedMemory);
  if (segments.size() <= 1)
    return;
  // The merged segment inherits the start address of the first (lowest)
  // segment so that previously-assigned addresses remain valid.
  OutputSegment *combined = make<OutputSegment>(args: ".data");
  combined->startVA = segments[0]->startVA;
  std::vector<OutputSegment *> newSegments = {combined};
  for (OutputSegment *s : segments) {
    // Segments with no presence in the binary (e.g. .bss) are kept as
    // separate output segments rather than merged into .data.
    if (!s->requiredInBinary()) {
      newSegments.push_back(x: s);
      continue;
    }
    bool first = true;
    for (InputChunk *inSeg : s->inputSegments) {
      // Propagate the old output segment's alignment onto its first input
      // chunk so the merged layout preserves each segment's alignment.
      if (first)
        inSeg->alignment = std::max(a: inSeg->alignment, b: s->alignment);
      first = false;
#ifndef NDEBUG
      uint64_t oldVA = inSeg->getVA();
#endif
      combined->addInputSegment(inSeg);
#ifndef NDEBUG
      // Merging must not move any chunk: virtual addresses were assigned
      // during memory layout and symbols already point at them.
      uint64_t newVA = inSeg->getVA();
      LLVM_DEBUG(dbgs() << "added input segment. name=" << inSeg->name
                        << " oldVA=" << oldVA << " newVA=" << newVA << "\n");
      assert(oldVA == newVA);
#endif
    }
  }

  segments = newSegments;
}
1114
1115static void createFunction(DefinedFunction *func, StringRef bodyContent) {
1116 std::string functionBody;
1117 {
1118 raw_string_ostream os(functionBody);
1119 writeUleb128(os, number: bodyContent.size(), msg: "function size");
1120 os << bodyContent;
1121 }
1122 ArrayRef<uint8_t> body = arrayRefFromStringRef(Input: saver().save(S: functionBody));
1123 cast<SyntheticFunction>(Val: func->function)->setBody(body);
1124}
1125
1126bool Writer::needsPassiveInitialization(const OutputSegment *segment) {
1127 // If bulk memory features is supported then we can perform bss initialization
1128 // (via memory.fill) during `__wasm_init_memory`.
1129 if (ctx.arg.memoryImport.has_value() && !segment->requiredInBinary())
1130 return true;
1131 return segment->initFlags & WASM_DATA_SEGMENT_IS_PASSIVE;
1132}
1133
1134bool Writer::hasPassiveInitializedSegments() {
1135 return llvm::any_of(Range&: segments, P: [this](const OutputSegment *s) {
1136 return this->needsPassiveInitialization(segment: s);
1137 });
1138}
1139
// Declare the synthetic startup functions that this link needs
// (__wasm_init_memory, the TLS/global relocation appliers, and, if more
// than one of them exists, a __wasm_start driver). Bodies are generated
// later; here we only create the symbols and mark them live.
void Writer::createSyntheticInitFunctions() {
  if (ctx.arg.relocatable)
    return;

  // Shared void()->void signature used by all synthetic init functions.
  static WasmSignature nullSignature = {{}, {}};

  createApplyDataRelocationsFunction();

  // Passive segments are used to avoid memory being reinitialized on each
  // thread's instantiation. These passive segments are initialized and
  // dropped in __wasm_init_memory, which is registered as the start function
  // We also initialize bss segments (using memory.fill) as part of this
  // function.
  if (hasPassiveInitializedSegments()) {
    ctx.sym.initMemory = symtab->addSyntheticFunction(
        name: "__wasm_init_memory", flags: WASM_SYMBOL_VISIBILITY_HIDDEN,
        function: make<SyntheticFunction>(args&: nullSignature, args: "__wasm_init_memory"));
    ctx.sym.initMemory->markLive();
    if (ctx.arg.sharedMemory) {
      // This global is assigned during __wasm_init_memory in the shared memory
      // case.
      ctx.sym.tlsBase->markLive();
    }
  }

  if (ctx.arg.sharedMemory) {
    // Wasm globals holding TLS addresses must be fixed up on each thread.
    if (out.globalSec->needsTLSRelocations()) {
      ctx.sym.applyGlobalTLSRelocs = symtab->addSyntheticFunction(
          name: "__wasm_apply_global_tls_relocs", flags: WASM_SYMBOL_VISIBILITY_HIDDEN,
          function: make<SyntheticFunction>(args&: nullSignature,
                                  args: "__wasm_apply_global_tls_relocs"));
      ctx.sym.applyGlobalTLSRelocs->markLive();
      // TLS relocations depend on the __tls_base symbols
      ctx.sym.tlsBase->markLive();
    }

    // Only create __wasm_apply_tls_relocs if some TLS segment actually
    // carries relocations.
    auto hasTLSRelocs = [](const OutputSegment *segment) {
      if (segment->isTLS())
        for (const auto* is: segment->inputSegments)
          if (is->getRelocations().size())
            return true;
      return false;
    };
    if (llvm::any_of(Range&: segments, P: hasTLSRelocs)) {
      ctx.sym.applyTLSRelocs = symtab->addSyntheticFunction(
          name: "__wasm_apply_tls_relocs", flags: WASM_SYMBOL_VISIBILITY_HIDDEN,
          function: make<SyntheticFunction>(args&: nullSignature, args: "__wasm_apply_tls_relocs"));
      ctx.sym.applyTLSRelocs->markLive();
    }
  }

  // PIC output fixes up non-TLS wasm globals once at startup.
  if (ctx.isPic && out.globalSec->needsRelocations()) {
    ctx.sym.applyGlobalRelocs = symtab->addSyntheticFunction(
        name: "__wasm_apply_global_relocs", flags: WASM_SYMBOL_VISIBILITY_HIDDEN,
        function: make<SyntheticFunction>(args&: nullSignature, args: "__wasm_apply_global_relocs"));
    ctx.sym.applyGlobalRelocs->markLive();
  }

  // If there is only one start function we can just use that function
  // itself as the Wasm start function, otherwise we need to synthesize
  // a new function to call them in sequence.
  if (ctx.sym.applyGlobalRelocs && ctx.sym.initMemory) {
    ctx.sym.startFunction = symtab->addSyntheticFunction(
        name: "__wasm_start", flags: WASM_SYMBOL_VISIBILITY_HIDDEN,
        function: make<SyntheticFunction>(args&: nullSignature, args: "__wasm_start"));
    ctx.sym.startFunction->markLive();
  }
}
1208
1209void Writer::createInitMemoryFunction() {
1210 LLVM_DEBUG(dbgs() << "createInitMemoryFunction\n");
1211 assert(ctx.sym.initMemory);
1212 assert(hasPassiveInitializedSegments());
1213 uint64_t flagAddress;
1214 if (ctx.arg.sharedMemory) {
1215 assert(ctx.sym.initMemoryFlag);
1216 flagAddress = ctx.sym.initMemoryFlag->getVA();
1217 }
1218 bool is64 = ctx.arg.is64.value_or(u: false);
1219 std::string bodyContent;
1220 {
1221 raw_string_ostream os(bodyContent);
1222 // Initialize memory in a thread-safe manner. The thread that successfully
1223 // increments the flag from 0 to 1 is responsible for performing the memory
1224 // initialization. Other threads go sleep on the flag until the first thread
1225 // finishing initializing memory, increments the flag to 2, and wakes all
1226 // the other threads. Once the flag has been set to 2, subsequently started
1227 // threads will skip the sleep. All threads unconditionally drop their
1228 // passive data segments once memory has been initialized. The generated
1229 // code is as follows:
1230 //
1231 // (func $__wasm_init_memory
1232 // (block $drop
1233 // (block $wait
1234 // (block $init
1235 // (br_table $init $wait $drop
1236 // (i32.atomic.rmw.cmpxchg align=2 offset=0
1237 // (i32.const $__init_memory_flag)
1238 // (i32.const 0)
1239 // (i32.const 1)
1240 // )
1241 // )
1242 // ) ;; $init
1243 // ( ... initialize data segments ... )
1244 // (i32.atomic.store align=2 offset=0
1245 // (i32.const $__init_memory_flag)
1246 // (i32.const 2)
1247 // )
1248 // (drop
1249 // (i32.atomic.notify align=2 offset=0
1250 // (i32.const $__init_memory_flag)
1251 // (i32.const -1u)
1252 // )
1253 // )
1254 // (br $drop)
1255 // ) ;; $wait
1256 // (drop
1257 // (i32.atomic.wait align=2 offset=0
1258 // (i32.const $__init_memory_flag)
1259 // (i32.const 1)
1260 // (i32.const -1)
1261 // )
1262 // )
1263 // ) ;; $drop
1264 // ( ... drop data segments ... )
1265 // )
1266 //
1267 // When we are building with PIC, calculate the flag location using:
1268 //
1269 // (global.get $__memory_base)
1270 // (i32.const $__init_memory_flag)
1271 // (i32.const 1)
1272
1273 auto writeGetFlagAddress = [&]() {
1274 if (ctx.isPic) {
1275 writeU8(os, byte: WASM_OPCODE_LOCAL_GET, msg: "local.get");
1276 writeUleb128(os, number: 0, msg: "local 0");
1277 } else {
1278 writePtrConst(os, number: flagAddress, is64, msg: "flag address");
1279 }
1280 };
1281
1282 if (ctx.arg.sharedMemory) {
1283 // With PIC code we cache the flag address in local 0
1284 if (ctx.isPic) {
1285 writeUleb128(os, number: 1, msg: "num local decls");
1286 writeUleb128(os, number: 2, msg: "local count");
1287 writeU8(os, byte: is64 ? WASM_TYPE_I64 : WASM_TYPE_I32, msg: "address type");
1288 writeU8(os, byte: WASM_OPCODE_GLOBAL_GET, msg: "GLOBAL_GET");
1289 writeUleb128(os, number: ctx.sym.memoryBase->getGlobalIndex(), msg: "memory_base");
1290 writePtrConst(os, number: flagAddress, is64, msg: "flag address");
1291 writeU8(os, byte: is64 ? WASM_OPCODE_I64_ADD : WASM_OPCODE_I32_ADD, msg: "add");
1292 writeU8(os, byte: WASM_OPCODE_LOCAL_SET, msg: "local.set");
1293 writeUleb128(os, number: 0, msg: "local 0");
1294 } else {
1295 writeUleb128(os, number: 0, msg: "num locals");
1296 }
1297
1298 // Set up destination blocks
1299 writeU8(os, byte: WASM_OPCODE_BLOCK, msg: "block $drop");
1300 writeU8(os, byte: WASM_TYPE_NORESULT, msg: "block type");
1301 writeU8(os, byte: WASM_OPCODE_BLOCK, msg: "block $wait");
1302 writeU8(os, byte: WASM_TYPE_NORESULT, msg: "block type");
1303 writeU8(os, byte: WASM_OPCODE_BLOCK, msg: "block $init");
1304 writeU8(os, byte: WASM_TYPE_NORESULT, msg: "block type");
1305
1306 // Atomically check whether we win the race.
1307 writeGetFlagAddress();
1308 writeI32Const(os, number: 0, msg: "expected flag value");
1309 writeI32Const(os, number: 1, msg: "new flag value");
1310 writeU8(os, byte: WASM_OPCODE_ATOMICS_PREFIX, msg: "atomics prefix");
1311 writeUleb128(os, number: WASM_OPCODE_I32_RMW_CMPXCHG, msg: "i32.atomic.rmw.cmpxchg");
1312 writeMemArg(os, alignment: 2, offset: 0);
1313
1314 // Based on the value, decide what to do next.
1315 writeU8(os, byte: WASM_OPCODE_BR_TABLE, msg: "br_table");
1316 writeUleb128(os, number: 2, msg: "label vector length");
1317 writeUleb128(os, number: 0, msg: "label $init");
1318 writeUleb128(os, number: 1, msg: "label $wait");
1319 writeUleb128(os, number: 2, msg: "default label $drop");
1320
1321 // Initialize passive data segments
1322 writeU8(os, byte: WASM_OPCODE_END, msg: "end $init");
1323 } else {
1324 writeUleb128(os, number: 0, msg: "num local decls");
1325 }
1326
1327 for (const OutputSegment *s : segments) {
1328 if (needsPassiveInitialization(segment: s)) {
1329 // For passive BSS segments we can simple issue a memory.fill(0).
1330 // For non-BSS segments we do a memory.init. Both these
1331 // instructions take as their first argument the destination
1332 // address.
1333 writePtrConst(os, number: s->startVA, is64, msg: "destination address");
1334 if (ctx.isPic) {
1335 writeU8(os, byte: WASM_OPCODE_GLOBAL_GET, msg: "GLOBAL_GET");
1336 writeUleb128(os, number: ctx.sym.memoryBase->getGlobalIndex(),
1337 msg: "__memory_base");
1338 writeU8(os, byte: is64 ? WASM_OPCODE_I64_ADD : WASM_OPCODE_I32_ADD,
1339 msg: "i32.add");
1340 }
1341
1342 // When we initialize the TLS segment we also set the `__tls_base`
1343 // global. This allows the runtime to use this static copy of the
1344 // TLS data for the first/main thread.
1345 if (ctx.arg.sharedMemory && s->isTLS()) {
1346 if (ctx.isPic) {
1347 // Cache the result of the addionion in local 0
1348 writeU8(os, byte: WASM_OPCODE_LOCAL_TEE, msg: "local.tee");
1349 writeUleb128(os, number: 1, msg: "local 1");
1350 } else {
1351 writePtrConst(os, number: s->startVA, is64, msg: "destination address");
1352 }
1353 writeU8(os, byte: WASM_OPCODE_GLOBAL_SET, msg: "GLOBAL_SET");
1354 writeUleb128(os, number: ctx.sym.tlsBase->getGlobalIndex(), msg: "__tls_base");
1355 if (ctx.isPic) {
1356 writeU8(os, byte: WASM_OPCODE_LOCAL_GET, msg: "local.tee");
1357 writeUleb128(os, number: 1, msg: "local 1");
1358 }
1359 }
1360
1361 if (s->isBss) {
1362 writeI32Const(os, number: 0, msg: "fill value");
1363 writePtrConst(os, number: s->size, is64, msg: "memory region size");
1364 writeU8(os, byte: WASM_OPCODE_MISC_PREFIX, msg: "bulk-memory prefix");
1365 writeUleb128(os, number: WASM_OPCODE_MEMORY_FILL, msg: "memory.fill");
1366 writeU8(os, byte: 0, msg: "memory index immediate");
1367 } else {
1368 writeI32Const(os, number: 0, msg: "source segment offset");
1369 writeI32Const(os, number: s->size, msg: "memory region size");
1370 writeU8(os, byte: WASM_OPCODE_MISC_PREFIX, msg: "bulk-memory prefix");
1371 writeUleb128(os, number: WASM_OPCODE_MEMORY_INIT, msg: "memory.init");
1372 writeUleb128(os, number: s->index, msg: "segment index immediate");
1373 writeU8(os, byte: 0, msg: "memory index immediate");
1374 }
1375 }
1376 }
1377
1378 if (ctx.arg.sharedMemory) {
1379 // Set flag to 2 to mark end of initialization
1380 writeGetFlagAddress();
1381 writeI32Const(os, number: 2, msg: "flag value");
1382 writeU8(os, byte: WASM_OPCODE_ATOMICS_PREFIX, msg: "atomics prefix");
1383 writeUleb128(os, number: WASM_OPCODE_I32_ATOMIC_STORE, msg: "i32.atomic.store");
1384 writeMemArg(os, alignment: 2, offset: 0);
1385
1386 // Notify any waiters that memory initialization is complete
1387 writeGetFlagAddress();
1388 writeI32Const(os, number: -1, msg: "number of waiters");
1389 writeU8(os, byte: WASM_OPCODE_ATOMICS_PREFIX, msg: "atomics prefix");
1390 writeUleb128(os, number: WASM_OPCODE_ATOMIC_NOTIFY, msg: "atomic.notify");
1391 writeMemArg(os, alignment: 2, offset: 0);
1392 writeU8(os, byte: WASM_OPCODE_DROP, msg: "drop");
1393
1394 // Branch to drop the segments
1395 writeU8(os, byte: WASM_OPCODE_BR, msg: "br");
1396 writeUleb128(os, number: 1, msg: "label $drop");
1397
1398 // Wait for the winning thread to initialize memory
1399 writeU8(os, byte: WASM_OPCODE_END, msg: "end $wait");
1400 writeGetFlagAddress();
1401 writeI32Const(os, number: 1, msg: "expected flag value");
1402 writeI64Const(os, number: -1, msg: "timeout");
1403
1404 writeU8(os, byte: WASM_OPCODE_ATOMICS_PREFIX, msg: "atomics prefix");
1405 writeUleb128(os, number: WASM_OPCODE_I32_ATOMIC_WAIT, msg: "i32.atomic.wait");
1406 writeMemArg(os, alignment: 2, offset: 0);
1407 writeU8(os, byte: WASM_OPCODE_DROP, msg: "drop");
1408
1409 // Unconditionally drop passive data segments
1410 writeU8(os, byte: WASM_OPCODE_END, msg: "end $drop");
1411 }
1412
1413 for (const OutputSegment *s : segments) {
1414 if (needsPassiveInitialization(segment: s) && !s->isBss) {
1415 // The TLS region should not be dropped since its is needed
1416 // during the initialization of each thread (__wasm_init_tls).
1417 if (ctx.arg.sharedMemory && s->isTLS())
1418 continue;
1419 // data.drop instruction
1420 writeU8(os, byte: WASM_OPCODE_MISC_PREFIX, msg: "bulk-memory prefix");
1421 writeUleb128(os, number: WASM_OPCODE_DATA_DROP, msg: "data.drop");
1422 writeUleb128(os, number: s->index, msg: "segment index immediate");
1423 }
1424 }
1425
1426 // End the function
1427 writeU8(os, byte: WASM_OPCODE_END, msg: "END");
1428 }
1429
1430 createFunction(func: ctx.sym.initMemory, bodyContent);
1431}
1432
1433void Writer::createStartFunction() {
1434 // If the start function exists when we have more than one function to call.
1435 if (ctx.sym.initMemory && ctx.sym.applyGlobalRelocs) {
1436 assert(ctx.sym.startFunction);
1437 std::string bodyContent;
1438 {
1439 raw_string_ostream os(bodyContent);
1440 writeUleb128(os, number: 0, msg: "num locals");
1441 writeU8(os, byte: WASM_OPCODE_CALL, msg: "CALL");
1442 writeUleb128(os, number: ctx.sym.applyGlobalRelocs->getFunctionIndex(),
1443 msg: "function index");
1444 writeU8(os, byte: WASM_OPCODE_CALL, msg: "CALL");
1445 writeUleb128(os, number: ctx.sym.initMemory->getFunctionIndex(),
1446 msg: "function index");
1447 writeU8(os, byte: WASM_OPCODE_END, msg: "END");
1448 }
1449 createFunction(func: ctx.sym.startFunction, bodyContent);
1450 } else if (ctx.sym.initMemory) {
1451 ctx.sym.startFunction = ctx.sym.initMemory;
1452 } else if (ctx.sym.applyGlobalRelocs) {
1453 ctx.sym.startFunction = ctx.sym.applyGlobalRelocs;
1454 }
1455}
1456
1457// For -shared (PIC) output, we create create a synthetic function which will
1458// apply any relocations to the data segments on startup. This function is
1459// called `__wasm_apply_data_relocs` and is expected to be called before
1460// any user code (i.e. before `__wasm_call_ctors`).
1461void Writer::createApplyDataRelocationsFunction() {
1462 LLVM_DEBUG(dbgs() << "createApplyDataRelocationsFunction\n");
1463 // First write the body's contents to a string.
1464 std::string bodyContent;
1465 {
1466 raw_string_ostream os(bodyContent);
1467 writeUleb128(os, number: 0, msg: "num locals");
1468 bool generated = false;
1469 for (const OutputSegment *seg : segments)
1470 if (!ctx.arg.sharedMemory || !seg->isTLS())
1471 for (const InputChunk *inSeg : seg->inputSegments)
1472 generated |= inSeg->generateRelocationCode(os);
1473
1474 if (!generated) {
1475 LLVM_DEBUG(dbgs() << "skipping empty __wasm_apply_data_relocs\n");
1476 return;
1477 }
1478 writeU8(os, byte: WASM_OPCODE_END, msg: "END");
1479 }
1480
1481 // __wasm_apply_data_relocs
1482 // Function that applies relocations to data segment post-instantiation.
1483 static WasmSignature nullSignature = {{}, {}};
1484 auto def = symtab->addSyntheticFunction(
1485 name: "__wasm_apply_data_relocs",
1486 flags: WASM_SYMBOL_VISIBILITY_DEFAULT | WASM_SYMBOL_EXPORTED,
1487 function: make<SyntheticFunction>(args&: nullSignature, args: "__wasm_apply_data_relocs"));
1488 def->markLive();
1489
1490 createFunction(func: def, bodyContent);
1491}
1492
1493void Writer::createApplyTLSRelocationsFunction() {
1494 LLVM_DEBUG(dbgs() << "createApplyTLSRelocationsFunction\n");
1495 std::string bodyContent;
1496 {
1497 raw_string_ostream os(bodyContent);
1498 writeUleb128(os, number: 0, msg: "num locals");
1499 for (const OutputSegment *seg : segments)
1500 if (seg->isTLS())
1501 for (const InputChunk *inSeg : seg->inputSegments)
1502 inSeg->generateRelocationCode(os);
1503
1504 writeU8(os, byte: WASM_OPCODE_END, msg: "END");
1505 }
1506
1507 createFunction(func: ctx.sym.applyTLSRelocs, bodyContent);
1508}
1509
1510// Similar to createApplyDataRelocationsFunction but generates relocation code
1511// for WebAssembly globals. Because these globals are not shared between threads
1512// these relocation need to run on every thread.
1513void Writer::createApplyGlobalRelocationsFunction() {
1514 // First write the body's contents to a string.
1515 std::string bodyContent;
1516 {
1517 raw_string_ostream os(bodyContent);
1518 writeUleb128(os, number: 0, msg: "num locals");
1519 out.globalSec->generateRelocationCode(os, TLS: false);
1520 writeU8(os, byte: WASM_OPCODE_END, msg: "END");
1521 }
1522
1523 createFunction(func: ctx.sym.applyGlobalRelocs, bodyContent);
1524}
1525
1526// Similar to createApplyGlobalRelocationsFunction but for
1527// TLS symbols. This cannot be run during the start function
1528// but must be delayed until __wasm_init_tls is called.
1529void Writer::createApplyGlobalTLSRelocationsFunction() {
1530 // First write the body's contents to a string.
1531 std::string bodyContent;
1532 {
1533 raw_string_ostream os(bodyContent);
1534 writeUleb128(os, number: 0, msg: "num locals");
1535 out.globalSec->generateRelocationCode(os, TLS: true);
1536 writeU8(os, byte: WASM_OPCODE_END, msg: "END");
1537 }
1538
1539 createFunction(func: ctx.sym.applyGlobalTLSRelocs, bodyContent);
1540}
1541
1542// Create synthetic "__wasm_call_ctors" function based on ctor functions
1543// in input object.
1544void Writer::createCallCtorsFunction() {
1545 // If __wasm_call_ctors isn't referenced, there aren't any ctors, don't
1546 // define the `__wasm_call_ctors` function.
1547 if (!ctx.sym.callCtors->isLive() && initFunctions.empty())
1548 return;
1549
1550 // First write the body's contents to a string.
1551 std::string bodyContent;
1552 {
1553 raw_string_ostream os(bodyContent);
1554 writeUleb128(os, number: 0, msg: "num locals");
1555
1556 // Call constructors
1557 for (const WasmInitEntry &f : initFunctions) {
1558 writeU8(os, byte: WASM_OPCODE_CALL, msg: "CALL");
1559 writeUleb128(os, number: f.sym->getFunctionIndex(), msg: "function index");
1560 for (size_t i = 0; i < f.sym->signature->Returns.size(); i++) {
1561 writeU8(os, byte: WASM_OPCODE_DROP, msg: "DROP");
1562 }
1563 }
1564
1565 writeU8(os, byte: WASM_OPCODE_END, msg: "END");
1566 }
1567
1568 createFunction(func: ctx.sym.callCtors, bodyContent);
1569}
1570
1571// Create a wrapper around a function export which calls the
1572// static constructors and destructors.
1573void Writer::createCommandExportWrapper(uint32_t functionIndex,
1574 DefinedFunction *f) {
1575 // First write the body's contents to a string.
1576 std::string bodyContent;
1577 {
1578 raw_string_ostream os(bodyContent);
1579 writeUleb128(os, number: 0, msg: "num locals");
1580
1581 // Call `__wasm_call_ctors` which call static constructors (and
1582 // applies any runtime relocations in Emscripten-style PIC mode)
1583 if (ctx.sym.callCtors->isLive()) {
1584 writeU8(os, byte: WASM_OPCODE_CALL, msg: "CALL");
1585 writeUleb128(os, number: ctx.sym.callCtors->getFunctionIndex(), msg: "function index");
1586 }
1587
1588 // Call the user's code, leaving any return values on the operand stack.
1589 for (size_t i = 0; i < f->signature->Params.size(); ++i) {
1590 writeU8(os, byte: WASM_OPCODE_LOCAL_GET, msg: "local.get");
1591 writeUleb128(os, number: i, msg: "local index");
1592 }
1593 writeU8(os, byte: WASM_OPCODE_CALL, msg: "CALL");
1594 writeUleb128(os, number: functionIndex, msg: "function index");
1595
1596 // Call the function that calls the destructors.
1597 if (DefinedFunction *callDtors = ctx.sym.callDtors) {
1598 writeU8(os, byte: WASM_OPCODE_CALL, msg: "CALL");
1599 writeUleb128(os, number: callDtors->getFunctionIndex(), msg: "function index");
1600 }
1601
1602 // End the function, returning the return values from the user's code.
1603 writeU8(os, byte: WASM_OPCODE_END, msg: "END");
1604 }
1605
1606 createFunction(func: f, bodyContent);
1607}
1608
// Generate the body of `__wasm_init_tls`. Each new thread calls it with the
// address of its TLS block (local 0): the function sets __tls_base, copies
// the .tdata template into place with memory.init, and then applies any TLS
// relocations.
void Writer::createInitTLSFunction() {
  std::string bodyContent;
  {
    raw_string_ostream os(bodyContent);

    // Find the merged TLS output segment, if any.
    OutputSegment *tlsSeg = nullptr;
    for (auto *seg : segments) {
      if (seg->name == ".tdata") {
        tlsSeg = seg;
        break;
      }
    }

    writeUleb128(os, number: 0, msg: "num locals");
    if (tlsSeg) {
      // __tls_base = <arg 0>
      writeU8(os, byte: WASM_OPCODE_LOCAL_GET, msg: "local.get");
      writeUleb128(os, number: 0, msg: "local index");

      writeU8(os, byte: WASM_OPCODE_GLOBAL_SET, msg: "global.set");
      writeUleb128(os, number: ctx.sym.tlsBase->getGlobalIndex(), msg: "global index");

      // memory.init(dest=<arg 0>, offset=0, size=<tls size>)
      // FIXME(wvo): this local needs to be I64 in wasm64, or we need an extend op.
      writeU8(os, byte: WASM_OPCODE_LOCAL_GET, msg: "local.get");
      writeUleb128(os, number: 0, msg: "local index");

      writeI32Const(os, number: 0, msg: "segment offset");

      writeI32Const(os, number: tlsSeg->size, msg: "memory region size");

      writeU8(os, byte: WASM_OPCODE_MISC_PREFIX, msg: "bulk-memory prefix");
      writeUleb128(os, number: WASM_OPCODE_MEMORY_INIT, msg: "MEMORY.INIT");
      writeUleb128(os, number: tlsSeg->index, msg: "segment index immediate");
      writeU8(os, byte: 0, msg: "memory index immediate");
    }

    // Apply relocations within the TLS data segments, if needed.
    if (ctx.sym.applyTLSRelocs) {
      writeU8(os, byte: WASM_OPCODE_CALL, msg: "CALL");
      writeUleb128(os, number: ctx.sym.applyTLSRelocs->getFunctionIndex(),
                   msg: "function index");
    }

    // Apply relocations to wasm globals holding TLS addresses, if needed.
    if (ctx.sym.applyGlobalTLSRelocs) {
      writeU8(os, byte: WASM_OPCODE_CALL, msg: "CALL");
      writeUleb128(os, number: ctx.sym.applyGlobalTLSRelocs->getFunctionIndex(),
                   msg: "function index");
    }
    writeU8(os, byte: WASM_OPCODE_END, msg: "end function");
  }

  createFunction(func: ctx.sym.initTLS, bodyContent);
}
1660
1661// Populate InitFunctions vector with init functions from all input objects.
1662// This is then used either when creating the output linking section or to
1663// synthesize the "__wasm_call_ctors" function.
1664void Writer::calculateInitFunctions() {
1665 if (!ctx.arg.relocatable && !ctx.sym.callCtors->isLive())
1666 return;
1667
1668 for (ObjFile *file : ctx.objectFiles) {
1669 const WasmLinkingData &l = file->getWasmObj()->linkingData();
1670 for (const WasmInitFunc &f : l.InitFunctions) {
1671 FunctionSymbol *sym = file->getFunctionSymbol(index: f.Symbol);
1672 // comdat exclusions can cause init functions be discarded.
1673 if (sym->isDiscarded() || !sym->isLive())
1674 continue;
1675 if (sym->signature->Params.size() != 0)
1676 error(msg: "constructor functions cannot take arguments: " + toString(sym: *sym));
1677 LLVM_DEBUG(dbgs() << "initFunctions: " << toString(*sym) << "\n");
1678 initFunctions.emplace_back(args: WasmInitEntry{.sym: sym, .priority: f.Priority});
1679 }
1680 }
1681
1682 // Sort in order of priority (lowest first) so that they are called
1683 // in the correct order.
1684 llvm::stable_sort(Range&: initFunctions,
1685 C: [](const WasmInitEntry &l, const WasmInitEntry &r) {
1686 return l.priority < r.priority;
1687 });
1688}
1689
// Allocate the synthetic output sections that can be created before memory
// layout. Sections that depend on layout results are created later in
// createSyntheticSectionsPostLayout.
void Writer::createSyntheticSections() {
  out.dylinkSec = make<DylinkSection>();
  out.typeSec = make<TypeSection>();
  out.importSec = make<ImportSection>();
  out.functionSec = make<FunctionSection>();
  out.tableSec = make<TableSection>();
  out.memorySec = make<MemorySection>();
  out.tagSec = make<TagSection>();
  out.globalSec = make<GlobalSection>();
  out.exportSec = make<ExportSection>();
  out.startSec = make<StartSection>();
  out.elemSec = make<ElemSection>();
  out.producersSec = make<ProducersSection>();
  out.targetFeaturesSec = make<TargetFeaturesSection>();
  out.buildIdSec = make<BuildIdSection>();
}
1706
1707void Writer::createSyntheticSectionsPostLayout() {
1708 out.dataCountSec = make<DataCountSection>(args&: segments);
1709 out.linkingSec = make<LinkingSection>(args&: initFunctions, args&: segments);
1710 out.nameSec = make<NameSection>(args&: segments);
1711}
1712
1713void Writer::run() {
1714 // For PIC code the table base is assigned dynamically by the loader.
1715 // For non-PIC, we start at 1 so that accessing table index 0 always traps.
1716 if (!ctx.isPic && ctx.sym.tableBase)
1717 setGlobalPtr(g: cast<DefinedGlobal>(Val: ctx.sym.tableBase), memoryPtr: ctx.arg.tableBase);
1718
1719 log(msg: "-- createOutputSegments");
1720 createOutputSegments();
1721 log(msg: "-- createSyntheticSections");
1722 createSyntheticSections();
1723 log(msg: "-- layoutMemory");
1724 layoutMemory();
1725
1726 if (!ctx.arg.relocatable) {
1727 // Create linker synthesized __start_SECNAME/__stop_SECNAME symbols
1728 // This has to be done after memory layout is performed.
1729 for (const OutputSegment *seg : segments) {
1730 addStartStopSymbols(seg);
1731 }
1732 }
1733
1734 for (auto &pair : ctx.arg.exportedSymbols) {
1735 Symbol *sym = symtab->find(name: pair.first());
1736 if (sym && sym->isDefined())
1737 sym->forceExport = true;
1738 }
1739
1740 // Delay reporting errors about explicit exports until after
1741 // addStartStopSymbols which can create optional symbols.
1742 for (auto &name : ctx.arg.requiredExports) {
1743 Symbol *sym = symtab->find(name);
1744 if (!sym || !sym->isDefined()) {
1745 if (ctx.arg.unresolvedSymbols == UnresolvedPolicy::ReportError)
1746 error(msg: Twine("symbol exported via --export not found: ") + name);
1747 if (ctx.arg.unresolvedSymbols == UnresolvedPolicy::Warn)
1748 warn(msg: Twine("symbol exported via --export not found: ") + name);
1749 }
1750 }
1751
1752 log(msg: "-- populateTargetFeatures");
1753 populateTargetFeatures();
1754
1755 // When outputting PIC code each segment lives at at fixes offset from the
1756 // `__memory_base` import. Unless we support the extended const expression we
1757 // can't do addition inside the constant expression, so we much combine the
1758 // segments into a single one that can live at `__memory_base`.
1759 if (ctx.isPic && !ctx.arg.extendedConst && !ctx.arg.sharedMemory) {
1760 // In shared memory mode all data segments are passive and initialized
1761 // via __wasm_init_memory.
1762 log(msg: "-- combineOutputSegments");
1763 combineOutputSegments();
1764 }
1765
1766 log(msg: "-- createSyntheticSectionsPostLayout");
1767 createSyntheticSectionsPostLayout();
1768 log(msg: "-- populateProducers");
1769 populateProducers();
1770 log(msg: "-- calculateImports");
1771 calculateImports();
1772 log(msg: "-- scanRelocations");
1773 scanRelocations();
1774 log(msg: "-- finalizeIndirectFunctionTable");
1775 finalizeIndirectFunctionTable();
1776 log(msg: "-- createSyntheticInitFunctions");
1777 createSyntheticInitFunctions();
1778 log(msg: "-- assignIndexes");
1779 assignIndexes();
1780 log(msg: "-- calculateInitFunctions");
1781 calculateInitFunctions();
1782
1783 if (!ctx.arg.relocatable) {
1784 // Create linker synthesized functions
1785 if (ctx.sym.applyGlobalRelocs) {
1786 createApplyGlobalRelocationsFunction();
1787 }
1788 if (ctx.sym.applyTLSRelocs) {
1789 createApplyTLSRelocationsFunction();
1790 }
1791 if (ctx.sym.applyGlobalTLSRelocs) {
1792 createApplyGlobalTLSRelocationsFunction();
1793 }
1794 if (ctx.sym.initMemory) {
1795 createInitMemoryFunction();
1796 }
1797 createStartFunction();
1798
1799 createCallCtorsFunction();
1800
1801 // Create export wrappers for commands if needed.
1802 //
1803 // If the input contains a call to `__wasm_call_ctors`, either in one of
1804 // the input objects or an explicit export from the command-line, we
1805 // assume ctors and dtors are taken care of already.
1806 if (!ctx.arg.relocatable && !ctx.isPic &&
1807 !ctx.sym.callCtors->isUsedInRegularObj &&
1808 !ctx.sym.callCtors->isExported()) {
1809 log(msg: "-- createCommandExportWrappers");
1810 createCommandExportWrappers();
1811 }
1812 }
1813
1814 if (ctx.sym.initTLS && ctx.sym.initTLS->isLive()) {
1815 log(msg: "-- createInitTLSFunction");
1816 createInitTLSFunction();
1817 }
1818
1819 if (errorCount())
1820 return;
1821
1822 log(msg: "-- calculateTypes");
1823 calculateTypes();
1824 log(msg: "-- calculateExports");
1825 calculateExports();
1826 log(msg: "-- calculateCustomSections");
1827 calculateCustomSections();
1828 log(msg: "-- populateSymtab");
1829 populateSymtab();
1830 log(msg: "-- checkImportExportTargetFeatures");
1831 checkImportExportTargetFeatures();
1832 log(msg: "-- addSections");
1833 addSections();
1834
1835 if (errorHandler().verbose) {
1836 log(msg: "Defined Functions: " + Twine(out.functionSec->inputFunctions.size()));
1837 log(msg: "Defined Globals : " + Twine(out.globalSec->numGlobals()));
1838 log(msg: "Defined Tags : " + Twine(out.tagSec->inputTags.size()));
1839 log(msg: "Defined Tables : " + Twine(out.tableSec->inputTables.size()));
1840 log(msg: "Function Imports : " +
1841 Twine(out.importSec->getNumImportedFunctions()));
1842 log(msg: "Global Imports : " + Twine(out.importSec->getNumImportedGlobals()));
1843 log(msg: "Tag Imports : " + Twine(out.importSec->getNumImportedTags()));
1844 log(msg: "Table Imports : " + Twine(out.importSec->getNumImportedTables()));
1845 }
1846
1847 createHeader();
1848 log(msg: "-- finalizeSections");
1849 finalizeSections();
1850
1851 log(msg: "-- writeMapFile");
1852 writeMapFile(outputSections);
1853
1854 log(msg: "-- openFile");
1855 openFile();
1856 if (errorCount())
1857 return;
1858
1859 writeHeader();
1860
1861 log(msg: "-- writeSections");
1862 writeSections();
1863 writeBuildId();
1864 if (errorCount())
1865 return;
1866
1867 if (Error e = buffer->commit())
1868 fatal(msg: "failed to write output '" + buffer->getPath() +
1869 "': " + toString(E: std::move(e)));
1870}
1871
1872// Open a result file.
1873void Writer::openFile() {
1874 log(msg: "writing: " + ctx.arg.outputFile);
1875
1876 Expected<std::unique_ptr<FileOutputBuffer>> bufferOrErr =
1877 FileOutputBuffer::create(FilePath: ctx.arg.outputFile, Size: fileSize,
1878 Flags: FileOutputBuffer::F_executable);
1879
1880 if (!bufferOrErr)
1881 error(msg: "failed to open " + ctx.arg.outputFile + ": " +
1882 toString(E: bufferOrErr.takeError()));
1883 else
1884 buffer = std::move(*bufferOrErr);
1885}
1886
1887void Writer::createHeader() {
1888 raw_string_ostream os(header);
1889 writeBytes(os, bytes: WasmMagic, count: sizeof(WasmMagic), msg: "wasm magic");
1890 writeU32(os, number: WasmVersion, msg: "wasm version");
1891 fileSize += header.size();
1892}
1893
// Public entry point: construct a Writer and emit the final wasm module.
void writeResult() { Writer().run(); }
1895
} // namespace lld::wasm
1897