| 1 | //===- Writer.cpp ---------------------------------------------------------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | |
| 9 | #include "Writer.h" |
| 10 | #include "Config.h" |
| 11 | #include "InputChunks.h" |
| 12 | #include "InputElement.h" |
| 13 | #include "MapFile.h" |
| 14 | #include "OutputSections.h" |
| 15 | #include "OutputSegment.h" |
| 16 | #include "Relocations.h" |
| 17 | #include "SymbolTable.h" |
| 18 | #include "SyntheticSections.h" |
| 19 | #include "WriterUtils.h" |
| 20 | #include "lld/Common/Arrays.h" |
| 21 | #include "lld/Common/CommonLinkerContext.h" |
| 22 | #include "lld/Common/Strings.h" |
| 23 | #include "llvm/ADT/ArrayRef.h" |
| 24 | #include "llvm/ADT/MapVector.h" |
| 25 | #include "llvm/ADT/SmallSet.h" |
| 26 | #include "llvm/ADT/SmallVector.h" |
| 27 | #include "llvm/ADT/StringMap.h" |
| 28 | #include "llvm/BinaryFormat/Wasm.h" |
| 29 | #include "llvm/Support/FileOutputBuffer.h" |
| 30 | #include "llvm/Support/FormatVariadic.h" |
| 31 | #include "llvm/Support/Parallel.h" |
| 32 | #include "llvm/Support/RandomNumberGenerator.h" |
| 33 | #include "llvm/Support/SHA1.h" |
| 34 | #include "llvm/Support/xxhash.h" |
| 35 | |
| 36 | #include <cstdarg> |
| 37 | #include <optional> |
| 38 | |
| 39 | #define DEBUG_TYPE "lld" |
| 40 | |
| 41 | using namespace llvm; |
| 42 | using namespace llvm::wasm; |
| 43 | |
| 44 | namespace lld::wasm { |
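// Alignment, in bytes, applied when placing the shadow stack and the heap
// base in linear memory (see layoutMemory below).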
| 45 | static constexpr int stackAlignment = 16; |
| 46 | static constexpr int heapAlignment = 16; |
| 47 | |
| 48 | namespace { |
| 49 | |
| 50 | // The writer writes a SymbolTable result to a file. |
| 51 | class Writer { |
| 52 | public: |
| 53 | void run(); |
| 54 | |
| 55 | private: |
| 56 | void openFile(); |
| 57 | |
| 58 | bool needsPassiveInitialization(const OutputSegment *segment); |
| 59 | bool hasPassiveInitializedSegments(); |
| 60 | |
| 61 | void createSyntheticInitFunctions(); |
| 62 | void createInitMemoryFunction(); |
| 63 | void createStartFunction(); |
| 64 | void createApplyDataRelocationsFunction(); |
| 65 | void createApplyGlobalRelocationsFunction(); |
| 66 | void createApplyTLSRelocationsFunction(); |
| 67 | void createApplyGlobalTLSRelocationsFunction(); |
| 68 | void createCallCtorsFunction(); |
| 69 | void createInitTLSFunction(); |
| 70 | void createCommandExportWrappers(); |
| 71 | void createCommandExportWrapper(uint32_t functionIndex, DefinedFunction *f); |
| 72 | |
| 73 | void assignIndexes(); |
| 74 | void populateSymtab(); |
| 75 | void populateProducers(); |
| 76 | void populateTargetFeatures(); |
// populateTargetFeatures happens early on, so some checks are delayed
// until imports and exports are finalized. Those checks are run instead
// in checkImportExportTargetFeatures.
| 80 | void checkImportExportTargetFeatures(); |
| 81 | void calculateInitFunctions(); |
| 82 | void calculateImports(); |
| 83 | void calculateExports(); |
| 84 | void calculateCustomSections(); |
| 85 | void calculateTypes(); |
| 86 | void createOutputSegments(); |
| 87 | OutputSegment *createOutputSegment(StringRef name); |
| 88 | void combineOutputSegments(); |
| 89 | void layoutMemory(); |
| 90 | void createHeader(); |
| 91 | |
| 92 | void addSection(OutputSection *sec); |
| 93 | |
| 94 | void addSections(); |
| 95 | |
| 96 | void createCustomSections(); |
| 97 | void createSyntheticSections(); |
| 98 | void createSyntheticSectionsPostLayout(); |
| 99 | void finalizeSections(); |
| 100 | |
| 101 | // Custom sections |
| 102 | void createRelocSections(); |
| 103 | |
| 104 | void writeHeader(); |
| 105 | void writeSections(); |
| 106 | void writeBuildId(); |
| 107 | |
| 108 | uint64_t fileSize = 0; |
| 109 | |
| 110 | std::vector<WasmInitEntry> initFunctions; |
| 111 | llvm::MapVector<StringRef, std::vector<InputChunk *>> customSectionMapping; |
| 112 | |
| 113 | // Stable storage for command export wrapper function name strings. |
| 114 | std::list<std::string> commandExportWrapperNames; |
| 115 | |
| 116 | // Elements that are used to construct the final output |
std::string header;
| 118 | std::vector<OutputSection *> outputSections; |
| 119 | |
| 120 | std::unique_ptr<FileOutputBuffer> buffer; |
| 121 | |
| 122 | std::vector<OutputSegment *> segments; |
| 123 | llvm::SmallDenseMap<StringRef, OutputSegment *> segmentMap; |
| 124 | }; |
| 125 | |
| 126 | } // anonymous namespace |
| 127 | |
| 128 | void Writer::calculateCustomSections() { |
| 129 | log(msg: "calculateCustomSections" ); |
| 130 | bool stripDebug = ctx.arg.stripDebug || ctx.arg.stripAll; |
| 131 | for (ObjFile *file : ctx.objectFiles) { |
| 132 | for (InputChunk *section : file->customSections) { |
| 133 | // Exclude COMDAT sections that are not selected for inclusion |
| 134 | if (section->discarded) |
| 135 | continue; |
| 136 | // Ignore empty custom sections. In particular objcopy/strip will |
| 137 | // sometimes replace stripped sections with empty custom sections to |
| 138 | // avoid section re-numbering. |
| 139 | if (section->getSize() == 0) |
| 140 | continue; |
| 141 | StringRef name = section->name; |
// These custom sections are known to the linker and synthesized rather than
// blindly copied.
| 144 | if (name == "linking" || name == "name" || name == "producers" || |
| 145 | name == "target_features" || name.starts_with(Prefix: "reloc." )) |
| 146 | continue; |
| 147 | // These custom sections are generated by `clang -fembed-bitcode`. |
| 148 | // These are used by the rust toolchain to ship LTO data along with |
| 149 | // compiled object code, but they don't want this included in the linker |
| 150 | // output. |
| 151 | if (name == ".llvmbc" || name == ".llvmcmd" ) |
| 152 | continue; |
// Strip debug sections if that option was specified.
| 154 | if (stripDebug && name.starts_with(Prefix: ".debug_" )) |
| 155 | continue; |
| 156 | // Otherwise include custom sections by default and concatenate their |
| 157 | // contents. |
| 158 | customSectionMapping[name].push_back(x: section); |
| 159 | } |
| 160 | } |
| 161 | } |
| 162 | |
| 163 | void Writer::createCustomSections() { |
| 164 | log(msg: "createCustomSections" ); |
| 165 | for (auto &pair : customSectionMapping) { |
| 166 | StringRef name = pair.first; |
| 167 | LLVM_DEBUG(dbgs() << "createCustomSection: " << name << "\n" ); |
| 168 | |
| 169 | OutputSection *sec = make<CustomSection>(args: std::string(name), args&: pair.second); |
| 170 | if (ctx.arg.relocatable || ctx.arg.emitRelocs) { |
| 171 | auto *sym = make<OutputSectionSymbol>(args&: sec); |
| 172 | out.linkingSec->addToSymtab(sym); |
| 173 | sec->sectionSym = sym; |
| 174 | } |
| 175 | addSection(sec); |
| 176 | } |
| 177 | } |
| 178 | |
| 179 | // Create relocations sections in the final output. |
| 180 | // These are only created when relocatable output is requested. |
| 181 | void Writer::createRelocSections() { |
| 182 | log(msg: "createRelocSections" ); |
// Don't use an iterator here since addSection() appends to outputSections.
| 184 | size_t origSize = outputSections.size(); |
| 185 | for (size_t i = 0; i < origSize; i++) { |
| 186 | LLVM_DEBUG(dbgs() << "check section " << i << "\n" ); |
| 187 | OutputSection *sec = outputSections[i]; |
| 188 | |
// Skip sections that have no relocations.
| 190 | uint32_t count = sec->getNumRelocations(); |
| 191 | if (!count) |
| 192 | continue; |
| 193 | |
| 194 | StringRef name; |
| 195 | if (sec->type == WASM_SEC_DATA) |
| 196 | name = "reloc.DATA" ; |
| 197 | else if (sec->type == WASM_SEC_CODE) |
| 198 | name = "reloc.CODE" ; |
| 199 | else if (sec->type == WASM_SEC_CUSTOM) |
| 200 | name = saver().save(S: "reloc." + sec->name); |
| 201 | else |
| 202 | llvm_unreachable( |
| 203 | "relocations only supported for code, data, or custom sections" ); |
| 204 | |
| 205 | addSection(sec: make<RelocSection>(args&: name, args&: sec)); |
| 206 | } |
| 207 | } |
| 208 | |
| 209 | void Writer::populateProducers() { |
| 210 | for (ObjFile *file : ctx.objectFiles) { |
| 211 | const WasmProducerInfo &info = file->getWasmObj()->getProducerInfo(); |
| 212 | out.producersSec->addInfo(info); |
| 213 | } |
| 214 | } |
| 215 | |
void Writer::writeHeader() {
| 217 | memcpy(dest: buffer->getBufferStart(), src: header.data(), n: header.size()); |
| 218 | } |
| 219 | |
| 220 | void Writer::writeSections() { |
| 221 | uint8_t *buf = buffer->getBufferStart(); |
| 222 | parallelForEach(R&: outputSections, Fn: [buf](OutputSection *s) { |
| 223 | assert(s->isNeeded()); |
| 224 | s->writeTo(buf); |
| 225 | }); |
| 226 | } |
| 227 | |
| 228 | // Computes a hash value of Data using a given hash function. |
| 229 | // In order to utilize multiple cores, we first split data into 1MB |
| 230 | // chunks, compute a hash for each chunk, and then compute a hash value |
| 231 | // of the hash values. |
| 232 | |
| 233 | static void |
| 234 | computeHash(llvm::MutableArrayRef<uint8_t> hashBuf, |
| 235 | llvm::ArrayRef<uint8_t> data, |
| 236 | std::function<void(uint8_t *dest, ArrayRef<uint8_t> arr)> hashFn) { |
| 237 | std::vector<ArrayRef<uint8_t>> chunks = split(arr: data, chunkSize: 1024 * 1024); |
| 238 | std::vector<uint8_t> hashes(chunks.size() * hashBuf.size()); |
| 239 | |
| 240 | // Compute hash values. |
| 241 | parallelFor(Begin: 0, End: chunks.size(), Fn: [&](size_t i) { |
| 242 | hashFn(hashes.data() + i * hashBuf.size(), chunks[i]); |
| 243 | }); |
| 244 | |
| 245 | // Write to the final output buffer. |
| 246 | hashFn(hashBuf.data(), hashes); |
| 247 | } |
| 248 | |
| 249 | static void makeUUID(unsigned version, llvm::ArrayRef<uint8_t> fileHash, |
| 250 | llvm::MutableArrayRef<uint8_t> output) { |
| 251 | assert((version == 4 || version == 5) && "Unknown UUID version" ); |
| 252 | assert(output.size() == 16 && "Wrong size for UUID output" ); |
| 253 | if (version == 5) { |
| 254 | // Build a valid v5 UUID from a hardcoded (randomly-generated) namespace |
| 255 | // UUID, and the computed hash of the output. |
| 256 | std::array<uint8_t, 16> namespaceUUID{0xA1, 0xFA, 0x48, 0x2D, 0x0E, 0x22, |
| 257 | 0x03, 0x8D, 0x33, 0x8B, 0x52, 0x1C, |
| 258 | 0xD6, 0xD2, 0x12, 0xB2}; |
| 259 | SHA1 sha; |
| 260 | sha.update(Data: namespaceUUID); |
| 261 | sha.update(Data: fileHash); |
| 262 | auto s = sha.final(); |
| 263 | std::copy(first: s.data(), last: &s.data()[output.size()], result: output.data()); |
| 264 | } else if (version == 4) { |
| 265 | if (auto ec = llvm::getRandomBytes(Buffer: output.data(), Size: output.size())) |
| 266 | error(msg: "entropy source failure: " + ec.message()); |
| 267 | } |
| 268 | // Set the UUID version and variant fields. |
| 269 | // The version is the upper nibble of byte 6 (0b0101xxxx or 0b0100xxxx) |
| 270 | output[6] = (static_cast<uint8_t>(version) << 4) | (output[6] & 0xF); |
| 271 | |
| 272 | // The variant is DCE 1.1/ISO 11578 (0b10xxxxxx) |
| 273 | output[8] &= 0xBF; |
| 274 | output[8] |= 0x80; |
| 275 | } |
| 276 | |
| 277 | void Writer::writeBuildId() { |
| 278 | if (!out.buildIdSec->isNeeded()) |
| 279 | return; |
| 280 | if (ctx.arg.buildId == BuildIdKind::Hexstring) { |
| 281 | out.buildIdSec->writeBuildId(buf: ctx.arg.buildIdVector); |
| 282 | return; |
| 283 | } |
| 284 | |
| 285 | // Compute a hash of all sections of the output file. |
| 286 | size_t hashSize = out.buildIdSec->hashSize; |
| 287 | std::vector<uint8_t> buildId(hashSize); |
| 288 | llvm::ArrayRef<uint8_t> buf{buffer->getBufferStart(), size_t(fileSize)}; |
| 289 | |
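// Fast derives a version 5 UUID from an xxh3 hash of the file contents,
// Sha1 uses SHA-1 (computed in 1 MiB chunks via computeHash), and Uuid is
// a random version 4 UUID.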
| 290 | switch (ctx.arg.buildId) { |
| 291 | case BuildIdKind::Fast: { |
| 292 | std::vector<uint8_t> fileHash(8); |
| 293 | computeHash(hashBuf: fileHash, data: buf, hashFn: [](uint8_t *dest, ArrayRef<uint8_t> arr) { |
| 294 | support::endian::write64le(P: dest, V: xxh3_64bits(data: arr)); |
| 295 | }); |
| 296 | makeUUID(version: 5, fileHash, output: buildId); |
| 297 | break; |
| 298 | } |
| 299 | case BuildIdKind::Sha1: |
| 300 | computeHash(hashBuf: buildId, data: buf, hashFn: [&](uint8_t *dest, ArrayRef<uint8_t> arr) { |
| 301 | memcpy(dest: dest, src: SHA1::hash(Data: arr).data(), n: hashSize); |
| 302 | }); |
| 303 | break; |
| 304 | case BuildIdKind::Uuid: |
| 305 | makeUUID(version: 4, fileHash: {}, output: buildId); |
| 306 | break; |
| 307 | default: |
| 308 | llvm_unreachable("unknown BuildIdKind" ); |
| 309 | } |
| 310 | out.buildIdSec->writeBuildId(buf: buildId); |
| 311 | } |
| 312 | |
| 313 | static void setGlobalPtr(DefinedGlobal *g, uint64_t memoryPtr) { |
| 314 | LLVM_DEBUG(dbgs() << "setGlobalPtr " << g->getName() << " -> " << memoryPtr << "\n" ); |
| 315 | g->global->setPointerValue(memoryPtr); |
| 316 | } |
| 317 | |
| 318 | static void checkPageAligned(StringRef name, uint64_t value) { |
| 319 | if (value != alignTo(Value: value, Align: ctx.arg.pageSize)) |
| 320 | error(msg: name + " must be aligned to the page size (" + |
| 321 | Twine(ctx.arg.pageSize) + " bytes)" ); |
| 322 | } |
| 323 | |
| 324 | // Fix the memory layout of the output binary. This assigns memory offsets |
| 325 | // to each of the input data sections as well as the explicit stack region. |
| 326 | // The default memory layout is as follows, from low to high. |
| 327 | // |
| 328 | // - initialized data (starting at ctx.arg.globalBase) |
| 329 | // - BSS data (not currently implemented in llvm) |
| 330 | // - explicit stack (ctx.arg.ZStackSize) |
| 331 | // - heap start / unallocated |
| 332 | // |
| 333 | // The --stack-first option means that stack is placed before any static data. |
| 334 | // This can be useful since it means that stack overflow traps immediately |
| 335 | // rather than overwriting global data, but also increases code size since all |
// static data loads and stores require larger offsets.
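// As a side effect, this assigns final addresses to the linker-synthesized
// symbols that are present: __global_base, __dso_handle, __tls_size,
// __tls_align, __tls_base, __wasm_init_memory_flag, __data_end, __stack_low,
// __stack_high, __heap_base and __heap_end, as well as the __stack_pointer
// global.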
| 337 | void Writer::layoutMemory() { |
| 338 | uint64_t memoryPtr = 0; |
| 339 | |
| 340 | auto placeStack = [&]() { |
| 341 | if (ctx.arg.relocatable || ctx.isPic) |
| 342 | return; |
| 343 | memoryPtr = alignTo(Value: memoryPtr, Align: stackAlignment); |
| 344 | if (ctx.sym.stackLow) |
| 345 | ctx.sym.stackLow->setVA(memoryPtr); |
| 346 | if (ctx.arg.zStackSize != alignTo(Value: ctx.arg.zStackSize, Align: stackAlignment)) |
| 347 | error(msg: "stack size must be " + Twine(stackAlignment) + "-byte aligned" ); |
| 348 | log(msg: "mem: stack size = " + Twine(ctx.arg.zStackSize)); |
| 349 | log(msg: "mem: stack base = " + Twine(memoryPtr)); |
| 350 | memoryPtr += ctx.arg.zStackSize; |
| 351 | setGlobalPtr(g: cast<DefinedGlobal>(Val: ctx.sym.stackPointer), memoryPtr); |
| 352 | if (ctx.sym.stackHigh) |
| 353 | ctx.sym.stackHigh->setVA(memoryPtr); |
| 354 | log(msg: "mem: stack top = " + Twine(memoryPtr)); |
| 355 | }; |
| 356 | |
| 357 | if (ctx.arg.stackFirst) { |
| 358 | placeStack(); |
| 359 | if (ctx.arg.globalBase) { |
| 360 | if (ctx.arg.globalBase < memoryPtr) { |
| 361 | error(msg: "--global-base cannot be less than stack size when --stack-first is used" ); |
| 362 | return; |
| 363 | } |
| 364 | memoryPtr = ctx.arg.globalBase; |
| 365 | } |
| 366 | } else { |
| 367 | memoryPtr = ctx.arg.globalBase; |
| 368 | } |
| 369 | |
| 370 | log(msg: "mem: global base = " + Twine(memoryPtr)); |
| 371 | if (ctx.sym.globalBase) |
| 372 | ctx.sym.globalBase->setVA(memoryPtr); |
| 373 | |
| 374 | uint64_t dataStart = memoryPtr; |
| 375 | |
// Arbitrarily set __dso_handle to point to the start of the data
// segments.
| 378 | if (ctx.sym.dsoHandle) |
| 379 | ctx.sym.dsoHandle->setVA(dataStart); |
| 380 | |
| 381 | out.dylinkSec->memAlign = 0; |
| 382 | for (OutputSegment *seg : segments) { |
| 383 | out.dylinkSec->memAlign = std::max(a: out.dylinkSec->memAlign, b: seg->alignment); |
| 384 | memoryPtr = alignTo(Value: memoryPtr, Align: 1ULL << seg->alignment); |
| 385 | seg->startVA = memoryPtr; |
| 386 | log(msg: formatv(Fmt: "mem: {0,-15} offset={1,-8} size={2,-8} align={3}" , Vals&: seg->name, |
| 387 | Vals&: memoryPtr, Vals&: seg->size, Vals&: seg->alignment)); |
| 388 | |
| 389 | if (!ctx.arg.relocatable && seg->isTLS()) { |
| 390 | if (ctx.sym.tlsSize) { |
| 391 | auto *tlsSize = cast<DefinedGlobal>(Val: ctx.sym.tlsSize); |
| 392 | setGlobalPtr(g: tlsSize, memoryPtr: seg->size); |
| 393 | } |
| 394 | if (ctx.sym.tlsAlign) { |
| 395 | auto *tlsAlign = cast<DefinedGlobal>(Val: ctx.sym.tlsAlign); |
| 396 | setGlobalPtr(g: tlsAlign, memoryPtr: int64_t{1} << seg->alignment); |
| 397 | } |
| 398 | if (!ctx.arg.sharedMemory && ctx.sym.tlsBase) { |
| 399 | auto *tlsBase = cast<DefinedGlobal>(Val: ctx.sym.tlsBase); |
| 400 | setGlobalPtr(g: tlsBase, memoryPtr); |
| 401 | } |
| 402 | } |
| 403 | |
| 404 | memoryPtr += seg->size; |
| 405 | } |
| 406 | |
| 407 | // Make space for the memory initialization flag |
| 408 | if (ctx.arg.sharedMemory && hasPassiveInitializedSegments()) { |
| 409 | memoryPtr = alignTo(Value: memoryPtr, Align: 4); |
| 410 | ctx.sym.initMemoryFlag = symtab->addSyntheticDataSymbol( |
| 411 | name: "__wasm_init_memory_flag" , flags: WASM_SYMBOL_VISIBILITY_HIDDEN); |
| 412 | ctx.sym.initMemoryFlag->markLive(); |
| 413 | ctx.sym.initMemoryFlag->setVA(memoryPtr); |
| 414 | log(msg: formatv(Fmt: "mem: {0,-15} offset={1,-8} size={2,-8} align={3}" , |
| 415 | Vals: "__wasm_init_memory_flag" , Vals&: memoryPtr, Vals: 4, Vals: 4)); |
| 416 | memoryPtr += 4; |
| 417 | } |
| 418 | |
| 419 | if (ctx.sym.dataEnd) |
| 420 | ctx.sym.dataEnd->setVA(memoryPtr); |
| 421 | |
| 422 | uint64_t staticDataSize = memoryPtr - dataStart; |
| 423 | log(msg: "mem: static data = " + Twine(staticDataSize)); |
| 424 | if (ctx.isPic) |
| 425 | out.dylinkSec->memSize = staticDataSize; |
| 426 | |
| 427 | if (!ctx.arg.stackFirst) |
| 428 | placeStack(); |
| 429 | |
| 430 | if (ctx.sym.heapBase) { |
| 431 | // Set `__heap_base` to follow the end of the stack or global data. The |
| 432 | // fact that this comes last means that a malloc/brk implementation can |
| 433 | // grow the heap at runtime. |
| 434 | // We'll align the heap base here because memory allocators might expect |
| 435 | // __heap_base to be aligned already. |
| 436 | memoryPtr = alignTo(Value: memoryPtr, Align: heapAlignment); |
| 437 | log(msg: "mem: heap base = " + Twine(memoryPtr)); |
| 438 | ctx.sym.heapBase->setVA(memoryPtr); |
| 439 | } |
| 440 | |
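// The hard cap on linear memory: 4 GiB for wasm32 and, provisionally,
// 16 GiB for wasm64 (see the TODO below).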
| 441 | uint64_t maxMemorySetting = 1ULL << 32; |
| 442 | if (ctx.arg.is64.value_or(u: false)) { |
| 443 | // TODO: Update once we decide on a reasonable limit here: |
| 444 | // https://github.com/WebAssembly/memory64/issues/33 |
| 445 | maxMemorySetting = 1ULL << 34; |
| 446 | } |
| 447 | |
| 448 | if (ctx.arg.initialHeap != 0) { |
| 449 | checkPageAligned(name: "initial heap" , value: ctx.arg.initialHeap); |
| 450 | uint64_t maxInitialHeap = maxMemorySetting - memoryPtr; |
| 451 | if (ctx.arg.initialHeap > maxInitialHeap) |
| 452 | error(msg: "initial heap too large, cannot be greater than " + |
| 453 | Twine(maxInitialHeap)); |
| 454 | memoryPtr += ctx.arg.initialHeap; |
| 455 | } |
| 456 | |
| 457 | if (ctx.arg.initialMemory != 0) { |
| 458 | checkPageAligned(name: "initial memory" , value: ctx.arg.initialMemory); |
| 459 | if (memoryPtr > ctx.arg.initialMemory) |
| 460 | error(msg: "initial memory too small, " + Twine(memoryPtr) + " bytes needed" ); |
| 461 | if (ctx.arg.initialMemory > maxMemorySetting) |
| 462 | error(msg: "initial memory too large, cannot be greater than " + |
| 463 | Twine(maxMemorySetting)); |
| 464 | memoryPtr = ctx.arg.initialMemory; |
| 465 | } |
| 466 | |
| 467 | memoryPtr = alignTo(Value: memoryPtr, Align: ctx.arg.pageSize); |
| 468 | |
| 469 | out.memorySec->numMemoryPages = memoryPtr / ctx.arg.pageSize; |
| 470 | log(msg: "mem: total pages = " + Twine(out.memorySec->numMemoryPages)); |
| 471 | |
| 472 | if (ctx.sym.heapEnd) { |
| 473 | // Set `__heap_end` to follow the end of the statically allocated linear |
| 474 | // memory. The fact that this comes last means that a malloc/brk |
| 475 | // implementation can grow the heap at runtime. |
| 476 | log(msg: "mem: heap end = " + Twine(memoryPtr)); |
| 477 | ctx.sym.heapEnd->setVA(memoryPtr); |
| 478 | } |
| 479 | |
| 480 | uint64_t maxMemory = 0; |
| 481 | if (ctx.arg.maxMemory != 0) { |
| 482 | checkPageAligned(name: "maximum memory" , value: ctx.arg.maxMemory); |
| 483 | if (memoryPtr > ctx.arg.maxMemory) |
| 484 | error(msg: "maximum memory too small, " + Twine(memoryPtr) + " bytes needed" ); |
| 485 | if (ctx.arg.maxMemory > maxMemorySetting) |
| 486 | error(msg: "maximum memory too large, cannot be greater than " + |
| 487 | Twine(maxMemorySetting)); |
| 488 | |
| 489 | maxMemory = ctx.arg.maxMemory; |
| 490 | } else if (ctx.arg.noGrowableMemory) { |
| 491 | maxMemory = memoryPtr; |
| 492 | } |
| 493 | |
| 494 | // If no maxMemory config was supplied but we are building with |
| 495 | // shared memory, we need to pick a sensible upper limit. |
| 496 | if (ctx.arg.sharedMemory && maxMemory == 0) { |
| 497 | if (ctx.isPic) |
| 498 | maxMemory = maxMemorySetting; |
| 499 | else |
| 500 | maxMemory = memoryPtr; |
| 501 | } |
| 502 | |
| 503 | if (maxMemory != 0) { |
| 504 | out.memorySec->maxMemoryPages = maxMemory / ctx.arg.pageSize; |
| 505 | log(msg: "mem: max pages = " + Twine(out.memorySec->maxMemoryPages)); |
| 506 | } |
| 507 | } |
| 508 | |
| 509 | void Writer::addSection(OutputSection *sec) { |
| 510 | if (!sec->isNeeded()) |
| 511 | return; |
| 512 | log(msg: "addSection: " + toString(section: *sec)); |
| 513 | sec->sectionIndex = outputSections.size(); |
| 514 | outputSections.push_back(x: sec); |
| 515 | } |
| 516 | |
| 517 | // If a section name is valid as a C identifier (which is rare because of |
| 518 | // the leading '.'), linkers are expected to define __start_<secname> and |
// __stop_<secname> symbols. They are at the beginning and end of the section,
// respectively. This is not required by the ELF standard, but GNU ld and
// gold provide the feature, and it is used by many programs.
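// For example, a segment named "foo" gets __start_foo and __stop_foo
// symbols, while ".rodata" gets none because of the leading '.'.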
| 522 | static void addStartStopSymbols(const OutputSegment *seg) { |
| 523 | StringRef name = seg->name; |
| 524 | if (!isValidCIdentifier(s: name)) |
| 525 | return; |
| 526 | LLVM_DEBUG(dbgs() << "addStartStopSymbols: " << name << "\n" ); |
| 527 | uint64_t start = seg->startVA; |
| 528 | uint64_t stop = start + seg->size; |
| 529 | symtab->addOptionalDataSymbol(name: saver().save(S: "__start_" + name), value: start); |
| 530 | symtab->addOptionalDataSymbol(name: saver().save(S: "__stop_" + name), value: stop); |
| 531 | } |
| 532 | |
| 533 | void Writer::addSections() { |
| 534 | addSection(sec: out.dylinkSec); |
| 535 | addSection(sec: out.typeSec); |
| 536 | addSection(sec: out.importSec); |
| 537 | addSection(sec: out.functionSec); |
| 538 | addSection(sec: out.tableSec); |
| 539 | addSection(sec: out.memorySec); |
| 540 | addSection(sec: out.tagSec); |
| 541 | addSection(sec: out.globalSec); |
| 542 | addSection(sec: out.exportSec); |
| 543 | addSection(sec: out.startSec); |
| 544 | addSection(sec: out.elemSec); |
| 545 | addSection(sec: out.dataCountSec); |
| 546 | |
| 547 | addSection(sec: make<CodeSection>(args&: out.functionSec->inputFunctions)); |
| 548 | addSection(sec: make<DataSection>(args&: segments)); |
| 549 | |
| 550 | createCustomSections(); |
| 551 | |
| 552 | addSection(sec: out.linkingSec); |
| 553 | if (ctx.arg.emitRelocs || ctx.arg.relocatable) { |
| 554 | createRelocSections(); |
| 555 | } |
| 556 | |
| 557 | addSection(sec: out.nameSec); |
| 558 | addSection(sec: out.producersSec); |
| 559 | addSection(sec: out.targetFeaturesSec); |
| 560 | addSection(sec: out.buildIdSec); |
| 561 | } |
| 562 | |
| 563 | void Writer::finalizeSections() { |
| 564 | for (OutputSection *s : outputSections) { |
| 565 | s->setOffset(fileSize); |
| 566 | s->finalizeContents(); |
| 567 | fileSize += s->getSize(); |
| 568 | } |
| 569 | } |
| 570 | |
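// Compute the set of features allowed in the output: either an explicitly
// specified feature list, or (by default) the union of the features used by
// the input objects. Unless --no-check-features was given, also diagnose any
// use of disallowed features and other feature mismatches.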
| 571 | void Writer::populateTargetFeatures() { |
| 572 | StringMap<std::string> used; |
| 573 | StringMap<std::string> disallowed; |
| 574 | SmallSet<std::string, 8> &allowed = out.targetFeaturesSec->features; |
| 575 | bool tlsUsed = false; |
| 576 | |
| 577 | if (ctx.isPic) { |
| 578 | // This should not be necessary because all PIC objects should |
| 579 | // contain the mutable-globals feature. |
| 580 | // TODO (https://github.com/llvm/llvm-project/issues/51681) |
| 581 | allowed.insert(V: "mutable-globals" ); |
| 582 | } |
| 583 | |
| 584 | if (ctx.arg.extraFeatures.has_value()) { |
auto &extraFeatures = *ctx.arg.extraFeatures;
| 586 | allowed.insert_range(R&: extraFeatures); |
| 587 | } |
| 588 | |
| 589 | // Only infer used features if user did not specify features |
| 590 | bool inferFeatures = !ctx.arg.features.has_value(); |
| 591 | |
| 592 | if (!inferFeatures) { |
| 593 | auto &explicitFeatures = *ctx.arg.features; |
| 594 | allowed.insert_range(R&: explicitFeatures); |
| 595 | if (!ctx.arg.checkFeatures) |
| 596 | goto done; |
| 597 | } |
| 598 | |
| 599 | // Find the sets of used and disallowed features |
| 600 | for (ObjFile *file : ctx.objectFiles) { |
| 601 | StringRef fileName(file->getName()); |
| 602 | for (auto &feature : file->getWasmObj()->getTargetFeatures()) { |
| 603 | switch (feature.Prefix) { |
| 604 | case WASM_FEATURE_PREFIX_USED: |
| 605 | used.insert(KV: {feature.Name, std::string(fileName)}); |
| 606 | break; |
| 607 | case WASM_FEATURE_PREFIX_DISALLOWED: |
| 608 | disallowed.insert(KV: {feature.Name, std::string(fileName)}); |
| 609 | break; |
| 610 | default: |
| 611 | error(msg: "Unrecognized feature policy prefix " + |
| 612 | std::to_string(val: feature.Prefix)); |
| 613 | } |
| 614 | } |
| 615 | |
| 616 | // Find TLS data segments |
| 617 | auto isTLS = [](InputChunk *segment) { |
| 618 | return segment->live && segment->isTLS(); |
| 619 | }; |
| 620 | tlsUsed = tlsUsed || llvm::any_of(Range&: file->segments, P: isTLS); |
| 621 | } |
| 622 | |
| 623 | if (inferFeatures) |
| 624 | for (const auto &key : used.keys()) |
| 625 | allowed.insert(V: std::string(key)); |
| 626 | |
| 627 | if (!ctx.arg.checkFeatures) |
| 628 | goto done; |
| 629 | |
| 630 | if (ctx.arg.sharedMemory) { |
| 631 | if (disallowed.count(Key: "shared-mem" )) |
| 632 | error(msg: "--shared-memory is disallowed by " + disallowed["shared-mem" ] + |
| 633 | " because it was not compiled with 'atomics' or 'bulk-memory' " |
| 634 | "features." ); |
| 635 | |
| 636 | for (auto feature : {"atomics" , "bulk-memory" }) |
| 637 | if (!allowed.count(V: feature)) |
| 638 | error(msg: StringRef("'" ) + feature + |
| 639 | "' feature must be used in order to use shared memory" ); |
| 640 | } |
| 641 | |
| 642 | if (tlsUsed) { |
| 643 | for (auto feature : {"atomics" , "bulk-memory" }) |
| 644 | if (!allowed.count(V: feature)) |
| 645 | error(msg: StringRef("'" ) + feature + |
| 646 | "' feature must be used in order to use thread-local storage" ); |
| 647 | } |
| 648 | |
| 649 | // Validate that used features are allowed in output |
| 650 | if (!inferFeatures) { |
| 651 | for (const auto &feature : used.keys()) { |
| 652 | if (!allowed.count(V: std::string(feature))) |
| 653 | error(msg: Twine("Target feature '" ) + feature + "' used by " + |
| 654 | used[feature] + " is not allowed." ); |
| 655 | } |
| 656 | } |
| 657 | |
| 658 | // Validate the disallowed constraints for each file |
| 659 | for (ObjFile *file : ctx.objectFiles) { |
| 660 | StringRef fileName(file->getName()); |
| 661 | SmallSet<std::string, 8> objectFeatures; |
| 662 | for (const auto &feature : file->getWasmObj()->getTargetFeatures()) { |
| 663 | if (feature.Prefix == WASM_FEATURE_PREFIX_DISALLOWED) |
| 664 | continue; |
| 665 | objectFeatures.insert(V: feature.Name); |
| 666 | if (disallowed.count(Key: feature.Name)) |
| 667 | error(msg: Twine("Target feature '" ) + feature.Name + "' used in " + |
| 668 | fileName + " is disallowed by " + disallowed[feature.Name] + |
| 669 | ". Use --no-check-features to suppress." ); |
| 670 | } |
| 671 | } |
| 672 | |
| 673 | done: |
// Normally we don't include bss segments in the binary. In particular, if
// memory is not being imported then we can assume it's zero initialized.
// If memory is imported and we can use the memory.fill instruction, then
// we can also avoid including the segments.
| 678 | // Finally, if we are emitting relocations, they may refer to locations within |
| 679 | // the bss segments, so these segments need to exist in the binary. |
| 680 | if (ctx.arg.emitRelocs || |
| 681 | (ctx.arg.memoryImport.has_value() && !allowed.count(V: "bulk-memory" ))) |
| 682 | ctx.emitBssSegments = true; |
| 683 | |
| 684 | if (allowed.count(V: "extended-const" )) |
| 685 | ctx.arg.extendedConst = true; |
| 686 | |
| 687 | for (auto &feature : allowed) |
| 688 | log(msg: "Allowed feature: " + feature); |
| 689 | } |
| 690 | |
| 691 | void Writer::checkImportExportTargetFeatures() { |
| 692 | if (ctx.arg.relocatable || !ctx.arg.checkFeatures) |
| 693 | return; |
| 694 | |
| 695 | if (out.targetFeaturesSec->features.count(V: "mutable-globals" ) == 0) { |
| 696 | for (const Symbol *sym : out.importSec->importedSymbols) { |
| 697 | if (auto *global = dyn_cast<GlobalSymbol>(Val: sym)) { |
| 698 | if (global->getGlobalType()->Mutable) { |
| 699 | error(msg: Twine("mutable global imported but 'mutable-globals' feature " |
| 700 | "not present in inputs: `" ) + |
| 701 | toString(sym: *sym) + "`. Use --no-check-features to suppress." ); |
| 702 | } |
| 703 | } |
| 704 | } |
| 705 | for (const Symbol *sym : out.exportSec->exportedSymbols) { |
| 706 | if (isa<GlobalSymbol>(Val: sym)) { |
| 707 | error(msg: Twine("mutable global exported but 'mutable-globals' feature " |
| 708 | "not present in inputs: `" ) + |
| 709 | toString(sym: *sym) + "`. Use --no-check-features to suppress." ); |
| 710 | } |
| 711 | } |
| 712 | } |
| 713 | } |
| 714 | |
| 715 | static bool shouldImport(Symbol *sym) { |
| 716 | // We don't generate imports for data symbols. They however can be imported |
| 717 | // as GOT entries. |
| 718 | if (isa<DataSymbol>(Val: sym)) |
| 719 | return false; |
| 720 | if (!sym->isLive()) |
| 721 | return false; |
| 722 | if (!sym->isUsedInRegularObj) |
| 723 | return false; |
| 724 | |
// When a symbol is weakly defined in a shared library we need to allow
// it to be overridden by another module, so we need to both import
// and export the symbol.
| 728 | if (ctx.arg.shared && sym->isWeak() && !sym->isUndefined() && |
| 729 | !sym->isHidden()) |
| 730 | return true; |
| 731 | if (sym->isShared()) |
| 732 | return true; |
| 733 | if (!sym->isUndefined()) |
| 734 | return false; |
| 735 | if (sym->isWeak() && !ctx.arg.relocatable && !ctx.isPic) |
| 736 | return false; |
| 737 | |
| 738 | // In PIC mode we only need to import functions when they are called directly. |
| 739 | // Indirect usage all goes via GOT imports. |
| 740 | if (ctx.isPic) { |
| 741 | if (auto *f = dyn_cast<UndefinedFunction>(Val: sym)) |
| 742 | if (!f->isCalledDirectly) |
| 743 | return false; |
| 744 | } |
| 745 | |
| 746 | if (ctx.isPic || ctx.arg.relocatable || ctx.arg.importUndefined || |
| 747 | ctx.arg.unresolvedSymbols == UnresolvedPolicy::ImportDynamic) |
| 748 | return true; |
| 749 | if (ctx.arg.allowUndefinedSymbols.count(Key: sym->getName()) != 0) |
| 750 | return true; |
| 751 | |
| 752 | return sym->isImported(); |
| 753 | } |
| 754 | |
| 755 | void Writer::calculateImports() { |
| 756 | // Some inputs require that the indirect function table be assigned to table |
| 757 | // number 0, so if it is present and is an import, allocate it before any |
| 758 | // other tables. |
| 759 | if (ctx.sym.indirectFunctionTable && |
| 760 | shouldImport(sym: ctx.sym.indirectFunctionTable)) |
| 761 | out.importSec->addImport(sym: ctx.sym.indirectFunctionTable); |
| 762 | |
| 763 | for (Symbol *sym : symtab->symbols()) { |
| 764 | if (!shouldImport(sym)) |
| 765 | continue; |
| 766 | if (sym == ctx.sym.indirectFunctionTable) |
| 767 | continue; |
| 768 | LLVM_DEBUG(dbgs() << "import: " << sym->getName() << "\n" ); |
| 769 | out.importSec->addImport(sym); |
| 770 | } |
| 771 | } |
| 772 | |
| 773 | void Writer::calculateExports() { |
| 774 | if (ctx.arg.relocatable) |
| 775 | return; |
| 776 | |
| 777 | if (!ctx.arg.relocatable && ctx.arg.memoryExport.has_value()) { |
| 778 | out.exportSec->exports.push_back( |
| 779 | x: WasmExport{.Name: *ctx.arg.memoryExport, .Kind: WASM_EXTERNAL_MEMORY, .Index: 0}); |
| 780 | } |
| 781 | |
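// Exported data symbols are materialized as address-valued globals which are
// appended after all imported and defined globals; globalIndex tracks the
// index of the next such global.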
| 782 | unsigned globalIndex = |
| 783 | out.importSec->getNumImportedGlobals() + out.globalSec->numGlobals(); |
| 784 | |
| 785 | for (Symbol *sym : symtab->symbols()) { |
| 786 | if (!sym->isExported()) |
| 787 | continue; |
| 788 | if (!sym->isLive()) |
| 789 | continue; |
| 790 | if (isa<SharedFunctionSymbol>(Val: sym) || sym->isShared()) |
| 791 | continue; |
| 792 | |
| 793 | StringRef name = sym->getName(); |
| 794 | LLVM_DEBUG(dbgs() << "Export: " << name << "\n" ); |
| 795 | WasmExport export_; |
| 796 | if (auto *f = dyn_cast<DefinedFunction>(Val: sym)) { |
| 797 | if (std::optional<StringRef> exportName = f->function->getExportName()) { |
| 798 | name = *exportName; |
| 799 | } |
| 800 | export_ = {.Name: name, .Kind: WASM_EXTERNAL_FUNCTION, .Index: f->getExportedFunctionIndex()}; |
| 801 | } else if (auto *g = dyn_cast<DefinedGlobal>(Val: sym)) { |
| 802 | if (g->getGlobalType()->Mutable && !g->getFile() && !g->forceExport) { |
// Avoid exporting mutable globals that are linker synthesized (e.g.
| 804 | // __stack_pointer or __tls_base) unless they are explicitly exported |
| 805 | // from the command line. |
| 806 | // Without this check `--export-all` would cause any program using the |
| 807 | // stack pointer to export a mutable global even if none of the input |
| 808 | // files were built with the `mutable-globals` feature. |
| 809 | continue; |
| 810 | } |
| 811 | export_ = {.Name: name, .Kind: WASM_EXTERNAL_GLOBAL, .Index: g->getGlobalIndex()}; |
| 812 | } else if (auto *t = dyn_cast<DefinedTag>(Val: sym)) { |
| 813 | export_ = {.Name: name, .Kind: WASM_EXTERNAL_TAG, .Index: t->getTagIndex()}; |
| 814 | } else if (auto *d = dyn_cast<DefinedData>(Val: sym)) { |
| 815 | out.globalSec->dataAddressGlobals.push_back(x: d); |
| 816 | export_ = {.Name: name, .Kind: WASM_EXTERNAL_GLOBAL, .Index: globalIndex++}; |
| 817 | } else { |
| 818 | auto *t = cast<DefinedTable>(Val: sym); |
| 819 | export_ = {.Name: name, .Kind: WASM_EXTERNAL_TABLE, .Index: t->getTableNumber()}; |
| 820 | } |
| 821 | |
| 822 | out.exportSec->exports.push_back(x: export_); |
| 823 | out.exportSec->exportedSymbols.push_back(x: sym); |
| 824 | } |
| 825 | } |
| 826 | |
| 827 | void Writer::populateSymtab() { |
| 828 | if (!ctx.arg.relocatable && !ctx.arg.emitRelocs) |
| 829 | return; |
| 830 | |
| 831 | for (Symbol *sym : symtab->symbols()) |
| 832 | if (sym->isUsedInRegularObj && sym->isLive() && !sym->isShared()) |
| 833 | out.linkingSec->addToSymtab(sym); |
| 834 | |
| 835 | for (ObjFile *file : ctx.objectFiles) { |
| 836 | LLVM_DEBUG(dbgs() << "Local symtab entries: " << file->getName() << "\n" ); |
| 837 | for (Symbol *sym : file->getSymbols()) |
| 838 | if (sym->isLocal() && !isa<SectionSymbol>(Val: sym) && sym->isLive()) |
| 839 | out.linkingSec->addToSymtab(sym); |
| 840 | } |
| 841 | } |
| 842 | |
| 843 | void Writer::calculateTypes() { |
| 844 | // The output type section is the union of the following sets: |
| 845 | // 1. Any signature used in the TYPE relocation |
| 846 | // 2. The signatures of all imported functions |
| 847 | // 3. The signatures of all defined functions |
| 848 | // 4. The signatures of all imported tags |
| 849 | // 5. The signatures of all defined tags |
| 850 | |
| 851 | for (ObjFile *file : ctx.objectFiles) { |
| 852 | ArrayRef<WasmSignature> types = file->getWasmObj()->types(); |
| 853 | for (uint32_t i = 0; i < types.size(); i++) |
| 854 | if (file->typeIsUsed[i]) |
| 855 | file->typeMap[i] = out.typeSec->registerType(sig: types[i]); |
| 856 | } |
| 857 | |
| 858 | for (const Symbol *sym : out.importSec->importedSymbols) { |
| 859 | if (auto *f = dyn_cast<FunctionSymbol>(Val: sym)) |
| 860 | out.typeSec->registerType(sig: *f->signature); |
| 861 | else if (auto *t = dyn_cast<TagSymbol>(Val: sym)) |
| 862 | out.typeSec->registerType(sig: *t->signature); |
| 863 | } |
| 864 | |
| 865 | for (const InputFunction *f : out.functionSec->inputFunctions) |
| 866 | out.typeSec->registerType(sig: f->signature); |
| 867 | |
| 868 | for (const InputTag *t : out.tagSec->inputTags) |
| 869 | out.typeSec->registerType(sig: t->signature); |
| 870 | } |
| 871 | |
| 872 | // In a command-style link, create a wrapper for each exported symbol |
| 873 | // which calls the constructors and destructors. |
| 874 | void Writer::createCommandExportWrappers() { |
| 875 | // This logic doesn't currently support Emscripten-style PIC mode. |
| 876 | assert(!ctx.isPic); |
| 877 | |
| 878 | // If there are no ctors and there's no libc `__wasm_call_dtors` to |
| 879 | // call, don't wrap the exports. |
| 880 | if (initFunctions.empty() && ctx.sym.callDtors == nullptr) |
| 881 | return; |
| 882 | |
| 883 | std::vector<DefinedFunction *> toWrap; |
| 884 | |
| 885 | for (Symbol *sym : symtab->symbols()) |
| 886 | if (sym->isExported()) |
| 887 | if (auto *f = dyn_cast<DefinedFunction>(Val: sym)) |
| 888 | toWrap.push_back(x: f); |
| 889 | |
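// Each wrapper takes over the original function's export name and exported
// status, and the original function is made hidden so that only the wrapper
// is exported.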
| 890 | for (auto *f : toWrap) { |
| 891 | auto funcNameStr = (f->getName() + ".command_export" ).str(); |
| 892 | commandExportWrapperNames.push_back(x: funcNameStr); |
| 893 | const std::string &funcName = commandExportWrapperNames.back(); |
| 894 | |
| 895 | auto func = make<SyntheticFunction>(args: *f->getSignature(), args: funcName); |
| 896 | if (f->function->getExportName()) |
| 897 | func->setExportName(f->function->getExportName()->str()); |
| 898 | else |
| 899 | func->setExportName(f->getName().str()); |
| 900 | |
| 901 | DefinedFunction *def = |
| 902 | symtab->addSyntheticFunction(name: funcName, flags: f->flags, function: func); |
| 903 | def->markLive(); |
| 904 | |
| 905 | def->flags |= WASM_SYMBOL_EXPORTED; |
| 906 | def->flags &= ~WASM_SYMBOL_VISIBILITY_HIDDEN; |
| 907 | def->forceExport = f->forceExport; |
| 908 | |
| 909 | f->flags |= WASM_SYMBOL_VISIBILITY_HIDDEN; |
| 910 | f->flags &= ~WASM_SYMBOL_EXPORTED; |
| 911 | f->forceExport = false; |
| 912 | |
| 913 | out.functionSec->addFunction(func); |
| 914 | |
| 915 | createCommandExportWrapper(functionIndex: f->getFunctionIndex(), f: def); |
| 916 | } |
| 917 | } |
| 918 | |
| 919 | static void finalizeIndirectFunctionTable() { |
| 920 | if (!ctx.sym.indirectFunctionTable) |
| 921 | return; |
| 922 | |
| 923 | if (shouldImport(sym: ctx.sym.indirectFunctionTable) && |
| 924 | !ctx.sym.indirectFunctionTable->hasTableNumber()) { |
| 925 | // Processing -Bsymbolic relocations resulted in a late requirement that the |
| 926 | // indirect function table be present, and we are running in --import-table |
| 927 | // mode. Add the table now to the imports section. Otherwise it will be |
| 928 | // added to the tables section later in assignIndexes. |
| 929 | out.importSec->addImport(sym: ctx.sym.indirectFunctionTable); |
| 930 | } |
| 931 | |
| 932 | uint32_t tableSize = ctx.arg.tableBase + out.elemSec->numEntries(); |
| 933 | WasmLimits limits = {.Flags: 0, .Minimum: tableSize, .Maximum: 0, .PageSize: 0}; |
| 934 | if (ctx.sym.indirectFunctionTable->isDefined() && !ctx.arg.growableTable) { |
| 935 | limits.Flags |= WASM_LIMITS_FLAG_HAS_MAX; |
| 936 | limits.Maximum = limits.Minimum; |
| 937 | } |
| 938 | if (ctx.arg.is64.value_or(u: false)) |
| 939 | limits.Flags |= WASM_LIMITS_FLAG_IS_64; |
| 940 | ctx.sym.indirectFunctionTable->setLimits(limits); |
| 941 | } |
| 942 | |
| 943 | static void scanRelocations() { |
| 944 | for (ObjFile *file : ctx.objectFiles) { |
| 945 | LLVM_DEBUG(dbgs() << "scanRelocations: " << file->getName() << "\n" ); |
| 946 | for (InputChunk *chunk : file->functions) |
| 947 | scanRelocations(chunk); |
| 948 | for (InputChunk *chunk : file->segments) |
| 949 | scanRelocations(chunk); |
| 950 | for (auto &p : file->customSections) |
| 951 | scanRelocations(chunk: p); |
| 952 | } |
| 953 | } |
| 954 | |
| 955 | void Writer::assignIndexes() { |
| 956 | // Seal the import section, since other index spaces such as function and |
// global are affected by the number of imports.
| 958 | out.importSec->seal(); |
| 959 | |
| 960 | for (InputFunction *func : ctx.syntheticFunctions) |
| 961 | out.functionSec->addFunction(func); |
| 962 | |
| 963 | for (ObjFile *file : ctx.objectFiles) { |
| 964 | LLVM_DEBUG(dbgs() << "Functions: " << file->getName() << "\n" ); |
| 965 | for (InputFunction *func : file->functions) |
| 966 | out.functionSec->addFunction(func); |
| 967 | } |
| 968 | |
| 969 | for (InputGlobal *global : ctx.syntheticGlobals) |
| 970 | out.globalSec->addGlobal(global); |
| 971 | |
| 972 | for (ObjFile *file : ctx.objectFiles) { |
| 973 | LLVM_DEBUG(dbgs() << "Globals: " << file->getName() << "\n" ); |
| 974 | for (InputGlobal *global : file->globals) |
| 975 | out.globalSec->addGlobal(global); |
| 976 | } |
| 977 | |
| 978 | for (ObjFile *file : ctx.objectFiles) { |
| 979 | LLVM_DEBUG(dbgs() << "Tags: " << file->getName() << "\n" ); |
| 980 | for (InputTag *tag : file->tags) |
| 981 | out.tagSec->addTag(tag); |
| 982 | } |
| 983 | |
| 984 | for (ObjFile *file : ctx.objectFiles) { |
| 985 | LLVM_DEBUG(dbgs() << "Tables: " << file->getName() << "\n" ); |
| 986 | for (InputTable *table : file->tables) |
| 987 | out.tableSec->addTable(table); |
| 988 | } |
| 989 | |
| 990 | for (InputTable *table : ctx.syntheticTables) |
| 991 | out.tableSec->addTable(table); |
| 992 | |
| 993 | out.globalSec->assignIndexes(); |
| 994 | out.tableSec->assignIndexes(); |
| 995 | } |
| 996 | |
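// For example, when data segment merging is enabled, ".rodata.str1.1" and
// ".rodata.cst8" both map to a single ".rodata" output segment.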
| 997 | static StringRef getOutputDataSegmentName(const InputChunk &seg) { |
// We always merge .tbss and .tdata into a single TLS segment so that all
// TLS symbols are relative to a single __tls_base.
| 1000 | if (seg.isTLS()) |
| 1001 | return ".tdata" ; |
| 1002 | if (!ctx.arg.mergeDataSegments) |
| 1003 | return seg.name; |
| 1004 | if (seg.name.starts_with(Prefix: ".text." )) |
| 1005 | return ".text" ; |
| 1006 | if (seg.name.starts_with(Prefix: ".data." )) |
| 1007 | return ".data" ; |
| 1008 | if (seg.name.starts_with(Prefix: ".bss." )) |
| 1009 | return ".bss" ; |
| 1010 | if (seg.name.starts_with(Prefix: ".rodata." )) |
| 1011 | return ".rodata" ; |
| 1012 | return seg.name; |
| 1013 | } |
| 1014 | |
| 1015 | OutputSegment *Writer::createOutputSegment(StringRef name) { |
| 1016 | LLVM_DEBUG(dbgs() << "new segment: " << name << "\n" ); |
| 1017 | OutputSegment *s = make<OutputSegment>(args&: name); |
| 1018 | if (ctx.arg.sharedMemory) |
| 1019 | s->initFlags = WASM_DATA_SEGMENT_IS_PASSIVE; |
| 1020 | if (!ctx.arg.relocatable && name.starts_with(Prefix: ".bss" )) |
| 1021 | s->isBss = true; |
| 1022 | segments.push_back(x: s); |
| 1023 | return s; |
| 1024 | } |
| 1025 | |
| 1026 | void Writer::createOutputSegments() { |
| 1027 | for (ObjFile *file : ctx.objectFiles) { |
| 1028 | for (InputChunk *segment : file->segments) { |
| 1029 | if (!segment->live) |
| 1030 | continue; |
| 1031 | StringRef name = getOutputDataSegmentName(seg: *segment); |
| 1032 | OutputSegment *s = nullptr; |
| 1033 | // When running in relocatable mode we can't merge segments that are part |
// of comdat groups since the ultimate linker needs to be able to exclude or
| 1035 | // include them individually. |
| 1036 | if (ctx.arg.relocatable && !segment->getComdatName().empty()) { |
| 1037 | s = createOutputSegment(name); |
| 1038 | } else { |
| 1039 | if (segmentMap.count(Val: name) == 0) |
| 1040 | segmentMap[name] = createOutputSegment(name); |
| 1041 | s = segmentMap[name]; |
| 1042 | } |
| 1043 | s->addInputSegment(inSeg: segment); |
| 1044 | } |
| 1045 | } |
| 1046 | |
| 1047 | // Sort segments by type, placing .bss last |
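// Resulting order: .tdata, .rodata, .data, everything else, then .bss.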
| 1048 | llvm::stable_sort(Range&: segments, |
| 1049 | C: [](const OutputSegment *a, const OutputSegment *b) { |
| 1050 | auto order = [](StringRef name) { |
| 1051 | return StringSwitch<int>(name) |
| 1052 | .StartsWith(S: ".tdata" , Value: 0) |
| 1053 | .StartsWith(S: ".rodata" , Value: 1) |
| 1054 | .StartsWith(S: ".data" , Value: 2) |
| 1055 | .StartsWith(S: ".bss" , Value: 4) |
| 1056 | .Default(Value: 3); |
| 1057 | }; |
| 1058 | return order(a->name) < order(b->name); |
| 1059 | }); |
| 1060 | |
| 1061 | for (size_t i = 0; i < segments.size(); ++i) |
| 1062 | segments[i]->index = i; |
| 1063 | |
| 1064 | // Merge MergeInputSections into a single MergeSyntheticSection. |
LLVM_DEBUG(dbgs() << "-- finalize input segments\n");
| 1066 | for (OutputSegment *seg : segments) |
| 1067 | seg->finalizeInputSegments(); |
| 1068 | } |
| 1069 | |
| 1070 | void Writer::combineOutputSegments() { |
| 1071 | // With PIC code we currently only support a single active data segment since |
| 1072 | // we only have a single __memory_base to use as our base address. This pass |
| 1073 | // combines all data segments into a single .data segment. |
| 1074 | // This restriction does not apply when the extended const extension is |
| 1075 | // available: https://github.com/WebAssembly/extended-const |
| 1076 | assert(!ctx.arg.extendedConst); |
| 1077 | assert(ctx.isPic && !ctx.arg.sharedMemory); |
| 1078 | if (segments.size() <= 1) |
| 1079 | return; |
| 1080 | OutputSegment *combined = make<OutputSegment>(args: ".data" ); |
| 1081 | combined->startVA = segments[0]->startVA; |
| 1082 | std::vector<OutputSegment *> newSegments = {combined}; |
| 1083 | for (OutputSegment *s : segments) { |
| 1084 | if (!s->requiredInBinary()) { |
| 1085 | newSegments.push_back(x: s); |
| 1086 | continue; |
| 1087 | } |
| 1088 | bool first = true; |
| 1089 | for (InputChunk *inSeg : s->inputSegments) { |
| 1090 | if (first) |
| 1091 | inSeg->alignment = std::max(a: inSeg->alignment, b: s->alignment); |
| 1092 | first = false; |
| 1093 | #ifndef NDEBUG |
| 1094 | uint64_t oldVA = inSeg->getVA(); |
| 1095 | #endif |
| 1096 | combined->addInputSegment(inSeg); |
| 1097 | #ifndef NDEBUG |
| 1098 | uint64_t newVA = inSeg->getVA(); |
| 1099 | LLVM_DEBUG(dbgs() << "added input segment. name=" << inSeg->name |
| 1100 | << " oldVA=" << oldVA << " newVA=" << newVA << "\n" ); |
| 1101 | assert(oldVA == newVA); |
| 1102 | #endif |
| 1103 | } |
| 1104 | } |
| 1105 | |
| 1106 | segments = newSegments; |
| 1107 | } |
| 1108 | |
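// Set the body of a synthetic function. The code section encoding is a
// ULEB128-encoded body size followed by the body itself (local declarations
// plus instructions).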
| 1109 | static void createFunction(DefinedFunction *func, StringRef bodyContent) { |
| 1110 | std::string functionBody; |
| 1111 | { |
| 1112 | raw_string_ostream os(functionBody); |
| 1113 | writeUleb128(os, number: bodyContent.size(), msg: "function size" ); |
| 1114 | os << bodyContent; |
| 1115 | } |
| 1116 | ArrayRef<uint8_t> body = arrayRefFromStringRef(Input: saver().save(S: functionBody)); |
| 1117 | cast<SyntheticFunction>(Val: func->function)->setBody(body); |
| 1118 | } |
| 1119 | |
| 1120 | bool Writer::needsPassiveInitialization(const OutputSegment *segment) { |
// If the bulk memory feature is supported then we can perform bss
// initialization (via memory.fill) during `__wasm_init_memory`.
| 1123 | if (ctx.arg.memoryImport.has_value() && !segment->requiredInBinary()) |
| 1124 | return true; |
| 1125 | return segment->initFlags & WASM_DATA_SEGMENT_IS_PASSIVE; |
| 1126 | } |
| 1127 | |
| 1128 | bool Writer::hasPassiveInitializedSegments() { |
| 1129 | return llvm::any_of(Range&: segments, P: [this](const OutputSegment *s) { |
| 1130 | return this->needsPassiveInitialization(segment: s); |
| 1131 | }); |
| 1132 | } |
| 1133 | |
| 1134 | void Writer::createSyntheticInitFunctions() { |
| 1135 | if (ctx.arg.relocatable) |
| 1136 | return; |
| 1137 | |
| 1138 | static WasmSignature nullSignature = {{}, {}}; |
| 1139 | |
| 1140 | createApplyDataRelocationsFunction(); |
| 1141 | |
| 1142 | // Passive segments are used to avoid memory being reinitialized on each |
| 1143 | // thread's instantiation. These passive segments are initialized and |
// dropped in __wasm_init_memory, which is registered as the start function.
| 1145 | // We also initialize bss segments (using memory.fill) as part of this |
| 1146 | // function. |
| 1147 | if (hasPassiveInitializedSegments()) { |
| 1148 | ctx.sym.initMemory = symtab->addSyntheticFunction( |
| 1149 | name: "__wasm_init_memory" , flags: WASM_SYMBOL_VISIBILITY_HIDDEN, |
| 1150 | function: make<SyntheticFunction>(args&: nullSignature, args: "__wasm_init_memory" )); |
| 1151 | ctx.sym.initMemory->markLive(); |
| 1152 | if (ctx.arg.sharedMemory) { |
| 1153 | // This global is assigned during __wasm_init_memory in the shared memory |
| 1154 | // case. |
| 1155 | ctx.sym.tlsBase->markLive(); |
| 1156 | } |
| 1157 | } |
| 1158 | |
| 1159 | if (ctx.arg.sharedMemory) { |
| 1160 | if (out.globalSec->needsTLSRelocations()) { |
| 1161 | ctx.sym.applyGlobalTLSRelocs = symtab->addSyntheticFunction( |
| 1162 | name: "__wasm_apply_global_tls_relocs" , flags: WASM_SYMBOL_VISIBILITY_HIDDEN, |
| 1163 | function: make<SyntheticFunction>(args&: nullSignature, |
| 1164 | args: "__wasm_apply_global_tls_relocs" )); |
| 1165 | ctx.sym.applyGlobalTLSRelocs->markLive(); |
| 1166 | // TLS relocations depend on the __tls_base symbols |
| 1167 | ctx.sym.tlsBase->markLive(); |
| 1168 | } |
| 1169 | |
| 1170 | auto hasTLSRelocs = [](const OutputSegment *segment) { |
| 1171 | if (segment->isTLS()) |
| 1172 | for (const auto* is: segment->inputSegments) |
| 1173 | if (is->getRelocations().size()) |
| 1174 | return true; |
| 1175 | return false; |
| 1176 | }; |
| 1177 | if (llvm::any_of(Range&: segments, P: hasTLSRelocs)) { |
| 1178 | ctx.sym.applyTLSRelocs = symtab->addSyntheticFunction( |
| 1179 | name: "__wasm_apply_tls_relocs" , flags: WASM_SYMBOL_VISIBILITY_HIDDEN, |
| 1180 | function: make<SyntheticFunction>(args&: nullSignature, args: "__wasm_apply_tls_relocs" )); |
| 1181 | ctx.sym.applyTLSRelocs->markLive(); |
| 1182 | } |
| 1183 | } |
| 1184 | |
| 1185 | if (ctx.isPic && out.globalSec->needsRelocations()) { |
| 1186 | ctx.sym.applyGlobalRelocs = symtab->addSyntheticFunction( |
| 1187 | name: "__wasm_apply_global_relocs" , flags: WASM_SYMBOL_VISIBILITY_HIDDEN, |
| 1188 | function: make<SyntheticFunction>(args&: nullSignature, args: "__wasm_apply_global_relocs" )); |
| 1189 | ctx.sym.applyGlobalRelocs->markLive(); |
| 1190 | } |
| 1191 | |
| 1192 | // If there is only one start function we can just use that function |
| 1193 | // itself as the Wasm start function, otherwise we need to synthesize |
| 1194 | // a new function to call them in sequence. |
| 1195 | if (ctx.sym.applyGlobalRelocs && ctx.sym.initMemory) { |
| 1196 | ctx.sym.startFunction = symtab->addSyntheticFunction( |
| 1197 | name: "__wasm_start" , flags: WASM_SYMBOL_VISIBILITY_HIDDEN, |
| 1198 | function: make<SyntheticFunction>(args&: nullSignature, args: "__wasm_start" )); |
| 1199 | ctx.sym.startFunction->markLive(); |
| 1200 | } |
| 1201 | } |
| 1202 | |
| 1203 | void Writer::createInitMemoryFunction() { |
| 1204 | LLVM_DEBUG(dbgs() << "createInitMemoryFunction\n" ); |
| 1205 | assert(ctx.sym.initMemory); |
| 1206 | assert(hasPassiveInitializedSegments()); |
| 1207 | uint64_t flagAddress; |
| 1208 | if (ctx.arg.sharedMemory) { |
| 1209 | assert(ctx.sym.initMemoryFlag); |
| 1210 | flagAddress = ctx.sym.initMemoryFlag->getVA(); |
| 1211 | } |
| 1212 | bool is64 = ctx.arg.is64.value_or(u: false); |
| 1213 | std::string bodyContent; |
| 1214 | { |
| 1215 | raw_string_ostream os(bodyContent); |
| 1216 | // Initialize memory in a thread-safe manner. The thread that successfully |
| 1217 | // increments the flag from 0 to 1 is responsible for performing the memory |
// initialization. Other threads sleep on the flag until the first thread
// finishes initializing memory, increments the flag to 2, and wakes all
| 1220 | // the other threads. Once the flag has been set to 2, subsequently started |
| 1221 | // threads will skip the sleep. All threads unconditionally drop their |
| 1222 | // passive data segments once memory has been initialized. The generated |
| 1223 | // code is as follows: |
| 1224 | // |
| 1225 | // (func $__wasm_init_memory |
| 1226 | // (block $drop |
| 1227 | // (block $wait |
| 1228 | // (block $init |
| 1229 | // (br_table $init $wait $drop |
| 1230 | // (i32.atomic.rmw.cmpxchg align=2 offset=0 |
| 1231 | // (i32.const $__init_memory_flag) |
| 1232 | // (i32.const 0) |
| 1233 | // (i32.const 1) |
| 1234 | // ) |
| 1235 | // ) |
| 1236 | // ) ;; $init |
| 1237 | // ( ... initialize data segments ... ) |
| 1238 | // (i32.atomic.store align=2 offset=0 |
| 1239 | // (i32.const $__init_memory_flag) |
| 1240 | // (i32.const 2) |
| 1241 | // ) |
| 1242 | // (drop |
| 1243 | // (i32.atomic.notify align=2 offset=0 |
| 1244 | // (i32.const $__init_memory_flag) |
| 1245 | // (i32.const -1u) |
| 1246 | // ) |
| 1247 | // ) |
| 1248 | // (br $drop) |
| 1249 | // ) ;; $wait |
| 1250 | // (drop |
| 1251 | // (i32.atomic.wait align=2 offset=0 |
| 1252 | // (i32.const $__init_memory_flag) |
| 1253 | // (i32.const 1) |
| 1254 | // (i32.const -1) |
| 1255 | // ) |
| 1256 | // ) |
| 1257 | // ) ;; $drop |
| 1258 | // ( ... drop data segments ... ) |
| 1259 | // ) |
| 1260 | // |
| 1261 | // When we are building with PIC, calculate the flag location using: |
| 1262 | // |
| 1263 | // (global.get $__memory_base) |
| 1264 | // (i32.const $__init_memory_flag) |
| 1265 | // (i32.const 1) |
| 1266 | |
| 1267 | auto writeGetFlagAddress = [&]() { |
| 1268 | if (ctx.isPic) { |
| 1269 | writeU8(os, byte: WASM_OPCODE_LOCAL_GET, msg: "local.get" ); |
| 1270 | writeUleb128(os, number: 0, msg: "local 0" ); |
| 1271 | } else { |
| 1272 | writePtrConst(os, number: flagAddress, is64, msg: "flag address" ); |
| 1273 | } |
| 1274 | }; |
| 1275 | |
| 1276 | if (ctx.arg.sharedMemory) { |
| 1277 | // With PIC code we cache the flag address in local 0 |
| 1278 | if (ctx.isPic) { |
| 1279 | writeUleb128(os, number: 1, msg: "num local decls" ); |
| 1280 | writeUleb128(os, number: 2, msg: "local count" ); |
| 1281 | writeU8(os, byte: is64 ? WASM_TYPE_I64 : WASM_TYPE_I32, msg: "address type" ); |
| 1282 | writeU8(os, byte: WASM_OPCODE_GLOBAL_GET, msg: "GLOBAL_GET" ); |
| 1283 | writeUleb128(os, number: ctx.sym.memoryBase->getGlobalIndex(), msg: "memory_base" ); |
| 1284 | writePtrConst(os, number: flagAddress, is64, msg: "flag address" ); |
| 1285 | writeU8(os, byte: is64 ? WASM_OPCODE_I64_ADD : WASM_OPCODE_I32_ADD, msg: "add" ); |
| 1286 | writeU8(os, byte: WASM_OPCODE_LOCAL_SET, msg: "local.set" ); |
| 1287 | writeUleb128(os, number: 0, msg: "local 0" ); |
| 1288 | } else { |
| 1289 | writeUleb128(os, number: 0, msg: "num locals" ); |
| 1290 | } |
| 1291 | |
| 1292 | // Set up destination blocks |
| 1293 | writeU8(os, byte: WASM_OPCODE_BLOCK, msg: "block $drop" ); |
| 1294 | writeU8(os, byte: WASM_TYPE_NORESULT, msg: "block type" ); |
| 1295 | writeU8(os, byte: WASM_OPCODE_BLOCK, msg: "block $wait" ); |
| 1296 | writeU8(os, byte: WASM_TYPE_NORESULT, msg: "block type" ); |
| 1297 | writeU8(os, byte: WASM_OPCODE_BLOCK, msg: "block $init" ); |
| 1298 | writeU8(os, byte: WASM_TYPE_NORESULT, msg: "block type" ); |
| 1299 | |
| 1300 | // Atomically check whether we win the race. |
| 1301 | writeGetFlagAddress(); |
| 1302 | writeI32Const(os, number: 0, msg: "expected flag value" ); |
| 1303 | writeI32Const(os, number: 1, msg: "new flag value" ); |
| 1304 | writeU8(os, byte: WASM_OPCODE_ATOMICS_PREFIX, msg: "atomics prefix" ); |
| 1305 | writeUleb128(os, number: WASM_OPCODE_I32_RMW_CMPXCHG, msg: "i32.atomic.rmw.cmpxchg" ); |
| 1306 | writeMemArg(os, alignment: 2, offset: 0); |
| 1307 | |
| 1308 | // Based on the value, decide what to do next. |
| 1309 | writeU8(os, byte: WASM_OPCODE_BR_TABLE, msg: "br_table" ); |
| 1310 | writeUleb128(os, number: 2, msg: "label vector length" ); |
| 1311 | writeUleb128(os, number: 0, msg: "label $init" ); |
| 1312 | writeUleb128(os, number: 1, msg: "label $wait" ); |
| 1313 | writeUleb128(os, number: 2, msg: "default label $drop" ); |
| 1314 | |
| 1315 | // Initialize passive data segments |
| 1316 | writeU8(os, byte: WASM_OPCODE_END, msg: "end $init" ); |
| 1317 | } else { |
| 1318 | writeUleb128(os, number: 0, msg: "num local decls" ); |
| 1319 | } |
| 1320 | |
| 1321 | for (const OutputSegment *s : segments) { |
| 1322 | if (needsPassiveInitialization(segment: s)) { |
// For passive BSS segments we can simply issue a memory.fill(0).
| 1324 | // For non-BSS segments we do a memory.init. Both these |
| 1325 | // instructions take as their first argument the destination |
| 1326 | // address. |
| 1327 | writePtrConst(os, number: s->startVA, is64, msg: "destination address" ); |
| 1328 | if (ctx.isPic) { |
| 1329 | writeU8(os, byte: WASM_OPCODE_GLOBAL_GET, msg: "GLOBAL_GET" ); |
| 1330 | writeUleb128(os, number: ctx.sym.memoryBase->getGlobalIndex(), |
| 1331 | msg: "__memory_base" ); |
| 1332 | writeU8(os, byte: is64 ? WASM_OPCODE_I64_ADD : WASM_OPCODE_I32_ADD, |
| 1333 | msg: "i32.add" ); |
| 1334 | } |
| 1335 | |
| 1336 | // When we initialize the TLS segment we also set the `__tls_base` |
| 1337 | // global. This allows the runtime to use this static copy of the |
| 1338 | // TLS data for the first/main thread. |
| 1339 | if (ctx.arg.sharedMemory && s->isTLS()) { |
| 1340 | if (ctx.isPic) { |
| 1341 | // Cache the result of the addionion in local 0 |
| 1342 | writeU8(os, byte: WASM_OPCODE_LOCAL_TEE, msg: "local.tee" ); |
| 1343 | writeUleb128(os, number: 1, msg: "local 1" ); |
| 1344 | } else { |
| 1345 | writePtrConst(os, number: s->startVA, is64, msg: "destination address" ); |
| 1346 | } |
| 1347 | writeU8(os, byte: WASM_OPCODE_GLOBAL_SET, msg: "GLOBAL_SET" ); |
| 1348 | writeUleb128(os, number: ctx.sym.tlsBase->getGlobalIndex(), msg: "__tls_base" ); |
| 1349 | if (ctx.isPic) { |
| 1350 | writeU8(os, byte: WASM_OPCODE_LOCAL_GET, msg: "local.tee" ); |
| 1351 | writeUleb128(os, number: 1, msg: "local 1" ); |
| 1352 | } |
| 1353 | } |
| 1354 | |
| 1355 | if (s->isBss) { |
| 1356 | writeI32Const(os, number: 0, msg: "fill value" ); |
| 1357 | writePtrConst(os, number: s->size, is64, msg: "memory region size" ); |
| 1358 | writeU8(os, byte: WASM_OPCODE_MISC_PREFIX, msg: "bulk-memory prefix" ); |
| 1359 | writeUleb128(os, number: WASM_OPCODE_MEMORY_FILL, msg: "memory.fill" ); |
| 1360 | writeU8(os, byte: 0, msg: "memory index immediate" ); |
| 1361 | } else { |
| 1362 | writeI32Const(os, number: 0, msg: "source segment offset" ); |
| 1363 | writeI32Const(os, number: s->size, msg: "memory region size" ); |
| 1364 | writeU8(os, byte: WASM_OPCODE_MISC_PREFIX, msg: "bulk-memory prefix" ); |
| 1365 | writeUleb128(os, number: WASM_OPCODE_MEMORY_INIT, msg: "memory.init" ); |
| 1366 | writeUleb128(os, number: s->index, msg: "segment index immediate" ); |
| 1367 | writeU8(os, byte: 0, msg: "memory index immediate" ); |
| 1368 | } |
| 1369 | } |
| 1370 | } |
| 1371 | |
    if (ctx.arg.sharedMemory) {
      // Set flag to 2 to mark end of initialization
      writeGetFlagAddress();
      writeI32Const(os, 2, "flag value");
      writeU8(os, WASM_OPCODE_ATOMICS_PREFIX, "atomics prefix");
      writeUleb128(os, WASM_OPCODE_I32_ATOMIC_STORE, "i32.atomic.store");
      writeMemArg(os, 2, 0);

      // Notify any waiters that memory initialization is complete
      writeGetFlagAddress();
      writeI32Const(os, -1, "number of waiters");
      writeU8(os, WASM_OPCODE_ATOMICS_PREFIX, "atomics prefix");
      writeUleb128(os, WASM_OPCODE_ATOMIC_NOTIFY, "atomic.notify");
      writeMemArg(os, 2, 0);
      writeU8(os, WASM_OPCODE_DROP, "drop");

      // Branch to drop the segments
      writeU8(os, WASM_OPCODE_BR, "br");
      writeUleb128(os, 1, "label $drop");

      // Wait for the winning thread to initialize memory
      writeU8(os, WASM_OPCODE_END, "end $wait");
      writeGetFlagAddress();
      writeI32Const(os, 1, "expected flag value");
      writeI64Const(os, -1, "timeout");

      writeU8(os, WASM_OPCODE_ATOMICS_PREFIX, "atomics prefix");
      writeUleb128(os, WASM_OPCODE_I32_ATOMIC_WAIT, "i32.atomic.wait");
      writeMemArg(os, 2, 0);
      writeU8(os, WASM_OPCODE_DROP, "drop");

      // Unconditionally drop passive data segments
      writeU8(os, WASM_OPCODE_END, "end $drop");
    }

    for (const OutputSegment *s : segments) {
      if (needsPassiveInitialization(s) && !s->isBss) {
        // The TLS region should not be dropped since it is needed
        // during the initialization of each thread (__wasm_init_tls).
        if (ctx.arg.sharedMemory && s->isTLS())
          continue;
        // data.drop instruction
        writeU8(os, WASM_OPCODE_MISC_PREFIX, "bulk-memory prefix");
        writeUleb128(os, WASM_OPCODE_DATA_DROP, "data.drop");
        writeUleb128(os, s->index, "segment index immediate");
      }
    }

    // End the function
    writeU8(os, WASM_OPCODE_END, "END");
  }

  createFunction(ctx.sym.initMemory, bodyContent);
}

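// Rough sketch (function names illustrative) of the start function
// synthesized below when both `__wasm_init_memory` and
// `__wasm_apply_global_relocs` are needed:
//
//   (func $__wasm_start
//     (call $__wasm_apply_global_relocs)
//     (call $__wasm_init_memory))
//
// If only one of the two exists, that function is used as the start
// function directly.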
void Writer::createStartFunction() {
  // A synthetic start function is only needed when there is more than one
  // function to call at startup.
  if (ctx.sym.initMemory && ctx.sym.applyGlobalRelocs) {
    assert(ctx.sym.startFunction);
    std::string bodyContent;
    {
      raw_string_ostream os(bodyContent);
      writeUleb128(os, 0, "num locals");
      writeU8(os, WASM_OPCODE_CALL, "CALL");
      writeUleb128(os, ctx.sym.applyGlobalRelocs->getFunctionIndex(),
                   "function index");
      writeU8(os, WASM_OPCODE_CALL, "CALL");
      writeUleb128(os, ctx.sym.initMemory->getFunctionIndex(),
                   "function index");
      writeU8(os, WASM_OPCODE_END, "END");
    }
    createFunction(ctx.sym.startFunction, bodyContent);
  } else if (ctx.sym.initMemory) {
    ctx.sym.startFunction = ctx.sym.initMemory;
  } else if (ctx.sym.applyGlobalRelocs) {
    ctx.sym.startFunction = ctx.sym.applyGlobalRelocs;
  }
}

// For -shared (PIC) output, we create a synthetic function which applies
// any relocations to the data segments on startup. This function is
// called `__wasm_apply_data_relocs` and is expected to be called before
// any user code (i.e. before `__wasm_call_ctors`).
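//
// The per-relocation code is produced by InputChunk::generateRelocationCode;
// as a rough sketch (field offsets and symbol names illustrative), a single
// memory-address relocation amounts to something like:
//
//   (i32.store align=2 offset=0
//     (i32.add (global.get $__memory_base) (i32.const <field offset>))
//     (global.get $got.sym))  ;; or (__memory_base + <addend>) for locals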
void Writer::createApplyDataRelocationsFunction() {
  LLVM_DEBUG(dbgs() << "createApplyDataRelocationsFunction\n");
  // First write the body's contents to a string.
  std::string bodyContent;
  {
    raw_string_ostream os(bodyContent);
    writeUleb128(os, 0, "num locals");
    bool generated = false;
    for (const OutputSegment *seg : segments)
      if (!ctx.arg.sharedMemory || !seg->isTLS())
        for (const InputChunk *inSeg : seg->inputSegments)
          generated |= inSeg->generateRelocationCode(os);

    if (!generated) {
      LLVM_DEBUG(dbgs() << "skipping empty __wasm_apply_data_relocs\n");
      return;
    }
    writeU8(os, WASM_OPCODE_END, "END");
  }

  // __wasm_apply_data_relocs
  // Function that applies relocations to data segment post-instantiation.
  static WasmSignature nullSignature = {{}, {}};
  auto def = symtab->addSyntheticFunction(
      "__wasm_apply_data_relocs",
      WASM_SYMBOL_VISIBILITY_DEFAULT | WASM_SYMBOL_EXPORTED,
      make<SyntheticFunction>(nullSignature, "__wasm_apply_data_relocs"));
  def->markLive();

  createFunction(def, bodyContent);
}

void Writer::createApplyTLSRelocationsFunction() {
  LLVM_DEBUG(dbgs() << "createApplyTLSRelocationsFunction\n");
  std::string bodyContent;
  {
    raw_string_ostream os(bodyContent);
    writeUleb128(os, 0, "num locals");
    for (const OutputSegment *seg : segments)
      if (seg->isTLS())
        for (const InputChunk *inSeg : seg->inputSegments)
          inSeg->generateRelocationCode(os);

    writeU8(os, WASM_OPCODE_END, "END");
  }

  createFunction(ctx.sym.applyTLSRelocs, bodyContent);
}

// Similar to createApplyDataRelocationsFunction but generates relocation code
// for WebAssembly globals. Because these globals are not shared between
// threads, these relocations need to run on every thread.
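//
// Rough sketch (symbol names and offsets illustrative): the generated body
// sets each internalized GOT global relative to `__memory_base`:
//
//   (global.set $got.foo
//     (i32.add (global.get $__memory_base) (i32.const <foo's address>)))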
void Writer::createApplyGlobalRelocationsFunction() {
  // First write the body's contents to a string.
  std::string bodyContent;
  {
    raw_string_ostream os(bodyContent);
    writeUleb128(os, 0, "num locals");
    out.globalSec->generateRelocationCode(os, false);
    writeU8(os, WASM_OPCODE_END, "END");
  }

  createFunction(ctx.sym.applyGlobalRelocs, bodyContent);
}

// Similar to createApplyGlobalRelocationsFunction but for
// TLS symbols. This cannot be run during the start function
// but must be delayed until __wasm_init_tls is called.
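//
// Rough sketch (names illustrative): the same pattern as above, but each TLS
// GOT global is computed relative to `__tls_base` instead of `__memory_base`:
//
//   (global.set $got.tls_var
//     (i32.add (global.get $__tls_base) (i32.const <tls offset>)))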
void Writer::createApplyGlobalTLSRelocationsFunction() {
  // First write the body's contents to a string.
  std::string bodyContent;
  {
    raw_string_ostream os(bodyContent);
    writeUleb128(os, 0, "num locals");
    out.globalSec->generateRelocationCode(os, true);
    writeU8(os, WASM_OPCODE_END, "END");
  }

  createFunction(ctx.sym.applyGlobalTLSRelocs, bodyContent);
}

// Create synthetic "__wasm_call_ctors" function based on ctor functions
// in the input objects.
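//
// Rough sketch of the generated body (ctor names illustrative): constructors
// are called in priority order and any return values are dropped:
//
//   (func $__wasm_call_ctors
//     (call $ctor_a)
//     (drop (call $ctor_b)))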
void Writer::createCallCtorsFunction() {
  // If __wasm_call_ctors isn't referenced and there aren't any ctors, don't
  // define the `__wasm_call_ctors` function.
  if (!ctx.sym.callCtors->isLive() && initFunctions.empty())
    return;

  // First write the body's contents to a string.
  std::string bodyContent;
  {
    raw_string_ostream os(bodyContent);
    writeUleb128(os, 0, "num locals");

    // Call constructors
    for (const WasmInitEntry &f : initFunctions) {
      writeU8(os, WASM_OPCODE_CALL, "CALL");
      writeUleb128(os, f.sym->getFunctionIndex(), "function index");
      for (size_t i = 0; i < f.sym->signature->Returns.size(); i++) {
        writeU8(os, WASM_OPCODE_DROP, "DROP");
      }
    }

    writeU8(os, WASM_OPCODE_END, "END");
  }

  createFunction(ctx.sym.callCtors, bodyContent);
}

// Create a wrapper around a function export which calls the
// static constructors and destructors.
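//
// Rough sketch of a generated wrapper (names illustrative): the wrapper
// forwards its parameters to the real function, bracketing the call with the
// ctor/dtor calls; the inner call's return values are left on the stack and
// become the wrapper's results:
//
//   (func $command_export (param ...) (result ...)
//     (call $__wasm_call_ctors)
//     (call $real_export (local.get 0) ...)
//     (call $__wasm_call_dtors))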
void Writer::createCommandExportWrapper(uint32_t functionIndex,
                                        DefinedFunction *f) {
  // First write the body's contents to a string.
  std::string bodyContent;
  {
    raw_string_ostream os(bodyContent);
    writeUleb128(os, 0, "num locals");

    // Call `__wasm_call_ctors` which calls the static constructors (and
    // applies any runtime relocations in Emscripten-style PIC mode).
    if (ctx.sym.callCtors->isLive()) {
      writeU8(os, WASM_OPCODE_CALL, "CALL");
      writeUleb128(os, ctx.sym.callCtors->getFunctionIndex(),
                   "function index");
    }

    // Call the user's code, leaving any return values on the operand stack.
    for (size_t i = 0; i < f->signature->Params.size(); ++i) {
      writeU8(os, WASM_OPCODE_LOCAL_GET, "local.get");
      writeUleb128(os, i, "local index");
    }
    writeU8(os, WASM_OPCODE_CALL, "CALL");
    writeUleb128(os, functionIndex, "function index");

    // Call the function that calls the destructors.
    if (DefinedFunction *callDtors = ctx.sym.callDtors) {
      writeU8(os, WASM_OPCODE_CALL, "CALL");
      writeUleb128(os, callDtors->getFunctionIndex(), "function index");
    }

    // End the function, returning the return values from the user's code.
    writeU8(os, WASM_OPCODE_END, "END");
  }

  createFunction(f, bodyContent);
}

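// Rough sketch of the generated `__wasm_init_tls` (names and sizes
// illustrative): the function receives the address of this thread's TLS
// block, publishes it via `__tls_base`, copies in the template from the
// `.tdata` passive segment, and then applies any TLS relocations:
//
//   (func $__wasm_init_tls (param $base i32)
//     (global.set $__tls_base (local.get $base))
//     (memory.init $.tdata (local.get $base) (i32.const 0) (i32.const <size>))
//     (call $__wasm_apply_tls_relocs))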
void Writer::createInitTLSFunction() {
  std::string bodyContent;
  {
    raw_string_ostream os(bodyContent);

    OutputSegment *tlsSeg = nullptr;
    for (auto *seg : segments) {
      if (seg->name == ".tdata") {
        tlsSeg = seg;
        break;
      }
    }

    writeUleb128(os, 0, "num locals");
    if (tlsSeg) {
      writeU8(os, WASM_OPCODE_LOCAL_GET, "local.get");
      writeUleb128(os, 0, "local index");

      writeU8(os, WASM_OPCODE_GLOBAL_SET, "global.set");
      writeUleb128(os, ctx.sym.tlsBase->getGlobalIndex(), "global index");

      // FIXME(wvo): this local needs to be I64 in wasm64, or we need an
      // extend op.
      writeU8(os, WASM_OPCODE_LOCAL_GET, "local.get");
      writeUleb128(os, 0, "local index");

      writeI32Const(os, 0, "segment offset");

      writeI32Const(os, tlsSeg->size, "memory region size");

      writeU8(os, WASM_OPCODE_MISC_PREFIX, "bulk-memory prefix");
      writeUleb128(os, WASM_OPCODE_MEMORY_INIT, "MEMORY.INIT");
      writeUleb128(os, tlsSeg->index, "segment index immediate");
      writeU8(os, 0, "memory index immediate");
    }

    if (ctx.sym.applyTLSRelocs) {
      writeU8(os, WASM_OPCODE_CALL, "CALL");
      writeUleb128(os, ctx.sym.applyTLSRelocs->getFunctionIndex(),
                   "function index");
    }

    if (ctx.sym.applyGlobalTLSRelocs) {
      writeU8(os, WASM_OPCODE_CALL, "CALL");
      writeUleb128(os, ctx.sym.applyGlobalTLSRelocs->getFunctionIndex(),
                   "function index");
    }
    writeU8(os, WASM_OPCODE_END, "end function");
  }

  createFunction(ctx.sym.initTLS, bodyContent);
}

// Populate InitFunctions vector with init functions from all input objects.
// This is then used either when creating the output linking section or to
// synthesize the "__wasm_call_ctors" function.
void Writer::calculateInitFunctions() {
  if (!ctx.arg.relocatable && !ctx.sym.callCtors->isLive())
    return;

  for (ObjFile *file : ctx.objectFiles) {
    const WasmLinkingData &l = file->getWasmObj()->linkingData();
    for (const WasmInitFunc &f : l.InitFunctions) {
      FunctionSymbol *sym = file->getFunctionSymbol(f.Symbol);
      // comdat exclusions can cause init functions to be discarded.
      if (sym->isDiscarded() || !sym->isLive())
        continue;
      if (sym->signature->Params.size() != 0)
        error("constructor functions cannot take arguments: " + toString(*sym));
      LLVM_DEBUG(dbgs() << "initFunctions: " << toString(*sym) << "\n");
      initFunctions.emplace_back(WasmInitEntry{sym, f.Priority});
    }
  }

  // Sort in order of priority (lowest first) so that they are called
  // in the correct order.
  llvm::stable_sort(initFunctions,
                    [](const WasmInitEntry &l, const WasmInitEntry &r) {
                      return l.priority < r.priority;
                    });
}

void Writer::createSyntheticSections() {
  out.dylinkSec = make<DylinkSection>();
  out.typeSec = make<TypeSection>();
  out.importSec = make<ImportSection>();
  out.functionSec = make<FunctionSection>();
  out.tableSec = make<TableSection>();
  out.memorySec = make<MemorySection>();
  out.tagSec = make<TagSection>();
  out.globalSec = make<GlobalSection>();
  out.exportSec = make<ExportSection>();
  out.startSec = make<StartSection>();
  out.elemSec = make<ElemSection>();
  out.producersSec = make<ProducersSection>();
  out.targetFeaturesSec = make<TargetFeaturesSection>();
  out.buildIdSec = make<BuildIdSection>();
}

void Writer::createSyntheticSectionsPostLayout() {
  out.dataCountSec = make<DataCountSection>(segments);
  out.linkingSec = make<LinkingSection>(initFunctions, segments);
  out.nameSec = make<NameSection>(segments);
}

void Writer::run() {
  // For PIC code the table base is assigned dynamically by the loader.
  // For non-PIC, we start at 1 so that accessing table index 0 always traps.
  if (!ctx.isPic && ctx.sym.definedTableBase)
    ctx.sym.definedTableBase->setVA(ctx.arg.tableBase);

  log("-- createOutputSegments");
  createOutputSegments();
  log("-- createSyntheticSections");
  createSyntheticSections();
  log("-- layoutMemory");
  layoutMemory();

  if (!ctx.arg.relocatable) {
    // Create linker synthesized __start_SECNAME/__stop_SECNAME symbols
    // This has to be done after memory layout is performed.
    for (const OutputSegment *seg : segments) {
      addStartStopSymbols(seg);
    }
  }

  for (auto &pair : ctx.arg.exportedSymbols) {
    Symbol *sym = symtab->find(pair.first());
    if (sym && sym->isDefined())
      sym->forceExport = true;
  }

  // Delay reporting errors about explicit exports until after
  // addStartStopSymbols which can create optional symbols.
  for (auto &name : ctx.arg.requiredExports) {
    Symbol *sym = symtab->find(name);
    if (!sym || !sym->isDefined()) {
      if (ctx.arg.unresolvedSymbols == UnresolvedPolicy::ReportError)
        error(Twine("symbol exported via --export not found: ") + name);
      if (ctx.arg.unresolvedSymbols == UnresolvedPolicy::Warn)
        warn(Twine("symbol exported via --export not found: ") + name);
    }
  }

  log("-- populateTargetFeatures");
  populateTargetFeatures();

  // When outputting PIC code, each segment lives at a fixed offset from the
  // `__memory_base` import. Unless we support extended-const expressions we
  // can't do addition inside the constant expression, so we must combine the
  // segments into a single one that can live at `__memory_base`.
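  //
  // For reference, with extended-const a segment's init expression could be
  // written roughly as (offsets illustrative):
  //
  //   (offset (i32.add (global.get $__memory_base) (i32.const <segment offset>)))
  //
  // whereas without it only a single constant or global.get is allowed.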
  if (ctx.isPic && !ctx.arg.extendedConst && !ctx.arg.sharedMemory) {
    // In shared memory mode all data segments are passive and initialized
    // via __wasm_init_memory.
    log("-- combineOutputSegments");
    combineOutputSegments();
  }

| 1760 | log(msg: "-- createSyntheticSectionsPostLayout" ); |
| 1761 | createSyntheticSectionsPostLayout(); |
| 1762 | log(msg: "-- populateProducers" ); |
| 1763 | populateProducers(); |
| 1764 | log(msg: "-- calculateImports" ); |
| 1765 | calculateImports(); |
| 1766 | log(msg: "-- scanRelocations" ); |
| 1767 | scanRelocations(); |
| 1768 | log(msg: "-- finalizeIndirectFunctionTable" ); |
| 1769 | finalizeIndirectFunctionTable(); |
| 1770 | log(msg: "-- createSyntheticInitFunctions" ); |
| 1771 | createSyntheticInitFunctions(); |
| 1772 | log(msg: "-- assignIndexes" ); |
| 1773 | assignIndexes(); |
| 1774 | log(msg: "-- calculateInitFunctions" ); |
| 1775 | calculateInitFunctions(); |
| 1776 | |
| 1777 | if (!ctx.arg.relocatable) { |
| 1778 | // Create linker synthesized functions |
| 1779 | if (ctx.sym.applyGlobalRelocs) { |
| 1780 | createApplyGlobalRelocationsFunction(); |
| 1781 | } |
| 1782 | if (ctx.sym.applyTLSRelocs) { |
| 1783 | createApplyTLSRelocationsFunction(); |
| 1784 | } |
| 1785 | if (ctx.sym.applyGlobalTLSRelocs) { |
| 1786 | createApplyGlobalTLSRelocationsFunction(); |
| 1787 | } |
| 1788 | if (ctx.sym.initMemory) { |
| 1789 | createInitMemoryFunction(); |
| 1790 | } |
| 1791 | createStartFunction(); |
| 1792 | |
| 1793 | createCallCtorsFunction(); |
| 1794 | |
| 1795 | // Create export wrappers for commands if needed. |
| 1796 | // |
| 1797 | // If the input contains a call to `__wasm_call_ctors`, either in one of |
| 1798 | // the input objects or an explicit export from the command-line, we |
| 1799 | // assume ctors and dtors are taken care of already. |
| 1800 | if (!ctx.arg.relocatable && !ctx.isPic && |
| 1801 | !ctx.sym.callCtors->isUsedInRegularObj && |
| 1802 | !ctx.sym.callCtors->isExported()) { |
| 1803 | log(msg: "-- createCommandExportWrappers" ); |
| 1804 | createCommandExportWrappers(); |
| 1805 | } |
| 1806 | } |
| 1807 | |
| 1808 | if (ctx.sym.initTLS && ctx.sym.initTLS->isLive()) { |
| 1809 | log(msg: "-- createInitTLSFunction" ); |
| 1810 | createInitTLSFunction(); |
| 1811 | } |
| 1812 | |
| 1813 | if (errorCount()) |
| 1814 | return; |
| 1815 | |
| 1816 | log(msg: "-- calculateTypes" ); |
| 1817 | calculateTypes(); |
| 1818 | log(msg: "-- calculateExports" ); |
| 1819 | calculateExports(); |
| 1820 | log(msg: "-- calculateCustomSections" ); |
| 1821 | calculateCustomSections(); |
| 1822 | log(msg: "-- populateSymtab" ); |
| 1823 | populateSymtab(); |
| 1824 | log(msg: "-- checkImportExportTargetFeatures" ); |
| 1825 | checkImportExportTargetFeatures(); |
| 1826 | log(msg: "-- addSections" ); |
| 1827 | addSections(); |
| 1828 | |
| 1829 | if (errorHandler().verbose) { |
| 1830 | log(msg: "Defined Functions: " + Twine(out.functionSec->inputFunctions.size())); |
| 1831 | log(msg: "Defined Globals : " + Twine(out.globalSec->numGlobals())); |
| 1832 | log(msg: "Defined Tags : " + Twine(out.tagSec->inputTags.size())); |
| 1833 | log(msg: "Defined Tables : " + Twine(out.tableSec->inputTables.size())); |
| 1834 | log(msg: "Function Imports : " + |
| 1835 | Twine(out.importSec->getNumImportedFunctions())); |
| 1836 | log(msg: "Global Imports : " + Twine(out.importSec->getNumImportedGlobals())); |
| 1837 | log(msg: "Tag Imports : " + Twine(out.importSec->getNumImportedTags())); |
| 1838 | log(msg: "Table Imports : " + Twine(out.importSec->getNumImportedTables())); |
| 1839 | } |
| 1840 | |
| 1841 | createHeader(); |
| 1842 | log(msg: "-- finalizeSections" ); |
| 1843 | finalizeSections(); |
| 1844 | |
| 1845 | log(msg: "-- writeMapFile" ); |
| 1846 | writeMapFile(outputSections); |
| 1847 | |
| 1848 | log(msg: "-- openFile" ); |
| 1849 | openFile(); |
| 1850 | if (errorCount()) |
| 1851 | return; |
| 1852 | |
| 1853 | writeHeader(); |
| 1854 | |
| 1855 | log(msg: "-- writeSections" ); |
| 1856 | writeSections(); |
| 1857 | writeBuildId(); |
| 1858 | if (errorCount()) |
| 1859 | return; |
| 1860 | |
| 1861 | if (Error e = buffer->commit()) |
| 1862 | fatal(msg: "failed to write output '" + buffer->getPath() + |
| 1863 | "': " + toString(E: std::move(e))); |
| 1864 | } |
| 1865 | |
// Open a result file.
void Writer::openFile() {
  log("writing: " + ctx.arg.outputFile);

  Expected<std::unique_ptr<FileOutputBuffer>> bufferOrErr =
      FileOutputBuffer::create(ctx.arg.outputFile, fileSize,
                               FileOutputBuffer::F_executable);

  if (!bufferOrErr)
    error("failed to open " + ctx.arg.outputFile + ": " +
          toString(bufferOrErr.takeError()));
  else
    buffer = std::move(*bufferOrErr);
}

void Writer::createHeader() {
  raw_string_ostream os(header);
  writeBytes(os, WasmMagic, sizeof(WasmMagic), "wasm magic");
  writeU32(os, WasmVersion, "wasm version");
  fileSize += header.size();
}

void writeResult() { Writer().run(); }

} // namespace lld::wasm