//===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/MC/MCAssembler.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCCodeView.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <tuple>
#include <utility>

using namespace llvm;

namespace llvm {
class MCSubtargetInfo;
}

#define DEBUG_TYPE "assembler"

namespace {
namespace stats {

STATISTIC(EmittedFragments, "Number of emitted assembler fragments - total");
STATISTIC(EmittedRelaxableFragments,
          "Number of emitted assembler fragments - relaxable");
STATISTIC(EmittedDataFragments,
          "Number of emitted assembler fragments - data");
STATISTIC(EmittedAlignFragments,
          "Number of emitted assembler fragments - align");
STATISTIC(EmittedFillFragments,
          "Number of emitted assembler fragments - fill");
STATISTIC(EmittedNopsFragments, "Number of emitted assembler fragments - nops");
STATISTIC(EmittedOrgFragments, "Number of emitted assembler fragments - org");
STATISTIC(evaluateFixup, "Number of evaluated fixups");
STATISTIC(ObjectBytes, "Number of emitted object file bytes");
STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
STATISTIC(RelaxedInstructions, "Number of relaxed instructions");

} // end namespace stats
} // end anonymous namespace

// FIXME: There are a number of places in this file where we convert what is a
// 64-bit assembler value used for computation into a value in the object
// file, which may truncate it. We should detect such invalid truncation and
// report errors back.

/* *** */

MCAssembler::MCAssembler(MCContext &Context,
                         std::unique_ptr<MCAsmBackend> Backend,
                         std::unique_ptr<MCCodeEmitter> Emitter,
                         std::unique_ptr<MCObjectWriter> Writer)
    : Context(Context), Backend(std::move(Backend)),
      Emitter(std::move(Emitter)), Writer(std::move(Writer)) {
  if (this->Backend)
    this->Backend->setAssembler(this);
  if (this->Writer)
    this->Writer->setAssembler(this);
}

void MCAssembler::reset() {
  HasLayout = false;
  HasFinalLayout = false;
  RelaxAll = false;
  Sections.clear();
  Symbols.clear();
  ThumbFuncs.clear();
  BundleAlignSize = 0;

  // Reset objects owned by us.
  if (getBackendPtr())
    getBackendPtr()->reset();
  if (getEmitterPtr())
    getEmitterPtr()->reset();
  if (Writer)
    Writer->reset();
}

bool MCAssembler::registerSection(MCSection &Section) {
  if (Section.isRegistered())
    return false;
  assert(Section.curFragList()->Head && "allocInitialFragment not called");
  Sections.push_back(&Section);
  Section.setIsRegistered(true);
  return true;
}

bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const {
  if (ThumbFuncs.count(Symbol))
    return true;

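  // For an equated symbol (e.g. "alias = thumb_fn" where thumb_fn was marked
  // with .thumb_func), chase the variable's value: the alias is a Thumb
  // function iff the symbol it ultimately resolves to is one.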
  if (!Symbol->isVariable())
    return false;

  const MCExpr *Expr = Symbol->getVariableValue();

  MCValue V;
  if (!Expr->evaluateAsRelocatable(V, nullptr))
    return false;

  if (V.getSubSym() || V.getSpecifier())
    return false;

  auto *Sym = V.getAddSym();
  if (!Sym || V.getSpecifier())
    return false;

  if (!isThumbFunc(Sym))
    return false;

  ThumbFuncs.insert(Symbol); // Cache it.
  return true;
}

bool MCAssembler::evaluateFixup(const MCFragment &F, MCFixup &Fixup,
                                MCValue &Target, uint64_t &Value,
                                bool RecordReloc,
                                MutableArrayRef<char> Contents) const {
  ++stats::evaluateFixup;

  // FIXME: This code has some duplication with recordRelocation. We should
  // probably merge the two into a single callback that tries to evaluate a
  // fixup and records a relocation if one is needed.

  // On error claim to have completely evaluated the fixup, to prevent any
  // further processing from being done.
  const MCExpr *Expr = Fixup.getValue();
  Value = 0;
  if (!Expr->evaluateAsRelocatable(Target, this)) {
    reportError(Fixup.getLoc(), "expected relocatable expression");
    return true;
  }

  bool IsResolved = false;
  unsigned FixupFlags = getBackend().getFixupKindInfo(Fixup.getKind()).Flags;
  bool IsPCRel = FixupFlags & MCFixupKindInfo::FKF_IsPCRel;
  if (FixupFlags & MCFixupKindInfo::FKF_IsTarget) {
    IsResolved = getBackend().evaluateTargetFixup(Fixup, Target, Value);
  } else {
    const MCSymbol *Add = Target.getAddSym();
    const MCSymbol *Sub = Target.getSubSym();
    Value = Target.getConstant();
    if (Add && Add->isDefined())
      Value += getSymbolOffset(*Add);
    if (Sub && Sub->isDefined())
      Value -= getSymbolOffset(*Sub);

    bool ShouldAlignPC =
        FixupFlags & MCFixupKindInfo::FKF_IsAlignedDownTo32Bits;
    if (IsPCRel) {
      uint64_t Offset = getFragmentOffset(F) + Fixup.getOffset();

      // A number of ARM fixups in Thumb mode require that the effective PC
      // address be determined as the 32-bit aligned version of the actual
      // offset.
      if (ShouldAlignPC)
        Offset &= ~0x3;
      Value -= Offset;
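      // Value now holds (target - fixup location): e.g. with a zero addend, a
      // fixup at section offset 8 against a symbol at offset 0x20 yields
      // 0x18. Any remaining PC adjustment is target-specific.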

      if (Add && !Sub && !Add->isUndefined() && !Add->isAbsolute()) {
        IsResolved = getWriter().isSymbolRefDifferenceFullyResolvedImpl(
            *Add, F, /*InSet=*/false, /*IsPCRel=*/true);
      }
    } else {
      IsResolved = Target.isAbsolute();
      assert(!ShouldAlignPC && "FKF_IsAlignedDownTo32Bits must be PC-relative");
    }
  }

  if (!RecordReloc)
    return IsResolved;

  if (IsResolved && mc::isRelocRelocation(Fixup.getKind()))
    IsResolved = false;
  if (IsPCRel)
    Fixup.setPCRel();
  getBackend().applyFixup(F, Fixup, Target, Contents, Value, IsResolved);
  return true;
}

uint64_t MCAssembler::computeFragmentSize(const MCFragment &F) const {
  assert(getBackendPtr() && "Requires assembler backend");
  switch (F.getKind()) {
  case MCFragment::FT_Data:
    return cast<MCDataFragment>(F).getContents().size();
  case MCFragment::FT_Relaxable:
    return cast<MCRelaxableFragment>(F).getContents().size();
  case MCFragment::FT_Fill: {
    auto &FF = cast<MCFillFragment>(F);
    int64_t NumValues = 0;
    if (!FF.getNumValues().evaluateKnownAbsolute(NumValues, *this)) {
      recordError(FF.getLoc(), "expected assembly-time absolute expression");
      return 0;
    }
    int64_t Size = NumValues * FF.getValueSize();
    if (Size < 0) {
      recordError(FF.getLoc(), "invalid number of bytes");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Nops:
    return cast<MCNopsFragment>(F).getNumBytes();

  case MCFragment::FT_LEB:
    return cast<MCLEBFragment>(F).getContents().size();

  case MCFragment::FT_BoundaryAlign:
    return cast<MCBoundaryAlignFragment>(F).getSize();

  case MCFragment::FT_SymbolId:
    return 4;

  case MCFragment::FT_Align: {
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    unsigned Offset = getFragmentOffset(AF);
    unsigned Size = offsetToAlignment(Offset, AF.getAlignment());
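    // E.g. a fragment at offset 10 with 16-byte alignment needs
    // offsetToAlignment(10, Align(16)) == 6 bytes of padding.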

    // Insert extra nops for code alignment if the target defines the
    // shouldInsertExtraNopBytesForCodeAlign hook.
    if (AF.getParent()->useCodeAlign() && AF.hasEmitNops() &&
        getBackend().shouldInsertExtraNopBytesForCodeAlign(AF, Size))
      return Size;

    // If we are padding with nops, force the padding to a multiple of the
    // minimum nop size.
    if (Size > 0 && AF.hasEmitNops()) {
      while (Size % getBackend().getMinimumNopSize())
        Size += AF.getAlignment().value();
    }
    if (Size > AF.getMaxBytesToEmit())
      return 0;
    return Size;
  }

  case MCFragment::FT_Org: {
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);
    MCValue Value;
    if (!OF.getOffset().evaluateAsValue(Value, *this)) {
      recordError(OF.getLoc(), "expected assembly-time absolute expression");
      return 0;
    }

    uint64_t FragmentOffset = getFragmentOffset(OF);
    int64_t TargetLocation = Value.getConstant();
    if (const auto *SA = Value.getAddSym()) {
      uint64_t Val;
      if (!getSymbolOffset(*SA, Val)) {
        recordError(OF.getLoc(), "expected absolute expression");
        return 0;
      }
      TargetLocation += Val;
    }
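    // E.g. ".org 0x100" in a fragment that starts at section offset 0x80
    // yields a fragment size of 0x80 bytes.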
    int64_t Size = TargetLocation - FragmentOffset;
    if (Size < 0 || Size >= 0x40000000) {
      recordError(OF.getLoc(), "invalid .org offset '" + Twine(TargetLocation) +
                                   "' (at offset '" + Twine(FragmentOffset) +
                                   "')");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Dwarf:
    return cast<MCDwarfLineAddrFragment>(F).getContents().size();
  case MCFragment::FT_DwarfFrame:
    return cast<MCDwarfCallFrameFragment>(F).getContents().size();
  case MCFragment::FT_CVInlineLines:
    return cast<MCCVInlineLineTableFragment>(F).getContents().size();
  case MCFragment::FT_CVDefRange:
    return cast<MCCVDefRangeFragment>(F).getContents().size();
  case MCFragment::FT_PseudoProbe:
    return cast<MCPseudoProbeAddrFragment>(F).getContents().size();
  }

  llvm_unreachable("invalid fragment kind");
}

// Compute the amount of padding required before the fragment \p F to
// obey bundling restrictions, where \p FOffset is the fragment's offset in
// its section and \p FSize is the fragment's size.
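//
// For example, with BundleSize = 16 and a fragment that is not aligned to the
// bundle end, FOffset = 12 and FSize = 8 give OffsetInBundle = 12 and
// EndOfFragment = 20 > 16, so 16 - 12 = 4 bytes of padding are required for
// the fragment to start in a fresh bundle.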
static uint64_t computeBundlePadding(unsigned BundleSize,
                                     const MCEncodedFragment *F,
                                     uint64_t FOffset, uint64_t FSize) {
  uint64_t OffsetInBundle = FOffset & (BundleSize - 1);
  uint64_t EndOfFragment = OffsetInBundle + FSize;

  // There are two kinds of bundling restrictions:
  //
  // 1) For alignToBundleEnd(), add padding to ensure that the fragment will
  //    *end* on a bundle boundary.
  // 2) Otherwise, check if the fragment would cross a bundle boundary. If it
  //    would, add padding until the end of the bundle so that the fragment
  //    will start in a new one.
  if (F->alignToBundleEnd()) {
    // Three possibilities here:
    //
    // A) The fragment just happens to end at a bundle boundary, so we're good.
    // B) The fragment ends before the current bundle boundary: pad it just
    //    enough to reach the boundary.
    // C) The fragment ends after the current bundle boundary: pad it until it
    //    reaches the end of the next bundle boundary.
    //
    // Note: this code could be made shorter with some modulo trickery, but it's
    // intentionally kept in its more explicit form for simplicity.
    if (EndOfFragment == BundleSize)
      return 0;
    else if (EndOfFragment < BundleSize)
      return BundleSize - EndOfFragment;
    else { // EndOfFragment > BundleSize
      return 2 * BundleSize - EndOfFragment;
    }
  } else if (OffsetInBundle > 0 && EndOfFragment > BundleSize)
    return BundleSize - OffsetInBundle;
  else
    return 0;
}

void MCAssembler::layoutBundle(MCFragment *Prev, MCFragment *F) const {
  // If bundling is enabled and this fragment has instructions in it, it has to
  // obey the bundling restrictions. With padding, we'll have:
  //
  //
  //        BundlePadding
  //             |||
  // -------------------------------------
  // Prev |##########|       F        |
  // -------------------------------------
  //                  ^
  //                  |
  //                  F->Offset
  //
  // The fragment's offset will point to after the padding, and its computed
  // size won't include the padding.
  //
  // ".align N" is an example of a directive that introduces multiple
  // fragments. We could add a special case to handle ".align N" by emitting
  // within-fragment padding (which would produce less padding when N is less
  // than the bundle size), but for now we don't.
  //
  assert(isa<MCEncodedFragment>(F) &&
         "Only MCEncodedFragment implementations have instructions");
  MCEncodedFragment *EF = cast<MCEncodedFragment>(F);
  uint64_t FSize = computeFragmentSize(*EF);

  if (FSize > getBundleAlignSize())
    report_fatal_error("Fragment can't be larger than a bundle size");

  uint64_t RequiredBundlePadding =
      computeBundlePadding(getBundleAlignSize(), EF, EF->Offset, FSize);
  if (RequiredBundlePadding > UINT8_MAX)
    report_fatal_error("Padding cannot exceed 255 bytes");
  EF->setBundlePadding(static_cast<uint8_t>(RequiredBundlePadding));
  EF->Offset += RequiredBundlePadding;
  if (auto *DF = dyn_cast_or_null<MCDataFragment>(Prev))
    if (DF->getContents().empty())
      DF->Offset = EF->Offset;
}

// Simple getSymbolOffset helper for the non-variable case.
static bool getLabelOffset(const MCAssembler &Asm, const MCSymbol &S,
                           bool ReportError, uint64_t &Val) {
  if (!S.getFragment()) {
    if (ReportError)
      reportFatalUsageError("cannot evaluate undefined symbol '" + S.getName() +
                            "'");
    return false;
  }
  Val = Asm.getFragmentOffset(*S.getFragment()) + S.getOffset();
  return true;
}

static bool getSymbolOffsetImpl(const MCAssembler &Asm, const MCSymbol &S,
                                bool ReportError, uint64_t &Val) {
  if (!S.isVariable())
    return getLabelOffset(Asm, S, ReportError, Val);

  // If S is a variable, evaluate it.
  MCValue Target;
  if (!S.getVariableValue()->evaluateAsValue(Target, Asm))
    reportFatalUsageError("cannot evaluate equated symbol '" + S.getName() +
                          "'");

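  // For an equated symbol such as "x = a - b + 4", Target decomposes into an
  // add symbol a, a sub symbol b and a constant 4, so the result is
  // 4 + offset(a) - offset(b).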
  uint64_t Offset = Target.getConstant();

  const MCSymbol *A = Target.getAddSym();
  if (A) {
    uint64_t ValA;
    // FIXME: On most platforms, `Target`'s component symbols are labels from
    // having been simplified during evaluation, but on Mach-O they can be
    // variables due to PR19203. This, and the line below for `B` can be
    // restored to call `getLabelOffset` when PR19203 is fixed.
    if (!getSymbolOffsetImpl(Asm, *A, ReportError, ValA))
      return false;
    Offset += ValA;
  }

  const MCSymbol *B = Target.getSubSym();
  if (B) {
    uint64_t ValB;
    if (!getSymbolOffsetImpl(Asm, *B, ReportError, ValB))
      return false;
    Offset -= ValB;
  }

  Val = Offset;
  return true;
}

bool MCAssembler::getSymbolOffset(const MCSymbol &S, uint64_t &Val) const {
  return getSymbolOffsetImpl(*this, S, /*ReportError=*/false, Val);
}

uint64_t MCAssembler::getSymbolOffset(const MCSymbol &S) const {
  uint64_t Val;
  getSymbolOffsetImpl(*this, S, /*ReportError=*/true, Val);
  return Val;
}

const MCSymbol *MCAssembler::getBaseSymbol(const MCSymbol &Symbol) const {
  assert(HasLayout);
  if (!Symbol.isVariable())
    return &Symbol;

  const MCExpr *Expr = Symbol.getVariableValue();
  MCValue Value;
  if (!Expr->evaluateAsValue(Value, *this)) {
    reportError(Expr->getLoc(), "expression could not be evaluated");
    return nullptr;
  }

  const MCSymbol *SymB = Value.getSubSym();
  if (SymB) {
    reportError(Expr->getLoc(),
                Twine("symbol '") + SymB->getName() +
                    "' could not be evaluated in a subtraction expression");
    return nullptr;
  }

  const MCSymbol *A = Value.getAddSym();
  if (!A)
    return nullptr;

  const MCSymbol &ASym = *A;
  if (ASym.isCommon()) {
    reportError(Expr->getLoc(), "Common symbol '" + ASym.getName() +
                                    "' cannot be used in assignment expr");
    return nullptr;
  }

  return &ASym;
}

uint64_t MCAssembler::getSectionAddressSize(const MCSection &Sec) const {
  assert(HasLayout);
  // The size is the last fragment's end offset.
  const MCFragment &F = *Sec.curFragList()->Tail;
  return getFragmentOffset(F) + computeFragmentSize(F);
}

uint64_t MCAssembler::getSectionFileSize(const MCSection &Sec) const {
  // Virtual sections have no file size.
  if (Sec.isVirtualSection())
    return 0;
  return getSectionAddressSize(Sec);
}

bool MCAssembler::registerSymbol(const MCSymbol &Symbol) {
  bool Changed = !Symbol.isRegistered();
  if (Changed) {
    Symbol.setIsRegistered(true);
    Symbols.push_back(&Symbol);
  }
  return Changed;
}

void MCAssembler::writeFragmentPadding(raw_ostream &OS,
                                       const MCEncodedFragment &EF,
                                       uint64_t FSize) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // Should NOP padding be written out before this fragment?
  unsigned BundlePadding = EF.getBundlePadding();
  if (BundlePadding > 0) {
    assert(isBundlingEnabled() &&
           "Writing bundle padding with disabled bundling");
    assert(EF.hasInstructions() &&
           "Writing bundle padding for a fragment without instructions");

    unsigned TotalLength = BundlePadding + static_cast<unsigned>(FSize);
    const MCSubtargetInfo *STI = EF.getSubtargetInfo();
    if (EF.alignToBundleEnd() && TotalLength > getBundleAlignSize()) {
      // If the padding itself crosses a bundle boundary, it must be emitted
      // in 2 pieces, since even nop instructions must not cross boundaries.
      //             v--------------v   <- BundleAlignSize
      //        v---------v             <- BundlePadding
      // ----------------------------
      // | Prev |####|####|    F    |
      // ----------------------------
      //        ^-------------------^   <- TotalLength
      unsigned DistanceToBoundary = TotalLength - getBundleAlignSize();
      if (!getBackend().writeNopData(OS, DistanceToBoundary, STI))
        report_fatal_error("unable to write NOP sequence of " +
                           Twine(DistanceToBoundary) + " bytes");
      BundlePadding -= DistanceToBoundary;
    }
    if (!getBackend().writeNopData(OS, BundlePadding, STI))
      report_fatal_error("unable to write NOP sequence of " +
                         Twine(BundlePadding) + " bytes");
  }
}

/// Write the fragment \p F to the output file.
static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
                          const MCFragment &F) {
  // FIXME: Embed in fragments instead?
  uint64_t FragmentSize = Asm.computeFragmentSize(F);

  llvm::endianness Endian = Asm.getBackend().Endian;

  if (const MCEncodedFragment *EF = dyn_cast<MCEncodedFragment>(&F))
    Asm.writeFragmentPadding(OS, *EF, FragmentSize);

  // This variable (and its dummy usage) exists only to feed the assert at the
  // end of the function.
  uint64_t Start = OS.tell();
  (void)Start;

  ++stats::EmittedFragments;

  switch (F.getKind()) {
  case MCFragment::FT_Align: {
    ++stats::EmittedAlignFragments;
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!");

    uint64_t Count = FragmentSize / AF.getValueSize();

    // FIXME: This error shouldn't actually occur (the front end should emit
    // multiple .align directives to enforce the semantics it wants), but is
    // severe enough that we want to report it. How to handle this?
    if (Count * AF.getValueSize() != FragmentSize)
      report_fatal_error("undefined .align directive, value size '" +
                         Twine(AF.getValueSize()) +
                         "' is not a divisor of padding size '" +
                         Twine(FragmentSize) + "'");

    // If we are aligning with nops, ask the target to emit the right data.
    if (AF.hasEmitNops()) {
      if (!Asm.getBackend().writeNopData(OS, Count, AF.getSubtargetInfo()))
        report_fatal_error("unable to write nop sequence of " +
                           Twine(Count) + " bytes");
      break;
    }

    // Otherwise, write out in multiples of the value size.
    for (uint64_t i = 0; i != Count; ++i) {
      switch (AF.getValueSize()) {
      default: llvm_unreachable("Invalid size!");
      case 1: OS << char(AF.getValue()); break;
      case 2:
        support::endian::write<uint16_t>(OS, AF.getValue(), Endian);
        break;
      case 4:
        support::endian::write<uint32_t>(OS, AF.getValue(), Endian);
        break;
      case 8:
        support::endian::write<uint64_t>(OS, AF.getValue(), Endian);
        break;
      }
    }
    break;
  }

  case MCFragment::FT_Data:
    ++stats::EmittedDataFragments;
    OS << StringRef(cast<MCDataFragment>(F).getContents().data(),
                    cast<MCDataFragment>(F).getContents().size());
    break;

  case MCFragment::FT_Relaxable:
    ++stats::EmittedRelaxableFragments;
    OS << StringRef(cast<MCRelaxableFragment>(F).getContents().data(),
                    cast<MCRelaxableFragment>(F).getContents().size());
    break;

  case MCFragment::FT_Fill: {
    ++stats::EmittedFillFragments;
    const MCFillFragment &FF = cast<MCFillFragment>(F);
    uint64_t V = FF.getValue();
    unsigned VSize = FF.getValueSize();
    const unsigned MaxChunkSize = 16;
    char Data[MaxChunkSize];
    assert(0 < VSize && VSize <= MaxChunkSize && "Illegal fragment fill size");
    // Duplicate V into Data as a byte vector to reduce the number of writes;
    // the endian conversion happens here as a consequence.
    for (unsigned I = 0; I != VSize; ++I) {
      unsigned index = Endian == llvm::endianness::little ? I : (VSize - I - 1);
      Data[I] = uint8_t(V >> (index * 8));
    }
    for (unsigned I = VSize; I < MaxChunkSize; ++I)
      Data[I] = Data[I - VSize];

    // NumPerChunk is the number of complete values that fit in Data;
    // ChunkSize is the largest multiple of VSize that fits in Data.
    const unsigned NumPerChunk = MaxChunkSize / VSize;
    const unsigned ChunkSize = VSize * NumPerChunk;
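    // E.g. with VSize = 4 and little-endian V = 0x01020304, Data holds the
    // byte pattern 04 03 02 01 repeated four times and ChunkSize = 16, so
    // most of the fill is written 16 bytes at a time.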

    // Do copies by chunk.
    StringRef Ref(Data, ChunkSize);
    for (uint64_t I = 0, E = FragmentSize / ChunkSize; I != E; ++I)
      OS << Ref;

    // Do the remainder if needed.
    unsigned TrailingCount = FragmentSize % ChunkSize;
    if (TrailingCount)
      OS.write(Data, TrailingCount);
    break;
  }

  case MCFragment::FT_Nops: {
    ++stats::EmittedNopsFragments;
    const MCNopsFragment &NF = cast<MCNopsFragment>(F);

    int64_t NumBytes = NF.getNumBytes();
    int64_t ControlledNopLength = NF.getControlledNopLength();
    int64_t MaximumNopLength =
        Asm.getBackend().getMaximumNopSize(*NF.getSubtargetInfo());

    assert(NumBytes > 0 && "Expected positive NOPs fragment size");
    assert(ControlledNopLength >= 0 && "Expected non-negative NOP size");

    if (ControlledNopLength > MaximumNopLength) {
      Asm.reportError(NF.getLoc(), "illegal NOP size " +
                                       std::to_string(ControlledNopLength) +
                                       ". (expected within [0, " +
                                       std::to_string(MaximumNopLength) + "])");
      // Clamp the NOP length, as reportError does not stop execution
      // immediately.
      ControlledNopLength = MaximumNopLength;
    }

    // Use the maximum NOP size if the size of each NOP is not specified.
    if (!ControlledNopLength)
      ControlledNopLength = MaximumNopLength;
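    // Emit NOPs in chunks of at most ControlledNopLength bytes; e.g. 7 bytes
    // with a 4-byte limit are written as a 4-byte NOP followed by a 3-byte
    // one.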

    while (NumBytes) {
      uint64_t NumBytesToEmit =
          (uint64_t)std::min(NumBytes, ControlledNopLength);
      assert(NumBytesToEmit && "try to emit empty NOP instruction");
      if (!Asm.getBackend().writeNopData(OS, NumBytesToEmit,
                                         NF.getSubtargetInfo())) {
        report_fatal_error("unable to write nop sequence of the remaining " +
                           Twine(NumBytesToEmit) + " bytes");
        break;
      }
      NumBytes -= NumBytesToEmit;
    }
    break;
  }

  case MCFragment::FT_LEB: {
    const MCLEBFragment &LF = cast<MCLEBFragment>(F);
    OS << StringRef(LF.getContents().data(), LF.getContents().size());
    break;
  }

  case MCFragment::FT_BoundaryAlign: {
    const MCBoundaryAlignFragment &BF = cast<MCBoundaryAlignFragment>(F);
    if (!Asm.getBackend().writeNopData(OS, FragmentSize, BF.getSubtargetInfo()))
      report_fatal_error("unable to write nop sequence of " +
                         Twine(FragmentSize) + " bytes");
    break;
  }

  case MCFragment::FT_SymbolId: {
    const MCSymbolIdFragment &SF = cast<MCSymbolIdFragment>(F);
    support::endian::write<uint32_t>(OS, SF.getSymbol()->getIndex(), Endian);
    break;
  }

  case MCFragment::FT_Org: {
    ++stats::EmittedOrgFragments;
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);

    for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
      OS << char(OF.getValue());

    break;
  }

  case MCFragment::FT_Dwarf: {
    const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F);
    OS << StringRef(OF.getContents().data(), OF.getContents().size());
    break;
  }
  case MCFragment::FT_DwarfFrame: {
    const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F);
    OS << StringRef(CF.getContents().data(), CF.getContents().size());
    break;
  }
  case MCFragment::FT_CVInlineLines: {
    const auto &OF = cast<MCCVInlineLineTableFragment>(F);
    OS << StringRef(OF.getContents().data(), OF.getContents().size());
    break;
  }
  case MCFragment::FT_CVDefRange: {
    const auto &DRF = cast<MCCVDefRangeFragment>(F);
    OS << StringRef(DRF.getContents().data(), DRF.getContents().size());
    break;
  }
  case MCFragment::FT_PseudoProbe: {
    const MCPseudoProbeAddrFragment &PF = cast<MCPseudoProbeAddrFragment>(F);
    OS << StringRef(PF.getContents().data(), PF.getContents().size());
    break;
  }
  }

  assert(OS.tell() - Start == FragmentSize &&
         "The stream should advance by fragment size");
}

void MCAssembler::writeSectionData(raw_ostream &OS,
                                   const MCSection *Sec) const {
  assert(getBackendPtr() && "Expected assembler backend");

  // Ignore virtual sections.
  if (Sec->isVirtualSection()) {
    assert(getSectionFileSize(*Sec) == 0 && "Invalid size for section!");

    // Check that contents are only things legal inside a virtual section.
    for (const MCFragment &F : *Sec) {
      switch (F.getKind()) {
      default: llvm_unreachable("Invalid fragment in virtual section!");
      case MCFragment::FT_Data: {
        // Check that we aren't trying to write non-zero contents (or fixups)
        // into a virtual section. This is to support clients which use
        // standard directives to fill the contents of virtual sections.
        const MCDataFragment &DF = cast<MCDataFragment>(F);
        if (DF.getFixups().size())
          reportError(SMLoc(), Sec->getVirtualSectionKind() + " section '" +
                                   Sec->getName() + "' cannot have fixups");
        for (char C : DF.getContents())
          if (C) {
            reportError(SMLoc(), Sec->getVirtualSectionKind() + " section '" +
                                     Sec->getName() +
                                     "' cannot have non-zero initializers");
            break;
          }
        break;
      }
      case MCFragment::FT_Align:
        // Check that we aren't trying to write a non-zero value into a virtual
        // section.
        assert((cast<MCAlignFragment>(F).getValueSize() == 0 ||
                cast<MCAlignFragment>(F).getValue() == 0) &&
               "Invalid align in virtual section!");
        break;
      case MCFragment::FT_Fill:
        assert((cast<MCFillFragment>(F).getValue() == 0) &&
               "Invalid fill in virtual section!");
        break;
      case MCFragment::FT_Org:
        break;
      }
    }

    return;
  }

  uint64_t Start = OS.tell();
  (void)Start;

  for (const MCFragment &F : *Sec)
    writeFragment(OS, *this, F);

  flushPendingErrors();
  assert(getContext().hadError() ||
         OS.tell() - Start == getSectionAddressSize(*Sec));
}

void MCAssembler::layout() {
  assert(getBackendPtr() && "Expected assembler backend");
  DEBUG_WITH_TYPE("mc-dump-pre", {
    errs() << "assembler backend - pre-layout\n--\n";
    dump();
  });

  // Assign section ordinals.
  unsigned SectionIndex = 0;
  for (MCSection &Sec : *this) {
    Sec.setOrdinal(SectionIndex++);

    // Chain together fragments from all subsections.
    if (Sec.Subsections.size() > 1) {
      MCDataFragment Dummy;
      MCFragment *Tail = &Dummy;
      for (auto &[_, List] : Sec.Subsections) {
        assert(List.Head);
        Tail->Next = List.Head;
        Tail = List.Tail;
      }
      Sec.Subsections.clear();
      Sec.Subsections.push_back({0u, {Dummy.getNext(), Tail}});
      Sec.CurFragList = &Sec.Subsections[0].second;

      unsigned FragmentIndex = 0;
      for (MCFragment &Frag : Sec)
        Frag.setLayoutOrder(FragmentIndex++);
    }
  }

  // Layout until everything fits.
  this->HasLayout = true;
  for (MCSection &Sec : *this)
    layoutSection(Sec);
  while (relaxOnce())
    if (getContext().hadError())
      return;

  // Some targets might want to adjust fragment offsets. If so, perform another
  // layout iteration.
  if (getBackend().finishLayout(*this))
    for (MCSection &Sec : *this)
      layoutSection(Sec);

  flushPendingErrors();

  DEBUG_WITH_TYPE("mc-dump", {
    errs() << "assembler backend - final-layout\n--\n";
    dump();
  });

  // Allow the object writer a chance to perform post-layout binding (for
  // example, to set the index fields in the symbol data).
  getWriter().executePostLayoutBinding();

  // Fragment sizes are finalized. For RISC-V linker relaxation, this flag
  // helps check whether a PC-relative fixup is fully resolved.
  this->HasFinalLayout = true;

  // Evaluate and apply the fixups, generating relocation entries as necessary.
  for (MCSection &Sec : *this) {
    for (MCFragment &Frag : Sec) {
      // Process fragments with fixups here.
      if (auto *F = dyn_cast<MCEncodedFragment>(&Frag)) {
        auto Contents = F->getContents();
        for (MCFixup &Fixup : F->getFixups()) {
          uint64_t FixedValue;
          MCValue Target;
          evaluateFixup(Frag, Fixup, Target, FixedValue,
                        /*RecordReloc=*/true, Contents);
        }
      } else if (auto *AF = dyn_cast<MCAlignFragment>(&Frag)) {
        // For RISC-V linker relaxation, an alignment relocation might be
        // needed.
        if (AF->hasEmitNops())
          getBackend().shouldInsertFixupForCodeAlign(*this, *AF);
      }
    }
  }
}

void MCAssembler::Finish() {
  layout();

  // Write the object file.
  stats::ObjectBytes += getWriter().writeObject();

  HasLayout = false;
  assert(PendingErrors.empty());
}

bool MCAssembler::fixupNeedsRelaxation(const MCRelaxableFragment &F,
                                       const MCFixup &Fixup) const {
  assert(getBackendPtr() && "Expected assembler backend");
  MCValue Target;
  uint64_t Value;
  bool Resolved = evaluateFixup(F, const_cast<MCFixup &>(Fixup), Target, Value,
                                /*RecordReloc=*/false, {});
  return getBackend().fixupNeedsRelaxationAdvanced(Fixup, Target, Value,
                                                   Resolved);
}

bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment &F) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // If this instruction doesn't ever need relaxation, ignore it. This occurs
  // when we are intentionally pushing out inst fragments, or because we
  // relaxed a previous instruction to one that doesn't need relaxation.
  if (!getBackend().mayNeedRelaxation(F.getInst(), *F.getSubtargetInfo()))
    return false;

  for (const MCFixup &Fixup : F.getFixups())
    if (fixupNeedsRelaxation(F, Fixup))
      return true;

  return false;
}

bool MCAssembler::relaxInstruction(MCRelaxableFragment &F) {
  assert(getEmitterPtr() &&
         "Expected CodeEmitter defined for relaxInstruction");
  if (!fragmentNeedsRelaxation(F))
    return false;

  ++stats::RelaxedInstructions;

  // FIXME-PERF: We could immediately lower out instructions if we can tell
  // they are fully resolved, to avoid retesting on later passes.

  // Relax the fragment.
  MCInst Relaxed = F.getInst();
  getBackend().relaxInstruction(Relaxed, *F.getSubtargetInfo());

  // Encode the new instruction.
  F.setInst(Relaxed);
  SmallVector<char, 16> Data;
  SmallVector<MCFixup, 1> Fixups;
  getEmitter().encodeInstruction(Relaxed, Data, Fixups, *F.getSubtargetInfo());
  F.setContents(Data);
  F.setFixups(Fixups);
  return true;
}

bool MCAssembler::relaxLEB(MCLEBFragment &LF) {
  const unsigned OldSize = static_cast<unsigned>(LF.getContents().size());
  unsigned PadTo = OldSize;
  int64_t Value;
  LF.clearFixups();
  // Use evaluateKnownAbsolute for Mach-O as a hack: .subsections_via_symbols
  // requires that .uleb128 A-B is foldable where A and B reside in different
  // fragments. This is used by __gcc_except_table.
  bool Abs = getWriter().getSubsectionsViaSymbols()
                 ? LF.getValue().evaluateKnownAbsolute(Value, *this)
                 : LF.getValue().evaluateAsAbsolute(Value, *this);
  if (!Abs) {
    bool Relaxed, UseZeroPad;
    std::tie(Relaxed, UseZeroPad) = getBackend().relaxLEB128(LF, Value);
    if (!Relaxed) {
      reportError(LF.getValue().getLoc(),
                  Twine(LF.isSigned() ? ".s" : ".u") +
                      "leb128 expression is not absolute");
      LF.setValue(MCConstantExpr::create(0, Context));
    }
    uint8_t Tmp[10]; // maximum size: ceil(64/7)
    PadTo = std::max(PadTo, encodeULEB128(uint64_t(Value), Tmp));
    if (UseZeroPad)
      Value = 0;
  }
  uint8_t Data[16];
  size_t Size = 0;
  // The compiler can generate EH table assembly that is impossible to assemble
  // without either adding padding to an LEB fragment or adding extra padding
  // to a later alignment fragment. To accommodate such tables, relaxation can
  // only increase an LEB fragment size here, not decrease it. See PR35809.
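  // E.g. a value of 1 that previously occupied two bytes stays two bytes:
  // encodeULEB128(1, Data, /*PadTo=*/2) emits 0x81 0x00 rather than 0x01.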
  if (LF.isSigned())
    Size = encodeSLEB128(Value, Data, PadTo);
  else
    Size = encodeULEB128(Value, Data, PadTo);
  LF.setContents({reinterpret_cast<char *>(Data), Size});
  return OldSize != Size;
}

/// Check if the branch crosses the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch crosses the boundary.
static bool mayCrossBoundary(uint64_t StartAddr, uint64_t Size,
                             Align BoundaryAlignment) {
  uint64_t EndAddr = StartAddr + Size;
  return (StartAddr >> Log2(BoundaryAlignment)) !=
         ((EndAddr - 1) >> Log2(BoundaryAlignment));
}

/// Check if the branch is against the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch is against the boundary.
static bool isAgainstBoundary(uint64_t StartAddr, uint64_t Size,
                              Align BoundaryAlignment) {
  uint64_t EndAddr = StartAddr + Size;
  return (EndAddr & (BoundaryAlignment.value() - 1)) == 0;
}

/// Check if the branch needs padding.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch needs padding.
static bool needPadding(uint64_t StartAddr, uint64_t Size,
                        Align BoundaryAlignment) {
  return mayCrossBoundary(StartAddr, Size, BoundaryAlignment) ||
         isAgainstBoundary(StartAddr, Size, BoundaryAlignment);
}

bool MCAssembler::relaxBoundaryAlign(MCBoundaryAlignFragment &BF) {
  // A BoundaryAlignFragment that doesn't need to align any fragment should
  // not be relaxed.
  if (!BF.getLastFragment())
    return false;

  uint64_t AlignedOffset = getFragmentOffset(BF);
  uint64_t AlignedSize = 0;
  for (const MCFragment *F = BF.getNext();; F = F->getNext()) {
    AlignedSize += computeFragmentSize(*F);
    if (F == BF.getLastFragment())
      break;
  }

  Align BoundaryAlignment = BF.getAlignment();
  uint64_t NewSize = needPadding(AlignedOffset, AlignedSize, BoundaryAlignment)
                         ? offsetToAlignment(AlignedOffset, BoundaryAlignment)
                         : 0U;
  if (NewSize == BF.getSize())
    return false;
  BF.setSize(NewSize);
  return true;
}

bool MCAssembler::relaxDwarfLineAddr(MCDwarfLineAddrFragment &DF) {
  bool WasRelaxed;
  if (getBackend().relaxDwarfLineAddr(DF, WasRelaxed))
    return WasRelaxed;

  MCContext &Context = getContext();
  auto OldSize = DF.getContents().size();
  int64_t AddrDelta;
  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, *this);
  assert(Abs && "We created a line delta with an invalid expression");
  (void)Abs;
  int64_t LineDelta = DF.getLineDelta();
  SmallVector<char, 8> Data;

  MCDwarfLineAddr::encode(Context, getDWARFLinetableParams(), LineDelta,
                          AddrDelta, Data);
  DF.setContents(Data);
  DF.clearFixups();
  return OldSize != Data.size();
}

bool MCAssembler::relaxDwarfCallFrameFragment(MCDwarfCallFrameFragment &DF) {
  bool WasRelaxed;
  if (getBackend().relaxDwarfCFA(DF, WasRelaxed))
    return WasRelaxed;

  MCContext &Context = getContext();
  int64_t Value;
  bool Abs = DF.getAddrDelta().evaluateAsAbsolute(Value, *this);
  if (!Abs) {
    reportError(DF.getAddrDelta().getLoc(),
                "invalid CFI advance_loc expression");
    DF.setAddrDelta(MCConstantExpr::create(0, Context));
    return false;
  }

  auto OldSize = DF.getContents().size();
  SmallVector<char, 8> Data;
  MCDwarfFrameEmitter::encodeAdvanceLoc(Context, Value, Data);
  DF.setContents(Data);
  DF.clearFixups();
  return OldSize != Data.size();
}

bool MCAssembler::relaxCVInlineLineTable(MCCVInlineLineTableFragment &F) {
  unsigned OldSize = F.getContents().size();
  getContext().getCVContext().encodeInlineLineTable(*this, F);
  return OldSize != F.getContents().size();
}

bool MCAssembler::relaxCVDefRange(MCCVDefRangeFragment &F) {
  unsigned OldSize = F.getContents().size();
  getContext().getCVContext().encodeDefRange(*this, F);
  return OldSize != F.getContents().size();
}

bool MCAssembler::relaxFill(MCFillFragment &F) {
  uint64_t Size = computeFragmentSize(F);
  if (F.getSize() == Size)
    return false;
  F.setSize(Size);
  return true;
}

bool MCAssembler::relaxPseudoProbeAddr(MCPseudoProbeAddrFragment &PF) {
  uint64_t OldSize = PF.getContents().size();
  int64_t AddrDelta;
  bool Abs = PF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, *this);
  assert(Abs && "We created a pseudo probe with an invalid expression");
  (void)Abs;
  SmallVector<char, 8> Data;
  raw_svector_ostream OSE(Data);

  // AddrDelta is a signed integer.
  encodeSLEB128(AddrDelta, OSE, OldSize);
  PF.setContents(Data);
  PF.clearFixups();
  return OldSize != Data.size();
}

bool MCAssembler::relaxFragment(MCFragment &F) {
  switch (F.getKind()) {
  default:
    return false;
  case MCFragment::FT_Relaxable:
    assert(!getRelaxAll() &&
           "Did not expect a MCRelaxableFragment in RelaxAll mode");
    return relaxInstruction(cast<MCRelaxableFragment>(F));
  case MCFragment::FT_Dwarf:
    return relaxDwarfLineAddr(cast<MCDwarfLineAddrFragment>(F));
  case MCFragment::FT_DwarfFrame:
    return relaxDwarfCallFrameFragment(cast<MCDwarfCallFrameFragment>(F));
  case MCFragment::FT_LEB:
    return relaxLEB(cast<MCLEBFragment>(F));
  case MCFragment::FT_BoundaryAlign:
    return relaxBoundaryAlign(cast<MCBoundaryAlignFragment>(F));
  case MCFragment::FT_CVInlineLines:
    return relaxCVInlineLineTable(cast<MCCVInlineLineTableFragment>(F));
  case MCFragment::FT_CVDefRange:
    return relaxCVDefRange(cast<MCCVDefRangeFragment>(F));
  case MCFragment::FT_Fill:
    return relaxFill(cast<MCFillFragment>(F));
  case MCFragment::FT_PseudoProbe:
    return relaxPseudoProbeAddr(cast<MCPseudoProbeAddrFragment>(F));
  }
}

void MCAssembler::layoutSection(MCSection &Sec) {
  MCFragment *Prev = nullptr;
  uint64_t Offset = 0;
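  // Assign consecutive offsets; e.g. fragments of sizes 4, 2 and 8 are placed
  // at offsets 0, 4 and 6 respectively (before any bundle padding).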
  for (MCFragment &F : Sec) {
    F.Offset = Offset;
    if (LLVM_UNLIKELY(isBundlingEnabled())) {
      if (F.hasInstructions()) {
        layoutBundle(Prev, &F);
        Offset = F.Offset;
      }
      Prev = &F;
    }
    Offset += computeFragmentSize(F);
  }
}

bool MCAssembler::relaxOnce() {
  ++stats::RelaxationSteps;
  PendingErrors.clear();

  // The size of fragments in one section can depend on the size of fragments
  // in another. If any fragment has changed size, we have to re-layout (and
  // as a result possibly further relax) all sections.
  bool ChangedAny = false;
  for (MCSection &Sec : *this) {
    // Assume each iteration finalizes at least one extra fragment. If the
    // layout does not converge after N+1 iterations, bail out.
    auto MaxIter = Sec.curFragList()->Tail->getLayoutOrder() + 1;
    for (;;) {
      bool Changed = false;
      for (MCFragment &F : Sec)
        if (relaxFragment(F))
          Changed = true;

      ChangedAny |= Changed;
      if (!Changed || --MaxIter == 0)
        break;
      layoutSection(Sec);
    }
  }
  return ChangedAny;
}

void MCAssembler::reportError(SMLoc L, const Twine &Msg) const {
  getContext().reportError(L, Msg);
}

void MCAssembler::recordError(SMLoc Loc, const Twine &Msg) const {
  PendingErrors.emplace_back(Loc, Msg.str());
}

void MCAssembler::flushPendingErrors() const {
  for (auto &Err : PendingErrors)
    reportError(Err.first, Err.second);
  PendingErrors.clear();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MCAssembler::dump() const {
  raw_ostream &OS = errs();
  DenseMap<const MCFragment *, SmallVector<const MCSymbol *, 0>> FragToSyms;
  // Scan symbols and build a map of fragments to their corresponding symbols.
  // For variable symbols, we don't want to call their getFragment, which might
  // modify `Fragment`.
  for (const MCSymbol &Sym : symbols())
    if (!Sym.isVariable())
      if (auto *F = Sym.getFragment())
        FragToSyms.try_emplace(F).first->second.push_back(&Sym);

  OS << "Sections:[";
  for (const MCSection &Sec : *this) {
    OS << '\n';
    Sec.dump(&FragToSyms);
  }
  OS << "\n]\n";
}
#endif