//===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/MC/MCAssembler.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCCodeView.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCFragment.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <tuple>
#include <utility>

using namespace llvm;

namespace llvm {
class MCSubtargetInfo;
}

#define DEBUG_TYPE "assembler"

namespace {
namespace stats {

STATISTIC(EmittedFragments, "Number of emitted assembler fragments - total");
STATISTIC(EmittedRelaxableFragments,
          "Number of emitted assembler fragments - relaxable");
STATISTIC(EmittedDataFragments,
          "Number of emitted assembler fragments - data");
STATISTIC(EmittedCompactEncodedInstFragments,
          "Number of emitted assembler fragments - compact encoded inst");
STATISTIC(EmittedAlignFragments,
          "Number of emitted assembler fragments - align");
STATISTIC(EmittedFillFragments,
          "Number of emitted assembler fragments - fill");
STATISTIC(EmittedNopsFragments, "Number of emitted assembler fragments - nops");
STATISTIC(EmittedOrgFragments, "Number of emitted assembler fragments - org");
STATISTIC(evaluateFixup, "Number of evaluated fixups");
STATISTIC(ObjectBytes, "Number of emitted object file bytes");
STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
STATISTIC(RelaxedInstructions, "Number of relaxed instructions");

} // end namespace stats
} // end anonymous namespace

// FIXME FIXME FIXME: There are a number of places in this file where we
// convert what is a 64-bit assembler value used for computation into a value
// in the object file, which may truncate it. We should detect that truncation
// where invalid and report errors back.

/* *** */

MCAssembler::MCAssembler(MCContext &Context,
                         std::unique_ptr<MCAsmBackend> Backend,
                         std::unique_ptr<MCCodeEmitter> Emitter,
                         std::unique_ptr<MCObjectWriter> Writer)
    : Context(Context), Backend(std::move(Backend)),
      Emitter(std::move(Emitter)), Writer(std::move(Writer)) {}

void MCAssembler::reset() {
  RelaxAll = false;
  Sections.clear();
  Symbols.clear();
  ThumbFuncs.clear();
  BundleAlignSize = 0;

  // reset objects owned by us
  if (getBackendPtr())
    getBackendPtr()->reset();
  if (getEmitterPtr())
    getEmitterPtr()->reset();
  if (Writer)
    Writer->reset();
}

bool MCAssembler::registerSection(MCSection &Section) {
  if (Section.isRegistered())
    return false;
  assert(Section.curFragList()->Head && "allocInitialFragment not called");
  Sections.push_back(&Section);
  Section.setIsRegistered(true);
  return true;
}

bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const {
  if (ThumbFuncs.count(Symbol))
    return true;

  if (!Symbol->isVariable())
    return false;

  const MCExpr *Expr = Symbol->getVariableValue();

  MCValue V;
  if (!Expr->evaluateAsRelocatable(V, nullptr, nullptr))
    return false;

  if (V.getSymB() || V.getRefKind() != MCSymbolRefExpr::VK_None)
    return false;

  const MCSymbolRefExpr *Ref = V.getSymA();
  if (!Ref)
    return false;

  if (Ref->getKind() != MCSymbolRefExpr::VK_None)
    return false;

  const MCSymbol &Sym = Ref->getSymbol();
  if (!isThumbFunc(&Sym))
    return false;

  ThumbFuncs.insert(Symbol); // Cache it.
  return true;
}

bool MCAssembler::evaluateFixup(const MCFixup &Fixup, const MCFragment *DF,
                                MCValue &Target, const MCSubtargetInfo *STI,
                                uint64_t &Value, bool &WasForced) const {
  ++stats::evaluateFixup;

  // FIXME: This code has some duplication with recordRelocation. We should
  // probably merge the two into a single callback that tries to evaluate a
  // fixup and records a relocation if one is needed.

  // On error claim to have completely evaluated the fixup, to prevent any
  // further processing from being done.
  const MCExpr *Expr = Fixup.getValue();
  MCContext &Ctx = getContext();
  Value = 0;
  WasForced = false;
  if (!Expr->evaluateAsRelocatable(Target, this, &Fixup)) {
    Ctx.reportError(Fixup.getLoc(), "expected relocatable expression");
    return true;
  }
  if (const MCSymbolRefExpr *RefB = Target.getSymB()) {
    if (RefB->getKind() != MCSymbolRefExpr::VK_None) {
      Ctx.reportError(Fixup.getLoc(),
                      "unsupported subtraction of qualified symbol");
      return true;
    }
  }

  assert(getBackendPtr() && "Expected assembler backend");
  bool IsTarget = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags &
                  MCFixupKindInfo::FKF_IsTarget;

  if (IsTarget)
    return getBackend().evaluateTargetFixup(*this, Fixup, DF, Target, STI,
                                            Value, WasForced);

  unsigned FixupFlags =
      getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags;
  bool IsPCRel = FixupFlags & MCFixupKindInfo::FKF_IsPCRel;

  bool IsResolved = false;
  if (IsPCRel) {
    if (Target.getSymB()) {
      IsResolved = false;
    } else if (!Target.getSymA()) {
      IsResolved = false;
    } else {
      const MCSymbolRefExpr *A = Target.getSymA();
      const MCSymbol &SA = A->getSymbol();
      if (A->getKind() != MCSymbolRefExpr::VK_None || SA.isUndefined()) {
        IsResolved = false;
      } else {
        IsResolved = (FixupFlags & MCFixupKindInfo::FKF_Constant) ||
                     getWriter().isSymbolRefDifferenceFullyResolvedImpl(
                         *this, SA, *DF, false, true);
      }
    }
  } else {
    IsResolved = Target.isAbsolute();
  }

  Value = Target.getConstant();

  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    const MCSymbol &Sym = A->getSymbol();
    if (Sym.isDefined())
      Value += getSymbolOffset(Sym);
  }
  if (const MCSymbolRefExpr *B = Target.getSymB()) {
    const MCSymbol &Sym = B->getSymbol();
    if (Sym.isDefined())
      Value -= getSymbolOffset(Sym);
  }

  bool ShouldAlignPC = getBackend().getFixupKindInfo(Fixup.getKind()).Flags &
                       MCFixupKindInfo::FKF_IsAlignedDownTo32Bits;
  assert((ShouldAlignPC ? IsPCRel : true) &&
         "FKF_IsAlignedDownTo32Bits is only allowed on PC-relative fixups!");

  if (IsPCRel) {
    uint64_t Offset = getFragmentOffset(*DF) + Fixup.getOffset();

    // A number of ARM fixups in Thumb mode require that the effective PC
    // address be determined as the 32-bit aligned version of the actual
    // offset.
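    // E.g., a fixup at offset 0x1006 sees an effective base address of
    // 0x1004.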
    if (ShouldAlignPC) Offset &= ~0x3;
    Value -= Offset;
  }

  // Let the backend force a relocation if needed.
  if (IsResolved &&
      getBackend().shouldForceRelocation(*this, Fixup, Target, STI)) {
    IsResolved = false;
    WasForced = true;
  }

  // A linker relaxation target may emit ADD/SUB relocations for A-B+C. Let
  // recordRelocation handle non-VK_None cases like A@plt-B+C.
  if (!IsResolved && Target.getSymA() && Target.getSymB() &&
      Target.getSymA()->getKind() == MCSymbolRefExpr::VK_None &&
      getBackend().handleAddSubRelocations(*this, *DF, Fixup, Target, Value))
    return true;

  return IsResolved;
}

uint64_t MCAssembler::computeFragmentSize(const MCFragment &F) const {
  assert(getBackendPtr() && "Requires assembler backend");
  switch (F.getKind()) {
  case MCFragment::FT_Data:
    return cast<MCDataFragment>(F).getContents().size();
  case MCFragment::FT_Relaxable:
    return cast<MCRelaxableFragment>(F).getContents().size();
  case MCFragment::FT_CompactEncodedInst:
    return cast<MCCompactEncodedInstFragment>(F).getContents().size();
  case MCFragment::FT_Fill: {
    auto &FF = cast<MCFillFragment>(F);
    int64_t NumValues = 0;
    if (!FF.getNumValues().evaluateKnownAbsolute(NumValues, *this)) {
      getContext().reportError(FF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }
    int64_t Size = NumValues * FF.getValueSize();
    if (Size < 0) {
      getContext().reportError(FF.getLoc(), "invalid number of bytes");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Nops:
    return cast<MCNopsFragment>(F).getNumBytes();

  case MCFragment::FT_LEB:
    return cast<MCLEBFragment>(F).getContents().size();

  case MCFragment::FT_BoundaryAlign:
    return cast<MCBoundaryAlignFragment>(F).getSize();

  case MCFragment::FT_SymbolId:
    return 4;

  case MCFragment::FT_Align: {
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    unsigned Offset = getFragmentOffset(AF);
    unsigned Size = offsetToAlignment(Offset, AF.getAlignment());
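    // E.g., at offset 13 with 8-byte alignment, 3 bytes of padding are
    // needed to reach offset 16.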

    // Insert extra Nops for code alignment if the target defines the
    // shouldInsertExtraNopBytesForCodeAlign hook.
    if (AF.getParent()->useCodeAlign() && AF.hasEmitNops() &&
        getBackend().shouldInsertExtraNopBytesForCodeAlign(AF, Size))
      return Size;

    // If we are padding with nops, force the padding to be a multiple of the
    // minimum nop size.
    if (Size > 0 && AF.hasEmitNops()) {
      while (Size % getBackend().getMinimumNopSize())
        Size += AF.getAlignment().value();
    }
    if (Size > AF.getMaxBytesToEmit())
      return 0;
    return Size;
  }

  case MCFragment::FT_Org: {
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);
    MCValue Value;
    if (!OF.getOffset().evaluateAsValue(Value, *this)) {
      getContext().reportError(OF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }

    uint64_t FragmentOffset = getFragmentOffset(OF);
    int64_t TargetLocation = Value.getConstant();
    if (const MCSymbolRefExpr *A = Value.getSymA()) {
      uint64_t Val;
      if (!getSymbolOffset(A->getSymbol(), Val)) {
        getContext().reportError(OF.getLoc(), "expected absolute expression");
        return 0;
      }
      TargetLocation += Val;
    }
    int64_t Size = TargetLocation - FragmentOffset;
    if (Size < 0 || Size >= 0x40000000) {
      getContext().reportError(
          OF.getLoc(), "invalid .org offset '" + Twine(TargetLocation) +
                           "' (at offset '" + Twine(FragmentOffset) + "')");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Dwarf:
    return cast<MCDwarfLineAddrFragment>(F).getContents().size();
  case MCFragment::FT_DwarfFrame:
    return cast<MCDwarfCallFrameFragment>(F).getContents().size();
  case MCFragment::FT_CVInlineLines:
    return cast<MCCVInlineLineTableFragment>(F).getContents().size();
  case MCFragment::FT_CVDefRange:
    return cast<MCCVDefRangeFragment>(F).getContents().size();
  case MCFragment::FT_PseudoProbe:
    return cast<MCPseudoProbeAddrFragment>(F).getContents().size();
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  llvm_unreachable("invalid fragment kind");
}

// Compute the amount of padding required before the fragment \p F to
// obey bundling restrictions, where \p FOffset is the fragment's offset in
// its section and \p FSize is the fragment's size.
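// For example, with BundleSize = 16, FOffset = 10 and FSize = 8 (and no
// alignToBundleEnd()), the fragment would end at offset 18 and cross the
// 16-byte boundary, so 16 - 10 = 6 bytes of padding are required.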
static uint64_t computeBundlePadding(unsigned BundleSize,
                                     const MCEncodedFragment *F,
                                     uint64_t FOffset, uint64_t FSize) {
  uint64_t OffsetInBundle = FOffset & (BundleSize - 1);
  uint64_t EndOfFragment = OffsetInBundle + FSize;

  // There are two kinds of bundling restrictions:
  //
  // 1) For alignToBundleEnd(), add padding to ensure that the fragment will
  //    *end* on a bundle boundary.
  // 2) Otherwise, check if the fragment would cross a bundle boundary. If it
  //    would, add padding until the end of the bundle so that the fragment
  //    will start in a new one.
  if (F->alignToBundleEnd()) {
    // Three possibilities here:
    //
    // A) The fragment just happens to end at a bundle boundary, so we're good.
    // B) The fragment ends before the current bundle boundary: pad it just
    //    enough to reach the boundary.
    // C) The fragment ends after the current bundle boundary: pad it until it
    //    reaches the end of the next bundle boundary.
    //
    // Note: this code could be made shorter with some modulo trickery, but
    // it's intentionally kept in its more explicit form for simplicity.
    if (EndOfFragment == BundleSize)
      return 0;
    else if (EndOfFragment < BundleSize)
      return BundleSize - EndOfFragment;
    else // EndOfFragment > BundleSize
      return 2 * BundleSize - EndOfFragment;
  } else if (OffsetInBundle > 0 && EndOfFragment > BundleSize)
    return BundleSize - OffsetInBundle;
  else
    return 0;
}

void MCAssembler::layoutBundle(MCFragment *Prev, MCFragment *F) const {
  // If bundling is enabled and this fragment has instructions in it, it has to
  // obey the bundling restrictions. With padding, we'll have:
  //
  //
  //        BundlePadding
  //             |||
  // -------------------------------------
  //   Prev  |##########|       F        |
  // -------------------------------------
  //                     ^
  //                     |
  //                     F->Offset
  //
  // The fragment's offset will point to after the padding, and its computed
  // size won't include the padding.
  //
  // ".align N" is an example of a directive that introduces multiple
  // fragments. We could add a special case to handle ".align N" by emitting
  // within-fragment padding (which would produce less padding when N is less
  // than the bundle size), but for now we don't.
  //
  assert(isa<MCEncodedFragment>(F) &&
         "Only MCEncodedFragment implementations have instructions");
  MCEncodedFragment *EF = cast<MCEncodedFragment>(F);
  uint64_t FSize = computeFragmentSize(*EF);

  if (FSize > getBundleAlignSize())
    report_fatal_error("Fragment can't be larger than a bundle size");

  uint64_t RequiredBundlePadding =
      computeBundlePadding(getBundleAlignSize(), EF, EF->Offset, FSize);
  if (RequiredBundlePadding > UINT8_MAX)
    report_fatal_error("Padding cannot exceed 255 bytes");
  EF->setBundlePadding(static_cast<uint8_t>(RequiredBundlePadding));
  EF->Offset += RequiredBundlePadding;
  if (auto *DF = dyn_cast_or_null<MCDataFragment>(Prev))
    if (DF->getContents().empty())
      DF->Offset = EF->Offset;
}

void MCAssembler::ensureValid(MCSection &Sec) const {
  if (Sec.hasLayout())
    return;
  Sec.setHasLayout(true);
  MCFragment *Prev = nullptr;
  uint64_t Offset = 0;
  for (MCFragment &F : Sec) {
    F.Offset = Offset;
    if (isBundlingEnabled() && F.hasInstructions()) {
      layoutBundle(Prev, &F);
      Offset = F.Offset;
    }
    Offset += computeFragmentSize(F);
    Prev = &F;
  }
}

uint64_t MCAssembler::getFragmentOffset(const MCFragment &F) const {
  ensureValid(*F.getParent());
  return F.Offset;
}

// Simple getSymbolOffset helper for the non-variable case.
static bool getLabelOffset(const MCAssembler &Asm, const MCSymbol &S,
                           bool ReportError, uint64_t &Val) {
  if (!S.getFragment()) {
    if (ReportError)
      report_fatal_error("unable to evaluate offset to undefined symbol '" +
                         S.getName() + "'");
    return false;
  }
  Val = Asm.getFragmentOffset(*S.getFragment()) + S.getOffset();
  return true;
}

static bool getSymbolOffsetImpl(const MCAssembler &Asm, const MCSymbol &S,
                                bool ReportError, uint64_t &Val) {
  if (!S.isVariable())
    return getLabelOffset(Asm, S, ReportError, Val);

  // If S is a variable, evaluate it.
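  // E.g., for ".set x, a - b + 4", Target evaluates to (a, b, 4) and the
  // result below is offset(a) - offset(b) + 4.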
  MCValue Target;
  if (!S.getVariableValue()->evaluateAsValue(Target, Asm))
    report_fatal_error("unable to evaluate offset for variable '" +
                       S.getName() + "'");

  uint64_t Offset = Target.getConstant();

  const MCSymbolRefExpr *A = Target.getSymA();
  if (A) {
    uint64_t ValA;
    // FIXME: On most platforms, `Target`'s component symbols are labels from
    // having been simplified during evaluation, but on Mach-O they can be
    // variables due to PR19203. This, and the line below for `B` can be
    // restored to call `getLabelOffset` when PR19203 is fixed.
    if (!getSymbolOffsetImpl(Asm, A->getSymbol(), ReportError, ValA))
      return false;
    Offset += ValA;
  }

  const MCSymbolRefExpr *B = Target.getSymB();
  if (B) {
    uint64_t ValB;
    if (!getSymbolOffsetImpl(Asm, B->getSymbol(), ReportError, ValB))
      return false;
    Offset -= ValB;
  }

  Val = Offset;
  return true;
}

bool MCAssembler::getSymbolOffset(const MCSymbol &S, uint64_t &Val) const {
  return getSymbolOffsetImpl(*this, S, false, Val);
}

uint64_t MCAssembler::getSymbolOffset(const MCSymbol &S) const {
  uint64_t Val;
  getSymbolOffsetImpl(*this, S, true, Val);
  return Val;
}

const MCSymbol *MCAssembler::getBaseSymbol(const MCSymbol &Symbol) const {
  assert(HasLayout);
  if (!Symbol.isVariable())
    return &Symbol;

  const MCExpr *Expr = Symbol.getVariableValue();
  MCValue Value;
  if (!Expr->evaluateAsValue(Value, *this)) {
    getContext().reportError(Expr->getLoc(),
                             "expression could not be evaluated");
    return nullptr;
  }

  const MCSymbolRefExpr *RefB = Value.getSymB();
  if (RefB) {
    getContext().reportError(
        Expr->getLoc(),
        Twine("symbol '") + RefB->getSymbol().getName() +
            "' could not be evaluated in a subtraction expression");
    return nullptr;
  }

  const MCSymbolRefExpr *A = Value.getSymA();
  if (!A)
    return nullptr;

  const MCSymbol &ASym = A->getSymbol();
  if (ASym.isCommon()) {
    getContext().reportError(Expr->getLoc(),
                             "Common symbol '" + ASym.getName() +
                                 "' cannot be used in assignment expr");
    return nullptr;
  }

  return &ASym;
}

uint64_t MCAssembler::getSectionAddressSize(const MCSection &Sec) const {
  assert(HasLayout);
  // The size is the last fragment's end offset.
  const MCFragment &F = *Sec.curFragList()->Tail;
  return getFragmentOffset(F) + computeFragmentSize(F);
}

uint64_t MCAssembler::getSectionFileSize(const MCSection &Sec) const {
  // Virtual sections have no file size.
  if (Sec.isVirtualSection())
    return 0;
  return getSectionAddressSize(Sec);
}

bool MCAssembler::registerSymbol(const MCSymbol &Symbol) {
  bool Changed = !Symbol.isRegistered();
  if (Changed) {
    Symbol.setIsRegistered(true);
    Symbols.push_back(&Symbol);
  }
  return Changed;
}

void MCAssembler::writeFragmentPadding(raw_ostream &OS,
                                       const MCEncodedFragment &EF,
                                       uint64_t FSize) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // Should NOP padding be written out before this fragment?
  unsigned BundlePadding = EF.getBundlePadding();
  if (BundlePadding > 0) {
    assert(isBundlingEnabled() &&
           "Writing bundle padding with disabled bundling");
    assert(EF.hasInstructions() &&
           "Writing bundle padding for a fragment without instructions");

    unsigned TotalLength = BundlePadding + static_cast<unsigned>(FSize);
    const MCSubtargetInfo *STI = EF.getSubtargetInfo();
    if (EF.alignToBundleEnd() && TotalLength > getBundleAlignSize()) {
      // If the padding itself crosses a bundle boundary, it must be emitted
      // in 2 pieces, since even nop instructions must not cross boundaries.
      //             v--------------v   <- BundleAlignSize
      //        v---------v             <- BundlePadding
      // ----------------------------
      // | Prev |####|####|    F    |
      // ----------------------------
      //        ^-------------------^   <- TotalLength
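      // E.g., with a 16-byte bundle and TotalLength = 20, the first nop run
      // covers the 4 bytes up to the boundary; the rest is emitted after it.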
      unsigned DistanceToBoundary = TotalLength - getBundleAlignSize();
      if (!getBackend().writeNopData(OS, DistanceToBoundary, STI))
        report_fatal_error("unable to write NOP sequence of " +
                           Twine(DistanceToBoundary) + " bytes");
      BundlePadding -= DistanceToBoundary;
    }
    if (!getBackend().writeNopData(OS, BundlePadding, STI))
      report_fatal_error("unable to write NOP sequence of " +
                         Twine(BundlePadding) + " bytes");
  }
}

/// Write the fragment \p F to the output file.
static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
                          const MCFragment &F) {
  // FIXME: Embed in fragments instead?
  uint64_t FragmentSize = Asm.computeFragmentSize(F);

  llvm::endianness Endian = Asm.getBackend().Endian;

  if (const MCEncodedFragment *EF = dyn_cast<MCEncodedFragment>(&F))
    Asm.writeFragmentPadding(OS, *EF, FragmentSize);

  // This variable (and its dummy usage) is to participate in the assert at
  // the end of the function.
  uint64_t Start = OS.tell();
  (void)Start;

  ++stats::EmittedFragments;

  switch (F.getKind()) {
  case MCFragment::FT_Align: {
    ++stats::EmittedAlignFragments;
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!");

    uint64_t Count = FragmentSize / AF.getValueSize();

    // FIXME: This error shouldn't actually occur (the front end should emit
    // multiple .align directives to enforce the semantics it wants), but is
    // severe enough that we want to report it. How to handle this?
    if (Count * AF.getValueSize() != FragmentSize)
      report_fatal_error("undefined .align directive, value size '" +
                         Twine(AF.getValueSize()) +
                         "' is not a divisor of padding size '" +
                         Twine(FragmentSize) + "'");

    // See if we are aligning with nops, and if so do that first to try to
    // fill the Count bytes. Then if that did not fill any bytes or there are
    // any bytes left to fill, use the Value and ValueSize to fill the rest.
    // If we are aligning with nops, ask the target to emit the right data.
    if (AF.hasEmitNops()) {
      if (!Asm.getBackend().writeNopData(OS, Count, AF.getSubtargetInfo()))
        report_fatal_error("unable to write nop sequence of " +
                           Twine(Count) + " bytes");
      break;
    }

    // Otherwise, write out in multiples of the value size.
    for (uint64_t i = 0; i != Count; ++i) {
      switch (AF.getValueSize()) {
      default: llvm_unreachable("Invalid size!");
      case 1: OS << char(AF.getValue()); break;
      case 2:
        support::endian::write<uint16_t>(OS, AF.getValue(), Endian);
        break;
      case 4:
        support::endian::write<uint32_t>(OS, AF.getValue(), Endian);
        break;
      case 8:
        support::endian::write<uint64_t>(OS, AF.getValue(), Endian);
        break;
      }
    }
    break;
  }

  case MCFragment::FT_Data:
    ++stats::EmittedDataFragments;
    OS << cast<MCDataFragment>(F).getContents();
    break;

  case MCFragment::FT_Relaxable:
    ++stats::EmittedRelaxableFragments;
    OS << cast<MCRelaxableFragment>(F).getContents();
    break;

  case MCFragment::FT_CompactEncodedInst:
    ++stats::EmittedCompactEncodedInstFragments;
    OS << cast<MCCompactEncodedInstFragment>(F).getContents();
    break;

  case MCFragment::FT_Fill: {
    ++stats::EmittedFillFragments;
    const MCFillFragment &FF = cast<MCFillFragment>(F);
    uint64_t V = FF.getValue();
    unsigned VSize = FF.getValueSize();
    const unsigned MaxChunkSize = 16;
    char Data[MaxChunkSize];
    assert(0 < VSize && VSize <= MaxChunkSize && "Illegal fragment fill size");
    // Duplicate V into Data as byte vector to reduce number of
    // writes done. As such, do endian conversion here.
    for (unsigned I = 0; I != VSize; ++I) {
      unsigned index = Endian == llvm::endianness::little ? I : (VSize - I - 1);
      Data[I] = uint8_t(V >> (index * 8));
    }
    for (unsigned I = VSize; I < MaxChunkSize; ++I)
      Data[I] = Data[I - VSize];
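    // E.g., for V = 0x0102 with VSize = 2 on a little-endian target, Data now
    // holds the byte pattern 02 01 repeated to fill all 16 bytes.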

    // NumPerChunk is the number of copies of V that fit in one chunk.
    const unsigned NumPerChunk = MaxChunkSize / VSize;
    // ChunkSize is the largest multiple of VSize that fits in Data.
    const unsigned ChunkSize = VSize * NumPerChunk;

    // Do copies by chunk.
    StringRef Ref(Data, ChunkSize);
    for (uint64_t I = 0, E = FragmentSize / ChunkSize; I != E; ++I)
      OS << Ref;

    // Do the remainder if needed.
    unsigned TrailingCount = FragmentSize % ChunkSize;
    if (TrailingCount)
      OS.write(Data, TrailingCount);
    break;
  }

  case MCFragment::FT_Nops: {
    ++stats::EmittedNopsFragments;
    const MCNopsFragment &NF = cast<MCNopsFragment>(F);

    int64_t NumBytes = NF.getNumBytes();
    int64_t ControlledNopLength = NF.getControlledNopLength();
    int64_t MaximumNopLength =
        Asm.getBackend().getMaximumNopSize(*NF.getSubtargetInfo());

    assert(NumBytes > 0 && "Expected positive NOPs fragment size");
    assert(ControlledNopLength >= 0 && "Expected non-negative NOP size");

    if (ControlledNopLength > MaximumNopLength) {
      Asm.getContext().reportError(NF.getLoc(),
                                   "illegal NOP size " +
                                       std::to_string(ControlledNopLength) +
                                       ". (expected within [0, " +
                                       std::to_string(MaximumNopLength) + "])");
      // Clamp the NOP length as reportError does not stop the execution
      // immediately.
      ControlledNopLength = MaximumNopLength;
    }

    // Use the maximum value if the size of each NOP is not specified.
    if (!ControlledNopLength)
      ControlledNopLength = MaximumNopLength;

    while (NumBytes) {
      uint64_t NumBytesToEmit =
          (uint64_t)std::min(NumBytes, ControlledNopLength);
      assert(NumBytesToEmit && "try to emit empty NOP instruction");
      if (!Asm.getBackend().writeNopData(OS, NumBytesToEmit,
                                         NF.getSubtargetInfo())) {
        report_fatal_error("unable to write nop sequence of the remaining " +
                           Twine(NumBytesToEmit) + " bytes");
        break;
      }
      NumBytes -= NumBytesToEmit;
    }
    break;
  }

  case MCFragment::FT_LEB: {
    const MCLEBFragment &LF = cast<MCLEBFragment>(F);
    OS << LF.getContents();
    break;
  }

  case MCFragment::FT_BoundaryAlign: {
    const MCBoundaryAlignFragment &BF = cast<MCBoundaryAlignFragment>(F);
    if (!Asm.getBackend().writeNopData(OS, FragmentSize, BF.getSubtargetInfo()))
      report_fatal_error("unable to write nop sequence of " +
                         Twine(FragmentSize) + " bytes");
    break;
  }

  case MCFragment::FT_SymbolId: {
    const MCSymbolIdFragment &SF = cast<MCSymbolIdFragment>(F);
    support::endian::write<uint32_t>(OS, SF.getSymbol()->getIndex(), Endian);
    break;
  }

  case MCFragment::FT_Org: {
    ++stats::EmittedOrgFragments;
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);

    for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
      OS << char(OF.getValue());

    break;
  }

  case MCFragment::FT_Dwarf: {
    const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F);
    OS << OF.getContents();
    break;
  }
  case MCFragment::FT_DwarfFrame: {
    const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F);
    OS << CF.getContents();
    break;
  }
  case MCFragment::FT_CVInlineLines: {
    const auto &OF = cast<MCCVInlineLineTableFragment>(F);
    OS << OF.getContents();
    break;
  }
  case MCFragment::FT_CVDefRange: {
    const auto &DRF = cast<MCCVDefRangeFragment>(F);
    OS << DRF.getContents();
    break;
  }
  case MCFragment::FT_PseudoProbe: {
    const MCPseudoProbeAddrFragment &PF = cast<MCPseudoProbeAddrFragment>(F);
    OS << PF.getContents();
    break;
  }
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  assert(OS.tell() - Start == FragmentSize &&
         "The stream should advance by fragment size");
}

void MCAssembler::writeSectionData(raw_ostream &OS,
                                   const MCSection *Sec) const {
  assert(getBackendPtr() && "Expected assembler backend");

  // Ignore virtual sections.
  if (Sec->isVirtualSection()) {
    assert(getSectionFileSize(*Sec) == 0 && "Invalid size for section!");

    // Check that contents are only things legal inside a virtual section.
    for (const MCFragment &F : *Sec) {
      switch (F.getKind()) {
      default: llvm_unreachable("Invalid fragment in virtual section!");
      case MCFragment::FT_Data: {
        // Check that we aren't trying to write a non-zero contents (or fixups)
        // into a virtual section. This is to support clients which use standard
        // directives to fill the contents of virtual sections.
        const MCDataFragment &DF = cast<MCDataFragment>(F);
        if (DF.fixup_begin() != DF.fixup_end())
          getContext().reportError(SMLoc(), Sec->getVirtualSectionKind() +
                                                " section '" + Sec->getName() +
                                                "' cannot have fixups");
        for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i)
          if (DF.getContents()[i]) {
            getContext().reportError(SMLoc(),
                                     Sec->getVirtualSectionKind() +
                                         " section '" + Sec->getName() +
                                         "' cannot have non-zero initializers");
            break;
          }
        break;
      }
      case MCFragment::FT_Align:
        // Check that we aren't trying to write a non-zero value into a virtual
        // section.
        assert((cast<MCAlignFragment>(F).getValueSize() == 0 ||
                cast<MCAlignFragment>(F).getValue() == 0) &&
               "Invalid align in virtual section!");
        break;
      case MCFragment::FT_Fill:
        assert((cast<MCFillFragment>(F).getValue() == 0) &&
               "Invalid fill in virtual section!");
        break;
      case MCFragment::FT_Org:
        break;
      }
    }

    return;
  }

  uint64_t Start = OS.tell();
  (void)Start;

  for (const MCFragment &F : *Sec)
    writeFragment(OS, *this, F);

  assert(getContext().hadError() ||
         OS.tell() - Start == getSectionAddressSize(*Sec));
}

std::tuple<MCValue, uint64_t, bool>
MCAssembler::handleFixup(MCFragment &F, const MCFixup &Fixup,
                         const MCSubtargetInfo *STI) {
  // Evaluate the fixup.
  MCValue Target;
  uint64_t FixedValue;
  bool WasForced;
  bool IsResolved =
      evaluateFixup(Fixup, &F, Target, STI, FixedValue, WasForced);
  if (!IsResolved) {
    // The fixup was unresolved, we need a relocation. Inform the object
    // writer of the relocation, and give it an opportunity to adjust the
    // fixup value if need be.
    getWriter().recordRelocation(*this, &F, Fixup, Target, FixedValue);
  }
  return std::make_tuple(Target, FixedValue, IsResolved);
}

void MCAssembler::layout() {
  assert(getBackendPtr() && "Expected assembler backend");
  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - pre-layout\n--\n";
      dump(); });

  // Assign section ordinals.
  unsigned SectionIndex = 0;
  for (MCSection &Sec : *this) {
    Sec.setOrdinal(SectionIndex++);

    // Chain together fragments from all subsections.
    if (Sec.Subsections.size() > 1) {
      MCDummyFragment Dummy;
      MCFragment *Tail = &Dummy;
      for (auto &[_, List] : Sec.Subsections) {
        assert(List.Head);
        Tail->Next = List.Head;
        Tail = List.Tail;
      }
      Sec.Subsections.clear();
      Sec.Subsections.push_back({0u, {Dummy.getNext(), Tail}});
      Sec.CurFragList = &Sec.Subsections[0].second;

      unsigned FragmentIndex = 0;
      for (MCFragment &Frag : Sec)
        Frag.setLayoutOrder(FragmentIndex++);
    }
  }

  // Layout until everything fits.
  this->HasLayout = true;
  while (layoutOnce()) {
    if (getContext().hadError())
      return;
    // Size of fragments in one section can depend on the size of fragments in
    // another. If any fragment has changed size, we have to re-layout (and
    // as a result possibly further relax) all.
    for (MCSection &Sec : *this)
      Sec.setHasLayout(false);
  }

  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - post-relaxation\n--\n";
      dump(); });

  // Finalize the layout, including fragment lowering.
  getBackend().finishLayout(*this);

  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - final-layout\n--\n";
      dump(); });

  // Allow the object writer a chance to perform post-layout binding (for
  // example, to set the index fields in the symbol data).
  getWriter().executePostLayoutBinding(*this);

  // Evaluate and apply the fixups, generating relocation entries as necessary.
  for (MCSection &Sec : *this) {
    for (MCFragment &Frag : Sec) {
      ArrayRef<MCFixup> Fixups;
      MutableArrayRef<char> Contents;
      const MCSubtargetInfo *STI = nullptr;

      // Process MCAlignFragment and MCEncodedFragmentWithFixups here.
      switch (Frag.getKind()) {
      default:
        continue;
      case MCFragment::FT_Align: {
        MCAlignFragment &AF = cast<MCAlignFragment>(Frag);
        // Insert a fixup for code alignment if the target defines the
        // shouldInsertFixupForCodeAlign hook.
        if (Sec.useCodeAlign() && AF.hasEmitNops())
          getBackend().shouldInsertFixupForCodeAlign(*this, AF);
        continue;
      }
      case MCFragment::FT_Data: {
        MCDataFragment &DF = cast<MCDataFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        STI = DF.getSubtargetInfo();
        assert(!DF.hasInstructions() || STI != nullptr);
        break;
      }
      case MCFragment::FT_Relaxable: {
        MCRelaxableFragment &RF = cast<MCRelaxableFragment>(Frag);
        Fixups = RF.getFixups();
        Contents = RF.getContents();
        STI = RF.getSubtargetInfo();
        assert(!RF.hasInstructions() || STI != nullptr);
        break;
      }
      case MCFragment::FT_CVDefRange: {
        MCCVDefRangeFragment &CF = cast<MCCVDefRangeFragment>(Frag);
        Fixups = CF.getFixups();
        Contents = CF.getContents();
        break;
      }
      case MCFragment::FT_Dwarf: {
        MCDwarfLineAddrFragment &DF = cast<MCDwarfLineAddrFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        break;
      }
      case MCFragment::FT_DwarfFrame: {
        MCDwarfCallFrameFragment &DF = cast<MCDwarfCallFrameFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        break;
      }
      case MCFragment::FT_LEB: {
        auto &LF = cast<MCLEBFragment>(Frag);
        Fixups = LF.getFixups();
        Contents = LF.getContents();
        break;
      }
      case MCFragment::FT_PseudoProbe: {
        MCPseudoProbeAddrFragment &PF = cast<MCPseudoProbeAddrFragment>(Frag);
        Fixups = PF.getFixups();
        Contents = PF.getContents();
        break;
      }
      }
      for (const MCFixup &Fixup : Fixups) {
        uint64_t FixedValue;
        bool IsResolved;
        MCValue Target;
        std::tie(Target, FixedValue, IsResolved) =
            handleFixup(Frag, Fixup, STI);
        getBackend().applyFixup(*this, Fixup, Target, Contents, FixedValue,
                                IsResolved, STI);
      }
    }
  }
}

void MCAssembler::Finish() {
  layout();

  // Write the object file.
  stats::ObjectBytes += getWriter().writeObject(*this);

  HasLayout = false;
}

bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup,
                                       const MCRelaxableFragment *DF) const {
  assert(getBackendPtr() && "Expected assembler backend");
  MCValue Target;
  uint64_t Value;
  bool WasForced;
  bool Resolved = evaluateFixup(Fixup, DF, Target, DF->getSubtargetInfo(),
                                Value, WasForced);
  if (Target.getSymA() &&
      Target.getSymA()->getKind() == MCSymbolRefExpr::VK_X86_ABS8 &&
      Fixup.getKind() == FK_Data_1)
    return false;
  return getBackend().fixupNeedsRelaxationAdvanced(*this, Fixup, Resolved,
                                                   Value, DF, WasForced);
}

bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // If this inst doesn't ever need relaxation, ignore it. This occurs when we
  // are intentionally pushing out inst fragments, or because we relaxed a
  // previous instruction to one that doesn't need relaxation.
  if (!getBackend().mayNeedRelaxation(F->getInst(), *F->getSubtargetInfo()))
    return false;

  for (const MCFixup &Fixup : F->getFixups())
    if (fixupNeedsRelaxation(Fixup, F))
      return true;

  return false;
}

bool MCAssembler::relaxInstruction(MCRelaxableFragment &F) {
  assert(getEmitterPtr() &&
         "Expected CodeEmitter defined for relaxInstruction");
  if (!fragmentNeedsRelaxation(&F))
    return false;

  ++stats::RelaxedInstructions;

  // FIXME-PERF: We could immediately lower out instructions if we can tell
  // they are fully resolved, to avoid retesting on later passes.

  // Relax the fragment.

  MCInst Relaxed = F.getInst();
  getBackend().relaxInstruction(Relaxed, *F.getSubtargetInfo());

  // Encode the new instruction.
  F.setInst(Relaxed);
  F.getFixups().clear();
  F.getContents().clear();
  getEmitter().encodeInstruction(Relaxed, F.getContents(), F.getFixups(),
                                 *F.getSubtargetInfo());
  return true;
}

bool MCAssembler::relaxLEB(MCLEBFragment &LF) {
  const unsigned OldSize = static_cast<unsigned>(LF.getContents().size());
  unsigned PadTo = OldSize;
  int64_t Value;
  SmallVectorImpl<char> &Data = LF.getContents();
  LF.getFixups().clear();
  // Use evaluateKnownAbsolute for Mach-O as a hack: .subsections_via_symbols
  // requires that .uleb128 A-B is foldable where A and B reside in different
  // fragments. This is used by __gcc_except_table.
  bool Abs = getWriter().getSubsectionsViaSymbols()
                 ? LF.getValue().evaluateKnownAbsolute(Value, *this)
                 : LF.getValue().evaluateAsAbsolute(Value, *this);
  if (!Abs) {
    bool Relaxed, UseZeroPad;
    std::tie(Relaxed, UseZeroPad) = getBackend().relaxLEB128(*this, LF, Value);
    if (!Relaxed) {
      getContext().reportError(LF.getValue().getLoc(),
                               Twine(LF.isSigned() ? ".s" : ".u") +
                                   "leb128 expression is not absolute");
      LF.setValue(MCConstantExpr::create(0, Context));
    }
    uint8_t Tmp[10]; // maximum size: ceil(64/7)
    PadTo = std::max(PadTo, encodeULEB128(uint64_t(Value), Tmp));
    if (UseZeroPad)
      Value = 0;
  }
  Data.clear();
  raw_svector_ostream OSE(Data);
  // The compiler can generate EH table assembly that is impossible to
  // assemble without either adding padding to an LEB fragment or adding extra
  // padding to a later alignment fragment. To accommodate such tables,
  // relaxation can only increase an LEB fragment size here, not decrease it.
  // See PR35809.
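  // E.g., a .uleb128 previously laid out as two bytes whose value relaxes
  // down to 127 is still emitted as two bytes: 0xff 0x00.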
  if (LF.isSigned())
    encodeSLEB128(Value, OSE, PadTo);
  else
    encodeULEB128(Value, OSE, PadTo);
  return OldSize != LF.getContents().size();
}

/// Check if the branch crosses the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch crosses the boundary.
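///
/// E.g., with a 32-byte boundary, a 4-byte branch at address 30 occupies
/// [30, 34): 30 >> 5 == 0 while 33 >> 5 == 1, so it crosses the boundary.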
static bool mayCrossBoundary(uint64_t StartAddr, uint64_t Size,
                             Align BoundaryAlignment) {
  uint64_t EndAddr = StartAddr + Size;
  return (StartAddr >> Log2(BoundaryAlignment)) !=
         ((EndAddr - 1) >> Log2(BoundaryAlignment));
}

/// Check if the branch is against the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch is against the boundary.
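///
/// E.g., with a 32-byte boundary, a 4-byte branch at address 28 ends exactly
/// at 32, so it is against the boundary.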
static bool isAgainstBoundary(uint64_t StartAddr, uint64_t Size,
                              Align BoundaryAlignment) {
  uint64_t EndAddr = StartAddr + Size;
  return (EndAddr & (BoundaryAlignment.value() - 1)) == 0;
}

/// Check if the branch needs padding.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch needs padding.
static bool needPadding(uint64_t StartAddr, uint64_t Size,
                        Align BoundaryAlignment) {
  return mayCrossBoundary(StartAddr, Size, BoundaryAlignment) ||
         isAgainstBoundary(StartAddr, Size, BoundaryAlignment);
}

bool MCAssembler::relaxBoundaryAlign(MCBoundaryAlignFragment &BF) {
  // A BoundaryAlignFragment that doesn't need to align any fragment should
  // not be relaxed.
  if (!BF.getLastFragment())
    return false;

  uint64_t AlignedOffset = getFragmentOffset(BF);
  uint64_t AlignedSize = 0;
  for (const MCFragment *F = BF.getNext();; F = F->getNext()) {
    AlignedSize += computeFragmentSize(*F);
    if (F == BF.getLastFragment())
      break;
  }

  Align BoundaryAlignment = BF.getAlignment();
  uint64_t NewSize = needPadding(AlignedOffset, AlignedSize, BoundaryAlignment)
                         ? offsetToAlignment(AlignedOffset, BoundaryAlignment)
                         : 0U;
  if (NewSize == BF.getSize())
    return false;
  BF.setSize(NewSize);
  return true;
}

bool MCAssembler::relaxDwarfLineAddr(MCDwarfLineAddrFragment &DF) {
  bool WasRelaxed;
  if (getBackend().relaxDwarfLineAddr(*this, DF, WasRelaxed))
    return WasRelaxed;

  MCContext &Context = getContext();
  uint64_t OldSize = DF.getContents().size();
  int64_t AddrDelta;
  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, *this);
  assert(Abs && "We created a line delta with an invalid expression");
  (void)Abs;
  int64_t LineDelta = DF.getLineDelta();
  SmallVectorImpl<char> &Data = DF.getContents();
  Data.clear();
  DF.getFixups().clear();

  MCDwarfLineAddr::encode(Context, getDWARFLinetableParams(), LineDelta,
                          AddrDelta, Data);
  return OldSize != Data.size();
}

bool MCAssembler::relaxDwarfCallFrameFragment(MCDwarfCallFrameFragment &DF) {
  bool WasRelaxed;
  if (getBackend().relaxDwarfCFA(*this, DF, WasRelaxed))
    return WasRelaxed;

  MCContext &Context = getContext();
  int64_t Value;
  bool Abs = DF.getAddrDelta().evaluateAsAbsolute(Value, *this);
  if (!Abs) {
    getContext().reportError(DF.getAddrDelta().getLoc(),
                             "invalid CFI advance_loc expression");
    DF.setAddrDelta(MCConstantExpr::create(0, Context));
    return false;
  }

  SmallVectorImpl<char> &Data = DF.getContents();
  uint64_t OldSize = Data.size();
  Data.clear();
  DF.getFixups().clear();

  MCDwarfFrameEmitter::encodeAdvanceLoc(Context, Value, Data);
  return OldSize != Data.size();
}

bool MCAssembler::relaxCVInlineLineTable(MCCVInlineLineTableFragment &F) {
  unsigned OldSize = F.getContents().size();
  getContext().getCVContext().encodeInlineLineTable(*this, F);
  return OldSize != F.getContents().size();
}

bool MCAssembler::relaxCVDefRange(MCCVDefRangeFragment &F) {
  unsigned OldSize = F.getContents().size();
  getContext().getCVContext().encodeDefRange(*this, F);
  return OldSize != F.getContents().size();
}

bool MCAssembler::relaxPseudoProbeAddr(MCPseudoProbeAddrFragment &PF) {
  uint64_t OldSize = PF.getContents().size();
  int64_t AddrDelta;
  bool Abs = PF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, *this);
  assert(Abs && "We created a pseudo probe with an invalid expression");
  (void)Abs;
  SmallVectorImpl<char> &Data = PF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  PF.getFixups().clear();

  // AddrDelta is a signed integer.
  encodeSLEB128(AddrDelta, OSE, OldSize);
  return OldSize != Data.size();
}

bool MCAssembler::relaxFragment(MCFragment &F) {
  switch (F.getKind()) {
  default:
    return false;
  case MCFragment::FT_Relaxable:
    assert(!getRelaxAll() &&
           "Did not expect a MCRelaxableFragment in RelaxAll mode");
    return relaxInstruction(cast<MCRelaxableFragment>(F));
  case MCFragment::FT_Dwarf:
    return relaxDwarfLineAddr(cast<MCDwarfLineAddrFragment>(F));
  case MCFragment::FT_DwarfFrame:
    return relaxDwarfCallFrameFragment(cast<MCDwarfCallFrameFragment>(F));
  case MCFragment::FT_LEB:
    return relaxLEB(cast<MCLEBFragment>(F));
  case MCFragment::FT_BoundaryAlign:
    return relaxBoundaryAlign(cast<MCBoundaryAlignFragment>(F));
  case MCFragment::FT_CVInlineLines:
    return relaxCVInlineLineTable(cast<MCCVInlineLineTableFragment>(F));
  case MCFragment::FT_CVDefRange:
    return relaxCVDefRange(cast<MCCVDefRangeFragment>(F));
  case MCFragment::FT_PseudoProbe:
    return relaxPseudoProbeAddr(cast<MCPseudoProbeAddrFragment>(F));
  }
}

bool MCAssembler::layoutOnce() {
  ++stats::RelaxationSteps;

  bool Changed = false;
  for (MCSection &Sec : *this)
    for (MCFragment &Frag : Sec)
      if (relaxFragment(Frag))
        Changed = true;
  return Changed;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MCAssembler::dump() const {
  raw_ostream &OS = errs();

  OS << "<MCAssembler\n";
  OS << "  Sections:[\n    ";
  bool First = true;
  for (const MCSection &Sec : *this) {
    if (First)
      First = false;
    else
      OS << ",\n    ";
    Sec.dump();
  }
  OS << "],\n";
  OS << "  Symbols:[";

  First = true;
  for (const MCSymbol &Sym : symbols()) {
    if (First)
      First = false;
    else
      OS << ",\n           ";
    OS << "(";
    Sym.dump();
    OS << ", Index:" << Sym.getIndex() << ", ";
    OS << ")";
  }
  OS << "]>\n";
}
#endif