//===------- ELF_riscv.cpp -JIT linker implementation for ELF/riscv -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// ELF/riscv jit-link implementation.
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/JITLink/ELF_riscv.h"
#include "EHFrameSupportImpl.h"
#include "ELFLinkGraphBuilder.h"
#include "JITLinkGeneric.h"
#include "PerGraphGOTAndPLTStubsBuilder.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.h"
#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/ExecutionEngine/JITLink/riscv.h"
#include "llvm/Object/ELF.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Support/Endian.h"

#define DEBUG_TYPE "jitlink"
using namespace llvm;
using namespace llvm::jitlink;
using namespace llvm::jitlink::riscv;

namespace {

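// Per-graph GOT and PLT stub builder for ELF/riscv. R_RISCV_GOT_HI20 edges
// are given a pointer-sized GOT entry (initialized via an R_RISCV_64 or
// R_RISCV_32 edge), and external branch edges are redirected through a
// 16-byte stub that jumps via the target's GOT entry.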
class PerGraphGOTAndPLTStubsBuilder_ELF_riscv
    : public PerGraphGOTAndPLTStubsBuilder<
          PerGraphGOTAndPLTStubsBuilder_ELF_riscv> {
public:
  static constexpr size_t StubEntrySize = 16;
  static const uint8_t NullGOTEntryContent[8];
  static const uint8_t RV64StubContent[StubEntrySize];
  static const uint8_t RV32StubContent[StubEntrySize];

  using PerGraphGOTAndPLTStubsBuilder<
      PerGraphGOTAndPLTStubsBuilder_ELF_riscv>::PerGraphGOTAndPLTStubsBuilder;

  bool isRV64() const { return G.getPointerSize() == 8; }

  bool isGOTEdgeToFix(Edge &E) const { return E.getKind() == R_RISCV_GOT_HI20; }

  Symbol &createGOTEntry(Symbol &Target) {
    Block &GOTBlock =
        G.createContentBlock(getGOTSection(), getGOTEntryBlockContent(),
                             orc::ExecutorAddr(), G.getPointerSize(), 0);
    GOTBlock.addEdge(isRV64() ? R_RISCV_64 : R_RISCV_32, 0, Target, 0);
    return G.addAnonymousSymbol(GOTBlock, 0, G.getPointerSize(), false, false);
  }

  Symbol &createPLTStub(Symbol &Target) {
    Block &StubContentBlock = G.createContentBlock(
        getStubsSection(), getStubBlockContent(), orc::ExecutorAddr(), 4, 0);
    auto &GOTEntrySymbol = getGOTEntry(Target);
    StubContentBlock.addEdge(R_RISCV_CALL, 0, GOTEntrySymbol, 0);
    return G.addAnonymousSymbol(StubContentBlock, 0, StubEntrySize, true,
                                false);
  }

  void fixGOTEdge(Edge &E, Symbol &GOTEntry) {
    // Replace the relocation pair (R_RISCV_GOT_HI20, R_RISCV_PCREL_LO12) with
    // (R_RISCV_PCREL_HI20, R_RISCV_PCREL_LO12). All that is needed here is to
    // rewrite R_RISCV_GOT_HI20 as R_RISCV_PCREL_HI20 and retarget the edge at
    // the GOT entry.
    E.setKind(R_RISCV_PCREL_HI20);
    E.setTarget(GOTEntry);
  }

  void fixPLTEdge(Edge &E, Symbol &PLTStubs) {
    assert((E.getKind() == R_RISCV_CALL || E.getKind() == R_RISCV_CALL_PLT ||
            E.getKind() == CallRelaxable) &&
           "Not a PLT edge?");
    E.setKind(R_RISCV_CALL);
    E.setTarget(PLTStubs);
  }

  bool isExternalBranchEdge(Edge &E) const {
    return (E.getKind() == R_RISCV_CALL || E.getKind() == R_RISCV_CALL_PLT ||
            E.getKind() == CallRelaxable) &&
           !E.getTarget().isDefined();
  }

private:
  Section &getGOTSection() const {
    if (!GOTSection)
      GOTSection = &G.createSection("$__GOT", orc::MemProt::Read);
    return *GOTSection;
  }

  Section &getStubsSection() const {
    if (!StubsSection)
      StubsSection =
          &G.createSection("$__STUBS", orc::MemProt::Read | orc::MemProt::Exec);
    return *StubsSection;
  }

  ArrayRef<char> getGOTEntryBlockContent() {
    return {reinterpret_cast<const char *>(NullGOTEntryContent),
            G.getPointerSize()};
  }

  ArrayRef<char> getStubBlockContent() {
    auto StubContent = isRV64() ? RV64StubContent : RV32StubContent;
    return {reinterpret_cast<const char *>(StubContent), StubEntrySize};
  }

  mutable Section *GOTSection = nullptr;
  mutable Section *StubsSection = nullptr;
};

const uint8_t PerGraphGOTAndPLTStubsBuilder_ELF_riscv::NullGOTEntryContent[8] =
    {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};

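// PLT stub bodies. Each stub computes the address of its GOT entry with
// auipc, loads the target address from it (ld on RV64, lw on RV32) into t3,
// and jumps through t3. The auipc/load immediates are filled in via the
// R_RISCV_CALL edge added in createPLTStub.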
const uint8_t
    PerGraphGOTAndPLTStubsBuilder_ELF_riscv::RV64StubContent[StubEntrySize] = {
        0x17, 0x0e, 0x00, 0x00,  // auipc t3, literal
        0x03, 0x3e, 0x0e, 0x00,  // ld t3, literal(t3)
        0x67, 0x00, 0x0e, 0x00,  // jr t3
        0x13, 0x00, 0x00, 0x00}; // nop

const uint8_t
    PerGraphGOTAndPLTStubsBuilder_ELF_riscv::RV32StubContent[StubEntrySize] = {
        0x17, 0x0e, 0x00, 0x00,  // auipc t3, literal
        0x03, 0x2e, 0x0e, 0x00,  // lw t3, literal(t3)
        0x67, 0x00, 0x0e, 0x00,  // jr t3
        0x13, 0x00, 0x00, 0x00}; // nop
} // namespace
namespace llvm {
namespace jitlink {

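// Extracts the Size-bit field starting at bit Low from Num, returned in the
// low bits of the result.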
static uint32_t extractBits(uint32_t Num, unsigned Low, unsigned Size) {
  return (Num & (((1ULL << Size) - 1) << Low)) >> Low;
}

static inline bool isAlignmentCorrect(uint64_t Value, int N) {
  return (Value & (N - 1)) ? false : true;
}

// Requires 0 < N <= 64.
static inline bool isInRangeForImm(int64_t Value, int N) {
  return Value == llvm::SignExtend64(Value, N);
}

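// ELF/riscv JITLinker. In addition to the generic link algorithm, it registers
// a post-allocation pass that records every R_RISCV_PCREL_HI20 edge so that
// the paired R_RISCV_PCREL_LO12_I/S fixups can locate the HI20 relocation they
// share a PC-relative offset with.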
class ELFJITLinker_riscv : public JITLinker<ELFJITLinker_riscv> {
  friend class JITLinker<ELFJITLinker_riscv>;

public:
  ELFJITLinker_riscv(std::unique_ptr<JITLinkContext> Ctx,
                     std::unique_ptr<LinkGraph> G, PassConfiguration PassConfig)
      : JITLinker(std::move(Ctx), std::move(G), std::move(PassConfig)) {
    JITLinkerBase::getPassConfig().PostAllocationPasses.push_back(
        [this](LinkGraph &G) { return gatherRISCVPCRelHi20(G); });
  }

private:
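  // Map from (block, offset) to the R_RISCV_PCREL_HI20 edge at that location,
  // populated by gatherRISCVPCRelHi20 and queried when fixing up the paired
  // LO12 relocations.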
  DenseMap<std::pair<const Block *, orc::ExecutorAddrDiff>, const Edge *>
      RelHi20;

  Error gatherRISCVPCRelHi20(LinkGraph &G) {
    for (Block *B : G.blocks())
      for (Edge &E : B->edges())
        if (E.getKind() == R_RISCV_PCREL_HI20)
          RelHi20[{B, E.getOffset()}] = &E;

    return Error::success();
  }

  Expected<const Edge &> getRISCVPCRelHi20(const Edge &E) const {
    using namespace riscv;
    assert((E.getKind() == R_RISCV_PCREL_LO12_I ||
            E.getKind() == R_RISCV_PCREL_LO12_S) &&
           "Can only have high relocation for R_RISCV_PCREL_LO12_I or "
           "R_RISCV_PCREL_LO12_S");

    const Symbol &Sym = E.getTarget();
    const Block &B = Sym.getBlock();
    orc::ExecutorAddrDiff Offset = Sym.getOffset();

    auto It = RelHi20.find({&B, Offset});
    if (It != RelHi20.end())
      return *It->second;

    return make_error<JITLinkError>("No HI20 PCREL relocation found for "
                                    "LO12 PCREL relocation");
  }

  Error applyFixup(LinkGraph &G, Block &B, const Edge &E) const {
    using namespace riscv;
    using namespace llvm::support;

    char *BlockWorkingMem = B.getAlreadyMutableContent().data();
    char *FixupPtr = BlockWorkingMem + E.getOffset();
    orc::ExecutorAddr FixupAddress = B.getAddress() + E.getOffset();
    switch (E.getKind()) {
    case R_RISCV_32: {
      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
      *(little32_t *)FixupPtr = static_cast<uint32_t>(Value);
      break;
    }
    case R_RISCV_64: {
      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
      *(little64_t *)FixupPtr = static_cast<uint64_t>(Value);
      break;
    }
    case R_RISCV_BRANCH: {
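      // B-type encoding: imm[12|10:5] live in bits 31:25 and imm[4:1|11] in
      // bits 11:7 of the instruction; bit 0 of the offset is always zero.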
      int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
      if (LLVM_UNLIKELY(!isInRangeForImm(Value >> 1, 12)))
        return makeTargetOutOfRangeError(G, B, E);
      if (LLVM_UNLIKELY(!isAlignmentCorrect(Value, 2)))
        return makeAlignmentError(FixupAddress, Value, 2, E);
      uint32_t Imm12 = extractBits(Value, 12, 1) << 31;
      uint32_t Imm10_5 = extractBits(Value, 5, 6) << 25;
      uint32_t Imm4_1 = extractBits(Value, 1, 4) << 8;
      uint32_t Imm11 = extractBits(Value, 11, 1) << 7;
      uint32_t RawInstr = *(little32_t *)FixupPtr;
      *(little32_t *)FixupPtr =
          (RawInstr & 0x1FFF07F) | Imm12 | Imm10_5 | Imm4_1 | Imm11;
      break;
    }
    case R_RISCV_JAL: {
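      // J-type encoding: imm[20|10:1|11|19:12] occupy bits 31:12; the low 12
      // bits of the instruction (opcode and rd) are preserved.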
      int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
      if (LLVM_UNLIKELY(!isInRangeForImm(Value >> 1, 20)))
        return makeTargetOutOfRangeError(G, B, E);
      if (LLVM_UNLIKELY(!isAlignmentCorrect(Value, 2)))
        return makeAlignmentError(FixupAddress, Value, 2, E);
      uint32_t Imm20 = extractBits(Value, 20, 1) << 31;
      uint32_t Imm10_1 = extractBits(Value, 1, 10) << 21;
      uint32_t Imm11 = extractBits(Value, 11, 1) << 20;
      uint32_t Imm19_12 = extractBits(Value, 12, 8) << 12;
      uint32_t RawInstr = *(little32_t *)FixupPtr;
      *(little32_t *)FixupPtr =
          (RawInstr & 0xFFF) | Imm20 | Imm10_1 | Imm11 | Imm19_12;
      break;
    }
    case CallRelaxable:
      // Treat as R_RISCV_CALL when the relaxation pass did not run.
    case R_RISCV_CALL_PLT:
    case R_RISCV_CALL: {
      int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
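      // The 32-bit offset is split across auipc (upper 20 bits) and jalr
      // (lower 12 bits, sign-extended). Adding 0x800 compensates for that sign
      // extension: (Hi & 0xFFFFF000) plus the sign-extended low 12 bits
      // reconstructs Value.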
      int64_t Hi = Value + 0x800;
      if (LLVM_UNLIKELY(!isInRangeForImm(Hi, 32)))
        return makeTargetOutOfRangeError(G, B, E);
      int32_t Lo = Value & 0xFFF;
      uint32_t RawInstrAuipc = *(little32_t *)FixupPtr;
      uint32_t RawInstrJalr = *(little32_t *)(FixupPtr + 4);
      *(little32_t *)FixupPtr =
          RawInstrAuipc | (static_cast<uint32_t>(Hi & 0xFFFFF000));
      *(little32_t *)(FixupPtr + 4) =
          RawInstrJalr | (static_cast<uint32_t>(Lo) << 20);
      break;
    }
    // The relocations R_RISCV_CALL_PLT and R_RISCV_GOT_HI20 are handled by
    // PerGraphGOTAndPLTStubsBuilder_ELF_riscv and are transformed into
    // R_RISCV_CALL and R_RISCV_PCREL_HI20.
    case R_RISCV_PCREL_HI20: {
      int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
      int64_t Hi = Value + 0x800;
      if (LLVM_UNLIKELY(!isInRangeForImm(Hi, 32)))
        return makeTargetOutOfRangeError(G, B, E);
      uint32_t RawInstr = *(little32_t *)FixupPtr;
      *(little32_t *)FixupPtr =
          (RawInstr & 0xFFF) | (static_cast<uint32_t>(Hi & 0xFFFFF000));
      break;
    }
    case R_RISCV_PCREL_LO12_I: {
      // FIXME: We assume that an R_RISCV_PCREL_HI20 relocation is present in
      // the object code and pairs with the current R_RISCV_PCREL_LO12_I
      // relocation. A check may be needed here.
      auto RelHI20 = getRISCVPCRelHi20(E);
      if (!RelHI20)
        return RelHI20.takeError();
      int64_t Value = RelHI20->getTarget().getAddress() +
                      RelHI20->getAddend() - E.getTarget().getAddress();
      int64_t Lo = Value & 0xFFF;
      uint32_t RawInstr = *(little32_t *)FixupPtr;
      *(little32_t *)FixupPtr =
          (RawInstr & 0xFFFFF) | (static_cast<uint32_t>(Lo & 0xFFF) << 20);
      break;
    }
    case R_RISCV_PCREL_LO12_S: {
      // FIXME: We assume that an R_RISCV_PCREL_HI20 relocation is present in
      // the object code and pairs with the current R_RISCV_PCREL_LO12_S
      // relocation. A check may be needed here.
      auto RelHI20 = getRISCVPCRelHi20(E);
      if (!RelHI20)
        return RelHI20.takeError();
      int64_t Value = RelHI20->getTarget().getAddress() +
                      RelHI20->getAddend() - E.getTarget().getAddress();
      int64_t Lo = Value & 0xFFF;
      uint32_t Imm11_5 = extractBits(Lo, 5, 7) << 25;
      uint32_t Imm4_0 = extractBits(Lo, 0, 5) << 7;
      uint32_t RawInstr = *(little32_t *)FixupPtr;

      *(little32_t *)FixupPtr = (RawInstr & 0x1FFF07F) | Imm11_5 | Imm4_0;
      break;
    }
    case R_RISCV_HI20: {
      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
      int64_t Hi = Value + 0x800;
      if (LLVM_UNLIKELY(!isInRangeForImm(Hi, 32)))
        return makeTargetOutOfRangeError(G, B, E);
      uint32_t RawInstr = *(little32_t *)FixupPtr;
      *(little32_t *)FixupPtr =
          (RawInstr & 0xFFF) | (static_cast<uint32_t>(Hi & 0xFFFFF000));
      break;
    }
    case R_RISCV_LO12_I: {
      // FIXME: We assume that an R_RISCV_HI20 relocation is present in the
      // object code and pairs with the current R_RISCV_LO12_I relocation. A
      // check may be needed here.
      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
      int32_t Lo = Value & 0xFFF;
      uint32_t RawInstr = *(little32_t *)FixupPtr;
      *(little32_t *)FixupPtr =
          (RawInstr & 0xFFFFF) | (static_cast<uint32_t>(Lo & 0xFFF) << 20);
      break;
    }
    case R_RISCV_LO12_S: {
      // FIXME: We assume that an R_RISCV_HI20 relocation is present in the
      // object code and pairs with the current R_RISCV_LO12_S relocation. A
      // check may be needed here.
      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
      int64_t Lo = Value & 0xFFF;
      uint32_t Imm11_5 = extractBits(Lo, 5, 7) << 25;
      uint32_t Imm4_0 = extractBits(Lo, 0, 5) << 7;
      uint32_t RawInstr = *(little32_t *)FixupPtr;
      *(little32_t *)FixupPtr = (RawInstr & 0x1FFF07F) | Imm11_5 | Imm4_0;
      break;
    }
    case R_RISCV_ADD8: {
      int64_t Value =
          (E.getTarget().getAddress() +
           *(reinterpret_cast<const uint8_t *>(FixupPtr)) + E.getAddend())
              .getValue();
      *FixupPtr = static_cast<uint8_t>(Value);
      break;
    }
    case R_RISCV_ADD16: {
      int64_t Value = (E.getTarget().getAddress() +
                       support::endian::read16le(FixupPtr) + E.getAddend())
                          .getValue();
      *(little16_t *)FixupPtr = static_cast<uint16_t>(Value);
      break;
    }
    case R_RISCV_ADD32: {
      int64_t Value = (E.getTarget().getAddress() +
                       support::endian::read32le(FixupPtr) + E.getAddend())
                          .getValue();
      *(little32_t *)FixupPtr = static_cast<uint32_t>(Value);
      break;
    }
    case R_RISCV_ADD64: {
      int64_t Value = (E.getTarget().getAddress() +
                       support::endian::read64le(FixupPtr) + E.getAddend())
                          .getValue();
      *(little64_t *)FixupPtr = static_cast<uint64_t>(Value);
      break;
    }
    case R_RISCV_SUB8: {
      int64_t Value = *(reinterpret_cast<const uint8_t *>(FixupPtr)) -
                      E.getTarget().getAddress().getValue() - E.getAddend();
      *FixupPtr = static_cast<uint8_t>(Value);
      break;
    }
    case R_RISCV_SUB16: {
      int64_t Value = support::endian::read16le(FixupPtr) -
                      E.getTarget().getAddress().getValue() - E.getAddend();
      *(little16_t *)FixupPtr = static_cast<uint32_t>(Value);
      break;
    }
    case R_RISCV_SUB32: {
      int64_t Value = support::endian::read32le(FixupPtr) -
                      E.getTarget().getAddress().getValue() - E.getAddend();
      *(little32_t *)FixupPtr = static_cast<uint32_t>(Value);
      break;
    }
    case R_RISCV_SUB64: {
      int64_t Value = support::endian::read64le(FixupPtr) -
                      E.getTarget().getAddress().getValue() - E.getAddend();
      *(little64_t *)FixupPtr = static_cast<uint64_t>(Value);
      break;
    }
    case R_RISCV_RVC_BRANCH: {
      int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
      if (LLVM_UNLIKELY(!isInRangeForImm(Value >> 1, 8)))
        return makeTargetOutOfRangeError(G, B, E);
      if (LLVM_UNLIKELY(!isAlignmentCorrect(Value, 2)))
        return makeAlignmentError(FixupAddress, Value, 2, E);
      uint16_t Imm8 = extractBits(Value, 8, 1) << 12;
      uint16_t Imm4_3 = extractBits(Value, 3, 2) << 10;
      uint16_t Imm7_6 = extractBits(Value, 6, 2) << 5;
      uint16_t Imm2_1 = extractBits(Value, 1, 2) << 3;
      uint16_t Imm5 = extractBits(Value, 5, 1) << 2;
      uint16_t RawInstr = *(little16_t *)FixupPtr;
      *(little16_t *)FixupPtr =
          (RawInstr & 0xE383) | Imm8 | Imm4_3 | Imm7_6 | Imm2_1 | Imm5;
      break;
    }
    case R_RISCV_RVC_JUMP: {
      int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
      if (LLVM_UNLIKELY(!isInRangeForImm(Value >> 1, 11)))
        return makeTargetOutOfRangeError(G, B, E);
      if (LLVM_UNLIKELY(!isAlignmentCorrect(Value, 2)))
        return makeAlignmentError(FixupAddress, Value, 2, E);
      uint16_t Imm11 = extractBits(Value, 11, 1) << 12;
      uint16_t Imm4 = extractBits(Value, 4, 1) << 11;
      uint16_t Imm9_8 = extractBits(Value, 8, 2) << 9;
      uint16_t Imm10 = extractBits(Value, 10, 1) << 8;
      uint16_t Imm6 = extractBits(Value, 6, 1) << 7;
      uint16_t Imm7 = extractBits(Value, 7, 1) << 6;
      uint16_t Imm3_1 = extractBits(Value, 1, 3) << 3;
      uint16_t Imm5 = extractBits(Value, 5, 1) << 2;
      uint16_t RawInstr = *(little16_t *)FixupPtr;
      *(little16_t *)FixupPtr = (RawInstr & 0xE003) | Imm11 | Imm4 | Imm9_8 |
                                Imm10 | Imm6 | Imm7 | Imm3_1 | Imm5;
      break;
    }
    case R_RISCV_SUB6: {
      int64_t Value = *(reinterpret_cast<const uint8_t *>(FixupPtr)) & 0x3f;
      Value -= E.getTarget().getAddress().getValue() - E.getAddend();
      *FixupPtr = (*FixupPtr & 0xc0) | (static_cast<uint8_t>(Value) & 0x3f);
      break;
    }
    case R_RISCV_SET6: {
      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
      uint32_t RawData = *(little32_t *)FixupPtr;
      int64_t Word6 = Value & 0x3f;
      *(little32_t *)FixupPtr = (RawData & 0xffffffc0) | Word6;
      break;
    }
    case R_RISCV_SET8: {
      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
      uint32_t RawData = *(little32_t *)FixupPtr;
      int64_t Word8 = Value & 0xff;
      *(little32_t *)FixupPtr = (RawData & 0xffffff00) | Word8;
      break;
    }
    case R_RISCV_SET16: {
      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
      uint32_t RawData = *(little32_t *)FixupPtr;
      int64_t Word16 = Value & 0xffff;
      *(little32_t *)FixupPtr = (RawData & 0xffff0000) | Word16;
      break;
    }
    case R_RISCV_SET32: {
      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
      int64_t Word32 = Value & 0xffffffff;
      *(little32_t *)FixupPtr = Word32;
      break;
    }
    case R_RISCV_32_PCREL: {
      int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
      int64_t Word32 = Value & 0xffffffff;
      *(little32_t *)FixupPtr = Word32;
      break;
    }
    case AlignRelaxable:
      // Ignore when the relaxation pass did not run.
      break;
    case NegDelta32: {
      int64_t Value = FixupAddress - E.getTarget().getAddress() + E.getAddend();
      if (LLVM_UNLIKELY(!isInRangeForImm(Value, 32)))
        return makeTargetOutOfRangeError(G, B, E);
      *(little32_t *)FixupPtr = static_cast<uint32_t>(Value);
      break;
    }
    }
    return Error::success();
  }
};

namespace {

struct SymbolAnchor {
  uint64_t Offset;
  Symbol *Sym;
  bool End; // true for the anchor of getOffset() + getSize()
};

struct BlockRelaxAux {
  // This records symbol start and end offsets which will be adjusted according
  // to the nearest RelocDeltas element.
  SmallVector<SymbolAnchor, 0> Anchors;
  // All edges that either 1) are R_RISCV_ALIGN or 2) have a R_RISCV_RELAX edge
  // at the same offset.
  SmallVector<Edge *, 0> RelaxEdges;
  // For RelaxEdges[I], the actual offset is RelaxEdges[I]->getOffset() - (I ?
  // RelocDeltas[I - 1] : 0).
  SmallVector<uint32_t, 0> RelocDeltas;
  // For RelaxEdges[I], the actual type is EdgeKinds[I].
  SmallVector<Edge::Kind, 0> EdgeKinds;
  // List of rewritten instructions. Contains one raw encoded instruction per
  // element in EdgeKinds that isn't Invalid or R_RISCV_ALIGN.
  SmallVector<uint32_t, 0> Writes;
};

struct RelaxConfig {
  bool IsRV32;
  bool HasRVC;
};

struct RelaxAux {
  RelaxConfig Config;
  DenseMap<Block *, BlockRelaxAux> Blocks;
};

} // namespace

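// Linker relaxation is only attempted for blocks in executable sections.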
static bool shouldRelax(const Section &S) {
  return (S.getMemProt() & orc::MemProt::Exec) != orc::MemProt::None;
}

static bool isRelaxable(const Edge &E) {
  switch (E.getKind()) {
  default:
    return false;
  case CallRelaxable:
  case AlignRelaxable:
    return true;
  }
}

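// Collects the per-block state needed by the relaxation pass: the relaxable
// edges of every executable block, RelocDeltas/EdgeKinds arrays sized to
// match, and start/end anchors for the symbols defined in each block, sorted
// by offset.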
static RelaxAux initRelaxAux(LinkGraph &G) {
  RelaxAux Aux;
  Aux.Config.IsRV32 = G.getTargetTriple().isRISCV32();
  const auto &Features = G.getFeatures().getFeatures();
  Aux.Config.HasRVC = llvm::is_contained(Features, "+c") ||
                      llvm::is_contained(Features, "+zca");

  for (auto &S : G.sections()) {
    if (!shouldRelax(S))
      continue;
    for (auto *B : S.blocks()) {
      auto BlockEmplaceResult = Aux.Blocks.try_emplace(B);
      assert(BlockEmplaceResult.second && "Block encountered twice");
      auto &BlockAux = BlockEmplaceResult.first->second;

      for (auto &E : B->edges())
        if (isRelaxable(E))
          BlockAux.RelaxEdges.push_back(&E);

      if (BlockAux.RelaxEdges.empty()) {
        Aux.Blocks.erase(BlockEmplaceResult.first);
        continue;
      }

      const auto NumEdges = BlockAux.RelaxEdges.size();
      BlockAux.RelocDeltas.resize(NumEdges, 0);
      BlockAux.EdgeKinds.resize_for_overwrite(NumEdges);

      // Store anchors (offset and offset + size) for symbols.
      for (auto *Sym : S.symbols()) {
        if (!Sym->isDefined() || &Sym->getBlock() != B)
          continue;

        BlockAux.Anchors.push_back({Sym->getOffset(), Sym, false});
        BlockAux.Anchors.push_back(
            {Sym->getOffset() + Sym->getSize(), Sym, true});
      }
    }
  }

  // Sort anchors by offset so that we can find the closest relocation
  // efficiently. For a zero size symbol, ensure that its start anchor precedes
  // its end anchor. For two symbols with anchors at the same offset, their
  // order does not matter.
  for (auto &BlockAuxIter : Aux.Blocks) {
    llvm::sort(BlockAuxIter.second.Anchors, [](auto &A, auto &B) {
      return std::make_pair(A.Offset, A.End) < std::make_pair(B.Offset, B.End);
    });
  }

  return Aux;
}

static void relaxAlign(orc::ExecutorAddr Loc, const Edge &E, uint32_t &Remove,
                       Edge::Kind &NewEdgeKind) {
  // E points to the start of the padding bytes.
  // E + Addend points to the instruction to be aligned by removing padding.
  // Alignment is the smallest power of 2 strictly greater than Addend.
  const auto Align = NextPowerOf2(E.getAddend());
  const auto DestLoc = alignTo(Loc.getValue(), Align);
  const auto SrcLoc = Loc.getValue() + E.getAddend();
  Remove = SrcLoc - DestLoc;
  assert(static_cast<int32_t>(Remove) >= 0 &&
         "R_RISCV_ALIGN needs expanding the content");
  NewEdgeKind = AlignRelaxable;
}

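// Tries to shrink an 8-byte auipc+jalr call sequence: to a 2-byte c.j/c.jal
// when the compressed extension is available and the displacement fits in a
// signed 12-bit immediate, or to a 4-byte jal when it fits in 21 bits.
// Otherwise the call is left as a full R_RISCV_CALL_PLT.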
static void relaxCall(const Block &B, BlockRelaxAux &Aux,
                      const RelaxConfig &Config, orc::ExecutorAddr Loc,
                      const Edge &E, uint32_t &Remove,
                      Edge::Kind &NewEdgeKind) {
  const auto JALR =
      support::endian::read32le(B.getContent().data() + E.getOffset() + 4);
  const auto RD = extractBits(JALR, 7, 5);
  const auto Dest = E.getTarget().getAddress() + E.getAddend();
  const auto Displace = Dest - Loc;

  if (Config.HasRVC && isInt<12>(Displace) && RD == 0) {
    NewEdgeKind = R_RISCV_RVC_JUMP;
    Aux.Writes.push_back(0xa001); // c.j
    Remove = 6;
  } else if (Config.HasRVC && Config.IsRV32 && isInt<12>(Displace) && RD == 1) {
    NewEdgeKind = R_RISCV_RVC_JUMP;
    Aux.Writes.push_back(0x2001); // c.jal
    Remove = 6;
  } else if (isInt<21>(Displace)) {
    NewEdgeKind = R_RISCV_JAL;
    Aux.Writes.push_back(0x6f | RD << 7); // jal
    Remove = 4;
  } else {
    // Not relaxable
    NewEdgeKind = R_RISCV_CALL_PLT;
    Remove = 0;
  }
}

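// One relaxation step for a single block: recomputes the byte delta at every
// relaxable edge (taking bytes removed by earlier edges into account) and
// shifts the affected symbol anchors. Returns true if any delta changed, so
// the caller can iterate to a fixed point.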
static bool relaxBlock(LinkGraph &G, Block &Block, BlockRelaxAux &Aux,
                       const RelaxConfig &Config) {
  const auto BlockAddr = Block.getAddress();
  bool Changed = false;
  ArrayRef<SymbolAnchor> SA = ArrayRef(Aux.Anchors);
  uint32_t Delta = 0;

  Aux.EdgeKinds.assign(Aux.EdgeKinds.size(), Edge::Invalid);
  Aux.Writes.clear();

  for (auto [I, E] : llvm::enumerate(Aux.RelaxEdges)) {
    const auto Loc = BlockAddr + E->getOffset() - Delta;
    auto &Cur = Aux.RelocDeltas[I];
    uint32_t Remove = 0;
    switch (E->getKind()) {
    case AlignRelaxable:
      relaxAlign(Loc, *E, Remove, Aux.EdgeKinds[I]);
      break;
    case CallRelaxable:
      relaxCall(Block, Aux, Config, Loc, *E, Remove, Aux.EdgeKinds[I]);
      break;
    default:
      llvm_unreachable("Unexpected relaxable edge kind");
    }

    // For all anchors whose offsets are <= E->getOffset(), they are preceded
    // by the previous relocation whose RelocDeltas value equals Delta.
    // Decrease their offset and update their size.
    for (; SA.size() && SA[0].Offset <= E->getOffset(); SA = SA.slice(1)) {
      if (SA[0].End)
        SA[0].Sym->setSize(SA[0].Offset - Delta - SA[0].Sym->getOffset());
      else
        SA[0].Sym->setOffset(SA[0].Offset - Delta);
    }

    Delta += Remove;
    if (Delta != Cur) {
      Cur = Delta;
      Changed = true;
    }
  }

  for (const SymbolAnchor &A : SA) {
    if (A.End)
      A.Sym->setSize(A.Offset - Delta - A.Sym->getOffset());
    else
      A.Sym->setOffset(A.Offset - Delta);
  }

  return Changed;
}

static bool relaxOnce(LinkGraph &G, RelaxAux &Aux) {
  bool Changed = false;

  for (auto &[B, BlockAux] : Aux.Blocks)
    Changed |= relaxBlock(G, *B, BlockAux, Aux.Config);

  return Changed;
}

static void finalizeBlockRelax(LinkGraph &G, Block &Block, BlockRelaxAux &Aux) {
  auto Contents = Block.getAlreadyMutableContent();
  auto *Dest = Contents.data();
  auto NextWrite = Aux.Writes.begin();
  uint32_t Offset = 0;
  uint32_t Delta = 0;

  // Update section content: remove NOPs for R_RISCV_ALIGN and rewrite
  // instructions for relaxed relocations.
  for (auto [I, E] : llvm::enumerate(Aux.RelaxEdges)) {
    uint32_t Remove = Aux.RelocDeltas[I] - Delta;
    Delta = Aux.RelocDeltas[I];
    if (Remove == 0 && Aux.EdgeKinds[I] == Edge::Invalid)
      continue;

    // Copy from last location to the current relocated location.
    const auto Size = E->getOffset() - Offset;
    std::memmove(Dest, Contents.data() + Offset, Size);
    Dest += Size;

    uint32_t Skip = 0;
    switch (Aux.EdgeKinds[I]) {
    case Edge::Invalid:
      break;
    case AlignRelaxable:
      // For R_RISCV_ALIGN, we will place Offset in a location (among NOPs) to
      // satisfy the alignment requirement. If both Remove and E->getAddend()
      // are multiples of 4, it is as if we have skipped some NOPs. Otherwise
      // we are in the middle of a 4-byte NOP, and we need to rewrite the NOP
      // sequence.
      if (Remove % 4 || E->getAddend() % 4) {
        Skip = E->getAddend() - Remove;
        uint32_t J = 0;
        for (; J + 4 <= Skip; J += 4)
          support::endian::write32le(Dest + J, 0x00000013); // nop
        if (J != Skip) {
          assert(J + 2 == Skip);
          support::endian::write16le(Dest + J, 0x0001); // c.nop
        }
      }
      break;
    case R_RISCV_RVC_JUMP:
      Skip = 2;
      support::endian::write16le(Dest, *NextWrite++);
      break;
    case R_RISCV_JAL:
      Skip = 4;
      support::endian::write32le(Dest, *NextWrite++);
      break;
    }

    Dest += Skip;
    Offset = E->getOffset() + Skip + Remove;
  }

  std::memmove(Dest, Contents.data() + Offset, Contents.size() - Offset);

  // Fixup edge offsets and kinds.
  Delta = 0;
  size_t I = 0;
  for (auto &E : Block.edges()) {
    E.setOffset(E.getOffset() - Delta);

    if (I < Aux.RelaxEdges.size() && Aux.RelaxEdges[I] == &E) {
      if (Aux.EdgeKinds[I] != Edge::Invalid)
        E.setKind(Aux.EdgeKinds[I]);

      Delta = Aux.RelocDeltas[I];
      ++I;
    }
  }

  // Remove AlignRelaxable edges: all other relaxable edges got modified and
  // will be used later while linking. Alignment is entirely handled here so we
  // don't need these edges anymore.
  for (auto IE = Block.edges().begin(); IE != Block.edges().end();) {
    if (IE->getKind() == AlignRelaxable)
      IE = Block.removeEdge(IE);
    else
      ++IE;
  }
}

static void finalizeRelax(LinkGraph &G, RelaxAux &Aux) {
  for (auto &[B, BlockAux] : Aux.Blocks)
    finalizeBlockRelax(G, *B, BlockAux);
}

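// Top-level relaxation pass: run relaxOnce until no more bytes can be removed,
// then rewrite the block contents and edges in a final pass.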
static Error relax(LinkGraph &G) {
  auto Aux = initRelaxAux(G);
  while (relaxOnce(G, Aux)) {
  }
  finalizeRelax(G, Aux);
  return Error::success();
}

template <typename ELFT>
class ELFLinkGraphBuilder_riscv : public ELFLinkGraphBuilder<ELFT> {
private:
  static Expected<riscv::EdgeKind_riscv>
  getRelocationKind(const uint32_t Type) {
    using namespace riscv;
    switch (Type) {
    case ELF::R_RISCV_32:
      return EdgeKind_riscv::R_RISCV_32;
    case ELF::R_RISCV_64:
      return EdgeKind_riscv::R_RISCV_64;
    case ELF::R_RISCV_BRANCH:
      return EdgeKind_riscv::R_RISCV_BRANCH;
    case ELF::R_RISCV_JAL:
      return EdgeKind_riscv::R_RISCV_JAL;
    case ELF::R_RISCV_CALL:
      return EdgeKind_riscv::R_RISCV_CALL;
    case ELF::R_RISCV_CALL_PLT:
      return EdgeKind_riscv::R_RISCV_CALL_PLT;
    case ELF::R_RISCV_GOT_HI20:
      return EdgeKind_riscv::R_RISCV_GOT_HI20;
    case ELF::R_RISCV_PCREL_HI20:
      return EdgeKind_riscv::R_RISCV_PCREL_HI20;
    case ELF::R_RISCV_PCREL_LO12_I:
      return EdgeKind_riscv::R_RISCV_PCREL_LO12_I;
    case ELF::R_RISCV_PCREL_LO12_S:
      return EdgeKind_riscv::R_RISCV_PCREL_LO12_S;
    case ELF::R_RISCV_HI20:
      return EdgeKind_riscv::R_RISCV_HI20;
    case ELF::R_RISCV_LO12_I:
      return EdgeKind_riscv::R_RISCV_LO12_I;
    case ELF::R_RISCV_LO12_S:
      return EdgeKind_riscv::R_RISCV_LO12_S;
    case ELF::R_RISCV_ADD8:
      return EdgeKind_riscv::R_RISCV_ADD8;
    case ELF::R_RISCV_ADD16:
      return EdgeKind_riscv::R_RISCV_ADD16;
    case ELF::R_RISCV_ADD32:
      return EdgeKind_riscv::R_RISCV_ADD32;
    case ELF::R_RISCV_ADD64:
      return EdgeKind_riscv::R_RISCV_ADD64;
    case ELF::R_RISCV_SUB8:
      return EdgeKind_riscv::R_RISCV_SUB8;
    case ELF::R_RISCV_SUB16:
      return EdgeKind_riscv::R_RISCV_SUB16;
    case ELF::R_RISCV_SUB32:
      return EdgeKind_riscv::R_RISCV_SUB32;
    case ELF::R_RISCV_SUB64:
      return EdgeKind_riscv::R_RISCV_SUB64;
    case ELF::R_RISCV_RVC_BRANCH:
      return EdgeKind_riscv::R_RISCV_RVC_BRANCH;
    case ELF::R_RISCV_RVC_JUMP:
      return EdgeKind_riscv::R_RISCV_RVC_JUMP;
    case ELF::R_RISCV_SUB6:
      return EdgeKind_riscv::R_RISCV_SUB6;
    case ELF::R_RISCV_SET6:
      return EdgeKind_riscv::R_RISCV_SET6;
    case ELF::R_RISCV_SET8:
      return EdgeKind_riscv::R_RISCV_SET8;
    case ELF::R_RISCV_SET16:
      return EdgeKind_riscv::R_RISCV_SET16;
    case ELF::R_RISCV_SET32:
      return EdgeKind_riscv::R_RISCV_SET32;
    case ELF::R_RISCV_32_PCREL:
      return EdgeKind_riscv::R_RISCV_32_PCREL;
    case ELF::R_RISCV_ALIGN:
      return EdgeKind_riscv::AlignRelaxable;
    }

    return make_error<JITLinkError>(
        "Unsupported riscv relocation:" + formatv("{0:d}: ", Type) +
        object::getELFRelocationTypeName(ELF::EM_RISCV, Type));
  }

  EdgeKind_riscv getRelaxableRelocationKind(EdgeKind_riscv Kind) {
    switch (Kind) {
    default:
      // Just ignore unsupported relaxations
      return Kind;
    case R_RISCV_CALL:
    case R_RISCV_CALL_PLT:
      return CallRelaxable;
    }
  }

  Error addRelocations() override {
    LLVM_DEBUG(dbgs() << "Processing relocations:\n");

    using Base = ELFLinkGraphBuilder<ELFT>;
    using Self = ELFLinkGraphBuilder_riscv<ELFT>;
    for (const auto &RelSect : Base::Sections)
      if (Error Err = Base::forEachRelaRelocation(RelSect, this,
                                                  &Self::addSingleRelocation))
        return Err;

    return Error::success();
  }

  Error addSingleRelocation(const typename ELFT::Rela &Rel,
                            const typename ELFT::Shdr &FixupSect,
                            Block &BlockToFix) {
    using Base = ELFLinkGraphBuilder<ELFT>;

    uint32_t Type = Rel.getType(false);
    int64_t Addend = Rel.r_addend;

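    // R_RISCV_RELAX does not become an edge of its own: it marks the
    // immediately preceding relocation (the last edge added to this block) as
    // eligible for the relaxation pass.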
    if (Type == ELF::R_RISCV_RELAX) {
      if (BlockToFix.edges_empty())
        return make_error<StringError>(
            "R_RISCV_RELAX without preceding relocation",
            inconvertibleErrorCode());

      auto &PrevEdge = *std::prev(BlockToFix.edges().end());
      auto Kind = static_cast<EdgeKind_riscv>(PrevEdge.getKind());
      PrevEdge.setKind(getRelaxableRelocationKind(Kind));
      return Error::success();
    }

    Expected<riscv::EdgeKind_riscv> Kind = getRelocationKind(Type);
    if (!Kind)
      return Kind.takeError();

    uint32_t SymbolIndex = Rel.getSymbol(false);
    auto ObjSymbol = Base::Obj.getRelocationSymbol(Rel, Base::SymTabSec);
    if (!ObjSymbol)
      return ObjSymbol.takeError();

    Symbol *GraphSymbol = Base::getGraphSymbol(SymbolIndex);
    if (!GraphSymbol)
      return make_error<StringError>(
          formatv("Could not find symbol at given index, did you add it to "
                  "JITSymbolTable? index: {0}, shndx: {1} Size of table: {2}",
                  SymbolIndex, (*ObjSymbol)->st_shndx,
                  Base::GraphSymbols.size()),
          inconvertibleErrorCode());

    auto FixupAddress = orc::ExecutorAddr(FixupSect.sh_addr) + Rel.r_offset;
    Edge::OffsetT Offset = FixupAddress - BlockToFix.getAddress();
    Edge GE(*Kind, Offset, *GraphSymbol, Addend);
    LLVM_DEBUG({
      dbgs() << " ";
      printEdge(dbgs(), BlockToFix, GE, riscv::getEdgeKindName(*Kind));
      dbgs() << "\n";
    });

    BlockToFix.addEdge(std::move(GE));
    return Error::success();
  }

public:
  ELFLinkGraphBuilder_riscv(StringRef FileName,
                            const object::ELFFile<ELFT> &Obj, Triple TT,
                            SubtargetFeatures Features)
      : ELFLinkGraphBuilder<ELFT>(Obj, std::move(TT), std::move(Features),
                                  FileName, riscv::getEdgeKindName) {}
};

Expected<std::unique_ptr<LinkGraph>>
createLinkGraphFromELFObject_riscv(MemoryBufferRef ObjectBuffer) {
  LLVM_DEBUG({
    dbgs() << "Building jitlink graph for new input "
           << ObjectBuffer.getBufferIdentifier() << "...\n";
  });

  auto ELFObj = object::ObjectFile::createELFObjectFile(ObjectBuffer);
  if (!ELFObj)
    return ELFObj.takeError();

  auto Features = (*ELFObj)->getFeatures();
  if (!Features)
    return Features.takeError();

  if ((*ELFObj)->getArch() == Triple::riscv64) {
    auto &ELFObjFile = cast<object::ELFObjectFile<object::ELF64LE>>(**ELFObj);
    return ELFLinkGraphBuilder_riscv<object::ELF64LE>(
               (*ELFObj)->getFileName(), ELFObjFile.getELFFile(),
               (*ELFObj)->makeTriple(), std::move(*Features))
        .buildGraph();
  } else {
    assert((*ELFObj)->getArch() == Triple::riscv32 &&
           "Invalid triple for RISCV ELF object file");
    auto &ELFObjFile = cast<object::ELFObjectFile<object::ELF32LE>>(**ELFObj);
    return ELFLinkGraphBuilder_riscv<object::ELF32LE>(
               (*ELFObj)->getFileName(), ELFObjFile.getELFFile(),
               (*ELFObj)->makeTriple(), std::move(*Features))
        .buildGraph();
  }
}

void link_ELF_riscv(std::unique_ptr<LinkGraph> G,
                    std::unique_ptr<JITLinkContext> Ctx) {
  PassConfiguration Config;
  const Triple &TT = G->getTargetTriple();
  if (Ctx->shouldAddDefaultTargetPasses(TT)) {

    Config.PrePrunePasses.push_back(DWARFRecordSectionSplitter(".eh_frame"));
    Config.PrePrunePasses.push_back(EHFrameEdgeFixer(
        ".eh_frame", G->getPointerSize(), Edge::Invalid, Edge::Invalid,
        Edge::Invalid, Edge::Invalid, NegDelta32));
    Config.PrePrunePasses.push_back(EHFrameNullTerminator(".eh_frame"));

    if (auto MarkLive = Ctx->getMarkLivePass(TT))
      Config.PrePrunePasses.push_back(std::move(MarkLive));
    else
      Config.PrePrunePasses.push_back(markAllSymbolsLive);
    Config.PostPrunePasses.push_back(
        PerGraphGOTAndPLTStubsBuilder_ELF_riscv::asPass);
    Config.PostAllocationPasses.push_back(relax);
  }
  if (auto Err = Ctx->modifyPassConfig(*G, Config))
    return Ctx->notifyFailed(std::move(Err));

  ELFJITLinker_riscv::link(std::move(Ctx), std::move(G), std::move(Config));
}

LinkGraphPassFunction createRelaxationPass_ELF_riscv() { return relax; }

} // namespace jitlink
} // namespace llvm