//===------- ELF_riscv.cpp - JIT linker implementation for ELF/riscv ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// ELF/riscv jit-link implementation.
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/JITLink/ELF_riscv.h"
#include "EHFrameSupportImpl.h"
#include "ELFLinkGraphBuilder.h"
#include "JITLinkGeneric.h"
#include "PerGraphGOTAndPLTStubsBuilder.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.h"
#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/ExecutionEngine/JITLink/riscv.h"
#include "llvm/Object/ELF.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Support/Endian.h"

#define DEBUG_TYPE "jitlink"
using namespace llvm;
using namespace llvm::jitlink;
using namespace llvm::jitlink::riscv;

namespace {

class PerGraphGOTAndPLTStubsBuilder_ELF_riscv
    : public PerGraphGOTAndPLTStubsBuilder<
          PerGraphGOTAndPLTStubsBuilder_ELF_riscv> {
public:
  static constexpr size_t StubEntrySize = 16;
  static const uint8_t NullGOTEntryContent[8];
  static const uint8_t RV64StubContent[StubEntrySize];
  static const uint8_t RV32StubContent[StubEntrySize];

  using PerGraphGOTAndPLTStubsBuilder<
      PerGraphGOTAndPLTStubsBuilder_ELF_riscv>::PerGraphGOTAndPLTStubsBuilder;

  bool isRV64() const { return G.getPointerSize() == 8; }

  bool isGOTEdgeToFix(Edge &E) const { return E.getKind() == R_RISCV_GOT_HI20; }

  Symbol &createGOTEntry(Symbol &Target) {
    Block &GOTBlock =
        G.createContentBlock(getGOTSection(), getGOTEntryBlockContent(),
                             orc::ExecutorAddr(), G.getPointerSize(), 0);
    GOTBlock.addEdge(isRV64() ? R_RISCV_64 : R_RISCV_32, 0, Target, 0);
    return G.addAnonymousSymbol(GOTBlock, 0, G.getPointerSize(), false, false);
  }

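  // Note (derived from the stub bytes defined below): each PLT stub is a
  // 16-byte auipc/ld (or lw on RV32)/jr sequence that loads the callee's
  // absolute address from its GOT entry and jumps to it, so an external call
  // only needs an R_RISCV_CALL edge from the stub to that GOT entry.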
  Symbol &createPLTStub(Symbol &Target) {
    Block &StubContentBlock = G.createContentBlock(
        getStubsSection(), getStubBlockContent(), orc::ExecutorAddr(), 4, 0);
    auto &GOTEntrySymbol = getGOTEntry(Target);
    StubContentBlock.addEdge(R_RISCV_CALL, 0, GOTEntrySymbol, 0);
    return G.addAnonymousSymbol(StubContentBlock, 0, StubEntrySize, true,
                                false);
  }

  void fixGOTEdge(Edge &E, Symbol &GOTEntry) {
    // Replace the relocation pair (R_RISCV_GOT_HI20, R_RISCV_PCREL_LO12)
    // with (R_RISCV_PCREL_HI20, R_RISCV_PCREL_LO12). All that is needed here
    // is to change the R_RISCV_GOT_HI20 kind to R_RISCV_PCREL_HI20 and
    // retarget the edge at the GOT entry.
    E.setKind(R_RISCV_PCREL_HI20);
    E.setTarget(GOTEntry);
  }

  void fixPLTEdge(Edge &E, Symbol &PLTStubs) {
    assert((E.getKind() == R_RISCV_CALL || E.getKind() == R_RISCV_CALL_PLT ||
            E.getKind() == CallRelaxable) &&
           "Not a PLT edge?");
    E.setKind(R_RISCV_CALL);
    E.setTarget(PLTStubs);
  }

  bool isExternalBranchEdge(Edge &E) const {
    return (E.getKind() == R_RISCV_CALL || E.getKind() == R_RISCV_CALL_PLT ||
            E.getKind() == CallRelaxable) &&
           !E.getTarget().isDefined();
  }

private:
  Section &getGOTSection() const {
    if (!GOTSection)
      GOTSection = &G.createSection("$__GOT", orc::MemProt::Read);
    return *GOTSection;
  }

  Section &getStubsSection() const {
    if (!StubsSection)
      StubsSection =
          &G.createSection("$__STUBS", orc::MemProt::Read | orc::MemProt::Exec);
    return *StubsSection;
  }

  ArrayRef<char> getGOTEntryBlockContent() {
    return {reinterpret_cast<const char *>(NullGOTEntryContent),
            G.getPointerSize()};
  }

  ArrayRef<char> getStubBlockContent() {
    auto StubContent = isRV64() ? RV64StubContent : RV32StubContent;
    return {reinterpret_cast<const char *>(StubContent), StubEntrySize};
  }

  mutable Section *GOTSection = nullptr;
  mutable Section *StubsSection = nullptr;
};

const uint8_t PerGraphGOTAndPLTStubsBuilder_ELF_riscv::NullGOTEntryContent[8] =
    {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};

const uint8_t
    PerGraphGOTAndPLTStubsBuilder_ELF_riscv::RV64StubContent[StubEntrySize] = {
        0x17, 0x0e, 0x00, 0x00,  // auipc t3, literal
        0x03, 0x3e, 0x0e, 0x00,  // ld t3, literal(t3)
        0x67, 0x00, 0x0e, 0x00,  // jr t3
        0x13, 0x00, 0x00, 0x00}; // nop

const uint8_t
    PerGraphGOTAndPLTStubsBuilder_ELF_riscv::RV32StubContent[StubEntrySize] = {
        0x17, 0x0e, 0x00, 0x00,  // auipc t3, literal
        0x03, 0x2e, 0x0e, 0x00,  // lw t3, literal(t3)
        0x67, 0x00, 0x0e, 0x00,  // jr t3
        0x13, 0x00, 0x00, 0x00}; // nop
} // namespace
namespace llvm {
namespace jitlink {

static uint32_t extractBits(uint32_t Num, unsigned Low, unsigned Size) {
  return (Num & (((1ULL << Size) - 1) << Low)) >> Low;
}

static inline bool isAlignmentCorrect(uint64_t Value, int N) {
  return (Value & (N - 1)) == 0;
}
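
// Note: isAlignmentCorrect() above assumes N is a power of two, since
// (Value & (N - 1)) only isolates the low bits in that case; every caller in
// this file passes N = 2.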

// Requires 0 < N <= 64.
static inline bool isInRangeForImm(int64_t Value, int N) {
  return Value == llvm::SignExtend64(Value, N);
}
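
// Illustrative example: isInRangeForImm(0x7FF, 12) is true, while
// isInRangeForImm(0x800, 12) is false because sign-extending the low 12 bits
// of 0x800 yields -0x800.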

class ELFJITLinker_riscv : public JITLinker<ELFJITLinker_riscv> {
  friend class JITLinker<ELFJITLinker_riscv>;

public:
  ELFJITLinker_riscv(std::unique_ptr<JITLinkContext> Ctx,
                     std::unique_ptr<LinkGraph> G, PassConfiguration PassConfig)
      : JITLinker(std::move(Ctx), std::move(G), std::move(PassConfig)) {
    JITLinkerBase::getPassConfig().PostAllocationPasses.push_back(
        [this](LinkGraph &G) { return gatherRISCVPairs(G); });
  }

private:
  DenseMap<std::pair<const Block *, orc::ExecutorAddrDiff>, const Edge *>
      RelHi20;
  DenseMap<std::pair<const Block *, orc::ExecutorAddrDiff>, const Edge *>
      SetULEB128;

  Error gatherRISCVPairs(LinkGraph &G) {
    for (Block *B : G.blocks())
      for (Edge &E : B->edges())
        if (E.getKind() == R_RISCV_PCREL_HI20)
          RelHi20[{B, E.getOffset()}] = &E;
        else if (E.getKind() == R_RISCV_SET_ULEB128)
          SetULEB128[{B, E.getOffset()}] = &E;

    return Error::success();
  }
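
  // Note: gatherRISCVPairs() pre-indexes these edges because a PCREL_LO12
  // edge does not point at the final target; it points at the location of its
  // paired HI20 fixup, and a SUB_ULEB128 edge needs the SET_ULEB128 edge
  // recorded at the same offset. The helpers below consult these maps.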

  Expected<const Edge &> getRISCVPCRelHi20(const Edge &E) const {
    using namespace riscv;
    assert((E.getKind() == R_RISCV_PCREL_LO12_I ||
            E.getKind() == R_RISCV_PCREL_LO12_S) &&
           "Can only have high relocation for R_RISCV_PCREL_LO12_I or "
           "R_RISCV_PCREL_LO12_S");

    const Symbol &Sym = E.getTarget();
    const Block &B = Sym.getBlock();
    orc::ExecutorAddrDiff Offset = Sym.getOffset();

    auto It = RelHi20.find({&B, Offset});
    if (It != RelHi20.end())
      return *It->second;

    return make_error<JITLinkError>("No HI20 PCREL relocation found for "
                                    "LO12 PCREL relocation");
  }

  Expected<const Edge &> getRISCVSetULEB128(const Block &B,
                                            const Edge &E) const {
    using namespace riscv;
    assert(E.getKind() == R_RISCV_SUB_ULEB128 &&
           "Can only have pair relocation for R_RISCV_SUB_ULEB128");

    auto It = SetULEB128.find({&B, E.getOffset()});
    if (It != SetULEB128.end())
      return *It->second;

    return make_error<JITLinkError>(
        "No RISCV_SET_ULEB128 relocation found");
  }
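
  // Note: R_RISCV_SET_ULEB128 and R_RISCV_SUB_ULEB128 arrive as a pair at the
  // same offset and together encode (Set target + Set addend) minus (Sub
  // target + Sub addend) as a ULEB128 value, typically for DWARF-style data.
  // Only the SUB edge performs a write in applyFixup(); the SET edge is a
  // no-op there.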

  Error applyFixup(LinkGraph &G, Block &B, const Edge &E) const {
    using namespace riscv;
    using namespace llvm::support;

    char *BlockWorkingMem = B.getAlreadyMutableContent().data();
    char *FixupPtr = BlockWorkingMem + E.getOffset();
    orc::ExecutorAddr FixupAddress = B.getAddress() + E.getOffset();
    switch (E.getKind()) {
    case R_RISCV_32: {
      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
      *(little32_t *)FixupPtr = static_cast<uint32_t>(Value);
      break;
    }
    case R_RISCV_64: {
      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
      *(little64_t *)FixupPtr = static_cast<uint64_t>(Value);
      break;
    }
    case R_RISCV_BRANCH: {
      int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
      if (LLVM_UNLIKELY(!isInRangeForImm(Value >> 1, 12)))
        return makeTargetOutOfRangeError(G, B, E);
      if (LLVM_UNLIKELY(!isAlignmentCorrect(Value, 2)))
        return makeAlignmentError(FixupAddress, Value, 2, E);
      uint32_t Imm12 = extractBits(Value, 12, 1) << 31;
      uint32_t Imm10_5 = extractBits(Value, 5, 6) << 25;
      uint32_t Imm4_1 = extractBits(Value, 1, 4) << 8;
      uint32_t Imm11 = extractBits(Value, 11, 1) << 7;
      uint32_t RawInstr = *(little32_t *)FixupPtr;
      *(little32_t *)FixupPtr =
          (RawInstr & 0x1FFF07F) | Imm12 | Imm10_5 | Imm4_1 | Imm11;
      break;
    }
    case R_RISCV_JAL: {
      int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
      if (LLVM_UNLIKELY(!isInRangeForImm(Value >> 1, 20)))
        return makeTargetOutOfRangeError(G, B, E);
      if (LLVM_UNLIKELY(!isAlignmentCorrect(Value, 2)))
        return makeAlignmentError(FixupAddress, Value, 2, E);
      uint32_t Imm20 = extractBits(Value, 20, 1) << 31;
      uint32_t Imm10_1 = extractBits(Value, 1, 10) << 21;
      uint32_t Imm11 = extractBits(Value, 11, 1) << 20;
      uint32_t Imm19_12 = extractBits(Value, 12, 8) << 12;
      uint32_t RawInstr = *(little32_t *)FixupPtr;
      *(little32_t *)FixupPtr =
          (RawInstr & 0xFFF) | Imm20 | Imm10_1 | Imm11 | Imm19_12;
      break;
    }
    case CallRelaxable:
      // Treat as R_RISCV_CALL when the relaxation pass did not run
    case R_RISCV_CALL_PLT:
    case R_RISCV_CALL: {
      int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
      int64_t Hi = Value + 0x800;
      if (LLVM_UNLIKELY(!isInRangeForImm(Hi, 32)))
        return makeTargetOutOfRangeError(G, B, E);
      int32_t Lo = Value & 0xFFF;
      uint32_t RawInstrAuipc = *(little32_t *)FixupPtr;
      uint32_t RawInstrJalr = *(little32_t *)(FixupPtr + 4);
      *(little32_t *)FixupPtr =
          RawInstrAuipc | (static_cast<uint32_t>(Hi & 0xFFFFF000));
      *(little32_t *)(FixupPtr + 4) =
          RawInstrJalr | (static_cast<uint32_t>(Lo) << 20);
      break;
    }
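    // Illustrative note on the Hi/Lo split used above and in the HI20/LO12
    // cases below: Hi = Value + 0x800 rounds to the nearest 4 KiB page so that
    // (Hi & 0xFFFFF000) plus the sign-extended low 12 bits of Value equals
    // Value. For example, with Value = 0x1800: Hi & 0xFFFFF000 = 0x2000 and
    // the low 12 bits 0x800 sign-extend to -0x800, giving 0x1800 again.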
    // The relocations R_RISCV_CALL_PLT and R_RISCV_GOT_HI20 are handled by
    // PerGraphGOTAndPLTStubsBuilder_ELF_riscv and are transformed into
    // R_RISCV_CALL and R_RISCV_PCREL_HI20.
    case R_RISCV_PCREL_HI20: {
      int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
      int64_t Hi = Value + 0x800;
      if (LLVM_UNLIKELY(!isInRangeForImm(Hi, 32)))
        return makeTargetOutOfRangeError(G, B, E);
      uint32_t RawInstr = *(little32_t *)FixupPtr;
      *(little32_t *)FixupPtr =
          (RawInstr & 0xFFF) | (static_cast<uint32_t>(Hi & 0xFFFFF000));
      break;
    }
    case R_RISCV_PCREL_LO12_I: {
      // FIXME: We assume that a matching R_RISCV_PCREL_HI20 is present in the
      // object code and pairs with this R_RISCV_PCREL_LO12_I relocation, so a
      // check may be needed here.
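      // Note: the LO12 edge's target is the location of the paired auipc (the
      // HI20 fixup site), not the final symbol, so the PC-relative delta is
      // recomputed here from the HI20 edge recorded at that site.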
      auto RelHI20 = getRISCVPCRelHi20(E);
      if (!RelHI20)
        return RelHI20.takeError();
      int64_t Value = RelHI20->getTarget().getAddress() +
                      RelHI20->getAddend() - E.getTarget().getAddress();
      int64_t Lo = Value & 0xFFF;
      uint32_t RawInstr = *(little32_t *)FixupPtr;
      *(little32_t *)FixupPtr =
          (RawInstr & 0xFFFFF) | (static_cast<uint32_t>(Lo & 0xFFF) << 20);
      break;
    }
    case R_RISCV_PCREL_LO12_S: {
      // FIXME: We assume that a matching R_RISCV_PCREL_HI20 is present in the
      // object code and pairs with this R_RISCV_PCREL_LO12_S relocation, so a
      // check may be needed here.
      auto RelHI20 = getRISCVPCRelHi20(E);
      if (!RelHI20)
        return RelHI20.takeError();
      int64_t Value = RelHI20->getTarget().getAddress() +
                      RelHI20->getAddend() - E.getTarget().getAddress();
      int64_t Lo = Value & 0xFFF;
      uint32_t Imm11_5 = extractBits(Lo, 5, 7) << 25;
      uint32_t Imm4_0 = extractBits(Lo, 0, 5) << 7;
      uint32_t RawInstr = *(little32_t *)FixupPtr;

      *(little32_t *)FixupPtr = (RawInstr & 0x1FFF07F) | Imm11_5 | Imm4_0;
      break;
    }
    case R_RISCV_HI20: {
      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
      int64_t Hi = Value + 0x800;
      if (LLVM_UNLIKELY(!isInRangeForImm(Hi, 32)))
        return makeTargetOutOfRangeError(G, B, E);
      uint32_t RawInstr = *(little32_t *)FixupPtr;
      *(little32_t *)FixupPtr =
          (RawInstr & 0xFFF) | (static_cast<uint32_t>(Hi & 0xFFFFF000));
      break;
    }
    case R_RISCV_LO12_I: {
      // FIXME: We assume that a matching R_RISCV_HI20 is present in the object
      // code and pairs with this R_RISCV_LO12_I relocation, so a check may be
      // needed here.
      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
      int32_t Lo = Value & 0xFFF;
      uint32_t RawInstr = *(little32_t *)FixupPtr;
      *(little32_t *)FixupPtr =
          (RawInstr & 0xFFFFF) | (static_cast<uint32_t>(Lo & 0xFFF) << 20);
      break;
    }
    case R_RISCV_LO12_S: {
      // FIXME: We assume that a matching R_RISCV_HI20 is present in the object
      // code and pairs with this R_RISCV_LO12_S relocation, so a check may be
      // needed here.
      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
      int64_t Lo = Value & 0xFFF;
      uint32_t Imm11_5 = extractBits(Lo, 5, 7) << 25;
      uint32_t Imm4_0 = extractBits(Lo, 0, 5) << 7;
      uint32_t RawInstr = *(little32_t *)FixupPtr;
      *(little32_t *)FixupPtr = (RawInstr & 0x1FFF07F) | Imm11_5 | Imm4_0;
      break;
    }
    case R_RISCV_ADD8: {
      int64_t Value =
          (E.getTarget().getAddress() +
           *(reinterpret_cast<const uint8_t *>(FixupPtr)) + E.getAddend())
              .getValue();
      *FixupPtr = static_cast<uint8_t>(Value);
      break;
    }
    case R_RISCV_ADD16: {
      int64_t Value = (E.getTarget().getAddress() +
                       support::endian::read16le(FixupPtr) + E.getAddend())
                          .getValue();
      *(little16_t *)FixupPtr = static_cast<uint16_t>(Value);
      break;
    }
    case R_RISCV_ADD32: {
      int64_t Value = (E.getTarget().getAddress() +
                       support::endian::read32le(FixupPtr) + E.getAddend())
                          .getValue();
      *(little32_t *)FixupPtr = static_cast<uint32_t>(Value);
      break;
    }
    case R_RISCV_ADD64: {
      int64_t Value = (E.getTarget().getAddress() +
                       support::endian::read64le(FixupPtr) + E.getAddend())
                          .getValue();
      *(little64_t *)FixupPtr = static_cast<uint64_t>(Value);
      break;
    }
    case R_RISCV_SUB8: {
      int64_t Value = *(reinterpret_cast<const uint8_t *>(FixupPtr)) -
                      E.getTarget().getAddress().getValue() - E.getAddend();
      *FixupPtr = static_cast<uint8_t>(Value);
      break;
    }
    case R_RISCV_SUB16: {
      int64_t Value = support::endian::read16le(FixupPtr) -
                      E.getTarget().getAddress().getValue() - E.getAddend();
      *(little16_t *)FixupPtr = static_cast<uint16_t>(Value);
      break;
    }
    case R_RISCV_SUB32: {
      int64_t Value = support::endian::read32le(FixupPtr) -
                      E.getTarget().getAddress().getValue() - E.getAddend();
      *(little32_t *)FixupPtr = static_cast<uint32_t>(Value);
      break;
    }
    case R_RISCV_SUB64: {
      int64_t Value = support::endian::read64le(FixupPtr) -
                      E.getTarget().getAddress().getValue() - E.getAddend();
      *(little64_t *)FixupPtr = static_cast<uint64_t>(Value);
      break;
    }
    case R_RISCV_RVC_BRANCH: {
      int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
      if (LLVM_UNLIKELY(!isInRangeForImm(Value >> 1, 8)))
        return makeTargetOutOfRangeError(G, B, E);
      if (LLVM_UNLIKELY(!isAlignmentCorrect(Value, 2)))
        return makeAlignmentError(FixupAddress, Value, 2, E);
      uint16_t Imm8 = extractBits(Value, 8, 1) << 12;
      uint16_t Imm4_3 = extractBits(Value, 3, 2) << 10;
      uint16_t Imm7_6 = extractBits(Value, 6, 2) << 5;
      uint16_t Imm2_1 = extractBits(Value, 1, 2) << 3;
      uint16_t Imm5 = extractBits(Value, 5, 1) << 2;
      uint16_t RawInstr = *(little16_t *)FixupPtr;
      *(little16_t *)FixupPtr =
          (RawInstr & 0xE383) | Imm8 | Imm4_3 | Imm7_6 | Imm2_1 | Imm5;
      break;
    }
    case R_RISCV_RVC_JUMP: {
      int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
      if (LLVM_UNLIKELY(!isInRangeForImm(Value >> 1, 11)))
        return makeTargetOutOfRangeError(G, B, E);
      if (LLVM_UNLIKELY(!isAlignmentCorrect(Value, 2)))
        return makeAlignmentError(FixupAddress, Value, 2, E);
      uint16_t Imm11 = extractBits(Value, 11, 1) << 12;
      uint16_t Imm4 = extractBits(Value, 4, 1) << 11;
      uint16_t Imm9_8 = extractBits(Value, 8, 2) << 9;
      uint16_t Imm10 = extractBits(Value, 10, 1) << 8;
      uint16_t Imm6 = extractBits(Value, 6, 1) << 7;
      uint16_t Imm7 = extractBits(Value, 7, 1) << 6;
      uint16_t Imm3_1 = extractBits(Value, 1, 3) << 3;
      uint16_t Imm5 = extractBits(Value, 5, 1) << 2;
      uint16_t RawInstr = *(little16_t *)FixupPtr;
      *(little16_t *)FixupPtr = (RawInstr & 0xE003) | Imm11 | Imm4 | Imm9_8 |
                                Imm10 | Imm6 | Imm7 | Imm3_1 | Imm5;
      break;
    }
    case R_RISCV_SUB6: {
      int64_t Value = *(reinterpret_cast<const uint8_t *>(FixupPtr)) & 0x3f;
      Value -= E.getTarget().getAddress().getValue() - E.getAddend();
      *FixupPtr = (*FixupPtr & 0xc0) | (static_cast<uint8_t>(Value) & 0x3f);
      break;
    }
    case R_RISCV_SET6: {
      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
      uint32_t RawData = *(little32_t *)FixupPtr;
      int64_t Word6 = Value & 0x3f;
      *(little32_t *)FixupPtr = (RawData & 0xffffffc0) | Word6;
      break;
    }
    case R_RISCV_SET8: {
      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
      uint32_t RawData = *(little32_t *)FixupPtr;
      int64_t Word8 = Value & 0xff;
      *(little32_t *)FixupPtr = (RawData & 0xffffff00) | Word8;
      break;
    }
    case R_RISCV_SET16: {
      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
      uint32_t RawData = *(little32_t *)FixupPtr;
      int64_t Word16 = Value & 0xffff;
      *(little32_t *)FixupPtr = (RawData & 0xffff0000) | Word16;
      break;
    }
    case R_RISCV_SET32: {
      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
      int64_t Word32 = Value & 0xffffffff;
      *(little32_t *)FixupPtr = Word32;
      break;
    }
    case R_RISCV_32_PCREL: {
      int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
      int64_t Word32 = Value & 0xffffffff;
      *(little32_t *)FixupPtr = Word32;
      break;
    }
    case AlignRelaxable:
      // Ignore when the relaxation pass did not run
      break;
    case NegDelta32: {
      int64_t Value = FixupAddress - E.getTarget().getAddress() + E.getAddend();
      if (LLVM_UNLIKELY(!isInRangeForImm(Value, 32)))
        return makeTargetOutOfRangeError(G, B, E);
      *(little32_t *)FixupPtr = static_cast<uint32_t>(Value);
      break;
    }
    case R_RISCV_SET_ULEB128:
      break;
    case R_RISCV_SUB_ULEB128: {
      auto SetULEB128 = getRISCVSetULEB128(B, E);
      if (!SetULEB128)
        return SetULEB128.takeError();
      uint64_t Value = SetULEB128->getTarget().getAddress() +
                       SetULEB128->getAddend() - E.getTarget().getAddress() -
                       E.getAddend();
      if (overwriteULEB128(reinterpret_cast<uint8_t *>(FixupPtr), Value) >=
          0x80)
        return make_error<StringError>("ULEB128 value exceeds available space",
                                       inconvertibleErrorCode());
      break;
    }
    }
    return Error::success();
  }
};

namespace {

struct SymbolAnchor {
  uint64_t Offset;
  Symbol *Sym;
  bool End; // true for the anchor of getOffset() + getSize()
};

struct BlockRelaxAux {
  // This records symbol start and end offsets which will be adjusted according
  // to the nearest RelocDeltas element.
  SmallVector<SymbolAnchor, 0> Anchors;
  // All edges that either 1) are R_RISCV_ALIGN or 2) have a R_RISCV_RELAX edge
  // at the same offset.
  SmallVector<Edge *, 0> RelaxEdges;
  // For RelaxEdges[I], the actual offset is RelaxEdges[I]->getOffset() - (I ?
  // RelocDeltas[I - 1] : 0).
  SmallVector<uint32_t, 0> RelocDeltas;
  // For RelaxEdges[I], the actual type is EdgeKinds[I].
  SmallVector<Edge::Kind, 0> EdgeKinds;
  // List of rewritten instructions. Contains one raw encoded instruction per
  // element in EdgeKinds that isn't Invalid or R_RISCV_ALIGN.
  SmallVector<uint32_t, 0> Writes;
};

struct RelaxConfig {
  bool IsRV32;
  bool HasRVC;
};

struct RelaxAux {
  RelaxConfig Config;
  DenseMap<Block *, BlockRelaxAux> Blocks;
};

} // namespace

static bool shouldRelax(const Section &S) {
  return (S.getMemProt() & orc::MemProt::Exec) != orc::MemProt::None;
}

static bool isRelaxable(const Edge &E) {
  switch (E.getKind()) {
  default:
    return false;
  case CallRelaxable:
  case AlignRelaxable:
    return true;
  }
}

static RelaxAux initRelaxAux(LinkGraph &G) {
  RelaxAux Aux;
  Aux.Config.IsRV32 = G.getTargetTriple().isRISCV32();
  const auto &Features = G.getFeatures().getFeatures();
  Aux.Config.HasRVC = llvm::is_contained(Features, "+c") ||
                      llvm::is_contained(Features, "+zca");

  for (auto &S : G.sections()) {
    if (!shouldRelax(S))
      continue;
    for (auto *B : S.blocks()) {
      auto BlockEmplaceResult = Aux.Blocks.try_emplace(B);
      assert(BlockEmplaceResult.second && "Block encountered twice");
      auto &BlockAux = BlockEmplaceResult.first->second;

      for (auto &E : B->edges())
        if (isRelaxable(E))
          BlockAux.RelaxEdges.push_back(&E);

      if (BlockAux.RelaxEdges.empty()) {
        Aux.Blocks.erase(BlockEmplaceResult.first);
        continue;
      }

      const auto NumEdges = BlockAux.RelaxEdges.size();
      BlockAux.RelocDeltas.resize(NumEdges, 0);
      BlockAux.EdgeKinds.resize_for_overwrite(NumEdges);

      // Store anchors (offset and offset+size) for symbols.
      for (auto *Sym : S.symbols()) {
        if (!Sym->isDefined() || &Sym->getBlock() != B)
          continue;

        BlockAux.Anchors.push_back({Sym->getOffset(), Sym, false});
        BlockAux.Anchors.push_back(
            {Sym->getOffset() + Sym->getSize(), Sym, true});
      }
    }
  }

  // Sort anchors by offset so that we can find the closest relocation
  // efficiently. For a zero size symbol, ensure that its start anchor precedes
  // its end anchor. For two symbols with anchors at the same offset, their
  // order does not matter.
  for (auto &BlockAuxIter : Aux.Blocks) {
    llvm::sort(BlockAuxIter.second.Anchors, [](auto &A, auto &B) {
      return std::make_pair(A.Offset, A.End) < std::make_pair(B.Offset, B.End);
    });
  }

  return Aux;
}

static void relaxAlign(orc::ExecutorAddr Loc, const Edge &E, uint32_t &Remove,
                       Edge::Kind &NewEdgeKind) {
  // E points to the start of the padding bytes.
  // E + Addend points to the instruction to be aligned by removing padding.
  // Alignment is the smallest power of 2 strictly greater than Addend.
  const auto Align = NextPowerOf2(E.getAddend());
  const auto DestLoc = alignTo(Loc.getValue(), Align);
  const auto SrcLoc = Loc.getValue() + E.getAddend();
  Remove = SrcLoc - DestLoc;
  assert(static_cast<int32_t>(Remove) >= 0 &&
         "R_RISCV_ALIGN needs expanding the content");
  NewEdgeKind = AlignRelaxable;
}
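
// Worked example (illustrative): for an R_RISCV_ALIGN edge with addend 6
// (up to 6 padding bytes, i.e. 8-byte alignment), if Loc is already 8-byte
// aligned then all 6 padding bytes are removed; if Loc is 2 bytes past an
// 8-byte boundary then DestLoc == Loc + 6 and nothing is removed.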

static void relaxCall(const Block &B, BlockRelaxAux &Aux,
                      const RelaxConfig &Config, orc::ExecutorAddr Loc,
                      const Edge &E, uint32_t &Remove,
                      Edge::Kind &NewEdgeKind) {
  const auto JALR =
      support::endian::read32le(B.getContent().data() + E.getOffset() + 4);
  const auto RD = extractBits(JALR, 7, 5);
  const auto Dest = E.getTarget().getAddress() + E.getAddend();
  const auto Displace = Dest - Loc;

  if (Config.HasRVC && isInt<12>(Displace) && RD == 0) {
    NewEdgeKind = R_RISCV_RVC_JUMP;
    Aux.Writes.push_back(0xa001); // c.j
    Remove = 6;
  } else if (Config.HasRVC && Config.IsRV32 && isInt<12>(Displace) && RD == 1) {
    NewEdgeKind = R_RISCV_RVC_JUMP;
    Aux.Writes.push_back(0x2001); // c.jal
    Remove = 6;
  } else if (isInt<21>(Displace)) {
    NewEdgeKind = R_RISCV_JAL;
    Aux.Writes.push_back(0x6f | RD << 7); // jal
    Remove = 4;
  } else {
    // Not relaxable
    NewEdgeKind = R_RISCV_CALL_PLT;
    Remove = 0;
  }
}
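
// Note: relaxCall() only records the chosen rewrite (in Aux.Writes) and how
// many trailing bytes become dead (Remove); the instruction bytes are actually
// rewritten and the dead bytes dropped later, in finalizeBlockRelax().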

static bool relaxBlock(LinkGraph &G, Block &Block, BlockRelaxAux &Aux,
                       const RelaxConfig &Config) {
  const auto BlockAddr = Block.getAddress();
  bool Changed = false;
  ArrayRef<SymbolAnchor> SA = ArrayRef(Aux.Anchors);
  uint32_t Delta = 0;

  Aux.EdgeKinds.assign(Aux.EdgeKinds.size(), Edge::Invalid);
  Aux.Writes.clear();

  for (auto [I, E] : llvm::enumerate(Aux.RelaxEdges)) {
    const auto Loc = BlockAddr + E->getOffset() - Delta;
    auto &Cur = Aux.RelocDeltas[I];
    uint32_t Remove = 0;
    switch (E->getKind()) {
    case AlignRelaxable:
      relaxAlign(Loc, *E, Remove, Aux.EdgeKinds[I]);
      break;
    case CallRelaxable:
      relaxCall(Block, Aux, Config, Loc, *E, Remove, Aux.EdgeKinds[I]);
      break;
    default:
      llvm_unreachable("Unexpected relaxable edge kind");
    }

    // For all anchors whose offsets are <= E->getOffset(), they are preceded by
    // the previous relocation whose RelocDeltas value equals Delta.
    // Decrease their offset and update their size.
    for (; SA.size() && SA[0].Offset <= E->getOffset(); SA = SA.slice(1)) {
      if (SA[0].End)
        SA[0].Sym->setSize(SA[0].Offset - Delta - SA[0].Sym->getOffset());
      else
        SA[0].Sym->setOffset(SA[0].Offset - Delta);
    }

    Delta += Remove;
    if (Delta != Cur) {
      Cur = Delta;
      Changed = true;
    }
  }

  for (const SymbolAnchor &A : SA) {
    if (A.End)
      A.Sym->setSize(A.Offset - Delta - A.Sym->getOffset());
    else
      A.Sym->setOffset(A.Offset - Delta);
  }

  return Changed;
}

static bool relaxOnce(LinkGraph &G, RelaxAux &Aux) {
  bool Changed = false;

  for (auto &[B, BlockAux] : Aux.Blocks)
    Changed |= relaxBlock(G, *B, BlockAux, Aux.Config);

  return Changed;
}

static void finalizeBlockRelax(LinkGraph &G, Block &Block, BlockRelaxAux &Aux) {
  auto Contents = Block.getAlreadyMutableContent();
  auto *Dest = Contents.data();
  auto NextWrite = Aux.Writes.begin();
  uint32_t Offset = 0;
  uint32_t Delta = 0;

  // Update section content: remove NOPs for R_RISCV_ALIGN and rewrite
  // instructions for relaxed relocations.
  for (auto [I, E] : llvm::enumerate(Aux.RelaxEdges)) {
    uint32_t Remove = Aux.RelocDeltas[I] - Delta;
    Delta = Aux.RelocDeltas[I];
    if (Remove == 0 && Aux.EdgeKinds[I] == Edge::Invalid)
      continue;

    // Copy from last location to the current relocated location.
    const auto Size = E->getOffset() - Offset;
    std::memmove(Dest, Contents.data() + Offset, Size);
    Dest += Size;

    uint32_t Skip = 0;
    switch (Aux.EdgeKinds[I]) {
    case Edge::Invalid:
      break;
    case AlignRelaxable:
      // For R_RISCV_ALIGN, we will place Offset in a location (among NOPs) to
      // satisfy the alignment requirement. If both Remove and E->getAddend()
      // are multiples of 4, it is as if we have skipped some NOPs. Otherwise we
      // are in the middle of a 4-byte NOP, and we need to rewrite the NOP
      // sequence.
      if (Remove % 4 || E->getAddend() % 4) {
        Skip = E->getAddend() - Remove;
        uint32_t J = 0;
        for (; J + 4 <= Skip; J += 4)
          support::endian::write32le(Dest + J, 0x00000013); // nop
        if (J != Skip) {
          assert(J + 2 == Skip);
          support::endian::write16le(Dest + J, 0x0001); // c.nop
        }
      }
      break;
    case R_RISCV_RVC_JUMP:
      Skip = 2;
      support::endian::write16le(Dest, *NextWrite++);
      break;
    case R_RISCV_JAL:
      Skip = 4;
      support::endian::write32le(Dest, *NextWrite++);
      break;
    }

    Dest += Skip;
    Offset = E->getOffset() + Skip + Remove;
  }

  std::memmove(Dest, Contents.data() + Offset, Contents.size() - Offset);

  // Fixup edge offsets and kinds.
  Delta = 0;
  size_t I = 0;
  for (auto &E : Block.edges()) {
    E.setOffset(E.getOffset() - Delta);

    if (I < Aux.RelaxEdges.size() && Aux.RelaxEdges[I] == &E) {
      if (Aux.EdgeKinds[I] != Edge::Invalid)
        E.setKind(Aux.EdgeKinds[I]);

      Delta = Aux.RelocDeltas[I];
      ++I;
    }
  }

  // Remove AlignRelaxable edges: all other relaxable edges got modified and
  // will be used later while linking. Alignment is entirely handled here so we
  // don't need these edges anymore.
  for (auto IE = Block.edges().begin(); IE != Block.edges().end();) {
    if (IE->getKind() == AlignRelaxable)
      IE = Block.removeEdge(IE);
    else
      ++IE;
  }
}

static void finalizeRelax(LinkGraph &G, RelaxAux &Aux) {
  for (auto &[B, BlockAux] : Aux.Blocks)
    finalizeBlockRelax(G, *B, BlockAux);
}

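// Driver note: relaxOnce() is repeated until it reports no further changes,
// since shrinking one call can move later code and bring additional edges into
// relaxable range; the accumulated deltas are applied to the block contents
// only once, in finalizeRelax().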
static Error relax(LinkGraph &G) {
  auto Aux = initRelaxAux(G);
  while (relaxOnce(G, Aux)) {
  }
  finalizeRelax(G, Aux);
  return Error::success();
}

template <typename ELFT>
class ELFLinkGraphBuilder_riscv : public ELFLinkGraphBuilder<ELFT> {
private:
  static Expected<riscv::EdgeKind_riscv>
  getRelocationKind(const uint32_t Type) {
    using namespace riscv;
    switch (Type) {
    case ELF::R_RISCV_32:
      return EdgeKind_riscv::R_RISCV_32;
    case ELF::R_RISCV_64:
      return EdgeKind_riscv::R_RISCV_64;
    case ELF::R_RISCV_BRANCH:
      return EdgeKind_riscv::R_RISCV_BRANCH;
    case ELF::R_RISCV_JAL:
      return EdgeKind_riscv::R_RISCV_JAL;
    case ELF::R_RISCV_CALL:
      return EdgeKind_riscv::R_RISCV_CALL;
    case ELF::R_RISCV_CALL_PLT:
      return EdgeKind_riscv::R_RISCV_CALL_PLT;
    case ELF::R_RISCV_GOT_HI20:
      return EdgeKind_riscv::R_RISCV_GOT_HI20;
    case ELF::R_RISCV_PCREL_HI20:
      return EdgeKind_riscv::R_RISCV_PCREL_HI20;
    case ELF::R_RISCV_PCREL_LO12_I:
      return EdgeKind_riscv::R_RISCV_PCREL_LO12_I;
    case ELF::R_RISCV_PCREL_LO12_S:
      return EdgeKind_riscv::R_RISCV_PCREL_LO12_S;
    case ELF::R_RISCV_HI20:
      return EdgeKind_riscv::R_RISCV_HI20;
    case ELF::R_RISCV_LO12_I:
      return EdgeKind_riscv::R_RISCV_LO12_I;
    case ELF::R_RISCV_LO12_S:
      return EdgeKind_riscv::R_RISCV_LO12_S;
    case ELF::R_RISCV_ADD8:
      return EdgeKind_riscv::R_RISCV_ADD8;
    case ELF::R_RISCV_ADD16:
      return EdgeKind_riscv::R_RISCV_ADD16;
    case ELF::R_RISCV_ADD32:
      return EdgeKind_riscv::R_RISCV_ADD32;
    case ELF::R_RISCV_ADD64:
      return EdgeKind_riscv::R_RISCV_ADD64;
    case ELF::R_RISCV_SUB8:
      return EdgeKind_riscv::R_RISCV_SUB8;
    case ELF::R_RISCV_SUB16:
      return EdgeKind_riscv::R_RISCV_SUB16;
    case ELF::R_RISCV_SUB32:
      return EdgeKind_riscv::R_RISCV_SUB32;
    case ELF::R_RISCV_SUB64:
      return EdgeKind_riscv::R_RISCV_SUB64;
    case ELF::R_RISCV_RVC_BRANCH:
      return EdgeKind_riscv::R_RISCV_RVC_BRANCH;
    case ELF::R_RISCV_RVC_JUMP:
      return EdgeKind_riscv::R_RISCV_RVC_JUMP;
    case ELF::R_RISCV_SUB6:
      return EdgeKind_riscv::R_RISCV_SUB6;
    case ELF::R_RISCV_SET6:
      return EdgeKind_riscv::R_RISCV_SET6;
    case ELF::R_RISCV_SET8:
      return EdgeKind_riscv::R_RISCV_SET8;
    case ELF::R_RISCV_SET16:
      return EdgeKind_riscv::R_RISCV_SET16;
    case ELF::R_RISCV_SET32:
      return EdgeKind_riscv::R_RISCV_SET32;
    case ELF::R_RISCV_32_PCREL:
      return EdgeKind_riscv::R_RISCV_32_PCREL;
    case ELF::R_RISCV_ALIGN:
      return EdgeKind_riscv::AlignRelaxable;
    case ELF::R_RISCV_SET_ULEB128:
      return EdgeKind_riscv::R_RISCV_SET_ULEB128;
    case ELF::R_RISCV_SUB_ULEB128:
      return EdgeKind_riscv::R_RISCV_SUB_ULEB128;
    }

    return make_error<JITLinkError>(
        "Unsupported riscv relocation: " + formatv("{0:d}: ", Type) +
        object::getELFRelocationTypeName(ELF::EM_RISCV, Type));
  }

  EdgeKind_riscv getRelaxableRelocationKind(EdgeKind_riscv Kind) {
    switch (Kind) {
    default:
      // Just ignore unsupported relaxations
      return Kind;
    case R_RISCV_CALL:
    case R_RISCV_CALL_PLT:
      return CallRelaxable;
    }
  }

  Error addRelocations() override {
    LLVM_DEBUG(dbgs() << "Processing relocations:\n");

    using Base = ELFLinkGraphBuilder<ELFT>;
    using Self = ELFLinkGraphBuilder_riscv<ELFT>;
    for (const auto &RelSect : Base::Sections)
      if (Error Err = Base::forEachRelaRelocation(RelSect, this,
                                                  &Self::addSingleRelocation))
        return Err;

    return Error::success();
  }

  Error addSingleRelocation(const typename ELFT::Rela &Rel,
                            const typename ELFT::Shdr &FixupSect,
                            Block &BlockToFix) {
    using Base = ELFLinkGraphBuilder<ELFT>;

    uint32_t Type = Rel.getType(false);
    int64_t Addend = Rel.r_addend;

    if (Type == ELF::R_RISCV_RELAX) {
      if (BlockToFix.edges_empty())
        return make_error<StringError>(
            "R_RISCV_RELAX without preceding relocation",
            inconvertibleErrorCode());

      auto &PrevEdge = *std::prev(BlockToFix.edges().end());
      auto Kind = static_cast<EdgeKind_riscv>(PrevEdge.getKind());
      PrevEdge.setKind(getRelaxableRelocationKind(Kind));
      return Error::success();
    }

    Expected<riscv::EdgeKind_riscv> Kind = getRelocationKind(Type);
    if (!Kind)
      return Kind.takeError();

    uint32_t SymbolIndex = Rel.getSymbol(false);
    auto ObjSymbol = Base::Obj.getRelocationSymbol(Rel, Base::SymTabSec);
    if (!ObjSymbol)
      return ObjSymbol.takeError();

    Symbol *GraphSymbol = Base::getGraphSymbol(SymbolIndex);
    if (!GraphSymbol)
      return make_error<StringError>(
          formatv("Could not find symbol at given index, did you add it to "
                  "JITSymbolTable? index: {0}, shndx: {1} Size of table: {2}",
                  SymbolIndex, (*ObjSymbol)->st_shndx,
                  Base::GraphSymbols.size()),
          inconvertibleErrorCode());

    auto FixupAddress = orc::ExecutorAddr(FixupSect.sh_addr) + Rel.r_offset;
    Edge::OffsetT Offset = FixupAddress - BlockToFix.getAddress();
    Edge GE(*Kind, Offset, *GraphSymbol, Addend);
    LLVM_DEBUG({
      dbgs() << " ";
      printEdge(dbgs(), BlockToFix, GE, riscv::getEdgeKindName(*Kind));
      dbgs() << "\n";
    });

    BlockToFix.addEdge(std::move(GE));
    return Error::success();
  }

public:
  ELFLinkGraphBuilder_riscv(StringRef FileName,
                            const object::ELFFile<ELFT> &Obj,
                            std::shared_ptr<orc::SymbolStringPool> SSP,
                            Triple TT, SubtargetFeatures Features)
      : ELFLinkGraphBuilder<ELFT>(Obj, std::move(SSP), std::move(TT),
                                  std::move(Features), FileName,
                                  riscv::getEdgeKindName) {}
};

Expected<std::unique_ptr<LinkGraph>>
createLinkGraphFromELFObject_riscv(MemoryBufferRef ObjectBuffer,
                                   std::shared_ptr<orc::SymbolStringPool> SSP) {
  LLVM_DEBUG({
    dbgs() << "Building jitlink graph for new input "
           << ObjectBuffer.getBufferIdentifier() << "...\n";
  });

  auto ELFObj = object::ObjectFile::createELFObjectFile(ObjectBuffer);
  if (!ELFObj)
    return ELFObj.takeError();

  auto Features = (*ELFObj)->getFeatures();
  if (!Features)
    return Features.takeError();

  if ((*ELFObj)->getArch() == Triple::riscv64) {
    auto &ELFObjFile = cast<object::ELFObjectFile<object::ELF64LE>>(**ELFObj);
    return ELFLinkGraphBuilder_riscv<object::ELF64LE>(
               (*ELFObj)->getFileName(), ELFObjFile.getELFFile(),
               std::move(SSP), (*ELFObj)->makeTriple(), std::move(*Features))
        .buildGraph();
  } else {
    assert((*ELFObj)->getArch() == Triple::riscv32 &&
           "Invalid triple for RISCV ELF object file");
    auto &ELFObjFile = cast<object::ELFObjectFile<object::ELF32LE>>(**ELFObj);
    return ELFLinkGraphBuilder_riscv<object::ELF32LE>(
               (*ELFObj)->getFileName(), ELFObjFile.getELFFile(),
               std::move(SSP), (*ELFObj)->makeTriple(), std::move(*Features))
        .buildGraph();
  }
}

void link_ELF_riscv(std::unique_ptr<LinkGraph> G,
                    std::unique_ptr<JITLinkContext> Ctx) {
  PassConfiguration Config;
  const Triple &TT = G->getTargetTriple();
  if (Ctx->shouldAddDefaultTargetPasses(TT)) {

    Config.PrePrunePasses.push_back(DWARFRecordSectionSplitter(".eh_frame"));
    Config.PrePrunePasses.push_back(EHFrameEdgeFixer(
        ".eh_frame", G->getPointerSize(), Edge::Invalid, Edge::Invalid,
        Edge::Invalid, Edge::Invalid, NegDelta32));
    Config.PrePrunePasses.push_back(EHFrameNullTerminator(".eh_frame"));

    if (auto MarkLive = Ctx->getMarkLivePass(TT))
      Config.PrePrunePasses.push_back(std::move(MarkLive));
    else
      Config.PrePrunePasses.push_back(markAllSymbolsLive);
    Config.PostPrunePasses.push_back(
        PerGraphGOTAndPLTStubsBuilder_ELF_riscv::asPass);
    Config.PostAllocationPasses.push_back(relax);
  }
  if (auto Err = Ctx->modifyPassConfig(*G, Config))
    return Ctx->notifyFailed(std::move(Err));

  ELFJITLinker_riscv::link(std::move(Ctx), std::move(G), std::move(Config));
}

LinkGraphPassFunction createRelaxationPass_ELF_riscv() { return relax; }

} // namespace jitlink
} // namespace llvm