//===--------- aarch32.cpp - Generic JITLink arm/thumb utilities ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Generic utilities for graphs representing arm/thumb objects.
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/JITLink/aarch32.h"

#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/ExecutionEngine/Orc/Shared/MemoryFlags.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"

#define DEBUG_TYPE "jitlink"

namespace llvm {
namespace jitlink {
namespace aarch32 {

/// Check whether the given target flags are set for this Symbol.
bool hasTargetFlags(Symbol &Sym, TargetFlagsType Flags) {
  return static_cast<TargetFlagsType>(Sym.getTargetFlags()) & Flags;
}

/// Encode 22-bit immediate value for branch instructions without J1J2 range
/// extension (formats B T4, BL T1 and BLX T2).
///
///   00000:Imm11H:Imm11L:0 -> [ 00000:Imm11H, 00000:Imm11L ]
///                                            J1^ ^J2 will always be 1
///
HalfWords encodeImmBT4BlT1BlxT2(int64_t Value) {
  constexpr uint32_t J1J2 = 0x2800;
  uint32_t Imm11H = (Value >> 12) & 0x07ff;
  uint32_t Imm11L = (Value >> 1) & 0x07ff;
  return HalfWords{Imm11H, Imm11L | J1J2};
}

/// Decode 22-bit immediate value for branch instructions without J1J2 range
/// extension (formats B T4, BL T1 and BLX T2).
///
///   [ 00000:Imm11H, 00000:Imm11L ] -> 00000:Imm11H:Imm11L:0
///                   J1^ ^J2 will always be 1
///
int64_t decodeImmBT4BlT1BlxT2(uint32_t Hi, uint32_t Lo) {
  uint32_t Imm11H = Hi & 0x07ff;
  uint32_t Imm11L = Lo & 0x07ff;
  return SignExtend64<22>(Imm11H << 12 | Imm11L << 1);
}
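
// Illustrative round-trip for the non-J1J2 encoding above: a branch offset of
// Value = 0x1000 encodes to HalfWords{0x0001, 0x2800} (Imm11H = 1, Imm11L = 0,
// J1 and J2 set in the Lo halfword), and decodeImmBT4BlT1BlxT2(0x0001, 0x2800)
// recovers 0x1000.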

/// Encode 25-bit immediate value for branch instructions with J1J2 range
/// extension (formats B T4, BL T1 and BLX T2).
///
///   S:I1:I2:Imm10:Imm11:0 -> [ 00000:S:Imm10, 00:J1:0:J2:Imm11 ]
///
HalfWords encodeImmBT4BlT1BlxT2_J1J2(int64_t Value) {
  uint32_t S = (Value >> 14) & 0x0400;
  uint32_t J1 = (((~(Value >> 10)) ^ (Value >> 11)) & 0x2000);
  uint32_t J2 = (((~(Value >> 11)) ^ (Value >> 13)) & 0x0800);
  uint32_t Imm10 = (Value >> 12) & 0x03ff;
  uint32_t Imm11 = (Value >> 1) & 0x07ff;
  return HalfWords{S | Imm10, J1 | J2 | Imm11};
}

/// Decode 25-bit immediate value for branch instructions with J1J2 range
/// extension (formats B T4, BL T1 and BLX T2).
///
///   [ 00000:S:Imm10, 00:J1:0:J2:Imm11 ] -> S:I1:I2:Imm10:Imm11:0
///
int64_t decodeImmBT4BlT1BlxT2_J1J2(uint32_t Hi, uint32_t Lo) {
  uint32_t S = Hi & 0x0400;
  uint32_t I1 = ~((Lo ^ (Hi << 3)) << 10) & 0x00800000;
  uint32_t I2 = ~((Lo ^ (Hi << 1)) << 11) & 0x00400000;
  uint32_t Imm10 = Hi & 0x03ff;
  uint32_t Imm11 = Lo & 0x07ff;
  return SignExtend64<25>(S << 14 | I1 | I2 | Imm10 << 12 | Imm11 << 1);
}
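
// The inverted-XOR expressions above implement the relation from the Arm ARM
// for these encodings: J1 = NOT(I1 XOR S) and J2 = NOT(I2 XOR S). Illustrative
// round-trip: Value = 0x1000 has S = I1 = I2 = 0, so J1 = J2 = 1 and the
// encoded result is HalfWords{0x0001, 0x2800}; decoding those halfwords yields
// 0x1000 again.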

/// Encode 26-bit immediate value for branch instructions
/// (formats B A1, BL A1 and BLX A2).
///
///   Imm24:00 -> 00000000:Imm24
///
uint32_t encodeImmBA1BlA1BlxA2(int64_t Value) {
  return (Value >> 2) & 0x00ffffff;
}

/// Decode 26-bit immediate value for branch instructions
/// (formats B A1, BL A1 and BLX A2).
///
///   00000000:Imm24 -> Imm24:00
///
int64_t decodeImmBA1BlA1BlxA2(int64_t Value) {
  return SignExtend64<26>((Value & 0x00ffffff) << 2);
}
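
// Illustrative round-trip for the Arm branch encoding: a backwards offset of
// Value = -8 encodes to 0x00fffffe, and decodeImmBA1BlA1BlxA2(0x00fffffe)
// recovers -8.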

/// Encode 16-bit immediate value for move instruction formats MOVT T1 and
/// MOVW T3.
///
///   Imm4:Imm1:Imm3:Imm8 -> [ 00000:i:000000:Imm4, 0:Imm3:0000:Imm8 ]
///
HalfWords encodeImmMovtT1MovwT3(uint16_t Value) {
  uint32_t Imm4 = (Value >> 12) & 0x0f;
  uint32_t Imm1 = (Value >> 11) & 0x01;
  uint32_t Imm3 = (Value >> 8) & 0x07;
  uint32_t Imm8 = Value & 0xff;
  return HalfWords{Imm1 << 10 | Imm4, Imm3 << 12 | Imm8};
}

/// Decode 16-bit immediate value from move instruction formats MOVT T1 and
/// MOVW T3.
///
///   [ 00000:i:000000:Imm4, 0:Imm3:0000:Imm8 ] -> Imm4:Imm1:Imm3:Imm8
///
uint16_t decodeImmMovtT1MovwT3(uint32_t Hi, uint32_t Lo) {
  uint32_t Imm4 = Hi & 0x0f;
  uint32_t Imm1 = (Hi >> 10) & 0x01;
  uint32_t Imm3 = (Lo >> 12) & 0x07;
  uint32_t Imm8 = Lo & 0xff;
  uint32_t Imm16 = Imm4 << 12 | Imm1 << 11 | Imm3 << 8 | Imm8;
  assert(Imm16 <= 0xffff && "Decoded value out-of-range");
  return Imm16;
}
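
// Illustrative round-trip for the Thumb MOVW/MOVT immediate: Value = 0xabcd
// splits into Imm4 = 0xa, Imm1 = 1, Imm3 = 3, Imm8 = 0xcd and encodes to
// HalfWords{0x040a, 0x30cd}; decodeImmMovtT1MovwT3(0x040a, 0x30cd) returns
// 0xabcd.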

/// Encode register ID for instruction formats MOVT T1 and MOVW T3.
///
///   Rd4 -> [0000000000000000, 0000:Rd4:00000000]
///
HalfWords encodeRegMovtT1MovwT3(int64_t Value) {
  uint32_t Rd4 = (Value & 0x0f) << 8;
  return HalfWords{0, Rd4};
}

/// Decode register ID from instruction formats MOVT T1 and MOVW T3.
///
///   [0000000000000000, 0000:Rd4:00000000] -> Rd4
///
int64_t decodeRegMovtT1MovwT3(uint32_t Hi, uint32_t Lo) {
  uint32_t Rd4 = (Lo >> 8) & 0x0f;
  return Rd4;
}

/// Encode 16-bit immediate value for move instruction formats MOVT A1 and
/// MOVW A2.
///
///   Imm4:Imm12 -> 000000000000:Imm4:0000:Imm12
///
uint32_t encodeImmMovtA1MovwA2(uint16_t Value) {
  uint32_t Imm4 = (Value >> 12) & 0x0f;
  uint32_t Imm12 = Value & 0x0fff;
  return (Imm4 << 16) | Imm12;
}

/// Decode 16-bit immediate value for move instruction formats MOVT A1 and
/// MOVW A2.
///
///   000000000000:Imm4:0000:Imm12 -> Imm4:Imm12
///
uint16_t decodeImmMovtA1MovwA2(uint64_t Value) {
  uint32_t Imm4 = (Value >> 16) & 0x0f;
  uint32_t Imm12 = Value & 0x0fff;
  return (Imm4 << 12) | Imm12;
}
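
// Illustrative round-trip for the Arm MOVW/MOVT immediate: Value = 0xabcd
// splits into Imm4 = 0xa and Imm12 = 0xbcd, encodes to 0x000a0bcd, and
// decodeImmMovtA1MovwA2(0x000a0bcd) returns 0xabcd.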

/// Encode register ID for instruction formats MOVT A1 and
/// MOVW A2.
///
///   Rd4 -> 0000000000000000:Rd4:000000000000
///
uint32_t encodeRegMovtA1MovwA2(int64_t Value) {
  uint32_t Rd4 = (Value & 0x00000f) << 12;
  return Rd4;
}

/// Decode register ID for instruction formats MOVT A1 and
/// MOVW A2.
///
///   0000000000000000:Rd4:000000000000 -> Rd4
///
int64_t decodeRegMovtA1MovwA2(uint64_t Value) {
  uint32_t Rd4 = (Value >> 12) & 0x00000f;
  return Rd4;
}

namespace {

/// 32-bit Thumb instructions are stored as two little-endian halfwords.
/// An instruction at address A encodes bytes A+1, A in the first halfword (Hi),
/// followed by bytes A+3, A+2 in the second halfword (Lo).
struct WritableThumbRelocation {
  /// Create a writable reference to a Thumb32 fixup.
  WritableThumbRelocation(char *FixupPtr)
      : Hi{*reinterpret_cast<support::ulittle16_t *>(FixupPtr)},
        Lo{*reinterpret_cast<support::ulittle16_t *>(FixupPtr + 2)} {}

  support::ulittle16_t &Hi; // First halfword
  support::ulittle16_t &Lo; // Second halfword
};
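
// Byte-order example for the layout described above: halfwords Hi = 0xf7ff and
// Lo = 0xfffe are stored in memory as the byte sequence ff f7 fe ff; the two
// ulittle16_t references read them back as 0xf7ff and 0xfffe regardless of
// host endianness.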

struct ThumbRelocation {
  /// Create a read-only reference to a Thumb32 fixup.
  ThumbRelocation(const char *FixupPtr)
      : Hi{*reinterpret_cast<const support::ulittle16_t *>(FixupPtr)},
        Lo{*reinterpret_cast<const support::ulittle16_t *>(FixupPtr + 2)} {}

  /// Create a read-only Thumb32 fixup from a writeable one.
  ThumbRelocation(WritableThumbRelocation &Writable)
      : Hi{Writable.Hi}, Lo(Writable.Lo) {}

  const support::ulittle16_t &Hi; // First halfword
  const support::ulittle16_t &Lo; // Second halfword
};

struct WritableArmRelocation {
  WritableArmRelocation(char *FixupPtr)
      : Wd{*reinterpret_cast<support::ulittle32_t *>(FixupPtr)} {}

  support::ulittle32_t &Wd;
};

struct ArmRelocation {
  ArmRelocation(const char *FixupPtr)
      : Wd{*reinterpret_cast<const support::ulittle32_t *>(FixupPtr)} {}

  ArmRelocation(WritableArmRelocation &Writable) : Wd{Writable.Wd} {}

  const support::ulittle32_t &Wd;
};

Error makeUnexpectedOpcodeError(const LinkGraph &G, const ThumbRelocation &R,
                                Edge::Kind Kind) {
  return make_error<JITLinkError>(
      formatv("Invalid opcode [ {0:x4}, {1:x4} ] for relocation: {2}",
              static_cast<uint16_t>(R.Hi), static_cast<uint16_t>(R.Lo),
              G.getEdgeKindName(Kind)));
}

Error makeUnexpectedOpcodeError(const LinkGraph &G, const ArmRelocation &R,
                                Edge::Kind Kind) {
  return make_error<JITLinkError>(
      formatv("Invalid opcode {0:x8} for relocation: {1}",
              static_cast<uint32_t>(R.Wd), G.getEdgeKindName(Kind)));
}

template <EdgeKind_aarch32 K> constexpr bool isArm() {
  return FirstArmRelocation <= K && K <= LastArmRelocation;
}
template <EdgeKind_aarch32 K> constexpr bool isThumb() {
  return FirstThumbRelocation <= K && K <= LastThumbRelocation;
}

template <EdgeKind_aarch32 K> static bool checkOpcodeArm(uint32_t Wd) {
  return (Wd & FixupInfo<K>::OpcodeMask) == FixupInfo<K>::Opcode;
}

template <EdgeKind_aarch32 K>
static bool checkOpcodeThumb(uint16_t Hi, uint16_t Lo) {
  return (Hi & FixupInfo<K>::OpcodeMask.Hi) == FixupInfo<K>::Opcode.Hi &&
         (Lo & FixupInfo<K>::OpcodeMask.Lo) == FixupInfo<K>::Opcode.Lo;
}

class FixupInfoTable {
  static constexpr size_t Items = LastRelocation + 1;

public:
  FixupInfoTable() {
    populateEntries<FirstArmRelocation, LastArmRelocation>();
    populateEntries<FirstThumbRelocation, LastThumbRelocation>();
  }

  const FixupInfoBase *getEntry(Edge::Kind K) {
    assert(K < Data.size() && "Index out of bounds");
    return Data.at(K).get();
  }

private:
  template <EdgeKind_aarch32 K, EdgeKind_aarch32 LastK> void populateEntries() {
    assert(K < Data.size() && "Index out of range");
    assert(Data.at(K) == nullptr && "Initialized entries are immutable");
    Data[K] = initEntry<K>();
    if constexpr (K < LastK) {
      constexpr auto Next = static_cast<EdgeKind_aarch32>(K + 1);
      populateEntries<Next, LastK>();
    }
  }

  template <EdgeKind_aarch32 K>
  static std::unique_ptr<FixupInfoBase> initEntry() {
    auto Entry = std::make_unique<FixupInfo<K>>();
    static_assert(isArm<K>() != isThumb<K>(), "Classes are mutually exclusive");
    if constexpr (isArm<K>())
      Entry->checkOpcode = checkOpcodeArm<K>;
    if constexpr (isThumb<K>())
      Entry->checkOpcode = checkOpcodeThumb<K>;
    return Entry;
  }

private:
  std::array<std::unique_ptr<FixupInfoBase>, Items> Data;
};

ManagedStatic<FixupInfoTable> DynFixupInfos;

} // namespace

static Error checkOpcode(LinkGraph &G, const ArmRelocation &R,
                         Edge::Kind Kind) {
  assert(Kind >= FirstArmRelocation && Kind <= LastArmRelocation &&
         "Edge kind must be Arm relocation");
  const FixupInfoBase *Entry = DynFixupInfos->getEntry(Kind);
  const FixupInfoArm &Info = *static_cast<const FixupInfoArm *>(Entry);
  assert(Info.checkOpcode && "Opcode check is mandatory for Arm edges");
  if (!Info.checkOpcode(R.Wd))
    return makeUnexpectedOpcodeError(G, R, Kind);

  return Error::success();
}

static Error checkOpcode(LinkGraph &G, const ThumbRelocation &R,
                         Edge::Kind Kind) {
  assert(Kind >= FirstThumbRelocation && Kind <= LastThumbRelocation &&
         "Edge kind must be Thumb relocation");
  const FixupInfoBase *Entry = DynFixupInfos->getEntry(Kind);
  const FixupInfoThumb &Info = *static_cast<const FixupInfoThumb *>(Entry);
  assert(Info.checkOpcode && "Opcode check is mandatory for Thumb edges");
  if (!Info.checkOpcode(R.Hi, R.Lo))
    return makeUnexpectedOpcodeError(G, R, Kind);

  return Error::success();
}

const FixupInfoBase *FixupInfoBase::getDynFixupInfo(Edge::Kind K) {
  return DynFixupInfos->getEntry(K);
}

template <EdgeKind_aarch32 Kind>
bool checkRegister(const ThumbRelocation &R, HalfWords Reg) {
  uint16_t Hi = R.Hi & FixupInfo<Kind>::RegMask.Hi;
  uint16_t Lo = R.Lo & FixupInfo<Kind>::RegMask.Lo;
  return Hi == Reg.Hi && Lo == Reg.Lo;
}

template <EdgeKind_aarch32 Kind>
bool checkRegister(const ArmRelocation &R, uint32_t Reg) {
  uint32_t Wd = R.Wd & FixupInfo<Kind>::RegMask;
  return Wd == Reg;
}

template <EdgeKind_aarch32 Kind>
void writeRegister(WritableThumbRelocation &R, HalfWords Reg) {
  static constexpr HalfWords Mask = FixupInfo<Kind>::RegMask;
  assert((Mask.Hi & Reg.Hi) == Reg.Hi && (Mask.Lo & Reg.Lo) == Reg.Lo &&
         "Value bits exceed bit range of given mask");
  R.Hi = (R.Hi & ~Mask.Hi) | Reg.Hi;
  R.Lo = (R.Lo & ~Mask.Lo) | Reg.Lo;
}

template <EdgeKind_aarch32 Kind>
void writeRegister(WritableArmRelocation &R, uint32_t Reg) {
  static constexpr uint32_t Mask = FixupInfo<Kind>::RegMask;
  assert((Mask & Reg) == Reg && "Value bits exceed bit range of given mask");
  R.Wd = (R.Wd & ~Mask) | Reg;
}

template <EdgeKind_aarch32 Kind>
void writeImmediate(WritableThumbRelocation &R, HalfWords Imm) {
  static constexpr HalfWords Mask = FixupInfo<Kind>::ImmMask;
  assert((Mask.Hi & Imm.Hi) == Imm.Hi && (Mask.Lo & Imm.Lo) == Imm.Lo &&
         "Value bits exceed bit range of given mask");
  R.Hi = (R.Hi & ~Mask.Hi) | Imm.Hi;
  R.Lo = (R.Lo & ~Mask.Lo) | Imm.Lo;
}

template <EdgeKind_aarch32 Kind>
void writeImmediate(WritableArmRelocation &R, uint32_t Imm) {
  static constexpr uint32_t Mask = FixupInfo<Kind>::ImmMask;
  assert((Mask & Imm) == Imm && "Value bits exceed bit range of given mask");
  R.Wd = (R.Wd & ~Mask) | Imm;
}
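
// Illustrative use of the write helpers on the Thumbv7ABS stub template
// defined further below: its first instruction is the halfword pair
// {0xf240, 0x0c00} (movw r12, #0x0000). Writing encodeImmMovtT1MovwT3(0xabcd)
// into it with writeImmediate<Thumb_MovwAbsNC> yields {0xf64a, 0x3ccd}, i.e.
// movw r12, #0xabcd.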

Expected<int64_t> readAddendData(LinkGraph &G, Block &B, Edge::OffsetT Offset,
                                 Edge::Kind Kind) {
  endianness Endian = G.getEndianness();
  const char *BlockWorkingMem = B.getContent().data();
  const char *FixupPtr = BlockWorkingMem + Offset;

  switch (Kind) {
  case Data_Delta32:
  case Data_Pointer32:
  case Data_RequestGOTAndTransformToDelta32:
    return SignExtend64<32>(support::endian::read32(FixupPtr, Endian));
  case Data_PRel31:
    return SignExtend64<31>(support::endian::read32(FixupPtr, Endian));
  default:
    return make_error<JITLinkError>(
        "In graph " + G.getName() + ", section " + B.getSection().getName() +
        " can not read implicit addend for aarch32 edge kind " +
        G.getEdgeKindName(Kind));
  }
}

Expected<int64_t> readAddendArm(LinkGraph &G, Block &B, Edge::OffsetT Offset,
                                Edge::Kind Kind) {
  ArmRelocation R(B.getContent().data() + Offset);
  if (Error Err = checkOpcode(G, R, Kind))
    return std::move(Err);

  switch (Kind) {
  case Arm_Call:
  case Arm_Jump24:
    return decodeImmBA1BlA1BlxA2(R.Wd);

  case Arm_MovtAbs:
  case Arm_MovwAbsNC:
    return decodeImmMovtA1MovwA2(R.Wd);

  default:
    return make_error<JITLinkError>(
        "In graph " + G.getName() + ", section " + B.getSection().getName() +
        " can not read implicit addend for aarch32 edge kind " +
        G.getEdgeKindName(Kind));
  }
}

Expected<int64_t> readAddendThumb(LinkGraph &G, Block &B, Edge::OffsetT Offset,
                                  Edge::Kind Kind, const ArmConfig &ArmCfg) {
  ThumbRelocation R(B.getContent().data() + Offset);
  if (Error Err = checkOpcode(G, R, Kind))
    return std::move(Err);

  switch (Kind) {
  case Thumb_Call:
  case Thumb_Jump24:
    return LLVM_LIKELY(ArmCfg.J1J2BranchEncoding)
               ? decodeImmBT4BlT1BlxT2_J1J2(R.Hi, R.Lo)
               : decodeImmBT4BlT1BlxT2(R.Hi, R.Lo);

  case Thumb_MovwAbsNC:
  case Thumb_MovwPrelNC:
    // Initial addend is interpreted as a signed value
    return SignExtend64<16>(decodeImmMovtT1MovwT3(R.Hi, R.Lo));

  case Thumb_MovtAbs:
  case Thumb_MovtPrel:
    // Initial addend is interpreted as a signed value
    return SignExtend64<16>(decodeImmMovtT1MovwT3(R.Hi, R.Lo));

  default:
    return make_error<JITLinkError>(
        "In graph " + G.getName() + ", section " + B.getSection().getName() +
        " can not read implicit addend for aarch32 edge kind " +
        G.getEdgeKindName(Kind));
  }
}

Error applyFixupData(LinkGraph &G, Block &B, const Edge &E) {
  using namespace support;

  char *BlockWorkingMem = B.getAlreadyMutableContent().data();
  char *FixupPtr = BlockWorkingMem + E.getOffset();

  Edge::Kind Kind = E.getKind();
  uint64_t FixupAddress = (B.getAddress() + E.getOffset()).getValue();
  int64_t Addend = E.getAddend();
  Symbol &TargetSymbol = E.getTarget();
  uint64_t TargetAddress = TargetSymbol.getAddress().getValue();

  // Data relocations have alignment 1, size 4 (except R_ARM_ABS8 and
  // R_ARM_ABS16) and write the full 32-bit result (except R_ARM_PREL31).
  switch (Kind) {
  case Data_Delta32: {
    int64_t Value = TargetAddress - FixupAddress + Addend;
    if (!isInt<32>(Value))
      return makeTargetOutOfRangeError(G, B, E);
    if (LLVM_LIKELY(G.getEndianness() == endianness::little))
      endian::write32le(FixupPtr, Value);
    else
      endian::write32be(FixupPtr, Value);
    return Error::success();
  }
  case Data_Pointer32: {
    int64_t Value = TargetAddress + Addend;
    if (!isUInt<32>(Value))
      return makeTargetOutOfRangeError(G, B, E);
    if (LLVM_LIKELY(G.getEndianness() == endianness::little))
      endian::write32le(FixupPtr, Value);
    else
      endian::write32be(FixupPtr, Value);
    return Error::success();
  }
  case Data_PRel31: {
    int64_t Value = TargetAddress - FixupAddress + Addend;
    if (!isInt<31>(Value))
      return makeTargetOutOfRangeError(G, B, E);
    if (LLVM_LIKELY(G.getEndianness() == endianness::little)) {
      uint32_t MSB = endian::read32le(FixupPtr) & 0x80000000;
      endian::write32le(FixupPtr, MSB | (Value & ~0x80000000));
    } else {
      uint32_t MSB = endian::read32be(FixupPtr) & 0x80000000;
      endian::write32be(FixupPtr, MSB | (Value & ~0x80000000));
    }
    return Error::success();
  }
  case Data_RequestGOTAndTransformToDelta32:
    llvm_unreachable("Should be transformed");
  default:
    return make_error<JITLinkError>(
        "In graph " + G.getName() + ", section " + B.getSection().getName() +
        " encountered unfixable aarch32 edge kind " +
        G.getEdgeKindName(E.getKind()));
  }
}

Error applyFixupArm(LinkGraph &G, Block &B, const Edge &E) {
  WritableArmRelocation R(B.getAlreadyMutableContent().data() + E.getOffset());
  Edge::Kind Kind = E.getKind();
  if (Error Err = checkOpcode(G, R, Kind))
    return Err;

  uint64_t FixupAddress = (B.getAddress() + E.getOffset()).getValue();
  int64_t Addend = E.getAddend();
  Symbol &TargetSymbol = E.getTarget();
  uint64_t TargetAddress = TargetSymbol.getAddress().getValue();

  switch (Kind) {
  case Arm_Jump24: {
    if (hasTargetFlags(TargetSymbol, ThumbSymbol))
      return make_error<JITLinkError>("Branch relocation needs interworking "
                                      "stub when bridging to Thumb: " +
                                      StringRef(G.getEdgeKindName(Kind)));

    int64_t Value = TargetAddress - FixupAddress + Addend;

    if (!isInt<26>(Value))
      return makeTargetOutOfRangeError(G, B, E);
    writeImmediate<Arm_Jump24>(R, encodeImmBA1BlA1BlxA2(Value));

    return Error::success();
  }
  case Arm_Call: {
    if ((R.Wd & FixupInfo<Arm_Call>::CondMask) !=
        FixupInfo<Arm_Call>::Unconditional)
      return make_error<JITLinkError>("Relocation expects an unconditional "
                                      "BL/BLX branch instruction: " +
                                      StringRef(G.getEdgeKindName(Kind)));

    int64_t Value = TargetAddress - FixupAddress + Addend;

    // The call instruction itself is Arm. The call destination can either be
    // Thumb or Arm. We use BL to stay in Arm and BLX to change to Thumb.
    bool TargetIsThumb = hasTargetFlags(TargetSymbol, ThumbSymbol);
    bool InstrIsBlx = (~R.Wd & FixupInfo<Arm_Call>::BitBlx) == 0;
    if (TargetIsThumb != InstrIsBlx) {
      if (LLVM_LIKELY(TargetIsThumb)) {
        // Change opcode BL -> BLX
        R.Wd = R.Wd | FixupInfo<Arm_Call>::BitBlx;
        R.Wd = R.Wd & ~FixupInfo<Arm_Call>::BitH;
      } else {
        // Change opcode BLX -> BL
        R.Wd = R.Wd & ~FixupInfo<Arm_Call>::BitBlx;
      }
    }

    if (!isInt<26>(Value))
      return makeTargetOutOfRangeError(G, B, E);
    writeImmediate<Arm_Call>(R, encodeImmBA1BlA1BlxA2(Value));

    return Error::success();
  }
  case Arm_MovwAbsNC: {
    uint16_t Value = (TargetAddress + Addend) & 0xffff;
    writeImmediate<Arm_MovwAbsNC>(R, encodeImmMovtA1MovwA2(Value));
    return Error::success();
  }
  case Arm_MovtAbs: {
    uint16_t Value = ((TargetAddress + Addend) >> 16) & 0xffff;
    writeImmediate<Arm_MovtAbs>(R, encodeImmMovtA1MovwA2(Value));
    return Error::success();
  }
  default:
    return make_error<JITLinkError>(
        "In graph " + G.getName() + ", section " + B.getSection().getName() +
        " encountered unfixable aarch32 edge kind " +
        G.getEdgeKindName(E.getKind()));
  }
}

Error applyFixupThumb(LinkGraph &G, Block &B, const Edge &E,
                      const ArmConfig &ArmCfg) {
  WritableThumbRelocation R(B.getAlreadyMutableContent().data() +
                            E.getOffset());
  Edge::Kind Kind = E.getKind();
  if (Error Err = checkOpcode(G, R, Kind))
    return Err;

  uint64_t FixupAddress = (B.getAddress() + E.getOffset()).getValue();
  int64_t Addend = E.getAddend();
  Symbol &TargetSymbol = E.getTarget();
  uint64_t TargetAddress = TargetSymbol.getAddress().getValue();

  switch (Kind) {
  case Thumb_Jump24: {
    if (!hasTargetFlags(TargetSymbol, ThumbSymbol))
      return make_error<JITLinkError>("Branch relocation needs interworking "
                                      "stub when bridging to ARM: " +
                                      StringRef(G.getEdgeKindName(Kind)));

    int64_t Value = TargetAddress - FixupAddress + Addend;
    if (LLVM_LIKELY(ArmCfg.J1J2BranchEncoding)) {
      if (!isInt<25>(Value))
        return makeTargetOutOfRangeError(G, B, E);
      writeImmediate<Thumb_Jump24>(R, encodeImmBT4BlT1BlxT2_J1J2(Value));
    } else {
      if (!isInt<22>(Value))
        return makeTargetOutOfRangeError(G, B, E);
      writeImmediate<Thumb_Jump24>(R, encodeImmBT4BlT1BlxT2(Value));
    }

    return Error::success();
  }

  case Thumb_Call: {
    int64_t Value = TargetAddress - FixupAddress + Addend;

    // The call instruction itself is Thumb. The call destination can either be
    // Thumb or Arm. We use BL to stay in Thumb and BLX to change to Arm.
    bool TargetIsArm = !hasTargetFlags(TargetSymbol, ThumbSymbol);
    bool InstrIsBlx = (R.Lo & FixupInfo<Thumb_Call>::LoBitNoBlx) == 0;
    if (TargetIsArm != InstrIsBlx) {
      if (LLVM_LIKELY(TargetIsArm)) {
        // Change opcode BL -> BLX and fix range value: account for 4-byte
        // aligned destination while instruction may only be 2-byte aligned
        R.Lo = R.Lo & ~FixupInfo<Thumb_Call>::LoBitNoBlx;
        R.Lo = R.Lo & ~FixupInfo<Thumb_Call>::LoBitH;
        Value = alignTo(Value, 4);
      } else {
        // Change opcode BLX -> BL
        R.Lo = R.Lo & ~FixupInfo<Thumb_Call>::LoBitNoBlx;
      }
    }

    if (LLVM_LIKELY(ArmCfg.J1J2BranchEncoding)) {
      if (!isInt<25>(Value))
        return makeTargetOutOfRangeError(G, B, E);
      writeImmediate<Thumb_Call>(R, encodeImmBT4BlT1BlxT2_J1J2(Value));
    } else {
      if (!isInt<22>(Value))
        return makeTargetOutOfRangeError(G, B, E);
      writeImmediate<Thumb_Call>(R, encodeImmBT4BlT1BlxT2(Value));
    }

    assert(((R.Lo & FixupInfo<Thumb_Call>::LoBitNoBlx) ||
            (R.Lo & FixupInfo<Thumb_Call>::LoBitH) == 0) &&
           "Opcode BLX implies H bit is clear (avoid UB in BLX T2)");
    return Error::success();
  }

  case Thumb_MovwAbsNC: {
    uint16_t Value = (TargetAddress + Addend) & 0xffff;
    writeImmediate<Thumb_MovwAbsNC>(R, encodeImmMovtT1MovwT3(Value));
    return Error::success();
  }
  case Thumb_MovtAbs: {
    uint16_t Value = ((TargetAddress + Addend) >> 16) & 0xffff;
    writeImmediate<Thumb_MovtAbs>(R, encodeImmMovtT1MovwT3(Value));
    return Error::success();
  }
  case Thumb_MovwPrelNC: {
    uint16_t Value = ((TargetAddress + Addend - FixupAddress) & 0xffff);
    writeImmediate<Thumb_MovwPrelNC>(R, encodeImmMovtT1MovwT3(Value));
    return Error::success();
  }
  case Thumb_MovtPrel: {
    uint16_t Value = (((TargetAddress + Addend - FixupAddress) >> 16) & 0xffff);
    writeImmediate<Thumb_MovtPrel>(R, encodeImmMovtT1MovwT3(Value));
    return Error::success();
  }

  default:
    return make_error<JITLinkError>(
        "In graph " + G.getName() + ", section " + B.getSection().getName() +
        " encountered unfixable aarch32 edge kind " +
        G.getEdgeKindName(E.getKind()));
  }
}

const uint8_t GOTEntryInit[] = {
    0x00,
    0x00,
    0x00,
    0x00,
};

/// Create a new node in the link-graph for the given pointer value.
template <size_t Size>
static Block &allocPointer(LinkGraph &G, Section &S,
                           const uint8_t (&Content)[Size]) {
  static_assert(Size == 4, "Pointers are 32-bit");
  constexpr uint64_t Alignment = 4;
  ArrayRef<char> Init(reinterpret_cast<const char *>(Content), Size);
  return G.createContentBlock(S, Init, orc::ExecutorAddr(), Alignment, 0);
}

Symbol &GOTBuilder::createEntry(LinkGraph &G, Symbol &Target) {
  if (!GOTSection)
    GOTSection = &G.createSection(getSectionName(), orc::MemProt::Read);
  Block &B = allocPointer(G, *GOTSection, GOTEntryInit);
  constexpr int64_t GOTEntryAddend = 0;
  B.addEdge(Data_Pointer32, 0, Target, GOTEntryAddend);
  return G.addAnonymousSymbol(B, 0, B.getSize(), false, false);
}

bool GOTBuilder::visitEdge(LinkGraph &G, Block *B, Edge &E) {
  Edge::Kind KindToSet = Edge::Invalid;
  switch (E.getKind()) {
  case aarch32::Data_RequestGOTAndTransformToDelta32: {
    KindToSet = aarch32::Data_Delta32;
    break;
  }
  default:
    return false;
  }
  LLVM_DEBUG(dbgs() << " Transforming " << G.getEdgeKindName(E.getKind())
                    << " edge at " << B->getFixupAddress(E) << " ("
                    << B->getAddress() << " + "
                    << formatv("{0:x}", E.getOffset()) << ") into "
                    << G.getEdgeKindName(KindToSet) << "\n");
  E.setKind(KindToSet);
  E.setTarget(getEntryForTarget(G, E.getTarget()));
  return true;
}

const uint8_t ArmThumbv5LdrPc[] = {
    0x78, 0x47,             // bx pc
    0xfd, 0xe7,             // b #-6 ; Arm recommended sequence to follow bx pc
    0x04, 0xf0, 0x1f, 0xe5, // ldr pc, [pc,#-4] ; L1
    0x00, 0x00, 0x00, 0x00, // L1: .word S
};

const uint8_t Armv7ABS[] = {
    0x00, 0xc0, 0x00, 0xe3, // movw r12, #0x0000 ; lower 16-bit
    0x00, 0xc0, 0x40, 0xe3, // movt r12, #0x0000 ; upper 16-bit
    0x1c, 0xff, 0x2f, 0xe1  // bx r12
};

const uint8_t Thumbv7ABS[] = {
    0x40, 0xf2, 0x00, 0x0c, // movw r12, #0x0000 ; lower 16-bit
    0xc0, 0xf2, 0x00, 0x0c, // movt r12, #0x0000 ; upper 16-bit
    0x60, 0x47              // bx r12
};
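
// For example, once the MovwAbsNC/MovtAbs edges added by the createStub*
// functions below are resolved for a target at address 0x76543210, either v7
// stub materializes as:
//   movw r12, #0x3210
//   movt r12, #0x7654
//   bx   r12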

/// Create a new node in the link-graph for the given stub template.
template <size_t Size>
static Block &allocStub(LinkGraph &G, Section &S, const uint8_t (&Code)[Size]) {
  constexpr uint64_t Alignment = 4;
  ArrayRef<char> Template(reinterpret_cast<const char *>(Code), Size);
  return G.createContentBlock(S, Template, orc::ExecutorAddr(), Alignment, 0);
}

static Block &createStubPrev7(LinkGraph &G, Section &S, Symbol &Target) {
  Block &B = allocStub(G, S, ArmThumbv5LdrPc);
  B.addEdge(Data_Pointer32, 8, Target, 0);
  return B;
}

static Block &createStubThumbv7(LinkGraph &G, Section &S, Symbol &Target) {
  Block &B = allocStub(G, S, Thumbv7ABS);
  B.addEdge(Thumb_MovwAbsNC, 0, Target, 0);
  B.addEdge(Thumb_MovtAbs, 4, Target, 0);

  [[maybe_unused]] const char *StubPtr = B.getContent().data();
  [[maybe_unused]] HalfWords Reg12 = encodeRegMovtT1MovwT3(12);
  assert(checkRegister<Thumb_MovwAbsNC>(StubPtr, Reg12) &&
         checkRegister<Thumb_MovtAbs>(StubPtr + 4, Reg12) &&
         "Linker generated stubs may only corrupt register r12 (IP)");
  return B;
}

static Block &createStubArmv7(LinkGraph &G, Section &S, Symbol &Target) {
  Block &B = allocStub(G, S, Armv7ABS);
  B.addEdge(Arm_MovwAbsNC, 0, Target, 0);
  B.addEdge(Arm_MovtAbs, 4, Target, 0);

  [[maybe_unused]] const char *StubPtr = B.getContent().data();
  [[maybe_unused]] uint32_t Reg12 = encodeRegMovtA1MovwA2(12);
  assert(checkRegister<Arm_MovwAbsNC>(StubPtr, Reg12) &&
         checkRegister<Arm_MovtAbs>(StubPtr + 4, Reg12) &&
         "Linker generated stubs may only corrupt register r12 (IP)");
  return B;
}

static bool needsStub(const Edge &E) {
  Symbol &Target = E.getTarget();

  // Create stubs for external branch targets.
  if (!Target.isDefined()) {
    switch (E.getKind()) {
    case Arm_Call:
    case Arm_Jump24:
    case Thumb_Call:
    case Thumb_Jump24:
      return true;
    default:
      return false;
    }
  }

  // For local targets, create interworking stubs if we switch Arm/Thumb with an
  // instruction that cannot switch the instruction set state natively.
  bool TargetIsThumb = Target.getTargetFlags() & ThumbSymbol;
  switch (E.getKind()) {
  case Arm_Jump24:
    return TargetIsThumb; // Branch to Thumb needs interworking stub
  case Thumb_Jump24:
    return !TargetIsThumb; // Branch to Arm needs interworking stub
  default:
    break;
  }

  return false;
}

// The ArmThumbv5LdrPc stub has 2 entrypoints: Thumb at offset 0 is taken only
// for Thumb B instructions. Thumb BL is rewritten to BLX and takes the Arm
// entrypoint at offset 4. Arm branches always use that one.
Symbol *StubsManager_prev7::getOrCreateSlotEntrypoint(LinkGraph &G,
                                                      StubMapEntry &Slot,
                                                      bool Thumb) {
  constexpr orc::ExecutorAddrDiff ThumbEntrypointOffset = 0;
  constexpr orc::ExecutorAddrDiff ArmEntrypointOffset = 4;
  if (Thumb && !Slot.ThumbEntry) {
    Slot.ThumbEntry =
        &G.addAnonymousSymbol(*Slot.B, ThumbEntrypointOffset, 4, true, false);
    Slot.ThumbEntry->setTargetFlags(ThumbSymbol);
  }
  if (!Thumb && !Slot.ArmEntry)
    Slot.ArmEntry =
        &G.addAnonymousSymbol(*Slot.B, ArmEntrypointOffset, 8, true, false);
  return Thumb ? Slot.ThumbEntry : Slot.ArmEntry;
}

bool StubsManager_prev7::visitEdge(LinkGraph &G, Block *B, Edge &E) {
  if (!needsStub(E))
    return false;

  Symbol &Target = E.getTarget();
  assert(Target.hasName() && "Edge cannot point to anonymous target");
  auto [Slot, NewStub] = getStubMapSlot(Target.getName());

  if (NewStub) {
    if (!StubsSection)
      StubsSection = &G.createSection(getSectionName(),
                                      orc::MemProt::Read | orc::MemProt::Exec);
    LLVM_DEBUG({
      dbgs() << " Created stub entry for " << Target.getName() << " in "
             << StubsSection->getName() << "\n";
    });
    Slot->B = &createStubPrev7(G, *StubsSection, Target);
  }

  // The ArmThumbv5LdrPc stub has 2 entrypoints: Thumb at offset 0 is taken only
  // for Thumb B instructions. Thumb BL is rewritten to BLX and takes the Arm
  // entrypoint at offset 4. Arm branches always use that one.
  bool UseThumb = E.getKind() == Thumb_Jump24;
  Symbol *StubEntrypoint = getOrCreateSlotEntrypoint(G, *Slot, UseThumb);

  LLVM_DEBUG({
    dbgs() << " Using " << (UseThumb ? "Thumb" : "Arm") << " entrypoint "
           << *StubEntrypoint << " in "
           << StubEntrypoint->getBlock().getSection().getName() << "\n";
  });

  E.setTarget(*StubEntrypoint);
  return true;
}

bool StubsManager_v7::visitEdge(LinkGraph &G, Block *B, Edge &E) {
  if (!needsStub(E))
    return false;

  // Stub Arm/Thumb follows instruction set state at relocation site.
  // TODO: We may reduce them at relaxation time and reuse freed slots.
  bool MakeThumb = (E.getKind() > LastArmRelocation);
  LLVM_DEBUG(dbgs() << " Preparing " << (MakeThumb ? "Thumb" : "Arm")
                    << " stub for " << G.getEdgeKindName(E.getKind())
                    << " edge at " << B->getFixupAddress(E) << " ("
                    << B->getAddress() << " + "
                    << formatv("{0:x}", E.getOffset()) << ")\n");

  Symbol &Target = E.getTarget();
  assert(Target.hasName() && "Edge cannot point to anonymous target");
  Symbol *&StubSymbol = getStubSymbolSlot(Target.getName(), MakeThumb);

  if (!StubSymbol) {
    if (!StubsSection)
      StubsSection = &G.createSection(getSectionName(),
                                      orc::MemProt::Read | orc::MemProt::Exec);
    Block &B = MakeThumb ? createStubThumbv7(G, *StubsSection, Target)
                         : createStubArmv7(G, *StubsSection, Target);
    StubSymbol = &G.addAnonymousSymbol(B, 0, B.getSize(), true, false);
    if (MakeThumb)
      StubSymbol->setTargetFlags(ThumbSymbol);

    LLVM_DEBUG({
      dbgs() << " Created " << (MakeThumb ? "Thumb" : "Arm") << " entry for "
             << Target.getName() << " in " << StubsSection->getName() << ": "
             << *StubSymbol << "\n";
    });
  }

  assert(MakeThumb == (StubSymbol->getTargetFlags() & ThumbSymbol) &&
         "Instruction set states of stub and relocation site should be equal");
  LLVM_DEBUG({
    dbgs() << " Using " << (MakeThumb ? "Thumb" : "Arm") << " entry "
           << *StubSymbol << " in "
           << StubSymbol->getBlock().getSection().getName() << "\n";
  });

  E.setTarget(*StubSymbol);
  return true;
}

const char *getEdgeKindName(Edge::Kind K) {
#define KIND_NAME_CASE(K)                                                      \
  case K:                                                                      \
    return #K;

  switch (K) {
    KIND_NAME_CASE(Data_Delta32)
    KIND_NAME_CASE(Data_Pointer32)
    KIND_NAME_CASE(Data_PRel31)
    KIND_NAME_CASE(Data_RequestGOTAndTransformToDelta32)
    KIND_NAME_CASE(Arm_Call)
    KIND_NAME_CASE(Arm_Jump24)
    KIND_NAME_CASE(Arm_MovwAbsNC)
    KIND_NAME_CASE(Arm_MovtAbs)
    KIND_NAME_CASE(Thumb_Call)
    KIND_NAME_CASE(Thumb_Jump24)
    KIND_NAME_CASE(Thumb_MovwAbsNC)
    KIND_NAME_CASE(Thumb_MovtAbs)
    KIND_NAME_CASE(Thumb_MovwPrelNC)
    KIND_NAME_CASE(Thumb_MovtPrel)
    KIND_NAME_CASE(None)
  default:
    return getGenericEdgeKindName(K);
  }
#undef KIND_NAME_CASE
}

const char *getCPUArchName(ARMBuildAttrs::CPUArch K) {
#define CPUARCH_NAME_CASE(K)                                                   \
  case K:                                                                      \
    return #K;

  using namespace ARMBuildAttrs;
  switch (K) {
    CPUARCH_NAME_CASE(Pre_v4)
    CPUARCH_NAME_CASE(v4)
    CPUARCH_NAME_CASE(v4T)
    CPUARCH_NAME_CASE(v5T)
    CPUARCH_NAME_CASE(v5TE)
    CPUARCH_NAME_CASE(v5TEJ)
    CPUARCH_NAME_CASE(v6)
    CPUARCH_NAME_CASE(v6KZ)
    CPUARCH_NAME_CASE(v6T2)
    CPUARCH_NAME_CASE(v6K)
    CPUARCH_NAME_CASE(v7)
    CPUARCH_NAME_CASE(v6_M)
    CPUARCH_NAME_CASE(v6S_M)
    CPUARCH_NAME_CASE(v7E_M)
    CPUARCH_NAME_CASE(v8_A)
    CPUARCH_NAME_CASE(v8_R)
    CPUARCH_NAME_CASE(v8_M_Base)
    CPUARCH_NAME_CASE(v8_M_Main)
    CPUARCH_NAME_CASE(v8_1_M_Main)
    CPUARCH_NAME_CASE(v9_A)
  }
  llvm_unreachable("Missing CPUArch in switch?");
#undef CPUARCH_NAME_CASE
}

} // namespace aarch32
} // namespace jitlink
} // namespace llvm
994 | |