//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#if !defined(__wasm__)

#include "assembly.h"

#define FROM_0_TO_15 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
#define FROM_16_TO_31 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31

#define FROM_0_TO_31 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
#define FROM_32_TO_63 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63
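// These comma-separated index lists are expanded by `.irp` loops in the
// per-architecture restore code below (e.g. `.irp i,FROM_0_TO_31`).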

#if defined(_AIX)
  .toc
#elif defined(__aarch64__) && defined(__ELF__) && defined(_LIBUNWIND_EXECUTE_ONLY_CODE)
  .section .text,"axy",@progbits,unique,0
#else
  .text
#endif

#if !defined(__USING_SJLJ_EXCEPTIONS__)

#if defined(__i386__)
.att_syntax

DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_jumpto)
#
# extern "C" void __libunwind_Registers_x86_jumpto(Registers_x86 *);
#
# On entry:
#  +                       +
#  +-----------------------+
#  + thread_state pointer  +
#  +-----------------------+
#  + return address        +
#  +-----------------------+   <-- SP
#  +                       +

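  # Two words are staged just below the new stack pointer: the saved eax at
  # 0(new esp) and the saved eip at 4(new esp). After switching esp there,
  # a pop/pop/jmp sequence restores eax and jumps to the saved eip.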
  _LIBUNWIND_CET_ENDBR
  movl   4(%esp), %eax
  # set up eax and ret on new stack location
  movl  28(%eax), %edx # edx holds new stack pointer
  subl  $8,%edx
  movl  %edx, 28(%eax)
  movl  0(%eax), %ebx
  movl  %ebx, 0(%edx)
  movl  40(%eax), %ebx
  movl  %ebx, 4(%edx)
  # we now have ret and eax pushed onto where new stack will be
  # restore all registers
  movl   4(%eax), %ebx
  movl   8(%eax), %ecx
  movl  12(%eax), %edx
  movl  16(%eax), %edi
  movl  20(%eax), %esi
  movl  24(%eax), %ebp
  movl  28(%eax), %esp
  # skip ss
  # skip eflags
  pop    %eax  # eax was already pushed on new stack
  pop    %ecx
  jmp    *%ecx
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs

#elif defined(__x86_64__) && !defined(__arm64ec__)
.att_syntax

DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_64_jumpto)
#
# extern "C" void __libunwind_Registers_x86_64_jumpto(Registers_x86_64 *);
#
#if defined(_WIN64)
# On entry, thread_state pointer is in rcx; move it into rdi
# to share restore code below. Since this routine restores and
# overwrites all registers, we can use the same registers for
# pointers and temporaries as on unix even though win64 normally
# mustn't clobber some of them.
  movq  %rcx, %rdi
#else
# On entry, thread_state pointer is in rdi
#endif

  _LIBUNWIND_CET_ENDBR
  movq  56(%rdi), %rax # rax holds new stack pointer
  subq  $16, %rax
  movq  %rax, 56(%rdi)
  movq  32(%rdi), %rbx  # store new rdi on new stack
  movq  %rbx, 0(%rax)
  movq  128(%rdi), %rbx # store new rip on new stack
  movq  %rbx, 8(%rax)
  # restore all registers
  movq    0(%rdi), %rax
  movq    8(%rdi), %rbx
  movq   16(%rdi), %rcx
  movq   24(%rdi), %rdx
  # restore rdi later
  movq   40(%rdi), %rsi
  movq   48(%rdi), %rbp
  # restore rsp later
  movq   64(%rdi), %r8
  movq   72(%rdi), %r9
  movq   80(%rdi), %r10
  movq   88(%rdi), %r11
  movq   96(%rdi), %r12
  movq  104(%rdi), %r13
  movq  112(%rdi), %r14
  movq  120(%rdi), %r15
  # skip rflags
  # skip cs
  # skip fs
  # skip gs

#if defined(_WIN64)
  movdqu 176(%rdi),%xmm0
  movdqu 192(%rdi),%xmm1
  movdqu 208(%rdi),%xmm2
  movdqu 224(%rdi),%xmm3
  movdqu 240(%rdi),%xmm4
  movdqu 256(%rdi),%xmm5
  movdqu 272(%rdi),%xmm6
  movdqu 288(%rdi),%xmm7
  movdqu 304(%rdi),%xmm8
  movdqu 320(%rdi),%xmm9
  movdqu 336(%rdi),%xmm10
  movdqu 352(%rdi),%xmm11
  movdqu 368(%rdi),%xmm12
  movdqu 384(%rdi),%xmm13
  movdqu 400(%rdi),%xmm14
  movdqu 416(%rdi),%xmm15
#endif
  movq  56(%rdi), %rsp  # cut back rsp to new location
  pop   %rdi            # rdi was saved here earlier
  pop   %rcx
  jmpq  *%rcx


#elif defined(__powerpc64__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv)
//
// void libunwind::Registers_ppc64::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

// load register (GPR)
#define PPC64_LR(n) \
  ld    n, (8 * (n + 2))(3)
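// e.g. PPC64_LR(14) expands to `ld 14, 128(3)`, i.e. it reloads GPR14 from the
// doubleword at offset 8 * (14 + 2) in the context pointed to by r3.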

  // restore integral registers
  // skip r0 for now
  // skip r1 for now
  PPC64_LR(2)
  // skip r3 for now
  // skip r4 for now
  // skip r5 for now
  PPC64_LR(6)
  PPC64_LR(7)
  PPC64_LR(8)
  PPC64_LR(9)
  PPC64_LR(10)
  PPC64_LR(11)
  PPC64_LR(12)
  PPC64_LR(13)
  PPC64_LR(14)
  PPC64_LR(15)
  PPC64_LR(16)
  PPC64_LR(17)
  PPC64_LR(18)
  PPC64_LR(19)
  PPC64_LR(20)
  PPC64_LR(21)
  PPC64_LR(22)
  PPC64_LR(23)
  PPC64_LR(24)
  PPC64_LR(25)
  PPC64_LR(26)
  PPC64_LR(27)
  PPC64_LR(28)
  PPC64_LR(29)
  PPC64_LR(30)
  PPC64_LR(31)

#if defined(__VSX__)

  // restore VS registers
  // (note that this also restores floating point registers and V registers,
  // because part of VS is mapped to these registers)

  addi  4, 3, PPC64_OFFS_FP

// load VS register
#ifdef __LITTLE_ENDIAN__
// For little-endian targets, we need a swap since lxvd2x will load the register
// in the incorrect doubleword order.
// FIXME: when supporting targets older than Power9 on LE is no longer required,
// this can be changed to simply `lxv n, (16 * n)(4)`.
#define PPC64_LVS(n)        \
  lxvd2x  n, 0, 4          ;\
  xxswapd n, n             ;\
  addi    4, 4, 16
#else
#define PPC64_LVS(n)        \
  lxvd2x  n, 0, 4          ;\
  addi    4, 4, 16
#endif
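// PPC64_LVS(n) loads VSn from the address currently in r4 and then advances r4
// by 16 bytes, so consecutive invocations walk the VS save area in order.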

  // restore the first 32 VS regs (and also all floating point regs)
  PPC64_LVS(0)
  PPC64_LVS(1)
  PPC64_LVS(2)
  PPC64_LVS(3)
  PPC64_LVS(4)
  PPC64_LVS(5)
  PPC64_LVS(6)
  PPC64_LVS(7)
  PPC64_LVS(8)
  PPC64_LVS(9)
  PPC64_LVS(10)
  PPC64_LVS(11)
  PPC64_LVS(12)
  PPC64_LVS(13)
  PPC64_LVS(14)
  PPC64_LVS(15)
  PPC64_LVS(16)
  PPC64_LVS(17)
  PPC64_LVS(18)
  PPC64_LVS(19)
  PPC64_LVS(20)
  PPC64_LVS(21)
  PPC64_LVS(22)
  PPC64_LVS(23)
  PPC64_LVS(24)
  PPC64_LVS(25)
  PPC64_LVS(26)
  PPC64_LVS(27)
  PPC64_LVS(28)
  PPC64_LVS(29)
  PPC64_LVS(30)
  PPC64_LVS(31)

#ifdef __LITTLE_ENDIAN__
#define PPC64_CLVS_RESTORE(n)            \
  addi    4, 3, PPC64_OFFS_FP + n * 16  ;\
  lxvd2x  n, 0, 4                       ;\
  xxswapd n, n
#else
#define PPC64_CLVS_RESTORE(n)            \
  addi    4, 3, PPC64_OFFS_FP + n * 16  ;\
  lxvd2x  n, 0, 4
#endif
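// Unlike PPC64_LVS, PPC64_CLVS_RESTORE recomputes the source address from r3
// each time (r4 = r3 + PPC64_OFFS_FP + n * 16), because the conditional
// restores below may skip arbitrary registers.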

#if !defined(_AIX)
  // Use VRSAVE to conditionally restore the remaining VS regs (VS32-VS63),
  // which are where the V regs are mapped. In the AIX ABI, VRSAVE is not used.
  ld    5, PPC64_OFFS_VRSAVE(3)   // test VRsave
  cmpwi 5, 0
  beq   Lnovec

// conditionally load VS
#define PPC64_CLVSl(n)                    \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(47-n))  ;\
  beq    Ldone##n                        ;\
  PPC64_CLVS_RESTORE(n)                  ;\
Ldone##n:

#define PPC64_CLVSh(n)                    \
  andi.  0, 5, (1 PPC_LEFT_SHIFT(63-n))  ;\
  beq    Ldone##n                        ;\
  PPC64_CLVS_RESTORE(n)                  ;\
Ldone##n:
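// VRSAVE has one bit per vector register: PPC64_CLVSl tests the bits for
// VS32-VS47 (V0-V15) with andis., PPC64_CLVSh tests those for VS48-VS63
// (V16-V31) with andi., and the restore is skipped when the bit is clear.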

#else

#define PPC64_CLVSl(n) PPC64_CLVS_RESTORE(n)
#define PPC64_CLVSh(n) PPC64_CLVS_RESTORE(n)

#endif // !defined(_AIX)

  PPC64_CLVSl(32)
  PPC64_CLVSl(33)
  PPC64_CLVSl(34)
  PPC64_CLVSl(35)
  PPC64_CLVSl(36)
  PPC64_CLVSl(37)
  PPC64_CLVSl(38)
  PPC64_CLVSl(39)
  PPC64_CLVSl(40)
  PPC64_CLVSl(41)
  PPC64_CLVSl(42)
  PPC64_CLVSl(43)
  PPC64_CLVSl(44)
  PPC64_CLVSl(45)
  PPC64_CLVSl(46)
  PPC64_CLVSl(47)
  PPC64_CLVSh(48)
  PPC64_CLVSh(49)
  PPC64_CLVSh(50)
  PPC64_CLVSh(51)
  PPC64_CLVSh(52)
  PPC64_CLVSh(53)
  PPC64_CLVSh(54)
  PPC64_CLVSh(55)
  PPC64_CLVSh(56)
  PPC64_CLVSh(57)
  PPC64_CLVSh(58)
  PPC64_CLVSh(59)
  PPC64_CLVSh(60)
  PPC64_CLVSh(61)
  PPC64_CLVSh(62)
  PPC64_CLVSh(63)

#else

// load FP register
#define PPC64_LF(n) \
  lfd   n, (PPC64_OFFS_FP + n * 16)(3)
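// e.g. PPC64_LF(3) expands to `lfd 3, (PPC64_OFFS_FP + 3 * 16)(3)`; each FP
// slot is 16 bytes apart in the context even though only 8 bytes are loaded.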

  // restore float registers
  PPC64_LF(0)
  PPC64_LF(1)
  PPC64_LF(2)
  PPC64_LF(3)
  PPC64_LF(4)
  PPC64_LF(5)
  PPC64_LF(6)
  PPC64_LF(7)
  PPC64_LF(8)
  PPC64_LF(9)
  PPC64_LF(10)
  PPC64_LF(11)
  PPC64_LF(12)
  PPC64_LF(13)
  PPC64_LF(14)
  PPC64_LF(15)
  PPC64_LF(16)
  PPC64_LF(17)
  PPC64_LF(18)
  PPC64_LF(19)
  PPC64_LF(20)
  PPC64_LF(21)
  PPC64_LF(22)
  PPC64_LF(23)
  PPC64_LF(24)
  PPC64_LF(25)
  PPC64_LF(26)
  PPC64_LF(27)
  PPC64_LF(28)
  PPC64_LF(29)
  PPC64_LF(30)
  PPC64_LF(31)

#if defined(__ALTIVEC__)

#define PPC64_CLV_UNALIGNED_RESTORE(n)       \
  ld     0, (PPC64_OFFS_V + n * 16)(3)      ;\
  std    0, 0(4)                            ;\
  ld     0, (PPC64_OFFS_V + n * 16 + 8)(3)  ;\
  std    0, 8(4)                            ;\
  lvx    n, 0, 4
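// PPC64_CLV_UNALIGNED_RESTORE(n) copies the 16-byte image of Vn through the
// aligned buffer at r4 (two 8-byte ld/std pairs), then loads Vn with lvx from
// that buffer (lvx ignores the low four address bits).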

#if !defined(_AIX)
  // restore vector registers if any are in use. In the AIX ABI, VRSAVE is
  // not used.
  ld    5, PPC64_OFFS_VRSAVE(3)   // test VRsave
  cmpwi 5, 0
  beq   Lnovec

#define PPC64_CLV_UNALIGNEDl(n)             \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(15-n))    ;\
  beq    Ldone##n                          ;\
  PPC64_CLV_UNALIGNED_RESTORE(n)           ;\
Ldone ## n:

#define PPC64_CLV_UNALIGNEDh(n)             \
  andi.  0, 5, (1 PPC_LEFT_SHIFT(31-n))    ;\
  beq    Ldone##n                          ;\
  PPC64_CLV_UNALIGNED_RESTORE(n)           ;\
Ldone ## n:

#else

#define PPC64_CLV_UNALIGNEDl(n) PPC64_CLV_UNALIGNED_RESTORE(n)
#define PPC64_CLV_UNALIGNEDh(n) PPC64_CLV_UNALIGNED_RESTORE(n)

#endif // !defined(_AIX)

  subi  4, 1, 16
  // r4 is now a 16-byte aligned pointer into the red zone
  // the _vectorScalarRegisters may not be 16-byte aligned
  // so copy via red zone temp buffer
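  // (the 64-bit PowerPC ABIs keep r1 16-byte aligned, so r1 - 16 is an
  // aligned scratch slot below the stack pointer)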

  PPC64_CLV_UNALIGNEDl(0)
  PPC64_CLV_UNALIGNEDl(1)
  PPC64_CLV_UNALIGNEDl(2)
  PPC64_CLV_UNALIGNEDl(3)
  PPC64_CLV_UNALIGNEDl(4)
  PPC64_CLV_UNALIGNEDl(5)
  PPC64_CLV_UNALIGNEDl(6)
  PPC64_CLV_UNALIGNEDl(7)
  PPC64_CLV_UNALIGNEDl(8)
  PPC64_CLV_UNALIGNEDl(9)
  PPC64_CLV_UNALIGNEDl(10)
  PPC64_CLV_UNALIGNEDl(11)
  PPC64_CLV_UNALIGNEDl(12)
  PPC64_CLV_UNALIGNEDl(13)
  PPC64_CLV_UNALIGNEDl(14)
  PPC64_CLV_UNALIGNEDl(15)
  PPC64_CLV_UNALIGNEDh(16)
  PPC64_CLV_UNALIGNEDh(17)
  PPC64_CLV_UNALIGNEDh(18)
  PPC64_CLV_UNALIGNEDh(19)
  PPC64_CLV_UNALIGNEDh(20)
  PPC64_CLV_UNALIGNEDh(21)
  PPC64_CLV_UNALIGNEDh(22)
  PPC64_CLV_UNALIGNEDh(23)
  PPC64_CLV_UNALIGNEDh(24)
  PPC64_CLV_UNALIGNEDh(25)
  PPC64_CLV_UNALIGNEDh(26)
  PPC64_CLV_UNALIGNEDh(27)
  PPC64_CLV_UNALIGNEDh(28)
  PPC64_CLV_UNALIGNEDh(29)
  PPC64_CLV_UNALIGNEDh(30)
  PPC64_CLV_UNALIGNEDh(31)

#endif
#endif

Lnovec:
  ld    0, PPC64_OFFS_CR(3)
  mtcr  0
  ld    0, PPC64_OFFS_SRR0(3)
  mtctr 0

#if defined(_AIX)
  // After setting GPR1 to a higher address, AIX wipes out the original
  // stack space below that address invalidated by the new GPR1 value. Use
  // GPR0 to save the value of GPR3 in the context before it is wiped out.
  // This compromises the content of GPR0 which is a volatile register.
  ld 0, (8 * (3 + 2))(3)
#else
  PPC64_LR(0)
#endif
  PPC64_LR(5)
  PPC64_LR(4)
  PPC64_LR(1)
#if defined(_AIX)
  mr 3, 0
#else
  PPC64_LR(3)
#endif
  bctr

#elif defined(__powerpc__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
//
// void libunwind::Registers_ppc::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

  // restore integral registers
  // skip r0 for now
  // skip r1 for now
  lwz     2, 16(3)
  // skip r3 for now
  // skip r4 for now
  // skip r5 for now
  lwz     6, 32(3)
  lwz     7, 36(3)
  lwz     8, 40(3)
  lwz     9, 44(3)
  lwz    10, 48(3)
  lwz    11, 52(3)
  lwz    12, 56(3)
  lwz    13, 60(3)
  lwz    14, 64(3)
  lwz    15, 68(3)
  lwz    16, 72(3)
  lwz    17, 76(3)
  lwz    18, 80(3)
  lwz    19, 84(3)
  lwz    20, 88(3)
  lwz    21, 92(3)
  lwz    22, 96(3)
  lwz    23,100(3)
  lwz    24,104(3)
  lwz    25,108(3)
  lwz    26,112(3)
  lwz    27,116(3)
  lwz    28,120(3)
  lwz    29,124(3)
  lwz    30,128(3)
  lwz    31,132(3)

#ifndef __NO_FPRS__
  // restore float registers
  lfd     0, 160(3)
  lfd     1, 168(3)
  lfd     2, 176(3)
  lfd     3, 184(3)
  lfd     4, 192(3)
  lfd     5, 200(3)
  lfd     6, 208(3)
  lfd     7, 216(3)
  lfd     8, 224(3)
  lfd     9, 232(3)
  lfd    10, 240(3)
  lfd    11, 248(3)
  lfd    12, 256(3)
  lfd    13, 264(3)
  lfd    14, 272(3)
  lfd    15, 280(3)
  lfd    16, 288(3)
  lfd    17, 296(3)
  lfd    18, 304(3)
  lfd    19, 312(3)
  lfd    20, 320(3)
  lfd    21, 328(3)
  lfd    22, 336(3)
  lfd    23, 344(3)
  lfd    24, 352(3)
  lfd    25, 360(3)
  lfd    26, 368(3)
  lfd    27, 376(3)
  lfd    28, 384(3)
  lfd    29, 392(3)
  lfd    30, 400(3)
  lfd    31, 408(3)
#endif

#if defined(__ALTIVEC__)

#define LOAD_VECTOR_RESTORE(_index)                  \
  lwz     0, 424+_index*16(3)       SEPARATOR        \
  stw     0, 0(4)                   SEPARATOR        \
  lwz     0, 424+_index*16+4(3)     SEPARATOR        \
  stw     0, 4(4)                   SEPARATOR        \
  lwz     0, 424+_index*16+8(3)     SEPARATOR        \
  stw     0, 8(4)                   SEPARATOR        \
  lwz     0, 424+_index*16+12(3)    SEPARATOR        \
  stw     0, 12(4)                  SEPARATOR        \
  lvx     _index, 0, 4
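// e.g. LOAD_VECTOR_RESTORE(2) copies the 16 bytes at offsets 456..471 of the
// context into the aligned buffer at r4 word by word, then loads v2 with lvx.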

#if !defined(_AIX)
  // restore vector registers if any are in use. In the AIX ABI, VRSAVE
  // is not used.
  lwz     5, 156(3)       // test VRsave
  cmpwi   5, 0
  beq     Lnovec

#define LOAD_VECTOR_UNALIGNEDl(_index)                         \
  andis.  0, 5, (1 PPC_LEFT_SHIFT(15-_index))  SEPARATOR       \
  beq     Ldone ## _index              SEPARATOR               \
  LOAD_VECTOR_RESTORE(_index)          SEPARATOR               \
  Ldone ## _index:

#define LOAD_VECTOR_UNALIGNEDh(_index)                         \
  andi.   0, 5, (1 PPC_LEFT_SHIFT(31-_index))  SEPARATOR       \
  beq     Ldone ## _index              SEPARATOR               \
  LOAD_VECTOR_RESTORE(_index)          SEPARATOR               \
  Ldone ## _index:

#else

#define LOAD_VECTOR_UNALIGNEDl(_index) LOAD_VECTOR_RESTORE(_index)
#define LOAD_VECTOR_UNALIGNEDh(_index) LOAD_VECTOR_RESTORE(_index)

#endif // !defined(_AIX)

  subi    4, 1, 16
  rlwinm  4, 4, 0, 0, 27  // mask low 4-bits
  // r4 is now a 16-byte aligned pointer into the red zone
  // the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer

  LOAD_VECTOR_UNALIGNEDl(0)
  LOAD_VECTOR_UNALIGNEDl(1)
  LOAD_VECTOR_UNALIGNEDl(2)
  LOAD_VECTOR_UNALIGNEDl(3)
  LOAD_VECTOR_UNALIGNEDl(4)
  LOAD_VECTOR_UNALIGNEDl(5)
  LOAD_VECTOR_UNALIGNEDl(6)
  LOAD_VECTOR_UNALIGNEDl(7)
  LOAD_VECTOR_UNALIGNEDl(8)
  LOAD_VECTOR_UNALIGNEDl(9)
  LOAD_VECTOR_UNALIGNEDl(10)
  LOAD_VECTOR_UNALIGNEDl(11)
  LOAD_VECTOR_UNALIGNEDl(12)
  LOAD_VECTOR_UNALIGNEDl(13)
  LOAD_VECTOR_UNALIGNEDl(14)
  LOAD_VECTOR_UNALIGNEDl(15)
  LOAD_VECTOR_UNALIGNEDh(16)
  LOAD_VECTOR_UNALIGNEDh(17)
  LOAD_VECTOR_UNALIGNEDh(18)
  LOAD_VECTOR_UNALIGNEDh(19)
  LOAD_VECTOR_UNALIGNEDh(20)
  LOAD_VECTOR_UNALIGNEDh(21)
  LOAD_VECTOR_UNALIGNEDh(22)
  LOAD_VECTOR_UNALIGNEDh(23)
  LOAD_VECTOR_UNALIGNEDh(24)
  LOAD_VECTOR_UNALIGNEDh(25)
  LOAD_VECTOR_UNALIGNEDh(26)
  LOAD_VECTOR_UNALIGNEDh(27)
  LOAD_VECTOR_UNALIGNEDh(28)
  LOAD_VECTOR_UNALIGNEDh(29)
  LOAD_VECTOR_UNALIGNEDh(30)
  LOAD_VECTOR_UNALIGNEDh(31)
#endif

Lnovec:
  lwz     0, 136(3)   // __cr
  mtcr    0
  lwz     0, 148(3)   // __ctr
  mtctr   0
  lwz     0, 0(3)     // __ssr0
  mtctr   0
  lwz     0, 8(3)     // do r0 now
  lwz     5, 28(3)    // do r5 now
  lwz     4, 24(3)    // do r4 now
  lwz     1, 12(3)    // do sp now
  lwz     3, 20(3)    // do r3 last
  bctr

#elif defined(__aarch64__)

#ifndef __has_feature
#define __has_feature(__feature) 0
#endif

#if defined(__ARM_FEATURE_GCS_DEFAULT)
.arch_extension gcs
#endif

//
// extern "C" void __libunwind_Registers_arm64_jumpto(Registers_arm64 *, unsigned);
//
// On entry:
//  thread_state pointer is in x0
//  walked_frames counter is in x1
//
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_arm64_jumpto)

  #if defined(_LIBUNWIND_TRACE_RET_INJECT)
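  // One `ret` is issued per frame that was walked (count in w1): x16 is set to
  // the address of the following `b.ne`, so each `ret x16` lands back on the
  // loop's branch test, presumably to keep return-address tracing/prediction
  // state in step with the frames that were unwound.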
  cbz   w1, 1f
0:
  subs  w1, w1, #1
  adr   x16, #8
  ret   x16

  b.ne  0b
1:
  #endif

  // skip restore of x0,x1 for now
  ldp    x2, x3,  [x0, #0x010]
  ldp    x4, x5,  [x0, #0x020]
  ldp    x6, x7,  [x0, #0x030]
  ldp    x8, x9,  [x0, #0x040]
  ldp    x10,x11, [x0, #0x050]
  ldp    x12,x13, [x0, #0x060]
  ldp    x14,x15, [x0, #0x070]
  // x16 and x17 were clobbered by the call into the unwinder, so no point in
  // restoring them.
  ldp    x18,x19, [x0, #0x090]
  ldp    x20,x21, [x0, #0x0A0]
  ldp    x22,x23, [x0, #0x0B0]
  ldp    x24,x25, [x0, #0x0C0]
  ldp    x26,x27, [x0, #0x0D0]
  ldp    x28,x29, [x0, #0x0E0]

#if defined(__ARM_FP) && __ARM_FP != 0
  ldp    d0, d1,  [x0, #0x110]
  ldp    d2, d3,  [x0, #0x120]
  ldp    d4, d5,  [x0, #0x130]
  ldp    d6, d7,  [x0, #0x140]
  ldp    d8, d9,  [x0, #0x150]
  ldp    d10,d11, [x0, #0x160]
  ldp    d12,d13, [x0, #0x170]
  ldp    d14,d15, [x0, #0x180]
  ldp    d16,d17, [x0, #0x190]
  ldp    d18,d19, [x0, #0x1A0]
  ldp    d20,d21, [x0, #0x1B0]
  ldp    d22,d23, [x0, #0x1C0]
  ldp    d24,d25, [x0, #0x1D0]
  ldp    d26,d27, [x0, #0x1E0]
  ldp    d28,d29, [x0, #0x1F0]
  ldr    d30,     [x0, #0x200]
  ldr    d31,     [x0, #0x208]
#endif
  // Finally, restore sp. This must be done after the last read from the
  // context struct, because it is allocated on the stack, and an exception
  // could clobber the de-allocated portion of the stack after sp has been
  // restored.

  ldr    x16,     [x0, #0x0F8]  // load sp into scratch
  ldr    lr,      [x0, #0x100]  // restore pc into lr

#if __has_feature(ptrauth_calls)
  // The LR was signed in the register state using the address of its save
  // slot as the discriminator. Authenticate it, then re-sign it as a regular
  // ROP-protected return address.
  add    x1, x0, #0x100         // x1 = address of the saved pc slot
  autib  lr, x1                 // authenticate lr against that address
  pacib  lr, x16                // re-sign lr using the new sp (in x16)
#endif

  ldp    x0, x1,  [x0, #0x000]  // restore x0,x1
  mov    sp,x16                 // restore sp
#if defined(__ARM_FEATURE_GCS_DEFAULT)
  // If GCS is enabled we need to push the address we're returning to onto the
  // GCS stack. We can't just return using br, as there won't be a BTI landing
  // pad instruction at the destination.
  mov x16, #1
  chkfeat x16
  cbnz x16, Lnogcs
  gcspushm x30
Lnogcs:
#endif

#if __has_feature(ptrauth_calls)
  retab
#else
  ret    x30                    // jump to pc
#endif

#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
#if (__ARM_ARCH_ISA_THUMB == 2)
  .syntax unified
#endif
  .thumb
#endif

@
@ void libunwind::Registers_arm::restoreCoreAndJumpTo()
@
@ On entry:
@  thread_state pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm20restoreCoreAndJumpToEv)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  @ r8-r11: ldm into r1-r4, then mov to r8-r11
  adds r0, #0x20
  ldm r0!, {r1-r4}
  subs r0, #0x30
  mov r8, r1
  mov r9, r2
  mov r10, r3
  mov r11, r4
  @ r12 does not need loading, it is the intra-procedure-call scratch register
  ldr r2, [r0, #0x34]
  ldr r3, [r0, #0x3c]
  mov sp, r2
  mov lr, r3         @ restore pc into lr
  ldm r0, {r0-r7}
#else
  @ Use lr as base so that r0 can be restored.
  mov lr, r0
  @ 32bit thumb-2 restrictions for ldm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) and lr (r14) cannot both be in the list in an LDM instruction
  ldm lr, {r0-r12}
  ldr sp, [lr, #52]
  ldr lr, [lr, #60]  @ restore pc into lr
#endif
#if defined(__ARM_FEATURE_BTI_DEFAULT) && !defined(__ARM_ARCH_ISA_ARM)
  // 'bx' is not BTI setting when used with lr, therefore r12 is used instead
  mov r12, lr
  JMP(r12)
#else
  JMP(lr)
#endif

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMD(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMDEPv)
  @ VFP and iwMMX instructions are only available when compiling with the flags
  @ that enable them. We do not want to do that in the library (because we do not
  @ want the compiler to generate instructions that access those) but this is
  @ only accessed if the personality routine needs these registers. Use of
  @ these registers implies they are, actually, available on the target, so
  @ it's ok to execute.
  @ So, generate the instruction using the corresponding coprocessor mnemonic.
  vldmia r0, {d0-d15}
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMXEPv)
  vldmia r0, {d0-d15} @ fldmiax is deprecated in ARMv7+ and now behaves like vldmia
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPv3(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreVFPv3EPv)
  vldmia r0, {d16-d31}
  JMP(lr)

#if defined(__ARM_WMMX)

@
@ static void libunwind::Registers_arm::restoreiWMMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreiWMMXEPv)
  ldcl p1, cr0, [r0], #8  @ wldrd wR0, [r0], #8
  ldcl p1, cr1, [r0], #8  @ wldrd wR1, [r0], #8
  ldcl p1, cr2, [r0], #8  @ wldrd wR2, [r0], #8
  ldcl p1, cr3, [r0], #8  @ wldrd wR3, [r0], #8
  ldcl p1, cr4, [r0], #8  @ wldrd wR4, [r0], #8
  ldcl p1, cr5, [r0], #8  @ wldrd wR5, [r0], #8
  ldcl p1, cr6, [r0], #8  @ wldrd wR6, [r0], #8
  ldcl p1, cr7, [r0], #8  @ wldrd wR7, [r0], #8
  ldcl p1, cr8, [r0], #8  @ wldrd wR8, [r0], #8
  ldcl p1, cr9, [r0], #8  @ wldrd wR9, [r0], #8
  ldcl p1, cr10, [r0], #8 @ wldrd wR10, [r0], #8
  ldcl p1, cr11, [r0], #8 @ wldrd wR11, [r0], #8
  ldcl p1, cr12, [r0], #8 @ wldrd wR12, [r0], #8
  ldcl p1, cr13, [r0], #8 @ wldrd wR13, [r0], #8
  ldcl p1, cr14, [r0], #8 @ wldrd wR14, [r0], #8
  ldcl p1, cr15, [r0], #8 @ wldrd wR15, [r0], #8
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreiWMMXControl(unw_uint32_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreiWMMXControlEPj)
  ldc2 p1, cr8, [r0], #4  @ wldrw wCGR0, [r0], #4
  ldc2 p1, cr9, [r0], #4  @ wldrw wCGR1, [r0], #4
  ldc2 p1, cr10, [r0], #4 @ wldrw wCGR2, [r0], #4
  ldc2 p1, cr11, [r0], #4 @ wldrw wCGR3, [r0], #4
  JMP(lr)

#endif

#elif defined(__or1k__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind14Registers_or1k6jumptoEv)
#
# void libunwind::Registers_or1k::jumpto()
#
# On entry:
#  thread_state pointer is in r3
#

  # restore integral registers
  l.lwz r0,  0(r3)
  l.lwz r1,  4(r3)
  l.lwz r2,  8(r3)
  # skip r3 for now
  l.lwz r4, 16(r3)
  l.lwz r5, 20(r3)
  l.lwz r6, 24(r3)
  l.lwz r7, 28(r3)
  l.lwz r8, 32(r3)
  # skip r9
  l.lwz r10, 40(r3)
  l.lwz r11, 44(r3)
  l.lwz r12, 48(r3)
  l.lwz r13, 52(r3)
  l.lwz r14, 56(r3)
  l.lwz r15, 60(r3)
  l.lwz r16, 64(r3)
  l.lwz r17, 68(r3)
  l.lwz r18, 72(r3)
  l.lwz r19, 76(r3)
  l.lwz r20, 80(r3)
  l.lwz r21, 84(r3)
  l.lwz r22, 88(r3)
  l.lwz r23, 92(r3)
  l.lwz r24, 96(r3)
  l.lwz r25,100(r3)
  l.lwz r26,104(r3)
  l.lwz r27,108(r3)
  l.lwz r28,112(r3)
  l.lwz r29,116(r3)
  l.lwz r30,120(r3)
  l.lwz r31,124(r3)

  # load new pc into ra
  l.lwz r9, 128(r3)

  # at last, restore r3
  l.lwz r3, 12(r3)

  # jump to pc
  l.jr r9
   l.nop

#elif defined(__hexagon__)
# On entry:
#  thread_state pointer is in r0
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind17Registers_hexagon6jumptoEv)
#
# void libunwind::Registers_hexagon::jumpto()
#
  r8 = memw(r0+#32)
  r9 = memw(r0+#36)
  r10 = memw(r0+#40)
  r11 = memw(r0+#44)

  r12 = memw(r0+#48)
  r13 = memw(r0+#52)
  r14 = memw(r0+#56)
  r15 = memw(r0+#60)

  r16 = memw(r0+#64)
  r17 = memw(r0+#68)
  r18 = memw(r0+#72)
  r19 = memw(r0+#76)

  r20 = memw(r0+#80)
  r21 = memw(r0+#84)
  r22 = memw(r0+#88)
  r23 = memw(r0+#92)

  r24 = memw(r0+#96)
  r25 = memw(r0+#100)
  r26 = memw(r0+#104)
  r27 = memw(r0+#108)

  r28 = memw(r0+#112)
  r29 = memw(r0+#116)
  r30 = memw(r0+#120)
  r31 = memw(r0+#132)

  r1 = memw(r0+#128)
  c4 = r1   // Predicate register
  r1 = memw(r0+#4)
  r0 = memw(r0)
  jumpr r31
#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32

//
// void libunwind::Registers_mips_o32::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind18Registers_mips_o326jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
#ifdef __mips_hard_float
#if __mips_fpr != 64
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
#else
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f1, (4 * 36 + 8 * 1)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f3, (4 * 36 + 8 * 3)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f5, (4 * 36 + 8 * 5)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f7, (4 * 36 + 8 * 7)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f9, (4 * 36 + 8 * 9)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f11, (4 * 36 + 8 * 11)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f13, (4 * 36 + 8 * 13)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f15, (4 * 36 + 8 * 15)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f17, (4 * 36 + 8 * 17)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f19, (4 * 36 + 8 * 19)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f21, (4 * 36 + 8 * 21)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f23, (4 * 36 + 8 * 23)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f25, (4 * 36 + 8 * 25)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f27, (4 * 36 + 8 * 27)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f29, (4 * 36 + 8 * 29)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
  ldc1  $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif
#if __mips_isa_rev < 6
  // restore hi and lo
  lw    $8, (4 * 33)($4)
  mthi  $8
  lw    $8, (4 * 34)($4)
  mtlo  $8
#endif
  // r0 is zero
  lw    $1, (4 * 1)($4)
  lw    $2, (4 * 2)($4)
  lw    $3, (4 * 3)($4)
  // skip a0 for now
  lw    $5, (4 * 5)($4)
  lw    $6, (4 * 6)($4)
  lw    $7, (4 * 7)($4)
  lw    $8, (4 * 8)($4)
  lw    $9, (4 * 9)($4)
  lw    $10, (4 * 10)($4)
  lw    $11, (4 * 11)($4)
  lw    $12, (4 * 12)($4)
  lw    $13, (4 * 13)($4)
  lw    $14, (4 * 14)($4)
  lw    $15, (4 * 15)($4)
  lw    $16, (4 * 16)($4)
  lw    $17, (4 * 17)($4)
  lw    $18, (4 * 18)($4)
  lw    $19, (4 * 19)($4)
  lw    $20, (4 * 20)($4)
  lw    $21, (4 * 21)($4)
  lw    $22, (4 * 22)($4)
  lw    $23, (4 * 23)($4)
  lw    $24, (4 * 24)($4)
  lw    $25, (4 * 25)($4)
  lw    $26, (4 * 26)($4)
  lw    $27, (4 * 27)($4)
  lw    $28, (4 * 28)($4)
  lw    $29, (4 * 29)($4)
  // load new pc into ra
  lw    $31, (4 * 32)($4)
  // MIPS 1 has load delay slot. Ensure lw $31 and jr are separated by an instruction.
  lw    $30, (4 * 30)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  lw    $4, (4 * 4)($4)
  .set pop

#elif defined(__mips64)

//
// void libunwind::Registers_mips_newabi::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind21Registers_mips_newabi6jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
#ifdef __mips_hard_float
  .irp i,FROM_0_TO_31
    ldc1 $f\i, (280+8*\i)($4)
  .endr
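  // e.g. the \i = 5 iteration expands to `ldc1 $f5, 320($4)` (offset 280 + 8 * 5)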
#endif
#if __mips_isa_rev < 6
  // restore hi and lo
  ld    $8, (8 * 33)($4)
  mthi  $8
  ld    $8, (8 * 34)($4)
  mtlo  $8
#endif
  // r0 is zero
  ld    $1, (8 * 1)($4)
  ld    $2, (8 * 2)($4)
  ld    $3, (8 * 3)($4)
  // skip a0 for now
  .irp i,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
    ld $\i, (8 * \i)($4)
  .endr
  // load new pc into ra
  ld    $31, (8 * 32)($4)
  // MIPS 1 has a load delay slot. Ensure ld $31 and jr are separated by an instruction.
  ld    $30, (8 * 30)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  ld    $4, (8 * 4)($4)
  .set pop

#elif defined(__sparc__) && defined(__arch64__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind17Registers_sparc646jumptoEv)
//
// void libunwind::Registers_sparc64::jumpto()
//
// On entry:
//  thread_state pointer is in %o0
//
  .register %g2, #scratch
  .register %g3, #scratch
  .register %g6, #scratch
  .register %g7, #scratch
  flushw
  ldx  [%o0 + 0x08], %g1
  ldx  [%o0 + 0x10], %g2
  ldx  [%o0 + 0x18], %g3
  ldx  [%o0 + 0x20], %g4
  ldx  [%o0 + 0x28], %g5
  ldx  [%o0 + 0x30], %g6
  ldx  [%o0 + 0x38], %g7
  ldx  [%o0 + 0x48], %o1
  ldx  [%o0 + 0x50], %o2
  ldx  [%o0 + 0x58], %o3
  ldx  [%o0 + 0x60], %o4
  ldx  [%o0 + 0x68], %o5
  ldx  [%o0 + 0x70], %o6
  ldx  [%o0 + 0x78], %o7
  ldx  [%o0 + 0x80], %l0
  ldx  [%o0 + 0x88], %l1
  ldx  [%o0 + 0x90], %l2
  ldx  [%o0 + 0x98], %l3
  ldx  [%o0 + 0xa0], %l4
  ldx  [%o0 + 0xa8], %l5
  ldx  [%o0 + 0xb0], %l6
  ldx  [%o0 + 0xb8], %l7
  ldx  [%o0 + 0xc0], %i0
  ldx  [%o0 + 0xc8], %i1
  ldx  [%o0 + 0xd0], %i2
  ldx  [%o0 + 0xd8], %i3
  ldx  [%o0 + 0xe0], %i4
  ldx  [%o0 + 0xe8], %i5
  ldx  [%o0 + 0xf0], %i6
  ldx  [%o0 + 0xf8], %i7
  jmp  %o7
   ldx [%o0 + 0x40], %o0

#elif defined(__sparc__)

//
// void libunwind::Registers_sparc_o32::jumpto()
//
// On entry:
//  thread_state pointer is in o0
//
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_sparc6jumptoEv)
  ta 3
  ldd [%o0 + 64],  %l0
  ldd [%o0 + 72],  %l2
  ldd [%o0 + 80],  %l4
  ldd [%o0 + 88],  %l6
  ldd [%o0 + 96],  %i0
  ldd [%o0 + 104], %i2
  ldd [%o0 + 112], %i4
  ldd [%o0 + 120], %i6
  ld  [%o0 + 60],  %o7
  jmp %o7
   nop

#elif defined(__riscv)

//
// void libunwind::Registers_riscv::jumpto()
//
// On entry:
//  thread_state pointer is in a0
//
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_riscv6jumptoEv)
# if defined(__riscv_flen)
  .irp i,FROM_0_TO_31
    FLOAD f\i, (RISCV_FOFFSET + RISCV_FSIZE * \i)(a0)
  .endr
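  // e.g. the \i = 10 iteration expands to
  // `FLOAD f10, (RISCV_FOFFSET + RISCV_FSIZE * 10)(a0)`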
# endif

  // x0 is zero
  ILOAD x1, (RISCV_ISIZE * 0)(a0) // restore pc into ra
  .irp i,2,3,4,5,6,7,8,9
    ILOAD x\i, (RISCV_ISIZE * \i)(a0)
  .endr
  // skip a0 for now
#if defined(__riscv_32e)
  .irp i,11,12,13,14,15
#else
  .irp i,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
#endif
    ILOAD x\i, (RISCV_ISIZE * \i)(a0)
  .endr
  ILOAD x10, (RISCV_ISIZE * 10)(a0)   // restore a0

  ret                                 // jump to ra

#elif defined(__s390x__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_s390x6jumptoEv)
//
// void libunwind::Registers_s390x::jumpto()
//
// On entry:
//  thread_state pointer is in r2
//

  // Skip PSWM, but load PSWA into r1
  lg %r1, 8(%r2)

  // Restore FPRs
  .irp i,FROM_0_TO_15
    ld %f\i, (144+8*\i)(%r2)
  .endr
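  // e.g. the \i = 7 iteration expands to `ld %f7, 200(%r2)` (offset 144 + 8 * 7)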

  // Restore GPRs - skipping %r0 and %r1
  lmg %r2, %r15, 32(%r2)

  // Return to PSWA (was loaded into %r1 above)
  br %r1

#elif defined(__loongarch__) && __loongarch_grlen == 64

//
// void libunwind::Registers_loongarch::jumpto()
//
// On entry:
//  thread_state pointer is in $a0($r4)
//
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind19Registers_loongarch6jumptoEv)
# if __loongarch_frlen == 64
  .irp i,FROM_0_TO_31
    fld.d $f\i, $a0, (8 * 33 + 8 * \i)
  .endr
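  // e.g. the \i = 6 iteration expands to `fld.d $f6, $a0, (8 * 33 + 48)`;
  // the FP area starts after the 32 GPR slots and the saved pc slot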
# endif

  // $r0 is zero
  .irp i,1,2,3
    ld.d $r\i, $a0, (8 * \i)
  .endr
  // skip $a0 for now
  .irp i,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
    ld.d $r\i, $a0, (8 * \i)
  .endr

  ld.d  $ra, $a0, (8 * 32)  // load new pc into $ra
  ld.d  $a0, $a0, (8 * 4)   // restore $a0 last

  jr    $ra

#endif

#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */

NO_EXEC_STACK_DIRECTIVE

#endif /* !defined(__wasm__) */